blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
55aa7baf5a1939931d0771a762ff8f4e5f872773 | eb0249d6912d430f2c4535de08670e92679f6d7b | /src/PReLIM.py | 435955e339345f30c1fd62a18024d8767bff9ba8 | [] | no_license | xflicsu/PReLIM | 80ce7ec64e79d8f13a25f902d2d6176c4f66f5c6 | 686e95a5250b9f44034043afb08c01774796e5c2 | refs/heads/master | 2022-09-10T02:34:16.146128 | 2020-05-27T21:04:15 | 2020-05-27T21:04:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,154 | py | from __future__ import print_function
"""
Author:
Jack Duryea
Waterland Lab
Computational Epigenetics Section
Baylor College of Medicine
PReLIM: Precise Read Level Imputation of Methylation
PReLIM imputes missing CpG methylation
states in CpG matrices.
"""
# standard imports
from scipy import stats
import numpy as np
import warnings
import numpy as np
import sys
from tqdm import tqdm
import copy
import time
from random import shuffle
from collections import defaultdict
import random
# sklearn imports
from sklearn.preprocessing import normalize
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_curve, auc
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV
# Pickle
try:
import cPickle as p
except ModuleNotFoundError:
import pickle as p
# warnings suck, turn them off
if sys.version_info[0] < 3:
warnings.simplefilter("ignore", DeprecationWarning)
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
import md5, sha
# TODO: most of these fields are redundant in our application
class CpGBin():
	"""
	A class that contains information about a CpG Bin. Does not need
	to be used directly, PReLIM will use this class internally.
	"""
	def __init__(self,
				matrix,
				#relative_positions
				binStartInc=None,
				binEndInc=None,
				cpgPositions=None,
				sequence="",
				encoding=None,
				missingToken= -1,
				chromosome=None,
				binSize=100,
				species="MM10",
				verbose=True,
				tag1=None,
				tag2=None):
		"""
		Constructor for a bin.

		Inputs:
		matrix: numpy array, the bin's CpG matrix.
		binStartInc: integer, the starting, inclusive, chromosomal index of the bin.
		binEndInc: integer, the ending, inclusive, chromosomal index of the bin.
		cpgPositions: array of integers, the chromosomal positions of the CpGs in the bin.
		sequence: string, nucleotide sequence (A,C,G,T)
		encoding: array, a reduced representation of the bin's CpG matrix
		missingToken: integer, the token that represents missing data in the matrix.
		chromosome: string, the chromosome this bin resides in.
		binSize: integer, the number of base pairs this bin covers
		species: string, the species this bin belongs to.
		verbose: boolean, print warnings, set to False for no error checking and faster speed
		tag1: anything, for custom use.
		tag2: anything, for custom use.
		"""
		self.cpgDensity = matrix.shape[1]   # number of CpG sites (columns)
		self.readDepth = matrix.shape[0]    # number of reads (rows)
		self.matrix = np.array(matrix, dtype=float)
		self.binStartInc = binStartInc
		self.binEndInc = binEndInc
		self.cpgPositions = cpgPositions
		self.sequence = sequence
		# Bug fix: `encoding` and `verbose` were accepted (and documented)
		# but silently discarded; store them so callers can read them back.
		self.encoding = encoding
		self.missingToken = missingToken
		self.chromosome = chromosome
		self.binSize = binSize
		self.species = species
		self.verbose = verbose
		self.tag1 = tag1
		self.tag2 = tag2
class PReLIM():
	"""
	A PReLIM imputation model.

	Typical usage::

		import numpy as np
		# CpG matrices: 1 = methylated, 0 = unmethylated, -1 = unknown.
		# Each column is a CpG site, each row is a read.
		bin1 = np.array([[1, 0], [0, -1], [-1, 1], [0, 0]], dtype=float)
		bin2 = np.array([[1, 0], [1, 0], [-1, 1], [0, 0]], dtype=float)
		bins = [bin1, bin2]

		model = PReLIM(cpgDensity=2)
		# model_file="no" means: do not write a model file to disk.
		model.train(bins, model_file="no")

		imputed_bin1 = model.impute(bin1)
		# Batch imputation on many bins at once is much faster.
		imputed_bins = model.impute_many(bins)
	"""

	def __init__(self, cpgDensity=2):
		"""
		Constructor for a PReLIM model.

		:param cpgDensity: the density (number of CpG sites) of the bins that will be used
		"""
		self.model = None
		self.cpgDensity = cpgDensity
		# Canonical methylation-state tokens.
		self.METHYLATED = 1
		self.UNMETHYLATED = 0
		self.MISSING = -1
		# Lower-case aliases kept for backward compatibility.
		self.methylated = 1
		self.unmethylated = 0
		self.unknown = -1
# Train a model
def train(self, bin_matrices, model_file="no", verbose=False):
"""
Train a PReLIM model using cpg matrices.
:param bin_matrices: list of cpg matrices
:param model_file: The name of the file to save the model to. If None, then create a file name that includes a timestamp. If you don't want to save a file, set this to "no"
:param verbose: prints more info if true
"""
X,y = self.get_X_y(bin_matrices, verbose=verbose)
# Train the neural network model
self.fit(X,y, model_file=model_file, verbose=verbose)
def fit(self,
X_train,
y_train,
n_estimators = [10, 50, 100, 500, 1000],
cores = -1,
max_depths = [1, 5, 10, 20, 30],
model_file=None,
verbose=False
):
"""
Train a random forest model using grid search on a feature matrix (X) and class labels (y)
Usage:
model.fit(X_train, y_train)
:param X_train: numpy array, Contains feature vectors.
:param y_train: numpy array, Contains labels for training data.
:param n_estimators: list, the number of estimators to try during a grid search.
:param max_depths: list, the maximum depths of trees to try during a grid search.
:param cores: integer, the number of cores to use during training, helpful for grid search.
:param model_file: string,The name of the file to save the model to.
If None, then create a file name that includes a timestamp.
If you don't want to save a file, set this to "no"
:return: The trained sklearn model
"""
grid_param = {
"n_estimators": n_estimators,
"max_depth": max_depths,
}
# Note: let the grid search use a lot of cores, but only use 1 for each forest
# since dispatching can take a lot of time
rf = RandomForestClassifier(n_jobs=1)
self.model = GridSearchCV(rf, grid_param, n_jobs=2, cv=5, verbose=verbose)
self.model.fit(X_train, y_train)
# save the model
if model_file == "no":
return self.model
if not model_file:
model_file = "PReLIM_model" + str(time.time())
p.dump(self.model, open(model_file,"wb"))
return self.model
# Feature collection directly from bins
def get_X_y(self, bin_matrices, verbose=False):
"""
:param bin_matrices: list of CpG matrices
:param verbose: prints more info if true
:return: feature matrix (X) and class labels (y)
"""
bins = []
# convert to bin objects for ease of use
for matrix in bin_matrices:
mybin = CpGBin( matrix=matrix )
bins.append( mybin )
# find bins with no missing data
complete_bins = _filter_missing_data( bins )
shuffle( complete_bins )
# apply masks
masked_bins = _apply_masks( complete_bins, bins )
# extract features
X, y = self._collectFeatures( masked_bins )
return X, y
# Return a vector of predicted classes
def predict_classes(self, X):
"""
Predict the classes of the samples in the given feature matrix
Usage:
y_pred = CpGNet.predict_classes(X)
:param X: numpy array, contains feature vectors
:param verbose: prints more info if true
:return: 1-d numpy array of predicted classes
"""
return self.model.predict(X)
# Return a vector of probabilities for methylation
def predict(self, X):
"""
Predict the probability of methylation for each sample in the given feature matrix
Usage:
y_pred = CpGNet.predict(X)
:param X: numpy array, contains feature vectors
:param verbose: prints more info if true
:return: 1-d numpy array of prediction values
"""
return self.model.predict_proba(X)[:,1]
def predict_proba(self, X):
"""
Predict the classes of the samples in the given feature matrix
Same as predict, just a convenience to have in case of differen styles
Usage:
y_pred = CpGNet.predict_classes(X)
:param X: numpy array, contains feature vectors
:param verbose: prints more info if true
:return: 1-d numpy array of predicted classes
"""
return self.model.predict_proba(X)[:1]
# Load a saved model
def loadWeights(self, model_file):
"""
self.model is loaded with the provided weights
:param model_file: string, name of file with a saved model
"""
self.model = p.load(open(model_file,"rb"))
# Imputes missing values in Bins
def impute(self, matrix):
"""
Impute the missing values in a CpG matrix. Values are filled with the
predicted probability of methylation.
:param matrix: a 2d np array, dtype=float, representing a CpG matrix, 1=methylated, 0=unmethylated, -1=unknown
:return: A 2d numpy array with predicted probabilities of methylation
"""
X = self._get_imputation_features(matrix)
if len(X) == 0: # nothing to impute
return matrix
predictions = self.predict(X)
k = 0 # keep track of prediction index for missing states
predicted_matrix = np.copy(matrix)
for i in range(predicted_matrix.shape[0]):
for j in range(predicted_matrix.shape[1]):
if predicted_matrix[i, j] == -1:
predicted_matrix[i, j] = predictions[k]
k += 1
return predicted_matrix
# Extract all features for all matrices so we can predict in bulk, this is where the speedup comes from
def impute_many(self, matrices):
'''
Imputes a bunch of matrices at the same time to help speed up imputation time.
:param matrices: list of CpG matrices, where each matrix is a 2d np array, dtype=float, representing a CpG matrix, 1=methylated, 0=unmethylated, -1=unknown
:return: A List of 2d numpy arrays with predicted probabilities of methylation for unknown values.
'''
X = np.array([features for matrix_features in [self._get_imputation_features(matrix) for matrix in matrices] for features in matrix_features])
if len(X) == 0:
return matrices
predictions = self.predict(X)
predicted_matrices = []
k = 0 # keep track of prediction index for missing states, order is crucial!
for matrix in matrices:
predicted_matrix = np.copy(matrix)
for i in range(predicted_matrix.shape[0]):
for j in range(predicted_matrix.shape[1]):
if predicted_matrix[i, j] == -1:
predicted_matrix[i, j] = predictions[k]
k += 1
predicted_matrices.append(predicted_matrix)
return predicted_matrices
### Helper functions, for private use only ###
# get a feature matrix for the given cpg matrix
def _get_imputation_features(self,matrix):
'''
Returns a vector of features needed for the imputation of this matrix
Each sample is an individual CpG, and the features are
the row mean, the column mean, the position of the cpg in the matrix,
the row, and the relative proportions of each methylation pattern
:param matrix: a 2d np array, dtype=float, representing a CpG matrix, 1=methylated, 0=unmethylated, -1=unknown
:return: A feature vector for the matrix
'''
X = []
numReads = matrix.shape[0]
density = matrix.shape[1]
nan_copy = np.copy(matrix)
nan_copy[nan_copy == -1] = np.nan
# get the column and row means
column_means = np.nanmean(nan_copy, axis=0)
row_means = np.nanmean(nan_copy, axis=1)
encoding = self._encode_input_matrix(matrix)[0]
# iterate over all values in the matrix
for i in range(numReads):
for j in range(density):
observed_state = matrix[i, j]
# only record missing values
if observed_state != -1:
continue
row_mean = row_means[i]
col_mean = column_means[j]
row = np.copy(matrix[i])
row[j] = -1
# features for a single sample
data = [row_mean] + [col_mean] + [i, j] + list(row) + list(encoding)
X.append(data)
# list to np array
X = np.array(X)
return X
# Returns a matrix encoding of a CpG matrix
def _encode_input_matrix(self, m):
"""
:param m: a 2d np array, dtype=float, representing a CpG matrix, 1=methylated, 0=unmethylated, -1=unknown
:return: list of relative proportions of each type of methylation pattern, number of reads
"""
matrix = np.copy(m)
n_cpgs = matrix.shape[1]
matrix += 1 # deal with -1s
base_3_vec = np.power(3, np.arange(n_cpgs - 1, -1, -1))
encodings = np.dot(base_3_vec, matrix.T)
encoded_vector_dim = np.power(3, n_cpgs)
encoded_vector = np.zeros(encoded_vector_dim)
for x in encodings:
encoded_vector[int(x)] += 1
num_reads = encodings.shape[0]
# Now we normalize
encoded_vector_norm = normalize([encoded_vector], norm="l1")
return encoded_vector_norm[0], num_reads
# finds the majority class of the given column, discounting the current cpg
def _get_column_mean(self, matrix, col_i, current_cpg_state):
"""
:param matrix: a 2d np array, dtype=float, representing a CpG matrix, 1=methylated, 0=unmethylated, -1=unknown
:param col_i: integer, the column index
:param current_cpg_state: the cpg to discount
:return: the mean value of column col_i, discounting current_cpg_state
"""
sub = matrix[:, col_i]
return self._get_mean(sub, current_cpg_state)
# finds the majority class of the given read, discounting the current cpg
def _get_read_mean(self, matrix, read_i, current_cpg_state):
"""
:param matrix: a 2d np array, dtype=float, representing a CpG matrix, 1=methylated, 0=unmethylated, -1=unknown
:param read_i: integer, the row index
:param current_cpg_state: the cpg to discount
:return: the mean value of row read_i, discounting current_cpg_state
"""
sub = matrix[read_i, :]
return self._get_mean(sub, current_cpg_state)
# Return the mean of sub matrix, discounting the current cpg methylation state
def _get_mean(self, sub_matrix, current_cpg_state):
'''
:param sub_matrix: a list of individual cpgs
:param current_cpg_state: the cpg to discount
:return: the mean value of the list, discounting current_cpg_state
'''
num_methy = np.count_nonzero(sub_matrix == self.METHYLATED)
num_unmethy = np.count_nonzero(sub_matrix == self.UNMETHYLATED)
if current_cpg_state == self.METHYLATED:
num_methy -= 1
num_methy = max(0, num_methy)
if current_cpg_state == self.UNMETHYLATED:
num_unmethy -= 1
num_unmethy = max(0, num_unmethy)
if float(num_methy + num_unmethy) == 0:
return -2
return float(num_methy) / float(num_methy + num_unmethy)
# Returns X, y
# note: y can contain the labels 1,0, -1
def _collectFeatures(self, bins):
"""
Given a list of cpg bins, collect features for each artificially masked CpG
and record the hidden value as the class label.
:param matrix: bins: list of CpG bins that contain CpG matrices
:return: feature matrix X and class labels y
"""
X = []
Y = []
for Bin in tqdm(bins):
observed_matrix = Bin.tag2["observed"]
truth_matrix = Bin.tag2["truth"]
encoding = self._encode_input_matrix(observed_matrix)[0]
numReads = observed_matrix.shape[0]
density = observed_matrix.shape[1]
#positions = Bin.cpgPositions
nan_copy = np.copy(observed_matrix)
nan_copy[nan_copy == -1] = np.nan
column_means = np.nanmean(nan_copy,axis=0)
row_means = np.nanmean(nan_copy,axis=1)
for i in range(numReads):
for j in range(density):
observed_state = observed_matrix[i,j]
if observed_state != -1:
continue
state = truth_matrix[i,j]
Y.append(state)
# row and column means
row_mean = row_means[i]
col_mean = column_means[j]
# j is the current index in the row
# encoding is the matrix encoding vector
# differences is the difference in positions of the cpgs
row = np.copy(observed_matrix[i])
row[j] = -1
data = [row_mean] + [col_mean] + [i, j] + list(row) + list(encoding)
X.append(data)
X = np.array(X)
Y = np.array(Y)
Y.astype(int)
return X, Y
#### Helper functions ####
# Returns a list of bins similar to the input
# but matrix rows with missing values are removed
def _filter_bad_reads(bins):
"""
Given a list of cpg bins, remove reads with missing values
so we can mask them.
:param matrix: bins: list of CpG bins that contain CpG matrices
:return: bins, but all reads wiht missing values have been removed
"""
filtered_bins = []
for Bin in bins:
newBin = copy.deepcopy(Bin)
matrix = newBin.matrix
# find rows with missing values
counts = np.count_nonzero(matrix == -1, axis=1)
idx = counts == 0
matrix_filtered = matrix[idx]
newBin.matrix = matrix_filtered
filtered_bins.append(newBin)
return filtered_bins
# Returns a mapping of dimensions to list of masks that can be used on data
# of that size. the missing pattern is in matrix form.
# -1 is missing, 2 is known
def _extract_masks(bins):
	"""
	Collect the missing-value patterns ("masks") found in the given bins,
	grouped by matrix shape. In a mask, -1 marks missing and 2 marks known.

	:param bins: list of CpG bins that contain CpG matrices
	:return: dict mapping matrix shape -> list of mask matrices
	"""
	masks = defaultdict(lambda: [])
	min_missing = 1  # a useful mask must hide at least one value
	for Bin in tqdm(bins):
		mask = np.copy(Bin.matrix)
		mask[mask >= 0] = 2  # collapse all observed states to the "known" token
		if np.count_nonzero(mask == -1) >= min_missing:
			masks[mask.shape].append(mask)
	return masks
# Extract masks from original matrices and apply them to the complete matrices
def _apply_masks(filtered_bins, all_bins):
	"""
	Extract missing-value masks from all_bins and apply one (chosen at
	random) to each complete bin of a matching shape.

	Each returned bin gets a tag2 dict with the "truth" matrix, the masked
	"observed" matrix, and the "mask" itself.

	:param filtered_bins: bins with no reads with missing values
	:param all_bins: list of CpG bins that contain CpG matrices
	:return: list of bins annotated with truth/observed/mask matrices
	"""
	masks = _extract_masks(all_bins)
	ready_bins = []
	for Bin in filtered_bins:
		truth_matrix = Bin.matrix
		shape_masks = masks.get(truth_matrix.shape, [])
		if len(shape_masks) > 0:
			mask = random.choice(shape_masks)
			# Elementwise min: wherever the mask is -1 the value becomes -1,
			# wherever it is 2 the truth value (0 or 1) survives.
			observed = np.minimum(truth_matrix, mask)
			Bin.tag2 = {"truth": truth_matrix, "observed": observed, "mask": mask}
			ready_bins.append(Bin)
	return ready_bins
# Get a list of bins with no missing data
def _filter_missing_data(bins, min_read_depth=1):
	"""
	Return bins with all incomplete reads removed, keeping only bins that
	still have at least min_read_depth reads afterwards.

	:param bins: list of CpG bins that contain CpG matrices
	:param min_read_depth: minimum number of reads needed for a bin to be kept
	:return: filtered list of bins
	"""
	complete_bins = _filter_bad_reads(bins)
	# Secondary depth filter: drop bins left too shallow by the read filter.
	return [b for b in complete_bins if b.matrix.shape[0] >= min_read_depth]
| [
"jd50@rice.edu"
] | jd50@rice.edu |
b41399f6dc58e4f54b33c3b69f61ba1f94ba8a91 | 92ca35ebc9e2c97132a7ce4411b7cb867d9a0663 | /main.py | 115eb4ab278d816331856e093eef0998a98b5e18 | [] | no_license | YarovoyDaniil/pandasplotly2 | bbe556da966c48be87dea540e8c1a76e9033f66c | 7c6c16adf4ff29c031d1c8bbe69157789c57bce6 | refs/heads/master | 2020-11-25T18:52:01.788227 | 2019-12-18T10:49:57 | 2019-12-18T10:49:57 | 228,801,031 | 0 | 0 | null | 2019-12-18T09:09:24 | 2019-12-18T09:09:23 | null | UTF-8 | Python | false | false | 1,319 | py | import os
# Point the Google Cloud client library at the local service-account key file.
os.environ["GOOGLE_APPLICATION_CREDENTIALS"]="keys.json"
import pandas as pd
from bq_helper import BigQueryHelper
import plotly.graph_objs as go
from plotly.offline import plot
# Helper bound to the public EPA historical air-quality BigQuery dataset.
bq_assistant = BigQueryHelper('bigquery-public-data','epa_historical_air_quality')
QUERY = """
SELECT `state_code`, `date_local`, `mdl`, `parameter_name`
FROM `bigquery-public-data.epa_historical_air_quality.co_hourly_summary`
LIMIT 1000
"""
# Run the query and pull the result into a pandas DataFrame.
df = bq_assistant.query_to_pandas(QUERY)
# Count rows per state, per date, and per method-detection-limit value.
state_code_count=df.groupby(['state_code'])['parameter_name'].count()
date_local_count=df.groupby(['date_local'])['parameter_name'].count()
mdl_count=df.groupby(['mdl'])['parameter_name'].count()
# Scatter plot: row counts by state code.
trace1 = go.Scatter(
x=state_code_count.index,
y=state_code_count.values
)
# Pie chart: row counts by sample date.
trace2 = go.Pie(
labels=date_local_count.index,
values=date_local_count.values
)
# Bar chart: row counts by method detection limit.
trace3 = go.Bar(
x=mdl_count.index,
y=mdl_count.values
)
layout1= go.Layout(
xaxis=dict(title='state_code'),
yaxis=dict(title='parmater_name')
)
layout2=go.Layout(
title='data',
xaxis=dict(title=''),
yaxis=dict(title='')
)
figure1 = go.Figure(data=[trace1], layout=layout1)
figure2 = go.Figure(data=[trace2], layout=layout2)
fig = dict(data = [trace3]) | [
"noreply@github.com"
] | noreply@github.com |
def solution(arr):
    """Remove the smallest element from arr (in place); return [-1] if the result is empty.

    Note: mutates the input list, matching the original behavior.
    """
    # Bug fix: min() on an empty list raises ValueError; guard first.
    if not arr:
        return [-1]
    arr.remove(min(arr))
    return arr if arr else [-1]
"palex3012@gmail.com"
] | palex3012@gmail.com |
15f95d4a87c76a4b3ff34c7d8d6b5dee3be3e112 | 301fcb8bea1c054d39214691f80fbdea734d5a5d | /LiveGrip/app/api/utils.py | e5b8eaf346c71f83c9cdf4e2ec6c4e98ec572b05 | [] | no_license | SykoTheKiD/LiveGrip-Backend | e245edb87591d3b1cfdbcd3acb533d294fe3a7bf | 44689198c12ff4f698c937deb766855beb71538a | refs/heads/master | 2021-03-27T15:02:30.027428 | 2016-08-16T22:22:41 | 2016-08-16T22:22:41 | 67,225,787 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 228 | py | from django.utils import timezone
import datetime
TOKEN_VALID_DATE = 30 # days
def new_token_expiry_date():
    """
    Return the expiry timestamp for a newly issued token
    (now plus TOKEN_VALID_DATE days).
    """
    validity = datetime.timedelta(days=TOKEN_VALID_DATE)
    return timezone.now() + validity
"jay@jaysyko.com"
] | jay@jaysyko.com |
b39d36331b8f1e4f851d206c61a3e1cc0ae2df1e | 9125dd7cefe406db4c4f30e147cf2a709c4e11ac | /flask/bin/easy_install-2.7 | 42e4774853bca430e0e773f89a08a94783f39a59 | [] | no_license | ahg223/CloudCamAPI | 37199afe38ae039330038a5c6b2c345cd5fa2144 | 04ed1925b094a1ad62b8c85bf5e5ebe797af6a9a | refs/heads/master | 2020-04-27T18:11:55.118509 | 2019-04-13T07:43:43 | 2019-04-13T07:43:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 276 | 7 | #!/Users/hyunggeunahn/Desktop/MyGit/Flask/flask/bin/python2.7
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
# Strip setuptools' "-script.py"/".exe" wrapper suffix from argv[0] so the
# command reports its canonical name, then delegate to easy_install's main.
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"ahg223@naver.com"
] | ahg223@naver.com |
a794cd368971ddd7da52ea42ef11f525d6acfa03 | 4ebfb207661bafcebb9b75936318c7dc84db3d80 | /myvenv/Scripts/rst2s5.py | 8f3e1f9599c87a7c8383bf0cf545291231482abe | [] | no_license | YooInKeun/ToDo-App | 2c72a91488cb376ff6c380ccbe5106dfdae09ecb | c231e5b13936a33bf60e42268c1ad856495aa432 | refs/heads/master | 2022-12-11T18:41:06.856657 | 2019-09-12T07:39:25 | 2019-09-12T07:39:25 | 200,203,474 | 0 | 0 | null | 2022-11-22T04:13:58 | 2019-08-02T09:07:51 | Python | UTF-8 | Python | false | false | 718 | py | #!c:\users\keun0\onedrive\바탕 화면\창업 경진대회\todo\todo-app\myvenv\scripts\python.exe
# $Id: rst2s5.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: Chris Liechti <cliechti@gmx.net>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing HTML slides using
the S5 template system.
"""
import locale

try:
    # Best-effort: use the user's preferred locale for locale-aware formatting.
    locale.setlocale(locale.LC_ALL, '')
except locale.Error:
    # Bug fix: a bare `except:` also swallowed SystemExit/KeyboardInterrupt;
    # only an unsupported/invalid locale should be ignored here.
    pass
from docutils.core import publish_cmdline, default_description
# Usage text shown by --help, built on docutils' standard description.
description = ('Generates S5 (X)HTML slideshow documents from standalone '
'reStructuredText sources. ' + default_description)
# Parse sys.argv and run the docutils publisher with the S5 slideshow writer.
publish_cmdline(writer_name='s5', description=description)
| [
"keun0390@naver.com"
] | keun0390@naver.com |
11d7586b7b9a1d62b6b3ea9257ab5501beab53b7 | fb66c010704bafb9a4bfa8ba98099fc48f4dc0dd | /main.py | 173b99801df64bac7b4dd8ea8ed37c2c7d37fd3f | [] | no_license | mkhmirza/cryptopy | 6ab2742a40a4c3a0ada3ef3bb425bf60e70fa32f | d321e4beb982e3e2ed731f7f3f0ca1d49e4fe42b | refs/heads/master | 2023-05-31T14:26:45.218684 | 2021-06-14T14:20:10 | 2021-06-14T14:20:10 | 285,309,696 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,796 | py | #!/usr/bin/python env
import getopt
import sys
from crypto import Cryptography
import argparse
parser = argparse.ArgumentParser(description="Encrypt & Decrypt Files using different techniques")
parser.add_argument('-e', '--encrypt', help='encryption to be performed', action='store_true')
parser.add_argument('-d', '--decrypt', help="decryption operation to be performed", action='store_true')
parser.add_argument('-i', '--input', help='specify input file (with extension)')
parser.add_argument('-k', '--key', help='generates a new keyfile', action='store_true')
parser.add_argument('-f', '--key-file', help='key file name')
parser.add_argument('-o', '--output', help='specify outputfilename (without extension)')
# Bug fix: the script read args['verbose'] below, but no --verbose option was
# ever defined, so every invocation crashed with a KeyError.
parser.add_argument('-v', '--verbose', help='print verbose output', action='store_true')
args = vars(parser.parse_args())

# Mode flags: exactly one of encryption/decryption may be requested.
encrypt = args['encrypt']
decrypt = args['decrypt']

# Remaining options (argparse stores --key-file under 'key_file').
inputf = args['input']
outputf = args['output']
keyf = args['key']
keyFile = args['key_file']
verbose = args['verbose']

if encrypt and decrypt:
    raise Exception("Encryption and Decryption cannot be performed together.")
if encrypt and not keyf:
    raise Exception("For encrpytion generating random key is recommended")
if decrypt and not keyFile:
    raise Exception("For decrpytion key file is required")

crypto = Cryptography()

# if encryption option '-e' is given
if encrypt:
    print("Generating a random key file for encrypting data")
    # generateKey() returns the name of the key file it wrote; read the key back.
    key = crypto.generateKey()
    with open(key, "r") as f:
        key = f.read()
    print("Encrypting Data..")
    crypto.encryption(key, inputf)
# if decryption option '-d' is given
elif decrypt:
    print("Reading key for decrypting data")
    key = keyFile
    with open(key, 'r') as f:
        key = f.read()
    print("Decrypting Data..")
    crypto.decryption(key, inputf)
| [
"kumailhabib12@gmail.com"
] | kumailhabib12@gmail.com |
61e68303488b6e117e1a6b2188ab4413659acbb6 | 3068bdf533bbd1dfddbbc22176bf5837844ac48a | /final/poi_id.py | 42065913ea2b79446b99bd76203f53961826848d | [] | no_license | jparimaa/nn | c2c67869a08779ce2f1e173489aaf6eb5e6d0b66 | 197425b7adceba0a8ea22d6008ac0c4fde21f438 | refs/heads/master | 2021-06-10T23:55:03.933529 | 2016-10-22T09:46:05 | 2016-10-22T09:46:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,353 | py | import sys
import pickle
sys.path.append("../tools/")
from feature_format import feature_format, target_feature_split
from tester import dump_classifier_and_data, test_classifier
def add_ratio_feature(data_dict, key, new_feature, dividend, divisor):
    """Store data_dict[key][new_feature] = data_dict[key][dividend] / data_dict[key][divisor].

    Sets the value to the string "NaN" when either field is non-numeric
    (this dataset marks absent values with the string "NaN").
    """
    try:
        # Bug fix: the divisor was looked up with the global `name` (leaked
        # from the caller's loop) instead of the `key` parameter, so the
        # function only worked by accident when called with key == name.
        # The 1.0 * factor forces true division under Python 2, where two
        # ints would otherwise truncate the ratio to 0.
        data_dict[key][new_feature] = 1.0 * data_dict[key][dividend] / data_dict[key][divisor]
    except TypeError:
        data_dict[key][new_feature] = "NaN"
    except Exception as e:
        print("Unexpected error:", e)
# Features used for classification; "poi" (person of interest) is the label.
features_list = ["poi", "salary", "bonus", "total_payments", "total_stock_value"]
### Load the dictionary containing the dataset
with open("final_project_dataset.pkl", "r") as data_file:
data_dict = pickle.load(data_file)
### Remove outliers
# "TOTAL" is a spreadsheet aggregation row, not a person.
outliers = ["TOTAL", "THE TRAVEL AGENCY IN THE PARK"]
for outlier in outliers:
data_dict.pop(outlier)
### Create new features
# Fractions of each person's email traffic exchanged with POIs.
for name in data_dict:
add_ratio_feature(data_dict, name, "from_poi_ratio", "from_poi_to_this_person", "to_messages")
add_ratio_feature(data_dict, name, "to_poi_ratio", "from_this_person_to_poi", "from_messages")
features_list += ["from_poi_ratio", "to_poi_ratio"]
my_dataset = data_dict
### Extract features and labels from dataset for local testing
data = feature_format(my_dataset, features_list, sort_keys = True)
labels, features = target_feature_split(data)
### Classify
### Name your classifier clf for easy export below.
from sklearn.ensemble import RandomForestClassifier
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.cross_validation import StratifiedKFold
from sklearn.feature_selection import SelectKBest
# Pipeline: univariate feature selection followed by a random forest.
selection = SelectKBest()
rfc = RandomForestClassifier()
pipeline = Pipeline([('features', selection), ('classifier', rfc)])
# Grid-search space over both the selector and the forest hyperparameters.
parameters = {'features__k': [5, 'all'],
'classifier__n_estimators': [50, 100, 200],
'classifier__min_samples_split': [2, 4, 6],
'classifier__criterion': ['entropy', 'gini'],
'classifier__class_weight': ['balanced_subsample', 'auto', None],
'classifier__max_depth': [2, 4, 6]
}
# Optimize for recall (finding POIs matters more than false alarms here).
clf = GridSearchCV(pipeline, parameters, scoring='recall')
clf.fit(features, labels)
test_classifier(clf.best_estimator_, my_dataset, features_list)
### Dump the classifier
dump_classifier_and_data(clf, my_dataset, features_list)
| [
"juhapekka.arimaa@gmail.com"
] | juhapekka.arimaa@gmail.com |
df119986e7fe6e7dc635c2fc9dc41f4eb6cb67eb | b2c44f71e04786fd1b8708d5881b7844975659c0 | /ranger/colorschemes/solarized.py | 027871c5b08595cdec5ab0bbc5aa5bf7237955af | [] | no_license | okubax/dotfiles-old | 2ae15f2bae13bdabda2293e08b3bc27ad899503c | cc98fe71caa2e6a1ac6215fff61c9f0b3c3b4bdf | refs/heads/master | 2021-01-14T12:44:53.625051 | 2017-07-08T20:36:10 | 2017-07-08T20:36:10 | 35,935,966 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,144 | py | # This file is part of ranger, the console file manager.
# License: GNU GPL version 3, see the file "AUTHORS" for details.
# Author: Joseph Tannhuber <sepp.tannhuber@yahoo.de>, 2013
# Solarized like colorscheme, similar to solarized-dircolors
# from https://github.com/seebi/dircolors-solarized.
# This is a modification of Roman Zimbelmann's default colorscheme.
from ranger.gui.colorscheme import ColorScheme
from ranger.gui.color import *
class Solarized(ColorScheme):
# 256-color index used for all progress bars (solarized blue).
progress_bar_color = 33
def use(self, context):
# Return (foreground, background, attributes) for the given draw context.
# Branches mirror the solarized-dircolors palette; later checks in each
# section intentionally override earlier ones, so order matters.
fg, bg, attr = default_colors
if context.reset:
return default_colors
# --- file browser pane: color by file type and selection state ---
elif context.in_browser:
fg = 244
if context.selected:
attr = reverse
else:
attr = normal
if context.empty or context.error:
fg = 235
bg = 160
if context.border:
fg = default
if context.media:
if context.image:
fg = 136
else:
fg = 166
if context.container:
fg = 61
if context.directory:
fg = 33
elif context.executable and not \
any((context.media, context.container,
context.fifo, context.socket)):
fg = 64
attr |= bold
if context.socket:
fg = 136
bg = 230
attr |= bold
if context.fifo:
fg = 136
bg = 230
attr |= bold
if context.device:
fg = 244
bg = 230
attr |= bold
if context.link:
# good = link target exists; dangling links are shown in red.
fg = context.good and 37 or 160
attr |= bold
if context.bad:
bg = 235
if context.tag_marker and not context.selected:
attr |= bold
if fg in (red, magenta):
fg = white
else:
fg = red
# Files cut/copied to the clipboard are dimmed.
if not context.selected and (context.cut or context.copied):
fg = 234
attr |= bold
if context.main_column:
if context.selected:
attr |= bold
if context.marked:
attr |= bold
bg = 237
if context.badinfo:
if attr & reverse:
bg = magenta
else:
fg = magenta
# if context.inactive_pane:
# fg = 241
# --- titlebar: hostname, path and tab indicators ---
elif context.in_titlebar:
attr |= bold
if context.hostname:
fg = context.bad and 16 or 255
if context.bad:
bg = 166
elif context.directory:
fg = 33
elif context.tab:
fg = context.good and 47 or 33
bg = 239
elif context.link:
fg = cyan
# --- statusbar: permissions, marks, messages, progress ---
elif context.in_statusbar:
if context.permissions:
if context.good:
fg = 93
elif context.bad:
fg = 160
bg = 235
if context.marked:
attr |= bold | reverse
fg = 237
bg = 47
if context.message:
if context.bad:
attr |= bold
fg = 160
bg = 235
if context.loaded:
bg = self.progress_bar_color
if context.text:
if context.highlight:
attr |= reverse
# --- task view: background jobs list (may combine with the above) ---
if context.in_taskview:
if context.title:
fg = 93
if context.selected:
attr |= reverse
if context.loaded:
if context.selected:
fg = self.progress_bar_color
else:
bg = self.progress_bar_color
return fg, bg, attr
| [
"okubax@gmail.com"
] | okubax@gmail.com |
351258959249189c8f6ffc9c3aea21baf176bc4c | 0277e19a9d82e35c731aec2772d3c4f4ec977644 | /www/app.py | 89b7c5a9c5e548486ce75f85f8775ee05bec92c1 | [] | no_license | xingzhihe/python3-webapp | 8402d9e7d1491c19aa46ed2ff03455e21857222f | 5ef9627835a7f27059c057c32dd94093b2fa3af7 | refs/heads/master | 2021-06-25T00:05:29.443748 | 2019-07-24T02:01:59 | 2019-07-24T02:01:59 | 150,948,948 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,639 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'zhihe xing'
'''
async web application.
'''
import logging; logging.basicConfig(level=logging.INFO)
import asyncio, os, json, time
from datetime import datetime
from aiohttp import web
from jinja2 import Environment, FileSystemLoader
import com.phoenix.orm as orm
from com.phoenix.config import configs
from handlers import cookie2user, COOKIE_NAME
from coroweb import add_routes, add_static, get_modules
async def logger_factory(app, handler):
    """aiohttp middleware factory: log each request's method and path, then delegate.

    Args:
        app: the aiohttp Application (unused; required by the middleware protocol).
        handler: the next handler in the middleware chain.

    Returns:
        A coroutine that wraps ``handler`` with request logging.
    """
    async def log_and_dispatch(request):
        # Lazy %-args: the message is only formatted if INFO is enabled.
        logging.info('Request: %s %s', request.method, request.path)
        return await handler(request)
    return log_and_dispatch
async def auth_factory(app, handler):
    """aiohttp middleware factory: resolve the signed-in user from the session cookie.

    Stores the resolved user (or None) on ``request.__user__`` and redirects
    visitors who are not signed-in admins away from the /manage/ area.
    """
    async def auth(request):
        logging.info('check user: %s %s' % (request.method, request.path))
        request.__user__ = None
        raw_cookie = request.cookies.get(COOKIE_NAME)
        if raw_cookie:
            resolved = await cookie2user(raw_cookie)
            if resolved:
                logging.info('set current user: %s' % resolved.email)
                request.__user__ = resolved
        # The management backend is restricted to authenticated admins
        # (equivalent, by De Morgan, to: user is None or not user.admin).
        is_admin = request.__user__ is not None and request.__user__.admin
        if request.path.startswith('/manage/') and not is_admin:
            return web.HTTPFound('/signin')
        return await handler(request)
    return auth
async def data_factory(app, handler):
    """aiohttp middleware factory: pre-parse POST bodies onto ``request.__data__``.

    JSON payloads are decoded with ``request.json()``; form-encoded payloads
    with ``request.post()``. Other methods/content types pass through untouched.
    """
    async def read_body(request):
        if request.method == 'POST':
            ctype = request.content_type
            if ctype.startswith('application/json'):
                request.__data__ = await request.json()
                logging.info('request json: %s' % str(request.__data__))
            elif ctype.startswith('application/x-www-form-urlencoded'):
                request.__data__ = await request.post()
                logging.info('request form: %s' % str(request.__data__))
        return await handler(request)
    return read_body
async def response_factory(app, handler):
    """aiohttp middleware factory: convert handler return values into web.Response.

    Supported handler results, checked in order:
      - web.StreamResponse: passed through unchanged.
      - bytes: served as application/octet-stream.
      - str: 'redirect:<url>' triggers an HTTP redirect; anything else is HTML.
      - dict: rendered with the Jinja2 template named by '__template__',
        or serialized as JSON when no template key is present.
      - int in [100, 600): treated as a bare HTTP status code.
      - (int, message) tuple: status code plus message text.
      - anything else: rendered with str() as text/plain.
    """
    async def response(request):
        logging.info('Response handler...')
        r = await handler(request)
        if isinstance(r, web.StreamResponse):
            return r
        if isinstance(r, bytes):
            resp = web.Response(body=r)
            resp.content_type = 'application/octet-stream'
            return resp
        if isinstance(r, str):
            if r.startswith('redirect:'):
                # Strip the 'redirect:' prefix (9 chars) and issue a redirect.
                return web.HTTPFound(r[9:])
            resp = web.Response(body=r.encode('utf-8'))
            resp.content_type = 'text/html;charset=utf-8'
            return resp
        if isinstance(r, dict):
            template = r.get('__template__')
            if template is None:
                # No template: return the dict as JSON. default=lambda o: o.__dict__
                # lets plain model objects serialize their attributes.
                resp = web.Response(body=json.dumps(r, ensure_ascii=False, default=lambda o: o.__dict__).encode('utf-8'))
                resp.content_type = 'application/json;charset=utf-8'
                return resp
            else:
                r['__user__'] = request.__user__
                resp = web.Response(body=app['__templating__'].get_template(template).render(**r).encode('utf-8'))
                resp.content_type = 'text/html;charset=utf-8'
                return resp
        if isinstance(r, int) and 100 <= r < 600:
            # BUG FIX: aiohttp's web.Response takes keyword-only arguments;
            # the original `web.Response(r)` raised TypeError at runtime.
            return web.Response(status=r)
        if isinstance(r, tuple) and len(r) == 2:
            t, m = r
            if isinstance(t, int) and 100 <= t < 600:
                # BUG FIX: same keyword-only constraint; send the message as body text.
                return web.Response(status=t, text=str(m))
        # default: plain-text rendering of whatever the handler returned
        resp = web.Response(body=str(r).encode('utf-8'))
        resp.content_type = 'text/plain;charset=utf-8'
        return resp
    return response
def init_jinja2(app, **kw):
    """Create the Jinja2 environment and store it on the app as '__templating__'.

    Recognized keyword options: autoescape, block/variable delimiter strings,
    auto_reload, ``path`` (template directory; defaults to the 'templates'
    folder beside this file) and ``filters`` (mapping of name -> callable).
    """
    logging.info('init jinja2...')
    options = {
        'autoescape': kw.get('autoescape', True),
        'block_start_string': kw.get('block_start_string', '{%'),
        'block_end_string': kw.get('block_end_string', '%}'),
        'variable_start_string': kw.get('variable_start_string', '{{'),
        'variable_end_string': kw.get('variable_end_string', '}}'),
        'auto_reload': kw.get('auto_reload', True),
    }
    template_dir = kw.get('path', None)
    if template_dir is None:
        # Default to the 'templates' directory next to this source file.
        template_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'templates')
    logging.info('set jinja2 template path: %s' % template_dir)
    env = Environment(loader=FileSystemLoader(template_dir), **options)
    # Register any custom template filters the caller supplied.
    for name, func in (kw.get('filters', None) or {}).items():
        env.filters[name] = func
    app['__templating__'] = env
def datetime_filter(t):
    """Jinja2 filter: render UNIX timestamp ``t`` as a relative Chinese age string.

    Anything under a week old becomes '<n>分钟前' / '<n>小时前' / '<n>天前'
    (under a minute is shown as '1分钟前'); older timestamps are rendered
    as an absolute 年/月/日 date.
    """
    seconds = int(time.time() - t)
    if seconds < 60:
        return u'1分钟前'
    elif seconds < 3600:
        return u'%s分钟前' % (seconds // 60)
    elif seconds < 86400:
        return u'%s小时前' % (seconds // 3600)
    elif seconds < 604800:
        return u'%s天前' % (seconds // 86400)
    when = datetime.fromtimestamp(t)
    return u'%s年%s月%s日' % (when.year, when.month, when.day)
async def init(loop):
    """Bootstrap the application: DB pool, middlewares, templates, routes, server.

    Reads MySQL connection settings from configs['ds'] and returns the
    asyncio server listening on 127.0.0.1:9000.
    """
    db_conf = configs['ds']
    # Create the global MySQL connection pool before any handler can run.
    await orm.create_pool(loop=loop, host=db_conf['host'], port=3306,
                          user=db_conf['user'], password=db_conf['password'],
                          db=db_conf['db'])
    app = web.Application(loop=loop,
                          middlewares=[logger_factory, auth_factory, response_factory])
    init_jinja2(app, filters=dict(datetime=datetime_filter))
    add_static(app)
    # Register every handler module discovered under 'controllers'.
    for module in get_modules('controllers'):
        logging.info(module)
        add_routes(app, module)
    server = await loop.create_server(app.make_handler(), '127.0.0.1', 9000)
    logging.info('server started at http://127.0.0.1:9000...')
    return server
# Script entry: create the event loop and run the bootstrap coroutine to
# completion (DB pool + HTTP server), then serve until interrupted.
loop = asyncio.get_event_loop()
loop.run_until_complete(init(loop))
loop.run_forever() | [
"xingzhihe@foresee.com.cn"
] | xingzhihe@foresee.com.cn |
305c9b3f41cbbbb26fb9defc09ab47c5ab0ce0d3 | 0e908f1a62d1143762c6928bf6b7a549a6e1e254 | /amstrong.py | 2c7c7465e38d2fbebf68e20bd8e7bb3b5b7fdc74 | [] | no_license | CastleOfCodes/Pythoncodes | c18e79a3378339366554f1bec77477119a97e2d1 | 0ff8cd482fd046477e2c2bc841e5d9b58b0c1348 | refs/heads/master | 2023-06-29T17:05:19.796200 | 2021-08-04T04:39:57 | 2021-08-04T04:39:57 | 392,550,012 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 147 | py | n=int(input())
def is_armstrong(number):
    """Return True if *number* is an Armstrong (narcissistic) number.

    An Armstrong number equals the sum of its digits, each raised to the
    power of the digit count: 153 = 1**3 + 5**3 + 3**3 and
    9474 = 9**4 + 4**4 + 7**4 + 4**4. Negative numbers are never Armstrong.
    """
    # BUG FIX: the original hard-coded exponent 3, which is only correct
    # for three-digit inputs; use the number of digits instead.
    digits = str(abs(number))
    power = len(digits)
    return number == sum(int(d) ** power for d in digits)


if __name__ == "__main__":
    # Guarded so importing this module does not block reading stdin.
    n = int(input())
    if is_armstrong(n):
        print("Amstrong")
    else:
        print("Not amstrong")
"joyalt6@gmail.com"
] | joyalt6@gmail.com |
5edc2b831a243780efc54733a7aa4d7d4f44e259 | 50b9a05e54c3ea4247673e7d126109eda1a13243 | /SNA4Slack_API/SlackCrawler/tests/Test_DataPrep.py | b22692539f415d427228cd4bad4900db6934e9e4 | [] | no_license | aman-srivastava/SNA4Slack | 6f7a00708f693fac7f8bd51791f164c5c91a2ed2 | c0f735d83e0a1ffb769b1c00e168ddaa22b46374 | refs/heads/master | 2021-09-13T09:05:28.073800 | 2018-04-27T13:35:34 | 2018-04-27T13:35:34 | 104,130,254 | 9 | 1 | null | 2018-04-27T02:44:00 | 2017-09-19T21:23:52 | HTML | UTF-8 | Python | false | false | 2,134 | py | #!/bin/python
# -*- coding: utf-8 -*-
import json
import csv
import uuid
import sys
from time import sleep
from random import randint
from selenium import webdriver
from pyvirtualdisplay import Display
from objects.slack_archive import *
import datetime
from cassandra.auth import PlainTextAuthProvider
from cassandra.cluster import Cluster
from cassandra.cqlengine.management import sync_table
from cassandra.cqlengine.models import Model
from cassandra.cqlengine import columns, connection
from utils import Utils
class DataPrep():
    """Loads archived Slack messages from a CSV file into Cassandra."""

    def LoadTextData(self, csv_file):
        """Read *csv_file* and persist each non-empty row as a SlackArchive record.

        Expected row layout: [sender, body, (optional) channel name]. The
        team name is derived from the CSV file's base name; rows without an
        explicit channel fall back to 'TestData_<basename>'.

        NOTE(review): messageTime is a fixed placeholder timestamp and the
        sender avatar is a hard-coded URL — presumably test fixtures; confirm.
        """
        Utils.get_Connection_SNA4Slack()
        sync_table(SlackArchive)
        # Hoisted out of the loop: the team name is the same for every row.
        # (Also removed the original's dead msg_* locals, which were
        # assigned empty strings and never read.)
        team_name = csv_file.split('/')[-1].split('.')[0]
        with open(csv_file, 'rb') as csvfile:
            fileReader = csv.reader(csvfile)
            for row in fileReader:
                if not row:
                    continue
                if len(row) > 2:
                    channelName = row[2]
                else:
                    channelName = 'TestData_' + team_name
                node_object = SlackArchive(id=uuid.uuid1(),
                                           teamName=team_name,
                                           channelName=channelName,
                                           messageSender=str(row[0]),
                                           senderAvatar='https://buffercommunity.slack.com/archives/-general/p1458841440001473',
                                           messageBody=str(row[1]),
                                           messageTime=datetime.datetime.strptime(
                                               'Oct 25, 2017 05:41', "%b %d, %Y %I:%M"))
                # print-as-function form works under both Python 2 and 3
                # (the file is otherwise Python 2: 'rb' csv mode, print stmts).
                print(str(node_object))
                node_object.save()
                print(row)
if __name__ == '__main__':
    # Ad-hoc entry point: import the sample subscription CSV into Cassandra.
    prep = DataPrep()
    prep.LoadTextData(
        '/home/shuchir/SER517/slack/SNA4Slack/SNA4Slack_API/NetworkX/resources/subscriptionTest.csv')
| [
"sinamda2@asu.edu"
] | sinamda2@asu.edu |
afc1960b9e604fdb66a3939bdb40fa1fd79f9cc7 | 090a4e026addc9e78ed6118f09fd0d7d4d517857 | /graph_objs/scattermapbox/_marker.py | 5bece0565289746dd7c67705ab0f857215310d98 | [
"MIT"
] | permissive | wwwidonja/new_plotly | 0777365e53ea7d4b661880f1aa7859de19ed9b9a | 1bda35a438539a97c84a3ab3952e95e8848467bd | refs/heads/master | 2023-06-04T19:09:18.993538 | 2021-06-10T18:33:28 | 2021-06-10T18:33:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 48,618 | py | from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Marker(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "scattermapbox"
_path_str = "scattermapbox.marker"
_valid_props = {
"allowoverlap",
"angle",
"anglesrc",
"autocolorscale",
"cauto",
"cmax",
"cmid",
"cmin",
"color",
"coloraxis",
"colorbar",
"colorscale",
"colorsrc",
"opacity",
"opacitysrc",
"reversescale",
"showscale",
"size",
"sizemin",
"sizemode",
"sizeref",
"sizesrc",
"symbol",
"symbolsrc",
}
# allowoverlap
# ------------
@property
def allowoverlap(self):
"""
Flag to draw all symbols, even if they overlap.
The 'allowoverlap' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["allowoverlap"]
@allowoverlap.setter
def allowoverlap(self, val):
self["allowoverlap"] = val
# angle
# -----
@property
def angle(self):
"""
Sets the marker orientation from true North, in degrees
clockwise. When using the "auto" default, no rotation would be
applied in perspective views which is different from using a
zero angle.
The 'angle' property is a number and may be specified as:
- An int or float
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["angle"]
@angle.setter
def angle(self, val):
self["angle"] = val
# anglesrc
# --------
@property
def anglesrc(self):
"""
Sets the source reference on Chart Studio Cloud for angle .
The 'anglesrc' property must be specified as a string or
as a new_plotly.grid_objs.Column object
Returns
-------
str
"""
return self["anglesrc"]
@anglesrc.setter
def anglesrc(self, val):
self["anglesrc"] = val
# autocolorscale
# --------------
@property
def autocolorscale(self):
"""
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`marker.colorscale`. Has an effect only if in `marker.color`is
set to a numerical array. In case `colorscale` is unspecified
or `autocolorscale` is true, the default palette will be
chosen according to whether numbers in the `color` array are
all positive, all negative or mixed.
The 'autocolorscale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["autocolorscale"]
@autocolorscale.setter
def autocolorscale(self, val):
self["autocolorscale"] = val
# cauto
# -----
@property
def cauto(self):
"""
Determines whether or not the color domain is computed with
respect to the input data (here in `marker.color`) or the
bounds set in `marker.cmin` and `marker.cmax` Has an effect
only if in `marker.color`is set to a numerical array. Defaults
to `false` when `marker.cmin` and `marker.cmax` are set by the
user.
The 'cauto' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["cauto"]
@cauto.setter
def cauto(self, val):
self["cauto"] = val
# cmax
# ----
@property
def cmax(self):
"""
Sets the upper bound of the color domain. Has an effect only if
in `marker.color`is set to a numerical array. Value should have
the same units as in `marker.color` and if set, `marker.cmin`
must be set as well.
The 'cmax' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["cmax"]
@cmax.setter
def cmax(self, val):
self["cmax"] = val
# cmid
# ----
@property
def cmid(self):
"""
Sets the mid-point of the color domain by scaling `marker.cmin`
and/or `marker.cmax` to be equidistant to this point. Has an
effect only if in `marker.color`is set to a numerical array.
Value should have the same units as in `marker.color`. Has no
effect when `marker.cauto` is `false`.
The 'cmid' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["cmid"]
@cmid.setter
def cmid(self, val):
self["cmid"] = val
# cmin
# ----
@property
def cmin(self):
"""
Sets the lower bound of the color domain. Has an effect only if
in `marker.color`is set to a numerical array. Value should have
the same units as in `marker.color` and if set, `marker.cmax`
must be set as well.
The 'cmin' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["cmin"]
@cmin.setter
def cmin(self, val):
self["cmin"] = val
# color
# -----
@property
def color(self):
"""
Sets themarkercolor. It accepts either a specific color or an
array of numbers that are mapped to the colorscale relative to
the max and min values of the array or relative to
`marker.cmin` and `marker.cmax` if set.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A number that will be interpreted as a color
according to scattermapbox.marker.colorscale
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# coloraxis
# ---------
@property
def coloraxis(self):
"""
Sets a reference to a shared color axis. References to these
shared color axes are "coloraxis", "coloraxis2", "coloraxis3",
etc. Settings for these shared color axes are set in the
layout, under `layout.coloraxis`, `layout.coloraxis2`, etc.
Note that multiple color scales can be linked to the same color
axis.
The 'coloraxis' property is an identifier of a particular
subplot, of type 'coloraxis', that may be specified as the string 'coloraxis'
optionally followed by an integer >= 1
(e.g. 'coloraxis', 'coloraxis1', 'coloraxis2', 'coloraxis3', etc.)
Returns
-------
str
"""
return self["coloraxis"]
@coloraxis.setter
def coloraxis(self, val):
self["coloraxis"] = val
# colorbar
# --------
@property
def colorbar(self):
"""
The 'colorbar' property is an instance of ColorBar
that may be specified as:
- An instance of :class:`new_plotly.graph_objs.scattermapbox.marker.ColorBar`
- A dict of string/value properties that will be passed
to the ColorBar constructor
Supported dict properties:
bgcolor
Sets the color of padded area.
bordercolor
Sets the axis line color.
borderwidth
Sets the width (in px) or the border enclosing
this color bar.
dtick
Sets the step in-between ticks on this axis.
Use with `tick0`. Must be a positive number, or
special strings available to "log" and "date"
axes. If the axis `type` is "log", then ticks
are set every 10^(n*dtick) where n is the tick
number. For example, to set a tick mark at 1,
10, 100, 1000, ... set dtick to 1. To set tick
marks at 1, 100, 10000, ... set dtick to 2. To
set tick marks at 1, 5, 25, 125, 625, 3125, ...
set dtick to log_10(5), or 0.69897000433. "log"
has several special values; "L<f>", where `f`
is a positive number, gives ticks linearly
spaced in value (but not position). For example
`tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10
plus small digits between, use "D1" (all
digits) or "D2" (only 2 and 5). `tick0` is
ignored for "D1" and "D2". If the axis `type`
is "date", then you must convert the time to
milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to
86400000.0. "date" also has special values
"M<n>" gives ticks spaced by a number of
months. `n` must be a positive integer. To set
ticks on the 15th of every third month, set
`tick0` to "2000-01-15" and `dtick` to "M3". To
set ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick
exponents. For example, consider the number
1,000,000,000. If "none", it appears as
1,000,000,000. If "e", 1e+9. If "E", 1E+9. If
"power", 1x10^9 (with 9 in a super script). If
"SI", 1G. If "B", 1B.
len
Sets the length of the color bar This measure
excludes the padding of both ends. That is, the
color bar length is this length minus the
padding on both ends.
lenmode
Determines whether this color bar's length
(i.e. the measure in the color variation
direction) is set in units of plot "fraction"
or in *pixels. Use `len` to set the value.
minexponent
Hide SI prefix for 10^n if |n| is below this
number. This only has an effect when
`tickformat` is "SI" or "B".
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks
will be chosen automatically to be less than or
equal to `nticks`. Has an effect only if
`tickmode` is set to "auto".
outlinecolor
Sets the axis line color.
outlinewidth
Sets the width (in px) of the axis line.
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of
the first tick is shown. If "last", only the
exponent of the last tick is shown. If "none",
no exponents appear.
showticklabels
Determines whether or not the tick labels are
drawn.
showtickprefix
If "all", all tick labels are displayed with a
prefix. If "first", only the first tick is
displayed with a prefix. If "last", only the
last tick is displayed with a suffix. If
"none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
thickness
Sets the thickness of the color bar This
measure excludes the size of the padding, ticks
and labels.
thicknessmode
Determines whether this color bar's thickness
(i.e. the measure in the constant color
direction) is set in units of plot "fraction"
or in "pixels". Use `thickness` to set the
value.
tick0
Sets the placement of the first tick on this
axis. Use with `dtick`. If the axis `type` is
"log", then you must take the log of your
starting tick (e.g. to set the starting tick to
100, set the `tick0` to 2) except when
`dtick`=*L<f>* (see `dtick` for more info). If
the axis `type` is "date", it should be a date
string, like date data. If the axis `type` is
"category", it should be a number, using the
scale where each category is assigned a serial
number from zero in the order it appears.
tickangle
Sets the angle of the tick labels with respect
to the horizontal. For example, a `tickangle`
of -90 draws the tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the color bar's tick label font
tickformat
Sets the tick label formatting rule using d3
formatting mini-languages which are very
similar to those in Python. For numbers, see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format
And for dates see:
https://github.com/d3/d3-time-
format#locale_format We add one item to d3's
date formatter: "%{n}f" for fractional seconds
with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f"
would display "09~15~23.46"
tickformatstops
A tuple of :class:`new_plotly.graph_objects.scatter
mapbox.marker.colorbar.Tickformatstop`
instances or dicts with compatible properties
tickformatstopdefaults
When used in a template (as layout.template.dat
a.scattermapbox.marker.colorbar.tickformatstopd
efaults), sets the default property values to
use for elements of
scattermapbox.marker.colorbar.tickformatstops
ticklabeloverflow
Determines how we handle tick labels that would
overflow either the graph div or the domain of
the axis. The default value for inside tick
labels is *hide past domain*. In other cases
the default is *hide past div*.
ticklabelposition
Determines where tick labels are drawn.
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto",
the number of ticks is set via `nticks`. If
"linear", the placement of the ticks is
determined by a starting position `tick0` and a
tick step `dtick` ("linear" is the default
value if `tick0` and `dtick` are provided). If
"array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`.
("array" is the default value if `tickvals` is
provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If
"", this axis' ticks are not drawn. If
"outside" ("inside"), this axis' are drawn
outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position
via `tickvals`. Only has an effect if
`tickmode` is set to "array". Used with
`tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud
for ticktext .
tickvals
Sets the values at which ticks on this axis
appear. Only has an effect if `tickmode` is set
to "array". Used with `ticktext`.
tickvalssrc
Sets the source reference on Chart Studio Cloud
for tickvals .
tickwidth
Sets the tick width (in px).
title
:class:`new_plotly.graph_objects.scattermapbox.mark
er.colorbar.Title` instance or dict with
compatible properties
titlefont
Deprecated: Please use
scattermapbox.marker.colorbar.title.font
instead. Sets this color bar's title font. Note
that the title's font used to be set by the now
deprecated `titlefont` attribute.
titleside
Deprecated: Please use
scattermapbox.marker.colorbar.title.side
instead. Determines the location of color bar's
title with respect to the color bar. Note that
the title's location used to be set by the now
deprecated `titleside` attribute.
x
Sets the x position of the color bar (in plot
fraction).
xanchor
Sets this color bar's horizontal position
anchor. This anchor binds the `x` position to
the "left", "center" or "right" of the color
bar.
xpad
Sets the amount of padding (in px) along the x
direction.
y
Sets the y position of the color bar (in plot
fraction).
yanchor
Sets this color bar's vertical position anchor
This anchor binds the `y` position to the
"top", "middle" or "bottom" of the color bar.
ypad
Sets the amount of padding (in px) along the y
direction.
Returns
-------
new_plotly.graph_objs.scattermapbox.marker.ColorBar
"""
return self["colorbar"]
@colorbar.setter
def colorbar(self, val):
self["colorbar"] = val
# colorscale
# ----------
@property
def colorscale(self):
"""
Sets the colorscale. Has an effect only if in `marker.color`is
set to a numerical array. The colorscale must be an array
containing arrays mapping a normalized value to an rgb, rgba,
hex, hsl, hsv, or named color string. At minimum, a mapping for
the lowest (0) and highest (1) values are required. For
example, `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`. To
control the bounds of the colorscale in color space,
use`marker.cmin` and `marker.cmax`. Alternatively, `colorscale`
may be a palette name string of the following list: Greys,YlGnB
u,Greens,YlOrRd,Bluered,RdBu,Reds,Blues,Picnic,Rainbow,Portland
,Jet,Hot,Blackbody,Earth,Electric,Viridis,Cividis.
The 'colorscale' property is a colorscale and may be
specified as:
- A list of colors that will be spaced evenly to create the colorscale.
Many predefined colorscale lists are included in the sequential, diverging,
and cyclical modules in the new_plotly.colors package.
- A list of 2-element lists where the first element is the
normalized color level value (starting at 0 and ending at 1),
and the second item is a valid color string.
(e.g. [[0, 'green'], [0.5, 'red'], [1.0, 'rgb(0, 0, 255)']])
- One of the following named colorscales:
['aggrnyl', 'agsunset', 'algae', 'amp', 'armyrose', 'balance',
'blackbody', 'bluered', 'blues', 'blugrn', 'bluyl', 'brbg',
'brwnyl', 'bugn', 'bupu', 'burg', 'burgyl', 'cividis', 'curl',
'darkmint', 'deep', 'delta', 'dense', 'earth', 'edge', 'electric',
'emrld', 'fall', 'geyser', 'gnbu', 'gray', 'greens', 'greys',
'haline', 'hot', 'hsv', 'ice', 'icefire', 'inferno', 'jet',
'magenta', 'magma', 'matter', 'mint', 'mrybm', 'mygbm', 'oranges',
'orrd', 'oryel', 'oxy', 'peach', 'phase', 'picnic', 'pinkyl',
'piyg', 'plasma', 'plotly3', 'portland', 'prgn', 'pubu', 'pubugn',
'puor', 'purd', 'purp', 'purples', 'purpor', 'rainbow', 'rdbu',
'rdgy', 'rdpu', 'rdylbu', 'rdylgn', 'redor', 'reds', 'solar',
'spectral', 'speed', 'sunset', 'sunsetdark', 'teal', 'tealgrn',
'tealrose', 'tempo', 'temps', 'thermal', 'tropic', 'turbid',
'turbo', 'twilight', 'viridis', 'ylgn', 'ylgnbu', 'ylorbr',
'ylorrd'].
Appending '_r' to a named colorscale reverses it.
Returns
-------
str
"""
return self["colorscale"]
@colorscale.setter
def colorscale(self, val):
self["colorscale"] = val
# colorsrc
# --------
@property
def colorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for color .
The 'colorsrc' property must be specified as a string or
as a new_plotly.grid_objs.Column object
Returns
-------
str
"""
return self["colorsrc"]
@colorsrc.setter
def colorsrc(self, val):
self["colorsrc"] = val
# opacity
# -------
@property
def opacity(self):
"""
Sets the marker opacity.
The 'opacity' property is a number and may be specified as:
- An int or float in the interval [0, 1]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["opacity"]
@opacity.setter
def opacity(self, val):
self["opacity"] = val
# opacitysrc
# ----------
@property
def opacitysrc(self):
"""
Sets the source reference on Chart Studio Cloud for opacity .
The 'opacitysrc' property must be specified as a string or
as a new_plotly.grid_objs.Column object
Returns
-------
str
"""
return self["opacitysrc"]
@opacitysrc.setter
def opacitysrc(self, val):
self["opacitysrc"] = val
# reversescale
# ------------
@property
def reversescale(self):
"""
Reverses the color mapping if true. Has an effect only if in
`marker.color`is set to a numerical array. If true,
`marker.cmin` will correspond to the last color in the array
and `marker.cmax` will correspond to the first color.
The 'reversescale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["reversescale"]
@reversescale.setter
def reversescale(self, val):
self["reversescale"] = val
# showscale
# ---------
@property
def showscale(self):
"""
Determines whether or not a colorbar is displayed for this
trace. Has an effect only if in `marker.color`is set to a
numerical array.
The 'showscale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showscale"]
@showscale.setter
def showscale(self, val):
self["showscale"] = val
# size
# ----
@property
def size(self):
"""
Sets the marker size (in px).
The 'size' property is a number and may be specified as:
- An int or float in the interval [0, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
# sizemin
# -------
@property
def sizemin(self):
"""
Has an effect only if `marker.size` is set to a numerical
array. Sets the minimum size (in px) of the rendered marker
points.
The 'sizemin' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["sizemin"]
@sizemin.setter
def sizemin(self, val):
self["sizemin"] = val
# sizemode
# --------
@property
def sizemode(self):
"""
Has an effect only if `marker.size` is set to a numerical
array. Sets the rule for which the data in `size` is converted
to pixels.
The 'sizemode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['diameter', 'area']
Returns
-------
Any
"""
return self["sizemode"]
@sizemode.setter
def sizemode(self, val):
self["sizemode"] = val
# sizeref
# -------
@property
def sizeref(self):
"""
Has an effect only if `marker.size` is set to a numerical
array. Sets the scale factor used to determine the rendered
size of marker points. Use with `sizemin` and `sizemode`.
The 'sizeref' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["sizeref"]
@sizeref.setter
def sizeref(self, val):
self["sizeref"] = val
# sizesrc
# -------
@property
def sizesrc(self):
"""
Sets the source reference on Chart Studio Cloud for size .
The 'sizesrc' property must be specified as a string or
as a new_plotly.grid_objs.Column object
Returns
-------
str
"""
return self["sizesrc"]
@sizesrc.setter
def sizesrc(self, val):
self["sizesrc"] = val
# symbol
# ------
@property
def symbol(self):
"""
Sets the marker symbol. Full list: https://www.mapbox.com/maki-
icons/ Note that the array `marker.color` and `marker.size` are
only available for "circle" symbols.
The 'symbol' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["symbol"]
@symbol.setter
def symbol(self, val):
self["symbol"] = val
# symbolsrc
# ---------
@property
def symbolsrc(self):
"""
Sets the source reference on Chart Studio Cloud for symbol .
The 'symbolsrc' property must be specified as a string or
as a new_plotly.grid_objs.Column object
Returns
-------
str
"""
return self["symbolsrc"]
@symbolsrc.setter
def symbolsrc(self, val):
self["symbolsrc"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
    # Human-readable summary of every marker property; used to build
    # the generated constructor docstring for this class.
    return """\
        allowoverlap
            Flag to draw all symbols, even if they overlap.
        angle
            Sets the marker orientation from true North, in degrees
            clockwise. When using the "auto" default, no rotation
            would be applied in perspective views which is
            different from using a zero angle.
        anglesrc
            Sets the source reference on Chart Studio Cloud for
            angle .
        autocolorscale
            Determines whether the colorscale is a default palette
            (`autocolorscale: true`) or the palette determined by
            `marker.colorscale`. Has an effect only if in
            `marker.color`is set to a numerical array. In case
            `colorscale` is unspecified or `autocolorscale` is
            true, the default palette will be chosen according to
            whether numbers in the `color` array are all positive,
            all negative or mixed.
        cauto
            Determines whether or not the color domain is computed
            with respect to the input data (here in `marker.color`)
            or the bounds set in `marker.cmin` and `marker.cmax`
            Has an effect only if in `marker.color`is set to a
            numerical array. Defaults to `false` when `marker.cmin`
            and `marker.cmax` are set by the user.
        cmax
            Sets the upper bound of the color domain. Has an effect
            only if in `marker.color`is set to a numerical array.
            Value should have the same units as in `marker.color`
            and if set, `marker.cmin` must be set as well.
        cmid
            Sets the mid-point of the color domain by scaling
            `marker.cmin` and/or `marker.cmax` to be equidistant to
            this point. Has an effect only if in `marker.color`is
            set to a numerical array. Value should have the same
            units as in `marker.color`. Has no effect when
            `marker.cauto` is `false`.
        cmin
            Sets the lower bound of the color domain. Has an effect
            only if in `marker.color`is set to a numerical array.
            Value should have the same units as in `marker.color`
            and if set, `marker.cmax` must be set as well.
        color
            Sets themarkercolor. It accepts either a specific color
            or an array of numbers that are mapped to the
            colorscale relative to the max and min values of the
            array or relative to `marker.cmin` and `marker.cmax` if
            set.
        coloraxis
            Sets a reference to a shared color axis. References to
            these shared color axes are "coloraxis", "coloraxis2",
            "coloraxis3", etc. Settings for these shared color axes
            are set in the layout, under `layout.coloraxis`,
            `layout.coloraxis2`, etc. Note that multiple color
            scales can be linked to the same color axis.
        colorbar
            :class:`new_plotly.graph_objects.scattermapbox.marker.Color
            Bar` instance or dict with compatible properties
        colorscale
            Sets the colorscale. Has an effect only if in
            `marker.color`is set to a numerical array. The
            colorscale must be an array containing arrays mapping a
            normalized value to an rgb, rgba, hex, hsl, hsv, or
            named color string. At minimum, a mapping for the
            lowest (0) and highest (1) values are required. For
            example, `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`.
            To control the bounds of the colorscale in color space,
            use`marker.cmin` and `marker.cmax`. Alternatively,
            `colorscale` may be a palette name string of the
            following list: Greys,YlGnBu,Greens,YlOrRd,Bluered,RdBu
            ,Reds,Blues,Picnic,Rainbow,Portland,Jet,Hot,Blackbody,E
            arth,Electric,Viridis,Cividis.
        colorsrc
            Sets the source reference on Chart Studio Cloud for
            color .
        opacity
            Sets the marker opacity.
        opacitysrc
            Sets the source reference on Chart Studio Cloud for
            opacity .
        reversescale
            Reverses the color mapping if true. Has an effect only
            if in `marker.color`is set to a numerical array. If
            true, `marker.cmin` will correspond to the last color
            in the array and `marker.cmax` will correspond to the
            first color.
        showscale
            Determines whether or not a colorbar is displayed for
            this trace. Has an effect only if in `marker.color`is
            set to a numerical array.
        size
            Sets the marker size (in px).
        sizemin
            Has an effect only if `marker.size` is set to a
            numerical array. Sets the minimum size (in px) of the
            rendered marker points.
        sizemode
            Has an effect only if `marker.size` is set to a
            numerical array. Sets the rule for which the data in
            `size` is converted to pixels.
        sizeref
            Has an effect only if `marker.size` is set to a
            numerical array. Sets the scale factor used to
            determine the rendered size of marker points. Use with
            `sizemin` and `sizemode`.
        sizesrc
            Sets the source reference on Chart Studio Cloud for
            size .
        symbol
            Sets the marker symbol. Full list:
            https://www.mapbox.com/maki-icons/ Note that the array
            `marker.color` and `marker.size` are only available for
            "circle" symbols.
        symbolsrc
            Sets the source reference on Chart Studio Cloud for
            symbol .
        """
def __init__(
    self,
    arg=None,
    allowoverlap=None,
    angle=None,
    anglesrc=None,
    autocolorscale=None,
    cauto=None,
    cmax=None,
    cmid=None,
    cmin=None,
    color=None,
    coloraxis=None,
    colorbar=None,
    colorscale=None,
    colorsrc=None,
    opacity=None,
    opacitysrc=None,
    reversescale=None,
    showscale=None,
    size=None,
    sizemin=None,
    sizemode=None,
    sizeref=None,
    sizesrc=None,
    symbol=None,
    symbolsrc=None,
    **kwargs
):
    """
    Construct a new Marker object

    Parameters
    ----------
    arg
        dict of properties compatible with this constructor or
        an instance of
        :class:`new_plotly.graph_objs.scattermapbox.Marker`
    allowoverlap, angle, anglesrc, autocolorscale, cauto, cmax,
    cmid, cmin, color, coloraxis, colorbar, colorscale, colorsrc,
    opacity, opacitysrc, reversescale, showscale, size, sizemin,
    sizemode, sizeref, sizesrc, symbol, symbolsrc
        Individual marker properties.  See the matching property
        docstrings on this class (summarized in
        ``_prop_descriptions``) for the meaning of each one.

    Returns
    -------
    Marker
    """
    super(Marker, self).__init__("marker")

    # Internal fast path used when a parent container constructs us.
    if "_parent" in kwargs:
        self._parent = kwargs["_parent"]
        return

    # Normalize `arg` into a plain dict that can be consumed destructively.
    if arg is None:
        arg = {}
    elif isinstance(arg, self.__class__):
        arg = arg.to_plotly_json()
    elif isinstance(arg, dict):
        arg = _copy.copy(arg)
    else:
        raise ValueError(
            """\
The first argument to the new_plotly.graph_objs.scattermapbox.Marker
constructor must be a dict or
an instance of :class:`new_plotly.graph_objs.scattermapbox.Marker`"""
        )

    # Handle skip_invalid / _validate flags.
    self._skip_invalid = kwargs.pop("skip_invalid", False)
    self._validate = kwargs.pop("_validate", True)

    # Populate the data dict.  An explicit keyword argument wins over the
    # same key in `arg`; values that resolve to None are skipped.  This
    # single loop replaces the generated per-property pop/assign
    # boilerplate (identical 4-line stanza repeated 24 times) while
    # preserving the original assignment order.
    explicit = {
        "allowoverlap": allowoverlap,
        "angle": angle,
        "anglesrc": anglesrc,
        "autocolorscale": autocolorscale,
        "cauto": cauto,
        "cmax": cmax,
        "cmid": cmid,
        "cmin": cmin,
        "color": color,
        "coloraxis": coloraxis,
        "colorbar": colorbar,
        "colorscale": colorscale,
        "colorsrc": colorsrc,
        "opacity": opacity,
        "opacitysrc": opacitysrc,
        "reversescale": reversescale,
        "showscale": showscale,
        "size": size,
        "sizemin": sizemin,
        "sizemode": sizemode,
        "sizeref": sizeref,
        "sizesrc": sizesrc,
        "symbol": symbol,
        "symbolsrc": symbolsrc,
    }
    for prop, value in explicit.items():
        _v = arg.pop(prop, None)
        _v = value if value is not None else _v
        if _v is not None:
            self[prop] = _v

    # Process unknown kwargs, then reset skip_invalid.
    self._process_kwargs(**dict(arg, **kwargs))
    self._skip_invalid = False
| [
"wwwidonja@gmail.com"
] | wwwidonja@gmail.com |
fe382577a093500adc301a74f49535d7edc1e416 | d472c845d34583f34b16918706e3ee9f19a0c818 | /config.production.py | ede0b218f528a882d3619de9bf02e18f762123b5 | [] | no_license | alien9/bigrs | 3e4217a8fe50b96b0f9d3c516af47344de69ee5e | 0f6c70f21c94cc2590a21686ea7b949051ece453 | refs/heads/master | 2022-12-10T02:20:14.022406 | 2019-10-31T14:32:51 | 2019-10-31T14:32:51 | 218,782,597 | 0 | 0 | null | 2022-12-08T00:02:51 | 2019-10-31T14:16:46 | TSQL | UTF-8 | Python | false | false | 138 | py | cstring="dbname='bigrs' user='bigrs' host='localhost' port='5432' password='bigrs'"
geoserver="http://bigrs.alien9.net:8080"
DISPLAY=":99" | [
"barufi@gmail.com"
] | barufi@gmail.com |
ae96763c2dfcfe2d0b4604a12b62c68f102cd078 | 3d32dad4f5476d369d4f5510291ec55fbe8700b1 | /lastprog/venv/bin/futurize | 15081e228226475d87f0bc586fd2591277ccc237 | [] | no_license | Mancancode/Python | 59e0af3b33e4d0453f56686e4814638d0f123020 | 10b3a79f8db403dcc535517b2bd8bc4bbf12263c | refs/heads/master | 2020-03-20T15:23:08.119951 | 2018-06-16T02:31:24 | 2018-06-16T02:31:24 | 137,511,590 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 424 | #!/home/manmiliki/PycharmProjects/lastprog/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'future==0.16.0','console_scripts','futurize'
# setuptools-generated console wrapper for the `futurize` command.
__requires__ = 'future==0.16.0'
import re
import sys
from pkg_resources import load_entry_point

if __name__ == '__main__':
    # Strip a trailing "-script.py(w)"/".exe" suffix from argv[0] so the
    # entry point sees the plain command name, then run it and exit with
    # its return code.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('future==0.16.0', 'console_scripts', 'futurize')()
    )
| [
"achonwaechris@outlook.com"
] | achonwaechris@outlook.com | |
62de0d4b13ffe8a54f556b37db6ba423e609c33e | 9df2fb0bc59ab44f026b0a2f5ef50c72b2fb2ceb | /sdk/netapp/azure-mgmt-netapp/generated_samples/snapshots_get.py | 09de9918ef91a8f9e53d95e483bca18b08f524f3 | [
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-generic-cla"
] | permissive | openapi-env-test/azure-sdk-for-python | b334a2b65eeabcf9b7673879a621abb9be43b0f6 | f61090e96094cfd4f43650be1a53425736bd8985 | refs/heads/main | 2023-08-30T14:22:14.300080 | 2023-06-08T02:53:04 | 2023-06-08T02:53:04 | 222,384,897 | 1 | 0 | MIT | 2023-09-08T08:38:48 | 2019-11-18T07:09:24 | Python | UTF-8 | Python | false | false | 1,614 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.netapp import NetAppManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-netapp
# USAGE
python snapshots_get.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
    """Fetch one NetApp snapshot with the Azure management SDK and print it."""
    credential = DefaultAzureCredential()
    client = NetAppManagementClient(
        credential=credential,
        subscription_id="D633CC2E-722B-4AE1-B636-BBD9E4C60ED9",
    )

    snapshot = client.snapshots.get(
        resource_group_name="myRG",
        account_name="account1",
        pool_name="pool1",
        volume_name="volume1",
        snapshot_name="snapshot1",
    )
    print(snapshot)


# x-ms-original-file: specification/netapp/resource-manager/Microsoft.NetApp/stable/2022-09-01/examples/Snapshots_Get.json
if __name__ == "__main__":
    main()
| [
"noreply@github.com"
] | noreply@github.com |
32b0e7a217eb7c08f989d9cf4ed742c04c128e45 | 3f93949f24e27476916696d50c75d4f93d49644b | /bin/cadastre-housenumber/bin/check_osm_id_ref_insee_csv.py | 6eb0c114c376c7e954dcba4748f3123fa7516172 | [] | no_license | bagage/export-cadastre | c038a83104051029c04ee2ee1ebd04249041203f | dd919c6474062aca5594972d6954c44a67625f49 | refs/heads/master | 2020-12-02T22:25:55.952425 | 2019-12-27T21:15:45 | 2019-12-27T21:15:45 | 96,131,733 | 0 | 0 | null | 2017-07-03T16:43:32 | 2017-07-03T16:43:32 | null | UTF-8 | Python | false | false | 1,403 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This script is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# It is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with it. If not, see <http://www.gnu.org/licenses/>.
"""
Vérifie que toutes les villes sont présente
dans le fichier associatedStreet/osm_id_ref_insee.csv
"""
import os
import sys
import os.path
from glob import glob
sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
from cadastre_fr.website import code_insee
# Collect the set of INSEE codes already present in the CSV
# (second comma-separated field of each line).
insee_set = set()
for line in open("associatedStreet/osm_id_ref_insee.csv"):
    insee=line.strip().split(",")[1]
    insee_set.add(insee)

# Scan every commune listing file and report (Python 2 print syntax)
# any commune whose INSEE code is missing from the CSV above.
for f in glob("/data/work/cadastre.openstreetmap.fr/data/*/*.txt"):
    for line in open(f):
        items = line.split()
        # First two fields are department and commune codes; the rest
        # of the line is the commune name.
        dep,com = items[:2]
        name = " ".join(items[2:])
        insee = code_insee(dep,com)
        if not insee in insee_set:
            print "ERREUR: id area manquant pour le code insee %s (%s)" % (insee, name)
| [
"tyndare@wanadoo.fr"
] | tyndare@wanadoo.fr |
77873623690a54262fa767c5faf2a14eb148e99c | d710731763e0445a2551b11983f6e05ef7c92ba1 | /wxpython/chp9/9.10.py | 03117d361322ae85bcfb6dc1e25dfd0bfe47350a | [] | no_license | 642237240/python | 666b4cdd476fc9d8620f1b2414aa9f2e1bdf88f7 | 097e91a7a855822df696fac8d558b9a247490fbc | refs/heads/master | 2022-10-11T06:31:30.674093 | 2020-05-28T13:26:48 | 2020-05-28T13:26:48 | 262,321,438 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 275 | py | import wx
import wx.lib.imagebrowser as imagebrowser
# Demo: open wx's stock image-browser dialog and report the chosen file.
if __name__ == '__main__':
    app = wx.App()
    dialog = imagebrowser.ImageDialog(None)
    ret = dialog.ShowModal()
    if ret == wx.ID_OK:
        # User confirmed a selection; print the chosen path.
        print('You Selected File:' + dialog.GetFile())
    dialog.Destroy()
| [
"642237240@qq.com"
] | 642237240@qq.com |
999b382c9ad57d0ddfec93969cb49d317a2255d2 | a1a86ccffff5f1a8fdab92f58fe46cd66f9cc0e2 | /docrec/ocr__/recognition.py | 63a91f818d84a4929e0fb5014006eafe4fed7c67 | [] | no_license | ZhengHui-Z/deeprec-sib18 | 8ec2c3b5b2fb6bfc6879f28a28c56776a7aa4faa | 036171c33bc2f90645d8b9794aa0850c34a3ad05 | refs/heads/master | 2020-06-19T20:09:57.953934 | 2019-06-24T14:03:00 | 2019-06-24T14:03:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 815 | py | import numpy as np
from pytesseract import image_to_string
from PIL import Image
from ..text.textprocessing import text2words
# http://www.nltk.org/howto/portuguese_en.html
# http://stanford.edu/~rjweiss/public_html/IRiSS2013/text2/notebooks/cleaningtext.html
# Run OCR
def image2text(image, language='en_US'):
    """Run Tesseract OCR on *image* and return the UTF-8 encoded text."""
    tesseract_lang = {'en_US': 'eng', 'pt_BR': 'por'}[language]
    pil_image = Image.fromarray(image.astype(np.uint8))
    recognized = image_to_string(pil_image, lang=tesseract_lang)
    return recognized.encode('utf-8', 'ignore')
def image2words(image, language='en_US', min_length=3):
    """OCR *image* and split the recognized text into words of at least *min_length*."""
    recognized = image2text(image, language=language)
    return text2words(recognized, min_length=min_length)
def number_of_words(image, language='en_US', min_length=3):
    """Return how many words OCR finds in *image*."""
    words = image2words(image, language=language, min_length=min_length)
    return len(words)
| [
"paixao@gmail.com"
] | paixao@gmail.com |
657fe89cd2b81c2f222c3e4b5ec744eb2230ebac | 0f40272f221acd09932e5e4b7f6287300526c902 | /Programmers/Python/Code/줄 서는 방법.py | c1ff2fd5f910d0a0f0592e1a715b923f87a01fb8 | [] | no_license | wansang93/Algorithm | 60cfa0d5a3cda7e41096cb22537a35c4789fc9e8 | 65425d1bf8e49cc3a732680c0c1030a2dc0333ca | refs/heads/master | 2023-08-05T04:35:44.128549 | 2023-07-29T17:57:00 | 2023-07-29T17:57:00 | 225,189,092 | 7 | 0 | null | null | null | null | UTF-8 | Python | false | false | 308 | py | import math
def solution(n, k):
    """Return the k-th (1-indexed) lexicographic permutation of [1..n].

    Factorial number system: the leading element of the k-th permutation
    is nums[(k-1) // (n-1)!], and the remainder recurses on the rest.
    The original recomputed math.factorial on every iteration (O(n) big-int
    work each time); here the factorial is maintained incrementally.
    """
    answer = []
    nums = list(range(1, n + 1))
    k -= 1  # switch to 0-indexed rank
    fact = math.factorial(n - 1)
    while nums:
        idx, k = divmod(k, fact)
        answer.append(nums.pop(idx))
        if nums:
            # (m-1)! for the m items that remain.
            fact //= len(nums)
    return answer
# Manual check: the 5th permutation of [1, 2, 3] is [3, 1, 2].
data1 = 3, 5
print(solution(*data1))
| [
"wansang93@naver.com"
] | wansang93@naver.com |
e9d41c59809ceb3af1a341138be9851fd0de4169 | 7e8a3efee959cdf7306aba0e4613091da7131a2d | /src/marof/sensor/Sensor.py | 60ec3450429e4136c2e7b7f84b98f00cc494585d | [] | no_license | tderensis/marof | 94cfc7f939688e89fe4a7c54ebf20e8840886cc7 | 15b481a2c6caa8186a04e3286c535148d7c4b24b | refs/heads/master | 2020-05-18T05:41:51.464514 | 2000-01-01T05:58:05 | 2000-01-01T05:58:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 973 | py | import abc
from marof import MarofModule
class Sensor(MarofModule):
    """A module wrapping a physical sensor, optionally post-processed by a filter."""
    __metaclass__ = abc.ABCMeta

    def __init__(self, name, updateInterval, filt):
        """Create the sensor; *filt* may be None to disable filtering."""
        super(Sensor, self).__init__(name, updateInterval)
        self._filterOutput = None
        self._filter = filt

    @property
    def filter(self):
        """The filter attached to this sensor (possibly None)."""
        return self._filter

    @property
    def filterOutput(self):
        """Most recent value produced by the filter."""
        return self._filterOutput

    @abc.abstractproperty
    def filterInput(self):
        """The value fed into the filter on each step."""
        return

    @abc.abstractmethod
    def sensorStep(self):
        """Perform the sensor's per-step work."""
        return

    def step(self):
        """Run one sensor iteration, then push the reading through the filter."""
        self.sensorStep()
        if self._filter is None:
            return
        self._filterOutput = self._filter.step(self.filterInput)
| [
"tderensis@gmail.com"
] | tderensis@gmail.com |
9bb4fcaa0d9de4ed146ebbd8468fa1931beaa63a | b039da79b60a0ba0ff54db5a0c1773f38da709f9 | /sql.py | bd6a7a41d51ff75d3855550c3ee191e75ca1c93e | [] | no_license | test998998/Sqlmapapi-scan-getsqli-in-txt | 7aabbd8cc52c79b3052c5e8dd14f03e552227ab8 | 7662d0fefa131d815c967bfbcb99e290470bc989 | refs/heads/master | 2021-07-16T13:16:09.511873 | 2017-10-23T10:30:19 | 2017-10-23T10:30:19 | 107,966,982 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,307 | py | # coding:utf-8
import os
import requests
import json
import threading
from time import sleep
file = open("url.txt")
def sql(url) :
try:
r = requests.get("http://127.0.0.1:8775/task/new")
taskid= r.json()['taskid']
r = requests.post('http://127.0.0.1:8775/scan/'+taskid+'/start', data=json.dumps({'url': url}), headers={'content-type': 'application/json'})
sleep(5)
r = requests.get('http://127.0.0.1:8775/scan/'+taskid+'/status')
running_status = r.json()['status']
while running_status == "running":
if running_status == "running":
sleep(5)
r = requests.get('http://127.0.0.1:8775/scan/'+taskid+'/status')
running_status = r.json()['status']
elif running_status == "terminated":
break
r = requests.get('http://127.0.0.1:8775/scan/'+taskid+'/data')
requests.get('http://127.0.0.1:8775/scan/' + taskid + '/stop')
requests.get('http://127.0.0.1:8775/scan/'+taskid+'/delete')
if r.json()['data']:
print " [√]: " + url
else:
print " [x]: " + url
except requests.ConnectionError:
print '无法连接到SQLMAPAPI服务,请在SQLMAP根目录下运行python sqlmapapi.py -s 来启动'
# NOTE(review): each thread is start()ed and immediately join()ed inside
# the loop, so URLs are actually scanned one at a time despite the
# threading setup.  The url.txt handle opened at module level is never
# closed.
for line in file:
    threads = []
    url = line.strip()
    threads.append(threading.Thread(target=sql,args=(url,)))
    for t in threads:
        t.setDaemon(True)
        t.start()
        t.join()
"test998998@icloud.com"
] | test998998@icloud.com |
dbc10d0194e60cd16d96aa341c4b3291121a9723 | ae2e0845f8bc7581c163f94796435bebb36ffa03 | /Proyecto World/saludos.py | c0b4fcc59dcaa044921dd467fdd1d321a93fab74 | [] | no_license | vicssan/CSD | 1325bee8ecf24197713554ba214ba89679bfb0da | cbba2ea5a64a95de7dec7bfd3fba4a181edbb407 | refs/heads/master | 2023-02-07T22:34:54.945515 | 2023-01-26T14:53:06 | 2023-01-26T14:53:06 | 282,976,112 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 258 | py | print "Saludo en aleman"
# Python 2 script: each imported module prints a greeting in its own
# language as an import side effect; the print before it labels it.
import aleman
print "Saludo en castellano"
import castellano
print "Saludo en frances"
import frances
print "Saludo en ingles"
import ingles
print "Saludo en italiano"
import italiano
print "Saludo en finlandes"
import finlandes | [
"victor.sanchez.sanchez@gmail.com"
] | victor.sanchez.sanchez@gmail.com |
f7f7c4ae920feb66bc7be4c30f81b07b1cf34892 | c0f7cfdd437f929ac57388888feb5407be9ec79f | /dit_flow/dit_widget/chk_statistics.py | 88eac4d0bccf891c867d197e6e35120bf3d812f1 | [
"MIT"
] | permissive | KyleManley/DIT | 7f2691c0995ab0d6df0e68120a757fc6a4e090d0 | e85338f84854ca8424d1ad7c69098c6f288bf441 | refs/heads/master | 2021-01-25T10:28:33.693176 | 2017-12-11T20:10:41 | 2017-12-11T20:10:41 | 123,354,368 | 0 | 0 | null | 2018-02-28T23:19:29 | 2018-02-28T23:19:29 | null | UTF-8 | Python | false | false | 2,792 | py | #!/usr/bin/python
"""Calculates statistics for each input column."""
import argparse as ap
import csv
import statistics
from dit_flow.dit_widget.common.logger_message import logger_message, DEFAULT_LOG_LEVEL
def chk_statistics(missing_value, input_data_file=None, output_data_file=None, log_file=None, log_level=DEFAULT_LOG_LEVEL):
    """Log per-column statistics (mean/stdev/median/min/max) for a CSV file.

    Values equal to *missing_value* are excluded from the statistics.
    *output_data_file* is accepted for interface compatibility but unused.
    """
    logger = logger_message(__name__, log_file, log_level)
    assert input_data_file is not None, 'An input CSV file with columns of values.'
    with open(input_data_file, newline='') as _in:
        logger.info('Count distinct values')
        reader = csv.reader(_in)
        # Materialize the rows so every column can be traversed repeatedly.
        original_values = [list(line) for line in reader]
        # Column count taken from the first row; 0 for an empty file
        # (which previously raised NameError on the unbound `column`).
        column = len(original_values[0]) if original_values else 0
        logger.info('\tTotal number ={}'.format(column))
        logger.info('{:>10}{:>10}{:>10}{:>10}{:>10}{:>10}{:>10}'
                    .format('Col', 'nrec', 'Mean', 'Stdev', 'Median', 'Min', 'Max'))
        missing = float(missing_value)
        for i in range(column):
            valid = [float(line[i]) for line in original_values
                     if float(line[i]) != missing]
            count = len(valid)
            if not valid:
                # No usable values: report the count instead of crashing
                # inside statistics.mean / min / max.
                logger.info('{:>10.0f}{:>10.0f}'.format(i + 1, count))
                continue
            mean = statistics.mean(valid)
            # statistics.stdev requires at least two points; report 0 otherwise.
            stdev = statistics.stdev(valid) if count > 1 else 0.0
            median = statistics.median(valid)
            minimum = min(valid)
            maximum = max(valid)
            logger.info('{:>10.0f}{:>10.0f}{:>10.3f}{:>10.3f}{:>10.3f}{:>10.3f}{:>10.3f}'
                        .format(i + 1, count, mean, stdev, median, minimum, maximum))
def parse_arguments():
    """Build the command-line parser for this widget and parse sys.argv."""
    arg_parser = ap.ArgumentParser(description="Counts number of distinct values in a col A \
    then corresponing distinct values in col B in input_data_file.")
    arg_parser.add_argument('missing_value', type=float, help='Missing data value in file.')
    optional_args = (
        (('-i', '--input_data_file'), 'Step file containing input data to manipulate.'),
        (('-o', '--output_data_file'), 'unused'),
        (('-l', '--log_file'), 'Step file to collect log information.'),
    )
    for flags, help_text in optional_args:
        arg_parser.add_argument(*flags, help=help_text)
    return arg_parser.parse_args()
if __name__ == '__main__':
    # CLI entry point: parse arguments and emit the statistics report.
    args = parse_arguments()
    chk_statistics(args.missing_value,
                   args.input_data_file, args.output_data_file, args.log_file)
| [
"hwilcox@vmslickmonoski.apps.int.nsidc.org"
] | hwilcox@vmslickmonoski.apps.int.nsidc.org |
6b209ae9bd25f4d7717fb6ec8adb50225f77ee8e | c81e339be809bd903cb9f64bc0bbe762e2d03664 | /BOJ/Gold IV/BOJ9935.py | bec629f11d8173997c09d79dabbf58011b660214 | [] | no_license | ccc96360/Algorithm | 1012ef74b17c1204460f640e415cd11868586601 | b1346d9a285dac31723bb903ca504dd373e77797 | refs/heads/master | 2023-09-02T15:49:42.769437 | 2021-11-07T06:59:22 | 2021-11-07T06:59:22 | 325,761,400 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 796 | py | #BOJ9935 문자열 폭발 20210514
import sys
from collections import deque
input = sys.stdin.readline
def main():
    """BOJ 9935 "string explosion": repeatedly delete every occurrence of
    the bomb string using an explicit stack, then print what remains."""
    q = deque(input().rstrip())
    bomb = input().rstrip()
    bombSize = len(bomb)
    res = []          # stack of surviving characters
    last = deque()    # window over the last bombSize characters pushed
    while q:
        v = q.popleft()
        res.append(v)
        last.append(v)
        if len(last) > bombSize:
            last.popleft()
        # debug: current char / kept chars / trailing window
        if "".join(last) == bomb:
            # Explosion: drop the bomb from the stack top and rebuild the
            # trailing window from what is now on top of the stack.
            for _ in range(bombSize):
                res.pop()
            last = deque()
            for i in res[-bombSize:]:
                last.append(i)
    if res:
        print("".join(res))
    else:
        print("FRULA")
if __name__ == '__main__':
main() | [
"ccc96360@naver.com"
] | ccc96360@naver.com |
42e1fd898441182a034fd968ee5d9e69efa7a13e | 101e31d207c2a406522fb9f1c08524bd4b9f09e2 | /test.py | fd1add63796a6ba736f1ef9acaf805f16b3ddc9c | [] | no_license | amrs12145/ir | 84095a2582dedddd61d9af5ec876a563c0dcdc91 | 407e73db75b9e73792eb65545310424f8af5f2dd | refs/heads/master | 2023-05-08T22:58:33.550160 | 2021-06-04T22:38:29 | 2021-06-04T22:38:29 | 370,883,064 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 576 | py |
from flask import Flask,redirect,url_for,request
app = Flask(
__name__
)
@app.route('/home')
def fun1():
    # GET /home: returns a fixed marker string.
    return 'home/test'
@app.route('/test')
def fun2():
    # GET /test: returns a fixed marker string.
    return 'test'
@app.route('/home/h/test/<name>')
def fun3(name):
    # GET /home/h/test/<name>: echoes the captured path segment.
    return 'home/h/test =>' + name
@app.route('/',methods=['POST','GET'])
def local():
    """Root route: POST redirects to fun3; GET echoes the 'btn2' query param."""
    if request.method == 'POST':
        # Reads the submitted form field (kept: a missing 'btn' still
        # yields the framework's 400 response, as before).
        user = request.form['btn']
        return redirect(url_for('fun3', name='amr'))
    # Default to '' so a GET without ?btn2=... no longer concatenates
    # None and raises TypeError (HTTP 500).
    user = request.args.get('btn2', '')
    return 'GET request ' + user
if __name__ == '__main__':
app.run() | [
"amrs12145@gmail.com"
] | amrs12145@gmail.com |
a99693ef3da2e1e820f3243aa54fb397ec17d653 | bd51543af3f5a3a615728900e54c5c0e679f6f37 | /src/mr.roboto/src/mr/roboto/tests/__init__.py | a8e1407a62afb2bc0704facda0b97daa1b5aac6b | [] | no_license | plone/mr.roboto | 0dbe51b9b4eff5c18d9db76dc32c5cb4272a8cf6 | 083525c369a2a821101a4fa2c402db39d436565a | refs/heads/master | 2023-08-14T15:15:35.832184 | 2023-06-04T07:04:32 | 2023-06-04T07:04:32 | 8,120,484 | 0 | 3 | null | 2023-09-06T18:25:25 | 2013-02-10T08:19:11 | Python | UTF-8 | Python | false | false | 1,640 | py | def default_settings(github=None, parsed=True, override_settings=None):
plone = ["5.2", "6.0"]
python = {
"5.2": ["2.7", "3.6"],
"6.0": ["3.8", "3.9"],
}
github_users = ["mister-roboto", "jenkins-plone-org"]
if not parsed:
plone = str(plone)
python = str(python)
github_users = str(github_users)
data = {
"plone_versions": plone,
"py_versions": python,
"roboto_url": "http://jenkins.plone.org/roboto",
"api_key": "1234567890",
"sources_file": "sources.pickle",
"checkouts_file": "checkouts.pickle",
"github_token": "secret",
"jenkins_user_id": "jenkins-plone-org",
"jenkins_user_token": "some-random-token",
"jenkins_url": "https://jenkins.plone.org",
"collective_repos": "",
"github": github,
"github_users": github_users,
"debug": "True",
}
if override_settings:
data.update(override_settings)
return data
def minimal_main(override_settings=None, scan_path=""):
    """Build a minimal pyramid WSGI app configured like mr.roboto, for tests."""
    from github import Github
    from pyramid.config import Configurator

    settings = default_settings(override_settings=override_settings)
    config = Configurator(settings=settings)
    config.include("cornice")

    registry_settings = config.registry.settings
    for key, value in settings.items():
        registry_settings[key] = value
    registry_settings["github_users"] = (
        settings["jenkins_user_id"],
        "mister-roboto",
    )
    registry_settings["github"] = Github(settings["github_token"])

    config.scan(scan_path)
    config.end()
    return config.make_wsgi_app()
| [
"gil.gnome@gmail.com"
] | gil.gnome@gmail.com |
7c07abfe45a78368fccc1684dd15011fba059c07 | 56ca0c81e6f8f984737f57c43ad8d44a84f0e6cf | /src/bpp/migrations/0293_pbn_api_kasowanie_przed_nie_eksp_zero.py | e14f1181b74a62c95a3a32e731aa17b60a5f7220 | [
"MIT",
"CC0-1.0"
] | permissive | iplweb/bpp | c40f64c78c0da9f21c1bd5cf35d56274a491f840 | a3d36a8d76733a479e6b580ba6ea57034574e14a | refs/heads/dev | 2023-08-09T22:10:49.509079 | 2023-07-25T04:55:54 | 2023-07-25T04:55:54 | 87,017,024 | 2 | 0 | NOASSERTION | 2023-03-04T04:02:36 | 2017-04-02T21:22:20 | Python | UTF-8 | Python | false | false | 784 | py | # Generated by Django 3.0.14 on 2021-09-15 19:08
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add two PBN-API behaviour flags to the Uczelnia (university) model.

    Both new BooleanFields default to False, so applying this migration is
    safe on existing rows (no data backfill needed).
    """

    dependencies = [
        ("bpp", "0292_przypinanie_dyscyplin"),
    ]

    operations = [
        # Flag: delete a record's statements in PBN before re-sending it.
        migrations.AddField(
            model_name="uczelnia",
            name="pbn_api_kasuj_przed_wysylka",
            field=models.BooleanField(
                default=False,
                verbose_name="Kasuj oświadczenia rekordu przed wysłaniem do PBN",
            ),
        ),
        # Flag: skip sending works with PK=0 to PBN.
        migrations.AddField(
            model_name="uczelnia",
            name="pbn_api_nie_wysylaj_prac_bez_pk",
            field=models.BooleanField(
                default=False, verbose_name="Nie wysyłaj do PBN prac z PK=0"
            ),
        ),
    ]
| [
"michal.dtz@gmail.com"
] | michal.dtz@gmail.com |
def is_prime(x):
    """Return True if x is prime, False otherwise.

    Fix: the original ``def`` line had dataset metadata residue fused onto
    it (broken syntax), and the loop tested every divisor up to x-2. This
    version keeps identical results (x < 2 -> False, 2 -> True) while using
    trial division by odd numbers up to sqrt(x).
    """
    if x < 2:
        return False
    if x == 2:
        return True
    if x % 2 == 0:
        return False
    d = 3
    while d * d <= x:
        if x % d == 0:
            return False
        d += 2
    return True
# Simple CLI driver: read an integer from stdin and report (in Portuguese)
# whether it is prime.
entrada = int(input("Digite um número: "))
if is_prime(entrada):
    # "<n> is prime!"
    print(str(entrada) + " é primo!")
else:
    # "<n> is not prime!"
    print(str(entrada) + " não é primo!")
| [
"gracco@gmail.com"
] | gracco@gmail.com |
5224eadbda0dcfdd3c5803de8a172e94230526fd | 06a64628eb3486ed9587db2299cde4d6239be81f | /fitters.py | 754adc4f7f5f25340240f36fc09f07db7a38ba06 | [] | no_license | yandaikang/ROACH | 5ac996ad61c89d9419e0a2ca0c2ea379bcad4645 | 71ff47cf3e4f0f5be65966c720798c6d6fa38de4 | refs/heads/master | 2020-05-23T08:02:34.819303 | 2016-10-05T03:09:06 | 2016-10-05T03:09:06 | 70,026,699 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 70,970 | py | import os
import scipy
import scipy.linalg
import mpfit
import struct
from numpy import *
import time,fractions, math,inspect,random
import threading
import numpy
import h5py
print "Loading fitters.py"
########################################################################
#
#
#
#
#######################################################################
class fitters:
def __init__(self):
#hdf file to write
self.hdffile=None
#hdffile to read
self.hdffile_r=None
self.reslist=[]
self.device_name='NULL'
self.resonator=resonatorData(0,self.device_name)
#plot when doing fitting for status...0 mean no plots
self.fit_plots=1
self.fit_prints=1
def fitprint(self,stx):
if self.fit_prints==1:
print stx
def setResonator(self,res):
self.resonator=res
def setResIndex(self,ii):
self.resonator=self.reslist[ii]
def addRes(self,res):
self.reslist.append(res)
def clearResList(self):
self.reslist=[]
def listResonators(self):
for rr in self.reslist:
rr.info()
	def plotIQNoiseCircle(self,noise_tr_indx):
		"""Plot the active resonator's IQ circle with one noise time stream.

		Shows the translated/rotated sweep, the raw sweep, and the noise
		trace noise_tr_indx both raw and after the translate/rotate
		transform.
		NOTE(review): relies on a module-level 'fit' object and on pylab's
		figure/plot being in scope -- neither is defined in this file;
		verify before use.
		"""
		resdata = self.resonator
		tsr=resdata.iqnoise[noise_tr_indx]
		tsr_tr=fit.trans_rot3(resdata, tsr)
		figure(15);clf()
		plot(resdata.trot_xf,resdata.trot_yf,'x')
		plot(resdata.iqdata[0],resdata.iqdata[1],'x')
		plot(tsr[0],tsr[1],'x')
		plot(tsr_tr[0],tsr_tr[1],'x')
	def addMkidList(self):
		"""Pull the resonator lists of every MKID in MKID_list into self.reslist.

		NOTE(review): MKID_list is not defined in this file -- presumably a
		global provided by the importing script; verify before use.
		"""
		for mkid in MKID_list:
			self.addResList(mkid.reslist)
	def plotResonators(self):
		"""Plot every resonator in self.reslist, two rows per figure.

		For resonators whose fits have not run (lorentz_fr == 0): raw
		magnitude and unwrapped phase vs frequency. For fitted resonators:
		the Lorentz magnitude fit, the phase fit (guess and final), and the
		translated/rotated IQ circle with any noise traces overlaid.
		Requires pylab plotting functions in scope; any per-resonator
		failure is swallowed with a message.
		"""
		#layout: 2 plot rows per figure; figures numbered from 13 upward
		fignum=13;
		figure(fignum)
		fignum=fignum+1;
		clf()
		#count rows
		plotrow=0
		prows=2
		for rr in self.reslist:
			try:
				rr.info()
				if (rr.lorentz_fr==0):
					#fits not run: plot raw magnitude / phase only
					IQ=rr.iqdata
					freqs=rr.freqs
					IQp=rr.RectToPolar(IQ)
					pcols=2
					subplot(prows,pcols,plotrow*pcols+1);
					plot(freqs,IQp[0])
					ylabel('Magnitude')
					txt_y=(max(IQp[0]) + min(IQp[0]))/2.0;
					txt_x=rr.rough_cent_freq;
					text(txt_x,txt_y,"%4f"%(rr.rough_cent_freq/1e6))
					subplot(prows,pcols,plotrow*pcols+2);
					plot(freqs,rr.removeTwoPi(IQp[1]))
					ylabel('Phase')
				else:
					#fitted resonator: overlay fits on the data
					mags=rr.phig_mag2s21
					freqs=rr.freqs
					params=rr.lorentz_params
					mag2s21fit = lorentzFunc(freqs,params);
					phase=rr.phig_phase
					#index of the fitted resonance frequency within the sweep
					frindx=min(where(rr.lorentz_fr<=rr.freqs)[0])
					pcols=3
					subplot(prows,pcols,plotrow*pcols+1,polar=False);
					plot(freqs,mag2s21fit,'r')
					plot(freqs,mags,'x')
					#plot(self.resonator.lorentz_fr,lorentzFunc(self.resonator.lorentz_fr,params),'^')
					plot(freqs[frindx],mags[frindx],'o')
					ylabel('Magnitude')
					txt_y=(max(mag2s21fit) + min(mag2s21fit))/2.0;
					txt_x=rr.rough_cent_freq;
					text(txt_x,txt_y,"%4f"%(rr.lorentz_fr/1e6))
					subplot(prows,pcols,plotrow*pcols+2,polar=False);
					plot(freqs,phase,'x')
					ylabel('Phase')
					plot(freqs,phaseFunc(freqs,rr.phig_phase_guesses[0],rr.phig_phase_guesses[1],rr.phig_phase_guesses[2] ,rr.phig_phase_guesses[3]),'g')
					plot(freqs,phaseFunc(freqs,rr.ph_Qf,rr.ph_fr,rr.ph_theta,rr.ph_sgn ),'r')
					subplot(prows,pcols,plotrow*pcols+3,polar=False);
					#pp=self.RectToPolar([rr.trot_xf,rr.trot_yf ])
					#polar(pp[1],pp[0],'x')
					#polar(pp[1][frindx],pp[0][frindx],'o')
					plot(rr.trot_xf,rr.trot_yf,'x' )
					plot(rr.trot_xf[frindx],rr.trot_yf[frindx],'o')
					for noise_trace in rr.iqnoise:
						tsr=noise_trace
						tsr_tr=fit.trans_rot3(rr, tsr)
						ts_tr = self.RectToPolar(tsr_tr)
						#polar(ts_tr[1],ts_tr[0],'rx')
						plot(tsr_tr[0],tsr_tr[1],'.')
				plotrow=plotrow+1
				if (plotrow==prows):
					#figure full: start a new one
					plotrow=0;
					figure(fignum)
					clf()
					fignum=fignum+1
			except:
				print "problem w/ plotting resonator"
def addResList(self,rl):
for r in rl:
self.reslist.append(r)
	def extractResonators(self,res,nsd):
		"""Find resonances in a wide sweep and cut each into its own resonator.

		Runs findResPhase (threshold = nsd standard deviations) on res, then
		for every detected resonance builds a new resonatorData holding the
		+/-400 kHz slice of IQ data around it. Returns the list of new
		resonators, or None (implicitly) when nothing was found.
		"""
		self.setResonator(res)
		a=self.findResPhase(nsd);
		if a[0]!=None:
			res.info()
			indices=a[0]
			flist=a[1]
			dfreq=res.freqs[1]-res.freqs[0]
			#span of res in Hz, 1/2 span actually
			hlfspan=4e5
			#num indices for 1/2 span
			ihspan=ceil(hlfspan/dfreq)
			reslist=[]
			for ii in indices:
				#clamp the slice to the valid index range
				ist=int(max(ii-ihspan,0))
				ied=int(min(1+ii+ihspan,res.datalen))
				newres=resonatorData(ii,self.device_name);
				newres.setData([res.iqdata[0][ist:ied],res.iqdata[1][ist:ied]],res.freqs[ist:ied],res.delayraw,res.carrierfreq)
				reslist.append(newres)
			return(reslist)
#trim the freq span of data, and add to reslist
	def trimAddResonator(self,res,freqindex):
		"""Cut a +/-400 kHz span around freqindex out of res and add it.

		Builds a new resonatorData from the slice, appends it to
		self.reslist via addRes(), and returns it.
		"""
		self.setResonator(res)
		res.info()
		indices=[freqindex]
		flist=[res.freqs[freqindex]]
		dfreq=res.freqs[1]-res.freqs[0]
		#span of res in Hz, 1/2 span actually
		hlfspan=4e5
		#num indices for 1/2 span
		ihspan=ceil(hlfspan/dfreq)
		ii=indices[0]
		#clamp the slice to the valid index range
		ist=int(max(ii-ihspan,0))
		ied=int(min(1+ii+ihspan,res.datalen))
		newres=resonatorData(ii,self.device_name);
		newres.setData([res.iqdata[0][ist:ied],res.iqdata[1][ist:ied]],res.freqs[ist:ied],res.delayraw,res.carrierfreq)
		self.addRes(newres)
		return(newres)
#do median filter on I and Q to take out impulse noise
def medianFilter(self):
for res in self.reslist:
self.setResonator(res)
x = self.resonator.iqdata[0]
y = self.resonator.iqdata[1]
self.resonator.iqdata[0] = scipy.signal.medfilt(x,5)
self.resonator.iqdata[1] = scipy.signal.medfilt(y,5)
#do median filter on I and Q to take out impulse noise
def medianFilter2(self,x,y):
xf = scipy.signal.medfilt(x,5)
yf = scipy.signal.medfilt(y,5)
return( (xf,yf) )
#do median filter on I and Q to take out impulse noise
def lowPassFilter2(self,x,y):
xf = scipy.signal.lfilter([0.5,0.5],[1],x)
yf = scipy.signal.lfilter([0.5,0.5],[1],y)
xf[0] = xf[1]
yf[0] = yf[1]
return( (xf,yf) )
#do median filter on I and Q to take out impulse noise
def lowPassFilter(self):
for res in self.reslist:
self.setResonator(res)
x = self.resonator.iqdata[0]
y = self.resonator.iqdata[1]
self.resonator.iqdata[0] = scipy.signal.lfilter([0.5,0.5],[1],x)
self.resonator.iqdata[1] = scipy.signal.lfilter([0.5,0.5],[1],y)
self.resonator.iqdata[0][0] = self.resonator.iqdata[0][1]
self.resonator.iqdata[1][0] = self.resonator.iqdata[1][1]
	def IQvelocityCalc(self):
		"""Compute IQ-plane 'velocity' (point-to-point distance) per resonator.

		After median + low-pass filtering, stores on each resonator: the
		velocity sequence (maxIQVel_z), its gradient (maxIQVel_gz), the peak
		velocity and its frequency, and the ratio of the two largest
		velocities (maxIQvel_ratio, a resonance-sharpness indicator).
		"""
		for res in self.reslist:
			self.setResonator(res)
			x = self.resonator.iqdata[0]
			y = self.resonator.iqdata[1]
			(x,y)=self.medianFilter2(x,y)
			(x,y)=self.lowPassFilter2(x,y)
			maxIQvel = 0
			maxIQIndex=0
			self.resonator.maxIQVel_z=[]
			#NOTE(review): range stops at len(x)-2, so the final segment's
			#velocity is never computed -- original author also flagged this.
			for i in range(0,len(x)-2): ### correct range?
				z = sqrt((x[i+1]-x[i])**2 + (y[i+1]-y[i])**2)
				self.resonator.maxIQVel_z.append(z)
				if z > maxIQvel:
					maxIQvel = z
					maxIQIndex=i
			self.resonator.maxIQvel = maxIQvel
			self.resonator.maxIQvel_freq=self.resonator.freqs[maxIQIndex]
			self.resonator.maxIQVel_gz=numpy.gradient(numpy.array(self.resonator.maxIQVel_z)).tolist()
			#sort descending; 1e-12 guards against division by zero
			s=numpy.sort(self.resonator.maxIQVel_z)[::-1]
			self.resonator.maxIQvel_ratio=s[0]/(s[1]+1e-12)
def clearFitsFlag(self):
for res in self.reslist:
res.is_ran_fits=0
	def fitResonators(self):
		"""Run the full fit pipeline on every not-yet-fit resonator.

		Pipeline per resonator: phase initial guess -> phase fit -> Lorentz
		guess -> Lorentz fit -> Lorentz end calcs -> skew-circle guess/fit/
		end calcs. Sets is_ran_fits=1 on success; any exception sets
		is_fit_error=1 and processing continues with the next resonator.
		NOTE(review): lorentzEndCalcs/Cecil*/Skewcircle* methods are defined
		elsewhere in this file (beyond this chunk).
		"""
		self.fitprint("HELLO")
		for res in self.reslist:
			#fit the res if not a noise trace
			if res.is_ran_fits==0:
				self.setResonator(res)
				res.is_fit_error=0
				try:
				#if 1==1:
					if self.fit_plots:
						#diagnostic plot of raw IQ circle and I/Q vs freq
						figure(11)
						clf()
						subplot(2,1,1)
						plot(self.resonator.iqdata[0],self.resonator.iqdata[1])
						subplot(2,1,2)
						plot(self.resonator.freqs,self.resonator.iqdata[0])
						plot(self.resonator.freqs,self.resonator.iqdata[1])
						legend("I","Q")
					#tim madden- if the time delay is bad, phase has a lean to it.
					#we correct the lean...
					#self.addLineToPhase()
					self.NinoInitialGuess()
					self.fitprint("self.NinoInitialGuess() done")
					self.NinoFitPhase()
					self.fitprint("self.NinoFitPhase() done")
					self.NinoLorentzGuess()
					self.fitprint("self.NinoLorentzGuess()")
					self.NinoFitLorentz()
					self.fitprint("self.NinoFitLorentz() done")
					self.lorentzEndCalcs()
					self.fitprint("self.lorentzEndCalcs()done")
					self.CecilSkewcircleGuess()
					self.fitprint("self.CecilskewcircleGuess() done")
					self.CecilfitSkewcircle()
					self.fitprint("self.CecilfitSkewcircle() done")
					self.SkewcircleEndCalcs()
					self.fitprint("self.SkewcircleEndCalcs() done")
					if self.fit_plots:
						self.lorentzPlots()
						self.SkewcirclePlots()
					res.is_ran_fits=1
				except:
				#else:
					self.fitprint("Problem fitting Resonator")
					res.is_fit_error=1
	def cirFitTransRotResonators(self):
		"""Circle-fit and translate/rotate every resonator in the list.

		Runs fit_circle2() then trans_rot2() on each resonator.
		NOTE(review): the 'if 1==1' makes the else branch unreachable --
		apparently a try/except that was disabled for debugging.
		"""
		self.fitprint("HELLO")
		for res in self.reslist:
			self.setResonator(res)
			if 1==1:
				self.fit_circle2(); #fit a circle to data
				self.trans_rot2(); #move coordinate system to center of circle
			else:
				print "problem w/ resonaot"
	def saveResonators(self,fname):
		"""Write every resonator in self.reslist to the HDF5 file fname.

		Each resonator is stored under a group named res1, res2, ... using
		the resonatorData HDF helpers.
		"""
		fp=self.resonator.openHDF(fname)
		ii=1
		for res in self.reslist:
			res.writeHDF(fp,'res%d'%(ii))
			ii=ii+1
		self.resonator.closeHDF(fp)
	def loadResonators(self,fname):
		"""Replace self.reslist with all resonators found in HDF5 file fname.

		Every top-level key starting with 'ResData' is read into a new
		resonatorData (given a random id) and appended via addRes().
		"""
		fp=self.resonator.openHDFR(fname)
		ii=1
		self.reslist=[]
		for k in fp.keys():
			if k[0:7]=='ResData':
				#key format is 'ResData_<name>'; pass the name part
				res=resonatorData(int(random.random()*1000000),self.device_name)
				res.readHDF(fp,k[8:])
				self.addRes(res)
		self.resonator.closeHDF(fp)
#
# correct for bad xmission line delay meas. add line to the phase to change its slope to flat.
#
	def addLineToPhase(self):
		"""Flatten the phase slope caused by a bad transmission-line delay.

		Subtracts a straight line from the sweep's unwrapped phase so its
		endpoints are level, then applies the matching constant phase shift
		to every noise trace (evaluated at the noise tone's frequency).
		Stores the line, its slope, and the noise phase offset on the
		resonator.
		"""
		print 'fit.addLineToPhase'
		iqp=self.resonator.RectToPolar(self.resonator.iqdata)
		phase=iqp[1];
		phase = self.removeTwoPi(phase)
		lx=len(phase)
		#slope of the end-to-end lean, per sample
		slope=(phase[lx-1] - phase[0])/lx
		newline=arange(0,lx)*slope
		phase = phase-newline;
		iqp[1]=phase
		self.resonator.iqdata=self.resonator.PolarToRect(iqp)
		#rf band freq of noise data
		fv=self.resonator.fftcarrierfreq[0] - self.resonator.srcfreq[0]
		#find the offset of newline at that freq.
		dfreq=self.resonator.freqs[1] - self.resonator.freqs[0]
		freq0=self.resonator.freqs[0]
		#noise freqoffset
		npoints=(fv-freq0)/dfreq;
		#noise phase change
		nphase=npoints*slope
		ntr = int(self.resonator.num_noise_traces)
		for k in range(ntr):
			iqn=self.resonator.iqnoise[k]
			iqnp=self.resonator.RectToPolar(iqn)
			phase=iqnp[1]-nphase
			iqnp[1] = phase;
			self.resonator.iqnoise[k]=self.resonator.PolarToRect(iqnp)
		#store to resonator...
		self.resonator.newline=newline
		self.resonator.newline_slope=slope
		self.resonator.noise_linephase=nphase
#self.fitprint(na.findResPhase(na.iqdata,0,3500e6)
#self.fitprint(na.findResAmp(na.iqdata,0,3500e6)
	def findResAmp(self,thresh):
		"""Locate resonances from the curvature of the amplitude sweep.

		Thresholds the second derivative of the magnitude; with thresh==0 a
		threshold of median + 2*std is computed from the data. Returns
		[indices, freqs_at_indices, freqs_at_indices].
		"""
		iqp=self.resonator.RectToPolar(self.resonator.iqdata)
		freqs=self.resonator.freqs
		#take 2nd dirivitive and take over thresh.
		# 2nd diriv is the "acceleration" or curvature of the amp versus freq curve
		# the max of the 2nd deriv will be at centers of resonance.
		iqpd2=diff(diff(iqp[0]))
		if self.fit_plots:
			figure(3);clf();plot(iqpd2)
		if thresh==0.0:
			baseline=median(iqpd2)
			thresh=2*std(iqpd2) + baseline
		#add 1 because diff() takes the 1st point away from array
		indices=1+where(iqpd2>thresh)[0]
		return([indices, freqs[indices] , freqs[indices]])
#assume ascending order numbers. group into
#gropups for numbers less then dstx apart
def toGroups(self,data,dstx):
allgroups=[]
group=[]
lastitem=data[0]
group.append(lastitem)
for k in range(1,len(data)):
if (data[k]-lastitem) <dstx:
group.append(data[k])
else:
allgroups.append(array(group))
group=[]
group.append(data[k])
lastitem=data[k]
allgroups.append(array(group))
return(allgroups)
def removeTwoPi(self,phases):
offset=0;
for k in range(len(phases)-1):
dphs=phases[k+1]-phases[k]
if abs(dphs)>3.1416:
offset= (-1.0 * sign(dphs) * 2*3.141592653589793)
for k2 in range(k,len(phases)-1):
phases[k2+1]=phases[k2+1] + offset
return(phases)
	def findResPhase(self,nsd=2.0):
		"""Find resonances from the point-to-point IQ 'velocity' of the sweep.

		The per-sample IQ distance is thresholded at baseline + nsd standard
		deviations, where the std is computed with the 300 largest samples
		replaced by the median (so the resonances themselves do not inflate
		the threshold). Above-threshold indices are clustered with
		toGroups(); the median index of each cluster is one resonance.
		Stores the results on the resonator (ig_* attributes) and returns
		[indices, freqs[indices]], or [None, None] if nothing was found.
		"""
		thresh=0.0
		iq=self.resonator.iqdata
		iqp=self.RectToPolar(iq)
		freqs=self.resonator.freqs
		#take out 2pi jumps in phase generated by atan function.
		phases=iqp[1];
		#unwrap the phaes- remove the 2pi jumps
		phases=self.removeTwoPi(phases)
		#take dirivitive and take over thresh.
		#iqpd1=diff(iqp[1])
		iqpd1 = sqrt(( diff(iq[0]) )**2  + (diff(iq[1]))**2)
		iqpd2=numpy.copy(iqpd1)
		baseline=median(iqpd1)
		#because the 1st bin has a spike for some reason., set to median.
		iqpd2[0]=baseline
		iqpd1[0]=baseline
		#set 300 largest values to median. so the resonator does not contribute
		#to the std. this allows searching data w/ no resonator. so only noise contrib
		#to std and tresh, and not res itself.
		for kk in range(300):
			ii=numpy.argmax(iqpd2);
			iqpd2[ii]=baseline
		#calc thresh on the version of data w/ max'es removed.
		thresh=nsd*std(iqpd2) + baseline
		if self.fit_plots:
			figure(4);clf();
			subplot(3,1,1)
			plot(iqpd1);
			subplot(3,1,2)
			plot(iqp[0])
			subplot(3,1,3)
			plot(phases,'g')
			#threshold line
			tline=thresh*ones(len(phases))
			subplot(3,1,1)
			plot(tline,'y')
		#add 1 because diff() takes the 1st point away from array
		_indices=1+where(iqpd1>thresh)[0]
		if (len(_indices)>0):
			#cluster hits closer than 20 samples into single resonances
			allgroups=self.toGroups(_indices,20)
			self.fitprint("Number of Resonances Found: %d"%(len(allgroups)))
			indices=[]
			for group in allgroups:
				idx=int(round(median(group)))
				indices.append(idx)
				if self.fit_plots:
					subplot(3,1,1)
					plot(idx,iqpd1[idx-1],'rx')
					subplot(3,1,3)
					plot(idx,phases[idx],'rx')
					subplot(3,1,2)
					plot(idx,iqp[0][idx],'rx')
			self.resonator.ig_numresfreq=len(indices)
			self.resonator.ig_indices=indices
			self.resonator.ig_bump=iqpd1
			self.resonator.ig_resfreqlist=freqs[indices]
			return([indices, freqs[indices] ])
		else:
			return([None,None])
def RectToPolar(self,data):
mags = numpy.sqrt(data[0]*data[0] + data[1]*data[1])
phase=numpy.arctan2(data[1],data[0])
return([mags,phase])
def PolarToRect(self,data):
mags=data[0]
phase=data[1]
re=mags*numpy.cos(phase)
im=mags*numpy.sin(phase)
return([re,im])
def report(self):
contents= inspect.getmembers(self)
for c in contents:
self.fitprint(c)
def report2(self):
contents= inspect.getmembers(self)
return(contents)
def getTimestamp(self):
timestamp = "T".join( str( datetime.datetime.now() ).split() )
return(timestamp)
####################################################
#Calculate center and radius of a circle given x,y
# Uses circle fitting routine from Gao dissertation
#From publication Chernov and Lesort, Journal of Mathematical Imaging and
#Vision 23: 239-252, 2005. Springer Science
# Updated: 01-09-2012 - alterted to work with 'Resonator' IQ data structure
#The eigenvalue problem is to determine the nontrivial solutions of the
#equation Ax = ?xwhere A is an n-by-n matrix, x is a length n column vector,
#and ? is a scalar. The n values of ? that satisfy the equation are
#the eigenvalues, and the corresponding values of x are the right
#eigenvectors. The MATLAB function eig solves for the eigenvalues ?,
#and optionally the eigenvectors x. The generalized eigenvalue problem
#is to determine the nontrivial solutions of the equation Ax = ?Bx
#where both A and B are n-by-n matrices and ? is a scalar. The values
#of ? that satisfy the equation are the generalized eigenvalues and
#the corresponding values of x are the generalized right eigenvectors.
#If B is nonsingular, the problem could be solved by reducing it to a
# standard eigenvalue problem B?1Ax = ?x
#Because B can be singular, an alternative algorithm, called the QZ method,
#is necessary.
#
#############################################
	def fit_circle2(self):
		"""Fit a circle to the delay-corrected IQ sweep of the active resonator.

		Algebraic fit via a 4x4 moment matrix and a generalized eigenvalue
		problem (Gao dissertation / Chernov & Lesort 2005). Stores the
		center and radius on the resonator (cir_xc, cir_yc, cir_R) and
		returns [xc, yc, R].
		"""
		self.resonator.applyDelay()
		x = self.resonator.iqdata_dly[0]
		y = self.resonator.iqdata_dly[1];
		n = len(x);
		w =(x**2+y**2);
		M=zeros([4,4])
		#create moment matrix
		M[0,0] = sum(w*w);
		M[1,0] = sum(x*w);
		M[2,0] = sum(y*w);
		M[3,0] = sum(w);
		M[0,1] = sum(x*w);
		M[1,1] = sum(x*x);
		M[2,1] = sum(x*y);
		M[3,1] = sum(x);
		M[0,2] = sum(y*w);
		M[1,2] = sum(x*y);
		M[2,2] = sum(y*y);
		M[3,2] = sum(y);
		M[0,3] = sum(w);
		M[1,3] = sum(x);
		M[2,3] = sum(y);
		M[3,3] = n;
		#constraint matrix
		B = array([[0,0,0,-2],[0,1,0,0],[0,0,1,0],[-2,0,0,0]])
		#Calculate eigenvalues and functions
		#[V,D] = eig(M,B); %calculate eigens
		VX=scipy.linalg.eig(M, B)
		X=VX[0]
		V=VX[1]
		#X = linalg.diag(D); %creates column array of eigenvalues
		#X=diag(D)
		#[C,IX] = sort(X); %sorts iegen values into Y, places index in IX
		C=sort(X,0)
		IX=argsort(X,0)
		#Values = V[:,IX[2]]); % we want eigenfunction of first positive eigenvalue (IX(2)) becuase IX(1) is neg
		Values = V[:,IX[1]]
		#% Column vector Values is then [A,B,C,D] from Gao dissertaion
		xc = -Values[1]/(2*Values[0]);
		yc = -Values[2]/(2*Values[0]);
		R = (xc**2+yc**2-Values[3]/Values[0])**0.5;
		#store to res structure
		self.resonator.cir_xc=xc
		self.resonator.cir_yc=yc
		self.resonator.cir_R=R
		return([xc,yc,R])
####################################################
#Calculate center and radius of a circle given x,y
# Uses circle fitting routine from Gao dissertation
#From publication Chernov and Lesort, Journal of Mathematical Imaging and
#Vision 23: 239-252, 2005. Springer Science
# Updated: 01-09-2012 - alterted to work with 'Resonator' IQ data structure
#The eigenvalue problem is to determine the nontrivial solutions of the
#equation Ax = ?xwhere A is an n-by-n matrix, x is a length n column vector,
#and ? is a scalar. The n values of ? that satisfy the equation are
#the eigenvalues, and the corresponding values of x are the right
#eigenvectors. The MATLAB function eig solves for the eigenvalues ?,
#and optionally the eigenvectors x. The generalized eigenvalue problem
#is to determine the nontrivial solutions of the equation Ax = ?Bx
#where both A and B are n-by-n matrices and ? is a scalar. The values
#of ? that satisfy the equation are the generalized eigenvalues and
#the corresponding values of x are the generalized right eigenvectors.
#If B is nonsingular, the problem could be solved by reducing it to a
# standard eigenvalue problem B?1Ax = ?x
#Because B can be singular, an alternative algorithm, called the QZ method,
#is necessary.
#
#supply resonator objhect. give center freq we think whre res is, then fit only a span
#this is for plots w/ curles at end of the res, and for data w/ wide span.
#############################################
def fit_circle3(self,resdata,fc_rf_Hz,span_Hz):
self.resonator= resdata
#get index in freqs where fc is in Hz, rf freq.
fc_i = int(len(self.resonator.freqs)/2)
for i in range(len(self.resonator.freqs)):
if self.resonator.freqs[i] < fc_rf_Hz:
fc_i = i
d_i = (span_Hz/2.0)/self.resonator.incrFreq_Hz
st=fc_i - d_i
ed = fc_i + d_i
if st<0: st=0
if ed>len(self.resonator.freqs): ed = len(self.resonator.freqs)
self.resonator.applyDelay()
x = self.resonator.iqdata_dly[0][st:ed]
y = self.resonator.iqdata_dly[1][st:ed]
n = len(x);
w =(x**2+y**2);
M=zeros([4,4])
#create moment matrix
M[0,0] = sum(w*w);
M[1,0] = sum(x*w);
M[2,0] = sum(y*w);
M[3,0] = sum(w);
M[0,1] = sum(x*w);
M[1,1] = sum(x*x);
M[2,1] = sum(x*y);
M[3,1] = sum(x);
M[0,2] = sum(y*w);
M[1,2] = sum(x*y);
M[2,2] = sum(y*y);
M[3,2] = sum(y);
M[0,3] = sum(w);
M[1,3] = sum(x);
M[2,3] = sum(y);
M[3,3] = n;
#constraint matrix
B = array([[0,0,0,-2],[0,1,0,0],[0,0,1,0],[-2,0,0,0]])
#Calculate eigenvalues and functions
#[V,D] = eig(M,B); %calculate eigens
VX=scipy.linalg.eig(M, B)
X=VX[0]
V=VX[1]
#X = linalg.diag(D); %creates column array of eigenvalues
#X=diag(D)
#[C,IX] = sort(X); %sorts iegen values into Y, places index in IX
C=sort(X,0)
IX=argsort(X,0)
#Values = V[:,IX[2]]); % we want eigenfunction of first positive eigenvalue (IX(2)) becuase IX(1) is neg
Values = V[:,IX[1]]
#% Column vector Values is then [A,B,C,D] from Gao dissertaion
xc = -Values[1]/(2*Values[0]);
yc = -Values[2]/(2*Values[0]);
R = (xc**2+yc**2-Values[3]/Values[0])**0.5;
#store to res structure
self.resonator.cir_xc=xc
self.resonator.cir_yc=yc
self.resonator.cir_R=R
return([xc,yc,R])
# function width = fwhm(x,y)
#
# Full-Width at Half-Maximum (FWHM) of the waveform y(x)
# and its polarity.
# The FWHM result in 'width' will be in units of 'x'
#
#
# Rev 1.2, April 2006 (Patrick Egan)
# Remove portion about if not pulse and only one edge (Nino)
	def fwhm(self,x,y):
		"""Full-Width at Half-Maximum of the waveform y(x), in units of x.

		y is normalized to its maximum; the half level is placed midway
		through the pulse's excursion, the pulse polarity is detected from
		the first sample, and the two level crossings around the pulse
		center are located with linear interpolation.
		(Rev 1.2, April 2006, Patrick Egan; single-pulse assumption per
		Nino's edit.)
		"""
		y = y / max(y);
		N = len(y);
		# lev50 = 0.5;  (original fixed half-level, replaced by excursion midpoint)
		lev50 = 1-abs(max(y)-min(y))/2.0;
		# find index of center (max or min) of pulse
		if y[0] < lev50:
			garbage=max(y);
			centerindex = where(y==garbage)[0][0]
			Pol = +1;
		else:
			garbage=min(y);
			centerindex = where(y==garbage)[0][0]
			Pol = -1;
		#leading edge: first sign change of (y - lev50)
		i = 1;
		while sign(y[i]-lev50) == sign(y[i-1]-lev50):
			i = i+1;
		interp = (lev50-y[i-1]) / (y[i]-y[i-1]);
		tlead = x[i-1] + interp*(x[i]-x[i-1]);
		#start search for next crossing at center
		i = centerindex+1;
		while ((sign(y[i]-lev50) == sign(y[i-1]-lev50)) & (i <= N-1)):
			i = i+1;
		interp = (lev50-y[i-1]) / (y[i]-y[i-1]);
		ttrail = x[i-1] + interp*(x[i]-x[i-1]);
		width = ttrail - tlead;
		return(width)
#[xf,yf] Rotates and translates circle to origin
# Step 3 in Gao fitting procedure
# Takes intial x,y circle data and center and radius from fit_circle.m
# Updated 01-09-2012: changed to work with 'Resonator' IQ data structure
# and function 'fit_circle2' to generat 'Circle' structure.
	def trans_rot2(self):
		"""Translate and rotate the fitted circle to the origin (Gao step 3).

		Uses the center from fit_circle2 to transform the delay-corrected IQ
		data; also computes |S21|^2 in dB and the frequency of minimum
		transmission. Stores trot_S21/trot_xf/trot_yf/trot_Fcenter on the
		resonator and returns [xf, yf].
		"""
		xc=self.resonator.cir_xc
		yc=self.resonator.cir_yc
		r=self.resonator.cir_R
		#Import data
		x = self.resonator.iqdata_dly[0];
		y = self.resonator.iqdata_dly[1];
		#correct data: rotate by the angle of the center vector
		alpha = arctan2(yc,xc);
		xf = (xc-x)*cos(alpha) + (yc-y)*sin(alpha);
		yf = -(xc-x)*sin(alpha) + (yc-y)*cos(alpha);
		#find S21 and Fcenter
		mag2s21 = xf**2+yf**2;
		#This is the data format to work with for fitting |s21|^2 in dB
		mag2s21dB = 10*log10(mag2s21/max(mag2s21));
		c= min(mag2s21);
		cidx= argmin(mag2s21);
		self.resonator.trot_S21=mag2s21dB
		self.resonator.trot_xf=xf
		self.resonator.trot_yf=yf
		self.resonator.trot_Fcenter=self.resonator.freqs[cidx]
		return([xf,yf])
def trans_rot3(self,resdata, iq):
xc=resdata.cir_xc
yc=resdata.cir_yc
r=resdata.cir_R
#Import data
x = iq[0];
y = iq[1];
#correct data
alpha = arctan2(yc,xc);
xf = (xc-x)*cos(alpha) + (yc-y)*sin(alpha);
yf = -(xc-x)*sin(alpha) + (yc-y)*cos(alpha);
return([xf,yf])
#Ninos code converted to py
# Using FitAllLMFnlsq (no Toolbox needed!) ... Perform the following Fits to IQ resonator data
# 1.) Phase fit on centered IQ data
# 2.) Skewed Lorentz
#
# Notes:
# 1.) The second optional argument is a filename to save the output
# to a PDF file.
#
# 2.) Data should already be DC subtracted and cable delay applied
# already by other functions (e.g., DCbias_subtract() and IQ_cable_delay()
#
# 3.) Each fitting does an initial fit some reasonable gueses, then
# we randomly vary the initial fitted parameters some reasonable
# amount and re-run the fit to see if the ssq improves. This is add
# some robustness to the fit since it can sometimes get stuck in a
# local minimum
#
# 2/15/2012 Much code derived from Tom's Lorentz_fitter6 Nino
#
	def NinoInitialGuess(self):
		"""Build initial guesses for the phase fit of the active resonator.

		Circle-fits and centers the IQ data, unwraps the centered phase,
		estimates Q from fcenter/FWHM of |S21|^2, and guesses the phase
		slope sign from the endpoints. Stores phig_* intermediates on the
		resonator and returns [phase_guesses, IQcentered, phase, freqs].
		"""
		#local constants kept from the original (the fit loops actually read
		#self.resonator.NUM_GUESSES_*; these two locals are unused)
		NUM_GUESSES_PHASE = 5000;
		NUM_GUESSES_LORENTZ = 1000;
		iq=self.resonator.iqdata
		#array of offset baseband freqs for each I Q sample
		freqs=self.resonator.freqs
		j=complex(0,1)
		#I = iq[0][::-1]
		#Q = iq[1][::-1]
		I = iq[0]
		Q = iq[1]
		z = I +j*Q;
		mag2s21 = I**2+Q**2;
		mag2s21dB = 10*log10(mag2s21/max(mag2s21));
		mindex = where(mag2s21==min(mag2s21))[0][0];
		Fcenter = freqs[mindex];
		S21 = mag2s21dB;
		self.fitprint('PHASE FITTING!!!!!!')
		#### Phase Angle Fit
		#### ---- First fit circle and then translate and rotate to center.
		#uses self.resonator.iqdata
		circle = self.fit_circle2(); #fit a circle to data
		#use self.resonator.iqdata, prev. circle fit stored in self.resonator
		IQcentered = self.trans_rot2(); #move coordinate system to center of circle
		z_centered = IQcentered[0] + j*IQcentered[1];
		phase = self.removeTwoPi(self.RectToPolar(IQcentered)[1]);
		# Using fwhm function in MKID\Matlab_code\Borrowed Code (from MathWorks
		# Exchange
		# figure(1000);
		# plot(Resonator.freq, mag2s21,'r--');
		Qguess = Fcenter/self.fwhm(freqs, mag2s21);
		self.fitprint('Qguess for phase fit: %f\n'%(Qguess))
		#tim added this: guess sign.
		sgn=1.0
		if phase[0] > phase[len(phase)-1]:
			sgn=-1.0
		phase_guesses = [Qguess, Fcenter, median(phase),sgn];
		phzfit=phaseFunc(freqs,Qguess,Fcenter,median(phase),sgn )
		if self.fit_plots:
			figure(50);
			clf();
			plot(freqs,phase,'x')
			plot(freqs,phzfit,'g')
		self.resonator.phig_phase_guesses=phase_guesses
		self.resonator.phig_phase=phase
		self.resonator.phig_IQcentered=IQcentered
		self.resonator.phig_mag2s21=mag2s21
		self.resonator.phig_mag2s21dB=mag2s21dB
		return([phase_guesses, IQcentered,phase,freqs])
	def NinoFitPhase(self):
		"""Fit phase(f) = theta0 - 2*atan(2*Q*(1-f/fr)) to the centered phase.

		Uses mpfit with the guesses from NinoInitialGuess, then re-runs the
		fit NUM_GUESSES_PHASE times with randomly perturbed starting values,
		keeping the lowest chi-square whose fr stays inside the sweep.
		Reference: Gao's thesis Eq. E.11 (also Petersan & Anlage 1998).
		Stores ph_Qf/ph_fr/ph_theta/ph_sgn on the resonator and returns the
		final parameter vector [Q, fr, theta0, sgn].
		"""
		phase_guesses = self.resonator.phig_phase_guesses;
		phase=self.resonator.phig_phase
		freqs=self.resonator.freqs
		#parinfo = [{'value':0., 'fixed':0, 'limited':[1,1], 'limits':[0.,0.]}]*10
		parinfo=[ {'value':0., 'fixed':0, 'limited':[1,1], 'limits':[0.,0.], 'parname':'NULL'}  for i in range(4) ]
		# p[0] = Q ; p[1] = fr ; p[2] = theta0 ; p[3] = sign (held fixed)
		parinfo[0]['parname']='Q factor'
		parinfo[0]['value'] = phase_guesses[0]
		parinfo[0]['limits'] = [100,1e6]
		#f0
		parinfo[1]['parname']='f0, Res freq'
		parinfo[1]['value'] = phase_guesses[1]
		parinfo[1]['limits'] = [ min(freqs),max(freqs)]
		parinfo[2]['parname']='phase median'
		parinfo[2]['value'] = phase_guesses[2]
		parinfo[2]['limits'] = [-20.0,20.0]
		parinfo[3]['parname']='phase sign'
		parinfo[3]['value'] = phase_guesses[3]
		parinfo[3]['limits'] = [-1.0,1.0]
		parinfo[3]['fixed'] = 1
		weights = ones(len(freqs))
		fa = {'x':freqs, 'y':phase, 'err':weights}
		m = mpfit.mpfit(residPhase,functkw=fa,parinfo=parinfo,quiet=1)
		#now we run fitter many times w/ random guesses.
		q_guess = abs(m.params[0]);
		f_guess = m.params[1];
		# Frequency parameter was out of the range for some reason; Set back to the
		# Fcenter
		if (m.params[1] < min(self.resonator.freqs)) or (m.params[1] > max(self.resonator.freqs)):
			m.params[1] = self.resonator.trot_Fcenter;
		chisq = m.fnorm
		iter_phase=m.niter
		phase_func_params=m.params
		# Randomly change the fit parameters and re-run the fitter....
		for ii in range(int(self.resonator.NUM_GUESSES_PHASE)):
			q_guess = abs(q_guess + 2*q_guess*(random.random()-0.5));
			freq_guess = f_guess + (max(self.resonator.freqs) - min(self.resonator.freqs))*(random.random()-0.5);
			if freq_guess > max(self.resonator.freqs) or freq_guess < min(self.resonator.freqs):
				freq_guess = self.resonator.trot_Fcenter;
			phase_guess = phase_func_params[2] + 2*phase_func_params[2]*(random.random()-0.5);
			if random.random()>0.5:
				sgn=1.0
			else:
				sgn=-1.0
			phase_guesses = [q_guess, freq_guess, phase_guess, sgn];
			parinfo[0]['value'] = phase_guesses[0]
			parinfo[1]['value'] = phase_guesses[1]
			parinfo[2]['value'] = phase_guesses[2]
			parinfo[3]['value'] = phase_guesses[3]
			mtry = mpfit.mpfit(residPhase,functkw=fa,parinfo=parinfo,quiet=1)
			newchisq = mtry.fnorm
			#accept the retry only if it converged, improved chisq, and kept fr in-span
			if (mtry.niter>0 and newchisq<chisq and (mtry.params[1] > min(self.resonator.freqs)) and (mtry.params[1] < max(self.resonator.freqs))):
				phase_func_params = mtry.params;
				chisq = newchisq;
				iter_phase = mtry.niter;
				self.fitprint('**** Phase fitting: Newest Ssq_phase: %.8f, iteration: %d\n'%(chisq,ii))
		self.fitprint('**** Phase fitting: fr:%f, q:%f iter_phase: %d \n'%(phase_func_params[1],phase_func_params[0], iter_phase))
		if self.fit_plots:
			figure(100)
			clf()
			plot(freqs,phase,'x');
			plot(freqs,phaseFunc(freqs,self.resonator.phig_phase_guesses[0],self.resonator.phig_phase_guesses[1],self.resonator.phig_phase_guesses[2] ,self.resonator.phig_phase_guesses[3]),'g')
			plot(freqs,phaseFunc(freqs,m.params[0],phase_func_params[1],phase_func_params[2],phase_func_params[3] ),'r')
		self.resonator.ph_Qf=phase_func_params[0]
		self.resonator.ph_fr=phase_func_params[1]
		self.resonator.ph_theta=phase_func_params[2]
		self.resonator.ph_sgn=phase_func_params[3]
		return(phase_func_params)
###########################################################################
#### SKEWED LORENTZ!!!!
#### -- NB: requires data that is NOT centered!!!!
#### -- Only DC offset has to be removed!!!!
#### Reference: Gao's thesis Equation E.17
# # A(1) = A
# # A(2) = B
# # A(3) = C
# # A(4) = D
# # A(5) = fr
# # A(6) = Q
# # f = A(1) + A(2).*(x-A(5))+((A(3)+A(4).*(x-A(5)))./(1+4.*(A(6).^2).*((x-A(5))./A(5)).^2));
	def NinoLorentzGuess(self):
		"""Build initial guesses for the skewed-Lorentz fit (Gao Eq. E.17).

		Parameters [A, B, C, D, fr, Q]: A from the mean of the first 50
		|S21|^2 samples, fr and Q from the phase fit, B/C/D from simple
		heuristics. Stores lrnzig_params on the resonator and returns the
		guess list.
		"""
		iq=self.resonator.iqdata
		#array of offset baseband freqs for each I Q sample
		freqs=self.resonator.freqs
		j=complex(0,1)
		mag2s21 = self.resonator.phig_mag2s21
		lorentz_guesses =[0,0,0,0,0,0]
		#A: baseline level, from the first 50 samples
		lorentz_guesses[0] = sum(mag2s21[0:50])/50.0
		lorentz_guesses[1] = 0.5/self.resonator.ph_fr
		lorentz_guesses[2] = 1.0
		lorentz_guesses[3] = 1.0/self.resonator.ph_fr
		lorentz_guesses[4] = self.resonator.ph_fr
		lorentz_guesses[5] = self.resonator.ph_Qf
		self.fitprint('lorentz_guess_params \n')
		self.fitprint(lorentz_guesses)
		self.resonator.lrnzig_params=lorentz_guesses
		if self.fit_plots:
			figure(101)
			clf()
			plot(freqs,lorentzFunc(freqs,lorentz_guesses),'r')
			plot(freqs,mag2s21,'x')
		return(lorentz_guesses)
def NinoFitLorentz(self):
    """Fit the skewed-Lorentzian model (Gao thesis Eq. E.17) to |S21|^2.

    Starts from the guesses prepared by NinoLorentzGuess() (stored in
    self.resonator.lrnzig_params), runs mpfit once, then retries
    NUM_GUESSES_LORENTZ times from randomly perturbed start points and
    keeps the lowest chi-square solution whose fitted resonance
    frequency stays inside the sweep span.

    Results are published on self.resonator (lorentz_params, lorentz_fr,
    lorentz_ssq, lorentz_iter) and the best parameter vector
    [A, B, C, D, fr, Qf] is returned.
    """
    lorentz_guesses = self.resonator.lrnzig_params
    mags = self.resonator.phig_mag2s21
    freqs = self.resonator.freqs
    # mpfit parameter descriptors: start value, hard limits, display name
    parinfo = [{'value': 0., 'fixed': 0, 'limited': [1, 1],
                'limits': [0., 0.], 'parname': 'NULL'} for i in range(6)]
    parinfo[0]['parname'] = 'A'
    parinfo[0]['value'] = lorentz_guesses[0]
    parinfo[0]['limits'] = [-10., 10.]
    parinfo[1]['parname'] = 'B'
    parinfo[1]['value'] = lorentz_guesses[1]
    parinfo[1]['limits'] = [-10., 10.]
    parinfo[2]['parname'] = 'C'
    parinfo[2]['value'] = lorentz_guesses[2]
    parinfo[2]['limits'] = [-10., 10.]
    parinfo[3]['parname'] = 'D'
    parinfo[3]['value'] = lorentz_guesses[3]
    parinfo[3]['limits'] = [-10., 10.]
    parinfo[4]['parname'] = 'fr'
    parinfo[4]['value'] = lorentz_guesses[4]
    parinfo[4]['limits'] = [min(self.resonator.freqs), max(self.resonator.freqs)]
    parinfo[5]['parname'] = 'Qf'
    parinfo[5]['value'] = lorentz_guesses[5]
    parinfo[5]['limits'] = [1.0, 1.0e7]
    weights = ones(len(freqs))
    fa = {'x': freqs, 'y': mags, 'err': weights}
    # BUGFIX: the initial fit used residPhase (the 4-parameter phase model)
    # against this 6-parameter Lorentzian parinfo, while the retry loop
    # below used residLorentz; use residLorentz for both.
    m = mpfit.mpfit(residLorentz, functkw=fa, parinfo=parinfo, quiet=1)
    lorentz_func_params = m.params
    chisq = m.fnorm
    iter_lrnz = m.niter
    # If the fitted fr fell outside the sweep span, reset it to the center.
    if (lorentz_func_params[4] < min(self.resonator.freqs)) or (lorentz_func_params[4] > max(self.resonator.freqs)):
        # BUGFIX: was "m.lorentz_func_params[4] = ...", which would raise
        # AttributeError on the mpfit result; assign to the local vector.
        lorentz_func_params[4] = self.resonator.trot_Fcenter
        self.fitprint('SKEWED LORENTZ FITTING: First Fr out of span.. setting to Fcenter....\n')
    fitgood = lorentzFunc(self.resonator.freqs, lorentz_func_params)
    # Multi-start refinement: randomly perturb the current best parameters
    # and re-run the fitter, keeping improvements only.
    for ii in range(int(self.resonator.NUM_GUESSES_LORENTZ)):
        self.fitprint(ii)
        A = lorentz_func_params[0] + 10.0*lorentz_func_params[0]*(random.random()-0.5)
        B = lorentz_func_params[1] + 10.0*lorentz_func_params[1]*(random.random()-0.5)
        C = lorentz_func_params[2] + 10.0*lorentz_func_params[2]*(random.random()-0.5)
        D = lorentz_func_params[3] + 10.0*lorentz_func_params[3]*(random.random()-0.5)
        q_guess = abs(self.resonator.ph_Qf + self.resonator.ph_Qf*(random.random()-0.5))
        freq_guess = lorentz_func_params[4] + (max(self.resonator.freqs) - min(self.resonator.freqs))*(random.random()-0.5)
        if freq_guess > max(self.resonator.freqs) or freq_guess < min(self.resonator.freqs):
            freq_guess = self.resonator.trot_Fcenter
        lorentz_guesses = [A, B, C, D, freq_guess, q_guess]
        parinfo[0]['value'] = lorentz_guesses[0]
        parinfo[1]['value'] = lorentz_guesses[1]
        parinfo[2]['value'] = lorentz_guesses[2]
        parinfo[3]['value'] = lorentz_guesses[3]
        parinfo[4]['value'] = lorentz_guesses[4]
        parinfo[5]['value'] = lorentz_guesses[5]
        mtry = mpfit.mpfit(residLorentz, functkw=fa, parinfo=parinfo, quiet=1)
        newchisq = mtry.fnorm
        fitx = lorentzFunc(self.resonator.freqs, mtry.params)
        if self.fit_plots:
            figure(60); clf()
            plot(freqs, fitx, 'g')
            plot(freqs, fitgood, 'r')
            plot(freqs, mags, 'x')
            draw()
        # Accept only converged fits that improve chi-square and keep fr
        # inside the sweep span.
        if (mtry.niter > 0 and newchisq < chisq and (mtry.params[4] > min(self.resonator.freqs)) and (mtry.params[4] < max(self.resonator.freqs))):
            lorentz_func_params = mtry.params
            fitgood = lorentzFunc(self.resonator.freqs, lorentz_func_params)
            chisq = newchisq
            iter_lrnz = mtry.niter
    # Publish the best solution on the resonator object.
    self.resonator.lorentz_params = lorentz_func_params
    self.resonator.lorentz_fr = lorentz_func_params[4]
    self.resonator.lorentz_ssq = chisq
    self.resonator.lorentz_iter = iter_lrnz
    self.fitprint('**** LRnz fitting: Newest Ssq_phase: %.8f, iteration: %d\n'%(chisq,ii))
    self.fitprint('**** LRnz fitting: fr:%f, q:%f iter_phase: %d \n'%(lorentz_func_params[4],lorentz_func_params[5], iter_lrnz))
    return(lorentz_func_params)
def lorentzEndCalcs(self):
    """Derive Qr, Qc, Qi and s21min from the skewed-Lorentzian fit and
    store them on self.resonator."""
    lorentz_func_params = self.resonator.lorentz_params
    mag2s21 = self.resonator.phig_mag2s21
    # Normalize data and fit trace to the fit maximum, then convert to dB.
    mag2s21fit = lorentzFunc(self.resonator.freqs, lorentz_func_params)
    mag2s21norm = mag2s21/max(mag2s21fit)
    mag2s21fitnorm = mag2s21fit/max(mag2s21fit)
    mag2s21dB = 10*log10(mag2s21norm)
    mag2s21fitdB = 10*log10(mag2s21fitnorm)
    s21min = 10**(min(mag2s21fitdB)/20)
    # Fall back to the data minimum when the fit shows no dip (s21min == 1,
    # i.e. 0 dB in absolute units).
    if s21min == 1.0:
        # BUGFIX: was "10^(...)" -- bitwise XOR, which raises TypeError on a
        # float operand; exponentiation was intended (cf. the line above).
        s21min = 10**(min(mag2s21dB)/20)
        self.fitprint('** lorentz WARNING: Using min21 from data, not fit... \n')
    # Qr from the fit; Qc/Qi from the depth of the dip.
    Qr = abs(lorentz_func_params[5])
    Qc = Qr/(1-s21min)
    Qi = (Qc*Qr)/(Qc-Qr)
    self.resonator.lorentz_Q = Qr
    self.resonator.lorentz_Qc = Qc
    self.resonator.lorentz_Qi = Qi
    self.fitprint('self.resonator.lorentz_fr: %f\n'%(self.resonator.lorentz_fr))
    # theta_find_fr call disabled since 09-17-2012 (VNA traces gave a
    # not-well-understood error; theta not needed for that analysis).
    # Re-enable when fitting data from the IQ mixer.
    #self.resonator.lorentz_theta = theta_find_fr(self.resonator.lorentz_fr,Resonator);
    self.resonator.lorentz_s21min = s21min
def lorentzPlots(self):
    # Diagnostic plots for the skewed-Lorentzian fit: |S21|^2 data vs fit
    # (fig 102), rotated IQ data in polar form (fig 103), and phase data vs
    # the phase-fit models (fig 100). Uses pylab-style plotting globals.
    mags=self.resonator.phig_mag2s21
    freqs=self.resonator.freqs
    params=self.resonator.lorentz_params
    mag2s21fit = lorentzFunc(freqs,params);
    phase=self.resonator.phig_phase
    # index of the first frequency sample at/above the fitted fr
    frindx=min(where(self.resonator.lorentz_fr<=self.resonator.freqs)[0])
    figure(102)
    clf()
    subplot(2,1,1)
    plot(freqs,mag2s21fit,'r')
    plot(freqs,mags,'x')
    # mark the fitted fr on the model curve and the nearest data sample
    plot(self.resonator.lorentz_fr,lorentzFunc(self.resonator.lorentz_fr,params),'^')
    plot(freqs[frindx],mags[frindx],'o')
    subplot(2,1,2)
    plot(freqs,phase,'x')
    figure(103)
    pp=self.RectToPolar([self.resonator.trot_xf,self.resonator.trot_yf ])
    clf()
    polar(pp[1],pp[0],'x')
    polar(pp[1][frindx],pp[0][frindx],'o')
    figure(100)
    clf()
    plot(freqs,phase,'x');
    # green: phase model from the initial guesses; red: from the fit result
    plot(freqs,phaseFunc(freqs,self.resonator.phig_phase_guesses[0],self.resonator.phig_phase_guesses[1],self.resonator.phig_phase_guesses[2] ,self.resonator.phig_phase_guesses[3]),'g')
    plot(freqs,phaseFunc(freqs,self.resonator.ph_Qf,self.resonator.ph_fr,self.resonator.ph_theta,self.resonator.ph_sgn ),'r')
###########################################################################
#### SKEWED Circle!!!! # added by cecil
#### -- NB: requires data that is NOT centered!!!!
#### -- Only DC offset has to be removed!!!!
#### Reference: Geerlings et al Applied Physics letters 100, 192601 (2012)
# # A(0) = Qo
# # A(1) = Qc
# # A(2) = wr
# # A(3) = dw
# # A(4) = mag2S21 offset
# # f (S21) = 1+ ((A(1)/A(2)-2j*A(1)*A(4)/A(3))/(1+2j*A(1)*(w-A(3))/A(3)));
def CecilSkewcircleGuess(self):
    """Seed parameters [Q0, Qc, fr, dw] for the skewed-circle |S21| fit.

    Q0, Qc and fr are taken from the preceding Lorentzian fit; the
    asymmetry term dw starts at zero. The guesses are stored in
    self.resonator.skewcircle_params and returned.
    """
    iq = self.resonator.iqdata
    # offset baseband frequency for every I/Q sample
    freqs = self.resonator.freqs
    I = iq[0]
    Q = iq[1]
    # |S21|^2 normalized to the first ("off resonance") sample, in dB
    mag2s21 = I**2 + Q**2
    mag2s21norm = mag2s21 / mag2s21[0]
    mag2S21dB = 10 * log10(mag2s21norm)
    guesses = [
        self.resonator.lorentz_Q,   # Q0
        self.resonator.lorentz_Qc,  # Qc
        self.resonator.lorentz_fr,  # fr
        0.0,                        # dw: start with no asymmetry
    ]
    self.fitprint('skewcircle_guess_params \n')
    self.fitprint(guesses)
    self.resonator.skewcircle_params = guesses
    if self.fit_plots:
        figure(401)
        clf()
        plot(freqs, SkewcircleFunc(freqs, guesses), 'r')
        plot(freqs, mag2S21dB, 'x')
    return guesses
def CecilfitSkewcircle(self):
    """Fit the skewed-circle |S21| model (Geerlings et al., Appl. Phys.
    Lett. 100, 192601 (2012)) to the normalized |S21|^2 trace in dB.

    Starts from the guesses in self.resonator.skewcircle_params, runs
    mpfit once, then retries NUM_GUESSES_SKEWCIRCLE times from randomly
    perturbed start points, keeping the lowest chi-square solution whose
    resonance frequency stays inside the sweep span. Results are stored
    on self.resonator (skewcircle_params/_fr/_ssq/_iter) and the best
    parameter vector [Q0, Qc, fr, dw] is returned.
    """
    self.fitprint("Skewed circle fitting")
    skewcircle_guesses = self.resonator.skewcircle_params
    I = self.resonator.iqdata[0]
    Q = self.resonator.iqdata[1]
    # |S21|^2 normalized to the first ("off resonance") sample, in dB
    mag2s21 = I**2 + Q**2
    mag2s21norm = mag2s21/mag2s21[0]
    mag2S21dB = 10*log10(mag2s21norm)
    mags = mag2S21dB
    freqs = self.resonator.freqs
    # mpfit parameter descriptors: start value, hard limits, display name
    parinfo = [{'value': 0., 'fixed': 0, 'limited': [1, 1],
                'limits': [0., 0.], 'parname': 'NULL'} for i in range(4)]
    parinfo[0]['parname'] = 'Q0'
    parinfo[0]['value'] = skewcircle_guesses[0]
    parinfo[0]['limits'] = [1.0, 1.0e7]
    parinfo[1]['parname'] = 'Qc'
    parinfo[1]['value'] = skewcircle_guesses[1]
    parinfo[1]['limits'] = [1.0, 1.0e7]
    parinfo[2]['parname'] = 'fr'
    parinfo[2]['value'] = skewcircle_guesses[2]
    parinfo[2]['limits'] = [min(self.resonator.freqs), max(self.resonator.freqs)]
    parinfo[3]['parname'] = 'dw'
    parinfo[3]['value'] = skewcircle_guesses[3]
    parinfo[3]['limits'] = [-1.0e6, 1.0e6]
    weights = ones(len(freqs))
    fa = {'x': freqs, 'y': mags, 'err': weights}
    m = mpfit.mpfit(residSkewcircle, functkw=fa, parinfo=parinfo, quiet=1)
    skewcircle_func_params = m.params
    chisq = m.fnorm
    iter_swcrl = m.niter
    # If the fitted fr fell outside the sweep span, reset it to the center.
    if (skewcircle_func_params[2] < min(self.resonator.freqs)) or (skewcircle_func_params[2] > max(self.resonator.freqs)):
        # BUGFIX: was "m.skewcircle_func_params[2] = ...", which would raise
        # AttributeError on the mpfit result; assign to the local vector.
        skewcircle_func_params[2] = self.resonator.trot_Fcenter
        self.fitprint('SKEWED CIRCLE FITTING: First Fr out of span.. setting to Fcenter....\n')
    fitgood = SkewcircleFunc(self.resonator.freqs, skewcircle_func_params)
    # Multi-start refinement: perturb (Q0/Qc by up to +-100%, fr slightly,
    # dw by an additive term) and keep improvements only.
    for ii in range(int(self.resonator.NUM_GUESSES_SKEWCIRCLE)):
        self.fitprint(ii)
        self.fitprint(chisq)
        A = skewcircle_func_params[0] + 2.0*skewcircle_func_params[0]*(random.random()-0.5)
        B = skewcircle_func_params[1] + 2.0*skewcircle_func_params[1]*(random.random()-0.5)
        C = skewcircle_func_params[2] + 1.0e-4*skewcircle_func_params[2]*(random.random()-0.5)
        D = skewcircle_func_params[3] + 1.0e5*(random.random()-0.5)
        if C > max(self.resonator.freqs) or C < min(self.resonator.freqs):
            C = self.resonator.trot_Fcenter
        skewcircle_guesses = [A, B, C, D]
        parinfo[0]['value'] = skewcircle_guesses[0]
        parinfo[1]['value'] = skewcircle_guesses[1]
        parinfo[2]['value'] = skewcircle_guesses[2]
        parinfo[3]['value'] = skewcircle_guesses[3]
        mtry = mpfit.mpfit(residSkewcircle, functkw=fa, parinfo=parinfo, quiet=1)
        newchisq = mtry.fnorm
        fitx = SkewcircleFunc(self.resonator.freqs, mtry.params)
        if self.fit_plots:
            figure(402); clf()
            plot(freqs, fitx, 'g')
            plot(freqs, fitgood, 'r')
            plot(freqs, mags, 'x')
            draw()
        # Accept only converged fits that improve chi-square and keep fr
        # inside the sweep span.
        if (mtry.niter > 0 and newchisq < chisq and (mtry.params[2] > min(self.resonator.freqs)) and (mtry.params[2] < max(self.resonator.freqs))):
            skewcircle_func_params = mtry.params
            fitgood = SkewcircleFunc(self.resonator.freqs, skewcircle_func_params)
            chisq = newchisq
            iter_swcrl = mtry.niter
    # Publish the best solution on the resonator object.
    self.resonator.skewcircle_params = skewcircle_func_params
    self.resonator.skewcircle_fr = skewcircle_func_params[2]
    self.resonator.skewcircle_ssq = chisq
    self.resonator.skewcircle_iter = iter_swcrl
    self.fitprint('**** Skewcircle fitting: Newest Ssq_phase: %.8f, iteration: %d\n'%(chisq,ii))
    self.fitprint('**** Skewcircle fitting: fr:%f, q:%f iter_phase: %d \n'%(skewcircle_func_params[2],skewcircle_func_params[0], iter_swcrl))
    return(skewcircle_func_params)
def SkewcircleEndCalcs(self):
    """Derive Qr, Qc, Qi (and power levels) from the skewed-circle fit
    and store the quality factors on self.resonator."""
    params = self.resonator.skewcircle_params
    mag2s21 = self.resonator.phig_mag2s21
    Qr = abs(params[0])
    Qc = abs(params[1])
    Qi = (Qc * Qr) / (Qc - Qr)
    # Generator power at the device after all attenuation stages (dB scale)
    Pg = -5.0 - self.resonator.atten_U6 - self.resonator.atten_U7 - self.resonator.cryoAtt
    # NOTE(review): Pdiss/Pint are computed but never stored -- confirm
    # whether they should be published on the resonator as well.
    Pdiss = Pg * (2.0 / Qi) * (Qr**2.0 / Qc)
    Pint = Pg * (2.0 / pi) * (Qr**2.0 / Qc)
    self.resonator.skewcircle_Q = Qr
    self.resonator.skewcircle_Qc = Qc
    self.resonator.skewcircle_Qi = Qi
def SkewcirclePlots(self):
    # Diagnostic plots for the skewed-circle fit (fig 403): fitted |S21| dB
    # curve vs the data, plus the |S21|^2 trace with the fr sample marked.
    j = complex(0,1)
    I = self.resonator.iqdata[0]
    Q = self.resonator.iqdata[1]
    S21 = abs(I + j*Q)
    S21norm = S21/S21[0] #normalize to 'off resonance' value
    mag2s21 = S21norm**2 #don't think I need this
    mag2S21dB = 20.0*log10(S21norm)
    mags = mag2S21dB
    # NOTE(review): the dB trace assigned to mags above is immediately
    # overwritten here with the stored |S21|^2 data -- confirm intent.
    mags=self.resonator.phig_mag2s21
    freqs=self.resonator.freqs
    params=self.resonator.skewcircle_params
    mag2s21dBfit = SkewcircleFunc(freqs,params);
    phase=self.resonator.phig_phase
    # index of the first sample at/above the Lorentzian (not skewcircle) fr
    frindx=min(where(self.resonator.lorentz_fr<=self.resonator.freqs)[0])
    figure(403)
    clf()
    subplot(2,1,1)
    plot(freqs,mag2s21dBfit,'r')
    plot(freqs,mag2S21dB,'x')
    subplot(2,1,2)
    plot(freqs,mags,'x')
    #plot(self.resonator.skewcircle_fr,SkewcircleFunc(self.resonator.skewcircle_fr,params),'^')
    plot(freqs[frindx],mags[frindx],'o')
    #subplot(2,1,2)
    #plot(freqs,phase,'x')
    # figure(333)
    # pp=self.RectToPolar([self.resonator.trot_xf,self.resonator.trot_yf ])
    # clf()
    # polar(pp[1],pp[0],'x')
    # polar(pp[1][frindx],pp[0][frindx],'o')
#mazin code
def smooth(self, x, window_len=10, window='hanning'):
    """Smooth 1-D data by convolving it with a window of the requested size.

    The signal is padded at both ends with reflected copies of itself so
    that transients at the edges are minimized.

    Args:
        x: 1-D numpy array to smooth.
        window_len: dimension of the smoothing window.
        window: 'flat' (moving average), 'hanning', 'hamming',
            'bartlett' or 'blackman'.

    Returns:
        The smoothed signal (same length as x with this padding/slicing).

    Raises:
        ValueError: if x is not 1-D, is shorter than window_len, or the
            window name is unknown.

    See also: numpy.hanning, numpy.hamming, numpy.bartlett,
    numpy.blackman, numpy.convolve, scipy.signal.lfilter.
    """
    if x.ndim != 1:
        # BUGFIX: 'raise ValueError, msg' is Python-2-only syntax; the call
        # form below works on both Python 2 and 3.
        raise ValueError("smooth only accepts 1 dimension arrays.")
    if x.size < window_len:
        raise ValueError("Input vector needs to be bigger than window size.")
    if window_len < 3:
        # window too small to do anything useful -- return input unchanged
        return x
    if window not in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:
        raise ValueError("Window is one of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'")
    # Reflect-pad both ends so the convolution sees data beyond the edges.
    s=np.r_[2*x[0]-x[window_len:1:-1], x, 2*x[-1]-x[-1:-window_len:-1]]
    if window == 'flat': #moving average
        w = np.ones(window_len,'d')
    else:
        w = getattr(np, window)(window_len)
    # Normalize the window so the smoothed signal keeps the input's scale,
    # then trim the padded regions back off.
    y = np.convolve(w/w.sum(), s, mode='same')
    return y[window_len-1:-window_len+1]
def FitLoopMP(self): # Fit the sweep using the full IQ data with MPFIT!
    """Fit the full IQ resonance loop with the 10-parameter RESDIFF model
    via mpfit, restarting 20 times from perturbed guesses and keeping the
    lowest chi-square solution; returns the best parameter vector.

    NOTE(review): everything after the return(popt) statement near the end
    is unreachable dead code (Qc/Qi/dip/Pint bookkeeping) -- confirm
    whether the early return was intended.
    """
    import mpfit
    # find center from IQ max
    #array of offset baseband freqs for each I Q sample
    # NOTE(review): uses the bare name "numpy" here but "np" elsewhere --
    # presumably both are imported at module level.
    freqs=self.startFreq_Hz + (numpy.arange(self.memLen4) * self.incrFreq_Hz)
    # as we use the negative sideband, we suybtract
    freqs=self.carrierfreq - freqs;
    I=self.iqdata[0]
    Q=self.iqdata[1]
    # reverse everything so the frequency axis is increasing
    I=I[::-1]
    Q=Q[::-1]
    freqs=freqs[::-1]
    fsteps=len(I)
    # IQ-plane "velocity"; its smoothed maximum marks the resonance
    vel = np.sqrt( (diff(I))**2 + (diff(Q))**2)
    svel = self.smooth(vel)
    # NOTE(review): cidx is an index *array* from np.where (possibly more
    # than one element); the low/high arithmetic below therefore yields
    # arrays too -- relies on legacy numpy scalar-like behavior.
    cidx = (np.where(svel==max(svel)))[0]
    vmaxidx = cidx[0]
    if self.fit_plots:
        figure(12)
        clf()
        subplot(4,1,1)
        plot(svel);plot(vmaxidx,svel[vmaxidx],'rx')
    #center=self.findResPhase();
    # Try to pass fsteps/2 points but work even if closer to the edge than that
    low = cidx - fsteps/4
    if low < 0:
        low = 0
    high = cidx + fsteps/4
    # NOTE(review): this compares cidx (always < fsteps), so the clamp
    # probably meant "if high > fsteps" -- confirm.
    if cidx > fsteps :
        high = fsteps
    #self.fitprint(cidx,low,high
    idx = freqs[low:high]
    #I = self.I[low:high]-self.I0
    #Q = self.Q[low:high]-self.Q0
    I = I[low:high]
    Q = Q[low:high]
    if self.fit_plots:
        figure(12);subplot(4,1,2)
        plot(I);plot(Q,'g')
        plot(self.RectToPolar([I,Q])[0],'r')
    # Pack [I, Q] into a single vector for the residual function.
    s21 = np.zeros(len(I)*2)
    s21[:len(I)] = I
    s21[len(I):] = Q
    sigma = np.zeros(len(I)*2)
    #is this sd mening st deviation? Is it a point by point std based on noise?
    #sigma[:len(I)] = self.Isd[low:high]
    #sigma[len(I):] = self.Qsd[low:high]
    #just make some stupid guess as I dont know what Isd is....
    #we set weights to all 1. We should have more weight at the center....
    #sigma[:len(I)] = ones(len(I))
    #sigma[len(I):] = ones(len(I))
    # weight samples by the smoothed IQ velocity (more weight near resonance)
    erweight= 1.0 + 5.0*(svel/max(svel))
    erweight = erweight[low:high]
    sigma[:len(I)]=erweight
    sigma[len(I):]=erweight
    # take a guess at center
    Iceng = (max(I)-min(I))/2.0 + min(I)
    Qceng = (max(Q)-min(Q))/2.0 + min(Q)
    self.fitprint('Iceng %f Qceng %f'%(Iceng,Qceng))
    # initial rotation-angle guess from a point a quarter of the way in,
    # folded into [-pi/2, pi/2]
    ang = np.arctan2( Q[fsteps/4] - Qceng, I[fsteps/4] - Iceng )
    self.fitprint(ang)
    if ang >= 0 and ang <= np.pi:
        ang -= np.pi/2
    if ang >= -np.pi and ang < 0:
        ang += np.pi/2
    #self.fitprint(Q[self.fsteps/4]-self.Qceng, I[self.fsteps/4]-self.Iceng
    #self.fitprint(ang
    #parinfo = [{'value':0., 'fixed':0, 'limited':[1,1], 'limits':[0.,0.]}]*10
    parinfo=[ {'value':0., 'fixed':0, 'limited':[1,1], 'limits':[0.,0.], 'parname':'NULL'} for i in range(10) ]
    # Q = p[0] ; Q
    # f0 = p[1] ; resonance frequency
    # aleak = p[2] ; amplitude of leakage
    # ph1 = p[3] ; phase shift of leakage
    # da = p[4] ; variation of carrier amplitude
    # ang1 = p[5] ; Rotation angle of data
    # Igain = p[6] ; Gain of I channel
    # Qgain = p[7] ; Gain of Q channel
    # Ioff = p[8] ; Offset of I channel
    # Qoff = p[9] ; Offset of Q channel
    #Q
    parinfo[0]['parname']='Q factor'
    parinfo[0]['value'] = 50000.0
    parinfo[0]['limits'] = [5000.0,1e6]
    #f0
    parinfo[1]['parname']='f0, Res freq'
    parinfo[1]['value'] = mean(idx)
    parinfo[1]['limits'] = [ min(idx),max(idx)]
    parinfo[2]['parname']='amplitude of leakage'
    parinfo[2]['value'] = 1.0
    parinfo[2]['limits'] = [1e-4,1e2]
    parinfo[3]['parname']='phase shift of leakage'
    parinfo[3]['value'] = 800.0
    parinfo[3]['limits'] = [1.0,4e4]
    parinfo[4]['parname']='variation of carrier amplitude'
    parinfo[4]['value'] = 500.0
    parinfo[4]['limits'] = [-5000.0,5000.0]
    parinfo[5]['parname']='Rotation angle of data'
    parinfo[5]['value'] = ang
    parinfo[5]['limits'] = [-np.pi*1.1,np.pi*1.1]
    parinfo[6]['parname']='Gain of I channel'
    parinfo[6]['value'] = max(I[low:high]) - min(I[low:high])
    parinfo[6]['limits'] = [parinfo[6]['value'] - 0.5*parinfo[6]['value'] , parinfo[6]['value'] + 0.5*parinfo[6]['value'] ]
    parinfo[7]['parname']='Gain of Q channel'
    parinfo[7]['value'] = max(Q[low:high]) - min(Q[low:high])
    # NOTE(review): the upper limit uses parinfo[6] (I gain) -- probably
    # meant parinfo[7]; confirm.
    parinfo[7]['limits'] = [parinfo[7]['value'] - 0.5*parinfo[7]['value'] , parinfo[7]['value'] + 0.5*parinfo[6]['value'] ]
    parinfo[8]['parname']='Offset of I channel'
    parinfo[8]['value'] = Iceng
    parinfo[8]['limits'] = [parinfo[8]['value'] - np.abs(0.5*parinfo[8]['value']) , parinfo[8]['value'] + np.abs(0.5*parinfo[8]['value']) ]
    parinfo[9]['parname']='Offset of Q channel'
    parinfo[9]['value'] = Qceng
    parinfo[9]['limits'] = [parinfo[9]['value'] - np.abs(0.5*parinfo[9]['value']) , parinfo[9]['value'] + np.abs(0.5*parinfo[9]['value']) ]
    fa = {'x':idx, 'y':s21, 'err':sigma}
    self.fitprint(parinfo)
    #pdb.set_trace()
    # use magfit Q if available
    #try:
    #    Qguess = np.repeat(self.mopt[0],10)
    #except:
    Qguess = np.repeat(arange(10)*10000,10)
    chisq=1e50
    if self.fit_plots:
        figure(100);
        clf()
        plot(I,'bx')
        plot(Q,'gx')
    # 20 restarts with randomized Q / leakage / angle guesses; keep the best.
    for x in range(20):
        # Fit
        self.fitprint('---------')
        self.fitprint('iteratin: %d'%(x))
        #self.fitprint(parinfo
        Qtry = Qguess[x] + 20000.0*np.random.normal()
        if Qtry < 5000.0:
            Qtry = 5000.0
        parinfo[0]['value'] = Qtry
        parinfo[2]['value'] = 1.1e-4 + np.random.uniform()*90.0
        parinfo[3]['value'] = 1.0 + np.random.uniform()*3e4
        parinfo[4]['value'] = np.random.uniform()*9000.0 - 4500.0
        if x > 5:
            # after a few tries, also randomize the rotation angle
            parinfo[5]['value'] = np.random.uniform(-1,1)*np.pi
        # fit!
        m = mpfit.mpfit(RESDIFFMP,functkw=fa,parinfo=parinfo,quiet=1)
        #self.fitprint('-------')
        #self.fitprint(m
        popt = m.params
        fit = RESDIFF(idx,popt[0],popt[1],popt[2],popt[3],popt[4],popt[5],popt[6],popt[7],popt[8],popt[9])
        if self.fit_plots:
            figure(100);
            plot(fit[:len(fit)/2],'b')
            plot(fit[len(fit)/2:],'g')
            draw();
        newchisq = m.fnorm
        if newchisq < chisq:
            chisq = newchisq
            bestpopt = m.params
    # keep the best restart; the bare except guards the case where no fit
    # ever improved on the initial chisq (bestpopt never assigned)
    try:
        popt = bestpopt
    except:
        popt = m.params
    popt = popt
    Icen = popt[8]
    Qcen = popt[9]
    fit = RESDIFF(idx,popt[0],popt[1],popt[2],popt[3],popt[4],popt[5],popt[6],popt[7],popt[8],popt[9])
    Ifit=fit[:(len(fit)/2)]
    Qfit=fit[(len(fit)/2):]
    if self.fit_plots:
        figure(12);
        subplot(4,1,3)
        plot(Ifit,'b' )
        plot(Qfit,'g' )
        # NOTE(review): "na" is not defined anywhere visible in this file
        plot(na.RectToPolar([Ifit,Qfit])[0],'r')
        figure(12);
        subplot(4,1,4)
        plot(na.RectToPolar([Ifit,Qfit])[1],'g')
    return(popt)
    # --- unreachable below this point because of the return above ---
    #pdb.set_trace()
    # compute dipdb,Qc,Qi
    radius = abs((popt[6]+popt[7]))/4.0
    diam = (2.0*radius) / (np.sqrt(popt[8]**2 + popt[9]**2) + radius)
    Qc = popt[0]/diam
    Qi = popt[0]/(1.0-diam)
    dip = 1.0 - 2.0*radius/(np.sqrt(popt[8]**2 + popt[9]**2) + radius)
    dipdb = 20.0*np.log10(dip)
    # internal power
    power = 10.0**((-self.atten1-35.0)/10.0)
    Pint = 10.0*np.log10((2.0 * self.popt[0]**2/(np.pi * Qc))*power)
    #self.fitprint(popt
    #self.fitprint(radius,diam,Qc,Qi,dip,dipdb
    self.Qm = popt[0]
    self.fm = popt[1]
    self.Qc = Qc
    self.Qi = Qi
    self.dipdb = dipdb
    self.Pint = Pint
    self.fpoints = len(I)
    self.fI = fit[:len(I)]
    self.fQ = fit[len(I):]
    self.ff = self.freq[low:high]
def RESDIFF(x,Q,f0,aleak,ph1,da,ang1,Igain,Qgain,Ioff,Qoff):
    """Resonance-dip IQ model used by FitLoopMP.

    Parameters:
        x: frequency samples (numpy array).
        Q: quality factor.
        f0: resonance frequency.
        aleak, ph1: amplitude and phase shift of a leakage term.
        da: linear variation of the carrier amplitude.
        ang1: rotation angle applied to the IQ data.
        Igain, Qgain: gains of the I and Q channels.
        Ioff, Qoff: DC offsets of the I and Q channels.

    Returns:
        A length 2*len(x) array: modeled I samples followed by modeled
        Q samples.
    """
    l = len(x)
    dx = (x - f0) / f0  # fractional detuning
    # Centered resonance dip: 2jQdx/(1+2jQdx) - 1/2. Direct complex
    # arithmetic replaces the original np.vectorize(complex) calls, which
    # looped in Python; the values are identical.
    s21a = (2j * Q * dx) / (1.0 + 2j * Q * dx) - 0.5
    # Add the linear carrier-amplitude variation and the leakage term.
    s21b = da * dx + s21a + aleak * ((1.0 - np.cos(dx * ph1)) - 1j * np.sin(dx * ph1))
    # Scale each channel, rotate by ang1, then apply the DC offsets.
    Ix1 = s21b.real * Igain
    Qx1 = s21b.imag * Qgain
    nI1 = Ix1 * np.cos(ang1) + Qx1 * np.sin(ang1) + Ioff
    nQ1 = -Ix1 * np.sin(ang1) + Qx1 * np.cos(ang1) + Qoff
    # Pack [I, Q] into a single vector to match the fitter's data layout.
    s21 = np.zeros(l * 2)
    s21[:l] = nI1
    s21[l:] = nQ1
    return s21
def RESDIFFMP(p, fjac=None, x=None, y=None, err=None):
    """mpfit residual function for the resonance-dip IQ model.

    p packs the ten RESDIFF parameters
    [Q, f0, aleak, ph1, da, ang1, Igain, Qgain, Ioff, Qoff];
    x is the frequency axis, y the packed [I, Q] data vector and err the
    per-sample weights. Returns [status, weighted residuals].
    """
    Q, f0, aleak, ph1, da, ang1, Igain, Qgain, Ioff, Qoff = p[:10]
    l = len(x)
    dx = (x - f0) / f0  # fractional detuning
    # Same model as RESDIFF (kept in sync). Direct complex arithmetic
    # replaces the original np.vectorize(complex) calls, which looped in
    # Python; the values are identical.
    s21a = (2j * Q * dx) / (1.0 + 2j * Q * dx) - 0.5
    s21b = da * dx + s21a + aleak * ((1.0 - np.cos(dx * ph1)) - 1j * np.sin(dx * ph1))
    # Scale, rotate and offset the two channels.
    Ix1 = s21b.real * Igain
    Qx1 = s21b.imag * Qgain
    nI1 = Ix1 * np.cos(ang1) + Qx1 * np.sin(ang1) + Ioff
    nQ1 = -Ix1 * np.sin(ang1) + Qx1 * np.cos(ang1) + Qoff
    s21 = np.zeros(l * 2)
    s21[:l] = nI1
    s21[l:] = nQ1
    status = 0
    return [status, (y - s21) / err]
def phaseFunc(x, Qf, fr, theta0, sgn):
    """Resonance phase model: sgn*(theta0 - 2*atan(2*Qf*(1 - x/fr))).

    Reference: Gao's thesis Eq. E.11 (see also Petersan & Anlage 1998,
    J. Appl. Phys. 84, 3392).

    Args:
        x: frequency (scalar or numpy array).
        Qf: quality factor.
        fr: resonance frequency.
        theta0: phase offset.
        sgn: overall sign flip (+1/-1), accounting for sideband choice.
    """
    # np.arctan instead of the bare pylab arctan, for an explicit
    # dependency consistent with RESDIFF/RESDIFFMP's np.* usage.
    return sgn * (theta0 - 2 * np.arctan(2 * Qf * (1 - x / fr)))
def residPhase(p, fjac=None, x=None, y=None, err=None):
    """Weighted residuals of the phase model for mpfit.

    p = [Qf, fr, theta0, sgn]; returns [status, (data - model) / err].
    """
    model = phaseFunc(x, p[0], p[1], p[2], p[3])
    residual = (y - model) / err
    return [0, residual]
#
# Nino, Gao lorenz function
#
def lorentzFunc(x, A):
    """Skewed-Lorentzian |S21|^2 model (Gao thesis Eq. E.17).

    A = [A, B, C, D, fr, Qf]: a linear baseline A + B*(x - fr) plus a
    Lorentzian dip (C + D*(x - fr)) whose width is set by Qf and whose
    asymmetry comes from the D term.
    """
    offset, slope, amp, skew, fr, qf = A[0], A[1], A[2], A[3], A[4], A[5]
    detune = x - fr
    baseline = offset + slope * detune
    dip = (amp + skew * detune) / (1 + 4 * (qf ** 2) * (detune / fr) ** 2)
    return baseline + dip
def residLorentz(p, fjac=None, x=None, y=None, err=None):
    """Weighted residuals of the skewed-Lorentzian model for mpfit."""
    model = lorentzFunc(x, p)
    residual = (y - model) / err
    return [0, residual]
# Skewed circle fitting # added by cecil
# Skewed circle function
def SkewcircleFunc(x, A):
    """Skewed-circle |S21| model in dB (Geerlings et al., APL 100, 192601).

    A = [Q0, Qc, wr, dw]: total and coupling quality factors, resonance
    frequency and the asymmetry (frequency-shift) term. Returns
    20*log10(|S21|). An earlier variant also carried a dB offset A[4];
    that parameter was removed.
    """
    # np.log10 instead of the bare pylab log10, for an explicit dependency.
    sc1 = A[0] / A[1] - 2j * A[0] * A[3] / A[2]
    sc2 = 1 + 2j * A[0] * (x - A[2]) / A[2]
    return 20 * np.log10(abs(1 - (sc1 / sc2)))
# Skewed circle residuals
def residSkewcircle(p, fjac=None, x=None, y=None, err=None):
    """Weighted residuals of the skewed-circle |S21| model for mpfit."""
    model = SkewcircleFunc(x, p)
    residual = (y - model) / err
    return [0, residual]
| [
"yandaikang@gmail.com"
] | yandaikang@gmail.com |
0994278a4630f12ad2158c006cad40adf14f8817 | 788f24712349f071653f4ca55cf1e626ee9d2913 | /main.py | bdfdec07b0fab698ed20e3a1c69986d7e099ef6e | [] | no_license | The-Coding-Hub/PyNotePad | 94388b58bed8e84795287238d12aafa7bd6d2d57 | e678ebe72ae119076b5e212ac8a3d32755b42771 | refs/heads/master | 2023-06-13T02:23:29.123008 | 2021-07-10T07:50:46 | 2021-07-10T07:50:46 | 384,642,204 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,363 | py |
from tkinter import *
from tkinter.messagebox import showinfo
from tkinter.filedialog import askopenfilename, asksaveasfilename
import os
def newFile():
    """Start a fresh, untitled document in the editor."""
    global file
    file = None  # no backing path until the user saves
    root.title("Untitled - Notepad")
    TextArea.delete(1.0, END)
def openFile():
    """Prompt for a file, load its contents into the editor and retitle
    the window; does nothing more if the dialog is cancelled."""
    global file
    file = askopenfilename(defaultextension=".txt",
                           filetypes=[("All Files", "*.*"),
                                      ("Text Documents", "*.txt")])
    if file == "":
        # dialog was cancelled
        file = None
    else:
        root.title(os.path.basename(file) + " - Notepad")
        TextArea.delete(1.0, END)
        # with-statement closes the handle even if insert() raises
        with open(file, "r") as f:
            TextArea.insert(1.0, f.read())
def saveFile():
    """Save the buffer to the current path; prompt for a path first when
    the document is untitled (``file`` is None)."""
    global file
    if file is None:  # "is None" instead of "== None"
        # Save As: ask for a destination first.
        path = asksaveasfilename(initialfile='Untitled.txt', defaultextension=".txt",
                                 filetypes=[("All Files", "*.*"),
                                           ("Text Documents", "*.txt")])
        if path == "":
            # dialog cancelled; keep the document untitled
            return
        file = path
        # with-statement guarantees the handle is closed on error
        with open(file, "w") as f:
            f.write(TextArea.get(1.0, END))
        root.title(os.path.basename(file) + " - Notepad")
        print("File Saved")
    else:
        # Save to the existing path.
        with open(file, "w") as f:
            f.write(TextArea.get(1.0, END))
def quitApp():
    """Destroy the main window, ending the application."""
    root.destroy()
def cut():
    """Cut the current selection to the clipboard."""
    # BUGFIX: "<>" is not a valid Tk event specifier (would raise TclError);
    # the Text widget's virtual event for cutting is "<<Cut>>".
    TextArea.event_generate("<<Cut>>")
def copy():
    """Copy the current selection to the clipboard."""
    # BUGFIX: "<>" is not a valid Tk event specifier (would raise TclError);
    # the Text widget's virtual event for copying is "<<Copy>>".
    TextArea.event_generate("<<Copy>>")
def paste():
    """Paste clipboard contents at the insertion point."""
    # BUGFIX: "<>" is not a valid Tk event specifier (would raise TclError);
    # the Text widget's virtual event for pasting is "<<Paste>>".
    TextArea.event_generate("<<Paste>>")
def about():
    """Pop up the About dialog for the application."""
    showinfo("Notepad", "Notepad by Prameya Mohanty")
if __name__ == '__main__':
    # Basic tkinter setup: main window, title, screen-filling geometry.
    root = Tk()
    root.title("Untitled - Notepad")
    # root.wm_iconbitmap("icon.ico")
    root.geometry("{0}x{1}+0+0".format(root.winfo_screenwidth(), root.winfo_screenheight()))
    # Add TextArea: the editing widget. The module-global `file` tracks the
    # current document path (None = unsaved/untitled) for open/save above.
    TextArea = Text(root, font="Consolas")
    file = None
    TextArea.pack(expand=True, fill=BOTH)
    # Lets create a menubar
    MenuBar = Menu(root)
    #File Menu Starts
    FileMenu = Menu(MenuBar, tearoff=0)
    # To open new file
    FileMenu.add_command(label="New", command=newFile)
    #To Open already existing file
    FileMenu.add_command(label="Open", command = openFile)
    # To save the current file
    FileMenu.add_command(label = "Save", command = saveFile)
    FileMenu.add_separator()
    FileMenu.add_command(label = "Exit", command = quitApp)
    MenuBar.add_cascade(label = "File", menu=FileMenu)
    # File Menu ends
    # Edit Menu Starts (cut/copy/paste wiring currently disabled)
    EditMenu = Menu(MenuBar, tearoff=0)
    #To give a feature of cut, copy and paste
    # EditMenu.add_command(label = "Cut", command=cut)
    # EditMenu.add_command(label = "Copy", command=copy)
    # EditMenu.add_command(label = "Paste", command=paste)
    # MenuBar.add_cascade(label="Edit", menu = EditMenu)
    # Edit Menu Ends
    # Help Menu Starts
    HelpMenu = Menu(MenuBar, tearoff=0)
    HelpMenu.add_command(label = "About Notepad", command=about)
    MenuBar.add_cascade(label="Help", menu=HelpMenu)
    # Help Menu Ends
    root.config(menu=MenuBar)
    # Attach a vertical scrollbar to the text area (two-way binding:
    # scrollbar drives the view, view position updates the scrollbar).
    Scroll = Scrollbar(TextArea)
    Scroll.pack(side=RIGHT, fill=Y)
    Scroll.config(command=TextArea.yview)
    TextArea.config(yscrollcommand=Scroll.set)
root.mainloop() | [
"yourcodinghub.py@gmail.com"
] | yourcodinghub.py@gmail.com |
b65850d93e8982cdcf8dc8c853dcf41ed1856bb7 | 1c6c77814f8daddc9e6adc25345d843a22449db2 | /server/etl/poi_convert_label.py | 385a71030d96f12d9f198a27554d04a629b660b6 | [] | no_license | D2KLab/CityMUS | c8ee6badf5bbe5c3f9084538aa27bda27eaa592d | d05ebc31884ae93bb27cd49002e97b169e56d9b3 | refs/heads/master | 2021-03-27T14:51:41.846994 | 2017-10-20T14:28:44 | 2017-10-20T14:28:44 | 86,473,203 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 209 | py | with open('../data/dbpedia_match_nogeo_distinct.csv','r') as input_fp:
reader=csv.reader(input_fp,)
# skip header
reader.next()
rows = [ [unicode(col,'utf-8') for col in row] for row in reader] | [
"ellena.fabio@gmail.com"
] | ellena.fabio@gmail.com |
93a02d466eccb5a9c95be3e2c4321267f16fdf80 | a301b8d9875c58df665d373b856fdeec4c12ea97 | /webhook_w.py | 30a8720b02772ed8d29646da15f99e758765d5af | [] | no_license | Bhawna5/Weather_chatbot_using_dialogflow | 1bc6c9170a838b795f783b10df1a9f0a1f3c247c | 38dbbd3fae6b23dcb74fca6938601a2b311a992d | refs/heads/master | 2022-04-17T21:50:08.379340 | 2020-04-17T09:12:52 | 2020-04-17T09:12:52 | 256,455,399 | 0 | 0 | null | 2020-04-17T09:12:40 | 2020-04-17T09:09:38 | Python | UTF-8 | Python | false | false | 1,837 | py | import json
#to convert list and dictionary to json
import os
import requests
from flask import Flask #it is microframework to develop a web app
from flask import request
from flask import make_response
#Falsk app for our web app
app=Flask(__name__)
# Dialogflow calls POST /webhook for fulfillment; Flask routes it here.
@app.route('/webhook', methods=['POST'])
def webhook():
    """Handle a Dialogflow webhook call and return the fulfillment JSON."""
    # Parse the incoming JSON body (force=True: ignore the Content-Type).
    body = request.get_json(silent=True, force=True)
    print(json.dumps(body, indent=4))
    # Build the fulfillment payload and send it back as application/json.
    reply = json.dumps(makeResponse(body), indent=4)
    response = make_response(reply)
    response.headers['Content-Type'] = 'application/json'
    return response
# extract parameter values, query the weather api, construct the response
def makeResponse(req):
    """Build the Dialogflow fulfillment payload for a weather query.

    Reads the city and date parameters from the parsed webhook request
    and queries the OpenWeatherMap 5-day forecast API for that city.
    """
    result = req.get("queryResult")
    parameters = result.get("parameters")
    city = parameters.get("geo-city")
    date = parameters.get("date")
    # BUGFIX: the city was extracted but the API call hardcoded
    # "hyderabad,in", so every reply described Hyderabad regardless of the
    # requested city; query the city the user actually asked about.
    # NOTE(review): the API key is hardcoded -- move it to configuration.
    r = requests.get('http://api.openweathermap.org/data/2.5/forecast',
                     params={'q': city, 'appid': 'db91df44baf43361cbf73026ce5156cb'})
    json_object = r.json()
    weather = json_object['list']
    # NOTE: date-based selection remains disabled (as in the original);
    # the first forecast entry is used regardless of the requested date.
    # for i in range(0,40):
    #     if date in weather[i]['dt_txt']:
    #         condition=weather[i]['weather'][0]['description']
    condition = weather[0]['weather'][0]['description']
    speech = "The forecast for "+city+" for "+date+" is "+condition
    return {
        "fulfillmentMessages": [
            {
                "text": {
                    "text": speech } }]}
#return {
# "speech": speech, # "displayText":speech, # "source":"apiai-weather-webhook"}
if __name__ == '__main__':
port = int(os.getenv('PORT', 5000))
print("starting on port %d" % port)
app.run(debug=False, port=port, host='0.0.0.0') | [
"noreply@github.com"
] | noreply@github.com |
dea600a78ef3e1a87be92720c1bd383d6be60f6a | 8aff692ba932a52d550aa9302e5b8062778e8faf | /frontpage/migrations/0002_auto__add_contact.py | ba5aabb01fa83ca16a9e90fbf1082e6d760cf70f | [] | no_license | rdthree/addition_interiors_project | 3d1c4164816e26a284648ee26e585c7d31398d0a | 2485b71ac9a8894fa59ac2b8b71db3c1d7d3d167 | refs/heads/master | 2022-02-09T19:35:51.499562 | 2014-03-30T03:31:33 | 2014-03-30T03:31:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,645 | py | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
    # South schema migration (auto-generated): create the
    # frontpage_contact table with first_name, last_name and email columns.
    # Adding model 'Contact'
    db.create_table('frontpage_contact', (
        ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('first_name', self.gf('django.db.models.fields.CharField')(max_length=255)),
        ('last_name', self.gf('django.db.models.fields.CharField')(max_length=255)),
        ('email', self.gf('django.db.models.fields.EmailField')(max_length=75)),
    ))
    # Emit the post-create signal so South/Django hooks can react.
    db.send_create_signal('frontpage', ['Contact'])
def backwards(self, orm):
# Deleting model 'Contact'
db.delete_table('frontpage_contact')
models = {
'frontpage.contact': {
'Meta': {'object_name': 'Contact'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'frontpage.feature': {
'Meta': {'object_name': 'Feature'},
'feature_image': ('django.db.models.fields.files.ImageField', [], {'null': 'True', 'blank': 'True', 'max_length': '100'}),
'feature_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True', 'default': '1'}),
'feature_subtitle': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'feature_text': ('django.db.models.fields.TextField', [], {'max_length': '300'}),
'feature_title': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'frontpage.marketing': {
'Meta': {'object_name': 'Marketing'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'marketing_image': ('django.db.models.fields.files.ImageField', [], {'null': 'True', 'blank': 'True', 'max_length': '100'}),
'marketing_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True', 'default': '1'}),
'marketing_text': ('django.db.models.fields.TextField', [], {'max_length': '300'}),
'marketing_title': ('django.db.models.fields.CharField', [], {'max_length': '20'})
},
'frontpage.slider': {
'Meta': {'object_name': 'Slider'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slider_image': ('django.db.models.fields.files.ImageField', [], {'null': 'True', 'blank': 'True', 'max_length': '100'}),
'slider_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True', 'default': '1'}),
'slider_text': ('django.db.models.fields.TextField', [], {'max_length': '200'}),
'slider_title': ('django.db.models.fields.CharField', [], {'max_length': '20'})
},
'frontpage.title': {
'Meta': {'object_name': 'Title'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'default': "'Addition Interiors'", 'max_length': '20'})
}
}
complete_apps = ['frontpage'] | [
"delgado.raymond@gmail.com"
] | delgado.raymond@gmail.com |
9ca724797bafc303ea2dc8dea965d71c1463a0e6 | 2f7e84a0ff1b6b3d7e0081d56afe635494a1582f | /Deno/人脸识别/test222.py | e41671956b645d6473fdf934345eeba629b2ceec | [] | no_license | wangbiao0912/TensorFlowLearn | eb978cd9e71d9985e33cff4c7df40438d462932f | 917fa6ecf761955c162c9016b9212be12bfbb77f | refs/heads/master | 2022-12-02T00:07:07.216572 | 2020-06-15T03:49:16 | 2020-06-15T03:49:16 | 232,243,666 | 0 | 0 | null | 2022-11-22T04:42:15 | 2020-01-07T04:40:19 | Python | UTF-8 | Python | false | false | 1,119 | py | #coding:utf-8
import cv2
import sys
from PIL import Image
def CatchUsbVideo(window_name, camera_idx):
    """Open a video source and display frames until 'q' is pressed.

    Args:
        window_name: Title of the preview window.
        camera_idx: Camera index (0 = built-in webcam) or a video file path.
    """
    cv2.namedWindow(window_name)  # create the preview window with this title
    # Acquire the capture device (or video file).
    cap = cv2.VideoCapture(camera_idx)
    while cap.isOpened():  # True while the source is open and readable
        # read() returns (success_flag, frame); frame is one image as an array.
        ok, frame = cap.read()
        if not ok:  # end of stream or read failure
            break
        # Show the current frame.
        cv2.imshow(window_name, frame)
        c = cv2.waitKey(10)
        if c & 0xFF == ord('q'):  # press 'q' to quit the preview loop
            break
    # Release the capture device and close all windows.
    cap.release()
    cv2.destroyAllWindows()
if __name__ == '__main__':
    # Window titled "FaceRect"; source 0 = default (built-in) webcam.
    CatchUsbVideo("FaceRect", 0)
"wangbiao1012@gmail.com"
] | wangbiao1012@gmail.com |
2059a70b6d77faa17807325f41adf8b91a280812 | e50d1af38ee1ba8aeadc516bf9b44a145d92c724 | /adminrestrict/__init__.py | 8560e39eed2186d7a9b39c7f2b688b83a814f669 | [
"MIT"
] | permissive | robromano/django-adminrestrict | 540435388634eab30f6ac9668efa20d47e6e1919 | d7a01bcaf2a6cf9eaeb4f0e5ee09caf58c1ff291 | refs/heads/master | 2023-03-10T21:38:32.244402 | 2023-02-27T01:00:30 | 2023-02-27T01:00:30 | 24,990,752 | 36 | 26 | MIT | 2022-04-09T02:10:15 | 2014-10-09T13:39:39 | Python | UTF-8 | Python | false | false | 190 | py | try:
__version__ = __import__('pkg_resources').get_distribution(
'django-adminrestrict'
).version
except:
__version__ = '3.0'
def get_version():
    # Return the version resolved at import time: the installed distribution
    # metadata when available, otherwise the hard-coded fallback.
    return __version__
| [
"rromano@gmail.com"
] | rromano@gmail.com |
73d3d2feb587623fc39b8bc79371c17cf3d71e0d | ee230bfae781bdd2a75a6cd196f2651f82739d27 | /debang/download_excel/migrations/0003_auto_20180623_0935.py | dca4aa34e7861dce4bbd7e44d915e78df3009f11 | [] | no_license | Catchoo/mianshi | 5974783d769216516753f72859a6fd5238723c23 | 0218bda64291a6e5a78bb3b582138ab81d05e2f7 | refs/heads/master | 2020-03-21T10:36:34.723853 | 2018-06-25T10:07:55 | 2018-06-25T10:07:55 | 138,461,070 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 873 | py | # Generated by Django 2.0 on 2018-06-23 09:35
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
    """Auto-generated migration: swap the ``type`` index for ``name``/``ID`` indexes.

    NOTE(review): the ``uuid`` field default below is a *fixed* UUID literal
    captured when ``makemigrations`` ran, so any row created through this
    default gets the same value — confirm a callable (``uuid.uuid4``) was
    intended on the model itself.
    """

    dependencies = [
        ('download_excel', '0002_auto_20180623_0934'),
    ]

    operations = [
        migrations.RemoveIndex(
            model_name='orgnz',
            name='type_idx',
        ),
        migrations.AlterField(
            model_name='people',
            name='uuid',
            field=models.UUIDField(default=uuid.UUID('ab9a452b-eb53-4dbf-afb2-d853744d3440'), editable=False, primary_key=True, serialize=False),
        ),
        migrations.AddIndex(
            model_name='orgnz',
            index=models.Index(fields=['name'], name='name_idx'),
        ),
        migrations.AddIndex(
            model_name='people',
            index=models.Index(fields=['ID'], name='ID_idx'),
        ),
    ]
| [
"cqa6688@126.com"
] | cqa6688@126.com |
eafff8475c96f2aa68ef7488e958c8a6291ef035 | ca04f1e28524aab7d779e08609b635b7d171790b | /apps/my_app/models.py | 4a607c94acc07a1b115a08f6fc52001123b55896 | [] | no_license | herimiguel/userName | 19cd337024e25619cdb7238ca195784967b83108 | 7a70d72ce88b2b0d66503b0eeacb0ed392f232fd | refs/heads/master | 2020-03-19T14:34:13.846543 | 2018-06-06T14:54:06 | 2018-06-06T14:54:06 | 136,344,151 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 270 | py | from __future__ import unicode_literals
from django.db import models
# Create your models here.
class User(models.Model):
    """A minimal user record identified only by a display name."""

    # Free-form name entered by the visitor.
    user_name = models.CharField(max_length=101)
    # DateField (not DateTimeField): only the calendar date is recorded.
    created_at = models.DateField(auto_now_add=True)
    updated_at = models.DateField(auto_now=True)
| [
"herimiguel84@hotmail.com"
] | herimiguel84@hotmail.com |
9e6704d523c243109abeb3ad83e91af8302e9388 | c82fa8b3a408dd8007cc185be78a9a5e6ac496bc | /app/lab03/src/mean_shift.py | 216e1ec381d2600db4b5a3f37afc55cbf622b22c | [] | no_license | ssjf409/CV | fbaa78f02f5e09b89858ee69a36db4f5a54bb893 | 4986749585d0cc5612cfe6b45da0af1ae4a97e7d | refs/heads/master | 2022-10-05T08:00:00.808495 | 2020-06-07T07:00:18 | 2020-06-07T07:00:18 | 256,041,482 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 924 | py | import cv2
import numpy as np
image = cv2.imread('../data/fruits.png')
shifted = cv2.pyrMeanShiftFiltering(image, 30, 30)
shifted_list=shifted.tolist()
height, width, channels = image.shape
centers=[]
for m in range(0, height):
#print(m, height)
for n in range(0, width):
if len(centers)==0:
centers.append(shifted_list[m][n])
continue
if shifted_list[m][n] in centers:
continue
centers.append(shifted_list[m][n])
print(len(centers))
random_color=np.random.randint(0, 256, [len(centers), 3], np.uint8)
res_img=np.zeros(image.shape, np.uint8)
for m in range(0, height):
for n in range(0, width):
k=centers.index(shifted_list[m][n])
res_img[m,n,:]=random_color[k,:]
cv2.imshow("Input", image)
cv2.imshow("Mean-shifted", shifted)
cv2.imshow("Random colored", res_img)
cv2.waitKey()
cv2.destroyAllWindows() | [
"ssjf409@naver.com"
] | ssjf409@naver.com |
040780f0a66c35d9feada04c693a6b39fc7f7acc | 70f564990215f47b139a777826f211477e9b44f6 | /plan2vec_experiments/analysis_icml_2020/local_metric_img_visualization.py | 832991f25694a02b928ea7dc8ae3ba5d41a0fb3b | [] | no_license | geyang/plan2vec | de87f2d77732c4aacdefd00067ebebacb7cd763f | aeeb50aed3d7da4c266b4ca163e96d4c0747e3c1 | refs/heads/master | 2022-11-16T03:40:42.638239 | 2022-10-28T04:01:29 | 2022-10-28T04:01:29 | 261,273,420 | 65 | 3 | null | null | null | null | UTF-8 | Python | false | false | 2,107 | py | from plan2vec.plotting.maze_world.connect_the_dots_image_maze import Args, main
if __name__ == "__main__":
import jaynes
from plan2vec_experiments import instr, RUN, config_charts, dir_prefix
from os.path import join as pJoin, dirname, normpath
from ml_logger import logger
logger.configure(log_directory=RUN.server, register_experiment=False)
# glob_root = dir_prefix()
# glob_root = "/geyang/plan2vec/2019/12-16/analysis/local-metric-analysis/all_local_metric"
# glob_root = "/geyang/plan2vec/2020/02-08/neo_plan2vec/uvpn_image/quick_eval_new_local_metric/local_metric/10.50"
glob_root = "/geyang/plan2vec/2020/02-08/neo_plan2vec/uvpn_image/quick_eval_new_local_metric/local_metric/hige_loss/lr-sweep/12.24"
kwargs = []
with logger.PrefixContext(glob_root):
# note: rope uses {}-{} as postfix. maze do not.
weight_paths = logger.glob("**/models/**/f_lm.pkl")
logger.print('found these experiments')
logger.print(*weight_paths, sep="\n")
for p in weight_paths:
parameter_path = normpath(pJoin(dirname(p), '..', '..', 'parameters.pkl'))
env_id, local_metric, latent_dim = \
logger.get_parameters(
'Args.env_id', 'Args.local_metric', 'Args.latent_dim',
path=parameter_path, default=None)
logger.abspath(p)
kwargs.append(dict(env_id=env_id, load_local_metric=logger.abspath(p),
local_metric=local_metric, latent_dim=latent_dim))
jaynes.config()
for _ in kwargs:
jaynes.run(instr(main, n_rollouts=100, **_))
config_charts("""
charts:
- type: file
glob: "**/*render.png"
- type: file
glob: "**/*data.png"
- type: file
glob: "**/*connected.png"
- type: file
glob: "**/*gt.png"
- type: file
glob: "**/*gt_wider.png"
keys:
- run.status
- Args.env_id
- Args.load_local_metric
""")
jaynes.listen()
| [
"yangge1987@gmail.com"
] | yangge1987@gmail.com |
d1a57342b0da108df57c528d3f223c683ec0d1e0 | a2537157a9c3b8abc15a03028b29ee83c1fe25a1 | /handlers/conversation.py | 61b4d432ddf8f61525606cfb49ec389007f25f4d | [] | no_license | DahaWong/useless-ideas-bot | bd0fbca1699e4c90c190e56b278b06d4572968a0 | 684c380dfb95220409307ab2161faec605532ab7 | refs/heads/master | 2023-02-26T14:31:54.238539 | 2021-01-31T02:25:53 | 2021-01-31T02:25:53 | 291,990,131 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 341 | py | from telegram.ext import ConversationHandler, CallbackQueryHandler
from handlers.command import start_handler, generate_handler
# Single conversation state: the bot is either idle or waiting to GENERATE.
GENERATE, = range(1)

conversation_handler = ConversationHandler(
    # /start enters the conversation; it also serves as the fallback, and
    # allow_reentry lets users restart the flow at any point.
    entry_points=[start_handler],
    states={
        GENERATE: [generate_handler]
    },
    fallbacks=[start_handler],
    allow_reentry=True
)
"dahawong@gmail.com"
] | dahawong@gmail.com |
9b05492d6031696c5b869d6d6436dfe722f80431 | 24dce477772aecb28062d032bc77feb9f0d927da | /python/csv-numpy.py | 3db6b101e90a1eedcda1471b69fe5bf5f3b0bf4d | [] | no_license | silviud/toolbox | cf38b698db18b6a5c8c19d7420ddb221ce6aa5a8 | 58ac66ed49e9f3d7e6b1621118bd7be320e82f16 | refs/heads/master | 2021-01-18T10:22:16.499211 | 2013-07-22T08:53:19 | 2013-07-22T08:53:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,549 | py | #!/usr/bin/env python
"""
Convert CSV Data into Numpy and sort n report
"""
import getopt
import math
import random
import datetime
import numpy as np
import re
import sys
import csv
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import matplotlib.mlab as mlab
import matplotlib.cbook as cbook
from matplotlib.figure import Figure
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
from matplotlib.ticker import EngFormatter
from matplotlib.dates import AutoDateFormatter, AutoDateLocator
number = 21;
def readData():
    """Load AppDynamics metrics from ``output.txt`` into a record array.

    Returns:
        A numpy record array with one field per CSV column (field names
        are taken from the header row by ``csv2rec``).
    """
    print("Appdynamics Worst Performers")
    # ``open`` instead of the Python-2-only ``file`` builtin, and a ``with``
    # block so the handle is closed even if parsing fails.
    with open('output.txt') as csvfile:
        r = matplotlib.mlab.csv2rec(csvfile, comments='#', skiprows=0, checkrows=0, delimiter=',', converterd=None, names=None, missing='', missingd=None, use_mrecords=False)
    return r
if __name__ == "__main__":
    # Field layout of the record array produced by readData():
    '''
    dtype=[('id', '<i8'), ('name', '|S55'), ('original_name', '|S55'), ('service_levels', '|S8'), ('end_user_time_ms', '|O8'), ('page_render_time_ms', '|O8'), ('network_time_ms', '|O8'), ('server_time_ms', '<i8'), ('max_server_time_ms', '<i8'), ('min_server_time_ms', '<i8'), ('calls', '|S10'), ('calls__min', '|S5'), ('errors', '|S7'), ('error_', '<f8'), ('slow_requests', '|S6'), ('very_slow_requests', '|S7'), ('stalled_requests', '|S6'), ('cpu_used_ms', '|O8'), ('block_time_ms', '|O8'), ('wait_time_ms', '|O8'), ('tier', '|S17'), ('type', '|S11')])
    '''
    print("Starting")
    r = readData()

    # lexsort sorts ascending by the *last* key first, so indexing from the
    # end of nsorted walks the rows from worst to best.  ``for``/``range``
    # replaces the manual ``while``/``x = x + 1`` counters, and ``print()``
    # replaces the bare ``print`` statements (which are no-ops on Python 3).
    nsorted = np.lexsort((r.calls, r.slow_requests, r.very_slow_requests, r.stalled_requests))
    print()
    print("Worst by Slow and Very Slow reqs")
    print("================================")
    for x in range(1, number):
        t = list(r[nsorted[nsorted.size - x]])
        print('Number %s: Transaction: %s Tier %s with Slow count of %s and Very slow count of %s and Stall count of %s out of %s Calls' % (x, t[1], t[20], t[14], t[15], t[16], t[10]))

    nsorted = np.lexsort((r.calls, r.server_time_ms))
    print()
    print("Worst by Server Time")
    print("================================")
    for x in range(1, number):
        t = list(r[nsorted[nsorted.size - x]])
        print('Number %s: Transaction: %s Tier %s with Server Time of %s' % (x, t[1], t[20], t[7]))

    nsorted = np.lexsort((r.calls, r.errors, r.error_))
    print()
    print("Highest Error Percentage Rate")
    print("================================")
    for x in range(1, number):
        t = list(r[nsorted[nsorted.size - x]])
        print('Number %s: Transaction: %s Tier %s with Error Rate of %s Percent out of %s transactions' % (x, t[1], t[20], t[13], t[10]))
| [
"unixunion@gmail.com"
] | unixunion@gmail.com |
a41438e4acb444093674b2f0a12897ce09c822f2 | 550c29e5eb1b767e1ba1d67970003ae60fe55038 | /docs/conf.py | aeb57f99fcc63784e75db695d17393fef961659d | [
"MIT"
] | permissive | Neraste/black | c5945e37a3dd90b6ab739e780b5f94fd57c586ce | 9d735d240bb5dee1ef5285883acfa69cddbcaa47 | refs/heads/master | 2020-03-20T00:51:26.343434 | 2018-06-12T12:43:22 | 2018-06-12T12:43:22 | 137,058,114 | 0 | 0 | MIT | 2018-06-12T10:48:43 | 2018-06-12T10:48:43 | null | UTF-8 | Python | false | false | 8,378 | py | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/stable/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import ast
from pathlib import Path
import re
import shutil
import string
from recommonmark.parser import CommonMarkParser
CURRENT_DIR = Path(__file__).parent
def get_version():
    """Extract black's ``__version__`` string from the source of black.py."""
    source_path = CURRENT_DIR / ".." / "black.py"
    with open(str(source_path), "r", encoding="utf8") as handle:
        source = handle.read()
    match = re.search(r"__version__\s+=\s+(?P<version>.*)", source)
    version_literal = match.group("version")
    # The captured text is a Python string literal; evaluate it safely.
    return str(ast.literal_eval(version_literal))
def make_pypi_svg(version):
    """Render the PyPI version badge by filling the SVG template with *version*."""
    template = CURRENT_DIR / "_static" / "pypi_template.svg"
    target = CURRENT_DIR / "_static" / "pypi.svg"
    with open(str(template), "r", encoding="utf8") as f:
        # The template contains a ``$version`` placeholder.
        svg = string.Template(f.read()).substitute(version=version)
    with open(str(target), "w", encoding="utf8") as f:
        f.write(svg)
def make_filename(line):
    """Turn a markdown ``## Heading`` line into a slug-style ``*.md`` file name."""
    heading = line[3:].rstrip().lower()
    # Collapse every run of non-letters into a single underscore.
    slug = re.sub(r"[^a-z]+", "_", heading)
    # The collapse above leaves at most one underscore at each end; drop them.
    slug = slug.strip("_")
    return slug + ".md"
def generate_sections_from_readme():
    """Split README.md into one generated ``*.md`` file per ``## `` section.

    The section files are written under ``_build/generated`` and exposed next
    to ``conf.py`` through symlinks, so Sphinx picks them up as documents.
    Content before the first ``## `` heading is skipped.
    """
    target_dir = CURRENT_DIR / "_build" / "generated"
    readme = CURRENT_DIR / ".." / "README.md"
    shutil.rmtree(str(target_dir), ignore_errors=True)
    target_dir.mkdir(parents=True)
    output = None
    # Make the symlink targets relative to this directory.
    target_dir = target_dir.relative_to(CURRENT_DIR)
    with open(str(readme), "r", encoding="utf8") as f:
        for line in f:
            if line.startswith("## "):
                # New section: close the previous file and open a fresh one
                # (replacing any stale link/file of the same name).
                if output is not None:
                    output.close()
                filename = make_filename(line)
                output_path = CURRENT_DIR / filename
                if output_path.is_symlink() or output_path.is_file():
                    output_path.unlink()
                output_path.symlink_to(target_dir / filename)
                output = open(str(output_path), "w", encoding="utf8")
                output.write(
                    "[//]: # (NOTE: THIS FILE IS AUTOGENERATED FROM README.md)\n\n"
                )
            # Nothing written until the first section heading is seen.
            if output is None:
                continue
            # Promote headings one level (## -> #) inside the section files.
            if line.startswith("##"):
                line = line[1:]
            output.write(line)
# -- Project information -----------------------------------------------------
project = "Black"
copyright = "2018, Łukasz Langa and contributors to Black"
author = "Łukasz Langa and contributors to Black"
# Autopopulate version
# The full version, including alpha/beta/rc tags.
release = get_version()
# The short X.Y version.
version = release
for sp in "abcfr":
version = version.split(sp)[0]
make_pypi_svg(release)
generate_sections_from_readme()
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ["sphinx.ext.autodoc", "sphinx.ext.intersphinx", "sphinx.ext.napoleon"]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
source_parsers = {".md": CommonMarkParser}
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
source_suffix = [".rst", ".md"]
# The master toctree document.
master_doc = "index"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "alabaster"
html_sidebars = {
"**": [
"about.html",
"navigation.html",
"relations.html",
"sourcelink.html",
"searchbox.html",
]
}
html_theme_options = {
"show_related": False,
"description": "“Any color you like.”",
"github_button": True,
"github_user": "ambv",
"github_repo": "black",
"github_type": "star",
"show_powered_by": True,
"fixed_sidebar": True,
"logo": "logo2.png",
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "blackdoc"
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
master_doc,
"black.tex",
"Documentation for Black",
"Łukasz Langa and contributors to Black",
"manual",
)
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "black", "Documentation for Black", [author], 1)]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"Black",
"Documentation for Black",
author,
"Black",
"The uncompromising Python code formatter",
"Miscellaneous",
)
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ["search.html"]
# -- Extension configuration -------------------------------------------------
autodoc_member_order = "bysource"
# -- Options for intersphinx extension ---------------------------------------
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {"https://docs.python.org/3/": None}
| [
"lukasz@langa.pl"
] | lukasz@langa.pl |
e35dfc2de400e639f2ab648d7437d3a339f804b0 | 09557f76830b73d95ac1c7175833bc74280db53b | /dmb/visualization/stereo/sparsification_plot.py | 92fbf793bd714d1485fba0f80dc0a93520068bc8 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | DeepMotionAIResearch/DenseMatchingBenchmark | e32c6c0846dcb4103289d539be28ef7382b70c8e | 010aeb66e3ceaf3d866036b0ca751861df39432d | refs/heads/master | 2021-11-11T18:56:05.160934 | 2021-11-08T10:28:47 | 2021-11-08T10:28:47 | 222,069,511 | 183 | 39 | null | null | null | null | UTF-8 | Python | false | false | 5,591 | py | import warnings
import numpy as np
import torch
def mask_to_neg(x, mask):
    """Keep ``x`` where ``mask`` is 1 and write -1 where ``mask`` is 0."""
    # x*mask zeroes the masked-out entries; adding (mask - 1) then sets
    # exactly those entries to -1 while leaving the kept entries untouched.
    offset = mask - 1
    return x * mask + offset
def norm(x):
    """Divide ``x`` by its value range, then map into a small positive band.

    The affine rescale ``* 0.9 + 0.05`` keeps valid values strictly above 0,
    which makes the downstream counting convenient; it does not influence
    the final result.
    """
    span = x.max() - x.min()
    x = x / span
    x = x * 0.9 + 0.05
    return x
def sparsification_plot(est_disp=None, gt_disp=None, est_conf=None, bins=10, lb=None, ub=None):
    """Compute sparsification-plot errors for a confidence-weighted disparity map.

    Refer to the paper: "Uncertainty estimates and multi-hypotheses networks
    for optical flow".

    For each fraction p in {0, 100/bins, 2*100/bins, ...} percent, the pixels
    with the lowest estimated confidence (``est_*``), the largest true error
    (``oracle_*``) and a random selection (``random_*``) are removed, and the
    mean absolute disparity error of the remaining pixels is recorded.
    Ideally the ``est_*`` curve decreases monotonically toward the oracle.

    Args:
        est_disp (Tensor): estimated disparity, in (..., Height, Width) layout
        gt_disp (Tensor): ground-truth disparity, in (..., Height, Width) layout
        est_conf (Tensor): estimated confidence, in (..., Height, Width) layout;
            normalized internally for convenience
        bins (int): number of fractions; must divide 100
        lb (scalar): lower bound of valid ground-truth disparity (exclusive)
        ub (scalar): upper bound of valid ground-truth disparity (exclusive)
    Output:
        dict: the average error (epe) when pixels with the lowest confidence
            are removed gradually; keys are ``est_p`` / ``oracle_p`` / ``random_p``
    """
    assert isinstance(bins, int) and (100 % bins == 0), \
        "bins must be divided by 100, and should be int, but get {} is type {}".format(bins, type(bins))
    error_dict = {}
    percentages = []
    part = 100 // bins
    # Pre-fill every entry with 0 so the dict is well-formed even if we bail
    # out early on invalid input below.
    for i in range(bins + 1):
        percentages.append(part * i)
        error_dict['est_{}'.format(part * i)] = torch.Tensor([0.])
        error_dict['oracle_{}'.format(part * i)] = torch.Tensor([0.])
        error_dict['random_{}'.format(part * i)] = torch.Tensor([0.])
    err_msg = '{} is supposed to be torch.Tensor; find {}'
    if not isinstance(est_disp, torch.Tensor):
        warnings.warn(err_msg.format('Estimated disparity map', type(est_disp)))
    if not isinstance(gt_disp, torch.Tensor):
        warnings.warn(err_msg.format('Ground truth disparity map', type(gt_disp)))
    if not isinstance(est_conf, torch.Tensor):
        warnings.warn(err_msg.format('Estimated confidence map', type(est_conf)))
    if any([not isinstance(est_disp, torch.Tensor), not isinstance(gt_disp, torch.Tensor),
            not isinstance(est_conf, torch.Tensor)]):
        warnings.warn('Input maps contains None, expected given torch.Tensor')
        return error_dict
    if not est_disp.shape == gt_disp.shape:
        warnings.warn('Estimated and ground truth disparity map should have same shape')
    if not est_disp.shape == est_conf.shape:
        warnings.warn('Estimated disparity and confidence map should have same shape')
    if any([not (est_disp.shape == gt_disp.shape), not (est_disp.shape == est_conf.shape)]):
        return error_dict
    # Work on CPU copies so the caller's tensors are never modified.
    est_disp = est_disp.clone().cpu()
    gt_disp = gt_disp.clone().cpu()
    est_conf = est_conf.clone().cpu()
    # Valid-pixel mask built from the optional disparity bounds.
    mask = torch.ones(gt_disp.shape, dtype=torch.uint8)
    if lb is not None:
        mask = mask & (gt_disp > lb)
    if ub is not None:
        mask = mask & (gt_disp < ub)
    mask.detach_()
    total_valid_num = mask.sum()
    if total_valid_num < bins:
        return error_dict
    mask = mask.float()
    est_disp = est_disp * mask
    gt_disp = gt_disp * mask
    abs_error = torch.abs(gt_disp - est_disp)
    # normalize confidence map and error map
    est_conf = norm(est_conf)
    # error is lower the better, but confidence is bigger the better
    neg_norm_abs_error = 1.0 - norm(abs_error)
    # random remove map
    randRemove = torch.rand_like(est_conf)
    randRemove = norm(randRemove)
    # let invalid pixels to -1 (valid values stay positive after norm)
    neg_norm_abs_error = mask_to_neg(neg_norm_abs_error, mask)
    est_conf = mask_to_neg(est_conf, mask)
    randRemove = mask_to_neg(randRemove, mask)
    # flatten; after the ascending sort all invalid (-1) entries sit in front
    flat_neg_norm_abs_error, _ = neg_norm_abs_error.view(-1).sort()
    flat_est_conf, _ = est_conf.view(-1).sort()
    flat_randRemove, _ = randRemove.view(-1).sort()
    assert (flat_neg_norm_abs_error <= 0).sum() == (flat_est_conf <= 0).sum(), \
        'The number of invalid confidence and disparity should be the same'
    assert (flat_neg_norm_abs_error <= 0).sum() == (flat_randRemove <= 0).sum(), \
        'The number of invalid random map and disparity should be the same'
    # First valid entry after the block of invalid (-1) ones.
    start_pointer = (flat_neg_norm_abs_error <= 0).sum()
    # ``part`` is reused here: now the number of valid pixels per bin.
    part = (total_valid_num - start_pointer - 1) // bins
    pointer_edges = [start_pointer + part * i for i in range(bins + 1)]
    conf_edges = []
    error_edges = []
    rand_edges = []
    # Threshold value at each bin edge for the three removal orders.
    for pointer in pointer_edges:
        conf_edges.append(flat_est_conf[pointer])
        error_edges.append(flat_neg_norm_abs_error[pointer])
        rand_edges.append(flat_randRemove[pointer])
    for i in range(bins):
        # kick out the lowest percentages[i]% confidence pixels, and evaluate the left
        conf_mask = (est_conf >= conf_edges[i]).float()
        # kick out the biggest percentages[i]% error pixels, and evaluate the left
        # absolute error is lower is better, it's different from confidence value
        error_mask = (neg_norm_abs_error >= error_edges[i]).float()
        # kick out percentages[i]% random generated value
        rand_mask = (randRemove >= rand_edges[i]).float()
        error_dict['est_{}'.format(percentages[i])] = (abs_error * conf_mask).sum() / (conf_mask.sum())
        error_dict['oracle_{}'.format(percentages[i])] = (abs_error * error_mask).sum() / (error_mask.sum())
        error_dict['random_{}'.format(percentages[i])] = (abs_error * rand_mask).sum() / (rand_mask.sum())
    return error_dict
| [
"chenyimin@workspace.chenyimin.ws2.wh-a.brainpp.cn"
] | chenyimin@workspace.chenyimin.ws2.wh-a.brainpp.cn |
b4c2459220e2b79500aed69990e4f1148cd6a89d | 1c7622f563fac18d9644e883742b6f539665aa26 | /pytradfri/device/signal_repeater.py | 02c70212591a795e6a7a67c5773590a5480e3ee6 | [
"MIT"
] | permissive | home-assistant-libs/pytradfri | 3708d65edb236c3b8f430cdf59f45b1d4849352f | 0d7bfbfc63670c050570c96bf3f4a1b20c8091a3 | refs/heads/master | 2023-08-29T08:44:07.477619 | 2023-08-28T06:10:00 | 2023-08-28T06:10:00 | 87,844,301 | 233 | 44 | MIT | 2023-09-13T02:00:41 | 2017-04-10T18:30:17 | Python | UTF-8 | Python | false | false | 910 | py | """Represent a signal repeater."""
from __future__ import annotations
from typing import TYPE_CHECKING
from ..resource import BaseResponse
if TYPE_CHECKING:
from . import Device
class SignalRepeaterResponse(BaseResponse):
    """Typed API response entry for one signal-repeater control record."""
class SignalRepeater:
    """Represent a signal repeater.

    Wraps one entry of the owning device's signal-repeater control list;
    ``index`` selects which entry this instance exposes.
    """

    def __init__(self, device: Device, index: int) -> None:
        """Store the owning device and the control-list index."""
        self.device = device
        self.index = index

    @property
    def id(self) -> int:
        """Return the repeater's ID from the raw response entry."""
        return self.raw.id

    @property
    def raw(self) -> SignalRepeaterResponse:
        """Return the raw response entry this instance represents."""
        # Looked up lazily so the data always reflects the device's
        # current raw state.
        signal_repeater_control_response = self.device.raw.signal_repeater_control
        assert signal_repeater_control_response is not None
        return signal_repeater_control_response[self.index]
| [
"noreply@github.com"
] | noreply@github.com |
e943f0e0c4be7056384cd373264a7855ac591e68 | 0b7dccbf82937fbcf63ac581c2f3f423a2741d81 | /ordering/tasks.py | d40cd5c6923e22d3440ccdd42fb675c871460e1e | [] | no_license | SepehrHasanabadi/coffeeshop | 457a45dc09db589bd6075eba8520fc10ccbad3e7 | 2150d2aa6cb21ba65c081c9d4ce0cce63815b6fa | refs/heads/master | 2023-07-26T14:30:20.104631 | 2021-09-08T11:51:25 | 2021-09-08T11:51:25 | 403,892,309 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 442 | py | from django.core.mail import send_mail
from coffeeshop.celery import app
@app.task
def send_modified_status_mail(recipient_list):
send_mail('سفارش کافی شاپ', 'وضعیت سفارش شما تغییر کرد', 'info@coffeeshop.ir', recipient_list)
@app.task
def send_cancel_order_mail(recipient_list):
send_mail('سفارش کافی شاپ', 'سفارش شما لغو گردید', 'info@coffeeshop.ir', recipient_list)
| [
"mse.hasanabadi@gmail.com"
] | mse.hasanabadi@gmail.com |
06c80d20e3c96cfbb7552b5b1b2dad8302bbabc8 | 1e83c4f6c8cb701e43d5a3b9c9b09c84d37b314f | /predictor_app.py | c3bff58d1b66b136d17b04b5039469f0250225d3 | [] | no_license | VinitSR7/Toxic_Comment_Classifier-concetto-19- | 916effdbc8c2af2a10996782e752499eaea0b6b1 | e9917c1801b79722c0a7fbbf0d0af4182b728db6 | refs/heads/master | 2020-08-22T03:18:52.663640 | 2019-10-20T04:50:58 | 2019-10-20T04:50:58 | 216,306,589 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,269 | py | import flask
from flask import request
from predictor_api import make_prediction
from flask import jsonify, Flask, render_template
app = flask.Flask(__name__)
@app.route("/", methods=["POST"])
def print_piped():
if request.form['mes']:
msg = request.form['mes']
print(msg)
x_input, predictions = make_prediction(str(msg))
flask.render_template('predictor.html',
chat_in=x_input,
prediction=predictions)
return jsonify(predictions)
@app.route("/", methods=["GET"])
def predict():
print(request.args)
if(request.args):
x_input, predictions = make_prediction(request.args['chat_in'])
print(x_input)
return flask.render_template('predictor.html',
chat_in=x_input,
prediction=predictions)
else:
x_input, predictions = make_prediction('')
return flask.render_template('predictor.html',
chat_in=x_input,
prediction=predictions)
@app.route('/about')
def about():
return render_template('about.html')
if __name__=="__main__":
app.run(debug=True)
app.run()
| [
"noreply@github.com"
] | noreply@github.com |
08d5d7ad3f9177e18cc9fa1c9e216a49931c65f5 | a3a71273d5968079976a944b19000052c9db94c1 | /data_normalization/new_venv/Scripts/easy_install-3.7-script.py | abeeaf2247b0d3bd85a8ad6015c993aabcbd4f79 | [] | no_license | MichaelX9/1p-19q-Mutation-Classifier | 3f82d70aad4cf7603a71388c84bbee2d21f82e00 | 88e72b979c8fc0bccb264ca905b9db62d3d89b20 | refs/heads/main | 2023-03-17T22:32:35.267536 | 2021-03-18T21:14:32 | 2021-03-18T21:14:32 | 319,150,876 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 491 | py | #!C:\Users\Michael\Desktop\PyRadiomics\data_normalization\new_venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install-3.7'
# Auto-generated setuptools console-script wrapper: strips the script/exe
# suffix from argv[0] and dispatches to the easy_install entry point.
__requires__ = 'setuptools==40.8.0'
import re
import sys

from pkg_resources import load_entry_point

if __name__ == '__main__':
    # Normalize argv[0] (drop "-script.py(w)" / ".exe") so the entry point
    # sees the plain command name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install-3.7')()
    )
| [
"michaelk.xie@mail.utoronto.ca"
] | michaelk.xie@mail.utoronto.ca |
29fabdd37b5eee248069bcbcc7c7fc5826ff0d69 | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/bob/1ed1c792986545bca6955b9771a56a39.py | d6cd9ce8061599c2e2f464b8921262b171bc563d | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 327 | py | class Bob:
def __init__(self):
    # Bob keeps no state; the constructor intentionally does nothing.
    pass
def hey(self, msg):
    """Reply to *msg* Bob-style.

    Priority order (first match wins): silence -> shouting -> question
    -> anything else.

    Args:
        msg: the remark addressed to Bob, or None.

    Returns:
        One of Bob's four canned replies.
    """
    # Treat None or whitespace-only input as silence.
    # (Fix: use `is None` instead of `== None`.)
    if msg is None or not msg.strip():
        return 'Fine. Be that way!'
    # Shouting (all cased characters uppercase) outranks a question,
    # so "HELLO?" gets this reply. (Fix: msg.isupper() instead of the
    # unidiomatic str.isupper(msg).)
    if msg.isupper():
        return 'Woah, chill out!'
    if msg.endswith('?'):
        return 'Sure.'
    return 'Whatever.'
| [
"rrc@berkeley.edu"
] | rrc@berkeley.edu |
e638a374509203346c08189d5d65df4d77766da4 | 6d600117e56e654e720fdccea57d111056aed97f | /projeto-do-ifal/venv2/Lib/site-packages/stdimage/validators.py | b609c558950e02b2085763a9d6e53223d0f4c5b5 | [
"BSD-3-Clause"
] | permissive | LaryssaGomes/Hist-ria- | 671b0508294e8b897da95c61f23d39e39393ad99 | 1e58a882157d217d4466f615bf78aeb6a3ec487c | refs/heads/master | 2023-04-27T19:30:59.192874 | 2022-11-22T06:29:53 | 2023-02-10T12:44:28 | 250,071,988 | 5 | 1 | null | 2023-04-21T20:56:30 | 2020-03-25T19:27:43 | Python | UTF-8 | Python | false | false | 1,909 | py | from io import BytesIO
from django.core.exceptions import ValidationError
from django.core.validators import BaseValidator
from django.utils.translation import gettext_lazy as _
from PIL import Image
class BaseSizeValidator(BaseValidator):
    """Base validator that validates the size of an image."""

    def compare(self, img_size, limit_value):
        # Fix: __call__ invokes self.compare(cleaned, self.limit_value)
        # with two arguments, but the original base defined compare(self, x),
        # raising TypeError if the base class was ever used directly.
        # Returning True means "invalid", so the base always rejects;
        # subclasses override this with a real comparison.
        return True

    def __init__(self, width, height):
        # None (or 0) for a dimension means "unbounded" on that axis.
        self.limit_value = width or float('inf'), height or float('inf')

    def __call__(self, value):
        """Validate an uploaded image file; raise ValidationError on failure.

        `self.message` and `self.code` are supplied by subclasses.
        """
        cleaned = self.clean(value)
        if self.compare(cleaned, self.limit_value):
            params = {
                'width': self.limit_value[0],
                'height': self.limit_value[1],
            }
            raise ValidationError(self.message, code=self.code, params=params)

    @staticmethod
    def clean(value):
        """Return the (width, height) of the image file, leaving it rewound."""
        value.seek(0)
        stream = BytesIO(value.read())
        size = Image.open(stream).size
        value.seek(0)
        return size
class MaxSizeValidator(BaseSizeValidator):
    """
    ImageField validator to validate the max width and height of an image.

    You may use None as an infinite boundary.
    """

    def compare(self, img_size, max_size):
        # "Invalid" when either dimension exceeds its maximum.
        width, height = img_size
        max_width, max_height = max_size
        return width > max_width or height > max_height

    message = _('The image you uploaded is too large.'
                ' The required maximum resolution is:'
                ' %(width)sx%(height)s px.')
    code = 'max_resolution'
class MinSizeValidator(BaseSizeValidator):
    """
    ImageField validator to validate the min width and height of an image.

    You may use None as an infinite boundary.
    """

    def compare(self, img_size, min_size):
        # "Invalid" when either dimension falls below its minimum.
        return img_size[0] < min_size[0] or img_size[1] < min_size[1]

    message = _('The image you uploaded is too small.'
                ' The required minimum resolution is:'
                ' %(width)sx%(height)s px.')
    # NOTE(review): unlike MaxSizeValidator, no `code` attribute is visible
    # here — possibly truncated in this copy; confirm against upstream.
| [
"martinholsousa@gmail.com"
] | martinholsousa@gmail.com |
a5d4d1f27fbf614e337c5372ceecb4b302f9e0e9 | 36968f53367ff0be9e9244033899096d47b3e6b2 | /pegasus/params/ood_params.py | 969d48a2811c6a589871da9aea0c9d10668ce90a | [
"Apache-2.0"
] | permissive | google-research/pegasus | 9cd4dc8b30b135b37569a5314aa0137b923b217c | 1b4929016aba883d2f06fa1a51e343ccdbd631ed | refs/heads/main | 2023-09-01T22:03:20.369615 | 2023-07-13T22:47:11 | 2023-07-20T17:45:20 | 249,885,496 | 1,543 | 335 | Apache-2.0 | 2023-03-24T23:50:47 | 2020-03-25T04:18:20 | Python | UTF-8 | Python | false | false | 3,762 | py | # Copyright 2023 The PEGASUS Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pegasus Params for OOD detection."""
import functools
from pegasus.data import parsers
from pegasus.eval import estimator_metrics
from pegasus.eval import text_eval
from pegasus.models import transformer
from pegasus.ops import public_parsing_ops
from pegasus.params import pegasus_params
from pegasus.params import registry
from tensorflow.contrib import training as contrib_training
@registry.register("ood_pegasus_large")
def ood_pegasus_large_params(param_overrides):
    """Params for OODTransformerEncoderDecoderModel.

    Builds the default hyperparameter set for the large OOD PEGASUS model,
    applies any comma-separated overrides, then wires up the parser,
    encoder, model constructor, beam-decode function and eval function.

    NOTE(review): leading whitespace was lost in this copy of the file;
    indentation below was reconstructed from control flow.

    Args:
        param_overrides: a string, comma separated list of name=value

    Returns:
        A instance of HParams
    """
    hparams = contrib_training.HParams(
        train_pattern="",
        dev_pattern="",
        test_pattern="tfds:xsum-test",
        vocab_filename="pegasus/ops/testdata/sp_test.model",
        encoder_type="sentencepiece_newline",
        length_bucket_size=0,
        add_task_id=False,
        batch_size=2,
        max_input_len=1024,
        max_target_len=128,
        max_decode_len=128,
        hidden_size=1024,
        filter_size=4096,
        num_heads=16,
        num_encoder_layers=16,
        num_decoder_layers=16,
        beam_size=5,
        beam_start=5,
        beam_alpha=0.8,
        beam_min=0,
        beam_max=-1,
        temperature=0.0,
        top_k=0,
        top_p=0.0,
        optimizer_name="adafactor",
        train_steps=0,
        learning_rate=0.0,
        label_smoothing=0.1,
        dropout=0.1,
        eval_max_predictions=1000,
        use_bfloat16=False,
        model=None,
        parser=None,
        encoder=None,
        estimator_prediction_fn=None,
        eval=None,
        estimator_eval_metrics_fn=estimator_metrics.gen_eval_metrics_fn,
    )
    # Overrides are applied before any derived objects are constructed,
    # so e.g. a vocab_filename override affects the encoder below.
    if param_overrides:
        hparams.parse(param_overrides)
    hparams.parser = functools.partial(
        parsers.supervised_strings_parser,
        hparams.vocab_filename,
        hparams.encoder_type,
        hparams.max_input_len,
        hparams.max_target_len,
        length_bucket_size=hparams.length_bucket_size,
        length_bucket_start_id=pegasus_params.LENGTH_BUCKET_START_ID,
        length_bucket_max_id=pegasus_params.TASK_START_ID - 1,
        add_task_id=hparams.add_task_id,
        task_start_id=pegasus_params.TASK_START_ID)
    hparams.encoder = public_parsing_ops.create_text_encoder(
        hparams.encoder_type, hparams.vocab_filename)
    hparams.model = functools.partial(
        transformer.OODTransformerEncoderDecoderModel, hparams.encoder.vocab_size,
        hparams.hidden_size, hparams.filter_size, hparams.num_heads,
        hparams.num_encoder_layers, hparams.num_decoder_layers,
        hparams.label_smoothing, hparams.dropout)
    # Collect only the beam-search knobs that are actually present in hparams.
    beam_keys = ("beam_start", "beam_alpha", "beam_min", "beam_max",
                 "temperature", "top_k", "top_p")
    beam_kwargs = {k: hparams.get(k) for k in beam_keys if k in hparams.values()}

    def decode_fn(features):
        # Closure over hparams: constructs the model and runs beam decoding.
        return hparams.model().predict(features, hparams.max_decode_len,
                                       hparams.beam_size, **beam_kwargs)

    hparams.estimator_prediction_fn = decode_fn
    hparams.eval = functools.partial(
        text_eval.text_eval,
        hparams.encoder,
        num_reserved=pegasus_params.NUM_RESERVED_TOKENS)
    return hparams
| [
"peterjliu@google.com"
] | peterjliu@google.com |
7dfc34a72a923d55c2c32040084ba7e3d11191d6 | deab3b490986e8321efe376e27062eee26d3326e | /bin/mako-render | a968808398c91ce2e8a1dc689c6f4ceded2fdb17 | [] | no_license | germanponce/virtuan_env_python_2_7 | ebc9d35d051c1ff47dd8d14f60cca1e706f4451e | b2244a9938c75d4aed5631f60742f1425a6239e0 | refs/heads/main | 2023-04-15T05:05:00.182025 | 2021-04-24T05:15:05 | 2021-04-24T05:15:05 | 361,075,996 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 233 | #!/opt/odoo10/python/bin/python2.7
# -*- coding: utf-8 -*-
# Console-script wrapper for Mako's template renderer: normalize argv[0]
# and delegate to mako.cmd.cmdline.
import re
import sys

from mako.cmd import cmdline

if __name__ == '__main__':
    # Drop any "-script.py(w)" / ".exe" suffix so the command name is clean.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(cmdline())
| [
"german.ponce@argil.mx"
] | german.ponce@argil.mx | |
9710607654ccb1a4e3c8441040fe283486f2ab6d | 5ba69e8ae7ed0f72cb628090b044e9c4b373a48d | /src/generation/testing.py | e0f7e293788fefe7c8f99ba34cb46fa4eea5e384 | [] | no_license | rmit-s3607050-rei-ito/Algorithms_A1 | 8519da75d0fc1c7272bba69e24d36e4b3aa0dd25 | 58c6f8159b4bd4638213b5f564d35fc3421679b5 | refs/heads/master | 2021-01-01T16:43:44.313206 | 2017-08-30T08:48:55 | 2017-08-30T08:48:55 | 97,812,992 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,019 | py | #
# Script to perform automated testing for assignment 1 of AA, 2017.
#
# The provided Python script will be the same one used to test your implementation.
# We will be testing your code on the core teaching servers (titan, jupiter etc), so please try your code there.
# The script first compiles your Java code, runs one of the five implementations then runs a series of test.
# Each test consists of sequence of operations to execute, whose results will be saved to file, then compared against
# the expected output. If output from the tested implementation is the same as expected (script is tolerant for
# some formatting differences, but please try to stick to space separated output), then we pass that test.
# Otherwise, difference will be printed via 'diff' (if in verbose mode, see below).
#
# Usage, assuming you are in the directory where the test script " assign1TestScript.py" is located.
#
# > python assign1TestScript.py [-v] <codeDirectory> <name of implementation to test> <list of input files to test on>
#
#options:
#
# -v : verbose mode
#
#Input:
#
# code directory : directory where the Java files reside. E.g., if directory specified is Assign1-s1234,
# then Assign1-s1234/MultisetTester.java should exist. This is also where the script
# expects your program to be compiled and created in, e.g., Assign2-s1234/MultisetTester.class.
# name of implementation to test: This is the name of the implemention to test. The names
# should be the same as specified in the script or in MultisetTest.java
# input files: these are the input files, where each file is a list of commands to execute.
# IMPORTANT, the expected output file for the print operation must be in the same directory
# as the input files, and the should have the same basename - e.g., if we have input operation
# file of "test1.in", then we should have expected file "test1.exp". Similarly, the
# expected output file for the search operation must also be in the same directory and have
# the same basename - e.g., using the same example, if input file is "test1.in", then the
# expected file name for search results is "test1.search.exp"
#
#
# As an example, I can run the code like this when testing code directory "Assign1-s1234",
# all my input and expected files are located in a directory called "tests"
# and named test1.in, test2.in and testing for hash table implementation:
#
#> python assign1TestScript.py -v Assign1-s1234 hash tests/test1.in tests/test2.in
#
#
#
# Jeffrey Chan & Yongli Ren, 2017
#
import string
import csv
import sets
import getopt
import os
import os.path
import re
import sys
import subprocess as sp
def main():
    """Compile the student's Java multiset code and run each test input.

    Python 2 script. NOTE(review): leading whitespace was lost in this copy;
    indentation (and the spacing inside the javac command continuation) was
    reconstructed from control flow.
    """
    # process command line arguments
    try:
        # option list
        sOptions = "v"
        # get options
        optList, remainArgs = getopt.gnu_getopt(sys.argv[1:], sOptions)
    except getopt.GetoptError, err:
        print >> sys.stderr, str(err)
        usage(sys.argv[0])
    bVerbose = False
    for opt, arg in optList:
        if opt == "-v":
            bVerbose = True
        else:
            usage(sys.argv[0])
    # need code dir, implementation name and at least one input file
    if len(remainArgs) < 3:
        usage(sys.argv[0])
    # code directory
    sCodeDir = remainArgs[0]
    # which implementation to test (see MultiTester.java for the implementation strings)
    sImpl = remainArgs[1]
    # set of input files that contains the operation commands
    lsInFile = remainArgs[2:]
    # check implementatoin
    setValidImpl = set(["linkedlist", "sortedlinkedlist", "bst", "hash", "baltree"])
    if sImpl not in setValidImpl:
        print >> sys.stderr, sImpl + " is not a valid implementation name."
        sys.exit(1)
    # compile the skeleton java files
    sCompileCommand = "javac MultisetTester.java Multiset.java LinkedListMultiset.java\
 SortedLinkedListMultiset.java BstMultiset.java HashMultiset.java BalTreeMultiset.java"
    sExec = "MultisetTester"
    # whether executable was compiled and constructed
    bCompiled = False
    sOrigPath = os.getcwd()
    os.chdir(sCodeDir)
    # compile
    proc = sp.Popen([sCompileCommand], shell=True)
    proc.communicate()
    # check if executable was constructed
    if not os.path.isfile(sExec + ".class"):
        print >> sys.stderr, sExec + ".java didn't compile successfully."
    else:
        bCompiled = True
    # variable to store the number of tests passed
    # NOTE(review): passedNum / vTestPassed are initialized but never
    # updated in this portion of the script.
    passedNum = 0
    vTestPassed = [False for x in range(len(lsInFile))]
    print ""
    if bCompiled:
        # loop through each input test file
        for (j, sInLoopFile) in enumerate(lsInFile):
            sInFile = os.path.join(sOrigPath, sInLoopFile);
            sTestName = os.path.splitext(os.path.basename(sInFile))[0]
            #sOutputFile = os.path.join(sCodeDir, sTestName + "-" + sImpl + ".out")
            sOutputFile = os.path.join(sTestName + "-" + sImpl + ".out")
            sSearchOutputFile = os.path.join(sTestName + "-" + sImpl + ".search.out")
            sExpectedFile = os.path.splitext(sInFile)[0] + ".exp"
            sSearchExpectedFile = os.path.splitext(sInFile)[0] + ".search.exp"
            stimeOutputFile = os.path.join(sTestName + "-" + sImpl + ".time.out")
            # check if expected files exist
            with open(sOutputFile, "w") as fOut:
                #sCommand = os.path.join(sCodeDir, sExec + " " + sImpl)
                # RUN JAVA COMMAND
                sCommand = os.path.join("java " + sExec + " " + sImpl + " /tests/" + sSearchOutputFile + " /tests/" + stimeOutputFile)
                # following command used by my dummy code to test possible output (don't replace above)
                # lCommand = os.path.join(sCodeDir, sExec + " " + sExpectedFile + ".test")
                if bVerbose:
                    print "Testing: " + sCommand
                # feed the test's operation commands on stdin, capture stdout
                with open(sInFile, "r") as fIn:
                    proc = sp.Popen(sCommand, shell=True, stdin=fIn, stdout=fOut, stderr=sp.PIPE)
                    #proc = sp.Popen(sCommand, shell=True, stdin=sp.PIPE, stdout=sp.PIPE, stderr=sp.PIPE)
                    #(sStdout, sStderr) = proc.communicate("a hello\np\nq")
                    (sStdout, sStderr) = proc.communicate()
    # change back to original path
    os.chdir(sOrigPath)
#######################################################################################################
def evaluate(sExpectedFile, sOutputFile):
"""
Evaluate if the output is the same as expected input for the print operation.s
"""
ltExpMatches = []
ltActMatches = []
sDelimiter = " | "
with open(sExpectedFile, "r") as fExpected:
for sLine in fExpected:
# space delimiter
sLine1 = sLine.strip()
lFields = string.split(sLine1, sDelimiter)
ltExpMatches.append((lFields[0], int(lFields[1])))
with open(sOutputFile, "r") as fOut:
for sLine in fOut:
# space delimiter
sLine1 = sLine.strip()
# if line is empty, we continue (this also takes care of extra newline at end of file)
if len(sLine1) == 0:
continue
# should be space-delimited, but in case submissions use other delimiters
lFields = re.split("[\t ]*[,|\|]?[\t ]*", sLine1)
if len(lFields) != 2:
# less than 2 numbers on line, which is a valid matching if not empty line
return False
else:
try:
ltActMatches.append((lFields[0], int(lFields[1])))
except ValueError:
# one or both of the vertices are not integers
return False
setExpMatches = sets.Set(ltExpMatches)
setActMatches = sets.Set(ltActMatches)
# if there are differences between the sets
if len(setExpMatches.symmetric_difference(setActMatches)) > 0:
return False
# passed
return True
def evaluateSearch(sSearchExpectedFile, sSearchOutputFile):
    """Evaluate if the output is the same as expected input for searching.

    Passes when every line of the expected file appears in the output file
    (compared as whole lines, order-independent).

    Fix: the original counted expected lines via a bare `open(...)` that was
    never closed — the count is now done inside a `with` block.

    NOTE(review): duplicate lines in the expected file would make the
    unique-intersection size smaller than the raw line count and fail the
    test; behavior preserved from the original.
    """
    with open(sSearchExpectedFile, "r") as fExpected:
        with open(sSearchOutputFile, "r") as fOut:
            sameParts = set(fExpected).intersection(fOut)
    # all lines should be the same
    # count number of lines in expected file
    with open(sSearchExpectedFile, "r") as fExpected:
        lineNum = sum(1 for line in fExpected)
    # if there are differences between the sets
    if len(sameParts) != lineNum:
        return False
    # passed
    return True
def usage(sProg):
    """Print command-line usage to stderr and exit with status 1.

    Fix: replaced the Python-2-only `print >> sys.stderr` statement with
    sys.stderr.write, which is valid in both Python 2 and 3.
    """
    sys.stderr.write(sProg + "[-v] <code directory> <name of implementation to test> <list of test input files>\n")
    sys.exit(1)
# Script entry point.
if __name__ == "__main__":
    main()
| [
"s3429648@student.rmit.edu.au"
] | s3429648@student.rmit.edu.au |
c7def00acc752309b83cbd2243944b4503ad1289 | 5e08a351e2a4f373917e6f0aecac9341a9a614b5 | /gmail_helper.py | 8f5e63a34273419f269f995042dfa2544ee47fad | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | sheldonrampton/gmail_helper | 52e61297b71bd4b55f31311a2f8debd690c149df | 7af4855eb3467664c125d9f813b225e8cb01a143 | refs/heads/master | 2020-03-28T19:51:10.761499 | 2019-01-19T17:42:20 | 2019-01-19T17:42:20 | 149,015,329 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,363 | py | from __future__ import print_function
from googleapiclient.discovery import build
from httplib2 import Http
from oauth2client import file, client, tools
import pprint
import dateutil.parser as parser
from parse import *
from collections import defaultdict
from apiclient import errors
import re
from email.utils import parseaddr
import json
from shutil import copyfile
import time
import os
import stat
from shellbot_persisters import JsonFilePersister
# If modifying these scopes, delete the file token.json.
SCOPES = 'https://www.googleapis.com/auth/gmail.modify'

# Canned text for the top-level menu dialog (consumed by main() via Dialog).
main_responses = {}
main_responses['intro'] = """I can do several things:
* Define new email rules based on sender domains (domains)
* Define new email rules based on sender email addresses (addresses)
* Backup rules (backup)
* Apply the rules (apply)
* Set a limit on the number of messages to process (limit)
* Set the number of seconds to cache sender counts (cache)
"""
main_responses['questions'] = "What would you like me to do?"
main_responses['conclusion'] = "OK, done."
class GmailHelper():
    """The GmailHelper object implements defining and apply rules for
    managing messages in a Gmail account.

    This requires using a token in file token.json with a valid
    token key to establish access to a gmail service.

    NOTE(review): leading whitespace was lost in this copy of the file;
    indentation below was reconstructed from control flow — verify against
    the original before relying on exact statement nesting.

    Attributes:
        service (object): the gmail service
    """

    # Set to True once persisters are supplied to __init__.
    persist = False

    def __init__(self, persisters={}):
        """Initializes the GmailHelper object.

        Args:
            persisters: optional dict with 'config', 'rules' and 'cache'
                persister objects.  NOTE(review): mutable default argument —
                harmless here since it is never mutated.
        """
        if persisters:
            self.persist = True
            self.config_persister = persisters['config']
            self.rules_persister = persisters['rules']
            self.cache_persister = persisters['cache']
        # OAuth flow: reuse token.json when valid, otherwise run the
        # installed-app flow against credentials.json.
        store = file.Storage('token.json')
        creds = store.get()
        if not creds or creds.invalid:
            flow = client.flow_from_clientsecrets('credentials.json', SCOPES)
            creds = tools.run_flow(flow, store)
        self.service = build('gmail', 'v1', http=creds.authorize(Http()))

    def ask_for_sender_rules(self, full_address=False):
        """Asks the user to specify rules for handling Gmail messages.

        Counts inbox messages per sender (domain or full address, cached),
        then interactively prompts for a tagging rule per sender.
        """
        messages = self.collect_messages_list()
        cache_maxage = int(self.config_persister.get()['cache_maxage'])
        age = self.cache_persister.age_in_seconds()
        print("The cache is " + str(age) + " seconds old.")
        if age > cache_maxage:
            self.cache_persister.delete()
            print("Deleting cache.")
        cache = self.cache_persister.get()
        if full_address:
            sorted_counts = cache['sorted_address_counts']
        else:
            sorted_counts = cache['sorted_domain_counts']
        # Empty cache: count senders by walking the message list (bounded by
        # the configured 'limit', 0 meaning unlimited).
        if len(sorted_counts) == 0:
            sender_counts = defaultdict(int)
            count = 1
            limit = int(self.config_persister.get()['limit'])
            for message in messages:
                sender = self.get_from_sender(message, full_address=full_address)
                if sender:
                    sender_counts[sender] += 1
                count += 1
                if count % 100 == 0:
                    print(str(count) + " messages inspected.")
                if limit > 0 and count > limit:
                    break
            # NOTE(review): .iteritems() and the tuple-parameter lambda are
            # Python 2 only (sorts by count, then sender, descending).
            sorted_counts = sorted(sender_counts.iteritems(),
                                   key=lambda (k,v): (v,k),
                                   reverse=True)
            if full_address:
                cache['sorted_address_counts'] = sorted_counts
            else:
                cache['sorted_domain_counts'] = sorted_counts
            self.cache_persister.set(cache)
        print("""For each sender, tell me how you want it handled, as follows:
* Enter [return] if you want it tagged with its sender.
* Enter a word or phrase if you want it tagged with that word or phrase.
* Enter SKIP if you don't want to do anything.
* Enter END if you don't want to do anything with this sender
or any subsequent senders in the list.
* Enter CANCEL to cancel all of the processing you've specified.
OK? Let's get started...""")
        rules = self.rules_persister.get()
        api = self.service.users().messages()
        for count_item in sorted_counts:
            (sender, count) = count_item
            print("sender " + sender + " has " + str(count) + " messages.")
            handling = raw_input("How do you want it handled? ")
            hint_aliases = ["h", "hint", "hints", "help"]
            if handling.lower() in hint_aliases:
                # Show subject lines from up to 5 messages as a hint.
                sender_messages=ListMessagesMatchingQuery(self.service, 'me', query='label:INBOX from:' + sender)
                for sm in sender_messages[:5]:
                    mess = api.get(userId='me', id=sm['id'], format='metadata').execute()
                    headers = mess['payload']['headers']
                    # NOTE(review): this banner prints once per message;
                    # possibly intended to print once before the loop.
                    print("OK, here are the subject lines of some messages from this sender:")
                    subject = ""
                    for header in headers:
                        if header['name'] == 'Subject':
                            subject = header['value']
                            break
                    print("* " + subject)
                handling = raw_input("So how do you want it handled? ")
            if handling.lower() == "cancel":
                # Abandon everything entered this session (nothing persisted).
                print("Your will be done, my liege. I will do nothing.")
                break
            elif handling.lower() == "end":
                # Persist what was entered so far and stop prompting.
                self.rules_persister.set(rules)
                print("Sounds like a plan, Stan. Let me get to work.")
                break
            elif handling.lower() == "skip":
                print("Gotcha. OK, let's look at the next one.")
            elif handling == "":
                print("OK, we'll tag all of these emails with \"" + sender + "\".")
                rule = get_email_rule(sender, rules)
                # BUG(review): add_tags is a dict, so .append raises
                # AttributeError; presumably this should be
                # rule['add_tags'][sender.lower()] = sender.
                rule['add_tags'].append(sender)
                set_email_rule(sender, rule, rules)
            else:
                rule = get_email_rule(sender, rules)
                rule['add_tags'][handling.lower()] = handling
                set_email_rule(sender, rule, rules)
                print("OK, we'll tag all of these emails with \"" + handling + "\".")

    def collect_messages_list(self):
        """Return the list of INBOX message stubs (ids), printing a count."""
        # messages=ListMessagesMatchingQuery(service, 'me', query='label:INBOX is:unread')
        messages=ListMessagesMatchingQuery(self.service, 'me', query='label:INBOX')
        if not messages:
            # NOTE(review): on an HTTP error the query helper returns None,
            # which callers then iterate — would raise TypeError.
            print('No messages found.')
        else:
            message_count = len(messages)
            print(str(message_count) + ' Messages:')
        return messages

    def define_rule_tags(self):
        """Create a Gmail label for every rule tag that doesn't exist yet."""
        rules = self.rules_persister.get()
        try:
            response = self.service.users().labels().list(userId='me').execute()
            labels = response['labels']
            label_tags = [l.get('name').lower() for l in labels]
        except errors.HttpError, error:
            # NOTE(review): if this fires, label_tags is undefined and the
            # loop below raises NameError. Python 2 except syntax.
            print('An error occurred: %s' % error)
        for sender in rules.keys():
            # print("sender " + sender + " has the following tags:")
            for key in rules[sender]['add_tags'].keys():
                # print("* " + tag)
                if not key.lower() in label_tags:
                    label = MakeLabel(rules[sender]['add_tags'][key])
                    CreateLabel(self.service, 'me', label)

    def tag_messages(self, messages):
        """Apply the stored rules: add rule labels and remove INBOX."""
        print("Filing messages. This may take awhile...")
        limit = int(self.config_persister.get()['limit'])
        api = self.service.users().messages()
        response = self.service.users().labels().list(userId='me').execute()
        labels = response['labels']
        # lowercase label name -> Gmail label id
        label_map = {l.get('name').lower(): l.get('id') for l in labels}
        rules = self.rules_persister.get()
        count = 1
        for message in messages:
            # A message can match a domain rule and/or a full-address rule.
            domain = self.get_from_sender(message, False)
            email = self.get_from_sender(message, True)
            for sender in [domain,email]:
                rule = get_email_rule(sender, rules)
                if len(rule['add_tags'].keys()) > 0:
                    add_rule = [label_map[key] for key in rule['add_tags'].keys()]
                    request_body = {'addLabelIds': add_rule,
                                    'removeLabelIds': ['INBOX']}
                    # NOTE(review): rebinds the loop variable `message` to
                    # the API response; harmless only because message['id']
                    # is present on both.
                    message = api.modify(userId='me', id=message['id'],
                                         body=request_body).execute()
            count += 1
            if count % 100 == 0:
                print(str(count) + " messages processed.")
            if limit != 0 and count > limit:
                # NOTE(review): exit() kills the whole process mid-run.
                exit()

    def get_from_sender(self, message, full_address=False):
        """Return the sender of *message*: full email address, or just the
        domain when full_address is False; False when no From header."""
        api = self.service.users().messages()
        mess = api.get(userId='me', id=message['id'], format='metadata').execute()
        headers = mess['payload']['headers']
        temp_dict = {}
        for header in headers:
            # if header['name'] == 'Subject':
            #     temp_dict['Subject'] = header['value']
            # elif header['name'] == 'Date':
            #     msg_date = header['value']
            #     date_parse = (parser.parse(msg_date))
            #     temp_dict['Date'] = str(date_parse.date())
            if header['name'] == 'From':
                temp_dict['From'] = header['value']
        if 'From' in temp_dict:
            (name, email_address) = parseaddr(temp_dict['From'])
            if full_address:
                return email_address
            else:
                (username, domain) = parse("{}@{}", email_address)
                return domain
            # NOTE(review): unreachable — both branches above return, and
            # `sender` is not defined in this scope.
            return sender.lower()
        else:
            return False
class Dialog():
    """The Dialog object defines a sequence of steps that can take actions
    and return values and text within a context.

    The intro/questions/conclusion texts are stored in a response dict and
    pushed to a 'dialog' persister, from which the accessor methods read.
    """

    # Class-level defaults; per-instance state is set in __init__.
    context = {}
    persist = False
    response = {}

    def __init__(self, name,
                 intro="Let's start",
                 questions=[],
                 conclusion="OK, thanks.",
                 persisters={}):
        """Initializes the Dialog object.

        Args:
            name: identifier for this dialog (currently unused beyond storage).
            intro/questions/conclusion: the dialog texts.
            persisters: optional dict with a 'dialog' persister (set/get).
        """
        self.name = name
        # Fix: response is now a per-instance dict; previously it was the
        # shared class attribute, leaking state between Dialog instances.
        self.response = {
            'intro': intro,
            'questions': questions,
            'conclusion': conclusion,
        }
        if persisters:
            self.persist = True
            self.dialog_persister = persisters['dialog']
            self.dialog_persister.set(self.response)

    def intro(self):
        """Return the persisted intro text."""
        return self.dialog_persister.get()['intro']

    def questions(self):
        """Return the persisted questions text."""
        return self.dialog_persister.get()['questions']

    def conclusion(self):
        """Return the persisted conclusion text."""
        return self.dialog_persister.get()['conclusion']

    def set(self, attr, value):
        """Update one response field and re-persist the whole dict.

        Fix: the original wrote to the literal key 'attr' instead of the
        key named by *attr*.
        """
        self.response[attr] = value
        self.dialog_persister.set(self.response)
def backup_rules():
    """Copy the rules and config JSON files to .backup siblings."""
    for source_name in ("rules.json", "config.json"):
        copyfile(source_name, source_name + ".backup")
def get_email_rule(sender, rules):
    """Retrieves the email rule for a single sender.

    Returns the stored rule when one exists, otherwise a fresh empty
    rule skeleton (the rules mapping is not modified).
    """
    try:
        return rules[sender]
    except KeyError:
        return {'add_tags': {}, 'remove_tags': {}, 'set_status': {}}
def set_email_rule(sender, rule, rules):
    """Store *rule* under *sender* in the rules mapping (mutates in place)."""
    rules.update({sender: rule})
def ListMessagesMatchingQuery(service, user_id, query=''):
    """List all Messages of the user's mailbox matching the query.

    Args:
        service: Authorized Gmail API service instance.
        user_id: User's email address. The special value "me"
            can be used to indicate the authenticated user.
        query: String used to filter messages returned.
            Eg.- 'from:user@some_sender.com' for Messages from a particular
            email address.

    Returns:
        List of Messages that match the criteria of the query. Note that the
        returned list contains Message IDs, you must use get with the
        appropriate ID to get the details of a Message.
        NOTE(review): returns None (implicitly) when an HttpError occurs.
    """
    try:
        response = service.users().messages().list(userId=user_id,
                                                   q=query).execute()
        # json.dumps(response);
        messages = []
        if 'messages' in response:
            messages.extend(response['messages'])
        # Follow pagination until no nextPageToken is returned.
        while 'nextPageToken' in response:
            page_token = response['nextPageToken']
            response = service.users().messages().list(userId=user_id, q=query,
                                                       pageToken=page_token).execute()
            messages.extend(response['messages'])
        return messages
    except errors.HttpError, error:  # Python 2 except syntax
        print('An error occurred: ' + str(error))
def ListMessagesWithLabels(service, user_id, label_ids=[]):
    """List all Messages of the user's mailbox with label_ids applied.

    Args:
        service: Authorized Gmail API service instance.
        user_id: User's email address. The special value "me"
            can be used to indicate the authenticated user.
        label_ids: Only return Messages with these labelIds applied.

    Returns:
        List of Messages that have all required Labels applied. Note that the
        returned list contains Message IDs, you must use get with the
        appropriate id to get the details of a Message.
        NOTE(review): returns None (implicitly) when an HttpError occurs.
    """
    try:
        response = service.users().messages().list(userId=user_id,
                                                   labelIds=label_ids).execute()
        messages = []
        if 'messages' in response:
            messages.extend(response['messages'])
        # Follow pagination until no nextPageToken is returned.
        while 'nextPageToken' in response:
            page_token = response['nextPageToken']
            response = service.users().messages().list(userId=user_id,
                                                       labelIds=label_ids,
                                                       pageToken=page_token).execute()
            messages.extend(response['messages'])
        return messages
    except errors.HttpError, error:  # Python 2 except syntax
        print('An error occurred: ' + str(error))
def CreateLabel(service, user_id, label_object):
    """Creates a new label within user's mailbox, also prints Label ID.

    Args:
        service: Authorized Gmail API service instance.
        user_id: User's email address. The special value "me"
            can be used to indicate the authenticated user.
        label_object: label to be added (as built by MakeLabel).

    Returns:
        Created Label.
        NOTE(review): returns None (implicitly) when an HttpError occurs.
    """
    try:
        label = service.users().labels().create(userId=user_id,
                                                body=label_object).execute()
        print(label['id'])
        return label
    except errors.HttpError, error:  # Python 2 except syntax
        print('An error occurred: %s' % error)
def MakeLabel(label_name, mlv='show', llv='labelShow'):
    """Create Label object.

    Args:
        label_name: The name of the Label.
        mlv: Message list visibility, show/hide.
        llv: Label list visibility, labelShow/labelHide.

    Returns:
        Created Label (a dict in the Gmail API label-resource shape).
    """
    return {
        'messageListVisibility': mlv,
        'name': label_name,
        'labelListVisibility': llv,
    }
def main():
    """Interactive entry point: build persisters, show the menu dialog and
    dispatch on the user's answer (domains / addresses / apply / backup /
    limit / cache).

    NOTE(review): leading whitespace was lost in this copy; indentation
    below was reconstructed from control flow. Python 2 (raw_input).
    """
    persisters = {}
    persisters['config'] = JsonFilePersister('config',
                                             {'limit': 0,
                                              'cache_maxage': 60 * 60 * 6})
    persisters['rules'] = JsonFilePersister('rules', {})
    persisters['cache'] = JsonFilePersister('cache',
                                            {'sorted_domain_counts': [],
                                             'sorted_address_counts': []})
    gmail_helper = GmailHelper(persisters)
    # NOTE(review): `service` is assigned but never used below.
    service = gmail_helper.service
    # Fresh persisters dict for the dialog (the helper keeps its own).
    persisters = {}
    persisters['dialog'] = JsonFilePersister('dialog', main_responses)
    main_dialog = Dialog('main_dialog', intro = main_responses['intro'],
                         questions = main_responses['questions'],
                         conclusion = main_responses['conclusion'],
                         persisters = persisters)
    print(main_dialog.intro())
    handling = raw_input(main_dialog.questions() + " ")
    if "domains" in handling.lower():
        gmail_helper.ask_for_sender_rules(full_address=False)
    elif "addresses" in handling.lower():
        gmail_helper.ask_for_sender_rules(full_address=True)
    elif "apply" in handling.lower():
        messages = gmail_helper.collect_messages_list()
        gmail_helper.define_rule_tags()
        gmail_helper.tag_messages(messages)
    elif "backup" in handling.lower():
        backup_rules()
    elif "limit" in handling.lower():
        # Parse a trailing integer from the command, e.g. "limit 500".
        config = gmail_helper.config_persister.get()
        print("Limit was previously " + str(config['limit']) + ".")
        m = re.search(r'(\d*)\s*$',handling.lower())
        limit = m.group(0)
        if limit == '':
            config['limit'] = 0
        else:
            config['limit'] = int(limit)
        gmail_helper.config_persister.set(config)
        print("I've changed the limit to " + str(config['limit']) + ".")
    elif "cache" in handling.lower():
        # Parse a trailing integer number of seconds, e.g. "cache 3600".
        config = gmail_helper.config_persister.get()
        print("Cache was previously " + str(config['cache_maxage']) + " seconds.")
        m = re.search(r'(\d*)\s*$',handling.lower())
        cache = m.group(0)
        if cache == '':
            config['cache_maxage'] = 0
        else:
            config['cache_maxage'] = int(cache)
        gmail_helper.config_persister.set(config)
        print("I've set caching to " + str(config['cache_maxage']) + " seconds.")
    print(main_dialog.conclusion())


# Script entry point.
if __name__ == '__main__':
    main()
| [
"sheldon@sheldonrampton.com"
] | sheldon@sheldonrampton.com |
2a707b03595d95c0e72578750e050585696592c0 | 95764ffd67cba039e9de37f84ed4269fef3ce0e6 | /contrib/spendfrom/spendfrom.py | 1e054236623e174cd22a6d53a0d3163cbd3a332f | [
"MIT"
] | permissive | KognitysPlayhouse/klpcoin | ed6a2e76bc579522da930a98c044d7127c0de740 | c39cbf10f3b4ab73383744c8805e0ae31e73fa51 | refs/heads/master | 2022-07-30T21:52:54.920801 | 2020-05-16T09:48:33 | 2020-05-16T09:48:33 | 262,539,399 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,053 | py | #!/usr/bin/env python
#
# Use the raw transactions API to spend bitcoins received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
# spendfrom.py # Lists available funds
# spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a bitcoind or Bitcoin-Qt running
# on localhost.
#
# Depends on jsonrpc
#
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
    """Make sure json library being used does not lose precision converting BTC values"""
    # Round-trip a value with 8 decimal places through json; the satoshi
    # count must survive exactly.
    amount = Decimal("20000000.00000003")
    satoshis = int(json.loads(json.dumps(float(amount))) * 1.0e8)
    if satoshis != 2000000000000003:
        raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
    """Return the default location of the bitcoin data directory"""
    system_name = platform.system()
    if system_name == "Darwin":
        return os.path.expanduser("~/Library/Application Support/Bitcoin/")
    if system_name == "Windows":
        return os.path.join(os.environ['APPDATA'], "Bitcoin")
    # Linux and everything else.
    return os.path.expanduser("~/.bitcoin")
def read_bitcoin_config(dbdir):
    """Read the bitcoin.conf file from dbdir, returns dictionary of settings"""
    # Python 2 module; bitcoin.conf has no [section] headers, which
    # ConfigParser requires, so a fake file wrapper injects one.
    from ConfigParser import SafeConfigParser
    class FakeSecHead(object):
        # File-like adapter that yields a synthetic '[all]' section header
        # before delegating to the real file, and strips '#' comments.
        def __init__(self, fp):
            self.fp = fp
            self.sechead = '[all]\n'
        def readline(self):
            if self.sechead:
                # Emit the fake header exactly once.
                try: return self.sechead
                finally: self.sechead = None
            else:
                s = self.fp.readline()
                # Drop everything from '#' to end-of-line (comment syntax).
                if s.find('#') != -1:
                    s = s[0:s.find('#')].strip() +"\n"
                return s
    config_parser = SafeConfigParser()
    config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "bitcoin.conf"))))
    # All keys live in the synthetic 'all' section created above.
    return dict(config_parser.items("all"))
def connect_JSON(config):
    """Connect to a bitcoin JSON-RPC server described by config.

    Expects 'rpcuser' and 'rpcpassword' keys; fills in the default
    'rpcport' (mainnet or testnet) when absent. Exits the process with
    status 1 on connection failure or testnet mismatch.
    """
    testnet = config.get('testnet', '0')
    testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False
    if not 'rpcport' in config:
        config['rpcport'] = 18422 if testnet else 8422
    connect = "http://%s:%s@127.0.0.1:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
    try:
        result = ServiceProxy(connect)
        # ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
        # but also make sure the bitcoind we're talking to is/isn't testnet:
        if result.getmininginfo()['testnet'] != testnet:
            sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
            sys.exit(1)
        return result
    # BUG FIX: a bare 'except:' also caught the SystemExit raised by the
    # testnet-mismatch branch above, masking it as a connection error.
    except Exception:
        sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
        sys.exit(1)
def unlock_wallet(bitcoind):
    """Prompt for the wallet passphrase if the wallet is locked.

    Returns True when the wallet is unencrypted or successfully unlocked
    (for a short window), False when the passphrase was wrong.
    """
    info = bitcoind.getinfo()
    if 'unlocked_until' not in info:
        return True # wallet is not encrypted
    t = int(info['unlocked_until'])
    if t <= time.time():
        try:
            passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
            # Unlock just long enough (5 seconds) to sign the transaction.
            bitcoind.walletpassphrase(passphrase, 5)
        # BUG FIX: narrowed from a bare 'except:' which also swallowed
        # KeyboardInterrupt, making Ctrl-C at the prompt loop forever.
        except Exception:
            sys.stderr.write("Wrong passphrase\n")
    # Re-query: the unlock window must still be open for the caller to proceed.
    info = bitcoind.getinfo()
    return int(info['unlocked_until']) > time.time()
def list_available(bitcoind):
    """Summarize spendable funds per address.

    Returns a dict mapping address -> {'total': Decimal, 'outputs': [unspent
    outputs], 'account': account name} built from bitcoind's wallet RPCs.
    """
    address_summary = dict()
    # Map each receiving address to its wallet account label.
    address_to_account = dict()
    for info in bitcoind.listreceivedbyaddress(0):
        address_to_account[info["address"]] = info["account"]
    # minconf=0: include unconfirmed outputs as spendable.
    unspent = bitcoind.listunspent(0)
    for output in unspent:
        # listunspent doesn't give addresses, so:
        rawtx = bitcoind.getrawtransaction(output['txid'], 1)
        vout = rawtx["vout"][output['vout']]
        pk = vout["scriptPubKey"]
        # This code only deals with ordinary pay-to-bitcoin-address
        # or pay-to-script-hash outputs right now; anything exotic is ignored.
        if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
            continue
        address = pk["addresses"][0]
        if address in address_summary:
            # Accumulate onto an address we've already seen.
            address_summary[address]["total"] += vout["value"]
            address_summary[address]["outputs"].append(output)
        else:
            address_summary[address] = {
                "total" : vout["value"],
                "outputs" : [output],
                "account" : address_to_account.get(address, "")
                }
    return address_summary
def select_coins(needed, inputs):
    """Greedily pick unspent outputs (in order) until 'needed' is covered.

    Returns (chosen_outputs, change) where change is gathered - needed.
    """
    # Feel free to improve this, this is good enough for my simple needs:
    chosen = []
    gathered = Decimal("0.0")
    for candidate in inputs:
        if gathered >= needed:
            break
        chosen.append({ "txid":candidate["txid"], "vout":candidate["vout"]})
        gathered += candidate["amount"]
    return (chosen, gathered-needed)
def create_tx(bitcoind, fromaddresses, toaddress, amount, fee):
    """Build and sign a raw transaction spending from fromaddresses.

    Sends 'amount' to toaddress, pays 'fee', and returns change to the
    last from-address. Returns the signed transaction hex; exits the
    process on insufficient funds or signing failure.
    """
    all_coins = list_available(bitcoind)
    total_available = Decimal("0.0")
    needed = amount+fee
    potential_inputs = []
    # Collect every unspent output belonging to the requested addresses.
    for addr in fromaddresses:
        if addr not in all_coins:
            continue
        potential_inputs.extend(all_coins[addr]["outputs"])
        total_available += all_coins[addr]["total"]
    if total_available < needed:
        sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed));
        sys.exit(1)
    #
    # Note:
    # Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
    # Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
    # Decimals, I'm casting amounts to float before sending them to bitcoind.
    #
    outputs = { toaddress : float(amount) }
    (inputs, change_amount) = select_coins(needed, potential_inputs)
    if change_amount > BASE_FEE: # don't bother with zero or tiny change
        # Change goes back to the last from-address (merged if it is also
        # the destination).
        change_address = fromaddresses[-1]
        if change_address in outputs:
            outputs[change_address] += float(change_amount)
        else:
            outputs[change_address] = float(change_amount)
    rawtx = bitcoind.createrawtransaction(inputs, outputs)
    signed_rawtx = bitcoind.signrawtransaction(rawtx)
    if not signed_rawtx["complete"]:
        sys.stderr.write("signrawtransaction failed\n")
        sys.exit(1)
    txdata = signed_rawtx["hex"]
    return txdata
def compute_amount_in(bitcoind, txinfo):
    """Sum the values of all previous outputs spent by this transaction's inputs."""
    total = Decimal("0.0")
    for txin in txinfo['vin']:
        # Look up the transaction that created the output being spent.
        prev_tx = bitcoind.getrawtransaction(txin['txid'], 1)
        total = total + prev_tx['vout'][txin['vout']]['value']
    return total
def compute_amount_out(txinfo):
    """Sum the values of all outputs created by this transaction."""
    return sum((vout['value'] for vout in txinfo['vout']), Decimal("0.0"))
def sanity_test_fee(bitcoind, txdata_hex, max_fee):
    """Decode a signed transaction and abort if its fee looks unreasonable.

    Rejects fees above max_fee, and rejects fee-less transactions that are
    large or move a tiny amount (miners would likely never confirm those).
    Exits the process with status 1 on rejection.
    """
    class FeeError(RuntimeError):
        pass
    try:
        txinfo = bitcoind.decoderawtransaction(txdata_hex)
        total_in = compute_amount_in(bitcoind, txinfo)
        total_out = compute_amount_out(txinfo)
        # BUG FIX: 'fee' was read by the checks below but never assigned,
        # raising NameError whenever they were reached. The implicit fee of
        # a transaction is inputs minus outputs.
        fee = total_in-total_out
        if fee > max_fee:
            raise FeeError("Rejecting transaction, unreasonable fee of "+str(fee))
        tx_size = len(txdata_hex)/2
        kb = tx_size/1000  # integer division rounds down
        if kb > 1 and fee < BASE_FEE:
            raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
        if total_in < 0.01 and fee < BASE_FEE:
            raise FeeError("Rejecting no-fee, tiny-amount transaction")
        # Exercise for the reader: compute transaction priority, and
        # warn if this is a very-low-priority transaction
    except FeeError as err:
        sys.stderr.write((str(err)+"\n"))
        sys.exit(1)
def main():
    """Command-line entry point: list available funds, or build and send a
    transaction from specific addresses (Python 2 script)."""
    import optparse
    parser = optparse.OptionParser(usage="%prog [options]")
    parser.add_option("--from", dest="fromaddresses", default=None,
                      help="addresses to get bitcoins from")
    parser.add_option("--to", dest="to", default=None,
                      help="address to get send bitcoins to")
    parser.add_option("--amount", dest="amount", default=None,
                      help="amount to send")
    parser.add_option("--fee", dest="fee", default="0.0",
                      help="fee to include")
    parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
                      help="location of bitcoin.conf file with RPC username/password (default: %default)")
    parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
                      help="Use the test network")
    parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
                      help="Don't broadcast the transaction, just create and print the transaction data")
    (options, args) = parser.parse_args()
    check_json_precision()
    config = read_bitcoin_config(options.datadir)
    if options.testnet: config['testnet'] = True
    bitcoind = connect_JSON(config)
    # No --amount means "just list what is spendable, per address".
    if options.amount is None:
        address_summary = list_available(bitcoind)
        for address,info in address_summary.iteritems():
            n_transactions = len(info['outputs'])
            if n_transactions > 1:
                print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
            else:
                print("%s %.8f %s"%(address, info['total'], info['account']))
    else:
        fee = Decimal(options.fee)
        amount = Decimal(options.amount)
        while unlock_wallet(bitcoind) == False:
            pass # Keep asking for passphrase until they get it right
        txdata = create_tx(bitcoind, options.fromaddresses.split(","), options.to, amount, fee)
        # Refuse to broadcast if the implied fee exceeds 1% of the amount.
        sanity_test_fee(bitcoind, txdata, amount*Decimal("0.01"))
        if options.dry_run:
            print(txdata)
        else:
            txid = bitcoind.sendrawtransaction(txdata)
            print(txid)
if __name__ == '__main__':
    main()
| [
"kognitysplayhouse@gmail.com"
] | kognitysplayhouse@gmail.com |
99872aff1b1ab4b6fba6c80fd17c68a0be496834 | 70c9856a08b8e525b3185a258193307163f8426b | /NTAP-master/test/test_svm.py | 853ee9ea13e8a7e6ad0b7ce15b91246934ecc8da | [] | no_license | avral1810/CSSL | 664a8bc5dbc022a5c4de7f35e020a83b01d1b5ce | ec0c44185bbc877ab8aeab0a37d38c710539756c | refs/heads/master | 2022-12-08T06:43:39.052846 | 2020-08-26T07:44:46 | 2020-08-26T07:44:46 | 290,429,891 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,859 | py | # NOTE IMPORTANT: Must clone and run on leigh_dev branch
import sys
sys.path.append('.')
from ntap.data import Dataset
from ntap.models import SVM
import pandas as pd
import argparse
import os
parser = argparse.ArgumentParser()
parser.add_argument("--input", help="Path to input file")
parser.add_argument("--predict", help="Path to predict data")
parser.add_argument("--save", help="Path to save directory")
args = parser.parse_args()
SEED = 734 # Go Blue!
def save_results(res, name, path):
    """Write the first results DataFrame in 'res' to <path>/<name> as CSV."""
    target = os.path.join(path, name)
    with open(target, 'w') as out:
        res[0].to_csv(out)
    print("Saved results ({}) to {}".format(name, path))
def chunk_data(input_path, chunksize=10000):
    """Read a CSV file in chunks and return the concatenated DataFrame.

    Chunked reading keeps peak memory low for very large files; the
    result is identical to a plain read_csv of the whole file.
    """
    # BUG FIX: the chunksize parameter was ignored (hard-coded 100000).
    data_iter = pd.read_csv(input_path, chunksize=chunksize)
    ret_list = []
    for data in data_iter:
        ret_list.append(data)
    return pd.concat(ret_list)
def init_model(target, feature, dataset):
    """Build an ntap SVM whose formula is '<target> ~ <feature>(Text)'."""
    model_formula = "{} ~ {}(Text)".format(target, feature)
    return SVM(model_formula, data=dataset, random_state=SEED)
def cv(model, data):
    """Run the model's built-in cross-validation and return its results object."""
    results = model.CV(data=data)
    return results
def train(model, data, params=None):
    """Fit the model on data; optional params are forwarded to model.train."""
    model.train(data, params=params)
def process_data(data):
    """Drop rows with no 'body' text (in place), wrap in an ntap Dataset, clean it."""
    # Note: the dropna is deliberately in place, mutating the caller's frame.
    data.dropna(subset=['body'], inplace=True)
    dataset = Dataset(data)
    dataset.clean(column='body')
    return dataset
def predict(model, predict_path, feat, filename):
    """Run the trained model over a large TSV of posts, chunk by chunk.

    Writes periodic checkpoint CSVs plus a final CSV to 'filename' and
    returns a zip of (user_id, prediction, text).
    """
    user_all = []
    y_all = []
    text_all = []
    count = 0
    # Chunk so its not read in all at once
    data_iter = pd.read_csv(predict_path, sep='\t', chunksize=100000)
    for data_chunk in data_iter:
        count += 1
        print("Chunk {}".format(count))
        data_chunk = process_data(data_chunk)
        # Get users and text after processing data (rows will be dropped)
        users = data_chunk.data['id']
        text = data_chunk.data['body']
        if feat == "ddr":
            # DDR featurization needs a moral-foundations dictionary and
            # GloVe embeddings.
            data_chunk.dictionary="../../HateAnnotations/mfd2.json"
            data_chunk.glove_path = "../../embeddings/glove.6B.300d.txt"
        # Running tfidf/ddr method from Dataset()
        getattr(data_chunk, feat)(column='body')
        y_hat = model.predict(data_chunk)
        y_all.extend(y_hat)
        user_all.extend(users)
        text_all.extend(text)
        chunk_filename = filename + "_"+ str(count)
        # Save over time, just in case it crashes
        if count % 10 == 0:
            pd.DataFrame(list(zip(user_all, text_all, y_all)), columns=["user_id", "text", "y"]).to_csv(chunk_filename, index=False)
    pd.DataFrame(list(zip(user_all, text_all, y_all)), columns=["user_id", "text", "y"]).to_csv(filename, index=False)
    return zip(user_all, y_all, text_all)
def evaluate(model, predictions, labels, target):
    """Score predictions against labels via the model's evaluate method.

    NOTE(review): the literal 2 is presumably the number of classes —
    confirm against the ntap SVM.evaluate signature.
    """
    stats = model.evaluate(predictions, labels, 2, target)
    return stats
if __name__=='__main__':
    # For every (feature, target) pair: cross-validate, train on the full
    # annotated set, then predict over the large unlabeled corpus.
    features = ["ddr"] # lda, ddr, liwc
    targets = ["hate", "cv", "hd", "vo"] # cv, hd, vo
    input_path = args.input
    output_path = args.save if args.save else os.getcwd()
    for feat in features:
        for target in targets:
            model_filename = os.path.join(output_path, "_".join([target, feat, "cv_model"]))
            filename = os.path.join(output_path, "_".join([target, feat, "fullgabpred"]))
            data = Dataset(input_path)
            if feat == "ddr":
                # DDR featurization needs the dictionary and embeddings.
                data.dictionary="../../HateAnnotations/mfd2.json"
                data.glove_path = "../../embeddings/glove.6B.300d.txt"
            model = init_model(target, feat, data)
            cv_res = cv(model, data)
            save_results(cv_res.dfs, model_filename, output_path)
            print("Training...")
            train(model, data)
            print("Predicting...")
            results = predict(model, args.predict, feat, filename)
            pd.DataFrame(list(results), columns=["user_id", "y", "text"]).to_csv(filename, index=False)
| [
"aviralupadhyay@ymail.com"
] | aviralupadhyay@ymail.com |
0ac7c62654ff678d766278aba08a3084f9ae905f | 1df0dade13867d7ff66affefc4e695f9fd23e831 | /boot.py | 0472272098a46e008fca829c8aca100c2702a0a3 | [] | no_license | Benoit-LdL/uPython_ESP32-GPS_OLED_SD | bcd54cd8079878e162e9ed3be9d8287d23c162d8 | 496e5ab2156b30a094f3b0439ac4588a4630f700 | refs/heads/master | 2023-05-31T05:51:53.464184 | 2021-06-19T15:09:17 | 2021-06-19T15:09:17 | 378,438,187 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,015 | py | import config
import network
import utime
import ntptime
def do_connect():
    """Connect the board to Wi-Fi as a station (20 s timeout), then sync
    the clock via NTP when a connection was established (MicroPython)."""
    sta_if = network.WLAN(network.STA_IF)
    start = utime.time()
    timed_out = False
    if not sta_if.isconnected():
        print('connecting to network...')
        sta_if.active(True)
        sta_if.connect(config.wifi_config["ssid"], config.wifi_config["password"])
        # Busy-wait until connected or 20 seconds have elapsed.
        while not sta_if.isconnected() and \
                not timed_out:
            if utime.time() - start >= 20:
                timed_out = True
            else:
                pass
    if sta_if.isconnected():
        # Set the real-time clock from an NTP server (requires internet).
        ntptime.settime()
        print('network config:', sta_if.ifconfig())
    else:
        print('internet not available')
###AP MODE
# Dead code kept for reference: access-point mode setup.
'''
ssid = 'LdL-GPS'
password = '12345678'
ap = network.WLAN(network.AP_IF)
ap.active(True)
ap.config(essid=ssid, password=password)
while ap.active() == False:
    pass
print('AP up and running')
print(ap.ifconfig())
'''
do_connect()
"benoitlagasse@hotmail.com"
] | benoitlagasse@hotmail.com |
04d11d4acd3a2a4c155b47375cb024987db8e049 | 4deabdd334cd476527d9b58214d1200c5733879b | /home/admin.py | be7fbd7a864413456bc442ced21f959ddc8f8cb8 | [] | no_license | Chaman1996/django-ecommerce | f34c8cbae3341edca231baeb5a30b33f48c4094b | f28f5e773bd4790388b8544913e9841b06c0342f | refs/heads/master | 2022-12-17T01:08:09.289872 | 2020-09-19T04:49:45 | 2020-09-19T04:49:45 | 296,787,419 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 514 | py | from django.contrib import admin
# Register your models here.
from .models import Setting, ContactMessage
class SettingAdmin(admin.ModelAdmin):
    """Admin configuration for the Setting model."""
    # Columns shown on the changelist page and the sidebar filter.
    list_display = ['title', 'company','update_at', 'status']
    list_filter = ['company']
class ContactAdmin(admin.ModelAdmin):
    """Admin configuration for submitted contact messages."""
    list_display = ['name', 'email', 'status']
    list_filter = ['status']
    # Submitted content is read-only in the admin; staff can only change status.
    readonly_fields = ('name', 'email','subject','message','ip')
admin.site.register(Setting, SettingAdmin)
admin.site.register(ContactMessage,ContactAdmin)
"knand4930@gamil.com"
] | knand4930@gamil.com |
3639853b3a16aace0ecfc8d1374e206313601604 | fe4a1aafc04c456ff351964bb6666298bc158239 | /fabio_lista2a/f2a_q25_validar_senha.py | d173f1c19ff4969860b23031507bb4b60ecb4c89 | [] | no_license | weverson23/ifpi-ads-algoritmos2020 | 60f82feb450f718e43fc0a0e9349675ca4444d3d | 3d696f2dec7d813af8d3ce6b4ad6eacce8b0a9da | refs/heads/master | 2021-03-05T13:08:40.484109 | 2021-02-03T23:57:15 | 2021-02-03T23:57:15 | 246,124,057 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 488 | py | # Lê uma senha de 4 números diz se é ou não válida
def valida_senha(a):
    """Return True only when the 4-digit password reads 1-2-3-4."""
    # Peel the digits off with divmod: thousands, hundreds, tens, units.
    milhar, resto = divmod(a, 1000)
    centena, resto = divmod(resto, 100)
    dezena, unidade = divmod(resto, 10)
    return (milhar, centena, dezena, unidade) == (1, 2, 3, 4)
def main():
    """Prompt for a 4-digit password and report whether it is correct."""
    senha = int(input('Digite uma senha de 4 números: '))
    if valida_senha(senha):
        print('Senha válida!')
    else:
        print('Senha incorreta!')
main()
"weversonoliveira12@gmail.com"
] | weversonoliveira12@gmail.com |
a796302ae78dfbadfc61f4f8e0470dfaaeafbd45 | 14dd622ef84b3f48c2d66d8ab873084634cfb6d4 | /PythonLearning/Learning OpenCV/Test5.py | 01d3c443a10a39506fb290c29b88bdd8b7526454 | [] | no_license | ByronGe/Python-base-Learning | 648cbbf1c7a8431dece3638dfb4de754623bc84e | 7ade3250c4abc4b5e47e39080bf1ad8d53b04d78 | refs/heads/master | 2020-04-15T20:18:24.134950 | 2019-01-10T04:00:51 | 2019-01-10T04:00:51 | 164,987,206 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,055 | py | import cv2.cv as cv
im=cv.LoadImage('img/building.png', cv.CV_LOAD_IMAGE_COLOR)
# Laplace on a gray scale picture
gray = cv.CreateImage(cv.GetSize(im), 8, 1)
cv.CvtColor(im, gray, cv.CV_BGR2GRAY)
aperture=3
dst = cv.CreateImage(cv.GetSize(gray), cv.IPL_DEPTH_32F, 1)
cv.Laplace(gray, dst,aperture)
cv.Convert(dst,gray)
thresholded = cv.CloneImage(im)
cv.Threshold(im, thresholded, 50, 255, cv.CV_THRESH_BINARY_INV)
cv.ShowImage('Laplaced grayscale',gray)
#------------------------------------
# Laplace on color
planes = [cv.CreateImage(cv.GetSize(im), 8, 1) for i in range(3)]
laplace = cv.CreateImage(cv.GetSize(im), cv.IPL_DEPTH_16S, 1)
colorlaplace = cv.CreateImage(cv.GetSize(im), 8, 3)
cv.Split(im, planes[0], planes[1], planes[2], None) #Split channels to apply laplace on each
for plane in planes:
cv.Laplace(plane, laplace, 3)
cv.ConvertScaleAbs(laplace, plane, 1, 0)
cv.Merge(planes[0], planes[1], planes[2], None, colorlaplace)
cv.ShowImage('Laplace Color', colorlaplace)
#-------------------------------------
cv.WaitKey(0) | [
"2450894732@qq.com"
] | 2450894732@qq.com |
f7b82db9f43e48b83988af9e86231dbf6565fa49 | d0a9031ac909255bbb42c7931f41a3545c097717 | /math/0x00-linear_algebra/1-trim_me_down.py | 9294d95ab3a2c94880fb441199b795a32e84b025 | [] | no_license | bouchra-creator/mundiapolis-math | 36098bff1c5ea095f9ec3f64e3b47ecc4c6a10bc | d7dbd782e9d88604ed73cc497c9836fac265bc2c | refs/heads/main | 2023-03-19T13:48:13.145230 | 2021-03-19T15:03:46 | 2021-03-19T15:03:46 | 346,747,078 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 271 | py | #!/usr/bin/env python3
matrix = [[1, 3, 9, 4, 5, 8],
[2, 4, 7, 3, 4, 0],
[0, 3, 4, 6, 1, 5]]
the_middle = []
for index in range(3):
the_middle.append(matrix[index][2:4])
print("The middle columns of the matrix are: {}".format(the_middle))
| [
"noreply@github.com"
] | noreply@github.com |
0bc6382d2b2a451e0a39023e46fd0faca3f9663d | ddeaed97673473936f2551e01bba61aa9a83dd35 | /Basic Programs/palidrome.py | a3c14223ea6e0b1bcef891a9348c3d24dde2650b | [] | no_license | darshanahire/Python-language | cf2415e8da57797612e10d7380f40b9047409e5f | e5ce268069f8506cc5a79a4641a85910e8b07c0f | refs/heads/main | 2023-02-15T19:57:34.835872 | 2021-01-12T15:31:04 | 2021-01-12T15:31:04 | 328,401,812 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 56 | py | a=(input("==>"))
if (a==a[ : : -1]):
print("Palidrome") | [
"noreply@github.com"
] | noreply@github.com |
2f478ed47a5fab20576a82e2a7f6a54c4778e19c | 55c508394d55dd150722e580e50895ef0fb28516 | /ioUtil.py | fcb0d28c8a7d922d9dc95b05a2a349268a4d5e37 | [
"MIT"
] | permissive | PeterZs/P2P-NET | 2fa405d94580627b400553f12d4716355fbe7afd | 5c5890a308cc84eafd6845c46d4bf0fc138e5dd8 | refs/heads/master | 2020-03-27T09:55:14.569559 | 2018-08-13T22:49:14 | 2018-08-13T22:49:14 | 146,382,712 | 1 | 0 | null | 2018-08-28T02:46:47 | 2018-08-28T02:46:47 | null | UTF-8 | Python | false | false | 1,711 | py | import os
import sys
import numpy as np
import h5py
import collections
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
Examples = collections.namedtuple("Examples", "names, pointsets_A, pointsets_B")
def shuffle_examples(data):
    """Return a new Examples tuple with all three fields shuffled in unison."""
    order = np.arange(data.names.shape[0])
    np.random.shuffle(order)
    # Index every field with the same permutation so rows stay aligned.
    return Examples(
        names=data.names[order, ...],
        pointsets_A=data.pointsets_A[order, ...],
        pointsets_B=data.pointsets_B[order, ...],
    )
def load_examples(h5_filename, fieldname_A, fieldname_B, fieldname_modelname):
    """Load paired point sets and model names from an HDF5 file.

    Reads the three named datasets fully into memory and returns them as
    an Examples namedtuple.
    """
    # BUG FIX: the file handle was never closed; a context manager (opened
    # read-only) guarantees release even if a dataset name is missing.
    with h5py.File(h5_filename, 'r') as f:
        pointsets_A = f[fieldname_A][:]
        pointsets_B = f[fieldname_B][:]
        names = f[fieldname_modelname][:]
    return Examples(
        names=names,
        pointsets_A=pointsets_A,
        pointsets_B=pointsets_B,
    )
def output_point_cloud_ply(xyzs, names, output_dir, foldername):
    """Write one ASCII .ply point-cloud file per model under output_dir/foldername.

    xyzs has shape (num_models, num_points, 3); names supplies one file
    name per model.
    """
    if not os.path.exists(output_dir):
        os.mkdir(output_dir)
    plydir = output_dir + '/' + foldername
    if not os.path.exists(plydir):
        os.mkdir(plydir)
    for fid, model_name in enumerate(names):
        ply_path = plydir + '/' + model_name + '.ply'
        print('write: ' + ply_path)
        with open(ply_path, 'w') as f:
            point_count = xyzs.shape[1]
            f.write('ply\n')
            f.write('format ascii 1.0\n')
            f.write('element vertex %d\n' % (point_count))
            f.write('property float x\n')
            f.write('property float y\n')
            f.write('property float z\n')
            f.write('end_header\n')
            for i in range(point_count):
                f.write('%f %f %f\n' % (xyzs[fid][i][0], xyzs[fid][i][1], xyzs[fid][i][2]))
"yinkangxue@qq.com"
] | yinkangxue@qq.com |
a5d4f84b1eca8423de3d1739de44f69aa5c73ff7 | d4300c1b72589f6e2d2519312f40565799360a3d | /dataloader/pascal_dataloader.py | b15c35db7f3523b7f02d1257e291575956ba52c2 | [] | no_license | JanLin0817/pytorch_learning | eb0cce4c932de76cf10b66e7967a6ef77c45af5c | 2f9352e3cc378a1dcab8bf59df535d7f60ca6cf6 | refs/heads/master | 2020-06-11T04:47:37.254417 | 2019-06-26T08:22:38 | 2019-06-26T08:22:38 | 193,852,563 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,268 | py | import os
import numpy as np
from PIL import Image
import torchvision
from torchvision import transforms
from torch.utils.data import Dataset, DataLoader
# self-define class
import custom_transforms
# debug
import matplotlib.pyplot as plt
category_names = ['background',
'aeroplane', 'bicycle', 'bird', 'boat', 'bottle',
'bus', 'car', 'cat', 'chair', 'cow',
'diningtable', 'dog', 'horse', 'motorbike', 'person',
'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor']
class VOC_Instance_Segmentation(Dataset):
    '''
    VOC instance segmentation
    * mask data as below
    \- 0:background
    \- 255: object's contour
    \- 1~n: objects ID
    TODO: pick object size, and download
    '''
    def __init__(self,
                 root='/home/riq/segmentation/benchmark/VOC/VOCdevkit/VOC2012',
                 split_sets='train',
                 transform=None,
                 transform_handcraft=None,
                 #download=False,
                 preprocess=False,
                 area_thres=0
                 ):
        """Index all (object ID, image path, mask path) pairs for the split."""
        self.root = root
        self.transform = transform
        self.area_thres = area_thres
        self.transform_handcraft = transform_handcraft
        # split set
        if isinstance(split_sets, list):
            split_sets = ''.join(split_sets)
        # get file path of image and ground truth in specific set i.e. train.txt, trainval.txt, val.txt
        self.images, self.instance_objs = self._get_pair_path(split_sets)
        # get file path of image and objects(from ground truth)
        self.pair_list = self._get_instance_obj()
        print('INFO: number of data', len(self.pair_list))
    def __getitem__(self, index):
        """Return a dict with the image and a binary mask for one object
        instance (plus extreme-point heat map / concatenated input when a
        handcraft transform pipeline is configured)."""
        to_PIL = transforms.ToPILImage()
        ID, img_path, gt_path = self.pair_list[index]
        img, gt = Image.open(img_path), Image.open(gt_path)
        # Binarize the instance mask: 1.0 where the pixel belongs to this ID.
        gt = to_PIL((np.array(gt)==ID).astype(np.float32))
        if self.transform:
            img, gt = self.transform([img, gt])
        input_pair = {'image':img, 'gt': gt}
        if self.transform_handcraft:
            img, gt = self.transform_handcraft.ObjectCenterCrop(gt, img)
            img, gt = self.transform_handcraft.Fix_size(img, gt)
            # input_pair = {'image':img, 'gt': gt, }
            heat_map = self.transform_handcraft.get_extreme_point_channel(gt)
            concate_input = self.transform_handcraft.concat_inputs(img, heat_map)
            # concate_input = self.transform_handcraft.ToTensor(concate_input)
            # gt = self.transform_handcraft.ToTensor(gt)
            input_pair = {'image':img, 'gt':gt, 'heat_map':heat_map, 'input':concate_input}
        return input_pair
    def __len__(self):
        # One sample per object instance, not per image.
        return len(self.pair_list)
    def _get_instance_obj(self):
        """Expand each mask into one (ID, image, mask) entry per object.

        np.unique(gt)[1:-1] drops 0 (background) and 255 (contour)."""
        # TODO: threshold for object size
        area_th_str = ""
        if self.area_thres != 0:
            area_th_str = '_area_thres-' + str(self.area_thres)
        pair_list = []
        for img_path, gt_path in zip(self.images, self.instance_objs):
            gt = Image.open(gt_path)
            object_ID = np.unique(gt)[1:-1]
            for ID in object_ID:
                pair_list.append([ID, img_path, gt_path])
        return pair_list
    def _get_pair_path(self, split_sets):
        """Resolve the image/mask path lists for a VOC split (e.g. 'train')."""
        images_path, instance_objs_path = [], []
        # A File denote image belong to which set
        split_set_dir = os.path.join(self.root, 'ImageSets', 'Segmentation')
        seg_obj_dir = os.path.join(self.root, 'SegmentationObject')
        image_dir = os.path.join(self.root, 'JPEGImages')
        # Read img name from whole set of .txt file
        # Can't use glob, because we don't want to load all image in the folder at the same time
        with open(os.path.join(os.path.join(split_set_dir, split_sets + '.txt')), "r") as f:
            img_names = f.read().splitlines()
        for img_name in img_names:
            image = os.path.join(image_dir, img_name + ".jpg")
            seg_obj = os.path.join(seg_obj_dir, img_name + ".png")
            assert os.path.isfile(image)
            assert os.path.isfile(seg_obj)
            images_path.append(image)
            instance_objs_path.append(seg_obj)
        assert (len(images_path) == len(instance_objs_path))
        return images_path, instance_objs_path
def torch_VOC():
    '''
    VOC SegmentationClass include (background, multi object, contour)
    Mask which load by PIL Image value is as below
    0: background
    1~20: object ID
    255: contour
    Pytorch only supports semantic segmentation output pair so far, and didn't split contour
    '''
    # Demo: inspect a single (image, mask) pair from torchvision's VOC loader.
    voc = torchvision.datasets.VOCSegmentation('./data', year='2012', image_set='trainval', download=False)
    print(type(voc[0]))
    # # loop all pair
    # for sample in voc:
    #     img, gt = sample[0], sample[1]
    #     if np.unique(np.array(gt))[-1] != 255:
    #         print("INFO: WTF")
    # single pair
    sample = voc[0]
    img, gt = sample[0], sample[1]
    # gt = np.array(gt)
    print('INFO: Ground truth object ID {}'.format(np.unique(gt)))
    print('INFO: 0=background, 255=contour, other=object ID')
    # Show the raw image and the mask side by side (mask scaled for visibility).
    plt.subplot(1, 2, 1)
    plt.imshow(img)
    plt.subplot(1, 2, 2)
    plt.imshow(np.array(gt) * 255) # show object ID
    plt.tight_layout()
    plt.show()
if __name__ == "__main__":
    # delete()
    # torch_VOC()
    # 1.Show transforms's result before transfer to tensor
    # execute by PIL image
    data_transforms = transforms.Compose([
        custom_transforms.RandomHorizontalFlip(1.0),
        custom_transforms.RandomRotation((-20,20), scales=(0.75, 1.0)),
        custom_transforms.to_numpy()
    ])
    # excute by numpy image
    handcraft_transforms = custom_transforms.Compose_dict([
        custom_transforms.ObjectCenterCrop(),
        custom_transforms.Fix_size(size=512),
        custom_transforms.get_extreme_point_channel(),
        custom_transforms.concat_inputs()
    ])
    voc = VOC_Instance_Segmentation(split_sets=['train'], transform=data_transforms, \
        transform_handcraft=handcraft_transforms)
    # Visually sanity-check the first few samples (image, mask, heat map).
    for ii, dct in enumerate(voc):
        img, gt, concate = dct['image'], dct['gt'], dct['heat_map']
        plt.subplot(1, 3, 1)
        plt.imshow(img)
        plt.subplot(1, 3, 2)
        plt.imshow(gt)
        plt.subplot(1, 3, 3)
        plt.imshow(concate)
        plt.tight_layout()
        plt.pause(5)
        if ii > 10:
            exit()
    # 2.Show transforms's result tensor size
    handcraft_transforms = custom_transforms.Compose_dict([
        custom_transforms.ObjectCenterCrop(),
        custom_transforms.Fix_size(size=512),
        custom_transforms.get_extreme_point_channel(),
        custom_transforms.concat_inputs(),
        transforms.ToTensor()
    ])
    voc = VOC_Instance_Segmentation(split_sets=['train'], transform=data_transforms, \
        transform_handcraft=handcraft_transforms)
    for ii, dct in enumerate(voc):
        img, gt, concate = dct['image'], dct['gt'], dct['input']
        print('shape of tensor {}'.format(concate.shape))
        if ii > 10:
            exit()
"e8o1e8o1s7@gmail.com"
] | e8o1e8o1s7@gmail.com |
738e65035ec92179af5430b2fd42c44305c33746 | 0f95d221a396587a505586c39d0c7cf729efd777 | /producthunt/urls.py | ba05d4a3cffa229c09e0921913b965432f9c50a1 | [] | no_license | feiyangmeiyu/producthunt-project | affd8df636f6fdcd537012daa447f860b165a7cf | b3605bba80606589d8e3220e40591d2d691a462e | refs/heads/master | 2022-11-10T20:07:33.056961 | 2020-06-29T16:21:10 | 2020-06-29T16:21:10 | 275,866,012 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 417 | py | from django.contrib import admin
from django.urls import path, include
from product import views
from django.conf.urls.static import static
from django.conf import settings
# Route the site root, the account and product apps, and the Django admin;
# static() appends routes for serving user-uploaded media files.
urlpatterns = [
    path('', views.home, name='home'),
    path('account/', include('account.urls')),
    path('product/', include('product.urls')),
    path('admin/', admin.site.urls),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"806567171@qq.com"
] | 806567171@qq.com |
c726189af81802126db1c68fc78686ded22bee37 | bf34e4c85967f02af73e025f42828be9e4238a0d | /ChattingRoom-3.0/chat_window/base_window/add_friend_view.py | 24e8c9258482aa5064ebab500700f321996bb7bc | [] | no_license | crizydevl/chatroom-3.0 | c138e1536ca6387ceed56de173bdf07da4fe6f82 | 2f58ce8bf7a593693fd40bbb6755fa2939ac258c | refs/heads/master | 2020-04-12T09:57:58.119720 | 2018-12-19T09:03:33 | 2018-12-19T09:03:33 | 162,414,330 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,158 | py | from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import QDialog, QApplication
class Add_friend_Dialog(QDialog):
    """'Add contact' dialog: a labelled line edit for the contact's account
    plus OK/Cancel buttons (generated-style PyQt5 layout code)."""
    def setup_ui(self):
        """Build the widget tree and layouts; call once after construction."""
        self.setObjectName("Dialog")
        self.resize(200, 100)
        icon = QtGui.QIcon()
        import os
        print('地址', os.getcwd())
        icon.addPixmap(QtGui.QPixmap("App/Views/images/logo/logo.ico"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.setWindowIcon(icon)
        self.setSizeGripEnabled(True)
        self.verticalLayout_2 = QtWidgets.QVBoxLayout(self)
        self.verticalLayout_2.setObjectName("verticalLayout_2")
        spacerItem = QtWidgets.QSpacerItem(20, 9, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
        self.verticalLayout_2.addItem(spacerItem)
        # Row: "contact account" label + line edit.
        self.horizontalLayout = QtWidgets.QHBoxLayout()
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.label = QtWidgets.QLabel(self)
        self.label.setObjectName("label")
        self.horizontalLayout.addWidget(self.label)
        self.lineEdit_add = QtWidgets.QLineEdit(self)
        self.lineEdit_add.setObjectName("lineEdit")
        self.horizontalLayout.addWidget(self.lineEdit_add)
        self.verticalLayout_2.addLayout(self.horizontalLayout)
        self.horizontalLayout_3 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_3.setObjectName("horizontalLayout_3")
        # self.label_3 = QtWidgets.QLabel(self)
        # self.label_3.setObjectName("label_3")
        # self.horizontalLayout_3.addWidget(self.label_3)
        # self.comboBox = QtWidgets.QComboBox(self)
        # self.comboBox.setObjectName("comboBox")
        # self.horizontalLayout_3.addWidget(self.comboBox)
        self.verticalLayout_2.addLayout(self.horizontalLayout_3)
        self.verticalLayout = QtWidgets.QVBoxLayout()
        self.verticalLayout.setObjectName("verticalLayout")
        self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_2.setObjectName("horizontalLayout_2")
        self.verticalLayout.addLayout(self.horizontalLayout_2)
        self.verticalLayout_2.addLayout(self.verticalLayout)
        spacerItem1 = QtWidgets.QSpacerItem(20, 10, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
        self.verticalLayout_2.addItem(spacerItem1)
        # Standard OK/Cancel button box at the bottom of the dialog.
        self.buttonBox = QtWidgets.QDialogButtonBox(self)
        self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Ok)
        self.buttonBox.setObjectName("buttonBox")
        self.verticalLayout_2.addWidget(self.buttonBox)
        self.retranslate_ui()
        QtCore.QMetaObject.connectSlotsByName(self)
    def retranslate_ui(self):
        """Apply the translated (Chinese) UI strings."""
        _translate = QtCore.QCoreApplication.translate
        self.setWindowTitle(_translate("self", "添增联系人"))
        self.setWhatsThis(_translate("self", "新增联系人"))
        self.label.setText(_translate("self", "联系人帐号"))
        # self.label_3.setText(_translate("self", "联系人分组"))
if __name__ == '__main__':
    # Manual test: show the dialog standalone.
    app = QApplication([])
    s = Add_friend_Dialog()
    s.setup_ui()
    s.show()
    import sys
    sys.exit(app.exec_())
"1096345766@qq.com"
] | 1096345766@qq.com |
bc60456546797e88828e2cb026b3f92362c9d68c | d85f63f93dd5f48eef383d70d8d45d1d4489a602 | /rti_opera.py | 811d09abdf26d12ce4ce806f933e29753441340f | [] | no_license | songzhenhua/rti_opera | 2cd580b7a8465bce6bba233882d65f38939c3b2b | b7a9a5edbbf9d1e9a429dc6100cc579b1ff775f0 | refs/heads/master | 2021-07-11T21:44:19.998168 | 2020-06-21T08:48:38 | 2020-06-21T08:48:38 | 166,568,547 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,932 | py | # coding=utf-8
from bs4 import BeautifulSoup
import requests
import os
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
file_path = os.getcwd() + '\\' # 下载目录
domain = "https://cn.rti.tw"
# 获取广播剧下载链接
def get_url_list():
    """Scrape every radio drama's episode download links from cn.rti.tw and
    save one .txt of links per drama (Python 2 script)."""
    novel_list_resp = requests.get('https://cn.rti.tw/radio/novelList')
    opera_soup = BeautifulSoup(novel_list_resp.text, "lxml")
    # One div block per radio drama on the listing page.
    for div in opera_soup.find_all("div", class_="program-item"):
        result = ''
        # Link to the drama's own page.
        opera_link = domain + div.find("a").get('href')
        # The drama's title.
        title = div.find("div", class_="title").string
        print '当前爬取广播剧:' + title
        # Open the page for this single drama.
        novel_view_resp = requests.get(opera_link)
        view_soup = BeautifulSoup(novel_view_resp.text, "lxml")
        # The <ul> right after the first <h2> lists every episode link.
        list_a = view_soup.find('h2').find_next_sibling('ul').find_all('a')
        num = 1
        for a in list_a:
            view_link = domain + a.get('href')
            print '获取%s单集链接%s' % (title, view_link)
            # Open the episode's playback page to find the media source.
            play_resp = requests.get(view_link)
            play_soup = BeautifulSoup(play_resp.text, "lxml")
            src = play_soup.find('source').attrs['src']
            print '获取%s%s下载链接%s' % (title, num, src)
            # Accumulate every download link for this drama.
            result += "%s%s:%s\n" % (title, str(num), src)
            num += 1
        # Persist all of this drama's download links to a .txt file.
        _save_src(title, result)
        print '保存%s链接完毕' % title
def _save_src(name, content):
    """Write the collected links for one drama to <download dir>/<name>.txt.

    NOTE(review): 'wb' with a str works because this is Python 2 (str is
    bytes); under Python 3 the content would need encoding first.
    """
    name = file_path + name + '.txt'
    with open(name, 'wb') as f:
        f.write(content)
def download_opera(opera):
# 保存下载链接的txt文件路径
path = r'' + file_path + opera
# 文件名有中文,需要解码为unicode
path = path.decode('utf-8')
# 将下载链接全部读出来
with open(path, 'rb') as f:
links = f.readlines()
# 循环下载
for link in links:
name, url = link.split(':', 1)
name = name.decode('utf-8')
url = url.split('\n')[0]
# 下载MP4文件的路径
file_name = "%s%s.mp4" % (file_path, name)
print file_name, url
_download_file(file_name, url)
print "%s下载完毕" % name
def _download_file(name, url):
r = requests.get(url)
with open(name, 'wb') as f:
f.write(r.content)
if __name__ == '__main__':
# 获取所有广播剧下载链接并保存成一个个txt
# get_url_list()
# 单独下载某个广播剧(其实可以在抓下载链接的时候就下载,但我得先试听一集感兴趣才下载哦)
download_opera('冰窟窿.txt') | [
"22459496@qq.com"
] | 22459496@qq.com |
55a3ea9dd99ff3bd699c788ab07cea3e89d23de7 | 3f73ce74b6fdfb7966abb71a98f4986edd727c5f | /lib/pandas_option.py | 0db90923fd1ec5c8f68491c947fc9cd7b40b1acc | [
"MIT"
] | permissive | yuta-komura/amateras | 9c2efd310b18f159b1354864d65f9894ab93737f | cf8cc8fe0b5d8c382090fd1784a3ce96e6953157 | refs/heads/master | 2023-01-21T19:57:18.763894 | 2020-11-25T04:02:28 | 2020-11-25T04:02:28 | 297,432,974 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 238 | py | import pandas as pd
def display_max_columns():
pd.options.display.max_columns = None
def display_max_rows():
pd.options.display.max_rows = None
def display_round_down():
pd.options.display.float_format = '{:.2f}'.format
| [
"you@example.com"
] | you@example.com |
c953a6363d118b9c0af28b2b82293a506c2a1d22 | b689ba9dda8815907c33b9e83ed3a00d0a4f4950 | /tweeter_sentiment.py | b11b0f1f3214fe0c58c8c95fc7c4f2e013a6a951 | [] | no_license | rajvseetharaman/Twitter_Sentiment_Analysis | c3c271a683aa0760334a14c97426bb1848b37458 | 81a87908a859c744834378d2a10f10c17af4b390 | refs/heads/master | 2021-01-20T10:10:34.872301 | 2017-05-05T23:19:59 | 2017-05-05T23:19:59 | 90,331,473 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,993 | py | #importing the required data variables
from data.uw_ischool_sample import SAMPLE_TWEETS
from data.sentiments_nrc import EMOTIONS
from data.sentiments_nrc import SENTIMENTS
#importing needed modules
import re
from functools import reduce
import json
import requests
def text_split(text_string):
"""This function takes as input a string and return as output the list of words in the string in lower case having length greater than 1"""
#getting all the words in the string
words=re.compile('\w+').findall(text_string)
#converting all words to lowercase and filtering out words smaller in length than 2 characters
wd=[word.lower() for word in words if len(word)>1]
return wd
def has_emotion(wordlist,emotion):
"""This function takes as input a list of words and an emotion and returns as output a list of words which have that emotion"""
#for each word in the list of words, check the sentiments dictionary for emotions corresponding to each word
#If the given emotion is in the list of emotions for a word, add it to the list of words to be returned
newlist=[word for word in wordlist if SENTIMENTS.get(word,None)!=None if emotion in SENTIMENTS.get(word,None)]
return newlist
def word_emotion_map(wordlist):
"""This function takes as input a list of words and returns a dictionary which maps each emotion to the list of words in the wordlist which contain that emotion"""
#iterate through the EMOTIONS list and for each emotion, use the has_emotion function defined to determine which words have the specified emotion
emotion_dict=dict((emotion,has_emotion(wordlist,emotion)) for emotion in EMOTIONS)
return emotion_dict
def most_common(wordlist):
"""This function takes as input a list of words and returns a list of most common words in the list"""
#dictionary which counts frequency of each word in the input list
wordfreq=dict()
#populate the word frequency dictionary
for word in wordlist:
if word not in wordfreq.keys():
wordfreq[word]=1
else:
wordfreq[word]+=1
#create a list of tuples of each word and its corresponding frequency in the wordlist
wordcount=[(k,v) for k,v in wordfreq.items()]
#sort the list in descending order based on the frequency of each word in the wordlist
wordcount_sorted=[values[0] for values in sorted(wordcount ,key= lambda x:x[1],reverse=True)]
return wordcount_sorted
def analyze_tweets(tweetslist):
"""This function takes as input the list of tweets and returns as output a list of dictionaries with the following information for each emotion- The percentage of words across all tweets that have that emotion, The most common words across all tweets that have that emotion, and The most common hashtags across all tweets associated with that emotion"""
#add the wordslist and the dictionary which maps each emotion to words having that emotion, to the tweetslist dictionary
for val in tweetslist:
val['words']=text_split(val['text'])
val['emo-words']=word_emotion_map(text_split(val['text']))
tweetstats=[]
#find all the hashtags in the tweets
hashtags=[x['text'] for y in [c['hashtags'] for c in [tweet['entities'] for tweet in SAMPLE_TWEETS] if c['hashtags']!=[]] for x in y]
#create a dictionary in the tweetstats list for each emotion which stores percent words, common example words, and common hashtags
for emotion in EMOTIONS:
#compute the percent of words which have a certain emotion
dict_percent_words=round((100*reduce(lambda x,y:x+y,[len(val['emo-words'][emotion]) for val in tweetslist]))/reduce(lambda x,y:x+y,[len(val['words']) for val in tweetslist]),2)
#find the most common words which have the emotion
dict_example_words=most_common(reduce(lambda x,y:x+y,[has_emotion(val['words'],emotion) for val in tweetslist]))
#find the most common hashtags across tweets associated with the emotion
dict_hashtags=most_common([x['text'] for y in [c['hashtags'] for c in [tweet['entities'] for tweet in tweetslist if has_emotion(text_split(tweet['text']),emotion)] if c['hashtags']!=[]] for x in y])
#append the dictionary to the list to be returned
tweetstats.append({'EMOTION':emotion,'% of WORDS':dict_percent_words,'EXAMPLE WORDS':dict_example_words,'HASHTAGS':dict_hashtags})
return tweetstats
def print_stats(tweetslist):
"""This function takes as input the list of dictionaries corresponding to the tweets analyzed and prints it in a tabular format"""
print("{0:14} {1:11} {2:35} {3}".format('EMOTION','% of WORDS','EXAMPLE WORDS','HASHTAGS'))
#iterate through each emotion and print the statistics associated with it
for v in tweetslist:
row=[val for key,val in v.items()]
print("{0:14} {1:11} {2:35} {3}".format(row[0],str(row[1])+'%',','.join(row[2][:3]),','.join(['#'+x for x in row[3][:3] ])))
def download(scrname):
"""This function takes as input the twitter username for a user and returns as output the list of dictionaries corresponding to the tweets of the user"""
#set the screen name and tweet count parameters to be passed to the requests.get method
parameters={'screen_name':scrname,'count':200}
#send the get request and load the returned json data to dictionary
r=requests.get(url='https://faculty.washington.edu/joelross/proxy/twitter/timeline/',params=parameters)
twitterdata=json.loads(r.text)
#return the list of dictionaries corresponding to the tweets
return twitterdata
def main():
#Take as input the user name
scrname=input("Enter the Twitter Screen Name-")
#if user enters SAMPLE analyze SAMPLE_TWEETS else analyze data corresponding to the user name
if scrname=='SAMPLE':
print_stats(analyze_tweets(SAMPLE_TWEETS))
else:
twitterdata=download(scrname)
print_stats(analyze_tweets(twitterdata))
if __name__ == "__main__":
main()
| [
"rajsv@uw.edu"
] | rajsv@uw.edu |
cd202e682ff3b9d377b8c4377ab4e60783099806 | 9158030a7e30bc8055040b01a5ee9078dd8c2355 | /shops/shops/settings.py | 1546fde70343cc02ab0919cb912e444c273040cf | [] | no_license | mojun01/-pc- | 93ad5ac918c257f8fa2a0b02023cf296a4459bf6 | 88b8de3d6144e82b5ff713c7ca81448b271c2297 | refs/heads/master | 2020-04-23T14:12:32.124510 | 2019-02-18T10:20:31 | 2019-02-18T10:20:31 | 171,223,652 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,511 | py | """
Django settings for shops project.
Generated by 'django-admin startproject' using Django 1.11.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '=5@fiy7cdtvupnt@38l2exh459e4^o9ug2n2)!o*-6-gpm!8xn'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'users.apps.UsersConfig',
'goods.apps.GoodsConfig',
'cart.apps.CartConfig',
'number.apps.NumberConfig',
'store.apps.StoreConfig',
'seller.apps.SellerConfig',
'order.apps.OrderConfig',
'openshop.apps.OpenshopConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'shops.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')]
,
'APP_DIRS': False,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'shops.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
'ENGINE': 'django.db.backends.mysql',
'NAME': 'shop',
'USER':'root',
'PASSWORD':'root',
'HOST':'localhost',
'PORT':'3306',
},
# 'slave': {
# # 'ENGINE': 'django.db.backends.sqlite3',
# # 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
# 'ENGINE': 'django.db.backends.mysql',
# 'NAME': 'shop',
# 'USER':'mojun',
# 'PASSWORD':'root',
# 'HOST':'localhost',
# 'PORT':'3308',
# },
}
# DATABASE_ROUTERS = ['shops.myrouter.DBRouter']
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'zh-hans'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS=(
os.path.join(BASE_DIR,"static"),
)
MEDIA_ROOT=os.path.join(BASE_DIR,'static')
MEDIA_URL="/uploads/"
#邮箱的配置
EMAIL_BACKEND='django.core.mail.backends.smtp.EmailBackend'
EMAIL_USE_TLS=False
EMAIL_USE_SSL = True
EMAIL_HOST='smtp.163.com'
EMAIL_PORT=994
EMAIL_HOST_USER='17576052970@163.com'
EMAIL_HOST_PASSWORD='yj5056'
DEFAULT_FROM_EMAIL = EMAIL_HOST_USER
EMAIL_FROM = '17576052970@163.com'
# change master to MASTER_HOST='127.0.0.1',MASTER_PORT=3307,MASTER_USER='repl',MASTER_PASSWORD='123456',master_log_file='mysql-bin.000006',master_log_pos=287497; | [
"1151804863@qq.com"
] | 1151804863@qq.com |
02935cbf796a180e2cd32e13cbae2a022054fd52 | 011edcff9b70f9bea31a8494b813d1fda248bf7c | /config.py | 663c478c698adc849d49a14613e55a0dd6222489 | [] | no_license | ryanxjhan/rotten-grapes-1.0 | 8991bbb312c95dfb78a346c152431da691826a26 | c1ff183ef4389c9b71555abd06ff058f25dd8256 | refs/heads/master | 2022-11-23T17:19:09.610428 | 2020-08-02T23:30:29 | 2020-08-02T23:30:29 | 284,221,870 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 79 | py | DEBUG = True
SQLALCHEMY_DATABASE_URI = 'mysql://root:hxjsgr33@127.0.0.1/wines'
| [
"noreply@github.com"
] | noreply@github.com |
f046ae87fb073cd2d95514cd604d366908002864 | c52de2621221c83025553c5103378b351bfdd10b | /10/10_12.py | a6ca29a3521894b2a8d74fbeba8bcdcec23bdb27 | [] | no_license | become-hero/mypython | 2b945ab2af091ce516e2569866e036c3a3b71ba9 | 81d64fa282aebde041b41bbe31c78f8caa1c5eec | refs/heads/master | 2020-10-01T13:50:45.887972 | 2019-12-18T05:03:15 | 2019-12-18T05:03:15 | 227,550,898 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 99 | py | import re
str="\python"
rs=re.match("\\\\\w+",str)
print(str)
rs=re.match(r"\\\w+",str)
print(str)
| [
"1772040722@qq.com"
] | 1772040722@qq.com |
474c5c6d151d1b4d7a8b912ad1a74352ab53ca44 | 988205a1bc674399272d2b77f06bb6ae2c0b96ab | /student/urls.py | 0b3401187a6dbb34b251f3f7cd4c6d64ac2fea80 | [] | no_license | safwanvk/as | 7797f63f84d52ba2857e99ae99b4aa5b9fd67983 | 1de9e1a6855a0b3f16ffdb4d693cd90579a37e40 | refs/heads/main | 2023-04-01T14:44:18.047169 | 2021-03-31T07:03:15 | 2021-03-31T07:03:15 | 349,309,666 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 249 | py | from django.urls import path
from .views import *
urlpatterns = [
path('login', student_login),
path('session-check', session_check),
path('create', create_student),
path('delete/<pk>', delete_student),
path('', get_student)
]
| [
"safwanvalakundil@gmail.com"
] | safwanvalakundil@gmail.com |
8b3afd8f0e28c91b5b2a7f89ccc1160556649db8 | 815d85a3248580b05017c3be7d2b9c26ffcacc4d | /CODE/RI.py | 9fdb069ddd69551c1f0eada252c0f72d248549fc | [] | no_license | jfitz02/DM-solver | 1ec21a610500766dfa3bef0e815dac1ae2ef94ff | a0a8cf6614fa3ecea30fef8d99bff351df54352b | refs/heads/main | 2023-06-03T03:02:43.583326 | 2021-06-21T15:15:01 | 2021-06-21T15:15:01 | 378,973,388 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,211 | py | import sys #Used to make the largest possible integer
from Matrix import Matrix #Personally made class representing the Matrix datatype
def all_pairs(lst): #returns a permutation of all possible pairings of nodes
if len(lst) < 2:
yield []
return
if len(lst) % 2 == 1:
for i in range(len(lst)):
for result in all_pairs(lst[:i] + lst[i+1:]):
yield result
else:
a = lst[0]
for i in range(1,len(lst)):
pair = (a,lst[i])
for rest in all_pairs(lst[1:i]+lst[i+1:]):
yield [pair] + rest
class RI(Matrix): #route inspection class that solves the problem
def __init__(self, matrix):
Matrix.__init__(self, matrix)
self.completed_matrix = self.complete_matrix() #creates a completed matrix
def solve(self): #solves the problem and returns the distance
odd_rows = self.get_odd_row_indexes()
dist = self.min_distance(odd_rows)
dist+=self.total_distance()
return dist
def get_odd_row_indexes(self): #finds all nodes with an odd degree
rows = []
for index, row in enumerate(self.matrix):
counter = 0
for val in row:
if val > 0:
counter += 1
if counter%2 != 0:
rows.append(index)
return rows
def total_distance(self): #calculates the total distance of the graph (without added arcs)
total = 0
for row in self.matrix:
for value in row:
if value>0:
total+=value
return int(total/2)
def min_distance(self, rows): #finds the minimum distance between all arrangements of pairs of nodes
perms = list(all_pairs(rows))
min_dist = sys.maxsize
for perm in perms:
dist = 0
for pair in perm:
dist += self.completed_matrix[pair[0]][pair[1]]
if dist<min_dist:
min_dist = dist
return min_dist
| [
"noreply@github.com"
] | noreply@github.com |
acbd735fcc185b617e53205179ff8c253af5c2de | c85cfd004c7d34c54c431c51d6e7b655574f3fe0 | /Service/urls.py | 68fe11e10f4378797538ddf7366d8c91df65c8f3 | [] | no_license | wuxinxi/MallService | e583a1258291e451c34d24501af87cb151a6ec4c | 1f06d62740702e0c754fc395774d9146839f0bbd | refs/heads/master | 2020-12-01T20:21:48.340595 | 2020-03-21T06:41:42 | 2020-03-21T06:41:42 | 230,757,688 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 941 | py | """Service URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('admin/', admin.site.urls),
path('mall/', include('shopping.urls'))
]+ static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"996489865@qq.com"
] | 996489865@qq.com |
b4143060947db0249cb4b379bf6a879771385ca7 | cf111b440f33ba9741ff45c60ac33dfade24e2ac | /Projects/Autocal/attic/autocal-20100708/libautocal/autocal.py | 2c143bfd55996a5b498ff00611aacad771a6a1c2 | [
"Unlicense"
] | permissive | fredmorcos/attic | cd08e951f56c3b256899ef5ca4ccd030d3185bc1 | 36d5891a959cfc83f9eeef003b4e0b574dd7d7e1 | refs/heads/master | 2023-07-05T10:03:58.115062 | 2023-06-21T22:55:38 | 2023-06-22T07:07:58 | 154,962,425 | 4 | 1 | Unlicense | 2023-06-22T07:08:00 | 2018-10-27T12:30:38 | JavaScript | UTF-8 | Python | false | false | 795 | py | #!/usr/bin/env python
import sys
import config, optimize, loader
_help = '''AutoCal 0.1 - Fred Morcos <fred.morcos@gmail.com>
Usage: ./autocal.py [COMMANDS] < <input-file>
Commands:
\t--qt\t\t\tShow the Qt user interface.
\t--verbose,-v\t\tShow debug output.
\t--quiet,-q\t\tDo not output errors.
\t--help,-h\t\tShow this help.
'''
if __name__ == '__main__':
for a in sys.argv:
if a == '--verbose' or a == '-v':
config.debug = True
elif a == '--quiet' or a == '-q':
config.verbose_error = False
elif a == '--help' or a == '-h':
print _help
sys.exit(0)
elif a == '--qt':
from autocalqt import qt_start
qt_start()
sys.exit(0)
input_data = ''
for line in sys.stdin:
input_data += line
s = loader.load(input_data)
s = optimize.start(s)
print loader.save(s)
| [
"fred.morcos@gmail.com"
] | fred.morcos@gmail.com |
444a7786b5c7a8bd07929aceea4b8c0e8d44e648 | a7853b95403b527a527f58cc4b94783161eaaa1d | /graph/graph_drawing_objective.py | 40aa76c8871f0204c2b489d8310a9b482574fa7a | [] | no_license | jinnaiyuu/covering-options | 6c3d2b2818d4074893d2eb1eed72fb77920bb3c3 | 00539a00842d40ba2b397496ec351a683f43d38f | refs/heads/master | 2020-12-19T23:54:41.080277 | 2020-01-23T21:33:21 | 2020-01-23T21:33:21 | 235,890,627 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,714 | py | import numpy as np
import itertools
from scipy.optimize import minimize
from options.util import GetRandomWalk
def Attr(rho, P, fs):
# rho: numpy array of size N. prob.
# P : numpy array of size NxN. each row being a prob.
# F : list of numpy arrays of size N (TODO: should this be a numpy array?)
ret = 0.0
N = rho.shape[0]
for u in range(N):
for v in range(N):
prob = rho[u] * P[u, v]
# ret += (F[u] - F[v]) * (F[u] - F[v]) # TODO: 1 dimensional for now
for f in fs:
ret += (f[u] - f[v]) * (f[u] - f[v])
return ret / 2.0
def Repl(rho, P, delta, fs):
ret = 0.0
N = rho.shape[0]
for u in range(N):
for v in range(N):
prob = rho[u] * rho[v] # For repulsive term, we take exp. over rhos.
for j in range(len(fs)):
for k in range(j, len(fs)):
f1 = fs[j]
f2 = fs[k]
if j == k:
res = delta
else:
res = 0
ret += (f1[u] * f2[u] - res) * (f1[v] * f2[v] - res)
return ret
def GraphDrawingObjective(rho, P, delta, beta):
# TODO: delta should be a function instead of a constant value
N = rho.shape[0]
def GDO(F):
fs = []
for k in range(int(F.shape[0] / N)):
f = F[N * k:N * (k+1)]
fs.append(f)
return Attr(rho, P, fs) + beta * Repl(rho, P, delta, fs)
return GDO
if __name__ == "__main__":
# rho = np.array([0.25, 0.50, 0.25])
# P = np.array([[0.0, 1.0, 0.0],
# [0.5, 0.0, 0.5],
# [0.0, 1.0, 0.0]])
rho = np.full(9, 1.0/9.0, dtype=float)
A = np.zeros((9, 9), dtype=float)
A[0, 1] = 1.0
A[0, 3] = 1.0
A[1, 0] = 1.0
A[1, 2] = 1.0
A[1, 4] = 1.0
A[2, 1] = 1.0
A[2, 5] = 1.0
A[3, 0] = 1.0
A[3, 4] = 1.0
A[3, 6] = 1.0
A[4, 1] = 1.0
A[4, 3] = 1.0
A[4, 5] = 1.0
A[4, 7] = 1.0
A[5, 2] = 1.0
A[5, 4] = 1.0
A[5, 8] = 1.0
A[6, 3] = 1.0
A[6, 7] = 1.0
A[7, 4] = 1.0
A[7, 6] = 1.0
A[7, 8] = 1.0
A[8, 5] = 1.0
A[8, 7] = 1.0
P = GetRandomWalk(A)
print('P=', P)
delta = 0.1
beta = 5.0
GDO_fn = GraphDrawingObjective(rho, P, delta, beta)
dim = 3
x0 = np.full(int(rho.shape[0]) * dim, 0.1)
res = minimize(GDO_fn, x0, method='nelder-mead')
sol = res.x.reshape((dim, int(rho.shape[0])))
print('solution=\n', sol)
# gdo_val = GDO_fn([f1, f2])
# print('gdo=', gdo_val)
# For our purpose, we want to draw an edge from minimum to maximum.
| [
"ddyuudd@gmail.com"
] | ddyuudd@gmail.com |
c50a6d74bb8cdeebb6c3eb697c009993a03cf34d | 484b3833b278db5fcbad66c37dbca8365364931e | /pythonFile/COVID-19-map/day6-词云.py | 2ea9b508af703cd0d73a1a9e0c19ee9c0590c35b | [] | no_license | tanHaLiLuYa/newRp | 2ece1530165f0b4e0bf3cb62cb6b1b240e4004b5 | 3b5ed6b6b8aae15d8b4228cd019ded8fa36d941f | refs/heads/master | 2022-03-12T08:58:59.122836 | 2022-02-07T07:29:11 | 2022-02-07T07:29:11 | 228,558,262 | 0 | 0 | null | 2021-09-22T06:10:29 | 2019-12-17T07:27:34 | Python | UTF-8 | Python | false | false | 800 | py | from jieba.analyse import *
from pyecharts.charts import WordCloud
import os
os.chdir(r"E:\work\tpp\samsung\2021年\07月\W27")
with open('新建文本文档.txt',encoding="utf-8") as f:
data = f.read()
dataAnlysed=[]
for keyword, weight in textrank(data, withWeight=True,topK=11):
if keyword =="程序":
keyword="小程序"
dataAnlysed.append((keyword,weight))
dataAnlysed1 = [x for x in dataAnlysed if not (x[0] in ["督导"])]
# dataAnlysed1 = [x for x in dataAnlysed if not (x[0] in ["对比","方面","苹果","用户","手机","介绍","支持","没有","效果","优势"] )]
# # print(dataAnlysed)
print(dataAnlysed1)
wordcloud = WordCloud ()
wordcloud.add( "", dataAnlysed1,shape="cardioid" ,word_size_range=[20,100],rotate_step=180)
wordcloud.render( 'q1.html') | [
"tanpeng.hyz@gmail.com"
] | tanpeng.hyz@gmail.com |
6ec05d3bc0cd2747d542611cb02e8455d14a425b | 4e0f2938b003f5d68a57f213e652fbffb2f72ba2 | /venv/Lib/site-packages/cx_OracleObject/Utils.py | e9107e5e7501b1942bcd28955a5ca2210e161f27 | [] | no_license | adcGG/Lianxi | e4b1ce0d3cfc76e625e1e1caca0a58f25ba5d692 | 3659c3ca11a13b4ad54dbd2e669949701bae10b5 | refs/heads/master | 2022-12-13T05:45:41.312292 | 2019-08-14T07:38:19 | 2019-08-14T07:38:19 | 201,189,540 | 0 | 1 | null | 2022-04-22T22:08:16 | 2019-08-08T06:07:53 | Python | UTF-8 | Python | false | false | 6,568 | py | """Defines utility functions."""
import cx_Exceptions
import sys
__all__ = [ "OrderObjects" ]
def ClausesForOutput(clauses, firstString, restString, joinString):
"""Return a list of clauses suitable for output in a SQL statement."""
if not clauses:
return ""
joinString = joinString + "\n" + restString
return firstString + joinString.join(clauses)
def DependenciesOfInterest(key, objectsOfInterest, dependencies,
dependenciesOfInterest):
"""Return a list of dependencies on objects of interest."""
if key in dependencies:
for refKey in dependencies[key]:
if refKey in objectsOfInterest:
dependenciesOfInterest[refKey] = None
else:
DependenciesOfInterest(refKey, objectsOfInterest, dependencies,
dependenciesOfInterest)
def OrderObjects(objects, dependencies):
"""Put the objects in the order necessary for creation without errors."""
# initialize the mapping that indicates which items this object depends on
iDependOn = {}
dependsOnMe = {}
for key in objects:
iDependOn[key] = {}
dependsOnMe[key] = {}
# populate a mapping which indicates all of the dependencies for an object
mappedDependencies = {}
for owner, name, type, refOwner, refName, refType in dependencies:
key = (owner, name, type)
refKey = (refOwner, refName, refType)
subDict = mappedDependencies.get(key)
if subDict is None:
subDict = mappedDependencies[key] = {}
subDict[refKey] = None
# now populate the mapping that indicates which items this object depends on
# note that an object may depend on an object which is not in the list of
# interest, but it itself depends on an object which is in the list so the
# chain of dependencies is traversed until no objects of interest are found
for key in iDependOn:
refKeys = {}
DependenciesOfInterest(key, iDependOn, mappedDependencies, refKeys)
for refKey in refKeys:
iDependOn[key][refKey] = None
dependsOnMe[refKey][key] = None
# order the items until no more items left
outputObjs = {}
orderedObjs = []
while iDependOn:
# acquire a list of items which do not depend on anything
references = {}
keysToOutput = {}
for key, value in list(iDependOn.items()):
if not value:
owner, name, type = key
if owner not in keysToOutput:
keysToOutput[owner] = []
keysToOutput[owner].append(key)
del iDependOn[key]
else:
for refKey in value:
owner, name, type = refKey
if owner not in references:
references[owner] = 0
references[owner] += 1
# detect a circular reference and avoid an infinite loop
if not keysToOutput:
keys = list(iDependOn.keys())
keys.sort()
for key in keys:
print("%s.%s (%s)" % key, file = sys.stderr)
refKeys = list(iDependOn[key].keys())
refKeys.sort()
for refKey in refKeys:
print(" %s.%s (%s)" % refKey, file = sys.stderr)
raise CircularReferenceDetected()
# for each owner that has something to describe
while keysToOutput:
# determine the owner with the most references
outputOwner = ""
maxReferences = 0
keys = list(references.keys())
keys.sort()
for key in keys:
value = references[key]
if value > maxReferences and key in keysToOutput:
maxReferences = value
outputOwner = key
if not outputOwner:
for key in keysToOutput:
outputOwner = key
break
# remove this owner from the list
keys = keysToOutput[outputOwner]
del keysToOutput[outputOwner]
if outputOwner in references:
del references[outputOwner]
# process this list, removing dependencies and adding additional
# objects
tempKeys = keys
keys = []
while tempKeys:
nextKeys = []
tempKeys.sort()
for key in tempKeys:
refKeys = list(dependsOnMe[key].keys())
refKeys.sort()
for refKey in dependsOnMe[key]:
del iDependOn[refKey][key]
if not iDependOn[refKey]:
owner, name, type = refKey
if owner == outputOwner:
del iDependOn[refKey]
nextKeys.append(refKey)
elif owner in keysToOutput:
del iDependOn[refKey]
keysToOutput[owner].append(refKey)
keys += tempKeys
tempKeys = nextKeys
# output the list of objects that have their dependencies satisfied
for key in keys:
if key not in outputObjs:
orderedObjs.append(key)
outputObjs[key] = None
# return the ordered list
return orderedObjs
def SetOptions(obj, options):
"""Set values from the options on the command line."""
if options:
for attribute in dir(options):
if attribute.startswith("_"):
continue
if hasattr(obj, attribute):
value = getattr(options, attribute)
if isinstance(value, list):
value = [s for v in value for s in v.split(",")]
setattr(obj, attribute, value)
def SizeForOutput(size):
"""Return the size suitable for output in a SQL statement. Note that a
negative size is assumed to be unlimited."""
if size < 0:
return "unlimited"
kilobytes, remainder = divmod(size, 1024)
if not remainder:
megabytes, remainder = divmod(kilobytes, 1024)
if not remainder:
return "%gm" % megabytes
else:
return "%gk" % kilobytes
else:
return "%g" % size
class CircularReferenceDetected(cx_Exceptions.BaseException):
message = "Circular reference detected!"
| [
"979818137@11.com"
] | 979818137@11.com |
ba3998b43bf31d1e74ec6b6dd9ae68c2e280d58c | b2c7e3e711eac3858175c053ac81244751828bee | /show_scraper/items.py | b1bbf0fa30189a6e17f9123f36492e3f6632c277 | [
"MIT"
] | permissive | postliminary/show-scraper | 93e74a6ed01a753199124b5c1aa011a61f3d66ee | fe5e4b8580b3e2f2f1c55b9bb023f826e2b64221 | refs/heads/master | 2016-09-03T06:39:20.806496 | 2014-08-21T05:20:38 | 2014-08-21T05:25:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 425 | py | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class Show(scrapy.Item):
id = scrapy.Field()
title = scrapy.Field()
desc = scrapy.Field()
airday = scrapy.Field()
startdate = scrapy.Field()
#convention for images pipeline
image_urls = scrapy.Field()
images = scrapy.Field()
| [
"thomas@postliminary.com"
] | thomas@postliminary.com |
c583e85a4941db164985b6a0183b73927b75c83d | 48d86947d5f3b5896c4a05cfcddcff01582a26ef | /amnesia/number/forms.py | fa9f1b18200101af1e9d576c327b0b05b5d1afa4 | [] | no_license | pratulyab/amnesia | 181874288c97fbf7e73d10c64e214c2a17574773 | 6b0b3428a27f98e0e2f6bb8aefdc8a4459e7b8cc | refs/heads/master | 2021-01-20T12:49:16.592335 | 2017-05-07T20:38:06 | 2017-05-07T20:38:06 | 90,409,855 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,969 | py | from django import forms
from django.contrib.auth import authenticate
from django.contrib.auth import password_validation
from django.core.exceptions import ValidationError
from django.core import validators
from django.db.utils import IntegrityError
from django.utils.translation import ugettext_lazy as _
from number.models import PhoneNumber
from sms import lookup_number
from material import *
class PhoneNumberForm(forms.ModelForm):
calling_code = forms.CharField(label=_('Calling Code'), widget=forms.TextInput(attrs={'maxlength': 4}))
def __init__(self, *args, **kwargs):
super(PhoneNumberForm, self).__init__(*args, **kwargs)
self.fields['number'].validators = [validators.RegexValidator(r'^\d{10}$')]
def clean(self, *args, **kwargs):
super(PhoneNumberForm, self).clean(*args, **kwargs)
if self.cleaned_data.get('number', ''):
phone_number = self.cleaned_data.get('calling_code', '') + self.cleaned_data['number']
if not lookup_number(phone_number, self.cleaned_data['country'].code):
raise forms.ValidationError(_('Not a valid number according to Twilio\'s Lookup API'))
return self.cleaned_data
def save(self, commit=True, *args, **kwargs):
obj = super(PhoneNumberForm, self).save(commit=False, *args, **kwargs)
if not self.cleaned_data.get('calling_code', '') or kwargs.get('calling_code', ''):
raise forms.ValidationError(_('Calling code is required.'))
if not obj.country.calling_code:
obj.country.calling_code = self.cleaned_data['calling_code'] if self.cleaned_data.get('calling_code', '') else kwargs['calling_code']
if commit:
try:
obj.save()
except (ValidationError, IntegrityError):
raise forms.ValidationError(_('Error Occurred. User with this number has already registered.'))
return obj
class Meta:
model = PhoneNumber
fields = ['country', 'number']
help_texts = {
'number': 'Make sure to enter a valid 10 digit number. It will be verified using Twilio\'s Lookup API',
}
| [
"pratulyabubna@outlook.com"
] | pratulyabubna@outlook.com |
e081d80d4bc9b743d68ee75c12ff466ca80782b3 | 6f72a42c897eaa3ecb45312867b3e17a570bbd43 | /multimodal/fusion.py | e1edd483881e0ea6291d204d51457e8ff8bae5b3 | [] | no_license | ppeng/cheem-omg-empathy | 189483a8eeca3a64468dcbc5ff12ebd0a3372f4c | 92a6aa97e4e6ac86fdc690a54333e269a98bfb41 | refs/heads/master | 2020-04-13T03:54:44.126823 | 2018-12-13T03:01:39 | 2018-12-13T03:01:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,574 | py | """"Decision-level fusion through support vector regression (SVR)."""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import os
import joblib
import pandas as pd
import numpy as np
from sklearn import svm
from sklearn.model_selection import ParameterGrid
from datasets import OMGFusion
def eval_ccc(y_true, y_pred):
    """Concordance correlation coefficient between two value series."""
    mu_t, mu_p = np.mean(y_true), np.mean(y_pred)
    var_t, var_p = np.var(y_true), np.var(y_pred)
    # Population covariance (bias=True divides by N, matching np.var).
    covar = np.cov(y_true, y_pred, bias=True)[0][1]
    return 2 * covar / (var_t + var_p + (mu_p - mu_t) ** 2)
def load_data(train_dir, test_dir, in_dirs, in_names):
    """Load the train and test OMGFusion datasets plus their concatenation."""
    print("Loading data...")
    train_set = OMGFusion(in_names,
                          [os.path.join(train_dir, d) for d in in_dirs],
                          os.path.join(train_dir, "Annotations"))
    test_set = OMGFusion(in_names,
                         [os.path.join(test_dir, d) for d in in_dirs],
                         os.path.join(test_dir, "Annotations"))
    combined = train_set.join(test_set)
    print("Done.")
    return train_set, test_set, combined
def train(train_data, test_data):
    """Grid-search an SVR over the training data; return the best model.

    Returns (best_ccc, best_params, best_model, best_pred) for the
    hyper-parameter setting with the highest CCC on the test set.
    """
    # Stack every training sequence into one design matrix / target vector.
    feats, targets = zip(*train_data)
    feats = np.concatenate(feats)
    targets = np.concatenate(targets).flatten()

    # Hyper-parameter grid for support vector regression.
    grid = list(ParameterGrid({
        'gamma': ['auto'],
        'C': [0.01],  # [1e-3, 0.01, 0.03, 0.1, 0.3, 1.0],
        'kernel': ['rbf']
    }))

    best_ccc, best_params, best_model, best_pred = -1, None, None, None
    for p in grid:
        print("Using parameters:", p)
        print("Fitting SVR model...")
        model = svm.SVR(kernel=p['kernel'], C=p['C'], gamma=p['gamma'],
                        epsilon=0.1, cache_size=1000, tol=1e-2)
        model.fit(feats, targets)
        # Score this setting on the held-out set.
        ccc, predictions = evaluate(model, test_data)
        if ccc > best_ccc:
            best_ccc, best_params = ccc, p
            best_model, best_pred = model, predictions

    print('---')
    print('Best CCC: {:0.3f}'.format(best_ccc))
    print('Best parameters:', best_params)
    return best_ccc, best_params, best_model, best_pred
def evaluate(model, test_data):
    """Score *model* on every test sequence; return (mean CCC, predictions)."""
    print("Evaluating...")
    total_ccc = 0
    predictions = []
    for i, (X_test, _) in enumerate(test_data):
        # Compare against the original (un-downsampled) valence annotations.
        y_true = test_data.val_orig[i].flatten()
        y_pred = model.predict(X_test)
        # Upsample predictions back to the annotation rate, then pad the tail
        # by repeating the last predicted values.
        y_pred = np.repeat(y_pred, test_data.time_ratio)[:len(y_true)]
        shortfall = len(y_true) - len(y_pred)
        if shortfall > 0:
            y_pred = np.concatenate([y_pred, y_pred[-shortfall:]])
        total_ccc += eval_ccc(y_true, y_pred)
        predictions.append(y_pred)
    total_ccc /= len(test_data)
    print('CCC: {:0.3f}'.format(total_ccc))
    return total_ccc, predictions
def save_predictions(pred, dataset, path):
    """Write one CSV of valence predictions per (subject, story) pair."""
    if not os.path.exists(path):
        os.makedirs(path)
    for series, subj, story in zip(pred, dataset.subjects, dataset.stories):
        frame = pd.DataFrame(series, columns=['valence'])
        out_name = "Subject_{}_Story_{}.csv".format(subj, story)
        frame.to_csv(os.path.join(path, out_name), index=False)
def main(train_data, test_data, args):
    """Train a new fusion model, or evaluate a saved one when --test is set."""
    if args.test is None:
        # No saved model supplied: run the grid search and keep the winner.
        ccc, params, model, pred = train(train_data, test_data)
        if not os.path.exists(args.model_dir):
            os.makedirs(args.model_dir)
        joblib.dump(model, os.path.join(args.model_dir, "best.save"))
        save_predictions(pred, test_data, os.path.join(args.pred_dir, "pred_test"))
        return ccc
    # Otherwise load the saved model and score it on both splits.
    model = joblib.load(args.test)
    print("-Training-")
    ccc_train, pred = evaluate(model, train_data)
    save_predictions(pred, train_data, os.path.join(args.pred_dir, "pred_train"))
    print("-Testing-")
    ccc_test, pred = evaluate(model, test_data)
    save_predictions(pred, test_data, os.path.join(args.pred_dir, "pred_test"))
    return ccc_train, ccc_test
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('in_dirs', type=str, nargs='+', metavar='DIR',
                        help='paths to input features')
    parser.add_argument('--in_names', type=str, nargs='+', metavar='NAME',
                        help='names for input features')
    # NOTE(review): the help text says "default: True" but default=False --
    # confirm which is intended.
    parser.add_argument('--normalize', action='store_true', default=False,
                        help='whether to normalize inputs (default: True)')
    parser.add_argument('--test', type=str, default=None,
                        help='path to model to test (default: None)')
    parser.add_argument('--test_set', type=int, default=None, nargs='+',
                        help='stories to use as test set (optional)')
    parser.add_argument('--train_dir', type=str, default="data/Training",
                        help='base folder for training data')
    parser.add_argument('--test_dir', type=str, default="data/Validation",
                        help='base folder for testing data')
    parser.add_argument('--model_dir', type=str, default="./fusion_models",
                        help='path to save models')
    parser.add_argument('--pred_dir', type=str, default="./fusion_pred",
                        help='path to save predictions')
    args = parser.parse_args()
    # Construct modality names from directory basenames if not provided
    if args.in_names is None:
        args.in_names = [os.path.basename(d).lower() for d in args.in_dirs]
    # Load data
    train_data, test_data, all_data =\
        load_data(args.train_dir, args.test_dir, args.in_dirs, args.in_names)
    print('---')
    # Normalize inputs
    if args.normalize:
        all_data.normalize()
    # Make new train/test split when specific stories are held out
    if args.test_set is not None:
        test_data, train_data = all_data.extract(stories=args.test_set)
    # Continue to rest of script
    main(train_data, test_data, args)
| [
"tanqazx@gmail.com"
] | tanqazx@gmail.com |
d0637ddbbb14a19bf3d057a9674c8546a808bd8b | 7a5cfbbe73766facd4b71c366f7b7a5ca9dc9b62 | /code_app/urls.py | dff4c87697b51d5f2585e05a1420b0049fe7b270 | [] | no_license | ma76/gitir | cfb9e88b58742eb1246a5042f5c9fed931ba21c4 | 0109a119fa52ebff8ecb6c2f13d74f2c5bde0d36 | refs/heads/master | 2023-08-20T02:24:57.131801 | 2021-10-28T13:34:48 | 2021-10-28T13:34:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 341 | py | from django.urls import path, include
from code_app.views import *
urlpatterns = [
path('codes', UserCodeList.as_view()),
path('codes/search', Search.as_view()),
path('codes/<int:pk>/<str:user_name>/<repository>/<str:project_name>', UserCodeShower),
path('upload-file', UploadForm),
path('mycode', MyCode.as_view()),
]
| [
"comail205076@gmail.com"
] | comail205076@gmail.com |
a9f8aceeef8fee4a5a9563226052770bf025849b | 35e842f235768138cf161293881cef16c02c76af | /agents/tensorflow_abalone.py | c42adcb6a569e4a0d47067f86f232af3f0bcdf30 | [
"MIT"
] | permissive | yamamototakas/fxtrading | b90bb75fd294e948c6da3a5e534c8af1076bc645 | 955d247b832de7180b8893edaad0b50df515809f | refs/heads/master | 2020-06-17T05:48:12.934679 | 2017-08-05T12:46:36 | 2017-08-05T12:46:36 | 75,036,150 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,875 | py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DNNRegressor with custom estimator for abalone dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import tempfile
import os
import urllib.request
import numpy as np
import tensorflow as tf
from tensorflow.contrib.learn.python.learn.estimators import model_fn as model_fn_lib
# Populated by argparse in the __main__ block below.
FLAGS = None

tf.logging.set_verbosity(tf.logging.INFO)

# Learning rate for the model
LEARNING_RATE = 0.001

# Data sets: local cache file names for the abalone CSVs.
TRAINING = "abalone_train.csv"
TEST = "abalone_test.csv"
PREDICT = "abalone_predict.csv"
def maybe_download(train_data, test_data, predict_data):
    """Download the abalone CSV files if they are not already cached locally.

    The three positional arguments are accepted for interface compatibility
    with the --train_data/--test_data/--predict_data CLI flags, but the live
    logic (as in the original) only checks for the cached files by their
    fixed names.

    Returns:
        Tuple of (training, test, prediction) CSV file names.
    """
    # Cleanup: the original carried ~35 lines of commented-out dead code and
    # repeated the download pattern three times; one loop does the same work.
    datasets = (
        (TRAINING, "http://download.tensorflow.org/data/abalone_train.csv"),
        (TEST, "http://download.tensorflow.org/data/abalone_test.csv"),
        (PREDICT, "http://download.tensorflow.org/data/abalone_predict.csv"),
    )
    for filename, url in datasets:
        if not os.path.exists(filename):
            raw = urllib.request.urlopen(url).read()
            with open(filename, "wb") as f:
                f.write(raw)
    return TRAINING, TEST, PREDICT
def model_fn(features, targets, mode, params):
    """Custom Estimator model_fn: a two-hidden-layer regression network."""
    # Network: input -> relu(10) -> relu(10) -> linear(1).
    hidden1 = tf.contrib.layers.relu(features, 10)
    hidden2 = tf.contrib.layers.relu(hidden1, 10)
    output_layer = tf.contrib.layers.linear(hidden2, 1)

    # Flatten [batch, 1] -> [batch] for the loss and metric computations.
    predictions = tf.reshape(output_layer, [-1])
    predictions_dict = {"ages": predictions}

    # Mean squared error loss; RMSE reported as an extra evaluation metric.
    loss = tf.losses.mean_squared_error(targets, predictions)
    eval_metric_ops = {
        "rmse": tf.metrics.root_mean_squared_error(
            tf.cast(targets, tf.float64), predictions)
    }

    train_op = tf.contrib.layers.optimize_loss(
        loss=loss,
        global_step=tf.contrib.framework.get_global_step(),
        learning_rate=params["learning_rate"],
        optimizer="SGD")

    return model_fn_lib.ModelFnOps(
        mode=mode,
        predictions=predictions_dict,
        loss=loss,
        train_op=train_op,
        eval_metric_ops=eval_metric_ops)
def main(unused_argv):
    """Train, evaluate, and predict with the custom abalone Estimator."""
    # Fetch (or reuse) the three CSV files.
    train_file, test_file, predict_file = maybe_download(
        FLAGS.train_data, FLAGS.test_data, FLAGS.predict_data)

    load_csv = tf.contrib.learn.datasets.base.load_csv_without_header
    training_set = load_csv(filename=train_file, target_dtype=np.int,
                            features_dtype=np.float64)
    test_set = load_csv(filename=test_file, target_dtype=np.int,
                        features_dtype=np.float64)
    prediction_set = load_csv(filename=predict_file, target_dtype=np.int,
                              features_dtype=np.float64)

    # Build the Estimator around model_fn with the configured learning rate.
    nn = tf.contrib.learn.Estimator(model_fn=model_fn,
                                    params={"learning_rate": LEARNING_RATE})
    nn.fit(x=training_set.data, y=training_set.target, steps=5000)

    # Report loss and RMSE on the held-out set.
    ev = nn.evaluate(x=test_set.data, y=test_set.target, steps=1)
    print("Loss: %s" % ev["loss"])
    print("Root Mean Squared Error: %s" % ev["rmse"])

    # Predict ages for the unlabeled examples.
    for i, p in enumerate(nn.predict(x=prediction_set.data, as_iterable=True)):
        print("Prediction %s: %s" % (i + 1, p["ages"]))
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Allow "--flag true/false" style boolean flags.
    parser.register("type", "bool", lambda v: v.lower() == "true")
    parser.add_argument(
        "--train_data", type=str, default="", help="Path to the training data.")
    parser.add_argument(
        "--test_data", type=str, default="", help="Path to the test data.")
    parser.add_argument(
        "--predict_data",
        type=str,
        default="",
        help="Path to the prediction data.")
    # Parsed flags land in the module-level FLAGS; unknown args are passed
    # through to tf.app.run so TensorFlow can consume its own flags.
    FLAGS, unparsed = parser.parse_known_args()
    tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| [
"ya.ma.mo.to.ta.ka.s@gmail.com"
] | ya.ma.mo.to.ta.ka.s@gmail.com |
3f563dd24da29a3808436df13732d8d92dc6540f | baaff7bac9cf0e18bddc27ed7866885637db9dac | /Studentportal/principle/migrations/0005_auto_20200427_1626.py | 7f2dcf3448db240598d8725aa7c1bbb6ff382970 | [] | no_license | pratikgosavii/School-College-management-portal | 0d477718a315c73b483b3885fce38d94f8cf7227 | 79ca0be6891067379b1544f4a8cd8bd82b177b51 | refs/heads/master | 2022-06-03T23:07:11.080921 | 2020-04-30T22:40:58 | 2020-04-30T22:40:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 533 | py | # Generated by Django 3.0.2 on 2020-04-27 10:56
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: requires the previous principle-app migration
    # so the addteacher and subjects models already exist.
    dependencies = [
        ('principle', '0004_auto_20200426_0042'),
    ]

    # Drop and recreate Teacher_Subjects -- this is how makemigrations
    # expresses converting the field into a ManyToManyField to subjects.
    operations = [
        migrations.RemoveField(
            model_name='addteacher',
            name='Teacher_Subjects',
        ),
        migrations.AddField(
            model_name='addteacher',
            name='Teacher_Subjects',
            field=models.ManyToManyField(to='principle.subjects'),
        ),
    ]
| [
"pratikgosavi654@gmail.com"
] | pratikgosavi654@gmail.com |
2902b8f40137e2f7a57b5112af3ab07097150c66 | 6587c26d1901b6c22442fd7cd0089fabfe1aa83a | /qw.py | 4e18d8d6bdc2c72579b8dfda3d49dd33c7e6d719 | [] | no_license | yunruowu/mail | d505d3d02de9fd7b55e52ed91e75f06fc6950a3a | 1c5de370ddee82c1f509c21336f5ee7c24ad83aa | refs/heads/master | 2020-05-04T09:15:42.116384 | 2019-06-02T09:15:55 | 2019-06-02T09:15:55 | 179,064,892 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,833 | py | # -*- coding: UTF-8 -*-
import numpy as np
import re
import random
"""
函数说明:将切分的实验样本词条整理成不重复的词条列表,也就是词汇表
Parameters:
dataSet - 整理的样本数据集
Returns:
vocabSet - 返回不重复的词条列表,也就是词汇表
"""
def createVocabList(dataSet):
    """Build the vocabulary: a list of the unique tokens across all documents."""
    vocab = set()
    for doc in dataSet:
        vocab.update(doc)  # union with this document's tokens
    return list(vocab)
"""
函数说明:根据vocabList词汇表,将inputSet向量化,向量的每个元素为1或0
Parameters:
vocabList - createVocabList返回的列表
inputSet - 切分的词条列表
Returns:
returnVec - 文档向量,词集模型
"""
def setOfWords2Vec(vocabList, inputSet):
    """Vectorize a document as 0/1 presence flags over the vocabulary."""
    vec = [0] * len(vocabList)
    for token in inputSet:
        if token in vocabList:
            vec[vocabList.index(token)] = 1  # mark the word as present
        else:
            print("the word: %s is not in my Vocabulary!" % token)
    return vec
"""
函数说明:根据vocabList词汇表,构建词袋模型
Parameters:
vocabList - createVocabList返回的列表
inputSet - 切分的词条列表
Returns:
returnVec - 文档向量,词袋模型
"""
def bagOfWords2VecMN(vocabList, inputSet):
    """Vectorize a document as per-word occurrence counts (bag of words)."""
    counts = [0] * len(vocabList)
    for token in inputSet:
        if token in vocabList:
            counts[vocabList.index(token)] += 1  # count every occurrence
    return counts
"""
函数说明:朴素贝叶斯分类器训练函数
Parameters:
trainMatrix - 训练文档矩阵,即setOfWords2Vec返回的returnVec构成的矩阵
trainCategory - 训练类别标签向量,即loadDataSet返回的classVec
Returns:
p0Vect - 正常邮件类的条件概率数组
p1Vect - 垃圾邮件类的条件概率数组
pAbusive - 文档属于垃圾邮件类的概率
"""
def trainNB0(trainMatrix, trainCategory):
    """Train the naive Bayes model with Laplace smoothing, in log space.

    Returns (p0Vect, p1Vect, pAbusive): per-word log-probability vectors for
    class 0 (ham) and class 1 (spam), and the prior probability of spam.
    """
    n_docs = len(trainMatrix)
    n_words = len(trainMatrix[0])
    pAbusive = sum(trainCategory) / float(n_docs)
    # Laplace smoothing: start word counts at 1 and denominators at 2.
    p0_num, p1_num = np.ones(n_words), np.ones(n_words)
    p0_den = 2.0
    p1_den = 2.0
    for vec, label in zip(trainMatrix, trainCategory):
        if label == 1:
            p1_num += vec
            p1_den += sum(vec)
        else:
            p0_num += vec
            p0_den += sum(vec)
    # Log-space avoids underflow when multiplying many small probabilities.
    return np.log(p0_num / p0_den), np.log(p1_num / p1_den), pAbusive
"""
函数说明:朴素贝叶斯分类器分类函数
Parameters:
vec2Classify - 待分类的词条数组
p0Vec - 正常邮件类的条件概率数组
p1Vec - 垃圾邮件类的条件概率数组
pClass1 - 文档属于垃圾邮件的概率
Returns:
0 - 属于正常邮件类
1 - 属于垃圾邮件类
"""
def classifyNB(vec2Classify, p0Vec, p1Vec, pClass1):
    """Return 1 (spam) or 0 (ham) by comparing the class log-posteriors."""
    log_spam = sum(vec2Classify * p1Vec) + np.log(pClass1)
    log_ham = sum(vec2Classify * p0Vec) + np.log(1.0 - pClass1)
    return 1 if log_spam > log_ham else 0
"""
函数说明:接收一个大字符串并将其解析为字符串列表
"""
def textParse(bigString):
    """Split a raw message into lowercase tokens longer than two characters.

    BUG FIX: the original split on r'\W*', a pattern that can match the
    empty string; on modern Python that splits between every character,
    leaving no token longer than one letter (and hence an empty result).
    r'\W+' splits on runs of non-word characters as intended.
    """
    tokens = re.split(r'\W+', bigString)
    return [tok.lower() for tok in tokens if len(tok) > 2]
"""
函数说明:测试朴素贝叶斯分类器,使用朴素贝叶斯进行交叉验证
"""
def spamTest():
    """Cross-validate the naive Bayes spam classifier on the email corpus.

    Reads 25 spam and 25 ham messages from email/spam and email/ham, holds
    out 10 random documents as a test set, trains on the remaining 40, and
    prints the misclassified documents and the error rate.
    """
    docList = []
    classList = []
    for i in range(1, 26):
        # Spam messages are labelled 1, ham messages 0.
        wordList = textParse(open('email/spam/%d.txt' % i, 'r').read())
        docList.append(wordList)
        classList.append(1)
        wordList = textParse(open('email/ham/%d.txt' % i, 'r').read())
        docList.append(wordList)
        classList.append(0)
    vocabList = createVocabList(docList)
    # BUG FIX: the original used range(100) although only 50 documents are
    # loaded, so indices 50-99 caused IndexError when building trainMat.
    trainingSet = list(range(len(docList)))
    testSet = []
    for i in range(10):
        # Move 10 randomly chosen documents from the training to the test set.
        randIndex = int(random.uniform(0, len(trainingSet)))
        testSet.append(trainingSet[randIndex])
        del (trainingSet[randIndex])
    trainMat = []
    trainClasses = []
    for docIndex in trainingSet:
        trainMat.append(setOfWords2Vec(vocabList, docList[docIndex]))
        trainClasses.append(classList[docIndex])
    p0V, p1V, pSpam = trainNB0(np.array(trainMat), np.array(trainClasses))
    errorCount = 0
    for docIndex in testSet:
        wordVector = setOfWords2Vec(vocabList, docList[docIndex])
        if classifyNB(np.array(wordVector), p0V, p1V, pSpam) != classList[docIndex]:
            errorCount += 1
            print("分类错误的测试集:", docList[docIndex])
    print('错误率:%.2f%%' % (float(errorCount) / len(testSet) * 100))
if __name__ == '__main__':
spamTest() | [
"mcdxwan@outlook.com"
] | mcdxwan@outlook.com |
dc6c069c4825fe4b29e29006bc14b14721dac2d0 | 881d5a0141f78f72822d9f0e6fe6096d00e53b60 | /rectangle_test.py | f192f3c8ee472b610c1ec7d2934b6d5a7f4f5019 | [] | no_license | KyungbinChoi/Python-Tutorial-examples | db11b760fa384fb408ab0e6e832ba030ce0f8b4e | 312e6fc9a774c93043f545c891daaf6b9443f3bc | refs/heads/master | 2021-01-18T17:03:23.441997 | 2017-06-14T05:48:57 | 2017-06-14T05:48:57 | 86,783,468 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 139 | py | import rectangle
# Smoke test: exercise the Rectangle constructor with two, zero, and one
# positional arguments and print each instance.
a = rectangle.Rectangle(4, 5)
b = rectangle.Rectangle()
c = rectangle.Rectangle(3)
print(a)
print(b)
print(c)
| [
"noreply@github.com"
] | noreply@github.com |
f0e039c411936f53639aa877703d46d8104df9d5 | bbcba1d0629065e9b243cb8493801510523bd848 | /levelfive/basicapp/forms.py | 78182d3a595350a8303b53014eb0965e49c173b8 | [] | no_license | akashkashyapit/Django-signup | 21e10a976c72f1e20f4b40375d520698622692b5 | 5596dd79d342d176d8846b5f1c891499edb1d0d2 | refs/heads/master | 2020-11-26T13:44:34.235827 | 2019-12-19T16:17:33 | 2019-12-19T16:17:33 | 229,090,700 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 446 | py | from django import forms
from django.contrib.auth.models import User
from basicapp.models import UserProfileInfo
class UserForm(forms.ModelForm):
    # Rendered as a password box so the value is masked in the browser.
    password = forms.CharField(widget = forms.PasswordInput())

    class Meta():
        model = User
        fields = ('username', 'email', 'password')
class UserProfileInfoForm(forms.ModelForm):
    # Extra profile data collected at signup alongside the built-in User.
    class Meta():
        model = UserProfileInfo
        fields = ('profile_site', 'profile_pic')
| [
"akashkashyapit@gmail.com"
] | akashkashyapit@gmail.com |
e94e3658282b9f6b67a6773d2ae3efb0e98c3de0 | 97edf859a9e53a2727d6d1cb9fa6f3425e6a6b82 | /第一章上机程序/chapter1_17_2.py | 42ec1e5883475e216a4f7eb0da5734b69265fbb0 | [] | no_license | sherlocklock666/shuzhifenxi_python | f06e04d7ce8b92f1d58d1ff673b0f821af1eea64 | 29181a0a9be84606a761ab86fb73892733984a99 | refs/heads/master | 2023-08-15T23:08:55.061885 | 2021-10-12T07:35:09 | 2021-10-12T07:35:09 | 415,510,974 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 174 | py | j = 2
N = 10**2#从2到N
S_N = 0
S_N_list = []#S_N的每次输出值列表
while j <= N:
M = 1/(N**2-1)
S_N = S_N + M
S_N_list.append(S_N)
N -= 1
else:
print(S_N_list)
| [
"1016945252fzh@gmail.com"
] | 1016945252fzh@gmail.com |
b9e4d38dbb7b3af32804a49cbdc4f28603534461 | 5a94233e02cbee640079740044f1ee377c96cc59 | /heat-config-script/install.d/hook-script.py | 0c9a92fe0cbfb0907514a31c9bf4e8f0095c3478 | [
"Apache-2.0"
] | permissive | kairen/heat-agents | 249a2faa50798f6c371f8c25eab8d291f9e53f93 | a1f20d4eceed2dba5a2dcc34f9ce494ba1d0d289 | refs/heads/master | 2020-04-01T22:46:00.397671 | 2019-04-24T02:09:06 | 2019-04-24T02:09:06 | 153,725,564 | 0 | 0 | Apache-2.0 | 2018-10-19T04:14:32 | 2018-10-19T04:14:32 | null | UTF-8 | Python | false | false | 2,997 | py | #!/usr/bin/env python
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import logging
import os
import subprocess
import sys
# Where deployment scripts are written and executed; overridable via the
# environment (useful for tests).
WORKING_DIR = os.environ.get('HEAT_SCRIPT_WORKING',
                             '/var/lib/heat-config/heat-config-script')
# Where scripts deposit their output files (one file per declared output).
OUTPUTS_DIR = os.environ.get('HEAT_SCRIPT_OUTPUTS',
                             '/var/run/heat-config/heat-config-script')
def prepare_dir(path):
    """Ensure *path* exists as a directory, creating it with mode 0700."""
    if os.path.isdir(path):
        return
    os.makedirs(path, 0o700)
def main(argv=sys.argv):
    """Run a heat-config "script" deployment.

    Reads a JSON deployment document from stdin, writes the embedded script
    to WORKING_DIR/<id>, executes it with the deployment inputs exported as
    environment variables, and prints a JSON response (captured outputs,
    stdout/stderr, exit code) on stdout.
    """
    log = logging.getLogger('heat-config')
    handler = logging.StreamHandler(sys.stderr)
    handler.setFormatter(
        logging.Formatter(
            '[%(asctime)s] (%(name)s) [%(levelname)s] %(message)s'))
    log.addHandler(handler)
    log.setLevel('DEBUG')

    prepare_dir(OUTPUTS_DIR)
    prepare_dir(WORKING_DIR)
    os.chdir(WORKING_DIR)

    # Deployment document: {id, config, inputs: [...], outputs: [...]}.
    c = json.load(sys.stdin)

    env = os.environ.copy()
    for input in c['inputs']:
        input_name = input['name']
        value = input.get('value', '')
        # Structured values are passed to the script as JSON strings.
        if isinstance(value, dict) or isinstance(value, list):
            env[input_name] = json.dumps(value)
        else:
            env[input_name] = value
        log.info('%s=%s' % (input_name, env[input_name]))

    fn = os.path.join(WORKING_DIR, c['id'])
    heat_outputs_path = os.path.join(OUTPUTS_DIR, c['id'])
    # The script is expected to write each declared output to
    # <heat_outputs_path>.<output_name>.
    env['heat_outputs_path'] = heat_outputs_path

    # Write the script file with mode 0700 (executable by owner only).
    with os.fdopen(os.open(fn, os.O_CREAT | os.O_WRONLY, 0o700), 'w') as f:
        f.write(c.get('config', ''))

    log.debug('Running %s' % fn)
    subproc = subprocess.Popen([fn], stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE, env=env)
    stdout, stderr = subproc.communicate()

    log.info(stdout)
    log.debug(stderr)
    if subproc.returncode:
        log.error("Error running %s. [%s]\n" % (fn, subproc.returncode))
    else:
        log.info('Completed %s' % fn)

    response = {}

    # Collect any declared outputs the script produced; missing output files
    # are silently skipped (best effort).
    for output in c.get('outputs') or []:
        output_name = output['name']
        try:
            with open('%s.%s' % (heat_outputs_path, output_name)) as out:
                response[output_name] = out.read()
        except IOError:
            pass

    response.update({
        'deploy_stdout': stdout.decode('utf-8', 'replace'),
        'deploy_stderr': stderr.decode('utf-8', 'replace'),
        'deploy_status_code': subproc.returncode,
    })

    json.dump(response, sys.stdout)


if __name__ == '__main__':
    sys.exit(main(sys.argv))
| [
"therve@redhat.com"
] | therve@redhat.com |
50239f4756a0b1004e5a33d918a7f6b2ec81869e | 054c3f0cb8a5046ccbd70c2fb228a934161af440 | /steve/crawler/fetcher_test.py | a6184cfdb3a6a49b4ec8f43c62932fef8760aede | [] | no_license | ningliang/bagger | 16049d96dd65ac863073bf49992d0fcfe6cb602c | 075f3463d6319996399a46971a4da8f054fbf900 | refs/heads/master | 2020-05-19T15:39:26.592662 | 2009-10-25T17:28:50 | 2009-10-25T17:28:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 88 | py | from fetcher import *
import unittest
# Run any TestCase classes pulled in via `from fetcher import *` when this
# file is executed directly.
if __name__ == '__main__':
    unittest.main()
| [
"fedele@google.com"
] | fedele@google.com |
9f80f8e98ba81fbc03fffe07a29c8ce878090be2 | 7971a30e49246a1080490c9641c29cb8fd575c12 | /Subset_DataStructures/remove_duplicates.py | 6781a36207b4e74472372b44658b8e5dafccec4e | [] | no_license | ymwondimu/HackerRank | 3870922a29a1e4271a1d3cfd238fd83fd80749c8 | 6481d7ddf61868108a071b44e3fdb098e8cbd61e | refs/heads/master | 2020-03-21T19:05:00.406134 | 2018-07-26T01:11:05 | 2018-07-26T01:11:05 | 138,929,791 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,068 | py | #!/bin/python3
import math
import os
import random
import re
import sys
class SinglyLinkedListNode:
    """A single node of a singly linked list."""

    def __init__(self, node_data):
        self.data = node_data  # payload value
        self.next = None       # successor node, or None at the tail
class SinglyLinkedList:
    """Singly linked list that appends in O(1) via a tail pointer."""

    def __init__(self):
        self.head = None
        self.tail = None

    def insert_node(self, node_data):
        """Append a new node holding *node_data* at the tail."""
        node = SinglyLinkedListNode(node_data)
        if self.head is None:
            self.head = node
        else:
            self.tail.next = node
        self.tail = node
def removeDuplicates(head):
    """Remove consecutive duplicate values from a sorted singly linked list.

    BUG FIX: the original body only initialised two unused cursors and
    returned the list unchanged. This walks the list once and splices out
    every node whose data equals its predecessor's.

    Args:
        head: first node of the (sorted) list, or None for an empty list.
    Returns:
        The head of the de-duplicated list (same head object, or None).
    """
    node = head
    while node is not None and node.next is not None:
        if node.next.data == node.data:
            node.next = node.next.next  # unlink the duplicate
        else:
            node = node.next
    return head
def main():
    """Build the sample list 1->2->3->3->4->5, de-duplicate it, print values."""
    nodes = [SinglyLinkedListNode(v) for v in (1, 2, 3, 3, 4, 5)]
    # Chain the nodes together in order.
    for left, right in zip(nodes, nodes[1:]):
        left.next = right
    node = removeDuplicates(nodes[0])
    while node:
        print(node.data)
        node = node.next


if __name__ == "__main__":
    main()
| [
"ywondimu6@gatech.edu"
] | ywondimu6@gatech.edu |
1ab54b43639b7604b508f863b7e80145671dff29 | 10297e27a8820f2862cace0105af58d80edd6742 | /pattern5.py | ac6d46c5b7288841d3adb388ab75045adb05372f | [] | no_license | codejay411/python_practical | 8278865b320886851957e538d0b1de28b39445cc | 83377b8ef377384332f1c0928990538791f31622 | refs/heads/master | 2022-11-28T00:07:02.841325 | 2020-07-26T07:01:33 | 2020-07-26T07:01:33 | 282,594,792 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 323 | py | def asd():
line=input("enter the number of line:")
for i in range(1,line+1,1):
for j in range(i,(line-1)+1,1):
print " ",
for k in range(1,2,3):
print k,
for l in range(i,(line-1)+1,1):
print " ",
print
asd()
asd()
asd()
asd()
asd()
asd()
asd()
| [
"jaypr202@gmail.com"
] | jaypr202@gmail.com |
c23e8f9950b7112ca397ef9a81e694be5988638c | aa271e98d3ea105c25e770e4638f47d68f8e631f | /edu/OO Design/Design a Car Rental System/Enums.py | fe5d90794d0f023325166e48e46f77e401dc8848 | [] | no_license | LeoChenL/Miscellaneous | 50b894b4fce6965ae5aa4f600f90b22e08e18b41 | 9dcaa0890f638d78d7f2b5a7461eb4a6c2b80560 | refs/heads/master | 2021-07-12T07:12:12.084036 | 2020-09-21T02:58:32 | 2020-09-21T02:58:32 | 196,024,605 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,227 | py | class BillItemType(Enum):
BASE_CHARGE, ADDITIONAL_SERVICE, FINE, OTHER = 1, 2, 3, 4
class VehicleLogType(Enum):
ACCIDENT, FUELING, CLEANING_SERVICE, OIL_CHANGE, REPAIR, OTHER = 1, 2, 3, 4, 5, 6
class VanType(Enum):
PASSENGER, CARGO = 1, 2
class CarType(Enum):
ECONOMY, COMPACT, INTERMEDIATE, STANDARD, FULL_SIZE, PREMIUM, LUXURY = 1, 2, 3, 4, 5, 6, 7
class VehicleStatus(Enum):
AVAILABLE, RESERVED, LOANED, LOST, BEING_SERVICED, OTHER = 1, 2, 3, 4, 5, 6
class ReservationStatus(Enum):
ACTIVE, PENDING, CONFIRMED, COMPLETED, CANCELLED, NONE = 1, 2, 3, 4, 5, 6
class AccountStatus(Enum):
ACTIVE, CLOSED, CANCELED, BLACKLISTED, BLOCKED = 1, 2, 3, 4, 5
class PaymentStatus(Enum):
UNPAID, PENDING, COMPLETED, FILLED, DECLINED, CANCELLED, ABANDONED, SETTLING, SETTLED, REFUNDED = 1, 2, 3, 4, 5, 6, 7, 8, 9, 10
class Address:
    """Postal address value object; fields are kept name-mangled private."""

    def __init__(self, street, city, state, zip_code, country):
        # Double-underscore names discourage mutation from outside the class.
        self.__street_address, self.__city = street, city
        self.__state, self.__zip_code = state, zip_code
        self.__country = country
class Person:
    """Base person record: name, postal address, and contact details."""

    def __init__(self, name, address, email, phone):
        # Stored privately (name-mangled), mirroring the Address class.
        self.__name, self.__address = name, address
        self.__email, self.__phone = email, phone
| [
"leochenlang7@gmail.com"
] | leochenlang7@gmail.com |
b273e4938bf52ca34ef84d5ea8d59aae5f8d5fe4 | d8c1db33096c082540bfa35a8a9be0a5e5bbc2e8 | /.c9/metadata/environment/python_sample_package/fun.py | 50ed7372473b384c8c340cd9b8d03edacf057632 | [] | no_license | JAYAPRAKASH7541/jai7541 | 7b88570108066c763bc861de7af3c8780a384f99 | 2a997baacd63555bdbb5acda7a606608c76f35a3 | refs/heads/master | 2020-07-13T00:12:45.536062 | 2020-05-28T09:56:44 | 2020-05-28T09:56:44 | 204,943,482 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,938 | py | {"filter":false,"title":"fun.py","tooltip":"/python_sample_package/fun.py","ace":{"folds":[],"scrolltop":0,"scrollleft":0,"selection":{"start":{"row":11,"column":8},"end":{"row":11,"column":8},"isBackwards":false},"options":{"guessTabSize":true,"useWrapMode":false,"wrapToView":true},"firstLineState":0},"hash":"b319510045eb3c1b337a03c4c934f1ad85a67a54","undoManager":{"mark":100,"position":100,"stack":[[{"start":{"row":7,"column":1},"end":{"row":7,"column":3},"action":"insert","lines":["()"],"id":63}],[{"start":{"row":7,"column":2},"end":{"row":7,"column":3},"action":"insert","lines":["c"],"id":64},{"start":{"row":7,"column":3},"end":{"row":7,"column":4},"action":"insert","lines":[","]},{"start":{"row":7,"column":4},"end":{"row":7,"column":5},"action":"insert","lines":["d"]}],[{"start":{"row":0,"column":11},"end":{"row":0,"column":12},"action":"remove","lines":["s"],"id":65},{"start":{"row":0,"column":10},"end":{"row":0,"column":11},"action":"remove","lines":["g"]},{"start":{"row":0,"column":9},"end":{"row":0,"column":10},"action":"remove","lines":["r"]},{"start":{"row":0,"column":8},"end":{"row":0,"column":9},"action":"remove","lines":["a"]}],[{"start":{"row":0,"column":8},"end":{"row":0,"column":9},"action":"insert","lines":["c"],"id":66}],[{"start":{"row":0,"column":17},"end":{"row":0,"column":18},"action":"remove","lines":["s"],"id":67},{"start":{"row":0,"column":16},"end":{"row":0,"column":17},"action":"remove","lines":["g"]},{"start":{"row":0,"column":15},"end":{"ro
w":0,"column":16},"action":"remove","lines":["r"]},{"start":{"row":0,"column":14},"end":{"row":0,"column":15},"action":"remove","lines":["a"]},{"start":{"row":0,"column":13},"end":{"row":0,"column":14},"action":"remove","lines":["w"]},{"start":{"row":0,"column":12},"end":{"row":0,"column":13},"action":"remove","lines":["k"]}],[{"start":{"row":0,"column":12},"end":{"row":0,"column":13},"action":"insert","lines":["d"],"id":68}],[{"start":{"row":1,"column":10},"end":{"row":1,"column":11},"action":"remove","lines":["s"],"id":69},{"start":{"row":1,"column":9},"end":{"row":1,"column":10},"action":"remove","lines":["g"]},{"start":{"row":1,"column":8},"end":{"row":1,"column":9},"action":"remove","lines":["r"]},{"start":{"row":1,"column":7},"end":{"row":1,"column":8},"action":"remove","lines":["a"]}],[{"start":{"row":1,"column":7},"end":{"row":1,"column":8},"action":"insert","lines":["c"],"id":70}],[{"start":{"row":2,"column":12},"end":{"row":2,"column":13},"action":"remove","lines":["s"],"id":71},{"start":{"row":2,"column":11},"end":{"row":2,"column":12},"action":"remove","lines":["g"]},{"start":{"row":2,"column":10},"end":{"row":2,"column":11},"action":"remove","lines":["r"]},{"start":{"row":2,"column":9},"end":{"row":2,"column":10},"action":"remove","lines":["a"]},{"start":{"row":2,"column":8},"end":{"row":2,"column":9},"action":"remove","lines":["w"]},{"start":{"row":2,"column":7},"end":{"row":2,"column":8},"action":"remove","lines":["k"]}],[{"start":{"row":2,"column":7},"end":{"row":2,"column":8},"action":"insert","lines":["d"],"id":72}],[{"start":{"row":7,"column":2},"end":{"row":7,"column":3},"action":"insert","lines":["*"],"id":73}],[{"start":{"row":7,"column":5},"end":{"row":7,"column":6},"action":"insert","lines":["*"],"id":74},{"start":{"row":7,"column":6},"end":{"row":7,"column":7},"action":"insert","lines":["*"]}],[{"start":{"row":0,"column":8},"end":{"row":0,"column":9},"action":"remove","lines":["c"],"id":75}],[{"start":{"row":0,"column":8},"end":{"row":0,"col
umn":9},"action":"insert","lines":["a"],"id":76},{"start":{"row":0,"column":9},"end":{"row":0,"column":10},"action":"insert","lines":["r"]},{"start":{"row":0,"column":10},"end":{"row":0,"column":11},"action":"insert","lines":["g"]},{"start":{"row":0,"column":11},"end":{"row":0,"column":12},"action":"insert","lines":["s"]}],[{"start":{"row":0,"column":15},"end":{"row":0,"column":16},"action":"remove","lines":["d"],"id":77}],[{"start":{"row":0,"column":15},"end":{"row":0,"column":16},"action":"insert","lines":["k"],"id":78},{"start":{"row":0,"column":16},"end":{"row":0,"column":17},"action":"insert","lines":["w"]},{"start":{"row":0,"column":17},"end":{"row":0,"column":18},"action":"insert","lines":["a"]},{"start":{"row":0,"column":18},"end":{"row":0,"column":19},"action":"insert","lines":["r"]},{"start":{"row":0,"column":19},"end":{"row":0,"column":20},"action":"insert","lines":["g"]}],[{"start":{"row":0,"column":20},"end":{"row":0,"column":21},"action":"insert","lines":["s"],"id":79}],[{"start":{"row":1,"column":7},"end":{"row":1,"column":8},"action":"remove","lines":["c"],"id":80}],[{"start":{"row":1,"column":7},"end":{"row":1,"column":8},"action":"insert","lines":["a"],"id":81},{"start":{"row":1,"column":8},"end":{"row":1,"column":9},"action":"insert","lines":["r"]},{"start":{"row":1,"column":9},"end":{"row":1,"column":10},"action":"insert","lines":["g"]},{"start":{"row":1,"column":10},"end":{"row":1,"column":11},"action":"insert","lines":["s"]}],[{"start":{"row":2,"column":7},"end":{"row":2,"column":8},"action":"remove","lines":["d"],"id":82}],[{"start":{"row":2,"column":7},"end":{"row":2,"column":8},"action":"insert","lines":["k"],"id":83},{"start":{"row":2,"column":8},"end":{"row":2,"column":9},"action":"insert","lines":["w"]},{"start":{"row":2,"column":9},"end":{"row":2,"column":10},"action":"insert","lines":["a"]},{"start":{"row":2,"column":10},"end":{"row":2,"column":11},"action":"insert","lines":["r"]},{"start":{"row":2,"column":11},"end":{"row":2,"column":1
2},"action":"insert","lines":["g"]},{"start":{"row":2,"column":12},"end":{"row":2,"column":13},"action":"insert","lines":["s"]}],[{"start":{"row":7,"column":6},"end":{"row":7,"column":7},"action":"remove","lines":["*"],"id":84},{"start":{"row":7,"column":5},"end":{"row":7,"column":6},"action":"remove","lines":["*"]}],[{"start":{"row":7,"column":2},"end":{"row":7,"column":3},"action":"remove","lines":["*"],"id":85}],[{"start":{"row":7,"column":2},"end":{"row":7,"column":3},"action":"insert","lines":["*"],"id":86}],[{"start":{"row":7,"column":5},"end":{"row":7,"column":6},"action":"insert","lines":["*"],"id":87},{"start":{"row":7,"column":6},"end":{"row":7,"column":7},"action":"insert","lines":["*"]}],[{"start":{"row":7,"column":6},"end":{"row":7,"column":7},"action":"remove","lines":["*"],"id":88},{"start":{"row":7,"column":5},"end":{"row":7,"column":6},"action":"remove","lines":["*"]}],[{"start":{"row":7,"column":5},"end":{"row":7,"column":6},"action":"insert","lines":["*"],"id":89},{"start":{"row":7,"column":6},"end":{"row":7,"column":7},"action":"insert","lines":["*"]}],[{"start":{"row":7,"column":5},"end":{"row":7,"column":6},"action":"insert","lines":["s"],"id":90},{"start":{"row":7,"column":6},"end":{"row":7,"column":7},"action":"insert","lines":["t"]},{"start":{"row":7,"column":7},"end":{"row":7,"column":8},"action":"insert","lines":["r"]}],[{"start":{"row":7,"column":8},"end":{"row":7,"column":9},"action":"insert","lines":["("],"id":91}],[{"start":{"row":7,"column":8},"end":{"row":7,"column":9},"action":"remove","lines":["("],"id":92},{"start":{"row":7,"column":7},"end":{"row":7,"column":8},"action":"remove","lines":["r"]},{"start":{"row":7,"column":6},"end":{"row":7,"column":7},"action":"remove","lines":["t"]},{"start":{"row":7,"column":5},"end":{"row":7,"column":6},"action":"remove","lines":["s"]}],[{"start":{"row":7,"column":7},"end":{"row":7,"column":8},"action":"insert","lines":["s"],"id":93},{"start":{"row":7,"column":8},"end":{"row":7,"column":9},"acti
on":"insert","lines":["t"]},{"start":{"row":7,"column":9},"end":{"row":7,"column":10},"action":"insert","lines":["r"]},{"start":{"row":7,"column":10},"end":{"row":7,"column":11},"action":"insert","lines":["("]}],[{"start":{"row":7,"column":13},"end":{"row":7,"column":14},"action":"insert","lines":[")"],"id":94}],[{"start":{"row":7,"column":13},"end":{"row":7,"column":14},"action":"remove","lines":[")"],"id":95},{"start":{"row":7,"column":12},"end":{"row":7,"column":13},"action":"remove","lines":[")"]},{"start":{"row":7,"column":11},"end":{"row":7,"column":12},"action":"remove","lines":["d"]},{"start":{"row":7,"column":10},"end":{"row":7,"column":11},"action":"remove","lines":["("]},{"start":{"row":7,"column":9},"end":{"row":7,"column":10},"action":"remove","lines":["r"]},{"start":{"row":7,"column":8},"end":{"row":7,"column":9},"action":"remove","lines":["t"]}],[{"start":{"row":7,"column":7},"end":{"row":7,"column":8},"action":"remove","lines":["s"],"id":96}],[{"start":{"row":7,"column":7},"end":{"row":7,"column":8},"action":"insert","lines":["d"],"id":97},{"start":{"row":7,"column":8},"end":{"row":7,"column":9},"action":"insert","lines":[")"]}],[{"start":{"row":5,"column":5},"end":{"row":5,"column":6},"action":"remove","lines":["1"],"id":98},{"start":{"row":5,"column":4},"end":{"row":5,"column":5},"action":"remove","lines":[":"]},{"start":{"row":5,"column":3},"end":{"row":5,"column":4},"action":"remove","lines":["1"]}],[{"start":{"row":5,"column":3},"end":{"row":5,"column":4},"action":"insert","lines":["a"],"id":99},{"start":{"row":5,"column":4},"end":{"row":5,"column":5},"action":"insert","lines":[":"]}],[{"start":{"row":5,"column":5},"end":{"row":5,"column":7},"action":"insert","lines":["''"],"id":100}],[{"start":{"row":5,"column":6},"end":{"row":5,"column":7},"action":"insert","lines":["j"],"id":101},{"start":{"row":5,"column":7},"end":{"row":5,"column":8},"action":"insert","lines":["a"]},{"start":{"row":5,"column":8},"end":{"row":5,"column":9},"action":"insert",
"lines":["i"]}],[{"start":{"row":5,"column":11},"end":{"row":5,"column":12},"action":"remove","lines":["2"],"id":102}],[{"start":{"row":5,"column":11},"end":{"row":5,"column":12},"action":"insert","lines":["b"],"id":103},{"start":{"row":5,"column":12},"end":{"row":5,"column":13},"action":"insert","lines":[":"]}],[{"start":{"row":5,"column":14},"end":{"row":5,"column":15},"action":"remove","lines":["3"],"id":104},{"start":{"row":5,"column":13},"end":{"row":5,"column":14},"action":"remove","lines":[":"]}],[{"start":{"row":5,"column":13},"end":{"row":5,"column":15},"action":"insert","lines":["\"\""],"id":105}],[{"start":{"row":5,"column":14},"end":{"row":5,"column":15},"action":"insert","lines":["j"],"id":106},{"start":{"row":5,"column":15},"end":{"row":5,"column":16},"action":"insert","lines":["a"]},{"start":{"row":5,"column":16},"end":{"row":5,"column":17},"action":"insert","lines":["i"]},{"start":{"row":5,"column":17},"end":{"row":5,"column":18},"action":"insert","lines":["q"]}],[{"start":{"row":5,"column":21},"end":{"row":5,"column":22},"action":"remove","lines":["8"],"id":107},{"start":{"row":5,"column":20},"end":{"row":5,"column":21},"action":"remove","lines":["1"]}],[{"start":{"row":5,"column":20},"end":{"row":5,"column":21},"action":"insert","lines":["c"],"id":108}],[{"start":{"row":5,"column":22},"end":{"row":5,"column":23},"action":"remove","lines":["4"],"id":109}],[{"start":{"row":5,"column":22},"end":{"row":5,"column":24},"action":"insert","lines":["\"\""],"id":110}],[{"start":{"row":5,"column":23},"end":{"row":5,"column":24},"action":"insert","lines":["f"],"id":111},{"start":{"row":5,"column":24},"end":{"row":5,"column":25},"action":"insert","lines":["g"]},{"start":{"row":5,"column":25},"end":{"row":5,"column":26},"action":"insert","lines":["h"]}],[{"start":{"row":5,"column":3},"end":{"row":5,"column":4},"action":"insert","lines":["'"],"id":112}],[{"start":{"row":5,"column":5},"end":{"row":5,"column":6},"action":"insert","lines":["'"],"id":113}],[{"start":
{"row":5,"column":13},"end":{"row":5,"column":14},"action":"insert","lines":["'"],"id":114}],[{"start":{"row":5,"column":15},"end":{"row":5,"column":16},"action":"insert","lines":["'"],"id":115}],[{"start":{"row":5,"column":24},"end":{"row":5,"column":25},"action":"insert","lines":["'"],"id":116}],[{"start":{"row":5,"column":26},"end":{"row":5,"column":27},"action":"insert","lines":["'"],"id":117}],[{"start":{"row":5,"column":11},"end":{"row":5,"column":12},"action":"remove","lines":["'"],"id":118},{"start":{"row":5,"column":10},"end":{"row":5,"column":11},"action":"remove","lines":["i"]},{"start":{"row":5,"column":9},"end":{"row":5,"column":10},"action":"remove","lines":["a"]},{"start":{"row":5,"column":8},"end":{"row":5,"column":9},"action":"remove","lines":["j"]},{"start":{"row":5,"column":7},"end":{"row":5,"column":8},"action":"remove","lines":["'"]}],[{"start":{"row":5,"column":7},"end":{"row":5,"column":8},"action":"insert","lines":["r"],"id":119}],[{"start":{"row":5,"column":7},"end":{"row":5,"column":8},"action":"remove","lines":["r"],"id":120}],[{"start":{"row":5,"column":7},"end":{"row":5,"column":8},"action":"insert","lines":["5"],"id":121}],[{"start":{"row":5,"column":18},"end":{"row":5,"column":19},"action":"remove","lines":["\""],"id":122},{"start":{"row":5,"column":17},"end":{"row":5,"column":18},"action":"remove","lines":["q"]},{"start":{"row":5,"column":16},"end":{"row":5,"column":17},"action":"remove","lines":["i"]},{"start":{"row":5,"column":15},"end":{"row":5,"column":16},"action":"remove","lines":["a"]},{"start":{"row":5,"column":14},"end":{"row":5,"column":15},"action":"remove","lines":["j"]},{"start":{"row":5,"column":13},"end":{"row":5,"column":14},"action":"remove","lines":["\""]}],[{"start":{"row":5,"column":13},"end":{"row":5,"column":14},"action":"insert","lines":["6"],"id":123}],[{"start":{"row":0,"column":0},"end":{"row":0,"column":1},"action":"insert","lines":["'"],"id":124},{"start":{"row":0,"column":1},"end":{"row":0,"column":2},"act
ion":"insert","lines":["'"]},{"start":{"row":0,"column":2},"end":{"row":0,"column":3},"action":"insert","lines":["'"]}],[{"start":{"row":7,"column":9},"end":{"row":7,"column":10},"action":"insert","lines":["'"],"id":125},{"start":{"row":7,"column":10},"end":{"row":7,"column":11},"action":"insert","lines":["'"]},{"start":{"row":7,"column":11},"end":{"row":7,"column":12},"action":"insert","lines":["'"]}],[{"start":{"row":7,"column":12},"end":{"row":8,"column":0},"action":"insert","lines":["",""],"id":126},{"start":{"row":8,"column":0},"end":{"row":8,"column":1},"action":"insert","lines":["l"]},{"start":{"row":8,"column":1},"end":{"row":8,"column":2},"action":"insert","lines":["="]}],[{"start":{"row":8,"column":2},"end":{"row":8,"column":4},"action":"insert","lines":["[]"],"id":127}],[{"start":{"row":8,"column":3},"end":{"row":8,"column":4},"action":"insert","lines":["1"],"id":128},{"start":{"row":8,"column":4},"end":{"row":8,"column":5},"action":"insert","lines":[","]},{"start":{"row":8,"column":5},"end":{"row":8,"column":6},"action":"insert","lines":["2"]},{"start":{"row":8,"column":6},"end":{"row":8,"column":7},"action":"insert","lines":[","]},{"start":{"row":8,"column":7},"end":{"row":8,"column":8},"action":"insert","lines":["3"]},{"start":{"row":8,"column":8},"end":{"row":8,"column":9},"action":"insert","lines":[","]},{"start":{"row":8,"column":9},"end":{"row":8,"column":10},"action":"insert","lines":["3"]},{"start":{"row":8,"column":10},"end":{"row":8,"column":11},"action":"insert","lines":[","]},{"start":{"row":8,"column":11},"end":{"row":8,"column":12},"action":"insert","lines":["3"]},{"start":{"row":8,"column":12},"end":{"row":8,"column":13},"action":"insert","lines":[","]},{"start":{"row":8,"column":13},"end":{"row":8,"column":14},"action":"insert","lines":[","]}],[{"start":{"row":8,"column":14},"end":{"row":8,"column":15},"action":"insert","lines":["3"],"id":129},{"start":{"row":8,"column":15},"end":{"row":8,"column":16},"action":"insert","lines":[","]},{"st
art":{"row":8,"column":16},"end":{"row":8,"column":17},"action":"insert","lines":["3"]},{"start":{"row":8,"column":17},"end":{"row":8,"column":18},"action":"insert","lines":[","]},{"start":{"row":8,"column":18},"end":{"row":8,"column":19},"action":"insert","lines":[","]},{"start":{"row":8,"column":19},"end":{"row":8,"column":20},"action":"insert","lines":["3"]},{"start":{"row":8,"column":20},"end":{"row":8,"column":21},"action":"insert","lines":[","]},{"start":{"row":8,"column":21},"end":{"row":8,"column":22},"action":"insert","lines":["3"]}],[{"start":{"row":8,"column":17},"end":{"row":8,"column":18},"action":"remove","lines":[","],"id":130}],[{"start":{"row":8,"column":12},"end":{"row":8,"column":13},"action":"remove","lines":[","],"id":131}],[{"start":{"row":8,"column":21},"end":{"row":9,"column":0},"action":"insert","lines":["",""],"id":132},{"start":{"row":9,"column":0},"end":{"row":9,"column":1},"action":"insert","lines":["d"]}],[{"start":{"row":9,"column":1},"end":{"row":9,"column":2},"action":"insert","lines":["="],"id":133},{"start":{"row":9,"column":2},"end":{"row":9,"column":3},"action":"insert","lines":["{"]}],[{"start":{"row":9,"column":3},"end":{"row":9,"column":4},"action":"insert","lines":["l"],"id":134}],[{"start":{"row":9,"column":4},"end":{"row":9,"column":6},"action":"insert","lines":["[]"],"id":135}],[{"start":{"row":9,"column":5},"end":{"row":9,"column":6},"action":"insert","lines":["3"],"id":136}],[{"start":{"row":9,"column":7},"end":{"row":9,"column":8},"action":"insert","lines":[":"],"id":137},{"start":{"row":9,"column":8},"end":{"row":9,"column":9},"action":"insert","lines":["l"]},{"start":{"row":9,"column":9},"end":{"row":9,"column":10},"action":"insert","lines":["."]},{"start":{"row":9,"column":10},"end":{"row":9,"column":11},"action":"insert","lines":["c"]}],[{"start":{"row":9,"column":11},"end":{"row":9,"column":12},"action":"insert","lines":["o"],"id":138},{"start":{"row":9,"column":12},"end":{"row":9,"column":13},"action":"insert","li
nes":["u"]},{"start":{"row":9,"column":13},"end":{"row":9,"column":14},"action":"insert","lines":["n"]},{"start":{"row":9,"column":14},"end":{"row":9,"column":15},"action":"insert","lines":["t"]}],[{"start":{"row":9,"column":15},"end":{"row":9,"column":17},"action":"insert","lines":["()"],"id":139}],[{"start":{"row":9,"column":16},"end":{"row":9,"column":17},"action":"insert","lines":["l"],"id":140}],[{"start":{"row":9,"column":17},"end":{"row":9,"column":19},"action":"insert","lines":["[]"],"id":141}],[{"start":{"row":9,"column":18},"end":{"row":9,"column":19},"action":"insert","lines":["3"],"id":142}],[{"start":{"row":9,"column":20},"end":{"row":9,"column":21},"action":"insert","lines":[" "],"id":143}],[{"start":{"row":9,"column":20},"end":{"row":9,"column":21},"action":"remove","lines":[" "],"id":144}],[{"start":{"row":9,"column":21},"end":{"row":9,"column":22},"action":"insert","lines":[" "],"id":145},{"start":{"row":9,"column":22},"end":{"row":9,"column":23},"action":"insert","lines":["f"]},{"start":{"row":9,"column":23},"end":{"row":9,"column":24},"action":"insert","lines":["o"]},{"start":{"row":9,"column":24},"end":{"row":9,"column":25},"action":"insert","lines":["r"]}],[{"start":{"row":9,"column":25},"end":{"row":9,"column":26},"action":"insert","lines":[" "],"id":146},{"start":{"row":9,"column":26},"end":{"row":9,"column":27},"action":"insert","lines":["i"]}],[{"start":{"row":9,"column":27},"end":{"row":9,"column":28},"action":"insert","lines":[" "],"id":147},{"start":{"row":9,"column":28},"end":{"row":9,"column":29},"action":"insert","lines":["i"]},{"start":{"row":9,"column":29},"end":{"row":9,"column":30},"action":"insert","lines":["n"]}],[{"start":{"row":9,"column":30},"end":{"row":9,"column":31},"action":"insert","lines":[" 
"],"id":148},{"start":{"row":9,"column":31},"end":{"row":9,"column":32},"action":"insert","lines":["l"]}],[{"start":{"row":9,"column":32},"end":{"row":9,"column":33},"action":"insert","lines":["}"],"id":149}],[{"start":{"row":9,"column":33},"end":{"row":10,"column":0},"action":"insert","lines":["",""],"id":150},{"start":{"row":10,"column":0},"end":{"row":10,"column":1},"action":"insert","lines":["p"]},{"start":{"row":10,"column":1},"end":{"row":10,"column":2},"action":"insert","lines":["r"]},{"start":{"row":10,"column":2},"end":{"row":10,"column":3},"action":"insert","lines":["i"]}],[{"start":{"row":10,"column":3},"end":{"row":10,"column":4},"action":"insert","lines":["n"],"id":151},{"start":{"row":10,"column":4},"end":{"row":10,"column":5},"action":"insert","lines":["t"]}],[{"start":{"row":10,"column":5},"end":{"row":10,"column":7},"action":"insert","lines":["()"],"id":152}],[{"start":{"row":10,"column":6},"end":{"row":10,"column":7},"action":"insert","lines":["d"],"id":153}],[{"start":{"row":8,"column":21},"end":{"row":9,"column":0},"action":"insert","lines":["",""],"id":154}],[{"start":{"row":9,"column":0},"end":{"row":9,"column":1},"action":"insert","lines":[" 
"],"id":155}],[{"start":{"row":9,"column":1},"end":{"row":9,"column":2},"action":"insert","lines":["p"],"id":156},{"start":{"row":9,"column":2},"end":{"row":9,"column":3},"action":"insert","lines":["r"]},{"start":{"row":9,"column":3},"end":{"row":9,"column":4},"action":"insert","lines":["i"]},{"start":{"row":9,"column":4},"end":{"row":9,"column":5},"action":"insert","lines":["n"]},{"start":{"row":9,"column":5},"end":{"row":9,"column":6},"action":"insert","lines":["t"]}],[{"start":{"row":9,"column":6},"end":{"row":9,"column":8},"action":"insert","lines":["()"],"id":157}],[{"start":{"row":9,"column":7},"end":{"row":9,"column":8},"action":"insert","lines":["t"],"id":158},{"start":{"row":9,"column":8},"end":{"row":9,"column":9},"action":"insert","lines":["y"]},{"start":{"row":9,"column":9},"end":{"row":9,"column":10},"action":"insert","lines":["p"]},{"start":{"row":9,"column":10},"end":{"row":9,"column":11},"action":"insert","lines":["e"]}],[{"start":{"row":9,"column":11},"end":{"row":9,"column":13},"action":"insert","lines":["()"],"id":159}],[{"start":{"row":9,"column":12},"end":{"row":9,"column":13},"action":"insert","lines":["l"],"id":160}],[{"start":{"row":9,"column":13},"end":{"row":9,"column":15},"action":"insert","lines":["[]"],"id":161}],[{"start":{"row":9,"column":14},"end":{"row":9,"column":15},"action":"insert","lines":["0"],"id":162}],[{"start":{"row":9,"column":0},"end":{"row":9,"column":1},"action":"remove","lines":[" "],"id":163}]]},"timestamp":1583735508143} | [
"jayaprakash7541@gmail.com"
] | jayaprakash7541@gmail.com |
13445b0f6c37648b7a2401914798de6146aa3c71 | 702dd7c1d7e305bc6ce2ffce739b60746afdb558 | /Tópicos Avançados em Python/map.py | 806aca0470484fbc35ab83459de062ce261c55a5 | [] | no_license | IgorEM/Estudos-Sobre-Python | d24963164125278595159961689d1b4f0e27355d | c82814d79831b3d225e5ccd3ac391c895a6f83de | refs/heads/master | 2022-11-26T13:21:58.744771 | 2020-08-08T22:28:20 | 2020-08-08T22:28:20 | 286,119,410 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 351 | py | #map
def dobro(x):
return x*2
valor = [1,2,3,4,5,6]
valor_dobrado = map(dobro, valor)
valor_dobrado = list(valor_dobrado) #list converte em lista o valor dobrado em lista
print(valor_dobrado)
"""
for i in valor_dobrado:
print(i)
"""
#print(dobro(valor)) #imprime a lista duas vezes, nao da o dobro dos numeros
#print(dobro(3)) | [
"noreply@github.com"
] | noreply@github.com |
18bcd592602565d872d7f01d93edeb9e88cda89b | ab3dabbb0197c92618eb407e2f0aeb270ffe9a43 | /douban/middlewares.py | 3f8437ef183deac2993123968961ec535bd8158b | [
"MIT"
] | permissive | pighui/douban | 6f349a31c57cf6455e67106f72c365a08cd3dbca | 6c592988fe404bdec3f8d1333248c191ccd1652d | refs/heads/master | 2022-12-21T22:23:25.021995 | 2019-11-06T11:47:27 | 2019-11-06T11:47:27 | 219,964,132 | 1 | 0 | MIT | 2022-12-08T06:50:12 | 2019-11-06T09:51:05 | Python | UTF-8 | Python | false | false | 1,139 | py | # -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
import logging
import random
from scrapy.downloadermiddlewares.useragent import UserAgentMiddleware
class RandomProxyMiddleware(object):
logger = logging.getLogger(__name__)
def process_request(self, request, spider):
self.logger.debug("Using Proxy")
request.meta['proxy'] = 'http://122.112.231.109:9999'
return None
def process_response(self, request, response, spider):
response.status = 202
return response
class RandomUserAgentMiddleware(UserAgentMiddleware):
def __init__(self, user_agent_list):
super().__init__()
self.user_agent_list = user_agent_list
@classmethod
def from_crawler(cls, crawler):
return cls(user_agent_list=crawler.settings.get('USER_AGENT_LIST'))
def process_request(self, request, spider):
user_agent = random.choice(self.user_agent_list)
if user_agent:
request.headers['User-Agent'] = user_agent
return None
| [
"pighui233@163.com"
] | pighui233@163.com |
4182d35a44ea1ade31d9765203970039c60774ea | 636d06456fa85100e2fc6b054ac9068bb1ae71c8 | /polls/migrations/0002_auto_20200402_1442.py | 8753a378b16bc0aca2ec5e2bfd89cb9332e963ce | [] | no_license | andreslearns/django-pollsapp | 20f461a2a5545106e3b2e5249bb6c14a919bcf0b | a5071ad55c4702e858469c4473efab96441828b3 | refs/heads/master | 2022-04-14T10:07:21.217823 | 2020-04-03T08:13:55 | 2020-04-03T08:13:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 361 | py | # Generated by Django 3.0.4 on 2020-04-02 06:42
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('polls', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='question',
old_name='question',
new_name='question_text',
),
]
| [
"soltesandrew@gmail.com"
] | soltesandrew@gmail.com |
0cbd5e2dd23c9b19cd6082bfb61f2a869de91d23 | 0a75d88ee1dfe6376975cebacab368b6bfd8b8b0 | /assignmentone.py | 774fc8a8f6a1daded765ed95452fcc4420e74ec6 | [] | no_license | SP-18-Data-Analysis/week-1-assignment-JoshuaDG | dce23d0495dc9d1c978494195a2bc434877d1832 | e89ff56f97911de5cb40fa78b00d768b49bf40f4 | refs/heads/master | 2021-05-10T20:25:12.297176 | 2018-02-10T22:06:39 | 2018-02-10T22:06:39 | 118,187,211 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 266 | py |
"""
Joshua D. Gonzalez
assignment one
"""
a = ['i','dont','know','anything','about','github']
print (a)
listn = [1,2,3,4,5,6,7,8,9,10]
for num in listn:
print( "list stored", num)
c = {0:'ele',1:'elem2',3:'elem3' } # emtpy map
c[0] = 'test'
print(c)
| [
"noreply@github.com"
] | noreply@github.com |
68fe5ad5f595676caae93551cace08269f415827 | 4409b3ff85f19d221905ab6538b6ac084ee5b92f | /Learn3.py | c2695374a1350663bf98ecaadbccbf645a414eb3 | [] | no_license | forestdan/PythonLearn | 47f7f5ac1b0e70a59c4cac22e0bb5eda9c6f0352 | 1b7ad1e19fbdacc4ab0e6eb9c2055f22cb37f40d | refs/heads/master | 2020-08-16T00:13:55.723759 | 2020-02-02T11:54:49 | 2020-02-02T11:54:49 | 215,425,118 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 632 | py | # a = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
# # length of the list
# print("the length of this list is " + str(len(a)))
# # access the list
# print(a[0])
# print(a[1])
# print(a[2])
# # access all elements of the list
# print(a)
# # add elements to the list
# a.append(11)
# print(a)
# # add elements to the position
# a.insert(1, 13)
# print(a)
# # delete the end of the list
# a.pop()
# print(a)
# # delete the position of the list
# a.pop(1)
# print(a)
# print("--------this is the end of the list test-------")
# tuble can not be changed
b = (1, 2, 3, 4)
print(b)
# mix tuple
c = ("a", 1, ["2.1", 2.2])
print(c)
c[2][0] = 2.1
print(c) | [
"danruixuan19910622@gmail.com"
] | danruixuan19910622@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.