blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ce86423ef6e78740ffb5be412b7e67111b4d140c | 23d04ada4663d8f70632923d848ac6c7b823d8d4 | /160.Intersection_of_Two_Linked_Lists/Solution.py | c6c2ce6a8b92f2fe30cc69893c9b525e06be11bd | [] | no_license | yangliunk1987/LearningLeetcode | 581a5d65f5509115f81dae22eb4ffaa9770c5373 | 621142f939d85746f870c28864e987a40b959903 | refs/heads/master | 2020-11-29T06:23:24.781327 | 2020-02-20T07:40:31 | 2020-02-20T07:40:31 | 230,045,025 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 650 | py | # Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
    def getIntersectionNode(self, headA, headB):
        """Return the first node shared by lists headA and headB, or None.

        Two-pointer walk: each cursor traverses its own list and then the
        other one, so both cover lenA + lenB steps in total and line up at
        the intersection node — or both fall off the end simultaneously.
        """
        if headA is None or headB is None:
            return None
        walkerA, walkerB = headA, headB
        meeting = None
        while walkerA or walkerB:
            # A cursor that ran off the end restarts on the other list.
            if not walkerA:
                walkerA = headB
            if not walkerB:
                walkerB = headA
            if walkerA == walkerB and meeting is None:
                meeting = walkerA
            walkerA, walkerB = walkerA.next, walkerB.next
        return meeting
"yangliunk1987@gmail.com"
] | yangliunk1987@gmail.com |
7273ecf4c121aecc427b72cbeff56b79f163d551 | 98857b21f9717ba8c042036383711e65a46e964b | /week1.py | 2c55f30ce12f67fee2c4ef360746d79461d4e248 | [] | no_license | techmentalist/Coursera-Capstone | ad18f05f24f4a2b0bcaa46ab2b5c8383ae2495ae | fb0abf63d0ce7fefb9c6a9b2619fa9e94c9c0069 | refs/heads/master | 2020-05-02T22:11:56.064359 | 2019-03-28T16:46:42 | 2019-03-28T16:46:42 | 178,244,792 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 82 | py | import pandas as pd
import numpy as np

# Week-1 sanity check for the Coursera Capstone course notebook.
print("Hello Capstone Project Course!")
| [
"sharmaatul11@yahoo.com"
] | sharmaatul11@yahoo.com |
8ea6255f3e3bf077a3bcd2fa96f4f711d25a55a1 | e29e2079ba460ad799d17c7c4f0ba9378cd201b1 | /DerinOgrenme/cnn_test.py | bbc358106a52b997f875cf297d3461bc30ae67d3 | [
"Apache-2.0"
] | permissive | onselaydin/pytry | 3d67546d3218e92fcaded705b0c4d00f40a1c2c4 | 314aa50b6f8535e275dc8a2edd0c21637fb5a745 | refs/heads/master | 2023-04-03T09:57:01.280893 | 2021-04-17T20:03:29 | 2021-04-17T20:03:29 | 171,333,523 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,123 | py | import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("data/MNIST/", one_hot=True, reshape=False)
# Input placeholders: 28x28 grayscale images and one-hot labels for 10 digits.
x = tf.placeholder(tf.float32, [None, 28, 28, 1])
y_true = tf.placeholder(tf.float32, [None, 10])

# Number of filters in the first and second convolutional layers.
filter1 = 16
filter2 = 32

# Conv layer 1: 5x5 kernels, 1 input channel -> filter1 feature maps.
weight_1 = tf.Variable(tf.truncated_normal([5, 5, 1, filter1], stddev=0.1))
bias_1 = tf.Variable(tf.constant(0.1, shape=[filter1]))

# Conv layer 2: 5x5 kernels, filter1 -> filter2 feature maps.
weight_2 = tf.Variable(tf.truncated_normal([5, 5, filter1, filter2], stddev=0.1))
bias_2 = tf.Variable(tf.constant(0.1, shape=[filter2]))

# Fully connected layer: flattened 7x7xfilter2 maps -> 256 neurons.
weight_3 = tf.Variable(tf.truncated_normal([7*7*filter2, 256], stddev=0.1))  # This layer holds 256 neurons.
bias_3 = tf.Variable(tf.constant(0.1, shape=[256]))

# Output layer: 256 -> 10 class logits.
weight_4 = tf.Variable(tf.truncated_normal([256, 10], stddev=0.1))
bias_4 = tf.Variable(tf.constant(0.1, shape=[10]))

# Images are [28, 28, 1]; strides = [batch, x, y, depth]; SAME padding keeps the spatial size.
y1 = tf.nn.relu(tf.nn.conv2d(x, weight_1, strides=[1,1,1,1], padding='SAME') + bias_1)
y1 = tf.nn.max_pool(y1, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')  # output is 14x14x16
y2 = tf.nn.relu(tf.nn.conv2d(y1, weight_2, strides=[1,1,1,1], padding='SAME') + bias_2)  # output will be 14x14x32
y2 = tf.nn.max_pool(y2, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')  # output 7x7x32

# Flatten and run the two dense layers; softmax turns logits into class probabilities.
flattened = tf.reshape(y2, shape=[-1, 7 * 7 * filter2])
y3 = tf.nn.relu(tf.matmul(flattened, weight_3) + bias_3)
logits = tf.matmul(y3, weight_4) + bias_4
y4 = tf.nn.softmax(logits)

# Cross-entropy loss averaged over the batch.
xent = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y_true)
loss = tf.reduce_mean(xent)

# Accuracy: fraction of predictions matching the one-hot labels.
correct_prediction = tf.equal(tf.argmax(y4, 1), tf.argmax(y_true, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

# Adam optimizer with learning rate 5e-4.
optimizer = tf.train.AdamOptimizer(5e-4).minimize(loss)

sess = tf.Session()
sess.run(tf.global_variables_initializer())

batch_size = 128
loss_graph = []  # per-iteration training-loss history for plotting
def training_step(iterations):
    """Run `iterations` mini-batch Adam updates, logging accuracy every 100 steps.

    Uses the module-level graph and session (sess, optimizer, loss, accuracy,
    mnist, batch_size) and appends each batch loss to loss_graph.
    """
    for i in range(iterations):
        # next_batch cycles through a shuffled view of the training set
        x_batch, y_batch = mnist.train.next_batch(batch_size)
        feed_dict_train = {x: x_batch, y_true: y_batch}
        [_, train_loss] = sess.run([optimizer, loss], feed_dict=feed_dict_train)
        loss_graph.append(train_loss)
        if i % 100 == 0:
            # accuracy on the current training batch, not on held-out data
            train_acc = sess.run(accuracy, feed_dict=feed_dict_train)
            print('Iterations:', i, 'Training accuracy:', train_acc, 'Training loss:', train_loss)
# Feed dict covering the full MNIST test split.
feed_dict_test = {x: mnist.test.images, y_true: mnist.test.labels}

def test_accuracy():
    """Print classification accuracy over the entire MNIST test set."""
    acc = sess.run(accuracy, feed_dict=feed_dict_test)
    print('Testing accuracy:', acc)
def plot_images(images, cls_true, cls_pred=None):
    """Show a 3x3 grid of digit images labelled with the true class and,
    when given, the predicted class."""
    assert len(images) == len(cls_true) == 9
    fig, axes = plt.subplots(3, 3)
    fig.subplots_adjust(hspace=0.3, wspace=0.3)
    for idx, ax in enumerate(axes.flat):
        ax.imshow(images[idx].reshape(28, 28), cmap='binary')
        caption = ("True: {0}".format(cls_true[idx])
                   if cls_pred is None
                   else "True: {0}, Pred: {1}".format(cls_true[idx], cls_pred[idx]))
        ax.set_xlabel(caption)
        # hide the axis ticks — only the image matters
        ax.set_xticks([])
        ax.set_yticks([])
    plt.show()
def plot_example_errors():
    """Plot the first nine test images the current model misclassifies."""
    # integer class labels derived from the one-hot test labels
    mnist.test.cls = np.argmax(mnist.test.labels, axis=1)
    y_pred_cls = tf.argmax(y4, 1)
    correct, cls_pred = sess.run([correct_prediction, y_pred_cls], feed_dict=feed_dict_test)
    # boolean mask selecting the misclassified samples
    incorrect = (correct == False)
    images = mnist.test.images[incorrect]
    cls_pred = cls_pred[incorrect]
    cls_true = mnist.test.cls[incorrect]
    plot_images(images=images[0:9], cls_true=cls_true[0:9], cls_pred=cls_pred[0:9])
# Train for 10k iterations, report test accuracy, show misclassified digits,
# then plot the training-loss curve.
training_step(10000)
test_accuracy()
plot_example_errors()

plt.plot(loss_graph, 'k-')
plt.title('Loss Grafiği')
plt.xlabel('Iterations')
plt.ylabel('Loss')
"""
PS C:\pytry> python
Python 3.7.1 (default, Dec 10 2018, 22:54:23) [MSC v.1915 64 bit (AMD64)] :: Anaconda, Inc. on win32
Type "help", "copyright", "credits" or "license" for more information.
>>> 1e-4
0.0001
>>> 5e-4
0.0005
>>> 5e4
50000.0
>>>
""" | [
"onselaydin@gmail.com"
] | onselaydin@gmail.com |
df1a0d2ea34b9cef717f3c4cae694a6f80af7e54 | 15a0cc718a4b2089a8467314e05cfc56b4e825cf | /activityRecognition.py | 20c66af199b06e42a84c3352a1d17d11f13c457d | [] | no_license | jettblu/activityClassifier | 29e77be9190f67cfe97554314e14b9a30d44695e | 6d9064367f6120ae78a12819a82286179cc06498 | refs/heads/main | 2023-01-31T01:17:19.869131 | 2020-12-17T04:48:53 | 2020-12-17T04:48:53 | 322,181,060 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,093 | py | import loadCSV
# system libraries
import joblib
from os import listdir
from os.path import isfile, join
from collections import Counter
# data packages
from scipy.stats import kurtosis, skew, pearsonr
import numpy as np
import plotly.graph_objects as go
# signal processing libraries
from sklearn import decomposition
from sklearn.preprocessing import *
from scipy import signal
# visualization libraries
from plotmesh import plot_decision_boundaries
import seaborn as sns
import matplotlib.pyplot as plt
# ML models
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier,GradientBoostingClassifier
from sklearn.svm import SVC
# imbalanced learn libraries
from imblearn.over_sampling import SMOTE
from imblearn.under_sampling import RandomUnderSampler
from sklearn.model_selection import StratifiedKFold
# trims the beginning and end of sample to avoid pre/post activity noise
# trimAmount is in seconds
def trimData(sampleObj, trimAmount):
# converts trim amount to milliseconds
trimAmount = trimAmount
sampleRate = sampleObj.sampleRate
# number of points to trim from beginning and end
numTrimPoints = int(trimAmount*sampleRate)
adjustedYawLength = len(sampleObj.yaw)-numTrimPoints
sampleObj.times = sampleObj.times[numTrimPoints:adjustedYawLength]
sampleObj.accelX = sampleObj.accelX[numTrimPoints:len(sampleObj.yaw)-numTrimPoints]
sampleObj.accelY = sampleObj.accelY[numTrimPoints:len(sampleObj.yaw)-numTrimPoints]
sampleObj.accelZ = sampleObj.accelZ[numTrimPoints:len(sampleObj.yaw)-numTrimPoints]
sampleObj.roll = sampleObj.roll[numTrimPoints:len(sampleObj.yaw)-numTrimPoints]
sampleObj.pitch = sampleObj.pitch[numTrimPoints:len(sampleObj.yaw) - numTrimPoints]
sampleObj.yaw = sampleObj.yaw[numTrimPoints:len(sampleObj.yaw)-numTrimPoints]
sampleObj.totalTime = (sampleObj.times[len(sampleObj.times) - 1] - sampleObj.times[0]) / 1000
def resampleData(activityObject, newSampleRate):
    """Resample every motion channel to newSampleRate Hz using Fourier
    resampling, and record the new rate on the object.

    NOTE(review): reads activityObject.totaltime (lower case), while
    trimData writes totalTime — confirm both attributes exist on the
    objects produced by loadCSV.readData.
    """
    activityObject.newSampleRate = newSampleRate
    # total point count = rate * whole seconds of recording
    numPoints = newSampleRate * int(activityObject.totaltime)
    for channel in ('accelX', 'accelY', 'accelZ', 'roll', 'pitch', 'yaw'):
        resampled = signal.resample(getattr(activityObject, channel), numPoints)
        setattr(activityObject, channel, resampled)
# splits single audio sample into multiple samples of window size
def windowData(rawData, windowSize):
    """Split rawData into consecutive chunks of windowSize elements.

    The final chunk may be shorter when len(rawData) is not a multiple
    of windowSize.
    """
    chunks = []
    for start in range(0, len(rawData), windowSize):
        chunks.append(rawData[start:start + windowSize])
    return chunks
def preprocess(activityObject, trimAmount, newSampleRate):
    """Resample to newSampleRate Hz, trim trimAmount seconds from each end,
    then split every channel into windows of one second (newSampleRate
    points) each."""
    resampleData(activityObject, newSampleRate)
    trimData(activityObject, trimAmount)
    windowSize = activityObject.newSampleRate
    for channel in ('accelX', 'accelY', 'accelZ', 'pitch', 'roll', 'yaw'):
        windowed = windowData(getattr(activityObject, channel), windowSize)
        setattr(activityObject, channel, windowed)
# returns feature vector for activity object
def getFeatureVector(activityObject):
    """Resample/trim/window the session, then build one statistical feature
    vector per one-second window.

    Per window: std, mean, kurtosis and skew of each of the six motion
    channels (in that function order), followed by two pearsonr values for
    (accelX, accelZ) and (pitch, roll).
    """
    preprocess(activityObject, 3, 50)
    windowFeatures = []
    statFns = (np.std, np.mean, kurtosis, skew)
    for w in range(len(activityObject.accelX)):
        channels = (
            activityObject.accelX[w],
            activityObject.accelY[w],
            activityObject.accelZ[w],
            activityObject.yaw[w],
            activityObject.pitch[w],
            activityObject.roll[w],
        )
        # every statistic applied to every channel, statistic-major order
        vec = [fn(channel) for fn in statFns for channel in channels]
        # NOTE(review): pearsonr(...)[1] is the p-value, not the correlation
        # coefficient — confirm that is intentional before changing.
        vec.append(pearsonr(channels[0], channels[2])[1])
        vec.append(pearsonr(channels[4], channels[5])[1])
        windowFeatures.append(vec)
    return windowFeatures
def _loadActivityData(dataPath, isPredict):
    """Shared loader for one activity directory.

    Reads every file directly under dataPath with loadCSV.readData and
    extracts its windowed feature vectors. Returns the list of per-session
    feature vectors; with isPredict=True also returns the raw sample
    objects. Factored out of seven formerly copy-pasted loadXxxData
    functions so they stay consistent.
    """
    sampleObjs = []
    fv = []
    # every file directly inside the activity directory is one session
    sessionFiles = [f for f in listdir(dataPath) if isfile(join(dataPath, f))]
    for file in sessionFiles:
        # object carrying the raw sample attributes
        sampleObj = loadCSV.readData(dataPath + file)
        sampleObjs.append(sampleObj)
        # windowed feature vectors for the session
        fv.append(getFeatureVector(sampleObj))
    if isPredict:
        return fv, sampleObjs
    return fv

def loadBikingData(isPredict=False):
    """Feature vectors for every biking session (plus raw samples if isPredict)."""
    return _loadActivityData('biking/', isPredict)

def loadRunningData(isPredict=False):
    """Feature vectors for every running session (plus raw samples if isPredict)."""
    return _loadActivityData('running/', isPredict)

def loadStairsUpData(isPredict=False):
    """Feature vectors for every stairs-up session (plus raw samples if isPredict)."""
    return _loadActivityData('stairs up/', isPredict)

def loadStairsDownData(isPredict=False):
    """Feature vectors for every stairs-down session (plus raw samples if isPredict)."""
    return _loadActivityData('stairs down/', isPredict)

def loadStandingData(isPredict=False):
    """Feature vectors for every standing session (plus raw samples if isPredict)."""
    return _loadActivityData('standing/', isPredict)

def loadSquatsData(isPredict=False):
    """Feature vectors for every squats session (plus raw samples if isPredict)."""
    return _loadActivityData('squats/', isPredict)

def loadWalkingData(isPredict=False):
    """Feature vectors for every walking session (plus raw samples if isPredict)."""
    return _loadActivityData('walking/', isPredict)
# establish labels and combined data set of all activities
def setLabelsAndData(useStored=False, store=True, predict=False):
    """Assemble the (data, labels) arrays across all seven activities.

    With useStored=True, previously cached .npy feature files are loaded
    instead of re-extracting features. With store=True the freshly built
    arrays are cached back to disk. `predict` is currently unused.
    """
    # use stored features if specified
    if useStored:
        return (np.load('data.npy', allow_pickle=True),
                np.load('labels.npy', allow_pickle=True))
    # load raw session data per activity (original read order preserved)
    biking = loadBikingData()
    running = loadRunningData()
    squats = loadSquatsData()
    stairsDown = loadStairsDownData()
    stairsUp = loadStairsUpData()
    walking = loadWalkingData()
    standing = loadStandingData()
    # list position doubles as the integer class label (0..6)
    activities = [biking, running, squats, standing, stairsDown, stairsUp, walking]
    labels = []
    data = []
    for classIdx, activity in enumerate(activities):
        for session in activity:
            # one label per window in the session
            labels.extend([classIdx] * len(session))
            data.extend(session)
    if store:
        np.save('data.npy', np.array(data))
        np.save('labels.npy', np.array(labels))
    return np.array(data), np.array(labels)
# scales and reduces dimensionality of feature vectors
def normalizeFeatures(data, visualize=False, isPredict=False):
    """Scale features to [-1, 1] and project onto 5 principal components.

    Training mode fits a MaxAbsScaler and PCA and persists both under
    normalizers/; prediction mode (isPredict=True) reloads and applies the
    previously fitted transforms instead of refitting.
    """
    if isPredict:
        # reuse the transforms fitted during training
        scaler = joblib.load('normalizers/maxAbs.pkl')
        projector = joblib.load('normalizers/pca.pkl')
        return projector.transform(scaler.transform(data))

    scaler = MaxAbsScaler().fit(data)
    joblib.dump(scaler, 'normalizers/maxAbs.pkl')
    data = scaler.transform(data)

    projector = decomposition.PCA(n_components=5)
    projector.fit(data)
    joblib.dump(projector, 'normalizers/pca.pkl')
    data = projector.transform(data)

    if visualize:
        # one density plot per principal component
        for col in range(data.shape[1]):
            sns.kdeplot(data[:, col])
        plt.show()
    return data
# calculates the mean accuracy for a given classifier over a number of trials
def getAccuracy(classifier, data, labels):
    """Mean accuracy of `classifier` under 10-fold stratified cross-validation.

    Training folds are oversampled with SMOTE before fitting; test folds are
    left untouched. Side effects: prints per-fold class counts and persists
    the classifier (as fitted on the final fold) to classifiers/<repr>.pkl.
    """
    print(f'Testing {classifier}')
    testScores = []
    # split data using stratified K fold which accounts for class imbalances
    cv = StratifiedKFold(n_splits=10, random_state=65, shuffle=True)
    # make predictions
    for train_index, test_index in cv.split(data, labels):
        dataTrain, dataTest, labelsTrain, labelsTest = data[train_index], data[test_index], labels[train_index], labels[test_index]
        print(Counter(labelsTrain))
        # oversample minority classes in the training fold only
        sm = SMOTE(random_state=2)
        # NOTE(review): fit_sample was renamed fit_resample in newer
        # imbalanced-learn releases — confirm the pinned version.
        dataTrain, labelsTrain = sm.fit_sample(dataTrain, labelsTrain)
        print(Counter(labelsTrain))
        # under = RandomUnderSampler(sampling_strategy=.5)
        # dataTrain, labelsTrain = under.fit_sample(dataTrain, labelsTrain)
        classifier.fit(dataTrain, labelsTrain)
        # create confusion matrix
        testScores.append(classifier.score(dataTest, labelsTest))
    # NOTE(review): only the final fold's fit is saved — confirm intended.
    joblib.dump(classifier, f'classifiers/{str(classifier)}.pkl')
    return np.mean(testScores)
# returns the accuracy for a series of classifiers
def classify(useStored=True, store=True, visualize=False):
    """Evaluate an ensemble of classifiers on the activity dataset.

    Returns a dict mapping classifier repr -> mean cross-validated accuracy
    and prints the ensemble-average accuracy.
    """
    data, labels = setLabelsAndData(useStored=useStored, store=store)
    # NOTE(review): visualize=True is hard-coded here regardless of this
    # function's `visualize` argument — confirm that is intended.
    data = normalizeFeatures(data, visualize=True)
    print('normalized')
    clfs = [RandomForestClassifier(n_estimators=300), KNeighborsClassifier(n_neighbors=10), SVC(kernel="rbf"),
            GradientBoostingClassifier(n_estimators=150)]
    # initializes dictionary that will contain classifier as a key and accuracy as a value
    accuracies = dict()
    if visualize:
        meshPlots(data, labels)
    # retrieves accuracy of each classifier
    for clf in clfs:
        accuracy = getAccuracy(clf, data, labels)
        accuracies[str(clf)] = accuracy
    cumulative = np.mean(list(accuracies.values()))
    print(f"\tFINAL ACCURACY\nAchieved using ensemble of algorithms\nMean Accuracy: "
          f"{cumulative}\n"
          f"You'll find accuracies produced from classifier tests below\n\t------------")
    return accuracies
def meshPlots(data, labels):
    """Show decision-boundary mesh plots (first two features) for three model families."""
    # create mesh plots for first two features with given classifiers
    plt.figure()
    plt.title("random Forest")
    plot_decision_boundaries(data, labels, RandomForestClassifier, n_estimators=300)
    plt.show()
    plt.figure()
    plt.title("SVC")
    plot_decision_boundaries(data, labels, SVC, kernel="rbf")
    plt.show()
    plt.figure()
    plt.title("Nearest Neighbors")
    plot_decision_boundaries(data, labels, KNeighborsClassifier, n_neighbors=2)
    plt.show()
def makePredictions(data):
    """Classify each one-second window of `data` and majority-vote a label.

    Every persisted classifier votes once per window; the class with the
    most votes wins. Returns (all raw votes, winning class name).
    """
    data = normalizeFeatures(data, visualize=False, isPredict=True)
    classes = {0: 'biking', 1: 'running', 2: 'squats', 3: 'standing', 4: 'stairsDown', 5: 'stairsUp', 6: 'walking'}
    # load every persisted classifier from disk
    classifiers = [joblib.load(f'classifiers/{fileName}') for fileName in listdir('classifiers')]
    predictions = []
    for i, timePoint in enumerate(data):
        print(f'Predicting second {i} of sample.')
        predictions.extend(clf.predict([timePoint])[0] for clf in classifiers)
    # tally one vote per (window, classifier) prediction
    votingCount = {}
    for label in predictions:
        votingCount[label] = votingCount.get(label, 0) + 1
    prediction = classes[max(votingCount, key=votingCount.get)]
    # visualizePrediction(predictions=predictions, prediction=prediction)
    return predictions, prediction
'''Intended to create a more comprehensive visual with distinctions between classes, but was in a time crunch.'''
def visualizePrediction(predictions, prediction):
    """Scatter-plot the raw per-window votes for a session (work in progress).

    The commented-out block below sketches planned per-class colour spans.
    """
    # establishes times where time length corresponds to window
    times = [i for i in range(len(predictions))]
    plt.title(f'{prediction} Decisions')
    patches = []
    plt.plot(predictions, 'ro')
    # NOTE(review): times, patches and traces are currently unused.
    traces = []
    # for i, label in enumerate(diarizedDict):
    #     color = randomColor()
    #     patches.append(mpatches.Patch(color=color, label=f'Spaker {i}'))
    #     for start, end in times:
    #         plt.axvspan(start, end + 1, color=color, alpha=0.5)
    # plt.legend(handles=patches)
    plt.show()
# Smoke test: predict every walking session, then run the full
# cross-validated evaluation using the cached features (useStored=True).
for test in loadWalkingData():
    print(makePredictions(test))
print(classify(useStored=True, store=False, visualize=True))
| [
"noreply@github.com"
] | noreply@github.com |
6e06d589ab36e4ea0c4a28dbb5f19654f5117e41 | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_5636311922769920_0/Python/ricbit/fractiles.py | 92d6025f44c5533c922023d088ce4a84b348b55a | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 148 | py | for case in xrange(input()):
k,c,s = map(int, raw_input().split())
print "Case #%d: %s" % (case + 1, ' '.join(str(1+i) for i in xrange(k)))
| [
"alexandra1.back@gmail.com"
] | alexandra1.back@gmail.com |
1f02f72f137ffc6df6f9360684433f68950e7138 | f507ddbb7b07a9e9acec144120bbf7d1a48747a6 | /완전탐색/모의고사남이품3.py | f7d98b3bdd5935c023556ebaaf923d6d5436b7b8 | [] | no_license | Insookim0702/python_Algorithm | 9f3c95b61f07bce46a5dc82c613a23d5aec6d5cd | 56ac719330ce03e2764c372cd37373e1e27a0614 | refs/heads/master | 2023-01-08T04:41:02.935269 | 2020-10-28T08:55:17 | 2020-10-28T08:55:17 | 298,799,593 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 540 | py | def solution(answers):
pattern1 = [1, 2, 3, 4, 5]
pattern2 = [2, 1, 2, 3, 2, 4, 2, 5]
pattern3 = [3, 3, 1, 1, 2, 2, 4, 4, 5, 5]
score = [0, 0, 0]
for x in range(0, len(answers)):
if pattern1[x % 5] == answers[x]: score[0] += 1
if pattern2[x % 8] == answers[x]: score[1] += 1
if pattern3[x % 10] == answers[x]: score[2] += 1
print(score)
mx = max(score)
return [idx + 1 for idx, val in enumerate(score) if val == mx]
# Sample runs from the problem statement's examples.
print(solution([1, 2, 3, 4, 5]))
print(solution([1, 3, 2, 4, 2]))
| [
"insookim0702@gmail.com"
] | insookim0702@gmail.com |
85a54ae7b68f5a184ca3da2d0dcbf51954cf9686 | 8cd2c4c5b62de38c30be0b1cb4573b5d9bb4285a | /border_t.py | 68196306f1cdf545e23d39d3f0c156b0eeaf76e7 | [] | no_license | ais1406/wx_python_test | 6d68ef77442a2f4d26823505e98b4f1ef4fa9a73 | d9c9ed842e5ff0c22babce8411387e76ce43a0b2 | refs/heads/master | 2016-09-06T12:57:53.465272 | 2012-07-12T15:38:30 | 2012-07-12T15:38:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,317 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# gotoclass.py
import wx
class Example(wx.Frame):
    """'Go To Class' dialog demo: a search box, a results area, filter
    checkboxes and Ok/Close buttons laid out with nested BoxSizers."""

    def __init__(self, parent, title):
        # fixed-size top-level frame
        super(Example, self).__init__(parent, title=title,
                                      size=(390, 350))

        self.InitUI()
        self.Centre()
        self.Show()

    def InitUI(self):
        """Build the widget hierarchy inside one vertical BoxSizer."""
        panel = wx.Panel(self)

        font = wx.SystemSettings_GetFont(wx.SYS_SYSTEM_FONT)
        font.SetPointSize(9)

        vbox = wx.BoxSizer(wx.VERTICAL)

        # Row 1: "Class Name" label + single-line text entry.
        hbox1 = wx.BoxSizer(wx.HORIZONTAL)
        st1 = wx.StaticText(panel, label='Class Name')
        st1.SetFont(font)
        hbox1.Add(st1, flag=wx.RIGHT, border=8)
        tc = wx.TextCtrl(panel)
        hbox1.Add(tc, proportion=1)
        vbox.Add(hbox1, flag=wx.EXPAND|wx.LEFT|wx.RIGHT|wx.TOP, border=10)

        vbox.Add((-1, 10))  # vertical spacer

        # Row 2: "Matching Classes" heading.
        hbox2 = wx.BoxSizer(wx.HORIZONTAL)
        st2 = wx.StaticText(panel, label='Matching Classes')
        st2.SetFont(font)
        hbox2.Add(st2)
        vbox.Add(hbox2, flag=wx.LEFT | wx.TOP, border=10)

        vbox.Add((-1, 10))

        # Row 3: multiline results area; proportion=1 lets it absorb
        # any extra vertical space when the frame grows.
        hbox3 = wx.BoxSizer(wx.HORIZONTAL)
        tc2 = wx.TextCtrl(panel, style=wx.TE_MULTILINE)
        hbox3.Add(tc2, proportion=1, flag=wx.EXPAND)
        vbox.Add(hbox3, proportion=1, flag=wx.LEFT|wx.RIGHT|wx.EXPAND,
                 border=10)

        vbox.Add((-1, 25))

        # Row 4: search-option checkboxes.
        hbox4 = wx.BoxSizer(wx.HORIZONTAL)
        cb1 = wx.CheckBox(panel, label='Case Sensitive')
        cb1.SetFont(font)
        hbox4.Add(cb1)
        cb2 = wx.CheckBox(panel, label='Nested Classes')
        cb2.SetFont(font)
        hbox4.Add(cb2, flag=wx.LEFT, border=10)
        cb3 = wx.CheckBox(panel, label='Non-Project classes')
        cb3.SetFont(font)
        hbox4.Add(cb3, flag=wx.LEFT, border=10)
        vbox.Add(hbox4, flag=wx.LEFT, border=10)

        vbox.Add((-1, 25))

        # Row 5: Ok / Close buttons, right-aligned at the bottom.
        hbox5 = wx.BoxSizer(wx.HORIZONTAL)
        btn1 = wx.Button(panel, label='Ok', size=(70, 30))
        hbox5.Add(btn1)
        btn2 = wx.Button(panel, label='Close', size=(70, 30))
        hbox5.Add(btn2, flag=wx.LEFT|wx.BOTTOM, border=5)
        vbox.Add(hbox5, flag=wx.ALIGN_RIGHT|wx.RIGHT, border=10)

        panel.SetSizer(vbox)
if __name__ == '__main__':
    # Create the wx application and show the demo frame.
    app = wx.App()
    Example(None, title='Go To Class')
app.MainLoop() | [
"ais1406@gmail.com"
] | ais1406@gmail.com |
ffa03219d12c1cd7011640b25c51a17e895e0b8d | 3545b7e129753bf4808c1f60e081c91a29a5f0a9 | /ec2/delete_security_group.py | 20b0804451c1aae6dc146b3329c4fd1668581d08 | [
"Apache-2.0"
] | permissive | pceuropa/AWS-CRUD-Manager | 283f1edfb993211c64c2a4272957ffd0e82eb749 | b675798c5ff2d471470449659e8fb8e7b19e723f | refs/heads/master | 2022-12-16T18:21:45.471958 | 2019-03-11T13:31:41 | 2019-03-11T13:31:41 | 150,462,634 | 10 | 2 | Apache-2.0 | 2022-12-08T02:55:33 | 2018-09-26T17:15:11 | Python | UTF-8 | Python | false | false | 808 | py | # Copyright 2010-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# This file is licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License. A copy of the
# License is located at
#
# http://aws.amazon.com/apache2.0/
#
# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import boto3
from botocore.exceptions import ClientError
# Create EC2 client
ec2 = boto3.client('ec2')
# Delete security group
try:
response = ec2.delete_security_group(GroupId='SECURITY_GROUP_ID')
print('Security Group Deleted')
except ClientError as e:
print(e)
| [
"info@pceuropa.net"
] | info@pceuropa.net |
753ebea727be64a72b3dfbff1b574f0a142ce574 | b2968e2b2092971f6fd72f9c72b50b5faf304985 | /zjazd_4/math_examples.py | e68d657f4a70ceb57b0ca23ad3216c6fa53cfe9c | [] | no_license | ArturoWest/pythonbootcamp | 815d0a3d6b29f12efdbd47fc7b7b7dfd18bff24f | fa7b20dfc71dcd80c201f28c72086294e482b075 | refs/heads/master | 2020-03-31T02:55:15.574065 | 2018-12-02T14:35:01 | 2018-12-02T14:35:01 | 151,844,686 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 470 | py | import math
print(math.sin(math.pi/2))  # sanity check: sin(pi/2) == 1.0
print(dir(math))  # list everything the math module exports
"""
Stwórz klasę sfera
s = Sfera(10)
s.promien # 10
s.objetosc() # 4188.78...
s.pole_powierzchni() # 1256.63...
"""
class Kula:
    """A sphere ("kula") of a given radius ("promien")."""

    def __init__(self, r):
        self.promien = r

    def objetosc(self):
        """Return the volume, (4/3)·pi·r^3."""
        r = self.promien
        return (4 / 3) * math.pi * r ** 3

    def pole_powierzchni(self):
        """Return the surface area, 4·pi·r^2."""
        return 4 * math.pi * self.promien ** 2
# Quick manual check with radius 10.
s = Kula(10)
print(s.objetosc())
print(s.pole_powierzchni())
| [
"you@example.com"
] | you@example.com |
5683a00f19823ee84fd3481e813c554da6850ca2 | 083c806d4c9a435e7edb3ba155462fc7ffe6256e | /myvenv/bin/symilar | 4742dee3d79c27914227460754776858908bf612 | [] | no_license | varkha-yadav-7/todo | 29efeeafddd35e782a91ee961476270cfa9468f9 | 36a21bcdf2c1d3266ecaf51f37d787f67409b2d2 | refs/heads/master | 2023-07-18T18:14:48.307027 | 2021-05-22T07:13:57 | 2021-05-22T07:13:57 | 260,203,224 | 0 | 0 | null | 2021-09-22T18:59:29 | 2020-04-30T12:17:29 | Python | UTF-8 | Python | false | false | 263 | #!/Users/varkhayadav/PycharmProjects/todo/myvenv/bin/python3.7
# -*- coding: utf-8 -*-
# Console-script shim generated by pip for pylint's `symilar` tool.
import re
import sys

from pylint import run_symilar

if __name__ == '__main__':
    # Normalize argv[0]: strip the "-script.pyw"/".exe" suffix that Windows
    # launchers append, so the tool reports a clean program name.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(run_symilar())
| [
"varkhay@gmail.com"
] | varkhay@gmail.com | |
33d8103a2f341f6f29a3359b9fa3be7c61b5e3ca | caa7a39055c3451db43b39ffc5e70dc560749334 | /contactus/models.py | ca5c45307ce64df354bddb41c95112374e65bc33 | [] | no_license | OneStage-NITW/website | da2438e3857c03a0c38fa6db6a33619b330a3e0d | af86e38560f16f70a0b74bcf2aeab4d855fbdc74 | refs/heads/master | 2016-08-12T15:17:14.577895 | 2015-05-31T18:10:52 | 2015-05-31T18:10:52 | 36,546,131 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | from django.db import models
# Create your models here.
class Supporter(models.Model):
    """A supporter record, identified only by a display name."""
    name=models.CharField(max_length=100)
| [
"vivekhtc25@gmail.com"
] | vivekhtc25@gmail.com |
aa2ad7839b3b43f163375020ae8564633bedaf40 | 18485626d1d901d9629e31958867397d43701f08 | /json-filter.py | 885cfaef48c88d11f9ab90bf409dd87c5eab6f5b | [] | no_license | dleung-splunk/test | 034d3ee759916bf7097f095c48cb3ca1675849ad | 7893da74aae459135983dcae92d00d1f0a086c05 | refs/heads/master | 2021-12-29T04:19:29.518829 | 2021-12-16T22:14:51 | 2021-12-16T22:14:51 | 30,673,463 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,057 | py | import json
if __name__ == "__main__":
    """Split detections.json into SSE and non-SSE buckets by product tag."""
    det_str = "detections"
    cnt_str = "count"
    sse_dict = {det_str: [], cnt_str: 0}
    others_dict = {det_str: [], cnt_str: 0}
    # Products whose detections belong in the SSE bucket.
    sse_prod_set = {'Splunk Enterprise', 'Splunk Enterprise Security', 'Splunk Cloud'}
    # Context manager fixes the original's leaked input file handle.
    with open('detections.json') as d_file:
        d_dict = json.load(d_file)
    d_list = d_dict[det_str]
    d_count = d_dict[cnt_str]
    for d in d_list:
        prod_set = set(d['tags']['product'])
        # Any overlap with the SSE products routes the detection to SSE.
        if sse_prod_set & prod_set:
            sse_dict[det_str].append(d)
            sse_dict[cnt_str] += 1
        else:
            others_dict[det_str].append(d)
            others_dict[cnt_str] += 1
    with open('sse_out.json', 'w') as sse_file:
        json.dump(sse_dict, sse_file)
    with open('others_out.json', 'w') as others_file:
        json.dump(others_dict, others_file)
    print("Total Count: " + str(d_count))
    print("SSE Count: " + str(sse_dict[cnt_str]))
    print("Others Count: " + str(others_dict[cnt_str]))
| [
"dleung@splunk.com"
] | dleung@splunk.com |
d0b08910b3beb5c40716e6dfb92d061cff82200b | f07eb63af59ae42c11372e6a3329cd4113307055 | /geneactiv_extract.py | dac046495dd9e9ebe10f97ff370154d70bcc5dc1 | [
"BSD-3-Clause"
] | permissive | dptools/dpsleep-extract | c3eafbf842db0bdac95a61a67a4b6d783f4d38b9 | 0bb63bd5f67d9eb02707e30760bf863098f5d291 | refs/heads/master | 2023-02-27T07:14:35.876292 | 2021-02-04T22:57:09 | 2021-02-04T22:57:09 | 288,187,940 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,115 | py | #!/usr/bin/env python
import os
import sys
import pandas as pd
import logging
import argparse as ap
from importlib import import_module
logger = logging.getLogger(os.path.basename(__file__))
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
def main():
    """Discover GENEActiv data under the PHOENIX tree and run a pipeline.

    For every study under <phoenix-dir>/<data-dir> and every consented
    subject inside it, builds the per-subject input/output paths and
    forwards the settings to the selected pipeline module's own argument
    parser and main() entry point.
    """
    argparser = ap.ArgumentParser('PHOENIX wrapper for GENEActiv extract Pipeline')

    # Input and output parameters
    argparser.add_argument('--phoenix-dir',
                           help='Phoenix directory (Default: /ncf/cnl03/PHOENIX/',
                           default='/ncf/cnl03/PHOENIX')
    argparser.add_argument('--consent-dir',
                           help='Consent directory (Default: /ncf/cnl03/PHOENIX/GENERAL',
                           default='/ncf/cnl03/PHOENIX/GENERAL')
    argparser.add_argument('--mtl-dir',
                           help='MTL1 directory',
                           default='/ncf/nrg/sw/lib/extract/master/')
    argparser.add_argument('--pipeline',
                           help='Name of the pipeline to run',
                           required=True)
    argparser.add_argument('--data-type',
                           help='Data type name (ex. "phone" or "actigraphy")',
                           required=True)
    argparser.add_argument('--data-dir',
                           help='Data directory name (ex. "GENERAL" or "PROTECTED")',
                           required=True)
    argparser.add_argument('--phone-stream',
                           help='Required if data-type is "phone" (ex. "surveyAnswers" or "accel")')
    argparser.add_argument('--output-dir',
                           help='Path to the output directory')
    argparser.add_argument('--study',
                           nargs='+', help='Study name')
    argparser.add_argument('--subject',
                           nargs='+', help='Subject ID')

    # Basic targeting parameters
    argparser.add_argument('--input-tz',
                           help='Timezone info for the input. (Default: UTC)',
                           default = 'UTC')
    argparser.add_argument('--ext-mode',
                           help='Data extraction mode (all, new, specific). (Default: new)',
                           default = 'new')
    argparser.add_argument('--ext-date',
                           help='Data extraction date when ext-mode is apec. (Default: 2010-00-00)',
                           default = '2010-00-00')
    argparser.add_argument('--output-tz',
                           help='Timezone info for the output. (Default: America/New_York)',
                           default = 'America/New_York')
    argparser.add_argument('--day-from',
                           help='Output day from. (optional; Default: 1)',
                           type = int, default = 1)
    argparser.add_argument('--day-to',
                           help='Output day to. (optional; By default, process data for all days)',
                           type = int, default = -1)
    args = argparser.parse_args()

    mod = get_module(args.pipeline)
    default_path = os.path.join(args.phoenix_dir, args.data_dir)

    # Gets all studies under each subdirectory
    studies = args.study if args.study else scan_dir(default_path)
    for study in studies:
        study_path = os.path.join(default_path, study)
        # Consent metadata lives at <consent-dir>/<study>/<study>_metadata.csv
        consent_path = os.path.join(args.consent_dir, study, study + '_metadata.csv')
        consents = get_consents(consent_path)
        # logger.info('metdat path is {mt}.'.format(mt=consent_path))
        # logger.info('consent is {mt}.'.format(mt=consents))
        # Gets all subjects under the study directory
        subjects = args.subject if args.subject else scan_dir(study_path)
        for subject in subjects:
            subject_path = os.path.join(study_path, subject)
            # logger.info('Subject path path is {mt}.'.format(mt=subject_path))
            verified = verify_subject(subject, subject_path, consents)
            if not verified:
                continue
            logger.info('Processing {S} in {ST}'.format(S=subject, ST=study))
            # First row of the pivoted consent table for this subject.
            date_from = consents[subject][0]
            data_path = os.path.join(subject_path, args.data_type, 'raw/GENEActiv')
            output_path = args.output_dir if args.output_dir else os.path.join(subject_path,
                                                                               args.data_type,
                                                                               'processed')
            if args.data_type == 'phone':
                mod_parser = mod.parse_args()
                # NOTE(review): the bare "" before args.phone_stream means
                # --filter-dir receives an empty value and the stream name is
                # passed as a separate token -- confirm against the phone
                # pipeline's parser.
                new_args, unknown = mod_parser.parse_known_args([
                    '--date-from', str(date_from),
                    '--read-dir', str(data_path),
                    '--filter-dir', "", str(args.phone_stream),
                    '--output-dir', output_path,
                    '--day-from', str(args.day_from),
                    '--day-to', str(args.day_to),
                    '--input-tz', str(args.input_tz),
                    '--output-tz', str(args.output_tz),
                    '--study', str(study),
                    '--subject', str(subject),
                    '--mtl-dir', str(args.mtl_dir)
                ])
                mod.main(new_args)
            else:
                mod_parser = mod.parse_args()
                new_args, unknown = mod_parser.parse_known_args([
                    '--date-from', str(date_from),
                    '--read-dir', str(data_path),
                    '--output-dir', str(output_path),
                    '--day-from', str(args.day_from),
                    '--day-to', str(args.day_to),
                    '--input-tz', str(args.input_tz),
                    '--output-tz', str(args.output_tz),
                    '--study', str(study),
                    '--ext-mode',str(args.ext_mode),
                    '--ext-date',str(args.ext_date),
                    '--subject', str(subject),
                    '--mtl-dir', str(args.mtl_dir)
                ])
                mod.main(new_args)
    return
# Import module based on user input
def get_module(pipeline):
    """Import and return the pipeline module named *pipeline*.

    Exits the process with status 1 when the import fails, since the
    wrapper cannot do anything useful without a pipeline implementation.
    """
    try:
        # The original wrapped the name in '{P}'.format(P=pipeline), which
        # is a no-op for a string argument; pass it straight through.
        return import_module(pipeline, __name__)
    except Exception as e:
        logger.error(e)
        logger.error('Could not import the pipeline module. Exiting')
        sys.exit(1)
# Ensures data can be processed for the subject
def verify_subject(subject, path, consents):
    """Return True when *subject* can be processed: it is not the consent
    CSV itself, its directory exists on disk, and a consent entry exists.

    Any failure is logged and yields False.
    """
    problem = None
    if subject.endswith('.csv'):
        # The consent spreadsheet sits next to the subject folders; make
        # sure a directory scan did not pick it up as a "subject".
        problem = 'Subject {S} is not a valid subject.'.format(S=subject)
    elif not os.path.isdir(path):
        problem = 'Path {P} does not exist.'.format(P=path)
    elif subject not in consents:
        problem = 'Consent date does not exist for {S}.'.format(S=subject)
    if problem is not None:
        logger.error(problem)
        return False
    return True
# Get consents for the study
def get_consents(path):
    """Load the study metadata CSV at *path* and pivot it so that each
    Subject ID becomes a column holding its Consent value.

    Returns the pivoted DataFrame, or None when the file cannot be read
    or pivoted (a duplicated subject ID makes the pivot ambiguous).
    """
    try:
        raw = pd.read_csv(path, keep_default_na=False, engine='c', skipinitialspace=True)
        # logger.info('metdat is {mt}.'.format(mt=raw))
        pivoted = raw.pivot(
            index='Study',
            columns='Subject ID',
            values='Consent'
        )
        return pivoted.reset_index()
    except Exception as e:
        logger.info('Check if the metadata has duplicated subject IDs.')
        logger.error(e)
        return None
# Check if a directory is valid, then return its child directories
def scan_dir(path):
    """Return the entries directly under *path*, or [] when it is not a
    readable directory.

    NOTE(review): despite the original "child directories" comment,
    os.listdir also returns plain files; callers appear to filter
    later -- confirm.
    """
    if not os.path.isdir(path):
        return []
    try:
        return os.listdir(path)
    except Exception as e:
        logger.error(e)
        return []
# Script entry point: process every study/subject found under PHOENIX.
if __name__ == '__main__':
    main()
| [
"hr860@eris2n4.research.partners.org"
] | hr860@eris2n4.research.partners.org |
92f484c80350747fd2e196ad063c812333d98909 | 22a2a133720bd3c17cf2e5ec493bd75a670fde8e | /app.py | c8d14ddc69aecc2bc3e6b91c9806ca0f37d42683 | [] | no_license | EldritchJS/nlp_container | 0ef9723d76372b832e074746de55164ea4b40608 | 533cd195c884cbfaedee06e161c4f9741e931a9a | refs/heads/master | 2022-07-12T07:12:05.595164 | 2019-12-04T19:06:52 | 2019-12-04T19:06:52 | 225,932,795 | 0 | 0 | null | 2022-06-21T23:42:44 | 2019-12-04T18:35:55 | Python | UTF-8 | Python | false | false | 3,443 | py | import argparse
import logging
import os
import time
import urllib.request as urllib
from json import loads
from kafka import KafkaConsumer
import foolbox
import numpy as np
from foolbox import zoo
from PIL import Image
import requests
from io import BytesIO
import toolz
import gensim
import torch
import nltk
import cytoolz
import pyrouge
import time
def main(args):
    """Idle forever (one-second sleeps) to keep the container alive.

    NOTE(review): *args* is unused here, and the original consumer logic
    now lives in oldmain(), which is never called from the entry point.
    """
    logging.info('Starting')
    while True:
        time.sleep(1)
def oldmain(args):
    """Legacy consumer loop (currently unused -- main() just idles).

    Downloads a foolbox zoo model, then consumes (url, label) messages
    from Kafka, fetches each image, builds an FGSM adversarial example
    and logs the model's prediction for both images.
    """
    logging.info('model={}'.format(args.model))
    model = zoo.get_model(url=args.model)
    logging.info('finished acquiring model')

    logging.info('creating attack {}'.format(args.attack))
    # NOTE(review): args.attack is only logged -- FGSM is hard-coded here.
    attack = foolbox.attacks.FGSM(model)
    logging.info('finished creating attack')

    logging.info('brokers={}'.format(args.brokers))
    logging.info('topic={}'.format(args.topic))
    logging.info('creating kafka consumer')
    consumer = KafkaConsumer(
        args.topic,
        bootstrap_servers=args.brokers,
        value_deserializer=lambda val: loads(val.decode('utf-8')))
    logging.info('finished creating kafka consumer')

    while True:
        for message in consumer:
            image_uri = message.value['url']
            label = message.value['label']
            logging.info('received URI {}'.format(image_uri))
            logging.info('received label {}'.format(label))
            logging.info('downloading image')
            response = requests.get(image_uri)
            img = Image.open(BytesIO(response.content))
            # NOTE(review): PIL's img.size is (width, height); reshaping to
            # (size[0], size[1], 3) assumes square (32x32) images -- confirm.
            image = np.array(img.getdata()).reshape(img.size[0], img.size[1], 3)
            logging.info('downloaded image')
            # Slot 0 holds the original, slot 1 the adversarial example;
            # the buffer shape fixes inputs at 32x32 RGB.
            images = np.ndarray(shape=(2,32,32,3), dtype=np.float32)
            images[0] = image
            adversarial = attack(image, label)
            images[1] = adversarial
            logging.info('adversarial image generated')
            # preds = model.forward(images) # Foolbox v2.0 only
            preds = model.batch_predictions(images)
            orig_inf = np.argmax(preds[0])
            adv_inf = np.argmax(preds[1])
            logging.info('original inference: {} adversarial inference: {}'.format(orig_inf, adv_inf))
def get_arg(env, default):
    """Return the value of environment variable *env*, or *default* when
    it is unset or empty.

    The original compared strings with ``is not ''`` -- an identity
    check that only works by CPython interning accident and raises
    SyntaxWarning on Python 3.8+; use an equality test instead.
    """
    value = os.getenv(env, '')
    return value if value != '' else default
def parse_args(parser):
    """Parse CLI arguments, then let matching environment variables
    (KAFKA_BROKERS, KAFKA_TOPIC, ATTACK, MODEL_URI) override the
    command-line/default values when they are set and non-empty."""
    args = parser.parse_args()
    args.brokers = get_arg('KAFKA_BROKERS', args.brokers)
    args.topic = get_arg('KAFKA_TOPIC', args.topic)
    args.attack = get_arg('ATTACK', args.attack)
    args.model = get_arg('MODEL_URI', args.model)
    return args
# Entry point: build the CLI, apply environment overrides, then run the
# (currently idle) main loop.
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    logging.info('starting kafka-python consumer')
    parser = argparse.ArgumentParser(description='consume some stuff on kafka')
    parser.add_argument(
        '--brokers',
        help='The bootstrap servers, env variable KAFKA_BROKERS',
        default='kafka:9092')
    parser.add_argument(
        '--topic',
        help='Topic to read from, env variable KAFKA_TOPIC',
        default='images')
    parser.add_argument(
        '--attack',
        help='Attack type, env variable ATTACK',
        default='FGSM')
    parser.add_argument(
        '--model',
        help='Foolbox zoo model uri MODEL_URI',
        default='https://github.com/EldritchJS/cifar10_challenge')
    args = parse_args(parser)
    main(args)
    logging.info('exiting')
| [
"jason@localhost.localdomain"
] | jason@localhost.localdomain |
954d4f04753e7d4fd2561471a3d7d2caf2b10d6c | 93c7eebb83b88cd4bfb06b6e5695ad785c84f1d6 | /tazebao/newsletter/migrations/0019_tracking_notes.py | 60750c49486164f1f0ac14d3f046001e45690468 | [] | no_license | otto-torino/tazebao | 960e31a576f4acc7cd4572e589424f54a8e9b166 | 12db8605b5aa9c8bf4f735a03af90d0989018105 | refs/heads/master | 2023-08-09T02:06:14.749976 | 2023-07-28T07:20:23 | 2023-07-28T07:20:23 | 68,196,585 | 5 | 0 | null | 2022-12-08T05:25:04 | 2016-09-14T10:21:21 | HTML | UTF-8 | Python | false | false | 493 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-09-27 12:06
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django schema migration: adds the optional free-text
    # 'notes' column to the Tracking model. Do not edit the frozen values.

    dependencies = [
        ('newsletter', '0018_tracking_type'),
    ]

    operations = [
        migrations.AddField(
            model_name='tracking',
            name='notes',
            field=models.CharField(blank=True, max_length=255, null=True, verbose_name='note'),
        ),
    ]
| [
"abidibo@gmail.com"
] | abidibo@gmail.com |
5d3ccfb77f907e35c2e66ae0604bc0b080fa8b0c | 73d36ad9b31d8118a5cea145dd89511618962836 | /code/my_tests.py | 40fe0ca88fc3ce0b33b33d264b997065079f426d | [] | no_license | aaronchoi5/ml-euchre | 103b8a28b7ebe725f0e0a0225e8fd42ab1c0eef8 | 0bae681d44a0f23d46968613b4629b74438113bf | refs/heads/master | 2020-03-08T18:06:36.940119 | 2018-04-12T01:32:44 | 2018-04-12T01:32:44 | 128,286,847 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 576 | py |
import AIs
def testAI(cur_ai):
# if the attribute does not exist this will raise an attribute error for
# the missing function
cur_ai.playCard
cur_ai.updateInfo
cur_ai.orderUp
cur_ai.pickUp
cur_ai.pickSuit
cur_ai.reset
cur_ai.setHand
# Test that all AIs have the necessary functions
# (Python 2 script: print statements; aborts with AttributeError at the
# first AI missing a required member.)
testAI(AIs.RandomPlay("R"))
print "RandomPlay is good."
testAI(AIs.RealPlayer("RP"))
print "RealPlayer is good."
testAI(AIs.SimpleStat("SS1"))
print "SimpleStat is good."
testAI(AIs.SimpleStat("SS2"))
print "SimpleStat is good."
| [
"choian@mail.uc.edu"
] | choian@mail.uc.edu |
7373a853fc106496505b63aa97cb81a3b4c74a2d | 04740a66d98730afca496eb0cf5e7b5edea5f6e6 | /backend/dataset/strStr/strmatch_16.py | 9c00b55f82dfafeb143e4c6fb29fe88f22448f09 | [] | no_license | mehulthakral/logic_detector | 0c06fbd12d77a02c888d0bbe3e6776a18f2f46e3 | f7a07a6d229b250da9e02d3fac1a12fa51be97e8 | refs/heads/master | 2023-04-12T12:45:29.370502 | 2021-05-05T17:15:02 | 2021-05-05T17:15:02 | 323,953,099 | 2 | 0 | null | 2021-05-03T16:50:44 | 2020-12-23T16:39:28 | null | UTF-8 | Python | false | false | 250 | py | class Solution:
def strStr(self, haystack, needle):
    """Return the index of the first occurrence of *needle* in *haystack*,
    or -1 if absent; an empty needle matches at index 0.

    The needle's hash is used as a cheap pre-filter, but each hash hit is
    confirmed with a real string comparison: equal hashes do not imply
    equal strings, so the original could return a wrong position on a
    hash collision.
    """
    n, h = len(needle), len(haystack)
    hash_n = hash(needle)
    for i in range(h - n + 1):
        candidate = haystack[i:i + n]
        if hash(candidate) == hash_n and candidate == needle:
            return i
    return -1
| [
"mehul.thakral@gmail.com"
] | mehul.thakral@gmail.com |
15920fcc24a062056682be6ad864a866201cf80c | fb062d2f49d36fbba96b41891ef57afb6c989a56 | /Bookstore/store/migrations/0001_initial.py | 2aa827b29d55f37fa10fc3cd14ba3196cae154af | [] | no_license | puneeth-prasahanth/Mysterious_books_latest | e5259d5e3673f5e30a9f1b900bda974ba9e3947a | 674f13ea04127b84daed67fc8d0f4acedf87f9cc | refs/heads/master | 2023-02-20T00:36:18.847860 | 2022-10-01T08:58:14 | 2022-10-01T08:58:14 | 167,383,371 | 0 | 0 | null | 2023-02-07T22:02:13 | 2019-01-24T14:45:26 | Python | UTF-8 | Python | false | false | 713 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the 'Books' table.
    # NOTE(review): 'Auther' and 'Discription' are misspelled but frozen
    # into the schema -- renaming them requires a follow-up migration,
    # not an edit here.

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Books',
            fields=[
                ('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),
                ('title', models.CharField(max_length=200)),
                ('Auther', models.CharField(max_length=200)),
                ('Discription', models.TextField()),
                ('Publish_date', models.DateField(default=django.utils.timezone.now)),
            ],
        ),
    ]
| [
"PuneethPrashanth.Ganapuram"
] | PuneethPrashanth.Ganapuram |
3b18eb6853a190e0be0393739a8e62a04458b12f | 7ce97ef2cb6990e3c2229e5486820a69b88b50d3 | /Thesis/GuelphThesis/prog.py | 723423bb0f4bb7d50ec33b64cb1a8230d604d954 | [] | no_license | qwazzy1990/ladder | b087a7fdb15ea1ed9d9ff39d72826680858e4f35 | aa331299252e46ef859591b72217135d996b61ee | refs/heads/master | 2021-11-07T00:08:32.173164 | 2021-11-06T16:04:46 | 2021-11-06T16:04:46 | 195,143,104 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 495 | py | import os
import sys
if __name__ == "__main__":
    # Join each group of three consecutive lines (stepping by 4, so every
    # fourth, separator line is skipped) from MinLadders.txt into a single
    # line of TestFile.txt.
    try:
        # Context managers replace the original try/finally close, which
        # raised NameError when the first open() failed before `f` was
        # bound. The dead `i += 3` inside the for loop is dropped: the
        # loop variable is reassigned by range() on every iteration.
        with open("MinLadders.txt", "r") as f, open("TestFile.txt", "w") as w:
            lines = f.read().split("\n")
            for i in range(0, len(lines) - 2, 4):
                print(i)
                w.write(lines[i] + lines[i + 1] + lines[i + 2])
                w.write("\n")
    except Exception:
        # Narrowed from a bare `except:` (which also swallowed
        # KeyboardInterrupt/SystemExit); error message preserved.
        print("An Error occurred")
| [
"disalvopatrick@gmail.com"
] | disalvopatrick@gmail.com |
9e33a9f7dd630eb5374aa62e0d7551cc06a64898 | eb9f655206c43c12b497c667ba56a0d358b6bc3a | /python/testData/console/ipython/psi/shellAssignment1.py | 88e2cbe3c3773357bc9f7ebbe8f2e90958767bcb | [
"Apache-2.0"
] | permissive | JetBrains/intellij-community | 2ed226e200ecc17c037dcddd4a006de56cd43941 | 05dbd4575d01a213f3f4d69aa4968473f2536142 | refs/heads/master | 2023-09-03T17:06:37.560889 | 2023-09-03T11:51:00 | 2023-09-03T12:12:27 | 2,489,216 | 16,288 | 6,635 | Apache-2.0 | 2023-09-12T07:41:58 | 2011-09-30T13:33:05 | null | UTF-8 | Python | false | false | 14 | py | my_files = !ls | [
"Anton.Bragin@jetbrains.com"
] | Anton.Bragin@jetbrains.com |
9f6e4bc249718996a818fadbfe92c90923d1438a | 100c931c589d7918c61e26f4c30c0e75b56b6e2f | /src/block.py | 53dbee048a83e5a759c65f776a188e94dfd9679e | [] | no_license | cpantoleon/LodeRunnerPyGame | 9e153021ea78e8fef0761503c1a76d336f955ca5 | 4f4d3e7b512da5cd819da2abe17fd4b89f08cc3b | refs/heads/master | 2022-09-08T08:56:34.705407 | 2020-05-31T16:14:17 | 2020-05-31T16:14:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,792 | py | import pygame
import random
import math
# Window dimensions in pixels.
SCREEN_WIDTH = 800
SCREEN_HEIGHT = 600

# Six-frame animation strip for the spinning coin, in playback order;
# indexed by Block.update().
coin = [pygame.image.load('src/coin/coin1.png'),
        pygame.image.load('src/coin/coin2.png'),
        pygame.image.load('src/coin/coin3.png'),
        pygame.image.load('src/coin/coin4.png'),
        pygame.image.load('src/coin/coin5.png'),
        pygame.image.load('src/coin/coin6.png')]
class Block(pygame.sprite.Sprite):
    """Sprite for the collectible coin: can reposition itself onto a
    random platform row and draws a 6-frame spin animation."""

    def __init__(self, image):
        pygame.sprite.Sprite.__init__(self)
        self.image = pygame.image.load(image)
        # Fractional frame counter for the spin animation; advanced by
        # 0.25 per update, i.e. one frame every four updates.
        self.rotate = 0
        # Fetch the rectangle object that has the dimensions of the image
        # image.
        # Update the position of this object by setting the values
        # of rect.x and rect.y
        self.rect = self.image.get_rect()

    def new_coin(self, level, prev_level):
        """Move the coin to a random spot on a platform row other than
        *prev_level*; return the newly chosen row index.

        NOTE(review): level entries appear to be
        (width, height, x, y, row_index) records -- confirm against the
        level-building code. Indentation of this method was reconstructed
        from a whitespace-mangled source.
        """
        # Pick a new row in [0, 6] different from the previous one.
        while True:
            y = random.randrange(0, 7)
            if y != prev_level:
                prev_level = y
                break
        # Collect the blocks belonging to the chosen row.
        matrix = []
        for i in range(len(level)):
            if (level[i][4]) == prev_level:
                matrix.append(level[i])
        # Find the horizontal extent [xstart, xend) covered by the row.
        xstart = 5000
        xend = -1
        for i in matrix:
            if (i[2]) < xstart:
                xstart = i[2]
            if xend == -1:
                xend = i[2] + i[0]
            elif i[2] > xend:
                xend = i[2] + i[0]
        # Drop the coin at a random x within the row, sitting on top of it.
        new_pos = random.randrange(xstart, xend - 50)
        self.rect.x = new_pos
        self.rect.y = matrix[0][3] - matrix[0][1]
        return prev_level

    def update(self, screen):
        """Draw the current animation frame at the coin's position and
        advance the spin counter."""
        if self.rotate >= 6:
            self.rotate = 0
        # int(self.rotate) selects the frame; the extra `// 1` is a no-op.
        screen.blit(coin[int(self.rotate) // 1], (self.rect.x, self.rect.y))
        self.rotate += 0.25
| [
"noreply@github.com"
] | noreply@github.com |
3e4f4b9f8df7ff6172160cbf492c5a41e003b4a3 | 1354c17f5a25ad17f585d875542bd6de252fcaee | /models.py | 673a8695c7cd6b76c0d387c62352ef983882c9fe | [] | no_license | zenle/travel-project | 8ecb58d1d211eb0ccc93cdb833fb29cf77cb364d | df846e4dc7ad047f24fd56b9fd54ea0237262570 | refs/heads/master | 2020-05-01T00:01:34.706780 | 2019-03-31T11:26:47 | 2019-03-31T11:26:47 | 177,158,347 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 995 | py | # flask_graphene_mongo/models.py
from datetime import datetime
from mongoengine import Document
from mongoengine.fields import (
DateTimeField, ReferenceField, StringField, ListField
)
class Department(Document):
    # MongoDB collection backing this document type.
    meta = {'collection': 'department'}
    name = StringField()
class Role(Document):
    # MongoDB collection backing this document type.
    meta = {'collection': 'role'}
    name = StringField()
class Employee(Document):
    meta = {'collection': 'employee'}
    name = StringField()
    # Default is the callable datetime.now (not a fixed import-time value).
    hired_on = DateTimeField(default=datetime.now)
    department = ReferenceField(Department)
    role = ReferenceField(Role)
class Flight(Document):
    meta = {'collection': 'flight'}
    # All attributes are stored as free-form strings (including price).
    airline = StringField()
    duration = StringField()
    price = StringField()
    airports = StringField()
    stops = StringField()
    layover = StringField()
    flight_time = StringField()
class FlightList(Document):
    meta = {'collection': 'flightList'}
    destination = StringField()
    # References into the 'flight' collection.
    flights = ListField(ReferenceField(Flight))
| [
"senvanle4@gmail.com"
] | senvanle4@gmail.com |
05b34fd67de2c72f92f8477a5fa1aaec33b9b980 | 1bb9a830f5560f6c4d0e52f65a389a4e53e5d3b6 | /palindrome-python.py | d15c22aac1cad6b837bf69628c72db4865461a6e | [] | no_license | 3rt4nm4n/palindrome-python | e18be2f11edf0a1d3c76421f3d6004f7713f4493 | 1d33ff8a68654bb230dacad0e66f5bcb15395c6a | refs/heads/master | 2023-07-09T17:47:14.387104 | 2021-08-26T19:57:15 | 2021-08-26T19:57:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 693 | py | import pandas as pd
import string
import re
import numpy as np
# Load a column of names from a hard-coded local spreadsheet.
data=pd.read_excel(r"C:\Users\nigbu\Desktop\isimler.xlsx")
mylist = data['isimler'].tolist()
pali_list=[]
# NOTE(review): this bare attribute expression has no effect -- dead code.
data.isimler
length=len(mylist)
# Collect every name that reads the same reversed.
for x in range(0,length):
    print("Reverse of the "+mylist[x]+": "+mylist[x][::-1])
    if(mylist[x][::-1]==mylist[x]):
        print("\nPalindrome is found: "+mylist[x][::-1]+"\n")
        pali_list.append(mylist[x])
pali_length=len(pali_list)
text_str=""
for i in range(0,pali_length):
    text_str=pali_list[i]
    # encode() yields UTF-8 byte values; subtracting 96 maps ASCII
    # 'a'..'z' onto 1..26. NOTE(review): non-ASCII characters encode to
    # multiple bytes and produce meaningless values here -- confirm the
    # input is lowercase ASCII.
    n_list=list(text_str.encode())
    num_list=np.add(n_list,-96).tolist()
    print(f"{num_list=} --> "+pali_list[i]+" --> Numeric value: "+str(sum(num_list)))
| [
"nigbuk1998@gmail.com"
] | nigbuk1998@gmail.com |
cf5b0c2f3eb01584b6f80865ead561845cd941dd | 4c98d26c485bcd01cef2293155628952f0f8cf23 | /songs/migrations/0004_auto__add_setlistarchive.py | 9134fd20c6f79ba73014f4cbe19192eec1c6f0d5 | [] | no_license | takeahsiaor/inspirepraise | bc643f0e562d1d50860048a172362a04bf7ab0b5 | 5e9e74654f6dec7a503bda71f1d45474af674c39 | refs/heads/master | 2022-11-27T20:43:20.137730 | 2015-10-03T20:53:56 | 2015-10-03T20:53:56 | 18,536,132 | 0 | 0 | null | 2022-11-22T00:33:22 | 2014-04-07T22:03:40 | JavaScript | UTF-8 | Python | false | false | 10,394 | py | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply: create the 'songs_setlistarchive' table and emit the
        post-create signal so South registers the new model."""
        # Adding model 'SetlistArchive'
        db.create_table(u'songs_setlistarchive', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('profile', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['songs.Profile'])),
            ('date', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
            ('setlist', self.gf('django.db.models.fields.CharField')(max_length=300)),
            ('created_by', self.gf('django.db.models.fields.CharField')(max_length=100, blank=True)),
        ))
        db.send_create_signal(u'songs', ['SetlistArchive'])
    def backwards(self, orm):
        """Revert: drop the 'songs_setlistarchive' table."""
        # Deleting model 'SetlistArchive'
        db.delete_table(u'songs_setlistarchive')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'songs.author': {
'Meta': {'object_name': 'Author'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'full_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'songs.book': {
'Meta': {'object_name': 'Book'},
'name': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'num_chapters': ('django.db.models.fields.IntegerField', [], {}),
'order_index': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'})
},
u'songs.chapter': {
'Meta': {'object_name': 'Chapter'},
'book': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['songs.Book']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'num_verses': ('django.db.models.fields.IntegerField', [], {}),
'number': ('django.db.models.fields.IntegerField', [], {})
},
u'songs.ministry': {
'Meta': {'object_name': 'Ministry'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'set_list': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'state_province': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
u'songs.profile': {
'Meta': {'object_name': 'Profile'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ministries': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['songs.Ministry']", 'symmetrical': 'False', 'blank': 'True'}),
'num_song_tags': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'num_verse_tags': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'setlist': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'unique': 'True'})
},
u'songs.publisher': {
'Meta': {'ordering': "('name',)", 'object_name': 'Publisher'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '60', 'blank': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
'state_province': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
u'songs.setlistarchive': {
'Meta': {'object_name': 'SetlistArchive'},
'created_by': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'profile': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['songs.Profile']"}),
'setlist': ('django.db.models.fields.CharField', [], {'max_length': '300'})
},
u'songs.song': {
'Meta': {'object_name': 'Song'},
'authors': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['songs.Author']", 'symmetrical': 'False'}),
'ccli': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'}),
'chords': ('django.db.models.fields.TextField', [], {'default': "''"}),
'key_line': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'}),
'original_key': ('django.db.models.fields.CharField', [], {'max_length': '5', 'blank': 'True'}),
'popularity': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'publication_year': ('django.db.models.fields.IntegerField', [], {'max_length': '4'}),
'publisher': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['songs.Publisher']", 'symmetrical': 'False'}),
'recommended_key': ('django.db.models.fields.CharField', [], {'max_length': '5', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'verses': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['songs.Verse']", 'symmetrical': 'False', 'through': u"orm['songs.SongVerses']", 'blank': 'True'})
},
u'songs.songverses': {
'Meta': {'object_name': 'SongVerses'},
'SV_popularity': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'song': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['songs.Song']"}),
'verse': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['songs.Verse']"})
},
u'songs.verse': {
'Meta': {'object_name': 'Verse'},
'book': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['songs.Book']"}),
'chapter': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['songs.Chapter']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'number': ('django.db.models.fields.IntegerField', [], {})
}
}
complete_apps = ['songs'] | [
"rhsiao2@gmail.com"
] | rhsiao2@gmail.com |
301668fbfd57c65790cb8c5420910f05df89b599 | 5b89542f2c11c5dd860ded8d12b4fb4a929d4a92 | /ex39.py | 11b8e907e4efe79c9cd7247913ba725735217413 | [] | no_license | germancynic/LPTHW | 9b48df3605a2696684cf8b4c1921b5d9a2d64b91 | dcbca9de070f2775675cd89498c35ff14edc8c22 | refs/heads/master | 2021-09-04T20:40:10.969153 | 2018-01-22T08:21:36 | 2018-01-22T08:21:36 | 110,608,827 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,600 | py | # create a mapping of state to abbreviation
# LPTHW exercise 39 -- Python 2 syntax (print statements).
states = {
    'Schleswig-Holstein': 'SH',
    'Lower Saxony': 'LS',
    'Saxony': 'SX',
    'Hamburg': 'HH',
    'Bavaria': 'FB'
}

# create a basic set of states and some cities in them
cities = {
    'LS': 'Lueneburg',
    'SX': 'Leipzig',
    'SH': 'Luebeck'
}

# add some more cities
cities['FB'] = 'Muenchen'
cities['HH'] = 'Hamburg City'

#print out some cities
print '-' * 10
print "Lower Saxony has: ", cities['LS']
print "Saxony has: ", cities['SX']

# print some states
print '-' * 10
print "Schleswig-Holstein's abbreviation is: ", states['Schleswig-Holstein']
print "Sachsen's abbreviation is: ", states['Saxony']

# do it by using the state then citites dict
print '-' * 10
print "Bavaria has: ", cities[states['Bavaria']]
print "Schleswig-Holstein has: ", cities[states['Schleswig-Holstein']]

# print every state abbreviation
print '-' * 10
for state, abbrev in states.items():
    print "%s is abbreviated %s" % (state, abbrev)

# print every city in state
print '-' * 10
for abbrev, city in cities.items():
    print "%s has the city %s" % (abbrev, city)

# now do both at the same time
print '-' * 10
for state, abbrev in states.items():
    print "%s state is abbreviated %s and has city %s" % (
        state, abbrev, cities[abbrev])

print '-' * 10
# safely get a abbreviation by state that might not be there
# (dict.get returns None instead of raising KeyError)
state = states.get('Saarland')
if not state:
    print "Sorry, no Saarland."

# get a city with a default value
city = cities.get('SL', 'Does Not Exist')
print "The city for the state 'SL' is: %s" % city
"noreply@github.com"
] | noreply@github.com |
a7f7f5126462aa55e71d92298e11a485f8588c55 | 791f1eab60b666edc03ff97effa194e663d86f94 | /lesson8/home_work.py | eeefc547323e68c76c71a8514f687961d899c5a0 | [
"MIT"
] | permissive | positron1510/python-developer | 6e945a760af61601fd1720b3fdb3554209a3894d | c91577b9bd35730cd9890c77bc95f8b00f974958 | refs/heads/master | 2020-12-05T15:49:56.210807 | 2020-02-02T19:54:10 | 2020-02-02T19:54:10 | 232,161,639 | 0 | 0 | MIT | 2020-01-20T07:40:57 | 2020-01-06T18:33:42 | Python | UTF-8 | Python | false | false | 746 | py | import time
import os
import psutil
def time_and_memory(f):
    """Decorator that prints process RSS (MB) before and after calling the
    wrapped function, plus the elapsed wall time, and returns *f*'s result.

    Fixes two defects in the original: the wrapper discarded the wrapped
    function's return value (every decorated call yielded None), and it
    did not preserve the wrapped function's name/docstring.
    """
    import functools  # local import keeps this block self-contained

    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        start = time.time()
        proc = psutil.Process(os.getpid())
        print('Memory start: {}'.format(str(proc.memory_info().rss / 1000000)))
        result = f(*args, **kwargs)
        proc = psutil.Process(os.getpid())
        print('Memory finish: {}'.format(str(proc.memory_info().rss / 1000000)))
        print("Total time: {}".format(time.time() - start))
        return result

    return wrapper
@time_and_memory
def make_list(n):
    """Materialize [0..n] as a list to demonstrate eager memory use.

    NOTE(review): the time_and_memory wrapper, as originally written,
    does not propagate return values, so calls may yield None -- verify.
    """
    print('Making list..')
    return [x for x in range(n + 1)]
@time_and_memory
def make_generator(n):
    """Build a lazy generator over 0..n to contrast with make_list's
    eager allocation.

    NOTE(review): the time_and_memory wrapper, as originally written,
    does not propagate return values, so calls may yield None -- verify.
    """
    print('Making generator..')
    return (x for x in range(n + 1))
# Compare the memory/time profile of building a million-element list
# versus the (near-free) generator.
N = 1000000
make_list(N)
print()
make_generator(N)
"m.yanko@optimism.ru"
] | m.yanko@optimism.ru |
34c124b3a7647f01806bfe8477086b68f63e78b5 | 4a7092876b5057867a1290114e29dfd9fb1c0820 | /fastccd_support_ioc/utils/python2-version/setFCRIC-Normal.py | 28cf771b07ad1017c253270d147325d411a39a06 | [
"BSD-3-Clause"
] | permissive | ihumphrey/fastccd_support_ioc | 2380a9c23037ccb552d00efdb0235b7116e6ea19 | 7cd844102f042bea2fa5a31217e15fd72731b523 | refs/heads/master | 2023-03-03T04:34:39.827326 | 2021-02-08T19:47:09 | 2021-02-08T19:47:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,696 | py | #! /usr/bin/python
# -*- coding: utf-8 -*-
import cin_constants
import cin_register_map
import cin_functions
# Mask Triggers & turn off Bias
# import setTriggerSW
# cin_functions.setCameraOff()
# Clamp Mode registers
# NOTE(review): every parameter below is programmed with the same
# four-write sequence (821D <- A000, 821E <- <param>, 821F <- <value>,
# 8001 <- 0105); the register semantics live in the cin_* modules --
# confirm there before changing any value.
# Write clampr
cin_functions.WriteReg("821D", "A000", 0)
cin_functions.WriteReg("821E", "0048", 0)
cin_functions.WriteReg("821F", "00C7", 0)
cin_functions.WriteReg("8001", "0105", 0)
cin_functions.WriteReg("821D", "A000", 0)
cin_functions.WriteReg("821E", "0049", 0)
cin_functions.WriteReg("821F", "004C", 0)
cin_functions.WriteReg("8001", "0105", 0)
# Write clamp
cin_functions.WriteReg("821D", "A000", 0)
cin_functions.WriteReg("821E", "0050", 0)
cin_functions.WriteReg("821F", "00B4", 0)
cin_functions.WriteReg("8001", "0105", 0)
cin_functions.WriteReg("821D", "A000", 0)
cin_functions.WriteReg("821E", "0051", 0)
cin_functions.WriteReg("821F", "0002", 0)
cin_functions.WriteReg("8001", "0105", 0)
# Write ac on
cin_functions.WriteReg("821D", "A000", 0)
cin_functions.WriteReg("821E", "0058", 0)
cin_functions.WriteReg("821F", "0001", 0)
cin_functions.WriteReg("8001", "0105", 0)
cin_functions.WriteReg("821D", "A000", 0)
cin_functions.WriteReg("821E", "0059", 0)
cin_functions.WriteReg("821F", "004C", 0)
cin_functions.WriteReg("8001", "0105", 0)
cin_functions.WriteReg("821D", "A000", 0)
cin_functions.WriteReg("821E", "005A", 0)
cin_functions.WriteReg("821F", "0064", 0)
cin_functions.WriteReg("8001", "0105", 0)
cin_functions.WriteReg("821D", "A000", 0)
cin_functions.WriteReg("821E", "005B", 0)
cin_functions.WriteReg("821F", "005B", 0)
cin_functions.WriteReg("8001", "0105", 0)
# Bias On & allow Ext Triggers
# cin_functions.setCameraOn()
# import setTrigger0
| [
"ronpandolfi@gmail.com"
] | ronpandolfi@gmail.com |
ffa7006b45dc4d4246f63987a54c2535ec95a7de | 68ea05d0d276441cb2d1e39c620d5991e0211b94 | /2144.py | bb654c8a0b7125606aa402a02d74bd202336952f | [] | no_license | mcavalca/uri-python | 286bc43aa157d3a6880dc222e0136c80cf079565 | e22875d2609fe7e215f9f3ed3ca73a1bc2cf67be | refs/heads/master | 2021-11-23T08:35:17.614443 | 2021-10-05T13:26:03 | 2021-10-05T13:26:03 | 131,339,175 | 50 | 27 | null | 2021-11-22T12:21:59 | 2018-04-27T19:54:09 | Python | UTF-8 | Python | false | false | 618 | py | final = 0.0
# Round counter; `final` (the running sum of per-round averages) is
# initialised just above this block.
total = 0
while True:
    # Read three integers per round; a line of three zeros ends input.
    w1, w2, r = [int(x) for x in input().split()]
    if w1 == w2 == r == 0:
        break
    # Average of the two values, each scaled up by r/30 (true division).
    media = float(((w1 * (1 + r/30))+(w2 * (1 + r/30))))/2.0
    final += media
    total += 1
    # Category message for this round's average.
    if media < 13:
        print('Nao vai da nao')
    elif media < 14:
        print('E 13')
    elif media < 40:
        print('Bora, hora do show! BIIR!')
    elif media < 60:
        print('Ta saindo da jaula o monstro!')
    else:
        print('AQUI E BODYBUILDER!!')
# Overall average across all rounds; bonus line when it exceeds 40.
final = final/float(total)
if final > 40:
    print()
    print('Aqui nois constroi fibra rapaz! Nao e agua com musculo!')
| [
"m.cavalca@hotmail.com"
] | m.cavalca@hotmail.com |
168786c5446b03293bbcce5a3a2a3598f795f2b9 | 0d29a5643e7d81a0748d49cf24ba3b842d9a3bff | /cart/contexts.py | 1f7d68bd24ac566a53a778691463909e6bc00aef | [] | no_license | jasonstreet/CI-Ecommerce | 00753f3870d254cfe7747138a2928caedf61f9bb | 64c4f29df1b0b1cdbea49d61c3d96cdcc68277cc | refs/heads/master | 2023-01-05T09:05:53.458069 | 2019-04-29T15:23:28 | 2019-04-29T15:23:28 | 168,339,840 | 0 | 1 | null | 2022-12-26T20:15:29 | 2019-01-30T12:32:49 | Python | UTF-8 | Python | false | false | 634 | py | from django.shortcuts import get_object_or_404
from products.models import Product
def cart_contents(request):
"""
Makes cart content available when rendering every page
"""
cart = request.session.get('cart', {})
cart_items = []
total = 0
product_count = 0
for id, quantity in cart.items():
product = get_object_or_404(Product, pk=id)
total += quantity * product.price
product_count += quantity
cart_items.append({'id':id, 'quantity':quantity, 'product': product})
return {'cart_items': cart_items, 'total': total, 'product_count': product_count} | [
"jason.street@learningpeople.co.uk"
] | jason.street@learningpeople.co.uk |
92c456f5850eae6004ecb021f77b3e59abf2ec3c | a4fe00aa3d8c508013a52442115c1fc6be14343c | /guidance/urls.py | c8518cb15c34d7240783aadbe8809f2fcf81c3fb | [] | no_license | bakboka/presences | 8bfb11e2bc21c737e579e3fbe0ef8ea26f21b9c5 | 3108f3db2e588b6ef8677f027792d2188f24a1d8 | refs/heads/master | 2021-01-20T18:19:37.256002 | 2016-08-25T23:01:27 | 2016-08-25T23:01:27 | 65,466,654 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 973 | py | """guidance URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
from django.conf import settings
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
url(r'^api/',include('api.urls')),
url(r'', include('presence.urls')),
url(r'^static/(?P<path>.*)$', 'django.views.static.serve', {'document_root':settings.STATIC_ROOT})
]
| [
"bak@spaceapplications.com"
] | bak@spaceapplications.com |
81371ab1fe6d5402a4d6d6448d42fcfd02eabb5b | 91e2f963ec4b13d38c3d5d0258ad0c48d4d674f1 | /unittests/TestBlockLinker.py | 1ba4f2e32cbf5043dc82f85d25e6abfa9051bf8e | [
"MIT"
] | permissive | WouterGlorieux/BitcoinSpellbook-v0.2 | 6265589afc1197fe52e41b9dc701e4cc5294187f | 93b5480f87f4dc41c2d71093aa98d1fbdd83625c | refs/heads/master | 2021-06-02T06:48:25.607028 | 2016-09-23T20:03:55 | 2016-09-23T20:03:55 | 61,746,127 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 866 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from pprint import pprint
from os import path
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
from wrappers import SpellbookWrapper as SpellbookWrapper
import testconfig
url = testconfig.url
key = testconfig.key
secret = testconfig.secret
#test parameters
address = '1Robbk6PuJst6ot6ay2DcVugv8nxfJh5y'
block_height = 400000
xpub = 'xpub6CUvzHsNLcxthhGJesNDPSh2gicdHLPAAeyucP2KW1vBKEMxvDWCYRJZzM4g7mNiQ4Zb9nG4y25884SnYAr1P674yQipYLU8pP5z8AmahmD'
blocklinker = SpellbookWrapper.SpellbookWrapper(url).blocklinker()
#Test LAL
pprint(blocklinker.get_lal(address, xpub, block_height))
#Test LBL
pprint(blocklinker.get_lbl(address, xpub, block_height))
#Test LRL
pprint(blocklinker.get_lrl(address, xpub, block_height))
#Test LSL
pprint(blocklinker.get_lsl(address, xpub, block_height)) | [
"wouter@valyrian.tech"
] | wouter@valyrian.tech |
afa1dd6b0f679aa6df6a0a0250b61aa5007a4a21 | 08f5dd97433ce84868dbd95020e49f795e8e3f42 | /website/migrations/0011_auto_20150726_2337.py | 1c3b5f116150b0b516139ab4f7af13d2afd1e2d9 | [] | no_license | katur/forthebirds | f76e9d78f8b71f5cb13f22f3c417e737f6048896 | 2118fabebd8780cd3151f5ddd88245de402590e9 | refs/heads/master | 2023-08-08T18:57:55.722516 | 2023-03-28T03:04:19 | 2023-03-28T03:04:19 | 22,771,365 | 2 | 1 | null | 2023-07-25T21:23:49 | 2014-08-08T20:56:20 | Python | UTF-8 | Python | false | false | 1,219 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('website', '0010_auto_20150104_1404'),
]
operations = [
migrations.AddField(
model_name='userprofile',
name='speaking_keynotes',
field=models.TextField(default='', help_text=b'Use Markdown syntax for italics, bullets, etc. See <a href="http://www.darkcoding.net/software/markdown-quick-reference">a quick reference</a>, <a href="http://www.markdowntutorial.com/">a tutorial</a>, or practice <a href="http://dillinger.io/">here</a>. ', blank=True),
preserve_default=False,
),
migrations.AddField(
model_name='userprofile',
name='speaking_testimonials',
field=models.TextField(default='', help_text=b'Use Markdown syntax for italics, bullets, etc. See <a href="http://www.darkcoding.net/software/markdown-quick-reference">a quick reference</a>, <a href="http://www.markdowntutorial.com/">a tutorial</a>, or practice <a href="http://dillinger.io/">here</a>. ', blank=True),
preserve_default=False,
),
]
| [
"katherine.erickson@gmail.com"
] | katherine.erickson@gmail.com |
efaa272b0e6cf2336568d95add9b4b23b76ee094 | c9e72e68a93b3927bf28efac7bfc86809108b817 | /CryptoBallotsProject/wsgi.py | ae8d583cd072105f78d9f76a0b5013edba057e62 | [] | no_license | vasudhashah/VotingSystemBlockchain | 226b300da9261139c62c62410d751249deda92b5 | 6588fdbafac5301f1b953d9c44a245c77445a644 | refs/heads/main | 2023-03-04T11:13:49.695531 | 2021-02-02T10:07:07 | 2021-02-02T10:07:07 | 335,240,089 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 433 | py | """
WSGI config for CryptoBallotsProject project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'CryptoBallotsProject.settings')
application = get_wsgi_application()
| [
"noreply@github.com"
] | noreply@github.com |
b60bd8656b218f64625a5669c5308fefecedc7d5 | 1f3e60dd844e73fa61ad4e993d774c645f9433c2 | /bin/oddt_cli | 0a95bab970bf0e46902526cc29c560d7081048ab | [
"BSD-3-Clause"
] | permissive | minghao2016/oddt | 830cd1a6b956f93d543b489b05708c17c7dc1037 | 9f667bb50682e50fa059997c5513047bf3f6a3cd | refs/heads/master | 2021-01-22T09:28:00.801290 | 2017-01-26T10:51:47 | 2017-01-26T11:34:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,508 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
# FIX Windows multiprocessing
# Module multiprocessing is organized differently in Python 3.4+
try:
# Python 3.4+
if sys.platform.startswith('win'):
import multiprocessing.popen_spawn_win32 as forking
else:
import multiprocessing.popen_fork as forking
except ImportError:
import multiprocessing.forking as forking
if sys.platform.startswith('win'):
# First define a modified version of Popen.
class _Popen(forking.Popen):
def __init__(self, *args, **kw):
if hasattr(sys, 'frozen'):
# We have to set original _MEIPASS2 value from sys._MEIPASS
# to get --onefile mode working.
os.putenv('_MEIPASS2', sys._MEIPASS)
try:
super(_Popen, self).__init__(*args, **kw)
finally:
if hasattr(sys, 'frozen'):
# On some platforms (e.g. AIX) 'os.unsetenv()' is not
# available. In those cases we cannot delete the variable
# but only set it to the empty string. The bootloader
# can handle this case.
if hasattr(os, 'unsetenv'):
os.unsetenv('_MEIPASS2')
else:
os.putenv('_MEIPASS2', '')
# Second override 'Popen' class with our modified version.
forking.Popen = _Popen
# END Fix Windows multiprocessing
import multiprocessing
import six
from os.path import isfile
from ast import literal_eval
import argparse
import oddt
from oddt.scoring import scorer
def main():
# arguments
parser = argparse.ArgumentParser(description='Open Drug Discovery (ODDT) command line tools')
parser.add_argument('--toolkit',
dest='toolkit',
choices=['ob', 'rdk'],
default='ob',
help='Choose which toolkit should be used for calculations, either "ob" (OpenBabel) or "rdkit" (RDKit) (default: ob)')
parser.add_argument('-n', '--n_cpu',
dest='n_cpu',
type=int,
help='The number of parallel processes. -1 automatically assigns maximum number of CPUs. (default=-1)')
parser.add_argument('--version',
action='version',
version='%(prog)s ' + oddt.__version__)
# in/out files and formats
parser.add_argument('in_file', nargs='+',
help='Input files of formats supported by toolkit.')
parser.add_argument('-i', dest='in_format', help='Input file(s) format')
parser.add_argument('-o', dest='out_format', help='Output file format')
parser.add_argument('-O', '--output', dest='out_file', help='Output file')
# filter
group = parser.add_argument_group('Filtering')
group.add_argument('--filter',
dest='filter',
action='append',
default=[],
help='Choose built-in filters to be used (eg. "ro5", "ro3", "pains")')
# docking
group = parser.add_argument_group('Protein-Ligand docking')
group.add_argument('--dock',
dest='dock',
choices=['autodock_vina'],
help='Choose docking software to be used')
group.add_argument('--receptor', help='Protein file')
group.add_argument('--auto_ligand', help='Docking Box is determined on that ligand')
group.add_argument('--center', type=literal_eval, help='Docking Box center (x,y,z)')
group.add_argument('--size', type=literal_eval, help='Docking Box dimentions (x,y,z)')
group.add_argument('--exhaustiveness', default=8, type=int, help='Exhaustiveness of docking')
group.add_argument('--seed', help='Random Seed')
# scoring
# generate scoring functions options
sf_choices = ['autodock_vina', 'rfscore', 'nnscore']
for v in [1, 2, 3]:
sf_choices.append('rfscore_v%i' % v)
for pdbbind_version in [2007, 2012, 2013, 2014, 2015]:
for v in [1, 2, 3]:
sf_choices.append('rfscore_v%i_pdbbind%i' % (v, pdbbind_version))
sf_choices.append('nnscore_pdbbind%i' % (pdbbind_version))
group = parser.add_argument_group('Rescoring')
group.add_argument('--score',
dest='score',
choices=sf_choices,
action='append',
default=[],
help='Choose built-in scoring function to be used')
group.add_argument('--score_file',
dest='score_file',
action='append',
default=[],
help='Choose ODDT scoring function saved to file (pickle)')
parser.add_argument('--field',
dest='save_fields',
action='append',
default=[],
help='Field to save (eg. in CSV). Each field should be specified separately.')
args = parser.parse_args()
# Switch toolkits
if 'toolkit' in args:
if args.toolkit == 'ob':
from oddt.toolkits import ob
oddt.toolkit = ob
elif args.toolkit == 'rdk':
from oddt.toolkits import rdk
oddt.toolkit = rdk
from oddt.virtualscreening import virtualscreening as vs
# Create pipeline for docking and rescoring
pipeline = vs(n_cpu=args.n_cpu if 'n_cpu' in args else -1)
for f in args.in_file:
if args.in_format:
fmt = args.in_format
else: # autodiscover
tmp = f.split('.')
if tmp[-1] == 'gz':
fmt = tmp[-2]
else:
fmt = tmp[-1]
if isfile(f):
pipeline.load_ligands(fmt, f) # add loading ligands from STDIN?
else:
raise IOError("File does not exist: '%s'" % f)
# Filter ligands
for filter in args.filter:
pipeline.apply_filter(filter)
# load protein once
if args.receptor:
extension = args.receptor.split('.')[-1]
receptor = six.next(oddt.toolkit.readfile(extension, args.receptor))
receptor.protein = True
# Docking
if args.dock == 'autodock_vina':
kwargs = {}
if args.center:
kwargs['center'] = args.center
if args.size:
kwargs['size'] = args.size
if args.size:
kwargs['size'] = args.size
if args.auto_ligand:
kwargs['auto_ligand'] = args.auto_ligand
if args.exhaustiveness:
kwargs['exhaustiveness'] = args.exhaustiveness
if args.seed:
kwargs['seed'] = args.seed
pipeline.dock('autodock_vina', receptor, **kwargs)
# Rescoring
for score in args.score:
if score.startswith('nnscore'):
pipeline.score(score, receptor)
elif score.startswith('rfscore'):
pipeline.score(score, receptor)
elif score == 'autodock_vina':
pipeline.score('autodock_vina', receptor)
for score_file in args.score_file:
if isfile(score_file): # load pickle
sf = scorer.load(score_file)
pipeline.score(sf, receptor)
else:
raise IOError('Could not read pickle file %s' % score_file)
# Write to file or STDOUT
if args.out_file:
if args.out_format:
fmt = args.out_format
else: # autodiscover
tmp = args.out_file.split('.')
if tmp[-1] == 'gz':
fmt = tmp[-2]
else:
fmt = tmp[-1]
if not fmt:
raise ValueError('No output format nor output file specified.')
if fmt == 'csv':
pipeline.write_csv(args.out_file, fields=args.save_fields)
else:
pipeline.write(fmt, args.out_file, overwrite=True)
else:
fmt = args.out_format
if not fmt:
raise ValueError('No output format nor output file specified.')
if fmt == 'csv':
pipeline.write_csv(sys.stdout, fields=args.save_fields)
else:
for lig in pipeline.fetch():
sys.stdout.write(lig.write(fmt))
if __name__ == '__main__':
# On Windows calling this function is necessary.
# On Linux/OSX it does nothing.
multiprocessing.freeze_support()
main()
| [
"maciek@wojcikowski.pl"
] | maciek@wojcikowski.pl | |
bfbf63b06cdc7643c0425ab11c912da73d947c6c | cc3673a6c5dcf0ec9cf31daa56ae6df44dada702 | /manage.py | 4dfb09a988a3f5cda90d4aef4f4828fff43297a5 | [] | no_license | migue0418/DSD_Apache_Thrift | e8ab50ed89b6c162d2f6befe69c5b812f9e40865 | 94fa1adc00a994b2471f6d18d6d4b8d8287262f7 | refs/heads/main | 2023-04-05T15:34:39.556084 | 2021-04-12T21:25:40 | 2021-04-12T21:25:40 | 352,952,753 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 814 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "DSD_Apache_Thrift.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv) | [
"55736829+migue0418@users.noreply.github.com"
] | 55736829+migue0418@users.noreply.github.com |
0ab9169652266ec7b80850b1436d24bd149fb8d6 | a205e4dfa3a93c62626e9f1a6d101bfbb5310d8e | /python_source_code/thresholding.py | 86f38364ee78aaabfbb13aba9757c6c505058394 | [] | no_license | artalgos/gui | ca202099d730f61047403347f832eaeb147863b2 | 9eff124452753c6bc1cc1f2652e6ecfa78174020 | refs/heads/master | 2021-07-12T07:33:48.582248 | 2017-10-18T02:00:55 | 2017-10-18T02:00:55 | 106,954,933 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,425 | py | import numpy as np
from scipy import ndimage
def RosinThreshold(imhist, picknonempty=0):
mmax2 = np.amax(imhist)
mpos = imhist.argmax()
p1 = [mpos, mmax2]
L = imhist.shape[0]
lastbin = mpos
for i in np.arange(start=mpos, stop = L):
if(imhist[i] > 0):
lastbin = i
p2 = [lastbin, imhist[lastbin]]
DD = np.sqrt(np.array(p2[0] - p1[0], dtype='int64')**2 + \
np.array(p2[1] - p1[1], dtype='int64')**2)
if DD != 0:
best = -1
found = -1
for i in np.arange(start=mpos, stop=lastbin+1):
p0 = [i, imhist[i]]
d = np.abs((p2[0] - p1[0])*(p1[1]-p0[1]) - (p1[0]-p0[0])*(p2[1]-p1[1]))
d = d/DD
if ((d > best) and ((imhist[i]>0) or (picknonempty == 0))):
best = d
found = i
if found == -1:
found = lastbin+1
else:
found = lastbin+1
T = np.min([found+1, L])
return(T)
def apply_hysteresis_threshold(image, low, high):
"""Apply hysteresis thresholding to `image`.
This algorithm finds regions where `image` is greater than `high`
OR `image` is greater than `low` *and* that region is connected to
a region greater than `high`.
Parameters
----------
image : array, shape (M,[ N, ..., P])
Grayscale input image.
low : float
Lower threshold.
high : float
Higher threshold.
Returns
-------
thresholded : array of bool, same shape as `image`
Array in which `True` indicates the locations where `image`
was above the hysteresis threshold.
Examples
--------
>>> image = np.array([1, 2, 3, 2, 1, 2, 1, 3, 2])
>>> apply_hysteresis_threshold(image, 1.5, 2.5).astype(int)
array([0, 1, 1, 1, 0, 0, 0, 1, 1])
References
----------
.. [1] J. Canny. A computational approach to edge detection.
IEEE Transactions on Pattern Analysis and Machine Intelligence.
1986; vol. 8, pp.679-698.
DOI: 10.1109/TPAMI.1986.4767851
"""
if low > high:
low, high = high, low
mask_low = image > low
mask_high = image > high
# Connected components of mask_low
labels_low, num_labels = ndimage.label(mask_low)
# Check which connected components contain pixels from mask_high
sums = ndimage.sum(mask_high, labels_low, np.arange(num_labels + 1))
connected_to_high = sums > 0
thresholded = connected_to_high[labels_low]
return thresholded
def imhist(ridge):
edges = np.linspace(start=1./(255.*2), stop=1-(1./(255.*2)), num=254)
edges = np.insert(edges, 0, 0)
edges = np.insert(edges, 255, 1)
x = np.histogram(np.where(ridge.ravel()>0, ridge.ravel(), 0), bins=edges)
return(x)
def threshold_all(ridge_validated):
T = np.zeros((2, 1))
histogram, edges = imhist(ridge_validated)
histogram = np.delete(histogram, 0)
T[0, 0] = RosinThreshold(histogram)/256.
ridge2 = ridge_validated.copy()
ridge2[ridge_validated > T[0,0]] = 0
histogram, edges = imhist(ridge2)
histogram = np.delete(histogram, 0)
T[1, 0] = RosinThreshold(histogram)/256.
return T
def threshold_segments(ridge_validated, segments):
T = np.zeros((2, int(np.max(segments))+1))
for seg in range(int(np.max(segments))+1):
ridge_validated_trunc = \
np.where(segments.ravel() == seg, ridge_validated.ravel(), 0)
histogram, edges = imhist(ridge_validated_trunc)
histogram = np.delete(histogram, 0)
T[0, seg] = RosinThreshold(histogram)/256.
ridge2 = ridge_validated_trunc.copy()
ridge2 = np.where(ridge_validated_trunc.ravel() <= T[0, seg], \
ridge2.ravel(), 0)
ridge2[ridge_validated_trunc > T[0,0]] = 0
histogram, edges = imhist(ridge2)
histogram = np.delete(histogram, 0)
T[1, seg] = RosinThreshold(histogram)/256.
return T
def apply_threshold_segments(ridge_validated, T, segments, dilate=0):
crackmap = np.zeros(ridge_validated.shape)
for seg in range(int(np.max(segments))+1):
crackmap_seg = apply_hysteresis_threshold(ridge_validated, \
T[0, seg], T[1,seg])
crackmap_seg = np.where(segments == seg, crackmap_seg, 0)
crackmap = crackmap_seg + crackmap
if dilate:
crackmap = ndimage.morphology.binary_dilation(crackmap)
return crackmap
| [
"gilad.amitai@duke.edu"
] | gilad.amitai@duke.edu |
6298ae66b2659ba754329d0314f6849ce42e0261 | 0995f4b2a0db3fe88e68862c4e3125becfb5f8af | /scripts/generate_pairs2_cacd.py | dfa29e0f924d62d97e13bde82bb8b16490216b2b | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | phymhan/face-aging | db010df62b281befeb1149085ba865382637e3f8 | 2970793d85f2502929222ca7269fb427afee71c1 | refs/heads/master | 2020-03-22T00:33:19.686177 | 2018-09-18T13:17:48 | 2018-09-18T13:17:48 | 139,252,060 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,730 | py | # datafile: A B 0/1/2
# label: 0: A < B, 1: A == B, 2: A > B
import os
import random
import argparse
random.seed(0)
parser = argparse.ArgumentParser()
parser.add_argument('--mode', type=str, default='train')
parser.add_argument('--N', type=int, default=140000)
parser.add_argument('--margin', type=int, default=10)
opt = parser.parse_args()
def parse_age_label(fname, binranges):
strlist = fname.split('_')
age = int(strlist[0])
l = None
for l in range(len(binranges)-1):
if (age >= binranges[l]) and (age < binranges[l+1]):
break
return l
def parse_age(fname):
strlist = fname.split('_')
age = int(strlist[0])
return age
# root = '/media/ligong/Toshiba/Datasets/CACD/CACD_cropped2_400'
mode = opt.mode
src = '../sourcefiles/CACD_'+mode+'_10k.txt'
N = opt.N
with open(src, 'r') as f:
fnames = f.readlines()
fnames = [fname.rstrip('\n') for fname in fnames]
def label_fn(a1, a2, m):
if abs(a1-a2) <= m:
return 1
elif a1 < a2:
return 0
else:
return 2
cnt = [0, 0, 0]
random.shuffle(fnames)
with open(mode+'_pairs_m%d_cacd_10k2.txt'%opt.margin, 'w') as f:
for _ in range(N):
# idx = _ % N
# name1 = fnames[idx]
# name2 = random.choice(fnames)
# if random.random() < 0.5:
# tmp = name1
# name1 = name2
# name2 = tmp
ss = random.sample(fnames, 2)
name1 = ss[0].rstrip('\n')
name2 = ss[1].rstrip('\n')
label = label_fn(parse_age(name1), parse_age(name2), opt.margin)
cnt[label] += 1
f.write('%s %s %d\n' % (name1, name2, label))
w = []
for c in cnt:
w.append(1.0 * sum(cnt) / c)
print([x/sum(w) for x in w])
| [
"hanligong@gmail.com"
] | hanligong@gmail.com |
57c75e86d361f15473ccee4c9e54926100ce4305 | 758bc545bcb2b29f2b6f71e66879b78d50af6a9c | /DemoFile.py | d005c4a90ad3fde5b0aeac66c29a851f50b1b8fa | [] | no_license | Jaeya/Work | d12a4aaaf836d342272bd928a487bc7fdaf13cff | 3f16b384fb3a714cfba3d58af364d09fbc8a8101 | refs/heads/master | 2023-07-12T22:18:43.813846 | 2021-08-09T07:11:39 | 2021-08-09T07:11:39 | 392,137,417 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,335 | py | # DemoFile.py
strURL = "http:bitcamp.com/?page=" + str(1)
print(strURL)
#오른쪽으로 정렬
for i in range(1,6):
print(i, "*",i,"=", i*i)
print("---정렬방식 변경---")
for i in range(1,6):
print(i, "*", i, "=", str(i*i).rjust(3))
print("---정렬방식 변경---")
for i in range(1,6):
print(i, "*", i, "=", str(i*i).zfill(3))
# 문자열 서식 포매팅
print("{0:x}".format(10))
print("{0:b}".format(10))
print("{0:,}".format(15000))
print("{0:e}".format(4/3))
print("{0:f}".format(4/3))
print("{0:.2f}".format(4/3))
#파일에 읽기,쓰기
f = open("c:\\work\\demo.txt","wt")
f.write("첫번째라인\n두번째라인\nabcd\n")
f.close()
f = open("c:/work/demo.txt","rt")
result = f.read()
print(result)
print("---현재 위치---")
print(f.tell())
f.seek(0)
print("---한줄씩 처리---")
print(f.readline(), end ="")
print(f.readline(), end ="")
# 리스트로 받기
f.seek(0)
lst = f.readlines()
print(lst)
f.close()
#다중의 데이터에 맵핑하는 함수
def add10(x):
return x+10
lst = [1,2,3]
for i in map(add10, lst):
print(i)
# 급속냉동과 해동
import pickle
colors = ["red","blue","green"]
f = open("c:\\work\\colors","wb")
pickle.dump(colors,f)
f.close()
del colors
# 다시 복구
f = open("c:\\work\\colors", "rb")
colors = pickle.load(f)
print(colors)
f.close() | [
"yi2228@naver.com"
] | yi2228@naver.com |
f3dced31b38367b2b096ffa0ec39e14389df39b7 | 4668bc07bdbb7734d1005e946959944a7b49b9b7 | /cookOff/DSIMP/AMZ_Distinct_Window.py | bf6fb77dc9603c3d4fc013cddba32be6d8ca838a | [] | no_license | rjoshi47/DSmax | a91a11cfac3e0aa282d3119744a322c882df16ff | f41e042b434a95dd8f41b65ea31204b35f8981c4 | refs/heads/master | 2021-06-06T18:56:14.744503 | 2020-03-06T18:52:39 | 2020-03-06T18:52:39 | 123,318,217 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,303 | py | '''
Created on May 8, 2018
https://practice.geeksforgeeks.org/problems/smallest-distant-window/0
@author: rjoshi
1. Init s=0 and start e = 0 and e+= 1 while we have not visited all distinct chars once.
then s+= 1 while the count of char at s > 1 and decrease its count
2. Now, to check for another unique window
Remove char at s and decrease its count
Repeate 1
'''
res = []
for _ in range(int(input())):
nums = input().strip()
countDict = {}
s = 0
for e in range(0, len(nums)):
countDict[nums[e]] = 0
count = 100000
totalDistinctChars = len(countDict)
distinctCharsSoFar = 0
for e in range(0, len(nums)):
if countDict[nums[e]] == 0:
distinctCharsSoFar += 1
countDict[nums[e]] += 1
if distinctCharsSoFar == totalDistinctChars:
while s < e:
if countDict[nums[s]] > 1:
countDict[nums[s]] -= 1
s += 1
else:
break
count = min(count, e-s+1)
if s+1 < len(nums):
countDict[nums[s]] -= 1
s += 1
distinctCharsSoFar -= 1
res.append(count)
for xx in res:
print(xx)
| [
"noreply@github.com"
] | noreply@github.com |
b705af18da4f1723c1c487668d6accd791c04e92 | b9a9601f70a5f13d4f11136796dcad0d461d67e3 | /ex18.py | 1216ed2799d1949051ee0691169d81fa9d7d1d55 | [] | no_license | maits/learn-python-the-hard-way | 0c9e71704ed2351a2460d51fccbf0bbfe3a0a55f | 5f1831e99caa64f5d9c0c8633c134bb0777a0dbe | refs/heads/master | 2016-09-06T08:54:27.395719 | 2014-08-09T18:04:43 | 2014-08-09T18:04:43 | 17,817,502 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 496 | py | #this one is like scripts with argv
def print_two(*args):
arg1, arg2 = args
print "arg1: %r, arg2: %r" % (arg1, arg2)
#ok, that *args is actually pointless, we can just do this
def print_two_again(arg1, arg2):
print "arg1: %r, arg2: %r" % (arg1, arg2)
#this just takes one argument
def print_one(arg1):
print "arg1: %r" % arg1
#this one takes no arguments
def print_none():
print "I got nothin'."
print_two("Zed", "Shaw")
print_two_again("Zed", "Shaw")
print_one("First!")
print_none()
| [
"maite.fern@gmail.com"
] | maite.fern@gmail.com |
2b4cd437ba2e2ee6a4c3402377be93cdd876ecac | de57b1403887db1165adf29ad8af767a503d523d | /envLitchi/Scripts/rstpep2html.py | fc6af1f845be043f88ecc2ed53350ce64815b800 | [] | no_license | wlapie40/First-own-project | 00a256c0f7cade24e37f053bd7cfe2e0b8b90157 | ed4d74395c22a5bc688bc92c7f8aafe444dac6fe | refs/heads/master | 2022-10-17T00:34:09.920165 | 2018-09-04T10:17:23 | 2018-09-04T10:17:23 | 147,328,042 | 0 | 1 | null | 2022-10-02T08:02:43 | 2018-09-04T10:20:15 | Python | UTF-8 | Python | false | false | 699 | py | #!D:\Litchi\Litchi\envLitchi\Scripts\python.exe
# $Id: rstpep2html.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing HTML from PEP
(Python Enhancement Proposal) documents.
"""
try:
import locale
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline, default_description
description = ('Generates (X)HTML from reStructuredText-format PEP files. '
+ default_description)
publish_cmdline(reader_name='pep', writer_name='pep_html',
description=description)
| [
"32576921+wlapie40@users.noreply.github.com"
] | 32576921+wlapie40@users.noreply.github.com |
ad5c1854793eb7ff7a06b89123406bd985a462ea | 3701467a06bc624c9520984bf6bfc71c95d648d6 | /NewModelNetworkKBP/dataElmoKBP.py | 6b2c34b1656164f046b3aa9ac6958b0a01eebfe6 | [] | no_license | llq20133100095/ANA_SL_ELMO | b54ecef3774f0db85a4940ff7a402c7ebc41b9ba | 46ce451e2841cff1978044110330c2218822644a | refs/heads/master | 2020-07-30T05:11:44.519440 | 2020-06-23T06:22:28 | 2020-06-23T06:22:28 | 210,097,934 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 25,298 | py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Thu May 16 10:47:09 2019
@author: llq
@function:
1.process the KBP dataset
2.concate the "glove embedding"
and "Elmo embedding" and "Pos embedding"
"""
import numpy as np
#from allennlp.commands.elmo import ElmoEmbedder
import re
import time
class ELMO_KBP:
def __init__(self):
"""
0.Word2vec
"""
#word2vec file
self.output_vector_filename=r"../processData/KBP-SF48-master/glove_6B_300vec_kbp.txt"
#Dictory:store the word and vector
self.dict_word_vec={}
#Vector Size
self.vector_size=300
"""
1.(1)Initial max sentence length.
(2)Store the label id.
"""
self.label2id_txt="../processData/KBP-SF48-master/label2id.txt"
self.max_length_sen=82
#label id value: Change the label to id.And 10 classes number(0-9)
self.label2id={}
"""
2.traing filename
"""
#read data
self.train_filename=r"../data/KBP-SF48-master/train_sf3.txt"
#store data
self.train_sen_store_filename=r"../processData/KBP-SF48-master/train_sen.txt"
self.train_label_store_filename=r"../processData/KBP-SF48-master/train_label.txt"
#Postion file
self.training_e1_e2_pos_filename=r"../processData/KBP-SF48-master/training_e1_e2.txt"
"""
3.testing filename
"""
#read data
self.test_filename=r"../data/KBP-SF48-master/test_sf3.txt"
#store data
self.test_sen_store_filename=r"../processData/KBP-SF48-master/test_sen.txt"
self.test_label_store_filename=r"../processData/KBP-SF48-master/test_label.txt"
#Postion file
self.testing_e1_e2_pos_filename=r"../processData/KBP-SF48-master/testing_e1_e2.txt"
"""
4.Position:initial the position vector
"""
self.pos2vec_len=20
self.pos2vec_init=np.random.normal(size=(131,20),loc=0,scale=0.05)
"""
5.Process training data
"""
#training sentence
self.training_sen_number=28888
"""
6.Process testing data
"""
#Testing sentence
self.testing_sen_number=9600 #(9574)
"""
8.SDP file
"""
self.e1_sdp_train_file="../SdpNetwork/sdpData/train_kbp/train_e1_SDP.txt"
self.e2_sdp_train_file="../SdpNetwork/sdpData/train_kbp/train_e2_SDP.txt"
self.e1_sdp_test_file="../SdpNetwork/sdpData/test_kbp/test_e1_SDP.txt"
self.e2_sdp_test_file="../SdpNetwork/sdpData/test_kbp/test_e2_SDP.txt"
"""
9.entity pair embedding
"""
#entity train file
self.entity_train_file="../processData/KBP-SF48-master/training_e1_e2.txt"
#entity test file
self.entity_test_file="../processData/KBP-SF48-master/testing_e1_e2.txt"
"""
10.Elmo save
"""
self.train_elmo_file='./data/train_kbp_elmo_embedding.npy'
self.test_elmo_file='./data/test_kbp_elmo_embedding.npy'
"""
11.Merge Embedding
"""
self.merge_path = './data/merge_embedding'
self.train_split_n = 4
self.test_split_n = 2
self.train_merge_file = 'train_merge_embedding_'
self.test_merge_file = 'test_merge_embedding_'
def dict_word2vec(self):
"""
When create Process_data,must exec this function.
Initial dict_word_vec.
"""
#put the vector in the dictionary
with open(self.output_vector_filename,"r") as f:
i=0
for lines in f.readlines():
if(i==0):
i=i+1
continue
lines_split=lines.split(" ")
keyword=lines_split[0]
lines_split=map(float,lines_split[1:-1])
self.dict_word_vec[keyword]=lines_split
#Set value in "BLANK",its size is 300
self.dict_word_vec["BLANK"]=np.random.normal(size=self.vector_size,loc=0,scale=0.05)
#Set value in "<e1>","</e1>","<e2>","</e2>"
self.dict_word_vec["<e1>"]=np.random.normal(size=self.vector_size,loc=0,scale=0.05)
self.dict_word_vec["</e1>"]=np.random.normal(size=self.vector_size,loc=0,scale=0.05)
self.dict_word_vec["<e2>"]=np.random.normal(size=self.vector_size,loc=0,scale=0.05)
self.dict_word_vec["</e2>"]=np.random.normal(size=self.vector_size,loc=0,scale=0.05)
def label2id_init(self):
"""
When create Process_data,must exec this function.
Change the traing label value to id.
"""
with open(self.label2id_txt,"r") as f:
for lines in f.readlines():
lines=lines.strip("\r\n").split()
self.label2id[lines[0]]=lines[1]
#embedding the position
def pos_embed(self,x):
if x < -64:
return 0
if x >= -64 and x <= 64:
return x+65
if x > 64:
return 130
def embedding_lookup(self, sen_store_filename, e1_e2_pos_filename, sen_number):
    """Build word vectors and position vectors for every sentence of a split.

    1. sen_list2D: tokenised sentences, [[sentence1], [sentence2]], each
       padded with 'BLANK' up to ``self.max_length_sen`` tokens.
    2. word_vec3D: per-token word vectors, shape
       (sen_number, max_length_sen, vector_size); unseen tokens get a random
       vector that is cached in ``self.dict_word_vec``.
    3. pos_vec: per-token position vectors (distance to e1 and to e2),
       shape (sen_number, max_length_sen, 40).

    Fixes vs. the previous revision:
      * ``dict.has_key`` (removed in Python 3) replaced by the ``in``
        operator, which behaves identically on Python 2 as well.
      * the entity scan no longer reads one token past the end of a
        sentence that exactly fills ``max_length_sen`` (was an IndexError).
    """
    word_vec3D = np.empty((sen_number, self.max_length_sen, self.vector_size))
    # sen_list:store the sentence([[sentence1],[sentence2]] )
    sen_list2D = []
    # sen_length:length of sentence (before padding)
    sen_length = []
    with open(sen_store_filename, "r") as f:
        sentence_id = 0
        for lines in f.readlines():
            # Collapse runs of spaces to a single space, then split on
            # single spaces; the last token (trailing newline) is dropped.
            while "  " in lines:
                lines = lines.replace("  ", " ")
            lines = lines.split(" ")[:-1]
            # Remove an empty leading token caused by a leading space.
            if lines[0] == "":
                lines = lines[1:]
            sen_length.append(len(lines))
            sentence_id = sentence_id + 1
            # Pad with the 'BLANK' flag up to max_length_sen tokens.
            if len(lines) <= self.max_length_sen:
                for i in range(self.max_length_sen - len(lines)):
                    lines.append('BLANK')
            sen_list2D.append(lines)
    # Look every token up in dict_word_vec; unseen tokens get a random
    # vector that is cached so repeated occurrences share it.
    sentence_id = 0
    for sentences in sen_list2D:
        word_id = 0
        for words in sentences:
            if words not in self.dict_word_vec:
                self.dict_word_vec[words] = np.random.normal(size=(1, self.vector_size), loc=0, scale=0.05)
            word_vec3D[sentence_id][word_id] = self.dict_word_vec[words]
            word_id = word_id + 1
        sentence_id = sentence_id + 1
    # pos_id[s][i] = [bucketed distance of token i to e1,
    #                 bucketed distance of token i to e2]
    pos_id = np.empty((sen_number, self.max_length_sen, 2))
    sentence_id = 0
    with open(e1_e2_pos_filename, "r") as f:
        for lines in f.readlines():
            # The two "relation words": e1 before the "<e>" separator,
            # e2 after it.
            e1 = lines.split("<e>")[0].split(" ")[1:]
            e2 = lines.split("<e>")[1].strip("\n").split(" ")
            pos_e1 = 0
            pos_e2 = 0
            # If an entity spans several tokens, keep the position of the
            # token directly before its closing marker.  Scan stops one
            # short of the end because it inspects token i+1.
            for i in range(len(sen_list2D[sentence_id]) - 1):
                if sen_list2D[sentence_id][i] == e1[-2] and sen_list2D[sentence_id][i + 1] == "</e1>":
                    pos_e1 = i
                if sen_list2D[sentence_id][i] == e2[-1] and sen_list2D[sentence_id][i + 1] == "</e2>":
                    pos_e2 = i
            for i in range(len(sen_list2D[sentence_id])):
                if i == pos_e1:
                    pos_id[sentence_id][i] = np.array([self.pos_embed(0), self.pos_embed(i - pos_e2)])
                elif i == pos_e2:
                    pos_id[sentence_id][i] = np.array([self.pos_embed(i - pos_e1), self.pos_embed(0)])
                else:
                    pos_id[sentence_id][i] = np.array([self.pos_embed(i - pos_e1), self.pos_embed(i - pos_e2)])
            sentence_id = sentence_id + 1
    # Translate the bucket ids into learned position vectors (two of
    # pos2vec_init stacked -> 40 features per token).
    pos_vec = np.empty((sen_number, self.max_length_sen, 40))
    sentence_id = 0
    for word in pos_id:
        i = 0
        for pos_num in word:
            pos_vec[sentence_id][i] = np.hstack(
                (self.pos2vec_init[int(pos_num[0])], self.pos2vec_init[int(pos_num[1])]))
            i = i + 1
        sentence_id = sentence_id + 1
    return word_vec3D, pos_vec, sen_length, sen_list2D
def sentence_list(self, sen_store_filename):
    """Read a sentence file and return padded token lists.

    Each line is whitespace-normalised, split on single spaces (the
    trailing newline token is dropped) and padded with 'BLANK' up to
    ``self.max_length_sen`` tokens.  Returns [[tok, ...], ...].
    """
    padded_sentences = []
    multi_space = u' +'
    with open(sen_store_filename, "r") as sentence_file:
        for raw_line in sentence_file:
            tokens = re.sub(multi_space, ' ', raw_line).split(" ")[:-1]
            # Drop the empty leading token caused by a leading space.
            if tokens[0] == "":
                tokens = tokens[1:]
            padding = self.max_length_sen - len(tokens)
            if padding >= 0:
                tokens.extend(['BLANK'] * padding)
            padded_sentences.append(tokens)
    return padded_sentences
"""
#use the python3
def embedding_lookup_in_elmo(self,sen_list2D):
options_file = "https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/2x4096_512_2048cnn_2xhighway/elmo_2x4096_512_2048cnn_2xhighway_options.json"
weight_file = "https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/2x4096_512_2048cnn_2xhighway/elmo_2x4096_512_2048cnn_2xhighway_weights.hdf5"
fin_embedding=np.zeros((len(sen_list2D),self.max_length_sen,1024))
elmo = ElmoEmbedder(options_file, weight_file)
for i in range(len(sen_list2D)):
print('iter: %d'%(i))
elmo_embedding, elmo_mask = elmo.batch_to_embeddings(sen_list2D[i:i+1])
#select the last layer as embedding
elmo_embedding=np.array(elmo_embedding[0][2])
fin_embedding[i]=elmo_embedding
return fin_embedding
"""
def merge_glove_elmo(self, word_pos_vec3D, pos_vec, elmo_file):
    """Concatenate GloVe, ELMo and position embeddings per token.

    Parameter:
        1. word_pos_vec3D: the GloVe (word) embeddings
        2. pos_vec: the position embeddings
        3. elmo_file: .npy file holding the pre-computed ELMo embeddings

    Returns an array of shape
    (sentences, tokens, glove_dim + elmo_dim + pos_dim).
    """
    elmo_matrix = np.load(elmo_file)
    return np.concatenate((word_pos_vec3D, elmo_matrix, pos_vec), axis=2)
def embedding_looking_root_e1_e2(self, e1_sdp_file, e2_sdp_file, sen_number, sen_list2D, elmo_file):
    """
    Function:
        embedding the "root" and e1 and e2

    Builds one embedding row per sentence for the SDP root word, entity 1
    and entity 2.  Each row is [GloVe | ELMo | position]: the ELMo vector is
    looked up by the token's position in ``sen_list2D``, and the position
    part encodes the bucketed distances to e1 and e2.  Any failed lookup
    falls back to a small random vector.  Returns three float32 arrays of
    shape (sen_number, vector_size + 1024 + 2 * pos2vec_len).
    """
    #store the root word
    root_list = []
    #store the e1 word
    e1_list = []
    with open(e1_sdp_file, "r") as f:
        for lines in f.readlines():
            # First token of the line is the SDP root word.
            root = lines.split(" ")[0].replace("'", "")
            #get the format such as "book-crossing"
            if "-" in root:
                root = root.split("-")[1]
            # #get the format such as "nt"
            # if root=="nt":
            #     root="t"
            # if root=="and/or":
            #     root="and"
            # if root=="ta":
            #     root="gotta"
            # if root=="%":
            #     root="95%"
            # Second-to-last token of the line is entity 1.
            e1 = lines.strip("\r\n").split(" ")[-2]
            root_list.append(root)
            e1_list.append(e1)
    #store the e2 word
    e2_list = []
    with open(e2_sdp_file, "r") as f:
        for lines in f.readlines():
            e2 = lines.strip("\r\n").split(" ")[-2]
            e2_list.append(e2)
    #load the elmo_embedding
    elmo_embedding = np.load(elmo_file)
    #root embedding and elmo_embedding
    root_embedding = np.zeros((sen_number, self.vector_size + 1024))
    sen_num = 0
    for root in root_list:
        # NOTE(review): the bare excepts below also swallow unrelated
        # errors; they are kept as-is because the intended behaviour is
        # "token not found -> random fallback vector".
        try:
            index = sen_list2D[sen_num].index(root)
            elmo = elmo_embedding[sen_num][index]
        except:
            elmo = np.random.normal(size=(1024,), loc=0, scale=0.05)
        try:
            root_embedding[sen_num] = np.concatenate((self.dict_word_vec[root], elmo), axis=0)
        except:
            # Unseen word: create and cache a random GloVe-sized vector.
            self.dict_word_vec[root] = np.random.normal(size=(self.vector_size,), loc=0, scale=0.05)
            root_embedding[sen_num] = np.concatenate((self.dict_word_vec[root], elmo), axis=0)
        sen_num += 1
    #e1 embedding
    e1_embedding = np.zeros((sen_number, self.vector_size + 1024))
    sen_num = 0
    for e1 in e1_list:
        try:
            index = sen_list2D[sen_num].index(e1)
            elmo = elmo_embedding[sen_num][index]
        except:
            elmo = np.random.normal(size=(1024,), loc=0, scale=0.05)
        try:
            e1_embedding[sen_num] = np.concatenate((self.dict_word_vec[e1], elmo), axis=0)
        except:
            self.dict_word_vec[e1] = np.random.normal(size=(self.vector_size,), loc=0, scale=0.05)
            e1_embedding[sen_num] = np.concatenate((self.dict_word_vec[e1], elmo), axis=0)
        sen_num += 1
    #e2 embedding
    e2_embedding = np.zeros((sen_number, self.vector_size + 1024))
    sen_num = 0
    for e2 in e2_list:
        try:
            index = sen_list2D[sen_num].index(e2)
            elmo = elmo_embedding[sen_num][index]
        except:
            elmo = np.random.normal(size=(1024,), loc=0, scale=0.05)
        try:
            e2_embedding[sen_num] = np.concatenate((self.dict_word_vec[e2], elmo), axis=0)
        except:
            self.dict_word_vec[e2] = np.random.normal(size=(self.vector_size,), loc=0, scale=0.05)
            e2_embedding[sen_num] = np.concatenate((self.dict_word_vec[e2], elmo), axis=0)
        sen_num += 1
    #set position embedding in root,e1 and e2
    root_pos_emb = np.zeros((sen_number, self.pos2vec_len * 2))
    e1_pos_emb = np.zeros((sen_number, self.pos2vec_len * 2))
    e2_pos_emb = np.zeros((sen_number, self.pos2vec_len * 2))
    for sentence_id in range(len(sen_list2D)):
        #Position number of root, e1 and e2
        pos_root = 0
        pos_e1 = 0
        pos_e2 = 0
        #If entity word has two number and more,set this "pos_e1" and "pos_e2" are the 1st word in entity word
        # NOTE(review): this loop reads token i+1, so it raises IndexError
        # for a sentence that exactly fills max_length_sen — presumably the
        # data never hits that case; confirm.
        for i in range(len(sen_list2D[sentence_id])):
            if (sen_list2D[sentence_id][i] == root_list[sentence_id]):
                pos_root = i
            if (sen_list2D[sentence_id][i] == e1_list[sentence_id] and sen_list2D[sentence_id][i + 1] == "</e1>"):
                pos_e1 = i
            if (sen_list2D[sentence_id][i] == e2_list[sentence_id] and sen_list2D[sentence_id][i + 1] == "</e2>"):
                pos_e2 = i
        # Bucketed relative distances -> two stacked position vectors
        # (distance to e1, distance to e2) per word.
        root_pos_emb[sentence_id] = np.hstack\
            ((self.pos2vec_init[int(self.pos_embed(pos_root - pos_e1))], self.pos2vec_init[int(self.pos_embed(pos_root - pos_e2))]))
        e1_pos_emb[sentence_id] = np.hstack\
            ((self.pos2vec_init[int(self.pos_embed(0))], self.pos2vec_init[int(self.pos_embed(pos_e1 - pos_e2))]))
        e2_pos_emb[sentence_id] = np.hstack\
            ((self.pos2vec_init[int(self.pos_embed(pos_e2 - pos_e1))], self.pos2vec_init[int(self.pos_embed(0))]))
    #concate word embedding and pos embedding
    root_embedding = np.concatenate((root_embedding, root_pos_emb), axis=1)
    e1_embedding = np.concatenate((e1_embedding, e1_pos_emb), axis=1)
    e2_embedding = np.concatenate((e2_embedding, e2_pos_emb), axis=1)
    return np.float32(root_embedding), np.float32(e1_embedding), np.float32(e2_embedding)
def iterate_minibatches_inputAttRootE1E2(self, inputs, targets, sen_length, batchsize, input_root, input_e1, input_e2, shuffle=False):
    """Yield aligned mini-batches of (inputs, targets, lengths, root, e1, e2).

    Trailing samples that do not fill a whole batch are dropped.  With
    ``shuffle=True`` a fresh random permutation is drawn on every call.
    """
    assert len(inputs) == len(targets)
    if shuffle:
        order = np.arange(len(inputs))
        np.random.shuffle(order)
    last_start = len(inputs) - batchsize
    for batch_start in range(0, last_start + 1, batchsize):
        if shuffle:
            batch_idx = order[batch_start:batch_start + batchsize]
        else:
            batch_idx = slice(batch_start, batch_start + batchsize)
        yield (inputs[batch_idx], targets[batch_idx], sen_length[batch_idx],
               input_root[batch_idx], input_e1[batch_idx], input_e2[batch_idx])
def iterate_minibatches(self, inputs, targets, sen_length, batchsize, shuffle=False):
    """Yield aligned mini-batches of (inputs, targets, sentence lengths).

    Trailing samples that do not fill a whole batch are dropped.  With
    ``shuffle=True`` a fresh random permutation is drawn on every call.
    """
    assert len(inputs) == len(targets)
    if shuffle:
        order = np.arange(len(inputs))
        np.random.shuffle(order)
    last_start = len(inputs) - batchsize
    for batch_start in range(0, last_start + 1, batchsize):
        if shuffle:
            batch_idx = order[batch_start:batch_start + batchsize]
        else:
            batch_idx = slice(batch_start, batch_start + batchsize)
        yield inputs[batch_idx], targets[batch_idx], sen_length[batch_idx]
def mask_train_input(self, y_train, num_labels='all'):
    """Build the semi-supervised label mask for the training set.

    Returns a float32 array the same length as ``y_train`` where 1.0 marks
    a label that may be used for supervised training and 0.0 one treated as
    unlabeled.  With ``num_labels='all'`` every label is kept; otherwise at
    most ``num_labels // 10`` labels are kept per "rough" class, the rough
    class of a label being ``label // 2``.
    """
    # Construct mask_train. It has a zero where label is unknown, and one where label is known.
    if num_labels == 'all':
        # All labels are used.
        mask_train = np.ones(len(y_train), dtype=np.float32)
        print("Keeping all labels.")
    else:
        # Rough classification: fine labels are folded in pairs.
        rou_num_classes = 10
        # Assign labels to a subset of inputs.
        max_count = num_labels // rou_num_classes
        print("Keeping %d labels per rough class." % max_count)
        mask_train = np.zeros(len(y_train), dtype=np.float32)
        count = [0] * rou_num_classes
        for i in range(len(y_train)):
            label = y_train[i]
            # BUG FIX: floor division keeps the result an int index;
            # plain '/' yields a float under Python 3 and breaks count[...].
            rou_label = int(label) // 2
            if count[rou_label] < max_count:
                mask_train[i] = 1.0
                count[rou_label] += 1
    return mask_train
def label2id_in_data(self, label_store_filename, sen_number):
    """Map one textual label per line to its integer id.

    Uses ``self.label2id`` (built by ``label2id_init``) and returns an int
    array of length ``sen_number``.
    """
    data_label = np.empty((sen_number)).astype(int)
    with open(label_store_filename, "r") as label_file:
        for row, raw_line in enumerate(label_file):
            data_label[row] = self.label2id[raw_line.strip("\r\n")]
    return data_label
def label2id_1hot(self, data_label, label2id):
    """One-hot encode integer label ids: [0,...,0,1,0,...,0] per label.

    ``label2id`` is only used for its size (the number of classes).
    Returns an array of shape (len(data_label), len(label2id)).
    """
    num_classes = len(label2id)

    def _one_hot(idx):
        row = np.zeros(num_classes)
        row[idx] = 1
        return row

    return np.array([_one_hot(v) for v in data_label])
if __name__ == "__main__":
    # End-to-end feature-preparation pipeline: load embeddings and labels
    # for the train and test splits, then one-hot encode the labels.
    """
    1.init the ELMO_KBP
    """
    elmo_kbp = ELMO_KBP()
    start_time = time.time()
    """
    2.load the dict word2vec
    """
    # Pre-trained word vectors plus the label -> id mapping.
    elmo_kbp.dict_word2vec()
    elmo_kbp.label2id_init()
    print("load the dict word2vec: %f s" % (time.time() - start_time))
    # """
    # 3.load the ELMO embedding
    # """
    # #train elmo data
    # train_sen_list2D = elmo_kbp.sentence_list(elmo_kbp.train_sen_store_filename)
    # train_elmo_embedding = elmo_kbp.embedding_lookup_in_elmo(train_sen_list2D)
    # np.save(elmo_kbp.train_elmo_file, train_elmo_embedding)
    #
    # #test elmo data
    # test_sen_list2D = elmo_kbp.sentence_list(elmo_kbp.test_sen_store_filename)
    # test_elmo_embedding = elmo_kbp.embedding_lookup_in_elmo(test_sen_list2D)
    # np.save(elmo_kbp.test_elmo_file, test_elmo_embedding)
    """
    4.load the glove embedding
    """
    #traing_word_pos_vec3D:training data
    training_word_pos_vec3D, train_pos_vec, training_sen_length, train_sen_list2D =\
        elmo_kbp.embedding_lookup(elmo_kbp.train_sen_store_filename,\
        elmo_kbp.training_e1_e2_pos_filename, elmo_kbp.training_sen_number)
    training_word_pos_vec3D = np.float32(training_word_pos_vec3D)
    training_sen_length = np.int32(np.array(training_sen_length))
    print("load the train glove embedding: %f s" % (time.time() - start_time))
    #testing_word_pos_vec3D:testing data
    testing_word_pos_vec3D, test_pos_vec, testing_sen_length, test_sen_list2D =\
        elmo_kbp.embedding_lookup(elmo_kbp.test_sen_store_filename,\
        elmo_kbp.testing_e1_e2_pos_filename, elmo_kbp.testing_sen_number)
    testing_word_pos_vec3D = np.float32(testing_word_pos_vec3D)
    testing_sen_length = np.int32(np.array(testing_sen_length))
    print("load the test glove embedding: %f s" % (time.time() - start_time))
    """
    5.merge the all embedding
    """
    # Concatenate [GloVe | ELMo | position] per token; the position arrays
    # are deleted straight after to cap peak memory.
    training_word_pos_vec3D = elmo_kbp.merge_glove_elmo(training_word_pos_vec3D, train_pos_vec, elmo_kbp.train_elmo_file)
    del train_pos_vec
    testing_word_pos_vec3D = elmo_kbp.merge_glove_elmo(testing_word_pos_vec3D, test_pos_vec, elmo_kbp.test_elmo_file)
    del test_pos_vec
    print("merge the all embedding: %f s" % (time.time() - start_time))
    """
    6.load the label
    """
    #4.training label
    training_label = elmo_kbp.label2id_in_data(elmo_kbp.train_label_store_filename,\
        elmo_kbp.training_sen_number)
    training_label = np.int32(training_label)
    #5.testing label
    testing_label = elmo_kbp.label2id_in_data(elmo_kbp.test_label_store_filename,\
        elmo_kbp.testing_sen_number)
    testing_label = np.int32(testing_label)
    """
    7.load the embedding of root, e1 and e2.
    """
    train_root_embedding, train_e1_embedding, train_e2_embedding = \
        elmo_kbp.embedding_looking_root_e1_e2(elmo_kbp.e1_sdp_train_file,\
        elmo_kbp.e2_sdp_train_file, elmo_kbp.training_sen_number, train_sen_list2D, elmo_kbp.train_elmo_file)
    test_root_embedding, test_e1_embedding, test_e2_embedding =\
        elmo_kbp.embedding_looking_root_e1_e2(elmo_kbp.e1_sdp_test_file,\
        elmo_kbp.e2_sdp_test_file, elmo_kbp.testing_sen_number, test_sen_list2D, elmo_kbp.test_elmo_file)
    """
    8.label id value and one-hot
    """
    label2id = elmo_kbp.label2id
    training_label_1hot = elmo_kbp.label2id_1hot(training_label, label2id)
    training_label_1hot = np.int32(training_label_1hot)
    testing_label_1hot = elmo_kbp.label2id_1hot(testing_label, label2id)
    testing_label_1hot = np.int32(testing_label_1hot)
    # Integer labels are no longer needed once one-hot encoded.
    del training_label
    del testing_label
| [
"1182953475@qq.com"
] | 1182953475@qq.com |
d3e761fd33793aa11b6438e8a85ee6b8d49d9f26 | bd02997a44218468b155eda45dd9dd592bb3d124 | /baekjoon_1149.py | 4e0e23db2a3600c59ccc9ab1de7704622b137d4c | [] | no_license | rheehot/ProblemSolving_Python | 88b1eb303ab97624ae6c97e05393352695038d14 | 4d6dc6aea628f0e6e96530646c66216bf489427f | refs/heads/master | 2023-02-13T03:30:07.039231 | 2021-01-04T06:04:11 | 2021-01-04T06:04:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 912 | py | '''
Problem Solving Baekjoon 1149
Author: Injun Son
Date: September 23, 2020
'''
import sys
import copy
from itertools import combinations
from collections import deque
import math
# N: number of houses; cost[i] = [red, green, blue] painting cost of house i.
N = int(input())
cost = []
for _ in range(N):
    r, g, v = map(int, input().split())
    cost.append([r, g, v])
'''
dp[i][0] = min total cost when house i is painted red   = cost[i][0] + min(dp[i-1][1], dp[i-1][2])
dp[i][1] = min total cost when house i is painted green = cost[i][1] + min(dp[i-1][0], dp[i-1][2])
dp[i][2] = min total cost when house i is painted blue  = cost[i][2] + min(dp[i-1][0], dp[i-1][1])
(adjacent houses must have different colours)
'''
dp = [[0, 0, 0] for _ in range(N + 1)]
# Base case: painting the first house costs exactly its own colour price.
dp[0][0] = cost[0][0]
dp[0][1] = cost[0][1]
dp[0][2] = cost[0][2]
for i in range(1, N):
    dp[i][0] = min(dp[i - 1][1], dp[i - 1][2]) + cost[i][0]
    dp[i][1] = min(dp[i - 1][0], dp[i - 1][2]) + cost[i][1]
    dp[i][2] = min(dp[i - 1][0], dp[i - 1][1]) + cost[i][2]
print(min(dp[N-1])) | [
"ison@sfu.ca"
] | ison@sfu.ca |
fcf5cdd7421b4f2532a2e661e5f029b817329d95 | dd681dd7874c80c2804ca8d66cdbfdf2abec537e | /Python/venv/Lib/site-packages/tensorflow/keras/datasets/boston_housing/__init__.py | 53ffc7b9cca8e7eac1826d4cf34ba9db26f68fff | [] | no_license | khaled147/Koneked | cbbaec78cf3828575e835445f45b9dd72c39d808 | 98bdc701a3d126c742e076ee3ad34719a0ac5309 | refs/heads/main | 2023-04-03T11:37:07.941179 | 2021-04-14T02:09:35 | 2021-04-14T02:09:35 | 345,202,202 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 348 | py | # This file is MACHINE GENERATED! Do not edit.
# Generated by: tensorflow/python/tools/api/generator/create_python_api.py script.
"""Boston housing price regression dataset.
"""
from __future__ import print_function as _print_function
import sys as _sys
from tensorflow.python.keras.datasets.boston_housing import load_data
del _print_function
| [
"elmalawanykhaled@gmail.com"
] | elmalawanykhaled@gmail.com |
66033f69ef4e05133ece1635ec5b66a8e577c7f0 | ff998a560ce3181cc6f751b066250854a77651fd | /playground/class_variable_attributes.py | 4f915ec52b641b57b470f419d3f47982fb9851d3 | [
"MIT"
] | permissive | aamitabhmedia/phototools | 36c2b1b602788da25c468aa2d3a98a8ab1fc33a2 | dc9e8025a7381578a25b02f98c4073e3784ab5b9 | refs/heads/main | 2023-05-03T01:41:43.188428 | 2021-05-22T20:57:41 | 2021-05-22T20:57:41 | 330,093,883 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 318 | py | class Metadata(object):
def __init__(self, filename):
    # Only fixed attribute; anything else (e.g. DateTaken below) is
    # attached dynamically by callers.
    self.filename = filename
m = Metadata("lake Merritt.jpg")
print(m.filename)
# Demonstrates attaching a brand-new attribute to an instance at runtime.
m.DateTaken = "2010:01:02 16:05:20"
print(m.DateTaken)
class Album(object):
    # Intentionally empty: used purely as a dynamic attribute bag below.
    pass
a = Album()
# Even a completely empty class accepts arbitrary instance attributes.
a.name = "Album Name"
a.path = "Album Path"
print(f"{a.name}, {a.path}")
| [
"buddhabacchan@gmail.com"
] | buddhabacchan@gmail.com |
b4241046c7c59718bfda12d04fb8e89dc55ab3e6 | 89e7752fa2fecffe2bc31371b816161bc6f06aa1 | /hashing.py | 41c75bd14e7abbea6d8944ad5d853bbbe8ab037e | [] | no_license | fagan2888/web-dev | ec04ab0dbb9e82856b3cc322d120523222677ae9 | 9b7a8621b415a9ae2c64c1281db523532ac20dc7 | refs/heads/master | 2021-05-29T00:34:42.407404 | 2015-01-21T20:58:39 | 2015-01-21T20:58:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 887 | py | from datetime import datetime
import random
import string
import hashlib
import hmac
from secret import SECRET
# functions for hashing cookies
def hash_str(s):
    """Return the hex HMAC of ``s`` keyed with the site SECRET.

    The digest is now spelled out explicitly: ``hmac.new`` without a
    ``digestmod`` argument implicitly used MD5, was deprecated, and raises
    in modern Python.  Passing ``hashlib.md5`` keeps the output identical
    to the old implicit behaviour.
    """
    return hmac.new(SECRET, str(s), hashlib.md5).hexdigest()
def make_secure_val(s):
    """Return "<s>|<hmac>" — the value plus its signature, cookie-ready."""
    value = str(s)
    return "%s|%s" % (value, hash_str(s))
def check_secure_val(h):
    """Verify a "<s>|<HASH>" string; return ``s`` if valid, else None.

    Splits on the *last* '|' so values that themselves contain '|' still
    round-trip through make_secure_val instead of raising ValueError on
    the multi-part unpack.
    """
    s, HASH = h.rsplit('|', 1)
    if hash_str(s) == HASH:
        return s
# functions for hashing passwords
def make_salt():
    """Return a random 5-letter password salt.

    Uses ``string.ascii_letters`` and ``range`` (available on both Python 2
    and 3) instead of the Python-2-only ``string.letters`` / ``xrange``.
    """
    return ''.join(random.choice(string.ascii_letters) for _ in range(5))
def make_pw_hash(name, pw, salt=None):
    """Return "<sha256(name + pw + salt)>|<salt>", generating a salt if absent."""
    salt = salt or make_salt()
    digest = hashlib.sha256(name + pw + salt).hexdigest()
    return '%s|%s' % (digest, salt)
def valid_pw(name, pw, h):
    """Return True when (name, pw) re-hashes to ``h`` using h's embedded salt."""
    salt = h.split('|')[1]
    return True if h == make_pw_hash(name, pw, salt) else None
| [
"racheltho@gmail.com"
] | racheltho@gmail.com |
61e10b13bc3cd67bec00a3952ff45de859bad9c5 | 0fd91b4c7f7ff702e5ab482d9d741a78f8b31244 | /fiber/api/handlers.py | 0cbace7e4778c207d234555d54bb8efd16de29a4 | [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | gijs/django-fiber | 4528b6e0b384bd9b44716c26bc7d1b9275e9eeec | 6a67facd70d4de43272f9c210b820784c75c14ca | refs/heads/master | 2021-01-17T12:54:58.104638 | 2011-04-19T10:41:14 | 2011-04-19T10:41:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,747 | py | import os
from django.db.models import F, Max
from piston.handler import BaseHandler
from piston.utils import rc
from fiber.utils.date import friendly_datetime
from fiber.models import Page, PageContentItem, ContentItem, Image, File
class PageHandler(BaseHandler):
    """REST handler (django-piston) for fiber Page objects.

    Serves the page tree as nested dicts for the admin front-end and
    supports create / move / delete operations.
    """
    allowed_methods = ('GET', 'POST', 'PUT', 'DELETE')
    fields = ('data', 'children', 'show_in_menu')
    model = Page

    @classmethod
    def data(cls, page):
        # Serialized node payload consumed by the tree front-end: the title
        # plus an href and a JSON blob identifying the page.
        return {
            'title': page.title,
            'attr': {
                'data-fiber-data': '{"type": "page", "id": %d}' % page.id,
                'href': page.get_absolute_url(),
            }
        }

    @classmethod
    def children(cls, page):
        # Child pages, serialized recursively through this same handler.
        return page.get_children()

    def read(self, request, id=None):
        """GET: a single page when ``id`` is given, otherwise all root trees."""
        if id:
            return self.read_page(id)
        else:
            return self.read_trees()

    def read_trees(self):
        # Root nodes only (tree level 0), ordered so sibling trees are stable.
        return Page.objects.filter(level=0).order_by('tree_id')

    def read_page(self, page_id):
        page = Page.objects.get(id=page_id)
        # Do not include the data of the child pages.
        page.children = None
        return page

    def create(self, request):
        """
        Creates a new Page, either placed in the same level before or after a certain Page,
        or as last child below a certain parent Page.
        """
        attrs = self.flatten_dict(request.POST)
        try:
            page_title = attrs['title']
            page_relative_url = attrs['relative_url']
        except KeyError:
            # Both fields are mandatory.
            return rc.BAD_REQUEST

        page = Page(title=page_title, relative_url=page_relative_url)
        if 'before_page_id' in attrs:
            # Insert as a sibling immediately before the given page.
            before_page = Page.objects.get(pk=int(attrs['before_page_id']))
            page.parent = before_page.parent
            page.insert_at(before_page, position='left', save=False)
        elif 'below_page_id' in attrs:
            # Insert as the last child of the given page.
            below_page = Page.objects.get(pk=int(attrs['below_page_id']))
            page.parent = below_page
            page.insert_at(below_page, position='last-child', save=False)
        page.save()
        return rc.CREATED

    def update(self, request, id):
        """PUT: currently only supports the 'move' action."""
        data = request.data
        if data.get('action') == 'move':
            self._move(
                int(id),
                int(data['parent_id']),
                int(data['left_id']),
            )
        else:
            # TODO: check if this situation occurs
            raise Exception('Unsupported action')

    def delete(self, request, id):
        page = Page.objects.get(pk=id)
        page.delete()
        return rc.DELETED

    def _move(self, page_id, parent_id, left_id):
        """
        Moves the node. Parameters:
        - page_id: the page to move
        - parent_id: the new parent
        - left_id: the node to the left (0 if it does not exist)
        """
        page = Page.objects.get(pk=page_id)
        page.move_page(
            parent_id,
            left_id,
        )
class PageContentItemHandler(BaseHandler):
    """Handler that places ContentItems on Pages and (re)orders them."""
    allowed_methods = ('POST', 'PUT', 'DELETE')
    model = PageContentItem

    def create(self, request):
        """
        Creates a new PageContentItem.
        """
        attrs = self.flatten_dict(request.POST)

        content_item = ContentItem.objects.get(pk=int(attrs['content_item_id']))

        if 'before_page_content_item_id' in attrs:
            # Insert before an existing item: reuse its page/block and shift
            # everything at or after its sort index one position down.
            before_page_content_item = PageContentItem.objects.get(pk=int(attrs['before_page_content_item_id']))
            page = Page.objects.get(pk=before_page_content_item.page.id)
            block_name = before_page_content_item.block_name
            sort = before_page_content_item.sort
            # make room for new content item
            # NOTE(review): this filter is not restricted to `page`, so items
            # in same-named blocks on *other* pages are renumbered as well —
            # verify whether that is intended.
            PageContentItem.objects.filter(block_name=block_name).filter(sort__gte=sort).update(sort=F('sort')+1)
        else:
            # Append at the end of the given block on the given page.
            page = Page.objects.get(pk=int(attrs['page_id']))
            block_name = attrs['block_name']
            all_page_content_items = PageContentItem.objects.filter(block_name=block_name).order_by('sort')
            sort_max = all_page_content_items.aggregate(Max('sort'))['sort__max']
            if sort_max != None:
                sort = sort_max + 1
            else:
                sort = 0

        page_content_item = PageContentItem(content_item=content_item, page=page, block_name=block_name, sort=sort)
        page_content_item.save()
        return rc.CREATED

    def update(self, request, id):
        """PUT: the 'move' action places the item before another item and/or
        into a different block."""
        page_content_item = PageContentItem.objects.get(pk=id)

        data = request.data
        if 'action' in data:
            if data['action'] == 'move':
                next = None
                if 'before_page_content_item_id' in data:
                    next_id = int(data['before_page_content_item_id'])
                    if next_id:
                        next = PageContentItem.objects.get(pk=next_id)
                block_name = data.get('block_name')
                PageContentItem.objects.move(page_content_item, next, block_name=block_name)
                # Re-fetch so the response reflects the post-move state.
                page_content_item = PageContentItem.objects.get(pk=id)
        return page_content_item

    def delete(self, request, id):
        page_content_item = PageContentItem.objects.get(pk=id)
        page_content_item.delete()
        return rc.DELETED
class ImageHandler(BaseHandler):
    """Read-only JSON handler feeding the image-picker grid."""
    allowed_methods = ('GET', )
    fields = ('id', 'url', 'image', 'filename', 'size', 'updated')
    exclude = ()  # un-exclude `id`
    model = Image

    @classmethod
    def url(cls, image):
        return image.image.url

    @classmethod
    def image(cls, image):
        # Same value as `url`; kept separately for front-end compatibility.
        return image.image.url

    @classmethod
    def filename(cls, image):
        return os.path.basename(image.image.name)

    @classmethod
    def size(cls, image):
        # Human-readable "<width> x <height>".
        return '%s x %d' % (image.width, image.height)

    @classmethod
    def updated(cls, image):
        return friendly_datetime(image.updated)

    def read(self, request):
        """GET: one page of images, filtered/sorted per the grid's query
        params (`rows`, `page`, `sidx` sort column, `sord` asc/desc, and an
        optional `filename` substring filter)."""
        rows = int(request.GET['rows'])
        page = int(request.GET['page'])
        if 'filename' in request.GET:
            filename = request.GET['filename']
        else:
            filename = ''
        # Slice bounds for the requested page (page numbers are 1-based).
        limit = page*rows
        offset = (page-1)*rows
        order_by = request.GET['sidx']
        order_reversed = (request.GET['sord'] == 'desc') #desc or asc
        if order_by == 'updated':
            order_clause = 'updated'
        elif order_by == 'filename':
            order_clause = 'image'
        elif order_by == 'size':
            order_clause = 'width'
        # NOTE(review): an unrecognised `sidx` leaves `order_clause` unset
        # and raises UnboundLocalError below — confirm the client always
        # sends one of the three known columns.
        if order_reversed:
            order_clause = '-%s' % order_clause
        images = Image.objects.filter(image__icontains=filename).order_by(order_clause)[offset:limit]
        return images
class FileHandler(BaseHandler):
    """Read-only JSON handler feeding the file-picker grid."""
    allowed_methods = ('GET', )
    fields = ('id', 'url', 'filename', 'updated')
    exclude = ()  # un-exclude `id`
    model = File

    @classmethod
    def url(cls, file):
        return file.file.url

    @classmethod
    def filename(cls, file):
        return os.path.basename(file.file.name)

    @classmethod
    def updated(cls, file):
        return friendly_datetime(file.updated)

    def read(self, request):
        """GET: one page of files, filtered/sorted per the grid's query
        params (`rows`, `page`, `sidx`, `sord`, optional `filename`)."""
        rows = int(request.GET['rows'])
        page = int(request.GET['page'])
        if 'filename' in request.GET:
            filename = request.GET['filename']
        else:
            filename = ''
        # Slice bounds for the requested page (page numbers are 1-based).
        limit = page*rows
        offset = (page-1)*rows
        order_by = request.GET['sidx']
        order_reversed = (request.GET['sord'] == 'desc') #desc or asc
        if order_by == 'updated':
            order_clause = 'updated'
        elif order_by == 'filename':
            order_clause = 'file'
        # NOTE(review): an unrecognised `sidx` leaves `order_clause` unset
        # and raises UnboundLocalError below.
        if order_reversed:
            order_clause = '-%s' % order_clause
        files = File.objects.filter(file__icontains=filename).order_by(order_clause)[offset:limit]
        return files

    def create(self, request):
        # NOTE(review): 'POST' is not in allowed_methods, so piston should
        # never route here; FileUploadHandler below duplicates this logic.
        File.objects.create(
            file=request.FILES['file'],
            title='uploaded',  # TODO: empty title
        )
        return rc.CREATED
class FileUploadHandler(BaseHandler):
    """POST-only endpoint that stores an uploaded file as a fiber File."""
    allowed_methods = ('POST',)

    def create(self, request):
        File.objects.create(
            file=request.FILES['file'],
            title='uploaded',  # TODO: empty title
        )
        return rc.CREATED
class ImageUploadHandler(BaseHandler):
    """POST-only endpoint that stores an uploaded file as a fiber Image."""
    allowed_methods = ('POST',)

    def create(self, request):
        Image.objects.create(
            image=request.FILES['file'],
            title='uploaded',  # TODO: empty title
        )
        return rc.CREATED
class ContentItemHandler(BaseHandler):
    """DELETE-only handler; relies on piston's default model delete."""
    allowed_methods = ('DELETE',)
    model = ContentItem
| [
"dbunskoek@leukeleu.nl"
] | dbunskoek@leukeleu.nl |
b1c4524387a2e84017e8bdd402f74b6c789e7b61 | ef9b18c5452fe062b6baf7a1f1db49e7a1e9bd9d | /Aula 08 - Utilizando Módulos/ex019.py | f84b1620d411c9245bc1d74650280d57635677a8 | [
"MIT"
] | permissive | dayellesouza/python-project | 82c1e96dea51b1d3f00038ae09c85901157cb7e9 | 202cf71274915672b88f6e44f0b65ee9dd3218e8 | refs/heads/main | 2023-03-06T18:14:39.662673 | 2021-02-19T19:47:45 | 2021-02-19T19:47:45 | 336,878,450 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 285 | py | from random import choice
# Read four candidate names from the user (prompts are in Portuguese;
# "DIgite" is a typo in the original prompt, kept verbatim).
nome1 = input('Digite o primeiro nome: ')
nome2 = input('Digite o segundo nome: ')
nome3 = input('DIgite o terceiro nome ')
nome4 = input('Digite o quarto nome: ')

# Put the four candidates in a list and draw one uniformly at random.
arr = [nome1, nome2, nome3, nome4]
s = choice(arr)
print('O nome sorteado foi {}'.format(s))
| [
"dayelle1557@hotmail.com"
] | dayelle1557@hotmail.com |
565f9d0dc508935b65b8c4ded8a188e6dde58b18 | d825fd198675abfbaaad34e5c531a65f2c7592cf | /mysite/blog/templatetags/blog_tags.py | ce21a3ccda2e0ada012b03a3f5e5c79e74227b9a | [] | no_license | aky5841468/My-Blog | ba3976ebf65ef8ce3b70edbe1bd0847a84d04d32 | ea382e101082d4f0c8a52a46d97099152625f1a9 | refs/heads/master | 2022-11-19T09:17:22.734339 | 2020-07-16T09:49:18 | 2020-07-16T09:49:18 | 280,100,804 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 564 | py | from django import template
from ..models import Post
from django.db.models import Count
register = template.Library()  # tag registry this module contributes to


@register.simple_tag
def total_posts():
    """Simple tag: return the number of published posts."""
    return Post.published.count()
@register.inclusion_tag('blog/post/latest_posts.html')
def show_latest_posts(count=5):
    """Inclusion tag: render the ``count`` most recently published posts."""
    latest_posts = Post.published.order_by('-publish')[:count]
    return {'latest_posts': latest_posts}
@register.simple_tag
def get_most_commented_posts(count=5):
    """Simple tag: return the ``count`` published posts with most comments."""
    return Post.published.annotate(total_comments=Count('comments')) \
        .order_by('-total_comments')[:count]
"amity220041@gmail.com"
] | amity220041@gmail.com |
2475d6fcd437c7b0ff65d2da526af572ccafff75 | bb6913f908ea3f5a58bc40d733a732f174cd41ef | /trial.py | c75d07cafad1aa16458ef720cc627a234e7666c6 | [] | no_license | K0ra/Work | 75f533b99cc90f4325573d111a165075c319e9dc | a5766259478dbde548c0d5822b6ba49ae07f0b60 | refs/heads/master | 2020-07-16T19:50:59.950615 | 2019-09-02T13:05:48 | 2019-09-02T13:05:48 | 205,856,750 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,758 | py | import sys
import matplotlib
matplotlib.use("Qt5Agg")
from PyQt5 import QtCore
from PyQt5 import QtWidgets
from PyQt5.QtWidgets import QApplication, QMainWindow, QMenu, QVBoxLayout, QSizePolicy, QMessageBox, QWidget
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
import matplotlib.dates as mdates
from configparser import ConfigParser
import psycopg2 as pg
import numpy as np
import datetime
class MyMplCanvas(FigureCanvas):
    """Base matplotlib canvas embedded as a Qt widget.

    Creates a Figure with a single Axes and hooks it into Qt's layout
    system; subclasses override ``compute_initial_figure`` to draw.
    """
    def __init__(self, parent=None, width=5, height=4, dpi=100):
        self.fig = Figure(figsize=(width, height), dpi=dpi)
        self.axes = self.fig.add_subplot(111)
        # We want the axes cleared every time plot() is called
        #self.axes.hold(False)
        self.compute_initial_figure()
        FigureCanvas.__init__(self, self.fig)
        self.setParent(parent)
        # Let the canvas grow with the window in both directions.
        FigureCanvas.setSizePolicy(self,
                                   QSizePolicy.Expanding,
                                   QSizePolicy.Expanding)
        FigureCanvas.updateGeometry(self)

    def compute_initial_figure(self):
        # Hook for subclasses; intentionally a no-op here.
        pass
class MyDynamicMplCanvas(MyMplCanvas):
    #"""A canvas that updates itself every second with a new plot."""
    """Canvas that loads table ``p`` from PostgreSQL and plots channel 1.

    Connection settings are read from the 'postgresql' section of
    ``database.ini``; the data is fetched once at construction time.
    """
    def __init__(self, *args, **kwargs):
        MyMplCanvas.__init__(self, *args, **kwargs)
        # Periodic refresh is currently disabled; data is loaded once.
        #timer = QtCore.QTimer(self)
        #timer.timeout.connect(self.connect)
        #timer.start(3000)
        self.filename = 'database.ini'
        self.section = 'postgresql'
        self.connect()

    def config(self):
        """Return the DB connection parameters from ``self.filename``.

        Raises a plain Exception when the configured section is missing.
        """
        # create a parser
        parser = ConfigParser()
        # read config file
        parser.read(self.filename)

        # get section, default to postgresql
        db = {}
        if parser.has_section(self.section):
            params = parser.items(self.section)
            for param in params:
                db[param[0]] = param[1]
        else:
            raise Exception('Section {0} not found in the {1} file'.format(self.section, self.filename))
        return db

    def setYearlyParameters(self, datetimeValues):
        """Return (year locator, month locator, year formatter, xmin, xmax)
        for a year-scale time axis spanning the given datetimes."""
        years = mdates.YearLocator()    # every year
        months = mdates.MonthLocator()  # every month
        tickFmt = mdates.DateFormatter('%Y')
        datemin = np.datetime64(datetimeValues[0], 'Y')
        # Pad the right edge by one year so the last point stays visible.
        datemax = np.datetime64(datetimeValues[-1], 'Y') + np.timedelta64(1, 'Y')
        return years, months, tickFmt, datemin, datemax

    def connect(self):
        """Fetch every row of table ``p`` and plot the first channel vs time."""
        # Initialize connection
        conn = None
        try:
            # read connection parameters
            params = self.config()
            # connect to the PostgreSQL server
            print('Connecting to the PostgreSQL database...')
            conn = pg.connect(**params)
            cur = conn.cursor()
            # read DB data into Pandas dataframe
            #self.df = pd.read_sql_query('SELECT * FROM p ORDER BY datep, timeut',
            #                            con = conn)
            query = "SELECT * FROM p ORDER BY datep, timeut"
            cur.execute(query)
            print("The number of parts: ", cur.rowcount)
            i = 0
            row = 1
            # One combined datetime and one channel-1 sample per DB row.
            datet = np.zeros(cur.rowcount, dtype=datetime.datetime)
            ch_1 = np.zeros(cur.rowcount)
            while row is not None:
                row = cur.fetchone()
                if row is None:
                    break
                date, time, vec = row
                # NOTE(review): np.float was removed in NumPy 1.24 — this
                # line needs the builtin `float` on current NumPy.
                vec = np.array(vec, dtype=np.float)
                datet[i] = datetime.datetime.combine(date, time)
                # Treat missing samples as zero.
                vec[np.isnan(vec)] = 0
                # For the trial purposes ONLY the first channel is considered
                ch_1[i] = vec[0]
                i += 1
            self.axes.plot(datet.tolist(), ch_1.tolist(), 'r')
            # NOTE(review): setYearlyParameters returns (years, months, ...)
            # but is unpacked here as (months, days, ...) — the locators end
            # up on the intended axis slots, only the local names mislead.
            months, days, tickFmt, datemin, datemax = self.setYearlyParameters(datet.tolist())
            # format the ticks
            self.axes.xaxis.set_major_locator(months)
            self.axes.xaxis.set_major_formatter(tickFmt)
            self.axes.xaxis.set_minor_locator(days)
            self.axes.set_xlim(datemin, datemax)
            self.axes.grid(True)
            # rotates and right aligns the x labels, and moves the bottom of the
            # axes up to make room for them
            self.fig.autofmt_xdate()
            self.draw()
            cur.close()
        except (Exception, pg.DatabaseError) as error:
            print(error)
        finally:
            if conn is not None:
                conn.close()
                print('Database connection closed.')

    # def update_figure(self, datet, vec):
    #     X[:-1] = X[1:]
    #     X[-1] = datet
    #
    #     Y[:-1] = Y[1:]
    #     Y[-1] = vec
    #     self.axes.plot(datet, vec, 'r')
    #     self.draw()
class ApplicationWindow(QMainWindow):
    """Main window: menu bar, the plotting canvas and a horizontal
    scrollbar that pans a fixed-width window across the time axis."""
    def __init__(self):
        QMainWindow.__init__(self)
        self.setAttribute(QtCore.Qt.WA_DeleteOnClose)
        self.setWindowTitle("application main window")

        self.file_menu = QMenu('&File', self)
        self.file_menu.addAction('&Quit', self.fileQuit,
                                 QtCore.Qt.CTRL + QtCore.Qt.Key_Q)
        self.menuBar().addMenu(self.file_menu)

        self.help_menu = QMenu('&Help', self)
        self.menuBar().addSeparator()
        self.menuBar().addMenu(self.help_menu)
        self.help_menu.addAction('&About', self.about)

        self.main_widget = QWidget(self)

        # Canvas on top, pan scrollbar underneath.
        l = QVBoxLayout(self.main_widget)
        self.dc = MyDynamicMplCanvas(self.main_widget, width=5, height=4, dpi=100)
        #l.addWidget(sc)
        l.addWidget(self.dc)

        #self.scroll = QtWidgets.QScrollArea(self.main_widget)
        self.scroll = QtWidgets.QScrollBar(QtCore.Qt.Horizontal)
        #self.scroll.setWidget(dc)
        l.addWidget(self.scroll)
        self.scroll.setValue(99)
        # Fraction of the full x-range shown at once.
        self.step = .1
        self.setupSlider()

        self.main_widget.setFocus()
        self.setCentralWidget(self.main_widget)

        self.statusBar().showMessage("All hail matplotlib!", 2000)

    def setupSlider(self):
        """Capture the full x-limits and wire the scrollbar to pan them."""
        self.lims = np.array(self.dc.axes.get_xlim())
        print("limit" + str(self.lims))
        self.scroll.setPageStep(self.step * 100)
        self.scroll.sliderReleased.connect(self.update)
        self.update()

    def update(self, evt=None):
        """Pan the canvas: map the scrollbar position onto an x-window of
        width ``step`` times the full data range.

        NOTE(review): this overrides QWidget.update(); Qt's own repaint
        calls land here too — confirm that is intended.
        """
        r = self.scroll.value() / ((1 + self.step) * 100)
        l1 = self.lims[0] + r * np.diff(self.lims)
        l2 = l1 + np.diff(self.lims) * self.step
        self.dc.axes.set_xlim(l1, l2)
        print(self.scroll.value(), l1, l2)
        self.dc.fig.canvas.draw_idle()

    def fileQuit(self):
        self.close()

    def closeEvent(self, ce):
        # Route window-close events through the same path as File > Quit.
        self.fileQuit()

    def about(self):
        QtWidgets.QMessageBox.about(self, "About",
                                    """embedding_in_qt5.py example
Copyright 2005 Florent Rougon, 2006 Darren Dale, 2015 Jens H Nielsen

This program is a simple example of a Qt5 application embedding matplotlib
canvases.

It may be used and modified with no restriction; raw copies as well as
modified versions may be distributed without limitation.

This is modified from the embedding in qt4 example to show the difference
between qt4 and qt5"""
                                    )
if __name__ == '__main__':
    # Build the application, show the single main window, and hand
    # control to the Qt event loop.
    qt_app = QApplication(sys.argv)
    main_window = ApplicationWindow()
    main_window.setWindowTitle("PyQt5 Matplot Example")
    main_window.show()
    qt_app.exec_()
| [
"noreply@github.com"
] | noreply@github.com |
c5405da4c5997529293dfd8b33de35476f6a2ee4 | b600d4003d57d66e26f1eba2b95ba2dec5adf1fe | /LeapMotionBlender/UI/Menus/settingsPanel.py | c1fe0c8786f6c62da6733614cc6dc99c018db6d9 | [
"MIT"
] | permissive | ALucatero03/Blender-Puppet-Motion | b214904f3cb2adc2f00d9ac2f1e5fe2e924fef15 | ce3beea7d681966a17aeaf6382424efaa3f05f5d | refs/heads/master | 2020-09-01T06:30:11.079384 | 2019-10-30T08:02:36 | 2019-10-30T08:02:36 | 218,899,574 | 0 | 0 | MIT | 2019-11-01T02:38:03 | 2019-11-01T02:38:02 | null | UTF-8 | Python | false | false | 1,461 | py | import bpy
from bpy.app.handlers import persistent
from bpy.props import BoolProperty, EnumProperty, IntProperty, PointerProperty, StringProperty
from bpy.types import AddonPreferences
from ...Operators import ForceStart
from ... import communicator
class SettingsPanel(AddonPreferences):
    """Add-on preferences UI: server host/port and an auto-start toggle."""
    # Must match the add-on's module name so Blender associates the prefs.
    bl_idname = "LeapMotionBlender"
    auto_start : BoolProperty(
        name="Start automatically",
        description="Automatically start the server when loading the add-on",
        default=True
    )
    host : StringProperty(
        name="Host",
        description="Listen on host:port",
        default="localhost"
    )
    port : IntProperty(
        name="Port",
        description="Listen on host:port",
        default=4567,
        min=0,
        max=65535,
        subtype="UNSIGNED"
    )
    def draw(self, context):
        """Render the preferences layout (settings + live server status)."""
        layout = self.layout
        row = layout.row()
        split = row.split(factor=0.3)
        col = split.column()
        col.prop(self, "host")
        col.prop(self, "port")
        col.separator()
        col.prop(self, "auto_start")
        if communicator.server_port != 0:
            # NOTE(review): draw() mutates self.port to mirror the live
            # port -- a side effect inside a draw handler; confirm intended.
            self.port = communicator.server_port
            col.label(text="Running at port: {}".format(communicator.server_port))
        if not communicator.wserver:
            # Offer a manual start button while the server is down.
            col.operator(ForceStart.bl_idname, icon='QUIT', text="Start server")
col = split.column() | [
"danamadorpe@gmail.com"
] | danamadorpe@gmail.com |
8f53c74814241b9df893b923178de00b3e5b2f16 | ec0fb2acbe70d3d7f399aea42038221298c8268e | /part010/ch05_shapely/sec6_interop/test_3_geo_inter_x_x.py | 6d92bbb779c365957775521a98a907196adfb01c | [] | no_license | GAIMJKP/book_python_gis | fa09567337bfccd4ab968228d4890ec0538ada50 | cd09be08df4cf4d3e06cf7c43d0b80cc76976a7e | refs/heads/master | 2022-11-07T18:32:22.340481 | 2020-06-20T13:22:58 | 2020-06-20T13:22:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 805 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
###############################################################################
from shapely.geometry import asShape
# A GeoJSON-like mapping can be adapted to a shapely geometry via asShape.
d = {"type": "Point", "coordinates": (0.0, 0.0)}
shape = asShape(d)
# The bare expressions below are REPL/doctest style: they display the
# adapted geometry's type and coordinates when evaluated interactively.
shape.geom_type
tuple(shape.coords)
list(shape.coords)
###############################################################################
class GeoThing(object):
    """Minimal object implementing the __geo_interface__ protocol."""
    def __init__(self, d):
        # Any object exposing this attribute can be adapted by shapely.
        self.__geo_interface__ = d
###############################################################################
# Adapt an arbitrary object through its __geo_interface__ attribute.
thing = GeoThing(d)
shape = asShape(thing)
shape.geom_type
tuple(shape.coords)
list(shape.coords)
###############################################################################
from shapely.geometry import mapping
# mapping() goes the other way: geometry provider -> plain dict.
thing = GeoThing(d)
m = mapping(thing)
type(m)
m['type']
| [
"bukun@osgeo.cn"
] | bukun@osgeo.cn |
abfcd32e8c71bff43c8a98c626c2fe7d9afc2b6c | 8fc7635b84b42e61b7efb9eaf7215394b5b5790a | /aliennor-backend copy/aliennorDjangoBackend/aliennorDjangoBackend/settings.py | cb042666939a3793989e062b84e22ccf1baf9c76 | [] | no_license | phamcong/aliennor-platform | f1e8470aab7ed634859e071f6028931f576ddf3e | e1d71532426ac9414d2158d50ee34c32257618f0 | refs/heads/master | 2021-05-14T17:08:08.629564 | 2018-02-17T23:35:07 | 2018-02-17T23:35:07 | 116,038,495 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,851 | py | """
Django settings for aliennorDjangoBackend project.
Generated by 'django-admin startproject' using Django 2.0.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
import base64
import sys
from urllib import parse
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; move it to an
# environment variable before any production deployment.
SECRET_KEY = 'sqqh)7k)(q1jl7t(1^em(_1c*!2_tf(d66s79vhn_*qd21gx&_'
# SECURITY WARNING: don't run with debug turned on in production!
# DEBUG is off exactly when a DATABASE_URL env var exists (the Heroku case).
DEBUG = False if 'DATABASE_URL' in os.environ else True
ALLOWED_HOSTS = [
    'localhost'
]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'ecocases',
'rest_framework',
'crispy_forms',
'tinymce',
'corsheaders',
]
MIDDLEWARE = [
'corsheaders.middleware.CorsMiddleware',
# 'django.middleware.csrf.CsrfViewMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'aliennorDjangoBackend.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'aliennorDjangoBackend.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
# }
# }
# Setup for MySQL connection
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'aliennor',
'USER': 'root',
'PASSWORD': 'root',
'HOST': '127.0.0.1'
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
# NOTE(review): allowing every origin together with credentials is a risky
# CORS combination; it also makes the CORS_ORIGIN_WHITELIST further down
# ineffective.
CORS_ORIGIN_ALLOW_ALL = True
CORS_ALLOW_CREDENTIALS = True
CSRF_COOKIE_SECURE = False
CSRF_TRUSTED_ORIGINS = ['django-angular2-movies.firebaseapp.com']
# custom settings
# NOTE(review): base64 is an encoding, not encryption -- this JWT secret is
# effectively hard-coded in the repository.
JWT_SECRET = base64.b64encode(b'ScaredCherriesEatSurelySimpleVulcansParticipateIntensely')
# heroku database settings
# Register database schemes in URLs.
parse.uses_netloc.append('mysql')
try:
    # Check to make sure DATABASES is set in settings.py file.
    # If not default to {}
    if 'DATABASES' not in locals():
        DATABASES = {}
    if 'DATABASE_URL' in os.environ:
        # DATABASE_URL has the form scheme://user:password@host:port/name
        url = parse.urlparse(os.environ['DATABASE_URL'])
        # Ensure default database exists.
        DATABASES['default'] = DATABASES.get('default', {})
        # Update with environment configuration.
        DATABASES['default'].update({
            'NAME': url.path[1:],
            'USER': url.username,
            'PASSWORD': url.password,
            'HOST': url.hostname,
            'PORT': url.port,
        })
        if url.scheme == 'mysql':
            DATABASES['default']['ENGINE'] = 'django.db.backends.mysql'
except Exception:
    # NOTE(review): this swallows every configuration error and merely
    # prints it; consider re-raising so misconfiguration fails loudly.
    print('Unexpected error:', sys.exc_info())
CORS_ORIGIN_WHITELIST = (
'localhost:3000',
'localhost:4200'
) | [
"ccuong.ph@gmail.com"
] | ccuong.ph@gmail.com |
e24ca4b1413b23a29c175edafb7ddcf6f38c2cca | 1d6fb1f7d5f3d20a084de5d95258ebec57e35687 | /management_system/information/views.py | c3d8864f5a6ca5135158acfa2f03681c215d982d | [] | no_license | qikuyuechengzhi/python1903_ycz | b7e6a9c37c37205881e90eeaa5bedaf53d467ca9 | 829dc429feff39d592c7c191980741f087bb50ef | refs/heads/master | 2020-05-30T20:59:19.112157 | 2019-06-24T06:49:35 | 2019-06-24T06:49:35 | 189,962,288 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 881 | py | from django.shortcuts import render
from django.views.generic import View,ListView,DetailView
from .models import *
# Create your views here.
class MainView(View):
    """Render the static main page."""
    def get(self,req):
        return render(req,'main.html')
class IndexView(View):
    """Render the index page."""
    def get(self,req):
        return render(req,'index.html')
class FormView(View):
    """Render the form page."""
    def get(self,req):
        return render(req,'form.html')
class TableView(View):
    """Render the table page with every Summary row."""
    def get(self,req):
        article = Summary.objects.all()
        # NOTE(review): locals() passes *all* local names as template
        # context; an explicit {'article': article} dict would be clearer.
        return render(req,'table.html',locals())
class LoginView(View):
    """Render the login page."""
    def get(self,req):
        return render(req,'login.html')
class RegisterView(View):
    """Render the registration page."""
    def get(self,req):
        return render(req,'register.html')
from django.http import HttpResponse
class NavView(View):
    """Render the navigation page."""
    def get(self,req):
        # Leftover debug response from development:
        # return HttpResponse("hhh")
return render(req,'nav.html',locals()) | [
"526939418@qq.com"
] | 526939418@qq.com |
36cbb4ac6703730420712c9aab733b695a081355 | 183144c8bcf5bc4f58a0689888760f9e8a48e083 | /Metode Selang Tiga Titik.py | ca8246217756195ab863343127fc4623b764abde | [] | no_license | calvinjesse/optimization-theory | 4fc3e057fe0e32866395b0b985c37b27fcc5d725 | cb13d3cb9719eaae68adec4492d6d7c02154024c | refs/heads/main | 2023-05-29T13:32:42.727957 | 2021-05-27T16:37:03 | 2021-05-27T16:37:03 | 367,455,862 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,244 | py | from numpy import linspace
from pandas import DataFrame
import matplotlib.pyplot as plt
import numpy as np
def f(x):
    """Objective function for the interval search: sin(2x)^3 + cos(x)."""
    doubled = 2 * x
    return np.sin(doubled) ** 3 + np.cos(x)
# Three-point interval search for the optimum of f on the bracket [-4, 3].
k=0
a=[-4]
b=[3]
# L is the current bracket length.
L=b[0]-a[0]
# Histories of the three interior probe points and their f-values.
alpha=[]
beta=[]
gamma=[]
falpha=[]
fbeta=[]
fgamma=[]
# Shrink the bracket until it is narrower than 1e-7.
while L>10**(-7):
    # Five equally spaced points; s[1], s[2], s[3] are the interior probes.
    s=linspace(a[k],b[k],5)
    alpha.append(s[1])
    beta.append(s[2])
    gamma.append(s[3])
    falpha.append(f(alpha[k]))
    fbeta.append(f(beta[k]))
    fgamma.append(f(gamma[k]))
    # Keep the sub-bracket consistent with the monotonicity pattern of the
    # three probe values (strictly rising, strictly falling, or neither).
    if falpha[k]<fbeta[k]<fgamma[k] :
        a.append(a[k])
        b.append(beta[k])
    elif falpha[k]>fbeta[k]>fgamma[k]:
        a.append(beta[k])
        b.append(b[k])
    else :
        a.append(alpha[k])
        b.append(gamma[k])
    k=k+1
    L=(b[k]-a[k])
# Pad the histories so every column has the same length for the table.
alpha.append(float('nan'))
beta.append(float('nan'))
gamma.append(float('nan'))
falpha.append(float('nan'))
fbeta.append(float('nan'))
fgamma.append(float('nan'))
# The optimum estimate is the midpoint of the final bracket.
xp=(b[k]+a[k])/2
fxp = f(xp)
print("Titik optimum f adalah (%f,%f)"%(xp,fxp))
data={'a':a,'alpha':alpha,'beta':beta,'gamma':gamma,'b':b,'falpha':falpha,
      'fbeta':fbeta,'fgamma':fgamma}
print(DataFrame(data))
# Plot f over the original interval for visual confirmation.
x=linspace(-4,3,100)
plt.plot(x,f(x))
plt.grid()
plt.show()
| [
"noreply@github.com"
] | noreply@github.com |
f35d78fabf39bfcfa3dd1daf222cc066fc18916b | 8c705b82e35f992c3d54d939f8ab4207473c56a4 | /07_Dynamic Programming/02_triangle/triangle_00.py | cd8526c8a0670d644fa89f32b17167b149a62c41 | [
"MIT"
def solution(triangle):
    """Return the maximum top-to-bottom path sum in *triangle*.

    ``triangle[i]`` holds the i-th row (i + 1 numbers); a path steps from a
    cell to one of the two adjacent cells in the row below.  Runs in O(n^2)
    time with O(n) extra space and -- unlike the previous in-place DP --
    does not mutate the caller's list.
    """
    # best[j] = maximum path sum ending at column j of the current row.
    best = list(triangle[0])
    for row in triangle[1:]:
        prev = best
        best = []
        last = len(prev) - 1
        for j, value in enumerate(row):
            # Only the parents directly above (columns j-1 and j) exist.
            if j == 0:
                parent = prev[0]
            elif j > last:
                parent = prev[last]
            else:
                parent = max(prev[j - 1], prev[j])
            best.append(value + parent)
    return max(best)
"kiyungdev@gmail.com"
] | kiyungdev@gmail.com |
0e704a2a55c9e5385fe8629bf8951a4746839574 | 0129b016055daa1aaa1e9e0911f271fa7b38e27e | /programacao_estruturada/20192_166/volume_circunferencia.py | 7eeb20de3039fe9af93f2fd698bac1b55a04a7d5 | [] | no_license | rogeriosilva-ifpi/teaching-tds-course | 7c43ff17d6677aef7b42071929b3de8361748870 | 771ccdc4dc932d0ef5ce6ba61a02b5ee11920d4c | refs/heads/master | 2022-04-04T01:08:45.157185 | 2020-01-30T19:36:57 | 2020-01-30T19:36:57 | 206,439,119 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 127 | py | # entrada
raio = int(input('Raio: '))
# processamento
pi = 3.14
# Volume da esfera: V = (4/3) * pi * r**3 -- the original expression
# omitted the cube on the radius.
volume = (4 * pi * raio ** 3) / 3
# saida
print('Volume:', volume) | [
"rogerio.silva@ifpi.edu.br"
] | rogerio.silva@ifpi.edu.br |
99c44cee85ae9d855c4b1b1f726c890fa1b3a186 | b71793aab0b2582eea7da39c1f41b43aef7ac426 | /workspace-python/exercicios_extras/desconto.py | a348dc958dd42198eee3aa30c1ffa7ff2f603c6b | [] | no_license | pachecoDEV/dev-workspaces | c15f738af823cc170ae427a8edd02814c3c11d53 | 482bb03460cf1cee96af635474262a743300a266 | refs/heads/master | 2022-12-22T02:51:17.215893 | 2020-09-25T23:15:54 | 2020-09-25T23:15:54 | 286,874,959 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 647 | py | preco = float(input("Digite o preço do produto: "))
desconto = float(input("Digite o percentual do desconto: "))
acrescimo = float(input("Digite o percentual do acréscimo: "))
print("O preço original do produto é: R$", preco)
# Convert the percentage to a fraction and apply the discount.
desconto = desconto / 100
vl_desconto = preco * desconto
preco_final = preco - vl_desconto
print("O valor do desconto é de: R$", vl_desconto)
print("O valor com o desconto aplicado é de: R$", preco_final)
# Same conversion for the surcharge; note it is applied to the original
# price, not to the discounted one.
acrescimo = acrescimo / 100
vl_acrescimo = preco * acrescimo
preco_final = preco + vl_acrescimo
print("O valor do aumento é de: R$", vl_acrescimo)
print("O valor com o acréscimo é de: R$", preco_final)
| [
"APCOUTO@APCOUTO-T480.br.oracle.com"
] | APCOUTO@APCOUTO-T480.br.oracle.com |
607fd571d226bdf752eea38f0dfad5dd6bca9c59 | d429063e86eb136afe2608304eece742996d6d35 | /main.py | 9b0ec86196df716ff5e1e46a39e98999643ce7dd | [] | no_license | RISHI-RAJJJ/GCPuploadtobucket | b2b31563a7ac62cd88a9b5a4e21910ef9dcb43f2 | 20c5e18a52130dceb9109e5fe8e1c6b198b47821 | refs/heads/master | 2020-04-19T07:01:37.077856 | 2019-01-28T21:07:36 | 2019-01-28T21:07:36 | 168,035,252 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 882 | py | from flask import Flask
from google.cloud import storage
import os
import logging
app = Flask(__name__)
@app.route('/')
def hello():
    """Upload a fixed local file to the 'sasuke' GCS bucket, then greet.

    The Windows paths must be raw strings: in a normal literal the
    backslash-U sequence starts a unicode escape, which made the original
    strings a SyntaxError on Python 3.
    """
    storage_client = storage.Client.from_service_account_json(
        r'C:\Users\RISHI\Videos\Mulesoft\GCP material\My Project 69244-55dbb853fec6.json')
    buckets = list(storage_client.list_buckets())
    bucket = storage_client.get_bucket("sasuke")
    # NOTE(review): the blob name below is the full local path, so the
    # object is stored under that literal name; a short key such as
    # 'GCP1.txt' was probably intended -- confirm before changing.
    blob = bucket.blob(r'C:\Users\RISHI\Videos\Mulesoft\GCP material\GCP1.txt')
    blob.upload_from_filename(r'C:\Users\RISHI\Videos\Mulesoft\GCP material\GCP1.txt')
    print(buckets)
    return "helo world!"
# NOTE(review): app.run() blocks, so the @app.errorhandler(500)
# registration further down never executes while the dev server is
# serving; the handler definition should precede this guard.
if __name__ == '__main__':
    app.run(host='127.0.0.1', port=8080, debug=True)
@app.errorhandler(500)
def server_error(e):
    """Return a generic 500 response after logging the stack trace."""
    # Log the error and stacktrace.
    logging.exception('An error occurred during a request.')
    return 'An internal error occurred.', 500
# [END app] | [
"rishiraj8086@gmail.com"
] | rishiraj8086@gmail.com |
157d1915be5de8fd962c5458f9608cfa50c53211 | 35b58dedc97622b1973456d907ede6ab86c0d966 | /Test/2020年6月20日/selenium爬取动态加载数据.py | 75b922d51f77c8fd0aec94d57a25352626e16274 | [] | no_license | GithubLucasSong/PythonProject | 7bb2bcc8af2de725b2ed9cc5bfedfd64a9a56635 | e3602b4cb8af9391c6dbeaebb845829ffb7ab15f | refs/heads/master | 2022-11-23T05:32:44.622532 | 2020-07-24T08:27:12 | 2020-07-24T08:27:12 | 282,165,132 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 710 | py | from selenium import webdriver
from lxml import etree
from time import sleep
# Launch a Chrome instance driven by the local chromedriver binary.
bro = webdriver.Chrome(executable_path='chromedriver')
# bro = webdriver.Edge(executable_path='./msedgedriver')
bro.get('http://125.35.6.84:81/xk/')
sleep(1)
# Grab the rendered page source content.
page_text = bro.page_source
all_page_text = [page_text]
# Click through the next five pages, collecting each page's HTML.
for i in range(5):
    next_page_btn = bro.find_element_by_xpath('//*[@id="pageIto_next"]')
    next_page_btn.click()
    sleep(1)
    all_page_text.append(bro.page_source)
# Parse every collected page and print each list item's title attribute.
for page_text in all_page_text:
    tree = etree.HTML(page_text)
    li_list = tree.xpath('//*[@id="gzlist"]/li')
    for li in li_list:
        title = li.xpath('./dl/@title')[0]
        print(title)
bro.quit()
| [
"1433880147@qq.com"
] | 1433880147@qq.com |
33bf3c212be42ffb70c92552816e6a6aaa4d7c85 | 71bfd5b4309dd14a7b2bdc5c1e4d5fcb60e40d2d | /KNN.py | 46b0bf69f2654b7aa265eedb7cb18f495a8ef094 | [] | no_license | OrangeWong/predicting_mortality | 3bbd95a014b7e0c8e86365c6ca4c9e5a0bd80539 | af95eab32d6591c0f27115f2b66062c20e27e435 | refs/heads/master | 2021-09-01T07:55:19.065847 | 2017-12-25T21:23:23 | 2017-12-25T21:23:23 | 103,076,186 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,183 | py | # -*- coding: utf-8 -*-
"""
Created on Fri Sep 15 14:24:00 2017
@author: richa
"""
import numpy as np
from scipy.spatial import distance
from collections import Counter
class KNNClassifier(object):
    """A k-nearest-neighbor classifier with a pluggable distance metric.

    Each query point is labeled by majority vote among the ``n_neighbors``
    closest training points.
    """

    def __init__(self, n_neighbors=3, metric=distance.euclidean):
        """Store the neighbor count and the distance metric.

        Args:
            n_neighbors (int, optional): neighbors consulted per query
                (default = 3).
            metric (callable, optional): two-argument distance function
                (default = scipy's ``distance.euclidean``).

        Raises:
            TypeError: if *metric* is not callable.
        """
        self.n_neighbors = n_neighbors
        self.metric = metric
        if not callable(self.metric):
            raise TypeError("metric must be callable")

    def _cdist(self, X, Y):
        """Pairwise distances: element (i, j) is ``metric(X[i], Y[j])``."""
        left = np.array(X)
        right = np.array(Y)
        rows = [[self.metric(a, b) for b in right] for a in left]
        # reshape keeps the (len(X), len(Y)) shape even when X is empty.
        return np.array(rows, dtype=np.double).reshape(left.shape[0],
                                                       right.shape[0])

    def fit(self, training_data, training_label):
        """Memorize the training set.

        Args:
            training_data (list or array): one sample per row.
            training_label (list or array): one label per sample.

        Raises:
            ValueError: on mismatched sizes or too few samples.
        """
        samples = np.array(training_data)
        labels = np.array(training_label)
        if samples.shape[0] != labels.shape[0]:
            raise ValueError("Data and label samples must have same size.")
        if samples.shape[0] < self.n_neighbors:
            raise ValueError("Data size must be greater than n_neighbors.")
        if samples.shape[0] == 0:
            raise ValueError("Data size must be greater than 0.")
        self.training_data = samples
        self.training_label = labels.reshape(-1,)

    def _get_KNN_labels(self, d_matrix, n_neighbors):
        """Labels of the *n_neighbors* nearest training points per row."""
        nearest = d_matrix.argsort()[:, :n_neighbors]
        return self.training_label[nearest]

    def predict(self, testing_data):
        """Predict a label for every row of *testing_data*.

        Args:
            testing_data (list or array): query samples, one per row.

        Returns:
            array: predicted label per query sample.
        """
        queries = np.array(testing_data)
        dist = self._cdist(queries, self.training_data)
        neighbor_labels = self._get_KNN_labels(dist, self.n_neighbors)
        winners = []
        for votes in neighbor_labels:
            # most_common resolves ties by first appearance, matching the
            # original voting behavior.
            winners.append(Counter(votes).most_common(1)[0][0])
        return np.array(winners)
# Smoke test when run directly: construct a classifier with defaults.
if __name__ == "__main__":
    clf = KNNClassifier(n_neighbors=3, metric=distance.euclidean)
print( clf.__class__.__name__) | [
"richard.wkh@gmail.com"
] | richard.wkh@gmail.com |
cb143cffd3db85c7d8a3c0ab9ab7c9fe0d4f751f | 8f429b61e1b7cc34739da192c87e084c57edf0ba | /gradebook.py | 0c769995c1bfc99d0aa118503996276e643ef121 | [] | no_license | alvyn96/GradeBook | c368246d8b655c5631c159b2208377fff455bd2d | 9adaad9ec65f48e07e8d328174fa4889fa2c1b81 | refs/heads/master | 2020-03-21T13:39:20.063353 | 2018-06-25T16:04:43 | 2018-06-25T16:04:43 | 138,618,227 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,471 | py | #!/usr/bin/python3
#author:@al_vyn
#written: 25/06/2018
import csv
#Create dictionaries
# One record per student: name plus raw scores in each category.
lloyd = {
    "name": "Lloyd",
    "homework": [90.0, 97.0, 75.0, 92.0],
    "quizzes": [88.0, 40.0, 94.0],
    "tests": [75.0, 90.0]
}
alice = {
    "name": "Alice",
    "homework": [100.0, 92.0, 98.0, 100.0],
    "quizzes": [82.0, 83.0, 91.0],
    "tests": [89.0, 97.0]
}
tyler = {
    "name": "Tyler",
    "homework": [0.0, 87.0, 75.0, 22.0],
    "quizzes": [0.0, 75.0, 78.0],
    "tests": [100.0, 100.0]
}
alvyn = {
    "name":"alvyn",
    "homework": [92.0, 98.0, 92.0, 94.0],
    # Fixed: the quiz list read [90.0, 80, 0, 85.0] -- the decimal point in
    # 80.0 had been typed as a comma, splitting one score into 80 and 0 and
    # giving this student four quizzes while every other student has three.
    "quizzes": [90.0, 80.0, 85.0],
    "tests": [95.0, 93.0]
}
students = [alvyn, tyler, lloyd, alice]
#Adding functions
def banner(text, ch='=', length=78):
    """Return *text* centered in a row of *ch* characters *length* wide.

    Args:
        text: value to display (formatted with '%s').
        ch (str): single padding character (default '=').
        length (int): total width of the returned line (default 78).
    """
    # Single expression: the old version built two temporaries, one of
    # which ('banner') shadowed the function's own name.
    return ('%s' % text).center(length, ch)
def average(numbers):
    """Arithmetic mean of *numbers*, always as a float."""
    return float(sum(numbers)) / len(numbers)
def get_average(students):
    """Weighted overall grade for one student record.

    Homework counts 10%, quizzes 30%, and tests 60% of the total.
    """
    weighted = 0.1 * average(students["homework"])
    weighted += 0.3 * average(students["quizzes"])
    weighted += 0.6 * average(students["tests"])
    return weighted
def get_letter_grade(score):
    """Convert a numeric *score* to a letter grade (A >= 90 ... F < 60)."""
    for cutoff, letter in ((90, "A"), (80, "B"), (70, "C"), (60, "D")):
        if score >= cutoff:
            return letter
    return "F"
def get_class_average(students):
    """Mean of every student's weighted overall grade."""
    return average([get_average(student) for student in students])
# Per-student category averages, consumed by the CSV rows below.
# NOTE(review): these twelve assignments repeat the same computation four
# times; a loop over `students` would remove the duplication.
#alvyn's average data
alvyn_hw = average(alvyn["homework"])
alvyn_qz = average(alvyn["quizzes"])
alvyn_ts = average(alvyn["tests"])
#alice's average data
alice_hw = average(alice["homework"])
alice_qz = average(alice["quizzes"])
alice_ts = average(alice["tests"])
#tyler's average data
tyler_hw = average(tyler["homework"])
tyler_qz = average(tyler["quizzes"])
tyler_ts = average(tyler["tests"])
#lloyd's average data
lloyd_hw = average(lloyd["homework"])
lloyd_qz = average(lloyd["quizzes"])
lloyd_ts = average(lloyd["tests"])
#write the results to a csv file
print (banner('GradeBook'))
print ("\n")
print ("[+]-->A python script that calculates averages and writes the data to a csv file")
# NOTE(review): the output path is hard-coded and Windows-specific.
with open('C:\\Users\\User\\Documents\\project\\results.csv', 'w') as csvfile:
    fieldnames = ['S/N', 'NAME', 'HOMEWORK', 'QUIZZES', 'TESTS', 'REMARKS']
    writer = csv.DictWriter(csvfile, fieldnames = fieldnames)
    writer.writeheader()
    # REMARKS is always written empty even though get_letter_grade exists.
    writer.writerow({'S/N':1, 'NAME':'Alvyn', 'HOMEWORK': alvyn_hw, 'QUIZZES': alvyn_qz, 'TESTS': alvyn_ts, 'REMARKS':""})
    writer.writerow({'S/N':2, 'NAME':'Alice', 'HOMEWORK': alice_hw, 'QUIZZES': alice_qz, 'TESTS': alice_ts, 'REMARKS':""})
    writer.writerow({'S/N':3, 'NAME':'Lloyd', 'HOMEWORK': lloyd_hw, 'QUIZZES': lloyd_qz, 'TESTS': lloyd_ts, 'REMARKS':""})
    writer.writerow({'S/N':4, 'NAME':'Tyler', 'HOMEWORK': tyler_hw, 'QUIZZES': tyler_qz, 'TESTS': tyler_ts, 'REMARKS':""})
#read and display the results csv file
# NOTE(review): the file above was written comma-delimited, but is re-read
# here with delimiter=' '; presumably unintended -- confirm.
with open('C:\\Users\\User\\Documents\\project\\results.csv', 'r') as csvfile:
    data = csv.reader(csvfile, delimiter = ' ', quotechar = '|')
    for row in data:
        print (','.join(row))
with open('C:\\Users\\User\\Documents\\project\\results.csv', 'r') as csvfile:
    reader = csv.DictReader(csvfile)
    for row in reader:
        print (row['HOMEWORK'], row['QUIZZES'], row['TESTS'], row['REMARKS'])
print ("\n")
print (banner('cha0s')) | [
"noreply@github.com"
] | noreply@github.com |
0134f050e7db3b58bc21acea96931a03d5ce5775 | a26ae51a1d84249c31c58b90231b7ec23e1aa74d | /flask_app.py | e75e4e1d9083d00221068309ed9821d50809fb02 | [] | no_license | Yaomingqing/Image-Super-Resolution | b5e975f08d9cec0d1ba71ec3489e388c6ef69a2a | 631b2af81d012ff58c9d7a91f37e3e1d31377222 | refs/heads/master | 2021-08-10T13:05:44.662484 | 2017-11-12T15:58:48 | 2017-11-12T15:58:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,187 | py | from keras.models import load_model
from flask import Flask, request, render_template, flash, redirect, url_for
from werkzeug.utils import secure_filename
import models
import os
import tensorflow as tf
# Directory where uploads are written; created on first import.
upload_folder = 'data/'
if not os.path.exists(upload_folder):
    os.makedirs(upload_folder)
ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg', 'bmp'}
# Keras model loaded once at import time.  NOTE(review): `model` is not
# referenced elsewhere in this file -- only `m` is used; confirm needed.
model = load_model('keras_models/RNSR_model.h5')
with tf.device('/cpu:0'):
    m = models.ResNetSR(2)
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = upload_folder
# NOTE(review): secret key is hard-coded in source; prefer an env var.
app.secret_key = 'WTF_I_already*Installed^%Open%&$CV'
def allowed_file(filename):
    """True if *filename* carries an extension listed in ALLOWED_EXTENSIONS."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1].lower()
    return extension in ALLOWED_EXTENSIONS
@app.route('/', methods=['GET', 'POST'])
def root():
    """Upload form; on POST, save the file and redirect to upscaling.

    NOTE(review): `file` shadows the builtin of the same name, and a POST
    with a disallowed extension falls through to re-rendering the form
    without any flash message -- confirm both are intended.
    """
    if request.method == 'POST':
        # check if the post request has the file part
        if 'file' not in request.files:
            flash('No file part')
            return redirect(request.url)
        file = request.files['file']
        # if user does not select file, browser also
        # submit a empty part without filename
        if file.filename == '':
            flash('No selected file')
            return redirect(request.url)
        if file and allowed_file(file.filename):
            # secure_filename strips path components from the client name.
            filename = secure_filename(file.filename)
            file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
            return redirect(url_for('uploaded_file',
                                    filename=filename))
    return render_template('index.html', title='Image Super Resolution')
@app.route('/uploaded_file/<string:filename>')
def uploaded_file(filename):
    """Run 2x super-resolution on an uploaded image, then show the result.

    On success redirects to the display view with the scaled image's path;
    on failure flashes a message and returns to the upload form.
    """
    path = upload_folder + filename
    try:
        m.upscale(path, save_intermediate=False, mode="fast")
        # The model writes its output next to the input as name_scaled(2x).ext
        ext = filename.rsplit('.', 1)[1]
        path = upload_folder + filename.rsplit('.', 1)[0] + "_scaled(2x)." + ext
        return redirect(url_for('image', filename=path))
    except Exception:
        # Was a bare ``except:``, which also swallowed SystemExit and
        # KeyboardInterrupt and reported *every* failure as a size problem;
        # Exception is the widest net a request handler should cast.
        flash("Image is too large !")
        return redirect('/')
@app.route('/image/<filename>', methods=['POST'])
def image(filename):
    """Render the display page for an upscaled image path."""
    return render_template('disp.html', image=filename)
# Run the Flask dev server when executed directly (not under a WSGI host).
if __name__ == "__main__":
app.run(port=8888) | [
"titu1994@gmail.com"
] | titu1994@gmail.com |
16b1e9f6255a7aeecb09b2d252d4964a10954a12 | dd2e8fe5846d3241e0200c1db0df1fe0869ab8a0 | /locksmith/hub/migrations/0008_auto__add_field_api_tools_text.py | 20f8a720a3df312a62e2b94145fb6a4a2abd6802 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | sunlightlabs/django-locksmith | 350a68a3a6559d8a40e4fecfdbe2fdabc8543bb5 | eef5b7c25404560aaad50b6e622594f89239b74b | refs/heads/master | 2021-01-19T01:29:06.502206 | 2016-06-09T20:34:48 | 2016-06-09T20:34:48 | 484,442 | 8 | 4 | null | 2015-07-14T05:59:23 | 2010-01-22T22:16:42 | Python | UTF-8 | Python | false | false | 8,881 | py | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply: add the nullable 'tools_text' text column to the api table."""
        # Adding field 'Api.tools_text'
        db.add_column('locksmith_hub_api', 'tools_text',
                      self.gf('django.db.models.fields.TextField')(null=True, blank=True),
                      keep_default=False)
    def backwards(self, orm):
        """Revert: drop the 'tools_text' column again."""
        # Deleting field 'Api.tools_text'
        db.delete_column('locksmith_hub_api', 'tools_text')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'hub.api': {
'Meta': {'object_name': 'Api', 'db_table': "'locksmith_hub_api'"},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'display_name': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'documentation_link': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mode': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'push_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'querybuilder_link': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'signing_key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'status_message': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'tools_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
u'hub.key': {
'Meta': {'object_name': 'Key', 'db_table': "'locksmith_hub_key'"},
'alternate_email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '75'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'issued_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'org_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'org_url': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'promotable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'U'", 'max_length': '1'}),
'usage': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'api_key'", 'unique': 'True', 'null': 'True', 'to': u"orm['auth.User']"})
},
u'hub.keypublicationstatus': {
'Meta': {'object_name': 'KeyPublicationStatus', 'db_table': "'locksmith_hub_keypublicationstatus'"},
'api': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'pub_statuses'", 'to': u"orm['hub.Api']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'pub_statuses'", 'to': u"orm['hub.Key']"}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
u'hub.report': {
'Meta': {'object_name': 'Report', 'db_table': "'locksmith_hub_report'"},
'api': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'reports'", 'to': u"orm['hub.Api']"}),
'calls': ('django.db.models.fields.IntegerField', [], {}),
'date': ('django.db.models.fields.DateField', [], {}),
'endpoint': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'reports'", 'to': u"orm['hub.Key']"}),
'reported_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'})
},
u'taggit.tag': {
'Meta': {'object_name': 'Tag'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
},
u'taggit.taggeditem': {
'Meta': {'object_name': 'TaggedItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'taggit_taggeditem_tagged_items'", 'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'taggit_taggeditem_items'", 'to': u"orm['taggit.Tag']"})
}
}
complete_apps = ['hub'] | [
"dvogel@sunlightfoundation.com"
] | dvogel@sunlightfoundation.com |
9ad77a3831c4a0d0b80e07824a559811f0f4e269 | 5a13f74026f59510f1c33de022c0ef3de94fe321 | /src/instabot.py | b896198ce6d914c8963f25be7de7c4854f27d1fa | [
"MIT"
] | permissive | barneyElDinosaurio/instabotonwaytest | 3617a049747bc33c6a3904365f9af46fe3c6a408 | b5569b547489b94158bb7114c7e0a3af559e01e9 | refs/heads/master | 2020-04-02T07:24:03.884078 | 2018-10-22T18:25:22 | 2018-10-22T18:25:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 44,302 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
from .unfollow_protocol import unfollow_protocol
from .userinfo import UserInfo
import atexit
import datetime
import itertools
import json
import logging
import random
import signal
import sys
import sqlite3
import time
import requests
from .sql_updates import check_and_update, check_already_liked, check_already_followed
from .sql_updates import insert_media, insert_username, insert_unfollow_count
from .sql_updates import get_usernames_first, get_usernames, get_username_random
from .sql_updates import check_and_insert_user_agent
from fake_useragent import UserAgent
import re
class InstaBot:
    """
    Instagram bot v 1.2.0

    like_per_day=1000 - how many likes the bot may set in one day.
    media_max_like=0 - don't like media (photo or video) that has more than
    media_max_like likes.
    media_min_like=0 - don't like media (photo or video) that has fewer than
    media_min_like likes.
    tag_list = ['cat', 'car', 'dog'] - tag list to like.
    max_like_for_one_tag=5 - like 1 to max_like_for_one_tag medias in a row.
    log_mod = 0 - log mode: 0 log to console, 1 log to file, 2 no log.
    https://github.com/LevPasha/instabot.py
    """
    # NOTE(review): everything below is a class-level attribute, so the
    # mutable ones (lists/dicts) are shared between instances unless
    # __init__ rebinds them - confirm single-instance usage.
    database_name = "follows_db.db"
    follows_db = None
    follows_db_c = None
    # Instagram web endpoints; %s placeholders are filled per request.
    url = 'https://www.instagram.com/'
    url_tag = 'https://www.instagram.com/explore/tags/%s/?__a=1'
    url_location = 'https://www.instagram.com/explore/locations/%s/?__a=1'
    url_likes = 'https://www.instagram.com/web/likes/%s/like/'
    url_unlike = 'https://www.instagram.com/web/likes/%s/unlike/'
    url_comment = 'https://www.instagram.com/web/comments/%s/add/'
    url_follow = 'https://www.instagram.com/web/friendships/%s/follow/'
    url_unfollow = 'https://www.instagram.com/web/friendships/%s/unfollow/'
    url_login = 'https://www.instagram.com/accounts/login/ajax/'
    url_logout = 'https://www.instagram.com/accounts/logout/'
    url_media_detail = 'https://www.instagram.com/p/%s/?__a=1'
    url_user_detail = 'https://www.instagram.com/%s/'
    api_user_detail = 'https://i.instagram.com/api/v1/users/%s/info/'
    # Implicit concatenation of two empty literals == "" - the real agent
    # string is assigned in __init__ from fake_useragent.
    user_agent = "" ""
    accept_language = 'en-US,en;q=0.5'
    # If instagram ban you - query return 400 error.
    error_400 = 0
    # If you have 3 400 error in row - looks like you banned.
    error_400_to_ban = 3
    # If InstaBot think you are banned - going to sleep.
    ban_sleep_time = 2 * 60 * 60
    # All counter.
    bot_mode = 0
    like_counter = 0
    follow_counter = 0
    unfollow_counter = 0
    comments_counter = 0
    current_user = 'hajka'
    current_index = 0
    current_id = 'abcds'
    # List of user_id, that bot follow
    bot_follow_list = []
    user_info_list = []
    user_list = []
    ex_user_list = []
    unwanted_username_list = []
    is_checked = False
    is_selebgram = False
    is_fake_account = False
    is_active_user = False
    is_following = False
    is_follower = False
    is_rejected = False
    is_self_checking = False
    is_by_tag = False
    is_follower_number = 0
    self_following = 0
    self_follower = 0
    # Log setting.
    # NOTE(review): runs at class-definition (import) time, configuring the
    # root logger as a module side effect - confirm this is intended.
    logging.basicConfig(filename='errors.log', level=logging.INFO)
    log_file_path = ''
    log_file = 0
    # Other.
    user_id = 0
    media_by_tag = 0
    media_on_feed = []
    media_by_user = []
    login_status = False
    by_location = False
    # Running Times
    # NOTE(review): the trailing commas make these one-element tuples
    # (0,), not ints; __init__ overwrites them with the int arguments, so
    # they only matter if an instance skips __init__.
    start_at_h = 0,
    start_at_m = 0,
    end_at_h = 23,
    end_at_m = 59,
    # For new_auto_mod
    next_iteration = {"Like": 0, "Follow": 0, "Unfollow": 0, "Comments": 0}
    def __init__(self,
                 login,
                 password,
                 like_per_day=1000,
                 media_max_like=50,
                 media_min_like=0,
                 follow_per_day=0,
                 follow_time=5 * 60 * 60,
                 unfollow_per_day=0,
                 start_at_h=0,
                 start_at_m=0,
                 end_at_h=23,
                 end_at_m=59,
                 database_name='follows_db.db',
                 comment_list=[["this", "the", "your"],
                               ["photo", "picture", "pic", "shot", "snapshot"],
                               ["is", "looks", "feels", "is really"],
                               ["great", "super", "good", "very good", "good",
                                "wow", "WOW", "cool", "GREAT", "magnificent",
                                "magical", "very cool", "stylish", "beautiful",
                                "so beautiful", "so stylish", "so professional",
                                "lovely", "so lovely", "very lovely", "glorious",
                                "so glorious", "very glorious", "adorable",
                                "excellent", "amazing"],
                               [".", "..", "...", "!", "!!", "!!!"]],
                 comments_per_day=0,
                 tag_list=['cat', 'car', 'dog'],
                 max_like_for_one_tag=5,
                 unfollow_break_min=15,
                 unfollow_break_max=30,
                 log_mod=0,
                 proxy="",
                 user_blacklist={},
                 tag_blacklist=[],
                 unwanted_username_list=[],
                 unfollow_whitelist=[]):
        """Open the SQLite DB, build the HTTP session, log in and register
        shutdown hooks.

        Per-day budgets (like_per_day, follow_per_day, unfollow_per_day,
        comments_per_day) are converted to per-action delays in seconds;
        a budget of 0 disables that action and its delay is never computed.

        NOTE(review): comment_list / user_blacklist / tag_blacklist /
        unwanted_username_list / unfollow_whitelist use mutable default
        arguments shared across calls - confirm callers never mutate them.
        """
        self.database_name = database_name
        # isolation_level=None -> autocommit; timeout=0 -> fail fast on locks.
        self.follows_db = sqlite3.connect(database_name, timeout=0, isolation_level=None)
        self.follows_db_c = self.follows_db.cursor()
        check_and_update(self)
        # Pick a random browser user agent and persist it in the DB.
        fake_ua = UserAgent()
        self.user_agent = check_and_insert_user_agent(self, str(fake_ua.random))
        self.bot_start = datetime.datetime.now()
        self.start_at_h = start_at_h
        self.start_at_m = start_at_m
        self.end_at_h = end_at_h
        self.end_at_m = end_at_m
        self.unfollow_break_min = unfollow_break_min
        self.unfollow_break_max = unfollow_break_max
        self.user_blacklist = user_blacklist
        self.tag_blacklist = tag_blacklist
        self.unfollow_whitelist = unfollow_whitelist
        self.comment_list = comment_list
        self.time_in_day = 24 * 60 * 60
        # Like
        self.like_per_day = like_per_day
        if self.like_per_day != 0:
            self.like_delay = self.time_in_day / self.like_per_day
        # Follow
        self.follow_time = follow_time
        self.follow_per_day = follow_per_day
        if self.follow_per_day != 0:
            self.follow_delay = self.time_in_day / self.follow_per_day
        # Unfollow
        self.unfollow_per_day = unfollow_per_day
        if self.unfollow_per_day != 0:
            self.unfollow_delay = self.time_in_day / self.unfollow_per_day
        # Comment
        self.comments_per_day = comments_per_day
        if self.comments_per_day != 0:
            self.comments_delay = self.time_in_day / self.comments_per_day
        # Don't like if media have more than n likes.
        self.media_max_like = media_max_like
        # Don't like if media have less than n likes.
        self.media_min_like = media_min_like
        # Auto mod seting:
        # Default list of tag.
        self.tag_list = tag_list
        # Get random tag, from tag_list, and like (1 to n) times.
        self.max_like_for_one_tag = max_like_for_one_tag
        # log_mod 0 to console, 1 to file
        self.log_mod = log_mod
        self.s = requests.Session()
        # if you need proxy make something like this:
        # self.s.proxies = {"https" : "http://proxyip:proxyport"}
        # by @ageorgios
        if proxy != "":
            proxies = {
                'http': 'http://' + proxy,
                'https': 'http://' + proxy,
            }
            self.s.proxies.update(proxies)
        # convert login to lower
        self.user_login = login.lower()
        self.user_password = password
        self.bot_mode = 0
        self.media_by_tag = []
        self.media_on_feed = []
        self.media_by_user = []
        self.unwanted_username_list = unwanted_username_list
        now_time = datetime.datetime.now()
        log_string = 'Instabot v1.2.0 started at %s:\n' % \
                     (now_time.strftime("%d.%m.%Y %H:%M"))
        self.write_log(log_string)
        # Log in immediately, then resolve blacklist names to user ids.
        self.login()
        self.populate_user_blacklist()
        # Try to unfollow everything the bot followed when we exit.
        signal.signal(signal.SIGTERM, self.cleanup)
        atexit.register(self.cleanup)
def populate_user_blacklist(self):
for user in self.user_blacklist:
user_id_url = self.url_user_detail % (user)
info = self.s.get(user_id_url)
# prevent error if 'Account of user was deleted or link is invalid
from json import JSONDecodeError
try:
all_data = json.loads(info.text)
except JSONDecodeError as e:
self.write_log('Account of user %s was deleted or link is '
'invalid' % (user))
else:
# prevent exception if user have no media
id_user = all_data['user']['id']
# Update the user_name with the user_id
self.user_blacklist[user] = id_user
log_string = "Blacklisted user %s added with ID: %s" % (user,
id_user)
self.write_log(log_string)
time.sleep(5 * random.random())
    def login(self):
        """Perform the web login handshake and set ``self.login_status``.

        Sequence: set browser-like headers, GET the landing page for an
        initial CSRF cookie, POST credentials, then refresh the CSRF token
        from the login response. Success is detected by re-fetching the
        home page and searching for our own username in the HTML.
        """
        log_string = 'Trying to login as %s...\n' % (self.user_login)
        self.write_log(log_string)
        self.login_post = {
            'username': self.user_login,
            'password': self.user_password
        }
        self.s.headers.update({
            'Accept': '*/*',
            'Accept-Language': self.accept_language,
            'Accept-Encoding': 'gzip, deflate, br',
            'Connection': 'keep-alive',
            'Content-Length': '0',
            'Host': 'www.instagram.com',
            'Origin': 'https://www.instagram.com',
            'Referer': 'https://www.instagram.com/',
            'User-Agent': self.user_agent,
            'X-Instagram-AJAX': '1',
            'Content-Type': 'application/x-www-form-urlencoded',
            'X-Requested-With': 'XMLHttpRequest'
        })
        # First GET only exists to obtain the csrftoken cookie.
        r = self.s.get(self.url)
        self.s.headers.update({'X-CSRFToken': r.cookies['csrftoken']})
        time.sleep(5 * random.random())
        login = self.s.post(
            self.url_login, data=self.login_post, allow_redirects=True)
        # The login response carries a fresh CSRF token; keep a copy for logout.
        self.s.headers.update({'X-CSRFToken': login.cookies['csrftoken']})
        self.csrftoken = login.cookies['csrftoken']
        # Mimic the cookies a real browser would have set:
        #ig_vw=1536; ig_pr=1.25; ig_vh=772; ig_or=landscape-primary;
        self.s.cookies['ig_vw'] = '1536'
        self.s.cookies['ig_pr'] = '1.25'
        self.s.cookies['ig_vh'] = '772'
        self.s.cookies['ig_or'] = 'landscape-primary'
        time.sleep(5 * random.random())
        if login.status_code == 200:
            # Verify the session is authenticated: our login name should
            # appear in the home page HTML.
            r = self.s.get('https://www.instagram.com/')
            finder = r.text.find(self.user_login)
            if finder != -1:
                ui = UserInfo()
                self.user_id = ui.get_user_id_by_login(self.user_login)
                self.login_status = True
                log_string = '%s login success!' % (self.user_login)
                self.write_log(log_string)
            else:
                self.login_status = False
                self.write_log('Login error! Check your login data!')
        else:
            self.write_log('Login error! Connection error!')
def logout(self):
now_time = datetime.datetime.now()
log_string = 'Logout: likes - %i, follow - %i, unfollow - %i, comments - %i.' % \
(self.like_counter, self.follow_counter,
self.unfollow_counter, self.comments_counter)
self.write_log(log_string)
work_time = datetime.datetime.now() - self.bot_start
log_string = 'Bot work time: %s' % (work_time)
self.write_log(log_string)
try:
logout_post = {'csrfmiddlewaretoken': self.csrftoken}
logout = self.s.post(self.url_logout, data=logout_post)
self.write_log("Logout success!")
self.login_status = False
except:
logging.exception("Logout error!")
def cleanup(self, *_):
# Unfollow all bot follow
if self.follow_counter >= self.unfollow_counter:
for f in self.bot_follow_list:
log_string = "Trying to unfollow: %s" % (f[0])
self.write_log(log_string)
self.unfollow_on_cleanup(f[0])
sleeptime = random.randint(self.unfollow_break_min,
self.unfollow_break_max)
log_string = "Pausing for %i seconds... %i of %i" % (
sleeptime, self.unfollow_counter, self.follow_counter)
self.write_log(log_string)
time.sleep(sleeptime)
self.bot_follow_list.remove(f)
# Logout
if self.login_status:
self.logout()
    def get_media_id_by_tag(self, tag):
        """ Get media ID set, by your hashtag or location.

        A tag prefixed with ``l:`` is treated as a location id; anything
        else is a hashtag. Results (edge dicts from the ?__a=1 JSON) are
        stored in ``self.media_by_tag``; on any fetch/parse error it is
        reset to an empty list. Returns 0 only on the (unreachable here)
        non-1 login_status branches, otherwise None.
        """
        if self.login_status:
            if tag.startswith('l:'):
                # Location mode: strip the "l:" marker and remember the mode.
                tag = tag.replace('l:', '')
                self.by_location = True
                log_string = "Get Media by location: %s" % (tag)
                self.write_log(log_string)
                if self.login_status == 1:
                    url_location = self.url_location % (tag)
                    try:
                        r = self.s.get(url_location)
                        all_data = json.loads(r.text)
                        self.media_by_tag = list(all_data['graphql']['location']['edge_location_to_media']['edges'])
                    except:
                        self.media_by_tag = []
                        self.write_log("Except on get_media!")
                        logging.exception("get_media_id_by_tag")
                else:
                    return 0
            else:
                # Hashtag mode.
                log_string = "Get Media by tag: %s" % (tag)
                self.by_location = False
                self.write_log(log_string)
                if self.login_status == 1:
                    url_tag = self.url_tag % (tag)
                    try:
                        r = self.s.get(url_tag)
                        all_data = json.loads(r.text)
                        self.media_by_tag = list(all_data['graphql']['hashtag']['edge_hashtag_to_media']['edges'])
                    except:
                        self.media_by_tag = []
                        self.write_log("Except on get_media!")
                        logging.exception("get_media_id_by_tag")
                else:
                    return 0
def get_instagram_url_from_media_id(self, media_id, url_flag=True, only_code=None):
""" Get Media Code or Full Url from Media ID Thanks to Nikished """
media_id = int(media_id)
if url_flag is False: return ""
else:
alphabet = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_'
shortened_id = ''
while media_id > 0:
media_id, idx = divmod(media_id, 64)
shortened_id = alphabet[idx] + shortened_id
if only_code: return shortened_id
else: return 'instagram.com/p/' + shortened_id + '/'
    def get_username_by_media_id(self, media_id):
        """ Get username by media ID Thanks to Nikished.

        Converts the numeric id to a shortcode, fetches the media detail
        JSON and returns the owner's username. Returns False on any fetch
        or parse error; "" on the dead non-1 login_status branch.
        NOTE(review): the log line concatenates media_id with strings, so
        callers are expected to pass it as a str - confirm.
        """
        if self.login_status:
            if self.login_status == 1:
                media_id_url = self.get_instagram_url_from_media_id(int(media_id), only_code=True)
                url_media = self.url_media_detail % (media_id_url)
                try:
                    r = self.s.get(url_media)
                    all_data = json.loads(r.text)
                    username = str(all_data['graphql']['shortcode_media']['owner']['username'])
                    self.write_log("media_id=" + media_id + ", media_id_url=" +
                                   media_id_url + ", username_by_media_id=" + username)
                    return username
                except:
                    logging.exception("username_by_mediaid exception")
                    return False
            else:
                return ""
def get_username_by_user_id(self, user_id):
""" Get username by user_id """
if self.login_status:
try:
url_info = self.api_user_detail % user_id
r = self.s.get(url_info, headers="")
all_data = json.loads(r.text)
username = all_data["user"]["username"]
return username
except:
logging.exception("Except on get_username_by_user_id")
return False
else:
return False
    def get_userinfo_by_name(self, username):
        """ Get user info by name.

        Returns the profile's ``user`` dict, or None when the account
        already follows us (``follows_viewer``), or False when not logged
        in / on any fetch or parse error.

        NOTE(review): this parses a flat 'user' object with 'follows' /
        'followed_by' keys - that is the legacy ?__a=1 profile schema, not
        the 'graphql' shape used elsewhere in this class; confirm the
        endpoint still returns it, otherwise the except path always wins.
        """
        if self.login_status:
            if self.login_status == 1:
                url_info = self.url_user_detail % (username)
                try:
                    r = self.s.get(url_info)
                    all_data = json.loads(r.text)
                    user_info = all_data['user']
                    follows = user_info['follows']['count']
                    follower = user_info['followed_by']['count']
                    follow_viewer = user_info['follows_viewer']
                    # Heuristic: very large follower/following counts are
                    # treated as celebrity / business / fake accounts.
                    if follower > 3000 or follows > 1500:
                        self.write_log(' >>>This is probably Selebgram, Business or Fake account')
                    if follow_viewer:
                        return None
                    return user_info
                except:
                    logging.exception("Except on get_userinfo_by_name")
                    return False
            else:
                return False
        else:
            return False
def like_all_exist_media(self, media_size=-1, delay=True):
""" Like all media ID that have self.media_by_tag """
if self.login_status:
if self.media_by_tag != 0:
i = 0
for d in self.media_by_tag:
# Media count by this tag.
if media_size > 0 or media_size < 0:
media_size -= 1
l_c = self.media_by_tag[i]['node']['edge_liked_by']['count']
if ((l_c <= self.media_max_like and
l_c >= self.media_min_like) or
(self.media_max_like == 0 and
l_c >= self.media_min_like) or
(self.media_min_like == 0 and
l_c <= self.media_max_like) or
(self.media_min_like == 0 and
self.media_max_like == 0)):
for blacklisted_user_name, blacklisted_user_id in self.user_blacklist.items(
):
if self.media_by_tag[i]['node']['owner'][
'id'] == blacklisted_user_id:
self.write_log(
"Not liking media owned by blacklisted user: "
+ blacklisted_user_name)
return False
if self.media_by_tag[i]['node']['owner'][
'id'] == self.user_id:
self.write_log(
"Keep calm - It's your own media ;)")
return False
if check_already_liked(self, media_id=self.media_by_tag[i]['node']['id']) == 1:
self.write_log("Keep calm - It's already liked ;)")
return False
try:
if (len(self.media_by_tag[i]['node']['edge_media_to_caption']['edges']) > 1):
caption = self.media_by_tag[i]['node']['edge_media_to_caption'][
'edges'][0]['node']['text'].encode(
'ascii', errors='ignore')
tag_blacklist = set(self.tag_blacklist)
if sys.version_info[0] == 3:
tags = {
str.lower(
(tag.decode('ASCII')).strip('#'))
for tag in caption.split()
if (tag.decode('ASCII')
).startswith("#")
}
else:
tags = {
unicode.lower(
(tag.decode('ASCII')).strip('#'))
for tag in caption.split()
if (tag.decode('ASCII')
).startswith("#")
}
if tags.intersection(tag_blacklist):
matching_tags = ', '.join(
tags.intersection(tag_blacklist))
self.write_log(
"Not liking media with blacklisted tag(s): "
+ matching_tags)
return False
except:
logging.exception("Except on like_all_exist_media")
return False
log_string = "Trying to like media: %s" % \
(self.media_by_tag[i]['node']['id'])
self.write_log(log_string)
like = self.like(self.media_by_tag[i]['node']['id'])
# comment = self.comment(self.media_by_tag[i]['id'], 'Cool!')
# follow = self.follow(self.media_by_tag[i]["owner"]["id"])
if like != 0:
if like.status_code == 200:
# Like, all ok!
self.error_400 = 0
self.like_counter += 1
log_string = "Liked: %s. Like #%i." % \
(self.media_by_tag[i]['node']['id'],
self.like_counter)
insert_media(self,
media_id=self.media_by_tag[i]['node']['id'],
status="200")
self.write_log(log_string)
elif like.status_code == 400:
log_string = "Not liked: %i" \
% (like.status_code)
self.write_log(log_string)
insert_media(self,
media_id=self.media_by_tag[i]['node']['id'],
status="400")
# Some error. If repeated - can be ban!
if self.error_400 >= self.error_400_to_ban:
# Look like you banned!
time.sleep(self.ban_sleep_time)
else:
self.error_400 += 1
else:
log_string = "Not liked: %i" \
% (like.status_code)
insert_media(self,
media_id=self.media_by_tag[i]['node']['id'],
status=str(like.status_code))
self.write_log(log_string)
return False
# Some error.
i += 1
if delay:
time.sleep(self.like_delay * 0.9 +
self.like_delay * 0.2 *
random.random())
else:
return True
else:
return False
else:
return False
else:
return False
else:
self.write_log("No media to like!")
def like(self, media_id):
""" Send http request to like media by ID """
if self.login_status:
url_likes = self.url_likes % (media_id)
try:
like = self.s.post(url_likes)
last_liked_media_id = media_id
except:
logging.exception("Except on like!")
like = 0
return like
def unlike(self, media_id):
""" Send http request to unlike media by ID """
if self.login_status:
url_unlike = self.url_unlike % (media_id)
try:
unlike = self.s.post(url_unlike)
except:
logging.exception("Except on unlike!")
unlike = 0
return unlike
def comment(self, media_id, comment_text):
""" Send http request to comment """
if self.login_status:
comment_post = {'comment_text': comment_text}
url_comment = self.url_comment % (media_id)
try:
comment = self.s.post(url_comment, data=comment_post)
if comment.status_code == 200:
self.comments_counter += 1
log_string = 'Write: "%s". #%i.' % (comment_text,
self.comments_counter)
self.write_log(log_string)
return comment
except:
logging.exception("Except on comment!")
return False
def follow(self, user_id):
""" Send http request to follow """
if self.login_status:
url_follow = self.url_follow % (user_id)
try:
follow = self.s.post(url_follow)
if follow.status_code == 200:
self.follow_counter += 1
log_string = "Followed: %s #%i." % (user_id,
self.follow_counter)
self.write_log(log_string)
username = self.get_username_by_user_id(user_id=user_id)
insert_username(self, user_id=user_id, username=username)
return follow
except:
logging.exception("Except on follow!")
return False
def unfollow(self, user_id):
""" Send http request to unfollow """
if self.login_status:
url_unfollow = self.url_unfollow % (user_id)
try:
unfollow = self.s.post(url_unfollow)
if unfollow.status_code == 200:
self.unfollow_counter += 1
log_string = "Unfollowed: %s #%i." % (user_id,
self.unfollow_counter)
self.write_log(log_string)
return unfollow
except:
logging.exception("Exept on unfollow!")
return False
    def unfollow_on_cleanup(self, user_id):
        """ Unfollow on cleanup by @rjmayott.

        Like unfollow(), but rate-limit aware: on a non-200 response it
        sleeps 5 minutes and retries once; a second failure sleeps another
        5 minutes and returns False. Returns the last response on success,
        False on double failure or network error, None when not logged in.
        """
        if self.login_status:
            url_unfollow = self.url_unfollow % (user_id)
            try:
                unfollow = self.s.post(url_unfollow)
                if unfollow.status_code == 200:
                    self.unfollow_counter += 1
                    log_string = "Unfollow: %s #%i of %i." % (
                        user_id, self.unfollow_counter, self.follow_counter)
                    self.write_log(log_string)
                else:
                    # Non-200: assume throttling; back off and retry once.
                    log_string = "Slow Down - Pausing for 5 minutes so we don't get banned!"
                    self.write_log(log_string)
                    time.sleep(300)
                    unfollow = self.s.post(url_unfollow)
                    if unfollow.status_code == 200:
                        self.unfollow_counter += 1
                        log_string = "Unfollow: %s #%i of %i." % (
                            user_id, self.unfollow_counter,
                            self.follow_counter)
                        self.write_log(log_string)
                    else:
                        # Second failure: give up on this user, pause again.
                        log_string = "Still no good :( Skipping and pausing for another 5 minutes"
                        self.write_log(log_string)
                        time.sleep(300)
                        return False
                return unfollow
            except:
                log_string = "Except on unfollow... Looks like a network error"
                logging.exception(log_string)
                return False
def auto_mod(self):
""" Star loop, that get media ID by your tag list, and like it """
if self.login_status:
while True:
random.shuffle(self.tag_list)
self.get_media_id_by_tag(random.choice(self.tag_list))
self.like_all_exist_media(random.randint \
(1, self.max_like_for_one_tag))
    def new_auto_mod(self):
        """Main scheduler loop: inside the configured daily time window
        (start_at_h:start_at_m .. end_at_h:end_at_m) run one like / follow /
        unfollow / comment step per iteration; outside it, idle in 100s
        naps. Never returns."""
        while True:
            now = datetime.datetime.now()
            if (
                datetime.time(self.start_at_h, self.start_at_m) <= now.time()
                and now.time() <= datetime.time(self.end_at_h, self.end_at_m)
            ):
                # ------------------- Get media_id -------------------
                # Refill the media queue when it runs dry, and decide how
                # many likes this tag gets before switching to another.
                if len(self.media_by_tag) == 0:
                    self.get_media_id_by_tag(random.choice(self.tag_list))
                    self.this_tag_like_count = 0
                    self.max_tag_like_count = random.randint(
                        1, self.max_like_for_one_tag)
                    self.remove_already_liked()
                # ------------------- Like -------------------
                self.new_auto_mod_like()
                # ------------------- Follow -------------------
                self.new_auto_mod_follow()
                # ------------------- Unfollow -------------------
                self.new_auto_mod_unfollow()
                # ------------------- Comment -------------------
                self.new_auto_mod_comments()
                # Bot iteration in 1 sec
                time.sleep(3)
                # print("Tic!")
            else:
                print("sleeping until {hour}:{min}".format(hour=self.start_at_h,
                                                           min=self.start_at_m), end="\r")
                time.sleep(100)
def remove_already_liked(self):
self.write_log("Removing already liked medias..")
x = 0
while x < len(self.media_by_tag):
if check_already_liked(self, media_id=self.media_by_tag[x]['node']['id']) == 1:
self.media_by_tag.remove(self.media_by_tag[x])
else:
x += 1
    def new_auto_mod_like(self):
        """One scheduled like step: when the Like slot is due, likes are
        enabled and media is queued, like exactly one media and reschedule.
        The head of ``media_by_tag`` is always consumed afterwards; when the
        per-tag quota is hit the whole queue is replaced by the one-element
        sentinel ``[0]`` so the following ``del`` empties it and forces a
        refill on the next new_auto_mod() pass."""
        if time.time() > self.next_iteration["Like"] and self.like_per_day != 0 \
                and len(self.media_by_tag) > 0:
            # You have media_id to like:
            if self.like_all_exist_media(media_size=1, delay=False):
                # If like go to sleep:
                self.next_iteration["Like"] = time.time() + \
                                              self.add_time(self.like_delay)
                # Count this tag likes:
                self.this_tag_like_count += 1
                if self.this_tag_like_count >= self.max_tag_like_count:
                    self.media_by_tag = [0]
            # Del first media_id
            del self.media_by_tag[0]
    def new_auto_mod_follow(self):
        """One scheduled follow step: when the Follow slot is due, try to
        follow the owner of the media at the head of ``media_by_tag``.
        Skips our own profile; a user already followed before only delays
        the next attempt by half the usual interval."""
        if time.time() > self.next_iteration["Follow"] and \
                self.follow_per_day != 0 and len(self.media_by_tag) > 0:
            if self.media_by_tag[0]['node']["owner"]["id"] == self.user_id:
                self.write_log("Keep calm - It's your own profile ;)")
                return

            if check_already_followed(self, user_id=self.media_by_tag[0]['node']["owner"]["id"]) == 1:
                self.write_log("Already followed before " + self.media_by_tag[0]['node']["owner"]["id"])
                # Half delay: retry other candidates sooner than a real follow.
                self.next_iteration["Follow"] = time.time() + \
                                                self.add_time(self.follow_delay/2)
                return

            log_string = "Trying to follow: %s" % (
                self.media_by_tag[0]['node']["owner"]["id"])
            self.write_log(log_string)

            if self.follow(self.media_by_tag[0]['node']["owner"]["id"]) != False:
                # Remember the follow (with timestamp) so cleanup() can undo it.
                self.bot_follow_list.append(
                    [self.media_by_tag[0]['node']["owner"]["id"], time.time()])
                self.next_iteration["Follow"] = time.time() + \
                                                self.add_time(self.follow_delay)
def new_auto_mod_unfollow(self):
if time.time() > self.next_iteration["Unfollow"] and self.unfollow_per_day != 0:
if self.bot_mode == 0:
log_string = "Trying to unfollow #%i: " % (self.unfollow_counter + 1)
self.write_log(log_string)
self.auto_unfollow()
self.next_iteration["Unfollow"] = time.time() + \
self.add_time(self.unfollow_delay)
if self.bot_mode == 1:
unfollow_protocol(self)
    def new_auto_mod_comments(self):
        """One scheduled comment step: when the Comments slot is due,
        comments are enabled, media is queued and the head media is not
        already commented by us / not our own, post a generated comment
        and reschedule."""
        if time.time() > self.next_iteration["Comments"] and self.comments_per_day != 0 \
                and len(self.media_by_tag) > 0 \
                and self.check_exisiting_comment(self.media_by_tag[0]['node']['shortcode']) == False:
            comment_text = self.generate_comment()
            log_string = "Trying to comment: %s" % (self.media_by_tag[0]['node']['id'])
            self.write_log(log_string)
            if self.comment(self.media_by_tag[0]['node']['id'], comment_text) != False:
                self.next_iteration["Comments"] = time.time() + \
                                                  self.add_time(self.comments_delay)
def add_time(self, time):
""" Make some random for next iteration"""
return time * 0.9 + time * 0.2 * random.random()
def generate_comment(self):
c_list = list(itertools.product(*self.comment_list))
repl = [(" ", " "), (" .", "."), (" !", "!")]
res = " ".join(random.choice(c_list))
for s, r in repl:
res = res.replace(s, r)
return res.capitalize()
def check_exisiting_comment(self, media_code):
    """Return True if this media must be skipped for commenting.

    Fetches the media detail page for ``media_code`` and returns True
    when the media belongs to the logged-in account or already carries
    a comment from it; in both cases the media is removed from
    ``self.media_by_tag`` so it is not revisited.  Returns False when
    commenting is allowed, and also on a non-200 response (the media
    is then recorded via ``insert_media`` and dropped).
    """
    url_check = self.url_media_detail % (media_code)
    check_comment = self.s.get(url_check)
    if check_comment.status_code == 200:
        all_data = json.loads(check_comment.text)
        # Never comment on our own media.
        if all_data['graphql']['shortcode_media']['owner']['id'] == self.user_id:
            self.write_log("Keep calm - It's your own media ;)")
            # Del media to don't loop on it
            del self.media_by_tag[0]
            return True
        comment_list = list(all_data['graphql']['shortcode_media']['edge_media_to_comment']['edges'])
        # Skip media this account has already commented on.
        for d in comment_list:
            if d['node']['owner']['id'] == self.user_id:
                self.write_log("Keep calm - Media already commented ;)")
                # Del media to don't loop on it
                del self.media_by_tag[0]
                return True
        return False
    else:
        # Non-200 response: record the failure status (insert_media is
        # a module-level helper defined elsewhere) and drop the media.
        insert_media(self, self.media_by_tag[0]['node']['id'], str(check_comment.status_code))
        self.media_by_tag.remove(self.media_by_tag[0])
        return False
def auto_unfollow(self):
    """Pick one previously-followed, non-whitelisted user at random and
    unfollow them if their profile fails the keep heuristics
    (celebrity-like, fake-looking, passive, or not following back).
    Returns False on any lookup failure.
    """
    checking = True
    while checking:
        username_row = get_username_random(self)
        if not username_row:
            self.write_log("Looks like there is nobody to unfollow.")
            return False
        current_id = username_row[0]
        current_user = username_row[1]
        unfollow_count = username_row[2]  # from the DB row; unused here
        if not current_user:
            # Row had no username cached; resolve it via the API.
            current_user = self.get_username_by_user_id(user_id=current_id)
        if not current_user:
            log_string = "api limit reached from instagram. Will try later"
            self.write_log(log_string)
            return False
        for wluser in self.unfollow_whitelist:
            if wluser == current_user:
                log_string = (
                    "found whitelist user, starting search again")
                self.write_log(log_string)
                break
        else:
            # for/else: no whitelist hit, keep this candidate.
            checking = False

    if self.login_status:
        log_string = "Getting user info : %s" % current_user
        self.write_log(log_string)
        if self.login_status == 1:
            url_tag = self.url_user_detail % (current_user)
            try:
                r = self.s.get(url_tag)
                # Scrape the embedded profile JSON out of the HTML page.
                #all_data = json.loads(re.search('{"activity.+show_app', r.text, re.DOTALL).group(0)+'":""}')['entry_data']['ProfilePage'][0]
                all_data = json.loads(re.search('{"activity.+gatekeepers', r.text, re.DOTALL).group(0)+'":""}')['entry_data']['ProfilePage'][0]
                user_info = all_data['graphql']['user']
                i = 0  # unused
                log_string = "Checking user info.."
                self.write_log(log_string)
                follows = user_info['edge_follow']['count']
                follower = user_info['edge_followed_by']['count']
                media = user_info['edge_owner_to_timeline_media']['count']
                follow_viewer = user_info['follows_viewer']
                followed_by_viewer = user_info[
                    'followed_by_viewer']
                requested_by_viewer = user_info[
                    'requested_by_viewer']
                has_requested_viewer = user_info[
                    'has_requested_viewer']
                log_string = "Follower : %i" % (follower)
                self.write_log(log_string)
                log_string = "Following : %s" % (follows)
                self.write_log(log_string)
                log_string = "Media : %i" % (media)
                self.write_log(log_string)
                # Heuristic classification by follower/following ratio.
                if follows == 0 or follower / follows > 2:
                    self.is_selebgram = True
                    self.is_fake_account = False
                    print(' >>>This is probably Selebgram account')
                elif follower == 0 or follows / follower > 2:
                    self.is_fake_account = True
                    self.is_selebgram = False
                    print(' >>>This is probably Fake account')
                else:
                    self.is_selebgram = False
                    self.is_fake_account = False
                    print(' >>>This is a normal account')
                # "Active" = posts regularly relative to audience size.
                if media > 0 and follows / media < 25 and follower / media < 25:
                    self.is_active_user = True
                    print(' >>>This user is active')
                else:
                    self.is_active_user = False
                    print(' >>>This user is passive')
                if follow_viewer or has_requested_viewer:
                    self.is_follower = True
                    print(" >>>This account is following you")
                else:
                    self.is_follower = False
                    print(' >>>This account is NOT following you')
                if followed_by_viewer or requested_by_viewer:
                    self.is_following = True
                    print(' >>>You are following this account')
                else:
                    self.is_following = False
                    print(' >>>You are NOT following this account')
            except:
                logging.exception("Except on auto_unfollow!")
                time.sleep(3)
                return False
        else:
            # NOTE(review): assumed to pair with `login_status == 1`
            # (matching get_media_id_recent_feed) -- confirm against
            # the original indentation.
            return False

    # Unfollow unless the account passed every keep heuristic.
    if (
        self.is_selebgram is not False
        or self.is_fake_account is not False
        or self.is_active_user is not True
        or self.is_follower is not True
    ):
        self.write_log(current_user)
        self.unfollow(current_id)
        insert_unfollow_count(self, user_id=current_id)
def get_media_id_recent_feed(self):
    """Refresh ``self.media_on_feed`` from the logged-in home feed.

    Fetches the recent-feed JSON and stores its media edges.  Returns
    0 when not logged in via password mode or when the request/parse
    fails; returns None otherwise.
    """
    if not self.login_status:
        return
    now_time = datetime.datetime.now()
    self.write_log("%s : Get media id on recent feed" % (self.user_login))
    if self.login_status != 1:
        return 0
    feed_url = 'https://www.instagram.com/?__a=1'
    try:
        response = self.s.get(feed_url)
        payload = json.loads(response.text)
        self.media_on_feed = list(
            payload['graphql']['user']['edge_web_feed_timeline'][
                'edges'])
        self.write_log("Media in recent feed = %i" % (
            len(self.media_on_feed)))
    except:
        # Network/JSON failure: clear the cache, back off, signal failure.
        logging.exception("get_media_id_recent_feed")
        self.media_on_feed = []
        time.sleep(20)
        return 0
def write_log(self, log_text):
    """Emit *log_text* via print() (log_mod 0) or a file logger (log_mod 1)."""
    if self.log_mod == 0:
        # Console mode: timestamped print.
        try:
            stamp = datetime.datetime.now()
            print(stamp.strftime("%d.%m.%Y_%H:%M") + " " + log_text)
        except UnicodeEncodeError:
            print("Your text has unicode problem!")
    elif self.log_mod == 1:
        # Create log_file if not exist.
        if self.log_file == 0:
            self.log_file = 1
            started = datetime.datetime.now()
            self.log_full_path = '%s%s_%s.log' % (
                self.log_file_path, self.user_login,
                started.strftime("%d.%m.%Y_%H:%M"))
            formatter = logging.Formatter('%(asctime)s - %(name)s '
                                          '- %(message)s')
            self.logger = logging.getLogger(self.user_login)
            self.hdrl = logging.FileHandler(self.log_full_path, mode='w')
            self.hdrl.setFormatter(formatter)
            self.logger.setLevel(level=logging.INFO)
            self.logger.addHandler(self.hdrl)
        # Log to log file.
        try:
            self.logger.info(log_text)
        except UnicodeEncodeError:
            print("Your text has unicode problem!")
| [
"marioalzatelopez@gmail.com"
] | marioalzatelopez@gmail.com |
6031044fe9c0ca47166a2ee89c2183e6ded1bf43 | 4e6533cbdd5e90402218140d8ec9f527d500f4ca | /page.py | 4e11a44b9231443c0ca45b4fa97ba2ee80355f73 | [] | no_license | hliu26/Wikipedia_Philosophy | 56ec6339fd49ad5b5cca9196db3515b0a9ac033e | 0dde1fdd9a0c7b21413ebec9c6069d4a3a105c90 | refs/heads/master | 2022-11-20T00:29:10.731518 | 2020-07-25T00:22:45 | 2020-07-25T00:22:45 | 279,162,353 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,425 | py | from bs4 import BeautifulSoup
from urllib.request import urlopen
import urllib.request
import urllib
from PIL import Image
from io import BytesIO
class Page:
    """A Wikipedia page wrapper used to crawl first-links toward Philosophy."""

    # Base site prefix and the crawl's target page.
    wiki = 'https://en.wikipedia.org'
    end = "https://en.wikipedia.org/wiki/Philosophy"
    # Number of hops taken by the last crawl() call.
    branches = 0

    def __init__(self, url):
        """Fetch *url*, parse it with lxml, and grab the preview image."""
        self.url = url
        self.soup = BeautifulSoup(urlopen(self.url), "lxml")
        self.title = self.soup.title.string
        try:
            # Assumes the last <meta> tag holds the preview-image URL --
            # TODO confirm this holds for all pages.
            self.image = Image.open(BytesIO(urlopen(self.soup.find_all("meta")[-1]["content"]).read()))
        except:
            self.image = None
def remove_table(self):
    """Strip every <table> element from the parsed page in place."""
    try:
        for table in self.soup.find_all("table"):
            table.extract()
    except:
        pass
def remove_parentheses(self, body_text):
    """converts body of text from BeautifulSoup Object into String
    then takes the string and filters out all parenthesized strings/ links
    and returns the filtered obj reconverted into BeautifulSoup obj
    """
    out = []
    paren = 0   # open-parenthesis nesting depth
    italic = 0  # open <i> tag nesting depth
    # Scan whitespace-separated chunks of the raw HTML, keeping only
    # the chunks that sit outside parentheses and italics.
    for i in str(body_text).split(" "):
        if "(" in i:
            if "href" not in i:
                # Plain-text chunk: count every "(" character.
                for symbol in i:
                    if symbol == "(":
                        paren += 1
            else:
                # NOTE(review): this else is assumed to pair with the
                # href check (link chunks deepen only when already
                # inside parens) -- confirm against original indentation.
                if paren != 0:
                    paren += 1
        if ")" in i:
            if "href" not in i:
                for symbol in i:
                    if symbol == ")":
                        paren -= 1
            else:
                if paren != 0:
                    paren -= 1
        # Track italic spans (skip <img ...> which also starts "<i").
        if "<i>" in i or "<i" in i:
            if "<img" not in i:
                italic += 1
        if "</i>" in i:
            if italic == 0:
                pass
            else:
                italic -= 1
        # Keep the chunk only when fully outside parens and italics;
        # a ")" chunk is kept only if it belongs to a link's markup.
        if paren == 0 and italic == 0:
            if ")" in i:
                if "href" in i:
                    out.append(i)
            else:
                out.append(i)
    return BeautifulSoup(' '.join(out), "lxml")
def all_links(self):
    """check_next takes a beautifulsoup object.
    returns all links found in bodies of text (tags "p")
    the result is a list wiki links in each body of text
    """
    self.remove_table()
    res = []
    for tags in self.soup.find("body").find_all("p"):
        # Drop parenthesized / italicized text before collecting links.
        tags = self.remove_parentheses(tags)
        # Skip paragraphs that start with span markup or are
        # style-aligned (coordinates, hatnotes -- not article prose).
        if (any(s in str(tags).split(' ')[0] for s in ["<p>\n<span", "<p><span"])
                or "text-align" in str(tags)):
            continue
        if tags.find("a") == None:
            pass
        else:
            for links in tags.find_all('a'):
                try:
                    if "/wiki/" in links["href"]:
                        res.append(links["href"])
                except:
                    # Anchors without an href attribute are ignored.
                    pass
    return res
def get_nexturl(self):
    """Return the absolute URL of the first body link, or None if none."""
    links = self.all_links()
    return self.wiki + links[0] if links else None
def crawl(self):
    """Crawls from current wiki page to next.
    Should check for cycles and break if one occurs
    """
    # should Page class be nodes with .next (pointers?)
    visited = []  # full URLs already seen, for cycle detection
    self.branches = 0
    print(self.title + ":", self.url)
    visited.append(self.url)
    next_link = self.get_nexturl()
    while next_link != self.end:
        self.branches += 1
        # Fetch and parse the next page, then hop to its first link.
        temp_page = Page(next_link)
        visited.append(next_link)
        print(temp_page.title + ":", temp_page.url)
        next_link = temp_page.get_nexturl()
        if next_link in visited:
            print("Exception('Cycle was discovered. '" + next_link + ")")
            raise Exception('Cycle was discovered. ' + next_link)
    self.branches += 1
    # NOTE(review): next_link already equals the full Philosophy URL
    # here, so prefixing self.wiki again looks like a double prefix in
    # the printed message -- confirm intended output.
    print("\033[1m Philosophy Reached! \033[0m", self.wiki+next_link)
| [
"howardliu26@gmail.com"
] | howardliu26@gmail.com |
c495cb44e7e90173d89e9dc6d7629db953e97243 | c62b0dfae6820b7a99c3592d3cc75c94c22f5ee6 | /ipwhois/nir.py | 741b4e9bc3050d255e7b8d0ab09f5a52744088eb | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | ghelmer/ipwhois | 5443e9b829f8fbdcc1a979a2e231b49bf4a15a24 | d2bff4132d405db544a9812677f620f2fa04cd41 | refs/heads/master | 2020-03-29T04:04:42.475279 | 2018-09-20T12:54:50 | 2018-09-20T12:54:50 | 149,514,805 | 0 | 0 | BSD-2-Clause | 2018-09-19T21:35:15 | 2018-09-19T21:35:15 | null | UTF-8 | Python | false | false | 23,137 | py | # Copyright (c) 2013-2017 Philip Hane
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from . import NetError
from .utils import unique_everseen
import logging
import sys
import re
import copy
from datetime import (datetime, timedelta)
if sys.version_info >= (3, 3): # pragma: no cover
from ipaddress import (ip_address,
ip_network,
summarize_address_range,
collapse_addresses)
else: # pragma: no cover
from ipaddr import (IPAddress as ip_address,
IPNetwork as ip_network,
summarize_address_range,
collapse_address_list as collapse_addresses)
log = logging.getLogger(__name__)
# Base NIR whois output dictionary.
BASE_NET = {
'cidr': None,
'name': None,
'handle': None,
'range': None,
'country': None,
'address': None,
'postal_code': None,
'nameservers': None,
'created': None,
'updated': None,
'contacts': None
}
# Base NIR whois contact output dictionary.
BASE_CONTACT = {
'name': None,
'email': None,
'reply_email': None,
'organization': None,
'division': None,
'title': None,
'phone': None,
'fax': None,
'updated': None
}
# National Internet Registry
NIR_WHOIS = {
'jpnic': {
'country_code': 'JP',
'url': ('http://whois.nic.ad.jp/cgi-bin/whois_gw?lang=%2Fe&key={0}'
'&submit=query'),
'request_type': 'GET',
'request_headers': {'Accept': 'text/html'},
'form_data_ip_field': None,
'fields': {
'name': r'(\[Organization\])[^\S\n]+(?P<val>.*?)\n',
'handle': r'(\[Network Name\])[^\S\n]+(?P<val>.*?)\n',
'created': r'(\[Assigned Date\])[^\S\n]+(?P<val>.*?)\n',
'updated': r'(\[Last Update\])[^\S\n]+(?P<val>.*?)\n',
'nameservers': r'(\[Nameserver\])[^\S\n]+(?P<val>.*?)\n',
'contact_admin': r'(\[Administrative Contact\])[^\S\n]+.+?\>'
'(?P<val>.+?)\<\/A\>\n',
'contact_tech': r'(\[Technical Contact\])[^\S\n]+.+?\>'
'(?P<val>.+?)\<\/A\>\n'
},
'contact_fields': {
'name': r'(\[Last, First\])[^\S\n]+(?P<val>.*?)\n',
'email': r'(\[E-Mail\])[^\S\n]+(?P<val>.*?)\n',
'reply_email': r'(\[Reply Mail\])[^\S\n]+(?P<val>.*?)\n',
'organization': r'(\[Organization\])[^\S\n]+(?P<val>.*?)\n',
'division': r'(\[Division\])[^\S\n]+(?P<val>.*?)\n',
'title': r'(\[Title\])[^\S\n]+(?P<val>.*?)\n',
'phone': r'(\[TEL\])[^\S\n]+(?P<val>.*?)\n',
'fax': r'(\[FAX\])[^\S\n]+(?P<val>.*?)\n',
'updated': r'(\[Last Update\])[^\S\n]+(?P<val>.*?)\n'
},
'dt_format': '%Y/%m/%d %H:%M:%S(JST)',
'dt_hourdelta': 9,
'multi_net': False
},
'krnic': {
'country_code': 'KR',
'url': 'https://whois.kisa.or.kr/eng/whois.jsc',
'request_type': 'POST',
'request_headers': {'Accept': 'text/html'},
'form_data_ip_field': 'query',
'fields': {
'name': r'(Organization Name)[\s]+\:[^\S\n]+(?P<val>.+?)\n',
'handle': r'(Service Name|Network Type)[\s]+\:[^\S\n]+(?P<val>.+?)'
'\n',
'address': r'(Address)[\s]+\:[^\S\n]+(?P<val>.+?)\n',
'postal_code': r'(Zip Code)[\s]+\:[^\S\n]+(?P<val>.+?)\n',
'created': r'(Registration Date)[\s]+\:[^\S\n]+(?P<val>.+?)\n',
'contact_admin': r'(id="eng_isp_contact").+?\>(?P<val>.*?)\<'
'\/div\>\n',
'contact_tech': r'(id="eng_user_contact").+?\>(?P<val>.*?)\<'
'\/div\>\n'
},
'contact_fields': {
'name': r'(Name)[^\S\n]+?:[^\S\n]+?(?P<val>.*?)\n',
'email': r'(E-Mail)[^\S\n]+?:[^\S\n]+?(?P<val>.*?)\n',
'phone': r'(Phone)[^\S\n]+?:[^\S\n]+?(?P<val>.*?)\n'
},
'dt_format': '%Y%m%d',
'dt_hourdelta': 0,
'multi_net': True
}
}
class NIRWhois:
    """
    The class for parsing whois data for NIRs (National Internet Registry).
    JPNIC and KRNIC are currently the only NIRs supported. Output varies
    based on NIR specific whois formatting.

    Args:
        net (:obj:`ipwhois.net.Net`): The network object.

    Raises:
        NetError: The parameter provided is not an instance of
            ipwhois.net.Net
        IPDefinedError: The address provided is defined (does not need to be
            resolved).
    """

    def __init__(self, net):
        """Store the Net object used for all HTTP lookups."""
        # Imported here to avoid a circular import with ipwhois.net.
        from .net import Net

        # ipwhois.net.Net validation
        if isinstance(net, Net):
            self._net = net
        else:
            raise NetError('The provided net parameter is not an instance of '
                           'ipwhois.net.Net')
def parse_fields(self, response, fields_dict, net_start=None,
                 net_end=None, dt_format=None, field_list=None,
                 hourdelta=0, is_contact=False):
    """
    The function for parsing whois fields from a data input.

    Args:
        response (:obj:`str`): The response from the whois/rwhois server.
        fields_dict (:obj:`dict`): The mapping of fields to regex search
            values (required).
        net_start (:obj:`int`): The starting point of the network (if
            parsing multiple networks). Defaults to None.
        net_end (:obj:`int`): The ending point of the network (if parsing
            multiple networks). Defaults to None.
        dt_format (:obj:`str`): The format of datetime fields if known.
            Defaults to None.
        field_list (:obj:`list` of :obj:`str`): If provided, fields to
            parse. Defaults to :obj:`ipwhois.nir.BASE_NET` if is_contact
            is False. Otherwise, defaults to
            :obj:`ipwhois.nir.BASE_CONTACT`.
        hourdelta (:obj:`int`): The timezone delta for created/updated
            fields. Defaults to 0.
        is_contact (:obj:`bool`): If True, uses contact information
            field parsing. Defaults to False.

    Returns:
        dict: A dictionary of fields provided in fields_dict, mapping to
            the results of the regex searches.
    """

    # Trailing newline so line-anchored patterns match the last line.
    response = '{0}\n'.format(response)

    if is_contact:
        ret = {}
        if not field_list:
            field_list = list(BASE_CONTACT.keys())
    else:
        ret = {
            'contacts': {'admin': None, 'tech': None},
            'contact_admin': {},
            'contact_tech': {}
        }
        if not field_list:
            # Contacts are parsed via the contact_* pseudo-fields and
            # resolved separately, so swap them in for 'contacts'.
            field_list = list(BASE_NET.keys())
            field_list.remove('contacts')
            field_list.append('contact_admin')
            field_list.append('contact_tech')

    generate = ((field, pattern) for (field, pattern) in
                fields_dict.items() if field in field_list)

    for field, pattern in generate:

        pattern = re.compile(
            str(pattern),
            re.DOTALL
        )

        # Restrict the scan to this network's slice of the response.
        # Note: net_end is the slice start and net_start the slice end
        # (positions of this net's end and the next net's start).
        if net_start is not None:
            match = pattern.finditer(response, net_end, net_start)
        elif net_end is not None:
            match = pattern.finditer(response, net_end)
        else:
            match = pattern.finditer(response)

        values = []
        for m in match:
            try:
                values.append(m.group('val').strip())
            except IndexError:
                pass

        if len(values) > 0:
            value = None
            try:
                if field in ['created', 'updated'] and dt_format:
                    # Normalize to UTC-ish ISO format by subtracting
                    # the NIR's timezone offset.
                    value = (
                        datetime.strptime(
                            values[0],
                            str(dt_format)
                        ) - timedelta(hours=hourdelta)
                    ).isoformat('T')
                elif field in ['nameservers']:
                    value = list(unique_everseen(values))
                else:
                    values = unique_everseen(values)
                    value = '\n'.join(values)
            except ValueError as e:
                log.debug('NIR whois field parsing failed for {0}: {1}'
                          ''.format(field, e))
                pass

            ret[field] = value

    return ret
def _parse_fields(self, *args, **kwargs):
"""
Deprecated. This will be removed in a future release.
"""
from warnings import warn
warn('NIRWhois._parse_fields() has been deprecated and will be '
'removed. You should now use NIRWhois.parse_fields().')
return self.parse_fields(*args, **kwargs)
def get_nets_jpnic(self, response):
    """
    The function for parsing network blocks from jpnic whois data.

    Args:
        response (:obj:`str`): The response from the jpnic server.

    Returns:
        list of dict: Mapping of networks with start and end positions.

        ::

            [{
                'cidr' (str) - The network routing block
                'start' (int) - The starting point of the network
                'end' (int) - The endpoint point of the network
            }]
    """

    nets = []

    # Iterate through all of the networks found, storing the CIDR value
    # and the start and end positions.
    for match in re.finditer(
        r'^.*?(\[Network Number\])[^\S\n]+.+?>(?P<val>.+?)</A>$',
        response,
        re.MULTILINE
    ):

        try:

            net = copy.deepcopy(BASE_NET)
            tmp = ip_network(match.group(2))

            # Python 2 ipaddr and Python 3 ipaddress expose different
            # attribute names for the same values.
            try:  # pragma: no cover
                network_address = tmp.network_address
            except AttributeError:  # pragma: no cover
                network_address = tmp.ip
                pass

            try:  # pragma: no cover
                broadcast_address = tmp.broadcast_address
            except AttributeError:  # pragma: no cover
                broadcast_address = tmp.broadcast
                pass

            # Range skips the network address itself (first usable - last).
            net['range'] = '{0} - {1}'.format(
                network_address + 1, broadcast_address
            )

            cidr = ip_network(match.group(2).strip()).__str__()

            net['cidr'] = cidr
            net['start'] = match.start()
            net['end'] = match.end()
            nets.append(net)

        except (ValueError, TypeError):

            pass

    return nets
def _get_nets_jpnic(self, *args, **kwargs):
"""
Deprecated. This will be removed in a future release.
"""
from warnings import warn
warn('NIRWhois._get_nets_jpnic() has been deprecated and will be '
'removed. You should now use NIRWhois.get_nets_jpnic().')
return self.get_nets_jpnic(*args, **kwargs)
def get_nets_krnic(self, response):
    """
    The function for parsing network blocks from krnic whois data.

    Args:
        response (:obj:`str`): The response from the krnic server.

    Returns:
        list of dict: Mapping of networks with start and end positions.

        ::

            [{
                'cidr' (str) - The network routing block
                'start' (int) - The starting point of the network
                'end' (int) - The endpoint point of the network
            }]
    """

    nets = []

    # Iterate through all of the networks found, storing the CIDR value
    # and the start and end positions.
    for match in re.finditer(
        r'^(IPv4 Address)[\s]+:[^\S\n]+((.+?)[^\S\n]-[^\S\n](.+?)'
        '[^\S\n]\((.+?)\)|.+)$',
        response,
        re.MULTILINE
    ):

        try:

            net = copy.deepcopy(BASE_NET)
            net['range'] = match.group(2)

            if match.group(3) and match.group(4):
                # Range form "start - end (mask)": summarize the span
                # and collapse into the minimal CIDR list.
                addrs = []
                addrs.extend(summarize_address_range(
                    ip_address(match.group(3).strip()),
                    ip_address(match.group(4).strip())))

                cidr = ', '.join(
                    [i.__str__() for i in collapse_addresses(addrs)]
                )

                net['range'] = '{0} - {1}'.format(
                    match.group(3), match.group(4)
                )

            else:
                # Single-network form: the whole field is the block.
                cidr = ip_network(match.group(2).strip()).__str__()

            net['cidr'] = cidr
            net['start'] = match.start()
            net['end'] = match.end()
            nets.append(net)

        except (ValueError, TypeError):

            pass

    return nets
def _get_nets_krnic(self, *args, **kwargs):
"""
Deprecated. This will be removed in a future release.
"""
from warnings import warn
warn('NIRWhois._get_nets_krnic() has been deprecated and will be '
'removed. You should now use NIRWhois.get_nets_krnic().')
return self.get_nets_krnic(*args, **kwargs)
def get_contact(self, response=None, nir=None, handle=None,
                retry_count=3, dt_format=None):
    """
    The function for retrieving and parsing NIR whois data based on
    NIR_WHOIS contact_fields.

    Args:
        response (:obj:`str`): Optional response object, this bypasses the
            lookup.
        nir (:obj:`str`): The NIR to query ('jpnic' or 'krnic'). Required
            if response is None.
        handle (:obj:`str`): For NIRs that have separate contact queries
            (JPNIC), this is the contact handle to use in the query.
            Defaults to None.
        retry_count (:obj:`int`): The number of times to retry in case
            socket errors, timeouts, connection resets, etc. are
            encountered. Defaults to 3.
        dt_format (:obj:`str`): The format of datetime fields if known.
            Defaults to None.

    Returns:
        dict: Mapping of the fields provided in contact_fields, to their
            parsed results.
    """

    # KRNIC embeds contact info in the main response; JPNIC requires a
    # separate per-handle query.
    if response or nir == 'krnic':

        contact_response = response

    else:

        # Retrieve the whois data.
        contact_response = self._net.get_http_raw(
            url=str(NIR_WHOIS[nir]['url']).format(handle),
            retry_count=retry_count,
            headers=NIR_WHOIS[nir]['request_headers'],
            request_type=NIR_WHOIS[nir]['request_type']
        )

    # Call parse_fields() directly rather than the deprecated
    # _parse_fields() wrapper, which would emit a deprecation warning
    # on every contact lookup.
    return self.parse_fields(
        response=contact_response,
        fields_dict=NIR_WHOIS[nir]['contact_fields'],
        dt_format=dt_format,
        hourdelta=int(NIR_WHOIS[nir]['dt_hourdelta']),
        is_contact=True
    )
def _get_contact(self, *args, **kwargs):
"""
Deprecated. This will be removed in a future release.
"""
from warnings import warn
warn('NIRWhois._get_contact() has been deprecated and will be '
'removed. You should now use NIRWhois.get_contact().')
return self.get_contact(*args, **kwargs)
def lookup(self, nir=None, inc_raw=False, retry_count=3, response=None,
           field_list=None, is_offline=False):
    """
    The function for retrieving and parsing NIR whois information for an IP
    address via HTTP (HTML scraping).

    Args:
        nir (:obj:`str`): The NIR to query ('jpnic' or 'krnic'). Required
            if response is None.
        inc_raw (:obj:`bool`, optional): Whether to include the raw
            results in the returned dictionary. Defaults to False.
        retry_count (:obj:`int`): The number of times to retry in case
            socket errors, timeouts, connection resets, etc. are
            encountered. Defaults to 3.
        response (:obj:`str`): Optional response object, this bypasses the
            NIR lookup. Required when is_offline=True.
        field_list (:obj:`list` of :obj:`str`): If provided, fields to
            parse. Defaults to :obj:`ipwhois.nir.BASE_NET`.
        is_offline (:obj:`bool`): Whether to perform lookups offline. If
            True, response and asn_data must be provided. Primarily used
            for testing.

    Returns:
        dict: The NIR whois results:

        ::

            {
                'query' (str) - The IP address.
                'nets' (list of dict) - Network information which consists
                    of the fields listed in the ipwhois.nir.NIR_WHOIS
                    dictionary.
                'raw' (str) - Raw NIR whois results if the inc_raw
                    parameter is True.
            }
    """

    if nir not in NIR_WHOIS:

        raise KeyError('Invalid arg for nir (National Internet Registry')

    # Create the return dictionary.
    results = {
        'query': self._net.address_str,
        'raw': None
    }

    # Only fetch the response if we haven't already.
    if response is None:

        if is_offline:

            raise KeyError('response argument required when '
                           'is_offline=True')

        log.debug('Response not given, perform WHOIS lookup for {0}'
                  .format(self._net.address_str))

        form_data = None
        if NIR_WHOIS[nir]['form_data_ip_field']:
            form_data = {NIR_WHOIS[nir]['form_data_ip_field']:
                         self._net.address_str}

        # Retrieve the whois data.
        response = self._net.get_http_raw(
            url=str(NIR_WHOIS[nir]['url']).format(self._net.address_str),
            retry_count=retry_count,
            headers=NIR_WHOIS[nir]['request_headers'],
            request_type=NIR_WHOIS[nir]['request_type'],
            form_data=form_data
        )

    # If inc_raw parameter is True, add the response to return dictionary.
    if inc_raw:

        results['raw'] = response

    nets = []
    nets_response = None
    # Call the public parsers directly rather than their deprecated
    # _-prefixed wrappers, which emit a warning on every call.
    if nir == 'jpnic':

        nets_response = self.get_nets_jpnic(response)

    elif nir == 'krnic':

        nets_response = self.get_nets_krnic(response)

    nets.extend(nets_response)

    # Cache of contact handle -> parsed contact, shared across nets so
    # a handle appearing in several networks is only fetched once.
    global_contacts = {}

    # Iterate through all of the network sections and parse out the
    # appropriate fields for each.
    log.debug('Parsing NIR WHOIS data')
    for index, net in enumerate(nets):

        # The parse window for this net runs from its own end position
        # to the start of the next net (or EOF for the last one).
        section_end = None
        if index + 1 < len(nets):

            section_end = nets[index + 1]['start']

        try:

            dt_format = NIR_WHOIS[nir]['dt_format']

        except KeyError:  # pragma: no cover

            dt_format = None

        temp_net = self.parse_fields(
            response=response,
            fields_dict=NIR_WHOIS[nir]['fields'],
            net_start=section_end,
            net_end=net['end'],
            dt_format=dt_format,
            field_list=field_list,
            hourdelta=int(NIR_WHOIS[nir]['dt_hourdelta'])
        )
        temp_net['country'] = NIR_WHOIS[nir]['country_code']
        contacts = {
            'admin': temp_net['contact_admin'],
            'tech': temp_net['contact_tech']
        }

        del (
            temp_net['contact_admin'],
            temp_net['contact_tech']
        )

        if not is_offline:

            for key, val in contacts.items():

                if len(val) > 0:

                    # JPNIC may return multiple handles per role,
                    # newline-separated.
                    if isinstance(val, str):

                        val = val.splitlines()

                    for contact in val:

                        if contact in global_contacts.keys():

                            temp_net['contacts'][key] = (
                                global_contacts[contact]
                            )

                        else:

                            # KRNIC contact info is inline in the
                            # response; JPNIC requires a handle query.
                            if nir == 'krnic':

                                tmp_response = contact
                                tmp_handle = None

                            else:

                                tmp_response = None
                                tmp_handle = contact

                            temp_net['contacts'][key] = self.get_contact(
                                response=tmp_response,
                                handle=tmp_handle,
                                nir=nir,
                                retry_count=retry_count,
                                dt_format=dt_format
                            )
                            global_contacts[contact] = (
                                temp_net['contacts'][key]
                            )

        # Merge the net dictionaries.
        net.update(temp_net)

        # The start and end values are no longer needed.
        del net['start'], net['end']

    # Add the networks to the return dictionary.
    results['nets'] = nets

    return results
| [
"secynic@gmail.com"
] | secynic@gmail.com |
9fcd3987ca9746115417bee085a86a8f34446247 | b2308018c50240615d88e0b3ae0e13c264c05430 | /lofasm/galaxymodel.py | 4295a3209cbe64a87e8072a6545b7c4d67936a1f | [] | no_license | pauvarela3/LoFASM | 0268d7ac811b47026a3d13b221f54e48531d836a | 852173ffd1d9f1e46a75e61a36d829d792dd1c19 | refs/heads/master | 2020-03-10T14:13:23.167180 | 2019-04-01T18:51:04 | 2019-04-01T18:51:04 | 129,420,949 | 0 | 0 | null | 2018-04-13T15:28:29 | 2018-04-13T15:28:28 | null | UTF-8 | Python | false | false | 1,582 | py | import lofasm.calibrate.lofasmcal as lfc
import numpy as np
from datetime import datetime
from datetime import timedelta
from astropy.time import Time
from lofasm import parse_data as pdat
# Model the galaxy's contribution to LoFASM power as a function of local
# sidereal time, writing one data file per (station, frequency, horizon).
galaxyobj = lfc.galaxy()

# 300 evenly spaced LST samples across a 24 hour sidereal day.
lst_times = np.linspace(0, 24, 300)
lst_time = lst_times.tolist()

hor = 0  # horizon cutoff altitude (degrees)
st = 3   # LoFASM station id

cur = 15.        # centre of the first frequency bin (MHz)
df = 70./1024    # frequency step: 70 MHz span over 1024 spectral bins

while cur < 86:  # sweep frequency channels from 15 MHz up to ~86 MHz
    # Context manager guarantees the file is closed even if the model
    # call raises (the original relied on an explicit close()).
    with open("lofasm" + str(st) + "_" + str(cur) + "MHz_" + str(float(hor)) + "deg_new.dat", 'w') as datafile:
        datafile.write("<Time Stamp>\t<Galaxy Power>\n")
        galaxymodel = galaxyobj.galaxy_power_array(lst_time, cur, st, hor, lst=True)
        # One "<lst> <power>" line per sample.
        for x in range(len(lst_time)):
            datafile.write(str(lst_time[x]) + " " + str(galaxymodel[x]) + '\n')
    cur = cur + df  # step to the next frequency channel
| [
"noreply@github.com"
] | noreply@github.com |
5b1f9fa94e913fb8469bb608f7aff55cd882ab1f | c227735e87fe9da845e81c994f5a0c4cc410abf9 | /Python/ReverseString.py | 378dd1708951b66f59c8040961793d275e8f5106 | [
"MIT"
] | permissive | ani03sha/potd | 816d5a25c9658eb38dda46352d7518d999a807f2 | 05ca25039c5462b68dae9d83e856dd4c66e7ab63 | refs/heads/main | 2023-02-02T18:14:43.273677 | 2020-12-18T15:31:06 | 2020-12-18T15:31:06 | 315,034,076 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 633 | py | """
Given a string, reverse all of its characters and return the resulting string.
"""
from typing import List
class ReverseString:
    """Solution for reversing all characters of a string."""

    def reverseString(self, s: str) -> str:
        """Return *s* with its characters in reverse order.

        Fixes the annotations (callers pass and receive plain strings,
        not lists) and replaces the per-character prepend loop, which
        rebuilt the string each iteration (O(n^2)), with an O(n) slice.
        """
        return s[::-1]
if __name__ == "__main__":
    # Demonstrate the solution on a few sample inputs.
    reverser = ReverseString()
    for sample in ("Cat", "Program of the day", "red quark", "level", ""):
        print(reverser.reverseString(sample))
| [
"anirudh03sharma@gmail.com"
] | anirudh03sharma@gmail.com |
12654f11056f73cda0eb1e3ff7d062af58f8d11c | 90fb55320c81259cb199b9a8900e11b2ba63da4f | /232/gold.py | 2b2c82b71ecab0bffd80f1f0592f28ec519fc32d | [] | no_license | pogross/bitesofpy | f9bd8ada790d56952026a938b1a34c20562fdd38 | 801f878f997544382e0d8650fa6b6b1b09fa5b81 | refs/heads/master | 2020-05-19T07:31:44.556896 | 2020-01-26T12:48:28 | 2020-01-26T12:48:28 | 184,899,394 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,068 | py | from itertools import tee
from dataclasses import dataclass
# https://pkgstore.datahub.io/core/gold-prices/annual_csv/data/343f626dd4f7bae813cfaac23fccd1bc/annual_csv.csv
gold_prices = """
1950-12,34.720 1951-12,34.660 1952-12,34.790 1953-12,34.850 1954-12,35.040
1955-12,34.970 1956-12,34.900 1957-12,34.990 1958-12,35.090 1959-12,35.050
1960-12,35.540 1961-12,35.150 1962-12,35.080 1963-12,35.080 1964-12,35.120
1965-12,35.130 1966-12,35.180 1967-12,35.190 1968-12,41.113 1969-12,35.189
1970-12,37.434 1971-12,43.455 1972-12,63.779 1973-12,106.236 1974-12,183.683
1975-12,139.279 1976-12,133.674 1977-12,160.480 1978-12,207.895 1979-12,463.666
1980-12,596.712 1981-12,410.119 1982-12,444.776 1983-12,388.060 1984-12,319.622
1985-12,321.985 1986-12,391.595 1987-12,487.079 1988-12,419.248 1989-12,409.655
1990-12,378.161 1991-12,361.875 1992-12,334.657 1993-12,383.243 1994-12,379.480
1995-12,387.445 1996-12,369.338 1997-12,288.776 1998-12,291.357 1999-12,283.743
2000-12,271.892 2001-12,275.992 2002-12,333.300 2003-12,407.674 2004-12,442.974
2005-12,509.423 2006-12,629.513 2007-12,803.618 2008-12,819.940 2009-12,1135.012
2010-12,1393.512 2011-12,1652.725 2012-12,1687.342 2013-12,1221.588 2014-12,1200.440
2015-12,1068.317 2016-12,1152.165 2017-12,1265.674 2018-12,1249.887
""" # noqa E501
@dataclass
class GoldPrice:
    """A single year's gold price observation."""
    year: int  # calendar year of the December observation
    price: float  # gold price in USD
    change: float = 0  # price drop from the previous year (prev - current)
def pairwise(iterable):
    """Yield consecutive overlapping pairs: s -> (s0, s1), (s1, s2), ..."""
    first, second = tee(iterable)
    next(second, None)
    return zip(first, second)
def years_gold_value_decreased(gold_prices: str = gold_prices) -> (int, int):
    """Analyze gold_prices returning a tuple of the year the gold price
    decreased the most and the year the gold price increased the most.
    """
    # Flatten the multi-line blob into "YYYY-MM,price" tokens.
    entries = " ".join(gold_prices.splitlines()).strip().split(" ")
    prices = []
    for entry in entries:
        parts = entry.split(",")
        prices.append(GoldPrice(year=int(parts[0][:4]), price=float(parts[1])))
    # change = previous price - current price (positive means a drop).
    for previous, current in pairwise(prices):
        current.change = previous.price - current.price
    prices.sort(key=lambda p: p.change)
    # Largest change = biggest drop; smallest = biggest rise.
    return prices[-1].year, prices[0].year
| [
"p.gross@tu-bs.de"
] | p.gross@tu-bs.de |
0897c73c5459f223e50ba84b5052ba11d22ae3ee | cb280ddcc6dafa8fa62c588c7e4694909c4c2504 | /helper.py | aeb12ac892070ae9e20e826d9cbe809235396cbc | [] | no_license | chayapatr/discord | 78b383080f0eff7f4509571458a72057066c3dfa | e6ccec50b7650407f7e77a372c0f0afc1db13ea0 | refs/heads/master | 2023-05-20T14:10:38.888898 | 2021-06-11T16:09:40 | 2021-06-11T16:09:40 | 374,484,413 | 6 | 1 | null | null | null | null | UTF-8 | Python | false | false | 242 | py | # IDEA
import requests
import json
def fetch(text):
    """GET *text* (a URL) and return the response body parsed as JSON."""
    res = requests.get(text)
    return json.loads(res.text)
def printList(listname,l):
    """Print *listname* as a heading, then each element of *l* on its own line."""
    print(listname)
    for element in l:
        print(element)
print('-------------------') | [
"31594543+chayapatr@users.noreply.github.com"
] | 31594543+chayapatr@users.noreply.github.com |
da7353d15c0e1c60c60a975cc03adfdad7af1680 | 4b5090212ba723f319baff170454795816c77d0d | /todo/todo/settings.py | ccee94efc42fefe80d5dcb73bd42a81f8cfb7918 | [] | no_license | hacker1db/django_projects | 285fd496010ba6eca09d81006f1461c6230f29be | af088f683c019b604887baef780b9857d18be50f | refs/heads/master | 2021-01-02T08:13:22.062407 | 2019-02-10T03:09:16 | 2019-02-10T03:09:16 | 98,952,852 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,090 | py | """
Django settings for todo project.
Generated by 'django-admin startproject' using Django 1.11.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
# BASE_DIR is the repository root (two directory levels above this file).
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to version control; load it from an
# environment variable before deploying.
SECRET_KEY = 'rs#)0c-&qr&of9z9owek5$k&004q6!xs@)uyf)h$51w0f8mbts'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Empty is acceptable while DEBUG is True; production must list the served hosts.
ALLOWED_HOSTS = []
# Application definition
# Only Django's stock contrib apps are installed; no project apps yet.
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
# Default Django 1.11 middleware stack, unchanged.
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'todo.urls'
# App-directory template loading only; no project-level template dirs.
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'todo.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
# SQLite file lives next to manage.py; fine for development only.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
| [
"hacker1db@outlook.com"
] | hacker1db@outlook.com |
f374671883af08c4b50ac4f41ea90214c848b1a7 | c380976b7c59dadaccabacf6b541124c967d2b5a | /.history/src/data/data_20191018141356.py | 20f7b6d5858a6e20347e611529877edb27123cb8 | [
"MIT"
] | permissive | bkraft4257/kaggle_titanic | b83603563b4a3c995b631e8142fe72e1730a0e2e | f29ea1773773109a867278c001dbd21a9f7b21dd | refs/heads/master | 2020-08-17T12:45:28.653402 | 2019-11-15T16:20:04 | 2019-11-15T16:20:04 | 215,667,760 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,042 | py | import pandas as pd
from typing import Union
from pathlib import Path
from nameparser import HumanName
class ExtractData:
    """Load the Titanic training CSV and derive name/ticket-based features."""

    # Normalizes uncommon honorifics onto the frequent Mr./Mrs. titles.
    # Keys/values must match the strings HumanName produces.
    title_translator = {
        "Mlle.": "Mrs.",
        "Mme.": "Mrs.",
        "Sir.": "Mr.",
        "Ms.": "Mrs.",
        "": "Mr.",
        "Col.": "Mr.",
        "Capt.": "Mr.",
        "Lady.": "Mrs.",
        "the Countess. of": "Mrs.",
    }

    def __init__(self, filename: Union[str, Path], drop_columns=None):
        """Extract training data from a CSV file or Path.

        Arguments:
            filename: CSV data file containing the training data.
            drop_columns: columns that clean() should drop; defaults to
                ["age", "cabin", "name", "ticket"].
        """
        if drop_columns is None:
            drop_columns = ["age", "cabin", "name", "ticket"]
        self.filename = filename
        self.drop_columns = drop_columns
        self.all_label_columns = ["survived"]
        self.all_feature_columns = [
            "pclass",
            "name",
            "sex",
            "age",
            "sibsp",
            "parch",
            "ticket",
            "fare",
            "cabin",
            "embarked",
        ]
        self.Xy_raw = None
        self.Xy = None
        self.extract_raw()
        self.Xy = self.Xy_raw.copy()
        self.extract_title()
        self.extract_last_name()

    def extract_raw(self):
        """Read the CSV into self.Xy_raw with normalized column names."""
        Xy_raw = pd.read_csv(self.filename)
        Xy_raw.columns = Xy_raw.columns.str.lower().str.replace(" ", "_")
        # 'age' holds only known values; renamed so an estimate can later
        # be stored under 'age'.
        Xy_raw = Xy_raw.rename(columns={'age': 'age_known'})
        Xy_raw["pclass"] = Xy_raw["pclass"].astype("category")
        self.Xy_raw = Xy_raw.set_index("passengerid")

    def extract_cabin_prefix(self):
        """Split the ticket field into its numeric part and letter prefix.

        Bug fix: the original referenced the bare name ``Xy`` (NameError at
        call time); it now operates on ``self.Xy``.  Raw strings silence the
        invalid-escape warning and ``expand=False`` yields Series directly.
        """
        self.Xy['cabin_number'] = self.Xy.ticket.str.extract(r'(\d+)$', expand=False)
        self.Xy['cabin_prefix'] = self.Xy.ticket.str.extract(r'^(.+) ', expand=False)

    def extract_title(self):
        """Parse the honorific title out of the name column and normalize it."""
        self.Xy["title"] = (
            self.Xy.name.apply(lambda x: HumanName(x).title)
            .replace(self.title_translator)
            .replace({r"\.": ""}, regex=True)
        )

    def extract_last_name(self):
        """Parse the family name out of the name column."""
        self.Xy["last_name"] = self.Xy.name.apply(lambda x: HumanName(x).last)

    def clean(self):
        """Drop unneeded columns, rebuilding self.Xy from the raw frame.

        NOTE(review): this discards the derived title/last_name columns, and
        the default drop list still says 'age' although extract_raw renamed
        that column to 'age_known' -- confirm intended before relying on it.
        """
        self.Xy = self.Xy_raw.drop(self.drop_columns, axis=1)
def estimate_age(in_df, groupby=['sex','title']):
Xy_age_estimate = in_df.groupby(['sex','title']).age_known.mean().to_frame().round(1)
Xy_age_estimate = Xy_age_estimate.rename(columns ={'age_known':'age_estimate'})
out_df = in_df.reset_index().merge(Xy_age_estimate, on=['sex', 'title'])
out_df['age'] = out_df['age_known'].fillna(out_df['age_estimate'])
return out_df | [
"bob.kraft@infiniteleap.net"
] | bob.kraft@infiniteleap.net |
c70fb6ef81be3017d8199562e44b8c3d85373747 | f038295d1b052b99a975a17fbdcebbb03c231ba7 | /Mangoplate_Crawler/ConnectDB.py | f6ba64d320f91c749dd6b0f245010c8c57c8bc77 | [] | no_license | SNUEV/Selenium_Crawler | 8f1ebb97d6917de3dbb8fbff958a3ad5263c143d | c8336fcdde716eb1ec14491ddb4e9118f0be0413 | refs/heads/master | 2020-12-27T10:44:32.544224 | 2020-02-02T08:23:24 | 2020-02-02T08:23:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,777 | py | # Database Setting
import psycopg2
from datetime import datetime
class ConnectDB:
  """Thin wrapper around a psycopg2 connection to the restaurants database.

  NOTE(review): the connection is opened at class-definition (import) time
  with hard-coded credentials; consider moving both into __init__ and
  configuration before production use.
  """
  hostname = 'localhost'
  username = 'sangchulkim'
  password = ''
  database = 'restaurant_api_development'
  myConnection = psycopg2.connect(
    host = hostname,
    database = database,
    user = username,
    password = password
  )
  cursor = myConnection.cursor()
  def __init__(self):
    pass
  def getRestaurant(self):
    # Debug helper: dumps every row of the categories table to stdout.
    ConnectDB.cursor.execute("SELECT * FROM categories")
    print(ConnectDB.cursor.fetchall())
    # TODO: add the restaurant-table query here
    # abstract this a little more later
  def addRestaurant(self, restaurantInfo):
    """Insert one restaurant row built from the *restaurantInfo* dict.

    Duplicate names are ignored (ON CONFLICT DO NOTHING) and the statement
    carries its own explicit COMMIT; missing dict keys become NULLs via
    .get().  created_at/updated_at are both set to today's date.
    """
    cur_time = datetime.today().strftime('%Y-%m-%d')
    ConnectDB.cursor.execute(
      "INSERT INTO restaurants \
      ( \
        name, \
        address, \
        business_hour, \
        parking_space,\
        created_at, \
        updated_at, \
        location, \
        pricerange,\
        category, \
        point \
      ) \
      VALUES \
      ( %s, %s, %s, %s, %s, %s, %s, %s, %s, %s) ON CONFLICT (name) DO NOTHING; COMMIT; ", \
      (
        restaurantInfo.get('name'),
        restaurantInfo.get('address'),
        restaurantInfo.get('business_hour'),
        restaurantInfo.get('parking_space'),
        cur_time,
        cur_time,
        restaurantInfo.get('location'),
        restaurantInfo.get('pricerange'),
        restaurantInfo.get('category'),
        restaurantInfo.get('point'),
      )
    )
print(restaurantInfo) | [
"tsb03672@naver.com"
] | tsb03672@naver.com |
a4a0b29f837124a381829b42b53afcfabe37fc0c | 5301f6a0c1a84690fe43e8a7506e5a75b93f68e5 | /sec2_prediction/_4_line_reg_visual.py | c8885341920f4e41b264a9657127c4ddd06ab4fe | [] | no_license | gungoren/ml_lecture_notes | 7b08c81fa113680649ddb02b2aedffc6422bfbac | 24944fb8e5ccbbb741df9cf4512fea1a0a203c98 | refs/heads/master | 2020-06-28T21:51:13.668536 | 2020-02-12T16:01:01 | 2020-02-12T16:01:01 | 200,351,395 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,069 | py |
# 1. libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import LabelEncoder, OneHotEncoder, StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
# 2. preprocessing
# 2.1. import data
df = pd.read_csv("../data/bilkav/sec2_prediction/satislar.csv")
aylar = df[['Aylar']]
satislar = df[['Satislar']]
# 3. split test and train data
x_train, x_test, y_train, y_test = train_test_split(aylar, satislar, test_size=0.33, random_state=0)
'''
# 4. attribute scaling
sc = StandardScaler()
X_train = sc.fit_transform(x_train)
X_test = sc.fit_transform(x_test)
Y_train = sc.fit_transform(y_train)
Y_test = sc.fit_transform(y_test)
'''
lr = LinearRegression()
lr.fit(x_train, y_train)
x_predict = lr.predict(x_test)
x_train = x_train.sort_index()
y_train = y_train.sort_index()
plt.plot(x_train, y_train)
plt.plot(x_test, x_predict)
plt.title("Aylara göre satislar")
plt.xlabel("Aylar")
plt.ylabel("Satislar") | [
"mehgungoren@gmail.com"
] | mehgungoren@gmail.com |
31b673e81032380bdca2332df8006befb0d71612 | 87098732a1339e4de1033774d27f44b24012ddee | /serious 2.py | 344e757a4a67f2f36cf8793827a22e8920ab45dc | [] | no_license | ducanh2209/btvn | 3902d1172d536b915c1bc85e8860dab15d7a31f3 | 5eae6be2b4860edbfc1f492406f8626e4be89716 | refs/heads/master | 2021-01-19T18:58:59.971044 | 2017-08-24T12:45:45 | 2017-08-24T12:45:45 | 101,175,068 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 94 | py | print ("Hế lô", end ='')
print (", tên tôi là", end ='')
print ("Đức Anh", end= '') | [
"noreply@github.com"
] | noreply@github.com |
e572f70627bcd860e641a3765013c1fcd1e9cb1f | 2acf6e307cf923be308c069fbc2a58d5f9de9300 | /controle_estagios/urls.py | 84b5c96121879b5d8a8a9e55d0f3689dc5f88394 | [] | no_license | jefethibes/Estagios | a6766cc1ee265345ed46429f9730fa3f24894e01 | 4a75fd98d5ee5de633a6618b4d1f596d20386015 | refs/heads/main | 2023-03-18T08:41:00.031769 | 2021-03-14T15:38:24 | 2021-03-14T15:38:24 | 335,810,368 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,204 | py | from django.urls import path
from controle_estagios.views import cursos, estagios, empresas, alunos, index
# URL namespace, used when reversing e.g. 'controle_estagios:list_cursos'.
app_name = 'controle_estagios'
urlpatterns = [
    # Cursos: create / list / edit (class-based views).
    path('form_cursos/', cursos.AddCursos.as_view(), name='form_cursos'),
    path('list_cursos/', cursos.ListCursos.as_view(), name='list_cursos'),
    path('update_cursos/<int:pk>/', cursos.UpdateCursos.as_view(), name='update_cursos'),
    # Estagios: create / list / edit.
    path('form_estagios/', estagios.AddEstagios.as_view(), name='form_estagios'),
    path('list_estagios/', estagios.ListEstagios.as_view(), name='list_estagios'),
    path('update_estagios/<int:pk>/', estagios.UpdateEstagios.as_view(), name='update_estagios'),
    # Empresas: create / list / edit.
    path('form_empresas/', empresas.AddEmpresas.as_view(), name='form_empresas'),
    path('list_empresas/', empresas.ListEmpresas.as_view(), name='list_empresas'),
    path('update_empresas/<int:pk>/', empresas.UpdateEmpresas.as_view(), name='update_empresas'),
    # Alunos: create / list / edit.
    path('form_alunos/', alunos.AddAlunos.as_view(), name='form_alunos'),
    path('list_alunos/', alunos.ListAlunos.as_view(), name='list_alunos'),
    path('update_alunos/<int:pk>/', alunos.UpdateAlunos.as_view(), name='update_alunos'),
    # Landing page (function-based view).
    path('', index.index, name='index'),
]
| [
"jefepatytony@gmail.com"
] | jefepatytony@gmail.com |
3b116be5ec9c964a89dad5698c76061c51a48a4d | 097e7e762ff6b0f7e25d31709eb73a311f56995b | /divide.py | 3cd3163de78149fd4b40783a9023a87a8193e870 | [] | no_license | hsywhu/facial_landmark_regression | fc72d6b52b1a407046f7fa96ef3cad97ff0cb3df | 84ce1f9c5630b9e5cf1d5a9e2befcb4d09f3fbb8 | refs/heads/master | 2020-04-10T14:57:44.810244 | 2018-12-09T23:56:23 | 2018-12-09T23:56:23 | 161,092,758 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,202 | py | import os
import shutil
import pandas as pd
import random
# Source image folder plus train/val destination folders (Windows paths).
root_dir = "E:\\face_cropped_collect"
train_dir = "E:\\face_cropped_divide\\train"
val_dir = "E:\\face_cropped_divide\\val"
# Landmark rows collected per split; written out as CSVs at the end.
train_landmarks = []
val_landmarks = []
# CSV layout: column 0 = image file name, remaining columns = landmark values.
landmarks_frame = pd.read_csv("E:\\face_cropped\\train_cropped.csv", header = None)
img_name = landmarks_frame.iloc[:, 0]
landmarks = landmarks_frame.iloc[:, 1:].values
for i in range(len(img_name)):
    # Roughly 25% of images go to validation (non-deterministic: no RNG seed).
    if random.randint(1, 100) <= 25:
        file_path = os.path.join(root_dir, img_name[i])
        shutil.copy(file_path, val_dir)
        temp = []
        temp.append(img_name[i])
        for l in landmarks[i]:
            temp.append(l)
        val_landmarks.append(temp)
    else:
        file_path = os.path.join(root_dir, img_name[i])
        shutil.copy(file_path, train_dir)
        temp = []
        temp.append(img_name[i])
        for l in landmarks[i]:
            temp.append(l)
        train_landmarks.append(temp)
    print("processing picture #" + str(i))
# Persist the per-split landmark tables (row = [name, coords...]).
pd.DataFrame.from_dict(train_landmarks).to_csv('E:\\face_cropped_divide\\train.csv')
pd.DataFrame.from_dict(val_landmarks).to_csv('E:\\face_cropped_divide\\val.csv')
| [
"songyih@d207-023-196-142.wireless.sfu.ca"
] | songyih@d207-023-196-142.wireless.sfu.ca |
50379785fe0217192317c3e473e84b92458a2be4 | 1cfa61598d9ab150c643489280970b5b71a5a3c8 | /HighScoreReader.py | 272a6302b1a8a8b730b5683131461da2fc4cc270 | [] | no_license | cspoonerKRHS/exorcist-rpg | 547e92151c45bee94fc825c431ff5e01a40ba72c | bd4ed7244ab3ac953d1bab455f11b3a0144a51cf | refs/heads/master | 2016-09-11T11:50:42.097800 | 2013-04-12T13:28:35 | 2013-04-12T13:28:35 | 42,615,974 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,422 | py | import pygame, sys, math, time
class HighScoreReader():
    """Reads, updates and publishes a '|'-separated high-score file.

    File format: the first line is a title; every following line is
    "name|score".  Entries are kept ordered from highest to lowest score.
    """
    def __init__(self, txt, other, score):
        # Gate flags: reload()/send() are no-ops until these are set to True
        # externally.  The constructor arguments themselves are unused here.
        self.canSend = False
        self.canRun = False
    def reload(self, txt, other, score):
        """Insert other's .display name and score's .value into file *txt*.

        Runs at most once per arming: canRun is cleared immediately.
        """
        if self.canRun == True:
            self.canRun = False
            f = open(txt, 'r')
            self.scores = f.readlines()
            f.close()
            # Strip newline characters by copying every other character.
            self.score = ""
            fixedScores = []
            for line in self.scores:
                for c in line:
                    if c != "\n":
                        self.score += c
                fixedScores += [self.score]
                self.score = ""
            title = fixedScores[0]
            # Split the remaining lines into [name, score] pairs.
            self.scores = []
            for line in fixedScores[1:]:
                self.score = []
                self.score = line.split('|')
                self.scores += [self.score]
            self.name = other.display
            self.score = score.value
            entry = [self.name, self.score]
            # Insert before the first existing entry with a lower score so the
            # list stays ordered highest-first.
            inserted = False
            for index, item in enumerate(self.scores):
                #print item, entry
                if not inserted:
                    if int(item[1]) < int(entry[1]):
                        self.scores.insert(index, entry)
                        inserted = True
            if not inserted:
                self.scores += [entry]
            # Rewrite the whole file: title line, then one name|score per line.
            myStr = title + '\n'
            for item in self.scores:
                myStr += str(item[0]) + '|' + str(item[1]) + '\n'
            f = open(txt, 'w')
            f.write(myStr)
            f.close()
    def send(self, txt, other1, other2, other3, other4, other5, other6, other7, other8, other9, other10):
        """Copy the top-ten "name: score" strings from *txt* onto the
        .display attribute of the ten given display objects (when canSend
        is set).  Assumes the file holds at least ten entries.
        """
        if self.canSend == True:
            f = open(txt, 'r')
            self.scores = f.readlines()
            f.close()
            # Same newline-stripping / '|'-splitting as reload().
            self.score = ""
            fixedScores = []
            for line in self.scores:
                for c in line:
                    if c != "\n":
                        self.score += c
                fixedScores += [self.score]
                self.score = ""
            title = fixedScores[0]
            self.scores = []
            for line in fixedScores[1:]:
                self.score = []
                self.score = line.split('|')
                self.scores += [self.score]
            other1.display = (str(self.scores[0][0]) + ": " + str(self.scores[0][1]))
            other2.display = (str(self.scores[1][0]) + ": " + str(self.scores[1][1]))
            other3.display = (str(self.scores[2][0]) + ": " + str(self.scores[2][1]))
            other4.display = (str(self.scores[3][0]) + ": " + str(self.scores[3][1]))
            other5.display = (str(self.scores[4][0]) + ": " + str(self.scores[4][1]))
            other6.display = (str(self.scores[5][0]) + ": " + str(self.scores[5][1]))
            other7.display = (str(self.scores[6][0]) + ": " + str(self.scores[6][1]))
            other8.display = (str(self.scores[7][0]) + ": " + str(self.scores[7][1]))
            other9.display = (str(self.scores[8][0]) + ": " + str(self.scores[8][1]))
            other10.display = (str(self.scores[9][0]) + ": " + str(self.scores[9][1]))
# print str(self.scores[0][0])+ ":", str(self.scores[0][1]) | [
"Zander.Blasingame@kearsarge.org"
] | Zander.Blasingame@kearsarge.org |
75201fde37f7cebb6c0b276f4e6f89c588af812a | 238e46a903cf7fac4f83fa8681094bf3c417d22d | /VTK/vtk_7.1.1_x64_Debug/lib/python2.7/site-packages/twisted/cred/portal.py | 23e48739cfb924a7a71886e884eb010098d27305 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | baojunli/FastCAE | da1277f90e584084d461590a3699b941d8c4030b | a3f99f6402da564df87fcef30674ce5f44379962 | refs/heads/master | 2023-02-25T20:25:31.815729 | 2021-02-01T03:17:33 | 2021-02-01T03:17:33 | 268,390,180 | 1 | 0 | BSD-3-Clause | 2020-06-01T00:39:31 | 2020-06-01T00:39:31 | null | UTF-8 | Python | false | false | 5,460 | py | # -*- test-case-name: twisted.test.test_newcred -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
The point of integration of application and authentication.
"""
from twisted.internet import defer
from twisted.internet.defer import maybeDeferred
from twisted.python import failure, reflect
from twisted.cred import error
from zope.interface import providedBy, Interface
class IRealm(Interface):
    """
    The realm connects application-specific objects to the
    authentication system.
    """
    # zope.interface method declarations intentionally omit 'self'.
    def requestAvatar(avatarId, mind, *interfaces):
        """
        Return avatar which provides one of the given interfaces.
        @param avatarId: a string that identifies an avatar, as returned by
            L{ICredentialsChecker.requestAvatarId<twisted.cred.checkers.ICredentialsChecker.requestAvatarId>}
            (via a Deferred). Alternatively, it may be
            C{twisted.cred.checkers.ANONYMOUS}.
        @param mind: usually None. See the description of mind in
            L{Portal.login}.
        @param interfaces: the interface(s) the returned avatar should
            implement, e.g. C{IMailAccount}. See the description of
            L{Portal.login}.
        @returns: a deferred which will fire a tuple of (interface,
            avatarAspect, logout), or the tuple itself. The interface will be
            one of the interfaces passed in the 'interfaces' argument. The
            'avatarAspect' will implement that interface. The 'logout' object
            is a callable which will detach the mind from the avatar.
        """
class Portal:
    """
    A mediator between clients and a realm.

    A portal is associated with one Realm and zero or more credentials checkers.
    When a login is attempted, the portal finds the appropriate credentials
    checker for the credentials given, invokes it, and if the credentials are
    valid, retrieves the appropriate avatar from the Realm.

    This class is not intended to be subclassed.  Customization should be done
    in the realm object and in the credentials checker objects.
    """
    def __init__(self, realm, checkers=()):
        """
        Create a Portal to a L{IRealm}.

        @param realm: the realm that produces avatars.
        @param checkers: an iterable of credentials checkers to register.
        """
        self.realm = realm
        # Maps credentials interface -> the checker handling that interface.
        self.checkers = {}
        for checker in checkers:
            self.registerChecker(checker)
    def listCredentialsInterfaces(self):
        """
        Return list of credentials interfaces that can be used to login.
        """
        # dict.keys() is a live view on Python 3; materialize it so callers
        # really get the documented list.
        return list(self.checkers)
    def registerChecker(self, checker, *credentialInterfaces):
        """
        Register a credentials checker.

        @param checker: the checker to register.
        @param credentialInterfaces: explicit interfaces the checker handles;
            defaults to C{checker.credentialInterfaces}.
        """
        if not credentialInterfaces:
            credentialInterfaces = checker.credentialInterfaces
        for credentialInterface in credentialInterfaces:
            self.checkers[credentialInterface] = checker
    def login(self, credentials, mind, *interfaces):
        """
        @param credentials: an implementor of
            L{twisted.cred.credentials.ICredentials}
        @param mind: an object which implements a client-side interface for
            your particular realm. In many cases, this may be None, so if the
            word 'mind' confuses you, just ignore it.
        @param interfaces: list of interfaces for the perspective that the mind
            wishes to attach to. Usually, this will be only one interface, for
            example IMailAccount. For highly dynamic protocols, however, this
            may be a list like (IMailAccount, IUserChooser, IServiceInfo). To
            expand: if we are speaking to the system over IMAP, any information
            that will be relayed to the user MUST be returned as an
            IMailAccount implementor; IMAP clients would not be able to
            understand anything else. Any information about unusual status
            would have to be relayed as a single mail message in an
            otherwise-empty mailbox. However, in a web-based mail system, or a
            PB-based client, the ``mind'' object inside the web server
            (implemented with a dynamic page-viewing mechanism such as a
            Twisted Web Resource) or on the user's client program may be
            intelligent enough to respond to several ``server''-side
            interfaces.
        @return: A deferred which will fire a tuple of (interface,
            avatarAspect, logout). The interface will be one of the interfaces
            passed in the 'interfaces' argument. The 'avatarAspect' will
            implement that interface. The 'logout' object is a callable which
            will detach the mind from the avatar. It must be called when the
            user has conceptually disconnected from the service. Although in
            some cases this will not be in connectionLost (such as in a
            web-based session), it will always be at the end of a user's
            interactive session.
        """
        # Dispatch to the first registered checker whose credentials
        # interface the supplied credentials provide.
        for i in self.checkers:
            if i.providedBy(credentials):
                return maybeDeferred(self.checkers[i].requestAvatarId, credentials
                    ).addCallback(self.realm.requestAvatar, mind, *interfaces
                    )
        ifac = providedBy(credentials)
        return defer.fail(failure.Failure(error.UnhandledCredentials(
            "No checker for %s" % ', '.join(map(reflect.qual, ifac)))))
| [
"l”ibaojunqd@foxmail.com“"
] | l”ibaojunqd@foxmail.com“ |
a9f7d32b744939624f05501583a7d40c11ef98aa | 9db5e22b171a163d39c35bc2782826d545b635e6 | /RNN_model.py | 84a50ee13bf82be2b78ef408c65d59708b3ece71 | [] | no_license | tanmayaggarwal/TV-Script | 4628c28cb0d576bd33b1589f521e557eada6d031 | 7409411ea71127d8c9c9443181faf613cb654ade | refs/heads/master | 2020-09-07T21:30:00.744380 | 2019-11-11T07:29:06 | 2019-11-11T07:29:06 | 220,917,674 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,680 | py | # File: define the model
import numpy as np
import torch
import problem_unittests as tests
import helper
import torch.nn as nn
class RNN(nn.Module):
    """Word-level recurrent model: embedding -> LSTM -> dropout -> linear.

    forward() returns the scores for the *last* time step only, which is
    what a next-word generator needs.
    """

    def __init__(self, vocab_size, output_size, embedding_dim, hidden_dim, n_layers, dropout=0.5):
        """
        :param vocab_size: number of input dimensions (vocabulary size)
        :param output_size: number of output dimensions
        :param embedding_dim: size of the token embeddings
        :param hidden_dim: size of the LSTM hidden state
        :param n_layers: number of stacked LSTM layers
        :param dropout: dropout applied between LSTM layers and before the
            final linear layer
        """
        super(RNN, self).__init__()
        # Plain hyper-parameter bookkeeping.
        self.vocab_size = vocab_size
        self.output_size = output_size
        self.embedding_dim = embedding_dim
        self.hidden_dim = hidden_dim
        self.n_layers = n_layers
        self.drop_prob = dropout
        # Attribute names (embed/lstm/dropout/fc) are kept stable so saved
        # state dicts remain loadable; creation order also fixes RNG init.
        self.embed = nn.Embedding(vocab_size, embedding_dim)
        self.lstm = nn.LSTM(embedding_dim, hidden_dim, n_layers, dropout=dropout, batch_first=True)
        self.dropout = nn.Dropout(p=dropout)
        self.fc = nn.Linear(hidden_dim, output_size)

    def forward(self, nn_input, hidden):
        """Run one batch; return (last-step word scores, new hidden state)."""
        batch_size = nn_input.size(0)
        embedded = self.embed(nn_input)
        lstm_out, hidden = self.lstm(embedded, hidden)
        # Flatten all time steps so dropout + the linear layer see 2-D input.
        flat = lstm_out.contiguous().view(-1, self.hidden_dim)
        scores = self.fc(self.dropout(flat))
        # Back to (batch, seq, output_size), then keep only the final step.
        scores = scores.view(batch_size, -1, self.output_size)
        return scores[:, -1], hidden

    def init_hidden(self, batch_size, train_on_gpu):
        """Return a zeroed (h0, c0) pair, each (n_layers, batch_size, hidden_dim)."""
        # Borrow dtype/device defaults from an existing parameter tensor.
        weight = next(self.parameters()).data
        shape = (self.n_layers, batch_size, self.hidden_dim)
        h0 = weight.new(*shape).zero_()
        c0 = weight.new(*shape).zero_()
        if train_on_gpu:
            h0, c0 = h0.cuda(), c0.cuda()
        return (h0, c0)
| [
"tanmay.agg@gmail.com"
] | tanmay.agg@gmail.com |
2360f4f554de608bb05e9424255fb741b981cb7e | 8c380afd2bd530a1dd209aec3122db72a576c258 | /cap3/gauss_seidel/main.py | 6439f3fcff0eb69fc59477fb68ac730f082d1586 | [] | no_license | phenrique/metodos_numericos | 4af9e80386290af29caa699802d767e07afd3b02 | 3b9e1b3a76134d949f0259a6af2b736d96cd657d | refs/heads/main | 2023-03-12T17:08:28.540889 | 2021-03-06T12:11:26 | 2021-03-06T12:11:26 | 317,352,260 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,197 | py | import numpy as np
from scipy import linalg
def calc_erro(x0, x1):
    """Return the absolute (element-wise) difference between two iterates."""
    difference = x1 - x0
    return abs(difference)
def atende_criterio(ek, e):
    """Return True when the error measure *ek* is within the tolerance *e*."""
    # Replaces the verbose if/else-return-True/False idiom; bool() keeps the
    # return a plain Python bool even when numpy scalars are compared.
    return bool(ek <= e)
#a = np.array([[3, -.1, -.2],
#              [.1, 7, -.3],
#              [.3, -.2, 10]])
#
#b = np.array([7.85, -19.3, 71.4])
# Coefficient matrix of the linear system A.x = b (alternate systems are
# kept commented out above/below for experimentation).
a = np.array([[10, 2, 1],
              [1, 5, 1],
              [2, 3, 10]])
b = np.array([7, -8, 6])
# Matrix of independent terms
#a = np.array([[4, -2, 1],
#              [1, -4, 1],
#              [1, 2, 4]])
#
## Matrix of independent terms
#b = np.array([3, -2, 7])
# Solution vector (initial guess of zeros; rebound to float arrays below)
X = np.zeros(3, dtype = int)
# Strictly upper triangular part of a
U = np.triu(a,1)
# Strictly lower triangular part of a
L = np.tril(a,-1)
# Diagonal matrix
D = np.diag(np.diag(a))
# Inverse of the diagonal (NOTE(review): inv_D is computed but never used)
inv_D = np.linalg.inv(D)
G = D + L
#print(G, "\n")
# Gauss-Seidel iteration: X(k+1) = F.X(k) + B with
#   F = -(D+L)^-1 . U   and   B = (D+L)^-1 . b
inv_G = np.linalg.inv(G)
#print(inv_G, "\n")
F = np.dot(inv_G, U)
#print(F, "\n")
F = F*(-1)
B = np.dot(inv_G, b)
#print(B, "\n")
e = 0.05  # convergence tolerance
ek = np.ones(3)  # per-component error of the last step; starts > e to enter the loop
i = 0
while not atende_criterio(max(ek), e):
    Xk = F.dot(X) + B
    ek = calc_erro(X, Xk)# error vector
    X = Xk
    i = i + 1
    print(i,"º interação: ",X, "\n")
| [
"ph.cardoso10@gmail.com"
] | ph.cardoso10@gmail.com |
28b13bd61e0f41c98cca46010f217204703531c0 | fbeee8c3c70b4c6e6f4446b6bd9d05a285c61ee7 | /Starter_Files/hw06/correctness_tests/q3_3.py | 2fdc640bafdf33949d1231db474e8a275c7c9009 | [] | no_license | ucsd-ets/dsc10-wi18 | 5e06053afc3e362a2e8421e4dd70d0072beb4d9f | 52704520ab01dd3efea7179e8bd23dfa3a23a189 | refs/heads/master | 2021-09-10T21:44:55.184661 | 2018-04-02T20:16:15 | 2018-04-02T20:16:15 | 104,229,542 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 608 | py | test = {
'name': 'Question 3_3',
'points': 1,
'suites': [
{
'cases': [
{
'code': r"""
>>> isinstance(missing, np.ndarray)
True
>>> missing_ref = make_array(14, 33, 35, 57, 60, 76, 80, 81, 85, 96, 102, 103, 130, 143, 178, 181, 186, 210, 215, 227, 247, 258, 264, 270, 272, 294, 319, 344, 354, 358)
>>> set(missing_ref) == set(missing)
True
""",
'hidden': False,
'locked': False
}
],
'scored': True,
'setup': '',
'teardown': '',
'type': 'doctest'
}
]
}
| [
"patrickghhayes@gmail.com"
] | patrickghhayes@gmail.com |
1460a9bdda6e64146651a0f82717dfe96d11b0d1 | ac75b787a84d15e15a3339cc1e72280015f9829f | /projects/finalProject_v0.5/population.py | f6a375af2f199ef31e21b4d8b60b07f683ec66c9 | [] | no_license | mmarder96/pyrosim | e5cb1ec2c7eb07c5a4c841d4e46cece5cff3ed1e | c9879462d58806f5473e55762290a31825bbe6ce | refs/heads/master | 2020-03-31T14:58:11.405260 | 2018-06-24T22:22:06 | 2018-06-24T22:22:06 | 152,317,847 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,456 | py | ### created by Max Marder
###########
# IMPORTS #
###########
from individual import INDIVIDUAL
import constants as c
import copy
import random
####################
# POPULATION CLASS #
####################
class POPULATION:
    """A fixed-size population of INDIVIDUALs for an evolutionary run."""
    #---CONSTRUCTOR---#
    def __init__ (self, popSize):
        self.popSize = popSize
        self.p = {}              # index -> INDIVIDUAL
        self.fitnessList = []    # best-of-generation fitness history
    #---INITIALIZE--#
    def Initialize(self):
        # Create popSize individuals, keyed (and constructed) by their index.
        for i in range(0, self.popSize):
            self.p[i] = INDIVIDUAL(i)
    #---EVALUATE POPULATION---#
    def Evaluate(self, envs, pb = True, pp = False):
        """Evaluate every individual in every environment; the final fitness
        is the average over c.numEnvs environments.  pb/pp are passed through
        to INDIVIDUAL.Start_Evaluation (see individual.py for their meaning).
        """
        # set fitness value to 0
        for i in self.p:
            self.p[i].fitness = 0
        # evaluate and compute fitness for each robot in each environment
        for e in range(c.numEnvs):
            for i in self.p:
                self.p[i].Start_Evaluation(envs.envs[e], pb, pp)
            for i in self.p:
                self.p[i].Compute_Fitness()
        # take average fitness for each environment
        for i in self.p:
            self.p[i].fitness/=c.numEnvs
    #---TABULATE BEST FITNESS---#
    def Tabulate_Fitness(self):
        # Slot 0 always holds the elite (see Copy_Best_From).
        self.fitnessList.append(self.p[0].fitness)
    #---FILL FROM POPULATION---#
    def Fill_From(self, other):
        # Elitism (slot 0) plus tournament-selected, mutated children.
        self.Copy_Best_From(other)
        self.Collect_Children_From(other)
    #---COPY BEST FROM POPULATION---#
    def Copy_Best_From(self, other):
        # Deep-copy other's fittest individual into slot 0.
        best_index = 0
        for i in range(0, len(other.p)):
            if (other.p[best_index].fitness < other.p[i].fitness):
                best_index = i
        self.p[0] = copy.deepcopy(other.p[best_index])
    #---COLLECT CHILDREN FROM POPULATION---#
    def Collect_Children_From(self, other):
        # Slots 1..n-1 become mutated copies of tournament winners.
        for i in range (1, len(other.p)):
            winner = other.Winner_Of_Tournament_Selection()
            self.p[i] = copy.deepcopy(winner)
            self.p[i].Mutate()
    #---TOURNAMENT WINNER---#
    def Winner_Of_Tournament_Selection(other):
        # NOTE(review): 'other' plays the role of 'self' here (the method is
        # invoked as other.Winner_Of_Tournament_Selection()), and the final
        # else branch is unreachable since >= and < cover all cases.
        p1 = random.randint(0, len(other.p)-1)
        p2 = random.randint(0, len(other.p)-1)
        while (p1 == p2):
            p2 = random.randint(0, len(other.p)-1)
        if (other.p[p1].fitness >= other.p[p2].fitness):
            return other.p[p1]
        elif (other.p[p1].fitness < other.p[p2].fitness):
            return other.p[p2]
        else:
            print("uh oh, tournament winner is broken!")
    #---REPLACE POPULATION---#
    def ReplaceWith(self, other):
        # Keep, per slot, whichever of the two individuals is fitter.
        for i in range(0, len(self.p)):
            if (self.p[i].fitness < other.p[i].fitness):
                self.p[i] = other.p[i]
    #---MUTATE POPULATION---#
    def Mutate(self):
        for i in self.p:
            self.p[i].Mutate()
    #---PRINT---#
    def Print(self):
        for i in range(0, len(self.p)):
            if (i in self.p):
                self.p[i].Print()
| [
"demenon290@gmail.com"
] | demenon290@gmail.com |
4d3e94dd6b1a0834e2e0628c96d63c32eb3069c9 | f3ae4a3a105ef7f110a1ba0aec9d2d93f2f9348a | /sr.py | 9f1c038b47d0e1515797fe9793b45760b1f23cb4 | [] | no_license | shydesky/sr | 00c21e6d76fff74b95806fc289654d0aca696693 | 76112420cd0091868481ef073c39541e221b2934 | refs/heads/master | 2020-08-03T20:22:35.030010 | 2019-10-12T06:48:03 | 2019-10-12T06:48:03 | 211,875,818 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,855 | py | import requests
import datetime
sr_dict = {"4178c842ee63b253f8f0d2955bbc582c661a078c9d":"TLyqzVGLV1srkB7dToTAEqgDSfPtXRJZYH","41037e18c9ca44b2ba35f0bb7d0c075f252a191294":"TAHg5zi2ejWeWiE6bqtDT9vfbH3zNTWrfA", "411103d62d8299e90fa011b4ce7fc6ba151e5f1a23":"TBXB5tobBPCFkC8ihFDBWjaiwW2iSpzSfr", "4118e2e1c6cdf4b74b7c1eb84682e503213a174955":"TCEo1hMAdaJrQmvnGTCcGT2LqrGU4N7Jqf", "412d7bdb9846499a2e5e6c5a7e6fb05731c83107c7":"TE7hnUtWRRBz3SkFrX8JESWUmEvxxAhoPt", "412fb5abdf8a1670f533c219e7251fe30b89849359":"TEKUPpjTMKWw9LJZ9YJ4enhCjAmVXSL7M6", "4138e3e3a163163db1f6cfceca1d1c64594dd1f0ca":"TFA1qpUkQ1yBDw4pgZKx25wEZAqkjGoZo1", "41496e85711fa3b7ba5a093af635269a67230ac2c1":"TGfUjDNr5Huk8DumHfmzY2ksE6RRYpuGLG", "414a193c92cd631c1911b99ca964da8fd342f4cddd":"TGj1Ej1qRzL9feLTLhjwgxXF4Ct6GTWg2U", "414d1ef8673f916debb7e2515a8f3ecaf2611034aa":"TGzz8gjYiYRqpfmDwnLxfgPuLVNmpCswVp", "415863f6091b8e71766da808b1dd3159790f61de7d":"TJ2aDMgeipmoZRuUEru2ri8t7TGkxnm6qY", "4167e39013be3cdd3814bed152d7439fb5b6791409":"TKSXDA8HfE9E1y39RczVQ1ZascUEtaSToF", "417bdd2efb4401c50b6ad255e6428ba688e0b83f81":"TMG95kirH4cKW5GnKoCcCye1dBqbt77yGu", "4184399fc6a98edc11a6efb146e86a3e153d0a0933":"TN2MEB71iox2mDwwngdTrUyUZXeENcb79F", "418a445facc2aa94d72292ebbcb2a611e9fd8a6c6e":"TNaJADoq1u2atryP1ZzwvmEE4ZBELXfMqw", "4192c5d96c3b847268f4cb3e33b87ecfc67b5ce3de":"TPMGfspxLQGom8sKutrbHcDKtHjRHFbGKw", "4193a8bc2e7d6bb1bd75fb2d74107ffbda81af439d":"TPRxUBEakukBMwTScCHgvCPSBYk5UhfboJ", "41a4475dbd14feb2221f303fc33dc8d0a08f25f445":"TQwqK8LhHGria5gkkfCVqQfE5mSwpvdp3B", "41a75a876ef0e8715aa2cd34597154382502b8d646":"TRE6JANcQBfmXTDooRyStoALZNWvEgNpsE", "41a9d4b388c009b7ee36819114b8558d078103ad0b":"TRTC1DxDg2eWPHmyc3DTGR672rVHoZQa8h", "41b3eec71481e8864f0fc1f601b836b74c40548287":"TSNbzxac4WhxN91XvaUfPTKP2jNT18mP6T", "41b487cdc02de90f15ac89a68c82f44cbfe3d915ea":"TSRmKPv8kvokcHKfLeySQ3gNHyYrAAXrnc", "41b668d4991cd636b694989ebf3fa1a84613d7899e":"TSbhZijH2t7Qn1UAHAu7PBHQdVAvRwSyYr", 
"41bac7378c4265ad2739772337682183b8864f517a":"TSzoLaVCdSNDpNxgChcFt9rSRF5wWAZiR4", "41beab998551416b02f6721129bb01b51fceceba08":"TTMNxTmRpBZnjtUnohX84j25NLkTqDga7j", "41c05142fd1ca1e03688a43585096866ae658f2cb2":"TTW663tQYJTTCtHh6DWKAfexRhPMf2DxQ1", "41c189fa6fc9ed7a3580c3fe291915d5c6a6259be7":"TTcYhypP8m4phDhN6oRexz2174zAerjEWP", "41c4bc4d7f64df4fd3670ce38e1a60080a50da85cf":"TTuT2AG5q37f74zra7PjAApUmyVaXmFvC4", "41c81107148e5fa4b4a2edf3d5354db6c6be5b5549":"TUD4YXYdj2t1gP5th3A7t97mx1AUmrrQRt", "41d1dbde8b8f71b48655bec4f6bb532a0142b88bc0":"TV6qcwSp38uESiDczxxb7zbJX1h2LfDs78", "41d25855804e4e65de904faf3ac74b0bdfc53fac76":"TV9QitxEJ3pdiAUAfJ2QuPxLKp9qTTR3og", "41d376d829440505ea13c9d1c455317d51b62e4ab6":"TVFKwzE8qeETLaZEHMx2tjEsdnujAgAWaA", "41d599cb8c1b609722e81741667ba3c8fb441fba41":"TVSdAnnZUdSH8gdeUgMPTht1mXmhtEXAN3", "41d70365508e5a6fe846ad433af9302779fd5fdb1b":"TVa6MF7SgZa8PToLoQ9PNq6KQHyTXLBz1p", "41e40302d6b5e889bfbd395ed884638d7f03ee3f87":"TWkpg1ZQ4fTv7sj41zBUTMo1kuJEUWTere", "41f29f57614a6b201729473c837e1d2879e9f90b8e":"TY65QiDt4hLTMpf3WRzcX357BnmdxT2sw9", "41f70386347e689e6308e4172ed7319c49c0f66e0b":"TYVJ8JuQ6ctzCa2u79MFmvvNQ1U2tYQEUM","4178C842EE63B253F8F0D2955BBC582C661A078C9D":"TLyqzVGLV1srkB7dToTAEqgDSfPtXRJZYH", "41243accc5241d97ce79272b06952ee88a34d8e1f9":"TDGmmTC7xDgQGwH4FYRGuE7SFH2MePHYeH", "413dd6a14c95be5d43d1d1e51f94205ffbfc63b8fb":"TFcBLX6W4u7xAFYvcyUDEHT6Cy7GroVLfD", "4142e95c76430e62dfe5182d58f46a482101648304":"TG519wQRpmBNSzVUiHVx6wkVZkLKmxTXxS", "41685ef5321f2d94080d6bdcd1d4cbdded428cecb6":"TKV52nvtpLe5uapeTabyAYANMQYBXPGVds", "417ad0ee1300d0366e901fa613a929137dde1d2224":"TMAbjAuefZqzJAyGhkn4AbWa3jinzcZtGc", "417b88db9da8aacae0a7e967d24c0fc00129e815f6":"TMEQ4hu7DtLTHPn6qPsYktk1QGdTbAyTEK", "41856eef3d964e450b52def7c0d49bb5719d2d22d3":"TN8jmSQv79QHa37cckKMguDyf3vAKtpvDs", "41a47e8b12d006a2c2a3f044453e2745bfb9321939":"TQxyQu5d76MaxsEF4nBf9tFa8s93nSHe8M", "41aa97642e4137cf828a971de448756958b844d72f":"TRXDEXMoaAprSGJSwKanEUBqfQjvQEDuaw", 
"41bd2fee2df4a2ba73f29d58b777c4c1113b74e5db":"TTDY8aQQizYCHA7qdmgtLgPNEe2YWEfPZa", "41d49bf5202b3dba65d46a5be73396b6b66d3555aa":"TVMP5r12ymtNerq5KB4E8zAgLDmg2FqsEG", "41f6862099bff15eade076c3a8f1b5abd7712b46d4":"TYShuadcBNBKgk8UgxsnB43yeBdyioswHh","410694981b116304ed21e05896fb16a6bc2e91c92c":"TAa14iLEKPAetX49mzaxZmH6saRxcX7dT5", "4108b55b2611ec829d308a62b3339fba9dd5c27151":"TAmFfS4Tmm8yKeoqZN8x51ASwdQBdnVizt", "411155d10415fac16a8f4cb2f382ce0e0f0a7e64cc":"TBYsHxDmFaRmfCF3jZNmgeJE8sDnTNKHbz", "4116440834509c59de4ee6ba4933678626f451befe":"TBzwNEE7D3DdTB4wALiSBaMmpSK7jdoxYF", "411661f25387370c9cd3a9a5d97e60ca90f4844e7e":"TC1ZCj9Ne3j5v3TLx5ZCDLD55MU9g3XqQW", "411d7aba13ea199a63d1647e58e39c16a9bb9da689":"TCf5cqLffPccEY7hcsabiFnMfdipfyryvr", "41207ab1585b9cc6c4c1232f67e4a10e19a442fe68":"TCvwc3FV3ssq2rD82rMmjhT4PVXYTsFcKV", "4127a6419bbe59f4e64a064d710787e578a150d6a7":"TDarXEG2rAD57oa7JTK785Yb2Et32UzY32", "4127bf0d1a57f335c11bc5d002dd82e9e0727cb967":"TDbNE1VajxjpgM5p7FyGNDASt3UVoFbiD3", "412edce151c81d9b4aae17f974f7f646242eff989d":"TEEzguTtCihbRPfjf1CvW8Euxz1kKuvtR9", "41318b2b6b4c7fcaa4b62f25a282329e1952a3c0d1":"TEVAq8dmSQyTYK7uP1ZnZpa6MBVR83GsV6", "41410e468919155aa847d83b0c206148511b6dc848":"TFuC2Qge4GxA2U9abKxk1pw3YZvGM5XRir", "414593d27b70d21454b39ab60bf13291dae8dc0326":"TGK6iAKgBmHeQyp5hn3imB71EDnFPkXiPR", "414b4778beebb48abe0bc1df42e92e0fe64d0c8685":"TGqFJPFiEqdZx52ZR4QcKHz4Zr3QXA24VL", "415095d4f4d26ebc672ca12fc0e3a48d6ce3b169d2":"THKJYuUmMKKARNf7s2VT51g5uPY6KEqnat", "416419765bacf1dc441f722cabc8b661140558bb5d":"TK6V5Pw2UWQWpySnZyCDZaAvu1y48oRgXN", "4169051b001c6169201970f5a6a4f9ababfd916ae3":"TKYW4wDGg3FcaAR8cP8sbmiU8RdZwcK7Bz", "417040583133e831953ea4f65a8196fcffcfbf0d80":"TLCjmH6SqGK8twZ9XrBDWpBbfyvEXihhNS", "4172fd5dfb8ab36eb28df8e4aee97966a60ebf9efe":"TLTDZBcPoJ8tZ6TTEeEqEvwYFk2wgotSfD", "417312080619a24d38a2029b724ff5c84d8f2e4483":"TLTeJxavvgAC5opkDXfXfhRXp4thLfHL5p", "41746e6af4ac9db3473c0c955f1fca11d4013f32ed":"TLaqfGrxZ3dykAFps7M2B4gETTX1yixPgN", 
"4186f5793eb678c65d9673d5498c550439d762c1cc":"TNGoca1VHC6Y5Jd2B1VFpFEhizVk92Rz85", "418c66e4883782b793fcf2dcb92b23eece57769499":"TNmas2SUNGJ2R1jq4Sivya5nHDLYN9ggpG", "41a857362c1b77cb04e8f2b51b6e970f24fa5c1e5b":"TRKJzrZxN34YyB8aBqqPDt7g4fv6sieemz", "41a8bb7680d85f9821b3d82505edc4663f6fbd8fde":"TRMP6SKeFUt5NtMLzJv8kdpYuHRnEGjGfe", "41d32b3fa8ca0b4896257fdf1821ac8d116da84c45":"TVDmPWGYxgi5DNeW8hXrzrhY8Y6zgxPNg4", "41df3bd4e0463534cb7f1f3ffc2ec14ac4693dc3b2":"TWKZN1JJPFydd5rMgMCV5aZTSiwmoksSZv", "41e40de6895c142ade8b86194063bcdbaa6c9360b6":"TWm3id3mrQ42guf7c4oVpYExyTYnEGy3JL", "41e72d833e0c46837c0802864acc5f119a0a904d05":"TX3ZceVew6yLC5hWTXnjrUFtiFfUDGKGty", "41f8c7acc4c08cf36ca08fc2a61b1f5a7c8dea7bec":"TYednHaV9zXpnPchSywVpnseQxY9Pxw4do", "41fc45da0e51966bd1af2cb1e0f66633f160603a8b":"TYy6xnc35LkyEuU9fJv4x89vkhLZim1yJv", "41ffd564656556a8b6b79311a932e3d216f4fc030b":"TZHvwiw9cehbMxrtTbmAexm9oPo4eFFvLS"}
# Full-node HTTP API endpoint that lists all witnesses (super representatives).
url = "http://52.53.189.99:8090/wallet/listwitnesses"
# Witness addresses to drop from the payout table; populated below.
exclude_witness = []
grs = ['410694981b116304ed21e05896fb16a6bc2e91c92c',
'4108b55b2611ec829d308a62b3339fba9dd5c27151',
'411155d10415fac16a8f4cb2f382ce0e0f0a7e64cc',
'411661f25387370c9cd3a9a5d97e60ca90f4844e7e',
'411d7aba13ea199a63d1647e58e39c16a9bb9da689',
'41207ab1585b9cc6c4c1232f67e4a10e19a442fe68',
'4127a6419bbe59f4e64a064d710787e578a150d6a7',
'4127bf0d1a57f335c11bc5d002dd82e9e0727cb967',
'412edce151c81d9b4aae17f974f7f646242eff989d',
'41318b2b6b4c7fcaa4b62f25a282329e1952a3c0d1',
'41410e468919155aa847d83b0c206148511b6dc848',
'414593d27b70d21454b39ab60bf13291dae8dc0326',
'414b4778beebb48abe0bc1df42e92e0fe64d0c8685',
'415095d4f4d26ebc672ca12fc0e3a48d6ce3b169d2',
'416419765bacf1dc441f722cabc8b661140558bb5d',
'417040583133e831953ea4f65a8196fcffcfbf0d80',
'4172fd5dfb8ab36eb28df8e4aee97966a60ebf9efe',
'41746e6af4ac9db3473c0c955f1fca11d4013f32ed',
'4186f5793eb678c65d9673d5498c550439d762c1cc',
'41a857362c1b77cb04e8f2b51b6e970f24fa5c1e5b',
'41a8bb7680d85f9821b3d82505edc4663f6fbd8fde',
'41d32b3fa8ca0b4896257fdf1821ac8d116da84c45',
'41df3bd4e0463534cb7f1f3ffc2ec14ac4693dc3b2',
'41e40de6895c142ade8b86194063bcdbaa6c9360b6',
'41e72d833e0c46837c0802864acc5f119a0a904d05',
'41f8c7acc4c08cf36ca08fc2a61b1f5a7c8dea7bec',
'41ffd564656556a8b6b79311a932e3d216f4fc030b'
]
tron_sr = ['41B3EEC71481E8864F0FC1F601B836B74C40548287','41F29F57614A6B201729473C837E1D2879E9F90B8E','418A445FACC2AA94D72292EBBCB2A611E9FD8A6C6E','41BAC7378C4265AD2739772337682183B8864F517A', '4138E3E3A163163DB1F6CFCECA1D1C64594DD1F0CA','4118E2E1C6CDF4B74B7C1EB84682E503213A174955','41E40302D6B5E889BFBD395ED884638D7F03EE3F87','41D376D829440505EA13C9D1C455317D51B62E4AB6','41C05142FD1CA1E03688A43585096866AE658F2CB2','41C4BC4D7F64DF4FD3670CE38E1A60080A50DA85CF','418C66E4883782B793FCF2DCB92B23EECE57769499','417312080619A24D38A2029B724FF5C84D8F2E4483','41B487CDC02DE90F15AC89A68C82F44CBFE3D915EA','41DCA1955F9EDBFB7B25A3FE2998793A4B22746EB9','41FC45DA0E51966BD1AF2CB1E0F66633F160603A8B','41c81107148e5fa4b4a2edf3d5354db6c6be5b5549']
# The tron_sr addresses above were collected in mixed case; normalise to
# lower case so membership tests against w["address"] compare correctly.
tron_sr = [i.lower() for i in tron_sr]
wintokengames = ['4116440834509c59de4ee6ba4933678626f451befe', '4169051b001c6169201970f5a6a4f9ababfd916ae3']
# Merge every exclusion group into the single lookup list used below.
exclude_witness.extend(grs)
exclude_witness.extend(tron_sr)
exclude_witness.extend(wintokengames)
#exclude_witness = []
# Fetch the live witness list from the node.
witness_list = requests.get(url).json().get("witnesses")
# Rows of (address, url, totalProduced) that survive filtering.
ll = []
for w in witness_list:
    # Drop excluded addresses and witnesses with no / too little production.
    if w.get("address") in exclude_witness:
        continue
    if w.get("totalProduced") is None:
        continue
    if w.get("totalProduced") < 2000:
        continue
    ll.append((w.get("address"), w.get("url"), w.get("totalProduced")))
# Total block production across the remaining witnesses, used to pro-rate
# the 50,000,000 reward pool.
total_blocks = sum(l[2] for l in ll)
time = datetime.datetime.now()
# NOTE(review): str(datetime) contains ':' and spaces -- fine on Linux,
# but an invalid filename on Windows.
filename = "final." + str(time) + ".txt"
with open(filename, 'w+') as f1:
    for l in ll:
        # write(), not writelines(): writelines() expects an iterable of
        # strings, so passing one string only worked by accident (it was
        # iterated character by character).
        f1.write("%2s %40s %22s %10s \n" % (sr_dict.get(l[0]) if sr_dict.get(l[0]) else l[0], l[1], str(l[2]), str(l[2] * 50000000 / total_blocks)))
| [
"shydesky@gmail.com"
] | shydesky@gmail.com |
c0d053a1f6337e71ea73c563de659ed32a47a8a4 | b33dbea89b476a19dc8d8fa5208396cc2ac7f165 | /pylibftdi/_base.py | 56f54305626e46073f6e034a4d68e4e1b13546a5 | [
"MIT"
] | permissive | we711111/pylibftdi | adf7377e5880a8f63e097f91254d2321b6f4c03d | 9e33cbccd388785bb392d51efd52d3247b36fc18 | refs/heads/master | 2022-08-01T02:00:08.689626 | 2020-05-26T05:49:28 | 2020-05-26T05:49:28 | 266,955,446 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 392 | py | """
pylibftdi - python wrapper for libftdi
Copyright (c) 2010-2014 Ben Bass <benbass@codedstructure.net>
See LICENSE file for details and (absence of) warranty
pylibftdi: http://bitbucket.org/codedstructure/pylibftdi
"""
# This module contains things needed by at least one other
# module so as to prevent circular imports.
# Fixed: the original spelled this ``__ALL__``, which Python silently
# ignores; the star-import export list must be lower-case ``__all__``.
__all__ = ['FtdiError']


class FtdiError(Exception):
    """Base exception raised for libftdi-related errors."""
    pass
| [
"noreply@github.com"
] | noreply@github.com |
5ecb3398364215e8795aea80ec35dc9d88142820 | 2a5771551f3cf9658a25638783e70bbf6c7b375e | /code/option/template.py | bf04a8cc8aee75a7a93bd80d3b46719e5f082a46 | [] | no_license | vivid-chen/Video_SR | 30ff4c8496ce59a0bb933f24ebebde0c87edfc24 | 5ab3f27e54edad4e464cdae7966e4aaed3800ed8 | refs/heads/main | 2023-03-01T21:55:26.485300 | 2021-02-04T09:19:01 | 2021-02-04T09:19:01 | 331,247,197 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,221 | py | def set_template(args):
if args.template == 'DBVSR':
args.model = "DBVSR"
args.save = "dbvsr_test"
args.data_train = 'VideoSR'
args.dir_data = '/media/omnisky/8TDisk/CZY/REDS/train'
args.data_test = 'VideoSR'
args.testset = "REDS4"
args.dir_data_test = '/media/omnisky/8TDisk/CZY/REDS/val'
args.fc_pretrain = '../pretrain/kernel.pt'
args.pwc_pretrain = '../pretrain/network-default.pytorch'
args.fcn_number = 2
args.kernel_size = (15, 15) # 初始化高斯模糊核的大小
args.grad_clip = 0.5
# args.save_middle_models = True
args.test_model_path = "../models_in_paper/dbvsr/model_dbvsr.pt"
# args.test_only = True
elif args.template == 'baseline_lr':
args.model = "baseline_lr"
args.save = "baseline_lr_test"
args.data_train = 'VideoSR'
args.dir_data = '/media/omnisky/8TDisk/CZY/REDS/train'
args.data_test = 'VideoSR'
args.testset = "REDS4"
args.dir_data_test = '/media/omnisky/8TDisk/CZY/REDS/val'
args.fc_pretrain = '../pretrain/kernel.pt'
args.pwc_pretrain = '../pretrain/network-default.pytorch'
args.fcn_number = 2
args.kernel_size = (15, 15)
args.grad_clip = 0.5
args.test_model_path = "../models_in_paper/baseline_lr/model_lr.pt"
# args.test_only = True
elif args.template == 'baseline_hr':
args.model = "baseline_hr"
args.save = "baseline_hr_test"
args.data_train = 'VideoSR'
# args.dir_data = '../datasets/REDS/train'
args.dir_data = '/media/omnisky/8TDisk/CZY/REDS/train'
args.data_test = 'VideoSR'
args.testset = "REDS4"
args.dir_data_test = '/media/omnisky/8TDisk/CZY/REDS/val'
args.fc_pretrain = '../pretrain/kernel.pt'
args.pwc_pretrain = '../pretrain/network-default.pytorch'
args.fcn_number = 2
args.kernel_size = (15, 15)
args.grad_clip = 0.5
args.test_model_path = "../models_in_paper/baseline_hr/model_hr.pt"
# args.test_only = True
else:
raise NotImplementedError('Template [{:s}] is not found'.format(args.template))
| [
"1031268032@qq.com"
] | 1031268032@qq.com |
bea1c86845c4f2b3382023753aa260859f146cf5 | b69471dee8864e4e2cd57094a195df980858aa2a | /djangonautic/settings.py | aa6a7901179028cd9afeddc9631b16f0f738aa46 | [] | no_license | ArmandoBolanios/Blog_Django | 4c02605f0fdc6221e2f87ce2c406f336661e86fa | e09196706d47f748f0651d86618aad21bf3e3349 | refs/heads/master | 2020-07-15T16:55:32.737754 | 2019-09-01T00:46:49 | 2019-09-01T00:46:49 | 205,611,894 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,282 | py | """
Django settings for djangonautic project.
Generated by 'django-admin startproject' using Django 2.0.10.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '^pev7e(8^m@kyrc&g4-v8spfpa$-i^4a9mrf(sm8bb6-tdh&o8'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'articles',
'accounts',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'djangonautic.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'djangonautic.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'assets'),
)
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
| [
"45885565+ArmandoBolanios@users.noreply.github.com"
] | 45885565+ArmandoBolanios@users.noreply.github.com |
84c79a1359a9fc75719b15f0fff95d517dcb8d5f | ec2b1c82500c4054cd0aa4f486416a62b077a8b4 | /mystuff/ex38.py | f9bf1db8fdc8e8a9b7d1fe7a7291d56ac5db7fca | [] | no_license | vassmate/Learn_Python_THW | 81bcd2dd309181c06bbfbf74573791bc05899288 | c976dc21045e10bf63147f92070f8468e4c3c337 | refs/heads/master | 2021-01-10T02:08:37.725829 | 2016-02-01T11:25:43 | 2016-02-01T11:25:43 | 46,182,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 576 | py | ten_things = "Apples Oranges Crows Telephone Light Sugar"
print "Wait, there are not 10 things in that list let's fix that."
stuff = ten_things.split(' ')
more_stuff = ["Day", "Night", "Song", "Frisbee", "Corn", "Banana", "Girl", "Boy"]
for next_one in range(len(stuff), 10):
next_one = more_stuff.pop()
print "Adding: ", next_one
stuff.append(next_one)
print "There are %d items now" % len(stuff)
print "There we go: ", stuff
print "Let's do some things with stuff."
print stuff[1]
print stuff[-1]
print stuff.pop()
print ' '.join(stuff)
print '#'.join(stuff[3:5])
| [
"vassmate08@gmail.com"
] | vassmate08@gmail.com |
8ff640784c681e408c071c2e02eb51603e37c398 | 4796f60be673a6f6550fa269580710235b472019 | /cif.py | c25211fd7db8fbdede80e5ee615cbf783f2ec513 | [] | no_license | LonelyHobby/Python | c37b1d1c599085bba9633a1a2416f63cd047bdc8 | fe9e120a31a6ad045f46a995b8219eabec89c96e | refs/heads/master | 2020-04-05T06:28:19.625797 | 2018-11-08T02:51:22 | 2018-11-08T02:51:22 | 156,639,361 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,103 | py |
# -*- coding:utf-8 -*-
'''
DFA有限自动机Python实现
作者:王灿
2015-9-27于中国矿业大学
'''
class DFA:
file_object = ''#文件句柄
line_number = 0 #记录行号
state = 0 #状态
ResWord = ['int','if','then','else','end','repeat','until','read','write']#保留字
error_message = []#保存错误信息,存储元组,元组第一个参数是行号,第二个参数是错误字符
annotate_message = []#注释信息,存储元组,元组第一个参数是行号,第二个参数是注释
char_message = []#识别的字符串,存储元组,元组第一个参数是类型,第二个参数是该字符串
def __init__(self,file_name):
self.file_object = file_name
self.state = 0
self.line_number = 0
self.error_message = []
self.annotate_message = []
self.char_message = []
def Start_convert(self):
for line in self.file_object:#一行行的处理
line = line.strip('\n')#去除换行fu
self.line_number += 1#没处理一行行号加一
line_length = len(line)
i = 0
string = ''#存储一个字符串
while i < line_length:
ch = line[i]#读取该行的一个字符
i += 1
if self.state == 0:#初始状态
string = ch
if ch.isalpha():
self.state = 1
elif ch.isdigit():
self.state = 3
elif ch == '+':
self.state = 5
elif ch == '-':
self.state = 9
elif ch == '*':
self.state = 13
elif ch == '/':
self.state = 16
elif ch == '=':
self.state = 20
i -= 1
elif ch == '<':
self.state = 21
i -= 1
elif ch == '{':
self.state = 22
i -= 1
elif ch == '}':
self.state = 23
i -= 1
elif ch == ';':
i -= 1
self.state = 24
elif ch.isspace():
self.state = 25
else:
self.state = 26#不可识别状态
i -= 1
elif self.state == 1:#判断字母数字
while ch.isalpha() or ch.isdigit():
string += ch
if i < line_length:
ch = line[i]
i += 1
else:
break
self.state = 2
i -= 2#回退2个字符
elif self.state == 2:
if string in self.ResWord:
content = '(关键字,' + string + ')'
else:
content = '(标识符,' + string + ')'
#print content
self.char_message.append(content)
string = ''#回到初始情况
self.state = 0#回到状态0
elif self.state == 3:
while ch.isdigit():
string += ch
if i < line_length:
ch = line[i]
i += 1
else:
break
self.state = 4
i -= 2#回退2个字符
elif self.state == 4:
content = '(数字,' + string + ')'
self.char_message.append(content)
#print string
string = ''#回到初始情况
self.state = 0#回到状态0
elif self.state == 5:
if ch == '+':
self.state = 6
i -= 1
elif ch == '=':
self.state = 7
i -= 1
else:
self.state = 8
i -= 2
elif self.state == 6:#判断++
content = '(特殊符号,' + string + ch + ')'
self.char_message.append(content)
#print string + ch
string = ''#回到初始情况
self.state = 0#回到状态0
elif self.state == 7:#判断+=
content = '(特殊符号,' + string + ch + ')'
self.char_message.append(content)
#print string + ch
string = ''#回到初始情况
self.state = 0#回到状态0
elif self.state == 8:#判断+
content = '(特殊符号,' + ch + ')'
self.char_message.append(content)
#print ch
string = ''#回到初始情况
self.state = 0#回到状态0
elif self.state == 9:
if ch == '-':
self.state = 10
i -= 1
elif ch == '=':
self.state = 11
i -= 1
else:
self.state = 12
i -= 2
elif self.state == 10:
content = '(特殊符号,' + string + ch + ')'
self.char_message.append(content)
#print string + ch#判断--
string = ''#回到初始情况
self.state = 0#回到状态0
elif self.state == 11:#判断-=
content = '(特殊符号,' + string + ch + ')'
self.char_message.append(content)
#print string + ch
string = ''#回到初始情况
self.state = 0#回到状态0
elif self.state == 12:#判断-
content = '(特殊符号,' + ch + ')'
self.char_message.append(content)
#print ch
string = ''#回到初始情况
self.state = 0#回到状态0
elif self.state == 13:
if ch == '=':
self.state = 14
i -= 1
else:
self.state = 15
i -= 2
elif self.state == 14:#判断*=
content = '(特殊符号,' + string + ch + ')'
self.char_message.append(content)
#print string + ch
string = ''#回到初始情况
self.state = 0#回到状态0
elif self.state == 15:#判断*
content = '(特殊符号,' + ch + ')'
self.char_message.append(content)
#print ch
string = ''#回到初始情况
self.state = 0#回到状态0
elif self.state == 16:
if ch == '/':
self.state = 17
i -= 1
elif ch == '=':
self.state = 18
i -= 1
else:
self.state = 19
i -= 2
elif self.state == 17:#判断//
content = '(特殊符号,' + string + ch + ')'
self.char_message.append(content)
content = '(注释,'+ line[i:] +')'
self.annotate_message.append(content)
#print content
string = ''#回到初始情况
self.state = 0#回到状态0
elif self.state == 18:#判断/=
content = '(特殊符号,' + string + ch + ')'
self.char_message.append(content)
#print string + ch
string = ''#回到初始情况
self.state = 0#回到状态0
elif self.state == 19:#判断/
content = '(特殊符号,' + ch + ')'
self.char_message.append(content)
#print ch
string = ''#回到初始情况
self.state = 0#回到状态0
elif self.state == 20:
content = '(特殊符号,=)'
self.char_message.append(content)
#print '='
self.state = 0
string = ''
elif self.state == 21:
content = '(特殊符号,<)'
self.char_message.append(content)
#print '<'
self.state = 0
string = ''
elif self.state == 22:
content = '(特殊符号,{)'
self.char_message.append(content)
#print '{'
self.state = 0
string = ''
elif self.state == 23:
content = '(特殊符号,})'
self.char_message.append(content)
#print '}'
self.state = 0
string = ''
elif self.state == 24:
content = '(特殊符号,;)'
self.char_message.append(content)
#print ';'
self.state = 0
string = ''
elif self.state == 25:
while ch.isspace():
if i < line_length:
ch = line[i]
i += 1
else:
break
self.state = 0
i -= 1
elif self.state == 26:
content = '(行号:'+str(self.line_number)+',' + ch + ')'
self.error_message.append(content)
#print 'error:' + ch
self.state = 0
string = ''
#print self.state
def Get_error(self):#获取错误信息
return self.error_message
def Get_annotate(self):#获取注释信息
return self.annotate_message
def Get_char(self):#获取识别信息
return self.char_message
try:
file_object = open("F:\Source\Python\ProducerCustomer.py")
dfa = DFA(file_object)
dfa.Start_convert()
content = dfa.Get_char()
for item in content:
print(item)
content = dfa.Get_annotate()
for item in content:
print(item)
content = dfa.Get_error()
for item in content:
print(item)
finally:
file_object.close()
| [
"noreply@github.com"
] | noreply@github.com |
86596841d06dceba2da2b8680af57662e8c6258e | a051b5946a037edc08a4e2ebe0b3c8db1ba48e6f | /interview/find_most_populated_year.py | 0587b65df0ec0c237eb601a0cc90415f357c6ac3 | [
"Apache-2.0"
] | permissive | denis-trofimov/learn-python-10 | 8fef96df9e4b19a9651151f56e65b6d6320c0280 | 4296f869a008415826fa224fbd005f6f4be9f81e | refs/heads/master | 2020-03-28T12:18:23.862567 | 2020-02-24T20:34:57 | 2020-02-24T20:34:57 | 148,286,400 | 0 | 0 | Apache-2.0 | 2020-03-24T16:19:21 | 2018-09-11T08:35:58 | Python | UTF-8 | Python | false | false | 644 | py |
class Human(object):
def __init__(self, birth, death, name):
self.birth = birth
self.death = death
self.name = name
def max_population(humans: list) -> int:
""" {year: qty}"""
pop = {}
maximum = 0;
best_year = -1
"""O(humans*longevity)"""
for human in humans:
for year in range(human.burn, human.due):
if not pop.get(year, 0):
pop[year] = 0
else:
pop[year] += 1
if maximum < pop[year]:
maximum = pop[year]
best_year = year
for year, qty in pop.items():
if maximum < qty:
maximum = qty
best_year = year
return best_year
| [
"silaradost@yandex.ru"
] | silaradost@yandex.ru |
54683d9cb619367b78e6684b511981c528f4503c | cb8a942add71b005f4cc9ac4e86aab9943406d65 | /testproject/secret.py | 7d852d0f9b096cb18f6790e5051c1d26209bef63 | [] | no_license | kervinson/testproject | 704c37aaa9094693da244bb7e7ad64aeca791747 | 8aed18100b95c2d70b7b31dd7f4befa695ab337f | refs/heads/master | 2021-01-22T23:43:54.207228 | 2017-09-26T12:24:11 | 2017-09-26T12:24:11 | 101,470,817 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 184 | py | EMAIL_USE_SSL = True
EMAIL_HOST = 'smtp.qq.com'
EMAIL_PORT = 465
EMAIL_HOST_USER = '2388992177@qq.com'
EMAIL_HOST_PASSWORD = 'wfnuutkfytldebhi'
DEFAULT_FROM_EMAIL = '2388992177@qq.com' | [
"2388992177@qq.com"
] | 2388992177@qq.com |
4857427bb3aad0535001583b9b0ac47f2b7011a6 | 8a34a23be2c0d4d2cb1f160d31ae22aa66ed26f6 | /main/migrations/0014_auto_20150917_1110.py | 1fb5acec46d67aba4c8e6d5ed5938c7105df3c48 | [] | no_license | arwaal/final-project | aabd6d97940a32bfe5f5abc3735fda8dd49d1148 | 77cf7d9273992e9c3aeb53a1a828d5c2290911a5 | refs/heads/master | 2021-01-10T13:58:01.542321 | 2015-11-14T08:30:07 | 2015-11-14T08:30:07 | 46,166,819 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,597 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('main', '0013_auto_20150917_0926'),
]
operations = [
migrations.RenameField(
model_name='businesssubmission',
old_name='mobile_number',
new_name='mobile',
),
migrations.RenameField(
model_name='businesssubmission',
old_name='phone_number',
new_name='phone',
),
migrations.AddField(
model_name='recommendation',
name='email',
field=models.CharField(max_length=150, blank=True),
),
migrations.AddField(
model_name='recommendation',
name='facebook',
field=models.CharField(max_length=150, blank=True),
),
migrations.AddField(
model_name='recommendation',
name='instagram',
field=models.CharField(max_length=150, blank=True),
),
migrations.AddField(
model_name='recommendation',
name='twitter',
field=models.CharField(max_length=150, blank=True),
),
migrations.AddField(
model_name='recommendation',
name='website',
field=models.CharField(max_length=150, blank=True),
),
migrations.AddField(
model_name='recommendation',
name='youtube',
field=models.CharField(max_length=500, blank=True),
),
]
| [
"arwa@Arwas-MacBook-Pro.local"
] | arwa@Arwas-MacBook-Pro.local |
307f587d31cc07e174370678eb26c5487377e342 | 157cf9d7327499d86162eb0170684f4b02a9804a | /scrapylib/proxy.py | 8f19e099c4210fe2780818d83ace596d2a39f9ed | [] | no_license | alepharchives/scrapylib | 8f59f6f1abe075adb49fbd28a6f575851cab3099 | 9d84cca95952a19d85c3229df7105502649d99e0 | refs/heads/master | 2021-01-24T01:58:45.568968 | 2012-11-07T20:13:38 | 2012-11-07T20:13:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,322 | py | import base64
from urllib import unquote
from urllib2 import _parse_proxy
from urlparse import urlunparse
from scrapy.conf import settings
class SelectiveProxyMiddleware(object):
"""A middleware to enable http proxy to selected spiders only.
Settings:
HTTP_PROXY -- proxy uri. e.g.: http://user:pass@proxy.host:port
PROXY_SPIDERS -- all requests from these spiders will be routed
through the proxy
"""
def __init__(self):
self.proxy = self.parse_proxy(settings.get('HTTP_PROXY'), 'http')
self.proxy_spiders = set(settings.getlist('PROXY_SPIDERS', []))
def parse_proxy(self, url, orig_type):
proxy_type, user, password, hostport = _parse_proxy(url)
proxy_url = urlunparse((proxy_type or orig_type, hostport, '', '', '', ''))
if user and password:
user_pass = '%s:%s' % (unquote(user), unquote(password))
creds = base64.b64encode(user_pass).strip()
else:
creds = None
return creds, proxy_url
def process_request(self, request, spider):
if spider.name in self.proxy_spiders:
creds, proxy = self.proxy
request.meta['proxy'] = proxy
if creds:
request.headers['Proxy-Authorization'] = 'Basic ' + creds
| [
"pablo@pablohoffman.com"
] | pablo@pablohoffman.com |
cb89719b54fb42a04bddb26a4b20891618a9e092 | f8018939014597700fa601bbb3e0fabc0d7eb39b | /Learn Data Analysis with Python_CHA5_PG76.py | 97766fc149048e3f53c95e30d67ba495663618c6 | [] | no_license | photos514/ISM_6419_Raghav_Dasari | 6c8c5149095c1c41a4639572c2f8c8a056b004a4 | 71475c54b35f0427feb5a12c922d39161ed60128 | refs/heads/master | 2021-04-03T16:10:35.942880 | 2020-03-20T03:38:32 | 2020-03-20T03:38:32 | 248,376,621 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 499 | py | #!/usr/bin/env python
# coding: utf-8
# In[6]:
import matplotlib.pyplot as plt
import pandas as pd
names = ['Bob','Jessica','Mary','John','Mel']
status = ['Senior','Freshman','Sophomore','Senior','Junior']
grades = [76,95,77,78,99]
GradeList = zip(status,grades)
# In[7]:
df = pd.DataFrame(data = GradeList,columns=['status', 'Grades'])
get_ipython().run_line_magic('matplotlib', 'inline')
df.plot(kind='bar')
# In[8]:
df2 = df.set_index(df['status'])
df2.plot(kind="bar")
# In[ ]:
| [
"noreply@github.com"
] | noreply@github.com |
9050b0975c3054ac27ea2a22854e52f9441d1b2b | 5a16e8cec8cc3900096dd9d6914482f63e81b01f | /conf/settings.py | add336da760b26daeb00f9e74195126b40bd040f | [] | no_license | chenrun666/FR- | 8c3f8181781274e2b895c21c95c9ee731dd3f5ce | 5614c2ca469f2ac9529d83b902ce3411416f13c3 | refs/heads/master | 2020-04-22T19:58:36.785285 | 2019-02-14T02:58:36 | 2019-02-14T02:58:36 | 170,626,214 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,327 | py | # 测试环境
TEST = True
CHOOSESITE = False
# 回填结果
result = {
"accountPassword":"",
"accountType":"",
"accountUsername":"",
"cardName": "",
"cardNumber": "",
"checkStatus": True,
"clientType": "",# 跑单的客户端码
"createTaskStatus": True,
"linkEmail": "",
"linkEmailPassword": "",
"linkPhone": "",
"machineCode": "58.57.62.229:20181",# 跑单客户端ip
"nameList": [],# 如果支持乘客分开出,nameList里放本次跑单成功的乘客姓名,单个也是集合
"payTaskId": 0,
"pnr": "ZTY2TG",# 跑单成功的pnr
"price": 0.00, # 支付的机票含税总价
"baggagePrice":0.00,# 支付行李总价
"sourceCur": "CNY",
"errorMessage":"",
"status": 0, # 350 保留成功,301 保留失败, 450 支付成功 ,401 支付失败
"targetCur": "MYR",
"promo":"使用的优惠码",
"creditEmail":"信用账号邮箱",
"creditEmailCost":"信用账号花费",
}
bookStatus = {
"BookingFail" : 301, #301, "预定失败"
"PriceVerifyFail" : 340, #340, "跑单失败,执行下一条规则"
"BookingSuccess" : 350, #350, "预定成功"
"PayFail" : 401, #401, "支付失败"
"PayFailAfterSubmitCard" : 440, #440, "提交卡号后失败"
"PaySuccess" : 450 #450, "支付成功"
} | [
"17610780919@163.com"
] | 17610780919@163.com |
2c9a221ad5dbc16c5d082b77293b4181768db006 | 9dec118d74fcbfbbef40f0393649b6ac2e23bd3e | /api/routers/feed.py | 9e1b6d463578e1106be843e032a29d96263555b0 | [] | no_license | larrykubin/showlists | 6fed8353a17415d9fecb4101096a1e448ca427c5 | c144862b53ee4a7c8d0577ef4418bb271fee7bf7 | refs/heads/master | 2021-05-21T11:18:59.940819 | 2020-04-02T20:25:43 | 2020-04-02T20:25:43 | 63,821,500 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 558 | py | import boto3
from botocore.exceptions import ClientError
from fastapi import APIRouter, Depends, Form, Request
from .auth import get_current_active_user
from pydantic import BaseModel
from models import Attachment, User, Show, ShowAttachment
from db import db
from .auth import get_current_user
from typing import List
from sqlalchemy import text
# Router for the public feed endpoints; mounted by the application elsewhere.
router = APIRouter()
@router.get("/")
async def feed():
    """Return the 10 most recently created shows, newest first.

    Response shape: {"code": "SUCCESS", "shows": [...]}.
    NOTE(review): the join to User presumably exists to pull in each
    show's owner — confirm it is required, since only Show rows are
    returned here.
    """
    # Newest-first by Show.created, capped at 10 rows.
    shows = db.query(Show).join(User).order_by(Show.created.desc()).limit(10).all()
    return {
        "code": "SUCCESS",
        "shows": shows
    } | [
"59261823+financehacks@users.noreply.github.com"
] | 59261823+financehacks@users.noreply.github.com |
4d74b892e84c8743de61cae4f7390681d20d20af | 496b6f92d62999ee88a8a1ff6dfe64285ec3fc56 | /ayush_crowdbotics_211/urls.py | 21b8756b708b4532e4e76355dd3f3bcf448d6d08 | [] | no_license | payush/ayush-crowdbotics-211 | cab04122c6c605d1fa6993630cd81dd39a81e1f5 | 72ddd9b7d506faa430215575913fb09834051a63 | refs/heads/master | 2020-03-23T10:19:23.756211 | 2018-07-18T13:16:00 | 2018-07-18T13:16:00 | 141,437,083 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 931 | py | """ayush_crowdbotics_211 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
# Route table — Django tries patterns top to bottom; include() falls through
# to the next entry when no sub-pattern matches.
urlpatterns = [
    # Project home app; the empty pattern matches from the site root.
    url('', include('home.urls')),
    # django-allauth account/authentication URLs.
    url(r'^accounts/', include('allauth.urls')),
    # Versioned REST API endpoints.
    url(r'^api/v1/', include('home.api.v1.urls')),
    # Django admin site.
    url(r'^admin/', admin.site.urls),
]
| [
"ayushpuroheet@gmail.com"
] | ayushpuroheet@gmail.com |
c78d63f4c02eeb276c37f7dab1d9be5488795ab6 | 4332968c78d838902b02e8e819c70864429c4f40 | /Timer.py | a8812e244bf379d5925e6fc7a555a38d6e1656f7 | [
"MIT"
] | permissive | KRHS-GameProgramming-2016/Spoonghetti-Man-v2 | 878b52dfb7d27bb54e6d09d4f427205dfa209067 | 415c183e06f84e493b4f5774f32bb45aea54c83b | refs/heads/master | 2021-01-18T23:08:07.109527 | 2017-06-12T15:46:30 | 2017-06-12T15:46:30 | 87,090,752 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 631 | py | import pygame, sys, math, time
from Score import *
class Timer(Score):
    """On-screen timer that displays whole seconds elapsed since creation.

    Reuses the font/value/rect plumbing inherited from Score; ``self.value``
    holds the number of elapsed seconds currently rendered.
    """

    # time.clock() was removed in Python 3.8 and, on Unix, measured CPU time
    # rather than wall-clock time — wrong for an on-screen timer.  Prefer
    # time.perf_counter() (3.3+) and fall back to time.clock only on older
    # interpreters; the short-circuiting `or` never touches time.clock where
    # it no longer exists.
    _now = staticmethod(getattr(time, "perf_counter", None) or time.clock)

    def __init__(self, pos):
        Score.__init__(self, pos)
        self.startTime = Timer._now()  # reference point for elapsed time
        self._redraw()

    def update(self):
        """Refresh the label, re-rendering only when the second count changes."""
        newValue = int(Timer._now() - self.startTime)
        if newValue != self.value:
            self.value = newValue
            self._redraw()

    def _redraw(self):
        # Render "Time: N" in black and keep the image centered where it was.
        # NOTE(review): assumes Score.__init__ set self.font, self.value and
        # self.rect before this runs — confirm against Score.py.
        self.image = self.font.render("Time: " + str(self.value), True, (0,0,0))
        self.rect = self.image.get_rect(center = self.rect.center)
| [
"EthanJ.Thompson@kearsarge.org"
] | EthanJ.Thompson@kearsarge.org |
5b94d7cb3c951405797f09f7785cf0ac70ee2123 | 07c75f8717683b9c84864c446a460681150fb6a9 | /back_cursor/S-scrapy/zhilianspider2/zhilianspider2/settings.py | 76979aa5a98fe2c2624dddecb681be245fbf0fda | [] | no_license | laomu/py_1709 | 987d9307d9025001bd4386381899eb3778f9ccd6 | 80630e6ac3ed348a2a6445e90754bb6198cfe65a | refs/heads/master | 2021-05-11T09:56:45.382526 | 2018-01-19T07:08:00 | 2018-01-19T07:08:00 | 118,088,974 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,160 | py | # -*- coding: utf-8 -*-
# Scrapy settings for zhilianspider2 project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://doc.scrapy.org/en/latest/topics/settings.html
# https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
# Name Scrapy uses for logging and the default User-Agent token.
BOT_NAME = 'zhilianspider2'
# Where Scrapy discovers spider classes, and where `genspider` puts new ones.
SPIDER_MODULES = ['zhilianspider2.spiders']
NEWSPIDER_MODULE = 'zhilianspider2.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'zhilianspider2 (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'zhilianspider2.middlewares.Zhilianspider2SpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'zhilianspider2.middlewares.Zhilianspider2DownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'zhilianspider2.pipelines.Zhilianspider2Pipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
| [
"1007821300@qq.com"
] | 1007821300@qq.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.