blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 213
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 246
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
81353cc3889ed290f47df18bd617a984e547a18c | c3358a9a9914fdba9a4a276b68f7c3c921571a14 | /bin/wheel | b2a57ed3a505f0c031772736af677e9172331846 | [] | no_license | mohithg/django_learn | ee411da9402ff8a73ce8120f25e1ce558d2936d7 | cb917cbcf53be1ec9d8ac30b5d45d0e723995df9 | refs/heads/master | 2021-08-07T05:00:24.800568 | 2017-11-07T15:35:43 | 2017-11-07T15:35:43 | 109,852,629 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 241 | #!/Users/mohithg/learning/django/bin/python2.7
# -*- coding: utf-8 -*-
import re
import sys
from wheel.tool import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"mohithgm@gmail.com"
] | mohithgm@gmail.com | |
26448faf3cba7c0c50f9b20ffe7ffba940ed1a4b | 0ceb04ce763cf8b73f9b71a211990070abc1f5bb | /src/helpers/plotterHelper.py | 3685e691e8b957c5a86229632115e90d10ddc46d | [
"MIT"
] | permissive | fangzhimeng/MachineLearningRegressionBenchmark | e04412581ca21d82f4bbff64ba29ec9795337cac | 42a83a1261dbf6b30624e9950db5b2d297622d76 | refs/heads/main | 2023-03-21T04:57:51.633945 | 2021-01-10T20:20:24 | 2021-01-10T20:20:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,840 | py | from matplotlib import pyplot
from pandas.plotting import register_matplotlib_converters
from matplotlib import style
from numpy import arange, linspace, ndarray, array, unique
from scipy.interpolate import make_interp_spline, BSpline
from src.store import Store
style.use('ggplot')
register_matplotlib_converters()
class PlotterHelper:
@staticmethod
def show():
pyplot.show()
@staticmethod
def plotEvaluations(names: list, results: list, figure: str, show: bool = False):
f = pyplot.figure(figure)
x = arange(len(names)) # the label locations
width = 0.35 # the width of the bars
rects1 = pyplot.bar(x - width / 2, results, width)
# Add some text for names, title and custom x-axis tick names, etc.
pyplot.ylabel('Scores')
pyplot.title('Models Scores')
pyplot.xticks(x, names)
def autolabel(rects):
"""Attach a text label above each bar in *rects*, displaying its height."""
for rect in rects:
height = rect.get_height()
pyplot.annotate('{}'.format(height),
xy=(rect.get_x() + rect.get_width() / 2, height),
xytext=(0, -75), # 3 points vertical offset
textcoords="offset points",
ha='center', va='bottom', rotation=90)
autolabel(rects1)
f.tight_layout()
if show:
pyplot.show()
else:
return f
@staticmethod
def plotFormula(store: Store, figure: str, show: bool = False):
features = store.features
fig = pyplot.figure(figure, figsize=[10, 10])
pyplot.suptitle(store.model.getAlgorithmUsed(), y=1, fontsize=18)
pyplot.title(store.resultingFunction, wrap=True, fontsize=10)
pyplot.xlabel('Variables - X')
pyplot.ylabel('Results - Y')
for feature in features:
x = store.dataSet.getFeatureData(feature)
y = store.dataSet.getLabelData()
uqIdx = PlotterHelper.uniqueIndexes(x, y)
x, y = PlotterHelper.sortRelatedLists(x[uqIdx], y[uqIdx])
xNew = linspace(x.min(), x.max(), store.numberOfSamples)
bSpline = make_interp_spline(x, y)
yNew = bSpline(xNew)
pyplot.plot(xNew, yNew, label=feature)
pyplot.legend(loc=3)
if show:
pyplot.show()
else:
return fig
@staticmethod
def sortRelatedLists(list1: ndarray, list2: ndarray) -> (array, array):
x, y = (array(t) for t in zip(*sorted(zip(list1, list2))))
return x, y
@staticmethod
def uniqueIndexes(x: array, y: array):
arr = array([*zip(y, x)])
return unique(arr[:, 1], return_index=True)[1]
| [
"iulian.octavian.preda@gmail.com"
] | iulian.octavian.preda@gmail.com |
42f478e01f3ca3ec68d1abb1a1fbf7bba99fdb5e | 9647e44409b261f823ceccce79b4ec34ea3a9bd6 | /timeapp/__init__.py | e2d04bb17d4cbbe39ae8da13fd276aa0441176e2 | [
"MIT"
] | permissive | iskenderunlu/jogging-time-management | a7cbaf37fd6678571608b7d9277655f7fcc3e136 | 1cf88bee2abe1f237a2e6f194264064ec89d6e4a | refs/heads/master | 2022-11-28T22:12:12.948328 | 2020-08-19T11:31:25 | 2020-08-19T11:31:25 | 288,540,160 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 90 | py | ## write according to your app name here
default_app_config = 'Timeapp.apps.TimeappConfig' | [
"iskenderunlu804@gmail.com"
] | iskenderunlu804@gmail.com |
c57d4e1cdaa74d2a4820707c049cc11363c0b70c | 2597c0487ce5b7a388df2e5c377721c139a1acd8 | /bin/python/comparison.py | 1fc37db8d81e9b0967940e802c66faaf4f806885 | [] | no_license | cilesiz/fluffy-octo-system | 8ca5b46fb2bad0ad9599be24eaeb8b01c3cb06f0 | 8d6e7d32d7a07c4e68dfb154d3460a9ca55726c3 | refs/heads/master | 2020-03-20T02:58:02.157506 | 2015-12-27T06:02:32 | 2015-12-27T06:02:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 114 | py | #!/usr/bin/env python
message = "new string"
message2 = "new string"
if message == message2:
print "they match"
| [
"rossof@icloud.com"
] | rossof@icloud.com |
f38e52cda7f5a3f771a65f7eeb92d6375981bb4a | f25440c9f9fd470ba44394a36d5659dd47ee8800 | /tests/conftest.py | 6ee0b688b6b226162706d75c8e1acd7eadcb3541 | [] | no_license | kqf/hubmap | 75010d9109f8b8656e244179de5de226be584d5b | 37b3d839f0ad3f47dc39c1b9b036cb1acc27ca2c | refs/heads/master | 2023-02-20T04:06:00.145932 | 2021-01-23T07:56:13 | 2021-01-23T07:56:13 | 317,635,257 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 552 | py | import pytest
import tempfile
from pathlib import Path
from models.preprocess import write
from models.mc import make_blob, blob2image
@pytest.fixture
def size():
return 256
@pytest.fixture
def fake_dataset(size=256, nfiles=5):
with tempfile.TemporaryDirectory() as dirname:
path = Path(dirname)
for i in range(nfiles):
mask = make_blob(size, size)
write(mask, path / str(i) / "mask.png")
tile = blob2image(mask)
write(tile, path / str(i) / "tile.png")
yield path
| [
"noreply@github.com"
] | kqf.noreply@github.com |
46fb30be2965828aa50fce9cd0eb5a1588be3c08 | 6b556d8096c14e7ee3b408a066808baf6de138b3 | /main.py | df363c2c8228f28add5cad03c0f1b7a4f81480ba | [] | no_license | hieumdd/gavin_stripe | a717be8c3c92331533394ea8f7fda72abf1699dd | e0a9d8004dedcf82291fbed31e3edf720c137690 | refs/heads/master | 2023-06-06T00:05:26.062564 | 2021-06-22T13:34:33 | 2021-06-22T13:34:33 | 379,280,648 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 317 | py | from models import BalanceTransactions
def main(request):
request_json = request.get_json()
job = BalanceTransactions(start=request_json.get('start'), end=request_json.get('end'))
responses = {
"pipelines": "Stripe",
"results": [job.run()]
}
print(responses)
return responses
| [
"hieumdd@gmail.com"
] | hieumdd@gmail.com |
e1dcd2a11d7423ba518efc1697c3a148293ffa2a | 5456502f97627278cbd6e16d002d50f1de3da7bb | /components/google/core/browser/DEPS | 26e9743a04d2db628f4a7357a7d73e4ad5cf843a | [
"BSD-3-Clause"
] | permissive | TrellixVulnTeam/Chromium_7C66 | 72d108a413909eb3bd36c73a6c2f98de1573b6e5 | c8649ab2a0f5a747369ed50351209a42f59672ee | refs/heads/master | 2023-03-16T12:51:40.231959 | 2017-12-20T10:38:26 | 2017-12-20T10:38:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 135 | include_rules = [
"+components/data_use_measurement/core",
"+components/keyed_service/core",
"+components/pref_registry",
]
| [
"lixiaodonglove7@aliyun.com"
] | lixiaodonglove7@aliyun.com | |
da48be8998dbc65a10f28b1a195faa144e03a79c | 88c3f6dd1e62da124a9718f745ced22e28491d62 | /FWHM.py | de3fdf5d7a4bae1eb711aefd34c5a67f10beeb45 | [] | no_license | zhazhajust/THzScript | df79edfb72665074ec79684be17d8f63fdabaa49 | 005c4206c870aca430ffa794bfe3a485fff2b9c6 | refs/heads/main | 2023-07-15T18:43:43.169484 | 2021-08-20T13:11:29 | 2021-08-20T13:11:29 | 398,280,728 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 975 | py | import numpy as np
import constant as const
import scipy.signal
from scipy.signal import chirp, find_peaks, peak_widths
from scipy.interpolate import UnivariateSpline
import matplotlib.pyplot as plt
load_dir = const.txtdir+"xt.npy"
xt = np.load(load_dir)
xt = xt.real
xtProfile = [] #* (0+0*1j)
#xt = xt.astype('complex128')
FWHM=[]
print(xt.shape)
x = np.arange(xt.shape[1])
for i in range(0,xt.shape[0],1000):
#print(xt[i].shape)
index=np.argmax(xt[i])
Xmax=xt[i,index]
#print(scipy.signal.hilbert(xt[i]))
xtProfile.append(scipy.signal.hilbert(xt[i]))
#f = UnivariateSpline(x , xtProfile[-1],s = 100)
#xtProfile[-1] = f(x)
peaks, _ = find_peaks(xtProfile[-1])
results_half = peak_widths(xtProfile[-1], peaks, rel_height=0.5)
#print(results_half[0])
try:
FWHM.append(results_half[0].max())
except:
FWHM.append(np.nan)
#print(np.where(xt[i]>Xmax/2,0,1))
#FWHM[i] = np.where(xt[i],Xmax/2)
print(FWHM)
| [
"251338258@qq.com"
] | 251338258@qq.com |
b37c7369aca8f5f50c6d95de5bc3431d254cf30f | d0fc402348cc87cf378336a2e4173cd9eede3c9b | /std_generator/data_helper.py | 761bad756f0aad0b95b0ebd660b46c603fd4468c | [] | no_license | dominthomas/gpu_farm | 154ef37003a6692b3f6a8157975f222b8560b2b3 | 67f5e1326e7d5579004f4acc1d31c66b19615f6b | refs/heads/master | 2021-02-09T04:51:11.028799 | 2020-04-19T14:44:10 | 2020-04-19T14:44:10 | 244,241,602 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,079 | py | import nibabel
import numpy as np
from tensorflow import keras as K
class DataGenerator(K.utils.Sequence):
def __init__(self, list_IDs, labels, batch_size=5, dim=(176, 256, 256), n_channels=1,
n_classes=10, shuffle=True):
"""Initialization"""
self.dim = dim
self.batch_size = batch_size
self.labels = labels
self.list_IDs = list_IDs
self.n_channels = n_channels
self.n_classes = n_classes
self.shuffle = shuffle
self.on_epoch_end()
def on_epoch_end(self):
'Updates indexes after each epoch'
self.indexes = np.arange(len(self.list_IDs))
if self.shuffle:
np.random.shuffle(self.indexes)
def __data_generation(self, list_IDs_temp):
"""Generates data containing batch_size samples""" # X : (n_samples, *dim, n_channels)
# Initialization
X = np.empty((self.batch_size, *self.dim, self.n_channels))
y = np.empty(self.batch_size, dtype=int)
# Generate data
for i, ID in enumerate(list_IDs_temp):
# Store sample
nifti = np.asarray(nibabel.load(ID).get_fdata())
xs, ys, zs = np.where(nifti != 0)
nifti = nifti[min(xs):max(xs) + 1, min(ys):max(ys) + 1, min(zs):max(zs) + 1]
nifti = nifti[0:100, 0:100, 0:100]
X[i,] = np.reshape(nifti, (100, 100, 100, 1))
# Store class
y[i] = self.labels[ID]
return X, K.utils.to_categorical(y, num_classes=self.n_classes)
def __len__(self):
"""Denotes the number of batches per epoch"""
return int(np.floor(len(self.list_IDs) / self.batch_size))
def __getitem__(self, index):
"""Generate one batch of data"""
# Generate indexes of the batch
indexes = self.indexes[index * self.batch_size:(index + 1) * self.batch_size]
# Find list of IDs
list_IDs_temp = [self.list_IDs[k] for k in indexes]
# Generate data
X, y = self.__data_generation(list_IDs_temp)
return X, y
| [
"dthomas@donbibi.localdomain"
] | dthomas@donbibi.localdomain |
5fd5f69280f7e2c8dfa60b2c1d5a770471cc61ab | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2520/60790/274856.py | 18e4e0a2884251d28a2c4c3bc79d6f4d2f5ba4c8 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 168 | py | R=int(input())
C=int(input())
r0=int(input())
c0=int(input())
print(sorted([[i, j] for i in range(R) for j in range(C)], key=lambda x: abs(x[0] - r0) + abs(x[1] - c0))) | [
"1069583789@qq.com"
] | 1069583789@qq.com |
974a2ad112f38e2fe813b9a8d77b4b96920d50d6 | c46cb6ffb259124d38c7babcf1cadb6b3be5d594 | /test.py | 1fe28ca510a1cfc26ee15c91d9fa822f0e030447 | [] | no_license | sundshinerj/WWW | f04ee6719d07ee07fd2a8017bf0333e4670154fe | b83980d38eb94ffedfa193be6e2d264aabce7c9f | refs/heads/master | 2021-08-22T03:55:29.131487 | 2017-11-29T06:44:10 | 2017-11-29T06:44:10 | 112,165,146 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,005 | py | #!/usr/bin/env python
#-*- coding:utf-8 -*-
#@author: sundsinerj
#@date: 2017/10/25
import MySQLdb
import time
import json
db = MySQLdb.connect("127.0.0.1","root","root","xedaojia_ams")
cursor = db.cursor()
# last_time = int(time.time())
# begien_time = int(last_time - 21600)
last_time = 1506412800
begien_time = 1506398400
sql = 'select clock,value_avg from trends_uint where clock>='+str(begien_time)+' and clock<='+str(last_time)+';'
cursor.execute(sql)
results = cursor.fetchall()
cursor.close()
db.commit()
db.close()
formats = '%H:%M:%S'
time.strftime(formats,time.localtime(1508941095))
list_date = []
#print(format(float(a)/float(b),'.2f'))
for i in range(len(results)):
data_clock = time.strftime(formats,time.localtime(int(results[i][0])))
data_value = format(float(int(results[i][1]))/1024/1024,'.2f')
data_value = str(data_value)
list_date.append({"clock": data_clock,"value_avg": data_value})
#list_date.append({data_clock: data_value})
print json.dumps(list_date) | [
"sundshinerj@gmail.com"
] | sundshinerj@gmail.com |
5c53a777153b9ad4cece20454b6b93bfa892ab0d | 171baddb78f2f7bfdf32490112cff4bd2f32389f | /scripts/corridor_load_histograms.py | c266fe83c611e81fd87d34c25a2224b4ec8be1b9 | [] | no_license | sergimolina/stefmap_ros | 6795f0c988ece1ef56e888b8a295b40a28cdce93 | c55fa3497a162f4a8163b914df023a95379d7ef6 | refs/heads/master | 2022-12-06T23:42:46.335325 | 2022-11-30T15:40:20 | 2022-11-30T15:40:20 | 168,020,781 | 7 | 0 | null | null | null | null | UTF-8 | Python | false | false | 358 | py | #!/usr/bin/python2
import os
if __name__ == '__main__':
#python ~/workspace/cpp_ws/src/mod_ros/stefmap_ros/scripts/tools/load_histograms.py ../histograms.txt
output_file_name = "./../data/corridor_2017_05_31_histograms.txt"
print("Loading histograms to FreMEn...")
os.system("./tools/load_histograms.py "+output_file_name)
print("Done")
| [
"sergimolina91@gmail.com"
] | sergimolina91@gmail.com |
df199d45bbed5b2eb57ed382acd03991cfdeaff4 | 1ce4c43907ec04f1e797c317871a23bcec46c3c8 | /scripts/utilities/toggle_spout_leds.py | 85bc4fc4c907348d12eef83f0aa5ecb56a414877 | [] | no_license | m-col/reach | d9709593e1f0ec25786a4c4e601b14b26419ce96 | 8fabb4ce30ddb39260039ebea2d46a919dfbba14 | refs/heads/master | 2022-05-31T08:51:31.270970 | 2022-05-21T19:46:12 | 2022-05-21T19:46:12 | 169,552,311 | 1 | 2 | null | 2021-06-25T14:18:21 | 2019-02-07T10:12:30 | Python | UTF-8 | Python | false | false | 155 | py | #!/usr/bin/env python3
"""
Toggle the LEDs.
"""
from reach.backends.raspberrypi import Utilities
rpi = Utilities()
rpi.toggle_spout_leds()
rpi.cleanup()
| [
"mcol@posteo.net"
] | mcol@posteo.net |
f160ae11bb28516c727156fdb31f749a0e3e40e9 | 9b903480e4153e31f1065b59af670968ba55902f | /weather.py | 5b9a35bbbdaf52d61b2be2ccc6e48e46e2d268f7 | [] | no_license | nickdebCompApps/snips-assistant | d7e1fac0fa5cce342550de599df01538feca2137 | a4f22f5298729514b990db21b8daa4fb40411d45 | refs/heads/master | 2020-03-09T03:09:35.982017 | 2018-04-07T18:59:04 | 2018-04-07T18:59:04 | 128,558,537 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,330 | py | import forecastio
import datetime
from time import sleep
import requests
import json
from json import dumps
from key import keys
def timeConvert(miliTime):
hours, minutes = miliTime.split(":")
hours, minutes = int(hours), int(minutes)
setting = " AM"
if hours >= 12:
if hours == 12:
setting = " PM"
hours = hours
else:
setting = " PM"
hours -= 12
if hours == 0:
hours = 12
return(("%02d:%02d" + setting) % (hours, minutes))
def Weather(conn):
ip_url = 'https://freegeoip.net/json'
request_zip = requests.get(ip_url)
load_zip = json.loads(request_zip.text)
lat = str(load_zip['latitude'])
longs = str(load_zip['longitude'])
API = key.api_keys['WEATHER_API']
forecast = forecastio.manual('https://api.darksky.net/forecast/6a92bd8d0626c735970600815a0323a7/' + lat + ',' + longs + '')
byHour = forecast.hourly()
high_low = []
for currentData in forecast.daily().data:
high_low_list = []
high_low_list.extend((currentData.temperatureLow, currentData.temperatureHigh))
high_low.append(high_low_list)
forecast_array = []
high = str(int(round(high_low[0][1])))
low = str(int(round(high_low[0][0])))
#LOOP THROUGH HOURLY DATA
for hourlyData in byHour.data:
#CREATE ARRAY TO APPEND TO MASTER ARRAY
forecast_array_list = []
#GET TEMPERATURE TIME DATE AND SUMMARY
temp = str(int(round(hourlyData.temperature)))
time = hourlyData.time
time = time - datetime.timedelta(hours=5)
time = str(time).split()
test_time = time[1]
time_date = time[0]
test_time = test_time[:-3]
#CONVERT TIME TO STANDARD 12 HR TIME
time = timeConvert(test_time)
summary = hourlyData.summary
#APPEND VARIABLES TO SINGLE ARRAY CREATED EARLIER AND THEN APPEND TO MASTER ARRAY FOR 2D ARRAY
forecast_array_list.extend((temp,time,summary,time_date))
forecast_array.append(forecast_array_list)
#DELETE 25 ROWS AS WE DONT NEED ALL OF THEM
#for i in range(25):
#del forecast_array[-i]
print(forecast_array)
conn.send((forecast_array, high, low))
conn.close()
return(forecast_array, high, low)
#weather = weather()
#print(weather)
| [
"noreply@github.com"
] | nickdebCompApps.noreply@github.com |
0b5d3655b298036c53309389fcfc2864b3c16b97 | aa535ed791407504aa24eac32da2e7b15f1b19b6 | /iconsBked/settings.py | 81da8c1f077645ad7b51f182cddb9a01a4fd98f7 | [
"MIT"
] | permissive | Epath-Pro/icons-bked | 404698e6401a7d16b947d7494ca627f777f8e260 | edc8cf57d4c6ee31369ef8d3751f3d89cc6d375a | refs/heads/main | 2023-07-14T17:30:07.932842 | 2021-08-29T08:20:38 | 2021-08-29T08:20:38 | 390,057,494 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,285 | py | import os
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-#7(tc6!^b464v(w7v04%)wm-=&fop(iukt#ttmbvd$d7b#vmzt'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*','.vercel.app']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'corsheaders',
'rest_framework',
'icons'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'corsheaders.middleware.CorsMiddleware',
# 'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'iconsBked.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'iconsBked.wsgi.application'
CORS_ORIGIN_ALLOW_ALL = True
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'postgres',
'USER': 'postgres',
'PASSWORD': 'MediumSB@2020!',
'HOST': 'db.pdilyfnochamrjvvkenn.supabase.co',
'PORT': '5432',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| [
"elearningpath0620@gmail.com"
] | elearningpath0620@gmail.com |
03cc688115e56b3caacc8b1bcb0a2acf97cca126 | 89eec81430daea547822c26cf637bcd9db5e57ad | /pols/migrations/0005_question_number.py | 4d1e7c4c89dfaeeeb276ec140daf28cdb8c5dd7a | [] | no_license | sanlem/teston | 5bd2f01ef4dc4f3cfef8189d6ea259af78fe4388 | 89c21ea745b1b517c589caf5688c7a856548d904 | refs/heads/master | 2020-12-11T22:17:23.943699 | 2015-06-18T15:44:08 | 2015-06-18T15:44:08 | 36,315,768 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 398 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('pols', '0004_auto_20150525_1549'),
]
operations = [
migrations.AddField(
model_name='question',
name='number',
field=models.IntegerField(default=1),
),
]
| [
"vfranchook@gmail.com"
] | vfranchook@gmail.com |
d0fae8b7c4d33afb588c1fd017fe389b750b6135 | 547ac7b09add2e24146f59fa4377188cd59419fb | /reprozip/pack/vt_workflow/workflow_utils.py | 316ba09a4b52958588d151e1ded15d6b8c4f1937 | [
"BSD-3-Clause"
] | permissive | fchirigati/reprozip | 44b274fec6d9558a97c85e7eb0678730702ccfe0 | fb7b4e18a6938fdb10b6fe8e0fcd042ce4547375 | refs/heads/master | 2020-05-18T05:47:17.156691 | 2018-06-19T22:39:27 | 2018-06-19T22:39:27 | 10,867,693 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 10,245 | py | ###############################################################################
##
## Copyright (C) 2012-2013, NYU-Poly.
## All rights reserved.
## Contact: fchirigati@nyu.edu
##
## This file is part of ReproZip.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of NYU-Poly nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
class WfObject:
"""
WfObject represents a VisTrails workflow object.
"""
def __init__(self):
"""
Init method for WfObject
"""
self.__sub_element = None
def get_sub_element(self):
return self.__sub_element
def set_sub_element(self, value):
self.__sub_element = value
sub_element = property(get_sub_element, set_sub_element, None, None)
class Module(WfObject):
"""
Module represents a module in the VisTrails workflow.
"""
def __init__(self, id, cache, name, namespace, package, version):
"""
Init method for Module.
-> id is the unique id of the object;
-> cache indicates whether the module is cacheable or not;
-> name is the name of the module;
-> namespace is the namespace of the module;
-> package is the package that contains the module;
-> version is the version of the package
"""
WfObject.__init__(self)
self.__id = id
self.__cache = cache
self.__name = name
self.__namespace = namespace
self.__package = package
self.__version = version
def get_id(self):
return self.__id
def get_cache(self):
return self.__cache
def get_name(self):
return self.__name
def get_namespace(self):
return self.__namespace
def get_package(self):
return self.__package
def get_version(self):
return self.__version
id = property(get_id, None, None, None)
cache = property(get_cache, None, None, None)
name = property(get_name, None, None, None)
namespace = property(get_namespace, None, None, None)
package = property(get_package, None, None, None)
version = property(get_version, None, None, None)
class Annotation(WfObject):
"""
Annotation represents an annotation in an object of the VisTrails workflow.
"""
def __init__(self, id, wf_object, key, value):
"""
Init method for Annotation.
-> id is the unique id of the annotation;
-> wf_object is the object from the workflow with which the annotation
is associated;
-> key is the key of the annotation;
-> value is the value of the annotation
"""
WfObject.__init__(self)
self.__id = id
self.__wf_object = wf_object
self.__key = key
self.__value = value
def get_id(self):
return self.__id
def get_wf_object(self):
return self.__wf_object
def get_key(self):
return self.__key
def get_value(self):
return self.__value
id = property(get_id, None, None, None)
wf_object = property(get_wf_object, None, None, None)
key = property(get_key, None, None, None)
value = property(get_value, None, None, None)
class Location(WfObject):
"""
Location represents the location of a VisTrails module.
"""
def __init__(self, id, module, x, y):
"""
Init method for Location.
-> id is the unique id of the object;
-> module is the module with which the location is associated;
-> x is the position in the x axis;
-> y is the position in the y axis
"""
WfObject.__init__(self)
self.__id = id
self.__module = module
self.__x = x
self.__y = y
def get_id(self):
return self.__id
def get_module(self):
return self.__module
def get_x(self):
return self.__x
def get_y(self):
return self.__y
id = property(get_id, None, None, None)
module = property(get_module, None, None, None)
x = property(get_x, None, None, None)
y = property(get_y, None, None, None)
class Function(WfObject):
"""
Function represents a function of a VisTrails module.
"""
def __init__(self, id, module, name, pos):
"""
Init method for Function.
-> id is the unique id of the object;
-> module is the module with which the function is associated;
-> name is the name of the function;
-> pos is... well, pos :-)
"""
WfObject.__init__(self)
self.__id = id
self.__module = module
self.__name = name
self.__pos = pos
def get_id(self):
return self.__id
def get_module(self):
return self.__module
def get_name(self):
return self.__name
def get_pos(self):
return self.__pos
id = property(get_id, None, None, None)
module = property(get_module, None, None, None)
name = property(get_name, None, None, None)
pos = property(get_pos, None, None, None)
class Parameter(WfObject):
"""
Parameter represents the parameter for a function in a VisTrails workflow.
"""
def __init__(self, id, function, alias, name, pos, type, value):
"""
Init method for Parameter.
-> id is the unique id of the object;
-> function is the function with which the parameter is associated;
-> alias is an alias for the parameter;
-> name is the name of the parameter;
-> pos is, well... pos :-)
-> type represents the type of the parameter;
-> value is the value of the parameter, respecting the type
"""
WfObject.__init__(self)
self.__id = id
self.__function = function
self.__alias = alias
self.__name = name
self.__pos = pos
self.__type = type
self.__value = value
def get_id(self):
return self.__id
def get_function(self):
return self.__function
def get_alias(self):
return self.__alias
def get_name(self):
return self.__name
def get_pos(self):
return self.__pos
def get_type(self):
return self.__type
def get_value(self):
return self.__value
id = property(get_id, None, None, None)
function = property(get_function, None, None, None)
alias = property(get_alias, None, None, None)
name = property(get_name, None, None, None)
pos = property(get_pos, None, None, None)
type = property(get_type, None, None, None)
value = property(get_value, None, None, None)
class Connection(WfObject):
"""
Connection represents a connection in a VisTrails workflow.
"""
def __init__(self, id, source, dst):
"""
Init method for Connection.
-> id is the unique id of the object;
-> source is the source port of the connection;
-> dst is the destination port of the connection
"""
WfObject.__init__(self)
self.__id = id
self.__source = source
self.__dst = dst
def get_id(self):
return self.__id
def get_source(self):
return self.__source
def get_dst(self):
return self.__dst
id = property(get_id, None, None, None)
source = property(get_source, None, None, None)
dst = property(get_dst, None, None, None)
class Port(WfObject):
"""
Port represents a port in a VisTrails connection.
"""
def __init__(self, id, module, name, signature):
"""
Init method for Port.
-> id is the unique id of the object;
-> module is the module with which the port is associated;
-> name is the name of the port;
-> signature is the signature of the port
"""
WfObject.__init__(self)
self.__id = id
self.__module = module
self.__name = name
self.__signature = signature
def get_id(self):
return self.__id
def get_module(self):
return self.__module
def get_name(self):
return self.__name
def get_signature(self):
return self.__signature
id = property(get_id, None, None, None)
module = property(get_module, None, None, None)
name = property(get_name, None, None, None)
signature = property(get_signature, None, None, None)
| [
"fernando.chirigati@gmail.com"
] | fernando.chirigati@gmail.com |
c9d87460c9daf44323f8c8e853dd25cd21cb8670 | 35b96d09ff3b74e7f05cc0085dde129456d70ad9 | /tornado/Day5/tornado_sqlalchemy.py | 10d41141ef829b75b22f49e22e4892636e6990f9 | [] | no_license | yanghongfei/Python | ef0e54f98bc390ffd908d27f2ed306952b3bba46 | f1103754e2752d38bcfd4357aa4b1a2318b33e31 | refs/heads/master | 2020-07-01T20:06:52.870910 | 2018-11-01T09:15:34 | 2018-11-01T09:15:34 | 74,260,335 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,238 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/9/30 10:49
# @Author : Fred Yang
# @File : tornado_sqlalchemy.py
# @Role : Sqlalchemy 增删改查
# 导入
from sqlalchemy import Column, String, create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from settings import DB_INFO
# Pull the individual connection settings out of the project config dict.
HOST=DB_INFO['host']
USER=DB_INFO['user']
PORT=DB_INFO['port']
PASSWD=DB_INFO['password']
DB_NAME= DB_INFO['db_name']
# Base class that all ORM models derive from:
Base = declarative_base()
# Define the User model:
class User(Base):
    # Name of the backing table:
    __tablename__ = 'user'
    # Table structure:
    id = Column(String(100), primary_key=True)
    name = Column(String(200))
class Weibo(Base):
    # A weibo (microblog) post.
    __tablename__ = 'weibo'
    id = Column(String(100), primary_key=True)
    username = Column(String(100))  # poster's username
    content = Column(String(1000))  # post body text
# Initialize the database connection:
engine = create_engine('mysql+mysqlconnector://{}:{}@{}:{}/{}'.format(USER, PASSWD, HOST, PORT, DB_NAME))
#print(engine)
# Create the DBSession factory class, bound to the engine above:
DBSession = sessionmaker(bind=engine)
Base.metadata.create_all(engine)  # create the tables; only needed on first use
| [
"yanghongfei@shinezone.com"
] | yanghongfei@shinezone.com |
cafe602ff007a80036f57b301bc84dd23e3e5581 | eabe529cbf8a6ae6b0ae476961d69182a1827842 | /parlai/tasks/tasks.py | 421987f23ab2036c478bf84fb049b4e23a1fb35f | [] | no_license | JiaQiSJTU/ResponseSelection | b3ce8a15129e23830ba3a7311d0b2eb831217163 | 660732f7cc9c0c419a3cf26c85430eb258e5f1f0 | refs/heads/master | 2023-07-04T18:59:47.498626 | 2021-09-01T08:04:54 | 2021-09-01T08:04:54 | 297,997,764 | 28 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,619 | py | #!/usr/bin/env python3
"""
Helper functions for defining the set of tasks in ParlAI.
The actual task list and definitions are in the file task_list.py
"""
from .task_list import task_list
from collections import defaultdict
def _preprocess(name):
return name.lower().replace('-', '')
def _build(task_list):
    """Index task definitions by normalized id and by tag.

    Returns ``(tasks, tags)``: ``tasks`` maps a normalized task id to a
    one-element list holding its definition; ``tags`` maps a normalized tag
    to the list of task definitions carrying that tag.
    """
    tasks = {}
    tags = defaultdict(list)
    for entry in task_list:
        tasks[_preprocess(entry['id'])] = [entry]
        for raw_tag in entry['tags']:
            normalized = _preprocess(raw_tag)
            # A tag may not collide with any task id registered so far.
            if normalized in tasks:
                raise RuntimeError('tag ' + normalized + ' is the same as a task name')
            tags[normalized].append(entry)
    return tasks, tags
def _id_to_task_data(t_id):
    """Resolve a task id or tag to its task definition(s).

    Raises RuntimeError when the identifier matches neither a task nor a tag.
    """
    t_id = _preprocess(t_id)
    if t_id in tasks:
        # return the task associated with this task id
        return tasks[t_id]
    if t_id in tags:
        # return the list of tasks for this tag
        return tags[t_id]
    raise RuntimeError('could not find tag/task id')
def _id_to_task(t_id):
    """Expand a '#tag' identifier into a comma-separated task string;
    anything else is already in task form and is returned unchanged."""
    if t_id[0] != '#':
        return t_id
    # It is a tag: join every task registered under it.
    return ','.join(d['task'] for d in _id_to_task_data(t_id[1:]))
def ids_to_tasks(ids):
    """Convert a comma-separated string of task ids/tags into a task string."""
    if ids is None:
        raise RuntimeError(
            'No task specified. Please select a task with --task {task_name}.'
        )
    pieces = (piece for piece in ids.split(',') if len(piece) > 0)
    return ','.join(_id_to_task(piece) for piece in pieces)
# Build the task and tag indexes from the definitions in task_list.py.
tasks, tags = _build(task_list)
| [
"Jia_qi_0217@163.com"
] | Jia_qi_0217@163.com |
35227700b56937c04f644617981a3abdf9158a9c | 11c096d0ce9d9145dddf857d513b7539c65962bf | /MarchingSquares_lights.py | 6a27bc94010b595e7f8a968acc82f53f27d71ead | [] | no_license | JessieThomson/CodeSamples | a93322d3a68de602ae5f612cfad67939969ec077 | ecd131a318b3f063f2eb34acd22c213fe4fc7952 | refs/heads/master | 2022-03-27T07:47:06.033354 | 2019-12-20T10:09:10 | 2019-12-20T10:09:10 | 109,897,459 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,544 | py | import pygame, sys, time, random
from pygame.locals import *
import numpy as np
class Particle:
    """Data holder for one moving metaball particle.

    Tracks the particle's screen position, its per-frame movement vector
    (direction and speed), and its radius.
    """

    def __init__(self):
        self.__version = 0      # reserved; not read elsewhere in this module
        self.__position = []    # [x, y] screen coordinates
        self.__movement = []    # [dx, dy] offset applied each frame
        self.__radius = 0       # metaball radius in pixels

    # Python overrides -------------------------------------------------------------------------------------------------
    def __str__(self):
        printStr = ''
        printStr += 'Position: (' + str(self.__position[0]) + ',' + str(self.__position[1]) + ') '
        printStr += 'Direction and Speed: (' + str(self.__movement[0]) + ',' + str(self.__movement[1]) + ') '
        printStr += 'Radius: ' + str(self.__radius)
        return printStr

    def __setitem__(self, position, movement, rad, c):
        # NOTE(review): this signature does not match the __setitem__ protocol
        # (obj[key] = value), so it can only be invoked directly; the trailing
        # ``c`` parameter is unused.  Signature kept for backward compatibility.
        # BUGFIX: removed a leftover Python-2 debug ``print`` statement that was
        # a syntax error under Python 3 and noisy under Python 2.
        # TODO: Check inputs
        self.__position = position
        self.__movement = movement
        self.__radius = rad

    # Properties -------------------------------------------------------------------------------------------------------
    @property
    def Position(self):
        return self.__position

    @property
    def Movement(self):
        return self.__movement

    @property
    def Radius(self):
        return self.__radius

    # Methods ----------------------------------------------------------------------------------------------------------
    def SetPosition(self, pos):
        self.__position = pos

    def SetMovement(self, move):
        self.__movement = move

    def SetRadius(self, rad):
        self.__radius = rad
def CalculateGrid(screenWidth, screenHeight, resolution):
    """Build a 2D grid of [x, y] sample coordinates covering the screen.

    Returns a list of ``y_size`` rows, each containing ``x_size`` cells of
    ``[x, y]`` pixel coordinates, where the grid dimensions are the base
    ``resolution`` plus the remainder of the screen size modulo resolution
    (preserving the original sizing scheme).

    BUGFIX: removed two leftover Python-2 debug ``print`` statements (one of
    which built a numpy array solely to print its shape) and made the integer
    (floor) division explicit with ``//`` so cell coordinates stay integers
    under Python 3 as they did under Python 2.
    """
    x_size = resolution + screenWidth % resolution
    y_size = resolution + screenHeight % resolution
    # Cell step sizes are loop-invariant; compute them once.
    cell_w = screenWidth // x_size
    cell_h = screenHeight // y_size
    return [[[x * cell_w, y * cell_h] for x in range(x_size)]
            for y in range(y_size)]
# --- Script body: animate moving metaballs and plot the implicit field on a
# --- grid.  NOTE: written for Python 2 (bare ``print`` statements below).
pygame.init()
windowSurface = pygame.display.set_mode((500, 400), 0, 32)
pygame.display.set_caption("Paint")
# get screen size
info = pygame.display.Info()
sw = info.current_w
sh = info.current_h
grid = CalculateGrid(sw, sh, 50) # NEED TO CALCULATE OCCUPIED VALUE FOR ALL GRID CELLS!!!!!!!!!!!!!!!!
# NOTE(review): len(grid[:]) copies the list just to measure it; equivalent to len(grid).
y_size = len(grid[:])
x_size = len(grid[0])
cell_size_x = sw / x_size
cell_size_y = sh / y_size
print x_size, y_size
# for celly in range(0, y_size):
#     for cellx in range(0, x_size):
#         print grid[celly][cellx][0]
# Particle tuning: speed caps and radius range (pixels).
max_dx = 5
max_dy = 5
min_radius = 15
max_radius = 60
circle_objs = []
num_circles = 10
# Spawn particles fully inside the window with random speed/direction.
for i in range(0, num_circles):
    p = Particle()
    p.SetRadius(random.randrange(min_radius, max_radius))
    p.SetPosition([random.randrange(p.Radius, sw - p.Radius), random.randrange(p.Radius, sh - p.Radius)])
    p.SetMovement([random.random() * max_dx + 1, random.random() * max_dy + 1])
    circle_objs += [p]
BLACK = (0, 0, 0)
GREEN = (0, 255, 0)
windowSurface.fill(BLACK)
while True:
    for event in pygame.event.get():
        if event.type == QUIT:
            pygame.quit()
            sys.exit()
    windowSurface.fill(BLACK)
    for particle in circle_objs:
        dx = particle.Movement[0]
        dy = particle.Movement[1]
        radius = particle.Radius
        # update position with direction
        particle.SetPosition([particle.Position[0] + dx, particle.Position[1] + dy])
        pos = particle.Position
        # check bounds
        if (pos[0] - radius) + dx < 0 or (pos[0] + radius) + dx > sw:
            dx = -dx
            particle.SetMovement([dx, dy])
        if (pos[1] - radius) + dy < 0 or (pos[1] + radius) + dy > sh:
            dy = -dy
            particle.SetMovement([dx, dy])
        # pygame.draw.circle(windowSurface, GREEN, (int(pos[0]), int(pos[1])), radius, 1)
    # Evaluate the metaball field sum(r^2 / d^2) at each grid point and mark
    # cells where the field exceeds the iso-threshold of 1.
    for cellx in range(0, x_size):
        for celly in range(0, y_size):
            sum_cell = 0
            for p in circle_objs:
                sum_cell += pow(p.Radius, 2) / (pow((grid[celly][cellx][0]) - p.Position[0], 2) + pow((grid[celly][cellx][1]) - p.Position[1], 2))
            if sum_cell > 1:
                pygame.draw.rect(windowSurface, GREEN, [grid[celly][cellx][0], grid[celly][cellx][1], 3, 3], 0)
    pygame.time.Clock().tick(20)
    pygame.display.update()
"JessieThomson@me.com"
] | JessieThomson@me.com |
e79dca9531ee613ea930b7be4c7871b1eac88c18 | d608c2b9fbfcd142fa82875f01f70e1db95cecef | /FlaskAppVenv/Lib/site-packages/pymysql/tests/test_connection.py | c626a0d39468fc0249dbdd719881a28872564b48 | [
"MIT"
] | permissive | nidheekamble/SponsCentral | 9b30918006b98f242de86920a550f8e072ba093f | b8189993cb87cc2d83e36c9d72df7a3b7d620bd7 | refs/heads/master | 2022-12-21T11:14:36.565494 | 2021-01-31T16:15:33 | 2021-01-31T16:15:33 | 135,418,522 | 1 | 2 | MIT | 2022-12-08T07:57:59 | 2018-05-30T09:16:30 | Python | UTF-8 | Python | false | false | 24,709 | py | import datetime
import sys
import time
import unittest2
import pymysql
from pymysql.tests import base
from pymysql._compat import text_type
from pymysql.constants import CLIENT
class TempUser:
    """Context manager that creates a temporary MySQL user for a test.

    Builds a CREATE USER statement (with an optional password or auth
    plugin + plugin data), grants the user SELECT on ``db``, and undoes
    whichever of those two steps actually succeeded on exit.
    """
    def __init__(self, c, user, db, auth=None, authdata=None, password=None):
        self._c = c
        self._user = user
        self._db = db
        create = "CREATE USER " + user
        if password is not None:
            create += " IDENTIFIED BY '%s'" % password
        elif auth is not None:
            create += " IDENTIFIED WITH %s" % auth
            if authdata is not None:
                create += " AS '%s'" % authdata
        try:
            c.execute(create)
            self._created = True
        except pymysql.err.InternalError:
            # already exists - TODO need to check the same plugin applies
            self._created = False
        try:
            c.execute("GRANT SELECT ON %s.* TO %s" % (db, user))
            self._grant = True
        except pymysql.err.InternalError:
            self._grant = False

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # Tear down only what __init__ actually managed to set up.
        if self._grant:
            self._c.execute("REVOKE SELECT ON %s.* FROM %s" % (self._db, self._user))
        if self._created:
            self._c.execute("DROP USER %s" % self._user)
class TestAuthentication(base.PyMySQLTestCase):
    """Authentication-plugin tests (socket, dialog, PAM, old-password, sha256).

    NOTE: the class body below executes at import time — it connects to the
    test database and runs SHOW PLUGINS to discover which auth plugins are
    available, setting the ``*_found`` flags that gate the tests via skip
    decorators.
    """

    socket_auth = False
    socket_found = False
    two_questions_found = False
    three_attempts_found = False
    pam_found = False
    mysql_old_password_found = False
    sha256_password_found = False

    import os
    osuser = os.environ.get('USER')

    # socket auth requires the current user and for the connection to be a socket
    # rest do grants @localhost due to incomplete logic - TODO change to @% then
    db = base.PyMySQLTestCase.databases[0].copy()

    socket_auth = db.get('unix_socket') is not None \
                  and db.get('host') in ('localhost', '127.0.0.1')

    # Probe the server once for its active authentication plugins.
    cur = pymysql.connect(**db).cursor()
    del db['user']
    cur.execute("SHOW PLUGINS")
    for r in cur:
        if (r[1], r[2]) != (u'ACTIVE', u'AUTHENTICATION'):
            continue
        if r[3] == u'auth_socket.so':
            socket_plugin_name = r[0]
            socket_found = True
        elif r[3] == u'dialog_examples.so':
            if r[0] == 'two_questions':
                two_questions_found = True
            elif r[0] == 'three_attempts':
                three_attempts_found = True
        elif r[0] == u'pam':
            pam_found = True
            pam_plugin_name = r[3].split('.')[0]
            if pam_plugin_name == 'auth_pam':
                pam_plugin_name = 'pam'
            # MySQL: authentication_pam
            # https://dev.mysql.com/doc/refman/5.5/en/pam-authentication-plugin.html
            # MariaDB: pam
            # https://mariadb.com/kb/en/mariadb/pam-authentication-plugin/
            # Names differ but functionality is close
        elif r[0] == u'mysql_old_password':
            mysql_old_password_found = True
        elif r[0] == u'sha256_password':
            sha256_password_found = True
        #else:
        #    print("plugin: %r" % r[0])

    def test_plugin(self):
        # The current user's plugin should match what the connection negotiated.
        if not self.mysql_server_is(self.connections[0], (5, 5, 0)):
            raise unittest2.SkipTest("MySQL-5.5 required for plugins")
        cur = self.connections[0].cursor()
        cur.execute("select plugin from mysql.user where concat(user, '@', host)=current_user()")
        for r in cur:
            self.assertIn(self.connections[0]._auth_plugin_name, (r[0], 'mysql_native_password'))

    @unittest2.skipUnless(socket_auth, "connection to unix_socket required")
    @unittest2.skipIf(socket_found, "socket plugin already installed")
    def testSocketAuthInstallPlugin(self):
        # needs plugin. lets install it.
        cur = self.connections[0].cursor()
        try:
            cur.execute("install plugin auth_socket soname 'auth_socket.so'")
            TestAuthentication.socket_found = True
            self.socket_plugin_name = 'auth_socket'
            self.realtestSocketAuth()
        except pymysql.err.InternalError:
            try:
                # MariaDB spelling of the same plugin.
                cur.execute("install soname 'auth_socket'")
                TestAuthentication.socket_found = True
                self.socket_plugin_name = 'unix_socket'
                self.realtestSocketAuth()
            except pymysql.err.InternalError:
                TestAuthentication.socket_found = False
                raise unittest2.SkipTest('we couldn\'t install the socket plugin')
        finally:
            if TestAuthentication.socket_found:
                cur.execute("uninstall plugin %s" % self.socket_plugin_name)

    @unittest2.skipUnless(socket_auth, "connection to unix_socket required")
    @unittest2.skipUnless(socket_found, "no socket plugin")
    def testSocketAuth(self):
        self.realtestSocketAuth()

    def realtestSocketAuth(self):
        with TempUser(self.connections[0].cursor(), TestAuthentication.osuser + '@localhost',
                      self.databases[0]['db'], self.socket_plugin_name) as u:
            c = pymysql.connect(user=TestAuthentication.osuser, **self.db)

    class Dialog(object):
        # Scripted responder for the 'dialog' auth plugin; ``m`` maps prompt
        # bytes to canned answers and ``fail`` forces one wrong answer first.
        fail = False

        def __init__(self, con):
            self.fail = TestAuthentication.Dialog.fail
            pass

        def prompt(self, echo, prompt):
            if self.fail:
                self.fail = False
                return b'bad guess at a password'
            return self.m.get(prompt)

    class DialogHandler(object):
        # Low-level dialog handler that speaks the packet protocol directly.

        def __init__(self, con):
            self.con = con

        def authenticate(self, pkt):
            while True:
                flag = pkt.read_uint8()
                echo = (flag & 0x06) == 0x02
                last = (flag & 0x01) == 0x01
                prompt = pkt.read_all()

                if prompt == b'Password, please:':
                    self.con.write_packet(b'stillnotverysecret\0')
                else:
                    self.con.write_packet(b'no idea what to do with this prompt\0')
                pkt = self.con._read_packet()
                pkt.check_error()
                if pkt.is_ok_packet() or last:
                    break
            return pkt

    class DefectiveHandler(object):
        # Handler with no authenticate() method: used to assert failure paths.
        def __init__(self, con):
            self.con = con

    @unittest2.skipUnless(socket_auth, "connection to unix_socket required")
    @unittest2.skipIf(two_questions_found, "two_questions plugin already installed")
    def testDialogAuthTwoQuestionsInstallPlugin(self):
        # needs plugin. lets install it.
        cur = self.connections[0].cursor()
        try:
            cur.execute("install plugin two_questions soname 'dialog_examples.so'")
            TestAuthentication.two_questions_found = True
            self.realTestDialogAuthTwoQuestions()
        except pymysql.err.InternalError:
            raise unittest2.SkipTest('we couldn\'t install the two_questions plugin')
        finally:
            if TestAuthentication.two_questions_found:
                cur.execute("uninstall plugin two_questions")

    @unittest2.skipUnless(socket_auth, "connection to unix_socket required")
    @unittest2.skipUnless(two_questions_found, "no two questions auth plugin")
    def testDialogAuthTwoQuestions(self):
        self.realTestDialogAuthTwoQuestions()

    def realTestDialogAuthTwoQuestions(self):
        TestAuthentication.Dialog.fail = False
        TestAuthentication.Dialog.m = {b'Password, please:': b'notverysecret',
                                       b'Are you sure ?': b'yes, of course'}
        with TempUser(self.connections[0].cursor(), 'pymysql_2q@localhost',
                      self.databases[0]['db'], 'two_questions', 'notverysecret') as u:
            with self.assertRaises(pymysql.err.OperationalError):
                pymysql.connect(user='pymysql_2q', **self.db)
            pymysql.connect(user='pymysql_2q', auth_plugin_map={b'dialog': TestAuthentication.Dialog}, **self.db)

    @unittest2.skipUnless(socket_auth, "connection to unix_socket required")
    @unittest2.skipIf(three_attempts_found, "three_attempts plugin already installed")
    def testDialogAuthThreeAttemptsQuestionsInstallPlugin(self):
        # needs plugin. lets install it.
        cur = self.connections[0].cursor()
        try:
            cur.execute("install plugin three_attempts soname 'dialog_examples.so'")
            TestAuthentication.three_attempts_found = True
            self.realTestDialogAuthThreeAttempts()
        except pymysql.err.InternalError:
            raise unittest2.SkipTest('we couldn\'t install the three_attempts plugin')
        finally:
            if TestAuthentication.three_attempts_found:
                cur.execute("uninstall plugin three_attempts")

    @unittest2.skipUnless(socket_auth, "connection to unix_socket required")
    @unittest2.skipUnless(three_attempts_found, "no three attempts plugin")
    def testDialogAuthThreeAttempts(self):
        self.realTestDialogAuthThreeAttempts()

    def realTestDialogAuthThreeAttempts(self):
        TestAuthentication.Dialog.m = {b'Password, please:': b'stillnotverysecret'}
        TestAuthentication.Dialog.fail = True  # fail just once. We've got three attempts after all
        with TempUser(self.connections[0].cursor(), 'pymysql_3a@localhost',
                      self.databases[0]['db'], 'three_attempts', 'stillnotverysecret') as u:
            pymysql.connect(user='pymysql_3a', auth_plugin_map={b'dialog': TestAuthentication.Dialog}, **self.db)
            pymysql.connect(user='pymysql_3a', auth_plugin_map={b'dialog': TestAuthentication.DialogHandler}, **self.db)
            with self.assertRaises(pymysql.err.OperationalError):
                pymysql.connect(user='pymysql_3a', auth_plugin_map={b'dialog': object}, **self.db)
            with self.assertRaises(pymysql.err.OperationalError):
                pymysql.connect(user='pymysql_3a', auth_plugin_map={b'dialog': TestAuthentication.DefectiveHandler}, **self.db)
            with self.assertRaises(pymysql.err.OperationalError):
                pymysql.connect(user='pymysql_3a', auth_plugin_map={b'notdialogplugin': TestAuthentication.Dialog}, **self.db)
            TestAuthentication.Dialog.m = {b'Password, please:': b'I do not know'}
            with self.assertRaises(pymysql.err.OperationalError):
                pymysql.connect(user='pymysql_3a', auth_plugin_map={b'dialog': TestAuthentication.Dialog}, **self.db)
            TestAuthentication.Dialog.m = {b'Password, please:': None}
            with self.assertRaises(pymysql.err.OperationalError):
                pymysql.connect(user='pymysql_3a', auth_plugin_map={b'dialog': TestAuthentication.Dialog}, **self.db)

    @unittest2.skipUnless(socket_auth, "connection to unix_socket required")
    @unittest2.skipIf(pam_found, "pam plugin already installed")
    @unittest2.skipIf(os.environ.get('PASSWORD') is None, "PASSWORD env var required")
    @unittest2.skipIf(os.environ.get('PAMSERVICE') is None, "PAMSERVICE env var required")
    def testPamAuthInstallPlugin(self):
        # needs plugin. lets install it.
        cur = self.connections[0].cursor()
        try:
            cur.execute("install plugin pam soname 'auth_pam.so'")
            TestAuthentication.pam_found = True
            self.realTestPamAuth()
        except pymysql.err.InternalError:
            raise unittest2.SkipTest('we couldn\'t install the auth_pam plugin')
        finally:
            if TestAuthentication.pam_found:
                cur.execute("uninstall plugin pam")

    @unittest2.skipUnless(socket_auth, "connection to unix_socket required")
    @unittest2.skipUnless(pam_found, "no pam plugin")
    @unittest2.skipIf(os.environ.get('PASSWORD') is None, "PASSWORD env var required")
    @unittest2.skipIf(os.environ.get('PAMSERVICE') is None, "PAMSERVICE env var required")
    def testPamAuth(self):
        self.realTestPamAuth()

    def realTestPamAuth(self):
        db = self.db.copy()
        import os
        db['password'] = os.environ.get('PASSWORD')
        cur = self.connections[0].cursor()
        try:
            # Save any existing grants so the user can be recreated afterwards.
            cur.execute('show grants for ' + TestAuthentication.osuser + '@localhost')
            grants = cur.fetchone()[0]
            cur.execute('drop user ' + TestAuthentication.osuser + '@localhost')
        except pymysql.OperationalError as e:
            # assuming the user doesn't exist which is ok too
            self.assertEqual(1045, e.args[0])
            grants = None
        with TempUser(cur, TestAuthentication.osuser + '@localhost',
                      self.databases[0]['db'], 'pam', os.environ.get('PAMSERVICE')) as u:
            try:
                c = pymysql.connect(user=TestAuthentication.osuser, **db)
                db['password'] = 'very bad guess at password'
                with self.assertRaises(pymysql.err.OperationalError):
                    pymysql.connect(user=TestAuthentication.osuser,
                                    auth_plugin_map={b'mysql_cleartext_password': TestAuthentication.DefectiveHandler},
                                    **self.db)
            except pymysql.OperationalError as e:
                self.assertEqual(1045, e.args[0])
                # we had 'bad guess at password' work with pam. Well at least we get a permission denied here
                with self.assertRaises(pymysql.err.OperationalError):
                    pymysql.connect(user=TestAuthentication.osuser,
                                    auth_plugin_map={b'mysql_cleartext_password': TestAuthentication.DefectiveHandler},
                                    **self.db)
        if grants:
            # recreate the user
            cur.execute(grants)

    # select old_password("crummy p\tassword");
    #| old_password("crummy p\tassword") |
    #| 2a01785203b08770 |
    @unittest2.skipUnless(socket_auth, "connection to unix_socket required")
    @unittest2.skipUnless(mysql_old_password_found, "no mysql_old_password plugin")
    def testMySQLOldPasswordAuth(self):
        if self.mysql_server_is(self.connections[0], (5, 7, 0)):
            raise unittest2.SkipTest('Old passwords aren\'t supported in 5.7')
        # pymysql.err.OperationalError: (1045, "Access denied for user 'old_pass_user'@'localhost' (using password: YES)")
        # from login in MySQL-5.6
        if self.mysql_server_is(self.connections[0], (5, 6, 0)):
            raise unittest2.SkipTest('Old passwords don\'t authenticate in 5.6')
        db = self.db.copy()
        db['password'] = "crummy p\tassword"
        with self.connections[0] as c:
            # deprecated in 5.6
            if sys.version_info[0:2] >= (3,2) and self.mysql_server_is(self.connections[0], (5, 6, 0)):
                with self.assertWarns(pymysql.err.Warning) as cm:
                    c.execute("SELECT OLD_PASSWORD('%s')" % db['password'])
            else:
                c.execute("SELECT OLD_PASSWORD('%s')" % db['password'])
            v = c.fetchone()[0]
            self.assertEqual(v, '2a01785203b08770')
            # only works in MariaDB and MySQL-5.6 - can't separate out by version
            #if self.mysql_server_is(self.connections[0], (5, 5, 0)):
            #    with TempUser(c, 'old_pass_user@localhost',
            #                  self.databases[0]['db'], 'mysql_old_password', '2a01785203b08770') as u:
            #        cur = pymysql.connect(user='old_pass_user', **db).cursor()
            #        cur.execute("SELECT VERSION()")
            c.execute("SELECT @@secure_auth")
            secure_auth_setting = c.fetchone()[0]
            c.execute('set old_passwords=1')
            # pymysql.err.Warning: 'pre-4.1 password hash' is deprecated and will be removed in a future release. Please use post-4.1 password hash instead
            if sys.version_info[0:2] >= (3,2) and self.mysql_server_is(self.connections[0], (5, 6, 0)):
                with self.assertWarns(pymysql.err.Warning) as cm:
                    c.execute('set global secure_auth=0')
            else:
                c.execute('set global secure_auth=0')
            with TempUser(c, 'old_pass_user@localhost',
                          self.databases[0]['db'], password=db['password']) as u:
                cur = pymysql.connect(user='old_pass_user', **db).cursor()
                cur.execute("SELECT VERSION()")
            # Restore the server's original secure_auth setting.
            c.execute('set global secure_auth=%r' % secure_auth_setting)

    @unittest2.skipUnless(socket_auth, "connection to unix_socket required")
    @unittest2.skipUnless(sha256_password_found, "no sha256 password authentication plugin found")
    def testAuthSHA256(self):
        c = self.connections[0].cursor()
        with TempUser(c, 'pymysql_sha256@localhost',
                      self.databases[0]['db'], 'sha256_password') as u:
            if self.mysql_server_is(self.connections[0], (5, 7, 0)):
                c.execute("SET PASSWORD FOR 'pymysql_sha256'@'localhost' ='Sh@256Pa33'")
            else:
                c.execute('SET old_passwords = 2')
                c.execute("SET PASSWORD FOR 'pymysql_sha256'@'localhost' = PASSWORD('Sh@256Pa33')")
            db = self.db.copy()
            db['password'] = "Sh@256Pa33"
            # not implemented yet so thows error
            with self.assertRaises(pymysql.err.OperationalError):
                pymysql.connect(user='pymysql_256', **db)
class TestConnection(base.PyMySQLTestCase):
    """Connection-level behavior tests (charsets, autocommit, context
    management, deferred connect, etc.) run against a live test database."""

    def test_utf8mb4(self):
        """This test requires MySQL >= 5.5"""
        arg = self.databases[0].copy()
        arg['charset'] = 'utf8mb4'
        conn = pymysql.connect(**arg)

    def test_largedata(self):
        """Large query and response (>=16MB)"""
        cur = self.connections[0].cursor()
        cur.execute("SELECT @@max_allowed_packet")
        if cur.fetchone()[0] < 16*1024*1024 + 10:
            print("Set max_allowed_packet to bigger than 17MB")
            return
        t = 'a' * (16*1024*1024)
        cur.execute("SELECT '" + t + "'")
        assert cur.fetchone()[0] == t

    def test_autocommit(self):
        con = self.connections[0]
        self.assertFalse(con.get_autocommit())

        cur = con.cursor()
        # Server-side change should be reflected client-side.
        cur.execute("SET AUTOCOMMIT=1")
        self.assertTrue(con.get_autocommit())

        con.autocommit(False)
        self.assertFalse(con.get_autocommit())
        cur.execute("SELECT @@AUTOCOMMIT")
        self.assertEqual(cur.fetchone()[0], 0)

    def test_select_db(self):
        con = self.connections[0]
        current_db = self.databases[0]['db']
        other_db = self.databases[1]['db']

        cur = con.cursor()
        cur.execute('SELECT database()')
        self.assertEqual(cur.fetchone()[0], current_db)

        con.select_db(other_db)
        cur.execute('SELECT database()')
        self.assertEqual(cur.fetchone()[0], other_db)

    def test_connection_gone_away(self):
        """
        http://dev.mysql.com/doc/refman/5.0/en/gone-away.html
        http://dev.mysql.com/doc/refman/5.0/en/error-messages-client.html#error_cr_server_gone_error
        """
        con = self.connect()
        cur = con.cursor()
        cur.execute("SET wait_timeout=1")
        time.sleep(2)
        with self.assertRaises(pymysql.OperationalError) as cm:
            cur.execute("SELECT 1+1")
        # error occures while reading, not writing because of socket buffer.
        #self.assertEqual(cm.exception.args[0], 2006)
        self.assertIn(cm.exception.args[0], (2006, 2013))

    def test_init_command(self):
        conn = self.connect(
            init_command='SELECT "bar"; SELECT "baz"',
            client_flag=CLIENT.MULTI_STATEMENTS)
        c = conn.cursor()
        c.execute('select "foobar";')
        self.assertEqual(('foobar',), c.fetchone())
        conn.close()
        with self.assertRaises(pymysql.err.Error):
            conn.ping(reconnect=False)

    def test_read_default_group(self):
        conn = self.connect(
            read_default_group='client',
        )
        self.assertTrue(conn.open)

    def test_context(self):
        # Using the connection as a context manager rolls back on error and
        # commits on clean exit.
        with self.assertRaises(ValueError):
            c = self.connect()
            with c as cur:
                cur.execute('create table test ( a int ) ENGINE=InnoDB')
                c.begin()
                cur.execute('insert into test values ((1))')
                raise ValueError('pseudo abort')
                c.commit()
        c = self.connect()
        with c as cur:
            cur.execute('select count(*) from test')
            self.assertEqual(0, cur.fetchone()[0])
            cur.execute('insert into test values ((1))')
        with c as cur:
            cur.execute('select count(*) from test')
            self.assertEqual(1, cur.fetchone()[0])
            cur.execute('drop table test')

    def test_set_charset(self):
        c = self.connect()
        c.set_charset('utf8mb4')
        # TODO validate setting here

    def test_defer_connect(self):
        import socket
        d = self.databases[0].copy()
        try:
            sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            sock.connect(d['unix_socket'])
        except KeyError:
            sock.close()
            sock = socket.create_connection(
                (d.get('host', 'localhost'), d.get('port', 3306)))
        for k in ['unix_socket', 'host', 'port']:
            try:
                del d[k]
            except KeyError:
                pass

        c = pymysql.connect(defer_connect=True, **d)
        self.assertFalse(c.open)
        c.connect(sock)
        c.close()
        sock.close()

    @unittest2.skipUnless(sys.version_info[0:2] >= (3,2), "required py-3.2")
    def test_no_delay_warning(self):
        # The deprecated no_delay option should emit a DeprecationWarning.
        current_db = self.databases[0].copy()
        current_db['no_delay'] = True
        with self.assertWarns(DeprecationWarning) as cm:
            conn = pymysql.connect(**current_db)
# A custom type and function to escape it
class Foo(object):
    # Fixture type used by the escaping tests below.
    value = "bar"
def escape_foo(x, d):
    # Custom encoder: escape a Foo by returning its ``value`` attribute.
    # The mapping argument ``d`` is accepted (encoder signature) but unused.
    return x.value
class TestEscape(base.PyMySQLTestCase):
    """Tests for Connection.escape with builtin, custom and fallback
    encoders, plus a couple of multi-statement cursor behaviors."""

    def test_escape_string(self):
        con = self.connections[0]
        cur = con.cursor()

        self.assertEqual(con.escape("foo'bar"), "'foo\\'bar'")
        # added NO_AUTO_CREATE_USER as not including it in 5.7 generates warnings
        # mysql-8.0 removes the option however
        if self.mysql_server_is(con, (8, 0, 0)):
            cur.execute("SET sql_mode='NO_BACKSLASH_ESCAPES'")
        else:
            cur.execute("SET sql_mode='NO_BACKSLASH_ESCAPES,NO_AUTO_CREATE_USER'")
        # With NO_BACKSLASH_ESCAPES the quote is doubled instead.
        self.assertEqual(con.escape("foo'bar"), "'foo''bar'")

    def test_escape_builtin_encoders(self):
        con = self.connections[0]
        cur = con.cursor()

        val = datetime.datetime(2012, 3, 4, 5, 6)
        self.assertEqual(con.escape(val, con.encoders), "'2012-03-04 05:06:00'")

    def test_escape_custom_object(self):
        con = self.connections[0]
        cur = con.cursor()

        mapping = {Foo: escape_foo}
        self.assertEqual(con.escape(Foo(), mapping), "bar")

    def test_escape_fallback_encoder(self):
        con = self.connections[0]
        cur = con.cursor()

        class Custom(str):
            pass

        # A str subclass should fall back to the text_type encoder.
        mapping = {text_type: pymysql.escape_string}
        self.assertEqual(con.escape(Custom('foobar'), mapping), "'foobar'")

    def test_escape_no_default(self):
        con = self.connections[0]
        cur = con.cursor()

        self.assertRaises(TypeError, con.escape, 42, {})

    def test_escape_dict_value(self):
        con = self.connections[0]
        cur = con.cursor()

        mapping = con.encoders.copy()
        mapping[Foo] = escape_foo
        self.assertEqual(con.escape({'foo': Foo()}, mapping), {'foo': "bar"})

    def test_escape_list_item(self):
        con = self.connections[0]
        cur = con.cursor()

        mapping = con.encoders.copy()
        mapping[Foo] = escape_foo
        self.assertEqual(con.escape([Foo()], mapping), "(bar)")

    def test_previous_cursor_not_closed(self):
        con = self.connect(
            init_command='SELECT "bar"; SELECT "baz"',
            client_flag=CLIENT.MULTI_STATEMENTS)
        cur1 = con.cursor()
        cur1.execute("SELECT 1; SELECT 2")
        cur2 = con.cursor()
        cur2.execute("SELECT 3")
        self.assertEqual(cur2.fetchone()[0], 3)

    def test_commit_during_multi_result(self):
        con = self.connect(client_flag=CLIENT.MULTI_STATEMENTS)
        cur = con.cursor()
        cur.execute("SELECT 1; SELECT 2")
        con.commit()
        cur.execute("SELECT 3")
        self.assertEqual(cur.fetchone()[0], 3)
| [
"shreyansh.chheda@gmail.com"
] | shreyansh.chheda@gmail.com |
c0d41c468fe46eae60304a6e4170b7feb432e5cd | 973eed0d6173ab95b3cea9508bd20516ef84a56d | /services/gmaps.py | 3a4b1b96e5fbcc5a7452e5331952955d404e7893 | [
"Apache-2.0"
] | permissive | FenrirUnbound/kessel-run | 213a71d94b74a518a6a92b3fb5929e1ae0e71997 | 0b39ec4aead0ee1397f46a0893166c433fe4f85b | refs/heads/master | 2020-12-07T15:24:13.924077 | 2017-07-02T02:56:58 | 2017-07-02T02:56:58 | 95,517,943 | 0 | 0 | null | 2017-07-02T02:56:59 | 2017-06-27T04:36:54 | Python | UTF-8 | Python | false | false | 774 | py | import googlemaps
import time
from map_formatter import MapFormatter
from models.route import Route
from models.secret import Secret
class Gmaps(object):
    """Thin wrapper around the Google Maps directions API for saved routes."""

    def __init__(self):
        # API client authenticated with the app's secret key.
        self.gmaps = googlemaps.Client(key=Secret.token())
        self.route_data = Route()
        self.formatter = MapFormatter()

    def lookup_travel_time(self, route_id):
        """Fetch current driving directions (with alternatives) for the
        stored route identified by ``route_id`` and return them formatted
        by MapFormatter."""
        desired_route = self.route_data.get(route_id)
        # departure_time=now makes the API include live traffic conditions.
        now = int(time.time())
        map_data = self.gmaps.directions(
            alternatives=True,
            departure_time=now,
            destination=desired_route['destination'],
            mode='driving',
            origin=desired_route['origin'],
            units='imperial'
        )
        return self.formatter.format(content=map_data)
| [
"aeneascorrupt@gmail.com"
] | aeneascorrupt@gmail.com |
1f778b04e332c6fb1e5a8be955cd628bea529f50 | 36c546160a70228e28f216e841453a55a4b665bb | /cli_common.py | 32a2ebcb79b2c99a1bb4fc6b64bbe49c2839a7ee | [] | no_license | tpietruszka/ulmfit_experiments | b4718df389478a12d920f72cdca476797d4397fc | 9385cd7d4285f93a2f220bc9fd5095051879a49a | refs/heads/master | 2020-04-21T18:18:01.633887 | 2020-04-07T17:56:19 | 2020-04-07T17:56:19 | 169,764,201 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 416 | py | import pathlib
import os
import sys
os.environ['QT_QPA_PLATFORM'] = 'offscreen' # prevents some fastai imports from causing a crash
try:
from ulmfit_experiments import experiments
except ModuleNotFoundError:
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from ulmfit_experiments import experiments
results_dir = (pathlib.Path(__file__).parent / 'trained_models').resolve()
| [
"tomek.pietruszka@gmail.com"
] | tomek.pietruszka@gmail.com |
5d5b6258a717833464801f98683c23cb6435e4f2 | 25ec545186596ea20ade231e1fa2a83faac0aa33 | /penncycle/app/models.py | f5690000a90aa27fd65ef1dfff8d9f99576c6dfa | [] | no_license | rattrayalex/PennCycle | a0f43ef7a1390fea3016ed5ac96cca5ab431e8e1 | dbcfa68c7bf9c928c559ba310e23be12e01ad998 | refs/heads/master | 2020-04-28T22:40:43.747751 | 2013-05-14T22:01:18 | 2013-05-14T22:01:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,559 | py | import datetime
from django.core.mail import send_mail
from django_localflavor_us.models import PhoneNumberField
from django.template.defaultfilters import slugify
from django.db import models
from django.core.validators import RegexValidator
from south.modelsinspector import add_introspection_rules
# Necessary because South hasn't been updated since localflavors was broken up.
add_introspection_rules([], ['django_localflavor_us\.models\.PhoneNumberField'])
# Choice tuples for model fields below: (stored value, human-readable label).
GENDER_CHOICES = (
    ('M', 'Male'),
    ('F', 'Female'),
)
GRAD_YEAR_CHOICES = (
    ('2016', '2016'),
    ('2015', '2015'),
    ('2014', '2014'),
    ('2013', '2013'),
    ('2012', '2012'),
    ('grad', 'grad student'),
    ('faculty', 'faculty'),
    ('staff', 'staff'),
    ('guest', 'guest'),
)
# Campus residences (plus "Off Campus") a student can report living in.
LIVING_LOCATIONS = (
    ('Hill', 'Hill'),
    ('KCECH', 'KCECH'),
    ('Riepe', 'Riepe'),
    ('Fisher', 'Fisher'),
    ('Ware', 'Ware'),
    ('Harrison', 'Harrison'),
    ('Harnwell', 'Harnwell'),
    ('Rodin', 'Rodin'),
    ('Stouffer', 'Stouffer'),
    ('Mayer', 'Mayer'),
    ('Du Bois', 'Du Bois'),
    ('Gregory', 'Gregory'),
    ('Sansom', 'Sansom'),
    ('Off Campus', 'Off Campus'),
)
SCHOOL_CHOICES = (
    ('C', 'College'),
    ('W', 'Wharton'),
    ('E', 'SEAS'),
    ('N', 'Nursing'),
    ('ANN', 'Annenberg'),
    ('DEN', 'Dental'),
    ('DES', 'Design'),
    ('GSE', 'Education'),
    ('LAW', 'Law'),
    ('MED', 'Medicine'),
    ('SPP', 'Social Policy & Practice'),
    ('VET', 'Veterinary'),
    ('O', 'Other or N/A'),
)
# Accepted payment methods for plan purchases.
PAYMENT_CHOICES = (
    ('cash', 'cash'),
    ('penncash', 'penncash'),
    ('bursar', 'bursar'),
    ('credit', 'credit'),
    ('group', 'group'),
    ('stouffer', 'stouffer'),
    ('free', 'free'),
    ('other', 'other'),
    ('fisher', 'fisher')
)
class Plan(models.Model):
    """A purchasable bike-share membership plan with a validity window."""
    name = models.CharField(max_length=100)
    cost = models.IntegerField()  # price in whole dollars (see __unicode__)
    start_date = models.DateField()
    end_date = models.DateField()
    description = models.TextField(max_length=150, default="Details coming soon!")
    banner = models.CharField(max_length=50, default="")

    def __unicode__(self):
        # e.g. "Semester Plan: $20" — used in admin lists and Payment.__unicode__.
        return self.name + ': $' + str(self.cost)
class Payment(models.Model):
    """A student's payment for a Plan.

    `status` tracks whether the paid ride credit is 'available' or currently
    consumed by an open ride ('out').
    """
    amount = models.DecimalField(decimal_places=2, max_digits=6)
    # NOTE(review): this limit_choices_to dict is evaluated once at import
    # time, so the "plan not yet ended" cutoff is frozen at server start --
    # confirm whether that is intended.
    plan = models.ForeignKey(
        Plan, default=1, limit_choices_to={
            'end_date__gte': datetime.date.today(),
        }
    )
    student = models.ForeignKey('Student', related_name="payments")
    date = models.DateField(auto_now_add=True)
    satisfied = models.BooleanField(default=False)
    payment_type = models.CharField(max_length=100, choices=PAYMENT_CHOICES, blank=True, null=True)
    status = models.CharField(max_length=100, default='available')

    def save(self, *args, **kwargs):
        """Save the payment, then refresh the owning student's `paid` flag.

        Now accepts and forwards Django's standard save() arguments
        (force_insert, using, ...), which the old zero-argument signature
        rejected with a TypeError.
        """
        super(Payment, self).save(*args, **kwargs)
        self.student.paid = self.student.paid_now
        # NOTE(review): the student object is mutated but never saved here, so
        # the updated `paid` flag is not persisted -- confirm callers save it.

    def __unicode__(self):
        return str(self.student) + ' for ' + str(self.plan)
class Manufacturer(models.Model):
    """A bike manufacturer; only the name is required."""
    name = models.CharField(max_length=30)
    address = models.CharField(max_length=50, blank=True)
    city = models.CharField(max_length=60, blank=True)
    country = models.CharField(max_length=50, blank=True)
    website = models.URLField(blank=True)
    email = models.EmailField(blank=True)

    def __unicode__(self):
        return self.name
class Student(models.Model):
    """A registered rider; `paid` and `waiver_signed` gate the ability to ride."""
    name = models.CharField(max_length=100)
    email = models.EmailField()
    phone = PhoneNumberField()
    penncard = models.CharField(max_length=8, validators=[RegexValidator(r'\d{8}')], unique=True)
    last_two = models.CharField(max_length=2, validators=[RegexValidator(r'\d{2}')], blank=True, null=True)
    gender = models.CharField(max_length=1, choices=GENDER_CHOICES)
    grad_year = models.CharField(max_length=50, choices=GRAD_YEAR_CHOICES)
    # Fixed: pass the callable, not its result. `datetime.date.today()` was
    # evaluated once at import time, so every student defaulted to the
    # server's start date instead of the actual signup date.
    join_date = models.DateField(default=datetime.date.today)
    school = models.CharField(max_length=100, choices=SCHOOL_CHOICES)
    major = models.CharField(max_length=50, blank=True)
    living_location = models.CharField(max_length=100, choices=LIVING_LOCATIONS)
    waiver_signed = models.BooleanField(default=False)
    paid = models.BooleanField(default=False)
    payment_type = models.CharField(max_length=100, choices=PAYMENT_CHOICES, blank=True, null=True)
    staff = models.NullBooleanField(default=False)
    plan = models.ManyToManyField('Plan', blank=True, null=True)

    @property
    def paid_now(self):
        """True if the student has at least one satisfied, currently valid payment."""
        # exists() avoids fetching every row just to count them.
        return self.current_payments.exists()

    @property
    def current_payments(self):
        """Queryset of satisfied payments whose plan covers today."""
        today = datetime.date.today()
        return self.payments.filter(
            satisfied=True,
            plan__start_date__lte=today,
            plan__end_date__gte=today,
        )

    @property
    def can_ride(self):
        """True if the waiver is signed and an unused ('available') current payment exists."""
        return self.current_payments.filter(status='available').exists() and self.waiver_signed

    def __unicode__(self):
        return u'%s %s' % (self.name, self.penncard)
class Bike(models.Model):
    """A physical bike.

    `status` flips between 'available' and 'out' (see Ride.save); the current
    location is derived from the most recent checked-in ride.
    """
    bike_name = models.CharField(max_length=100, unique=True)
    manufacturer = models.ForeignKey(Manufacturer)
    purchase_date = models.DateField()
    color = models.CharField(max_length=30, blank=True)
    status = models.CharField(max_length=100, default='available')
    serial_number = models.CharField(max_length=100, blank=True)
    tag_id = models.CharField(max_length=100, blank=True)
    key_serial_number = models.CharField(max_length=100, blank=True)
    combo = models.CharField(max_length=4, blank=True)
    combo_update = models.DateField()  # date the lock combination last changed

    @property
    def knows_combo(self):
        """Riders who checked this bike out since the combo last changed (deduplicated)."""
        rides = self.rides.filter(checkout_time__gt=self.combo_update)
        return list({ride.rider for ride in rides})

    @property
    def location(self):
        """Station of the latest checked-in ride, or the PSA station if none exists."""
        last_rides = self.rides.filter(checkin_station__isnull=False).order_by('-checkin_time')
        try:
            location = last_rides[0].checkin_station
        except IndexError:
            # Narrowed from a bare `except:`: only "no completed rides yet"
            # should fall back here; any other error must surface.
            location = Station.objects.get(name__contains="PSA")
        return location

    def __unicode__(self):
        return '#%s. Location: %s' % (self.bike_name, self.location.name)
# Weekday name -> index (Monday == 0), matching datetime's weekday() numbering
# as used by Station.is_open below.
days = {
    "Monday": 0,
    "Tuesday": 1,
    "Wednesday": 2,
    "Thursday": 3,
    "Friday": 4,
    "Saturday": 5,
    "Sunday": 6,
}
# Inverse mapping: index -> weekday name.
strings = dict([v, k] for k, v in days.items())
def decimal(time):
    """Convert an hour string such as "9" or "10:30" to a decimal hour (10.5)."""
    if len(time) <= 2:
        return int(time)
    hours, minutes = time.split(":")
    return int(hours) + float(minutes) / 60


def hour(time):
    """Convert a (value, am/pm) token pair to a 24-hour decimal hour.

    `time` is a two-element slice like ["9", "am"] or ["5", "pm"].
    NOTE(review): "12 am" maps to 12, not 0 -- behavior kept as-is; confirm
    midnight never appears in station hour strings.
    """
    base = decimal(time[0])
    if time[1] == "am" or time[0] == "12":
        return base
    return base + 12


def enter_hours(interval, info, day):
    """Append the (open, close) range parsed from `info` to interval[day].

    `info` is a five-token slice like ["9", "am", "-", "5", "pm"].
    """
    start_time = hour(info[0:2])
    end_time = hour(info[3:5])
    # setdefault replaces the previous explicit key-presence branch.
    interval.setdefault(day, []).append((start_time, end_time))
def get_hours(description):
    """Parse a human-readable hours string (one line per day or day-range)
    into {"Monday": [(open_hour, close_hour), ...], ...}.

    Relies on the module-level `days`/`strings` maps and on the lines being
    listed in weekday order (see "assumes to be in order" below): the running
    `day` counter, not the parsed day names, decides which day each range is
    filed under.
    """
    intervals = {}
    day = 0
    if not description:  # empty station
        return {}
    for line in description.split("\n"):  # assumes to be in order
        if line.split()[1] == "-":  # there is a range of days, e.g. "Monday - Friday: 9 am - 5 pm"
            start = days[line.split()[0]]
            end = days[line.split()[2][:-1]]  # strip the trailing ":"
            for i in range(end-start+1):
                that_day = strings[day]
                if "and" in line:  # multiple ranges, e.g. "... 9 am - 12 pm and 1 pm - 5 pm"
                    enter_hours(intervals, line.split()[3:8], that_day)
                    enter_hours(intervals, line.split()[9:14], that_day)
                else:
                    enter_hours(intervals, line.split()[3:8], that_day)
                day += 1
        elif line.split()[0][-1] == ":":  # single day, e.g. "Monday: 9 am - 5 pm"
            that_day = strings[day]
            if "and" in line:  # multiple ranges
                enter_hours(intervals, line.split()[1:6], that_day)
                enter_hours(intervals, line.split()[7:12], that_day)
            else:
                enter_hours(intervals, line.split()[1:6], that_day)
            day += 1
        else:  # 7 days a week.
            # NOTE(review): this reuses (and clobbers) the outer `day` counter.
            for day in range(7):
                enter_hours(intervals, line.split()[2:7], strings[day])
    return intervals
class Station(models.Model):
    """A bike station with geographic position and free-text opening hours."""
    name = models.CharField(max_length=100)
    latitude = models.FloatField(default=39.9529399)
    longitude = models.FloatField(default=-75.1905607)
    address = models.CharField(max_length=300, blank=True)
    notes = models.TextField(max_length=100, blank=True)
    # One line per day or day-range; parsed by get_hours() above.
    hours = models.TextField(max_length=100, blank=True)
    picture = models.ImageField(upload_to='img/stations', blank=True)
    capacity = models.IntegerField(default=15)
    full_name = models.CharField(max_length=100, default="")

    def __unicode__(self):
        return self.name

    @property
    def is_open(self):
        """True if the current integer hour falls inside one of today's open ranges.

        Both comparisons are strict, so the station reads as closed during the
        exact opening hour (hour 9 is not > 9 for a "9 am" opening).
        """
        ranges = get_hours(self.hours)
        today = datetime.datetime.today().weekday()
        this_hour = datetime.datetime.today().hour
        if strings[today] in ranges:
            hours = ranges[strings[today]]
            for opening in hours:
                if this_hour > opening[0] and this_hour < opening[1]:
                    return True
        return False

    @property
    def comma_name(self):
        """The hours text flattened to a single comma-separated line."""
        return ", ".join(self.hours.split("\n"))
class Ride(models.Model):
    """One checkout/checkin cycle of a bike by a student.

    Saving a ride flips the bike's and the consumed payment's status between
    'out' and 'available' depending on whether the ride is still open.
    """
    # NOTE(review): these limit_choices_to dates are evaluated once at import
    # time, freezing "today" at server start -- confirm this is acceptable.
    rider = models.ForeignKey(
        Student, limit_choices_to={
            'payments__status': 'available',
            'waiver_signed': True,
            'payments__satisfied': True,
            'payments__plan__end_date__gte': datetime.date.today(),
            'payments__plan__start_date__lte': datetime.date.today(),
        },
    )
    bike = models.ForeignKey('Bike', limit_choices_to={'status': 'available'}, related_name='rides')
    checkout_time = models.DateTimeField(auto_now_add=True)
    checkin_time = models.DateTimeField(null=True, blank=True)
    checkout_station = models.ForeignKey(Station, default=1, related_name='checkouts')
    checkin_station = models.ForeignKey(Station, blank=True, null=True, related_name='checkins')
    num_users = models.IntegerField()  # snapshot of the total student count at ride creation

    @property
    def ride_duration_days(self):
        """Whole days elapsed between checkout and checkin (or now, if still out)."""
        end = self.checkin_time if self.checkin_time is not None else datetime.datetime.now()
        return (end - self.checkout_time).days

    @property
    def status(self):
        """'out' while the bike has not been checked back in, else 'in'."""
        return 'out' if self.checkin_time is None else 'in'

    def save(self, *args, **kwargs):
        """Save the ride and synchronize bike/payment status.

        Now accepts and forwards Django's standard save() arguments, which the
        old zero-argument signature rejected.
        """
        print('in Ride save method')  # leftover debug output, kept for parity
        if not self.num_users:
            # count() avoids materializing every Student row just to measure it.
            self.num_users = Student.objects.count()
        super(Ride, self).save(*args, **kwargs)
        print('super saved!')
        if self.checkin_time is None:
            self.bike.status = 'out'
            # NOTE(review): raises IndexError if no 'available' payment exists.
            payment = self.rider.current_payments.filter(status='available')[0]
            payment.status = 'out'
        else:
            self.bike.status = 'available'
            payment = self.rider.current_payments.filter(status='out')[0]
            payment.status = 'available'
        self.bike.save()
        payment.save()

    def __unicode__(self):
        return u'%s on %s' % (self.rider, self.checkout_time)
class Page(models.Model):
    """A simple CMS page; the slug is regenerated from the name on every save."""
    content = models.TextField()
    name = models.CharField(max_length=100)
    slug = models.SlugField()

    def save(self, *args, **kwargs):
        """Regenerate the slug from the name, then save.

        Now accepts and forwards Django's standard save() arguments, which the
        old zero-argument signature rejected.
        """
        self.slug = slugify(self.name)
        super(Page, self).save(*args, **kwargs)

    def __unicode__(self):
        return self.name
class Comment(models.Model):
    """User feedback, optionally tied to a student/ride; e-mailed on every save."""
    comment = models.TextField()
    time = models.DateTimeField(auto_now_add=True)
    student = models.ForeignKey(Student, blank=True, null=True)
    ride = models.ForeignKey(Ride, blank=True, null=True)
    is_problem = models.BooleanField(default=False)

    def save(self, *args, **kwargs):
        """Save the comment and e-mail a summary to the PennCycle inbox.

        Now accepts and forwards Django's standard save() arguments, which the
        old zero-argument signature rejected.
        """
        super(Comment, self).save(*args, **kwargs)
        message = '''
        Comment: \n %s \n \n
        Time: \n %s \n \n
        Student: \n %s \n \n
        Ride: \n %s \n \n
        Marked as problem? \n %s \n \n
        ''' % (self.comment, self.time, self.student, self.ride, self.is_problem)
        send_mail('PennCycle: Comment Submitted', message, 'messenger@penncycle.org', ['messenger@penncycle.org'])

    def __unicode__(self):
        return self.comment[:30]
class Info(models.Model):
    """A dated announcement/message shown by the site."""
    message = models.TextField()
    date = models.DateField(auto_now_add=True)

    def __unicode__(self):
        return self.message + " on " + self.date.isoformat()
| [
"razzi53@gmail.com"
] | razzi53@gmail.com |
461f9252ada4badc3896e5dda3754393969d3ce1 | 42e9810116a4c726f2fb60a0133fc3b81670c0e1 | /setup.py | 41ff4cdca9f78be739cc42a2b42a8886a90aca79 | [
"BSD-3-Clause"
] | permissive | pinjasec/binarypinja | 247e6a13f3b4f58fb16aab00a3649f575b428db6 | 106bb2c68ea530cbf99079749f1a7184cf21d480 | refs/heads/master | 2020-07-24T19:57:50.921387 | 2019-09-12T11:15:21 | 2019-09-12T11:15:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 324 | py | from setuptools import find_packages, setup
# Package metadata for the `pinja` tool; installs a `pinja` console script
# that dispatches to pinja.main:main.
setup(
    name='pinja',
    packages=find_packages(),
    version='0.1.0',
    entry_points={
        'console_scripts':
        'pinja = pinja.main:main'
    },
    description='A short description of the project.',  # TODO: write a real description
    author='*pinja_sec',
    license='BSD-3',
)
| [
"poo_eix@protonmail.com"
] | poo_eix@protonmail.com |
58530dd0f15e00fa4623a19b9378cc34b6dd4111 | e5937e1305b6f1a68c98bf85d479f2cc46271f6d | /First.py | 8dccbe53121fefa238ea688a09fb13622b1be489 | [] | no_license | sishen123258/python | 14b974cc078e9b2f6e0a15561a071da7acbccd91 | 3e1fde3289f018979f9b67799fa2daee8920beaa | refs/heads/master | 2021-04-09T16:51:28.129461 | 2015-05-29T07:38:29 | 2015-05-29T07:38:29 | 35,621,952 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 876 | py | __author__ = 'Yue'
#
# class MyFirstPyClass:
# "first py class"
#
# def _init_(self, pm, ph):
# self.name = pm
# self.phone = ph
# print("self created:", self.name);
#
# def updatePhone(self, ph):
# self.phone = ph
# print("self phone changed:", self.phone);
#
class Person:
    """A person whose first name is validated to be a string.

    Demonstrates the property getter/setter/deleter pattern: the public
    `first_name` attribute is backed by the private `_first_name` slot.
    """

    def __init__(self, first_name):
        self.first_name = first_name  # goes through the setter, so it is validated

    @property
    def first_name(self):
        return self._first_name

    @first_name.setter
    def first_name(self, value):
        # Accept only plain strings; anything else is a caller error.
        if isinstance(value, str):
            self._first_name = value
        else:
            raise TypeError('Expected a string')

    @first_name.deleter
    def first_name(self):
        raise AttributeError("Can't delete attribute")
p=Person("tong")
print(p.first_name) | [
"1144299328@qq.com"
] | 1144299328@qq.com |
616c58d38ba1341e31e3110eb28caf617d8d0c51 | 7f045311cf07d86c21c3e32649a0d3343351d7b5 | /sky/c3.py | d661310b92f5e4c9a50397604a8e3141cb7587a1 | [
"BSD-2-Clause"
] | permissive | amititash/sky | 2c757ec572902f12a6a550597044101d6e7cdbc1 | ffaf33e46825522bb87654593a0ca77c095c98b0 | refs/heads/master | 2020-04-14T22:50:55.264011 | 2019-02-06T05:11:22 | 2019-02-06T05:11:22 | 164,180,289 | 0 | 0 | NOASSERTION | 2019-01-05T04:02:40 | 2019-01-05T04:02:40 | null | UTF-8 | Python | false | false | 2,190 | py | #!/usr/bin/env python3
# --------- 1. Setup ----------------------------------------------
PROJECT_NAME = 'sophonone'
import os, base64, re, logging
from elasticsearch import Elasticsearch
from sky.crawler_services import CrawlElasticSearchService
from sky.crawler_plugins import CrawlElasticSearchPluginNews
import json, sys
import json  # (duplicate import, harmless)
import pika
'''
# Parse the auth and host from env:
bonsai = 'https://5bgygw52r4:637c8qay66@cj-test-9194042377.us-west-2.bonsaisearch.net' #os.environ['BONSAI_URL']
auth = re.search('https\:\/\/(.*)\@', bonsai).group(1).split(':')
host = bonsai.replace('https://%s:%s@' % (auth[0], auth[1]), '')
# Connect to cluster over SSL using auth for best security:
es_header = [{
'host': host,
'port': 443,
'use_ssl': True,
'http_auth': (auth[0],auth[1])
}]
es = Elasticsearch(es_header)
'''
# Tunnelled/local Elasticsearch endpoint; the string literal above is the
# disabled hosted-Bonsai variant kept for reference (it never executes).
es = Elasticsearch([{'host': '886f099c.ngrok.io', 'port': 80}])
# Instantiate the new Elasticsearch connection:
cs = CrawlElasticSearchService(PROJECT_NAME, es, CrawlElasticSearchPluginNews)
# SECURITY(review): AMQP credentials are hard-coded in this URL --
# consider loading them from the environment instead.
connection = pika.BlockingConnection(pika.URLParameters('amqp://titash:test123@54.175.53.47/paays_products_cj'))
channel = connection.channel()
channel.queue_declare(queue='crawl')
#code starts here
def goCrawl(ch, method, properties, msg):
    """pika consumer callback: parse one crawl job from the queue and run it
    through the module-level crawl service `cs`."""
    item = json.loads(msg.decode('utf8'))
    print(item)
    print("****crawling...", item["sku"])
    # Look up the 'testcrawl' plugin, point it at this job's config, and crawl.
    crawler = cs['testcrawl']
    crawler.save_config(item)
    crawler.run()
#Execution starts from here
# Register goCrawl as the consumer for the 'crawl' queue (no_ack=True: the
# broker will not re-deliver a message if processing fails) and block forever.
channel.basic_consume(goCrawl,
                      queue='crawl',
                      no_ack=True)
print(' [*] Waiting for messages. To exit press CTRL+C')
channel.start_consuming()
"amititash@gmail.com"
] | amititash@gmail.com |
ba543c69df6097031a5e3dfae710324c39db3560 | 43671eb2be91782ac2096b9ec64e3a02b5aa9559 | /elComandante/lowVoltage_agente.py | cab98c5f206c5377ec93e1596070cdea46f3e0d2 | [] | no_license | psi46/elComandante | fba87eda9c404de0ceed0ab9b81f5258f172325b | 052066eda34d1e335128af214e55e330f9b6e855 | refs/heads/master | 2020-04-06T12:37:01.024183 | 2016-11-28T14:23:31 | 2016-11-28T14:23:31 | 28,905,523 | 1 | 4 | null | 2015-08-08T11:06:20 | 2015-01-07T08:25:52 | FORTRAN | UTF-8 | Python | false | false | 7,695 | py | ## @file
## Implements the agente class lowVoltage_agente
## @ingroup elComandante
## @ingroup elAgente
import os
import subprocess
from myutils import process
import el_agente
def preexec():
    # Runs in the child between fork and exec (subprocess preexec_fn):
    # os.setpgrp() puts the child in its own process group so that terminal
    # signals aimed at the parent's group do not reach the spawned client.
    os.setpgrp()
## Agente class that communicates with the lowVoltageClient
##
## This is the agente whose job is to communicte with the lowVoltageClient. It
## has a very simple task: To turn on and off the low voltage for the test
## setup.
##
## The low voltage device normally has to operate only before the test
## (lowVoltage_agente.prepare_test) and nothing has to be done during the
## actual testing, except for monitoring the device state.
##
## The action performed is normally only a power cycle which serves as a hard
## reset for the test hardware.
##
## The lowVoltag agente sends very high level commands to the lowVoltageClient such
## as SET OUTPUT ON, SET OUTPUT OFF, or EXEC POWERCYCLE and it does not have to know about the
## details of these operations. It expects that the client handles these
## things and that when it if finished, it will answer the FINISHED command
## with giving back FINISHED. Therefore, the agente waits for the operations
## of the client to finish. Since the client is a separate process, elComandante
## (of which this agente is a part) may continue to start or monitor other
## processes through other agentes.
##
## The configuration of the lowVoltag agente is made in the elComandante.conf
## and the elComandante.ini files. The elComandante.conf file contains information
## about the setup such as low voltage device type and device file name:
## @code
## lowVoltageSubscription: /lowVoltage
##
## [lowVoltageClient]
## lowVoltageType: yoctorelay
## @endcode
##
## The initialization only holds the parameter
## @code
## LowVoltageUse: True
## @endcode
## which enables or disables the lowVoltageAgente.
## @ingroup elComandante
## @ingroup elAgente
class lowVoltage_agente(el_agente.el_agente):
    ## Initializes the agente
    ## @param timestamp Timestamp from elComandante
    ## @param log Log handler
    ## @param sclient Subsystem client handle
    def __init__(self, timestamp, log, sclient):
        el_agente.el_agente.__init__(self, timestamp, log, sclient)
        self.agente_name = "lowVoltageAgente"
        self.client_name = "lowVoltageClient"

    ## Reads the permanent configuration (device type, subsystem channel, log dir)
    ## @param conf Configuration handle
    ## @return Boolean for success
    def setup_configuration(self, conf):
        ## Type of the low voltage device, to be passed to the client
        self.device_type = conf.get("lowVoltageClient", "lowVoltageType")
        ## Subsystem channel the client listens on
        self.subscription = conf.get("subsystem", "lowVoltageSubscription")
        ## Directory for the log files
        self.logdir = conf.get("Directories", "dataDir") + "/logfiles/"
        return True

    ## Reads the run time initialization (enable/disable flag)
    ## @param init Initialization handle
    ## @return Boolean for success
    def setup_initialization(self, init):
        self.active = init.getboolean("LowVoltage", "LowVoltageUse")
        return True

    ## Checks whether a lowVoltageClient process is already running
    ##
    ## Raises if another instance is found.
    ## @return Boolean, False when inactive or when no client is running
    def check_client_running(self):
        if not self.active:
            return False
        if process.check_process_running(self.client_name + ".py"):
            # Fixed: an unreachable `return True` used to follow this raise.
            raise Exception("Another %s is already running. Please close this client first." % self.client_name)
        return False

    ## Starts the lowVoltageClient in its own xterm with parameters from the
    ## configuration
    ## @param timestamp Timestamp passed on to the client
    ## @return Boolean for success
    def start_client(self, timestamp):
        if not self.active:
            return True
        command = "xterm +sb -geometry 120x20-0+300 -fs 10 -fa 'Mono' -e '"
        command += "cd ../lowVoltageClient && python ../lowVoltageClient/lowVoltageClient.py "
        command += "--timestamp {0:d} ".format(timestamp)
        command += "--directory {0:s} ".format(self.logdir)
        command += "--device-type {0:s}'".format(self.device_type)
        self.log << "Starting " + self.client_name + " ..."
        ## Child process handle for the lowVoltageClient
        self.child = subprocess.Popen(command, shell = True, preexec_fn = preexec)
        return True

    ## Subscribes to the subsystem channel the lowVoltageClient listens on
    ## @return None
    def subscribe(self):
        if (self.active):
            self.sclient.subscribe(self.subscription)

    ## Checks whether the subsystem channel is open and the server is responding
    ## @return Boolean, whether it is responding or not
    def check_subscription(self):
        if (self.active):
            return self.sclient.checkSubscription(self.subscription)
        return True

    ## Asks the lowVoltageClient to exit via the subsystem
    ## @return True when inactive, otherwise False (exit only requested, not confirmed)
    def request_client_exit(self):
        if not self.active:
            return True
        self.sclient.send(self.subscription, ":EXIT\n")
        return False

    ## Tries to kill the lowVoltageClient process
    ## @return Boolean for success
    def kill_client(self):
        if not self.active:
            return True
        try:
            self.child.kill()
        except Exception:
            # Best effort: the child may never have been started or may have
            # exited already. Narrowed from a bare `except:` so that
            # KeyboardInterrupt/SystemExit are no longer swallowed.
            pass
        return True

    ## Prepares a test: power cycles the low voltage to hard reset the hardware
    ## @param test The current test
    ## @param environment The environment the test should run in
    ## @return Boolean for success
    def prepare_test(self, test, environment):
        # Run before a test is executed
        if not self.active:
            return True
        self.sclient.send(self.subscription, ":EXEC:POWERCYCLE\n")
        self.set_pending()
        return True

    ## Nothing to do while a test runs; the device is only monitored
    ## @return Always returns True
    def execute_test(self):
        # Runs a test
        if not self.active:
            return True
        return True

    ## Nothing to clean up directly after a single test
    ## (comment fixed: it previously claimed to turn off "the beam", copied
    ## from another agente)
    ## @return Boolean for success
    def cleanup_test(self):
        # Run after a test has executed
        if not self.active:
            return True
        return True

    ## Final cleanup: power cycle once more after all tests
    ## @return Boolean for success
    def final_test_cleanup(self):
        # Run after all tests have executed
        if not self.active:
            return True
        self.sclient.send(self.subscription, ":EXEC:POWERCYCLE\n")
        self.set_pending()
        return True

    ## Drains subsystem packets to see whether the client finished or errored
    ##
    ## An ERROR packet raises even when no action is pending, since the state
    ## of the low voltage device may change spontaneously.
    ## @return Boolean, whether the client has finished or not
    def check_finished(self):
        if not self.active:
            return True
        while True:
            packet = self.sclient.getFirstPacket(self.subscription)
            if packet.isEmpty():
                break
            if self.pending and "FINISHED" in packet.data.upper():
                self.pending = False
            elif "ERROR" in packet.data.upper():
                self.pending = False
                raise Exception("Error from %s!" % self.client_name)
        return not self.pending

    ## Sends a FINISHED query and marks the agente as waiting for the reply
    ## @return None
    def set_pending(self):
        self.sclient.send(self.subscription, ":FINISHED\n")
        self.pending = True
| [
"mrossini@phys.ethz.ch"
] | mrossini@phys.ethz.ch |
57c22cd8876ae6bdf928f7d58919d905f86c43a5 | d343b6f47b9241f3822845c6627b82c9f98b95c4 | /core/apps.py | ab6efcf7bc66b12c7822555e95a5755b283f13ad | [] | no_license | ivan371/kiber | 11f23171bd51b29d210c44db0784b6caea31bdd6 | 39d7834c5e4e5497061748bd66232936300adda4 | refs/heads/master | 2021-05-05T06:30:55.836149 | 2020-06-16T20:37:59 | 2020-06-16T20:37:59 | 118,800,832 | 0 | 0 | null | 2020-06-16T20:38:00 | 2018-01-24T17:51:29 | JavaScript | UTF-8 | Python | false | false | 144 | py | from django.apps import AppConfig
class CoreConfig(AppConfig):
    """Django app configuration for the `core` app."""
    name = 'core'

    def ready(self):
        # Imported for its side effects at app startup; the name itself is
        # intentionally unused.
        # NOTE(review): presumably this triggers registration/signal wiring in
        # core.views -- confirm before removing.
        from .views import UserViewSet
| [
"ivan@DESKTOP-GM6Q430.localdomain"
] | ivan@DESKTOP-GM6Q430.localdomain |
37494b82fc1bfeefecdc11791dc68f84c757fca1 | d4184f2468852c5312e3e7a7e2033f1700534130 | /workflow/scripts/write_qc_metadata.py | ff0bfbcb948de8f68f439f1276ce7b4d6f9d7863 | [] | no_license | austintwang/ENCODE_scATAC_bingren_raw | d85b529cecd55e596c1d96ea370ade57cf418e81 | 9c1a6b23615a9d0f6d28f7e9ea6937755016b086 | refs/heads/master | 2023-08-17T04:03:46.276522 | 2021-09-19T00:40:32 | 2021-09-19T00:40:32 | 407,326,407 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,760 | py | """
Adapted from https://github.com/ENCODE-DCC/atac-seq-pipeline/blob/master/src/encode_lib_log_parser.py
"""
from collections import OrderedDict
import json
import os
def to_int(var):
    """Return `var` parsed as an int, or None if it cannot be parsed.

    Also tolerates non-string/non-numeric input such as None, which
    previously escaped as an unhandled TypeError.
    """
    try:
        return int(var)
    except (ValueError, TypeError):
        return None
def to_float(var):
    """Return `var` parsed as a float, or None if it cannot be parsed.

    Also tolerates non-string/non-numeric input such as None, which
    previously escaped as an unhandled TypeError.
    """
    try:
        return float(var)
    except (ValueError, TypeError):
        return None
def to_bool(var):
    """Interpret common truthy strings ('true', 't', 'ok', 'yes', '1'), case-insensitively."""
    # Set literal instead of rebuilding set(list) on every call.
    return var.lower() in {'true', 't', 'ok', 'yes', '1'}
def parse_frac_mito_qc(txt):
    """Read a two-column (key<TAB>value) mito-fraction QC file.

    Keys beginning with 'frac_' are parsed as floats, everything else as ints.
    """
    result = OrderedDict()
    with open(txt, 'r') as fp:
        lines = fp.read().strip('\n').split('\n')
    for line in lines:
        k, v = line.split('\t')
        result[k] = float(v) if k.startswith('frac_') else int(v)
    return result
def parse_flagstat_qc(txt):
    """Parse a `samtools flagstat` output file into an OrderedDict of counts.

    Each flagstat line has the shape "<pass> + <fail> <label> ..."; the first
    pass below scrapes the raw strings per label, the second pass converts
    whatever was captured into ints/floats. Percentages are normalized to the
    0-100 range and map to 0.0 when samtools printed 'nan'/'N/A'/'NA'.
    Returns an empty dict for a falsy `txt` path.
    """
    result = OrderedDict()
    if not txt:
        return result
    total = ''
    total_qc_failed = ''
    duplicates = ''
    duplicates_qc_failed = ''
    mapped = ''
    mapped_qc_failed = ''
    mapped_pct = ''
    paired = ''
    paired_qc_failed = ''
    read1 = ''
    read1_qc_failed = ''
    read2 = ''
    read2_qc_failed = ''
    paired_properly = ''
    paired_properly_qc_failed = ''
    paired_properly_pct = ''
    with_itself = ''
    with_itself_qc_failed = ''
    singletons = ''
    singletons_qc_failed = ''
    singletons_pct = ''
    diff_chroms = ''
    diff_chroms_qc_failed = ''
    delimiter_pass_fail = ' + '
    # --- first pass: scrape the raw pass/fail strings out of each line ---
    with open(txt, 'r') as f:
        for line in f:
            if ' total ' in line:
                if ' in total ' in line:
                    tmp1 = line.split(' in total ')
                else:
                    tmp1 = line.split(' total ')
                line1 = tmp1[0]
                tmp1 = line1.split(delimiter_pass_fail)
                total = tmp1[0]
                total_qc_failed = tmp1[1]
            if ' duplicates' in line:
                tmp2 = line.split(' duplicates')
                line2 = tmp2[0]
                tmp2 = line2.split(delimiter_pass_fail)
                duplicates = tmp2[0]
                duplicates_qc_failed = tmp2[1]
            if ' mapped (' in line:
                tmp3 = line.split(' mapped (')
                line3_1 = tmp3[0]
                tmp3_1 = line3_1.split(delimiter_pass_fail)
                mapped = tmp3_1[0]
                mapped_qc_failed = tmp3_1[1]
                line3_2 = tmp3[1]
                tmp3_2 = line3_2.split(':')
                mapped_pct = tmp3_2[0]  # .replace('%','')
            if ' paired in sequencing' in line:
                tmp2 = line.split(' paired in sequencing')
                line2 = tmp2[0]
                tmp2 = line2.split(delimiter_pass_fail)
                paired = tmp2[0]
                paired_qc_failed = tmp2[1]
            if ' read1' in line:
                tmp2 = line.split(' read1')
                line2 = tmp2[0]
                tmp2 = line2.split(delimiter_pass_fail)
                read1 = tmp2[0]
                read1_qc_failed = tmp2[1]
            if ' read2' in line:
                tmp2 = line.split(' read2')
                line2 = tmp2[0]
                tmp2 = line2.split(delimiter_pass_fail)
                read2 = tmp2[0]
                read2_qc_failed = tmp2[1]
            if ' properly paired (' in line:
                tmp3 = line.split(' properly paired (')
                line3_1 = tmp3[0]
                tmp3_1 = line3_1.split(delimiter_pass_fail)
                paired_properly = tmp3_1[0]
                paired_properly_qc_failed = tmp3_1[1]
                line3_2 = tmp3[1]
                tmp3_2 = line3_2.split(':')
                paired_properly_pct = tmp3_2[0]  # .replace('%','')
            if ' with itself and mate mapped' in line:
                tmp3 = line.split(' with itself and mate mapped')
                line3_1 = tmp3[0]
                tmp3_1 = line3_1.split(delimiter_pass_fail)
                with_itself = tmp3_1[0]
                with_itself_qc_failed = tmp3_1[1]
            if ' singletons (' in line:
                tmp3 = line.split(' singletons (')
                line3_1 = tmp3[0]
                tmp3_1 = line3_1.split(delimiter_pass_fail)
                singletons = tmp3_1[0]
                singletons_qc_failed = tmp3_1[1]
                line3_2 = tmp3[1]
                tmp3_2 = line3_2.split(':')
                singletons_pct = tmp3_2[0]  # .replace('%','')
            if ' with mate mapped to a different chr' in line:
                tmp3 = line.split(' with mate mapped to a different chr')
                line3_1 = tmp3[0]
                tmp3_1 = line3_1.split(delimiter_pass_fail)
                diff_chroms = tmp3_1[0]
                diff_chroms_qc_failed = tmp3_1[1]
    # --- second pass: convert the captured strings; empty captures are skipped ---
    if total:
        result['total_reads'] = int(total)
    if total_qc_failed:
        result['total_reads_qc_failed'] = int(total_qc_failed)
    if duplicates:
        result['duplicate_reads'] = int(duplicates)
    if duplicates_qc_failed:
        result['duplicate_reads_qc_failed'] = int(duplicates_qc_failed)
    if mapped:
        result['mapped_reads'] = int(mapped)
    if mapped_qc_failed:
        result['mapped_reads_qc_failed'] = int(mapped_qc_failed)
    if mapped_pct:
        if 'nan' not in mapped_pct and 'N/A' not in mapped_pct \
                and 'NA' not in mapped_pct:
            if '%' in mapped_pct:
                mapped_pct = mapped_pct.replace('%', '')
                result['pct_mapped_reads'] = float(mapped_pct)
            else:
                # no '%' sign: value is a fraction, scale to percent
                result['pct_mapped_reads'] = 100.0 * float(mapped_pct)
        else:
            result['pct_mapped_reads'] = 0.0
    if paired:
        result['paired_reads'] = int(paired)
    if paired_qc_failed:
        result['paired_reads_qc_failed'] = int(paired_qc_failed)
    if read1:
        result['read1'] = int(read1)
    if read1_qc_failed:
        result['read1_qc_failed'] = int(read1_qc_failed)
    if read2:
        result['read2'] = int(read2)
    if read2_qc_failed:
        result['read2_qc_failed'] = int(read2_qc_failed)
    if paired_properly:
        result['properly_paired_reads'] = int(paired_properly)
    if paired_properly_qc_failed:
        result['properly_paired_reads_qc_failed'] = int(
            paired_properly_qc_failed)
    if paired_properly_pct:
        if 'nan' not in paired_properly_pct and \
                'N/A' not in paired_properly_pct \
                and 'NA' not in paired_properly_pct:
            if '%' in paired_properly_pct:
                paired_properly_pct = paired_properly_pct.replace('%', '')
                result['pct_properly_paired_reads'] = float(
                    paired_properly_pct)
            else:
                result['pct_properly_paired_reads'] = 100.0 * \
                    float(paired_properly_pct)
        else:
            result['pct_properly_paired_reads'] = 0.0
    if with_itself:
        result['with_itself'] = int(with_itself)
    if with_itself_qc_failed:
        result['with_itself_qc_failed'] = int(with_itself_qc_failed)
    if singletons:
        result['singletons'] = int(singletons)
    if singletons_qc_failed:
        result['singletons_qc_failed'] = int(singletons_qc_failed)
    if singletons_pct:
        if 'nan' not in singletons_pct and 'N/A' not in singletons_pct \
                and 'NA' not in singletons_pct:
            if '%' in singletons_pct:
                singletons_pct = singletons_pct.replace('%', '')
                result['pct_singletons'] = float(singletons_pct)
            else:
                result['pct_singletons'] = 100.0 * float(singletons_pct)
        else:
            result['pct_singletons'] = 0.0
    if diff_chroms:
        result['diff_chroms'] = int(diff_chroms)
    if diff_chroms_qc_failed:
        result['diff_chroms_qc_failed'] = int(diff_chroms_qc_failed)
    return result
def parse_dup_qc(txt):
    """Parse duplicate-marking stats from either a Picard MarkDuplicates
    metrics file or a sambamba markdup log into an OrderedDict.

    The file is first scanned for Picard's UNPAIRED_READS_EXAMINED header; if
    found, the following line is parsed as the metrics row. Otherwise the file
    is re-read and parsed as sambamba's free-text log. Returns an empty dict
    for a falsy `txt` path.
    """
    result = OrderedDict()
    if not txt:
        return result
    paired_reads = ''
    unpaired_reads = ''
    unmapped_reads = ''
    unpaired_dupes = ''
    paired_dupes = ''
    paired_opt_dupes = ''
    dupes_pct = ''
    picard_log_found = False
    # picard markdup: header line, then one tab-separated metrics row
    with open(txt, 'r') as f:
        header = ''  # if 'UNPAIRED_READS_EXAMINED' in header
        content = ''
        for line in f:
            if header:
                # decimal comma -> decimal point for locale-formatted output
                content = line.replace(',', '.')
                picard_log_found = True
                break
            if 'UNPAIRED_READS_EXAMINED' in line:
                header = line
    if picard_log_found:
        header_items = header.split('\t')
        content_items = content.split('\t')
        m = dict(zip(header_items, content_items))
        unpaired_reads = m['UNPAIRED_READS_EXAMINED']
        paired_reads = m['READ_PAIRS_EXAMINED']
        unmapped_reads = m['UNMAPPED_READS']
        unpaired_dupes = m['UNPAIRED_READ_DUPLICATES']
        paired_dupes = m['READ_PAIR_DUPLICATES']
        paired_opt_dupes = m['READ_PAIR_OPTICAL_DUPLICATES']
        if 'PERCENT_DUPLICATION' in m:
            dupes_pct = m['PERCENT_DUPLICATION']
        else:
            dupes_pct = '0'
    else:
        # sambamba markdup: scrape counts out of the human-readable log
        with open(txt, 'r') as f:
            for line in f:
                if ' end pairs' in line:
                    tmp1 = line.strip().split(' ')
                    paired_reads = tmp1[1]
                if ' single ends ' in line:
                    tmp1 = line.strip().split(' ')
                    unpaired_reads = tmp1[1]
                    unmapped_reads = tmp1[6]
                if 'found ' in line:
                    tmp1 = line.strip().split(' ')
                    if paired_reads == '0':
                        unpaired_dupes = tmp1[1]  # SE
                        paired_dupes = 0
                    else:
                        unpaired_dupes = 0
                        # NOTE(review): under Python 3, `/` is float division,
                        # so this yields e.g. '2.5' for odd counts and the
                        # int() conversion below would then raise -- confirm
                        # which Python this runs on.
                        paired_dupes = str(int(tmp1[1])/2)  # PE
        if paired_reads == '0':  # SE
            dupes_pct = '{0:.2f}'.format(
                float(unpaired_dupes)/float(unpaired_reads))
        elif paired_reads:
            dupes_pct = '{0:.2f}'.format(
                float(paired_dupes)/float(paired_reads))
    # convert whatever was captured; empty captures are skipped
    if unpaired_reads:
        result['unpaired_reads'] = int(unpaired_reads)
    if paired_reads:
        result['paired_reads'] = int(paired_reads)
    if unmapped_reads:
        result['unmapped_reads'] = int(unmapped_reads)
    if unpaired_dupes:
        result['unpaired_duplicate_reads'] = int(unpaired_dupes)
    if paired_dupes:
        result['paired_duplicate_reads'] = int(paired_dupes)
    if paired_opt_dupes:
        result['paired_optical_duplicate_reads'] = int(paired_opt_dupes)
    if dupes_pct:
        result['pct_duplicate_reads'] = float(dupes_pct)*100.0
    return result
def parse_lib_complexity_qc(txt):
    """Parse the single 7-column PBC (library complexity) stats line.

    Columns: total fragments, distinct fragments, positions with one read,
    positions with two reads, NRF, PBC1, PBC2. Returns an empty dict for a
    falsy `txt` path.
    """
    result = OrderedDict()
    if not txt:
        return result
    with open(txt, 'r') as f:
        for line in f:
            arr = line.strip().split('\t')
            break
    result['total_fragments'] = to_int(arr[0])
    result['distinct_fragments'] = to_int(arr[1])
    result['positions_with_one_read'] = to_int(arr[2])
    # Fixed: arr[3] used to be stored under 'positions_with_one_read' again,
    # silently discarding arr[2].
    # NOTE(review): the corrected key name is inferred from the PBC column
    # order -- confirm downstream consumers of this JSON tolerate it.
    result['positions_with_two_reads'] = to_int(arr[3])
    result['NRF'] = to_float(arr[4])
    result['PBC1'] = to_float(arr[5])
    result['PBC2'] = to_float(arr[6])
    return result
def parse_picard_est_lib_size_qc(txt):
    """Read a file whose first line is Picard's estimated library size.

    Returns an empty dict for a falsy `txt` path.
    """
    result = OrderedDict()
    if not txt:
        return result
    with open(txt, 'r') as f:
        first_line = f.readlines()[0]
    result['picard_est_lib_size'] = float(first_line.strip())
    return result
def build_quality_metric_header(sample_data, config, data_path, out_path=None):
    """Build the common header fields of an ENCODE quality-metric JSON object.

    Aliases are formed as "<lab>:<experiment>$<replicate>$<basename>".
    `out_path` now defaults to `data_path`: the call sites in this script pass
    only three arguments, which previously raised a TypeError at runtime.
    """
    if out_path is None:
        out_path = data_path
    lab = config["dcc_lab"]
    experiment = sample_data["experiment"]
    replicate = sample_data["replicate_num"]
    data_alias = f"{lab}:{experiment}${replicate}${os.path.basename(data_path)}"
    alias = f"{lab}:{experiment}${replicate}${os.path.basename(out_path)}"
    h = OrderedDict({
        "lab": lab,
        "award": config["dcc_award"],
        "quality_metric_of": data_alias,
        "aliases": [alias],
    })
    return h
def write_json(data, out_path):
with open(out_path, "w") as f:
json.dump(data, f, indent=4)
try:
out_group = snakemake.params['out_group']
sample_data = snakemake.params['sample_data']
data_path = snakemake.input['data_file']
config = snakemake.config
if out_group == "fastqs":
pass
elif out_group == "mapping":
alignment_stats_out = snakemake.output['alignment_stats']
samstats_raw = snakemake.input['samstats_raw']
a = parse_flagstat_qc(samstats_raw)
h = build_quality_metric_header(sample_data, config, data_path)
alignment_stats = h | a
write_json(alignment_stats, alignment_stats_out)
elif out_group == "filtering":
alignment_stats_out = snakemake.output['alignment_stats']
lib_comp_stats_out = snakemake.output['lib_comp_stats']
samstats_filtered = snakemake.input['samstats_filtered']
picard_markdup = snakemake.input['picard_markdup']
pbc_stats = snakemake.input['pbc_stats']
frac_mito = snakemake.input['frac_mito']
s = parse_flagstat_qc(samstats_filtered)
p = parse_picard_est_lib_size_qc(picard_markdup)
l = parse_lib_complexity_qc(pbc_stats)
m = parse_frac_mito_qc(frac_mito)
h = build_quality_metric_header(sample_data, config, data_path)
alignment_stats = h | s | m
lib_comp_stats = h | p | l
write_json(alignment_stats, alignment_stats_out)
write_json(lib_comp_stats, lib_comp_stats_out)
elif out_group == "fragments":
pass
elif out_group == "archr":
pass
except NameError:
pass | [
"austin.wang1357@gmail.com"
] | austin.wang1357@gmail.com |
2eb829c87ae5849e5b0d7bf0a4c9e93efc347ecc | 32d934cabb1eac917bb583a1428b87f78b335a4e | /code_per_day/day_47_to_48.py | a8d229a9b40eae3b056c893a2848e2fd3d553e8c | [] | no_license | zotroneneis/magical_universe | 7339fefcfdf47e21e5ebcc6f56e3f1949230932a | c5da3367b7854c4cf9625c45e03742dba3a6d63c | refs/heads/master | 2022-12-07T20:21:25.427333 | 2022-11-13T14:33:01 | 2022-11-13T14:33:01 | 141,951,821 | 414 | 58 | null | null | null | null | UTF-8 | Python | false | false | 1,266 | py | from collections import defaultdict
class CastleKilmereMember:
""" Creates a member of the Castle Kilmere School of Magic """
def __init__(self, name: str, birthyear: int, sex: str):
self.name = name
self.birthyear = birthyear
self.sex = sex
self._traits = defaultdict(lambda: False)
def add_trait(self, trait, value=True):
self._traits[trait] = value
def exhibits_trait(self, trait: str) -> bool:
value = self._traits[trait]
return value
def print_traits(self):
true_traits = [trait for trait, value in self._traits.items() if value]
false_traits = [trait for trait, value in self._traits.items() if not value]
if true_traits:
print(f"{self.name} is {', '.join(true_traits)}")
if false_traits:
print(f"{self.name} is not {', '.join(false_traits)}")
if (not true_traits and not false_traits):
print(f"{self.name} does not have traits yet")
if __name__ == "__main__":
bromley = CastleKilmereMember('Bromley Huckabee', 1959, 'male')
bromley.add_trait('tidy-minded')
bromley.add_trait('kind')
bromley.exhibits_trait('kind')
bromley.exhibits_trait('mean')
bromley.print_traits()
| [
"popkes@gmx.net"
] | popkes@gmx.net |
0ae601e2d21d74e13bbdd78607d416c058eed97a | 5d4def230bad7174e2a2352d277d391dfa118694 | /vocab.py | 3ae126b0cb1ca0de8128bb2c779c875720f8c902 | [] | no_license | hitercs/biLSTM-SlotFilling | a733b2df0e65834b6c6a91d609daa60c73c596ca | 8a0c1baed51e668e7fc4119f69ca6491e7328e7c | refs/heads/master | 2020-03-14T07:20:50.869860 | 2018-05-01T10:35:18 | 2018-05-01T10:35:18 | 131,502,870 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,673 | py | #-*- encoding: utf-8 -*-
import codecs
import settings
from util import Util
class BiVocab(object):
def __init__(self, src_vocab, trg_vocab):
self.src_vocab = src_vocab
self.trg_vocab = trg_vocab
self.src_vocab_size = src_vocab.vocab_size
self.trg_vocab_size = trg_vocab.vocab_size
self.pad_id = self.trg_vocab.get_idx(settings.PAD)
self.unk_id = self.trg_vocab.get_idx(settings.UNK)
def get_src_word(self, idx):
return self.src_vocab.get_word(idx)
def get_trg_word(self, idx):
return self.trg_vocab.get_word(idx)
def get_src_idx(self, w):
return self.src_vocab.get_idx(w)
def get_trg_idx(self, w):
return self.trg_vocab.get_idx(w)
class Vocab(object):
def __init__(self, vocab_size, vocab_fn):
self.word2idx = dict()
self.idx2word = dict()
self.vocab_size = vocab_size
self.build_vocab(vocab_fn)
def build_vocab(self, vocab_fn):
with codecs.open(vocab_fn, encoding='utf-8', mode='r', buffering=settings.read_buffer_size) as fp:
for line in fp:
word, idx, _ = line.strip().split()
Util.add_vocab(self.word2idx, word, int(idx))
Util.add_vocab(self.idx2word, int(idx), word)
def get_idx(self, word):
if not word in self.word2idx:
return self.word2idx[settings.UNK]
if self.word2idx[word] > self.vocab_size - 1:
return self.word2idx[settings.UNK]
return self.word2idx[word]
def get_word(self, idx):
if idx > self.vocab_size - 1:
return settings.UNK
return self.idx2word[idx] | [
"schen@ir.hit.edu.cn"
] | schen@ir.hit.edu.cn |
43d6dc559a18868fb2fe56aa0c08b57bada0fce3 | f80b0891fbd9bbda3532327ed8129406d00947b7 | /IP/Lista-6/Comando de Repetição (while) – Roteiro Laboratório/3.py | 49086875a6382fccd9177714b3408668a594bca2 | [] | no_license | viniciuspolux/UFPB | d7e7cd7101e90b008391605832404ba2ae6d2001 | 445fc953d9499e41e753c1c3e5c57937d93b2d59 | refs/heads/master | 2021-01-19T20:44:51.869087 | 2017-08-30T17:16:07 | 2017-08-30T17:16:07 | 101,222,709 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 326 | py | numeroa,numerob=map(int,input("Digite dois números com um espaço entre eles =").split())
x=1
while x < 2 :
if(numeroa > 0 and numerob > 0):
soma= numeroa + numerob
prod= numeroa * numerob
print("{} {}" .format(soma,prod))
else:
print("Você digitou um número inválido")
x += 1
| [
"30605184+viniciuspolux@users.noreply.github.com"
] | 30605184+viniciuspolux@users.noreply.github.com |
c52152bc18b44d48c909e1256ce9ae3b6d37647f | 310a141e68d730f2e3a0dee21b14cca65883e521 | /courses/migrations/0008_course_passed.py | 1ec04deca6b315eb29f330e1aa93eb3abec9e6b9 | [] | no_license | sokogfb/edu_fcih | 5c2eb883b88d70a34c7f21487527f18a8f6a26b2 | c480b448350226a1727f1d155e99dbe1ca6d30e7 | refs/heads/master | 2021-09-12T14:53:38.484104 | 2018-04-17T23:13:10 | 2018-04-17T23:13:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 380 | py | # Generated by Django 2.0.3 on 2018-04-02 19:28
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('courses', '0007_term_active'),
]
operations = [
migrations.AddField(
model_name='course',
name='passed',
field=models.BooleanField(default=False),
),
]
| [
"abdelrahman.sico_931@fci.helwan.edu.eg"
] | abdelrahman.sico_931@fci.helwan.edu.eg |
ec3ee36ac1ce3cea82d7bfe1563d5a76ade5968f | 7a583c534559ad08950e6e1564d4a59095ce9669 | /autoclient/src/plugins/memory.py | bc4df39f71e332e172e24144a790ef9c8973a5a3 | [] | no_license | wyyalt/cmdb | 67fbeabda2035e11c1933ab84b75c9c3feac7d92 | c43c17db7c6fb9f63b2387b7054a89a54bee199a | refs/heads/master | 2021-05-05T12:05:38.026134 | 2017-09-25T14:58:24 | 2017-09-25T14:58:24 | 104,718,572 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,812 | py | import os
from lib import convert
from lib.conf.config import settings
class Memory(object):
def __init__(self):
pass
@classmethod
def initial(cls):
return cls()
def process(self, command_func, debug):
if debug:
output = open(os.path.join(settings.BASEDIR, 'files/memory.out'), 'r', encoding='utf-8').read()
else:
output = command_func("sudo dmidecode -q -t 17 2>/dev/null")
return self.parse(output)
def parse(self, content):
"""
解析shell命令返回结果
:param content: shell 命令结果
:return:解析后的结果
"""
ram_dict = {}
key_map = {
'Size': 'capacity',
'Locator': 'slot',
'Type': 'model',
'Speed': 'speed',
'Manufacturer': 'manufacturer',
'Serial Number': 'sn',
}
devices = content.split('Memory Device')
for item in devices:
item = item.strip()
if not item:
continue
if item.startswith('#'):
continue
segment = {}
lines = item.split('\n\t')
for line in lines:
if not line.strip():
continue
if len(line.split(':')):
key, value = line.split(':')
else:
key = line.split(':')[0]
value = ""
if key in key_map:
if key == 'Size':
segment[key_map['Size']] = convert.convert_mb_to_gb(value, 0)
else:
segment[key_map[key.strip()]] = value.strip()
ram_dict[segment['slot']] = segment
return ram_dict
| [
"wyyalt@live.com"
] | wyyalt@live.com |
af585888517df64c46a62653fa6ff3912e6b9f0d | 508c5e01aa7dce530093d5796250eff8d74ba06c | /code/venv/lib/python3.6/site-packages/pgadmin4/pgadmin/browser/server_groups/servers/databases/schemas/tests/test_schema_get.py | d39692be0008269bf1791e585f1e0e92b09181fa | [
"MIT",
"PostgreSQL"
] | permissive | jhkuang11/UniTrade | f220b0d84db06ff17626b3daa18d4cb8b72a5d3f | 5f68b853926e167936b58c8543b8f95ebd6f5211 | refs/heads/master | 2022-12-12T15:58:30.013516 | 2019-02-01T21:07:15 | 2019-02-01T21:07:15 | 166,479,655 | 0 | 0 | MIT | 2022-12-07T03:59:47 | 2019-01-18T22:19:45 | Python | UTF-8 | Python | false | false | 2,132 | py | ##########################################################################
#
# pgAdmin 4 - PostgreSQL Tools
#
# Copyright (C) 2013 - 2017, The pgAdmin Development Team
# This software is released under the PostgreSQL Licence
#
##########################################################################
from pgadmin.browser.server_groups.servers.databases.tests import utils as \
database_utils
from pgadmin.browser.server_groups.servers.tests import utils as server_utils
from pgadmin.utils.route import BaseTestGenerator
from regression import parent_node_dict
from regression.python_test_utils import test_utils as utils
class SchemaGetTestCase(BaseTestGenerator):
""" This class will add new schema under database node. """
scenarios = [
# Fetching default URL for extension node.
('Check Schema Node URL', dict(url='/browser/schema/obj/'))
]
def runTest(self):
""" This function will delete schema under database node. """
schema = parent_node_dict["schema"][-1]
db_id = schema["db_id"]
server_id = schema["server_id"]
server_response = server_utils.connect_server(self, server_id)
if not server_response["data"]["connected"]:
raise Exception("Could not connect to server to connect the"
" database.")
db_con = database_utils.connect_database(self,
utils.SERVER_GROUP,
server_id,
db_id)
if not db_con["info"] == "Database connected.":
raise Exception("Could not connect to database to get the schema.")
schema_id = schema["schema_id"]
schema_response = self.tester.get(
self.url + str(utils.SERVER_GROUP) + '/' +
str(server_id) + '/' + str(db_id) +
'/' + str(schema_id),
content_type='html/json')
self.assertEquals(schema_response.status_code, 200)
# Disconnect the database
database_utils.disconnect_database(self, server_id, db_id)
| [
"jhkuang11@gmail.com"
] | jhkuang11@gmail.com |
8fcae2a12359d68896b1d9e5d7db84dacd86f151 | 3f453e74ae03c777d4ca803623cf9f69b70ace87 | /mappanel.py | 3d92e74b1764f1483756c88d1f68dbe828478608 | [] | no_license | acidtobi/weewar_clone | 5a348ece62ff22f3a0812867a93ac5f5a370f782 | 5b0575ee7534278d49df446a852e33d3f232d6e7 | refs/heads/master | 2021-05-04T10:41:44.741431 | 2016-02-11T20:47:23 | 2016-02-11T20:47:23 | 50,377,074 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,916 | py | from __future__ import division
import wx
import wx.lib.scrolledpanel
class MapPanel(wx.lib.scrolledpanel.ScrolledPanel):
def __init__(self, parent, background_tile, size, innerbitmap=None):
self.background_tile = background_tile
self.InnerSize = size
self.innerbitmap = innerbitmap
self._Buffer = None
self.virtual_x = 0
self.virtual_y = 0
screen_width, screen_height = wx.DisplaySize()
self.background = wx.EmptyBitmap(screen_width, screen_height)
dc = wx.MemoryDC()
dc.SelectObject(self.background)
tile_width, tile_height = self.background_tile.Size
for rownum in range(int(screen_height / tile_height)):
for colnum in range(int(screen_width / tile_width)):
dc.DrawBitmap(self.background_tile, colnum * tile_width, rownum * tile_height, True)
width_px, height_px = size
wx.lib.scrolledpanel.ScrolledPanel.__init__(self, parent, size=(width_px, height_px))
self.SetupScrolling()
self.SetScrollRate(1, 1)
self.Bind(wx.EVT_PAINT, self.OnPaint)
self.Bind(wx.EVT_SIZE, self.OnResize)
#self.OnResize(None)
def setInnerBitmap(self, bitmap):
self.innerbitmap = bitmap
def GetVirtualPosition(self, (x, y)):
scrolled_x, scrolled_y = self.CalcScrolledPosition((self.virtual_x, self.virtual_y))
return x - scrolled_x, y - scrolled_y
def UpdateDrawing(self):
dc = wx.MemoryDC()
dc.SelectObject(self._Buffer)
self_width, self_height = self.InnerSize
sizer_width, sizer_height = self.GetSize()
self.virtual_x = max(0, (sizer_width - self_width) / 2)
self.virtual_y = max(0, (sizer_height - self_height) / 2)
tile_width, tile_height = self.background_tile.Size
offset_x, offset_y = self.virtual_x % tile_width, self.virtual_y % tile_height
dc.DrawBitmap(self.background, offset_x - tile_width, offset_y - tile_height)
if self.innerbitmap:
dc.DrawBitmap(self.innerbitmap, self.virtual_x, self.virtual_y, True)
del dc
self.Refresh(eraseBackground=False)
self.Update()
def OnPaint(self, e):
dc = wx.PaintDC(self)
x, y = self.CalcScrolledPosition((0, 0))
dc.DrawBitmap(self._Buffer, x, y)
def OnResize(self, e):
width, height = e.GetSize()
inner_width, inner_height = self.InnerSize
self.SetSize((width, height))
self.SetVirtualSize((inner_width, inner_height))
self._Buffer = wx.EmptyBitmap(max(width, inner_width), max(height, inner_height))
self.UpdateDrawing()
# ==============================================================================================
# tests
# ==============================================================================================
if __name__ == "__main__":
class MainFrame(wx.Frame):
def __init__(self, parent, title):
background = wx.Bitmap("tiles_background.jpg")
background_tile = wx.Bitmap("logo_background_repeating.png")
self.foreground = wx.Bitmap("rubberducky.png")
wx.Frame.__init__(self, parent, title=title, size=background.Size)
self.mappanel = MapPanel(self, background_tile, size=self.foreground.Size, innerbitmap=self.foreground)
leftpanel = wx.Panel(self, -1, size=(100, -1))
self.box = wx.BoxSizer(wx.HORIZONTAL)
self.box.Add(leftpanel, 0, wx.EXPAND)
self.box.Add(self.mappanel, 2, wx.EXPAND)
self.SetAutoLayout(True)
self.SetSizer(self.box)
self.Layout()
#self.Bind(wx.EVT_PAINT, self.OnPaint)
self.mappanel.setInnerBitmap(self.foreground)
app = wx.App()
mainframe = MainFrame(None, "Map Panel")
mainframe.Show()
app.MainLoop()
| [
"acidtobi@gmail.com"
] | acidtobi@gmail.com |
cd04729dafc1306355807963c87d375bbfa6c2a7 | 6b096e1074479b13dc9d28cec7e5220d2ecc5c13 | /Python/q34.py | af3b5a71997a6c98c126bd1f89d3957a291886a6 | [] | no_license | wzb1005/leetcode | ed3684e580b4dae37dce0af8314da10c89b557f7 | 4ba73ac913993ba5bb7deab5971aaeaaa16ed4d7 | refs/heads/master | 2023-03-19T06:37:30.274467 | 2021-03-09T03:02:57 | 2021-03-09T03:02:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 112 | py | from typing import List
class Solution:
def searchRange(self, nums: List[int], target: int) -> List[int]:
| [
"chiyexiao@icloud.com"
] | chiyexiao@icloud.com |
c770c4a0ef473e599ea32a354884f2360f88218a | 365051fefddc9d549201225915122cb413168919 | /final CNN data aug/data_aug_v03.py | 1360161c08ef375089f3a624dd6d9ccfb8841482 | [] | no_license | PauloAxcel/SERS-EHD-pillars | 1623f5141a3d6fcd6b6f13e83afe1dac08cb893a | 89c029be9f3cb435103f497644d30e75ce3ae3ad | refs/heads/main | 2023-07-19T10:04:44.237797 | 2021-09-04T21:05:14 | 2021-09-04T21:05:14 | 375,076,169 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,140 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Oct 15 13:05:02 2019
@author: paulo
"""
#DATA AUGMENTATION
import os
import cv2
import random
import numpy as np
import matplotlib.pyplot as plt
from keras.preprocessing.image import ImageDataGenerator
#get the folders
SERS_train_dir = r'/home/newuser/Desktop/emily try/Data2(part1+part3+part4+part6)/Original Data/SERS/'
NOENH_train_dir = r'/home/newuser/Desktop/emily try/Data2(part1+part3+part4+part6)/Original Data/nonSERS/'
gen_dir_tra = r'/home/newuser/Desktop/emily try/Data2(part1+part3+part4+part6)/Training/'
gen_dir_val = r'/home/newuser/Desktop/emily try/Data2(part1+part3+part4+part6)/Validation/'
#num is the total number of samples that we want to generate.
num = 5000
#get the files inside the folders
SERS_train = os.listdir(SERS_train_dir)
NOENH_train = os.listdir(NOENH_train_dir)
all_dir = [SERS_train_dir, NOENH_train_dir]
all_data = [SERS_train, NOENH_train]
for dire,file in zip(all_dir, all_data):
#for i in range(len(all_data)):
for j in range(num):
#generate a rand to select a random file in the folder
# rand = random.randint(0,len(all_data[i])-1)
rand = random.randint(0,len(file)-1)
if len(file[rand].split('_'))>1:
continue
else:
# im = cv2.imread(all_dir[i]+all_data[i][rand])
im = cv2.imread(dire+file[rand])
# plt.imshow(im)
#datagen.flow needs a rank 4 matrix, hence we use np.expand_dims to increase the dimention of the image
image = np.expand_dims(im,0)
# word_label = all_data[i][rand].split('.')[0]
word_label = file[rand].split('.')[0]
#Generate new image process
datagen = ImageDataGenerator(featurewise_center=0,
samplewise_center=0,
rotation_range=180,
horizontal_flip=True,
vertical_flip=True,
fill_mode='nearest')
#label files based on the train/validation by employing a rand function
lab = dire.split('/')[-2]
if random.random() < 0.8:
aug_iter = datagen.flow(image,save_to_dir = gen_dir_tra , save_prefix = lab+'_train_' + word_label +'_gen_' + str(random.randint(0,num)))
else:
aug_iter = datagen.flow(image,save_to_dir = gen_dir_val ,save_prefix = lab+'_val_' + word_label +'_gen_' + str(random.randint(0,num)))
#next function produces the result from the datagen flow. collapses the function.
# plt.imshow(next(aug_iter)[0].astype(np.uint8))
aug_images = [next(aug_iter)[0].astype(np.uint8) for m in range(1)]
| [
"noreply@github.com"
] | PauloAxcel.noreply@github.com |
62c20ca9fb15d381b187ac793e03b1b5242e6d37 | 495b0b8de3ecc341511cdb10f11368b35b585bea | /SoftLayer/CLI/modules/filters.py | 1e4274ac04ae064468c5d1d0736b540b8f35416c | [] | no_license | hugomatic/softlayer-api-python-client | cf6c1e6bfa32e559e72f8b0b069339ae8edd2ede | 9c115f0912ee62763b805941593f6dd50de37068 | refs/heads/master | 2021-01-18T11:09:19.122162 | 2013-04-09T01:44:51 | 2013-04-09T01:44:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 996 | py | """
usage: sl help filters
Filters are used to limit the amount of results. Some commands will accept a
filter operation for certain fields. Filters can be applied across multiple
fields in most cases.
Available Operations:
Case Insensitive
'value' Exact value match
'value*' Begins with value
'*value' Ends with value
'*value*' Contains value
Case Sensitive
'~ value' Exact value match
'> value' Greater than value
'< value' Less than value
'>= value' Greater than or equal to value
'<= value' Less than or equal to value
Examples:
sl cci list --datacenter=dal05
sl cci list --hostname='prod*'
sl cci list --network=100 --cpu=2
sl cci list --network='< 100' --cpu=2
sl cci list --memory='>= 2048'
Note: Comparison operators (>, <, >=, <=) can be used with integers, floats,
and strings.
"""
# :copyright: (c) 2013, SoftLayer Technologies, Inc. All rights reserved.
# :license: BSD, see LICENSE for more details.
| [
"k3vinmcdonald@gmail.com"
] | k3vinmcdonald@gmail.com |
11aa915574de5fc4f11f5c7671205cfbaa964fe2 | 3c000380cbb7e8deb6abf9c6f3e29e8e89784830 | /venv/Lib/site-packages/cobra/modelimpl/copp/lacpallowhist5min.py | 2d5afaedb106d24fcc43463d8548e0ce36b681e4 | [] | no_license | bkhoward/aciDOM | 91b0406f00da7aac413a81c8db2129b4bfc5497b | f2674456ecb19cf7299ef0c5a0887560b8b315d0 | refs/heads/master | 2023-03-27T23:37:02.836904 | 2021-03-26T22:07:54 | 2021-03-26T22:07:54 | 351,855,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31,598 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2020 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class LacpAllowHist5min(Mo):
"""
Mo doc not defined in techpub!!!
"""
meta = StatsClassMeta("cobra.model.copp.LacpAllowHist5min", "Per Interface Allow Counters for Lacp")
counter = CounterMeta("bytesRate", CounterCategory.GAUGE, "bytes-per-second", "LacpAllowed Bytes rate")
counter._propRefs[PropCategory.IMPLICIT_MIN] = "bytesRateMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "bytesRateMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "bytesRateAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "bytesRateSpct"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "bytesRateThr"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "bytesRateTr"
meta._counters.append(counter)
counter = CounterMeta("bytes", CounterCategory.COUNTER, "bytes", "LacpAllowed Bytes")
counter._propRefs[PropCategory.IMPLICIT_CUMULATIVE] = "bytesCum"
counter._propRefs[PropCategory.IMPLICIT_PERIODIC] = "bytesPer"
counter._propRefs[PropCategory.IMPLICIT_MIN] = "bytesMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "bytesMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "bytesAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "bytesSpct"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "bytesThr"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "bytesTr"
counter._propRefs[PropCategory.IMPLICIT_RATE] = "bytesRate"
meta._counters.append(counter)
counter = CounterMeta("pktsRate", CounterCategory.GAUGE, "packets-per-second", "LacpAllowed Packets rate")
counter._propRefs[PropCategory.IMPLICIT_MIN] = "pktsRateMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "pktsRateMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "pktsRateAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "pktsRateSpct"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "pktsRateThr"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "pktsRateTr"
meta._counters.append(counter)
counter = CounterMeta("pkts", CounterCategory.COUNTER, "packets", "LacpAllowed Packets")
counter._propRefs[PropCategory.IMPLICIT_CUMULATIVE] = "pktsCum"
counter._propRefs[PropCategory.IMPLICIT_PERIODIC] = "pktsPer"
counter._propRefs[PropCategory.IMPLICIT_MIN] = "pktsMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "pktsMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "pktsAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "pktsSpct"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "pktsThr"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "pktsTr"
counter._propRefs[PropCategory.IMPLICIT_RATE] = "pktsRate"
meta._counters.append(counter)
meta.moClassName = "coppLacpAllowHist5min"
meta.rnFormat = "HDcoppLacpAllow5min-%(index)s"
meta.category = MoCategory.STATS_HISTORY
meta.label = "historical Per Interface Allow Counters for Lacp stats in 5 minute"
meta.writeAccessMask = 0x1
meta.readAccessMask = 0x1
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = False
meta.parentClasses.add("cobra.model.svi.If")
meta.parentClasses.add("cobra.model.pc.AggrIf")
meta.parentClasses.add("cobra.model.l1.PhysIf")
meta.parentClasses.add("cobra.model.l3.RtdIf")
meta.parentClasses.add("cobra.model.l3.EncRtdIf")
meta.superClasses.add("cobra.model.copp.LacpAllowHist")
meta.superClasses.add("cobra.model.stats.Item")
meta.superClasses.add("cobra.model.stats.Hist")
meta.rnPrefixes = [
('HDcoppLacpAllow5min-', True),
]
prop = PropMeta("str", "bytesAvg", "bytesAvg", 32068, PropCategory.IMPLICIT_AVG)
prop.label = "LacpAllowed Bytes average value"
prop.isOper = True
prop.isStats = True
meta.props.add("bytesAvg", prop)
prop = PropMeta("str", "bytesCum", "bytesCum", 32064, PropCategory.IMPLICIT_CUMULATIVE)
prop.label = "LacpAllowed Bytes cumulative"
prop.isOper = True
prop.isStats = True
meta.props.add("bytesCum", prop)
prop = PropMeta("str", "bytesMax", "bytesMax", 32067, PropCategory.IMPLICIT_MAX)
prop.label = "LacpAllowed Bytes maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("bytesMax", prop)
prop = PropMeta("str", "bytesMin", "bytesMin", 32066, PropCategory.IMPLICIT_MIN)
prop.label = "LacpAllowed Bytes minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("bytesMin", prop)
prop = PropMeta("str", "bytesPer", "bytesPer", 32065, PropCategory.IMPLICIT_PERIODIC)
prop.label = "LacpAllowed Bytes periodic"
prop.isOper = True
prop.isStats = True
meta.props.add("bytesPer", prop)
prop = PropMeta("str", "bytesRate", "bytesRate", 32072, PropCategory.IMPLICIT_RATE)
prop.label = "LacpAllowed Bytes rate"
prop.isOper = True
prop.isStats = True
meta.props.add("bytesRate", prop)
prop = PropMeta("str", "bytesRateAvg", "bytesRateAvg", 32084, PropCategory.IMPLICIT_AVG)
prop.label = "LacpAllowed Bytes rate average value"
prop.isOper = True
prop.isStats = True
meta.props.add("bytesRateAvg", prop)
prop = PropMeta("str", "bytesRateMax", "bytesRateMax", 32083, PropCategory.IMPLICIT_MAX)
prop.label = "LacpAllowed Bytes rate maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("bytesRateMax", prop)
prop = PropMeta("str", "bytesRateMin", "bytesRateMin", 32082, PropCategory.IMPLICIT_MIN)
prop.label = "LacpAllowed Bytes rate minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("bytesRateMin", prop)
prop = PropMeta("str", "bytesRateSpct", "bytesRateSpct", 32085, PropCategory.IMPLICIT_SUSPECT)
prop.label = "LacpAllowed Bytes rate suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("bytesRateSpct", prop)
prop = PropMeta("str", "bytesRateThr", "bytesRateThr", 32086, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "LacpAllowed Bytes rate thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("bytesRateThr", prop)
prop = PropMeta("str", "bytesRateTr", "bytesRateTr", 32087, PropCategory.IMPLICIT_TREND)
prop.label = "LacpAllowed Bytes rate trend"
prop.isOper = True
prop.isStats = True
meta.props.add("bytesRateTr", prop)
prop = PropMeta("str", "bytesSpct", "bytesSpct", 32069, PropCategory.IMPLICIT_SUSPECT)
prop.label = "LacpAllowed Bytes suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("bytesSpct", prop)
prop = PropMeta("str", "bytesThr", "bytesThr", 32070, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "LacpAllowed Bytes thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("bytesThr", prop)
prop = PropMeta("str", "bytesTr", "bytesTr", 32071, PropCategory.IMPLICIT_TREND)
prop.label = "LacpAllowed Bytes trend"
prop.isOper = True
prop.isStats = True
meta.props.add("bytesTr", prop)
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "cnt", "cnt", 16212, PropCategory.REGULAR)
prop.label = "Number of Collections During this Interval"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("cnt", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "index", "index", 31203, PropCategory.REGULAR)
prop.label = "History Index"
prop.isConfig = True
prop.isAdmin = True
prop.isCreateOnly = True
prop.isNaming = True
meta.props.add("index", prop)
prop = PropMeta("str", "lastCollOffset", "lastCollOffset", 111, PropCategory.REGULAR)
prop.label = "Collection Length"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("lastCollOffset", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "pktsAvg", "pktsAvg", 32104, PropCategory.IMPLICIT_AVG)
prop.label = "LacpAllowed Packets average value"
prop.isOper = True
prop.isStats = True
meta.props.add("pktsAvg", prop)
prop = PropMeta("str", "pktsCum", "pktsCum", 32100, PropCategory.IMPLICIT_CUMULATIVE)
prop.label = "LacpAllowed Packets cumulative"
prop.isOper = True
prop.isStats = True
meta.props.add("pktsCum", prop)
prop = PropMeta("str", "pktsMax", "pktsMax", 32103, PropCategory.IMPLICIT_MAX)
prop.label = "LacpAllowed Packets maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("pktsMax", prop)
prop = PropMeta("str", "pktsMin", "pktsMin", 32102, PropCategory.IMPLICIT_MIN)
prop.label = "LacpAllowed Packets minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("pktsMin", prop)
prop = PropMeta("str", "pktsPer", "pktsPer", 32101, PropCategory.IMPLICIT_PERIODIC)
prop.label = "LacpAllowed Packets periodic"
prop.isOper = True
prop.isStats = True
meta.props.add("pktsPer", prop)
prop = PropMeta("str", "pktsRate", "pktsRate", 32108, PropCategory.IMPLICIT_RATE)
prop.label = "LacpAllowed Packets rate"
prop.isOper = True
prop.isStats = True
meta.props.add("pktsRate", prop)
prop = PropMeta("str", "pktsRateAvg", "pktsRateAvg", 32120, PropCategory.IMPLICIT_AVG)
prop.label = "LacpAllowed Packets rate average value"
prop.isOper = True
prop.isStats = True
meta.props.add("pktsRateAvg", prop)
prop = PropMeta("str", "pktsRateMax", "pktsRateMax", 32119, PropCategory.IMPLICIT_MAX)
prop.label = "LacpAllowed Packets rate maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("pktsRateMax", prop)
prop = PropMeta("str", "pktsRateMin", "pktsRateMin", 32118, PropCategory.IMPLICIT_MIN)
prop.label = "LacpAllowed Packets rate minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("pktsRateMin", prop)
prop = PropMeta("str", "pktsRateSpct", "pktsRateSpct", 32121, PropCategory.IMPLICIT_SUSPECT)
prop.label = "LacpAllowed Packets rate suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("pktsRateSpct", prop)
prop = PropMeta("str", "pktsRateThr", "pktsRateThr", 32122, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "LacpAllowed Packets rate thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("pktsRateThr", prop)
prop = PropMeta("str", "pktsRateTr", "pktsRateTr", 32123, PropCategory.IMPLICIT_TREND)
prop.label = "LacpAllowed Packets rate trend"
prop.isOper = True
prop.isStats = True
meta.props.add("pktsRateTr", prop)
prop = PropMeta("str", "pktsSpct", "pktsSpct", 32105, PropCategory.IMPLICIT_SUSPECT)
prop.label = "LacpAllowed Packets suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("pktsSpct", prop)
prop = PropMeta("str", "pktsThr", "pktsThr", 32106, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "LacpAllowed Packets thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("pktsThr", prop)
prop = PropMeta("str", "pktsTr", "pktsTr", 32107, PropCategory.IMPLICIT_TREND)
prop.label = "LacpAllowed Packets trend"
prop.isOper = True
prop.isStats = True
meta.props.add("pktsTr", prop)
prop = PropMeta("str", "repIntvEnd", "repIntvEnd", 110, PropCategory.REGULAR)
prop.label = "Reporting End Time"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("repIntvEnd", prop)
prop = PropMeta("str", "repIntvStart", "repIntvStart", 109, PropCategory.REGULAR)
prop.label = "Reporting Start Time"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("repIntvStart", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
meta.namingProps.append(getattr(meta.props, "index"))
# Deployment Meta
meta.deploymentQuery = True
meta.deploymentType = "Ancestor"
meta.deploymentQueryPaths.append(DeploymentPathMeta("l1EthIfToEPg", "EPG", "cobra.model.fv.EPg"))
def __init__(self, parentMoOrDn, index, markDirty=True, **creationProps):
    """Construct the MO under *parentMoOrDn*, keyed by its naming property.

    ``index`` is this class's single naming property (registered on
    ``meta.namingProps`` above); it is forwarded to ``Mo.__init__`` as the
    positional naming value.
    """
    # "index" is the only naming property, so it is the sole naming value.
    namingVals = [index]
    Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"bkhoward@live.com"
] | bkhoward@live.com |
853e8cd7d44015eb9bfbe2e8d913ffb2d35fe27c | 35c75ed0ca9850a6dd62d0e19b7e2ab472c5f292 | /shop/migrations/0002_auto_20171109_0354.py | f417d1eaa71b14f9d43a38cb507c9d803779dad6 | [] | no_license | HyeriChang/tuanh | 1314b270d7b8d44424c5b6b82361b20397d30f4b | 38546afde0a4fa6a54727b4595b7cfa7c8baec1e | refs/heads/master | 2021-05-07T06:32:31.060921 | 2017-12-06T09:36:48 | 2017-12-06T09:36:48 | 111,763,554 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,656 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-11-09 03:54
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration for the ``shop`` app.

    Adds descriptive fields (brand, color, condition, detail, material,
    rating, size) to the existing ``product`` model.  Each field supplies a
    one-off default with ``preserve_default=False``: Django uses the default
    only to back-fill rows that already exist when the column is added.
    """
    # Must be applied on top of the app's initial migration.
    dependencies = [
        ('shop', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='product',
            name='brand',
            field=models.CharField(default='', max_length=50),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='product',
            name='color',
            # NOTE(review): back-fill default 'x' differs from the '' used by
            # the other CharFields — presumably a placeholder; confirm intent.
            field=models.CharField(default='x', max_length=50),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='product',
            name='condition',
            field=models.CharField(default='', max_length=50),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='product',
            name='detail',
            # Free-form description, hence TextField rather than CharField.
            field=models.TextField(default=''),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='product',
            name='material',
            field=models.CharField(default='', max_length=50),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='product',
            name='rating',
            # Existing rows are back-filled with a rating of 1.
            field=models.FloatField(default=1),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='product',
            name='size',
            field=models.CharField(default='', max_length=50),
            preserve_default=False,
        ),
    ]
| [
"anh.nguyentu3110@gmail.com"
] | anh.nguyentu3110@gmail.com |
76f0bfb3491090f86e4d11cf509c6a61dde62e2f | 1f9d8381f111ee34be61a82cdf2038afc1a44079 | /sequenceToLine.py | 848d936908c046a2428b7f84395126b3190bd404 | [
"BSD-3-Clause"
] | permissive | el-mat/ectools | f35d305c8fd558436cd7534c5fe4db66fffead24 | 031eb0300c82392915d8393a5fedb4d3452b15bf | refs/heads/master | 2021-01-23T21:10:42.869782 | 2014-12-01T14:07:00 | 2014-12-01T14:07:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 430 | py | #!/usr/bin/env
import sys
from seqio import iteratorFromExtension
from nucio import fileIterator
if not len(sys.argv) == 2:
sys.exit("sequencToLine.py in.{fa.fq}\n")
it = iteratorFromExtension(sys.argv[1])
for record in fileIterator(sys.argv[1], it):
if hasattr(record, "desc"):
print "\t".join([record.name, record.seq, record.desc, record.qual])
else:
print "\t".join([record.name, record.seq])
| [
"gurtowsk@mshadoop1.cshl.edu"
] | gurtowsk@mshadoop1.cshl.edu |
61c91a5a98307bf6308fc87306a01cc429275024 | 83dc2a8d80a0614c66016efba9630cd60538d4b8 | /spider_traffic/test.py | 5a9cc750354cadebbffc1e99a03d3901ab54aea1 | [] | no_license | hanxianzhe1116/Python_Spider | 5095297e6071842aef95d0264b2024d5a0e81ce5 | ba3757acf2ed133ab76720a146d380eafe69a092 | refs/heads/master | 2021-01-04T09:29:13.616769 | 2020-10-19T02:16:17 | 2020-10-19T02:16:17 | 240,488,992 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,906 | py | import requests
import re
import json
import csv
from urllib.parse import urlencode
import datetime
import time
'''
函数说明:输入url及其参数
params:
baseUrl:最开始的url
cityCode:城市编码,这里我选择的是重庆,重庆编码:132
roadType:道路类型
callBack:返回类型
'''
def getPage(baseUrl,cityCode,roadType,callBack):
    """Request the road-rank endpoint and return the HTTP response.

    Builds ``baseUrl`` plus the urlencoded query (cityCode / roadtype /
    callback) and performs a GET.  Returns the ``requests`` Response object
    on HTTP 200; returns None on any other status code or on a connection
    error (which is printed rather than raised).
    """
    query_string = urlencode({
        'cityCode': cityCode,
        'roadtype': roadType,
        'callback': callBack,
    })
    target = baseUrl + query_string
    try:
        reply = requests.get(target)
        if reply.status_code == 200:
            return reply
    except requests.ConnectionError as err:
        # Diagnostic message kept verbatim from the original script.
        print('url出错', err.args)
if __name__ == '__main__':
    url = 'https://jiaotong.baidu.com/trafficindex/city/roadrank?'
    # newline='' is required by the csv module to avoid blank rows on Windows.
    with open('transformData.csv', 'w', newline='') as f:
        f_csv = csv.writer(f)
        # Collect 10 samples, sleeping 5 seconds between requests.
        for i in range(10):
            response = getPage(url, 132, 0, 'jsonp_1553486162746_179718')
            # Strip the JSONP wrapper ("callback(...)") and parse the JSON body.
            transformData = json.loads(re.findall(r'^\w+\((.*)\)$', response.text)[0])
            transformData = transformData.get('data').get('list')
            dateTime = datetime.datetime.now().strftime('%Y-%m-%d')
            # BUG FIX: writerow() expects a sequence of fields; passing the
            # bare string wrote every character into its own CSV column.
            f_csv.writerow([dateTime])
            dataList = []
            for item in transformData:
                # "row" avoids shadowing the built-in name "list".
                row = [item.get('roadname'), item.get('index'), item.get('speed')]
                dataList.append(row)
            f_csv.writerows(dataList)
            print(dataList)
            time.sleep(5)
| [
"876605943@qq.com"
] | 876605943@qq.com |
5f620ca66ea5f22f98da060905725de7b1622114 | e8c0513bce6ba781d6d55c48330c54edbd20cc23 | /manage.py | d66549db189c8fe3cddba1a8a34913eaa50627a3 | [] | no_license | BohnSix/myblog | aad06969026e5e0059e83d3c8bedab66eab3a5d2 | 3961bd813c8d706b15e66cd55dff2edeb992ca3c | refs/heads/master | 2022-09-25T00:47:35.509766 | 2019-11-21T08:33:20 | 2019-11-21T08:33:20 | 185,914,940 | 2 | 1 | null | 2022-09-16T18:13:43 | 2019-05-10T03:49:23 | JavaScript | UTF-8 | Python | false | false | 721 | py | from flask_migrate import Migrate, MigrateCommand
from flask_script import Manager, Shell
from app import create_app, db
from app.models import *
# Create the Flask application with the "develop" configuration.
app = create_app(config_name="develop")
# Register the function defined below as a Jinja2 template filter.
@app.template_filter()
def countTime(content):
    """Template filter: estimated reading time in minutes for *content*,
    assuming ~200 characters per minute (always at least 1)."""
    # len() is the idiomatic spelling of content.__len__(); floor division
    # replaces int(float-division) with identical results for string lengths.
    return len(content) // 200 + 1
# Flask-Script command-line manager plus Flask-Migrate integration.
manager = Manager(app)
migrate = Migrate(app, db)
# Expose database migration commands as `python manage.py db ...`.
manager.add_command('db', MigrateCommand)
def make_shell_context():
    """Names pre-imported into a `python manage.py shell` session."""
    return {"db": db, "Article": Article, "User": User, "Category": Category}
# Interactive shell with the context defined above.
manager.add_command("shell", Shell(make_context=make_shell_context))
# NOTE: a duplicate manager.add_command("db", MigrateCommand) was removed
# here — the "db" command is already registered above.
if __name__ == "__main__":
    # Uncomment to reset the schema during development:
    # db.drop_all(app=app)
    # db.create_all(app=app)
    app.run(host="0.0.0.0", port=8080, debug=True)
| [
"bohn_six@163.com"
] | bohn_six@163.com |
ac19567d2c9ed4b4c9852a5ca2ad7fbd7ab5185d | 7fdf9c9b4f9601f1f24414da887acb03018e99f1 | /gym/gym/settings.py | 80b1a8a45b3e88065e60982b4b7505f389a7c7e7 | [] | no_license | sid-ncet/fitnesss | d96653022664ec2b1c9a5811fc3f7048a122e3ed | fc62c77abe30659131f0befc93424f8bb9333a0b | refs/heads/master | 2023-04-25T19:02:12.719223 | 2021-06-05T06:53:17 | 2021-06-05T06:53:17 | 374,042,630 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,318 | py | """
Django settings for gym project.
Generated by 'django-admin startproject' using Django 3.2.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
import os
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
# Project root: two levels up from this settings module.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load
# it from the environment before any production deployment.
SECRET_KEY = 'django-insecure--v@m2t(6erzobw08yshw&tbm9s47$n)99#t8+o87ib22l7$-dj'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Empty list: with DEBUG=True Django permits localhost-style hosts only.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'fitness'
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'gym.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # Project-level template directory in addition to per-app templates.
        'DIRS': ['templates'],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'gym.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# NOTE(review): STATIC_ROOT is the project root itself, so `collectstatic`
# would copy files straight into BASE_DIR; this is usually
# BASE_DIR / 'static' — confirm before deploying.
STATIC_ROOT= BASE_DIR
# NOTE(review): MEDIA_URL is defined without a matching MEDIA_ROOT, so
# uploads have no configured filesystem destination — verify.
MEDIA_URL= '/images/download/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| [
"siddharthsinghcs11@gmail.com"
] | siddharthsinghcs11@gmail.com |
661b9aa3fceb522de6be34632dd648f5060f74c3 | a69b96621abef181606fd3d68eebaa5b655ed529 | /Lesson3problem2.py | baf178f6055c80d571cdd52706d09817b15fd3d5 | [] | no_license | agonzalez33/Lesson3 | 3724be5e602a9befafe72be8570a5e3cc6ab2ec0 | cc7dfe7a43b3691141a1d9f81d5939d5134a23e3 | refs/heads/master | 2020-05-03T04:00:58.346507 | 2019-03-29T13:35:45 | 2019-03-29T13:35:45 | 178,411,421 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 69 | py | for integer in range(1000,0,-1):
print(integer)
print('Blast off!') | [
"noreply@github.com"
] | agonzalez33.noreply@github.com |
9c49c4755281a3c8a9b671df5099d752953dc5ec | b4ef8fcaf8e8818215add4402efadfef9bda45ee | /sample_code/python/vessels_v2_graphql/run.py | e393f1cdfdf590796dd7068877f0fcb1a4f2fb1a | [] | no_license | ykparkwixon/maritime | aebbbfe8e2f1ebb1bf2dbb01a94127977251285c | 378834c9b521ff538395c36e377117c87760fe22 | refs/heads/main | 2023-07-17T07:00:17.651206 | 2021-08-30T22:18:14 | 2021-08-30T22:18:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,062 | py | import yaml
import json
import csv
from loguru import logger
from utilities import paging, helpers
from gql import gql
# Rotating file log: new file at 500 MB, entries kept for 10 days.
logger.add('demo_client.log', rotation="500 MB", retention="10 days", level='DEBUG')
# Module-level run counters, reported by get_info() at the end of a run.
# NOTE(review): rows_written_to_raw_log is never incremented anywhere in
# this file, so it always reports 0.
rows_written_to_raw_log: int = 0
rows_written_to_csv: int = 0
pages_processed: int = 0
# Tracks whether the CSV header row has been emitted yet (written once).
wrote_csv_header = False
def get_settings():
    """Parse settings.yaml and return its contents.

    :returns: the setting names and their values
    :rtype: dict
    """
    with open('settings.yaml') as config_file:
        parsed: dict = yaml.load(config_file, Loader=yaml.FullLoader)
    return parsed
def read_query_file():
    """Return the text of the GraphQL query file named in settings.yaml."""
    query_path = get_settings()['name_of_gql_query_file']
    with open(query_path) as query_file:
        return query_file.read()
def write_raw(data: dict):
    """Append *data* as pretty-printed JSON to the configured raw-output file.

    A falsy ``name_of_raw_output_file`` setting disables raw logging.
    """
    target = get_settings()['name_of_raw_output_file']
    if not target:
        return
    with open(target, 'a+') as raw_file:
        raw_file.write(json.dumps(data, indent=4))
def write_csv(data: dict):
    """Append the rows in *data* to the configured CSV file.

    Writes the header once per process (tracked by the module-global
    ``wrote_csv_header``) and increments ``rows_written_to_csv`` per row.
    A falsy ``name_of_csv_file`` setting disables CSV output.
    """
    global rows_written_to_csv, wrote_csv_header
    settings = get_settings()
    name_of_csv_file = settings['name_of_csv_file']
    if not name_of_csv_file:
        return
    # Schema members are (name, ...) pairs; the names become the CSV columns.
    members = helpers.get_vessels_v2_members()
    csv_columns: list = [i[0] for i in members]
    # FIX: newline='' is required by the csv module to avoid blank rows on
    # Windows.  The original `try/except Exception: raise` wrapper was a
    # no-op (it only re-raised) and has been removed — behavior is unchanged.
    with open(name_of_csv_file, 'a+', newline='') as f:
        writer = csv.DictWriter(f, fieldnames=csv_columns)
        logger.debug(f"WROTE HEADER: {wrote_csv_header}")
        if not wrote_csv_header:
            writer.writeheader()
            wrote_csv_header = True
        item: dict
        for item in data:
            writer.writerow(item)
            rows_written_to_csv += 1
def get_info():
    """Return a formatted summary of the module-level run counters.

    NOTE(review): rows_written_to_raw_log is never incremented anywhere in
    this file, so its line always reports 0.
    """
    return (
        f"\n    TOTAL PAGES WRITTEN TO RAW LOG: {rows_written_to_raw_log}"
        f"\n    TOTAL ROWS WRITTEN TO CSV: {rows_written_to_csv}"
        f"\n    TOTAL PAGES PROCESSED: {pages_processed}"
    )
def run():
    """Execute the configured GraphQL query and page through all results.

    Reads ``test_name`` / ``pages_to_process`` from settings.yaml, issues the
    initial query, then pages via ``paging.Paging``, writing each page to the
    raw log and (after transformation) to the CSV file.  Stops when the
    requested page count is reached or no further pages/data are available.
    """
    global pages_processed
    settings = get_settings()
    test_name = settings['test_name']
    pages_to_process = settings['pages_to_process']
    # make a client connection
    client = helpers.get_gql_client()
    # read the GraphQL query text
    query = read_query_file()
    # BUG FIX: the original test `not "pageInfo" or not "endCursor" or
    # not "hasNextPage" in query` only ever checked "hasNextPage" — the two
    # bare string literals are always truthy, so `not` on them is always
    # False.  Require all three paging tokens to be present.
    if not all(token in query for token in ("pageInfo", "endCursor", "hasNextPage")):
        logger.error("Please include pageInfo in the query, it is required for paging. See the README.md")
        return
    response: dict = dict()
    try:
        response = client.execute(gql(query))
    except BaseException as e:
        # Log-and-reraise boundary; BaseException keeps even
        # KeyboardInterrupt visible in the log before propagating.
        logger.error(e)
        raise
    # initialize paging state from the first response
    pg = paging.Paging(response=response)
    schema_members = helpers.get_vessels_v2_members()
    logger.info("Paging started")
    while True:
        response, hasNextPage = pg.page_and_get_response(client, query)
        logger.debug(f"hasNextPage: {hasNextPage}")
        if response:
            write_raw(response)
            csv_data = helpers.transform_response_for_loading(response=response, schema=schema_members, test_name=test_name)
            if csv_data:
                write_csv(csv_data)
            pages_processed += 1
            logger.info(f"Page: {pages_processed}")
            # pages_to_process == 1: exactly one page was requested.
            if pages_to_process == 1:
                break
            elif pages_to_process:
                # A finite page budget: stop when exhausted or no more pages.
                if not hasNextPage or not response:
                    break
                if pages_processed >= pages_to_process:
                    break
            elif not hasNextPage or not response:
                break
            else:
                logger.info("Did not get data for csv, either because there are no more pages, or did not get a response")
                break
        else:
            logger.info("No response or no more responses")
            break
    logger.info(get_info())
# Script entry point: execute one full query/paging cycle.
if __name__ == '__main__':
    run()
    logger.info("Done")
| [
"78374623+brucebookman@users.noreply.github.com"
] | 78374623+brucebookman@users.noreply.github.com |
849b16fbf6169f6d56be1d9b19ad76d20f75fe68 | 92dbb16f383754fd9fd8d35c87b68977ec42a586 | /Geogria/20200514-graph/map_world.py | 847c835b947d72027a50300d73ea3ed4aef2713e | [] | no_license | YWJL/pchong | c0c1bfa4695ac3b143430fd2291b197b4fdab884 | eaa98c5ed3daad60e8ac0560634ba631e665f00e | refs/heads/master | 2022-11-11T00:01:55.550199 | 2020-07-01T06:11:56 | 2020-07-01T06:11:56 | 276,290,019 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,565 | py | import pandas
from pyecharts import options as opts
from pyecharts.charts import Bar, Grid, Line
from pyecharts import options as opts
from pyecharts.charts import Map
from pyecharts.faker import Collector, Faker
from pyecharts.datasets import register_url
import pandas as pd
import asyncio
from pyecharts.commons.utils import JsCode
import math
from aiohttp import TCPConnector, ClientSession
import pyecharts.options as opts
from pyecharts.charts import Map
import pyecharts.options as opts
from pyecharts.charts import Line
from pyecharts import options as opts
from pyecharts.charts import Bar, Timeline
from pyecharts import options as opts
from pyecharts.charts import Grid, Line, Scatter
from pyecharts.faker import Faker
from pyecharts.commons.utils import JsCode
from pyecharts.faker import Faker
from pyecharts.charts import Geo
import json
US_name='daily.csv'
death_name='202005014-world-death-data.json.csv'
positive_name='202005014-world-confirm-data.json.csv'
recovered_name='202005014-world-cover-data.json.csv'
RECOVERED=pd.read_csv(recovered_name)
DEATH=pd.read_csv(death_name)
POSITIVE=pd.read_csv(positive_name)
US=pd.read_csv(US_name)
US_pos=[]
US_Dea=[]
US_Rec=[]
for i in range(1,56):
if math.isnan(US.iloc[i,2]):
US.iloc[i, 2]=0
if math.isnan(US.iloc[i, 16]):
US.iloc[i, 16]=0
if math.isnan(US.iloc[i, 11]):
US.iloc[i,11]=0
US_pos.append(US.iloc[i, 2])
US_Dea.append(US.iloc[i, 16])
US_Rec.append(US.iloc[i, 11])
print('US_pos:',US_pos)
print('US_Dea:',US_Dea)
print('US_Rec:',US_Rec)
country_number_pos=int((POSITIVE.shape[1])/2-1)
country_number_dea=int((DEATH.shape[1])/2-1)
country_number_rec=int((RECOVERED.shape[1])/2-2)
print(country_number_dea)
print(RECOVERED.iloc[1,country_number_rec])
day=len(POSITIVE)-1
print(day)
country_pos=[]
country_dea=[]
country_rec=[]
positive=[]
death=[]
recovered=[]
print('sum(US_dea):',sum(US_Dea))
print('sum(US_pos):',sum(US_pos))
print('sum(US_rec):',sum(US_Rec))
time="截止至{}全球疫情数据".format(POSITIVE.iloc[-1,0])
for i in range(1,country_number_dea):
country_dea.append(DEATH.iloc[1,i])
death.append(DEATH.iloc[day-1,i])
country_dea.append('United States')
death.append(sum(US_Dea))
MAP_data_dea=[list(z) for z in zip(country_dea, death)]
print('MAP_data_dea:',MAP_data_dea)
for i in range(1,country_number_pos):
country_pos.append(POSITIVE.iloc[1,i])
positive.append(POSITIVE.iloc[day,i])
country_pos.append('United States')
positive.append(sum(US_pos))
MAP_data_pos=[list(z) for z in zip(country_pos, positive)]
print(len(positive))
MAP_data_rec=[]
# print(type(RECOVERED.iloc[2,0]))
for i in range(1,day-6):
for j in range(1,day-6):
if type(RECOVERED.iloc[i,j])!=str and math.isnan(RECOVERED.iloc[i,j]):
RECOVERED.iloc[i,j]=0
for i in range(1,day-6):
country_rec.append(RECOVERED.iloc[1,i])
recovered.append(RECOVERED.iloc[day-6,i])
MAP_data_rec=[list(z) for z in zip(country_rec, recovered)]
country_rec.append('United States')
recovered.append(sum(US_Rec))
print('MAP_data_pos:',MAP_data_pos)
# for i in range(1,country_number_dea-1):
# for j in range(1,country_number_dea-1):
# if country_pos[i]==country_dea[j]:
# map1=[list(z) for z in zip(country_dea, country_pos)]
# print(map1)
# print(country)
# print(data.iloc[day,1])
# print(day)
NAME_MAP_DATA = {
# "key": "value"
# "name on the hong kong map": "name in the MAP DATA",
}
c = (
Map(init_opts=opts.InitOpts(width="1400px", height="800px"))
.add(
series_name="Positive_number",
data_pair=MAP_data_pos,
maptype="world",
name_map=NAME_MAP_DATA,
is_map_symbol_show=False)
.add(
series_name="Death_number",
data_pair=MAP_data_dea,
maptype="world",
name_map=NAME_MAP_DATA,
is_map_symbol_show=False)
.add(
series_name="Recovered_number",
data_pair=MAP_data_rec,
maptype="world",
name_map=NAME_MAP_DATA,
is_map_symbol_show=False)
.set_series_opts(label_opts=opts.LabelOpts(is_show=False))
.set_global_opts(
title_opts=opts.TitleOpts(
title="Map-世界地图",
subtitle=time),
# subtitle=time,
visualmap_opts=opts.VisualMapOpts(max_=sum(US_pos)),
tooltip_opts=opts.TooltipOpts(
trigger="item", formatter="{b0}<br/>(number:{c}) "
),
)
.render("map_world.html")
)
# print([list(z) for z in zip(Faker.country, Faker.values())])
print(max(US_pos)) | [
"201256153@qq.com"
] | 201256153@qq.com |
10fe64c7113e565bb25b1d2565fa28e8ea3cfdcd | 40711c0546644d1bb8709ee348211d294c0a48d2 | /Rapport/Benchmarking/state_plot.py | 48111f01a271055f269de89702c8ec49040ddac4 | [] | no_license | smaibom/bach_2015 | aaefa2a33cc0d5bb06761d72a3820ee0f2cfe290 | 4177977b81bb6f6c945e5e8a1956dbd4ca4b43f2 | refs/heads/master | 2021-01-20T21:53:03.374855 | 2015-06-07T21:54:47 | 2015-06-07T21:54:47 | 31,496,008 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,459 | py | """
Demo of a simple plot with a custom dashed line.
A Line object's ``set_dashes`` method allows you to specify dashes with
a series of on/off lengths (in points).
"""
import numpy as np
import matplotlib.pyplot as plt
#x = np.linspace(0, 22, 22)
grosses = [62242364,123246883,122562228,122562228,183556657,184105105,184105105,243618278,245304681,245304681,305119181,305119181,305967853,977490276]
labels = ['0 0 0',
'0 0 1', '0 1 0', '1 0 0',
'0 0 2', '0 2 0', '2 0 0 ',
'0 0 3', '0 3 0', '3 0 0 ',
'1 0 1', '0 1 1', '1 1 0',
'1 1 1']
fig = plt.figure()
fig.subplots_adjust(bottom=0.2) # Remark 1
ax = fig.add_subplot(111)
ax.ticklabel_format(style='plain') # Remark 2
ax.set_xticks(range(len(labels)))
ax.set_xticklabels(labels, rotation=80)
ax.bar(range(len(grosses)), grosses)
plt.xlabel('alterations, deletions and insertions\n(In order)')
plt.ylabel('states processed')
plt.show()
#0 = 62242364 0 hits
#1ins = 123246883 0 hits
#1del = 122562228 0 hits
#1mut = 122562228 0 hits
#2ins = 183556657 3 hits
#2del = 184105105 117 hits
#2mut = 184105105 117 hits
#3ins = 243618278 28 hits
#3del = 245304681 2066 hits
#3mut = 245304681 2066 hits
#1ins 1mut = 305119181 41 hits
#1ins 1del = 305119181 41 hits
#1del 1mut = 305967853 234 hits
#1 1 1 = 977490276 3275 hits | [
"kullax@feral.dk"
] | kullax@feral.dk |
bd21bc1c8fa779e7d91a63e97ee2f3b07852e152 | 756504caae02535f359baa1bd232038979f5b3b5 | /AIA/scanm/apps.py | f211b8310e63fa9572dfb5eceffc97ba91744cd3 | [] | no_license | dante993/scantit | b75aac717e68cea25e17a40c44e719c95d0f0376 | 9a447f02af9f23b433bafdd02de852bd1c4e4d9e | refs/heads/master | 2021-01-13T15:04:49.853328 | 2017-05-29T12:52:54 | 2017-05-29T12:52:54 | 79,124,612 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 126 | py | from __future__ import unicode_literals
from django.apps import AppConfig
class ScanmConfig(AppConfig):
name = 'scanm'
| [
"danteejmg@gmail.com"
] | danteejmg@gmail.com |
fc0d28850895dd119c8a2b4afc9f5481bb7779fe | b9eef16211d4a5f2e5b51c0ddfb7dc0a9608db86 | /Chap2InprovingDNN/week2/optimization/optimization.py | e2e8cee8f4713b5cd9743e543af41771e71a40f0 | [] | no_license | vinares/DeepLearning | 905f44655c0b72c9ba6d52bf1c15146b0d07fc92 | c307c3c1063a101dcfa192bc3b8671c2781e31f3 | refs/heads/main | 2023-06-22T05:45:46.716091 | 2021-07-21T09:03:02 | 2021-07-21T09:03:02 | 369,407,380 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,039 | py | import numpy as np
import matplotlib.pyplot as plt
import scipy.io
import math
import sklearn
import sklearn.datasets
from opt_utils_v1a import load_params_and_grads, initialize_parameters, forward_propagation, backward_propagation
from opt_utils_v1a import compute_cost, predict, predict_dec, plot_decision_boundary, load_dataset
from testCases import *
plt.rcParams['figure.figsize'] = (7.0, 4.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# GRADED FUNCTION: update_parameters_with_gd
def update_parameters_with_gd(parameters, grads, learning_rate):
"""
Update parameters using one step of gradient descent
Arguments:
parameters -- python dictionary containing your parameters to be updated:
parameters['W' + str(l)] = Wl
parameters['b' + str(l)] = bl
grads -- python dictionary containing your gradients to update each parameters:
grads['dW' + str(l)] = dWl
grads['db' + str(l)] = dbl
learning_rate -- the learning rate, scalar.
Returns:
parameters -- python dictionary containing your updated parameters
"""
L = len(parameters) // 2 # number of layers in the neural networks
# Update rule for each parameter
for l in range(L):
### START CODE HERE ### (approx. 2 lines)
parameters["W" + str(l + 1)] = parameters['W' + str(l + 1)] - learning_rate * grads['dW' + str(l + 1)]
parameters["b" + str(l + 1)] = parameters['b' + str(l + 1)] - learning_rate * grads['db' + str(l + 1)]
### END CODE HERE ###
return parameters
parameters, grads, learning_rate = update_parameters_with_gd_test_case()
parameters = update_parameters_with_gd(parameters, grads, learning_rate)
print("W1 =\n" + str(parameters["W1"]))
print("b1 =\n" + str(parameters["b1"]))
print("W2 =\n" + str(parameters["W2"]))
print("b2 =\n" + str(parameters["b2"]))
# GRADED FUNCTION: random_mini_batches
def random_mini_batches(X, Y, mini_batch_size=64, seed=0):
"""
Creates a list of random minibatches from (X, Y)
Arguments:
X -- input data, of shape (input size, number of examples)
Y -- true "label" vector (1 for blue dot / 0 for red dot), of shape (1, number of examples)
mini_batch_size -- size of the mini-batches, integer
Returns:
mini_batches -- list of synchronous (mini_batch_X, mini_batch_Y)
"""
np.random.seed(seed) # To make your "random" minibatches the same as ours
m = X.shape[1] # number of training examples
mini_batches = []
# Step 1: Shuffle (X, Y)
permutation = list(np.random.permutation(m))
shuffled_X = X[:, permutation]
shuffled_Y = Y[:, permutation].reshape((1, m))
# Step 2: Partition (shuffled_X, shuffled_Y). Minus the end case.
num_complete_minibatches = math.floor(
m / mini_batch_size) # number of mini batches of size mini_batch_size in your partitionning
for k in range(0, num_complete_minibatches):
### START CODE HERE ### (approx. 2 lines)
mini_batch_X = shuffled_X[:, k * mini_batch_size: (k + 1) * mini_batch_size]
mini_batch_Y = shuffled_Y[:, k * mini_batch_size: (k + 1) * mini_batch_size]
### END CODE HERE ###
mini_batch = (mini_batch_X, mini_batch_Y)
mini_batches.append(mini_batch)
# Handling the end case (last mini-batch < mini_batch_size)
if m % mini_batch_size != 0:
### START CODE HERE ### (approx. 2 lines)
mini_batch_X = shuffled_X[:, (num_complete_minibatches ) * mini_batch_size:m]
mini_batch_Y = shuffled_Y[:, (num_complete_minibatches ) * mini_batch_size:m]
### END CODE HERE ###
mini_batch = (mini_batch_X, mini_batch_Y)
mini_batches.append(mini_batch)
return mini_batches
X_assess, Y_assess, mini_batch_size = random_mini_batches_test_case()
mini_batches = random_mini_batches(X_assess, Y_assess, mini_batch_size)
print ("shape of the 1st mini_batch_X: " + str(mini_batches[0][0].shape))
print ("shape of the 2nd mini_batch_X: " + str(mini_batches[1][0].shape))
print ("shape of the 3rd mini_batch_X: " + str(mini_batches[2][0].shape))
print ("shape of the 1st mini_batch_Y: " + str(mini_batches[0][1].shape))
print ("shape of the 2nd mini_batch_Y: " + str(mini_batches[1][1].shape))
print ("shape of the 3rd mini_batch_Y: " + str(mini_batches[2][1].shape))
print ("mini batch sanity check: " + str(mini_batches[0][0][0][0:3]))
# GRADED FUNCTION: initialize_velocity
def initialize_velocity(parameters):
"""
Initializes the velocity as a python dictionary with:
- keys: "dW1", "db1", ..., "dWL", "dbL"
- values: numpy arrays of zeros of the same shape as the corresponding gradients/parameters.
Arguments:
parameters -- python dictionary containing your parameters.
parameters['W' + str(l)] = Wl
parameters['b' + str(l)] = bl
Returns:
v -- python dictionary containing the current velocity.
v['dW' + str(l)] = velocity of dWl
v['db' + str(l)] = velocity of dbl
"""
L = len(parameters) // 2 # number of layers in the neural networks
v = {}
# Initialize velocity
for l in range(L):
### START CODE HERE ### (approx. 2 lines)
v["dW" + str(l + 1)] = np.zeros(parameters["W" + str(l + 1)].shape)
v["db" + str(l + 1)] = np.zeros(parameters["b" + str(l + 1)].shape)
### END CODE HERE ###
return v
parameters = initialize_velocity_test_case()
v = initialize_velocity(parameters)
print("v[\"dW1\"] =\n" + str(v["dW1"]))
print("v[\"db1\"] =\n" + str(v["db1"]))
print("v[\"dW2\"] =\n" + str(v["dW2"]))
print("v[\"db2\"] =\n" + str(v["db2"]))
# GRADED FUNCTION: update_parameters_with_momentum
def update_parameters_with_momentum(parameters, grads, v, beta, learning_rate):
"""
Update parameters using Momentum
Arguments:
parameters -- python dictionary containing your parameters:
parameters['W' + str(l)] = Wl
parameters['b' + str(l)] = bl
grads -- python dictionary containing your gradients for each parameters:
grads['dW' + str(l)] = dWl
grads['db' + str(l)] = dbl
v -- python dictionary containing the current velocity:
v['dW' + str(l)] = ...
v['db' + str(l)] = ...
beta -- the momentum hyperparameter, scalar
learning_rate -- the learning rate, scalar
Returns:
parameters -- python dictionary containing your updated parameters
v -- python dictionary containing your updated velocities
"""
L = len(parameters) // 2 # number of layers in the neural networks
# Momentum update for each parameter
for l in range(L):
### START CODE HERE ### (approx. 4 lines)
# compute velocities
v["dW" + str(l + 1)] = beta * v["dW" + str(l + 1)] + (1 - beta) * grads["dW" + str(l + 1)]
v["db" + str(l + 1)] = beta * v["db" + str(l + 1)] + (1 - beta) * grads["db" + str(l + 1)]
# update parameters
parameters["W" + str(l + 1)] = parameters["W" + str(l + 1)] - learning_rate * v["dW" + str(l + 1)]
parameters["b" + str(l + 1)] = parameters["b" + str(l + 1)] - learning_rate * v["db" + str(l + 1)]
### END CODE HERE ###
return parameters, v
parameters, grads, v = update_parameters_with_momentum_test_case()
parameters, v = update_parameters_with_momentum(parameters, grads, v, beta = 0.9, learning_rate = 0.01)
print("W1 = \n" + str(parameters["W1"]))
print("b1 = \n" + str(parameters["b1"]))
print("W2 = \n" + str(parameters["W2"]))
print("b2 = \n" + str(parameters["b2"]))
print("v[\"dW1\"] = \n" + str(v["dW1"]))
print("v[\"db1\"] = \n" + str(v["db1"]))
print("v[\"dW2\"] = \n" + str(v["dW2"]))
print("v[\"db2\"] = v" + str(v["db2"]))
# GRADED FUNCTION: initialize_adam
def initialize_adam(parameters):
"""
Initializes v and s as two python dictionaries with:
- keys: "dW1", "db1", ..., "dWL", "dbL"
- values: numpy arrays of zeros of the same shape as the corresponding gradients/parameters.
Arguments:
parameters -- python dictionary containing your parameters.
parameters["W" + str(l)] = Wl
parameters["b" + str(l)] = bl
Returns:
v -- python dictionary that will contain the exponentially weighted average of the gradient.
v["dW" + str(l)] = ...
v["db" + str(l)] = ...
s -- python dictionary that will contain the exponentially weighted average of the squared gradient.
s["dW" + str(l)] = ...
s["db" + str(l)] = ...
"""
L = len(parameters) // 2 # number of layers in the neural networks
v = {}
s = {}
# Initialize v, s. Input: "parameters". Outputs: "v, s".
for l in range(L):
### START CODE HERE ### (approx. 4 lines)
v["dW" + str(l + 1)] = np.zeros(parameters["W" + str(l + 1)].shape)
v["db" + str(l + 1)] = np.zeros(parameters["b" + str(l + 1)].shape)
s["dW" + str(l + 1)] = np.zeros(parameters["W" + str(l + 1)].shape)
s["db" + str(l + 1)] = np.zeros(parameters["b" + str(l + 1)].shape)
### END CODE HERE ###
return v, s
parameters = initialize_adam_test_case()
v, s = initialize_adam(parameters)
print("v[\"dW1\"] = \n" + str(v["dW1"]))
print("v[\"db1\"] = \n" + str(v["db1"]))
print("v[\"dW2\"] = \n" + str(v["dW2"]))
print("v[\"db2\"] = \n" + str(v["db2"]))
print("s[\"dW1\"] = \n" + str(s["dW1"]))
print("s[\"db1\"] = \n" + str(s["db1"]))
print("s[\"dW2\"] = \n" + str(s["dW2"]))
print("s[\"db2\"] = \n" + str(s["db2"]))
# GRADED FUNCTION: update_parameters_with_adam
def update_parameters_with_adam(parameters, grads, v, s, t, learning_rate=0.01,
beta1=0.9, beta2=0.999, epsilon=1e-8):
"""
Update parameters using Adam
Arguments:
parameters -- python dictionary containing your parameters:
parameters['W' + str(l)] = Wl
parameters['b' + str(l)] = bl
grads -- python dictionary containing your gradients for each parameters:
grads['dW' + str(l)] = dWl
grads['db' + str(l)] = dbl
v -- Adam variable, moving average of the first gradient, python dictionary
s -- Adam variable, mo v["dW" + str(l + 1)] = np.zeros(parameters["W" + str(l + 1)].shape)
v["db" + str(l + 1)] = np.zeros(parameters["b" + str(l + 1)].shape)
s["dW" + str(l + 1)] = np.zeros(parameters["W" + str(l + 1)].shape)
learning_rate -- the learning rate, scalar.
beta1 -- Exponential decay hyperparameter for the first moment estimates
beta2 -- Exponential decay hyperparameter for the second moment estimates
epsilon -- hyperparameter preventing division by zero in Adam updates
Returns:
parameters -- python dictionary containing your updated parameters
v -- Adam variable, moving average of the first gradient, python dictionary
s -- Adam variable, moving average of the squared gradient, python dictionary
"""
L = len(parameters) // 2 # number of layers in the neural networks
v_corrected = {} # Initializing first moment estimate, python dictionary
s_corrected = {} # Initializing second moment estimate, python dictionary
# Perform Adam update on all parameters
for l in range(L):
# Moving average of the gradients. Inputs: "v, grads, beta1". Output: "v".
### START CODE HERE ### (approx. 2 lines)
v["dW" + str(l + 1)] = beta1 * v["dW" + str(l + 1)] + (1 - beta1) * grads["dW" + str(l + 1)]
v["db" + str(l + 1)] = beta1 * v["db" + str(l + 1)] + (1 - beta1) * grads["db" + str(l + 1)]
### END CODE HERE ###
# Compute bias-corrected first moment estimate. Inputs: "v, beta1, t". Output: "v_corrected".
### START CODE HERE ### (approx. 2 lines)
v_corrected["dW" + str(l + 1)] = v["dW" + str(l + 1)] / (1 - beta1 ** t)
v_corrected["db" + str(l + 1)] = v["db" + str(l + 1)] / (1 - beta1 ** t)
### END CODE HERE ###
# Moving average of the squared gradients. Inputs: "s, grads, beta2". Output: "s".
### START CODE HERE ### (approx. 2 lines)
s["dW" + str(l + 1)] = beta2 * s["dW" + str(l + 1)] + (1 - beta2) * (grads["dW" + str(l + 1)] ** 2)
s["db" + str(l + 1)] = beta2 * s["db" + str(l + 1)] + (1 - beta2) * (grads["db" + str(l + 1)] ** 2)
### END CODE HERE ###
# Compute bias-corrected second raw moment estimate. Inputs: "s, beta2, t". Output: "s_corrected".
### START CODE HERE ### (approx. 2 lines)
s_corrected["dW" + str(l + 1)] = s["dW" + str(l + 1)] / (1 - beta2 ** t)
s_corrected["db" + str(l + 1)] = s["db" + str(l + 1)] / (1 - beta2 ** t)
### END CODE HERE ###
# Update parameters. Inputs: "parameters, learning_rate, v_corrected, s_corrected, epsilon". Output: "parameters".
### START CODE HERE ### (approx. 2 lines)
parameters["W" + str(l + 1)] = parameters["W" + str(l + 1)] - learning_rate * v_corrected["dW" + str(l + 1)] / (np.sqrt(s_corrected["dW" + str(l + 1)]) + epsilon)
parameters["b" + str(l + 1)] = parameters["b" + str(l + 1)] - learning_rate * v_corrected["db" + str(l + 1)] / (np.sqrt(s_corrected["db" + str(l + 1)]) + epsilon)
### END CODE HERE ###
return parameters, v, s
parameters, grads, v, s = update_parameters_with_adam_test_case()
parameters, v, s = update_parameters_with_adam(parameters, grads, v, s, t = 2)
print("W1 = \n" + str(parameters["W1"]))
print("b1 = \n" + str(parameters["b1"]))
print("W2 = \n" + str(parameters["W2"]))
print("b2 = \n" + str(parameters["b2"]))
print("v[\"dW1\"] = \n" + str(v["dW1"]))
print("v[\"db1\"] = \n" + str(v["db1"]))
print("v[\"dW2\"] = \n" + str(v["dW2"]))
print("v[\"db2\"] = \n" + str(v["db2"]))
print("s[\"dW1\"] = \n" + str(s["dW1"]))
print("s[\"db1\"] = \n" + str(s["db1"]))
print("s[\"dW2\"] = \n" + str(s["dW2"]))
print("s[\"db2\"] = \n" + str(s["db2"]))
train_X, train_Y = load_dataset()
plt.show()
def model(X, Y, layers_dims, optimizer, learning_rate=0.0007, mini_batch_size=64, beta=0.9,
beta1=0.9, beta2=0.999, epsilon=1e-8, num_epochs=10000, print_cost=True):
"""
3-layer neural network model which can be run in different optimizer modes.
Arguments:
X -- input data, of shape (2, number of examples)
Y -- true "label" vector (1 for blue dot / 0 for red dot), of shape (1, number of examples)
layers_dims -- python list, containing the size of each layer
learning_rate -- the learning rate, scalar.
mini_batch_size -- the size of a mini batch
beta -- Momentum hyperparameter
beta1 -- Exponential decay hyperparameter for the past gradients estimates
beta2 -- Exponential decay hyperparameter for the past squared gradients estimates
epsilon -- hyperparameter preventing division by zero in Adam updates
num_epochs -- number of epochs
print_cost -- True to print the cost every 1000 epochs
Returns:
parameters -- python dictionary containing your updated parameters
"""
L = len(layers_dims) # number of layers in the neural networks
costs = [] # to keep track of the cost
t = 0 # initializing the counter required for Adam update
seed = 10 # For grading purposes, so that your "random" minibatches are the same as ours
m = X.shape[1] # number of training examples
# Initialize parameters
parameters = initialize_parameters(layers_dims)
# Initialize the optimizer
if optimizer == "gd":
pass # no initialization required for gradient descent
elif optimizer == "momentum":
v = initialize_velocity(parameters)
elif optimizer == "adam":
v, s = initialize_adam(parameters)
# Optimization loop
for i in range(num_epochs):
# Define the random minibatches. We increment the seed to reshuffle differently the dataset after each epoch
seed = seed + 1
minibatches = random_mini_batches(X, Y, mini_batch_size, seed)
cost_total = 0
for minibatch in minibatches:
# Select a minibatch
(minibatch_X, minibatch_Y) = minibatch
# Forward propagation
a3, caches = forward_propagation(minibatch_X, parameters)
# Compute cost and add to the cost total
cost_total += compute_cost(a3, minibatch_Y)
# Backward propagation
grads = backward_propagation(minibatch_X, minibatch_Y, caches)
# Update parameters
if optimizer == "gd":
parameters = update_parameters_with_gd(parameters, grads, learning_rate)
elif optimizer == "momentum":
parameters, v = update_parameters_with_momentum(parameters, grads, v, beta, learning_rate)
elif optimizer == "adam":
t = t + 1 # Adam counter
parameters, v, s = update_parameters_with_adam(parameters, grads, v, s,
t, learning_rate, beta1, beta2, epsilon)
cost_avg = cost_total / m
# Print the cost every 1000 epoch
if print_cost and i % 1000 == 0:
print("Cost after epoch %i: %f" % (i, cost_avg))
if print_cost and i % 100 == 0:
costs.append(cost_avg)
# plot the cost
plt.plot(costs)
plt.ylabel('cost')
plt.xlabel('epochs (per 100)')
plt.title("Learning rate = " + str(learning_rate))
plt.show()
return parameters
# train 3-layer model
layers_dims = [train_X.shape[0], 5, 2, 1]
parameters = model(train_X, train_Y, layers_dims, optimizer = "gd")
# Predict
predictions = predict(train_X, train_Y, parameters)
# Plot decision boundary
plt.title("Model with Gradient Descent optimization")
axes = plt.gca()
axes.set_xlim([-1.5,2.5])
axes.set_ylim([-1,1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
# train 3-layer model
layers_dims = [train_X.shape[0], 5, 2, 1]
parameters = model(train_X, train_Y, layers_dims, beta = 0.9, optimizer = "momentum")
# Predict
predictions = predict(train_X, train_Y, parameters)
# Plot decision boundary
plt.title("Model with Momentum optimization")
axes = plt.gca()
axes.set_xlim([-1.5,2.5])
axes.set_ylim([-1,1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
# train 3-layer model
layers_dims = [train_X.shape[0], 5, 2, 1]
parameters = model(train_X, train_Y, layers_dims, optimizer = "adam")
# Predict
predictions = predict(train_X, train_Y, parameters)
# Plot decision boundary
plt.title("Model with Adam optimization")
axes = plt.gca()
axes.set_xlim([-1.5,2.5])
axes.set_ylim([-1,1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y) | [
"877649518@qq.com"
] | 877649518@qq.com |
1f276f5a21289f070e9ebfcc655a747a3d1cd3b1 | 0104f7736632084592cd6ced20de0be9fb9e24ac | /剑指offer/构建乘积数组.py | 09d398118fb634a3669c2c9da9a75f98947ad262 | [] | no_license | longkun-uestc/examination | 9eb63b6e8ffdb503a90a6be3d049ad2fdb85e46c | ef1d29a769f2fd6d517497f8b42121c02f8307cc | refs/heads/master | 2021-06-25T23:11:24.460680 | 2021-06-23T03:28:55 | 2021-06-23T03:28:55 | 228,847,479 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 745 | py | class Solution:
def multiply(self, A):
left_to_right = [-1]*len(A)
right_to_left = [-1]*len(A)
left_to_right[0] = 1
right_to_left[-1] = 1
for i in range(1, len(left_to_right)):
left_to_right[i] = left_to_right[i-1] * A[i-1]
for i in range(len(right_to_left)-2, -1, -1):
right_to_left[i] = right_to_left[i+1] * A[i+1]
# B = [1]*len(A)
# for i in range(len(B)):
# B[i] = left_to_right[i] * right_to_left[i]
B = [a*b for a, b in zip(left_to_right, right_to_left)]
# print(left_to_right)
# print(right_to_left)
# print(B)
return B
if __name__ == '__main__':
s = Solution()
s.multiply([2,3,4,5, 6]) | [
"1256904448@qq.com"
] | 1256904448@qq.com |
eeaa16fb67a5f6fb3382537928469d161d2ee20e | 985be2d2d979c1d5ffbd6cd73d9da711951e4f1c | /chat/consumers.py | a2ffba5a9b3687a82d2473cd12f884a487b2d806 | [] | no_license | sreesh-mallya/django-channels-demo | 6a1492c2ffe3a8f37782ced19562c629fa65ee8f | 8a3ac7d3e04ecd8c5053009f760d84e3b9415882 | refs/heads/master | 2021-01-23T22:16:04.353634 | 2017-09-19T03:44:00 | 2017-09-19T03:44:00 | 102,924,295 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,740 | py | import re
import json
import logging
from channels import Group
from channels.sessions import channel_session
from .models import Room
@channel_session
def ws_connect(message):
# Could use more error handling here
prefix, label = message['path'].strip('/').split('/')
try:
room = Room.objects.get(label=label)
message.reply_channel.send({"accept": True}) # Accept connection
print('Room : %s' % room.label)
except Room.DoesNotExist:
print('Room with label %s does not exist!' % label)
return
Group('chat-' + label).add(message.reply_channel)
message.channel_session['room'] = room.label
print(message.keys())
@channel_session
def ws_receive(message):
# Could use more error handling here
label = message.channel_session['room']
try:
room = Room.objects.get(label=label)
except Room.ObjectDoesNotExist:
print('Room with label %s does not exist!' % label)
return
# Get text message, and parse to json; throw any errors if any
try:
data = json.loads(message['text'])
except ValueError:
print('Oops! Your message isn\'t in json!')
return
# Make sure data is in proper format, i.e, { 'handle': ... , 'message': ... }
if set(data.keys()) != {'handle', 'message'}:
print('Improper message format : %s ', data)
return
msg = room.messages.create(handle=data['handle'], message=data['message'])
response = json.dumps(msg.as_dict())
Group('chat-' + label).send({'text': response})
@channel_session
def ws_disconnect(message):
print('disconnecting')
label = message.channel_session['room']
Group('chat-' + label).discard(message.reply_channel)
| [
"sreeshsmallya@gmail.com"
] | sreeshsmallya@gmail.com |
0dca2f890e85ab82a477f193ca5d7b13bb4452f4 | 5310aad336ad7cdc304a7204d4bd91b4fa754f1e | /Lab3/homework/serious_ex9.py | 1c3bf0845b2d742f8ca01016425fdb34f9ee6da7 | [] | no_license | dattran1997/trandat-fundamental-c4e17 | 329e294f68bde1fc04d53c0acd0f9a7e87d7d444 | fd2f0648f28e78769f7fbf3e40e9973bf211f1de | refs/heads/master | 2020-03-09T22:49:56.228853 | 2018-05-29T04:22:39 | 2018-05-29T04:22:39 | 129,044,126 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 244 | py | def get_even_list(list):
new_list = []
for i in list:
if (i % 2 == 0):
new_list.append(i)
return new_list
if __name__ == "__main__":
list = [1,4,5,-1,10]
newlist = get_even_list(list)
print(newlist)
| [
"dattran1997@gmail.com"
] | dattran1997@gmail.com |
6763b1340462fabc349debc7f52e0774a21e430f | 7c59004e0165c9b32dc5b786b96fc4d81f565daf | /predictor_ker.py | db39a01a8c2f10a576acb2f85d032224fae85302 | [] | no_license | LeonHardt427/mayi | f04d7d7bca68e0a3a57ca2ef2de14af7db28d2e7 | 679f688a971075794dd3d4ed0a7cbc50931a422f | refs/heads/master | 2020-03-20T05:34:58.677201 | 2018-07-08T03:23:23 | 2018-07-08T03:23:23 | 137,219,188 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,562 | py | # -*- coding: utf-8 -*-
# @Time : 2018/5/29 10:11
# @Author : LeonHardt
# @File : predictor_ker.py
import os
import numpy as np
import pandas as pd
from sklearn.preprocessing import Imputer, OneHotEncoder
import tensorflow as tf
from keras import Sequential
from keras.layers import Dense, Activation
# data_path = os.getcwd()+"/data_error/"
# x_train = np.loadtxt(data_path+"x_train_error93.txt", delimiter=',')
# y_train = np.loadtxt(data_path+"y_train_error93.txt", delimiter=',')
# x_test = np.loadtxt(data_path+"x_test_error93.txt", delimiter=',')
# # print("ready")
# im = Imputer(strategy="most_frequent")
# x_train = im.fit_transform(x_train)
# x_test = im.transform(x_test)
data_path = os.getcwd()+"/data/"
x_train = np.loadtxt(data_path+"x_train_most.txt", delimiter=',')
y_train = np.loadtxt(data_path+"y_train_filter.txt", delimiter=',')
x_test = np.loadtxt(data_path+"x_test_a_most.txt", delimiter=',')
enc = OneHotEncoder()
y_train = enc.fit_transform(y_train.reshape(-1, 1))
print(y_train)
print(y_train.shape)
model = Sequential()
model.add(Dense(input_dim=297, units=297, activation='relu'))
# model.add(Dense(200, activation='relu'))
model.add(Dense(200, activation='relu'))
model.add(Dense(2, activation='sigmoid'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
# cw = {0: 1, 1: 100}
model.fit(x_train, y_train, epochs=20, batch_size=10000)
prob = model.predict_proba(x_test)
np.savetxt(os.getcwd()+"/prediction/ker200_1_error93_1.txt", prob, delimiter=',')
# model.save("merge_model")
| [
"leonhardt427@126.com"
] | leonhardt427@126.com |
3774ffc4fdcb9c86ca755421da7b371e9f1e7d2c | 6f61a105f85f9e4b6b98494b45e96d3099402449 | /kapool/settings.py | 7993700b0e3d7208e963c459ba82c5a60ebc6863 | [
"MIT"
] | permissive | Marah-uwase/carpool | 00ca3b230fbe2bfabb4660cbf8974a902dadc85b | 6ee69e1ad48352a4d1f59f372b41a2891fc58ec7 | refs/heads/models | 2023-02-25T19:52:44.180676 | 2021-02-02T08:22:05 | 2021-02-02T08:22:05 | 334,284,598 | 0 | 0 | null | 2021-02-01T13:21:49 | 2021-01-29T23:14:54 | Python | UTF-8 | Python | false | false | 3,767 | py | """
Django settings for kapool project.
Generated by 'django-admin startproject' using Django 3.1.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
import os
import cloudinary
import cloudinary.uploader
import cloudinary.api
# NOTE(review): ``config`` (python-decouple) is imported but never used below —
# it was presumably intended for reading the secrets that are hard-coded here.
from decouple import config
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the secret key is committed to source control — rotate it and
# load it from the environment (e.g. via decouple's config()) instead.
SECRET_KEY = '(c94#zgkxgtwila5*$=yss0nngan+b9l9&r1+#nrd=cd849p76'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'carpool',
    'app',
    'tinymce',
    'bootstrap4',
    'rest_framework',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'kapool.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, "templates")],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'kapool.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
# NOTE(review): database credentials are hard-coded; no HOST/PORT given, so
# Django connects to the local PostgreSQL socket with defaults.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql',
        'NAME': 'karpool',
        'USER': 'maranatha',
        'PASSWORD':'maman',
    }
}
# NOTE(review): this value has the shape of a Google API key, not a Cloudinary
# api_key, and cloudinary.config() is missing cloud_name/api_secret — verify
# this block actually works and move the secret out of source control.
cloudinary.config(
    api_key = 'AIzaSyCv9Yc1eQAYKqm3qXBpUBfEa-CYW9CVoTQ',
)
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATICFILES_DIRS = ( os.path.join(BASE_DIR, 'static'),)
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
# After login/logout, redirect to the site root.
LOGIN_REDIRECT_URL='/'
LOGOUT_REDIRECT_URL = '/'
# DRF: authenticate API requests with token auth only.
REST_FRAMEWORK = {
    'DEFAULT_AUTHENTICATION_CLASSES': (
        'rest_framework.authentication.TokenAuthentication',
    )
}
| [
"maranahuwase12@gmail.com"
] | maranahuwase12@gmail.com |
be3571f979caec5427a8a79884e69c6e57bd6ecf | 27c1ba6ed6c1586a348cdcfe26d17be13ae38b72 | /scripts/hello_world_pallavisavant.py | c4a681787dd9cfab9a8e69eddfd8f12183f68509 | [] | no_license | codewithgauri/HacktoberfestPR2020 | 4299f2ae8f44b31c6ecbeaefa058fde26327a253 | 335310f3d81029938d119e15d3f1a131d745d3f2 | refs/heads/master | 2022-12-30T20:28:41.937632 | 2020-10-26T06:47:24 | 2020-10-26T06:47:24 | 307,281,958 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 60 | py | #python code to print 'Hello World" :)
print("Hello World") | [
"pallavisavant02@gmail.com"
] | pallavisavant02@gmail.com |
08d2b69ed9e737d8ee5c1f6d6389ece08b8737c4 | 74dd16cb3d4181d4b7b0d1bcfa3aa0c3a617548b | /src/utilities/video_metadata.py | 1545ef9a99282ca5b7d66e4802fd2bebed2c4ba0 | [] | no_license | eliasnieminen/vgs-data-annotation | 4b87a6ece64eb83b6d5d43d34825539a1ff27fc6 | a6569cb52017b88beffa8c1a1332acd9b340646f | refs/heads/main | 2023-07-20T05:13:57.492399 | 2021-08-31T12:16:19 | 2021-08-31T12:16:19 | 398,779,632 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 927 | py | import math
from typing import Optional, Union, Dict
class VideoMetadata:
    """Lightweight record of a video's duration, frame rate and raw metadata.

    All three values may be left as ``None`` at construction time and filled
    in later through the ``set_*`` methods; the read-only properties expose
    them, and ``frame_count`` derives the total number of whole frames.
    """

    def __init__(self,
                 dur: Optional[Union[None, float]] = None,
                 fps: Optional[Union[None, float]] = None,
                 metadata: Optional[Union[None, Dict]] = None):
        # Stored under names distinct from the read-only properties below
        # (duration / fps / metadata) so the two do not collide.
        self.dur = dur
        self.framerate = fps
        self.video_metadata = metadata

    def set_duration(self, duration):
        """Record the clip duration in seconds."""
        self.dur = duration

    def set_fps(self, fps):
        """Record the frame rate in frames per second."""
        self.framerate = fps

    def set_video_metadata(self, metadata):
        """Attach the raw metadata mapping."""
        self.video_metadata = metadata

    @property
    def duration(self):
        """Clip duration in seconds (``None`` until set)."""
        return self.dur

    @property
    def fps(self):
        """Frame rate in frames per second (``None`` until set)."""
        return self.framerate

    @property
    def metadata(self):
        """Raw metadata mapping (``None`` until set)."""
        return self.video_metadata

    @property
    def frame_count(self):
        """Total number of whole frames, i.e. floor(fps * duration)."""
        total = self.framerate * self.duration
        return math.floor(total)
| [
"elias.nieminen@tuni.fi"
] | elias.nieminen@tuni.fi |
b101ab8c3181c1392886b3ff9ddf5ba9a39dd257 | 7ff1ebbaaccd65665bb0fae19746569c043a8f40 | /readcsv.py | 28ea6bc771aee30752a72b42cf17a39c26ce3f94 | [] | no_license | Mutugiii/bot | 1cbc8e8493de5a31b831722c698fd8c7b1f60bf5 | 3d4ccbf8dbe981e2c363ad6f2774deb2d34ac110 | refs/heads/master | 2022-12-11T22:04:53.040939 | 2020-05-20T11:09:54 | 2020-05-20T11:09:54 | 247,986,918 | 0 | 0 | null | 2022-12-08T09:33:14 | 2020-03-17T14:11:54 | Python | UTF-8 | Python | false | false | 77 | py | import pandas
dataformat = pandas.read_csv('csv/data.csv')
print(dataformat) | [
"mutugimutuma@gmail.com"
] | mutugimutuma@gmail.com |
6c16d977d5da188d8203250fd478cfac76c891cc | 85c9d6fdff58b9cb40f5fdb9f01ff1a0dd386113 | /bot_tests/reminder.py | ef7aa772e1bbf39b40113c0d3d7e94d3036748d1 | [] | no_license | jmccormac01/karmafleet | 5874644c496b0bbcb2037404ad7ed43a1e4caaae | 57ebefbbc6ec3aae634cd9196950f103d48eae95 | refs/heads/master | 2020-03-25T17:24:39.187176 | 2019-04-20T18:17:05 | 2019-04-20T18:17:05 | 143,976,406 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,922 | py | """
Bot for converting EVE times to local timezones
"""
from datetime import datetime
from pytz import timezone
import discord
from discord.ext import commands
import asyncio
# pylint: disable=invalid-name
# NOTE(review): ``Client`` is created but never used below; ``client`` (the
# commands.Bot) is the instance that is actually run — confirm Client can go.
Client = discord.Client()
client = commands.Bot(command_prefix="!")
# Pending reminders: {message author: {due datetime: note text}}.
reminders = {}
async def reminder_handler(reminders):
    """
    Background task: every 10 seconds, deliver any reminder whose due time
    has passed by messaging its author, then drop it from ``reminders``.

    ``reminders`` maps author -> {due datetime -> note text}; due times are
    compared against datetime.utcnow(), so they are treated as UTC.
    """
    await client.wait_until_ready()
    # NOTE(review): ``client.is_closed`` is read as an attribute (old
    # discord.py API); in current discord.py it is a method — confirm version.
    while not client.is_closed:
        # Deleting from a dict while iterating it would break the iteration,
        # so at most one reminder is delivered per pass: the ``broke`` flag
        # unwinds both loops right after the deletion and the next pass
        # (10 s later) rescans from scratch.
        broke = False
        print('Checking reminders...')
        print(reminders)
        now = datetime.utcnow()
        for a in reminders:
            print('Checking for author {}'.format(a))
            for t in reminders[a]:
                if now > t:
                    print(a, reminders[a][t])
                    await client.send_message(a, reminders[a][t])
                    # remove the reminder from the list
                    del reminders[a][t]
                    broke = True
                    break
            if broke:
                break
        await asyncio.sleep(10)
@client.event
async def on_ready():
    """
    Simple print to say we're ready
    """
    # Fired once the bot has connected and its internal cache is populated.
    print('Ready for remembering stuff...')
@client.event
async def on_message(message):
    """
    Register a reminder from a "!reminder <time> <note>" chat message.

    Expected form: ``!reminder YYYY-MM-DDTHH:MM some note text``.  The
    triggering message is deleted and the note is stored in the module-level
    ``reminders`` dict keyed by author, then by due time (treated as UTC to
    match the datetime.utcnow() comparison in reminder_handler).
    """
    sp = message.content.split()
    # Only react to well-formed reminder commands: "!reminder <time> <note...>".
    if len(sp) >= 3 and sp[0].lower() == '!reminder':
        author = message.author
        await client.delete_message(message)
        # NOTE(review): strptime raises ValueError on a malformed timestamp,
        # which aborts this handler — confirm that is acceptable.
        reminder_time = datetime.strptime(sp[1], '%Y-%m-%dT%H:%M')
        note = ' '.join(sp[2:])
        if author not in reminders:
            reminders[author] = {}
        reminders[author][reminder_time] = note
        print(reminders)
# Schedule the periodic reminder-delivery task before starting the bot.
client.loop.create_task(reminder_handler(reminders))
# SECURITY NOTE(review): the bot token is hard-coded in source — revoke and
# rotate it, then load it from an environment variable or config file.
client.run('NDk0OTQ2Mzg3ODM5MDI1MTYz.Do66Yw.nsleHS3S8UvbWdBugiDtPWHrIKY')
| [
"jmccormac001@gmail.com"
] | jmccormac001@gmail.com |
367ec183a847084b29dd59bd79ca5db7e7418f61 | fb46511d2fa968e6a2e74a20a67ace59819e15dd | /ProcessedData/trial.py | 781f802ca1ea3ca3e8a244cc236a45615f979a0c | [] | no_license | PushA308/QPQCT | 6505f18907f8af3d9be24ebf21a01cc6603d657a | 6bd99690df213860a4af83f142423e64fa57c34a | refs/heads/master | 2020-04-27T05:03:55.061701 | 2019-03-06T05:02:51 | 2019-03-06T05:02:51 | 174,071,885 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,423 | py | import csv
import nltk
import shutil
import os
import sys
import traceback
import win32com.client #pip install pywin32
import nltk #pip install nltk
#nltk.download('punkt')
#nltk.download('averaged_perceptron_tagger')
#include other ntlk packages, if asked for.
##########################################
#initialized variables
##########################################
question_no_column = 7
start_index = 16
def process_question_paper(ques_paper_path) :
    """Scan ``ques_paper_path`` recursively for .xls question papers and dump
    their question rows into processed_data.csv in the current directory.

    Each workbook is expected to contain a 'QuestionPaper' sheet whose
    question text sits in column ``question_no_column`` from row
    ``start_index`` downwards (module-level constants); sibling fields are
    read at fixed column offsets from the question column.
    Windows-only: drives Excel through win32com.
    """
    fd = open(os.path.join(os.getcwd(),"processed_data.csv"),'w')
    headers = "Marks, CO_Type, Module No, Question Type, Question No, SUb Que No., Question"
    fd.write(headers + '\n')
    for root, dirs,files in os.walk(ques_paper_path) :
        for file in files:
            if file.endswith('.xls') :
                file_path = os.path.join(root,file)
                try:
                    # NOTE(review): Excel is started per workbook and never
                    # Quit(), so Excel processes may be leaked.
                    excel = win32com.client.Dispatch('Excel.Application')
                    workbook = excel.Workbooks.open(file_path)
                    sheet = workbook.WorkSheets('QuestionPaper')
                    # Rows start_index..49 are assumed to hold the questions.
                    for start_row in range(start_index, 50):
                        try:
                            row, col = start_row, question_no_column
                            question = sheet.Cells(row, col).value
                            if question is not None:
                                row, col = start_row, question_no_column + 1
                                marks = str(sheet.Cells(row, col).value)
                                row, col = start_row, question_no_column + 2
                                co_type = str(sheet.Cells(row, col).value)
                                row, col = start_row, question_no_column + 4
                                module_no = str(sheet.Cells(row, col).value)
                                row, col = start_row, question_no_column - 5
                                question_type = sheet.Cells(row, col).value
                                row, col = start_row, question_no_column - 2
                                question_no = sheet.Cells(row, col).value
                                row, col = start_row, question_no_column - 1
                                sub_question_no = sheet.Cells(row, col).value
                                # NOTE(review): question_no and question are
                                # re-read here — duplicate of the reads above.
                                row, col = start_row, question_no_column - 2
                                question_no = sheet.Cells(row, col).value
                                row, col = start_row, question_no_column
                                question = sheet.Cells(row, col).value
                                print (question+'\n')
                                # NOTE(review): naive comma join — a comma
                                # inside the question text corrupts the CSV.
                                fd.write(marks + ','+co_type + ',' + module_no + ',' +question_type + ','+ question_no + ',' + sub_question_no + ',' +question + '\n')
                            else:
                                pass
                        except Exception as e:
                            # NOTE(review): debug leftover; the exception is
                            # swallowed so malformed rows are silently skipped.
                            print ("hhj")
                            pass
                    workbook.Close(True)
                except Exception as e:
                    # Workbook could not be opened/read; skip this file.
                    print ("ERROR")
                    pass
    fd.close()
def extract_verb(sentence):
    """Pick out the main (non-auxiliary) verbs of *sentence*.

    The sentence is tokenised and POS-tagged with NLTK; every token whose
    tag starts with 'V' and which is not a helping (auxiliary) verb is kept.
    When no such verb is found, the single placeholder "N.A" is used.
    The resulting row is appended to File/question_verb.csv and returned.
    """
    helping_verbs = {'am', 'are', 'is', 'was', 'were', 'be', 'being', 'been',
                     'have', 'has', 'had', 'shall', 'will', 'do', 'does',
                     'did', 'may', 'might', 'must', 'can', 'could', 'would',
                     'should'}
    tagged = nltk.pos_tag(nltk.word_tokenize(sentence))
    # Keep verb tokens (tags VB, VBD, VBG, ...) that are not auxiliaries.
    verbs = [word for word, tag in tagged
             if tag[0] == 'V' and word not in helping_verbs]
    if not verbs:
        verbs = ["N.A"]
    with open("File/question_verb.csv", "a", newline='') as csvfile:
        csv.writer(csvfile).writerow(verbs)
    return verbs
#analysis of question paper using verbs
def final(k):
    """Reduce File/level.csv (one row of candidate levels per question) to a
    single best level per question, appended to File/Value_level.csv.

    When ``k == 1`` the user is additionally prompted for the maximum
    taxonomy level and an average quality percentage is printed.
    """
    fs = open("File/level.csv","r")
    reader = csv.reader(fs)
    i=0
    a=list()
    # NOTE(review): b and d are never used below.
    b=0
    d=0
    for row in reader:
        with open("File/Value_level.csv","a",newline='') as csvfile:
            spamwriter = csv.writer(csvfile)
            # NOTE(review): max(row) compares the level *strings*, and
            # writerow() then iterates that string character by character —
            # fine for single-digit levels 0-6, broken for multi-digit values.
            spamwriter.writerow(max(row))
        a.append(max(row))
        i=i+1
    fs.close()
    if k==1:
        res=list(map(int,a))
        print(res)
        p=sum(res)  # total of the per-question levels
        print(i)
        print(p)
        l=int(input("input the level from 1-6: "))
        # Percentage of the maximum achievable score i * l.
        k=(p/(i*l))*100
        print("Average quality per question is {}".format(k))
    # Harmless second close (already closed after the loop above).
    fs.close()
#calculating verb's level using bloom's taxonomy
def calculate_level(line):
    """Map each verb in *line* to its Bloom's-taxonomy level and append the
    resulting level row to File/level.csv.

    A verb's level is the 1-based row number of File/bloom verbs.csv that
    contains it (case-insensitively on the verb side); a verb may match
    several rows, contributing one level entry per match.  A verb matching
    no row contributes a 0.

    Bug fix vs. the original: ``csv.reader`` is a single-pass iterator, so
    the old code exhausted the taxonomy rows on the first verb and every
    later verb silently matched nothing; the no-match flag was also never
    reset per verb.  The rows are now materialised once and rescanned for
    each verb, and the 0 marker is recorded per verb.
    """
    with open("File/bloom verbs.csv", "r") as fs:
        taxonomy = list(csv.reader(fs))
    a = []
    for word in line:
        matched = False
        for level, row in enumerate(taxonomy, start=1):
            if word.lower() in row:
                a.append(level)
                matched = True
        if not matched:
            a.append(0)
    with open("File/level.csv", "a", newline='') as csvfile:
        csv.writer(csvfile).writerow(a)
def view_table():
f=open("File/your_csv1.csv","r")
#reader=csv.reader(f)
f1=open("File/question_verb.csv","r")
reader1=csv.reader(f1)
rows1 = list(reader1)
print("-------------------------------------------/n")
print(rows1)
#for row in reader1:
# print(row)
included_cols=[0]
included_cols1=[1]
included_cols2=[2]
i=1
def compare_Type():
f1=open("File/bloom_type.csv","r")
f2=open("File/Value_level.csv","r")
r1 = list(f1)
length = len(r1)
r2 = list(f2)
sum=0
for i in range(length):
if r1[i]==r2[i]:
k=abs(int(r1[i])-int(r2[i]))
sum=sum+k
print(chr(ord('A') + k))
else:
k=abs(int(r1[i])-int(r2[i]))
sum=sum+k
print(chr(ord('A') + k))
print("Avg quality per question: "+chr(ord('A')+int(sum/length)))
#Start:
if __name__ == "__main__" :
arg_cnt = len(sys.argv)
if arg_cnt > 1:
ques_paper_path = sys.argv[1]
process_question_paper(ques_paper_path)
else:
print ("Please provide question paper directory path !")
f = open("processed_data.csv","r")
reader = csv.reader(f)
#out_file = open("File\solution1.csv", "w")
#writer = csv.writer(out_file)
add=0
included_cols = [2]
included_cols1=[0]
row = next(reader)
for row in reader:
content = list(row[i] for i in included_cols) #selecting question
content1 = list(row[i] for i in included_cols1) #selecting question type
with open("File/bloom_type.csv","a",newline='') as csvfile:
spamwriter = csv.writer(csvfile)
if content1[0]=="remembering":
spamwriter.writerow("1")
elif content1[0]=="understanding":
spamwriter.writerow("2")
elif content1[0]=="applying":
spamwriter.writerow("3")
elif content1[0]=="analyzing":
spamwriter.writerow("4")
elif content1[0]=="evaluating":
spamwriter.writerow("5")
elif content1[0]=="creating":
spamwriter.writerow("6")
a=extract_verb(content[0])
print(a)
calculate_level(a)
k=int(input("Select the option for Analysis of Question Paper:\n1.Verbs\n2.Question Type\n3.Course Outcome"))
if k==1:
final(k)
elif k==2:
final(k)
compare_Type()
v=int(input("View Information:\n1.Question Paper\n2.Verbs\n3.Bloom's Level\n"))
if v==1:
f = open("File/UOS_paper.csv","r")
reader = csv.reader(f)
for row in reader:
print(row)
f.close()
elif v==2:
f = open("File/question_verb.csv","r")
reader = csv.reader(f)
for row in reader:
print(row)
f.close()
elif v==3:
f = open("File/Value_level.csv","r")
reader = csv.reader(f)
for row in reader:
print(row)
f.close()
#print(a)
f.close()
| [
"noreply@github.com"
] | PushA308.noreply@github.com |
0dca7a66a1da77d96fed23a3f91e8168a80f5e26 | 0ee64034518898893d495639cb01aa9523789f77 | /2018 Materials/Resources/Week 4/RaspberryPi.py | 63b9c98778493a0ca93bb145474cbfe01bd4c169 | [
"MIT"
] | permissive | Phangster/digital-world-for-normal-humans | 31187b47e16d4359fce2ecac2ce7b5c1aa88d909 | 29a479af2e380bdf691f6487167d0d8edf0ba5ed | refs/heads/master | 2020-05-07T19:52:37.655375 | 2018-12-29T14:32:35 | 2018-12-29T14:32:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,039 | py | import RPi.GPIO as GPIO
from time import sleep
from firebase import firebase
url = "https://internet-of-things-c572e.firebaseio.com/"
# SECURITY NOTE(review): hard-coded Firebase database secret — revoke/rotate
# it and load it from the environment instead of source control.
token = 'qcHr20bWwg1ziqik58l39JD8UlcLLIGa8HJ0DaSC'
# NOTE(review): this rebinding shadows the imported ``firebase`` module name.
firebase = firebase.FirebaseApplication(url, token)
# Use the BCM GPIO numbers as the numbering scheme.
GPIO.setmode(GPIO.BCM)
# Use GPIO12, 16, 20 and 21 for the buttons.
s1 = 12
s2 = 16
s3 = 20
s4 = 21
switch_list = [12, 16, 20, 21]
# Set GPIO numbers in the list: [12, 16, 20, 21] as input with pull-down resistor.
movement_list = []
GPIO.setup(switch_list, GPIO.IN, GPIO.PUD_DOWN)
done = False
# Record button presses (left/right/up) until the "done" button (GPIO21) is
# pressed, then upload the whole sequence to Firebase.
# NOTE(review): when no button is pressed this loop spins with no sleep at
# all, pinning a CPU core; the 0.1 s sleeps only debounce after a press.
while done == False:
    if GPIO.input(12) == GPIO.HIGH:
        movement_list.append('left')
        print('Left added.')
        sleep(0.1)
    elif GPIO.input(16) == GPIO.HIGH:
        movement_list.append('right')
        print('Right added.')
        sleep(0.1)
    elif GPIO.input(20) == GPIO.HIGH:
        movement_list.append('up')
        print('Up added.')
        sleep(0.1)
    elif GPIO.input(21) == GPIO.HIGH:
        movement_list.append('done')
        print('Terminating control, uploading sequence to Firebase.')
        firebase.put('/','movement_list', movement_list)
        done = True
        break
# Wait until the consumer deletes /movement_list from the database.
# NOTE(review): while the node still exists this polls Firebase as fast as
# possible (the sleep only runs once the node is gone) — consider sleeping
# on every iteration.
while done==True:
    a=firebase.get('/movement_list') # get the value from node age
    if a == None:
        done=False
        sleep(0.5)
# Write your code here
'''
We loop through the key (button name), value (gpio number) pair of the buttons
dictionary and check whether the button at the corresponding GPIO is being
pressed. When the OK button is pressed, we will exit the while loop and
write the list of movements (movement_list) to the database. Any other button
press would be stored in the movement_list.
Since there may be debouncing issue due to the mechanical nature of the buttons,
we can address it by putting a short delay between each iteration after a key
press has been detected.
'''
# Write to database once the OK button is pressed
"thaddeus.phua@gmail.com"
] | thaddeus.phua@gmail.com |
abb40cfd7886a6089a10fff801f6ff4840838feb | a5a99f646e371b45974a6fb6ccc06b0a674818f2 | /RecoBTag/ONNXRuntime/python/pfParticleNetFromMiniAODAK4DiscriminatorsJetTags_cfi.py | b09fabc5e9632fe7d6cba6adb353d5a7f3afbfa9 | [
"Apache-2.0"
] | permissive | cms-sw/cmssw | 4ecd2c1105d59c66d385551230542c6615b9ab58 | 19c178740257eb48367778593da55dcad08b7a4f | refs/heads/master | 2023-08-23T21:57:42.491143 | 2023-08-22T20:22:40 | 2023-08-22T20:22:40 | 10,969,551 | 1,006 | 3,696 | Apache-2.0 | 2023-09-14T19:14:28 | 2013-06-26T14:09:07 | C++ | UTF-8 | Python | false | false | 18,375 | py | import FWCore.ParameterSet.Config as cms
# Combined "X vs Y" discriminants built from the per-class scores of the
# ParticleNet-from-MiniAOD tagger for central AK4 Puppi jets.  Each PSet
# names one discriminator and lists the score tags entering its numerator
# and denominator (presumably combined as a probability ratio by the
# BTagProbabilityToDiscriminator plugin — see its CMSSW documentation).
pfParticleNetFromMiniAODAK4PuppiCentralDiscriminatorsJetTags = cms.EDProducer(
    'BTagProbabilityToDiscriminator',
    discriminators = cms.VPSet(
        # b jets vs all other jet flavours (c, uds, g).
        cms.PSet(
            name = cms.string('BvsAll'),
            numerator = cms.VInputTag(
                cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probb'),
            ),
            denominator=cms.VInputTag(
                cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probb'),
                cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probc'),
                cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probuds'),
                cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probg'),
            ),
        ),
        # c jets vs light jets (uds, g).
        cms.PSet(
            name = cms.string('CvsL'),
            numerator = cms.VInputTag(
                cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probc'),
            ),
            denominator = cms.VInputTag(
                cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probc'),
                cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probuds'),
                cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probg'),
            ),
        ),
        # c jets vs b jets.
        cms.PSet(
            name = cms.string('CvsB'),
            numerator = cms.VInputTag(
                cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probc'),
            ),
            denominator = cms.VInputTag(
                cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probc'),
                cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probb'),
            ),
        ),
        # light-quark jets vs gluon jets.
        cms.PSet(
            name = cms.string('QvsG'),
            numerator = cms.VInputTag(
                cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probuds'),
            ),
            denominator = cms.VInputTag(
                cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probuds'),
                cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probg'),
            ),
        ),
        # hadronic taus vs quark/gluon jets; the numerator sums all ten tau
        # decay-mode scores (tau charge x prongs x pi0 multiplicity).
        cms.PSet(
            name = cms.string('TauVsJet'),
            numerator = cms.VInputTag(
                cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaup1h0p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaup1h1p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaup1h2p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaup3h0p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaup3h1p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaum1h0p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaum1h1p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaum1h2p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaum3h0p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaum3h1p'),
            ),
            denominator = cms.VInputTag(
                cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probb'),
                cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probc'),
                cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probuds'),
                cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probg'),
                cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaup1h0p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaup1h1p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaup1h2p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaup3h0p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaup3h1p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaum1h0p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaum1h1p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaum1h2p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaum3h0p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaum3h1p'),
            ),
        ),
        # hadronic taus vs electrons (same tau decay-mode sum as above).
        cms.PSet(
            name = cms.string('TauVsEle'),
            numerator = cms.VInputTag(
                cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaup1h0p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaup1h1p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaup1h2p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaup3h0p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaup3h1p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaum1h0p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaum1h1p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaum1h2p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaum3h0p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaum3h1p'),
            ),
            denominator = cms.VInputTag(
                cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probele'),
                cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaup1h0p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaup1h1p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaup1h2p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaup3h0p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaup3h1p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaum1h0p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaum1h1p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaum1h2p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaum3h0p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaum3h1p'),
            ),
        ),
        # hadronic taus vs muons (same tau decay-mode sum as above).
        cms.PSet(
            name = cms.string('TauVsMu'),
            numerator = cms.VInputTag(
                cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaup1h0p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaup1h1p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaup1h2p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaup3h0p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaup3h1p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaum1h0p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaum1h1p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaum1h2p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaum3h0p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaum3h1p'),
            ),
            denominator = cms.VInputTag(
                cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probmu'),
                cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaup1h0p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaup1h1p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaup1h2p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaup3h0p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaup3h1p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaum1h0p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaum1h1p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaum1h2p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaum3h0p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaum3h1p'),
            ),
        ),
    )
)
# Forward AK4 Puppi jets: only a light-quark vs gluon discriminant is built
# (the forward tagger exposes just probq/probg scores).
pfParticleNetFromMiniAODAK4PuppiForwardDiscriminatorsJetTags = cms.EDProducer(
    'BTagProbabilityToDiscriminator',
    discriminators = cms.VPSet(
        cms.PSet(
            name = cms.string('QvsG'),
            numerator = cms.VInputTag(
                cms.InputTag('pfParticleNetFromMiniAODAK4PuppiForwardJetTags', 'probq'),
            ),
            denominator = cms.VInputTag(
                cms.InputTag('pfParticleNetFromMiniAODAK4PuppiForwardJetTags', 'probq'),
                cms.InputTag('pfParticleNetFromMiniAODAK4PuppiForwardJetTags', 'probg'),
            ),
        ),
    )
)
# Same set of combined discriminants as the Puppi-central producer above,
# but reading the scores of the CHS-central ParticleNet tagger.
pfParticleNetFromMiniAODAK4CHSCentralDiscriminatorsJetTags = cms.EDProducer(
    'BTagProbabilityToDiscriminator',
    discriminators = cms.VPSet(
        # b jets vs all other jet flavours (c, uds, g).
        cms.PSet(
            name = cms.string('BvsAll'),
            numerator = cms.VInputTag(
                cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probb'),
            ),
            denominator=cms.VInputTag(
                cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probb'),
                cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probc'),
                cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probuds'),
                cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probg'),
            ),
        ),
        # c jets vs light jets (uds, g).
        cms.PSet(
            name = cms.string('CvsL'),
            numerator = cms.VInputTag(
                cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probc'),
            ),
            denominator = cms.VInputTag(
                cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probc'),
                cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probuds'),
                cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probg'),
            ),
        ),
        # c jets vs b jets.
        cms.PSet(
            name = cms.string('CvsB'),
            numerator = cms.VInputTag(
                cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probc'),
            ),
            denominator = cms.VInputTag(
                cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probc'),
                cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probb'),
            ),
        ),
        # light-quark jets vs gluon jets.
        cms.PSet(
            name = cms.string('QvsG'),
            numerator = cms.VInputTag(
                cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probuds'),
            ),
            denominator = cms.VInputTag(
                cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probuds'),
                cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probg'),
            ),
        ),
        # hadronic taus vs quark/gluon jets; the numerator sums all ten tau
        # decay-mode scores (tau charge x prongs x pi0 multiplicity).
        cms.PSet(
            name = cms.string('TauVsJet'),
            numerator = cms.VInputTag(
                cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaup1h0p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaup1h1p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaup1h2p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaup3h0p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaup3h1p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaum1h0p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaum1h1p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaum1h2p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaum3h0p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaum3h1p'),
            ),
            denominator = cms.VInputTag(
                cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probb'),
                cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probc'),
                cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probuds'),
                cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probg'),
                cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaup1h0p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaup1h1p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaup1h2p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaup3h0p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaup3h1p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaum1h0p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaum1h1p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaum1h2p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaum3h0p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaum3h1p'),
            ),
        ),
        # hadronic taus vs electrons (same tau decay-mode sum as above).
        cms.PSet(
            name = cms.string('TauVsEle'),
            numerator = cms.VInputTag(
                cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaup1h0p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaup1h1p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaup1h2p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaup3h0p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaup3h1p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaum1h0p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaum1h1p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaum1h2p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaum3h0p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaum3h1p'),
            ),
            denominator = cms.VInputTag(
                cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probele'),
                cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaup1h0p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaup1h1p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaup1h2p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaup3h0p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaup3h1p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaum1h0p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaum1h1p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaum1h2p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaum3h0p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaum3h1p'),
            ),
        ),
        # hadronic taus vs muons (same tau decay-mode sum as above).
        cms.PSet(
            name = cms.string('TauVsMu'),
            numerator = cms.VInputTag(
                cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaup1h0p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaup1h1p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaup1h2p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaup3h0p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaup3h1p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaum1h0p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaum1h1p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaum1h2p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaum3h0p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaum3h1p'),
            ),
            denominator = cms.VInputTag(
                cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probmu'),
                cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaup1h0p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaup1h1p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaup1h2p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaup3h0p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaup3h1p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaum1h0p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaum1h1p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaum1h2p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaum3h0p'),
                cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaum3h1p'),
            ),
        ),
    )
)
# Forward AK4 CHS jets: only a light-quark vs gluon discriminant is built
# (the forward tagger exposes just probq/probg scores).
pfParticleNetFromMiniAODAK4CHSForwardDiscriminatorsJetTags = cms.EDProducer(
    'BTagProbabilityToDiscriminator',
    discriminators = cms.VPSet(
        cms.PSet(
            name = cms.string('QvsG'),
            numerator = cms.VInputTag(
                cms.InputTag('pfParticleNetFromMiniAODAK4CHSForwardJetTags', 'probq'),
            ),
            denominator = cms.VInputTag(
                cms.InputTag('pfParticleNetFromMiniAODAK4CHSForwardJetTags', 'probq'),
                cms.InputTag('pfParticleNetFromMiniAODAK4CHSForwardJetTags', 'probg'),
            ),
        ),
    )
)
"stephane.b.cooperstein@cern.ch"
] | stephane.b.cooperstein@cern.ch |
4c2ce9f4572cc0369d582cfe65ef86a9f3d7106a | e9530da3f17f990a3fade9c8c442ad3fbb4befc4 | /test.py | 3b60dca7cebd68376caafb431dc6465cd28133bf | [] | no_license | mattyhempstead/syncs-hack-2020 | 9796565c03560c76f0a4402ded1a536f0f3f7fc8 | 6e3d69070dad3228ed8bed3eb805dc090d52b56f | refs/heads/master | 2022-12-08T10:23:49.395788 | 2020-08-30T01:00:35 | 2020-08-30T01:00:35 | 290,966,904 | 5 | 0 | null | 2020-08-29T11:27:44 | 2020-08-28T06:17:23 | JavaScript | UTF-8 | Python | false | false | 490 | py | import binascii
text = "https://www.google.com/"
binary_conversion = bin(int.from_bytes(text.encode(), 'big'))
binary_conversion = binary_conversion[2:]
for count,i in enumerate(binary_conversion):
time = 0.5
sound_array = []
if count%8 == 0:
sound_array.append(0)
base_one = 220
base_two = 440
else:
base_one = 320
base_two = 550
if i == 0:
sound_array.append(base_one)
else:
sound_array.append(base_two)
| [
"pranav.alavandi"
] | pranav.alavandi |
b588764a31f012d092aa5fbb402b4f34eead4552 | 1071b46a6ea054a186ab5c270dfdba48362adf70 | /Python/Examples/juego_adivina_el_numero.py | 5231d40690c74099e97d77b93aef36975ff2fe0d | [] | no_license | diegoldsv/technotes | 5aaed2d6ef5037217a0c071b6f7b48b04d89d4fd | 6cb0b90001c52438b74da72c02c664164938d7e9 | refs/heads/main | 2023-05-10T22:08:21.189916 | 2021-05-31T14:14:13 | 2021-05-31T14:14:13 | 351,212,081 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 557 | py | import random
def run():
    """Interactive game: the user guesses a secret number between 1 and 100.

    Prints a higher/lower hint after each wrong guess and a win message
    once the secret number is entered.
    """
    secreto = random.randint(1, 100)
    intento = int(input("Elige un número del 1 al 100: "))
    while intento != secreto:
        pista = ("Busca un número más grande"
                 if intento < secreto
                 else "Busca un número más pequeño")
        print(pista)
        intento = int(input("Elige otro número: "))
    print("Ganaste!")


if __name__ == "__main__":
    run()
"disalvatorediego@gmail.com"
] | disalvatorediego@gmail.com |
ce3bfb840d3411bd2a1255ab453499c357ba459b | f407b21811c8eebbf1c32d6aadc502403d83d048 | /problem19.py | edb8fdb3cd28c83eb54faa6fca3eb45a9fee0301 | [] | no_license | gnikesh/project-euler | 37e95cbc0c82ff54ddb23b89f4f38067ec69d5c8 | 0d39c7b78fc2e11d2f863e7ae40fb27f93a18fbc | refs/heads/master | 2021-08-20T04:53:54.266867 | 2021-01-20T23:21:00 | 2021-01-20T23:21:00 | 87,681,964 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,484 | py | # 1 Jan 1900 was a Monday.
# Thirty days has September,
# April, June and November.
# All the rest have thirty-one,
# Saving February alone,
# Which has twenty-eight, rain or shine.
# And on leap years, twenty-nine.
# A leap year occurs on any year evenly divisible by 4, but not on a century unless it is divisible by 400.
# How many Sundays fell on the first of the month during the twentieth century (1 Jan 1901 to 31 Dec 2000)?
def get_days():
    """Count Sundays that fell on the first of the month, 1901-2000.

    Simulates the calendar day by day starting from Monday, 1 Jan 1900
    (Project Euler problem 19), printing every simulated day and finally
    the Sunday count (171).  Year 1900 is simulated only to advance the
    weekday pointer and is excluded from the count.
    """
    week_days = ["Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"]
    month_30_days = [4, 6, 9, 11]
    month_31_days = [1, 3, 5, 7, 8, 10, 12]
    month_28_days = [2]

    cur_day = 1  # index into week_days: 1 Jan 1900 was a Monday
    sundays = 0
    for year in range(1900, 2001):
        for month in range(1, 13):
            if month in month_30_days:
                days = 30
            elif month in month_31_days:
                days = 31
            elif month in month_28_days:
                # 1900 is a century year not divisible by 400, so it is not
                # a leap year; every other multiple of 4 in this range
                # (including 2000) is.
                if year % 4 == 0 and not year == 1900:
                    days = 29
                else:
                    days = 28
            for day in range(1, days + 1):
                today = week_days[cur_day]
                if today == "Sun" and day == 1 and year != 1900:
                    sundays += 1
                print("Year: ", year, "Month: ", month, "Day: ", day, today)
                cur_day += 1
                cur_day = cur_day % 7
    print(sundays)


if __name__ == "__main__":
    get_days()
| [
"gnikesh03@gmail.com"
] | gnikesh03@gmail.com |
5a1a67ef9e36c7013d262a0ec9e876fcec96d9c0 | 75a0e169a7b45a95b5d0de639b12ae2b601af236 | /worker.py | 94d496b3e2f01dd00dc3ff71faa400d71db56822 | [] | no_license | john-peterson/goodreads | 65722ef88f66c1ff00a22f308b2497c03cf44a5e | 0cf6d294cef6d7d4e1e4526ae02777d206f19ca3 | refs/heads/master | 2021-01-20T21:31:49.690764 | 2012-12-22T06:50:56 | 2012-12-22T06:50:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,895 | py | #!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2011, Grant Drake <grant.drake@gmail.com>'
__docformat__ = 'restructuredtext en'
import socket, re, datetime
from collections import OrderedDict
from threading import Thread
from lxml.html import fromstring, tostring
from calibre.ebooks.metadata.book.base import Metadata
from calibre.library.comments import sanitize_comments_html
from calibre.utils.cleantext import clean_ascii_chars
import calibre_plugins.goodreads.config as cfg
class Worker(Thread): # Get details
    '''
    Get book details from Goodreads book page in a separate thread
    '''
    # NOTE(review): this is legacy Python 2 code (`unicode`, `dict.iteritems`)
    # from a calibre metadata-source plugin; results are calibre Metadata
    # objects pushed onto result_queue.
    def __init__(self, url, result_queue, browser, log, relevance, plugin, timeout=20):
        # One worker per candidate Goodreads book URL.
        Thread.__init__(self)
        self.daemon = True
        self.url, self.result_queue = url, result_queue
        self.log, self.timeout = log, timeout
        self.relevance, self.plugin = relevance, plugin
        # Clone so this thread has its own browser state.
        self.browser = browser.clone_browser()
        self.cover_url = self.goodreads_id = self.isbn = None
    def run(self):
        # Thread entry point: any failure is logged, never propagated.
        try:
            self.get_details()
        except:
            self.log.exception('get_details failed for url: %r'%self.url)
    def get_details(self):
        # Fetch the book page and hand the parsed DOM to parse_details().
        # Network errors, 404s, timeouts and unparseable pages are logged
        # and abort silently (no result is queued).
        try:
            self.log.info('Goodreads book url: %r'%self.url)
            raw = self.browser.open_novisit(self.url, timeout=self.timeout).read().strip()
        except Exception as e:
            if callable(getattr(e, 'getcode', None)) and \
                e.getcode() == 404:
                self.log.error('URL malformed: %r'%self.url)
                return
            attr = getattr(e, 'args', [None])
            attr = attr if attr else [None]
            if isinstance(attr[0], socket.timeout):
                msg = 'Goodreads timed out. Try again later.'
                self.log.error(msg)
            else:
                msg = 'Failed to make details query: %r'%self.url
                self.log.exception(msg)
            return
        raw = raw.decode('utf-8', errors='replace')
        #open('c:\\goodreads.html', 'wb').write(raw)
        if '<title>404 - ' in raw:
            self.log.error('URL malformed: %r'%self.url)
            return
        try:
            root = fromstring(clean_ascii_chars(raw))
        except:
            msg = 'Failed to parse goodreads details page: %r'%self.url
            self.log.exception(msg)
            return
        try:
            # Look at the <title> attribute for page to make sure that we were actually returned
            # a details page for a book. If the user had specified an invalid ISBN, then the results
            # page will just do a textual search.
            title_node = root.xpath('//title')
            if title_node:
                page_title = title_node[0].text_content().strip()
                if page_title is None or page_title.find('search results for') != -1:
                    self.log.error('Failed to see search results in page title: %r'%self.url)
                    return
        except:
            msg = 'Failed to read goodreads page title: %r'%self.url
            self.log.exception(msg)
            return
        errmsg = root.xpath('//*[@id="errorMessage"]')
        if errmsg:
            msg = 'Failed to parse goodreads details page: %r'%self.url
            msg += tostring(errmsg, method='text', encoding=unicode).strip()
            self.log.error(msg)
            return
        self.parse_details(root)
    def parse_details(self, root):
        # Build a calibre Metadata object from the parsed page. Each field
        # is parsed independently; a failure in one field is logged and the
        # remaining fields are still attempted. Title, authors and the
        # goodreads id are mandatory - without them nothing is queued.
        try:
            goodreads_id = self.parse_goodreads_id(self.url)
        except:
            self.log.exception('Error parsing goodreads id for url: %r'%self.url)
            goodreads_id = None
        try:
            (title, series, series_index) = self.parse_title_series(root)
        except:
            self.log.exception('Error parsing title and series for url: %r'%self.url)
            title = series = series_index = None
        try:
            authors = self.parse_authors(root)
        except:
            self.log.exception('Error parsing authors for url: %r'%self.url)
            authors = []
        if not title or not authors or not goodreads_id:
            self.log.error('Could not find title/authors/goodreads id for %r'%self.url)
            self.log.error('Goodreads: %r Title: %r Authors: %r'%(goodreads_id, title,
                authors))
            return
        mi = Metadata(title, authors)
        if series:
            mi.series = series
            mi.series_index = series_index
        mi.set_identifier('goodreads', goodreads_id)
        self.goodreads_id = goodreads_id
        try:
            isbn = self.parse_isbn(root)
            if isbn:
                self.isbn = mi.isbn = isbn
        except:
            self.log.exception('Error parsing ISBN for url: %r'%self.url)
        try:
            mi.rating = self.parse_rating(root)
        except:
            self.log.exception('Error parsing ratings for url: %r'%self.url)
        try:
            mi.comments = self.parse_comments(root)
        except:
            self.log.exception('Error parsing comments for url: %r'%self.url)
        try:
            self.cover_url = self.parse_cover(root)
        except:
            self.log.exception('Error parsing cover for url: %r'%self.url)
        mi.has_cover = bool(self.cover_url)
        try:
            tags = self.parse_tags(root)
            if tags:
                mi.tags = tags
        except:
            self.log.exception('Error parsing tags for url: %r'%self.url)
        try:
            mi.publisher, mi.pubdate = self.parse_publisher_and_date(root)
        except:
            self.log.exception('Error parsing publisher and date for url: %r'%self.url)
        mi.source_relevance = self.relevance
        # Cache isbn->id and id->cover mappings so later cover/identify
        # requests for the same book can skip the page fetch.
        if self.goodreads_id:
            if self.isbn:
                self.plugin.cache_isbn_to_identifier(self.isbn, self.goodreads_id)
            if self.cover_url:
                self.plugin.cache_identifier_to_cover_url(self.goodreads_id,
                    self.cover_url)
        self.plugin.clean_downloaded_metadata(mi)
        self.result_queue.put(mi)
    def parse_goodreads_id(self, url):
        # Numeric book id embedded in URLs like .../book/show/<id>-slug.
        return re.search('/show/(\d+)', url).groups(0)[0]
    def parse_title_series(self, root):
        # Returns (title, series_name, series_index) with the series parts
        # None when the page title carries no parseable series suffix.
        title_node = root.xpath('//div[@id="metacol"]/h1[@id="bookTitle"]')
        if not title_node:
            return (None, None, None)
        title_text = title_node[0].text_content().strip()
        if title_text.find('(') == -1:
            return (title_text, None, None)
        # Contains a Title and possibly a series. Possible values currently handled:
        # "Some title (Omnibus)"
        # "Some title (#1-3)"
        # "Some title (Series #1)"
        # "Some title (Series (digital) #1)"
        # "Some title (Series #1-5)"
        # "Some title (NotSeries #2008 Jan)"
        # "Some title (Omnibus) (Series #1)"
        # "Some title (Omnibus) (Series (digital) #1)"
        # "Some title (Omnibus) (Series (digital) #1-5)"
        text_split = title_text.rpartition('(')
        title = text_split[0]
        series_info = text_split[2]
        hash_pos = series_info.find('#')
        if hash_pos <= 0:
            # Cannot find the series # in expression or at start like (#1-7)
            # so consider whole thing just as title
            title = title_text
            series_info = ''
        else:
            # Check to make sure we have got all of the series information
            series_info = series_info[:len(series_info)-1] #Strip off trailing ')'
            while series_info.count(')') != series_info.count('('):
                title_split = title.rpartition('(')
                title = title_split[0].strip()
                series_info = title_split[2] + '(' + series_info
        if series_info:
            series_partition = series_info.rpartition('#')
            series_name = series_partition[0].strip()
            if series_name.endswith(','):
                series_name = series_name[:-1]
            series_index = series_partition[2].strip()
            if series_index.find('-'):
                # The series is specified as 1-3, 1-7 etc.
                # In future we may offer config options to decide what to do,
                # such as "Use start number", "Use value xxx" like 0 etc.
                # For now will just take the start number and use that
                series_index = series_index.partition('-')[0].strip()
            try:
                return (title.strip(), series_name, float(series_index))
            except ValueError:
                # We have a series index which isn't really a series index
                title = title_text
        return (title.strip(), None, None)
    def parse_authors(self, root):
        # Returns a list of author names. Behaviour depends on the plugin
        # config: either take every listed author, or filter by contribution
        # type (see inline comments below).
        get_all_authors = cfg.plugin_prefs[cfg.STORE_NAME][cfg.KEY_GET_ALL_AUTHORS]
        if get_all_authors:
            author_node = root.xpath('//div[@id="metacol"]/div[@id="bookAuthors"]/a[@class="authorName"]/span[@itemprop="name"]')
            if author_node:
                authors = []
                for author_value in author_node:
                    author = tostring(author_value, method='text', encoding=unicode).strip()
                    # If multiple authors with some as editors can result in a trailing , to remove
                    if author[-1:] == ',':
                        author = author[:len(author)-1]
                    authors.append(author)
                return authors
        else:
            # We need to more carefully look at the authors to only bring them in if:
            # 1. They have no author type specified
            # 2. They have an author type of 'Goodreads Author'
            # 3. There are no authors from 1&2 and they have an author type of 'Editor'
            div_authors = root.xpath('//div[@id="metacol"]/div[@id="bookAuthors"]')
            if not div_authors:
                return
            authors_html = tostring(div_authors[0], method='text', encoding=unicode).replace('\n','').strip()
            if authors_html.startswith('by'):
                authors_html = authors_html[2:]
            authors_type_map = OrderedDict()
            for a in authors_html.split(','):
                author = a.strip()
                if author.startswith('more...'):
                    author = author[7:]
                elif author.endswith('...less'):
                    author = author[:-7]
                author_parts = author.strip().split('(')
                if len(author_parts) == 1:
                    authors_type_map[author_parts[0]] = ''
                else:
                    authors_type_map[author_parts[0]] = author_parts[1][:-1]
            # At this point we have a dict of authors with their contribution if any in values
            authors = []
            valid_contrib = None
            for a, contrib in authors_type_map.iteritems():
                if not contrib or contrib == 'Goodreads Author':
                    authors.append(a)
                elif len(authors) == 0:
                    authors.append(a)
                    valid_contrib = contrib
                elif contrib == valid_contrib:
                    authors.append(a)
                else:
                    break
            return authors
    def parse_rating(self, root):
        # Average rating; the page sometimes reports it scaled by 100
        # (digits only, no decimal point), hence the >= 100 correction.
        rating_node = root.xpath('//div[@id="metacol"]/div[@id="bookMeta"]/span[@class="value rating"]/span')
        if rating_node:
            rating_text = tostring(rating_node[0], method='text', encoding=unicode)
            rating_text = re.sub('[^0-9]', '', rating_text)
            rating_value = float(rating_text)
            if rating_value >= 100:
                return rating_value / 100
            return rating_value
    def parse_comments(self, root):
        # Returns the sanitized HTML book description, or None if absent.
        # Look for description in a second span that gets expanded when interactively displayed [@id="display:none"]
        description_node = root.xpath('//div[@id="metacol"]/div[@id="description"]/span')
        if description_node:
            desc = description_node[0] if len(description_node) == 1 else description_node[1]
            less_link = desc.xpath('a[@class="actionLinkLite"]')
            if less_link is not None and len(less_link):
                desc.remove(less_link[0])
            comments = tostring(desc, method='html', encoding=unicode).strip()
            while comments.find('  ') >= 0:
                comments = comments.replace('  ',' ')
            comments = sanitize_comments_html(comments)
            return comments
    def parse_cover(self, root):
        # Returns the cover image URL, or None when missing or broken.
        imgcol_node = root.xpath('//div[@id="imagecol"]/a/img/@src')
        if imgcol_node:
            img_url = imgcol_node[0]
            # Unfortunately Goodreads sometimes have broken links so we need to do
            # an additional request to see if the URL actually exists
            info = self.browser.open_novisit(img_url, timeout=self.timeout).info()
            if int(info.getheader('Content-Length')) > 1000:
                return img_url
            else:
                self.log.warning('Broken image for url: %s'%img_url)
    def parse_isbn(self, root):
        # Returns the ISBN (13-digit preferred, else 10-digit) from the
        # "book data box" details section, or None.
        isbn_node = root.xpath('//div[@id="metacol"]/div[@id="details"]/div[@class="buttons"]/div[@id="bookDataBox"]/div/div')
        if isbn_node:
            id_type = tostring(isbn_node[0], method='text', encoding=unicode).strip()
            if id_type == 'ISBN':
                isbn10_data = tostring(isbn_node[1], method='text', encoding=unicode).strip()
                isbn13_pos = isbn10_data.find('ISBN13:')
                if isbn13_pos == -1:
                    return isbn10_data[:10]
                else:
                    return isbn10_data[isbn13_pos+8:isbn13_pos+21]
            elif id_type == 'ISBN13':
                # We have just an ISBN13, without an ISBN10
                return tostring(isbn_node[1], method='text', encoding=unicode).strip()
    def parse_publisher_and_date(self, root):
        # Returns (publisher_name_or_None, datetime_or_None).
        publisher = None
        pub_date = None
        publisher_node = root.xpath('//div[@id="metacol"]/div[@id="details"]/div[2]')
        if publisher_node:
            # Publisher is specified within the div above with variations of:
            #  Published December 2003 by Books On Tape <nobr class="greyText">(first published 1982)</nobr>
            #  Published June 30th 2010
            # Note that the date could be "2003", "December 2003" or "December 10th 2003"
            publisher_node_text = tostring(publisher_node[0], method='text', encoding=unicode)
            # See if we can find the publisher name
            pub_text_parts = publisher_node_text.partition(' by ')
            if pub_text_parts[2]:
                publisher = pub_text_parts[2].strip()
                if '(first' in publisher:
                    # The publisher name is followed by (first published xxx) so strip that off
                    publisher = publisher.rpartition('(first')[0].strip()
            # Now look for the pubdate. There should always be one at start of the string
            pubdate_text_match = re.search('Published[\n\s]*([\w\s]+)', pub_text_parts[0].strip())
            pubdate_text = None
            if pubdate_text_match is not None:
                pubdate_text = pubdate_text_match.groups(0)[0]
            # If we have a first published section of text use that for the date.
            if '(first' in publisher_node_text:
                # For the publication date we will use first published date
                # Note this date could be just a year, or it could be monthname year
                pubdate_text_match = re.search('.*\(first published ([\w\s]+)', publisher_node_text)
                if pubdate_text_match is not None:
                    first_pubdate_text = pubdate_text_match.groups(0)[0]
                    if pubdate_text and first_pubdate_text[-4:] == pubdate_text[-4:]:
                        # We have same years, use the first date as it could be more accurate
                        pass
                    else:
                        pubdate_text = first_pubdate_text
            if pubdate_text:
                pub_date = self._convert_date_text(pubdate_text)
        return (publisher, pub_date)
    def parse_tags(self, root):
        # Goodreads does not have "tags", but it does have Genres (wrapper around popular shelves)
        # We will use those as tags (with a bit of massaging)
        genres_node = root.xpath('//div[@class="stacked"]/div/div/div[contains(@class, "bigBoxContent")]/div/div')
        if genres_node:
            genre_tags = list()
            for genre_node in genres_node:
                sub_genre_nodes = genre_node.xpath('a')
                genre_tags_list = [sgn.text_content().strip() for sgn in sub_genre_nodes]
                if genre_tags_list:
                    genre_tags.append(' > '.join(genre_tags_list))
            calibre_tags = self._convert_genres_to_calibre_tags(genre_tags)
            if len(calibre_tags) > 0:
                return calibre_tags
    def _convert_genres_to_calibre_tags(self, genre_tags):
        # Map Goodreads genre strings to the user's configured calibre tags;
        # genres without a mapping are dropped. Duplicates are removed while
        # preserving first-seen order.
        # for each tag, add if we have a dictionary lookup
        calibre_tag_lookup = cfg.plugin_prefs[cfg.STORE_NAME][cfg.KEY_GENRE_MAPPINGS]
        calibre_tag_map = dict((k.lower(),v) for (k,v) in calibre_tag_lookup.iteritems())
        tags_to_add = list()
        for genre_tag in genre_tags:
            tags = calibre_tag_map.get(genre_tag.lower(), None)
            if tags:
                for tag in tags:
                    if tag not in tags_to_add:
                        tags_to_add.append(tag)
        return list(tags_to_add)
    def _convert_date_text(self, date_text):
        # Parse "2003", "December 2003" or "December 10th 2003" into a
        # timezone-aware (UTC) datetime; missing parts default to Jan / 1st.
        # Note that the date text could be "2003", "December 2003" or "December 10th 2003"
        year = int(date_text[-4:])
        month = 1
        day = 1
        if len(date_text) > 4:
            text_parts = date_text[:len(date_text)-5].partition(' ')
            month_name = text_parts[0]
            # Need to convert the month name into a numeric value
            # For now I am "assuming" the Goodreads website only displays in English
            # If it doesn't will just fallback to assuming January
            month_dict = {"January":1, "February":2, "March":3, "April":4, "May":5, "June":6,
                "July":7, "August":8, "September":9, "October":10, "November":11, "December":12}
            month = month_dict.get(month_name, 1)
            if len(text_parts[2]) > 0:
                day = int(re.match('([0-9]+)', text_parts[2]).groups(0)[0])
        from calibre.utils.date import utc_tz
        return datetime.datetime(year, month, day, tzinfo=utc_tz)
| [
"john.peterson3@hotmail.com"
] | john.peterson3@hotmail.com |
0a185392606cd314acb4f13f45994b76855c9a6c | 500e5426adf70162cc75ae99be0743129639e4c7 | /gathering_server/gathering/apps.py | 56de823fb91dd525090a6cd2e82d8a62295c84ae | [] | no_license | L3oNav/gathering_server | 8b06ff4f176c6dfe3bc7f5c27bce0c9b4dfae8cb | b708fa831b6b5b227bafebd3ea302bcfa35adc46 | refs/heads/main | 2023-04-01T02:09:11.083248 | 2021-02-18T01:54:23 | 2021-02-18T01:54:23 | 339,837,793 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 345 | py | from django.apps import AppConfig
from django.utils.translation import gettext_lazy as _
class GatheringConfig(AppConfig):
    # Django application configuration for the "gathering" app.
    # Dotted path of the app package this config applies to.
    name = "gathering_server.gathering"
    # Translatable, human-readable name shown in the Django admin.
    verbose_name = _("Gathering")
    def ready(self):
        # Import the signals module for its side effect of registering
        # signal receivers once the app registry is ready; tolerate the
        # module being absent.
        try:
            import gathering_server.gathering.signals  # noqa F401
        except ImportError:
            pass
| [
"L3oNav@outlook.com"
] | L3oNav@outlook.com |
e0aa72eb56790380371681952975423a0c147795 | 1a856152b3ab65a8a0cc5cbedf0492d1c3716d27 | /dropout_acnd_pe_noprior_nochans.py | 2eda907f0b1b6f9a40b45eb2cb3e98dbdb1b24e5 | [] | no_license | stablum/thesis | 272f7f23ad1ad454c9310775b969bb54c84c9ea0 | 5c06d78322ddd6e1b8c214261ea6e4464a094bad | refs/heads/master | 2021-07-23T04:29:28.438657 | 2018-08-18T18:59:11 | 2018-08-18T18:59:11 | 60,299,071 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,460 | py | #!/usr/bin/env python3
import scipy
import ipdb
import theano
from theano import tensor as T
import lasagne
import numpy as np
import random
import sys
from tqdm import tqdm
tqdm.monitor_interval = 0
import ipdb
# local imports
import movielens
import cftools
import config
import numutils as nu
import augmented_types as at
import activation_functions
import update_algorithms
import model_build
# Per-vector update rule (e.g. Adam) selected via project config.
update =update_algorithms.get_func()
adam_shared = lasagne.updates.adam # FIXME: generalize like the 'update' placeholder
# Activation functions for the hidden layers and the rating output.
#g = lambda x:x
g_in = activation_functions.get(config.g_in)
g_rij = activation_functions.get(config.g_rij)
# Gaussian noise / prior standard deviations (the u/v priors are currently
# commented out in make_objective_term below).
sigma = 1.
sigma_u = 100.
sigma_v = 1000.
# Latent-channel and hidden-layer sizes, from project config.
chan_out_dim = config.chan_out_dim
hid_dim = config.hid_dim
# Debug logger; swap the lambda for None (or print) to silence/enable.
#log = print
log = lambda *args: print(*args)#None
def main():
    """Train a dropout-regularized neural matrix-factorization model on MovieLens.

    Builds a theano computation graph that concatenates a user latent vector
    ui and an item latent vector vj, feeds them through an MLP predicting a
    rating in [0,1], and alternates Adam updates of the latent vectors and
    the network parameters over minibatches of observed ratings.
    """
    dataset = movielens.load(config.movielens_which)
    # U, V: lists of per-user / per-item latent vectors (numpy arrays).
    U,V = cftools.UV_vectors_np(dataset)
    # Per-vector Adam state: step counter t, first moment m, second moment v.
    U_t, U_m, U_v = update_algorithms.adam_for(U)
    V_t, V_m, V_v = update_algorithms.adam_for(V)
    def make_predict_to_1(ui,vj):
        # Build the MLP mapping concat(ui, vj) -> rating in [0,1].
        # Returns (deterministic output, learning/dropout output, params,
        # regularizer term).
        #o_ui,net_ui_params = make_net(ui,config.K,hid_dim,chan_out_dim,"net_u",g_in,g_in)
        #o_ui.name = "o_ui"
        #o_vj,net_vj_params = make_net(vj,config.K,hid_dim,chan_out_dim,"net_v",g_in,g_in)
        #o_vj.name = "o_vj"
        comb = T.concatenate([ui,vj],axis=1)
        comb.name = "comb"
        prediction_det, prediction_lea, net_comb_params, regularizer_term = model_build.make_net(comb,2*chan_out_dim,hid_dim,1,"net_comb",g_in,g_rij)
        prediction_det.name = "prediction_det"
        prediction_lea.name = "prediction_lea"
        return prediction_det, prediction_lea, net_comb_params, regularizer_term
    def make_predict_to_5(predict_to_1_sym):
        # Rescale a [0,1] prediction back to the 1..max_rating scale.
        ret = (predict_to_1_sym * (config.max_rating - 1. )) + 1.
        return ret
    def make_objective_term(ui_mb,vj_mb,Rij_mb,predict_to_1_sym,regularizer_term):
        # Squared-error cost (Gaussian likelihood) plus optional L2
        # regularizer; the Gaussian priors on U/V are commented out.
        eij = ( Rij_mb - predict_to_1_sym ) ** 2
        ret = 0.5 * 1./(sigma**2) * eij # error term (gaussian centered in the prediction)
        # 0-mean gaussian prior on the latent feature vector.
        # since this term refers to a specific <ui,vj> tuple, then
        # the update following the prior quantity has to be divided
        # by how many terms (error term) contain that vector
        #coef_u = T.constant(0.5/(dataset.N_compressed * sigma_u),"coef_u")
        #sqsum_u = T.sum(ui_mb**2,axis=1,keepdims=True)
        #sqsum_u.name = "sqsum_u"
        #term_u = coef_u * sqsum_u
        #term_u.name = "term_u"
        #ret = ret + term_u
        #coef_v = T.constant(0.5/(dataset.M_compressed * sigma_v),"coef_v")
        #sqsum_v = T.sum(vj_mb**2,axis=1,keepdims=True)
        #sqsum_v.name = "sqsum_v"
        #term_v = coef_v * sqsum_v
        #term_v.name = "term_v"
        #ret = ret + term_v
        #ret.name = "obj_before_sum"
        ret = T.sum(ret) # on all axes: cost needs to be a scalar
        ret.name = "obj_after_sum"
        if config.regularization_lambda > 0:
            ret = ret + config.regularization_lambda * regularizer_term
            ret.name = "obj_with_regularizer"
        return ret
    print("creating update functions..")
    # Symbolic minibatch inputs: latent vectors, ratings, and Adam state.
    ui_mb_sym = T.fmatrix('ui_mb')
    vj_mb_sym = T.fmatrix('vj_mb')
    Rij_mb_sym = T.fmatrix('Rij_mb')
    t_mb_prev_sym = T.fmatrix('t_mb_prev')
    t_mb_prev_sym = T.addbroadcast(t_mb_prev_sym,1)
    m_mb_prev_sym = T.fmatrix('m_mb_prev')
    v_mb_prev_sym = T.fmatrix('v_mb_prev')
    predict_to_1_sym_det, predict_to_1_sym_lea, params, regularizer_term = make_predict_to_1(ui_mb_sym,vj_mb_sym)
    # instead of calculating a different count of latent vectors of each
    # (other side) latent vector, a global estimate (average) is performed
    obj_term = make_objective_term(ui_mb_sym,vj_mb_sym,Rij_mb_sym,predict_to_1_sym_lea, regularizer_term)
    # Gradients of the objective w.r.t. the latent vectors and net params.
    grads_ui = T.grad(obj_term, ui_mb_sym)
    grads_vj = T.grad(obj_term, vj_mb_sym)
    grads_params = [
        T.grad(obj_term,curr)
        for curr
        in params
    ]
    updates_kwargs = dict(t_prev=t_mb_prev_sym,m_prev=m_mb_prev_sym,v_prev=v_mb_prev_sym)
    new_for_ui = list(update(ui_mb_sym,grads_ui,**updates_kwargs))
    new_for_vj = list(update(vj_mb_sym,grads_vj,**updates_kwargs))
    params_updates = adam_shared(grads_params,params,learning_rate=config.lr_begin)
    common = [ t_mb_prev_sym,m_mb_prev_sym,v_mb_prev_sym,Rij_mb_sym,ui_mb_sym,vj_mb_sym ]
    # Compiled theano functions: latent-vector updates, shared-parameter
    # update, and deterministic prediction (1-scale and 5-scale).
    ui_update_fn = theano.function(common,new_for_ui)
    ui_update_fn.name="ui_update_fn"
    vj_update_fn = theano.function(common,new_for_vj)
    vj_update_fn.name="vj_update_fn"
    params_update_fn = theano.function([Rij_mb_sym,ui_mb_sym,vj_mb_sym],[], updates=params_updates)
    params_update_fn.name = "params_update_fn"
    predict_to_5_fn = theano.function([ui_mb_sym,vj_mb_sym], [make_predict_to_5(predict_to_1_sym_det)])
    predict_to_5_fn.name="predict_to_5_fn"
    predict_to_1_fn = theano.function([ui_mb_sym,vj_mb_sym], [predict_to_1_sym_det])
    predict_to_1_fn.name="predict_to_1_fn"
    # Minibatch accumulators; flushed when minibatch_size points collected.
    ui_mb_l = []
    vj_mb_l = []
    Rij_mb_l = []
    U_t_mb_l = []
    U_m_mb_l = []
    U_v_mb_l = []
    V_t_mb_l = []
    V_m_mb_l = []
    V_v_mb_l = []
    indices_mb_l = []
    def train_with_datapoint(i,j,Rij,lr):
        # Buffer one observed rating (user i, item j, rating Rij); when a
        # full minibatch has accumulated, run one update step and write the
        # new latent vectors and Adam state back into U/V and their buffers.
        # `lr` is unused here (learning rate is baked into the compiled fns).
        nonlocal indices_mb_l
        nonlocal ui_mb_l
        nonlocal vj_mb_l
        nonlocal Rij_mb_l
        nonlocal U_t_mb_l
        nonlocal U_m_mb_l
        nonlocal U_v_mb_l
        nonlocal V_t_mb_l
        nonlocal V_m_mb_l
        nonlocal V_v_mb_l
        indices_mb_l.append((i,j))
        ui_mb_l.append(U[i])
        vj_mb_l.append(V[j])
        Rij_mb_l.append(Rij)
        U_t_mb_l.append(U_t[i])
        U_m_mb_l.append(U_m[i])
        U_v_mb_l.append(U_v[i])
        V_t_mb_l.append(V_t[j])
        V_m_mb_l.append(V_m[j])
        V_v_mb_l.append(V_v[j])
        if len(ui_mb_l) >= config.minibatch_size:
            # Stack the buffered rows into (minibatch, K) float32 matrices.
            ui_mb = np.vstack(ui_mb_l).astype('float32')
            #print('ui_mb.shape',ui_mb.shape)
            vj_mb = np.vstack(vj_mb_l).astype('float32')
            #print('vj_mb.shape',vj_mb.shape)
            Rij_mb = np.vstack(Rij_mb_l).astype('float32')
            #print('Rij_mb.shape',Rij_mb.shape)
            U_t_mb = np.vstack(U_t_mb_l ).astype('float32')
            #print('U_t_mb.shape',U_t_mb.shape)
            U_m_mb = np.vstack(U_m_mb_l ).astype('float32')
            #print('U_m_mb.shape',U_m_mb.shape)
            U_v_mb = np.vstack(U_v_mb_l ).astype('float32')
            #print('U_v_mb.shape',U_v_mb.shape)
            V_t_mb = np.vstack(V_t_mb_l ).astype('float32')
            V_m_mb = np.vstack(V_m_mb_l ).astype('float32')
            V_v_mb = np.vstack(V_v_mb_l ).astype('float32')
            # Normalize ratings from 1..max_rating to [0,1].
            Rij_mb = (Rij_mb - 1.) / (config.max_rating - 1.)
            #log("Rij_mb",Rij_mb)
            #log("predict_to_1_fn",predict_to_1_fn(ui_mb,vj_mb))
            #log("predict_to_5_fn",predict_to_5_fn(ui_mb,vj_mb))
            #print("before ui_update_fn, vj_mb.shape=",vj_mb.shape)
            #print("before ui_update_fn, ui_mb.shape=",ui_mb.shape)
            new_ui_mb, new_U_t_mb, new_U_m_mb, new_U_v_mb = ui_update_fn(
                U_t_mb,U_m_mb,U_v_mb,Rij_mb,ui_mb,vj_mb
            )
            #log("ui_mb",ui_mb,"new_ui_mb",new_ui_mb,"diff",ui_mb-new_ui_mb)
            #print("before vj_update_fn, vj_mb.shape=",vj_mb.shape)
            #print("before vj_update_fn, ui_mb.shape=",ui_mb.shape)
            new_vj_mb, new_V_t_mb, new_V_m_mb, new_V_v_mb = vj_update_fn(
                V_t_mb,V_m_mb,V_v_mb,Rij_mb,ui_mb,vj_mb
            )
            #log("vj_mb",vj_mb,"new_vj_mb",new_vj_mb,"diff",vj_mb-new_vj_mb)
            # Scatter the updated rows back to their per-user/item slots.
            for pos,(i,j) in enumerate(indices_mb_l):
                U[i] = new_ui_mb[pos,:]
                V[j] = new_vj_mb[pos,:]
                U_t[i] = new_U_t_mb[pos,:]
                U_m[i] = new_U_m_mb[pos,:]
                U_v[i] = new_U_v_mb[pos,:]
                V_t[j] = new_V_t_mb[pos,:]
                V_m[j] = new_V_m_mb[pos,:]
                V_v[j] = new_V_v_mb[pos,:]
            params_update_fn(Rij_mb,ui_mb,vj_mb)
            # Clear the buffers for the next minibatch.
            ui_mb_l = []
            vj_mb_l = []
            Rij_mb_l = []
            U_t_mb_l = []
            U_m_mb_l = []
            U_v_mb_l = []
            V_t_mb_l = []
            V_m_mb_l = []
            V_v_mb_l = []
            indices_mb_l = []
    print("training pmf...")
    cftools.mainloop(train_with_datapoint,dataset,U,V,predict_to_5_fn)
if __name__=="__main__":
    main()
| [
"stablum@gmail.com"
] | stablum@gmail.com |
ab04985a81690a29fc99f93e08d4a4ec4e364ad5 | 847273de4b1d814fab8b19dc651c651c2d342ede | /.history/Sudoku_II_004_20180618143456.py | c999da2e6ae97112548cc81b5e4e3de4c117dc62 | [] | no_license | Los4U/sudoku_in_python | 0ba55850afcffeac4170321651620f3c89448b45 | 7d470604962a43da3fc3e5edce6f718076197d32 | refs/heads/master | 2020-03-22T08:10:13.939424 | 2018-07-04T17:21:13 | 2018-07-04T17:21:13 | 139,749,483 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,415 | py | from random import randint
# Starting grid: solved except for six cells in the bottom row.
# ' ' marks an empty cell.
sudoku1 = [
    [5, 9, 8, 6, 1, 2, 3, 4, 7],
    [2, 1, 7, 9, 3, 4, 8, 6, 5],
    [6, 4, 3, 5, 8, 7, 1, 2, 9],
    [1, 6, 5, 4, 9, 8, 2, 7, 3],
    [3, 2, 9, 7, 6, 5, 4, 1, 8],
    [7, 8, 4, 3, 2, 1, 5, 9, 6],
    [8, 3, 1, 2, 7, 6, 9, 5, 4],
    [4, 7, 2, 8, 5, 9, 6, 3, 1],
    [9, 5, ' ', ' ', ' ', ' ', ' ', ' ', 2]
]

# Alternative near-solved grid kept for reference (not loaded):
# row1 = [9,8,7,4,3,2,5,6,1]
# row2 = [2,4,3,5,1,6,8,7,9]
# row3 = [5,6,1,7,9,8,4,3,2]
# row4 = [3,9,5,6,4,7,2,1,8]
# row5 = [8,2,4,3,5,1,6,9,7]
# row6 = [1,7,6,2,8,9,3,4,5]
# row7 = [7,1,2,8,6,3,9,5,4]
# row8 = [4,3,8,9,7,5,1,2,6]
# row9 = [' ',5,' ',' ',2,' ',7,' ',' ']


def printSudoku():
    """Print the current grid with column headers and 3x3 box separators."""
    print(" 1 2 3 4 5 6 7 8 9")
    print(" -------------------------")
    row_template = '{2} {1} {0[0]} {0[1]} {0[2]} {1} {0[3]} {0[4]} {0[5]} {1} {0[6]} {0[7]} {0[8]} {1}'
    for row_number, row in enumerate(sudoku1, start=1):
        print(row_template.format(row, "|", row_number))
        if row_number % 3 == 0:
            print(" -------------------------")
# Main game loop: keeps prompting until the grid is complete and valid.
while True:
    print("Your sudoku to solve:")
    printSudoku()
    print("Input 3 numbers in format a b c, np. 4 5 8")
    print(" a - row number")
    print(" b - column number ")
    print(" c - value")
    # print(" r - reset chart to start\n ")

    x = input("Input a b c: ")
    print("")

    # Accept only "a b c" where each of a, b, c is a digit 1-9.  The original
    # validation string also accepted ' ' and '0', which either crashed int()
    # or silently wrapped to the last row/column via a -1 index.
    digits = "123456789"
    if (len(x) != 5) or (x[0] not in digits) or (x[2] not in digits) or (
            x[4] not in digits) or (x[1] != " ") or (x[3] != " "):
        if x == "r":  # reset
            print(" Function reset() will be ready in Next Week")
        else:
            print("Error - wrong number format \n ")
        continue

    # Store the value as an int: the original stored the character, so the
    # sum()-based win check below always raised TypeError and the game could
    # never be won.
    sudoku1[int(x[0])-1][int(x[2])-1] = int(x[4])

    try:
        i = 0
        for item in sudoku1:
            if sum(item) == 45:
                i = i + 1
        if i == 9:
            print("YOU WIN")
            break
    except TypeError:
        # Some cells still hold the ' ' placeholder; grid not complete yet.
        print()
'''
print(" ")
print(" %@@@@@@@ @@@ @@@ (@@@@@@@@@ ,@@@@2@@@@@ @@@, /@@@/ @@@, @@@ ")
print(" @@@* @@@ @@@ (@@( /@@@# .@@@% (@@@ @@@, @@@% @@@, @@@. ")
print(" @@@& @@@ @@@ (@@( @@@* @@@% #@@% @@@,.@@@. @@@, @@@. ")
print(" ,@@@@@@* @@@ @@@ (@@( (@@% .@@@* ,@@@ @@@%@@% @@@, @@@. ")
print(" /@@@@@# @@@ @@@ (@@( (@@% .@@@* ,@@@ @@@,@@@( @@@, @@@. ")
print(" *@@@. @@@ .@@& (@@( @@@. @@@% &@@( @@@, &@@@. @@@* .@@@. ")
print(" &, &@@@ #@@@. ,@@@, (@@( ,&@@@* ,@@@& .@@@@ @@@, (@@@/ #@@@* @@@# ")
print(",@@@@@@@@( (@@@@@@@@% (@@@@@@@@@( #@@@@@@@@@, @@@, ,@@@% ,@@@@@@@@@. \n ")
print("To start game input:")
print(" r - to load random puzzle:")
print(" 1 - to load chart nr 1:")
print(" 2 - to load chart nr 2:")
print(" 3 - to load chart nr 3:")
choice = input("Input here: ")
if choice == "R" or choice == "r":
sudoku_number = randint(0, 1)
rows_fill(sudoku_number)
elif int(choice) == 1:
rows_fill(0)
elif int(choice) == 2:
rows_fill(1)
elif int(choice) == 3:
rows_fill(0)
'''
| [
"inz.kamil.wos@gmail.com"
] | inz.kamil.wos@gmail.com |
111d16939c63cebf88383cf5a24501665490bbc1 | 0f7b8d2ae2c0e81941d5ca5fa4c8313cec8d1544 | /endApi/migrations/0008_auto_20200904_0734.py | 89d799dc142fa35893d751810280051c1fd1bddf | [] | no_license | rajielijah/endpoint | 5c061972cb8ab9fc089046dd9e71f194ee6e5aca | 6db1d6c92d57fc143446d2c4df13664ffa5b1f2d | refs/heads/master | 2022-12-27T23:58:22.232298 | 2020-10-01T11:46:45 | 2020-10-01T11:46:45 | 298,791,670 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 407 | py | # Generated by Django 3.0.7 on 2020-09-04 07:34
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration for the endApi app.
    # Must be applied after the migration that first added Post.image.
    dependencies = [
        ('endApi', '0007_post_image'),
    ]
    operations = [
        # Make Post.image optional and store uploads under
        # MEDIA_ROOT/feedupload instead of the previous location.
        migrations.AlterField(
            model_name='post',
            name='image',
            field=models.ImageField(blank=True, null=True, upload_to='feedupload'),
        ),
    ]
| [
"rajielijah@gmail.com"
] | rajielijah@gmail.com |
fe588b211aefbc83d08eca506d88db9be266716c | 0d7247b52044d5bfc498610fe33725c4ca0a2076 | /MDD-SG-SD.py | 235989f1e5607b3a6d8c9407160ab862c37b7b9d | [] | no_license | SivaArwin/Scraping---Uswitch.com | 1ebde73978ce7912d164e8965a47fd79106b5026 | f33e3d9b05b9ba23065c5b2ac9073e16174a0585 | refs/heads/main | 2023-03-03T14:06:52.455351 | 2021-02-13T18:11:34 | 2021-02-13T18:11:34 | 338,635,431 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,392 | py | from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.support.ui import Select
import pandas as pd
import xlsxwriter
import time
import _Custom_Exception as CE
import _Config as config
import _EHl_urls
# XPath of the full-page loading overlay that intercepts clicks; helpers
# wait for it to disappear before interacting with elements underneath.
overlapXpath = "/html/body/div[@id='loaderDiv']"
# Shared Selenium WebDriver instance and scrape inputs, all provided by the
# project config module.
web_driv = config._WebDriv()
_Mdd_links = config._read_MDD_Urls()
_savePath = config.savePath()
#_regionList = ["Eastern", "East Midlands","London", "MANWEB", "Midlands", "Northern", "NORWEB", "Scottish Hydro", "Scottish Power", "Seeboard", "Southern", "Swalec", "SWEB", "Yorkshire"]
"""
#MainPage #Postcode
postcode = web_driv.find_element_by_xpath("/html/body/main[@class='main']/div/div/div/div[@id='postCodeEntry']/form/fieldset[@class='homepage-cta-container']/div[@class='form-group homepage-cta-input-container']/input[@id='PostCode']")
postcode.send_keys("SS26LU")
CE._Time_to_delay(1)
#Mainpage #Submit button
submit = web_driv.find_element_by_xpath("/html/body/main[@class='main']/div/div/div/div[@id='postCodeEntry']/form/fieldset[@class='homepage-cta-container']/button")
submit.click()
CE._Time_to_delay(10)
#Select Both Gas & Elec
gas_elec_elementXpath = "/html/body/div[@class='ng-scope']/div[@class='container-fluid page_funnel-questions ofy-visible current ng-scope ng-isolate-scope']/div/form[@id='current-optilead']/div[@class='ng-scope']/div[@id='questions-intro']/div[@id='field-compare-type']/div[@class='field-input stacked-radio-buttons']/div/input[@id='compare-type-gas-elec']"
CE._Pass_Through_Me(web_driv,overlapXpath,gas_elec_elementXpath)
CE._Time_to_delay(1)
#Select both same supplier
sameSupplier_elementxpath = "/html/body/div[@class='ng-scope']/div[@class='container-fluid page_funnel-questions ofy-visible current ng-scope ng-isolate-scope']/div/form[@id='current-optilead']/div[@class='ng-scope']/div[@id='questions-intro']/div[@id='field-same-supplier']/div[@class='field-input stacked-radio-buttons']/div/input[@id='comparison-type-same-supplier']"
CE._Pass_Through_Me(web_driv,overlapXpath,sameSupplier_elementxpath)
CE._Time_to_delay(1)
#select tariff
tariffname = web_driv.find_element_by_xpath("/html/body/div[@class='ng-scope']/div[@class='container-fluid page_funnel-questions ofy-visible current ng-scope ng-isolate-scope']/div/form[@id='current-optilead']/div[@class='ng-scope']/div[@id='section-supply']/span[@id='section-supply-dual']/div[@class='funnel-section question-group-container ng-isolate-scope ng-valid']/div[3]/div[@class='field-input single-radio-button']/select[@id='elecSupplierTariff']")
Select(tariffname).select_by_value("string:44")
CE._Time_to_delay(1)
#select payment method
payment_Method_Xpath = "/html/body/div[@class='ng-scope']/div[@class='container-fluid page_funnel-questions ofy-visible current ng-scope ng-isolate-scope']/div/form[@id='current-optilead']/div[@class='ng-scope']/div[@id='section-supply']/span[@id='section-supply-dual']/div[@class='funnel-section question-group-container ng-isolate-scope ng-valid ng-dirty ng-valid-parse']/div[@id='field-energy-payment-type']/div[@class='field-input stacked-radio-buttons']/div[@class='ng-scope']/input[@id='elec-payment-type-1']"
CE._Pass_Through_Me(web_driv,overlapXpath,payment_Method_Xpath)
CE._Time_to_delay(1)
#Select gas usage radio button
gas_button_xpath = "/html/body/div[@class='ng-scope']/div[@class='container-fluid page_funnel-questions ofy-visible current ng-scope ng-isolate-scope']/div/form[@id='current-optilead']/div[2]/div[@id='section-usage']/div[@id='gas-usage']/div[@class='field-input expand']/div[@class='radio-gas-usage']/input[@id='gasKWhUsage']"
CE._Pass_Through_Me(web_driv,overlapXpath,gas_button_xpath)
CE._Time_to_delay(3)
#Passing Gas usage
gas_usage_xpath = "/html/body/div[@class='ng-scope']/div[@class='container-fluid page_funnel-questions ofy-visible current ng-scope ng-isolate-scope']/div/form[@id='current-optilead']/div[2]/div[@id='section-usage']/div[@id='gas-usage']/div[@class='field-input expand']/div[@class='radio-gas-usage']/div[@class='input-error-container-inline']/input[@id='gasKWhUsage-usageAsKWh']"
gs_usage_res = web_driv.find_element_by_xpath(gas_usage_xpath)
gs_usage_res.send_keys("12000")
CE._Time_to_delay(1)
#select Elec usage radio button
elec_button_xpath = "/html/body/div[@class='ng-scope']/div[@class='container-fluid page_funnel-questions ofy-visible current ng-scope ng-isolate-scope']/div/form[@id='current-optilead']/div[2]/div[@id='section-usage']/div[@id='electricity-usage']/div[@class='field-input expand']/div[@class='radio-elec-usage']/input[@id='elecKWhUsage']"
CE._Pass_Through_Me(web_driv,overlapXpath,elec_button_xpath)
CE._Pass_Through_Me(web_driv,overlapXpath,elec_button_xpath) #running this code twice because the elec button is not clicked
CE._Time_to_delay(3)
#Passing Elec usage
elec_usage_xpath = "/html/body/div[@class='ng-scope']/div[@class='container-fluid page_funnel-questions ofy-visible current ng-scope ng-isolate-scope']/div/form[@id='current-optilead']/div[2]/div[@id='section-usage']/div[@id='electricity-usage']/div[@class='field-input expand']/div[@class='radio-elec-usage']/div[@class='input-error-container-inline']/input[@id='elecKWhUsage-usageAsKWh']"
elec_usage_res = web_driv.find_element_by_xpath(elec_usage_xpath)
elec_usage_res.send_keys("3100")
CE._Time_to_delay(1)
#Click Submit button #Page2
show_results_button_xpath = "/html/body/div[@class='ng-scope']/div[@class='container-fluid page_funnel-questions ofy-visible current ng-scope ng-isolate-scope']/div/form[@id='current-optilead']/div[@id='section-spending']/div[2]/div[@id='usageSummary']/div[@class='spending-text ng-scope']/button[@id='show-results']"
CE._Pass_Through_Me(web_driv,overlapXpath,show_results_button_xpath)
CE._Pass_Through_Me(web_driv,overlapXpath,show_results_button_xpath) #running this code twice because the elec button is not clicked
CE._Time_to_delay(10)
#Page 3 #Select Show all results .. #Whole of market
#show_all_tariffs_xpath = "/html/body/div[@class='ng-scope']/div[@class='page_funnel-questions container-fluid ofy-visible ng-scope ng-isolate-scope']/div[@class='row wider-margin funnel-columns']/section/div[@class='funnel-filter-sidebar-container']/div[@class='funnel-section ng-isolate-scope']/div[@class='funnel-sidebar-wrapper']/div[@id='section-filters']/form/div[@class='field side-bar-form field-stacked']/ul[2]/li[@class='left-column']/input[@id='Show me all generally available plans']"
#CE._Pass_Through_Me(web_driv,overlapXpath,show_all_tariffs_xpath)
#CE._Time_to_delay(3)
"""
writer = pd.ExcelWriter(_savePath+'MDD-SG-SD.xlsx', engine='xlsxwriter')
try:
if(_Mdd_links):
for driver in range(len(_Mdd_links)):
web_driv.delete_all_cookies()
web_driv.get(_Mdd_links[driver])
CE._Time_to_delay(15)
Tariff_Name = {}
#Result Table output
who = "/html/body/div[@class='ng-scope']/div[@class='page_funnel-questions container-fluid ofy-visible ng-scope ng-isolate-scope']/div[@class='row wider-margin funnel-columns']/section/div[@class='funnel-results-container']/div[@class='funnel-section ng-isolate-scope']/div[@id='section-compare-table']/div[@class='compare-table']/div[@class='compare-table-body']"
who_res_final_res = web_driv.find_element_by_xpath(who)
gas = '//*[@id="Gas only"]'
CE._Pass_Through_Me(web_driv,overlapXpath,gas)
CE._Time_to_delay(3)
'''
Ele = '//*[@id="Electricity only"]'
CE._Pass_Through_Me(web_driv,overlapXpath,Ele)
CE._Time_to_delay(3)
'''
## ENQUIRY TARIFFS
#Supplier Name on Enquiry
for _supplierName_enquiry in who_res_final_res.find_elements_by_xpath("div[contains(@class, 'compare-table-item') and contains(@class, 'compare-cannot-switch')]/fri-result-tariff/div/div[1]/div[@class='supplier']/p[contains(@class, 'ng-binding') and contains(@class, 'ng-scope')]"):
Tariff_Name.setdefault('SupplierName', []).append(_supplierName_enquiry.text)
#print("Supplier Name ->", _supplierName_enquiry.text )
print("Fetched Supplier Name Enquire..")
#Tariff Name on Enquiry
for _tarifName_enquiry in who_res_final_res.find_elements_by_xpath("div[contains(@class, 'compare-table-item') and contains(@class, 'compare-cannot-switch')]/fri-result-tariff/div/div[1]/div[@class='supplier']/p[@class='ng-binding']"):
#print("tariff name ->", _tarifName_enquiry.text)
Tariff_Name.setdefault('TariffName',[]).append(_tarifName_enquiry.text)
print("Fetched Tariff Name....")
#Cancellation fees yes or no on apply
for cancellation_fees in who_res_final_res.find_elements_by_xpath("div[contains(@class, 'compare-table-item') and contains(@class, 'compare-cannot-switch')]/fri-result-tariff/div/div[2]/p/span[1]/span"):
Tariff_Name.setdefault('Cancellationstatus',[]).append(cancellation_fees.text)
#print("Cancellation >", cancellation_fees.text)
print("Fetched Cancellation status...!!!")
#Tariff expiry
for tariff_expiry in who_res_final_res.find_elements_by_xpath("div[contains(@class, 'compare-table-item') and contains(@class, 'compare-cannot-switch')]/fri-result-tariff/div/div[2]/p/span[2]/span"):
Tariff_Name.setdefault('Tariffexpiry',[]).append(tariff_expiry.text)
#print("Expiry >", tariff_expiry.text)
print("Fetched Tariff expiry...!!!")
#annual bill value on apply
for annual_bill in who_res_final_res.find_elements_by_xpath("div[contains(@class, 'compare-table-item') and contains(@class, 'compare-cannot-switch')]/fri-result-tariff/div/div[3]/p/span[@class='ng-binding']"):
Tariff_Name.setdefault('annual_bill',[]).append(annual_bill.text)
#print("Annual Bills >",annual_bill.text)
print("Fetched Annual values ...!!!")
#On Enquiry
for on_enquiry in who_res_final_res.find_elements_by_xpath("div[contains(@class, 'compare-table-item') and contains(@class, 'compare-cannot-switch')]/fri-result-tariff/div/div[6]/p[@class='ng-binding']"):
if (on_enquiry.text == "This supplier has not made this plan available through us" ):
Tariff_Name.setdefault('Status',[]).append("Enquiry")
#print("#", on_enquiry.text)
print("Fetched on Enquiry ...!!!")
#show Apply only
show_apply_tariffs_xpath = "/html/body/div[@class='ng-scope']/div[@class='page_funnel-questions container-fluid ofy-visible ng-scope ng-isolate-scope']/div[@class='row wider-margin funnel-columns']/section/div[@class='funnel-filter-sidebar-container']/div[@class='funnel-section ng-isolate-scope']/div[@class='funnel-sidebar-wrapper']/div[@id='section-filters']/form/div[@class='field side-bar-form field-stacked']/ul[1]/li[@class='left-column']/input[@id='Show plans you can switch me to']"
CE._Pass_Through_Me(web_driv,overlapXpath,show_apply_tariffs_xpath)
CE._Time_to_delay(3)
### APPLY TARIFFS
print("Fetching on apply tariffs now.......")
#Supplier Name On Apply #img[@class='supplier-logo ng-scope']
for SA in who_res_final_res.find_elements_by_xpath("div/fri-result-tariff/div/div[1]/div[@class='supplier']/img[@class='supplier-logo ng-scope']"):
Tariff_Name.setdefault('SupplierName',[]).append(SA.get_attribute('alt'))
#print("Supplier Name >", SA.get_attribute('alt'))
print("Fetched Supplier Name....!!!")
#Tariff Name on Apply
for TA in who_res_final_res.find_elements_by_xpath("div/fri-result-tariff/div/div[1]/div[@class='supplier']/p[@class='ng-binding']"):
Tariff_Name.setdefault('TariffName',[]).append(TA.text)
#print("Tariff Name >",TA.text)
print("Fetched Tariff Name....!!!")
#Cancellation fees yes or no on apply
for cancellation_fees in who_res_final_res.find_elements_by_xpath("div/fri-result-tariff/div/div[2]/p/span[1]/span"):
Tariff_Name.setdefault('Cancellationstatus',[]).append(cancellation_fees.text)
#print("Cancellation fees >", cancellation_fees.text)
print("Fetched Cancellation status...!!!")
#Tariff expiry
for tariff_expiry in who_res_final_res.find_elements_by_xpath("div/fri-result-tariff/div/div[2]/p/span[2]/span"):
Tariff_Name.setdefault('Tariffexpiry',[]).append(tariff_expiry.text)
#print("Expiry >", tariff_expiry.text)
print("Fetched Tariff expiry...!!!")
#annual bill value on apply
for annual_bill in who_res_final_res.find_elements_by_xpath("div/fri-result-tariff/div/div[3]/p/span[@class='ng-binding']"):
Tariff_Name.setdefault('annual_bill',[]).append(annual_bill.text)
#print("Annual Bills >",annual_bill.text)
print("Fetched Annual values ...!!!")
#On Apply
for on_apply in who_res_final_res.find_elements_by_xpath("div/fri-result-tariff/div/div[6]/button"):
if (on_apply.text == "I WANT THIS PLAN"):
Tariff_Name.setdefault('Status',[]).append("Apply")
#print("#", on_apply.text)
print("Fetched on Apply ...!!!")
'''
#Page 3 #Select Show all results .. #Whole of market
show_all_tariffs_xpath = "/html/body/div[@class='ng-scope']/div[@class='page_funnel-questions container-fluid ofy-visible ng-scope ng-isolate-scope']/div[@class='row wider-margin funnel-columns']/section/div[@class='funnel-filter-sidebar-container']/div[@class='funnel-section ng-isolate-scope']/div[@class='funnel-sidebar-wrapper']/div[@id='section-filters']/form/div[@class='field side-bar-form field-stacked']/ul[2]/li[@class='left-column']/input[@id='Show me all generally available plans']"
CE._Pass_Through_Me(web_driv,overlapXpath,show_all_tariffs_xpath)
CE._Time_to_delay(3)
'''
_df = pd.DataFrame.from_dict(Tariff_Name)
#for _region in driver:
_df.to_excel(writer, sheet_name=str(driver+1), index=False)
print("Region %d complete" %(driver+1))
#tn.to_csv('EHL.csv', index=False, sep=',', encoding='utf-8')
#print(tn)
writer.save()
print("File is ready to use!!!")
web_driv.close()
except TimeoutException:
print("Link is broken... Replace new url")
web_driv.close() | [
"noreply@github.com"
] | SivaArwin.noreply@github.com |
24c2e84b37749a34542141af25758a0b77c195ba | e5ee01bde67fed16b890023cdc33b3294e7acb6d | /python/path_search_stripped/a_star.py | dc833d9ba96cf55f58739839921c943c153c83a2 | [] | no_license | dragonfi/a_star_examples | f8ca1494d49abf5170d52408e9efa6179b36b002 | a6c43ca4b5f135bbaa848fcc45e74922dc174286 | refs/heads/master | 2020-06-19T15:07:09.298508 | 2019-08-06T15:02:56 | 2019-08-06T15:03:06 | 196,756,037 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,063 | py | from collections import OrderedDict, namedtuple
from .graph import Graph
class Path():
    """A weighted walk through the graph.

    Attributes:
        weight: accumulated edge weight along the walk.
        nodes: ordered list of visited nodes, source first.
    """

    def __init__(self, weight, nodes):
        self.weight, self.nodes = weight, nodes

    @property
    def source(self):
        """First node of the walk."""
        return self.nodes[0]

    @property
    def dest(self):
        """Last node of the walk."""
        return self.nodes[-1]
# Result bundle of a search: 'path' is the found Path (None when dest is
# unreachable), 'explored' the closed set, 'candidates' the remaining open set.
AStarResult = namedtuple("AStarResult", "path explored candidates")
class AStar():
    """A* shortest-path search over a Graph.

    The heuristic is called as heuristic(node_data, dest_data) and must
    return an estimated remaining cost; A* only guarantees an optimal path
    when the heuristic never overestimates (is admissible).
    """
    def __init__(self, graph , heuristic ) :
        self._graph = graph
        self._heuristic = heuristic
    def shortest_path(self, source , dest ) :
        """Return the cheapest Path from source to dest, or None."""
        return self.shortest_path_with_metadata(source, dest).path
    def shortest_path_with_metadata(self, source , dest ) :
        """Run A* and return AStarResult(path, explored, candidates).

        'candidates' is the open set (node -> cheapest known Path to it),
        'explored' the closed set; path is None when dest is unreachable.
        """
        def candidate_sorting_key(candidate ) :
            # f(n) = g(n) + h(n): cost so far plus heuristic estimate to dest.
            node, path = candidate
            node_data = self._graph.get_node_data(node)
            dest_data = self._graph.get_node_data(dest)
            return path.weight + self._heuristic(node_data, dest_data)
        explored = {}
        candidates = OrderedDict({source: Path(0, [source])})
        while candidates:
            # Re-sort the whole open set so popitem(last=False) yields the
            # candidate with the lowest f-score.
            # NOTE(review): this is O(n log n) per iteration; a heap would be
            # cheaper, but ordering semantics are kept exactly as-is here.
            candidates = OrderedDict(sorted(candidates.items(), key=candidate_sorting_key))
            node, path = candidates.popitem(last=False)
            if node == dest:
                return AStarResult(path, explored, candidates)
            if node not in explored.keys() or explored[node].weight > path.weight:
                explored[node] = path
                # Relax every outgoing edge of the freshly settled node.
                new_candidates = {
                    edge.dest: Path(path.weight + edge.weight, path.nodes + [edge.dest])
                    for edge in self._graph.edges_from(node)
                    if edge.dest not in explored.keys()}
                for key, value in new_candidates.items():
                    # Keep only the cheaper of the existing/new path to a node.
                    if key not in candidates.keys() or candidates[key].weight > value.weight:
                        candidates[key] = value
        return AStarResult(None, explored, candidates)
| [
"david.gabor.bodr@gmail.com"
] | david.gabor.bodr@gmail.com |
a07905b07cfcf4e19974315b9839310a2d8f725c | d4a88b3b102e20e727cae8fbd4167dcb4b57d1ec | /additional_examples/py2exe_setup__basic_test.py | 746c5610f04ff4682414ded9ad60376e1f9e3b2d | [
"MIT"
] | permissive | viblo/pymunk | ca64888e45706db431788368ff8464edf2912d5f | 20ac14f665fb38b4ef1bef5acea36a3d612dd0d5 | refs/heads/master | 2023-08-27T16:37:14.740653 | 2023-08-16T19:26:16 | 2023-08-16T19:26:16 | 13,273,472 | 855 | 255 | MIT | 2023-01-13T10:13:47 | 2013-10-02T14:36:46 | Python | UTF-8 | Python | false | false | 218 | py | """Simple example of py2exe to create a exe of the no_dependencies example.
Tested on py2exe 0.13.0.0 on python 3.11
"""
import py2exe
# Build no_dependencies.py into a console .exe; "includes" forces pymunk into
# the bundle (presumably py2exe's import scanner misses it -- TODO confirm).
py2exe.freeze(console=["no_dependencies.py"], options={"includes": ["pymunk"]})
| [
"vb@viblo.se"
] | vb@viblo.se |
b4430e26ab1dde9f74b12f200a1896351cd2722b | 4d65f85fb8fba5a3d6582ccbf9d38042ec1ec422 | /代码1/hotdog_war.py | f177dd62bdbf8984d14a17f21671d5aee76aa8df | [] | no_license | qsten/game | f50756d001116f41cfdf7715ee061a3dfa3f9400 | 86164c3dcec869b85aaa777105c7faf738dd8e1f | refs/heads/master | 2020-04-28T10:56:36.871862 | 2019-05-19T13:18:46 | 2019-05-19T13:18:46 | 175,218,974 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,001 | py | import pygame
from player import Player
from settings import Settings
import game_functions as gf
from pygame.sprite import Group
from game_stats import GameStats
from button import Button
from scoreboard import Scoreboard
from lifeboard import Lifeboard
from music_button import Music_button
from stop_button import Stop_button
from statistics_board import Statistics_board
from Restart_button import Restart_button
from return_button import Return_button
from rank_button import Rank_button
def run_game():
    """Initialise pygame resources, build every widget, then run the main loop."""
    # Window and configuration.
    settings = Settings()
    screen = pygame.display.set_mode((settings.screen_width, settings.screen_height))
    pygame.display.set_caption('hotdog_invasion')

    # Game state, sprites and HUD.
    stats = GameStats(settings)
    player = Player(settings, screen)
    hotdogs = Group()
    bombs = Group()
    scoreboard = Scoreboard(screen, stats)
    lifeboard = Lifeboard(screen, stats)

    # Buttons and overlays.
    play_btn = Button(screen)
    music_btn = Music_button(screen)
    stop_btn = Stop_button(screen, stats)
    restart_btn = Restart_button(screen, stats)
    stats_board = Statistics_board(screen, stats)
    return_btn = Return_button(screen, stats)
    rank_btn = Rank_button(screen)

    # Main event/update/draw loop.
    while True:
        gf.check_events(settings, screen, stats, play_btn, player, hotdogs, bombs,
                        music_btn, stop_btn, restart_btn, return_btn, rank_btn)
        if stats.game_active:
            gf.create_hotdogs(settings, screen, hotdogs)
            gf.create_bombs(settings, screen, bombs)
            player.update(stats)
            gf.update_hotdog(settings, stats, scoreboard, player, hotdogs)
            gf.update_bomb(screen, stats, player, bombs, stats_board, rank_btn)
        music_btn.music_play()
        gf.update_screen(screen, stats, scoreboard, lifeboard, player, hotdogs, bombs,
                         play_btn, music_btn, stop_btn, restart_btn, return_btn, rank_btn)


if __name__ == '__main__':
    run_game()
| [
"noreply@github.com"
] | qsten.noreply@github.com |
cc4189ead66a7efb115d15670bd7e27b82860536 | 3874a909e3152fda6a87dbb0ef05b18d6908807c | /la/parse_tabs.py | 0de026ab72ba5ab67f99b19a08939f52599c51dc | [
"MIT"
] | permissive | FranchuFranchu/la | f5ef3f8d43aec67d84030018278640d91a77dd05 | 7afa25d3d102f5a0316f5084a46a04e62976991b | refs/heads/master | 2020-07-24T07:50:01.756324 | 2020-04-18T15:49:03 | 2020-04-18T15:49:03 | 207,853,053 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,332 | py | # Converts python-style code into JS-style code
def tabs_to_codeblocks(d):
    """Translate indentation-delimited ("python-style") text into a
    brace/semicolon-delimited ("JS-style") form.

    Scans the input one character at a time:

    * While at the start of a line, leading spaces/tabs are counted -- one
      indentation unit per character, tabs and spaces weigh the same (so a
      four-space indent step emits four braces).
    * When the first non-whitespace character of a line is reached, one "{"
      is inserted per unit of added indentation, or one "}" per unit of
      removed indentation followed by a single ";".
    * Every newline terminating a non-blank line gets a ";" in front of it.
    * Blank / whitespace-only lines are copied through unchanged and do not
      open or close blocks.  (Bug fix: previously such lines were treated
      as a dedent to their own width, emitting spurious "}"/"{" pairs and
      ";" around every empty line.)
    * Any blocks still open at end of input are closed with "}".

    :param d: source text using indentation for block structure.
    :return: the same text with "{", "}" and ";" markers inserted.
    """
    list_index = 0
    code = list(d)           # mutable list so markers can be inserted in place
    at_newline = True        # True while scanning a line's leading whitespace
    current_indentation = 0  # depth of the block we are currently inside
    this_line_indentation = 0
    while list_index < len(code):
        if at_newline:
            if code[list_index] in (" ", "\t"):
                this_line_indentation += 1
            elif code[list_index] == "\n":
                # Blank or whitespace-only line: keep the current block
                # depth, restart the indent count and stay in newline mode.
                this_line_indentation = 0
            else:
                at_newline = False
                difference = this_line_indentation - current_indentation
                if difference > 0:
                    # Deeper than before: open one block per extra unit.
                    for i in range(difference):
                        code.insert(list_index, "{")
                        list_index += 1
                elif difference < 0:
                    # Shallower: close the finished blocks, then terminate
                    # the statement those blocks belonged to.
                    for i in range(-difference):
                        code.insert(list_index, "}")
                        list_index += 1
                    code.insert(list_index, ";")
                current_indentation = this_line_indentation
        if not at_newline:
            if code[list_index] == "\n":
                at_newline = True
                this_line_indentation = 0
                code.insert(list_index, ";")  # terminate the statement
                list_index += 1
        list_index += 1
    # Close any blocks still open at end of input.
    for i in range(current_indentation):
        code.insert(list_index, "}")
        list_index += 1
    return "".join(code)
"fff999abc999@gmail.com"
] | fff999abc999@gmail.com |
359f9c86575cbc6401fa831c42183d3cd110679b | 9d278285f2bc899ac93ec887b1c31880ed39bf56 | /ondoc/account/migrations/0103_merge_20190905_1609.py | 16fb7e5557b47e3db32cd6549c06ffb2218de131 | [] | no_license | ronit29/docprime | 945c21f8787387b99e4916cb3ba1618bc2a85034 | 60d4caf6c52a8b70174a1f654bc792d825ba1054 | refs/heads/master | 2023-04-01T14:54:10.811765 | 2020-04-07T18:57:34 | 2020-04-07T18:57:34 | 353,953,576 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 273 | py | # Generated by Django 2.0.5 on 2019-09-05 10:39
from django.db import migrations
class Migration(migrations.Migration):
    # Merge migration (0103_merge_20190905_1609): reconciles the diverged
    # 'account' migration branches 0100 and 0102 into a single history.
    # Deliberately performs no schema operations.
    dependencies = [
        ('account', '0100_auto_20190902_1653'),
        ('account', '0102_auto_20190903_1950'),
    ]
    operations = [
    ]
| [
"ankit.s@policybazaar.com"
] | ankit.s@policybazaar.com |
98239088c3b4a53c50df2bc9f8bf239942107bf9 | a36d54fb56bc2898089d6ad407bc2039a55271d4 | /zdevicemanager/base/tools.py | 8385f630bed268e1b477abec92e22fe0662faa58 | [] | no_license | zerynth/core-zerynth-toolchain | 443e5180d87b3b783c2b3ec69f24918761715b63 | d27b0d6ee47b9c4f320f518705074f1032fedf8a | refs/heads/master | 2021-07-25T00:28:00.192322 | 2021-05-17T14:53:20 | 2021-05-17T14:53:20 | 122,219,458 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 11,153 | py | from .base import *
from .fs import *
from .cfg import *
from .pygtrie import *
__all__ = ["tools"]
class Tools():
    """Registry of the installed Zerynth toolchain.

    Discovers platform "sys" tool packages, installed virtual machines,
    devices, libraries and examples on disk, and exposes the resolved tool
    paths both as attributes (``tools.xxx``) and items (``tools["xxx"]``).
    A module-level singleton ``tools`` is created below.
    """
    def __init__(self):
        # tool name -> executable path, or a nested dict for grouped tools
        self.tools = {}
        # index of installed packages, loaded from installed.json by init()
        self.installed = {}
    def init(self):
        """Register platform tools from the sys/dist folders and load the
        installed-packages index."""
        #register platform tools
        if env.is_windows():
            self.tools["stty"]="mode"
        elif env.is_linux():
            self.tools["stty"]="/bin/stty -F"
        else:
            # macOS/BSD stty selects the device with -f instead of -F
            self.tools["stty"]="/bin/stty -f"
        for tooldir in fs.dirs(env.sys):
            self.add_tool(tooldir)
        for tooldir in fs.dirs(fs.path(env.dist,"sys")):
            self.add_tool(tooldir)
        ifile = fs.path(env.dist,"installed.json")
        self.installed = fs.get_json(ifile)
    def get_package(self,fullname):
        """Return the repo package record for *fullname* (KeyError if unknown)."""
        return env.repo["packs"][env.repo["byname"][fullname]]
    def get_packages_by_tag(self,tag):
        """Return the sorted fullnames of packages tagged *tag*, skipping
        packages built for other platforms."""
        idx = env.repo["bytag"][tag]
        res = set()
        for i in idx:
            pack = env.repo["packs"][i]
            if pack.get("sys") and pack.get("sys")!=env.platform:
                # skip other platforms
                continue
            res.add(pack["fullname"])
        return sorted(list(res))
    def get_package_deps(self,fullname):
        """Return the sorted, de-duplicated dependency fullnames of *fullname*
        (empty when the package is unknown)."""
        try:
            pack = self.get_package(fullname)
        except Exception:
            # Unknown package: treat as having no dependencies.
            # (Was a bare "except:", which also caught SystemExit et al.)
            pack = {}
        res = []
        for dep in pack.get("deps",[]):
            res.extend(self.get_packages_by_tag(dep))
        res = sorted(list(set(res)))
        return res
    def has_all_deps(self,fullname):
        """True when every dependency of *fullname* is installed."""
        deps = self.get_package_deps(fullname)
        for fname in deps:
            if fname not in self.installed:
                return False
        return True
    def get_pack_info(self,packdir):
        """Load z.yml or package.json from *packdir*; None when neither exists."""
        pfiles = [fs.path(packdir,"z.yml"), fs.path(packdir,"package.json")]
        for pfile in pfiles:
            if fs.exists(pfile):
                pkg = fs.get_yaml_or_json(pfile)
                return pkg
        return None
    def add_tool(self,tooldir):
        """Register the tool package found at *tooldir* into self.tools.

        The package's "sys" section maps tool names to relative paths; a
        "tool" key groups all entries under a nested dict instead.
        """
        if fs.basename(tooldir) in ["browser","newbrowser","newpython"]:
            # ignore some sys packages
            return
        try:
            pkg = self.get_pack_info(tooldir)
            if pkg is None:
                warning("Can't load tool package",tooldir)
                return
            else:
                fullname = pkg["fullname"]
                toolname = pkg.get("tool")
                pkg = pkg["sys"]
        except Exception as e:
            warning("Can't load tool",tooldir,e)
            return
        if toolname:
            # Grouped tool: all entries live under self.tools[toolname].
            self.tools[toolname]={}
            addto = self.tools[toolname]
        else:
            addto = self.tools
        if isinstance(pkg,dict):
            for k,v in pkg.items():
                addto[k]=fs.path(env.sys,tooldir,v)
        elif isinstance(pkg,list) or isinstance(pkg,tuple):
            for k,v in pkg:
                addto[k]=fs.path(env.sys,tooldir,v)
        else:
            warning("Can't load tool info",tooldir,err=True)
        #print(self.tools)
    def get_tool_dir(self,toolname):
        """Return the directory of tool *toolname* under sys or dist/sys, or None."""
        for tooldir in fs.dirs(env.sys):
            if fs.basename(tooldir)==toolname:
                return tooldir
        for tooldir in fs.dirs(fs.path(env.dist,"sys")):
            if fs.basename(tooldir)==toolname:
                return tooldir
        return None
    def __getattr__(self,attr):
        # Attribute-style access to registered tools: tools.xxx
        if attr in self.tools:
            return self.tools[attr]
        raise AttributeError
    def __getitem__(self,attr):
        # Item-style access to registered tools: tools["xxx"]
        if attr in self.tools:
            return self.tools[attr]
        raise KeyError
    def get_vm(self,vmuid,version,chipid,target):
        """Return the .vm file for (vmuid, version) under target/chipid, or None."""
        vmpath = fs.path(env.vms,target,chipid)
        vmfs = fs.glob(vmpath,"*.vm")
        vm = None
        for vmf in vmfs:
            vmm = fs.basename(vmf)
            if vmm.startswith(vmuid+"_"+version+"_"):
                vm=vmf
        return vm
    def get_vm_by_uid(self,vmuid):
        """Return the first .vm file whose name starts with "<vmuid>_", or None."""
        for target in fs.dirs(env.vms):
            for chid in fs.dirs(fs.path(env.vms,target)):
                for ff in fs.files(fs.path(env.vms,target,chid)):
                    ff_ = fs.basename(ff)
                    if ff_.startswith(vmuid+"_"):
                        return fs.path(ff)
        return None
    def get_vms(self,target,chipid=None,full_info=False):
        """Map vm uid -> .vm file for *target* (optionally one chip only).

        VM filenames follow "<uid>_<version>_<hash>_<rtos>.vm"; when
        full_info is True the value is (file, version, rtos, hash).
        """
        vms = {}
        targetpath = fs.path(env.vms,target)
        if not fs.exists(targetpath):
            return vms
        for chid in fs.dirs(targetpath):
            chid=fs.basename(chid)
            if chipid and chipid!=chid:
                continue
            vmfs = fs.glob(fs.path(targetpath,chid),"*.vm")
            for vmf in vmfs:
                vmbf = fs.basename(vmf)
                # Split the name on its last three underscores, right to left.
                rpos = vmbf.rfind("_") #rtos
                hpos = vmbf.rfind("_",0,rpos-1) #hash
                vpos = vmbf.rfind("_",0,hpos-1) #version
                vmrtos = vmbf[rpos+1:-3]
                vmhash = vmbf[hpos+1:rpos]
                vmversion = vmbf[vpos+1:hpos]
                vmuid = vmbf[0:vpos] #TODO: add check
                if full_info:
                    vms[vmuid]=(vmf,vmversion,vmrtos,vmhash)
                else:
                    vms[vmuid]=vmf
        return vms
    def get_vm_by_prefix(self,vmuid):
        """Return every .vm file whose basename starts with *vmuid*."""
        res = []
        for target in fs.dirs(env.vms):
            for chid in fs.dirs(fs.path(env.vms,target)):
                for ff in fs.files(fs.path(env.vms,target,chid)):
                    ff_ = fs.basename(ff)
                    if ff_.startswith(vmuid):
                        res.append(fs.path(ff))
        return res
    def _parse_order(self,path):
        """Parse <path>/order.txt into a list of example descriptors.

        Lines of '#'-prefixes define a label stack (one '#' per level);
        other lines name example folders whose project.md/main.py are read.
        NOTE(review): the level is computed with line.count("#"), so a '#'
        anywhere in a label shifts the level -- assumed labels contain none.
        """
        try:
            order = fs.readfile(fs.path(path,"order.txt"))
        except Exception:
            # Moved here from the success path, where it logged a misleading
            # "can't open" message after every successful read.
            debug("Can't open order.txt at",path)
            return []
        lines = order.split("\n")
        stack = []
        rs = []
        for line in lines:
            line = line.strip()
            if not line or len(line)<4 or line.startswith(";"):
                continue
            pos = line.count("#")
            if pos>0:
                # Heading line: trim the stack to this depth and push label.
                label = line[pos:]
                while (len(stack)>=(pos)): stack.pop()
                stack.append(label)
            else:
                try:
                    ex = {
                        "tag":list(stack),
                        "name":line.replace("_"," "),
                        "path":fs.path(path,line),
                        "desc":fs.readfile(fs.path(path,line,"project.md")),
                        "code":fs.readfile(fs.path(path,line,"main.py")),
                    }
                    rs.append(ex)
                except Exception:
                    # Skip examples with missing project.md/main.py.
                    pass
        return rs
    def _get_examples(self,path):
        """Thin wrapper kept for call-site symmetry."""
        return self._parse_order(path)
    def get_examples(self):
        """Collect example descriptors from stdlib and every installed library,
        tagging each with its owning lib; 'official' repo is scanned first."""
        exr = []
        srcs = [(fs.path(env.stdlib,"examples"),"core.zerynth.stdlib")]
        repos = fs.dirs(env.libs)
        if "official" in repos: #put official on top
            repos.remove("official")
            repos = ["official"]+repos
        for repo in repos:
            nms = fs.dirs(repo)
            for nm in nms:
                libs = fs.dirs(nm)
                for lib in libs:
                    srcs.append((fs.path(lib,"examples"),"lib."+fs.basename(nm)+"."+fs.basename(lib)))
        for exlib,lib in srcs:
            if fs.exists(exlib):
                ee = self._get_examples(exlib)
                for eee in ee:
                    eee["lib"]=lib
                exr.extend(ee)
        return exr
    def get_devices(self):
        """Yield device.json dicts for builtin and compiled custom devices,
        each extended with path/deps/has_all_deps/fullname."""
        bdirs = fs.dirs(env.devices)
        for bdir in bdirs:
            try:
                pkg = self.get_pack_info(bdir)
                if pkg is None:
                    continue
                bj = fs.get_json(fs.path(bdir,"device.json"))
                bj["path"] = bdir
                bj["deps"] = self.get_package_deps(pkg["fullname"])
                bj["has_all_deps"] = self.has_all_deps(pkg["fullname"])
                bj["fullname"] = pkg["fullname"]
                yield bj
            except Exception as e:
                warning(e)
        #load custom devices
        cdirs = fs.dirs(env.cvm)
        for cdir in cdirs:
            if not fs.exists(fs.path(cdir,"active")):
                #not compiled yet, skip
                continue
            try:
                # Bug fix: was get_pack_info(bdir), i.e. the last builtin
                # device's dir left over from the loop above, so every custom
                # device got the wrong package info.
                pkg = self.get_pack_info(cdir)
                if pkg is None:
                    continue
                bj = fs.get_json(fs.path(cdir,"device.json"))
                bj["path"] = cdir
                bj["deps"] = self.get_package_deps(pkg["fullname"])
                bj["has_all_deps"] = self.has_all_deps(pkg["fullname"])
                bj["fullname"] = pkg["fullname"]
                yield bj
            except Exception as e:
                warning(e)
    def get_specs(self,specs):
        """Parse "key:value" strings into a dict; the value "null" maps to None.
        Calls fatal() on a malformed spec."""
        options = {}
        for spec in specs:
            pc = spec.find(":")
            if pc<0:
                fatal("invalid spec format. Give key:value")
            thespec = spec[pc+1:]
            if thespec=="null":
                thespec=None
            options[spec[:pc]]=thespec
        return options
    def get_target(self,target,options=None):
        """Resolve *target* through device discovery with optional settings."""
        # Mutable-default fix: a shared {} default could leak state between calls.
        if options is None:
            options = {}
        import devices
        _dsc = devices.Discover()
        return _dsc.get_target(target,options)
    def get_modules(self):
        """Map importable module name -> list of its importable submodules,
        scanning every installed library repo/namespace/lib folder."""
        res = {}
        # libraries
        rdirs = fs.dirs(env.libs)
        for r in rdirs:
            repo = fs.basename(r)
            nsdirs = fs.dirs(r)
            for ns in nsdirs:
                namespace = fs.basename(ns)
                lbdirs = fs.dirs(ns)
                for l in lbdirs:
                    lib = fs.basename(l)
                    # official/zerynth libs import by bare name; everything
                    # else is namespaced (repo.namespace.lib).
                    if repo=="official":
                        if namespace=="zerynth":
                            module = lib
                        else:
                            module = namespace+"."+lib
                    else:
                        module = repo+"."+namespace+"."+lib
                    imports = []
                    for f in fs.files(l):
                        fl = fs.basename(f)
                        if fl.endswith(".py") and fl!="main.py":
                            imports.append(fl[0:-3])
                    res[module]=imports
        return res
    def get_vhal(self):
        """Merge every arch/family vhal.json under env.vhal into one dict."""
        vhal = {}
        arch_dirs = fs.dirs(env.vhal)
        for ad in arch_dirs:
            fmdirs = fs.dirs(ad)
            for fm in fmdirs:
                vhal_file = fs.path(fm,"vhal.json")
                if fs.exists(vhal_file):
                    vj = fs.get_json(vhal_file)
                    vhal.update(vj)
        return vhal
    def disk_usage(self):
        """Return the size in bytes of the Zerynth home directory."""
        usage = fs.dir_size(env.home)  # renamed from 'bytes' (shadowed builtin)
        return usage
    #fs.set_json(rj["data"], fs.path(vmpath,uid+"_"+version+"_"+rj["data"]["hash_features"]+"_"+rj["data"]["rtos"]+".vm"))
# Module-level singleton (the only name exported via __all__ above); call
# tools.init() to populate it.
tools = Tools()
# add_init(tools.init)
| [
"dev@zerynth.com"
] | dev@zerynth.com |
9881b96519fce86f61a5ee3cb7a611005b646983 | 0d2af397b900fddad3d532a9f772f70473886cf5 | /tickets/urls.py | 0e2d8c65e60ed82fb02ab25f58af4e4c1d190634 | [] | no_license | RobertUJ/Omaha | cc779b06e42c08ebadae0b8df4e006ad67d504d1 | 650d5e1e5550bf772f1817e16505c574f361bae0 | refs/heads/master | 2016-08-12T13:51:32.262876 | 2016-02-12T00:51:52 | 2016-02-12T00:51:52 | 49,794,851 | 0 | 0 | null | 2016-01-22T00:04:29 | 2016-01-16T23:12:39 | Python | UTF-8 | Python | false | false | 264 | py | from django.conf.urls import patterns, url
from tickets.views import TicketsIndexView, AddTicketView
# URL routes for the tickets app: the listing view and the add-ticket form.
urlpatterns = [
    url(r'^tickets/$', TicketsIndexView.as_view(), name='TicketsView'),
    url(r'^addticket/$', AddTicketView.as_view(), name='AddTicketView'),
]
"erickhp12@gmail.com"
] | erickhp12@gmail.com |
f84e7e892f22dcef23a66020fb69487611bee303 | b37769515f7e078e2215be27a76a0ba199f7676e | /home/migrations/0003_remove_blog_slug.py | c37321351241594e4fb6b823fb4bc8ea1c54e86c | [] | no_license | roxna/eproc | 15e532a401291505adec086d2c60c78843c9afc6 | f22506e2afd005538c21d7bb678649a3736b6feb | refs/heads/master | 2022-12-02T20:38:49.674344 | 2017-03-28T09:44:03 | 2017-03-28T09:44:03 | 72,560,527 | 0 | 0 | null | 2022-11-22T01:20:51 | 2016-11-01T17:38:57 | HTML | UTF-8 | Python | false | false | 377 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-11-23 17:51
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Reverts 0002_blog_slug: drops the 'slug' column from the Blog model.
    dependencies = [
        ('home', '0002_blog_slug'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='blog',
            name='slug',
        ),
    ]
| [
"roxna.irani@gmail.com"
] | roxna.irani@gmail.com |
41dfb043debbb31d564d9bdcdda0dd997a4a98a5 | dca5705c291da76cbfaf3897680eb0ae2eb56e2b | /aayushg_assgn/myauth/views.py | face35c4566395dead6248d30c8430cf8b2fedf8 | [] | no_license | gadia-aayush/Django-API-1 | 41a40598653009def8ca5bda9a578a26b8bf9115 | 307202ad0aa4357408e756cd74f3723e74fca253 | refs/heads/master | 2022-12-13T23:09:45.960562 | 2020-08-30T19:36:16 | 2020-08-30T19:36:16 | 273,763,155 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,723 | py | from django.shortcuts import render
from django.contrib.auth.models import User
from django.shortcuts import redirect
from django.contrib.auth import authenticate, login, logout
from django.http import JsonResponse
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework.decorators import api_view
from rest_framework import views
from rest_framework.authtoken.models import Token
from rest_framework.permissions import IsAuthenticated
import re
def user_login(request):
    """Log a user in with phone number (stored as username) + password.

    POST: authenticates and opens a session.  Always answers HTTP 200 with
    a JSON body {"code", "status", "message"} carrying the real outcome
    (200/403/401 live in the body, not in the HTTP status line).
    Any non-POST request renders the login form template.
    """
    if request.method == 'POST':
        username = request.POST.get('phone')
        password = request.POST.get('password')
        user = authenticate(username = username, password = password)
        if user :
            if user.is_active:
                # Valid credentials and active account: open the session.
                login(request,user)
                data = {"code" : 200, "status" : "OK", "message" : "LogIn Successfull"}
                return JsonResponse(data)
            else:
                # NOTE(review): with Django's default ModelBackend,
                # authenticate() already returns None for inactive users, so
                # this branch is likely unreachable -- confirm the backend.
                data = {"code" : 403, "status" : "Forbidden", "message" : "User Disabled"}
                return JsonResponse(data)
        else:
            data = {"code" : 401, "status" : "Unauthorized", "message" : "Invalid Login Credentials"}
            return JsonResponse(data)
    else:
        return render(request,'login.html')
# Django Rest Framework used
class logout(APIView):
    """Invalidate the caller's DRF auth token.

    GET by an authenticated user deletes that user's Token row, so the old
    token can no longer authenticate; always answers 200 with a JSON body.

    NOTE(review): the class name shadows django.contrib.auth.logout imported
    at the top of this module; it is kept because URLconf references it, but
    the session itself is not flushed here.
    """
    permission_classes = (IsAuthenticated,)
    def get(self, request):
        # Bug fix: Token.objects.get(user=user) raised Token.DoesNotExist
        # (an unhandled 500) for users authenticated without a token, e.g.
        # via session auth.  filter().delete() is a safe no-op in that case.
        Token.objects.filter(user=request.user).delete()
        data = {"code" : 200, "status" : "OK", "message" : "Log Out Successfull"}
        return Response(data)
def user_signup(request):
    """Register a new account keyed by a 10-digit mobile number.

    POST fields: phone (stored as username), password, name, email
    (optional -- only validated when non-empty).  Always answers HTTP 200
    with a JSON body {"code", "status", "message"} carrying the outcome;
    any non-POST request renders the sign-up form.

    NOTE(review): missing POST fields arrive as None and are validated as
    the literal string "None" (str(field)) -- confirm clients always send
    every field.
    """
    if request.method == 'POST':
        username = request.POST.get('phone')
        password = request.POST.get('password')
        name = request.POST.get('name')
        email = request.POST.get('email')
        #validate whether the phone number is registered or not
        try:
            if User.objects.get(username = username):
                data = {"code" : 403, "status" : "Forbidden", "message" : "Entered Mobile Number is already registered. Try loggin-in"}
                return JsonResponse(data)
        except User.DoesNotExist:
            # Number is free to register.  (Bug fix: this was a bare
            # "except:", which also swallowed database errors and keyboard
            # interrupts, silently treating any failure as "available".)
            pass
        #validate mobile number [must be 10 digits. assumed that all are of India, so ignored prefixed country codes]
        phoneregex = re.compile(r'^[1-9]\d{9}$')
        if phoneregex.search(str(username)):
            pass
        else:
            data = {"code" : 422, "status" : "Unprocessable Entity", "message" : "Mobile Number should be of 10 digits- ^[1-9]\d{9}$"}
            return JsonResponse(data)
        #validate name, making sure it is not empty
        # NOTE(review): the compiled pattern requires at least two leading
        # characters (second from [A-Za-z,.'], then anything), so a single
        # letter name is rejected, and it differs from the pattern shown in
        # the error message -- confirm which one is intended.
        firstregex = re.compile(r"^[A-Za-z][A-Za-z,.'].*$")
        if firstregex.search(str(name)):
            pass
        else:
            data = {"code" : 422, "status" : "Unprocessable Entity", "message" : "Name should start with an alphabet- ^[A-Za-z][A-Za-z,.']*$"}
            return JsonResponse(data)
        #validate email address
        emailregex = re.compile(r"^([\w\.\-]+)@([\w\-]+)((\.(\w){2,3})+)$")
        if str(email) != "":
            if emailregex.search(str(email)):
                pass
            else:
                data = {"code" : 422, "status" : "Unprocessable Entity", "message" : "Enter a valid email address- ^([\w\.\-]+)@([\w\-]+)((\.(\w){2,3})+)$"}
                return JsonResponse(data)
        #validate password
        passregex = re.compile(r"^(?=.*\d)(?=.*[a-z])(?=.*[A-Z])(?=.*[^a-zA-Z0-9])(?!.*\s).{8,15}$")
        if passregex.search(str(password)):
            pass
        else:
            data = {"code" : 422, "status" : "Unprocessable Entity", "message" : "Password should be between 8 to 15 characters which contain at least one lowercase letter, one uppercase letter, one numeric digit, and one special character- ^(?=.*\d)(?=.*[a-z])(?=.*[A-Z])(?=.*[^a-zA-Z0-9])(?!.*\s).{8,15}$"}
            return JsonResponse(data)
        # create_user() hashes the password and saves the row itself; the
        # previous extra authobj.save() was a redundant second UPDATE.
        User.objects.create_user(username = username, password = password, first_name = name, email = email)
        data = {"code" : 201, "status" : "Created", "message" : "Sign-Up Successfull"}
        return JsonResponse(data)
    else:
        return render(request,'user_signup.html')
# Django Rest Framework used
@api_view(['POST', ])
def get_token(request):
    """Return (or create) a DRF auth token for valid phone/password creds.

    Responds 200 with the existing token, 201 with a freshly created one,
    403 for a disabled account, 401 for bad credentials.
    """
    if request.method == 'POST':
        username = request.data.get('phone')
        password = request.data.get('password')
        user = authenticate(username = username, password = password)
        if user :
            if user.is_active:
                # Reuse an existing token rather than rotating it.
                tokened = Token.objects.filter(user=user)
                data = {}
                if tokened.count()>0:
                    data["code"] = 200
                    data["status"] = "OK"
                    data["message"] = "Token already Exists"
                    data["phone"] = username
                    data["Token"] = tokened[0].key
                    return Response(data)
                else:
                    token = Token.objects.create(user=user)
                    data["code"] = 201
                    data["status"] = "Created"
                    data["message"] = "Token Created"
                    data["Token"] = token.key
                    data["phone"] = username
                    return Response(data)
            else:
                data = {"code" : 403, "status" : "Forbidden", "message" : "User Disabled"}
                return Response(data)
        else:
            data = {"code" : 401, "status" : "Unauthorized", "message" : "Invalid Login Credentials"}
            return Response(data)
| [
"gadia.aayush@gmail.com"
] | gadia.aayush@gmail.com |
ac9c2a9ef0b1cf9f39976b219335f1e2257893fc | d4c2846af2194e8463bff02a9ad49eedc97539eb | /src/RPConfig1.py | 77c0a56cf43effb39d46c064b268de9169bf6a08 | [] | no_license | rbulha/pytimeclock | 8eda6a41ecbe0e5f94238885a4d70e6d5f7e385f | a1cda1edce3d69fa504f55c40e78db9ecb2d837b | refs/heads/master | 2021-01-15T22:28:57.382733 | 2012-08-10T17:58:52 | 2012-08-10T17:58:52 | 40,454,915 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,680 | py | import sys
import os
import time
import shelve
import dbhash #incluidos apenas para que o instalador encontre os requisitos
import anydbm #incluidos apenas para que o instalador encontre os requisitos
CONFIGURATION_FILE = 'configuration1.dat'
class CRPConfig:
global CONFIGURATION_FILE
print '[CRPConfig] LOAD CONFIGURATION'
sys_path = sys.path[0]
if os.path.splitext(sys_path)[1] == '':
base = sys.path[0]
else:
base = os.path.dirname(sys.path[0])
DB_BASE_PATH = os.path.dirname(base) + '\\data\\'
caminho = DB_BASE_PATH + CONFIGURATION_FILE
DB = shelve.open(caminho)
print '[CRPConfig] DB=',len(DB)
if (len(DB) != 0) and DB.has_key('C_H_NORMAL') and DB.has_key('H_E_ALMOCO'):
C_H_NORMAL = DB['C_H_NORMAL']
C_H_SEXTA = DB['C_H_SEXTA']
T_ALMOCO = DB['T_ALMOCO']
H_E_OFICIAL = DB['H_E_OFICIAL']
H_S_OFICIAL = DB['H_S_OFICIAL']
H_S_OFICIAL_SEXTA = DB['H_S_OFICIAL_SEXTA']
H_S_ALMOCO = DB['H_S_ALMOCO']
H_E_ALMOCO = DB['H_E_ALMOCO']
START_REPORT_DAY = DB['START_REPORT_DAY']
else:
H_E_OFICIAL = 7.0
DB['H_E_OFICIAL']=H_E_OFICIAL
H_S_OFICIAL = 17.0
DB['H_S_OFICIAL']=H_S_OFICIAL
T_ALMOCO = 1.0
DB['T_ALMOCO']=T_ALMOCO
H_S_OFICIAL_SEXTA = 16.0
DB['H_S_OFICIAL_SEXTA']=H_S_OFICIAL_SEXTA
H_S_ALMOCO = 12.0
DB['H_S_ALMOCO']=H_S_ALMOCO
H_E_ALMOCO = 13.0
DB['H_E_ALMOCO']=H_E_ALMOCO
#total working day hours
C_H_NORMAL = (H_S_OFICIAL - H_E_OFICIAL) - T_ALMOCO#9.1
DB['C_H_NORMAL']=C_H_NORMAL
C_H_SEXTA = (H_S_OFICIAL_SEXTA - H_E_OFICIAL) - T_ALMOCO#7.6
DB['C_H_SEXTA']=C_H_SEXTA
START_REPORT_DAY = 21
DB['START_REPORT_DAY']=START_REPORT_DAY
DB.sync()
@staticmethod
def GetJorneyInSeconds():
nowtime = time.localtime()
if nowtime.tm_wday == 4: #Sexta-feira
return CRPConfig.C_H_SEXTA*3600
else:
return CRPConfig.C_H_NORMAL*3600
@staticmethod
def GetLanchTimeInSeconds():
return CRPConfig.T_ALMOCO*3600
@staticmethod
def Get_H_S_OFICIAL():
nowtime = time.localtime()
if nowtime.tm_wday == 4: #Sexta-feira
return CRPConfig.H_S_OFICIAL_SEXTA
else:
return CRPConfig.H_S_OFICIAL
def main():
    """Entry point: instantiate CRPConfig.

    The configuration itself is loaded at class-definition time (on import);
    the instantiation is kept for backward compatibility. The previously
    unused local binding was removed.
    """
    CRPConfig()
if __name__ == '__main__':
main()
| [
"rbulha@3db46129-f7cc-561c-f858-d950435ae609"
] | rbulha@3db46129-f7cc-561c-f858-d950435ae609 |
658da1160eb4755901ebedf82b585ce6ddcd99da | 1b83b79fcd58878cad8c683f7c2fb048abdc9b6c | /magnum/conf/kubernetes.py | 2de9370e2bc59fb73dcbfd6a2ef6f75e558c8313 | [
"Apache-2.0"
] | permissive | ititandev/magnum | 88f7ab8d93e6913fa085d34577827d11aead1790 | 16ea8b6397f2bafc01e6d4ec474c1ae97f15a484 | refs/heads/master | 2020-12-28T19:07:02.905485 | 2020-02-03T17:53:15 | 2020-02-03T17:53:15 | 238,458,066 | 1 | 0 | Apache-2.0 | 2020-02-05T13:35:13 | 2020-02-05T13:35:12 | null | UTF-8 | Python | false | false | 1,333 | py | # Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_config import cfg
kubernetes_group = cfg.OptGroup(name='kubernetes',
title='Options for the Kubernetes addons')
kubernetes_opts = [
cfg.StrOpt('keystone_auth_default_policy',
default="/etc/magnum/keystone_auth_default_policy.json",
help='Explicitly specify the path to the file defined default '
'Keystone auth policy for Kubernetes cluster when '
'the Keystone auth is enabled. Vendors can put their '
'specific default policy here'),
]
def register_opts(conf):
    """Register the Kubernetes option group and its options on *conf*.

    The group must be registered before the options that live in it.
    """
    conf.register_group(kubernetes_group)
    conf.register_opts(kubernetes_opts, group=kubernetes_group)
def list_opts():
    """Return the option-group -> options mapping (for oslo.config tooling)."""
    opts_by_group = {kubernetes_group: kubernetes_opts}
    return opts_by_group
| [
"flwang@catalyst.net.nz"
] | flwang@catalyst.net.nz |
7aade3ac2d090d75cb7eb785668927ac61e0d212 | 297b6b2a030a0d665fd12780da80bc64a9016f59 | /Assignment2/Assignment/makeChange.py | 5d9e807a700003f2aa560de428e99a25f0a3393e | [] | no_license | z0t0b/COMP5703 | 133ed9a90ba2024616a7ad5480937b89a9f70072 | bd89faa66f726c9675d4e58855577e2fda1075c4 | refs/heads/master | 2022-04-21T15:50:39.272916 | 2020-04-15T02:40:13 | 2020-04-15T02:40:13 | 255,782,341 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,310 | py | import decimal
changeList = [0, 0, 0, 0, 0, 0, 0, 0]
def chop_to_n_decimals(x, n):
    """Round *x* to *n* decimal places with half-up rounding.

    Goes through decimal.Decimal built from repr(x) so borderline inputs
    such as 0.005 round up as expected, unlike the built-in round(), which
    uses banker's rounding on binary floats.
    """
    target = decimal.Decimal("1e{0}".format(-n))
    exact = decimal.Decimal(repr(x))
    return float(exact.quantize(target, decimal.ROUND_HALF_UP))
def makingChange(inputVal, index, amount):
    """Count how many coins/bills of *amount* fit into *inputVal*.

    Stores the count into the module-level ``changeList`` at *index*
    (side effect) and returns the remaining value. Sub-dollar remainders
    are re-rounded to 2 decimals to avoid float drift.
    """
    num = int(inputVal / amount)
    changeList[index] = num
    inputVal -= (num * amount)
    if(amount < 1):
        inputVal = chop_to_n_decimals(inputVal, 2)
    return inputVal
def makeChange(amount = []):
    """Break *amount* (0 <= amount < 99.995) into US denominations.

    Returns a list of 8 counts, ordered
    [$20, $10, $5, $1, quarter, dime, nickel, penny],
    or None when *amount* is not a number in the accepted range.

    Bug fix: the original returned the shared module-level ``changeList``
    itself, so every later call silently rewrote results a caller was
    still holding. A copy is returned instead; ``changeList`` is still
    updated in place for backward compatibility.
    """
    if((isinstance(amount, int) or isinstance(amount, float)) and (amount < 99.995 and amount >= 0.0)):
        roundedAmount = chop_to_n_decimals(amount, 2)
        # Peel denominations off largest-first; makingChange stores each
        # count into its changeList slot and returns the remainder.
        denominations = (20, 10, 5, 1, 0.25, 0.10, 0.05, 0.01)
        for index, denomination in enumerate(denominations):
            roundedAmount = makingChange(roundedAmount, index, denomination)
        return changeList[:]
    return None
| [
"noreply@github.com"
] | z0t0b.noreply@github.com |
e495a6da64e3b39072332ee3934ad2f8318bb290 | b7bb0a3ea2078dbdaa17947fd841fe1c9b5e356b | /oschown/workflows.py | a5b712d06344ce7e3a3a514e1831195fb2f2557f | [
"Apache-2.0"
] | permissive | epim/oschown | 48d4a7528ed38fb12cae408baad7a6e370ba86f1 | a50d3ad3769dad8d1f56dfe171d5345b3bee517f | refs/heads/master | 2021-02-13T21:21:01.223901 | 2018-09-17T15:28:41 | 2018-09-17T15:28:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,690 | py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import mock
import oslo_config.cfg
nova_conf = oslo_config.cfg.ConfigOpts()
cinder_conf = oslo_config.cfg.ConfigOpts()
# NOTE(danms): This is a crazy hack to import these project modules
# but with separated global oslo.config objects. Hopefully I can
# replace this with something that isn't quite as crazy (and at least
# doesn't use mock), but this works for testing.
with mock.patch('oslo_config.cfg.CONF', new=cinder_conf):
from oschown import chown_cinder
with mock.patch('oslo_config.cfg.CONF', new=nova_conf):
from oschown import chown_nova
from oschown import chown_neutron
from oschown import exception
LOG = logging.getLogger(__name__)
def parse_resource_id(resource_id):
    """Split a 'project:local_id' identifier on its first colon.

    The local id may itself contain colons; only the first one separates.
    A string with no colon yields a single-element list.
    """
    parts = resource_id.split(':', maxsplit=1)
    return parts
class ResourceCollection(object):
    """A collection of resources across projects.

    Collects resources that must be resolved and chown'ed together.
    Resource ids are strings of the form ``project:local_id`` where
    *project* is one of the keys in RESOURCE_TYPES.
    """

    # Maps the project prefix of a resource id to its handler instance.
    RESOURCE_TYPES = {
        'cinder': chown_cinder.CinderProject(),
        'nova': chown_nova.NovaProject(),
        'neutron': chown_neutron.NeutronProject(),
    }

    def __init__(self, context):
        # resource_id -> resolved resource (None while still unresolved)
        self._collected_resources = {}
        self._context = context

    def need_resource(self, resource_id):
        """Mark a resource id like project:id as needed for resolution.

        Needed resources must be chown'ed with the other resources in
        the collection. Already-known ids are left untouched so a
        resolved entry is never reset to None.
        """
        if resource_id not in self._collected_resources:
            self._collected_resources[resource_id] = None

    @property
    def resolved_resources(self):
        """A list of ChownableResource objects that have been resolved."""
        return [res for res in self._collected_resources.values()
                if res is not None]

    @property
    def unresolved_resources(self):
        """A list of resource identifiers that are yet unresolved."""
        return [r_id for r_id, r_res in self._collected_resources.items()
                if r_res is None]

    @property
    def have_all_resources(self):
        """Return whether or not all known resources have been resolved."""
        return len(self.unresolved_resources) == 0

    def resolve_missing_resources_one(self):
        """One pass of resource resolution.

        Make one pass through the list of unresolved resources and try
        to resolve them (collecting any additional dependencies).

        :raises: exception.UnknownResourceType for an unknown project prefix
        """
        for resource_id in self.unresolved_resources:
            project_id, local_id = parse_resource_id(resource_id)
            if project_id not in self.RESOURCE_TYPES:
                raise exception.UnknownResourceType()
            project = self.RESOURCE_TYPES[project_id]
            resource = project.collect_resource_by_id(self._context,
                                                      local_id)
            self._collected_resources[resource_id] = resource
            # Dependencies are queued for the next pass.
            for dep in resource.dependencies:
                self.need_resource(dep)

    def resolve_missing_resources(self):
        """Resolve all resources.

        Attempt to repeatedly resolve all resources in the list of
        needed ones. This runs until we have resolved all resources or
        we stop making progress.

        :raises: exception.UnableToResolveResources if some resources are not
                 resolvable
        """
        last_unresolved = None
        while not self.have_all_resources:
            self.resolve_missing_resources_one()
            now_unresolved = self.unresolved_resources
            # No progress since the last pass: give up instead of looping.
            if now_unresolved == last_unresolved:
                raise exception.UnableToResolveResources()
            last_unresolved = now_unresolved

    def chown_resources(self):
        """Actually change ownership of all resources in the collection.

        Does not actually change ownership if the context indicates a dry run
        should be performed.
        """
        for resource in self.resolved_resources:
            if self._context.dry_run:
                LOG.info('Would chown resource %s' % resource.identifier)
            else:
                LOG.info('Chowning resource %s' % resource.identifier)
                resource.chown(self._context)
resource.chown(self._context)
def _workflow_main(context, collection):
    """Resolve every resource in *collection*, then chown them all.

    Resolution failures are logged and abort the workflow before any
    ownership change happens, so a chown is all-or-nothing.
    """
    try:
        collection.resolve_missing_resources()
    except exception.ChownException as e:
        LOG.error('Unable to resolve resources: %s' % e)
        return
    LOG.info('Resolved %i resources to be chowned: %s' % (
        len(collection.resolved_resources),
        ','.join([r.identifier for r in collection.resolved_resources])))
    collection.chown_resources()
def workflow_nova(context, instance_id):
    """Resolve and change ownership of an instance and dependent resources."""
    resources = ResourceCollection(context)
    resources.need_resource('nova:{0}'.format(instance_id))
    _workflow_main(context, resources)
def workflow_cinder(context, volume_id):
    """Resolve and change ownership of a volume and dependent resources."""
    resources = ResourceCollection(context)
    resources.need_resource('cinder:{0}'.format(volume_id))
    _workflow_main(context, resources)
| [
"dansmith@redhat.com"
] | dansmith@redhat.com |
8d8b46573115c470483434c30bc2fd15efceb159 | 73785aea08895d0fc15e914ce329716712f057ec | /recipes/errorAnal.py | 9208c6a48ac906004212b9520360e38dbc9b8806 | [] | no_license | Peder2911/ModelComp | 5e93e6db7fbc809e7444448729a91ff7a762b0cc | 91ee3835ddc560adeb4af457953905aaeca79cd6 | refs/heads/master | 2020-05-20T05:09:01.877547 | 2019-05-18T13:37:34 | 2019-05-18T13:37:34 | 185,397,609 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 188 | py |
def ppSentences(sentences, y, x):
    """Pretty-print each sentence with its predicted and actual label.

    Args:
        sentences: iterable of sentence strings.
        y: sequence of predicted labels aligned with *sentences*.
        x: sequence of actual (gold) labels aligned with *sentences*.

    Bug fix: the original snippet lacked the `def` keyword (a SyntaxError)
    and iterated/indexed undefined globals (errorSents, prediction, actual)
    instead of its own parameters.
    """
    for i, s in enumerate(sentences):
        print('#' * 38)
        print(f'{s} - pred: {y[i]} | actual: {x[i]}')
        print('\n')
| [
"pglandsverk@gmail.com"
] | pglandsverk@gmail.com |
0c57b23ce2e57693a0fa07b8ddd2d25521f90145 | c6a101547c2b7f36fe83a725974a8a7f02cf176d | /data_structures/binary_trees/flip_tree.py | 20c8cbf5f563689f2b9a252bd664a6b22b2a1b23 | [
"MIT"
] | permissive | prabhupant/python-ds | 737cc35574de5c2ece0f0813cf00775324a8dbe7 | f7d6d78fedaf84b7527965bb1798b7a8da989474 | refs/heads/master | 2023-08-22T05:04:22.937675 | 2022-10-04T01:29:39 | 2022-10-04T01:29:39 | 199,366,418 | 2,325 | 704 | MIT | 2022-10-10T13:01:10 | 2019-07-29T02:48:57 | Python | UTF-8 | Python | false | false | 643 | py | # Flip a tree such like here
# https://www.geeksforgeeks.org/flip-binary-tree/
# Flipping subtree algorithm
# 1. root->left->left = root->right
# 2. root->left->right = root
# 3. root->left = NULL
# 4. root->right = NULL
class Node:
    """A binary-tree node: a value plus left/right child links."""

    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None


def flip_tree(root):
    """Flip the tree (GfG 'flip binary tree') and return the new root.

    Recursively flips the left subtree, then rewires this level:
    the old left child becomes the parent of the old right child (as its
    left) and of the old root (as its right); the old root becomes a leaf.
    """
    # Empty tree or a leaf is already flipped.
    if root is None or (root.left is None and root.right is None):
        return root
    new_root = flip_tree(root.left)
    # Keep a handle on the old left child before detaching it.
    pivot = root.left
    pivot.left = root.right
    pivot.right = root
    root.left = None
    root.right = None
    return new_root
| [
"noreply@github.com"
] | prabhupant.noreply@github.com |
99d5656ae432b56eb9438da7a8014adeca443e39 | ee2c15d82ff596f4ca9eda408f8e096b787f0d48 | /Python/4 Dictionaries_Sets/4 dictionary/sets_challenge.py | 7a56065963a00863f02685fa85a6c29210e88624 | [] | no_license | sainimohit23/algorithms | 1bbfee3bd4d1049b18425bf0d86ecaacd4c43ea0 | 911986abe015f7518ef169a5866b1058c7d41d4f | refs/heads/master | 2022-11-13T17:40:06.128838 | 2020-06-30T17:35:35 | 2020-06-30T17:35:35 | 268,071,412 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 190 | py | my_str = input("enter some text please ")
my_set = set(my_str)
vowels = set("aeiou")
finalset = my_set.difference(vowels)
finallist = sorted(finalset)
for num in finallist:
print(num)
| [
"sainimohit23@gmail.com"
] | sainimohit23@gmail.com |
784e7a40abe66b769c8b6ffca8fcf4ff447532c1 | 88ff86b95b377a4fd10474d2b215b0cf0b32143c | /src/ralph/scan/plugins/ssh_proxmox.py | 5627cf11be6d296a44bcf87c00dae5afd8551d1c | [
"Apache-2.0"
] | permissive | fossabot/ralph | f00fbfd9e64ae779633e0ea1faeb7fbe8f35353f | 9eb82955adf6b662bc460112b3d9b2d574ef0d70 | refs/heads/master | 2020-07-04T15:27:38.758147 | 2014-04-28T15:08:59 | 2014-04-28T15:08:59 | 202,324,100 | 0 | 0 | NOASSERTION | 2019-08-14T09:59:42 | 2019-08-14T09:59:41 | null | UTF-8 | Python | false | false | 9,507 | py | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import os
import json
from django.conf import settings
from ralph.discovery.hardware import get_disk_shares
from ralph.discovery.models import DeviceType
from ralph.scan.errors import ConnectionError, NoMatchError, NoLanError
from ralph.scan.plugins import get_base_result_template
from ralph.util import network
SETTINGS = settings.SCAN_PLUGINS.get(__name__, {})
logger = logging.getLogger("SCAN")
def _connect_ssh(ip_address, user, password):
    """Open an SSH connection to *ip_address*, checking port 22 first.

    :raises: ConnectionError when TCP port 22 is not reachable
    """
    if not network.check_tcp_port(ip_address, 22):
        raise ConnectionError('Port 22 closed on a Proxmox server.')
    return network.connect_ssh(ip_address, user, password)
def _get_master_ip_address(ssh, ip_address, cluster_cfg=None):
    """Best-effort discovery of the cluster master's IP address.

    Tries, in order: the supplied (or remotely read) cluster.cfg contents,
    the Proxmox 2 `pvesh` API (first node's dns1), and finally a
    `master { IP: ... }` stanza parsed out of the config. Falls back to
    the given *ip_address* whenever nothing better is found.
    """
    if not cluster_cfg:
        stdin, stdout, stderr = ssh.exec_command("cat /etc/pve/cluster.cfg")
        data = stdout.read()
    else:
        data = cluster_cfg
    if not data:
        # No cluster.cfg (Proxmox 2+): ask the REST-ish pvesh CLI instead.
        stdin, stdout, stderr = ssh.exec_command("pvesh get /nodes")
        data = stdout.read()
        if data:
            # Only the first node is consulted; its dns1 is taken as master.
            for node in json.loads(data):
                stdin, stdout, stderr = ssh.exec_command(
                    'pvesh get "/nodes/%s/dns"' % node['node'],
                )
                dns_data = stdout.read()
                if not dns_data:
                    return ip_address
                ip_address = json.loads(dns_data)['dns1']
                break
        else:
            return ip_address
    # Parse "name { key: value ... }" stanzas out of the config text.
    nodes = {}
    current_node = None
    for line in data.splitlines():
        line = line.strip()
        if line.endswith('{'):
            current_node = line.replace('{', '').strip()
            nodes[current_node] = {}
        elif line.endswith('}'):
            current_node = None
        elif ':' in line and current_node:
            key, value = (v.strip() for v in line.split(':', 1))
            nodes[current_node][key] = value
    # Prefer the IP of a stanza whose name starts with "master".
    for node, pairs in nodes.iteritems():
        is_master = node.startswith('master')
        try:
            ip_address = pairs['IP']
        except KeyError:
            continue
        if is_master:
            return ip_address
    return ip_address
def _get_cluster_member(ssh, ip_address):
    """Describe the Proxmox hypervisor itself as a scan-result device dict."""
    # The final whitespace-separated token on the first line of
    # `ifconfig eth0` is the interface's hardware (MAC) address.
    stdin, stdout, stderr = ssh.exec_command("ifconfig eth0 | head -n 1")
    mac_address = stdout.readline().split()[-1]
    member = {
        'model_name': 'Proxmox',
        'mac_addresses': [mac_address],
        'installed_software': [{
            'model_name': 'Proxmox',
            'path': 'proxmox',
        }],
        'system_ip_addresses': [ip_address],
    }
    return member
def _get_local_disk_size(ssh, disk):
    """Return the size of a disk image file, in megabytes.

    NOTE(review): the previous docstring said "bytes", but ``du -m``
    reports 1MB blocks, so the value returned here is megabytes.
    Returns 0 when `du` produces no output (e.g. missing file).
    """
    path = os.path.join('/var/lib/vz/images', disk)
    stdin, stdout, stderr = ssh.exec_command("du -m '%s'" % path)
    line = stdout.read().strip()
    if not line:
        return 0
    # `du` output is "<size>\t<path>"; take the leading size field.
    size = int(line.split(None, 1)[0])
    return size
def _get_virtual_machine_info(
    ssh,
    vmid,
    master_ip_address,
    storages,
    hypervisor_ip_address,
):
    """Parse a qemu-server VM config into a scan-result device dict.

    Reads /etc/qemu-server/<vmid>.conf (or the Proxmox 2 per-node path),
    extracts NIC MAC, hostname, CPU socket count, local disk images and
    shared storage volumes.

    :raises: NoLanError when the config defines no vlan/net interface
    NOTE(review): a config without a `sockets:` line would leave cpu_count
    unbound and raise at the list comprehension below — confirm whether
    that can occur in practice.
    """
    stdin, stdout, stderr = ssh.exec_command(
        "cat /etc/qemu-server/%d.conf" % vmid,
    )
    lines = stdout.readlines()
    if not lines:
        # Proxmox 2 uses a different directory structure
        stdin, stdout, stderr = ssh.exec_command(
            "cat /etc/pve/nodes/*/qemu-server/%d.conf" % vmid,
        )
        lines = stdout.readlines()
    disks = {}
    lan_model = None
    name = 'unknown'
    # Config lines are "key: value"; comments start with '#'.
    for line in lines:
        line = line.strip()
        if line.startswith('#') or ':' not in line:
            continue
        key, value = line.split(':', 1)
        if key.startswith('vlan'):
            # Old-style NIC entry: "<model>=<mac>".
            lan_model, lan_mac = value.split('=', 1)
        elif key.startswith('net'):
            # New-style NIC entry may carry ",option=..." after the MAC.
            lan_model, lan_mac = value.split('=', 1)
            if ',' in lan_mac:
                lan_mac = lan_mac.split(',', 1)[0]
        elif key == 'name':
            name = value.strip()
        elif key == 'sockets':
            cpu_count = int(value.strip())
        elif key.startswith('ide') or key.startswith('virtio'):
            disks[key] = value.strip()
    if lan_model is None:
        raise NoLanError(
            "No LAN for virtual server %s. Hypervisor IP: %s" % (
                vmid,
                hypervisor_ip_address,
            ),
        )
    device_info = {
        'model_name': 'Proxmox qemu kvm',
        'type': DeviceType.virtual_server.raw,
        'mac_addresses': [lan_mac],
        'management': master_ip_address,  # ? presumably the cluster master manages the VM
        'hostname': name,
    }
    detected_disks = []
    detected_shares = []
    # Disk entries look like "<storage>:<volume>[,<k>=<v>...]".
    for slot, disk in disks.iteritems():
        params = {}
        if ',' in disk:
            disk, rawparams = disk.split(',', 1)
            for kv in rawparams.split(','):
                if not kv.strip():
                    continue
                k, v = kv.split('=', 1)
                params[k] = v.strip()
        if ':' in disk:
            vg, lv = disk.split(':', 1)
        else:
            vg = ''
            lv = disk
        if vg == 'local':
            # Local qcow/raw image under /var/lib/vz/images.
            size = _get_local_disk_size(ssh, lv)
            if not size > 0:
                continue
            detected_disks.append({
                'family': 'QEMU disk image',
                'size': size,
                'label': slot,
                'mount_point': lv,
            })
            continue
        # Skip pseudo-local storages; everything else is a shared volume.
        if vg in ('', 'local', 'pve-local'):
            continue
        vol = '%s:%s' % (vg, lv)
        try:
            wwn, size = storages[lv]
        except KeyError:
            logger.warning(
                'Volume %s does not exist. Hypervisor IP: %s' % (
                    lv,
                    hypervisor_ip_address,
                ),
            )
            continue
        detected_shares.append({
            'serial_number': wwn,
            'is_virtual': True,
            'size': size,
            'volume': vol,
        })
    if detected_disks:
        device_info['disks'] = detected_disks
    if detected_shares:
        device_info['disk_shares'] = detected_shares
    # One single-core virtual CPU per configured socket.
    detected_cpus = [
        {
            'family': 'QEMU Virtual',
            'model_name': 'QEMU Virtual CPU',
            'label': 'CPU {}'.format(i + 1),
            'index': i + 1,
            'cores': 1,
        } for i in range(cpu_count)
    ]
    if detected_cpus:
        device_info['processors'] = detected_cpus
    return device_info
def _get_virtual_machines(ssh, master_ip_address, hypervisor_ip_address):
    """List the running VMs on the hypervisor as device dicts.

    Uses `qm list` to enumerate VMs, skips the header and anything not
    running, and collects per-VM details. VMs without a LAN are logged
    and skipped rather than failing the whole scan.
    """
    detected_machines = []
    storages = get_disk_shares(ssh)
    stdin, stdout, stderr = ssh.exec_command("qm list")
    for line in stdout:
        line = line.strip()
        if line.startswith('VMID'):
            continue
        # `qm list` columns: VMID NAME STATUS MEM(MB) BOOTDISK(GB) PID
        vmid, name, status, mem, bootdisk, pid = (
            v.strip() for v in line.split()
        )
        if status != 'running':
            continue
        vmid = int(vmid)
        try:
            device_info = _get_virtual_machine_info(
                ssh,
                vmid,
                master_ip_address,
                storages,
                hypervisor_ip_address,
            )
        except NoLanError as e:
            logger.warning(unicode(e))
        else:
            detected_machines.append(device_info)
    return detected_machines
def _ssh_proxmox(ip_address, user, password):
    """Scan a Proxmox hypervisor over SSH and return its device dict.

    Probes a few well-known Proxmox files/commands to confirm the host is
    really Proxmox, finds the cluster master, describes the hypervisor and
    attaches its running VMs as subdevices.

    :raises: NoMatchError when none of the Proxmox probes produce output
    """
    ssh = _connect_ssh(ip_address, user, password)
    try:
        cluster_cfg = None
        # First command producing output wins; only cluster.cfg contents
        # are kept for master discovery.
        for command in (
            'cat /etc/pve/cluster.cfg',
            'cat /etc/pve/cluster.conf',
            'cat /etc/pve/storage.cfg',
            'pvecm help',
        ):
            stdin, stdout, stderr = ssh.exec_command(command)
            data = stdout.read()
            if data != '':
                if command == 'cat /etc/pve/cluster.cfg':
                    cluster_cfg = data
                break
        else:
            # for/else: no probe produced any output.
            raise NoMatchError('This is not a PROXMOX server.')
        master_ip_address = _get_master_ip_address(
            ssh,
            ip_address,
            cluster_cfg,
        )
        cluster_member = _get_cluster_member(ssh, ip_address)
        subdevices = _get_virtual_machines(
            ssh,
            master_ip_address,
            ip_address,
        )
        if subdevices:
            cluster_member['subdevices'] = subdevices
    finally:
        # Always release the SSH connection, even on scan failure.
        ssh.close()
    return cluster_member
def scan_address(ip_address, **kwargs):
    """Scan plugin entry point for Proxmox hypervisors.

    Bails out early (NoMatchError) for Nexus switches and for hosts whose
    HTTP family is not Proxmox; otherwise SSHes in with the configured
    credentials and returns the standard scan-result template.
    """
    if 'nx-os' in (kwargs.get('snmp_name') or '').lower():
        raise NoMatchError('Incompatible Nexus found.')
    if kwargs.get('http_family') not in ('Proxmox',):
        raise NoMatchError('It is not Proxmox.')
    user = SETTINGS.get('user')
    password = SETTINGS.get('password')
    messages = []
    result = get_base_result_template('ssh_proxmox', messages)
    if not user or not password:
        result['status'] = 'error'
        messages.append(
            'Not configured. Set SSH_USER and SSH_PASSWORD in your '
            'configuration file.',
        )
    else:
        try:
            device_info = _ssh_proxmox(ip_address, user, password)
        except (ConnectionError, NoMatchError) as e:
            result['status'] = 'error'
            messages.append(unicode(e))
        else:
            result.update({
                'status': 'success',
                'device': device_info,
            })
    return result
| [
"andrew.jankowski@gmail.com"
] | andrew.jankowski@gmail.com |
c75ea51b954cef8081502d553948e07b0487abe9 | bf813d2b877fb8ba62feb4263484db3d0f26d5cd | /early-phd/map_to_flux.py | 1c2d0eab20e2c6fa5e1fe3228a8f9507a9b7ba48 | [] | no_license | 9217392354A/astro-scripts | 1e8e8c827097a877518d1f3e10870a5c2609417c | cd7a175bd504b4e291020b551db3077b067bc632 | refs/heads/master | 2021-01-13T00:40:57.481755 | 2016-03-25T17:04:28 | 2016-03-25T17:04:28 | 54,730,096 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 406 | py | #Program created by Chris Fuller to test a function for extracting flux's from a fits file using appature photomotry
#import stuff
from numpy import *
import numpy
import scipy
import math
import sys
import os
from os.path import join as pj
#File stuff
cat = "bigcoma.csv"
catfolder = "/Users/chrisfuller/Dropbox/coma/Catalogues"
catout ="comaTEST.csv"
folder = "/Users/chrisfuller/Dropbox/coma/flux2/" | [
"chrisfuller@Chriss-MBP.lan"
] | chrisfuller@Chriss-MBP.lan |
98ae73f5af580dce3fc708af8516af5e1c67bbf3 | 50e03dae243af6bfab19f8cf42494284ff70fbd3 | /BIG-BIRD/RelGAN.py | 05e0634536e46c4d7140e7c904e0f5d7773baeb5 | [] | no_license | BritneyMuller/Summarization-Lab | bf2d79abe724e999e4017d4ffe6220863fe7f162 | 4b40f5ac7a629f509c323bf426d3058268628186 | refs/heads/master | 2021-01-25T23:13:13.669487 | 2019-09-30T14:38:13 | 2019-09-30T14:38:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,010 | py | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import math, copy, time
from torch.autograd import Variable
import matplotlib.pyplot as plt
import os
import torch.autograd as autograd
from RelationalMemory import *
from Transformer import *
class BigBird():
#generator is translator here
def __init__(self, generator, discriminator, reconstructor, dictionary, gamma = 0.99, clip_value = 0.1, lr_G = 5e-5, lr_D = 5e-5, lr_R = 1e-4, LAMBDA = 10, TEMP_END = 0.5, vq_coef =0.8, device=torch.device("cuda" if torch.cuda.is_available() else "cpu")):
super(BigBird, self).__init__()
self.device = device
self.dictionary = dictionary
self.generator = generator.to(self.device)
self.reconstructor = reconstructor.to(self.device)
self.discriminator = discriminator.to(self.device)
self.gamma = gamma
self.eps = np.finfo(np.float32).eps.item()
self.optimizer_R = torch.optim.Adam(list(self.generator.parameters()) + list(self.reconstructor.parameters()), lr=lr_R)
#normal WGAN
self.optimizer_G = torch.optim.RMSprop(self.generator.parameters(), lr=lr_G)
self.optimizer_D = torch.optim.RMSprop(self.discriminator.parameters(), lr=lr_D)
#WGAN GP
#self.LAMBDA = LAMBDA # Gradient penalty lambda hyperparameter
#self.optimizer_G = torch.optim.Adam(self.generator.parameters(), lr=lr_G, betas=(0.0, 0.9))
#self.optimizer_D = torch.optim.Adam(self.discriminator.parameters(), lr=lr_D, betas=(0.0, 0.9))
self.clip_value = clip_value
self.TEMP_END = TEMP_END
self.lr_G = lr_G
self.lr_D = lr_D
self.lr_R = lr_R
self.total_steps = 0
self.vq_coef = 0.8
self.epoch = 0
    def calc_gradient_penalty(self, netD, real_data, fake_data):
        """WGAN-GP gradient penalty on critic *netD*.

        Interpolates between real and fake one-hot batches with a per-row
        random alpha, runs the critic's transformer encoder on the mix and
        penalizes gradient norms away from 1, scaled by self.LAMBDA.

        NOTE(review): assumes real_data/fake_data are
        (batch, seq_len, vocab) one-hot-like tensors — confirm at call site.
        """
        #print real_data.size()
        BATCH_SIZE = real_data.shape[0]
        dim_1 = real_data.shape[1]
        dim_2 = real_data.shape[2]
        # One alpha per (batch, position), broadcast across the vocab dim.
        alpha = torch.rand(BATCH_SIZE, dim_1)
        alpha = alpha.view(-1,1).expand(dim_1 * BATCH_SIZE, dim_2).view(BATCH_SIZE, dim_1, dim_2)
        alpha = alpha.to(self.device)
        #print(real_data.shape) #[BATCH_SIZE, 19, vocab_sz]
        #print(fake_data.shape) #[BATCH_SIZE, 19, vocab_sz]
        interpolates_data = ( alpha * real_data.float() + ((1 - alpha) * fake_data.float()) )
        interpolates = interpolates_data.to(self.device)
        #interpolates = netD.disguised_embed(interpolates_data)
        # requires_grad so we can differentiate the critic output w.r.t. inputs.
        interpolates = autograd.Variable(interpolates, requires_grad=True)
        # Mask out padding positions (argmax recovers the dominant token id).
        src_mask = (interpolates_data.argmax(-1) != netD.padding_index).type_as(interpolates_data).unsqueeze(-2)
        disc_interpolates = netD.transformer_encoder( interpolates, src_mask )
        gradients = autograd.grad(outputs=disc_interpolates, inputs=interpolates,
                                  grad_outputs=torch.ones(disc_interpolates.size()).to(self.device),
                                  create_graph=True, retain_graph=True, only_inputs=True)[0]
        # Two-sided penalty: (||grad||_2 - 1)^2, averaged over the batch.
        gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean() * self.LAMBDA
        return gradient_penalty
def _to_one_hot(self, y, n_dims):
scatter_dim = len(y.size())
y_tensor = y.to(self.device).long().view(*y.size(), -1)
zeros = torch.zeros(*y.size(), n_dims).to(self.device)
return zeros.scatter(scatter_dim, y_tensor, 1)
    def train_D(self, fake_datas, real_datas):
        """Compute the WGAN critic loss for one batch.

        Returns (loss tensor, mean real score, mean fake score); the loss
        is -E[D(real)] + E[D(fake)] (gradient penalty left commented out).
        The caller performs backward/step and weight clipping.
        """
        ## train discriminator
        # print("real")
        # print(real_datas[:10])
        real_score = torch.mean(self.discriminator(real_datas))
        # print("fake")
        # print(fake_datas[:10])
        fake_score = torch.mean(self.discriminator(fake_datas))
        batch_d_loss = -real_score + fake_score #+ self.calc_gradient_penalty(self.discriminator, real_datas, fake_datas)
        return batch_d_loss, real_score.item(), fake_score.item()
    def train_G(self, fake_datas):
        """One generator update: maximize the critic score of fake data.

        Minimizes -E[D(fake)]; keeps the graph alive (retain_graph=True)
        because the same generator outputs are reused afterwards for the
        reconstruction loss. Returns the scalar loss value.
        """
        self.optimizer_G.zero_grad()
        batch_g_loss = -torch.mean(self.discriminator(fake_datas))
        batch_g_loss.backward(retain_graph=True)
        self.optimizer_G.step()
        return batch_g_loss.item()
def indicies2string(self, indices):
inv_map = {v: k for k, v in self.dictionary.items()}
return ' '.join([inv_map[i.item()] for i in indices])
def train(self):
self.generator.train()
self.reconstructor.train()
self.discriminator.train()
def eval(self):
self.generator.eval()
self.reconstructor.eval()
self.discriminator.eval()
    def load(self, load_path):
        """Restore model weights and training counters from a checkpoint.

        Expects a dict saved by `save` with keys: generator, discriminator,
        reconstructor (state dicts), total_steps, epoch, gumbel_temperature.
        """
        print('load Bird from', load_path)
        loader = torch.load(load_path)
        self.generator.load_state_dict(loader['generator'])
        self.discriminator.load_state_dict(loader['discriminator'])
        self.reconstructor.load_state_dict(loader['reconstructor'])
        self.total_steps = loader['total_steps']
        self.epoch = loader['epoch']
        self.gumbel_temperature = loader['gumbel_temperature']
    def save(self, save_path):
        """Checkpoint model weights and training counters to *save_path*.

        The payload mirrors what `load` expects. The caller is responsible
        for making sure the target directory (./Nest) exists.
        """
        print('lay egg to ./Nest ... save as', save_path)
        torch.save({'generator':self.generator.state_dict(),
                    'reconstructor':self.reconstructor.state_dict(),
                    'discriminator':self.discriminator.state_dict(),
                    'total_steps':self.total_steps,
                    'epoch':self.epoch,
                    'gumbel_temperature':self.gumbel_temperature
                   },save_path)
    def eval_iter(self, src, src_mask, max_len, real_data, ct, verbose = 1):
        """One evaluation pass (no gradients): summarize then reconstruct.

        Returns (reconstruction accuracy, reconstruction CE loss value).
        With verbose=1 the first example of the batch is printed
        (source, generated summary, reference summary, reconstruction).
        NOTE(review): `ct % 1 == 0` is always true, so verbosity is
        governed by `verbose` alone — confirm intended print frequency.
        """
        with torch.no_grad():
            batch_size = src.shape[0]
            # Fresh relational memory for the generator each batch.
            memory = self.generator.initial_state(batch_size, trainable=True).to(self.device)
            summary_sample, summary_log_values, summary_probs, gumbel_one_hot = self.generator(src, max_len, memory, self.dictionary['[CLS]'], temperature = self.gumbel_temperature)
            # Fresh memory for the reconstructor as well.
            memory = self.reconstructor.initial_state(batch_size, trainable=True).to(self.device)
            CE_loss, acc, out = self.reconstructor.reconstruct_forward(gumbel_one_hot, src, memory, self.dictionary['[CLS]'])
            if verbose == 1 and ct % 1 == 0:
                print("origin:")
                print(self.indicies2string(src[0]))
                print("summary:")
                print(self.indicies2string(summary_sample[0]))
                print("real summary:")
                print(self.indicies2string(real_data[0]))
                print("reconsturct out:")
                print(self.indicies2string(out[0]))
                print("")
            return acc, CE_loss.item()
    def pretrainGAN_run_iter(self, src, src_mask, max_len, real_data, D_iters = 5, D_toggle = 'On', verbose = 1):
        """One supervised (MLE) pretraining step for the generator.

        Generates a summary with Gumbel sampling and trains the generator
        with NLL loss against the reference summary `real_data` (no
        discriminator involvement despite the D_* parameters). Checkpoints
        every 500 steps, prints a sample every 1000. Returns losses plus
        the first-example probability/one-hot slices for logging.
        """
        batch_size = src.shape[0]
        memory = self.generator.initial_state(batch_size, trainable=True).to(self.device)
        # Anneal the Gumbel temperature towards TEMP_END with step count.
        self.gumbel_temperature = max(self.TEMP_END, math.exp(-1e-4*self.total_steps))
        summary_sample, summary_log_values, summary_probs, gumbel_one_hot = self.generator(src, max_len, memory, self.dictionary['[CLS]'], temperature = self.gumbel_temperature)
        batch_G_loss = 0
        NNcriterion = nn.NLLLoss().to(self.device)
        # NLLLoss over flattened (batch*len, vocab) log-probabilities.
        batch_G_loss = NNcriterion(summary_probs.log().contiguous().view(batch_size * max_len, -1), real_data.contiguous().view(-1))
        self.optimizer_G.zero_grad()
        batch_G_loss.backward()
        self.optimizer_G.step()
        self.total_steps += 1
        if self.total_steps % 500 == 0:
            if not os.path.exists("./Nest"):
                os.makedirs("./Nest")
            self.save("./Nest/Pretrain_RelGAN")
        if verbose == 1 and self.total_steps % 1000 == 0:
            print("origin:")
            print(self.indicies2string(src[0]))
            print("summary:")
            print(self.indicies2string(summary_sample[0]))
            print("real summary:")
            print(self.indicies2string(real_data[0]))
            print("")
        # First example, first position, first 100 vocab entries (for plots).
        distrib = summary_probs[0,0, :100].cpu().detach().numpy()
        one_hot_out = gumbel_one_hot[0,0, :100].cpu().detach().numpy()
        return [batch_G_loss, 0], [0], [0, 0, 0], [self.indicies2string(src[0]), self.indicies2string(summary_sample[0]), 0], distrib, one_hot_out
def run_iter(self, src, src_mask, max_len, real_data, D_iters = 5, D_toggle = 'On', verbose = 1, writer = None):
    """Run one adversarial training step.

    Sequence: D_iters critic updates with weight clipping (WGAN-style), one
    generator update, then one reconstructor update on the Gumbel-softmax
    summary (the reconstructor tries to recover `src` from the summary).

    Returns ([G_loss, D_loss], [reconstruction CE], [real_score, fake_score,
    reconstruction acc], [source text, sampled summary, reconstruction],
    distrib, one_hot_out) for logging.

    NOTE(review): when D_toggle != 'On', real_score/fake_score are never
    bound and the return statement below raises NameError — confirm this
    method is only called with D_toggle == 'On'.
    """
    #summary_logits have some problem
    #summary = self.generator(src, src_mask, max_len, self.dictionary['[CLS]'])
    batch_size = src.shape[0]
    memory = self.generator.initial_state(batch_size, trainable=True).to(self.device)
    # Exponentially annealed Gumbel-softmax temperature, floored at TEMP_END.
    self.gumbel_temperature = max(self.TEMP_END, math.exp(-1e-4*self.total_steps))
    summary_sample, summary_log_values, summary_probs, gumbel_one_hot = self.generator(src, max_len, memory, self.dictionary['[CLS]'], temperature = self.gumbel_temperature)
    batch_D_loss = 0
    if(D_toggle == 'On'):
        # Critic updates; parameters are clamped after each step (WGAN
        # weight clipping).
        for i in range(D_iters):
            self.optimizer_D.zero_grad()
            batch_d_loss, real_score, fake_score = self.train_D(gumbel_one_hot, self._to_one_hot(real_data, len(self.dictionary)))
            batch_D_loss += batch_d_loss
            batch_d_loss.backward(retain_graph=True);
            #Clip critic weights
            for p in self.discriminator.parameters():
                p.data.clamp_(-self.clip_value, self.clip_value)
            self.optimizer_D.step();
        batch_D_loss = batch_D_loss.item()/D_iters
    batch_G_loss = 0
    if(D_toggle == 'On'):
        #print(gumbel_one_hot.shape)
        batch_G_loss = self.train_G(gumbel_one_hot)
    self.gumbel_temperature = max(self.TEMP_END, math.exp(-1e-4*self.total_steps))
    # Reconstruction pass: cross-entropy of recovering src from the summary.
    memory = self.reconstructor.initial_state(batch_size, trainable=True).to(self.device)
    CE_loss, acc, out = self.reconstructor.reconstruct_forward(gumbel_one_hot, src, memory, self.dictionary['[CLS]'])
    rec_loss = CE_loss #+ self.vq_coef * vq_loss + 0.25 * self.vq_coef * commit_loss
    self.optimizer_R.zero_grad()
    rec_loss.backward()
    # Joint gradient-norm clipping over generator + reconstructor parameters.
    nn.utils.clip_grad_norm_(list(self.generator.parameters()) + list(self.reconstructor.parameters()), 0.1)
    self.optimizer_R.step()
    self.total_steps += 1
    # Periodic checkpoint every 500 steps.
    if self.total_steps % 500 == 0:
        if not os.path.exists("./Nest"):
            os.makedirs("./Nest")
        self.save("./Nest/DoubleRelationMEM_GAN")
    #for i in range(5):
    #plt.plot(range(1000),summary_probs.cpu().detach().numpy()[0,i,:1000] )
    #    wandb.log({"prob {}".format(i): wandb.Histogram(summary_probs.cpu().detach().numpy()[0,i,:1000])},step=step)
    if verbose == 1 and self.total_steps % 100 == 0:
        print("origin:")
        print(self.indicies2string(src[0]))
        print("summary:")
        print(self.indicies2string(summary_sample[0]))
        print("real summary:")
        print(self.indicies2string(real_data[0]))
        print("reconsturct out:")
        print(self.indicies2string(out[0]))
        # print("sentiment:",label[0].item())
        # print("y:",sentiment_label[0].item())
        # print("reward:",rewards[0].item())
        print("")
    # for name, param in self.generator.named_parameters():
    #     writer.add_histogram(name, param.clone().cpu().data.numpy(), self.total_steps)
    # for name, param in self.reconstructor.named_parameters():
    #     writer.add_histogram(name, param.clone().cpu().data.numpy(), self.total_steps)
    # First 100 vocabulary entries of the first token position, for logging.
    distrib = summary_probs.cpu().detach().numpy()[0,0, :100]
    one_hot_out = gumbel_one_hot.cpu().detach().numpy()[0,0, :100]
    return [batch_G_loss, batch_D_loss], [CE_loss.item()], [real_score, fake_score, acc], [self.indicies2string(src[0]), self.indicies2string(summary_sample[0]), self.indicies2string(out[0])], distrib, one_hot_out
class LSTMEncoder(nn.Module):
    """Single-layer bidirectional LSTM encoder over token embeddings.

    Exposes `outsize` (= 2 * hidden_dim) so downstream modules know the
    per-position feature width of the returned sequence.
    """
    def __init__(self, vocab_sz, hidden_dim, padding_index):
        super().__init__()
        self.src_embed = nn.Embedding(vocab_sz, hidden_dim)
        self.rnn_cell = nn.LSTM(hidden_dim, hidden_dim, num_layers=1,
                                batch_first=True, bidirectional=True)
        self.padding_index = padding_index
        self.outsize = hidden_dim * 2
    def forward(self, x):
        """Embed token ids `x` (batch, seq) and return the full output
        sequence, shape (batch, seq, 2 * hidden_dim); final states are
        discarded."""
        embedded = self.src_embed(x)
        sequence_out, _final_states = self.rnn_cell(embedded)
        return sequence_out
# class LSTM_Gumbel_Encoder_Decoder(nn.Module):
# def __init__(self, hidden_dim, emb_dim, input_len, output_len, voc_size, device, eps=1e-8, num_layers = 2):
# super().__init__()
# self.hidden_dim = hidden_dim
# self.emb_dim = emb_dim
# #self.input_len = input_len
# #self.output_len = output_len
# #self.voc_size = voc_size
# #self.teacher_prob = 1.
# #self.epsilon = eps
# self.emb_layer = nn.Embedding(voc_size, emb_dim)
# self.num_layers = num_layers
# self.encoder = nn.LSTM(emb_dim, hidden_dim, num_layers=num_layers, batch_first=True, bidirectional=True)
# self.decoder = nn.LSTM(emb_dim, hidden_dim*2, num_layers=num_layers, batch_first=True)
# self.device = device
# self.attention_softmax = nn.Softmax(dim=1)
# # self.pro_layer = nn.Sequential(
# # nn.Linear(hidden_dim*4, voc_size, bias=True)
# # )
# self.adaptive_softmax = torch.nn.AdaptiveLogSoftmaxWithLoss(hidden_dim*4, voc_size, [100, 1000, 10000], div_value=4.0, head_bias=False)
# def forward(self, x, src_mask, max_len, start_symbol, mode = 'argmax', temp = 2.0):
# batch_size = x.shape[0]
# input_len = x.shape[1]
# device = x.device
# # encoder
# x_emb = self.emb_layer(x)
# memory, (h, c) = self.encoder(x_emb)
# h = h.transpose(0, 1).contiguous()
# c = c.transpose(0, 1).contiguous()
# h = h.view(batch_size, self.num_layers, h.shape[-1]*2)
# c = c.view(batch_size, self.num_layers, c.shape[-1]*2)
# h = h.transpose(0, 1).contiguous()
# c = c.transpose(0, 1).contiguous()
# ## decoder
# out_h, out_c = (h, c)
# ys = torch.ones(batch_size, 1).fill_(start_symbol).type_as(x.data)
# values = []
# all_probs = []
# gumbel_one_hots = []
# for i in range(max_len-1):
# ans_emb = self.emb_layer(ys[:,-1]).view(batch_size, 1, self.emb_dim)
# out, (out_h, out_c) = self.decoder(ans_emb, (out_h, out_c))
# attention = torch.bmm(memory, out.transpose(1, 2)).view(batch_size, input_len)
# attention = self.attention_softmax(attention)
# context_vector = torch.bmm(attention.view(batch_size, 1, input_len), memory)
# logits = torch.cat((out, context_vector), -1).view(batch_size, -1)
# one_hot, next_words, value, prob = self.gumbel_softmax(logits, temp)
# # print(feature.shape)
# # print(one_hot.shape)
# # print(next_words.shape)
# # print(values.shape)
# # print(log_probs.shape)
# # input("")
# ys = torch.cat((ys, next_words.view(batch_size, 1)), dim=1)
# values.append(value)
# all_probs.append(prob)
# gumbel_one_hots.append(one_hot)
# values = torch.stack(values,1)
# all_probs = torch.stack(all_probs,1)
# gumbel_one_hots = torch.stack(gumbel_one_hots, 1)
# return ys, values, all_probs, gumbel_one_hots
# def sample_gumbel(self, shape, eps=1e-20):
# U = torch.rand(shape).to(self.device)
# return -Variable(torch.log(-torch.log(U + eps) + eps))
# def gumbel_softmax_sample(self, logits, temperature):
# y = logits + self.sample_gumbel(logits.size())
# #the formula should be prob not logprob, I guess it still works
# return self.adaptive_softmax.log_prob(logits).exp()
# #return F.softmax(y / temperature, dim=-1)
# def gumbel_softmax(self, logits, temperature):
# """
# ST-gumple-softmax
# input: [*, n_class]
# return: flatten --> [*, n_class] an one-hot vector
# """
# y = self.gumbel_softmax_sample(logits, temperature)
# shape = y.size()
# values, ind = y.max(dim=-1)
# y_hard = torch.zeros_like(y).view(-1, shape[-1])
# y_hard.scatter_(1, ind.view(-1, 1), 1)
# y_hard = y_hard.view(*shape)
# y_hard = (y_hard - y).detach() + y
# return y_hard.view(logits.shape[0], -1), ind, values, y
# class LSTM_Normal_Encoder_Decoder(nn.Module):
# def __init__(self, hidden_dim, emb_dim, input_len, output_len, voc_size, pad_index, device, eps=1e-8, num_layers = 2):
# super().__init__()
# self.hidden_dim = hidden_dim
# self.emb_dim = emb_dim
# self.device = device
# #self.input_len = input_len
# #self.output_len = output_len
# #self.voc_size = voc_size
# #self.teacher_prob = 1.
# #self.epsilon = eps
# self.num_layers = num_layers
# #self.emb_layer = nn.Embedding(voc_size, emb_dim)
# self.disguise_embed = nn.Linear(voc_size, emb_dim)
# self.encoder = nn.LSTM(emb_dim, hidden_dim, num_layers=num_layers, batch_first=True, bidirectional=True)
# self.decoder = nn.LSTM(emb_dim, hidden_dim*2, num_layers=num_layers, batch_first=True)
# self.attention_softmax = nn.Softmax(dim=1)
# self.vocab_sz = voc_size
# self.criterion = torch.nn.AdaptiveLogSoftmaxWithLoss(hidden_dim*4, voc_size, [1000, 5000, 20000], div_value=4.0, head_bias=False)
# def forward(self, x, src_mask, max_len, start_symbol, y, mode = 'argmax', temp = 2.0):
# batch_size = x.shape[0]
# input_len = x.shape[1]
# device = x.device
# # encoder
# x_emb = self.disguise_embed(x)
# memory, (h, c) = self.encoder(x_emb)
# h = h.transpose(0, 1).contiguous()
# c = c.transpose(0, 1).contiguous()
# h = h.view(batch_size, self.num_layers, h.shape[-1]*2)
# c = c.view(batch_size, self.num_layers, c.shape[-1]*2)
# h = h.transpose(0, 1).contiguous()
# c = c.transpose(0, 1).contiguous()
# ## decoder
# out_h, out_c = (h, c)
# logits = []
# for i in range(max_len):
# ans_emb = self.disguise_embed(self._to_one_hot(y[:,i], self.vocab_sz)).view(batch_size, 1, self.emb_dim)
# out, (out_h, out_c) = self.decoder(ans_emb, (out_h, out_c))
# attention = torch.bmm(memory, out.transpose(1, 2)).view(batch_size, input_len)
# attention = self.attention_softmax(attention)
# context_vector = torch.bmm(attention.view(batch_size, 1, input_len), memory)
# logit = torch.cat((out, context_vector), -1).view(batch_size, -1)
# # if mode == 'argmax':
# # values, next_words = torch.max(log_probs, dim=-1, keepdim=True)
# # if mode == 'sample':
# # m = torch.distributions.Categorical(logits=log_probs)
# # next_words = m.sample()
# # values = m.log_prob(next_words)
# logits.append(logit)
# logits = torch.stack(logits, 1)
# _ ,loss = self.criterion(logits[:,:-1].contiguous().view(batch_size * (max_len - 1), -1), y[:,1:].contiguous().view(batch_size * (max_len-1)))
# #y from one to get rid of [CLS]
# log_argmaxs = self.criterion.predict(logits[:,:-1].contiguous().view(batch_size * (max_len - 1), -1)).view(batch_size, max_len-1)
# acc = ( log_argmaxs== y[:,1:]).float().mean()
# return loss, acc, log_argmaxs
# def _to_one_hot(self, y, n_dims):
# scatter_dim = len(y.size())
# y_tensor = y.to(self.device).long().view(*y.size(), -1)
# zeros = torch.zeros(*y.size(), n_dims).to(self.device)
# return zeros.scatter(scatter_dim, y_tensor, 1)
class Discriminator(nn.Module):
    # GAN critic: scores (soft) one-hot token sequences with an injected
    # transformer encoder followed by a per-position linear head.
    def __init__(self, transformer_encoder, hidden_dim, vocab_sz, padding_index):
        super(Discriminator, self).__init__()
        self.padding_index = padding_index
        # Linear layer standing in for an embedding lookup so the input can
        # be a differentiable one-hot / Gumbel-softmax distribution.
        self.disguise_embed = nn.Linear(vocab_sz, hidden_dim)
        self.transformer_encoder = transformer_encoder
        # Output width is taken from the encoder's last layer.
        self.linear = nn.Linear(self.transformer_encoder.layers[-1].size, 1)
        #self.sigmoid = nn.Sigmoid()
    def forward(self, x):
        # x: (batch, seq, vocab) distribution per position; argmax recovers
        # the hard token id only to build the padding mask.
        src_mask = (x.argmax(-1) != self.padding_index).type_as(x).unsqueeze(-2)
        x = self.transformer_encoder(self.disguise_embed(x), src_mask)
        score = self.linear(x)
        return score
| [
"you@example.com"
] | you@example.com |
cc8c69ab62120ec4784513c836d1a7756d9b1a0d | 2814757215ea599c47817315902a1642459970df | /object-dev/student-info/two_version/step5.py | 1cdf392ac433075bcca876a89264a944d9d516a2 | [] | no_license | legolas999/Python-learning | caadf31e60b973864f365c4f27eb9589bc1cdcd2 | 1a828595bc9596e737cc997bfad1f245b3314e8b | refs/heads/master | 2020-05-15T04:11:13.328995 | 2019-06-08T16:17:04 | 2019-06-08T16:17:04 | 182,081,867 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,281 | py | #!/usr/bin/python3.6
# Global list holding every student record; each record is a dict with the
# keys number/name/id/phone/dormitory/address.
student_info = []
def print_menu():
    """Print the main menu of the student information management system."""
    separator = '=' * 40
    entries = (
        '学生信息管理系统V1.0',
        '1.查询学员信息',
        '2.增加学员信息',
        '3.修改学员信息',
        '4.删除学员信息',
        '5.显示学员信息',
        '6.保存学员信息',
        '7.退出系统',
    )
    print(separator)
    for entry in entries:
        print('\t{:<40}'.format(entry))
    print(separator)
def add_stu_info():
    """Prompt for a new student's details on stdin and append the record to
    the global student_info list."""
    global student_info
    # Collect the new student's fields from the user.
    new_number = input('请输入你的学号:')
    new_name = input('请输入你的姓名:')
    new_id = input('请输入你的身份证号码:')
    new_phone = input('请输入你的电话号码:')
    new_dormitory = input('请输入你的宿舍号码:')
    new_addr = input('请输入你的籍贯地址:')
    # Build a fresh dict for the new student record.
    new_info = {}
    new_info['number'] = new_number
    new_info['name'] = new_name
    new_info['id'] = new_id
    new_info['phone'] = new_phone
    new_info['dormitory'] = new_dormitory
    new_info['address'] = new_addr
    # Append the record to the global roster.
    student_info.append(new_info)
    #print(student_info) # for test
def find_stu_info():
    """Prompt for a name and print that student's record, or a not-found
    message if no record matches."""
    global student_info
    # Name to search for.
    find_name = input('请输入要查找的学员姓名:')
    find_flag = 0 # 0 means "not found yet"
    for item in student_info:
        if find_name == item['name']:
            print('{:<20}\t{:<20}\t{:<20}\t{:<20}\t{:<20}\t{:<20}'.format('学号','姓名','身份证','电话','宿舍','籍贯'))
            print('{:<20}\t{:<20}\t{:<20}\t{:<20}\t{:<20}\t{:<20}'.format(item['number'],item['name'],item['id'],item['phone'],\
            item['dormitory'],item['address']))
            find_flag = 1 # record was found
            break # stop after printing the first match
    # Report when no record matched.
    if find_flag == 0:
        print("查无此人")
def show_stu_info():
    """Print a table of all student records (header row plus one line per
    record)."""
    global student_info
    print('{:<20}\t{:<20}\t{:<20}\t{:<20}\t{:<20}\t{:<20}'.format('学号','姓名','身份证','电话','宿舍','籍贯'))
    for item in student_info:
        print('{:<20}\t{:<20}\t{:<20}\t{:<20}\t{:<20}\t{:<20}'.format(item['number'],item['name'],item['id'],item['phone'],\
        item['dormitory'],item['address']))
def save_stu_info():
    """Persist the global student_info list to 'stu_info.data'.

    The list is written as its str()/repr() form, which load_stu_info()
    parses back; the two functions are therefore coupled to this format.
    """
    global student_info
    # Context manager guarantees the handle is closed even if write() raises
    # (the original leaked the handle on error).
    with open('stu_info.data', 'w') as f:
        f.write(str(student_info))
def load_stu_info():
    """Load previously saved records from 'stu_info.data' into the global
    student_info list; best-effort — a missing or unreadable file leaves the
    current list unchanged."""
    global student_info
    import ast
    try:
        with open('stu_info.data') as f:
            # literal_eval safely parses the repr() written by
            # save_stu_info() without executing arbitrary code, unlike the
            # original eval() which would run anything in the file.
            student_info = ast.literal_eval(f.read())
    except Exception:
        # Best-effort restore: keep whatever roster we already have.
        pass
def modify_stu_info():
    """Prompt for a student name and, if found, overwrite every field of
    that record (except the name) with newly entered values."""
    global student_info
    find_flag = 0
    modify_name = input('请输入需要修改的学生名字:')
    for item in student_info:
        if modify_name == item['name']:
            # Re-enter all fields except the name itself.
            modify_number = input('请输入你的新的学号:')
            modify_id = input('请输入你的新的身份证号码:')
            modify_phone = input('请输入你的新的电话号码:')
            modify_dormitory = input('请输入你的新的宿舍号码:')
            modify_addr = input('请输入你的新的籍贯地址:')
            item['number'] = modify_number
            item['id'] = modify_id
            item['phone'] = modify_phone
            item['dormitory'] = modify_dormitory
            item['address'] = modify_addr
            find_flag = 1
            break
    if find_flag == 0:
        print('输入的名字不正确,重新输入')
def delete_stu_info():
    """Prompt for a student name and remove the first matching record from
    the global list."""
    global student_info
    find_flag = 0
    del_name = input('请输入要删除的学生名字:')
    for item in student_info:
        if del_name == item['name']:
            # Deleting inside the loop is safe here because we break
            # immediately afterwards.
            del student_info[student_info.index(item)]
            find_flag = 1
            break
    if find_flag == 0:
        print('此学生不存在,请重新输入')
def main():
    """Entry point: restore saved records, show the menu, then loop over
    user commands until exit (option 7)."""
    # Restore previously saved data into the program.
    load_stu_info()
    # Print the feature menu once up front.
    print_menu()
    while True:
        # Read the menu choice. NOTE(review): int() raises ValueError on
        # non-numeric input, which aborts the loop — confirm intended.
        num = int(input('请输入操作序号:'))
        # Dispatch to the selected feature.
        if num==1:
            find_stu_info()
        elif num==2:
            add_stu_info()
        elif num==3:
            modify_stu_info()
        elif num==4:
            delete_stu_info()
        elif num==5:
            show_stu_info()
        elif num==6:
            save_stu_info()
        elif num==7:
            break
        else:
            print('输入有误,请重新输入')
        print('-'*50)
        print('')
if __name__ == '__main__':
    main()
| [
"lqr888888@aliyun.com"
] | lqr888888@aliyun.com |
715c52973d9758a579026ef80e34afbd30905a12 | 32134ac2fa760ba7285d9bc844fa4db0be76352a | /perceptron.py | dcc4b74eaf87eec1c00054f55b18a839728e6999 | [] | no_license | NJCinnamond/NLPAssignment-1 | 8143bf8bce8de1044b757de28e2b0afce4169ce1 | 4204dcf64cf0864e6be2c5ce645f3e1ea810762f | refs/heads/master | 2020-12-31T10:45:22.029690 | 2020-02-25T01:14:43 | 2020-02-25T01:14:43 | 239,006,956 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,636 | py | """ Maximum entropy model for Assignment 1: Starter code.
You can change this code however you like. This is just for inspiration.
"""
import os
import sys
import numpy as np
from util import evaluate, load_data
from sklearn.metrics import confusion_matrix
class PerceptronModel():
    """Multi-class perceptron classifier (the original docstring wrongly
    called this a maximum entropy model).

    One weight vector per class; prediction is the arg-max of the class
    activations (dot products). The last feature is expected to be a
    constant 1 acting as the bias term (see create_dummy_bias).

    Attributes:
        W (dict): class label -> numpy weight vector (set during train()).
        lr (float): learning rate for the perceptron update.
        num_dim (int): feature dimensionality (set during train()).
        num_class (int): number of classes.
    """
    def __init__(self, label_to_index, lr=0.02):
        self.W = None
        self.bias = None
        self.lr = lr
        self.num_dim = 0
        self.num_class = len(label_to_index)
        self.label_to_index = label_to_index
        self.index_to_label = {v: k for k, v in label_to_index.items()}
    def _decision(self, features):
        """Return the label with the highest activation for `features`.

        Fix: the running maximum starts at -inf (the original started at 0,
        so whenever every activation was negative it silently fell back to
        the class at index 0 instead of the true arg-max). Ties go to the
        class iterated last, matching the original >= comparison.
        """
        arg_max, predicted_label = float('-inf'), self.index_to_label[0]
        for c in self.label_to_index.keys():
            current_activation = np.dot(features, self.W[c])
            if current_activation >= arg_max:
                arg_max, predicted_label = current_activation, c
        return predicted_label
    def train(self, training_data):
        """Train with the multi-class perceptron update rule.

        Inputs:
            training_data: list of (feature_vector, label) pairs.
        """
        self.num_dim = len(training_data[0][0])
        self.num_epochs = 5
        self.W = {c: np.array([0.0 for _ in range(self.num_dim)]) for c in self.label_to_index.keys()}
        epoch = 0
        change_over_epoch = True
        # Stop early once an epoch makes no weight update (converged).
        while change_over_epoch and epoch < self.num_epochs:
            print("Epoch: ", epoch)
            epoch += 1
            correct = 0
            change_over_epoch = False
            for sample in training_data:
                # Map unknown labels onto the class at index 0.
                label = sample[1]
                if sample[1] not in self.label_to_index.keys():
                    label = self.index_to_label[0]
                # Multi-class decision rule (shared with predict()).
                predicted_label = self._decision(sample[0])
                # Update rule: reward the true class, penalize the predicted.
                if not (label == predicted_label):
                    change_over_epoch = True
                    self.W[label] += np.dot(self.lr, sample[0])
                    self.W[predicted_label] -= np.dot(self.lr, sample[0])
                else:
                    correct += 1
            acc = correct / len(training_data)
            print("Accuracy: ", str(acc))
    def predict(self, model_input):
        """ Predicts a label for an input.

        Inputs:
            model_input (features): Input data for an example, represented as a
                feature vector.

        Returns:
            The predicted class.
        """
        return self._decision(model_input)
def create_dummy_bias(data):
    """Append a constant 1 (bias feature) to every example's feature
    vector, in place, and return the same list."""
    for features, *_rest in data:
        features.append(1)
    return data
if __name__ == "__main__":
    # Load train/dev/test splits plus the label dictionary from the CLI args.
    print("Getting data")
    train_data, dev_data, test_data, data_type, label_dict = load_data(sys.argv)
    print("Got data")
    # Append the constant bias feature to every split.
    train_data = create_dummy_bias(train_data)
    dev_data = create_dummy_bias(dev_data)
    test_data = create_dummy_bias(test_data)
    print(len(train_data))
    print(len(dev_data))
    print(len(test_data))
    # Train the model using the training data.
    model = PerceptronModel(label_to_index=label_dict)
    model.train(train_data)
    # Predict on the development set.
    '''
    dev_accuracy = evaluate(model,
                            dev_data,
                            os.path.join("results", "perceptron_" + data_type + "_dev_predictions.csv"))
    print("Dev accuracy: ", dev_accuracy)
    '''
    # Confusion matrix over the dev set, with rows/columns in sorted label
    # order.
    pred_label = [model.predict(example[0]) for example in dev_data]
    true_label = [example[1] for example in dev_data]
    conf_mat = confusion_matrix(true_label, pred_label,
                                labels=np.sort(np.unique(true_label)))
    print(conf_mat)
    print(np.sort(np.unique(true_label)))
    # Predict on the test set.
    # Note: We don't provide labels for test, so the returned value from this
    # call shouldn't make sense.
    #evaluate(model,
    #         test_data,
    #         os.path.join("results", "perceptron_" + data_type + "_test_predictions.csv"))
| [
"54274991+NJCinnamond@users.noreply.github.com"
] | 54274991+NJCinnamond@users.noreply.github.com |
342f10e5e1c17b196563987f7720df7d1de0ef8e | 1361f56a3dc2205455054d144fa30d9cebb9704f | /week-07/project/get_data.py | b6f5c0ca6d65a95f039ea83f0e9e44f705ff9f35 | [] | no_license | green-fox-academy/TemExile | 31b240f58a0d56364e3b888cd9610b176f244d5e | 040882ebb07d10c65b98cd3dc12814f10fa52dc0 | refs/heads/master | 2020-05-19T18:17:58.468807 | 2019-06-21T06:22:51 | 2019-06-21T06:22:51 | 185,149,461 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,689 | py | import requests
from bs4 import BeautifulSoup
import numpy as np
import pandas as pd
from function import get_info
# Rightmove location codes for a set of Somerset towns; the key is used as
# the searchLocation query parameter and the value as the REGION identifier.
city_list = {
    'Bath':'116', 'Bridgwater':'212', 'Burnham-On-Sea':'251', 'Chard':'301',
    'Cheddar':'306', 'Clevedon':'337', 'Crewkerne':'381',
    'Frome':'536', 'Glastonbury':'551', 'Ilminster':'678', 'Minehead':'942',
    'Radstock':'1109', 'Shepton+Mallet':'1198',
    'Street':'1287', 'Taunton':'1317', 'Wellington':'1414', 'Wells':'1415',
    'Weston-Super-Mare':'1437', 'Wincanton':'1458', 'Yeovil':'1497'
}
# Example of a fully assembled URL:
# 'https://www.rightmove.co.uk/house-prices/detail.html?'
# 'country=england&locationIdentifier=REGION%5E1198&'
# 'searchLocation=Shepton+Mallet&referrer=listChangeCriteria&index=0'
# Pagination offsets: 25 results per page, 40 pages.
page_list = [x*25 for x in range(40)]
base_url = r'https://www.rightmove.co.uk/house-prices/detail.html?country=england&locationIdentifier=REGION%5E'
raw_data_list = []
# Scrape every page of every town and collect one dict per sold listing.
for key, value in city_list.items():
    for n in page_list:
        url = base_url + value + r'&searchLocation=' + key + '&&referrer=listChangeCriteria&index=' + str(n)
        page = requests.get(url)
        soup = BeautifulSoup(page.content, 'html.parser')
        # Each sold listing sits in an <li class="soldUnit"> element.
        data = soup.find_all('li', 'soldUnit')
        for item in data:
            dic = {}
            # get_info (project helper) yields the fields unpacked below,
            # indexed 0..6.
            result = get_info(item)
            dic['Price'] = result[0]
            dic['HomeType'] = result[1]
            dic['HoldType'] = result[2]
            dic['Type'] = result[3]
            dic['SoldDate'] = result[4]
            dic['Bedroom'] = result[5]
            dic['areaCode'] = result[6]
            dic['City'] = key
            raw_data_list.append(dic)
# Dump everything scraped to CSV.
df = pd.DataFrame(raw_data_list)
df.to_csv('Raw_data.csv')
"hxwengl@163.com"
] | hxwengl@163.com |
5a18ee6526a8d5b5735523e7efe503c9224f57c1 | 35631053e6c1e7d01d31c27e10388204ab59b8f2 | /Streaming Media Player/pop_up_message.py | aaa1ee198e1f91f7dbfde98f2f21ab3e38f033da | [] | no_license | vanduan/DichVuMang | 7cf442498820c6c39362cc69e1fd10b503fca704 | c569cf52265356ed67eb703f50ddc65e6ce9e846 | refs/heads/master | 2021-01-21T13:03:15.699281 | 2016-04-22T10:41:27 | 2016-04-22T10:41:27 | 55,877,543 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 900 | py | import sys
from PyQt4.QtGui import *
from PyQt4.QtCore import *
def window():
    """Build a small PyQt4 window with a button that opens the demo dialog,
    then enter the Qt event loop (blocks until the window closes)."""
    app = QApplication(sys.argv)
    w = QWidget()
    b = QPushButton(w)
    b.setText("Show message!")
    b.move(50,50)
    # Clicking the button opens the message-box dialog.
    b.clicked.connect(showdialog)
    w.setWindowTitle("PyQt Dialog demo")
    w.show()
    sys.exit(app.exec_())
def showdialog():
    """Show a modal QMessageBox with Ok/Cancel buttons and print the value
    of the pressed button (Python 2 print syntax)."""
    msg = QMessageBox()
    msg.setIcon(QMessageBox.Information)
    msg.setText("This is a message box")
    msg.setInformativeText("This is additional information")
    msg.setWindowTitle("MessageBox demo")
    msg.setDetailedText("The details are as follows:")
    msg.setStandardButtons(QMessageBox.Ok | QMessageBox.Cancel)
    # msgbtn is called with the clicked button before exec_ returns.
    msg.buttonClicked.connect(msgbtn)
    retval = msg.exec_()
    print "value of pressed message box button:", retval
def msgbtn(i):
    # Slot for QMessageBox.buttonClicked; prints the clicked button's label.
    print "Button pressed is:",i.text()
if __name__ == '__main__':
    window()
"vanduan95.dvp@gmail.com"
] | vanduan95.dvp@gmail.com |
99cd43a8c940db281d4db4d33d06b1cee795bc61 | c5291e50a3c72c885922378573a0ad423fcedf05 | /analysis/data/urls.py | e7638f31b2b04491d30e6f29d5a4d9826f2a05c3 | [] | no_license | raghurammanyam/django-projects | bcc3ed6285882af437a2995514cef33760fb063e | dd20ae354f7f111a0176a1cc047c099bd23e9f05 | refs/heads/master | 2022-12-12T19:22:31.698114 | 2018-12-09T09:41:45 | 2018-12-09T09:41:45 | 137,443,359 | 0 | 0 | null | 2022-11-22T03:01:07 | 2018-06-15T05:08:15 | Python | UTF-8 | Python | false | false | 196 | py |
from django.conf.urls import url
from django.urls import path
from .views import test,get
from django.http import HttpResponse
urlpatterns = [
url(r'^date/',test),
url(r'^get/',get)
]
| [
"manyamraghuram@gmail.com"
] | manyamraghuram@gmail.com |
f231f73dec833a474cefcee2707d8742f92f9d51 | 125bc51efb95f383257e7bdb50ae74e5dc05b7f7 | /src/belajarIntegerString.py | f28765c84ddfefc5911c0710cd851199053fcd21 | [] | no_license | frestea09/learn_ch1_python | f9688fffda5f0fa312b82bd25081b986fa0779e9 | 510ea59bf85ec024ebc473db2533e92becaefbf3 | refs/heads/master | 2020-05-26T18:22:31.171688 | 2019-05-26T05:42:08 | 2019-05-26T05:42:08 | 188,334,119 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 239 | py | from __future__ import print_function
def main():
    """Prompt for a name and an age on stdin and echo both back
    (prompts/messages are in Indonesian)."""
    variabelNama = input('nama : ')
    # int() raises ValueError if the age entered is not numeric.
    variabelInteger = int(input('Umur'))
    print('Nama anda %s dan umur anda %d'%(variabelNama,variabelInteger))
if __name__ == "__main__":
    main()
"ilmanfrasetya@gmail.com"
] | ilmanfrasetya@gmail.com |
8f4ca0a46c8c2f2b477ecfa59a36f08c988916bb | c220ac95ee13465d549b721700fe482ed490a2ac | /itty/__init__.py | 6363a9929b9908c6a4220c4143cefa060e4c3310 | [
"BSD-3-Clause"
] | permissive | ruthenium/itty | 66587621e1f36fc66202bf3a24509438d04b48d1 | 5d4219909c88d21af8b5548d366888accace68f6 | refs/heads/master | 2021-01-18T08:46:24.315865 | 2011-08-17T03:10:47 | 2011-08-17T03:10:47 | 2,151,905 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,591 | py | from .base import (HTTP_MAPPINGS,
Callback as _Callback,
Error as _Error,
Request,
Response,
static_file,
EnvironmentError,
Forbidden,
NotFound,
AppError,
Redirect,
App,
run_app)
# Registry of decorated handlers keyed by function name; consumed by
# run_itty() to assemble the application class.
APP_METHODS = { }
class Callback(_Callback):
    """Route callback that, via the descriptor protocol, exposes the raw
    wrapped function when accessed through an application instance."""
    def __get__(self, instance, owner):
        if instance is None:
            return self
        return self._func
    @classmethod
    def decorator(cls, pattern):
        # Returns a decorator that registers `func` under its name
        # (Python 2: func.func_name) in APP_METHODS and hands the function
        # back unchanged.
        def wrapper(func):
            res = cls(pattern, func)
            APP_METHODS[func.func_name] = res
            return func
        return wrapper
# One Callback subclass per HTTP verb; the lowercase module-level aliases
# (get/post/put/delete) are the decorators used to register route handlers.
class GetCallback(Callback):
    method = 'GET'
get = GetCallback.decorator
class PostCallback(Callback):
    method = 'POST'
post = PostCallback.decorator
class PutCallback(Callback):
    method = 'PUT'
put = PutCallback.decorator
class DeleteCallback(Callback):
    method = 'DELETE'
delete = DeleteCallback.decorator
class Error(_Error):
    """Error-handler callback registered against an HTTP status code; same
    descriptor behaviour as Callback."""
    def __get__(self, instance, owner):
        if instance is None:
            return self
        return self._func
    @classmethod
    def decorator(cls, status):
        # Register `func` as the handler for `status` in APP_METHODS.
        def wrapper(func):
            res = cls(func, status)
            APP_METHODS[func.func_name] = res
            return func
        return wrapper
error = Error.decorator
def run_itty(host='localhost', port=8080, adapter='wsgiref'):
    """Assemble an application class from every registered handler in
    APP_METHODS and serve it via run_app.

    Fix: the base class must be the `App` imported from .base at the top of
    this module; the name `base` itself is never imported, so the original
    `base.App` raised NameError as soon as run_itty was called.
    """
    return run_app(type('IttyMainApplication',
                        (App, ),
                        APP_METHODS),
                   host, port, adapter)
| [
"bubucektop@gmail.com"
] | bubucektop@gmail.com |
9a0f0433298aaf2b0b0aa33f5a64b0273f639e93 | 24d8cf871b092b2d60fc85d5320e1bc761a7cbe2 | /eXe/rev2669-2722/left-trunk-2722/twisted/internet/iocpreactor/udp.py | 3bf7a5bba392de8252482bdf0e1ba0600cfe27fa | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | joliebig/featurehouse_fstmerge_examples | af1b963537839d13e834f829cf51f8ad5e6ffe76 | 1a99c1788f0eb9f1e5d8c2ced3892d00cd9449ad | refs/heads/master | 2016-09-05T10:24:50.974902 | 2013-03-28T16:28:47 | 2013-03-28T16:28:47 | 9,080,611 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 6,644 | py | import socket
from twisted.internet import interfaces, defer, error, protocol, address
from twisted.internet.abstract import isIPAddress
from twisted.persisted import styles
from twisted.python import log, failure, reflect
from ops import ReadFileOp, WriteFileOp, WSARecvFromOp, WSASendToOp
from util import StateEventMachineType
from zope.interface import implements
# Windows error code observed on a UDP read/write when the remote port is
# unreachable (peer answered with ICMP port-unreachable).
ERROR_PORT_UNREACHABLE = 1234
class Port(log.Logger, styles.Ephemeral, object):
    """IOCP-based UDP port transport (Python 2 / Windows).

    StateEventMachineType turns each name in `events` into a dispatching
    method: calling e.g. self.write(...) routes to handle_<state>_write(...),
    where state is one of "disconnected", "listening", "connecting" or
    "connected".
    """
    __metaclass__ = StateEventMachineType
    implements(interfaces.IUDPTransport)
    # Event names dispatched through the state-machine metaclass.
    events = ["startListening", "stopListening", "write", "readDone", "readErr", "writeDone", "writeErr", "connect"]
    sockinfo = (socket.AF_INET, socket.SOCK_DGRAM, 0)
    read_op_class = WSARecvFromOp
    write_op_class = WSASendToOp
    reading = False
    _realPortNumber = None
    disconnected = property(lambda self: self.state == "disconnected")
    def __init__(self, bindAddress, proto, maxPacketSize=8192):
        # proto must be a DatagramProtocol; the reactor import is deferred
        # to avoid a circular import at module load time.
        assert isinstance(proto, protocol.DatagramProtocol)
        self.state = "disconnected"
        from twisted.internet import reactor
        self.bindAddress = bindAddress
        self._connectedAddr = None
        self.protocol = proto
        self.maxPacketSize = maxPacketSize
        self.logstr = reflect.qual(self.protocol.__class__) + " (UDP)"
        self.read_op = self.read_op_class(self)
        # Pre-allocated buffer reused by every overlapped read.
        self.readbuf = reactor.AllocateReadBuffer(maxPacketSize)
        self.reactor = reactor
    def __repr__(self):
        if self._realPortNumber is not None:
            return "<%s on %s>" % (self.protocol.__class__, self._realPortNumber)
        else:
            return "<%s not connected>" % (self.protocol.__class__,)
    def handle_listening_connect(self, host, port):
        # connect() while listening: only literal IP addresses are accepted.
        if not isIPAddress(host):
            raise ValueError, "please pass only IP addresses, not domain names"
        self.state = "connecting"
        return defer.maybeDeferred(self._connectDone, host, port)
    def handle_connecting_connect(self, host, port):
        raise RuntimeError, "already connected, reconnecting is not currently supported (talk to itamar if you want this)"
    handle_connected_connect = handle_connecting_connect
    def _connectDone(self, host, port):
        # Associate the socket with a single peer address.
        self._connectedAddr = (host, port)
        self.state = "connected"
        self.socket.connect((host, port))
        return self._connectedAddr
    def handle_disconnected_startListening(self):
        # Bind first; resolve the bind host asynchronously if it is a name.
        self._bindSocket()
        host, port = self.bindAddress
        if isIPAddress(host):
            return defer.maybeDeferred(self._connectSocket, host)
        else:
            d = self.reactor.resolve(host)
            d.addCallback(self._connectSocket)
            return d
    def _bindSocket(self):
        # Create and bind the UDP socket; translate bind failures into
        # CannotListenError.
        try:
            skt = socket.socket(*self.sockinfo)
            skt.bind(self.bindAddress)
        except socket.error, le:
            raise error.CannotListenError, (None, None, le)
        self._realPortNumber = skt.getsockname()[1]
        log.msg("%s starting on %s"%(self.protocol.__class__, self._realPortNumber))
        self.socket = skt
    def _connectSocket(self, host):
        # Record the resolved bind host, hook up the protocol and start the
        # first overlapped read.
        self.bindAddress = (host, self.bindAddress[1])
        self.protocol.makeConnection(self)
        self.startReading()
        self.state = "listening"
    def startReading(self):
        self.reading = True
        try:
            self.read_op.initiateOp(self.socket.fileno(), self.readbuf)
        except WindowsError, we:
            log.msg("initiating read failed with args %s" % (we,))
    def stopReading(self):
        # Only clears the flag; an already-issued overlapped read completes.
        self.reading = False
    def handle_listening_readDone(self, bytes, addr = None):
        # Deliver the datagram, then re-arm the read if still reading.
        if addr:
            self.protocol.datagramReceived(self.readbuf[:bytes], addr)
        else:
            self.protocol.datagramReceived(self.readbuf[:bytes])
        if self.reading:
            self.startReading()
    handle_connecting_readDone = handle_listening_readDone
    handle_connected_readDone = handle_listening_readDone
    def handle_listening_readErr(self, ret, bytes):
        log.msg("read failed with err %s" % (ret,))
        if ret == 1234: # ERROR_PORT_UNREACHABLE
            self.protocol.connectionRefused()
        if self.reading:
            self.startReading()
    handle_connecting_readErr = handle_listening_readErr
    handle_connected_readErr = handle_listening_readErr
    def handle_disconnected_readErr(self, ret, bytes):
        pass # no kicking the dead horse
    def handle_disconnected_readDone(self, bytes, addr = None):
        pass # no kicking the dead horse
    def handle_listening_write(self, data, addr):
        self.performWrite(data, addr)
    def handle_connected_write(self, data, addr = None):
        # When connected, an explicit addr must match the connected peer.
        assert addr in (None, self._connectedAddr)
        self.performWrite(data, addr)
    def performWrite(self, data, addr = None):
        # Issue an overlapped sendto; falls back to the connected address
        # when no explicit addr is given.
        self.writing = True
        try:
            write_op = self.write_op_class(self)
            if not addr:
                addr = self._connectedAddr
            write_op.initiateOp(self.socket.fileno(), data, addr)
        except WindowsError, we:
            log.msg("initiating write failed with args %s" % (we,))
    def handle_listening_writeDone(self, bytes):
        log.msg("write success with bytes %s" % (bytes,))
    handle_connecting_writeDone = handle_listening_writeDone
    handle_connected_writeDone = handle_listening_writeDone
    def handle_listening_writeErr(self, ret, bytes):
        log.msg("write failed with err %s" % (ret,))
        if ret == ERROR_PORT_UNREACHABLE:
            self.protocol.connectionRefused()
    handle_connecting_writeErr = handle_listening_writeErr
    handle_connected_writeErr = handle_listening_writeErr
    def handle_disconnected_writeErr(self, ret, bytes):
        pass # no kicking the dead horse
    def handle_disconnected_writeDone(self, bytes):
        pass # no kicking the dead horse
    def writeSequence(self, seq, addr):
        # Concatenate and send as a single datagram.
        self.write("".join(seq), addr)
    def handle_listening_stopListening(self):
        self.stopReading()
        self.connectionLost()
    handle_connecting_stopListening = handle_listening_stopListening
    handle_connected_stopListening = handle_listening_stopListening
    def connectionLost(self, reason=None):
        # Tear down: notify the protocol, close and drop the socket.
        log.msg('(Port %s Closed)' % self._realPortNumber)
        self._realPortNumber = None
        self.protocol.doStop()
        self.socket.close()
        del self.socket
        self.state = "disconnected"
    def logPrefix(self):
        return self.logstr
    def getHost(self):
        # IPv4Address('UDP', host, port, 'INET_UDP') for the bound socket.
        return address.IPv4Address('UDP', *(self.socket.getsockname() + ('INET_UDP',)))
| [
"joliebig@fim.uni-passau.de"
] | joliebig@fim.uni-passau.de |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.