blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
19a873e4e3896df4714cebbd65d8a78cd02da923 | 773aef0de494fde01ea5a444b0cfdf57deb88b10 | /puchowebapp/urls.py | 925ea082cf610643223dc59a8d2e26160968a8dc | [] | no_license | gk90731/Pucho_Web | 44c509f92950dc7f35cd5dfd6cf3e42fb6b2d720 | 041239934cd9303120e67d613b2ae90f23c17f20 | refs/heads/master | 2022-12-10T19:47:43.400760 | 2020-04-04T14:28:29 | 2020-04-04T14:28:29 | 253,017,818 | 0 | 0 | null | 2022-12-08T03:59:01 | 2020-04-04T14:26:52 | HTML | UTF-8 | Python | false | false | 383 | py | from django.urls import path,include
from . import views
# URL routes for the site's static pages. Each `name` is the reverse-lookup
# key used by templates ({% url %}) and redirects, so names must stay stable.
urlpatterns = [
    path('',views.index ,name="home"),
    path('what_we_do/',views.what_we_do ,name="what_we_do"),
    path('about/',views.about ,name="about"),
    # NOTE(review): "protfolio" looks like a typo for "portfolio", but the
    # route, the view and the name all share the spelling, so renaming any one
    # of them would break the others and every template that reverses it.
    path('protfolio/',views.protfolio ,name="protfolio"),
    path('gallery/',views.gallery ,name="gallery"),
    path('contact/',views.contact ,name="contact"),
]
| [
"gk90731@gmail.com"
] | gk90731@gmail.com |
f1cac7d37380b930cbd540fd2d90c8714420e176 | 1a53448d510a2b348f013eaee020a5075bb99652 | /learning_users/basic_app/migrations/0001_initial.py | 0903addc62c58340f2442566bec4dd7855e0eea8 | [] | no_license | bluedragon0/django-deployment-example | 876085158e95a8d8f2bebcb13561e67a94cb45cb | 1420f5cb623f31f6211b62ebd84f4c44642b6d67 | refs/heads/master | 2021-05-13T23:23:52.839853 | 2018-01-06T18:01:26 | 2018-01-06T18:01:26 | 116,513,973 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 910 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2018-01-04 12:48
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the UserProfileInfo table.

    Generated by Django; avoid hand-editing the operations below.
    """
    initial = True
    dependencies = [
        # Depend on whatever model AUTH_USER_MODEL points at, not a hard-coded app.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='UserProfileInfo',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('portfolio_site', models.URLField(blank=True)),
                ('profile_pic', models.ImageField(blank=True, upload_to='profile_pics')),
                # One profile per user; deleting the user cascades to the profile.
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"eng.abdodragon653@gmail.com"
] | eng.abdodragon653@gmail.com |
40739e13d7c1efdb70cc5b1f49df8dea03cd6bdb | c71d6c6699c535011569dadb41a7bb7f904793c6 | /inference_top10.py | 4c88faab5ded0ae9927ffddfaafcc955cee5d671 | [] | no_license | Pyligent/Fashion-Image-Text-Multimodal-retrieval | 6a195c0a02264962f5c7cf837cd26d8587e9021b | 64b8d02a788e3fcf50d242408b3303bde9cc7256 | refs/heads/master | 2021-05-25T17:22:19.863240 | 2020-04-07T18:23:44 | 2020-04-07T18:23:44 | 253,841,451 | 12 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,566 | py | #!/usr/bin/env python
# coding: utf-8
# In[ ]:
import numpy as np
import torch, torchvision
from tqdm import tqdm as tqdm
import PIL
import skimage.io
import datasets
import img_text_composition_models
from matplotlib.pyplot import figure, imshow, axis
from matplotlib.image import imread
def infer_top10(opt, model, testset):
    """Tests a model over the given testset.

    Embeds every (source image, modification text) query and every candidate
    image with `model`, ranks candidates by dot-product similarity, and
    computes recall@{1,5,10} against the target captions.

    :param opt: options object; fields read here: batch_size, dataset.
    :param model: model exposing compose_img_text() and extract_img_feature();
        inputs are moved to CUDA, so a GPU is required.
    :param testset: dataset exposing get_test_queries(), get_img() and .imgs.
    :return: (per-query top-10 index arrays,
              the same results mapped to captions,
              list of (metric_name, recall) pairs).
    """
    model.eval()
    test_queries = testset.get_test_queries()
    all_imgs = []
    all_captions = []
    all_queries = []
    all_target_captions = []
    if test_queries:
        # compute test query features
        imgs = []
        mods = []
        for t in tqdm(test_queries):
            imgs += [testset.get_img(t['source_img_id'])]
            mods += [t['mod']['str']]
            # Flush a batch when full, or on the very last query.
            if len(imgs) >= opt.batch_size or t is test_queries[-1]:
                # Images may arrive as numpy arrays; convert once per batch.
                if 'torch' not in str(type(imgs[0])):
                    imgs = [torch.from_numpy(d).float() for d in imgs]
                imgs = torch.stack(imgs).float()
                imgs = torch.autograd.Variable(imgs).cuda()
                mods = [t for t in mods]
                f = model.compose_img_text(imgs, mods).data.cpu().numpy()
                all_queries += [f]
                imgs = []
                mods = []
        all_queries = np.concatenate(all_queries)
        all_target_captions = [t['target_caption'] for t in test_queries]
        # compute all image features
        imgs = []
        for i in tqdm(range(len(testset.imgs))):
            imgs += [testset.get_img(i)]
            if len(imgs) >= opt.batch_size or i == len(testset.imgs) - 1:
                if 'torch' not in str(type(imgs[0])):
                    imgs = [torch.from_numpy(d).float() for d in imgs]
                imgs = torch.stack(imgs).float()
                imgs = torch.autograd.Variable(imgs).cuda()
                imgs = model.extract_img_feature(imgs).data.cpu().numpy()
                all_imgs += [imgs]
                imgs = []
        all_imgs = np.concatenate(all_imgs)
        all_captions = [img['captions'][0] for img in testset.imgs]
    else:
        # use training queries to approximate training retrieval performance
        imgs0 = []
        imgs = []
        mods = []
        for i in range(10000):
            item = testset[i]
            imgs += [item['source_img_data']]
            mods += [item['mod']['str']]
            if len(imgs) > opt.batch_size or i == 9999:
                imgs = torch.stack(imgs).float()
                imgs = torch.autograd.Variable(imgs)
                mods = [t for t in mods]
                f = model.compose_img_text(imgs.cuda(), mods).data.cpu().numpy()
                all_queries += [f]
                imgs = []
                mods = []
            imgs0 += [item['target_img_data']]
            if len(imgs0) > opt.batch_size or i == 9999:
                imgs0 = torch.stack(imgs0).float()
                imgs0 = torch.autograd.Variable(imgs0)
                imgs0 = model.extract_img_feature(imgs0.cuda()).data.cpu().numpy()
                all_imgs += [imgs0]
                imgs0 = []
            all_captions += [item['target_caption']]
            all_target_captions += [item['target_caption']]
        all_imgs = np.concatenate(all_imgs)
        all_queries = np.concatenate(all_queries)
    # feature normalization: unit-length rows so the dot product below is
    # cosine similarity.
    for i in range(all_queries.shape[0]):
        all_queries[i, :] /= np.linalg.norm(all_queries[i, :])
    for i in range(all_imgs.shape[0]):
        all_imgs[i, :] /= np.linalg.norm(all_imgs[i, :])
    # match test queries to target images, get nearest neighbors
    sims = all_queries.dot(all_imgs.T)
    if test_queries:
        for i, t in enumerate(test_queries):
            sims[i, t['source_img_id']] = -10e10  # remove query image from its own ranking
    # Top-10 candidate indices per query, best first.
    nn_result = [np.argsort(-sims[i, :])[:10] for i in range(sims.shape[0])]
    nn_result1 = nn_result
    # compute recalls; a hit is counted when the target caption appears among
    # the top-k retrieved captions.
    out = []
    nn_result = [[all_captions[nn] for nn in nns] for nns in nn_result]
    for k in [1, 5, 10]:
        r = 0.0
        for i, nns in enumerate(nn_result):
            if all_target_captions[i] in nns[:k]:
                r += 1
        r /= len(nn_result)
        out += [('recall_top' + str(k) + '_correct_composition', r)]
        # Word-level recalls assume two-word captions "<adj> <noun>"; skipped
        # for fashion200k whose captions do not follow that shape.
        if opt.dataset != 'fashion200k':
            r = 0.0
            for i, nns in enumerate(nn_result):
                if all_target_captions[i].split()[0] in [c.split()[0] for c in nns[:k]]:
                    r += 1
            r /= len(nn_result)
            out += [('recall_top' + str(k) + '_correct_adj', r)]
            r = 0.0
            for i, nns in enumerate(nn_result):
                if all_target_captions[i].split()[1] in [c.split()[1] for c in nns[:k]]:
                    r += 1
            r /= len(nn_result)
            out += [('recall_top' + str(k) + '_correct_noun', r)]
    return nn_result1,nn_result,out
def show_source(path,source_id):
    """Display a query's source image along with its caption and query text.

    :param path: root directory that the dataset's image file_paths are
        relative to.
    :param source_id: index into the module-level `test_queries` list.

    NOTE(review): relies on the module-level globals `test_queries` and
    `testset` being populated by infer_top10()/the calling notebook.
    """
    # Fix: the file only does `import PIL`, so the bare name `Image` used
    # below was undefined and this function raised NameError when called.
    from PIL import Image
    idx = test_queries[source_id]['source_img_id']
    img_path = path + '/' + testset.imgs[idx]['file_path']
    pil_im = Image.open(img_path, 'r')
    print('Product Attributes: ',test_queries[source_id]['source_caption'])
    print('Query: ',test_queries[source_id]['mod']['str'])
    imshow(np.asarray(pil_im))
def show_results(path,result_id):
    """Display one retrieved image (by index into `testset.imgs`) and its captions.

    :param path: root directory that the dataset's image file_paths are
        relative to.
    :param result_id: index into `testset.imgs`.

    NOTE(review): relies on the module-level global `testset`.
    """
    # Fix: the file only does `import PIL`, so the bare name `Image` used
    # below was undefined and this function raised NameError when called.
    from PIL import Image
    img_path = path + '/' + testset.imgs[result_id]['file_path']
    pil_im = Image.open(img_path, 'r')
    print('Product Attributes: ',testset.imgs[result_id]['captions'])
    imshow(np.asarray(pil_im))
def get_result(array_list):
    """Map retrieved image indices to full file paths, preserving order.

    :param array_list: iterable of indices into `testset.imgs`.
    :return: list of `path + '/' + file_path` strings, one per index.

    NOTE(review): depends on the module-level globals `testset` and `path`;
    `path` is not defined anywhere in this file, so it must be set by the
    calling notebook/script before use — confirm.
    """
    return [path + '/' + testset.imgs[idx]['file_path'] for idx in array_list]
def show_result_all(result_files):
    """Render every image in `result_files` side by side in one wide figure."""
    canvas = figure(figsize=(25, 25))
    total = len(result_files)
    # One subplot per file, laid out in a single row, axes hidden.
    for position, file_name in enumerate(result_files, start=1):
        canvas.add_subplot(1, total, position)
        imshow(imread(file_name))
        axis('off')
| [
"noreply@github.com"
] | noreply@github.com |
c0cf0962495662ae563a1a6b07d1ec6c2b8f5619 | 255e19ddc1bcde0d3d4fe70e01cec9bb724979c9 | /all-gists/9549174/snippet.py | 312f68f442a31f1ee8acc642c7594905cdeb8ac0 | [
"MIT"
] | permissive | gistable/gistable | 26c1e909928ec463026811f69b61619b62f14721 | 665d39a2bd82543d5196555f0801ef8fd4a3ee48 | refs/heads/master | 2023-02-17T21:33:55.558398 | 2023-02-11T18:20:10 | 2023-02-11T18:20:10 | 119,861,038 | 76 | 19 | null | 2020-07-26T03:14:55 | 2018-02-01T16:19:24 | Python | UTF-8 | Python | false | false | 3,652 | py | import random
import sys
def print_grid(grid):
    # Render the 4x4 board: 4-char-wide cells joined by "|", zero cells shown
    # blank, and rows separated by a "----+----+----+----" rule.
    # (Python 2 print statement.)
    print ("\n%s\n" % "+".join([('-' * 4)] * 4)).join(
        ["|".join(["%4d" % item if item > 0 else " " * 4 for item in line]) for line in grid])
def get_available_cells(grid):
    """Return the (row, col) pair of every empty (zero) cell, scanned row-major."""
    empty_cells = []
    for row in range(4):
        for col in range(4):
            if not grid[row][col]:
                empty_cells.append((row, col))
    return empty_cells
def insert_new_item(grid):
    """Drop a new tile onto a random empty cell: 2 with probability 0.9, else 4.

    Mutates `grid` in place. Returns False when the board is full (no empty
    cell to fill), True otherwise.
    """
    open_cells = get_available_cells(grid)
    if not open_cells:
        return False
    row, col = random.choice(open_cells)
    grid[row][col] = 4 if random.random() >= 0.9 else 2
    return True
def is_legal_position(y, x):
    """True iff (y, x) is a coordinate on the 4x4 board."""
    if y < 0 or y > 3:
        return False
    return 0 <= x <= 3
def get_next_position(y, x, (y_offset, x_offset)):
    # Step one cell from (y, x) in the given direction; no bounds checking.
    # NOTE: tuple parameter unpacking in the signature is Python-2-only syntax.
    return y + y_offset, x + x_offset
def get_next_nonzero_cell(grid, y, x, (y_offset, x_offset)):
    # Walk from (y, x) one step at a time in the given direction and return the
    # coordinates of the first non-empty cell, or (None, None) when the edge of
    # the board is reached first. Recursive; depth is bounded by the board size.
    # (Python-2-only tuple-parameter syntax.)
    next_y, next_x = get_next_position(y, x, (y_offset, x_offset))
    if is_legal_position(next_y, next_x):
        if grid[next_y][next_x]:
            return next_y, next_x
        else:
            return get_next_nonzero_cell(grid, next_y, next_x, (y_offset, x_offset))
    else:
        return None, None
def merge_cells(grid, (write_y, write_x), (read_y, read_x), direction, virtual, winning=False):
    # Compact one lane of the board in place: the write cursor marks where the
    # next tile should land, the read cursor scans ahead for tiles to slide or
    # merge. With virtual=True nothing is modified and the return value only
    # answers "would this lane move/merge at all?"; otherwise the lane is
    # updated and the return value reports whether a tile > 1024 (i.e. 2048)
    # was produced anywhere along the way.
    # (Python-2-only tuple-parameter syntax.)
    if (write_y, write_x) == (read_y, read_x):
        # Cursors coincide: advance the read cursor to the next tile ahead.
        read_y, read_x = get_next_nonzero_cell(grid, read_y, read_x, direction)
    if not is_legal_position(write_y, write_x) or not is_legal_position(read_y, read_x):
        # Ran off the board: recursion ends; a virtual probe that got this far
        # found nothing to do.
        return winning if not virtual else False
    if grid[write_y][write_x]:
        if grid[read_y][read_x] == grid[write_y][write_x]:
            # Equal tiles: merge the read tile into the write tile.
            if virtual:
                return True
            grid[write_y][write_x] *= 2
            grid[read_y][read_x] = 0
            return merge_cells(grid, get_next_position(write_y, write_x, direction),
                               get_next_nonzero_cell(grid, read_y, read_x, direction), direction, virtual,
                               winning or grid[write_y][write_x] > 1024)
        else:
            # Different tiles: the write cursor moves on; read cursor stays.
            return merge_cells(grid, get_next_position(write_y, write_x, direction),
                               (read_y, read_x), direction, virtual, winning)
    else:
        # Write cell empty: slide the read tile into it.
        if virtual:
            return True
        grid[write_y][write_x] = grid[read_y][read_x]
        grid[read_y][read_x] = 0
        return merge_cells(grid, (write_y, write_x),
                           get_next_nonzero_cell(grid, read_y, read_x, direction), direction, virtual, winning)
def get_movable_directions(grid):
    """Return the direction keys ('a','d','w','s') whose move would change the
    board, determined with virtual (non-mutating) probes of move()."""
    movable = []
    for key in ("a", "d", "w", "s"):
        if move(grid, key, True):
            movable.append(key)
    return movable
def move(grid, direction, virtual):
    """Slide/merge the whole board toward `direction`.

    'a' = left, 'd' = right, 'w' = up, 's' = down. With virtual=True the grid
    is left untouched and the result only says whether the move would do
    anything; otherwise the grid is updated lane by lane and the result is
    whatever merge_cells reports (a winning merge occurred). Any other key
    returns None, exactly like the original if/elif chain falling through.
    """
    # Per direction: how to find each lane's starting cell, and the step taken
    # along the lane.
    lanes = {
        "a": (lambda i: (i, 0), (0, 1)),
        "d": (lambda i: (i, 3), (0, -1)),
        "w": (lambda i: (0, i), (1, 0)),
        "s": (lambda i: (3, i), (-1, 0)),
    }
    if direction not in lanes:
        return None
    start_of, step = lanes[direction]
    # Evaluate all four lanes eagerly (a real list, not a generator) so a
    # non-virtual move updates every lane — matching the original any([...]).
    lane_results = [merge_cells(grid, start_of(i), start_of(i), step, virtual)
                    for i in range(4)]
    return any(lane_results)
# Main game loop (Python 2: print statements, stdin-driven input).
# Start from an empty 4x4 board with one seeded tile; a second tile is added
# at the top of every turn.
grid = [[0 for x in range(4)] for y in range(4)]
insert_new_item(grid)
while True:
    insert_new_item(grid)
    print_grid(grid)
    movable_directions = get_movable_directions(grid)
    # No legal move left: game over.
    if len(movable_directions) == 0:
        print "You lose!"
        break
    # Read direction keys until the player picks one that actually moves.
    direction_name = sys.stdin.readline().strip().lower()
    while direction_name not in movable_directions:
        print "Invalid direction."
        direction_name = sys.stdin.readline().strip().lower()
    # A truthy (non-virtual) move means a 2048 tile was produced.
    if move(grid, direction_name, False):
        print_grid(grid)
        print "You win!"
break | [
"gistshub@gmail.com"
] | gistshub@gmail.com |
94cc000a99b3c4cdc30e64ea23c2f5594a106539 | 3c32a798c0a3b4872c69a5ea08ed1132b337c5a9 | /max_api/max_api/utils.py | ea08cc429f08f4caae052f8879746c80da4f1356 | [] | no_license | Flushot/max_test | 21e2bf36db354a092859a9da5b6480c10e98b33a | ec3bd6579580ba6b50c36d2a4ab24b2c92712eaa | refs/heads/master | 2020-09-15T12:54:12.630179 | 2017-06-16T18:43:49 | 2017-06-16T18:43:49 | 94,478,844 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,025 | py | from __future__ import absolute_import, division, print_function, unicode_literals
from max_api import cache, config
def transform_artist(artist):
    """
    Transform Spotify artist record into MAX artist record.
    :param artist: artist record to transform.
    :return: transformed artist record (dict).
    """
    record = {}
    # Copy only the whitelisted fields; missing ones come through as None.
    for field in ('name', 'id', 'uri', 'genres', 'popularity', 'images'):
        record[field] = artist.get(field)
    if 'images' in artist and len(artist['images']) > 0:
        # Simplification: expose just the first image's URL.
        record['image'] = artist['images'][0]['url']
    return record
def get_artist(spotify, artist_id):
    """
    Get an artist record from cache.
    On cache miss, will fetch and cache record from Spotify API.
    :param spotify: Spotify client.
    :param artist_id: artist ID to get.
    :return: artist record.
    """
    cached = cache.get_artist(artist_id)
    if cached:
        return cached
    # Cache miss: hit the Spotify API and remember the result for next time.
    fetched = spotify.artist(artist_id)
    cache.put_artist(fetched)
    return fetched
| [
"flushot@gmail.com"
] | flushot@gmail.com |
7095dbaf424df3e7cbf035214bbf5420c6a56b5a | 9f7e29502e9d964eb83297dd11e7d6d74ccc34ce | /code/bad_class_classifier.py | 70eaae263fa52670426b0ed78a35625f904d3d84 | [] | no_license | ash567/ml_contest | f31ad655312c839ecd6d5cd3d5e7cad514b13661 | d071c50ab32ba6b08bc533a6c3eacd36ea186a96 | refs/heads/master | 2021-01-19T17:26:38.138985 | 2017-08-22T11:56:06 | 2017-08-22T11:56:06 | 101,057,404 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,343 | py | from get_data import *
import numpy as np
from sklearn import metrics
from sklearn import cross_validation
from sklearn import preprocessing
from sklearn import cross_validation
from sklearn import naive_bayes
# from sklearn.neighbors import NearestCentroid
from sklearn import datasets
from sklearn.semi_supervised import LabelPropagation
# Python 2 script (xrange, print statements) built on the pre-0.18 sklearn
# `cross_validation` API. Groups every class not in `bad_classes` into a
# single catch-all class 100, then reports per-fold SVM results.
distinct = 100
data = getData()
# Last column is the label, everything before it is the feature matrix.
dataX = data[:, :-1]
dataY = data[:, -1]
# change the kernel accordingly
# clf = LabelPropagation()
# scaler = preprocessing.MinMaxScaler(feature_range = (-1, 1))
# scaler.fit(trainX)
# trainX = scaler.transform(trainX)
# clf = naive_bayes.MultinomialNB(alpha = 0.001)
# clf = naive_bayes.BernoulliNB()
# clf = NearestCentroid()
# val = cross_validation.cross_val_score(clf, trainX, trainY, scoring = 'f1_macro', cv = 5, n_jobs = -2, verbose = 3)
# let all the other classes be named as the 100
# let list of the classes to be trained on
# Presumably the rare/hard ("bad") class ids found earlier -- TODO confirm
# how this list was derived (the counting code below is commented out).
bad_classes = [0, 4, 10, 27, 28, 31, 33, 34, 38, 40, 46, 54, 69, 70, 73, 77, 86, 88, 98]
# high = [dataY == 67]
# low = [dataY != 67]
# highData = data[high]
# lowData = data[low]
# count = np.zeros((distinct,1))
# for i in range(len(dataY)):
#     count[int(dataY[i])] = count[int(dataY[i])] + 1
# class_count = []
# for i in range(len(count)):
#     class_count.append(( int(count[i]), i))
# class_count.sort()
# for a in class_count:
#     print a
# print class_count
# count.sort()
# print count
# print count.shape
# print sum(dataY == 100)
from sklearn import svm
# Relabel everything outside bad_classes as the catch-all class 100.
for i in xrange(len(dataY)):
    if dataY[i] not in bad_classes:
        dataY[i] = 100
clf = svm.SVC(class_weight = 'auto', cache_size = 2000, C =.01)
stratSplit = cross_validation.StratifiedKFold(dataY, n_folds = 5, shuffle = True)
i = 0
for train_index, test_index in stratSplit:
    trainXX = dataX[train_index]
    testXX = dataX[test_index]
    # Scale to [-1, 1] using training-fold statistics only, then apply the
    # same transform to the held-out fold.
    scaler = preprocessing.MinMaxScaler(feature_range = (-1, 1))
    scaler.fit(trainXX)
    trainXX = scaler.transform(trainXX)
    testXX = scaler.transform(testXX)
    trainYY = dataY[train_index]
    testYY = dataY[test_index]
    clf.fit(trainXX, trainYY)
    predYY = clf.predict(testXX)
    i = i + 1
    print 'For %d fold the results are as follows:' %(i)
    print metrics.classification_report(testYY, predYY)
    print "\n"
| [
"ishugarg567@gmail.com"
] | ishugarg567@gmail.com |
ee483b5ae0918e738f3aea8d490f8fcc70edf57a | f179d37ae430e5c338de156a0fcff563532ef12e | /test.py | 1e076c97108bd7293eaf7d9c99f95d11860a4285 | [] | no_license | LLaner/testpycharm | dc20b8b0c253b7fc1a34bef6efb26ace2709357a | eb6d5f34587958402f9fb658ed5bef0df834a864 | refs/heads/master | 2020-03-31T12:52:28.341975 | 2018-10-10T02:26:17 | 2018-10-10T02:26:17 | 152,232,692 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 48 | py | class Animal(object):
pass
dog = Animal()
| [
"wangwenqin@jd.com"
] | wangwenqin@jd.com |
d0b8df90a505c6ce70739548052cf57d31f3c545 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/422/usersdata/328/89006/submittedfiles/lecker.py | 646555fee493b246ee37fbf271bb339645a2e877 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 251 | py | n1=int(input('n1:'))
n2=int(input('n2:'))
n3=int(input('n3:'))
n4=int(input('n4:'))
# Print 'S' if any of the four comparison patterns below holds for the four
# numbers just read, otherwise 'N'. NOTE(review): the rule these comparisons
# encode is not stated anywhere in the file -- confirm against the original
# exercise statement before changing any of them.
if n1 >n2 and n4<n3:
    print('S')
elif n2 >n1> n3 and n4<n3 :
    print('S')
elif n3>n4>n2 and n1<n2:
    print('S')
elif n4>n3 :
    print('S')
else:
print('N') | [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
11e4459bd5cac23d61d48b48aa304483301f533e | a442742df11b6781cc61fb91ebf5713d137cd703 | /sine/pendulum.py | cff5d3f8ad610b29c4da30fe60f9b54f36f13007 | [] | no_license | ianflitman/combinatoria | 4291f467907fbff4e9d533969f969488d4d32797 | f29921b6bb404ae1290845560ff85250171f48aa | refs/heads/master | 2016-09-05T19:27:30.728833 | 2015-09-02T20:43:19 | 2015-09-02T20:43:19 | 26,325,719 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,341 | py | __author__ = 'ian'
# Double pendulum formula translated from the C code at
# http://www.physics.usyd.edu.au/~wheat/dpend_html/solve_dpend.c
from numpy import sin, cos, pi, array
import numpy as np
import matplotlib.pyplot as plt
import scipy.integrate as integrate
import matplotlib.animation as animation
G = 9.8  # acceleration due to gravity, in m/s^2
L1 = 1.0  # length of pendulum 1 in m
L2 = 1.0  # length of pendulum 2 in m
M1 = 1.0  # mass of pendulum 1 in kg
M2 = 1.0  # mass of pendulum 2 in kg
def derivs(state, t):
    """Right-hand side of the double-pendulum ODE for odeint.

    state = [theta1, omega1, theta2, omega2] in radians / rad/s (see the
    `state` array built below); returns d(state)/dt. `t` is unused but
    required by odeint's callback signature.
    """
    dydx = np.zeros_like(state)
    dydx[0] = state[1]
    # Angle difference between the two arms appears throughout the equations.
    del_ = state[2]-state[0]
    den1 = (M1+M2)*L1 - M2*L1*cos(del_)*cos(del_)
    dydx[1] = (M2*L1*state[1]*state[1]*sin(del_)*cos(del_)
               + M2*G*sin(state[2])*cos(del_) + M2*L2*state[3]*state[3]*sin(del_)
               - (M1+M2)*G*sin(state[0]))/den1
    dydx[2] = state[3]
    den2 = (L2/L1)*den1
    dydx[3] = (-M2*L2*state[3]*state[3]*sin(del_)*cos(del_)
               + (M1+M2)*G*sin(state[0])*cos(del_)
               - (M1+M2)*L1*state[1]*state[1]*sin(del_)
               - (M1+M2)*G*sin(state[2]))/den2
    return dydx
# create a time array from 0..20 sampled at 0.05 second steps
# (the original comment said 0..100, but np.arange below stops at 20)
dt = 0.05
t = np.arange(0.0, 20, dt)
# th1 and th2 are the initial angles (degrees)
# w1 and w2 are the initial angular velocities (degrees per second)
th1 = 120.0
w1 = 0.0
th2 = -10.0
w2 = 0.0
rad = pi/180  # degrees-to-radians factor; NOTE: unused, the conversion below uses pi/180 directly
# initial state, converted from degrees to radians
state = np.array([th1, w1, th2, w2])*pi/180.
# integrate your ODE using scipy.integrate.
y = integrate.odeint(derivs, state, t)
# Cartesian coordinates of the two bobs for every time step; bob 2 hangs
# from bob 1, hence the "+ x1" / "+ y1" offsets.
x1 = L1*sin(y[:,0])
y1 = -L1*cos(y[:,0])
x2 = L2*sin(y[:,2]) + x1
y2 = -L2*cos(y[:,2]) + y1
fig = plt.figure()
ax = fig.add_subplot(111, autoscale_on=False, xlim=(-2, 2), ylim=(-2, 2))
ax.grid()
line, = ax.plot([], [], 'o-', lw=2)
time_template = 'time = %.1fs'
time_text = ax.text(0.05, 0.9, '', transform=ax.transAxes)
def init():
    # Blank frame used by FuncAnimation before the animation starts.
    line.set_data([], [])
    time_text.set_text('')
    return line, time_text
def animate(i):
    # Frame i: draw pivot -> bob1 -> bob2 and update the elapsed-time label.
    thisx = [0, x1[i], x2[i]]
    thisy = [0, y1[i], y2[i]]
    line.set_data(thisx, thisy)
    time_text.set_text(time_template%(i*dt))
    return line, time_text
ani = animation.FuncAnimation(fig, animate, np.arange(1, len(y)),
                              interval=25, blit=True, init_func=init)
#ani.save('double_pendulum.mp4', fps=15)
plt.show() | [
"ianflitman@gmail.com"
] | ianflitman@gmail.com |
a47b22ebabdf38d2e34e8f8243241fedfd974f97 | fc90d2109094b025fc4432a1a0e4c5de35e61fbe | /wall.py | 9eb52ac76dda92b712ad9bf89133c0be9334db33 | [
"MIT"
] | permissive | mostafa-elhaiany/blobVolleyBallAi | 969980b975f4be9fb7743dbe058a96c2c933816f | c25e6c042d262c7f6c48b8078a4f85b1f482a83c | refs/heads/master | 2022-03-25T14:45:59.999535 | 2020-01-01T19:14:07 | 2020-01-01T19:14:07 | 231,140,026 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 967 | py | import pygame
import os
WALL_WIDTH=50
WALL_HEIGHT=250
# Load the wall sprite once at import time and scale it to the play size.
wallImage= pygame.transform.scale( pygame.image.load( os.path.join( "imgs","wall.png" ) ),(WALL_WIDTH,WALL_HEIGHT) )
class Wall:
    """Static wall obstacle drawn at (x, y); bounces colliders on contact."""
    def __init__(self,x,y):
        self.x=x
        self.width=WALL_WIDTH
        self.y=y
        self.height=WALL_HEIGHT
        self.image = wallImage
        self.passed=False
    def draw(self, window):
        # Blit the sprite with (x, y) as its top-left corner.
        window.blit(self.image,(self.x,self.y))
    def getMask(self):
        # Pixel-perfect collision mask derived from the sprite.
        return pygame.mask.from_surface(self.image)
    def collide(self,collider):
        """Pixel-mask collision test against `collider`; bounce it on a hit.

        NOTE(review): the offset subtracts collider.height from the y term --
        presumably because the collider's mask origin differs from its (x, y);
        confirm against the collider class before changing.
        """
        colliderMask=collider.getMask()
        myMask= self.getMask()
        offset = ( round(self.x-collider.x) , round(self.y - collider.y)-collider.height )
        point=colliderMask.overlap(myMask,offset)
        if(point):
            collider.bounce(point[0],0,0)
            print('collided')  # debug trace left in by the author
            return True
        return False
| [
"mostafa_elhaiany@outlook.com"
] | mostafa_elhaiany@outlook.com |
87481f971aab378f0cea55dabcddcedecfdce3f5 | 4c704c60dcd8bba658f4e0cdc85f299c01f2058e | /002/for1.py | 9727ffbd6c0fcc9cbb45013575fc2759408bb8fa | [] | no_license | steveq1/py2016 | acd6c80595637fb3be7f1f3378bbdca8d2dcf8cc | fb9b2708d49790efe03d84315442d7e93a7cc6d6 | refs/heads/master | 2021-01-17T13:00:25.787387 | 2016-07-18T16:28:07 | 2016-07-18T16:28:07 | 63,125,247 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 155 | py | for x in range(0,10):
if x >=3:
is_break = False
break
print('x={0}'.format(x))
if is_break:
break
| [
"root@localhost.localdomain"
] | root@localhost.localdomain |
91f683f5ae10fa0d17fac5d8d2ed8efc7e5b63a8 | fc1c1e88a191b47f745625688d33555901fd8e9a | /meraki_sdk/models/universal_search_knowledge_base_search_enum.py | eeb90cdd62bbd16c19b2fcca21e1750437564fb5 | [
"MIT",
"Python-2.0"
] | permissive | RaulCatalano/meraki-python-sdk | 9161673cfd715d147e0a6ddb556d9c9913e06580 | 9894089eb013318243ae48869cc5130eb37f80c0 | refs/heads/master | 2022-04-02T08:36:03.907147 | 2020-02-03T19:24:04 | 2020-02-03T19:24:04 | 416,889,849 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 821 | py | # -*- coding: utf-8 -*-
"""
meraki_sdk
This file was automatically generated for meraki by APIMATIC v2.0 ( https://apimatic.io ).
"""
class UniversalSearchKnowledgeBaseSearchEnum(object):
    """Implementation of the 'UniversalSearchKnowledgeBaseSearch' enum.
    The universal search box always visible on Dashboard will, by default,
    present results from the Meraki KB. This configures
    whether these Meraki KB results should be returned. Can be one of
    'default or inherit', 'hide' or 'show'.
    Attributes:
        ENUM_DEFAULT_OR_INHERIT: use the default/inherited behaviour.
        HIDE: do not return Meraki KB results.
        SHOW: return Meraki KB results.
    """
    # Values are the literal strings the Dashboard API expects.
    ENUM_DEFAULT_OR_INHERIT = 'default or inherit'
    HIDE = 'hide'
    SHOW = 'show'
| [
"api-pm@meraki.com"
] | api-pm@meraki.com |
28434428c5ffef0e0b6e89f84912783421e0ee76 | 2f9891bc21d9f06c1460473f5b3dea4a0f8ac976 | /_emailSystem/delete.py | 53c19e4031cd6e8da19cdcf169a3f317f7b7b70e | [] | no_license | ajays8273/MailingSystem | 59962fb3fa5a0767ae1d19f557ae33d3e0da8935 | 9be6f40e18bd78711cfc5654bbd73b1f6d9e3ff7 | refs/heads/master | 2021-01-10T14:54:05.514278 | 2015-10-01T18:30:38 | 2015-10-01T18:30:38 | 43,513,324 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 526 | py | #!/usr/bin/python
import time;
import cgi, cgitb
import os
from os import environ
import Cookie
import hashlib
from sql import *
# Python 2 CGI handler: parse the query string, mark one e-mail as deleted,
# then redirect the browser back to the inbox.
ldb = sqldb();
unique = os.environ.get('QUERY_STRING')
# Drop the first 5 characters of the query string -- presumably a fixed
# "key=" style prefix added by the page that builds this link; TODO confirm.
unique_id = str(unique)[5:]
#print unique_id
# Remaining payload is "<to>@<from>@<timestamp>".
values = str(unique_id).split('@');
#print values
to = values[0]
from_ = values[1]
time = values[2]
# Undo URL-encoding of spaces in the timestamp. NOTE: this rebinds the name
# `time`, shadowing the `time` module imported at the top of the file.
time = time.replace('%20',' ')
#print to , from_ , time
# NOTE(review): these values come straight from the query string; ensure
# change_folder_email uses parameterized SQL, otherwise this is injectable.
ldb.change_folder_email(to, from_, time, "r" , "delete")
print 'Content-type:text/html\r\n'+'Location: %s' % "inbox.py"
print "\r\n\r\n" | [
"ajaypal1iitg@gmail.com"
] | ajaypal1iitg@gmail.com |
b3a1e79a93921130ab94d9a78260910679d0c7d1 | 769b34b86f385789cce0e192317d15a847c92bca | /ml_little_elephant/py2/4.7.Benford.py | 510a653276c288120eb5432aa0fd1aef7b86d69b | [] | no_license | Blueyonder00/machine-learning2 | 8c08e6bae5cca54c205bdd7cb6db875b063caffe | cf009003e0c8181dcdadc0ea063e276309d4d642 | refs/heads/master | 2021-07-09T23:11:22.946659 | 2017-10-11T02:33:41 | 2017-10-11T02:33:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,207 | py | # -*- coding:utf-8 -*-
# /usr/bin/python
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from time import time
from scipy.special import factorial
import math
# Matplotlib setup: keep the minus sign renderable with a CJK font, and use
# SimHei so the Chinese title/labels below display correctly.
mpl.rcParams['axes.unicode_minus'] = False
mpl.rcParams['font.sans-serif'] = 'SimHei'
def top1(number, a):
    """Divide `number` by `a`, then keep dividing by 10 until the quotient
    drops below 10; return (quotient, scale), where `scale` is `a` grown by
    the same power of ten."""
    quotient = number / a
    scale = a
    while quotient >= 10:
        quotient /= 10
        scale *= 10
    return quotient, scale
def top2(number, N2):
    """Shrink `number` below the cap `N2` by repeated division by 10, then
    reduce that value further to below 10; return (capped value, reduced value).
    The second element carries the leading digit of the input."""
    reduced = number
    while reduced >= N2:
        reduced /= 10
    lead = reduced
    while lead >= 10:
        lead /= 10
    return reduced, lead
def top3(number):
    """Given a base-10 logarithm, return the leading digit of the original
    value: int(10 ** fractional_part(number))."""
    fractional = number - int(number)
    return int(10 ** fractional)
def top4(number):
    """Same digit extraction as top3, but tallies the count directly into the
    module-level `frequency` array (index = leading digit - 1) instead of
    returning the digit. Used with map() over an array of log10 values."""
    number -= int(number)
    frequency[int(10 ** number) - 1] += 1
# Python 2 script: uses print statements and relies on map() returning a list
# (in Python 3 the map(top4, y) call below would be lazy and never run).
if __name__ == '__main__':
    N = 1000000
    x = range(1, N+1)
    # One counter per possible leading digit 1..9.
    # NOTE(review): np.int was removed in NumPy 1.24 -- use plain int there.
    frequency = np.zeros(9, dtype=np.int)
    f = 1
    print '开始计算...'
    t0 = time()
    # top1
    # a = 1
    # for t in x:
    #     f *= t
    #     i, a = top1(f, a)
    #     # print t, i, f, a
    #     frequency[i-1] += 1
    # top2
    # N2 = N ** 3
    # for t in x:
    #     f *= t
    #     f, i = top2(f, N2)
    #     frequency[i-1] += 1
    # Top 3: implementation 1
    # f = 0
    # for t in x:
    #     f += math.log10(t)
    #     frequency[top3(f) - 1] += 1
    # Top 3: implementation 2
    # y = np.cumsum(np.log10(x))
    # for t in y:
    #     frequency[top3(t) - 1] += 1
    # Top 4: essentially the same as Top 3
    # y[k] = log10(k!) via a cumulative sum; top4 tallies each leading digit.
    y = np.cumsum(np.log10(x))
    map(top4, y)
    t1 = time()
    print '耗时:', t1 - t0
    print frequency
    # Plot the leading-digit frequencies of N! with value labels per point.
    plt.figure(facecolor='w')
    t = np.arange(1, 10)
    plt.plot(t, frequency, 'r-', t, frequency, 'go', lw=2, markersize=8)
    for x,y in enumerate(frequency):
        plt.text(x+1.1, y, frequency[x], verticalalignment='top', fontsize=15)
    plt.title(u'%d!首位数字出现频率' % N, fontsize=18)
    plt.xlim(0.5, 9.5)
    plt.ylim(0, max(frequency)*1.03)
    plt.grid(b=True)
    plt.show()
    # using numpy
    # N = 170
    # x = np.arange(1, N+1)
    # f = np.zeros(9, dtype=np.int)
    # t1 = time()
    # y = factorial(x, exact=False)
    # z = map(top, y)
    # t2 = time()
    # print '耗时 = \t', t2 - t1
    # for t in z:
    #     f[t-1] += 1
    # print f
| [
"yc2739@nyu.edu"
] | yc2739@nyu.edu |
b23e293d98f34c501f22bf53408b9ce36345b1a5 | 3ee5bf329a2e58eb9f775ec5ee6a329fd3541e36 | /tests/CrawlerProcess/twisted_reactor_poll.py | b2ca046725745e77cc901ca338b3681caa6663e4 | [
"BSD-3-Clause"
] | permissive | scrapy/scrapy | 53bd79e500e2cb7441d33bfd61ba003962d5fb46 | cddb8c15d66831dc4e1bc4b745fcc6c534bb03dc | refs/heads/master | 2023-08-31T04:08:06.193342 | 2023-08-30T18:29:54 | 2023-08-30T18:29:54 | 529,502 | 47,472 | 12,120 | BSD-3-Clause | 2023-09-14T12:08:07 | 2010-02-22T02:01:14 | Python | UTF-8 | Python | false | false | 295 | py | import scrapy
from scrapy.crawler import CrawlerProcess
class PollReactorSpider(scrapy.Spider):
    # Spider stub: only a name is defined, since this script exercises
    # reactor installation rather than any crawling logic.
    name = "poll_reactor"
# TWISTED_REACTOR tells Scrapy which Twisted reactor to install before the
# crawl starts -- here the poll()-based one instead of the platform default.
process = CrawlerProcess(
    settings={
        "TWISTED_REACTOR": "twisted.internet.pollreactor.PollReactor",
    }
)
process.crawl(PollReactorSpider)
process.start()  # blocks until the crawl finishes
| [
"noreply@github.com"
] | noreply@github.com |
d355432f129370a16a64f113b98baaaa5509eb2f | 6162da15532484d6579c45691b2f1779ced5352f | /standardize_data.py | 49e9478c765853dc611fa51e13991a2b647b0c6e | [] | no_license | SyedTanzimAlam/DS_Python | d20dc6e65a2605fecdcff68bf17198dbc5749b72 | 5b339aef6383098f1929e8f00eeb39f1115530ed | refs/heads/master | 2020-03-18T00:31:23.863405 | 2018-06-06T06:38:53 | 2018-06-06T06:38:53 | 134,098,945 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 535 | py | """@author: Tanzim"""
# Standardize data (0 mean, 1 stdev)
import pandas as pd
# NOTE(review): filename and colnames are placeholders and must be replaced
# with a real CSV path and its actual column names before running.
filename = "PUT THE .csv file"
colnames = ['Column names in quotes seperated by comma']
dataset = pd.read_csv(filename, names=colnames).values
# separate array into input and output components
# (assumes the CSV has 9 columns: 8 features followed by 1 target -- confirm)
X = dataset[:,0:8] # rows:columns
Y = dataset[:,8]
# Add standard fit
from sklearn.preprocessing import StandardScaler
# NOTE(review): despite the name, `scaler` holds the standardized ARRAY
# returned by fit_transform, not a fitted StandardScaler object.
scaler = StandardScaler().fit_transform(X)
# summarize transformed data
import numpy as np
np.set_printoptions(precision=3)
print(scaler[0:5,:]) | [
"syedtanzimalam88@yahoo.com"
] | syedtanzimalam88@yahoo.com |
d3df2a9c6e7610fa9979b874da473c7708eec89a | b5cff0ace71ed81fc14735fb9ba63792fa6c2baa | /com/zk/demo/pydemo13_range.py | 684555576bc2a65df2eae269cc3c0b574f8cd406 | [] | no_license | xuantian868/pythonDemo | fc7f7d1b2f44de8ce6bbe53a823b96453b40760d | 591c44249e4acbfd437f1e5f9db72e180e7358f9 | refs/heads/master | 2021-03-16T03:31:55.116136 | 2017-06-21T00:52:36 | 2017-06-21T00:52:36 | 91,522,416 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,368 | py | # -*- coding: utf-8 -*-
'''
Created on 2017-5-17
列表生成式 列表生成式即List Comprehensions,是Python内置的非常简单却强大的可以用来创建list的生成式
@author: Administrator
'''
L = list(range(10)) #[0,9]
print (L)
L = list(range(1,11)) #[1,10]
print (L)
#但如果要生成[1x1, 2x2, 3x3, ..., 10x10]怎么做?方法一是循环
L = []
for i in range(1,11):
L.append(i*i)
print (L)
#但是循环太繁琐,而列表生成式则可以用一行语句代替循环生成上面的list
'''写列表生成式时,把要生成的元素x * x放到前面,后面跟for循环,就可以把list创建出来,十分有用'''
L = [x*x for x in range(1,11)]
print (L)
#for循环后面还可以加上if判断,这样我们就可以筛选出仅偶数的平方:
L = [x*x for x in range(1,11) if x%2==0]
print (L)
#还可以使用两层循环,可以生成全排列:
L = [a+b for a in 'abc' for b in 'xyz']
print (L)
d = {'x': 'A', 'y': 'B', 'z': 'C' }
L = [k +'='+v for k,v in d.items()]
print (L)
#将所有大写改为小写
L = ['Hello', 'World', 'IBM', 'Apple']
L = [s.lower() for s in L]
print (L)
#输出结果 ['hello', 'world', 'apple'] 使用内建的isinstance函数可以判断一个变量是不是字符
L1 = ['Hello', 'World', 18, 'Apple', None]
L2 = [s.lower() for s in L1 if isinstance(s,str)]
print (L2)
| [
"897312944@qq.com"
] | 897312944@qq.com |
50e591b01a2f7fd8b0b2c7d4f8d37c4060acdc33 | 528c81bff7c5943c528e0d66f400eb0147d1de85 | /tests/test_user.py | 492040f6a602b9e0b677fc7b1db966b4abf4a345 | [
"MIT"
] | permissive | Akumucollins/Personal-Blog | 83c5ef51d4e3093f3c160d1b933ad5008ad77b48 | 92bd3fcfe167ba979908a4f2f7c747a68ed02e7b | refs/heads/master | 2022-12-25T14:44:22.764149 | 2020-10-01T14:20:51 | 2020-10-01T14:20:51 | 299,817,028 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,408 | py | import unittest
from app.models import User,Role
from manage import app
class UserModelTest(unittest.TestCase):
    '''
    Test class to test behaviours of the User class
    Args:
        unittest.TestCase : Test case class that helps create test cases
    '''
    def setUp(self):
        '''
        Set up method that will run before every Test
        '''
        # Fresh Role/User pair per test so the cases stay independent.
        self.user_role = Role(name="Banana Eater")
        self.new_user = User(password='banana', role=self.user_role)
    def test_instance(self):
        '''
        Test case to check if new_user is an instance of User
        '''
        self.assertTrue( isinstance( self.new_user, User) )
    def test_password_setter(self):
        '''
        Test case to ascertain when a password is being hashed and pass_secure contains a value
        '''
        self.assertTrue(self.new_user.pass_secure is not None)
    def test_no_access_password(self):
        '''
        Test case to confirm the application raises an AttributeError when we try to access the password property
        '''
        with self.assertRaises(AttributeError):
            self.new_user.password
    def test_password_verification(self):
        '''
        Test case that confirms that our user password_hash can be verified when we pass in the correct the password
        '''
self.assertTrue(self.new_user.verify_password('banana')) | [
"akumucollins@gmail.com"
] | akumucollins@gmail.com |
21cb7af509a12efcab699223841fcdfe4cf32d1c | 881351f9fb0d0ae3a7bfc33ee1bb25cb8847a3c9 | /sample/migrations/0016_product.py | 8d069263625e6b0746e975f08e3638ec128b0b80 | [
"MIT"
] | permissive | ptrck/django-clone | fe2a1cdeb59f19d4c5c987eee15e30c07672cbee | 5c868b65ac6a3e3367595f8aa54abc42ef0d0144 | refs/heads/main | 2023-05-03T14:58:32.220072 | 2021-05-28T15:41:05 | 2021-05-28T15:41:05 | 372,395,918 | 0 | 0 | MIT | 2021-05-31T05:48:11 | 2021-05-31T05:48:10 | null | UTF-8 | Python | false | false | 692 | py | # Generated by Django 3.2.3 on 2021-05-17 12:29
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("sample", "0015_auto_20210423_0935"),
]
operations = [
migrations.CreateModel(
name="Product",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.TextField(unique=True)),
],
),
]
| [
"noreply@github.com"
] | noreply@github.com |
fd11696f5320f7e94e1d3b3c9d0c3b398a6bee0b | 7a90f09b9db357430c14eb6d9186e0d472a93520 | /pick_up_oordinate.py | 51b9bff074ff98ed01d0d97cd03511c491aa3709 | [] | no_license | zhangguo7/pick_up_coordinates | b68758bce2308888ef90472010bc63b86ef6e78e | 6996da6d6039b3597b16e8ad208846c3f7a1fe35 | refs/heads/master | 2021-01-21T07:04:18.674840 | 2017-03-07T01:01:00 | 2017-03-07T01:01:00 | 82,878,388 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,013 | py | # -*- coding: UTF-8 -*-
import MySQLdb
import requests
import time
from numpy.random import chisquare
class PickUpCoordinates(object):
"""拾取经纬度坐标
从数据库中获取没有经纬度的样本
传递到百度api获取经纬度
将获取结果回写数据库
"""
def __init__(self, conn,ak):
self.conn = conn
self.ak = ak
def pick_ll_main(self):
"""执行获取经纬度的主函数"""
cur_select = conn.cursor()
cur_update = conn.cursor()
self._get_samples_with_no_ll(cur_select)
self._loop_gain_ll(cur_select,cur_update)
cur_select.close()
cur_update.close()
def _get_samples_with_no_ll(self, cur_select):
"""从数据库中抽取没有包含经纬度的样本
:param cur_select: 查询数据的cursor
:return: cur_select
"""
sql = "SELECT " \
" registered_no," \
" company_address " \
"From craw_raw " \
"WHERE company_address != ''"
cur_select.execute(sql)
return cur_select
def _gain_ll(self, sample_info, cur_update):
"""获取单条样本的经纬度信息,并执行更新数据库的命令
:param sample_info: 样本信息
:param cur_update: 更新数据库的cursor
"""
params = {
'address': '%s' % sample_info[1],
'output': 'json',
'ak': '%s' %self.ak
}
url = 'http://api.map.baidu.com/geocoder/v2/'
response = requests.get(url, params)
dict = response.json()['result']['location']
lat, lng = dict['lat'], dict['lng']
sql_update = "UPDATE craw_raw SET " \
" longitude = %.16f," \
" latitude = %.16f " \
"WHERE registered_no='%s' " \
% (lat, lng, sample_info[0])
cur_update.execute(sql_update)
print(u'%s 经纬度被写入 !' % sample_info[0])
def _loop_gain_ll(self, cur_select, cur_update):
"""循环获取经纬度的信息"""
failure = 0
while cur_select.rownumber < cur_select.rowcount:
try:
sample_info = cur_select.fetchone()
self._gain_ll(sample_info, cur_update)
i = chisquare(0.5)
time.sleep(i)
except:
failure += 1
print(u'经纬度获取失败,累计获取失败样本:%d 条'%failure)
finally:
self.conn.commit()
if __name__ == "__main__":
conn = MySQLdb.connect(host='localhost',user='root',passwd='123456',
charset='utf8',db='pick_up_coordinates')
ak = open('ak_raw').read()
puc_obj = PickUpCoordinates(conn,ak)
puc_obj.pick_ll_main()
try:
puc_obj.pick_ll_main()
except Exception as e:
print(e)
finally:
conn.close()
print(u'所有经纬度获取完成 !') | [
"zhangguo7@aliyun.com"
] | zhangguo7@aliyun.com |
08e64277223c06c607a305f6816878e91c7112f9 | 3b6b76aae93eb8a2c738a1364e923d3bad20e0a6 | /articles/wsgi-intro/twill-wsgi-example.py | c8d9450fb8f6a4c7610666f7a9687e7e5a2e8ccb | [] | no_license | ctb/ivory.idyll.org | 24e4a0f67fbbde399118aff3c27a03bac304aa8f | 88df5f33361e6e13eda248ee55f1e4e460b998d9 | refs/heads/master | 2020-04-10T10:42:00.111811 | 2018-12-08T19:54:05 | 2018-12-08T19:54:05 | 160,973,601 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 393 | py | #! /usr/bin/env python
import twill
def simple_app(environ, start_response):
status = '200 OK'
response_headers = [('Content-type','text/plain')]
start_response(status, response_headers)
return ['Hello world!\n']
if __name__ == '__main__':
print '*** installing WSGI intercept hook ***\n'
twill.add_wsgi_intercept('localhost', 80, lambda: simple_app)
twill.shell.main()
| [
"titus@idyll.org"
] | titus@idyll.org |
f59015df0fd96a8dc9105e2b9aec3f31d216ca8f | df7b40e95718ac0f6071a0ba571b42efc81cf6de | /configs/dnlnet/dnl_r50-d8_512x512_160k_ade20k.py | 5305689d09b944f6e37aa85567ce3f29fc6974a7 | [
"Apache-2.0"
] | permissive | shinianzhihou/ChangeDetection | 87fa2c498248e6124aeefb8f0ee8154bda36deee | 354e71234bef38b6e142b6ba02f23db958582844 | refs/heads/master | 2023-01-23T20:42:31.017006 | 2023-01-09T11:37:24 | 2023-01-09T11:37:24 | 218,001,748 | 162 | 29 | Apache-2.0 | 2022-11-03T04:11:00 | 2019-10-28T08:41:54 | Python | UTF-8 | Python | false | false | 249 | py | _base_ = [
'../_base_/models/dnl_r50-d8.py', '../_base_/datasets/ade20k.py',
'../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py'
]
model = dict(
decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150))
| [
"1178396201@qq.com"
] | 1178396201@qq.com |
9cd9159062c4df5c69d0f0a0021668b9d1cef742 | a5c550725c0707b6ad62f2015f7979aa0eadd1d0 | /mysite/blogging/urls.py | 09ca2df59882e5bcf36991febd32983e120e68af | [] | no_license | colephalen/mysite01 | 313445d606446503243c96a111b0919e44208661 | f71be9c01d7903976972faf43d4e8fe6e26215b6 | refs/heads/master | 2020-09-06T08:18:25.235983 | 2019-11-08T02:47:04 | 2019-11-08T02:47:04 | 220,373,420 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 225 | py | # blogging/urls
from django.urls import path
from blogging.views import list_view, detail_view
urlpatterns = [
path('', list_view, name="post_index"),
path('posts/<int:post_id>/', detail_view, name="post_detail"),
] | [
"colephalen@gmail.com"
] | colephalen@gmail.com |
5f5b4e4172a9aafe394060657cf1b1bd9a055427 | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_5631572862566400_0/Python/ugo/c.py | fc210345694d8b61a3644358a93468fbce72a716 | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 1,899 | py |
def get_candidates(bffs):
ret = []
for i in range(len(bffs)):
for j in range(i+1, len(bffs)):
if bffs[i] == j and bffs[j] == i:
ret.append((i, j))
return ret
def longest(n, dontgo, edges):
print 'longest', n, dontgo
ret = 1
for nb in edges[n]:
if nb != dontgo:
ret = max(ret, longest(nb, dontgo, edges) + 1)
return ret
# def dfs(n, starting, visited, edges):
# next = edges[n]
# if starting in visited
f = open('c.small.in')
fout = open('c.out', 'w')
numCases = int(f.readline().strip())
for numCase in range(numCases):
print 'CASE: {}'.format(numCase+1)
N = int(f.readline().strip())
bffs = [None] * N
reverse_bffs = []
for i in range(N):
reverse_bffs.append([])
ss = f.readline().split()
for i in range(N):
bffs[i] = int(ss[i]) - 1
reverse_bffs[int(ss[i]) - 1].append(i)
# print bffs
# print reverse_bffs
#case 1
case1max = 0
candidates = get_candidates(bffs)
len_candidates = len(candidates)
for (c_x, c_y) in candidates:
# print c_x, c_y
print c_x
d1 = longest(c_x, c_y, reverse_bffs)
print c_y
d2 = longest(c_y, c_x, reverse_bffs)
case1max = max(case1max, d1+d2 + 2 * (len_candidates-1) )
print c_x, d1
print c_y, d2
print case1max
case2max = 0
for n in range(0, N):
if len(reverse_bffs[n]) == 0:
continue
cnt = 1
cur = n
visited = set()
visited.add(cur)
while True:
next = bffs[cur]
if next == n:
break
if next in visited:
cnt = 0
break
visited.add(next)
cur = next
cnt += 1
print 'cycle starting n:', n, cnt
case2max = max(case2max, cnt)
# visited = set()
# visited.add(n)
# d = dfs(n, n, visited, bffs)
# print n, d
# case2max = max(case2max, d)
#case 2
# for node in range(1, N+1):
# print ' '.join(result)
print 'case1max', case1max, 'case2max', case2max
fout.write('Case #{}: {}\n'.format(numCase+1, max(case1max, case2max)))
fout.close()
| [
"alexandra1.back@gmail.com"
] | alexandra1.back@gmail.com |
5de35e142d78a609467672c03e9ce01600cf7b4f | 60fda86d4df5a209e5a1503f45e9abfbbbcd5ed9 | /NestedRouters2/NestedRouters2/settings.py | 2e6064fe84d1edf3920168dacd9445e33b214d99 | [] | no_license | jkvishwanath/django_conceptwise_samples | ee8554ffe3c1c1342906eec18850f2ec4f7a5e6e | 017a0ddb14aaf293b8792220bd87b8d7107ef8c2 | refs/heads/master | 2022-11-07T16:57:37.735472 | 2020-06-18T10:26:16 | 2020-06-18T10:26:16 | 268,430,941 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,225 | py | """
Django settings for NestedRouters2 project.
Generated by 'django-admin startproject' using Django 3.0.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '5fn7os8on0k(m=+v7nhn$1+p72zh8u1+uazc^(9b=y^@%03hlb'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'drf_nested_routing',
'rest_framework',
'nestedRouting',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'NestedRouters2.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')]
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'NestedRouters2.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
| [
"jkashivishwanath5@gmail.com"
] | jkashivishwanath5@gmail.com |
a5a86bbd328a499a96f39f3d75c3be98c09f07af | 3eeb484cff8e36f5588e887e7d6f22538b0d8c98 | /leetcode/editor/cn/[69]x 的平方根.py | 8deb95193f4ee32ca6ebd12efcbb9a59cd422357 | [] | no_license | chendingyan/My-Leetcode | 37fad8ba989280b416bbc30fb1de57269be6b0ea | 7bcba42556475f56fad995b97a37b98f4981da8c | refs/heads/master | 2022-11-04T15:10:57.787717 | 2022-09-23T06:47:15 | 2022-09-23T06:47:15 | 178,884,222 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 901 | py | # 实现 int sqrt(int x) 函数。
#
# 计算并返回 x 的平方根,其中 x 是非负整数。
#
# 由于返回类型是整数,结果只保留整数的部分,小数部分将被舍去。
#
# 示例 1:
#
# 输入: 4
# 输出: 2
#
#
# 示例 2:
#
# 输入: 8
# 输出: 2
# 说明: 8 的平方根是 2.82842...,
# 由于返回类型是整数,小数部分将被舍去。
#
# Related Topics 数学 二分查找
# 👍 722 👎 0
# leetcode submit region begin(Prohibit modification and deletion)
class Solution:
def mySqrt(self, x):
left = 0
right = x
while left <= right:
mid = int((right - left) / 2 + left)
if mid * mid > x:
right = mid - 1
elif mid * mid <= x:
left = mid + 1
return right
# leetcode submit region end(Prohibit modification and deletion)
| [
"dingyan.chen96@gmail.com"
] | dingyan.chen96@gmail.com |
dc3863c80f79d6451076cad50b43b6d12da5df23 | 9a0bd4288a4785562ee912173869bf3956ab2fc4 | /Code/resources/proxyfinder.py | 48d2853182afe2010f9c50d2c04adcc28ee8cd3b | [] | no_license | shrey-agarwal/question-quality-analyser | 0e9c017f7a9cbc2006ea39d6ee6055c2b772fb63 | ae24295fc82762fc274c6cbcc354bb4a44b25c00 | refs/heads/master | 2021-04-27T12:11:23.344427 | 2018-02-23T04:44:44 | 2018-02-23T04:44:44 | 122,574,189 | 0 | 0 | null | 2018-02-23T04:41:24 | 2018-02-23T04:41:24 | null | UTF-8 | Python | false | false | 1,395 | py | # Invocation: python3 proxy_finder.py 1000 500 (mines 1000 http and 500 https proxies)
import asyncio
import json
import re
import sys
from proxybroker import Broker
def __get_proxies(protocol, limit):
proxy_list = []
async def save(proxies):
while True:
proxy = await proxies.get()
if proxy is None:
break
match = re.search('([\d]+\.[\d]+\.[\d]+\.[\d]+:[\d]+)', str(proxy))
proxy_list.append(match.group(1))
print('Count:', len(proxy_list))
proxies = asyncio.Queue()
broker = Broker(proxies)
tasks = asyncio.gather(broker.find(types=[protocol.upper()], limit=limit), save(proxies))
loop = asyncio.get_event_loop().run_until_complete(tasks)
return proxy_list
if __name__ == "__main__":
if len(sys.argv) < 3:
sys.exit('Not enough arguments')
http_proxies = __get_proxies('http', int(sys.argv[1]))
https_proxies = __get_proxies('https', int(sys.argv[2]))
proxies = {}
try:
with open('proxies.txt', 'r') as f:
proxies = json.load(f)
except:
proxies['http'] = []
proxies['https'] = []
with open('proxies.txt', 'w') as f:
proxies['http'] = list(set(proxies['http'] + http_proxies))
proxies['https'] = list(set(proxies['https'] + https_proxies))
print(json.dumps(proxies), file=f)
| [
"mohit.surana95@gmail.com"
] | mohit.surana95@gmail.com |
87e82915ecd4164bd64f8577b7cddd41cc5493fa | b90ee7486ee91e6ee11a628bf961f463e2858d16 | /RES_Scripts/LSS_RankScript_WestGalvestonBay.py | 8d0d18a7737722c7c8a6b293d590ca852a56902a | [] | no_license | sjtouzel/UMCodeShare | 0eaff14703a7abd843d8da1af6465fa600cd9573 | 09a026bf98acdf57d49848ea15886074022debca | refs/heads/master | 2023-02-04T15:52:04.850643 | 2023-01-31T23:20:44 | 2023-01-31T23:20:44 | 129,941,069 | 0 | 1 | null | 2018-04-17T21:00:01 | 2018-04-17T17:26:52 | Python | UTF-8 | Python | false | false | 11,171 | py | import arcpy
import time
"""
========================================================================
LSS_RankScript_Alabama.py
========================================================================
Author: Joe Touzel
========================================================================
Date Modifier Description of Change
2019/07/01 KC Published
2019/12/26 JT Modified
2020/05/21 JT Alabama updates
2020/07/30 JT Set up for West Galveston Bay Search
========================================================================
Description:
This script is based on a model made in Model Builder for ArcGIS by Amy
Ferguson for RES. The model takes a parcel data set and adds a standard
set of fields that are used to rank parcels in the RES land search system.
The ranking categories are multiplied together to calculate a final ranking.
Current script written by Katherine Clark, July 2019.
Inputs:
- Parcel Data with spatial analysis and Publishing Prep complete
- Rank classes as specified by the Land Search Request
"""
def Add_Rank_Fields(parcel_input):
new_fields = ['Canopy_cover_parcelR',
'Canopy_cover_riparian_bufferR',
'Stream_Linear_FeetR',
'LULC_bufferR',
'LULC_parcelR']
#'NWI_PWSLR']
# 'WetlandRestR',
# 'WetlandPresR',
# 'LF_Strm_HWR',
# 'NHDR',
# 'PriorityR']
attribute_type = 'SHORT'
for field in new_fields:
arcpy.AddMessage("Adding field: {}".format(field)) # print the field we're adding
arcpy.AddField_management(parcel_input, field_name=field,
field_type=attribute_type)
def Canopy_Parcel_Rank_Calc(Canopy_Mean):
val = 1
if Canopy_Mean < 50:
val = 3
elif Canopy_Mean >= 50:
val = 1
return val
def Canopy_Buffer_Rank_Calc(Canopy_Buffer_Mean):
val = 1
if Canopy_Buffer_Mean < 50:
val = 3
elif Canopy_Buffer_Mean >= 50:
val = 1
return val
def Stream_Linear_Ft_Rank_Calc(Stream):
val = 0
if Stream < 3000:
val = 0
elif Stream < 4000:
val = 1
elif Stream < 5000:
val = 2
elif Stream < 6000:
val = 3
elif Stream > 7001:
val = 4
return val
def LULC_Buffer_Rank_Calc(lulc, lc1, lc2, lc3, lc4, lc5, lc6, lc7, lc8, lc9, lc10, lc11, lc12, lc13, lc14, lc15):
val = 1
if(lulc == lc1 or lulc == lc4 or lulc == lc5):
val = 0
if(lulc == lc2 or lulc == lc3 or lulc == lc6 or lulc == lc14 or lulc == lc15):
val = 1
if(lulc == lc7 or lulc == lc8 or lulc == lc9):
val = 2
if(lulc == lc10 or lulc == lc11):
val = 3
if(lulc == lc12 or lulc == lc13):
val = 4
return val
def LULC_Parcel_Rank_Calc(lulc, lc1, lc2, lc3, lc4, lc5, lc6, lc7, lc8, lc9, lc10, lc11, lc12, lc13, lc14, lc15):
val = 1
if(lulc == lc1 or lulc == lc4 or lulc == lc5):
val = 0
if(lulc == lc2 or lulc == lc3 or lulc == lc6 or lulc == lc14 or lulc == lc15):
val = 1
if(lulc == lc7 or lulc == lc8 or lulc == lc9):
val = 2
if(lulc == lc10 or lulc == lc11):
val = 3
if(lulc == lc12 or lulc == lc13):
val = 4
return val
def NWI_PWSL_Rank_Calc(nwi, pwsl):
val = 1
Tot_ac_pot = nwi + pwsl
if Tot_ac_pot < 20:
val = 1
elif Tot_ac_pot < 40:
val = 2
elif Tot_ac_pot < 60:
val = 3
elif Tot_ac_pot >= 60:
val = 4
return val
def Restoration_Rank_Calc(Restor):
val = 1
if Restor < 1:
val = 1
elif Restor < 5:
val = 2
elif Restor < 10:
val = 3
elif Restor >= 15:
val = 4
return val
def Preservation_Rank_Calc(Preserv):
val = 1
if Preserv > 0:
val = 2
return val
def LF_Strm_HW_Calc(LF_Strm_HW):
val = 1
if LF_Strm_HW < 1000:
val = 1
elif LF_Strm_HW < 2000:
val = 2
elif LF_Strm_HW < 3000:
val = 3
elif LF_Strm_HW >= 3000:
val = 4
return val
def NHD_Calc(NHD):
val = 0
if NHD < 5000:
val = 0
elif NHD < 6000:
val = 1
elif NHD < 7000:
val = 2
elif NHD < 8000:
val = 3
elif NHD > 8001:
val = 4
return val
def main():
county_parcel_data = arcpy.GetParameterAsText(0)
# Write to Log
arcpy.AddMessage('')
arcpy.AddMessage("===================================================================")
sVersionInfo = 'LSS_RankScript_WestGalvestonBay.py, v20200730'
arcpy.AddMessage('LSS Ranking Script, {}'.format(sVersionInfo))
arcpy.AddMessage("")
arcpy.AddMessage("Support: jtouzel@res.us, 281-715-9109")
arcpy.AddMessage("")
arcpy.AddMessage("Input FC: {}".format(county_parcel_data))
field_names = [f.name for f in arcpy.ListFields(county_parcel_data)]
arcpy.AddMessage("Field Names: {}".format(", ".join(field_names)))
arcpy.AddMessage("===================================================================")
Add_Rank_Fields(county_parcel_data)
fields = ['Canopy_cover_parcel', 'Canopy_cover_parcelR']
arcpy.AddMessage("===================================================================")
arcpy.AddMessage("Calculate Parcel Canopy Cover Ranking") # Print the Ranking info for Parcel Canopy
with arcpy.da.UpdateCursor(county_parcel_data, fields) as cursor:
for row in cursor:
rank_val = Canopy_Parcel_Rank_Calc(row[0])
row[1] = rank_val
cursor.updateRow(row)
time.sleep(1) # gives a 1 second pause before going to the next step
fields = ['Canopy_cover_riparian_buffer', 'Canopy_cover_riparian_bufferR']
arcpy.AddMessage("===================================================================")
arcpy.AddMessage("Calculate Buffer Canopy Cover Ranking") # Print the Ranking info for Buffer Canopy
with arcpy.da.UpdateCursor(county_parcel_data, fields) as cursor:
for row in cursor:
rank_val = Canopy_Buffer_Rank_Calc(row[0])
row[1] = rank_val
cursor.updateRow(row)
time.sleep(1) # gives a 1 second pause before going to the next step
fields = ['NHD', 'Stream_Linear_FeetR']
arcpy.AddMessage("===================================================================")
arcpy.AddMessage("Calculate NHD Stream LF Ranking") # Print the Ranking info
with arcpy.da.UpdateCursor(county_parcel_data, fields) as cursor:
for row in cursor:
rank_val = Stream_Linear_Ft_Rank_Calc(row[0])
row[1] = rank_val
cursor.updateRow(row)
time.sleep(1) # gives a 1 second pause before going to the next step
fields = ['LULC_riparian_buffer', 'LULC_bufferR']
arcpy.AddMessage("===================================================================")
arcpy.AddMessage("Calculate Buffer LULC Ranking") # Print the Ranking info
with arcpy.da.UpdateCursor(county_parcel_data, fields) as cursor:
for row in cursor:
rank_val = LULC_Buffer_Rank_Calc(row[0], "Open Water", "Developed, Open Space", "Developed, Low Intensity", "Developed, Medium Intensity", "Developed, High Intensity", "Barren Land", "Deciduous Forest", "Evergreen Forest", "Mixed Forest", "Shrub/Scrub", "Grassland/Herbaceous", "Hay/Pasture", "Cultivated Crops", "Woody Wetlands", "Emergent Herbaceous Wetlands")
row[1] = rank_val
cursor.updateRow(row)
time.sleep(1) # gives a 1 second pause before going to the next step
fields = ['LULC_parcel', 'LULC_parcelR']
arcpy.AddMessage("===================================================================")
arcpy.AddMessage("Calculate Parcel LULC Ranking") # Print the Ranking info
with arcpy.da.UpdateCursor(county_parcel_data, fields) as cursor:
for row in cursor:
rank_val = LULC_Parcel_Rank_Calc(row[0], "Open Water", "Developed, Open Space", "Developed, Low Intensity", "Developed, Medium Intensity", "Developed, High Intensity", "Barren Land", "Deciduous Forest", "Evergreen Forest", "Mixed Forest", "Shrub/Scrub", "Grassland/Herbaceous", "Hay/Pasture", "Cultivated Crops", "Woody Wetlands", "Emergent Herbaceous Wetlands")
row[1] = rank_val
cursor.updateRow(row)
time.sleep(1) # gives a 1 second pause before going to the next step
# fields = ['NWI_acres','PWSL_acres', 'NWI_PWSLR']
# arcpy.AddMessage("===================================================================")
# arcpy.AddMessage("Calculate NWI PWSL Ranking") # Print the Ranking info
# with arcpy.da.UpdateCursor(county_parcel_data, fields) as cursor:
# for row in cursor:
# rank_val = NWI_PWSL_Rank_Calc(row[0], row[1])
# row[2] = rank_val
# cursor.updateRow(row)
# time.sleep(1) # gives a 1 second pause before going to the next step
# fields = ['Restor', 'WetlandRestR']
# arcpy.AddMessage("===================================================================")
# arcpy.AddMessage("Calculate Restoration Rank Ranking") # Print the Ranking info
# with arcpy.da.UpdateCursor(county_parcel_data, fields) as cursor:
# for row in cursor:
# rank_val = Restoration_Rank_Calc(row[0])
# row[1] = rank_val
# cursor.updateRow(row)
# time.sleep(1) # gives a 1 second pause before going to the next step
# fields = ['Preserv', 'WetlandPresR']
# arcpy.AddMessage("===================================================================")
# arcpy.AddMessage("Calculate Preservation Ranking") # Print the Ranking info
# with arcpy.da.UpdateCursor(county_parcel_data, fields) as cursor:
# for row in cursor:
# rank_val = Preservation_Rank_Calc(row[0])
# row[1] = rank_val
# cursor.updateRow(row)
# time.sleep(1) # gives a 1 second pause before going to the next step
# fields = ['LF_Strm_HW', 'LF_Strm_HWR']
# arcpy.AddMessage("===================================================================")
# arcpy.AddMessage("Calculate Stream Headwater Ranking") # Print the Ranking info
# with arcpy.da.UpdateCursor(county_parcel_data, fields) as cursor:
# for row in cursor:
# rank_val = LF_Strm_HW_Calc(row[0])
# row[1] = rank_val
# cursor.updateRow(row)
# time.sleep(1) # gives a 1 second pause before going to the next step
# fields = ['NHD', 'NHDR']
# arcpy.AddMessage("===================================================================")
# arcpy.AddMessage("Calculate NHD stream LF Ranking") # Print the Ranking info
# with arcpy.da.UpdateCursor(county_parcel_data, fields) as cursor:
# for row in cursor:
# rank_val = NHD_Calc(row[0])
# row[1] = rank_val
# cursor.updateRow(row)
# time.sleep(1) # gives a 1 second pause before going to the next step
if __name__ == '__main__':
main()
| [
"37873145+sjtouzel@users.noreply.github.com"
] | 37873145+sjtouzel@users.noreply.github.com |
59252af12c8239b80dc9d1744afb5a51b53726b7 | eddc1543ea682d348420f3fd1b4396348e82efc2 | /back-end/FriendManagement/FriendManagement/Management/migrations/0002_auto_20200609_1035.py | cc78f01cc889b8b789bbce074cf715d1aa8347d1 | [] | no_license | ChinhPV1293/StudyReactJS | 68a7375901168992e2580dc9d70d3b8241b5e4cc | f0fe6c718dc5dbb3d50504d9c65ca969ae03acf8 | refs/heads/master | 2021-03-20T09:48:49.533232 | 2020-06-23T08:02:30 | 2020-06-23T08:02:30 | 247,199,529 | 0 | 1 | null | 2021-01-06T04:30:33 | 2020-03-14T02:44:24 | JavaScript | UTF-8 | Python | false | false | 1,475 | py | # Generated by Django 3.0.4 on 2020-06-09 03:35
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('Management', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='FriendInfomation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nameFriend', models.CharField(blank=True, default=None, max_length=36, null=True)),
('is_Men', models.BooleanField(default=None)),
('Birthday', models.DateField(default='1990-01-01')),
('phoneNumber', models.IntegerField(blank=True, default=None, null=True)),
('address', models.TextField(blank=True, default=None, null=True)),
],
),
migrations.CreateModel(
name='Group_Friend',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nameGroup', models.TextField(default=None, max_length=100)),
('description', models.TextField()),
],
),
migrations.DeleteModel(
name='book',
),
migrations.AddField(
model_name='friendinfomation',
name='groups',
field=models.ManyToManyField(to='Management.Group_Friend'),
),
]
| [
"chinhpv1293@gmail.com"
] | chinhpv1293@gmail.com |
23688473d73c3ad4e76facc9d7ed5015fc9eea25 | 00b93e4359464cc8f6c8fa634d6351208a668eeb | /parismaps.py | 215fe26ddc600f9f16474cac27bf469743e16ff0 | [
"MIT"
] | permissive | davidferguson/mactutor-converter | ebab1b20fcb1bfda0f2b0b48c5da75d32c8a4dfa | 288fd7934f6a9c0185e7d8c07ea81bf6539ff08c | refs/heads/master | 2021-09-08T07:26:51.071104 | 2021-09-05T12:51:09 | 2021-09-05T12:51:09 | 191,822,213 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,605 | py | import json
import glob
import os
import shutil
import regex as re
import lektor.metaformat
DATASHEET_DIR = '/Users/david/Documents/MacTutor/actual-work/datasheets/'
CONTENT_DIR = '/Users/david/Documents/MacTutor/actual-work/dev/mathshistory-site/content/'
SERVER_FILES = '/Users/david/Documents/MacTutor/actual-work/from-server/2/history/'
dir = os.path.join(SERVER_FILES, 'Honours','Parismaps/')
for filename in glob.iglob(dir + '*.html'):
if filename.lower().endswith(('.png', '.jpg', '.jpeg', '.gif', 'index.html', 'xx.html')):
continue
with open(filename, 'r') as f:
data = f.read()
filename = os.path.basename(filename).replace('.html', '')
pattern = re.compile(r'google\.maps\.LatLng\((?P<lat>-?\d+\.\d+),(?P<long>-?\d+\.\d+)\)')
match = pattern.search(data)
if not match:
assert False
lat = match.group('lat')
long = match.group('long')
pattern = re.compile(r'<h2>(?P<name>.+?)</h2>')
match = pattern.search(data)
if not match:
print(filename)
assert False
name = match.group('name')
data = {
'_model': 'parismap',
'latitude': lat,
'longitude': long,
'name': name
}
items = list(data.items())
lektordata = lektor.metaformat.serialize(items)
dir = os.path.join(CONTENT_DIR, 'Parismaps', filename)
if not os.path.isdir(dir):
os.mkdir(dir)
contents_file = os.path.join(dir, 'contents.lr')
with open(contents_file, 'wb') as f:
for chunk in lektor.metaformat.serialize(items, encoding='utf-8'):
f.write(chunk)
| [
"davidferguson@users.noreply.github.com"
] | davidferguson@users.noreply.github.com |
7cc7c5fa0da86735bf9a7b0efcdbfe8eb1e7cd4c | 6f8fb8c7a7dd06be1d05a8dde3106daa46a27d6e | /Missing Multipliers.py | 5783d32638c6e197a53830f31aba46ee432530a3 | [] | no_license | laughtLOOL/GrokLearning | b2d6147b3deb32ed8f0851ccdb165cfd60943649 | cee864d170dc86931b2f6ed0f193ec65a806a444 | refs/heads/master | 2023-03-27T00:00:04.290067 | 2021-03-13T10:45:15 | 2021-03-13T10:45:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 171 | py | times, step = int(input("Times table: ")), int(input("Step: "))
curstep = 3
while 3 <= curstep <= 12:
print(f"{times} x [ ] = {times * curstep}")
curstep += step
| [
"lkc364636722@gmail.com"
] | lkc364636722@gmail.com |
59673ace5224787385a1a946cb7ea56655ce1dee | 35e731c868d29f2a609b55de8a72656b6b60ecd0 | /boa.py | f773bf4a9a64d4632a9eb5e918190ddd62d45c41 | [] | no_license | nobody4678/BOA | 63590b236e2b507a2732a57a5d86959cbe06397f | 961821071a16b495a31aeca13b806016e7631755 | refs/heads/main | 2023-01-20T14:09:41.903227 | 2020-11-24T09:28:23 | 2020-11-24T09:28:23 | 315,557,556 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 29,112 | py | import sys
sys.path.append('.')
import os
import cv2
import copy
import time
import torch
import random
import joblib
import argparse
import numpy as np
import os.path as osp
import torch.nn as nn
from tqdm import tqdm
import learn2learn as l2l
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from torchvision.utils import make_grid
import config
import constants
from models import hmr, SMPL
from datasets import H36M, PW3D, HP3D
from utils.pose_utils import reconstruction_error
from utils.geometry import perspective_projection, rotation_matrix_to_angle_axis, batch_rodrigues
from smplify.prior import MaxMixturePrior
# Command-line configuration for the online adaptation experiment.
# Options are grouped: general experiment setup, baseline (single-level)
# hyper-parameters, mean-teacher hyper-parameters, and bilevel/motion ones.
parser = argparse.ArgumentParser()
parser.add_argument('--expdir', type=str, default='', help='common dir of each experiment')
parser.add_argument('--name', type=str, default='', help='exp name')
parser.add_argument('--seed', type=int, default=22, help='random seed')
parser.add_argument('--model_file', type=str, default='logs/GN-adv-lsgan-0root-v2-loss5-2stages-v2-fintune/checkpoints/2020_10_29-12_29_41.pt', help='base model')
parser.add_argument('--num_augsamples', type=int, default=0, help='times of augmentation')
parser.add_argument('--batch_size', type=int, default=1, help='')
parser.add_argument('--dataset_name', type=str, default='3dpw', choices=['3dpw', 'mpi-inf-3dhp'], help='test set name')
parser.add_argument('--img_res', type=int, default=224, help='image resolution')
parser.add_argument('--T', type=int, default=1, help='times of adaptation')
parser.add_argument('--offline', action='store_true', default=False, help='offline adapt?')
## baseline hyper-parameters
parser.add_argument('--lr', type=float, default=3e-6, help='learning rate')
parser.add_argument('--beta1', type=float, default=0.5, help='adam beta1')
parser.add_argument('--beta2', type=float, default=0.999, help='adam beta2')
parser.add_argument('--use_mixtrain', action='store_true', default=False)
parser.add_argument('--s2dsloss_weight', type=float, default=10, help='weight of reprojection kp2d loss')
parser.add_argument('--shapepriorloss_weight', type=float, default=1e-5, help='weight of shape prior')
parser.add_argument('--gmmpriorloss_weight', type=float, default=2e-4, help='weight of pose prior(GMM)')
parser.add_argument('--labelloss_weight', type=float, default=1, help='weight of h36m loss')
## mean-teacher hyper-parameters
parser.add_argument('--use_meanteacher', action='store_true', default=False)
parser.add_argument('--ema_decay', type=float, default=0.3, help='ema_decay * T + (1-ema_decay) * M')
# fixed
# NOTE(review): several help strings below look copy-pasted from other
# options (e.g. consistent_s3d_weight says 'weight of shape prior'); the
# destination names, not the help texts, reflect what the weights scale.
parser.add_argument('--consistentloss_weight', type=float, default=0.01, help='weight of consistent loss')
parser.add_argument('--consistent_s3d_weight', type=float, default=5, help='weight of shape prior')
parser.add_argument('--consistent_s2d_weight', type=float, default=5, help='weight of consistent loss')
parser.add_argument('--consistent_pose_weight', type=float, default=1, help='weight of pose prior(GMM)')
parser.add_argument('--consistent_beta_weight', type=float, default=0.001, help='weight of h36m loss')
## bilevel hyper parameters
parser.add_argument('--use_bilevel', action='store_true', default=False)
parser.add_argument('--use_motionloss', action='store_true', default=False)
parser.add_argument('--metalr', type=float, default=3e-6, help='learning rate')
parser.add_argument('--prev_n', type=int, default=5)
parser.add_argument('--motionloss_weight', type=float, default=0.1)
parser.add_argument('--only_use_motionloss', action='store_true', default=False)
# predefined variables
# Module-level singletons shared by all functions below: the CUDA device,
# the H36M joint regressor (maps SMPL vertices to H36M 3D keypoints), and
# three SMPL body models (neutral for predictions, male/female for 3DPW GT).
device = torch.device('cuda')
J_regressor = torch.from_numpy(np.load(config.JOINT_REGRESSOR_H36M)).float()
smpl_neutral = SMPL(config.SMPL_MODEL_DIR, create_transl=False).to(device)
smpl_male = SMPL(config.SMPL_MODEL_DIR, gender='male', create_transl=False).to(device)
smpl_female = SMPL(config.SMPL_MODEL_DIR, gender='female', create_transl=False).to(device)
# -- end
# tools of mean teacher
def create_model(ema=False):
    """Instantiate an HMR regressor initialised with the SMPL mean parameters.

    When *ema* is True every parameter is detached in place so the network
    can act as a gradient-free teacher that is only updated via EMA.
    """
    net = hmr(config.SMPL_MEAN_PARAMS)
    if not ema:
        return net
    for weight in net.parameters():
        weight.detach_()
    return net
def update_ema_variables(model, ema_model, alpha, global_step):
    """Exponential-moving-average update of the teacher weights.

    Each teacher parameter becomes ``alpha * teacher + (1 - alpha) * student``.
    Early in training (small ``global_step``) the effective alpha is reduced
    so the teacher tracks the true running average instead of its (arbitrary)
    initial value.

    Args:
        model: student network supplying the fresh parameters.
        ema_model: teacher network updated in place.
        alpha: upper bound on the EMA decay.
        global_step: number of updates performed so far (0-based).
    """
    # Use the true average until the exponential average is more correct
    alpha = min(1 - 1 / (global_step + 1), alpha)
    for ema_param, param in zip(ema_model.parameters(), model.parameters()):
        # In-place EMA.  The keyword form `add_(tensor, alpha=...)` replaces
        # the deprecated positional `add_(scalar, tensor)` signature, which
        # errors on recent PyTorch releases.
        ema_param.data.mul_(alpha).add_(param.data, alpha=1 - alpha)
# -- end
# other tools
def seed_everything(seed=42):
    """Seed every RNG (Python, numpy, torch CPU/CUDA) for reproducibility.

    Bug fixes:
    - The original signature was ``seed_everything(self, seed=42)`` even
      though this is a free function, so the call site
      ``seed_everything(self.options.seed)`` bound the requested seed to the
      unused ``self`` slot and always seeded with the default 42.  The stray
      parameter is removed so the positional argument reaches ``seed``.
    - The environment variable name had a typo ('PYHTONHASHSEED'); it is now
      'PYTHONHASHSEED'.  (Note: setting it at runtime only affects child
      processes, not the current interpreter's hash randomisation.)
    """
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.backends.cudnn.deterministic = True
    torch.cuda.manual_seed_all(seed)
    print('seed has been set')
# -- end
class Adaptor():
    def __init__(self, options):
        """Build the full adaptation setup from parsed CLI options.

        Creates (in order): logging dirs + TensorBoard writer, RNG seeding,
        the student model (optionally wrapped in learn2learn MAML for bilevel
        optimisation), an optional EMA teacher, the Adam optimizer, then
        loads the pretrained base checkpoint, builds the target (3DPW/3DHP)
        and source (H36M) dataloaders, and the loss criteria.
        """
        # prepare
        self.options = options
        self.exppath = osp.join(self.options.expdir, self.options.name)
        self.summary_writer = SummaryWriter(self.exppath)
        self.device = torch.device('cuda')
        # set seed
        seed_everything(self.options.seed)
        # build model and optimizer
        model = create_model()
        # using the tool of learn2learn to realize bilevel optimization
        if self.options.use_bilevel:
            self.model = l2l.algorithms.MAML(model, lr=self.options.metalr, first_order=False).to(self.device)
        else:
            self.model = model.to(self.device)
        # create a teacher model, whose initial weight is the copy of base model
        if self.options.use_meanteacher:
            ema_model = create_model(ema=True) # teacher model
            self.ema_model = ema_model.to(self.device)
        self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.options.lr, betas=(self.options.beta1, self.options.beta2))
        print('model & optimizer are set.')
        # load pretrained model (base model)
        checkpoint = torch.load(self.options.model_file)
        self.modeldict_copy = checkpoint['model']
        # strip the DataParallel 'module.' prefix before loading
        checkpoint['model'] = {k.replace('module.',''):v for k,v in checkpoint['model'].items()}
        self.model.load_state_dict(checkpoint['model'], strict=True)
        if self.options.use_meanteacher:
            checkpoint['model'] = {k.replace('module.',''):v for k,v in checkpoint['model'].items()}
            self.ema_model.load_state_dict(checkpoint['model'], strict=True)
        print('pretrained CKPT has been load')
        # build dataloders
        if '3dpw' in self.options.dataset_name:
            # 3dpw
            self.pw3d_dataset = PW3D(self.options, '3dpw', num_aug=self.options.num_augsamples)
            self.pw3d_dataloader = DataLoader(self.pw3d_dataset, batch_size=1, shuffle=False, num_workers=8)
        elif 'mpi-inf' in self.options.dataset_name:
            # 3DHP
            # NOTE(review): stored under the pw3d_* attribute names even for
            # 3DHP; downstream code only uses self.pw3d_dataloader.
            self.pw3d_dataset = HP3D(self.options, 'mpi-inf-3dhp', num_aug=self.options.num_augsamples)
            self.pw3d_dataloader = DataLoader(self.pw3d_dataset, batch_size=1, shuffle=False, num_workers=8)
        # h36m
        self.h36m_dataset = H36M(self.options, 'h36m', num_aug=self.options.num_augsamples)
        self.h36m_dataloader = DataLoader(self.h36m_dataset, batch_size=1, shuffle=False, num_workers=8) #self.options.batch_size, shuffle=False, num_workers=8)
        print('dataset has been created')
        # prepare criterion functions
        self.criterion_regr = nn.MSELoss().to(self.device)
        self.criterion_keypoints = nn.MSELoss(reduction='none').to(self.device)
        self.criterion_consistent = nn.MSELoss().to(self.device)
        self.criterion_poseprior = MaxMixturePrior(prior_folder='data',
                                                   num_gaussians=8,
                                                   dtype=torch.float32).to(self.device)
        print('loss funtion has been created')
### helper functions
def decode_smpl_params(self, rotmats, betas, cam, neutral=True, pose2rot=False):
if neutral:
smpl_out = smpl_neutral(betas=betas, body_pose=rotmats[:,1:], global_orient=rotmats[:,0].unsqueeze(1), pose2rot=pose2rot)
return {'s3d': smpl_out.joints, 'vts': smpl_out.vertices}
def set_dropout_eval(self, m):
classname = m.__class__.__name__
if classname.find('Dropout') != -1:
# print('freezing: {}'.format(classname))
m.eval()
def freeze_dropout(self,):
self.model.apply(self.set_dropout_eval)
if self.options.use_meanteacher:
self.ema_model.apply(self.set_dropout_eval)
### helper functions end
def inference(self):
joint_mapper_h36m = constants.H36M_TO_J17 if self.options.dataset_name == 'mpi-inf-3dhp' else constants.H36M_TO_J14
joint_mapper_gt = constants.J24_TO_J17 if self.options.dataset_name == 'mpi-inf-3dhp' else constants.J24_TO_J14
# build human 3.6m loader if using the source data during online adaptation.
if self.options.use_mixtrain:
h36m_loader = iter(self.h36m_dataloader)
# if use the motion loss, we create a dict to save the previous images and its 2D keypoints.
if self.options.use_motionloss:
self.history_info = {}
mpjpe, pampjpe, pck = [], [], []
self.global_step = 0
h36m_batch = None
for step, pw3d_batch in tqdm(enumerate(self.pw3d_dataloader), total=len(self.pw3d_dataloader)):
# for each arrived frames, we first adapt the history model, and then use the adapted model to estimate the human mesh.
self.global_step = step
# move test data to the gpu device
pw3d_batch = {k: v.to(self.device) if isinstance(v, torch.Tensor) else v for k,v in pw3d_batch.items()}
# load source data, and move them to the gpu device
if self.options.use_mixtrain:
# load h36m data
try:
h36m_batch = next(h36m_loader)
except StopIteration:
h36m_loader = iter(self.h36m_dataloader)
h36m_batch = next(h36m_loader)
h36m_batch = {k: v.to(self.device) if isinstance(v, torch.Tensor) else v for k,v in h36m_batch.items()}
# set model to the training mode
self.model.train()
if self.options.use_meanteacher:
self.ema_model.train()
# during adaptation, we don't use dropout
self.freeze_dropout()
# Step1. begin online adaptation
# T = 1 in our experiments.
T = self.options.T
for i in range(T):
self.optimizer.zero_grad()
adaptation_loss = self.meta_adapt(pw3d_batch, h36m_batch)
adaptation_loss.backward()
self.optimizer.step()
# exponential moving averge update. (teacher model)
if self.options.use_meanteacher:
update_ema_variables(self.model, self.ema_model, self.options.ema_decay, self.global_step)
# Step2. begin test
eval_res = self.test(pw3d_batch, joint_mapper_gt, joint_mapper_h36m)
mpjpe.append(eval_res['mpjpe'])
pampjpe.append(eval_res['pa-mpjpe'])
pck.append(eval_res['pck'])
print('=== Final Results ===')
print('MPJPE:', np.mean(mpjpe)*1000)
print('PAMPJPE:', np.mean(pampjpe)*1000)
print('PCK:', pck.mean()*100)
mpjpe = np.stack(mpjpe)
pampjpe = np.stack(pampjpe)
pck = np.stack(pck)
np.save(osp.join(self.exppath, 'mpjpe'), mpjpe)
np.save(osp.join(self.exppath, 'pampjpe'), pampjpe)
np.save(osp.join(self.exppath, 'pck'), pck)
def meta_adapt(self, unlabeled_batch, labeled_batch=None):
# lower-level weight probe
if self.options.use_bilevel:
learner = self.model.clone()
total_loss = self.adaptation(learner, unlabeled_batch, labeled_batch, use_motionloss=False, use_consistentLoss=False)
if self.options.use_bilevel:
learner.adapt(total_loss)
# upper-level model update
total_loss = self.adaptation(learner, unlabeled_batch, labeled_batch, use_motionloss=self.options.use_motionloss, use_consistentLoss=True, only_use_motionloss=self.options.only_use_motionloss)
return total_loss
def adaptation(self, learner, unlabeled_batch, labeled_batch=None, use_motionloss=False, use_consistentLoss=False, only_use_motionloss=False):
# adapt unlabeled data, short for udata
if self.options.dataset_name == '3dpw':
uimage, us2d = unlabeled_batch['img'].squeeze(0), unlabeled_batch['smpl_j2ds'].squeeze(0)
elif self.options.dataset_name == 'mpi-inf-3dhp':
uimage, us2d = unlabeled_batch['img'].squeeze(0), unlabeled_batch['keypoints'].squeeze(0)
if use_motionloss:
# if consider motion loss, we need store history data.
history_idx = self.global_step - self.options.prev_n
if history_idx > 0:
hist_uimage, hist_us2d = self.history_info[history_idx]['image'].to(self.device),\
self.history_info[history_idx]['s2d'].to(self.device)
else:
hist_uimage, hist_us2d = None, None
unlabelloss = self.adapt_for_unlabeled_data(learner, uimage, us2d, hist_uimage, hist_us2d,use_consistentLoss=use_consistentLoss,only_use_motionloss=only_use_motionloss)
self.history_info[self.global_step] = {'image': uimage.clone().detach().cpu(), 's2d': us2d.clone().detach().cpu()}
if labeled_batch is not None:
# update for labeled data
h36image, h36s3d, h36s2d, h36beta, h36pose = labeled_batch['img'].squeeze(0),\
labeled_batch['pose_3d'].squeeze(0),\
labeled_batch['keypoints'].squeeze(0),\
labeled_batch['betas'].squeeze(0),\
labeled_batch['pose'].squeeze(0)
labelloss = self.adapt_for_labeled_data(learner, h36image, h36s3d, h36s2d, h36beta, h36pose)
return unlabelloss + labelloss * self.options.labelloss_weight
else:
return unlabelloss
    def adapt_for_unlabeled_data(self, learner, image, gt_s2d, hist_image=None, hist_s2d=None, use_consistentLoss=False, only_use_motionloss=False):
        """Unsupervised loss on a target frame.

        Combines: confidence-weighted 2D reprojection loss against the
        detected keypoints, SMPL shape prior, GMM pose prior, and optionally
        a motion loss against a history frame and a mean-teacher consistency
        loss.  NOTE(review): the ``only_use_motionloss`` argument is accepted
        but never read in this body — verify whether it was meant to gate the
        non-motion terms.
        """
        batch_size = image.shape[0]
        pred_rotmat, pred_betas, pred_cam = learner(image)
        # convert it to smpl verts and keypoints
        pred_smpl_items = self.decode_smpl_params(pred_rotmat, pred_betas, pred_cam, neutral=True)
        pred_s3ds = pred_smpl_items['s3d']
        pred_vts = pred_smpl_items['vts']
        # project 3d kp to 2d kp
        # weak-perspective cam (s, tx, ty) -> full translation; depth from scale
        pred_cam_t = torch.stack([pred_cam[:,1],
                                  pred_cam[:,2],
                                  2*constants.FOCAL_LENGTH/(self.options.img_res * pred_cam[:,0] +1e-9)],dim=-1)
        camera_center = torch.zeros(batch_size, 2, device=self.device)
        pred_s2d = perspective_projection(pred_s3ds,
                                          rotation=torch.eye(3, device=self.device).unsqueeze(0).expand(batch_size, -1, -1),
                                          translation=pred_cam_t,
                                          focal_length=constants.FOCAL_LENGTH,
                                          camera_center=camera_center)
        # normalized to [-1,1]
        pred_s2d = pred_s2d / (self.options.img_res / 2.)
        # cal kp2d loss
        s2ds_loss = self.cal_s2ds_loss(pred_s2d, gt_s2d)
        # cal prior loss
        shape_prior_loss = self.shape_prior(pred_betas)
        pose_prior_losses = self.pose_prior(pred_rotmat, pred_betas, gmm_prior=True)
        gmm_prior_loss = pose_prior_losses['gmm']
        loss = s2ds_loss * self.options.s2dsloss_weight +\
               shape_prior_loss * self.options.shapepriorloss_weight +\
               gmm_prior_loss * self.options.gmmpriorloss_weight
        if hist_image is not None and hist_s2d is not None:
            # re-run the learner on the history frame and compare keypoint
            # displacement between the two time steps (motion loss)
            pred_hist_rotmat, pred_hist_betas, pred_hist_cam = learner(hist_image)
            pred_hist_smpl_items = self.decode_smpl_params(pred_hist_rotmat, pred_hist_betas, pred_hist_cam, neutral=True)
            pred_hist_s3ds = pred_hist_smpl_items['s3d']
            pred_hist_vts = pred_hist_smpl_items['vts']
            # project 3d kp to 2d kp
            pred_hist_cam_t = torch.stack([pred_hist_cam[:,1],
                                           pred_hist_cam[:,2],
                                           2*constants.FOCAL_LENGTH/(self.options.img_res * pred_hist_cam[:,0] +1e-9)],dim=-1)
            camera_center = torch.zeros(batch_size, 2, device=self.device)
            pred_hist_s2d = perspective_projection(pred_hist_s3ds,
                                                   rotation=torch.eye(3, device=self.device).unsqueeze(0).expand(batch_size, -1, -1),
                                                   translation=pred_hist_cam_t,
                                                   focal_length=constants.FOCAL_LENGTH,
                                                   camera_center=camera_center)
            # normalized to [-1,1]
            pred_hist_s2d = pred_hist_s2d / (self.options.img_res / 2.)
            motion_loss = self.cal_motion_loss(pred_s2d, pred_hist_s2d, gt_s2d, hist_s2d)
            loss = loss + motion_loss * self.options.motionloss_weight
        if use_consistentLoss and self.options.use_meanteacher:
            # cal consistent loss (student vs EMA teacher on the same frame)
            ema_rotmat, ema_betas, ema_cam = self.ema_model(image)
            consistent_loss = self.cal_consistent_constrain(pred_rotmat, pred_betas, pred_cam, ema_rotmat, ema_betas, ema_cam)
            loss = loss + consistent_loss * self.options.consistentloss_weight
        return loss
    def adapt_for_labeled_data(self, learner, gtimage, gts3d, gts2d, gtbetas, gtpose):
        """Supervised loss on a labelled (H36M) batch.

        Combines root-aligned 3D joint loss, confidence-weighted 2D
        reprojection loss, and SMPL parameter (rotation + beta) losses with
        the same fixed weights as SPIN (5 / 5 / 1 / 0.001).

        NOTE(review): the ``learner`` parameter is ignored — the forward pass
        below uses ``self.model``; in bilevel mode this bypasses the probed
        learner.  Verify whether that is intentional.
        """
        batchsize = gtimage.shape[0]
        # forward
        pred_rotmat, pred_betas, pred_cam = self.model(gtimage)
        # convert it to smpl verts and keypoints
        pred_smpl_items = self.decode_smpl_params(pred_rotmat, pred_betas, pred_cam, neutral=True)
        pred_s3ds = pred_smpl_items['s3d']
        pred_vts = pred_smpl_items['vts']
        # project 3d skeleton to image space, and then rescale to [-1,1] and calculate 2k kp reporjection loss
        pred_cam_t = torch.stack([pred_cam[:,1],
                                  pred_cam[:,2],
                                  2*constants.FOCAL_LENGTH/(self.options.img_res * pred_cam[:,0] +1e-9)],dim=-1)
        camera_center = torch.zeros(batchsize, 2, device=self.device)
        pred_s2d = perspective_projection(pred_s3ds,
                                          rotation=torch.eye(3, device=self.device).unsqueeze(0).expand(batchsize, -1, -1),
                                          translation=pred_cam_t,
                                          focal_length=constants.FOCAL_LENGTH,
                                          camera_center=camera_center)
        # Normalize keypoints to [-1,1]
        pred_s2d = pred_s2d / (self.options.img_res / 2.)
        s2ds_loss = self.cal_s2ds_loss(pred_s2d, gts2d)
        s3d_loss = self.cal_s3ds_loss(pred_s3ds, gts3d)
        # smpl loss: convert axis-angle GT pose to rotation matrices first
        gt_rotmat = batch_rodrigues(gtpose.view(-1,3)).view(-1, 24, 3, 3)
        loss_pose = self.criterion_regr(pred_rotmat, gt_rotmat)
        loss_beta = self.criterion_regr(pred_betas, gtbetas)
        # we use the same setting with SPIN
        loss = s3d_loss * 5. + s2ds_loss * 5 + loss_pose * 1. + loss_beta * 0.001
        return loss
    def test(self, databatch, joint_mapper_gt, joint_mapper_h36m):
        """Evaluate the current model on one arrived batch.

        For 3DPW, ground-truth 3D joints are regressed from gendered SMPL
        meshes; for H36M/3DHP they come directly from the batch.  Both GT and
        prediction are pelvis-centred before computing the metrics.

        Returns a dict with per-sample 'mpjpe', 'pa-mpjpe' and 'pck' arrays.
        """
        if '3dpw' in self.options.dataset_name:
            gt_pose = databatch['oripose']
            gt_betas = databatch['oribeta']
            gender = databatch['gender']
        with torch.no_grad():
            # set model to evaluation mode
            self.model.eval()
            # forward
            oriimages = databatch['oriimg']
            pred_rotamt, pred_betas, pred_cam = self.model(oriimages)
            pred_smpl_out = self.decode_smpl_params(pred_rotamt, pred_betas, pred_cam, neutral=True)
            pred_vts = pred_smpl_out['vts']
            # get 14 gt joints, J_regressor maps mesh to 3D keypoints.
            J_regressor_batch = J_regressor[None, :].expand(pred_vts.shape[0], -1, -1).to(self.device)
            if 'h36m' in self.options.dataset_name or 'mpi-inf' in self.options.dataset_name:
                # GT 3D joints provided directly (last column dropped)
                gt_keypoints_3d = databatch['oripose_3d']
                gt_keypoints_3d = gt_keypoints_3d[:, joint_mapper_gt, :-1]
            else:
                # 3DPW: rebuild GT meshes with the correct gender, then regress joints
                gt_vertices = smpl_male(global_orient=gt_pose[:,:3], body_pose=gt_pose[:,3:], betas=gt_betas).vertices
                gt_vertices_female = smpl_female(global_orient=gt_pose[:,:3], body_pose=gt_pose[:,3:], betas=gt_betas).vertices
                gt_vertices[gender==1, :, :] = gt_vertices_female[gender==1, :, :]
                gt_keypoints_3d = torch.matmul(J_regressor_batch, gt_vertices)
                gt_pelvis = gt_keypoints_3d[:, [0],:].clone()
                gt_keypoints_3d = gt_keypoints_3d[:, joint_mapper_h36m, :]
                gt_keypoints_3d = gt_keypoints_3d - gt_pelvis
            # Get 14 predicted joints from the mesh (pelvis-centred)
            pred_keypoints_3d = torch.matmul(J_regressor_batch, pred_vts)
            pred_pelvis = pred_keypoints_3d[:, [0],:].clone()
            pred_keypoints_3d = pred_keypoints_3d[:, joint_mapper_h36m, :]
            pred_keypoints_3d = pred_keypoints_3d - pred_pelvis
            # calculate metrics
            # 1. MPJPE
            error = torch.sqrt(((pred_keypoints_3d - gt_keypoints_3d) ** 2).sum(dim=-1)).mean(dim=-1).cpu().numpy()
            # 2. PA-MPJPE and PCK
            r_error, pck_error = reconstruction_error(pred_keypoints_3d.cpu().numpy(), gt_keypoints_3d.cpu().numpy(), needpck=True, reduction=None)
            return {'mpjpe': error, 'pa-mpjpe': r_error, 'pck': pck_error}
##########
# the following is the loss functions
##########
## -- motion loss
def cal_motion_loss(self, pred_kps_t, pred_kps_n, gt_kps_t, gt_kps_n):
"""
pred_kps_t: (B, 49, 2), at time t
pred_kps_n: (B, 49, 2), at time t-n
gt_kps_t : (B, 49, 3), at time t
gt_kps_n : (B, 49, 3), at time t-n
"""
motion_pred = pred_kps_t[:,25:] - pred_kps_n[:,25:]
motion_gt = gt_kps_t[:,25:,:-1] - gt_kps_n[:,25:,:-1]
motion_loss = self.criterion_regr(motion_pred, motion_gt)
return motion_loss
## -- motion loss end
## -- consistent loss
def cal_consistent_constrain(self, pred_rotmat, pred_betas, pred_cam, ema_rotmat, ema_betas, ema_cam):
batchsize = pred_rotmat.shape[0]
# convert it to smpl verts and keypoints
pred_smpl_items = self.decode_smpl_params(pred_rotmat, pred_betas, pred_cam, neutral=True)
pred_s3ds = pred_smpl_items['s3d']
pred_vts = pred_smpl_items['vts']
# convert it to smpl verts and keypoints
ema_smpl_items = self.decode_smpl_params(ema_rotmat, ema_betas, ema_cam, neutral=True)
ema_s3ds = ema_smpl_items['s3d']
ema_vts = ema_smpl_items['vts']
# project 3d skeleton to image space, and then rescale to [-1,1] and calculate 2k kp reporjection loss
pred_cam_t = torch.stack([pred_cam[:,1],
pred_cam[:,2],
2*constants.FOCAL_LENGTH/(self.options.img_res * pred_cam[:,0] +1e-9)],dim=-1)
camera_center = torch.zeros(batchsize, 2, device=self.device)
pred_s2d = perspective_projection(pred_s3ds,
rotation=torch.eye(3, device=self.device).unsqueeze(0).expand(batchsize, -1, -1),
translation=pred_cam_t,
focal_length=constants.FOCAL_LENGTH,
camera_center=camera_center)
# Normalize keypoints to [-1,1]
pred_s2d = pred_s2d / (self.options.img_res / 2.)
# project 3d skeleton to image space, and then rescale to [-1,1] and calculate 2k kp reporjection loss
ema_cam_t = torch.stack([ema_cam[:,1],
ema_cam[:,2],
2*constants.FOCAL_LENGTH/(self.options.img_res * pred_cam[:,0] +1e-9)],dim=-1)
camera_center = torch.zeros(batchsize, 2, device=self.device)
ema_s2d = perspective_projection(ema_s3ds,
rotation=torch.eye(3, device=self.device).unsqueeze(0).expand(batchsize, -1, -1),
translation=ema_cam_t,
focal_length=constants.FOCAL_LENGTH,
camera_center=camera_center)
# Normalize keypoints to [-1,1]
ema_s2d = ema_s2d / (self.options.img_res / 2.)
s2ds_loss = self.cal_s2ds_loss_for_mt(pred_s2d, ema_s2d)
s3d_loss = self.cal_s3ds_loss_for_mt(pred_s3ds, ema_s3ds)
# smpl loss
# gt_rotmat = batch_rodrigues(gtpose.view(-1,3)).view(-1, 24, 3, 3)
loss_pose = self.criterion_regr(pred_rotmat, ema_rotmat)
loss_beta = self.criterion_regr(pred_betas, ema_betas)
# loss = s3d_loss * 5. + s2ds_loss * 5 + loss_pose * 1. + loss_beta * 0.001
loss = s3d_loss * self.options.consistent_s3d_weight + s2ds_loss * self.options.consistent_s2d_weight +\
loss_pose * self.options.consistent_pose_weight + loss_beta * self.options.consistent_beta_weight
return loss
def cal_s3ds_loss_for_mt(self, pred_s3d, gt_s3d):
"""
pred_s3d: (B, 49, 3)
gt_s3d: (B, 49, 4)
"""
# conf = gt_s3d[:,:,-1].unsqueeze(-1).clone()
gt_s3d = gt_s3d[:,25:]
pred_s3d = pred_s3d[:,25:]
# align the root
gt_hip = (gt_s3d[:,2] + gt_s3d[:,3]) / 2
gt_s3d = gt_s3d - gt_hip[:,None,:]
pred_hip = (pred_s3d[:,2] + pred_s3d[:,3]) / 2
pred_s3d = pred_s3d - pred_hip[:,None,:]
# print(pred_s3d.shape, gt_s3d.shape, conf.shape)
loss = (self.criterion_keypoints(pred_s3d, gt_s3d)).mean()
return loss
def cal_s2ds_loss_for_mt(self, pred_s2d, gt_s2d):
"""
pred_s2d: (B, 49, 2)
gt_s2d: (B, 49, 3)
only calculate the later 24 joints, i.e., 25:
"""
# conf = gt_s2d[:,25:,-1].unsqueeze(-1).clone()
loss = (self.criterion_keypoints(pred_s2d[:,25:], gt_s2d[:,25:])).mean()
return loss
## -- consistent loss end
def cal_s3ds_loss(self, pred_s3d, gt_s3d):
"""
pred_s3d: (B, 49, 3)
gt_s3d: (B, 49, 4)
"""
conf = gt_s3d[:,:,-1].unsqueeze(-1).clone()
# gt_s3d = gt_s3d[:,25:]
pred_s3d = pred_s3d[:,25:]
# align the root
gt_hip = (gt_s3d[:,2] + gt_s3d[:,3]) / 2
gt_s3d = gt_s3d - gt_hip[:,None,:]
pred_hip = (pred_s3d[:,2] + pred_s3d[:,3]) / 2
pred_s3d = pred_s3d - pred_hip[:,None,:]
# print(pred_s3d.shape, gt_s3d.shape, conf.shape)
loss = (conf * self.criterion_keypoints(pred_s3d, gt_s3d[:,:,:-1])).mean()
return loss
def cal_s2ds_loss(self, pred_s2d, gt_s2d):
"""
pred_s2d: (B, 49, 2)
gt_s2d: (B, 49, 3)
only calculate the later 24 joints, i.e., 25:
"""
conf = gt_s2d[:,25:,-1].unsqueeze(-1).clone()
loss = (conf * self.criterion_keypoints(pred_s2d[:,25:], gt_s2d[:,25:, :-1])).mean()
return loss
def shape_prior(self, betas):
shape_prior_loss = (betas ** 2).sum(dim=-1).mean()
return shape_prior_loss
def pose_prior(self, pose, betas, angle_prior=False, gmm_prior=False):
loss_items = {}
body_pose = rotation_matrix_to_angle_axis(pose[:,1:].contiguous().view(-1,3,3)).contiguous().view(-1, 69)
assert body_pose.shape[0] == pose.shape[0]
if gmm_prior:
pose_prior_loss = self.criterion_poseprior(body_pose, betas).mean()
loss_items['gmm'] = pose_prior_loss
if angle_prior:
constant = torch.tensor([1., -1., -1, -1.]).to(self.device)
angle_prior_loss = torch.exp(body_pose[:, [55-3, 58-3, 12-3, 15-3]] * constant) ** 2
loss_items['angle'] = angle_prior_loss
return loss_items | [
"noreply@github.com"
] | noreply@github.com |
3e103b2a850638d5d07d6e6ac73cc1650f6650cf | 82e74e1fccf0c83f45063bd781e768abaa9a01c0 | /credentials.py | d9be023de8e6fdc1bedba57db1025ef54f095754 | [
"Apache-2.0"
] | permissive | msgpo/amzn-music-skill | c966c29fad5ea8abdc7b61b732474fbacca513e2 | 418e5ddc37730dbb7fcd83cd38898459b1bb4827 | refs/heads/master | 2022-02-19T12:46:23.791937 | 2019-09-17T08:29:28 | 2019-09-17T08:29:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 457 | py | #!/usr/bin/env python
import base64
import pickle
from getpass import getpass
# Python 2 compatibility: make input() behave like raw_input() (return str).
try:
    input = raw_input
except NameError:
    pass
# Prompt for credentials; getpass hides the password while typing.
# SECURITY NOTE(review): base64 is reversible obfuscation, NOT encryption —
# the stored file effectively contains the plain credentials, and the pickle
# is written to a world-known hardcoded path.
e = base64.b64encode(str(input('Login/Email: ')).encode('ascii'))
p = base64.b64encode(getpass('Password (input is hidden): ').encode('ascii'))
credentials = {'e': e, 'p': p}
# Persist the credential dict for the skill to load later.
with open('/opt/mycroft/skills/amzn-music-skill.domcross/credentials.store', 'wb') as f:
    pickle.dump(credentials, f, pickle.HIGHEST_PROTOCOL)
| [
"dkreutz@t-online.de"
] | dkreutz@t-online.de |
bd7f88508e67dbfcf5ecffbf0562f7a05eb1619b | e49a07ad215172e9c82cb418b10371bf0ce1c0f7 | /第1章 python基础/Python基础08/10-异常传递.py | a53af9d709038f15ce320e9490696f4377f4e232 | [] | no_license | taogangshow/python_Code | 829c25a7e32ead388c8b3ffa763cb9cf587bfd7b | 4b3d6992ec407d6069f3187ca7e402a14d863fff | refs/heads/master | 2022-12-16T01:26:17.569230 | 2018-11-16T10:07:59 | 2018-11-16T10:07:59 | 157,832,985 | 0 | 1 | null | 2022-11-25T09:55:32 | 2018-11-16T08:00:13 | Python | UTF-8 | Python | false | false | 405 | py | def test1():
print("---test1-1---")
print(num)
print("---test1-2---")
def test2():
    # Demo: test1() references the undefined name `num`, so it raises
    # NameError; the exception propagates through this frame, meaning
    # "---test2-2---" is never printed.
    print("---test2-1---")
    test1()
    print("---test2-2---")
def test3():
    """Demo: catch the exception raised inside test1() (undefined `num`),
    so "---test3-2---" is skipped and the error message is printed instead."""
    try:
        print("---test3-1---")
        test1()
        print("---test3-2---")
    except Exception as err:
        print("捕获到了异常,信息是:%s"%err)
test3()  # exception from test1 is caught inside test3, program continues
print("---华丽的分割线---")
test2()  # exception propagates out of test2 -> uncaught traceback here
"cdtaogang@163.com"
] | cdtaogang@163.com |
a29d2ab349ac923e658f543633ecbc7e9198ccb7 | df8c8fe6349cb649f941835944c4cc69beb9ae00 | /chapter 6/bookmarks/actions/models.py | 9d7ff1981b98a1c63cfc0d4560097f3707786e2c | [] | no_license | ravikings/myshop | b1ea8605158939be9bcca6f1c3db97324149b135 | f56c2573dcef15dfe95c6aa271c71125a12cbae8 | refs/heads/master | 2023-01-19T09:00:46.891087 | 2020-11-23T23:01:57 | 2020-11-23T23:01:57 | 315,460,988 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,086 | py | from django.db import models
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes.fields import GenericForeignKey
# Create your models here.
class Action(models.Model):
    """Activity-stream entry: a user performed some verb, optionally on a
    target object referenced through a generic foreign key (content type +
    object id).  Newest actions first (see Meta.ordering)."""
    # who performed the action
    user = models.ForeignKey('auth.User',
                             related_name='actions',
                             db_index=True,
                             on_delete=models.CASCADE)
    # short description of what was done, e.g. "bookmarked image"
    verb = models.CharField(max_length=255)
    # generic relation: content type + primary key of the acted-on object
    target_ct = models.ForeignKey(ContentType,
                                  blank=True,
                                  null=True,
                                  related_name='target_obj',
                                  on_delete=models.CASCADE)
    target_id = models.PositiveIntegerField(null=True,
                                            blank=True,
                                            db_index=True)
    target = GenericForeignKey('target_ct', 'target_id')
    # set once at creation; indexed because the default ordering uses it
    created = models.DateTimeField(auto_now_add=True,
                                   db_index=True)
    class Meta:
        ordering = ('-created',)
"39674414+ravikings@users.noreply.github.com"
] | 39674414+ravikings@users.noreply.github.com |
dc5bd990874448e61a81eef868a5fb3c6c5ba6db | 600ef5fed15109bd894d3e8c7fe7956bd9278594 | /SeleniumWebDriverTests/test_edit_allele.py | a3d661a49e8e07422d73b983e9c0ec205aefaade | [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | phac-nml/ngstar | f7bc702e6a311bf81a0391776e808ba9ac70b949 | 34f9c8d4d40fa0e9836071fabab19114f0ac399a | refs/heads/master | 2021-01-09T05:27:09.285010 | 2017-02-02T21:54:11 | 2017-02-02T21:54:11 | 80,771,682 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 54,354 | py | import constants
import unittest
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
from selenium.webdriver.support.wait import WebDriverWait
CLASS_NAME = "TestCaseEditAllele"
class TestCaseEditAllele(unittest.TestCase):
DRIVER=None
@classmethod
def setUpClass(cls):
cls.PASSWORD_TEXTBOX_ID = "password"
cls.ADD_ALLELE_TYPE_TEXTBOX_ID = "allele_type"
cls.ADD_ALLELE_SEQ_TEXTBOX_ID = "allele_sequence"
cls.ADD_ALLELE_SUCCESS_MSG = "Allele submitted successfully!"
cls.ALLELE_LIST_ALERT_ID = "allele_list_alert"
cls.ALLELE_QUERY_SUBMIT_BTN_CSS_SEL = "button[type='submit'][value='allele']"
cls.ALLELE_QUERY_TEXTBOX_IDS = {"penA": "seq0",
"mtrR": "seq1",
"porB": "seq2",
"ponA": "seq3",
"gyrA": "seq4",
"parC": "seq5",
"23S" : "seq6"}
cls.ALLELE_SEQ_EXISTS = "The sequence you have submitted already exists for the loci"
cls.ALLELE_TYPE_CHAR_ERROR = "Please enter a valid type. This field can only contain numbers and decimals"
cls.ALLELE_TYPE_DUPLICATE ="Please enter a different allele type."
cls.ALLELE_TYPE_EMPTY = "Please enter an allele type"
cls.ALERT_ID = "errorModal"
cls.DELETE_ALLELE_BTN_CSS_SEL = "delete-allele"
cls.DELETE_ALLELE_ALERT_BTN_ID = "delete-ok"
cls.DELETE_ALLELE_SUCCESS_MSG = "Allele deleted successfully!"
cls.EDIT_ALLELE_BTN_CSS_SEL = "button[type='button'][name='option'][value='edit']"
cls.HELP_BLOCK_NAME = "help-block"
cls.SEQUENCE_CHAR_ERROR ="Please enter a valid sequence. This field can only contain the following letters [ A , T , C , G ]"
cls.SEQUENCE_DUPLICATE ="The sequence you have submitted already exists"
cls.SEQUENCE_EMPTY = "Please enter a sequence"
cls.SIGN_IN_ALERT_ID = "sign_in_alert"
cls.SIGN_IN_BTN_NAME = "Sign In"
cls.SIGN_IN_SUCCESS_VAL = "You have successfully signed in!"
cls.SIGN_OUT_BTN_NAME = "Sign Out"
cls.SUBMIT_BTN_CSS_SEL = "button[type='submit']"
cls.SUBMIT_BTN_ID = "submit"
cls.USERNAME_TEXTBOX_ID = "username"
cls.loci_names = ["penA",
"mtrR",
"porB",
"ponA",
"gyrA",
"parC",
"23S"]
cls.allele_types = {"penA": "0.000",
"mtrR": "1",
"porB": "2",
"ponA": "3",
"gyrA": "4",
"parC": "5",
"23S" : "6"}
cls.allele_types_additional = {"penA": "2.000",
"mtrR": "3",
"porB": "4",
"ponA": "5",
"gyrA": "6",
"parC": "7",
"23S" : "8"}
cls.allele_types_edited = {"penA": "1.000",
"mtrR": "2",
"porB": "3",
"ponA": "4",
"gyrA": "5",
"parC": "6",
"23S" : "7"}
cls.allele_types_radio_btns = {"penA": "0.000",
"mtrR": "1",
"porB": "2",
"ponA": "3",
"gyrA": "4",
"parC": "5",
"23S" : "6"}
cls.allele_types_clear_db = {"penA": "0.000",
"mtrR": "1",
"porB": "2",
"ponA": "3",
"gyrA": "4",
"parC": "5",
"23S" : "6"}
cls.allele_types_clear_db_edited = {"penA": "1.000",
"mtrR": "2",
"porB": "3",
"ponA": "4",
"gyrA": "5",
"parC": "6",
"23S" : "7"}
cls.allele_types_clear_db_additional = {"penA": "2.000",
"mtrR": "3",
"porB": "4",
"ponA": "5",
"gyrA": "6",
"parC": "7",
"23S" : "8"}
cls.negative_allele_types = {"penA": "0.000",
"mtrR": "1",
"porB": "2",
"ponA": "3",
"gyrA": "4",
"parC": "5",
"23S" : "6"}
cls.invalid_allele_types = {"penA": "0.00a",
"mtrR": "$1",
"porB": "2&",
"ponA": "#",
"gyrA": "*",
"parC": "!",
"23S" : "%#@"}
cls.empty_allele_types = {"penA": "",
"mtrR": "",
"porB": "",
"ponA": "",
"gyrA": "",
"parC": "",
"23S" : ""}
cls.sequences = {
"penA": "ATGTTGATTAAAAGCGAATATAAGCCCCGGATGCTGCCCAAAGAAGAGCAGGTCAAAAAGCCGATGACCAGTAACGGACGGATTAGCTTCGTCCTGATGGCAATGGCGGTCTTGTTTGCCTGTCTGATTGCCCGCGGGCTGTATCTGCAGACGGTAACGTATAACTTTTTGAAAGAACAGGGCGACAACCGGATTGTGCGGACTCAAGCATTGCCGGCTACACGCGGTACGGTTTCGGACCGGAACGGTGCGGTTTTGGCGTTGAGCGCGCCGACGGAGTCCCTGTTTGCCGTGCCTAAAGATATGAAGGAAATGCCGTCTGCCGCCCAATTGGAACGCCTGTCCGAGCTTGTCGATGTGCCGGTCGATGTTTTGAGGAACAAACTCGAACAGAAAGGCAAGTCGTTTATTTGGATCAAGCGGCAGCTCGATCCCAAGGTTGCCGAAGAGGTCAAAGCCTTGGGTTTGGAAAACTTTGTATTTGAAAAAGAATTAAAACGCCATTACCCGATGGGCAACCTGTTTGCACACGTCATCGGATTTACCGATATTGACGGCAAAGGTCAGGAAGGTTTGGAACTTTCGCTTGAAGACAGCCTGTATGGCGAAGACGGCGCGGAAGTTGTTTTGCGGGACCGGCAGGGCAATATTGTGGACAGCTTGGACTCCCCGCGCAATAAAGCACCGCAAAACGGCAAAGACATCATCCTTTCCCTCGATCAGAGGATTCAGACCTTGGCCTATGAAGAGTTGAACAAGGCGGTCGAATACCATCAGGCAAAAGCCGGAACGGTGGTGGTTTTGGATGCCCGCACGGGGGAAATCCTCGCCTTGGCCAATACGCCCGCCTACGATCCCAACAGACCCGGCCGGGCAGACAGCGAACAGCGGCGCAACCGTGCCGTAACCGATATGATCGAACCTGGTTCGGCAATCAAACCGTTCGTGATTGCGAAGGCATTGGATGCGGGCAAAACCGATTTGAACGAACGGCTGAATACGCAGCCTTATAAAATCGGACCGTCTCCCGTGCGCGATACCCATGTTTACCCCTCTTTGGATGTGCGCGGCATTATGCAGAAATCGTCCAACGTCGGCACAAGCAAACTGTCTGCGCGTTTCGGCGCCGAAGAAATGTATGACTTCTATCATGAATTGGGCATCGGTGTGCGTATGCACTCGGGCTTTCCGGGGGAAACTGCAGGTTTGTTGAGAAATTGGCGCAGGTGGCGGCCCATCGAACAGGCGACGATGTCTTTCGGTTACGGTCTGCAATTGAGCCTGCTGCAATTGGCGCGCGCCTATACCGCACTGACGCACGACGGCGTTTTGCTGCCGCTCAGCTTTGAGAAGCAGGCGGTTGCGCCGCAAGGCAAACGCATATTCAAAGAATCGACCGCGCGCGAGGTACGCAATCTGATGGTTTCCGTAACCGAGCCGGGCGGCACCGGTACGGCGGGTGCGGTGGACGGTTTCGATGTCGGCGCTAAAACCGGCACGGCGCGCAAGTTCGTCAACGGGCGTTATGCCGACAACAAACACGTCGCTACCTTTATCGGTTTTGCCCCCGCCAAAAACCCCCGTGTGATTGTGGCGGTAACCATCGACGAACCGACTGCCCACGGCTATTACGGCGGCGTAGTGGCAGGGCCGCCCTTCAAAAAAATTATGGGCGGCAGCCTGAACATCTTGGGCATTTCCCCGACCAAGCCACTGACCGCCGCAGCCGTCAAAACACCGTCTTAA",
"mtrR": "TTGCACGGATAAAAAGTCTTTTTTTATAACCCGCCCTCGTCAAACCGACCCGAAACGAAAACGCCATTATGAGAAAAACCAAAACCGAAGCCTTGAAAACCAAAGAACACCTGATGCTTGCCGCCTTGGAAACCTTTTACCGCAAAGGGATTGCCCGCACCTCGCTCAACGAAATCGCCCAAGCCGCCGGCGTAACGCGCGGCGCGCTTTATTGGCATTTCAAAAATAAGGAAGACTTGTTCGACGCGCTGTTCCAACGTATCTGCGACGACATCGAAAACTGCATCGCGCAAGATGCCGCAGATGCCGAAGGAGGGTCTTGGGCGGTATTCCGCCACACGCTGCTGCACTTTTTCGAGCGGCTGCAAAGCAACGACATCTACTACAAATTCCACAACATCCTGTTTTTAAAATGCGAACACACGGAGCAAAACGCCGCCGTTATCGCCATTGCCCGCAAGCATCAGGCAATCTGGCGCGAGAAAATTACCGCCGTTTTGACCGAAGCGGTGGAAAATCAGGATTTGGCTGACGATTTGGACAAGGAAACGGCGGTCATCTTCATCAAATCGACGTTGGACGGGCTGATTTGGCGTTGGTTCTCTTCCGGCGAAAGTTTCGATTTGGGCAAAACCGCCCCCCGCGCATCATCGGGATAATGATGGACAACTTGGAAAACCATCCCTGCCTGCGCCGGAAATAA",
"porB": "AAAAACACCGGCGCCAACGTCAATGCTTGG",
"ponA": "AAAAACAACGGCGGGCGTTGGGCGGTGGTTCAAGAGCCGTTGCTGCAGGGGGCTTTGGTTTCGCTGGATGCAAAA",
"gyrA": "ctgtacgcgatgcacgagctgaaaaataactggaatgccgcctacaaaaaatcggcgcgcatcgtcggcgacgtcatcggtaaataccacccccacggcgattccgcagtttacgacaccatcgtccgtatggcgtaaaatttcgctatgcgttatgtgctgatagacggacagggcaacttcggatcggtggacgggcttgccgccgcagccatgcgctataccgaaatccgcatggcgaaaatctcacatgaaatgctggca",
"parC": "GTTTCAGACGGCCAAAAGCCCGTGCAGCGGCGCATTTTGTTTGCCATGCGCGATATGGGTTTGACGGCGGGGGCGAAGCCGGTGAAATCCGCGCGCGTGGTCGGCGAGATTTTGGGTAAATACCATCCGCACGGCGACAGTTCCGCCTATGAGGCGATGGTGCGCATGGCTCAGGATTTTACCTTGCGCTACCCCTTAATCGACGGCATCGGCAACTTCGGTTCGCGCGACGGCGACGGGGCGGCGGCGATGCGTTACACCGAAGCGCGGCTGACGCCGATTGCGGAATTGCTGTTGTCCGAAATCAATCAGGGGACGGTGGATTTTATGCC",
"23S" : "TAGACGGAAAGACCCCGTGAACCTTTACTGTAGCTTTGCATTGGACTTTGAAGTCACTTGTGTAGGATAGGTGGGAGGCTTGGAAGCAGAGACGCCAGTCTCTGTGGAGTCGTCCTTGAAATACCACCCTGGTGTCTTTGAGGTTCTAACCCAGACCCGTCATCCGGGTCGGGGACCGTGCATGGTAGGCAGTTTGACTGGGGCGGTCTCCTCCCAAAGCGTAACGGAGGAGTTCGAAGGTTACCTAGGTCCGGTCGGAAATCGGACTGATAGTGCAATGGCAAAAGGTAGCTTAACTGCGAGACCGACAAGTCGGGCAGGTGCGAAAGCAGGACATAGTGATCCGGTGGTTCTGTATGGAAGGGCCATCGCTCAACGGATAAAAGGTACTCCGGGGATAACAGGCTGATTCCGCCCAAGAGTTCATATCGACGGCGGAGTTTGGCACCTCGATGTCGGCTCATCACATCCTGGGGCTGTAGTCGGTCCCAAGGGTATGGCTGTTCGCCATTTAAAGTGGTACGTGAGCTGGGTTTAAAACGTCGTGAGACAGTTTGGTCCCTATCT",
}
cls.sequences_additional = {
"penA": "ATGTTGATTAAAAGCGAATATAAGCCCCGGATGCTGCCCAAAGAAGAGCAGGTCAAAAAGCCGATGACCAGTAACGGACGGATTAGCTTCGTCCTGATGGCAATGGCGGTCTTGTTTGCCTGTCTGATTGCCCGCGGGCTGTATCTGCAGACGGTAACGTATAACTTTTTGAAAGAACAGGGCGACAACCGGATTGTGCGGACTCAAGCATTGCCGGCTACACGCGGTACGGTTTCGGACCGGAACGGTGCGGTTTTGGCGTTGAGCGCGCCGACGGAGTCCCTGTTTGCCGTGCCTAAAGATATGAAGGAAATGCCGTCTGCCGCCCAATTGGAACGCCTGTCCGAGCTTGTCGATGTGCCGGTCGATGTTTTGAGGAACAAACTCGAACAGAAAGGCAAGTCGTTTATTTGGATCAAGCGGCAGCTCGATCCCAAGGTTGCCGAAGAGGTCAAAGCCTTGGGTTTGGAAAACTTTGTATTTGAAAAAGAATTAAAACGCCATTACCCGATGGGCAACCTGTTTGCACACGTCATCGGATTTACCGATATTGACGGCAAAGGTCAGGAAGGTTTGGAACTTTCGCTTGAAGACAGCCTGTATGGCGAAGACGGCGCGGAAGTTGTTTTGCGGGACCGGCAGGGCAATATTGTGGACAGCTTGGACTCCCCGCGCAATAAAGCACCGCAAAACGGCAAAGACATCATCCTTTCCCTCGATCAGAGGATTCAGACCTTGGCCTATGAAGAGTTGAACAAGGCGGTCGAATACCATCAGGCAAAAGCCGGAACGGTGGTGGTTTTGGATGCCCGCACGGGGGAAATCCTCGCCTTGGCCAATACGCCCGCCTACGATCCCAACAGACCCGGCCGGGCAGACAGCGAACAGCGGCGCAACCGTGCCGTAACCGATATGATCGAACCTGGTTCGGCAATCAAACCGTTCGTGATTGCGAAGGCATTGGATGCGGGCAAAACCGATTTGAACGAACGGCTGAATACGCAGCCTTATAAAATCGGACCGTCTCCCGTGCGCGACGATACCCATGTTTACCCCTCTTTGGATGTGCGCGGCATTATGCAGAAATCGTCCAACGTCGGCACAAGCAAACTGTCTGCGCGTTTCGGCGCCGAAGAAATGTATGACTTCTATCATGAATTGGGCATCGGTGTGCGTATGCACTCGGGCTTTCCGGGGGAAACTGCAGGTTTGTTGAGAAATTGGCGCAGGTGGCGGCCCATCGAACAGGCGACGATGTCTTTCGGTTACGGCCTGCAATTGAGCCTGCTGCAATTGGCGCGCGCCTATACCGCACTGACGCACGACGGCGTTTTGCTGCCGCTCAGCTTTGAGAAGCAGGCGGTTGCGCCGCAAGGCAAACGCATATTCAAAGAATCGACCGCGCGCGAGGTACGCAATCTGATGGTTTCCGTAACCGAGCCGGGCGGCACCGGTACGGCGGGTGCGGTGGACGGTTTCGATGTCGGCGCTAAAACCGGCACGGCGCGCAAGTTCGTCAACGGGCGTTATGCCGACAACAAACACGTCGCTACCTTTATCGGTTTTGCCCCCGCCAAAAACCCCCGTGTGATTGTGGCGGTAACCATTGACGAACCGACTGCCCACGGCTATTACGGCGGCGTAGTGGCAGGGCCGCCCTTCAAAAAAATTATGGGCGGCAGCCTGAACATCTTGGGCATTTCCCCGACCAAGCCACTGACCGCCGCAGCCGTCAAAACACCGTCTTAA",
"mtrR": "TTGCACGGATAAAAAGTCTTTTTTATAATCCGCCCTCGTCAAACCGACCCGAAACGAAAACGCCATTATGAGAAAAACCAAAACCGAAGCCTTGAAAACCAAAGAACACCTGATGCTTGCCGCCTTGGAAACCTTTTACCGCAAAGGGATTGCCCGCACCTCGCTCAACGAAATCGCCCAAGCCGCCGGCGTAACGCGCGACGCGCTCTATTGGCATTTCAAAAATAAGGAAGACTTGTTTGACGCGTTGTTCCAACGTATCTGCGACGACATCGAAAACTGCATCGCGCAAGATGCCGCAGATGCCGAAGGAGGTTCTTGGACGGTATTCCGCCACACGCTGCTGCACTTTTTCGAGCGGCTGCAAAGCAACGACATCCACTACAAATTCCACAACATCCTGTTTTTAAAGTGCGAACATACGGAACAAAACGCCGCCGTTATCGCCATTGCCCGCAAGCATCAGGCAATCTGGCGCGAGAAAATTACCGCCGTTTTGACCGAAGCGGTGGAAAATCAGGATTTGGCTGACGATTTGGACAAGGAAACGGCGGTCATCTTCATCAAATCGACGTTGGACGGGCTGATTTGGCGTTGGTTCTCTTCCGGCGAAAGTTTCGATTTGGGCAAAACCGCCCCGCGCATCATCGGGATAATGATGGACAACTTGGAAAACCATCCCTGCCTGCGCCGGAAATAA",
"porB": "AAAAACACCGACGACAACGTCAATGCTTGG",
"ponA": "AAAAACAACGGCGGGCGTTGGGCGGTGGTTCAAGGGCCGTTGCCGCAGGGGGCTTTGGTTTCGCTGGATGCAAAA",
"gyrA": "CTGTACGCGATGCACGAGCTGAAAAATAACTGGAATGCCGCCTACAAAAAATCGGCGCGCATCGTCGGCGACGTCATCGGTAAATACCACCCCCACGGCGATTTCGCAGTTTACGCCACCATCGTCCGTATGGCGCAAAATTTCGCTATGCGTTATGTGCTGATAGACGGACAGGGCAACTTCGGATCGGTGGACGGGCTTGCCGCCGCAGCCATGCGCTATACCGAAATCCGCATGGCGAAAATCTCACATGAAATGCTGGCA",
"parC": "GTTTCAGACGGCCAAAAGCCCGTGCAGCGGCGCATTTTGTTTGCCATGCGCGATATGGGTTTGACGGCGGGGGCGAAGCCGGTGAAATCGGCGCGCGTGGTCGGCGAGATTTTGGGTAAATACCATCCGCACGGCAACAGTTCCGCCTATGAGGCGATGGTGCGCATGGCTCAGGATTTTACCTTGCGCTATCCCTTAATCGACGGCATCGGCAACTTCGGTTCGCGCGACGGCGACGGGGCGGCGGCGATGCGTTACACCGAAGCGCGGCTCACGCCGATTGCGGAATTGCTGTTGTCCGAAATCAATCAGGGGACGGTGGATTTTATGCC",
"23S" : "TAGACGGAGAGACCCCGTGAACCTTTACTGTAGCTTTGCATTGGACTTTGAAGTCACTTGTGTAGGATAGGTGGGAGGCTTGGAAGCAGAGACGCCAGTCTCTGTGGAGTCGTCCTTGAAATACCACCCTGGTGTCTTTGAGGTTCTAACCCAGACCCGTCATCCGGGTCGGGGACCGTGCATGGTAGGCAGTTTGACTGGGGCGGTCTCCTCCCAAAGCGTAACGGAGGAGTTCGAAGGTTACCTAGGTCCGGTCGGAAATCGGACTGATAGTGCAATGGCAAAAGGTAGCTTAACTGCGAGACCGACAAGTCGGGCAGGTGCGAAAGCAGGACATAGTGATCCGGTGGTTCTGTATGGAAGGGCCATCGCTCAACGGATAAAAGGTACTCCGGGGATAACAGGCTGATTCCGCCCAAGAGTTCATATCGACGGCGGAGTTTGGCACCTCGATGTCGGCTCATCACATCCTGGGGCTGTAGTCGGTCCCAAGGGTATGGCTGTTCGCCATTTAAAGTGGTACGTGAGCTGGGTTTAAAACGTCGTGAGACAGTTTGGTCCCTATCT",
}
cls.edited_positive_sequences = {
"penA": "ATGTTGATTAAAAGCGAATATAAGCCCCGGATGCTGCCCAAAGAAGAGCAGGTCAAAAAGCCGATGACCAGTAACGGACGGATTAGCTTCGTCCTGATGGCAATGGCGGTCTTGTTTGCCTGTCTGATTGCCCGCGGGCTGTATCTGCAGACGGTAACGTATAACTTTTTGAAAGAACAGGGCGACAACCGGATTGTGCGGACTCAAGCATTGCCGGCTACACGCGGTACGGTTTCGGACCGGAACGGTGCGGTTTTGGCGTTGAGCGCGCCGACGGAGTCCCTGTTTGCCGTGCCTAAAGATATGAAGGAAATGCCGTCTGCCGCCCAATTGGAACGCCTGTCCGAGCTTGTCGATGTGCCGGTCGATGTTTTGAGGAACAAACTCGAACAGAAAGGCAAGTCGTTTATTTGGATCAAGCGGCAGCTCGATCCCAAGGTTGCCGAAGAGGTCAAAGCCTTGGGTTTGGAAAACTTTGTATTTGAAAAAGAATTAAAACGCCATTACCCGATGGGCAACCTGTTTGCACACGTCATCGGATTTACCGATATTGACGGCAAAGGTCAGGAAGGTTTGGAACTTTCGCTTGAAGACAGCCTGTATGGCGAAGACGGCGCGGAAGTTGTTTTGCGGGACCGGCAGGGCAATATTGTGGACAGCTTGGACTCCCCGCGCAATAAAGCACCGCAAAACGGCAAAGACATCATCCTTTCCCTCGATCAGAGGATTCAGACCTTGGCCTATGAAGAGTTGAACAAGGCGGTCGAATACCATCAGGCAAAAGCCGGAACGGTGGTGGTTTTGGATGCCCGCACGGGGGAAATCCTCGCCTTGGCCAATACGCCCGCCTACGATCCCAACAGACCCGGCCGGGCAGACAGCGAACAGCGGCGCAACCGTGCCGTAACCGATATGATCGAACCTGGTTCGGCAATCAAACCGTTCGTGATTGCGAAGGCATTGGATGCGGGCAAAACCGATTTGAACGAACGGCTGAATACGCAGCCTTATAAAATCGGACCGTCTCCCGTGCGCGATGATACCCATGTTTACCCCTCTTTGGATGTGCGCGGCATTATGCAGAAATCGTCCAACGTCGGCACAAGCAAACTGTCTGCGCGTTTCGGCGCCGAAGAAATGTATGACTTCTATCATGAATTGGGCATCGGTGTGCGTATGCACTCGGGCTTTCCGGGGGAAACTGCAGGTTTGTTGAGAAATTGGCGCAGGTGGCGGCCCATCGAACAGGCGACGATGTCTTTCGGTTACGGTCTGCAATTGAGCCTGCTGCAATTGGCGCGCGCCTATACCGCACTGACGCACGACGGCGTTTTGCTGCCGCTCAGCTTTGAGAAGCAGGCGGTTGCGCCGCAAGGCAAACGCATATTCAAAGAATCGACCGCGCGCGAGGTACGCAATCTGATGGTTTCCGTAACCGAGCCGGGCGGCACCGGTACGGCGGGTGCGGTGGACGGTTTCGATGTCGGCGCTAAAACCGGCACGGCGCGCAAGTTCGTCAACGGGCGTTATGCCGACAACAAACACGTCGCTACCTTTATCGGTTTTGCCCCCGCCAAAAACCCCCGTGTGATTGTGGCGGTAACCATCGACGAACCGACTGCCCACGGCTATTACGGCGGCGTAGTGGCAGGGCCGCCCTTCAAAAAAATTATGGGCGGCAGCCTGAACATCTTGGGCATTTCCCCGACCAAGCCACTGACCGCCGCAGCCGTCAAAACACCGTCTTAA",
"mtrR": "TTGCACGGATAAAAAGTCTTTTTTTATAATCCGCCCTCGTCAAACCGACCCGAAACGAAAACGCCATTATGAGAAAAACCAAAACCGAAGCCTTGAAAACCAAAGAACACCTGATGCTTGCCGCCTTGGAAACCTTTTACCGCAAAGGGATTGCCCGCACCTCGCTCAACGAAATCGCCCAAGCCGCCGGCGTAACGCGCGGCGCGCTTTATTGGCATTTCAAAAATAAGGAAGACTTGTTCGACGCGCTGTTCCAACGTATCTGCGACGACATCGAAAACTGCATCGCGCAAGATGCCGCAGATGCCGAAGGAGGGTCTTGGGCGGTATTCCGCCACACGCTGCTGCACTTTTTCGAGCGGCTGCAAAGCAACGACATCTACTACAAATTCCACAACATCCTGTTTTTAAAATGCGAACACACGGAGCAAAACGCCGCCGTTATCGCCATTGCCCGCAAGCATCAGGCAATCTGGCGCGAGAAAATTACCGCCGTTTTGACCGAAGCGGTGGAAAATCAGGATTTGGCTGACGATTTGGACAAGGAAACGGCGGTCATCTTCATCAAATCGACGTTGGACGGGCTGATTTGGCGTTGGTTCTCTTCCGGCGAAAGTTTCGATTTGGGCAAAACCGCCCCCCGCGCATCATCGGGATAATGATGGACAACTTGGAAAACCATCCCTGCCTGCGCCGGAAATAA",
"porB": "AAAGACACCGGCGGCTTCAATCCTTGGGAG",
"ponA": "AAAAACAACGGCGGGCGTTGGGCGGGGGTTCAAGAGCCGTTGCTGCAGGGGGCTTTGGTTTCGCTGGATGCAAAA",
"gyrA": "ctgtacgcgatgcacgagctgaaaaataactggaatgccgcctacaaaaaatcggcgcgcatcgtcggcgacgtcatcggtaaataccacccccacggcgattccgcagtttacgacaccatcgtccgtatggcgcaaaatttcgctatgcgttatgtgctgatagacggacagggcaacttcggatcggtggacgggcttgccgccgcagccatgcgctataccgaaatccgcatggcgaaaatctcacatgaaatgctggca",
"parC": "GTTTCAGACGGCCAAAAGCCCGTGCAGCGGCGCATTTTGTTTGCCATGCGCGATATGGGTTTGACGGCGGGGGCGAAGCCGGTGAAATCGGCGCGCGTGGTCGGCGAGATTTTGGGTAAATACCATCCGCACGGCGACAGTTCCGCCTATGAGGCGATGGTGCGCATGGCTCAGGATTTTACCTTGCGCTACCCCTTAATCGACGGCATCGGCAACTTCGGTTCGCGCGACGGCGACGGGGCGGCGGCGATGCGTTACACCGAAGCGCGGCTGACGCCGATTGCGGAATTGCTGTTGTCCGAAATCAATCAGGGGACGGTGGATTTTATGCC",
"23S" : "TAGACGGAAAGACCCCGTGAACCTTTACTGTAGCTTTGCATTGGACTTTGAAGTCACTTGTGTAGGATAGGTGGGAGGCTTGGAAGCAGAGACGCCAGTCTCTGTGGAGTCGTCCTTGAAATACCACCCTGGTGTCTTTGAGGTTCTAACCCAGACCCGTCATCCGGGTCGGGGACCGTGCATGGTAGGCAGTTTGACTGGGGCGGTCTCCTCCCAAAGCGTAACGGAGGAGTTCGAAGGTTACCTAGGTCCGGTCGGAAATCGGACTGATAGTGCAATGGCAAAAGGTAGCTTAACTGCGAGACCGACAAGTCGGGCAGGTGCGAAAGCAGGACATAGTGATCCGGTGGTTCTGTATGGAAGGGCCATCGCTCAACGGATAAAAGGTACTCCGGGGATAACAGGCTGATTCCGCCCAAGAGTTCATATCGACGGCGGAGTTTGGCACCTCGATGTCGGCTCATCACATCCTGGGGCTGTAGTCGGTCCCAAGGGTATGGCTGTTCGCCATTTAAAGTGGTACGTGAGCTGGGTTTAAAACGTCGTGAGACAGTTTGGTCTCTATCT",
}
cls.negative_sequences = {
"penA": "ATGTTGATTAAAAGCGAATATAAGCCCCGGATGCTGCCCAAAGAAGAGCAGGTCAAAAAGCCGATGACCAGTAACGGACGGATTAGCTTCGTCCTGATGGCAATGGCGGTCTTGTTTGCCTGTCTGATTGCCCGCGGGCTGTATCTGCAGACGGTAACGTATAACTTTTTGAAAGAACAGGGCGACAACCGGATTGTGCGGACTCAAGCATTGCCGGCTACACGCGGTACGGTTTCGGACCGGAACGGTGCGGTTTTGGCGTTGAGCGCGCCGACGGAGTCCCTGTTTGCCGTGCCTAAAGATATGAAGGAAATGCCGTCTGCCGCCCAATTGGAACGCCTGTCCGAGCTTGTCGATGTGCCGGTCGATGTTTTGAGGAACAAACTCGAACAGAAAGGCAAGTCGTTTATTTGGATCAAGCGGCAGCTCGATCCCAAGGTTGCCGAAGAGGTCAAAGCCTTGGGTTTGGAAAACTTTGTATTTGAAAAAGAATTAAAACGCCATTACCCGATGGGCAACCTGTTTGCACACGTCATCGGATTTACCGATATTGACGGCAAAGGTCAGGAAGGTTTGGAACTTTCGCTTGAAGACAGCCTGTATGGCGAAGACGGCGCGGAAGTTGTTTTGCGGGACCGGCAGGGCAATATTGTGGACAGCTTGGACTCCCCGCGCAATAAAGCACCGCAAAACGGCAAAGACATCATCCTTTCCCTCGATCAGAGGATTCAGACCTTGGCCTATGAAGAGTTGAACAAGGCGGTCGAATACCATCAGGCAAAAGCCGGAACGGTGGTGGTTTTGGATGCCCGCACGGGGGAAATCCTCGCCTTGGCCAATACGCCCGCCTACGATCCCAACAGACCCGGCCGGGCAGACAGCGAACAGCGGCGCAACCGTGCCGTAACCGATATGATCGAACCTGGTTCGGCAATCAAACCGTTCGTGATTGCGAAGGCATTGGATGCGGGCAAAACCGATTTGAACGAACGGCTGAATACGCAGCCTTATAAAATCGGACCGTCTCCCGTGCGCGATACCCATGTTTACCCCTCTTTGGATGTGCGCGGCATTATGCAGAAATCGTCCAACGTCGGCACAAGCAAACTGTCTGCGCGTTTCGGCGCCGAAGAAATGTATGACTTCTATCATGAATTGGGCATCGGTGTGCGTATGCACTCGGGCTTTCCGGGGGAAACTGCAGGTTTGTTGAGAAATTGGCGCAGGTGGCGGCCCATCGAACAGGCGACGATGTCTTTCGGTTACGGTCTGCAATTGAGCCTGCTGCAATTGGCGCGCGCCTATACCGCACTGACGCACGACGGCGTTTTGCTGCCGCTCAGCTTTGAGAAGCAGGCGGTTGCGCCGCAAGGCAAACGCATATTCAAAGAATCGACCGCGCGCGAGGTACGCAATCTGATGGTTTCCGTAACCGAGCCGGGCGGCACCGGTACGGCGGGTGCGGTGGACGGTTTCGATGTCGGCGCTAAAACCGGCACGGCGCGCAAGTTCGTCAACGGGCGTTATGCCGACAACAAACACGTCGCTACCTTTATCGGTTTTGCCCCCGCCAAAAACCCCCGTGTGATTGTGGCGGTAACCATCGACGAACCGACTGCCCACGGCTATTACGGCGGCGTAGTGGCAGGGCCGCCCTTCAAAAAAATTATGGGCGGCAGCCTGAACATCTTGGGCATTTCCCCGACCAAGCCACTGACCGCCGCAGCCGTCAAAACACCGTCTTAA",
"mtrR": "TTGCACGGATAAAAAGTCTTTTTTTATAATCCGCCCTCGTCAAACCGACCCGAAACGAAAACGCCATTATGAGAAAAACCAAAACCGAAGCCTTGAAAACCAAAGAACACCTGATGCTTGCCGCCTTGGAAACCTTTTACCGCAAAGGGATTGCCCGCACCTCGCTCAACGAAATCGCCCAAGCCGCCGGCGTAACGCGCGGCGCGCTTTATTGGCATTTCAAAAATAAGGAAGACTTGTTCGACGCGCTGTTCCAACGTATCTGCGACGACATCGAAAACTGCATCGCGCAAGATGCCGCAGATGCCGAAGGAGGGTCTTGGGCGGTATTCCGCCACACGCTGCTGCACTTTTTCGAGCGGCTGCAAAGCAACGACATCTACTACAAATTCCACAACATCCTGTTTTTAAAATGCGAACACACGGAGCAAAACGCCGCCGTTATCGCCATTGCCCGCAAGCATCAGGCAATCTGGCGCGAGAAAATTACCGCCGTTTTGACCGAAGCGGTGGAAAATCAGGATTTGGCTGACGATTTGGACAAGGAAACGGCGGTCATCTTCATCAAATCGACGTTGGACGGGCTGATTTGGCGTTGGTTCTCTTCCGGCGAAAGTTTCGATTTGGGCAAAACCGCCCCCCGCGCATCATCGGGATAATGATGGACAACTTGGAAAACCATCCCTGCCTGCGCCGGAAATAA",
"porB": "AAAAACACCGGCGCCAACGTCAATGCTTGG",
"ponA": "AAAAACAACGGCGGGCGTTGGGCGGTGGTTCAAGAGCCGTTGCTGCAGGGGGCTTTGGTTTCGCTGGATGCAAAA",
"gyrA": "ctgtacgcgatgcacgagctgaaaaataactggaatgccgcctacaaaaaatcggcgcgcatcgtcggcgacgtcatcggtaaataccacccccacggcgattccgcagtttacgacaccatcgtccgtatggcgcaaaatttcgctatgcgttatgtgctgatagacggacagggcaacttcggatcggtggacgggcttgccgccgcagccatgcgctataccgaaatccgcatggcgaaaatctcacatgaaatgctggca",
"parC": "GTTTCAGACGGCCAAAAGCCCGTGCAGCGGCGCATTTTGTTTGCCATGCGCGATATGGGTTTGACGGCGGGGGCGAAGCCGGTGAAATCGGCGCGCGTGGTCGGCGAGATTTTGGGTAAATACCATCCGCACGGCGACAGTTCCGCCTATGAGGCGATGGTGCGCATGGCTCAGGATTTTACCTTGCGCTACCCCTTAATCGACGGCATCGGCAACTTCGGTTCGCGCGACGGCGACGGGGCGGCGGCGATGCGTTACACCGAAGCGCGGCTGACGCCGATTGCGGAATTGCTGTTGTCCGAAATCAATCAGGGGACGGTGGATTTTATGCC",
"23S" : "TAGACGGAAAGACCCCGTGAACCTTTACTGTAGCTTTGCATTGGACTTTGAAGTCACTTGTGTAGGATAGGTGGGAGGCTTGGAAGCAGAGACGCCAGTCTCTGTGGAGTCGTCCTTGAAATACCACCCTGGTGTCTTTGAGGTTCTAACCCAGACCCGTCATCCGGGTCGGGGACCGTGCATGGTAGGCAGTTTGACTGGGGCGGTCTCCTCCCAAAGCGTAACGGAGGAGTTCGAAGGTTACCTAGGTCCGGTCGGAAATCGGACTGATAGTGCAATGGCAAAAGGTAGCTTAACTGCGAGACCGACAAGTCGGGCAGGTGCGAAAGCAGGACATAGTGATCCGGTGGTTCTGTATGGAAGGGCCATCGCTCAACGGATAAAAGGTACTCCGGGGATAACAGGCTGATTCCGCCCAAGAGTTCATATCGACGGCGGAGTTTGGCACCTCGATGTCGGCTCATCACATCCTGGGGCTGTAGTCGGTCCCAAGGGTATGGCTGTTCGCCATTTAAAGTGGTACGTGAGCTGGGTTTAAAACGTCGTGAGACAGTTTGGTCCCTATCT",
}
cls.invalid_sequences = {
"penA": "BTGTTGATTAAAAGCGAATATAAGCCCCGGATGCTGCCCAAAGAAGAGCAGGTCAAAAAGCCGATGACCAGTAACGGACGGATTAGCTTCGTCCTGATGGCAATGGCGGTCTTGTTTGCCTGTCTGATTGCCCGCGGGCTGTATCTGCAGACGGTAACGTATAACTTTTTGAAAGAACAGGGCGACAACCGGATTGTGCGGACTCAAGCATTGCCGGCTACACGCGGTACGGTTTCGGACCGGAACGGTGCGGTTTTGGCGTTGAGCGCGCCGACGGAGTCCCTGTTTGCCGTGCCTAAAGATATGAAGGAAATGCCGTCTGCCGCCCAATTGGAACGCCTGTCCGAGCTTGTCGATGTGCCGGTCGATGTTTTGAGGAACAAACTCGAACAGAAAGGCAAGTCGTTTATTTGGATCAAGCGGCAGCTCGATCCCAAGGTTGCCGAAGAGGTCAAAGCCTTGGGTTTGGAAAACTTTGTATTTGAAAAAGAATTAAAACGCCATTACCCGATGGGCAACCTGTTTGCACACGTCATCGGATTTACCGATATTGACGGCAAAGGTCAGGAAGGTTTGGAACTTTCGCTTGAAGACAGCCTGTATGGCGAAGACGGCGCGGAAGTTGTTTTGCGGGACCGGCAGGGCAATATTGTGGACAGCTTGGACTCCCCGCGCAATAAAGCACCGCAAAACGGCAAAGACATCATCCTTTCCCTCGATCAGAGGATTCAGACCTTGGCCTATGAAGAGTTGAACAAGGCGGTCGAATACCATCAGGCAAAAGCCGGAACGGTGGTGGTTTTGGATGCCCGCACGGGGGAAATCCTCGCCTTGGCCAATACGCCCGCCTACGATCCCAACAGACCCGGCCGGGCAGACAGCGAACAGCGGCGCAACCGTGCCGTAACCGATATGATCGAACCTGGTTCGGCAATCAAACCGTTCGTGATTGCGAAGGCATTGGATGCGGGCAAAACCGATTTGAACGAACGGCTGAATACGCAGCCTTATAAAATCGGACCGTCTCCCGTGCGCGATGATACCCATGTTTACCCCTCTTTGGATGTGCGCGGCATTATGCAGAAATCGTCCAACGTCGGCACAAGCAAACTGTCTGCGCGTTTCGGCGCCGAAGAAATGTATGACTTCTATCATGAATTGGGCATCGGTGTGCGTATGCACTCGGGCTTTCCGGGGGAAACTGCAGGTTTGTTGAGAAATTGGCGCAGGTGGCGGCCCATCGAACAGGCGACGATGTCTTTCGGTTACGGTCTGCAATTGAGCCTGCTGCAATTGGCGCGCGCCTATACCGCACTGACGCACGACGGCGTTTTGCTGCCGCTCAGCTTTGAGAAGCAGGCGGTTGCGCCGCAAGGCAAACGCATATTCAAAGAATCGACCGCGCGCGAGGTACGCAATCTGATGGTTTCCGTAACCGAGCCGGGCGGCACCGGTACGGCGGGTGCGGTGGACGGTTTCGATGTCGGCGCTAAAACCGGCACGGCGCGCAAGTTCGTCAACGGGCGTTATGCCGACAACAAACACGTCGCTACCTTTATCGGTTTTGCCCCCGCCAAAAACCCCCGTGTGATTGTGGCGGTAACCATCGACGAACCGACTGCCCACGGCTATTACGGCGGCGTAGTGGCAGGGCCGCCCTTCAAAAAAATTATGGGCGGCAGCCTGAACATCTTGGGCATTTCCCCGACCAAGCCACTGACCGCCGCAGCCGTCAAAACACCGTCTTAA",
"mtrR": "TTGCACGGATAAAAAGTCTTTTTTTATAATCCGCCCTCGTCAAACCGACCCGAAACGAARACGCCATTATGAGAAAAACCAAAACCGAAGCCTTGAAAACCAAAGAACACCTGATGCTTGCCGCCTTGGAAACCTTTTACCGCAAAGGGATTGCCCGCACCTCGCTCAACGAAATCGCCCAAGCCGCCGGCGTAACGCGCGGCGCGCTTTATTGGCATTTCAAAAATAAGGAAGACTTGTTCGACGCGCTGTTCCAACGTATCTGCGACGACATCGAAAACTGCATCGCGCAAGATGCCGCAGATGCCGAAGGAGGGTCTTGGGCGGTATTCCGCCACACGCTGCTGCACTTTTTCGAGCGGCTGCAAAGCAACGACATCTACTACAAATTCCACAACATCCTGTTTTTAAAATGCGAACACACGGAGCAAAACGCCGCCGTTATCGCCATTGCCCGCAAGCATCAGGCAATCTGGCGCGAGAAAATTACCGCCGTTTTGACCGAAGCGGTGGAAAATCAGGATTTGGCTGACGATTTGGACAAGGAAACGGCGGTCATCTTCATCAAATCGACGTTGGACGGGCTGATTTGGCGTTGGTTCTCTTCCGGCGAAAGTTTCGATTTGGGCAAAACCGCCCCCCGCGCATCATCGGGATAATGATGGACAACTTGGAAAACCATCCCTGCCTGCGCCGGAAATAA",
"porB": "AAAAACACCGACGACAACGTCAATGCTTGe",
"ponA": "AAAAACAACGGCGGGCGTTGGGCGGTGGTTCAAGYGGCCGTTGCCGCAGGGGGCTTTGGTTTCGCTGGATGCAAAA",
"gyrA": "ctgtacgcgatgcacgagctgaaaaataactggaatgccgcctacaaaaaatcggcgcgcatcgtcggcgacgtcatcggtaaataccacccccacggcgkttccgcagtttacgacaccatcgtccgtatggcgcaaaatttcgctatgcgttatgtgctgatagacggacagggcaacttcggatcggtggacgggcttgccgccgcagccatgcgctataccgaaatccgcatggcgaaaatctcacatgaaatgctggca",
"parC": "GTTTCAGACGGCCAAAAGCCCGTGCAGCGGCGCATTTTGTTTGCCATJCGCGATATGGGTTTGACGGCGGGGGCGAAGCCGGTGAAATCGGCGCGCGTGGTCGGCGAGATTTTGGGTAAATACCATCCGCACGGCGACAGTTCCGCCTATGAGGCGATGGTGCGCATGGCTCAGGATTTTACCTTGCGCTACCCCTTAATCGACGGCATCGGCAACTTCGGTTCGCGCGACGGCGACGGGGCGGCGGCGATGCGTTACACCGAAGCGCGGCTGACGCCGATTGCGGAATTGCTGTTGTCCGAAATCAATCAGGGGACGGTGGATTTTATGCC",
"23S" : "TAGACGGAAAGACCCCGTGAACCTTTACTGTAGCTTTGCATTGGACTTTGAAGTCACTTGTGTAGGATAGGTGGGAGGCTTGGAAGCAGAGACGCCAGTCTCTGTGGAGTCGTCCTTGAAATACCACCCTGGTGTCTTTGAGGTTCTAACCCAGACCCGTCATCCGGGTCGGGGACCGTGCATGGTAGGCAGTTTGACTGGGGCGGTCTCCTCCCAAAGCGTAACGGAGGAGTTCGAAGGTTACCTAGGTCCGGTCGGAAATCGGACTGATAGTGCAATGGCAAAAGGTAGCTTAACTGCGAGACCGACAAGTCGGGCAGGTGCGAAAGCAGGACATAGTGATCCGGTGGTTCTGTATGGAAGGGCCATCGCTCAACGGATAAAAGGTACTCCGGGGATAACAGGCTGATTCCGCCCAAGAGTTCATATCGACGGCGGAGTTTGGCACCTCGATGTCGGCTCATCACATCCTGGGGCTGTAGTCGGTCCCAAGGGTATGGCTGTTCGCCATTTAARAGTGGTACGTGAGCTGGGTTTAAAACGTCGTGAGACAGTTTGGTCCCTATCT",
}
cls.empty_sequences = {
"penA": "",
"mtrR": "",
"porB": "",
"ponA": "",
"gyrA": "",
"parC": "",
"23S" : "",
}
cls.success_msgs = {
"Allele with Allele Type 1.000 Edited Successfully!",
"Allele with Allele Type 2 Edited Successfully!",
"Allele with Allele Type 3 Edited Successfully!",
"Allele with Allele Type 4 Edited Successfully!",
"Allele with Allele Type 5 Edited Successfully!",
"Allele with Allele Type 6 Edited Successfully!",
"Allele with Allele Type 7 Edited Successfully!",
}
cls.seq_exists_msgs = {
"The sequence you have submitted already exists for the loci penA with allele type 2.000.",
"The sequence you have submitted already exists for the loci mtrR with allele type 3.",
"The sequence you have submitted already exists for the loci porB with allele type 4.",
"The sequence you have submitted already exists for the loci ponA with allele type 5.",
"The sequence you have submitted already exists for the loci gyrA with allele type 6.",
"The sequence you have submitted already exists for the loci penA with allele type 7.",
"The sequence you have submitted already exists for the loci penA with allele type 8.",
}
cls.allele_type_exists_msgs = {
"Please enter a different allele type. A sequence with type 0.000 for the loci penA already exists.",
"Please enter a different allele type. A sequence with type 1 for the loci mtrR already exists.",
"Please enter a different allele type. A sequence with type 2 for the loci porB already exists.",
"Please enter a different allele type. A sequence with type 3 for the loci ponA already exists.",
"Please enter a different allele type. A sequence with type 4 for the loci gyrA already exists.",
"Please enter a different allele type. A sequence with type 5 for the loci parC already exists.",
"Please enter a different allele type. A sequence with type 6 for the loci 23S already exists.",
}
if not cls.DRIVER:
if constants.USE_CHROME_DRIVER:
cls.driver = webdriver.Chrome(executable_path=constants.DRIVER_PATH)
else:
cls.driver = webdriver.Firefox()
cls.driver.implicitly_wait(constants.IMPLICIT_WAIT)
cls.driver.set_window_size(1024, 768)
else:
cls.driver = cls.DRIVER
#self.populateDB()
def signIn(self):
METHOD_NAME = "signIn"
driver = self.driver
test_number = 1
test_input = ("test01", "Mypass1!")
username = test_input[0]
password = test_input[1]
cookies = driver.get_cookies()
lang_selected = False
eula_accepted = False
for cookie in cookies:
if cookie['name'] == 'ngstar_eula_acceptance':
eula_accepted = True
if cookie['name'] == 'ngstar_lang_pref':
lang_selected = True
if lang_selected == False:
driver.get(constants.WELCOME_URL)
element = driver.find_element_by_id("btn-en")
element.click()
element = driver.find_element_by_id("launch-ngstar")
element.click()
if eula_accepted == False:
element = driver.find_element_by_id("eula_accept")
element.click()
# click Sign In button
driver.get(constants.HOME_URL)
msg = "Test #{0} in {1} in {2} with [Input: {3}]: " \
"Could not find [{4}] button".format(test_number,
CLASS_NAME,
METHOD_NAME,
test_input,
self.SIGN_IN_BTN_NAME)
self.assertIn(self.SIGN_IN_BTN_NAME, driver.page_source, msg)
element = driver.find_element_by_link_text(self.SIGN_IN_BTN_NAME)
element.click()
test_number = test_number + 1
# input username and password on Sign In page
element = driver.find_element_by_id(self.USERNAME_TEXTBOX_ID)
element.send_keys(username)
element = driver.find_element_by_id(self.PASSWORD_TEXTBOX_ID)
element.send_keys(password)
element.submit()
# logout
msg = "Test #{0} in {1} in {2} with [Input: {3}]: " \
"Could not find [{4}] button".format(test_number,
CLASS_NAME,
METHOD_NAME,
test_input,
self.SIGN_OUT_BTN_NAME)
self.assertIn(self.SIGN_OUT_BTN_NAME, driver.page_source, msg)
test_number = test_number + 1
def signOut(self):
METHOD_NAME = "signOut"
driver = self.driver
test_number = 1
test_input = ("test01", "Mypass1!")
driver.get(constants.HOME_URL)
msg = "Test #{0} in {1} in {2} with [Input: {3}]: " \
"Could not find [{4}] button".format(test_number,
CLASS_NAME,
METHOD_NAME,
test_input,
self.SIGN_OUT_BTN_NAME)
self.assertIn(self.SIGN_OUT_BTN_NAME, driver.page_source, msg)
element = driver.find_element_by_link_text(self.SIGN_OUT_BTN_NAME)
element.click()
test_number = test_number + 1
def populateDB(self):
driver = self.driver
for loci_name in self.loci_names:
allele_type = self.allele_types[loci_name]
sequence = self.sequences[loci_name]
WebDriverWait(driver, 10).until(lambda driver: driver.execute_script("return window.jQuery && window.jQuery.active === 0;"))
driver.get(constants.ADD_ALLELE_URL)
element = driver.find_element_by_xpath('//*[@id="select2-loci_name_option-container"]')
element.click()
element = driver.find_element_by_xpath('/html/body/span/span/span[1]/input')
element.click()
element.send_keys(loci_name)
element.send_keys(Keys.RETURN)
element = driver.find_element_by_id(self.ADD_ALLELE_TYPE_TEXTBOX_ID)
element.send_keys(allele_type)
script = "var $item = $('#" + self.ADD_ALLELE_SEQ_TEXTBOX_ID + "'); \
$($item).val('" + sequence + "');"
driver.execute_script(script)
element = driver.find_element_by_id(self.SUBMIT_BTN_ID)
element.click()
element = driver.find_element_by_id(self.ALLELE_LIST_ALERT_ID)
self.assertIn(self.ADD_ALLELE_SUCCESS_MSG, element.text)
element = driver.find_element_by_xpath("//table/tbody/tr[1]/td[1]")
self.assertIn(allele_type, element.text)
element = driver.find_element_by_xpath("//table/tbody/tr[1]/td[2]")
self.assertIn(loci_name, element.text)
def populateDB_ADDITIONAL(self):
driver = self.driver
for loci_name in self.loci_names:
allele_type = self.allele_types_additional[loci_name]
sequence = self.sequences_additional[loci_name]
WebDriverWait(driver, 10).until(lambda driver: driver.execute_script("return window.jQuery && window.jQuery.active === 0;"))
driver.get(constants.ADD_ALLELE_URL)
element = driver.find_element_by_xpath('//*[@id="select2-loci_name_option-container"]')
element.click()
element = driver.find_element_by_xpath('/html/body/span/span/span[1]/input')
element.click()
element.send_keys(loci_name)
element.send_keys(Keys.RETURN)
element = driver.find_element_by_id(self.ADD_ALLELE_TYPE_TEXTBOX_ID)
element.send_keys(allele_type)
script = "var $item = $('#" + self.ADD_ALLELE_SEQ_TEXTBOX_ID + "'); \
$($item).val('" + sequence + "');"
driver.execute_script(script)
element = driver.find_element_by_id(self.SUBMIT_BTN_ID)
element.click()
element = driver.find_element_by_id(self.ALLELE_LIST_ALERT_ID)
self.assertIn(self.ADD_ALLELE_SUCCESS_MSG, element.text)
element = driver.find_element_by_xpath("//table/tbody/tr[2]/td[1]")
self.assertIn(allele_type, element.text)
element = driver.find_element_by_xpath("//table/tbody/tr[2]/td[2]")
self.assertIn(loci_name, element.text)
    # The running test suite is terminated if an assert is thrown (if an assert
    # is thrown in populateDB, or any other method, tests won't continue, which
    # is what we want).
def test_edit_allele_positive_cases(self):
driver = self.driver
self.signIn()
self.populateDB()
for loci_name in self.loci_names:
allele_type = self.allele_types_radio_btns[loci_name]
allele_type_edited = self.allele_types_edited[loci_name]
sequence = self.edited_positive_sequences[loci_name]
radio_button_value = loci_name + ":" + allele_type
driver.get(constants.LIST_LOCI_ALLELES_BASE_URL + loci_name)
element = driver.find_element_by_css_selector("input[type='radio'][value='" + radio_button_value + "']")
element.click()
element = driver.find_element_by_css_selector(self.EDIT_ALLELE_BTN_CSS_SEL)
element.click()
WebDriverWait(driver, 10).until(lambda driver: driver.execute_script("return window.jQuery && window.jQuery.active === 0;"))
element = driver.find_element_by_xpath('//*[@id="select2-loci_name_option-container"]')
element.click()
element = driver.find_element_by_xpath('/html/body/span/span/span[1]/input')
element.click()
element.send_keys(loci_name)
element.send_keys(Keys.RETURN)
element = driver.find_element_by_id(self.ADD_ALLELE_TYPE_TEXTBOX_ID)
element.clear()
element.send_keys(allele_type_edited)
script = "var $item = $('#" + self.ADD_ALLELE_SEQ_TEXTBOX_ID + "'); \
$($item).val('""');"
driver.execute_script(script)
script = "var $item = $('#" + self.ADD_ALLELE_SEQ_TEXTBOX_ID + "'); \
$($item).val('" + sequence + "');"
driver.execute_script(script)
element = driver.find_element_by_id(self.SUBMIT_BTN_ID)
element.click()
element = driver.find_element_by_id(self.ALLELE_LIST_ALERT_ID)
for edit_success_msg in self.success_msgs:
self.EDIT_ALLELE_SUCCESS_MSG = edit_success_msg
if element.text == self.EDIT_ALLELE_SUCCESS_MSG:
self.assertIn(self.EDIT_ALLELE_SUCCESS_MSG, element.text)
element = driver.find_element_by_xpath("//table/tbody/tr[1]/td[1]")
self.assertIn(allele_type_edited, element.text)
element = driver.find_element_by_xpath("//table/tbody/tr[1]/td[2]")
self.assertIn(loci_name, element.text)
self.signOut()
self.clearDB_edited_alleles()
def test_edit_allele_negative_cases(self):
driver = self.driver
self.signIn()
self.populateDB()
self.populateDB_ADDITIONAL()
for loci_name in self.loci_names:
allele_type = self.allele_types_radio_btns[loci_name]
allele_type_edited = self.allele_types_edited[loci_name]
sequence = self.sequences_additional[loci_name]
radio_button_value = loci_name + ":" + allele_type
driver.get(constants.LIST_LOCI_ALLELES_BASE_URL + loci_name)
element = driver.find_element_by_css_selector("input[type='radio'][value='" + radio_button_value + "']")
element.click()
element = driver.find_element_by_css_selector(self.EDIT_ALLELE_BTN_CSS_SEL)
element.click()
WebDriverWait(driver, 10).until(lambda driver: driver.execute_script("return window.jQuery && window.jQuery.active === 0;"))
element = driver.find_element_by_xpath('//*[@id="select2-loci_name_option-container"]')
element.click()
element = driver.find_element_by_xpath('/html/body/span/span/span[1]/input')
element.click()
element.send_keys(loci_name)
element.send_keys(Keys.RETURN)
element = driver.find_element_by_id(self.ADD_ALLELE_TYPE_TEXTBOX_ID)
element.clear()
element.send_keys(allele_type_edited)
script = "var $item = $('#" + self.ADD_ALLELE_SEQ_TEXTBOX_ID + "'); \
$($item).val('""');"
driver.execute_script(script)
script = "var $item = $('#" + self.ADD_ALLELE_SEQ_TEXTBOX_ID + "'); \
$($item).val('" + sequence + "');"
driver.execute_script(script)
element = driver.find_element_by_id(self.SUBMIT_BTN_ID)
element.click()
element = driver.find_element_by_id(self.ALERT_ID)
for seq_exists_msg in self.seq_exists_msgs:
self.ALLELE_SEQ_EXISTS = seq_exists_msg
if element.text == self.ALLELE_SEQ_EXISTS:
self.assertIn(self.ALLELE_SEQ_EXISTS, element.text)
for loci_name in self.loci_names:
allele_type = self.allele_types_radio_btns[loci_name]
allele_type_edited = self.allele_types_additional[loci_name]
sequence = self.edited_positive_sequences[loci_name]
radio_button_value = loci_name + ":" + allele_type
driver.get(constants.LIST_LOCI_ALLELES_BASE_URL + loci_name)
element = driver.find_element_by_css_selector("input[type='radio'][value='" + radio_button_value + "']")
element.click()
element = driver.find_element_by_css_selector(self.EDIT_ALLELE_BTN_CSS_SEL)
element.click()
WebDriverWait(driver, 10).until(lambda driver: driver.execute_script("return window.jQuery && window.jQuery.active === 0;"))
element = driver.find_element_by_xpath('//*[@id="select2-loci_name_option-container"]')
element.click()
element = driver.find_element_by_xpath('/html/body/span/span/span[1]/input')
element.click()
element.send_keys(loci_name)
element.send_keys(Keys.RETURN)
element = driver.find_element_by_id(self.ADD_ALLELE_TYPE_TEXTBOX_ID)
element.clear()
element.send_keys(allele_type_edited)
script = "var $item = $('#" + self.ADD_ALLELE_SEQ_TEXTBOX_ID + "'); \
$($item).val('""');"
driver.execute_script(script)
script = "var $item = $('#" + self.ADD_ALLELE_SEQ_TEXTBOX_ID + "'); \
$($item).val('" + sequence + "');"
driver.execute_script(script)
element = driver.find_element_by_id(self.SUBMIT_BTN_ID)
element.click()
element = driver.find_element_by_id(self.ALERT_ID)
for allele_type_exists_msg in self.allele_type_exists_msgs:
self.ALLELE_TYPE_DUPLICATE = allele_type_exists_msg
if element.text == self.ALLELE_TYPE_DUPLICATE:
self.assertIn(self.ALLELE_TYPE_DUPLICATE, element.text)
self.signOut()
self.clearDB()
self.clearDB_alleles_additional()
    def test_edit_allele_invalid_cases(self):
        """Verify per-field validation errors when editing alleles.

        Two passes over every locus:

        1. Submit a sequence containing invalid characters and expect the
           sequence-character validation error in the form's help blocks.
        2. Submit an invalid allele type with a valid sequence and expect
           the allele-type validation error.
        """
        driver = self.driver
        self.signIn()
        self.populateDB()
        # Pass 1: invalid sequence characters.
        for loci_name in self.loci_names:
            allele_type = self.allele_types_radio_btns[loci_name]
            allele_type_edited = self.allele_types_edited[loci_name]
            sequence = self.invalid_sequences[loci_name]
            radio_button_value = loci_name + ":" + allele_type
            driver.get(constants.LIST_LOCI_ALLELES_BASE_URL + loci_name)
            # Select the allele's radio button, then open the edit form.
            element = driver.find_element_by_css_selector("input[type='radio'][value='" + radio_button_value + "']")
            element.click()
            element = driver.find_element_by_css_selector(self.EDIT_ALLELE_BTN_CSS_SEL)
            element.click()
            # Let any outstanding jQuery AJAX settle before using the form.
            WebDriverWait(driver, 10).until(lambda driver: driver.execute_script("return window.jQuery && window.jQuery.active === 0;"))
            # Re-select the locus in the select2 dropdown.
            element = driver.find_element_by_xpath('//*[@id="select2-loci_name_option-container"]')
            element.click()
            element = driver.find_element_by_xpath('/html/body/span/span/span[1]/input')
            element.click()
            element.send_keys(loci_name)
            element.send_keys(Keys.RETURN)
            element = driver.find_element_by_id(self.ADD_ALLELE_TYPE_TEXTBOX_ID)
            element.clear()
            element.send_keys(allele_type_edited)
            # Clear, then set, the sequence textarea via jQuery
            # (the adjacent '""' literals concatenate to an empty JS string).
            script = "var $item = $('#" + self.ADD_ALLELE_SEQ_TEXTBOX_ID + "'); \
                $($item).val('""');"
            driver.execute_script(script)
            script = "var $item = $('#" + self.ADD_ALLELE_SEQ_TEXTBOX_ID + "'); \
                $($item).val('" + sequence + "');"
            driver.execute_script(script)
            element = driver.find_element_by_id(self.SUBMIT_BTN_ID)
            element.click()
            # The form's validation messages render in "help-block" elements.
            elements = driver.find_elements_by_class_name("help-block")
            error_msgs = [element.text for element in elements]
            self.assertIn(self.SEQUENCE_CHAR_ERROR, error_msgs, error_msgs)
        # Pass 2: invalid allele type with a valid sequence.
        for loci_name in self.loci_names:
            allele_type = self.allele_types_radio_btns[loci_name]
            allele_type_invalid = self.invalid_allele_types[loci_name]
            sequence = self.edited_positive_sequences[loci_name]
            radio_button_value = loci_name + ":" + allele_type
            driver.get(constants.LIST_LOCI_ALLELES_BASE_URL + loci_name)
            element = driver.find_element_by_css_selector("input[type='radio'][value='" + radio_button_value + "']")
            element.click()
            element = driver.find_element_by_css_selector(self.EDIT_ALLELE_BTN_CSS_SEL)
            element.click()
            # Let any outstanding jQuery AJAX settle before using the form.
            WebDriverWait(driver, 10).until(lambda driver: driver.execute_script("return window.jQuery && window.jQuery.active === 0;"))
            element = driver.find_element_by_xpath('//*[@id="select2-loci_name_option-container"]')
            element.click()
            element = driver.find_element_by_xpath('/html/body/span/span/span[1]/input')
            element.click()
            element.send_keys(loci_name)
            element.send_keys(Keys.RETURN)
            element = driver.find_element_by_id(self.ADD_ALLELE_TYPE_TEXTBOX_ID)
            element.clear()
            element.send_keys(allele_type_invalid)
            # Clear, then set, the sequence textarea via jQuery.
            script = "var $item = $('#" + self.ADD_ALLELE_SEQ_TEXTBOX_ID + "'); \
                $($item).val('""');"
            driver.execute_script(script)
            script = "var $item = $('#" + self.ADD_ALLELE_SEQ_TEXTBOX_ID + "'); \
                $($item).val('" + sequence + "');"
            driver.execute_script(script)
            element = driver.find_element_by_id(self.SUBMIT_BTN_ID)
            element.click()
            elements = driver.find_elements_by_class_name("help-block")
            error_msgs = [element.text for element in elements]
            self.assertIn(self.ALLELE_TYPE_CHAR_ERROR, error_msgs, error_msgs)
        self.signOut()
        self.clearDB()
def test_edit_allele_empty_cases(self):
    """Editing an allele must reject an empty sequence and an empty type.

    For each locus in ``self.loci_names`` this drives the edit-allele form
    twice: first with a valid type but an empty sequence (expects the
    sequence-empty validation message), then with an empty type and a
    valid sequence (expects the type-empty message).  Validation errors
    are read from the form's ``help-block`` elements.
    """
    driver = self.driver
    self.signIn()
    self.populateDB()
    # Case 1: valid allele type, empty sequence -> SEQUENCE_EMPTY error.
    for loci_name in self.loci_names:
        allele_type = self.allele_types_radio_btns[loci_name]
        allele_type_edited = self.allele_types_edited[loci_name]
        sequence = self.empty_sequences[loci_name]
        radio_button_value = loci_name + ":" + allele_type
        driver.get(constants.LIST_LOCI_ALLELES_BASE_URL + loci_name)
        # Select the target allele's radio button, then open the edit form.
        element = driver.find_element_by_css_selector("input[type='radio'][value='" + radio_button_value + "']")
        element.click()
        element = driver.find_element_by_css_selector(self.EDIT_ALLELE_BTN_CSS_SEL)
        element.click()
        # Wait for all outstanding jQuery AJAX requests to finish.
        WebDriverWait(driver, 10).until(lambda driver: driver.execute_script("return window.jQuery && window.jQuery.active === 0;"))
        # Pick the locus in the select2 dropdown by typing its name.
        element = driver.find_element_by_xpath('//*[@id="select2-loci_name_option-container"]')
        element.click()
        element = driver.find_element_by_xpath('/html/body/span/span/span[1]/input')
        element.click()
        element.send_keys(loci_name)
        element.send_keys(Keys.RETURN)
        element = driver.find_element_by_id(self.ADD_ALLELE_TYPE_TEXTBOX_ID)
        element.clear()
        element.send_keys(allele_type_edited)
        # Clear then set the sequence textbox via jQuery (it is not
        # directly typeable through send_keys in this form).
        script = "var $item = $('#" + self.ADD_ALLELE_SEQ_TEXTBOX_ID + "'); \
$($item).val('""');"
        driver.execute_script(script)
        script = "var $item = $('#" + self.ADD_ALLELE_SEQ_TEXTBOX_ID + "'); \
$($item).val('" + sequence + "');"
        driver.execute_script(script)
        element = driver.find_element_by_id(self.SUBMIT_BTN_ID)
        element.click()
        elements = driver.find_elements_by_class_name("help-block")
        error_msgs = [element.text for element in elements]
        self.assertIn(self.SEQUENCE_EMPTY, error_msgs, error_msgs)
    # Case 2: empty allele type, valid sequence -> ALLELE_TYPE_EMPTY error.
    for loci_name in self.loci_names:
        allele_type = self.allele_types_radio_btns[loci_name]
        allele_type_invalid = self.empty_allele_types[loci_name]
        sequence = self.edited_positive_sequences[loci_name]
        radio_button_value = loci_name + ":" + allele_type
        driver.get(constants.LIST_LOCI_ALLELES_BASE_URL + loci_name)
        element = driver.find_element_by_css_selector("input[type='radio'][value='" + radio_button_value + "']")
        element.click()
        element = driver.find_element_by_css_selector(self.EDIT_ALLELE_BTN_CSS_SEL)
        element.click()
        WebDriverWait(driver, 10).until(lambda driver: driver.execute_script("return window.jQuery && window.jQuery.active === 0;"))
        element = driver.find_element_by_xpath('//*[@id="select2-loci_name_option-container"]')
        element.click()
        element = driver.find_element_by_xpath('/html/body/span/span/span[1]/input')
        element.click()
        element.send_keys(loci_name)
        element.send_keys(Keys.RETURN)
        element = driver.find_element_by_id(self.ADD_ALLELE_TYPE_TEXTBOX_ID)
        element.clear()
        element.send_keys(allele_type_invalid)
        script = "var $item = $('#" + self.ADD_ALLELE_SEQ_TEXTBOX_ID + "'); \
$($item).val('""');"
        driver.execute_script(script)
        script = "var $item = $('#" + self.ADD_ALLELE_SEQ_TEXTBOX_ID + "'); \
$($item).val('" + sequence + "');"
        driver.execute_script(script)
        element = driver.find_element_by_id(self.SUBMIT_BTN_ID)
        element.click()
        elements = driver.find_elements_by_class_name("help-block")
        error_msgs = [element.text for element in elements]
        self.assertIn(self.ALLELE_TYPE_EMPTY, error_msgs, error_msgs)
    self.signOut()
    self.clearDB()
def clearDB(self):
    """Delete the per-locus alleles named in ``self.allele_types_clear_db``.

    For each locus, selects the allele's radio button, triggers the delete
    button, confirms the jQuery alert dialog, and asserts the success
    message appears in a ``<strong>`` element.
    """
    driver = self.driver
    self.signIn()
    for loci_name in self.loci_names:
        allele_type = self.allele_types_clear_db[loci_name]
        radio_button_value = loci_name + ":" + allele_type
        driver.get(constants.LIST_LOCI_ALLELES_BASE_URL + loci_name)
        element = driver.find_element_by_css_selector("input[type='radio'][value='" + radio_button_value + "']")
        element.click()
        element = driver.find_element_by_id(self.DELETE_ALLELE_BTN_CSS_SEL)
        element.click()
        # Wait for outstanding AJAX before confirming the delete dialog.
        WebDriverWait(driver, 10).until(lambda driver: driver.execute_script("return window.jQuery && window.jQuery.active === 0;"))
        script = "window.jQuery(document).ready(function() { \
$('" + "#" + self.DELETE_ALLELE_ALERT_BTN_ID + "').click(); \
})"
        driver.execute_script(script)
        elements = driver.find_elements_by_tag_name("strong")
        msgs = [e.text for e in elements]
        self.assertIn(self.DELETE_ALLELE_SUCCESS_MSG, msgs)
    self.signOut()
def clearDB_edited_alleles(self):
    """Delete the alleles listed in ``self.allele_types_clear_db_edited``.

    Same delete-and-confirm flow as :meth:`clearDB`, but keyed on the
    post-edit allele names so tests that rename alleles can clean up.
    """
    driver = self.driver
    self.signIn()
    for loci_name in self.loci_names:
        allele_type = self.allele_types_clear_db_edited[loci_name]
        radio_button_value = loci_name + ":" + allele_type
        driver.get(constants.LIST_LOCI_ALLELES_BASE_URL + loci_name)
        element = driver.find_element_by_css_selector("input[type='radio'][value='" + radio_button_value + "']")
        element.click()
        element = driver.find_element_by_id(self.DELETE_ALLELE_BTN_CSS_SEL)
        element.click()
        WebDriverWait(driver, 10).until(lambda driver: driver.execute_script("return window.jQuery && window.jQuery.active === 0;"))
        # Confirm the jQuery alert dialog for the delete action.
        script = "window.jQuery(document).ready(function() { \
$('" + "#" + self.DELETE_ALLELE_ALERT_BTN_ID + "').click(); \
})"
        driver.execute_script(script)
        elements = driver.find_elements_by_tag_name("strong")
        msgs = [e.text for e in elements]
        self.assertIn(self.DELETE_ALLELE_SUCCESS_MSG, msgs)
    self.signOut()
def clearDB_alleles_additional(self):
    """Delete the alleles listed in ``self.allele_types_clear_db_additional``.

    Same delete-and-confirm flow as :meth:`clearDB`, for the extra alleles
    some tests create beyond the base fixtures.
    """
    driver = self.driver
    self.signIn()
    for loci_name in self.loci_names:
        allele_type = self.allele_types_clear_db_additional[loci_name]
        radio_button_value = loci_name + ":" + allele_type
        driver.get(constants.LIST_LOCI_ALLELES_BASE_URL + loci_name)
        element = driver.find_element_by_css_selector("input[type='radio'][value='" + radio_button_value + "']")
        element.click()
        element = driver.find_element_by_id(self.DELETE_ALLELE_BTN_CSS_SEL)
        element.click()
        WebDriverWait(driver, 10).until(lambda driver: driver.execute_script("return window.jQuery && window.jQuery.active === 0;"))
        # Confirm the jQuery alert dialog for the delete action.
        script = "window.jQuery(document).ready(function() { \
$('" + "#" + self.DELETE_ALLELE_ALERT_BTN_ID + "').click(); \
})"
        driver.execute_script(script)
        elements = driver.find_elements_by_tag_name("strong")
        msgs = [e.text for e in elements]
        self.assertIn(self.DELETE_ALLELE_SUCCESS_MSG, msgs)
    self.signOut()
@classmethod
def tearDownClass(cls):
    # Close the browser only when cls.DRIVER is falsy; presumably a truthy
    # DRIVER means the webdriver was supplied externally and should be left
    # open -- TODO(review): confirm the intended polarity of this flag.
    if not cls.DRIVER:
        cls.driver.close()
if __name__ == '__main__':
    # Allow running this test module directly as a script.
    unittest.main()
| [
"sukhdeep.sidhu@phac-aspc.gc.ca"
] | sukhdeep.sidhu@phac-aspc.gc.ca |
15424b23819be3293d71f6569139a936467bd8ca | d1ae4f79e505d5bb3a8401017c343260d1582ab9 | /mgmt/migrations/0017_auto_20210708_1225.py | 6ca91b7b1a01ed47bfe9a26a1f9bd82fd69ad25b | [
"CC0-1.0"
] | permissive | AndreDrDre/AviationStockManagement | af0edd438503f90f935fc38e3f18e8296a2a6ca3 | 8331edbacc936919ede04da12d7e529e5e38be1c | refs/heads/master | 2023-07-03T22:46:16.832584 | 2021-08-08T11:12:40 | 2021-08-08T11:28:37 | 392,350,381 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 923 | py | # Generated by Django 3.1.7 on 2021-07-08 12:25
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema change: adds four optional free-text
    # dimension/weight columns to the ``parts`` model.  All are nullable
    # CharFields, so existing rows need no default value.

    dependencies = [
        ('mgmt', '0016_auto_20210708_0725'),
    ]

    operations = [
        migrations.AddField(
            model_name='parts',
            name='breadth',
            field=models.CharField(blank=True, max_length=50, null=True),
        ),
        migrations.AddField(
            model_name='parts',
            name='height',
            field=models.CharField(blank=True, max_length=50, null=True),
        ),
        migrations.AddField(
            model_name='parts',
            name='length',
            field=models.CharField(blank=True, max_length=50, null=True),
        ),
        migrations.AddField(
            model_name='parts',
            name='weight',
            field=models.CharField(blank=True, max_length=50, null=True),
        ),
    ]
| [
"originsgrand@gmail.com"
] | originsgrand@gmail.com |
b3cb46437c638706e438609c9ce6a94d3b03e0c0 | fa1d763e8ca852048de2c344d6642c8655d91f8a | /EnsambleTraining.py | 518eb9d6dbcc41e015e24045122eaabd67168b67 | [
"MIT"
] | permissive | wuhao1938/RadiativeTransportPinns | 9f389dc6af4354c033bfc8296db5acbfdef58ef0 | 11a9ecd0086d8fc87c634e0a87be144988a112e5 | refs/heads/master | 2023-01-03T12:29:42.337325 | 2020-11-03T09:26:40 | 2020-11-03T09:26:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,472 | py | import itertools
from ImportFile import *
rs = 0
N_coll = int(sys.argv[1])
N_u = int(sys.argv[2])
N_int = int(sys.argv[3])
n_object = 0
ob = "None"
folder_name = sys.argv[4]
point = "sobol"
validation_size = 0.0
network_properties = {
"hidden_layers": [8, 12, 16, 20],
"neurons": [20, 24, 28, 32, 36, 40],
"residual_parameter": [0.1, 1, 10],
"kernel_regularizer": [2],
"regularization_parameter": [0],
"batch_size": [(N_coll + N_u + N_int)],
"epochs": [1],
"activation": ["tanh"],
}
shuffle = "false"
cluster = sys.argv[5]
GPU = "GeForceGTX1080" # "GeForceGTX1080", "GeForceGTX1080Ti", "TeslaV100_SXM2_32GB", "None"
n_retrain = 20
if not os.path.isdir(folder_name):
os.mkdir(folder_name)
settings = list(itertools.product(*network_properties.values()))
i = 0
for setup in settings:
print(setup)
folder_path = folder_name + "/Setup_" + str(i)
print("###################################")
setup_properties = {
"hidden_layers": setup[0],
"neurons": setup[1],
"residual_parameter": setup[2],
"kernel_regularizer": setup[3],
"regularization_parameter": setup[4],
"batch_size": setup[5],
"epochs": setup[6],
"activation": setup[7]
}
arguments = list()
arguments.append(str(rs))
arguments.append(str(N_coll))
arguments.append(str(N_u))
arguments.append(str(N_int))
arguments.append(str(n_object))
arguments.append(str(ob))
arguments.append(str(folder_path))
arguments.append(str(point))
arguments.append(str(validation_size))
if sys.platform == "linux" or sys.platform == "linux2" or sys.platform == "darwin":
arguments.append("\'" + str(setup_properties).replace("\'", "\"") + "\'")
else:
arguments.append(str(setup_properties).replace("\'", "\""))
arguments.append(str(shuffle))
arguments.append(str(cluster))
arguments.append(str(GPU))
arguments.append(str(n_retrain))
if sys.platform == "linux" or sys.platform == "linux2" or sys.platform == "darwin":
if cluster == "true":
string_to_exec = "bsub python3 single_retraining.py "
else:
string_to_exec = "python3 single_retraining.py "
for arg in arguments:
string_to_exec = string_to_exec + " " + arg
print(string_to_exec)
os.system(string_to_exec)
i = i + 1
| [
"roberto.molinaro@sam.math.ethz.ch"
] | roberto.molinaro@sam.math.ethz.ch |
def00a2abdbb12ba52b231da7124685b93516b93 | 23ef81cb94356fd321c07f06dab2877e04131b4d | /yiyuanduobao_shop/migrations/0058_item_proxy_sale_qr_code.py | da3d99780c61df4a84d1c939d53fdc4bb41fd205 | [] | no_license | dongshaohui/one_dolor | 0c688787d8cee42957bec087b74b5ea353cc80fc | 13dea458568152a3913c6f70ecd9a7e1f6e9514e | refs/heads/master | 2020-07-03T03:12:22.409542 | 2016-11-21T08:15:06 | 2016-11-21T08:15:06 | 74,202,604 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 530 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('yiyuanduobao_shop', '0057_item_winner_customer'),
]
operations = [
migrations.AddField(
model_name='item',
name='proxy_sale_qr_code',
field=models.CharField(default=b'', max_length=500, verbose_name='\u672c\u671f\u4ee3\u5356\u4e8c\u7ef4\u7801'),
preserve_default=True,
),
]
| [
"405989455@qq.com"
] | 405989455@qq.com |
27c911a05a5069e2b072829435f0c67bc36b9c08 | 143dcd5d562a2016d77fb39f8996babd66bc2ab5 | /PokerLib/Deck.py | 8480434bb5cec6f3769adb998b186d731a41b263 | [] | no_license | EmotionalPoker/MAS_SimulationOfEmotions | 320c4888a0f9f5df62f4e7b688980feaab23d8ac | 4ab7003f0103777fac7bbdb09ec1b3fb50bc4459 | refs/heads/master | 2020-07-26T09:44:01.412436 | 2020-01-14T21:49:31 | 2020-01-14T21:49:31 | 208,606,980 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,473 | py | # -*- coding: utf-8 -*-
"""
Created on January 1 2020
@author: Hari Vidharth
"""
from PokerLib.Card import *
import random
class Deck:
    """A deck of 52 playing cards built from :class:`Card` objects.

    The deck starts empty; call :meth:`build_deck` to populate it and
    :meth:`shuffle_deck` to randomize the order in place.
    """

    def __init__(self):
        # Cards in deal order; populated by build_deck().
        self.cards = []

    def build_deck(self):
        """Append the 52 standard cards, suit by suit, in rank order.

        Ranks 2-10 keep their integer value; 11-14 are mapped to the face
        symbols "J", "Q", "K", "A" (ace high).
        """
        face_names = {11: "J", 12: "Q", 13: "K", 14: "A"}
        for suit in ["♣", "♦", "♥", "♠"]:
            for value in range(2, 15):
                self.cards.append(Card(face_names.get(value, value), suit))

    def shuffle_deck(self):
        """Shuffle the deck in place.

        Uses random.shuffle (Fisher-Yates), which yields uniformly
        distributed permutations.  The previous hand-rolled loop swapped
        each position with a uniformly random index, a scheme known to
        produce a biased (non-uniform) shuffle.
        """
        random.shuffle(self.cards)

    def return_deck(self):
        """Return the deck as a list of card representations.

        Each entry is whatever ``Card.return_card()`` yields, in the
        deck's current (straight or shuffled) order.
        """
        return [card.return_card() for card in self.cards]
| [
"noreply@github.com"
] | noreply@github.com |
67062b58ee7a9698f0a4144b3ffecee350a032fe | 735f7827a79adebd97b44db5149d5f35dadf2d1b | /backend/world/settings.py | 0fa4392578891a1071617f48fd83edc33c81241d | [] | no_license | letsgogeeky/django-react-world-navigation | b920347290694fb686615e93f18afda4763d0ccb | bccb0458a3432049d67fc611de6af2c0e84de9aa | refs/heads/master | 2023-01-29T14:18:13.203711 | 2021-05-09T16:35:35 | 2021-05-09T16:35:35 | 249,480,773 | 1 | 0 | null | 2023-01-05T17:00:15 | 2020-03-23T16:13:47 | PLpgSQL | UTF-8 | Python | false | false | 3,751 | py | """
Django settings for world project.
Generated by 'django-admin startproject' using Django 3.0.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the key is committed in source; move it to an environment
# variable before any deployment.
SECRET_KEY = 'rzgfj%7tm2=9&+5vaau!t*0(-a72t7+f71s=yz*i$yvw@d)p=o'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = ['backend', '127.0.0.1', 'localhost']


# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'worldapp.apps.WorldappConfig',
    'rest_framework',
    'django_filters',
]

# DRF: limit/offset pagination (10 per page) and django-filter backend.
REST_FRAMEWORK = {
    'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination',
    'PAGE_SIZE': 10,
    'DEFAULT_FILTER_BACKENDS': ('django_filters.rest_framework.DjangoFilterBackend',)
}

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'world.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'world.wsgi.application'


# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
# Active config targets the docker-compose service named "db".
# NOTE(review): credentials are committed in plaintext; consider env vars.

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': 'world',
        'USER': 'ramy',
        'PASSWORD': 'world123',
        'HOST': 'db',
        'PORT': '5432'
    }
}

# Alternative local-host config kept for reference (disabled).
# DATABASES = {
#     'default': {
#         'ENGINE': 'django.db.backends.postgresql_psycopg2',
#         'NAME': 'world',
#         'USER': 'postgres',
#         'PASSWORD': '!@Arch!@34',
#         'HOST': '127.0.0.1',
#         'PORT': '5432'
#     }
# }

# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/

STATIC_URL = '/static/'
| [
"ramy_master99@hotmail.com"
] | ramy_master99@hotmail.com |
991bf09321be69f3c8fa52619262fea573f8454b | 1ec6fe8811cb2b21b68eca7d75ac6b3c88e0f8ba | /Week_07/G20200447010071/sinaComments/sinaComments/settings.py | 73e937a6840e6729277c585f95ea5eb7da5430a8 | [] | no_license | hopeqpy/Python000-class01 | 5f0aa8f3aaba7da97819ec073fd9d16c0cd902e8 | 73b8f8606c5cce0ea8982aed3705ad4cfc70cc70 | refs/heads/master | 2022-06-26T13:36:58.766271 | 2020-05-07T07:24:24 | 2020-05-07T07:24:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,284 | py | # -*- coding: utf-8 -*-
# Scrapy settings for sinaComments project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://docs.scrapy.org/en/latest/topics/settings.html
# https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'sinaComments'

SPIDER_MODULES = ['sinaComments.spiders']
NEWSPIDER_MODULE = 'sinaComments.spiders'

# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'sinaComments (+http://www.yourdomain.com)'

# Obey robots.txt rules
ROBOTSTXT_OBEY = True

# Configure maximum concurrent requests performed by Scrapy (default: 16)
CONCURRENT_REQUESTS = 10

# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
#COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False

# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#   'Accept-Language': 'en',
#}

# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    'sinaComments.middlewares.SinacommentsSpiderMiddleware': 543,
#}

# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
#    'sinaComments.middlewares.SinacommentsDownloaderMiddleware': 543,
#}

# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}

# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    'sinaComments.pipelines.SinacommentsPipeline': 300,
}

# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'

# NOTE(review): LOG_ENABLED=False disables logging entirely, making the
# LOG_LEVEL above ineffective -- confirm whether both are intended.
LOG_LEVEL = 'INFO'
LOG_ENABLED=False

# MySQL connection used by the pipeline.
# NOTE(review): credentials are committed in plaintext.
MYSQL = {
    'ip': '127.0.0.1',
    'username': 'root',
    'password': 'root',
    'db': 'sina_news',
}
"musiteam@musiiot.top"
] | musiteam@musiiot.top |
ec1ef633d00e9670270fe396e2434e18e0fc41ea | 3763802d04372963fdef84f1bd699f08d1a0fc62 | /dm_console.py | a5e0529f4fda4abf7e9fea69abb40fcf4086b73d | [] | no_license | AidanHelmboldTBD/Data_Mining | 4d86ee32abc5c53d54fe9bc1cf370712535bee09 | 39d3d731c6a7379fdd1ed4c19b3183cdda4e20be | refs/heads/master | 2021-01-19T23:26:22.663155 | 2017-04-24T09:07:26 | 2017-04-24T09:07:26 | 88,985,360 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,931 | py | from dm import g_barplot, g_histogram, g_scatter, g_chi2, g_kde
import argparse
import sys
import logging
from pyspark import SparkContext, SQLContext, SparkConf, HiveContext
import pyspark.sql.functions as F
import pyspark.sql.types as T
from pyspark.sql.types import StructType, StructField
from itertools import combinations
import numpy as np
import pandas as pd
def quiet_log(sc):
    """Raise Spark's log4j root-logger threshold to ERROR.

    Reaches through the SparkContext's JVM gateway to the log4j
    LogManager, then returns the same context for call chaining.
    """
    jvm_log4j = sc._jvm.org.apache.log4j
    root_logger = jvm_log4j.LogManager.getRootLogger()
    root_logger.setLevel(jvm_log4j.Level.ERROR)
    return sc
def load_parquet(database, table, quiet):
    # Create a fresh SparkContext/SQLContext and read the Hive-warehouse
    # parquet files for ``database.table`` straight from their HDFS path.
    # Returns (dataframe, SparkContext, SQLContext) so the caller can
    # reuse and eventually stop the context.
    sc = SparkContext()
    if quiet:
        sc = quiet_log(sc)
    sqlContext = SQLContext(sc)
    # Treat parquet binary columns as strings when reading.
    sqlContext.setConf('spark.sql.parquet.binaryAsString', 'True')
    print (database, table)
    return sqlContext.sql('Select * from parquet.`/user/hive/warehouse/{:s}.db/{:s}`'.format(database, table)), sc, sqlContext
# Filesystem location where generated graphs are written, and the HTTP
# URL under which the same directory is served.
path = '/var/lib/hadoop-hdfs/Jannes Test/dm_library/graphs'
http_path = 'http://cdh578egzp.telkom.co.za:8880/files/Jannes%20Test/dm_library/graphs'
def create_table(df, table_name, sqlContext, cols = None, size_limit = 30):
    # Profile each column of ``df`` (unique count, nulls, mean/stddev for
    # numerics, a bar/histogram graph where applicable) and persist the
    # summary as the Hive table ``datamining.<table_name>``.
    #
    # NOTE(review): this is Python 2 code (statement-form prints below),
    # and it relies on a module-global ``sc`` that is only bound when the
    # script is run via the __main__ block -- confirm before importing
    # this function elsewhere.
    df.persist()
    no_plot_cols = []
    output = []
    cols_complete = []
    var_cols = ['colm', 'col_type', 'uniques', 'missing', 'mean', 'stddev', 'graph']
    # Map Spark dtypes onto the coarse categories used below.
    type_dict = {'float':'numeric','long':'numeric', 'integer':'numeric',
        'smallint':'numeric', 'int':'numeric', 'bigint':'numeric', 'string':'categorical',
        'timestamp':'date', 'binary':'indicator','decimal(9,2)':'numeric'}
    if cols == None:
        cols = df.columns
    for c in cols:
        print 'Getting {:s} data'.format(c)
        sys.stdout.flush()
        #print("Producing graphs" + str(col_graphs))
        cols_complete.append(c)
        rem_cols = list(set(df.columns) - set(cols_complete))
        #Initialize columns
        uniq = 0
        null = 0
        mean = 0
        std_dev = 0
        g = 0
        g_path = 0
        col_g = []
        # col_g_paths = []
        # col_g.extend(np.zeros(len(col_graphs)))
        uniq = df.select(c).distinct().count()
        print ('... uniques: {:d}'.format(uniq))
        col_type = df.select(c).dtypes[0][1]
        col_type = type_dict[col_type]
        # Exactly two distinct values -> treat as an indicator column.
        if uniq == 2:
            col_type = 'indicator'
        print ('... column type: {:s}'.format(col_type))
        null = df.where(F.col(c).isNull()).count()
        print ('... nulls: {:d}'.format(null))
        # Small categorical/indicator columns get a bar plot.
        if (uniq < size_limit) & (col_type in ['categorical', 'indicator']):
            g, g_path = g_barplot(df, c)
        # Numeric columns get mean/stddev and a histogram.
        if col_type in ['numeric']:
            df_sum = df.select(c).agg(F.avg(F.col(c)),
                F.stddev(F.col(c))).take(1)
            mean = df_sum[0][0]
            std_dev = df_sum[0][1]
            print ('... numerical summary: {:0.2f}, {:0.2f}'.format(mean, std_dev))
            g, g_path = g_histogram(df, c)
            print('... Single Graph Done')
        output.append(tuple([c, col_type, uniq, null, mean, std_dev, g_path]))
    # 2 factor charts here
    # create the table
    schema_list = [T.StructField("colm", T.StringType(), True),
        T.StructField("col_type", T.StringType(), True),
        T.StructField("uniques", T.IntegerType(), True),
        T.StructField("missing", T.IntegerType(), True),
        T.StructField("mean", T.FloatType(), True),
        T.StructField("stddev", T.FloatType(), True),
        T.StructField("graph", T.StringType(), True) ]
    # graph_schema_list = [T.StructField(x, T.StringType(), True) for x in col_graphs]
    # schema_list.extend(graph_schema_list)
    schema = T.StructType(schema_list)
    print schema
    rdd = sc.parallelize(output)
    hive = HiveContext(sc)
    hive.createDataFrame(rdd, schema=schema)\
        .write.mode('overwrite')\
        .saveAsTable('datamining.' + table_name,format='parquet')
    df.unpersist()
    print '... {:s} saved to cluster'.format(table_name)
    sys.stdout.flush()
if __name__ == '__main__':
    # CLI entry point: profile one warehouse table and save the summary.
    ap = argparse.ArgumentParser()
    ap.add_argument('-db', '--database', help='please provide the database in the cluster',required=True)
    ap.add_argument('-t', '--table', help='please provide the table in the cluster',required=True)
    ap.add_argument('-q', '--quiet', help='silence logging', action='store_true')
    args = vars(ap.parse_args())
    print args
    # ``sc`` bound here is also read as a global inside create_table().
    df, sc, sqlContext = load_parquet(args['database'], args['table'], args['quiet'])
    create_table(df,
        '{:s}_{:s}'.format(args['database'], args['table']),
        sqlContext)
    sc.stop()
| [
"helmboa@telkom.co.za"
] | helmboa@telkom.co.za |
e7e3f19d55f167659b9939895e3c7c8b47ad52da | c6818c06aacb1eca1fffa8bbc51b6f3aac25c177 | /acre/asgi.py | 7a5ee240ac0ce6bd5657ed8a2e6ac3c7e5f609cc | [] | no_license | Acon94/ACRE | 2d0769780c9f81eba05085ffd8b0af225666d6de | 73622a6dc4ba0f30e8d3e90b02d23c8efd14a5e1 | refs/heads/master | 2022-08-02T02:07:53.004308 | 2020-05-29T15:25:50 | 2020-05-29T15:25:50 | 267,840,531 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 385 | py | """
ASGI config for acre project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os

from django.core.asgi import get_asgi_application

# Point Django at the project settings (unless already set) before the
# ASGI callable is constructed.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'acre.settings')

application = get_asgi_application()
| [
"andrew@Andrews-MacBook-Pro.local"
] | andrew@Andrews-MacBook-Pro.local |
ad415f804534293782b6644669084ae9324a02ec | c3b7a8fe4bc39002b30cce9202b9c6a4e7b8a921 | /twitter_bot/scripts/check_followback.py | 557971d243c1832db6be390c24a6a559489ba42c | [
"MIT"
] | permissive | Phosphorus-M/Awesome-twitter-bot | a90489bb1e3b6bcaf053f8e8c137aa038bf8bf0f | 483d26b62b46816b741f99c7641beb5cc8f000a0 | refs/heads/main | 2023-08-11T03:49:05.839801 | 2021-09-14T07:25:30 | 2021-09-14T07:25:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,127 | py | import json
from django.conf import settings
from twitter_data.models import User, Feature
from twitter_data.twitter_bot import TwitterBot
from datetime import datetime, timedelta
def run():
    """Demote a random sample of non-priority accounts that have not
    followed back within the configured time window.

    Configuration comes from two Feature records: TWITTER_CONFIG supplies
    the bot's sleep interval, FOLLOW_BACK supplies the look-back window
    (days) and the sample size.  Each sampled user whose profile is not
    among the bot's followers has its must_follow/must_like/must_rt flags
    cleared and is saved.
    """
    twitter_cfg = Feature.get_feature("TWITTER_CONFIG")
    sleep_time = twitter_cfg.get("sleep_time", 1)

    followback_cfg = Feature.get_feature("FOLLOW_BACK")
    window_days = int(followback_cfg.get("check_time_days", 7))
    sample_size = int(followback_cfg.get("users_to_check", 20))

    bot = TwitterBot(
        settings.CONSUMER_KEY,
        settings.CONSUMER_SECRET,
        settings.ACCESS_TOKEN,
        settings.ACCESS_TOKEN_SECRET,
        sleep_time,
    )

    # Randomly sample non-priority users created before the cutoff.
    cutoff = datetime.now() - timedelta(days=window_days)
    candidates = User.objects.filter(
        priority=False, created_at__lte=cutoff
    ).order_by("?")[:sample_size]

    bot.get_followers()
    for candidate in candidates:
        if bot.check_follow_back(candidate.user_profile):
            continue
        candidate.must_follow = False
        candidate.must_like = False
        candidate.must_rt = False
        candidate.save()
| [
"hectorandrespp@gmail.com"
] | hectorandrespp@gmail.com |
fc4f46a7c5fcfbcef821e98ce66427ec860721bc | 152f163da48e75ae1175621020771b1d2f1e5167 | /c_integration_example/mylib.py | 610958cfd640e9ab4aeec7ea362b82c27990be7b | [] | no_license | jerryhan88/BNC_py | a674d880f30587d157fc120a3abdf6da3e806c7e | 5cab88648f8d51a06baae89677d2ad5eaa3580d4 | refs/heads/master | 2020-07-12T13:01:32.605623 | 2020-02-19T05:54:33 | 2020-02-19T05:54:33 | 204,825,305 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,090 | py | import sys, os
import os.path as opath
import ctypes, ctypes.util
mylibC_path = './mylib.c'
mylibO_path = './mylib.o'
mylibD_path = './mylib.dylib'
def create_dylib():
os.system('clang -c -fPIC %s -o %s' % (mylibC_path, mylibO_path))
os.system('clang -shared %s -o %s' % (mylibO_path, mylibD_path))
os.system('rm %s' % mylibO_path)
if opath.exists(mylibD_path):
if opath.getctime(mylibD_path) < opath.getmtime(mylibC_path):
create_dylib()
else:
create_dylib()
mylib_path = ctypes.util.find_library(mylibD_path[:-len('.dylib')])
if not mylib_path:
print("Unable to find the specified library.")
sys.exit()
try:
mylib = ctypes.CDLL(mylib_path)
except OSError:
print("Unable to load the system C library")
sys.exit()
test_empty = mylib.test_empty
test_add = mylib.test_add
test_add.argtypes = [ctypes.c_float, ctypes.c_float]
test_add.restype = ctypes.c_float
test_passing_array = mylib.test_passing_array
test_passing_array.argtypes = [ctypes.POINTER(ctypes.c_int), ctypes.c_int]
test_passing_array.restype = None
print(test_add(1, 2)) | [
"chungkyun.han@gmail.com"
] | chungkyun.han@gmail.com |
bc770a4a78f1a3e117c15af7a3ea4c7b4937bf1e | 63b0fed007d152fe5e96640b844081c07ca20a11 | /ABC/ABC200~ABC299/ABC291/c.py | 468e2709c60f01b71d7144cca09a88563e9ae6c3 | [] | no_license | Nikkuniku/AtcoderProgramming | 8ff54541c8e65d0c93ce42f3a98aec061adf2f05 | fbaf7b40084c52e35c803b6b03346f2a06fb5367 | refs/heads/master | 2023-08-21T10:20:43.520468 | 2023-08-12T09:53:07 | 2023-08-12T09:53:07 | 254,373,698 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 373 | py | from collections import defaultdict
N = int(input())
S = input()
d = defaultdict(lambda: False)
d[(0, 0)] = True
nx, ny = 0, 0
ans = 'No'
for i in range(N):
s = S[i]
if s == 'R':
nx += 1
elif s == 'L':
nx -= 1
elif s == 'U':
ny += 1
else:
ny -= 1
if d[(nx, ny)]:
ans = 'Yes'
d[(nx, ny)] = True
print(ans)
| [
"ymdysk911@gmail.com"
] | ymdysk911@gmail.com |
bf0641beb73b4e0752e6d0ccef6dbc7a58ac5397 | e2fc43e14c90bb853d67a92bdebf772231ec4c9f | /app/helpers.py | 447f25b23094433d5278368885593f31f58e5701 | [] | no_license | aorticweb/hatchway-backend | fcad4b9d579e01bd68456e0153bbf8a4b003bb15 | 439256ce0b18c04b0e27fe22a444a89d223292c8 | refs/heads/master | 2023-04-29T18:56:16.479272 | 2021-05-11T18:05:24 | 2021-05-11T18:05:24 | 354,562,388 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,465 | py | import itertools
from typing import List
import ray
import requests
import requests_cache
from app import config
if not ray.is_initialized():
ray.init()
requests_cache.install_cache("hatchway_cache", backend="sqlite", expire_after=180)
class Source:
    """
    Wrapper around url calls to the posts data source.

    A single requests session is reused for all calls made through one
    instance.
    """

    def __init__(self, base: str = config.SOURCE_URL):
        self._session = requests.session()
        self.base = base

    def get_data(self, tag: str):
        """
        Perform the url call to get posts data for ``tag``.

        Returns the decoded JSON payload, or an empty dict on any
        unsuccessful response.  (Previously only a 400 was handled; any
        other error status fell through to ``resp.json()``.)
        """
        resp = self._session.get(self.base, params={"tag": tag})
        if not resp.ok:
            # TODO: log the failing status code and tag here
            return {}
        return resp.json()
@ray.remote
def get_post_by_tag(tag: str):
    """
    Ray remote function to get posts data for a tag.

    Returns the ``posts`` list from the source payload, or an empty list
    when the payload has none (e.g. after a failed request).
    """
    return Source().get_data(tag=tag).get("posts", [])
def param_validation(tags: str, sort_by: str, direction: str):
    """
    Validate the tags, sortBy and direction url parameters passed in a
    request.

    Returns an ``(has_error, message)`` tuple: ``(True, <reason>)`` for
    the first failing check, ``(False, "")`` when all three are valid.
    """
    # tags is a (possibly empty) comma-separated string.
    if not tags:
        return True, "Tags parameter is required"
    if sort_by not in config.VALID_SORTS:
        return True, "sortBy parameter is invalid"
    # For consistency only the two canonical direction spellings pass.
    if direction not in ("asc", "desc"):
        return True, "direction parameter is invalid"
    return False, ""
ListOfListOfDict = List[List[dict]]
def _filter_posts(posts: ListOfListOfDict, sort: str, direction: str):
"""
Concat lists of lists of posts from multiple api requests into a single list,
remove duplicate posts from list and sort list according to sort field and direction
"""
posts = list(itertools.chain.from_iterable(posts))
# Unique posts
seen = set()
unique_posts = []
for p in posts:
if p["id"] not in seen:
seen.add(p["id"])
unique_posts.append(p)
# Sort posts
reverse = direction == "desc"
posts = sorted(unique_posts, key=lambda x: x.get(sort, ""), reverse=reverse)
return posts
def _get_posts(tags :str):
    """
    Perform api calls (one per tag) concurrently to get posts data.
    """
    # Fan the per-tag fetches out as ray remote tasks, then block until
    # all complete; the result is a list of per-tag post lists.
    tags = tags.split(",")
    posts = [get_post_by_tag.remote(t) for t in tags]
    posts = ray.get(posts)
    return posts
def get_posts(tags :str, sort_by :str, direction :str):
"""
get post and filter posts data
"""
posts = _get_posts(tags)
return _filter_posts(posts, sort_by, direction) | [
"williamkamgne@gmail.com"
] | williamkamgne@gmail.com |
b4e0bf30738df3d12eacd8276d511194095699dd | 0a544baa0c37d53714dcbdeb1bef602ad2899d98 | /pySOT/experimental_design.py | 940ab34b8e8e42085e2110ec012d99f34f2dfb85 | [
"BSD-3-Clause"
] | permissive | evayang234/pySOT | ace424171d6af06ad3a085bc652b93a1e954d06b | d0899e82a386415ffa2550ea0a56c5de7bffefb1 | refs/heads/master | 2020-12-28T14:48:35.310695 | 2015-10-29T03:49:29 | 2015-10-29T03:49:29 | 45,196,595 | 0 | 0 | null | 2015-10-29T16:40:11 | 2015-10-29T16:40:11 | null | UTF-8 | Python | false | false | 3,758 | py | """
.. module:: experimental_design
:synopsis: Methods for generating an experimental design.
.. moduleauthor:: David Eriksson <dme65@cornell.edu>,
Yi Shen <ys623@cornell.edu>
:Module: experimental_design
:Author: David Eriksson <dme65@cornell.edu>,
Yi Shen <ys623@cornell.edu>
"""
import numpy as np
import pyDOE as pydoe
class LatinHypercube(object):
"""Latin Hypercube experimental design
:ivar dim: Number of dimensions
:ivar npts: Number of desired sampling points
:ivar criterion: A string that tells lhs how to sample the
points (default: None which simply randomizes the points
within the intervals):
- "center" or "c": center the points within the sampling intervals
- "maximin" or "m": maximize the minimum distance
between points, but place the point in a randomized
location within its interval
- "centermaximin" or "cm": same as "maximin", but
centered within the intervals
- "correlation" or "corr": minimize the maximum
correlation coefficient
"""
def __init__(self, dim, npts, criterion='c'):
self.dim = dim
self.npts = npts
self.criterion = criterion
def generate_points(self):
"""Generate a matrix with the initial sample points,
scaled to the unit cube
:return: Latin hypercube design in the unit cube
"""
return pydoe.lhs(self.dim, self.npts, self.criterion)
class SymmetricLatinHypercube(object):
"""Symmetric Latin Hypercube experimental design
:ivar dim: Number of dimensions
:ivar npts: Number of desired sampling points
"""
def __init__(self, dim, npts):
self.dim = dim
self.npts = npts
def _slhd(self):
"""Generate matrix of sample points in the unit box"""
# Generate a one-dimensional array based on sample number
points = np.zeros([self.npts, self.dim])
points[:, 0] = np.arange(1, self.npts+1)
# Get the last index of the row in the top half of the hypercube
middleind = self.npts//2
# special manipulation if odd number of rows
if self.npts % 2 == 1:
points[middleind, :] = middleind + 1
# Generate the top half of the hypercube matrix
for j in range(1, self.dim):
for i in range(middleind):
if np.random.random() < 0.5:
points[i, j] = self.npts-i
else:
points[i, j] = i + 1
np.random.shuffle(points[:middleind, j])
# Generate the bottom half of the hypercube matrix
for i in range(middleind, self.npts):
points[i, :] = self.npts + 1 - points[self.npts - 1 - i, :]
return points/self.npts
def generate_points(self):
"""Generate a matrix no rank deficiency with the initial
sample points, scaled to the unit cube
:return: Symmetric Latin hypercube design in the unit cube
"""
rank_pmat = 0
pmat = np.ones((self.npts, self.dim+1))
xsample = None
while rank_pmat != self.dim + 1:
xsample = self._slhd()
pmat[:, 1:] = xsample
rank_pmat = np.linalg.matrix_rank(pmat)
return xsample
# ========================= For Test =======================
def _main():
print("========================= LHD =======================")
lhs = LatinHypercube(4, 10, criterion='c')
print(lhs.generate_points())
print("\n========================= SLHD =======================")
slhd = SymmetricLatinHypercube(3, 10)
print(slhd.generate_points())
if __name__ == "__main__":
_main()
| [
"dme65@cornell.edu"
] | dme65@cornell.edu |
f4c50198426a22de4657d97af5065df4920d777b | 4f111dfacab0acc93900e7746538f85e0b3d8d78 | /day3/01关系运算符.py | 7daa883448340f101de2cd7477971865e50ce034 | [] | no_license | ljxproject/basecode | 5541f25cfe90d5fad26eac0b6e72802aa1fad1f4 | 485e4b41593839bfc61e67261247fb88dc80cc1d | refs/heads/master | 2020-03-26T16:16:26.422617 | 2018-08-17T08:05:11 | 2018-08-17T08:05:11 | 145,091,118 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 310 | py | '''
关系元算符与关系表达式
关系运算符有:
> < == != >= <=
格式: 表达式1 关系运算符 表达式2
功能: 运算表达式1与表达式2的值,
值: 如果关系成立,则返回True,否则False
'''
num1 = 2
num2 = 5
mum3 = num1 != num2
print(mum3)
print(num1 != num2)
| [
"403496369@qq.com"
] | 403496369@qq.com |
8e4afde0ad3d7cdf9500900a9d52568869e8ccec | b9d7194bb50a01e7e56d19ba2f3c048084af54b5 | /_OLD_/bottle.py | 8171ee3221df8251f6911cd57ccc179a1fc2edcf | [] | no_license | BernardoGO/TCC---2017 | 099e72d788974446b58fe5f409a2df25e3613cc5 | 75025e095956624470c22d8f3118441d5c28bdd7 | refs/heads/master | 2018-12-04T10:53:07.809161 | 2018-09-06T04:59:30 | 2018-09-06T04:59:30 | 64,803,675 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,571 | py | import numpy as np
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dropout, Flatten, Dense
from keras import applications
from keras.models import save_model, load_model
# dimensions of our images.
img_width, img_height = 150, 150
top_model_weights_path = 'bottleneck_fc_model.h5'
train_data_dir = 'data/train'
validation_data_dir = 'data/validation'
nb_train_samples = 150000
nb_validation_samples = 24000
epochs = 50
batch_size = 16
def save_bottlebeck_features():
datagen = ImageDataGenerator(rescale=1. / 255)
# build the VGG16 network
model = applications.VGG16(include_top=False, weights='imagenet')
generator = datagen.flow_from_directory(
train_data_dir,
target_size=(img_width, img_height),
batch_size=batch_size,
class_mode=None,
shuffle=False)
bottleneck_features_train = model.predict_generator(
generator, nb_train_samples // batch_size)
np.save(open('bottleneck_features_train.npy', 'wb'),
bottleneck_features_train)
generator = datagen.flow_from_directory(
validation_data_dir,
target_size=(img_width, img_height),
batch_size=batch_size,
class_mode=None,
shuffle=False)
bottleneck_features_validation = model.predict_generator(
generator, nb_validation_samples // batch_size)
np.save(open('bottleneck_features_validation.npy', 'wb'),
bottleneck_features_validation)
def train_top_model():
train_data = np.load(open('bottleneck_features_train.npy', "rb"))
train_labels = np.array(
[0] * (nb_train_samples // 2) + [1] * (nb_train_samples // 2))
validation_data = np.load(open('bottleneck_features_validation.npy', "rb"))
validation_labels = np.array(
[0] * (nb_validation_samples // 2) + [1] * (nb_validation_samples // 2))
model = Sequential()
model.add(Flatten(input_shape=train_data.shape[1:]))
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='rmsprop',
loss='binary_crossentropy', metrics=['accuracy'])
model.fit(train_data, train_labels,
epochs=epochs,
batch_size=batch_size,
validation_data=(validation_data, validation_labels))
model.save_weights(top_model_weights_path)
#model.load_weights(top_model_weights_path)
#save_model(model, "model1111.h5")
save_bottlebeck_features()
train_top_model()
| [
"bernardo.godinho.oliveira@gmail.com"
] | bernardo.godinho.oliveira@gmail.com |
204577af9b5d3ab3153806385f5886097d1e9c0c | 0753b80a7c3289c6239764b75704cd11d070ff39 | /python/hand_write.py | d10d2db56506a951bbcdf31ced5f87f23eadf167 | [] | no_license | cristianfreire/workspace | de4b7b485cf8a4f0af60094e30dcd4c6727a757f | 3acc8aef96235fcc1a1b730cd785821a0b751248 | refs/heads/master | 2023-08-23T00:57:43.011187 | 2021-10-30T19:23:07 | 2021-10-30T19:23:07 | 422,774,828 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 153 | py | from pywhatkit.chr_to_handwriting import text_to_handwriting
texto = input('enter the text here: ')
#pywhatkit.text_to_handwriting(text, rgb=[0,255,0])
| [
"cristianpassos51@gmail.com"
] | cristianpassos51@gmail.com |
0b231fcc73526c6bd8bb5185239f91dd9e68d7cf | 13edd8f1bc3b86fd881f85fbeafe94811392d7fc | /fourth_module/多线程多进程/new/多线程/01 开启线程的两种方式.py | cbdb541d1193f3e8f003cc5d85896cfbaa111812 | [] | no_license | ryan-yang-2049/oldboy_python_study | f4c90c9d8aac499e1d810a797ab368217f664bb1 | 6e1ab7f217d9bf9aa7801266dee7ab4d7a602b9f | refs/heads/master | 2022-07-22T23:49:28.520668 | 2019-06-11T13:26:25 | 2019-06-11T13:26:25 | 129,877,980 | 0 | 1 | null | 2022-07-18T17:12:54 | 2018-04-17T09:12:48 | HTML | UTF-8 | Python | false | false | 643 | py | # -*- coding: utf-8 -*-
"""
__title__ = '01 开启线程的两种方式.py'
__author__ = 'yangyang'
__mtime__ = '2018.02.07'
"""
from threading import Thread
import os,time
# def task(name):
# print("%s is running,PID: %s" % (name,os.getpid()))
#
# if __name__ == '__main__':
# p = Thread(target=task,args=('ryan',))
# p.start()
# print("主线程,PID:%s"%os.getpid())
class MyThread(Thread):
def __init__(self,name):
super().__init__()
self.name = name
def run(self):
print("%s is running,PID: %s"%(self.name,os.getpid()))
if __name__ == '__main__':
obj = MyThread('ryan')
obj.start()
print("主线程,PID: %s"%os.getpid()) | [
"11066986@qq.com"
] | 11066986@qq.com |
26e94e33c7d3dda0924333d6df8e6e72572d6ac1 | a842f224d1b0c2e74b2043e8d03f49e3298086df | /grep_scales.py | 2b83cfe14e04d138314104c9309a15a7056c7411 | [] | no_license | ronsengupta/grep-scales | 68f8037171cdfd3f43c02d3d77f4f633e4196856 | 5740902b4694ae8d1cdee04e213f41c3d99bc428 | refs/heads/master | 2020-06-12T23:00:48.071262 | 2016-04-10T08:48:04 | 2016-04-10T08:48:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,232 | py | from shutit_module import ShutItModule
class grep_scales(ShutItModule):
def build(self, shutit):
afile = r'''THIS LINE IS THE 1ST UPPER CASE LINE IN THIS FILE.
this line is the 1st lower case line in this file.
This Line Has All Its First Character Of The Word With Upper Case.
Two lines above this line is empty.
And this is the last line.
'''
shutit.send_file('afile',afile)
shutit.send('alias grep=grep')
afile_message = '''I have created a file called 'afile' that looks like this:
BEGINS
''' + afile + '''
ENDS
'''
follow_on_context={'check_command':'ls','context':'docker'}
#shutit.challenge('move file afile to filename: 1',challenge_type='golf',expect='1',follow_on_context=follow_on_context)
shutit.challenge(afile_message + '''
For your first task, grep out the last line, ie the one that reads: 'And this is the last line.'.''','And this is the last line.',hints=['last','grep last afile'])
shutit.golf(afile_message + 'Return a count of the number of lines with "UPPER" in it (case sensitive)','1',hints=['-c','ask again to get answer','grep -c UPPER afile'])
shutit.golf(afile_message + 'Return a count of the number of lines with "UPPER" in it (case insensitive)','2',hints=['-c','-i','ask again to get answer','grep -c -i UPPER afile'])
shutit.golf(afile_message + 'Return lines that have the word "in" in it (case insensitive)','264200b0557e7c2e75cffc57778311f4',expect_type='md5sum',hints=['-w','-i','ask again to get answer','grep -w -i in afile'])
shutit.golf(afile_message + '''Return lines that DON'T have the word 'case' (case insensitive) in it.''','ca75d0d8558569109e342ac5e09c4d01',expect_type='md5sum',hints=['-v','-i','ask again to get answer','grep -v case afile'])
shutit.golf(afile_message + '''Return line with "UPPER" in it, along with the line number.''','cc9246de53156c4259be5bf05dacadf6',expect_type='md5sum',hints=['-n','ask again to get answer','grep -n UPPER afile'])
shutit.golf(afile_message + 'Print the line after the empty line.','63b6f5fd46648742a6f7aacff644dd92',expect_type='md5sum',hints=['-A','-A1','ask again to get answer','grep -A1 ^$ afile'])
shutit.golf(afile_message + 'Print the two lines that come before the first line with nothing in it.','444cc6679be200fc6579678b6afe19e9',expect_type='md5sum',hints=['-B','-B2','^$ to match the empty line','ask again to get answer','grep -B2 ^$ afile'])
shutit.golf(afile_message + 'Print the line before the empty line, the empty line, and the line after.','7ba4233c4599e0aefd11e93a66c4bf17',expect_type='md5sum',hints=['-C','-C1','ask again to get answer','grep -C1 ^$ afile'],congratulations='Well done, all done!')
#-o, --only-matching Print only the matched (non-empty) parts of a matching line, with each such part on a separate output line.
#-l, --files-with-matches Suppress normal output; instead print the name of each input file from which output would normally have been printed. The scanning will stop on the first match.
#-r
#-e
return True
def module():
return grep_scales(
'tk.shutit.grep_scales.grep_scales', 1845506479.0001,
description='Practice your grep scales!',
maintainer='ian.miell@gmail.com',
delivery_methods=['docker'],
depends=['shutit.tk.setup']
)
| [
"ian.miell@gmail.com"
] | ian.miell@gmail.com |
26e48b27df9cabe1bcb4e3351346367719f93918 | f5881267633bf6cc24752c397d4a598ee9015883 | /legendre.py | e2c06acb81c7694f366f6b5dcddd93db332b8651 | [] | no_license | Deveshnie/Tutorials | a27bda3abed0f10f305b83f36412b9ee1d643afc | ce0c2cb8060d46f0258d262266a83f03ae356149 | refs/heads/master | 2021-01-21T04:35:30.068446 | 2016-06-21T11:19:39 | 2016-06-21T11:19:39 | 55,842,881 | 0 | 0 | null | 2016-04-09T14:30:50 | 2016-04-09T12:21:41 | null | UTF-8 | Python | false | false | 738 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Jun 15 14:17:20 2016
@author: Deveshnie
"""
import numpy as np
def get_legendre(x,order):
mat=np.zeros([x.size,order])
mat[:,0]=1.0
if order>1:
mat[:,1]=x
for i in range(1,order-1):
mat[:,i+1]=((2.0*i+1)*x*mat[:,i]-i*mat[:,i-1])/(i+1.0)
return np.matrix(mat)
if __name__=='__main__':
x=np.arange(-1,1,0.01)
for order in np.arange(1,11,1):
mat=get_legendre(x,order)
y=np.exp(get_legendre(x,10))
A=(get_legendre(x,10))
fitp=np.linalg.inv(A.transpose()*A)*A.transpose()*np.matrix(y)
err=np.abs(np.mean(y))
pred=A*fitp
print err
print fitp
| [
"noreply@github.com"
] | noreply@github.com |
d48b8afce0c65c253461d749a9f39c508502e23b | cbf70512796ba1bbf0c89897691e3ab107399c79 | /mmpose/datasets/datasets/mesh/mosh_dataset.py | 40692575a5b6fbf797c5f4a3b9fa1890d3478ab4 | [
"Apache-2.0"
] | permissive | yaochaorui/mmpose | 0294f39a596d091746a9220eff0c473bfdc5c875 | 056d8db55373e933a971eadc66f92f1d1e773332 | refs/heads/master | 2022-12-26T04:33:26.296987 | 2020-10-04T14:24:29 | 2020-10-04T14:24:29 | 296,587,521 | 0 | 0 | Apache-2.0 | 2020-09-18T10:20:06 | 2020-09-18T10:20:05 | null | UTF-8 | Python | false | false | 1,961 | py | import copy as cp
from abc import ABCMeta
import numpy as np
from torch.utils.data import Dataset
from mmpose.datasets.builder import DATASETS
from mmpose.datasets.pipelines import Compose
@DATASETS.register_module()
class MoshDataset(Dataset, metaclass=ABCMeta):
"""Mosh Dataset for the adversarial training in 3D human mesh estimation
task.
The dataset return a dict containing real-world SMPL parameters.
Args:
ann_file (str): Path to the annotation file.
pipeline (list[dict | callable]): A sequence of data transforms.
test_mode (bool): Store True when building test or
validation dataset. Default: False.
"""
def __init__(self, ann_file, pipeline, test_mode=False):
self.annotations_path = ann_file
self.pipeline = pipeline
self.test_mode = test_mode
self.db = self._get_db(ann_file)
self.pipeline = Compose(self.pipeline)
def _get_db(self, ann_file):
"""Load dataset."""
data = np.load(ann_file)
_betas = data['shape'].astype(np.float32)
_poses = data['pose'].astype(np.float32)
tmpl = dict(
pose=None,
beta=None,
)
gt_db = []
dataset_len = len(_betas)
for i in range(dataset_len):
newitem = cp.deepcopy(tmpl)
newitem['pose'] = _poses[i]
newitem['beta'] = _betas[i]
gt_db.append(newitem)
return gt_db
def __len__(self, ):
"""Get the size of the dataset."""
return len(self.db)
def __getitem__(self, idx):
"""Get the sample given index."""
item = cp.deepcopy(self.db[idx])
trivial, pose, beta = \
np.zeros(3, dtype=np.float32), item['pose'], item['beta']
results = {
'mosh_theta':
np.concatenate((trivial, pose, beta), axis=0).astype(np.float32)
}
return self.pipeline(results)
| [
"noreply@github.com"
] | noreply@github.com |
dc9f52ec800d1c7438118f86a1ffb7ec303d5138 | 26f27599e989d1b970150e87ceb5befa460fff3e | /run_case.py | 2fd5906a01370594b65e90aab79c70b309532d61 | [] | no_license | zhaozhiquan/iOS-ui-automation | 740dbb72ae5200fc291503e3b5f7313a10cfd09e | 09e2ac4efc163ad3f7768a38d4b7baa12db1b542 | refs/heads/master | 2021-06-17T03:36:40.883619 | 2017-03-28T10:48:18 | 2017-03-28T10:48:18 | 83,384,286 | 10 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,295 | py | #!/usr/bin/env python
#-*- coding: utf-8 -*-
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
import unittest
import HTMLTestRunner1 as HTMLTestRunner
import time
import os
import shutil
casepath = ".//TestCase"
#casepath = '/Users/zhaozhiquan/automation/AndroidSdk/TestCase'
def Creatsuite():
#定义单元测试容器
testunit = unittest.TestSuite()
#定搜索用例文件的方法
discover = unittest.defaultTestLoader.discover(casepath, pattern='test06*', top_level_dir=None)
#将测试用例加入测试容器中
for testsuite in discover:
for casename in testsuite:
testunit.addTest(casename)
print testunit
return testunit
test_case = Creatsuite()
#获取系统当前日期
day = time.strftime('%Y-%m-%d')
#定义个报告存放路径,支持相对路径
aaa=os.path.exists('./result/'+day)
if aaa:
shutil.rmtree('./result/'+day)
os.mkdir('./result/'+day)
os.mkdir('./result/'+day+'/screencap')
filename = './result/'+day+'/result.html'
fp = file(filename, 'wb')
#定义测试报告
runner = HTMLTestRunner.HTMLTestRunner(stream=fp, title=u'iOS联运sdk测试报告', description=u'用例执行情况:')
#运行测试用例
runner.run(test_case)
fp.close() #关闭报告文件
| [
"zhaozhiquan@zhaozhiquandeMac-mini.local"
] | zhaozhiquan@zhaozhiquandeMac-mini.local |
0646a4549b39129de0a15f26baa43501a289a487 | 498a2557e9c3a2c6b276d65cb711f5820a55b4e1 | /pybb/migrations/0001_initial.py | 0e2df21edaad1953fa81edc925ac0426b9d9dd9c | [
"Apache-2.0"
] | permissive | Burrito-Bazooka/logos-v2 | 3084f1eeeff6b3038b3c9f9c943a6e185f8a7b16 | 4046044fdc21da824e940334f25392fbd9b82181 | refs/heads/master | 2020-04-08T17:45:48.989475 | 2016-05-24T19:02:14 | 2016-05-24T19:02:14 | 61,166,115 | 0 | 0 | null | 2016-06-15T00:49:44 | 2016-06-15T00:49:43 | null | UTF-8 | Python | false | false | 1,160 | py | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Category'
db.create_table('pybb_category', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=100)),
('position', self.gf('django.db.models.fields.IntegerField')(default=0, db_index=True, blank=True)),
))
db.send_create_signal('pybb', ['Category'])
def backwards(self, orm):
# Deleting model 'Category'
db.delete_table('pybb_category')
models = {
'pybb.category': {
'Meta': {'object_name': 'Category'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'position': ('django.db.models.fields.IntegerField', [], {'default': '0', 'db_index': 'True', 'blank': 'True'})
}
}
complete_apps = ['pybb'] | [
"kiwiheretic@myself.com"
] | kiwiheretic@myself.com |
00d288a2b6044bd45e41cb8a04842120a28cf531 | 90047daeb462598a924d76ddf4288e832e86417c | /chromecast/browser/DEPS | c273dc2c7d0751e9b9e547fd0285090933fa1b4b | [
"BSD-3-Clause"
] | permissive | massbrowser/android | 99b8c21fa4552a13c06bbedd0f9c88dd4a4ad080 | a9c4371682c9443d6e1d66005d4db61a24a9617c | refs/heads/master | 2022-11-04T21:15:50.656802 | 2017-06-08T12:31:39 | 2017-06-08T12:31:39 | 93,747,579 | 2 | 2 | BSD-3-Clause | 2022-10-31T10:34:25 | 2017-06-08T12:36:07 | null | UTF-8 | Python | false | false | 990 | include_rules = [
"+cc/base/switches.h",
"+chromecast/common",
"+chromecast/graphics",
"+chromecast/app/grit/chromecast_settings.h",
"+chromecast/app/resources/grit/shell_resources.h",
"+chromecast/media",
"+chromecast/net",
"+chromecast/service",
"+components/cdm/browser",
"+components/crash",
"+components/network_hints/browser",
"+components/prefs",
"+components/proxy_config",
"+content/public/android",
"+content/public/browser",
"+content/public/common",
"+content/public/test",
"+device/geolocation",
"+gin/v8_initializer.h",
"+gpu/command_buffer/service/gpu_switches.h",
"+media/audio",
"+media/base",
"+media/mojo",
"+mojo/public",
"+net",
"+services/service_manager/public",
"+ui/aura",
"+ui/base",
"+ui/compositor",
"+ui/events",
"+ui/gfx",
"+ui/gl",
"+ui/display",
"+ui/ozone/platform/cast/overlay_manager_cast.h",
# TODO(sanfin): Remove this by fixing the crash handler on android.
"!chromecast/app",
]
| [
"xElvis89x@gmail.com"
] | xElvis89x@gmail.com | |
da7ad20af508871997836695e88be46cea160ed8 | d5481fc03a8b243312e5bcf67d44f6c7788a7081 | /coursework/services/scrapper/__init__.py | 7f9df60d8c13c2b78fc92b451d17d49f237ebbfa | [] | no_license | yklym/databases-2 | c2bea85f209cf4e2cb08517e335d43840674196f | af0304cb2fd3eeef34306e4d41f60d18f547008e | refs/heads/main | 2023-05-22T07:59:20.097891 | 2021-06-11T21:11:37 | 2021-06-11T21:11:37 | 336,562,906 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 30 | py | from .scrapper import Scrapper | [
"yaroslav.klymenko@binary-studio.com"
] | yaroslav.klymenko@binary-studio.com |
e249ef353906868c69319e6708152df045730d43 | 728c2b90fc4b0b017a8a7b0f4262a18bb9c4a82d | /hello | 4ee0a44122ec326e9137c49e39fd29bed11a0b78 | [] | no_license | jasonBirchall/ecs-codepipeline | 91def43c0252f46fc03f112f13e7e54b06e0b3d1 | 100e1732f7edc0f36023e936ca0be380fa35c6de | refs/heads/master | 2021-05-15T03:20:42.927958 | 2018-01-02T10:58:51 | 2018-01-02T10:58:51 | 115,045,452 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 343 | #!/usr/bin/env python
from socket import gethostname
from circuits.web import Server, Controller
class Root(Controller):
def index(self):
return "Hello Presentation!"
def hostname(self):
return gethostname()
def main():
(Server(("0.0.0.0", 80)) + Root()).run()
if __name__ == "__main__":
main()
| [
"jason.birchall@digital.justice.gov.uk"
] | jason.birchall@digital.justice.gov.uk | |
61302f16e42eb705b67baa1b9802ba9c5a24bfc9 | 68ed2de5e338321e8ba789fc00bf2ddd649cbc66 | /MediaCenter/views.py | 81fb84e5b8cbc0a9f26340e8daad83d8453919c9 | [] | no_license | jonmetz/piHomeServer | 3ece86a67cf47833da5fbc8dda0f8bfc15f8efec | 2aadc6deed142395fe88e9f7130ff231a7f06449 | refs/heads/master | 2021-01-20T02:15:39.023520 | 2013-10-13T10:26:35 | 2013-10-13T10:26:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,884 | py | from django.shortcuts import render
from subprocess import Popen, PIPE
import eyed3
import os
# Since the player process's stdin is going to be written to by functions that are called independent of one another, they cannot be pure
# player_process will be passed between functions as a global variable
player_process = None
class Song:
# Contains the important information about a particular song, what one would usually see in some sort of music player app, also contains
# url and path to file
# TODO find out how the hell to handle a file with actual underscores
def __init__(self, filename, directory):
self.filename = filename
self.directory = directory
self.url = '/MediaCenter/MusicLibrary/play/'+replace_all(filename, ' ','_')+'/'
tags = eyed3.load(directory + filename).tag
self.artist = tags.artist
self.album = tags.album
self.title = tags.title
self.track_number = tags.track_num
def get_songs(path):
# Get list of song objects from the /Media/music directory
# Search directory for song files
song_files = [files for dirpath, dirnames, files in os.walk(os.path.abspath(path))][0]
# return song objects created from each file (excluding the logfiles created by omxplayer
return [Song(filename, path) for filename in song_files if filename != 'omxplayer.log' and filename != 'omxplayer.old.log']
def replace_all(string, target, replacement):
# Hackish, ugly way of finding all instances of 'target' in 'string' and replacing them with 'replacement'
no_target = string.split(target)
if len(no_target) > 1:
new_string = ''
l_nt=len(no_target)
for substring_number in range(0,l_nt-1):
new_string += no_target[substring_number]+replacement
new_string += no_target[substring_number+1]
else:
new_string = string
return new_string
def media_center(request):
return render(request, 'MediaCenter.html', {})
def music_library(request):
# Get better way to find path (using os)
song_list = get_songs('/home/pi/piHomeServer/Media/music/')
return render(request, 'MusicLibrary.html', {'song_list' : song_list})
def play_media(media_path):
return Popen(["omxplayer", media_path], stdin=PIPE, stdout=PIPE)
def music_player(request, song):
action = None
global player_process
if not player_process:
if '_' in song:
formatted_song = replace_all(song, '_',' ')
else:
formatted_song = song
player_process = play_media('/home/pi/piHomeServer/Media/music/'+formatted_song)
else:
if request.GET and 'action' in request.GET:
action = request.GET['action']
print('action %s' % action)
if not action:
action = False
elif action == 'pause':
player_process.stdin.write('p')
elif action == 'play':
player_process.stdin.write('p')
action = None
# entering the arrow keys in omxplayer's stdin causes the following actions
elif action == 'stop':
player_process.stdin.write('^[[A')
player_process = None
# Maybe just redirect the url
return music_library(request)
# What next, figure this out
elif action == 'next':
pass
elif action == 'last':
player_process.stdin.write('^[[B')
elif action == 'fastforward':
player_process.stdin.write('^[[C')
elif action == 'rewind':
player_process.stdin.write('^[[D')
else:
action == 'invalid'
else:
print('action not in url')
return render(request, 'MusicPlayer.html', {'song':song, 'action':action})
| [
"jon.metzman@gmail.com"
] | jon.metzman@gmail.com |
00728e4101b62fa2bf7ba2c3784d4576344c6cc3 | d5b3de6729e165bddcc17b8c3c285df808cd9fd0 | /application/modules/fonction/views_fct.py | 209fd03dd4976dbac54b11d2915ca69f51eb9231 | [] | no_license | wilrona/Gesacom | 907848d44d9fa1a285b5c7a452c647fc6cbbc2fa | 31ec26c78994030844f750039a89a43a66d61abf | refs/heads/master | 2020-04-06T15:00:36.522832 | 2016-09-08T13:19:06 | 2016-09-08T13:19:06 | 49,956,069 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,146 | py | __author__ = 'Ronald'
from ...modules import *
from models_fct import Fonction
from forms_fct import FormFonction
# Flask-Cache (configured to use App Engine Memcache API)
cache = Cache(app)
prefix = Blueprint('fonction', __name__)
@prefix.route('/fonction')
@login_required
@roles_required([('super_admin', 'fonction')])
def index():
menu = 'societe'
submenu = 'entreprise'
context = 'fonction'
title_page = 'Parametre - Fonctions'
search = False
q = request.args.get('q')
if q:
search = True
try:
page = int(request.args.get('page', 1))
except ValueError:
page = 1
datas = Fonction.query()
pagination = Pagination(css_framework='bootstrap3', page=page, total=datas.count(), search=search, record_name='fonctions')
if datas.count() > 10:
if page == 1:
offset = 0
else:
page -= 1
offset = page * 10
datas = datas.fetch(limit=10, offset=offset)
return render_template('fonction/index.html', **locals())
@prefix.route('/fonction/edit', methods=['GET', 'POST'])
@prefix.route('/fonction/edit/<int:fonction_id>', methods=['GET', 'POST'])
@login_required
@roles_required([('super_admin', 'fonction')], ['edit'])
def edit(fonction_id=None):
if fonction_id:
grades = Fonction.get_by_id(fonction_id)
form = FormFonction(obj=grades)
else:
grades = Fonction()
form = FormFonction()
success = False
if form.validate_on_submit():
grades.libelle = form.libelle.data
grades.put()
flash('Enregistement effectue avec succes', 'success')
success = True
return render_template('fonction/edit.html', **locals())
@prefix.route('/fonction/delete/<int:fonction_id>')
@login_required
@roles_required([('super_admin', 'fonction')], ['edit'])
def delete(fonction_id):
fonctions = Fonction.get_by_id(fonction_id)
if not fonctions.count():
fonctions.key.delete()
flash('Suppression reussie', 'success')
else:
flash('Impossible de supprimer', 'danger')
return redirect(url_for('fonction.index')) | [
"wilrona@gmail.com"
] | wilrona@gmail.com |
6a5206bf620b8cc36abb77da8111418a16762c14 | 3845a30f9c37994855d1dfe866276f9c2569d78f | /p3.py | a55a6c453cfb2ecb57a7d349ef97a24e01273f4f | [
"BSD-2-Clause",
"BSD-3-Clause"
] | permissive | nabilhassein/project-euler | 767200f9a4595cf967ea453817e648fe0a812a1c | ed01886a2d87ad93ade7b9f87da3b07f8dec0f2a | refs/heads/master | 2020-04-25T03:53:04.766538 | 2013-08-26T23:18:46 | 2013-08-26T23:18:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 838 | py | # Largest prime factor
# Problem 3
# The prime factors of 13195 are 5, 7, 13 and 29.
# What is the largest prime factor of the number 600851475143?
### END PROBLEM STATEMENT; BEGIN MY COMMENTARY
# This is the first result I found by searching the Web via Google for
# "factor prime number python":
# http://stackoverflow.com/questions/15347174/python-finding-prime-factors
# It was so short that I had already internalized it by the time I could think
# to look away, and it was so elegant that it was pointless to try to write
# another solution. So I merely altered it slightly.
# The other solutions are my original work,
# except where noted (nowhere other than here at the time of writing).
def problem3(n):
i = 2
while i * i < n:
while n % i == 0:
n = n / i
i = i + 1
return n
print problem3(600851475143) | [
"nabil.hassein@gmail.com"
] | nabil.hassein@gmail.com |
44ba4fddbe998c26c1153ddcb13928ea9af0115c | 6a53e107ab4bbef6d955dd466c7b61650bf9b3c5 | /Fonction_de_chargement.py | c22e2bf4c20a8f56344de50c518edeebd04bb9ba | [] | no_license | Oreobliton/Projet-Rpg | 3da051f40b92b310d08a7153e98bcaf9849037f4 | 0d7b4968668eeeed2d5a2754062f1365cfa7fc1c | refs/heads/master | 2020-03-23T23:03:57.933893 | 2018-07-24T04:45:05 | 2018-07-24T04:45:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,774 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Dec 9 10:48:43 2017
@author: Zorino, Flywalker, mornviir
"""
#Importation de gros modules colombiens
from Classes_Monstre_Perso_Item_Armes import *
from Fonction_de_sauvegarde import *
from Fonction_de_chargement import *
from Fonction_du_menu import *
from Fonction_concernant_le_mode_AVENTURE import *
import os.path
from random import *
#fonction perdue, sais pas où la ranger
def déparsagePropre(str):
str2 = ""
for i in range(len(str)-1):
str2 += str[i]
return str2
#LE CHARGEMENT DU PERSO
###############################"On lance TOUT le module à partir de loadUser, faut juste taper loadUser(perso)
def loadUser(perso): #parse 1 = ;
name = input("insérez le nom du personnage (le fichier) : ")
choix = input("Voulez vous spécifier la destination [O/n]?: ")
if choix == 'O':
desti = input('insérez votre destination : ')
else:
desti = "/home/mornviir/Documents/python/Projo RPG/users/" + name + "/"
usr = open(desti + "usr"+ name, 'r')
testo = usr.read()
testo = déparsagePropre(testo)
L = testo.split(";")
perso = Perso(L[0])
perso.argent = int(L[1])
perso.exp = int(L[2])
perso.mains_libres = int(L[3])
perso.nom_classe = L[4]
usr.close()
return loadStats(perso,name,desti)
#LE CHARGEMENT DES STATS
def def_stats(perso,vie,force,armure,agilite,mana):
Dstats = {"Vie": vie , "Force": force , "Valeur_Armure": armure , "Agilité": agilite , "Mana": mana}
perso.Dstats = Dstats
def loadStats(perso,name,desti): #parse 1 = ;
stat = open(desti + "stat"+ name, 'r')
testo = stat.read()
testo = déparsagePropre(testo)
L = testo.split(";")
L[4] = L[4].replace("\n","")
def_stats(perso,int(L[0]),int(L[1]),int(L[2]),int(L[3]),int(L[4]))
stat.close()
return loadInvent(perso,name,desti)
#LE CHARGEMENT DE L'INVENTAIRE
def loadInvent(perso, name, desti):
    """Load the character's inventory from its save files into *perso*.

    The "invent<name>" file holds ';'-separated item records, each record
    being four '£'-separated fields passed straight to create_item().  The
    parallel "nbr<name>" file holds the ';'-separated quantity for each item.
    Returns the value of perso.show_inventory().

    Bug fixed: the original placed nbr.close() and invent.close() AFTER the
    return statement, so they were unreachable and both file handles leaked.
    A ``with`` block now guarantees the files are closed.
    """
    with open(desti + "invent" + name, 'r') as invent, \
         open(desti + "nbr" + name, 'r') as nbr:
        counts = déparsagePropre(nbr.read()).split(";")
        records = déparsagePropre(invent.read()).split(";")
    for index, record in enumerate(records):
        fields = record.split("£")
        # create_item(nom, cout, description, application) — see note at file end.
        item = create_item(fields[0], fields[1], fields[2], fields[3])
        perso.addItemToInventory(item, int(counts[index]))
    return perso.show_inventory()
# str : nom, int : cout, str : description, dico : application
| [
"noreply@github.com"
] | noreply@github.com |
f76d667d0fdea002d1fd512d3a7f98728174a0a4 | 2ece848b37f7fa6f13ce0e94ddfd0fbd46c72b8f | /backend/utils/crawl_mode.py | c0224bad7279761898d0187465fdb7edceb18649 | [
"Apache-2.0"
] | permissive | rockeyzhu/eastmoney | 1a2d2db18bd658abe8e65875bf863f1cfcefd545 | c8aa33a69ebee54c64f22a8edbcf30ed0f29b293 | refs/heads/master | 2023-03-06T12:20:03.896607 | 2021-02-20T07:20:53 | 2021-02-20T07:20:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 183 | py | import configparser
def get_crawl_mode():
    """Return the ``crawl_mode`` value from the ``[CRAWL_MODE]`` section of config.ini.

    Raises KeyError if the section or option is missing.

    Fix: removed the dead ``config.sections()`` call — its return value was
    discarded, so it had no effect.
    """
    config = configparser.ConfigParser()
    # NOTE: config.ini is resolved relative to the current working directory.
    config.read("config.ini")
    return config['CRAWL_MODE']['crawl_mode']
| [
"1397991131@qq.com"
] | 1397991131@qq.com |
860e1b6b4199edd993d0f6b16cdc645e43a2bee9 | 4cef505611a04383310ce6556fac7acb02dbc8a1 | /Unmapped content SE/Unmapped content SE/checking_unmapped_content_SE_api_new1.py | 9ba60d429c9bc2219b1ffb1ca9dea5b0474b5f8b | [] | no_license | Sayan8981/Projectx | 9d8727eec144da35f2acffc787f3c769beef02e1 | bcf93fe885e4cd68bb2c30c408a3b03e785965c3 | refs/heads/master | 2022-03-26T18:13:02.831185 | 2020-01-16T06:52:31 | 2020-01-16T06:52:31 | 187,637,492 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 53,452 | py | """writer:Saayan"""
from fuzzywuzzy import fuzz
from fuzzywuzzy import process
import MySQLdb
import collections
from pprint import pprint
import sys
import urllib2
import json
import os
from urllib2 import HTTPError
from urllib2 import URLError
import csv
import urllib
import os
import pymysql
import datetime
import httplib
import socket
import unidecode
sys.setrecursionlimit(2000)
import threading
def open_csv(start,name,end,id):
inputFile="unmapped_content_SE1"
f = open(os.getcwd()+'/'+inputFile+'.csv', 'rb')
reader = csv.reader(f)
fullist=list(reader)
result_sheet='/GuideBoxValidationTVSHowPreProd_PX_Saayan%d.csv'%id
if(os.path.isfile(os.getcwd()+result_sheet)):
os.remove(os.getcwd()+result_sheet)
csv.register_dialect('excel',lineterminator = '\n',skipinitialspace=True,escapechar='')
w=open(os.getcwd()+result_sheet,"wa")
with w as mycsvfile:
fieldnames = ["Id","Title","TotalEpisodes","ReleaseYear","Gb_id","Gb_id_PX","Season Number","Episode Number","EpisodeTitle","OzoneOriginalEpisodeTitle","OzoneEpisodeTitle","OzoneRoviId","Scheme","Search","Match","AmazonLink","Amazon_Flag","StarzLink","Starz_Flag","NetflixLink","Netflix_flag","NBCLink","NBC_flag","CBSLink","CBS_flag","VUDULink","VUDU_flag","ITUNESLink","ITUNES_flag","Ott_flag","Result","Ozone_Series_id","Px_series_id","Rovi_id","Px_series_title","Px_episode_title","Px_release_year","Px_season_number","Px_episode_number","projectx_id","amazon_flag","starz_flag","netflix_flag","cbs_flag","vudu_flag","itunes_flag","amazon_flag_expired","vudu_flag_expired","starz_flag_expired","netflix_flag_expired","cbs_flag_expired","itunes_flag_expired","comment","Series_duplicate","Duplicate id","series_match","episode_title_match","title_match","Season_number_match","Episode_number_match","Release_year_match"]
writer = csv.DictWriter(mycsvfile,fieldnames=fieldnames,dialect="excel",lineterminator = '\n')
writer.writeheader()
total=0
Token='Token token=efeb15f572641809acbc0c26c9c1b63f4f7f1fd7dcb68070e45e26f3a40ec8e3'
Token1='Token token=0b4af23eaf275daaf41c7e57749532f128660ec3befa0ff3aee94636e86a43e7'
domain_name='http://preprod-projectx-1556298832.us-east-1.elb.amazonaws.com'
for r in range(start,end-1):
total=total+1
print ({"thread_name":name,"total":total})
source_amazon=[]
source_starz=[]
source_netflix=[]
source_cbs=[]
source_vudu=[]
source_itunes=[]
search_px_id=[]
search_px_id_=[]
search_px_id_filtered=[]
series_id_px=[]
arr_px=[]
arr_rovi=[]
arr_gb=[]
sec_arr=[]
s=0
t=0
u=0
v=0
w=0
x=0
Result=str(fullist[r][29])
if Result=="MAP FAIL":
Id=str(fullist[r][0])
Title=unicode(str(fullist[r][1]),'utf-8')
Title=unidecode.unidecode(Title)
TotalEpisodes=str(fullist[r][2])
ReleaseYear=str(fullist[r][3])
Gb_id=str(fullist[r][4])
Season_Number=str(fullist[r][5])
Episode_Number=str(fullist[r][6])
EpisodeTitle=unicode(str(fullist[r][7]),'utf-8')
EpisodeTitle=unidecode.unidecode(EpisodeTitle)
OzoneOriginalEpisodeTitle=str(fullist[r][8])
OzoneEpisodeTitle=str(fullist[r][9])
OzoneRoviId=str(fullist[r][10])
Scheme=str(fullist[r][11])
Search=str(fullist[r][12])
Match=str(fullist[r][13])
AmazonLink=str(fullist[r][14])
Amazon_Flag=str(fullist[r][15])
StarzLink=str(fullist[r][16])
Starz_Flag=str(fullist[r][17])
NetflixLink=str(fullist[r][18])
Netflix_flag=str(fullist[r][19])
NBCLink=str(fullist[r][20])
NBC_flag=str(fullist[r][21])
CBSLink=str(fullist[r][22])
CBS_flag=str(fullist[r][23])
VUDULink=str(fullist[r][24])
VUDU_flag=str(fullist[r][25])
ITUNESLink=str(fullist[r][26])
ITUNES_flag=str(fullist[r][27])
Ott_flag=str(fullist[r][28])
Result=str(fullist[r][29])
Ozone_Series_id=str(fullist[r][30])
print Result
print Gb_id
amazon_flag_expired=''
vudu_flag_expired=''
starz_flag_expired=''
netflix_flag_expired=''
cbs_flag_expired=''
itunes_flag_expired=''
try:
try:
if eval(AmazonLink):
source_amazon=[]
for oo in eval(AmazonLink):
source_amazon.append(oo)
for l in source_amazon:
if source_amazon.count(l)>1:
source_amazon.remove(l)
except SyntaxError:
source_amazon=[0]
try:
if eval(StarzLink):
source_starz=[]
for oo in eval(StarzLink):
source_starz.append(oo)
for l in source_starz:
if source_starz.count(l)>1:
source_starz.remove(l)
except SyntaxError:
source_starz=[0]
try:
if eval(NetflixLink):
source_netflix=[]
for oo in eval(NetflixLink):
source_netflix.append(oo)
for l in source_netflix:
if source_netflix.count(l)>1:
source_netflix.remove(l)
except SyntaxError:
source_netflix=[0]
try:
if eval(CBSLink):
source_cbs=[]
for oo in eval(CBSLink):
source_cbs.append(oo)
for l in source_cbs:
if source_cbs.count(l)>1:
source_cbs.remove(l)
except SyntaxError:
source_cbs=[0]
try:
if eval(VUDULink):
source_vudu=[]
for oo in eval(VUDULink):
source_vudu.append(oo)
for l in source_vudu:
if source_vudu.count(l)>1:
source_vudu.remove(l)
except SyntaxError:
source_vudu=[0]
try:
if eval(ITUNESLink):
source_itunes=[]
for oo in eval(ITUNESLink):
source_itunes.append(oo)
for l in source_itunes:
if source_itunes.count(l)>1:
source_itunes.remove(l)
except SyntaxError:
source_itunes=[0]
#import pdb;pdb.set_trace()
if source_amazon!=[0]:
url_amazon="http://34.231.212.186:81/projectx/%s/amazon/ottprojectx"%source_amazon[0]
response_amazon=urllib2.Request(url_amazon)
response_amazon.add_header('Authorization',Token)
resp_amazon=urllib2.urlopen(response_amazon)
data_amazon=resp_amazon.read()
data_resp_amazon=json.loads(data_amazon)
for ii in data_resp_amazon:
if ii.get("sub_type")=="SE" and ii.get("type")=='Program' and ii.get("data_source")=='GuideBox':
arr_px.append(ii.get("projectx_id"))
arr_gb.append(ii.get("source_id"))
if ii.get("type")=='Program' and ii.get("data_source")=='Rovi':
arr_px.append(ii.get("projectx_id"))
arr_rovi.append(ii.get("source_id"))
for aa in arr_px:
if arr_px.count(aa)>1:
arr_px.remove(aa)
for jj in arr_px:
sec_arr.append(jj)
s=len(sec_arr)
if len(sec_arr)>=1:
amazon_flag='True'
else:
expired_link="https://preprod.caavo.com/expired_ott/source_program_id/is_available?source_program_id=%s&service_short_name=amazon"%source_amazon[0]
response_expired=urllib2.Request(expired_link)
response_expired.add_header('Authorization',Token1)
resp_exp=urllib2.urlopen(response_expired)
data_available=resp_exp.read()
data_resp_exp=json.loads(data_available)
if data_resp_exp.get("is_available")==False:
amazon_flag_expired='False'
amazon_flag='False'
else:
amazon_flag_expired='True'
amazon_flag='False'
else:
amazon_flag=''
arr_px=[]
if source_starz!=[0]:
url_starz="http://34.231.212.186:81/projectx/%s/starz/ottprojectx"%source_starz[0]
response_starz=urllib2.Request(url_starz)
response_starz.add_header('Authorization',Token)
resp_starz=urllib2.urlopen(response_starz)
data_starz=resp_starz.read()
data_resp_starz=json.loads(data_starz)
for ii in data_resp_starz:
if ii.get("sub_type")=="SE" and ii.get("type")=='Program' and ii.get("data_source")=='GuideBox':
arr_px.append(ii.get("projectx_id"))
arr_gb.append(ii.get("source_id"))
if ii.get("type")=='Program' and ii.get("data_source")=='Rovi':
arr_px.append(ii.get("projectx_id"))
arr_rovi.append(ii.get("source_id"))
for aa in arr_px:
if arr_px.count(aa)>1:
arr_px.remove(aa)
for jj in arr_px:
sec_arr.append(jj)
t=len(sec_arr)
if len(sec_arr)>s:
starz_flag='True'
else:
expired_link="https://preprod.caavo.com/expired_ott/source_program_id/is_available?source_program_id=%s&service_short_name=starz"%source_starz[0]
response_expired=urllib2.Request(expired_link)
response_expired.add_header('Authorization',Token1)
resp_exp=urllib2.urlopen(response_expired)
data_available=resp_exp.read()
data_resp_exp=json.loads(data_available)
if data_resp_exp.get("is_available")==False:
starz_flag_expired='False'
starz_flag='False'
else:
starz_flag_expired='True'
starz_flag='False'
else:
starz_flag=''
arr_px=[]
if source_netflix!=[0]:
url_netflix="http://34.231.212.186:81/projectx/%s/netflixusa/ottprojectx"%source_netflix[0]
response_netflix=urllib2.Request(url_netflix)
response_netflix.add_header('Authorization',Token)
resp_netflix=urllib2.urlopen(response_netflix)
data_netflix=resp_netflix.read()
data_resp_netflix=json.loads(data_netflix)
for ii in data_resp_netflix:
if ii.get("sub_type")=="SE" and ii.get("type")=='Program' and ii.get("data_source")=='GuideBox':
arr_px.append(ii.get("projectx_id"))
arr_gb.append(ii.get("source_id"))
if ii.get("type")=='Program' and ii.get("data_source")=='Rovi':
arr_px.append(ii.get("projectx_id"))
arr_rovi.append(ii.get("source_id"))
for aa in arr_px:
if arr_px.count(aa)>1:
arr_px.remove(aa)
for jj in arr_px:
sec_arr.append(jj)
u=len(sec_arr)
if len(sec_arr)>t:
netflix_flag='True'
else:
expired_link="https://preprod.caavo.com/expired_ott/source_program_id/is_available?source_program_id=%s&service_short_name=netflixusa"%source_netflix[0]
response_expired=urllib2.Request(expired_link)
response_expired.add_header('Authorization',Token)
resp_exp=urllib2.urlopen(response_expired)
data_available=resp_exp.read()
data_resp_exp=json.loads(data_available)
if data_resp_exp.get("is_available")==False:
netflix_flag_expired='False'
netflix_flag='False'
else:
netflix_flag_expired='True'
netflix_flag='False'
else:
netflix_flag=''
arr_px=[]
if source_cbs!=[0]:
url_cbs="http://34.231.212.186:81/projectx/%s/cbs/ottprojectx"%source_cbs[0]
response_cbs=urllib2.Request(url_cbs)
response_cbs.add_header('Authorization',Token)
resp_cbs=urllib2.urlopen(response_cbs)
data_cbs=resp_cbs.read()
data_resp_cbs=json.loads(data_cbs)
for ii in data_resp_cbs:
if ii.get("sub_type")=="SE" and ii.get("type")=='Program' and ii.get("data_source")=='GuideBox':
arr_px.append(ii.get("projectx_id"))
arr_gb.append(ii.get("source_id"))
if ii.get("type")=='Program' and ii.get("data_source")=='Rovi':
arr_px.append(ii.get("projectx_id"))
arr_rovi.append(ii.get("source_id"))
for aa in arr_px:
if arr_px.count(aa)>1:
arr_px.remove(aa)
for jj in arr_px:
sec_arr.append(jj)
v=len(sec_arr)
if len(sec_arr)>u:
cbs_flag='True'
else:
expired_link="https://preprod.caavo.com/expired_ott/source_program_id/is_available?source_program_id=%s&service_short_name=cbs"%source_cbs[0]
response_expired=urllib2.Request(expired_link)
response_expired.add_header('Authorization',Token1)
resp_exp=urllib2.urlopen(response_expired)
data_available=resp_exp.read()
data_resp_exp=json.loads(data_available)
if data_resp_exp.get("is_available")==False:
cbs_flag_expired='False'
cbs_flag='False'
else:
cbs_flag_expired='True'
cbs_flag='False'
else:
cbs_flag=''
arr_px=[]
if source_vudu!=[0]:
url_vudu="http://34.231.212.186:81/projectx/%s/vudu/ottprojectx"%source_vudu[0]
response_vudu=urllib2.Request(url_vudu)
response_vudu.add_header('Authorization',Token)
resp_vudu=urllib2.urlopen(response_vudu)
data_vudu=resp_vudu.read()
data_resp_vudu=json.loads(data_vudu)
for ii in data_resp_vudu:
if ii.get("sub_type")=="SE" and ii.get("type")=='Program' and ii.get("data_source")=='GuideBox':
arr_px.append(ii.get("projectx_id"))
arr_gb.append(ii.get("source_id"))
if ii.get("type")=='Program' and ii.get("data_source")=='Rovi':
arr_px.append(ii.get("projectx_id"))
arr_rovi.append(ii.get("source_id"))
for aa in arr_px:
if arr_px.count(aa)>1:
arr_px.remove(aa)
for jj in arr_px:
sec_arr.append(jj)
w=len(sec_arr)
if len(sec_arr)>v:
vudu_flag='True'
else:
expired_link="https://preprod.caavo.com/expired_ott/source_program_id/is_available?source_program_id=%s&service_short_name=netflixusa"%source_vudu[0]
response_expired=urllib2.Request(expired_link)
response_expired.add_header('Authorization',Token1)
resp_exp=urllib2.urlopen(response_expired)
data_available=resp_exp.read()
data_resp_exp=json.loads(data_available)
if data_resp_exp.get("is_available")==False:
vudu_flag_expired='False'
vudu_flag='False'
else:
vudu_flag_expired='True'
vudu_flag='False'
else:
vudu_flag=''
arr_px=[]
if source_itunes!=[0]:
url_itune="http://34.231.212.186:81/projectx/%s/itunes/ottprojectx"%source_itunes[0]
response_itune=urllib2.Request(url_itune)
response_itune.add_header('Authorization',Token)
resp_itune=urllib2.urlopen(response_itune)
data_itune=resp_itune.read()
data_resp_itune=json.loads(data_itune)
for ii in data_resp_itune:
if ii.get("sub_type")=="SE" and ii.get("type")=='Program' and ii.get("data_source")=='GuideBox':
arr_px.append(ii.get("projectx_id"))
arr_gb.append(ii.get("source_id"))
if ii.get("type")=='Program' and ii.get("data_source")=='Rovi':
arr_px.append(ii.get("projectx_id"))
arr_rovi.append(ii.get("source_id"))
for aa in arr_px:
if arr_px.count(aa)>1:
arr_px.remove(aa)
for jj in arr_px:
sec_arr.append(jj)
x=len(sec_arr)
if len(sec_arr)>w:
itunes_flag='True'
else:
expired_link="https://preprod.caavo.com/expired_ott/source_program_id/is_available?source_program_id=%s&service_short_name=netflixusa"%source_itunes[0]
response_expired=urllib2.Request(expired_link)
response_expired.add_header('Authorization',Token1)
resp_exp=urllib2.urlopen(response_expired)
data_available=resp_exp.read()
data_resp_exp=json.loads(data_available)
if data_resp_exp.get("is_available")==False:
itunes_flag_expired='False'
itunes_flag='False'
else:
itunes_flag_expired='True'
itunes_flag='False'
else:
itunes_flag=''
for bb in sec_arr:
while sec_arr.count(bb)>1:
sec_arr.remove(bb)
while sec_arr.count(bb)>1:
sec_arr.remove(bb)
for bb in arr_rovi:
if arr_rovi.count(bb)>1:
arr_rovi.remove(bb)
if bb in arr_rovi:
if arr_rovi.count(bb)>1:
arr_rovi.remove(bb)
for bb in arr_gb:
if arr_gb.count(bb)>1:
arr_gb.remove(bb)
if bb in arr_gb:
if arr_gb.count(bb)>1:
arr_gb.remove(bb)
if amazon_flag=='True' or starz_flag=='True' or netflix_flag=='True' or cbs_flag=='True' or vudu_flag=='True' or itunes_flag=='True':
if len(sec_arr)==1:
url_px="http://preprod-projectx-1556298832.us-east-1.elb.amazonaws.com/programs/%d?&ott=true"%sec_arr[0]
response_px=urllib2.Request(url_px)
response_px.add_header('Authorization',Token)
resp_px=urllib2.urlopen(response_px)
data_px=resp_px.read()
data_resp_px=json.loads(data_px)
for kk in data_resp_px:
if kk.get("original_title")!='':
series_title=unicode(kk.get("original_title"))
series_title=unidecode.unidecode(series_title)
else:
series_title=unicode(kk.get("long_title"))
series_title=unidecode.unidecode(series_title)
if kk.get("original_episode_title")!='':
episode_title=unicode(kk.get("original_episode_title"))
episode_title=unidecode.unidecode(episode_title)
ratio_title=fuzz.ratio(episode_title.upper(),EpisodeTitle.upper())
if ratio_title >=70:
episode_title_match="Above"+'90%'
title_match='Pass'
else:
episode_title =unicode(kk.get("episode_title"))
episode_title=unidecode.unidecode(episode_title)
ratio_title=fuzz.ratio(episode_title.upper(),EpisodeTitle.upper())
if ratio_title >=70:
episode_title_match="Above"+'90%'
title_match='Pass'
else:
episode_title_match="Below"+'90%'
title_match='Fail'
else:
episode_title =unicode(kk.get("episode_title"))
episode_title=unidecode.unidecode(episode_title)
release_year=kk.get("release_year")
season_number=kk.get("episode_season_number")
episode_number=kk.get("episode_season_sequence")
series_id=str(kk.get("series_id"))
if Ozone_Series_id==series_id:
series_match='Pass'
else:
series_match='Fail/Not ingested'
ratio_title=fuzz.ratio(episode_title.upper(),EpisodeTitle.upper())
if ratio_title >=70:
episode_title_match="Above"+'90%'
title_match='Pass'
else:
episode_title_match="Below"+'90%'
title_match='Fail'
if str(season_number)==Season_Number:
Season_number_match="Pass"
else:
Season_number_match='Fail'
if str(episode_number)==Episode_Number:
Episode_number_match="Pass"
else:
Episode_number_match='Fail'
if str(release_year)==ReleaseYear:
Release_year_match="Pass"
else:
r_y=release_year
r_ys=release_year
r_y=r_y+1
if str(r_y)==ReleaseYear:
Release_year_match='Pass'
else:
r_ys=r_ys-1
if str(r_ys)==ReleaseYear:
Release_year_match='Pass'
else:
Release_year_match='Fail'
writer.writerow({"Id":Id,"Title":Title,"TotalEpisodes":TotalEpisodes,"ReleaseYear":ReleaseYear,"Gb_id":Gb_id,"Gb_id_PX":arr_gb,"Season Number":Season_Number,"Episode Number":Episode_Number,"EpisodeTitle":EpisodeTitle,"OzoneOriginalEpisodeTitle":OzoneOriginalEpisodeTitle,"OzoneEpisodeTitle":OzoneEpisodeTitle,"OzoneRoviId":OzoneRoviId,"Scheme":Scheme,"Search":Search,"Match":Match,"AmazonLink":AmazonLink,"Amazon_Flag":Amazon_Flag,"StarzLink":StarzLink,"Starz_Flag":Starz_Flag,"NetflixLink":NetflixLink,"Netflix_flag":Netflix_flag,"NBCLink":NBCLink,"NBC_flag":NBC_flag,"CBSLink":CBSLink,"CBS_flag":CBS_flag,"VUDULink":VUDULink,"VUDU_flag":VUDU_flag,"ITUNESLink":ITUNESLink,"ITUNES_flag":ITUNES_flag,"Ott_flag":Ott_flag,"Result":Result,"Ozone_Series_id":Ozone_Series_id,"EpisodeTitle":EpisodeTitle,"Px_series_id":series_id,"Px_series_title":series_title,"Px_episode_title":episode_title,"Px_release_year":release_year,"Px_season_number":season_number,"Px_episode_number":episode_number,"Rovi_id":arr_rovi,"projectx_id":sec_arr,"amazon_flag":amazon_flag,"starz_flag":starz_flag,"netflix_flag":netflix_flag,"cbs_flag":cbs_flag,"vudu_flag":vudu_flag,"itunes_flag":itunes_flag,"comment":'All link or any of them is present in projectx API',"series_match":series_match,"episode_title_match":episode_title_match,"title_match":title_match,"Season_number_match":Season_number_match,"Episode_number_match":Episode_number_match,"Release_year_match":Release_year_match})
if len(sec_arr)>1:
arr_gb=[]
arr_rovi=[]
search_px_id__=[]
search_px_id1_=[]
duplicate=""
search_px_id1=[]
next_page_url=""
data_resp_search=dict()
px_link="http://preprod-projectx-1556298832.us-east-1.elb.amazonaws.com/programs?ids=%s&ott=true&aliases=true" %'{}'.format(",".join([str(i) for i in sec_arr]))
response_link=urllib2.Request(px_link)
response_link.add_header('Authorization',Token)
resp_link=urllib2.urlopen(response_link)
data_link=resp_link.read()
data_resp_link3=json.loads(data_link)
for kk in data_resp_link3:
series_id_px.append(kk.get("series_id"))
for ll in series_id_px:
while series_id_px.count(ll)>1:
series_id_px.remove(ll)
if len(series_id_px)>1:
search_api="http://preprod-projectx-1556298832.us-east-1.elb.amazonaws.com/v3/voice_search?q=%s&safe_search=false&credit_summary=true&credit_types=Actor&aliases=true&ott=true"%urllib2.quote(Title)
response_search=urllib2.Request(search_api)
response_search.add_header('User-Agent','Branch Fyra v1.0')
response_search.add_header('Authorization',Token)
resp_search=urllib2.urlopen(response_search)
data_search=resp_search.read()
data_resp_search=json.loads(data_search)
if data_resp_search.get("top_results"):
for ii in data_resp_search.get("top_results"):
if ii.get("action_type")=="ott_search" and ii.get("action_type")!="web_results" and ii.get("results"):
for jj in ii.get("results"):
if jj.get("object").get("show_type")=='SM':
search_px_id.append(jj.get("object").get("id"))
if search_px_id:
for mm in search_px_id:
if mm in series_id_px:
search_px_id_.append(mm)
else:
search_px_id_filtered.append(mm)
if len(search_px_id_)==1 or search_px_id_==[]:
try:
search_px_id1_.append(search_px_id_[0])
search_px_id_=[]
search_px_id=[]
duplicate='False'
except IndexError:
search_px_id_=[]
search_px_id=[]
duplicate='False'
else:
if search_px_id_!=search_px_id__:
search_px_id__=search_px_id__+search_px_id_
duplicate='True'
search_px_id=[]
else:
search_px_id__=search_px_id__
duplicate='True'
search_px_id=[]
if duplicate=='False':
while data_resp_search.get("results"):
for nn in data_resp_search.get("results"):
if nn.get("action_type")=="ott_search" and (nn.get("results")==[] or nn.get("results")):
next_page_url=nn.get("next_page_url")
if next_page_url is not None:
search_api1=domain_name+next_page_url.replace(' ',"%20")
if search_api1!=domain_name :
search_api=search_api1
response_search=urllib2.Request(search_api)
response_search.add_header('User-Agent','Branch Fyra v1.0')
response_search.add_header('Authorization',Token)
resp_search=urllib2.urlopen(response_search)
data_search=resp_search.read()
data_resp_search=json.loads(data_search)
else:
data_resp_search={"resilts":[]}
else:
data_resp_search={"resilts":[]}
if data_resp_search.get("results"):
for nn in data_resp_search.get('results'):
if nn.get("results"):
for jj in nn.get("results"):
if jj.get("object").get("show_type")=='SM':
search_px_id.append(jj.get("object").get("id"))
if search_px_id:
for mm in search_px_id:
if mm in series_id_px:
search_px_id_.append(mm)
else:
search_px_id_filtered.append(mm)
if len(search_px_id_)==1 or search_px_id_==[]:
try:
search_px_id1_.append(search_px_id_[0])
search_px_id_=[]
search_px_id=[]
duplicate='False'
except IndexError:
search_px_id_=[]
search_px_id=[]
duplicate='False'
else:
if search_px_id_!=search_px_id__:
search_px_id__=search_px_id__+search_px_id_
duplicate='True'
search_px_id=[]
else:
search_px_id__=search_px_id__
duplicate='True'
search_px_id=[]
if len(search_px_id__)>1 and duplicate=='True':
series_duplicate="True"
writer.writerow({"Id":Id,"Title":Title,"TotalEpisodes":TotalEpisodes,"ReleaseYear":ReleaseYear,"Gb_id":Gb_id,"Gb_id_PX":arr_gb,"Season Number":Season_Number,"Episode Number":Episode_Number,"EpisodeTitle":EpisodeTitle,"OzoneOriginalEpisodeTitle":OzoneOriginalEpisodeTitle,"OzoneEpisodeTitle":OzoneEpisodeTitle,"OzoneRoviId":OzoneRoviId,"Scheme":Scheme,"Search":Search,"Match":Match,"AmazonLink":AmazonLink,"Amazon_Flag":Amazon_Flag,"StarzLink":StarzLink,"Starz_Flag":Starz_Flag,"NetflixLink":NetflixLink,"Netflix_flag":Netflix_flag,"NBCLink":NBCLink,"NBC_flag":NBC_flag,"CBSLink":CBSLink,"CBS_flag":CBS_flag,"VUDULink":VUDULink,"VUDU_flag":VUDU_flag,"ITUNESLink":ITUNESLink,"ITUNES_flag":ITUNES_flag,"Ott_flag":Ott_flag,"Result":Result,"Ozone_Series_id":Ozone_Series_id,"Px_series_id":'',"Px_series_title":'',"Px_episode_title":'',"Px_release_year":'',"Px_season_number":'',"Px_episode_number":'',"Rovi_id":arr_rovi,"projectx_id":sec_arr,"amazon_flag":amazon_flag,"starz_flag":starz_flag,"netflix_flag":netflix_flag,"cbs_flag":cbs_flag,"vudu_flag":vudu_flag,"itunes_flag":itunes_flag,"comment":'Multiple projectx ids found for series in search API',"Series_duplicate":series_duplicate,"Duplicate id":search_px_id__,"series_match":'',"episode_title_match":'',"title_match":'',"Season_number_match":'',"Episode_number_match":'',"Release_year_match":''})
else:
series_duplicate="False"
writer.writerow({"Id":Id,"Title":Title,"TotalEpisodes":TotalEpisodes,"ReleaseYear":ReleaseYear,"Gb_id":Gb_id,"Gb_id_PX":arr_gb,"Season Number":Season_Number,"Episode Number":Episode_Number,"EpisodeTitle":EpisodeTitle,"OzoneOriginalEpisodeTitle":OzoneOriginalEpisodeTitle,"OzoneEpisodeTitle":OzoneEpisodeTitle,"OzoneRoviId":OzoneRoviId,"Scheme":Scheme,"Search":Search,"Match":Match,"AmazonLink":AmazonLink,"Amazon_Flag":Amazon_Flag,"StarzLink":StarzLink,"Starz_Flag":Starz_Flag,"NetflixLink":NetflixLink,"Netflix_flag":Netflix_flag,"NBCLink":NBCLink,"NBC_flag":NBC_flag,"CBSLink":CBSLink,"CBS_flag":CBS_flag,"VUDULink":VUDULink,"VUDU_flag":VUDU_flag,"ITUNESLink":ITUNESLink,"ITUNES_flag":ITUNES_flag,"Ott_flag":Ott_flag,"Result":Result,"Ozone_Series_id":Ozone_Series_id,"Px_series_id":'',"Px_series_title":'',"Px_episode_title":'',"Px_release_year":'',"Px_season_number":'',"Px_episode_number":'',"Rovi_id":arr_rovi,"projectx_id":sec_arr,"amazon_flag":amazon_flag,"starz_flag":starz_flag,"netflix_flag":netflix_flag,"cbs_flag":cbs_flag,"vudu_flag":vudu_flag,"itunes_flag":itunes_flag,"comment":'Multiple projectx ids found',"Series_duplicate":series_duplicate,"Duplicate id":[],"series_match":'',"episode_title_match":'',"title_match":'',"Season_number_match":'',"Episode_number_match":'',"Release_year_match":''})
else:
series_duplicate="False"
writer.writerow({"Id":Id,"Title":Title,"TotalEpisodes":TotalEpisodes,"ReleaseYear":ReleaseYear,"Gb_id":Gb_id,"Gb_id_PX":arr_gb,"Season Number":Season_Number,"Episode Number":Episode_Number,"EpisodeTitle":EpisodeTitle,"OzoneOriginalEpisodeTitle":OzoneOriginalEpisodeTitle,"OzoneEpisodeTitle":OzoneEpisodeTitle,"OzoneRoviId":OzoneRoviId,"Scheme":Scheme,"Search":Search,"Match":Match,"AmazonLink":AmazonLink,"Amazon_Flag":Amazon_Flag,"StarzLink":StarzLink,"Starz_Flag":Starz_Flag,"NetflixLink":NetflixLink,"Netflix_flag":Netflix_flag,"NBCLink":NBCLink,"NBC_flag":NBC_flag,"CBSLink":CBSLink,"CBS_flag":CBS_flag,"VUDULink":VUDULink,"VUDU_flag":VUDU_flag,"ITUNESLink":ITUNESLink,"ITUNES_flag":ITUNES_flag,"Ott_flag":Ott_flag,"Result":Result,"Ozone_Series_id":Ozone_Series_id,"Px_series_id":'',"Px_series_title":'',"Px_episode_title":'',"Px_release_year":'',"Px_season_number":'',"Px_episode_number":'',"Rovi_id":arr_rovi,"projectx_id":sec_arr,"amazon_flag":amazon_flag,"starz_flag":starz_flag,"netflix_flag":netflix_flag,"cbs_flag":cbs_flag,"vudu_flag":vudu_flag,"itunes_flag":itunes_flag,"comment":'Multiple projectx ids found',"Series_duplicate":series_duplicate,"Duplicate id":[],"series_match":'',"episode_title_match":'',"title_match":'',"Season_number_match":'',"Episode_number_match":'',"Release_year_match":''})
elif amazon_flag=='' and starz_flag=='' and netflix_flag=='' and cbs_flag=='' and vudu_flag=='' and itunes_flag=='':
writer.writerow({"Id":Id,"Title":Title,"TotalEpisodes":TotalEpisodes,"ReleaseYear":ReleaseYear,"Gb_id":Gb_id,"Gb_id_PX":arr_gb,"Season Number":Season_Number,"Episode Number":Episode_Number,"EpisodeTitle":EpisodeTitle,"OzoneOriginalEpisodeTitle":OzoneOriginalEpisodeTitle,"OzoneEpisodeTitle":OzoneEpisodeTitle,"OzoneRoviId":OzoneRoviId,"Scheme":Scheme,"Search":Search,"Match":Match,"AmazonLink":AmazonLink,"Amazon_Flag":Amazon_Flag,"StarzLink":StarzLink,"Starz_Flag":Starz_Flag,"NetflixLink":NetflixLink,"Netflix_flag":Netflix_flag,"NBCLink":NBCLink,"NBC_flag":NBC_flag,"CBSLink":CBSLink,"CBS_flag":CBS_flag,"VUDULink":VUDULink,"VUDU_flag":VUDU_flag,"ITUNESLink":ITUNESLink,"ITUNES_flag":ITUNES_flag,"Ott_flag":Ott_flag,"Result":Result,"Ozone_Series_id":Ozone_Series_id,"Px_series_id":'',"Px_series_title":'',"Px_episode_title":'',"Px_release_year":'',"Px_season_number":'',"Px_episode_number":'',"Rovi_id":'',"projectx_id":'',"amazon_flag":amazon_flag,"starz_flag":starz_flag,"netflix_flag":netflix_flag,"cbs_flag":cbs_flag,"vudu_flag":vudu_flag,"itunes_flag":itunes_flag,"comment":'this links not in the sheet',"series_match":'',"episode_title_match":'',"title_match":'',"Season_number_match":'',"Episode_number_match":'',"Release_year_match":''})
elif amazon_flag_expired=='False' and vudu_flag_expired=='False' and starz_flag_expired=='False' and netflix_flag_expired=='False' and cbs_flag_expired=='False' and itunes_flag_expired=='False':
writer.writerow({"Id":Id,"Title":Title,"TotalEpisodes":TotalEpisodes,"ReleaseYear":ReleaseYear,"Gb_id":Gb_id,"Gb_id_PX":arr_gb,"Season Number":Season_Number,"Episode Number":Episode_Number,"EpisodeTitle":EpisodeTitle,"OzoneOriginalEpisodeTitle":OzoneOriginalEpisodeTitle,"OzoneEpisodeTitle":OzoneEpisodeTitle,"OzoneRoviId":OzoneRoviId,"Scheme":Scheme,"Search":Search,"Match":Match,"AmazonLink":AmazonLink,"Amazon_Flag":Amazon_Flag,"StarzLink":StarzLink,"Starz_Flag":Starz_Flag,"NetflixLink":NetflixLink,"Netflix_flag":Netflix_flag,"NBCLink":NBCLink,"NBC_flag":NBC_flag,"CBSLink":CBSLink,"CBS_flag":CBS_flag,"VUDULink":VUDULink,"VUDU_flag":VUDU_flag,"ITUNESLink":ITUNESLink,"ITUNES_flag":ITUNES_flag,"Ott_flag":Ott_flag,"Result":Result,"Ozone_Series_id":Ozone_Series_id,"Px_series_id":'',"Px_series_title":'',"Px_episode_title":'',"Px_release_year":'',"Px_season_number":'',"Px_episode_number":'',"Rovi_id":'',"projectx_id":'',"amazon_flag":amazon_flag,"starz_flag":starz_flag,"netflix_flag":netflix_flag,"cbs_flag":cbs_flag,"vudu_flag":vudu_flag,"itunes_flag":itunes_flag,"amazon_flag_expired":amazon_flag_expired,"vudu_flag_expired":vudu_flag_expired,"starz_flag_expired":starz_flag_expired,"netflix_flag_expired":netflix_flag_expired,"cbs_flag_expired":cbs_flag_expired,"itunes_flag_expired":itunes_flag_expired,"comment":'this links not expired',"series_match":'',"episode_title_match":'',"title_match":'',"Season_number_match":'',"Episode_number_match":'',"Release_year_match":''})
else:
link=[]
link_present=''
gb_api="http://34.231.212.186:81/projectx/guideboxdata?sourceId=%d&showType=SE"%eval(Gb_id)
response_gb=urllib2.Request(gb_api)
response_gb.add_header('Authorization',Token)
resp_gb=urllib2.urlopen(response_gb)
data_gb=resp_gb.read()
data_resp_gb=json.loads(data_gb)
if data_resp_gb.get("tv_everywhere_web_sources") or data_resp_gb.get("subscription_web_sources") or data_resp_gb.get("free_web_sources") or data_resp_gb.get("purchase_web_sources") :
if data_resp_gb.get("tv_everywhere_web_sources"):
for aa in data_resp_gb.get("tv_everywhere_web_sources"):
link.append(aa.get('link'))
if data_resp_gb.get("subscription_web_sources"):
for aa in data_resp_gb.get("subscription_web_sources"):
link.append(aa.get('link'))
if data_resp_gb.get("free_web_sources"):
for aa in data_resp_gb.get("free_web_sources"):
link.append(aa.get('link'))
if data_resp_gb.get("purchase_web_sources"):
for aa in data_resp_gb.get("purchase_web_sources"):
link.append(aa.get('link'))
if source_amazon[0]==0:
source_amazon[0]=' '
if source_starz[0]==0:
source_starz[0]=' '
if source_netflix[0]==0:
source_netflix[0]=' '
if source_cbs[0]==0:
source_cbs[0]=' '
if source_vudu[0]==0:
source_vudu[0]=' '
if source_itunes[0]==0:
source_itunes[0]=' '
for bb in link:
if str(source_amazon[0]) in bb or str(source_starz[0]) in bb or str(source_netflix[0]) in bb or str(source_cbs[0]) in bb or str(source_vudu[0]) in bb or str(source_itunes[0]) in bb:
link_present='True'
break
else:
link_present='False'
if link_present=='True':
writer.writerow({"Id":Id,"Title":Title,"TotalEpisodes":TotalEpisodes,"ReleaseYear":ReleaseYear,"Gb_id":Gb_id,"Gb_id_PX":arr_gb,"Season Number":Season_Number,"Episode Number":Episode_Number,"EpisodeTitle":EpisodeTitle,"OzoneOriginalEpisodeTitle":OzoneOriginalEpisodeTitle,"OzoneEpisodeTitle":OzoneEpisodeTitle,"OzoneRoviId":OzoneRoviId,"Scheme":Scheme,"Search":Search,"Match":Match,"AmazonLink":AmazonLink,"Amazon_Flag":Amazon_Flag,"StarzLink":StarzLink,"Starz_Flag":Starz_Flag,"NetflixLink":NetflixLink,"Netflix_flag":Netflix_flag,"NBCLink":NBCLink,"NBC_flag":NBC_flag,"CBSLink":CBSLink,"CBS_flag":CBS_flag,"VUDULink":VUDULink,"VUDU_flag":VUDU_flag,"ITUNESLink":ITUNESLink,"ITUNES_flag":ITUNES_flag,"Ott_flag":Ott_flag,"Result":Result,"Ozone_Series_id":Ozone_Series_id,"Px_series_id":'',"Px_series_title":'',"Px_episode_title":'',"Px_release_year":'',"Px_season_number":'',"Px_episode_number":'',"Rovi_id":'',"projectx_id":'',"amazon_flag":amazon_flag,"starz_flag":starz_flag,"netflix_flag":netflix_flag,"cbs_flag":cbs_flag,"vudu_flag":vudu_flag,"itunes_flag":itunes_flag,"amazon_flag_expired":amazon_flag_expired,"vudu_flag_expired":vudu_flag_expired,"starz_flag_expired":starz_flag_expired,"netflix_flag_expired":netflix_flag_expired,"cbs_flag_expired":cbs_flag_expired,"itunes_flag_expired":itunes_flag_expired,"comment":'this link not ingested but ott link present in db',"series_match":'',"episode_title_match":'',"title_match":'',"Season_number_match":'',"Episode_number_match":'',"Release_year_match":''})
else:
writer.writerow({"Id":Id,"Title":Title,"TotalEpisodes":TotalEpisodes,"ReleaseYear":ReleaseYear,"Gb_id":Gb_id,"Gb_id_PX":arr_gb,"Season Number":Season_Number,"Episode Number":Episode_Number,"EpisodeTitle":EpisodeTitle,"OzoneOriginalEpisodeTitle":OzoneOriginalEpisodeTitle,"OzoneEpisodeTitle":OzoneEpisodeTitle,"OzoneRoviId":OzoneRoviId,"Scheme":Scheme,"Search":Search,"Match":Match,"AmazonLink":AmazonLink,"Amazon_Flag":Amazon_Flag,"StarzLink":StarzLink,"Starz_Flag":Starz_Flag,"NetflixLink":NetflixLink,"Netflix_flag":Netflix_flag,"NBCLink":NBCLink,"NBC_flag":NBC_flag,"CBSLink":CBSLink,"CBS_flag":CBS_flag,"VUDULink":VUDULink,"VUDU_flag":VUDU_flag,"ITUNESLink":ITUNESLink,"ITUNES_flag":ITUNES_flag,"Ott_flag":Ott_flag,"Result":Result,"Ozone_Series_id":Ozone_Series_id,"Px_series_id":'',"Px_series_title":'',"Px_episode_title":'',"Px_release_year":'',"Px_season_number":'',"Px_episode_number":'',"Rovi_id":'',"projectx_id":'',"amazon_flag":amazon_flag,"starz_flag":starz_flag,"netflix_flag":netflix_flag,"cbs_flag":cbs_flag,"vudu_flag":vudu_flag,"itunes_flag":itunes_flag,"amazon_flag_expired":amazon_flag_expired,"vudu_flag_expired":vudu_flag_expired,"starz_flag_expired":starz_flag_expired,"netflix_flag_expired":netflix_flag_expired,"cbs_flag_expired":cbs_flag_expired,"itunes_flag_expired":itunes_flag_expired,"comment":'this link not ingested and not present in DB',"series_match":'',"episode_title_match":'',"title_match":'',"Season_number_match":'',"Episode_number_match":'',"Release_year_match":''})
else:
writer.writerow({"Id":Id,"Title":Title,"TotalEpisodes":TotalEpisodes,"ReleaseYear":ReleaseYear,"Gb_id":Gb_id,"Gb_id_PX":arr_gb,"Season Number":Season_Number,"Episode Number":Episode_Number,"EpisodeTitle":EpisodeTitle,"OzoneOriginalEpisodeTitle":OzoneOriginalEpisodeTitle,"OzoneEpisodeTitle":OzoneEpisodeTitle,"OzoneRoviId":OzoneRoviId,"Scheme":Scheme,"Search":Search,"Match":Match,"AmazonLink":AmazonLink,"Amazon_Flag":Amazon_Flag,"StarzLink":StarzLink,"Starz_Flag":Starz_Flag,"NetflixLink":NetflixLink,"Netflix_flag":Netflix_flag,"NBCLink":NBCLink,"NBC_flag":NBC_flag,"CBSLink":CBSLink,"CBS_flag":CBS_flag,"VUDULink":VUDULink,"VUDU_flag":VUDU_flag,"ITUNESLink":ITUNESLink,"ITUNES_flag":ITUNES_flag,"Ott_flag":Ott_flag,"Result":Result,"Ozone_Series_id":Ozone_Series_id,"Px_series_id":'',"Px_series_title":'',"Px_episode_title":'',"Px_release_year":'',"Px_season_number":'',"Px_episode_number":'',"Rovi_id":'',"projectx_id":'',"amazon_flag":amazon_flag,"starz_flag":starz_flag,"netflix_flag":netflix_flag,"cbs_flag":cbs_flag,"vudu_flag":vudu_flag,"itunes_flag":itunes_flag,"amazon_flag_expired":amazon_flag_expired,"vudu_flag_expired":vudu_flag_expired,"starz_flag_expired":starz_flag_expired,"netflix_flag_expired":netflix_flag_expired,"cbs_flag_expired":cbs_flag_expired,"itunes_flag_expired":itunes_flag_expired,"comment":'this link not ingested and not present in DB',"series_match":'',"episode_title_match":'',"title_match":'',"Season_number_match":'',"Episode_number_match":'',"Release_year_match":''})
print datetime.datetime.now()
except httplib.BadStatusLine:
print ("exception caught httplib.BadStatusLine..............................Retrying.............")
continue
except urllib2.HTTPError:
print ("exception caught HTTPError....................................Retrying.......")
continue
except socket.error:
print ("exception caught SocketError..........................Retrying.................")
continue
except URLError:
print ("exception caught URLError.....................Retrying......................")
continue
print datetime.datetime.now()
#open_csv()
t1 =threading.Thread(target=open_csv,args=(1,"thread - 1",6242,1))
t1.start()
| [
"noreply@github.com"
] | noreply@github.com |
840afe04e363b8a9e7f6c02c693b8349fd79a76f | 7c7e2576f38b8cf9600d578eacaa4478cf8a1ea7 | /tfy/tfy/doctype/accounting_dimension_default/test_accounting_dimension_default.py | 9e7b1918417fd9c059a41d42971133ce5c8d55fc | [
"MIT"
] | permissive | deepeshgarg007/TFY | 5efdd1dde94a12e1a802254f41af3508d15d8def | 5309d369ad3c4ab3386798011d5ee7a14a503cf4 | refs/heads/master | 2020-10-01T11:32:35.386952 | 2020-01-24T12:47:08 | 2020-01-24T12:47:08 | 227,528,048 | 0 | 0 | NOASSERTION | 2019-12-12T05:35:26 | 2019-12-12T05:35:25 | null | UTF-8 | Python | false | false | 238 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2019, hello@openetech.com and Contributors
# See license.txt
from __future__ import unicode_literals
# import frappe
import unittest
class TestAccountingDimensionDefault(unittest.TestCase):
pass
| [
"pawan@erpnext.com"
] | pawan@erpnext.com |
450daccea38af6db92113e915deff3244bbba285 | 90c180c3bcac46f38e559c190598b7d5e0535e7f | /ch3/ch3-11.py | 835b0082a6d27aa8b257e3961d8fadedde6d07d0 | [] | no_license | q10242/pythone-practice | ac1883d314008d7a566a1d5053fbef366470b662 | 2d7d725d3d9267a416fccf68fc9019e0d95cf2bb | refs/heads/master | 2020-03-18T02:47:25.114712 | 2018-05-22T03:18:11 | 2018-05-22T03:18:11 | 134,207,428 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 96 | py |
# Demonstrates that bool is an int subtype: True/False print as names,
# their type is <class 'bool'>, and int() maps them to 1/0.
x = True
print(x)
print(type(x))
print(int(x))
y = False
# BUG FIX: "print y" is Python 2 statement syntax and is a SyntaxError
# under Python 3, which every other line of this file targets.
print(y)
print(type(y))
print(int(y))
| [
"q10242@gmail.com"
] | q10242@gmail.com |
b33bd4048606496e909cb4c89b8e8ab970602f37 | 3fa59b149e61137fe72ed2587c0becf5324e1071 | /setup.py | fe6a94ab1f54dcf524f0c9e1e5215d0e25f26d90 | [
"MIT"
] | permissive | jpjanet/citsampler | b26c2d9d82b5d20d294535a5da10e7d0d64c029b | 1c395efa648871cebc994729bc50155fad998553 | refs/heads/master | 2020-05-17T19:47:22.261455 | 2019-04-29T01:16:35 | 2019-04-29T01:16:35 | 183,926,189 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 795 | py | #!/usr/bin/env python
from setuptools import setup, find_packages
# Package meta-data.
NAME = 'citsampler'
DESCRIPTION = 'simple rejection sampling MCMC'
URL = 'https://github.com/jpjanet/citsampler.git'
EMAIL = 'jp@mit.edu'
AUTHOR = 'JP Janet'
REQUIRES_PYTHON = '>=3.6.0'
VERSION = '0.1.0'
REQUIRED = ['pyclustering', 'numpy']
# Package the citsampler project; metadata comes from the constants above.
setup(
    name=NAME,
    version=VERSION,
    description=DESCRIPTION,
    author=AUTHOR,
    author_email=EMAIL,
    python_requires=REQUIRES_PYTHON,
    url=URL,
    packages=find_packages(),
    install_requires=REQUIRED,
    entry_points={'console_scripts': ['citsampler = citsampler.__main__:main']},
    # NOTE(review): 'examples/*t.xt' looks like a typo for 'examples/*.txt'
    # — confirm against the repository layout before changing the glob.
    package_data={'citsampler':['scripts/*.sh','examples/*t.xt']},
    tests_require=['pytest'],
    # BUG FIX: setup_requires was [''] — an empty-string requirement that
    # makes setuptools try to resolve a distribution named "", failing builds.
    setup_requires=[],
    include_package_data=True)
| [
"jpjanet@mit.edu"
] | jpjanet@mit.edu |
1c701b7f82a26d6edc9e76f2f086ad91478e6738 | c0fc2932d1d358d77b2475c31ab48cbfaba26c95 | /offsets_day5.py | 786a4806b947d46ca590aa9c9a6784ed14c73288 | [] | no_license | eirikbsu/Advent2017 | 74518a50fb7631e5df71f8a9348cc34e0e62e004 | 2f2e3e0aa8cc49661b6996f46a5d0a02e014b058 | refs/heads/master | 2021-08-31T07:28:36.749287 | 2017-12-20T16:30:35 | 2017-12-20T16:30:35 | 113,969,321 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 803 | py | """
Day 5 - offsets
"""
instructions = []
# Load one integer jump offset per line from the puzzle input file.
with open("day5_input.txt", "r") as file:
    for line in file:
        line_int = int(line)
        instructions.append(line_int)
numberofsteps = 0
escaped = False
currentposition = 0
instructionlistlength = len(instructions)
# Follow the jumps until the pointer moves past the end of the list.
# After each jump the offset just used is decremented if it was >= 3,
# otherwise incremented.
# NOTE(review): a jump to a negative position is not treated as an escape —
# Python's negative indexing would silently wrap to the end of the list.
# Fine only if the input never drives the pointer below zero; verify.
while escaped == False:
    if currentposition >= instructionlistlength:
        escaped = True
    else:
        temp_position = currentposition
        currentposition = currentposition + instructions[currentposition]
        if instructions[temp_position] >= 3:
            instructions[temp_position] = instructions[temp_position] - 1
        else:
            instructions[temp_position] = instructions[temp_position] + 1
        numberofsteps = numberofsteps + 1
print("Number of steps used to escape: %i" % numberofsteps)
| [
"ebsundmark@gmail.com"
] | ebsundmark@gmail.com |
87cbb71f345b15df29bc16293a3a4183a2d5e827 | 934371d2df9c92087f6997ead546170ddf60f21e | /src/data_manager.py | 783807af91af5186428abbb6ffa9e26cbd67c27e | [] | no_license | mferrato/CISC844FinalProject | ca28e65180b9e6aeac1c7b215ea1ed25816994e9 | 69c4068fdb2fe548cb916fb92c87474cb7221a1b | refs/heads/master | 2022-07-18T20:03:04.826453 | 2020-05-22T04:47:30 | 2020-05-22T04:47:30 | 265,935,207 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,110 | py | import pandas as pd
data_directory = f'../dataset/'
class Dataset():
    """Load a clinical outcomes spreadsheet, keep only Relapse/None events,
    and expose the filtered frame plus binary labels (0 = None, 1 = Relapse)."""
    def __init__(self):
        # Filtered pandas DataFrame; populated by load().
        self.dataframe = None
        # Binary outcome Series derived from 'First Event'; populated by load().
        self.labels = None
    def load(self, path):
        """Read data_directory + path as Excel, filter rows, binarize labels."""
        # Reads dataset from excel file
        new_dataframe = pd.read_excel(data_directory + path)
        # Gets rid of the first two instances, which have no data
        new_dataframe = new_dataframe.iloc[2:]
        # Gets rid of all events not pertaining Relapse or Non-Relapse
        new_dataframe = new_dataframe[new_dataframe['First Event'] != 'Censored']
        new_dataframe = new_dataframe[new_dataframe['First Event'] != 'Death']
        new_dataframe = new_dataframe[new_dataframe['First Event'] != 'SMN']
        # Converts Label into binary (0 - None, 1 - Relapse)
        new_labels = new_dataframe['First Event'].apply(label_classification)
        new_dataframe['First Event'] = new_labels
        self.labels = new_dataframe['First Event']
        self.dataframe = new_dataframe
    def print(self):
        # NOTE: deliberately shadows the builtin name "print" as a method.
        print(self.dataframe)
    def shape(self):
        return self.dataframe.shape
    def feature_list(self):
        # Column names of the loaded frame.
        return self.dataframe.columns
    def get_dataset(self):
        return self.dataframe
    def get_labels(self):
        return self.labels
def mrd_classification(x):
if(x == 0):
return 0 # No Risk
elif(x >= 0 and x < 0.1):
return 1 # Low Risk
elif(x >= 0.1 and x < 1.0):
return 2 # Medium Risk
elif(x >= 1.0):
return 3 # High Risk
# Converts Blast to categorical
def blast_classification(x):
if(x <= 5):
return 0 # Low Risk
else:
return 1 # High Risk
# Covert label to binary
def label_classification(x):
if(x == 'None'):
return 0 # None
else:
return 1 # Relapse
# Changes certain classification to a numeric representation
def categorical_string_to_number(x):
if (x == 'No'):
return 0
elif (x == 'Yes'):
return 1
else:
return 2
# Changes gender to a numerical representation
def gender_classification(x):
if (x == 'Male'):
return 0
elif (x == 'Female'):
return 1
| [
"mferrato@udel.edu"
] | mferrato@udel.edu |
c5a5e944bd41c1e4cfadd2c3c620774ec34c22e1 | 31e41995dea5e4a41bc9b942da7e5266cd686757 | /learning/training/python/py2/pgms/sec4/outputparams.py | 5894f5ae44a48540fe4caeee5abca53df43f5154 | [] | no_license | tamle022276/python | 3b75758b8794801d202565c05d32976c146beffd | 4fec225d1e5e2bf0adac5048f7f9f3313ac76e23 | refs/heads/master | 2020-04-01T21:03:01.458768 | 2017-03-13T20:47:35 | 2017-03-13T20:47:35 | 64,878,939 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 291 | py | #!/usr/bin/env python
# outputparams.py - simulate output parameters
def assign(m, n):
    """Simulate "output parameters": rebind both locals to new values and
    hand them back as a tuple for the caller to unpack.

    Rebinding inside the function never affects the caller's objects —
    returning the new values is the Pythonic substitute.
    """
    m, n = 10, [3, 4]  # rebinds the local names only
    return m, n
a = 5; b = [1, 2]
(a, b) = assign(a, b) # updates a, b
print a, b
#####################################
#
# $ outputparams.py
# 10 [3, 4]
#
| [
"tam.le@teradata.com"
] | tam.le@teradata.com |
da0c2a1cf4183a389e9a794b268a35920914e270 | 226be49a7396e7c6004ba4de567f6c22b5b245c0 | /packaging/fremantle/.py2deb_build_folder/gread/src/opt/GRead/views/basic/utils/toolbar.py | ce31b63c1a91f4abdca09d651a501e4d2d0b2425 | [] | no_license | twidi/GRead | 0e315c0c924fa169cb5d16e927c6b54e79e25bd9 | 51429189762b706fbe8ca1b927d89071a556d51e | refs/heads/master | 2021-01-10T19:54:43.098022 | 2010-11-23T00:41:17 | 2010-11-23T00:41:17 | 1,146,572 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,891 | py | # -*- coding: utf-8 -*-
"""
Lib to manage toolbars which appear on mousedown(maemo) or mousemove(not maem0)
and stay visible a few seconds
"""
from PyQt4.QtGui import *
from PyQt4.QtCore import *
import time
class ToolbarOwnerEventFilter(QObject):
    """Event filter installed on the toolbar owner widget: each time the
    owner is resized, ask the managing ToolbarManager (our Qt parent) to
    re-anchor every toolbar. Events are never consumed."""
    def __init__(self, *args, **kwargs):
        super(ToolbarOwnerEventFilter, self).__init__(*args, **kwargs)
    def eventFilter(self, obj, event):
        is_resize = event.type() == QEvent.Resize
        if is_resize:
            self.parent().replace_toolbars()
        # Returning False lets the event propagate normally.
        return False
class ToolbarManager(QObject):
    """Shows a set of floating toolbars when the user interacts with the
    owner widget (our Qt parent) and hides them again once max_delay
    milliseconds have elapsed without interaction."""
    def __init__(self, toolbars, event_target, *args, **kwargs):
        super(ToolbarManager, self).__init__(*args, **kwargs)
        parent = self.parent()
        self.event_target = event_target
        self.toolbars = toolbars
        self.mode_opacity = False # don't know how to change opacity !
        self.timer = QTimer()
        # Remaining hide-countdown in ms; 0 means the toolbars are hidden.
        self.delay = 0
        self.max_delay = 1000.0 # ms (don't forget ".0")
        # Watch the owner for hover events (eventFilter below) and resizes
        # (delegated to ToolbarOwnerEventFilter -> replace_toolbars).
        parent.installEventFilter(self)
        parent.installEventFilter(ToolbarOwnerEventFilter(parent=self))
        QObject.connect(self.timer, SIGNAL("timeout()"), self.hide)
    def add_toolbar(self, toolbar):
        # Register a toolbar (idempotent) and re-display the set whenever
        # the toolbar's action is triggered.
        if toolbar not in self.toolbars:
            self.toolbars.append(toolbar)
            toolbar.action.triggered.connect(self.display)
    def replace_toolbars(self):
        # Re-anchor every toolbar (called after the owner widget resizes).
        for toolbar in self.toolbars:
            toolbar.replace()
    def display(self):
        # Show all toolbars and (re)start the auto-hide countdown.
        for toolbar in self.toolbars:
            if self.mode_opacity:
                toolbar.setStyleSheet("opacity:1")
            toolbar.show()
        self.timer.stop()
        self.delay = self.max_delay
        self.timer.start(self.max_delay)
    def hide(self):
        # Timer callback: hide immediately, or fade out in steps when
        # mode_opacity is enabled.
        if not self.delay:
            return
        if self.mode_opacity:
            self.delay = int(self.delay/20)*10
        else:
            self.delay = 0
        if self.delay == 0:
            self.timer.stop()
            for toolbar in self.toolbars:
                toolbar.hide()
        else:
            # Dim proportionally to the remaining delay, then re-arm the timer.
            opacity = 255*self.delay/self.max_delay
            for toolbar in self.toolbars:
                toolbar.setStyleSheet("opacity:%f" % opacity)
            self.timer.setInterval(self.delay)
    def eventFilter(self, obj, e):
        # Any hover movement over the owner re-displays the toolbars,
        # unless they were shown very recently (delay >= 500 ms left).
        if e.type() == QEvent.HoverMove:
            if (not self.delay) or self.delay < 500:
                self.display()
        return False
class Toolbar(QObject):
    """A single floating, frameless toolbar holding one large text action,
    anchored at (x, y) on the owner widget; coordinates in (0, 1] are
    treated as fractions of the owner's size (see move())."""
    def __init__(self, text, tooltip, callback, x, y, *args, **kwargs):
        super(Toolbar, self).__init__(*args, **kwargs)
        parent = self.parent()
        # show() is a no-op until enable() is called.
        self.enabled = False
        # Requested anchor position, kept so replace() can re-apply it.
        self.x = x
        self.y = y
        self.toolbar = QToolBar(parent)
        self.toolbar.setAllowedAreas(Qt.NoToolBarArea)
        parent.addToolBar(Qt.NoToolBarArea, self.toolbar)
        self.action = QAction(text, parent)
        self.action.setToolTip(tooltip)
        self.toolbar.addAction(self.action)
        # The QToolButton Qt created for the action is the last child added.
        self.button = self.toolbar.children()[-1]
        self.toolbar.setContentsMargins(0, 0, 0, 0)
        font = self.button.font()
        font.setPointSizeF(font.pointSizeF() * 3)
        self.button.setFont(font)
        palette = self.toolbar.palette()
        # Round, transparent button that inverts its colors on hover,
        # using the widget palette's highlight colors.
        self.button.setStyleSheet(
            """
            QToolButton {
                border : none;
                border-radius : %(border_radius)s;
                background: transparent;
                color: %(background_hover)s;
            }
            QToolButton:hover {
                background: %(background_hover)s;
                color: %(foreground_hover)s;
            }
            """ %
            {
                'border_radius': int(self.button.height()/2),
                'background_hover': palette.color(palette.Highlight).name(),
                'foreground_hover': palette.color(palette.HighlightedText).name(),
            }
        )
        self.toolbar.setStyleSheet("border:none;background:transparent")
        self.toolbar.resize(self.button.sizeHint())
        self.move(x, y)
        self.toolbar.setMovable(False)
        self.toolbar.hide()
        if callback:
            self.action.triggered.connect(callback)
    def set_tooltip(self, tooltip):
        self.action.setToolTip(tooltip)
    def replace(self):
        # Re-apply the stored anchor (used after the owner widget resizes).
        self.move(self.x, self.y)
    def move(self, x, y):
        """
        Move the toolbar to coordinates x,y
        If a coordinate is 0 < ? <= 1, it's a percent
        of the width or height
        """
        w_width = self.parent().width()
        t_width = self.toolbar.width()
        if not x or x < 0:
            _x = 0
        elif x > 1:
            _x = x
        else:
            _x = int(x * (w_width - t_width))
        # Clamp 2 px inside the owner's horizontal bounds.
        if _x < 2:
            _x = 2
        elif _x > (w_width - t_width -2):
            _x = (w_width - t_width -2)
        w_height = self.parent().height()
        t_height = self.toolbar.height()
        if not y or y < 0:
            _y = 0
        elif y > 1:
            _y = y
        else:
            _y = int(y * (w_height - t_height))
        # Clamp 2 px inside the owner's vertical bounds.
        if _y < 2:
            _y = 2
        elif _y > (w_height - t_height -2):
            _y = (w_height - t_height -2)
        self.toolbar.move(_x, _y)
    def move_x(self, x):
        self.move(x, self.toolbar.y())
    def move_y(self, y):
        self.move(self.toolbar.x(), y)
    def disable(self):
        self.enabled = False
    def enable(self):
        self.enabled = True
    def hide(self):
        self.toolbar.hide()
    def show(self):
        # Only honored once the toolbar has been enable()d.
        if not self.enabled:
            return
        #self.toolbar.setStyleSheet("opacity:1")
        self.toolbar.show()
| [
"s.angel@twidi.com"
] | s.angel@twidi.com |
7fc78a96811a0f46faa2e7fdc489c6ccfdf5de20 | b7f1b4df5d350e0edf55521172091c81f02f639e | /components/arc/video_accelerator/DEPS | be1c9c99ce26a0e5b89f2611421f734fc2f70e77 | [
"BSD-3-Clause"
] | permissive | blusno1/chromium-1 | f13b84547474da4d2702341228167328d8cd3083 | 9dd22fe142b48f14765a36f69344ed4dbc289eb3 | refs/heads/master | 2023-05-17T23:50:16.605396 | 2018-01-12T19:39:49 | 2018-01-12T19:39:49 | 117,339,342 | 4 | 2 | NOASSERTION | 2020-07-17T07:35:37 | 2018-01-13T11:48:57 | null | UTF-8 | Python | false | false | 296 | include_rules = [
"+components/arc/common",
"+gpu/command_buffer/service/gpu_preferences.h",
"+media/video",
"+media/base/video_frame.h",
"+media/base/video_types.h",
"+media/gpu",
"+mojo/edk/embedder",
"+services/service_manager/public/cpp",
"+ui/gfx",
"+ui/ozone/public",
]
| [
"commit-bot@chromium.org"
] | commit-bot@chromium.org | |
62ef588765d6c7e6b84af0dc6cae39a3959746b4 | 9787395b9f6ffe27753067862112c21f79ffa25f | /lab4/generals/message_queue_to_propagate.py | 11203373d96fa9ec9e3be071f63370bd9139cf2b | [] | no_license | mrfaiz/distributed-system-ws20-21 | f6b397dd7d710a21dc48bc626bffd10afab22eca | a92434ea960dcaaea57acb36a656a1177ee4689f | refs/heads/master | 2023-03-21T13:16:41.017653 | 2021-03-10T23:27:04 | 2021-03-10T23:27:04 | 313,585,576 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 336 | py | from queue import Queue
from threading import Lock
from propagate_message_info import PropagateMessageInfo
class MessageQueueToPropagate:
    """Thin thread-safe FIFO (backed by queue.Queue) holding messages that
    await propagation to other generals."""
    def __init__(self):
        self.queue = Queue()
    def getData(self):
        # Blocks until an item is available (queue.Queue.get default).
        return self.queue.get()
    def putData(self, data: PropagateMessageInfo):
        self.queue.put(data)
"khulna22@gmail.com"
] | khulna22@gmail.com |
7ed8d7af33e5f1067a316bbf43e6b0b05e27818a | 832e56f182baceb19037e55ab1b4180e121065fe | /build/hector_slam/hector_trajectory_server/catkin_generated/pkg.installspace.context.pc.py | cd2eba0b70034f53bd53f7f65b2b39be5e18acff | [] | no_license | Automated-Aerial-Cinematography/hec_ws | 8127989967ca713d049a77843fd61e3638775245 | 89bede36ac09a60de119d5fb9ec504910957c77e | refs/heads/master | 2020-04-10T15:32:05.815875 | 2018-12-10T03:47:55 | 2018-12-10T03:47:55 | 161,112,807 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 411 | py | # generated from catkin/cmake/template/pkg.context.pc.in
# All values below were substituted by catkin at configure time; empty
# strings mean the package exports nothing for that field.
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []  # no exported include dirs
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')  # no catkin run dependencies
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []  # no exported libraries
PROJECT_NAME = "hector_trajectory_server"
PROJECT_SPACE_DIR = "/home/sanjuksha/MotionPlanning/project/hec_ws/install"
PROJECT_VERSION = "0.3.5"
| [
"sanjuksha@gmail.com"
] | sanjuksha@gmail.com |
2dcb1660882ed55001083899c2ed586bd7c9d624 | 091b91a631520db2af9a935cb571258d3fd0ce63 | /Python/6x/1029.py | 76277e3ca3d243d4cce10ebf36ce3ff61a2d9f0c | [] | no_license | victorhundo/URI | 75fec1efe959e5ad8e366cba91a3374ddedb1228 | f7547b187303d386d7ace577644eb1f2d864e97a | refs/heads/master | 2018-10-09T19:49:27.540405 | 2018-08-01T19:26:44 | 2018-08-01T19:26:44 | 108,522,027 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 514 | py | testes = int(input())
for i in range(testes):
    n = int(input())
    # valor[k] holds fib(k); chamadas[k] holds the number of recursive
    # calls a naive fib(k) would make: calls(k) = calls(k-1) + calls(k-2) + 2.
    valor = [0] * (n + 1)
    chamadas = [0] * (n + 1)
    # NOTE(review): the inner loop reuses the name "i", shadowing the
    # test-case counter; harmless only because the outer "i" is never read.
    for i in range(n + 1):
        if (i == 0):
            valor[i] = 0
            chamadas[i] = 0
        elif (i == 1):
            valor[i] = 1
            chamadas[i] = 0
        else:
            valor[i] = valor[i-1] + valor[i-2]
            chamadas[i] = chamadas[i-1] + chamadas[i-2] + 2
    # Output prints the call count after "fib(n) =" and the Fibonacci
    # value after "calls =" — presumably the judge's required format; verify.
    msg = "fib({}) = {} calls = {}"
    print(msg.format(n,chamadas[n],valor[n]))
| [
"victorhundo@gmail.com"
] | victorhundo@gmail.com |
0753aa3356700a799716e651ae0e43a81566ad1d | c20c7d658218f30521048bce11bf5a6e16384b27 | /f_10_字典.py | 98b8fcf90013ec921cb5094995f685d4f3931b34 | [] | no_license | fjf3997/study_python | e6d5a5717250dd37b27a1933fc22ae08c9df3539 | 2ae6d335bba478e05f2427bd11653d500249d6f7 | refs/heads/master | 2020-07-02T15:35:39.752721 | 2019-08-19T09:25:39 | 2019-08-19T09:25:39 | 201,574,792 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 389 | py | fjf = {"name": "樊家富",
"age": 18,
"gender": True,
"height": 185,
"weight": 70}
# Read a value by key
print(fjf["name"])
# Modify an existing key / add a new one
fjf["hobby"] = "basketball"
fjf["name"] = "cxk"
# Delete a key (pop removes it and returns its value)
fjf.pop("name")
# Number of key/value pairs
print(len(fjf))
# Merge another dict in (existing keys are overwritten)
temp = {"country": "china",
        "age": 20}
fjf.update(temp)
# Empty the dictionary
fjf.clear()
print(fjf)
| [
"1763994902@qq.com"
] | 1763994902@qq.com |
478bd1be33ce309931c2ad4f54f7c5793a151e68 | aa142e6a319ba4cccbc1c00008a00ff19a2a6ce3 | /src/metrics/surfaceEstimation.py | 1db97c1ea2d4edf7e7d805f5c0a478f90cd83951 | [] | no_license | elhamAm/PlantPhenotyping | 20f1599e811f90888de0be4282771d019554469d | d96103c4fba337f42728e6bb50b95fcbda017689 | refs/heads/master | 2023-07-26T02:11:23.506189 | 2021-09-06T14:30:47 | 2021-09-06T14:30:47 | 403,652,663 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 821 | py | import numpy as np
import open3d as o3d
from numpy.linalg import norm
import trimesh.graph as trimesh
def findAreaOfTop(meshPath):
    """Sum the areas of every triangle in the mesh loaded from meshPath.

    Despite the name, the visible code applies no top/bottom filtering —
    it returns the total surface area of the whole mesh.
    """
    mesh = o3d.io.read_triangle_mesh(meshPath)
    #sumNew = mesh.get_surface_area()
    tris = np.asarray(mesh.triangles)  # per-triangle vertex index triples
    vers = np.asarray(mesh.vertices)   # vertex coordinates
    mesh.compute_vertex_normals(normalized=True)
    mesh.compute_triangle_normals(normalized=True)
    # NOTE(review): tri_normals is computed but never used below.
    tri_normals = np.asarray(mesh.triangle_normals)
    summ = 0
    for i in range(len(tris)):
        tri = tris[i]
        v1 = vers[tri[0]]
        v2 = vers[tri[1]]
        v3 = vers[tri[2]]
        # Triangle area = |(v2 - v1) x (v3 - v1)| / 2
        area = np.cross(v2 - v1, v3 - v1) / 2
        area = norm(area, 2)
        summ += area
    # NOTE(review): the adjacency structure below is built but never used
    # and does not affect the return value — candidate for removal.
    listAdj = trimesh.face_adjacency(faces=tris)
    adjs = []
    for i in range(len(tris)):
        adjs.append([])
    for pair in listAdj:
        adjs[pair[0]].append(pair[1])
        adjs[pair[1]].append(pair[0])
    return summ
| [
"eaminmans@cnb-d102-56.inf.ethz.ch"
] | eaminmans@cnb-d102-56.inf.ethz.ch |
5ebdc3a4b1499d03dc0954911ba0248fd4c5dfb8 | e254a1124bbe6be741159073a22898b0824e2a4f | /customuser/admin.py | 6c225a0579ce6bb67949bffc24b32ad6df83f3a0 | [] | no_license | skiboorg/stdiplom | 0df83b8e42e999abc43a01157cb24cffd10d0666 | 13101381c7db8a4b949048e8cbfcf9673cf7ecde | refs/heads/master | 2022-11-12T23:55:21.136176 | 2020-06-29T05:57:03 | 2020-06-29T05:57:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,030 | py | from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as DjangoUserAdmin
from django.utils.translation import ugettext_lazy as _
from .models import User,Guest
@admin.register(User)
class UserAdmin(DjangoUserAdmin):
    """Admin for the custom User model: every form here is keyed on email
    (no username field is configured in this admin)."""
    # Field layout for the change (edit) form.
    fieldsets = (
        (None, {'fields': ('email', 'password', 'used_promo')}),
        (_('Personal info'), {'fields': ('fio', 'phone', 'comment', 'is_allow_email')}),
        (_('Permissions'), {'fields': ('is_active', 'is_staff', 'is_superuser',
                                       'groups', 'user_permissions')}),
        (_('Important dates'), {'fields': ('last_login', 'date_joined')}),
    )
    # Field layout for the "add user" form (password entered twice).
    add_fieldsets = (
        (None, {
            'classes': ('wide',),
            'fields': ('email', 'password1', 'password2', 'phone'),
        }),
    )
    # Changelist columns, default ordering and admin search fields.
    list_display = ('email', 'fio', 'phone')
    ordering = ('email',)
    search_fields = ('email', 'fio', 'phone')
# Guest gets the default ModelAdmin (no custom options).
admin.site.register(Guest)
"ddnnss.i1@gmail.com"
] | ddnnss.i1@gmail.com |
f9144fedb5cc9fddb71593c4a93abd6afdf370cf | bdac4e8a4624b6e963f698eaa7589c048a503658 | /Momentum.py | f5964bf5c1bfbb2e44e733cd7bc02141fcfaa8b8 | [] | no_license | June2552/Deep_learning_reposit | 6bd17dc4342cc5747b65b38890ecf89f1ef87b90 | 37d08f2ad168897b5b064a9cc930b83f4d78cd34 | refs/heads/main | 2023-03-31T08:33:16.339282 | 2021-04-13T13:55:38 | 2021-04-13T13:55:38 | 355,958,433 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 472 | py | class Momentum:
    def __init__(self, lr=0.01, momentum=0.9):
        # lr: learning rate; momentum: velocity decay factor.
        self.lr = lr
        self.momentum = momentum
        # Per-parameter velocity dict, created lazily on the first update().
        self.v = None
def update(selfself, params, grads):
if self.v is None:
self.v = {}
for key, val in params.items():
self.v[key] = np.zeros_like(val)
for key in params.keys():
self.v[key] = self.momentum*self.v[key] - self.lr*grads[key]
params[key] += self.v[key]
| [
"junhyeogj04@gmail.com"
] | junhyeogj04@gmail.com |
fb4e50c6e9fca9fa04f4da9e14ac7568cc02f627 | c13f9a89dece8d24492a834aeefa730db9542006 | /p007.py | 03d69a8ed944a79f0d6aa2eecd3ac7dae6e59d53 | [] | no_license | ilya-il/projecteuler.net | 6f5bbc96afe806e56b009523e84e21a2d2080dc1 | 0cf00ceadeab332d7a6dd497462bdc4541c5ae48 | refs/heads/master | 2021-09-07T02:00:33.226681 | 2018-02-15T12:25:47 | 2018-02-15T12:25:47 | 110,359,245 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,506 | py | #!/usr/bin/python3
# coding: utf-8
# IL 30.10.2017
"""
ProjectEuler Problem 7
"""
__author__ = 'ilya_il'
import time
def get_prime_numbers(upper_bound):
    """Return all primes below upper_bound (always including 2), found by
    trial division of each odd candidate against the primes collected so far."""
    primes = [2, ]
    candidate = 3
    while candidate < upper_bound:
        # all() short-circuits on the first divisor, like the original
        # for/else loop with break.
        if all(candidate % p for p in primes):
            primes.append(candidate)
        candidate += 2
    return primes
def get_prime_number_by_pos(pos):
    """Return the pos-th prime (1-indexed; pos <= 1 yields 2), growing the
    prime list by trial division of successive odd candidates."""
    primes = [2, ]
    candidate = 3
    while len(primes) < pos:
        if all(candidate % p for p in primes):
            primes.append(candidate)
        candidate += 2
    # The last prime appended is the pos-th one.
    return primes[-1]
def get_prime_number_by_pos2(pos):
upper_bound = 105000
nums = [n for n in range(2, upper_bound)]
# get prime numbers
n = 0
while nums[n]**2 <= upper_bound:
if nums[n] != 0:
# n - index of prime number
# pn - prime number
pn = nums[n]
for i in range(pn + n, upper_bound - 2, pn):
nums[i] = 0
n += 1
# count prime numbers
n = 0
res = 0
print(nums)
for i in range(0, upper_bound - 2):
if nums[i] != 0:
n += 1
if n == pos:
res = nums[i]
print(pos)
break
# return last number in list
return res
# Time the sieve-based solution for the 10001st prime (Project Euler #7).
st = time.time()
print(get_prime_number_by_pos2(10001))
print("--- %s seconds ---" % (time.time() - st))
| [
"il.khimki@yandex.ru"
] | il.khimki@yandex.ru |
5242e3b788b825eca2903c994d8d2e0fb3c67c25 | f1aab3818e194882348a9bed0bc6d9dee1d0c894 | /NPP/process_spec.py | 7ec4905f47b5b59e346ece781fa3944b0adc029b | [
"MIT"
] | permissive | cbchoi/nppsim | 73b6ef3a66f12686eaac94209c0f8884c4952ab4 | 4d096f9d2fdb5ebf3e3e83be7b1974bfc92554c1 | refs/heads/master | 2021-02-14T14:15:29.319118 | 2020-03-04T07:00:21 | 2020-03-04T07:00:21 | 244,810,497 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,995 | py | from collections import OrderedDict
from evsim.behavior_model import BehaviorModel
from evsim.system_message import SysMessage
from evsim.definition import *
from config import *
class ProcessSpec(BehaviorModel):
    """Serializable guarded-transition specification built on BehaviorModel.

    Transitions are stored both in the (pre_state, event, condition)-keyed
    *_transition_map_tuple dicts (presumably initialized by BehaviorModel —
    they are not created here; verify) and in per-state search structures
    keyed by pre_state for fast retrieval.
    """
    def __init__(self, name):
        BehaviorModel.__init__(self, name)
        self.init_state = None
        # List of (type, name) pairs registered via insert_shared_variables.
        self.shared_variables = []
        # pre_state -> [(condition, event, post_state, actions), ...]
        self.search_structure_external = {}
        self.search_structure_internal = {}

    def set_init_state(self, _state):
        self.init_state = _state

    def retrieve_init_state(self):
        return self.init_state

    def insert_shared_variables(self, _type, _name):
        self.shared_variables.append((_type, _name))

    def retrieve_shared_variables(self):
        return self.shared_variables

    def insert_guarded_internal_transition(self, pre_state, event, post_state, condition, actions):
        # NOTE(review): this tuple stores actions twice, unlike the external
        # variant below — kept as-is in case a consumer indexes element [2].
        self.internal_transition_map_tuple[(pre_state, event, condition)] = (actions, post_state, actions)
        if (pre_state, condition) in self.internal_transition_map_state:
            # BUG FIX: the original called list.append(event, post_state) —
            # append takes one argument, so this raised TypeError (and
            # dropped actions). Append a single (event, post, actions) tuple,
            # matching the else branch below.
            self.internal_transition_map_state[(pre_state, condition)].append((event, post_state, actions))
        else:
            self.internal_transition_map_state[(pre_state, condition)] = [(event, post_state, actions)]
        if pre_state in self.search_structure_internal:
            self.search_structure_internal[pre_state].append((condition, event, post_state, actions))
        else:
            self.search_structure_internal[pre_state] = [(condition, event, post_state, actions)]

    def insert_guarded_external_transition(self, pre_state, event, post_state, condition, actions):
        self.external_transition_map_tuple[(pre_state, event, condition)] = (actions, post_state)
        if (pre_state, condition) in self.external_transition_map_state:
            # BUG FIX: same TypeError as the internal variant — append one
            # tuple instead of three positional arguments.
            self.external_transition_map_state[(pre_state, condition)].append((event, post_state, actions))
        else:
            self.external_transition_map_state[(pre_state, condition)] = [(event, post_state, actions)]
        if pre_state in self.search_structure_external:
            self.search_structure_external[pre_state].append((condition, event, post_state, actions))
        else:
            self.search_structure_external[pre_state] = [(condition, event, post_state, actions)]

    def retrieve_g_external_transition(self, pre_state):
        return self.search_structure_external[pre_state]

    def retrieve_g_internal_transition(self, pre_state):
        return self.search_structure_internal[pre_state]

    def serialize(self):
        """Dump the spec as an OrderedDict (states/ports come from BehaviorModel)."""
        json_obj = OrderedDict()
        json_obj["name"] = self._name
        json_obj["states"] = self._states
        json_obj["input_ports"] = self.retrieve_input_ports()
        json_obj["output_ports"] = self.retrieve_output_ports()
        json_obj["shared_variables"] = self.shared_variables
        json_obj["external_trans"] = self.external_transition_map_state
        json_obj["internal_trans"] = self.internal_transition_map_state
        return json_obj

    def deserialize(self, json):
        """Rebuild the spec from the structure produced by serialize()."""
        self._name = json["name"]
        for k, v in json["states"].items():
            self.insert_state(k, v)
        # Handle input ports
        for port in json["input_ports"]:
            self.insert_input_port(port)
        # Handle output ports
        for port in json["output_ports"]:
            self.insert_output_port(port)
        # BUG FIX: each entry is a (type, name) pair; the original passed the
        # whole pair as one argument (TypeError: missing '_name').
        for var in json["shared_variables"]:
            self.insert_shared_variables(var[0], var[1])
        # Handle external transitions; keys are (pre_state, condition),
        # values are lists of (event, post_state, actions).
        # (Stray debug print(v) removed.)
        for k, v in json["external_trans"].items():
            for ns in v:
                self.insert_guarded_external_transition(k[0], ns[0], ns[1], k[1], ns[2])
        # Handle internal transitions
        for k, v in json["internal_trans"].items():
            for ns in v:
                self.insert_guarded_internal_transition(k[0], ns[0], ns[1], k[1], ns[2])
"cbchoi@Changbeomui-MacBookPro.local"
] | cbchoi@Changbeomui-MacBookPro.local |
aaa7031c4835b8beaa1ff181be57e0baa1bc6fda | 39461655ed7f8daf6fcc442d9508f37929bbc4e4 | /chapter3/3.3.2-1_logit_and_sigmoid.py | 0dd2ba5d7a9305fccabbc2acbf759c7ccd2dfb1f | [] | no_license | mkomatsu-0223/Study_Machine-Learning | 79d03d368758b2ef66092ed38d6de69e4a4c27a1 | 0787ca4a419788935e1ad04ff936860532f4317f | refs/heads/main | 2023-06-21T14:39:13.117132 | 2021-08-05T13:37:24 | 2021-08-05T13:37:24 | 381,750,677 | 1 | 0 | null | 2021-08-05T13:37:24 | 2021-06-30T15:34:05 | Python | UTF-8 | Python | false | false | 1,791 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Jun 24 21:54:13 2021
@author: KOMATSU
"""
import numpy as np
import matplotlib.pyplot as plt
# シグモイド関数を定義
def sigmoid(z):
    """Logistic sigmoid 1/(1 + e^-z); works elementwise on numpy arrays."""
    denominator = 1.0 + np.exp(-z)
    return 1.0 / denominator
# y=1のコストを計算する関数
def cost_1(z):
    """Logistic-regression cost for a y=1 sample: -log(sigmoid(z))."""
    return - np.log(sigmoid(z))
# y=0のコストを計算する関数
def cost_0(z):
    """Logistic-regression cost for a y=0 sample: -log(1 - sigmoid(z))."""
    return - np.log(1 - sigmoid(z))
# Generate data from -7 (inclusive) to 7 (exclusive) in 0.1 steps
z = np.arange(-7, 7, 0.1)
# Run the sigmoid function on the generated data
phi_z = sigmoid(z)
# Plot the inputs against the sigmoid outputs
plt.plot(z, phi_z)
# Add a vertical line at z = 0
plt.axvline(0.0, color='k')
# Set the y-axis limits
plt.ylim(-0.1, 1.1)
# Set the axis labels
plt.xlabel('z')
plt.ylabel('$\phi (z)$')
# Add y-axis ticks
plt.yticks([0.0, 0.5, 1.0])
# Get the Axes object
ax = plt.gca()
# Add horizontal grid lines matching the y-axis ticks
ax.yaxis.grid(True)
# Show the graph
plt.tight_layout()
plt.show()
# Generate data from -10 (inclusive) to 10 (exclusive) in 0.1 steps
z = np.arange(-10, 10, 0.1)
# Run the sigmoid function
phi_z = sigmoid(z)
# Compute the y=1 cost curve
c1 = [cost_1(x) for x in z]
# Plot sigmoid output against the y=1 cost
plt.plot(phi_z, c1, label='J(w) if y=1')
# Compute the y=0 cost curve
c0 = [cost_0(x) for x in z]
# Plot sigmoid output against the y=0 cost
plt.plot(phi_z, c0, linestyle='--', label='J(w) if y=0')
# Set the y- and x-axis limits
plt.ylim(0.0, 5.1)
plt.xlim([0, 1])
# Set the axis labels
plt.xlabel('$\phi$(z)')
plt.ylabel('J(w)')
# Place the legend
plt.legend(loc='upper center')
# Show the graph
plt.tight_layout()
plt.show()
| [
"komatsu_milkyway@yahoo.co.jp"
] | komatsu_milkyway@yahoo.co.jp |
a91672d089cb0c860f05281b2ef3658478439521 | 4baa202014ceb55863ac43001ff1082da65ccffc | /userPortal/migrations/0016_auto_20160825_1954.py | 28adb76837743f334e931924f232699a536960be | [] | no_license | danielsbonnin/b_and_d | 03ab745ad921e0802e3615596086ee34b359b4bc | f0edf44b9a69bd7ed78ca7d668c1223d0fadfc43 | refs/heads/master | 2021-05-15T00:30:34.234741 | 2017-10-04T23:33:13 | 2017-10-04T23:33:13 | 103,151,154 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 516 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-08-26 00:54
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated: drops the did_do_homework / did_read boolean flags
    # from the Child model.
    dependencies = [
        ('userPortal', '0015_dailyrequirementsreport_user'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='child',
            name='did_do_homework',
        ),
        migrations.RemoveField(
            model_name='child',
            name='did_read',
        ),
    ]
| [
"danielsbonnin@gmail.com"
] | danielsbonnin@gmail.com |
3b01ac301331ea9f6f0296f4f7b0d38b3eca0120 | 40bd0c4e5b5f44adceeb7586418833394edaaa9c | /blog/migrations/0001_initial.py | b30f556d8e3aa2bf79bd6d418762da92b2c17c97 | [] | no_license | uzay00/ilk-blogum | 522d94c9baf1ac6df8e42d3b2d9f6e27443fd23a | c7896e1fffad591dea67798ad8d3b70e88ed04d1 | refs/heads/master | 2020-12-31T00:30:43.581675 | 2017-03-29T11:47:39 | 2017-03-29T11:47:39 | 86,560,634 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,058 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-29 08:57
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial migration: create the ``Post`` model for the blog app.

    Field names are Turkish (presumably: baslik=title, yazi=body,
    yaratilma_tarihi=created date, yayinlanma_tarihi=published date,
    yazar=author).
    """
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('baslik', models.CharField(max_length=200)),
                ('yazi', models.TextField()),
                ('yaratilma_tarihi', models.DateTimeField(default=django.utils.timezone.now)),
                ('yayinlanma_tarihi', models.DateTimeField(blank=True, null=True)),
                ('yazar', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"uzay00@gmail.com"
] | uzay00@gmail.com |
d5851cdf090e446c4ab7873e0849f31e475286aa | bc8f716ee07e3a9762ac248e7188d56b62417d0d | /KmeansScreen.py | 4cf8026abb89e72cdc1c6f9368e02e701d58dca6 | [] | no_license | silvavn/thesiswork1 | 98b759037a72d89dc81476cdf3360c86d3b28046 | 510b8dd1e4282dc6559b27202cdef26e18fb85f9 | refs/heads/master | 2021-07-02T19:41:26.825894 | 2017-09-22T16:30:59 | 2017-09-22T16:30:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,011 | py | #!/usr/bin/env python
import tkinter as tk
from tkinter.simpledialog import *
#Screen that controls the Click on distribution
#numpy.random.normal(loc=0.0, scale=1.0, size=None)
class DistributionScreen(tk.Frame):
    """Dialog frame for configuring a random point distribution.

    Collects a scale (standard deviation), a sample count and an optional
    cluster label via three labelled Entry widgets; callers read the
    current values from the ``scale``, ``size`` and ``label`` StringVars.
    """
    def __init__(self, master):
        tk.Frame.__init__(self, master)
        self.grid()
        master.title("Distribution Config")
        # Row captions for the three input fields.
        tk.Label(self, text="Scale in Std. Dev.:").grid(row=0)
        tk.Label(self, text="Size (Num. of points):").grid(row=1)
        tk.Label(self, text="Cluster Label:").grid(row=2)
        # Spread of the distribution (standard deviation), default "30".
        self.scale = StringVar(self)
        self.scale_entry = tk.Entry(self, textvariable=self.scale)
        self.scale_entry.insert(0, "30")
        self.scale_entry.grid(row=0,column=1)
        # Number of points to generate, default "150".
        self.size = StringVar(self)
        self.size_entry = tk.Entry(self, textvariable=self.size)
        self.size_entry.insert(0, "150")
        self.size_entry.grid(row=1,column=1)
        # Optional cluster label, default literal string "None".
        self.label = StringVar(self)
        self.label_entry = tk.Entry(self, textvariable=self.label)
        self.label_entry.insert(0, "None")
        self.label_entry.grid(row=2,column=1)
    def close_windows(self):
        # Destroy the toplevel window hosting this frame.
        self.master.destroy()
class InterpolationtionScreen(tk.Frame):
    """Dialog frame for configuring interpolation settings.

    Note: the class name carries a typo ("Interpolationtion"); it is kept
    unchanged for compatibility with existing callers.
    """
    def __init__(self, master):
        tk.Frame.__init__(self, master)
        self.grid()
        master.title("Interpolation Config")
        # Row captions for the two input fields.
        tk.Label(self, text="Num Steps:").grid(row=0)
        tk.Label(self, text="Added noise(Std. Dev.):").grid(row=1)
        # Number of interpolation steps, default "10".
        self.num_steps = StringVar(self)
        self.num_steps_entry = tk.Entry(self, textvariable=self.num_steps)
        self.num_steps_entry.insert(0, "10")
        self.num_steps_entry.grid(row=0,column=1)
        # Standard deviation of the added noise, default "2".
        self.scale = StringVar(self)
        self.scale_entry = tk.Entry(self, textvariable=self.scale)
        self.scale_entry.insert(0, "2")
        self.scale_entry.grid(row=1,column=1)
        # Variation mode radio buttons: 1 = add noise, 2 = new cluster.
        self.variation_option = IntVar(self)
        self.vo_opt1 = tk.Radiobutton(self, text="Noise", variable=self.variation_option, value=1)
        self.vo_opt2 = tk.Radiobutton(self, text="New Cluster", variable=self.variation_option, value=2)
        self.vo_opt1.grid(row=2, column=0)
        self.vo_opt2.grid(row=2, column=1)
    def close_windows(self):
        # Destroy the toplevel window hosting this frame.
        self.master.destroy()
#Screen that controls the MONIC Framework
#Currently Implements:
#@Tau Match, @Tau Split, @Cluster Shape, @Quadtree Depth, @GridX Resolution, @GridY Resolution
class MONICScreen(tk.Frame):
    """Dialog frame for configuring the MONIC cluster-transition framework.

    Exposes: tau match, tau split, cluster shape, quadtree depth and the
    X/Y grid resolution. Callers read the values from the corresponding
    StringVars.
    """
    def __init__(self, master):
        tk.Frame.__init__(self, master)
        self.grid()
        master.title("MONIC Config")
        # Row captions for the six configuration fields.
        tk.Label(self, text="tau match:").grid(row=0)
        tk.Label(self, text="tau split:").grid(row=1)
        tk.Label(self, text="Cluster Shape:").grid(row=2)
        tk.Label(self, text="Quadtree Depth:").grid(row=3)
        tk.Label(self, text="Grid X Resolution:").grid(row=4)
        tk.Label(self, text="Grid Y Resolution:").grid(row=5)
        # MONIC match threshold (presumably an overlap ratio), default 0.5.
        self.match = StringVar(self)
        self.match_entry = tk.Entry(self, textvariable=self.match)
        self.match_entry.insert(0, "0.5")
        self.match_entry.grid(row=0,column=1)
        # MONIC split threshold, default 0.1.
        self.split = StringVar(self)
        self.split_entry = tk.Entry(self, textvariable=self.split)
        self.split_entry.insert(0, "0.1")
        self.split_entry.grid(row=1,column=1)
        # Cluster shape selector; defaults to "Circle".
        self.shape_state = StringVar(self)
        self.shape_state.set("Circle")
        self.shapemenu = OptionMenu(self, self.shape_state, "Circle", "Box", "Grid", "Quadtree")#, command=self.clustering_controller)
        self.shapemenu.grid(row=2, column=1)
        # Maximum quadtree subdivision depth, default 5.
        self.qt_depth = StringVar(self)
        self.qt_depth_entry = tk.Entry(self, textvariable=self.qt_depth)
        self.qt_depth_entry.insert(0, "5")
        self.qt_depth_entry.grid(row=3,column=1)
        # Grid resolution along X, default 5.
        self.grid_res_x = StringVar(self)
        self.grid_res_x_entry = tk.Entry(self, textvariable=self.grid_res_x)
        self.grid_res_x_entry.insert(0, "5")
        self.grid_res_x_entry.grid(row=4,column=1)
        # Grid resolution along Y, default 5.
        self.grid_res_y = StringVar(self)
        self.grid_res_y_entry = tk.Entry(self, textvariable=self.grid_res_y)
        self.grid_res_y_entry.insert(0, "5")
        self.grid_res_y_entry.grid(row=5,column=1)
    def close_windows(self):
        # Destroy the toplevel window hosting this frame.
        self.master.destroy()
#Screen that controls the DBSCAN Algorithm
#TODO
#Implement metric, algorithm, leaf_size, p
#class sklearn.cluster.DBSCAN(eps=0.5, min_samples=5, metric='euclidean', algorithm='auto', leaf_size=30, p=None, n_jobs=1)
class DBSCANScreen(tk.Frame):
    """Dialog frame for configuring the DBSCAN clustering algorithm.

    Mirrors part of sklearn.cluster.DBSCAN's constructor (eps,
    min_samples, n_jobs); the remaining parameters (metric, algorithm,
    leaf_size, p) are not yet exposed.
    """
    def __init__(self, master):
        tk.Frame.__init__(self, master)
        self.grid()
        master.title("DBSCAN Config")
        # Row captions for the three input fields.
        tk.Label(self, text="eps (datapoints max distance):").grid(row=0)
        tk.Label(self, text="minsamples:").grid(row=1)
        tk.Label(self, text="Num Jobs:").grid(row=2)
        # Maximum distance between neighbouring samples, default "50.0".
        self.eps = StringVar(self)
        self.eps_entry = tk.Entry(self, textvariable=self.eps)
        self.eps_entry.insert(0, "50.0")
        self.eps_entry.grid(row=0,column=1)
        # Minimum samples per neighbourhood, default "5".
        self.min_samples = StringVar(self)
        self.min_samples_entry = tk.Entry(self, textvariable=self.min_samples)
        self.min_samples_entry.insert(0, "5")
        self.min_samples_entry.grid(row=1,column=1)
        # Number of parallel jobs, default "1".
        self.n_jobs = StringVar(self)
        self.n_jobs_entry = tk.Entry(self, textvariable=self.n_jobs)
        self.n_jobs_entry.insert(0, "1")
        self.n_jobs_entry.grid(row=2,column=1)
    def close_windows(self):
        # Destroy the toplevel window hosting this frame.
        self.master.destroy()
#Screen that controls the Kmeans Algorithm
class KmeansScreen(tk.Frame):
    """Configuration dialog for the K-means clustering algorithm.

    Exposes two labelled entry fields whose current values are available
    through the ``num_clusters`` and ``num_jobs`` StringVars, plus a
    Quit button that tears down the hosting window.
    """
    def __init__(self, master):
        tk.Frame.__init__(self, master)
        self.grid()
        master.title("Kmeans Config")
        # Captions for the two input rows.
        tk.Label(self, text="Number of Clusters:").grid(row=0)
        tk.Label(self, text="Number of Jobs:").grid(row=1)
        # Parallel-jobs field (row 1), default "1".
        jobs_var = StringVar(self)
        jobs_entry = tk.Entry(self, textvariable=jobs_var)
        jobs_entry.insert(0, "1")
        jobs_entry.grid(row=1, column=1)
        self.num_jobs = jobs_var
        self.n_jobs_entry = jobs_entry
        # Cluster-count field (row 0), default "1".
        clusters_var = StringVar(self)
        clusters_entry = tk.Entry(self, textvariable=clusters_var)
        clusters_entry.insert(0, "1")
        clusters_entry.grid(row=0, column=1)
        self.num_clusters = clusters_var
        self.n_clusters_entry = clusters_entry
        # Quit button spanning both columns.
        tk.Button(self, text='Quit', command=self.close_windows).grid(columnspan=2)
    def close_windows(self):
        """Destroy the toplevel window hosting this frame."""
        self.master.destroy()
"victor_grego@msn.com"
] | victor_grego@msn.com |
02988134999579f39f20f4c3022896d4181260f2 | 801b8ca51c656a7b5dd6f31c72ef6878d51e4c0e | /feed/v1/api/serializers.py | be840689180935c8519c027b524f524e75539e31 | [] | no_license | adamgrossman/peer_post | 265242d682e822cf1a2594ea3299f439eb24a343 | 6b553dae372909a0e20302bca61da32ea1e6ff5e | refs/heads/master | 2021-01-17T08:58:17.363439 | 2016-03-27T22:31:23 | 2016-03-27T22:31:23 | 28,731,532 | 1 | 1 | null | 2016-03-27T22:31:24 | 2015-01-03T01:25:55 | Python | UTF-8 | Python | false | false | 2,517 | py | from rest_framework import serializers
from feed.models import Member, Group, Link, Comment, Vote
class MemberSerializer(serializers.ModelSerializer):
    """Serialize a Member with related post titles and comment strings."""
    # Titles of the links this member posted (read-only slug field).
    posted = serializers.SlugRelatedField(many=True, read_only=True, slug_field='title')
    # String representations of the member's comments.
    comments = serializers.StringRelatedField(many=True)
    # US-style MM/DD/YYYY formatting for the join date.
    date_joined = serializers.DateTimeField(format="%m/%d/%Y")
    class Meta:
        model = Member
        fields = ('id', 'username', 'first_name', 'last_name', 'profile_photo', 'bio', 'date_joined', 'posted', 'comments')
class GroupSerializer(serializers.ModelSerializer):
    """Serialize a Group together with tuples describing its links."""
    # Computed by get_links() below.
    links = serializers.SerializerMethodField()
    class Meta:
        model = Group
        fields = ('id', 'title', 'description', 'created_at', 'links')
    def get_links(self, obj):
        # (url, title, description, created_at, posted_user) tuples for
        # every link belonging to this group.
        return Link.objects.filter(group=obj).values_list('url', 'title', 'description', 'created_at', 'posted_user')
class CommentSerializer(serializers.ModelSerializer):
    """Serialize a Comment, recursively nesting its child comments."""
    # Computed fields; see the get_* methods below.
    author_name = serializers.SerializerMethodField()
    children = serializers.SerializerMethodField()
    # US-style MM/DD/YYYY formatting for the creation date.
    created_at = serializers.DateTimeField(format="%m/%d/%Y")
    class Meta:
        model = Comment
        fields = ('id', 'created_at', 'body', 'author_name', 'parent', 'children')
    def get_author_name(self, obj):
        # Username of the comment's author.
        return obj.author.username
    def get_children(self, obj):
        # NOTE(review): replies are looked up with ``lft=obj.id`` (an
        # MPTT-style field?); the conventional query would be
        # ``parent=obj`` -- confirm this filter is intentional.
        children = Comment.objects.filter(lft=obj.id)
        child = CommentSerializer(children, many=True)
        return child.data
class LinkSerializer(serializers.ModelSerializer):
    """Serialize a Link with its score, comment tree and display names."""
    # Computed fields; see the get_* methods below.
    comments = serializers.SerializerMethodField()
    user_name = serializers.SerializerMethodField()
    group_name = serializers.SerializerMethodField()
    score = serializers.SerializerMethodField()
    class Meta:
        model = Link
        fields = ('id', 'title', 'url', 'description', 'created_at', 'posted_user', 'user_name', 'group', 'group_name', 'flag', 'score', 'comments',)
    def get_user_name(self, obj):
        # Username of the member who posted the link.
        return obj.posted_user.username
    def get_comments(self, obj):
        # Only top-level comments (parent is NULL); nested replies are
        # produced by CommentSerializer.get_children().
        all_comments = Comment.objects.filter(link=obj, parent__isnull=True)
        serializer = CommentSerializer(all_comments, many=True)
        return serializer.data
    def get_group_name(self, obj):
        # Title of the group the link was posted to.
        return obj.group.title
    def get_score(self, obj):
        # Net score: upvotes minus downvotes for this link.
        up_votes = Vote.objects.filter(link=obj).filter(up_vote=True).count()
        down_votes = Vote.objects.filter(link=obj).filter(up_vote=False).count()
        score = up_votes - down_votes
        return score
| [
"adam.grossman08@me.com"
] | adam.grossman08@me.com |
2b66aa65a56259e78f6982af2c65f86099041989 | 5ca4a9526cceb69d653fde083b07422a5bf65e78 | /env/bin/epylint | f07770b7ee332a8ca6ee9d59f2e1dc4f3a11b2a2 | [] | no_license | hgyoon/M-DICE-REACT-DJANGO | 679fec6849baa3d28c83869dc6e99457a3baf2b9 | 6cf1006f7bad2c3122b7d6ad34bd311ec01d1a00 | refs/heads/master | 2023-03-06T11:01:49.194562 | 2021-01-28T02:40:45 | 2021-01-28T02:40:45 | 304,380,926 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 258 | #!/Users/panda/jsDev/PavementProj/djangoV2/env/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pylint import run_epylint
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(run_epylint())
| [
"hgyoon@umich.edu"
] | hgyoon@umich.edu | |
0dde26ff06a5f5bdf0da66bdea5403aae187b87e | 95ed3c52785461503c2443f3fd8c5dad4757a191 | /scripts/sbstest.py | 993bfaf338646056095fd4b2af4a91b6dc4b0565 | [] | no_license | dunsword/lsapp | 39af9d7f727ceabb388f025d23fbaf45e1abdb6e | 8c4e1d9ab5af2163515a2ca8db5f43d442c405bd | refs/heads/master | 2021-01-10T12:26:42.123828 | 2013-10-12T14:08:18 | 2013-10-12T14:08:18 | 8,550,942 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 589 | py | # coding=utf-8
'''
Created on 2012-11-29
@author: DELL
'''
import api.sbs.api
oauth= api.sbs.api.OAuthRequest()
result=oauth.auth(u'100',u'accessTest7118jqq54113accessTest',u'猪猪侠',u'pass123')
print result.decode('gbk'),
blist= api.sbs.api.BoardThreadListRequest()
r2=blist.getBoardThreadList(u'100', u'accessTest7118jqq54113accessTest', boardId='682585627')
boardThreadList=r2['board_thread_list']
print type(boardThreadList)
for thread in boardThreadList:
print type(thread)
print thread['fid']
print thread['tid']
print thread['board']['name']
break
| [
"dunsword@163.com"
] | dunsword@163.com |
d6522db0345b146f5c997b5624fec7901716705a | 006341ca12525aa0979d6101600e78c4bd9532ab | /CMS/Zope-3.2.1/Dependencies/zope.server-Zope-3.2.1/zope.server/linereceiver/linetask.py | b6e21554887b4b549e2db8b1c9d3414ff467116b | [
"ZPL-2.1",
"Python-2.0",
"ICU",
"LicenseRef-scancode-public-domain",
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"ZPL-2.0"
] | permissive | germanfriday/code-examples-sandbox | d0f29e20a3eed1f8430d06441ac2d33bac5e4253 | 4c538584703754c956ca66392fdcecf0a0ca2314 | refs/heads/main | 2023-05-30T22:21:57.918503 | 2021-06-15T15:06:47 | 2021-06-15T15:06:47 | 377,200,448 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,064 | py | ##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Line Task
$Id: linetask.py 27442 2004-09-03 08:16:55Z shane $
"""
import socket
import time
from zope.server.interfaces import ITask
from zope.interface import implements
class LineTask(object):
    """This is a generic task that can be used with command line
    protocols to handle commands in a separate thread.

    A task dispatches a single parsed command to a handler method on the
    owning channel (looked up by name) and records hit-log timing.
    """
    implements(ITask)
    def __init__(self, channel, command, m_name):
        # channel: the protocol channel that owns this task
        # command: parsed command object; only its .args are kept
        # m_name: name of the channel method handling this command
        self.channel = channel
        self.m_name = m_name
        self.args = command.args
        # When truthy, the channel is closed once the task completes.
        self.close_on_finish = 0
    def service(self):
        """Called to execute the task.
        """
        try:
            try:
                self.start()
                # Dispatch to the channel's handler method by name.
                getattr(self.channel, self.m_name)(self.args)
                self.finish()
            except socket.error:
                self.close_on_finish = 1
                # Re-raise only when the server is configured to log
                # socket errors; otherwise the error is swallowed and
                # the channel is simply closed.
                if self.channel.adj.log_socket_errors:
                    raise
            except:
                # Any other failure is reported through the channel.
                self.channel.exception()
        finally:
            if self.close_on_finish:
                self.channel.close_when_done()
    def cancel(self):
        'See ITask'
        self.channel.close_when_done()
    def defer(self):
        'See ITask'
        pass
    def start(self):
        # Record the wall-clock start time (used by hit logging).
        now = time.time()
        self.start_time = now
    def finish(self):
        # Record the completed task in the server's hit log, if any.
        hit_log = self.channel.server.hit_log
        if hit_log is not None:
            hit_log.log(self)
| [
"chris@thegermanfriday.com"
] | chris@thegermanfriday.com |
a0e7fb644e67152d9a01b4d7110b100bf035ea8f | 743b85b69266ed58040d24fc8d6df57f62c1c958 | /scripts/test.py | cf53eaf4e5876fa6d639c412b80aa95da0cd8867 | [] | no_license | theRealSuperMario/imm | 125d82798d965acaac92378fa8706d47edb239b0 | 261ed40616069f16281b56be0395030d64073259 | refs/heads/master | 2021-08-05T20:56:08.585547 | 2020-08-18T13:09:17 | 2020-08-18T13:09:17 | 210,142,517 | 0 | 0 | null | 2019-09-22T12:21:17 | 2019-09-22T12:21:17 | null | UTF-8 | Python | false | false | 6,911 | py | # ==========================================================
# Author: Tomas Jakab
# ==========================================================
from __future__ import print_function
from __future__ import absolute_import
import numpy as np
import os.path as osp
from imm.eval import eval_imm
from imm.models.imm_model import IMMModel
import sklearn.linear_model
from imm.utils.dataset_import import import_dataset
def evaluate(net, net_file, model_config, training_config, train_dset, test_dset,
             batch_size=100, bias=False):
  """Measure landmark accuracy of an unsupervised model via regression.

  Runs the model over both datasets, fits a ridge regression (alpha=0,
  i.e. plain least squares) from predicted landmarks to ground truth on
  *train_dset*, and returns the mean landmark error on *test_dset*
  normalised by the inter-ocular distance (the first two ground-truth
  landmarks are assumed to be the eyes).
  """
  # %% ---------------------------------------------------------------------------
  # ------------------------------- Run TensorFlow -------------------------------
  # ------------------------------------------------------------------------------
  # NOTE: this inner helper shadows the enclosing function's name.
  def evaluate(dset):
    # Run the network over the whole dataset and concatenate the
    # per-batch outputs into single arrays.
    results = eval_imm.evaluate(
        dset, net, model_config, net_file, training_config, batch_size=batch_size,
        random_seed=0, eval_tensors=['gauss_yx', 'future_landmarks'])
    results = {k: np.concatenate(v) for k, v in results.items()}
    return results
  train_tensors = evaluate(train_dset)
  test_tensors = evaluate(test_dset)
  # %% ---------------------------------------------------------------------------
  # --------------------------- Regress landmarks --------------------------------
  # ------------------------------------------------------------------------------
  def convert_landmarks(tensors, im_size):
    # Map predicted landmarks from [-1, 1] coordinates to pixels and
    # flatten predictions and ground truth to shape (n_samples, -1).
    landmarks = tensors['gauss_yx']
    landmarks_gt = tensors['future_landmarks'].astype(np.float32)
    im_size = np.array(im_size)
    landmarks = ((landmarks + 1) / 2.0) * im_size
    n_samples = landmarks.shape[0]
    landmarks = landmarks.reshape((n_samples, -1))
    landmarks_gt = landmarks_gt.reshape((n_samples, -1))
    return landmarks, landmarks_gt
  X_train, y_train = convert_landmarks(train_tensors, train_dset.image_size)
  X_test, y_test = convert_landmarks(test_tensors, train_dset.image_size)
  # regression (unregularised; intercept only when bias=True)
  regr = sklearn.linear_model.Ridge(alpha=0.0, fit_intercept=bias)
  _ = regr.fit(X_train, y_train)
  y_predict = regr.predict(X_test)
  landmarks_gt = test_tensors['future_landmarks'].astype(np.float32)
  landmarks_regressed = y_predict.reshape(landmarks_gt.shape)
  # normalized error with respect to intra-occular distance
  # (eyes are taken to be the first two ground-truth landmarks)
  eyes = landmarks_gt[:, :2, :]
  occular_distances = np.sqrt(
      np.sum((eyes[:, 0, :] - eyes[:, 1, :])**2, axis=-1))
  distances = np.sqrt(np.sum((landmarks_gt - landmarks_regressed)**2, axis=-1))
  mean_error = np.mean(distances / occular_distances[:, None])
  return mean_error
def main(args):
  """Build datasets/configs from CLI args, run evaluation, print results."""
  experiment_name = args.experiment_name
  iteration = args.iteration
  im_size = args.im_size
  bias = args.bias
  batch_size = args.batch_size
  n_train_samples = None
  buffer_name = args.buffer_name
  # Descriptive postfix string (currently built but not used below).
  postfix = ''
  if bias:
    postfix += '-bias'
  else:
    postfix += '-no_bias'
  postfix += '-' + args.test_dataset
  postfix += '-' + args.test_split
  if n_train_samples is not None:
    postfix += '%.0fk' % (n_train_samples / 1000.0)
  # Merge the shared paths config with the experiment-specific config.
  config = eval_imm.load_configs(
      [args.paths_config,
       osp.join('configs', 'experiments', experiment_name + '.yaml')])
  # Regressor training dataset (MAFL is served by the CelebA loader).
  if args.train_dataset == 'mafl':
    train_dataset_class = import_dataset('celeba')
    train_dset = train_dataset_class(
        config.training.datadir, dataset='mafl', subset='train',
        order_stream=True, max_samples=n_train_samples, tps=False,
        image_size=[im_size, im_size])
  elif args.train_dataset == 'aflw':
    train_dataset_class = import_dataset('aflw')
    train_dset = train_dataset_class(
        config.training.datadir, subset='train',
        order_stream=True, max_samples=n_train_samples, tps=False,
        image_size=[im_size, im_size])
  else:
    raise ValueError('Dataset %s not supported.' % args.train_dataset)
  # Test dataset for the regressed landmarks.
  if args.test_dataset == 'mafl':
    test_dataset_class = import_dataset('celeba')
    test_dset = test_dataset_class(
        config.training.datadir, dataset='mafl', subset=args.test_split,
        order_stream=True, tps=False,
        image_size=[im_size, im_size])
  elif args.test_dataset == 'aflw':
    test_dataset_class = import_dataset('aflw')
    test_dset = test_dataset_class(
        config.training.datadir, subset=args.test_split,
        order_stream=True, tps=False,
        image_size=[im_size, im_size])
  else:
    raise ValueError('Dataset %s not supported.' % args.test_dataset)
  net = IMMModel
  model_config = config.model
  training_config = config.training
  # Pick a specific checkpoint iteration when requested.
  if iteration is not None:
    net_file = 'model.ckpt-' + str(iteration)
  else:
    net_file = 'model.ckpt'
  checkpoint_file = osp.join(config.training.logdir, net_file + '.meta')
  if not osp.isfile(checkpoint_file):
    raise ValueError('Checkpoint file %s not found.' % checkpoint_file)
  # Run the regression-based evaluation defined above.
  mean_error = evaluate(
      net, net_file, model_config, training_config, train_dset, test_dset,
      batch_size=batch_size, bias=bias)
  if hasattr(config.training.train_dset_params, 'dataset'):
    model_dataset = config.training.train_dset_params.dataset
  else:
    model_dataset = config.training.dset
  print('')
  print('========================= RESULTS =========================')
  print('model trained in unsupervised way on %s dataset' % model_dataset)
  print('regressor trained on %s training set' % args.train_dataset)
  print('error on %s datset %s set: %.5f (%.3f percent)' % (
      args.test_dataset, args.test_split,
      mean_error, mean_error * 100.0))
  print('===========================================================')

# Command-line entry point.
if __name__=='__main__':
  import argparse
  parser = argparse.ArgumentParser(description='Test model on face datasets.')
  parser.add_argument('--experiment-name', type=str, required=True, help='Name of the experiment to evaluate.')
  parser.add_argument('--train-dataset', type=str, required=True, help='Training dataset for regressor (mafl|aflw).')
  parser.add_argument('--test-dataset', type=str, required=True, help='Testing dataset for regressed landmarks (mafl|aflw).')
  parser.add_argument('--paths-config', type=str, default='configs/paths/default.yaml', required=False, help='Path to the paths config.')
  parser.add_argument('--iteration', type=int, default=None, required=False, help='Checkpoint iteration to evaluate.')
  parser.add_argument('--test-split', type=str, default='test', required=False, help='Test split (val|test).')
  parser.add_argument('--buffer-name', type=str, default=None, required=False, help='Name of the buffer when using matlab data pipeline.')
  parser.add_argument('--im-size', type=int, default=128, required=False, help='Image size.')
  parser.add_argument('--bias', action='store_true', required=False, help='Use bias in the regressor.')
  parser.add_argument('--batch-size', type=int, default=100, required=False, help='batch_size')
  args = parser.parse_args()
  main(args)
| [
"tomas.jakab.64@gmail.com"
] | tomas.jakab.64@gmail.com |
1ecda05effcd112594905dd6c1737795add64c5f | 04e75bae29029c2b79730f0749ef9be543c48824 | /database.py | 5e7e9439b918fb9d7caa19b2dc58ca97cb5f8621 | [] | no_license | venky252003/Python_Flask | 3cd2b8018e9a50e6f069d5daaeb7e0ccfd5e92d8 | 705afc5a910ce37f708cc6cb439773b6829b6645 | refs/heads/master | 2022-04-21T15:34:15.769515 | 2020-04-04T11:17:34 | 2020-04-04T11:17:34 | 236,959,362 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,460 | py | #!/usr/bin/env python
"""
Author: Nick Russo
Purpose: A simple Flask web app that demonstrates the Model View Controller
(MVC) pattern in a meaningful and somewhat realistic way.
"""
class Database:
    """Data-access layer (the "model") backed by an XML file.

    The XML document is expected to have a single ``root`` element whose
    children map account ids to records carrying ``paid`` and ``due``
    fields.
    """

    def __init__(self, path):
        """Parse the XML file at *path* into the ``data`` attribute."""
        with open(path, 'r') as handle:
            import xmltodict
            document = xmltodict.parse(handle.read())
        self.data = document["root"]
        print(self.data)

    def balance(self, acct_id):
        """Return due-minus-paid for *acct_id*, or None if unknown.

        A positive number means the customer still owes money; a
        negative number means they overpaid and hold a credit.
        """
        record = self.data.get(acct_id)
        if not record:
            return None
        return int(record["due"]) - int(record["paid"])
| [
"venky25@gmail.com"
] | venky25@gmail.com |
c61065937a7d81c532e4c6f752dd3cd2a339f82c | 0ac67fd1be569ee10bb07febdc756d80454c7887 | /main.py | 3e22505ca687dc00fc6e5b0548385643a160d626 | [] | no_license | b-thebest/pixel-detector-and-evaluator | db7ef6251cd3c42d6c3cf7a69956da0b64f8114d | 75f8db7e35c29c7cf85e26752c8591856764a6f2 | refs/heads/master | 2022-11-23T10:52:16.094621 | 2020-07-08T16:06:16 | 2020-07-08T16:06:16 | 282,961,643 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,852 | py | import win32ui
from PIL import Image
from ctypes import windll
from win32 import win32gui
from time import sleep
def capture_screen(hwnd, w, h):
    """Grab a w x h screenshot of window *hwnd* via GDI PrintWindow.

    Works even when the window is not in the foreground. Returns a PIL
    RGB Image on success, or None when PrintWindow reports failure.
    """
    # https://stackoverflow.com/questions/19695214/python-screenshot-of-inactive-window-printwindow-win32gui
    hwndDC = win32gui.GetWindowDC(hwnd)
    mfcDC = win32ui.CreateDCFromHandle(hwndDC)
    saveDC = mfcDC.CreateCompatibleDC()
    saveBitMap = win32ui.CreateBitmap()
    saveBitMap.CreateCompatibleBitmap(mfcDC, w, h)
    saveDC.SelectObject(saveBitMap)
    # Ask the window to paint itself into the off-screen DC.
    result = windll.user32.PrintWindow(hwnd, saveDC.GetSafeHdc(), 0)
    bmpinfo = saveBitMap.GetInfo()
    bmpstr = saveBitMap.GetBitmapBits(True)
    # Windows bitmaps are BGRX; convert to an RGB PIL image.
    im = Image.frombuffer(
        'RGB',
        (bmpinfo['bmWidth'], bmpinfo['bmHeight']),
        bmpstr, 'raw', 'BGRX', 0, 1)
    # Release all GDI resources before returning.
    win32gui.DeleteObject(saveBitMap.GetHandle())
    saveDC.DeleteDC()
    mfcDC.DeleteDC()
    win32gui.ReleaseDC(hwnd, hwndDC)
    if result == 1:
        return im
    return None
def ypp_window_callback(hwnd, _extras):
    """EnumWindows callback: locate the 'Merciless Client' window.

    Computes the window's position and size and, when a matching window
    title is found, starts the (blocking, never-returning) pixel
    detector loop on it.
    """
    rect = win32gui.GetWindowRect(hwnd)
    x = rect[0]
    y = rect[1]
    w = rect[2] - x
    h = rect[3] - y
    window_title = win32gui.GetWindowText(hwnd)
    if 'Merciless Client' in window_title:
        print('Window found! location=(%d, %d), size=(%d, %d)' % (x, y, w, h))
        pixelDetector(hwnd, w, h)
def pixelDetector(hwnd, w, h):
    """Continuously scan a small pixel patch of the window for pure red.

    Captures the window *hwnd* (size w x h) in a loop and inspects the
    5x5 pixel region at x in [2459, 2464), y in [113, 118) -- hard-coded
    coordinates, presumably a status indicator in the game UI (confirm).
    A pure-red pixel (255, 0, 0) is treated as a "wait" signal and pauses
    for 5 seconds; otherwise a click is intended (still unimplemented).

    Runs forever; never returns.
    """
    while True:
        screen_image = capture_screen(hwnd, w, h)
        if screen_image is None:
            # PrintWindow failed, so capture_screen returned None; retry
            # shortly instead of crashing on screen_image.getpixel().
            sleep(1)
            continue
        for x in range(2459, 2464):
            for y in range(113, 118):
                r, g, b = screen_image.getpixel((x, y))
                if r == 255 and g == 0 and b == 0:
                    # reddish area means sleep time
                    sleep(5)
                else:
                    # TODO: perform the click action here
                    pass
if __name__ == "__main__":
    # Enumerate all top-level windows; the callback starts the detector
    # loop once it finds the target window.
    win32gui.EnumWindows(ypp_window_callback, None)
"bmoizali90@gmail.com"
] | bmoizali90@gmail.com |
e5798a30289cb14e434258ef6f5991871653b614 | 444c493405f94aaf261acc8c3f9a45d97406459f | /book/migrations/0003_auto_20200328_2235.py | 192b25a5a53dfd3fdaf8b4cc92e2799ae582babe | [] | no_license | eyluldnz/DjangoProjem | 6e8bf30c842d2127b46f590e0da233673e557b39 | 70d10fb471d542c3e8518aa4c7c98665ccbf16ee | refs/heads/master | 2022-10-31T01:27:12.342987 | 2020-06-17T22:58:54 | 2020-06-17T22:58:54 | 251,072,984 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 866 | py | # Generated by Django 3.0.3 on 2020-03-28 19:35
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Widen ``Book.title`` to 150 chars and add the ``Images`` model."""
    dependencies = [
        ('book', '0002_book'),
    ]
    operations = [
        migrations.AlterField(
            model_name='book',
            name='title',
            field=models.CharField(max_length=150),
        ),
        migrations.CreateModel(
            name='Images',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=50)),
                ('image', models.ImageField(blank=True, upload_to='images/')),
                ('book', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='book.Book')),
            ],
        ),
    ]
| [
"eyluldnzcn@gmail.com"
] | eyluldnzcn@gmail.com |
b09c853ed3a8f42a9e13c2c95955b062cd69f4fd | 7b059fc1b18bcad469e49dbd991a8ba7be21b423 | /ros/src/twist_controller/twist_controller.py | 2acfdb71d64837e78d42bf716d0cc4afe90cbb0d | [] | no_license | koosha-t/carnd-capstone | e2382ecc773b57c132fa918cf270cf2e3b2f9f7d | 3e6d558b6bc31e1f0c432e61dfbda012820de79c | refs/heads/master | 2021-08-23T03:17:20.697679 | 2017-12-02T20:48:07 | 2017-12-02T20:48:07 | 112,555,288 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,523 | py | import rospy
from yaw_controller import YawController
GAS_DENSITY = 2.858
ONE_MPH = 0.44704
class Controller(object):
    """Drive-by-wire controller producing (throttle, brake, steer).

    Steering is delegated to a YawController built from the vehicle
    geometry passed as keyword arguments; throttle and brake are fixed
    placeholder values for now.
    """
    def __init__(self, *args, **kwargs):
        # Vehicle geometry / limits taken from the keyword arguments;
        # any key that is absent leaves the attribute as None.
        self.wheel_base = kwargs.get('wheel_base')
        self.steer_ratio = kwargs.get('steer_ratio')
        self.min_speed = kwargs.get('min_speed')
        self.max_lat_accel = kwargs.get('max_lat_accel')
        self.max_steer_angle = kwargs.get('max_steer_angle')
        rospy.loginfo("args* kw:{}".format(kwargs))
        self.yaw_controller = YawController(self.wheel_base, self.steer_ratio, self.min_speed, self.max_lat_accel, self.max_steer_angle)

    def control(self, *args, **kwargs):
        """Return (throttle, brake, steer); all zeros while DBW is off."""
        if kwargs["dbw_enabled"] is False:
            return 0., 0., 0.
        current_linear = kwargs["current_velocity_linear"]
        target_linear = kwargs["target_velocity_linear"]
        target_angular = kwargs["target_velocity_angular"]
        # Steering from the yaw controller; throttle/brake are placeholders.
        steer = self.yaw_controller.get_steering(target_linear.x, target_angular.z, current_linear.x)
        return 1., 0., steer
| [
"koosha.sbuces@gmail.com"
] | koosha.sbuces@gmail.com |
aff439be5207e0f11177cb64be552d6e34acda4b | fda64ad75ffa65f3e3a4c09972ae6b9a0268383f | /21_API.py | a07a295aa26cecb552fffa043a14dac9619a1876 | [] | no_license | dhruv611/Python | 260ce9e189380d49c54a8d57a26b9441e5472848 | 011cd6a3b67f85e98760c8ea9015733e169f1753 | refs/heads/master | 2021-05-16T21:41:54.455601 | 2020-07-14T06:14:20 | 2020-07-14T06:14:20 | 250,480,631 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,070 | py | import json
import urllib.request,urllib.parse,urllib.error
# NOTE: the Google Geocoding API requires an API key (a "key" query
# parameter); without one the service rejects requests, so this script
# will not return results as-is.
serviceUrl = 'http://maps.googleapis.com/maps/api/geocode/json?'
while True:
    address = input('Enter address: ')
    if len(address) < 1:
        break
    # The Geocoding API's query parameter is case-sensitive and must be
    # named "address" (lowercase); the original 'Address' is not
    # recognised by the service.
    url = serviceUrl + urllib.parse.urlencode({'address': address})
    print('Retreiving: ', url)
    url1 = urllib.request.urlopen(url)
    data = url1.read().decode()
    print('Retreived ', len(data), ' characters.')
    try:
        # Parse the JSON payload; js stays None on malformed input.
        js = json.loads(data)
    except ValueError:
        js = None
    if not js or 'status' not in js or js['status'] != 'OK':
        print('Error in data retrieval.')
        print(js)
        continue
    print(json.dumps(js, indent=4))
    # First result: coordinates and the canonical formatted address.
    lat = js['results'][0]['geometry']['location']['lat']
    lng = js['results'][0]['geometry']['location']['lng']
    print('lat', lat, 'lng', lng)
    location = js['results'][0]['formatted_address']
    print(location)
| [
"noreply@github.com"
] | noreply@github.com |
c5c4b6f0b936cd29d654915642a877ac48a21b78 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03806/s696918602.py | 35f161589d7dd759d0031fd48f8415a6aae2215a | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 666 | py | def main():
INF = 100 * 40 + 1
MX = 4000
N, Ma, Mb = map(int, input().split())
dp = [[INF] * (MX * 2 + 1) for _ in range(2)]
i, j = 0, 1
for _ in range(N):
ai, bi, ci = map(int, input().split())
x = Ma * bi - Mb * ai # Σai:Σbi=Ma:Mb<->Ma*Σbi-Mb*Σai=0
for k in range(-MX, MX + 1):
dp[j][k] = dp[i][k]
dp[j][x] = min(dp[j][x], ci)
for k in range(-MX + x, MX + 1):
dp[j][k] = min(
dp[j][k],
dp[i][k - x] + ci
)
i, j = j, i
res = dp[i][0]
print(-1 if res == INF else res)
if __name__ == '__main__':
main()
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
09748ed4d962cf5b7f4a079ab8e5b4811299f4c0 | 99b062cb9f5f3ff10c9f1fa00e43f6e8151a43a6 | /Django/PROJECT02/PROJECT02/jobs/models.py | 5d8ee670119eeaf75fc29f8879c7f9b7d6106061 | [] | no_license | HSx3/TIL | 92acc90758015c2e31660617bd927f7f100f5f64 | 981c9aaaf09c930d980205f68a28f2fc8006efcb | refs/heads/master | 2020-04-11T21:13:36.239246 | 2019-05-08T08:18:03 | 2019-05-08T08:18:03 | 162,099,042 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 223 | py | from django.db import models
# Create your models here.
class Job(models.Model):
    """A job record with a name and a description of the previous job."""
    # Display name for this record (also used by __str__).
    name = models.CharField(max_length=20)
    # Description of the previous job.
    pastjob = models.CharField(max_length=30)
    def __str__(self):
        return self.name
"hs.ssafy@gmail.com"
] | hs.ssafy@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.