index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
988,200 | 228bd2388acbdc6a7d4b04c6bdc20278ca93861c | import numpy as np
import pandas as pd
from scipy.stats import ranksums
from sklearn.metrics import roc_curve
from statsmodels.stats.multitest import multipletests
def _get_optimal_threshold(scores, labels):
    """Return the ROC threshold that maximizes Youden's J statistic (TPR - FPR).

    Only strictly positive scores (and their matching labels) enter the
    ROC computation.
    """
    positive_mask = scores > 0
    fpr, tpr, thresholds = roc_curve(labels[positive_mask], scores[positive_mask])
    best_idx = np.argmax(tpr - fpr)
    return thresholds[best_idx]
def dem_motif_enrichment(hypo_score_df, hyper_score_df, log2_fc_threshold=0.5, alpha=0.05, motif_score_threshold=3):
    """
    Perform the Wilcoxon rank sum test on hypo and hyper region motif scores.
    Parameters
    ----------
    hypo_score_df :
        The motif score dataframe of hypo regions (rows = motifs, columns = regions).
    hyper_score_df :
        The motif score dataframe of hyper regions (same motif rows, disjoint region columns).
    log2_fc_threshold :
        The absolute log2 fold change threshold to determine if a motif is enriched.
    alpha :
        The adjusted p-value threshold to determine if a motif is enriched.
    motif_score_threshold :
        The minimum motif score threshold to determine if a motif hit in a region.
    Returns
    -------
    motif_enrichment :
        The motif enrichment dataframe (log2_fc, q_value, mean_fg, mean_bg), enriched motifs only.
    hypo_motif_hits :
        Boolean dataframe of motif hits in hypo regions.
    hyper_motif_hits :
        Boolean dataframe of motif hits in hyper regions.
    """
    # determine hypo hyper regions are not overlapped
    assert hypo_score_df.columns.intersection(hyper_score_df.columns).size == 0, "hypo and hyper regions are overlapped"
    # determine motif rows are the same
    # noinspection PyTypeChecker
    assert sum(hypo_score_df.index != hyper_score_df.index) == 0, "motif rows are not the same"
    fg_mat = hypo_score_df.values
    bg_mat = hyper_score_df.values
    motifs = hypo_score_df.index
    # one rank-sum test per motif row: hypo (foreground) vs hyper (background) scores
    wilcox_test = [ranksums(fg_mat[x], y=bg_mat[x]) for x in range(fg_mat.shape[0])]
    # Log2FC of per-motif mean scores; the 1e-12 pseudocount avoids division by zero
    mean_fg = fg_mat.mean(axis=1)
    mean_bg = bg_mat.mean(axis=1)
    log_fc = np.log2((mean_fg + 10**-12) / (mean_bg + 10**-12)).tolist()
    # P-value correction; `judge` is the boolean mask of motifs passing `alpha`
    # noinspection PyUnresolvedReferences
    p_value = [w.pvalue for w in wilcox_test]
    judge, q_value, *_ = multipletests(p_value, alpha=alpha)
    # Motif df
    motif_df = pd.DataFrame(
        {"log2_fc": log_fc, "q_value": q_value, "mean_fg": mean_fg, "mean_bg": mean_bg}, index=motifs
    )
    # keep motifs that pass both the fold-change and the significance filters
    fc_judge = motif_df["log2_fc"].abs() > log2_fc_threshold
    motif_df = motif_df[fc_judge & judge].copy()
    # Motif hits versus background
    keep_motifs = motif_df.index.tolist()
    keep_motifs_bool = motifs.isin(keep_motifs)
    # pool fg and bg region scores per motif; labels: 1 = hypo (fg), 0 = hyper (bg)
    scores_mat = np.concatenate([fg_mat[keep_motifs_bool], bg_mat[keep_motifs_bool]], axis=1)
    labels = np.repeat([1, 0], (fg_mat.shape[1], bg_mat.shape[1]))
    motif_hit_thresholds = []
    for i in range(scores_mat.shape[0]):
        # per-motif hit cutoff: ROC-optimal threshold, floored at motif_score_threshold
        opt_score = _get_optimal_threshold(scores=scores_mat[i], labels=labels)
        hit_threshold = max(motif_score_threshold, opt_score)
        motif_hit_thresholds.append(hit_threshold)
    motif_hit_thresholds = np.array(motif_hit_thresholds)
    hypo_motif_hits = pd.DataFrame(
        fg_mat[keep_motifs_bool] > motif_hit_thresholds[:, None], index=keep_motifs, columns=hypo_score_df.columns
    )
    hyper_motif_hits = pd.DataFrame(
        bg_mat[keep_motifs_bool] > motif_hit_thresholds[:, None], index=keep_motifs, columns=hyper_score_df.columns
    )
    return motif_df, hypo_motif_hits, hyper_motif_hits
|
988,201 | bba45e3c3c8208de356521bf5b57ed2b92e2c87a | import numpy as np
import torch as t
import pyqtgraph as pg
import pyqtgraph.opengl as gl
import argparse as ap
from model import *
from copy import deepcopy
from time import time
# Command-line interface for the loss-surface visualizer.
parser = ap.ArgumentParser()
parser.add_argument("filename",
                    help="Name of the file of the PyTorch neural net to be visualized",
                    type=str
                    )
parser.add_argument("-l", "--layer",
                    help="""The error surface will be with respect to the weights in this layer.
\nNote: Pytorch stores biases as a separate layer. Layer 0 is the weights
from input to hidden, Layer 1 is the biases for that same layer.""",
                    type=int,
                    default=1,
                    )
parser.add_argument("-s", "--sub_layer",
                    help="""If a layer contains more than one weights vector, a sub_layer may be
specified. For example, layer=0 + sub_layer=1, will show the error
surface with respect to the weights between the second neuron in the
first (input) layer and the hidden layer, that is to say, the second
weights vector in the first layer. Note that PyTorch will cannot freeze
other sublayers, so the loss function will appear to be noisy.""",
                    type=int,
                    default=0,
                    )
parser.add_argument("-o", "--orbit_speed",
                    help="Speed at which the plot rotates (or viewer orbits)",
                    type=float,
                    default=0.2,
                    )
parser.add_argument("-lr", "--learning_rate",
                    help="""Learning rate during gradient decent (only affects visualization,
models should be pre-trained""",
                    type=float,
                    default=0.002,
                    )
parser.add_argument("-e", "--epochs",
                    help="Number of epoch to train before re-randomizing, and re-training.",
                    type=int,
                    default=64,
                    )
parser.add_argument("-r", "--resolution",
                    help="""Defines n, where loss surface is an nxn grid streched over the x and
y bounds. Note: large resolutions may be difficult to compute.""",
                    type=int,
                    default=200,
                    )
parser.add_argument("-x", "--x_bounds",
                    help="Defines x bounds of loss surface, e.g. '-x -8 8'",
                    type=int,
                    nargs=2,
                    default=(-8,8),
                    )
parser.add_argument("-y", "--y_bounds",
                    help="Defines y bounds of loss surface, e.g. '-y -8 8'",
                    type=int,
                    nargs=2,
                    default=(-8,8),
                    )
# Parsed once at import; the functions below read this module-level `args`.
args = parser.parse_args()
## build a QApplication before building other widgets
pg.mkQApp()
## make a widget for displaying 3D objects
view = gl.GLViewWidget()
view.showMaximized()
## create flat grid and directional lines
grid = gl.GLGridItem()
view.addItem(grid)
# Axis indicator lines fade from opaque at the origin to transparent at the
# tip: x axis in red, y in green, z in blue.
x_dir = gl.GLLinePlotItem(
    pos=np.array([[0,0,0],[1,0,0]]),
    color=np.array([[1,0,0,1],[1,0,0,0]]),
    width=2,
)
view.addItem(x_dir)
y_dir = gl.GLLinePlotItem(
    pos=np.array([[0,0,0],[0,1,0]]),
    color=np.array([[0,1,0,1],[0,1,0,0]]),
    width=2,
)
view.addItem(y_dir)
z_dir = gl.GLLinePlotItem(
    pos=np.array([[0,0,0],[0,0,1]]),
    color=np.array([[0,0,1,1],[0,0,1,0]]),
    width=2,
)
view.addItem(z_dir)
#create surface
# Wireframe-only loss surface; its data is filled in later by run_simulation().
surface = gl.GLSurfacePlotItem(
    drawEdges=True,
    drawFaces=False,
    #computeNormals=False,
)
view.addItem(surface)
def create_loss_surface(
        net,
        inputs,
        targets,
        layer,
        sub_layer=0,
        x_bounds=(-8,8),
        y_bounds=(-8,8),
        resolution=250,
):
    """Sample the MSE loss over a 2-D grid of values for the first two weights
    of the selected (sub)layer.

    Returns (x_values, y_values, surface) where surface[i, j] is the loss with
    the visible weights set to (x_values[i], y_values[j]).

    NOTE(review): the layer's weights are overwritten during sampling and are
    not restored — confirm callers re-initialize before training (descend_gradient
    does re-randomize them).
    """
    with t.no_grad():
        x_values = np.linspace(*x_bounds,resolution)
        y_values = np.linspace(*y_bounds,resolution)
        surface = np.empty((len(x_values),len(y_values)))
        w = net.get_nth_layer(layer,sub_layer).data
        for i,x in enumerate(x_values):
            for j,y in enumerate(y_values):
                # set the two visible weights to the grid coordinates
                w[0] = x
                w[1] = y
                outputs = net.forward(inputs).detach()
                # mean squared error at this grid point
                surface[i,j] = ((outputs - targets)**2).mean().item()
                #surface[i,j] = t.nn.MSELoss()(outputs,targets).item()
    return x_values,y_values,surface # surface is really just z_values
def descend_gradient(
        net,
        inputs,
        targets,
        layer,
        sub_layer=0,
        lr=0.002,
        momentum=0.0,
        epochs=100,
        size=10,
        orbit_speed=0.05
):
    """Animate one gradient-descent run on the visible loss surface.

    Re-randomizes the two visible weights, freezes every other parameter
    group, then trains for `epochs` steps, drawing the descent path as a
    color-graded scatter + line (red at the start, green at the end).
    Returns early when the window is closed; Ctrl-C stops training.
    """
    try:
        scatter = gl.GLScatterPlotItem(size=size,color=(1,0,0,1))#red
        path = gl.GLLinePlotItem(color=(1,1,1,.4))
        view.addItem(scatter)
        view.addItem(path)
        points = np.empty((epochs,3))
        colors = np.zeros((epochs,4))
        colors[:,3] = 1 # opacity
        w = net.get_nth_layer(layer,sub_layer)
        # re-randomize the visible weights uniformly in [-6, 6)
        w.data[:] = t.rand(w.shape) *12 -6
        # lock all weight except visible layer
        for i,param in enumerate(net.parameters()):
            if i != layer:
                param.requires_grad = False
        for i in range(epochs):
            points[i,:2] = w.detach().numpy()
            #points[i,2] = loss.item()
            outputs = net.forward(inputs).detach()
            points[i,2] = ((outputs - targets)**2).mean().item()
            # color gradient: older points fade toward red, newer toward green
            colors[:i+1,1] = np.linspace(0,1,i+1)
            colors[:i+1,0] = np.linspace(1,0,i+1)
            scatter.setData(pos=points[:i+1], color=colors[:i+1])
            path.setData(pos=points[:i+1])
            loss = net._one_epoch(inputs,targets,lr=lr,momentum=momentum)
            # NOTE(review): pg.QtGui.QApplication moved to pg.QtWidgets in
            # newer Qt5-based pyqtgraph — confirm against the pinned version.
            pg.QtGui.QApplication.processEvents()
            view.orbit(orbit_speed,0)
            if view.isHidden(): return # game over
    except KeyboardInterrupt: # also game over
        print('Training halted')
def run_simulation(
        nn,
        layer,
        sub_layer=0,
        n = 100,
        resolution = 200,
        open_window = "max",
        orbit_speed = .05,
):
    """Show the window, render the loss surface, then loop descend_gradient
    passes until the viewer closes the window.

    NOTE(review): the `resolution` parameter is ignored in favor of
    args.resolution (likewise the descent call uses args.epochs /
    args.orbit_speed) — confirm whether the parameters should win.
    """
    if open_window == "full":
        view.showFullScreen()
    elif open_window == "max":
        view.showMaximized()
    elif open_window:
        view.show()
    # gen_data comes from `from model import *`
    data = gen_data(n)
    x,y,z = create_loss_surface(
        nn,
        *data,
        layer,
        sub_layer,
        x_bounds=args.x_bounds,
        y_bounds=args.y_bounds,
        resolution=args.resolution,
    )
    surface.setData(x=x, y=y, z=z)
    # let the viewer inspect the fresh surface for ~3 seconds before training
    stop = time() + 3
    while not view.isHidden() and time() < stop:
        pg.QtGui.QApplication.processEvents()
    # keep re-randomizing and re-descending until the window is closed
    while not view.isHidden():
        descend_gradient(
            nn,
            *data,
            layer,
            sub_layer,
            epochs=args.epochs,
            orbit_speed=args.orbit_speed,
        )
        view.orbit(orbit_speed,0)
        pg.QtGui.QApplication.processEvents()
def main():
    """Load the saved model and start the visualization loop."""
    # NOTE(review): t.load unpickles arbitrary objects — only open trusted files.
    nn = t.load(args.filename)
    run_simulation(
        nn,
        layer=args.layer,
        sub_layer=args.sub_layer,
    )
if __name__ == "__main__": main()
|
988,202 | ab287c0caf66d44156942059af5f9ffdd5f81ff4 | import xlrd
path = "denemeExcel.xlsx"
import firebase_admin
from firebase_admin import credentials
from firebase_admin import firestore
# Service-account credentials for the Firestore project.
cred = credentials.Certificate("seracker-cd8df-firebase-adminsdk-c5wua-8f94ae24d2.json")
firebase_admin.initialize_app(cred)
# Open the spreadsheet and take its first sheet.
dosyaAc = xlrd.open_workbook(path)
dataSheet =dosyaAc.sheet_by_index(0)
db = firestore.client()
# Target document: Users/<uid>/nabiz/<date>  ("nabiz" = pulse).
doc = db.collection("Users").document("yQZwNV5L042W1VGPwiRB").collection("nabiz").document("2021-05-17")
satirSayisi = dataSheet.nrows  # total row count, printed as a sanity check
print(satirSayisi)
nabizVerileri = list()  # collected pulse readings
# Each cell looks like "<10 skipped chars>H:MM:SS AM,NN" / "HH:MM:SS PM,NNN".
# NOTE(review): the row range 1..10299 is hard-coded — confirm it matches the sheet.
for i in range(1,10300):
    nabizVeri = str(dataSheet.cell_value(i,0))
    nabizVeri = nabizVeri[10:]
    if(nabizVeri[10] == "M"): # two-digit hour
        if(nabizVeri[9] == "A"):
            saat = nabizVeri[0:8]
        elif(nabizVeri[9] == "P"):
            ham = int(nabizVeri[0:2])
            if(ham < 12):
                ham +=12
            elif(ham == 12):
                # NOTE(review): 12 PM is mapped to the string "00" — looks like
                # a noon/midnight mix-up; verify intended.
                ham ="00"
            saat =str(ham) + nabizVeri[2:8]
        # NOTE(review): `saat` is computed but never used for the upload, and
        # stays stale/unbound if the AM/PM marker is missing — confirm the
        # input is always well-formed.
        nabizData = nabizVeri[12:]
        nabizVerileri.append(int(nabizData))
    else: # one-digit hour
        if(nabizVeri[8] == "A"):
            saat = "0"+nabizVeri[0:7]
        elif(nabizVeri[8] == "P"):
            ham = int(nabizVeri[0:1])
            ham +=12
            saat =str(ham) + nabizVeri[1:7]
        nabizData = nabizVeri[11:]
        nabizVerileri.append(int(nabizData))
# hour can be two digits: 11:16:49 PM,53   9:35:16 PM,59
# check the AM/PM handling
# pulse can be 2 or 3 digits: 10:11:09 PM,55
# Upload every reading in a single merge write.
nabiz = {"nabizDatas":nabizVerileri}
doc.set(nabiz,merge=True)
for j in range(0,10000):
    pass
    #print(nabizVerileri[j][10:])
# import xlrd
# path = "denemeExcel.xlsx"
# import firebase_admin
# from firebase_admin import credentials
# from firebase_admin import firestore
# cred = credentials.Certificate("seracker-cd8df-firebase-adminsdk-c5wua-8f94ae24d2.json")
# firebase_admin.initialize_app(cred)
# dosyaAc = xlrd.open_workbook(path)
# dataSheet =dosyaAc.sheet_by_index(0)
# db = firestore.client()
# doc = db.collection("Users").document("yQZwNV5L042W1VGPwiRB").collection("nabiz").document("2021-05-21")
# satirSayisi = dataSheet.nrows
# print(satirSayisi)
# nabizVerileri = list()
# for i in range(40180,66615):
# nabizVeri = str(dataSheet.cell_value(i,0))
# nabizVeri = nabizVeri[10:]
# if(nabizVeri[10] == "M"): #iki haneli saat
# if(nabizVeri[9] == "A"):
# saat = nabizVeri[0:8]
# elif(nabizVeri[9] == "P"):
# ham = int(nabizVeri[0:2])
# if(ham < 12):
# ham +=12
# elif(ham == 12):
# ham ="00"
# saat =str(ham) + nabizVeri[2:8]
# nabizData = nabizVeri[12:]
# nabizVerileri.append(nabizData+" "+"saat: "+saat)
# else:#tek haneli saat
# if(nabizVeri[8] == "A"):
# saat = "0"+nabizVeri[0:7]
# elif(nabizVeri[8] == "P"):
# ham = int(nabizVeri[0:1])
# ham +=12
# saat =str(ham) + nabizVeri[1:7]
# nabizData = nabizVeri[11:]
# nabizVerileri.append(nabizData+" "+"saat: "+saat)
# #saat iki haneli oluyor 11:16:49 PM,53 9:35:16 PM,59
# #am pm kontrol el
# #nabız 2 ve 3 haneli oluyor 10:11:09 PM,55
# nabiz = {"nabiz":nabizVerileri}
# doc.set(nabiz)
# for j in range(0,10000):
# pass
# #print(nabizVerileri[j][10:])
|
988,203 | ab459bdbbbd51db64c2049f7da4624e4c64b84e4 | # a simple movie renamer:
# Python3 only
import sys
import os
import re
import glob
import argparse
# function to rename movie
def rename_movie(fn: str, debug: bool) -> str:
    """Return the cleaned-up path for movie file *fn*.

    Applies every (pattern, replacement) pair of the module-level ``regs``
    list to the base name, keeping the directory and extension untouched.

    Parameters:
        fn: path of the file to rename.
        debug: when True, print the base name after each substitution.

    Returns:
        The proposed new path for the file.
    """
    # save path name of the original file
    file_path = os.path.dirname(fn)
    # split file name to get the base name
    file_name = os.path.basename(fn)
    (file_basename, file_ext) = os.path.splitext(file_name)
    # replace all regexes, in order
    for pattern, repl in regs:
        file_basename = re.sub(pattern, repl, file_basename)
        if debug:
            print(file_basename)
    # build new name
    new_name = "{0}{1}".format(file_basename.strip(), file_ext)
    # os.path.join keeps the result correct on Windows (the script explicitly
    # supports os.name == 'nt') and avoids producing an absolute "/name" when
    # fn has no directory part — '{0}/{1}'.format did both wrong.
    return os.path.join(file_path, new_name)
# list of (pattern, replacement) regexes substituted in order into the base name
regs = [
    # wrap 19xx / 20xx years in parentheses
    [r'(19\d\d)', r'(\1)'],
    [r'(20\d\d)', r'(\1)'],
    # drop bracketed release tags like "[YTS]"
    [r'\[.*?\]', ''],
    # drop common rip/quality/language tokens, case-insensitively;
    # the ".*" variants also delete everything after the token
    [r'(?i)bdrip', ''],
    [r'(?i)french', ''],
    [r'(?i)vostfr', ''],
    [r'(?i)h264', ''],
    [r'(?i)ita', ''],
    [r'(?i)episodio', ''],
    [r'(?i)x264.*', ''],
    [r'(?i)dvdrip.*', ''],
    [r'(?i)xvid.*', ''],
    [r'(?i)brrip.*', ''],
    [r'(?i)720p', ''],
    [r'(?i)hdlight', ''],
    [r'(?i)1080p', ''],
    [r'(?i)true', ''],
    [r'(?i)multi', ''],
    [r'(?i)bluray', ''],
    [r'(?i)web', ''],
    [r'(?i)weeds', ''],
    [r'(?i)hdtv.*', ''],
    # punctuation cleanup: remove dashes, map '_' and '.' to spaces
    [r'\-', ''],
    [r'_', ' '],
    [r'\.', ' '],
]
# manage arguments
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--debug', action='store_true',
                    default=False,
                    dest='debug',
                    help='watch regexes substitutions')
parser.add_argument('-f',
                    dest='files',
                    help='List of files',
                    nargs='*')
args = parser.parse_args()
# On POSIX shells glob expansion happens before the script runs, so args.files
# already holds real file names.  On Windows (nt) the patterns arrive verbatim
# and must be expanded here.  Fixes three defects in the original:
#   * glob.glob() was called with the whole LIST (TypeError) — expand each
#     pattern individually instead;
#   * files_to_rename was unbound on any other platform — default to the
#     arguments as-is;
#   * args.files is None when -f is omitted — treat that as an empty list.
if os.name == 'nt':
    files_to_rename = [match
                       for pattern in (args.files or [])
                       for match in glob.glob(pattern)]
else:
    files_to_rename = args.files or []
# for each argument (which is supposed to be a file name), get new name and rename file
for fn in files_to_rename:
    new_name = rename_movie(fn, args.debug)
    # confirm interactively before touching the filesystem
    answer = input('Do you want to rename "{0}" by "{1}" ? (Y/N): '.format(fn, new_name))
    if answer.lower().startswith('n'):
        continue
    if not args.debug:
        os.rename(fn, new_name)
|
988,204 | 36b93362d57b3aea9a8e8a251f0f34c968d0f1bc | from timeit import Timer
import matplotlib.pyplot as plt
tz = []  # cumulative timings for list.pop(0) (O(n) per call)
te = []  # cumulative timings for list.pop()  (O(1) per call)
x = range(10000,1000001,10000)
for i in x:
    # progress indicator; print() works on both Python 2 and 3 —
    # the bare `print i` statement was Python-2-only syntax and made the
    # file a SyntaxError under Python 3.
    print(i)
    l = list(range(i))
    ##Create Timer object with Statement and Setup
    t1 = Timer(stmt="l.pop(0)", setup="from __main__ import l")
    # fresh list so the second timer starts from the same length
    l = list(range(i))
    t2 = Timer("l.pop()", "from __main__ import l")
    tz.append(t1.timeit(number=1000))
    te.append(t2.timeit(number=1000))
# Plot both series: pop(0) grows with list size, pop() stays flat.
plt.figure()
plt.subplot(211)
plt.plot(x,tz, label="Pop(0)")
plt.legend()
plt.subplot(212)
plt.plot(x,te, label="Pop")
plt.legend()
plt.show()
|
988,205 | 14cf2ca151faebb6e0ed635efa6e51446b24c8e6 | from rest_framework import serializers
from .models import MoviesList
class MoviesListSerializer(serializers.ModelSerializer):
    """DRF serializer exposing the headline fields of a MoviesList entry."""
    class Meta:
        model = MoviesList
        fields = ('movie_name','hero_name','heroin_name','music_director_name','director_name')
|
988,206 | c853a3737592a5dbb5caca7696fb0027b335daca | # Generated by Django 3.0.3 on 2020-08-05 02:57
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: create the HRMIS_V_AW_FOR_RANKING staff/ranking table."""
    dependencies = [
        ('importDB', '0009_master_ranking_university_name_color'),
    ]
    operations = [
        migrations.CreateModel(
            name='HRMIS_V_AW_FOR_RANKING',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('STAFF_ID', models.CharField(max_length=5)),
                ('FNAME_THAI', models.CharField(max_length=300)),
                ('LNAME_THAI', models.CharField(max_length=300)),
                ('FNAME_ENG', models.CharField(max_length=300)),
                ('LNAME_ENG', models.CharField(max_length=300)),
                ('POS_NAME_THAI', models.CharField(max_length=300)),
                ('TYPE_ID', models.IntegerField()),
                ('CORRESPONDING', models.IntegerField()),
                ('END_YEAR', models.IntegerField()),
                ('JDB_ID', models.IntegerField()),
                ('JDB_NAME', models.CharField(max_length=500)),
                ('AT_PERCENT', models.IntegerField()),
            ],
        ),
    ]
|
988,207 | fd3e9ed0263ebd6bad72b4e39ce2fe989bc4712b | # -*- coding: utf-8 -*-
__author__ = 'Renan Cakirerk <renan@cakirerk.org>'
import json
from mongoengine import *
from datetime import datetime
from django.core.mail import send_mail
from django.db import models
from django.db.models import ForeignKey
from django.utils import timezone
from django.contrib.auth.models import AbstractBaseUser, PermissionsMixin
from django.utils.translation import ugettext_lazy as _
from django.utils.http import urlquote
from django.contrib.auth.models import BaseUserManager
"""
MONGODB MODELS
"""
class Location(DynamicDocument):
    """
    Example queries:
    db.location.find({position: { $near : { $geometry : { type : "Point" , coordinates : [-122.255833, 37.79885] }, $maxDistance : 14800 } } } )
    Location.objects(position__near=[-122.255833, 37.79885], position__max_distance=14700)
    Example addresses:
    Degrees Converted to Decimal via http://boulter.com/gps/#37%B0%2047.117%27%2C%20-122%B0%2025.368%27
    150 Franklin St.
    37° 46.553', -122° 25.256'
    Decimal -> Lat 37.775883, Long -122.420933
    0.7 Miles or 1.1265408 kilometers or 1126.54 meters to:
    1188 Franklin St.
    37° 47.117', -122° 25.368'
    Decimal -> Lat 37.785283, Long -122.4228
    14.2 Miles or 22.8527 kilometers or 22852.68 meters to:
    1444 1st Ave Pl Oakland, CA 94606
    37° 47.931', -122° 15.350'
    Decimal -> Lat 37.79885, Long -122.255833
    """
    user_id = LongField(required=True)
    # Fix: pass the callable, not datetime.now() — the original evaluated it
    # once at import time, stamping every document with the module-load time.
    time_created = DateTimeField(default=datetime.now)
    position = PointField(required=True)  # [long, lat] -> google gives [Lat, Long]
    def near(self, meters):
        """
        Returns the closest points in <meters> radius
        """
        lng = self.position['coordinates'][0]
        lat = self.position['coordinates'][1]
        return Location.objects(position__near=[lng, lat], position__max_distance=meters)
class Recommendations(DynamicDocument):
    """
    Stores the temporary recommendations and views for caching purposes
    """
    user_id = LongField(required=True, unique=True)
    # NOTE(review): literal [] defaults may be shared between documents unless
    # mongoengine copies them — a `list` callable would be safer; confirm.
    recommendation_views = DynamicField(default=[])
    recommendations = DynamicField(default=[])
class Taste(DynamicDocument):
    """
    Stores who a user liked or disliked
    """
    user_id = LongField(required=True, unique=True)
    # These will carry data as:
    # likes[user_id] = counter
    # we might show the same user
    # occasionally so we would like to track it
    # NOTE(review): literal {} defaults may be shared between documents unless
    # mongoengine copies them — a `dict` callable would be safer; confirm.
    likes = DynamicField(default={})
    dislikes = DynamicField(default={})
class Matches(DynamicDocument):
    """
    Stores a users matches
    """
    user_id = LongField(required=True, unique=True)
    # mapping of matched user id -> match data (shape defined by callers)
    matches = DynamicField(default={})
class FBLike(DynamicDocument):
    """
    Stores Facebook Like Objects
    """
    l_id = LongField()  # Facebook id of the liked object
    # Fix: pass the callable, not datetime.now() — the original evaluated the
    # timestamp once at import time, so every document shared the same value.
    time_created = DateTimeField(default=datetime.now)
    time_updated = DateTimeField(default=datetime.now)
    meta = {
        'indexes': ['l_id']
    }
class FBProfile(DynamicDocument):
    """Cached copy of a user's Facebook profile, stored in MongoDB."""
    fb_id = StringField(required=True, unique=True)
    username = StringField()
    first_name = StringField(default=None)
    last_name = StringField(default=None)
    gender = StringField(default=None)
    birthday = StringField(default=None)
    friends = DynamicField()
    likes = DynamicField()
    is_generated_user = BooleanField(default=False)
    # Fix: pass the callable, not datetime.now() — the original evaluated the
    # timestamp once at import time, so every document shared the same value.
    time_created = DateTimeField(default=datetime.now)
    time_updated = DateTimeField(default=datetime.now)
    time_deleted = DateTimeField(default=None)
    meta = {
        # NOTE(review): 'email' is indexed but no email field is declared on
        # this document — confirm it only ever arrives as a dynamic field.
        'indexes': ['email', 'fb_id', 'username']
    }
    @property
    def avatar(self):
        # TODO: (renan) Avatar size
        # `picture` is a dynamic field populated from the Facebook payload.
        return self.picture['picture']['data']
    def common_likes(self, user):
        """
        Returns the common likes with another users Facebook Profile
        """
        self_like_ids = set(self.likes.keys()) if self.likes else set()
        other_like_ids = set(user.fb_profile.likes.keys()) if user.fb_profile.likes else set()
        common_like_ids = self_like_ids.intersection(other_like_ids)
        return common_like_ids
    def common_friends(self, user):
        """
        Returns the common friends with another users Facebook Profile
        """
        self_friend_ids = set(self.friends.keys()) if self.friends else set()
        other_friend_ids = set(user.fb_profile.friends.keys()) if user.fb_profile.friends else set()
        common_friend_ids = self_friend_ids.intersection(other_friend_ids)
        return common_friend_ids
"""
POSTGRESQL MODELS
"""
class SallasanaUserManager(BaseUserManager):
    """Manager creating SallasanaUser accounts keyed by email instead of username."""
    def _create_user(self, email, password,
                     is_staff, is_superuser, **extra_fields):
        """
        Creates and saves a User with the given email and password.
        """
        now = timezone.now()
        if not email:
            raise ValueError('The given email must be set')
        email = self.normalize_email(email)
        user = self.model(email=email,
                          is_staff=is_staff, is_active=True,
                          is_superuser=is_superuser, last_login=now,
                          date_joined=now, **extra_fields)
        user.set_password(password)
        user.save(using=self._db)
        return user
    def create_user(self, email, password=None, **extra_fields):
        # Regular account: not staff, not superuser.
        return self._create_user(email, password, False, False,
                                 **extra_fields)
    def create_superuser(self, email, password, **extra_fields):
        # Admin account: staff and superuser.
        return self._create_user(email, password, True, True,
                                 **extra_fields)
class SallasanaUser(AbstractBaseUser, PermissionsMixin):
    """
    A fully featured User model with admin-compliant permissions that uses
    a full-length email field as the username.
    Email and password are required. Other fields are optional.
    """
    email = models.EmailField(_('email address'), max_length=255, unique=True)
    first_name = models.CharField(_('first name'), max_length=255, blank=True)
    last_name = models.CharField(_('last name'), max_length=255, blank=True)
    is_staff = models.BooleanField(_('staff status'), default=False,
                                   help_text=_('Designates whether the user can log into this admin site.'))
    is_active = models.BooleanField(_('active'), default=True,
                                    help_text=_('Designates whether this user should be treated as '
                                                'active. Unselect this instead of deleting accounts.'))
    # This is for simulating Foreign Key with a MongoDB object
    most_recent_location_id = models.CharField(_('most recent position'), max_length=64, blank=True)
    # Matching preferences used by the recommendation engine.
    interest_radius = models.IntegerField(default=100, blank=True)
    interest_gender = models.IntegerField(default=0, blank=True)
    interest_age_min = models.IntegerField(default=18, blank=True)
    interest_age_max = models.IntegerField(default=24, blank=True)
    # This gives a point to let the recommendation engine know where to start in the user db
    last_seeked_user_index = models.BigIntegerField(default=0, blank=True)
    date_joined = models.DateTimeField(_('date joined'), default=timezone.now)
    objects = SallasanaUserManager()
    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = []
    class Meta:
        verbose_name = _('user')
        verbose_name_plural = _('users')
    def get_absolute_url(self):
        return "/users/%s/" % urlquote(self.email)
    def get_full_name(self):
        """
        Returns the first_name plus the last_name, with a space in between.
        """
        full_name = '%s %s' % (self.first_name, self.last_name)
        return full_name.strip()
    def get_short_name(self):
        """
        Returns the short name for the user.
        """
        return self.first_name
    def email_user(self, subject, message, from_email=None):
        """
        Sends an email to this User.
        """
        send_mail(subject, message, from_email, [self.email])
    @property
    def access_token(self):
        """
        Returns the current access token (from python-social-auth).
        """
        social_auth = self.social_auth.get()
        return social_auth.tokens
    @property
    def fb_id(self):
        """
        Returns the persons Facebook User ID
        """
        social_auth = self.social_auth.latest('id')
        return social_auth.uid
    @property
    def fb_profile(self):
        """
        Returns the Facebook profile stored in MongoDB
        """
        return FBProfile.objects.get(fb_id=self.fb_id)
    def set_most_recent_coordinates(self, lon, lat):
        """
        Sets the latest known coordinates of the user
        """
        # NOTE(review): Location declares `user_id` as required, but this
        # passes `uid` (stored as a dynamic field) — Location.user_id is never
        # populated and save() may fail validation; confirm intended name.
        location = Location(uid=self.id, position=[lon, lat])
        location.save()
        self.most_recent_location_id = str(location.id)
        self.save()
    @property
    def most_recent_coordinates(self):
        """
        Returns the latest known coordinates of the user
        """
        location = Location.objects.get(id=self.most_recent_location_id)
        return location.position['coordinates']
    def users_nearby(self, meters):
        """
        Returns the user objects that are in <meters> radius
        """
        location = Location.objects.get(id=self.most_recent_location_id)
        lng = location.position['coordinates'][0]
        lat = location.position['coordinates'][1]
        nearby_locations = Location.objects(position__near=[lng, lat], position__max_distance=meters)
        nearby_user_ids = []
        for loc in nearby_locations:
            # `uid` matches the dynamic field written in set_most_recent_coordinates
            nearby_user_ids.append(loc.uid)
        return SallasanaUser.objects.filter(id__in=nearby_user_ids)
    def get_recommendations(self):
        """
        Returns the recommendations for the user, creating the cache
        document on first access. (DoesNotExist comes from the
        mongoengine star import.)
        """
        try:
            recommendations = Recommendations.objects.get(user_id=self.id)
        except DoesNotExist:
            print "No recommendation object found. Creating one now."
            recommendations = Recommendations(user_id=self.id)
            recommendations.save()
        return recommendations
    def get_taste(self):
        """
        Returns the taste of the user, creating the document on first access.
        """
        try:
            taste = Taste.objects.get(user_id=self.id)
        except DoesNotExist:
            print "No taste object found. Creating one now."
            taste = Taste(user_id=self.id)
            taste.save()
        return taste
    def to_dict(self):
        """Serialize core profile fields (local + Facebook) to a JSON string."""
        data = {
            'id': self.id,
            'username': self.fb_profile.username,
            'first_name': self.first_name,
            'last_name': self.last_name,
            'full_name': self.get_full_name(),
            'avatar': self.fb_profile.avatar.get('url'),
            'gender': self.fb_profile.gender,
            'birthday': self.fb_profile.birthday
        }
        return json.dumps(data)
|
988,208 | b1fbef88006022e7e1dbf4acda97440a7210954d | from django.shortcuts import render, HttpResponse, redirect
from django.utils.crypto import get_random_string
# Create your views here.
def index(request):
    """Render the home page, initializing the session counter on first visit."""
    if 'count' not in request.session:
        request.session['count'] = 0
    return render(request, 'index.html')
def generator(request):
    """Increment the visit counter and store a fresh 10-char random word in the session."""
    request.session['count'] += 1
    # print() form is valid on both Python 2 and 3; the original bare
    # `print request.session['count']` was Python-2-only syntax.
    print(request.session['count'])
    request.session['word'] = get_random_string(length=10)
    return redirect('/')
def reset(request):
    """Clear the counter and the generated word from the session."""
    # pop() with a default avoids the KeyError the bare `del` raised when
    # reset was hit before generator() ever set 'count'.
    request.session.pop('count', None)
    request.session['word'] = ''
    return redirect('/')
988,209 | 7503b8385b45e899d19f19b153a09de3a67cc62c | '''
'''
# This solution handles non-duplicate characters
# This solution handles non-duplicate characters
class Solution:
    def minWindow(self, s: str, t: str) -> str:
        """Smallest substring of s containing every character of t.

        Tracks only the most recent position of each target character, so it
        is only correct when t has no repeated characters.
        """
        wanted = set(t)
        last_seen = {}      # char -> most recent index in s
        found = set()       # distinct target chars encountered so far
        best_span, best_window = float('inf'), ""
        for idx in range(len(s)):
            ch = s[idx]
            if ch in wanted:
                last_seen[ch] = idx
                found.add(ch)
            # Once every target character has appeared, the candidate window
            # runs from the earliest to the latest of the last occurrences.
            if len(found) == len(t):
                lo, hi = float("inf"), float("-inf")
                for pos in last_seen.values():
                    if pos < lo:
                        lo = pos
                    if pos > hi:
                        hi = pos
                if hi - lo < best_span:
                    best_span = hi - lo
                    best_window = s[lo:hi + 1]
        return best_window
from collections import Counter
# Handles duplicates
class SolutionDuplicates:
def minWindow(self, s: str, t: str) -> str:
if not t or not s:
return ""
dict_t = Counter(t)
required = len(dict_t)
# Filter all the characters from s into a new list along with their index.
# The filtering criteria is that the character should be present in t.
filtered_s = []
for i, char in enumerate(s):
if char in dict_t:
filtered_s.append((i, char))
l, r = 0, 0
formed = 0
window_counts = {}
ans = float("inf"), None, None
# Look for the characters only in the filtered list instead of entire s. This helps to reduce our search.
# Hence, we follow the sliding window approach on as small list.
while r < len(filtered_s):
character = filtered_s[r][1]
window_counts[character] = window_counts.get(character, 0) + 1
if window_counts[character] == dict_t[character]:
formed += 1
# If the current window has all the characters in desired frequencies i.e. t is present in the window
while l <= r and formed == required:
character = filtered_s[l][1]
# Save the smallest window until now.
end = filtered_s[r][0]
start = filtered_s[l][0]
if end - start + 1 < ans[0]:
ans = (end - start + 1, start, end)
window_counts[character] -= 1
if window_counts[character] < dict_t[character]:
formed -= 1
l += 1
r += 1
return "" if ans[0] == float("inf") else s[ans[1] : ans[2] + 1] |
988,210 | b2993204a31ebe870268f01de34d934b6725aa58 | print("hello python")
# Print the remaining greeting/joke lines, one per line (same output as before).
for message in (
    "how are you?",
    "I'm fine",
    "what do you call a fish without an eye?",
    "fsh",
    "I'm cold",
    "I'm very hungry",
    "I like coffee",
    "I love python",
    "A watched pot never boils",
):
    print(message)
|
988,211 | 6f2be5b7f11a1656ad04f24bc82436f7d340e87c | from urllib.request import urlopen
import urllib.parse
import requests
from requests.exceptions import HTTPError
from django.conf import settings
from django.core.files import File
from django.core.files.temp import NamedTemporaryFile
from django.core.management import BaseCommand
from django.template.defaultfilters import slugify
from intrepidboats.apps.boats.models import Video
from intrepidboats.apps.difference.models import SharedTestimonial
from intrepidboats.apps.owners_portal.models import SharedVideo
from intrepidboats.libs.vimeo_rate_limiting.models import VimeoRateLimiting
class Command(BaseCommand):
    help = 'Obtain video thumbnails from Vimeo'
    def handle(self, *args, **options):
        """Backfill missing thumbnails for gallery, testimonial and boat-model videos."""
        gallery_videos_no_thumb = SharedVideo.objects.filter(completed=True, thumbnail='')
        testimonial_videos_no_thumb = SharedTestimonial.objects.filter(video_id__isnull=False, thumbnail='')
        boat_model_videos_no_thumb = Video.objects.filter(thumbnail='') | Video.objects.filter(thumbnail__isnull=True)
        for video in gallery_videos_no_thumb:
            thumbnail_file = self.get_vimeo_thumbnail(video.video_id)
            if thumbnail_file:
                video.thumbnail.save(self.generate_filename(video.comment), thumbnail_file)
                video.save()
        for testimonial in testimonial_videos_no_thumb:
            thumbnail_file = self.get_vimeo_thumbnail(testimonial.video_id)
            if thumbnail_file:
                testimonial.thumbnail.save(self.generate_filename(testimonial.message), thumbnail_file)
                testimonial.save()
        for video in boat_model_videos_no_thumb:
            thumbnail_file = self.get_vimeo_thumbnail(video.vimeo_video_code)
            if thumbnail_file:
                video.thumbnail.save(self.generate_filename(video.vimeo_video_code), thumbnail_file)
                video.save()
    def get_vimeo_thumbnail(self, vimeo_id):
        """Return a Django File with the video's thumbnail, or None when the
        local rate-limit budget is exhausted or any request/parse step fails."""
        rate_limit_data = VimeoRateLimiting.get_instance()
        if not rate_limit_data.available_for_request():
            return None
        field = 'pictures'
        vimeo_api_url = settings.VIMEO_CONFIG['VIMEO_API_URL']
        videos_url = urllib.parse.urljoin(vimeo_api_url, 'videos')
        api_url = '{}/{}?fields={}'.format(videos_url, vimeo_id, field)
        headers = {"Authorization": "Bearer %s" % settings.VIMEO_CONFIG['PRO_UPLOAD_TOKEN']}
        response = requests.get(api_url, headers=headers)
        # NOTE(review): headers._store is a private requests structure —
        # plain response.headers['x-ratelimit-reset'] is the supported API.
        rate_limit_data.update_with(
            reset_time=response.headers._store['x-ratelimit-reset'][1],
            remaining_requests=response.headers._store['x-ratelimit-remaining'][1],
        )
        try:
            response.raise_for_status()
        except HTTPError:
            return None
        try:
            # prefer the 4th size when available, otherwise the last one
            sizes = response.json()[field]['sizes']
            index = 3 if len(sizes) >= 4 else -1
            thumbnail_url = sizes[index]['link']
        except (TypeError, IndexError):
            return None
        return self.download_thumbnail(thumbnail_url)
    def download_thumbnail(self, url):
        """Download `url` into a temp file; return a Django File or None on HTTP error."""
        img_temp = NamedTemporaryFile(delete=True)
        try:
            img_temp.write(urlopen(url).read())
        except HTTPError:
            return None
        img_temp.flush()
        return File(img_temp)
    def generate_filename(self, text_field):
        """Slugify `text_field` (truncated to 40 chars) into a .jpg filename."""
        return '{}.jpg'.format(slugify(text_field)[:40])
|
988,212 | dff8c9a23e9baef4a47bf0f69081f2a4dd10dfc9 | # https://api.spoonacular.com/recipes/random?apiKey=40b4dc4ae9fe4482b9d5633dd6ff2738&number=1&tags=glutenfree,dinner
import models
import requests
from flask import Blueprint, jsonify, request, Response
from playhouse.shortcuts import model_to_dict
glutenFreeRecipe = Blueprint('glutenFreeRecipes', 'glutenFreeRecipe') # this will be my route
## here we are retrieving the random recipe from the spoonacular API
@glutenFreeRecipe.route('/', methods=["GET"])
def get_random_recipes():
    """Proxy one random gluten-free dinner recipe from the Spoonacular API.

    NOTE(review): the API key is hard-coded in the URL — move it to config or
    an environment variable before sharing/deploying this code.
    NOTE(review): a requests.Response object is handed straight to
    flask.Response — confirm it serializes as intended (.content may be what
    was meant).
    """
    try:
        glutenFreeRecipe = "test code"
        glutenFreeRecipe = requests.get('https://api.spoonacular.com/recipes/random?apiKey=40b4dc4ae9fe4482b9d5633dd6ff2738&number=1&tags=glutenfree,dinner')
        # print(breakfastRecipe.content)
        # recipe.headers['content-type':]
        # return jsonify(data=recipe.content, status={"code": 200, "message": "Success"})
        return Response(glutenFreeRecipe, mimetype='application/json')
    except models.DoesNotExist:
        return jsonify(data={}, status={"code": 401, "message": "Error getting the resources"})
@glutenFreeRecipe.route('/', methods=["POST"])
def saved_recipe():
## see request payload anagolous to req.body in express
payload = request.get_json()
print(type(payload), 'payload')
saved_recipe = models.SavedRecipe.create(**payload)
## see the object
print(saved_recipe.__dict__)
## Look at all the methods
print(dir(saved_recipe))
# Change the model to a dict
print(model_to_dict(saved_recipe), 'model to dict')
saved_recipe_dict = model_to_dict(saved_recipe)
return jsonify(data=saved_recipe_dict, status={"code": 201, "message": "Success"}) |
988,213 | c8af933cb3ef4cf2ec33ab2e8cac2f43c726fec1 | import time
import VL53L0X
import subprocess
import RPi.GPIO as GPIO
#box sizes
diameter = 120
totalDist = 150
#GPIO Mode (BOARD / BCM)
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
#GPIO and Setup for Pressuare Pad
pressurePad = 4
GPIO.setup(pressurePad,GPIO.IN)
#GPIO and Setup for LEDs
red = 6
green = 5
GPIO.setup(red,GPIO.OUT)
GPIO.setup(green,GPIO.OUT)
GPIO.output(red,GPIO.LOW)
GPIO.output(green,GPIO.HIGH)
#GPIO and Setup for HC-SR04
GPIO_TRIGGER_1 = 18
GPIO_ECHO_1 = 23
GPIO_TRIGGER_2 = 17
GPIO_ECHO_2 = 22
GPIO.setup(GPIO_TRIGGER_1, GPIO.OUT)
GPIO.setup(GPIO_ECHO_1, GPIO.IN)
GPIO.setup(GPIO_TRIGGER_2, GPIO.OUT)
GPIO.setup(GPIO_ECHO_2, GPIO.IN)
# Create a VL53L0X object
tof = VL53L0X.VL53L0X()
# Start ranging
tof.start_ranging(VL53L0X.VL53L0X_BETTER_ACCURACY_MODE)
def redLED():
    # Busy indicator: red on, green off.
    GPIO.output(red, GPIO.HIGH)
    GPIO.output(green, GPIO.LOW)
def greenLED():
    # Ready indicator: green on, red off.
    GPIO.output(green, GPIO.HIGH)
    GPIO.output(red, GPIO.LOW)
def Blink():
    """Alternate green/red twice at 100 ms steps, ending with green lit."""
    for _ in range(2):
        greenLED()
        time.sleep(0.1)
        redLED()
        time.sleep(0.1)
    greenLED()
def distanceHCSR04(trigger, echo):
    """Measure distance in millimetres with an HC-SR04 ultrasonic sensor.

    NOTE(review): both busy-wait loops have no timeout, so a missing echo
    (disconnected/faulty sensor) hangs this function forever — confirm
    that is acceptable for this rig.
    """
    # 10 microsecond pulse on the trigger pin starts a measurement.
    GPIO.output(trigger, True)
    time.sleep(0.00001)
    GPIO.output(trigger, False)
    StartTime = time.time()
    StopTime = time.time()
    # Echo pin goes high when the pulse is sent, low when it returns.
    while GPIO.input(echo) == 0:
        StartTime = time.time()
    while GPIO.input(echo) == 1:
        StopTime = time.time()
    TimeElapsed = StopTime - StartTime
    # Speed of sound ~343 m/s = 343000 mm/s; halve for the round trip.
    distance = int((TimeElapsed * 343000) / 2)
    return distance
def size():
    """Read all three range sensors after settling and print their values."""
    time.sleep(1)  # let the object settle before measuring
    ultrasonic_a = distanceHCSR04(GPIO_TRIGGER_1, GPIO_ECHO_1)
    ultrasonic_b = distanceHCSR04(GPIO_TRIGGER_2, GPIO_ECHO_2)
    laser = tof.get_distance()
    print("Sensor 1: ", ultrasonic_a)
    print("Sensor 2: ", ultrasonic_b)
    print("Sensor 3: ", laser)
def objectFound():
    # Signal busy (red), report measurements, photograph the object via
    # the external pic.sh script, then signal ready (green) again.
    redLED()
    print("There an object")
    print("The size is:")
    size()
    # pic.sh is expected to be executable in the working directory.
    cam = subprocess.Popen('./pic.sh')
    cam.wait()
    greenLED()
def objectGone():
    """Announce that the object was removed and blink the status LEDs.

    The original re-entered pressure() here, making
    pressure() -> objectGone() -> pressure() recurse without bound (one
    stack frame per detected object, eventually RecursionError). The main
    `while True` loop already polls pressure() repeatedly, so no re-entry
    is needed.
    """
    print("The object is gone")
    Blink()
def pressure():
    """Poll the pressure pad once; handle an object arriving then leaving.

    Blocks while the pad is pressed, re-sampling every 200 ms.
    """
    # Renamed from `input`, which shadowed the builtin input().
    pad_state = GPIO.input(pressurePad)
    if pad_state:
        objectFound()
        # Wait until the pad is released before declaring the object gone.
        while pad_state:
            pad_state = GPIO.input(pressurePad)
            time.sleep(0.2)
        objectGone()
# Main loop: poll the pressure pad every 200 ms forever.
while True:
    pressure()
    time.sleep(0.2)
|
988,214 | 49112b644e355128dceeda533b9a25b4294b40d8 | from gitgud.levels.intro import level as intro_level
from gitgud.levels.rampup import level as rampup_level
from gitgud.levels.extras import level as extras_level
from gitgud.levels.util import AllLevels
# Registry of every level pack, in intended play order.
all_levels = AllLevels([
    intro_level,
    rampup_level,
    extras_level
])
|
988,215 | e11e223a4e1055031d9527916d20b90156e48c7f | # -*- coding: utf-8 -*-
# Copyright (c) 2010 'Quadra Informatique'
# Copyright (c) 2010 'ENS Lyon - UNIS'
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to the
# Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
"""
This module contains the tool of zopeskel.unis
"""
import os
from setuptools import setup, find_packages
version = '1.14'

# Assemble optional long-description sections from docs/ if present.
# Files are opened with context managers so the handles are closed
# (the original's bare open(...).read() calls leaked them).
documentation = ''
_usage_path = os.path.join("docs", "unis-zopeskel-usage.txt")
if os.path.exists(_usage_path):
    with open(_usage_path) as _f:
        documentation += _f.read() + "\n"

history = ''
_history_path = os.path.join("docs", "HISTORY.txt")
if os.path.exists(_history_path):
    with open(_history_path) as _f:
        history += _f.read()
setup(name='zopeskel.unis',
version=version,
description="Different kind of buildout templates used by Quadra-Informatique",
long_description=open("README.txt").read() + "\n" + documentation + history,
# Get more strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
"Programming Language :: Python",
"Development Status :: 5 - Production/Stable",
"Framework :: Plone",
"Framework :: Zope2",
"Intended Audience :: System Administrators",
"Intended Audience :: Education",
"License :: OSI Approved",
"Natural Language :: English",
"Natural Language :: French",
"Operating System :: OS Independent",
"Programming Language :: Python :: 2.6",
"Topic :: Software Development :: Code Generators",
"Topic :: System :: Archiving :: Backup",
"Topic :: System :: Clustering",
"Topic :: System :: Installation/Setup",
"Topic :: Utilities",
],
keywords='',
author='Quadra-Informatique',
author_email='plone@quadra-informatique.fr',
url='http://github.com/collective/zopeskel.unis',
license='CeCill-B',
packages=find_packages(exclude=['ez_setup']),
namespace_packages=['zopeskel'],
include_package_data=True,
zip_safe=False,
install_requires=[
'setuptools',
# -*- Extra requirements: -*-
'PasteScript',
'Cheetah>1.0,<=2.2.1',
'ZopeSkel<3.0a1',
],
entry_points="""
# -*- Entry points: -*-
[paste.paster_create_template]
unis_plone4_buildout = zopeskel.unis.templates:UnisPlone4Buildout
""",
)
|
988,216 | 29dc73d18002f43dd96cc3b7ac058f8753a6f8a5 | tin_code = {
"hc" : "Học",
"ng" : "Người",
"pt" : "Phát Triển",
"any" : "Anh người yêu"
}
# Interactive lookup loop over the `tin_code` abbreviation table defined
# above: show the known keys, translate the entered key, and offer to add
# missing entries. Answering anything but Y/y to the add prompt exits.
while True:
    # Keys tab-separated; trailing tab matches the original menu format.
    print("\t".join(tin_code) + "\t")
    n = input("Enter: ")
    if n in tin_code:
        print(tin_code[n])
    else:
        print("Not found, you want(Y / N)??")
        cha = input("Enter :")
        if cha.lower() == "y":
            new = input("Enter key:")
            tin_code[new] = input("Enter value:")
        else:
            break
988,217 | 1c52db3691c8e808f9831971fe2b36e939ab8ff2 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: initial tables for the `movies` app.

    Auto-generated by South. The `models` dict below is a frozen snapshot
    of the ORM state used to build the `orm` argument of
    forwards()/backwards() — keep it in sync with any hand edits.
    """

    def forwards(self, orm):
        """Create User (plus its M2M tables), List, Movie, Record, Action
        and ActionRecord tables."""
        # Adding model 'User'
        db.create_table(u'movies_user', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('password', self.gf('django.db.models.fields.CharField')(max_length=128)),
            ('last_login', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
            ('is_superuser', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('username', self.gf('django.db.models.fields.CharField')(unique=True, max_length=30)),
            ('first_name', self.gf('django.db.models.fields.CharField')(max_length=30, blank=True)),
            ('last_name', self.gf('django.db.models.fields.CharField')(max_length=30, blank=True)),
            ('email', self.gf('django.db.models.fields.EmailField')(max_length=75, blank=True)),
            ('is_staff', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('is_active', self.gf('django.db.models.fields.BooleanField')(default=True)),
            ('date_joined', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
            ('preferences', self.gf('annoying.fields.JSONField')(default='{"lang": "ru"}')),
        ))
        db.send_create_signal(u'movies', ['User'])
        # Adding M2M table for field groups on 'User'
        m2m_table_name = db.shorten_name(u'movies_user_groups')
        db.create_table(m2m_table_name, (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('user', models.ForeignKey(orm[u'movies.user'], null=False)),
            ('group', models.ForeignKey(orm[u'auth.group'], null=False))
        ))
        db.create_unique(m2m_table_name, ['user_id', 'group_id'])
        # Adding M2M table for field user_permissions on 'User'
        m2m_table_name = db.shorten_name(u'movies_user_user_permissions')
        db.create_table(m2m_table_name, (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('user', models.ForeignKey(orm[u'movies.user'], null=False)),
            ('permission', models.ForeignKey(orm[u'auth.permission'], null=False))
        ))
        db.create_unique(m2m_table_name, ['user_id', 'permission_id'])
        # Adding model 'List'
        db.create_table(u'movies_list', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('title', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('key_name', self.gf('django.db.models.fields.CharField')(max_length=255)),
        ))
        db.send_create_signal(u'movies', ['List'])
        # Adding model 'Movie'
        db.create_table(u'movies_movie', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('title', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('title_ru', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('overview', self.gf('django.db.models.fields.TextField')(null=True)),
            ('plot', self.gf('django.db.models.fields.TextField')(null=True)),
            ('director', self.gf('django.db.models.fields.CharField')(max_length=255, null=True)),
            ('writer', self.gf('django.db.models.fields.CharField')(max_length=255, null=True)),
            ('genre', self.gf('django.db.models.fields.CharField')(max_length=255, null=True)),
            ('actors', self.gf('django.db.models.fields.CharField')(max_length=255, null=True)),
            ('imdb_id', self.gf('django.db.models.fields.CharField')(unique=True, max_length=15)),
            ('tmdb_id', self.gf('django.db.models.fields.IntegerField')(unique=True)),
            ('imdb_rating', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=2, decimal_places=1)),
            ('poster_ru', self.gf('django.db.models.fields.CharField')(max_length=255, null=True)),
            ('poster_en', self.gf('django.db.models.fields.CharField')(max_length=255, null=True)),
            ('release_date', self.gf('django.db.models.fields.DateField')(null=True)),
            ('runtime', self.gf('django.db.models.fields.TimeField')(null=True)),
            ('homepage', self.gf('django.db.models.fields.URLField')(max_length=200, null=True)),
            ('trailers', self.gf('annoying.fields.JSONField')(null=True)),
        ))
        db.send_create_signal(u'movies', ['Movie'])
        # Adding model 'Record'
        db.create_table(u'movies_record', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['movies.User'])),
            ('movie', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['movies.Movie'])),
            ('list', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['movies.List'])),
            ('rating', self.gf('django.db.models.fields.IntegerField')(default=0)),
            ('comment', self.gf('django.db.models.fields.CharField')(default='', max_length=255)),
            ('date', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
        ))
        db.send_create_signal(u'movies', ['Record'])
        # Adding model 'Action'
        db.create_table(u'movies_action', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
        ))
        db.send_create_signal(u'movies', ['Action'])
        # Adding model 'ActionRecord'
        db.create_table(u'movies_actionrecord', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['movies.User'])),
            ('action', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['movies.Action'])),
            ('movie', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['movies.Movie'])),
            ('list', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['movies.List'], null=True, blank=True)),
            ('comment', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
            ('rating', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
            ('date', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
        ))
        db.send_create_signal(u'movies', ['ActionRecord'])

    def backwards(self, orm):
        """Drop every table created by forwards()."""
        # Deleting model 'User'
        db.delete_table(u'movies_user')
        # Removing M2M table for field groups on 'User'
        db.delete_table(db.shorten_name(u'movies_user_groups'))
        # Removing M2M table for field user_permissions on 'User'
        db.delete_table(db.shorten_name(u'movies_user_user_permissions'))
        # Deleting model 'List'
        db.delete_table(u'movies_list')
        # Deleting model 'Movie'
        db.delete_table(u'movies_movie')
        # Deleting model 'Record'
        db.delete_table(u'movies_record')
        # Deleting model 'Action'
        db.delete_table(u'movies_action')
        # Deleting model 'ActionRecord'
        db.delete_table(u'movies_actionrecord')

    # Frozen ORM snapshot (generated) used by South to build `orm` above.
    models = {
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'movies.action': {
            'Meta': {'object_name': 'Action'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        u'movies.actionrecord': {
            'Meta': {'object_name': 'ActionRecord'},
            'action': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['movies.Action']"}),
            'comment': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'list': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['movies.List']", 'null': 'True', 'blank': 'True'}),
            'movie': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['movies.Movie']"}),
            'rating': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['movies.User']"})
        },
        u'movies.list': {
            'Meta': {'object_name': 'List'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'key_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        u'movies.movie': {
            'Meta': {'ordering': "['pk']", 'object_name': 'Movie'},
            'actors': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
            'director': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
            'genre': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
            'homepage': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'imdb_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '15'}),
            'imdb_rating': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '2', 'decimal_places': '1'}),
            'overview': ('django.db.models.fields.TextField', [], {'null': 'True'}),
            'plot': ('django.db.models.fields.TextField', [], {'null': 'True'}),
            'poster_en': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
            'poster_ru': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
            'release_date': ('django.db.models.fields.DateField', [], {'null': 'True'}),
            'runtime': ('django.db.models.fields.TimeField', [], {'null': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'title_ru': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'tmdb_id': ('django.db.models.fields.IntegerField', [], {'unique': 'True'}),
            'trailers': ('annoying.fields.JSONField', [], {'null': 'True'}),
            'writer': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'})
        },
        u'movies.record': {
            'Meta': {'object_name': 'Record'},
            'comment': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255'}),
            'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'list': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['movies.List']"}),
            'movie': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['movies.Movie']"}),
            'rating': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['movies.User']"})
        },
        u'movies.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'preferences': ('annoying.fields.JSONField', [], {'default': '\'{"lang": "ru"}\''}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        }
    }

    complete_apps = ['movies']
988,218 | 59624bf4eb328b6d4eba9ac42803f14704213032 | """
This examples trains a CrossEncoder for the NLI task. A CrossEncoder takes a sentence pair
as input and outputs a label. Here, it learns to predict the labels: "contradiction": 0, "entailment": 1, "neutral": 2.
It does NOT produce a sentence embedding and does NOT work for individual sentences.
Usage:
python training_nli.py
"""
import pandas
from torch.utils.data import DataLoader
import math
from sentence_transformers import LoggingHandler, util
from sentence_transformers.cross_encoder import CrossEncoder
from sentence_transformers.cross_encoder.evaluation import CESoftmaxAccuracyEvaluator
from sentence_transformers.readers import InputExample
import logging
from datetime import datetime
import numpy as np
import csv
#### Just some code to print debug information to stdout
logging.basicConfig(format='%(asctime)s - %(message)s',
                    datefmt='%Y-%m-%d %H:%M:%S',
                    level=logging.INFO,
                    handlers=[LoggingHandler()])
#### /print debug information to stdout

# As dataset, we use SNLI + MultiNLI
# Check if dataset exsist. If not, download and extract it
dataset_path = 'contradictory-my-dear-watson/train.csv'

# Read the AllNLI.tsv.gz file and create the training dataset
logging.info("Read AllNLI train dataset")

label2int = {"contradiction": 0, "entailment": 1, "neutral": 2}
train_samples = []
# NOTE(review): dev_samples is never populated, so the evaluator below is
# built over an empty set — confirm whether a train/dev split was intended.
dev_samples = []
train_data = pandas.read_csv(dataset_path)
# Swap labels 0 <-> 2 to match the pretrained model's label order; the
# inverse swap is applied to the predictions before writing the submission.
train_data['label'] = train_data['label'].replace([0, 2], [2, 0])
# NOTE(review): loop variable `id` shadows the builtin of the same name.
for id, row in train_data.iterrows():
    label_id = int(row['label'])
    train_samples.append(InputExample(texts=[row['premise'], row['hypothesis']], label=label_id))

train_batch_size = 16
num_epochs = 10
# Timestamped output directory for model checkpoints.
model_save_path = 'output/training_allnli-' + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")

# Define our CrossEncoder model. We use distilroberta-base as basis and setup it up to predict 3 labels
# model = CrossEncoder('sentence-transformers/distilbert-base-nli-stsb-mean-tokens', num_labels=len(label2int))
# model = CrossEncoder('sentence-transformers/distilbert-multilingual-nli-stsb-quora-ranking',
#                      num_labels=len(label2int))
# model = CrossEncoder('sentence-transformers/xlm-r-100langs-bert-base-nli-mean-tokens', num_labels=len(label2int))
model = CrossEncoder('joeddav/xlm-roberta-large-xnli', num_labels=len(label2int))

# We wrap train_samples, which is a list ot InputExample, in a pytorch DataLoader
train_dataloader = DataLoader(train_samples, shuffle=True, batch_size=train_batch_size)

# During training, we use CESoftmaxAccuracyEvaluator to measure the accuracy on the dev set.
# NOTE(review): `evaluator` is created but never passed to model.fit()
# below, so no dev evaluation actually runs — confirm intent.
evaluator = CESoftmaxAccuracyEvaluator.from_input_examples(dev_samples, name='AllNLI-dev')

warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1)  # 10% of train data for warm-up
logging.info("Warmup-steps: {}".format(warmup_steps))

# Train the model
model.fit(train_dataloader=train_dataloader,
          epochs=num_epochs,
          warmup_steps=warmup_steps,
          output_path=model_save_path)

# Predict labels for the test set and write a Kaggle-style submission file.
test_dataset = 'contradictory-my-dear-watson/test.csv'
df = pandas.read_csv(test_dataset)
sentence_pairs = []
ids = []
for id, row in df.iterrows():
    label_id = 0
    ids.append(row['id'])
    sentence_pairs.append([row['premise'], row['hypothesis']])

pred_scores = model.predict(sentence_pairs, convert_to_numpy=True, show_progress_bar=False, batch_size=4)
pred_labels = np.argmax(pred_scores, axis=1)

out_df = pandas.DataFrame([ids, pred_labels]).transpose()
out_df = out_df.rename(columns={0: 'id', 1: 'prediction'})
# Undo the 0 <-> 2 label swap applied to the training labels above.
out_df['prediction'] = out_df['prediction'].replace([2, 0], [0, 2])
out_df.to_csv('submission.csv', index=False)
|
988,219 | e5b1bad365ad465bf36e5e05a4358a2352425555 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-06-16 06:40
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create the GoogleExpressUser table (id, email, password)."""

    dependencies = [
        ('checkout_app', '0003_auto_20170615_1355'),
    ]

    operations = [
        migrations.CreateModel(
            name='GoogleExpressUser',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('email', models.CharField(max_length=120, verbose_name='Google Express email')),
                # NOTE(review): this column stores the password as plain
                # text — confirm whether it should be hashed/encrypted.
                ('password', models.CharField(max_length=120, verbose_name='Google Express password')),
            ],
        ),
    ]
|
988,220 | 0a7d9453d07dc86d2955c49787783a8342833394 | from torchvision import models
import torch.nn as nn
import torch
import torch.nn.functional as F
import math
import time
class Graph_Layer(nn.Module):
    """Graph propagation layer: out = A(sim_feat) @ x @ W.

    The affinity matrix A is built from pairwise similarities of
    `sim_feat`, optionally sparsified to the top-k entries per row, and
    row-normalized to sum to 1.
    """

    def __init__(self, in_size, n_out=None, method='cos', k=None):
        """in_size: feature dim of x; n_out: output dim (defaults to in_size);
        method: substring flags, may contain 'cos' and/or 'zero_self'."""
        super(Graph_Layer, self).__init__()
        self.in_size = in_size
        self.n_out = self.in_size if n_out is None else n_out
        self.weight = nn.Parameter(torch.randn(in_size, self.n_out))
        nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5))
        self.method = method

    def forward(self, x, sim_feat, to_keep=None):
        """Propagate x (N x in_size) over the affinity graph of sim_feat
        and project with self.weight; returns N x n_out."""
        G = self.get_affinity(sim_feat, to_keep)
        return torch.mm(torch.mm(G, x), self.weight)

    def get_affinity(self, input, to_keep=None):
        """Return the row-normalized N x N affinity matrix of `input`.

        'cos' in method: L2-normalize rows first (cosine similarity).
        'zero_self' in method: zero the diagonal.
        to_keep: keep only the top-k entries per row before normalizing.
        """
        if 'cos' in self.method:
            input = F.normalize(input)
        G = torch.mm(input, torch.t(input))
        if 'zero_self' in self.method:
            # Zero the diagonal. The original built the mask with
            # torch.eye(...).cuda(), which crashed on CPU-only machines;
            # allocate on G's device/dtype instead.
            G = G * (1 - torch.eye(G.size(0), device=G.device, dtype=G.dtype))
        if to_keep is not None:
            topk, indices = torch.topk(G, k=to_keep, dim=1)
            G = torch.zeros_like(G).scatter(1, indices, topk)
        # Row-normalize so each node's incoming weights sum to 1.
        G = G / torch.sum(G, dim=1, keepdim=True)
        return G
class Graph_Layer_Wrapper(nn.Module):
    """Applies a non-linearity to the similarity features before Graph_Layer."""

    def __init__(self, in_size, n_out=None, non_lin='HT', method='cos'):
        """non_lin: 'HT' for Hardtanh or 'rl' (case-insensitive) for ReLU."""
        super(Graph_Layer_Wrapper, self).__init__()
        # Validate non_lin before allocating the inner layer (fail fast).
        if non_lin == 'HT':
            self.non_linearity = nn.Hardtanh()
        elif non_lin.lower() == 'rl':
            self.non_linearity = nn.ReLU()
        else:
            # The original built this with str('msg %s', non_lin), which
            # itself raises TypeError instead of the intended ValueError.
            raise ValueError('non_lin %s not recognized' % non_lin)
        self.graph_layer = Graph_Layer(in_size, n_out=n_out, method=method)

    def forward(self, x, sim_feat, to_keep=None):
        """Non-linearity on sim_feat, then graph propagation of x."""
        sim_feat = self.non_linearity(sim_feat)
        return self.graph_layer(x, sim_feat, to_keep=to_keep)

    def get_affinity(self, input, to_keep=None):
        """Affinity of the non-linearly transformed input."""
        input = self.non_linearity(input)
        return self.graph_layer.get_affinity(input, to_keep=to_keep)
988,221 | d1098dbf2ee7f66b3ce8c7f4dd2c14833769ab3d | import pandas as pd
import streamlit as st
import plotly.express as px
# Cached so Streamlit doesn't re-download the CSV on every rerun.
@st.cache
def get_data():
    """Download the Inside Airbnb NYC listings snapshot as a DataFrame."""
    url = "http://data.insideairbnb.com/united-states/ny/new-york-city/2019-09-12/visualisations/listings.csv"
    return pd.read_csv(url)

df = get_data()
# Default column subset preselected in the multiselect widget.
cols = ["name", "host_name", "neighbourhood", "room_type", "price"]
# NOTE(review): st_ms (the selected columns) is never used below — the
# full DataFrame is displayed regardless; confirm intent.
st_ms = st.multiselect("Columns", df.columns.tolist(), default=cols)
st.dataframe(df)
988,222 | 5a116da5be1ca55030caddba5883d3688f00a5bf | from halo_api.constants import build_url
import requests
class Auth(object):
    """Username/password API client that attaches and auto-refreshes JWTs.

    Tokens default to empty strings so the "not logged in" guard in
    _request_proxy actually fires; previously they were bare annotations,
    and reading them before login() raised AttributeError instead of the
    intended message.
    """

    _access_token: str = ""
    _refresh_token: str = ""

    def __init__(self, user: str, password: str):
        self.user = user
        self.password = password

    def _request_proxy(self, method, *args, **kwargs) -> dict:
        """Issue `method` via requests with auth headers; refresh and retry
        once if the server replies with {"error": "refresh"}."""
        if not self._access_token:
            raise Exception("you aren't logged in!")
        default_headers = {
            "Authorization": f"Bearer {self._access_token}",
            "x-refresh-token": self._refresh_token,
        }
        # Caller-supplied headers are kept; auth headers win on conflict.
        headers = {**kwargs.get("headers", {}), **default_headers}
        kwargs["headers"] = headers
        req = getattr(requests, method)(*args, **kwargs)
        try:
            js = req.json()
            error = js.get("error")
            if error == "refresh":
                # Access token expired: fetch a fresh pair, then retry once.
                resp = requests.get(
                    build_url("/accounts/token/refresh"), headers=default_headers
                )
                err = resp.json().get("error")
                message = f"Could not refresh token automatically, please log in again!\n\
Server said\n{err}"
                if err:
                    # NOTE(review): this Exception is caught by the broad
                    # handler below (behavior preserved from the original),
                    # so the message is printed rather than propagated.
                    raise Exception(message)
                self._access_token = resp.headers.get("x-access-token")
                self._refresh_token = resp.headers.get("x-refresh-token")
                return self._request_proxy(method, *args, **kwargs)
            if error:
                raise ValueError(error)
            return js
        except ValueError:
            raise
        except Exception as e:
            # Best-effort fallback: non-JSON bodies fall through to
            # returning the raw response object.
            print(e)
            return req

    def patch(self, url, *args, **kwargs):
        return self._request_proxy("patch", url, *args, **kwargs)

    def post(self, url, *args, **kwargs):
        return self._request_proxy("post", url, *args, **kwargs)

    def get(self, url, *args, **kwargs):
        return self._request_proxy("get", url, *args, **kwargs)

    def login(self):
        """Authenticate with user/password and store the token pair.

        Raises Exception with the server's error message on failure;
        returns self for chaining.
        """
        ret = requests.post(
            build_url("/accounts/login"),
            json={"user": self.user, "password": self.password},
        )
        if not ret.ok:
            raise Exception(
                f"Authentication error!\n Server said:\n{ret.json()['error']}"
            )
        resp = ret.json()["user_data"]
        user = resp["user"]
        name = resp["name"]
        print(f"Logged in as {user} ({name})")
        self._access_token = ret.headers.get("x-access-token")
        self._refresh_token = ret.headers.get("x-refresh-token")
        return self
988,223 | b93f3287013ae873ba663b2e015d5d58c00a0653 | from django.db import models
# Chart of Accounts
class COA(models.Model):
    """Chart of Accounts entry: one ledger account with running balances.

    NOTE(review): monetary amounts are FloatFields — consider DecimalField
    to avoid binary floating-point rounding in accounting sums.
    """
    is_active = models.BooleanField(verbose_name='active', default=True)
    name = models.CharField(verbose_name="name", max_length=30, null=True)
    # Account number is the unique business key.
    number = models.CharField(verbose_name="number", max_length=30, unique=True, null=True)
    description = models.CharField(verbose_name="description", max_length=255, null=True)
    # Which side (debit/credit) increases this account.
    normal_side = models.CharField(verbose_name="normal_side", max_length=255, null=True)
    category = models.CharField(verbose_name="category", max_length=255, null=True)
    subcategory = models.CharField(verbose_name="subcategory", max_length=255, null=True)
    initial_balance = models.FloatField(verbose_name="initial balance", default=0.00, null=True)
    debit = models.FloatField(verbose_name="debit", default=0.00, null=True)
    credit = models.FloatField(verbose_name="credit", default=0.00, null=True)
    balance = models.FloatField(verbose_name="balance", default=0.00, null=True)
    date_added = models.DateField(verbose_name='date joined', auto_now_add=True, null=True)
    # NOTE(review): plain integer rather than a ForeignKey to the user
    # model — confirm whether referential integrity is wanted here.
    user_id = models.IntegerField(verbose_name='user_id', null=True)
    order = models.CharField(verbose_name='order', max_length=255, null=True)
    # Which financial statement the account appears on.
    statement = models.CharField(verbose_name='statement', max_length=10, null=True)

    def __str__(self):
        return self.name
988,224 | 101b45d3f8906eeba4d79dc47ef0bf50c2b21456 | d = {'name': 'xc', 'age': 18}
# `d` (the dict literal defined above) — access a value by key.
print(d['name'])
# dict() keyword-argument constructor builds an equivalent mapping.
dd = dict(name='xc',age=16)
print(dd)
988,225 | 5a087e8d9df87e7b9744b22dd1783c99693f193e | import random
import elote as elo
import constructs.Food
class Initializer:
    """Seeds every food with an Elo competitor whose initial rating is
    drawn from a Gaussian N(mu, sigma)."""

    def __init__(self, foods, mu=500, sigma=40):
        for item in foods:
            competitor = elo.EloCompetitor(initial_rating=random.gauss(mu, sigma))
            item.elo_competitor = competitor
            item.rating = competitor.rating
        self.foods = foods

    def __str__(self):
        # Render as the list of each food's string form.
        return str([str(item) for item in self.foods])
988,226 | 6ccadd3c30b9ce8bf404b58b663e9d5142766768 | from django.db import models
from mezzanine.pages.models import Page
from mezzanine.core.models import RichText
# Create your models here.
class Registration(Page, RichText):
    """Mezzanine CMS page that fronts a registration form."""
    def __unicode__(self):
        return self.title
class RegisterInfo(models.Model):
    """One submitted registration, linked to its Registration page.

    NOTE(review): the DecimalField() declarations below omit the required
    max_digits/decimal_places arguments — Django's model checks will flag
    these; also `date`/`phone` as decimals look like type mismatches.
    Confirm the intended field types.
    """
    form = models.ForeignKey("Registration")
    date = models.DecimalField()
    name = models.CharField(max_length=50)
    workplace = models.CharField(max_length=200)
    jobtime = models.DecimalField()
    graduate = models.CharField(max_length=100)
    email = models.EmailField()
    phone = models.DecimalField()
    resume = models.TextField()
    separate = models.CharField(max_length=10)
    question1 = models.TextField()
    question2 = models.TextField()
    question3 = models.TextField()
    remark = models.CharField(max_length=10)
    invoice = models.CharField(max_length=100)
    referrer = models.CharField(max_length=100)
988,227 | 8ffd5fc2d3a09a52fc83ec7d39806a062ee3aad9 | from django.shortcuts import render, redirect, get_object_or_404
from django.core.urlresolvers import reverse
from django.core.exceptions import ObjectDoesNotExist
from django.db import transaction
# Decorator to use built-in authentication system
from django.contrib.auth.decorators import login_required
# Used to create and manually log in a user
from django.contrib.auth.models import User
from django.contrib.auth import login, authenticate
# Needed to manually create HttpResponses or raise an Http404 exception
from django.http import HttpResponse, Http404
# Helper function to guess a MIME type from a file name
from mimetypes import guess_type
# Used to send mail from within Django
from django.core.mail import send_mail
# Used to generate a one-time-use token to verify a user's email address
from django.contrib.auth.tokens import default_token_generator
from models import *
from forms import *
@transaction.atomic
@login_required
def homepage(request):
    """Render the home feed: other users' grumbls with their comments."""
    grumbls = Grumbl.get_grumbls_others(request.user)
    combos = []
    for grumbl in grumbls:
        grumbl_comments = Comment.get_comments(grumbl)
        combos.append({
            'grumbl': grumbl,
            'comments': grumbl_comments,
            'num_comments': len(grumbl_comments),
            'num_dislikes': len(grumbl.dislike_list.all()),
        })
    context = {
        'current_user': request.user,
        'grumbl_combos': combos,
        'form_grumbl': GrumblForm(),
        'form_comment': CommentForm(),
        'form_search': SearchForm(),
    }
    return render(request, 'homepage.html', context)
@login_required
def my_grumbls(request):
    """Render the feed restricted to the current user's own grumbls.

    NOTE(review): duplicates the combo-building loop in homepage(); a shared
    helper would remove the duplication.
    """
    context = {}
    # Get current user first
    context['current_user'] = request.user
    # Store forms for HTML files
    context['form_grumbl'] = GrumblForm()
    context['form_comment'] = CommentForm()
    context['form_search'] = SearchForm()
    grumbls = Grumbl.get_grumbls_self(request.user)
    context['grumbl_combos'] = []
    # Get all comments for each grumbl
    for grumbl in grumbls:
        comments = Comment.get_comments(grumbl)
        num_comments = len(comments)
        num_dislikes = len(grumbl.dislike_list.all())
        grumbl_combo = {'grumbl': grumbl,
                        'comments': comments,
                        'num_comments': num_comments,
                        'num_dislikes': num_dislikes}
        context['grumbl_combos'].append(grumbl_combo)
    return render(request, 'my-grumbls.html', context)
@transaction.atomic
@login_required
def add_grumbl(request, next):
    """Create a new Grumbl from POSTed form data, then redirect to *next*."""
    form_grumbl = GrumblForm(request.POST)
    if not form_grumbl.is_valid():
        # Bug fix: `context` was referenced here without ever being defined,
        # so an invalid form raised NameError instead of re-rendering.
        # TODO(review): consider rebuilding the full homepage context
        # (grumbl_combos) as homepage() does, so errors render completely.
        context = {
            'current_user': request.user,
            'grumbl_combos': [],
            'form_grumbl': form_grumbl,
            'form_comment': CommentForm(),
            'form_search': SearchForm(),
        }
        return render(request, 'homepage.html', context)
    # If we get valid data from the form, save it.
    new_grumbl = Grumbl(text=form_grumbl.cleaned_data['grumbl'], user=request.user)
    new_grumbl.save()
    # Redirect prevents reposting via page refresh.
    return redirect(next)
@transaction.atomic
@login_required
def add_comment(request, grumbl_id, next):
    """Attach a new Comment to grumbl *grumbl_id*, then redirect to *next*."""
    form_comment = CommentForm(request.POST)
    if not form_comment.is_valid():
        # Bug fix: `context` was referenced here without being defined, so an
        # invalid comment form raised NameError instead of re-rendering.
        context = {
            'current_user': request.user,
            'grumbl_combos': [],
            'form_grumbl': GrumblForm(),
            'form_comment': form_comment,
            'form_search': SearchForm(),
        }
        return render(request, 'homepage.html', context)
    # Get the parent grumbl via grumbl_id.
    errors = []
    try:
        parent_grumbl = Grumbl.objects.get(id=grumbl_id)
        # If we get valid data from the form, save it.
        new_comment = Comment(text=form_comment.cleaned_data['grumbl_comment'],
                              user=request.user,
                              grumbl=parent_grumbl)
        new_comment.save()
    except ObjectDoesNotExist:
        # NOTE(review): errors are collected but never shown to the user.
        errors.append('The grumbl did not exist.')
    # Prevent from reposting via refreshing the page.
    return redirect(next)
@login_required
def dislike(request, grumbl_id, next):
    """Toggle the current user's dislike on grumbl *grumbl_id*, then redirect.

    NOTE(review): unlike add_grumbl/add_comment this mutating view is not
    wrapped in @transaction.atomic — confirm whether that is intentional.
    """
    # Get the parent grumbl via g_id
    errors = []
    try:
        parent_grumbl = Grumbl.objects.get(id=grumbl_id)
        current_user = request.user
        # A second click removes the dislike (toggle semantics).
        if current_user in parent_grumbl.dislike_list.all():
            parent_grumbl.dislike_list.remove(current_user)
        else:
            parent_grumbl.dislike_list.add(current_user)
    except ObjectDoesNotExist:
        # NOTE(review): errors are collected but never surfaced to the user.
        errors.append('The grumbl did not exist.')
    return redirect(next)
@login_required
def block(request, user_id):
    """Toggle whether the current user blocks user *user_id*, then go home."""
    # Get the user via u_id
    try:
        target_user = User.objects.get(id=user_id)
        current_user = request.user
        # Toggle membership in the block list.
        if target_user in current_user.relationship.block_list.all():
            current_user.relationship.block_list.remove(target_user)
        else:
            current_user.relationship.block_list.add(target_user)
    except ObjectDoesNotExist:
        # NOTE(review): errors are collected but never surfaced to the user.
        errors = []
        errors.append('The user did not exist.')
    return redirect('/')
@login_required
def follow(request, user_id):
    """Toggle whether the current user follows user *user_id*, then go home.

    Mirrors block() but operates on the follow list.
    """
    # Get the user via u_id
    try:
        target_user = User.objects.get(id=user_id)
        current_user = request.user
        # Toggle membership in the follow list.
        if target_user in current_user.relationship.follow_list.all():
            current_user.relationship.follow_list.remove(target_user)
        else:
            current_user.relationship.follow_list.add(target_user)
    except ObjectDoesNotExist:
        # NOTE(review): errors are collected but never surfaced to the user.
        errors = []
        errors.append('The user did not exist.')
    return redirect('/')
@login_required
def my_following(request):
    """List the profiles the current user follows; POST toggles one entry."""
    context = {}
    if request.method == 'POST':
        try:
            # Bug fix: `user_id` was referenced but never defined, so every
            # POST raised NameError. Read it from the POST payload instead.
            # TODO(review): confirm the field name used by the template form.
            target_user = User.objects.get(id=request.POST.get('user_id'))
            current_user = request.user
            if target_user in current_user.relationship.follow_list.all():
                current_user.relationship.follow_list.remove(target_user)
            else:
                current_user.relationship.follow_list.add(target_user)
        except ObjectDoesNotExist:
            errors = []
            errors.append('The user did not exist.')
        return redirect('myfollowing')
    # Get current user first
    current_user = request.user
    context['current_user'] = current_user
    follow_list = current_user.relationship.follow_list.all()
    # Resolve each followed user's profile for display.
    profiles = []
    for user in follow_list:
        profile = Profile.objects.get(user=user)
        profiles.append(profile)
    context['profiles'] = profiles
    context['form_search'] = SearchForm()
    return render(request, 'my-following.html', context)
@login_required
def my_blocking(request):
    """List the profiles the current user blocks; POST toggles one entry."""
    context = {}
    if request.method == 'POST':
        try:
            # Bug fix: `user_id` was referenced but never defined, so every
            # POST raised NameError. Read it from the POST payload instead.
            # TODO(review): confirm the field name used by the template form.
            target_user = User.objects.get(id=request.POST.get('user_id'))
            current_user = request.user
            if target_user in current_user.relationship.block_list.all():
                current_user.relationship.block_list.remove(target_user)
            else:
                current_user.relationship.block_list.add(target_user)
        except ObjectDoesNotExist:
            errors = []
            errors.append('The user did not exist.')
        return redirect('myblocking')
    # Get current user first
    current_user = request.user
    context['current_user'] = current_user
    block_list = current_user.relationship.block_list.all()
    # Resolve each blocked user's profile for display.
    profiles = []
    for user in block_list:
        profile = Profile.objects.get(user=user)
        profiles.append(profile)
    context['profiles'] = profiles
    context['form_search'] = SearchForm()
    return render(request, 'my-blocking.html', context)
@login_required
def search(request):
    """Search grumbls or users depending on the `search_type` GET parameter.

    NOTE(review): request.GET['search_type'] / ['search_content'] raise
    KeyError when absent and form_search_main is never validated — confirm
    the template always submits both fields.
    """
    context = {}
    # Get current user first
    context['current_user'] = request.user
    context['form_search'] = SearchForm()
    form_search_main = SearchForm(request.GET)
    context['form_search_main'] = form_search_main
    # search_content = request.GET['search-content']
    # context['search_content'] = search_content
    if request.GET['search_type'] == 'search_grumbls':
        context['grumbls'] = Grumbl.search_grumbls(request.GET['search_content'])
    else:
        context['grumblrs'] = Profile.search_grumblrs(request.GET['search_content'])
    return render(request, 'search.html', context)
@transaction.atomic
@login_required
def profile(request, user_id):
    """Render the public profile page for user *user_id*."""
    context = {}
    # Get current user first
    user = request.user
    context['current_user'] = user
    # Robustness fix: User.objects.get() raised an unhandled DoesNotExist for
    # unknown ids; return a 404 instead, consistent with edit_profile and
    # get_photo which already use get_object_or_404.
    target_user = get_object_or_404(User, id=user_id)
    context['form_search'] = SearchForm()
    target_profile = Profile.objects.get(user=target_user)
    context['target_profile'] = target_profile
    context['target_profile_num_grumbls'] = Profile.get_num_grumbls(target_user)
    return render(request, 'profile.html', context)
@login_required
def edit_profile(request):
    """Display (GET) or apply (POST) edits to the logged-in user's profile.

    NOTE(review): not wrapped in @transaction.atomic unlike sibling mutating
    views — confirm whether that is intentional.
    """
    context = {}
    # Get current user first
    context['current_user'] = request.user
    context['form_search'] = SearchForm()
    profile_to_edit = get_object_or_404(Profile, user=request.user)
    if request.method == 'GET':
        # Pre-populate the form from the stored profile.
        form_profile = ProfileForm(instance=profile_to_edit)
        context['form_profile'] = form_profile
        return render(request, 'edit-profile.html', context)
    else:
        # If method is POST: bind posted data and uploaded files to the
        # existing profile instance.
        form_profile = ProfileForm(request.POST, request.FILES, instance=profile_to_edit)  # Won't conflict?
        if not form_profile.is_valid():
            context['form_profile'] = form_profile
            return render(request, 'edit-profile.html', context)
        form_profile.save()
        url = '/profile/' + str(request.user.id)
        return redirect(url)
@login_required
def get_photo(request, username):
    """Serve the avatar image for *username*, or 404 if none is stored."""
    profile = get_object_or_404(Profile, user=User.objects.get(username=username))
    if not profile.avatar:
        raise Http404
    # Bug fix: mimetypes.guess_type() returns a (type, encoding) tuple; the
    # original passed the whole tuple as content_type, producing an invalid
    # Content-Type header. Unpack the MIME type only.
    content_type, _ = guess_type(profile.avatar.name)
    return HttpResponse(profile.avatar, content_type=content_type)
@transaction.atomic
def register(request):
    """Register a new account and email a one-time confirmation link.

    GET renders the empty registration form; POST validates it, creates an
    inactive User plus its Profile and Relationship rows, emails a token
    link, and renders the "check your email" page.
    """
    context = {}
    # Just display the registration form if it is a GET request
    if request.method == 'GET':
        context['form_registration'] = RegistrationForm()
        return render(request, 'register.html', context)
    else:
        # Creates a bound form from the request POST parameters and makes the
        # form available in the request context dictionary.
        form_registration = RegistrationForm(request.POST)
        context['form_registration'] = form_registration
        # Validates the form.
        if not form_registration.is_valid():
            return render(request, 'register.html', context)
        # If we get here the form data was valid. Register and login the user.
        new_user = User.objects.create_user(username=form_registration.cleaned_data['username'],
                                            email = form_registration.cleaned_data['email'],
                                            password=form_registration.cleaned_data['password1'])
        # Mark the user as inactive to prevent login before email confirmation.
        new_user.is_active = False
        new_user.save()
        # Create a profile for the new user at the same time.
        new_user_profile = Profile(user = new_user)
        new_user_profile.save()
        # Create a relationship for the new user at the same time.
        new_user_relationship = Relationship(user = new_user)
        new_user_relationship.save()
        # Generate a one-time use token and an email message body.
        # NOTE(review): the email text says "Simple Address Book" — looks like
        # a copy-paste from another project; confirm the wording.
        token = default_token_generator.make_token(new_user)
        email_body = """
Welcome to the Simple Address Book. Please click the link below to
verify your email address and complete the registration of your account:
http://%s%s
""" % (request.get_host(),
       reverse('confirm', args=(new_user.username, token)))
        send_mail(subject="Verify your email address",
                  message= email_body,
                  from_email="dfan+@andrew.cmu.edu",
                  recipient_list=[new_user.email])
        context['email'] = form_registration.cleaned_data['email']
        return render(request, 'needs-confirmation.html', context)
@transaction.atomic
def confirm_registration(request, username, token):
    """Activate *username*'s account if the emailed *token* is valid."""
    user = get_object_or_404(User, username=username)
    # Send 404 error if token is invalid
    if not default_token_generator.check_token(user, token):
        raise Http404
    # Otherwise token was valid, activate the user.
    user.is_active = True
    user.save()
    return render(request, 'confirmed.html', {})
# # Logs in the new user and redirects to his/her todo list
# new_user = authenticate(username=form_registration.cleaned_data['username'],
# email = form_registration.cleaned_data['email'],
# password=form_registration.cleaned_data['password1'])
# login(request, new_user)
# return redirect('/')
|
988,228 | 258564184bc5d138fe4fa9e4db9bd41ed462ec65 | # with open('./ErQiaoCrawler/ErQiaoCrawler/anthology.bib','r', encoding='utf-8') as f:
# content_list = f.readlines()
# contents = [x.strip() for x in content_list]
# url_list = []
# for content in contents:
# if content.startswith('url ='):
# url_list.append(content[7:-2])
# with open('./ErQiaoCrawler/ErQiaoCrawler/url_list.txt','a') as f2:
# f2.write(content[7:-2]+'\n')
# from pytube import YouTube
# yt = YouTube("https://youtube.com/watch?v=zs0yOpHWBf8")
# print(yt.streams.filter(progressive=True).all())
# 统计数据
# import pymongo
# collection = ['crossmind', 'crossmind_comment', 'crossmind_reaction', 'acl_anthology']
# client = pymongo.MongoClient('mongodb://localhost:27017')
# db = client['daoerxianqiao']
# crossmind = db['crossmind']
# crossmind_comment = db['crossmind_comment']
# crossmind_reaction = db['crossmind_reaction']
# acl_anthology = db['acl_anthology']
# print('crossmind视频基本信息数目:' + str(crossmind.estimated_document_count()))
# print('crossmind视频数目:' + str(crossmind.count_documents({"video_path":{"$ne":None}})))
# print('crossmindPDF数目:' + str(crossmind.count_documents({"pdf_path":{"$ne":None}})))
# print('crossmind_comment数目:' + str(crossmind_comment.estimated_document_count()))
# print('crossmind_reaction数目:' + str(crossmind_reaction.estimated_document_count()))
# print('acl_anthology基本信息数目:' + str(acl_anthology.estimated_document_count()))
# print('acl_anthology视频数目:' + str(acl_anthology.count_documents({"Video_path":{"$ne":None}})))
# print('acl_anthologyPDF视频数目:' + str(acl_anthology.count_documents({"PDF_path":{"$ne":None}})))
# print('acl_anthologyPTT页数目:' + str(acl_anthology.count_documents({"slide_path":{"$ne":None}})))
# print('acl_anthology其他附件数目:' + str(acl_anthology.count_documents({"Dataset_path":{"$ne":None}})+
# acl_anthology.count_documents({"Software_path":{"$ne":None}})+
# acl_anthology.count_documents({"Source_path":{"$ne":None}})))
|
class Solution:
    """LeetCode 74: search a row-major sorted matrix."""

    def searchMatrix(self, matrix: List[List[int]], target: int) -> bool:
        """Return True iff *target* occurs in *matrix*.

        Rows are sorted and each row starts after the previous row ends, so
        the matrix can be binary-searched as one flat sorted array of
        m*n elements in O(log(m*n)).
        """
        if not matrix:
            return False
        rows, cols = len(matrix), len(matrix[0])
        lo, hi = 0, rows * cols - 1
        while lo <= hi:
            middle = (lo + hi) // 2
            # Map the flat index back to (row, column).
            value = matrix[middle // cols][middle % cols]
            if value == target:
                return True
            if value < target:
                lo = middle + 1
            else:
                hi = middle - 1
        return False
988,230 | c44e131eba8d3f62e4f1062231b8895432058280 | import matplotlib.pyplot as plt
import numpy as np
import re
def draw(plt, values, type, line_style, color, label):
    """Plot *values* against their indices on the given pyplot-like object.

    *type* is the matplotlib format/marker string; line style, color and
    legend label are passed straight through to plot().
    """
    plt.plot(np.arange(len(values)), values, type, linestyle=line_style, color=color, label=label)
if __name__ == '__main__':
    # Parse training logs and plot per-epoch train/validation RMSE curves.
    file_names = ['vgg_16_reduced.log', 'inception_bn.log']
    types = ['-', 'x']
    plt.figure(figsize=(8, 6))
    plt.xlabel("Epoch")
    plt.ylabel("RMSE")

    def _epoch_means(entries):
        # Average the per-batch RMSE values for each epoch, in file order.
        data = {}
        for epoch, batch, rmse in entries:
            # Bug fix: the original compared ints with `is not`, which tests
            # identity, not equality — it only accidentally works for epochs
            # <= 256 thanks to CPython's small-int cache. Use != instead.
            if len(data) == 0 or int(epoch) != data[len(data) - 1][0]:
                data[len(data)] = [int(epoch), float(rmse), 1]
            else:
                data[len(data) - 1][1] += float(rmse)
                data[len(data) - 1][2] += 1
        return [data[k][1] / data[k][2] for k in data]

    for i, file_name in enumerate(file_names):
        log = open(file_name).read()
        log_tr = re.compile(r'.*Epoch\[(\d+)\].*Batch \[(\d+)\].*Train-rmse=([-+]?\d*\.\d+|\d+)').findall(log)
        log_va = re.compile(r'.*Epoch\[(\d+)\].*Validation-rmse=([-+]?\d*\.\d+|\d+)').findall(log)
        log_n_tr = re.compile(r'.*Epoch\[(\d+)\].*Batch \[(\d+)\].*Train-NRMSE=([-+]?\d*\.\d+|\d+)').findall(log)
        log_n_va = re.compile(r'.*Epoch\[(\d+)\].*Validation-NRMSE=([-+]?\d*\.\d+|\d+)').findall(log)
        tr_value = _epoch_means(log_tr)
        n_tr_value = _epoch_means(log_n_tr)
        # Bug fix: validation values came out of the regex as *strings* and
        # were plotted as categorical labels; convert to float first.
        va_value = [float(vals[1]) for vals in log_va]
        n_va_value = [float(vals[1]) for vals in log_n_va]
        draw(plt, tr_value, types[i], '-', 'r', "Train-RMSE/image size, " + file_name)
        draw(plt, va_value, types[i], '-', 'b', "Validation-RMSE/image size, " + file_name)
        draw(plt, n_tr_value, types[i], '--', 'r', "Train-RMSE/iod, " + file_name)
        draw(plt, n_va_value, types[i], '--', 'b', "Validation-RMSE/iod, " + file_name)
    plt.legend(loc="best")
    plt.yticks(np.arange(0, 0.2, 0.01))
    plt.ylim([0, 0.2])
    plt.show()
# Demonstrates mixed-type arithmetic: an int cannot be added to a str.
# (Also dropped the "988,231 | ..." table residue pasted onto the first line.)
num_int = 123
num_str = "456"
print("Data type of num_int:", type(num_int))
print("Data type of num_str:", type(num_str))
# Bug fix: `num_int + num_str` raised TypeError (unsupported operand types).
# Convert the string to int before adding.
print(num_int + int(num_str))
|
988,232 | f99e443d0466488b3713e09c4610f2d3f7e52c6c | from .table import Table
from .database import Database
class View(Table):
    """A database view: a Table that also carries its defining SQL text."""

    def __init__(self, db: Database, schema: str, name: str, view_def: str) -> None:
        # Initialise the definition slot before Table.__init__ runs, then
        # record the (possibly empty) definition text via the setter.
        self.view_definition = None
        super().__init__(db, schema, name)
        self.set_view_definition(view_def)

    def set_view_definition(self, view_definition: str):
        """Store *view_definition* unless it is None or only whitespace."""
        if view_definition is None:
            return
        if view_definition.strip():
            self.view_definition = view_definition

    def get_view_definition(self):
        """Return the stored SQL definition, or None if never set."""
        return self.view_definition

    def is_view(self):
        """Views always report True (overrides Table)."""
        return True
|
988,233 | b289c184c70b195f8f5053be9b319556b4033894 | # -*- encoding: utf-8 -*-
"""
@File : models.py
@Time : 2020/3/12 20:07
@Author : Flack
@Email : opencoding@hotmail.com
@ide : PyCharm
@project : stockapi
@description : 描述
"""
import time
import random
from typing import List
from pydantic import BaseModel, Field
class EntityModel(BaseModel):
    """Base payload model carrying per-request job identifiers."""
    # Bug fix: `default=time.time() + random.randint(...)` was evaluated once
    # at import time, so every instance shared the same "unique" value; the
    # result was also a float assigned to a str field. Use default_factory so
    # a fresh value is generated per instance, and coerce to str to match the
    # annotation.
    seq: str = Field(default_factory=lambda: str(time.time() + random.randint(1000, 9999)), title='本次作业序列号', )
    bus: str = Field(default_factory=lambda: str(time.time() + random.randint(1000, 9999)), title='本次作业业务号', )
    # pass
class StocksModel(EntityModel):
    """Request payload: a list of stock codes."""
    # Bug fix: the default was the empty *string* '' for a List[str] field;
    # use a fresh empty list per instance instead.
    stocks: List[str] = Field(default_factory=list, title='股票代码列表')
class StockInfoModel(BaseModel):
    """Snapshot quote for one stock: prices, five bid/ask levels, volume.

    Field titles (in Chinese) describe each quote component: open/close,
    current/high/low price, bid ("in") and ask ("out") ladder levels 1-5
    with price and share count, traded quantity/value, and the trade
    date/time strings.
    """
    stock_no: str = Field(default='', title='股票代码')
    name: str = Field(default='', title='股票名称')
    today_start: float = Field(default=0.00, title='今日开盘价')
    yesterday_end: float = Field(default=0.00, title='昨日收盘价')
    current_price: float = Field(default=0.00, title='当前价格')
    current_high: float = Field(default=0.00, title='今日最高价')
    current_low: float = Field(default=0.00, title='今日最低价')
    in_1_price: float = Field(default=0.00, title='竞买价,即买一报价')
    out_1_price: float = Field(default=0.00, title='竞卖价,即卖一报价')
    trade_quantity: float = Field(default=0.00, title='成交的股票数,由于股票交易以一百股为基本单位,所以在使用时,通常把该值除以一百')
    trade_money: float = Field(default=0.00, title='成交金额,单位为“元”,为了一目了然,通常以“万元”为成交金额的单位,所以通常把该值除以一万')
    in_1_47_num: float = Field(default=0.00, title='买一申请4695股,即47手')
    in_1_47_price: float = Field(default=0.00, title='买一报价')
    in_2_price: float = Field(default=0.00, title='买二')
    in_2_num: float = Field(default=0.00, title='买二')
    in_3_price: float = Field(default=0.00, title='买三')
    in_3_num: float = Field(default=0.00, title='买三')
    in_4_price: float = Field(default=0.00, title='买四')
    in_4_num: float = Field(default=0.00, title='买四')
    in_5_price: float = Field(default=0.00, title='买五')
    in_5_num: float = Field(default=0.00, title='买五')
    out_1_31_num: float = Field(default=0.00, title='卖一申报3100股,即31手')
    out_1_31_price: float = Field(default=0.00, title='卖一报价')
    out_2_price: float = Field(default=0.00, title='卖二')
    out_2_num: float = Field(default=0.00, title='卖二')
    out_3_price: float = Field(default=0.00, title='卖三')
    out_3_num: float = Field(default=0.00, title='卖三')
    out_4_price: float = Field(default=0.00, title='卖四')
    out_4_num: float = Field(default=0.00, title='卖四')
    out_5_price: float = Field(default=0.00, title='卖五')
    out_5_num: float = Field(default=0.00, title='卖五')
    trade_date: str = Field(default='', title='交易日期')
    trade_time: str = Field(default='', title='交易时间')
|
988,234 | c99416300683e8c718feeba278d098a2c1eec120 | # Am Anfang müssen wir ein paar Sachen importieren
from BeautifulSoup import BeautifulSoup
import urllib
import urllib2
import re
import json
import scraperwiki
# define the order our columns are displayed in the datastore
scraperwiki.metadata.save('data_columns', ['id', 'title'])
# Webseite herunterladen
# (Das »"url%s" % char« ersetzt das »%s« im String durch die Variable char,
# wenn man das mit mehreren machen wollte, ginge das so: »"url%sblah%sblah%i" % (string, noch_ein_string, zahl)«)
website = scraperwiki.scrape("http://tickets.shop.ebay.de/Festivals-Konzerte-/34814/i.html?Stadt=Berlin&_trkparms=65%253A12%257C66%253A4%257C39%253A1%257C72%253A4344&rt=nc&_catref=1&_dmpt=Festivals_Konzerte_1&_ipg=999999&_trksid=p3286.c0.m14.l1581&_pgn=1")
# Neue Instanz von BeautifulSoup mit dem gerade heruntergeladenen Quelltext erstellen
soup = BeautifulSoup(website)
#print(website)
#print("----")
#print len(soup.find("a", "tipps" ))
#print("----")
#<li class="toppg-t">Seite 3 von 3</li>
for li in soup.findAll("li", "toppg-t") :
pages = li.string.split(" von ")[1]
print ("PAGES: " + pages)
for page in range(1, int(pages)+1):
print ("PAGE: " + str(page))
website = scraperwiki.scrape("http://tickets.shop.ebay.de/Festivals-Konzerte-/34814/i.html?Stadt=Berlin&_trkparms=65%253A12%257C66%253A4%257C39%253A1%257C72%253A4344&rt=nc&_catref=1&_dmpt=Festivals_Konzerte_1&_ipg=999999&_trksid=p3286.c0.m14.l1581&_pgn="+pages)
soup2 = BeautifulSoup(website)
for a in soup2.findAll("a", "vip" ):
title = a.string
href = a["href"]
id = href.split("/")[4].split("?")[0]
print title
print id
record = {}
record['id'] = id
record['title'] = title
scraperwiki.datastore.save(["id"], record)
print("----")
print("-----------------------------------------")
# Am Anfang müssen wir ein paar Sachen importieren
from BeautifulSoup import BeautifulSoup
import urllib
import urllib2
import re
import json
import scraperwiki
# define the order our columns are displayed in the datastore
scraperwiki.metadata.save('data_columns', ['id', 'title'])
# Webseite herunterladen
# (Das »"url%s" % char« ersetzt das »%s« im String durch die Variable char,
# wenn man das mit mehreren machen wollte, ginge das so: »"url%sblah%sblah%i" % (string, noch_ein_string, zahl)«)
website = scraperwiki.scrape("http://tickets.shop.ebay.de/Festivals-Konzerte-/34814/i.html?Stadt=Berlin&_trkparms=65%253A12%257C66%253A4%257C39%253A1%257C72%253A4344&rt=nc&_catref=1&_dmpt=Festivals_Konzerte_1&_ipg=999999&_trksid=p3286.c0.m14.l1581&_pgn=1")
# Neue Instanz von BeautifulSoup mit dem gerade heruntergeladenen Quelltext erstellen
soup = BeautifulSoup(website)
#print(website)
#print("----")
#print len(soup.find("a", "tipps" ))
#print("----")
#<li class="toppg-t">Seite 3 von 3</li>
for li in soup.findAll("li", "toppg-t") :
pages = li.string.split(" von ")[1]
print ("PAGES: " + pages)
for page in range(1, int(pages)+1):
print ("PAGE: " + str(page))
website = scraperwiki.scrape("http://tickets.shop.ebay.de/Festivals-Konzerte-/34814/i.html?Stadt=Berlin&_trkparms=65%253A12%257C66%253A4%257C39%253A1%257C72%253A4344&rt=nc&_catref=1&_dmpt=Festivals_Konzerte_1&_ipg=999999&_trksid=p3286.c0.m14.l1581&_pgn="+pages)
soup2 = BeautifulSoup(website)
for a in soup2.findAll("a", "vip" ):
title = a.string
href = a["href"]
id = href.split("/")[4].split("?")[0]
print title
print id
record = {}
record['id'] = id
record['title'] = title
scraperwiki.datastore.save(["id"], record)
print("----")
print("-----------------------------------------")
|
988,235 | 9cc69980e18d63be98d701e2484f20c2c30d4be7 |
# Read the student's name and three component scores from stdin.
nama = input("Masukkan Nama : ")
nilaiKeatifan = int(input("nilai Keatifan : "))
nilaiTugas = int(input("nilai Tugas : "))
nilaiUjian = int(input("nilai Ujian : "))
# Weighted components: activity 20%, assignments 30%, exam 50%.
nilaMurniK = nilaiKeatifan * 20/100
nilaMurniT = nilaiTugas * 30/100
nilaMurniU = nilaiUjian * 50/100
nilaiAkhir = nilaMurniK + nilaMurniU + nilaMurniT
# NOTE(review): `nama` is read but never used in the output.
print("Nilai Akhir Mahasiswa = ",nilaiAkhir)
import FWCore.ParameterSet.Config as cms

from Configuration.AlCa.GlobalTag import GlobalTag
from CondCore.CondDB.CondDB_cfi import *
#----------------------------------------------------------------------------------------------------
lhcInfoDefined = False
def UseLHCInfoLocal(process):
    """Mark LHCInfo as supplied by the process's local sources."""
    global lhcInfoDefined
    lhcInfoDefined = True
def UseLHCInfoGT(process):
    """Take LHCInfo from the Global Tag (local overrides left untouched)."""
    global lhcInfoDefined
    lhcInfoDefined = True
    # these are not defined in local environment by default
    #del process.ctppsRPLHCInfoCorrectionsDataESSourceXML
    #del process.esPreferLocalLHCInfo
def UseLHCInfoFile(process, connection, tag):
    """Load LHCInfo from the DB given by *connection*/*tag* (empty label)."""
    global lhcInfoDefined
    lhcInfoDefined = True
    # Drop the local XML LHCInfo source so the DB payload takes effect.
    del process.ctppsRPLHCInfoCorrectionsDataESSourceXML
    del process.esPreferLocalLHCInfo
    process.ctppsInterpolatedOpticalFunctionsESSource.lhcInfoLabel = ""
    process.ctppsProtons.lhcInfoLabel = ""
    # Bug fix: the clone was stored as `process.CondDBALHCInfo` while the ES
    # source referenced the undefined bare name `CondDBLHCInfo` (NameError at
    # runtime). Store and reference one consistent attribute.
    process.CondDBLHCInfo = CondDB.clone( connect = connection )
    process.PoolDBESSource = cms.ESSource("PoolDBESSource",
        process.CondDBLHCInfo,
        DumpStat = cms.untracked.bool(False),
        toGet = cms.VPSet(cms.PSet(
            record = cms.string('LHCInfoRcd'),
            tag = cms.string(tag)
        )),
    )
    # NOTE(review): unlike the alignment/optics file variants, no ESPrefer is
    # registered here — confirm whether one is needed.
#----------------------------------------------------------------------------------------------------
alignmentDefined = False
def UseAlignmentLocal(process):
    """Mark RP alignment as supplied by the local XML source (must exist)."""
    global alignmentDefined
    alignmentDefined = True
    if not hasattr(process, 'esPreferLocalAlignment'):
        raise ValueError("local alignment chosen, but process.esPreferLocalAlignment not defined")
def UseAlignmentGT(process):
    """Take RP alignment from the Global Tag, dropping any local override."""
    global alignmentDefined
    alignmentDefined = True
    if hasattr(process, 'esPreferLocalAlignment'):
        del process.ctppsRPAlignmentCorrectionsDataESSourceXML
        del process.esPreferLocalAlignment
def UseAlignmentFile(process, connection, tag):
    """Load RP alignment corrections from the DB at *connection* under *tag*.

    NOTE(review): body is identical to UseAlignmentDB below — the two could
    share one implementation.
    """
    global alignmentDefined
    alignmentDefined = True
    # Drop the local XML alignment source so the DB payload takes effect.
    if hasattr(process, 'esPreferLocalAlignment'):
        del process.ctppsRPAlignmentCorrectionsDataESSourceXML
        del process.esPreferLocalAlignment
    process.CondDBAlignment = CondDB.clone( connect = connection )
    process.PoolDBESSourceAlignment = cms.ESSource("PoolDBESSource",
        process.CondDBAlignment,
        #timetype = cms.untracked.string('runnumber'),
        toGet = cms.VPSet(cms.PSet(
            record = cms.string('RPRealAlignmentRecord'),
            tag = cms.string(tag)
        ))
    )
    # Prefer this DB source over any other alignment provider.
    process.esPreferDBFileAlignment = cms.ESPrefer("PoolDBESSource", "PoolDBESSourceAlignment")
def UseAlignmentDB(process, connection, tag):
    """Load RP alignment corrections from a conditions DB.

    NOTE(review): body is identical to UseAlignmentFile above.
    """
    global alignmentDefined
    alignmentDefined = True
    # Drop the local XML alignment source so the DB payload takes effect.
    if hasattr(process, 'esPreferLocalAlignment'):
        del process.ctppsRPAlignmentCorrectionsDataESSourceXML
        del process.esPreferLocalAlignment
    process.CondDBAlignment = CondDB.clone( connect = connection )
    process.PoolDBESSourceAlignment = cms.ESSource("PoolDBESSource",
        process.CondDBAlignment,
        #timetype = cms.untracked.string('runnumber'),
        toGet = cms.VPSet(cms.PSet(
            record = cms.string('RPRealAlignmentRecord'),
            tag = cms.string(tag)
        ))
    )
    process.esPreferDBFileAlignment = cms.ESPrefer("PoolDBESSource", "PoolDBESSourceAlignment")
#----------------------------------------------------------------------------------------------------
opticsDefined = False
def UseOpticsLocal(process):
    """Mark optics as supplied by the local ES source (must exist)."""
    global opticsDefined
    opticsDefined = True
    if not hasattr(process, 'esPreferLocalOptics'):
        raise ValueError("local optics chosen, but process.esPreferLocalOptics not defined")
def UseOpticsGT(process):
    """Take optics from the Global Tag, dropping any local override."""
    global opticsDefined
    opticsDefined = True
    if hasattr(process, 'esPreferLocalOptics'):
        del process.ctppsOpticalFunctionsESSource
        del process.esPreferLocalOptics
def UseOpticsFile(process, connection, tag):
    """Load optical functions from the DB at *connection* under *tag*.

    NOTE(review): body is identical to UseOpticsDB below (only the record
    string quoting differs); the two could share one implementation.
    """
    global opticsDefined
    opticsDefined = True
    # Drop the local optics source so the DB payload takes effect.
    if hasattr(process, 'esPreferLocalOptics'):
        del process.ctppsOpticalFunctionsESSource
        del process.esPreferLocalOptics
    process.CondDBOptics = CondDB.clone( connect = connection )
    process.PoolDBESSourceOptics = cms.ESSource("PoolDBESSource",
        process.CondDBOptics,
        DumpStat = cms.untracked.bool(False),
        toGet = cms.VPSet(cms.PSet(
            record = cms.string("CTPPSOpticsRcd"),
            tag = cms.string(tag)
        )),
    )
    # Prefer this DB source over any other optics provider.
    process.esPreferDBFileOptics = cms.ESPrefer("PoolDBESSource", "PoolDBESSourceOptics")
def UseOpticsDB(process, connection, tag):
    """Load optical functions from a conditions DB.

    NOTE(review): body is identical to UseOpticsFile above.
    """
    global opticsDefined
    opticsDefined = True
    # Drop the local optics source so the DB payload takes effect.
    if hasattr(process, 'esPreferLocalOptics'):
        del process.ctppsOpticalFunctionsESSource
        del process.esPreferLocalOptics
    process.CondDBOptics = CondDB.clone( connect = connection )
    process.PoolDBESSourceOptics = cms.ESSource("PoolDBESSource",
        process.CondDBOptics,
        DumpStat = cms.untracked.bool(False),
        toGet = cms.VPSet(cms.PSet(
            record = cms.string('CTPPSOpticsRcd'),
            tag = cms.string(tag)
        )),
    )
    process.esPreferDBFileOptics = cms.ESPrefer("PoolDBESSource", "PoolDBESSourceOptics")
#----------------------------------------------------------------------------------------------------
#----------------------------------------------------------------------------------------------------
def CheckConditions():
    """Fail fast if any of the three condition sources was never selected."""
    # check choices
    if not lhcInfoDefined:
        raise ValueError("LHCInfo not defined")
    if not alignmentDefined:
        raise ValueError("alignment not defined")
    if not opticsDefined:
        raise ValueError("optics not defined")
|
988,237 | 3a9894adb41324ec3581ce1335745fb1257e1aa3 | import sys
import math
input = sys.stdin.readline
def sum_diff(array, i):
    """Return the total amount by which elements of *array* exceed *i*.

    Equivalent to summing max(num - i, 0) over every num in *array*
    (e.g. the wood yielded by a sawblade set at height *i*).
    """
    return sum(num - i for num in array if num > i)
# Read n (count, unused) and m (required total) from the first stdin line,
# then the array of heights from the second.
n, m = input().split()
m = int(m)
array = [int(x) for x in input().split()]
max_num = max(array)
# Scan cut heights upward: yields are non-increasing in i, so the first
# height whose yield falls below m means the previous height was the
# largest feasible answer; otherwise max(array) itself is printed.
# NOTE(review): this is O(max*n); a binary search over i would be O(n log max).
for i in range(max_num):
    diff = sum_diff(array, i)
    if diff < m:
        max_num = i - 1
        break
print(max_num)
|
988,238 | 40c25f0d983b62f4c7c99747f17803f863cab425 | from kafka import KafkaConsumer
from json import loads
import psycopg2
# NOTE(security/review): database credentials and broker address are
# hard-coded — move them to configuration/environment variables.
connection = psycopg2.connect(user="postgres",
                              password="123456789",
                              host="209.188.7.148",
                              port="5432",
                              database="data")
cursor = connection.cursor()
# Consume the 'customers' topic from the beginning, auto-committing offsets;
# each message value is JSON decoded into a dict.
consumer = KafkaConsumer(
    'customers',
    bootstrap_servers = ['209.188.7.148:9092'],
    auto_offset_reset = 'earliest',
    enable_auto_commit = True,
    value_deserializer = lambda x: loads(x.decode('utf-8')))
# Parameterized insert — values are bound by psycopg2, not interpolated.
INSERT = """INSERT INTO public.customers_new(
    customer_id, dob, gender, city_code)
    VALUES (%s,%s, %s, %s);"""
# Blocks forever: insert each consumed record into Postgres and commit.
# NOTE(review): connection/cursor are never closed, and the expected message
# keys 'ID', 'DOB', 'Gender', 'City' should be confirmed against the producer.
for message in consumer:
    message = message.value
    cursor.execute(INSERT, (message['ID'], message['DOB'], message['Gender'], message['City']))
    connection.commit()
    print('{}'.format(message))
988,239 | 074245b42de0703d7a4e5e896fa07bef318e5c89 | #!/usr/bin/env python
from collections import Counter

# Retained for backward compatibility only: the original accumulated counts
# in this module-level dict, which leaked state across calls. find() no
# longer uses it.
dictonary = {}


def find(alist):
    """Group equal elements of *alist* together, ordered by first appearance.

    Prints and returns the grouped list. Fixes vs. the original:
    - no longer accumulates counts in a module-level dict across calls;
    - Python 3 syntax (the original used `xrange` and print statements,
      which do not run on Python 3);
    - O(n) counting via collections.Counter instead of the O(n^2)
      `key in dict.keys()` scan.
    """
    counts = Counter(alist)
    output = []
    for value in counts:
        output.extend([value] * counts[value])
    print(output)
    return output


Input = [0, 1, 1, 0, 1, 2, 1, 2, 0, 0, 0, 1]
find(Input)
988,240 | f0b2c32d6f43c09e48cf863a868ee0b3e235c0e4 | from marshmallow import Schema, fields, EXCLUDE
class UserSchema(Schema):
    """(De)serialization schema for user records.

    NOTE(review): `missing=False` supplies the boolean False as the
    load-time default of *string* fields — presumably None or '' was
    intended; confirm. (`missing` is also deprecated in favour of
    `load_default` in marshmallow 3.13+.)
    """
    id = fields.Int()
    phone = fields.Int()
    telegram = fields.Int()
    name = fields.Str(allow_none=True, missing=False)
    email = fields.Str(allow_none=True, missing=False)
    registration = fields.Date()
    password = fields.Str()
    messangers = fields.Str(allow_none=True, missing=False)

    @classmethod
    def get_user_data(cls, data):
        """Deserialize *data*, silently dropping unknown keys."""
        user_schema = cls(unknown=EXCLUDE)
        user = user_schema.load(data)
        return user
class NoteSchema(Schema):
    """(De)serialization schema for reminder notes.

    NOTE(review): same `missing=False` concern as UserSchema above applies
    to the string fields here.
    """
    id = fields.Int(allow_none=True)
    name = fields.Str(allow_none=True, missing=False)
    description = fields.Str(allow_none=True, missing=False)
    date_created = fields.Date(allow_none=True)
    date_remind = fields.Date(allow_none=True)
    time = fields.Time(allow_none=True)
    # status = fields.Boolean(allow_none=True, missing=False)
    # user = fields.Nested(UserSchema)

    @classmethod
    def get_note(cls, data):
        """Deserialize *data*, silently dropping unknown keys."""
        note_schema = cls(unknown=EXCLUDE)
        date = note_schema.load(data)
        return date
988,241 | 69b48fe831830fc6a24be3e800f5075e5f3d09ac |
import matplotlib.pyplot as plt
import numpy as np
from pylab import mpl
mpl.rcParams["font.sans-serif"] = ["SimHei"]
mpl.rcParams["axes.unicode_minus"] = False
def barChart():
    """Draw a single bar series (y_data_1) over year labels and show it."""
    x_data = ['2012', '2013', '2014', '2015', '2016', '2017', '2018', '2019', '2020']
    y_data_1 = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9] )
    # NOTE(review): y_data_2 is defined but never used in this chart.
    y_data_2 = [4, 5, 6, 7, 1, 2, 3, 4,9]
    plt.bar(x = x_data, height= y_data_1, label='C语言基础', color='c', alpha=0.8)
    # plt.bar(x=x_data, height = y_data_1)
    # Legend anchored just outside the lower-right corner of the axes.
    plt.legend(loc=[1, 0])
    # plt.legend()
    plt.show()
def muchBarChart():
    """Draw two grouped bar series side-by-side over year labels."""
    x_data = ['2012', '2013', '2014', '2015', '2016', '2017', '2018', '2019', '2020']
    y_data_1 = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9])
    y_data_2 = [4, 5, 6, 7, 1, 2, 3, 4, 9]
    x = np.arange(9)
    # Offset the second series by one bar width to group the bars in pairs.
    width =0.35
    plt.bar( x, y_data_1, width , label='C语言基础', color='c', alpha=1)
    plt.bar( x + width, y_data_2, width, label='B语言基础', color='b', alpha=1)
    # Center the tick labels between each pair of bars.
    plt.xticks( x + width/2, x_data)
    plt.legend(loc=[1, 0])
    plt.show()
def lineAndBarChart():
    """Draw two grouped bar series plus a line over the second series."""
    x_data = ['2012', '2013', '2014', '2015', '2016', '2017', '2018', '2019', '2020']
    y_data_1 = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9])
    y_data_2 = [4, 6,5 , 7, 1, 2, 3, 4, 9]
    x = np.arange(9)
    width = 0.35
    plt.bar(x, y_data_1, width, label='C语言基础', color='c', alpha=1)
    plt.bar(x + width, y_data_2, width, label='B语言基础', color='b', alpha=1)
    # Overlay a line connecting the tops of the second bar series.
    plt.plot(x + width ,y_data_2 )
    plt.xticks(x + width / 2, x_data)
    plt.legend(loc=[1, 0])
    plt.show()
if __name__ == '__main__':
    # Script entry point: draws the combined line + grouped-bar demo chart.
    lineAndBarChart()
988,242 | 717e28239689bb1ef578e21ddc26fabbedb7b6af | import tkinter
# Draws five road/information signs on a tkinter canvas.
canvas=tkinter.Canvas()
canvas.pack()
canvas.config(width= 1500, height=900)
# HOSPITAL sign: blue panel with a white H
canvas.create_rectangle(30,70, 190,300, fill="blue4"); # panel
canvas.create_text(110,175, text="H", font="Arial 100 bold", fill="white"); # H
canvas.create_text(110,230, text="NEMOCNICA", font="Arial 10 bold", fill="white"); # "hospital" caption
# POLICE sign
canvas.create_rectangle(220,70, 370,300, fill="blue4"); # blue panel
canvas.create_rectangle(240,90, 350,230, fill="white"); # white inset
canvas.create_text(295,160, text="POLÍCIA", font="Arial 15 bold"); # "police"
# 80 km/h speed-limit sign
canvas.create_oval(390,70, 620,300 ); # outer white ring
canvas.create_oval(395,75, 615,295, fill="red"); # red ring
canvas.create_oval(420,100, 590,270, fill="white"); # inner white disc
canvas.create_text(505,185, text="80", font="Arial 70 bold"); # 80
# RESERVED-parking sign
canvas.create_rectangle(650,70, 810,300, fill="blue4"); # panel
canvas.create_text(730,140, text="P", font="Arial 90 bold", fill="white"); # P
canvas.create_text(730,270, text="RÉSERVÉ", font="Arial 20 bold", fill="white"); # reserved
# 6-tonne weight-limit sign
canvas.create_oval(830,70, 1060,300, fill="red"); # red ring
canvas.create_oval(855,95, 1035,275, fill="white"); # white disc
canvas.create_text(945,185, text="6 t", font="Arial 70 bold"); # 6 t
# signature
canvas.create_text(500,500, text="Ďurček, III.D", font="Arial 20 bold");
988,243 | c181a00b53d2a77719c10c64fa328858cb765c7b | class FGraph:
def __init__(self, data):
self.data = data
def findFriends(self, user):
return user.friends
def findFriendsFriends(self, user):
allSecondFriends = []
friends = self.findFriends(user)
for friend in friends:
for user in self.data:
if user.id == friend:
allSecondFriends = allSecondFriends + self.findFriends(user)
allSecondFriends = list(set(allSecondFriends))
break
allSecondFriends = [x for x in allSecondFriends if x not in friends]
return allSecondFriends
    def findCommonFriends(self, test):
        """Print friend recommendations ranked by number of mutual friends.

        NOTE(review): the print loop starts at index 1, so the top-ranked
        candidate is never shown, and at least 5 candidates are assumed
        (IndexError otherwise) — confirm this is intended.
        """
        friends = self.findFriends(test)
        secondFriends = self.findFriendsFriends(test)
        commonFriends = []
        for second in secondFriends:
            for user in self.data:
                if user.id == second:
                    # count mutual friends between `test` and this candidate
                    common = len(set(friends) & set(user.friends))
                    commonFriends.append([user.id, common])
                    break
        commonFriends.sort(key=lambda x: x[1], reverse=True)
        i = 1
        # ids are concatenated to strings below, so they are assumed to be str
        print("Recommendations for user " + test.id)
        while i < 5:
            print(commonFriends[i][0] + ", because they have " + str(commonFriends[i][1]) + " friends in common!")
            i += 1
    def findRealFriends(self, test):
        """Print recommendations ranked by a weighted "friendship ratio".

        NOTE(review): the innermost lookup tests `user.id == second` instead of
        `user.id == common`, so the weight added per mutual friend uses the
        CANDIDATE's friend count rather than each mutual friend's own count —
        confirm which was intended. As in findCommonFriends, rank 0 is skipped
        and at least 5 candidates are assumed.
        """
        #Friends of test
        friends = self.findFriends(test)
        #Second friends of test
        secondFriends = self.findFriendsFriends(test)
        commonFriends = []
        #For each secondFriend find common friends
        for second in secondFriends:
            commons = []
            suggestion = 0
            for user in self.data:
                if user.id == second:
                    commons = set(friends) & set(user.friends)
                    break
            for common in commons:
                for user in self.data:
                    if user.id == second:
                        suggestion += 1/len(user.friends)
            commonFriends.append([second, suggestion])
        commonFriends.sort(key=lambda x: x[1], reverse=True)
        print("Recommendations for user " + test.id)
        i = 1
        while i < 5:
            print(str(commonFriends[i][0]) + ", because their friendship ratio is " + str(commonFriends[i][1]))
            i += 1
|
988,244 | 841f81e981af8ad64d2da89b78a1f41c9c884963 | x = float(input("Enter the first number: "))
# Read three more numbers, average the first three inputs, scale by the last.
y = float(input("Enter the second number: "))
z = float(input("Enter the third number: "))
a = float(input("Enter the last number: "))
b = (x+y+z)/3  # average of the first three numbers
c = a*b        # average scaled by the fourth number
print("The average of x , y and z multiplied by a is: ", c)
|
988,245 | 2b0fdf598311f85597d04727f6d33815a43fe54c | #coding = utf-8
import pytest
from selenium import webdriver
from time import sleep, ctime
import os
# Hard-coded browser/driver locations for this machine.
options = webdriver.ChromeOptions()
options.binary_location = "C:/Program Files/Google/Chrome/Application/chrome.exe"
chrome_driver_binary = "D:/Google/chromedriver.exe"
# NOTE(review): the positional driver path and `chrome_options` keyword are
# deprecated/removed in Selenium 4 (use service=/options=) — confirm the
# pinned selenium version.
driver = webdriver.Chrome(chrome_driver_binary, chrome_options=options)
class TestCase():
    """Baidu search smoke tests driven by the module-level `driver`.

    NOTE(review): `find_element_by_id`/`find_element_by_link_text` were removed
    in Selenium 4 — confirm the pinned selenium version.
    """
    def setup_method(self):
        # Open the Baidu home page before every test.
        driver.get("https://www.baidu.com/")
        sleep(3)
    def teardown_method(self):
        sleep(3)
    def teardown_class(self):
        # Quit the shared browser once the whole class has finished.
        sleep(3)
        driver.quit()
    def test_one(self):
        # plain text query
        driver.find_element_by_id("kw").send_keys("Test search")
        driver.find_element_by_id("su").click()
    def test_two(self):
        # special characters
        driver.find_element_by_id("kw").send_keys("***")
        driver.find_element_by_id("su").click()
    def test_three(self):
        driver.find_element_by_id("kw").send_keys("@")
        driver.find_element_by_id("su").click()
    def test_four(self):
        # whitespace-only query
        driver.find_element_by_id("kw").send_keys(" ")
        driver.find_element_by_id("su").click()
    def test_five(self):
        # navigate to the "news" link
        driver.find_element_by_link_text("新闻").click()
|
988,246 | cbf7579847a348e8ae9c6f445469d82c685efb29 | import pygame
from pygame.locals import *
from .EventSE import EventSound
class MenuDetectIcon():
    """Hit-detection area for a menu icon, stored as an (x, y, w, h) tuple."""
    def __init__(self, point:tuple):
        x, y, w, h = point[0], point[1], point[2], point[3]
        self.point = (x, y, w, h)  # x, y, width, height
class MainMenu():
    """Keyboard-driven main menu rendered inside a bordered box."""
    def __init__(self, menu_list:list, color:tuple, size:tuple, font_size:int):
        self.menu_list = menu_list
        self.color = color
        self.size = size  # (x, y, w, h) of the menu box
        self.font_size = font_size
        self.font = pygame.font.Font('font_data/PixelMplus-20130602/PixelMplus10-Regular.ttf', font_size)
        self.renders = []  # pre-rendered text surfaces, one per entry
        #self.text_rect = pygame.Rect()
        self.menu_box = pygame.Rect(self.size[0], self.size[1], self.size[2], self.size[3])
        # Triangle cursor vertices ("carsol" = cursor); shifted as selection moves.
        self.carsol = [[self.size[0] + 10, self.size[1] + 20],[self.size[0] + 30, self.size[1] + 30], [self.size[0] + 10, self.size[1] + 40]]
        self.carsol_cnt = 0  # index of the currently highlighted entry
        self.sound = EventSound()
        for text in menu_list:
            self.renders.append(self.font.render(text, True, (255, 255, 255)))
    def display(self, screen:pygame.Surface):
        """Draw the menu box, border, cursor and every entry."""
        pygame.draw.rect(screen, self.color, self.menu_box)  # menu body
        pygame.draw.rect(screen, (255, 255, 255), self.menu_box, 3)  # border
        pygame.draw.polygon(screen, (255, 255, 255), self.carsol)
        for i, render in enumerate(self.renders):
            screen.blit(render, (self.size[0] + 40, self.size[1]+ 15 + i * (self.font_size + 20)))
    def carsol_controle(self, event:pygame.event):
        """Move the selection cursor on UP/DOWN key presses (RETURN is a stub)."""
        if event.type == KEYDOWN:
            if event.key == K_RETURN:
                pass
                #self.sound.key_Enter.play()
            # move down unless already on the last entry
            if self.carsol_cnt < len(self.menu_list)-1:
                if event.key == K_DOWN:
                    self.carsol_cnt += 1
                    self.sound.menu_carsol_move.play()
                    for pos in self.carsol:
                        pos[1] += (self.font_size + 20)
            # move up unless already on the first entry
            if self.carsol_cnt != 0:
                if event.key == K_UP:
                    self.carsol_cnt -= 1
                    self.sound.menu_carsol_move.play()
                    for pos in self.carsol:
                        pos[1] -= (self.font_size + 20)
#Example / manual test harness
if __name__ == "__main__":
    from LocalFunc import exit_game
    # init pygame window
    pygame.init()
    #font = pygame.font.Font('font_data/PixelMplus-20130602/PixelMplus12-Regular.ttf', 20)
    #font.set_bold(True)
    width = 800  # screen
    height = 640
    pygame.display.set_mode((width, height), 0, 32)
    screen = pygame.display.get_surface()
    pygame.display.set_caption("マス打")
    # renamed from `list` to stop shadowing the builtin of the same name
    menu_items = [
        "はじめる",
        "終わる",
        "しこる"
    ]
    menu_point = (20, 20, 200, 200)
    menu = MainMenu(menu_items, (0, 0, 0), menu_point, 30)
    sound = EventSound()
    # Event/draw loop; exit_game() is expected to terminate on a quit event.
    while True:
        menu.display(screen)
        pygame.display.update()
        for event in pygame.event.get():
            sound.event_catch_se(event)
            menu.carsol_controle(event)
            exit_game(event)
|
988,247 | d594f52e0268cb32963015216203ea4829e6d639 | import numpy
import math
import random
import matplotlib.pyplot
import sklearn.discriminant_analysis
# create data from a multivariate normal distribution
mean1 = [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4]
mean2 = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
cov = [[3, 1, 1, 1, 0, 0, 0, 0, 0, 0],
       [1, 4, 0, 0, 0, 0, 0, 0, 0, 0],
       [1, 0, 4, 0, 0, 0, 0, 0, 0, 0],
       [1, 0, 0, 5, 0, 0, 0, 0, 0, 0],
       [0, 0, 0, 0, 6, 0, 0, 0, 0, 0],
       [0, 0, 0, 0, 0, 10, 0, 0, 0, 0],
       [0, 0, 0, 0, 0, 0, 11, 0, 0, 0],
       [0, 0, 4, 0, 0, 0, 0, 3, 0, 0],
       [0, 0, 0, 5, 0, 0, 0, 0, 2, 0],
       [0, 0, 0, 0, 0, 0, 0, 0, 0, 6],]
# make sure the matrix is symmetric positive definite
cov = numpy.dot(cov, numpy.transpose(cov))
x1 = numpy.random.multivariate_normal(mean1,cov, 1000)
x2 = numpy.random.multivariate_normal(mean2,cov, 1000)
X = numpy.concatenate((x1,x2))
# class labels: first 1000 samples are class 0, next 1000 are class 1
Xc = numpy.zeros(1000)
Xc = numpy.concatenate((Xc, numpy.ones(1000)))
# PCA
# NOTE(review): numpy.mean(X) is the grand mean over ALL entries; per-feature
# centering would be numpy.mean(X, axis=0) — confirm which was intended.
Xmc = X - numpy.mean(X)
D,E = numpy.linalg.eig(numpy.dot(Xmc.T,Xmc))
# NOTE(review): argsort is ascending, so column 0 of ESorted holds the
# SMALLEST eigenvalue's eigenvector; classic PCA keeps the largest — verify.
sortIndex = numpy.argsort(D)
ESorted = numpy.zeros((10,10))
index = 0
for i in range(0,10):
    ESorted[:,index] = E[:,sortIndex[i]]
    index = index + 1
meanSquareError = numpy.zeros(10,)
classificationError = numpy.zeros(10,)
# project the data onto the sorted eigenvector basis
ySorted = numpy.dot(X,ESorted)
lda = sklearn.discriminant_analysis.LinearDiscriminantAnalysis()
for numDims in range(5,11):
    # reconstruction from the first numDims components
    yReduced = ySorted[:,0:numDims]
    EReduced = ESorted[:,0:numDims]
    XReconstructed = numpy.dot(yReduced, numpy.transpose(EReduced))
    meanSquareError[10 - numDims] = sum(sum((XReconstructed - X)**2))/2000
    # classification
    #training
    lda.fit(yReduced,Xc)
    #testing (on the training data itself)
    prediction = lda.predict(yReduced)
    classificationError[10 - numDims] = sum(prediction != Xc) # sum(prediction != Xc)
n = numpy.linspace(0,5,6)
matplotlib.pyplot.plot(n,meanSquareError[0:6])
matplotlib.pyplot.title("PCA")
matplotlib.pyplot.xlabel("Number of Dimensions Removed")
matplotlib.pyplot.ylabel("Mean Square Error")
matplotlib.pyplot.show()
matplotlib.pyplot.plot(n,classificationError[0:6])
matplotlib.pyplot.title("PCA")
matplotlib.pyplot.xlabel("Number of Dimensions Removed")
matplotlib.pyplot.ylabel("Number of Errors")
matplotlib.pyplot.show()
#use backward search to remove columns
#so that the remaining provide the least error
minError = 1000000*numpy.ones(6,)
# classification with no columns removed
#training
lda.fit(ySorted,Xc)
#testing
prediction = lda.predict(ySorted)
minError[0] = sum(prediction != Xc)
#find the column to remove that provides the lowest error
minErrorColumn = 0
ySelected = ySorted
numCols = 10
for iteration in range(1,6):
    for column in range(numCols):
        yReduced = numpy.delete(ySelected,column,1)
        # classification
        #training
        lda.fit(yReduced,Xc)
        #testing
        prediction = lda.predict(yReduced)
        # NOTE(review): this rebinds `classificationError` (the array above) to
        # a scalar — rename one of the two to avoid confusion.
        classificationError = sum(prediction != Xc)
        if classificationError < minError[iteration]:
            minError[iteration] = classificationError
            minErrorColumn = column
    numCols = numCols - 1
    ySelected = numpy.delete(ySelected,minErrorColumn,1)
matplotlib.pyplot.plot(n,minError[0:6])
matplotlib.pyplot.title("Backward Search")
matplotlib.pyplot.xlabel("Number of Dimensions Removed")
matplotlib.pyplot.ylabel("Number of Errors")
matplotlib.pyplot.show()
print("done")
988,248 | 825bb8b62e1305c0a71a2bb12278d52c54db3a06 | # Code Festival - Python Practice 003
# Author : ㄱㄱㅊ
# Title : Variable type
# Date : 20-01-16
l = [100, 200, 300]
print(type(l))  # type() reports the object's class
# 3) class 'list'
|
988,249 | f2359a212cc4db5955cb71e885a210818bc8332d | import numpy as np
_Lx = 64  # bits per row
_Ly = 64  # number of rows
_base = 2**(_Lx-1)  # most-significant bit of a row
_mask = 2**(_Lx) - 1  # row bit-mask (_Lx ones)
def lshift(k):
    """Rotate the _Lx-bit row `k` left by one (MSB wraps around to the LSB)."""
    carried = k // _base
    return ((k << 1) + carried) & _mask
def rshift(k):
    """Rotate the _Lx-bit row `k` right by one (LSB wraps around to the MSB)."""
    carried = (k % 2) * _base
    return ((k >> 1) + carried) & _mask
def compare(a,b=0,kind=0):
    """
    return bit fields in 'a' that can move to
    empty bit fields in 'b'

    'kind' specifies the type of action
     0: XOR(a,b) and then AND with a
     1: check bits in 'a' that can move to the right
    -1: check bits in 'a' that can move to the left
    """
    if kind == 0:
        movable = (a ^ b) & a
    elif kind == 1:
        movable = (a ^ lshift(a)) & a
    elif kind == -1:
        movable = (a ^ rshift(a)) & a
    else:
        # same failure mode as the original dict lookup
        raise KeyError(kind)
    return movable & _mask
def pretty_string(a):
    """Render row `a` as a zero-padded _Lx-character binary string.

    Bug fix: the original referenced the undefined name `_L` (NameError at
    call time); the row-width constant defined in this module is `_Lx`.
    """
    return format(a,'b').rjust(_Lx,'0')
def count_bits(a):
    """Return the number of set bits in `a` (population count)."""
    # bin() adds a '0b' prefix, which contains no '1' characters
    return bin(a).count('1')
def update(lines,rates):
    """One attempted Monte-Carlo sweep over the row configuration.

    NOTE(review): this looks unfinished — `iflag` is never set, so the while
    loop always scans all _Ly rows, and `iselected`/`split` are computed but
    never used to actually move a bit. Confirm against the intended algorithm.
    """
    prob = 0.0
    rng = np.random.rand()  # single uniform draw used as acceptance threshold
    k=0
    iflag = 0
    while ((k<_Ly) and (iflag < 1)):
        kp = (k+1+_Ly)%_Ly  # neighbouring row below (periodic boundary)
        km = (k-1+_Ly)%_Ly  # neighbouring row above (periodic boundary)
        stack = [rshift(lines[k]),lshift(lines[k]),
                 lines[kp],lines[km]]
        for entry in stack:
            v = compare(lines[k],entry)  # bits free to move toward `entry`
            icount= count_bits(v)
            prob += icount*rates
            if rng < prob:
                iselected = np.random.randint(icount)
                split = list(pretty_string(v))
        k +=1
    return lines
def init():
    """Initial configuration: each of the _Ly rows has only bit 0 set."""
    return [1 & _mask] * _Ly
def sample(steps,rates=0.1):
    """Run `steps` sweeps of update() starting from the initial configuration."""
    config = init()
    for _ in range(steps):
        config = update(config, rates)
    return config
|
988,250 | 32baa34f99480708251df01e082b51814677d430 | nums = (i*i for i in range(5))
for x in nums: print x
# The generator is exhausted after the first pass, so this loop prints nothing.
for x in nums: print x
''' Output
0
1
4
9
16
'''
|
988,251 | a08b7c3336d7dc69fd5dfcd89ecd8df8eedad829 | from argparse import ArgumentParser
def parse_args(argv=None):
    """Parse command-line options for the directory-listing tool.

    Args:
        argv: Optional list of argument strings; defaults to sys.argv[1:]
            (parameter added for testability — backward compatible).

    Returns:
        argparse.Namespace with `path` and the boolean flags a/R/l/c/d/r.
    """
    parser = ArgumentParser(description='list directory contents')
    parser.add_argument('path', help='path to directory')
    parser.add_argument('-a', action='store_true', help='include hidden files')
    parser.add_argument('-R', action='store_true', help='recursively show files')
    parser.add_argument('-l', action='store_true', help='display file size')
    parser.add_argument('-c', action='store_true', help='display file line count')
    parser.add_argument('-d', action='store_true', help='display only folders and files count')
    parser.add_argument('-r', action='store_true', help='reverse display order')
    return parser.parse_args(argv)
|
988,252 | 129acdca8f6195d2c101352913a38ee44b6bc497 | #!/usr/bin/env python3
from RubikEncryption import RubikEncryption
# Prompt for the ciphertext and the space-separated key, then decrypt.
ct = input('[>] Ciphertext : ').strip()
key = input('[>] Key : ').split()
re = RubikEncryption()  # NOTE(review): name `re` shadows the stdlib re module
pt = re.decrypt(ct, key)
print('[+] Plaintext : {}'.format(pt))
|
988,253 | 6253b2114646a6153b78992dd5869a13c267cf8b | from lp.tools.scriptloader import ScriptLoader
from config import *
# Configure a loader against the ONTEST database and run the schema script.
sl = ScriptLoader()
sl.dbname = ONTEST_DB_NAME
sl.user = ONTEST_USER
sl.host = ONTEST_HOST
sl.password = ONTEST_PASSWORD
sl.script_file = "dbscripts/schema.sql"
# sl.delete_old()
sl.execute()
|
988,254 | 52f84ef53f240b7d128b73538026f7c0631ec36c | n = int(input())
# For each of the n values, print the maximum of all OTHER values: that is the
# runner-up for the (unique) maximum element and the maximum everywhere else.
values = [int(input()) for _ in range(n)]
top = max(values)
if values.count(top) == 1:
    runner_up = sorted(values, reverse=True)[1]
    for v in values:
        print(runner_up if v == top else top)
else:
    # the maximum appears at least twice, so it is always the answer
    for _ in values:
        print(top)
|
988,255 | 4ca781aeed2a30992ba0a89e2529fa00bfc5e762 | #!/usr/bin/python
import sys
import Adafruit_DHT
import time
import datetime
import paho.mqtt.publish as publish
# ThingSpeak MQTT target (channel credentials are hard-coded).
channelID = "1136666"
apiKey = "3GXSCUY59YQ3M7YZ"
topic = "channels/" + channelID + "/publish/" + apiKey
mqttHost = "mqtt.thingspeak.com"
tTransport = "tcp"
tPort = 1883
tTLS = None
print("[INFO] Data prepared to be uploaded")
with open("/home/pi/rpi_weather_station.csv", "a") as log:
    # Sample the DHT11 on GPIO4 forever, logging to CSV and publishing via MQTT.
    while True:
        now = datetime.datetime.now()
        timeString = now.strftime("%Y-%m-%d %H:%M")
        humidity, temperature = Adafruit_DHT.read_retry(11, 4)
        #print 'Temp: {0:0.1f} C Humidity: {1:0.1f} %'.format(temperature, humidity)
        log.write("{},{},{}\n".format(timeString, temperature, humidity))
        tPayload = "field1=" + str(temperature) + "&field2=" + str(humidity)
        try:
            publish.single(topic, payload=tPayload, hostname=mqttHost, port=tPort, tls=tTLS, transport=tTransport)
            print("[INFO] data sent for 2 fields: ", temperature, humidity)
        # NOTE(review): bare `except` also swallows KeyboardInterrupt/SystemExit;
        # prefer `except Exception` here.
        except:
            print("[info] failure in sending data")
        time.sleep(10)
|
988,256 | 95319efc4b4e1ead27fb3ed70db4b743def53722 | #model list=[Encoder,Discriminator_z, Discriminator_x,Generator,Phi,InvPhi]
import torch
from torch import nn,optim
import itertools
from functional.functional import noise_vector
from .loss import *
def joint_train(model_list,train_load,num_epochs=30,device=None):
    """Jointly train the encoder/decoder GAN with the Phi/InvPhi transforms.

    model_list order: [Encoder, Decoder, D_reconstruct, D_feature, Phi, Invphi]
    NOTE(review): `time` is used below but not imported in this module —
    confirm it is re-exported by `from .loss import *`, otherwise add
    `import time`.
    NOTE(review): all optimizers are re-created every epoch, which resets the
    Adam moment estimates each time — confirm this is intended.
    """
    for epoch in range(num_epochs):
        t_start = time.time()
        print("Epoch:", epoch)
        #model_list=[Encoder,Decoder,D_reconstruct,D_feature,Phi,Invphi]
        Encoder=model_list[0]
        Decoder=model_list[1]
        D_reconstruct=model_list[2]
        D_feature=model_list[3]
        Phi=model_list[4]
        Invphi=model_list[5]
        dr_optimizer = optim.Adam(D_reconstruct.parameters(), lr=0.00005, betas=(0, 0.999))
        df_optimizer = optim.Adam(D_feature.parameters(), lr=0.00005, betas=(0, 0.999))
        g_optimizer = optim.Adam(itertools.chain(*[Encoder.parameters(),Decoder.parameters(),
                                                   Phi.parameters(),Invphi.parameters()]), lr=0.00005, betas=(0, 0.999))
        d_optimizer = optim.Adam(Decoder.parameters(), lr=0.00005, betas=(0, 0.999))
        e_optimizer = optim.Adam(Encoder.parameters(), lr=0.00005, betas=(0, 0.999))
        opt_phi = optim.Adam(Phi.parameters(), lr=0.0001, betas=(0.9, 0.999))
        opt_invphi = optim.Adam(Invphi.parameters(), lr=0.0001, betas=(0.9, 0.999))
        params = [Phi.parameters(), Invphi.parameters()]
        opt_transform=optim.Adam(itertools.chain(*params),lr=0.001,betas=(0.9,0.999))
        for batch_i, (real_images, gender,glasses) in enumerate(train_load):
            batch_size = real_images.size(0)
            real_images=real_images.to(device,dtype=torch.float)
            noi = noise_vector(real_images.size(0))
            # 1. GAN Loss
            fake_data = Decoder(noi).detach()
            d_error, d_pred_real, d_pred_fake =gen_image_loss(True,real_images.float(),
                                                              fake_data,D_reconstruct,
                                                              dr_optimizer,d_optimizer)
            fake_data = Decoder(noi)#noise(real_batch.size(0)))
            g_error = gen_image_loss(False, real_images.float(), fake_data,
                                     D_reconstruct,
                                     dr_optimizer,d_optimizer)
            #2 Adv.Feature Loss
            real_feature=Encoder(real_images.float()).detach()
            df_error_adv=adv_feature_loss(True,noi,real_feature,
                                          D_feature,
                                          df_optimizer,e_optimizer)
            real_feature=Encoder(real_images.float())
            en_error_adv=adv_feature_loss(False,noi,real_feature,
                                          D_feature,
                                          df_optimizer,e_optimizer)
            ######################################################
            latent_vector=Encoder(real_images).detach()
            glass_vector,gender_vector,remain=Phi(latent_vector)
            #3 Reconstruction Loss
            z_tilde=Invphi(torch.cat((glass_vector,gender_vector,remain),1))
            loss_reconstruction=reconstruction_loss_phi(latent_vector,z_tilde,opt_transform)
            #4 Task Loss
            opt_phi.zero_grad()
            loss=TripleletLoss(glass_vector,glasses) + TripleletLoss(gender_vector,gender)
            loss.backward(retain_graph=True)
            opt_phi.step()
            #5 Cyclic Loss
            loss_cycle=cyclic_loss(glass_vector,gender_vector,remain,glasses,gender,opt_transform,
                                   [Encoder,Decoder,Phi,Invphi])
            ##############################################################
            #6 Adv.Image Loss
            fake_data=Decoder(Invphi(torch.cat(Phi(Encoder(real_images.float())),1))).detach()
            d_error1=adv_img_loss(True,real_images.float(),fake_data,
                                  D_reconstruct,dr_optimizer,g_optimizer)
            fake_data=Decoder(Invphi(torch.cat(Phi(Encoder(real_images.float())),1)))
            # NOTE(review): `True` here matches the discriminator call above;
            # a generator step would normally pass False — verify.
            g_error2=adv_img_loss(True,real_images.float(),fake_data,
                                  D_reconstruct,dr_optimizer,g_optimizer)
            # 7. Full_Reconstruction Loss
            reconstructed_data=fake_data
            recons_loss=full_reconstuction_loss(g_optimizer,real_images.float(),reconstructed_data)
            if (batch_i) % 300 == 0:
                print("Batch: ", batch_i)
                print("1:Discriminator_Error: ", d_error.item()," Generator_Error: ", g_error.item()," Recons_Error: ", recons_loss.item())
                print("2:Feature Discriminator Error: ",df_error_adv.item(),"Encoder Error: ", en_error_adv.item())
                print("3 Discriminator_adv_error", d_error1.item(), "Generator_error: ", g_error2.item())
                print("4 Task Loss: ", loss.item())
                print("5 Reconstruction Loss: ",loss_reconstruction.item())
                print("6 Cyclic Loss: ",loss_cycle.item())
        t_end = time.time()
        # NOTE(review): t_start is reset every epoch, yet the elapsed time is
        # divided by the cumulative epoch count — verify the intended metric.
        duration_avg = (t_end - t_start) / (epoch + 1.0)
        print("Elapsed Time: ",duration_avg)
    torch.save(Encoder,'EncoderF.h')
    torch.save(Decoder,'DecoderF.h')
    torch.save(D_feature,'DiscriminatorfF.h')
    torch.save(D_reconstruct,'DiscriminatorrF.h')
    torch.save(Phi,'PhiF.h')
    torch.save(Invphi,'invphiF.h')
|
988,257 | 2dab6861e0bc503926964c519956769f3b7c35dc | # this function calculates the flow rate in the Bernoulli Table
def calc_gpm(kfactor, psi, emitterexponent):
    """Flow rate (GPM) for the Bernoulli table: Q = K * P**n."""
    pressure_term = psi ** emitterexponent
    return kfactor * pressure_term
|
988,258 | 9efe39f0b44f61f3de4063e0c6ea47f43ce12318 | from pyquat import Quaternion
from math_helper import norm, skew
import numpy as np
# Compare three ways of extracting yaw from a random quaternion (Python 2 file).
# for i in range(100):
q = Quaternion.random()
# q = Quaternion.from_axis_angle(np.array([[0, 0, 1]]).T, np.pi/2.)
yaw_true = q.yaw
v = np.array([[0, 0, 1]]).T  # z-axis column vector
# axis/angle decomposition of q
beta = q.elements[1:]
beta /= norm(beta)
alpha = 2.*np.arccos(q.elements[0,0])
yaw_steven = 2.*np.arctan(beta.T.dot(v) * np.tan(alpha/2.))
w = q.rot(v)
s = v.T.dot(w)
delta = skew(v).dot(w)
qhat = Quaternion.exp(s*delta)
qstar = q * qhat.inverse
yaw_superjax = qstar.yaw
print "superjax", (yaw_superjax)
print "steven", (yaw_steven)
print "true", (yaw_true)
# assert abs(yaw_true - yaw_test) < 1e-8, "wrong: true = %f, test = %f" % (yaw_true, yaw_test)
988,259 | 3b6af96b356a98c374caf92ec6d8241a8924365c | import pymongo
# connect with a client or create a new one
myclient = pymongo.MongoClient()#"mongodb://localhost:27017/")
myclient.list_database_names()
# look for table or create a new one
mydb = myclient["orderbook"]
mydb.list_collection_names()
# look for collection or create a new one
mycol = mydb["BTCUSDbitfinex"]
for x in mycol.find():
print(x)
"""
To inspect NoSQL databases, in terminal:
mongo
show dbs # list and show memory usage
use orderbook # switch
db.dropDatabase() # eliminate
""" |
988,260 | b817a5e35694df78d99306ab195f5fa3bd60c4c9 | from .packages import os, Cipher, algorithms, modes
from .default_config import generator_name_list
def generator_import(generator_name, current_file_size, seed_length=None):
    """Instantiate the random-data generator backend named `generator_name`.

    Imports are deliberately done inside each branch so that only the selected
    backend's dependencies are loaded.

    Raises:
        ValueError: for an unknown name, a missing optional dependency, or a
            missing `seed_length` where the backend requires one.
    """
    if generator_name == "basic_randint":
        from .generator_classes.basic_randint_functions import CurrentGenerator
        generator = CurrentGenerator()
    elif generator_name == "bytearray_getrandbits":
        from .generator_classes.bytearray_and_getrandbits_functions import CurrentGenerator
        generator = CurrentGenerator()
    elif generator_name == "map_getrandbits":
        from .generator_classes.map_and_getrandbits_functions import CurrentGenerator
        generator = CurrentGenerator()
    elif generator_name == "os_urandom":
        # `os` is imported conditionally in .packages and may be None
        if os is None:
            raise ValueError("'os_urandom' algorithm must rely on os module in python!")
        from .generator_classes.os_urandom_functions import CurrentGenerator
        generator = CurrentGenerator()
    elif generator_name == "itertools_and_struct":
        from .generator_classes.itertools_and_struct_functions import CurrentGenerator
        generator = CurrentGenerator()
    elif generator_name == "itertools_optimized":
        from .generator_classes.itertools_and_struct_optimized_functions import CurrentGenerator
        generator = CurrentGenerator()
    elif generator_name == "python_native_crypto":
        from .generator_classes.python_native_crypto_functions import CurrentGenerator
        if seed_length is None:
            raise ValueError("Seed length must be assigned for 'python_native_crypto' algorithm")
        generator = CurrentGenerator(seed_length)
    elif generator_name == "cryptography_package":
        # the cryptography primitives are optional; report which one is absent
        error_string = "'cryptography_package' algorithm must rely on cryptography module in python!\n" \
                       "cryptography.hazmat.primitives.ciphers.{} cannot be found!"
        if Cipher is None:
            raise ValueError(error_string.format('Cipher'))
        elif algorithms is None:
            raise ValueError(error_string.format('algorithms'))
        elif modes is None:
            raise ValueError(error_string.format('modes'))
        from .generator_classes.cryptography_package_functions import CurrentGenerator
        if seed_length is None:
            raise ValueError("Seed length must be assigned for 'cryptography_package' algorithm")
        generator = CurrentGenerator(seed_length, current_file_size)
    else:
        raise ValueError(
            "Cannot detect generator: {}\nGenerator should be among the following list:\n{}".format(
                generator_name, generator_name_list))
    return generator
|
988,261 | f35b30b73ed8c222f21207946c42f6bc606ee963 | #!/usr/bin/env python
from distutils.core import setup
from setuptools import find_packages
from glob import glob
# Bug fix: `splitext` and `basename` are used in the py_modules expression
# below but were never imported, making `python setup.py ...` fail with a
# NameError.
from os.path import basename, splitext

setup(
    name="dynmen_scripts",
    version="0.1.0",
    url="https://github.com/frostidaho/dynmen_scripts",
    author="Idaho Frost",
    author_email="frostidaho@gmail.com",
    description="A collection of scripts using dynmen",
    long_description=open('README.rst').read(),
    packages=find_packages('src'),
    package_dir={'': 'src'},
    # module names = source file names under src/ without the .py extension
    py_modules=[splitext(basename(path))[0] for path in glob('src/*.py')],
    entry_points={
        'console_scripts': [
            'rofi-run = dynmen_scripts.rofi_run:main_run',
            'rofi-window = dynmen_scripts.rofi_run:main_window',
            'dyn-dmenu-run = dynmen_scripts.dmenu_run:main',
            'dyn-tmux-sessions = dynmen_scripts.tmux.__main__:main',
            'xquery = dynmen_scripts.xquery:main',
        ],
    },
    install_requires=['dynmen', 'python-xlib'],
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
    ],
)
|
988,262 | 3872e5c5c3cd6b092372b62bb254ed347cae24b6 | from flask import Flask
import settings
import os
#initialize Flask
app = Flask(__name__)

# redundant
@app.route('/')
def home():
    # NOTE(review): returns None — Flask raises an error for a None response;
    # presumably a placeholder page was intended here.
    return

#shutdown Raspberry Pi
@app.route('/shutdown')
def shutdown():
    # NOTE(review): also returns None (see above); the shutdown command itself
    # still runs before the response error.
    os.system("sudo shutdown -h now")

#parse the settings and colors to the set
@app.route('/<method>/<color>/<color2>')
def method(method, color, color2):
    setting = settings.Setting()
    setting.setColors(color, color2)
    setting.setSetting(method)
    return "Empty"

#run server
app.run(host="0.0.0.0")
988,263 | 36a491c3c8f5ca6e80cd26ae05b8e50b306b30ca | def soltion():
    T = int(input())
    # Precompute a hash map of cubes 1..10**6, mapping n**3 -> n,
    # so every query is an O(1) lookup.
    mDict = {}
    for i in range(pow(10, 6) + 1):
        mDict[pow(i, 3)] = i
    keys = mDict.keys()
    for test_case in range(1, T + 1):
        printString ="#"+ str(test_case)
        n = int(input())
        # Print the cube root if n is a perfect cube, otherwise -1.
        if n in keys:
            print(printString + " " + str(mDict[n]))
        else:
            print(printString + " " + str(-1))
soltion() |
988,264 | 2884d66689a5cb0a6e66204a72d485c569e36717 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (C) 2016 Canonical
#
# Authors:
# Didier Roche
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; version 3.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
'''Copying and treating files internal metadata'''
import logging
import os
import re
import shutil
from .tools import replace_file_inline
logger = logging.getLogger(__name__)
import_re = re.compile("##IMPORT (.*)")  # ##IMPORT <path> directive
required_variable_re = re.compile('\[\[([^\[\]]*)\]\]')  # [[VAR]] (required)
optional_variable_re = re.compile('\<\<([^<>]*)\>\>')  # <<VAR>> (optional)
relative_markdown_links = re.compile("\(((?!http|www).[^\)]*\.md)\)")  # (relative.md) links
def import_and_copy_file(source_path, destination_path):
    '''Copy and import file content.

    We handle:
    1. symlinks are replaced with real files
    2. ##IMPORT <file_path> to copy destination file content into current file

    We return an error if we couldn't copy or import all listed filed
    '''
    success = True
    try:
        with open(destination_path, 'w') as dest_f:
            with open(source_path) as source_f:
                success = _copycontent_withimports_tag(source_f, dest_f)
    except UnicodeDecodeError as e:
        # Fall back to direct copy for binary files
        # (copy2 overwrites whatever partial text output was already written)
        logger.debug("Directly copy as can't read {} as text: {}".format(source_path, e))
        shutil.copy2(source_path, destination_path)
    except FileNotFoundError:
        logger.error("Couldn't open {}".format(source_path))
        success = False
    return success
def _copycontent_withimports_tag(source_f, dest_f):
    '''Copy content from one file to the other, handling import tags

    imports within imports are handled, as well as symlinks chaining'''
    success = True
    for line in source_f:
        result = import_re.findall(line)
        if result:
            # line is an ##IMPORT directive: inline the referenced file
            rel_path = result[0]
            try:
                # resolve relative to the REAL location of the source file
                # (realpath follows symlink chains)
                with open(os.path.join(os.path.dirname(os.path.realpath(source_f.name)), rel_path)) as import_f:
                    if not _copycontent_withimports_tag(import_f, dest_f):
                        success = False
            except FileNotFoundError:
                logger.error("Couldn't import {} from {}".format(rel_path, source_f.name))
                success = False
        else:
            dest_f.write(line)
    return success
def _replace_from_map(line, regexp, replace_pattern, device_vars):
'''Abstract replacing the variable from map.
returning tuple is: (newline, failed_keywords_list)
'''
unfound_keywords_list = []
for keyword in regexp.findall(line):
try:
replace_with = device_vars[keyword]
except KeyError:
unfound_keywords_list.append(keyword)
replace_with = ""
# we always replace, even with something empty (useful for optional content)
line = line.replace(replace_pattern.format(keyword), replace_with)
return (line, unfound_keywords_list)
def _replace_line_content(line, filename, device_name, device_vars):
    '''Return current line with replaced variable substitution.

    returning tuple is: (newline, success)
    '''
    success = True
    # Optional <<VAR>> placeholders: missing keys are only informational.
    (line, missing) = _replace_from_map(line, optional_variable_re, "<<{}>>", device_vars)
    for keyword in missing:
        logger.info("{} doesn't have any mapping for {} which is optional in {}".format(
            device_name, keyword, filename))
    # Required [[VAR]] placeholders: missing keys are an error.
    (line, missing) = _replace_from_map(line, required_variable_re, "[[{}]]", device_vars)
    for keyword in missing:
        logger.error("{} doesn't have any mapping for {} which is required in {}".format(
            device_name, keyword, filename))
        success = False
    return (line, success)
def replace_variables(path, device_name=None, device_vars=None):
    '''This variable replacement is done on files being in a per device directory.

    We handle variable substitution (only if device_name is provided)
    [[VARIABLE]] are required variables. It will print an error and return as such (not interrupting though)
    <<VARIABLE>> are optional variables. It will print an info and not return an error

    Fix: the default for device_vars was a mutable dict literal ({}), which is
    shared across calls; use the None-sentinel idiom instead (backward
    compatible — callers omitting the argument see identical behavior).
    '''
    if device_vars is None:
        device_vars = {}
    success = True
    with replace_file_inline(path) as (source_f, dest_f):
        for line in source_f:
            (line, line_success) = _replace_line_content(line, path, device_name, device_vars)
            # one failed line makes the whole file fail, but keep processing
            success = line_success and success
            dest_f.write(line)
    return success
def reformat_links(path):
    '''Strip down the final .md on any relative path in links as it will be replaced with real file names'''
    with replace_file_inline(path) as (source_f, dest_f):
        for line in source_f:
            for link_to_replace in relative_markdown_links.findall(line):
                # drop the trailing ".md" (3 characters)
                line = line.replace(link_to_replace, link_to_replace[:-3])
            dest_f.write(line)
def prepend_external_link(path, message, url):
    '''Prepend to existing page some known template type'''
    with replace_file_inline(path) as (source_f, dest_f):
        line_count = 1
        for line in source_f:
            # add the message as a second line
            # NOTE(review): files with fewer than two lines never receive the
            # link — confirm that is acceptable.
            if line_count == 2:
                dest_f.write("> [{}]({})\n".format(message, url))
            line_count += 1
            dest_f.write(line)
|
988,265 | 8de84162abbe1656b3abadfa39007074e6314b0a | # U06_Ex13_toNumbers.py
#
# Author: Grace Ritter
# Course: Coding for OOP
# Section: A2
# Date: 29 Jan 2019
# IDE: PyCharm
#
# Assignment Info
# Exercise: 13
# Source: Python Programming
# Chapter: 6
#
# Program Description
#
# This program allows the user to enter numbers into a list where the numbers are strings. The program will return the
# list as numbers instead of strings.
#
# Algorithm (pseudocode)
# print introduction
# get user input of length of list
# set the list to have nothing in it but has the length
# of what the user put in
# start a for loop using the list with i
# set i equal to user input (as string)
# print output with original list
# use toNumbers function on list
# print output with the new list
def main():
    """Demo driver using a fixed sample list.

    NOTE(review): this definition is shadowed by the second `def main()` below,
    so it never runs; it also calls `getTypes`, which is not defined anywhere
    in this file.
    """
    sampleList = ['1', '2.0', '3', '4', '5', '6', '7', '8', '9', '10']
    listTypes = getTypes(sampleList)
    print('Before List: {0}; Types: {1}'.format(sampleList, listTypes))
    toNumbers(sampleList)
    listTypes = getTypes(sampleList)
    print(' After List: {0}; Types: {1}'.format(sampleList, listTypes))
def toNumbers(numlist):
    """Convert every element of numlist to float, mutating the list in place."""
    for index, value in enumerate(numlist):
        numlist[index] = float(value)
def main():
    """Read strings from the user, then convert the list to floats in place."""
    size = int(input("length of list: "))
    entries = size * [None]
    for index in range(size):
        entries[index] = str(input("Enter a number in entree #{0} of the list: ".format(index + 1)))
    print("For each string in the list {0},".format(entries), end=" ")
    toNumbers(entries)
    print("is numbers: {0}.".format(entries))


if __name__ == '__main__':
    main()
988,266 | 0a450bcb0bdfe7ba5da0be0b19c81c43c5820128 | from torch.utils.data import Dataset, DataLoader
import numpy as np
import scipy.io as sio
from PIL import Image
import os
import matplotlib.pyplot as plt
import torch
import torchvision.transforms as transforms
ipm_params = {
'FRONT_LEFT':
{
'quaternions' : [ 0.68316462 , -0.68338771 , 0.17581486 , -0.18799863 ],
'translation' : [ 1.28400265 , 0.31639086 , 1.67877024 ],
'intrinsics' : [[879.03824732 , 0. , 613.17597314 ],
[ 0. , 879.03824732 , 524.14407205 ],
[ 0. , 0. , 1. ] ],
},
'FRONT':
{
'quaternions' : [ 0.50745829 , -0.49812866 , 0.49496606 , -0.49934369 ],
'translation' : [ 1.50612211 , -0.03602647 , 1.69421848 ],
'intrinsics' : [[882.61644117 , 0. , 621.63358525 ],
[ 0. , 882.61644117 , 524.38397862 ],
[ 0. , 0. , 1. ] ],
},
'FRONT_RIGHT':
{
'quaternions' : [ -0.19470424 , 0.17808752 , -0.68312934 , 0.68095909 ],
'translation' : [ 1.27320628 , -0.31664681 , 1.68114556 ],
'intrinsics' : [[880.41134027 , 0. , 618.9494972 ],
[ 0. , 880.41134027 , 521.38918482 ],
[ 0. , 0. , 1. ] ],
},
'BACK_LEFT':
{
'quaternions' : [ -0.67797289 , 0.6871698 , 0.19201452 , -0.1768143 ],
'translation' : [ 1.04338732 , 0.31565584 , 1.66400371 ],
'intrinsics' : [[881.28264688 , 0. , 612.29732111 ],
[ 0. , 881.28264688 , 521.77447199 ],
[ 0. , 0. , 1. ] ],
},
'BACK':
{
'quaternions' : [ -0.49033062 , 0.50741961 , 0.50819262 , -0.49379061 ],
'translation' : [ 0.81558292 , -0.00559198 , 1.65395645 ],
'intrinsics' : [[882.93018422 , 0. , 616.45479905 ],
[ 0. , 882.93018422 , 528.27123027 ],
[ 0. , 0. , 1. ] ],
},
'BACK_RIGHT':
{
'quaternions' : [ -0.17126042 , 0.1897148 , 0.68851343 , -0.6786766 ],
'translation' : [ 1.04116266 , -0.31121292 , 1.66718288 ],
'intrinsics' : [[881.63835671 , 0. , 607.66308183 ],
[ 0. , 881.63835671 , 525.6185326 ],
[ 0. , 0. , 1. ] ],
},
}
class pdataset(Dataset):
    """Project dataset: yields a bundle of consecutive camera frames plus the
    flattened (down-scaled) intrinsics of the camera view they came from."""
    def __init__(self, list_file, data_root_path, img_size = [256, 306], bundle_size = 3):
        # NOTE(review): img_size uses a mutable default (never mutated here,
        # but a tuple would be safer); img_size itself is unused in __getitem__.
        self.data_root_path = data_root_path
        self.img_size = img_size
        self.bundle_size = bundle_size  # consecutive frames per sample
        with open(list_file) as file:
            # strip the trailing newline of each listed path
            self.frame_pathes = [x[:-1] for x in file.readlines()]
        self.transform = transforms.Compose([
            # transforms.ToPILImage(),
            transforms.Resize((128, 416)),
            transforms.ToTensor()
        ])
    def __len__(self):
        return len(self.frame_pathes)
    def __getitem__(self, item):
        # read camera intrinsics
        view = self.frame_pathes[item].split('CAM_')[1].split('.')[0]
        # intrinsics divided by 4 — presumably matching a 4x image downscale; verify
        camparams = np.array(ipm_params[view]['intrinsics'])/4.0
        camparams[-1,-1] = 1.
        camparams = camparams.ravel()
        # read image bundle
        # slice the image into #bundle_size number of images
        frame_list = []
        #frame_list_ = []
        #print(self.frame_pathes[item])
        for i in range(self.bundle_size):
            # frame index is encoded after the '_' in the second path component
            sample_id = int(self.frame_pathes[item].split('/')[1].split('_')[1])
            old = self.frame_pathes[item].split('/')
            old[1] = old[1].split('_')[0]+'_'+str(sample_id+i)
            newpath = '/'.join(old)
            img_file = os.path.join(self.data_root_path, newpath)
            #print(img_file)
            # img_ = Image.open(img_file)
            # img_ = self.transform(img_)
            # frame_list_.append(img_)
            left = 0
            top = 0
            right = 306
            bottom = 200
            frame_list.append(np.array(Image.open(img_file).crop((left, top, right, bottom)) .resize((416, 128),resample = Image.BILINEAR)))
        # stack and move channels first: (bundle, C, H, W)
        frames = np.asarray(frame_list).astype(float).transpose(0, 3, 1, 2)
        # frames_ = (torch.stack(frame_list_, dim=0) * 255.).int().numpy().astype(float)
        return frames, camparams
if __name__ == "__main__":
    # Smoke test: print the intrinsics of the 21st sample.
    # NOTE(review): `list_file` and `data_root_path` are not defined in
    # this module — running the file directly raises NameError; they
    # presumably must be set above first.
    dataset = pdataset(list_file, data_root_path)
    for i, data in enumerate(dataset):
        if i == 20:
            print(data[1])
            break
|
988,267 | 35e2182427da309f4d31631b20127c84c02812d9 | from weapon import Weapon
import unittest
class TestWeapon(unittest.TestCase):
    """Unit tests for the Weapon class."""

    def test_weapon_init(self):
        """The constructor stores type, damage and crit chance verbatim."""
        axe = Weapon("axe", 20, 0.2)
        self.assertEqual(axe.type, "axe")
        self.assertEqual(axe.damage, 20)
        self.assertEqual(axe.critical_strike_percent, 0.2)

    def test_critical_hit(self):
        """Over 100 rolls a 20% crit weapon should both hit and miss."""
        axe = Weapon("axe", 20, 0.2)
        saw_crit = False
        saw_normal = False
        for _ in range(100):
            if axe.critical_hit():
                saw_crit = True
            else:
                saw_normal = True
        self.assertTrue(saw_crit)
        self.assertTrue(saw_normal)
if __name__ == '__main__':
unittest.main()
|
988,268 | fd5e896cff96406596a15301ef802470d9e3a965 | import time
import math
import copy
p=[]
pyhz={}
def load_pyhz():
    """Populate the global pinyin->hanzi table from the 'pyhz' data file.

    Each line of the file is "<pinyin> <hz1> <hz2> ...".
    """
    # context manager ensures the handle is closed (the original leaked it)
    with open('pyhz') as file:
        for l in file.readlines():
            line = l.split()
            k = line.pop(0)
            pyhz[k] = line
def veterbi_level(py):
    """Build one Viterbi lattice level for a pinyin syllable.

    Returns a dict mapping each candidate hanzi to {'p': cost, 'str': path},
    plus an 'all' key holding the total occurrence count of the level.
    """
    lvl = {'all': 0.0}
    for hz in pyhz[py]:
        lvl[hz] = {'p': 0.0, 'str': hz}
        # '<hz>.txt' holds an eval-able stats dict with a 'num' total;
        # `with` closes the handle (the original relied on manual close)
        with open(hz + '.txt') as f:
            temp = eval(f.read())
        lvl['all'] = lvl['all'] + temp['num']
    return lvl
def P_x1x2(x1, x2, all1, all2):
    """Negative log transition cost from hanzi x1 to hanzi x2.

    Reads '<x1>.txt' / '<x2>.txt' (eval-able dicts with a 'num' total and
    per-follower counts). `all1` is unused but kept for signature
    compatibility with existing callers.
    """
    # WARNING: eval() executes arbitrary code from the data files; only
    # use trusted files (ast.literal_eval would be the safe alternative).
    with open(x1 + '.txt') as f1:
        x1_dict = eval(f1.read())
    with open(x2 + '.txt') as f2:
        x2_dict = eval(f2.read())
    if x2 in x1_dict:
        # mix bigram probability (heavily weighted) with the unigram prior
        p1 = x1_dict[x2] / x1_dict['num']
        p2 = x2_dict['num'] / all2
        P = (20 * p1 + p2) / 2
    else:
        P = 0
    # small epsilon avoids log(0)
    P = -math.log(P + 0.0000001)
    return P
def P_x2(x2, all2):
    """Negative log unigram cost of hanzi x2 given the level total all2."""
    # trusted data files only: eval() runs whatever the file contains
    with open(x2 + '.txt') as f2:
        x2_dict = eval(f2.read())
    P = x2_dict['num'] / all2
    # epsilon avoids log(0)
    P = -math.log(P + 0.0000001)
    return P
def level_link_to_hz(vl, hz, all2):
    # Return a deep copy of lattice level `vl` in which every candidate's
    # accumulated cost has the transition cost to `hz` added; the 'all'
    # total is carried through untouched.
    tempvl1 = copy.deepcopy(vl)
    for key1 in tempvl1:
        if key1 != 'all':
            tempvl1[key1]['p'] = P_x1x2(key1, hz, tempvl1['all'], all2) + tempvl1[key1]['p']
    return tempvl1
def link(vl1, vl2):
    # Viterbi step: for every candidate in the next level `vl2`, find the
    # cheapest predecessor in `vl1` and record the accumulated cost and
    # best path string so far. Mutates vl2 in place.
    for key2 in vl2:
        if key2 != 'all':
            tempvl1 = copy.deepcopy(level_link_to_hz(vl1, key2, vl2['all']))
            del tempvl1['all']
            # best predecessor = minimal accumulated cost
            k = min(tempvl1, key=lambda x: tempvl1[x]['p'])
            vl2[key2]['p'] = tempvl1[k]['p']
            vl2[key2]['str'] = tempvl1[k]['str'] + vl2[key2]['str']
def listen():
    # Read one line of space-separated pinyin syllables from stdin and run
    # the Viterbi forward pass; returns the final lattice level.
    global p
    p.clear()
    l = input()
    p = l.split()
    # first level is initialised with unigram costs
    head = veterbi_level(p.pop(0))
    for key in head:
        if key != 'all':
            head[key]['p'] = P_x2(key, head['all'])
    # propagate through the remaining syllables
    # NOTE: `next` shadows the builtin of the same name
    for temp in p:
        next = veterbi_level(temp)
        link(head, next)
        head = next
    return head
if __name__ == '__main__':
    load_pyhz()
    # simple REPL: read pinyin, print the minimum-cost hanzi sequence
    while 1:
        print('>>>')
        resultdic = copy.deepcopy(listen())
        del resultdic['all']
        k = min(resultdic, key=lambda x: resultdic[x]['p'])
        print(resultdic[k]['str'])
988,269 | c5bf699240f81845811a50ee3ea64faa0f667b56 | from string import ascii_lowercase
def read_file(file):
    """Parse AoC-day-7 style lines into [prerequisite, step] pairs.

    Each line looks like "Step X must be finished before step Y can begin."
    """
    pairs = []
    with open(file, 'r') as read_lines:
        for line in read_lines:
            # the prerequisite letter always sits at column 5
            prereq = line[5]
            # the dependent step follows the (lowercase) "step " marker
            _, step = line.split('step ')
            pairs.append([prereq, step[0]])
    return pairs
class Node:
    # Dependency-graph node: `prev_dep` lists steps that must finish before
    # this one, `next_dep` lists steps unlocked by it.
    def __init__(self, name, prev_dep, next_dep):
        self.name = name
        self.prev_dep = prev_dep
        self.next_dep = next_dep
def generate_nodes(connections):
    # Build Node objects from [prereq, step] pairs, merging duplicates so
    # each step name appears exactly once.
    available_nodes = []
    for connection in connections:
        append_left, append_right = True, True
        for node in available_nodes:
            if node.name == connection[0]:
                node.next_dep.append(connection[1])
                append_left = False
            elif node.name == connection[1]:
                node.prev_dep.append(connection[0])
                append_right = False
        # create nodes for names not seen before
        if append_left:
            available_nodes.append(Node(connection[0], [], [connection[1]]))
        if append_right:
            available_nodes.append(Node(connection[1], [connection[0]], []))
    return available_nodes
def get_build_order(available_nodes, completed_nodes):
    # Part 1: repeatedly execute the alphabetically-first runnable step
    # until everything is completed; returns the concatenated step names.
    build_order = ''
    while len(completed_nodes) < len(available_nodes):
        executable_nodes = get_executable_nodes(available_nodes, completed_nodes)
        # NOTE(review): assumes an acyclic graph — a cycle would leave
        # executable_nodes empty and raise IndexError here.
        build_order += executable_nodes[0]
        completed_nodes.append(executable_nodes[0])
    return build_order
def any_previous_dependencies(current_node, completed_nodes):
    # Despite the name, this returns True only when *every* prerequisite
    # of current_node is already completed (False if any is outstanding).
    for dependency in current_node.prev_dep:
        if dependency not in completed_nodes:
            return False
    return True
# Part 2
def get_execution_time(letter):
    """Task duration in seconds: 60 base + alphabet position (a=1 .. z=26)."""
    position = {ch: idx for idx, ch in enumerate(ascii_lowercase, start=1)}
    return position[letter.lower()] + 60
class Worker:
    """A worker that holds at most one task at a time."""

    # class-level defaults: no task assigned yet
    task = ''
    start_time = 0
    work_length = 0

    def is_idle(self):
        """True when no task is currently assigned."""
        return self.task == ''

    def new_task(self, task, start_time, work_length):
        """Assign `task` starting at `start_time` for `work_length` seconds."""
        self.task = task
        self.start_time = start_time
        self.work_length = work_length

    def finish_task(self, time):
        """Return and clear the task if it completes exactly at `time`, else ''."""
        if time != self.start_time + self.work_length:
            return ''
        done = self.task
        self.task = ''
        return done
def get_executable_nodes(available_nodes, completed_nodes):
    # All not-yet-completed steps whose prerequisites are satisfied,
    # in alphabetical order.
    return sorted(
        node.name
        for node in available_nodes
        if node.name not in completed_nodes
        and (len(node.prev_dep) == 0
             or any_previous_dependencies(node, completed_nodes))
    )
def get_completion_time(available_nodes, no_workers):
    # Part 2: simulate `no_workers` workers ticking one second at a time;
    # returns the wall-clock time at which the last task finishes.
    time = 0
    completed_nodes = []
    completion_timers = []  # absolute times at which some task will finish
    in_progress = []        # names of tasks currently being worked on
    workers = [Worker() for i in range(no_workers)]
    while len(available_nodes) > len(completed_nodes):
        # collect tasks finishing exactly now and free their workers
        finished_timers = [index for index, value in enumerate(completion_timers) if value == time]
        if finished_timers:
            finished_task = ''.join(worker.finish_task(time) for worker in workers)
            for task in finished_task:
                completed_nodes.append(task)
                in_progress.remove(task)
        # hand newly unblocked tasks to idle workers
        executable_nodes = get_executable_nodes(available_nodes, completed_nodes)
        executable_nodes = [node for node in executable_nodes if node not in in_progress]
        for node_name in executable_nodes:
            for worker in workers:
                if worker.is_idle():
                    execution_time = get_execution_time(node_name)
                    worker.new_task(node_name, time, execution_time)
                    completion_timers.append(time+execution_time)
                    in_progress.append(node_name)
                    # task assigned — move on to the next executable node
                    break
        time += 1
    # the loop advances one tick past the final completion
    return time - 1
if __name__ == '__main__':
    connections = read_file('input')
    available_nodes = generate_nodes(connections)
    # Part 1: serial build order
    build_order = get_build_order(available_nodes, completed_nodes=[])
    print(build_order)
    # Part 2: wall-clock time with 5 parallel workers
    completion_time = get_completion_time(available_nodes, no_workers=5)
    print(completion_time)
|
988,270 | 1eb0db6ed9abc5ac1a1f7e14eda392c6ad9fc974 | # from sklearn.feature_extraction.text import TfidfVectorizer
# tfidf_vectorizer = TfidfVectorizer(max_df=0.8, max_features=200000,
# min_df=0.2, stop_words='english',
# use_idf=True, tokenizer=tokenize_and_stem, ngram_range=(1,3))
# tfidf_matrix = tfidf_vectorizer.fit_transform(synopses)
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.cluster import KMeans
from sklearn.metrics import adjusted_rand_score
import csv
import time
import re
start_time = time.time()
def load_file(file_path):
    """Read comment texts from the first CSV column, stripping URLs.

    The header row is skipped; schemed URLs (http://...) and bare
    domain-style URLs are removed, then runs of whitespace are collapsed.
    """
    comments = []
    with open(file_path, 'r') as file_reader:
        reader = csv.reader(file_reader, delimiter=',', quotechar='"')
        # next(reader) works on Python 2.6+ AND 3; the original
        # reader.next() is Python-2 only and crashes under Python 3
        next(reader)
        for row in reader:
            # strip URLs from the comment text
            text = re.sub('([A-Za-z]+:\/\/[A-Za-z0-9]+\.[A-Za-z0-9]+[^\s-]*)|([A-Za-z]+\.[A-Za-z0-9]+\.[A-Za-z0-9]+[^\s-]*)', '', row[0])
            text = re.sub('\s\s+', ' ', text)
            comments.append(text)
    return comments
# NOTE(review): the statements below use Python-2 print syntax — this
# script targets Python 2 and will not parse under Python 3.
documents = load_file("2016askhistorians.csv")
true_k = 6
vectorizer = TfidfVectorizer(stop_words='english', max_features=200000, use_idf=True, ngram_range=(1,3))
X = vectorizer.fit_transform(documents)
model = KMeans(n_clusters=true_k, init='k-means++', max_iter=1000, n_init=1)
model.fit(X)
print("Top terms per cluster:")
# centroid term weights sorted descending, so the first 15 indices per
# row are that cluster's top terms
order_centroids = model.cluster_centers_.argsort()[:, ::-1]
terms = vectorizer.get_feature_names()
for i in range(true_k):
    print "Cluster %d:" % i,
    for ind in order_centroids[i, :15]:
        print ' %s' % terms[ind].encode('utf-8'),
    print
elapsed_time = time.time() - start_time
print "elapsed time in seconds: " + str(elapsed_time)
|
988,271 | aca01265859016b0830ea8c5a27f0eea2ce5d399 | import oauth2 as oauth
from tweepy import OAuthHandler
import pickle
import os
import requests
from config import *
class Utils:
    """Static helpers for Twitter OAuth clients and simple pickle persistence.

    NOTE(review): the except-comma syntax in getObject confirms this module
    targets Python 2 only.
    """

    @staticmethod
    def saveimage(image_url, filename):
        # Download an image over HTTP and write its bytes to `filename`.
        image = requests.get(image_url)
        file = open(filename, 'wb')
        file.write(image.content)
        file.close()

    @staticmethod
    def getOauthClient():
        # Build an oauth2 client from the module-level config credentials.
        consumer = oauth.Consumer(key=CONSUMER_KEY, secret=CONSUMER_SECRET)
        access_token = oauth.Token(key=OAUTH_TOKEN, secret=OAUTH_TOKEN_SECRET)
        client = oauth.Client(consumer, access_token)
        return client

    @staticmethod
    def getStreamOauth():
        # Tweepy handler for the streaming API, using the same credentials.
        auth = OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
        auth.set_access_token(OAUTH_TOKEN, OAUTH_TOKEN_SECRET)
        return auth

    @staticmethod
    def persist(filename, object):
        # Pickle `object` to `filename`.
        # NOTE(review): text mode 'w' only works for pickle on Python 2;
        # the handle is also never explicitly closed.
        file_handle = open(filename, 'w')
        pickle.dump(object, file_handle)

    @staticmethod
    def isFileEmpty(filename):
        return os.stat(filename).st_size == 0

    @staticmethod
    def getObject(filename):
        # Unpickle `filename`; an empty file yields an empty dict.
        if Utils.isFileEmpty(filename):
            return dict()
        try:
            return pickle.load(open(filename, 'r'))
        except Exception, e:
            raise
|
988,272 | 0350a323064d76d630d4f00b886c50c23800fc42 | class Solution(object):
def listCombinations(self, list1, list2):
rLst = []
for i in list1:
for j in list2:
rLst.append(i+j)
return rLst
def letterCombinations(self, digits):
"""
:type digits: str
:rtype: List[str]
"""
rLst = []
bts = { '2':['a', 'b', 'c'], '3':['d', 'e', 'f'],
'4':['g', 'h', 'i'], '5':['j', 'k', 'l'], '6':['m', 'n', 'o'],
'7':['p', 'q', 'r', 's'], '8':['t', 'u', 'v'], '9':['w', 'x', 'y', 'z']}
for d in digits:
"""print "d:", d, " bts[d],", bts[d]"""
if rLst == []:
rLst = bts[d]
else:
rLst = self.listCombinations(rLst, bts[d])
"""print "rLst,", rLst"""
return rLst
|
988,273 | 11e6763447211882b06237338c31173cd660dd81 | def num(a,b):
for i in range(1,(b+1)):
print(i,end='')
# read the upper bound from stdin and print the sequence
a=int(input())
# NOTE(review): verify num's signature accepts a single argument — as
# defined above it declares two required parameters.
num(a)
|
988,274 | 03b6e13dfc75c8c9ce7df4d70f1a724cd2d34c0d | import re
var_dict = {}
# write your code here
def calculation(list_of_operations):
    # Evaluate a flat +/- token sequence (numbers, variable names, runs of
    # sign characters) and print the result. Variable lookups go through
    # the module-level var_dict.
    subtraction = False
    result = 0
    for operation in list_of_operations:
        if operation.lstrip("+-").isnumeric():
            if subtraction:
                result -= int(operation)
            else:
                result += int(operation)
            subtraction = False
        elif operation == " ":
            continue
        elif re.fullmatch(r"[a-zA-Z]+", operation):
            if operation in var_dict.keys():
                if subtraction:
                    result -= int(var_dict[operation])
                else:
                    result += int(var_dict[operation])
                subtraction = False
            else:
                print("Unknown variable")
                return
        else:
            # a run of sign characters: every '-' flips the pending sign
            for sign in operation:
                if sign == "-":
                    subtraction = not subtraction
    print(result)
def calculate_postfix(postfix: list):
    """Evaluate a postfix token list of ints and +-*/ operator strings.

    Division is floor division. Prints "Invalid expression" and returns
    None when an operator lacks two operands.
    """
    stack = []
    for token in postfix:
        if isinstance(token, int):
            stack.append(token)
            continue
        # operator: needs two operands on the stack
        if len(stack) < 2:
            print("Invalid expression")
            return
        right = stack.pop()
        left = stack.pop()
        if token == "+":
            stack.append(left + right)
        elif token == "-":
            stack.append(left - right)
        elif token == "*":
            stack.append(left * right)
        elif token == "/":
            stack.append(left // right)
    return stack.pop()
def infix_to_postfix(infix_string) -> list:
    # Shunting-yard conversion from an infix string to a postfix token
    # list (ints plus operator strings). Variables are resolved through
    # the module-level var_dict at conversion time. Returns None after
    # printing a diagnostic on any error.
    infix_list = break_formula_into_list(infix_string)
    operator_precedence = {
        "+": 0,
        "-": 0,
        "*": 1,
        "/": 1
    }
    if not infix_list:
        return None
    postfix = list()
    operator_stack = list()
    for item in infix_list:
        item_type = define_symbol_type(item)
        if item_type == "number":
            postfix.append(int(item))
        elif item_type == "word":
            # NOTE(review): a variable whose value is "0" is falsy here and
            # would be reported as unknown — confirm intended behaviour.
            dict_item = var_dict.get(item)
            if dict_item:
                postfix.append(int(dict_item))
            else:
                print("Unknown variable")
                return None
        elif item_type == "operator":
            if len(operator_stack) == 0 or operator_stack[-1] == "(":
                operator_stack.append(item)
            else:
                old_operator = operator_stack[-1]
                if operator_precedence[item] > operator_precedence[old_operator]:
                    operator_stack.append(item)
                else:
                    # pop operators of >= precedence before pushing this one
                    while True:
                        postfix.append(operator_stack.pop())
                        if len(operator_stack) == 0 or \
                                operator_stack[-1] == "(" or \
                                operator_precedence[item] > operator_precedence[operator_stack[-1]]:
                            break
                    operator_stack.append(item)
        elif item == "(":
            operator_stack.append(item)
        elif item == ")":
            if len(operator_stack) == 0:
                print("Invalid expression")
                return None
            # pop until the matching opening parenthesis
            old_operator = operator_stack.pop()
            while old_operator != "(":
                if len(operator_stack) == 0:
                    print("Invalid expression")
                    return None
                postfix.append(old_operator)
                old_operator = operator_stack.pop()
    # flush the remaining operators
    while len(operator_stack) > 0:
        postfix.append(operator_stack.pop())
    return postfix
def break_formula_into_list(formula: str) -> list:
    """Tokenise an infix expression string.

    Returns the token list, or None (after printing a diagnostic) when the
    input mixes letters and digits in one identifier, uses ** or //, or
    contains characters the tokeniser cannot account for.
    """
    if re.search("([a-zA-Z][0-9])|([0-9][a-zA-Z])", formula):
        print("Invalid identifier")
        return None
    if re.search(r"(\*\*)|(//)", formula):
        print("Invalid expression")
        return None
    tokens = re.findall(r"[+\-/*]|[0-9]+|[a-zA-Z]+|[\(\)]", formula)
    # every non-space character must have been captured by some token
    if formula.replace(" ", "") != "".join(tokens):
        print("Incorrect")
        return None
    return tokens
def define_symbol_type(symbol: str) -> str:
    """Classify a token: 'parant', 'number', 'operator', 'word' or 'unknown'."""
    if symbol in ("(", ")"):
        return "parant"
    if symbol.isnumeric():
        return "number"
    if symbol in ("+", "-", "*", "/"):
        return "operator"
    if re.fullmatch("[a-zA-Z]+", symbol):
        return "word"
    return "unknown"
def var_determination(var_string):
    # Handle an assignment line "name=value" (whitespace already stripped
    # by the caller); stores into the module-level var_dict or prints a
    # diagnostic.
    string_parts = var_string.split("=")
    if len(string_parts) != 2:
        print("Invalid assignment")
        return
    match_var_name = re.fullmatch(r"[a-zA-Z]+", string_parts[0])
    # the right-hand side may be another variable name or a signed integer
    match_assignment = re.fullmatch(r"([a-zA-Z]+)|([\+\-]?\d+)", string_parts[1])
    if not match_var_name:
        print("Invalid identifier")
        return
    if not match_assignment:
        print("Invalid assignment")
        return
    if string_parts[1].lstrip("+-").isnumeric():
        var_dict[string_parts[0]] = string_parts[1]
    else:
        # RHS is a name: copy its *current* value (no late binding)
        if string_parts[1] in var_dict.keys():
            var_dict[string_parts[0]] = var_dict[string_parts[1]]
        else:
            print("Unknown variable")
# Interactive calculator REPL: assignments, expressions, /help and /exit.
while True:
    user_input = input()
    input_list = list(user_input.split(" "))
    user_input_cleaned = user_input.replace(" ", "")
    if user_input.count("=") >= 1:
        # assignment statement
        var_determination(user_input_cleaned)
    elif len(input_list) >= 2:
        # multi-token expression: convert to postfix and evaluate
        postfix_list = infix_to_postfix(user_input)
        if postfix_list:
            result = calculate_postfix(postfix_list)
            if result or result == 0:
                print(result)
    elif len(input_list) == 1:
        # single token: command, literal number, or variable name
        if input_list[0] == "/exit":
            break
        elif input_list[0] == "/help":
            print("The program calculates formulas includin +-*/()")
        elif input_list[0].lstrip("+-").isnumeric():
            print(int(input_list[0]))
        elif input_list[0].startswith("/"):
            print("Unknown command")
        elif input_list[0] in var_dict.keys():
            print(var_dict[input_list[0]])
        elif re.fullmatch(r"[a-zA-Z]+", input_list[0]):
            print("Unknown variable")
        elif input_list[0] != "":
            print("Invalid identifier")
print("Bye!")
|
988,275 | f5fa75117f7c2a8d0ec23b630c4170c570d5078b | #!/usr/bin/env python
# coding: utf-8
'''
File Name: direct_arID_trSc.py
Edit Time: 20180422 1926
Content:
match the trackID in di_arID_trID to score from user_track
func:
match_ID_score(file_ID, file_score, file_out)
change the ID in file_in1 to score in file_in2
num1
Version:
1.0
'''
import pdb # debug module
from numpy import size
from time import gmtime, strftime
def match_ID_score(file_ID, file_score, file_out):
f_ID = open(file_ID, 'r')
f_score = open(file_score, 'r')
score_line = f_score.readline()
f_out = open(file_out, 'w')
ID_line = f_ID.readline()
arr_ID = ID_line.strip().split('|')
# arr_ID[0] userID
# arr_ID[1] albumID
# arr_ID[2:] trackID
lastUserID = arr_ID[0]
ii = 1
buff_score = [[]]
# buff_score[y][0] userID
# buff_score[y][1] item_ID
# buff_score[y][2:] item_score
buff_ID = []
buff_ID.append(ID_line.strip().split('|'))
# buff_ID[ii][0] userID
# buff_ID[ii][1] albumID
# buff_ID[ii][2:] trackID
for ID_line in f_ID: # find userID, albumID, trackID
arr_ID = ID_line.strip().split('|')
userID = arr_ID[0]
if userID != lastUserID: #change user
num = ii
buff_score = [list([]) for i in xrange(num)]
# pdb.set_trace()
for y in xrange(0, num):
buff_score[y][:] = ['None']*size(buff_ID[y])
buff_score[y][0] = buff_ID[y][0]
buff_score[y][1] = buff_ID[y][1]
while (score_line):
arr_score = score_line.strip().split('|')
trainUserID = arr_score[0]
trainItemID = arr_score[1]
trainRating = arr_score[2]
if int(trainUserID) > int(lastUserID):
for nn in xrange(0, num):
outStr = str(lastUserID)
for mm in buff_score[nn][1:]:
outStr = outStr+'|'+str(mm)
f_out.write(outStr + '\n')
break
score_line = f_score.readline()
if int(trainUserID) < int(lastUserID):
continue
if trainUserID == lastUserID: # find same user
# pdb.set_trace()
for nn in xrange(0, num):
y = size(buff_ID[nn][:])
for mm in xrange(2, y):
if trainItemID == buff_ID[nn][mm]:
buff_score[nn][mm] = trainRating
ii = 0
buff_ID = []
buff_ID.append(ID_line.strip().split('|'))
ii = ii + 1
lastUserID = userID
num = ii
buff_score = [list([]) for i in xrange(num)]
# pdb.set_trace()
for y in xrange(0, num):
buff_score[y][:] = ['None']*size(buff_ID[y])
buff_score[y][0] = buff_ID[y][0]
buff_score[y][1] = buff_ID[y][1]
while (score_line):
arr_score = score_line.strip().split('|')
trainUserID = arr_score[0]
trainItemID = arr_score[1]
trainRating = arr_score[2]
if int(trainUserID) > int(lastUserID):
for nn in xrange(0, num):
outStr = str(lastUserID)
for mm in buff_score[nn][1:]:
outStr = outStr+'|'+str(mm)
f_out.write(outStr + '\n')
break
score_line = f_score.readline()
if int(trainUserID) < int(lastUserID):
continue
if trainUserID == lastUserID: # find same user
for nn in xrange(0, num):
y = size(buff_ID[nn][:])
for mm in xrange(2, y):
if trainItemID == buff_ID[nn][mm]:
buff_score[nn][mm] = trainRating
f_ID.close()
f_score.close()
def main():
    # Wire up the fixed input paths and run the ID -> score matcher,
    # writing a timestamped output file into the same data directory.
    dataDir = '/home/z/Documents/python/EE627_project/data/data_in_matrixForm/'
    file_ID = dataDir + 'di_arID_trID.txt'
    file_score = dataDir + 'user_track.txt'
    t = strftime('%Y%m%d%H%M', gmtime())
    title = 'di_arID_trSc'+t+'.txt'
    file_out = dataDir + title
    match_ID_score(file_ID, file_score, file_out)

if __name__ == '__main__':
    main()
|
988,276 | 485e7db3ffc571aa2a4c5631e9feb8914887e155 | # coding: utf8
from __future__ import unicode_literals, print_function, division
from collections import Counter
from clldutils.path import Path
from pylexibank import util
def test_aligned():
    # columns are padded so each cell lines up with the widest in its column
    assert util.aligned([('a', 'b'), ('xxx', 'yyy')]) == ' a b\n xxx yyy'
def test_jsondump(tmpdir):
    # jsondump merges new keys into an existing dump file and returns
    # the combined mapping
    fname = str(tmpdir.join('dump.json'))
    res = util.jsondump({'a': 2}, fname)
    assert 'a' in res
    res = util.jsondump({'b': 3}, fname)
    assert res['b'] == 3 and res['a'] == 2
def test_getEvoBibAsBibtex(mocker):
    # the EvoBib fetcher should extract the BibTeX from the <pre> wrapper
    bib = '<pre>@book{key,\ntitle={The Title}\n}\n</pre>'
    mocker.patch(
        'pylexibank.util.get_url', mocker.Mock(return_value=mocker.Mock(text=bib)))
    assert '@book' in util.getEvoBibAsBibtex('')
class MockResponse(object):
    # Minimal stand-in for an HTTP response backed by a local file:
    # status 200 if the path exists, 404 otherwise.
    def __init__(self, p):
        p = Path(p)
        self.status_code = 200 if p.exists() else 404
        self.path = p

    def iter_content(self, *args, **kw):
        # yield the whole file as a single chunk (sufficient for tests)
        if self.path.exists():
            with open(self.path.as_posix(), 'rb') as fp:
                yield fp.read()
class MockRequests(object):
    # Minimal stand-in for the `requests` module: every get() returns a
    # MockResponse for the fixed path given at construction.
    def __init__(self, p):
        self.path = p

    def get(self, *args, **kw):
        return MockResponse(self.path)
def test_DataDir(repos, tmppath, mocker):
    # round-trip the DataDir helpers: xml, tsv, unicode text, mocked
    # download+unpack, and xls -> csv conversion
    dd = util.DataDir(tmppath)
    dd.write('test.xml', '<a>b</a>')
    assert dd.read_xml('test.xml').tag == 'r'
    dd.remove('test.xml')
    dd.write('test.tsv', 'a\tb\nc\td')
    assert dd.read_tsv('test.tsv') == [['a', 'b'], ['c', 'd']]
    t = 'äöüß'
    assert t == dd.read(dd.write('test.txt', t))
    log = mocker.Mock()
    # stub the network layer with the local fixture zip
    mocker.patch(
        'pylexibank.util.requests',
        MockRequests(repos / 'datasets' / 'test_dataset' / 'test.zip'))
    dd.download_and_unpack('', 'test.xlsx', log=log)
    assert log.info.called
    dd.xls2csv('test.xlsx')
    assert dd.read_csv('test.Sheet1.csv') == [['a', 'b', 'c']]
def test_split_by_year():
    """split_by_year separates (author, year, title) from a citation string."""
    cases = [
        (' 2012. abc', ('', '2012', 'abc')),
        (' (2012) abc', ('', '2012', 'abc')),
        ('abc', (None, None, 'abc')),
    ]
    for raw, expected in cases:
        assert util.split_by_year(raw) == expected
def test_data_path():
    """data_path joins the name under the given repos directory."""
    posix = util.data_path('abc', repos=Path('def')).as_posix()
    assert posix.startswith('def')
    assert posix.endswith('abc')
def test_get_badge():
    # smoke test: get_badge must accept ratios across the whole range
    for r in util.pb(list(range(10))):
        util.get_badge((r / 10.0) + 0.5, 'name')
def test_get_reference():
    # a reference needs at least a title; the source id is slugified from it
    assert util.get_reference('John Doe', '1998', 'The Title', None, {})
    assert util.get_reference(None, None, None, None, {}) is None
    assert util.get_reference(None, None, 'The Title', None, {}).source.id == 'thetitle'
def test_sorted_obj():
    """sorted_obj normalises equivalent dicts/Counters to the same form."""
    plain = {'a': [1, 2, 3], 'b': dict(a=3, b=1)}
    counted = {'b': Counter('baaa'), 'a': [1, 2, 3]}
    assert util.sorted_obj(plain) == util.sorted_obj(counted)
    assert util.sorted_obj(counted)['b']['a'] == 3
    # must also cope with JSON-LD style mixed lists without raising
    util.sorted_obj(['http://www.w3.org/ns/csvw', {'@language': 'en'}])
|
988,277 | 43a23c59a0bcacfb56af433a75856a14127b2f6e | from Parser import get_sorted_data
import json
"""
The data is now parsed as a JSON file that can be ingested
by a higher level entity. This issue now, however, is that we
wish to decrease the amount of data we are sending to reduce traffic. One method is
to apply a moving average filter.
Modify `get_sorted_data` to take `num_samples` as an
optional parameter. If `num_samples` is specified, get_sorted_data
will apply a moving average filter that will take the average of the
previous `num_samples` number of samples per header. If `num_samples` is not
specified, then no moving average filter will be applied and the
code will run as in part1.py. As in the previous part, your output
will be a JSON file.
For example, in:
{
"A1": [
{ "osTimeStamp": "1552242292.1146932", "value": 997 },
{ "osTimeStamp": "1552242292.1148102", "value": 997 },
{ "osTimeStamp": "1552242296.999962", "value": 995 },
{ "osTimeStamp": "1552242297.000105", "value": 995 },
{ "osTimeStamp": "1552242302.0143418", "value": 990 },
{ "osTimeStamp": "1552242302.014453", "value": 990 }
],
"A2": [
{ "osTimeStamp": "1552242292.1153128", "value": 3602 },
{ "osTimeStamp": "1552242292.115364", "value": 3602 },
{ "osTimeStamp": "1552242297.0008519", "value": 3602 },
{ "osTimeStamp": "1552242297.000939", "value": 3602 },
{ "osTimeStamp": "1552242302.015125", "value": 3602 },
{ "osTimeStamp": "1552242302.015239", "value": 3602 },
{ "osTimeStamp": "1552242307.015094", "value": 3602 },
{ "osTimeStamp": "1552242307.0151591", "value": 3602 },
{ "osTimeStamp": "1552242311.983746", "value": 3603 }
]
}
If `num_samples` is given a value of '3' then a moving average filter
of length 3 will be applied to the "value" fields under A1 and A2 separately.
* Be sure to handle edge cases — for example, when `num_samples` is too high; it's up to you how you handle them.
* For this exercise, you may also modify part2.py in addition to Parser.py if it helps you.
* Feel free to use any modules/frameworks which may be of use to you in
deriving an effective solution!
"""
if __name__ == '__main__':
    #input_filename = input('Enter the relative path of the file to parse: ')
    input_filename ="Data.txt"
    #output_filename = input('Enter the relative path of the output file: ')
    output_filename="output.json"
    # Sort the input data
    # NOTE(review): num_samples=1 makes the moving-average window trivial
    # (each value averaged with itself) — confirm the intended window size.
    response = get_sorted_data(input_filename,num_samples=1)
    # Now write a JSON file with the new data
    with open(output_filename, 'w') as f:
        json.dump(response, f)
|
988,278 | e71e47b473c6b3b3757c6b486cbb450ca494623c | # put your python code here
# read the box dimensions from stdin, one integer per line
length = int(input())
width = int(input())
height = int(input())
def sum_of_edges(l, w, h):
    """Total length of the 12 edges of an l x w x h cuboid."""
    return 4 * (l + w + h)
def surface_area(l, w, h):
    """Total surface area of an l x w x h cuboid: 2(lw + wh + lh)."""
    return 2 * (l * w + w * h + l * h)
def volume(l, w, h):
    """Volume of an l x w x h cuboid."""
    return l * w * h
# report edge total, surface area and volume, one value per line
print(sum_of_edges(length, width, height))
print(surface_area(length, width, height))
print(volume(length, width, height))
|
988,279 | 56712cfbf9dbdde1d2af8ec03a495beb07d5d956 | from .base_reconstructor import BaseReconstructor
from .raw_reconstructor import RawReconstructor
from .model_reconstructor import ModelReconstructor
|
988,280 | 9805fa8e4d37a54080974bc791671eecb66cbb8c | # Author: Aniketh S Deshpande
# API-name: StudentDashboard
# Flask-Server
# Database: MongoDB
from flask import Flask, request, jsonify
from flask_restful import Resource, Api
from flask_pymongo import PyMongo
from flask_cors import CORS
from random import randint
from hashlib import sha1
from config import get_ip
from show_quiz import SudentPassKeyAuth
ip_address = get_ip()
app = Flask(__name__)
CORS(app)
app.config["MONGO_URI"] = "mongodb://localhost:27017/quiz"
api = Api(app)
mongo = PyMongo(app)
# Student Dashboard API
class StudentDashboard(Resource):
    """POST /student_dashboard: list all quiz attempts for a student.

    Expects a JSON body containing 'email_id'; joins each stored response
    against the 'items' collection on item_password to recover quiz
    metadata, and returns the combined list (or a failure record when the
    student has no responses).
    """
    def post(self):
        obj = request.get_json(force=True)
        response_given = list(mongo.db.responses.aggregate([
            {
                '$match': {
                    'email_id': obj['email_id']
                }
            },
            {
                # join quiz metadata from 'items' on the shared password
                '$lookup': {
                    'from': 'items',
                    'localField': 'item_password',
                    'foreignField': 'item_password',
                    'as': 'lookup'
                }
            },
            {
                '$project': {
                    'lookup': {
                        'subject': 1,
                        'ia': 1,
                        'semester': 1,
                        'questions': 1,
                        'positive_marks': 1,
                        'negative_marks': 1,
                        'gate': 1
                    },
                    'student_response': 1,
                    'score': 1,
                    'item_password': 1
                }
            }
        ])
        )
        # flatten the lookup array (first matched item) into one flat
        # record per attempt
        response_given_list = []
        for response in response_given:
            response_given_list.append({
                'subject': response['lookup'][0]['subject'],
                'ia': response['lookup'][0]['ia'],
                'semester': response['lookup'][0]['semester'],
                'questions': response['lookup'][0]['questions'],
                'positive_marks': response['lookup'][0]['positive_marks'],
                'negative_marks': response['lookup'][0]['negative_marks'],
                'student_response': response['student_response'],
                'score': response['score'],
                'gate': response['lookup'][0]['gate']
            })
        if len(response_given_list)>0:
            return {
                'status': 'success',
                'response_list': response_given_list
            }
        else:
            return {
                'status': 'failed',
                'error': 'NO_RESPONSES'
            }
# resources routing
api.add_resource(StudentDashboard, '/student_dashboard')
if __name__ == '__main__':
app.run(debug=True, host=ip_address, port=5055)
|
988,281 | 7411f4a27867bb1d2e15cb69c03c7462b0dc461e | import os
class Config(object):
    # Flask/MongoEngine configuration; values come from the environment,
    # with an insecure fallback for SECRET_KEY (development only).
    SECRET_KEY=os.environ.get("SECRET_KEY") or "secret_string"
    # DB_PORT_27017_TCP_ADDR must be set in the environment (KeyError otherwise)
    MONGODB_SETTINGS = { 'db' : 'MY_BANK' ,'host' : os.environ["DB_PORT_27017_TCP_ADDR"] , 'port': 27017}
|
988,282 | d246134bc8457297e33e3494f64a2a3f977b8a1f | # coding=utf-8
from __future__ import print_function
import sys
# if len(sys.argv) != 4:
# print('Usage:')
# print('python train.py datacfg cfgfile weightfile')
# exit()
import time
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.backends.cudnn as cudnn
from torchvision import datasets, transforms
# from torch.autograd import Variable
import dataset
# import random
# import math
from YOLO2_M import MobileNet, YOLO2_M
from utils import *
from cfg import parse_cfg
# from region_loss import RegionLoss
from darknet import Darknet
# from models.tiny_yolo import TinyYoloNet
from colorama import Fore, Back, Style
# import os
# os.environ["CUDA_VISIBLE_DEVICES"] = "0"
# Training settings
datacfg = "cfg/k6.data"
cfgfile = "./cfg/mobilenet_yolo_voc.cfg"
weightfile = "models/mobilenet_feature.pth"
trained_weightfile = "./backup/k6_mob-416x800/000046.weights"
data_options = read_data_cfg(datacfg)
net_options = parse_cfg(cfgfile)[0]
trainlist = data_options['train']
testlist = data_options['valid']
backupdir = data_options['backup']
nsamples = file_lines(trainlist)
gpus = data_options['gpus'] # e.g. 0,1,2,3
ngpus = len(gpus.split(','))
num_workers = int(data_options['num_workers'])
batch_size = int(net_options['batch'])
max_batches = int(net_options['max_batches'])
learning_rate = float(net_options['learning_rate'])
momentum = float(net_options['momentum'])
decay = float(net_options['decay'])
steps = [float(step) for step in net_options['steps'].split(',')]
scales = [float(scale) for scale in net_options['scales'].split(',')]
show_inter = int(net_options['show_inter'])
# Train parameters
max_epochs = 1000 # 手动设置
use_cuda = True
seed = int(time.time())
eps = 1e-5
save_interval = 2 # epoches
# Test parameters
conf_thresh = 0.25
nms_thresh = 0.4
iou_thresh = 0.5
resume_train = False # 是否从之前的模型恢复训练(保存的weight文件里面会记录之前的参数和epoch数等信息)
use_visdom = True
visdom_env = "YOLO2-Mobilenet-k6"
cudnn.benchmark = True
###############
import visdom
if use_visdom:
viz = visdom.Visdom(env=visdom_env)
print("Total train samples = ", nsamples)
torch.manual_seed(seed)
if use_cuda:
os.environ['CUDA_VISIBLE_DEVICES'] = gpus
print(Fore.YELLOW + "Visible GPUs: " + gpus), print(Style.RESET_ALL)
torch.cuda.manual_seed(seed)
base = MobileNet()
model = YOLO2_M(cfgfile, base)
region_loss = model.loss
if resume_train:
print("RRResume training from previous model.")
model = torch.load(trained_weightfile)
else:
print('Train from scrach, loading base network...')
pretrained_weights = torch.load(weightfile)
model.base.load_state_dict(pretrained_weights)
model.print_network()
region_loss.seen = model.seen
processed_batches = model.seen / batch_size
init_width = model.width
init_height = model.height
init_epoch = model.seen / nsamples
kwargs = {'num_workers': num_workers, 'pin_memory': True} if use_cuda else {}
test_loader = torch.utils.data.DataLoader(
dataset.listDataset(testlist, shape=(init_width, init_height),
shuffle=False,
transform=transforms.Compose([
transforms.ToTensor(),
]), train=False),
batch_size=batch_size, shuffle=False, **kwargs)
if use_cuda:
if ngpus > 1:
print(Fore.YELLOW + "Using MULTI GPUs: " + str(ngpus)), print(Style.RESET_ALL)
model = torch.nn.DataParallel(model).cuda()
print(Fore.YELLOW + "Deploy model on GPUs. done."), print(Style.RESET_ALL)
else:
print(Fore.YELLOW + "Using SINGLE GPUs: " + str(ngpus)), print(Style.RESET_ALL)
model = model.cuda()
params_dict = dict(model.named_parameters())
params = []
for key, value in params_dict.items():
if key.find('.bn') >= 0 or key.find('.bias') >= 0:
params += [{'params': [value], 'weight_decay': 0.0}]
else:
params += [{'params': [value], 'weight_decay': decay * batch_size}]
optimizer = optim.SGD(model.parameters(), lr=learning_rate / batch_size, momentum=momentum, dampening=0,
weight_decay=decay * batch_size)
def adjust_learning_rate(optimizer, batch):
    """Apply the step learning-rate schedule and return the (unscaled) rate.

    Walks the global `steps`/`scales` schedule: every boundary at or below
    `batch` multiplies the base `learning_rate` by its scale (a missing scale
    defaults to 1).  Each optimizer parameter group is then set to
    lr / batch_size, matching the darknet convention of summing the loss
    over the batch.
    """
    lr = learning_rate
    for idx, boundary in enumerate(steps):
        if batch < boundary:
            break
        factor = scales[idx] if idx < len(scales) else 1
        lr *= factor
        if batch == boundary:
            break
    for group in optimizer.param_groups:
        group['lr'] = lr / batch_size
    return lr
if use_visdom:
    # initialize visdom loss plot
    # Per-epoch precision/recall/fscore curve, appended to by test().
    epoch_lot = viz.line(
        X=torch.zeros((1,)).cpu(),
        Y=torch.zeros((1, 3)).cpu(),
        opts=dict(
            xlabel='epoch',
            ylabel='prf',
            title='Test result',
            legend=["precision", "recall", "fscore"]
        )
    )
    # Per-iteration loss components, appended to by train().
    lot = viz.line(
        X=torch.zeros((1,)).cpu(),
        Y=torch.zeros((1, 3)).cpu(),
        opts=dict(
            xlabel='iter',
            ylabel='loss',
            title='Losses',
            legend=["loss_conf", "loss_cls", "loss"]
        )
    )
    # Learning-rate curve, appended to by train().
    lr_lot = viz.line(
        X=torch.zeros((1,)).cpu(),
        Y=torch.zeros((1,)).cpu(),
        opts=dict(
            xlabel='batch',
            ylabel='lr',
            title='Iteration YOLO2-OCR learning rate',
            legend=['lr']
        )
    )
def train(epoch):
    """Run one training epoch over trainlist; log losses/lr to visdom and
    checkpoint the model every `save_interval` epochs."""
    global processed_batches, viz, lr_lot
    t0 = time.time()
    # DataParallel hides the real module behind .module
    if ngpus > 1:
        cur_model = model.module
    else:
        cur_model = model
    # The loader is rebuilt each epoch so listDataset sees the current `seen`
    # counter (presumably drives its multi-scale/warmup behaviour — confirm).
    train_loader = torch.utils.data.DataLoader(
        dataset.listDataset(trainlist, shape=(init_width, init_height),
                            shuffle=True,
                            transform=transforms.Compose([
                                transforms.ToTensor(),
                            ]),
                            train=True,
                            seen=cur_model.seen,
                            batch_size=batch_size,
                            num_workers=num_workers),
        batch_size=batch_size, shuffle=False, **kwargs)
    lr = adjust_learning_rate(optimizer, processed_batches)
    # print(Fore.MAGENTA)
    # print('epoch %d, processed %d samples, lr %f' % (epoch, epoch * len(train_loader.dataset), lr))
    logging('epoch %d, processed %d samples, lr %f' % (epoch, epoch * len(train_loader.dataset), lr))
    # print(Style.RESET_ALL)
    batch_steps = len(train_loader.dataset) / batch_size
    model.train()
    t1 = time.time()
    avg_time = torch.zeros(9)
    for batch_idx, (_, data, target) in enumerate(train_loader):
        if batch_idx % show_inter == 0:
            # print(Fore.YELLOW)
            print("epoch: %d/%d, batch: %d/%d" % (epoch, max_epochs, batch_idx, batch_steps))
            # print(Style.RESET_ALL)
        t2 = time.time()
        # Re-apply the schedule every batch so step boundaries take effect mid-epoch.
        adjust_learning_rate(optimizer, processed_batches)
        processed_batches = processed_batches + 1
        # if (batch_idx+1) % dot_interval == 0:
        #     sys.stdout.write('.')
        if use_cuda:
            data = data.cuda()
            # target= target.cuda()
        t3 = time.time()
        data, target = Variable(data), Variable(target)
        t4 = time.time()
        optimizer.zero_grad()
        t5 = time.time()
        output = model(data)
        t6 = time.time()
        region_loss.seen = region_loss.seen + data.data.size(0)
        loss = region_loss(output, target, batch_idx, show_inter)
        t7 = time.time()
        loss.backward()
        t8 = time.time()
        optimizer.step()
        t9 = time.time()
        # Dead profiling branch: flip `False` to True to print per-stage timing averages.
        if False and batch_idx > 1:
            avg_time[0] = avg_time[0] + (t2 - t1)
            avg_time[1] = avg_time[1] + (t3 - t2)
            avg_time[2] = avg_time[2] + (t4 - t3)
            avg_time[3] = avg_time[3] + (t5 - t4)
            avg_time[4] = avg_time[4] + (t6 - t5)
            avg_time[5] = avg_time[5] + (t7 - t6)
            avg_time[6] = avg_time[6] + (t8 - t7)
            avg_time[7] = avg_time[7] + (t9 - t8)
            avg_time[8] = avg_time[8] + (t9 - t1)
            print('-------------------------------')
            print('       load data : %f' % (avg_time[0] / (batch_idx)))
            print('     cpu to cuda : %f' % (avg_time[1] / (batch_idx)))
            print('cuda to variable : %f' % (avg_time[2] / (batch_idx)))
            print('       zero_grad : %f' % (avg_time[3] / (batch_idx)))
            print(' forward feature : %f' % (avg_time[4] / (batch_idx)))
            print('    forward loss : %f' % (avg_time[5] / (batch_idx)))
            print('        backward : %f' % (avg_time[6] / (batch_idx)))
            print('            step : %f' % (avg_time[7] / (batch_idx)))
            print('           total : %f' % (avg_time[8] / (batch_idx)))
        t1 = time.time()
        # Stream current lr and loss components to the visdom windows.
        if use_visdom:
            viz.line(
                X=torch.ones((1,)).cpu() * epoch,
                Y=torch.FloatTensor([lr]).cpu(),
                win=lr_lot,
                update='append'
            )
            viz.line(
                X=torch.ones((1, 3)).cpu() * processed_batches,
                Y=torch.FloatTensor([region_loss.loss_conf, region_loss.loss_cls, region_loss.loss]).unsqueeze(0).cpu(),
                win=lot,
                update='append'
            )
    print('')
    t1 = time.time()
    logging('training with %f samples/s' % (len(train_loader.dataset) / (t1 - t0)))
    # Periodic checkpoint: persist the unwrapped model's state dict.
    if (epoch + 1) % save_interval == 0:
        if not os.path.exists(backupdir):
            os.makedirs(backupdir)
        logging('save weights to %s/%06d.weights' % (backupdir, epoch + 1))
        cur_model.seen = (epoch + 1) * len(train_loader.dataset)
        torch.save(cur_model.state_dict(), '%s/%06d.weights' % (backupdir, epoch + 1))
        # cur_model.save_weights('%s/%06d.weights' % (backupdir, epoch + 1))
def test(epoch, use_visdom=True):
    """Evaluate on test_loader and log precision/recall/F-score (optionally to visdom)."""
    global viz, epoch_lot
    logging("test epoch: %d" % epoch)
    def truths_length(truths):
        # Ground-truth rows are zero-padded; the first row with x == 0 marks the end.
        # NOTE(review): returns None if all 50 slots are filled — `total + num_gts`
        # would then raise. Confirm 50 is a safe upper bound for this dataset.
        for i in range(50):
            if truths[i][1] == 0:
                return i
    model.eval()
    if ngpus > 1:
        cur_model = model.module
    else:
        cur_model = model
    num_classes = cur_model.num_classes
    anchors = cur_model.anchors
    num_anchors = cur_model.num_anchors
    total = 0.0      # ground-truth boxes seen
    proposals = 0.0  # predictions above conf_thresh
    correct = 0.0    # predictions matched to a ground truth (IoU + class)
    for batch_idx, (_, data, target) in enumerate(test_loader):
        if use_cuda:
            data = data.cuda()
        data = Variable(data, volatile=True)  # legacy no-grad inference (pre-0.4 PyTorch)
        output = model(data).data
        all_boxes = get_region_boxes(output, conf_thresh, num_classes, anchors, num_anchors)
        for i in range(output.size(0)):
            boxes = all_boxes[i]
            boxes = nms(boxes, nms_thresh)
            truths = target[i].view(-1, 5)
            num_gts = truths_length(truths)
            total = total + num_gts
            # NOTE(review): this loop reuses `i`, shadowing the image index of
            # the enclosing loop — harmless only because `i` is rebound again
            # below before any further use. Worth renaming.
            for i in range(len(boxes)):
                if boxes[i][4] > conf_thresh:
                    proposals = proposals + 1
            for i in range(num_gts):
                # box_gt: [x, y, w, h, det_conf, cls_conf, cls_id]
                box_gt = [truths[i][1], truths[i][2], truths[i][3], truths[i][4], 1.0, 1.0, truths[i][0]]
                best_iou = 0
                for j in range(len(boxes)):
                    iou = bbox_iou(box_gt, boxes[j], x1y1x2y2=False)
                    best_iou = max(iou, best_iou)
                    # label correct and the best IoU so far exceeds the threshold
                    if best_iou > iou_thresh and boxes[j][6] == box_gt[6]:
                        correct = correct + 1
                        # this_detected = True
                        break
    # eps guards against division by zero when nothing was proposed/matched.
    precision = 1.0 * correct / (proposals + eps)
    recall = 1.0 * correct / (total + eps)
    fscore = 2.0 * precision * recall / (precision + recall + eps)
    logging("precision: %f, recall: %f, fscore: %f" % (precision, recall, fscore))
    if use_visdom:
        logging("vis test.")
        viz.line(
            X=torch.ones((1, 3)).cpu()*epoch,
            Y=torch.FloatTensor([precision, recall, fscore]).unsqueeze(0).cpu(),
            win=epoch_lot,
            update='append'
        )
evaluate = False  # flip to True to run a single evaluation pass instead of training
if evaluate:
    logging('evaluating ...')
    test(0)
else:
    print(init_epoch, max_epochs)
    # NOTE(review): init_epoch is model.seen / nsamples, a float under Python 3
    # true division — range() would raise TypeError there. This script appears
    # to target Python 2; confirm before porting.
    for epoch in range(init_epoch, max_epochs):
        train(epoch)
        test(epoch)
|
988,283 | 63c36d352f0ec6d1449f6a092926616aa3ae2b5f | from __future__ import annotations
from iotbx.detectors.hamamatsu import HamamatsuImage
from dxtbx.format.FormatSMVADSC import FormatSMVADSC
class FormatSMVHamamatsu(FormatSMVADSC):
    """SMV image format reader specialised for Hamamatsu detectors."""

    @staticmethod
    def understand(image_file):
        """Return True iff the SMV header names a Hamamatsu detector."""
        _, header = FormatSMVHamamatsu.get_smv_header(image_file)
        required_keys = ["DETECTOR_NAME"]
        for key in required_keys:
            if key not in header:
                return False
        return "hamamatsu" in header["DETECTOR_NAME"].lower()

    def detectorbase_start(self):
        """Instantiate the iotbx detectorbase helper and parse the image header."""
        self.detectorbase = HamamatsuImage(self._image_file)
        self.detectorbase.open_file = self.open_file
        self.detectorbase.readHeader()
|
988,284 | d1619e8d8c34b4aee40dbea7c0b5fdfc01c209d3 | """Unit testing."""
import unittest
from unittest.mock import patch
import dice_hand
from dice_hand import Dicehand
from dice import Dice
class TestingDiceHand(unittest.TestCase):
    """Tests dice hand class."""
    def test_init(self):
        """Tests initializer and its properties."""
        hand = Dicehand()
        self.assertIsInstance(hand, Dicehand)
        self.assertEqual(hand.current_score, 0)
    def setUp(self):
        """Set up for variable."""
        # Fresh hand before every test method.
        self.the_dice_hand = dice_hand.Dicehand()
    def test_throw_with_two_dices_having_more_than_value_of_one(self):
        """Testing purpose: two non-1 rolls should sum into the round score."""
        self.the_dice_hand.current_score = 0
        # side_effect yields one value per Dice.throw call: 6 then 6.
        dice_values = [6, 6]
        with patch.object(Dice, "throw", create=True,
                          side_effect=dice_values):
            self.the_dice_hand.throw(Dice, Dice)
        self.assertEqual(12, self.the_dice_hand.get_round_score())
    def test_throw_with_one_dice_having_value_of_one(self):
        """Testing purpose: rolling a 1 should reset the round score to zero."""
        self.the_dice_hand.current_score = 20
        dice_values = [6, 1]
        with patch.object(Dice, "throw", create=True,
                          side_effect=dice_values):
            self.the_dice_hand.throw(Dice, Dice)
        self.assertEqual(0, self.the_dice_hand.get_round_score())
    def test_get_score(self):
        """Tests get score method which returns score."""
        hand = dice_hand.Dicehand()
        res = hand.get_round_score()
        exp = res == hand.current_score
        self.assertTrue(exp)
|
988,285 | 595579eb45c4db2af52c180c83b3133dac7227f0 | import pdb
import numpy as np
np.random.seed(211)
import pandas as pd
from toposort import toposort_flatten
from collections import defaultdict
import multiprocessing
from pgmpy.readwrite import BIFReader
from pgmpy.factors.discrete import TabularCPD
from pgmpy.sampling import BayesianModelSampling
class BnNetwork():
'''
This class will have contol all the functionality related to base graph,
intervention graph, generating samples or mixture from interventions.
'''
#Initializing the global variabels
base_graph=None #The actual Bayesian Network without interventions
nodes=None #The name of nodes in graph
card_node=None #The cardiantlity of the nodes
edges=None #The edge list in base graph
adj_set=None #Adjacency list of base graph
topo_i2n=None #The topological ordering (index to node dict)
topo_n2i=None #node to index in topological ordering
states_c2i=None #dict of dict
states_i2c=None #dict of each nodes's category from idx to name
#Variables for one-hot encoding of samples
vector_length=None #Length of one-hot vector
one2loc=None #Mapping from one-hot index to actual node and cat
loc2one=None #Mapping from node,cat to location in one hot
data_schema=None #Empty dataframe to hold the reconverted people
#For intervention
orphan_nodes=set() #Set of nodes which no longer will have parent
    def __init__(self,modelpath):
        """Load a .bif Bayesian network file and precompute the graph metadata
        (topological orders, adjacency) and the one-hot encoding maps."""
        #Reading the model from the file
        reader=BIFReader(modelpath)
        #Initializing the base graph
        print("Initializing the base_graph")
        self._initialize_base_graph(reader)
        #Initializing the one hot variables
        self._get_one_hot_mapping()
    def _initialize_base_graph(self,reader):
        '''
        This function will create the base distribution, have a hash of nodes
        name and a numbering.

        Populates: base_graph, nodes, card_node, edges, adj_set, topo_i2n,
        topo_n2i, topo_level, states, states_c2i, states_i2c.
        '''
        #Getting the base distributions
        base_graph=reader.get_model()
        base_graph.check_model()
        #Getting the names of nodes and its edges
        nodes=reader.get_variables()
        edges=reader.get_edges()
        #Getting the variables names /state for each nodes
        self.states=reader.get_states()
        # category-name <-> integer-index maps, per node
        self.states_c2i={key:{val:np.int32(idx) for idx,val
                                in enumerate(kval)}
                            for key,kval in self.states.items()}
        self.states_i2c={key:{np.int32(idx):val for idx,val
                                in enumerate(kval)}
                            for key,kval in self.states.items()}
        #Getting the topological order and adjacency list
        adj_set=defaultdict(set)
        inv_adj_set=defaultdict(set)
        in_degree={node:0 for node in nodes}
        for fr,to in edges:
            adj_set[fr].add(to)
            inv_adj_set[to].add(fr)
            in_degree[to]+=1
        #BEWARE: this function take inverse adj_list
        topo_nodes=toposort_flatten(inv_adj_set)
        topo_i2n={i:node for i,node in enumerate(topo_nodes)}
        topo_n2i={node:i for i,node in enumerate(topo_nodes)}
        #Now we will calcuate the topological level ordering
        # (Kahn's algorithm: level k = nodes whose in-degree drops to zero at step k)
        topo_level={}
        curr_level=0
        while(len(in_degree)>0):
            # pdb.set_trace()
            #Getting the nodes with zero indegree
            zero_nodes=[node for node,deg_left in in_degree.items()
                            if deg_left==0]
            topo_level[curr_level]=set(zero_nodes)
            curr_level+=1
            #Now we will reduce the indegree of connection form these nodes
            for node in zero_nodes:
                #Removing these nodes from in_degree list
                del in_degree[node]
                #Now reducing the degrre of it's to conenction
                for to_node in adj_set[node]:
                    in_degree[to_node]-=1
        #Now we are done with the topological levels
        self.topo_level=topo_level
        #Adding the property to class
        self.base_graph=base_graph
        self.nodes=nodes
        self.card_node=base_graph.get_cardinality()
        self.edges=edges
        self.topo_i2n=topo_i2n
        self.topo_n2i=topo_n2i
        self.adj_set=adj_set
#Generating the intervention Graph
def do(self,node_ids,node_cats):
'''
Perform size(node_ids)-order internveiton on the graph.
node_ids : list of node indexes where to perform internvetions
node_cats : the category which we have to intervene at those nodes.
'''
#Copying the model first of all (new intervened dist will be this)
do_graph=self.base_graph.copy()
#Now one by one we will perform all the necessary intervnetions
orphan_nodes=set()
for node_id,node_cat in zip(node_ids,node_cats):
self._single_do(do_graph,node_id,node_cat,orphan_nodes)
return do_graph
    def _single_do(self,do_graph,node_idx,node_cat,orphan_nodes):
        '''
        This function will generate the intervention graph at the given
        node number and category according to topological order. This is limited
        to perfoming a single do to the graph.
        node_cat is zero indexed

        Mutates do_graph in place and adds the node to orphan_nodes so later
        interventions do not re-add edges into it.
        '''
        # print("Creating intervention Graph")
        #Getting the name of node
        node=self.topo_i2n[node_idx]
        assert node_cat<self.card_node[node],"category index out of bound!!"
        #Now saving the cpds of the children of current node
        # (removing the node below drops the edges; children CPDs are restored afterwards)
        child_old_cpds=[do_graph.get_cpds(child).copy() for child in self.adj_set[node]]
        # pdb.set_trace()
        #Now we will perform the do operation
        do_graph.remove_node(node)
        #We have not removed the edge from parents to current node in adj_set
        #If we ensure not to edge later from any parent to it later then fine
        orphan_nodes.add(node)
        #But this has removed all node and children connection. Readd
        do_graph.add_node(node)
        for child in self.adj_set[node]:
            if child not in orphan_nodes:
                do_graph.add_edge(node,child)
        #Now we will add the cpds of childrens
        child_cur_cpds=[do_graph.get_cpds(child) for child in self.adj_set[node]]
        for cur_cpds,old_cpds in zip(child_cur_cpds,child_old_cpds):
            do_graph.remove_cpds(cur_cpds)
            do_graph.add_cpds(old_cpds)
        #Now we have to change the cpd of current node
        #Set the probability of intervented category to 1 (degenerate CPD)
        node_cpd=TabularCPD(node,
                    self.card_node[node],
                    np.zeros((self.card_node[node],1)))
        node_cpd.values[node_cat]=1.0
        #Add this cpd to graph
        do_graph.add_cpds(node_cpd)
        # pdb.set_trace()
        #Finally testing the model
        do_graph.check_model()
    #Sampling functions
    def generate_sample_from_mixture(self,do_config,sample_size,savepath=None):
        '''
        Generating the sample for the mixture distribution given by do_config.
        do_config : list of [ [node_ids,node_cats,pi], ... ]
                    node_ids could represent multiple interventions

        Draws int(sample_size*pi) rows from each intervened distribution and
        int(sample_size*(1-sum(pi))) from the base distribution, then shuffles.
        Side effect: sets self.data_schema to an empty frame with the sample
        columns (used later to decode one-hot vectors).
        '''
        all_samples=[]
        #Now we will sample from the base distribution
        _,_,pis=zip(*do_config)
        phi=1-sum(pis)  # residual mixture weight of the un-intervened network
        assert phi>=0,"Illegal mixture Distribtuion"
        if int(sample_size*phi)>0:
            sampler=BayesianModelSampling(self.base_graph)
            samples=sampler.forward_sample(size=int(sample_size*phi),
                                        return_type="dataframe")
            all_samples.append(samples)
        # pdb.set_trace()
        #One by one we will generate the mixture graph and corresponding sample
        for node_ids,node_cats,pi in do_config:
            #Getting the intervention distributions
            do_graph=self.do(node_ids,node_cats)
            #Now we are ready to sample from our distribution
            sampler=BayesianModelSampling(do_graph)
            #Only sample the amount required
            num_sample=int(sample_size*pi)
            if num_sample>0:
                samples=sampler.forward_sample(size=num_sample,
                                        return_type="dataframe")
                all_samples.append(samples)
        # pdb.set_trace()
        #Now we will merge all the samples in one and shuffle it
        all_samples=pd.concat(all_samples)
        all_samples=all_samples.sample(frac=1.0).reset_index(drop=True)
        #We will save the schema for the later reconversions
        self.data_schema=pd.DataFrame(columns=all_samples.columns)
        #Saving the dataframe (for reproducabilty)
        # NOTE(review): filename embeds only the LAST component's num_sample,
        # not the total row count — confirm this is intended.
        if savepath!=None:
            filepath="{}mixture_{}_{}.csv".format(savepath,num_sample,
                                            str(do_config))
            all_samples.to_csv(filepath,index=False)
        return all_samples
def encode_sample_one_hot(self,samples_df):
'''
This function will take tha sample dataframe and encode them in one
hot way, merging all nodes into a single vector.
'''
#Now we will create the input one by one for each sample
samples_one_hot=[]
for sidx in range(samples_df.shape[0]):
#Getting the sample and empyty vector
sample=samples_df.iloc[sidx]
vector=np.zeros((1,self.vector_length),dtype=np.float32)
for nidx in range(len(self.topo_i2n)):
node=self.topo_i2n[nidx]
cat=sample[node]
#Assigning the location to be hot
vec_pos=self.loc2one[(node,cat)]
vector[0,vec_pos]=1
#Adding the one hot vector to list
samples_one_hot.append(vector)
#Now we will convert them to one array
samples_one_hot=np.concatenate(samples_one_hot,axis=0)
# pdb.set_trace()
return samples_one_hot
def _get_one_hot_mapping(self):
'''
Generate the location map from category number to one hot and
vice-versa.
'''
vector_length=0
one2loc={}
loc2one={}
for nidx in range(len(self.topo_i2n)):
node=self.topo_i2n[nidx]
card=self.card_node[node]
#Now we will hash the nodes
for cidx in range(card):
loc2one[(node,cidx)]=vector_length
one2loc[vector_length]=(node,cidx)
vector_length+=1
self.vector_length=vector_length
self.one2loc=one2loc
self.loc2one=loc2one
    #Probability of a sample (for loss function)
    def get_sample_probability(self,interv_locs,input_samples):
        '''
        Given a set of sample, this function will calculate the overall sample,
        probability and then reaturn it back to tensorflow for likliehood
        calculation nad backprop.
        We return the sample probability for each of the intervention component
        not bother about summing up the overall probability which will be done
        inside the decoder or full model.
        Output shape: [num_sample,num_intervention_loc(=sparsity)]

        input_samples is expected to be a framework tensor exposing .numpy()
        (one-hot rows); the heavy per-sample probability evaluation is fanned
        out across worker processes.
        '''
        #First of all generating all the required intervention graph
        interv_graphs=[]
        for loc in interv_locs:
            node_ids,cat_ids=loc
            #TODO: Right now we will deal with single inerventions
            #assert len(node_ids)==1,"Multiple simultaneous intervention"
            # A single "node id" equal to the node count is the sentinel for
            # "no intervention" — use the base graph directly.
            if node_ids[0]==len(self.topo_i2n) and len(node_ids)==1:
                interv_graphs.append(self.base_graph)
            else:
                interv_graphs.append(self.do(node_ids,cat_ids))
        #Now we are ready to get sample prob for each interventions
        input_samples=input_samples.numpy()
        #reconvert the input samples from one hot to actual
        # input_samples=self.decode_sample_one_hot(input_samples)
        #For each sample, generate the graph and calculate the probability
        def worker_kernel(child_conn,child_samples,
                            interv_graphs,network_parameters):
            # Runs in a child process: decode its slice, evaluate each sample
            # under every intervention graph, ship the matrix back via pipe.
            #First of all decode the sample from one-hot representation
            child_samples=decode_sample_one_hot(child_samples,
                                                network_parameters)
            #Calculating the probability
            all_sample_prob=[]
            for idx in range(child_samples.shape[0]):
                sample=child_samples.iloc[idx]
                sample_prob=[]
                for graph in interv_graphs:
                    prob=get_graph_sample_probability(graph,
                                            sample,network_parameters)
                    sample_prob.append(prob)
                all_sample_prob.append(sample_prob)
            #Now converting this to a numpy array
            all_sample_prob=np.array(all_sample_prob)
            #Sending all the samples back to the parent
            child_conn.send(all_sample_prob)
            child_conn.close()
        #Now we will create multiple workers to parallelize
        # NOTE(review): cpu_count()-2 can be <= 0 on small machines, which
        # would make num_per_job divide by zero — confirm the deployment target.
        njobs=multiprocessing.cpu_count()-2
        num_per_job=int(np.ceil((input_samples.shape[0]*1.0)/njobs))
        process_pipe_list=[]
        process_list=[]
        for jidx in range(njobs):
            #Slicing our big input for our job
            print("Worker:{} manipulating from:{} to:{}".format(jidx,
                                            jidx*num_per_job,
                                            (jidx+1)*num_per_job))
            child_samples=input_samples[jidx*num_per_job:(jidx+1)*num_per_job]
            #Now we will first create a pipe to receive results
            parent_conn,child_conn=multiprocessing.Pipe()
            process_pipe_list.append(parent_conn)
            #Starting the child process
            network_parameters={}
            network_parameters["topo_i2n"]=self.topo_i2n
            network_parameters["card_node"]=self.card_node
            network_parameters["data_schema"]=self.data_schema.copy()
            network_parameters["vector_length"]=self.vector_length
            network_parameters["one2loc"]=self.one2loc
            p=multiprocessing.Process(target=worker_kernel,
                                args=(child_conn,
                                    child_samples,
                                    interv_graphs,
                                    network_parameters))
            p.start()
            process_list.append(p)
        #Now we will receive the results for all the child (join)
        child_probs=[parent_conn.recv() for parent_conn in process_pipe_list]
        #Stopping all the process
        [p.join() for p in process_list]
        #Now we will return the final concatenated result
        all_sample_prob=np.concatenate(child_probs,axis=0)
        print("merged prob size:",all_sample_prob.shape)
        return all_sample_prob
#Helper function to be used by parallel worker to compute probability of
#individual sample
def get_graph_sample_probability(graph,sample,network_parameters,marginal=False):
    '''
    This function will calcuate the probability of a sample in a graph,
    which will be later used to calcuate the overall mixture probability.
    graph : the graph on which we have to calculate the sample probability
    sample : the sample array in form of dictionary or numpy recarray
    Since we cant vectorize this function, cuz every sample will generate,
    a separate distribution and in that distribution we have to calculate
    the probability. We will see later how to vectorize

    Multiplies P(node | parents) over nodes in topological order (chain rule);
    with marginal=True only the first len(sample) nodes are included.
    '''
    #Getting the network parameters
    topo_i2n=network_parameters["topo_i2n"]
    card_node=network_parameters["card_node"]
    def _get_columns_index(nodes_idx,nodes_card):
        '''
        This will convert the index to row major number for column of cpd to
        access.
        '''
        assert len(nodes_idx)==len(nodes_card)
        # Row-major strides: stride[last]=1, stride[i]=card[i+1]*stride[i+1].
        multiplier=nodes_card.copy()
        multiplier[-1]=1 #we dont have to offset last index
        for tidx in range(len(nodes_card)-2,-1,-1):
            multiplier[tidx]=nodes_card[tidx+1]*multiplier[tidx+1]
        #Now we are ready with the offset multiplier
        ridx=0
        for tidx in range(len(nodes_idx)):
            assert nodes_idx[tidx]<nodes_card[tidx]
            ridx+=nodes_idx[tidx]*multiplier[tidx]
        return ridx
    #Now we will start in the topological order to get prob
    marginal_length=len(topo_i2n)
    if marginal==True:
        marginal_length=len(sample)
    #Initialing the probability
    overall_prob=1.0
    for nidx in range(marginal_length):
        #Getting the information of node
        node=topo_i2n[nidx]
        node_cpd=graph.get_cpds(node)
        #Getting the row in which to look (the node's own category index)
        row_idx=node_val=sample[node]
        #Now we have to get the columns number
        pnodes=(node_cpd.variables.copy())
        pnodes.remove(node_cpd.variable)
        col_idx=None
        if len(pnodes)!=0:
            pnodes_card=[card_node[pn] for pn in pnodes]
            pnodes_vals=[sample[pn] for pn in pnodes]
            col_idx=_get_columns_index(pnodes_vals,pnodes_card)
            #Just to be safe we will reorder for now (Comment later for performance)
            node_cpd.reorder_parents(pnodes)
        else:
            col_idx=0
        #Now we will calculate the probability of the node given its parents
        prob_node_given_parents=node_cpd.get_values()[row_idx,col_idx]
        #Updating the overall probability
        overall_prob=overall_prob*prob_node_given_parents
    return overall_prob
def decode_sample_one_hot(samples_one_hot,network_parameters):
    '''
    Reconvert one-hot encoded samples back to a dataframe of node -> category.

    samples_one_hot    : array of shape (num_samples, vector_length), one hot
                         slot per node.
    network_parameters : dict carrying "vector_length" (int) and "one2loc"
                         (one-hot index -> (node, category)).
    Returns a dataframe with one column per node and integer category values.
    '''
    #Getting the network parameters
    # (the "data_schema" entry is no longer read here: the previous assignment
    # of it to `df` was dead code, immediately overwritten by the concat below)
    vector_length=network_parameters["vector_length"]
    one2loc=network_parameters["one2loc"]
    #Now we are ready to reconvert the samples
    all_row_entry=[]
    for sidx in range(samples_one_hot.shape[0]):
        sample=samples_one_hot[sidx,:]
        assert sample.shape[0]==vector_length
        #Decode this example: every hot slot fixes one node's category
        row_entry={}
        for tidx in range(vector_length):
            if sample[tidx]==0:
                continue
            node,cat=one2loc[tidx]
            row_entry[node]=cat
        all_row_entry.append(pd.DataFrame(row_entry,index=[0]))
    #Concatenating all the rows into one big dataframe
    df=pd.concat(all_row_entry,ignore_index=True)
    return df
##########################################################################
##### Data handling for Flipkart dataset
def load_flipkart_mixture_sample(filepath,base_network):
    '''
    Load a real-world mixture sample CSV and rename the category names to the
    integer indices used by the Bayesian network.

    filepath     : path of the CSV file holding the sampled records.
    base_network : BnNetwork supplying .nodes (column subset) and .states_c2i
                   (per-node category-name -> index maps).
    Returns a dataframe of int32 category indices; rows containing categories
    unknown to the network (NaN after mapping) are dropped.
    '''
    print("Reading the Real World Mixture Sample")
    df=pd.read_csv(filepath)
    #Subset only those variable which are in our bn network
    df=df[base_network.nodes]
    #Converting everything into string
    def convert_to_str(element):
        #Hack to undo bnlearn's renaming by replacing ( and ) with _
        element=str(element).replace("(","_").replace(")","_")
        return element
    df=df.applymap(np.vectorize(convert_to_str))
    print("Converting the Category to Index")
    #Mapping function: translate one column's category names to indices
    def map_cat2index(columns):
        return columns.map(base_network.states_c2i[columns.name])
    #Applying the mapping function to every column
    df_map=df.apply(map_cat2index,axis=0)
    assert df.shape==df_map.shape
    #Now we will remove the rows which are null (unmapped categories)
    df_map=df_map[df_map.isnull().any(axis=1)==False]
    #BUGFIX: astype returns a NEW frame — the result was previously discarded,
    #leaving the columns as float (NaN support upcasts them during mapping).
    df_map=df_map.astype(np.int32)
    print("Total Rows Left:",df.shape[0],df_map.shape[0])
    return df_map
if __name__=="__main__":
    #Testing the base model and intervention
    graph_name="asia"
    modelpath="dataset/{}/{}.bif".format(graph_name,graph_name)
    network=BnNetwork(modelpath)
    #Testing intervention
    do_graph=network.do([3,4,1,5],[1,1,0,0])
    #Testing the sampler for mixture
    sample_size=10
    savepath="dataset/{}/".format(graph_name)
    do_config=[
        [[2,3],[1,0],0.5],
        [[1,6],[0,1],0.3]
    ]
    samples=network.generate_sample_from_mixture(do_config,sample_size,
                                                savepath)
    #BUGFIX: get_graph_sample_probability and decode_sample_one_hot are
    #module-level helpers (used by the multiprocessing workers), NOT methods
    #of BnNetwork — calling them as methods raised AttributeError, and both
    #also require the network_parameters dict the workers normally receive.
    network_parameters={
        "topo_i2n":network.topo_i2n,
        "card_node":network.card_node,
        "data_schema":network.data_schema.copy(),
        "vector_length":network.vector_length,
        "one2loc":network.one2loc,
    }
    #Testing the probability calculation function
    prob=get_graph_sample_probability(network.base_graph,
                                    samples.iloc[0],
                                    network_parameters)
    #Testing the encoding and decoding function
    sample_one_hot=network.encode_sample_one_hot(samples)
    sample_prime=decode_sample_one_hot(sample_one_hot,network_parameters)
    sample_prime=sample_prime[samples.columns]
    assert samples.equals(sample_prime),"Encoded and Decoded data not same"
|
988,286 | 194579d5a1d415fbaa3d3382d0513b1c0a10c4a6 | #maximum number of skeleton
nMax = 2  # maximum number of skeletons rendered at once
#Correcting location of skeleton: pixel offset applied to every joint position
offset={'x':-5, 'y':-55}
# connecting_joint[i] = index of the joint that joint i is drawn connected to
# (25 joints — looks like the Kinect v2 skeleton topology; confirm against the renderer)
connecting_joint = [1, 0, 20, 2, 20, 4, 5, 6, 20, 8, 9, 10, 0, 12, 13, 14, 0, 16, 17, 18, 1, 7, 7, 11, 11]
# one color per tracked body
body_colors = [(255, 0, 0), (0, 230, 0)]
# one color per joint index (parallel to connecting_joint)
joint_colors = [(0, 0, 160), (64, 0, 128), (255, 128, 64), (64, 128, 128), (255, 128, 192), (0, 255, 0),
                (128, 64, 64), (0, 128, 255), (128, 128, 128), (128, 128, 0), (0, 255, 255), (255, 128, 64),
                (64, 0, 128), (128, 64,0), (0, 0, 0), (128, 128, 192), (0, 64, 128), (64, 0, 64), (128, 0, 255),
                (255, 0, 255), (0, 128, 192), (0, 128, 64), (0, 0, 64), (255, 255, 128), (0, 128, 128)
                ]
988,287 | b50c41db95a156a331ceda7cd8e6ee362c1f1198 | ../../../../../../share/pyshared/nova/openstack/common/excutils.py |
988,288 | cbab68d568c58f07cdcd8383aaa5cea9fd226194 | import filecmp
def loop_check(j):
    """Return False iff iteration j-1's strategy files exactly match those of
    some earlier iteration p < j-1 (i.e. the process has entered a loop).

    Compares every <pair>_strategy_{j-1}.txt against <pair>_strategy_{p}.txt
    under the global `output` directory, byte-for-byte.
    """
    global output
    if j < 2:
        return True
    for p in range(1, j - 1):
        print(p)
        # The two iterations differ iff at least one pair's file differs.
        differs = any(
            not filecmp.cmp(output + "/" + pair + "_strategy_" + str(j - 1) + ".txt",
                            output + "/" + pair + "_strategy_" + str(p) + ".txt",
                            shallow=False)
            for pair in pairs
        )
        if not differs:
            print("At iteration " + str(j-1) + " strategies are identical to at iteration " + str(p))
            print("Loop identified, length " + str(j-1-p))
            return False
    return True
output = "output"  # directory holding the <pair>_strategy_<n>.txt files
chars = ["K","A","W","R","H"]
# All unordered two-character pairs, e.g. "KA", "KW", ..., "RH".
pairs = []
for i in range(len(chars)):
    for j in range(i+1,len(chars)):
        pairs += [chars[i] + chars[j]]
# Check iterations 1..14 for strategy loops.
for i in range(1,15):
    print(i)
    print(loop_check(i))
988,289 | ea2ba4012023678a4ec729c6112c5b1cd283b5bd | #coding=utf-8
import setting
import browser as bo
from db.SQLModel import Record2DB
from logs.log import Logger
Log = Logger(__file__)
Rdb = Record2DB()
class Actions(object):
    """Amazon product-page automation actions (add to cart / add to wish
    list), logging every outcome to the record database."""
    def __init__(self, driver, conf):
        # driver: wrapped browser driver (bo.Driver)
        # conf:   task configuration carrying task_guid and user (for logging)
        self.d = driver
        self.conf = conf
    def select_size_color(self):
        """Placeholder: implement when a product requires choosing a color or
        size before it can be added to the cart."""
        pass
    def add_to_cart(self):
        # Click the add-to-cart button, close the confirmation popup if one
        # appears, and log the result. Returns True on success, else False.
        try:
            self.d.rand_move()  # random mouse movement to look human
            cart_xp = '//input[@id="add-to-cart-button"]'
            exbox_xp = '//div[@class="a-popover-header"]/button[@aria-label="Close"]'
            if self.d.is_element_exist(cart_xp):
                self.d.move_to_click(cart_xp)
                # dismiss the popup dialog if shown
                if self.d.is_element_exist(exbox_xp):
                    self.d.click_opt(exbox_xp)
                Rdb.insert_log(self.conf.task_guid, self.conf.user, '添加购物车', '成功加入购物车')
                # bo.update_cookie_to_db(self.d, self.conf.task_guid, self.conf.user, '添加购物车')
                Log.info('<add to cart>: 成功加入购物车')
                return True
            else:
                Rdb.insert_log(self.conf.task_guid, self.conf.user, '添加购物车', '加入购物车失败,没找到节点')
                # bo.update_cookie_to_db(self.d, self.conf.task_guid, self.conf.user, '添加购物车')
                Log.info('<add to cart>: 加入购物车失败,没找到add to cart节点')
                return False
        except:
            # NOTE(review): bare except swallows everything, including
            # KeyboardInterrupt — consider narrowing to Exception.
            Rdb.insert_log(self.conf.task_guid, self.conf.user, '添加购物车', '加入购物车失败')
            # bo.update_cookie_to_db(self.d, self.conf.task_guid, self.conf.user, '添加购物车')
            Log.info('<add to cart>: 加入购物车执行失败')
            return False
    def add_to_list(self):
        # Click the add-to-wish-list button; create a new list if prompted and
        # close any popup. Returns True on success, else False.
        try:
            self.d.rand_move()
            list_xp = '//input[@id="add-to-wishlist-button-submit"]'
            create_xp = '//form[@class="reg-create-form"]/div[@class="a-form-actions"]/span[last()]//input'
            closebox_xp = '//div[@class="a-popover-header"]/button[@aria-label="Close"]'
            if self.d.is_element_exist(list_xp):
                self.d.move_to_click(list_xp)
                # first-time flow: confirm creation of a new wish list
                if self.d.is_element_exist(create_xp):
                    self.d.click_opt(create_xp)
                if self.d.is_element_exist(closebox_xp):
                    self.d.click_opt(closebox_xp)
                # bo.update_cookie_to_db(self.d, self.conf.task_guid, self.conf.user, '添加wish list')
                Rdb.insert_log(self.conf.task_guid, self.conf.user, '添加wish list', '添加收藏成功')
                Log.info('<add to list>: 添加收藏成功')
                return True
            else:
                Rdb.insert_log(self.conf.task_guid, self.conf.user, '添加wish list', '添加收藏失败,没找到节点')
                # bo.update_cookie_to_db(self.d, self.conf.task_guid, self.conf.user, '添加wish list')
                Log.info('<add to list>: 添加收藏失败,没找到节点')
                return False
        except:
            Rdb.insert_log(self.conf.task_guid, self.conf.user, '添加wish list', '添加收藏操作失败')
            # bo.update_cookie_to_db(self.d, self.conf.task_guid, self.conf.user, '添加wish list')
            Log.info('<add to list>: 添加收藏操作失败')
            return False
if __name__ == '__main__':
    conf = setting.ChromeConf()  # browser configuration (pass a config class to tweak settings)
    driver = bo.Initial_browser(conf).set_chrome()  # initialize the browser
    d = bo.Driver(driver)
    d.request('https://www.amazon.com/dp/B071H84S86')
    # BUGFIX: Actions.__init__ takes (driver, conf); the original Actions(d)
    # raised TypeError before any action could run.
    h = Actions(d, conf)
    h.add_to_list()
988,290 | f6640aa2f28190a2cef00ea060ec5d7bfa98b986 | """Definitions for Mikrotik Router binary sensor entities."""
from dataclasses import dataclass, field
from typing import List
from homeassistant.helpers.device_registry import CONNECTION_NETWORK_MAC
from homeassistant.helpers.entity import EntityCategory
from homeassistant.components.binary_sensor import (
BinarySensorDeviceClass,
BinarySensorEntityDescription,
)
from .const import DOMAIN
DEVICE_ATTRIBUTES_PPP_SECRET = [
"connected",
"service",
"profile",
"comment",
"caller-id",
"encoding",
]
DEVICE_ATTRIBUTES_IFACE = [
"running",
"enabled",
"comment",
"client-ip-address",
"client-mac-address",
"port-mac-address",
"last-link-down-time",
"last-link-up-time",
"link-downs",
"actual-mtu",
"type",
"name",
]
DEVICE_ATTRIBUTES_IFACE_ETHER = [
"status",
"auto-negotiation",
"rate",
"full-duplex",
"default-name",
"poe-out",
]
DEVICE_ATTRIBUTES_IFACE_SFP = [
"status",
"auto-negotiation",
"advertising",
"link-partner-advertising",
"sfp-temperature",
"sfp-supply-voltage",
"sfp-module-present",
"sfp-tx-bias-current",
"sfp-tx-power",
"sfp-rx-power",
"sfp-rx-loss",
"sfp-tx-fault",
"sfp-type",
"sfp-connector-type",
"sfp-vendor-name",
"sfp-vendor-part-number",
"sfp-vendor-revision",
"sfp-vendor-serial",
"sfp-manufacturing-date",
"eeprom-checksum",
]
DEVICE_ATTRIBUTES_IFACE_WIRELESS = [
"ssid",
"mode",
"radio-name",
"interface-type",
"country",
"installation",
"antenna-gain",
"frequency",
"band",
"channel-width",
"secondary-frequency",
"wireless-protocol",
"rate-set",
"distance",
"tx-power-mode",
"vlan-id",
"wds-mode",
"wds-default-bridge",
"bridge-mode",
"hide-ssid",
]
DEVICE_ATTRIBUTES_UPS = [
"name",
"offline-time",
"min-runtime",
"alarm-setting",
"model",
"serial",
"manufacture-date",
"nominal-battery-voltage",
"runtime-left",
"battery-charge",
"battery-voltage",
"line-voltage",
"load",
"hid-self-test",
]
@dataclass
class MikrotikBinarySensorEntityDescription(BinarySensorEntityDescription):
    """Class describing mikrotik entities.

    Extends Home Assistant's BinarySensorEntityDescription with the keys
    used by SENSOR_TYPES below to wire each sensor to controller data.
    """
    icon_enabled: str = ""  # icon shown while the sensor is on
    icon_disabled: str = ""  # icon shown while the sensor is off
    ha_group: str = ""  # device/group name (or data__<attr> indirection, see SENSOR_TYPES)
    ha_connection: str = ""  # device-registry connection type
    ha_connection_value: str = ""  # value paired with ha_connection
    data_path: str = ""  # key into the coordinator data (e.g. "ups", "interface")
    data_attribute: str = "available"  # attribute holding the on/off state
    data_name: str = ""  # attribute used for the entity display name
    data_name_comment: bool = False  # presumably: prefer the "comment" field as name — confirm
    data_uid: str = ""  # attribute used to build the unique id
    data_reference: str = ""  # attribute identifying the monitored item
    data_attributes_list: List = field(default_factory=lambda: [])  # extra state attributes exposed
    func: str = "MikrotikBinarySensor"  # name of the entity class to instantiate
# Static description of every binary sensor this integration can create,
# keyed by sensor type; consumed by the platform setup code.
SENSOR_TYPES = {
    # Mains power state reported by an attached UPS.
    "system_ups": MikrotikBinarySensorEntityDescription(
        key="system_ups",
        name="UPS",
        icon_enabled="",
        icon_disabled="",
        device_class=BinarySensorDeviceClass.POWER,
        entity_category=EntityCategory.DIAGNOSTIC,
        ha_group="System",
        data_path="ups",
        data_attribute="on-line",
        data_uid="",
        data_reference="",
        data_attributes_list=DEVICE_ATTRIBUTES_UPS,
    ),
    # Connectivity of each PPP secret (one entity per user).
    "ppp_tracker": MikrotikBinarySensorEntityDescription(
        key="ppp_tracker",
        name="PPP",
        icon_enabled="mdi:account-network-outline",
        icon_disabled="mdi:account-off-outline",
        device_class=BinarySensorDeviceClass.CONNECTIVITY,
        ha_group="PPP",
        ha_connection=DOMAIN,
        ha_connection_value="PPP",
        data_path="ppp_secret",
        data_attribute="connected",
        data_name="name",
        data_uid="name",
        data_reference="name",
        data_attributes_list=DEVICE_ATTRIBUTES_PPP_SECRET,
        func="MikrotikPPPSecretBinarySensor",
    ),
    # Link state of each physical interface (one entity per port).
    "interface": MikrotikBinarySensorEntityDescription(
        key="interface",
        name="Connection",
        icon_enabled="mdi:lan-connect",
        icon_disabled="mdi:lan-pending",
        device_class=BinarySensorDeviceClass.CONNECTIVITY,
        ha_group="data__default-name",
        ha_connection=CONNECTION_NETWORK_MAC,
        ha_connection_value="data__port-mac-address",
        data_path="interface",
        data_attribute="running",
        data_name="default-name",
        data_uid="default-name",
        data_reference="default-name",
        data_attributes_list=DEVICE_ATTRIBUTES_IFACE,
        func="MikrotikPortBinarySensor",
    ),
}
# No entity services are registered by this platform.
SENSOR_SERVICES = {}
|
988,291 | 32ce0cff87b2b86f6c30a530b0149dd1cbc989e4 | '''
Paweł Kruczkiewicz
Problemem przy wykasowaniu jednego klucza z tablicy z haszowaniem z adresowaniem otwartym jest przerwany ciąg liczb o tym samym kluczu.
Element o kluczu k1 mógł zostać wstawiony w miejsce T[k1+c], gdzie w bardzo pesymistycznym przypadku (bardzo bardzo pesymistycznym) c jest niemal równe bądź równe n
Tym samym naiwne rozwiązanie, czyli próba wyszukania wszystkich wstawionych wcześniej kluczy odbyłaby się w pesymistycznym czasie O(n^2), bo każdy element mógł być wyszukiwany w czasie n.
W tym naiwnym rozwiązaniu program naprawiłby to miejsce, w którym napotkał "dziurę" czyli None
Niestety, nie mam pomysłu na lepsze rozwiązanie niż powyższe, więc zabiorę się za implementację tego zazwyczaj liniowego, ale jednak możliwie kwadratowego rozwiązania.
Swoje próby znalezienia odpowiedzi lepszej zostawiam poniżej, bo może coś mądrego tam jednak dopisałem. To taka bardziej informacja dla mnie, jak blisko byłem właściwego rozwiązania.
Złożoność: Dla wyszukiwania w czasie O(1): O(N)
Dla wyszukiwania w czasie O(N): O(N) * O(N) = O(N^2) - przy dobrej funkcji haszującej i odpowiednim powiększaniu tablicy z haszowaniem przy dużym współczynniku przepełnienia - prawie znikome prawdopodobieństwo)
[Poniżej znajdują się próby znalezienia innego sposobu rozwiązania tego zadania. Zachowane w celach archiwalnych - chcę wiedzieć, jak wiele nie wiem]
Dlatego należy zauważyć, ze przejście po każdym indeksie tablicy z haszowaniem jest liniowe. Jeżeli znajdziemy sposób na upewnienie się, że dany element jest osiągalny w czasie O(1), to mamy rozwiązanie zawsze liniowe.
Jak to zrobić?
Przechodzimy po tablicy z haszowaniem.
Liczymy hasz pierwszego elementu. Jeżeli zgadza się z jego indeksem, lecimy dalej, jeśli nie, to przechodzimy do wyznaczonego przez hasz indeksu i staramy się przeskoczyć do naszego elementu. Zapamiętujemy ten "idealny hasz"
Jeżeli po drodze napotkamy pole z taken=False, to nastawiamy je na True, bo to jest miejsce zniszczone w wyniku ataku komputerowego. (Złożoność tego kroku: pesymistycznie O(N))
Przechodzimy do kolejnego elementu. Jeżeli hasz zgadza się z poprzednim elementem, przechodzimy dalej.
Jeżeli hasz zgadza się z indeksem (czyli element trafił na swoje miejsce bez konfliktu) również przechodzimy dalej.
Jeżeli hasz się nie zgadza ani z idealnym haszem ani z indeksem, to sprawdzamy, czy hasz trafił między idealny hasz a iterowany element.
Jeśli trafił, to idziemy dalej, jeśli nie, to znaczy, że klucz pozwalający na dotarcie do tego elementu został skasowany gdzieś dalej, więc go zapamiętujemy.
Jeżeli napotkamy None
'''
class Node:
    """Single slot of an open-addressing hash table."""

    def __init__(self, key=None, taken=False):
        self.key = key      # stored key (None when the slot was never used)
        self.taken = taken  # True while the slot belongs to a probe chain

    def __str__(self):
        # Bug fix: __str__ must *return* a string. The original printed the
        # text and implicitly returned None, so str(node) raised TypeError.
        if not self.taken:
            return 'pusty'
        # Two spaces match the original print('klucz: ', key) output.
        return 'klucz:  {}'.format(self.key)
def h(key, size=None):
    """Hash *key* (a string) into the range [0, size).

    Parameters
    ----------
    key : str
        Key to hash.
    size : int, optional
        Modulus for the final reduction. Defaults to the module-level
        table size N, so existing one-argument callers are unaffected.
    """
    v = int('0b10101010', 2)  # 170 -- arbitrary non-zero seed
    for l in key:
        v ^= ord(l) % 255  # fold each character in with XOR
    return v % (N if size is None else size)
# Size of the hash table (small prime, keeps linear-probing chains spread).
N=11
# The table itself: N empty, un-taken slots.
hash_tab = [Node() for i in range(N)]
def find(hash_tab, key):
    """Return the table index of *key*, or None when it is absent.

    Linear probing: start at h(key) and walk forward until the key is
    found, an untaken slot ends the chain, or a full lap completes.
    (Copied here because recover() needs it.)
    """
    pos = h(key)
    for _ in range(N):
        slot = hash_tab[pos]
        if not slot.taken:
            return None
        if slot.key == key:
            return pos
        pos = (pos + 1) % N
    return None
def recover_aux(hash_tab, key):
    """Re-mark the first free slot on *key*'s probe path as taken.

    Repairs the probe chain that was broken when a slot was wiped.
    Returns the table on success; falls through (None) when a full lap
    finds no hole to patch.
    """
    pos = h(key)
    for _ in range(N):
        slot = hash_tab[pos]
        if not slot.taken:
            slot.taken = True
            return hash_tab
        pos = (pos + 1) % N
def recover(hash_tab):
    """Scan the table and patch the first key made unreachable by the attack.

    A slot that still carries a key but can no longer be reached via
    find() sits behind a wiped slot; recover_aux() restores that hole.
    """
    for slot in hash_tab:
        if slot.key is None:
            continue
        if find(hash_tab, slot.key) is None:
            return recover_aux(hash_tab, slot.key)
|
988,292 | eb11e0dac892079ea84b8f89b1018024fbf22742 | from abc import ABCMeta, abstractmethod
from future.utils import with_metaclass
class CodeDriver(with_metaclass(ABCMeta, object)):
    """CodeDriver is the parent of all code drivers. Any child must implement the methods below

    Methods
    -------
    current_hash()
        get the current hash or commit id of the staged code
    create_ref()
        add remaining files, make a commit and add to datmo ref
    latest_ref()
        return the latest commit reference
    exists_ref()
        check if commit reference exists
    delete_ref()
        delete commit reference if exists
    list_refs()
        list all commit references
    push_ref()
        push commit reference given
    fetch_ref()
        fetch commit reference given
    check_unstaged_changes()
        check if there exists any unstaged changes for code
    checkout_ref()
        checkout to commit reference given
    """
    @abstractmethod
    def __init__(self):
        pass
    @abstractmethod
    def current_hash(self):
        """Get the current hash or commit id of the code if not unstaged

        Returns
        -------
        hash : str
            the hash of the code or commit id only if it has already been created

        Raises
        ------
        UnstagedChanges
            does not proceed to return hash unless the changes are already staged
        """
    @abstractmethod
    def create_ref(self, commit_id=None):
        """Add remaining files, make a commit and add to datmo ref

        Parameters
        ----------
        commit_id : str, optional
            if commit_id is given, it will not add files and not create a commit

        Returns
        -------
        commit_id : str
            commit_id for the ref created

        Raises
        ------
        CodeNotInitialized
            error if not initialized (must initialize first)
        CommitDoesNotExist
            commit id specified does not match a valid commit
        CommitFailed
            commit could not be created
        """
        pass
    @abstractmethod
    def latest_ref(self):
        """Return the latest ref of the code

        Returns
        -------
        commit_id : str
            the latest commit_id in the ref list

        Raises
        ------
        CodeNotInitialized
            error if not initialized (must initialize first)
        """
    @abstractmethod
    def exists_ref(self, commit_id):
        """Check if commit reference exists

        Parameters
        ----------
        commit_id : str
            commit id specified to check if commit ref exists

        Returns
        -------
        bool
            True if exists else False

        Raises
        ------
        CodeNotInitialized
            error if not initialized (must initialize first)
        """
        pass
    @abstractmethod
    def delete_ref(self, commit_id):
        """Delete commit ref if exists

        Parameters
        ----------
        commit_id : str
            commit id for commit ref

        Returns
        -------
        bool
            True if success

        Raises
        ------
        CodeNotInitialized
            error if not initialized (must initialize first)
        """
        pass
    @abstractmethod
    def list_refs(self):
        """List all commit references

        Returns
        -------
        list
            includes all commit ref ids present

        Raises
        ------
        CodeNotInitialized
            error if not initialized (must initialize first)
        """
        pass
    # @abstractmethod
    # def push_ref(self, commit_id="*"):
    #     """Push commit reference given
    #
    #     Parameters
    #     ----------
    #     commit_id : str, optional
    #         commit id for commit ref (default is * to signify
    #         all refs)
    #
    #     Returns
    #     -------
    #     bool
    #         True if success
    #     """
    #     pass
    #
    # @abstractmethod
    # def fetch_ref(self, commit_id):
    #     """Fetch commit reference given
    #
    #     Parameters
    #     ----------
    #     commit_id : str
    #         commit id for commit ref
    #
    #     Returns
    #     -------
    #     bool
    #         True if success
    #     """
    #     pass
    @abstractmethod
    def check_unstaged_changes(self):
        """Checks if there exists any unstaged changes for code

        Raises
        ------
        CodeNotInitialized
            error if not initialized (must initialize first)
        UnstagedChanges
            error if there exist unstaged changes in the code
        """
        pass
    @abstractmethod
    def checkout_ref(self, commit_id):
        """Checkout commit reference given without affecting the .datmo directory

        Parameters
        ----------
        commit_id : str
            commit id for commit ref

        Returns
        -------
        bool
            True if success

        Raises
        ------
        CodeNotInitialized
            error if not initialized (must initialize first)
        """
        pass
|
988,293 | 83a1eafdec66d4601d335b9b3009f953d6759c35 | #!/usr/bin/python3
'''
Defines the Place class
'''
from models.base_model import BaseModel
class Place(BaseModel):
    '''Places for Hbnb application
    Attributes:
        city_id (str): will be the City.id
        user_id (str): will be the User.id
        name (str): name of the Place
        description (str): description of the Place
        number_rooms (int): number of rooms in the Place
        number_bathrooms (int): number of bathrooms in the Place
        max_guest (int): maximum number of guests in the Place
        price_by_night (int): the nightly price of the Place
        latitude (float): the latitude of the Place
        longitude (float): the longitude of the Place
        amenity_ids (list): list of Amenity.id of the Place
    '''
    city_id = ''
    user_id = ''
    name = ''
    description = ''
    number_rooms = 0
    number_bathrooms = 0
    max_guest = 0
    price_by_night = 0
    latitude = 0.0
    longitude = 0.0
    # NOTE(review): mutable class-level default -- every instance shares this
    # list until an instance attribute shadows it. Presumably intended by the
    # project's class-attribute convention; confirm with the storage engine.
    amenity_ids = []
|
988,294 | e626af480990905be1124e36f445b9b969c9c34d | config = {
    # Base URL of the dm5 manga site and the AJAX path used to poll updates.
    'host': 'http://www.dm5.com',
    'get_news_path': '/manhua-new/dm5.ashx?action=getupdatecomics&d=',
}
|
988,295 | 9008cf6a5e828f2f8716be0ab5d879833916b4c2 | print ('hello, django girls!')
# A sample attendee record for the tutorial.
participant = {'name': 'Lindy', 'city': 'Groningen'}
# 3 > 2 is always true, so the record is always printed.
if 3 > 2:
    print(participant)
|
988,296 | 53eb26eb7de4718dbd3e43f49b230ba5f6fc14a0 | import time
import sys
import ibmiotf.application
import ibmiotf.device
import random
#Provide your IBM Watson Device Credentials
organization = "1o823s"
deviceType = "raspberrypi"
deviceId = "123456"
authMethod = "token"
authToken = "123456789"
# Initialize GPIO
def myCommandCallback(cmd):
    """Log a command received from IBM Watson IoT and report the light state."""
    print("Command received: %s" % cmd.data)
    print(type(cmd.data))
    action = cmd.data['command']
    # Dispatch table instead of the if/elif chain; output is unchanged.
    responses = {'lighton': "light is on", 'lightoff': "light is off"}
    if action in responses:
        print(responses[action])
try:
    # Credentials assembled from the module-level constants above.
    deviceOptions = {"org": organization, "type": deviceType, "id": deviceId, "auth-method": authMethod, "auth-token": authToken}
    deviceCli = ibmiotf.device.Client(deviceOptions)  # device client for the Watson IoT platform
except Exception as e:
    print("Caught exception connecting device: %s" % str(e))
    sys.exit()
# Connect, then publish simulated garbage/height readings forever.
deviceCli.connect()
while True:
    hit=random.randint(10,20)
    #print(hum)
    gbe=random.randint(10,50)
    #Send garbage & height to IBM Watson
    data = { 'garbage' : gbe, 'height': hit }
    #print (data)
    def myOnPublishCallback():
        print ("Published garbage = %s C" % gbe, "height= %s %%" % hit, "to IBM Watson")
    success = deviceCli.publishEvent("garbage", "json", data, qos=0, on_publish=myOnPublishCallback)
    if not success:
        print("Not connected to IoTF")
    time.sleep(2)
    # NOTE(review): reassigned every iteration; one assignment before the
    # loop would suffice.
    deviceCli.commandCallback = myCommandCallback
# NOTE(review): unreachable -- the while True loop above never exits.
# Disconnect the device and application from the cloud
deviceCli.disconnect()
|
988,297 | 99202022fadef66ffd76ced06d703e96133887cd | import re
from rest_framework import serializers
from apps.NetBanking.models import Users, Account, Transactions , AccountTransaction
from django.core.validators import RegexValidator
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import User
from datetime import datetime
class UserSerializer(serializers.ModelSerializer):
    """Serializer for Users with per-field validation."""

    class Meta:
        model = Users
        fields = [
            'id',
            'First_name',
            'Last_name',
            'username',
            'DOB',
            'Address',
            'password',
        ]

    def validate_First_name(self, firstname):
        """Reject an empty first name."""
        if firstname == "":
            raise serializers.ValidationError("First name needed..please enter correct first name ")
        return firstname

    def validate_Last_name(self, lastname):
        """Reject an empty last name."""
        if lastname == "":
            raise serializers.ValidationError("Last name needed..please enter correct last name ")
        return lastname

    def validate_username(self, username):
        """Require a non-empty username limited to letters, digits and @/./+/-/_.

        Bug fix: the original built a RegexValidator inside a dead local
        assignment that was never invoked, so any non-empty username passed.
        The pattern is now actually checked.
        """
        if username == "":
            raise serializers.ValidationError(" Unique Username needed boss..")
        if not re.match(r'^[\w.@+-]+$', username):
            raise serializers.ValidationError(
                _('Enter a valid username. '
                  'This value may contain only letters, numbers '
                  'and @/./+/-/_ characters.'))
        return username

    def validate_DOB(self, dob):
        """Reject future dates and applicants younger than 18."""
        today = datetime.now().date()
        diff = today - dob
        if dob > today:
            raise serializers.ValidationError("Give date is in future...")
        age = diff.days / 365  # approximate years; ignores leap days
        if age < 18:
            raise serializers.ValidationError("You are not eligible to have a bank account..so cannot create user account ")
        return dob

    def validate_password(self, password):
        """Enforce minimum length plus lower/upper/digit/special requirements."""
        if password == "":
            raise serializers.ValidationError("Enter password ")
        if len(password) < 8:
            raise serializers.ValidationError("Password length must be minimum 8 characters")
        if not re.search("[a-z]", password):
            raise serializers.ValidationError("password should contain atleast one lowercase letter")
        if not re.search("[A-Z]", password):
            raise serializers.ValidationError(" password must contain atleast one UpperCase letter")
        if not re.search("[0-9]", password):
            raise serializers.ValidationError(" password must contain atleast one digit [0 - 9]")
        if not re.search("[_@$^&*!#]", password):
            raise serializers.ValidationError(" password must contain atleast one special character ")
        return password
class AccountSerializer(serializers.ModelSerializer):
    """Serializer for bank accounts (all model fields exposed)."""

    class Meta:
        model = Account
        fields = '__all__'

    def validate_pin(self, pin):
        """Require a pin of at least 4 digits.

        Bug fix: the original used `<= 4`, which rejected the 4-digit pins
        the error message promises to accept.
        """
        if pin == "":
            raise serializers.ValidationError("Enter pin (Pin must only contain numbers) ")
        if len(str(pin)) < 4:
            raise serializers.ValidationError("Pin length must be minimum 4 digits")
        return pin

    def create(self, validated_data):
        """Create a new Account row from the validated payload."""
        return Account.objects.create(**validated_data)

    def update(self, instance, validated_data):
        """Apply partial updates field-by-field and persist the instance."""
        instance.user = validated_data.get('user', instance.user)
        instance.account_no = validated_data.get('account_no', instance.account_no)
        instance.pin = validated_data.get('pin', instance.pin)
        instance.balance = validated_data.get('balance', instance.balance)
        instance.save()
        return instance
class TransationsSerializer(serializers.ModelSerializer):
    """Read view of a transfer: senders, receivers and the amount moved.

    NOTE(review): the class name is misspelled ("Transations") but is part
    of the public interface, so it is left unchanged.
    """
    class Meta:
        model = Transactions
        fields = [
            'senders',
            'receivers',
            'moneysent',
        ]
        # Expand related sender/receiver objects one level deep.
        depth = 1
class AccountTransactionSerializer(serializers.ModelSerializer):
    """Serializer for single-account deposits and withdrawals."""
    class Meta:
        model = AccountTransaction
        fields = [
            'transactedtime',
            'moneydeposited',
            'moneywithdrawed',
            'active_account'
        ]
|
988,298 | 7280af1e7bebd8a21e91c0707167e07bf244811a | import os
from iris.command import Command
from iris.logger import Logger
from iris.type import Email
class EmailHintCommand(Command):
    """Guess which known domains match a partially censored email address."""

    name = 'emailhint'
    description = 'Guess domain of censored email'

    @Command.execute
    def run(self, email: Email):
        """Print every known domain compatible with the censored *email*.

        Raises
        ------
        Exception
            when no domain in data/domains.txt matches the pattern.
        """
        email_name, email_domain = email.split('@')
        # Candidate domains, one per line; blank lines skipped.
        with open(os.path.join('data', 'domains.txt')) as f:
            domains = [x.strip() for x in f.readlines() if len(x.strip()) > 0]

        def __guess_domain(email_domain: str, domain: str) -> bool:
            """True when *domain* fits the censored pattern ('*' = wildcard)."""
            if len(email_domain) != len(domain):
                return False
            # Idiom fix: single pass over paired characters replaces the
            # original two-loop position-collection; also drops `is True`.
            return all(c == '*' or c == d for c, d in zip(email_domain, domain))

        valid_domains = [domain for domain in domains if __guess_domain(email_domain, domain)]
        if not valid_domains:
            raise Exception('Failed to guess email domain')
        for domain in valid_domains:
            Logger.success(f'{email} => {email_name}@{domain}')
|
988,299 | 01774c08e2031a5116d4e7bc4ca808944b03599b | from copy import deepcopy
from flask import Flask
from flask import jsonify
from flask import request
import urllib.request
import json
import threading
threadLock = threading.Lock()
UNIQUE_GAME_ID = 0
app = Flask(__name__)
@app.route('/api/')
def hello():
    """Smoke-test endpoint: returns the JSON-encoded string "test"."""
    return jsonify("test")
@app.route('/api/get_message', methods=['POST'])
def get_message():
    """
    Query a chatbot given the user id, the bot id (which bot to query?)
    and the user's message; return the chatbot answer as plain text.
    Buttons in the bot reply are auto-clicked (first payload) and the
    follow-up messages are appended to the answer.
    """
    user_id = request.json['user_id']
    bot_id = request.json['bot_id']  # ex: 5005
    message = request.json['message']
    # query the concerned bot
    bot_url = "http://localhost:" + str(bot_id) + "/webhooks/rest/webhook"
    params = {"sender": user_id, "message": message}
    result = http_json_request(params, bot_url)
    new_msg = ""
    pile_run = deepcopy(result)
    while len(pile_run) > 0:
        msg = pile_run.pop(0)
        if "buttons" in msg:
            # Auto-click the first button and queue the follow-up messages.
            params["message"] = msg["buttons"][0]["payload"]
            pile_run.extend(http_json_request(params, bot_url))
        elif "custom" in msg:
            # Bug fix: the original appended to the *input* `message`, so
            # custom payloads never reached the returned answer.
            new_msg += "<{}>\n".format(msg["custom"]["type"])
        else:
            new_msg += "{}\n".format(msg["text"])
    return new_msg
@app.route('/api/get_id', methods=['GET'])
def get_id():
    """
    Returns an id that's ensured to be unique.

    Bug fix: the original read UNIQUE_GAME_ID *after* releasing the lock,
    so two racing requests could both observe the later increment and
    receive the same id. The value is now captured while the lock is held.
    """
    global UNIQUE_GAME_ID
    with threadLock:
        UNIQUE_GAME_ID += 1
        new_id = UNIQUE_GAME_ID
    return str(new_id)
def http_json_request(json_data, url, method="POST"):
    """
    Send *json_data* as a JSON body to *url* and decode the JSON answer.

    :param json_data: (dict) payload to serialise and send
    :param url: (string) target of the request
    :param method: (string) HTTP verb of the request (POST, PUT, ...)
    :return: (dict) decoded JSON answer, or None on any failure
    """
    try:
        body = json.dumps(json_data).encode("utf-8")
        req = urllib.request.Request(url, method=method)
        # Declare a JSON body and its length before sending.
        req.add_header('Content-Type', 'application/json; charset=utf-8')
        req.add_header('Content-Length', len(body))
        response = urllib.request.urlopen(req, body)
        raw = response.read()
        # Decode with the charset advertised by the server (utf-8 fallback).
        charset = response.info().get_content_charset('utf-8')
        return json.loads(raw.decode(charset))
    except Exception as e:
        print("Error in the http request {}: {}".format(url, e))
        return None
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.