index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
9,700 | 60b70171dededd758e00d6446842355a47b54cc0 | #!/usr/bin/env python3
import sys
import collections as cl
# Fast competitive-programming input helpers reading whole lines from stdin.
def II(): return int(sys.stdin.readline())  # one integer
def MI(): return map(int, sys.stdin.readline().split())  # iterator of ints on one line
def LI(): return list(map(int, sys.stdin.readline().split()))  # list of ints on one line
MOD = 998244353
def main():
N, K = MI()
kukan = []
for _ in range(K):
tmp = LI()
kukan.append(tmp)
dp = [0] * (N + 1)
dp[1] = 1
dp_sum = [0] * (N+1)
dp_sum[1] = 1
for i in range(N+1):
for k in range(K):
l, r = kukan[k]
pre_l = i - r
pre_r = i - l
if pre_r < 0:
continue
pre_l = max(pre_l, 0)
dp[i] += dp_sum[pre_r] - dp_sum[pre_l - 1]
dp_sum[i] = dp[i] + dp_sum[i-1]
dp_sum[i] %= MOD
dp[i] %= MOD
print(dp[-1])
main()
|
9,701 | 51868f26599c5878f8eb976d928c30d0bf61547d | import collections
def range(state):
    """Accumulate a relevance score into item["rank"] for every item in
    state["tmp"]["items"].

    NOTE(review): this function shadows the builtin `range`; renaming it
    would break existing callers, so the name is kept.

    `state["tmp"]["analysis"]["range"]` is presumably a set of indices into
    state["rank"] (it is intersected with each entry of
    ...["analysis"]["keys"]) -- TODO confirm against the producer of `state`.
    """
    ran = state["tmp"]["analysis"]["range"]
    # Map each key to the rank rules whose index sets intersect `ran`.
    rang = {
        key : [ state["rank"][i] for i in val & ran ]
        for key, val in state["tmp"]["analysis"]["keys"].items()
        if val & ran
    }
    for item in state["tmp"]["items"]:
        item.setdefault("rank", 0)
        # Only score the keys this item shares with the selected rules.
        item_keys = set(item.keys())
        rang_keys = set(rang.keys())
        keys = item_keys & rang_keys
        for key in keys:
            val = item[key]
            ruls = rang[key]
            for rule in ruls:
                item["rank"] += _rank(val, rule)
def _rank(val, rule):
if "rank" not in rule or "val" not in rule:
return 0
if isinstance(val, dict):
return sum([ _rank(val, rule) for val in val.values() ])
if isinstance(val, collections.Iterable):
return sum([ _rank(val, rule) for val in val ])
if "from" in rule["val"] and "to" in rule["val"]:
return rule["rank"] if rule["val"]["from"] < val < rule["val"]["to"] else 0
if "from" in rule["val"]:
return rule["rank"] if rule["val"]["from"] < val else 0
if "to" in rule["val"]:
return rule["rank"] if val < rule["val"]["to"] else 0
return 0
|
9,702 | 986df5a41bc87ecb390dfbd1db9e1f5cd6c5b8fb |
import argparse
import cv2
import numpy as np
refPt = []
cropping = False
def click_and_crop(event, x, y, flags, param):
    """cv2 mouse callback: capture a rectangle by click-drag-release.

    Stores the two corners in the global `refPt` and, on release, draws the
    rectangle onto the global `image`.
    """
    global refPt, cropping
    if event == cv2.EVENT_LBUTTONDOWN:
        # Drag started: remember the anchor corner.
        refPt = [(x, y)]
        cropping = True
    elif event == cv2.EVENT_LBUTTONUP:
        # Drag finished: remember the opposite corner and show the selection.
        refPt.append((x, y))
        cropping = False
        cv2.rectangle(image, refPt[0], refPt[1], (0, 255, 0), 2)
        cv2.imshow("image", image)
# Interactive ROI selection: drag a rectangle, press 'c' to confirm,
# 'r' to reset; the mean BGR colour of the selection is printed.
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True, help="Path to the image")
args = vars(ap.parse_args())

image = cv2.imread(args["image"])
clone = image.copy()
cv2.namedWindow("image")
cv2.setMouseCallback("image", click_and_crop)

# Event loop: 'r' resets the selection, 'c' confirms and leaves the loop.
while True:
    cv2.imshow("image", image)
    key = cv2.waitKey(1) & 0xFF
    if key == ord("r"):
        image = clone.copy()
    elif key == ord("c"):
        break

if len(refPt) == 2:
    # Crop from the untouched clone so the drawn rectangle is excluded.
    roi = clone[refPt[0][1]:refPt[1][1], refPt[0][0]:refPt[1][0]]
    cv2.imshow("ROI", roi)
    # Mean BGR over the ROI.  np.mean over both pixel axes replaces the
    # original per-pixel Python loop, and print() replaces the Python-2
    # print statement that is a SyntaxError on Python 3.  (The original
    # integer-array division also floored the averages on Python 2; the
    # mean is now reported as floats.)
    print("Average bgr: ", roi.mean(axis=(0, 1)))
    cv2.waitKey(0)

cv2.destroyAllWindows()
9,703 | dd0e96a1f93cbffedc11262a883dda285f5c224c | from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import func
from extensions import bcrypt
db = SQLAlchemy()
class User(db.Model):
    """Application user; owns posts through the Post.user backref."""
    id = db.Column(db.Integer(), primary_key=True)
    username = db.Column(db.String(255))
    # bcrypt hash of the password, set via set_password (never stored raw).
    # NOTE(review): flask-bcrypt's generate_password_hash returns bytes;
    # storing into String(255) may need .decode('utf-8') depending on the
    # DB driver -- confirm.
    password = db.Column(db.String(255))
    posts = db.relationship(
        'Post',
        backref='user',
        lazy='dynamic'
    )

    def __init__(self, username):
        # Password is intentionally not set here; call set_password afterwards.
        self.username = username

    def set_password(self,password):
        """Hash `password` with bcrypt and store the hash."""
        self.password = bcrypt.generate_password_hash(password)

    def check_password(self,password):
        """Return True when `password` matches the stored bcrypt hash."""
        return bcrypt.check_password_hash(self.password,password)

    def __repr__(self):
        return '<User ' + self.username + '>'
# Association table backing the many-to-many Post <-> Tag relationship.
tags = db.Table(
    'post_tags',
    db.Column('post_id', db.Integer(), db.ForeignKey('post.id')),
    db.Column('tag_id', db.Integer, db.ForeignKey('tag.id'))
)
class Post(db.Model):
    """Blog post: belongs to a User, has Comments and many-to-many Tags."""
    id = db.Column(db.Integer(), primary_key=True)
    title = db.Column(db.String(255))
    text = db.Column(db.Text())
    date = db.Column(db.DateTime())
    user_id = db.Column(db.Integer(), db.ForeignKey('user.id'))
    comments = db.relationship(
        'Comment',
        backref='post',
        lazy='dynamic'
    )
    # Many-to-many through the `post_tags` association table above.
    tags = db.relationship(
        'Tag',
        secondary=tags,
        backref=db.backref(
            'posts',
            lazy='dynamic'
        )
    )

    def __init__(self, title):
        self.title = title

    def __repr__(self):
        return '<Post ' + self.title + '>'
class Comment(db.Model):
    """Comment attached to a single Post (see Post.comments backref)."""
    id = db.Column(db.Integer(), primary_key=True)
    title = db.Column(db.String(255))
    text = db.Column(db.Text())
    date = db.Column(db.DateTime())
    post_id = db.Column(db.Integer(), db.ForeignKey('post.id'))

    def __init__(self, title):
        self.title = title

    def __repr__(self):
        return '<Comment ' + self.title + '>'
class Tag(db.Model):
    """Tag label; linked to posts through the `post_tags` table."""
    id = db.Column(db.Integer(), primary_key=True)
    title = db.Column(db.String(255))

    def __init__(self, title):
        self.title = title

    def __repr__(self):
        return '<Tag ' + self.title + '>'
def sidebar_data():
    """Return (recent, top_tags): the five newest posts and the five most
    used tags together with their usage counts.

    Requires an active Flask application / database context.
    """
    recent = Post.query.order_by(
        Post.date.desc()
    ).limit(5).all()
    # Count association rows per tag.  db.desc('total') is used instead of
    # the raw string 'total DESC': plain-string order_by expressions that
    # embed a direction are rejected by SQLAlchemy >= 1.4.
    top_tags = db.session.query(
        Tag, func.count(tags.c.post_id).label('total')
    ).join(
        tags
    ).group_by(Tag).order_by(db.desc('total')).limit(5).all()
    return recent, top_tags
9,704 | 78c4e14e5afdf857082b60bf4020f0f785d93a0d | from p5 import *
import numpy as np
from numpy.random import default_rng
from boids import Boid
from data import Data
# Simulation parameters: n boids on a 1920x1080 canvas.
n=30;
width = 1920
height = 1080
flock=[]
infected=[]  # NOTE(review): never appended to -- infection state lives on each Boid
rng = default_rng()
frames=0
# Seed the flock at random positions; boid 0 starts infected, the rest healthy.
for i in range(n):
    x = rng.integers(low=0, high=1920)
    y = rng.integers(low=0, high=1080)
    if i==0:
        flock.append(Boid(x,y, width, height,infected=True,curado=False,alive=True))
    else:
        flock.append(Boid(x,y, width, height,infected=False,curado=False,alive=True))
def setup():
    # p5 calls this exactly once at startup.
    size(width, height) #instead of create_canvas
def draw():
    """p5 per-frame callback: advance and render every boid in the flock."""
    global flock,frames
    background(30, 30, 47)
    for boid in flock:
        boid.edges()                  # handle canvas borders
        boid.apply_behaviour(flock)   # flocking forces from neighbours
        boid.infection(flock)         # spread infection between nearby boids
        boid.update()
        boid.show()
        boid.livesordie()
    Data.count(flock)  # record per-frame population statistics
run() |
9,705 | 7764effac0b95ad8f62b91dd470c1d0e40704a7d | ''' tk_image_view_url_io.py
display an image from a URL using Tkinter, PIL and data_stream
tested with Python27 and Python33 by vegaseat 01mar2013
'''
import io
# allows for image formats other than gif
from PIL import Image, ImageTk
try:
# Python2
import Tkinter as tk
from urllib2 import urlopen
except ImportError:
# Python3
import tkinter as tk
from urllib.request import urlopen
root = tk.Tk()
# find yourself a picture on an internet web page you like
# (right click on the picture, under properties copy the address)
#url = "http://www.google.com/intl/en/images/logo.gif"
# or use image previously downloaded to tinypic.com
#url = "http://i48.tinypic.com/w6sjn6.jpg"
#url = "http://i50.tinypic.com/34g8vo5.jpg"
#url = "https://media.geeksforgeeks.org/wp-content/uploads/Computer-Networking-Diagram.png"
url = "https://static.toiimg.com/thumb/msid-79594506,imgsize-721231,width-400,resizemode-4/79594506.jpg"
# Download the whole image into memory.
# NOTE(review): the urlopen handle is never closed; a `with` block would fix
# that on Python 3 but would break the Python 2 (urllib2) path this file
# deliberately still supports.
image_bytes = urlopen(url).read()
# internal data file
data_stream = io.BytesIO(image_bytes)
# open as a PIL image object
pil_image = Image.open(data_stream)
# optionally show image info
# get the size of the image
w, h = pil_image.size
# split off image file name
fname = url.split('/')[-1]
# window title: "name (WxH)"
sf = "{} ({}x{})".format(fname, w, h)
root.title(sf)
# convert PIL image object to Tkinter PhotoImage object
tk_image = ImageTk.PhotoImage(pil_image)
# put the image on a typical widget
label = tk.Label(root, image=tk_image, bg='brown')
label.pack(padx=5, pady=5)
root.mainloop()
|
9,706 | 91806afea92587476ac743346b88098b197a033c | import pygame
import time
from menus import MainMenu
from scenes import TestWorldGen
from scenes import TestAnimation
from scenes import TestLevel2
from scenes import MainGame
import random
class GameManager:
    """Owns the pygame window, the active scene and the main loop."""

    def __init__(self):
        # Fullscreen, hardware-accelerated, double-buffered display surface.
        self.screen = pygame.display.set_mode((1280, 720),
                                              flags=pygame.FULLSCREEN |
                                              pygame.HWSURFACE |
                                              pygame.DOUBLEBUF)  # type: pygame.Surface
        self.running = True
        # Seconds elapsed during the previous frame; starts at 1 so the
        # first frame has a sane non-zero dt.
        self.delta_time = 1
        self.active_scene = None
        # Alternative startup scenes kept for debugging:
        # self.load_scene(MainMenu.MainMenu, (self,))
        # self.load_scene(TestWorldGen.TestWorldGen, (self,))
        # self.load_scene(TestAnimation.TestAnimation, (self,))
        # self.load_scene(TestLevel2.TestLevel, (self, ))
        self.load_scene(MainGame.MainGame, (self,))
        self.fps_font = pygame.font.Font("game_data/fonts/calling_code.ttf", 14)
        self.pygame_clock = pygame.time.Clock()
        self.pygame_clock.tick()  # prime the clock so the first dt is measured from here
        pygame.joystick.init()
        self.joystick = [pygame.joystick.Joystick(i) for i in range(pygame.joystick.get_count())]
        for joystick in self.joystick:
            joystick.init()
        random.seed(time.time())
        self.player_joy = -1  # -1 = no joystick assigned to the player yet

    def __del__(self):
        self.exit()

    def main_loop(self):
        """Run until exit(): pump events, tick the active scene, draw the
        FPS overlay and flip the display once per frame."""
        while self.running:
            events = pygame.event.get()
            for event in events:
                if event.type == pygame.QUIT:
                    self.exit()
            # tick(60) caps the frame rate and returns elapsed milliseconds.
            self.delta_time = float(self.pygame_clock.tick(60)) / (10 ** 3)
            # Guard the division: the original computed 1 / delta_time
            # unconditionally, which raises ZeroDivisionError whenever a
            # frame completes in under a millisecond.
            fps = round(1 / self.delta_time) if self.delta_time > 0 else 0
            fps_text = self.fps_font.render("FPS: {}".format(fps), False, (255, 255, 255))
            self.active_scene.main_loop(events)
            self.screen.blit(fps_text, (self.screen.get_width() - fps_text.get_width(), 0))
            pygame.display.flip()

    def load_scene(self, scene_object, scene_parameters):
        """Instantiate scene_object(*scene_parameters) and make it active."""
        self.active_scene = scene_object(*scene_parameters)

    def exit(self):
        """Request termination of main_loop."""
        self.running = False
|
9,707 | 4f06d87ec79c20206ff45ba72ab77844076be553 |
import pandas as pd
from greyatomlib.pandas_project.q01_read_csv_data_to_df.build import read_csv_data_to_df
def get_runs_counts_by_match():
    """Return a match_code x runs table counting how often each run value
    occurred in every match (missing combinations filled with 0)."""
    ipl_df = read_csv_data_to_df("data/ipl_dataset.csv")
    # Work on a copy of the three relevant columns only.
    subset = pd.DataFrame(ipl_df[['match_code', 'runs', 'venue']])
    # Count rows per (match, runs) pair, then spread `runs` across columns.
    counts = subset.groupby(['match_code', 'runs'], as_index=False).count()
    table = counts.pivot(index='match_code', columns='runs')
    return table.fillna(0).astype('int')
get_runs_counts_by_match()
|
9,708 | 4b78c99dd6156afe960effcacb25804446310f7c | # MIT LICENSE
#
# Copyright 1997 - 2019 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class VirtualChassis(Base):
    """Virtual Chassis is used to get and to manage a Virtual Chassis topology and get the list of discovered appliances

    The VirtualChassis class encapsulates a required virtualChassis resource which will be retrieved from the server every time the property is accessed.
    """

    __slots__ = ()
    _SDM_NAME = 'virtualChassis'

    def __init__(self, parent):
        super(VirtualChassis, self).__init__(parent)

    @property
    def DiscoveredAppliance(self):
        """An instance of the DiscoveredAppliance class.

        Returns:
            obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.availablehardware.virtualchassis.discoveredappliance.discoveredappliance.DiscoveredAppliance)

        Raises:
            NotFoundError: The requested resource does not exist on the server
            ServerError: The server has encountered an uncategorized error condition
        """
        # Imported lazily (generated-code convention) to avoid import cycles.
        from ixnetwork_restpy.testplatform.sessions.ixnetwork.availablehardware.virtualchassis.discoveredappliance.discoveredappliance import DiscoveredAppliance
        return DiscoveredAppliance(self)

    @property
    def Hypervisor(self):
        """An instance of the Hypervisor class.

        Returns:
            obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.availablehardware.virtualchassis.hypervisor.hypervisor.Hypervisor)

        Raises:
            NotFoundError: The requested resource does not exist on the server
            ServerError: The server has encountered an uncategorized error condition
        """
        from ixnetwork_restpy.testplatform.sessions.ixnetwork.availablehardware.virtualchassis.hypervisor.hypervisor import Hypervisor
        return Hypervisor(self)

    @property
    def IxVmCard(self):
        """An instance of the IxVmCard class.

        Returns:
            obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.availablehardware.virtualchassis.ixvmcard.ixvmcard.IxVmCard)

        Raises:
            NotFoundError: The requested resource does not exist on the server
            ServerError: The server has encountered an uncategorized error condition
        """
        from ixnetwork_restpy.testplatform.sessions.ixnetwork.availablehardware.virtualchassis.ixvmcard.ixvmcard import IxVmCard
        return IxVmCard(self)

    @property
    def EnableLicenseCheck(self):
        """Enables license check on port connect

        Returns:
            bool
        """
        return self._get_attribute('enableLicenseCheck')
    @EnableLicenseCheck.setter
    def EnableLicenseCheck(self, value):
        self._set_attribute('enableLicenseCheck', value)

    @property
    def Hostname(self):
        """Virtual Chassis hostname or IP

        Returns:
            str
        """
        # Read-only attribute: no setter is generated for hostname.
        return self._get_attribute('hostname')

    @property
    def LicenseServer(self):
        """The address of the license server

        Returns:
            str
        """
        return self._get_attribute('licenseServer')
    @LicenseServer.setter
    def LicenseServer(self, value):
        self._set_attribute('licenseServer', value)

    @property
    def NtpServer(self):
        """The address of the NTP server

        Returns:
            str
        """
        return self._get_attribute('ntpServer')
    @NtpServer.setter
    def NtpServer(self, value):
        self._set_attribute('ntpServer', value)

    @property
    def StartTxDelay(self):
        """The delay amount for transmit

        Returns:
            str
        """
        return self._get_attribute('startTxDelay')
    @StartTxDelay.setter
    def StartTxDelay(self, value):
        self._set_attribute('startTxDelay', value)

    def update(self, EnableLicenseCheck=None, LicenseServer=None, NtpServer=None, StartTxDelay=None):
        """Updates a child instance of virtualChassis on the server.

        Args:
            EnableLicenseCheck (bool): Enables license check on port connect
            LicenseServer (str): The address of the license server
            NtpServer (str): The address of the NTP server
            StartTxDelay (str): The delay amount for transmit

        Raises:
            ServerError: The server has encountered an uncategorized error condition
        """
        self._update(locals())
|
9,709 | 0a42c54ef1412b7f3b8e95da1d65ee05dfa14089 | from dataframe import *
from chaid import SuperCHAID, SuperCHAIDVisualizer
# NOTE(review): the bare names below (df, gm, manufacturing_region, np, ...)
# are presumably provided by `from dataframe import *` -- the wildcard import
# hides their origin; confirm.
supernode_features = [manufacturing_region]
features_list = [customer_region, product_family, make_vs_buy]
dependant_variable = gm

# Fit a CHAID tree per supernode segment and export a visualisation.
super_tree = SuperCHAID(supernode_features, features_list, dependant_variable)
super_tree.fit(df)
visualizer = SuperCHAIDVisualizer(super_tree)
visualizer.export("tree")

# Predict on the first row with one feature blanked out to exercise imputation.
input_row = df.loc[0]
input_row[make_vs_buy] = np.nan
print(input_row[supernode_features + features_list])
print()
result = super_tree.predict(input_row, impute=True)
if result is not None:
    segment, segment_pairs, imputed_pairs = result
    print("Imputed pairs:", imputed_pairs)
    print("Supernode pairs:", segment.supernode_pairs)
    print("Segment pairs:", segment_pairs)
    print(segment)
|
9,710 | 239f055fd76a3ecb5f384c256ad850ea42739b8f |
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
import math
from tkinter import *
from tkinter.ttk import *
from facedetectandtrack import *
x_vals = []  # sliding window of CSV row indices fed to the animation
root = Tk()  # used only to query the screen dimensions
counter=0
#def graph():
plt.style.use('seaborn')
def animate(i):
    """FuncAnimation callback: scatter the face's distance from the screen
    centre against time, re-reading data.csv on every tick.

    Relies on face_x2/face_y2 imported from facedetectandtrack --
    presumably module-level values; TODO(review) confirm they are updated
    between frames rather than fixed at import time.
    """
    data = pd.read_csv('data.csv')
    global x_vals
    global counter
    x_vals.append(counter)
    try:
        x = data.iloc[x_vals,0]
        y = data.iloc[x_vals,1]
        if counter>10:
            # Keep only the most recent indices so the window slides.
            x_vals.pop(0)
        plt.cla()
        axes=plt.gca()
        axes.set_ylim([0,30])
        #plt.plot(x, y)
        counter=counter+1
        height = root.winfo_screenheight()
        width = root.winfo_screenwidth()
        screen_x1 = width/2
        screen_y1 = height/2
        # Euclidean distance from screen centre to the tracked face point.
        X = screen_x1 - face_x2
        Y = screen_y1 - face_y2
        d_x = (X*X)
        d_y = (Y*Y)
        D = d_x + d_y
        distance = math.sqrt(D)
        #print(distance)
        plt.scatter(counter ,distance, s= 50,linewidth=1)
        plt.xlabel("Time")
        plt.ylabel("Movement of student from the center of screen")
        plt.tight_layout()
    except IndexError as e:
        # Ran past the end of the CSV: stop the whole program.
        print('Graph ended')
        exit(0)
ani = FuncAnimation(plt.gcf(), animate, interval=1000)
# NOTE(review): savefig() runs before the animation has drawn anything, so
# the saved PNG is blank -- confirm whether it should run after show().
plt.savefig("Scatter_Graph.png")
plt.tight_layout()
plt.show()
9,711 | 35647ed5e2c128a5bf819a1e47ead7e958172b1c | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 24 22:05:12 2019
@author: admin
"""
# NOTE(review): `test_set` is not defined anywhere in this file, so this
# loop raises NameError as written -- presumably the array was meant to be
# loaded above; confirm the missing setup.
for index in range(test_set.shape[0]):
    print(index)
9,712 | 6907a1e08d728732eebf81fec7c0dab8729448e2 | # Generated by Django 2.1.5 on 2019-01-20 18:11
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial schema for the `tours` app (Django 2.1.5).

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Destination',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50)),
                ('image', models.ImageField(upload_to='img/destinations')),
            ],
        ),
        migrations.CreateModel(
            name='Gallery',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=50)),
                ('image', models.ImageField(upload_to='img/tours')),
            ],
        ),
        migrations.CreateModel(
            name='Tour',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=50, verbose_name='title for admin')),
                ('status', models.BooleanField(default=False)),
                ('price', models.IntegerField()),
                ('stars', models.IntegerField(choices=[(1, 1), (2, 2), (3, 3), (4, 4), (5, 5)])),
                # NOTE(review): 'feautured' is misspelled in the model; kept
                # as-is here because renaming requires its own migration.
                ('feautured', models.BooleanField(default=True)),
                ('destination', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tours.Destination')),
            ],
        ),
        migrations.CreateModel(
            name='TourDetail',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=50)),
                # NOTE(review): 'descreption' is misspelled in the model; kept as-is.
                ('descreption', models.TextField()),
                ('tour', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tours.Tour')),
            ],
        ),
        migrations.AddField(
            model_name='gallery',
            name='tour',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tours.Tour'),
        ),
    ]
|
9,713 | e3417980599448f1293b56cb95312088e7a8abe3 | import os
import imageio
import h5py
import numpy as np
def create_segmentation_test_data(data_path, raw_key, label_key, shape, chunks):
    """Write a random float raw volume and a random 4-class label volume
    into the hdf5 file at `data_path` (append mode)."""
    raw = np.random.rand(*shape)
    labels = np.random.randint(0, 4, size=shape)
    with h5py.File(data_path, 'a') as f:
        f.create_dataset(raw_key, data=raw, chunks=chunks)
        f.create_dataset(label_key, data=labels, chunks=chunks)
def create_image_collection_test_data(folder, n_images, min_shape, max_shape):
    """Write n_images random image/label tif pairs, with per-image random
    shapes in [min_shape, max_shape), into <folder>/images and
    <folder>/labels.

    The raw values are drawn in [0, 1) and scaled up to the int16 range
    before the cast: the original `np.random.rand(...).astype('int16')`
    truncated every value to 0, producing all-black raw images.
    """
    im_folder = os.path.join(folder, 'images')
    label_folder = os.path.join(folder, 'labels')
    os.makedirs(im_folder, exist_ok=True)
    os.makedirs(label_folder, exist_ok=True)

    for i in range(n_images):
        shape = tuple(np.random.randint(mins, maxs) for mins, maxs in zip(min_shape, max_shape))
        raw = (np.random.rand(*shape) * np.iinfo('int16').max).astype('int16')
        label = np.random.randint(0, 4, size=shape)
        imageio.imwrite(os.path.join(im_folder, f"im_{i}.tif"), raw)
        imageio.imwrite(os.path.join(label_folder, f"im_{i}.tif"), label)
|
9,714 | b7a7941b3555b30ac7e743a5457df76f9eb7cb15 | #write a program that displays the wor "Hello!"
print("Hello!")
|
9,715 | c9be3d25824093528e2bee51c045d05e036daa67 | import sklearn.metrics as metrics
import sklearn.cross_validation as cv
from sklearn.externals import joblib
import MachineLearning.Reinforcement.InternalSQLManager as sqlManager
class ReinforcementLearner:
    """Wraps a classifier and re-persists it only when newly supplied data
    does not degrade the cross-validated score.

    NOTE(review): the file imports `sklearn.cross_validation` and
    `sklearn.externals.joblib`, both removed in modern scikit-learn
    (replaced by `sklearn.model_selection` and the standalone `joblib`
    package) -- confirm the pinned scikit-learn version before upgrading.
    """

    def __init__(self, clf=None, load=False, clfName=None):
        """
        Initialise the classifier, either from the provided model or from
        the stored classifier.

        :param clf: The current classifier, not yet fitted to the data
        :param load: Set to True in order to load a previously saved model
        :param clfName: Optional bookkeeping name; defaults to the
            classifier's class name.
        """
        if load:
            self.clf = joblib.load("model.pkl")
            self.reTrain = True
        else:
            self.clf = clf
            self.reTrain = False
        # `is None`, not `== None`: identity is what is meant here and
        # equality can be overridden by the object.
        if clfName is None:
            self.name = self.clf.__class__.__name__
        else:
            self.name = clfName

    def fit(self, X, y, scoring="accuracy", crossval=5):
        """
        Fit the classifier, either training for the first time or updating
        a previously stored model.

        :param X: Input features
        :param y: Class labels
        :param scoring: Scoring used for cross validation
        :param crossval: Number of cross-validation folds
        :return: True if a model was (re)fitted and saved, False if the new
            data scored worse than the stored model.
        """
        if not self.reTrain:  # first-time training
            score = cv.cross_val_score(self.clf, X, y, scoring, cv=crossval)
            # Record the baseline result for this classifier.
            sqlManager.insertValue(self.name, 0.0, score.mean(), 0, len(y), 1)
            self.clf.fit(X, y)
            joblib.dump(self.clf, "model.pkl")  # persist the fitted model
            print("Data Fit")
            return True
        else:
            previousData = sqlManager.selectNewestRecord(self.name)  # last stored entry
            if len(previousData) > 0:
                oldSize = previousData[5]
                newSize = len(y)
                accScore = previousData[3]
                score = cv.cross_val_score(self.clf, X, y, scoring, cv=crossval)
                newAccScore = score.mean()
                print("Old Accuracy Score : ", accScore)
                print("New Accuracy Score : ", newAccScore)
                if accScore <= newAccScore:  # keep only non-degrading updates
                    print("Reinforcement Learning : Newer model is superior. Saving Model.")
                    self.clf.fit(X, y)
                    sqlManager.insertValue(self.name, accScore, newAccScore, oldSize, newSize, 1)
                    joblib.dump(self.clf, "model.pkl")
                    return True
                else:
                    print("Reinforcement Learning : Newer model is inferior. Not saving model.")
                    return False
            # NOTE(review): no stored record -> implicit None return,
            # preserved from the original (callers may rely on falsiness).

    def predict(self, X):
        """Predict labels with the wrapped classifier."""
        return self.clf.predict(X)

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Close the bookkeeping database when used as a context manager.
        # NOTE(review): no matching __enter__ is defined in this file.
        sqlManager.close()
if __name__ == "__main__":
pass
|
9,716 | 13c55c313c740edce48fc979e8956fdd018e8aab | """This module contains a class supporting composition of AugraphyPipelines"""
class ComposePipelines:
    """The composition of multiple AugraphyPipelines.

    Define AugraphyPipelines elsewhere, then use this to compose them.
    ComposePipelines objects are callable on images (as numpy.ndarrays).

    :param pipelines: A list contains multiple augraphy.base.AugraphyPipeline.
    :type pipelines: list or tuple
    """

    def __init__(self, pipelines):
        self.pipelines = pipelines

    def __call__(self, image):
        """Run every pipeline in order, feeding each the previous pipeline's
        output image, and return every result keyed as "pipeline<i>-<key>"."""
        current = image.copy()
        merged = {}
        for index, pipeline in enumerate(self.pipelines):
            result = pipeline.augment(current)
            # The next pipeline consumes this pipeline's output image.
            current = result["output"]
            prefix = "pipeline{}-".format(index)
            for key, value in result.items():
                merged[prefix + key] = value
        return merged
|
9,717 | bcdd36b534fd3551de9cb40efc11581f4d95a002 | import sys
from Node import Node
from PriorityQueue import PriorityQueue
def Print(text):
    """Round-trip `text` through huffman_encoding/huffman_decoding and print
    the size and content of every stage.

    Rejects empty/None input and input consisting of one repeated character
    (a Huffman tree needs at least two distinct symbols).
    """
    if text is None or len(text) == 0:
        print('invalid text.')
        print('--------------------------------------------------------------')
        return
    # Collect the distinct characters; a single unique symbol cannot be coded.
    text_set = set()
    for i in text:
        text_set.add(i)
    if len(text_set) == 1:
        print('invalid text.')
        print('--------------------------------------------------------------')
        return
    print("The size of the data is: {}\n".format(sys.getsizeof(text)))
    print("The content of the data is: {}\n".format(text))
    encoded_data, tree = huffman_encoding(text)
    # Size is measured on the bit string converted to an int.
    print("The size of the encoded data is: {}\n".format(sys.getsizeof(int(encoded_data, base=2))))
    print("The content of the encoded data is: {}\n".format(encoded_data))
    decoded_data = huffman_decoding(encoded_data, tree)
    print("The size of the decoded data is: {}\n".format(sys.getsizeof(decoded_data)))
    # NOTE(review): this label says "encoded" but prints the decoded data.
    print("The content of the encoded data is: {}\n".format(decoded_data))
    print('--------------------------------------------------------------')
# this method will print huffman tree
def inorder(root):
    """Debug helper: in-order traversal printing each node's data/frequency
    and the data of its immediate children."""
    if root is not None:
        inorder(root.left)
        print('Data: ', root.data, 'Freq: ', root.frequency)
        if root.right is not None:
            print('Right: ', root.right.data)
        if root.left is not None:
            print('Left: ', root.left.data)
        inorder(root.right)
# end method inorder(root)
def generate_encoded_data(root):
    """
    :param root: is a root of huffman tree
    :return: dictionary contains all codes for each letter in the text.
    """
    # Delegates to the recursive helper with an empty dict and empty code.
    return generate_encoded_data2(root, {}, '')
# helper method
def generate_encoded_data2(root, dic, code):
    """Recursive worker for generate_encoded_data: walk the tree appending
    '0' for left edges and '1' for right edges, recording the accumulated
    code of every letter-carrying node in `dic`.  Returns `dic`, or None
    when called on an empty tree."""
    if root is None:
        return None
    if root.left is not None:
        generate_encoded_data2(root.left, dic, code + '0')
    # Letter (or space) nodes are the coded symbols; merge nodes carry ints.
    if str(root.data).isalpha() or root.data == ' ':
        dic[root.data] = code
    if root.right is not None:
        generate_encoded_data2(root.right, dic, code + '1')
    return dic
def huffman_encoding(data):
    """
    :param data: the text to encode.
    :return: (encoded text as a '0'/'1' string, root of the huffman tree),
        or ('', None) for empty/None input.
    """
    # `not data` also covers None; the original tested len(data) first,
    # which raises TypeError when data is None.
    if not data:
        print('Please enter a valid data.')
        return '', None

    # Count the frequency of every character.
    count_dic = {}
    for ch in data:
        count_dic[ch] = count_dic.get(ch, 0) + 1

    # Seed the min-heap with one leaf node per distinct character.
    min_heap = PriorityQueue()
    for ch, freq in count_dic.items():
        new_node = Node(ch, freq)
        min_heap.push(new_node, new_node.frequency)

    # Phase 1: repeatedly merge the two least frequent nodes until a single
    # root remains; merge nodes carry a running int id as their data.
    count = 1
    while min_heap.size() >= 2:
        item_1 = min_heap.pop()
        item_2 = min_heap.pop()
        merged = Node(count, item_1.frequency + item_2.frequency, item_1, item_2)
        min_heap.push(merged, merged.frequency)
        count += 1
    root = min_heap.pop()

    # Phase 2: look up each character's bit string and concatenate.
    codes_ = generate_encoded_data(root)
    encoded = ''
    for char in data:
        if codes_.get(char) is not None:
            encoded += codes_.get(char)
    return encoded, root
def huffman_decoding(data, root):
    """
    :param data: the encoded text as a string of '0'/'1' characters.
    :param root: the root of the huffman tree.
    :return: the decoded text ('' for empty/None input).
    """
    # Two fixes: the original evaluated len(data) before any None check
    # (TypeError for None), and returned a ('', None) tuple here while
    # every other path returns a plain string.
    if not data:
        print('Please enter a valid data.')
        return ''
    decoded = ''
    curr = root
    for bit in data:
        # '0' -> left child, otherwise right child.
        curr = curr.left if bit == '0' else curr.right
        # A leaf carries a letter: emit it and restart from the root.
        if curr.is_leaf():
            decoded += curr.data
            curr = root
    return decoded
# Test case 1: normal sentence -----------------------------------
a_great_sentence = 'The bird is the word'
Print(a_great_sentence)
# Test case 2: empty input -----------------------------------
t1 = ''
Print(t1) # will print 'invalid text'
# Test case 3: repeated character plus one other (valid) -----------------------------------
t2 = 'AAAAAB'
Print(t2)
# Test case 4: only one distinct character -----------------------------------
t3 = 'AAAAA'
Print(t3) # will print 'invalid text'
|
9,718 | ae7fc034249b7dde6d6bca33e2e6c8f464284cfc | #!/usr/bin/env python3
import datetime
import time
import board
from busio import I2C
import adafruit_bme680
# Create library object using our Bus I2C port
i2c = I2C(board.SCL, board.SDA)
bme680 = adafruit_bme680.Adafruit_BME680_I2C(i2c, debug=False)
# change this to match the location's pressure (hPa) at sea level
bme680.sea_level_pressure = 1006.0
# Append sensor readings to the log file once per second.
# NOTE(review): the handle is never closed or flushed and the loop is
# infinite -- buffered samples may be lost on power cut; consider flushing
# each iteration.  `file` also shadows a (Python 2) builtin name.
file = open("/home/pi/Payload/src/sensory/burning_data.txt","a")
while True:
    # Timestamp each sample block.
    ts = time.time()
    st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
    file.write("\ntimestamp: %s " % st)
    print("\ntimestamp: %s " % st)
    print("Temperature: %0.1f C" % bme680.temperature)
    file.write("Temperature: %0.1f C" % bme680.temperature)
    print("Gas: %d ohm" % bme680.gas)
    file.write("Gas: %d ohm" % bme680.gas)
    print("Humidity: %0.1f %%" % bme680.humidity)
    file.write("Humidity: %0.1f %%" % bme680.humidity)
    print("Pressure: %0.3f hPa" % bme680.pressure)
    file.write("Pressure: %0.3f hPa" % bme680.pressure)
    print("Altitude = %0.2f meters" % bme680.altitude)
    file.write("Altitude = %0.2f meters" % bme680.altitude)
    time.sleep(1)  # one sample per second
#>>> st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
#import datetime
|
9,719 | 76664114382bdeb0bffb996e4dd4448b6c87520d | import sys
def ler (t):
    """Read every stdin line, split it on single spaces, and append the
    resulting token lists to `t` (mutated in place)."""
    i =0  # NOTE(review): unused
    for s in sys.stdin:
        l=s.split(" ")
        t.append(l)
def melhor (t):
    """Scan rows 1..n-1, comparing each row's column 2 against the number
    on the first row.

    NOTE(review): the original line `if(u<x)` was a SyntaxError (missing
    colon and body), so whatever the comparison was meant to do is absent
    from the source.  Only the loop structure is restored here; the
    intended effect still needs to be recovered.
    """
    i = 1
    x = int(t[0][0].strip("\n"))
    n = len(t)
    while i < n:
        u = int(t[i][2].strip())
        if u < x:
            pass  # TODO(review): body truncated in the original source
        i += 1
def vendedor():
    """Entry point: read the table from stdin (ler) and evaluate it (melhor)."""
    t=[]
    ler(t)
    melhor(t)
vendedor()
9,720 | e0874554c326bb11b53552e362bc8073bb57bc93 | import widget
import column
class Columns(widget.Widget):
    """Horizontal container splitting its area into equal-width Column children."""

    def __init__(self, parent):
        super(Columns, self).__init__(parent)
        # self.root.mouse.on_drag_release.append(self.on_drag_release)

    """
    def on_drag_release(self, x0, y0, x, y):
        if not self.contains_point(x0, y0):
            return None
        if not self.contains_point(x, y):
            return None
        idx_from = self.get_child_idx_at_point(x0, y0)
        idx_to = self.get_child_idx_at_point(x, y)
        self.reorder_children(idx_from, idx_to)
    """

    def add_column(self):
        """Append a new Column child widget."""
        col = column.Column(self)
        self.children.append(col)

    def update(self):
        """Recompute each child's geometry: equal vertical slices of this
        widget's area, laid out left to right."""
        area_sum = len(self.children)
        ratio_accumulator = 0
        for child in self.children:
            area_share = 1  # every column currently gets an equal share
            area_ratio = float(area_share)/area_sum
            x = self.x + ratio_accumulator * self.dx
            y = self.y
            dx = area_ratio * self.dx
            dy = self.dy
            child.resize(x, y, dx, dy)
            ratio_accumulator += area_ratio

    def on_reposition_window(self, target, x, y):
        """Move `target` into the column under (x, y), inserting after the
        window found at that point (index 0 when the column is empty)."""
        if self.contains_point(x, y):
            windows_from = target.parent
            windows_to = self.get_child_at_point(x, y).windows
            windows_from.detach_child(target)
            idx_to = windows_to.get_child_idx_at_point(x, y)
            if idx_to is None:
                idx_to = 0
            else:
                idx_to += 1
            windows_to.attach_child_at_idx(idx_to, target)
        return False
|
9,721 | 843901b65a556e57470f73be2657e9fd3c0facc6 | def parse(num):
strnum = str(num)
words = []
for item in range(len(strnum)-1, -1, -1):
words.append(strnum[item])
hundred = words[:3]
thousand = words[3:6]
million = words[6:len(words)]
hundred = hundred[::-1]
thousand = thousand[::-1]
million = million[::-1]
units = ['zero','one','two','three','four','five','six','seven','eight','nine']
tens = ['ten','eleven','twelve','thirteen','fourteen','fifteen','sixteen','seventeen','eighteen','nineteen']
tens_more = ['zero','ten','twenty','thirty','forty','fifty','sixty','seventy','eighty','ninety']
reads = []
if len(million)>0:
if len(million)==3:
num = int(million[0])
reads.append(units[num])
reads.append('hundred')
reads.append('and')
num = int(million[1])
if num>1:
reads.append(tens_more[num])
if num!=0:
num = int(million[2])
reads.append(units[num])
else:
num = int(million[1])
reads.append(tens[num])
if len(million)==2:
num = int(million[0])
if num>1:
reads.append(tens_more[num])
num = int(million[1])
if num!=0:
reads.append(units[num])
else:
num = int(million[1])
reads.append(tens[num])
if len(million)==1:
num = int(million[0])
reads.append(units[num])
reads.append('million')
reads.append('and')
if __name__ == "__main__":
parse(23456789) |
9,722 | b2a2e06c5db8b12acbc852bafc4ea869b006c1c8 | import itertools
import urllib
import word2vec
# MSD: http://corpus.leeds.ac.uk/mocky/ru-table.tab
# Universal: http://universaldependencies.org/ru/pos/index.html
def convert_pos_MSD_to_Universal(pos):
    """Map a Leeds MSD part-of-speech tag to a Universal Dependencies tag.

    Punctuation is tested first: in the original the final 'SENT'/'PUNC'
    branch was unreachable, because 'SENT' matched the earlier 'S' (ADP)
    prefix and 'PUNC' matched the earlier 'P' (PRON) prefix.
    """
    if pos.startswith('SENT') or pos.startswith('PUNC'):
        return 'PUNCT'
    elif pos.startswith('A'):
        return 'ADJ'
    elif pos.startswith('C'):
        return 'CCONJ'
    elif pos.startswith('I'):
        return 'INTJ'
    elif pos.startswith('M'):
        return 'NUM'
    elif pos.startswith('Nc'):
        return 'NOUN'
    elif pos.startswith('Np'):
        return 'PROPN'
    elif pos.startswith('N'):
        return 'NOUN'
    elif pos.startswith('P'):
        return 'PRON' # TODO: or DET
    elif pos.startswith('Q'):
        return 'PART'
    elif pos.startswith('R'):
        return 'ADV'
    elif pos.startswith('S'):
        return 'ADP'
    elif pos.startswith('V'):
        return 'VERB' # TODO: or AUX
    else:
        return 'X'
# ------------------
# get_dep_tree(sentence)
# ---
# Creates a word dependency tree from a sentence.
# Returns: deptree=(node, [deptree])
# Creates a deptree from the webservice response dictionary
def make_dep_tree(respDict, idx):
    """Build the dependency subtree rooted at token index *idx*.

    respDict maps token index -> token row, where row[6] holds the index
    of the token's head.  Index 0 is the artificial root and carries no
    token of its own.  Returns a tree of the form (node, [subtree, ...]).
    """
    node = respDict[idx] if idx != 0 else None
    # Children are the tokens whose head column (row[6]) points at idx.
    child_ids = [k for k in respDict if int(respDict[k][6]) == idx]
    subtrees = [make_dep_tree(respDict, child) for child in child_ids]
    return (node, subtrees)
def get_dep_tree(sentence):
    """Fetch and build a word-dependency tree for *sentence*.

    Queries the deptree.jental.name web service, parses its response into
    a {token_index: row} dict (the Universal POS is appended to each row),
    and returns the first tree under the artificial root, or None when
    the service produced no tree.
    """
    # BUGFIX (local imports): the file-level `import urllib` does not make
    # the `urllib.parse` / `urllib.request` submodules available.
    import urllib.parse
    import urllib.request

    url = 'http://deptree.jental.name/parse?' + urllib.parse.urlencode({'text': sentence})
    respRaw = urllib.request.urlopen(url)
    resp = respRaw.read()
    respStr = resp.decode('utf-8')
    # The service answers with a bracketed, comma-separated list of quoted,
    # tab-escaped CoNLL-like rows; strip brackets/quotes, split the
    # escaped tabs, then key rows by their integer token index.
    respList = [r[1:-1].split('\\t') for r in respStr[1:-1].split(',')]
    respDict = dict([(int(r[0]), r + [convert_pos_MSD_to_Universal(r[5])]) for r in respList])
    (root, trees) = make_dep_tree(respDict, 0)
    if len(trees) == 0:
        print('No tree', sentence, trees)
        return None
    else:
        return trees[0]
# ------------------
# filter_dep_tree(tree)
# ---
# Filters out invaluable parts of speech.
# Returns: deptree=(node, [deptree])
def filter_dep_tree(tree):
    """Drop low-content nodes (conjunctions, particles, ...) from a dep tree.

    Keeps only ADJ/NUM/NOUN/PROPN/ADV/VERB nodes; a removed node's kept
    descendants are spliced into its parent's level.  Returns a *list* of
    trees, since removing a node may leave several sibling subtrees.
    """
    node, children = tree
    kept_pos = ('ADJ', 'NUM', 'NOUN', 'PROPN', 'ADV', 'VERB')
    # Filter subtrees first; each recursive call yields a list of survivors.
    survivors = list(itertools.chain.from_iterable(
        filter_dep_tree(child) for child in children))
    if convert_pos_MSD_to_Universal(node[3]) in kept_pos:
        # Content word: keep this node, adopting the surviving subtrees.
        return [(node, survivors)]
    # Function word: splice its surviving subtrees upward.
    return survivors
# ------------------
# filter_dep_tree(tree)
# ---
# Prints a word dependency tree
def print_dep_tree(tree):
    """Pretty-print a dependency tree, indenting one space per depth level."""
    def _walk(subtree, depth):
        node, children = subtree
        # node[1] is the word form, node[3] a POS field.
        print(' ' * depth, node[1], node[3])
        for child in children:
            _walk(child, depth + 1)
    _walk(tree, 0)
|
9,723 | fb82724aab7e0819c9921d41dcb612b304b25753 | import pandas as pd
import numpy as np
# BUGFIX: was `import matplotlib as plt` — the top-level matplotlib package
# has no show(); the pyplot submodule provides the API used below.
import matplotlib.pyplot as plt
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# Enable Korean text in charts (Windows Malgun Gothic font).
from matplotlib import font_manager, rc, rcParams
font_name = font_manager.FontProperties(
    fname="c:/windows/Fonts/malgun.ttf").get_name()
rc('font', family=font_name)
rcParams['axes.unicode_minus'] = False  # keep minus signs rendering with this font
###
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# 100 rows x 3 columns of random values, indexed by 100 days from 2019-01-01.
df1 = pd.DataFrame(np.random.randn(100, 3), index=pd.date_range('1/1/2019', periods=100),
                   columns=['A','B','C']).cumsum()  # accumulate values row by row
print(df1)
# DataFrame.plot() uses matplotlib internally, so plotting works directly.
df1.plot()
plt.show()
|
9,724 | 4ea266d4f4c18efbba4204d7301652f8966c18a5 | # -*- coding: utf-8 -*-
"""
Animation practical output
The code that follows builds on the "Communications.py" file
Additional code that follows has in part been modified from that of
https://www.geog.leeds.ac.uk/courses/computing/practicals/python/agent-framework/part8/index.html
https://www.geog.leeds.ac.uk/courses/computing/practicals/python/agent-framework/part8/examples/animatedmodel.py
https://www.geog.leeds.ac.uk/courses/computing/practicals/python/agent-framework/part8/examples/animatedmodel2.py
"""
import random
import operator
import matplotlib.pyplot
import matplotlib.animation
import agentframeworkanimate
import csv
# Reading the in.txt file to create the environment.
# QUOTE_NONNUMERIC makes csv.reader convert every unquoted cell to float,
# so `environment` ends up as a 2-D list of numbers (a raster grid).
with open("in.txt", newline="") as raster:
    dataset = csv.reader(raster, quoting=csv.QUOTE_NONNUMERIC)
    environment = []
    for row in dataset:
        rowlist = []
        for value in row:
            rowlist.append(value)
        environment.append(rowlist)
# Setting initial parameters.
num_of_agents = 10
num_of_iterations = 100
neighbourhood = 20          # sharing radius, presumably in raster cells — TODO confirm
agents = []
# Variables to animate the model.
fig = matplotlib.pyplot.figure(figsize=(7, 7))
ax = fig.add_axes([0, 0, 1, 1])
ax.set_autoscale_on(False)
# Make the agents.
# Addition of environment as argument for Agent class to allow interaction between agents and environment.
# Addition of agents as argument for Agent class to allow agents to interact with each other.
for i in range(num_of_agents):
    agents.append(agentframeworkanimate.Agent(environment, agents))
carry_on = True             # animation stop flag, cleared inside update()
# Creating model animation.
def update(frame_number):
    """Advance the model one animation frame and redraw the scatter plot."""
    fig.clear()
    global carry_on
    # Run a block of iterations; agents act in a freshly shuffled order
    # each time so no agent is systematically processed first.
    for _ in range(num_of_iterations):
        random.shuffle(agents)
        for agent in agents:
            agent.move()
            agent.eat()
            agent.share_with_neighbours(neighbourhood)
            # Stopping condition: an agent's store reaches exactly 100.
            if agent.store == 100:
                carry_on = False
                print("Stopping condition met")
    # Redraw: environment raster plus one point per agent.
    matplotlib.pyplot.xlim(0, 99)
    matplotlib.pyplot.ylim(0, 99)
    matplotlib.pyplot.imshow(environment)
    for agent in agents:
        matplotlib.pyplot.scatter(agent.x, agent.y)
# Generator function to stop animation.
# Will stop animation after 10 iterations unless carry_on variable is set to False.
def gen_function(b=None):
    """Yield frame numbers until 100 frames have run or carry_on goes False.

    Used as the FuncAnimation frame source; the animation stops when this
    generator is exhausted.  *b* is unused and kept only for signature
    compatibility (it was previously a mutable default, a Python
    anti-pattern).
    """
    a = 0
    global carry_on
    # `and` (not bitwise `&`) is the intended logical conjunction and
    # short-circuits once carry_on is cleared.
    while a < 100 and carry_on:
        yield a
        a = a + 1
# Animation will run until the generator function is exhausted
# (100 frames, or earlier if update() clears carry_on).
#animation = matplotlib.animation.FuncAnimation(fig, update, interval=1, repeat=False, frames=10)
animation = matplotlib.animation.FuncAnimation(fig, update, frames=gen_function, repeat=False)
matplotlib.pyplot.show()
# Writing the final environment to a text file (runs after the plot window closes).
with open("out.txt", "w", newline="") as finalenviron:
    writer = csv.writer(finalenviron, delimiter=",")
    for row in environment:
        writer.writerow(row)
|
9,725 | c2b6e51622681ac916e860ed4ff5715808dff102 | import numpy as np
import matplotlib as plt
import math
from DoublePendulum import DP #imports useful modules and double pendulum class from DoublePendulum.py
import json
import pandas as pd
import copy
from pathlib import Path
# Read simulation parameters from the JSON config file.
with open('config.json') as config_file:
    initdata = json.load(config_file)
# Retrieve variables from the config file.
initMA = initdata['Mass A']
initMB = initdata['Mass B']
initLA = initdata['Length A']
initLB = initdata['Length B']
initAA = initdata['Angle A']          # degrees; converted to radians below
initAB = initdata['Angle B']
method = initdata['Method']           # 1 = Euler, 2 = Euler-Cromer, 3 = Runge-Kutta
timeStep = initdata['Time Step']
nCycles = initdata['Number of Cycles']
# Set initial conditions based on the config file.
# NOTE(review): the trailing positional arguments to DP are undocumented
# here — confirm their meaning against DoublePendulum.DP.__init__.
pend = DP(initMA,initMB,initLA,initLB,math.radians(initAA),math.radians(initAB),[0,0],[0,0],[0,0],[0,0],0,0,1,1,1,1,1,1,1)
pend.updCartesian()
pend.updEnergies()
data = []    # one row per time step: [t, energies..., positions..., momenta...]
time = 0
x1 = 0
x2 = 0
y1 = 0
y2 = 0
# Integrate with the method chosen in the config.  The three branches are
# near-duplicates differing only in the update call and in their debug
# prints; kept as-is to preserve exact console output.
# NOTE(review): copy.deepcopy on what appear to be scalar values is a
# no-op for immutables — confirm against the DP attribute types before
# simplifying.
if method == 1:
    # Forward Euler integration.
    for n in range(nCycles):
        #print(n)
        time += timeStep
        pend.updEuler(timeStep)
        pend.updCartesian()
        pend.updEnergies()
        pend.updMomentum()
        x1 = pend.xy1[0]
        x2 = pend.xy2[0]
        y1 = pend.xy1[1]
        y2 = pend.xy2[1]
        p11 = pend.p1[0]
        p12 = pend.p1[1]
        p21 = pend.p2[0]
        p22 = pend.p2[1]
        print(p22)
        item = [time, copy.deepcopy(pend.totalE), copy.deepcopy(pend.KE1), copy.deepcopy(pend.KE2), copy.deepcopy(pend.PE1), copy.deepcopy(pend.PE2), copy.deepcopy(x1), copy.deepcopy(x2), copy.deepcopy(y1), copy.deepcopy(y2), copy.deepcopy(p11), copy.deepcopy(p12), copy.deepcopy(p21), copy.deepcopy(p22)]
        data.append(item)
elif method == 2:
    # Euler-Cromer (semi-implicit Euler) integration.
    for n in range(nCycles):
        print(n)
        time += timeStep
        pend.updEulerCromer(timeStep)
        pend.updCartesian()
        pend.updEnergies()
        pend.updMomentum()
        x1 = pend.xy1[0]
        x2 = pend.xy2[0]
        y1 = pend.xy1[1]
        y2 = pend.xy2[1]
        p11 = pend.p1[0]
        p12 = pend.p1[1]
        p21 = pend.p2[0]
        p22 = pend.p2[1]
        item = [time, copy.deepcopy(pend.totalE), copy.deepcopy(pend.KE1), copy.deepcopy(pend.KE2), copy.deepcopy(pend.PE1), copy.deepcopy(pend.PE2), copy.deepcopy(x1), copy.deepcopy(x2), copy.deepcopy(y1), copy.deepcopy(y2),copy.deepcopy(p11),copy.deepcopy(p12),copy.deepcopy(p21),copy.deepcopy(p22)]
        data.append(item)
elif method == 3:
    # Fourth-order Runge-Kutta integration.
    for n in range(nCycles):
        print(n)
        time += timeStep
        pend.updRungeKutta(timeStep)
        pend.updCartesian()
        pend.updEnergies()
        pend.updMomentum()
        x1 = pend.xy1[0]
        x2 = pend.xy2[0]
        y1 = pend.xy1[1]
        y2 = pend.xy2[1]
        p11 = pend.p1[0]
        p12 = pend.p1[1]
        p21 = pend.p2[0]
        p22 = pend.p2[1]
        item = [time, copy.deepcopy(pend.totalE), copy.deepcopy(pend.KE1), copy.deepcopy(pend.KE2), copy.deepcopy(pend.PE1), copy.deepcopy(pend.PE2), copy.deepcopy(x1), copy.deepcopy(x2), copy.deepcopy(y1), copy.deepcopy(y2),copy.deepcopy(p11),copy.deepcopy(p12),copy.deepcopy(p21),copy.deepcopy(p22)]
        data.append(item)
else:
    print('invalid method selection, update config file')
    exit()
# Persist the run as a pickled object array next to the script.
np.save(Path.cwd()/'datafile', data, allow_pickle=True)
print('data file saved')
|
9,726 | e686d8617360c5a3ce35bd4d2bdeb2376b33f53a | #!/usr/bin/env python
import re
# NOTE: Python 2 script (print statements, xrange).
# Generates Material Design Lite card markup, one card per PDF handout,
# pairing each PDF with its session title by line position.
pdfs_file = './pdf_names_2017.txt'
sessions_file = './session_names_2017.txt'
with open(pdfs_file) as f:
    pdf_names = f.read().splitlines()
with open(sessions_file) as f:
    session_names = f.read().splitlines()
#for i in xrange(0,len(pdf_names)):
#    print str(i+1).zfill(3) + '_-_' + pdf_names[i][:-4] + '_-_' + session_names[i] + pdf_names[i][-4:]
# HTML fragments composed per card: prefix, then title/body, then the
# handout link split around the PDF file name.
card_pre = """
		<section class="section--center mdl-grid mdl-grid--no-spacing mdl-shadow--2dp">
          <header class="section__play-btn mdl-cell mdl-cell--3-col-desktop mdl-cell--2-col-tablet mdl-cell--4-col-phone mdl-color--teal-100 mdl-color-text--white">
            <i class="material-icons">record_voice_over</i>
          </header>
          <div class="mdl-card mdl-cell mdl-cell--9-col-desktop mdl-cell--6-col-tablet mdl-cell--4-col-phone">
            <div class="mdl-card__supporting-text">
"""
card_content = """
              <h4>Incidental_Findings_-_Introduction_and_Overview</h4>
              Monday_0700_LBerland
"""
card_post_1 = """
            </div>
            <div class="mdl-card__actions">
              <a href="pdf/"""
card_post_2 = """" target="_blank" class="mdl-button">Handout</a>
            </div>
          </div>
        </section>
"""
# Reference markup kept for comparison (a bare string literal, not executed output).
"""
		<section class="section--center mdl-grid mdl-grid--no-spacing mdl-shadow--2dp">
          <header class="section__play-btn mdl-cell mdl-cell--3-col-desktop mdl-cell--2-col-tablet mdl-cell--4-col-phone mdl-color--teal-100 mdl-color-text--white">
            <i class="material-icons">record_voice_over</i>
          </header>
          <div class="mdl-card mdl-cell mdl-cell--9-col-desktop mdl-cell--6-col-tablet mdl-cell--4-col-phone">
            <div class="mdl-card__supporting-text">
              <h4>Incidental_Findings_-_Introduction_and_Overview</h4>
              Monday_0700_LBerland
            </div>
            <div class="mdl-card__actions">
              <a href="#" class="mdl-button">Handout</a>
            </div>
          </div>
        </section>
"""
# Emit one card per PDF; [:-4] strips the '.pdf' extension for the body text.
for i in xrange(0,len(pdf_names)):
    print card_pre + "<h4>" + session_names[i] + "</h4>" + pdf_names[i][:-4].replace("_"," ") + card_post_1 + pdf_names[i] + card_post_2
|
9,727 | 5cb390b06026bc0899c0b10dc93f3ec1f2ffefa6 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# __author__ = "Sponge_sy"
# Date: 2021/9/11
import numpy
from tqdm import tqdm
from bert4keras.tokenizers import Tokenizer
from bert4keras.models import build_transformer_model
from bert4keras.snippets import sequence_padding, DataGenerator
from utils import *
class data_generator(DataGenerator):
    """Data generator that pairs a fixed pattern with each text sample.

    Each sample is encoded as a BERT sentence pair for the NSP head: the
    pattern either prefixes (is_pre=True) or suffixes the text.  Yields
    ([padded_token_ids, padded_segment_ids], None) batches.

    CLEANUP: removed the unused `batch_output_ids` and `target_ids`
    locals from the original implementation.
    """

    def __init__(self, pattern="", is_pre=True, *args, **kwargs):
        super(data_generator, self).__init__(*args, **kwargs)
        self.pattern = pattern   # textual template, e.g. "This is sports news"
        self.is_pre = is_pre     # pattern placed before (True) or after the text

    def __iter__(self, random=False):
        batch_token_ids, batch_segment_ids = [], []
        for is_end, text in self.sample(random):
            # Encode (pattern, text) or (text, pattern) as a sentence pair,
            # truncated to the module-level `maxlen`.
            if self.is_pre:
                token_ids, segment_ids = tokenizer.encode(first_text=self.pattern, second_text=text, maxlen=maxlen)
            else:
                token_ids, segment_ids = tokenizer.encode(first_text=text, second_text=self.pattern, maxlen=maxlen)
            batch_token_ids.append(token_ids[:])
            batch_segment_ids.append(segment_ids)
            if len(batch_token_ids) == self.batch_size or is_end:
                # Pad to the longest sequence in the batch, yield, reset.
                batch_token_ids = sequence_padding(batch_token_ids)
                batch_segment_ids = sequence_padding(batch_segment_ids)
                yield [batch_token_ids, batch_segment_ids], None
                batch_token_ids, batch_segment_ids = [], []
def predict(data_generator_list, data):
    """Zero-shot predict: score every pattern against every sample.

    For each pattern's generator, run the NSP model and collect the
    positive-class logit per sample.  A sample's prediction is the index
    of the pattern with the highest logit.

    Returns (preds, samples_logits): preds[i] is the predicted pattern
    index for data[i]; samples_logits[i] lists that sample's logit under
    every pattern.
    """
    print("\n*******************Start to Zero-Shot predict*******************", flush=True)
    patterns_logits = [[] for _ in patterns]
    samples_logits = [[] for _ in data]
    # CLEANUP: the loop variable was named `data_generator`, shadowing the
    # data_generator class defined above; renamed to `generator`.
    for i, generator in enumerate(data_generator_list):
        print("\nPattern{}".format(i), flush=True)
        counter = 0
        for (x, _) in tqdm(generator):
            outputs = model.predict(x[:2])
            for out in outputs:
                # out[0] is the NSP "is next sentence" score for the pair.
                logit_pos = out[0].T
                patterns_logits[i].append(logit_pos)
                samples_logits[counter].append(logit_pos)
                counter += 1
    preds = []
    for j in range(len(patterns_logits[0])):
        pred = numpy.argmax([logits[j] for logits in patterns_logits])
        preds.append(int(pred))
    return preds, samples_logits
if __name__ == "__main__":
    # Load the hyper-parameters-----------------------------------------------------------
    maxlen = 128  # The max length 128 is used in our paper
    batch_size = 40  # Will not influence the results
    # Choose a model----------------------------------------------------------------------
    # Recommend to use 'uer-mixed-bert-base'
    # model_names = ['google-bert', 'google-bert-small', 'google-bert-zh',
    #                'hfl-bert-wwm', 'hfl-bert-wwm-ext',
    #                'uer-mixed-bert-tiny', 'uer-mixed-bert-small',
    #                'uer-mixed-bert-base', 'uer-mixed-bert-large']
    model_name = 'uer-mixed-bert-base'
    # Choose a dataset----------------------------------------------------------------------
    # dataset_names = ['eprstmt', 'tnews', 'csldcp', 'iflytek']
    # dataset_name = 'eprstmt'
    # Load model and dataset class
    bert_model = Model(model_name=model_name)
    # Create a template: one NSP "hypothesis" sentence per candidate label.
    label_names = ['entertainment', 'sports', 'music', 'games', 'economics', 'education']
    patterns = ["This is {} news".format(label) for label in label_names]
    # Prefix or Suffix: whether the pattern precedes or follows the text.
    is_pre = True
    # Load the demo set--------------------------------------------------------------------
    demo_data_en = ['FIFA unveils biennial World Cup plan, UEFA threatens boycott',
                    'COVID vaccines hold up against severe Delta: US data',
                    'Justin Drew Bieber was born on March 1, 1994 at St. ',
                    'Horizon launches latest chip to take on global rivals',
                    'Twitch video gamers rise up to stop ‘hate raids’']
    demo_data = demo_data_en
    # One generator per pattern, all iterating the same demo samples.
    demo_generator_list = []
    for p in patterns:
        demo_generator_list.append(data_generator(pattern=p, is_pre=is_pre, data=demo_data, batch_size=batch_size))
    # Build BERT model---------------------------------------------------------------------
    tokenizer = Tokenizer('.' + bert_model.dict_path, do_lower_case=True)
    # Load BERT model with NSP head
    model = build_transformer_model(
        config_path='.' + bert_model.config_path, checkpoint_path='.' + bert_model.checkpoint_path, with_nsp=True,
    )
    # Zero-Shot predict and evaluate-------------------------------------------------------
    preds, samples_logits = predict(demo_generator_list, demo_data)
    for i, (p, d) in enumerate(zip(preds, demo_data)):
        pred_label = label_names[p]
        print("Sample {}:".format(i))
        print("Original Text: {}".format(d))
        print("Predict label: {}".format(pred_label))
        print("Logits: {}".format(samples_logits[i]))
        print()
|
9,728 | c9bc331f4805a956146619c59d183fc3bcbe47cb | from conans import ConanFile, CMake, tools
import os
class Demo(ConanFile):
    """Conan recipe for the Demo package (CMake-based, depends on TestLib)."""
    name = "Demo"
    version = "0.1"
    license = "<Put the package license here>"
    url = "<Package recipe repository url here, for issues about the package>"
    description = "<Description of Testlib here>"
    settings = "os", "compiler", "build_type", "arch"
    options = {"shared": [True, False]}
    default_options = "shared=False"
    generators = "cmake"
    exports_sources = "src/*"
    requires = "TestLib/0.1@gbmhunter/testing"

    def build(self):
        """Configure and build the CMake project under src/."""
        cmake = CMake(self)
        cmake.configure(source_folder="src/")
        # NOTE(review): leftover debug print of the TestLib cpp_info; remove
        # or replace with self.output.info() once no longer needed.
        print('BLAHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH = ' + str(self.deps_cpp_info["TestLib"]))
        cmake.build()

    def imports(self):
        """Copy dependency shared libraries next to the built binaries."""
        self.copy("*.dll", dst="bin", src="bin")
        self.copy("*.dylib*", dst="bin", src="lib")
        self.copy('*.so*', dst='bin', src='lib')

    def test(self):
        """Run the example binary (skipped when cross-building).

        NOTE(review): a test() method normally belongs in a test_package
        recipe, not the package recipe itself — confirm intent.
        """
        if not tools.cross_building(self.settings):
            os.chdir("bin")
            self.run(".%sexample" % os.sep)

    def package(self):
        """Package headers and built libraries/binaries."""
        self.copy("*.h", dst="include", src="src")
        self.copy("*.lib", dst="lib", keep_path=False)
        self.copy("*.dll", dst="bin", keep_path=False)
        self.copy("*.dylib*", dst="lib", keep_path=False)
        self.copy("*.so", dst="lib", keep_path=False)
        self.copy("*.a", dst="lib", keep_path=False)
9,729 | b4d48427dddc7c0240cf05c003cbf7b0163279ee | from django.contrib import admin
from .models import (AddressLink, Address, Child, Citation,
Configuration, Event, Exclusion, FactType,
Family, Group, Label, LinkAncestry,
Link, MediaLink, Multimedia, Name,
Person, Place, ResearchItem, Research,
Role, Source, SourceTemplate, Url,
Witness)
from . import EXODUS_DB_NAME
from .utils.admin import MultiDBModelAdmin
from .utils.rootsmagic import read_and_pprint_date
class RootsMagicModelAdmin(MultiDBModelAdmin):
    """Base admin that routes all queries for these models to the RootsMagic DB."""
    using = EXODUS_DB_NAME
# One ModelAdmin per RootsMagic table.  Classes with `pass` accept the
# Django admin defaults; the rest declare list_display columns (commented
# entries are intentionally hidden) and raw_id_fields for FK widgets that
# would otherwise load huge dropdowns.
class AddressLinkAdmin(RootsMagicModelAdmin):
    list_display = [
        "id",
        "owner_type",
        "address",
        "owner_id",
        "address_number",
        "details",
    ]

class AddressAdmin(RootsMagicModelAdmin):
    pass

class ChildAdmin(RootsMagicModelAdmin):
    list_display = [
        "record_id",
        "child",
        "family",
        "father_relationship",
        "mother_relationship",
        "child_order",
        "is_private",
        "father_proof",
        "mother_proof",
        "note",
    ]
    raw_id_fields = [
        'child',
        'family',
    ]

class CitationAdmin(RootsMagicModelAdmin):
    list_display = [
        "id",
        "owner_type",
        "source_id",
        "owner_id",
        "quality",
        "is_private",
        "comments",
        "actual_text",
        "reference_number",
        "flags",
        # "fields",
    ]

class ConfigurationAdmin(RootsMagicModelAdmin):
    pass

class EventAdmin(RootsMagicModelAdmin):
    list_display = [
        "id",
        "event_type",
        "owner",
        "owner_type",
        "owner_id",
        "family",
        "place",
        "site",
        # "date",
        "pretty_date",
        "sort_date",
        "is_primary",
        "is_private",
        "proof",
        "status",
        "edit_date",
        "sentence",
        # "details",
        # "note",
    ]

    # Render the packed RootsMagic date column as human-readable text.
    # (Duplicated on MultimediaAdmin below.)
    def pretty_date(self, obj):
        return read_and_pprint_date(obj.date)
    pretty_date.short_description = "Date"

class ExclusionAdmin(RootsMagicModelAdmin):
    pass

class FactTypeAdmin(RootsMagicModelAdmin):
    list_display = [
        "id",
        "owner_type",
        "name",
        "abbreviation",
        "gedcom_tag",
        "use_value",
        "use_date",
        "use_place",
        "sentence",
        "flags",
    ]

class FamilyAdmin(RootsMagicModelAdmin):
    list_display = [
        "id",
        "father",
        "mother",
        "child",
        "husband_order",
        "wife_order",
        "is_private",
        "proof",
        "spouse_label",
        "father_label",
        "mother_label",
        # "note",
    ]

class GroupAdmin(RootsMagicModelAdmin):
    pass

class LabelAdmin(RootsMagicModelAdmin):
    pass

class LinkAncestryAdmin(RootsMagicModelAdmin):
    pass

class LinkAdmin(RootsMagicModelAdmin):
    list_display = [
        "id",
        "ext_system",
        "link_type",
        "rootsmagic",
        "ext_id",
        "modified",
        "ext_version",
        "ext_date",
        "status",
        "note",
    ]

class MediaLinkAdmin(RootsMagicModelAdmin):
    list_display = [
        "link_id",
        "media",
        "owner",
        "owner_type",
        "owner_id",
        "is_primary",
        "include_1",
        "include_2",
        "include_3",
        "include_4",
        "sort_order",
        "rectangle_left",
        "rectangle_top",
        "rectangle_right",
        "rectangle_bottom",
        "note",
        "caption",
        "reference_number",
        "date",
        "sort_date",
        # "description",
    ]

class MultimediaAdmin(RootsMagicModelAdmin):
    list_display = [
        "id",
        "media_type",
        "media_path",
        "media_file",
        "url",
        "thumbnail",
        "caption",
        "reference_number",
        # "date",
        "pretty_date",
        "sort_date",
        # "description",
    ]

    # Same packed-date rendering as EventAdmin.pretty_date.
    def pretty_date(self, obj):
        return read_and_pprint_date(obj.date)
    pretty_date.short_description = "Date"

class NameAdmin(RootsMagicModelAdmin):
    list_display = [
        "id",
        "owner",
        "surname",
        "given",
        "prefix",
        "suffix",
        "nickname",
        "name_type",
        "date",
        "sort_date",
        "is_primary",
        "is_private",
        "proof",
        "edit_date",
        "sentence",
        # "note",
        "birth_year",
        "death_year",
    ]

class PersonAdmin(RootsMagicModelAdmin):
    list_display = [
        "id",
        'primary_name',
        "sex_short",
        "edit_date",
        "parent",
        "spouse",
        "color",
        "relate_1",
        "relate_2",
        "flags",
        "is_living",
        "is_private",
        "proof",
        "unique_id",
        "bookmark",
        # "note",
    ]

class PlaceAdmin(RootsMagicModelAdmin):
    list_display = [
        "id",
        "place_type",
        "name",
        "abbreviation",
        "normalized",
        "master_place",
        # "latitude",
        # "longitude",
        "pretty_latlong",
        "exact_latituate_longitude",
        "note",
    ]
    raw_id_fields = [
        "master_place"
    ]
    readonly_fields = [
        "pretty_latlong"
    ]

class ResearchItemAdmin(RootsMagicModelAdmin):
    pass

class ResearchAdmin(RootsMagicModelAdmin):
    pass

class RoleAdmin(RootsMagicModelAdmin):
    list_display = [
        "id",
        "role_name",
        "event_type",
        "role_type",
        "sentence",
    ]

class SourceAdmin(RootsMagicModelAdmin):
    raw_id_fields = ['template']

class SourceTemplateAdmin(RootsMagicModelAdmin):
    pass

class UrlAdmin(RootsMagicModelAdmin):
    list_display = [
        "id",
        "owner_type",
        "owner_id",
        "link_type",
        "name",
        "url",
        "note",
    ]

class WitnessAdmin(RootsMagicModelAdmin):
    list_display = [
        "id",
        "event",
        "person",
        "witness_order",
        "role",
        "sentence",
        "note",
        "given",
        "surname",
        "prefix",
        "suffix",
    ]
# Register every model with its admin class defined above.
admin.site.register(AddressLink, AddressLinkAdmin)
admin.site.register(Address, AddressAdmin)
admin.site.register(Child, ChildAdmin)
admin.site.register(Citation, CitationAdmin)
admin.site.register(Configuration, ConfigurationAdmin)
admin.site.register(Event, EventAdmin)
admin.site.register(Exclusion, ExclusionAdmin)
admin.site.register(FactType, FactTypeAdmin)
admin.site.register(Family, FamilyAdmin)
admin.site.register(Group, GroupAdmin)
admin.site.register(Label, LabelAdmin)
admin.site.register(LinkAncestry, LinkAncestryAdmin)
admin.site.register(Link, LinkAdmin)
admin.site.register(MediaLink, MediaLinkAdmin)
admin.site.register(Multimedia, MultimediaAdmin)
admin.site.register(Name, NameAdmin)
admin.site.register(Person, PersonAdmin)
admin.site.register(Place, PlaceAdmin)
admin.site.register(ResearchItem, ResearchItemAdmin)
admin.site.register(Research, ResearchAdmin)
admin.site.register(Role, RoleAdmin)
admin.site.register(Source, SourceAdmin)
admin.site.register(SourceTemplate, SourceTemplateAdmin)
admin.site.register(Url, UrlAdmin)
admin.site.register(Witness, WitnessAdmin)
|
9,730 | 3a96ede91069df0c71905415e598dbbd9d3056fd | import os
import sys
import re
import traceback
import logging
import queue
import threading
from logging.handlers import TimedRotatingFileHandler
from pathlib import Path
import click
import inotify.adapters
from inotify.constants import (IN_ATTRIB, IN_DELETE, IN_MOVED_FROM,
IN_MOVED_TO, IN_CLOSE_WRITE)
from lxd_image_server.simplestreams.images import Images
from lxd_image_server.tools.cert import generate_cert
from lxd_image_server.tools.operation import Operations
from lxd_image_server.tools.mirror import MirrorManager
from lxd_image_server.tools.config import Config
logger = logging.getLogger('lxd-image-server')
event_queue = queue.Queue()
def threaded(fn):
    """Decorator: run the wrapped function in a background thread.

    The wrapper returns the started Thread (previously None) so callers
    may join it; existing callers that ignore the return value are
    unaffected.  functools.wraps preserves the wrapped function's
    name/docstring for logging and debugging.
    """
    from functools import wraps  # local: file does not import functools at top level

    @wraps(fn)
    def wrapper(*args, **kwargs):
        t = threading.Thread(target=fn, args=args, kwargs=kwargs)
        t.start()
        return t
    return wrapper
def configure_log(log_file, verbose=False):
    """Attach a handler to the module logger.

    'STDOUT'/'STDERR' select console streams; any other value is treated
    as a file path with weekly rotation (4 backups kept).  *verbose*
    switches the level from INFO to DEBUG.
    """
    if log_file == 'STDOUT':
        handler = logging.StreamHandler(sys.stdout)
    elif log_file == 'STDERR':
        handler = logging.StreamHandler(sys.stderr)
    else:
        handler = TimedRotatingFileHandler(
            log_file,
            when="d", interval=7, backupCount=4)
    log_format = '[%(asctime)s] [LxdImgServer] [%(levelname)s] %(message)s'
    handler.setFormatter(logging.Formatter(log_format))
    logger.setLevel('DEBUG' if verbose else 'INFO')
    logger.addHandler(handler)
def needs_update(events):
    """Filter inotify events down to those that require a metadata rebuild.

    An event qualifies when its filename looks like an image timestamp
    (YYYYMMDD_HH:MM) or its mask contains a content-changing event type.
    Returns the list of qualifying events.
    """
    # FIX: raw string — '\d' in a plain literal is an invalid escape
    # sequence (DeprecationWarning today, an error in future Pythons).
    # Compiled once per call instead of re.match re-parsing per event.
    timestamp_re = re.compile(r'\d{8}_\d{2}:\d{2}')
    relevant = ('IN_MOVED_FROM', 'IN_MOVED_TO', 'IN_DELETE', 'IN_CLOSE_WRITE')
    modified_files = []
    for event in list(events):
        if timestamp_re.match(event[3]) or \
                any(k in event[1] for k in relevant):
            logger.debug('Event: PATH=[{}] FILENAME=[{}] EVENT_TYPES={}'
                         .format(event[2], event[3], event[1]))
            modified_files.append(event)
    return modified_files
def config_inotify_setup(skipWatchingNonExistent: bool) -> inotify.adapters.Inotify:
    """Build an inotify watcher covering every configured path.

    Existing files are watched for write/delete; existing directories for
    all events.  For a path that does not exist yet (unless
    skipWatchingNonExistent), the nearest existing ancestor directory is
    watched instead so the path's creation is noticed.
    """
    i = inotify.adapters.Inotify()
    watchedDirs = {}  # ancestor dirs already watched, to avoid duplicates
    for p in Config.paths:
        if os.path.exists(p):
            if os.path.isfile(p):
                logger.debug("Watching existing config file {}".format(p))
                i.add_watch(p, mask= inotify.constants.IN_CLOSE_WRITE | inotify.constants.IN_DELETE)
            else:
                logger.debug("Watching existing config directory {}".format(p))
                i.add_watch(p) # SEEME: all events?
        elif not skipWatchingNonExistent:
            # Walk up until an existing ancestor directory is found.
            (d, n) = os.path.split(p)
            while not os.path.exists(d):
                (d, n) = os.path.split(d)
            if d not in watchedDirs:
                i.add_watch(d, inotify.constants.IN_DELETE | inotify.constants.IN_CLOSE_WRITE | inotify.constants.IN_CREATE)
                logger.debug("Watching directory {} as base for {}".format(d, p))
                watchedDirs[d] = True
    return i
@threaded
def update_config(skipWatchingNonExistent = True):
    """Watch configuration paths and reload Config when any of them change.

    Runs forever in a background thread (via @threaded).  After a reload
    the inotify watcher is rebuilt, because watched files may have been
    replaced or newly created.
    """
    i = config_inotify_setup(skipWatchingNonExistent)
    while True:
        reload = False
        for event in i.event_gen(yield_nones=False):
            (_, mask, dir, file) = event
            fp = os.path.join(dir, file).rstrip(os.path.sep)
            # Reload when the event touches a watched path itself or a
            # directory that is one of the watched paths.
            for p in Config.paths:
                if p == fp or (dir == p):
                    reload = True
                    break
            if reload:
                break
        if reload:
            logger.debug("Will reload configuration")
            Config.reload_data()
            # BUGFIX: config_inotify_setup() was called without its required
            # positional argument here, raising TypeError on every reload.
            i = config_inotify_setup(skipWatchingNonExistent)
            MirrorManager.update_mirror_list()
        else:
            logger.debug("No need to reload configuration")
@threaded
def update_metadata(img_dir, streams_dir):
    """Consume batches of file events from event_queue and rebuild metadata.

    Runs forever in a background thread (via @threaded); each queue item
    is a list of inotify events already filtered by needs_update().
    """
    MirrorManager.img_dir = img_dir
    MirrorManager.update_mirror_list()
    while True:
        events = event_queue.get()  # blocks until the watcher enqueues work
        ops = Operations(events, str(Path(img_dir).resolve()))
        if ops:
            logger.info('Updating server: %s', ','.join(
                str(x) for x in ops.ops))
            images = Images(str(Path(streams_dir).resolve()), logger=logger)
            images.update(ops.ops)
            images.save()
            MirrorManager.update()
            logger.info('Server updated')
def fix_permissions(path):
    """Recursively chmod *path* and everything beneath it to mode 0o775."""
    mode = 0o775
    Path(path).chmod(mode)
    for root, dirs, files in os.walk(path):
        # Files first, then directories — same order as the original loops.
        for name in files + dirs:
            Path(root, name).chmod(mode)
@click.group()
@click.option('--log-file', default='./lxd-image-server.log',
              show_default=True)
@click.option('--verbose', help='Sets log level to debug',
              is_flag=True, default=False)
def cli(log_file, verbose):
    """Root CLI group: sets up logging before any subcommand runs."""
    configure_log(log_file, verbose)
@cli.command()
@click.option('--img_dir', default='/var/www/simplestreams/images',
              show_default=True,
              type=click.Path(exists=True, file_okay=False,
                              resolve_path=True),
              callback=lambda ctx, param, val: Path(val))
@click.option('--streams_dir', default='/var/www/simplestreams/streams/v1',
              show_default=True,
              type=click.Path(exists=True, file_okay=False,
                              resolve_path=True))
@click.pass_context
def update(ctx, img_dir, streams_dir):
    """Rebuild the simplestreams metadata for the whole image tree."""
    logger.info('Updating server')
    img_dir = Path(img_dir).expanduser().resolve()
    streams_dir = Path(streams_dir).expanduser().resolve()
    images = Images(str(Path(streams_dir).resolve()), rebuild=True, logger=logger)
    # Generate a fake create-directory event so Operations walks the
    # entire image tree instead of a changed subset.
    fake_events = [
        (None, ['IN_ISDIR', 'IN_CREATE'],
         str(img_dir.parent), str(img_dir.name))
    ]
    operations = Operations(fake_events, str(img_dir))
    images.update(operations.ops)
    images.save()
    logger.info('Server updated')
@cli.command()
@click.option('--root_dir', default='/var/www/simplestreams',
              show_default=True)
@click.option('--ssl_dir', default='/etc/nginx/ssl', show_default=True,
              callback=lambda ctx, param, val: Path(val))
@click.option('--ssl_skip', default=False, is_flag=True)
@click.option('--nginx_skip', default=False, is_flag=True)
@click.pass_context
def init(ctx, root_dir, ssl_dir, ssl_skip, nginx_skip):
    """One-time setup: directories, self-signed cert, nginx site, first index."""
    root_dir = Path(root_dir).expanduser().resolve()
    if not Path(root_dir).exists():
        logger.error('Root directory does not exists')
    else:
        # Without nginx there is no consumer for the certs either.
        if nginx_skip:
            ssl_skip = True
        if not ssl_skip:
            if not ssl_dir.exists():
                os.makedirs(str(ssl_dir))
            if not (ssl_dir / 'nginx.key').exists():
                generate_cert(str(ssl_dir))
        img_dir = str(Path(root_dir, 'images'))
        streams_dir = str(Path(root_dir, 'streams/v1'))
        if not Path(img_dir).exists():
            os.makedirs(img_dir)
        if not Path(streams_dir).exists():
            os.makedirs(streams_dir)
        if not nginx_skip:
            # Enable the pre-installed nginx site and reload the server.
            # NOTE(review): os.system with a fixed string is acceptable
            # here (no untrusted input), but subprocess.run would be safer.
            conf_path = Path('/etc/nginx/sites-enabled/simplestreams.conf')
            if not conf_path.exists():
                conf_path.symlink_to(
                    '/etc/nginx/sites-available/simplestreams.conf')
                os.system('nginx -s reload')
        # Build the first metadata index if none exists yet.
        if not Path(root_dir, 'streams', 'v1', 'images.json').exists():
            ctx.invoke(update, img_dir=Path(root_dir, 'images'),
                       streams_dir=Path(root_dir, 'streams', 'v1'))
        fix_permissions(img_dir)
        fix_permissions(streams_dir)
@cli.command()
@click.option('--img_dir', default='/var/www/simplestreams/images',
              show_default=True,
              type=click.Path(exists=True, file_okay=False,
                              resolve_path=True))
@click.option('--streams_dir', default='/var/www/simplestreams/streams/v1',
              type=click.Path(exists=True, file_okay=False,
                              resolve_path=True), show_default=True)
@click.option('--skip-watch-config-non-existent', default=False, type=bool, is_flag=True)
@click.pass_context
def watch(ctx, img_dir, streams_dir, skip_watch_config_non_existent: bool):
    """Watch the image tree forever, feeding change batches to the updater thread."""
    path_img_dir = str(Path(img_dir).expanduser().resolve())
    path_streams_dir = str(Path(streams_dir).expanduser().resolve())
    logger.info("Starting watch process")
    Config.load_data()
    # Launch background threads.
    # SEEME: in case an event will come from watching config files, there is a race condition between update_config
    # thread using indirectly MirrorManager.img_dir and thread update_metadata setting MirrorManager.img_dir
    # Also, race condition on calling MirrorManager.update_mirror_list() in both threads.
    update_config(skip_watch_config_non_existent)
    update_metadata(path_img_dir, path_streams_dir)
    logger.debug("Watching image directory {}".format(path_img_dir))
    i = inotify.adapters.InotifyTree(path_img_dir,
                                     mask=(IN_ATTRIB | IN_DELETE |
                                           IN_MOVED_FROM | IN_MOVED_TO |
                                           IN_CLOSE_WRITE))
    # Poll in 15 s windows so events arrive in batches, then enqueue only
    # the ones that actually require a metadata rebuild.
    while True:
        events = i.event_gen(yield_nones=False, timeout_s=15)
        files_changed = needs_update(events)
        if files_changed:
            event_queue.put(files_changed)
def main():
    """Entry point: run the CLI; log any unhandled exception and exit 1."""
    try:
        sys.exit(cli())
    except Exception:
        logger.error(traceback.format_exc())
        sys.exit(1)
|
9,731 | 0726a4fa3af196e2ba1592019f09afb0e7bb47d7 | import os
import requests
def download(url: str, dest_folder: str):
    """Download *url* into *dest_folder*, streaming to disk in 8 KiB chunks.

    The destination file name is derived from the last URL path segment
    (spaces replaced with underscores).  Failures are printed, not raised.
    """
    # https://stackoverflow.com/a/56951135/8761164
    if not os.path.exists(dest_folder):
        os.makedirs(dest_folder)  # create folder if it does not exist

    filename = url.split('/')[-1].replace(" ", "_")  # be careful with file names
    file_path = os.path.join(dest_folder, filename)

    # Context-manage the response so the connection is always released.
    with requests.get(url, stream=True) as r:
        if not r.ok:
            print("Download failed: status code {}\n{}".format(r.status_code, r.text))
            return
        print("saving to", os.path.abspath(file_path))
        with open(file_path, 'wb') as f:
            for chunk in r.iter_content(chunk_size=1024 * 8):
                # iter_content can yield empty keep-alive chunks; skip them.
                if chunk:
                    f.write(chunk)
            # PERF FIX: flush/fsync once after the loop — the original
            # fsynced after every 8 KiB chunk, throttling large downloads.
            f.flush()
            os.fsync(f.fileno())
def parse_lat(lat: int):
    """Format an integer latitude as an ASTER tile token, e.g. 47 -> 'N47', -5 -> 'S05'."""
    hemisphere = 'N' if lat >= 0 else 'S'
    # Zero-pad to two digits, matching the tile naming scheme.
    return '{}{:02d}'.format(hemisphere, abs(lat))
def parse_long(long: int):
    """Format an integer longitude as an ASTER tile token, e.g. 43 -> 'E043', -14 -> 'W014'."""
    hemisphere = 'E' if long >= 0 else 'W'
    # Zero-pad to three digits, matching the tile naming scheme.
    return '{}{:03d}'.format(hemisphere, abs(long))
if __name__=='__main__':
    # Download every ASTER GDEM tile covering roughly Europe/North Africa:
    # latitudes 47..22 (north to south), longitudes -14..42 (west to east).
    for lat in range(47, 21, -1):
        for long in range(-14, 43, 1):
            #print(parse_lat(lat), parse_long(long))
            #print(f"https://gdemdl.aster.jspacesystems.or.jp/download/Download_{parse_lat(lat)}{parse_long(long)}.zip")
            download(f"https://gdemdl.aster.jspacesystems.or.jp/download/Download_{parse_lat(lat)}{parse_long(long)}.zip", dest_folder="/media/data-ext/aster-gdem")
9,732 | 9ad36f157abae849a1550cb96e650746d57f491d | from collections import Counter
from docx import Document
import docx2txt
# Extract the document text and count word frequencies.
plain_text = docx2txt.process("kashmiri.docx")
list_of_words = plain_text.split()
#print(Counter(list_of_words))
counter_list_of_words = Counter(list_of_words)
elements = counter_list_of_words.items()
# for a, b in sorted(elements, key=lambda x: x[1], reverse=True):
#     print(a)
#     print(b)
doc = Document()
# Create and name the table heading row.
table = doc.add_table(rows=1, cols=2)
cell1 = table.cell(0, 0)
cell1.text = 'Word'
cell2 = table.cell(0, 1)
cell2.text = 'Frequency'
# Iterate over the counter items (most frequent first) and append rows.
for word, frequency in sorted(elements, key=lambda x: x[1], reverse=True):
    cell = table.add_row().cells
    cell[0].text = str(word)
    cell[1].text = str(frequency)
doc.save("results.docx")
9,733 | 7016a7dda80c0cfae0e15cf239f6ae64eb9004b7 | # Jeremy Jao
# University of Pittsburgh: DBMI
# 6/18/2013
#
# This is the thing that returns the dictionary of the key. we can edit more code to return different values in the keys (gene) in each dictionary inside the dictionary.
# my sys.argv isn't working in my situation due to my IDE (nor do I not know how it would work.... but yeah........... It's easy to code this.
# NOTE: Python 2 script (cPickle, print statements).
import cPickle
#import sys
#
#arg = sys.argv[0]
# Load the pickled gene dictionary and show the entry for one gene.
# NOTE(review): the file handle is never closed — wrap in a with-block
# if this grows beyond a throwaway script.
print "Displaying dictionary for " + "MESTIT1"
hi = open("geneDictionary.pickle", "r")
hello = cPickle.load(hi)
print hello["MESTIT1"]
9,734 | b8b20d6c977a6c1df6a592188c6e799f12da6a23 | ##########################################################################################
## Scene Classification ##
## Authors : Chris Andrew, Santhoshini Reddy, Nikath Yasmeen, Sai Hima, Sriya Ragini ##
################################################################### ##
## Description: This project was developed as part of the DIP course at IIIT Sri City ##
## All code is available for free usage for educational purposes ##
## Authors do not authorize commercial use of the source code ##
##########################################################################################
# The following module shuffles the data to enable 10 fold cross-validation analysis
################ Imports ################
from random import shuffle
################ Global ################
path = "data/"
filename = "data"
################ Source ################
# ------------------------------------
f = open(path+filename+".csv",'r')
data = list()
train_data = list()
train_class = list()
# ------------------------------------
for line in f:
l = line.strip()
l = l.split(',')
l = map(float , l)
data.append(l)
# ------------------------------------
f.close()
# ------------------------------------
for i in range(100):
shuffle(data)
# ------------------------------------
for l in data:
train_data.append(l[0:-1])
train_class.append(int(l[-1]))
# ------------------------------------
f = open(path+filename+"_r.csv",'w')
for i in range(len(train_data)):
for entry in train_data[i]:
f.write(str(entry)+',')
# ------------------------------------
f.write(str(train_class[i])+'\n')
# ------------------------------------
f.close()
# ------------------------------------------------------------------------------------------------------------------------------------------
# ------------------------------------------------------------------------------------------------------------------------------------------
# ------------------------------------------------------------------------------------------------------------------------------------------
|
9,735 | c77ca4aa720b172d75aff2ceda096a4969057a00 | # coding=utf-8
# __author__ = 'liwenxuan'
import random
chars = "1234567890ABCDEF"
ids = ["{0}{1}{2}{3}".format(i, j, k, l) for i in chars for j in chars for k in chars for l in chars]
def random_peer_id(prefix="F"*8, server_id="0000"):
    """Build a random 32-char peer id (last four hex chars randomized).

    :param prefix: first eight characters of the peer id; tests use "FFFFFFFF"
    :param server_id: tag distinguishing servers; "0000" when not distinguishing
    :return: prefix(8) + server_id(4) + sixteen '0's + four random hex chars
    """
    prefix, server_id = str(prefix), str(server_id)
    assert len(prefix) == 8 and len(server_id) == 4
    tail = random.choice(ids)  # pre-built table of all 4-hex-char suffixes
    return "".join((prefix, server_id, "0" * 16, tail))  # 8+4+16+4 = 32
def random_file_id(file_id_prefix="F"*8, server_id="0000"):
    """Build a random 32-char file id (last four hex chars randomized).

    :param file_id_prefix: up to eight leading characters, padded with 'F'
    :param server_id: tag distinguishing servers; "0000" when not distinguishing
    :return: padded prefix(8) + server_id(4) + sixteen 'F's + four random hex chars
    """
    file_id_prefix, server_id = str(file_id_prefix), str(server_id)
    assert len(file_id_prefix) <= 8 and len(server_id) == 4
    head = file_id_prefix.ljust(8, "F")
    return head + server_id + "F" * 16 + random.choice(ids)  # 8+4+16+4 = 32
if __name__ == "__main__":
pass
print "peer_id", random_peer_id()
print "file_id", random_file_id()
|
9,736 | 972c479ea40232e14fbf678ca2ccf9716e473fe8 | from rest_framework import serializers
from .models import data
from django.contrib.auth.models import User
class dataSerializer(serializers.ModelSerializer):
    """Serialize `data` task rows: task text, due date, assignee, done flag, owner."""
    class Meta:
        model = data
        fields = ['id','task','duedate','person','done', 'task_user']
class userSerializer(serializers.ModelSerializer):
    """Serialize Django auth users for the API.

    NOTE(review): 'password' is exposed as an ordinary field, so it will be
    echoed back in responses and written un-hashed by the default create() --
    confirm it is marked write_only and hashed before this ships.
    """
    class Meta:
        model = User
        fields = ['username', 'password', 'email']
9,737 | 0d07ad60c58828ce19153063fb5d7d80135cb9ec | from django.http import HttpResponse
from django.shortcuts import render
from dashboard.models import Farmer
import random, json, requests
from django.core import serializers
from collections import namedtuple
def sendSMS(message):
    # Send `message` (or a default body when falsy) via the Twilio REST API.
    # SECURITY(review): the account SID and auth token are hard-coded here --
    # move them to configuration/environment before shipping.
    # NOTE: From and To are the same number -- presumably test values; confirm.
    if message:
        assert isinstance(message, (str, unicode))
        payload = {"From":"18126524546", "To":"+18126524546", "Body":message}
    else:
        payload = {"From":"18126524546", "To":"+18126524546", "Body":"we curlin"}
    r = requests.post("https://api.twilio.com/2010-04-01/Accounts/AC2538516e85acddb63169a9c56019a68a/Messages",
                        auth=('AC2538516e85acddb63169a9c56019a68a','170945ab2aed0d2ec992a22e9fa41ca4'),
                        data=payload)
    # Python 2 print statement: dumps Twilio's raw response for debugging.
    print r.text
def jsonify(content):
response = serializers.serialize('json', content);
return HttpResponse(response, mimetype='application/json')
def getLatestSMS(request):
    """Return the most recent Twilio SMS as a JSON HTTP response.

    Bug fix: getTwilioSMSData(request) takes a required ``request`` argument
    but was called with none, so this view raised TypeError on every hit.
    """
    # NOTE(review): jsonify() runs Django's model serializer, which does not
    # accept a plain string -- confirm this endpoint is actually exercised.
    return jsonify(str(getTwilioSMSData(request)))
def getTwilioSMSData(request):
    # Fetch the message list from the Twilio REST API and return a str-rendered
    # dict with the newest message and the farmer matched by its phone number.
    # SECURITY(review): account SID and auth token are hard-coded -- move to config.
    r = requests.get('https://api.twilio.com/2010-04-01/Accounts/AC2538516e85acddb63169a9c56019a68a/Messages.json', auth=('AC2538516e85acddb63169a9c56019a68a', '170945ab2aed0d2ec992a22e9fa41ca4'))
    all_messages = []
    for item in r.json()['messages']:
        all_messages.append(SMS(
            phone = item['from'],
            body = item['body'],
            direction = item['direction'], #inbound or outbound
            date_created = item['date_created'],
            sid = item['sid']
        ))
    # NOTE(review): this binds a *local* last_SMS_id (no `global` statement),
    # so the module-level last_SMS_id is never updated -- dead store; confirm intent.
    last_SMS_id = all_messages[0].sid
    # [1:] strips the leading '+' from the E.164 number before matching.
    farmer = matchPhoneToFarmer(all_messages[0].phone[1:])
    return str({'farmer':farmer, 'text':all_messages[0]})
def matchPhoneToFarmer(phone):
print "PHONE: ", phone
for farmer in sample_farmers:
if phone==farmer.phone:
return farmer
return None
def generateSampleData():
    """Populate the module-level sample_farmers list with six demo Farmer rows.

    Bug fix: the per-field locals previously carried trailing commas
    (``land_area=sample_land_area[i],`` etc.), which turned land_area,
    latitude and longitude into 1-tuples before being passed to Farmer().
    """
    sample_names = ("Bob", "Dan", "Chloe", "Lyra", "Dev", "Eric")
    sample_land_area = (100, 140, 120, 30, 10, 1500)
    sample_phone = ("17792329691", "17792329696", "17792329691",
                    "17792329691", "17792329691", "17792329691")
    # Random jitter around (11.1, 79.646) to scatter demo markers on the map.
    sample_lat = tuple(11.1 + random.random() / 15 for _ in range(6))
    jittered = [79.646 + random.random() / 15 for _ in range(5)]
    jittered.append(79.646)  # last longitude un-jittered, as in the original data
    sample_long = tuple(jittered)
    sample_diseased = (True, False, False, False, True, True)
    for i in range(6):
        sample_farmers.append(Farmer(
            name=sample_names[i],
            land_area=sample_land_area[i],
            phone=sample_phone[i],
            latitude=sample_lat[i],
            longitude=sample_long[i],
            is_diseased=sample_diseased[i],
        ))
def returnFarmerDataJSON(request):
    """Serialize all sample farmers into a JSON HTTP response.

    Cleanup: the previous version also built a throwaway ``data`` list of
    str()'d farmers that was never read; that dead code is removed.
    """
    response = serializers.serialize('json', sample_farmers)
    return HttpResponse(response, mimetype='application/json')
def dashboard(request):
    # Render the dashboard page; the print below is Py2 debug output showing
    # the latest Twilio message on the server console.
    # sendSMS("yo what's up")
    print getTwilioSMSData(request)
    context = {}
    return render(request, 'dashboard/dashboard.html', context)
sample_farmers = []
generateSampleData()
SMS = namedtuple('SMS', ['phone', 'body', 'direction', 'date_created', 'sid'])
last_SMS_id = ""
|
9,738 | a5b74c31aed103b55404afc538af60c3eb18cb1b | """TcEx Framework Key Value Redis Module"""
class KeyValueRedis:
    """TcEx Key Value Redis Module.

    Thin wrapper over a single Redis hash: every operation targets the hash
    named by ``context``, so reassigning the context switches the namespace.

    Args:
        context (str): The Redis context (hash) for hashed based operations.
        redis_client (redis.Client): An instance of redis client.
    """

    def __init__(self, context, redis_client):
        """Store the context name and the underlying redis client."""
        self._context = context
        self._redis_client = redis_client

    @property
    def context(self):
        """Return the current context (hash name)."""
        return self._context

    @context.setter
    def context(self, context):
        """Point this instance at a different context (hash name)."""
        self._context = context

    def create(self, key, value):
        """Store ``value`` under field ``key`` in the context hash.

        Args:
            key (str): The field name (key) for the kv pair in Redis.
            value (any): The value for the kv pair in Redis.

        Returns:
            str: The response from Redis.
        """
        return self._redis_client.hset(self.context, key, value)

    def delete(self, key):
        """Remove field ``key`` from the context hash (alias for hdel).

        Args:
            key (str): The field name (key) for the kv pair in Redis.

        Returns:
            str: The response from Redis.
        """
        return self._redis_client.hdel(self.context, key)

    def hgetall(self):
        """Return every field/value pair stored in the context hash."""
        return self._redis_client.hgetall(self.context)

    def read(self, key):
        """Return the value stored at field ``key``, decoding bytes to str."""
        raw = self._redis_client.hget(self.context, key)
        # redis-py returns bytes; normalize to str for callers.
        return raw.decode('utf-8') if isinstance(raw, bytes) else raw
|
9,739 | a67612e8301728d1fb366d7c8909fa830f04bf45 | #Max Low
#9-25-17
#quiz2.py -- numbers , bigger smaller same, divisible by 3, product and correct person
numone = int(input('Enter a number: '))
numtwo = int(input('Enter a 2nd number: '))
if numone > numtwo:
print('The first number is bigger')
elif numtwo > numone:
print('The second number is bigger')
else:
print('The numbers are the same')
if numone % 3 == 0 and numtwo % 3 == 0:
print('They are both divisible by 3')
elif numone % 3 == 0:
print('Only the first number is divisible by three')
elif numtwo % 3 == 0:
print('Only the second number is divisible by three')
else:
print('Neither number is divisible by 3')
product = int(input('What is the product of your two numbers?: '))
if product == numone*numtwo:
print('correct')
else:
print('incorrect') |
9,740 | cd8d95e2bf433020db2db06a21263f75e3f81331 | #!/bin/python
"""
len()
lower()
upper()
str()
"""
parrot = "Norwegian Blue"
print len(parrot)
|
9,741 | 2b8b5b893d61d11d2795f5be96fde759256a15e8 | """
This is the main script
"""
import datetime
import sqlite3
from sqlite3 import Error
import nltk.sentiment
from chatterbot import ChatBot
from pythonosc import udp_client
def _create_connection(db_file):
    """Create the conversation-log table in the SQLite database ``db_file``.

    Creates TABLE_NAME with the input, output and time (TEXT) columns.

    Robustness fix: previously the ``finally`` block unconditionally called
    ``conn.commit()``/``conn.close()``, which raised UnboundLocalError when
    ``sqlite3.connect`` itself failed. The connection is now only committed
    on success and closed only when it was actually opened.
    """
    conn = None
    try:
        conn = sqlite3.connect(db_file)
        cur = conn.cursor()
        # Identifiers cannot be bound as SQL parameters; they come from
        # module-level constants, not user input.
        cur.execute("CREATE TABLE {tn} ({r1}, {r2}, {time} {ft})"
                    .format(tn=TABLE_NAME, r1=INPUT_COLUMN, r2=OUTPUT_COLUMN,
                            time='time', ft='TEXT'))
        conn.commit()
    except Error as err:
        print(err)
    finally:
        if conn is not None:
            conn.close()
def _log_conversation(db_file, line):
    """Append one user/bot exchange to the conversation table.

    Args:
        db_file: Path to the SQLite database file.
        line: Single-entry dict mapping user input -> bot response.

    Security fix: the input/output/time values are now bound as SQL
    parameters instead of being interpolated into the statement, so quotes
    or SQL fragments in the conversation text can no longer break (or
    inject into) the query. Also guards the ``finally`` block against a
    failed connect (previously UnboundLocalError).
    """
    conn = None
    try:
        conn = sqlite3.connect(db_file)
        cur = conn.cursor()
        # Only identifiers (from module constants) are formatted in; values
        # go through parameter binding.
        cur.execute(
            "INSERT INTO {tn} ({c1}, {c2}, {time}) VALUES (?, ?, ?)".format(
                tn=TABLE_NAME, c1=INPUT_COLUMN, c2=OUTPUT_COLUMN, time='time'),
            (' '.join(line.keys()), ' '.join(line.values()),
             str(datetime.datetime.now())))
        conn.commit()
    except Error as err:
        print(err)
    finally:
        if conn is not None:
            conn.close()
def main(text):
    """This is the main function to run the CHATBOT, analyse
    the responses with nltk and send OSC messages to Pure Data.

    Args:
        text (str): One line of user input from the REPL loop below.
    """
    # Get CHATBOT response from the user input.
    bot_response = CHATBOT.get_response(text).text
    print(bot_response)
    # Get polarity score from CHATBOT response.
    # NOTE(review): this actually scores `text` (the user's input), not the
    # bot's response, despite the comment above -- confirm which is intended.
    analysis = VADER_ANALYZER.polarity_scores(text)
    # Change polarity score relatively to a audible frequency.
    # Linear map of the compound score from [-1, 1] onto [200, 800] Hz.
    freq = (analysis['compound'] - -1) / (1 - -1) * (800 - 200) + 200
    # Send OSC message, to be listened to by pd.
    CLIENT.send_message("/filter", freq)
    # Log conversation.
    exchange = {text: bot_response}
    _log_conversation("conversation.db", exchange)
if __name__ == '__main__':
# Set up database
TABLE_NAME = 'conversation_log'
INPUT_COLUMN = 'input_column'
OUTPUT_COLUMN = 'output_column'
CONVERSATION_DB = "conversation.db"
_create_connection(CONVERSATION_DB)
# Set up chatbot.
CHATBOT = ChatBot(
'Sentiment Music Bot',
trainer='chatterbot.trainers.ChatterBotCorpusTrainer')
# Train based on the english corpus.
CHATBOT.train("chatterbot.corpus.english")
# Download lexicon for nltk.
nltk.download('vader_lexicon')
# Set up sentiment analyzer.
VADER_ANALYZER = nltk.sentiment.vader.SentimentIntensityAnalyzer()
# Set up OSC client.
IP = 'localhost'
PORT = 9000
CLIENT = udp_client.SimpleUDPClient(IP, PORT)
# Run chatbot.
while True:
USER_RESPONSE = input("Talk ('exit' to exit): ")
if USER_RESPONSE == 'exit': # Exit on 'exit' string.
break
else:
main(USER_RESPONSE)
|
9,742 | a315d01f0fb16f0c74c447c07b76f33e6ff6427d | from auth_passwordreset_reset import auth_passwordreset_reset
from auth_register import auth_register
from data import *
import pytest
#invalid reset code
def test_auth_passwordreset_reset1():
    """Resetting with a code that does not match the emailed one must fail."""
    #create a test account
    register = auth_register("Someemial@hotmail.com.au", "Hello123", "First", "Last")
    #call password reset request
    auth_passwordreset_request("Someemial@hotmail.com.au")
    #assuming that the code from the email was "WER123"
    #this should not work as the code "ABS124" doesnt match "WER123"
    # Bug fix: pytest.raises(match=...) interprets the pattern as a regex via
    # re.search; the old '*Incorrect Reset Code*' started with '*' (nothing to
    # repeat) and raised re.error instead of asserting. A plain substring works.
    with pytest.raises(ValueError, match='Incorrect Reset Code'):
        auth_passwordreset_reset("ABS124", "SomePass")
#invalid password
def test_auth_passwordreset_reset2():
    """Reset must reject new passwords shorter than 5 characters."""
    #create a test account
    register = auth_register("Someemial@hotmail.com.au", "Hello123", "First", "Last")
    #call password reset request
    auth_passwordreset_request("Someemial@hotmail.com.au")
    #assume that the code generated was "AUW624"
    # Bug fixes: (1) the old '*Invalid Password Length*' pattern was an invalid
    # regex (leading '*'), so the test errored instead of asserting; (2) both
    # reset calls sat inside one `with` block, so the second call never ran
    # once the first raised. Each call now gets its own raises block.
    with pytest.raises(ValueError, match='Invalid Password Length'):
        auth_passwordreset_reset("AUW624", "")
    with pytest.raises(ValueError, match='Invalid Password Length'):
        auth_passwordreset_reset("AUW624", "nope")
#valid case
def test_auth_passwordreset_reset3():
    """Happy path: a valid code and a valid new password should reset it."""
    #create a test account
    register = auth_register("Someemial@hotmail.com.au", "Hello123", "First", "Last")
    #call password reset request
    # NOTE(review): auth_passwordreset_request is not explicitly imported at
    # the top of this file -- presumably it arrives via `from data import *`;
    # confirm, otherwise this test fails with NameError.
    auth_passwordreset_request("Someemial@hotmail.com.au")
    #assume that the code generated was "AUW624"
    auth_passwordreset_reset("AUW624", "Valispass12")
    #test to see if password updated
    # NOTE(review): new_user_password is likewise undefined in this file --
    # presumably exported by `data`; verify it reflects the stored password.
    assert new_user_password == "Valispass12"
    #this sequence should successfully reset the password
|
9,743 | 75b1674066958a8fa28e74121a35d688bcc473d9 | from odoo import models, fields, api, _
class SaleAdvancePaymentInv(models.TransientModel):
    """Extend the sale advance-payment wizard with a required timesheet
    invoicing window (start/end dates)."""
    _inherit = "sale.advance.payment.inv"

    # Lower bound of the timesheet period to invoice.
    date_start_invoice_timesheet = fields.Date(
        string='Start Date',
        help="Only timesheets not yet invoiced (and validated, if applicable) from this period will be invoiced. "
             "If the period is not indicated, all timesheets not yet invoiced (and validated, if applicable) will "
             "be invoiced without distinction.", required=True)
    # Upper bound of the timesheet period to invoice.
    date_end_invoice_timesheet = fields.Date(
        string='End Date',
        help="Only timesheets not yet invoiced (and validated, if applicable) from this period will be invoiced. "
             "If the period is not indicated, all timesheets not yet invoiced (and validated, if applicable) will "
             "be invoiced without distinction.", required=True)
|
9,744 | a718d82713503c4ce3d94225ff0db04991ad4094 | # Generated by Django 3.0 on 2020-05-04 16:15
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('game_skeleton', '0001_initial'),
('contenttypes', '0002_remove_content_type_name'),
('class_room', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='UserHero',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('datetime_created', models.DateTimeField(auto_now=True)),
('datetime_edited', models.DateTimeField(auto_now_add=True)),
('datetime_finished', models.DateTimeField(blank=True, null=True)),
('capacity', models.FloatField()),
('wallet', models.DecimalField(decimal_places=4, default=0.0, max_digits=10)),
('hero_class', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='game_skeleton.HeroClass')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='heroes', to='class_room.User')),
],
),
migrations.CreateModel(
name='EventHistory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('object_id', models.PositiveIntegerField()),
('is_draft', models.BooleanField(default=False, help_text='Draft note does not participate in hero capacity calculation.')),
('datetime_created', models.DateTimeField(auto_now=True)),
('datetime_edited', models.DateTimeField(auto_now_add=True)),
('author', models.OneToOneField(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='actions', to='class_room.User')),
('content_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='contenttypes.ContentType')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='events', to='class_room.User')),
],
options={
'verbose_name_plural': 'User`s history events',
},
),
]
|
9,745 | afdb14d60374049753b3c980c717a13456c7ff5c | from django.contrib import admin
from django.urls import path
from .views import NewsCreateListView, NewsDetailGenericView
urlpatterns = [
path('news/', NewsCreateListView.as_view()),
path('news_detailed/<int:id>/', NewsDetailGenericView.as_view()),
] |
9,746 | cb6f68c8b8a6cead1d9fcd25fa2a4e60f7a8fb28 | import math
def upsample1(d, p):
    # Plain barrier (普通结界): widen the lethal radius additively by p.
    assert 1 <= p <= 10
    return p + d
def upsample2(d, p):
    # Doubling barrier (倍增结界): scale the lethal radius by factor p.
    assert 2 <= p <= 3
    return p * d
def downsample(d, p):
    # Gathering barrier (聚集结界): shrink the radius to ceil(d / p).
    assert 2 <= p <= 10
    quotient = d / p
    return math.ceil(quotient)
# Initialize the lethal radius (杀伤力范围).
lethal_radius = 1

# Read the number of barriers, then one "z p" pair per line: z selects the
# barrier type (1 = plain, 2 = doubling, 3 = gathering) and p its parameter.
# Cleanup: the old hard-coded `config` list of (z, p) pairs was never read
# (every parameter comes from stdin), so that dead code is removed.
for _ in range(int(input())):
    z, p = map(int, input().strip().split())
    if z == 1:
        lethal_radius = upsample1(lethal_radius, p)
    elif z == 2:
        lethal_radius = upsample2(lethal_radius, p)
    elif z == 3:
        lethal_radius = downsample(lethal_radius, p)

print(lethal_radius)
|
9,747 | 0972bd1241ad91f54f8dfde6327ee226c27bf2ca | from datetime import datetime
import time
from os import system
import RPi.GPIO as GPIO
import firebase_admin
from firebase_admin import credentials
from firebase_admin import db
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
GPIO.setup(21, GPIO.OUT) # este pin es de salida carro
GPIO.setup(26, GPIO.OUT) # este pin es de salida carro
GPIO.setup(19, GPIO.OUT) # este pin es de salida carro
GPIO.setup(13, GPIO.OUT) # este pin es de salida carro
GPIO.setup(6, GPIO.OUT) # este pin es de salida carro
GPIO.setup(5, GPIO.OUT) # este pin es de salida carro
GPIO.setup(11, GPIO.OUT) # este pin es de salida carro
GPIO.setup(20, GPIO.IN) #Este pin es una entrada carro pequeno
GPIO.setup(16, GPIO.IN) #Este pin es una entrada carro grande
PATH_CRED = '/home/pi/Desktop/cred.json'
URL_DB = 'https://arquiii-default-rtdb.firebaseio.com/'
cred = credentials.Certificate(PATH_CRED)
firebase_admin.initialize_app(cred, {
'databaseURL': URL_DB
})
REF = db.reference("/")
REF.set({
'Proceso':
{
}
})
REF = db.reference("/Vehiculos")
while True:
tiempo = datetime.now()
#Si hay un 1 en el pin 20
if GPIO.input(20):
tiempoE = 5 #tiempo que va a cambiar por estacion
if GPIO.input(20):
tamano = "Pequeno"
elif GPIO.input(20) and GPIO.input(16):
tamano = "Grande"
else:
tamano = "Mediano"
print("Se ha detectado un automovil de tamano",tamano)
REF.push({
"Recepcion": str(tiempo),
"Tamano": tamano,
})
if (tiempo == 5):
print("Activacion de agua... ")
tiempo += 5
GPIO.output(26, True)
print("Desactivacion de agua...")
tiempo = datetime.now()
REF.push({
"Tiempo agua": str(tiempo),
})
GPIO.output(26, False)
elif (tiempo == 10):
print("Activacion de rocio de shampoo... ")
tiempo += 5
GPIO.output(19, True)
print("Desactivacion de rocio de shampoo...")
tiempo = datetime.now()
REF.push({
"Tiempo rocio": str(tiempo),
})
GPIO.output(19, False)
elif (tiempo == 15):
print("Activacion de rodillos de limpieza... ")
tiempo += 5
GPIO.output(13, True)
print("Desactivacion de rodillos de limpieza...")
tiempo = datetime.now()
REF.push({
"Tiempo rodillo": str(tiempo),
})
GPIO.output(13, False)
elif (tiempo == 20):
print("Activacion de escobas de limpieza ")
tiempo += 5
GPIO.output(6, True)
print("Desactivacion de escobas de limpieza...")
tiempo = datetime.now()
REF.push({
"Tiempo escoba": str(tiempo),
})
GPIO.output(6, False)
elif (tiempo == 25):
print("Activacion de rocio de agua 2nda vez ")
tiempo += 5
GPIO.output(5, True)
print("Desactivacion de rocio de agua 2nda vez...")
tiempo = datetime.now()
REF.push({
"Tiempo agua 2nda": str(tiempo),
})
GPIO.output(5, False)
elif (tiempo == 30):
print("Activacion de rodillos de secado")
tiempo += 5
GPIO.output(11, True)
print("Desactivacion de rodillos de secado...")
tiempo = datetime.now()
REF.push({
"Tiempo rodillos": str(tiempo),
})
GPIO.output(11, False)
GPIO.cleanup()
|
9,748 | f8c222b1a84a092a3388cb801a88495bc227b1d5 |
import datetime
import hashlib
import json
from flask import Flask, jsonify, request
import requests
from uuid import uuid4
from urllib.parse import urlparse
from Crypto.PublicKey import RSA
# Part 1 - Building a Blockchain
class Blockchain:
#chain(emptylist) , farmer_details(emptylist), nodes(set), create_block(function to create the genesis block)
def __init__(self):
self.chain = []
self.farmer_details = []
self.create_block(proof = 1, previous_hash = '0')
self.nodes = set()
#It creates a dictionary block which contains index(length of chain+1),timestamp( by using the module datetime),
#Proof( passes as parameter),previous_hash(passed as parameter),
#Farmer_details(from self) and append this to the chain.
def create_block(self, proof, previous_hash):
block = {'index': len(self.chain) + 1,
'timestamp': str(datetime.datetime.now()),
'proof': proof,
'previous_hash': previous_hash,
'farmer_details': self.farmer_details}
self.farmer_details = []
self.chain.append(block)
return block
#It returns the last block of the chain.
def get_previous_block(self):
return self.chain[-1]
#It runs a lop and check if hash of new proof^2- previous proof^2 contains 4 leading zeroes.
#if yes,then it returns the new proof otherwise increment the new proof by 1 and iterates again.
def proof_of_work(self, previous_proof):
new_proof = 1
check_proof = False
while check_proof is False:
hash_operation = hashlib.sha256(str(new_proof**2 - previous_proof**2).encode()).hexdigest()
if hash_operation[:4] == '0000':
check_proof = True
else:
new_proof += 1
return new_proof
#- It returns the hash of the block using sha256
def hash(self, block):
encoded_block = json.dumps(block, sort_keys = True).encode()
return hashlib.sha256(encoded_block).hexdigest()
#It iterates a loop from 0 to chain length and check if hash of the block is same as returned by the hash function,
#then it checks if hash of the proof of current block^2-proof of previous block^2 contains 4 leading zeroes or not.
# if no, then chain is not valid.
def is_chain_valid(self, chain):
previous_block = chain[0]
block_index = 1
while block_index < len(chain):
block = chain[block_index]
if block['previous_hash'] != self.hash(previous_block):
return False
previous_proof = previous_block['proof']
proof = block['proof']
hash_operation = hashlib.sha256(str(proof**2 - previous_proof**2).encode()).hexdigest()
if hash_operation[:4] != '0000':
return False
previous_block = block
block_index += 1
return True
#- It creates the private key using the RSA.generate(1024),then creates the public key,
# hash of transaction(it is the hash of the sum of hashes of the name,crop_name,quantity,rate),
#data( it is the hash of the transaction in the int form),
#signature( it is created by raising the data to the power of privatekey.d%privatekey.n).
# Then it append a dictionary containing all these information in the hash format to the chain farmer_details
#and returns the index of the new block.
def add_farmerdetails(self, name, crop_name, quantity,rate):
privatekey = RSA.generate(1024)
publickey = privatekey.publickey()
hash_of_transaction=hashlib.sha256((hashlib.sha256(name.encode()).hexdigest()+hashlib.sha256(crop_name.encode()).hexdigest()+hashlib.sha256(str(quantity).encode()).hexdigest()+hashlib.sha256(str(rate).encode()).hexdigest()).encode()).hexdigest()
data=int(hash_of_transaction,16)
signature=pow(data,privatekey.d,privatekey.n)
self.farmer_details.append({'name_of_farmer': hashlib.sha256(name.encode()).hexdigest(),
'crop_name': hashlib.sha256(crop_name.encode()).hexdigest(),
'quantity_inkg': hashlib.sha256(str(quantity).encode()).hexdigest(),
'rate_perkg': hashlib.sha256(str(rate).encode()).hexdigest(),
'hash_of_transaction': hash_of_transaction,
'signature': signature
})
previous_block = self.get_previous_block()
return previous_block['index'] + 1
#It takes the url using urlparse of the address and then adds this to the set nodes in the self.
def add_node(self, address):
parsed_url = urlparse(address)
self.nodes.add(parsed_url.netloc)
#It access all the nodes in the set nodes and then iterates a loop to get their chain length using get_chain (to be described)
# and replaces the current chain with the longest chain of all the nodes.
def replace_chain(self):
network = self.nodes
longest_chain = None
max_length = len(self.chain)
for node in network:
response = requests.get(f'http://{node}/get_chain')
if response.status_code == 200:
length = response.json()['length']
chain = response.json()['chain']
if length > max_length and self.is_chain_valid(chain):
max_length = length
longest_chain = chain
if longest_chain:
self.chain = longest_chain
return True
return False
# Part 2 - Mining our Blockchain
# Creating a Web App
app = Flask(__name__)
# Creating an address for the node on Port 5001
node_address = str(uuid4()).replace('-', '')
# Creating a Blockchain
blockchain = Blockchain()
# Mining a new block
#- It access the previous block by calling the function get_previous_block(),
#then access the previous proof by previous_block[‘proof’],
#then it creates a new proof by using the function proof_of_work(‘previous_proof’),
#then it finds the hash of the previous block by using the function blockchain.hash(previous_block),
# then calls the function create_block( proof,previous_hash),then finds the hash of this block.
# It creates a response containing all the details of the new block,jsonify it and returns it.
@app.route('/mine_block', methods = ['GET'])
def mine_block():
previous_block = blockchain.get_previous_block()
previous_proof = previous_block['proof']
proof = blockchain.proof_of_work(previous_proof)
previous_hash = blockchain.hash(previous_block)
#blockchain.add_transaction(sender = node_address, receiver = 'Hadelin', amount = 1)
block = blockchain.create_block(proof, previous_hash)
current_block=blockchain.get_previous_block()
current_hash=blockchain.hash(current_block)
response = {'message': 'Congratulations, you just mined a block!',
'index': block['index'],
'timestamp': block['timestamp'],
'proof': block['proof'],
'previous_hash': block['previous_hash'],
'farmer': block['farmer_details'],
'current_hash': current_hash}
return jsonify(response), 200
# Getting the full Blockchain
#- It creates an empty list chain_till_now, then iterates over all the blocks in the blockchain and find it’s hash
#then check if the list farmer_details is empty or not,
#if it is empty then it appends a dictionary containing the current block’s index,timestamp,proof,previous_hash, current_hash, farmer_details.
# If the farmer_details list is not empty then it first finds the length of the list farmer_details
#then it iterates over the length of the list farmer_details and appends the hash of transaction
# contained within the dictionary of the list farmer_details. Then it creates the hash of this appended hash. This is the merged hash.
# Then it creates a dictionary containing merged hash,index,timestamp,proof,previous_hash,farmer_details and current hash.
# Then, it appends this dictionary to the list chain till now.
# It then creates the response containing the chain till now and length of the blockchain,jasonifies it and returns it.
@app.route('/print_chain',methods=['GET'])
def print_chain():
chain_till_now =[]
for xblock in blockchain.chain:
xcurrent_hash=blockchain.hash(xblock)
if len(xblock['farmer_details'])==0:
chain_till_now.append({'index': xblock['index'],
'timestamp': xblock['timestamp'],
'proof': xblock['proof'],
'previous_hash': xblock['previous_hash'],
'farmer': xblock['farmer_details'],
'current_hash': xcurrent_hash})
else:
l=len(xblock['farmer_details'])
sum=""
l-=1
while(l>=0):
sum=xblock['farmer_details'][l]['hash_of_transaction']+sum
l-=1
chain_till_now.append({'Merged_hash': hashlib.sha256(sum.encode()).hexdigest(),
'index': xblock['index'],
'timestamp': xblock['timestamp'],
'proof': xblock['proof'],
'previous_hash': xblock['previous_hash'],
'farmer': xblock['farmer_details'],
'current_hash': xcurrent_hash})
response = {'chain': chain_till_now,
'length': len(blockchain.chain)}
return jsonify(response), 200
#- It creats the response containing the blockchain.chain and its length,jasonifies it and returns it.
@app.route('/get_chain', methods = ['GET'])
def get_chain():
response = {'chain': blockchain.chain,
'length': len(blockchain.chain)}
return jsonify(response), 200
# Checking if the Blockchain is valid
#- It calls the function is_chain_valid and returns a string as response based on whether the chain is valid or not.
@app.route('/is_valid', methods = ['GET'])
def is_valid():
is_valid = blockchain.is_chain_valid(blockchain.chain)
if is_valid:
response = {'message': 'All good. The Blockchain is valid.'}
else:
response = {'message': 'Houston, we have a problem. The Blockchain is not valid.'}
return jsonify(response), 200
# Adding a new transaction to the Blockchain
#It takes the input in Jason format and checks if all the keys in the farmer keys(name_of_farmer,crop_name,quantity_inkg, rate_perkg) are available in the json file.
#If no, It returns that some elements are missing
# otherwise it calls the function add_farmer_details by passing the farmer details in the json file as parameter and
#returns the index of the block in which these details will be added.
@app.route('/add_farmerdetails', methods = ['POST'])
def add_farmer_details():
json = request.get_json()
farmer_keys = ['name_of_farmer', 'crop_name', 'quantity_inkg','rate_perkg']
if not all(key in json for key in farmer_keys):
return 'Some elements of the farmer_details are missing', 400
index = blockchain.add_farmerdetails(json['name_of_farmer'], json['crop_name'], json['quantity_inkg'], json['rate_perkg'])
response = {'message': f'These details will be added to Block {index}'}
return jsonify(response), 201
# Part 3 - Decentralizing our Blockchain
# Connecting new nodes
#It takes a Jason file as request and first check if it contains any node or not.
# If it contains the nodes then it calls the function blockchain.add_node .
#Then it returns the list of blockchain.nodes as response.
@app.route('/connect_node', methods = ['POST'])
def connect_node():
json = request.get_json()
nodes = json.get('nodes')
if nodes is None:
return "No node", 400
for node in nodes:
blockchain.add_node(node)
response = {'message': 'All the nodes are now connected. The puspesh Blockchain now contains the following nodes:',
'total_nodes': list(blockchain.nodes)}
return jsonify(response), 201
# Replacing the chain by the longest chain if needed
#- It calls the function blockcain.replace_chain. If the chain is replaced
#it returns the response with a message that the nodes has the different chains so the chain has been replaced by the longest chain alongwith the blockchain.chain.
# Otherwise it returns the response with a message all good the chain is the longest one with the blockchain.chain .
#then it jsonify the response and returns it.
@app.route('/replace_chain', methods = ['GET'])
def replace_chain():
is_chain_replaced = blockchain.replace_chain()
if is_chain_replaced:
response = {'message': 'The nodes had different chains so the chain was replaced by the longest one.',
'new_chain': blockchain.chain}
else:
response = {'message': 'All good. The chain is the largest one.',
'actual_chain': blockchain.chain}
return jsonify(response), 200
# Running the app
app.run(host = '0.0.0.0', port = 5001)
|
9,749 | a1e563f94044ff7cd7e0e55542bc4ca2db81df28 | #
# Author:: Noah Kantrowitz <noah@coderanger.net>
#
# Copyright 2014, Noah Kantrowitz
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from fabric.api import task, roles
import pytest
from fabric_rundeck import visitor
def fixture_path(*path):
    """Return the path to a test fixture under this module's data/ directory."""
    base = os.path.dirname(__file__)
    return os.path.join(base, 'data', *path)
class TestUnwrap(object):
    """Tests for visitor.unwrap: it must peel Fabric decorators off a callable."""
    @pytest.fixture
    def fn(self):
        # Plain function used as the wrapped target in every test.
        def fn():
            pass
        return fn
    def test_fn(self, fn):
        # An undecorated function unwraps to itself.
        assert visitor.unwrap(fn) is fn
    def test_task(self, fn):
        t = task(fn)
        assert visitor.unwrap(t) is fn
    def test_taskcall(self, fn):
        # @task() used with call parentheses.
        t = task()(fn)
        assert visitor.unwrap(t) is fn
    def test_task_roles(self, fn):
        t = task(roles('foo')(fn))
        assert visitor.unwrap(t) is fn
    def test_taskcall_roles(self, fn):
        t = task()(roles('foo')(fn))
        assert visitor.unwrap(t) is fn
    def test_roles_task(self, fn):
        # Decorator order reversed: roles applied outside task.
        t = roles('foo')(task(fn))
        assert visitor.unwrap(t) is fn
    def test_roles_taskcall(self, fn):
        t = roles('foo')(task()(fn))
        assert visitor.unwrap(t) is fn
    def test_lambda(self):
        fn = lambda: None
        assert visitor.unwrap(fn) is fn
    def test_lambda_task(self):
        fn = lambda: None
        t = task(fn)
        assert visitor.unwrap(t) is fn
class TestVisitTask(object):
    """Tests for visitor.visit_task: it must serialise a task's metadata dict
    (name, path, docstring, cron info and argspec) for a given callable."""
    def test_no_args(self):
        def fn():
            pass
        assert visitor.visit_task(fn, ()) == {
            'name': 'fn',
            'path': (),
            'doc': None,
            'cron': None,
            'argspec': {
                'args': [],
                'varargs': None,
                'keywords': None,
                'defaults': None,
            },
        }
    def test_simple_args(self):
        def fn(a, b):
            pass
        assert visitor.visit_task(fn, ()) == {
            'name': 'fn',
            'path': (),
            'doc': None,
            'cron': None,
            'argspec': {
                'args': ['a', 'b'],
                'varargs': None,
                'keywords': None,
                'defaults': None,
            },
        }
    def test_arg_defaults(self):
        # Defaults are reported positionally, matching inspect.getargspec.
        def fn(a, b=1, c=None):
            pass
        assert visitor.visit_task(fn, ()) == {
            'name': 'fn',
            'path': (),
            'doc': None,
            'cron': None,
            'argspec': {
                'args': ['a', 'b', 'c'],
                'varargs': None,
                'keywords': None,
                'defaults': (1, None),
            },
        }
    def test_varargs(self):
        def fn(*args, **kwargs):
            pass
        assert visitor.visit_task(fn, ()) == {
            'name': 'fn',
            'path': (),
            'doc': None,
            'cron': None,
            'argspec': {
                'args': [],
                'varargs': 'args',
                'keywords': 'kwargs',
                'defaults': None,
            },
        }
    def test_docs(self):
        # The docstring must be surfaced verbatim in the 'doc' field.
        def fn(*args, **kwargs):
            """I am a teapot."""
            pass
        assert visitor.visit_task(fn, ()) == {
            'name': 'fn',
            'path': (),
            'doc': 'I am a teapot.',
            'cron': None,
            'argspec': {
                'args': [],
                'varargs': 'args',
                'keywords': 'kwargs',
                'defaults': None,
            },
        }
class TestVisit(object):
    """Tests for visitor.visit: it must flatten a (possibly nested) mapping of
    callables into an ordered list of task records with module paths."""
    def test_single(self):
        def fn():
            pass
        callables = {
            'fn': fn,
        }
        data = visitor.visit(callables)
        assert len(data) == 1
        assert data[0]['name'] == 'fn'
    def test_multi(self):
        def fn():
            pass
        def fn2():
            pass
        def fn3():
            pass
        callables = {
            'fn': fn,
            'fn2': fn2,
            'fn3': fn3,
        }
        data = visitor.visit(callables)
        assert len(data) == 3
        assert data[0]['name'] == 'fn'
        assert data[1]['name'] == 'fn2'
        assert data[2]['name'] == 'fn3'
    def test_nested(self):
        def fn():
            pass
        def fn2():
            pass
        def fn3():
            pass
        # Nested dicts represent submodules; the 'path' field records nesting.
        callables = {
            'fn': fn,
            'mod': {
                'fn2': fn2,
                'fn3': fn3,
            }
        }
        data = visitor.visit(callables)
        assert len(data) == 3
        assert data[0]['name'] == 'fn'
        assert data[0]['path'] == ()
        assert data[1]['name'] == 'fn2'
        assert data[1]['path'] == ('mod',)
        assert data[2]['name'] == 'fn3'
        assert data[2]['path'] == ('mod',)
class TestVisitFabfile(object):
    """Integration test: load a real fabfile fixture and count its tasks."""
    def test_one(self):
        data = visitor.visit_fabfile(fixture_path('fabfile_one.py'))
        # fabfile_one.py is expected to define exactly three tasks.
        assert len(data) == 3
|
9,750 | 084299da1c2f41de96e60d37088466c7b61de38e | from appJar import gui
app = gui("Calculator", "560x240")
### FUNCTIONS ###
n1, n2 = 0.0, 0.0
result = 0.0
isFirst = True
calc = ""
def doMath(btn):
    """Store the first operand and remember which operation *btn* selected."""
    global result, n1, n2, isFirst, calc
    inputNumber()
    # One-letter codes identify the pending operation for calculate().
    if(btn == "Add"): calc = "a"
    if(btn == "Substract"): calc = "s"
    if(btn == "Multiply"): calc = "m"
    if(btn == "Divide"): calc = "d"
    app.clearEntry("Number")
def calculate(btn):
    """Apply the pending operation to the two operands and display the result.

    On division by zero the calculator state is reset, an error dialog is
    shown, and no result is displayed (the original fell through and showed
    a meaningless value after the reset).
    """
    global result, n1, n2, isFirst, calc
    inputNumber()
    # The operation codes are mutually exclusive, so use an elif chain.
    if(calc == 'a'): result = n1 + n2
    elif(calc == 's'): result = n1 - n2
    elif(calc == 'm'): result = n1 * n2
    elif(calc == 'd'):
        try:
            result = n1 / n2
        except ZeroDivisionError:
            clearOut(btn)
            app.errorBox("DivisionByZero", "You can't divide by Zero.")
            # Bail out: state was just reset, there is no result to show.
            app.clearEntry("Number")
            return
    app.clearEntry("Number")
    app.setLabel("Result", result)
def clearOut(btn):
    """Reset the calculator to its initial state (operands, result, pending op)."""
    global result, n1, n2, isFirst, calc
    n1, n2 = 0.0, 0.0
    result = 0.0
    isFirst = True
    calc = ""
def inputNumber():
    """Read the entry field into n1 or n2, alternating on each call.

    isFirst tracks which operand is expected next.
    """
    global n1, n2, isFirst
    if(isFirst):
        n1 = app.getEntry("Number")
        isFirst = False
    else:
        n2 = app.getEntry("Number")
        isFirst = True
### FUNCTIONS ###
app.setStretch("column")
app.setSticky("")
app.setResizable(True)
app.addNumericEntry("Number")
app.setEntryDefault("Number", "Enter Number")
app.addButtons(["Add", "Substract", "Multiply", "Divide"], doMath)
app.addButtons(["Calculate!", "clearOut"], [calculate, clearOut])
app.setButton("clearOut", "C")
app.addEmptyLabel("Result")
app.go()
|
9,751 | 3319614d154b16190f3cd8f4f65c3b0e0da277e9 | # -*- coding: utf-8 -*-
class Solution:
    """
    @param head: The first node of the linked list.
    @return: The node where the cycle begins.
    if there is no cycle, return null
    """
    def detectCycle(self, head):
        # write your code here
        # Strategy: first detect whether a cycle exists, then measure the
        # cycle's length, and finally walk two pointers to find the entry.
        cycle_len = -1
        one_node, two_node = head, head
        while two_node:
            # Fast pointer advances two steps per slow-pointer step.
            for i in xrange(2):
                if two_node:
                    two_node = two_node.next
                    if two_node == one_node:
                        cycle_len = 1
                        two_node = one_node.next
                        while two_node != one_node:  # measure the cycle length
                            cycle_len += 1
                            two_node = two_node.next
                        break
                else:
                    break
            one_node = one_node.next
            if (not two_node) or (cycle_len != -1):
                break
        if cycle_len == -1:
            return None
        # Restart both pointers from head; two_node first advances by exactly
        # one cycle length, so they meet at the cycle's entry node.
        one_node, two_node = head, head
        i = 0
        while i < cycle_len:
            two_node = two_node.next
            i += 1
        while one_node != two_node:
            one_node = one_node.next
            two_node = two_node.next
        return one_node
9,752 | b93f6c3192f8dd58b96dfdc6ea2b17e12cce34d0 | from collections import defaultdict, deque
# Greedy: hand out the largest values to vertices nearest the root via BFS,
# so every parent receives a value >= its children; each edge then contributes
# the child's value, and the total is sum(C[1:]).
N = int(input())
adj_list = defaultdict(list)
E = []
V_number = [None]*N  # value assigned to each vertex (1-indexed input)
for _ in range(N-1):
    a, b = map(int, input().split())
    E.append((a, b))
    adj_list[a].append(b)
    adj_list[b].append(a)
# Values to distribute, largest first.
C = sorted(list(map(int, input().split())), reverse=True)
q = deque([1])
i = 0
# BFS from vertex 1, assigning values in decreasing order of discovery.
while q:
    v = q.popleft()
    V_number[v-1] = C[i]
    i += 1
    for u in adj_list[v]:
        if V_number[u-1] is None:
            q.append(u)
print(sum(C[1:]))
print(*V_number)
9,753 | 9535335c70129f997d7b8739444a503d0b984ac8 | import json
import os
import pickle
import random
import urllib.request
from pathlib import Path
import tensorflow as tf
from matplotlib import pyplot as plt
class CNN(object):
def __init__(self):
self.model = tf.keras.Sequential([
tf.keras.layers.Conv2D(32, (3, 3), activation='relu', input_shape=(150, 150, 1)),
tf.keras.layers.MaxPool2D((2, 2)),
tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),
tf.keras.layers.MaxPool2D(2, 2),
tf.keras.layers.Conv2D(128, (3, 3), activation='relu'),
tf.keras.layers.MaxPool2D(2, 2),
tf.keras.layers.Conv2D(128, (3, 3), activation='relu'),
tf.keras.layers.MaxPool2D(2, 2),
tf.keras.layers.Flatten(),
tf.keras.layers.Dropout(0.5),
tf.keras.layers.Dense(512, activation='relu'),
tf.keras.layers.Dense(1, activation='sigmoid')
])
self.last_training_history = {}
def print_model_info(self):
print(self.model.summary())
def get_model(self):
return self.model
def load_weights(self, filepath='model.h5'):
self.model.load_weights(filepath)
self.model.compile(
optimizer='adam',
loss='binary_crossentropy',
metrics=['acc']
)
def load_last_training_history(self, filepath='result.pk'):
with open(filepath, 'rb') as f:
self.last_training_history = pickle.load(f)
def get_last_training_history(self):
return self.last_training_history
def plot_last_training_history(self, save_plot=False):
for key in self.last_training_history:
y = self.last_training_history[key]
plt.plot([i + 1 for i in range(len(y))], y, label=key)
plt.legend()
plt.grid()
plt.xlabel('epoch')
if save_plot:
plt.savefig('training_history.png', dpi=300)
else:
plt.show()
def train(self, directory, epochs=100, save_model=False, save_history=False):
train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
rescale=1. / 255,
rotation_range=20,
width_shift_range=0.15,
height_shift_range=0.15,
shear_range=0.15,
zoom_range=0.15,
fill_mode='nearest',
horizontal_flip=True,
vertical_flip=False,
brightness_range=None,
channel_shift_range=0
)
test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
rescale=1. / 255)
train_generator = train_datagen.flow_from_directory(
directory,
target_size=(150, 150),
batch_size=32,
color_mode='grayscale',
class_mode='binary'
)
test_generator = test_datagen.flow_from_directory(
directory,
target_size=(150, 150),
batch_size=32,
color_mode='grayscale',
class_mode='binary'
)
self.model.compile(
optimizer='adam',
loss='binary_crossentropy',
metrics=['acc']
)
history = self.model.fit(
train_generator,
epochs=epochs,
validation_data=test_generator
)
if save_model:
self.model.save('model.h5')
if save_history:
with open('result.pk', 'wb') as f:
pickle.dump(history.history, f)
self.last_training_history = history.history
return history.history
def predict_directory(self, directory, probabilities=True):
if directory[-1] != '\\' and directory[-1] != '/':
directory += '/'
predictions = {}
onlyfiles = [f for f in os.listdir(directory) if os.path.isfile(os.path.join(directory, f))]
for image_file in onlyfiles:
img = tf.keras.preprocessing.image.load_img(directory + image_file, target_size=(150, 150),
color_mode='grayscale')
x = tf.keras.preprocessing.image.img_to_array(img, )
x = x.reshape((1,) + x.shape)
x = x / 255
y = self.model.predict(x)[0][0]
if probabilities:
predictions[image_file] = y
else:
predictions[image_file] = y > 0.5
return predictions
def predict_single_image(self, file_url):
self.load_weights()
self.load_last_training_history()
file_name = "image.jpg"
urllib.request.urlretrieve(file_url, file_name)
img = tf.keras.preprocessing.image.load_img(file_name, target_size=(150, 150),
color_mode='grayscale')
x = tf.keras.preprocessing.image.img_to_array(img, )
x = x.reshape((1,) + x.shape)
x = x / 255
prediction = self.model.predict(x)[0][0]
is_default_image = prediction < 0.5
print(prediction)
os.remove(file_name)
return json.dumps(True) if is_default_image else json.dumps(False)
def evaluate_on_directory(self, directory):
val_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1. / 255)
val_generator = val_datagen.flow_from_directory(
directory,
target_size=(150, 150),
batch_size=32,
color_mode='grayscale',
class_mode='binary'
)
return self.model.evaluate(val_generator)
def split_directory(directory, train_size=0.75, test_size=0.2, val_size=0.05):
    """Randomly split a class-per-subfolder dataset into train/test/val subtrees.

    Creates train/, test/ and val/ directories (each mirroring the existing
    class subfolders) and moves every file into one of them with probability
    given by the requested sizes. Proportions must sum to exactly 1.
    """
    assert train_size + test_size + val_size == 1
    assert 0 <= train_size <= 1 and 0 <= test_size <= 1 and 0 <= val_size <= 1
    # First level of subdirectories = the class labels.
    subdirs = next(os.walk(directory))[1]
    if train_size > 0:
        os.mkdir(directory + '/train')
        for subdir in subdirs:
            os.mkdir(directory + '/train/' + subdir)
    if test_size > 0:
        os.mkdir(directory + '/test')
        for subdir in subdirs:
            os.mkdir(directory + '/test/' + subdir)
    if val_size > 0:
        os.mkdir(directory + '/val')
        for subdir in subdirs:
            os.mkdir(directory + '/val/' + subdir)
    pathlist = Path(directory).rglob('*.*')
    for path in pathlist:
        instance_path = str(path)
        # Support both POSIX and Windows path separators.
        instance_properties = instance_path.split('/') if '/' in instance_path else instance_path.split('\\')
        instance_name = instance_properties[-1]
        instance_class = instance_properties[-2]
        # One uniform draw per file, bucketed by cumulative probability.
        r = random.random()
        if r < val_size:
            subfolder = '/val/'
        elif r < test_size + val_size:
            subfolder = '/test/'
        else:
            subfolder = '/train/'
        os.rename(instance_path, '/'.join(instance_properties[:-2]) + subfolder + instance_class + '/' + instance_name)
if __name__ == '__main__':
cnn = CNN()
cnn.load_weights()
cnn.load_last_training_history()
cnn.print_model_info()
|
9,754 | 76f2312a01bf8475220a9fcc16209faddfccd2ae | import os
import sys
import logging.config
import sqlalchemy as sql
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Float, String, Text, Integer
import pandas as pd
import numpy as np
sys.path.append('./config')
import config
logging.basicConfig(level=logging.INFO, format='%(name)s - %(levelname)s - %(asctime)s - %(message)s')
logger = logging.getLogger(__file__)
Base = declarative_base()
class BeanAttributes(Base):
    """ Defines the data model for the table `bean_attributes`. """
    __tablename__ = 'bean_attributes'
    # Primary key; populated from the source CSV's index column during ingest.
    id = Column(Integer, primary_key=True)
    # Provenance / descriptive fields (free text, all optional).
    species = Column(String(100), unique=False, nullable=True)
    owner = Column(String(100), unique=False, nullable=True)
    country = Column(String(100), unique=False, nullable=True)
    farm_name = Column(String(100), unique=False, nullable=True)
    company = Column(String(100), unique=False, nullable=True)
    region = Column(String(100), unique=False, nullable=True)
    producer = Column(String(100), unique=False, nullable=True)
    grading_date = Column(String(100), unique=False, nullable=True)
    processing_method = Column(Text, unique=False, nullable=True)
    # Numeric quality measurements loaded from the CSV.
    aroma = Column(Float, unique=False, nullable=True)
    flavor = Column(Float, unique=False, nullable=True)
    aftertaste = Column(Float, unique=False, nullable=True)
    acidity = Column(Float, unique=False, nullable=True)
    body = Column(Float, unique=False, nullable=True)
    balance = Column(Float, unique=False, nullable=True)
    uniformity = Column(Float, unique=False, nullable=True)
    cleancup = Column(Float, unique=False, nullable=True)
    sweetness = Column(Float, unique=False, nullable=True)
    total_cup_point = Column(Float, unique=False, nullable=True)
    moisture = Column(Float, unique=False, nullable=True)
    color = Column(String(100), unique=False, nullable=True)
    # Cluster label assigned by the upstream clustering step.
    cluster = Column(Integer, unique=False, nullable=True)
    def __repr__(self):
        return '<BeanAttributes %r>' % self.id
def persist_to_db(engine_string):
    """Persist the data to database.

    Recreates the schema if needed, wipes any existing rows, then inserts one
    BeanAttributes row per record of the CSV at config.DATA_TABLE_PATH.

    Args:
        engine_string (`str`): Engine string for SQLAlchemy.
    Returns:
        None.
    """
    engine = sql.create_engine(engine_string)
    Base.metadata.create_all(engine)
    Session = sessionmaker(bind=engine)
    session = Session()
    # Delete all existing records in the table
    if config.LOCAL_DB_FLAG:
        try:
            session.execute('''DELETE FROM msia_db.bean_attributes''')
        # NOTE(review): bare except silently ignores every failure here
        # (presumably intended only for "table does not exist") — narrow it.
        except:
            pass
    else:
        try:
            session.execute('''DELETE FROM bean_attributes''')
        except:
            pass
    # Read the data table and persist it into the database
    raw_data = pd.read_csv(config.DATA_TABLE_PATH)
    raw_data = raw_data.replace(np.nan, '', regex=True)
    try:
        for i in range(raw_data.shape[0]):
            # One row per CSV record; 'Unnamed: 0' is pandas' unnamed index column.
            bean_row = BeanAttributes(id=int(raw_data.iloc[i]['Unnamed: 0']),
                                      species=str(raw_data.iloc[i]['Species']),
                                      owner=str(raw_data.iloc[i]['Owner.1']),
                                      country=str(raw_data.iloc[i]['Country.of.Origin']),
                                      farm_name=str(raw_data.iloc[i]['Farm.Name']),
                                      company=str(raw_data.iloc[i]['Company']),
                                      region=str(raw_data.iloc[i]['Region']),
                                      producer=str(raw_data.iloc[i]['Producer']),
                                      grading_date=str(raw_data.iloc[i]['Grading.Date']),
                                      processing_method=str(raw_data.iloc[i]['Processing.Method']),
                                      aroma=float(raw_data.iloc[i]['Aroma']),
                                      flavor=float(raw_data.iloc[i]['Flavor']),
                                      aftertaste=float(raw_data.iloc[i]['Aftertaste']),
                                      acidity=float(raw_data.iloc[i]['Acidity']),
                                      body=float(raw_data.iloc[i]['Body']),
                                      balance=float(raw_data.iloc[i]['Balance']),
                                      uniformity=float(raw_data.iloc[i]['Uniformity']),
                                      cleancup=float(raw_data.iloc[i]['Clean.Cup']),
                                      sweetness=float(raw_data.iloc[i]['Sweetness']),
                                      total_cup_point=float(raw_data.iloc[i]['Total.Cup.Points']),
                                      moisture=float(raw_data.iloc[i]['Moisture']),
                                      color=str(raw_data.iloc[i]['Color']),
                                      cluster=int(raw_data.iloc[i]['cluster'])
                                      )
            session.add(bean_row)
            logger.debug('Row %d added to table ' % i)
        session.commit()
    except sql.exc.IntegrityError:  # Check primary key duplication
        logger.error("Duplicated coffee bean")
    except Exception as e:
        # NOTE(review): message assumes a credentials failure, but this branch
        # catches every error (e.g. ValueError from the float() casts above).
        logger.error("Incorrect credentials, access denied", e)
    finally:
        session.close()
if __name__ == "__main__":
# Obtain parameters from os
conn_type = "mysql+pymysql"
user = os.environ.get("MYSQL_USER")
password = os.environ.get("MYSQL_PASSWORD")
host = os.environ.get("MYSQL_HOST")
port = os.environ.get("MYSQL_PORT")
database = os.environ.get("DATABASE_NAME")
local_database_path = config.LOCAL_DATABASE_PATH
# If users wish to write to their own SQLALCHEMY_DATABASE_URI in the environment
if config.SQLALCHEMY_DATABASE_URI is None:
# Whether to create a local SQLite database or an AWS RDS database
if config.LOCAL_DB_FLAG:
engine_string = "sqlite:///{}".format(local_database_path)
else:
engine_string = "{}://{}:{}@{}:{}/{}".format(conn_type, user, password, host, port, database)
else:
engine_string = config.SQLALCHEMY_DATABASE_URI
try:
engine_string = 'sqlite:///data/bean.db'
persist_to_db(engine_string)
logger.info("Data successfully persisted into the database")
except Exception as e:
logger.error(e)
sys.exit(1)
|
9,755 | 388904b6b826a1c718b85f2951a3189bb5abea2a | # import adafruit_ads1x15 as adс
# from adafruit_ads1x15 import ads1x15 as adc
# from adafruit_ads1x15 import analog_in
import time
import busio
import board
from adafruit_ads1x15 import ads1015 as ADS
from adafruit_ads1x15.analog_in import AnalogIn
i2c = busio.I2C(board.SCL, board.SDA)
ads = ADS.ADS1015(i2c)
chan = AnalogIn(ads, ADS.P0)
print("{:>5}\t{:>5}".format('raw', 'v'))
while True:
print("{:>5}\t{:>5.3f}".format(chan.value, chan.voltage))
time.sleep(0.5)
# print(dir(analog_in.AnalogIn()))
# analog_in.AnalogIn()
# GAIN = 1
# a = adc
#
# print('| {0:>6} | {1:>6} | {2:>6} | {3:>6} |'.format(*range(4)))
# print('-' * 37)
# # Main loop.
# while True:
# # Read all the ADC channel values in a list.
# values = [0]*4
# for i in range(4):
# # Read the specified ADC channel using the previously set gain value.
# values[i] = a.read_adc(i, gain=GAIN)
#
# print('| {0:>6} | {1:>6} | {2:>6} | {3:>6} |'.format(*values))
# # Pause for half a second.
# time.sleep(0.5)
|
9,756 | 01128ebd156b24791548c50c92d2fc1969c42e70 | import numpy as np
import sklearn.cluster as sc
import sklearn.metrics as sm
import matplotlib.pyplot as mp
x = np.loadtxt('C:\\Users\\Administrator\\Desktop\\sucai\\ml_data\\perf.txt', delimiter=',')
# 准备训练模型相关数据
epsilons, scores, models = np.linspace(0.3, 1.2, 10), [], []
# 遍历所有的半径,训练模型,查看得分
for epsilon in epsilons:
model = sc.DBSCAN(eps=epsilon, min_samples=5)
model.fit(x)
score = sm.silhouette_score(x, model.labels_, sample_size=len(x), metric='euclidean')
scores.append(score)
models.append(model)
# 转成ndarray数组
scores = np.array(scores)
best_i = scores.argmax() # 最优分数
best_eps = epsilons[best_i]
best_sco = scores[best_i]
# 获取最优模型
best_model = models[best_i]
# 对输入x进行预测得到预测类别
pred_y = best_model.fit_predict(x)
# 获取孤立样本,外周样本,核心样本
core_mask = np.zeros(len(x), dtype=bool)
# 获取核心样本的索引,把对应位置的元素改为True
core_mask[best_model.core_sample_indices_] = True
# 孤立样本的类别标签为-1
offset_mask = best_model.labels_ == -1
# 外周样本掩码(不是核心也不是孤立样本)
p_mask = ~(core_mask | offset_mask)
# 绘制这些样本数据
mp.figure('DBSCAN cluster', facecolor='lightgray')
mp.title('DBSCAN cluster', fontsize=16)
mp.xlabel('x', fontsize=14)
mp.ylabel('y', fontsize=14)
mp.tick_params(labelsize=10)
# 绘制核心样本
mp.scatter(x[core_mask][:, 0], x[core_mask][:, 1], s=60, cmap='brg', c=pred_y[core_mask])
# 绘制外周样本
mp.scatter(x[p_mask][:, 0], x[p_mask][:, 1], s=60, cmap='brg', c=pred_y[p_mask], alpha=0.5)
# 绘制孤立样本
mp.scatter(x[offset_mask][:, 0], x[offset_mask][:, 1], s=60, c='gray')
mp.show() |
9,757 | 9cad36de6231f310ef9022f16f6ed0da83a003b3 | # -*- coding: utf-8 -*-
"""
Created on Mon Mar 6 12:20:45 2017
@author: 7
"""
from os import listdir
from PIL import Image as PImage
from scipy import misc
import numpy as np
from Image_loader import LoadImages
"""
def LoadImages(path):
# return array of images
imagesList = listdir(path)
loadedImages = []
for image in imagesList:
img = misc.imread(path + image)
loadedImages.append(img)
return loadedImages
"""
def ModifyImages(path,path1):
    """Pad every image in *path* onto a 540x420 grayscale canvas saved under *path1*.

    Each source image is centred on a black "L"-mode (grayscale) canvas so
    that all outputs share identical dimensions.
    """
    # modify images to same scale
    imagesList = listdir(path)
    for image in imagesList:
        old_img = PImage.open(path + image)
        old_size = old_img.size
        new_size = (540,420)
        new_img = PImage.new("L", new_size)
        # Centre the original inside the new canvas.
        new_img.paste(old_img,((new_size[0]-old_size[0])//2,(new_size[1]-old_size[1])//2))
        new_img.save(path1 + image)
"""
path = "train\\"
path1 = "train_modified\\"
ModifyImages(path,path1)
imgs = LoadImages(path1)
a = np.array( imgs )
print (a.shape)
print("finished")
path = "test\\"
path1 = "test_modified\\"
ModifyImages(path,path1)
imgs = LoadImages(path1)
a = np.array( imgs )
print (a.shape)
print("finished")
path = "train_cleaned\\"
path1 = "train_cleaned_modified\\"
ModifyImages(path,path1)
imgs = LoadImages(path1)
a = np.array( imgs )
print (a.shape)
print("finished")
""" |
9,758 | 0fb424dafaac184882ea56f36265e0b19b5a4c50 |
import torch
import torch.nn.functional as f
import time
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import numpy as np
dtype = torch.float
device = torch.device("cpu")
# device = torch.device("cuda:0") # Uncomment this to run on GPU
N, D_in, H, D_out = 64, 1000, 100, 10
x = torch.randn(N, D_in, device=device, dtype=dtype)
y = torch.randn(N, D_out, device=device, dtype=dtype)
model = torch.nn.Sequential(
torch.nn.Linear(D_in, H),
torch.nn.ReLU(),
torch.nn.Linear(H, D_out),
)
def plot_grad_flow(named_parameters):
    '''Plots the gradients flowing through different layers in the net during training.
    Can be used for checking for possible gradient vanishing / exploding problems.
    Usage: Plug this function in Trainer class after loss.backwards() as
    "plot_grad_flow(self.model.named_parameters())" to visualize the gradient flow'''
    ave_grads = []
    max_grads = []
    layers = []
    for n, p in named_parameters:
        # Only trainable weight tensors; biases are skipped for the plot.
        if (p.requires_grad) and ("bias" not in n):
            layers.append(n)
            ave_grads.append(p.grad.abs().mean())
            max_grads.append(p.grad.abs().max())
    # Overlay max (cyan) and mean (blue) absolute gradients per layer.
    plt.bar(np.arange(len(max_grads)), max_grads, alpha=0.1, lw=1, color="c")
    plt.bar(np.arange(len(max_grads)), ave_grads, alpha=0.1, lw=1, color="b")
    # Zero baseline: bars stuck at this line suggest vanishing gradients.
    plt.hlines(0, 0, len(ave_grads) + 1, lw=2, color="k")
    plt.xticks(range(0, len(ave_grads), 1), layers, rotation="vertical")
    plt.xlim(left=0, right=len(ave_grads))
    plt.ylim(bottom=-0.001, top=0.02)  # zoom in on the lower gradient regions
    plt.xlabel("Layers")
    plt.ylabel("average gradient")
    plt.title("Gradient flow")
    plt.grid(True)
    plt.legend([Line2D([0], [0], color="c", lw=4),
                Line2D([0], [0], color="b", lw=4),
                Line2D([0], [0], color="k", lw=4)], ['max-gradient', 'mean-gradient', 'zero-gradient'])
    plt.show()
learning_rate = 1e-6
y_pred = model(x)
loss = (y_pred - y).pow(2).sum()
loss.backward()
plot_grad_flow(model.named_parameters())
|
9,759 | 09d32b48ae88b1066dd0aa435a351c4fb1fc04ec | from flask import Flask, request, render_template
from random import choice, sample
app = Flask(__name__)
horoscopes = [
'your day will be awesome',
'your day will be terrific',
'your day will be fantastic',
'neato, you have a fantabulous day ahead',
'your day will be oh-so-not-meh',
'this day will be brilliant',
'looks like today is just ducky',
'I proclaim your day to be INCREDIBLE',
'this day will be wonderful',
'smash this day',
'this day shall be lovely',
'your day will be just satenacious']
@app.route('/')
def index():
    """Show the homepage and ask the user's name."""
    # The form on index.html submits to the /horoscope route.
    return render_template('index.html')
@app.route('/horoscope')
def get_horoscope():
    """Give the user a personalised set of horoscopes.

    Reads 'name', 'num_horoscopes' and 'show_horoscopes' from the query
    string and renders horoscopes.html with that many random horoscopes.
    """
    name = request.args.get('name')
    num_horoscopes = int(request.args.get('num_horoscopes'))
    show_horoscopes = request.args.get('show_horoscopes')
    # Pick the requested number of distinct horoscopes at random.
    horoscopes_to_show = sample(horoscopes, num_horoscopes)
    return render_template(
        'horoscopes.html',
        name=name,
        show_horoscopes=show_horoscopes,
        horoscopes_to_show=horoscopes_to_show)  # fixed: stray extra ')' was a SyntaxError
"""
if show_horoscopes:
return f"Hello there, {name}: {predictions}."
else:
return f"Hello there, {name}! Have a nice day!"
"""
if __name__ == "__main__":
app.run(debug=True)
|
9,760 | 5a895c864c496e1073d75937909c994432a71d75 | import socket
import json
from typing import Dict
listadionica = ["GS", "MS", "WFC", "VALBZ", "BOND", "VALE", "XLF"]
class Burza:
    """Thin JSON-over-TCP client for the exchange protocol."""

    def __init__(self, test):
        """Connect to the test or production exchange and perform the hello handshake.

        Args:
            test: when truthy, connect to the test exchange host.
        """
        if test:
            host_name = "test-exch-partitivnisumari"
            port = 25000
        else:
            host_name = "production"
            port = 25000
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.connect((host_name, port))
        # Line-buffered text stream over the socket.
        self.stream = s.makefile('rw', 1)
        # Fixed: the hello message must be a dict literal ({...}), not a
        # parenthesised expression with ':' pairs (SyntaxError in original).
        self.zapisi({"type": "hello", "team": 'PARTITIVNISUMARI'})
        assert self.citaj()['type'] == 'hello'
        self.order_id = 0

    def citaj(self, store_last=True):
        """Read one JSON message from the exchange; return None on EOF.

        NOTE: store_last is currently unused — last_data is always updated.
        (Removed stray '!!!' placeholder that made the original a SyntaxError.)
        """
        data = self.stream.readline()
        if(data == ""):
            return None
        else:
            data = json.loads(data)
            self.last_data = data
            return data

    def zapisi(self, data):
        """Serialise *data* as JSON and send it, newline-terminated."""
        json.dump(data, self.stream)
        self.stream.write("\n")

    def kupi(self, buy_sell, symbol, price, size):
        """Submit an 'add' order (dir is "BUY" or "SELL") and bump the order id.

        (Removed stray '!!!' placeholders from both branches.)
        """
        trade = {'type': 'add', 'order_id': self.order_id,
                 'symbol': symbol, 'dir': buy_sell, 'price': price, 'size': size}
        self.order_id += 1
        if buy_sell == "SELL":
            self.zapisi(trade)
        elif buy_sell == "BUY":
            self.zapisi(trade)
def logger(dicc, ord):
    """Record volume-weighted average prices for a 'book' message.

    Appends (avg_buy, avg_sell) — integer division — to dicc[symbol] when
    both sides of the book are non-empty; other message types are ignored.
    """
    if ord['type'] != 'book':
        return
    buy_value = sum(price * qty for price, qty in ord['buy'])
    buy_count = sum(qty for _, qty in ord['buy'])
    sell_value = sum(price * qty for price, qty in ord['sell'])
    sell_count = sum(qty for _, qty in ord['sell'])
    if buy_count != 0 and sell_count != 0:
        dicc[ord['symbol']].append((buy_value // buy_count, sell_value // sell_count))
def logN(burza, n):
dicc = {}
readed_results = []
for i in range(n):
readed_results.append(burza.citaj())
for ord in readed_results:
if ord['type'] == 'book':
buy = ord['buy']
sell = ord['sell']
count_buy = 0
value_buy = 0
for p, n in buy:
value_buy += |
9,761 | 1ae69eaaa08a0045faad13281a6a3de8f7529c7a | # -*- coding: utf-8 -*-
import csv
import datetime
from django.conf import settings
from django.contrib import admin
from django.http import HttpResponse
from django.utils.encoding import smart_str
from djforms.scholars.models import *
def export_scholars(modeladmin, request, queryset):
"""Export the presentation data."""
response = HttpResponse('', content_type='text/csv; charset=utf-8')
response['Content-Disposition'] = 'attachment; filename=cos.csv'
writer = csv.writer(response)
writer.writerow([
'Title',
'Reviewer',
'Leader',
'Leader Email',
'Sponsor',
'Other Sponsor',
'Presenters',
'Funding Source',
'Work Type',
'Permission to Reproduce',
'Faculty Sponsor Approval',
'Table',
'Electricity',
'Link',
'Poster',
'Date created',
])
for presentation in queryset:
link = 'http://{0}{1}'.format(
settings.SERVER_URL,
presentation.get_absolute_url(),
)
poster = 'http://{0}/assets/{1}'.format(
settings.SERVER_URL, presentation.poster_file,
)
try:
leader = '{0}, {1}'.format(
presentation.leader.last_name,
presentation.leader.first_name,
)
except Exception:
leader = ''
presenters = ''
for presenter in presentation.presenters.all():
if not presenter.leader:
presenters += '{0}, {1}|'.format(
presenter.last_name, presenter.first_name,
)
title = smart_str(
presentation.title,
encoding='utf-8',
strings_only=False,
errors='strict',
)
funding = smart_str(
presentation.funding,
encoding='utf-8',
strings_only=False,
errors='strict',
)
work_type = smart_str(
presentation.work_type,
encoding='utf-8',
strings_only=False,
errors='strict',
)
sponsor_email = ''
if presentation.leader:
sponsor_email = presentation.leader.sponsor_email
sponsor_other = presentation.leader.sponsor_other
writer.writerow([
title,
presentation.reviewer,
leader,
presentation.user.email,
sponsor_email,
sponsor_other,
presenters[:-1],
funding,
work_type,
presentation.permission,
presentation.shared,
presentation.need_table,
presentation.need_electricity,
link,poster,
presentation.date_created,
])
return response
export_scholars.short_description = """
Export the selected Celebration of Scholars Submissions
"""
class PresentationAdmin(admin.ModelAdmin):
"""Admin class for the presentation data model."""
model = Presentation
actions = [export_scholars]
raw_id_fields = ('user', 'updated_by', 'leader')
list_max_show_all = 500
list_per_page = 500
list_display = (
'title',
'reviewer',
'last_name',
'first_name',
'email',
'sponsor',
'sponsor_other',
'get_presenters',
'funding',
'work_type',
'permission',
'shared',
'need_table',
'need_electricity',
'status',
'poster',
'date_created',
)
ordering = [
'-date_created',
'title',
'work_type',
'permission',
'shared',
'need_table',
'need_electricity',
'status',
]
search_fields = (
'title',
'user__last_name',
'user__email',
'funding',
)
list_filter = ('status', 'date_created')
list_editable = ['reviewer']
def queryset(self, request):
"""Only show presentations that were created after a certain date."""
TODAY = datetime.date.today()
YEAR = int(TODAY.year)
qs = super(PresentationAdmin, self).queryset(request)
start_date = datetime.date(YEAR, 1, 1)
return qs.filter(date_created__gte=start_date)
def save_model(self, request, obj, form, change):
"""Override the save method to update some things."""
if change:
obj.updated_by = request.user
obj.save()
class PresenterAdmin(admin.ModelAdmin):
"""Admin class for the presenter model."""
model = Presenter
list_max_show_all = 500
list_per_page = 500
list_display = (
'date_created',
'last_name',
'first_name',
'email',
'leader',
'prez_type',
'college_year',
'major',
'hometown',
'sponsor',
'sponsor_name',
'sponsor_email',
'sponsor_other',
'department',
)
ordering = [
'date_created',
'last_name',
'first_name',
'email',
'leader',
'prez_type',
'college_year',
'major',
'hometown',
'sponsor',
'sponsor_name',
'sponsor_email',
'sponsor_other',
'department',
]
search_fields = (
'last_name',
'first_name',
'email',
)
admin.site.register(Presenter, PresenterAdmin)
admin.site.register(Presentation, PresentationAdmin)
|
9,762 | 0e73153d004137d374637abf70faffabf0bab1fb | # Generated by Django 3.1 on 2020-09-09 15:58
from django.db import migrations
class Migration(migrations.Migration):
    """Renames OrderProduct.products to the singular 'product'."""
    dependencies = [
        ('orders', '0001_initial'),
    ]
    operations = [
        migrations.RenameField(
            model_name='orderproduct',
            old_name='products',
            new_name='product',
        ),
    ]
|
9,763 | 398f9f52b83ffddfb452abbeaad2e83610580fee | # -*- coding: utf-8 -*-
# project: fshell
# author: s0nnet
# time: 2017-01-08
# desc: data_fuzzhash
import sys
sys.path.append("./dao")
from fss_data_fuzzhash_dao import *
class FssFuzzHash:
    """Service-layer facade over FssFuzzHashDao for fuzzy-hash records."""
    @staticmethod
    def insert_node(agent_id, data):
        """Persist *data* for the given agent id; delegates to the DAO layer."""
        return FssFuzzHashDao.insert_node(agent_id, data)
|
9,764 | ac5c6a534d5131438d9590b070e6b392d4ebed0c | from pynhost.grammars import extension
from pynhost.grammars import baseutils as bu
class AtomExtensionGrammar(extension.ExtensionGrammar):
    """Voice-command extension grammar scoped to the 'Autumntastic' app context."""
    # Key chord that activates this grammar's extension mode.
    activate = '{ctrl+alt+8}'
    # Characters reachable while searching: operators, letters and mapped symbols.
    search_chars = bu.merge_dicts(bu.OPERATORS, bu.ALPHABET, bu.CHAR_MAP)
    def __init__(self):
        super().__init__()
        self.app_context = 'Autumntastic'
        # No command mappings defined yet.
        self.mappings = {
        }
|
9,765 | 3aa8c9b39174f0ed5799d6991516b34ca669b7d6 | from django.db import models # db에 있는 models을 가져옴
from django.utils import timezone # 유틸에 있는 timezone을 가져옴
# Create your models here.
class Post(models.Model):
    # Defines the Post object; inherits from Django's Model base class,
    # so instances are persisted to the database.
    author = models.ForeignKey('auth.User')  # foreign key: link to another model
    title = models.CharField(max_length=200)  # length-limited
    text = models.TextField()  # no length limit
    created_date = models.DateTimeField(default=timezone.now)  # datetime field
    published_date = models.DateTimeField(blank=True, null=True)
    def publish(self):  # plain Python method
        """Stamp the post as published now and save it."""
        self.published_date = timezone.now()
        self.save()
    def __str__(self):
        return self.title
class User(models.Model):
    """Simple account record that also remembers the previous password."""
    # NOTE(review): primary_key should be the boolean True; the string 'true'
    # only works because any non-empty string is truthy.
    id = models.CharField(max_length=30, primary_key='true')
    password = models.CharField(max_length=50)
    reg_date = models.DateField(default=timezone.now)  # registration date
    upt_date = models.DateField(default=timezone.now)  # last update date
    last_pwd = models.CharField(max_length=50)  # previous password
    def chg_password(self):
        """Archive the current password into last_pwd and save."""
        self.last_pwd = self.password
        self.save()
    def __id__(self):
        return self.id
|
9,766 | 1fbdb0b40f0d65fffec482b63aa2192968b01d4b | #define the simple_divide function here
def simple_divide(item, denom):
    """Return item / denom, treating a zero denominator as 0."""
    if denom == 0:
        return 0
    return item / denom

def fancy_divide(list_of_numbers, index):
    """Divide each element of the list by the element at *index*.

    A zero pivot yields a list of zeros rather than raising.
    """
    pivot = list_of_numbers[index]
    quotients = []
    for number in list_of_numbers:
        quotients.append(simple_divide(number, pivot))
    return quotients
def main():
    """Read space-separated floats and an index from stdin; print fancy_divide."""
    data = input()
    l=data.split()
    l1=[]
    # Convert every token to float before dividing.
    for j in l:
        l1.append(float(j))
    s=input()
    index=int(s)
    print(fancy_divide(l1,index))
if __name__== "__main__":
    main()
|
9,767 | 9e511c769f6ccedc06845a382171fb3729913d05 | import generic
name = __name__
def options(opt):
    """Register this tool's build options via the shared generic helper."""
    generic._options(opt, name)
def configure(cfg):
    # Probe for the CZMQ header/library (pkg-config name = lowercased tool
    # name); build fails if missing, and the check depends on LIBZMQ.
    generic._configure(cfg, name, incs=('czmq.h',), libs=('czmq',),
                       pcname = name.lower(),
                       uses = 'LIBZMQ', mandatory=True)
|
9,768 | 69ebdab4cd1f0b5154305410381db252205ff97d | #!/usr/bin/env python
# -*- coding:UTF-8 -*-
'''
@Description: Database migration entry point (Flask-Migrate / Flask-Script)
@Author: Zpp
@Date: 2020-03-30 11:01:56
@LastEditors: Zpp
@LastEditTime: 2020-04-28 09:55:26
'''
import sys
import os
curPath = os.path.abspath(os.path.dirname(__file__))
rootPath = os.path.split(curPath)[0]
sys.path.append(rootPath)  # make the project root importable
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
from conf.setting import Config
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = Config().get_sql_url()
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
# Models must be imported so their tables are visible to the migration tool.
from models.salary import *
from models.system import *
from models.log import *
# Initialize migrate with two arguments: the Flask app and the database handle.
migrate = Migrate(app, db)
# Initialize the command manager.
manager = Manager(app)
# Register the 'db' command, bound to MigrateCommand.
manager.add_command('db', MigrateCommand)
if __name__ == '__main__':
    manager.run()
|
9,769 | bf04bf41f657a6ada4777fe5de98d6a68beda9d3 | import scipy.sparse
from multiprocessing.sharedctypes import Array
from ctypes import c_double
import numpy as np
from multiprocessing import Pool
import matplotlib.pyplot as plt
from time import time
import scipy.io as sio
import sys
# np.random.seed(1)
d = 100  # ambient dimension of each sample
n = 100000  # number of samples drawn
k=10  # dimension of the subspace being estimated
learning_rate = 0.4
T_freq = 100  # record an error measurement every T_freq updates
num_threads = 1
epochs = 1  # how many times the sample set is replayed
Iterations = 10  # independent repetitions averaged in the plots
def getSyntheticData(n, d, k):
    """Draw n zero-mean Gaussian samples in R^d with geometrically decaying variances.

    Returns [samples, covariance, truth] where truth is the sum of the k
    largest diagonal variances (the optimal k-dimensional PCA objective).
    """
    decay = 0.8
    variances = [decay ** power for power in range(d)]
    covariance = np.diag(variances)
    truth = np.sum(variances[:k])
    center = np.array([0] * d)
    samples = np.random.multivariate_normal(center, covariance, n)
    return [samples, covariance, truth]
def oja_async(sample):
    """One lock-free (Hogwild-style) Oja update against the shared iterate.

    Reads and writes the module-level shared buffers (coef_shared,
    rate_shared) without synchronization -- concurrent workers may
    interleave; that is intentional for Hogwild.
    Returns [error, timestamp] on every T_freq-th update, otherwise None.
    """
    # print rate_shared[0]
    sample = sample.reshape(d,1)
    # Interpret the flat shared buffer as the current d x k iterate.
    U = np.frombuffer(coef_shared)
    U = U.reshape(d,k)
    grad = np.dot(sample,np.dot(sample.T,U))  # (x x^T) U -- the Oja gradient
    rate_shared[0] = rate_shared[0]+1  # shared step counter (unsynchronized)
    U = U + (learning_rate/rate_shared[0])*grad
    # U = U + (learning_rate/np.sqrt(rate_shared[0]))*grad
    # Write the (not yet orthonormalized) iterate back element-wise;
    # flat index j + i*k matches the row-major reshape above.
    for i in range(d):
        for j in range(k):
            coef_shared[j+i*k] = U[i][j]
    # Orthonormalize only the local copy, for error measurement.
    U= np.linalg.qr(U)[0]
    if rate_shared[0]%T_freq ==0:
        error = truth-np.trace(np.dot(np.dot(U.T,covariance),U))
        return [error,time()]
    # else:
    #     return None
def hogwild(samples, k, num_threads):
    """Run lock-free parallel Oja updates (Hogwild) over *samples*.

    Maps oja_async over the samples with a process pool of *num_threads*
    workers; the workers mutate the module-level shared buffers
    (coef_shared / rate_shared) without locking.

    Returns [errors, times]: the periodic error measurements (one every
    T_freq updates) and their elapsed times, in chronological order.
    """
    n = len(samples)
    st = time()
    p = Pool(num_threads)
    error_n_times = p.map(oja_async, samples)
    # Resource fix: reap the worker processes once the map completes.
    p.close()
    p.join()
    # oja_async only returns a measurement on every T_freq-th update.
    error_n_times_refined = [e_n_t for e_n_t in error_n_times if e_n_t is not None]
    errors = [ent[0] for ent in error_n_times_refined]
    end_times = [ent[1] for ent in error_n_times_refined]
    times = [et - st for et in end_times]
    # Measurements arrive in sample order, not time order; sort chronologically.
    errors = [x for _, x in sorted(zip(times, errors))]
    times = sorted(times)
    # BUG FIX: '/' yields a float on Python 3, which is not a valid slice
    # index; use integer division for the expected measurement count.
    n_t_freq = n // T_freq
    return [errors[:n_t_freq], times[:n_t_freq]]
def evaluate(model):
    """Compute the PCA objective gap of *model* on the training data.

    NOTE(review): reads module-level names ``data`` and ``n`` that are not
    defined anywhere in this file as shown -- confirm where they come from;
    nothing in this file calls evaluate().
    Returns the train error twice, preserving a (train, test) return shape.
    """
    data_train = data["train"]
    # data_test = data["test"]
    covariance_train = np.dot(data_train,data_train.T)/n
    # covariance_test = np.dot(data_test,data_test.T)/n
    truth_train = np.trace(covariance_train)
    # truth_test = np.trace(covariance_test)
    # error_train = np.linalg.norm(data_train - np.dot(np.dot(model,model.T),data_train),"fro")/n
    # error_test = np.linalg.norm(data_test - np.dot(np.dot(model,model.T),data_test),"fro")/n
    error_train = truth_train - np.trace(np.dot(np.dot(model.T,covariance_train),model))
    # error_test = truth_test - np.trace(np.dot(np.dot(model.T,covariance_test),model))
    # return error_train, error_test
    return error_train, error_train
def ojaNormal(samples, k):
    """Sequential (single-process) Oja's algorithm baseline.

    Uses the module globals d, truth, covariance, learning_rate and T_freq.
    Returns [errors, elapsed_times], one entry per T_freq samples processed.
    """
    errors = []
    elapsed_times = []
    start_time = time()
    U = np.random.randn(d,k)  # random initial iterate
    # U = np.linalg.qr(U)[0]
    t = 0
    for x in samples:
        t=t+1
        x = x.reshape(d,1)
        # Oja update with a 1/t decaying step size.
        U = U + (np.dot(x,np.dot(x.T,U)))*learning_rate/t
        if t%T_freq == 0:
            # Orthonormalize a copy only to measure the objective gap.
            U_proj= np.linalg.qr(U)[0]
            # U = U_proj
            error = truth- np.trace(np.dot(np.dot(U_proj.T,covariance),U_proj))
            errors.append(error)
            elapsed_times.append(time() - start_time)
    U_final = np.linalg.qr(U)[0]  # NOTE: computed but not returned
    return [errors,elapsed_times]
def plotEverything(errors_oja, times_oja,errors_hogwild_one, times_hogwild_one,errors_hogwild_two, times_hogwild_two,errors_hogwild_four, times_hogwild_four):
    """Plot error-vs-time (figure 0) and error-vs-iteration (figure 1) curves
    for the sequential baseline and the 1/2/4-process Hogwild runs, then show
    both figures (blocks until the windows are closed).
    """
    plt.figure(0)
    plt.xlabel('Time (secs)')
    plt.ylabel('Error')
    plt.plot(times_oja,errors_oja)
    plt.plot(times_hogwild_one,errors_hogwild_one)
    plt.plot(times_hogwild_two,errors_hogwild_two)
    plt.plot(times_hogwild_four,errors_hogwild_four)
    plt.legend(("oja","hogwild, 1 process","hogwild 2 processes","hogwild, 4 processes"))
    # plt.legend(("oja","hogwild 2 processes","hogwild, 4 processes"))
    plt.title("k = "+str(k))
    # X axes for the per-iteration view: 1..len(curve).
    iterations_oja = range(1,len(errors_oja)+1)
    iterations_hogwild_one = range(1,len(errors_hogwild_one)+1)
    iterations_hogwild_two = range(1,len(errors_hogwild_two)+1)
    iterations_hogwild_four = range(1,len(errors_hogwild_four)+1)
    plt.figure(1)
    plt.xlabel('Iterations')
    plt.ylabel('Error')
    plt.plot(iterations_oja,errors_oja)
    plt.plot(iterations_hogwild_one,errors_hogwild_one)
    plt.plot(iterations_hogwild_two,errors_hogwild_two)
    plt.plot(iterations_hogwild_four,errors_hogwild_four)
    plt.legend(("oja","hogwild, 1 process","hogwild 2 processes","hogwild, 4 processes"))
    # plt.legend(("oja","hogwild 2 processes","hogwild, 4 processes"))
    plt.title("k = "+str(k))
    plt.show()
# Experiment driver: average sequential Oja vs Hogwild (1/2/4 processes)
# over `Iterations` repetitions, then plot the averaged curves.
[samples,covariance,truth] = getSyntheticData(n,d,k)
# Replay the same sample set once per epoch.
total_samples = []
for i in range(epochs):
    total_samples.extend(samples)
# Accumulators for the per-measurement sums across repetitions.
errors_oja_sum = [0]*n
times_oja_sum = [0]*n
errors_hogwild_sum_one = [0]*n
times_hogwild_sum_one = [0]*n
errors_hogwild_sum_two = [0]*n
times_hogwild_sum_two = [0]*n
errors_hogwild_sum_four= [0]*n
times_hogwild_sum_four = [0]*n
for t in range(Iterations):
    [errors_oja, times_oja] = ojaNormal(total_samples,k)
    errors_oja_sum = [e_sum + e for (e_sum,e) in zip(errors_oja_sum,errors_oja)]
    times_oja_sum = [t_sum + t for (t_sum,t) in zip(times_oja_sum,times_oja)]
    # Fresh unsynchronized shared state (iterate + step counter) per run.
    coef_shared = Array(c_double,
        (np.random.randn(d,k).flat),
        lock=False)
    rate_shared = Array(c_double,
        [0],
        lock=False)
    [errors_hogwild_one, times_hogwild_one] = hogwild(total_samples,k,1)
    coef_shared = Array(c_double,
        (np.random.randn(d,k).flat),
        lock=False)
    rate_shared = Array(c_double,
        [0],
        lock=False)
    [errors_hogwild_two, times_hogwild_two] = hogwild(total_samples,k,2)
    coef_shared = Array(c_double,
        (np.random.randn(d,k).flat),
        lock=False)
    rate_shared = Array(c_double,
        [0],
        lock=False)
    [errors_hogwild_four, times_hogwild_four] = hogwild(total_samples,k,4)
    errors_hogwild_sum_one = [e_sum + e for (e_sum,e) in zip(errors_hogwild_sum_one,errors_hogwild_one)]
    times_hogwild_sum_one = [t_sum + t for (t_sum,t) in zip(times_hogwild_sum_one,times_hogwild_one)]
    errors_hogwild_sum_two = [e_sum + e for (e_sum,e) in zip(errors_hogwild_sum_two,errors_hogwild_two)]
    times_hogwild_sum_two = [t_sum + t for (t_sum,t) in zip(times_hogwild_sum_two,times_hogwild_two)]
    errors_hogwild_sum_four = [e_sum + e for (e_sum,e) in zip(errors_hogwild_sum_four,errors_hogwild_four)]
    times_hogwild_sum_four = [t_sum + t for (t_sum,t) in zip(times_hogwild_sum_four,times_hogwild_four)]
# Convert sums to averages before plotting.
errors_oja_average = [e/Iterations for e in errors_oja_sum]
times_oja_average = [t/Iterations for t in times_oja_sum]
times_hogwild_average_one = [t/Iterations for t in times_hogwild_sum_one]
errors_hogwild_average_one = [e/Iterations for e in errors_hogwild_sum_one]
times_hogwild_average_two = [t/Iterations for t in times_hogwild_sum_two]
errors_hogwild_average_two = [e/Iterations for e in errors_hogwild_sum_two]
times_hogwild_average_four = [t/Iterations for t in times_hogwild_sum_four]
errors_hogwild_average_four = [e/Iterations for e in errors_hogwild_sum_four]
plotEverything(errors_oja_average, times_oja_average,errors_hogwild_average_one, times_hogwild_average_one,errors_hogwild_average_two, times_hogwild_average_two,errors_hogwild_average_four, times_hogwild_average_four)
|
9,770 | ebc2acbcbab787b07c97b0a4ea8fbaeb9d8e30aa | 30. Convertir P libras inglesas a D dólares y C centavos. Usar el tipo de cambio $2.80 = 1 libra
# Convert P pounds sterling to dollars or cents at the rate $2.80 = 1 pound.
p = 2.80
x = int(input("Desea convertir sus libras a dolar(1) o a centavos(2)"))
conversion = 0.0  # robustness: defined even when an invalid option is entered
if x == 1:
    d = float(input("¿Cuantas libras desea convertir a dólar?\n"))
    # BUG FIX: pounds -> dollars multiplies by the rate (was d/p).
    conversion = d * p
if x == 2:
    c = float(input("¿Cuantas libras desea convertir a centavos?\n"))
    # BUG FIX: 1 pound = 2.80 dollars = 280 cents (was c/100).
    conversion = c * p * 100
print("El resultado es:")
print(float(conversion))
|
9,771 | eafe89de10c4187057b0cc1e0e9772f03a576b0d | __version__ = "1.2.0"
import hashlib
from collections import Counter
from re import findall
from secrets import choice
from string import ascii_letters, ascii_lowercase, ascii_uppercase
from string import digits as all_digits
from string import punctuation
import requests
def check_password(password):
    """Check a given password against known data breaches

    Note:
        This method uses the `Have I Been Pwned <https://haveibeenpwned.com/>`_ Passwords API via its k-anonymity range endpoint: only the first five characters of the password's `SHA-1 <https://en.wikipedia.org/wiki/SHA-1>`_ hash leave the device.

    Args:
        password (str): The password to check

    Returns:
        int: The number of times the password has been found
    """
    digest = hashlib.sha1(password.encode("utf-8")).hexdigest()
    prefix, remainder = digest[:5], digest[5:]
    response = requests.get(f"https://api.pwnedpasswords.com/range/{prefix}")
    # Each response line is "SUFFIX:COUNT"; match our hash's suffix.
    for line in response.text.splitlines(False):
        suffix, _, count = line.partition(":")
        if suffix.lower() == remainder:
            return int(count)
    return 0
class PasswordRequirements:
    """A set of requirements to check passwords against

    Keyword Args:
        min_length (int): The minimum length of the password
        min_digits (int): The minimum number of digits in the password
        min_special (int): The minimum number of special characters in the password
        min_alpha (int): The minimum number of alphabetical characters in the password
        min_upper (int): The minimum number of uppercase letters in the password
        min_lower (int): The minimum number of lowercase letters in the password
        check_breaches (bool): Whether to reject passwords found in known data breaches (uses :meth:`~passwd.check_password`)
        func (function): A predicate taking the password (:class:`str`); must return ``True`` for the password to meet all requirements
    """

    def __init__(
        self,
        *,
        min_length=0,
        min_digits=0,
        min_special=0,
        min_alpha=0,
        min_upper=0,
        min_lower=0,
        check_breaches=False,
        func=None,
    ):
        self.min_length = min_length
        self.min_digits = min_digits
        self.min_special = min_special
        self.min_alpha = min_alpha
        self.min_upper = min_upper
        self.min_lower = min_lower
        self.check_breaches = check_breaches
        self.func = func

    def check(self, password):
        """Return ``True`` when *password* satisfies every configured requirement.

        Args:
            password (str): The password to check

        Returns:
            bool: Whether the password meets all the given requirements
        """
        occurrences = Counter(password)

        def count_in(alphabet):
            # Total characters of *password* that belong to *alphabet*.
            return sum(n for ch, n in occurrences.items() if ch in alphabet)

        structural_ok = (
            len(password) >= self.min_length
            and len(findall(r"\d", password)) >= self.min_digits
            and count_in(punctuation) >= self.min_special
            and count_in(ascii_letters) >= self.min_alpha
            and count_in(ascii_uppercase) >= self.min_upper
            and count_in(ascii_lowercase) >= self.min_lower
        )
        if not structural_ok:
            return False
        # Breach lookup after the cheap checks: it costs a network call.
        if self.check_breaches and check_password(password):
            return False
        if self.func and not self.func(password):
            return False
        return True
class PasswordGenerator:
    """A random password generator

    Args:
        length (int): The length of the password

    Keyword Args:
        uppercase (bool): Whether to allow uppercase letters in the password
        lowercase (bool): Whether to allow lowercase letters in the password
        digits (bool): Whether to allow numerical digits in the password
        special (bool): Whether to allow special characters in the password
    """

    def __init__(
        self, length, *, uppercase=True, lowercase=True, digits=True, special=True
    ):
        self.length = length
        self.uppercase = uppercase
        self.lowercase = lowercase
        self.digits = digits
        self.special = special

    def generate(
        self, length=None, uppercase=None, lowercase=None, digits=None, special=None
    ):
        """Generate a random password.

        Any keyword given here overrides the instance-level setting for this
        call only; ``None`` means "use the instance default".

        Returns:
            str: The freshly generated password
        """
        size = self.length if length is None else length
        # (per-call override, instance default, character set) triples.
        toggles = (
            (uppercase, self.uppercase, ascii_uppercase),
            (lowercase, self.lowercase, ascii_lowercase),
            (digits, self.digits, all_digits),
            (special, self.special, punctuation),
        )
        pool = ""
        for override, default, charset in toggles:
            enabled = default if override is None else override
            if enabled:
                pool += charset
        # secrets.choice gives cryptographically strong randomness.
        return "".join(choice(pool) for _ in range(size))

    def __len__(self):
        # Negative configured lengths count as zero.
        return self.length if self.length >= 0 else 0
|
9,772 | e7b96c0161e65f3f22f2ad0832fc6d1bb529f150 | """
In search.py, you will implement generic search algorithms which are called
by Pacman agents (in searchAgents.py).
"""
import util
class SearchProblem:
    """
    This class outlines the structure of a search problem, but doesn't implement
    any of the methods (in object-oriented terminology: an abstract class).

    You do not need to change anything in this class, ever.
    """
    def getStartState(self):
        """
        Returns the start state for the search problem
        """
        util.raiseNotDefined()  # abstract: subclasses must override
    def isGoalState(self, state):
        """
        state: Search state

        Returns True if and only if the state is a valid goal state
        """
        util.raiseNotDefined()  # abstract: subclasses must override
    def getSuccessors(self, state):
        """
        state: Search state

        For a given state, this should return a list of triples,
        (successor, action, stepCost), where 'successor' is a
        successor to the current state, 'action' is the action
        required to get there, and 'stepCost' is the incremental
        cost of expanding to that successor
        """
        util.raiseNotDefined()  # abstract: subclasses must override
    def getCostOfActions(self, actions):
        """
        actions: A list of actions to take

        This method returns the total cost of a particular sequence of actions. The sequence must
        be composed of legal moves
        """
        util.raiseNotDefined()  # abstract: subclasses must override
def tinyMazeSearch(problem):
    """
    Returns a sequence of moves that solves tinyMaze. For any other
    maze, the sequence of moves will be incorrect, so only use this for tinyMaze
    """
    from game import Directions
    s = Directions.SOUTH
    w = Directions.WEST
    # Hard-coded action sequence valid only for the tinyMaze layout.
    return [s,s,w,s,w,w,s,w]
def depthFirstSearch(problem):
    """
    Search the deepest nodes in the search tree first.

    Graph search: each state is expanded at most once. Returns the list of
    actions from the start state to a goal state.
    """
    fringe = util.Stack()  # LIFO fringe -> deepest nodes come off first
    expanded = []          # states already expanded, never revisited
    # Fringe entries are (state, actions-to-reach-state) pairs.
    fringe.push((problem.getStartState(), []))
    while not fringe.isEmpty():
        state, actions = fringe.pop()
        # Skip anything already expanded (may have been queued twice).
        if state in expanded:
            continue
        # Goal test on expansion returns the accumulated action path.
        if problem.isGoalState(state):
            return actions
        expanded.append(state)
        # Queue every unexpanded successor with its extended path.
        for successor, action, _cost in problem.getSuccessors(state):
            if successor not in expanded:
                fringe.push((successor, actions + [action]))
    util.raiseNotDefined()
def breadthFirstSearch(problem):
    """
    Search the shallowest nodes in the search tree first.

    Identical to depthFirstSearch except the fringe is FIFO, which makes
    expansion proceed level by level.
    """
    fringe = util.Queue()  # FIFO fringe -> shallowest nodes come off first
    expanded = []          # states already expanded, never revisited
    # Fringe entries are (state, actions-to-reach-state) pairs.
    fringe.push((problem.getStartState(), []))
    while not fringe.isEmpty():
        state, actions = fringe.pop()
        # Skip anything already expanded (may have been queued twice).
        if state in expanded:
            continue
        # Goal test on expansion returns the accumulated action path.
        if problem.isGoalState(state):
            return actions
        expanded.append(state)
        # Queue every unexpanded successor with its extended path.
        for successor, action, _cost in problem.getSuccessors(state):
            if successor not in expanded:
                fringe.push((successor, actions + [action]))
    util.raiseNotDefined()
def uniformCostSearch(problem):
"Search the node of least total cost first. "
"*** YOUR CODE HERE ***"
#UCS is similar to DFS and BFS, save for a few key differences
# Frontier stored in a Priority Queue
frontier = util.PriorityQueue()
# Visited states stored in a list
visitedStates = []
# Format of each element: ((current coordinates, [path taken to get there]), cost)
frontier.push((problem.getStartState(), []), 0)
# while there are still states to explore
while not frontier.isEmpty():
# store the current state and path in separate variables
currentState, pathTaken = frontier.pop()
# for skipping states that have already been visited
if currentState in visitedStates:
continue
# for returning the correct path to the goal state upon discovering it
if problem.isGoalState(currentState):
return pathTaken
# count the current state as "visited"
visitedStates.append(currentState)
# for each successor state, check whether they have already been visited.
for coordinates, direction, cost in problem.getSuccessors(currentState):
if coordinates not in visitedStates:
# if not, re-calculate the cost to reach the given coordinates, and push the updated information to the frontier
newCost = problem.getCostOfActions(pathTaken + [direction])
frontier.push((coordinates, pathTaken + [direction]), newCost)
util.raiseNotDefined()
def nullHeuristic(state, problem=None):
    """Trivial heuristic: estimates zero cost from any state to the goal,
    which reduces A* to uniform cost search."""
    return 0
def aStarSearch(problem, heuristic=nullHeuristic):
"Search the node that has the lowest combined cost and heuristic first."
"*** YOUR CODE HERE ***"
# A* is different in that the heuristic argument provided is included in some parts
# Frontier stored in a Priority Queue
frontier = util.PriorityQueue()
# Visited states stored in a list
visitedStates = []
# Format of each element: ((current coordinates, [path taken to get there]), heuristic function)
frontier.push((problem.getStartState(), []), heuristic(problem.getStartState(), problem))
# while there are still states to explore
while not frontier.isEmpty():
# store the current state and path in separate variables
currentState, pathTaken = frontier.pop()
# for skipping states that have already been visited
if currentState in visitedStates:
continue
# for returning the correct path to the goal state upon discovering it
if problem.isGoalState(currentState):
return pathTaken
# count the current state as "visited"
visitedStates.append(currentState)
# for each successor state, check whether they have already been visited.
for coordinates, direction, cost in problem.getSuccessors(currentState):
if coordinates not in visitedStates:
# if not, re-calculate the cost to reach the given coordinates, and push the updated information to the frontier. Here, unlike UCS, the heuristic function is added to the newCost variable
newCost = problem.getCostOfActions(pathTaken + [direction]) + heuristic(coordinates, problem)
frontier.push((coordinates, pathTaken + [direction]), newCost)
util.raiseNotDefined()
# Abbreviations
# Short aliases so the algorithms can be referenced by abbreviated name.
bfs = breadthFirstSearch
dfs = depthFirstSearch
astar = aStarSearch
ucs = uniformCostSearch
|
9,773 | 13e27c29839286988b37d2d3685f54d42fd57973 | # -*- coding: utf-8 -*-
# Copyright (c) 2018-2020 Christiaan Frans Rademan <chris@fwiw.co.za>.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
from luxon import register
from luxon import router
from luxon.helpers.api import sql_list, obj
from infinitystone.models.roles import infinitystone_role
@register.resources()
class Roles(object):
    """REST resource collection for role objects (CRUD under /v1/role...)."""
    def __init__(self):
        # Route registration: read endpoints are tagged roles:view,
        # mutating endpoints roles:admin.
        router.add('GET', '/v1/role/{id}', self.role,
                   tag='roles:view')
        router.add('GET', '/v1/roles', self.roles,
                   tag='roles:view')
        router.add('POST', '/v1/role', self.create,
                   tag='roles:admin')
        router.add(['PUT', 'PATCH'], '/v1/role/{id}', self.update,
                   tag='roles:admin')
        router.add('DELETE', '/v1/role/{id}', self.delete,
                   tag='roles:admin')
    def role(self, req, resp, id):
        """Return a single role by primary key."""
        return obj(req, infinitystone_role, sql_id=id)
    def roles(self, req, resp):
        """Return a listing of roles, searchable by id and name."""
        return sql_list(req, 'infinitystone_role',
                        search={'id': str,
                                'name': str})
    def create(self, req, resp):
        """Create a role from the request body and persist it."""
        role = obj(req, infinitystone_role)
        role.commit()
        return role
    def update(self, req, resp, id):
        """Apply the request body to an existing role and persist it."""
        role = obj(req, infinitystone_role, sql_id=id)
        role.commit()
        return role
    def delete(self, req, resp, id):
        # NOTE(review): this body is identical to update() and calls commit()
        # rather than any delete operation -- confirm the record is actually
        # removed (looks like a copy-paste slip).
        role = obj(req, infinitystone_role, sql_id=id)
        role.commit()
        return role
|
9,774 | f72cdf8d91c31760335b96052a34615307f48727 | from cpp_service.SubService import SubService
import config
if __name__ == "__main__":
gateway = config.gateway["trading_system_gateway"]
host = gateway["host"]
port = gateway["port"]
server_id = gateway["server_id"]
licences = gateway["licences"]
service = SubService(host, port, server_id, licences)
"""订阅order"""
service.sub_order()
|
9,775 | 00b4a57537358797bfe37eee76bbf73ef42de081 |
#Define a function max_of_three() that takes three numbers as
#arguments and returns the largest of them.
def max_of_three(a, b, c):
    """Return the largest of the three arguments.

    BUG FIX: the original bound its intermediate result to the name ``max``,
    shadowing the builtin inside the function; use a neutral local name.
    """
    largest = a if a > b else b
    return largest if largest > c else c
# BUG FIX: the demo called the builtin max() instead of the function just
# defined; print() call form also keeps the line valid on Python 2 and 3.
print(max_of_three(234, 124, 43))
def max_of_three2(a, b, c):
    """Print the largest of the three arguments (returns None).

    COMPATIBILITY FIX: the original used Python-2-only print statements,
    which are a SyntaxError under Python 3; single-argument print() calls
    behave identically on both versions.
    """
    if a > b and a > c:
        print(a)
    elif b > c:
        print(b)
    else:
        print(c)
print max_of_three2(0, 15, 2) |
9,776 | 286953e381d03c0817d57f9ee4e15f2a0ce808a9 | from django_evolution.mutations import ChangeField
# Schema evolution: make the depends_on fields non-nullable on both the
# review request and its draft (no initial value needed for the change).
MUTATIONS = [
    ChangeField('ReviewRequest', 'depends_on', initial=None, null=False),
    ChangeField('ReviewRequestDraft', 'depends_on', initial=None, null=False),
]
|
9,777 | 8279f8a80d96a7231e35100d2c39fa5e1f34f5f5 | from scipy.cluster.hierarchy import dendrogram, linkage
from get_train import get, pre
import matplotlib.pyplot as plt
#%%
# Tickers to cluster: four banks followed by four tech companies.
index = [
    'BAC',
    'JPM',
    'GS',
    'C',
    'AAPL',
    'IBM',
    'MSFT',
    'ORCL'
]
# One training snapshot per year.
years = [
    2010,
    2013,
    2016
]
# Fundamental features fetched for each company.
features = [
    'TOTAL ASSETS',
    'Cash & Equivalents',
    'Receivables - Total (Net)',
    'Inventories - Total',
    'Sales (Net)',
    'Cost of Good Sold',
    'GROSS PROFIT'
]
# Hierarchical-clustering linkage methods, one per subplot row.
methods = [
    'single',
    'complete',
    'average',
    'ward'
]
#%%
# Grid of dendrograms: rows = linkage methods, columns = years.
fig, axes = plt.subplots(4, 3, figsize=(16, 9))
fig.tight_layout()
fig.subplots_adjust(wspace=0.05)
i = 0
j = 0
for year in years:
    train = get(year, features, index)
    train = pre(train)
    for method in methods:
        ax = axes[i, j]
        Z = linkage(train, method=method)
        dn = dendrogram(Z, ax=ax, labels=index)
        ax.set_yticks([])  # distances are not comparable across methods
        i += 1
    j += 1
    i = 0
# Label the first three rows with their method names...
for i in range(3):
    axes[i, 0].set_ylabel(
        methods[i],
        rotation=0,
        labelpad=25
    )
# ...and highlight the ward row in red.
axes[3, 0].set_ylabel(
    'WARD',
    rotation=0,
    labelpad=25,
    color='r'
)
# Column titles are the snapshot years.
for j in range(3):
    axes[0, j].set_title(years[j])
|
9,778 | 6339a1a06319a748030b3411c7a8d00f36336e65 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Warning hierarchy: deprecation-related warnings derive from
# OpenStackDeprecationWarning (itself a DeprecationWarning, so Python's
# standard warning filters apply); general warnings derive from
# OpenStackWarning.
class OpenStackDeprecationWarning(DeprecationWarning):
    """Base class for warnings about deprecated features in openstacksdk."""
class RemovedResourceWarning(OpenStackDeprecationWarning):
    """Indicates that a resource has been removed in newer API versions and
    should not be used.
    """
class RemovedFieldWarning(OpenStackDeprecationWarning):
    """Indicates that a field has been removed in newer API versions and should
    not be used.
    """
class LegacyAPIWarning(OpenStackDeprecationWarning):
    """Indicates an API that is in 'legacy' status, a long term deprecation."""
class OpenStackWarning(Warning):
    """Base class for general warnings in openstacksdk."""
class ConfigurationWarning(OpenStackWarning):
    """Indicates an issue with configuration."""
class UnsupportedServiceVersion(OpenStackWarning):
    """Indicates a major version that SDK doesn't understand."""
|
9,779 | 2f9a081845685a4748c8b028ae4ee3a056a10284 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# This file is part of CbM (https://github.com/ec-jrc/cbm).
# Author : Konstantinos Anastasakis
# Credits : GTCAP Team
# Copyright : 2021 European Commission, Joint Research Centre
# License : 3-Clause BSD
import os
import glob
from ipywidgets import (Text, Label, HBox, VBox, Layout, Dropdown,
ToggleButtons, Output, HTML, Button,
FileUpload, IntText, RadioButtons)
from cbm.utils import config
from cbm.ipycbm.utils import settings_ds, cbm_widgets
from cbm.ipycbm.ipy_ext import ext_func
from cbm.foi import foi_v1
from cbm.datas import db
# foi_v2 is optional; if its import fails, report the error and continue
# with foi_v1 only.
try:
    from cbm.foi import foi_v2
except Exception as err:
    print(err)
def foi_tab_v1():
path_foi = f"{config.get_value(['paths', 'temp'])}/foi/"
path_foi_func = foi_v1.path_foi_func
progress = Output()
def outlog(*text):
with progress:
print(*text)
foi_info = HTML("""FOI procedures version 1 (requires access to a database).
""", placeholder='FOI Information')
# Connect to database
config_info = HTML(value="""1. Connect to database and object storage.<br>
FOI procedures need direct access to the database. In case there no
image is provided, access to object storage will be needed as well
to generate the base image from sentinel images.
""", placeholder='FOI Information')
config_conn = Button(
value=False,
button_style='info',
tooltip='Configure db connection.',
icon='cogs',
layout=Layout(width='40px')
)
config_conn_box = HBox([])
@config_conn.on_click
def config_conn_on_click(b):
if config_conn_box.children == ():
config_conn_box.children = [settings_ds.direct_conn()]
else:
config_conn_box.children = ()
config_box = VBox([config_info, config_conn,
config_conn_box])
# Spatial data to be tested
spatial_info = HTML(
"""2. Select the spatial data to be tested - parcels that will be
checked for heterogeneity and cardinality.<br>
- Select a table from the database""")
db_tables = Dropdown(
options=[],
description='db Tables:'
)
refresh_db_tables = Button(
value=False,
button_style='info',
tooltip='Get db tables.',
icon='refresh',
layout=Layout(width='40px')
)
@refresh_db_tables.on_click
def refresh_db_tables_on_click(b):
db_tables.options = db.tables(config.get_value(['set', 'db_conn']))
db_tables_box = HBox([db_tables, refresh_db_tables])
upload_shp = Button(
description='Create new table',
value=False,
button_style='info',
tooltip='upload_shp.',
icon='up'
)
upload_box = VBox([])
@upload_shp.on_click
def upload_shp_on_click(b):
if upload_box.children == ():
upload_box.children = [ext_func.upload_shp(path_foi, True)]
else:
upload_box.children = ()
spatial_box = VBox([spatial_info, upload_shp, upload_box, db_tables_box])
# Thematic raster.
img_info = HTML(
"""3. Thematic raster - classification raster, or raster from other
source that will be used for testing heterogeneity and cardinality.<br>
- Upload or generate raster base image.
(Only upload is currently available)""")
img_option = ToggleButtons(
options=['Upload', 'Generate'],
value=None,
disabled=True,
button_style='info', # 'success', 'info', 'warning', 'danger' or ''
tooltips=['Upnload your base image', 'Get from object storage']
)
def on_img_option_change(change):
if img_option.value == 'Upload':
img_box.children = [HBox([img_info, img_option, img_file])]
else:
img_box.children = ()
img_option.observe(on_img_option_change, 'value')
img_file = cbm_widgets.get_files_dropdown(
f'{path_foi}raster', '.tif, .tiff', 'Select Raster')
img_box = VBox([img_info, img_option, img_file])
# YAML File upload
yml_info = HTML(
"""4. YAML file that holds the classes form the thematic raster.<br>
- This can be also a simple list of values in the notebook
corespondence between pixel values and names for the classes""")
yml_file = cbm_widgets.get_files_dropdown(path_foi, '.yml, .yaml',
'Select YML')
yml_box = VBox([yml_info, yml_file])
# Database functions
dbf_info = HTML("""5. Create database functions.<br>
- Import required database functions for FOI analysis to the database""")
dbf_insert = Button(
value=False,
button_style='info',
tooltip='Create functions.',
icon='fa-share-square'
)
@dbf_insert.on_click
def dbf_insert_on_click(b):
outlog('path_foi_func :', path_foi_func)
progress.clear_output()
try:
functions = glob.glob(f"{path_foi_func}*.func")
db = config.get_value(['set', 'db_conn'])
sche = config.get_value(['db', db, 'sche'])
user = config.get_value(['db', db, 'user'])
for f in functions:
db.insert_function(open(f).read().format(
schema=sche, owner=user))
outlog(f"The '{f}' Was imported to the database.")
finc_list = [
f"ipycbm_{f.split('/')[-1].split('.')[0]}, " for f in functions]
outlog(
f"The functions: {('').join(finc_list)} where added to the database")
except Exception as err:
outlog("Could not add functions to dattabase.", err)
dbf_box = VBox(
[dbf_info, dbf_insert])
# FOI Parameters
param_info = HTML(
"""6. Set FOI v1 Parameters""")
# heterogeneity_threshold
param_heto_info = HTML("""
Minimum and maximum thresholds for heterogeneity checks. In the example,
any parcel with percentage of pixels for one class between 30 and 70 from
the total, will be considered heterogenous.
""")
param_min_het = IntText(
value=30,
description='MIN:',
tooltip="Minimum threshold for heterogeneity checks",
layout=Layout(width='150px')
)
param_max_het = IntText(
value=70,
description='MAX:',
tooltip="Maximum threshold for heterogeneity checks",
layout=Layout(width='150px')
)
param_area_info = HTML("""Minimum area for clusters selection -
only clusters bigger from this threshold will be counted.
""")
param_area = IntText(
value=2000,
description='area:',
tooltip="Minimum area for clusters selection.",
layout=Layout(width='200px')
)
param_box = VBox([param_info,
param_heto_info, HBox([param_min_het, param_max_het]),
param_area_info, param_area
])
# Run FOI analysis
run_info = Label("7. Run the FOI analysis.")
run_analysis = Button(
description='Run FOI v1',
value=False,
button_style='info',
tooltip='Run FOI analysis version 1',
icon='play',
)
run_box = VBox([run_info, run_analysis])
@run_analysis.on_click
def run_analysis_on_click(b):
with progress:
foi_v1.main(
db_tables.value,
f"{path_foi}raster/{img_file.children[1].children[0].value}",
f"{path_foi}{yml_file.children[1].children[0].value}",
param_min_het.value, param_max_het.value, param_area.value)
wbox = VBox([foi_info,
config_box,
spatial_box,
img_box,
yml_box,
dbf_box,
param_box,
run_box,
progress])
return wbox
def foi_tab_v2():
    """Build the FOI analysis "version 2" notebook tab (ipywidgets).
    Unlike v1, this workflow runs without a database connection: the user
    supplies a vector file, a thematic raster and a YAML class map, tunes
    the heterogeneity/cluster parameters and runs ``foi_v2.main``.
    Returns:
        VBox: the assembled widget tree for display in the notebook.
    """
    path_foi = f"{config.get_value(['paths', 'temp'])}/foi/"
    progress = Output()
    def outlog(*text):
        # Funnel status messages into the tab's Output widget.
        with progress:
            print(*text)
    foi_info = HTML("""FOI procedures version 2 (does not require access to a database).
        """, placeholder='FOI Information')
    # Vector file
    shp_info = HTML(
        """1. Spatial data to be tested -
        parcels that will be checked for heterogeneity and cardinality.""")
    shp_file = cbm_widgets.get_files_dropdown(
        f'{path_foi}vector', '', 'Select .shp', True, True)
    shp_box = VBox([shp_info, shp_file])
    # Thematic raster.
    img_info = HTML(
        """2. Thematic raster - classification raster, or raster from other
        source that will be used for testing heterogeneity and cardinality.<br>
        - Upload or generate raster base image.
        (Only upload is currently available)""")
    # NOTE(review): the toggle is disabled and only 'Upload' shows widgets --
    # the 'Generate' path appears not to be implemented yet.
    img_option = ToggleButtons(
        options=['Upload', 'Generate'],
        value=None,
        disabled=True,
        button_style='', # 'success', 'info', 'warning', 'danger' or ''
        tooltips=['Upnload your base image', 'Get from object storage']
    )
    def on_img_option_change(change):
        # Show the raster upload controls only for the 'Upload' choice.
        if img_option.value == 'Upload':
            img_box.children = [HBox([img_info, img_option, img_file])]
        else:
            img_box.children = ()
    img_option.observe(on_img_option_change, 'value')
    img_file = cbm_widgets.get_files_dropdown(
        f'{path_foi}raster', '.tif, .tiff', 'Select Raster')
    img_box = VBox([img_info, img_option, img_file])
    # YAML File upload
    yml_info = HTML(
        """3. YAML file that holds the classes form the thematic raster.<br>
        - This can be also a simple list of values in the notebook
        corespondence between pixel values and names for the classes""")
    yml_file = cbm_widgets.get_files_dropdown(path_foi, '.yml, .yaml',
                                              'Select YML')
    yml_box = VBox([yml_info, yml_file])
    # FOI Prerequisites
    pre_info = Label("4. Set FOI v2 Parameters.")
    # heterogeneity_threshold
    pre_heto_chec = HTML("""
    Minimum and maximum thresholds for heterogeneity checks. In the example,
    any parcel with percentage of pixels for one class between 30 and 70 from
    the total, will be considered heterogenous.
    """)
    pre_min_het = IntText(
        value=30,
        description='MIN:',
        tooltip="Minimum threshold for heterogeneity checks",
        disabled=False,
        layout=Layout(width='150px')
    )
    pre_max_het = IntText(
        value=70,
        description='MAX:',
        tooltip="Maximum threshold for heterogeneity checks",
        disabled=False,
        layout=Layout(width='150px')
    )
    pre_heto_chec_box = HBox([pre_min_het, pre_max_het])
    pre_min_cluster_size = IntText(
        value=20,
        description='pixels:',
        tooltip="Minimum area for clusters selection.",
        disabled=False,
        layout=Layout(width='200px')
    )
    pre_pixel_connectivity = IntText(
        value=8,
        description='connectivity type:',
        tooltip="Type of pixel connectivity in analysis. Accepted values: 4 or 8.",
        disabled=False,
        layout=Layout(width='200px')
    )
    pre_negative_buffer = IntText(
        value=-10,
        description='negative buffer:',
        tooltip="Negative buffer to be applied on the FOI",
        disabled=False,
        layout=Layout(width='200px')
    )
    pre_box = VBox([
        pre_info, pre_heto_chec, pre_heto_chec_box,
        pre_pixel_connectivity, pre_negative_buffer,
        HBox([pre_min_cluster_size,
              HTML("Minimum area for clusters selection - only clusters bigger from this threshold will be counted.")])
    ])
    # Run FOI analysis
    run_info = Label("5. Run the FOI analysis.")
    run_analysis = Button(
        description='Run FOI v2',
        value=False,
        disabled=False,
        button_style='info',
        tooltip='Run FOI analysis version 2',
        icon='play',
    )
    run_box = HBox([run_analysis])
    @run_analysis.on_click
    def run_analysis_on_click(b):
        # Collect the selected files and parameter values, then run the
        # analysis with its console output streamed into the progress widget.
        with progress:
            foi_v2.main(
                f"{path_foi}vector/{shp_file.children[1].children[0].value}",
                f"{path_foi}raster/{img_file.children[1].children[0].value}",
                f"{path_foi}{yml_file.children[1].children[0].value}",
                pre_negative_buffer.value,
                pre_min_het.value,
                pre_max_het.value,
                pre_pixel_connectivity.value,
                pre_min_cluster_size.value)
    wbox_v2 = VBox([foi_info,
                    shp_box,
                    img_box,
                    yml_box,
                    pre_box,
                    run_info,
                    run_box,
                    progress])
    return wbox_v2
|
9,780 | c2f859e0ed0e812768dec04b2b1f9ddd349350f6 | # open a converted base to bits file and convert it back to the base sequences
seq2 = ''
with open('chr01.txt') as a:
while 1:
seq = a.read(2)
# print(seq)
seq = seq.replace('00', 'c').replace('01', 'g').replace('10', 'a').replace('11', 't')
seq2 += seq
if not seq:
break
print(len(seq2))
print(seq2)
|
9,781 | 451a36eb205a269a05e3b3d89541278633d12aaa |
class ChartType:
    """Namespace of chart type identifiers.
    Values are plain strings (not Enum members) so they compare equal to
    raw string input such as parsed configuration or request parameters.
    """
    Vanilla = "Vanilla"
    Neopolitan = "Neopolitan"
|
9,782 | 4ed6f4db4c9c3319d6289ba402f81bbd8accf915 | import numpy as np
import dxchange
import ptychotomo
if __name__ == "__main__":
    # Adjoint consistency test for the tomography operator R:
    # simulate projections of a known object, then verify <u,R*Ru> == <Ru,Ru>.
    # read object
    u = dxchange.read_tiff('data/init_object.tiff')
    # Make the object complex-valued: imaginary part is half the real part.
    u = u+1j*u/2
    nz, n, _ = u.shape
    # parameters
    center = n/2
    ntheta = 384
    ne = 3*n//2  # padded detector width used for the second solver
    ngpus = 1
    pnz = nz//2
    theta = np.linspace(0, 4*np.pi, ntheta).astype('float32')
    # simulate data
    with ptychotomo.SolverTomo(theta, ntheta, nz, n, pnz, center, ngpus) as tslv:
        data = tslv.fwd_tomo_batch(u)
    # adjoint test with data padding
    with ptychotomo.SolverTomo(theta, ntheta, nz, ne, pnz, center+(ne-n)/2, ngpus) as tslv:
        data = ptychotomo.utils.paddata(data, ne)
        ua = tslv.adj_tomo_batch(data)
        ua = ptychotomo.utils.unpadobject(ua, n)
    print(f'norm data = {np.linalg.norm(data)}')
    print(f'norm object = {np.linalg.norm(ua)}')
    # The two inner products should agree up to floating-point error.
    print(
        f'<u,R*Ru>=<Ru,Ru>: {np.sum(u*np.conj(ua)):e} ? {np.sum(data*np.conj(data)):e}')
|
9,783 | 21c581131cff8cf2f4aa407055184d56865a6335 | #!/usr/bin/env python
# Title : STACK_BostonHousing.py
# Description : Stacking was the natural progression of our algorithms trial.
# In here, we'll use prediction from a number of models in order
# to improve accuracy as it add linearly independent data to our
# dataset. Here we also use voting ensembler, using the best es-
# timator three timers on the stack of second level models.
# We'll find CV scores of each model on train_test_split then
# stack the models on a 5-KFold of the data, finding final CV
# score. We'll also plot the comparative graph of Real Prices vs
# Predicted Prices
# Author : Neves4
# Outputs : Figure with one plot : 'Real Prices vs Predicted prices'
# Values : SVR CV Scores: 0.6798 (+/- 0.0895)
# XGB CV Scores: 0.8784 (+/- 0.0598)
# RF CV Scores: 0.8601 (+/- 0.0789)
# STACK CV Scores: 0.8809 (+/- 0.0864)
# License : MIT License
#==============================================================================
##### IMPORTING #####
import numpy as np
import xgboost as xgb
from sklearn import datasets
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.linear_model import ElasticNet
from sklearn.ensemble import RandomForestRegressor
from sklearn.svm import SVR
from sklearn.model_selection import cross_val_score, train_test_split, KFold
from sklearn.metrics import r2_score
sns.set() # set seaborn style
##### DECLARING AND TRAINING #####
# Load the Boston dataset and convert it to a pandas DataFrame; since the
# column naming is not automatic, assign the feature names to the columns.
# To inspect the data, call print(boston_pd.head())
boston = datasets.load_boston()
boston_pd = pd.DataFrame(boston.data)
boston_pd.columns = boston.feature_names
# Split the dataset with train_test_split. To check the size of each split
# tensor, print(X_train.shape)
X, Y = boston_pd, boston.target
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = 0.1,
                                                    random_state = 42)
# ##### 1ST LEVEL MODELS #####
# # ElasticNet - baseline model #0
# print("------- FITTING ElasticNet -------")
# en_mdl = ElasticNet(alpha = 5.2, l1_ratio = 0.5, random_state = 42)
# en_cv_scores = cross_val_score(en_mdl, X_train, Y_train, cv=5, scoring='r2')
# print(" DONE! CV Scores: {:.4f} (+/- {:.4f})" .format(en_cv_scores.mean(),\
# en_cv_scores.std() * 2))
# SVR - baseline model #1
print("------- FITTING SVR -------")
svr_mdl = SVR(kernel = 'linear', C = 0.11, epsilon = 0.011, gamma = 0.1)
svr_cv_scores = cross_val_score(svr_mdl, X_train, Y_train, cv=5, scoring='r2')
print(" DONE! CV Scores: {:.4f} (+/- {:.4f})" .format(svr_cv_scores.mean(),\
svr_cv_scores.std() * 2))
# XGBRegressor - baseline model #2
print("------- FITTING XGBRegressor -------")
xgb_mdl = xgb.XGBRegressor(learning_rate = 0.0503, n_estimators = 339,
                           max_depth = 5, min_child_weight = 2, gamma = 0.17,
                           subsample = 0.84, colsample_bytree = 0.85,
                           reg_alpha = 0.008, reg_lambda = 1.2,
                           scale_pos_weight = 1, seed = 42)
xgb_cv_scores = cross_val_score(xgb_mdl, X_train, Y_train, cv=5, scoring='r2')
print(" DONE! CV Scores: {:.4f} (+/- {:.4f})" .format(xgb_cv_scores.mean(),\
xgb_cv_scores.std() * 2))
# RandomForestRegressor - baseline model #3
print("------- FITTING RandomForestRegressor -------")
rf_mdl = RandomForestRegressor(n_estimators = 95, max_features = 'auto',
                               max_depth = 18, min_samples_split = 2,
                               min_samples_leaf = 1, bootstrap = True,
                               random_state = 42)
rf_cv_scores = cross_val_score(rf_mdl, X_train, Y_train, cv=5, scoring='r2')
print(" DONE! CV Scores: {:.4f} (+/- {:.4f})" .format(rf_cv_scores.mean(),\
rf_cv_scores.std() * 2))
class Ensemble(object):
    """Stacked ensemble: out-of-fold base-model predictions feed a stacker.

    Args:
        n_splits: number of KFold splits used to build out-of-fold features.
        stacker: second-level estimator fitted on the stacked features.
        base_models: iterable of first-level estimators; repeating the same
            estimator acts as a simple voting scheme.
    """
    def __init__(self, n_splits, stacker, base_models):
        self.n_splits = n_splits
        self.stacker = stacker
        self.base_models = base_models
    def fit_predict(self, X, Y, T):
        """Fit base models out-of-fold on (X, Y), then predict for T.

        Each base model contributes one feature column: its out-of-fold
        predictions for the train rows (S_train) and the mean of its
        per-fold predictions for the test rows (S_test).  Returns the
        stacker's predictions on S_test.
        """
        X = np.array(X)
        Y = np.array(Y)
        T = np.array(T)
        # Create folds on the dataset based on n_splits
        folds = list(KFold(n_splits = self.n_splits, shuffle = True,
                           random_state = 42).split(X, Y))
        # One column per base model in the second-level feature matrices.
        S_train = np.zeros((X.shape[0], len(self.base_models)))
        S_test = np.zeros((T.shape[0], len(self.base_models)))
        # Loop trough base_models
        print("------- FITTING Stacker - 2nd level -------")
        for i, clf in enumerate(self.base_models):
            # Create a dummy to calculate predictions on all folds
            S_test_i = np.zeros((T.shape[0], self.n_splits))
            # Loop trough data folds
            for j, (train_idx, test_idx) in enumerate(folds):
                X_train = X[train_idx]
                Y_train = Y[train_idx]
                X_holdout = X[test_idx]
                Y_holdout = Y[test_idx]
                clf.fit(X_train, Y_train)
                # Out-of-fold prediction fills this model's train column.
                Y_pred = clf.predict(X_holdout)[:]
                print (" Model {}, fold {}. R^2 score: {:.4f}"\
                       .format(i, j, r2_score(Y_holdout, Y_pred)))
                S_train[test_idx, i] = Y_pred
                S_test_i[:, j] = clf.predict(T)[:]
            # Update test data with average of predictions from the dummy
            S_test[:, i] = S_test_i.mean(axis = 1)
        # Print final CV score
        results = cross_val_score(self.stacker, S_train, Y, cv=5, scoring='r2')
        print("\033[1;92mDONE! \033[0;0m\033[1;37mCV scores: {:.4f} (+/- {:.4f})"
              .format(results.mean(), results.std() * 2))
        # After creating new features on the test data, fit the chosen stacker
        # on train data and finally predict on test data, then return
        self.stacker.fit(S_train, Y)
        final_prediction = self.stacker.predict(S_test)[:]
        return final_prediction
# Stack the first-level models (XGB repeated three times = voting weight)
# with the SVR as the second-level stacker.
stack = Ensemble(n_splits = 5, stacker = svr_mdl,
                 base_models = (xgb_mdl, rf_mdl, xgb_mdl, svr_mdl, xgb_mdl))
stack_pred = stack.fit_predict(X_train, Y_train, X_test)
##### PLOTS #####
# Plot outputs using scatter. Ticks are disabled and everything else is kept
# as clean as possible. Predicted prices vs Real Prices
# NOTE(review): custom_style is defined but never passed to seaborn -- verify
# whether it was meant to go into sns.set()/lmplot.
custom_style = {'axes.labelcolor': 'white',
                'xtick.color': 'white',
                'ytick.color': 'white'}
data = pd.DataFrame(data = {'stack_pred': stack_pred, 'Y_test': Y_test})
ax = sns.lmplot(x='Y_test', y='stack_pred', data = data, truncate=True, size=5)
ax.set_axis_labels("Real prices", "Predicted prices")
plt.tick_params(axis='both', colors='gray')
plt.title("Real vs Predicted prices on Boston Housing", fontweight = 'bold')
plt.tight_layout()
plt.show()
|
9,784 | 41ca762fe6865613ae4ef2f657f86b516353676f | from django.contrib.auth import authenticate, login, logout
from django.template import loader
from django.http import (HttpResponse, JsonResponse,
HttpResponseForbidden, HttpResponseBadRequest)
from django.shortcuts import redirect
from django.views.decorators.http import require_POST
import json
from aimodel.AnalyticSession import AnalyticSession
from data.DatasetConfigManager import DatasetConfigManager
def index(request, err_msg=None):
    """
    Renders the index page, optionally showing an error message.
    """
    template = loader.get_template("aimodel/index.html")
    context = {"err_msg": err_msg}
    return HttpResponse(template.render(context, request))
@require_POST
def log_in(request):
    """
    Handles login.

    On bad/missing credentials the index page is re-rendered with an error
    message; on success the user is redirected to the main page.
    """
    # Get the username and password
    username = request.POST.get("username")
    password = request.POST.get("password")
    if not username or not password:
        return index(request, "Invalid credentials!")
    # Authenticate and log in
    user = authenticate(username=username, password=password)
    if user:
        login(request, user)
        return redirect("/main")
    else:
        return index(request, "Invalid credentials!")
def main(request):
    """
    Renders the main page behind login; anonymous users go back to index.
    """
    if not request.user.is_authenticated:
        return redirect("/")
    template = loader.get_template("aimodel/main.html")
    context = {"datasets": DatasetConfigManager.loaded_datasets_list()}
    return HttpResponse(template.render(context, request))
@require_POST
def analytics_session(request):
    """
    Starts a new analytic session.

    Expects a ``dataset`` POST parameter.  Any previous session state is
    discarded before the new AnalyticSession is created and stored.
    """
    if not request.user.is_authenticated:
        return redirect("/")
    try:
        dataset = request.POST["dataset"]
    except KeyError:
        err = "Invalid request params!"
        return HttpResponseBadRequest(reason=err)
    # Drop any stale analytic state before starting over.
    if "analytics" in request.session:
        del request.session["analytics"]
    # NOTE(review): storing the AnalyticSession object in the session relies
    # on the session backend being able to serialize it -- confirm.
    request.session["analytics"] = AnalyticSession(dataset)
    bucket_info = request.session["analytics"].bucket_info()
    template = loader.get_template("ui/analytics.html")
    context = dict()
    # Initial bucket state is embedded into the page as JSON.
    context["init_buckets"] = json.dumps(bucket_info["buckets"])
    context["init_bucket_ordering"] =\
        json.dumps(bucket_info["bucket_ordering"])
    return HttpResponse(template.render(context, request))
def log_out(request):
    """
    Logs the user out.

    Safe to call when not logged in; always redirects to the index page.
    """
    if request.user.is_authenticated:
        logout(request)
    return redirect("/")
def _check_session_valid(request):
    """
    A helper function checking whether the user is logged in and the session
    data is present.

    Returns an error HttpResponse to hand straight back to the client, or
    None when the request may proceed.
    """
    if not request.user.is_authenticated:
        return HttpResponseForbidden(reason="Access denied!")
    if "analytics" not in request.session:
        err = "Could not fetch analytic session data."
        return HttpResponseBadRequest(reason=err)
    return None
def bucket_info(request):
    """
    Fetches information about the current buckets as JSON.
    """
    error_response = _check_session_valid(request)
    if error_response is not None:
        return error_response
    analytics = request.session["analytics"]
    return JsonResponse(analytics.bucket_info())
def create_bucket(request):
    """
    Creates a new bucket in the current analytic session.
    """
    error_response = _check_session_valid(request)
    if error_response is not None:
        return error_response
    try:
        request.session["analytics"].create_bucket()
    except ValueError as err:
        return HttpResponseBadRequest(reason=str(err))
    return JsonResponse({})
@require_POST
def delete_bucket(request):
    """
    Deletes the bucket named by "bucket_id" in the JSON request body.
    """
    error_response = _check_session_valid(request)
    if error_response is not None:
        return error_response
    payload = json.loads(request.body)
    if "bucket_id" not in payload:
        return HttpResponseBadRequest(reason="Invalid request params!")
    try:
        request.session["analytics"].delete_bucket(payload["bucket_id"])
    except ValueError as err:
        return HttpResponseBadRequest(reason=str(err))
    return JsonResponse({})
@require_POST
def rename_bucket(request):
    """
    Renames a bucket.

    Expects a JSON body with "bucket_id" and "new_bucket_name".
    """
    session_check = _check_session_valid(request)
    if session_check:
        return session_check
    request_data = json.loads(request.body)
    try:
        bucket_id = request_data["bucket_id"]
        new_bucket_name = request_data["new_bucket_name"]
    except KeyError:
        err = "Invalid request params!"
        return HttpResponseBadRequest(reason=err)
    try:
        request.session["analytics"].rename_bucket(bucket_id, new_bucket_name)
    except ValueError as e:
        return HttpResponseBadRequest(reason=str(e))
    return JsonResponse({})
@require_POST
def swap_buckets(request):
    """
    Swaps the position of two buckets.

    Expects a JSON body with "bucket1_id" and "bucket2_id".
    """
    session_check = _check_session_valid(request)
    if session_check:
        return session_check
    request_data = json.loads(request.body)
    try:
        bucket1_id = request_data["bucket1_id"]
        bucket2_id = request_data["bucket2_id"]
    except KeyError:
        err = "Invalid request params!"
        return HttpResponseBadRequest(reason=err)
    try:
        request.session["analytics"].swap_buckets(bucket1_id, bucket2_id)
    except ValueError as e:
        return HttpResponseBadRequest(reason=str(e))
    return JsonResponse({})
@require_POST
def toggle_bucket(request):
    """
    Toggles (activates/deactivates) the bucket named in the JSON body.
    """
    error_response = _check_session_valid(request)
    if error_response is not None:
        return error_response
    payload = json.loads(request.body)
    if "bucket_id" not in payload:
        return HttpResponseBadRequest(reason="Invalid request params!")
    try:
        request.session["analytics"].toggle_bucket(payload["bucket_id"])
    except ValueError as err:
        return HttpResponseBadRequest(reason=str(err))
    return JsonResponse({})
@require_POST
def interaction_round(request):
    """
    Performs an interaction round, providing new image suggestions.

    The whole JSON request body is forwarded as the user feedback.
    """
    session_check = _check_session_valid(request)
    if session_check:
        return session_check
    user_feedback = json.loads(request.body)
    try:
        suggs = request.session["analytics"].interaction_round(user_feedback)
    except ValueError as e:
        return HttpResponseBadRequest(reason=str(e))
    # safe=False: the suggestion payload may be a list, not a dict.
    return JsonResponse(suggs, safe=False)
@require_POST
def bucket_view_data(request):
    """
    Obtains bucket view data, i.e., the images in the bucket with bucket
    confidences.

    Expects a JSON body with "bucket_id" and "sort_by".
    """
    session_check = _check_session_valid(request)
    if session_check:
        return session_check
    request_data = json.loads(request.body)
    try:
        bucket_id = request_data["bucket_id"]
        sort_by = request_data["sort_by"]
    except KeyError:
        err = "Invalid request params!"
        return HttpResponseBadRequest(reason=err)
    try:
        bucket_view_data =\
            request.session["analytics"].bucket_view_data(bucket_id, sort_by)
    except ValueError as e:
        return HttpResponseBadRequest(reason=str(e))
    return JsonResponse(bucket_view_data, safe=False)
def toggle_mode(request):
    """
    Toggles between Tetris/grid display modes.
    """
    error_response = _check_session_valid(request)
    if error_response is not None:
        return error_response
    request.session["analytics"].toggle_mode()
    return JsonResponse({})
@require_POST
def grid_set_size(request):
    """
    Resizes the grid.

    Expects a JSON body with "dim" (which dimension) and "new_size".
    Returns the refreshed grid data.
    """
    session_check = _check_session_valid(request)
    if session_check:
        return session_check
    request_data = json.loads(request.body)
    try:
        dim = request_data["dim"]
        new_size = request_data["new_size"]
    except KeyError:
        err = "Invalid request params!"
        return HttpResponseBadRequest(reason=err)
    try:
        new_grid_data = request.session["analytics"].grid_set_size(dim,
                                                                   new_size)
    except ValueError as e:
        return HttpResponseBadRequest(reason=str(e))
    return JsonResponse(new_grid_data, safe=False)
@require_POST
def transfer_images(request):
    """
    Transfers (moves/copies) images between buckets.

    Expects a JSON body with "images", "bucket_src", "bucket_dst", "mode"
    (move vs. copy) and "sort_by".  Responds with the refreshed view of the
    source bucket.
    """
    session_check = _check_session_valid(request)
    if session_check:
        return session_check
    request_data = json.loads(request.body)
    try:
        images = request_data["images"]
        bucket_src = request_data["bucket_src"]
        bucket_dst = request_data["bucket_dst"]
        mode = request_data["mode"]
        sort_by = request_data["sort_by"]
    except KeyError:
        err = "Invalid request params!"
        return HttpResponseBadRequest(reason=err)
    try:
        request.session["analytics"].transfer_images(images,
                                                     bucket_src, bucket_dst,
                                                     mode)
        # Return the source bucket's updated contents to the client.
        bucket_view_data =\
            request.session["analytics"].bucket_view_data(bucket_src, sort_by)
    except ValueError as e:
        return HttpResponseBadRequest(reason=str(e))
    return JsonResponse(bucket_view_data, safe=False)
@require_POST
def fast_forward(request):
    """
    Fast-forwards a bucket.

    Expects a JSON body with "bucket" and "n_ff" (how far to fast-forward).
    """
    session_check = _check_session_valid(request)
    if session_check:
        return session_check
    request_data = json.loads(request.body)
    try:
        bucket = request_data["bucket"]
        n_ff = request_data["n_ff"]
    except KeyError:
        err = "Invalid request params!"
        return HttpResponseBadRequest(reason=err)
    try:
        request.session["analytics"].fast_forward(bucket, n_ff)
    except ValueError as e:
        return HttpResponseBadRequest(reason=str(e))
    return JsonResponse({})
@require_POST
def ff_commit(request):
    """
    Commits a fast-forward.

    Expects a JSON body with "bucket" identifying the bucket whose pending
    fast-forward should be committed.
    """
    session_check = _check_session_valid(request)
    if session_check:
        return session_check
    # NOTE: a leftover debug print(request_data) was removed here; request
    # bodies should not be dumped to stdout, and no sibling view does so.
    request_data = json.loads(request.body)
    try:
        bucket = request_data["bucket"]
    except KeyError:
        err = "Invalid request params!"
        return HttpResponseBadRequest(reason=err)
    try:
        request.session["analytics"].ff_commit(bucket)
    except ValueError as e:
        return HttpResponseBadRequest(reason=str(e))
    return JsonResponse({})
def end_session(request):
    """
    Ends an analytic session and tells the client where to navigate next.
    """
    error_response = _check_session_valid(request)
    if error_response is not None:
        return error_response
    del request.session["analytics"]
    return JsonResponse({"redirect_url": "/main"})
|
def emphasize(sentence):
    """Capitalize each space-separated word of *sentence*.

    The first character of every word is upper-cased and the rest
    lower-cased.  Empty words -- produced by an empty sentence or by
    leading/trailing/repeated spaces -- are passed through unchanged
    instead of raising IndexError as the previous ``word[0]`` indexing did.
    """
    words = sentence.split(" ")
    for i, word in enumerate(words):
        # word[:1] is safe on the empty string, unlike word[0].
        words[i] = word[:1].upper() + word[1:].lower()
    return " ".join(words)
# Self-checks: each case pairs an expected value with the actual result.
exp1 = "Hello World"
ans1 = emphasize("hello world")
assert ans1 == exp1, f"expected {exp1}, got {ans1}"
# Upper-case input must be normalized, not preserved.
exp2 = "Good Morning"
ans2 = emphasize("GOOD MORNING")
assert ans2 == exp2, f"expected {exp2}, got {ans2}"
# Non-alphabetic leading characters are left untouched.
exp3 = "99 Red Balloons!"
ans3 = emphasize("99 red balloons!")
assert ans3 == exp3, f"expected {exp3}, got {ans3}"
print("everything okay")
|
9,786 | 1c55cfa03cd9210b7cf9e728732afe19930e9a41 | import yet
# NOTE(review): Python 2 script (print statements, list-indexing of
# dict.keys()); it will not run under Python 3 without porting.
import pickle
# Load the pickled mapping of contract address -> Solidity source text.
sources = pickle.load(open("./db/source_list"))
addr_list = sources.keys()
'''
for i in range(len(addr_list)):
    print addr_list[i],
    try:
        a = yet.tree(None, sources[addr_list[i]])
        print ' Owner :',
        for i in a.owner.keys():
            print i+ '() ' + a.owner[i][1]['name'] + ',',
    except Exception as e:
        pass
        #print 'error!'
    print ''
'''
# Compile a local test contract and inspect its AST via the yet.tree helper.
compiled = yet.solc.compile_source(open("./test.sol").read(100000))
ast = compiled[compiled.keys()[0]]['ast']
b = yet.tree(ast)
print 'modifier list'
for i in b.modifier_list:
    print i['attributes']['name']
print 'function list'
for i in b.function_list:
    print i['attributes']['name']
print ''
for i in b.public_function_list:
    print i['attributes']['name']
print b.owner
'''
import pickle
import solc
import re
import utils.getsource as gs
import utils.verified_parse as vp
sources = pickle.load(open('./db/real_source_list', 'r'))
addr_list = sources.keys()
new_sources = {}
compiled_list = []
err_count = 0
for i in range(len(addr_list)):
    print str(i)
    #print gs.comment_remover(sources[addr_list[i]])
    #print gs.clear(sources[addr_list[i]])
    try:
        new_sources[addr_list[i]] = re.sub('pragma.+[\n]', '', gs.clear(sources[addr_list[i]]))
    except:
        print 'fuck!!'
        err_count += 1
    #compiled_list.append(solc.compile_source(tmp))
pickle.dump(new_sources, open("./db/real_source_list.tmp", "wb"))
print 'total error count : ' + str(err_count)
for i in addr_list:
    tmp_source = gs.comment_remover(sources[i])
    print gs.getcontractname(tmp_source)
'''
9,787 | a78bbb85f4912e5f7ea23f689de65cb16a38d814 | import asyncio
from . import edit_or_reply, udy
plugin_category = "utils"
# Register the ".as" command: a short edit animation that ends with the
# Islamic greeting ("Assalamualaikum").
@udy.cod_cmd(
    pattern="as$",
    command=("as", plugin_category),
    info={
        "header": "salam.",
        "usage": "{tr}as",
    },
)
async def _(event):
    "animation command"
    event = await edit_or_reply(event, "as")
    await event.edit("yuuhuuuu")
    # Brief pause between animation frames.
    await asyncio.sleep(2)
    await event.edit("Assalamualaikum wr. wb.")
# Register the ".ws" command: the matching animation that answers the
# greeting ("Waalaikum salam").
@udy.cod_cmd(
    pattern="ws$",
    command=("ws", plugin_category),
    info={
        "header": "answer the salam.",
        "usage": "{tr}ws",
    },
)
async def _(event):
    "animation command"
    event = await edit_or_reply(event, "ws")
    await event.edit("huuyyyy")
    # Brief pause between animation frames.
    await asyncio.sleep(2)
    await event.edit("Waalaikum salam wr. wb.")
|
9,788 | fd059ae6e5eb3f7dc18dff6f9ed206002cea5fb2 | import os
# Small demo of the os module: environment inspection plus path handling.
print(os.name)
#print(os.environ)
print(os.environ.get('PATH'))
print(os.path.abspath('.'))
# Build the target path and actually use it.  The original called
# os.mkdir(os.path.abspath('.')) -- creating the current directory, which
# always exists and therefore always raised FileExistsError -- while the
# joined 'testdir' path was computed and discarded.  makedirs(exist_ok=True)
# creates the intended directory and is idempotent.
target = os.path.join(os.path.abspath('.'), 'testdir')
os.makedirs(target, exist_ok=True)
9,789 | 44e4151279884ce7c5d5a9e5c82916ce2d3ccbc2 | import random
from datetime import timedelta
from typing import Union, Type, Tuple, List, Dict
from django import http
from django.test import TestCase, Client
from django.utils import timezone
from exam_web import errors
from exam_web.models import Student, AcademyGroup, uuid_str, ExamSession, \
UserSession, Question, Stage, QuestionType, ExamTicket, ExamStatus
class ApiClient(Client):
    """Django test client bound to a single API endpoint.
    Requests are sent as JSON; when a ``student`` is supplied, its id is
    placed in the ``student`` cookie, which the API uses for auth.
    """
    path: str
    def __init__(self, path: str, student: Student = None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.student = student
        self.path = path
        self.headers = {'content_type': 'application/json'}
        if student:
            self.cookies['student'] = student.id
    def path_params(self, **params):
        # New client for the same student with path placeholders filled in.
        return ApiClient(self.path.format(**params), self.student)
    def get(self, **kwargs):
        # Query-string GET against the bound path.
        return super().get(self.path, data=kwargs, **self.headers)
    def post(self, **json):
        # JSON-body POST against the bound path.
        return super().post(self.path, data=json, **self.headers)
    def __call__(self, **kwargs):
        # Guard against calling the client object directly.
        raise AttributeError('Use `get` or `post` methods instead')
class ApiTestCase(TestCase):
    """Base test case with shared fixtures and response assertion helpers.
    One group/student pair is created per test class; exam-related objects
    are built on demand via setup_exam_objects/teardown_exam_objects.
    """
    group: AcademyGroup
    student: Student
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls.group = AcademyGroup.objects.create(name='test_group')
        cls.student = Student.objects.create(name='test user', group=cls.group)
    @classmethod
    def tearDownClass(cls):
        cls.student.delete()
        cls.group.delete()
        super().tearDownClass()
    def setup_exam_objects(self):
        # One 40-minute exam session with a ticket per question: single- and
        # multi-choice in stage one, an open question in stage two.
        self.session = ExamSession.objects.create(
            start_time=timezone.now(), duration=timedelta(minutes=40))
        self.student_session = UserSession.objects.create(
            student=self.student, exam_session=self.session)
        self.questions = [
            Question.objects.create(
                stage=Stage.first, type=QuestionType.single, max_score=1,
                text='test single question', options=['a', 'b', 'c']
            ),
            Question.objects.create(
                stage=Stage.first, type=QuestionType.multi, max_score=1,
                text='test multi question', options=['a', 'b', 'c']
            ),
            Question.objects.create(
                stage=Stage.second, type=QuestionType.open, max_score=1,
                text='test open question', options=None,
            ),
        ]
        self.tickets = [
            ExamTicket.objects.create(
                student=self.student, session=self.student_session,
                question=question) for question in self.questions
        ]
        self.ticket_map = {x.id: x for x in self.tickets}
    def teardown_exam_objects(self):
        for ticket in self.tickets:
            ticket.delete()
        for question in self.questions:
            question.delete()
        self.student_session.delete()
    def assertResponseSuccess(self, response: http.HttpResponse):
        # Expect HTTP 200 with a JSON body shaped {"result": ...};
        # returns the unwrapped result payload.
        content = response.content.decode()
        self.assertEqual(response.status_code, 200,
                         (response.status_code, content))
        content = response.json()
        self.assertIn('result', content, content)
        return content['result']
    def assertResponseError(
        self, response: http.JsonResponse,
        error: Union[errors.APIError, Type[errors.APIError]] = None
    ) -> Tuple[int, str]:
        # Expect an error status with {"error": ...}; when an expected error
        # (class or instance) is given, its status and message must match.
        content = response.json()
        self.assertGreaterEqual(response.status_code, 400,
                                (response.status_code, content))
        self.assertIn('error', content, content)
        if error is not None:
            if isinstance(error, type):
                error = error()
            self.assertEqual(response.status_code, error.status,
                             (response.status_code, content))
            self.assertEqual(content['error'], error.message,
                             (response.status_code, content))
        return response.status_code, content['error']
class TestAuthorize(ApiTestCase):
    """Tests for the /api/authorize endpoint."""
    authorize: ApiClient
    def setUp(self):
        super().setUp()
        self.authorize = ApiClient('/api/authorize')
    def test_authorized(self):
        # A valid token sets the auth cookie and echoes the student data.
        response = self.authorize.post(token=self.student.id)
        result = self.assertResponseSuccess(response)
        self.assertEqual(response.cookies['student'].value, self.student.id)
        self.assertEqual(result['name'], self.student.name)
        self.assertEqual(result['group'], self.group.name)
        self.assertEqual(result['id'], self.student.id)
    def test_authorized_unknown_token(self):
        # A well-formed but unknown token is rejected as unauthorized.
        response = self.authorize.post(token=uuid_str())
        self.assertResponseError(response, errors.Unauthorized)
    def test_authorized_invalid_params(self):
        # Missing or non-string tokens fail validation; GET is not allowed.
        response = self.authorize.post()
        self.assertResponseError(response, errors.InvalidParameter('token'))
        response = self.authorize.post(token=12345678)
        self.assertResponseError(response, errors.InvalidParameter('token'))
        response = self.authorize.get()
        self.assertEqual(response.status_code, 405)
class TestGetExamSessions(ApiTestCase):
    """Tests for the /api/exams session-listing endpoint."""
    get_exams: ApiClient
    session: ExamSession
    student_session: UserSession
    questions: List[Question]
    tickets: List[ExamTicket]
    def setUp(self):
        super().setUp()
        self.get_exams = ApiClient('/api/exams', student=self.student)
        self.setup_exam_objects()
    def tearDown(self):
        self.teardown_exam_objects()
        super().tearDown()
    def test_get_exams_available(self):
        # Fresh session: available, not checked in, no score yet.
        result = self.assertResponseSuccess(self.get_exams.get())
        self.assertIsInstance(result, list)
        self.assertEqual(len(result), 1)
        user_session = result[0]
        self.assertEqual(
            user_session['started_at'], self.session.start_time.isoformat())
        self.assertEqual(user_session['duration'],
                         self.session.duration.total_seconds() / 60)
        self.assertEqual(user_session['checked_in'], False)
        self.assertEqual(user_session['finished_at'], None)
        self.assertEqual(user_session['status'], ExamStatus.available.value)
        self.assertEqual(user_session['score'], None)
    def test_get_exams_check_in(self):
        # A started session is reported as checked in.
        self.student_session.started_at = timezone.now()
        self.student_session.save()
        result = self.assertResponseSuccess(self.get_exams.get())
        user_session = result[0]
        self.assertEqual(user_session['checked_in'], True)
    def test_get_exams_submitted(self):
        # Finishing a session flips the status to submitted.
        now = timezone.now()
        self.student_session.started_at = timezone.now()
        self.student_session.finished_at = now
        self.student_session.save()
        result = self.assertResponseSuccess(self.get_exams.get())
        user_session = result[0]
        self.assertEqual(user_session['finished_at'], now.isoformat())
        self.assertEqual(user_session['status'], ExamStatus.submitted)
        self.assertEqual(user_session['score'], None)
    def test_get_exams_non_available(self):
        # A session scheduled in the future is reported as not available.
        self.session.start_time = timezone.now() + self.session.duration
        self.session.save()
        result = self.assertResponseSuccess(self.get_exams.get())
        user_session = result[0]
        self.assertEqual(user_session['started_at'],
                         self.session.start_time.isoformat())
        self.assertEqual(user_session['finished_at'], None)
        self.assertEqual(user_session['status'], ExamStatus.not_available)
    def test_get_exams_unauthorized(self):
        # No auth cookie -> unauthorized; POST on the endpoint -> 405.
        self.get_exams.cookies = {}
        self.assertResponseError(self.get_exams.get(), errors.Unauthorized)
        response = self.get_exams.post()
        self.assertEqual(response.status_code, 405)
    def test_get_exams_score(self):
        # Score is the sum over tickets, reported only once all are graded.
        for ticket in self.tickets:
            ticket.score = 1.0
            ticket.save()
        result = self.assertResponseSuccess(self.get_exams.get())
        user_session = result[0]
        self.assertEqual(user_session['score'],
                         sum(t.score for t in self.tickets))
        self.tickets[0].score = None
        self.tickets[0].save()
        result = self.assertResponseSuccess(self.get_exams.get())
        user_session = result[0]
        self.assertEqual(user_session['score'], None)
class TestGetExamTickets(ApiTestCase):
    """Tests for POST /api/tickets — fetching a session's question tickets."""
    # NOTE(review): the 'get_exams' annotation looks stale — setUp assigns
    # self.get_exam_questions instead; confirm and clean up.
    get_exams: ApiClient
    session: ExamSession
    student_session: UserSession
    questions: List[Question]
    tickets: List[ExamTicket]
    ticket_map: Dict[str, ExamTicket]
    def setUp(self):
        """Bind an API client to the student and create the exam fixtures."""
        super().setUp()
        self.get_exam_questions = \
            ApiClient('/api/tickets', student=self.student)
        self.setup_exam_objects()
    def tearDown(self):
        """Dispose of the exam fixtures before the base teardown."""
        self.teardown_exam_objects()
        super().tearDown()
    def test_get_exam_questions(self):
        """Fetching tickets checks the student in and returns stage-ordered questions."""
        self.assertFalse(self.student_session.check_in)
        result = self.assertResponseSuccess(
            self.get_exam_questions.post(session_id=self.student_session.id))
        self.assertEqual(result['status'], ExamStatus.available)
        self.assertEqual(result['score'], None)
        # The fetch itself must flip check_in on the persisted session.
        self.student_session.refresh_from_db()
        self.assertTrue(self.student_session.check_in)
        questions = result['questions']
        self.assertIsInstance(questions, list)
        self.assertEqual(len(questions), len(self.tickets))
        # Tickets come back ordered by their question's stage.
        self.assertEqual([x['id'] for x in questions], [
            x.id for x in sorted(self.tickets, key=lambda x: x.question.stage)
        ])
        for question in questions:
            ticket = self.ticket_map[question['id']]
            ticket_question = ticket.question
            self.assertEqual(question.pop('id'), ticket.id)
            # Aside from the id, the payload must equal the question's dict view.
            view = ticket_question.as_dict
            view.pop('id')
            self.assertEqual(question, view)
    def test_get_exam_questions_already_checked_in(self):
        """A second fetch must not reset the original check-in timestamp."""
        self.student_session.check_in = True
        checkin_date = self.student_session.started_at
        result = self.assertResponseSuccess(
            self.get_exam_questions.post(session_id=self.student_session.id))
        self.assertEqual(result['status'], ExamStatus.available)
        self.assertEqual(result['score'], None)
        self.student_session.refresh_from_db()
        self.assertTrue(self.student_session.check_in)
        self.assertEqual(self.student_session.started_at, checkin_date)
        questions = result['questions']
        self.assertIsInstance(questions, list)
        self.assertEqual(len(questions), len(self.tickets))
    def test_get_exam_questions_not_available(self):
        """A not-yet-open session returns no questions."""
        self.session.start_time += self.session.duration
        self.session.save()
        result = self.assertResponseSuccess(
            self.get_exam_questions.post(session_id=self.student_session.id))
        self.assertEqual(result['status'], ExamStatus.not_available)
        self.assertEqual(result['score'], None)
        questions = result['questions']
        self.assertIsInstance(questions, list)
        self.assertEqual(len(questions), 0)
    def test_get_exam_questions_submitted(self):
        """A submitted session exposes saved answers but no score yet."""
        self.student_session.finished_at = timezone.now()
        self.student_session.save()
        ANSWER = 'answer'
        for ticket in self.tickets:
            ticket.answer = ANSWER
            ticket.save()
        result = self.assertResponseSuccess(
            self.get_exam_questions.post(session_id=self.student_session.id))
        self.assertEqual(result['status'], ExamStatus.submitted)
        self.assertEqual(result['score'], None)
        questions = result['questions']
        self.assertIsInstance(questions, list)
        self.assertEqual(len(questions), len(self.tickets))
        for question in questions:
            ticket = self.ticket_map[question['id']]
            ticket.refresh_from_db()
            answer = question.pop('answer')
            self.assertEqual(answer, ticket.answer)
            self.assertEqual(question['score'], None)
    def test_get_exam_questions_submitted_and_scored(self):
        """Once every ticket is scored, per-question and total scores appear."""
        self.student_session.finished_at = timezone.now()
        self.student_session.save()
        ANSWER = 'answer'
        for ticket in self.tickets:
            ticket.answer = ANSWER
            ticket.score = 1.0
            ticket.save()
        result = self.assertResponseSuccess(
            self.get_exam_questions.post(session_id=self.student_session.id))
        self.assertEqual(result['status'], ExamStatus.submitted)
        self.assertEqual(result['score'], sum(t.score for t in self.tickets))
        questions = result['questions']
        self.assertIsInstance(questions, list)
        self.assertEqual(len(questions), len(self.tickets))
        for question in questions:
            ticket = self.ticket_map[question['id']]
            ticket.refresh_from_db()
            self.assertEqual(question['score'], ticket.score)
    def test_get_exam_questions_invalid_params(self):
        """Missing/unknown session ids, missing auth, and GET are all rejected."""
        self.assertResponseError(self.get_exam_questions.post(),
                                 errors.InvalidParameter('session_id'))
        self.assertResponseError(
            self.get_exam_questions.post(session_id=uuid_str()),
            errors.ExamNotFound)
        self.get_exam_questions.cookies = {}
        self.assertResponseError(
            self.get_exam_questions.post(session_id=self.student_session.id),
            errors.Unauthorized)
        response = self.get_exam_questions.get()
        self.assertEqual(response.status_code, 405)
class TestSubmitExam(ApiTestCase):
    """Tests for POST /api/submit — submitting answers for an exam session."""
    def setUp(self):
        """Bind an API client to the student and create the exam fixtures."""
        super().setUp()
        self.submit_exam = ApiClient('/api/submit', student=self.student)
        self.setup_exam_objects()
    def tearDown(self):
        """Dispose of the exam fixtures before the base teardown."""
        self.teardown_exam_objects()
        super().tearDown()
    def test_submit_exam(self):
        """Answers of every question type are persisted and the session submits."""
        answers = {}
        ANSWER = 'answer'
        for ticket in self.tickets:
            if ticket.question.type == QuestionType.single:
                # Single-choice: submit a random option index.
                answers[ticket.id] = \
                    random.randint(0, len(ticket.question.options)-1)
            elif ticket.question.type == QuestionType.multi:
                # Multi-choice: submit a random subset of option indices.
                answers[ticket.id] = random.sample(
                    list(range(0, len(ticket.question.options))),
                    k=random.randint(0, len(ticket.question.options))
                )
            else:
                # Free-text answer.
                answers[ticket.id] = ANSWER
        result = self.assertResponseSuccess(self.submit_exam.post(
            session_id=self.student_session.id, answers=answers))
        self.assertEqual(result, True)
        self.student_session.refresh_from_db()
        self.assertEqual(self.student_session.status, ExamStatus.submitted)
        for ticket in self.tickets:
            ticket.refresh_from_db()
            if ticket.question.type == QuestionType.single:
                # Stored as the option text, not the index.
                self.assertEqual(
                    ticket.answer, ticket.question.options[answers[ticket.id]])
            elif ticket.question.type == QuestionType.multi:
                # Stored as ';'-joined option texts in index order.
                self.assertEqual(ticket.answer, ';'.join([
                    ticket.question.options[x]
                    for x in sorted(answers[ticket.id])
                ]))
            self.assertIsNotNone(ticket.answered_at)
    def test_submit_without_any_answer(self):
        """An empty answer map still submits; tickets stay unanswered."""
        result = self.assertResponseSuccess(self.submit_exam.post(
            session_id=self.student_session.id, answers={}))
        self.assertEqual(result, True)
        self.student_session.refresh_from_db()
        self.assertEqual(self.student_session.status, ExamStatus.submitted)
        for ticket in self.tickets:
            ticket.refresh_from_db()
            self.assertIsNone(ticket.answered_at)
            self.assertIsNone(ticket.answer)
    def test_submit_partial_answer_errors(self):
        """Invalid entries are silently skipped; only the valid answer is saved."""
        ANSWER = 'answer'
        answers = {
            # answer option index out of range
            self.tickets[0].id: len(self.tickets[0].question.options),
            # wrong answer type
            self.tickets[1].id: 0,
            # a valid answer
            self.tickets[2].id: ANSWER,
            # malformed ticket id
            uuid_str(): ANSWER,
            # nonexistent ticket
            self.tickets[2].id + 1: ANSWER,
        }
        result = self.assertResponseSuccess(self.submit_exam.post(
            session_id=self.student_session.id, answers=answers))
        self.assertEqual(result, True)
        self.student_session.refresh_from_db()
        self.assertEqual(self.student_session.status, ExamStatus.submitted)
        for ticket in self.tickets:
            ticket.refresh_from_db()
        self.assertIsNone(self.tickets[0].answer)
        self.assertIsNone(self.tickets[0].answered_at)
        self.assertIsNone(self.tickets[1].answer)
        self.assertIsNone(self.tickets[1].answered_at)
        self.assertEqual(self.tickets[2].answer, ANSWER)
        self.assertIsNotNone(self.tickets[2].answered_at)
    def test_submit_errors(self):
        """Parameter validation and closed/expired sessions produce typed errors."""
        self.assertResponseError(self.submit_exam.post(),
                                 errors.InvalidParameter('session_id'))
        self.assertResponseError(self.submit_exam.post(session_id=123),
                                 errors.InvalidParameter('session_id'))
        self.assertResponseError(self.submit_exam.post(session_id=uuid_str()),
                                 errors.InvalidParameter('answers'))
        self.assertResponseError(
            self.submit_exam.post(session_id=uuid_str(), answers=[]),
            errors.InvalidParameter('answers'))
        self.assertResponseError(
            self.submit_exam.post(session_id=uuid_str(), answers={}),
            errors.ExamNotFound)
        # A session that has not opened yet cannot accept submissions.
        self.session.start_time += self.session.duration
        self.session.save()
        self.assertResponseError(self.submit_exam.post(
            session_id=self.student_session.id, answers={}),
            errors.ExamNotAvailable)
        self.student_session.start_time = timezone.now()
        self.student_session.save()
        self.assertResponseError(self.submit_exam.post(
            session_id=self.student_session.id, answers={}),
            errors.ExamNotAvailable)
|
9,790 | 1fda8274024bdf74e7fbd4ac4a27d6cfe6032a13 | from distutils.core import setup
# Package metadata for greeker.
# NOTE(review): distutils.core.setup() silently ignores install_requires —
# that keyword is a setuptools extension; switch the import to
# `from setuptools import setup` for the dependency list to take effect.
setup(name='greeker',
      version='0.3.2-git',
      description="scrambles nouns in an XML document to produce a specimen for layout testing",
      author="Brian Tingle",
      author_email="brian.tingle.cdlib.org@gmail.com",
      url="http://tingletech.github.com/greeker.py/",
      install_requires=["inflect>=0.2.1", "lxml>=2.3.2", "nltk>=2.0.1rc2-git", "numpy", "argparse"],
      py_modules=['greeker'],
      scripts=['greeker.py'],
      )
|
9,791 | 75217256d88c32ed1c502bc104c30092bf74382d | # Find sum/count of Prime digits in a number |
9,792 | acd0b9019ef413699b47ecb2b66a0980cf3aa81f | from cudasim.ParsedModel import ParsedModel
import re
import copy
class Writer:
    """Base helper for code writers that emit simulation code from a parsed model."""
    def __init__(self):
        pass
    # replace the species and parameters recursively
    @staticmethod
    def rep(string, find, replace):
        """Replace whole-token occurrences of *find* in *string* with *replace*.

        A match requires *find* to be followed by a non-digit (so 'k1' does
        not match inside 'k12') or to sit at the very end of the string.
        Every replacement leaves a single trailing space after *replace*,
        mirroring the original template layout.
        """
        # Interior occurrences: *find* followed by any non-digit character.
        ex = find + "[^0-9]"
        while re.search(ex, string) is not None:
            res = re.search(ex, string)
            string = string[0:res.start()] + replace + " " + string[res.end() - 1:]
        # A final occurrence anchored at the end of the string.
        ex = find + "$"
        if re.search(ex, string) is not None:
            res = re.search(ex, string)
            string = string[0:res.start()] + replace + " " + string[res.end():]
        return string
    def categorise_variables(self):
        """Split model variables into rate-rule-driven and constant groups.

        Returns a 4-tuple (species_list, constant_params, species_values,
        constant_values); species_list/species_values additionally contain
        the parameters governed by rate rules, since those behave like
        species during integration.
        """
        model = self.parser.parsedModel
        rule_params = []
        rule_values = []
        constant_params = []
        constant_values = []
        for i in range(len(model.listOfParameter)):
            is_constant = True
            if not model.listOfParameter[i].getConstant():
                # A non-constant parameter driven by a rate rule is treated
                # like a species; everything else stays a constant.
                for k in range(len(model.listOfRules)):
                    if model.listOfRules[k].isRate() and model.ruleVariable[k] == model.parameterId[i]:
                        rule_params.append(model.parameterId[i])
                        rule_values.append(str(model.parameter[i]))
                        is_constant = False
            if is_constant:
                constant_params.append(model.parameterId[i])
                constant_values.append(str(model.parameter[i]))
        species_list = copy.copy(model.speciesId)
        species_list.extend(rule_params)
        # Build a real list here: on Python 3 map() returns a lazy iterator
        # that has no .extend(), so the original code crashed; a list
        # comprehension is identical on Python 2 and fixes Python 3.
        species_values = [str(x) for x in model.initValues]
        species_values.extend(rule_values)
        return species_list, constant_params, species_values, constant_values
|
9,793 | c9b62328a463fd38f3dbd1e7b5e1990f7eec1dba | from django.shortcuts import render
from django.http import HttpResponse
def view1(request):
    """Return a fixed plain-text greeting without rendering a template."""
    body = " Hey..,This is the first view using HttpResponce!"
    return HttpResponse(body)
def view2(request):
    """Render new.html with a single template variable."""
    return render(request, "new.html", {"tag_var": "tag_var"})
# Create your views here.
|
9,794 | 6cd250b3bffd87657ec7cc28eaffe817c6d9f73f | # Generated by Django 2.0.3 on 2018-04-30 16:25
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Auto-generated migration: default Thread.last_activity to timezone.now."""
    dependencies = [
        ('threads', '0007_auto_20180430_1617'),
    ]
    operations = [
        migrations.AlterField(
            model_name='thread',
            name='last_activity',
            # Timezone-aware "now" becomes the field default; no other
            # field attributes change in this migration.
            field=models.DateTimeField(default=django.utils.timezone.now),
        ),
    ]
|
9,795 | 00099cab0c816c76fc0fa94d7905175feb6919cf | import django.dispatch
# Signal fired when a property detail page has been served.
# NOTE(review): providing_args is deprecated since Django 3.0 and removed
# in Django 4.0 — confirm the target Django version before upgrading.
property_viewed = django.dispatch.Signal(providing_args=["property","user", "request", "response"])
9,796 | 8de36400f21bfb4e24703d5a65471a961e1afddc | #coding=utf-8
from selenium import webdriver
# Launch Firefox, run a single Baidu search with a non-ASCII query, quit.
wd=webdriver.Firefox()
wd.get('https://www.baidu.com/')
# 'kw' is the element id of Baidu's search input box.
wd.find_element_by_id('kw').send_keys(u'哈哈')
wd.quit()
|
9,797 | a52f009a755b45f8ed653a4a0385b1eb667f2318 | __author__ = 'changwoncheo'
# -*- coding: utf-8 -*-
import threading
import logging
logging.basicConfig(filename='crawl2.log',level=logging.DEBUG)
class NoParsingFilter(logging.Filter):
    """Logging filter that drops 'Starting …' and HTTP 'GET' noise records."""
    def filter(self, record):
        text = record.getMessage()
        if 'Starting' in text or 'GET' in text:
            return False
        return True
# Module-level crawler configuration: loggers, extraction regexes, genre map.
logger = logging.getLogger('Crawler')
requests_log = logging.getLogger("requests")
requests_log.setLevel(logging.CRITICAL) # silence the requests library's own logging
import os
import re
reobj_album = re.compile('\'(.*)\'')# album regex: javascript('<number>')
reobj_djIndex = re.compile(',\'(.*)\'')# album regex: javascript('<number>','<number>')
reobj_filename = re.compile('/(\w*[.]\w*)$')# file-name regex: trailing /name.ext
category = {102:'발라드',103:'댄스',104:'랩_합합',105:'R&B_Soul',106:'록',107:'일렉트로니카',108:'트로트',109:'포크',110:'인디음악'}
def tapNewlineStrip(str):
    # Strip every newline and tab by round-tripping through the UTF-8 byte
    # form (Python 2 str) and decoding back to unicode.
    # NOTE(review): shadows the builtin 'str'; breaks on Python 3 where
    # bytes.replace() rejects text arguments — confirm Python 2 only.
    return str.encode('utf-8').replace('\n','').replace('\t','').decode('utf-8')
def writeJson(fileName,dict):
    # Dump *dict* to *fileName* as JSON, keeping non-ASCII text readable
    # (ensure_ascii=False).  Python 2 only: uses the print statement and
    # json.dump's 'encoding' keyword (removed in Python 3).
    # NOTE(review): the parameter shadows the builtin 'dict'.
    import json
    print dict
    with open(fileName, 'w') as outfile:
        json.dump(dict, outfile, ensure_ascii = False, encoding = 'utf-8')
|
9,798 | 93e534e8d425510b59310dcbfc5bca9cc32f245e | import sys
import random
#import matplotlib.pyplot as plt
import numpy as np
import time
class Waterfilling:
    """
    initializes x and r with optimal flow allocations
    and link fair share rates for traffic matrix routes and link
    capacities c, and level with number of levels
    after running the waterfilling algorithm. note
    that if sum of flow allocations at a link is less than capacity
    then fair share of link is float('inf').
    not that routes and c must be initialized before calling this.
    """
    def __init__(self, routes, c, log, prec_library):
        # routes: (num_flows, num_links) 0/1 routing matrix;
        # c: (num_links, 1) capacities; log: verbose prints;
        # prec_library: supplies the comparison tolerance eps1.
        #log = True
        #print "Waterfilling"
        #print mpmath.mp
        (self.num_flows, self.num_links) = routes.shape
        # Per-link waterfilling level; inf until the link saturates.
        self.levels = np.ones((self.num_links, 1)) * float('inf')
        self.prec_library = prec_library
        eps = prec_library.eps1
        # weights[i] == 1 while flow i is still unsaturated.
        weights = np.ones((self.num_flows,1))
        #print("weights", weights.shape, weights)
        #print("routes", routes.shape, routes)
        #self.r = np.ones((self.num_links,1)) * mpf_inf
        #self.x = np.ones((self.num_flows,1)) * mpf_inf
        x = np.zeros((self.num_flows,1))
        active_flows = np.ones((self.num_flows, 1), dtype=bool)
        # rem_cap aliases c initially; later arithmetic rebinds it, so the
        # caller's array is not mutated in place.
        rem_cap = c #np.ones((self.num_links, 1)) * prec_library.mpf_one
        # for i in range(self.num_links):
        #     rem_cap[i] = prec_library.mpf(c[i,0])
        self.max_level = 0
        num_active_flows = np.count_nonzero(active_flows, axis=0)
        #print(num_active_flows,"flows left")
        # Classic progressive filling: each round saturates the link with
        # the smallest fair share and freezes every flow crossing it.
        while num_active_flows > 0:
            # number of rem flows on all links
            link_weights = np.dot(routes.T, weights)
            assert(rem_cap.shape == link_weights.shape)
            try:
                fair_shares = np.where(link_weights>0, rem_cap/link_weights, float('inf'))
            except:
                # NOTE(review): bare except keeps fair_shares possibly
                # unbound on first iteration if division ever raises —
                # confirm this branch is unreachable in practice.
                pass
            #print("link_weights", link_weights)
            #print("rem_cap", rem_cap)
            #print("fair_shares", fair_shares)
            # NOTE(review): reshape() returns a new array; this call as
            # written has no effect on fair_shares.
            fair_shares.reshape(self.num_links, 1)
            bl = np.argmin(fair_shares)
            #print ("bl",type(bl),bl)
            inc = float(fair_shares[bl, 0])
            assert(inc < float('inf'))
            # increase level, only when link with smallest fair share rate
            # has a rate larger than last one, handles the following example
            # two links, each cap 10.0, each has one flow, and none in common
            # each link identified in different iterations of this loop
            if self.max_level == 0 or inc > eps: self.max_level += 1
            # All still-active flows get the same increment this round.
            x = np.where(active_flows, x + inc * weights, x)
            if log:
                print "In round",self.max_level,\
                    " link", bl, "has smallest fair share", inc, "b/s",\
                    "Next rate increase is", inc, " (type ", type(inc), ") cuz of bl ",\
                    bl, " with rem_cap ", rem_cap[bl,0], " b/s",\
                    "and ", link_weights[bl,0] , " of the total ",\
                    num_active_flows, " remaining flows"
            rem_cap = rem_cap - inc * link_weights
            neg_cap = list(np.where(rem_cap < -1e7)[0]) # for each (aka only) column
            if (len(neg_cap) > 0):
                print >> sys.stderr, "warning! in watefilling hp links with neg. rem_cap ", neg_cap
            # Every flow routed over the bottleneck link freezes here.
            bf = np.where(routes[:,bl] > 0)[0]
            active_flows[bf] = 0
            num_active_flows = np.count_nonzero(active_flows, axis=0)
            #print(num_active_flows,"flows left")
            weights[bf] = 0
            self.levels[bl] = self.max_level
        # get max. rate at each link
        r = np.ones((self.num_links,1)) * float('inf')
        for e in range(self.num_links):
            flows = np.nonzero(routes[:, e])[0]
            if len(flows) > 0:
                sum_demands = sum(x[flows])[0]
                cap = c[e,0]
                diff = abs(sum_demands - cap)
                # A link is a bottleneck when its capacity is (numerically)
                # exhausted; its rate is the largest flow allocation on it.
                if (sum_demands > cap or diff < eps):
                    r[e] = max(x[flows])
                    print "link",e,"has rate", r[e]
        self.level = self.max_level
        self.x = x
        self.r = r
        self.bottleneck_links_arr = np.where(self.r < float('inf'))[0]
        self.bottleneck_links = {}
        self.non_bottleneck_links = {}
        self.sat_flows = {}
        self.unsat_flows = {}
# class Eps:
# def __init__(self):
# self.eps1 = 1e-7
# pass
# def main():
# for num_flows in [10, 100, 1000, 10000]:
# start = time.time()
# routes = np.ones((num_flows, 2))
# routes[:, 1] = 0
# routes[0:2, 1] = 1
# routes[0, 0] = 0
# c = np.ones((2,1))
# wf = Waterfilling(routes, c, True, Eps())
# stop = time.time()
# elapsed = stop - start
# print("num_flows", num_flows, "elapsed", elapsed,"s")
# #print wf.x
# #print wf.r
# #print wf.level
# pass
# main()
|
9,799 | c4c24c36fe0afba61f8046055690f0c36df7098c | # Developed by : Jays Patel (cyberthreatinfo.ca)
# This script is use to find the python Composer packages vulnerabilities from linux machine and python source project.
import time
import glob2
import random
import os.path
from os import path
import ast
import sys
import commands
import re
import requests
from pkg_resources import parse_version
import json
import argparse
from tqdm import tqdm
from datetime import datetime
class getComposerVulnerabilities():
    def __init__(self, reportPath, project, targetFolder, owner):
        # Load server.config, validate the API token against the backend,
        # and initialise the report header plus the scan accumulators.
        self.reportPath = reportPath
        self.sourcefolder = targetFolder
        self.project = project
        if not path.exists("server.config"):
            print "[ INFO ] server configuration json file not found in current directory"
            sys.exit(1)
        with open('server.config') as f:
            configData = json.load(f)
        self.tokenId = configData['tokenId']
        self.server = configData['server']
        self.port = configData['port']
        self.protocol = configData['protocol']
        try:
            # Remote token check; any failure (network, bad JSON) is
            # treated as a connectivity error below.
            url = "%s://%s:%s/api/checkToken/%s" % (self.protocol, self.server, self.port, self.tokenId)
            response = requests.request("GET", url)
            tokenData = response.text
            tokenData = json.loads(tokenData)
            if tokenData['result']:
                print "[ OK ] Token valid, start scanning...."
            else:
                print "[ INFO ] Token invalid or expire, please login on portal and verify the TokenId"
                sys.exit(1)
        except:
            # NOTE(review): bare except also swallows the SystemExit raised
            # just above for an invalid token, then reports it as a
            # connection error — confirm intended.
            print "[ OK ] Server connection error, Please check internet connectivity"
            sys.exit(1)
        # Report header shared by all findings of this run.
        self.results = {}
        self.results['header'] = {}
        now = datetime.now()
        self.report_name = now.strftime("%d-%m-%Y_%H:%M:%S")
        self.report_path = reportPath
        self.results['header']['Date'] = self.report_name
        self.results['header']['Project'] = self.project
        self.results['header']['Owner'] = owner
        self.results['header']['Target'] = "source"
        self.results['header']['docker'] = "False"
        # Scan accumulators: vulnerable deps, findings, scanned versions.
        self.vuln_depe = []
        self.vuln_found = []
        self.testedWith = []
        self.dependanciesCount = []
def gtEq(self, vers1, mVers):
if parse_version(mVers) >= parse_version(vers1):
return True
else:
return False
def gt(self, vers1, mVers):
if parse_version(mVers) > parse_version(vers1):
return True
else:
return False
def ltEq(self, vers1, mVers):
if parse_version(mVers) <= parse_version(vers1):
return True
else:
return False
def lt(self, vers1, mVers):
if parse_version(mVers) < parse_version(vers1):
return True
else:
return False
def eq(self, vers1, mVers):
if parse_version(mVers) == parse_version(vers1):
return True
else:
return False
    def getLatestVersion(self, product, vendor, mVers):
        # Return the highest Packagist tag whose name starts with 'v<mVers>'
        # for vendor/product ('0.0' if nothing matches).
        # NOTE(review): 'value' is computed but never used, and 'max'
        # shadows the builtin — candidates for cleanup.
        response = requests.get('https://repo.packagist.org/p/%s/%s.json' % (vendor, product))
        data = response.text
        data = json.loads(data)
        kData = []
        for k,v in data['packages']['%s/%s' % (vendor, product)].items():
            if re.findall(r'^v%s' % mVers, str(k)):
                value = re.findall(r'%s' % mVers, str(k))[0]
                kData.append(k)
        max = "0.0"
        for v in kData:
            if parse_version(v) > parse_version(max):
                max = v
        return max
    def getMatchVersionLists(self, product, vendor, version):
        # Resolve a composer version constraint to the list of concrete
        # matching versions via the public semver.mwl.be service.
        response = requests.get('https://semver.mwl.be/packages/%s/%s/match?constraint=%s&minimum-stability=stable' % (vendor, product, version))
        data = response.text
        data = json.loads(data)
        return data
def maxValue(self, mVersions):
ver1 = '0.0'
for ver in mVersions:
if parse_version(ver) > parse_version(ver1):
ver1 = ver
return ver1
def matchVer(self, mVersions, product, vendor, cve_id, versions, reference, vuln_name, vectorString, baseScore, recommendation, pub_date, severity, dependancy, patch, cwe_text):
mVersions = self.getMatchVersionLists(product, vendor, mVersions)
mVer = self.maxValue(mVersions)
if severity.lower() == "medium" or severity.lower() == "moderate":
severity = "Medium"
elif severity.lower() == "high":
severity = "High"
elif severity.lower() == "low":
severity = "Low"
elif severity.lower() == "critical":
severity = "Critical"
if not patch:
patch = versions
for vers in versions.split(","):
if re.findall(r'\[.*:.*\]', str(vers)):
vers1 = re.findall(r'\[(.*):', str(vers))[0]
vers2 = re.findall(r':(.*)\]', str(vers))[0]
if self.gtEq(vers1, mVer) and self.ltEq(vers2, mVer):
res = {}
if severity not in self.results['Issues']:
self.results['Issues'][severity] = {}
self.results['Issues'][severity]['data'] = []
self.results['Issues'][severity]['header'] = []
res1 = {}
res1['CVEID'] = str(cve_id)
res1['Product'] = str(product)
res1['CWE'] = str(cwe_text)
res1['Severity'] = str(severity)
res['Product'] = str(product)
res['Vendor'] = str(vendor)
res['Severity'] = str(severity)
res['CVEID'] = str(cve_id)
res['Vector String'] = str(vectorString)
res['Vulnerability Name'] = str(vuln_name)
res['Patched Version'] = str(patch)
res['Recommendation'] = str(recommendation)
res['Reference'] = str(reference)
res['Publish Date'] = str(pub_date)
res['Introduced Through'] = str(dependancy)
res['Installed Version'] = str(mVer)
res['CWE'] = str(cwe_text)
if res not in self.results['Issues'][severity]['data']:
self.results['Issues'][severity]['data'].append(res)
self.results['Issues'][severity]['header'].append(res1)
if severity.lower() == "medium" or severity.lower() == "moderate":
self.med.append("Medium")
if severity.lower() == "high":
self.hig.append("High")
if severity.lower() == "low":
self.low.append("Low")
if severity.lower() == "critical":
self.cri.append("Critical")
self.vuln_found.append(product)
if product not in self.vuln_depe:
self.vuln_depe.append(product)
elif re.findall(r'\(.*:.*\]', str(vers)):
vers1 = re.findall(r'\((.*):', str(vers))[0]
vers2 = re.findall(r':(.*)\]', str(vers))[0]
if self.gt(vers1, mVer) and self.ltEq(vers2, mVer):
res = {}
if severity not in self.results['Issues']:
self.results['Issues'][severity] = {}
self.results['Issues'][severity]['data'] = []
self.results['Issues'][severity]['header'] = []
res1 = {}
res1['CVEID'] = str(cve_id)
res1['Product'] = str(product)
res1['CWE'] = str(cwe_text)
res1['Severity'] = str(severity)
res['Product'] = str(product)
res['Vendor'] = str(vendor)
res['Severity'] = str(severity)
res['CVEID'] = str(cve_id)
res['Vector String'] = str(vectorString)
res['Vulnerability Name'] = str(vuln_name)
res['Patched Version'] = str(patch)
res['Recommendation'] = str(recommendation)
res['Reference'] = str(reference)
res['Publish Date'] = str(pub_date)
res['Introduced Through'] = str(dependancy)
res['Installed Version'] = str(mVer)
res['CWE'] = str(cwe_text)
if res not in self.results['Issues'][severity]['data']:
self.results['Issues'][severity].append(res)
if severity.lower() == "medium" or severity.lower() == "moderate":
self.med.append("Medium")
if severity.lower() == "high":
self.hig.append("High")
if severity.lower() == "low":
self.low.append("Low")
if severity.lower() == "critical":
self.cri.append("Critical")
self.vuln_found.append(product)
if product not in self.vuln_depe:
self.vuln_depe.append(product)
elif re.findall(r'\[.*:.*\)', str(vers)):
vers1 = re.findall(r'\[(.*):', str(vers))[0]
vers2 = re.findall(r':(.*)\)', str(vers))[0]
if self.gtEq(vers1, mVer) and self.lt(vers2, mVer):
res = {}
if severity not in self.results['Issues']:
self.results['Issues'][severity] = {}
self.results['Issues'][severity]['data'] = []
self.results['Issues'][severity]['header'] = []
res1 = {}
res1['CVEID'] = str(cve_id)
res1['Product'] = str(product)
res1['CWE'] = str(cwe_text)
res1['Severity'] = str(severity)
res['Product'] = str(product)
res['Vendor'] = str(vendor)
res['Severity'] = str(severity)
res['CVEID'] = str(cve_id)
res['Vector String'] = str(vectorString)
res['Vulnerability Name'] = str(vuln_name)
res['Patched Version'] = str(patch)
res['Recommendation'] = str(recommendation)
res['Reference'] = str(reference)
res['Publish Date'] = str(pub_date)
res['Introduced Through'] = str(dependancy)
res['Installed Version'] = str(mVer)
res['CWE'] = str(cwe_text)
if res not in self.results['Issues'][severity]['data']:
self.results['Issues'][severity]['data'].append(res)
self.results['Issues'][severity]['header'].append(res1)
if severity.lower() == "medium" or severity.lower() == "moderate":
self.med.append("Medium")
if severity.lower() == "high":
self.hig.append("High")
if severity.lower() == "low":
self.low.append("Low")
if severity.lower() == "critical":
self.cri.append("Critical")
self.vuln_found.append(product)
if product not in self.vuln_depe:
self.vuln_depe.append(product)
elif re.findall(r'\(.*:.*\)', str(vers)):
vers1 = re.findall(r'\((.*):', str(vers))[0]
vers2 = re.findall(r':(.*)\)', str(vers))[0]
if self.gt(vers1, mVer) and self.lt(vers2, mVer):
res = {}
if severity not in self.results['Issues']:
self.results['Issues'][severity] = {}
self.results['Issues'][severity]['data'] = []
self.results['Issues'][severity]['header'] = []
res1 = {}
res1['CVEID'] = str(cve_id)
res1['Product'] = str(product)
res1['CWE'] = str(cwe_text)
res1['Severity'] = str(severity)
res['Product'] = str(product)
res['Vendor'] = str(vendor)
res['Severity'] = str(severity)
res['CVEID'] = str(cve_id)
res['Vector String'] = str(vectorString)
res['Vulnerability Name'] = str(vuln_name)
res['Patched Version'] = str(patch)
res['Recommendation'] = str(recommendation)
res['Reference'] = str(reference)
res['Publish Date'] = str(pub_date)
res['Introduced Through'] = str(dependancy)
res['Installed Version'] = str(mVer)
res['CWE'] = str(cwe_text)
if res not in self.results['Issues'][severity]['data']:
self.results['Issues'][severity]['data'].append(res)
self.results['Issues'][severity]['header'].append(res1)
if severity.lower() == "medium" or severity.lower() == "moderate":
self.med.append("Medium")
if severity.lower() == "high":
self.hig.append("High")
if severity.lower() == "low":
self.low.append("Low")
if severity.lower() == "critical":
self.cri.append("Critical")
self.vuln_found.append(product)
if product not in self.vuln_depe:
self.vuln_depe.append(product)
elif re.findall(r'\(.*:.*\)', str(vers)):
vers1 = re.findall(r'\((.*):', str(vers))[0]
vers2 = re.findall(r':(.*)\)', str(vers))[0]
if self.gt(vers1, mVer) and self.lt(vers2, mVer):
res = {}
if severity not in self.results['Issues']:
self.results['Issues'][severity] = {}
self.results['Issues'][severity]['data'] = []
self.results['Issues'][severity]['header'] = []
res1 = {}
res1['CVEID'] = str(cve_id)
res1['Product'] = str(product)
res1['CWE'] = str(cwe_text)
res1['Severity'] = str(severity)
res['Product'] = str(product)
res['Vendor'] = str(vendor)
res['Severity'] = str(severity)
res['CVEID'] = str(cve_id)
res['Vector String'] = str(vectorString)
res['Vulnerability Name'] = str(vuln_name)
res['Patched Version'] = str(patch)
res['Recommendation'] = str(recommendation)
res['Reference'] = str(reference)
res['Publish Date'] = str(pub_date)
res['Introduced Through'] = str(dependancy)
res['Installed Version'] = str(mVer)
res['CWE'] = str(cwe_text)
if res not in self.results['Issues'][severity]['data']:
self.results['Issues'][severity]['data'].append(res)
self.results['Issues'][severity]['header'].append(res1)
if severity.lower() == "medium" or severity.lower() == "moderate":
self.med.append("Medium")
if severity.lower() == "high":
self.hig.append("High")
if severity.lower() == "low":
self.low.append("Low")
if severity.lower() == "critical":
self.cri.append("Critical")
self.vuln_found.append(product)
if product not in self.vuln_depe:
self.vuln_depe.append(product)
else:
vers1 = str(vers)
if self.eq(vers1, mVer):
res = {}
if severity not in self.results['Issues']:
self.results['Issues'][severity] = {}
self.results['Issues'][severity]['data'] = []
self.results['Issues'][severity]['header'] = []
res1 = {}
res1['CVEID'] = str(cve_id)
res1['Product'] = str(product)
res1['CWE'] = str(cwe_text)
res1['Severity'] = str(severity)
res['Product'] = str(product)
res['Vendor'] = str(vendor)
res['Severity'] = str(severity)
res['CVEID'] = str(cve_id)
res['Vector String'] = str(vectorString)
res['Vulnerability Name'] = str(vuln_name)
res['Patched Version'] = str(patch)
res['Recommendation'] = str(recommendation)
res['Reference'] = str(reference)
res['Publish Date'] = str(pub_date)
res['Introduced Through'] = str(dependancy)
res['Installed Version'] = str(mVer)
res['CWE'] = str(cwe_text)
if res not in self.results['Issues'][severity]['data']:
self.results['Issues'][severity]['data'].append(res)
self.results['Issues'][severity]['header'].append(res1)
if severity.lower() == "medium" or severity.lower() == "moderate":
self.med.append("Medium")
if severity.lower() == "high":
self.hig.append("High")
if severity.lower() == "low":
self.low.append("Low")
if severity.lower() == "critical":
self.cri.append("Critical")
self.vuln_found.append(product)
if product not in self.vuln_depe:
self.vuln_depe.append(product)
def getVulnData(self, product, vendor, mVersions, depend):
for row in self.responseData["results"]["%s/%s" % (vendor, product)]:
cve_id = row['cve_id']
versions = row['versions']
reference = row['reference']
vuln_name = row['vuln_name']
vectorString = row['vectorString']
baseScore = row['baseScore']
recommendation = row['recommendation']
pub_date = row['pub_date']
patch = row['patch']
severity = row['severity']
cwe_text = row['cwe_text']
self.matchVer(mVersions, product, vendor, cve_id, versions, reference, vuln_name, vectorString, baseScore, recommendation, pub_date, severity, depend, patch, cwe_text)
def getInstallPkgList(self):
self.installPackageLists = []
self.resultsPkg = {}
for file in glob2.glob('%s/**/composer.*' % (self.sourcefolder), recursive=True):
file = os.path.abspath(file)
filename = os.path.basename(file)
if 'files' not in self.resultsPkg:
self.resultsPkg['files'] = {}
if filename == "composer.lock":
if os.stat(file).st_size != 0:
with open(file) as f:
data = json.load(f)
if filename not in self.resultsPkg['files']:
self.resultsPkg['files'][filename] = {}
self.resultsPkg['files'][filename][file] = {}
if 'packages' in data:
for pkg in data['packages']:
package_name = pkg['name']
if "/" in package_name:
if package_name not in self.installPackageLists:
self.installPackageLists.append(package_name)
vendor = package_name.split("/")[0]
product = package_name.split("/")[1]
versions = pkg['version']
if package_name not in self.resultsPkg['files'][filename][file]:
self.resultsPkg['files'][filename][file][str(package_name)] = {}
self.resultsPkg['files'][filename][file][str(package_name)]["product"] = str(product)
self.resultsPkg['files'][filename][file][str(package_name)]["vendor"] = str(vendor)
self.resultsPkg['files'][filename][file][str(package_name)]["version"] = []
self.resultsPkg['files'][filename][file][str(package_name)]["depend"] = []
if versions not in self.resultsPkg['files'][filename][file][package_name]["version"]:
self.resultsPkg['files'][filename][file][package_name]["version"].append(str(versions))
if 'require' in pkg:
for d in pkg['require']:
if "/" in d:
if d not in self.installPackageLists:
self.installPackageLists.append(d)
vendor1 = d.split("/")[0]
product1 = d.split("/")[1]
versions1 = pkg['require'][d]
if d not in self.resultsPkg['files'][filename][file]:
self.resultsPkg['files'][filename][file][str(d)] = {}
self.resultsPkg['files'][filename][file][str(d)]["product"] = str(product1)
self.resultsPkg['files'][filename][file][str(d)]["vendor"] = str(vendor1)
self.resultsPkg['files'][filename][file][str(d)]["version"] = []
self.resultsPkg['files'][filename][file][str(d)]["depend"] = []
if versions1 not in self.resultsPkg['files'][filename][file][d]["version"]:
self.resultsPkg['files'][filename][file][str(d)]["version"].append(str(versions1))
if "%s@%s" % (str(package_name), str(versions)) not in self.resultsPkg['files'][filename][file][d]["depend"]:
self.resultsPkg['files'][filename][file][str(d)]["depend"].append("%s@%s" % (str(package_name), str(versions)))
if 'require-dev' in pkg:
for d in pkg['require-dev']:
if "/" in d:
if d not in self.installPackageLists:
self.installPackageLists.append(d)
vendor2 = d.split("/")[0]
product2 = d.split("/")[1]
versions2 = pkg['require-dev'][d]
if d not in self.resultsPkg['files'][filename][file]:
self.resultsPkg['files'][filename][file][str(d)] = {}
self.resultsPkg['files'][filename][file][str(d)]["product"] = str(product2)
self.resultsPkg['files'][filename][file][str(d)]["vendor"] = str(vendor2)
self.resultsPkg['files'][filename][file][str(d)]["version"] = []
self.resultsPkg['files'][filename][file][str(d)]["depend"] = []
if versions2 not in self.resultsPkg['files'][filename][file][d]["version"]:
self.resultsPkg['files'][filename][file][str(d)]["version"].append(str(versions2))
if "%s@%s" % (str(package_name), str(versions)) not in self.resultsPkg['files'][filename][file][d]["depend"]:
self.resultsPkg['files'][filename][file][str(d)]["depend"].append("%s@%s" % (str(package_name), str(versions)))
if filename == "composer.json":
if os.stat(file).st_size != 0:
with open(file) as f:
data = json.load(f)
if filename not in self.resultsPkg['files']:
self.resultsPkg['files'][filename] = {}
self.resultsPkg['files'][filename][file] = {}
if 'require' in data:
for d in data['require']:
if "/" in d:
if d not in self.installPackageLists:
self.installPackageLists.append(d)
vendor3 = d.split("/")[0]
product3 = d.split("/")[1]
versions3 = data['require'][d]
if d not in self.resultsPkg['files'][filename][file]:
self.resultsPkg['files'][filename][file][str(d)] = {}
self.resultsPkg['files'][filename][file][str(d)]["product"] = str(product3)
self.resultsPkg['files'][filename][file][str(d)]["vendor"] = str(vendor3)
self.resultsPkg['files'][filename][file][str(d)]["version"] = []
self.resultsPkg['files'][filename][file][str(d)]["depend"] = []
if str(versions3) not in self.resultsPkg['files'][filename][file][d]["version"]:
self.resultsPkg['files'][filename][file][str(d)]["version"].append(str(versions3))
if 'require-dev' in data:
for d in data['require-dev']:
if "/" in d:
if d not in self.installPackageLists:
self.installPackageLists.append(d)
vendor4 = d.split("/")[0]
product4 = d.split("/")[1]
versions4 = data['require-dev'][d]
if d not in self.resultsPkg['files'][filename][file]:
self.resultsPkg['files'][filename][file][str(d)] = {}
self.resultsPkg['files'][filename][file][str(d)]["product"] = str(product4)
self.resultsPkg['files'][filename][file][str(d)]["vendor"] = str(vendor4)
self.resultsPkg['files'][filename][file][str(d)]["version"] = []
self.resultsPkg['files'][filename][file][str(d)]["depend"] = []
if str(versions4) not in self.resultsPkg['files'][filename][file][d]["version"]:
self.resultsPkg['files'][filename][file][str(d)]["version"].append(str(versions4))
return self.resultsPkg
def getUnique(self, lists):
unique_list = []
for x in lists:
if x not in unique_list:
unique_list.append(x)
return unique_list
def scanComposerPackage(self):
print "[ OK ] Preparing..., It's take time to completed."
output = self.getInstallPkgList()
print "[ OK ] Database sync started"
self.syncData(self.installPackageLists)
print "[ OK ] Database sync comleted"
self.med = []
self.hig = []
self.low = []
self.cri = []
print "[ OK ] Scanning started"
self.results['Issues'] = {}
self.results['files'] = {}
for filename in output['files']:
print "[ OK ] Started %s file processing" % filename
if filename not in self.testedWith:
self.testedWith.append(filename)
if filename not in self.results['files']:
self.results['files'][filename] = {}
self.results['files'][filename]['packages'] = []
print "There are total %s %s files are processing" % (filename, len(output['files'][filename]))
for file in output['files'][filename]:
print "File %s Scanning Started" % file
for d in tqdm(output['files'][filename][file]):
vendor = output['files'][filename][file][d]['vendor']
product = output['files'][filename][file][d]['product']
version = output['files'][filename][file][d]['version']
depend = output['files'][filename][file][d]['depend']
if product not in self.dependanciesCount:
self.dependanciesCount.append(product)
self.getVulnData(product, vendor, version[0], ','.join(depend))
res = {}
res['product'] = product
res['version'] = version
res['file'] = file
res['Dependencies'] = ','.join(depend)
self.results['files'][filename]['packages'].append(res)
print "[ OK ] Scanning Completed"
self.results['header']['Tested With'] = ','.join(self.testedWith)
self.results['header']['Severity'] = {}
self.results['header']['Total Scanned Dependancies'] = len(self.dependanciesCount)
self.results['header']['Total Unique Vulnerabilities'] = len(self.vuln_found)
self.results['header']['Total Vulnerable Dependencies'] = len(self.getUnique(self.vuln_depe))
self.results['header']['Severity']['Low'] = len(self.low)
self.results['header']['Severity']['High'] = len(self.hig)
self.results['header']['Severity']['Medium'] = len(self.med)
self.results['header']['Severity']['Critical'] = len(self.cri)
with open("%s/%s.json" % (self.report_path, self.report_name), "w") as f:
json.dump(self.results, f)
print "[ OK ] Vulnerabilities Report ready - %s/%s" % (self.report_path, self.report_name)
url = "%s://%s:%s/api/report-upload/language/%s" % (self.protocol, self.server, self.port, self.tokenId)
fin = open('%s/%s.json' % (self.report_path, self.report_name), 'rb')
files = {'file': fin}
response = requests.post(url, files = files)
if response.status_code == 201:
print "[ OK ] Report Uploaded on server"
else:
print "[ ERROR ] Report Upload Error"
def syncData(self, productLists):
try:
url = "%s://%s:%s/api/scanDetailsVendor/composer" % (self.protocol, self.server, self.port)
headers = {
'Authorization': 'Basic QWRtaW5pc3RyYXRvcjpWZXJzYUAxMjM=',
'Content-Type': 'application/json'
}
payload = "{\"data\": \""+ ','.join(productLists) + "\"}"
response = requests.request("POST", url, headers=headers, data = payload)
responseData = response.json()
self.responseData = responseData
except:
print "[ OK ] Database sync error! Check internet connectivity"
sys.exit(1)
def query_yes_no(self, question, default="yes"):
valid = {"yes": True, "y": True, "ye": True,
"no": False, "n": False}
if default is None:
prompt = " [y/n] "
elif default == "yes":
prompt = " [Y/n] "
elif default == "no":
prompt = " [y/N] "
else:
raise ValueError("invalid default answer: '%s'" % default)
while True:
sys.stdout.write(question + prompt)
choice = raw_input().lower()
if default is not None and choice == '':
return valid[default]
elif choice in valid:
return valid[choice]
else:
sys.stdout.write("Please respond with 'yes' or 'no' "
"(or 'y' or 'n').\n")
if __name__ == "__main__":
    # CLI entry point: gather scan parameters, display the licence notice,
    # and run the composer vulnerability scan only if the user accepts.
    parser = argparse.ArgumentParser()
    parser.add_argument('-r', '--reportPath', type=str, help='Enter Report Path', required=True)
    parser.add_argument('-n', '--projectname', type=str, help='Enter Project Name', required=True)
    parser.add_argument('-t', '--target', type=str, help='Enter target source folder', required=True)
    parser.add_argument('-o', '--owner', type=str, help='Enter project owner')
    parser.add_argument('-v', '--version', action='version', version='%(prog)s 1.0')
    cli_args = parser.parse_args()
    # Fall back to the historical placeholder when no owner was supplied.
    owner = cli_args.owner if cli_args.owner else "Unknow"
    data = """
GNU GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The GNU General Public License is a free, copyleft license for
software and other kinds of works.
The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
the GNU General Public License is intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users. We, the Free Software Foundation, use the
GNU General Public License for most of our software; it applies also to
any other work released this way by its authors. You can apply it to
your programs, too.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.
To protect your rights, we need to prevent others from denying you
these rights or asking you to surrender the rights. Therefore, you have
certain responsibilities if you distribute copies of the software, or if
you modify it: responsibilities to respect the freedom of others.
For example, if you distribute copies of such a program, whether
gratis or for a fee, you must pass on to the recipients the same
freedoms that you received. You must make sure that they, too, receive
or can get the source code. And you must show them these terms so they
know their rights.
Developers that use the GNU GPL protect your rights with two steps:
(1) assert copyright on the software, and (2) offer you this License
giving you legal permission to copy, distribute and/or modify it.
For the developers' and authors' protection, the GPL clearly explains
that there is no warranty for this free software. For both users' and
authors' sake, the GPL requires that modified versions be marked as
changed, so that their problems will not be attributed erroneously to
authors of previous versions.
Some devices are designed to deny users access to install or run
modified versions of the software inside them, although the m
Do you want to accept ?
"""
    scanner = getComposerVulnerabilities(cli_args.reportPath, cli_args.projectname, cli_args.target, owner)
    if scanner.query_yes_no(data):
        scanner.scanComposerPackage()
    else:
        sys.exit(1)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.