blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
285bb70b43f6c87ac58cc9c0d7d50b7983f5ac64 | 8b57df3640fd9a726a8729c051dc27dbaee16059 | /notify/apps.py | 985c0feae7a585b8cbecad503a2120d658c0dc2f | [] | no_license | sinjorjob/django-notification-function | 86678df54e12b58a2feb315f10fde585412a2029 | 15ba6111474641498dbcd83ea0bd06d40b348387 | refs/heads/master | 2023-05-31T13:41:26.147761 | 2021-06-26T07:05:31 | 2021-06-26T07:05:31 | 380,437,265 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 150 | py | from django.apps import AppConfig
class NotifyConfig(AppConfig):
    """Django application configuration for the ``notify`` app."""

    # Use 64-bit auto-incrementing primary keys by default (Django 3.2+ setting).
    default_auto_field = 'django.db.models.BigAutoField'
    name = 'notify'
| [
"sinforjob@gmail.com"
] | sinforjob@gmail.com |
40d97dddb5dc49b4139e9618dc57139bb7d37fc0 | 78b5dd3afba62ec81f5dc5953fc8a04123d92e44 | /dls_edm/edmTable.py | b0cd98c65baf5340fc86c812fdb732db1dee0de7 | [] | no_license | dls-controls/dls_edm | 82d37640174cd0246602f19d5cba72d0b61658d5 | 8a042f7a695b1c1992aec4f47e5ba8cfd73480e2 | refs/heads/master | 2020-04-04T05:16:16.471970 | 2018-05-14T10:45:33 | 2018-05-14T10:45:33 | 155,739,826 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,745 | py | #!/bin/env python2.4
author = "Tom Cobb"
"""
This script contains EdmTable, a virtual EdmObject that can expand and contract as neccessary, resizing its components
"""
import os, re, sys, shutil
from edmObject import *
class EdmTable(EdmObject):
    """EdmTable(x=0,y=0,xoff=0,yoff=0,xborder=10,yborder=10,xjustify="l",\
yjustify="t")
    A virtual EdmObject that can expand and contract and generally behave like a
    gridlayout of cells. x,y are the default cell x and y positions (numbered
    from top left) that the next object will be placed into. Using the
    nextCell() and nextCol() methods modifies these. xoff and yoff are the
    default x and y offsets (local to the cell objects will be placed in).
    xborder and yborder are the spacing between cells. xjustify and yjustify
    are the justification in the cell (choose from "l","c","r","t","m","b":
    they stand for left, centre, right, top, middle, bottom)

    NOTE: this module is Python 2 code (``raise X, msg`` syntax, dict.has_key,
    print statements in the demo below)."""

    def __init__(self,x=0,y=0,xoff=0,yoff=0,xborder=10,yborder=10,xjustify="l",\
                 yjustify="t"):
        EdmObject.__init__(self,type="EdmTable")
        # Stash every constructor argument under a "__def_<name>" key inside
        # the EdmObject dict storage; "__def_x"/"__def_y" double as the
        # current-cell cursor used by addObject()/nextCell()/nextCol().
        for attr in ["x","y","xoff","yoff","xborder","yborder","xjustify","yjustify"]:
            self["__def_"+attr]=eval(attr)

    def write(self,text):
        """write(text) -> Error
        You cannot write text into an EdmTable, try creating an EdmObject and
        writing text into that instead"""
        # Writing raw edl text only makes sense on a concrete EdmObject.
        raise IOError, "This is an EdmTable, you cannot write text into it"

    def read(self):
        """read() -> text
        Read the text of this object by exporting a group and reading that."""
        return self.exportGroup().read()

    def autofitDimensions(self):
        """autofitDimensions() -> None
        Position objects globally so that they appear to be in the grid layout.
        If width and height are smaller that the miniumum space needed for this,
        make them larger. If they are larger already, stretch the cells to
        fit this size"""
        # ws/hs: per-column widths and per-row heights of the grid.
        ws,hs = self.__dimLists()
        # Minimum overall size: cell content plus the borders between cells.
        minw = sum(ws)+(len(ws)-1)*self["__def_xborder"]
        minh = sum(hs)+(len(hs)-1)*self["__def_yborder"]
        # if widths and heights are bigger than their minimums, resize cells uniformly
        if self["w"] > minw and self["__def_xjustify"] != "l":
            wratio = float(self["w"] - minw)/sum(ws)+1
            ws = [ int(0.5+w*wratio) for w in ws ]
        else:
            self["w"] = minw
        if self["h"] > minh and self["__def_yjustify"] != "t":
            hratio = float(self["h"] - minh)/sum(hs)+1
            hs = [ int(0.5+h*hratio) for h in hs ]
        else:
            self["h"] = minh
        # for each object, set its correct x and y value
        for ob in self.Objects:
            ob.autofitDimensions()
            axis_dict = {}
            # NOTE(review): loop variable "list" shadows the builtin; kept
            # as-is for fidelity with the original code.
            for axis_str,dim_str,list in [("x","w",ws),("y","h",hs)]:
                axis = ob["__EdmTable_"+axis_str]
                # find value in cell (start from the per-object offset)
                val = ob["__EdmTable_"+axis_str+"off"]
                # find diff between avail dim, and object size + offset
                deltaval = list[axis] - val - ob[dim_str]
                if ob["__EdmTable_"+axis_str+"justify"] in ["l","t"]:
                    # objects are already left/top justified
                    pass
                elif ob["__EdmTable_"+axis_str+"justify"] in ["r","b"]:
                    # right/bottom justify: shift by the whole slack
                    val += deltaval
                else:
                    # centre/middle justify: shift by half the slack
                    val += deltaval/2
                # now we work out val relative to the screen and set it in the object
                val += self[axis_str]+sum(list[:axis])+axis*self["__def_"+axis_str+"border"]
                axis_dict[axis_str] = val
            ob.setPosition(axis_dict["x"],axis_dict["y"])

    def setPosition(self,x,y,relative=False,move_objects=True):
        """setPosition(x,y,relative=False,move_objects=True)
        Set the position of self to be x,y. If relative, new_x,new_y =
        old_x+x,old_y+y. If move_objects, then move children proportionally.

        NOTE(review): move_objects is accepted but never read - children are
        always moved; confirm whether that flag should be honoured."""
        if relative:
            newx = x + self["x"]
            newy = y + self["y"]
            deltax,deltay = (x,y)
        else:
            newx = x
            newy = y
            deltax = x-self["x"]
            deltay = y-self["y"]
        self["x"] = newx
        self["y"] = newy
        # Children are positioned globally, so shift them by the same delta.
        for ob in self.Objects:
            ob.setPosition(deltax,deltay,relative=True)

    def exportGroup(self):
        """exportGroup() -> EdmObject
        Return the group representation of self. This involved autofitDimensions
        followed by a copy of all children into a new group"""
        copy = self.copy()
        # Recursively flatten nested EdmTables into plain groups first.
        for ob in copy.Objects:
            if ob.Type == "EdmTable":
                copy.replaceObject(ob,ob.exportGroup())
        copy.autofitDimensions()
        group = EdmObject("Group")
        # Carry the "__EdmTable*" bookkeeping keys over to the group.
        for key in copy.keys():
            if "__EdmTable" in key:
                group[key] = copy[key]
        for ob in copy.Objects:
            group.addObject(ob)
        group.autofitDimensions()
        return group

    def __dimLists(self):
        # generate lists of max widths and heights for each column and each row
        # max_height[y_val] gives max height of row y, and the cells in it
        max_height = {}
        # max_width[x_val] gives max width of column x, and the cells in it
        max_width = {}
        for ob in self.Objects:
            # first make sure the object's dimensions reflect its contents
            ob.autofitDimensions()
            for axis_str,dim_str in [("x","w"),("y","h")]:
                # for each axis, find the min height/width needed by this cell
                axis = ob["__EdmTable_"+axis_str]
                val = ob[dim_str]+ob["__EdmTable_"+axis_str+"off"]
                if axis_str=="x":
                    dim_dict = max_width
                else:
                    dim_dict = max_height
                # has_key is Python 2 only
                if dim_dict.has_key(axis):
                    dim_dict[axis]=max(dim_dict[axis],val)
                else:
                    dim_dict[axis]=val
        # calculate the max of each row and column; empty grids get one
        # zero-sized column/row so callers can still iterate
        if max_width:
            ws = [0]*( max( max_width.keys() )+1 )
            for key in max_width.keys():
                ws[key] = max_width[key]
        else:
            ws = [0]
        if max_height:
            hs = [0]*( max( max_height.keys())+1 )
            for key in max_height.keys():
                hs[key] = max_height[key]
        else:
            hs = [0]
        return ws,hs

    def addObject(self,ob,x=None,y=None,yoff=None,xoff=None,\
                  xjustify=None,yjustify=None):
        """addObject(ob,x=None,y=None,yoff=None,xoff=None,\
xjustify=None,yjustify=None) -> None
        Add ob to the current cell of the grid layout. Use x,y,xoff,yoff,
        xjustify,yjustify to override their default values (no changes are
        made to the default values themselves)"""
        assert ob.Type!='Screen', "Can't add a Screen to a "+str(self.Type)
        # set the attributes needed to store this object; fall back to the
        # "__def_*" defaults captured in __init__ for any argument left None
        for attr in ["x","y","xoff","yoff","xjustify","yjustify"]:
            if eval(attr)!=None:
                ob["__EdmTable_"+attr]=eval(attr)
            else:
                ob["__EdmTable_"+attr]=self["__def_"+attr]
        self.Objects.append(ob)
        ob.Parent = self

    def nextCell(self,max_y = -1):
        """nextCell(max_y = -1) -> None
        Move to the next cell, if max_y > -1, don't go further down that this
        cell, change columns if necessary"""
        if max_y > -1 and not self["__def_y"] < max_y:
            # column is full: wrap to the top of the next column
            self.nextCol()
        else:
            # move to next cell down
            self["__def_y"]+=1

    def nextCol(self):
        """nextCol() -> None
        Move to the first cell in the next column"""
        self["__def_y"]=0
        self["__def_x"]+=1
if __name__=="__main__":
    # Manual demo: lay out coloured rectangles of assorted sizes in an
    # EdmTable, varying justification per rectangle, and write the result
    # to testEdmTable.edl for inspection in edm.
    a = EdmTable()
    counter = 10
    for size in [100,35,20,44,74,24,22,60,30,5,80,40,25,60,4,4,23,9,30,20,7,18]:
        r = EdmObject("Rectangle")
        r.setDimensions(size,size)
        # line colour index is twice the fill colour index
        r["lineColor"]="index "+str(2*counter)
        r["fillColor"]="index "+str(counter)
        r["fill"]=True
        # cycle justification through (l,t), (r,b), (c,c)
        a.addObject(r,xjustify=["l","r","c"][counter%3],yjustify=["t","b","c"][counter%3])
        # odd counter advances the cursor; odd size additionally wraps column
        if counter%2 and size%2:
            a.nextCol()
        elif counter%2:
            a.nextCell()
        counter += 1
    s = EdmObject("Screen")
    s.addObject(a)
    s.autofitDimensions()
    # NOTE(review): "file" shadows the py2 builtin and is never closed.
    file = open("testEdmTable.edl","w")
    file.write(s.read())
| [
"tmc43@e099a375-04f9-0310-9d5f-a741eaff62e1"
] | tmc43@e099a375-04f9-0310-9d5f-a741eaff62e1 |
0f6d357b301991361bd75e38d6538629a4efd50d | 0b6831cac9cd5f73eaf39be6310c69427effdbe5 | /GitHubTest/folder/module2.py | 3c03504213f1da1d8faf08137c817d1fae14e075 | [] | no_license | shutterfly2011/eChartTest | c916f29e8401820bd7866c6970e94b7d9579e9be | 70ba194dce289fb4843377a57ad16e7f25f83945 | refs/heads/master | 2021-03-12T20:15:14.283093 | 2015-10-05T04:05:13 | 2015-10-05T04:05:13 | 41,652,188 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 190 | py | '''
Created on Jul 11, 2015
@author: Xiaowei
'''
class MyClass(object):
    """IDE-generated placeholder class; currently holds no state or behaviour."""

    def __init__(self, params):
        """Initialise the instance.

        NOTE(review): ``params`` is accepted but never stored or used —
        confirm whether it should be kept in the signature.
        """
| [
"xiaoweitan@hotmail.com"
] | xiaoweitan@hotmail.com |
8f13d2977ff8b8a1f02b8827dcb286851479fb9f | 4fe788b057eb3ee73589ff380386928536efc15a | /DashScripts/PoissonDist.py | 1086826e06be76cda967e2b8df92a1be312686b5 | [] | no_license | jhpiedrahitao/BasicsProbabilityandStatistics | 669a7fd2e44357338039a0c48c3bfed4ea1c5cda | 492b05e7e60b92641081ec2bcdc5241d14b02c10 | refs/heads/main | 2023-04-03T23:00:05.808472 | 2021-04-23T22:05:52 | 2021-04-23T22:05:52 | 361,015,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,293 | py | import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import plotly.graph_objs as go
from scipy.stats import poisson
# Dash application; static assets (including my.css) are served from ./static.
app = dash.Dash(name=__name__, assets_folder='static')
# THE EXTERNAL CSS FILE REMOVES THE DASH "UNDO/REDO" BUTTON
app.css.append_css({'external_url':'static/my.css'})

# Layout: the PMF bar chart on top, with a labelled numeric input for the
# Poisson mean below it. The input's id 'mu' is wired to update_graph().
app.layout = html.Div([
    dcc.Graph(id='feature-graphic', config={'displayModeBar': False}),
    html.Div([
        html.Div('μ=',style={'width':'8%','fontSize':24,'fontStyle':'italic','float':'left','textAlign':'right','paddingRight':20}),
        dcc.Input(
            id='mu',
            type='number',
            value=10,
            style={'width':'8%','fontSize':24,'float':'left'})
        ])
    ])
@app.callback(
    Output('feature-graphic', 'figure'),
    [Input('mu', 'value')])
def update_graph(mu):
    """Rebuild the Poisson PMF bar chart whenever the mean ``mu`` changes.

    Bars cover k = 0 .. int(2.6*mu) - 1, which spans essentially all of the
    probability mass for a Poisson distribution with mean ``mu``.
    """
    # Guard against an empty/cleared input box: Dash fires the callback with
    # mu=None, which previously crashed on int(2.6*None). Render an empty
    # chart instead.
    if mu is None:
        mu = 0
    # Hoist the bar count: the original evaluated int(2.6*mu) three times.
    n_bars = int(2.6 * mu)
    x = list(range(n_bars))
    y = [poisson.pmf(k, mu) for k in x]
    return {
        'data': [go.Bar(
            x=x,
            y=y,
            width=[0.2] * n_bars,
        )],
        'layout': go.Layout(
            title='Poisson Distribution',
            margin={'l':40, 'b':40, 't':50, 'r':0}
        )
    }

if __name__ == '__main__':
    app.run_server()
| [
"jhpiedrahitao@unal.edu.co"
] | jhpiedrahitao@unal.edu.co |
05b787075a8bcf7e3ffeee6b0e926d0be3db0140 | 941612645550eb4fa3f1aed02df59e743f1114ba | /insertion_sort.py | a4ecefbb590687ffc5d1c086709f54eb14f743ef | [] | no_license | SokKanaTorajd/als-smt3 | b8762132af59ab0e392adc31e618c9e1e1d67baf | 872e94cbac9bbea9b7b892187943ebb0b1394b78 | refs/heads/master | 2021-01-03T09:44:02.663402 | 2020-02-12T14:07:52 | 2020-02-12T14:07:52 | 240,026,541 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 830 | py | """Taufaldisatya W.D/182103010/Sistem Informasi"""
# Sample mid-term ("UTS") scores, one record per student; this list is the
# input that insertionsort() below sorts in place by "nilai" (score).
nilai_uts = [
    {"nama" : "Siti Azizah","nilai" : 85},
    {"nama" : "Siti Aminah","nilai" : 95},
    {"nama" : "Siti Fatimah","nilai" : 75},
    {"nama" : "Siti Maimunah","nilai" : 70},
    {"nama" : "Siti Komariah","nilai" : 90},
]
def insertionsort(nilai_uts):
    """Insertion-sort a list of ``{"nama", "nilai"}`` records in place,
    ascending by ``"nilai"``, and return the same list.

    Field values are copied between the existing dicts (the dict objects
    themselves stay at their original list slots), matching the original
    implementation's behaviour exactly.
    """
    for pos, record in enumerate(nilai_uts):
        # Remember the record being inserted before its slot is overwritten.
        key_nilai = record["nilai"]
        key_nama = record["nama"]
        # Walk left, shifting larger entries one slot to the right.
        hole = pos
        while hole and nilai_uts[hole - 1]["nilai"] > key_nilai:
            nilai_uts[hole]["nilai"] = nilai_uts[hole - 1]["nilai"]
            nilai_uts[hole]["nama"] = nilai_uts[hole - 1]["nama"]
            hole -= 1
        # Drop the remembered record into the hole left behind.
        nilai_uts[hole]["nilai"] = key_nilai
        nilai_uts[hole]["nama"] = key_nama
    return nilai_uts
# Sort once, then print every record. Iterate the list directly instead of
# indexing via range(len(...)); "%s" % record prints the same dict repr.
nilai_uts = insertionsort(nilai_uts)
for record in nilai_uts:
    print("%s" % record)
| [
"t.wijatama.d@gmail.com"
] | t.wijatama.d@gmail.com |
55f2794ab24a2c74169da65c168ce04bb3914a86 | 384a612001a5fdd5d089898f13cc7aef3b954a6e | /coupons/models.py | a70532afc380b7291804bb0f539e35ea14a9e0e6 | [] | no_license | purum01/test_django_onlineshop | f3a9c4d12d4077ea69cb9ad372e5acc5243379b7 | c4a40a273a512c939a364bee91bab950559d0f87 | refs/heads/main | 2023-06-14T12:11:05.614611 | 2021-07-03T14:34:01 | 2021-07-03T14:34:01 | 380,695,829 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 442 | py | from django.db import models
from django.core.validators import MinValueValidator, MaxValueValidator
class Coupon(models.Model):
    """A discount coupon redeemable between ``valid_from`` and ``valid_to``."""

    # Human-entered coupon code; uniqueness enforced at the database level.
    code = models.CharField(max_length=50, unique=True)
    valid_from = models.DateTimeField()
    valid_to = models.DateTimeField()
    # Percentage discount, constrained to the range 0-100 by validators.
    discount = models.IntegerField(validators=[MinValueValidator(0),MaxValueValidator(100)])
    # Manual on/off switch, independent of the validity window.
    active = models.BooleanField()

    def __str__(self):
        return self.code
| [
"purumskyamy@gmail.com"
] | purumskyamy@gmail.com |
ab12cc2538c903dfca478ff16c8508153a7312c9 | 994ea22f35c635fdf139af9282b0d3a3d86ea34a | /ud120-projects-intro_to_machine_learning/decision_tree/dt_author_id.py | 667e184f992ddbc3679ee4787f6ce8ba6bcc894a | [] | no_license | zjyx147/Udacity | ac371fbc5b5b456e88b411657ef5a28c3b071c6c | d86fadd537dbacc6f8142b043e71527b0448bae3 | refs/heads/master | 2022-06-23T14:25:41.242353 | 2019-06-20T20:12:13 | 2019-06-20T20:12:13 | 191,207,247 | 0 | 0 | null | 2022-06-21T22:07:35 | 2019-06-10T16:42:18 | DIGITAL Command Language | UTF-8 | Python | false | false | 1,128 | py | #!/usr/bin/python
"""
This is the code to accompany the Lesson 3 (decision tree) mini-project.
Use a Decision Tree to identify emails from the Enron corpus by author:
Sara has label 0
Chris has label 1
"""
import sys
from time import time
sys.path.append("../tools/")
from email_preprocess import preprocess
### features_train and features_test are the features for the training
### and testing datasets, respectively
### labels_train and labels_test are the corresponding item labels
features_train, features_test, labels_train, labels_test = preprocess()

#########################################################
### your code goes here ###
from sklearn import tree
from sklearn.metrics import accuracy_score

# Optional lesson speed-up: train on 1% of the data (left disabled).
#features_train = features_train[:len(features_train)/100]
#labels_train = labels_train[:len(labels_train)/100]

# min_samples_split=40 as specified by the mini-project instructions.
clf = tree.DecisionTreeClassifier(min_samples_split=40)
clf.fit(features_train, labels_train)
pred = clf.predict(features_test)

# Number of features per data point (a quiz question in the lesson);
# Python 2 print statements.
print len(features_train[0])
print "accuracy: ", accuracy_score(pred, labels_test)
#########################################################
| [
"zjyx147@gmail.com"
] | zjyx147@gmail.com |
64ca497be5be743de5dd8bc59793c84cf3431d4f | 18c6f7ee10526583d8c65acc5ce04579a91fdeeb | /ch_01/18.tuple.py | cd04da0d3333b776026f7697790ddcee7dacff23 | [] | no_license | cloudsecuritylabs/pythonProject_1 | 97273634df25e306d0a2aed56fcf5c836d2ac33c | 8fc0d17b549d7195f8de46a227e5bb5d9f2ed4ed | refs/heads/master | 2023-07-22T16:06:14.550571 | 2021-08-24T03:09:00 | 2021-08-24T03:09:00 | 399,319,867 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 175 | py | '''
Let's learn about tuple
'''
# tuple is immutable: the object ('cat', 'dog', 'horse') cannot be changed.
my_tup = ('cat', 'dog', 'horse')

# Rebinding the *name* is still allowed even though tuples are immutable —
# my_tup now refers to a brand-new (mutable) empty list, not a tuple.
my_tup = []
# None / NoneType demo: compare with "is", not "==".
food = None
if food is None:
    print("Hey give me something")
"basu.ankan@gmail.com"
] | basu.ankan@gmail.com |
cf84487a76bd32bc0c49055223a9916e5c4a532d | 13a8e1253ed847d553c359afb69bce67f7e68b46 | /check_invoice_2.0.py | 3a821e433841da21eda7b64eb2c290b1fc643098 | [] | no_license | jiegangwu/Invoice-Checking | c7aa5d1d1e8dbbe9ed4fd912e780ec5495ec7b4a | b760287121f88529357462228ee5a9479f0b1aa2 | refs/heads/master | 2022-03-22T17:56:46.747762 | 2019-12-25T08:26:02 | 2019-12-25T08:26:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,149 | py | # -*- coding: utf-8 -*-
"""
Created on Sat Oct 12 16:55:11 2019
解决:
1. 出现输入验证码后,“查验”为灰色,或呈蓝色但点击后“验证码失效”,说明发票查验次数太多,需刷新重输
2. 发票查验太频繁,请等1分钟
3. base64编码问题
@author: situ
"""
from selenium import webdriver
from selenium.common.exceptions import StaleElementReferenceException
from selenium.common.exceptions import ElementClickInterceptedException
#from selenium.webdriver.chrome.options import Options
import time
import pandas as pd
from docx import Document
from docx.shared import Inches
import os
#import sys
from base64 import b64decode
import base64
from matplotlib import pyplot as plt
from cv2 import imread,split,merge
def yzm(driver, invoice_num, yzm_img_path):
    """Run one captcha round for the invoice currently in the form.

    Downloads the base64-encoded captcha image from the page, saves it under
    *yzm_img_path*, shows it inline with matplotlib (falling back to telling
    the user to look at the browser), prompts for the captcha text, submits
    the form, then verifies that the page actually navigated away.

    Raises:
        Exception: if the input form is still present after submitting,
            i.e. the check did not go through. invoice_check() catches this
            and retries after refreshing the page.
    """
    print("正在查验发票%s..." % invoice_num)
    # The captcha is embedded as a data URI: strip the
    # "data:image/jpeg;base64," prefix and restore URL-escaped newlines.
    img_str = driver.find_element_by_id("yzm_img").get_attribute('src')
    img_str = img_str.split(",")[-1]
    img_str = img_str.replace("%0A", '\n')
    img_data = b64decode(img_str)
    img_file = os.path.join(yzm_img_path, '%s.jpg' % invoice_num)
    # "with" already closes the file; the original's extra fout.close()
    # inside the with-block was redundant and has been removed.
    with open(img_file, 'wb') as fout:
        fout.write(img_data)
    # Display the captcha inline. OpenCV loads BGR; matplotlib expects RGB.
    im = imread(img_file)
    if im is None:
        print("请自行在浏览器中查看验证码")
    else:
        b, g, r = split(im)
        im2 = merge([r, g, b])
        plt.imshow(im2)
        plt.xticks([]), plt.yticks([])  # hide the axes
        plt.show()
    # Echo the page's hint about which coloured word to type, colouring the
    # terminal output with the matching ANSI code (33 yellow, 31 red, 34 blue).
    text = driver.find_element_by_id("yzminfo").text
    dic = {"黄色": "33", "红色": "31", "蓝色": "34"}  # was bare int 34; normalised to str
    if text == "请输入验证码文字":
        print(text)
    else:
        print('请输入验证码图片中\033[0;%sm%s\033[0m文字' % (dic[text[9:11]], text[9:11]))
    code = input("请输入验证码:")
    driver.find_element_by_id('yzm').send_keys(code)
    time.sleep(1)
    driver.find_element_by_id('checkfp').click()
    time.sleep(4)
    # BUG FIX: the original raised Exception("未跳转,等待刷新") inside the same
    # try block and then caught it with its own bare "except:", so the
    # "page did not navigate" signal never reached the caller's retry loop.
    # try/except/else lets the exception propagate only when the form is
    # still present.
    try:
        driver.find_element_by_id('fpdm')
    except Exception:
        print("已跳转")
    else:
        raise Exception("未跳转,等待刷新")
def screen_shot(driver,screenshot_save_path,invoice_num):
    """Scroll the result page back to the top and save a window screenshot
    named after the invoice number under *screenshot_save_path*."""
    time.sleep(3)
    # Scroll the browser to the very top so the screenshot starts at the
    # header of the verification result.
    driver.execute_script("""
        (function () {
            var y = 0;
            var step = 100;
            window.scroll(0, 0);
        })();
        """)
    driver.save_screenshot(os.path.join(screenshot_save_path,'%s.png' % invoice_num))
    print("发票%s截图成功" % invoice_num)
    time.sleep(2)
def invoice_check(driver,invoice_code,invoice_num,date,value,screenshot_save_path,yzm_img_path):
    """Fill the tax-bureau form for one invoice, drive the captcha loop
    (retrying on rate-limiting / decode errors), and screenshot the result.

    Params: the four invoice fields (code, number, date, amount) plus output
    directories for screenshots and captcha images.
    """
    time.sleep(2)
    driver.get("https://inv-veri.chinatax.gov.cn/")
    driver.find_element_by_id('fpdm').send_keys(invoice_code)
    driver.find_element_by_id('fphm').send_keys(invoice_num)
    driver.find_element_by_id('kprq').send_keys(date)
    driver.find_element_by_id('kjje').send_keys(value)
    # false == 1 means "this attempt failed, refresh and retry".
    try:
        yzm(driver,invoice_num,yzm_img_path)
        false = 0
    except Exception:
        false = 1
    except:
        # NOTE(review): effectively unreachable for ordinary errors — the
        # broader "except Exception" above already catches them (base64
        # decode errors are ValueError subclasses); only BaseExceptions
        # such as KeyboardInterrupt would land here.
        print("base64图片获取有误,请等待刷新")
        false = 1
    while false:
        # Refresh, re-fill the form, and retry the captcha round.
        driver.refresh()
        driver.find_element_by_id('fpdm').send_keys(invoice_code)
        driver.find_element_by_id('fphm').send_keys(invoice_num)
        driver.find_element_by_id('kprq').send_keys(date)
        driver.find_element_by_id('kjje').send_keys(value)
        try:
            yzm(driver,invoice_num,yzm_img_path)
            false = 0
        except ElementClickInterceptedException as msg:
            # A modal blocked the click — usually the rate-limit popup.
            try:
                if driver.find_element_by_id('popup_message').text=="验证码请求次数过于频繁,请1分钟后再试!":
                    print("验证码请求次数过于频繁,1分钟后程序将自动跳转1")
                    popup = driver.find_element_by_css_selector('#popup_ok')
                    popup.click()
                    time.sleep(60)
            except:
                print("no1")
            false = 1
        except base64.binascii.Error as msg:
            # Captcha image data was not valid base64; also check for the
            # rate-limit popup before retrying.
            try:
                if driver.find_element_by_id('popup_message').text=="验证码请求次数过于频繁,请1分钟后再试!":
                    print("验证码请求次数过于频繁,1分钟后程序将自动跳转2")
                    popup = driver.find_element_by_css_selector('#popup_ok')
                    popup.click()
                    time.sleep(60)
            except:
                print("no2")
            print("base64图片获取有误,请等待刷新%s"%msg)
            false = 1
    # If the "wrong captcha" popup is present, loop re-entering the captcha;
    # otherwise screenshot the successful result.
    try:
        popup = driver.find_element_by_css_selector('#popup_ok')
        print("验证码输入错误,请重输!")
    except:
        popup = None
    screen_shot(driver,screenshot_save_path,invoice_num)
    while popup: # wrong-captcha popup: dismiss, refresh the captcha, retype
        popup.click()
        driver.find_element_by_css_selector('#yzm_img').click() # refresh the captcha image
        # driver.find_element_by_id('yzm_img').click()
        driver.find_element_by_id('yzm').clear()
        time.sleep(1)
        yzm(driver,invoice_num,yzm_img_path)
        try:
            popup = driver.find_element_by_css_selector('#popup_ok')
            print("验证码输入错误,请重输!")
        except:
            # NOTE(review): popup is not reset to None here, so after a
            # successful retry the loop re-tests the stale popup reference —
            # confirm this terminates as intended.
            screen_shot(driver,screenshot_save_path,invoice_num)
def main():
    """End-to-end run: read the invoice spreadsheet, verify each invoice on
    the tax-bureau site (screenshotting the result), then bundle screenshots
    into one Word document per business number."""
    # invoice_file_path = input("请输入发票信息excel路径:")
    invoice_file_path = "E:/self_programming/invoice_check_2.0/invoice_sample_test.xlsx"
    path = os.path.dirname(invoice_file_path)
    os.chdir(path)
    # Output folders: screenshots, generated documents, captcha cache.
    screenshot_save_path = os.path.join(path,"截图")
    doc_save_path = os.path.join(path,"文档")
    yzm_img_path = os.path.join(path,"captcha")
    if not os.path.exists(screenshot_save_path):
        os.mkdir(screenshot_save_path)
    if not os.path.exists(doc_save_path):
        os.mkdir(doc_save_path)
    if not os.path.exists(yzm_img_path):
        os.mkdir(yzm_img_path)
    # Reads the first sheet by default; dtype="str" keeps invoice numbers
    # and codes as strings (no leading-zero loss).
    invoice_info = pd.read_excel(invoice_file_path,dtype="str")
    invoice_info.head()  # no-op outside a notebook; kept from the original
    driver = webdriver.Chrome(os.path.join(path,"chromedriver.exe"))
    driver.maximize_window()
    for i in range(len(invoice_info)):
        try:
            # Columns 1..4: invoice code, invoice number, date, amount.
            # NOTE(review): DataFrame.ix was removed in pandas 1.0 — this
            # script requires an old pandas; confirm / migrate to .iloc.
            invoice_code,invoice_num,date,value = [str(ele) for ele in invoice_info.ix[i,1:5]]
            invoice_check(driver,invoice_code,invoice_num,date,value,screenshot_save_path,yzm_img_path)
            time.sleep(1)
        except StaleElementReferenceException as msg:
            print("查找元素异常%s,请等待..."%msg)
            print("重新获取元素...")
            driver.refresh()
            # NOTE(review): rebinding the for-loop variable has no effect on
            # the next iteration of "for i in range(...)" — this invoice is
            # simply skipped, not retried.
            i = i+1
        except ElementClickInterceptedException as msg:
            # Rate-limit popup blocked a click: acknowledge it and wait.
            try:
                if driver.find_element_by_id('popup_message').text=="验证码请求次数过于频繁,请1分钟后再试!":
                    print("验证码请求次数过于频繁,1分钟后程序将自动跳转3")
                    popup = driver.find_element_by_css_selector('#popup_ok')
                    popup.click()
                    time.sleep(60)
            except:
                print("no3")
            driver.refresh()
            i = i+1
    print("截图已完成!请打开%s查看,正在将截图插入word文档......" % screenshot_save_path)
    # Group screenshots by business number ("业务编号"): one Word document per
    # business, named after the business number, containing the screenshots
    # of all its invoices ("发票号码").
    for business_no in invoice_info["业务编号"].value_counts().index:
        invoice_no = invoice_info["发票号码"][invoice_info["业务编号"]==business_no]
        doc = Document() # one document per business
        doc.add_heading(business_no ,0)
        for invoice_no_i in invoice_no:
            images = os.path.join(screenshot_save_path,'%s.png' % invoice_no_i) # screenshot saved earlier
            doc.add_picture(images,width=Inches(6)) # insert the image, 6 inches wide
        doc.save(os.path.join(doc_save_path,'%s.docx' % business_no))
    print("已将截图插入word文档,请打开%s查看!" % doc_save_path)
    # Optionally clear the captcha cache:
    # import shutil
    # shutil.rmtree(yzm_img_path)
os.system("pause")
#pyinstaller -p C:/Users/situ/Anaconda2/envs/py3/Lib/site-packages -D check_invoice.py
#cd E:/self_programming/invoice_check_test/dist/check_invoice
#invoice_code = "5300193130"
#invoice_num = "01028425"
#date = "20191211"
#value = "877505.31"
#
#driver = webdriver.Chrome(os.path.join(path,"chromedriver.exe"))
#driver.maximize_window()
#driver.get("https://inv-veri.chinatax.gov.cn/")
#
#driver.find_element_by_id('fpdm').send_keys(invoice_code)
#driver.find_element_by_id('fphm').send_keys(invoice_num)
#driver.find_element_by_id('kprq').send_keys(date)
#driver.find_element_by_id('kjje').send_keys(value)
#
#invoice_check(driver,invoice_code,invoice_num,date,value,screenshot_save_path,yzm_img_path)
#driver.refresh() | [
"404011463@qq.com"
] | 404011463@qq.com |
fd11ad2bc7dc9769012fedd041968541efec6284 | 9c21e49150c99751231ad399bdba1850bb60c88c | /finders/views.py | 2810a5c9ef7cfcac6aa72c7420545809a1090294 | [
"MIT"
] | permissive | netvigator/auctions | 3ab4086cb0bfbc736b17ede4e928f3ead2b08a4c | fc3766226cc65ac8694dffc74e893ecff8e7d07c | refs/heads/main | 2023-05-25T15:55:01.249670 | 2023-05-06T14:51:12 | 2023-05-06T14:51:12 | 92,816,101 | 0 | 0 | MIT | 2023-02-16T05:24:34 | 2017-05-30T09:14:39 | Python | UTF-8 | Python | false | false | 10,419 | py | from django.conf import settings
from core.views import ( DetailViewGotModel, ListViewGotModel,
UpdateViewCanCancel, CreateViewCanCancel )
from django.http import HttpResponseRedirect
from .forms import ItemFoundForm, UserItemFoundForm
from .mixins import AnyReleventHitStarColsChangedMixin
from .models import ItemFound, UserItemFound, UserFinder
from core.mixins import ( GetPaginationExtraInfoInContext,
GetUserSelectionsOnPost,
TitleSearchMixin )
from core.utils import ( getDateTimeObjGotEbayStr, getEbayStrGotDateTimeObj,
sayMoreAboutHitsForThis )
from brands.models import Brand
from categories.models import Category
from models.models import Model
# ### views assemble presentation info ###
# ### keep views thin! ###
# Debug-print helpers: live under test, no-ops in normal operation, so call
# sites can stay in place without spamming production logs.
if settings.TESTING:
    #
    from pprint import pprint
    #
    maybePrint   = print
    maybePrettyP = pprint
    #
else:
    #
    def maybePrint( *args ): pass
    def maybePrettyP( *args ): pass
    #
class FinderIndexView(
        GetUserSelectionsOnPost,
        GetPaginationExtraInfoInContext,
        TitleSearchMixin,
        ListViewGotModel ):
    """Paginated list of the current user's finders, filtered by the URL's
    'select' kwarg: 'A' all, 'D' "deleted" (list-excluded), 'S' title search."""

    template_name       = 'finders/index.html'
    model               = UserFinder
    context_object_name = 'finders_list'
    paginate_by         = 100

    def get_queryset( self ):
        #
        # ADS
        # qs = super().get_queryset()
        # sSelect = 'P'
        #
        # ListViewGotModel inherits from GetUserOrVisitingMixin
        oUser, isVisiting = self.getUserOrVisiting()
        #
        # default to 'A' (all) when the kwarg is absent or empty
        sSelect = self.kwargs.get( 'select', 'A' )
        #
        if not sSelect: sSelect = 'A'
        #
        if sSelect == 'A': # all (not list-excluded)
            qsGot = UserFinder.objects.filter(
                    iUser        = oUser,
                    bListExclude = False,
                    ).order_by( '-iHitStars', 'iMaxModel', 'tTimeEnd' )
        #elif sSelect == 'P': # postive (non-zero hit stars)
            # qsGot = UserFinder.objects.filter(
            #     iUser             = oUser,
            #     iHitStars__isnull = False,
            #     bListExclude      = False,
            #     ).order_by( '-iHitStars', 'iMaxModel', 'tTimeEnd' )
        #
        elif sSelect == 'D': # "deleted" (excluded from list)
            qsGot = UserFinder.objects.filter(
                    iUser             = oUser,
                    iHitStars__isnull = False,
                    bListExclude      = True
                    ).order_by( '-iHitStars', 'iMaxModel', 'tTimeEnd' )
        #elif sSelect == 'Z': # iHitStars = 0
            # qsGot = UserFinder.objects.filter(
            #     iUser        = oUser,
            #     iHitStars    = 0,
            #     bListExclude = False
            #     ).order_by( '-iHitStars', 'iMaxModel', 'tTimeEnd' )
        #
        elif sSelect == 'S': # Search
            #
            # NOTE(review): *args / **kwargs are not defined in this method's
            # scope (signature is just "self"), so this branch raises
            # NameError if taken — confirm the intended signature. Also, any
            # other sSelect value leaves qsGot unbound (UnboundLocalError).
            qsGot = super().get_queryset( *args, **kwargs )
            #
            # want to find the get_queryset() method of TitleSearchMixin
            # not the get_queryset() method of ListViewGotModel
            #
            #
        return qsGot
class ItemFoundDetailView( GetUserSelectionsOnPost, DetailViewGotModel ):
    """Detail page for one found item, reached from the finders list (top
    menu item); adds the item's other hits and stashes navigation state in
    the session for the follow-up create/update views."""

    model         = UserFinder
    parent        = ItemFound
    template_name = 'finders/detail.html'
    form_class    = UserItemFoundForm

    def get_context_data( self, **kwargs ):
        '''
        want more info to the context data.
        '''
        context = super().get_context_data( **kwargs )
        # qsThisItem = UserItemFound.objects.filter(
        #
        '''
        example context at this point:
        {'object': <UserItemFound: FISHER FM 200 B FM STEREO TUBE TUNER 200B>,
        'useritemfound': <UserItemFound: FISHER FM 200 B FM STEREO TUBE TUNER 200B>,
        'view': <finders.views.ItemFoundDetailView object at 0x7f0669fa63c8>,
        'model': <class 'finders.models.UserItemFound'>,\
        'parent': <class 'finders.models.ItemFound'>}
        '''
        #
        # DetailViewGotModel inherits from GetUserOrVisitingMixin
        oUser, isVisiting = self.getUserOrVisiting()
        #
        # All of this user's non-excluded hits for the same eBay item.
        qsThisItemAllHits = UserItemFound.objects.filter(
                iItemNumb_id = context[ 'object' ].iItemNumb_id,
                iUser        = oUser,
                bListExclude = False,
                ).order_by( '-iHitStars' )
        #
        # Fall back to including excluded hits if none were found.
        if len( qsThisItemAllHits ) == 0:
            #
            qsThisItemAllHits = UserItemFound.objects.filter(
                    iItemNumb_id = context[ 'object' ].iItemNumb_id,
                    iUser        = oUser,
                    ).order_by( '-iHitStars' )
            #
        #
        sayMoreAboutHitsForThis( qsThisItemAllHits )
        #
        context['HitsForThis'] = qsThisItemAllHits
        #
        context['isVisiting'] = isVisiting
        #
        # Stash navigation state for ItemFoundCreateView.get_initial().
        session = self.request.session
        #
        session['iItemNumb' ] = context[ 'object' ].iItemNumb_id
        #
        if len( qsThisItemAllHits ) == 0:
            session['iSearch'] = None
        else:
            session['iSearch'] = qsThisItemAllHits[0].iSearch_id
        #
        # cannot serialize datetime object, so covert to string
        #
        session['sTimeEnd' ] = getEbayStrGotDateTimeObj(
                                   context[ 'object' ].tTimeEnd )
        #
        return context
"""
class ItemFoundHitView( GetUserSelectionsOnPost, DetailViewGotModel ):
# get this from the list at bottom for a model, brand or category
model = UserItemFound
parent = ItemFound
template_name = 'finders/hit-detail.html'
form_class = UserItemFoundForm
def get_context_data( self, **kwargs ):
'''
want more info to the context data.
'''
context = super().get_context_data( **kwargs )
#
# qsThisItem = UserItemFound.objects.filter(
#
'''
{'object': <UserItemFound: FISHER FM 200 B FM STEREO TUBE TUNER 200B>,
'useritemfound': <UserItemFound: FISHER FM 200 B FM STEREO TUBE TUNER 200B>,
'view': <finders.views.ItemFoundDetailView object at 0x7f0669fa63c8>,
'model': <class 'finders.models.UserItemFound'>,\
'parent': <class 'finders.models.ItemFound'>}
'''
#
qsThisItemOtherHits = UserItemFound.objects.filter(
iItemNumb_id = context[ 'object' ].iItemNumb_id,
iUser = context[ 'object' ].iUser,
bListExclude = False
).exclude( id = context[ 'object' ].id
).order_by( '-iHitStars' )
#
context['HitsForThis'] = qsThisItemOtherHits
#
session = self.request.session
#
session['iItemNumb'] = context[ 'object' ].iItemNumb_id
#
session['iSearch'] = \
context['object'].iSearch_id or qsThisItemOtherHits[0].iSearch_id
#
return context
"""
class ItemFoundUpdateView(
        AnyReleventHitStarColsChangedMixin, UpdateViewCanCancel ):
    """Edit an existing finder; the mixin re-checks hit stars when any of
    tHitStarRelevantCols changed."""

    model           = UserItemFound
    parent          = ItemFound
    template_name   = 'finders/edit.html'
    success_message = 'Finder update successfully saved!!!!'
    form_class      = UserItemFoundForm

    # Columns whose change is relevant to hit-star recalculation
    # (consumed by AnyReleventHitStarColsChangedMixin).
    tHitStarRelevantCols = (
        'iModel',
        'iBrand',
        'iCategory' )

    def get_context_data( self, **kwargs ):
        '''
        want more info to the context data.
        '''
        #
        context = super().get_context_data( **kwargs )
        #
        # Limit brand/category dropdowns to rows owned by the current user.
        context['form'].fields['iBrand'].queryset = \
            Brand.objects.filter( iUser = self.request.user )
        context['form'].fields['iCategory'].queryset = \
            Category.objects.filter( iUser = self.request.user )
        #
        instance = context['form'].instance
        #
        # If a brand is already chosen, only offer that brand's models;
        # otherwise offer all of the user's models.
        if instance.iBrand is not None:
            context['form'].fields['iModel'].queryset = \
                Model.objects.filter(
                    iUser  = self.request.user,
                    iBrand = instance.iBrand )
        else:
            context['form'].fields['iModel'].queryset = \
                Model.objects.filter( iUser = self.request.user )
        #
        return context
class ItemFoundCreateView( CreateViewCanCancel ):
    """Create a new finder, pre-filled from the session state stashed by
    ItemFoundDetailView (item number, search, auction end time)."""

    model           = UserItemFound
    parent          = ItemFound
    template_name   = 'finders/add.html'
    success_message = 'New finder successfully saved!!!!'
    form_class      = UserItemFoundForm

    def get_initial( self ):
        #
        initial = super().get_initial()
        #
        # in testing, values might not be there
        #
        session = self.request.session
        #
        if session and 'iItemNumb' in session:
            #
            initial['iItemNumb'] = session['iItemNumb']
            initial['iSearch' ] = session['iSearch' ]
            # session stores the end time as an eBay string (datetimes are
            # not JSON-serializable); convert back to a datetime here
            initial['tTimeEnd' ] = getDateTimeObjGotEbayStr( session['sTimeEnd' ] )
            initial['iUser' ] = self.request.user
            #
        #
        return initial

    def troubleshoot_form_valid( self, form ):
        # NOTE(review): not a standard Django CBV hook — looks like a
        # debugging rename of form_valid() left in place; confirm whether it
        # is ever invoked.
        #
        instance = form.instance
        #session = self.request.session
        ##
        #instance.iItemNumb_id = instance.iItemNumb_id or session['iItemNumb']
        #instance.iSearch_id   = instance.iSearch_id   or session['iSearch' ]
        #instance.tTimeEnd     = instance.tTimeEnd     or session['tTimeEnd' ]
        #instance.iUser        = self.request.user
        #
        maybePrint( 'iItemNumb_id, iSearch_id, tTimeEnd, iUser:',
                    instance.iItemNumb_id,
                    instance.iSearch_id,
                    instance.tTimeEnd,
                    instance.iUser )
        #
        return super().form_valid( form )
| [
"gravesricharde@yahoo.com"
] | gravesricharde@yahoo.com |
29aaf9830413dce680cb164b3a8dd63745dd68af | 1572b7dea50699582879b2b9fcedef12f2ef6704 | /verification/src/referee.py | 26e014f1e115a5887f39fd778b5563bcb03c8beb | [] | no_license | khanukov/checkio-empire-broken-report | 7106869fc504a2551fb7a1d412245a74c9401f64 | 64d68d89b99c2116c12fd1d579961ab699a760c6 | refs/heads/master | 2020-12-03T02:19:33.174438 | 2015-04-07T14:12:08 | 2015-04-07T14:12:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 452 | py | from checkio_referee import RefereeCodeGolf
from checkio_referee import covercodes
import settings_env
from tests import TESTS
# TODO Golf
class Referee(RefereeCodeGolf):
    """CheckiO code-golf referee configuration for this mission."""
    # Maximum solution length accepted for golf scoring -- exact semantics
    # are defined by RefereeCodeGolf.
    DEFAULT_MAX_CODE_LENGTH = 150
    BASE_POINTS = 15
    TESTS = TESTS
    ENVIRONMENTS = settings_env.ENVIRONMENTS
    # Name of the function each solution must define.
    DEFAULT_FUNCTION_NAME = "golf"
    # Per-environment wrapper applied to user code before execution;
    # None means the code runs unmodified.
    ENV_COVERCODE = {
        "python_2": covercodes.py_2_str,
        "python_3": None,
        "javascript": None
    }
| [
"bvv.mag@gmail.com"
] | bvv.mag@gmail.com |
b7fa421982e1756aca5ca2afc4c743d1bd7b7d0d | dda204f76c6d03df02e4d620257df55165461ae7 | /Practical5/collatz.py | 5afe7cf56f43467fef1f5bf1a68e5e7385b9a827 | [] | no_license | YuyangMiao/IBI1_2019-20 | 48127bbc137b3155aac99dd5cc0c39d4c1db75ee | a8921fc37af78ddd2010d2ccd141d1c506f0a3ab | refs/heads/master | 2021-02-14T07:33:54.940625 | 2020-05-14T09:35:00 | 2020-05-14T09:35:00 | 244,784,924 | 0 | 0 | null | 2020-03-04T02:00:12 | 2020-03-04T02:00:12 | null | UTF-8 | Python | false | false | 491 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Mar 11 09:54:34 2020
@author: joe_m
"""
#import an integer n
n=7
#Judge if n=1
#If yes: print n and stop
#If no:
# print n
# Repeat:
# If n is even: n=n/2, print n
# If n is odd: n=3n+1, print n
# If n==1, stop
if n==1:
print (n)
else:
print (n)
while n!=1:
if n%2==0:
n=n/2
print (n)
else:
n=3*n+1
print (n)
if n==1:
break | [
"3190111061@zju.edu.cn"
] | 3190111061@zju.edu.cn |
280466d4cc30f09e5d4ebb4b2f2aeb4e39a6801b | 70b0e98649db4dd782e2ce16b2ab1cbdbc5ccf44 | /main.py | 11b7da88941f18e9195f816f59e8a5d36eee0a31 | [] | no_license | nadiraziz/stock-news | 6b513a42860b40e4b0ed950d17c181b774c9f51a | 0fac2e12b7b760bd6a356735d04568348fca2e6e | refs/heads/master | 2023-04-15T10:11:43.661341 | 2021-04-25T07:23:04 | 2021-04-25T07:23:04 | 361,359,461 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,248 | py | import requests
import os  # NOTE(review): imported but unused in this script
from twilio.rest import Client

STOCK_NAME = "TSLA"
COMPANY_NAME = "Tesla Inc"

# NOTE(review): Twilio credentials are hard-coded in source; they should be
# loaded from environment variables or a secrets store before this script is
# shared or deployed, and these values rotated.
account_sid = 'ACcf5b9ee7e3c0d76bcd8477bd41efc644'
auth_token = '7ea01d8e3868fe7f2df26f13a22d9e45'

STOCK_ENDPOINT = "https://www.alphavantage.co/query"
NEWS_ENDPOINT = "https://newsapi.org/v2/everything"

# NOTE(review): API keys hard-coded as well -- same concern as the Twilio
# credentials above.
STOCK_API_KEY = "GKO60DXL5GAFIP4H"
NEWS_API_KEY = "240697c18cb04af99b276367d909771e"

## STEP 1: Use https://www.alphavantage.co/documentation/#daily
# When stock price increase/decreases by 5% between yesterday and the day before yesterday then print("Get News").
# Get yesterday's closing stock price. Hint: You can perform list comprehensions on Python dictionaries. e.g. [new_value for (key, value) in dictionary.items()]
stock_params = {
    "function": "TIME_SERIES_DAILY",
    "symbol": STOCK_NAME,
    "apikey": STOCK_API_KEY,
}

response = requests.get(STOCK_ENDPOINT, params=stock_params)
data = response.json()["Time Series (Daily)"]
print(data)
# Assumes the API returns daily entries newest-first, so index 0 is the most
# recent close ("yesterday") -- TODO confirm ordering is guaranteed.
data_list = [value for (key, value) in data.items()]
yesterday_data = data_list[0]
yesterday_closing = yesterday_data["4. close"]
print(yesterday_closing)

# Get the day before yesterday's closing stock price
day_before_yesterday = data_list[1]
day_before_yesterday_closing = day_before_yesterday["4. close"]
print(day_before_yesterday_closing)

# Find the positive difference between 1 and 2. e.g. 40 - 20 = -20, but the positive difference is 20. Hint: https://www.w3schools.com/python/ref_func_abs.asp
difference = float(yesterday_closing) - float(day_before_yesterday_closing)
up_down = None
if difference > 0:
    up_down = "🔺"
else:
    up_down = "🔻"
positive_difference = abs(difference)

# Work out the percentage difference in price between closing price yesterday and closing price the day before yesterday.
# NOTE(review): denominator is yesterday's close; percent change is usually
# taken relative to the *earlier* close -- confirm which is intended.
percentage_diff = round((positive_difference / float(yesterday_closing)) * 100)

#If percentage is greater than 5 then print("Get News").
# (The threshold actually used below is 1%, not 5%.)
if percentage_diff > 1:
    # Instead of printing ("Get News"), use the News API to get articles related to the COMPANY_NAME.
    news_params = {
        "qInTitle": COMPANY_NAME,
        "apiKey": NEWS_API_KEY,
    }
    news_response = requests.get(NEWS_ENDPOINT, params=news_params)
    articles = news_response.json()["articles"]

    # Use Python slice operator to create a list that contains the first 3 articles. Hint: https://stackoverflow.com/questions/509211/understanding-slice-notation
    three_articles = articles[:3]

    # Create a new list of the first 3 article's headline and description using list comprehension.
    formatted_article = [f"Headline: {article['title']}\nBrief: {article['description']}" for article in three_articles]

    ## STEP 3: Use twilio.com/docs/sms/quickstart/python
    client = Client(account_sid, auth_token)

    # to send a separate message with each article's title and description to your phone number.
    # Send each article as a separate message via Twilio.
    for article_message in formatted_article:
        message = client.messages \
            .create(
                body=f'"{COMPANY_NAME}": {up_down}{percentage_diff}%\n{article_message}',
                from_='+17632251469',
                to='+919995957505'
            )
        print(message.status)
| [
"nadiraziziyah@gmail.com"
] | nadiraziziyah@gmail.com |
09ac2327168508b61c167a4490edbc965fda67e3 | 7a55d3fac2bc2b7afd46300182944d3cb1b8a370 | /clearpath/clearpath | a0265890c276994cb6ac2240c003f7c7a579b66e | [] | no_license | btownshend/CNCConfig | 5d3eca22573c0534ce0b5c43a6958c2d5011a992 | bdadea7bacf4c5d373faeab30f31b1d5145fb3d3 | refs/heads/main | 2023-03-16T16:11:44.071625 | 2021-03-14T22:08:10 | 2021-03-14T22:08:10 | 329,538,393 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,415 | #!/usr/bin/env python
# LinuxCNC HAL userspace component bridging ClearPath SC-HUB servo status and
# enable signals through HAL pins. The native side lives in getstatus.so.
import ctypes
import hal, time
import sys,os

print('sys.argv[0] =', sys.argv[0])
pathname = os.path.dirname(sys.argv[0])
print('path =', pathname)
# The compiled helper library is expected next to this script.
lib=ctypes.CDLL(pathname+"/getstatus.so")
print('getstatus.so loaded')

h=hal.component("clearpath")
# Two motor ports (0 and 1), each with a fault output and an enable input.
h.newpin("0.fault",hal.HAL_BIT,hal.HAL_OUT)
h.newpin("1.fault",hal.HAL_BIT,hal.HAL_OUT)
h.newpin("0.enable",hal.HAL_BIT,hal.HAL_IN)
h.newpin("1.enable",hal.HAL_BIT,hal.HAL_IN)
print('components/pins created')

try:
    if lib.initialize() < 0:
        print("Unable to initialize ClearPath SC-HUB connection")
        raise SystemExit
    print("initialized")
    print(dir(h))
    h.ready()
    print("ready")
    # Poll loop: forward the enable pins to the drives and mirror drive
    # status back onto the fault pins, ~4 times per second.
    while True:
        time.sleep(0.25)
        #print("update")
        if lib.setenable(0,h['0.enable']) < 0:
            print("clearpath: failed to setenable for port 0")
            h['0.fault']=1
            continue
        if lib.setenable(1,h['1.enable']) < 0:
            print("clearpath: failed to setenable for port 1")
            h['1.fault']=1
            continue
        s0=lib.getstatus(0)
        if s0<0:
            print("clearpath: getstatus(0) failed")
        # Any non-zero status (including the <0 failure above) is a fault.
        h['0.fault']=(s0!=0)
        s1=lib.getstatus(1)
        if s1<0:
            print("clearpath: getstatus(1) failed")
        h['1.fault']=(s1!=0)
except KeyboardInterrupt:
    # Ctrl-C: shut the hub connection down cleanly before exiting.
    lib.shutdown()
    raise SystemExit
| [
"bst@tc.com"
] | bst@tc.com | |
58b2a1133097168bc9430bca771b6c3fc494b986 | da62bf253e652f6beb57ed2f9b834db1275d3dca | /RNN.py | 619405413bd7540b8b075bdd0567083830aa2ef0 | [] | no_license | jefftsai8049/ML-Deep_Learning_Final | ba849b563503ba08516e5bd68dc14a445e196c88 | 2e8405bea2f428aa4c61350d0407b9b918b3f651 | refs/heads/master | 2021-01-10T17:07:04.445436 | 2015-06-20T11:27:59 | 2015-06-20T11:27:59 | 37,315,443 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,234 | py | __author__ = 'root'
import numpy as np
import theano
import theano.tensor as T
class RNN:
    """Single-hidden-layer recurrent network composed of the layers below.

    layerNumber is a 3-sequence: (input size, hidden size, output size).
    """

    def __init__(self,x,y,layerNumber,memory=None,parameters=None):
        # Only the fresh-initialisation path is implemented; a supplied
        # `parameters` value is currently ignored.
        if parameters is None:
            self.inputLayer = hiddenLayer(x,layerNumber[0],layerNumber[1],memory)
            self.outputLayer = outputLayer(self.inputLayer.z,layerNumber[1],layerNumber[2])
            # NOTE(review): W, WH and WOut are single theano shared variables,
            # so `+` here builds a symbolic elementwise sum, not a parameter
            # list -- confirm whether [W, WH, WOut] was intended.
            self.parameters = self.inputLayer.W+self.inputLayer.WH+self.outputLayer.WOut
class outputLayer:
    """Softmax output layer: yPred = activationFunction(x . WOut)."""

    def __init__(self,x,inLayerNum,outLayerNum,W=None,activationFunction = T.nnet.softmax):
        # Small symmetric range for random weight initialisation.
        lower = -0.01
        upper = 0.01
        # for output layer
        if W is None:
            # BUG FIX: np.random.normal() has no low/high parameters (the old
            # call raised TypeError); a uniform draw over [lower, upper) was
            # clearly intended.
            WOutInitail = np.asarray(
                np.random.uniform(low=lower, high=upper, size=(inLayerNum, outLayerNum)),
                dtype=theano.config.floatX)
            self.WOut = theano.shared(value=WOutInitail, name="WOut")
        # NOTE(review): a caller-supplied W is still ignored (self.WOut stays
        # unset and the dot below fails) -- left as-is pending intended
        # semantics of the W argument.
        self.activationFunction = activationFunction
        a = T.dot(x, self.WOut)
        y = self.activationFunction(a)
        self.yPred = y
        self.parameters = [self.WOut]

    def costFunction(self,y):
        """Mean categorical cross-entropy between self.yPred and targets y."""
        return T.mean(T.nnet.categorical_crossentropy(self.yPred,y))
class hiddenLayer:
    """Recurrent hidden layer: z = activationFunction(x . W + memory . WH)."""

    def __init__(self,x,inLayerNum,outLayerNum,memory=None,W=None,WH=None,activationFunction = T.nnet.sigmoid):
        # Small symmetric range for random weight initialisation.
        lower = -0.01
        upper = 0.01
        # Recurrent (hidden-to-hidden) weights.
        if WH is None:
            # BUG FIX: np.random.normal() has no low/high parameters (the old
            # call raised TypeError); a uniform draw was intended.
            WHInitial = np.asarray(
                np.random.uniform(low=lower, high=upper, size=(outLayerNum, outLayerNum)),
                dtype=theano.config.floatX)
            self.WH = theano.shared(value=WHInitial, name="WH")
        # Input-to-hidden weights (same init range as WH, previously
        # duplicated as literal -0.01/0.01).
        if W is None:
            WInitial = np.asarray(
                np.random.uniform(low=lower, high=upper, size=(inLayerNum, outLayerNum)),
                dtype=theano.config.floatX)
            self.W = theano.shared(value=WInitial, name="W")
        if memory is None:
            # BUG FIX: the memory is multiplied by WH, an
            # (outLayerNum x outLayerNum) matrix, so the initial state must
            # have outLayerNum entries (was inLayerNum).
            memoryInitial = np.zeros((outLayerNum,), dtype=theano.config.floatX)
            self.memory = theano.shared(value=memoryInitial, name="memory")
        else:
            # BUG FIX: a caller-supplied memory was previously ignored,
            # leaving self.memory unset and crashing in the dot product below.
            self.memory = memory
        self.activationFunction = activationFunction
        a = T.dot(x, self.W) + T.dot(self.memory, self.WH)
        # Expose the new hidden state for the next time step.
        self.memory = a
        z = self.activationFunction(a)
        self.z = z
        # BUG FIX: was [W, WH], i.e. the (usually None) constructor arguments
        # rather than the shared variables actually used.
        self.parameters = [self.W, self.WH]
"jefftsai8049@gmail.com"
] | jefftsai8049@gmail.com |
807b8f72c43040317da699074158ef426c15575e | 6bb45c5892b4c9692dcc44116fb73dc9e7ab90ff | /advanced_functionality/autogluon-sagemaker-pipeline/setup.py | 56a675d8c0cac3a064199a11bd56e8e1316b0dce | [
"Apache-2.0",
"BSD-2-Clause"
] | permissive | aws/amazon-sagemaker-examples | 8359afe544e873662bda5b8d2b07399c437213c9 | 43dae4b28531cde167598f104f582168b0a4141f | refs/heads/main | 2023-08-26T04:42:52.342776 | 2023-08-25T14:37:19 | 2023-08-25T14:37:19 | 107,937,815 | 4,797 | 3,519 | Apache-2.0 | 2023-09-14T19:47:03 | 2017-10-23T05:55:22 | Jupyter Notebook | UTF-8 | Python | false | false | 1,569 | py | import os
import setuptools

# Package metadata (title, version, author, ...) lives in
# pipelines/__version__.py and is loaded into `about` by exec'ing it.
about = {}
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, "pipelines", "__version__.py")) as f:
    exec(f.read(), about)

with open("README.md", "r") as f:
    readme = f.read()

required_packages = ["sagemaker"]
extras = {
    "test": [
        "black",
        "coverage",
        "flake8",
        "mock",
        "pydocstyle",
        "pytest",
        "pytest-cov",
        "sagemaker",
        "tox",
    ]
}
setuptools.setup(
    name=about["__title__"],
    description=about["__description__"],
    version=about["__version__"],
    author=about["__author__"],
    # BUG FIX: was author_email=["__author_email__"] -- a literal one-element
    # list containing the string "__author_email__" instead of the metadata
    # value loaded above.
    author_email=about["__author_email__"],
    long_description=readme,
    long_description_content_type="text/markdown",
    url=about["__url__"],
    license=about["__license__"],
    packages=setuptools.find_packages(),
    include_package_data=True,
    python_requires=">=3.6",
    install_requires=required_packages,
    extras_require=extras,
    entry_points={
        "console_scripts": [
            "get-pipeline-definition=pipelines.get_pipeline_definition:main",
            "run-pipeline=pipelines.run_pipeline:main",
        ]
    },
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Intended Audience :: Developers",
        "Natural Language :: English",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
    ],
)
| [
"noreply@github.com"
] | noreply@github.com |
d3cdab89c560145a18210360377b1e737aa05c00 | a6af3de724b8156368ec8026cd1053c6b5bca26f | /test/testdata/python-fabric/files/deploy.py | d82430929b1ba1ed1ac4b1c4b6694ad4170d1315 | [
"MIT"
] | permissive | knick-knack/knick-knack | 47be8a515d0731a8120ec0bedb867dec58fd55cf | 29793bbe0ecd3a77fecd29c0dc676f0d65f911a0 | refs/heads/master | 2021-01-10T22:11:54.656370 | 2016-10-28T13:39:28 | 2016-10-28T13:39:28 | 23,700,731 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 229 | py | import sys
from fabric.api import *
from fabric.contrib import *

# Template placeholder: "{{ name }}" is substituted with the project module
# name when knick-knack renders this file, so it is not valid Python as-is.
import {{ name }}

# Tell Fabric which tasks are exported by this file
__all__ = ['deploy_config']

@task(default=True)
def deploy_config():
    # Placeholder deployment task (Python 2 print statement).
    print 'start here'
| [
"haimich@gmx.net"
] | haimich@gmx.net |
b3ae43c7b44548459e24734aaaad6316c5adc823 | 4f5d18e12f1f3a19109691db98cf9dfdd7fd0d08 | /contest2/B_sequence_type/venv/bin/pip | 48b76dd55dc88de4cf06a824b3e0a4ace73fbb83 | [] | no_license | DimSap/Young-Yandex_algorithms | dbe9235abdd246f95cc987ed320ed31652ac4690 | dc9367dbf7ebe9464caa35a1533294629c9068a2 | refs/heads/main | 2023-05-14T09:41:23.494339 | 2021-06-09T20:46:11 | 2021-06-09T20:46:11 | 373,651,488 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 323 | #!/home/dimsap/PycharmProjects/Young&&Yandex_algorithms/Young-Yandex_algorithms/contest2/B_sequence_type/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.cli.main import main

if __name__ == '__main__':
    # Normalise argv[0]: drop a trailing "-script.pyw" or ".exe" launcher
    # suffix so pip reports a clean program name, then delegate to pip.
    program = sys.argv[0]
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', program)
    sys.exit(main())
| [
"DmitriySapronovv@yandex.ru"
] | DmitriySapronovv@yandex.ru | |
5c6ae8e9e3c9853b4a44d39dd7d108e8d811cd6a | a93b08a2545a4d51d5a4ca078cdca27ce2240a0a | /config.py | fe6ea06cef37626e2a619d77cef31d69206194c2 | [
"MIT"
] | permissive | islobozhan/garage48-open-banking | 19741abbf21459b5ba17c1f1b51765dbfe96237f | 197ee3a3c78f53a5d0f99f883471648f883699b7 | refs/heads/master | 2021-04-28T18:30:12.688348 | 2018-02-17T16:14:06 | 2018-02-17T16:14:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,966 | py | import os
import sys
from raygun4py.middleware import flask as flask_raygun
# This config still supports Python 2: the URL-parsing module moved to
# urllib.parse in Python 3, so pick the right one by major version.
PYTHON_VERSION = sys.version_info[0]
if PYTHON_VERSION == 3:
    import urllib.parse
else:
    import urlparse

# Project root directory; anchors the sqlite database paths used below.
basedir = os.path.abspath(os.path.dirname(__file__))
if os.path.exists('config.env'):
print('Importing environment from .env file')
for line in open('config.env'):
var = line.strip().split('=')
if len(var) == 2:
os.environ[var[0]] = var[1].replace("\"", "")
class Config:
    """Base settings shared by every environment-specific config below."""

    # TODO: Enable cache later...
    CACHE_TYPE = "null"
    APP_NAME = os.environ.get('APP_NAME') or 'Flask-Base'
    if os.environ.get('SECRET_KEY'):
        SECRET_KEY = os.environ.get('SECRET_KEY')
    else:
        SECRET_KEY = 'SECRET_KEY_ENV_VAR_NOT_SET'
        print('SECRET KEY ENV VAR NOT SET! SHOULD NOT SEE IN PRODUCTION')
    SQLALCHEMY_COMMIT_ON_TEARDOWN = True

    # Email
    # NOTE(review): environment values come back as strings ('587', 'false'
    # is truthy) -- verify the mail extension coerces these as expected.
    MAIL_SERVER = os.environ.get('MAIL_SERVER') or 'smtp.sendgrid.net'
    MAIL_PORT = os.environ.get('MAIL_PORT') or 587
    MAIL_USE_TLS = os.environ.get('MAIL_USE_TLS') or True
    MAIL_USE_SSL = os.environ.get('MAIL_USE_SSL') or False
    MAIL_USERNAME = os.environ.get('MAIL_USERNAME')
    MAIL_PASSWORD = os.environ.get('MAIL_PASSWORD')
    MAIL_DEFAULT_SENDER = os.environ.get('MAIL_DEFAULT_SENDER')

    # Analytics
    GOOGLE_ANALYTICS_ID = os.environ.get('GOOGLE_ANALYTICS_ID') or ''
    SEGMENT_API_KEY = os.environ.get('SEGMENT_API_KEY') or ''

    # Admin account
    ADMIN_PASSWORD = os.environ.get('ADMIN_PASSWORD') or 'password'
    ADMIN_EMAIL = os.environ.get('ADMIN_EMAIL') or 'flask-base-admin@example.com'
    EMAIL_SUBJECT_PREFIX = '[{}]'.format(APP_NAME)
    EMAIL_SENDER = '{app_name} Admin <{email}>'.format(app_name=APP_NAME, email=MAIL_USERNAME)

    REDIS_URL = os.getenv('REDIS_URL') or 'http://localhost:6379'
    RAYGUN_API_KEY = os.environ.get('RAYGUN_API_KEY')

    # Parse the REDIS_URL to set RQ config variables
    # (registering the 'redis' scheme so netloc parsing works).
    if PYTHON_VERSION == 3:
        urllib.parse.uses_netloc.append('redis')
        url = urllib.parse.urlparse(REDIS_URL)
    else:
        urlparse.uses_netloc.append('redis')
        url = urlparse.urlparse(REDIS_URL)
    RQ_DEFAULT_HOST = url.hostname
    RQ_DEFAULT_PORT = url.port
    RQ_DEFAULT_PASSWORD = url.password
    RQ_DEFAULT_DB = 0

    @staticmethod
    def init_app(app):
        """Hook for environment-specific app initialisation; overridden below."""
        pass
class DevelopmentConfig(Config):
    """Local development settings: debug on, local Postgres database."""

    DEBUG = True
    ASSETS_DEBUG = True

    # NOTE(review): local database credentials are hard-coded here; fine for
    # a throwaway dev DB, but consider moving them to config.env.
    POSTGRES_URL = '127.0.0.1:5432'
    POSTGRES_DB = 'garage48banking'
    POSTGRES_USER = 'testuser'
    POSTGRES_PW = 'testuser'

    # This assignment forces the postgres branch below, so the sqlite
    # fallback in the else-branch is currently unreachable.
    os.environ['DATABASE'] = 'postgresql'
    if os.environ.get('DATABASE') == 'postgresql':
        # POSTGRES_URL = os.environ.get('POSTGRES_URL')
        # POSTGRES_DB = os.environ.get('POSTGRES_DB')
        # POSTGRES_USER = os.environ.get('POSTGRES_USER')
        # POSTGRES_PW = os.environ.get('POSTGRES_PW')
        POSTGRESQL_URL = 'postgresql+psycopg2://{user}:{pw}@{url}/{db}' \
            .format(url=POSTGRES_URL,
                    db=POSTGRES_DB,
                    user=POSTGRES_USER,
                    pw=POSTGRES_PW)
        SQLALCHEMY_DATABASE_URI = POSTGRESQL_URL
        print('POSTGRESQL is used...')
    else:
        SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') \
            or 'sqlite:///' + os.path.join(basedir, 'data-dev.sqlite')

    print('THIS APP IS IN DEBUG MODE. YOU SHOULD NOT SEE THIS IN PRODUCTION.')
class TestingConfig(Config):
    """Settings for the test suite: separate sqlite DB, CSRF disabled."""
    TESTING = True
    SQLALCHEMY_DATABASE_URI = os.environ.get('TEST_DATABASE_URL') \
        or 'sqlite:///' + os.path.join(basedir, 'data-test.sqlite')
    # CSRF tokens get in the way of posting forms from tests.
    WTF_CSRF_ENABLED = False
class ProductionConfig(Config):
    """Production settings: requires SECRET_KEY, attaches Raygun reporting."""
    SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') \
        or 'sqlite:///' + os.path.join(basedir, 'data.sqlite')
    SSL_DISABLE = (os.environ.get('SSL_DISABLE') or 'True') == 'True'

    @classmethod
    def init_app(cls, app):
        Config.init_app(app)
        # Fail fast if the secret key was not provided via the environment.
        assert os.environ.get('SECRET_KEY'), 'SECRET_KEY IS NOT SET!'
        # Attach Raygun crash reporting middleware to the app.
        flask_raygun.Provider(app, app.config['RAYGUN_API_KEY']).attach()
class HerokuConfig(ProductionConfig):
    """Production settings adjusted for Heroku's reverse-proxy routing."""
    @classmethod
    def init_app(cls, app):
        ProductionConfig.init_app(app)

        # Handle proxy server headers
        from werkzeug.contrib.fixers import ProxyFix
        app.wsgi_app = ProxyFix(app.wsgi_app)
class UnixConfig(ProductionConfig):
    """Production settings for a plain Unix host: warnings go to syslog."""
    @classmethod
    def init_app(cls, app):
        ProductionConfig.init_app(app)

        # Log to syslog
        import logging
        from logging.handlers import SysLogHandler
        syslog_handler = SysLogHandler()
        syslog_handler.setLevel(logging.WARNING)
        app.logger.addHandler(syslog_handler)
# Name -> config class map consumed by the application factory.
config = {
    'development': DevelopmentConfig,
    'testing': TestingConfig,
    'production': ProductionConfig,
    'default': DevelopmentConfig,
    'heroku': HerokuConfig,
    'unix': UnixConfig
}
| [
"all-cs.info@i.ua"
] | all-cs.info@i.ua |
fe52b2cd35017acf657af7d8ab0cb4f759250d7a | 0e08e9873549c514245842c5f4ad01769e1c76d6 | /myblog/blog/tests.py | ec7bc624daecb07dd9bc9025f52c0c33afa1036c | [] | no_license | liangxs0/Django_study | 39afe9c889467eb81e2ecdcee4e285c2bd27d28a | 2f509bce6cdaaee288c37a603978a96ffc43f0e4 | refs/heads/main | 2023-04-25T20:30:05.275066 | 2021-05-31T03:27:24 | 2021-05-31T03:27:24 | 372,365,793 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 569 | py | # from django.test import TestCase
# #
#
# from django.contrib.auth.hashers import make_password, check_password
# # # Create your tests here.
# x = make_password("123", 'abc', 'pbkdf2_sha256')
# y = make_password("123", 'abc', 'pbkdf2_sha256')
# print(x)
# print(y)
def a(nums):
    """Concatenate every decimal digit of *nums*, sorted in descending order.

    Prints the flattened digit list before and after sorting (debug output
    preserved), then returns the digits joined into one string.
    """
    digits = [ch for num in nums for ch in str(num)]
    print(digits)
    digits.sort(reverse=True)
    print(digits)
    return ''.join(digits)
# BUG FIX: str.join() requires string elements; joining ints raised
# TypeError, so the print below never executed. Convert each number first.
c = "".join(str(num) for num in [3, 30, 34, 5, 9])
print(c)
"1033808656@qq.com"
] | 1033808656@qq.com |
3334166df80baae5d623b50fb2d4792c278c6a11 | 80465f904d8c1855af5fb0b40fbaeab09ef1a614 | /backEnd/ETL Code/sqlFetch.py | ac07d73e15bdd146048df17c70daea9461318371 | [] | no_license | Zarana-Parekh/analytics | 2ce2b67938e7355c0bb22463dd9246566df575ef | 66a620f521b690456bb7ea155d5512eba8ff4301 | refs/heads/master | 2021-01-18T01:23:25.529695 | 2015-10-16T09:34:12 | 2015-10-16T09:34:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,380 | py | import MySQLdb
import exceptions
import traceback
import sys
import os
#from dataloader import *
# Module-level MySQL connection and cursor shared by every helper below.
# NOTE(review): credentials are hard-coded in source -- move them to a
# config file or environment variables.
db=MySQLdb.connect("localhost","root","1234","IITBxDataAnalytics")
cursor=db.cursor()
def psVideo(courseName, videoSysName):
    """Return (videoTitle, chapterSysName, chapteTitle) for one video, or None.

    Rewritten to use DB-API parameter binding: the old %-formatted SQL was
    open to SQL injection through courseName/videoSysName.
    """
    sql = ("SELECT v.videoTitle, v.chapterSysName, c.chapteTitle "
           "FROM CourseVideos v, CourseChapter c "
           "WHERE v.courseName = c.courseName "
           "AND v.chapterSysName = c.chapterSysName "
           "AND v.courseName = %s AND v.videoSysName = %s")
    try:
        cursor.execute(sql, (courseName, videoSysName))
        return cursor.fetchone()
    except Exception:
        traceback.print_exc()
        db.rollback()
def psChapterSess(courseName, sessionSysName):
    """Return (chapteTitle, sessionTitle, chapterSysName) for a session, or None.

    Parameterised query (the old %-formatted SQL was injectable).
    """
    sql = ("SELECT c.chapteTitle, s.sessionTitle, c.chapterSysName "
           "FROM CourseChapter c JOIN CourseChapterSession s "
           "ON c.courseName = s.courseName AND c.chapterSysName = s.chapterSysName "
           "WHERE c.courseName = %s AND s.sessionSysName = %s")
    try:
        cursor.execute(sql, (courseName, sessionSysName))
        return cursor.fetchone()
    except Exception:
        traceback.print_exc()
        db.rollback()
def psProblem(courseName, quizSysName):
    """Return (chapterSysName, chapteTitle, quizTitle) for a quiz, or None.

    Parameterised query (the old %-formatted SQL was injectable).
    """
    sql = ("SELECT p.chapterSysName, c.chapteTitle, p.quizTitle "
           "FROM CourseProblems p JOIN CourseChapter c "
           "ON p.chapterSysName = c.chapterSysName "
           "WHERE p.courseName = %s AND quizSysName = %s")
    try:
        cursor.execute(sql, (courseName, quizSysName))
        return cursor.fetchone()
    except Exception:
        traceback.print_exc()
        db.rollback()
def psChapter(courseName, chapterSysName):
    """Return (chapteTitle,) for a chapter, or None.

    Parameterised query (the old %-formatted SQL was injectable).
    """
    sql = ("SELECT chapteTitle FROM CourseChapter "
           "WHERE courseName = %s AND chapterSysName = %s")
    try:
        cursor.execute(sql, (courseName, chapterSysName))
        return cursor.fetchone()
    except Exception:
        traceback.print_exc()
        db.rollback()
def psDiscuss(discussionSysId, courseName):
    """Return (discussionTitle,) for a discussion, or None.

    Parameterised query (the old %-formatted SQL was injectable).
    """
    sql = ("SELECT discussionTitle FROM CourseDiscussions "
           "WHERE discussionSysId = %s AND courseName = %s")
    try:
        cursor.execute(sql, (discussionSysId, courseName))
        return cursor.fetchone()
    except Exception:
        traceback.print_exc()
        db.rollback()
def psVideoInteract(sessionsysname, lmsname, orgname, coursename, courserun, lmsuserid,
                    eventname, eventno, videosysname, videotitle, chaptersysname, chapterTitle,
                    oldSeekTime, currseektime, videonavigtype, oldspeed, currspeed,
                    source, createdatetime, lastmoddatetime):
    """Insert one row into EventvideoInteract (video seek/speed event).

    Rewritten with DB-API parameter binding: the old %-formatted SQL was
    injectable and its format codes (%d/%f/%s) did not match the argument
    types, so the statement could fail at format time.
    """
    sql = ("INSERT INTO EventvideoInteract (sessionsysName, lmsName, orgName, courseName, "
           "courseRun, lmuserId, eventName, eventNo, videoSysName, videoTitle, "
           "chapterSysName, chapterTitle, oldSeekTime, currseekTime, videoNavigType, "
           "oldSpeed, currSpeed, source, createDateTime, lastModDateTime) "
           "VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, "
           "%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)")
    try:
        cursor.execute(sql, (sessionsysname, lmsname, orgname, coursename, courserun,
                             lmsuserid, eventname, eventno, videosysname, videotitle,
                             chaptersysname, chapterTitle, oldSeekTime, currseektime,
                             videonavigtype, oldspeed, currspeed, source,
                             createdatetime, lastmoddatetime))
        # NOTE(review): no db.commit() here (matches the original) -- callers
        # are responsible for committing.
    except Exception:
        traceback.print_exc()
        db.rollback()
def psCourseInteract(lmsname, orgname, coursename, courserun, lmuserid,
                     eventname, eventno, moduletype, modulesysname, moduletitle,
                     chaptersysname, chaptertitle, createdatetime, moddatetime,
                     oldposition, curposition, source):
    """Insert one row into EventCourseInteract (course navigation event).

    Rewritten with DB-API parameter binding: the old %-formatted SQL was
    injectable and supplied 18 placeholders for 17 arguments, so the INSERT
    could not have succeeded as written.
    """
    sql = ("INSERT INTO EventCourseInteract (lmsName, orgName, courseName, courseRun, "
           "lmUserId, eventName, eventNo, moduleType, modulesysName, moduleTitle, "
           "chapterSysName, chapterTitle, createdDateTime, moddateTime, oldPosition, "
           "curPosition, source) "
           "VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)")
    try:
        cursor.execute(sql, (lmsname, orgname, coursename, courserun, lmuserid,
                             eventname, eventno, moduletype, modulesysname, moduletitle,
                             chaptersysname, chaptertitle, createdatetime, moddatetime,
                             oldposition, curposition, source))
        # NOTE(review): no db.commit() here (matches the original) -- callers
        # are responsible for committing.
    except Exception:
        traceback.print_exc()
        db.rollback()
def psProbInteract(lmsname, orgname, coursename, lmsuserid, eventname, eventno,
                   quizzsysname, quiztitle, chaptersysname, chaptertitle, hintavailable,
                   hintmode, inputtype, responsetype, variantid, oldscore, newscore,
                   maxgrade, attempts, maxattempts, choice, success, source,
                   probsubtime, done, createdatetime, lastmoddatetime, courserun):
    """Insert one row into EventprobInteract (problem/quiz attempt event).

    Rewritten with DB-API parameter binding: the old %-formatted SQL was
    injectable, mixed %d/%f/%s codes that did not match the argument types,
    and contained a stray trailing comma in the VALUES tuple.
    """
    sql = ("INSERT INTO EventprobInteract (lmsName, orgName, courseName, lmsuserId, "
           "eventName, eventNo, quizzSysName, quizTitle, chapterSysName, chapterTitle, "
           "hintAvailable, hintMode, inputType, responseType, variantId, oldscore, "
           "newscore, maxGrade, attempts, maxAttempts, choice, success, source, "
           "probSubTime, done, createDateTime, lastModDatetime, courseRun) "
           "VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, "
           "%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)")
    try:
        cursor.execute(sql, (lmsname, orgname, coursename, lmsuserid, eventname, eventno,
                             quizzsysname, quiztitle, chaptersysname, chaptertitle,
                             hintavailable, hintmode, inputtype, responsetype, variantid,
                             oldscore, newscore, maxgrade, attempts, maxattempts, choice,
                             success, source, probsubtime, done, createdatetime,
                             lastmoddatetime, courserun))
        # NOTE(review): no db.commit() here (matches the original) -- callers
        # are responsible for committing.
    except Exception:
        traceback.print_exc()
        db.rollback()
| [
"jay15@nsl-23.cse.iitb.ac.in"
] | jay15@nsl-23.cse.iitb.ac.in |
31d4f840a52821b8fe76f7119d5d16e8f9413696 | f677dda255ff4670bc04bb6de58e596041934521 | /miclave/urls.py | c6521154bf3b7cdf098c5c3c9c423bce4e310c71 | [] | no_license | JorgeL-G/miclave | 70b06f0127c1b64019efa33197ea60698ce65291 | 6d092a712be262ce48cd2d6548a768ca5e76413d | refs/heads/master | 2020-12-25T11:15:24.764161 | 2016-08-10T14:29:43 | 2016-08-10T14:29:43 | 65,387,646 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 757 | py | """miclave URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
# Route table: only the Django admin site is wired up for this project.
urlpatterns = [
    url(r'^admin/', include(admin.site.urls)),
]
| [
"jleon@turpialdev.com"
] | jleon@turpialdev.com |
f2dcb9eb1bf133d9f46b28478f6f446496c241b9 | 47bb98bfab29a736c3015bb0fb42d8e6370c60d6 | /consumeraffairs/companies/views.py | 19fb406e85f19f3c2d829b55d4183084afc4039d | [
"MIT"
] | permissive | devoredevelops/Django-Backend-Test | f3976cd339fc2eca878d2f4164fd883ee50f1bb0 | c46d5b2d8a1f98bf3ec69524ab7a2e344514e538 | refs/heads/master | 2020-04-25T15:40:21.428091 | 2019-02-27T10:05:27 | 2019-02-27T10:05:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,933 | py | from django.shortcuts import render
from django.contrib.auth.mixins import LoginRequiredMixin
from django.urls import reverse_lazy
from django.views.generic import ListView, DetailView, RedirectView
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from .models import Company
from .forms import CompanyForm
class CompanyDetailView(LoginRequiredMixin, DetailView):
    """ Shows Details about a Company """
    model = Company
    # Look the company up by its name instead of the default pk/slug.
    slug_field = "name"
    slug_url_kwarg = "name"
    # NOTE(review): `fields` appears unused by DetailView -- confirm and drop.
    fields = ["name"]


company_detail_view = CompanyDetailView.as_view()
class CompanyListView(LoginRequiredMixin, ListView):
    """ Lists Companies """
    model = Company
    # NOTE(review): slug options appear unused by ListView -- confirm and drop.
    slug_field = "name"
    slug_url_kwarg = "name"


company_list_view = CompanyListView.as_view()
class CompanyCreateView(LoginRequiredMixin, CreateView):
    """ Creates a Company """
    model = Company
    # NOTE(review): CreateView reads `form_class`, not `form`; the form used
    # here is the one auto-generated from `fields` -- verify intent.
    form = CompanyForm
    fields = ['name']

    def get_success_url(self):
        # Back to the company list after a successful create.
        return reverse_lazy("companies:list")


company_create_view = CompanyCreateView.as_view()
class CompanyUpdateView(LoginRequiredMixin, UpdateView):
    """ Edits a Company """
    model = Company
    # NOTE(review): UpdateView reads `form_class`, not `form`; the form used
    # here is the one auto-generated from `fields` -- verify intent.
    form = CompanyForm
    fields = ["name"]
    # Look the company up by its name instead of the default pk/slug.
    slug_field = "name"
    slug_url_kwarg = "name"

    def get_success_url(self):
        # Back to the company list after a successful edit.
        return reverse_lazy("companies:list")


company_update_view = CompanyUpdateView.as_view()
class CompanyDeleteView(LoginRequiredMixin, DeleteView):
    """ Deletes a Company """
    model = Company
    # Look the company up by its name instead of the default pk/slug.
    slug_field = "name"
    slug_url_kwarg = "name"
    success_url = reverse_lazy("companies:list")


company_delete_view = CompanyDeleteView.as_view()
class CompanyRedirectView(LoginRequiredMixin, RedirectView):
    """ Redirects to the Company Details View """
    permanent = False

    def get_redirect_url(self):
        # NOTE(review): "companies:detail" is reversed without the `name`
        # kwarg the detail view appears to require -- this likely raises
        # NoReverseMatch; confirm and pass kwargs if needed.
        return reverse_lazy("companies:detail")


company_redirect_view = CompanyRedirectView.as_view()
| [
"sqyttles@protonmail.com"
] | sqyttles@protonmail.com |
cf224ee8e169f27d18b46d21133d44240fb60521 | 75be45bd9b82fa4932dd59602ab6eb04ae5c3efb | /3-extract_scripts.py | 280477b645911477617eb2931c5007ff78c760ea | [
"MIT"
] | permissive | h-uekawa/driveby-finder | 8c9f3c928fec3defe3a8aedf4275db35ea97f517 | 69af5013c6dd907a1eadf337beff2b79eeaf4314 | refs/heads/master | 2021-01-13T09:13:03.187142 | 2016-10-05T14:28:05 | 2016-10-08T04:14:30 | 70,068,119 | 6 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,197 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# driveby-finder - Scripts for MWSCup 2016
# https://github.com/h-uekawa/driveby-finder
#
# Copyright (c) 2016 Team Security Anthem (Okayama Univ.)
# Released under the MIT License, see LICENSE.txt
#
##
import sys
import ConfigParser
import pymongo
import re
import hashlib
# Pre-compiled regexes used to pull <script> bodies out of HTML and to strip
# comment noise before hashing. Keys: "tag" extracts script contents; the
# rest remove line comments, HTML-comment markers, block comments and CDATA
# wrappers.
pattern = {
    "tag":   re.compile(r"<script[^>]*>[ \t\n\r]*(.*?)[ \t\n\r]*</script>", re.I | re.S),
    "comm1": re.compile(r"^[ \t]*//[^\n]*\n?", re.M),
    "comm2": re.compile(r"^[ \t]*<!--[^\n]*\n?", re.M),
    "comm3": re.compile(r"^[ \t]*-->[^\n]*\n?", re.M),
    "comm4": re.compile(r"/\*.*?\*/", re.S),
    "xml1":  re.compile(r"<!\[CDATA\[", 0),
    "xml2":  re.compile(r"\]\]>[ \t]*$", re.M),
}


def remove_comments(s):
    """Strip comment lines, block comments and CDATA markers from a script body.

    Substitutions run in the same fixed order as the pattern table above.
    """
    for key in ("comm1", "comm2", "comm3", "comm4", "xml1", "xml2"):
        s = pattern[key].sub("", s)
    return s
def main():
    """Extract inline <script> bodies from stored HTTP responses into MongoDB.

    Reads mongodb settings from ./config.ini, walks the `responses`
    collection, records each processed response id in `extracted` (so reruns
    skip already-handled responses), and stores each unique de-commented
    script keyed by its MD5 hash in `scripts`.
    """
    global pattern
    # config
    config = ConfigParser.SafeConfigParser()
    config.read("./config.ini")
    # database
    host = config.get("mongodb", "host")
    port = int(config.get("mongodb", "port"))
    dbname = config.get("mongodb", "database")
    client = pymongo.MongoClient(host, port)
    database = client[dbname]
    # collections
    urls = database.urls
    responses = database.responses
    scripts = database.scripts
    extracted = database.extracted
    rcnt,scnt = 0,0  # processed-response and new-script counters
    for res in responses.find(None,{"content":1}):
        # Use the `extracted` collection as a dedupe marker: a duplicate-key
        # error means this response was already processed on a previous run.
        try:
            extracted.insert_one({"_id":res["_id"]})
        except pymongo.errors.DuplicateKeyError:
            continue
        rcnt += 1
        for s in pattern["tag"].findall(res["content"]):
            if s == "": continue
            s = remove_comments(s)
            # Scripts are keyed by the MD5 of their latin-1 encoding, so the
            # same script text is stored only once across all responses.
            h = hashlib.md5(s.encode("latin-1")).hexdigest()
            srecord = {
                "_id": h,
                "script": s,
                "response": res["_id"],
                "next": None,
                "prev": None,
            }
            try:
                scripts.insert_one(srecord)
                scnt += 1
            except pymongo.errors.DuplicateKeyError:
                # Already stored (same hash) -- not a new script.
                continue
            except Exception as e:
                print repr(e)
                continue
        # Progress line, rewritten in place via "\r" (Python 2 print).
        print "%d responses, %d scripts\r"%(rcnt,scnt),
        sys.stdout.flush()
if __name__ == "__main__":
    try:
        # BUG FIX: main() takes no parameters, so forwarding sys.argv[1:]
        # (the old `main(*sys.argv[1:])`) raised TypeError whenever any
        # command-line argument was supplied.
        main()
    except KeyboardInterrupt:
        # Allow clean interruption of the extraction loop with Ctrl-C.
        pass
| [
"pp5a627w@s.okayama-u.ac.jp"
] | pp5a627w@s.okayama-u.ac.jp |
53979d794a98f9c44496e45c3d73e7692311c48f | d49a918891af8140a295730a62e8fe530d5b67f5 | /tests/parser/test_parser_program_declarations.py | ac0431bcc5215b38f75ca1c6d3c692d703b2eceb | [] | no_license | jfpio/TKOM-Interpreter | f2efc3c0282371bd3820421569407e0a02b26055 | 4111e813f99da9f6747a3682eda3854bfa893d05 | refs/heads/main | 2023-07-17T16:53:05.240163 | 2021-08-31T06:07:52 | 2021-08-31T06:07:52 | 401,589,492 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,304 | py | import io
from typing import List
import pytest
from interpreter.lexer.lexer import Lexer
from interpreter.models.base import Constant, Param, Variable
from interpreter.models.declarations import CurrencyDeclaration, Declaration, VariableDeclaration, FunctionDeclaration
from interpreter.models.statements import ReturnStatement, Statements
from interpreter.parser.parser import Parser
from interpreter.parser.parser_error import ParserError
from interpreter.source.source import Source
from interpreter.source.source_position import SourcePosition
from tests.parser.utils import simple_expression_factory
class TestParserDeclarations:
    """Parser tests for top-level program declarations.

    Covers currency declarations (``NAME := value;``), variable declarations
    and function declarations, including malformed inputs that must raise
    ParserError.
    """

    def test_currency_declaration(self):
        string = 'EUR := 4.92;'
        # Note: these are declarations, despite the local name `statements`.
        statements = self._get_program_declarations(string)
        currency_declaration = statements[0]
        assert currency_declaration == CurrencyDeclaration(SourcePosition(1, 11), 'EUR', 4.92)

    def test_currency_declaration_error1(self):
        # '=' instead of ':=' is invalid.
        string = 'EUR = 4.92;'
        with pytest.raises(ParserError):
            self._get_program_declarations(string)

    def test_currency_declaration_error2(self):
        # Missing trailing semicolon.
        string = 'EUR := 4.92'
        with pytest.raises(ParserError):
            self._get_program_declarations(string)

    def test_currency_declaration_error3(self):
        # Missing value expression.
        string = 'EUR := ;'
        with pytest.raises(ParserError):
            self._get_program_declarations(string)

    def test_variable_declaration_1(self):
        string = 'int a = 4.92;'
        declarations = self._get_program_declarations(string)
        declaration = declarations[0]
        assert declaration == VariableDeclaration(SourcePosition(1, 12), int, 'a',
                                                  simple_expression_factory(Constant(SourcePosition(1, 12), 4.92)))

    def test_variable_declaration_2(self):
        # Declaration without an initializer must be rejected.
        string = 'int a;'
        with pytest.raises(ParserError):
            self._get_program_declarations(string)

    def test_function_declaration_1(self):
        string = 'int a(int b, int c){return b;}'
        declarations = self._get_program_declarations(string)
        declaration = declarations[0]
        assert declaration == FunctionDeclaration(
            SourcePosition(1, len(string)), int, 'a',
            [Param(SourcePosition(1, 11), 'b', int), Param(SourcePosition(1, 18), 'c', int)],
            Statements(
                [ReturnStatement(SourcePosition(1, 28),
                                 simple_expression_factory(Variable(SourcePosition(1, 28), 'b')))]))

    def test_function_declaration_2(self):
        # Empty parameter list and empty body.
        string = 'int a(){}'
        declarations = self._get_program_declarations(string)
        declaration = declarations[0]
        assert declaration == FunctionDeclaration(
            SourcePosition(1, len(string)), int, 'a',
            [],
            Statements([]))

    @staticmethod
    def _get_program_declarations(string: str) -> List[Declaration]:
        """Run the full source->lexer->parser pipeline and return declarations."""
        source = Source(io.StringIO(string))
        lexer = Lexer(source)
        parser = Parser(lexer)
        parse_tree = parser.parse_program()
        return parse_tree.declarations

    @staticmethod
    def _get_parser(string: str) -> Parser:
        """Build a Parser over an in-memory source (currently unused helper)."""
        source = Source(io.StringIO(string))
        lexer = Lexer(source)
        return Parser(lexer)
| [
"janfpiotrowski@gmail.com"
] | janfpiotrowski@gmail.com |
833ab69f26d1b4150bc3d7a6391c97683d7c5302 | ee307df49f0b12d28a1550550edfa1c46eb5cbc2 | /Univariate Linear Regression.py | 84ab1dac85a0b15dce40ef5301a030cf2afa333d | [] | no_license | riyagoel192/Univariate-Linear-Regression | 15c705c7406bd9e37f41119a5ce95c3f008c42f7 | 7b72a1011127b7d9eb1dfc1d25b2490bbf2dd111 | refs/heads/master | 2021-03-30T12:01:43.161028 | 2020-03-18T18:26:13 | 2020-03-18T18:26:13 | 248,051,243 | 0 | 1 | null | 2020-03-18T18:26:14 | 2020-03-17T19:03:19 | Python | UTF-8 | Python | false | false | 716 | py | #!/usr/bin/env python
# coding: utf-8
# In[76]:
#importing libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# In[123]:
#Declaring variables
x=[1,2,3]
y=[1,2,3]
x=np.array(x)
y=np.array(y)
theta=np.zeros((2,))
theta[0]=0
theta[1]=0.5
m=x.shape[0]
# In[124]:
#Declaration of functions
def y_(theta,x):
return theta[1]*x + theta[0]
def linear_regression(x,y):
e=0
y_hat = y_(theta,x)
y_hat=np.array(y_hat)
for i in range(m):
e+=(y_hat[i] - y[i])**2
#print(e)
return(e/(2*m))
ans=linear_regression(x,y)
print(ans)
# In[125]:
#Visualization
plt.plot(x,y,color="green")
plt.show
plt.plot(x,y_hat,color="red")
plt.show
# In[ ]:
| [
"noreply@github.com"
] | noreply@github.com |
13d00496340bf494c42e637092864c02cd223882 | 8030404af9a6b2555387a49a3e43a47be7a26470 | /peggy/lib/alipaylib/alipayConfig.py | 2799153644f19ca6690396e6c9260dcb2097eff1 | [] | no_license | mebusw/tianjinsports-server | d5de7aae1a25affdd3c91c78e5a82b0d4c10220f | 3402ac634fc92b5ccdf049f530e6b7b8b604aac1 | refs/heads/master | 2016-09-06T21:32:40.096629 | 2015-03-14T13:20:24 | 2015-03-14T13:20:24 | 32,121,712 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 502 | py | import os
partner = "2088711061370024"
key = "j5f5nc0lev9wch24t2cotwdvqkwexgww"
seller_mail = "17sports@sina.cn"
if 'SERVER_SOFTWARE' in os.environ:
notify_url = "http://1.peggy.sinaapp.com/peggy/paid_notify_wap"
return_url = "http://1.peggy.sinaapp.com/peggy/paid_wap"
show_url = "http://1.peggy.sinaapp.com/peggy"
else:
notify_url = "http://127.0.0.1:8000/peggy/paid_notify_wap"
return_url = "http://127.0.0.1:8000/peggy/paid_wap"
show_url = "http://127.0.0.1:8000/peggy"
| [
"mebusw@163.com"
] | mebusw@163.com |
ccee78a2b9646c3ed52024216b909a64eb921b0c | e4aab0a71dc5c047d8b1576380b16364e03e7c0d | /core/ajax.py | 0d38f835124b1bb6fdc17e253409ede47b76fd44 | [
"Apache-2.0"
] | permissive | Joecastra/Watcher3 | 8ca66c44846030f0eb771d9d6ddeb9c37f637a4e | ce25d475f83ed36d6772f0cc35ef020d5e47c94b | refs/heads/master | 2021-01-19T11:05:55.454351 | 2017-04-10T20:17:24 | 2017-04-10T20:17:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 39,761 | py | import json
import logging
import os
import sys
import threading
import cherrypy
from base64 import b16encode
import core
from core import config, library, plugins, poster, searchresults, searcher, snatcher, sqldb, version
from core.providers import torrent, newznab
from core.downloaders import nzbget, sabnzbd, transmission, qbittorrent, deluge, rtorrent
from core.movieinfo import TMDB
from core.notification import Notification
from core.helpers import Conversions
from core.rss import predb
from templates import movie_info_popup, import_library, movie_status_popup, plugin_conf_popup, status
logging = logging.getLogger(__name__)
class Ajax(object):
''' These are all the methods that handle
ajax post/get requests from the browser.
Except in special circumstances, all should return a JSON string
since that is the only datatype sent over http
'''
    def __init__(self):
        ''' Builds the shared service helpers used by the ajax endpoints. '''
        self.tmdb = TMDB()                      # themoviedb.org API wrapper
        self.config = config.Config()           # config file read/write
        self.metadata = library.Metadata()      # movie file metadata extraction
        self.predb = predb.PreDB()              # predb release checking
        self.plugins = plugins.Plugins()        # user plugin execution
        self.searcher = searcher.Searcher()     # indexer searching
        self.score = searchresults.Score()      # search result scoring
        self.sql = sqldb.SQL()                  # database access
        self.library = library                  # module ref for import helpers
        self.poster = poster.Poster()           # poster image management
        self.snatcher = snatcher.Snatcher()     # sends releases to downloaders
        self.update = library.Status()          # movie status management
@cherrypy.expose
def search_tmdb(self, search_term):
''' Search tmdb for movies
:param search_term: str title and year of movie (Movie Title 2016)
Returns str json-encoded list of dicts that contain tmdb's data.
'''
results = self.tmdb.search(search_term)
if not results:
logging.info('No Results found for {}'.format(search_term))
return None
else:
return json.dumps(results)
@cherrypy.expose
def movie_info_popup(self, data):
''' Calls movie_info_popup to render html
:param imdbid: str imdb identification number (tt123456)
Returns str html content.
'''
mip = movie_info_popup.MovieInfoPopup()
return mip.html(data)
@cherrypy.expose
def movie_status_popup(self, imdbid):
''' Calls movie_status_popup to render html
:param imdbid: str imdb identification number (tt123456)
Returns str html content.
'''
msp = movie_status_popup.MovieStatusPopup()
return msp.html(imdbid)
    @cherrypy.expose
    def add_wanted_movie(self, data, full_metadata=False):
        ''' Adds movie to Wanted list.
        data: str json.dumps(dict) of info to add to database.
        full_metadata: bool if data is complete and ready for write

        data MUST include tmdb id as data['id']

        Writes data to MOVIES table.

        If full_metadata is False, searches tmdb for data['id'] and
        merges the result into data (user-supplied keys win).

        If Search on Add enabled,
            searches for movie immediately in separate thread.
        If Auto Grab enabled, will snatch movie if found.

        Returns str json.dumps(dict) of status and message
        '''

        def thread_search_grab(data):
            # Background worker: check predb for the release, then
            # optionally search indexers and auto-snatch a result.
            imdbid = data['imdbid']
            title = data['title']
            year = data['year']
            quality = data['quality']
            self.predb.check_one(data)
            if core.CONFIG['Search']['searchafteradd']:
                if self.searcher.search(imdbid, title, year, quality):
                    if core.CONFIG['Search']['autograb']:
                        self.snatcher.auto_grab(data)

        response = {}
        data = json.loads(data)
        tmdbid = data['id']

        if not full_metadata:
            # Fetch full metadata from TMDB, keeping caller-supplied fields.
            movie = self.tmdb._search_tmdbid(tmdbid)[0]
            movie.update(data)
        else:
            movie = data

        movie['quality'] = data.get('quality', 'Default')
        movie['status'] = data.get('status', 'Wanted')

        # Refuse duplicates before any disk or thread work happens.
        if self.sql.row_exists('MOVIES', imdbid=movie['imdbid']):
            logging.info('{} already exists in library.'.format(movie['title']))
            response['response'] = False

            response['error'] = '{} already exists in library.'.format(movie['title'])
            return json.dumps(response)

        if movie.get('poster_path'):
            poster_url = 'http://image.tmdb.org/t/p/w300{}'.format(movie['poster_path'])
        else:
            poster_url = '{}/static/images/missing_poster.jpg'.format(core.PROG_PATH)

        movie = self.metadata.convert_to_db(movie)

        if self.sql.write('MOVIES', movie):
            # Poster download happens off-thread so the response isn't blocked.
            t2 = threading.Thread(target=self.poster.save_poster, args=(movie['imdbid'], poster_url))
            t2.start()

            if movie['status'] != 'Disabled':  # disable immediately grabbing new release for imports
                t = threading.Thread(target=thread_search_grab, args=(movie,))
                t.start()

            response['response'] = True
            response['message'] = '{} {} added to library.'.format(movie['title'], movie['year'])
            self.plugins.added(movie['title'], movie['year'], movie['imdbid'], movie['quality'])

            return json.dumps(response)
        else:
            response['response'] = False
            response['error'] = 'Could not write to database. Check logs for more information.'
            return json.dumps(response)
@cherrypy.expose
def add_wanted_imdbid(self, imdbid, quality='Default'):
''' Method to quckly add movie with just imdbid
:param imdbid: str imdb id #
Submits movie with base quality options
Generally just used for the api
Returns dict of success/fail with message.
Returns str json.dumps(dict)
'''
response = {}
movie = self.tmdb._search_imdbid(imdbid)
if not movie:
response['status'] = 'false'
response['message'] = '{} not found on TMDB.'.format(imdbid)
return response
else:
movie = movie[0]
movie['imdbid'] = imdbid
movie['quality'] = quality
return self.add_wanted_movie(json.dumps(movie))
@cherrypy.expose
def add_wanted_tmdbid(self, tmdbid, quality='Default'):
''' Method to quckly add movie with just tmdbid
:param imdbid: str imdb id #
Submits movie with base quality options
Generally just used for the api
Returns dict of success/fail with message.
Returns str json.dumps(dict)
'''
response = {}
data = self.tmdb._search_tmdbid(tmdbid)
if not data:
response['status'] = 'false'
response['message'] = '{} not found on TMDB.'.format(tmdbid)
return response
else:
data = data[0]
data['quality'] = quality
data['status'] = 'Wanted'
return self.add_wanted_movie(json.dumps(data))
    @cherrypy.expose
    def save_settings(self, data):
        ''' Saves settings to config file

        data: str json of dict of Section with nested dict of keys and values:
            {'Section': {'key': 'val', 'key2': 'val2'}, 'Section2': {'key': 'val'}}

        All dicts must contain the full tree or data will be lost.
        Only sections that differ from the current core.CONFIG are written.

        Returns str json.dumps(dict) of success/fail
        '''

        # orig_config = dict(core.CONFIG)

        logging.info('Saving settings.')
        data = json.loads(data)

        # Only persist sections that actually changed.
        save_data = {}
        for key in data:
            if data[key] != core.CONFIG[key]:
                save_data[key] = data[key]

        if not save_data:
            return json.dumps({'response': True})

        try:
            self.config.write_dict(save_data)
        except (SystemExit, KeyboardInterrupt):
            raise
        except Exception as e:  # noqa
            logging.error('Writing config.', exc_info=True)
            return json.dumps({'response': False, 'error': 'Unable to write to config file.'})

        return json.dumps({'response': True})
@cherrypy.expose
def remove_movie(self, imdbid):
''' Removes movie
:param imdbid: str imdb identification number (tt123456)
Removes row from MOVIES, removes any entries in SEARCHRESULTS
In separate thread deletes poster image.
Returns srt 'error' or nothing on success
'''
t = threading.Thread(target=self.poster.remove_poster, args=(imdbid,))
t.start()
if self.sql.remove_movie(imdbid):
response = {'response': True}
else:
response = {'response': False}
return json.dumps(response)
    @cherrypy.expose
    def search(self, imdbid, title, year, quality):
        ''' Search indexers for specific movie.

        imdbid: str imdb identification number (tt123456)
        title: str movie title
        year: str movie release year
        quality: str quality profile name

        Blocks until the search completes; checks predb, then, if found,
        searches providers for the movie.

        Does not return
        '''

        self.searcher.search(imdbid, title, year, quality)
        return
@cherrypy.expose
def manual_download(self, title, year, guid, kind):
''' Sends search result to downloader manually
:param guid: str download link for nzb/magnet/torrent file.
:param kind: str type of download (torrent, magnet, nzb)
Returns str json.dumps(dict) success/fail message
'''
torrent_enabled = core.CONFIG['Downloader']['Sources']['torrentenabled']
usenet_enabled = core.CONFIG['Downloader']['Sources']['usenetenabled']
if kind == 'nzb' and not usenet_enabled:
return json.dumps({'response': False, 'error': 'Link is NZB but no Usent downloader is enabled.'})
elif kind in ('torrent', 'magnet') and not torrent_enabled:
return json.dumps({'response': False, 'error': 'Link is {} but no Torrent downloader is enabled.'.format(kind)})
data = dict(self.sql.get_single_search_result('guid', guid))
if data:
data['year'] = year
return json.dumps(self.snatcher.snatch(data))
else:
return json.dumps({'response': False, 'error': 'Unable to get download information from the database. Check logs for more information.'})
@cherrypy.expose
def mark_bad(self, guid, imdbid):
''' Marks guid as bad in SEARCHRESULTS and MARKEDRESULTS
:param guid: srt guid to mark
Returns str json.dumps(dict)
'''
if self.update.mark_bad(guid, imdbid=imdbid):
response = {'response': True, 'message': 'Marked as Bad.'}
else:
response = {'response': False, 'error': 'Could not mark release as bad. Check logs for more information.'}
return json.dumps(response)
    @cherrypy.expose
    def notification_remove(self, index):
        ''' Removes notification from core.notification

        index: str index of notification to remove

        'index' will be a type of string since it comes from ajax request.
        Therefore we convert to int here before passing to Notification

        Simply calls Notification module.

        Does not return
        '''

        Notification.remove(int(index))

        return
@cherrypy.expose
def update_check(self):
''' Manually check for updates
Returns str json.dumps(dict) from Version manager update_check()
'''
response = version.Version().manager.update_check()
return json.dumps(response)
    @cherrypy.expose
    def refresh_list(self, list, imdbid='', quality=''):
        ''' Re-renders html for Movies/Results list

        list: str the html list id to be re-rendered ('#movie_list' or
            '#result_list'); note the parameter name shadows the builtin
            but is part of the ajax interface and cannot be renamed here.
        imdbid: str imdb identification number (tt123456) <optional>
        quality: str quality profile name <optional>

        Calls template file to re-render a list when modified in the database.
        '#result_list' requires imdbid.

        Returns str html content, or None for an unrecognized list id.
        '''

        if list == '#movie_list':
            return status.Status.movie_list()
        if list == '#result_list':
            return movie_status_popup.MovieStatusPopup().result_list(imdbid, quality)
@cherrypy.expose
def test_downloader_connection(self, mode, data):
''' Test connection to downloader.
:param mode: str which downloader to test.
:param data: dict connection information (url, port, login, etc)
Executes staticmethod in the chosen downloader's class.
Returns str json.dumps dict:
{'status': 'false', 'message': 'this is a message'}
'''
response = {}
data = json.loads(data)
if mode == 'sabnzbd':
test = sabnzbd.Sabnzbd.test_connection(data)
if test is True:
response['status'] = True
response['message'] = 'Connection successful.'
else:
response['status'] = False
response['error'] = test
if mode == 'nzbget':
test = nzbget.Nzbget.test_connection(data)
if test is True:
response['status'] = True
response['message'] = 'Connection successful.'
else:
response['status'] = False
response['error'] = test
if mode == 'transmission':
test = transmission.Transmission.test_connection(data)
if test is True:
response['status'] = True
response['message'] = 'Connection successful.'
else:
response['status'] = False
response['error'] = test
if mode == 'delugerpc':
test = deluge.DelugeRPC.test_connection(data)
if test is True:
response['status'] = True
response['message'] = 'Connection successful.'
else:
response['status'] = False
response['error'] = test
if mode == 'delugeweb':
test = deluge.DelugeWeb.test_connection(data)
if test is True:
response['status'] = True
response['message'] = 'Connection successful.'
else:
response['status'] = False
response['error'] = test
if mode == 'qbittorrent':
test = qbittorrent.QBittorrent.test_connection(data)
if test is True:
response['status'] = True
response['message'] = 'Connection successful.'
else:
response['status'] = False
response['error'] = test
if mode == 'rtorrentscgi':
test = rtorrent.rTorrentSCGI.test_connection(data)
if test is True:
response['status'] = True
response['message'] = 'Connection successful.'
else:
response['status'] = False
response['error'] = test
if mode == 'rtorrenthttp':
test = rtorrent.rTorrentHTTP.test_connection(data)
if test is True:
response['status'] = True
response['message'] = 'Connection successful.'
else:
response['status'] = False
response['error'] = test
return json.dumps(response)
    @cherrypy.expose
    def server_status(self, mode):
        ''' Check or modify status of CherryPy server_status

        mode: str 'restart', 'shutdown', or 'online'

        Restarts or Shuts Down server in separate thread.
            Delays by one second to allow browser to redirect.

        If mode == 'online', asks server for status.
            (ENGINE.started, ENGINE.stopped, etc.)

        Returns nothing for mode == restart || shutdown
        Returns str server state if mode == online
        '''

        def server_restart():
            # cherrypy.engine.restart() changes cwd; restore it so the
            # daemonized process keeps resolving relative paths.
            cwd = os.getcwd()
            cherrypy.engine.restart()
            os.chdir(cwd)  # again, for the daemon
            return

        def server_shutdown():
            cherrypy.engine.stop()
            cherrypy.engine.exit()
            sys.exit(0)

        if mode == 'restart':
            logging.info('Restarting Server...')
            # One-second delay lets the browser receive the response first.
            threading.Timer(1, server_restart).start()
            return

        elif mode == 'shutdown':
            logging.info('Shutting Down Server...')
            threading.Timer(1, server_shutdown).start()
            return

        elif mode == 'online':
            return str(cherrypy.engine.state)
    @cherrypy.expose
    def update_now(self, mode):
        ''' Starts and executes update process.

        mode: str 'set_true' or 'update_now'

        The ajax response is a generator that will contain
            only the success/fail message.

        This is done so the message can be passed to the ajax
            request in the browser while cherrypy restarts.

        Returns str json.dumps(dict) -- the first (only) yielded message.
        '''

        response = self._update_now(mode)
        # Drain the generator's single message before the server restarts.
        for i in response:
            return i
    @cherrypy.expose
    def _update_now(self, mode):
        ''' Starts and executes update process.

        mode: str 'set_true' or 'update_now'

        Helper for self.update_now()

        If mode == set_true, sets core.UPDATING to True
        This is done so if the user visits /update without setting true
            they will be redirected back to status.
        Yields json response back to browser.

        If mode == 'update_now', starts update process.
        Yields json response. If successful, respawns the process
            with the same argv (the yield reaches the browser first).
        '''

        if mode == 'set_true':
            core.UPDATING = True
            yield json.dumps({'response': True})
        if mode == 'update_now':
            update_status = version.Version().manager.execute_update()
            core.UPDATING = False
            if update_status is False:
                logging.error('Update Failed.')
                yield json.dumps({'response': False})
            elif update_status is True:
                yield json.dumps({'response': True})
                logging.info('Respawning process...')
                cherrypy.engine.stop()
                # Replace the current process image with a fresh interpreter.
                python = sys.executable
                os.execl(python, python, *sys.argv)
        else:
            return
    @cherrypy.expose
    def update_movie_options(self, quality, status, imdbid):
        ''' Updates quality settings for individual title

        quality: str name of new quality profile
        status: str status management state ('Automatic' or 'Finished')
        imdbid: str imdb identification number (tt123456)

        'Automatic' recalculates the status from the database;
        'Finished' forces the movie to 'Disabled'.

        Returns str json.dumps(dict) of success/fail
        '''

        logging.info('Updating quality profile to {} for {}.'.format(quality, imdbid))

        if not self.sql.update('MOVIES', 'quality', quality, 'imdbid', imdbid):
            return json.dumps({'response': False})

        logging.info('Updating status to {} for {}.'.format(status, imdbid))

        if status == 'Automatic':
            if not self.update.movie_status(imdbid):
                return json.dumps({'response': False})
        elif status == 'Finished':
            if not self.sql.update('MOVIES', 'status', 'Disabled', 'imdbid', imdbid):
                return json.dumps({'response': False})

        return json.dumps({'response': True})
@cherrypy.expose
def get_log_text(self, logfile):
with open(os.path.join(core.LOG_DIR, logfile), 'r') as f:
log_text = ''.join(reversed(f.readlines()))
return log_text
@cherrypy.expose
def indexer_test(self, indexer, apikey, mode):
if mode == 'newznab':
return json.dumps(newznab.NewzNab.test_connection(indexer, apikey))
elif mode == 'torznab':
return json.dumps(torrent.Torrent.test_connection(indexer, apikey))
else:
return json.dumps({'response': 'false', 'error': 'Invalid test mode.'})
@cherrypy.expose
def get_plugin_conf(self, folder, conf):
''' Calls plugin_conf_popup to render html
folder: str folder to read config file from
conf: str filename of config file (ie 'my_plugin.conf')
Returns str html content.
'''
return plugin_conf_popup.PluginConfPopup.html(folder, conf)
@cherrypy.expose
def save_plugin_conf(self, folder, conf, data):
''' Calls plugin_conf_popup to render html
folder: str folder to store config file
conf: str filename of config file (ie 'my_plugin.conf')
data: str json data to store in conf file
Returns str json dumps dict of success/fail message
'''
data = json.loads(data)
conf_file = conf_file = os.path.join(core.PROG_PATH, core.PLUGIN_DIR, folder, conf)
response = {'response': True, 'message': 'Plugin settings saved'}
try:
with open(conf_file, 'w') as output:
json.dump(data, output, indent=2)
except Exception as e:
response = {'response': False, 'error': str(e)}
return json.dumps(response)
@cherrypy.expose
def scan_library_directory(self, directory, minsize, recursive):
''' Calls library to scan directory for movie files
directory: str directory to scan
minsize: str minimum file size in mb, coerced to int
resursive: str 'true' or 'false', coerced to bool
Removes all movies already in library.
If error, yields {'error': reason} and stops Iteration
If movie has all metadata, yields:
{'complete': {<metadata>}}
If missing imdbid or resolution, yields:
{'incomplete': {<knownn metadata>}}
All metadata dicts include:
'path': 'absolute path to file'
'progress': '10 of 250'
Yeilds generator object of json objects
'''
recursive = json.loads(recursive)
minsize = int(minsize)
files = self.library.ImportDirectory.scan_dir(directory, minsize, recursive)
if files.get('error'):
yield json.dumps({'error': files['error']})
raise StopIteration()
library = [i['imdbid'] for i in self.sql.get_user_movies()]
files = files['files']
length = len(files)
for index, path in enumerate(files):
metadata = self.metadata.get_metadata(path)
metadata['size'] = os.path.getsize(path)
metadata['finished_file'] = path
metadata['human_size'] = Conversions.human_file_size(metadata['size'])
progress = [index + 1, length]
if not metadata.get('imdbid'):
logging.info('IMDB unknown for import {}'.format(metadata['title']))
yield json.dumps({'response': 'incomplete', 'movie': metadata, 'progress': progress})
continue
if metadata['imdbid'] in library:
logging.info('Import {} already in library, ignoring.'.format(metadata['title']))
yield json.dumps({'response': 'in_library', 'movie': metadata, 'progress': progress})
continue
elif not metadata.get('resolution'):
logging.info('Resolution/Source unknown for import {}'.format(metadata['title']))
yield json.dumps({'response': 'incomplete', 'movie': metadata, 'progress': progress})
continue
else:
logging.info('All data found for import {}'.format(metadata['title']))
yield json.dumps({'response': 'complete', 'movie': metadata, 'progress': progress})
scan_library_directory._cp_config = {'response.stream': True}
    @cherrypy.expose
    def import_dir(self, movie_data, corrected_movies):
        ''' Imports list of movies in data

        movie_data: str json list of dicts of movie info ready to import
        corrected_movies: str json list of dicts of user-corrected movie info

        corrected_movies must be [{'/path/to/file': {'known': 'metadata'}}]

        Iterates through corrected_movies and attempts to get metadata again
            if required, appending successes to movie_data.
        Each imported movie is added with status 'Disabled' and a fake
            search result is generated and stored so it can be scored.

        Yields generator object of json strings, one per movie, each with
            'response' bool and 'progress': [<current>, <total>]
        '''
        movie_data = json.loads(movie_data)
        corrected_movies = json.loads(corrected_movies)

        fake_results = []

        success = []

        length = len(movie_data) + len(corrected_movies)
        progress = 1

        if corrected_movies:
            # Re-check user-corrected entries against TMDB before import.
            for data in corrected_movies:
                tmdbdata = self.tmdb._search_imdbid(data['imdbid'])[0]
                if tmdbdata:
                    data['year'] = tmdbdata['release_date'][:4]
                    data.update(tmdbdata)
                    movie_data.append(data)
                else:
                    logging.error('Unable to find {} on TMDB.'.format(data['imdbid']))
                    yield json.dumps({'response': False, 'movie': data, 'progress': [progress, length], 'reason': 'Unable to find {} on TMDB.'.format(data['imdbid'])})
                    progress += 1

        for movie in movie_data:
            if movie['imdbid']:
                # 'Disabled' prevents an immediate search for imports.
                movie['status'] = 'Disabled'
                response = json.loads(self.add_wanted_movie(json.dumps(movie)))
                if response['response'] is True:
                    fake_results.append(searchresults.generate_simulacrum(movie))
                    yield json.dumps({'response': True, 'progress': [progress, length], 'movie': movie})
                    progress += 1
                    success.append(movie)
                    continue
                else:
                    yield json.dumps({'response': False, 'movie': movie, 'progress': [progress, length], 'reason': response['error']})
                    progress += 1
                    continue
            else:
                logging.error('Unable to find {} on TMDB.'.format(movie['imdbid']))
                yield json.dumps({'response': False, 'movie': movie, 'progress': [progress, length], 'reason': 'IMDB ID invalid or missing.'})
                progress += 1

        # Score the fake results, then copy each score onto its movie row.
        fake_results = self.score.score(fake_results, imported=True)

        for i in success:
            score = None
            for r in fake_results:
                if r['imdbid'] == i['imdbid']:
                    score = r['score']
                    break

            if score:
                self.sql.update('MOVIES', 'finished_score', score, 'imdbid', i['imdbid'])

        self.sql.write_search_results(fake_results)
    import_dir._cp_config = {'response.stream': True}
@cherrypy.expose
def list_files(self, current_dir, move_dir):
''' Lists files in directory
current_dir: str base path
move_dir: str child path to read
Joins and normalizes paths:
('/home/user/movies', '..')
Becomes /home/user
Sends path to import_library template to generate html
Returns json dict {'new_path': '/path', 'html': '<li>...'}
'''
response = {}
new_path = os.path.normpath(os.path.join(current_dir, move_dir))
response['new_path'] = new_path
try:
response['html'] = import_library.ImportLibrary.file_list(new_path)
except Exception as e:
response = {'error': str(e)}
logging.error('Error listing directory.', exc_info=True)
return json.dumps(response)
    @cherrypy.expose
    def update_metadata(self, imdbid):
        ''' Re-fetches metadata from TMDB and overwrites the movie row.

        imdbid: str imdb identification number (tt123456)

        Also removes and re-downloads the poster image.

        Returns str json.dumps(dict) of success/fail
        '''
        tmdbid = self.sql.get_movie_details('imdbid', imdbid).get('tmdbid')

        if not tmdbid:
            # Fall back to a TMDB lookup when the row has no stored tmdbid.
            # NOTE(review): _search_imdbid may return an empty list; [0]
            # would raise IndexError then -- confirm upstream guarantees.
            tmdbid = self.tmdb._search_imdbid(imdbid)[0].get('id')

        if not tmdbid:
            return json.dumps({'response': False, 'error': 'Unable to find {} on TMDB.'.format(imdbid)})

        movie = self.tmdb._search_tmdbid(tmdbid)[0]

        target_poster = os.path.join(self.poster.poster_folder, '{}.jpg'.format(imdbid))

        if movie['poster_path']:
            poster_url = 'http://image.tmdb.org/t/p/w300{}'.format(movie['poster_path'])
        else:
            poster_url = '{}/static/images/missing_poster.jpg'.format(core.PROG_PATH)

        # Remove the stale poster before saving the fresh one.
        if os.path.isfile(target_poster):
            try:
                os.remove(target_poster)
            except Exception as e: #noqa
                logging.warning('Unable to remove existing poster.', exc_info=True)
                return json.dumps({'response': False, 'error': 'Unable to remove existing poster.'})

        movie = self.metadata.convert_to_db(movie)
        self.sql.update_multiple('MOVIES', movie, imdbid=imdbid)

        self.poster.save_poster(imdbid, poster_url)

        return json.dumps({'response': True, 'message': 'Metadata updated.'})
@cherrypy.expose
def change_quality_profile(self, profiles, imdbid=None):
''' Updates quality profile name
names: dict of profile names. k:v is currentname:newname
imdbid: str imdbid of movie to change <default None>
Changes movie quality profiles from k in names to v in names
If imdbid is passed will change only one movie, otherwise changes
all movies where profile == k
If imdbid is passed and names contains more than one k:v pair, submits changes
using v from the first dict entry. This is unreliable, so just submit one.
Executes two loops.
First changes qualities to temporary value.
Then changes tmp values to target values.
This way you can swap two names without them all becoming one.
'''
profiles = json.loads(profiles)
if imdbid:
q = profiles.values()[0]
if not self.sql.update('MOVIES', 'quality', q, 'imdbid', imdbid):
return json.dumps({'response': False, 'error': 'Unable to update {} to quality {}'.format(imdbid, q)})
else:
return json.dumps({'response': True, 'Message': '{} changed to {}'.format(imdbid, q)})
else:
tmp_qualities = {}
for k, v in profiles.items():
q = b16encode(v.encode('ascii')).decode('ascii')
if not self.sql.update('MOVIES', 'quality', q, 'quality', k):
return json.dumps({'response': False, 'error': 'Unable to change {} to temporary quality {}'.format(k, q)})
else:
tmp_qualities[q] = v
for k, v in tmp_qualities.items():
if not self.sql.update('MOVIES', 'quality', v, 'quality', k):
return json.dumps({'response': False, 'error': 'Unable to change temporary quality {} to {}'.format(k, v)})
if not self.sql.update('MOVIES', 'backlog', 0, 'quality', k):
return json.dumps({'response': False, 'error': 'Unable to set backlog flag. Manual backlog search required for affected titles.'})
return json.dumps({'response': True, 'message': 'Quality profiles updated.'})
@cherrypy.expose
def get_kodi_movies(self, url):
''' Gets list of movies from kodi server
url: str url of kodi server
Calls Kodi import method to gather list.
Returns list of dicts of movies
'''
return json.dumps(library.ImportKodiLibrary.get_movies(url))
@cherrypy.expose
def import_kodi(self, movies):
''' Imports list of movies in movies from Kodi library
movie_data: JSON list of dicts of movies
Iterates through movies and gathers all required metadata.
If imported, generates and stores fake search result.
Creates dict {'success': [], 'failed': []} and
appends movie data to the appropriate list.
Yeilds generator object of json objects
'''
movies = json.loads(movies)
fake_results = []
success = []
length = len(movies)
progress = 1
print(movies[0])
for movie in movies:
tmdb_data = self.tmdb._search_imdbid(movie['imdbid'])[0]
if not tmdb_data.get('id'):
yield json.dumps({'response': False, 'movie': movie, 'progress': [progress, length], 'reason': 'Unable to find {} on TMDB.'.format(movie['imdbid'])})
progress += 1
continue
else:
movie['id'] = tmdb_data['id']
movie['size'] = 0
movie['status'] = 'Disabled'
response = json.loads(self.add_wanted_movie(json.dumps(movie)))
if response['response'] is True:
fake_results.append(searchresults.generate_simulacrum(movie))
yield json.dumps({'response': True, 'progress': [progress, length], 'movie': movie})
progress += 1
success.append(movie)
continue
else:
yield json.dumps({'response': False, 'movie': movie, 'progress': [progress, length], 'reason': response['error']})
progress += 1
continue
fake_results = self.score.score(fake_results, imported=True)
for i in success:
score = None
for r in fake_results:
if r['imdbid'] == i['imdbid']:
score = r['score']
break
if score:
self.sql.update('MOVIES', 'finished_score', score, 'imdbid', i['imdbid'])
self.sql.write_search_results(fake_results)
import_kodi._cp_config = {'response.stream': True}
    @cherrypy.expose
    def get_plex_libraries(self, server, username, password):
        ''' Gets list of libraries from Plex server.

        server: str url of plex server
        username: str plex username
        password: str plex password

        Caches the auth token in core.CONFIG per-server so subsequent
        requests don't need credentials.

        Returns str json.dumps of library list, or a fail dict when no
        token can be obtained.
        '''
        if core.CONFIG['External']['plex_tokens'].get(server) is None:
            token = library.ImportPlexLibrary.get_token(username, password)
            if token is None:
                return json.dumps({'response': False, 'error': 'Unable to get Plex token.'})
            else:
                # Persist the new token so it survives restarts.
                core.CONFIG['External']['plex_tokens'][server] = token
                self.config.dump(core.CONFIG)
        else:
            token = core.CONFIG['External']['plex_tokens'][server]

        return json.dumps(library.ImportPlexLibrary.get_libraries(server, token))
@cherrypy.expose
def upload_plex_csv(self, file_input):
try:
csv_text = file_input.file.read().decode('utf-8')
file_input.file.close()
except Exception as e: #noqa
print(e)
return
if csv_text:
return json.dumps(library.ImportPlexLibrary.read_csv(csv_text))
return
    @cherrypy.expose
    def import_plex_csv(self, movie_data, corrected_movies):
        '''Import a list of movies generated by the CSV import.

        movie_data: JSON list of dicts of movie info ready to import
        corrected_movies: JSON list of dicts of user-corrected movie info

        Re-fetches TMDB metadata for each corrected movie before merging it
        into movie_data. Each successfully added movie gets a generated
        ("simulacrum") search result stored so it appears as a finished,
        scored release.

        Yields JSON progress objects (this endpoint streams its response).
        '''
        movie_data = json.loads(movie_data)
        corrected_movies = json.loads(corrected_movies)
        fake_results = []
        success = []
        # Total item count drives the [progress, length] pairs sent to the UI.
        length = len(movie_data) + len(corrected_movies)
        progress = 1
        if corrected_movies:
            for data in corrected_movies:
                tmdbdata = self.tmdb._search_imdbid(data['imdbid'])[0]
                if tmdbdata:
                    data['year'] = tmdbdata['release_date'][:4]
                    data.update(tmdbdata)
                    movie_data.append(data)
                else:
                    logging.error('Unable to find {} on TMDB.'.format(data['imdbid']))
                    yield json.dumps({'response': False, 'movie': data, 'progress': [progress, length], 'reason': 'Unable to find {} on TMDB.'.format(data['imdbid'])})
                    progress += 1
        for movie in movie_data:
            if movie['imdbid']:
                movie['status'] = 'Disabled'
                tmdb_data = self.tmdb._search_imdbid(movie['imdbid'])[0]
                movie.update(tmdb_data)
                response = json.loads(self.add_wanted_movie(json.dumps(movie)))
                if response['response'] is True:
                    fake_results.append(searchresults.generate_simulacrum(movie))
                    yield json.dumps({'response': True, 'progress': [progress, length], 'movie': movie})
                    progress += 1
                    success.append(movie)
                    continue
                else:
                    yield json.dumps({'response': False, 'movie': movie, 'progress': [progress, length], 'reason': response['error']})
                    progress += 1
                    continue
            else:
                logging.error('Unable to find {} on TMDB.'.format(movie['imdbid']))
                yield json.dumps({'response': False, 'movie': movie, 'progress': [progress, length], 'reason': 'IMDB ID invalid or missing.'})
                progress += 1
        # Score the generated results and copy each score back onto its movie row.
        fake_results = self.score.score(fake_results, imported=True)
        for i in success:
            score = None
            for r in fake_results:
                if r['imdbid'] == i['imdbid']:
                    score = r['score']
                    break
            if score:
                self.sql.update('MOVIES', 'finished_score', score, 'imdbid', i['imdbid'])
        self.sql.write_search_results(fake_results)
    # NOTE(review): this tags import_dir (defined elsewhere) as streaming,
    # not import_plex_csv — confirm this is intentional and not a paste slip.
    import_dir._cp_config = {'response.stream': True}
    @cherrypy.expose
    def get_cp_movies(self, url, apikey):
        """Fetch the movie list from a CouchPotato server.

        Builds the CP API endpoint ``<url>/api/<apikey>/movie.list/`` and
        returns the JSON produced by library.ImportCPLibrary.get_movies.
        """
        url = '{}/api/{}/movie.list/'.format(url, apikey)
        return json.dumps(library.ImportCPLibrary.get_movies(url))
    @cherrypy.expose
    def import_cp_movies(self, wanted, finished):
        """Import 'wanted' and 'finished' movie lists from CouchPotato.

        wanted/finished: JSON lists of movie dicts (full metadata already
        present, so add_wanted_movie is called with full_metadata=True).
        Finished movies additionally get a generated ("simulacrum") search
        result stored and scored. Yields JSON progress objects; this
        endpoint streams its response.
        """
        wanted = json.loads(wanted)
        finished = json.loads(finished)
        fake_results = []
        success = []
        length = len(wanted) + len(finished)
        progress = 1
        for movie in wanted:
            response = json.loads(self.add_wanted_movie(json.dumps(movie), full_metadata=True))
            if response['response'] is True:
                yield json.dumps({'response': True, 'progress': [progress, length], 'movie': movie})
                progress += 1
                continue
            else:
                yield json.dumps({'response': False, 'movie': movie, 'progress': [progress, length], 'reason': response['error']})
                progress += 1
                continue
        for movie in finished:
            response = json.loads(self.add_wanted_movie(json.dumps(movie), full_metadata=True))
            if response['response'] is True:
                # Finished movies get a fake "found" release so they show as downloaded.
                fake_results.append(searchresults.generate_simulacrum(movie))
                yield json.dumps({'response': True, 'progress': [progress, length], 'movie': movie})
                progress += 1
                success.append(movie)
                continue
            else:
                yield json.dumps({'response': False, 'movie': movie, 'progress': [progress, length], 'reason': response['error']})
                progress += 1
                continue
        # Score the generated results and copy each score back onto its movie row.
        fake_results = self.score.score(fake_results, imported=True)
        for i in success:
            score = None
            for r in fake_results:
                if r['imdbid'] == i['imdbid']:
                    score = r['score']
                    break
            if score:
                self.sql.update('MOVIES', 'finished_score', score, 'imdbid', i['imdbid'])
        self.sql.write_search_results(fake_results)
    import_cp_movies._cp_config = {'response.stream': True}
| [
"nosmokingbandit@gmail.com"
] | nosmokingbandit@gmail.com |
1693648b68f78fb99cb31510d45a2e773805ba5e | 76bc53eb4a31c2de1ffe47fc960d78ba2ac16c58 | /src/qdmdealer/funcs/funcs.py | 8f61ca032b40e65e80d931b2095642dd4878c325 | [
"MIT"
] | permissive | CaoRX/sdd | 83b7327b1e0f54cdb3398e8e03f49c906362e68b | 4aa6612a195f579d6bef26a3e5cd9f1ce2c66e7c | refs/heads/main | 2023-08-18T20:37:16.144714 | 2021-09-30T23:09:05 | 2021-09-30T23:09:05 | 412,092,536 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,056 | py | import numpy as np
import string
import random
import matplotlib.transforms as mplTransforms
import math
import os
from copy import deepcopy
def errorMessage(message, loc=None):
    """Format *message* as an error string, optionally tagged with location *loc*."""
    if loc is not None:
        return 'Error in {}: {}.'.format(loc, message)
    return 'Error: {}.'.format(message)
def warningMessage(message, loc=None):
    """Format *message* as a warning string, optionally tagged with location *loc*."""
    if loc is not None:
        return 'Warning in {}: {}.'.format(loc, message)
    return 'Warning: {}.'.format(message)
def npzFileExist(seed, port):
    """Stub existence check for an .npz results file.

    NOTE(review): always returns False and ignores both arguments — confirm
    whether a real file-system check (e.g. os.path.isfile) was intended.
    """
    return False
def sizeOfShape(shape):
    """Return the element count implied by *shape*: the product of its entries (1 for empty)."""
    total = 1
    for dim in shape:
        total *= dim
    return total
def loadNpzObject(obj):
if not isinstance(obj, np.ndarray):
return obj
if len(obj.shape) > 0:
return obj
resObj = obj.item()
if isinstance(resObj, dict):
for key in resObj:
resObj[key] = loadNpzObject(resObj[key])
return resObj
def loadNpzFile(fileName):
if not os.path.isfile(fileName):
return dict()
# print('loading {}'.format(fileName))
data = np.load(fileName, allow_pickle = True)
# print(fileName)
# data = np.load(fileName)
# print('npz keys = {}'.format(list(data.keys())))
res = dict([])
# print(data)
# for key in data:
# # print(key, data[key])
# print(key)
for key in data:
# if (key == 'FP'):
# continue
# print(key)
res[key] = loadNpzObject(data[key])
# print(res)
return res
def binderCumulant(orderValue):
return 1.0 - (orderValue['4'] / (3.0 * (orderValue['2'] ** 2)))
def binderSeries(x):
return np.average(x ** 4) / (np.average(x ** 2) ** 2)
def binderError(orderValue, orderError):
# print('value = {}, error = {}'.format(orderValue, orderError))
error2 = orderError['2'] / orderValue['2']
error4 = orderError['4'] / orderValue['4']
divValue = (orderValue['4'] / (3.0 * (orderValue['2'] ** 2)))
errorDiv = (error2 * 2 + error4) * divValue
return errorDiv
def binderErrorByOP(op2, op4):
error2 = op2['error'] / op2['value']
error4 = op4['error'] / op4['value']
divValue = (op4['value'] / (3.0 * (op2['value'] ** 2)))
errorDiv = (error2 * 2 + error4) * divValue
return errorDiv
def getRawDataFolder(seed):
return '../../build/data/{}/'.format(seed)
def randomString(n=10):
    """Return a string of *n* randomly chosen ASCII letters."""
    picks = [random.choice(string.ascii_letters) for _ in range(n)]
    return ''.join(picks)
def getNewString(knownSet):
res = randomString()
while (res in knownSet):
res = randomString()
return res
def rangeFilter(key, low, high):
return lambda x, y: (key in x) and ((x[key] > low) and (x[key] < high))
def setFilter(key, valueSet):
# print(valueSet)
valueSet = set(valueSet)
return lambda x, y: (key in x) and (x[key] in valueSet)
def tupleFilter(key, valueSet):
def inSet(x):
x = tuple(x)
print('wn = {}'.format(x))
# print(tuple(x), valueSet)
for value in valueSet:
if (value == x):
return True
return False
return lambda x, y: (key in x) and (inSet(x[key]))
def inclusiveFilter(key, filter):
return lambda x, y: (not (key in x)) or filter(x, y)
def doubleSetFilter(key, valueSet, eps = 1e-5):
def inSet(x):
for value in valueSet:
if (np.abs(x - value) < eps):
return True
return False
return lambda x, y: ((key in x) and (inSet(x[key]))) or ((not (key in x)) and (inSet(0.0)))
def notContainOrZeroFilter(key, eps = 1e-5):
return lambda x, y: (key not in x) or (abs(x[key]) < eps)
def inverseFilter(filter):
return lambda x, y: not filter(x, y)
def flagFilter(key, val):
if (val == 0):
return notContainOrZeroFilter(key)
else:
return inverseFilter(notContainOrZeroFilter(key))
def addrFilter(seed = None, port = None):
if (seed is not None):
seed = str(seed)
if (port is not None):
port = str(port)
print('seed = {}, port = {}'.format(seed, port))
def filter(data, addr):
return ((seed is None) or (addr['seed'] == seed)) and ((port is None) or (addr['port'] == port))
return filter
def getTimeStamp(seed):
    """Extract the integer timestamp from a seed string of form '<stamp>-<id>' or '<stamp>'."""
    stamp, _, _ = seed.partition('-')
    return int(stamp)
def timeStampFilter(seed, maxSeed = None):
print('applying time stamp filter after time {}'.format(seed))
def filter(data, addr):
# print('data = {}, addr = {}'.format(data, addr))
if (maxSeed is None):
if (seed is None):
return True
else:
return (getTimeStamp(addr['seed']) > seed)
elif (seed is None):
return getTimeStamp(addr['seed'] < maxSeed)
else:
return (getTimeStamp(addr['seed']) > seed) and (getTimeStamp(addr['seed']) < maxSeed)
return filter
def floatEqual(a, b, eps=1e-7):
    """Return whether *a* and *b* agree to within absolute tolerance *eps*."""
    difference = np.abs(a - b)
    return difference < eps
def floatValueFilter(key, value):
return lambda x, y: (key in x) and (floatEqual(x[key], value))
def describerEqual(desc1, desc2):
# describer has N, L, V, T four arguments
# N should not be considered
# intPartEqual = (desc1['L'] == desc2['L'])
# floatPartEqual = (floatEqual(desc1['T'], desc2['T']) and floatEqual(desc1['V'], desc2['V']))
# m1 = 0.0
# m2 = 0.0
# if ('m' in desc1):
# m1 = desc1['m']
# if ('m' in desc2):
# m2 = desc2['m']
# # mEqual = (('m' not in desc1) and ('m' not in desc2)) or (floatEqual(desc1['m'], desc2['m']))
# mEqual = floatEqual(m1, m2)
# return (intPartEqual and floatPartEqual and mEqual)
intParaSet = ['L', 'wf']
floatParaSet = ['T', 'V', 'm', 'b', 'mu']
intEqualFlag = True
for intPara in intParaSet:
val1 = 0
val2 = 0
if (intPara in desc1):
val1 = desc1[intPara]
if (intPara in desc2):
val2 = desc2[intPara]
intEqualFlag = intEqualFlag and (val1 == val2)
if (not intEqualFlag):
return False
floatEqualFlag = True
for floatPara in floatParaSet:
val1 = 0.0
val2 = 0.0
if (floatPara in desc1):
val1 = desc1[floatPara]
if (floatPara in desc2):
val2 = desc2[floatPara]
floatEqualFlag = floatEqualFlag and floatEqual(val1, val2)
if (not floatEqualFlag):
return False
return True
def describerEqualKeys(desc1, desc2, keys):
res = True
if ('T' in keys):
res = (res and (floatEqual(desc1['T'], desc2['T'])))
if ('V' in keys):
res = (res and (floatEqual(desc1['V'], desc2['V'])))
if ('L' in keys):
res = (res and (desc1['L'] == desc2['L']))
return res
def describerToStr(describer, legendShown = None):
# print('describer = {}'.format(describer))
# print(legendShown)
floatKeySet = ['T', 'V', 'm']
keySet = list(describer.keys())
if (legendShown is not None):
keySet = legendShown
res = ""
for key in keySet:
if (key in floatKeySet):
if (key == 'm') and (np.abs(describer[key]) < 1e-5):
continue
# print('key = {}'.format(key))
keyPart = '{} = '.format(key)
valuePart = '{:.2f}, '.format(describer[key]).rstrip('0')
res += keyPart
res += valuePart
else:
res += '{} = {}, '.format(key, describer[key])
# print('res = {}'.format(res))
return res[:-2] # remove the last comma
def polishDescriber(describer):
floatKeySet = ['T', 'V']
res = dict([])
for key in describer:
if (key in floatKeySet):
res[key] = '{:.3f}'.format(describer[key]).rstrip('0')
else:
res[key] = describer[key]
return res
def sortListBy(keyList, dataList):
    """Sort the two parallel lists by keyList; return (sortedKeys, sortedData)."""
    pairs = sorted(zip(keyList, dataList), key=lambda pair: pair[0])
    sortedKeys = [key for key, _ in pairs]
    sortedData = [data for _, data in pairs]
    return sortedKeys, sortedData
def decodeLogLine(log):
    """Parse one 'key = value' log line.

    Returns (ok, key, value); ok is False for comment lines (first
    non-whitespace character is '#') and for lines with no '=' sign.
    """
    stripped = log.strip()
    if stripped.startswith('#'):
        return False, '', ''
    key, sep, value = stripped.partition('=')
    if not sep:
        return False, '', ''
    return True, key.strip(), value.strip()
def parseInt(s):
    """Parse the leading run of decimal digits of *s* (after stripping whitespace).

    Raises ValueError (via int('')) when the string does not begin with a
    digit, matching the original behavior.
    """
    stripped = s.strip()
    leading = []
    for ch in stripped:
        if ch not in string.digits:
            break
        leading.append(ch)
    return int(''.join(leading))
def makeNormalFunc(average, error):
def normal():
return average + error * np.random.randn()
return normal
def identityFunc(x):
return x
def decodeFolder(setSeed):
pLoc = setSeed.find('-')
if (pLoc == -1):
setNo = -1
seed = int(setSeed)
else:
setNo = int(setSeed[:pLoc])
seed = int(setSeed[(pLoc + 1):])
return setNo, seed
def encodeFolder(setNo, seedNo):
if (setNo == -1):
return '{}'.format(seedNo)
else:
return '{}-{}'.format(setNo, seedNo)
def getIntHistogram(data):
    """Histogram of non-negative integer values.

    Returns an array of length max(data) + 1 whose entry v counts how many
    times value v occurs in *data* (values are truncated with int()).
    Fix: np.int was removed in NumPy 1.24 — use the builtin int instead.
    """
    print(data)  # debug trace kept from the original implementation
    values = [int(d) for d in data]
    maxV = np.max(values)
    counts = np.zeros(maxV + 1)
    for v in values:
        counts[v] += 1
    return counts
def histogram2D(x):
    """2-D histogram of integer coordinate pairs.

    x: array-like of shape (n, 2). Returns an integer array of shape
    (max(x[:,0]) + 1, max(x[:,1]) + 1) whose [i][j] entry counts rows (i, j).
    Fix: np.int was removed in NumPy 1.24 — use the builtin int instead.
    """
    d0 = np.array([int(a) for a in x[:, 0]])
    d1 = np.array([int(a) for a in x[:, 1]])
    # int dtype matches the original's np.array-of-int-lists return value.
    counts = np.zeros((np.max(d0) + 1, np.max(d1) + 1), dtype=int)
    for i, j in zip(d0, d1):
        counts[i][j] += 1
    return counts
def discreteHistogram(x):
    """Radial histogram of 2-D integer points.

    x: array-like of shape (n, 2). Counts occurrences of each (i, j) cell,
    then merges cells whose Euclidean distance from the origin is
    (float-)equal. Returns (distances, counts) as sorted parallel arrays.
    Fix: np.int was removed in NumPy 1.24 — use the builtin int instead.
    """
    d0 = np.array([int(a) for a in x[:, 0]])
    d1 = np.array([int(a) for a in x[:, 1]])
    max0 = np.max(d0)
    max1 = np.max(d1)
    # 2-D occupancy counts, indexed [value0][value1].
    sets = []
    for i in range(max0 + 1):
        sets.append([])
        for _ in range(max1 + 1):
            sets[i].append(0)
    for i in range(len(x)):
        sets[d0[i]][d1[i]] += 1
    # Merge cells at (float-)equal radial distance into one histogram bin.
    xAxis = []
    yAxis = []
    for i in range(max0 + 1):
        for j in range(max1 + 1):
            if sets[i][j] == 0:
                continue
            distance = np.sqrt(i * i + j * j)
            flag = False
            for k in range(len(xAxis)):
                if floatEqual(distance, xAxis[k]):
                    yAxis[k] += sets[i][j]
                    flag = True
                    break
            if not flag:
                xAxis.append(distance)
                yAxis.append(sets[i][j])
    xAxis, yAxis = sortListBy(xAxis, yAxis)
    return np.array(xAxis), np.array(yAxis)
def combineHistogram(h1, h2):
x1 = h1[0]
y1 = h1[1]
x2 = h2[0]
y2 = h2[1]
resX = list(np.copy(x1))
resY = list(np.copy(y1))
# print('resX = {}, resY = {}'.format(resX, resY))
# print('x2 = {}, y2 = {}'.format(x2, y2))
for i in range(len(x2)):
flag = False
for j in range(len(resX)):
if (floatEqual(x2[i], resX[j])):
resY[j] += y2[i]
flag = True
if (not flag):
resX.append(x2[i])
resY.append(y2[i])
resX, resY = sortListBy(resX, resY)
return np.array([resX, resY])
def combine2DHistogram(a1, a2):
if (a1.shape == a2.shape):
return a1 + a2
h1, w1 = a1.shape
h2, w2 = a2.shape
maxH = np.max([h1, h2])
maxW = np.max([w1, w2])
res = np.zeros((maxH, maxW))
res[:h1, :w1] += a1
res[:h2, :w2] += a2
return res
def log2DHistogram(a, lims = None):
if (lims is None):
aa = np.zeros(a.shape)
else:
aShape = a.shape
limX = max([lims, aShape[0]])
limY = max([lims, aShape[1]])
aa = np.zeros((limX, limY))
for i in range(len(a)):
for j in range(len(a[0])):
if (a[i][j] > 0):
aa[i][j] = np.log(a[i][j])
return aa
def combine2DHistogramList(hList):
# for h in hList:
# print(h.shape)
res = hList[0]
for i in range(1, len(hList)):
res = combine2DHistogram(res, hList[i])
return res
def combineHistogramList(hList):
res = hList[0]
for i in range(1, len(hList)):
res = combineHistogram(res, hList[i])
histList = []
for i in range(len(res[0])):
histList.append([])
for hL in hList:
for j in range(len(res[0])):
flag = False
for i in range(len(hL[0])):
if (floatEqual(hL[0][i], res[0][j])):
histList[j].append(hL[1][i])
flag = True
if (not flag):
histList[j].append(0)
for i in range(len(histList)):
histList[i] = np.array(histList[i])
# print(histList)
# print(hList)
return res, histList
def setErrorBar(x):
return np.std(x) / np.sqrt(len(x) - 1)
def getkbr(x, y):
    """Least-squares line fit of y on x: return slope k, intercept b, correlation r."""
    meanX = np.average(x)
    meanY = np.average(y)
    meanXX = np.average(x ** 2)
    meanYY = np.average(y ** 2)
    meanXY = np.average(x * y)
    covXY = meanXY - meanX * meanY
    varX = meanXX - meanX ** 2
    varY = meanYY - meanY * meanY
    k = covXY / varX
    b = meanY - meanX * k
    r = covXY / np.sqrt(varX * varY)
    return k, b, r
def analyzeHistogram(x, y, label):
n = len(x) // 2
xFrag = np.array(x[n:])
yFrag = np.log(np.array(y[n:]))
# xFrag = np.log(np.array(x[1:n]))
# # yFrag = np.log(np.array(y[1:n]))
# yFrag = np.array(y[1:n])
k, b, r = getkbr(xFrag, yFrag)
print('{}: k = {}, b = {}, r = {}'.format(label, k, b, r))
return k
def maximumGroup(groups, excep = None):
maxLength = 0
res = []
maxKey = None
for key in groups:
if (key == excep):
continue
if (len(groups[key]) > maxLength):
maxLength = len(groups[key])
res = groups[key]
maxKey = key
return maxKey, res
def dealCheckerBoard(x):
resX = []
resY = []
for i in range(len(x)):
if (i % 2 == 1):
resX.append(i)
resY.append(x[i])
return {'x': resX, 'y': resY}
def getFitting(data):
x = np.array(data['x'])
y = np.array(data['y'])
if (len(x) < 8):
print('data not enough for fitting, stop.')
return None
print('data length = {}'.format(len(x)))
# startIdx = int(len(x) * 0.25)
# endIdx = int(len(x) * 0.75)
startIdx = 2
endIdx = 8
xFit = np.log(x[startIdx : endIdx])
yFit = y[startIdx : endIdx]
k, b, r = getkbr(xFit, yFit)
return {'k': k, 'b': b, 'r': r}
# return
def isTimeObsName(dataName):
return (dataName.find('time') != -1)
def getList(x):
if (isinstance(x, int) or isinstance(x, float)):
return [x]
else:
return x
def getSingle(x):
if (isinstance(x, int) or isinstance(x, float)):
return x
else:
return x[0]
def makeFitting(x, y):
    """Least-squares line fit of y on x; returns (slope, intercept, correlation)."""
    xArr = np.array(x)
    yArr = np.array(y)
    meanX = np.average(xArr)
    meanY = np.average(yArr)
    cov = np.average(xArr * yArr) - meanX * meanY
    varX = np.average(xArr * xArr) - meanX * meanX
    varY = np.average(yArr * yArr) - meanY * meanY
    slope = cov / varX
    intercept = meanY - slope * meanX
    corr = cov / np.sqrt(varX * varY)
    return slope, intercept, corr
def keyCombine(key1, val1, key2, val2, funcs):
key1, val1 = sortListBy(key1, val1)
key2, val2 = sortListBy(key2, val2)
cur1 = 0
cur2 = 0
resKey = []
resVal = []
while (cur1 < len(key1)) and (cur2 < len(key2)):
if (floatEqual(key1[cur1], key2[cur2])):
resKey.append(key1[cur1])
resVal.append(funcs(val1[cur1], val2[cur2]))
cur1 += 1
cur2 += 1
continue
if (key1[cur1] < key2[cur2]):
cur1 += 1
else:
cur2 += 1
return resKey, resVal
def getTotalSize(sizeTuple):
res = 1
for x in sizeTuple:
res *= x
return res
def decodeSnapshot(snapshot, totalBondN, snapshotBit):
res = []
bondCount = 0
while (bondCount < totalBondN):
for x in snapshot:
for bit in range(snapshotBit):
resBit = (x >> bit) & 1
if (resBit == 1):
res.append(True)
else:
res.append(False)
bondCount += 1
if (bondCount >= totalBondN):
return res
return res
def decode2DDimer(idx, h, w):
direct = idx // (h * w)
x = (idx - direct * h * w) // w
y = idx % w
return (direct, x, y)
def decode2DCorr(x, L):
# xx = (x - (-0.0625)) / 0.25
xx = x
return np.reshape(xx[:(L * L)], (L, L)), np.reshape(xx[(L * L):], (L, L))
def zipLists(*args):
    """Cartesian product of the argument lists, as a list of tuples.

    Replaces the hand-rolled recursion with itertools.product, which yields
    exactly the same tuples in the same (first-argument-slowest) order.
    At least one iterable must be supplied, matching the original contract.
    """
    import itertools  # local import: itertools is not in this module's header
    assert (len(args) > 0), "Error: length of args must be positive for zipLists(*args)."
    return list(itertools.product(*args))
def generateList(x, type):
if (isinstance(x, type)):
return [x]
else:
return x
def adjustAxixRange(ax, xRange = 1.0, yRange = 1.0):
pos = ax.get_position()
x0, x1, y0, y1 = pos.x0, pos.x1, pos.y0, pos.y1
newX1 = x0 + (x1 - x0) * xRange
newY1 = y0 + (y1 - y0) * yRange
ax.set_position(mplTransforms.Bbox([[x0, y0], [newX1, newY1]]))
def setNewAxisAtRight(fig, ax, xRange = 0.90, yScale = 0.5):
pos = ax.get_position()
x0, x1, y0, y1 = pos.x0, pos.x1, pos.y0, pos.y1
# print(x0, x1, y0, y1)
newX1 = x0 + (x1 - x0) * xRange
# return fig.add_axes([newX1, y0, x1 - newX1, y1 - y0])
# return fig.add_axes([newX1, x1 - newX1, y0, y1 - y0])
newAx = fig.add_axes([newX1, y0, x1 - newX1, y1 - y0])
# newAx.set_position(mplTransforms.Bbox([[x0, y0], [newX1, y1]]))
return newAx
def addColorBar(ax, fig, im, adjustRange = 0.85, colorBarRange = 0.90):
cax = setNewAxisAtRight(fig, ax, xRange = colorBarRange)
adjustAxixRange(ax, xRange = adjustRange)
fig.colorbar(im, cax = cax)
def normalizeArray(x, errorBar = None):
xArray = np.array(x)
lowV = np.min(xArray)
highV = np.max(xArray)
resX = ((xArray - lowV) / (highV - lowV)) * 2.0 - 1.0
if (errorBar is not None):
errorBar = np.array(errorBar) * 2.0 / (highV - lowV)
return resX, errorBar
else:
return resX
def floorInt(x, eps = 1e-8):
return math.floor(x + eps)
def flipAppend(x):
return np.array(list(x) + list(-x))
def resortMVariables(x):
return np.array([x[1], -x[0], x[3], -x[2]])
def getM0011(x):
return x[0] - x[1] - x[2] + x[3], x[0] + x[1] - x[2] - x[3]
def makeHist(lim, bins, x):
data, _, _ = np.histogram2d(x = x[:, 0], y = x[:, 1], bins = bins, range = lim)
return {'lim0': np.array([lim[0][0], lim[1][0]]), 'lim1': np.array([lim[0][1], lim[1][1]]), 'steps': np.array(bins), 'data': data.flatten()}
# return {'lim0': self.data[dataName + ' bins begin'], 'lim1': self.data[dataName + ' bins end'], 'steps': self.data[dataName + ' bins'], 'data': self.data[dataName + ' bins data']}
def weightedBinder(x, weights, dim=2):
    """Weighted Binder cumulant of samples *x* under probability *weights*.

    dim == 0 returns the raw ratio <x^4>/<x^2>^2; otherwise returns
    1 - <x^4> / (dim * <x^2>^2).
    """
    samples = np.array(x)
    probs = np.array(weights)
    secondMoment = np.sum((samples ** 2) * probs)
    fourthMoment = np.sum((samples ** 4) * probs)
    if dim == 0:
        return fourthMoment / (secondMoment ** 2)
    return 1.0 - (fourthMoment / (secondMoment * secondMoment * dim))
def binderPreparation(psiX, psiY):
# first consider theta series
# theta = np.arctan2(psiX, psiY)
# cos2Theta = np.cos(2 * theta)
# cos(2theta) = cos^2(theta) - sin^2(theta) = (psiX^2 - psiY^2) / |psi|^2
cos2Theta = ((psiX ** 2) - (psiY ** 2)) / (psiX ** 2 + psiY ** 2)
atanhCos2Theta = np.arctanh(cos2Theta)
atanAtanhCos2Theta = (2.0 / np.pi) * np.arctan((2.0 / np.pi) * atanhCos2Theta)
# print(psiX, psiY, atanhCos2Theta, atanAtanhCos2Theta)
return atanhCos2Theta, atanAtanhCos2Theta
def floatAllTheSame(l):
if len(l) == 0:
return True
v = l[0]
for vv in l:
if not floatEqual(v, vv):
return False
return True
def anyFilter(filters):
def filter(x):
for f in filters:
if f(x):
return True
return False
return filter
def allFilter(filters):
def filter(x):
for f in filters:
if not f(x):
return False
return True
return filter
def makeFilter(filter, key, exclude):
if exclude:
return lambda x: (key in x) and filter(x)
else:
return lambda x: (key not in x) or filter(x)
def singleTypeName(typeName):
if (typeName == 'value') or (typeName == 'set'):
return 'value'
else:
return 'fvalue'
def toBool(s):
assert s in ["True", "False"], errorMessage('only True and False can be transferred to bool, {} obtained.'.format(s), loc = 'qdmdealer.funcs.funcs.toBool')
if s == 'True':
return True
else:
return False
def dictEqual(a, b, eps = 1e-7):
aKeys = sorted(list(a.keys()))
bKeys = sorted(list(b.keys()))
if aKeys != bKeys:
return False
for key in aKeys:
if isinstance(a[key], float):
if not floatEqual(a[key], b[key], eps):
return False
else:
if not (a[key] == b[key]):
return False
return True
def dictExcept(d, outKeys):
res = dict()
for key in d:
if not (key in outKeys):
res[key] = deepcopy(d[key])
return res
def makeLabel(d):
keys = sorted(d.keys())
return ', '.join('{} = {}'.format(key, d[key]) for key in keys)
def isAllNone(l):
for x in l:
if x is not None:
return False
return True
def accumulate(a, b):
if a is None:
return b
else:
return a + b | [
"umi@pku.edu.cn"
] | umi@pku.edu.cn |
22e5796ee03a7f306b39e2dc6a0b0794f1c1080e | a1d7c3ea902aa5f2860bd2b9598f65ada73671b7 | /Potentiometer.py | 4ca39069fda671a6698f0bcdd518879e97293f37 | [] | no_license | CytronTH/maker-pi-pico | bf06ccaeeb8424b6994e557c42aa5aafa7332503 | 903b595173bbaa3b4da0ee02772a6fd87658c769 | refs/heads/main | 2023-07-29T09:47:29.919243 | 2021-09-06T07:39:35 | 2021-09-06T07:39:35 | 399,522,213 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 442 | py | import machine
import time
potentiometer = machine.ADC(27) # กำหนดใช้งาน Pin ที่ 27 เป็น Analog Output
def convert(x, in_min, in_max, out_min, out_max):
    """Linearly remap *x* from [in_min, in_max] onto [out_min, out_max].

    Integer (floor-division) variant of Arduino's map() helper.
    """
    input_span = in_max - in_min
    output_span = out_max - out_min
    return (x - in_min) * output_span // input_span + out_min
while True:
x = potentiometer.read_u16()
y = convert(x,272,65535,0,28)
for z in range(y):
machine.Pin(z).value(1)
time.sleep_ms(50) | [
"th.support@cytron.io"
] | th.support@cytron.io |
4d2410e25561f2533e66d52d636baa56e0afd35f | 0fccee4c738449f5e0a8f52ea5acabf51db0e910 | /genfragments/ThirteenTeV/Hadronizer/Hadronizer_TuneCUETP8M1_13TeV_MLM_5f_max4j_qCut20_LHE_pythia8_cff.py | cfe434b64d6fbd2e269942ca29b5e7b694b91261 | [] | no_license | cms-sw/genproductions | f308ffaf3586c19b29853db40e6d662e937940ff | dd3d3a3826343d4f75ec36b4662b6e9ff1f270f4 | refs/heads/master | 2023-08-30T17:26:02.581596 | 2023-08-29T14:53:43 | 2023-08-29T14:53:43 | 11,424,867 | 69 | 987 | null | 2023-09-14T12:41:28 | 2013-07-15T14:18:33 | Python | UTF-8 | Python | false | false | 1,538 | py | import FWCore.ParameterSet.Config as cms
from Configuration.Generator.Pythia8CommonSettings_cfi import *
from Configuration.Generator.Pythia8CUEP8M1Settings_cfi import *
# Pythia8 hadronizer for LHE events with MLM jet matching (qCut = 20,
# 5-flavour scheme, up to 4 extra partons), CUETP8M1 tune, 13 TeV.
generator = cms.EDFilter("Pythia8HadronizerFilter",
                         maxEventsToPrint = cms.untracked.int32(1),
                         pythiaPylistVerbosity = cms.untracked.int32(1),
                         filterEfficiency = cms.untracked.double(1.0),
                         pythiaHepMCVerbosity = cms.untracked.bool(False),
                         comEnergy = cms.double(13000.),
                         PythiaParameters = cms.PSet(
        pythia8CommonSettingsBlock,
        pythia8CUEP8M1SettingsBlock,
        processParameters = cms.vstring(
            'JetMatching:setMad = off',
            'JetMatching:scheme = 1',
            'JetMatching:merge = on',
            'JetMatching:jetAlgorithm = 2',
            'JetMatching:etaJetMax = 5.',
            'JetMatching:coneRadius = 1.',
            'JetMatching:slowJetPower = 1',
            'JetMatching:qCut = 20.', #this is the actual merging scale
            'JetMatching:nQmatch = 5', #4 corresponds to 4-flavour scheme (no matching of b-quarks), 5 for 5-flavour scheme
            'JetMatching:nJetMax = 4', #number of partons in born matrix element for highest multiplicity
            'JetMatching:doShowerKt = off', #off for MLM matching, turn on for shower-kT matching
            ),
        parameterSets = cms.vstring('pythia8CommonSettings',
                                    'pythia8CUEP8M1Settings',
                                    'processParameters',
                                    )
    )
)
| [
"YoungDo.Oh@cern.ch"
] | YoungDo.Oh@cern.ch |
cff553459a9e293fc45181572d58c0948c7b2fb5 | d6202e2fff0f0b22094a8bc383c3744cdcda6000 | /doc/gaussian_worker.py | 8947faa117d156fa87ff8bfc2d62fbcee2ef81ee | [
"MIT"
] | permissive | pstjohn/bde | dc8e639527d281dade935141b06fbedc5958e4c8 | 5677af8dcbb992c7888746aa018302e6fb04e67d | refs/heads/master | 2022-07-16T02:17:59.151174 | 2022-06-30T19:52:01 | 2022-06-30T19:52:01 | 168,446,254 | 27 | 9 | MIT | 2021-09-07T16:20:45 | 2019-01-31T02:00:54 | Python | UTF-8 | Python | false | false | 2,626 | py | import psycopg2
import time
import logging
import random
import subprocess
import socket
dbparams = {
# In this example file, the database connection parameters (server, password, etc),
# has been removed. This file is mainly to show an example of how a SQL database
# was used to queue and dispatch Gaussian calculations.
}
from bde.gaussian import GaussianRunner
def run_optimization():
    """Claim one queued compound, run its Gaussian job, and record the outcome.

    Atomically selects the lowest-id compound with status 'not started'
    (the CTE uses FOR UPDATE so concurrent workers cannot claim the same
    row), marks it 'in progress' tagged with this hostname, then runs the
    Gaussian calculation and writes either the results ('finished') or the
    error text ('error') back to the row. Returns the compound id.
    """
    with psycopg2.connect(**dbparams) as conn:
        with conn.cursor() as cur:
            cur.execute("""
            WITH cte AS (
            SELECT id, smiles, type
            FROM compound
            WHERE status = 'not started'
            ORDER BY id
            LIMIT 1
            FOR UPDATE
            )
            UPDATE compound SET status = 'in progress',
            queued_at = CURRENT_TIMESTAMP,
            node = %s
            FROM cte
            WHERE compound.id = cte.id
            RETURNING compound.id, compound.smiles, compound.type;
            """, (socket.gethostname(),))
            cid, smiles, type_ = cur.fetchone()
    # psycopg2's connection context manager commits but does not close,
    # so the connection is closed explicitly before the (long) calculation.
    conn.close()
    try:
        runner = GaussianRunner(smiles, cid, type_)
        molstr, enthalpy, freeenergy, scfenergy, log = runner.process()
        with psycopg2.connect(**dbparams) as conn:
            with conn.cursor() as cur:
                cur.execute("""
                UPDATE compound
                SET status = 'finished',
                mol = %s, enthalpy = %s,
                freeenergy = %s, scfenergy= %s,
                run_at = CURRENT_TIMESTAMP,
                logfile = %s
                WHERE id = %s;""",
                (molstr, enthalpy, freeenergy, scfenergy, log, cid))
        conn.close()
    except Exception as ex:
        # Any failure in the calculation is recorded on the row so the
        # compound is not retried and the error text is preserved.
        with psycopg2.connect(**dbparams) as conn:
            with conn.cursor() as cur:
                cur.execute("""
                UPDATE compound
                SET status = 'error',
                error = %s,
                run_at = CURRENT_TIMESTAMP
                WHERE id = %s;""", (str(ex), cid))
        conn.close()
    return cid
if __name__ == "__main__":
    start_time = time.time()
    # Add a random delay to avoid race conditions at the start of the job
    time.sleep(random.uniform(0, 1*60))
    # Keep pulling compounds off the queue for at most 9 days (86400 s/day).
    while (time.time() - start_time) < (86400 * 9): # Time limit in seconds
        try:
            run_optimization()
        except psycopg2.OperationalError:
            # Transient DB trouble: back off a randomized 5-65 s and retry.
            time.sleep(5 + random.uniform(0, 60))
"peterc.stjohn@gmail.com"
] | peterc.stjohn@gmail.com |
ab4cc2ced9b88c86db0f85086bcd798fda618f3f | a64d5e89a841c881ae9f593035755277472239c5 | /code/code_history/CIFAR_training.py | e671fe35adea264c36a253ab0078bc62c9b0a7a3 | [] | no_license | swu32/Spatial_Vision_Net | 4070a7a9acc13b4a8fe141d7615231d842f0e545 | cf41e52875b7504b5d22ee199f29ecdfa0599265 | refs/heads/master | 2022-05-12T20:41:22.690234 | 2019-11-08T14:25:45 | 2019-11-08T14:25:45 | 220,479,875 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,651 | py | '''training function that merges previous repeated codes'''
import argparse
import os
import random
import shutil
import time
import warnings
import sys
import numpy as np
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim as optim
import torch.multiprocessing as mp
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision
import model as models
this_net = "SV_net_II_low_frequency"
# this_net “baseline_net”,"SV_net_I","SV_net_I_low_frequency","SV_net_II","SV_net_II_low_frequency"
'''
all the net architecture to choose from:
"baseline_net": a resnet18 implemented on CIFAR10
correspond to.
CIFAR10_baseline_model_best.pth &
CIFAR10_baseline_checkpoint.pth
"SV_net_I: first version of spatial vision net, with Spatial vision part as front end and resnet18 as backend,
correspond to
CIFAR10_normalization_model_best.pth.tar, &
CIFAR10_normalization_checkpoint.pth.tar"
"SV_net_I_low_frequency": same thing with SV_net_I, but employing only the lower half of the frequency filters,
correspond to
CIFAR10_low_freq_model_best.pth &
CIFAR10_low_freq_checkpoint.pth &
"SV_net_II": A simplified and a more updated version of SV_net_I. The spatial vision frontend has filter responses
separated between positive and negative, and the spatial vision backend has similar structure instead of Resnet18, for
the purpose of overcoming the overfitting behavior of the resnet18 backend.
SV_net_II_model_best.pth.tar &
SV_net_II_model_checkpoint.pth.tar
"SV_net_II_low_frequency":
SV_net_II_low_frequency_model_best.pth.tar &
SV_net_II_low_frequency_model_checkpoint.pth.tar
'''
model_names = sorted(name for name in models.__dict__
if name.islower() and not name.startswith("__")
and callable(models.__dict__[name]))
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
parser.add_argument('data', metavar='DIR',
help='path to dataset')
parser.add_argument('-a', '--arch', metavar='ARCH', default='resnet18',
choices=model_names,
help='model architecture: ' +
' | '.join(model_names) +
' (default: resnet18)')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('--epochs', default=80, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=4, type=int,
metavar='N',
help='mini-batch size (default: 256), this is the total '
'batch size of all GPUs on the current node when '
'using Data Parallel or Distributed Data Parallel')
parser.add_argument('--lr', '--learning-rate', default=0.001, type=float,
metavar='LR', help='initial learning rate', dest='lr')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--wd', '--weight-decay', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)',
dest='weight_decay')
parser.add_argument('-p', '--print-freq', default=1, type=int,
metavar='N', help='print frequency (default: 10)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
help='evaluate model on validation set')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
help='use pre-trained model')
parser.add_argument('--world-size', default=-1, type=int,
help='number of nodes for distributed training')
parser.add_argument('--rank', default=-1, type=int,
help='node rank for distributed training')
parser.add_argument('--seed', default=None, type=int,
help='seed for initializing training. ')
best_acc1 = 0
def main():
    """Parse CLI arguments, optionally seed the RNGs, then run the worker."""
    cli_args = parser.parse_args()
    seed = cli_args.seed
    if seed is not None:
        # Deterministic mode: seed both RNGs and pin cuDNN kernel selection.
        random.seed(seed)
        torch.manual_seed(seed)
        cudnn.deterministic = True
        warnings.warn('You have chosen to seed training. '
                      'This will turn on the CUDNN deterministic setting, '
                      'which can slow down your training considerably! '
                      'You may see unexpected behavior when restarting '
                      'from checkpoints.')
    # Single-node run: the world size is fixed at one process.
    cli_args.world_size = 1
    # Queried for parity with the upstream example; the value is unused here.
    ngpus_per_node = torch.cuda.device_count()
    main_worker(cli_args)
def main_worker(args):
    """Build the selected network variant, optionally resume from a checkpoint,
    then train/evaluate on CIFAR-10 for ``args.epochs`` epochs.

    Relies on module globals defined elsewhere in this file: ``best_acc1``,
    ``this_net`` (network-variant selector string) and the ``models`` module
    providing the custom Spatial Vision Net constructors — TODO confirm.
    """
    global best_acc1
    # create model
    if args.pretrained:
        print("=> using pre-trained model '{}'".format(args.arch))
        model = models.__dict__[args.arch](pretrained=True)
    else:
        print("=> creating model '{}'".format(args.arch))
    print('batch size is ',args.batch_size)
    # Select the architecture from the global variant switch.
    if this_net == "baseline_net":
        print('Employing Vanilla ResNet')
        model = models.resnet18(num_classes=10)
    elif this_net == "SV_net_I":
        print('Employing the first version of Spatial Vision Net')
        model = models.v1resnet18(batchsize = args.batch_size, n_freq = 12, n_orient = 8, n_phase = 2, imsize = 32,num_classes=10)
    elif this_net == "SV_net_I_low_frequency":
        print('Employing the low frequency version of Spatial Vision Net')
        model = models.low_freq_resnet18(batchsize = args.batch_size, n_freq = 6, n_orient = 8, n_phase = 2, imsize = 32,num_classes=10)
    elif this_net == "SV_net_II":
        print('Employing simple net')
        model = models.simple_net(batchsize = args.batch_size, n_freq = 12, n_orient = 8, n_phase = 2, imsize = 32,num_classes=10)
    elif this_net == "SV_net_II_low_frequency":
        print('Employing simple net with low frequency')
        model = models.low_freq_simple_net(batchsize = args.batch_size, n_freq = 12, n_orient = 8, n_phase = 2, imsize = 32,num_classes=10)
    record_file_name = 'performance_record'+ this_net + '0621.npy'
    # this_net options: "baseline_net", "SV_net_I", "SV_net_I_low_frequency",
    # "SV_net_II", "SV_net_II_low_frequency"
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # model.to(device)
    model = torch.nn.DataParallel(model).to(device)
    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().to(device)
    # optimizer = torch.optim.SGD(model.parameters(), args.lr,
    #                             momentum=args.momentum,
    #                             weight_decay=args.weight_decay)
    optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=0.9, weight_decay=5e-4)
    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_acc1 = checkpoint['accuracy']
            device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
            # best_acc1 may be from a checkpoint from a different GPU
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))
    cudnn.benchmark = True
    # """TODO: add gray values"""
    # The baseline keeps RGB input; all Spatial Vision variants use grayscale.
    if this_net == "baseline_net":
        transform_train = transforms.Compose([
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
        ])
        transform_test = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
        ])
        # transform = transforms.Compose(
        #     [transforms.ToTensor(),
        #      transforms.Normalize((0.5,0.5,0.5), (0.5,0.5,0.5))])
    else:
        transform_train = transforms.Compose([
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.Grayscale(),
            transforms.ToTensor(),
            transforms.Normalize((0.5,), (0.2,)),
        ])
        transform_test = transforms.Compose([
            transforms.Grayscale(),
            transforms.ToTensor(),
            transforms.Normalize((0.5,), (0.2,))
        ])
        # transform = transforms.Compose(
        #     [transforms.Grayscale(),transforms.ToTensor(),
        #      transforms.Normalize((0.5,), (0.2,))])
    trainset = torchvision.datasets.CIFAR10(root='./data', train=True,
                                            download=False, transform=transform_train)
    train_loader = torch.utils.data.DataLoader(trainset, batch_size=args.batch_size,
                                               shuffle=False, num_workers=2)
    val_set = torchvision.datasets.CIFAR10(root='./data', train=False,
                                           download=False, transform=transform_test)
    val_loader = torch.utils.data.DataLoader(val_set, batch_size=args.batch_size,
                                             shuffle=False, num_workers=2)
    classes = ('plane', 'car', 'bird', 'cat',
               'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
    if args.evaluate:
        validate(val_loader, model, criterion, args)
        return
    if args.start_epoch== 0: # initiate training
        performance_record = {'epoch': [], 'train_acc': [], 'test_acc': []}
    else:
        # Resuming: reload the accumulated per-epoch record.
        # NOTE(review): newer NumPy needs np.load(..., allow_pickle=True) here.
        performance_record = np.load(record_file_name).item()
    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch, args)
        # train for one epoch
        acc1_train = train(train_loader, model, criterion, optimizer, epoch, args)
        # evaluate on validation set
        acc1 = validate(val_loader, model, criterion, args)
        performance_record['epoch'].append(epoch)
        performance_record['train_acc'].append(acc1_train)
        performance_record['test_acc'].append(acc1)
        # remember best acc1 and save checkpoint
        is_best = acc1 > best_acc1
        best_acc1 = max(acc1, best_acc1)
        np.save(record_file_name, performance_record)
        save_checkpoint({
            'epoch': epoch + 1,
            'state_dict': model.state_dict(),
            'accuracy': best_acc1,
            'optimizer' : optimizer.state_dict(),
        }, is_best)
def train(train_loader, model, criterion, optimizer, epoch, args):
    """Run one training epoch over ``train_loader``.

    Returns the epoch's average top-1 accuracy (``top1.avg``); loss/accuracy
    meters are weighted by the actual batch size.
    """
    batch_time = AverageMeter('Time', ':6.3f')
    data_time = AverageMeter('Data', ':6.3f')
    losses = AverageMeter('Loss', ':.4e')
    top1 = AverageMeter('Acc@1', ':6.2f')
    top5 = AverageMeter('Acc@5', ':6.2f')
    progress = ProgressMeter(len(train_loader), batch_time, data_time, losses, top1,
                             top5, prefix="Epoch: [{}]".format(epoch))
    # switch to train mode
    model.train()
    end = time.time()
    for i, (input, target) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)
        # TODO:
        # NOTE(review): device is re-derived on every iteration; could be hoisted.
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        # input = input.cuda(args.gpu, non_blocking=True)
        # target = target.cuda(args.gpu, non_blocking=True)
        input = input.to(device).contiguous()
        target = target.to(device)
        # compute output
        #print(input.dtype)
        output = model(input)
        loss = criterion(output, target)
        # measure accuracy and record loss
        acc1, acc5 = accuracy(output, target, topk=(1, 5))
        losses.update(loss.item(), input.size(0))
        top1.update(acc1[0], input.size(0))
        top5.update(acc5[0], input.size(0))
        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        if i % args.print_freq == 0:
            progress.print(i)
    return top1.avg
def validate(val_loader, model, criterion, args):
    """Evaluate ``model`` on ``val_loader`` under ``torch.no_grad()``.

    Prints per-batch progress and a final summary line; returns the average
    top-1 accuracy (``top1.avg``).
    """
    batch_time = AverageMeter('Time', ':6.3f')
    losses = AverageMeter('Loss', ':.4e')
    top1 = AverageMeter('Acc@1', ':6.2f')
    top5 = AverageMeter('Acc@5', ':6.2f')
    progress = ProgressMeter(len(val_loader), batch_time, losses, top1, top5,
                             prefix='Test: ')
    # switch to evaluate mode
    model.eval()
    with torch.no_grad():
        end = time.time()
        for i, (input, target) in enumerate(val_loader):
            # TODO:
            # input = input.cuda(args.gpu, non_blocking=True)
            # target = target.cuda(args.gpu, non_blocking=True)
            # NOTE(review): device is re-derived on every iteration; could be hoisted.
            device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
            # input = input.cuda(args.gpu, non_blocking=True)
            # target = target.cuda(args.gpu, non_blocking=True)
            input = input.to(device)
            target = target.to(device)
            # compute output
            output = model(input)
            loss = criterion(output, target)
            # measure accuracy and record loss
            acc1, acc5 = accuracy(output, target, topk=(1, 5))
            losses.update(loss.item(), input.size(0))
            top1.update(acc1[0], input.size(0))
            top5.update(acc5[0], input.size(0))
            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()
            if i % args.print_freq == 0:
                progress.print(i)
        # TODO: this should also be done with the ProgressMeter
        print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'
              .format(top1=top1, top5=top5))
    return top1.avg
def save_checkpoint(state, is_best):
    """Persist *state* for the active network variant.

    The checkpoint file name is chosen from the global ``this_net`` selector;
    when *is_best* is true, the checkpoint is also copied to the variant's
    "best model" file.
    """
    # (checkpoint file, best-model file) keyed by network variant.
    targets = {
        "baseline_net": ('CIFAR10_baseline_checkpoint.pth.tar',
                         'CIFAR10_baseline_model_best.pth.tar'),
        "SV_net_I": ('CIFAR10_normalization_checkpoint.pth.tar',
                     'CIFAR10_normalization_model_best.pth.tar'),
        "SV_net_I_low_frequency": ('CIFAR10_low_freq_model_checkpoint.pth.tar',
                                   'CIFAR10_low_freq_model_best.pth.tar'),
        "SV_net_II": ('CIFAR10_SV_net_II_checkpoint.pth.tar',
                      'CIFAR10_SV_net_II_best.pth.tar'),
        "SV_net_II_low_frequency": ('CIFAR10_SV_net_II_low_frequency_checkpoint.pth.tar',
                                    'CIFAR10_SV_net_II_low_frequency_best.pth.tar'),
    }
    # As before, an unknown variant leaves the names unbound and raises
    # NameError at torch.save().
    if this_net in targets:
        filename, best_file_name = targets[this_net]
    torch.save(state, filename)
    if is_best:
        shutil.copyfile(filename, best_file_name)
# this_net options: "baseline_net", "SV_net_I", "SV_net_I_low_frequency", "SV_net_II", "SV_net_II_low_frequency"
class AverageMeter(object):
    """Tracks the latest value plus a running sum/count and their mean."""

    def __init__(self, name, fmt=':f'):
        self.name = name
        self.fmt = fmt
        self.reset()

    def reset(self):
        """Clear all statistics back to zero."""
        self.val = 0
        self.sum = 0
        self.count = 0
        self.avg = 0

    def update(self, val, n=1):
        """Record *val* observed *n* times and refresh the running mean."""
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count
        self.val = val

    def __str__(self):
        # e.g. "Loss 0.1234 (0.2345)" using the supplied format spec.
        pattern = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
        return pattern.format(**vars(self))
class ProgressMeter(object):
    """Formats and prints a one-line progress report for a batch loop."""

    def __init__(self, num_batches, *meters, prefix=""):
        self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
        self.meters = meters
        self.prefix = prefix

    def print(self, batch):
        # NOTE: 'print' shadows the built-in only within this class namespace.
        entries = [self.prefix + self.batch_fmtstr.format(batch)]
        entries += [str(meter) for meter in self.meters]
        print('\t'.join(entries))

    def _get_batch_fmtstr(self, num_batches):
        """Return e.g. '[{:3d}/100]' so batch indices align with the total."""
        # Fixed: the original computed len(str(num_batches // 1)); '// 1' was a no-op.
        num_digits = len(str(num_batches))
        fmt = '{:' + str(num_digits) + 'd}'
        return '[' + fmt + '/' + fmt.format(num_batches) + ']'
def adjust_learning_rate(optimizer, epoch, args):
    """Set the learning rate to the initial LR decayed by 10 every 150 epochs.

    (Docstring corrected: it previously said "every 30 epochs", but the decay
    interval used by the code is 150.)
    """
    lr = args.lr * (0.1 ** (epoch // 150))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
# Script entry point.
if __name__ == '__main__':
    main()
| [
"wushuc@student.ethz.ch"
] | wushuc@student.ethz.ch |
586460492c0f2f6d82a9ab807c143302b12e5987 | 5c9673e374740ed8c7bda0ce3633fbb65b25f12f | /contrib/seeds/makeseeds.py | bcd17b735733c233afe92893917b8cb63bc4f1ba | [
"MIT"
] | permissive | mhatta/mhattacoin | 4019fbd910946c0cc4ce3f55de54a09137ac94a7 | 90918cf665c52507f7be2c701cf42f6a122068e5 | refs/heads/master | 2021-08-16T15:00:36.500544 | 2018-08-07T17:56:48 | 2018-08-07T17:56:48 | 143,908,535 | 0 | 0 | MIT | 2020-04-01T03:16:44 | 2018-08-07T17:52:41 | C++ | UTF-8 | Python | false | false | 5,750 | py | #!/usr/bin/env python3
# Copyright (c) 2013-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Generate seeds.txt from Pieter's DNS seeder
#
# Target number of seed entries to emit, and the cap of seeds kept per ASN.
NSEEDS=512
MAX_SEEDS_PER_ASN=2
# Minimum chain height a node must report to qualify as a seed.
MIN_BLOCKS = 337600
# These are hosts that have been observed to be behaving strangely (e.g.
# aggressively connecting to every node).
SUSPICIOUS_HOSTS = {
    "130.211.129.106", "178.63.107.226",
    "83.81.130.26", "88.198.17.7", "148.251.238.178", "176.9.46.6",
    "54.173.72.127", "54.174.10.182", "54.183.64.54", "54.194.231.211",
    "54.66.214.167", "54.66.220.137", "54.67.33.14", "54.77.251.214",
    "54.94.195.96", "54.94.200.247"
}
import re
import sys
import dns.resolver
import collections
# Accepted address formats from the seeder dump: dotted IPv4, bracketed IPv6,
# and 16-character v2 .onion hostnames — each followed by a ':port' suffix.
PATTERN_IPV4 = re.compile(r"^((\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})):(\d+)$")
PATTERN_IPV6 = re.compile(r"^\[([0-9a-z:]+)\]:(\d+)$")
PATTERN_ONION = re.compile(r"^([abcdefghijklmnopqrstuvwxyz234567]{16}\.onion):(\d+)$")
PATTERN_AGENT = re.compile(r"^(/Satoshi:0.12.(0|1|99)/|/Satoshi:0.13.(0|1|2|99)/|/MHattaCoinCore:0.13.(0|1|2|99)/)$")
def parseline(line):
    """Parse one whitespace-separated seeder dump line into a node dict.

    Returns None for unparseable/uninteresting lines, otherwise a dict with
    net ('ipv4'/'ipv6'/'onion'), address, port, uptime, last success time,
    protocol version, user agent, service flags, block height and a sort key.
    """
    sline = line.split()
    if len(sline) < 11:
        return None
    m = PATTERN_IPV4.match(sline[0])
    sortkey = None
    ip = None
    if m is None:
        m = PATTERN_IPV6.match(sline[0])
        if m is None:
            m = PATTERN_ONION.match(sline[0])
            if m is None:
                return None
            else:
                net = 'onion'
                ipstr = sortkey = m.group(1)
                port = int(m.group(2))
        else:
            net = 'ipv6'
            if m.group(1) in ['::']: # Not interested in localhost
                return None
            ipstr = m.group(1)
            sortkey = ipstr # XXX parse IPv6 into number, could use name_to_ipv6 from generate-seeds
            port = int(m.group(2))
    else:
        # Do IPv4 sanity check
        ip = 0
        for i in range(0,4):
            if int(m.group(i+2)) < 0 or int(m.group(i+2)) > 255:
                return None
            # Pack the four octets into a single integer used as sort key.
            ip = ip + (int(m.group(i+2)) << (8*(3-i)))
        if ip == 0:
            return None
        net = 'ipv4'
        sortkey = ip
        ipstr = m.group(1)
        port = int(m.group(6))
    # Skip bad results.
    # NOTE(review): sline entries are strings, so this int comparison is
    # always False and the filter never fires — confirm the intended check.
    if sline[1] == 0:
        return None
    # Extract uptime %.
    uptime30 = float(sline[7][:-1])
    # Extract Unix timestamp of last success.
    lastsuccess = int(sline[2])
    # Extract protocol version.
    version = int(sline[10])
    # Extract user agent.
    agent = sline[11][1:-1]
    # Extract service flags.
    service = int(sline[9], 16)
    # Extract blocks.
    blocks = int(sline[8])
    # Construct result.
    return {
        'net': net,
        'ip': ipstr,
        'port': port,
        'ipnum': ip,
        'uptime': uptime30,
        'lastsuccess': lastsuccess,
        'version': version,
        'agent': agent,
        'service': service,
        'blocks': blocks,
        'sortkey': sortkey,
    }
def filtermultiport(ips):
    '''Filter out hosts with more nodes per IP'''
    # Group entries by their sort key (one key per host address).
    by_key = collections.defaultdict(list)
    for entry in ips:
        by_key[entry['sortkey']].append(entry)
    # Keep only hosts that appeared exactly once.
    return [group[0] for group in by_key.values() if len(group) == 1]
# Based on Greg Maxwell's seed_filter.py
def filterbyasn(ips, max_per_asn, max_total):
    """Limit IPv4 seeds to *max_per_asn* entries per ASN and *max_total*
    overall, appending all IPv6/onion entries unfiltered.

    ASNs are resolved via Team Cymru's DNS interface; lookup failures skip
    the entry with a message on stderr.
    """
    # Sift out ips by type
    ips_ipv4 = [ip for ip in ips if ip['net'] == 'ipv4']
    ips_ipv6 = [ip for ip in ips if ip['net'] == 'ipv6']
    ips_onion = [ip for ip in ips if ip['net'] == 'onion']
    # Filter IPv4 by ASN
    result = []
    asn_count = {}
    for ip in ips_ipv4:
        if len(result) == max_total:
            break
        try:
            asn = int([x.to_text() for x in dns.resolver.query('.'.join(reversed(ip['ip'].split('.'))) + '.origin.asn.cymru.com', 'TXT').response.answer][0].split('\"')[1].split(' ')[0])
            if asn not in asn_count:
                asn_count[asn] = 0
            if asn_count[asn] == max_per_asn:
                continue
            asn_count[asn] += 1
            result.append(ip)
        except Exception:
            # Fixed: was a bare 'except:', which also swallowed
            # KeyboardInterrupt/SystemExit and made the script unkillable.
            sys.stderr.write('ERR: Could not resolve ASN for "' + ip['ip'] + '"\n')
    # TODO: filter IPv6 by ASN
    # Add back non-IPv4
    result.extend(ips_ipv6)
    result.extend(ips_onion)
    return result
def main():
    """Read seeder dump lines from stdin, filter them, and print seeds.txt."""
    lines = sys.stdin.readlines()
    ips = [parseline(line) for line in lines]
    # Skip entries without a parseable address.
    ips = [ip for ip in ips if ip is not None]
    # Skip entries from suspicious hosts.
    ips = [ip for ip in ips if ip['ip'] not in SUSPICIOUS_HOSTS]
    # Enforce minimal number of blocks.
    ips = [ip for ip in ips if ip['blocks'] >= MIN_BLOCKS]
    # Require service bit 1.
    ips = [ip for ip in ips if (ip['service'] & 1) == 1]
    # Require at least 50% 30-day uptime.
    ips = [ip for ip in ips if ip['uptime'] > 50]
    # Require a known and recent user agent.
    ips = [ip for ip in ips if PATTERN_AGENT.match(ip['agent'])]
    # Sort by availability (and use last success as tie breaker)
    ips.sort(key=lambda x: (x['uptime'], x['lastsuccess'], x['ip']), reverse=True)
    # Filter out hosts with multiple bitcoin ports, these are likely abusive
    ips = filtermultiport(ips)
    # Look up ASNs and limit results, both per ASN and globally.
    ips = filterbyasn(ips, MAX_SEEDS_PER_ASN, NSEEDS)
    # Sort the results by IP address (for deterministic output).
    ips.sort(key=lambda x: (x['net'], x['sortkey']))
    for ip in ips:
        if ip['net'] == 'ipv6':
            print('[%s]:%i' % (ip['ip'], ip['port']))
        else:
            print('%s:%i' % (ip['ip'], ip['port']))
# Script entry point.
if __name__ == '__main__':
    main()
| [
"mhatta@debian.org"
] | mhatta@debian.org |
1b009bd295de43b48f18492c317f9c9ff4985f70 | 753b6390664ee7b73997f52b11042400212a3dcd | /portfolio/views.py | 6e23f351ed8d047e882723e4d84ffed45d733554 | [] | no_license | bhanotblocker/django-personal-portfolio | 81b3b3c998b609e8fa7cae32df1b90c4199892ec | ea36872d4d2b6e5ff5a19ac06911135b1b863f16 | refs/heads/master | 2022-11-05T12:23:20.261950 | 2020-06-21T17:44:43 | 2020-06-21T17:44:43 | 273,954,470 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 193 | py | from django.shortcuts import render
from .models import Project
def home(request):
    """Render the portfolio landing page with every Project."""
    # Fixed: the original passed the unbound manager method
    # ``Project.objects.all`` (no parentheses) and relied on the template
    # engine calling it; pass the evaluated queryset explicitly instead.
    projects = Project.objects.all()
    return render(request, 'portfolio/home.html', {'projects': projects})
| [
"varun.bhanot11@gmail.com"
] | varun.bhanot11@gmail.com |
6165dabd237efff4ec933c9e31490f07956f86a7 | 9b4974afd89e38ae0b8e9db9282b66b315605b66 | /Atividade2.py | fd0ab3fbd127a61be2af4ca8935280bcf3649176 | [] | no_license | mourath/Atividades-cesar | 70016c6dccd04abb6f514b4661143de490634897 | 114895091011182c60df299776709e8b667b3d9e | refs/heads/main | 2023-05-30T18:01:54.862076 | 2021-06-15T14:46:17 | 2021-06-15T14:46:17 | 377,189,548 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,662 | py | from selenium.webdriver import Chrome
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
class Page:
    """Page object for scraping blog posts and the address from cesar.school.

    Locators are (By, selector) tuples unpacked into Selenium's find calls.
    """

    def __init__(self):
        # Driver binary is expected next to the script — TODO confirm path.
        self.driver = Chrome(executable_path='./chromedriver/chromedriver.exe')
        self.url = 'https://www.cesar.school/'
        self.articles = (By.CSS_SELECTOR, 'article')
        self.school_menu = (By.XPATH, '//*[@id="menu-item-15376"]/a/span[2]')
        self.blogButton = (By.PARTIAL_LINK_TEXT, 'Blog')
        self.title = (By.CLASS_NAME, 'entry-title')
        self.posted_on = (By.CLASS_NAME, 'posted-on')
        self.author = (By.CLASS_NAME, 'author-name')
        self.page2 = (By.XPATH, '//*[@id="primary"]/div/nav/div/a[1]')
        self.onde = (By.CLASS_NAME, 'onde')
        self.aceptCookies = (By.PARTIAL_LINK_TEXT, 'Aceitar Cookies')

    def load(self):
        """Open the home page and dismiss the cookie banner if present."""
        self.driver.get(self.url)
        try:
            self.driver.find_element(*self.aceptCookies).click()
        except Exception:
            # Fixed: was a bare 'except:', which also swallowed
            # KeyboardInterrupt/SystemExit; missing banner is still ignored.
            pass

    def navigateToBlogArea(self):
        """Hover the 'School' menu entry, then click the revealed Blog link."""
        school = self.driver.find_element(*self.school_menu)
        hover = ActionChains(self.driver).move_to_element(school)
        hover.perform()
        self.driver.find_element(*self.blogButton).click()

    def navigateToPage2(self):
        self.driver.find_element(*self.page2).click()

    def getArticles(self):
        """Return all article elements on the current blog page."""
        return self.driver.find_elements(*self.articles)

    def getTitle(self, article):
        return article.find_element(*self.title).text

    def getDate(self, article):
        """Rebuild the post date as 'DD de <month> de YYYY' (Portuguese)."""
        date = article.find_element(*self.posted_on).text
        publish = date.split('\n')
        return publish[1] +' de ' + publish[0] + ' de ' + publish[2]

    def getAuthor(self, article):
        return article.find_element(*self.author).text

    def navigateToEndOfPage(self):
        """Scroll to the bottom of the page by sending END to <html>."""
        html = self.driver.find_element_by_tag_name('html')
        html.send_keys(Keys.END)

    def getAddress(self):
        """Return the address text from the 'onde' (where) section."""
        address = self.driver.find_element(*self.onde)
        return address.find_element_by_css_selector('p').text
# Drive the scraper: open the blog, jump to page 2, and report two posts.
cesar = Page()
cesar.load()
cesar.navigateToBlogArea()
cesar.navigateToPage2()
articles = cesar.getArticles()
print('\n Segundo Post \n')
print(f'Titulo {cesar.getTitle(articles[1])}')
print(f'Publicado em: {cesar.getDate(articles[1])}')
print('\nTerceiro Post: \n')
print(f'Titulo: {cesar.getTitle(articles[2])}')
print(f'Author: {cesar.getAuthor(articles[2])}')
cesar.navigateToEndOfPage()
# Fixed: output typo 'Adrress' -> 'Address'.
print(f'\nCesar School Address: {cesar.getAddress()}')
| [
"noreply@github.com"
] | noreply@github.com |
8f1779b62eb87083b8632e00214f4bd05482bb59 | 828ff5de3934fca2db547126dafc9caa3c50c24a | /VggNet/train.py | c9c78db2ae12661c15a00fb4cb3a310049be33e2 | [] | no_license | YeYaJi/Classification_Net | c6389f2334355717c782c8131319b18372b480b8 | b412e9a765e3e156b06ee4809d3b08968228fe5c | refs/heads/master | 2023-03-30T17:27:46.311572 | 2021-03-26T08:57:53 | 2021-03-26T08:57:53 | 290,160,722 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,263 | py | import numpy as np
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
import torch.nn.functional as F
import torch.optim as optim
import matplotlib.pyplot as plt
import os
from tqdm import tqdm, trange
import time
import net
import json
# ---- Hyper-parameters -------------------------------------------------------
train_batch_size = 5
test_batch_size = 5
epoch = 5
lr = 0.001
num_classes = 5
device = torch.device("cuda:0")
# Augmented training pipeline vs. deterministic evaluation pipeline.
data_transform = {
    "train": transforms.Compose([transforms.RandomResizedCrop(224),
                                 transforms.RandomHorizontalFlip(),
                                 transforms.ToTensor(),
                                 transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]),
    "test": transforms.Compose([transforms.Resize((224, 224)),
                                transforms.ToTensor(),
                                transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])}
train_dataset = torchvision.datasets.ImageFolder(root="./flower_data/train", transform=data_transform["train"])
train_loader = torch.utils.data.DataLoader(dataset=train_dataset, shuffle=True, batch_size=train_batch_size,
                                           num_workers=12,
                                           drop_last=True)
test_dataset = torchvision.datasets.ImageFolder(root="./flower_data/val", transform=data_transform["test"])
test_loader = torch.utils.data.DataLoader(dataset=test_dataset, shuffle=False, batch_size=test_batch_size,
                                          num_workers=12,
                                          drop_last=True)
train_image_num = len(train_dataset)  # total number of training images
test_image_num = len(test_dataset)  # total number of test images
print("训练图片总数=", train_image_num)
print("测试图片总数=", test_image_num)
# Inverted below into {0: 'daisy', 1: 'dandelion', 2: 'roses', 3: 'sunflowers', 4: 'tulips'}
classes = train_dataset.class_to_idx  # this yields {'daisy': 0, 'dandelion': 1, 'roses': 2, 'sunflowers': 3, 'tulips': 4}
classes_r = {}
for key, val in classes.items():
    classes_r[val] = key
# Save the index -> class-name mapping as JSON.
json_str = json.dumps(classes_r, indent=4)
with open("classes.json", "w") as classes_file:
    classes_file.write(json_str)
# Build the VGG16 feature extractor and classifier from the project's net module — TODO confirm its API.
feature_net = net.Feature_mode(net_name="vgg16")
feature = feature_net.make_feature()
model = net.Vgg(num_classes=num_classes, features=feature)
# print(list(model.parameters()))# inspect the default parameter tensors here
model.to(device)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=lr)
# ---- Training loop ----------------------------------------------------------
# Per-epoch metric histories (written to disk after training).
train_epoch_correct_rate = []
train_epoch_loss_ave = []
eval_epoch_correct_rate = []
eval_epoch_loss_ave = []
best_eval_correct_rate = 0
pbar = tqdm(range(1, epoch + 1))
step = 0
for n_epoch in pbar:
    model.train()
    # Per-epoch accumulators.
    train_loss = 0
    epoch_num_correct = 0
    # if int(n_epoch) % 5 == 0:
    #     optimizer.param_groups[0]["lr"] *= 0.1  # where lr lives inside the optimizer
    for img, label in train_loader:
        step += 1
        img = img.to(device)
        label = label.to(device)
        output = model(img)
        loss = criterion(output, label)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        train_loss += loss.item()
        # Accuracy bookkeeping: compare argmax predictions to the labels.
        _, max_index = output.max(1)
        for i in range(train_batch_size):
            if max_index[i] == label[i]:
                epoch_num_correct += 1  # number of correct predictions this epoch
    train_epoch_correct_rate.append(epoch_num_correct / (len(train_loader) * train_batch_size))
    train_epoch_loss_ave.append(train_loss / len(train_loader))
    # Progress-bar text: description on the left, metrics on the right.
    pbar.set_description("step=%d" % step, "epoch=%d" % n_epoch)
    pbar.set_postfix(loss=train_loss / len(train_loader),
                     correct=epoch_num_correct / (len(train_loader) * train_batch_size),
                     lr=optimizer.param_groups[0]["lr"])
    # Evaluate on the validation set and keep the best-accuracy weights.
    # NOTE(review): evaluation runs without torch.no_grad(), so gradients are
    # still tracked here — confirm whether that is intended.
    model.eval()
    eval_loss = 0
    eval_num_correct = 0
    for img, label in test_loader:
        img = img.to(device)
        label = label.to(device)
        output = model(img)
        loss = criterion(output, label)
        eval_loss += loss.item()
        _, max_index = output.max(1)
        for i in range(test_batch_size):
            if max_index[i] == label[i]:
                eval_num_correct += 1
    eval_correct_rate = eval_num_correct / len(test_dataset)
    eval_epoch_correct_rate.append(eval_correct_rate)
    eval_epoch_loss_ave.append(eval_loss / len(test_loader))
    # Save the weights whenever validation accuracy improves.
    if eval_correct_rate > best_eval_correct_rate:
        best_eval_correct_rate = eval_correct_rate
        torch.save(model.state_dict(), "./weights.pth")
# ---- Persist per-epoch metric histories ------------------------------------
# One value per line per file.  Same four files and identical contents as
# before; the four copy-pasted save blocks are collapsed into a single loop.
metric_files = [
    ("train_epoch_correct_rate", train_epoch_correct_rate),
    ("train_epoch_loss_ave", train_epoch_loss_ave),
    ("test_epoch_correct_rate", eval_epoch_correct_rate),
    ("test_epoch_loss_ave", eval_epoch_loss_ave),
]
for file_name, values in metric_files:
    # Remove any stale file first so append mode starts from scratch.
    if file_name in os.listdir("./"):
        os.remove("./" + file_name)
    with open("./" + file_name, "a") as f:
        for value in values:
            f.write(str(value) + "\n")
# Report the network's total and trainable parameter counts.
def get_parameter_number(net):
    """Return {'Total': ..., 'Trainable': ...} parameter counts for *net*."""
    total = 0
    trainable = 0
    for p in net.parameters():
        count = p.numel()
        total += count
        if p.requires_grad:
            trainable += count
    return {'Total': total, 'Trainable': trainable}
# Report the model's parameter counts once at the end of the run.
parameter_number = get_parameter_number(model)
print("总参数量=", parameter_number)
| [
"704494891@qq.com"
] | 704494891@qq.com |
51d153adcfff38752ebbc5f4f74aad40c9d8b643 | 9da684da01d8d7a46951fdde40a944b267137d1c | /projeto_tg/views.py | 3d3bf66f5bff7d6e048853665123c33648c3f8d6 | [] | no_license | josejonatasoliveira/letspart-backend | a86b9c37251389109ddd54aca46e74ea686a930b | 7719a2a42b54bf47834ed9059ee013f9f0581a48 | refs/heads/master | 2023-08-02T22:53:51.895962 | 2021-09-24T21:54:58 | 2021-09-24T21:54:58 | 410,111,066 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,207 | py | from django.shortcuts import render
from django.http import HttpResponse
from django.forms.models import model_to_dict
from projeto_tg.evento.models import Evento
from django.core.paginator import Paginator
from haystack.query import SearchQuerySet
from projeto_tg.cart.models import Cart
import json
import datetime
def index_view(request, template="index.html"):
    """Render the landing page with two paginations of all events plus the cart."""
    cart = Cart(request)
    list_events = Evento.objects.all()
    # Attach an abbreviated month name (e.g. "Jun") for template display.
    for event in list_events:
        event.month = datetime.date(event.date.year, event.date.month, event.date.day).strftime("%b")
    paginator = Paginator(list_events, 3)
    paginator_1 = Paginator(list_events, 7)
    page = request.GET.get('page')
    events = paginator.get_page(page)
    events_show = paginator_1.get_page(page)
    # NOTE(review): the first event's id is zeroed for the template — purpose
    # unclear; confirm before removing.  Guarded so an empty events table no
    # longer raises IndexError.
    if events.object_list:
        events[0].id = 0
    out = {
        'events': events,
        'events_show': events_show,
        'cart': cart
    }
    return render(request, template, out)
def get_events(request):
    """Return one 7-item page of events as JSON.

    The page number comes from the ``page`` query parameter; each event is
    serialized with model_to_dict, with dates stringified and the image path
    flattened to its storage name.
    """
    list_events = Evento.objects.all()
    # Attach an abbreviated month name (e.g. "Jun") for display.
    for event in list_events:
        event.month = datetime.date(event.date.year, event.date.month, event.date.day).strftime("%b")
    paginator = Paginator(list_events, 7)
    page = request.GET.get('page')
    _events = paginator.get_page(page)
    results = []
    for event in _events:
        res = model_to_dict(event)
        # Replace non-JSON-serializable fields with plain strings.
        res['image_file'] = event.image_file.name
        res['date'] = str(event.date)
        res['start_date'] = str(event.start_date)
        res['end_date'] = str(event.end_date)
        results.append(res)
    the_data = json.dumps({
        'results': results
    })
    return HttpResponse(the_data, content_type='application/json')
def autocomplete(request):
    """Return JSON title suggestions matching the ``q`` query-string term."""
    term = request.GET.get('q', '')
    matches = SearchQuerySet().autocomplete(text_auto=term)
    payload = {'results': [match.title for match in matches]}
    return HttpResponse(json.dumps(payload), content_type='application/json')
def autocomplete_city(request):
    """Return JSON "title - sigla" city suggestions for the ``q`` term."""
    term = request.GET.get('q', '')
    matches = SearchQuerySet().autocomplete(text_auto=term)
    payload = {'results': [f"{match.title} - {match.sigla}" for match in matches]}
    return HttpResponse(json.dumps(payload), content_type='application/json')
| [
"jose.jonatas@triasoftware.com.br"
] | jose.jonatas@triasoftware.com.br |
eae6d7708433536367bc9b2cb96ce49711facb5d | 2ebc85f7f34a459d69ff412f956b43ab2472590f | /backend/tasker_business/migrations/0001_initial.py | 95053422e99e4ae9ece3cd837ff4b1db4b389baf | [] | no_license | crowdbotics-apps/mobile-car-wash-23107 | 4ea678f1c88fe4c96eb498535e4fb14e60110ae0 | 96b057e5989a8b5dbb1267f93c0a34f57a72d636 | refs/heads/master | 2023-01-19T21:07:11.993601 | 2020-12-01T12:55:21 | 2020-12-01T12:55:21 | 317,537,397 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,617 | py | # Generated by Django 2.2.17 on 2020-12-01 12:53
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial schema for the tasker_business app.

    NOTE: generated by Django (makemigrations); avoid hand-editing the
    field definitions below.
    """

    initial = True

    # Apps whose initial migrations must run first (FK targets below).
    dependencies = [
        ('task_category', '0001_initial'),
        ('task_profile', '0001_initial'),
    ]

    operations = [
        # A bookable date/time window.
        migrations.CreateModel(
            name='Timeslot',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date', models.DateField()),
                ('start_time', models.TimeField()),
                ('end_time', models.TimeField()),
            ],
        ),
        # A skill offered by a tasker, categorized and priced.
        migrations.CreateModel(
            name='TaskerSkill',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('rate', models.FloatField()),
                ('description', models.TextField()),
                ('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='taskerskill_category', to='task_category.Category')),
                ('subcategory', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='taskerskill_subcategory', to='task_category.Subcategory')),
                ('tasker', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='taskerskill_tasker', to='task_profile.TaskerProfile')),
            ],
        ),
        # One-to-one availability record linking a tasker to timeslots.
        migrations.CreateModel(
            name='TaskerAvailability',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('tasker', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='taskeravailability_tasker', to='task_profile.TaskerProfile')),
                ('timeslots', models.ManyToManyField(related_name='taskeravailability_timeslots', to='tasker_business.Timeslot')),
            ],
        ),
        # A photo attached to a tasker's business profile.
        migrations.CreateModel(
            name='BusinessPhoto',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('photo', models.URLField()),
                ('description', models.TextField()),
                ('tasker', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='businessphoto_tasker', to='task_profile.TaskerProfile')),
            ],
        ),
    ]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
7c34ee9abf7d161f0ed99a550b1c759fb9004831 | b08fb323546e6394efd7a25b05d96f05f011c958 | /Python/Learning/类属性权限控制.py | 359be3434ec581c6bd34d7138d5b28d9ba95d805 | [] | no_license | 874656645/HelloWorld | d31775cf33b43cd56d4ca9271f8888936816349f | 78e74b05a445907b821648cf75182c3bbc45077a | refs/heads/master | 2021-06-26T07:11:53.879318 | 2018-06-17T14:48:46 | 2018-06-17T14:48:46 | 96,090,938 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 391 | py | # 请给Person类的__init__方法中添加name和score参数,并把score绑定到__score属性上,看看外部是否能访问到。
class Person(object):
    """Exercise class: 'name' is public, 'score' is bound to a private attribute."""
    def __init__(self, name, score):
        self.name = name
        # The double-underscore prefix triggers name mangling: the attribute
        # is stored as _Person__score, so 'p.__score' raises AttributeError
        # outside the class.
        self.__score = score
p = Person('Bob', 59)
try:
print(p.name)
print(p.__score)
except AttributeError as error:
print('AttibuteError: %s' % (error)) | [
"874656645@qq.com"
] | 874656645@qq.com |
7fd1443b3568efb23315191f9f8f19d689e86ff0 | bc809d7a0eebf797880e185fd278b58c5c1121ed | /S09/price_sort/price_input.py | 1c985c5fb022d824162506fd1abeb00e8fab5153 | [] | no_license | amirjodeiri/Assignment | ff9ed63b8e443febd98d105ed6952595542f1b97 | dada9d4c1e0455aeef2d6aa0b9d396bada097560 | refs/heads/master | 2022-02-22T10:17:28.872124 | 2019-07-16T15:29:48 | 2019-07-16T15:29:48 | 189,040,094 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,482 | py | btc = [[1561902600, 10959.6, 11212.11, 11143.11, 11121.08, 929.44817251],
[1561902300, 11077.26, 11479.33, 11479.33, 11140.81, 886.41080044],
[1561902000, 11442.84, 11505.46, 11480.46, 11479.98, 86.03022024],
[1561901700, 11462.33, 11527.1, 11482.6, 11483.29, 41.03120901],
[1561901400, 11419.67, 11497, 11435.47, 11483.09, 83.42012416],
[1561901100, 11409.27, 11508.08, 11508.08, 11431.21, 197.72811129],
[1561900800, 11500, 11540, 11503.88, 11508.18, 67.94620835],
[1561900500, 11504.46, 11554.94, 11554.93, 11504.46, 47.0379533],
[1561900200, 11552.18, 11585.75, 11576.26, 11554.94, 30.7343114],
[1561899900, 11572, 11593.99, 11572.01, 11579.8, 48.63145232],
[1561899600, 11525, 11579.88, 11537.61, 11572, 29.40859974],
[1561899300, 11536.95, 11574.42, 11574, 11543.05, 34.27031572],
[1561899000, 11521.67, 11590.22, 11554.13, 11574.33, 89.15201536],
[1561898700, 11500.01, 11557.2, 11507.4, 11549.22, 42.82079364],
[1561898400, 11504.15, 11545.61, 11532.7, 11508.37, 19.84869808],
[1561898100, 11528.58, 11572.46, 11561.47, 11533.6, 34.15227832],
[1561897800, 11480.03, 11571.56, 11521, 11561.8, 114.36290996],
[1561897500, 11518.04, 11589.99, 11589.99, 11525, 77.12229769],
[1561897200, 11540.57, 11590.59, 11540.57, 11580.18, 18.35796281],
[1561896900, 11521.07, 11590.78, 11590.78, 11551.34, 59.84167431],
[1561896600, 11564.87, 11598.78, 11576.6, 11590.79, 128.75250381],
[1561896300, 11532.1, 11581.13, 11537.74, 11577, 41.54137239],
[1561896000, 11464, 11543, 11516.59, 11538.44, 125.46346081],
[1561895700, 11492.43, 11531.97, 11512.1, 11510.63, 41.07543784],
[1561895400, 11512.1, 11562.27, 11558.9, 11512.1, 63.48943606],
[1561895100, 11505, 11560, 11505.02, 11558.43, 68.29266453],
[1561894800, 11457.52, 11536.39, 11469.94, 11505.02, 60.49309321],
[1561894500, 11456.02, 11515.5, 11509.05, 11470.46, 93.57173042],
[1561894200, 11501.1, 11547.67, 11535.5, 11509.15, 42.41455078],
[1561893900, 11523.3, 11550, 11545.01, 11533.6, 18.48690896],
[1561893600, 11510.08, 11554.05, 11553.67, 11545.88, 46.27238785],
[1561893300, 11541.46, 11575.57, 11553.08, 11553.67, 29.15527801],
[1561893000, 11538.02, 11581.37, 11539.59, 11551, 25.67409221],
[1561892700, 11515.26, 11564.66, 11548.08, 11536.4, 46.86360297],
[1561892400, 11490.04, 11548.09, 11505.22, 11548.09, 53.56720469],
[1561892100, 11491.42, 11530, 11520.01, 11506.99, 44.54656059],
[1561891800, 11470, 11529.96, 11484.39, 11520, 115.21411413],
[1561891500, 11420.02, 11520, 11514.56, 11482.13, 148.00466246],
[1561891200, 11415.77, 11523.82, 11510, 11519.43, 385.57363683],
[1561890900, 11510, 11580.05, 11579.74, 11510, 131.07907625],
[1561890600, 11534.57, 11598.43, 11596, 11579.25, 83.05556398],
[1561890300, 11560.87, 11609.97, 11565.68, 11600.83, 26.13282225],
[1561890000, 11559.77, 11607.99, 11571.86, 11559.77, 36.48317598],
[1561889700, 11527.44, 11635.39, 11628.48, 11567.1, 144.6428261],
[1561889400, 11545.74, 11732.54, 11732.54, 11625.4, 540.44531511],
[1561889100, 11720.01, 11774.58, 11774.58, 11721.35, 19.85713484],
[1561888800, 11739.7, 11775.75, 11755.98, 11770.21, 32.27706949],
[1561888500, 11755.42, 11795.74, 11781.92, 11755.47, 9.34673013],
[1561888200, 11774.48, 11817.67, 11811.34, 11775.94, 20.3438993],
[1561887900, 11788.58, 11815.51, 11788.58, 11809.42, 7.53518971],
[1561887600, 11772.87, 11824.94, 11787.12, 11790.67, 52.55564798],
[1561887300, 11780, 11827.51, 11820.99, 11794.46, 74.51803957],
[1561887000, 11808.73, 11844.99, 11816.78, 11820.99, 30.41890089],
[1561886700, 11786.64, 11833, 11799.12, 11810.92, 29.56626797],
[1561886400, 11759.51, 11824.3, 11759.51, 11807.66, 64.78667078],
[1561886100, 11699.62, 11768.72, 11712.98, 11758.24, 53.43397482],
[1561885800, 11711.81, 11740.01, 11740.01, 11715, 11.80544947],
[1561885500, 11740, 11765.08, 11756.03, 11740.01, 10.86138345],
[1561885200, 11758.32, 11777.55, 11777.54, 11760, 8.23490025],
[1561884900, 11762.58, 11780, 11762.59, 11777.81, 13.00819374],
[1561884600, 11757.31, 11783.09, 11770.99, 11762.59, 21.98037245],
[1561884300, 11735.72, 11770.99, 11736.48, 11770.99, 8.1816722],
[1561884000, 11730, 11769.92, 11769.47, 11730, 17.73025517],
[1561883700, 11751, 11785.38, 11766.5, 11769.31, 14.41827908],
[1561883400, 11760.65, 11793.35, 11788.13, 11766.98, 22.63031199],
[1561883100, 11745.3, 11787.8, 11765.16, 11784.57, 15.81773741],
[1561882800, 11744.76, 11778.74, 11744.76, 11764.69, 6.75772412],
[1561882500, 11742.92, 11783.8, 11761.29, 11749.55, 24.21018058],
[1561882200, 11723.12, 11770.91, 11745.55, 11761.43, 21.8517236],
[1561881900, 11704.24, 11755.17, 11710.75, 11738.34, 49.23265751],
[1561881600, 11654.89, 11714.64, 11690.47, 11710.78, 50.31049926],
[1561881300, 11618.54, 11704.19, 11685.36, 11690.46, 136.73184225],
[1561881000, 11682.36, 11748.29, 11704.8, 11686.58, 113.23711751],
[1561880700, 11657.6, 11720, 11720, 11696.62, 87.47656297],
[1561880400, 11677.15, 11731.94, 11728.83, 11720, 146.69664318],
[1561880100, 11707.12, 11763.24, 11763.24, 11729.81, 85.63220531],
[1561879800, 11725.93, 11768.9, 11767.11, 11766.33, 52.27348762],
[1561879500, 11726.41, 11767.21, 11735, 11767.11, 38.66399904],
[1561879200, 11723.97, 11780, 11723.99, 11735.01, 79.43639871],
[1561878900, 11682.62, 11744, 11695, 11721.22, 90.43142462],
[1561878600, 11638, 11745.99, 11745.99, 11696.46, 219.71424773],
[1561878300, 11700, 11870.27, 11870.02, 11740, 619.067643],
[1561878000, 11855.05, 11915.77, 11897.46, 11880.24, 105.40841366],
[1561877700, 11885.71, 11934.44, 11916.84, 11897.47, 18.53510818],
[1561877400, 11903.21, 11939.36, 11925, 11913.05, 17.32127774],
[1561877100, 11910.53, 11959.73, 11956.67, 11918.92, 17.82555815],
[1561876800, 11926.75, 11980, 11926.75, 11955.88, 42.17447657],
[1561876500, 11925.23, 11980.68, 11925.23, 11930.56, 59.25259932],
[1561876200, 11874.79, 11939.37, 11926.95, 11924.75, 35.50413551],
[1561875900, 11907.13, 11940.87, 11936.57, 11929.92, 22.09738864],
[1561875600, 11878.82, 11939.31, 11919.24, 11933.97, 34.96155232],
[1561875300, 11916.26, 11972.68, 11945.08, 11919.07, 34.99508971],
[1561875000, 11925.11, 11947.66, 11945, 11945.3, 14.64955562],
[1561874700, 11945, 12010.45, 12010.45, 11949.93, 57.95897718],
[1561874400, 11955.19, 12018.56, 11964.61, 12009.3, 49.88448738],
[1561874100, 11922.59, 11965.95, 11922.59, 11952.38, 16.62587783],
[1561873800, 11916.25, 11961.45, 11950.04, 11921.51, 17.17366775],
[1561873500, 11946.44, 11982.89, 11969.25, 11946.44, 34.76714616],
[1561873200, 11909, 11974.45, 11909, 11973.79, 50.83168574],
[1561872900, 11897.36, 11950.72, 11950.72, 11909, 59.63962408],
[1561872600, 11851.45, 11954.14, 11936.52, 11954.01, 207.5918986],
[1561872300, 11886.12, 12039.81, 12026.68, 11943.85, 283.66392203],
[1561872000, 12018.21, 12075, 12075, 12027.39, 54.61629542],
[1561871700, 12066.53, 12098.49, 12090.06, 12072.1, 12.87243706],
[1561871400, 12080.8, 12117.62, 12095, 12090, 38.29509545],
[1561871100, 12006.03, 12095, 12015.21, 12089.45, 28.32222331],
[1561870800, 12010, 12037.73, 12027.21, 12010, 36.08034411],
[1561870500, 12026.25, 12057.01, 12037.93, 12028.74, 41.46679437],
[1561870200, 12010, 12047.38, 12028.24, 12047.38, 45.48692727],
[1561869900, 12028.18, 12055.34, 12055.34, 12029.75, 28.70985189],
[1561869600, 12050.02, 12076.31, 12051.93, 12057.14, 22.30734684],
[1561869300, 12041.75, 12063.79, 12063.79, 12051.24, 32.23638352],
[1561869000, 12061.44, 12077.41, 12070.01, 12061.44, 20.68327532],
[1561868700, 12063.99, 12090, 12076.56, 12074.9, 28.54736987],
[1561868400, 12022.48, 12079.02, 12048.37, 12076.56, 63.32685898],
[1561868100, 12048.75, 12086.63, 12062.6, 12048.75, 23.8382924],
[1561867800, 12050.17, 12094.1, 12059.73, 12057.62, 25.06378555],
[1561867500, 12033.81, 12133.57, 12128.06, 12059.73, 95.71744955],
[1561867200, 12120.98, 12148.27, 12123.16, 12128.07, 37.7846693],
[1561866900, 12075.16, 12125.79, 12122, 12122.41, 72.61273385],
[1561866600, 12121.06, 12183.92, 12183.8, 12121.83, 34.5706744],
[1561866300, 12144.48, 12200, 12144.49, 12183.8, 95.12376269],
[1561866000, 12128.8, 12162.65, 12132.87, 12144.49, 56.32606506],
[1561865700, 12125, 12141.05, 12130.06, 12132.91, 20.32334975],
[1561865400, 12088.61, 12135.09, 12096.17, 12126.95, 23.35310939],
[1561865100, 12079.26, 12111.8, 12109.8, 12095.85, 26.66838035],
[1561864800, 12098.76, 12150, 12114.01, 12110.17, 68.08426239],
[1561864500, 12069.65, 12162, 12074.68, 12109.97, 226.41174099],
[1561864200, 11996.01, 12074.83, 11997.99, 12074.83, 41.42905441],
[1561863900, 11984.35, 12049.05, 12030, 11997.99, 74.15177529],
[1561863600, 12030.08, 12073.68, 12069.99, 12033.95, 64.57049115],
[1561863300, 12045.99, 12070, 12046.08, 12070, 25.40054497],
[1561863000, 12045, 12062.19, 12050, 12048.79, 19.59746489],
[1561862700, 12046.68, 12078.99, 12077.77, 12048.53, 14.59816797],
[1561862400, 12056.69, 12077.78, 12062.44, 12072.25, 14.04828421],
[1561862100, 12048.2, 12067.95, 12059.01, 12063.82, 56.24053059],
[1561861800, 12030.05, 12076.7, 12030.05, 12059, 27.00655132],
[1561861500, 12029.78, 12060.35, 12059.67, 12034.31, 20.99830603],
[1561861200, 12016.89, 12060.53, 12016.89, 12059.7, 9.37535409],
[1561860900, 12010, 12044.8, 12031.92, 12020, 38.56372625],
[1561860600, 12031.91, 12057.61, 12052.76, 12031.92, 40.97647572],
[1561860300, 12052, 12088.03, 12087.62, 12052.77, 21.8296205],
[1561860000, 12063.62, 12099, 12094.73, 12081.94, 68.46527684],
[1561859700, 12035.35, 12096.99, 12041.51, 12089.05, 57.55792997],
[1561859400, 12031.45, 12043.5, 12039.83, 12042.2, 51.87165828],
[1561859100, 12027.92, 12050, 12042.57, 12042.98, 18.82243697],
[1561858800, 12038.12, 12070.39, 12040.45, 12042.59, 21.50924295],
[1561858500, 12003.2, 12046.74, 12008, 12040.46, 30.80062594],
[1561858200, 11994.26, 12031.17, 12015.87, 12007.99, 25.85791287],
[1561857900, 11985.77, 12042.15, 12008.22, 12015.87, 68.08191599],
[1561857600, 11988.7, 12046.73, 12041.8, 12008.22, 61.28693467],
[1561857300, 12020.72, 12075, 12074.64, 12036.1, 54.41941653],
[1561857000, 12052, 12075, 12055.06, 12075, 33.26321615],
[1561856700, 12055, 12095, 12091.54, 12062.57, 28.79388939],
[1561856400, 12055.54, 12126.91, 12118.38, 12091.56, 62.60535893],
[1561856100, 12096.65, 12139, 12107.72, 12118.39, 52.98948699],
[1561855800, 12037.01, 12108, 12055.64, 12108, 32.6543961],
[1561855500, 12037, 12063.48, 12061.12, 12050, 26.08933949],
[1561855200, 12037, 12074.03, 12055.65, 12049.84, 39.76598093],
[1561854900, 12053.01, 12139, 12128, 12055.65, 55.24807889],
[1561854600, 12035.43, 12128, 12043.09, 12128, 73.65449175],
[1561854300, 12033.78, 12065.02, 12065.02, 12040.51, 38.60972213],
[1561854000, 12031.56, 12086, 12038.79, 12065.02, 53.20890088],
[1561853700, 12016.21, 12069.95, 12017.25, 12038.79, 67.83065866],
[1561853400, 12001.85, 12040, 12020.07, 12015.43, 49.67809639],
[1561853100, 11919.94, 12040, 11919.98, 12020.09, 139.48037537],
[1561852800, 11868.56, 11926.56, 11869.84, 11920.06, 55.52461322],
[1561852500, 11805, 11960, 11953.68, 11865.29, 214.93879873],
[1561852200, 11920.05, 11967.66, 11960.46, 11956.25, 41.91900063],
[1561851900, 11911, 11972.63, 11972.61, 11953.68, 78.57997561],
[1561851600, 11930.21, 11980, 11955.76, 11973.38, 93.2644001],
[1561851300, 11912.91, 11979, 11970.06, 11946.15, 38.53417168],
[1561851000, 11950, 12019.99, 12019.99, 11960.33, 43.64009572],
[1561850700, 11951.05, 12018.84, 11954.93, 12018.84, 33.61946511],
[1561850400, 11933, 11981.63, 11969.98, 11953.62, 63.897333],
[1561850100, 11922.22, 11974.35, 11954.78, 11962.18, 87.04420637],
[1561849800, 11954.76, 12038.04, 11978.11, 11954.76, 44.3974435],
[1561849500, 11968, 12026.5, 12018.94, 11988.59, 61.37473955],
[1561849200, 12014.79, 12062.54, 12040.04, 12024.75, 62.32721701],
[1561848900, 12000, 12049.95, 12015.03, 12041.26, 50.31815904],
[1561848600, 12017.75, 12099.5, 12070, 12017.75, 86.88298538],
[1561848300, 12018.12, 12070, 12018.12, 12070, 38.4635476],
[1561848000, 11981.88, 12023.42, 11983.1, 12018.13, 45.06024287],
[1561847700, 11983.1, 12064.01, 12064.01, 11983.1, 88.85360466],
[1561847400, 12026.48, 12069.99, 12061.97, 12064.01, 32.51390863],
[1561847100, 12020, 12073.67, 12024.12, 12066.93, 80.00985389],
[1561846800, 11992.2, 12033.63, 11992.29, 12024.12, 36.60821384],
[1561846500, 11984.85, 12015.76, 11995.44, 11992.28, 49.06346481],
[1561846200, 11950.24, 12000, 11950.24, 11992.34, 29.59615667],
[1561845900, 11924.31, 11975.04, 11955, 11950.24, 39.43276792],
[1561845600, 11938.15, 12001.77, 11938.15, 11954.97, 84.25448057],
[1561845300, 11877.96, 11949.72, 11900, 11941.53, 58.54246518],
[1561845000, 11900, 11948.46, 11920.01, 11900.01, 66.35971941],
[1561844700, 11870, 11958.3, 11923.95, 11920.01, 125.52558565],
[1561844400, 11900, 12093, 12090.11, 11920.06, 192.88512602],
[1561844100, 12070.11, 12137.13, 12136.08, 12094.61, 32.10616645],
[1561843800, 12092.68, 12139.52, 12093.65, 12136.78, 46.39499911],
[1561843500, 12064.65, 12108.96, 12065, 12093.75, 38.28553021],
[1561843200, 12065, 12167.9, 12141.23, 12065.01, 145.67993495],
[1561842900, 12112.6, 12166.88, 12131.58, 12141.22, 68.61347742],
[1561842600, 12119, 12194.5, 12194.5, 12131.75, 91.53988645],
[1561842300, 12134.17, 12200, 12134.17, 12191.64, 126.50672809],
[1561842000, 12130, 12171.52, 12137.34, 12134.96, 71.64588191],
[1561841700, 12110, 12206.5, 12193.25, 12139.88, 153.02704388],
[1561841400, 12116.85, 12262.92, 12116.85, 12193.25, 661.73476874],
[1561841100, 11980, 12129.63, 11984, 12115.74, 172.71261016],
[1561840800, 11975, 12014.25, 11996.43, 11985.56, 92.6006025],
[1561840500, 11963, 11996.75, 11969.33, 11990.24, 39.03925888],
[1561840200, 11927.21, 11975.99, 11950.01, 11975.47, 41.53776947],
[1561839900, 11942, 11969.7, 11955, 11950.01, 33.53243916],
[1561839600, 11942, 11978.01, 11978.01, 11953.31, 25.3552364],
[1561839300, 11940.18, 12000, 11940.18, 11978.01, 82.14323606],
[1561839000, 11899.63, 11981.55, 11899.63, 11949.98, 75.61840599],
[1561838700, 11866.65, 11900, 11895.99, 11899.69, 23.17971492],
[1561838400, 11855, 11900, 11860.41, 11887.95, 39.58843651],
[1561838100, 11847.73, 11884.7, 11847.73, 11860.59, 18.72160884],
[1561837800, 11847.44, 11958.85, 11923.01, 11852.59, 57.34910835],
[1561837500, 11882.68, 11927.5, 11899.99, 11924.97, 38.93511508],
[1561837200, 11869.07, 11900, 11898.17, 11899.86, 19.39407194],
[1561836900, 11865, 11909.16, 11880.08, 11896.88, 29.3205977],
[1561836600, 11875, 11952.96, 11938.06, 11888.03, 44.21535125],
[1561836300, 11870.03, 11958.29, 11901.21, 11951.78, 50.11772916],
[1561836000, 11870.03, 11929.13, 11895.54, 11904.17, 43.79259942],
[1561835700, 11873, 11968.62, 11963.08, 11897.96, 124.54350164],
[1561835400, 11962.92, 11995.26, 11981.46, 11969.16, 17.89057249],
[1561835100, 11958.83, 12011.11, 11962.19, 11980.82, 63.83626456],
[1561834800, 11951.37, 11983.87, 11963.64, 11959.51, 34.82958182],
[1561834500, 11920.72, 11969.58, 11935.55, 11958.05, 48.11079425],
[1561834200, 11917, 11987.18, 11987.15, 11935.73, 69.51426374],
[1561833900, 11940, 11987.74, 11950.98, 11987.53, 64.62189354],
[1561833600, 11950, 12014.99, 12013.28, 11950.98, 77.2076845],
[1561833300, 11998, 12020, 12018, 12010, 61.22837725],
[1561833000, 11990, 12065, 11993.05, 12017.29, 117.58874424],
[1561832700, 11933.49, 11997.5, 11935, 11997.48, 73.46219369],
[1561832400, 11930, 11982.77, 11977.51, 11935, 42.7821525],
[1561832100, 11920.91, 11986.82, 11920.91, 11977.51, 122.80916226],
[1561831800, 11910.89, 11939.72, 11925, 11920.9, 42.84005765],
[1561831500, 11903.61, 11939.21, 11927.81, 11925, 54.32947229],
[1561831200, 11890.3, 11972.8, 11950, 11927.81, 172.75506349],
[1561830900, 11930, 12006.22, 11930.01, 11950, 267.05196802],
[1561830600, 11842.08, 11946.98, 11842.08, 11930, 119.24395664],
[1561830300, 11794.77, 11862.35, 11810.23, 11842.08, 68.28975278],
[1561830000, 11785, 11819.1, 11805, 11810.23, 48.36523039],
[1561829700, 11772.74, 11805, 11778.44, 11805, 40.25314668],
[1561829400, 11751.32, 11818.23, 11809.99, 11774.86, 62.64408732],
[1561829100, 11790.04, 11824, 11824, 11809.81, 29.61196404],
[1561828800, 11800, 11877, 11842.77, 11824, 153.33348316],
[1561828500, 11780, 11842.76, 11780, 11842.76, 55.37685114],
[1561828200, 11760.07, 11807.47, 11780.17, 11782.05, 49.53756141],
[1561827900, 11735, 11808.91, 11735, 11783.56, 45.39517415],
[1561827600, 11735, 11807.84, 11807.84, 11739.07, 54.15469253],
[1561827300, 11760, 11818.99, 11783.83, 11811.3, 58.91331621],
[1561827000, 11750.18, 11834.7, 11773.54, 11783.24, 123.3932812],
[1561826700, 11700, 11768.63, 11741.24, 11768.63, 141.46288944],
[1561826400, 11709.21, 11755.4, 11724.45, 11736.74, 59.21633702],
[1561826100, 11696.83, 11777, 11696.83, 11724.74, 166.57352995],
[1561825800, 11630, 11710.56, 11682.17, 11699.99, 149.88804507],
[1561825500, 11661.29, 11717.75, 11677, 11685.91, 108.1390776],
[1561825200, 11615.04, 11723, 11721.17, 11677, 256.42712088],
[1561824900, 11697.18, 11758.05, 11700, 11722.72, 125.61611963],
[1561824600, 11645.59, 11901, 11901, 11704.16, 402.12759705],
[1561824300, 11872.89, 11949.99, 11938.39, 11900.59, 116.67157112],
[1561824000, 11876.34, 11970.54, 11883.38, 11945.15, 119.15010619],
[1561823700, 11861.77, 11902.09, 11880.05, 11881.36, 35.83230806],
[1561823400, 11867.4, 11927.15, 11904, 11882.15, 48.60071046],
[1561823100, 11856.23, 11925.26, 11893.77, 11903.39, 62.90426033],
[1561822800, 11815, 11941.46, 11941.46, 11893.81, 301.07446801],
[1561822500, 11945.51, 12011.7, 11992, 11945.51, 96.94020037],
[1561822200, 11970, 12004, 12004, 11991.99, 32.42115734],
[1561821900, 12004, 12058, 12043.72, 12004.01, 28.34201313],
[1561821600, 11973.03, 12043.72, 12008.05, 12043.72, 83.86457232],
[1561821300, 11963, 12025, 11964.04, 12007.56, 67.36996244],
[1561821000, 11931.74, 12014.95, 11931.74, 11964.04, 131.70952233],
[1561820700, 11874, 11947.49, 11905, 11940, 32.48892888],
[1561820400, 11877.59, 11954.94, 11925, 11905, 81.14729119],
[1561820100, 11911, 11973.47, 11966.06, 11925, 44.40194818],
[1561819800, 11953.61, 11994.98, 11957.18, 11956.15, 39.27501158],
[1561819500, 11941.75, 11989.57, 11989.57, 11957.2, 45.84813136],
[1561819200, 11955.87, 11990, 11964.63, 11989.73, 53.09769084],
[1561818900, 11947.43, 12000, 11948.4, 11964.26, 52.18264153],
[1561818600, 11850.88, 11974.71, 11878.99, 11950, 99.11194701],
[1561818300, 11864.76, 11919.98, 11912.1, 11878.99, 48.5105468],
[1561818000, 11886.12, 11924.65, 11903.61, 11912, 37.06564183],
[1561817700, 11834.6, 11938.87, 11835.2, 11901.56, 81.09125765],
[1561817400, 11830, 11896.52, 11891.68, 11831, 94.28233747],
[1561817100, 11864, 11900, 11886.49, 11892, 51.87387796],
[1561816800, 11855, 11974.35, 11965.84, 11887.03, 152.38542913],
[1561816500, 11901.72, 11996.44, 11912.29, 11974.41, 81.26833028],
[1561816200, 11912.27, 11975, 11950, 11918.36, 95.35852216],
[1561815900, 11905.51, 11950, 11925, 11948.26, 92.92172653],
[1561815600, 11914.85, 11980, 11972.35, 11925.01, 85.37479798],
[1561815300, 11956.35, 12020, 12019.3, 11975, 58.40492971],
[1561815000, 11951.83, 12027.82, 11990, 12013.89, 78.84867866],
[1561814700, 11949.11, 12080.83, 12080, 11989.99, 148.30833427],
[1561814400, 12044.79, 12112.37, 12055.5, 12080.84, 92.79809074],
[1561814100, 12037.84, 12082.3, 12082.3, 12055.51, 73.82154233],
[1561813800, 12050.54, 12108.47, 12107.94, 12086.07, 60.53506321],
[1561813500, 12089.11, 12152.26, 12131.42, 12107, 205.90705378],
[1561813200, 12046.17, 12172.82, 12046.17, 12140.44, 263.13365092],
[1561812900, 11997.33, 12069.22, 11997.33, 12050, 37.9552995]] | [
"amjodeiri@yahoo.com"
] | amjodeiri@yahoo.com |
f05b968e39febf01d27debcf0bed250e13309c9a | 8898273f9811fab29eb5621734bafcdf204d8229 | /scipy-stubs/integrate/quadrature.pyi | 21ea590c9993068c72b5be57697a1ef607670d6b | [] | no_license | tyrion/scipy-stubs | 628ad6321a7e1502683a2b55a759777508ab4b67 | bf49a91313523c4f635bc3e5d14444c1361caf64 | refs/heads/master | 2020-05-30T21:59:43.001510 | 2019-06-03T10:30:54 | 2019-06-03T10:30:54 | 189,984,340 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 970 | pyi | # Stubs for scipy.integrate.quadrature (Python 3.6)
#
# NOTE: This dynamically typed stub was automatically generated by stubgen.
from numpy import trapz as trapz
from typing import Any, Optional
class AccuracyWarning(Warning): ...
def fixed_quad(func: Any, a: Any, b: Any, args: Any = ..., n: int = ...): ...
def quadrature(func: Any, a: Any, b: Any, args: Any = ..., tol: float = ..., rtol: float = ..., maxiter: int = ..., vec_func: bool = ..., miniter: int = ...): ...
def cumtrapz(y: Any, x: Optional[Any] = ..., dx: float = ..., axis: int = ..., initial: Optional[Any] = ...): ...
def simps(y: Any, x: Optional[Any] = ..., dx: int = ..., axis: int = ..., even: str = ...): ...
def romb(y: Any, dx: float = ..., axis: int = ..., show: bool = ...): ...
def romberg(function: Any, a: Any, b: Any, args: Any = ..., tol: float = ..., rtol: float = ..., show: bool = ..., divmax: int = ..., vec_func: bool = ...): ...
def newton_cotes(rn: Any, equal: int = ...): ...
| [
"germano.gabbianelli@contentwise.tv"
] | germano.gabbianelli@contentwise.tv |
e6346be9a0e374c318056fa487dc4b5ba516e9c9 | 15fd98a71764ef682d2b1c8640cb261464d527f7 | /ライブラリ/memo.py | b0a3e213665d296c6b7f30b0f6d86e239334209b | [] | no_license | venzosan/selected100 | f9f5a9143a0f46e96aea528b04153e00eb3090f5 | 060210ad12c342318a8d940a76e27a7dd3c3434c | refs/heads/master | 2023-04-13T04:16:15.764451 | 2021-04-23T18:15:23 | 2021-04-23T18:15:23 | 350,734,823 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 203 | py | #n次元配列のm番目の要素でソート
# Sort a list of rows by each row's second element, largest first.
li = [[1, 4, 3], [2, 3, 4], [3, 4, 5], [4, 5, 6], [2, 3, 4], [1, 5, 3], [2, 3, 4], [5, 6, 7]]
li.sort(key=lambda row: row[1], reverse=True)  # in-place; stable among equal keys
| [
"tsuyunoiwashi@gmail.com"
] | tsuyunoiwashi@gmail.com |
188b926a0273d9407218185aa3016d3f02c1eb88 | 55b57d64ec547869835334318f3059fbb507558c | /Fred2/Data/pssms/smm/mat/A_32_07_9.py | 9d57732d1c3384a786473ec66600efe950cc1551 | [
"BSD-3-Clause"
] | permissive | FRED-2/Fred2 | 9845f6678d4011cb746c7a5a6f283eea68077a02 | b3e54c8c4ed12b780b61f74672e9667245a7bb78 | refs/heads/master | 2021-07-12T05:05:54.515427 | 2020-05-25T06:56:25 | 2020-05-25T06:56:25 | 16,275,425 | 42 | 35 | null | 2021-07-07T12:05:11 | 2014-01-27T10:08:11 | Python | UTF-8 | Python | false | false | 2,145 | py | A_32_07_9 = {0: {'A': 0.197, 'C': 0.0, 'E': -0.11, 'D': 0.0, 'G': -0.053, 'F': 0.087, 'I': 0.122, 'H': -0.115, 'K': -0.109, 'M': 0.153, 'L': 0.056, 'N': 0.0, 'Q': -0.004, 'P': 0.0, 'S': -0.278, 'R': -0.213, 'T': -0.08, 'W': 0.169, 'V': 0.0, 'Y': 0.177}, 1: {'A': 0.177, 'C': 0.0, 'E': 0.245, 'D': 0.0, 'G': -0.006, 'F': -0.133, 'I': -0.025, 'H': 0.0, 'K': 0.0, 'M': 0.031, 'L': -0.032, 'N': 0.0, 'Q': -0.236, 'P': 0.039, 'S': -0.022, 'R': 0.167, 'T': 0.083, 'W': 0.0, 'V': -0.011, 'Y': -0.276}, 2: {'A': 0.158, 'C': 0.0, 'E': 0.0, 'D': 0.0, 'G': 0.0, 'F': -0.04, 'I': 0.081, 'H': -0.15, 'K': 0.058, 'M': -0.145, 'L': -0.108, 'N': 0.001, 'Q': 0.0, 'P': 0.021, 'S': -0.15, 'R': 0.038, 'T': -0.136, 'W': 0.24, 'V': 0.098, 'Y': 0.032}, 3: {'A': 0.0, 'C': 0.0, 'E': -0.001, 'D': -0.0, 'G': -0.0, 'F': -0.0, 'I': 0.0, 'H': -0.0, 'K': 0.0, 'M': -0.0, 'L': 0.0, 'N': -0.0, 'Q': -0.0, 'P': -0.001, 'S': 0.0, 'R': 0.0, 'T': 0.001, 'W': -0.0, 'V': 0.0, 'Y': -0.0}, 4: {'A': 0.002, 'C': 0.0, 'E': -0.0, 'D': 0.0, 'G': -0.002, 'F': 0.001, 'I': -0.001, 'H': -0.001, 'K': 0.0, 'M': -0.001, 'L': 0.001, 'N': -0.0, 'Q': -0.0, 'P': 0.001, 'S': 0.001, 'R': 0.001, 'T': -0.001, 'W': -0.001, 'V': 0.0, 'Y': -0.001}, 5: {'A': 0.059, 'C': -0.077, 'E': 0.0, 'D': -0.06, 'G': -0.123, 'F': 0.09, 'I': 0.075, 'H': 0.317, 'K': -0.107, 'M': 0.0, 'L': 0.071, 'N': -0.058, 'Q': -0.079, 'P': 0.019, 'S': -0.002, 'R': 0.086, 'T': -0.012, 'W': -0.165, 'V': -0.036, 'Y': 0.002}, 6: {'A': 0.0, 'C': 0.0, 'E': 0.0, 'D': 0.0, 'G': 0.0, 'F': -0.0, 'I': 0.0, 'H': 0.0, 'K': 0.0, 'M': 0.0, 'L': -0.0, 'N': -0.0, 'Q': -0.0, 'P': -0.0, 'S': -0.0, 'R': 0.0, 'T': 0.0, 'W': -0.0, 'V': -0.0, 'Y': -0.0}, 
7: {'A': 0.133, 'C': 0.0, 'E': 0.109, 'D': 0.0, 'G': -0.133, 'F': 0.002, 'I': 0.0, 'H': -0.0, 'K': 0.032, 'M': 0.12, 'L': -0.108, 'N': 0.015, 'Q': -0.064, 'P': 0.036, 'S': -0.019, 'R': 0.0, 'T': -0.186, 'W': 0.0, 'V': -0.006, 'Y': 0.071}, 8: {'A': 0.0, 'C': 0.0, 'E': 0.0, 'D': 0.0, 'G': 0.0, 'F': -0.01, 'I': -0.01, 'H': 0.0, 'K': 0.021, 'M': -0.006, 'L': 0.036, 'N': 0.0, 'Q': 0.0, 'P': 0.0, 'S': 0.0, 'R': 0.008, 'T': 0.0, 'W': -0.032, 'V': -0.001, 'Y': -0.005}, -1: {'con': 1.37905}} | [
"schubert@informatik.uni-tuebingen.de"
] | schubert@informatik.uni-tuebingen.de |
f2ee0c56a885daa5dee46ca150b86b8999cc3af8 | a5363720d3803e1b02e25d7063d61e827e0251b7 | /pytest/dataguru/week4/otherclass.py | 40c3991e086c825f281a90c4e2a0a9fe9fd5491f | [] | no_license | xiaocaipi/python | 693c117874c05b3891a3fb75f6229b8c3ee816b7 | 00ba1ae7e1d68f4240779b7be28016addf4c1bf2 | refs/heads/master | 2021-01-13T01:55:33.185768 | 2015-07-05T14:51:41 | 2015-07-05T14:51:41 | 38,572,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 71 | py | class otherclass:
def GET(self):
return '200'
| [
"xiaocaipi@qq.com"
] | xiaocaipi@qq.com |
62403b305f76b293486f4154cce895e2dc80d825 | b806af675ad871722a173369d22321a4f4135bf5 | /application/main/views.py | 9c940b5ef9cfed3f7d6d58fba81ad33e12b34782 | [] | no_license | degacth/flask-library | ee6f29934bcb69237998bda15e133fa5eb9c29d7 | bde96b8e68a1a4bf51976201138169a727aee12b | refs/heads/master | 2020-12-24T19:59:56.877631 | 2017-05-08T13:05:04 | 2017-05-08T13:05:04 | 86,221,832 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 331 | py | import os
from flask import Blueprint, render_template
from config import Config
SRC_DIR = os.path.join(Config.BASE_DIR, 'src')
main_bp = Blueprint('main', __name__, template_folder=SRC_DIR, static_folder=SRC_DIR)
@main_bp.route('/')
@main_bp.route('/<path:page>')
def main(page=None):
    # Catch-all route: every path serves the single-page entry template.
    # *page* is accepted so arbitrary client-side routes resolve here, but it
    # is intentionally unused — the front end handles its own routing.
    return render_template('index.html')
| [
"degacth@yandex.ru"
] | degacth@yandex.ru |
b32a67ea872da39fa07c33669690cc804b81d4ba | 5c80c1c3a24399db5d7c2a259a3e2d18dcbe79a2 | /TensorFlow/computer_vision/densenet_keras/models/densenet.py | 0b3007fe678d074e264f54166cd76a0aededbb19 | [
"MIT",
"Apache-2.0"
] | permissive | maxchung2001/Model-References | b2f26cec3bcfc912f50379e47fcff7cb60ea96d2 | bc8da16830c1c35e5d1458ba2e46df8726e10f29 | refs/heads/master | 2023-04-12T16:37:27.103316 | 2021-04-22T07:00:01 | 2021-04-22T07:00:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,199 | py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""DenseNet models for Keras.
Reference paper:
- [Densely Connected Convolutional Networks]
(https://arxiv.org/abs/1608.06993) (CVPR 2017 Best Paper Award)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.python.keras import backend
from tensorflow.python.keras import layers
from tensorflow.python.keras.applications import imagenet_utils
from tensorflow.python.keras.engine import training
from tensorflow.python.keras.utils import data_utils
from tensorflow.python.keras.utils import layer_utils
from tensorflow.python.util.tf_export import keras_export
BASE_WEIGTHS_PATH = ('https://storage.googleapis.com/tensorflow/'
'keras-applications/densenet/')
DENSENET121_WEIGHT_PATH = (
BASE_WEIGTHS_PATH + 'densenet121_weights_tf_dim_ordering_tf_kernels.h5')
DENSENET121_WEIGHT_PATH_NO_TOP = (
BASE_WEIGTHS_PATH +
'densenet121_weights_tf_dim_ordering_tf_kernels_notop.h5')
DENSENET169_WEIGHT_PATH = (
BASE_WEIGTHS_PATH + 'densenet169_weights_tf_dim_ordering_tf_kernels.h5')
DENSENET169_WEIGHT_PATH_NO_TOP = (
BASE_WEIGTHS_PATH +
'densenet169_weights_tf_dim_ordering_tf_kernels_notop.h5')
DENSENET201_WEIGHT_PATH = (
BASE_WEIGTHS_PATH + 'densenet201_weights_tf_dim_ordering_tf_kernels.h5')
DENSENET201_WEIGHT_PATH_NO_TOP = (
BASE_WEIGTHS_PATH +
'densenet201_weights_tf_dim_ordering_tf_kernels_notop.h5')
def dense_block(x, blocks, name):
  """Stack `blocks` densely connected building blocks.

  Arguments:
    x: input tensor.
    blocks: integer, the number of building blocks.
    name: string, block label.

  Returns:
    Output tensor for the block.
  """
  # Each conv_block appends growth_rate (32) feature maps to `x`.
  for block_index in range(1, blocks + 1):
    x = conv_block(x, 32, name='{}_block{}'.format(name, block_index))
  return x
def transition_block(x, reduction, name):
  """Downsampling transition: BN -> ReLU -> 1x1 conv (compression) -> 2x2 avg pool.

  Arguments:
    x: input tensor.
    reduction: float, compression rate at transition layers.
    name: string, block label.

  Returns:
    output tensor for the block.
  """
  channel_axis = 1 if backend.image_data_format() == 'channels_first' else 3
  out = layers.BatchNormalization(
      axis=channel_axis, epsilon=1.001e-5, name='{}_bn'.format(name))(x)
  out = layers.Activation('relu', name='{}_relu'.format(name))(out)
  # Compress the channel count by `reduction` with a 1x1 convolution.
  compressed_channels = int(backend.int_shape(out)[channel_axis] * reduction)
  out = layers.Conv2D(
      compressed_channels, 1, use_bias=False, name='{}_conv'.format(name))(out)
  out = layers.AveragePooling2D(2, strides=2, name='{}_pool'.format(name))(out)
  return out
def conv_block(x, growth_rate, name):
  """One bottleneck unit of a dense block, concatenated onto its input.

  Applies BN-ReLU-Conv(1x1) then BN-ReLU-Conv(3x3) and appends the result
  to `x` along the channel axis (dense connectivity).

  Arguments:
    x: input tensor.
    growth_rate: float, growth rate at dense layers.
    name: string, block label.

  Returns:
    Output tensor for the block.
  """
  channel_axis = 1 if backend.image_data_format() == 'channels_first' else 3
  # Bottleneck: 1x1 conv widens to 4 * growth_rate channels.
  branch = layers.BatchNormalization(
      axis=channel_axis, epsilon=1.001e-5, name='{}_0_bn'.format(name))(x)
  branch = layers.Activation('relu', name='{}_0_relu'.format(name))(branch)
  branch = layers.Conv2D(
      4 * growth_rate, 1, use_bias=False, name='{}_1_conv'.format(name))(branch)
  # Spatial 3x3 conv produces exactly `growth_rate` new feature maps.
  branch = layers.BatchNormalization(
      axis=channel_axis, epsilon=1.001e-5, name='{}_1_bn'.format(name))(branch)
  branch = layers.Activation('relu', name='{}_1_relu'.format(name))(branch)
  branch = layers.Conv2D(
      growth_rate, 3, padding='same', use_bias=False,
      name='{}_2_conv'.format(name))(branch)
  # Dense connectivity: append the new features to the existing ones.
  return layers.Concatenate(
      axis=channel_axis, name='{}_concat'.format(name))([x, branch])
def DenseNet(
batch_size,
blocks,
include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
classifier_activation='softmax',
):
"""Instantiates the DenseNet architecture.
Optionally loads weights pre-trained on ImageNet.
Note that the data format convention used by the model is
the one specified in your Keras config at `~/.keras/keras.json`.
Caution: Be sure to properly pre-process your inputs to the application.
Please see `applications.densenet.preprocess_input` for an example.
Arguments:
blocks: numbers of building blocks for the four dense layers.
include_top: whether to include the fully-connected
layer at the top of the network.
weights: one of `None` (random initialization),
'imagenet' (pre-training on ImageNet),
or the path to the weights file to be loaded.
input_tensor: optional Keras tensor
(i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: optional shape tuple, only to be specified
if `include_top` is False (otherwise the input shape
has to be `(224, 224, 3)` (with `'channels_last'` data format)
or `(3, 224, 224)` (with `'channels_first'` data format).
It should have exactly 3 inputs channels,
and width and height should be no smaller than 32.
E.g. `(200, 200, 3)` would be one valid value.
pooling: optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional block.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional block, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
classifier_activation: A `str` or callable. The activation function to use
on the "top" layer. Ignored unless `include_top=True`. Set
`classifier_activation=None` to return the logits of the "top" layer.
Returns:
A `keras.Model` instance.
Raises:
ValueError: in case of invalid argument for `weights`,
or invalid input shape.
ValueError: if `classifier_activation` is not `softmax` or `None` when
using a pretrained top layer.
"""
if not (weights in {'imagenet', None} or os.path.exists(weights)):
raise ValueError('The `weights` argument should be either '
'`None` (random initialization), `imagenet` '
'(pre-training on ImageNet), '
'or the path to the weights file to be loaded.')
if weights == 'imagenet' and include_top and classes != 1000:
raise ValueError('If using `weights` as `"imagenet"` with `include_top`'
' as true, `classes` should be 1000')
# Determine proper input shape
input_shape = imagenet_utils.obtain_input_shape(
input_shape,
default_size=224,
min_size=32,
data_format=backend.image_data_format(),
require_flatten=include_top,
weights=weights)
if input_tensor is None:
img_input = layers.Input(shape=input_shape, batch_size=batch_size)
else:
if not backend.is_keras_tensor(input_tensor):
img_input = layers.Input(tensor=input_tensor, shape=input_shape, batch_size=batch_size)
else:
img_input = input_tensor
raise NotImplemented("Unsupported flow")
bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1
x = layers.ZeroPadding2D(padding=((3, 3), (3, 3)))(img_input)
x = layers.Conv2D(64, 7, strides=2, use_bias=False, name='conv1/conv')(x)
x = layers.BatchNormalization(
axis=bn_axis, epsilon=1.001e-5, name='conv1/bn')(
x)
x = layers.Activation('relu', name='conv1/relu')(x)
x = layers.ZeroPadding2D(padding=((1, 1), (1, 1)))(x)
x = layers.MaxPooling2D(3, strides=2, name='pool1')(x)
x = dense_block(x, blocks[0], name='conv2')
x = transition_block(x, 0.5, name='pool2')
x = dense_block(x, blocks[1], name='conv3')
x = transition_block(x, 0.5, name='pool3')
x = dense_block(x, blocks[2], name='conv4')
x = transition_block(x, 0.5, name='pool4')
x = dense_block(x, blocks[3], name='conv5')
x = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-5, name='bn')(x)
x = layers.Activation('relu', name='relu')(x)
if include_top:
x = layers.GlobalAveragePooling2D(name='avg_pool')(x)
imagenet_utils.validate_activation(classifier_activation, weights)
x = layers.Dense(classes, activation=classifier_activation,
name='predictions')(x)
else:
if pooling == 'avg':
x = layers.GlobalAveragePooling2D(name='avg_pool')(x)
elif pooling == 'max':
x = layers.GlobalMaxPooling2D(name='max_pool')(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = layer_utils.get_source_inputs(input_tensor)
else:
inputs = img_input
# Create model.
if blocks == [6, 12, 24, 16]:
model = training.Model(inputs, x, name='densenet121')
elif blocks == [6, 12, 32, 32]:
model = training.Model(inputs, x, name='densenet169')
elif blocks == [6, 12, 48, 32]:
model = training.Model(inputs, x, name='densenet201')
else:
model = training.Model(inputs, x, name='densenet')
# Load weights.
if weights == 'imagenet':
if include_top:
if blocks == [6, 12, 24, 16]:
weights_path = data_utils.get_file(
'densenet121_weights_tf_dim_ordering_tf_kernels.h5',
DENSENET121_WEIGHT_PATH,
cache_subdir='models',
file_hash='9d60b8095a5708f2dcce2bca79d332c7')
elif blocks == [6, 12, 32, 32]:
weights_path = data_utils.get_file(
'densenet169_weights_tf_dim_ordering_tf_kernels.h5',
DENSENET169_WEIGHT_PATH,
cache_subdir='models',
file_hash='d699b8f76981ab1b30698df4c175e90b')
elif blocks == [6, 12, 48, 32]:
weights_path = data_utils.get_file(
'densenet201_weights_tf_dim_ordering_tf_kernels.h5',
DENSENET201_WEIGHT_PATH,
cache_subdir='models',
file_hash='1ceb130c1ea1b78c3bf6114dbdfd8807')
else:
if blocks == [6, 12, 24, 16]:
weights_path = data_utils.get_file(
'densenet121_weights_tf_dim_ordering_tf_kernels_notop.h5',
DENSENET121_WEIGHT_PATH_NO_TOP,
cache_subdir='models',
file_hash='30ee3e1110167f948a6b9946edeeb738')
elif blocks == [6, 12, 32, 32]:
weights_path = data_utils.get_file(
'densenet169_weights_tf_dim_ordering_tf_kernels_notop.h5',
DENSENET169_WEIGHT_PATH_NO_TOP,
cache_subdir='models',
file_hash='b8c4d4c20dd625c148057b9ff1c1176b')
elif blocks == [6, 12, 48, 32]:
weights_path = data_utils.get_file(
'densenet201_weights_tf_dim_ordering_tf_kernels_notop.h5',
DENSENET201_WEIGHT_PATH_NO_TOP,
cache_subdir='models',
file_hash='c13680b51ded0fb44dff2d8f86ac8bb1')
model.load_weights(weights_path)
elif weights is not None:
model.load_weights(weights)
return model
@keras_export('keras.applications.densenet.DenseNet121',
              'keras.applications.DenseNet121')
def DenseNet121(include_top=True,
                weights='imagenet',
                input_tensor=None,
                input_shape=None,
                pooling=None,
                classes=1000):
  """Instantiates the Densenet121 architecture.

  Thin wrapper that forwards every argument to `DenseNet` with the
  DenseNet-121 block layout.
  """
  # DenseNet-121 uses 6/12/24/16 conv blocks in its four dense stages.
  layout = [6, 12, 24, 16]
  return DenseNet(layout, include_top, weights, input_tensor, input_shape,
                  pooling, classes)
@keras_export('keras.applications.densenet.DenseNet169',
              'keras.applications.DenseNet169')
def DenseNet169(include_top=True,
                weights='imagenet',
                input_tensor=None,
                input_shape=None,
                pooling=None,
                classes=1000):
  """Instantiates the Densenet169 architecture.

  Thin wrapper that forwards every argument to `DenseNet` with the
  DenseNet-169 block layout.
  """
  # DenseNet-169 uses 6/12/32/32 conv blocks in its four dense stages.
  layout = [6, 12, 32, 32]
  return DenseNet(layout, include_top, weights, input_tensor, input_shape,
                  pooling, classes)
@keras_export('keras.applications.densenet.DenseNet201',
              'keras.applications.DenseNet201')
def DenseNet201(include_top=True,
                weights='imagenet',
                input_tensor=None,
                input_shape=None,
                pooling=None,
                classes=1000):
  """Instantiates the Densenet201 architecture.

  Thin wrapper that forwards every argument to `DenseNet` with the
  DenseNet-201 block layout.
  """
  # DenseNet-201 uses 6/12/48/32 conv blocks in its four dense stages.
  layout = [6, 12, 48, 32]
  return DenseNet(layout, include_top, weights, input_tensor, input_shape,
                  pooling, classes)
@keras_export('keras.applications.densenet.preprocess_input')
def preprocess_input(x, data_format=None):
  """Preprocesses a batch of images for a DenseNet model.

  Arguments
    x: A 4D numpy array consists of RGB values within [0, 255].
    data_format: Optional image data format.

  Returns
    Preprocessed array.

  Raises
    ValueError: In case of unknown `data_format` argument.
  """
  # DenseNet weights expect torch-style preprocessing; delegate to the
  # shared ImageNet helper.
  return imagenet_utils.preprocess_input(x, data_format=data_format,
                                         mode='torch')
@keras_export('keras.applications.densenet.decode_predictions')
def decode_predictions(preds, top=5):
  """Decodes the prediction result from the model.

  Arguments
    preds: Numpy tensor encoding a batch of predictions.
    top: Integer, how many top-guesses to return.

  Returns
    One list per batch sample, each containing up to `top`
    `(class_name, class_description, score)` tuples.

  Raises
    ValueError: In case of invalid shape of the `preds` array (must be 2D).
  """
  # Nothing DenseNet-specific here; reuse the shared ImageNet decoder.
  return imagenet_utils.decode_predictions(preds, top=top)
preprocess_input.__doc__ = imagenet_utils.PREPROCESS_INPUT_DOC.format(
mode='', ret=imagenet_utils.PREPROCESS_INPUT_RET_DOC_TORCH)
decode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__
DOC = """
Optionally loads weights pre-trained on ImageNet.
Note that the data format convention used by the model is
the one specified in your Keras config at `~/.keras/keras.json`.
Arguments:
include_top: whether to include the fully-connected
layer at the top of the network.
weights: one of `None` (random initialization),
'imagenet' (pre-training on ImageNet),
or the path to the weights file to be loaded.
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: optional shape tuple, only to be specified
if `include_top` is False (otherwise the input shape
has to be `(224, 224, 3)` (with `'channels_last'` data format)
or `(3, 224, 224)` (with `'channels_first'` data format).
It should have exactly 3 inputs channels,
and width and height should be no smaller than 32.
E.g. `(200, 200, 3)` would be one valid value.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional block.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional block, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
Returns:
A Keras model instance.
"""
setattr(DenseNet121, '__doc__', DenseNet121.__doc__ + DOC)
setattr(DenseNet169, '__doc__', DenseNet169.__doc__ + DOC)
setattr(DenseNet201, '__doc__', DenseNet201.__doc__ + DOC)
| [
"mpandit@habana.ai"
] | mpandit@habana.ai |
9c3229ceea797a701d645ee117156726475eb98a | 841fb0a6866f9fc7dcd8c0832375bc265cd56e8b | /.history/test_20210723131313.py | c1af4ab41f3ac46265e2ae1ab87cdb0aa929ecfd | [] | no_license | Yurun-LI/CodeEx | f12f77b214bcf98f3fa4acd2658a9c0597c1a2e6 | 5b3a427961ab87ce4c4536362a2ba1e34d438859 | refs/heads/master | 2023-06-21T23:52:26.624127 | 2021-07-27T03:44:34 | 2021-07-27T03:44:34 | 345,563,512 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,657 | py | import numpy as np
import matplotlib.pyplot as plt
class Sort:
    """A small collection of comparison sorts.

    Every method returns an ascending-sorted copy of its input and leaves
    the original sequence untouched.
    """

    def insertSort(self, ls):
        """Insertion sort: O(n^2) worst case, stable."""
        result = ls.copy()
        if len(result) <= 1:
            return result
        for pos in range(1, len(result)):
            key = result[pos]
            hole = pos
            # Shift larger elements one slot right until key's place is found.
            while hole > 0 and result[hole - 1] > key:
                result[hole] = result[hole - 1]
                hole -= 1
            result[hole] = key
        return result

    def shellSort(self, ls):
        """Shell sort using Knuth's gap sequence (1, 4, 13, 40, ...)."""
        result = ls.copy()
        if len(result) <= 1:
            return result
        gap = 1
        while gap <= len(result) // 3:
            gap = 3 * gap + 1
        while gap > 0:
            # Gapped insertion sort for this increment.
            for pos in range(gap, len(result)):
                key = result[pos]
                slot = pos
                while slot >= gap and result[slot - gap] > key:
                    result[slot] = result[slot - gap]
                    slot -= gap
                result[slot] = key
            gap = (gap - 1) // 3
        return result

    def bubbleSort(self, ls):
        """Bubble sort: repeatedly swap adjacent out-of-order pairs."""
        result = ls.copy()
        if len(result) <= 1:
            return result
        size = len(result)
        for done in range(size - 1):
            # After each pass the largest remaining value sits at the end.
            for idx in range(size - 1 - done):
                if result[idx] > result[idx + 1]:
                    result[idx], result[idx + 1] = result[idx + 1], result[idx]
        return result
# arr = np.random.permutation(np.arange(10))
# print(Sort().insertSort(arr))
def check():
    """Cross-check Sort().bubbleSort against numpy's sort.

    Runs 10 trials on random permutations of 0..99; prints 'Error' and
    stops on the first mismatch, otherwise prints 'right'.
    """
    for trial in range(10):
        arr = np.random.permutation(np.arange(100))
        expected = np.sort(arr)
        actual = Sort().bubbleSort(arr)
        # The original reused `i` for both the trial counter and the
        # element loop, shadowing the outer variable; use distinct names.
        for got, want in zip(actual, expected):
            if got != want:
                print('Error')
                return
    print('right')
    return
check()
| [
"li1042278644@icloud.com"
] | li1042278644@icloud.com |
5dd70f3f9e44b7448608a2d48655390f8438e070 | f4d82c296a7daf4d1262495048f2dcf2819eadd7 | /coupons/migrations/0001_initial.py | 8181dbec8fcb2a472e465ed0e41631b5e40390e8 | [] | no_license | Bruce-Aidams/Pilollo_old | 28f1e43cedb4bc75aa163012ca874043277810d7 | 391b7108a41e7c5e959abc45886ab4eca5324c95 | refs/heads/master | 2022-06-09T08:07:05.461348 | 2020-05-07T04:25:03 | 2020-05-07T04:25:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 845 | py | # Generated by Django 2.0.2 on 2020-01-30 08:59
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial migration for the coupons app: creates the
    # Coupon table. Do not edit by hand except to squash/rename.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Coupon',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('code', models.CharField(max_length=50, unique=True)),
                ('valid_from', models.DateTimeField()),
                ('valid_to', models.DateTimeField()),
                # Percentage discount, validated to stay within 0-100.
                ('discount', models.IntegerField(validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(100)])),
                ('active', models.BooleanField()),
            ],
        ),
    ]
| [
"baronbenny23@gmail.com"
] | baronbenny23@gmail.com |
5728e39b45741bb047b3f384fb8b16afc68ad615 | 49c8c12e5f2e1db28ad02e57250af55866111c95 | /photobooth.py | decb7edc26891f699ddf2bd0667dace80d271252 | [] | no_license | hcpb/phPIL | e8f0a0b85ac012bab5119a23c19a1b04440a90b5 | 34ac05c269d75d5d041850991bcf59ecab9131b8 | refs/heads/master | 2021-01-01T06:45:54.131113 | 2014-07-21T20:27:07 | 2014-07-21T20:27:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,563 | py | #!/usr/bin/python
# many dependencies are all brought in with this import...
from photoboothlib import *
#=============================================================================
# ========================= COMMAND LINE ARGUMENTS ===========================
#=============================================================================
# Python 2 script. Everything below runs at import time; helpers such as
# showtext/waitforkey/grab_image2 come from photoboothlib's star import.
if 'help' in sys.argv or '-h' in sys.argv or '--help' in sys.argv:
    print """
NAME:
     photobooth.py - a photo booth python script
OPTIONS:
    -h,--help,help      print help
    nousecamera         use dummy photos instead of camera (default: USB connected camera)
    nomove              do not move images after processing (default: move)
    lastphoto=xxxx      begin sequence with the provided 4-digit (>=1000) number
    noincrement         do not increment the image sequence number (default: increment)
    doubleprint         generate the double print (adds time, default: no doubleprint)
    sepia               do composites in sepia tone
    bw                  do composites in black and white
DESCRIPTION:
    This python script implements a photo booth where a sequence of four images is
    taken by a USB-connected camera and processed into various composite images.
    Requires: libgphoto2, python-pygame, piggyphoto (python binding for libgphoto2)
    and graphicsmagick.
    """
    sys.exit()
# use camera or dummy source images...
if 'nousecamera' in sys.argv:
    camera_arg=False
else:
    camera_arg=True
if 'doubleprint' in sys.argv:
    doubleprint=True
else:
    doubleprint=False
# use sepia tone...
# 'sepia' and 'bw' are mutually exclusive; both given means neither applies.
tone = ''
if 'sepia' in sys.argv and not('bw' in sys.argv):
    tone = '-sepia'
if 'bw' in sys.argv and not('sepia' in sys.argv):
    tone='-bw'
# move the files when done? Assume true...
move=True
if 'nomove' in sys.argv:
    print 'Not moving files...'
    move=False
# set lastphoto via command line...
lastphoto=False
for i in sys.argv:
    if 'lastphoto' in i:
        lastphoto = True
        temp = split(i, '=')[1]
        break
if not(lastphoto):
    # this should be rolled into the filename function but for now it's here...
    # NOTE(review): eval() on file/keyboard input is unsafe in general;
    # acceptable only because this runs on a trusted kiosk.
    last = eval(open('lastphoto', 'r').read())
    print 'Change current photo number '+str(last)+'?'
    temp = raw_input( 'Enter valid new number or nothing to keep: ')
    if temp not in ['']:
        last = eval(temp)
    open('lastphoto', 'w').write(str(last))
# increment output photo index? default is true...
increment=True
if 'noincrement' in sys.argv:
    increment = False
#=============================================================================
# ===================== DONE COMMAND LINE ARGUMENTS ==========================
#=============================================================================
#=============================================================================
# ================================== MAIN ==================================
#=============================================================================
# verify command line args...
print 'nousecamera:', repr(camera_arg)
print 'nomove:', repr(move)
print 'lastphoto:', last
print 'increment:', repr(increment)
print 'doubleprint:', repr(doubleprint)
pygame.init()
screen = pygame.display.set_mode(size)
#toggle_fullscreen()
# Main kiosk loop: wait for a color button, shoot 4 frames, composite, save.
while (1):
    showtext(screen, "Push a button to start", 100)
    key = waitforkey([K_g, K_r, K_y])
    # Button choice selects the tone for this session's composites.
    if key == K_y: tone='-sepia'
    if key == K_r: tone='-bw'
    if key == K_g: tone =''
    fillscreen(screen, black)
    # keep track of the starting time for some statistics...
    start = time.time()
    # get a new filename and print it to the console...
    filename= new_filename(increment=increment)
    print '\r\nnew filename:', filename
    # grab the sequence of images from the camera (or, if specified, dummy images)...
    for i in range(4):
        showtext(screen, 'Image: '+str(i+1), 100)
        time.sleep(0.5)
        print
        print 'Grabbing image: ', i+1
        fillscreen(screen, black)
        grab_image2(filename, i, camera_arg)
        # display image just taken
        displayimage(screen, filename+'_'+suffix[i]+'.jpg', camerasize, cameraloc)
        # two threads for compositing images...
        fname = filename+'_'+suffix[i] + '.jpg'
        t_ = []
        t_.append( threading.Thread(target=composite_add_display, args=(fname, i)) )
        t_.append( threading.Thread(target=composite_add_print, args=(fname, i)) )
        # NOTE(review): reusing `i` below shadows the image-index loop
        # variable; harmless here (the for-loop rebinds it) but fragile.
        for i in t_: i.start()
        while ( t_[0].isAlive() or t_[1].isAlive() ):
            time.sleep(0.05)
        print time.time()-start
    showtext(screen, 'Processing...', 100)
    # add emblems to composites...
    tmp = Image.open('images/overlay-disp.png').resize( (233, 233), Image.ANTIALIAS )
    print tmp.size, tmp.mode
    imDisplay.paste( tmp, (522, 243, 755, 476), mask=tmp )
    # save display composite...
    imDisplay.save(filename+'_display.jpg', 'JPEG', quality=98)
    # throw up completed display image while finishing up print images...
    displayimage(screen, filename+'_display.jpg', size)
    # save single print composite...
    tmp = Image.open('images/overlay-phone.png').resize( (1500, 941), Image.ANTIALIAS )
    print tmp.size, tmp.mode
    imPrint.paste( tmp, (250, 50, 1750, 991), mask=tmp )
    imPrint.save(filename+'_phone.jpg', 'JPEG', quality=90)
    imDouble = Image.new('RGB', (4000, 6000), 'white')
    # generate double strip for printing...
    # Two copies of the 2000x6000 strip side by side with a cut line between.
    imDouble.paste( imPrint, (0, 0, 2000, 6000) )
    imDouble.paste( imPrint, (2000, 0, 4000, 6000) )
    draw = ImageDraw.Draw(imDouble)
    draw.line( (2000, 0, 2000, 6000), fill='rgb(0,0,0)', width=2)
    del draw
    imDouble.save(filename+'_print.jpg', 'JPEG', quality=90)
    print '\r\nProcessing done: ', time.time()-start
    # time.sleep(8)
    # clean up the temporary files generated during compositing...
    cleanup_temp_files(filename)
    # print elapsed time to console...
    print '\r\nTotal cycle time: ', time.time()-start
| [
"icenyne@gmail.com"
] | icenyne@gmail.com |
c25c4a3bc595d4e908c34f11a343ecb9e4e58f72 | 9304941efbde9f8083f89326f2549fddd5d253e2 | /gaus.py | 75aca173349ab6f85a855befb1d6e5012540e2f9 | [] | no_license | azmijuhda/metnum | 0cc7f3b8a845a90642085cc4a7fe47bb7d5e0a6c | c49da0d1966c59b69d8dded62a69e015fea89002 | refs/heads/master | 2020-03-12T08:37:20.125669 | 2018-04-22T03:26:31 | 2018-04-22T03:26:31 | 130,531,742 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,455 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Apr 9 08:39:06 2018
@author: azmijuhda
"""
def baca_matriks(n):
    """Read an n x (n+1) augmented matrix from stdin, row by row."""
    a = []
    for i in range(n):
        print ("")
        row = []
        for j in range(n + 1):
            print ("Masukkan Nilai Matriks [%d,%d] " % (i + 1, j + 1))
            row.append(float(input("-> ")))
        a.append(row)
    return a


def cetak_matriks(a, judul):
    """Print the augmented matrix, separating the RHS column with '|'."""
    print ("\n" + judul)
    for row in a:
        for j, val in enumerate(row):
            if j != len(row) - 1:
                print ("%.2f " % val, end="")
            else:
                print ("| %.2f" % val)


def eliminasi_gauss_jordan(a):
    """Reduce augmented matrix `a` (n rows, n+1 columns) in place to RREF.

    Fixes two bugs in the original script:
    * the pivot was only normalized when ``a[i][i] != n`` (comparing the
      pivot VALUE to the matrix ORDER); we now normalize any non-zero pivot;
    * the elimination loop also ran for ``j == i``, which subtracted the
      pivot row from itself, zeroing it and breaking all later eliminations.
      We now skip the pivot row.
    Returns `a` for convenience.
    """
    n = len(a)
    for i in range(n):
        print ("")
        pivot = a[i][i]
        if pivot != 0:
            for k in range(n + 1):
                a[i][k] /= pivot
        for j in range(n):
            if j == i:
                continue  # never eliminate the pivot row against itself
            konst = a[j][i]
            for k in range(n + 1):
                a[j][k] -= konst * a[i][k]
    # Normalize negative zeros (and exact zeros) for clean display.
    for i in range(n):
        for j in range(n + 1):
            if a[i][j] == -0:
                a[i][j] = 0
    return a


if __name__ == "__main__":
    print ("== GAUSS ==")
    n = int(input("Masukkan Ordo Matriks : "))
    a = baca_matriks(n)
    cetak_matriks(a, "Matriks Awal")
    eliminasi_gauss_jordan(a)
    cetak_matriks(a, "Matriks Baru")
| [
"noreply@github.com"
] | noreply@github.com |
5df5e0dd5c4926e279b6ba9b730b3394612747dc | 3cdb4faf34d8375d6aee08bcc523adadcb0c46e2 | /web/env/lib/python3.6/site-packages/user_agents/__init__.py | b8dc2453a93f1e47e2146c07f97dc0191a628b39 | [
"MIT",
"GPL-3.0-only"
] | permissive | rizwansoaib/face-attendence | bc185d4de627ce5adab1cda7da466cb7a5fddcbe | 59300441b52d32f3ecb5095085ef9d448aef63af | refs/heads/master | 2020-04-25T23:47:47.303642 | 2019-09-12T14:26:17 | 2019-09-12T14:26:17 | 173,157,284 | 45 | 12 | MIT | 2020-02-11T23:47:55 | 2019-02-28T17:33:14 | Python | UTF-8 | Python | false | false | 48 | py | VERSION = (2, 0, 0)
from .parsers import parse
| [
"rizwansoaib@gmail.com"
] | rizwansoaib@gmail.com |
381701f9ffd0e54c242295558b4dc3dead0daaa7 | 1f800b08f937067e840e0a07c39cd75ee818aba0 | /Library_Fine.py | 11db838e061bd1b4b3e4bef734cf5237f3289997 | [] | no_license | SpaceCoder123/Hacker_Rank | 5e322c8f672d346c4a9522f9ff3e93daeb09180a | 91c5d735fafc7fa80079caf3ec6e7fad26b6d536 | refs/heads/master | 2022-11-11T05:09:00.555968 | 2020-07-02T12:56:51 | 2020-07-02T12:56:51 | 254,894,825 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 519 | py | Given_date=list(map(int,input().split()))
def library_fine(actual, expected):
    """Return the Hackerrank 'Library Fine' for a returned book.

    Both arguments are ``[day, month, year]`` integer triples (the input
    format of the original script). Rules, checked most-significant first:
    * returned in a later year: flat 10000;
    * same year, later month: 500 per month late;
    * same year and month, later day: 15 per day late;
    * returned on time or early at any level: 0.
    """
    d1, m1, y1 = actual
    d2, m2, y2 = expected
    if y1 > y2:
        return 10000
    if y1 < y2:
        return 0  # returned in an earlier year: no fine
    if m1 > m2:
        return 500 * (m1 - m2)
    if m1 < m2:
        return 0
    if d1 > d2:
        return 15 * (d1 - d2)
    return 0


if __name__ == "__main__":
    # First line: actual return date; second line: expected due date.
    given_date = list(map(int, input().split()))
    expected_date = list(map(int, input().split()))
    print(library_fine(given_date, expected_date))
| [
"noreply@github.com"
] | noreply@github.com |
925326f9bd4afe34bffc15db2159f377ac95bce8 | 7873e0aedc1aa9a2b5f065f03a03befb1a224d0e | /2_plot.py | e0ae0a05d85f155e197de6e292f19db1bf70bbba | [] | no_license | ronygregory/MDG-indicators-prediction | 9bb0e53d6e1ce982b74e87e1c32789bdeacb4b7e | 8e5a14d22f7f26f4d7cf27d400e99597c79c97ec | refs/heads/master | 2021-01-10T04:27:12.450773 | 2015-12-06T22:27:16 | 2015-12-06T22:27:16 | 47,472,095 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,465 | py | import sys
from pyspark import SparkConf, SparkContext
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import gui
# Python 2 + PySpark script: segmented linear regression over MDG indicator
# data selected through the `gui` module, plotted with matplotlib.
# Filter criteria - change it to accept from UI
print "starting"
gui.getValues()
country = gui.country
print country
indicator = gui.indicator
print indicator
year_start = gui.startYear
print year_start
year_end = gui.endYear
print year_end
num_of_segments = 5
predict_end = gui.predYear # predict till year
conf = SparkConf()
conf.setMaster("local[4]")
conf.setAppName("damu1000")
conf.set("spark.executor.memory", "4g")
sc = SparkContext(conf=conf)
# read data
lines = sc.pickleFile(".//result")
# filter by country, indicator and period. sort by period
# Records are 5-tuples: (country, ?, indicator, year, value).
lines = lines.filter(lambda x: x[0] == country and x[2] == indicator and x[4] != '' and x[3] >= year_start and x[3] <= year_end).sortBy(lambda (a, b, c, d, e): d, True).cache()
# NOTE(review): take(1) is executed twice (here and in the if below) —
# the first call's result is discarded, costing an extra Spark action.
lines.take(1)
if not lines.take(1):
    print "Index not present for this country. Stopping"
    sys.exit()
print lines.collect()
x = lines.map(lambda (a, b, c, d, e): (d)) # getting x values in 2D RDD
y = lines.map(lambda (a, b, c, d, e): float(e)) # getting x values in 2D RDD
# num_of_segments = x.count() / 5 #averaging at around 5 points per segment
#--------------------------------------- Find out "break" points in pattern--------------------------------------------------------------------
# assign indexes to y values, increment by 1 and 2 so that elements can be joined to find out diff later.
x0 = x.zipWithIndex().map(lambda (a, b): (b, a))
y0 = y.zipWithIndex().map(lambda (a, b): (b, a))
y1 = y0.map(lambda (a, b): (a + 1, b))
y2 = y0.map(lambda (a, b): (a + 2, b))
# join structure is like: [index, ((y0, y1) ,y2 )]. Hence the structure used in next map
# y[1] -> y[i+2], y[0][1] -> y[i+1], y[0][0] -> y[i]. Calculating diff (y[i+2]-y[i+1]) - (y[i+1]-y[i]) to find out breakpoint
# finding out difference in y coordinates of every consecutive pair. sort by desc order of difference
y_join = y0.join(y1).join(y2).map(lambda (x, y): (x, abs((y[1] - y[0][1]) - (y[0][1] - y[0][0])))).sortBy(lambda (a, b): b, False)
# caution: segments are derived from zip index. hence contain index of element NOT x element (i.e. year)
# picking up top difference elements - they form our segments
segments = sorted(y_join.map(lambda (a, b):a).take(num_of_segments)) # taking 2 additional segments. Because some times diff returns 2 consecutive points, which has to be discarded
segments = [0] + segments
segments.append(x.count())
print segments
i = 0
# remove consecutive points if included
# check for divide by 0. some prob in segmentation. print x1, check segment should have more than one element.
while i < len(segments) - 1:
    if segments[i] + 1 == segments[i + 1] or segments[i] + 2 == segments[i + 1]:
        segments.remove(segments[i])
    else:
        i = i + 1 # increment i only in else, cause if element is deleted, then next element is now at same old i th position
if segments[0] != 0:
    segments = [0] + segments
print segments
#------------------------------------------------Calculate linear regression for every segment---------------------------------------------
seg = 0
y_est = []
y = y.collect()
# for carry out linear regression for individual segment
while seg < len(segments) - 1:
    print "----------------------------------------------------------------------------------------------------"
    start = segments[seg]
    end = segments[seg + 1]
    # B1 = sum( (y_avg - yi)(x_avg - xi) ) / sum ( (x_avg - xi)(x_avg - xi) )
    # B0 = y_avg - B1*x_avg
    x1 = x0.filter(lambda (a, b): a >= start and a < end).map(lambda (a, b):float(b)) # filter x and y for current segment start and end
    y1 = y0.filter(lambda (a, b): a >= start and a < end).map(lambda (a, b):float(b))
    seg = seg + 1
    x_avg = float(x1.sum()) / x1.count() # find out average
    y_avg = float(y1.sum()) / y1.count()
    i = 0
    num = 0
    den = 0
    print "x1: ", x1.collect()
    print "y1: ", y1.collect()
    num = x1.zip(y1).map(lambda (a, b): (y_avg - b) * (x_avg - a)).sum() # numerator and denominator as per above formula of linear regression
    den = den + x1.map(lambda a: (x_avg - a) * (x_avg - a)).sum()
    B1 = num / den # B0 and B1 (Beta 0 and Beta 1) calculation
    B0 = y_avg - B1 * x_avg
    # y1 = y1.collect()
    y_est = y_est + x1.map(lambda a: B0 + B1 * a).collect() # calculate estimated linear regression using Beta0 and Beta 1 (finally!!!)
#---------------------------------------extend x with upcoming years and predict using last segment's B0 and B1
print "estimating--------------------------------------------------------------------------------------"
x = x.collect()
predict_start = x[len(x) - 1] + 1 # first year of prediction = last year of available data + 1
x_est = x + range(predict_start, predict_end + 1) # adding + 1 to accomodate end year as well
for i in range(predict_start, predict_end + 1):
    y_est.append(B0 + B1 * i)
# plt.plot(x,y,'bs', x_est, y_est, 'g--')
# plt.show()
print "plotting"
print "x_est: ", x_est
print len(x_est)
print "y_est: ", y_est
print len(y_est)
plt.xlabel('Year')
plt.ylabel(indicator)
line1, = plt.plot(x_est, y_est, linestyle="solid", marker='o', color="blue")
# NOTE(review): linestyle="s" is not a valid matplotlib linestyle — this
# looks like it was meant to be marker-only ('s' square) — TODO confirm.
line2, = plt.plot(x, y, linestyle="s", marker="o", color="red")
plt.legend([line1, line2], ["Prediction", "Real Data"])
plt.title("Prediction using Segmented Linear Regression: " + country)
plt.show()
| [
"rony.gregory@utah.edu"
] | rony.gregory@utah.edu |
d52d3a2827a1ee42c9687e32b084e7307656124c | c79826948dfe18dc7b1bc8a9cb1bb7433b785848 | /Projects/ToDoApp (Django + React + Redux)/ToDoApp/settings.py | 3218540ab1587ef59fdffae76dab1e518ba62b67 | [] | no_license | yahyaest/JavaScript_Repository | 704f03e7c4a32dead11b9d77c28e845ac0032dc7 | ac21ae772c97e45add8a627a4b22d453ff6a97c8 | refs/heads/main | 2023-01-12T17:43:17.925408 | 2020-11-19T22:20:38 | 2020-11-19T22:20:38 | 314,365,556 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,503 | py | """
Django settings for ToDoApp project.
Generated by 'django-admin startproject' using Django 3.0.8.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to the repo and DEBUG is on —
# acceptable for local development only; rotate before any deployment.
SECRET_KEY = 'unftyh7hvqz80+3bww0k8v@mai8&pl)zhd!7pqb^!qx1x1n9f-'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'api',
    'accounts',
    'rest_framework',
    'knox',
    'corsheaders',
]
# DRF authenticates API requests with knox tokens.
REST_FRAMEWORK = {'DEFAULT_AUTHENTICATION_CLASSES': ('knox.auth.TokenAuthentication',)}
MIDDLEWARE = [
    'corsheaders.middleware.CorsMiddleware',
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'ToDoApp.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # Serve the built React app's index.html as a Django template.
        'DIRS': [os.path.join(BASE_DIR, 'to_do_app/build')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'ToDoApp.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
# Serve the React build's static assets alongside Django's.
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'to_do_app/build/static')]
# Front-end dev servers allowed to call the API cross-origin.
CORS_ORIGIN_WHITELIST = [
    "http://localhost:3000",
    "http://localhost:3001"
]
| [
"machatyahya@gmail.com"
] | machatyahya@gmail.com |
b32b76f682558b542d37e0757152e22391f98198 | e2f5479f73bdfb9cd93a2fd7c615da369a43a499 | /tests/lastfm/commands/test_cmd_add.py | eb79cd38db6d075e48444f318b17fe6ab264ae91 | [
"MIT"
] | permissive | tefra/pytuber | 8bdb837d0912c9bacab0bff1e0196bfdba67cb62 | a7c5d6252584dc0abee946e707f496cecaebf1bb | refs/heads/master | 2022-05-19T21:48:02.129812 | 2022-05-08T10:08:40 | 2022-05-08T10:10:24 | 161,838,438 | 10 | 6 | MIT | 2022-05-08T09:45:24 | 2018-12-14T20:44:26 | Python | UTF-8 | Python | false | false | 6,159 | py | from unittest import mock
from pytuber.cli import cli
from pytuber.core.models import PlaylistManager
from pytuber.core.models import Provider
from pytuber.lastfm.models import PlaylistType
from pytuber.lastfm.models import UserPlaylistType
from pytuber.lastfm.params import ArtistParamType
from pytuber.lastfm.params import CountryParamType
from pytuber.lastfm.params import TagParamType
from pytuber.lastfm.params import UserParamType
from tests.utils import CommandTestCase
from tests.utils import PlaylistFixture
class CommandAddTests(CommandTestCase):
    @mock.patch("pytuber.lastfm.commands.cmd_add.fetch_tracks")
    @mock.patch.object(UserParamType, "convert")
    @mock.patch.object(PlaylistManager, "set")
    def test_user_playlist(self, create_playlist, convert, fetch_tracks):
        # Decorators apply bottom-up, so mocks arrive in reverse order:
        # PlaylistManager.set -> create_playlist, convert, fetch_tracks.
        convert.return_value = "bbb"
        create_playlist.return_value = PlaylistFixture.one()
        # stdin lines: username, type choice, track limit, title.
        result = self.runner.invoke(
            cli,
            ["add", "lastfm", "user-playlist"],
            input="\n".join(("aaa", "2", "50", "My Favorite ")),
            catch_exceptions=False,
        )
        expected_output = (
            "Last.fm username: aaa",
            "Playlist Types",
            "[1] User Loved Tracks",
            "[2] User Top Tracks",
            "[3] User Recent Tracks",
            "[4] User Friends Recent Tracks",
            "Select a playlist type 1-4: 2",
            "Maximum tracks [50]: 50",
            "Title: My Favorite ",
            "Added playlist: id_a!",
        )
        self.assertEqual(0, result.exit_code)
        self.assertOutput(expected_output, result.output)
        # Title is stripped before storage; username comes from convert().
        create_playlist.assert_called_once_with(
            {
                "type": UserPlaylistType.USER_TOP_TRACKS,
                "provider": Provider.lastfm,
                "arguments": {"limit": 50, "username": "bbb"},
                "title": "My Favorite",
            }
        )
        fetch_tracks.assert_called_once_with("id_a")
    @mock.patch("pytuber.lastfm.commands.cmd_add.fetch_tracks")
    @mock.patch.object(PlaylistManager, "set")
    def test_chart_playlist(self, create_playlist, fetch_tracks):
        create_playlist.return_value = PlaylistFixture.one()
        # stdin: "50" for the track limit, a lone space for the title.
        result = self.runner.invoke(
            cli, ["add", "lastfm", "chart-playlist"], input="50\n "
        )
        expected_output = (
            "Maximum tracks [50]: 50",
            "Title:  ",
            "Added playlist: id_a!",
        )
        self.assertEqual(0, result.exit_code)
        self.assertOutput(expected_output, result.output)
        create_playlist.assert_called_once_with(
            {
                "type": PlaylistType.CHART,
                "provider": Provider.lastfm,
                "arguments": {"limit": 50},
                "title": "",
            }
        )
        fetch_tracks.assert_called_once_with("id_a")
@mock.patch("pytuber.lastfm.commands.cmd_add.fetch_tracks")
@mock.patch.object(CountryParamType, "convert")
@mock.patch.object(PlaylistManager, "set")
def test_country_playlist(self, create_playlist, country_param_type, fetch_tracks):
country_param_type.return_value = "greece"
create_playlist.return_value = PlaylistFixture.one()
result = self.runner.invoke(
cli, ["add", "lastfm", "country-playlist"], input=b"gr\n50\n "
)
expected_output = (
"Country Code: gr",
"Maximum tracks [50]: 50",
"Title: ",
"Added playlist: id_a!",
)
self.assertEqual(0, result.exit_code)
self.assertOutput(expected_output, result.output)
create_playlist.assert_called_once_with(
{
"type": PlaylistType.COUNTRY,
"provider": Provider.lastfm,
"arguments": {"limit": 50, "country": "greece"},
"title": "",
}
)
fetch_tracks.assert_called_once_with("id_a")
@mock.patch("pytuber.lastfm.commands.cmd_add.fetch_tracks")
@mock.patch.object(TagParamType, "convert")
@mock.patch.object(PlaylistManager, "set")
def test_tag_playlist(self, create_playlist, convert, fetch_tracks):
convert.return_value = "rock"
create_playlist.return_value = PlaylistFixture.one(synced=111)
result = self.runner.invoke(
cli, ["add", "lastfm", "tag-playlist"], input="rock\n50\n "
)
expected_output = (
"Tag: rock",
"Maximum tracks [50]: 50",
"Title: ",
"Updated playlist: id_a!",
)
self.assertEqual(0, result.exit_code)
self.assertOutput(expected_output, result.output)
create_playlist.assert_called_once_with(
{
"type": PlaylistType.TAG,
"provider": Provider.lastfm,
"arguments": {"limit": 50, "tag": "rock"},
"title": "",
}
)
fetch_tracks.assert_called_once_with("id_a")
@mock.patch("pytuber.lastfm.commands.cmd_add.fetch_tracks")
@mock.patch.object(ArtistParamType, "convert")
@mock.patch.object(PlaylistManager, "set")
def test_artist_playlist(self, create_playlist, artist_param, fetch_tracks):
artist_param.return_value = "Queen"
create_playlist.return_value = PlaylistFixture.one()
result = self.runner.invoke(
cli,
["add", "lastfm", "artist-playlist"],
input="Queen\n50\nQueen....",
catch_exceptions=False,
)
expected_output = (
"Artist: Queen",
"Maximum tracks [50]: 50",
"Title: Queen....",
"Added playlist: id_a!",
)
self.assertEqual(0, result.exit_code)
self.assertOutput(expected_output, result.output)
create_playlist.assert_called_once_with(
{
"type": PlaylistType.ARTIST,
"provider": Provider.lastfm,
"arguments": {"limit": 50, "artist": "Queen"},
"title": "Queen....",
}
)
fetch_tracks.assert_called_once_with("id_a")
| [
"chris@komposta.net"
] | chris@komposta.net |
65843e4856971d1c160262de54e7f67ae23cecd2 | b47a02e08b40ad9a075e45a1bf3c344ea8c25053 | /page/login.py | 64eddc01891ad15fa4b01fadf29ea81e7924c8a6 | [] | no_license | socialya/is_selenium | d1ce8f41c8fea5631fdef8c614e9ea265fb0a119 | fb8b96d3a5144f1257e8fcc1df1f8e82679c77dc | refs/heads/master | 2023-05-30T22:45:18.194597 | 2021-06-18T01:55:57 | 2021-06-18T01:55:57 | 378,000,842 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 457 | py | from selenium.webdriver.common.by import By
from base.basepage import BasePage
class Login(BasePage):
def login(self):
self.steps(r"C:\Users\DELL\Desktop\集中\web_xiaoan\step\login_step.yml")
def get_login_err_toast(self):
return self.steps(r"C:\Users\DELL\Desktop\集中\web_xiaoan\step\login_step.yml")
def get_success_toast(self):
return self.steps(r"C:\Users\DELL\Desktop\集中\web_xiaoan\step\login_step.yml")
| [
"16619774261@163.com"
] | 16619774261@163.com |
7818dfe58848eb01336f7b5651924a5ed6c63634 | de01cb554c2292b0fbb79b4d5413a2f6414ea472 | /algorithms/Medium/375.guess-number-higher-or-lower-ii.py | b6799751f60aa606ef7ea7280f4aafd950549035 | [] | no_license | h4hany/yeet-the-leet | 98292017eadd3dde98a079aafcd7648aa98701b4 | 563d779467ef5a7cc85cbe954eeaf3c1f5463313 | refs/heads/master | 2022-12-10T08:35:39.830260 | 2020-09-02T23:12:15 | 2020-09-02T23:12:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,154 | py | #
# @lc app=leetcode id=375 lang=python3
#
# [375] Guess Number Higher or Lower II
#
# https://leetcode.com/problems/guess-number-higher-or-lower-ii/description/
#
# algorithms
# Medium (40.27%)
# Total Accepted: 64.6K
# Total Submissions: 160.4K
# Testcase Example: '1'
#
# We are playing the Guess Game. The game is as follows:
#
# I pick a number from 1 to n. You have to guess which number I picked.
#
# Every time you guess wrong, I'll tell you whether the number I picked is
# higher or lower.
#
# However, when you guess a particular number x, and you guess wrong, you pay
# $x. You win the game when you guess the number I picked.
#
# Example:
#
#
# n = 10, I pick 8.
#
# First round: You guess 5, I tell you that it's higher. You pay $5.
# Second round: You guess 7, I tell you that it's higher. You pay $7.
# Third round: You guess 9, I tell you that it's lower. You pay $9.
#
# Game over. 8 is the number I picked.
#
# You end up paying $5 + $7 + $9 = $21.
#
#
# Given a particular n ≥ 1, find out how much money you need to have to
# guarantee a win.
#
class Solution:
def getMoneyAmount(self, n: int) -> int:
| [
"kevin.wkmiao@gmail.com"
] | kevin.wkmiao@gmail.com |
677a641bde79be9d0d2354f98062e9b5edec8e54 | 1e9b1f2046a4684e30e82f74599eeda7c671716e | /Maps.py | 52a039af84ec985f819cf24d6bc7d86b40cc174c | [] | no_license | jordengit/ML-in-Tower-of-Saviors | 20ee4d4011c8e57b82e76a3cb8d80f648deba0cd | 6289d57a6219d528e085da69ee16d3806f865641 | refs/heads/master | 2020-04-16T20:20:07.065363 | 2019-01-11T09:23:55 | 2019-01-11T09:23:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 588 | py | import numpy as np
maps = []
# random map
map = np.random.randint(6, size=(6, 5)).tolist()
maps.append(map)
# map_1 : U
map = [ [5, 4, 3, 2, 1],
[4, 3, 2, 1, 0],
[5, 4, 3, 2, 1],
[5, 2, 5, 1, 0],
[1, 1, 2, 5, 1],
[1, 2, 5, 1, 0] ]
maps.append(map)
# map_2
map = [ [9, 9, 3, 2, 1],
[9, 3, 2, 1, 9],
[9, 9, 3, 2, 1],
[9, 9, 9, 9, 9],
[9, 9, 9, 9, 9],
[9, 9, 9, 9, 9] ]
for row in map:
for i in range(len(row)):
if row[i] == 9:
row[i] = np.random.randint(6)
maps.append(map)
# map_3
| [
"a0935235100@gmail.com"
] | a0935235100@gmail.com |
d47555778032b584c55d0e6a9103a2c5f1447385 | e296d27f9de6ace63420b8d269a2d55d9aff7240 | /Prediction_whatIfAnalysis.py | 7ac71e8b3eca179d9348b4a3d788830d18db74b3 | [] | no_license | Tapadyuti1991/FreedieMacDataAnalysis | e00a426857c52eaed48bc2b2e3dec342d02505a8 | 8804bca1c52dca7026b79643f61719ca2102eee2 | refs/heads/master | 2020-03-16T20:53:47.243662 | 2018-05-11T02:49:24 | 2018-05-11T02:49:24 | 132,976,498 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,042 | py |
# coding: utf-8
# In[39]:
# !pip install urllib
import requests
from bs4 import BeautifulSoup
import re
import os
import time
from io import BytesIO
from zipfile import ZipFile
import pandas as pd
import glob
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import csv
import math
import h2o
from sklearn import linear_model
from sklearn.feature_selection import RFE
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import r2_score
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
from sklearn.feature_selection import f_regression
from sklearn.svm import LinearSVC
from sklearn.exceptions import NotFittedError
from sklearn.svm import SVR
from itertools import chain, combinations
from sklearn.cross_validation import cross_val_score
from mlxtend.feature_selection import SequentialFeatureSelector as SFS
from sklearn.neighbors import KNeighborsRegressor
from sklearn.ensemble import RandomForestRegressor
from pybrain.tools.shortcuts import buildNetwork
from pybrain.datasets import SupervisedDataSet
from pybrain.supervised.trainers import BackpropTrainer
from sklearn.preprocessing import MinMaxScaler
# In[40]:
fileDir = os.path.dirname(os.path.realpath('__file__'))
# In[41]:
baseUrl='https://freddiemac.embs.com/FLoan/'
postUrl='Data/download.php'
# In[42]:
def createCredentialData(user, passwd):
creds={'username': user,'password': passwd}
return creds
def getFilesFromFreddieMac(cred):
## We are using inside WITH BLock so that session is closed ASAP with BLock is exited
with requests.Session() as s:
## Step 1 routing to auth.php Site with the proper crentials
urlOne = s.post(baseUrl+"secure/auth.php", data=cred)
if "Please log in" in urlOne.text:
## IF CREDENTIALS are not valid Throw Alert
print("Alert: Invalid Credentials, Please try again or sign up on below site \n https://freddiemac.embs.com/FLoan/Bin/loginrequest.php")
else:
print("Step1: Logged in")
## Sterp 2 Preparing the data for to Accept terms and Conditions
pay2={'accept': 'Yes','acceptSubmit':'Continue','action':'acceptTandC'}
finalUrl=s.post(baseUrl +"Data/download.php",pay2)
if "Loan-Level Dataset" in finalUrl.text:
print("Step2 : Terms and Conditions Accepted")
soup = BeautifulSoup(finalUrl.content, "html.parser")
links_list = soup.findAll('a')
print("Step3: Filtered the Sample Files with Condition== 2007/20008/2009/1999/2013")
print("Status::::::::::")
for ele in links_list:
## Filtering the ZIp files >= 2005
if 'historical' in ele.get_text():
if(ele.get_text()[-8:-4] == '2007' or ele.get_text()[-8:-4] == '2008' or ele.get_text()[-8:-4] == '2009' or ele.get_text()[-8:-4] == '2010' or ele.get_text()[-8:-4] == '2013' or ele.get_text()[-8:-4] == '1999'):
print(ele.get_text()[-8:-4])
tempUrl = baseUrl+"Data/"+ele.get('href')
b =time.time()
downloadUrl=s.post(tempUrl) ## return type = Response
e=time.time()
print(tempUrl + " took "+ str(e-b)+" sec")
with ZipFile(BytesIO(downloadUrl.content)) as zfile:
zfile.extractall(os.path.join(fileDir, 'adsDataRepo/'+'Historical_data_'+ele.get_text()[-8:-4]+'/'))
print("File "+ ele.get_text()+" Downloaded")
else:
print("Alert: Please Check the rerouting action suffix")
# In[43]:
def preProcessData(inputQuater,inputYear,inputQuaterTwo,inputYearTwo):
cleandataOne= ""
cleandataTwo= ""
print("pre-process data")
if(os.path.exists(fileDir+'/adsDataRepo/')):
trainingDataFile = glob.glob(fileDir+'/adsDataRepo/'+'Historical_data_'+inputYear+'/historical_data1_'+inputQuater+inputYear+'.txt')
testingDataFile = glob.glob(fileDir+'/adsDataRepo/'+'Historical_data_'+inputYearTwo+'/historical_data1_'+inputQuaterTwo+inputYearTwo+'.txt')
headerNames = ['CreditScore','FirstPaymentDate','FirstTimeHomeBuyerFlag','MaturityDate','MSA','MIP','NumberOfUnits',
'OccupancyStatus','OCLTV','DTI','OriginalUPB','OLTV','OriginalInterestRate','Channel','PrepaymentPenaltyFlag',
'ProductType','PropertyState','PropertyType','PostalCode','LoanSequenceNumber','LoanPurpose',
'OriginalLoanTerm','NumberOfBorrowers','SellerName','ServicerName','SuperConformingFlag']
with open(trainingDataFile[0]) as f:
dataf = pd.read_table(f, sep='|', low_memory=False, header=None,lineterminator='\n', names= headerNames)
cleandataOne = originationDatacleaning(dataf)
cleandataOne.to_csv("Origination_Clean_"+inputQuater+inputYear+".csv",index=False)
print("training data cleaned, CSV Created")
with open(testingDataFile[0]) as f:
dataf = pd.read_table(f, sep='|', low_memory=False, header=None,lineterminator='\n', names= headerNames)
cleandataTwo = originationDatacleaning(dataf)
cleandataTwo.to_csv("Origination_Clean_"+inputQuaterTwo+inputYearTwo+".csv",index=False)
print("testing data cleaned, CSV Created")
return cleandataOne,cleandataTwo
# In[44]:
def originationDatacleaning(dataf):
dataf['CreditScore'].replace(' ',301,inplace=True)
dataf['CreditScore'].fillna(301,inplace=True)
dataf['FirstTimeHomeBuyerFlag'].fillna('X',inplace=True)
dataf['MSA'].replace(' ',0,inplace=True)
dataf['MSA'].fillna(0, inplace=True)
dataf['MIP'].replace(' ',0,inplace=True)
dataf['MIP'].fillna(0, inplace=True)
dataf['NumberOfUnits'].fillna(0,inplace=True)
dataf['OccupancyStatus'].fillna('X',inplace=True)
dataf['OCLTV'].replace(' ',0,inplace=True)
dataf['OCLTV'].fillna(0,inplace=True)
dataf['DTI'].replace(' ',0,inplace=True)
dataf['DTI'].fillna(0,inplace=True)
dataf['OriginalUPB'].replace(' ',0,inplace=True)
dataf['OriginalUPB'].fillna(0,inplace=True)
dataf['OLTV'].replace(' ',0,inplace=True)
dataf['OLTV'].fillna(0,inplace=True)
dataf['OriginalInterestRate'].fillna(0,inplace=True)
dataf['Channel'].fillna('X',inplace=True)
dataf['PrepaymentPenaltyFlag'].fillna('X',inplace=True)
dataf['ProductType'].fillna('XXXXX',inplace=True)
dataf['PropertyState'].fillna('XX',inplace=True)
dataf['PropertyType'].fillna('XX',inplace=True)
dataf['PostalCode'].fillna(0,inplace=True)
dataf['LoanSequenceNumber'].replace('', np.NaN).fillna(0,inplace=True)
dataf['LoanPurpose'].fillna('X',inplace=True)
dataf['OriginalLoanTerm'].replace('', np.NaN).fillna(0,inplace=True)
dataf['NumberOfBorrowers'].fillna('01',inplace=True)
dataf['SellerName'].fillna('X',inplace=True)
dataf['ServicerName'].fillna('X',inplace=True)
dataf['SuperConformingFlag'].fillna('X',inplace=True)
#factorizing data
factorizeCategoricalColumn(dataf)
#assingning datatype
dataf[['PropertyState','LoanSequenceNumber']]=dataf[['PropertyState','LoanSequenceNumber']].astype('str')
dataf[['FirstTimeHomeBuyerFlag','OccupancyStatus','Channel','PrepaymentPenaltyFlag','ProductType','PropertyType','CreditScore','LoanPurpose','SellerName','ServicerName','MSA','MIP','NumberOfUnits','DTI','OCLTV','OLTV','PostalCode','NumberOfBorrowers']]=dataf[['FirstTimeHomeBuyerFlag','OccupancyStatus','Channel','PrepaymentPenaltyFlag','ProductType','PropertyType','CreditScore','LoanPurpose','SellerName','ServicerName','MSA','MIP','NumberOfUnits','DTI','OCLTV','OLTV','PostalCode','NumberOfBorrowers']].astype('int64')
#missinganalysis(dataf)
return dataf
'''As we can see we have the below Null Values presnt in the Data for all the Years (Only varying the Counts )
MSA
FirstTimeHomeBuyerFlag
PrepaymentPenaltyFlag
NumberOfBorrowers
We can ignore'''
# In[45]:
def factorizeCategoricalColumn(cleanperfTrain):
print('_________________________________________________________')
print('Factorizing the Categorical Columns .....................')
print('_________________________________________________________')
cleanperfTrain['FirstTimeHomeBuyerFlag'] = pd.factorize(cleanperfTrain['FirstTimeHomeBuyerFlag'])[0]
cleanperfTrain['OccupancyStatus'] = pd.factorize(cleanperfTrain['OccupancyStatus'])[0]
cleanperfTrain['Channel'] = pd.factorize(cleanperfTrain['Channel'])[0]
cleanperfTrain['ProductType'] = pd.factorize(cleanperfTrain['ProductType'])[0]
cleanperfTrain['PropertyType'] = pd.factorize(cleanperfTrain['PropertyType'])[0]
cleanperfTrain['LoanPurpose'] = pd.factorize(cleanperfTrain['LoanPurpose'])[0]
cleanperfTrain['SellerName'] = pd.factorize(cleanperfTrain['SellerName'])[0]
cleanperfTrain['ServicerName'] = pd.factorize(cleanperfTrain['ServicerName'])[0]
cleanperfTrain['PrepaymentPenaltyFlag'] = pd.factorize(cleanperfTrain['PrepaymentPenaltyFlag'])[0]
return cleanperfTrain
# In[46]:
def dropColumns(file):
file.drop("FirstPaymentDate",axis=1,inplace=True)
file.drop("MaturityDate",axis=1,inplace=True)
file.drop("PostalCode",axis=1,inplace=True)
# In[47]:
def computeMae(model_mae,y,x):
model= model_mae
pred=model.predict(x)
mae=mean_absolute_error(y,pred);
print("MAE:"+str(mae))
# In[48]:
def randomForestRegressionAlgorithm(datadfTraining,datadfTesting):
label=datadfTraining.OriginalInterestRate
datadfTraining.drop('OriginalInterestRate',axis=1,inplace=True)
features=datadfTraining
labelTesting=datadfTesting.OriginalInterestRate
datadfTesting.drop('OriginalInterestRate',axis=1,inplace=True)
featuresTesting=datadfTesting
print("Training Data")
rForest=RandomForestRegressor(max_depth=8)
rForest.fit(features,label)
computeMae(rForest,label,features)
computeRMSE(rForest,label,features)
computeMape(rForest,label,features)
print("Testing Data")
computeMae(rForest,labelTesting,featuresTesting)
computeRMSE(rForest,labelTesting,featuresTesting)
computeMape(rForest,labelTesting,featuresTesting)
plt.scatter(rForest.predict(features),rForest.predict(features)-label,c='r',s=40,alpha=0.5)
plt.scatter(rForest.predict(featuresTesting),rForest.predict(featuresTesting)-labelTesting,c="b",s=40)
plt.hlines(y=0,xmin=2,xmax=10)
plt.title('Residual plot using training(blue) and test(green) data')
plt.ylabel('Residuals')
#plt.show()
# In[49]:
def computeRMSE(model_rmse,y,x):
model= model_rmse
pred=model.predict(x)
rmse=math.sqrt(mean_squared_error(y,pred))
print("RMSE:"+str(rmse))
# In[50]:
def computeMape(model_mape,y,x):
model= model_mape
pred=model.predict(x)
mape=np.mean(np.abs((y - pred) / y)) * 100
print( "MAPE:"+str(mape))
# In[ ]:
def main():
creds=createCredentialData("parekh.kh@husky.neu.edu","UkQqsHbV")
getFilesFromFreddieMac(creds)
print("2007 Analysis")
files=preProcessData("Q1","2007","Q2","2007")
dropColumns(files[0])
dropColumns(files[1])
randomForestRegressionAlgorithm(files[0]._get_numeric_data(),files[1]._get_numeric_data())
filesOne=preProcessData("Q2","2007","Q3","2007")
dropColumns(filesOne[0])
dropColumns(filesOne[1])
randomForestRegressionAlgorithm(filesOne[0]._get_numeric_data(),filesOne[1]._get_numeric_data())
filesTwo=preProcessData("Q3","2007","Q4","2007")
dropColumns(filesTwo[0])
dropColumns(filesTwo[1])
randomForestRegressionAlgorithm(filesTwo[0]._get_numeric_data(),filesTwo[1]._get_numeric_data())
filesThree=preProcessData("Q4","2007","Q1","2007")
dropColumns(filesThree[0])
dropColumns(filesThree[1])
randomForestRegressionAlgorithm(filesThree[0]._get_numeric_data(),filesThree[1]._get_numeric_data())
print("2009 Analysis")
filesFour=preProcessData("Q1","2009","Q2","2009")
dropColumns(filesFour[0])
dropColumns(filesFour[1])
randomForestRegressionAlgorithm(filesFour[0]._get_numeric_data(),filesFour[1]._get_numeric_data())
filesFive=preProcessData("Q2","2009","Q1","2010")
dropColumns(filesFive[0])
dropColumns(filesFive[1])
randomForestRegressionAlgorithm(filesFive[0]._get_numeric_data(),filesFive[1]._get_numeric_data())
if __name__ == '__main__':
main()
# In[ ]:
| [
"tapadyuti@gmail.com"
] | tapadyuti@gmail.com |
7877d63cf9186e94dd828c6bce1f67ae387e7162 | 976a76c257d81db5ba5bb50ddecf8276a5a7b0ad | /analysis_tools/utils/masked_rolling_subimage_transformer.py | 658eb86a078b8d573c02e1ba2a9cb8dac35da1ab | [] | no_license | JulianKlug/brains_and_donuts | e4e9d79fa899b520a313628f9cb38bef3758b759 | 2de8641a863c3cb18fb253d6bcf5c5dcfe6c1cbf | refs/heads/master | 2022-12-02T23:42:47.375289 | 2020-07-25T21:49:57 | 2020-07-25T21:49:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,265 | py | import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from pgtda.images import RollingSubImageTransformer
class MaskedRollingSubImageTransformer(BaseEstimator, TransformerMixin):
"""Obtain regularly spaced subimages belonging to the masked area of images in a collection.
Parameters
----------
mask: mask defining the areas of the input images to use
width_list: list of list of int or None, optional, default: ``None``
List of different widths of the sub-images. If ``None``, the effective width is taken as
`3` across all dimensions.
stride: list of int or None, default: ``None``
Stride between sub-images. If ``None``, the effective stride is taken
as `1` across all dimensions.
padding: str or list of int, optional, default: ``'same'``
Padding applied to the images in the input collection.
- ``'same'``: Padding is calculated so that the output images have
the same size as the input images.
- ``'valid'``: No padding.
- ``'full'``: Maximum padding so that there is at least one voxel
of the orginal image with each subimage.
activated : bool, optional, default: ``False``
If ``True``, the padded pixels are activated. If ``False``, they are
deactivated.
periodic_dimensions : list of bool or None, optional, default: ``None``
Periodicity of the boundaries along each of the axis, where
``n_dimensions`` is the dimension of the images of the collection. The
boolean in the `d`th position expresses whether the boundaries along
the `d`th axis are periodic. The default ``None`` is equivalent to
passing ``numpy.zeros((n_dimensions,), dtype=np.bool)``, i.e. none of
the boundaries are periodic.
feature : bool, optional, default: ``False``
If ``True``, the transformed array will be 2d of shape (n_samples, \
n_features). If ``False``, the transformed array will preserve the
shape of the transformer output array.
Attributes
----------
image_slices_ : list of slice
List of slices corresponding to each subimage.
mask_ = boolean ndarray of shape (n_samples, n_x, n_y, n_z)
width_list_ : list of int ndarray of shape [n_samples, (width_x, width_y [, width_z]])
Effective width along each of the axis. Set in :meth:`fit`.
stride_ : int ndarray of shape (stride_x, stride_y [, stride_z])
Effective stride along each of the axis. Set in :meth:`fit`.
padding_ : int ndarray of shape (padding_x, padding_y [, padding_z])
Effective padding along each of the axis. Set in :meth:`fit`.
periodic_dimensions_ : boolean ndarray of shape (n_dimensions,)
Effective periodicity of the boundaries along each of the axis.
Set in :meth:`fit`.
"""
def __init__(self, mask, width_list=None,
stride=None, padding='same', activated=False,
periodic_dimensions=None, feature=False):
self.mask = mask
self.width_list = width_list
self.stride = stride
self.padding = padding
self.activated = activated
self.periodic_dimensions = periodic_dimensions
self.feature = feature
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged.
This method is here to implement the usual scikit-learn API and hence
work in pipelines.
Parameters
----------
X : ndarray, shape (n_samples, n_pixels_x, n_pixels_y [, n_pixels_z])
Input data. Each entry along axis 0 is interpreted as a 2D or 3D
binary image.
y : None
There is no need of a target in a transformer, yet the pipeline API
requires this parameter.
Returns
-------
self : object
"""
return self
def transform(self, X, y=None):
"""For each width listed in width_list, extract the subimages belonging
Parameters
----------
X : ndarray of shape (n_samples, n_pixels_x, n_pixels_y \
[, n_pixels_z])
Input data. Each entry along axis 0 is interpreted as a 2D or 3D
grayscale image.
y : None
There is no need of a target in a transformer, yet the pipeline API
requires this parameter.
Returns
-------
Xt : ndarray, shape (n_samples, n_pixels_x * n_pixels_y [* n_pixels_z],
n_dimensions)
Transformed collection of images. Each entry along axis 0 is a
point cloud in a `n_dimensions` dimensional space.
"""
n_subjects = X.shape[0]
## Subimage creation
# Subimages are created for every subject for every width setting in width list
# Result: subject list of width list of voxel array: n_subj, n_widths, n_x, n_y, n_z, w_x, w_y, w_z
X_subimages_width_list = [
[RollingSubImageTransformer(width=width, padding=self.padding, stride=self.stride, activated=self.activated,
periodic_dimensions=self.periodic_dimensions, feature=self.feature).fit_transform(X[np.newaxis, subj_index])
for width in self.width_list] for subj_index in range(n_subjects)]
## Masking
# Subimages are flattened to create a common voxel-level dimension and the subset defined by their mask is extracted
# Result: subject list of width list of arrays : n_subjects, n_widths, n_voxels (differs for every subject), w_x, w_y, w_z
X_masked_subimages = []
for subj_index in range(n_subjects):
subimages_per_width = []
for width_index, width in enumerate(self.width_list):
flat_subj_subimages = X_subimages_width_list[subj_index][width_index].reshape(-1, width[0], width[1],
width[2])
flat_subj_mask = self.mask[subj_index].reshape(-1)
subimages_per_width.append(flat_subj_subimages[flat_subj_mask])
X_masked_subimages.append(subimages_per_width)
Xt = X_masked_subimages
return Xt | [
"tensu.wave@gmail.com"
] | tensu.wave@gmail.com |
cfabcbe337af26779b838c7e71fe51673cae94f8 | e37acb231ccc09f88f4da410de04ef54cf7fc88f | /powerSet.py | 5f622576e439de0923ae9fd18d027536515e3878 | [] | no_license | ManasaPola/Coding-Exercise | 9cb38937934fe40bd7fabf5878e5305327bea26b | f35d5c01e7f35493732f640faf563a78c5dbd25f | refs/heads/master | 2020-06-21T11:19:51.712466 | 2019-08-05T19:55:03 | 2019-08-05T19:55:03 | 197,434,369 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 602 | py | '''This problem was asked by Google.
The power set of a set is the set of all its subsets. Write a function that, given a set, generates its power set.
For example, given the set {1, 2, 3}, it should return {{}, {1}, {2}, {3}, {1, 2}, {1, 3}, {2, 3}, {1, 2, 3}}.'''
def main():
s = [1,2,3]
n = len(s)
pow = 2 ** n
ans = []
for counter in range(pow):
for j in range(n):
if ((counter & (1 << j))> 0):
print(counter, j)
ans.append(s[j])
print(s[j], end= "")
print("")
if __name__ == "__main__":
main() | [
"6694manasa@gmail.com"
] | 6694manasa@gmail.com |
8972f8424bbe7dce491fd3c365f2cf0892c862d6 | 862f3389025650b4ddf46dbb675d380dacc784bf | /proj/proj1/tests/q4.py | 8b123f7aecf42dc85045ce6d335a256db490c705 | [] | no_license | argoc/sp21 | 7db5fbedcd84592bf6f557f7876ba0bdbe6bc552 | 23d31826e2a3bc4125a1e50125f1b99a8350647f | refs/heads/master | 2023-05-02T00:09:33.776158 | 2021-04-26T02:25:14 | 2021-04-26T02:25:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | test = { 'name': 'q4',
'points': 2,
'suites': [ { 'cases': [ {'code': ">>> result_4.DataFrame().iloc[3, 1] == 'Trebek, Alex'\nTrue", 'hidden': False, 'locked': False},
{'code': ">>> result_4.DataFrame()['number'].iloc[:5].sum() == 26115\nTrue", 'hidden': False, 'locked': False}],
'scored': True,
'setup': '',
'teardown': '',
'type': 'doctest'}]}
| [
"allenshen5@berkeley.edu"
] | allenshen5@berkeley.edu |
e21ac3da379470ffcc4caef0ba04736b630eef31 | ebbfb8b51aabfee91cc33b206d40b530d5f339c9 | /images/models.py | 53d80203344db8c0d6bd913902667414259c14ec | [] | no_license | p3dr0migue1/project17 | 95ec330cd76b5ea880cc3d7d3a1121347cf0a0f1 | 5dbfbd724caa9e957adc9dadce5eb9aac4577511 | refs/heads/master | 2021-01-02T09:35:41.248470 | 2017-11-18T23:06:02 | 2017-11-18T23:06:02 | 99,255,550 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,397 | py | from django.db import models
from django.conf import settings
from django.utils.text import slugify
from django.core.urlresolvers import reverse
class Image(models.Model):
user = models.ForeignKey(settings.AUTH_USER_MODEL,
related_name='images_created')
title = models.CharField(max_length=200)
slug = models.SlugField(max_length=200, blank=True)
url = models.URLField()
image = models.ImageField(upload_to='images/%Y/%m/%d')
description = models.TextField(blank=True)
created = models.DateTimeField(auto_now_add=True,
db_index=True)
# database index is improved query performance
# consider seting db_index=True for fields that
# you frequently query using `filter`, `exclude` and
# `order_by`
#
# foreign key fields or fields with unique=True
# implies the creation of an index
users_like = models.ManyToManyField(settings.AUTH_USER_MODEL,
related_name='images_liked',
blank=True)
def __str__(self):
return self.title
def save(self, *args, **kwargs):
if not self.slug:
self.slug = slugify(self.title)
super(Image, self).save(*args, **kwargs)
def get_absolute_url(self):
return reverse('images:detail', args=[self.id, self.slug])
| [
"pedro.miguel@live.co.uk"
] | pedro.miguel@live.co.uk |
3c03f32e5eb27a8eed60265897afc1267e87e8c7 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/133/usersdata/219/41408/submittedfiles/al15.py | a09e1d37170fb844e616c106b8bacdfa9a11aef3 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 127 | py | # -*- coding: utf-8 -*-
i=1
for i in range(1000,9999,1):
a1=i%100
a2=i/100
if((a1+a2)*(a1+a2)==i):
print('%d/n' i)
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
d691a6ad61cfc0cd1a7c436299652fad8e85b75f | f0175b2f92d26321352e745c8d6e327a8a3cf65a | /cos_sim_sk_test.py | 2e5fa9fd80ef1bc55db923f06308e4acd258d09c | [] | no_license | BaryehCode/cosine_similarity | 7f9e78b319b9d3b6cc452d31247d36f05d464fdd | 83a9e47ed04ab9fe7770ab09cdb72939c6f86ca4 | refs/heads/master | 2022-04-01T21:12:07.890891 | 2020-01-28T15:08:10 | 2020-01-28T15:08:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 909 | py |
from random import randint
from time import clock
import numpy as np
import cos_sim_sk
"""
For using sklearn's method directly, use the following code (with the same
np arrays as below):
from sklearn.metrics.pairwise import cosine_similarity
(...)
similarity = cosine_similarity(V)[0][-1]
"""
size = 50000
print('Generating 2 vectors of size {}. Similarity should be: -1.0'.format(size))
A = np.array([-10 for x in range(size)])
B = np.array([10 for x in range(size)])
O = np.array([0 for x in range(size)])
repeat = 50
print('Calculating Cosine Similarity. Repeating {}x.'.format(repeat))
avg_runtime = 0
similarity = None
for i in range(repeat):
start = clock()
similarity = cos_sim_sk.get_cosine_similarity(A,B,O)
end = clock()
avg_runtime += (end-start)
avg_runtime = round(avg_runtime / repeat, 4)
print('Done. Average runtime: {} s. Similarity: {}.'.format(avg_runtime,similarity))
| [
"vgratian@utopianlab.am"
] | vgratian@utopianlab.am |
f9c6490f5ece41b650d48ea79d24c13544978d7d | f68732bc40a7a90c3a1082e4b3a4154518acafbb | /script/dbus/sessionBus/inputDeviceTouchPad/011_palmMinWidth.py | 2aa00b420156397e97ca272516be555d2391a05b | [] | no_license | lizhouquan1017/dbus_demo | 94238a2307e44dabde9f4a4dd0cf8ec217260867 | af8442845e722b258a095e9a1afec9dddfb175bf | refs/heads/master | 2023-02-11T19:46:27.884936 | 2021-01-08T05:27:18 | 2021-01-08T05:27:18 | 327,162,635 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 902 | py | # -*- coding: utf-8 -*-
# ***************************************************
# @Test Case ID: 011_palmMinWidth
# @Test Description: int32 PalmMinWidth (readwrite) 手掌误触最小宽度
# @Test Condition: 1.无
# @Test Step: 1.调用接口读取 PalmMinWidth 属性值
# @Test Result: 1.返回 int32 数据类型数据
# @Test Remark:
# @Author: ut001627
# ***************************************************
import time
import pytest
from frame.base import OSBase
from aw.dbus.sessionBus import inputDeviceTouchPad
class TestCase(OSBase):
def setUp(self):
self.Step("预制条件1:无")
@pytest.mark.public
def test_step(self):
self.Step("步骤1:调用接口读取 PalmMinWidth 属性值")
inputDeviceTouchPad.palmMinWidth()
def tearDown(self):
self.Step("收尾:无")
time.sleep(2)
| [
"lizhouquan@uniontech.com"
] | lizhouquan@uniontech.com |
9fd3a856cee9a4a7617e5f48b2ee83857fea54f9 | 34f3cfeac7fd5a7bbbc5e362bef8bc316f81c1d0 | /asn1tools/source/rust/utils.py | d0de494f331cace95b3a0f8f06fbabb9633d7889 | [
"MIT"
] | permissive | eerimoq/asn1tools | 860b3623955c12dfb9763ff4e20a805beb7436ba | de25657f7c79100d1ba5312dd7474ff3e0d0ad2e | refs/heads/master | 2023-03-16T09:28:11.924274 | 2023-03-10T20:24:34 | 2023-03-10T20:24:34 | 99,156,277 | 272 | 98 | MIT | 2023-01-03T13:40:36 | 2017-08-02T20:05:05 | Python | UTF-8 | Python | false | false | 18,890 | py | import re
from ...errors import Error
TYPE_DECLARATION_FMT = '''\
/// Type {type_name} in module {module_name}.
{members}
'''
DEFINITION_FMT = '''
impl {module_name}{type_name} {{
pub fn encode(&mut self, mut dst: &mut [u8]) -> Result<usize, Error> {{
let mut encoder = Encoder::new(&mut dst);
self.encode_inner(&mut encoder);
encoder.get_result()
}}
pub fn decode(&mut self, src: &[u8]) -> Result<usize, Error> {{
let mut decoder = Decoder::new(&src);
self.decode_inner(&mut decoder);
decoder.get_result()
}}
fn encode_inner(&mut self, encoder: &mut Encoder) {{
{encode_body}\
}}
fn decode_inner(&mut self, decoder: &mut Decoder) {{
{decode_body}\
}}
}}
'''
ENCODER_AND_DECODER_STRUCTS = '''\
#[derive(Debug, PartialEq, Copy, Clone)]
pub enum Error {
BadChoice,
BadEnum,
BadLength,
OutOfData,
OutOfMemory
}
struct Encoder<'a> {
buf: &'a mut [u8],
size: usize,
pos: usize,
error: Option<Error>
}
struct Decoder<'a> {
buf: &'a[u8],
size: usize,
pos: usize,
error: Option<Error>
}
'''
ENCODER_ABORT = '''
fn abort(&mut self, error: Error) {
if self.error.is_none() {
self.error = Some(error);
}
}\
'''
DECODER_ABORT = '''
fn abort(&mut self, error: Error) {
if self.error.is_none() {
self.error = Some(error);
}
}\
'''
class _MembersBacktracesContext(object):
def __init__(self, backtraces, member_name):
self.backtraces = backtraces
self.member_name = member_name
def __enter__(self):
for backtrace in self.backtraces:
backtrace.append(self.member_name)
def __exit__(self, *args):
for backtrace in self.backtraces:
backtrace.pop()
class _UserType(object):
def __init__(self,
type_name,
module_name,
type_code,
used_user_types):
self.type_name = type_name
self.module_name = module_name
self.type_code = type_code
self.used_user_types = used_user_types
class Generator(object):
def __init__(self):
self.namespace = 'a'
self.asn1_members_backtrace = []
self.c_members_backtrace = []
self.module_name = None
self.type_name = None
self.helper_lines = []
self.base_variables = set()
self.used_suffixes_by_base_variables = {}
self.encode_variable_lines = []
self.decode_variable_lines = []
self.used_user_types = []
def reset_type(self):
self.helper_lines = []
self.base_variables = set()
self.used_suffixes_by_base_variables = {}
self.encode_variable_lines = []
self.decode_variable_lines = []
self.used_user_types = []
@property
def module_name_snake(self):
return camel_to_snake_case(self.module_name)
@property
def type_name_snake(self):
return camel_to_snake_case(self.type_name)
def type_length(self, minimum, maximum):
# Make sure it fits in 64 bits.
if minimum < -9223372036854775808:
raise self.error(
'{} does not fit in int64_t.'.format(minimum))
elif maximum > 18446744073709551615:
raise self.error(
'{} does not fit in uint64_t.'.format(maximum))
elif minimum < 0 and maximum > 9223372036854775807:
raise self.error(
'{} does not fit in int64_t.'.format(maximum))
# Calculate the number of bytes needed.
if minimum < -4294967296:
minimum_length = 64
elif minimum < -65536:
minimum_length = 32
elif minimum < -256:
minimum_length = 16
elif minimum < 0:
minimum_length = 8
else:
minimum_length = 0
if maximum > 4294967295:
maximum_length = 64
elif maximum > 65535:
maximum_length = 32
elif maximum > 255:
maximum_length = 16
elif maximum > 0:
maximum_length = 8
else:
maximum_length = 0
if minimum_length == maximum_length == 0:
length = 8
else:
length = max(minimum_length, maximum_length)
return length
def format_type_name(self, minimum, maximum):
length = self.type_length(minimum, maximum)
if minimum >= 0:
type_name = 'u{}'.format(length)
else:
type_name = 'i{}'.format(length)
return type_name
@property
def location(self):
location = '{}{}'.format(self.module_name,
self.type_name)
for member in self.asn1_members_backtrace:
location += make_camel_case(member)
return location
def location_inner(self, default='value', end=''):
if self.c_members_backtrace:
return '.'.join(self.c_members_backtrace) + end
else:
return default
def location_error(self):
location = '{}.{}'.format(self.module_name, self.type_name)
if self.asn1_members_backtrace:
location += '.{}'.format('.'.join(self.asn1_members_backtrace))
return location
def members_backtrace_push(self, member_name):
backtraces = [
self.asn1_members_backtrace,
self.c_members_backtrace
]
return _MembersBacktracesContext(backtraces, member_name)
def asn1_members_backtrace_push(self, member_name):
backtraces = [self.asn1_members_backtrace]
return _MembersBacktracesContext(backtraces, member_name)
def c_members_backtrace_push(self, member_name):
backtraces = [self.c_members_backtrace]
return _MembersBacktracesContext(backtraces, member_name)
def get_member_checker(self, checker, name):
for member in checker.members:
if member.name == name:
return member
raise Error('No member checker found for {}.'.format(name))
def add_unique_variable(self, name):
if name in self.base_variables:
try:
suffix = self.used_suffixes_by_base_variables[name]
suffix += 1
except KeyError:
suffix = 2
self.used_suffixes_by_base_variables[name] = suffix
unique_name = '{}_{}'.format(name, suffix)
else:
self.base_variables.add(name)
unique_name = name
return unique_name
def error(self, message):
return Error('{}: {}'.format(self.location_error(), message))
def format_integer(self, checker):
if not checker.has_lower_bound():
raise self.error('INTEGER has no minimum value.')
if not checker.has_upper_bound():
raise self.error('INTEGER has no maximum value.')
type_name = self.format_type_name(checker.minimum, checker.maximum)
return [type_name]
def format_boolean(self):
return ['bool']
def format_octet_string(self, checker):
if not checker.has_upper_bound():
raise self.error('OCTET STRING has no maximum length.')
if checker.minimum == checker.maximum:
lines = []
elif checker.maximum < 256:
lines = [' let length: u8;']
else:
lines = [' let length: u32;']
return [
'#[derive(Debug, Default, PartialEq, Copy, Clone)]',
'pub struct {} {{'.format(self.location)
] + lines + [
' pub buf: [u8; {}]'.format(checker.maximum),
'}'
]
def format_sequence(self, type_, checker):
helper_lines = []
lines = []
for member in type_.root_members:
member_checker = self.get_member_checker(checker, member.name)
if member.optional:
lines += ['pub is_{}_present: bool,'.format(member.name)]
with self.members_backtrace_push(member.name):
member_lines = self.format_type(member, member_checker)
member_location = self.location
if not member_lines:
continue
if is_inline_member_lines(member_lines):
member_lines[-1] = 'pub {}: {},'.format(member.name,
member_lines[-1])
else:
helper_lines += member_lines + ['']
member_lines = ['pub {}: {},'.format(member.name,
member_location)]
lines += member_lines
if lines:
lines[-1] = lines[-1].strip(',')
return helper_lines + [
'#[derive(Debug, Default, PartialEq, Copy, Clone)]',
'pub struct {} {{'.format(self.location)
] + indent_lines(lines) + [
'}'
]
def format_sequence_of(self, type_, checker):
if not checker.is_bound():
raise self.error('SEQUENCE OF has no maximum length.')
with self.asn1_members_backtrace_push('elem'):
lines = self.format_type(type_.element_type,
checker.element_type)
if lines:
lines[-1] += ' elements[{}];'.format(checker.maximum)
if checker.minimum == checker.maximum:
length_lines = []
elif checker.maximum < 256:
length_lines = ['let length: u8;']
else:
length_lines = ['let length: u32;']
return ['struct {'] + indent_lines(length_lines + lines) + ['}']
def format_enumerated(self, type_):
lines = [
'#[derive(Debug, PartialEq, Copy, Clone)]',
'pub enum {} {{'.format(self.location)
] + [
' {},'.format(make_camel_case(value))
for value in self.get_enumerated_values(type_)
] + [
'}',
'',
'impl Default for {} {{'.format(self.location),
' fn default() -> Self {',
' {}::{}'.format(self.location,
self.get_enumerated_values(type_)[0].upper()),
' }',
'}'
]
return lines
def format_choice(self, type_, checker):
helper_lines = []
lines = []
for member in self.get_choice_members(type_):
member_checker = self.get_member_checker(checker,
member.name)
with self.members_backtrace_push(member.name):
member_lines = self.format_type(member, member_checker)
member_location = self.location
if not member_lines:
continue
if is_inline_member_lines(member_lines):
member_lines[-1] = '{}({}),'.format(make_camel_case(member.name),
member_lines[-1])
else:
helper_lines += member_lines + ['']
member_lines = ['pub {}: {},'.format(member.name,
member_location)]
lines += member_lines
if lines:
lines[-1] = lines[-1].strip(',')
return helper_lines + [
'#[derive(Debug, PartialEq, Copy, Clone)]',
'pub enum {} {{'.format(self.location)
] + indent_lines(lines) + [
'}'
]
def format_user_type(self, type_name, module_name):
self.used_user_types.append((type_name, module_name))
return ['{}{}'.format(module_name, type_name)]
def format_sequence_inner_member(self,
member,
checker,
default_condition_by_member_name):
member_checker = self.get_member_checker(checker, member.name)
with self.members_backtrace_push(member.name):
encode_lines, decode_lines = self.format_type_inner(
member,
member_checker)
location = self.location_inner('', '.')
if member.optional:
is_present = '{}is_{}_present'.format(location, member.name)
encode_lines = [
'',
'if src.{} {{'.format(is_present)
] + indent_lines(encode_lines) + [
'}',
''
]
decode_lines = [
'',
'if dst.{} {{'.format(is_present)
] + indent_lines(decode_lines) + [
'}',
''
]
elif member.default is not None:
name = '{}{}'.format(location, member.name)
encode_lines = [
'',
'if src.{} != {} {{'.format(name, member.default)
] + indent_lines(encode_lines) + [
'}',
''
]
decode_lines = [
'',
'if {} {{'.format(default_condition_by_member_name[member.name])
] + indent_lines(decode_lines) + [
'} else {',
' dst.{} = {};'.format(name, member.default),
'}',
''
]
return encode_lines, decode_lines
def generate_type_declaration(self, compiled_type):
type_ = compiled_type.type
checker = compiled_type.constraints_checker.type
lines = self.generate_type_declaration_process(type_, checker)
if not lines:
lines = ['dummy: u8;']
if self.helper_lines:
self.helper_lines.append('')
return TYPE_DECLARATION_FMT.format(module_name=self.module_name,
type_name=self.type_name,
members='\n'.join(lines))
def generate_definition(self, compiled_type):
encode_lines, decode_lines = self.generate_definition_inner_process(
compiled_type.type,
compiled_type.constraints_checker.type)
if self.encode_variable_lines:
encode_lines = self.encode_variable_lines + [''] + encode_lines
if self.decode_variable_lines:
decode_lines = self.decode_variable_lines + [''] + decode_lines
encode_lines = indent_lines(indent_lines(encode_lines)) + ['']
decode_lines = indent_lines(indent_lines(decode_lines)) + ['']
return DEFINITION_FMT.format(module_name=self.module_name,
type_name=self.type_name,
encode_body='\n'.join(encode_lines),
decode_body='\n'.join(decode_lines))
def generate(self, compiled):
user_types = []
for module_name, module in sorted(compiled.modules.items()):
self.module_name = module_name
for type_name, compiled_type in sorted(module.items()):
self.type_name = type_name
self.reset_type()
type_declaration = self.generate_type_declaration(compiled_type)
definition = self.generate_definition(compiled_type)
user_type = _UserType(type_name,
module_name,
type_declaration + definition,
self.used_user_types)
user_types.append(user_type)
user_types = sort_user_types_by_used_user_types(user_types)
types_code = []
for user_type in user_types:
types_code.append(user_type.type_code)
types_code = '\n'.join(types_code)
helpers = '\n'.join(self.generate_helpers(types_code))
return helpers, types_code
def format_type(self, type_, checker):
raise NotImplementedError('To be implemented by subclasses.')
def format_type_inner(self, type_, checker):
raise NotImplementedError('To be implemented by subclasses.')
def get_enumerated_values(self, type_):
raise NotImplementedError('To be implemented by subclasses.')
def get_choice_members(self, type_):
raise NotImplementedError('To be implemented by subclasses.')
def generate_type_declaration_process(self, type_, checker):
raise NotImplementedError('To be implemented by subclasses.')
def generate_definition_inner_process(self, type_, checker):
raise NotImplementedError('To be implemented by subclasses.')
def generate_helpers(self, definitions):
raise NotImplementedError('To be implemented by subclasses.')
def canonical(value):
"""Replace anything but 'a-z', 'A-Z' and '0-9' with '_'.
"""
return re.sub(r'[^a-zA-Z0-9]', '_', value)
def camel_to_snake_case(value):
value = re.sub(r'(.)([A-Z][a-z]+)', r'\1_\2', value)
value = re.sub(r'(_+)', '_', value)
value = re.sub(r'([a-z0-9])([A-Z])', r'\1_\2', value).lower()
value = canonical(value)
return value
def make_camel_case(value):
return value[0].upper() + value[1:]
def join_lines(lines, suffix):
return[line + suffix for line in lines[:-1]] + lines[-1:]
def is_user_type(type_):
return type_.module_name is not None
def strip_blank_lines(lines):
try:
while lines[0] == '':
del lines[0]
while lines[-1] == '':
del lines[-1]
except IndexError:
pass
stripped = []
for line in lines:
if line == '' and stripped[-1] == '':
continue
stripped.append(line)
return stripped
def indent_lines(lines, width=4):
indented_lines = []
for line in lines:
if line:
indented_line = width * ' ' + line
else:
indented_line = line
indented_lines.append(indented_line)
return strip_blank_lines(indented_lines)
def dedent_lines(lines, width=4):
return [line[width:] for line in lines]
def sort_user_types_by_used_user_types(user_types):
reversed_sorted_user_types = []
for user_type in user_types:
user_type_name_tuple = (user_type.type_name, user_type.module_name)
# Insert first in the reversed list if there are no types
# using this type.
insert_index = 0
for i, reversed_sorted_user_type in enumerate(reversed_sorted_user_types, 1):
if user_type_name_tuple in reversed_sorted_user_type.used_user_types:
if i > insert_index:
insert_index = i
reversed_sorted_user_types.insert(insert_index, user_type)
return reversed(reversed_sorted_user_types)
def is_inline_member_lines(member_lines):
return len(member_lines) == 1
| [
"erik.moqvist@gmail.com"
] | erik.moqvist@gmail.com |
00e8949d545fa45467bb2276682c9a7d09100ddf | dd1151fd1f3435960cb43269c91628fa333f9090 | /excelwork.py | 7728422a1196fb07b283237fe0159611314935c1 | [] | no_license | AlaynaGrace/Canes-FM | 96b29554254423033484ed8ee83c2d070f905c51 | 2ab6d96b712a66a7b4734dc3fed59f7281e9c43f | refs/heads/master | 2021-06-19T22:32:14.772296 | 2017-07-09T18:36:13 | 2017-07-09T18:36:13 | 55,912,598 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,848 | py | from os import path
import random
import xlwings as xw
def eHW():
mainlist = ['What we do','How and Why','Cleanliness','Mystery Shops',
'Training']
#First round of lists to get every value
WWD = []
HaW = []
C = []
MS = []
T = []
#Second round of lists to make sure indexing later does not become a problem
#They are the "priority lists"
What = []
How = []
Clean = []
Mystery = []
Train = []
#Final dictionary
Final = {}
fname = str(input('Enter source path (ex: C:\\path\\to\\file.xlsx): '))
efile = Workbook(fname)
ofname = str(input('Enter the name of the file you want to put this data into: '))
if path.isfile(ofname): #Checks to see if the file you want to create exists
q = str(input('File exists... overwrite? (y for yes, n for no): '))
q = q.lower() #Makes sure that the y or n is lowercase no matter what is entered
if q == 'y':
ofile = open(ofname, 'w')
else:
return False #Ends the program if you do not want to overwrite the file
else:
ofile = open(ofname,'w') #If it does not exist, it just opens like normal
row = int(input('Enter length of longest list: '))
for i in range(row-1):
a = 'A' + str(i)
WWD += [Range(a).value]
b = 'B' + str(i)
HaW += [Range(b).value]
c = 'C' + str(i)
C += [Range(c).value]
d = 'D' + str(i)
MS += [Range(d).value]
e = 'E' + str(i)
T += [Range(e).value]
#The next 5 loops get rid of all the periods
for i in range(len(WWD)-1):
if WWD[i] == '.':
del WWD[i]
for i in range(len(HaW)-1):
if HaW[i] == '.':
del HaW[i]
for i in range(len(C)-1):
if C[i] == '.':
del C[i]
for i in range(len(MS)-1):
if MS[i] == '.':
del MS[i]
for i in range(len(T)-1):
if T[i] == '.':
del T[i]
#The next 5 blocks of code are used to make sure that each list is correct
#It is not necessary but at least you can see everything going into the file
#Helps with debugging
ofile.write(mainlist[0])
ofile.write(':')
ofile.write(str(WWD))
ofile.write('\n')
ofile.write(mainlist[1])
ofile.write(':')
ofile.write(str(HaW))
ofile.write('\n')
ofile.write(mainlist[2])
ofile.write(':')
ofile.write(str(C))
ofile.write('\n')
ofile.write(mainlist[3])
ofile.write(':')
ofile.write(str(MS))
ofile.write('\n')
ofile.write(mainlist[4])
ofile.write(':')
ofile.write(str(T))
ofile.write('\n')
#Next block of five
#This is where it adds each thing into a new list as many times as it is important
#Ex: The first thing is the most important so it is added in as many times as the list is long (List length = 3, is added in 3 times)
for i in range(len(WWD)-1): #Gets the index number of each thing in the list
for eachThing in WWD: #Looks at each individual value in the list
if eachThing == WWD[i]: #If that thing is equal to the index we got from the first loop
for t in range(len(WWD)-i): #The value is added into the new list as many times as the list is long minus the original index
What += [eachThing]
for i in range(len(HaW)-1):
for eachThing in HaW:
if eachThing == HaW[i]:
for t in range(len(HaW)-i):
How += [eachThing]
for i in range(len(C)-1):
for eachThing in C:
if eachThing == C[i]:
for t in range(len(C)-i):
Clean += [eachThing]
for i in range(len(MS)-1):
for eachThing in MS:
if eachThing == MS[i]:
for t in range(len(MS)-i):
Mystery += [eachThing]
for i in range(len(T)-1):
for eachThing in T:
if eachThing == T[i]:
for t in range(len(T)-i):
Train += [eachThing]
#Next block of 5
#This code checks to make sure the random selection will work
if len(What) == 0: #If there is nothing in this list, then it will just add a null string
Final[mainlist[0]] = ''
else: #Otherwise, it will go ahead and pick a random number and then add the value that is at that number to the dictionary
a = int(random.uniform(0,len(What)-1))
Final[mainlist[0]] = What[a]
if len(How) == 0:
Final[mainlist[1]] = ''
else:
b = int(random.uniform(0,len(How)-1))
Final[mainlist[1]] = How[b]
if len(Clean) == 0:
Final[mainlist[2]] = ''
else:
c = int(random.uniform(0,len(Clean)-1))
Final[mainlist[2]] = Clean[c]
if len(Mystery) == 0:
Final[mainlist[3]] = ''
else:
d = int(random.uniform(0,len(Mystery)-1))
Final[mainlist[3]] = Mystery[d]
if len(Train) == 0:
Final[mainlist[4]] = ''
else:
e = int(random.uniform(0,len(Train)-1))
Final[mainlist[4]] = Train[e]
ofile.write('Final List:') #Writest the final list on the document
ofile.write(str(Final)) #Note: dictionaries do not have any specfic order
print() #They will not be written to the file in any specific order
print(Final) #Prints the dictionary so you can see it immediately
#To see everything that was written to the file, you will have to open the file on your own
ofile.close() #Both files are closed
| [
"alaynagrace@comcast.net"
] | alaynagrace@comcast.net |
413c3426ed14447dbe5e16b7e306a4699dd18a58 | 6753f0f0f910aa1f858be27b3c06152e84949e09 | /stusys/stusys/inouttake.py | 7558eb0726a05fa74579c74ba02065d15bfebe3b | [] | no_license | ildtee/kLwork | 94a7ccc880108351c4cdd2a2e2126c6c82d116cd | f29def03d1cc695de2b247c1d1c8685f45a38e46 | refs/heads/master | 2023-02-05T10:24:19.678375 | 2020-12-21T15:47:11 | 2020-12-21T15:47:11 | 323,098,077 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,726 | py | import os
from datetime import datetime
import xlrd as xlrd
from django.conf import settings
from django.views.decorators.csrf import csrf_exempt
import pymysql
from django.http import HttpResponse
from django.shortcuts import render
from xlrd import xldate_as_datetime
from django.http import HttpResponse
from xlwt import *
from io import StringIO # 需要stringIO,这是python2中的,如果是python3,使用 from io import StringIO
def wrdb(filename):
# 打开上传 excel 表格
readboot = xlrd.open_workbook("D:\\upload\stu.xlsx")
sheet = readboot.sheet_by_index(0)
# 获取excel的行和列
nrows = sheet.nrows
ncols = sheet.ncols
print(ncols, nrows)
for i in range(1, nrows):
row = sheet.row_values(i)
stu_Num = row[4]
stu_Age = row[3]
stu_Sex = row[2]
stu_Name = row[1]
stu_ID = row[0]
db = pymysql.connect(
host='localhost',
user='root',
password='root'
)
cursor = db.cursor()
# SQL语句
cursor.execute('use stusys')
sql2 = 'insert into stu(stu_id ,stu_name ,stu_sex ,stu_age,stu_num) values(%s,%s,%s,%s,%s)'
cursor.execute(sql2, (stu_ID, stu_Name, stu_Sex, stu_Age, stu_Num))
db.commit()
cursor.close()
db.close()
return HttpResponse('上传数据库成功了')
class inout:
@csrf_exempt
def upload_file(request):
if request.method == "POST": # 请求方法为POST时,进行处理
myFile =request.FILES.get("myfile", None) # 获取上传的文件,如果没有文件,则默认为None
if not myFile:
return HttpResponse("no files for upload!")
destination = open(os.path.join("D:\\upload",myFile.name),'wb+') # 打开特定的文件进行二进制的写操作
for chunk in myFile.chunks(): # 分块写入文件
destination.write(chunk)
destination.close()
wrdb(myFile.name)
return HttpResponse("upload over!")
def excel_export(request):
"""
导出excel表格
"""
db = pymysql.connect(
host='localhost',
user='root',
password='root'
)
cursor = db.cursor()
# SQL语句
cursor.execute('use stusys')
cursor.execute('select * from stu')
list_obj = cursor.fetchall()
print(list_obj)
if list_obj:
# 创建工作薄
ws = Workbook(encoding='utf-8')
w = ws.add_sheet(u"数据报表第一页")
w.write(0, 0, "id")
w.write(0, 1, u"学生名")
w.write(0, 2, u"stu_sex")
w.write(0, 3, u"stu_age")
w.write(0, 4, u"stu_num")
# 写入数据
excel_row = 1
for obj in list_obj:
stu_id = obj[0]
stu_name = obj[1]
stu_sex = obj[2]
stu_age = obj[3]
stu_num = obj[4]
w.write(excel_row, 0, stu_id)
w.write(excel_row, 1, stu_name)
w.write(excel_row, 2, stu_sex)
w.write(excel_row, 3, stu_age)
w.write(excel_row, 4, stu_num)
excel_row += 1
# 检测文件是够存在
# 方框中代码是保存本地文件使用,如不需要请删除该代码
###########################
exist_file = os.path.exists("test.xls")
if exist_file:
os.remove(r"test.xls")
ws.save("test.xls")
############################
response = '下载成功了'
return HttpResponse(response)
| [
"75404438+ildtee@users.noreply.github.com"
] | 75404438+ildtee@users.noreply.github.com |
17f8fab62badcdbbb88d5cfd0c6c4506f86e6b50 | fa7c302f7df6b1773b27de3b742d551bd54aa4e2 | /test/test_input_device_all_of.py | 611ab21c2acb46f2c420d439e774eb725eb3aeaa | [] | no_license | cons3rt/cons3rt-python-sdk | d01b3b174c295491130fba0d76d046b16492e9f7 | f0bcb295735ac55bbe47448fcbd95d2c7beb3ec0 | refs/heads/master | 2021-11-04T02:31:54.485541 | 2021-10-26T19:28:57 | 2021-10-26T19:28:57 | 241,673,562 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 879 | py | # coding: utf-8
"""
CONS3RT Web API
A CONS3RT ReSTful API # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: Fred@gigagantic-server.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import cons3rt
from cons3rt.models.input_device_all_of import InputDeviceAllOf # noqa: E501
from cons3rt.rest import ApiException
class TestInputDeviceAllOf(unittest.TestCase):
"""InputDeviceAllOf unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testInputDeviceAllOf(self):
"""Test InputDeviceAllOf"""
# FIXME: construct object with mandatory attributes with example values
# model = cons3rt.models.input_device_all_of.InputDeviceAllOf() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"shaun.tarves@jackpinetech.com"
] | shaun.tarves@jackpinetech.com |
96b6aed07731cefc8eeb7aa9c70881d52109cee9 | ad23578693b561f3d7da14c0aac9d31dd1c1f476 | /anius/anius-004.py | 0a34491cc2b4854bdb5537c5953565b3331de92f | [] | no_license | abpsap/projects | 0faccbde1e4fcf2b7019c8310b04249110cf8761 | 70c2c630f5b07f6a6a3a7dd42f9b7cf2b50c19d3 | refs/heads/master | 2016-08-07T22:49:33.317249 | 2015-08-03T23:53:24 | 2015-08-03T23:53:24 | 40,031,000 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,504 | py | '''
Created on Nov 12, 2013
@author: rriehle
'''
#import numpy as np
#import pandas as pd
import datetime as dt
import logging
import pandas as pd
from querystring import GenerateQueryString
from tradeseries import TradeSeries
# from tradeset import TradeSet
def consolidate_tradeset(myts):
'''Return a consolidated, sorted, single Pandas DataFrame from all of the Pandas DataFrames in the TradeSet'''
# df = pd.DataFrame()
# for e in myts:
# logging.debug("Class of e is %s", e.__class__)
# df = pd.concat(e)
dfaggregate = pd.concat(mytradeset)
dfsorted = dfaggregate.sort_index(by='time')
return(dfsorted)
if __name__ == '__main__':
def pylogger():
from logging.handlers import SysLogHandler
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
syslog = SysLogHandler(address="/dev/log")
formatter = logging.Formatter('%(module)s[%(process)d]: %(levelname)s %(message)s')
syslog.setFormatter(formatter)
logger.addHandler(syslog)
return(logger)
    logger = pylogger()
    logger.info("anius has started")
    # Set of trade-run DataFrames captured across every generated query string.
    mytradeset = set()
    for myquerystring in GenerateQueryString():
        print myquerystring
        mytradeseries = TradeSeries(myquerystring)
        logging.debug("Class of mytradeseries is %s", mytradeseries.__class__)
        logger.info("Length of DataFrame mytradeseries.df is %i", len(mytradeseries.df))
        # mytr = mytradeseries.capture_traderun(100,200)
        # logging.debug("Type of mytr is %s", mytr.__class__)
        # Capture successive trade runs until the generator is exhausted; the
        # outer try treats a failing len() on the yielded value as the end marker.
        for mytraderun in mytradeseries.capture_traderun():
            try:
                print("anius: len(mytraderun) is ", len(mytraderun))
                try:
                    mytradeset.add(mytraderun)
                    print("anius: len(mytradeset) is ", len(mytradeset))
                except:
                    print("anius: unable to add tradeset!?")
                    break
            except:
                print("len(mytraderun) is undefined, so we're done!")
                break
    # mytraderun01 = mytradeseries.capture_traderun(100, 200)
    # mytraderun02 = mytradeseries.capture_traderun(800, 900)
    # mytradeset = set()
    # mytradeset.add(mytraderun01)
    # mytradeset.add(mytraderun02)
    print("anius: FINAL len(mytradeset) is ", len(mytradeset))
    # Merge every captured run into one time-sorted DataFrame and summarize it.
    finaltraderun = consolidate_tradeset(mytradeset)
    print(finaltraderun['lasttrd'].describe())
    # finaltraderun.lasttrd.plot(use_index=False)
    # Select rows where all three trend indicators agree on "up" (value 2),
    # then build a vclose threshold screen.
    # NOTE(review): this uses the last mytradeseries from the loop above, and
    # the final dfuuu[criterion] expression is evaluated but its result is
    # discarded - presumably leftover interactive-session code; confirm.
    dfuuu = mytradeseries.df[ (mytradeseries.df['trend']==2) & (mytradeseries.df['shorttrend']==2) & (mytradeseries.df['longtrend']==2) ]
    criterion = dfuuu['vclose'].map(lambda x: x <= 4.04764039964)
    dfuuu[criterion]
# dfuuu = df[ (df['trend']==2) & (df['shorttrend']==2) & (df['longtrend']==2) ]
# dfuu0 = df[ (df['trend']==2) & (df['shorttrend']==2) & (df['longtrend']==0) ]
# dfuud = df[ (df['trend']==2) & (df['shorttrend']==2) & (df['longtrend']==-2) ]
# dfu0u = df[ (df['trend']==2) & (df['shorttrend']==0) & (df['longtrend']==2) ]
# dfu00 = df[ (df['trend']==2) & (df['shorttrend']==0) & (df['longtrend']==0) ]
# dfu0d = df[ (df['trend']==2) & (df['shorttrend']==0) & (df['longtrend']==-2) ]
# dfudu = df[ (df['trend']==2) & (df['shorttrend']==-2) & (df['longtrend']==2) ]
# dfud0 = df[ (df['trend']==2) & (df['shorttrend']==-2) & (df['longtrend']==0) ]
# dfudd = df[ (df['trend']==2) & (df['shorttrend']==-2) & (df['longtrend']==-2) ]
# df0uu = df[ (df['trend']==0) & (df['shorttrend']==2) & (df['longtrend']==2) ]
# df0u0 = df[ (df['trend']==0) & (df['shorttrend']==2) & (df['longtrend']==0) ]
# df0ud = df[ (df['trend']==0) & (df['shorttrend']==2) & (df['longtrend']==-2) ]
# df00u = df[ (df['trend']==0) & (df['shorttrend']==0) & (df['longtrend']==2) ]
# df000 = df[ (df['trend']==0) & (df['shorttrend']==0) & (df['longtrend']==0) ]
# df00d = df[ (df['trend']==0) & (df['shorttrend']==0) & (df['longtrend']==-2) ]
# df0du = df[ (df['trend']==0) & (df['shorttrend']==-2) & (df['longtrend']==2) ]
# df0d0 = df[ (df['trend']==0) & (df['shorttrend']==-2) & (df['longtrend']==0) ]
# df0dd = df[ (df['trend']==0) & (df['shorttrend']==-2) & (df['longtrend']==-2) ]
# dfduu = df[ (df['trend']==-2) & (df['shorttrend']==2) & (df['longtrend']==2) ]
# dfdu0 = df[ (df['trend']==-2) & (df['shorttrend']==2) & (df['longtrend']==0) ]
# dfdud = df[ (df['trend']==-2) & (df['shorttrend']==2) & (df['longtrend']==-2) ]
# dfd0u = df[ (df['trend']==-2) & (df['shorttrend']==0) & (df['longtrend']==2) ]
# dfd00 = df[ (df['trend']==-2) & (df['shorttrend']==0) & (df['longtrend']==0) ]
# dfd0d = df[ (df['trend']==-2) & (df['shorttrend']==0) & (df['longtrend']==-2) ]
# dfddu = df[ (df['trend']==-2) & (df['shorttrend']==-2) & (df['longtrend']==2) ]
# dfdd0 = df[ (df['trend']==-2) & (df['shorttrend']==-2) & (df['longtrend']==0) ]
# dfddd = df[ (df['trend']==-2) & (df['shorttrend']==-2) & (df['longtrend']==-2) ]
# Note that including the screen for the vclose at this level of the program, before iterative processing, is incorrect,
# because it will lead to an artificially high rate of flicker of initial entry conditions and therefore skew results.
# dfuuu = df[ (df['trend']==2) & (df['shorttrend']==2) & (df['longtrend']==2) & (df['vclose']<=-4.04764039964) ]
# dfuu0 = df[ (df['trend']==2) & (df['shorttrend']==2) & (df['longtrend']==0) & (df['vclose']<=-4.76417749748) ]
# dfuud = df[ (df['trend']==2) & (df['shorttrend']==2) & (df['longtrend']==-2) & (df['vclose']<=-4.13861925445) ]
# dfu0u = df[ (df['trend']==2) & (df['shorttrend']==0) & (df['longtrend']==2) & (df['vclose']<=-4.91537064605) ]
# dfu00 = df[ (df['trend']==2) & (df['shorttrend']==0) & (df['longtrend']==0) & (df['vclose']<=-5.44565420654) ]
# dfu0d = df[ (df['trend']==2) & (df['shorttrend']==0) & (df['longtrend']==-2) & (df['vclose']<=-4.95587748430) ]
# dfudu = df[ (df['trend']==2) & (df['shorttrend']==-2) & (df['longtrend']==2) & (df['vclose']<=-4.80429611982) ]
# dfud0 = df[ (df['trend']==2) & (df['shorttrend']==-2) & (df['longtrend']==0) & (df['vclose']<=-4.48430662326) ]
# dfudd = df[ (df['trend']==2) & (df['shorttrend']==-2) & (df['longtrend']==-2) & (df['vclose']<=-5.08143646781) ]
# df0uu = df[ (df['trend']==0) & (df['shorttrend']==2) & (df['longtrend']==2) & (df['vclose']<=-6.54130796415) ]
# df0u0 = df[ (df['trend']==0) & (df['shorttrend']==2) & (df['longtrend']==0) & (df['vclose']<=-6.98370504851) ]
# df0ud = df[ (df['trend']==0) & (df['shorttrend']==2) & (df['longtrend']==-2) & (df['vclose']<=-7.12133634292) ]
# df00u = df[ (df['trend']==0) & (df['shorttrend']==0) & (df['longtrend']==2) & (df['vclose']<=-7.04157317948) ]
# df000 = df[ (df['trend']==0) & (df['shorttrend']==0) & (df['longtrend']==0) & (df['vclose']<=-7.60161035793) ]
# df00d = df[ (df['trend']==0) & (df['shorttrend']==0) & (df['longtrend']==-2) & (df['vclose']<=-8.16827818869) ]
# df0du = df[ (df['trend']==0) & (df['shorttrend']==-2) & (df['longtrend']==2) & (df['vclose']<=-6.48816341905) ]
# df0d0 = df[ (df['trend']==0) & (df['shorttrend']==-2) & (df['longtrend']==0) & (df['vclose']<=-6.82932266486) ]
# df0dd = df[ (df['trend']==0) & (df['shorttrend']==-2) & (df['longtrend']==-2) & (df['vclose']<=-8.07630172755) ]
# dfduu = df[ (df['trend']==-2) & (df['shorttrend']==2) & (df['longtrend']==2) & (df['vclose']<=-6.72414808193) ]
# dfdu0 = df[ (df['trend']==-2) & (df['shorttrend']==2) & (df['longtrend']==0) & (df['vclose']<=-8.22475973216) ]
# dfdud = df[ (df['trend']==-2) & (df['shorttrend']==2) & (df['longtrend']==-2) & (df['vclose']<=-9.44844736198) ]
# dfd0u = df[ (df['trend']==-2) & (df['shorttrend']==0) & (df['longtrend']==2) & (df['vclose']<=-8.90679755796) ]
# dfd00 = df[ (df['trend']==-2) & (df['shorttrend']==0) & (df['longtrend']==0) & (df['vclose']<=-9.55531697976) ]
# dfd0d = df[ (df['trend']==-2) & (df['shorttrend']==0) & (df['longtrend']==-2) & (df['vclose']<=-9.28591422926) ]
# dfddu = df[ (df['trend']==-2) & (df['shorttrend']==-2) & (df['longtrend']==2) & (df['vclose']<=-10.2452175762) ]
# dfdd0 = df[ (df['trend']==-2) & (df['shorttrend']==-2) & (df['longtrend']==0) & (df['vclose']<=-9.46847620508) ]
# dfddd = df[ (df['trend']==-2) & (df['shorttrend']==-2) & (df['longtrend']==-2) & (df['vclose']<=-11.0170724181) ]
# if len(dfuuu) <> 0:
# print("dfuuu", len(dfuuu))
# dfuuu.lasttrd.plot()
# if len(dfuu0) <> 0:
# print("dfuu0", len(dfuu0))
# dfuu0.lasttrd.plot()
# if len(dfuud) <> 0:
# print("dfuud", len(dfuud))
# dfuud.lasttrd.plot()
# if len(dfu0u) <> 0:
# print("dfu0u", len(dfu0u))
# dfu0u.lasttrd.plot()
# if len(dfu00) <> 0:
# print("dfu00", len(dfu00))
# dfu00.lasttrd.plot()
# if len(dfu0d) <> 0:
# print("dfu00", len(dfu00))
# dfu0d.lasttrd.plot()
# if len(dfudu) <> 0:
# print("dfudu", len(dfudu))
# dfudu.lasttrd.plot()
# if len(dfud0) <> 0:
# print("dfud0", len(dfud0))
# dfud0.lasttrd.plot()
# if len(dfudd) <> 0:
# print("dfudd", len(dfudd))
# dfudd.lasttrd.plot()
# if len(df0uu) <> 0:
# print("df0uu", len(df0uu))
# df0uu.lasttrd.plot()
# if len(df0u0) <> 0:
# print("df0u0", len(df0u0))
# dfu0u.lasttrd.plot()
# if len(df0ud) <> 0:
# print("df0ud", len(df0ud))
# df0ud.lasttrd.plot()
# if len(df00u) <> 0:
# print("df00u", len(df00u))
# df00u.lasttrd.plot()
# if len(df000) <> 0:
# print("df000", len(df000))
# df000.lasttrd.plot()
# if len(df00d) <> 0:
# print("df00d", len(df00d))
# df00d.lasttrd.plot()
# if len(df0du) <> 0:
# print("df0du", len(df0du))
# df0du.lasttrd.plot()
# if len(df0d0) <> 0:
# print("df0d0", len(df0d0))
# df0d0.lasttrd.plot()
# if len(df0dd) <> 0:
# print("df0dd", len(df0dd))
# df0dd.lasttrd.plot()
# if len(dfduu) <> 0:
# print("dfduu", len(dfduu))
# dfduu.lasttrd.plot()
# if len(dfdu0) <> 0:
# print("dfdu0", len(dfdu0))
# dfdu0.lasttrd.plot()
# if len(dfdud) <> 0:
# print("dfdud", len(dfdud))
# dfdud.lasttrd.plot()
# if len(dfd0u) <> 0:
# print("dfd0u", len(dfd0u))
# dfd0u.lasttrd.plot()
# if len(dfd00) <> 0:
# print("dfd00", len(dfd00))
# dfd00.lasttrd.plot()
# if len(dfd0d) <> 0:
# print("dfd0d", len(dfd0d))
# dfd0d.lasttrd.plot()
# if len(dfddu) <> 0:
# print("dfddu", len(dfddu))
# dfddu.lasttrd.plot()
# if len(dfdd0) <> 0:
# print("dfdd0", len(dfdd0))
# dfdd0.lasttrd.plot()
# if len(dfddd) <> 0:
# print("dfddd", len(dfddd))
# dfddd.lasttrd.plot()
# dfuuu.lasttrd.plot(use_index=False)
# dfuu0.lasttrd.plot(use_index=False)
# dfuud.lasttrd.plot(use_index=False)
# dfu0u.lasttrd.plot(use_index=False)
# dfu00.lasttrd.plot(use_index=False)
# dfu0d.lasttrd.plot(use_index=False)
# dfudu.lasttrd.plot(use_index=False)
# dfud0.lasttrd.plot(use_index=False)
# dfudd.lasttrd.plot(use_index=False)
# df0uu.lasttrd.plot(use_index=False)
# df0u0.lasttrd.plot(use_index=False)
# df0ud.lasttrd.plot(use_index=False)
# df00u.lasttrd.plot(use_index=False)
# df000.lasttrd.plot(use_index=False)
# df00d.lasttrd.plot(use_index=False)
# df0du.lasttrd.plot(use_index=False)
# df0d0.lasttrd.plot(use_index=False)
# df0dd.lasttrd.plot(use_index=False)
# dfduu.lasttrd.plot(use_index=False)
# dfdu0.lasttrd.plot(use_index=False)
# dfdud.lasttrd.plot(use_index=False)
# dfd0u.lasttrd.plot(use_index=False)
# dfd00.lasttrd.plot(use_index=False)
# dfd0d.lasttrd.plot(use_index=False)
# dfddu.lasttrd.plot(use_index=False)
# dfdd0.lasttrd.plot(use_index=False)
# dfddd.lasttrd.plot(use_index=False)
# dfuuu.vclose.plot(use_index=False)
# dfuu0.vclose.plot(use_index=False)
# dfuud.vclose.plot(use_index=False)
# dfu0u.vclose.plot(use_index=False)
# dfu00.vclose.plot(use_index=False)
# dfu0d.vclose.plot(use_index=False)
# dfudu.vclose.plot(use_index=False)
# dfud0.vclose.plot(use_index=False)
# dfudd.vclose.plot(use_index=False)
# df0uu.vclose.plot(use_index=False)
# df0u0.vclose.plot(use_index=False)
# df0ud.vclose.plot(use_index=False)
# df00u.vclose.plot(use_index=False)
# df000.vclose.plot(use_index=False)
# df00d.vclose.plot(use_index=False)
# df0du.vclose.plot(use_index=False)
# df0d0.vclose.plot(use_index=False)
# df0dd.vclose.plot(use_index=False)
# dfduu.vclose.plot(use_index=False)
# dfdu0.vclose.plot(use_index=False)
# dfdud.vclose.plot(use_index=False)
# dfd0u.vclose.plot(use_index=False)
# dfd00.vclose.plot(use_index=False)
# dfd0d.vclose.plot(use_index=False)
# dfddu.vclose.plot(use_index=False)
# dfdd0.vclose.plot(use_index=False)
# dfddd.vclose.plot(use_index=False)
logger.info("anius is finished")
| [
"abpsap@gmail.com"
] | abpsap@gmail.com |
510028271dd0273b95172ae8801f8c4076dd5a48 | 700c7801958dd4789caf94785b5dc8c5e3daa4fd | /ttp/src/s3_enum_bucket_src.py | 8b1c813243ec768cb7a2e4575bac6e14f2e45359 | [
"Apache-2.0",
"BSD-3-Clause"
] | permissive | blackbotsecurity/AWS-Attack | 24d4cd6ebda067e9672f4f963d414a7b176e3551 | ad4668ab60173aabce3c6b9c7685160be5e3f14d | refs/heads/master | 2023-03-14T00:05:54.965341 | 2021-03-05T12:44:27 | 2021-03-05T12:44:27 | 331,603,794 | 14 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,507 | py | #!/usr/bin/env python3
import datetime
import argparse
import datetime
from copy import deepcopy
import os
from botocore.exceptions import ClientError
FILE_SIZE_THRESHOLD = 1073741824
def get_bucket_size(awsattack, bucket_name):
    """Return the bucket's average StandardStorage size in bytes over the
    last day, as reported by the CloudWatch 'BucketSizeBytes' metric.
    Returns 0 when CloudWatch has no datapoints for the bucket."""
    cloudwatch = awsattack.get_boto3_client('cloudwatch', 'us-east-1')
    metrics = cloudwatch.get_metric_statistics(
        Namespace='AWS/S3',
        MetricName='BucketSizeBytes',
        Dimensions=[
            {'Name': 'BucketName', 'Value': bucket_name},
            {'Name': 'StorageType', 'Value': 'StandardStorage'}
        ],
        Statistics=['Average'],
        Period=3600,
        StartTime=datetime.datetime.today() - datetime.timedelta(days=1),
        EndTime=datetime.datetime.now().isoformat()
    )
    datapoints = metrics['Datapoints']
    if not datapoints:
        return 0
    return datapoints[0]['Average']
def download_s3_file(awsattack, key, bucket):
    """Download one S3 object into the session's downloads directory.

    Prompts for confirmation before fetching objects larger than
    FILE_SIZE_THRESHOLD.  Returns True on success, False on user refusal
    or download error.
    """
    session = awsattack.get_active_session()
    # NOTE(review): technique_info is not defined at module scope here - in
    # main() it is a local variable - so this lookup will raise NameError
    # unless some other module injects a global; confirm before use.
    base_directory = 'sessions/{}/downloads/{}/{}/'.format(session.name, technique_info['name'], bucket)
    directory = base_directory
    # Everything before the object's final path component.
    offset_directory = key.split('/')[:-1]
    if offset_directory:
        # NOTE(review): ''.join() concatenates the path components without
        # separators (e.g. ['a', 'b'] -> 'ab'), and the created 'directory'
        # differs from the 'base_directory + key' target used below - looks
        # like a latent path bug; verify intended layout.
        directory += '/' + ''.join(offset_directory)
    if not os.path.exists(directory):
        os.makedirs(directory)
    s3 = awsattack.get_boto3_resource('s3')
    # Size check first so we can warn before pulling very large objects.
    size = s3.Object(bucket, key).content_length
    if size > FILE_SIZE_THRESHOLD:
        awsattack.print('  LARGE FILE DETECTED:')
        confirm = awsattack.input('    Download {}? Size: {} bytes (y/n) '.format(key, size))
        if confirm != 'y':
            return False
    try:
        s3.Bucket(bucket).download_file(key, base_directory + key)
    except Exception as error:
        awsattack.print('  {}'.format(error))
        return False
    return True
def extract_from_file(awsattack, file):
    """Parse a '<key>@<bucket>' names file into a {key: bucket} dict.

    Each line of ``file`` is expected to look like 'path/to/object@bucket'
    (the format produced by write_bucket_keys_to_file).  Lines without an
    '@' separator are skipped.  Returns an empty dict when the file does
    not exist.
    """
    files = {}
    try:
        with open(file, 'r') as bucket_file:
            for line in bucket_file:
                # Strip the newline explicitly instead of slicing with
                # [:-1], which chopped the last character of the bucket
                # name whenever the final line lacked a trailing '\n'.
                line = line.rstrip('\n')
                delimiter = line.rfind('@')
                if delimiter == -1:
                    # Malformed line with no separator: skip it rather than
                    # storing a bogus entry (rfind returns -1 otherwise).
                    continue
                files[line[:delimiter]] = line[delimiter + 1:]
    except FileNotFoundError:
        awsattack.print('  Download File not found...')
    return files
def write_bucket_keys_to_file(awsattack, objects):
    """Write every discovered object as a '<key>@<bucket>' line to a names
    file under the session's downloads directory.

    ``objects`` maps bucket name -> list of object keys.  The output format
    is consumed by extract_from_file().
    """
    awsattack.print('  Writing file names to disk...')
    session = awsattack.get_active_session()
    file = 'sessions/{}/downloads/{}/'.format(session.name, 's3_download_bucket')
    if not os.path.exists(file):
        os.makedirs(file)
    file += '{}_file_names.txt'.format('s3_download_bucket')
    try:
        with open(file, 'w') as objects_file:
            for key in objects:
                # NOTE(review): the loop variable 'file' shadows the path
                # variable above; harmless only because open() has already
                # consumed the path.
                for file in objects[key]:
                    objects_file.write('{}@{}\n'.format(file, key))
    except Exception as error:
        # NOTE(review): errors are printed (with the builtin print, not
        # awsattack.print) and then True is still returned below - callers
        # cannot detect a failed write.
        print(error)
    return True
def main(args, awsattack_main, data=None):
    """Entry point for the S3 bucket-enumeration technique.

    Two modes:
      * ``--dl-names <file>``: download every object listed in a previously
        written names file and report success/failure counts.
      * default: enumerate all buckets visible to the credentials, list the
        objects in each, persist the bucket list to the session, and write
        the object names to disk for later download.
    Returns a summary dict (empty on argument or permission errors).
    """
    technique_info = data
    session = awsattack_main.get_active_session()
    # Shadow the builtins with the framework's I/O hooks for this scope.
    print = awsattack_main.print
    input = awsattack_main.input
    # NOTE(review): --names-only is only validated here, never acted on in
    # this function - presumably honored elsewhere; confirm.
    if (args.names_only is True and args.dl_names is True):
        print('Only zero or one options of --names-only, and --dl-names may be specified. Exiting...')
        return {}
    # Download Objects from File
    if args.dl_names:
        awsattack_main.print('  Extracting files from file...')
        extracted_files = extract_from_file(awsattack_main, args.dl_names)
        total = len(extracted_files.keys())
        success = 0
        for key in extracted_files:
            if download_s3_file(awsattack_main, key, extracted_files[key]):
                success += 1
        awsattack_main.print('  Finished downloading from file...')
        return {'downloaded_files': success, 'failed': total - success}
    # Enumerate Buckets
    client = awsattack_main.get_boto3_client('s3')
    buckets = []
    print('Enumerating buckets...')
    try:
        response = client.list_buckets()
    except ClientError as error:
        code = error.response['Error']['Code']
        if code == 'AccessDenied':
            print('  FAILURE: MISSING AWS PERMISSIONS')
        else:
            print(code)
        return {}
    # Persist the discovered buckets into the session's S3 record.
    s3_data = deepcopy(session.S3)
    s3_data['Buckets'] = deepcopy(response['Buckets'])
    session.update(awsattack_main.database, S3=s3_data)
    summary_data = {'buckets': len(response['Buckets'])}
    for bucket in response['Buckets']:
        buckets.append(bucket['Name'])
        print('  Found bucket "{bucket_name}"'.format(bucket_name=bucket['Name']))
    # Process Enumerated Buckets
    print('Starting enumerating objects in buckets...')
    summary_data['readable_buckets'] = 0
    objects = {}
    for bucket in buckets:
        # Paginate to collect every object key; a bucket only counts as
        # "readable" if the full listing completes without a ClientError.
        paginator = client.get_paginator('list_objects_v2')
        page_iterator = paginator.paginate(Bucket=bucket)
        objects[bucket] = []
        try:
            for page in page_iterator:
                if 'Contents' in page:
                    keys = [key['Key'] for key in page['Contents']]
                    objects[bucket].extend(keys)
            summary_data['readable_buckets'] += 1
        except ClientError as error:
            print('  Unable to read bucket')
            code = error.response['Error']['Code']
            print(code)
            continue
        # NOTE(review): this trailing continue is redundant (end of loop body).
        continue
    # Enumerated buckets and associated list of files
    print('Finished enumerating objects in buckets...')
    summary_data['objects'] = objects
    write_bucket_keys_to_file(awsattack_main, objects)
    return summary_data
| [
"github.nk@blackbot.io"
] | github.nk@blackbot.io |
338167a41b0820c77bc438289918f04bf2209e6c | 8ef36c558bb8dcf3992e9ccdb530f5c390e72325 | /Winkfield_numbers_game.py | 1861b3c67b602902456a610b0434771db8149493 | [] | no_license | gmendiol-cisco/aer-python-game | b6f08667721961a20b0fe4fd301c91f42cdfa57c | 603e235532d00ef978942a027a552659634822f9 | refs/heads/master | 2020-05-30T07:45:09.811484 | 2019-07-26T21:06:25 | 2019-07-26T21:06:25 | 189,603,648 | 1 | 2 | null | 2019-07-05T21:27:17 | 2019-05-31T14:03:15 | Python | UTF-8 | Python | false | false | 836 | py | #!/usr/bin/python
#Defining varibles
# Game configuration: the secret number and how many guesses are allowed.
correct_answer = 50
max_attempts = 3
attempts = 0
# Initial welcome banner.
print("Welcome to the numbers game.\n")
# Give the player max_attempts guesses; the loop index doubles as the
# attempt counter for the progress message.
for attempts in range(max_attempts):
    print("You are on attempt " + str(attempts + 1))
    # Prompt for a guess (Python 2: raw_input returns a string).
    myInput = raw_input("Guess the number from 1 - 100: ")
    # Validate the input is numeric; a non-number ends the game immediately.
    try:
        int(myInput)
    except ValueError:
        print("Thats not a number dummy!!!")
        quit()
    # Compare the guess to the answer: exact match wins and exits,
    # otherwise hint whether the guess was too low or too high.
    if (correct_answer == int(myInput)):
        print("YOU WIN!!!")
        quit()
    else:
        if (correct_answer > int(myInput)):
            print("TOO LOW HOMEY!!!")
        else:
            print("TOO HIGH MY FRIEND")
# All attempts used without a win (wins quit() inside the loop).
print("SORRY YOU LOSE")
| [
"noreply@github.com"
] | noreply@github.com |
a302291624c13fd9a1f6808e9c8885774baf1374 | 8b4ca76a9c1e9aba74ce9ca3008f78b0293a8df2 | /algorithms/policy.py | b391e7e85011ff0e3975adf670d34f866c3670ab | [] | no_license | sebastiengilbert73/ReinforcementLearning | 4d2eb94327ee56568216d673b1a90a928e79be55 | b45578ec7603be37968d95c216d4169c276c0ab4 | refs/heads/master | 2021-06-18T06:57:04.815045 | 2021-02-21T18:08:31 | 2021-02-21T18:08:31 | 164,341,263 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,676 | py | import abc
import random
import copy
class LegalActionsAuthority(abc.ABC):
    """
    Abstract base class whose implementations decide which actions from the
    actions set are legal in a given state.
    """

    def __init__(self):
        super().__init__()

    @abc.abstractmethod
    def LegalActions(self, state):
        pass  # return legal_actions_set
class AllActionsLegalAuthority(LegalActionsAuthority):
    """
    Utility authority for which every action in the actions set is legal,
    regardless of the state.
    """

    def __init__(self, actions_set):
        super().__init__()
        self.actions_set = actions_set

    def LegalActions(self, state):
        # Hand back a deep copy so callers cannot mutate our actions set.
        return copy.deepcopy(self.actions_set)
class Policy(abc.ABC):
    """
    Abstract class that selects an action from a state.

    Concrete subclasses implement ActionProbabilities(), returning a dict
    mapping each candidate action to its selection probability (expected to
    sum to 1).
    """
    def __init__(self, legal_actions_authority):
        super().__init__()
        self.legal_actions_authority = legal_actions_authority

    @abc.abstractmethod
    def ActionProbabilities(self, state):
        pass  # return action_to_probability_dict

    def Probability(self, state, action):
        """Return the probability of selecting `action` in `state` (0 if absent)."""
        action_to_probability_dict = self.ActionProbabilities(state)
        if action in action_to_probability_dict:
            return action_to_probability_dict[action]
        else:
            return 0

    def Select(self, state):
        """Sample an action according to ActionProbabilities(state).

        Raises ValueError when the subclass reports no candidate actions.
        """
        action_to_probability_dict = self.ActionProbabilities(state)
        if not action_to_probability_dict:
            raise ValueError("Policy.Select(): No candidate action for state {}".format(state))
        # Build the cumulative distribution, then invert it with one uniform draw.
        action_running_sum_list = []
        running_sum = 0
        for action, probability in action_to_probability_dict.items():
            running_sum += probability
            action_running_sum_list.append((action, running_sum))
        random_0to1 = random.random()
        for action, cumulative_probability in action_running_sum_list:
            if cumulative_probability >= random_0to1:
                return action
        # Floating-point rounding can leave the final cumulative sum slightly
        # below random_0to1; fall back to the last action instead of raising,
        # which is what the original implementation did in that rare case.
        return action_running_sum_list[-1][0]
class Random(Policy):  # Uniform choice among the legal actions
    def __init__(self, legal_actions_authority):
        super().__init__(legal_actions_authority)

    def ActionProbabilities(self, state):
        # Every legal action receives an equal share of the probability mass.
        legal_actions = self.legal_actions_authority.LegalActions(state)
        uniform_probability = 1 / len(legal_actions)
        return {action: uniform_probability for action in legal_actions}
class Greedy(Policy):
    """
    Deterministic policy that always selects the most valuable action for a
    state, as recorded in a lookup table.
    """
    def __init__(self, state_to_most_valuable_action, legal_actions_authority):
        super().__init__(legal_actions_authority)
        # Deep-copied so later table mutations by the caller don't affect us
        # (and our legality fallback below doesn't mutate the caller's table).
        self.state_to_most_valuable_action = copy.deepcopy(state_to_most_valuable_action)

    def ActionProbabilities(self, state):
        """Return {best_action: 1} for the given state.

        Falls back to an arbitrary legal action (and records it) when the
        table has no entry for `state` or the tabled action is not legal.
        """
        legal_actions_set = self.legal_actions_authority.LegalActions(state)
        # .get() avoids the KeyError the original raised for unseen states;
        # None is never a legal action, so the fallback branch triggers.
        if self.state_to_most_valuable_action.get(state) not in legal_actions_set:
            self.state_to_most_valuable_action[state] = list(legal_actions_set)[0]
        return {self.state_to_most_valuable_action[state]: 1}
class EpsilonGreedy(Policy):
    """
    Selects the most valuable action with probability (1 - epsilon).
    Otherwise, spreads epsilon uniformly over all known actions for the state.
    """
    def __init__(self, epsilon, stateAction_to_value, legal_actions_authority=None):
        # Call the base-class initializer (the original skipped it, leaving
        # self.legal_actions_authority undefined).  The new parameter
        # defaults to None so existing two-argument callers keep working.
        super().__init__(legal_actions_authority)
        self.epsilon = epsilon
        self.stateAction_to_value = stateAction_to_value
        # Pre-group the (state, action) pairs by state for fast lookup.
        self.state_to_stateActions = {}
        for (state, action) in self.stateAction_to_value:
            self.state_to_stateActions.setdefault(state, []).append((state, action))

    def ActionProbabilities(self, state):
        """Return the epsilon-greedy distribution over the state's known actions."""
        stateActions_list = self.state_to_stateActions[state]
        if len(stateActions_list) == 0:
            return {}
        # Find the highest-valued action for this state.
        most_valuable_action = None
        highest_value = float('-inf')
        for (_state, action) in stateActions_list:
            value = self.stateAction_to_value[(_state, action)]
            if value > highest_value:
                highest_value = value
                most_valuable_action = action
        number_of_actions = len(stateActions_list)
        # Every action receives epsilon/n; the greedy action additionally
        # gets the remaining (1 - epsilon) mass.
        action_to_probability = {}
        for (_state, action) in stateActions_list:
            action_to_probability[action] = self.epsilon / number_of_actions
        action_to_probability[most_valuable_action] += (1.0 - self.epsilon)
        return action_to_probability
"sebastiengilbert73@yahoo.ca"
] | sebastiengilbert73@yahoo.ca |
9aa10bf7fd17cf41934bd921783e8fcd9c6cb476 | ac99ffd1a62687ff14e1c148c4073ab473309017 | /RaspberryPiInteropExample.py | 310e9cc6d96d082dc2728aba7f9ee6ce8081e135 | [] | no_license | iotbook/chapter9 | 14de1f7b690594e86da220f6660652b88774c7e3 | 77d6b475075dde9589a9531c5996fceca2859b44 | refs/heads/master | 2021-01-10T20:56:37.474377 | 2014-05-19T04:57:56 | 2014-05-19T04:57:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,233 | py | #!/usr/bin/python
import paho.mqtt.client as mqtt
from time import sleep
import RPi.GPIO as GPIO
import re
user = '<user ID>'
key = '<token>'
arduinoError = False
bbbError = False
piError = False
def updateLEDs():
    # Drive the two status LEDs from the module-level error flags: red on
    # (green off) if any monitored device reports an error, otherwise green
    # on (red off).
    if arduinoError == True or bbbError == True or piError == True:
        GPIO.output(GREEN_LED, False)
        GPIO.output(RED_LED, True)
    else:
        GPIO.output(GREEN_LED, True)
        GPIO.output(RED_LED, False)
def on_message(client, obj, msg):
# a message was received on a subscribed topic
# first look at topic to determine what logic we need on the payload (is this an arduino or bbb?)
# possible topics:
# - BeagleBone Black: <project>/apress/bbb
# - Arduino: <project>/apress/arduino
global arduinoError
global bbbError
if msg.topic == "<project>/apress/bbb":
# parse with regex to find number, store in val
val = int(re.search(r'\d+', msg.payload).group())
print str(val)
if val < 5:
bbbError = True
else:
bbbError = False
elif msg.topic == "<project>/apress/arduino":
# parse with regex to find number, store in val
val = int(re.search(r'\d+', msg.payload).group())
print str(val)
if val < 500:
arduinoError = True
else:
arduinoError = False
# setup GPIO pins (BCM numbering): two output LEDs and one input button
GPIO.setmode(GPIO.BCM)
GREEN_LED = 24
RED_LED = 23
BUTTON_PIN = 25
GPIO.setup(GREEN_LED, GPIO.OUT)
GPIO.setup(RED_LED, GPIO.OUT)
# Start in the "all OK" state: green on, red off.
GPIO.output(GREEN_LED, True)
GPIO.output(RED_LED, False)
GPIO.setup(BUTTON_PIN, GPIO.IN)
# create MQTT client and register the message callback
client = mqtt.Client("iotbook-pi")
client.on_message = on_message
client.username_pw_set(user, key)
# connect to 2lemetry platform
client.connect("q.m2m.io", 1883, 60)
client.loop()
# subscribe to device topics (QoS 0)
client.subscribe("<project>/apress/arduino", 0)
client.subscribe("<project>/apress/bbb", 0)
client.loop()
try:
    # Main loop: publish the button state as JSON every 2 seconds, service
    # the MQTT client, and refresh the LEDs from the error flags.
    while(True):
        if (GPIO.input(BUTTON_PIN) == True):
            pub_str = "{\"b\":1}"
            piError = True
        else:
            pub_str = "{\"b\":0}"
            piError = False
        print pub_str
        client.publish("<project>/apress/pi", pub_str)
        client.loop()
        updateLEDs()
        sleep(2)
finally:
    # disconnect client
    client.disconnect()
    # cleanup GPIO
    GPIO.cleanup()
| [
"john@2lemetry.com"
] | john@2lemetry.com |
d14ddab3526ea114b95b4603e032f662aeb9b139 | e8d95dc8b79281a4262bec6cc93538414c7b6b77 | /translate/urls.py | c583a883172f0c5d9b3b1becfc6acb9cab264396 | [] | no_license | sprinzs123/Google-translate-clone | 03551e47ad3a2238e7c51ebf9facbfdb16a3096e | 69a8d2795178ef1ca71c47905ebae96d89342c41 | refs/heads/master | 2023-01-13T16:54:33.293624 | 2020-11-20T00:05:05 | 2020-11-20T00:05:05 | 288,860,608 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 794 | py | """translate URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
    path('admin/', admin.site.urls),
    # Delegate every remaining URL to the api app's URLconf.
    path('', include('api.urls'))
]
| [
"sprinzs351@gmail.com"
] | sprinzs351@gmail.com |
236cf4532f3fdde162ba6752e286002ebdff0b32 | 039c2e60b859d88bb686c0e66bc6dab2ab723b8e | /环境控制系统/wsgi.py | 507ea5c297691da4776aee67c4084fe4aea07c47 | [] | no_license | ccc-0/ECS | 850613971e4c6fd9cbb6ddcbe2c51b5285d622ac | ef4d69cb4c6fd1b1bbd40ba9c754c8e50c56d8ee | refs/heads/master | 2020-09-13T21:50:42.033517 | 2020-02-13T03:47:10 | 2020-02-13T03:47:10 | 222,913,137 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 413 | py | """
WSGI config for 环境控制系统 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at this project's settings module before building the app.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', '环境控制系统.settings')
# Module-level WSGI callable picked up by application servers.
application = get_wsgi_application()
| [
"1056179315@qq.com"
] | 1056179315@qq.com |
8910012782c16a46416210e118ba3994642a3c27 | 480a05a61cc2708e0f6eacb7024333a076009201 | /identYwaf.py | 707a3647fb5a59b3be8f4175c36cac796b0da4f5 | [
"MIT"
] | permissive | ver007/identYwaf | a9e494ff7a1735184c4926fdd7618852b4f9b3e5 | 52f47dfcd932329326e5d535d62e931e6b9b7d65 | refs/heads/master | 2020-06-04T16:31:41.268847 | 2019-01-15T08:34:17 | 2019-01-15T08:34:17 | 192,104,963 | 1 | 0 | null | 2019-06-15T17:17:29 | 2019-06-15T17:17:29 | null | UTF-8 | Python | false | false | 15,688 | py | #!/usr/bin/env python
"""
Copyright (c) 2019 Miroslav Stampar (@stamparm), MIT
See the file 'LICENSE' for copying permission
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
"""
import base64
import cookielib
import httplib
import json
import optparse
import os
import random
import re
import ssl
import socket
import string
import struct
import subprocess
import sys
import time
import urllib
import urllib2
import zlib
NAME = "identYwaf"
VERSION = "1.0.17"
BANNER = """
` __ __ `
____ ___ ___ ____ ______ `| T T` __ __ ____ _____
l j| \ / _]| \ | T`| | |`| T__T T / T| __|
| T | \ / [_ | _ Yl_j l_j`| ~ |`| | | |Y o || l_
| | | D YY _]| | | | | `|___ |`| | | || || _|
j l | || [_ | | | | | `| !` \ / | | || ]
|____jl_____jl_____jl__j__j l__j `l____/ ` \_/\_/ l__j__jl__j (%s)%s""".strip("\n") % (VERSION, "\n")
RAW, TEXT, HTTPCODE, TITLE, HTML, URL = xrange(6)
COOKIE, UA, REFERER = "Cookie", "User-Agent", "Referer"
GET, POST = "GET", "POST"
GENERIC_PROTECTION_KEYWORDS = ('rejected', 'forbidden', 'suspicious', 'malicious', 'captcha', 'invalid', 'your ip', 'please contact', 'terminated', 'protected', 'unauthorized', 'blocked', 'protection', 'incident', 'denied', 'detected', 'dangerous', 'firewall', 'fw_block', 'unusual activity', 'bad request', 'request id', 'injection', 'permission', 'not acceptable', 'security policy', 'security reasons')
GENERIC_PROTECTION_REGEX = r"(?i)\b(%s)\b"
GENERIC_ERROR_MESSAGE_REGEX = r"\b[A-Z][\w, '-]*(protected by|security|unauthorized|detected|attack|error|rejected|allowed|suspicious|automated|blocked|invalid|denied|permission)[\w, '!-]*"
HEURISTIC_PAYLOAD = "1 AND 1=1 UNION ALL SELECT 1,NULL,'<script>alert(\"XSS\")</script>',table_name FROM information_schema.tables WHERE 2>1--/**/; EXEC xp_cmdshell('cat ../../../etc/passwd')#"
PAYLOADS = []
SIGNATURES = {}
DATA_JSON = {}
DATA_JSON_FILE = "data.json"
MAX_HELP_OPTION_LENGTH = 18
IS_TTY = sys.stdout.isatty()
COLORIZE = not subprocess.mswindows and IS_TTY
LEVEL_COLORS = {"o": "\033[00;94m", "x": "\033[00;91m", "!": "\033[00;93m", "i": "\033[00;95m", "=": "\033[00;93m", "+": "\033[00;92m", "-": "\033[00;91m"}
VERIFY_OK_INTERVAL = 5
VERIFY_RETRY_TIMES = 3
DEFAULTS = {"timeout": 10}
MAX_MATCHES = 5
if COLORIZE:
for _ in re.findall(r"`.+?`", BANNER):
BANNER = BANNER.replace(_, "\033[01;92m%s\033[00;49m" % _.strip('`'))
for _ in re.findall(r" [Do] ", BANNER):
BANNER = BANNER.replace(_, "\033[01;93m%s\033[00;49m" % _.strip('`'))
BANNER = re.sub(VERSION, r"\033[01;91m%s\033[00;49m" % VERSION, BANNER)
else:
BANNER = BANNER.replace('`', "")
REVISION = random.randint(20, 64)
PLATFORM = random.sample(("X11; %s %s" % (random.sample(("Linux", "Ubuntu; Linux", "U; Linux", "U; OpenBSD", "U; FreeBSD"), 1)[0], random.sample(("amd64", "i586", "i686", "amd64"), 1)[0]), "Windows NT %s%s" % (random.sample(("5.0", "5.1", "5.2", "6.0", "6.1", "6.2", "6.3", "10.0"), 1)[0], random.sample(("", "; Win64", "; WOW64"), 1)[0]), "Macintosh; Intel Mac OS X 10.%s" % random.randint(1, 11)), 1)[0]
USER_AGENT = "Mozilla/5.0 (%s; rv:%d.0) Gecko/20100101 Firefox/%d.0" % (PLATFORM, REVISION, REVISION)
HEADERS = {"User-Agent": USER_AGENT, "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8", "Accept-Language": "en-US,en;q=0.5", "Accept-Encoding": "identity", "Cache-Control": "max-age=0"}
original = None
options = None
intrusive = None
_exit = exit
def exit(message):
    # Intentionally shadows the builtin exit(); the builtin is preserved at
    # module level as _exit.  The trailing spaces overwrite any leftover
    # progress output on the current terminal line before terminating.
    print "%s%s" % (message, ' ' * 20)  # identYwaf requires usage of Python 2.x
    _exit(1)
def retrieve(url, data=None):
    """Fetch `url` (POST when `data` is given) and return a dict keyed by the
    module-level RAW/TEXT/HTTPCODE/TITLE/HTML/URL indices.

    Errors never propagate: on any exception the same dict is populated from
    whatever the exception object exposes (urllib2 HTTPError carries code,
    headers and a readable body).
    """
    retval = {}
    try:
        # Percent-encode spaces only in the query part (characters after '?').
        req = urllib2.Request("".join(url[_].replace(' ', "%20") if _ > url.find('?') else url[_] for _ in xrange(len(url))), data, HEADERS)
        resp = urllib2.urlopen(req, timeout=options.timeout)
        retval[URL] = resp.url
        retval[HTML] = resp.read()
        retval[HTTPCODE] = resp.code
        # Reconstruct an approximation of the raw HTTP response (status line,
        # headers, body) for signature matching.
        retval[RAW] = "%s %d %s\n%s\n%s" % (httplib.HTTPConnection._http_vsn_str, retval[HTTPCODE], resp.msg, "".join(resp.headers.headers), retval[HTML])
    except Exception, ex:
        # HTTP errors (4xx/5xx) and network failures land here; salvage as
        # much of the response as the exception object provides.
        retval[URL] = getattr(ex, "url", url)
        retval[HTTPCODE] = getattr(ex, "code", None)
        try:
            retval[HTML] = ex.read() if hasattr(ex, "read") else getattr(ex, "msg", "")
        except:
            retval[HTML] = ""
        retval[RAW] = "%s %s %s\n%s\n%s" % (httplib.HTTPConnection._http_vsn_str, retval[HTTPCODE] or "", getattr(ex, "msg", ""), "".join(ex.headers.headers) if hasattr(ex, "headers") else "", retval[HTML])
    # Extract the <title> text (None when absent).
    match = re.search(r"<title>(?P<result>[^<]+)</title>", retval[HTML], re.I)
    retval[TITLE] = match.group("result") if match and "result" in match.groupdict() else None
    # TEXT is the HTML with scripts, comments, styles, tags and runs of
    # whitespace collapsed to single spaces.
    retval[TEXT] = re.sub(r"(?si)<script.+?</script>|<!--.+?-->|<style.+?</style>|<[^>]+>|\s+", " ", retval[HTML])
    return retval
def calc_hash(line, binary=True):
result = zlib.crc32(line) & 0xffffL
if binary:
result = struct.pack(">H", result)
return result
def check_payload(payload, protection_regex=GENERIC_PROTECTION_REGEX % '|'.join(GENERIC_PROTECTION_KEYWORDS)):
global intrusive
time.sleep(options.delay or 0)
_ = "%s%s%s=%s" % (options.url, '?' if '?' not in options.url else '&', "".join(random.sample(string.letters, 3)), urllib.quote(payload))
intrusive = retrieve(_)
result = intrusive[HTTPCODE] != original[HTTPCODE] or (intrusive[HTTPCODE] != 200 and intrusive[TITLE] != original[TITLE]) or (re.search(protection_regex, intrusive[HTML]) is not None and re.search(protection_regex, original[HTML]) is None)
return result
def colorize(message):
if COLORIZE:
message = re.sub(r"\[(.)\]", lambda match: "[%s%s\033[00;49m]" % (LEVEL_COLORS[match.group(1)], match.group(1)), message)
if "rejected summary" in message:
for match in re.finditer(r"[^\w]'([^)]+)'", message):
message = message.replace("'%s'" % match.group(1), "'\033[37m%s\033[00;49m'" % match.group(1), 1)
else:
for match in re.finditer(r"[^\w]'([^']+)'", message):
message = message.replace("'%s'" % match.group(1), "'\033[37m%s\033[00;49m'" % match.group(1), 1)
if "blind match" in message:
for match in re.finditer(r"\(((\d+)%)\)", message):
message = message.replace(match.group(1), "\033[%dm%s\033[00;49m" % (92 if int(match.group(2)) >= 95 else (93 if int(match.group(2)) > 80 else 90), match.group(1)))
if "hardness" in message:
for match in re.finditer(r"\(((\d+)%)\)", message):
message = message.replace(match.group(1), "\033[%dm%s\033[00;49m" % (91 if " insane " in message else (95 if " hard " in message else (93 if " moderate " in message else 92)), match.group(1)))
return message
def parse_args():
global options
parser = optparse.OptionParser(version=VERSION)
parser.add_option("--delay", dest="delay", type=int, help="Delay (sec) between tests (default: 0)")
parser.add_option("--timeout", dest="timeout", type=int, help="Response timeout (sec) (default: 10)")
parser.add_option("--proxy", dest="proxy", help="HTTP proxy address (e.g. \"http://127.0.0.1:8080\")")
# Dirty hack(s) for help message
def _(self, *args):
retval = parser.formatter._format_option_strings(*args)
if len(retval) > MAX_HELP_OPTION_LENGTH:
retval = ("%%.%ds.." % (MAX_HELP_OPTION_LENGTH - parser.formatter.indent_increment)) % retval
return retval
parser.usage = "python %s <host|url>" % parser.usage
parser.formatter._format_option_strings = parser.formatter.format_option_strings
parser.formatter.format_option_strings = type(parser.formatter.format_option_strings)(_, parser, type(parser))
for _ in ("-h", "--version"):
option = parser.get_option(_)
option.help = option.help.capitalize()
try:
options, _ = parser.parse_args()
except SystemExit:
raise
if len(sys.argv) > 1:
url = sys.argv[-1]
if not url.startswith("http"):
url = "http://%s" % url
options.url = url
else:
parser.print_help()
raise SystemExit
for key in DEFAULTS:
if getattr(options, key, None) is None:
setattr(options, key, DEFAULTS[key])
def init():
os.chdir(os.path.abspath(os.path.dirname(__file__)))
if os.path.isfile(DATA_JSON_FILE):
print colorize("[o] loading data...")
content = open(DATA_JSON_FILE, "rb").read()
DATA_JSON.update(json.loads(content))
for waf in DATA_JSON["wafs"]:
for signature in DATA_JSON["wafs"][waf]["signatures"]:
SIGNATURES[signature] = waf
else:
exit(colorize("[x] file '%s' is missing" % DATA_JSON_FILE))
print colorize("[o] initializing handlers...")
# Reference: https://stackoverflow.com/a/28052583
if hasattr(ssl, "_create_unverified_context"):
ssl._create_default_https_context = ssl._create_unverified_context
cookie_jar = cookielib.CookieJar()
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookie_jar))
urllib2.install_opener(opener)
if options.proxy:
opener = urllib2.build_opener(urllib2.ProxyHandler({"http": options.proxy, "https": options.proxy}))
urllib2.install_opener(opener)
def run():
global original
hostname = options.url.split("//")[-1].split('/')[0]
if not hostname.replace('.', "").isdigit():
print colorize("[i] checking hostname '%s'..." % hostname)
try:
socket.getaddrinfo(hostname, None)
except socket.gaierror:
exit(colorize("[x] host '%s' does not exist" % hostname))
results = ""
signature = ""
counter = 0
original = retrieve(options.url)
if 300 <= original[HTTPCODE] < 400 and original[URL]:
original = retrieve(original[URL])
options.url = original[URL]
#if re.search(r"(?i)captcha", original[HTML]) is not None:
#exit(colorize("[x] there seems to be an activated captcha"))
if original[HTTPCODE] is None:
exit(colorize("[x] missing valid response"))
if original[HTTPCODE] >= 400:
for waf in DATA_JSON["wafs"]:
if re.search(DATA_JSON["wafs"][waf]["regex"], original[RAW]):
found = True
print colorize("[+] non-blind match: '%s'" % DATA_JSON["wafs"][waf]["name"])
break
exit(colorize("[x] access to host '%s' seems to be restricted%s" % (hostname, (" (%d: '<title>%s</title>')" % (original[HTTPCODE], original[TITLE].strip())) if original[TITLE] else "")))
protection_keywords = GENERIC_PROTECTION_KEYWORDS
protection_regex = GENERIC_PROTECTION_REGEX % '|'.join(keyword for keyword in protection_keywords if keyword not in original[HTML].lower())
print colorize("[i] running basic heuristic test...")
if not check_payload(HEURISTIC_PAYLOAD):
check = False
if options.url.startswith("https://"):
options.url = options.url.replace("https://", "http://")
check = check_payload(HEURISTIC_PAYLOAD)
if not check:
exit(colorize("[x] host '%s' does not seem to be protected" % hostname))
if not intrusive[HTTPCODE]:
print colorize("[i] rejected summary: RST|DROP")
else:
_ = "...".join(match.group(0) for match in re.finditer(GENERIC_ERROR_MESSAGE_REGEX, intrusive[HTML])).strip().replace(" ", " ")
print colorize(("[i] rejected summary: %d ('%s%s')" % (intrusive[HTTPCODE], ("<title>%s</title>" % intrusive[TITLE]) if intrusive[TITLE] else "", "" if not _ or intrusive[HTTPCODE] < 400 else ("...%s" % _))).replace(" ('')", ""))
found = False
for waf in DATA_JSON["wafs"]:
if re.search(DATA_JSON["wafs"][waf]["regex"], intrusive[RAW] if intrusive[HTTPCODE] is not None else original[RAW]):
found = True
print colorize("[+] non-blind match: '%s'" % DATA_JSON["wafs"][waf]["name"])
break
if not found:
print colorize("[-] non-blind match: -")
for payload in DATA_JSON["payloads"]:
counter += 1
if IS_TTY:
sys.stdout.write(colorize("\r[i] running payload tests... (%d/%d)\r" % (counter, len(DATA_JSON["payloads"]))))
sys.stdout.flush()
if counter % VERIFY_OK_INTERVAL == 0:
for i in xrange(VERIFY_RETRY_TIMES):
if not check_payload(str(random.randint(1, 9)), protection_regex):
break
elif i == VERIFY_RETRY_TIMES - 1:
exit(colorize("[x] host '%s' seems to be (also) rejecting benign requests%s" % (hostname, (" (%d: '<title>%s</title>')" % (intrusive[HTTPCODE], intrusive[TITLE].strip())) if intrusive[TITLE] else "")))
else:
time.sleep(5)
last = check_payload(payload, protection_regex)
signature += struct.pack(">H", ((calc_hash(payload, binary=False) << 1) | last) & 0xffff)
results += 'x' if last else '.'
signature = "%s:%s" % (calc_hash(signature).encode("hex"), base64.b64encode(signature))
print colorize("%s[=] results: '%s'" % ("\n" if IS_TTY else "", results))
hardness = 100 * results.count('x') / len(results)
print colorize("[=] hardness: %s (%d%%)" % ("insane" if hardness >= 80 else ("hard" if hardness >= 50 else ("moderate" if hardness >= 30 else "easy")), hardness))
if not results.strip('.'):
print colorize("[-] blind match: -")
else:
print colorize("[=] signature: '%s'" % signature)
if signature in SIGNATURES:
print colorize("[+] blind match: '%s' (100%%)" % DATA_JSON["wafs"][SIGNATURES[signature]]["name"])
elif results.count('x') < 3:
print colorize("[-] blind match: -")
else:
matches = {}
markers = set()
decoded = signature.split(':')[-1].decode("base64")
for i in xrange(0, len(decoded), 2):
part = struct.unpack(">H", decoded[i: i + 2])[0]
markers.add(part)
for candidate in SIGNATURES:
counter_y, counter_n = 0, 0
decoded = candidate.split(':')[-1].decode("base64")
for i in xrange(0, len(decoded), 2):
part = struct.unpack(">H", decoded[i: i + 2])[0]
if part in markers:
counter_y += 1
elif any(_ in markers for _ in (part & ~1, part | 1)):
counter_n += 1
result = int(round(100 * counter_y / (counter_y + counter_n)))
if SIGNATURES[candidate] in matches:
if result > matches[SIGNATURES[candidate]]:
matches[SIGNATURES[candidate]] = result
else:
matches[SIGNATURES[candidate]] = result
matches = [(_[1], _[0]) for _ in matches.items()]
matches.sort(reverse=True)
print colorize("[+] blind match: %s" % ", ".join("'%s' (%d%%)" % (DATA_JSON["wafs"][matches[i][1]]["name"], matches[i][0]) for i in xrange(MAX_MATCHES if matches[0][0] != 100 else 1)))
print
def main():
if "--version" not in sys.argv:
print BANNER
parse_args()
init()
run()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
exit(colorize("\r[x] Ctrl-C pressed"))
| [
"miroslav.stampar@gmail.com"
] | miroslav.stampar@gmail.com |
45e426957f57baf9bc11987975669e52f8562df1 | 2c62b270af7008e2e789ed9810cee6d62bdc48c0 | /venv/Scripts/pip-script.py | 4dfd212e8a12accd8c971528a5c42eaece4af207 | [] | no_license | infinitr0us/FER | c0175a59689a7d2ae7f1ae8ca047d5e426f4bbe9 | 38d4592944f3de6bf5089f73d3e49792704cf484 | refs/heads/master | 2020-03-19T16:41:34.714709 | 2018-06-09T13:29:17 | 2018-06-09T13:29:17 | 136,725,584 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 407 | py | #!D:\tensorflow\Multimedia\FFF-mine\venv\Scripts\python3.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip')()
)
| [
"e55555_cool@163.com"
] | e55555_cool@163.com |
c17f88aad274adc6efb8f07f2e1f91def04c6283 | 28a462a28f443c285ca5efec181ebe36b147c167 | /tests/compile/basic/es2019/Array.prototype.toString.spec | 85d911ac433e5e0bb58bf5affb55278c27760195 | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | kaist-plrg/jstar | 63e71f9156860dc21cccc33a9f6c638dfee448ea | 1282919127ea18a7e40c7a55e63a1ddaaf7d9db4 | refs/heads/main | 2022-07-22T08:12:34.947712 | 2022-02-27T04:19:33 | 2022-02-27T11:06:14 | 384,045,526 | 6 | 4 | NOASSERTION | 2022-02-27T11:05:26 | 2021-07-08T07:53:21 | Python | UTF-8 | Python | false | false | 256 | spec | 1. Let _array_ be ? ToObject(*this* value).
1. Let _func_ be ? Get(_array_, `"join"`).
1. If IsCallable(_func_) is *false*, set _func_ to the intrinsic function %ObjProto_toString%.
1. Return ? Call(_func_, _array_). | [
"h2oche22@gmail.com"
] | h2oche22@gmail.com |
10bc58aa5944e3c030919b5e18cd6222422e8bbf | 1e1d5021f8a0ac8065f51db7e70f3831b347d3bf | /imageConversor.py | 654b1ab4f08e505ae76e5ac3034575b3996b1f56 | [] | no_license | Neoterux/ImageConverter | 8985ea2442003dd9d14f4bc0aaa1a55a906987c5 | da65e77644ce783d8f5f6cfaa37c74c625454e68 | refs/heads/master | 2020-12-14T02:40:56.377509 | 2020-01-17T21:07:06 | 2020-01-17T21:07:06 | 234,609,985 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,928 | py | from PIL import Image
import os
import re
clear = lambda: os.system('cls' if os.name == 'nt' else 'clear')
def make_dir(name):
try:
os.stat(name)
except:
print("Creando directorio " + name + "...")
os.mkdir(name)
print("Exitoso")
def find_file(pathList, extension):
all_files = list(os.listdir("input"))
r = re.compile(extension + "$")
pathList = list(filter(r.search, all_files))
return pathList
def get_option():
op = str(input("a)webp\nb)png\nc)jpg\nopcion: "))
if op.lower() == "a":
return ".webp"
elif op.lower() == "b":
return ".png"
elif op.lower() == "c":
return ".jpg"
else:
return get_option()
def convertion(files, old_ext=".png", new_ext=".jpeg"):
for file in files:
image = Image.open("input\\" + file)
if(new_ext == ".jpg"):
background = Image.new("RGB", image.size, (255,255,255))
background.paste(image, mask=image.split()[-1])
background.save("output\\" + file.lower().replace(old_ext, new_ext), 'JPEG', quality=85)
else:
image.save("output\\" + file.lower().replace(old_ext, new_ext))
make_dir("input")
input("Ingrese las imagenes a la carpeta input y presione enter para continuar")
clear()
print("\nSeleccione la extension objetivo:")
old_ext = get_option()
clear()
print("Lista de archivos")
files = []
files = find_file(files, old_ext)
print('\n'.join(map(str, files)))
if not files:
input("No se han encontrado archivos con esa extension")
exit()
clear()
print("selecciones la extension a convertir current extension [" + old_ext + "]")
new_ext = get_option()
if new_ext != old_ext:
make_dir("output")
convertion(files, old_ext, new_ext)
print("Finalizado")
| [
"noreply@github.com"
] | noreply@github.com |
7cd58dcae29db07ef376b8e7374e440ee9d0f5cf | a5597d74049fcbe1e1e3afca1f4196243f2e7c90 | /glyce/utils/crazy_finetune.py | 1e496882105744af27eb0a6cb408eb0daa8357e7 | [
"Apache-2.0"
] | permissive | YuChen17Heaven/glyce | 72759d8699bbe37ecd2221e90b8ec06a8844fd29 | 62369e3cc37442ed191862b77d87d0c17c8454f8 | refs/heads/master | 2020-06-14T01:52:41.111642 | 2019-06-30T10:52:10 | 2019-06-30T10:52:10 | 194,857,610 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,840 | py | # encoding: utf-8
"""
@author: wuwei
@contact: wu.wei@pku.edu.cn
@version: 1.0
@license: Apache Licence
@file: crazy_finetune.py
@time: 19-1-2 下午9:50
写for循环疯狂调参
python main.py --highway --nfeat 128 --use_wubi --gpu_id 3
"""
import os
import sys
root_path = "/".join(os.path.realpath(__file__).split("/")[:-3])
if root_path not in sys.path:
sys.path.insert(0, root_path)
import logging
from itertools import product
root_path = "/".join(os.path.realpath(__file__).split("/")[:-3])
print(root_path)
if root_path not in sys.path:
sys.path.insert(0, root_path)
# font_name = '/data/nfsdata/nlp/fonts/useful'
font_name = os.path.join(root_path, "fonts")
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
logger.setLevel(logging.DEBUG)
fh = logging.FileHandler('run.log')
fh.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
logger.addHandler(fh)
logger.addHandler(ch)
# list里的第一个元素是默认设置
finetune_options = {
'word_embsize': [2048],
'num_fonts_concat': [0],
'output_size': [2048],
'gpu_id': [2],
}
def construct_command(setting):
command = 'python -m glyph_embedding.experiments.run_lm'
for feature, option in setting.items():
if option is True:
command += F' --{feature}'
elif option is False:
command += ''
else:
command += F' --{feature} {option}'
return command
def traverse():
"""以默认配置为基准,每次只调一个参数,m个参数,每个参数n个选项,总共运行m*(n-1)次"""
default_setting = {k: v[0] for k, v in finetune_options.items()}
for feature in finetune_options:
for i, option in enumerate(finetune_options[feature]):
if i and default_setting[feature] != option: # 默认设置
setting = default_setting
setting[feature] = option
command = construct_command(setting)
logger.info(command)
try:
message = os.popen(command).read()
except:
message = '进程启动失败!!'
logger.info(message)
def grid_search():
"""以grid search的方式调参"""
for vs in product(*finetune_options.values()):
setting = {}
for k, v in zip(finetune_options.keys(), vs):
setting[k] = v
command = construct_command(setting)
logger.info(command)
try:
message = os.popen(command).read()
except:
message = '进程启动失败!!'
logger.info(message)
if __name__ == '__main__':
grid_search()
| [
"xiaoyli@outlook.com"
] | xiaoyli@outlook.com |
c30316ff64026fdb322a3b42a1c9a36c6d515a59 | 46095a42fcd6fdd160785673d4fc707bfd254328 | /spotify/utils.py | a78c6ff10a7fcb9e27fa2f9bb2f0dee537bae6c4 | [] | no_license | pratham-gupta/spotify_music_controller | 4927e74d21be2b350e7c00debc44650369fd9a68 | 201d5fb3156b3a032b87613d2301578581373574 | refs/heads/master | 2023-03-12T04:13:34.211483 | 2021-02-22T07:57:27 | 2021-02-22T07:57:27 | 341,114,469 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,169 | py |
from .models import SpotifyToken
from django.utils import timezone
from datetime import timedelta
from spotify.credentials import CLIENT_ID, CLIENT_SECRET
from requests import Request, post, get, put
BASE_URL = "https://api.spotify.com/v1/me/"
def get_user_token(session_id):
user_tokens = SpotifyToken.objects.filter(user=session_id)
if user_tokens.exists():
return user_tokens[0]
else:
return None
def update_or_create_user_tokens(session_id, access_token, token_type, expires_in, refresh_token):
tokens = get_user_token(session_id)
print("expires_in",expires_in)
expires_in = timezone.now() + timedelta(seconds=expires_in)
if tokens:
tokens.access_token = access_token
tokens.refresh_token = refresh_token
tokens.expires_in = expires_in
tokens.token_type = token_type
tokens.save(update_fields=['access_token','refresh_token','expires_in','token_type'])
else:
tokens = SpotifyToken(user=session_id, access_token=access_token, refresh_token=refresh_token, expires_in=expires_in, token_type=token_type)
tokens.save()
def refresh_spotify_token(session_id):
tokens = get_user_token(session_id)
refresh_token = tokens.refresh_token
response = post('https://accounts.spotify.com/api/token',data = {
'grant_type': 'refresh_token',
'refresh_token':refresh_token,
'client_id': CLIENT_ID,
'client_secret':CLIENT_SECRET,
}).json()
# print("refresh response",response)
access_token = response.get('access_token')
token_type = response.get('token_type')
expires_in = response.get('expires_in')
# acc to spotify api, refresh token may or may not be returned in response,
# if new refresh_token is found, we'll update it else use older one.
if "refresh_token" in response:
refresh_token = response.get('refresh_token')
else:
refresh_token = refresh_token
update_or_create_user_tokens(session_id,access_token,token_type,expires_in,refresh_token)
def is_spotify_authenticated(session_id):
tokens = get_user_token(session_id)
# print(tokens)
# print(tokens)
# refresh_spotify_token(session_id)
if tokens != None:
expiry = tokens.expires_in
if expiry <= timezone.now():
refresh_spotify_token(session_id)
return True
return False
def execute_spotify_request(session_id,endpoint, post_ = False, put_ = False):
tokens = get_user_token(session_id)
# print("spotify request",tokens.access_token)
headers = {'Content-Type': 'application/json',
'Authorization': "Bearer " + tokens.access_token}
if post_:
response = post(BASE_URL + endpoint, headers=headers)
# print("skip",response.json())
if put_:
response = put(BASE_URL + endpoint, headers=headers)
# print(response)
# try:
# # print("put request",response.json())
# except:
# pass
else:
response = get(BASE_URL + endpoint, {}, headers=headers)
# print("text",response.url)
try:
return response.json()
except:
return {'Error': 'Empty response returned, no song playing'}
def play_song(session_id):
return execute_spotify_request(session_id,"player/play",put_=True)
def pause_song(session_id):
return execute_spotify_request(session_id,"player/pause",put_=True)
def skip_song(session_id):
return execute_spotify_request(session_id,"player/next",post_=True)
def get_available_devices(session_id):
return execute_spotify_request(session_id,"player/devices")
def add_song_to_queue(session_id,song_uri,device_id):
endpoint = 'player/queue'
token = get_user_token(session_id)
headers = {'Content-Type': 'application/json',
'Authorization': "Bearer " + tokens.access_token,
'device_id':device_id,
'uri':song_uri}
response = post(BASE_URL + endpoint,headers=headers)
try:
return response.json()
except:
return {"Error":"Could not add song to queue."}
| [
"pratham04031999@gmail.com"
] | pratham04031999@gmail.com |
f88344df280e4ae0c8f5c5e353cce5e29755a1da | 18d63c7b0382445043d3ea034cffd868d8e70442 | /LvTianshu_Pset2 (many tree grow).py | 5a04dd7e0daeeaedea484a27a347d8fa3ce94656 | [] | no_license | tianshu-lyu/Recursive-Tree-Visualization | bf120778e1136cc50b73b43fa84ca59302063c4f | 85a8a56447a11e5ceee61aaabe2de5481a4f2bf4 | refs/heads/master | 2021-06-03T06:08:32.536748 | 2016-05-23T09:33:32 | 2016-05-23T09:33:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,376 | py | import pygame
from pygame.locals import *
import math
pygame.init()
SCREEN_HEIGHT=600
SCREEN_WIDTH=800
SCREEN_SIZE=(SCREEN_WIDTH,SCREEN_HEIGHT)
myfont=pygame.font.SysFont("arial", 20)
pygame.display.set_caption('Tree in Recursion')
screen=pygame.display.set_mode(SCREEN_SIZE,pygame.RESIZABLE)
background=pygame.Surface(screen.get_size())
background=background.convert()
background.fill((255,255,255))
clock=pygame.time.Clock()
number=0
left=math.radians(15)
right=math.radians(15)
leaf=pygame.image.load('leaf2.jpg')
def draw_tree(size,position,angel,length,thick):
global number
global left
global right
## ratio=0.7
## newleng=length*ratio
(x,y)=position
newX=x+length*math.cos(angel)
newY=y-length*math.sin(angel)
newThick=thick*0.8
newPosition=(newX,newY)
pygame.draw.line(screen,(128,64,0),position,newPosition,int(thick))
if size>0:
draw_tree(size-1,newPosition,angel-left,length,int(newThick))
draw_tree(size-1,newPosition,angel+right,length,int(newThick))
def draw_tree1(size,position,angel,length,thick):
global number
global left
global right
## ratio=0.7
## newleng=length*ratio
(x,y)=position
newX=x+length*math.cos(angel)
newY=y-length*math.sin(angel)
newThick=thick*0.8
newPosition=(newX,newY)
pygame.draw.line(screen,(128,64,0),position,newPosition,int(thick))
if size>0:
if size>1:
draw_tree1(size-1,newPosition,angel-left,length,int(newThick))
draw_tree1(size-1,newPosition,angel+right,length,int(newThick))
else:
screen.blit(leaf,newPosition)
def main():
global deltaX
position=(400,SCREEN_HEIGHT)
angel=math.pi/2
length=75
thick=10
running=True
screen.fill((255,255,255))
for i in range (1,6):
size=i
draw_tree(size,position,angel,length,thick)
pygame.display.update()
clock.tick(1)
size=7
draw_tree1(size,position,angel,length,thick)
pygame.display.update()
position=(200,SCREEN_HEIGHT)
for i in range (1,4):
size=i
draw_tree(size,position,angel,length,thick)
pygame.display.update()
clock.tick(1)
size=5
draw_tree1(size,position,angel,length,thick)
pygame.display.update()
position=(600,SCREEN_HEIGHT)
for i in range (1,4):
size=i
draw_tree(size,position,angel,length,thick)
pygame.display.update()
clock.tick(1)
size=5
draw_tree1(size,position,angel,length,thick)
pygame.display.update()
position=(100,SCREEN_HEIGHT)
for i in range (1,2):
size=i
draw_tree(size,position,angel,length,thick)
pygame.display.update()
clock.tick(1)
size=3
draw_tree1(size,position,angel,length,thick)
pygame.display.update()
position=(700,SCREEN_HEIGHT)
for i in range (1,2):
size=i
draw_tree(size,position,angel,length,thick)
pygame.display.update()
clock.tick(1)
size=3
draw_tree1(size,position,angel,length,thick)
pygame.display.update()
while running:
for event in pygame.event.get():
if event.type==QUIT:
running=False
main()
| [
"tl1443@nyu.edu"
] | tl1443@nyu.edu |
6cc42f14be61cc5796bf53e496bd75fde3a8e361 | d8bf68df624e5a50324527baf44ca610340b060d | /sniffersapp/daily_dockets/migrations/0005_auto_20180625_2022.py | b235c0c560c578a1d31c7b7f31e0d378fae776a5 | [
"MIT"
] | permissive | jamesokane/Oneworksite-Application | 0aadd4661e09c7448b465c967377f8058df82f65 | 1749ffa89430be75394ae0d43905f3dd30a24fc6 | refs/heads/master | 2022-12-15T21:13:30.697889 | 2018-11-09T02:55:18 | 2018-11-09T02:55:18 | 157,323,318 | 0 | 0 | MIT | 2022-12-08T01:02:11 | 2018-11-13T05:06:01 | JavaScript | UTF-8 | Python | false | false | 588 | py | # Generated by Django 2.0.6 on 2018-06-25 10:22
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('daily_dockets', '0004_docket_smoko'),
]
operations = [
migrations.AlterField(
model_name='docket',
name='docket_day',
field=models.CharField(choices=[('Monday', 'Monday'), ('Tuesday', 'Tuesday'), ('Wednesday', 'Wednesday'), ('Thursday', 'Thursday'), ('Friday', 'Friday'), ('Saturday', 'Saturday'), ('Sunday', 'Sunday')], default='Monday', max_length=20),
),
]
| [
"james@oneworksite.com"
] | james@oneworksite.com |
6777ff2e763c0748a5200c9729d79c3fecf1cc50 | 503d2f8f5f5f547acb82f7299d86886691966ca5 | /atcoder/hhkb2020_b.py | c8dd560750da5c973e42132f7c0e4108860b8814 | [] | no_license | Hironobu-Kawaguchi/atcoder | 3fcb649cb920dd837a1ced6713bbb939ecc090a9 | df4b55cc7d557bf61607ffde8bda8655cf129017 | refs/heads/master | 2023-08-21T14:13:13.856604 | 2023-08-12T14:53:03 | 2023-08-12T14:53:03 | 197,216,790 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 972 | py | # https://atcoder.jp/contests/hhkb2020/tasks/hhkb2020_b
# import sys
# # def input(): return sys.stdin.readline().rstrip()
# # input = sys.stdin.readline
# input = sys.stdin.buffer.readline
# from numba import njit
# from functools import lru_cache
# sys.setrecursionlimit(10 ** 7)
# @njit('(i8,i8[::1],i4[::1])', cache=True)
# def main():
# @lru_cache(None)
# def dfs():
# return
# return
# main()
H, W = map(int, input().split())
S = ['' for _ in range(H)]
for i in range(H):
S[i] = input()
ans = 0
for i in range(H-1):
for j in range(W):
if S[i][j] == '.' and S[i+1][j] == '.':
ans += 1
for i in range(H):
for j in range(W-1):
if S[i][j] == '.' and S[i][j+1] == '.':
ans += 1
print(ans)
# S = input()
# n = int(input())
# N, K = map(int, input().split())
# l = list(map(int, (input().split())))
# A = [[int(i) for i in input().split()] for _ in range(N)]
| [
"hironobukawaguchi3@gmail.com"
] | hironobukawaguchi3@gmail.com |
ca405a4f780ebbfd858f71371a55f11a09f0d47c | 0022918e83e63026846e7bd508098e21752bb924 | /test.py | 40b8c2ecc438ca89d099be2fa88201c521df6ab9 | [] | no_license | sebbacon/formexperiment | 5089c144d9159e889283d15ae200827507bf3e63 | 98450091ff72e4f08748076bc905fdd0f97c04ad | refs/heads/master | 2020-06-05T07:13:21.661566 | 2011-01-26T18:55:25 | 2011-01-26T18:55:25 | 1,282,571 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 123 | py | import doctest
from test_with_peppercorn import *
import unittest
doctest.testfile("test_simple_data.txt")
unittest.main()
| [
"seb.bacon@gmail.com"
] | seb.bacon@gmail.com |
162c5384a84309099b93fea2109da6b7e668187d | 1b5b7a3733dcc2612798ec17d54f353e361dfe17 | /create_labelled_mels.py | c7ba712cf0ab4f1c004b040ea0404b98cab06489 | [] | no_license | janzenal/misophonia_filter | 2d735c34c8f263e582035017cb1b7bb45d2494f2 | 79d4b9357c46c3799d948b2feab09890a96bd979 | refs/heads/main | 2023-02-13T16:58:09.093628 | 2021-01-03T12:45:49 | 2021-01-03T12:45:49 | 317,893,766 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,413 | py | from pydub import AudioSegment
import librosa
import pandas as pd
import numpy as np
import os.path
import os
import skimage.io
from tqdm import tqdm
import natsort
def clean_labels(file_path):
'''
This function cleans the label file created with Audacity from the lines that do not include useful information
'''
with open(file_path, "r") as f:
lines = f.readlines()
with open(file_path, "w") as f:
for line in lines:
if "smack" in line.strip("\n"):
f.write(line)
if "breather" in line.strip("\n"):
f.write(line)
if "tongue" in line.strip("\n"):
f.write(line)
if "nose" in line.strip("\n"):
f.write(line)
if "swallow" in line.strip("\n"):
f.write(line)
def slicing_audio(audio_track, name, number, seconds):
'''
This function creates audio slices of a specified length in milliseconds and stores these in a designated folder.
Each slice has attached to its name the second it starts in the entire audio segment, e.g. slice 14 is the slice
that starts at second 14 in the audio segment. The last slice, if shorter than the specified length, is omitted.
'''
# since the length of the audio file is stored in milliseconds, we have to convert our input to milliseconds
milliseconds = int(seconds * 1000)
# create a folder to store the slices in
if os.path.isdir(f"audio_files_manually_labelled/{name}/{name}_slices_{seconds}/") == False:
os.mkdir(f"audio_files_manually_labelled/{name}/{name}_slices_{seconds}/")
if os.path.isdir(f"audio_files_manually_labelled/{name}/{name}_slices_{seconds}/{name}_{number}/") == False:
os.mkdir(f"audio_files_manually_labelled/{name}/{name}_slices_{seconds}/{name}_{number}/")
# loop over all timestamps at which you want to insert a cut
for milsec in range(0, len(audio_track), milliseconds):
slice_start = milsec
slice_end = slice_start + milliseconds
if slice_end < len(audio_track):
slice = audio_track[slice_start: slice_end]
slice.export(
out_f=f"audio_files_manually_labelled/{name}/{name}_slices_{seconds}/{name}_{number}/{name}_{number}_slice_{int(milsec/100)}.mp3",
format="mp3")
def create_stamp_list(path):
'''
This function creates a list of stamps that will be used in the subsequent step of the process of labelling the slices
'''
# first, create a dataframe with columns start, end, label
df = pd.read_csv(path, names=["start", "end", "label"], delimiter="\t")
# then, loop over each start and end entry simultaneously and append both values to a list
# so that ultimately, we get a list of all timestamps in chronological order
stamp_list = []
for (stamp_start, stamp_end) in zip(df.loc[:, "start"], df.loc[:, "end"]):
stamp_list.append(stamp_start)
stamp_list.append(stamp_end)
return stamp_list
def create_label_list(stamp_list, path_slices, seconds):
'''
This function loops over all seconds x at which a slice starts (e.g. 0, 2, 4, 6, ... ) and checks if
any timestamp of the stamp list is included in the range x and x + 2 . If yes, the label is 1, else 0.
First, we determine the number of slices by counting the files in the slices folder.
Then, we loop of all starting seconds and check for a time stamp in that range.
'''
list_slices = os.listdir(path_slices)
number_slices = len(list_slices)
label_list = []
for i in range(0, number_slices):
'''
In each iteration i the test number is changed to something greater than 0 if there is at least one time stamp
that falls in the range of i and i+2.
'''
test_number = 0
for element in stamp_list:
if element > (i * seconds) and element < ((i * seconds) + seconds):
test_number += 1
if test_number == 0:
label_list.append(0)
else:
label_list.append(1)
return label_list
def delete_zeros(label_list, name, number, seconds):
'''
This function samples down both the list of labels and the list of slices to get a balanced amount in both classes.
'''
# first, determine the number of 0's to be deleted
n_to_delete = label_list.count(0) - label_list.count(1)
# k counts the times a 0 was deleted. If k reaches the number of 0's to be deleted, the loop stops.
k = 0
# j is the index of the current element to be checked. It moves one up each time a 1 was encountered.
j = 0
for i in range(len(label_list)):
if label_list[j] == 1:
j += 1
elif label_list[j] == 0:
label_list.pop(j)
os.remove(f"audio_files_manually_labelled/{name}/{name}_slices_{seconds}/{name}_{number}/{name}_{number}_slice_{round((i * seconds) * 10)}.mp3")
k += 1
if k == n_to_delete:
break
return label_list
def extend_data(path, label_list, cat):
'''
This function creates mel spectrograms and adds them to the corresponding classes.
'''
# define path for the mels
destination_path = f"mel_spectrograms_manually_labelled/{cat}/"
# convert each slice into a mel spectrogram and store it in the right category folder
files = os.listdir(path)
for i, slice in tqdm(enumerate(natsort.natsorted(files))):
if "mp3" in slice:
audio, sr = librosa.load(path + slice)
spectrogram = librosa.feature.melspectrogram(y=audio, sr=sr)
spectrogram_db = librosa.amplitude_to_db(spectrogram, ref=np.max)
skimage.io.imsave(destination_path + f"class_{label_list[i]}/" + slice.replace("mp3", "png"), spectrogram_db)
def add_new_slices_and_labels(name, number, seconds, cat):
    """Run the full pipeline for one labelled recording.

    Slices the audio, derives per-slice labels from the manual time stamps,
    balances the two classes, and exports mel spectrograms per class.

    name    -- source speaker as "Name_Surname"
    number  -- index of that speaker's recording
    seconds -- slice length in seconds
    cat     -- dataset split ("Train", "Test" or "Validation")
    """
    # locations of the recording and its manual label file
    audio_path = f"audio_files_manually_labelled/{name}/{name}_{number}.mp3"
    labels_path = f"label_collections/{name}/{name}_{number}.txt"
    # load the recording and normalise the label file
    recording = AudioSegment.from_mp3(audio_path)
    clean_labels(labels_path)
    # cut the recording into fixed-length slices
    slicing_audio(recording, name, number, seconds)
    # directory the slicer just populated
    slices_dir = f"audio_files_manually_labelled/{name}/{name}_slices_{seconds}/{name}_{number}/"
    # label every slice from the manual time stamps
    stamps = create_stamp_list(labels_path)
    slice_labels = create_label_list(stamps, slices_dir, seconds)
    # balance the classes, then export one spectrogram per remaining slice
    balanced_labels = delete_zeros(slice_labels, name, number, seconds)
    extend_data(slices_dir, balanced_labels, cat)
# Arguments, in order:
#   name    -- source speaker as "Name_Surname"
#   number  -- index of that speaker's audio file
#   seconds -- slice length in seconds
#   cat     -- dataset split the slices belong to ("Train", "Test" or "Validation")
add_new_slices_and_labels("Steve_Brunton", 1, 0.2, "Validation") | [
"noreply@github.com"
] | noreply@github.com |
b786a8bf22bcc9fa6769a2bdd445c84df32550ce | c2c8915d745411a0268ee5ce18d8bf7532a09e1a | /cybox-2.1.0.5/cybox/bindings/domain_name_object.py | a65065255c04bcb38b3bf5ca7c47ea1efa9b991a | [
"BSD-3-Clause"
] | permissive | asealey/crits_dependencies | 581d44e77f297af7edb78d08f0bf11ad6712b3ab | a8049c214c4570188f6101cedbacf669168f5e52 | refs/heads/master | 2021-01-17T11:50:10.020346 | 2014-12-28T06:53:01 | 2014-12-28T06:53:01 | 28,555,464 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 32,281 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated Wed Jan 15 13:08:03 2014 by generateDS.py version 2.9a.
#
import sys
import getopt
import re as re_
import cybox_common
import base64
from datetime import datetime, tzinfo, timedelta
etree_ = None
Verbose_import_ = False
( XMLParser_import_none, XMLParser_import_lxml,
XMLParser_import_elementtree
) = range(3)
XMLParser_import_library = None
try:
# lxml
from lxml import etree as etree_
XMLParser_import_library = XMLParser_import_lxml
if Verbose_import_:
print("running with lxml.etree")
except ImportError:
try:
# cElementTree from Python 2.5+
import xml.etree.cElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with cElementTree on Python 2.5+")
except ImportError:
try:
# ElementTree from Python 2.5+
import xml.etree.ElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with ElementTree on Python 2.5+")
except ImportError:
try:
# normal cElementTree install
import cElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with cElementTree")
except ImportError:
try:
# normal ElementTree install
import elementtree.ElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with ElementTree")
except ImportError:
raise ImportError(
"Failed to import ElementTree from any known place")
def parsexml_(*args, **kwargs):
    """Parse an XML document with whichever etree implementation was imported.

    When lxml is in use and the caller did not supply a parser, substitute
    lxml's ElementTree-compatible parser so comments are ignored, matching
    the stdlib ElementTree behaviour.
    """
    using_lxml = XMLParser_import_library == XMLParser_import_lxml
    if using_lxml and 'parser' not in kwargs:
        kwargs['parser'] = etree_.ETCompatXMLParser()
    return etree_.parse(*args, **kwargs)
#
# User methods
#
# Calls to the methods in these classes are generated by generateDS.py.
# You can replace these methods by re-implementing the following class
# in a module named generatedssuper.py.
try:
from generatedssuper import GeneratedsSuper
except ImportError, exp:
class GeneratedsSuper(object):
tzoff_pattern = re_.compile(r'(\+|-)((0\d|1[0-3]):[0-5]\d|14:00)$')
class _FixedOffsetTZ(tzinfo):
def __init__(self, offset, name):
self.__offset = timedelta(minutes = offset)
self.__name = name
def utcoffset(self, dt):
return self.__offset
def tzname(self, dt):
return self.__name
def dst(self, dt):
return None
def gds_format_string(self, input_data, input_name=''):
return input_data
def gds_validate_string(self, input_data, node, input_name=''):
return input_data
def gds_format_base64(self, input_data, input_name=''):
return base64.b64encode(input_data)
def gds_validate_base64(self, input_data, node, input_name=''):
return input_data
def gds_format_integer(self, input_data, input_name=''):
return '%d' % input_data
def gds_validate_integer(self, input_data, node, input_name=''):
return input_data
def gds_format_integer_list(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_integer_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
try:
fvalue = float(value)
except (TypeError, ValueError), exp:
raise_parse_error(node, 'Requires sequence of integers')
return input_data
def gds_format_float(self, input_data, input_name=''):
return '%f' % input_data
def gds_validate_float(self, input_data, node, input_name=''):
return input_data
def gds_format_float_list(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_float_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
try:
fvalue = float(value)
except (TypeError, ValueError), exp:
raise_parse_error(node, 'Requires sequence of floats')
return input_data
def gds_format_double(self, input_data, input_name=''):
return '%e' % input_data
def gds_validate_double(self, input_data, node, input_name=''):
return input_data
def gds_format_double_list(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_double_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
try:
fvalue = float(value)
except (TypeError, ValueError), exp:
raise_parse_error(node, 'Requires sequence of doubles')
return input_data
def gds_format_boolean(self, input_data, input_name=''):
return ('%s' % input_data).lower()
def gds_validate_boolean(self, input_data, node, input_name=''):
return input_data
def gds_format_boolean_list(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_boolean_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
if value not in ('true', '1', 'false', '0', ):
raise_parse_error(node,
'Requires sequence of booleans '
'("true", "1", "false", "0")')
return input_data
def gds_validate_datetime(self, input_data, node, input_name=''):
return input_data
def gds_format_datetime(self, input_data, input_name=''):
if input_data.microsecond == 0:
_svalue = input_data.strftime('%Y-%m-%dT%H:%M:%S')
else:
_svalue = input_data.strftime('%Y-%m-%dT%H:%M:%S.%f')
if input_data.tzinfo is not None:
tzoff = input_data.tzinfo.utcoffset(input_data)
if tzoff is not None:
total_seconds = tzoff.seconds + (86400 * tzoff.days)
if total_seconds == 0:
_svalue += 'Z'
else:
if total_seconds < 0:
_svalue += '-'
total_seconds *= -1
else:
_svalue += '+'
hours = total_seconds // 3600
minutes = (total_seconds - (hours * 3600)) // 60
_svalue += '{0:02d}:{1:02d}'.format(hours, minutes)
return _svalue
def gds_parse_datetime(self, input_data, node, input_name=''):
tz = None
if input_data[-1] == 'Z':
tz = GeneratedsSuper._FixedOffsetTZ(0, 'GMT')
input_data = input_data[:-1]
else:
results = GeneratedsSuper.tzoff_pattern.search(input_data)
if results is not None:
tzoff_parts = results.group(2).split(':')
tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
if results.group(1) == '-':
tzoff *= -1
tz = GeneratedsSuper._FixedOffsetTZ(
tzoff, results.group(0))
input_data = input_data[:-6]
if len(input_data.split('.')) > 1:
dt = datetime.strptime(
input_data, '%Y-%m-%dT%H:%M:%S.%f')
else:
dt = datetime.strptime(
input_data, '%Y-%m-%dT%H:%M:%S')
return dt.replace(tzinfo = tz)
def gds_validate_date(self, input_data, node, input_name=''):
return input_data
def gds_format_date(self, input_data, input_name=''):
_svalue = input_data.strftime('%Y-%m-%d')
if input_data.tzinfo is not None:
tzoff = input_data.tzinfo.utcoffset(input_data)
if tzoff is not None:
total_seconds = tzoff.seconds + (86400 * tzoff.days)
if total_seconds == 0:
_svalue += 'Z'
else:
if total_seconds < 0:
_svalue += '-'
total_seconds *= -1
else:
_svalue += '+'
hours = total_seconds // 3600
minutes = (total_seconds - (hours * 3600)) // 60
_svalue += '{0:02d}:{1:02d}'.format(hours, minutes)
return _svalue
def gds_parse_date(self, input_data, node, input_name=''):
tz = None
if input_data[-1] == 'Z':
tz = GeneratedsSuper._FixedOffsetTZ(0, 'GMT')
input_data = input_data[:-1]
else:
results = GeneratedsSuper.tzoff_pattern.search(input_data)
if results is not None:
tzoff_parts = results.group(2).split(':')
tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
if results.group(1) == '-':
tzoff *= -1
tz = GeneratedsSuper._FixedOffsetTZ(
tzoff, results.group(0))
input_data = input_data[:-6]
return datetime.strptime(input_data,
'%Y-%m-%d').replace(tzinfo = tz)
def gds_str_lower(self, instring):
return instring.lower()
def get_path_(self, node):
path_list = []
self.get_path_list_(node, path_list)
path_list.reverse()
path = '/'.join(path_list)
return path
Tag_strip_pattern_ = re_.compile(r'\{.*\}')
def get_path_list_(self, node, path_list):
if node is None:
return
tag = GeneratedsSuper.Tag_strip_pattern_.sub('', node.tag)
if tag:
path_list.append(tag)
self.get_path_list_(node.getparent(), path_list)
def get_class_obj_(self, node, default_class=None):
class_obj1 = default_class
if 'xsi' in node.nsmap:
classname = node.get('{%s}type' % node.nsmap['xsi'])
if classname is not None:
names = classname.split(':')
if len(names) == 2:
classname = names[1]
class_obj2 = globals().get(classname)
if class_obj2 is not None:
class_obj1 = class_obj2
return class_obj1
def gds_build_any(self, node, type_name=None):
return None
#
# If you have installed IPython you can uncomment and use the following.
# IPython is available from http://ipython.scipy.org/.
#
## from IPython.Shell import IPShellEmbed
## args = ''
## ipshell = IPShellEmbed(args,
## banner = 'Dropping into IPython',
## exit_msg = 'Leaving Interpreter, back to program.')
# Then use the following line where and when you want to drop into the
# IPython shell:
# ipshell('<some message> -- Entering ipshell.\nHit Ctrl-D to exit')
#
# Globals
#
# Default output encoding for exported XML.
ExternalEncoding = 'utf-8'
# Splits an etree tag into an optional '{namespace}' group and the local name.
Tag_pattern_ = re_.compile(r'({.*})?(.*)')
# Matches runs of whitespace/newlines (used when collapsing text content).
String_cleanup_pat_ = re_.compile(r"[\n\r\s]+")
# Captures the namespace URI and local name from a '{uri}name' tag.
Namespace_extract_pat_ = re_.compile(r'{(.*)}(.*)')
#
# Support/utility functions.
#
def showIndent(outfile, level, pretty_print=True):
    """Write `level` levels of four-space indentation to `outfile`.

    Does nothing when pretty-printing is disabled.
    """
    if pretty_print:
        outfile.write('    ' * level)
def quote_xml(inStr):
    """Entity-escape &, < and > for use in XML text content.

    Falsy input yields the empty string; non-string input is first
    converted via '%s' formatting.  (Python 2: accepts str or unicode.)
    """
    if not inStr:
        return ''
    text = inStr if isinstance(inStr, basestring) else '%s' % inStr
    # '&' must be escaped first so the entities below are not double-escaped
    for raw, entity in (('&', '&amp;'), ('<', '&lt;'), ('>', '&gt;')):
        text = text.replace(raw, entity)
    return text
def quote_attrib(inStr):
    """Escape a value for use as an XML attribute and wrap it in quotes.

    &, < and > are entity-escaped.  Double quotes wrap the value unless it
    contains a double quote, in which case single quotes are used; when both
    quote kinds appear, embedded double quotes become &quot; and double-quote
    wrapping is used.  (Python 2: accepts str or unicode.)
    """
    text = inStr if (isinstance(inStr, basestring) and inStr) else '%s' % inStr
    text = text.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;')
    if '"' not in text:
        return '"%s"' % text
    if "'" in text:
        return '"%s"' % text.replace('"', "&quot;")
    return "'%s'" % text
def quote_python(inStr):
    """Wrap *inStr* in Python string-literal quotes.

    Single quotes are preferred; triple quotes are used when the text spans
    multiple lines.  When the text itself contains single quotes, double
    quotes wrap it and any embedded double quotes are backslash-escaped.
    """
    text = inStr
    if "'" not in text:
        quote = "'''" if '\n' in text else "'"
        return '%s%s%s' % (quote, text, quote)
    if '"' in text:
        text = text.replace('"', '\\"')
    quote = '"""' if '\n' in text else '"'
    return '%s%s%s' % (quote, text, quote)
def get_all_text_(node):
    """Concatenate a node's own text with each child's tail text.

    Child element *content* is deliberately ignored; only the text that sits
    directly inside *node* (before and between its children) is collected.
    """
    parts = [node.text] if node.text is not None else []
    parts.extend(child.tail for child in node if child.tail is not None)
    return ''.join(parts)
def find_attr_value_(attr_name, node):
    """Look up attribute *attr_name* (optionally 'prefix:name') on *node*.

    A plain name is read directly from the attribute map.  A prefixed name
    is resolved through the node's namespace map (lxml's ``nsmap``) into
    Clark notation '{uri}name' first.  Returns None when absent.
    """
    attrs = node.attrib
    parts = attr_name.split(':')
    if len(parts) == 1:
        return attrs.get(attr_name)
    if len(parts) == 2:
        prefix, local_name = parts
        uri = node.nsmap.get(prefix)
        if uri is not None:
            return attrs.get('{%s}%s' % (uri, local_name))
    return None
class GDSParseError(Exception):
    """Raised when an XML document cannot be mapped onto the generated bindings."""
    pass
def raise_parse_error(node, msg):
    # Raise GDSParseError with the offending element's tag appended; lxml
    # nodes expose `sourceline`, so the line number is included too (the
    # stdlib ElementTree does not provide it).
    if XMLParser_import_library == XMLParser_import_lxml:
        msg = '%s (element %s/line %d)' % (
            msg, node.tag, node.sourceline, )
    else:
        msg = '%s (element %s)' % (msg, node.tag, )
    raise GDSParseError(msg)
class MixedContainer:
# Constants for category:
CategoryNone = 0
CategoryText = 1
CategorySimple = 2
CategoryComplex = 3
# Constants for content_type:
TypeNone = 0
TypeText = 1
TypeString = 2
TypeInteger = 3
TypeFloat = 4
TypeDecimal = 5
TypeDouble = 6
TypeBoolean = 7
TypeBase64 = 8
def __init__(self, category, content_type, name, value):
self.category = category
self.content_type = content_type
self.name = name
self.value = value
def getCategory(self):
return self.category
def getContenttype(self, content_type):
return self.content_type
def getValue(self):
return self.value
def getName(self):
return self.name
def export(self, outfile, level, name, namespace, pretty_print=True):
if self.category == MixedContainer.CategoryText:
# Prevent exporting empty content as empty lines.
if self.value.strip():
outfile.write(self.value)
elif self.category == MixedContainer.CategorySimple:
self.exportSimple(outfile, level, name)
else: # category == MixedContainer.CategoryComplex
self.value.export(outfile, level, namespace, name, pretty_print)
def exportSimple(self, outfile, level, name):
if self.content_type == MixedContainer.TypeString:
outfile.write('<%s>%s</%s>' %
(self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeInteger or \
self.content_type == MixedContainer.TypeBoolean:
outfile.write('<%s>%d</%s>' %
(self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeFloat or \
self.content_type == MixedContainer.TypeDecimal:
outfile.write('<%s>%f</%s>' %
(self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeDouble:
outfile.write('<%s>%g</%s>' %
(self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeBase64:
outfile.write('<%s>%s</%s>' %
(self.name, base64.b64encode(self.value), self.name))
def to_etree(self, element):
if self.category == MixedContainer.CategoryText:
# Prevent exporting empty content as empty lines.
if self.value.strip():
if len(element) > 0:
if element[-1].tail is None:
element[-1].tail = self.value
else:
element[-1].tail += self.value
else:
if element.text is None:
element.text = self.value
else:
element.text += self.value
elif self.category == MixedContainer.CategorySimple:
subelement = etree_.SubElement(element, '%s' % self.name)
subelement.text = self.to_etree_simple()
else: # category == MixedContainer.CategoryComplex
self.value.to_etree(element)
def to_etree_simple(self):
if self.content_type == MixedContainer.TypeString:
text = self.value
elif (self.content_type == MixedContainer.TypeInteger or
self.content_type == MixedContainer.TypeBoolean):
text = '%d' % self.value
elif (self.content_type == MixedContainer.TypeFloat or
self.content_type == MixedContainer.TypeDecimal):
text = '%f' % self.value
elif self.content_type == MixedContainer.TypeDouble:
text = '%g' % self.value
elif self.content_type == MixedContainer.TypeBase64:
text = '%s' % base64.b64encode(self.value)
return text
def exportLiteral(self, outfile, level, name):
if self.category == MixedContainer.CategoryText:
showIndent(outfile, level)
outfile.write('model_.MixedContainer(%d, %d, "%s", "%s"),\n'
% (self.category, self.content_type, self.name, self.value))
elif self.category == MixedContainer.CategorySimple:
showIndent(outfile, level)
outfile.write('model_.MixedContainer(%d, %d, "%s", "%s"),\n'
% (self.category, self.content_type, self.name, self.value))
else: # category == MixedContainer.CategoryComplex
showIndent(outfile, level)
outfile.write('model_.MixedContainer(%d, %d, "%s",\n' % \
(self.category, self.content_type, self.name,))
self.value.exportLiteral(outfile, level + 1)
showIndent(outfile, level)
outfile.write(')\n')
class MemberSpec_(object):
    """Metadata describing one member of a generated binding class.

    name      -- the member's name
    data_type -- a single type name, or a list of type names (a restriction
                 chain) whose last entry is the effective type
    container -- non-zero when the member holds a collection
    """

    def __init__(self, name='', data_type='', container=0):
        self.name = name
        self.data_type = data_type
        self.container = container

    def set_name(self, name):
        self.name = name

    def get_name(self):
        return self.name

    def set_data_type(self, data_type):
        self.data_type = data_type

    def get_data_type_chain(self):
        """Return the raw data_type value (possibly a whole chain)."""
        return self.data_type

    def get_data_type(self):
        """Return the effective type: the last of a chain, or 'xs:string' for an empty chain."""
        if not isinstance(self.data_type, list):
            return self.data_type
        return self.data_type[-1] if self.data_type else 'xs:string'

    def set_container(self, container):
        self.container = container

    def get_container(self):
        return self.container
def _cast(typ, value):
if typ is None or value is None:
return value
return typ(value)
#
# Data representation classes.
#
class DomainNameObjectType(cybox_common.ObjectPropertiesType):
"""The DomainNameObjectType type is intended to characterize network
domain names.The type field specifies the type of Domain name
that is being defined."""
member_data_items_ = {
'type': MemberSpec_('type', 'DomainNameObj:DomainNameTypeEnum', 0),
'Value': MemberSpec_('Value', 'cybox_common.StringObjectPropertyType', 0),
}
subclass = None
superclass = cybox_common.ObjectPropertiesType
def __init__(self, object_reference=None, Custom_Properties=None, xsi_type=None, type_=None, Value=None):
super(DomainNameObjectType, self).__init__(object_reference, Custom_Properties, xsi_type)
self.type_ = _cast(None, type_)
self.Value = Value
def factory(*args_, **kwargs_):
if DomainNameObjectType.subclass:
return DomainNameObjectType.subclass(*args_, **kwargs_)
else:
return DomainNameObjectType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_Value(self): return self.Value
def set_Value(self, Value): self.Value = Value
def validate_StringObjectPropertyType(self, value):
# Validate type cybox_common.StringObjectPropertyType, a restriction on None.
pass
def get_type(self): return self.type_
def set_type(self, type_): self.type_ = type_
def hasContent_(self):
if (
self.Value is not None or
super(DomainNameObjectType, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespace_='DomainNameObj:', name_='DomainNameObjectType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='DomainNameObjectType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='DomainNameObj:', name_='DomainNameObjectType'):
super(DomainNameObjectType, self).exportAttributes(outfile, level, already_processed, namespace_, name_='DomainNameObjectType')
if self.type_ is not None and 'type_' not in already_processed:
already_processed.add('type_')
outfile.write(' type=%s' % (quote_attrib(self.type_), ))
def exportChildren(self, outfile, level, namespace_='DomainNameObj:', name_='DomainNameObjectType', fromsubclass_=False, pretty_print=True):
super(DomainNameObjectType, self).exportChildren(outfile, level, 'DomainNameObj:', name_, True, pretty_print=pretty_print)
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.Value is not None:
self.Value.export(outfile, level, 'DomainNameObj:', name_='Value', pretty_print=pretty_print)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('type', node)
if value is not None and 'type' not in already_processed:
already_processed.add('type')
self.type_ = value
super(DomainNameObjectType, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'Value':
obj_ = cybox_common.StringObjectPropertyType.factory()
obj_.build(child_)
self.set_Value(obj_)
super(DomainNameObjectType, self).buildChildren(child_, node, nodeName_, True)
# end class DomainNameObjectType
GDSClassesMapping = {
'Build_Utility': cybox_common.BuildUtilityType,
'Byte_Order': cybox_common.EndiannessType,
'Errors': cybox_common.ErrorsType,
'Time': cybox_common.TimeType,
'Certificate_Issuer': cybox_common.StringObjectPropertyType,
'Metadata': cybox_common.MetadataType,
'Hash': cybox_common.HashType,
'Information_Source_Type': cybox_common.ControlledVocabularyStringType,
'Internal_Strings': cybox_common.InternalStringsType,
'Fuzzy_Hash_Structure': cybox_common.FuzzyHashStructureType,
'SubDatum': cybox_common.MetadataType,
'Segment_Hash': cybox_common.HashValueType,
'Digital_Signature': cybox_common.DigitalSignatureInfoType,
'Code_Snippets': cybox_common.CodeSnippetsType,
'Value': cybox_common.StringObjectPropertyType,
'Length': cybox_common.IntegerObjectPropertyType,
'Produced_Time': cybox_common.DateTimeWithPrecisionType,
'Reference': cybox_common.ToolReferenceType,
'Encoding': cybox_common.ControlledVocabularyStringType,
'Internationalization_Settings': cybox_common.InternationalizationSettingsType,
'Tool_Configuration': cybox_common.ToolConfigurationType,
'English_Translation': cybox_common.StringObjectPropertyType,
'Start_Date': cybox_common.DateWithPrecisionType,
'Functions': cybox_common.FunctionsType,
'String_Value': cybox_common.StringObjectPropertyType,
'Build_Utility_Platform_Specification': cybox_common.PlatformSpecificationType,
'Compiler_Informal_Description': cybox_common.CompilerInformalDescriptionType,
'Start_Time': cybox_common.DateTimeWithPrecisionType,
'System': cybox_common.ObjectPropertiesType,
'Platform': cybox_common.PlatformSpecificationType,
'Usage_Context_Assumptions': cybox_common.UsageContextAssumptionsType,
'Type': cybox_common.ControlledVocabularyStringType,
'Compilers': cybox_common.CompilersType,
'Tool_Type': cybox_common.ControlledVocabularyStringType,
'String': cybox_common.ExtractedStringType,
'Custom_Properties': cybox_common.CustomPropertiesType,
'Build_Information': cybox_common.BuildInformationType,
'Tool_Hashes': cybox_common.HashListType,
'Observable_Location': cybox_common.LocationType,
'Error_Instances': cybox_common.ErrorInstancesType,
'End_Date': cybox_common.DateWithPrecisionType,
'Data_Segment': cybox_common.StringObjectPropertyType,
'Certificate_Subject': cybox_common.StringObjectPropertyType,
'Language': cybox_common.StringObjectPropertyType,
'Compensation_Model': cybox_common.CompensationModelType,
'Property': cybox_common.PropertyType,
'Strings': cybox_common.ExtractedStringsType,
'Contributors': cybox_common.PersonnelType,
'Code_Snippet': cybox_common.ObjectPropertiesType,
'Configuration_Settings': cybox_common.ConfigurationSettingsType,
'Simple_Hash_Value': cybox_common.SimpleHashValueType,
'Byte_String_Value': cybox_common.HexBinaryObjectPropertyType,
'Received_Time': cybox_common.DateTimeWithPrecisionType,
'Instance': cybox_common.ObjectPropertiesType,
'Import': cybox_common.StringObjectPropertyType,
'Identifier': cybox_common.PlatformIdentifierType,
'Tool_Specific_Data': cybox_common.ToolSpecificDataType,
'Execution_Environment': cybox_common.ExecutionEnvironmentType,
'Search_Distance': cybox_common.IntegerObjectPropertyType,
'Dependencies': cybox_common.DependenciesType,
'Offset': cybox_common.IntegerObjectPropertyType,
'Date': cybox_common.DateRangeType,
'Hashes': cybox_common.HashListType,
'Segments': cybox_common.HashSegmentsType,
'Segment_Count': cybox_common.IntegerObjectPropertyType,
'Usage_Context_Assumption': cybox_common.StructuredTextType,
'Block_Hash': cybox_common.FuzzyHashBlockType,
'Dependency': cybox_common.DependencyType,
'Error': cybox_common.ErrorType,
'Trigger_Point': cybox_common.HexBinaryObjectPropertyType,
'Environment_Variable': cybox_common.EnvironmentVariableType,
'Byte_Run': cybox_common.ByteRunType,
'File_System_Offset': cybox_common.IntegerObjectPropertyType,
'Image_Offset': cybox_common.IntegerObjectPropertyType,
'Imports': cybox_common.ImportsType,
'Library': cybox_common.LibraryType,
'References': cybox_common.ToolReferencesType,
'Compilation_Date': cybox_common.DateTimeWithPrecisionType,
'Block_Hash_Value': cybox_common.HashValueType,
'Configuration_Setting': cybox_common.ConfigurationSettingType,
'Observation_Location': cybox_common.LocationType,
'Libraries': cybox_common.LibrariesType,
'Function': cybox_common.StringObjectPropertyType,
'Description': cybox_common.StructuredTextType,
'User_Account_Info': cybox_common.ObjectPropertiesType,
'Build_Configuration': cybox_common.BuildConfigurationType,
'Address': cybox_common.HexBinaryObjectPropertyType,
'Search_Within': cybox_common.IntegerObjectPropertyType,
'Segment': cybox_common.HashSegmentType,
'Compiler': cybox_common.CompilerType,
'Name': cybox_common.StringObjectPropertyType,
'Signature_Description': cybox_common.StringObjectPropertyType,
'Block_Size': cybox_common.IntegerObjectPropertyType,
'Compiler_Platform_Specification': cybox_common.PlatformSpecificationType,
'Fuzzy_Hash_Value': cybox_common.FuzzyHashValueType,
'Data_Size': cybox_common.DataSizeType,
'Dependency_Description': cybox_common.StructuredTextType,
'End_Time': cybox_common.DateTimeWithPrecisionType,
'Contributor': cybox_common.ContributorType,
'Tools': cybox_common.ToolsInformationType,
'Tool': cybox_common.ToolInformationType,
}
# Help text printed by usage() when the CLI is invoked incorrectly.
USAGE_TEXT = """
Usage: python <Parser>.py [ -s ] <in_xml_file>
"""
def usage():
    # Print the CLI usage text and exit with a non-zero status.
    # (Python 2 print statement — this module targets Python 2.)
    print USAGE_TEXT
    sys.exit(1)
def get_root_tag(node):
    """Return (local tag name, binding class) for a document root node.

    The namespace prefix is stripped from the tag, and the class is looked
    up first in GDSClassesMapping, then among this module's globals; the
    class is None when neither defines it.
    """
    local_tag = Tag_pattern_.match(node.tag).groups()[-1]
    root_class = GDSClassesMapping.get(local_tag)
    if root_class is None:
        root_class = globals().get(local_tag)
    return local_tag, root_class
def parse(inFileName):
    """Parse the XML file, echo it to stdout as XML, and return the root object."""
    doc = parsexml_(inFileName)
    rootNode = doc.getroot()
    rootTag, rootClass = get_root_tag(rootNode)
    if rootClass is None:
        # Unrecognised root: fall back to this module's primary binding.
        rootTag = 'Domain_Name'
        rootClass = DomainNameObjectType
    rootObj = rootClass.factory()
    rootObj.build(rootNode)
    # Enable Python to collect the space used by the DOM.
    doc = None
    sys.stdout.write('<?xml version="1.0" ?>\n')
    rootObj.export(sys.stdout, 0, name_=rootTag,
        namespacedef_='',
        pretty_print=True)
    return rootObj
def parseEtree(inFileName):
    """Parse the XML file and return (root object, regenerated etree element).

    Also prints the regenerated document (with XML declaration) to stdout.
    """
    doc = parsexml_(inFileName)
    rootNode = doc.getroot()
    rootTag, rootClass = get_root_tag(rootNode)
    if rootClass is None:
        # Unrecognised root: fall back to this module's primary binding.
        rootTag = 'Domain_Name'
        rootClass = DomainNameObjectType
    rootObj = rootClass.factory()
    rootObj.build(rootNode)
    # Enable Python to collect the space used by the DOM.
    doc = None
    rootElement = rootObj.to_etree(None, name_=rootTag)
    content = etree_.tostring(rootElement, pretty_print=True,
        xml_declaration=True, encoding="utf-8")
    sys.stdout.write(content)
    sys.stdout.write('\n')
    return rootObj, rootElement
def parseString(inString):
    """Parse XML from a string, echo it to stdout, and return the root object."""
    # Python-2-only module: on Python 3 this would be io.StringIO.
    from StringIO import StringIO
    doc = parsexml_(StringIO(inString))
    rootNode = doc.getroot()
    rootTag, rootClass = get_root_tag(rootNode)
    if rootClass is None:
        # Unrecognised root: fall back to this module's primary binding.
        rootTag = 'Domain_Name'
        rootClass = DomainNameObjectType
    rootObj = rootClass.factory()
    rootObj.build(rootNode)
    # Enable Python to collect the space used by the DOM.
    doc = None
    sys.stdout.write('<?xml version="1.0" ?>\n')
    rootObj.export(sys.stdout, 0, name_="Domain_Name",
        namespacedef_='')
    return rootObj
def main():
    """CLI entry point: parse the single XML file named on the command line."""
    cli_args = sys.argv[1:]
    if len(cli_args) == 1:
        parse(cli_args[0])
    else:
        # wrong argument count: print usage and exit(1)
        usage()
if __name__ == '__main__':
#import pdb; pdb.set_trace()
main()
__all__ = [
"DomainNameObjectType"
] | [
"ssnow@mitre.org"
] | ssnow@mitre.org |
df8c81a2b8e3b1259e8d30912e5801030105b4c9 | 05e5d1dcd5f077f99226970a4c8ca5b709b5017e | /Test/TestENV_regeneration.py | d5a8d3d425c46577981bb817f78a5cea774ee4b7 | [] | no_license | Angelaben/Thesis-RL | ecdeb8c4f1b74b06cd9aa59cfe5e1a98d400c71e | 992fb80f9fcb4021669047284d1a186c5e3a567a | refs/heads/master | 2020-04-17T00:53:30.923720 | 2019-01-16T15:45:01 | 2019-01-16T15:45:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 440 | py | from Environment.BanditEnvironment_stationary_WithRegeneration import BanditEnvironment_regeneration
# Smoke test: regenerating the bandit environment should produce new items.
env = BanditEnvironment_regeneration()
res = env.reset()
print(res)
env.step_mono_recommendation(0)
# res[1] appears to hold the item list — keep the first item for comparison.
# (TODO confirm the reset() return structure.)
first_item = res[1][0]
env.regenerate()
res = env.reset()
regenerated_first_item = res[1][0]
# NOTE(review): get_Price is referenced without parentheses — if it is a
# method this compares bound methods of two different objects, which is
# always unequal and the assertion can never fail; confirm get_Price is a
# property/attribute.
assert first_item.get_Price != regenerated_first_item.get_Price # Might fail at random if same price
# But if the experiment is repeated, this must not fail. | [
"bangelard@vente-privee.com"
] | bangelard@vente-privee.com |
4cd1e5c3393bc90564eb60027a7d7ae9a87e1b66 | d35c9091645ada3f1439ae8a6ad198b90b184bad | /examples/kill.py | a860433ec8664b152006986236153f4c862dfe42 | [] | no_license | pgillet/spark_client | 09c8e488835ac72d43bec727cbb9f183dca9275c | 2273af035ec0560f23ddcea13922fd1f82c708d5 | refs/heads/main | 2023-02-17T04:09:47.343074 | 2021-01-06T14:51:06 | 2021-01-06T14:51:06 | 327,342,067 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 824 | py | import logging
import os
from spark_client.facade import K8S_SPARK_NATIVE, SparkJobConf
from spark_client.utils import SparkJobRunnerFactory
logging.basicConfig(level=logging.DEBUG)
# Job configuration
# Path to the static configuration file
config_path = os.path.join(os.path.dirname(__file__), "conf.cfg")
manager = K8S_SPARK_NATIVE # Can also be directly set in the configuration file, in 'application' section
job_conf = SparkJobConf(manager=manager, configPath=config_path)
# Job runner
job_runner = SparkJobRunnerFactory.create_job_runner(job_conf)
submission_id = job_runner.run()
print("Submitted job with ID = {}".format(submission_id))
job_runner.wait_until_start(submission_id)
status = job_runner.status(submission_id)
print("Job {} is {}".format(submission_id, status))
job_runner.kill(submission_id)
| [
"pascal.gillet@stack-labs.com"
] | pascal.gillet@stack-labs.com |
efba4b2d600c69a51bb39a34812f080182f4990d | 8b301e17d5f42e1050bb15cde9b28a2db33d0662 | /mysite/myAPI/checkcode.py | f92f73c41b11dab9987fad65e488cce789056e4d | [
"Apache-2.0"
] | permissive | wuchunlong0/blog_uk_vue_mylogin | 413bd482b649f2bf0e45cdfe5dc964ac0f75e72b | eece41870822a38c130318c10e6dc348a088a864 | refs/heads/master | 2020-05-09T18:04:45.718255 | 2019-04-14T15:13:01 | 2019-04-14T15:13:01 | 181,323,582 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,418 | py | # -*- coding: utf-8 -*-
import os,sys
from io import BytesIO as StringIO
from django.shortcuts import render
import random
from django.http.response import HttpResponseRedirect, HttpResponse
from PIL import Image, ImageDraw, ImageFont, ImageFilter
# Path of the TrueType font used to draw the captcha text.
FONT_TYPE = "static_common/home/fonts/DroidSans.ttf"

# Captcha alphabet: visually ambiguous glyphs (i, j, k, l, m, o, w, z and the
# digits 0-2, 8-9) are excluded.
# NOTE(review): .upper() is applied to the lowercase set, so _upper_cases is an
# exact duplicate of _letter_cases — letters appear twice in init_chars (which
# doubles their odds relative to digits) and no lowercase letters are used.
# Confirm this weighting is intentional.
_letter_cases = "abcdefghnpqrstuvxy".upper()
_upper_cases = _letter_cases
_numbers = ''.join(str(digit) for digit in range(3, 8))  # "34567"
init_chars = _letter_cases + _upper_cases + _numbers


def get_chars(chars=init_chars, length=4):
    """Draw `length` characters (distinct positions) from `chars` at random."""
    return random.sample(chars, length)
def create_validate_code(request,size=(120, 30), mode="RGB",
                         bg_color=(255, 255, 255),
                         fg_color=(255, 0, 0),
                         font_size=22,
                         font_type=FONT_TYPE,
                         draw_lines=True,
                         n_line=(1, 3),
                         draw_points=True,
                         point_chance = 2):
    """Render the captcha stored in request.session['checkcode'] to an image.

    Returns (img, strs): the distorted PIL image and the captcha string
    that was drawn.

    size         -- (width, height) of the image in pixels
    mode         -- PIL colour mode
    bg_color     -- background colour (white)
    fg_color     -- text colour (red)
    font_size    -- point size of the captcha font
    font_type    -- path to the TrueType font file
    draw_lines   -- whether to draw random distraction lines
    n_line       -- (min, max) number of distraction lines
    draw_points  -- whether to sprinkle random noise pixels
    point_chance -- per-pixel noise probability, in percent
    """
    width, height = size
    img = Image.new(mode, size, bg_color)
    draw = ImageDraw.Draw(img)
    def create_lines():
        # draw between n_line[0] and n_line[1] random black lines
        line_num = random.randint(*n_line)
        for i in range(line_num):
            begin = (random.randint(0, size[0]), random.randint(0, size[1]))
            end = (random.randint(0, size[0]), random.randint(0, size[1]))
            draw.line([begin, end], fill=(0, 0, 0))
    def create_points():
        # each pixel independently turns black with `chance` percent probability
        chance = min(100, max(0, int(point_chance)))
        for w in range(width):
            for h in range(height):
                tmp = random.randint(0, 100)
                if tmp > 100 - chance:
                    draw.point((w, h), fill=(0, 0, 0))
    def create_strs():
        # The captcha characters come from the session (set by gcheckcode),
        # they are not generated here.
        c_chars =request.session['checkcode']
        strs = ' %s ' % ' '.join(c_chars)
        font = ImageFont.truetype(font_type, font_size)
        # NOTE(review): font.getsize was removed in Pillow 10 — confirm the
        # pinned Pillow version supports it.
        font_width, font_height = font.getsize(strs)
        draw.text(((width - font_width) / 3, (height - font_height) / 3),
                  strs, font=font, fill=fg_color)
        return ''.join(c_chars)
    if draw_lines:
        create_lines()
    if draw_points:
        create_points()
    strs = create_strs()
    # Mild random perspective warp followed by edge enhancement, to hinder OCR.
    params = [1 - float(random.randint(1, 12)) / 100,
              0,
              0,
              0,
              1 - float(random.randint(1, 10)) / 100,
              float(random.randint(1, 2)) / 500,
              0.001,
              float(random.randint(1, 2)) / 500
              ]
    img = img.transform(size, Image.PERSPECTIVE, params)
    img = img.filter(ImageFilter.EDGE_ENHANCE_MORE)
    return img, strs
def gcheckcode(request):
    """Generate a fresh CAPTCHA, store its characters in the session.

    The character list is saved under ``request.session['checkcode']`` so the
    image view can render it later; the same code is returned as a string.
    """
    chars = get_chars()
    request.session['checkcode'] = chars
    return ''.join(chars)
# http://localhost:9000/home/checkcodeGIF/
def checkcodeGIF(request):
    """Return the session's CAPTCHA rendered as a GIF HTTP response.

    If the session has no code yet (e.g. the image is requested directly),
    a placeholder code '1234' is installed first.
    """
    if not request.session.get('checkcode',''):
        request.session['checkcode'] = '1234'
    img_type="GIF"
    checkcode = create_validate_code(request)
    mstream = StringIO()
    checkcode[0].save(mstream, img_type) # save the image into the in-memory stream
    codeImg = mstream.getvalue() # grab the encoded image bytes
    mstream.close()# release the stream
    return HttpResponse(codeImg, img_type) # serve the in-memory image
# http://localhost:8000/home/getcheckcode/
def getcheckcode(request):
    """Generate a new CAPTCHA and render the requested template.

    The template path comes from the ``path`` query parameter (default
    '__base__.html'). ``locals()`` exposes ``g_checkcode``, ``path`` and
    ``request`` to the template context, so do not rename these locals.
    """
    g_checkcode = gcheckcode(request)
    path = request.GET.get('path','__base__.html')
    return render(request, path, context=locals())
| [
"wcl6005@163.com"
] | wcl6005@163.com |
57886315fad8ee049cd188b90393f570bf6e34b8 | 8e2c1c2fd6e53cc43706dba80a3423dbd2c1eb5d | /tilemap.py | 361bd134efdb96b17755817bbb1a6f3fcfbb97b0 | [] | no_license | DavidMower/OceanDiver | 4d21332c5846b96f58b5d01a1f447fba1f07e2a3 | ce2560a4376101263a676537e4043300e61b5fbe | refs/heads/master | 2022-12-28T16:46:24.279464 | 2020-10-12T22:07:32 | 2020-10-12T22:07:32 | 193,225,654 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,197 | py | import pygame as pg
from settings import *
def collide_hit_rect(one, two):
    """Collision callback: test *one*'s hit box against *two*'s sprite rect."""
    hit_box = one.hit_rect
    return hit_box.colliderect(two.rect)
class Map:
    """Tile map loaded from a plain-text level file.

    Each line of the file is one row of tiles; one character = one tile.
    Pixel dimensions are derived from the tile grid and TILESIZE.
    """
    def __init__(self, filename):
        with open(filename, 'rt') as level_file:
            # strip() drops the trailing newline from every row
            self.data = [row.strip() for row in level_file]
        self.tilewidth = len(self.data[0])
        self.tileheight = len(self.data)
        self.width = self.tilewidth * TILESIZE
        self.height = self.tileheight * TILESIZE
class Camera:
    """Scrolling viewport that follows a target sprite.

    Holds an offset rect sized like the whole map; ``apply`` shifts any
    sprite's rect by that offset for drawing, and ``update`` recentres the
    offset on the target while clamping at the map edges.
    """
    def __init__(self, width, height):
        self.width = width
        self.height = height
        self.camera = pg.Rect(0, 0, width, height)

    def apply(self, entity):
        # Translate an entity's rect into screen coordinates.
        return entity.rect.move(self.camera.topleft)

    def update(self, target):
        # Offset that would put the target at the centre of the screen.
        x = int(WIDTH / 2) - target.rect.centerx
        y = int(HEIGHT / 2) - target.rect.centery
        # Clamp scrolling so the view never shows past the map edges:
        # no further right/down than 0, no further left/up than map - screen.
        x = max(-(self.width - WIDTH), min(0, x))
        y = max(-(self.height - HEIGHT), min(0, y))
        self.camera = pg.Rect(x, y, self.width, self.height)
"35422381+DavidMower84@users.noreply.github.com"
] | 35422381+DavidMower84@users.noreply.github.com |
8a3022311e6e5bc085b1c0d6cda1b86dc882d192 | 9b8c58214d1c5909344b3548cec4bde72f53c909 | /build_keras_model_and_export.py | 84a4c0a3cc9aa2f01e7e6a6a5d51b98ea774f9de | [] | no_license | asparagui/keras_mnist_demo | 36629cca5cfb9c8e729425bec3fc3a143dc372fa | e674f9d636315aeb7a705211337c141df5a90f5f | refs/heads/master | 2021-07-12T06:27:43.139503 | 2020-04-16T01:54:56 | 2020-04-16T01:54:56 | 94,045,683 | 24 | 4 | null | null | null | null | UTF-8 | Python | false | false | 1,730 | py | '''
Trains a simple deep NN on the MNIST dataset,
Export as a coreml model
removed epochs from original code to deal with keras 1.2.2
'''
from __future__ import print_function
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.optimizers import RMSprop
from keras.utils import np_utils
batch_size = 128
num_classes = 10
epochs = 20
# the data, shuffled and split between train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(60000, 784)
x_test = x_test.reshape(10000, 784)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
y_train = np_utils.to_categorical(y_train, num_classes)
y_test = np_utils.to_categorical(y_test, num_classes)
model = Sequential()
model.add(Dense(512, activation='relu', input_shape=(784,)))
model.add(Dropout(0.2))
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(10, activation='softmax'))
model.summary()
model.compile(loss='categorical_crossentropy',
optimizer=RMSprop(),
metrics=['accuracy'])
history = model.fit(x_train, y_train,
batch_size=batch_size,
verbose=1,
validation_data=(x_test, y_test))
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
print("Saving mnist model")
import coremltools
coreml_model = coremltools.converters.keras.convert(model)
coreml_model.save("mnist_model.mlmodel")
| [
"koonce@hello.com"
] | koonce@hello.com |
b5912e349873dcbf8e6bf3da70f9559def4a8646 | 9fb630770d83bb696651bcf52aa2d438ced56a17 | /12_examples/02.py | c17de9595611e091cb0e6c04424285384f364b3a | [] | no_license | hima-del/Learn_Python | de4a9b157629dfa89fb5c79b8557847a4e4061c4 | 0c0798eae30c5bd0ec352707c4ce169ca64a51ea | refs/heads/master | 2023-04-08T20:22:01.880785 | 2021-04-23T11:28:31 | 2021-04-23T11:28:31 | 331,871,342 | 1 | 0 | null | 2021-04-23T11:28:32 | 2021-01-22T07:35:07 | Python | UTF-8 | Python | false | false | 602 | py | def jpeg_res(filename):
""""This function prints the resolution of the jpeg image file passed into it"""
# open image for reading in binary mode
with open(filename,'rb') as img_file:
# height of image (in 2 bytes) is at 164th position
img_file.seek(163)
# read the 2 bytes
a = img_file.read(2)
# calculate height
height = (a[0] << 8) + a[1]
# next 2 bytes is width
a = img_file.read(2)
# calculate width
width = (a[0] << 8) + a[1]
print("The resolution of the image is",width,"x",height)
jpeg_res("img1.jpg") | [
"himajaharidas77@gmail.com"
] | himajaharidas77@gmail.com |
2c6b764876baa6f80f39940e01e70255a7c56517 | fc09c07e79383e4fe825a065ee72e07a0260bd38 | /config.py | 4582452a2641fe3df09e9dbd71facbd51a31e5ed | [] | no_license | kanan132/mega-tutorial-flask | 266fb4abf0ce0691bc5bbaabc73923cfdab6e09e | 0c1bf00a1f1e0576b569cf5b0b80176e99e6cf17 | refs/heads/master | 2022-12-04T12:39:15.227992 | 2020-08-28T14:56:49 | 2020-08-28T14:56:49 | 289,289,210 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 878 | py | import os
from dotenv import load_dotenv
basedir = os.path.abspath(os.path.dirname(__file__))
load_dotenv(os.path.join(basedir, '.env'))
class Config(object):
    """Flask app configuration, read from environment variables with fallbacks."""
    # `or` (not .get default) so an *empty* env var also falls back
    SECRET_KEY = os.environ.get('SECRET_KEY') or 'you-will-never-guess'
    # default to a local SQLite file next to this module
    SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or \
        'sqlite:///' + os.path.join(basedir, 'app.db')
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    # outgoing-mail settings; mail is disabled when MAIL_SERVER is unset
    MAIL_SERVER = os.environ.get('MAIL_SERVER')
    MAIL_PORT = int(os.environ.get('MAIL_PORT') or 25)
    MAIL_USE_TLS = os.environ.get('MAIL_USE_TLS') is not None
    MAIL_USERNAME = os.environ.get('MAIL_USERNAME')
    MAIL_PASSWORD = os.environ.get('MAIL_PASSWORD')
    # recipients of error-report emails
    ADMINS = ['kenan.132@mail.ru']
    POSTS_PER_PAGE = 25
    LANGUAGES = ['en', 'es']
    MS_TRANSLATOR_KEY = os.environ.get('MS_TRANSLATOR_KEY')
    ELASTICSEARCH_URL = os.environ.get('ELASTICSEARCH_URL')
"kenan.132@mail.ru"
] | kenan.132@mail.ru |
7461599a46d9adee105822f8d7546e4086602b87 | c3303efb0d524b469d7867e73b259c51f5bbcdeb | /books_application/wsgi.py | ef6c974e27dc7538d1c5d1ac9c647102e0ccffc4 | [] | no_license | tanyairina/fav_books | d6d011b8d8e31a96eda45059460e5667bc893ee2 | ce92042c96bfd7e86de6926feb8580f99a1b6191 | refs/heads/master | 2023-01-30T03:42:09.269086 | 2020-12-14T15:33:10 | 2020-12-14T15:33:10 | 321,392,858 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 411 | py | """
WSGI config for books_application project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'books_application.settings')
application = get_wsgi_application()
| [
"72577434+tanyairina@users.noreply.github.com"
] | 72577434+tanyairina@users.noreply.github.com |
31e43df249ffaf82a6153cb36979d317cd6430dc | 147d6cf3e03d9f443cae06a111a1d1e8aa786384 | /practice1.py | 0d8c25dc852cf8a99fabbc741da051481f5448fa | [] | no_license | bobbyalestra/python-projects | 81136641a2a7368d144b6d1f1ca39e4683453537 | b432610bbc20618312eca7ca7231fe20cd4e551c | refs/heads/main | 2023-05-07T21:05:12.129100 | 2021-05-22T23:56:43 | 2021-05-22T23:56:43 | 369,885,235 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 415 | py |
# string concatenation
# suppose we want to create a string that says "subscribe to _____"
youtuber = "Blestra" # some string variable
# Three equivalent formatting options (kept for reference):
# print("subscribe to " + youtuber)
# print("subscribe to {}".format(youtuber))
# print(f"subscribe to {youtuber}")
# Mad-lib: prompt the user for words, then splice them into a sentence.
adjective = input("Adjective: ")
verb1 = input("Verb: ")
madlib = f"I love my kids they are {adjective}, funny and love able. and {verb1} "
print(madlib)
"bobbyalestra135@gmail.com"
] | bobbyalestra135@gmail.com |
3c8645c3411393c138841475bea142c913036dc5 | b0f3be5814eeb3341c6a2c256dea5acdc5749502 | /tests/find_nuclid_ob.py | 0174a6b6a168e9cef91ea895084b46a70299f0e6 | [] | no_license | igor-976/Test_Programs | 95105731af3192760d45e8028af753f29422b894 | ae9c90f38066016b146696d433b806bf9badf883 | refs/heads/master | 2023-08-24T21:56:02.599959 | 2021-10-10T14:43:20 | 2021-10-10T14:43:20 | 256,280,723 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 73 | py | import find_func as s
s.find_nuc("reaclib", "reaction", 10, 40, 25105)
| [
"noreply@github.com"
] | noreply@github.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.