blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e771a1aedae7f216e8205656c9656ab022c6e060 | 66c28faeaf3ec5e2e19007ea627aba62e8b8cc8e | /Game.py | df7fef9444d8237df760ec3281230b748e0b5841 | [] | no_license | iloveleejunghyun/ReinforcementLearning | 9053d0700964101de758a3edbdbf9cca808f31b6 | 79242b3516af5293f4d377344a6698df2193b23f | refs/heads/master | 2022-09-11T17:00:06.691004 | 2020-06-02T22:18:12 | 2020-06-02T22:18:12 | 265,705,072 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,644 | py | import numpy as np
import pandas as pd
import random
class Game:
rewards = None
positionCol = None
positionRow = None
def __init__(self, startCol=1, startRow=1):
self.distance = pd.DataFrame({1:[8,7,6,5,4], 2:[7,6,5,4,3], 3:[6,5,4,3,2], 4:[5,4,3,2,1], 5:[4,3,2,1,0]}, index={1,2,3,4,5})
self.rewards = pd.DataFrame({1:[0,1,2,3,4], 2:[1,2,3,4,5], 3:[2,3,4,5,6], 4:[3,4,5,6,7], 5:[4,5,6,7,8]}, index={1,2,3,4,5})
self.positionCol = startCol
self.positionRow = startRow
def move(self, direction):
reward = 0
end = False
distance_before = self.distance[self.positionCol][self.positionRow]
if direction=='Up':
self.positionRow -= 1
elif direction=='Down':
self.positionRow += 1
elif direction=='Left':
self.positionCol -= 1
else:
self.positionCol += 1
#check if we lost
if self.positionRow < 1 or self.positionRow > 5 or self.positionCol < 1 or self.positionCol > 5:
end = True
reward = -1000
#check if we have reached the end
elif self.positionCol == 5 and self.positionRow == 5:
end = True
reward = self.rewards[self.positionCol][self.positionRow]
else:
end = False
if distance_before < self.distance[self.positionCol][self.positionRow]:
reward = -1000
else:
reward = self.rewards[self.positionCol][self.positionRow]
#return reward and end of game indicator
return (reward, end) | [
"ilovejunghyun@163.com"
] | ilovejunghyun@163.com |
3674de65b0e09eba8a92b497cf4a7530fb460826 | d53bc632503254ca0d5099fe457c02c07212a131 | /cookieproject1/cookieproject1/wsgi.py | 0e0d958b3a4961808057c49586b4e5768c75d831 | [] | no_license | srikar1993/django | ba8428f6e1162cc40f2d034126e7baf29eb62edc | 2199d5d94accc7bce5b3fac4a4b7b1444e39b35f | refs/heads/master | 2023-07-14T21:10:52.654992 | 2021-08-26T06:37:04 | 2021-08-26T06:37:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 405 | py | """
WSGI config for cookieproject1 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'cookieproject1.settings')
application = get_wsgi_application()
| [
"32321399+srikar.madhavapeddy@users.noreply.github.com"
] | 32321399+srikar.madhavapeddy@users.noreply.github.com |
eb6f175b0c5788c950623020ee524b875e28fc23 | bafd37fdbaf76d5d7dabd9c07985969b3924f9c8 | /example_client/example.py | 9671da3b0660af7e0bb9fe806d6331467f1918ae | [
"Apache-2.0"
] | permissive | k24dizzle/nagios_registration | 2c1c95c7c871ee8ed31de46d555c812f2c0f41c8 | be18dbadd2c08def81e795e4afe2fe2cf41775cf | refs/heads/master | 2020-03-08T11:54:30.569982 | 2015-07-16T18:01:07 | 2015-07-16T18:01:07 | 128,111,583 | 1 | 0 | null | 2018-04-04T19:32:53 | 2018-04-04T19:32:52 | null | UTF-8 | Python | false | false | 3,263 | py | import oauth2
import json
###
#
# This script will create 2 hosts, and add them to a host group.
# It will then create a service, and assign that service to both hosts.
# It will then deploy a new nagios configuration file.
#
###
consumer_key = "OAUTH_KEY"
consumer_secret = "OAUTH_SECRET"
registration_server = "http://localhost:8000"
###
#
# You can create a consumer key and secret on the nagios_registration
# server with a django management command:
#
# python manage.py create_consumer
#
###
consumer = oauth2.Consumer(key=consumer_key, secret=consumer_secret)
client = oauth2.Client(consumer)
# Variables used by the actual requests below
hostname1 = "example app host"
address1 = "127.0.0.1"
hostname2 = "second app host"
address2 = "127.0.0.2"
groupname = "example_app_servers"
alias = "Example App Servers"
base_service = "24x7-active-service"
service_description = "Disk Usage"
check_command = "check_remote!disk_check.py!98!99"
# End of settings, now just making requests to the server
# Create the 2 hosts
client.request("%s/api/v1/host" % (registration_server),
method='POST',
body=json.dumps({"name": hostname1, "address": address1}),
headers={"Content-Type": "application/json"})
client.request("%s/api/v1/host" % (registration_server),
method='POST',
body=json.dumps({"name": hostname2, "address": address2}),
headers={"Content-Type": "application/json"})
# Create the hostgroup
client.request("%s/api/v1/hostgroup" % (registration_server),
method='POST',
body=json.dumps({"name": groupname, "alias": alias}),
headers={"Content-Type": "application/json"})
# Add the hosts to the hostgroup
client.request("%s/api/v1/hostgroup" % (registration_server),
method='PATCH',
body=json.dumps({"group": groupname, "host": hostname1}),
headers={"Content-Type": "application/json"})
client.request("%s/api/v1/hostgroup" % (registration_server),
method='PATCH',
body=json.dumps({"group": groupname, "host": hostname2}),
headers={"Content-Type": "application/json"})
# Create a service
client.request("%s/api/v1/service" % (registration_server),
method='POST',
body=json.dumps({"base_service": base_service,
"description": service_description,
"check_command": check_command}),
headers={"Content-Type": "application/json"})
# Add the service to the 2 hosts
client.request("%s/api/v1/service" % (registration_server),
method='PATCH',
body=json.dumps({"service": service_description,
"host": hostname1}),
headers={"Content-Type": "application/json"})
client.request("%s/api/v1/service" % (registration_server),
method='PATCH',
body=json.dumps({"service": service_description,
"host": hostname2}),
headers={"Content-Type": "application/json"})
# Deploy the changes
client.request("%s/api/v1/deploy" % (registration_server), method="POST")
print "Done!"
| [
"pmichaud@uw.edu"
] | pmichaud@uw.edu |
81e1b940bf6f21e1a0941c32360e43578a445dfa | c884a92881b6331ceae0a7371d2cd02d6ee0fb01 | /jd/jd/spiders/commect_spiders.py | 190f7ac7b9d8055e9b26e1148a44adc0b6cece32 | [] | no_license | greenluno/jd_scrapy | df5d2c3313b04faab4a66546d20509c083a5f2d5 | 2d798232be55db3bae88aad8221a1f0797eac287 | refs/heads/master | 2020-06-21T11:56:45.451727 | 2019-07-17T18:56:48 | 2019-07-17T18:56:48 | 197,443,128 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,795 | py | # -*- coding: utf-8 -*-
import requests
from jd.items import commentItem
import json
import xlrd
import scrapy
from scrapy import Request
class JDCommentSpider(scrapy.Spider):
name = 'comment'
allowed_domains = ['jd.com']
start_urls = ['https://www.jd.com']
def parse(self, response):
"""jd"""
url = "https://item.jd.com/1384071.html"
yield Request(url, callback=self.parseDetails)
def parseDetails(self, response):
id= 1384071 # item id
comment_num = "https://club.jd.com/comment/productCommentSummaries.action?referenceIds=" + str(id)
com = requests.get(comment_num).text
date = json.loads(com)
comment_nums = date['CommentsCount'][0]['ShowCount']
print(comment_nums)
comment_total = int(comment_nums)
if comment_total % 10 == 0: # calculate the number of page, for 10 comment every page
page = comment_total//10
else:
page = comment_total//10 + 1
for k in range(page):
#Scraping each comment page
com_url = 'https://sclub.jd.com/comment/productPageComments.action?productId=' + str(id) +'&score=0&sortType=5&page='+str(k)+'&pageSize=10'
yield scrapy.Request(com_url, callback=self.parse_getCommentnum)
def parse_getCommentnum(self, response):
js = json.loads(response.text)
# print(js)
comments = js['comments'] # all comments on same page
items = []
for comment in comments:
item1 = commentItem()
item1['user_name'] = comment['nickname'] # username
item1['user_id'] = comment['id'] # user id
item1['userProvince'] = comment['userProvince'] # user location
item1['content'] = comment['content'] # comment
item1['good_id'] = comment['referenceId'] # item id
item1['good_name'] = comment['referenceName'] # item name
item1['date'] = comment['referenceTime'] # timestamp
item1['replyCount'] = comment['replyCount'] # number of replies
item1['score'] = comment['score'] # score
item1['status'] = comment['status'] # status
item1['userLevelId'] = comment['userLevelId'] # user level
item1['productColor'] = comment['productColor'] # item color
item1['productSize'] = comment['productSize'] # item size
item1['userLevelName'] = comment['userLevelName'] # user status level name
item1['isMobile'] = comment['isMobile'] # mobile indicator
item1['userClientShow'] = comment['userClientShow'] # user client type
item1['days'] = comment['days'] # number of day
items.append(item1)
return items
| [
"noreply@github.com"
] | greenluno.noreply@github.com |
c0cacdecc4e03866a9fb51eb29dec1c32237ba2b | 7541be63a8989f6a95a8839048c1896059b17aa6 | /scripts/fun.py | b59183629696aa9d3a28fa5902709212f64b86d8 | [] | no_license | Kanda-design/MyProject | b7232efbe12f192e9f517df96f65b2e811685181 | 0b2a82a5db4234a8e4ffc7810aff350b93eb83b1 | refs/heads/master | 2020-08-21T06:15:55.295845 | 2019-10-18T21:00:39 | 2019-10-18T21:00:39 | 216,105,980 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,065 | py | import time
lines=9
column=int(lines*2.5)
space=15
x=y=0
one_third=int(lines*(1/3)-1)
two_third=lines*(2/3)
down_count=0
top_count=1
def sp(num):
while num >= 0:
print(" ",end="")
num=num-1
def sp_up(num1):
while (num1 >= 0):
print("*", end="")
sp(5 * top_count)
num1 = num1 - 1
while x < lines:
# time.sleep(2)
space1 = space
sp(space)
space=space1
if(x <= one_third):
if(x == 0):
print("*",end="")
sp(space1-one_third)
print("*")
elif(x == one_third):
sp_up(1)
print("*")
top_count=top_count+1
else:
sp_up(2)
print("*")
top_count=top_count+1
if(x == one_third):
space=space-1
else:
space=space-2
else:
print("*",end="")
sp(column - down_count)
down_count=down_count+4
print("*")
if(x == one_third):
space=space+1
else:
space=space+2
x=x+1 | [
"ssnaveen77@gmail.com"
] | ssnaveen77@gmail.com |
05546c27ea40660996b98f84d8a1a0f04a42c288 | 85bf9a13bf62c1f074894d134c23dd992ae8688c | /problems/p317/Solution.py | 6d55e5a066806320f5503f718d38b8fa74f2166f | [] | no_license | pololee/oj-leetcode | 4cca3d309b2c9931d15d3cec4b07b5d9d22733ef | 78a8b27ee108ba93aa7b659665976112f48fc2c2 | refs/heads/master | 2020-06-21T02:15:26.882273 | 2020-02-06T04:56:21 | 2020-02-06T04:56:21 | 197,320,113 | 0 | 0 | null | 2020-02-06T04:56:23 | 2019-07-17T05:20:02 | Python | UTF-8 | Python | false | false | 2,336 | py | import collections
import sys
class Solution:
DIRECTIONS = [(1, 0), (0, 1), (-1, 0), (0, -1)]
def shortestDistance(self, grid):
"""
:type grid: List[List[int]]
:rtype: int
"""
if not grid:
return 0
row_size = len(grid)
col_size = len(grid[0])
distance = [[0 for _ in range(col_size)]
for _ in range(row_size)]
reaches = [[0 for _ in range(col_size)]
for _ in range(row_size)]
num_of_buildings = 0
for i in range(row_size):
for j in range(col_size):
if grid[i][j] == 1:
num_of_buildings += 1
self.bfs(grid, distance, reaches, i, j)
shortest = sys.maxsize
for i in range(row_size):
for j in range(col_size):
if grid[i][j] == 0 and reaches[i][j] == num_of_buildings:
shortest = min(shortest, distance[i][j])
if shortest == sys.maxsize:
return -1
return shortest
def bfs(self, grid, distance, reaches, istart, jstart):
row_size = len(grid)
col_size = len(grid[0])
visited = [[False for _ in range(col_size)]
for _ in range(row_size)]
queue = collections.deque()
queue.append((istart, jstart))
visited[istart][jstart] = True
level = 0
while queue:
size = len(queue)
for _ in range(size):
row, col = queue.popleft()
if grid[row][col] == 0:
distance[row][col] += level
reaches[row][col] += 1
for drow, dcol in self.DIRECTIONS:
new_row = row + drow
new_col = col + dcol
if new_row >= 0 and new_row < row_size and new_col >= 0 and new_col < col_size and grid[new_row][new_col] == 0 and not visited[new_row][new_col]:
visited[new_row][new_col] = True
queue.append((new_row, new_col))
level += 1
def main():
test = [[1, 0, 2, 0, 1], [0, 0, 0, 0, 0], [0, 0, 1, 0, 0]]
sol = Solution()
print(sol.shortestDistance(test))
if __name__ == '__main__':
main()
| [
"pololee1990@gmail.com"
] | pololee1990@gmail.com |
aa3149016fb02d2179d334fbb170365f680f6088 | 5d688d4f501ed47448325b759a2c676123bed35a | /phunspell/tests/test__pl_PL.py | a628ac7bee297b1adf29bf74f4f2144f2a068bc0 | [
"MIT"
] | permissive | dvwright/phunspell | bf2eb686db23a0bc2ccad0600703565a0656321f | 818bbd081f84c570ec304fdc235ca112f9abd869 | refs/heads/main | 2023-04-22T01:47:24.828980 | 2021-03-24T00:52:30 | 2021-03-24T00:52:30 | 344,022,732 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 206 | py | import phunspell
import inspect
import unittest
# TODO:
class TestPhunspellLangs(unittest.TestCase):
pass
# pspell = phunspell.Phunspell('pl_PL')
if __name__ == "__main__":
unittest.main()
| [
"dwright999@apple.com"
] | dwright999@apple.com |
2096d1bf2f0cefa674cfde2faac4a3bce7276101 | b90b08d4f1b106dac4ac963f5f1c7ce21b52fa9d | /origin/formation_period.py | b2428ddcc529316c50ba83f94f3dca45fcaff3b1 | [] | no_license | q40603/pairs_trade_nsd | 9a274927bc2242eaeddabfc798b57941657b431c | 29d777739ffe25ba7ada3b9851fe0882772c5b8d | refs/heads/main | 2023-05-13T00:29:28.920693 | 2021-06-06T14:50:11 | 2021-06-06T14:50:11 | 355,578,537 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,164 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 17 11:33:32 2018
@author: chaohsien
"""
import pandas as pd
import numpy as np
from statsmodels.tsa.api import VAR
from statsmodels.tsa.stattools import adfuller
from MTSA import order_select , snr , zcr , JB_VECM #, chow_test
from vecm import vecm , rank , eig , weigh
from VecmPvalue import vecm_pvalue
# 挑出定態的股票,並做回測------------------------------------------------------------------------------------------------------------------------
def formation_period_single(day1):
# 合併二日資料
#min_price = np.log(pd.concat([day1,day2]))
min_price = day1
min_price = min_price.dropna(axis = 1)
min_price.index = np.arange(0,len(min_price),1)
unit_stock = np.where(min_price.apply(lambda x: adfuller(x)[1] > 0.05 , axis = 0 ) == True ) # 找出單跟的股票
min_price.drop(min_price.columns[unit_stock], axis=1 , inplace = True) # 刪除單跟股票,剩餘為定態序列。
spread = min_price
#------------------------------------------------------------------------------
# 無母數開倉門檻--------------------------------------
ave = []
std = []
for i in range(len(spread.T)):
y = spread.iloc[:,i]
ave.append( np.mean(y) )
std.append( np.std(y) )
ave = pd.DataFrame(ave) ; ave.columns = ["mu"]
std = pd.DataFrame(std) ; std.columns = ["stdev"]
# 程式防呆
a = np.array(np.where( std < 0.0000001 ))
if a.size > 1:
a = int(np.delete(a,-1,axis=0))
spread.drop(spread.columns[a],axis=1,inplace=True)
ave.drop(ave.index[a],axis=0,inplace=True) ; ave.index = np.arange(0,len(ave),1)
std.drop(std.index[a],axis=0,inplace=True) ; std.index = np.arange(0,len(std),1)
#------------------------------------------------------------------------------
# 計算過零率 ( spread )------------------------------
# Boot = 500
# z_c_r = []
# for j in range(len(spread.T)):
# y = spread.iloc[:,j]
# z_c_r.append( zcr(y,Boot) )
# z_c_r = pd.DataFrame(z_c_r) ; z_c_r.columns = ["zcr"]
# -----------------------------------------------------------------------------
stock_name = pd.DataFrame(spread.columns) ; stock_name.columns = ["stock"]
stock_name["zcr"] = 0
con = pd.concat([stock_name,ave,std],axis=1)
return con
# 挑出單根股票,帶入VECM中---------------------------------------------------------------------------------------------------------------------
def find_pairs( i , n , min_price ):
# 開啟matlab引擎
#eng=matlab.engine.start_matlab()
# 選擇適合的 VECM model,並且檢定 formation period 是否有結構性斷裂,並刪除該配對,其餘配對則回傳共整合係數。
#rank = 1
#t1 = int(len(min_price)*3/4) # 一天的時間長度(偵測兩天中間是否有結構性斷裂)
local_select_model = []
local_weight = []
local_name = []
local_pval = []
for j in range(i+1,n+1):
stock1 = min_price.iloc[:,i]
stock2 = min_price.iloc[:,j]
stock1_name = min_price.columns.values[i]
stock2_name = min_price.columns.values[j]
z = ( np.vstack( [stock1 , stock2] ).T )
model = VAR(z)
p = order_select(z,5)
#p = int(model.select_order(5).bic)
# VAR 至少需落後1期
if p < 1:
continue
# portmanteau test
if model.fit(p).test_whiteness( nlags = 5 ).pvalue < 0.05:
continue
# Normality test
if model.fit(p).test_normality().pvalue < 0.05:
continue
#r1 = eng.rank_jci( matlab.double(z.tolist()) , 'H2' , (p-1) )
#r2 = eng.rank_jci( matlab.double(z.tolist()) , 'H1*' , (p-1))
#r3 = eng.rank_jci( matlab.double(z.tolist()) , 'H1' , (p-1) )
r1 = rank( pd.DataFrame(z) , 'H2' , p )
r2 = rank( pd.DataFrame(z) , 'H1*' , p )
r3 = rank( pd.DataFrame(z) , 'H1' , p )
#r4 = rank( pd.DataFrame(z) , 'H*' , p )
if r3 > 0: # 在 model 3 上有 rank
if r2 > 0: # 在 model 2 上有 rank
if r1 > 0: # select model 1 and model 2 and model 3
#lambda_model2 = eng.eig_jci( matlab.double(z.tolist()) , 'H1*' , (p-1) , r2 )
#lambda_model3 = eng.eig_jci( matlab.double(z.tolist()) , 'H1' , (p-1) , r2 )
lambda_model2 = eig( pd.DataFrame(z) , 'H1*' , p , r2 )
lambda_model3 = eig( pd.DataFrame(z) , 'H1' , p , r2 )
test = np.log(lambda_model2/lambda_model3) * (len(min_price)-p)
if test <= 0:
raise ValueError('test value error')
if test > 3.8414:
#bp1 = chow_test( z , t1 , p , 'H1' , r3 )
#if bp1 == 0:
local_select_model.append('model3')
#weight.append( eng.coin_jci( matlab.double(z.tolist()) , 'H1' , (p-1) , r3 ) )
local_weight.append( weigh( pd.DataFrame(z) , 'H1' , p , r3 ) )
local_name.append([stock1_name,stock2_name])
local_pval.append( vecm_pvalue('model3', vecm( pd.DataFrame(z),'H1',p)[0][0] ) )
else:
#lambda_model1 = eng.eig_jci( matlab.double(z.tolist()) , 'H2' , (p-1) , r1 )
lambda_model1 = eig( pd.DataFrame(z) , 'H2' , p , r1 )
test = np.log(lambda_model1/lambda_model2) * (len(min_price)-p)
if test > 3.8414:
#bp1 = chow_test( z , t1 , p , 'H1*' , r2 )
#if bp1 == 0:
local_select_model.append('model2')
#weight.append( eng.coin_jci( matlab.double(z.tolist()) , 'H1*' , (p-1) , r2 ) )
local_weight.append( weigh( pd.DataFrame(z) , 'H1*' , p , r2 ) )
local_name.append([stock1_name,stock2_name])
local_pval.append( vecm_pvalue('model2',vecm(pd.DataFrame(z),'H1*',p)[0][1] ) )
else:
#bp1 = chow_test( z , t1 , p , 'H2' , r1 )
#if bp1 == 0:
local_select_model.append('model1')
#weight.append( eng.coin_jci( matlab.double(z.tolist()) , 'H2' , (p-1) , r1 ) )
local_weight.append( weigh( pd.DataFrame(z) , 'H2' , p , r1 ) )
local_name.append([stock1_name,stock2_name])
local_pval.append( vecm_pvalue('model1',vecm(pd.DataFrame(z),'H2',p)[0][0] ) )
else: # select model 2 and model 3
#lambda_model2 = eng.eig_jci( matlab.double(z.tolist()) , 'H1*' , (p-1) , r2 )
#lambda_model3 = eng.eig_jci( matlab.double(z.tolist()) , 'H1' , (p-1) , r2 )
lambda_model2 = eig( pd.DataFrame(z) , 'H1*' , p , r2 )
lambda_model3 = eig( pd.DataFrame(z) , 'H1' , p , r2 )
test = np.log(lambda_model2/lambda_model3) * (len(min_price)-p)
if test <= 0:
raise ValueError('test value error')
if test > 3.8414:
#bp1 = chow_test( z , t1 , p , 'H1' , r3 )
#if bp1 == 0:
local_select_model.append('model3')
#weight.append( eng.coin_jci( matlab.double(z.tolist()) , 'H1' , (p-1) , r3 ) )
local_weight.append( weigh( pd.DataFrame(z) , 'H1' , p , r3 ) )
local_name.append([stock1_name,stock2_name])
local_pval.append( vecm_pvalue('model3',vecm(pd.DataFrame(z),'H1',p)[0][0] ) )
else:
#bp1 = chow_test( z , t1 , p , 'H1*' , r2 )
#if bp1 == 0:
local_select_model.append('model2')
#weight.append( eng.coin_jci( matlab.double(z.tolist()) , 'H1*' , (p-1) , r2 ) )
local_weight.append( weigh( pd.DataFrame(z) , 'H1*' , p , r2 ) )
local_name.append([stock1_name,stock2_name])
local_pval.append( vecm_pvalue('model2',vecm(pd.DataFrame(z),'H1*',p)[0][1] ) )
else : # 只在 model 3 上有rank
#bp1 = chow_test( z , t1 , p , 'H1' , r3 )
#if bp1 == 0:
local_select_model.append('model3')
#weight.append( eng.coin_jci( matlab.double(z.tolist()) , 'H1' , (p-1) , r3 ) )
local_weight.append( weigh( pd.DataFrame(z) , 'H1' , p , r3 ) )
local_name.append([stock1_name,stock2_name])
local_pval.append( vecm_pvalue('model3',vecm(pd.DataFrame(z),'H1',p)[0][0] ) )
else: # 表示此配對無rank
continue
#local_pval.append(1)
#if VECM residuals 不是常態則搜尋下一個配對
#if JB_VECM( stock1 , stock2 , local_select_model , p) == 1:
#continue
# 關閉 matlab 引擎
#eng.quit()
return local_weight, local_name, local_select_model , local_pval
| [
"q40603@gmail.com"
] | q40603@gmail.com |
c13fcd679c4ce531053d8bfceaeaefe08fe3be64 | ca6c77859c630e5a09c81d0c6fd196f130768594 | /Chap8_StringsDeepLook/ITDS_Pandas_RegEx_DataMunging.py | 56d8b890291756c7e296ab7a0230093152ac4e6b | [] | no_license | kevin-d-mcewan/ClassExamples | b3739af1de03a396e2818da76851849d7b5b4a7b | a235a2635316248c8066fdb27074e92635603ee3 | refs/heads/master | 2022-12-05T22:14:33.730945 | 2019-12-01T00:47:33 | 2019-12-01T00:47:33 | 224,661,803 | 0 | 1 | null | 2022-11-20T03:10:29 | 2019-11-28T13:41:26 | Python | UTF-8 | Python | false | false | 1,283 | py | import pandas as pd
zips = pd.Series({'Boston': '02215', 'Miami': '3310'})
print(zips)
print('---------------------------')
print(zips.str.match(r'\d{5}'))
print('--------------------------------')
cities = pd.Series(['Boston, MA 02215', 'Miami, FL 33101'])
print(cities)
print('-----------------------------------------')
'''The RegEx is looking for a space and then followed by 2
UPPERCASE letters'''
print(cities.str.contains(r' [A-Z]{2}'))
print('----------------------------------------')
print(cities.str.match(r' [A-Z]{2} '))
print('-----------------------------------------')
# REFORMATTING YOUR DATA
contacts = [['Mike Green', 'demo1@deitel.com', '5555555555'],
['Sue Brown', 'demo2@deitel.com', '5555551234']]
contactsdf = pd.DataFrame(contacts,
columns = ['Names', 'Email', 'Phone'])
print(contactsdf)
print('--------------------------------------------------')
import re
def get_formatted_phone(value):
result = re.fullmatch(r'(\d{3})(\d{3})(\d{4})', value)
return '-'.join(result.groups()) if result else value
formatted_phone = contactsdf['Phone'].map(get_formatted_phone)
print(formatted_phone)
print('--------------------------------------------------')
contactsdf['Phone'] = formatted_phone
print(contactsdf)
| [
"MacKevinProgramming@gmail.com"
] | MacKevinProgramming@gmail.com |
d9c601b0c799ef3301c1e4e090d9a3ae6b184644 | 7f347d3642e129e2d29639deaff81090660ec750 | /ssd1306_display_img.py | 7813b0c124e002a1250e8a6350f274d6f26badc6 | [] | no_license | mikehemberger/visualizing-fractals | bee858bd2b1d75eaaed461f11bef8dd901874ef8 | f1bae8b447dbe75b7e0b771c3edeef04c1979dc6 | refs/heads/main | 2023-08-11T16:38:52.655813 | 2021-10-01T12:19:45 | 2021-10-01T12:19:45 | 403,018,462 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 474 | py | # Imports
import os
from PIL import Image
from luma.core.interface.serial import i2c
from luma.oled.device import ssd1306
# Assign device
serial = i2c(port=1, address=0x3C)
device = ssd1306(serial)
# Image to use
fp = "/home/pi/Documents/github/remote_from_vscode/"
fn = "mandelbrot_img_128x64.jpg"
img_path = os.path.join(fp, fn)
# Load image
w, h = 128, 64
img = Image.open(img_path).resize((w, h)).convert("1", dither=Image.NONE)
# Send to display
device.display(img) | [
"mikehemberger@gmail.com"
] | mikehemberger@gmail.com |
3effa98b6606fe1a7aff37b1d11b2510846b86af | 9962438fbc35ea87a397a9b5c58eb1a23180e590 | /phone_book/app.py | 840c25faaca77749c79a128b4fe8287834dd0633 | [] | no_license | barsuk2/Phone_book | 23f106308f2ab7c8ed9961393a3f35fe3cf8e4e7 | 31f0286a29ca22c4b51e695925f0d986b5a0a904 | refs/heads/main | 2023-04-13T10:01:14.902404 | 2021-03-19T04:28:20 | 2021-03-19T04:28:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,176 | py | import re
from flask import Flask, render_template, request, redirect, flash
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
app.debug = True
# manager=M
# app.debug = True
app.config['SECRET_KEY'] = 'a really really really really long secret key'
app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql+psycopg2://postgres:123@localhost:5433/phone_book'
db = SQLAlchemy(app)
@app.route('/', methods=['GET'])
@app.route('/index', methods=['GET'])
def index():
page = request.args.get('page', 1, type=int)
per_page = 10
phone_book = PhoneBook.query.order_by('name').paginate(page, per_page, error_out=False)
return render_template('index.html', phone_book=phone_book)
@app.route('/add_phone', methods=['POST', 'GET'])
def add_phone():
if request.method == 'POST':
name = request.form['name']
if name == '':
flash('Имя пустое')
return redirect('')
phone = request.form['phone']
phone = int(re.sub('\D', '', phone))
if (PhoneBook.query.filter_by(name=name).count() != 0) or \
(PhoneBook.query.filter_by(phone=phone).count() != 0):
flash('Имя или телефон существуют')
return redirect('')
obj = PhoneBook(name=name, phone=phone)
db.session.add(obj)
db.session.commit()
return redirect('/')
@app.route('/search', methods=['POST', 'GET'])
def search():
if request.method == 'GET':
name = request.args['search']
phone_book = PhoneBook.query.filter(PhoneBook.name.contains(name))
if phone_book.count() == 0:
flash('Совпадений не найдено')
else:
flash(f'Найдено {phone_book.count()} записи(ей)')
return render_template('search.html', phone_book=phone_book)
class PhoneBook(db.Model):
id = db.Column(db.Integer(), primary_key=True, nullable=False, unique=True, )
name = db.Column(db.String(50), nullable=False)
phone = db.Column(db.BigInteger(), nullable=False)
def __repr__(self):
return f'Book:{self.name}'
if __name__ == '__main__':
# app.run(debug=False)
app.run()
| [
"ickzn@ya.ru"
] | ickzn@ya.ru |
e72e92e66a942a5f02bb5c82d4035e2d7da29250 | 4577da97831b5bf9e08343bd3f18690a1bf41bdf | /stolenwebserver.py | 38117112df0a6f69a37103c14afe4bcf60999b99 | [] | no_license | jsklein/botplugins | 5325d224225eab73cad9d61cef0435731017e834 | 3aec8ee3281335b998ca796fd8050358f7cde98d | refs/heads/master | 2020-06-12T23:41:16.538886 | 2017-05-22T01:12:20 | 2017-05-22T01:12:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,427 | py | import BaseHTTPServer
from SimpleHTTPServer import SimpleHTTPRequestHandler
import sys
import base64
key = ""
class AuthHandler(SimpleHTTPRequestHandler):
''' Main class to present webpages and authentication. '''
def do_HEAD(self):
print "send header"
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
def do_AUTHHEAD(self):
print "send header"
self.send_response(401)
self.send_header('WWW-Authenticate', 'Basic realm=\"Test\"')
self.send_header('Content-type', 'text/html')
self.end_headers()
def do_GET(self):
global key
''' Present frontpage with user authentication. '''
if self.headers.getheader('Authorization') == None:
self.do_AUTHHEAD()
self.wfile.write('no auth header received')
pass
elif self.headers.getheader('Authorization') == 'Basic '+key:
SimpleHTTPRequestHandler.do_GET(self)
pass
else:
self.do_AUTHHEAD()
self.wfile.write(self.headers.getheader('Authorization'))
self.wfile.write('not authenticated')
pass
def test(HandlerClass = AuthHandler,
ServerClass = BaseHTTPServer.HTTPServer):
BaseHTTPServer.test(HandlerClass, ServerClass)
if __name__ == '__main__':
key = base64.b64encode("tuna:tuna")
test()
| [
"noreply@github.com"
] | jsklein.noreply@github.com |
ee558e4158fb25e06a9705e7e855d550ebd33d2c | dab455eaa042d355372dc871000fa3b6419430bd | /deepposekit/annotate/gui/Annotator.py | d2c3a9f5d0815c52b048f314dda35164bfc64074 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | OSU-AIMS/DeepPoseRobot | b8ef07ee1ce66fd6858e96a64064b7303ab67371 | 93a96e08d014bf075fabb6234776ba6ce343803c | refs/heads/main | 2023-04-28T07:51:48.901472 | 2021-05-12T19:57:08 | 2021-05-12T19:57:08 | 340,788,773 | 1 | 0 | Apache-2.0 | 2021-05-12T19:50:07 | 2021-02-21T01:03:55 | Python | UTF-8 | Python | false | false | 11,424 | py | # -*- coding: utf-8 -*-
# Copyright 2018-2019 Jacob M. Graving <jgraving@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import h5py
import os
from deepposekit.annotate.gui.GUI import GUI
from deepposekit.annotate.utils import hotkeys as keys
__all__ = ["Annotator"]
class Annotator(GUI):
"""
A GUI for annotating images.
------------------------------------------------------------
Keys | Action
------------------------------------------------------------
> +,- | Rescale the image
> Left mouse | Move active keypoint
> W, A, S, D | Move active keypoint
> space | Changes W,A,S,D mode (swaps between 1px or 10px)
> J, L | Load previous or next image
> <, > | Jump 10 images backward or forward
> I, K or |
tab, shift+tab | Switch active keypoint
> R | Mark frame as unannotated, or "reset"
> F | Mark frame as annotated or "finished"
> Esc, Q | Quit the Annotator GUI
------------------------------------------------------------
Note: Data is automatically saved when moving between frames.
Parameters
----------
datapath: str
Filepath of the HDF5 (.h5) file that contains the images to
be annotated.
dataset: str
Key name to access the images in the .h5 file.
skeleton: str
Filepath of the .csv or .xlsx file that has indexed information
on name of the keypoint (part, e.g. head), parent (the direct
connecting part, e.g. neck connects to head, parent is head),
and swap (swapping positions with a part when reflected).
See example file for more information.
scale: int/float, default 1
Scaling factor for the GUI (e.g. used in zooming).
text_scale: float
Scaling factor for the GUI font.
A text_scale of 1 works well for 1920x1080 (1080p) images
shuffle_colors: bool, default = True
Whether to shuffle the color order for drawing keypoints
refresh: int, default 100
Delay on receiving next keyboard input in milliseconds.
Attributes
----------
window_name: str
Name of the Annotation window when running program.
Set to be 'Annotation' unless otherwise changed.
n_images: int
Number of images in the .h5 file.
n_keypoints: int
Number of keypoints in the skeleton.
key: int
The key that is pressed on the keyboard.
image_idx: int
Index of a specific image in the .h5 file.
image: numpy.ndarray
One image accessed using image_idx.
Example
-------
>>> from deepposekit import Annotator
>>> app = Annotator('annotation.h5', 'images', 'skeleton.csv')
>>> app.run()
"""
def __init__(
self,
datapath,
dataset,
skeleton,
scale=1,
text_scale=0.15,
shuffle_colors=True,
refresh=100,
):
super(GUI, self).__init__()
self.window_name = "Annotation"
self.shuffle_colors = shuffle_colors
self._init_skeleton(skeleton)
if os.path.exists(datapath):
self._init_data(datapath, dataset)
else:
raise ValueError("datapath file or path does not exist")
self._init_gui(scale, text_scale, shuffle_colors, refresh)
def _init_data(self, datapath, dataset):
""" Initializes the images from the .h5 file (called in init).
Parameters
----------
datapath: str
Path of the .h5 file that contains the images to be annotated.
dataset: str
Key name to access the images in the .h5 file.
"""
if isinstance(datapath, str):
if datapath.endswith(".h5"):
self.datapath = datapath
else:
raise ValueError("datapath must be .h5 file")
else:
raise TypeError("datapath must be type `str`")
if isinstance(dataset, str):
self.dataset = dataset
else:
raise TypeError("dataset must be type `str`")
with h5py.File(self.datapath, "r+") as h5file:
self.n_images = h5file[self.dataset].shape[0]
# Check that all parts of the file exist
if "annotations" not in list(h5file.keys()):
empty_array = np.zeros((self.n_images, self.n_keypoints, 2))
h5file.create_dataset(
"annotations",
(self.n_images, self.n_keypoints, 2),
dtype=np.float64,
data=empty_array,
)
for idx in range(self.n_images):
h5file["annotations"][idx] = self.skeleton.loc[:, ["x", "y"]].values
if "annotated" not in list(h5file.keys()):
empty_array = np.zeros((self.n_images, self.n_keypoints), dtype=bool)
h5file.create_dataset(
"annotated",
(self.n_images, self.n_keypoints),
dtype=bool,
data=empty_array,
)
if "skeleton" not in list(h5file.keys()):
skeleton = self.skeleton[["tree", "swap_index"]].values
h5file.create_dataset(
"skeleton", skeleton.shape, dtype=np.int32, data=skeleton
)
# Unpack the images from the file
#self.image_idx = np.sum(np.all(h5file["annotated"].value, axis=1)) - 1
self.image_idx = np.sum(np.all(h5file["annotated"][:], axis=1)) - 1
self.image = h5file[self.dataset][self.image_idx]
self._check_grayscale()
self.skeleton.loc[:, ["x", "y"]] = h5file["annotations"][self.image_idx]
self.skeleton.loc[:, "annotated"] = h5file["annotated"][self.image_idx]
def _save(self):
""" Saves an image.
Automatically called when moving to new images or invoked manually
using 'ctrl + s' keys.
"""
#with h5py.File(self.datapath) as h5file:
with h5py.File(self.datapath,'r+') as h5file:
h5file["annotations"][self.image_idx] = self.skeleton.loc[
:, ["x", "y"]
].values
h5file["annotated"][self.image_idx] = self.skeleton.loc[
:, "annotated"
].values
self.skeleton.loc[:, ["x", "y"]] = h5file["annotations"][self.image_idx]
self.skeleton.loc[:, "annotated"] = h5file["annotated"][self.image_idx]
def _load(self):
""" Loads an image.
This method is called in _move_image_idx when moving to different
images. The image of specified image_idx will be loaded onto the GUI.
"""
with h5py.File(self.datapath,'r') as h5file:
self.image = h5file[self.dataset][self.image_idx]
self._check_grayscale()
self.skeleton.loc[:, ["x", "y"]] = h5file["annotations"][self.image_idx]
self.skeleton.loc[:, "annotated"] = h5file["annotated"][self.image_idx]
def _last_image(self):
""" Checks if image index is on the last index.
Helper method to check for the index of the last image in the h5 file.
Returns
-------
bool
Indicate if image_idx is the last index.
"""
return self.image_idx == self.n_images - 1
def _move_image_idx(self):
""" Move to different image.
Based on the key pressed, updates the image on the GUI.
The scheme is as follows:
------------------------------------------------------------
Keys | Action
------------------------------------------------------------
> <- , -> | Load previous or next image
> , , . | Jump 10 images backward or forward
------------------------------------------------------------
Every time the user moves from the image, the annotations
on the image is saved before loading the next image.
"""
# <- (left arrow) key
if self.key is keys.LEFTARROW:
self._save()
if self.image_idx == 0:
self.image_idx = self.n_images - 1
else:
self.image_idx -= 1
self._load()
# -> (right arrow) key
elif self.key is keys.RIGHTARROW:
self._save()
if self._last_image():
self.image_idx = 0
else:
self.image_idx += 1
self._load()
# . (period) key
elif self.key is keys.LESSTHAN:
self._save()
if self.image_idx - 10 < 0:
self.image_idx = self.n_images + self.image_idx - 10
else:
self.image_idx -= 10
self._load()
# , (comma) key
elif self.key is keys.GREATERTHAN:
self._save()
if self.image_idx + 10 > self.n_images - 1:
self.image_idx = self.image_idx + 10 - self.n_images
else:
self.image_idx += 10
self._load()
def _data(self):
""" Activates key bindings for annotated and save.
Creates additional key bindings for the program.
The bindings are as follows:
------------------------------------------------------------
Keys | Action
------------------------------------------------------------
> Ctrl-R | Mark frame as unannotated
> Ctrl-F | Mark frame as annotated
> Ctrl-S | Save
------------------------------------------------------------
"""
if self.key is keys.R:
self.skeleton["annotated"] = False
elif self.key is keys.F:
self.skeleton["annotated"] = True
elif self.key is keys.V:
if self.skeleton.loc[self.idx, ["x", "y"]].isnull()[0]:
self.skeleton.loc[self.idx, ["x", "y"]] = -1
else:
self.skeleton.loc[self.idx, ["x", "y"]] = np.nan
elif self.key in [keys.Q, keys.ESC]:
self._save()
print("Saved")
def _hotkeys(self):
""" Activates all key bindings.
Enables all the key functionalities described at the
start of the file.
"""
if self.key != keys.NONE:
self._wasd()
self._move_idx()
self._move_image_idx()
self._zoom()
self._data()
self._update_canvas()
| [
"34522460+AdamExley@users.noreply.github.com"
] | 34522460+AdamExley@users.noreply.github.com |
0185ca7c1da567af3bf96e38225686f8229fb649 | c6d94fad0eb1ca7342de1fc3491f18233ae32b81 | /cwb/data/source/earthquake.py | f8d2993879c45d9c7c4576512d6b6a7b12488483 | [] | no_license | shengtai0201/OpenDataCollection | f16e04085f266851b078dcef06384f9f95f6216c | 064f73e1973ffff338913048dff429b62a6a63a7 | refs/heads/master | 2021-01-25T06:45:27.554847 | 2017-06-24T09:44:21 | 2017-06-24T09:44:21 | 93,606,728 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,365 | py | # 分區震度資訊
class ShakingArea:
def __init__(self):
# 分區描述
self.area_desc = None
# 分區名稱
self.area_name = None
# 分區震度
self.area_intensity = None
# 分區圖示顏色
self.area_color = None
# 資料來源方式
self.info_status = None
# 各地震站資訊
self.eq_station_list = []
# 震度資訊
class Intensity:
def __init__(self):
# 分區震度資訊
self.shaking_area_list = []
# 地震資訊
class Earthquake:
def __init__(self, report_type, earthquake_no):
# 報文種類
self.report_type = report_type
# 地震報告編號
self.earthquake_no = earthquake_no
# 開放資料公告報號
self.report_no = None
# 報文內容
self.report_content = None
# 開放資料燈號
self.report_color = None
# 開放資料參考網址
self.web = None
# 地震報告圖網址
self.report_image_uri = None
# 等震度圖網址
self.shake_map_uri = None
# 地震報告備註
self.report_remark = None
# 海嘯備註
self.tsunami_remark = None
# 地震資訊
self.earthquake_info = None
# 震度資訊
self.intensity = None
| [
"shengtai0201@gmail.com"
] | shengtai0201@gmail.com |
f722b3ea28af5c3e8bae8160d0203660b4e1d87f | e2838f58196fb16b09cd0bb3ad5b15fd4fc3a006 | /libs/bn.py | 063fc4a08ab764b00c0cc7228d9a1ac4d6fbcd29 | [
"MIT"
] | permissive | wzn0828/pytorch-classification | 8ebb4b7d7d142e877c5871bd201c44679ac8dbf5 | bba9b62dfab404222b599d07b86bed04d9541757 | refs/heads/master | 2021-07-01T11:57:08.764114 | 2020-09-22T00:50:00 | 2020-09-22T00:50:00 | 164,197,281 | 0 | 0 | null | 2019-01-05T08:41:11 | 2019-01-05T08:41:11 | null | UTF-8 | Python | false | false | 7,718 | py | from collections import OrderedDict, Iterable
from itertools import repeat
try:
# python 3
from queue import Queue
except ImportError:
# python 2
from Queue import Queue
import torch
import torch.nn as nn
import torch.autograd as autograd
from .functions import inplace_abn, inplace_abn_sync
def _pair(x):
if isinstance(x, Iterable):
return x
return tuple(repeat(x, 2))
class ABN(nn.Sequential):
"""Activated Batch Normalization
This gathers a `BatchNorm2d` and an activation function in a single module
"""
def __init__(self, num_features, activation=nn.ReLU(inplace=True), **kwargs):
"""Creates an Activated Batch Normalization module
Parameters
----------
num_features : int
Number of feature channels in the input and output.
activation : nn.Module
Module used as an activation function.
kwargs
All other arguments are forwarded to the `BatchNorm2d` constructor.
"""
super(ABN, self).__init__(OrderedDict([
("bn", nn.BatchNorm2d(num_features, **kwargs)),
("act", activation)
]))
class InPlaceABN(nn.Module):
"""InPlace Activated Batch Normalization"""
def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True, activation="leaky_relu", slope=0.01):
"""Creates an InPlace Activated Batch Normalization module
Parameters
----------
num_features : int
Number of feature channels in the input and output.
eps : float
Small constant to prevent numerical issues.
momentum : float
Momentum factor applied to compute running statistics as.
affine : bool
If `True` apply learned scale and shift transformation after normalization.
activation : str
Name of the activation functions, one of: `leaky_relu`, `elu` or `none`.
slope : float
Negative slope for the `leaky_relu` activation.
"""
super(InPlaceABN, self).__init__()
self.num_features = num_features
self.affine = affine
self.eps = eps
self.momentum = momentum
self.activation = activation
self.slope = slope
if self.affine:
self.weight = nn.Parameter(torch.Tensor(num_features))
self.bias = nn.Parameter(torch.Tensor(num_features))
else:
self.register_parameter('weight', None)
self.register_parameter('bias', None)
self.register_buffer('running_mean', torch.zeros(num_features))
self.register_buffer('running_var', torch.ones(num_features))
self.reset_parameters()
def reset_parameters(self):
self.running_mean.zero_()
self.running_var.fill_(1)
if self.affine:
self.weight.data.fill_(1)
self.bias.data.zero_()
def forward(self, x):
return inplace_abn(x, self.weight, self.bias, autograd.Variable(self.running_mean),
autograd.Variable(self.running_var), self.training, self.momentum, self.eps,
self.activation, self.slope)
def __repr__(self):
rep = '{name}({num_features}, eps={eps}, momentum={momentum},' \
' affine={affine}, activation={activation}'
if self.activation == "leaky_relu":
rep += ' slope={slope})'
else:
rep += ')'
return rep.format(name=self.__class__.__name__, **self.__dict__)
class InPlaceABNSync(nn.Module):
"""InPlace Activated Batch Normalization with cross-GPU synchronization
This assumes that it will be replicated across GPUs using the same mechanism as in `nn.DataParallel`.
"""
def __init__(self, num_features, devices=None, eps=1e-5, momentum=0.1, affine=True, activation="none",
slope=0.01):
"""Creates a synchronized, InPlace Activated Batch Normalization module
Parameters
----------
num_features : int
Number of feature channels in the input and output.
devices : list of int or None
IDs of the GPUs that will run the replicas of this module.
eps : float
Small constant to prevent numerical issues.
momentum : float
Momentum factor applied to compute running statistics as.
affine : bool
If `True` apply learned scale and shift transformation after normalization.
activation : str
Name of the activation functions, one of: `leaky_relu`, `elu` or `none`.
slope : float
Negative slope for the `leaky_relu` activation.
"""
super(InPlaceABNSync, self).__init__()
self.num_features = num_features
self.devices = devices if devices else list(range(torch.cuda.device_count()))
self.affine = affine
self.eps = eps
self.momentum = momentum
self.activation = activation
self.slope = slope
if self.affine:
self.weight = nn.Parameter(torch.Tensor(num_features))
self.bias = nn.Parameter(torch.Tensor(num_features))
else:
self.register_parameter('weight', None)
self.register_parameter('bias', None)
self.register_buffer('running_mean', torch.zeros(num_features))
self.register_buffer('running_var', torch.ones(num_features))
self.reset_parameters()
# Initialize queues
self.worker_ids = self.devices[1:]
self.master_queue = Queue(len(self.worker_ids))
self.worker_queues = [Queue(1) for _ in self.worker_ids]
def reset_parameters(self):
self.running_mean.zero_()
self.running_var.fill_(1)
if self.affine:
self.weight.data.fill_(1)
self.bias.data.zero_()
def forward(self, x):
if x.get_device() == self.devices[0]:
# Master mode
extra = {
"is_master": True,
"master_queue": self.master_queue,
"worker_queues": self.worker_queues,
"worker_ids": self.worker_ids
}
else:
# Worker mode
extra = {
"is_master": False,
"master_queue": self.master_queue,
"worker_queue": self.worker_queues[self.worker_ids.index(x.get_device())]
}
return inplace_abn_sync(x, self.weight, self.bias, autograd.Variable(self.running_mean),
autograd.Variable(self.running_var), extra, self.training, self.momentum, self.eps,
self.activation, self.slope)
def __repr__(self):
rep = '{name}({num_features}, eps={eps}, momentum={momentum},' \
' affine={affine}, devices={devices}, activation={activation}'
if self.activation == "leaky_relu":
rep += ' slope={slope})'
else:
rep += ')'
return rep.format(name=self.__class__.__name__, **self.__dict__)
class InPlaceABNWrapper(nn.Module):
"""Wrapper module to make `InPlaceABN` compatible with `ABN`"""
def __init__(self, *args, **kwargs):
super(InPlaceABNWrapper, self).__init__()
self.bn = InPlaceABN(*args, **kwargs)
def forward(self, input):
return self.bn(input)
class InPlaceABNSyncWrapper(nn.Module):
"""Wrapper module to make `InPlaceABNSync` compatible with `ABN`"""
def __init__(self, *args, **kwargs):
super(InPlaceABNSyncWrapper, self).__init__()
self.bn = InPlaceABNSync(*args, **kwargs)
def forward(self, input):
return self.bn(input)
| [
"18001279185@163.com"
] | 18001279185@163.com |
2de393cbda5169cda816111a6548dbb4b7dce927 | a00b5723f4359e44431f9015be1568d2ab5589da | /9_Palindrome_Number.py | 530f8700b8ec67cf875c741c235ba4eacf48b071 | [] | no_license | creageng/lc2016 | 8495286c94c4989c426767eb17bb3b563d70ced2 | ab5062835ac44408d1f83a307c2d3b44038af006 | refs/heads/master | 2021-01-21T14:40:07.671299 | 2018-02-21T07:17:26 | 2018-02-21T07:17:26 | 58,109,571 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 774 | py | # Determine whether an integer is a palindrome.
# Do this without extra space.
# click to show spoilers.
# Subscribe to see which companies asked this question
class Solution(object):
def isPalindrome(self, x):
"""
:type x: int
:rtype: bool
"""
if x < 0:
return False
if x == 0:
return True
div = 1
while x/div >= 10:
div *= 10
while x:
left = x/div
right = x%10
if right != left:
return False
x = (x%div)/10
div /= 100
return True
if __name__ == "__main__":
x = 22422
print Solution().isPalindrome(x)
#"PAHNAPLSIIGYIR"
## [1,2] | [
"creageng@gmail.com"
] | creageng@gmail.com |
29a953189350246c9f2bbfd79cf95300e9445eab | ef71ecf6cdaee1a00d1587bc31016b95615ebef9 | /htmltoimg.py | 520b63b09d814c6d7b48e00db4345d96f5fea23b | [] | no_license | babybear1402/mapgif | 47cf19bf3674e99ce4cddb8780633c2ae689783b | 210995a121e268a5b5aae32894500274a312b16b | refs/heads/master | 2022-07-04T11:01:58.195403 | 2020-05-10T06:01:57 | 2020-05-10T06:01:57 | 262,722,249 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,697 | py | import cv2
import pandas as pd
from itertools import cycle
from PIL import Image, ImageTk
from PIL import Image, ImageDraw, ImageFont
import os
import numpy as np
import imageio
cases = pd.read_csv('cbc.csv')
print(cases.columns)
images =[]
image_folder = "testpic"
for i in range(107):
imagej = Image.open("pictures/"+str(i)+".png")
draw = ImageDraw.Draw(imagej)
(x, y) = (65, 5)
message = cases.columns[i+1]
font = ImageFont.truetype("Lato-Black.ttf", 50)
color = 'rgb(0, 0, 0)' # black color
# draw the message on the background
z = 8
draw.rectangle([935,8,1378,25], fill="white")
draw.text((x, y), message, fill=color, font=font)
(x, y) = (1011, z)
message = str(int(np.exp(2)))
font = ImageFont.truetype("Lato-Black.ttf", 15)
draw.text((x, y), message, fill=color, font=font)
(x, y) = (940, z)
message = "0"
draw.text((x, y), message, fill=color, font=font)
(x, y) = (1082, z)
message = str(int(np.exp(4)))
draw.text((x, y), message, fill=color, font=font)
(x, y) = (1082+64, z)
message = str(int(np.exp(6)))
draw.text((x, y), message, fill=color, font=font)
(x, y) = (1082 + 65 * 2, z)
message = "2,980"
draw.text((x, y), message, fill=color, font=font)
(x, y) = (1082 + 60 * 3, z)
message = "22,026"
draw.text((x, y), message, fill=color, font=font)
(x, y) = (1082 + 60 * 4, z)
message = "59,874"
draw.text((x, y), message, fill=color, font=font)
# save the edited image
imagej.save("testpic/"+str(i)+".png")
print("running")
images.append(imageio.imread("testpic/"+str(i)+".png"))
imageio.mimsave('movie.gif', images) | [
"noreply@github.com"
] | babybear1402.noreply@github.com |
03de9c788c9caf671d86a69e63e56f16c376f540 | fc43f31b30a6b5668caaa6a12a75e408cef78ac4 | /landfill/scraper/scraper.py | 2f33902bd0969633f46a5e3928e4a4d7a8db5bc7 | [
"MIT"
] | permissive | zbanks/landfill | ad67825cfc911e57b76603e38cf185b50dfa79a4 | 5fd409d3cbea29471e21159878e064e5eb1f8fa7 | refs/heads/master | 2020-04-27T20:50:29.810333 | 2014-01-02T02:20:27 | 2014-01-02T02:20:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 65 | py | import os
BASEPATH=os.path.join(os.path.dirname(__file__),'..')
| [
"eric@ericslaptop.(none)"
] | eric@ericslaptop.(none) |
46f2d7ffd07959a3d5d20ccce58b0d5dc4df1fd3 | e5e25801fe7293a29084cb0ecbe73aabb95145d1 | /prototipo/prototipo1/clientes/views.py | 0d1aa436225dfda4f37434304c4022568f190d35 | [] | no_license | Imperdus/Speech-to-txt-prototype | 16ee651e8635ef742fcea70a78407a0b7a7f3ac4 | c52c2474deeac51be9510f4473d2ae0e029960e0 | refs/heads/master | 2023-03-03T20:26:11.810918 | 2021-02-12T18:30:26 | 2021-02-12T18:30:26 | 338,027,501 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 114 | py | from django.shortcuts import render
# views
def persons_list(request):
return render(request, 'crudm.html')
| [
"lucasantostn@gmail.com"
] | lucasantostn@gmail.com |
f72b0e98f3517d88647e2159a2327506c2341005 | 7ea56317ae2917381f7c37327320bab870f1f537 | /send_screenshot_mail.py | 167b7aa6646690aa3d38fc888c3bd24d6bce7c4f | [] | no_license | AspenBen/teamviewer_keep_running_windows | c4c3bccfe837dad1fb252d6565d18d39b78c604c | 86a131a10de74757416cc64a2de4af70cbcb97cc | refs/heads/master | 2022-11-18T00:35:00.643114 | 2020-07-13T03:03:43 | 2020-07-13T03:03:43 | 279,198,524 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,679 | py | from email import encoders
from email.header import Header
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.mime.base import MIMEBase
from email.utils import parseaddr, formataddr
import smtplib
def _format_addr(s):
name, addr = parseaddr(s)
return formataddr((Header(name, 'utf-8').encode(), addr))
def send_mail():
account = "sourcexxxxx@163.com"#I resign a 163 emial as Outbox
password = "GLKTJULVSQVMHBVE"
to = "destinaitonxxxxx@asterfusion.com"#the mail we want to send
smtp_server = "smtp.163.com"
msg = MIMEMultipart()
msg['From'] = _format_addr("sourcexxxx@163.com")
msg['To'] = _format_addr("destinationxxxx@asterfusion.com")
msg['Subject'] = Header('teamviwer ID and password')
#content word
msg.attach(MIMEText('teamviewer exit suddendly, now has restart it, please get the ID and password from under picture', 'plain', 'utf-8'))
#picture
with open('C:\\Users\\26989\\software\\teamviwer_auto_running\\account_pswd.jpg', 'rb') as f:
mime = MIMEBase('image', 'jpeg', filename='account_pswd.jpg')
mime.add_header('Content-Disposition', 'attachment', filename='account_pswd.jpg')
mime.add_header('Content-ID', '<0>')
mime.add_header('X-Attachment-Id', '0')
mime.set_payload(f.read())
encoders.encode_base64(mime)
msg.attach(mime)
server = smtplib.SMTP(smtp_server, 25)
server.set_debuglevel(1)
server.login(account, password)
server.sendmail(account, [to], msg.as_string())
print ("the email has sended, check email box")
server.quit()
| [
"zhangb2015@lzu.edu.cn"
] | zhangb2015@lzu.edu.cn |
d50acec61d6c84d3ce17f5a4f20933ee44d8dd98 | 124901bcf4fa7dc8230ce9371b90653689f1bb1d | /series/series1.py | 55c9a92f33205a122f38dba5ce2ef3736a79be68 | [] | no_license | priyankapiya23/BasicPython | e322bcdceae77fc2a77c7d87cc0368a09eff72ab | 93f0602e8bfee88e796b8bd8ecf8c56982fb7f3c | refs/heads/main | 2023-02-28T17:28:19.878305 | 2021-02-12T06:39:43 | 2021-02-12T06:39:43 | 309,364,401 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 468 | py | #s=((a+1)/2)+((a+3)/4)+........n term
a=int(input("enter the value of a="))
n=int(input("enter the value of n="))
k=0
s=0
for i in range (1,n+1,2):
k=((a+i)/i+1)
s=s+k
print(round(s,2))
#s=a/2+a/3+..a/n+1
a=int(input("enter the value of a="))
n=int(input("enter the value of n="))
k=0
s=0
for i in range(2,n+2):
k=a/i
s=s+k
print(round(s,2))
#(1) + (1*2) + (1*2*3) +.10 term
sum = 0
k=1
for i in range(1,11):
k=k*i
sum=sum+k
print(sum)
| [
"priyankakumarisumanpk@gmail.com"
] | priyankakumarisumanpk@gmail.com |
9c0638a818504ccc0fe7184a5b9662a344e8de15 | 318f850eddbe1965244e6fdf53b75eda036f42b9 | /envifair2018/wsgi.py | 9aa666f090a31da4d9a2f7850e2eeab784ed7263 | [] | no_license | chromity/envifair2018 | 70a99e00c681722e3e8937163e75e0c2979a1b09 | d7cc124bbb2f020d94f7d4357ed51a07a5ee61f3 | refs/heads/master | 2020-03-29T13:55:38.241719 | 2018-09-23T17:01:41 | 2018-09-23T17:01:41 | 149,989,208 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 401 | py | """
WSGI config for envifair2018 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'envifair2018.settings')
application = get_wsgi_application()
| [
"michaeldaryl.code@gmail.com"
] | michaeldaryl.code@gmail.com |
acbbec5e9fde66dc1ec45b08c53724a5018010e7 | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/tenderli.py | 28ba6f38f41106cd5c55dcedcbdf538f5a5cc99c | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 835 | py | ii = [('GodwWSL2.py', 4), ('FerrSDO3.py', 3), ('WilbRLW.py', 1), ('WilbRLW4.py', 2), ('AubePRP2.py', 1), ('CookGHP.py', 1), ('KembFJ1.py', 2), ('WilbRLW5.py', 2), ('TennAP.py', 1), ('BailJD2.py', 3), ('WilbRLW2.py', 1), ('LyttELD.py', 3), ('CoopJBT2.py', 1), ('GrimSLE.py', 1), ('AinsWRR3.py', 2), ('RoscTTI2.py', 2), ('ClarGE.py', 8), ('LandWPA.py', 2), ('GilmCRS.py', 3), ('AinsWRR.py', 1), ('MedwTAI.py', 1), ('LandWPA2.py', 2), ('FerrSDO2.py', 7), ('TalfTIT.py', 1), ('CoopJBT.py', 3), ('SoutRD2.py', 1), ('WheeJPT.py', 3), ('HowiWRL2.py', 1), ('BailJD3.py', 3), ('MereHHB.py', 1), ('HogaGMM.py', 2), ('MartHRW.py', 1), ('DequTKM.py', 1), ('KembFJ2.py', 2), ('AinsWRR2.py', 1), ('ClarGE3.py', 2), ('RogeSIP.py', 2), ('DibdTRL.py', 2), ('HogaGMM2.py', 1), ('MartHSI.py', 1), ('BowrJMM3.py', 1), ('ClarGE4.py', 2), ('AdamJOA.py', 1)] | [
"prabhjyotsingh95@gmail.com"
] | prabhjyotsingh95@gmail.com |
7c2d4edda7c466933dd4f7639c43ef006c476fa8 | 75d63ca6c6d2e6210b40afbce5665703ac219974 | /main.py | b0012c7c1126d51e0c238b2ef8c96d63bb0465d4 | [] | no_license | musale/scrap-send-mail | 1cd97f0fc75ba02c0c252ea3fd1223a3a60d79ab | 8c3b3410eac46b84d5b44e14954821515d438612 | refs/heads/master | 2021-01-19T03:28:49.962649 | 2017-04-05T16:10:58 | 2017-04-05T16:10:58 | 87,315,887 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,338 | py | """Main file."""
import email.message
import smtplib
from string import Template
import requests
from bs4 import BeautifulSoup
from utils.variables import SENDER_EMAIL, SENDER_EMAIL_PASSWORD, headers
SEND_TO = ['musale@focusmobile.co']
MESSAGE_CONTAINER = email.message.Message()
MESSAGE_CONTAINER['Subject'] = "UPDATES"
MESSAGE_CONTAINER['From'] = SENDER_EMAIL
MESSAGE_CONTAINER.add_header('Content-Type', 'text/html')
def main():
"""Run this function to do stuff."""
url = "http://optimetriks.breezy.hr/"
response = requests.get(url, headers=headers)
soup = BeautifulSoup(response.text, 'html.parser')
data = soup.find_all('h2')
image = soup.img.get('src')
substitute_ = {
'data': "".join(str(x) for x in data),
'image': image,
'url': url
}
html_data = open('utils/email.txt')
src = Template(html_data.read())
MESSAGE_CONTAINER.set_payload(src.safe_substitute(substitute_))
# setup the email server,
server = smtplib.SMTP('smtp.gmail.com', 587)
server.starttls()
# add my account login name and password,
server.login(SENDER_EMAIL, SENDER_EMAIL_PASSWORD)
# send the email
server.sendmail(SENDER_EMAIL, SEND_TO, MESSAGE_CONTAINER.as_string())
# disconnect from the server
server.quit()
if __name__ == '__main__':
main()
| [
"martinmshale@gmail.com"
] | martinmshale@gmail.com |
4120d60565a39b46cd5b6d64ed972b8c46931722 | 5a298ece5b17e6e993d50a855027f265e115e2bd | /utilities/filter_data.py | 99687a7e234137d21978c275dd56b29a9d74c2f1 | [] | no_license | hvk3/IR_project | 86b8a1176f6a8ed541f179f1c541eb139dde0295 | ae6deea2276f0a76bfa23482fd1b7a4c1f039264 | refs/heads/master | 2021-10-16T17:33:11.258479 | 2019-02-12T08:45:51 | 2019-02-12T08:45:51 | 118,168,898 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 569 | py | from pymongo import MongoClient
from langdetect import detect
from tqdm import tqdm
client = MongoClient()
db = client.youtube8m
ds_1 = db.iteration3
ds_2 = db.iteration4
ds_2.remove()
print("Before:", ds_1.find().count())
for record in tqdm(ds_1.find()):
title = record['metadata']['title']
description = record['metadata']['description']
# if len(description) > 0 and len(title) > 0:
# ds_2.insert_one(record)
try:
if detect(description) == 'en': #3: title, #4: description
ds_2.insert_one(record)
except:
continue
print("After:", ds_2.find().count())
| [
"anshuman14021@iiitd.ac.in"
] | anshuman14021@iiitd.ac.in |
4c67a37578e6068ad0497b192d132793cfe18577 | e7a7116066d2552c00ffeeaf56dc36492bdfbf32 | /utils.py | cf1ba7bd335f1f9985862f764cca8efa1c344440 | [] | no_license | StanfordAI4HI/tclust-eval | b3d54a19c2359a0e49b030722931cae143e2ba6d | 1fb87bfa666670486740e09d55ffdf2d7a8283cc | refs/heads/master | 2020-04-24T18:26:14.135992 | 2019-05-09T20:25:32 | 2019-05-09T20:25:32 | 172,179,600 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,962 | py | import numpy as np
from collections import OrderedDict
from munkres import Munkres, make_cost_matrix
from sklearn.metrics.cluster import contingency_matrix
from sklearn.preprocessing import LabelEncoder
def entropy(labels):
"""Calculates the entropy for a labeling."""
if len(labels) == 0:
return 1.0
label_idx = np.unique(labels, return_inverse=True)[1]
pi = np.bincount(label_idx).astype(np.float64)
pi = pi[pi > 0]
pi_sum = np.sum(pi)
return -np.sum((pi / pi_sum) * (np.log(pi) - np.log(pi_sum)))
def get_segment_dict(gt, pred, weight_vec=None):
gt_clusters = np.unique(gt)
segment_dict = OrderedDict({y: OrderedDict() for y in gt_clusters})
prev_cg = gt[0]
prev_cp = -1
prev_boundary = 0
token_list = []
weights = []
segment = []
for i, (cg, cp) in enumerate(zip(gt, pred)):
if cg != prev_cg:
segment_dict[prev_cg][(prev_boundary, i - 1)] = (token_list, weights, segment)
prev_cg = cg
prev_boundary = i
token_list = []
weights = []
segment = []
prev_cp = -1
if cp != prev_cp:
token_list.append(cp)
weights.append(0)
prev_cp = cp
weights[-1] += weight_vec[i] if weight_vec is not None else 0
segment.append(cp)
segment_dict[prev_cg][(prev_boundary, i)] = (token_list, weights, segment)
return segment_dict
def relabel_clustering(temporal_clustering):
relabeled = []
last_item = temporal_clustering[0]
counter = 0
for item in temporal_clustering:
if item != last_item:
counter += 1
relabeled.append(counter)
last_item = item
return relabeled
# Relabel the cluster labels in the secondary clustering based on the correspondences with the primary clustering.
# The primary use-case is ease of visualization.
def relabel_clustering_with_munkres_correspondences(primary_temporal_clustering, secondary_temporal_clustering):
# First make sure we relabel everything with 0-indexed, continuous cluster labels
le = LabelEncoder()
secondary_temporal_clustering = le.fit_transform(secondary_temporal_clustering)
# Build out the contingency matrix
mat = contingency_matrix(primary_temporal_clustering, secondary_temporal_clustering)
# Create the cost matrix
cost_mat = make_cost_matrix(mat, lambda x: len(primary_temporal_clustering) - x)
# Apply the Hungarian method to find the optimal cluster correspondence
m = Munkres()
indexes = m.compute(cost_mat)
# Create the correspondences between secondary clusters and primary labels
correspondences = {b: a for a, b in indexes}
# What're the labels in the primary and secondary clusterings
primary_labels, secondary_labels = set(np.unique(primary_temporal_clustering)), set(np.unique(secondary_temporal_clustering))
proposed_label = max(primary_labels) + 1
for label in secondary_labels:
if label not in correspondences:
correspondences[label] = proposed_label
proposed_label += 1
# Relabel the temporal clustering
relabeled_secondary_temporal_clustering = [correspondences[e] for e in secondary_temporal_clustering]
return relabeled_secondary_temporal_clustering
def heaviest_common_subsequence_with_alignment(l1, l2, w1, w2):
h1, h2 = float(sum(w1)), float(sum(w2))
if len(l1) == 0 or len(l2) == 0 or h1 == 0 or h2 == 0:
return 0.
dp = np.zeros((len(l1), len(l2)))
last_match = np.zeros((len(l1), len(l2)))
w1_sum = 0.
for i in range(len(l1)):
w2_sum = 0.
for j in range(len(l2)):
if i == 0 and j == 0:
emd = abs(w1[0] / h1 - w2[0] / h2) * (h1 + h2)
dp[0, 0] = 0 if l1[0] != l2[0] else max(w1[0] + w2[0] - emd, 0)
last_match[0, 0] = 1 if l1[0] == l2[0] else 0
elif i == 0:
emd1 = abs(0 / h1 - w2_sum / h2) * (h1 + h2)
emd2 = abs(w1[0] / h1 - (w2_sum + w2[j]) / h2) * (h1 + h2)
dp[0, j] = dp[0, j - 1] if l1[0] != l2[j] else max(dp[0, j - 1], w1[0] + w2[j] - (emd1 + emd2))
last_match[0, j] = 1 if l1[0] == l2[j] and w1[0] + w2[j] - (emd1 + emd2) >= dp[0, j - 1] else 0
elif j == 0:
emd1 = abs(w1_sum / h1 - 0 / h2) * (h1 + h2)
emd2 = abs((w1_sum + w1[i]) / h1 - w2[0] / h2) * (h1 + h2)
dp[i, 0] = dp[i - 1, 0] if l1[i] != l2[0] else max(dp[i - 1, 0], w1[i] + w2[0] - (emd1 + emd2))
last_match[i, 0] = 1 if l1[i] == l2[0] and w1[i] + w2[0] - (emd1 + emd2) >= dp[i - 1, 0] else 0
else:
emd1 = abs(w1_sum / h1 - w2_sum / h2) * (h1 + h2) * (1 - last_match[i - 1, j - 1])
emd2 = abs((w1_sum + w1[i]) / h1 - (w2_sum + w2[j]) / h2) * (h1 + h2)
dp[i, j] = max(dp[i, j - 1], dp[i - 1, j]) if l1[i] != l2[j] else max(dp[i, j - 1], dp[i - 1, j],
dp[i - 1, j - 1] + w1[i] + w2[
j] - (emd1 + emd2))
last_match[i, j] = 1 if l1[i] == l2[j] and dp[i - 1, j - 1] + w1[i] + w2[j] - (emd1 + emd2) == dp[
i, j] else 0
w2_sum += w2[j]
w1_sum += w1[i]
dp = np.array(dp)
return dp[-1, -1]
def heaviest_common_subsequence(l1, l2, w1, w2):
if len(l1) == 0 or len(l2) == 0:
return 0.
dp = np.zeros((len(l1), len(l2)))
for i in range(len(l1)):
for j in range(len(l2)):
if i == 0 and j == 0:
dp[0, 0] = 0 if l1[0] != l2[0] else w1[0] + w2[0]
elif i == 0:
dp[0, j] = dp[0, j - 1] if l1[0] != l2[j] else max(dp[0, j - 1], w1[0] + w2[j])
elif j == 0:
dp[i, 0] = dp[i - 1, 0] if l1[i] != l2[0] else max(dp[i - 1, 0], w1[i] + w2[0])
else:
dp[i, j] = max(dp[i, j - 1], dp[i - 1, j]) if l1[i] != l2[j] else max(dp[i, j - 1], dp[i - 1, j],
dp[i - 1, j - 1] + w1[i] + w2[j])
dp = np.array(dp)
return dp[-1, -1]
def heaviest_common_substring(l1, l2, w1, w2):
if len(l1) == 0 or len(l2) == 0:
return 0.
dp = np.zeros((len(l1), len(l2)))
for i in range(len(l1)):
for j in range(len(l2)):
if i == 0 and j == 0:
dp[0, 0] = 0 if l1[0] != l2[0] else w1[0] + w2[0]
elif i == 0:
dp[0, j] = 0 if l1[0] != l2[j] else w1[0] + w2[j]
elif j == 0:
dp[i, 0] = 0 if l1[i] != l2[0] else w1[i] + w2[0]
else:
dp[i, j] = 0 if l1[i] != l2[j] else dp[i - 1, j - 1] + w1[i] + w2[j]
dp = np.array(dp)
return np.max(dp) | [
"kgoel93@gmail.com"
] | kgoel93@gmail.com |
5fddff59502d10efd1cd2c33bbf7420a94d10c2f | 0df28488c03d8e19c184ebfe320a960e87d35fb4 | /sentenceSimplier/tools.py | e3232efcb348a98b49662a324c35a150d9f6fef8 | [] | no_license | sahilmakkar1983/NLPTools | 90deee62a29934c9d8884805903495701cc55dc2 | 7c96033791b672e696ece50974447f7137b9f4a2 | refs/heads/master | 2020-06-24T12:54:21.955654 | 2019-07-26T07:31:32 | 2019-07-26T07:31:32 | 198,967,558 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,389 | py | # -*- coding: utf-8 -*-
# Practical Natural Language Processing Tools (practNLPTools): Combination of Senna and Stanford dependency Extractor
#
# Copyright (C) 2014 PractNLP Project
# Author: Biplab Ch Das' <bipla12@cse.iitb.ac.in>
# URL: <http://www.cse.iitb.ac.in/biplab12>
# For license information, see LICENSE.TXT
"""
A module for interfacing with the SENNA and Stanford Dependency Extractor.
SUPPORTED_OPERATIONS: It provides Part of Speech Tags, Semantic Role Labels, Shallow Parsing (Chunking), Named Entity Recognisation (NER), Dependency Parse and Syntactic Constituency Parse.
Requirement: Java Runtime Environment :)
"""
import subprocess
import os
from platform import architecture, system
class Annotator:
def getSennaTagBatch(self, sentences):
input_data = ''
for sentence in sentences:
input_data += sentence + '\n'
input_data = input_data[:-1]
package_directory = os.path.dirname(os.path.abspath(__file__))
os_name = system()
executable = ''
if os_name == 'Linux':
bits = architecture()[0]
if bits == '64bit':
executable = 'senna-linux64'
elif bits == '32bit':
executable = 'senna-linux32'
else:
executable = 'senna'
if os_name == 'Windows':
executable = 'senna-win32.exe'
if os_name == 'Darwin':
executable = 'senna-osx'
senna_executable = os.path.join(package_directory, executable)
cwd = os.getcwd()
os.chdir(package_directory)
p = subprocess.Popen(senna_executable, stdout=subprocess.PIPE, stdin=subprocess.PIPE)
senna_stdout = p.communicate(input=input_data)[0]
os.chdir(cwd)
return senna_stdout.split("\n\n")[0:-1]
def getSennaTag(self, sentence):
input_data = sentence
package_directory = os.path.dirname(os.path.abspath(__file__))
os_name = system()
executable = ''
if os_name == 'Linux':
bits = architecture()[0]
if bits == '64bit':
executable = 'senna-linux64'
elif bits == '32bit':
executable = 'senna-linux32'
else:
executable = 'senna'
if os_name == 'Windows':
executable = 'senna-win32.exe'
if os_name == 'Darwin':
executable = 'senna-osx'
senna_executable = os.path.join(package_directory, executable)
cwd = os.getcwd()
os.chdir(package_directory)
p = subprocess.Popen(senna_executable, stdout=subprocess.PIPE, stdin=subprocess.PIPE)
senna_stdout = p.communicate(input=input_data)[0]
os.chdir(cwd)
return senna_stdout
def getDependency(self, parse):
package_directory = os.path.dirname(os.path.abspath(__file__))
cwd = os.getcwd()
os.chdir(package_directory)
parsefile = open(cwd + '/in.parse', 'w')
parsefile.write(parse)
parsefile.close()
p = subprocess.Popen([ 'java', '-cp', 'stanford-parser.jar', 'edu.stanford.nlp.trees.EnglishGrammaticalStructure', '-treeFile', cwd + '/in.parse', '-collapsed', ], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p.wait()
stanford_out = p.stdout.read()
os.chdir(cwd)
return stanford_out.strip()
def getBatchAnnotations(self, sentences, dep_parse=False):
annotations = []
batch_senna_tags = self.getSennaTagBatch(sentences)
for senna_tags in batch_senna_tags:
annotations += [self.getAnnotationsAfterTagging(senna_tags)]
if dep_parse:
syntax_tree = ''
for annotation in annotations:
syntax_tree += annotation['syntax_tree']
dependencies=self.getDependency(syntax_tree).split("\n\n")
# print dependencies
if len(annotations) == len(dependencies):
for (d, a) in zip(dependencies, annotations):
a['dep_parse'] = d
return annotations
def getAnnotationsAfterTagging(self, senna_tags, dep_parse=False):
annotations = {}
senna_tags = map(lambda x: x.strip(), senna_tags.split('\n'))
no_verbs = len(senna_tags[0].split('\t')) - 6
words = []
pos = []
chunk = []
ner = []
verb = []
srls = []
syn = []
for senna_tag in senna_tags:
senna_tag = senna_tag.split('\t')
words += [senna_tag[0].strip()]
pos += [senna_tag[1].strip()]
chunk += [senna_tag[2].strip()]
ner += [senna_tag[3].strip()]
verb += [senna_tag[4].strip()]
srl = []
for i in range(5, 5 + no_verbs):
srl += [senna_tag[i].strip()]
srls += [tuple(srl)]
syn += [senna_tag[-1]]
roles = []
for j in range(no_verbs):
role = {}
i = 0
temp = ''
curr_labels = map(lambda x: x[j], srls)
for curr_label in curr_labels:
splits = curr_label.split('-')
if splits[0] == 'S':
if len(splits) == 2:
if splits[1] == 'V':
role[splits[1]] = words[i]
else:
if splits[1] in role:
role[splits[1]] += ' ' + words[i]
else:
role[splits[1]] = words[i]
elif len(splits) == 3:
if splits[1] + '-' + splits[2] in role:
role[splits[1] + '-' + splits[2]] += ' ' + words[i]
else:
role[splits[1] + '-' + splits[2]] = words[i]
elif splits[0] == 'B':
temp = temp + ' ' + words[i]
elif splits[0] == 'I':
temp = temp + ' ' + words[i]
elif splits[0] == 'E':
temp = temp + ' ' + words[i]
if len(splits) == 2:
if splits[1] == 'V':
role[splits[1]] = temp.strip()
else:
if splits[1] in role:
role[splits[1]] += ' ' + temp
role[splits[1]] = role[splits[1]].strip()
else:
role[splits[1]] = temp.strip()
elif len(splits) == 3:
if splits[1] + '-' + splits[2] in role:
role[splits[1] + '-' + splits[2]] += ' ' + temp
role[splits[1] + '-' + splits[2]] = role[splits[1] + '-' + splits[2]].strip()
else:
role[splits[1] + '-' + splits[2]] = temp.strip()
temp = ''
i += 1
if 'V' in role:
roles += [role]
annotations['words'] = words
annotations['pos'] = zip(words, pos)
annotations['posS'] = pos
annotations['ner'] = zip(words, ner)
annotations['srl'] = roles
annotations['srls'] = srls
annotations['verbs'] = filter(lambda x: x != '-', verb)
annotations['verbAll'] = verb
annotations['chunk'] = zip(words, chunk)
annotations['dep_parse'] = ''
annotations['syntax_tree'] = ''
for (w, s, p) in zip(words, syn, pos):
annotations['syntax_tree']+=s.replace("*","("+p+" "+w+")")
# annotations['syntax_tree']=annotations['syntax_tree'].replace("S1","S")
if dep_parse:
annotations['dep_parse']=self.getDependency(annotations['syntax_tree'])
return annotations
def getAnnotations(self, sentence, dep_parse=False):
annotations = {}
senna_tags = self.getSennaTag(sentence).decode('utf-8')
#senna_tags = self.getSennaTag(sentence).encode('utf-8')
print(senna_tags)
senna_tags = list(map(lambda x: x.strip(), senna_tags.split('\n')))
no_verbs = len(senna_tags[0].split('\t')) - 6
words = []
pos = []
chunk = []
ner = []
verb = []
srls = []
syn = []
for senna_tag in senna_tags[0:-2]:
senna_tag = senna_tag.split('\t')
words += [senna_tag[0].strip()]
pos += [senna_tag[1].strip()]
chunk += [senna_tag[2].strip()]
ner += [senna_tag[3].strip()]
verb += [senna_tag[4].strip()]
srl = []
for i in range(5, 5 + no_verbs):
srl += [senna_tag[i].strip()]
srls += [tuple(srl)]
syn += [senna_tag[-1]]
roles = []
#print(srl)
for j in range(no_verbs):
role = {}
i = 0
temp = ''
curr_labels = map(lambda x: x[j], srls)
for curr_label in curr_labels:
splits = curr_label.split('-')
if splits[0] == 'S':
if len(splits) == 2:
if splits[1] == 'V':
role[splits[1]] = words[i]
else:
if splits[1] in role:
role[splits[1]]+=" "+words[i]
else:
role[splits[1]] = words[i]
elif len(splits) == 3:
if splits[1] + '-' + splits[2] in role:
role[splits[1] + '-' + splits[2]] += ' '+ words[i]
else:
role[splits[1] + '-' + splits[2]] = words[i]
elif splits[0] == 'B':
temp = temp + ' ' + words[i]
elif splits[0] == 'I':
temp = temp + ' ' + words[i]
elif splits[0] == 'E':
temp = temp + ' ' + words[i]
if len(splits) == 2:
if splits[1] == 'V':
role[splits[1]] = temp.strip()
else:
if splits[1] in role:
role[splits[1]] += ' ' + temp
role[splits[1]] = role[splits[1]].strip()
else:
role[splits[1]] = temp.strip()
elif len(splits) == 3:
if splits[1] + '-' + splits[2] in role:
role[splits[1] + '-' + splits[2]] += ' ' + temp
role[splits[1] + '-' + splits[2]] = role[splits[1] + '-' + splits[2]].strip()
else:
role[splits[1] + '-' + splits[2]] = temp.strip()
temp = ''
i += 1
#if 'V' in role:
# roles += [role]
roles += [role]
#print(srls)
annotations['words'] = words
annotations['pos'] = zip(words, pos)
annotations['posS'] = pos
annotations['ner'] = zip(words, ner)
annotations['srl'] = roles
annotations['srls'] = srls
annotations['verbs'] = filter(lambda x: x != '-', verb)
annotations['verbAll'] = verb
annotations['chunk'] = zip(words, chunk)
annotations['dep_parse'] = ''
annotations['syntax_tree'] = ''
#print(roles)
for (w, s, p) in zip(words, syn, pos):
annotations['syntax_tree'] += s.replace('*', '(' + p + ' '+ w + ')')
# annotations['syntax_tree']=annotations['syntax_tree'].replace("S1","S")
if dep_parse:
annotations['dep_parse'] = self.getDependency(annotations['syntax_tree'])
return annotations
def test():
annotator = Annotator()
#print annotator.getBatchAnnotations(['He killed the man with a knife and murdered him with a dagger.', 'He is a good boy.'], dep_parse=True)
#print annotator.getAnnotations('Republican candidate George Bush was great.', dep_parse=True)['dep_parse']
#print annotator.getAnnotations('Republican candidate George Bush was great.', dep_parse=True)['chunk']
if __name__ == '__main__':
test()
| [
"sahilmakkar1983@gmail.com"
] | sahilmakkar1983@gmail.com |
da605bc187546c1cac4492ee190266de4f86bb67 | b4619a6aa617b062273602e1317b5f1fc83de6a7 | /User_backend/views.py | 5e72bc8b738ca6bcf6af51d5c29ba552dde6d836 | [] | no_license | getting-things-gnome/GTGOnline | b9c6e92bb1b3708cb3259039e5029c40a75dd3f5 | 0050c1ec9975a4ea008d6fdd1fcf50195209a83e | refs/heads/master | 2021-01-01T19:02:17.858742 | 2014-09-03T08:20:40 | 2014-09-03T08:20:40 | 9,886,164 | 8 | 6 | null | 2014-09-03T08:20:40 | 2013-05-06T12:21:03 | JavaScript | UTF-8 | Python | false | false | 6,490 | py | # Create your views here.
import sys
import json
from django.http import HttpResponse, HttpResponseRedirect, QueryDict
from django.template import loader, RequestContext
from django.shortcuts import render_to_response
from django.contrib.auth.decorators import login_required
from django.views.decorators.csrf import csrf_exempt
from User_backend.user import register_user, login_user, logout_user, \
validate_form, does_email_exist, \
fetch_gravatar_profile, authenticate_user, \
get_api_key
from Group_backend.group import find_users_from_query
from Group_backend.group import create_default_groups
from Tools.constants import *
def landing(request):
template = loader.get_template('landing.html')
errors_list = {
'0': None,
'1': 'Incorrect Email/Password combination',
'2': 'Account has been disabled',
'3': 'One or more credentials were invalid, ' \
'so Registration is unsuccessful, please register again',
'4': 'Registration Successful, you may now login',
}
error = request.session.get('error', '0');
request.session.flush()
error_dict = {'error': errors_list.get(error, 'Unknown Error')}
if error == '4':
error_dict['success'] = errors_list.get(error);
error_dict['error'] = None;
context = RequestContext(request, error_dict)
return HttpResponse(template.render(context))
def login(request):
print >>sys.stderr, "POST dict = " + str(request.POST)
#if request.POST.get('email', '') == '':
#template = loader.get_template('landing.html')
#context = RequestContext(request, {})
#return HttpResponse(template.render(context))
response = login_user(request, request.POST['email'], \
request.POST['password'])
if response == USER_LOGGED_IN:
request.session['error'] = '0'
return HttpResponseRedirect('/tasks/main/')
elif response == USER_ACCOUNT_DISABLED:
request.session['error'] = '2'
else:
request.session['error'] = '1'
return HttpResponseRedirect('/user/landing/')
def logout(request):
logout_user(request)
print >>sys.stderr, "User logout successful"
return HttpResponseRedirect('/user/landing/')
def after_login(request):
template = loader.get_template('after_login.html')
context = RequestContext(request, {})
return HttpResponse(template.render(context))
def check_email(request):
if does_email_exist(request.GET.get('email', '')):
print >>sys.stderr, "exists"
return HttpResponse('1', mimetype='application/json')
else:
print >>sys.stderr, "not exists"
return HttpResponse('0', mimetype='application/json')
@csrf_exempt
def register(request):
query_is_from_browser = True
if request.method == 'POST':
email = request.POST.get('email', '')
password = request.POST.get('password', '')
first_name = request.POST.get('first_name', 'Walter')
last_name = request.POST.get('last_name', 'White')
if request.path[1:4] == 'api':
query_is_from_browser = False
resp = HttpResponse(mimetype='application/json')
if not validate_form(email, password, first_name, last_name):
if not query_is_from_browser:
resp.content = json.dumps(LOGIN_RESPONSE_DICT['3'])
resp.status_code = 400
return resp
request.session['error'] = '3'
return HttpResponseRedirect('/user/landing/')
user = register_user(email, password, first_name, last_name)
if user != None:
create_default_groups(user)
if not query_is_from_browser:
resp.content = json.dumps(get_api_key(user))
resp.status_code = 200
return resp
response = login_user(request, email, password)
if response == USER_LOGGED_IN:
request.session['error'] = '0'
return HttpResponseRedirect('/tasks/main/')
request.session['error'] = '4'
return HttpResponseRedirect('/user/landing/')
@login_required
def search_user(request):
query = request.GET.get('query', '')
print >>sys.stderr, query
template = loader.get_template('search_user.html')
user_list = find_users_from_query(request.user, query, GROUPED)
context = RequestContext(request, {'email': request.user.email, \
'name': request.user.first_name, \
'users': json.dumps(user_list), \
'query': query, 'origin': 'search'})
return HttpResponse(template.render(context))
@csrf_exempt
def get_user_list_json(request):
query = request.POST.get('query', '')
visited = request.POST.getlist('visited[]')
visited.append(request.user.email)
print >>sys.stderr, visited
user_list = find_users_from_query(request.user, query, NON_GROUPED, \
visited = visited)
return HttpResponse(json.dumps(user_list), mimetype='application/json')
def show_user_profile(request):
profile_email = request.GET.get('email', request.user.email)
if profile_email == '':
profile_email = request.user.email
template = loader.get_template('user_profile.html')
context = RequestContext(request, {'email': request.user.email, \
'name': request.user.get_full_name(), \
'profile_email': profile_email})
return HttpResponse(template.render(context))
def get_gravatar(request):
email = request.GET.get('email', '')
email_hash = request.GET.get('hash', '')
profile_obj = fetch_gravatar_profile(email, email_hash)
if profile_obj == None:
return HttpResponse('0', mimetype='application/json')
profile = json.load(profile_obj)
print >>sys.stderr, 'profile = ' + str(profile)
return HttpResponse(json.dumps(profile), mimetype='application/json')
@csrf_exempt
def custom_auth_for_gtg(request):
email = request.POST.get('email', '')
password = request.POST.get('password', '')
user_object = authenticate_user(email, password)
resp = HttpResponse(mimetype='application/json')
if user_object != None:
resp.content = json.dumps(get_api_key(user_object))
resp.status_code = 200
return resp
resp.status_code = 400
return resp
| [
"parinporecha@gmail.com"
] | parinporecha@gmail.com |
dc2f5420408a700287563c423718370a936dee4c | 47acd038366827429f8a706b9b2f87f23e8558d5 | /gui/ControlWidget.py | 7fa871fb3d906165d865f5c29bc19a9f6d95fe49 | [] | no_license | mtb-beta/melody_catcher | 3085edf9b38138ffd3faa78aaf1cd69d43510894 | 5ec289a984b3f22925c18b61c196f9222561ae81 | refs/heads/master | 2020-12-24T18:03:53.498690 | 2013-02-13T17:14:23 | 2013-02-13T17:14:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,878 | py | #!/usr/bin/env python
#-*-coding:utf-8-*-
"""
Author:mtb_beta
Date:
2012年 12月17日 月曜日 17時42分46秒 JST
Note:コントロールパネルを制御するクラス
"""
from PyQt4 import QtCore,QtGui
from PyQt4.phonon import Phonon
class ControlWidget(QtGui.QWidget):
def __init__(self,control_width,control_height,parent = None):
QtGui.QWidget.__init__(self,parent = parent)
self.width = control_width-20
self.height = control_height-20
self.setup()
def setup(self):
"""
コントロールパネルのセットアップ
"""
self.setupAudio()
self.sources = []
def setup2(self,main_window):
self.setupUi(main_window)
self.timeLcd.display("00:00:00")
def set_wave_panel(self,wave_panel):
self.wave_widget = wave_panel.wave_widget
def setupUi(self,main_window):
self.main_window = main_window
bar = QtGui.QToolBar()
bar.addAction(self.main_window.playAction)
bar.addAction(self.main_window.pauseAction)
bar.addAction(self.main_window.stopAction)
bar.addAction(self.main_window.analyzeAction)
self.seekSlider = Phonon.SeekSlider(self)
self.seekSlider.setMediaObject(self.mediaObject)
self.volumeSlider = Phonon.VolumeSlider(self)
self.volumeSlider.setAudioOutput(self.audioOutput)
self.volumeSlider.setSizePolicy(QtGui.QSizePolicy.Maximum,QtGui.QSizePolicy.Maximum)
volumeLabel = QtGui.QLabel()
volumeLabel.setPixmap(QtGui.QPixmap('images/volume.png'))
palette = QtGui.QPalette()
palette.setBrush(QtGui.QPalette.Light,QtCore.Qt.darkGray)
self.timeLcd = QtGui.QLCDNumber()
self.timeLcd.setPalette(palette)
headers = ('title','artist','album','year')
self.musicTable = QtGui.QTableWidget(0,4)
self.musicTable.setHorizontalHeaderLabels(headers)
self.musicTable.setSelectionMode(QtGui.QAbstractItemView.SingleSelection)
self.musicTable.setSelectionBehavior(QtGui.QAbstractItemView.SelectRows)
self.musicTable.cellPressed.connect(self.tableClicked)
seekerLayout = QtGui.QHBoxLayout()
seekerLayout.addWidget(self.seekSlider)
playbackLayout = QtGui.QHBoxLayout()
playbackLayout.addWidget(bar)
playbackLayout.addStretch()
playbackLayout.addWidget(self.seekSlider)
playbackLayout.addWidget(volumeLabel)
playbackLayout.addWidget(self.volumeSlider)
playbackLayout.addWidget(self.timeLcd)
mainLayout = QtGui.QVBoxLayout()
#mainLayout.addWidget(self.musicTable)
#mainLayout.addLayout(seekerLayout)
mainLayout.addLayout(playbackLayout)
self.setLayout(mainLayout)
self.setFixedSize(self.width,self.height)
def tableClicked(self,row,column):
wasPlaying = (self.mediaObject.state() == Phonon.PlayingState)
self.mediaObject.stop()
self.mediaObject.clearQueue()#TO DO
self.mediaObject.setCurrentSource(self.sources[row])
if wasPlaying:
self.mediaObject.play()
else:
self.mediaObject.stop()
def setupAudio(self):
"""
オーディオ関連のセットアップ
"""
self.audioOutput = Phonon.AudioOutput(Phonon.MusicCategory,self)
self.mediaObject = Phonon.MediaObject(self)
self.metaInformationResolver = Phonon.MediaObject(self)
self.mediaObject.setTickInterval(10)
self.mediaObject.tick.connect(self.tick)
self.mediaObject.stateChanged.connect(self.stateChange)
self.metaInformationResolver.stateChanged.connect(self.metaStateChanged)
self.mediaObject.currentSourceChanged.connect(self.sourceChanged)
self.mediaObject.aboutToFinish.connect(self.aboutToFinish)
Phonon.createPath(self.mediaObject,self.audioOutput)
def sizeHint(self):
return QtCore.QSize(500,300)
def aboutToFinish(self):
"""
楽曲の再生が終了した時に実行される関数
"""
index = self.sources.index(self.mediaObject.currentSource()) + 1
if len(self.sources) > index:
self.mediaObject.enqueue(self.sources[index])
def sourceChanged(self,source):
"""
再生するファイルが変更された時に実行される関数
"""
self.wave_index = str(self.sources.index(source))
self.musicTable.selectRow(self.sources.index(source))
self.timeLcd.display('00:00:00')
self.wave_widget.draw_wave(self.wave_index)
def metaStateChanged(self,newState,oldState):
"""
参照するファイルが追加された時に実行される関数
"""
if newState == Phonon.ErrorState:
QtGui.QMessageBox.warning(self,"Error opening files",
self.metaInformatinonResolver.errorString())
while self.sources and self.sources.pop() != self.metaInformationResolver.currentSource():
pass
return
if newState != Phonon.StoppedState and newState != Phonon.PausedState:
return
if self.metaInformationResolver.currentSource().type() == Phonon.MediaSource.Invalid:
return
metaData = self.metaInformationResolver.metaData()
title = metaData.get("TITLE",[''])[0]
if not title:
title = self.metaInformationResolver.currentSource().fileName()
titleItem = QtGui.QTableWidgetItem(title)
titleItem.setFlags(titleItem.flags() ^ QtCore.Qt.ItemIsEditable)
artist = metaData.get('ARTIST',[''])[0]
artistItem = QtGui.QTableWidgetItem(artist)
artistItem.setFlags(artistItem.flags() ^ QtCore.Qt.ItemIsEditable)
album = metaData.get('ALBUM',[''])[0]
albumItem = QtGui.QTableWidgetItem(album)
albumItem.setFlags(albumItem.flags() ^ QtCore.Qt.ItemIsEditable)
year = metaData.get('DATE',[''])[0]
yearItem = QtGui.QTableWidgetItem(year)
yearItem.setFlags(yearItem.flags() ^ QtCore.Qt.ItemIsEditable)
currentRow = self.musicTable.rowCount()
self.musicTable.insertRow(currentRow)
self.musicTable.setItem(currentRow,0,titleItem)
self.musicTable.setItem(currentRow,1,artistItem)
self.musicTable.setItem(currentRow,2,albumItem)
self.musicTable.setItem(currentRow,3,yearItem)
if not self.musicTable.selectedItems():
self.musicTable.selectRow(0)
self.mediaObject.setCurrentSource(self.metaInformationResolver.currentSource())
index =self.sources.index(self.metaInformationResolver.currentSource()) + 1
if len(self.sources) > index :
self.metaInformationResolver.setCurrentSource(self.sources[index])
else :
self.musicTable.resizeColumnsToContents()
if self.musicTable.columnWidth(0)> 300:
self.musicTable.setColumnWidth(0,300)
self.wave_widget.draw_wave(self.wave_index)
def tick(self,time):
"""
tickが更新された際に動作する関数
"""
self.wave_widget.setBarTime(time)
displayTime = QtCore.QTime(0,(time/60000) % 60,(time /1000) % 60 )
self.timeLcd.display(displayTime.toString('mm:ss'))
def stateChange(self,newState,oldState):
"""
メディアオブジェクトのステータスが変更された際に動作する関数
"""
if newState == Phonon.ErrorState:
if self.mediaObject.erroType() == Phonon.FatalError:
QtGui.QMessageBox.warning(self,"Fatal Error",
self.mediaObject.errorString())
else:
QtGui.QMessageBox.warnings(self,"Error",
self.mediaObject.errorString())
elif newState == Phonon.PlayingState:
self.main_window.playAction.setEnabled(False)
self.main_window.pauseAction.setEnabled(True)
self.main_window.stopAction.setEnabled(True)
elif newState == Phonon.StoppedState:
self.main_window.stopAction.setEnabled(False)
self.main_window.playAction.setEnabled(True)
self.main_window.pauseAction.setEnabled(False)
self.timeLcd.display("00:00:00")
elif newState == Phonon.PausedState:
self.main_window.pauseAction.setEnabled(False)
self.main_window.stopAction.setEnabled(True)
self.main_window.playAction.setEnabled(True)
| [
"mtb.toya0403@gmail.com"
] | mtb.toya0403@gmail.com |
22f53a193adbd741ca7b66d76cd6304ac67fc39e | 3bd9959909396cb5e94848498a0349280843dc16 | /app_one/main_app/migrations/0003_message.py | f27083f21a27ef54bb2c67b1c5ebc21292601efe | [] | no_license | elixire03/emjgcWeb | c0c85d969240c976dfb81fae5e13d542e27a52a6 | ccdc4ecc1636ff3e7e44fda3522852615ccae6d6 | refs/heads/master | 2022-11-16T02:44:16.806095 | 2020-07-11T17:23:52 | 2020-07-11T17:23:52 | 278,889,999 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 752 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2020-06-10 17:53
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main_app', '0002_contact_setting'),
]
operations = [
migrations.CreateModel(
name='message',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('email', models.EmailField(max_length=254)),
('subject', models.CharField(max_length=120)),
('message', models.TextField()),
],
),
]
| [
"bernzgeron@yahoo.com"
] | bernzgeron@yahoo.com |
125750e87f87dbe02927348d84c7ffc69e1c15f5 | ad6442301437e847296c5fb5571da56ec59a750f | /TD/TD2/Ex 6.py | 5c63afca72f4d5a7a1b95cc6d7642f23b9b457b7 | [] | no_license | SlamaFR/L1-S2-Algorithm-Programming | 2024d6a8ac59190a561e4bb9ce66c4fd84c9c2cb | fa0d641590bf7ac000548a01a676d311277d3bee | refs/heads/master | 2022-03-12T22:03:34.537689 | 2019-05-15T20:46:56 | 2019-05-15T20:46:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,712 | py | def diagonale_g(n):
"""
Affiche une diagonale d'étoiles descendant vers la gauche.
:param n: Nombre d'étoiles.
>>> diagonale_g(5)
*
*
*
*
*
"""
if n < 1:
return
print(" " * (n - 1) + "*")
diagonale_g(n - 1)
def diagonale_d(n):
"""
Affiche une diagonale d'étoiles descendant vers la gauche.
:param n: Nombre d'étoiles.
>>> diagonale_d(5)
*
*
*
*
*
"""
if n < 1:
return
diagonale_d(n - 1)
print(" " * (n - 1) + "*")
def triangle(n, i=0):
"""
Affiche un triangle d'étoile de hauteur n.
:param n: Hauteur du triangle.
:param i: Indice de la ligne.
>>> triangle(5)
*
***
*****
*******
*********
"""
if n < 1:
return
print((n - 1) * " " + (2 * i + 1) * "*")
triangle(n - 1, i + 1)
def triangle2(n, i=0):
"""
Affiche un triangle renversé d'étoiles de hauteur n.
:param n: Hauteur du triangle.
:param i: Indice de la ligne.
>>> triangle2(5)
*********
*******
*****
***
*
"""
if n < 1:
return
triangle2(n - 1, i + 1)
print((n - 1) * " " + (2 * i + 1) * "*")
def sablier(n, i=0):
"""
Affiche un sablier d'étoiles de hauteur n.
:param n: Hauteur du sablier.
:param i: Indice de la ligne.
>>> sablier(5)
*****
***
*
***
*****
"""
if n < 1:
return
if n - i > 0:
print(i * " " + (n - i) * "*")
elif n > 1:
print(abs(n * 2 - i) * " " + (abs(n - i) + 2) * "*")
else:
print((abs(n - i) + 2) * "*")
sablier(n - 1, i + 1)
| [
"irwin.madet@gmail.com"
] | irwin.madet@gmail.com |
e850b124c421c6c6d1856d64db21c4677aa6b098 | 0edba37bb447653b094ed2f7de5d0e674a175b03 | /verified/ping_hosts_v1.py | 751a7f0a9cfb56c550b154c8de2750c5f55abce8 | [] | no_license | r98522608/blackHatPrac | b76707d4e957328a099a02d85db204922d10763d | c16cdc7acacdc2bdf90066d9c7efa2a679a9bab7 | refs/heads/master | 2021-01-11T16:05:05.984698 | 2017-01-25T09:55:27 | 2017-01-25T09:55:27 | 80,001,775 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 787 | py | """
Working in both python3.5 & python2.7
Before using, need pip install ipy
"""
import shlex
import subprocess
import socket
mypath = '/etc/hosts'
fp = open(mypath)
for line in fp.readlines():
ip = line.rstrip()
ips = ip.split()
if ips:
try:
socket.inet_aton(ips[0])
tmp_cmd = "ping -c1 {0}".format(ips[0])
cmd = shlex.split(tmp_cmd)
try:
output = subprocess.check_output(cmd)
except subprocess.CalledProcessError as e:
print ("The IP {0} is NotReacahble".format(cmd[2]))
else:
print ("The IP {0} is Reachable".format(cmd[2]))
except socket.error as e:
print ("==========={}, is not ipv4 address===========".format(ips[0]))
| [
"r98522608@ntu.edu.tw"
] | r98522608@ntu.edu.tw |
eb545e93e2a06c4d1a834c571cdfaa270e57ca76 | 8e18e5eadb39ee854560c3e06cd7ad936eb90373 | /.py/913练习五.py | ccccb5d520b68a02102fb4667dc6169e88b4b422 | [] | no_license | loucx/pycodes | 2ae344b78208062a8b25c67b206e65551140a05f | 75d8b3b64fbe0eca5d3d60be69056c54076cacf9 | refs/heads/master | 2020-03-29T15:15:38.254034 | 2019-03-11T14:42:35 | 2019-03-11T14:42:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 107 | py | import re
s=input()
mat=re.compile(r"[1-9]\d{5}")
ls=mat.findall(s.strip("\n"))
print(",".join(ls))
| [
"noreply@github.com"
] | loucx.noreply@github.com |
c928dece19da6a4ec6efab3d0b7a3946a269599a | eecb0a901d23a1ca4412e3ce736fadd140792a2a | /Binary_Search_Tree/main.py | 0a545ad23cd30d51cbaaca46be6a8da4908c8fa3 | [] | no_license | MouradTerzi/Leetcode_problems_solutions | 0281350582e0746f768b752788b162bb67762fbf | f9523cd1324cf5803104ee2d3a362c65c7a413d8 | refs/heads/master | 2021-02-07T16:22:20.231958 | 2020-03-14T23:02:07 | 2020-03-14T23:02:07 | 244,050,604 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,363 | py | from binary_search_tree_base import *
elements_list=[10,15,18]
#elements_list = [8,7,10]
#elements_list = [90,10]
#elements_list=[8]
root = CreateBRT(elements_list)
#IterativePostorder(root)
IterativeBreadthFirst(root)
#IterativeInorder(root)
#root = InsertionInBRT(None,5)
#print(id(root))
#print(root.data)
#print(root.prec)
#print(root.left_node)
#print(root.right_node)
#InsertionInBRT(root,12)
#print(root.right_node.data)
#right = root.right_node
#print(id(right.prec))
#IterativePreorder(root)
#IterativeInorder(root)
#InordreRecursive(root)
#Insertion(root,3)
#Insertion(root,7)
#Insertion(root,10)
#InordreRecursive(root)
#print("Inordre after deleting")
#root = DeleteElement(root,8)
#print("Inordre after deleting:")
#InordreRecursive(root)
#DeleteElement(root,3)
#print("Inordre after second deleting:")
#InordreRecursive(root)
#root = DeleteElement(root,13)
#print("Tree after deleting the node ")
#InordreRecursive(root)
#root=TreeNode(8)
#root.left_node = TreeNode(6)
#root.right_node = TreeNode(11)
#InordreRecursive(root)
#boolean, prec,element = SearchInABM(root,8)
#print(boolean,prec,element.data)
#if boolean == True:
# print(element.data)
#Insertion(root,8)
#Insertion(root,80)
#Insertion(root,-1)
#print("The tree after insertion of the new elements")
#InordreRecursive(root)
#boolean,prec,element=SearchInABM(root,-1)
#print(prec.data) | [
"terziimourad@gmail.com"
] | terziimourad@gmail.com |
87466cd291f6c19586b503ef7109c6a64acf8ca6 | 39157a854806af4db51b986adf5096bd342bacdb | /fuzznumpy/main.py | 68c4295fe915c1b180f9319956c9abc08d8c52e3 | [] | no_license | xcainiao/fuzzing | b6b43550f7a5c05595a180d111d9ec03e4710293 | 5cadbe3e1bcc9090a68b1006cb5b6b76db990ae1 | refs/heads/master | 2020-03-30T01:51:59.811511 | 2018-09-27T14:25:05 | 2018-09-27T14:25:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 410 | py | import fuzz
import numpy as np
from ctypes import CDLL
test = CDLL("c/test.so")
test.init()
fuzz.init()
while 1:
func = fuzz.generate()
# func = """import numpy\nnumpy.half(-1).choose(numpy.void(1), numpy.broadcast_arrays((1,)))"""
test.copybuff(func)
try:
exec(func, {"np":np})
except Exception as e:
# print e
continue
print func
fuzz.register(func)
| [
"you@example.com"
] | you@example.com |
cff20d1afdd9342ffd7b1517d3263f834cb30721 | be4ed634abaab9032ad7a85802c3a62a4d5de050 | /Scripts/NomassDsmPlotLibrary.py | 8cb8a8109ccf5d788e179c128bb1dd0727340c8c | [
"MIT"
] | permissive | jacoblchapman/No-MASS | 380b08e56214ea08c885fe7a65d1f35a11bf1181 | 843ccaa461923e227a8e854daaa6952d14cb8bed | refs/heads/Master | 2021-03-27T11:47:05.283200 | 2020-08-28T18:11:39 | 2020-08-28T18:11:39 | 38,058,134 | 0 | 2 | MIT | 2020-08-28T18:11:40 | 2015-06-25T15:37:00 | Jupyter Notebook | UTF-8 | Python | false | false | 5,681 | py | """
Created by Ana Sancho (user ezzas1)
on 10 Nov 2016 at 09:25.
"""
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np
flatui = ["#e74c3c", "#34495e", "#2ecc71","#9b59b6", "#3498db", "#95a5a6",
'#d95f0e', '#fec44f', '#1c9099','#005a32', '#f768a1','#a65628']
def DistributionStartingTimes(dataframe, appliance, building = 0, ylim=None):
dic={1: 'washing-machine', 4:'dishwasher'}
dataframe = dataframe[['Building%i_Appliance%i_previous_state'%(building, appliance),
'Building%i_Appliance%i_action'%(building, appliance),
'Building%i_Appliance10_supplied'%building]]
dataframe = dataframe.dropna().reset_index(drop=True)
previous_state_col = dataframe['Building%i_Appliance%i_previous_state'%(building, appliance)]
shifted_state_col = dataframe['Building%i_Appliance%i_action'%(building, appliance)]
figure,axn = plt.subplots(1,2, figsize = (8,3))
sns.distplot(previous_state_col, kde=False, bins = 24, ax = axn[0])
second = sns.distplot(shifted_state_col, kde=False, bins = 24, ax = axn[1])
axn[0].set_ylabel('number of cases',fontsize = 16)
axn[0].set_xlabel('Starting time without DR (h)',fontsize = 12)
axn[1].set_xlabel('Starting time with DR (h)',fontsize = 12)
axn[0].set_xlim([0,24])
axn[1].set_xlim([0,24])
if not ylim: ylim = max(axn[0].get_ylim()[1], axn[1].get_ylim()[1])
axn[0].set_ylim([0,ylim])
axn[1].set_ylim([0,ylim])
figure.suptitle('Disitribution of the starting times - %s'%dic[appliance],fontsize = 14)
# for solar shadow
# x= mean_pv.index/1440.*24
# y=mean_pv.values
# ax2 = axn[1].twinx()
# # ax2.fill_between(x,0,y, color= flatui[7], alpha=0.3)
# ax2.set_yticks([])
# sec.plot(mean_pv.values)
return figure
def boxplotSC(dataframe_week):
dataframe = dataframe_week[['self_cons_index','self_cons_potential']]
figure = plt.figure(figsize = (6,2))
sns.set(style='white')
sns.set_context('talk')
sns.boxplot(dataframe, color='white', linewidth=1.4, orient='h')
plt.setp(figure.get_axes()[0].lines, color='k', alpha=0.7)
labels = ['self consumption', 'self consumption potential']
plt.xlabel('percentage (%)')
figure.get_axes()[0].set_yticklabels(labels)
return figure
def ShowMeTariff(tariffArray):
sns.set_context('talk')
sns.set_style('ticks')
figure = plt.figure(figsize = (10,3))
plt.bar(range(0,24),tariffArray, color = flatui[5],edgecolor=flatui[5], label='TOU')
plt.xlabel('time (h)')
plt.xlim([0,24])
plt.ylim([0,1])
plt.ylabel('grid cost')
return figure
def barplot(dataframe_week, per_simulation = False):
dataframe = dataframe_week[['on-peak', 'mid-peak','off-peak','pvExport']]/60000 # to convert to kWh
figure = plt.figure()
color = [flatui[0], flatui[1], flatui[2], flatui[7]]
if per_simulation:
dataframe.plot.bar(stacked = True, color=color)
else:
dataframe.mean().plot.bar(stacked = False, color=color, figsize = (4,5))
plt.ylabel('Energy (kWh)')
# plt.legend().set_visible(False)
plt.xticks(rotation=0)
return figure
def boxplotSC_local_neigh(analysis_SC_tocompare_df, title = None):
# dataframe = analysis_SC_tocompare_df.unstack('building').unstack('col')[['self_cons_local','self_cons_neigh']]
# dataframe = dataframe_week[['self_cons_index','self_cons_potential']]
figure = plt.figure(figsize = (6,2))
sns.set(style='white')
sns.boxplot(data = analysis_SC_tocompare_df,y='penetration',x='SC',hue='Level', orient='h',
linewidth=1.1, color = 'gray', fliersize = 3.5, )
# plt.setp(figure.get_axes()[0].lines, color='k', alpha=0.7)
plt.ylabel('PV penetration (%)')
plt.xlabel('Self-consumption (%)')
plt.legend(bbox_to_anchor=(1.32,1))
plt.xlim([-0.3,80])
# figure.get_axes()[0].set_yticklabels(labels)
if title: plt.title(title)
return figure
def boxplotSC_multibuilding(analysis_SC_tocompare_df, title = None):
figure = plt.figure(figsize = (6,6))
sns.set(style='white')
sns.boxplot(data = analysis_SC_tocompare_df,y='Level',x='SC',hue='building', orient='h',
linewidth=1.1, palette = flatui, fliersize = 3.5, )
# plt.setp(figure.get_axes()[0].lines, color='k', alpha=0.7)
plt.ylabel(' ')
plt.xlabel('Self-consumption (%)')
plt.legend(loc = 4, title='Building') #bbox_to_anchor=(1.32,1),
# plt.xlim([0,30])
# figure.get_axes()[0].set_yticklabels(labels)
if title: plt.title(title)
return figure
def barplot_multibuilding(analysis_SC_df, stacked = True):
dataframe = analysis_SC_df.unstack('building').unstack('col')[['on-peak', 'mid-peak','off-peak','pvExport']]/60000 # to convert to kWh
figure = plt.figure()
color = [flatui[0], flatui[1], flatui[2], flatui[7]]
if stacked:
dataframe.mean(level=0).plot.bar(stacked=True, color=color)
else:
dataframe.mean(level=0).plot.bar(stacked=False, color=color)
plt.ylabel('Energy (kWh)')
# plt.legend().set_visible(False)
plt.xticks(rotation=0)
plt.xlabel('Building')
return figure
def quickAnalysisGraph(quickAnalysisDF, title, label_global ='_received', ):
rec_graph = (quickAnalysisDF.sum().unstack())/10**5
fig, axn = plt.subplots(1,2, figsize = (12,3), sharey=True,)
rec_graph.ix[:,:3].plot.bar(stacked=True, color=flatui,ax = axn[0], grid=True)
rec_graph.ix[:,3].plot.bar(color=flatui[5],ax = axn[1], grid=True, label = label_global, legend=True,)
axn[0].set_title(title)
axn[0].set_ylabel(title +' power (x10^5)')
return fig | [
"jacoblchapman@gmail.com"
] | jacoblchapman@gmail.com |
33e63f19a07ed941138abfc59de29452102d7eb1 | 75e8c34f770ad9e1e8542ba5c41a5479ee08e69c | /tests/test_main.py | fc40ae7cf4bbee68a83c933a14d7fd5226983240 | [
"MIT"
] | permissive | vmaikova/flask_system | 1a07bd9f297c116dffb4a0330bc5a9d92456b762 | 546413a37383bd8651b6c9c23f050c85e1180b21 | refs/heads/master | 2021-04-27T07:09:29.807072 | 2018-03-29T10:18:21 | 2018-03-29T10:18:21 | 122,626,035 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,307 | py | # -*- coding: utf-8 -*-
from pytest import raises
# The parametrize function is generated, so this doesn't work:
#
# from pytest.mark import parametrize
#
import pytest
parametrize = pytest.mark.parametrize
from flask_platform import metadata
from flask_platform.main import main
class TestMain(object):
@parametrize('helparg', ['-h', '--help'])
def test_help(self, helparg, capsys):
with raises(SystemExit) as exc_info:
main(['progname', helparg])
out, err = capsys.readouterr()
# Should have printed some sort of usage message. We don't
# need to explicitly test the content of the message.
assert 'usage' in out
# Should have used the program name from the argument
# vector.
assert 'progname' in out
# Should exit with zero return code.
assert exc_info.value.code == 0
@parametrize('versionarg', ['-V', '--version'])
def test_version(self, versionarg, capsys):
with raises(SystemExit) as exc_info:
main(['progname', versionarg])
out, err = capsys.readouterr()
# Should print out version.
assert err == '{0} {1}\n'.format(metadata.project, metadata.version)
# Should exit with zero return code.
assert exc_info.value.code == 0
| [
"vmaikova@yahoo.com"
] | vmaikova@yahoo.com |
1df190b393e91b1201a3c30b120bc9a49a40a1b8 | 65329299fca8dcf2e204132624d9b0f8f8f39af7 | /napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/router_information/tlvs/tlv/segment_routing_sid_label_range/__init__.py | e65b58e4df63d726184cc67034cd38bbeac27625 | [
"Apache-2.0"
] | permissive | darylturner/napalm-yang | bf30420e22d8926efdc0705165ed0441545cdacf | b14946b884ad2019b896ee151285900c89653f44 | refs/heads/master | 2021-05-14T12:17:37.424659 | 2017-11-17T07:32:49 | 2017-11-17T07:32:49 | 116,404,171 | 0 | 0 | null | 2018-01-05T16:21:37 | 2018-01-05T16:21:36 | null | UTF-8 | Python | false | false | 9,972 | py |
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
import tlvs
class segment_routing_sid_label_range(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa-types/lsa-type/lsas/lsa/opaque-lsa/router-information/tlvs/tlv/segment-routing-sid-label-range. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: The Segment Identifier (SID) or label ranges that are supported by
the local system for Segment Routing
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_extmethods', '__tlvs',)
_yang_name = 'segment-routing-sid-label-range'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__tlvs = YANGDynClass(base=tlvs.tlvs, is_container='container', yang_name="tlvs", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'network-instances', u'network-instance', u'protocols', u'protocol', u'ospfv2', u'areas', u'area', u'lsdb', u'lsa-types', u'lsa-type', u'lsas', u'lsa', u'opaque-lsa', u'router-information', u'tlvs', u'tlv', u'segment-routing-sid-label-range']
def _get_tlvs(self):
"""
Getter method for tlvs, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/router_information/tlvs/tlv/segment_routing_sid_label_range/tlvs (container)
YANG Description: Sub-TLVs of the SID/Label range TLV of the RI LSA
"""
return self.__tlvs
def _set_tlvs(self, v, load=False):
"""
Setter method for tlvs, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/router_information/tlvs/tlv/segment_routing_sid_label_range/tlvs (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_tlvs is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_tlvs() directly.
YANG Description: Sub-TLVs of the SID/Label range TLV of the RI LSA
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=tlvs.tlvs, is_container='container', yang_name="tlvs", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """tlvs must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=tlvs.tlvs, is_container='container', yang_name="tlvs", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
})
self.__tlvs = t
if hasattr(self, '_set'):
self._set()
def _unset_tlvs(self):
self.__tlvs = YANGDynClass(base=tlvs.tlvs, is_container='container', yang_name="tlvs", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)
tlvs = __builtin__.property(_get_tlvs)
_pyangbind_elements = {'tlvs': tlvs, }
import tlvs
class segment_routing_sid_label_range(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa-types/lsa-type/lsas/lsa/opaque-lsa/router-information/tlvs/tlv/segment-routing-sid-label-range. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: The Segment Identifier (SID) or label ranges that are supported by
the local system for Segment Routing
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_extmethods', '__tlvs',)
_yang_name = 'segment-routing-sid-label-range'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__tlvs = YANGDynClass(base=tlvs.tlvs, is_container='container', yang_name="tlvs", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'network-instances', u'network-instance', u'protocols', u'protocol', u'ospfv2', u'areas', u'area', u'lsdb', u'lsa-types', u'lsa-type', u'lsas', u'lsa', u'opaque-lsa', u'router-information', u'tlvs', u'tlv', u'segment-routing-sid-label-range']
def _get_tlvs(self):
"""
Getter method for tlvs, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/router_information/tlvs/tlv/segment_routing_sid_label_range/tlvs (container)
YANG Description: Sub-TLVs of the SID/Label range TLV of the RI LSA
"""
return self.__tlvs
def _set_tlvs(self, v, load=False):
"""
Setter method for tlvs, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/router_information/tlvs/tlv/segment_routing_sid_label_range/tlvs (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_tlvs is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_tlvs() directly.
YANG Description: Sub-TLVs of the SID/Label range TLV of the RI LSA
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=tlvs.tlvs, is_container='container', yang_name="tlvs", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """tlvs must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=tlvs.tlvs, is_container='container', yang_name="tlvs", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
})
self.__tlvs = t
if hasattr(self, '_set'):
self._set()
def _unset_tlvs(self):
self.__tlvs = YANGDynClass(base=tlvs.tlvs, is_container='container', yang_name="tlvs", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)
tlvs = __builtin__.property(_get_tlvs)
_pyangbind_elements = {'tlvs': tlvs, }
| [
"dbarrosop@dravetech.com"
] | dbarrosop@dravetech.com |
3a48406a812d84fd849ab992709f79e5fa2084df | 3748c73e6bab589c078dbef66c332e545d668702 | /Spider.py | 60b8f0867a5b51c27db9b9e05f1b355fd7284d97 | [] | no_license | StubbornF4/Async_Spider | 14765acaff54cdc1ff009ad9ecc9a143134f95b8 | 0e84b78ce0e34ff6921c9d62cd3be92f332ebf09 | refs/heads/master | 2022-11-25T06:53:38.703006 | 2020-08-08T00:20:23 | 2020-08-08T00:20:23 | 285,819,474 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,782 | py | import asyncio
from urllib.parse import urlencode
import aiohttp
import logging
import json
from motor.motor_asyncio import AsyncIOMotorClient
logging.basicConfig(level=logging.INFO,
format='%(asctime)s - %(levelname)s: %(message)s')
INDEX_URL = 'https://dynamic5.scrape.cuiqingcai.com/api/book/?'
CONCURRENCY = 5
session = None
article_id = []
#定义信号量,最大的并发抓取数
semaphore = asyncio.Semaphore(CONCURRENCY)
#mongodb相关配置
MONGO_CONNECTION_STRING = 'mongodb://localhost:27017'
MONGO_DB_NAME = 'books'
MONGO_COLLECTION_NAME = 'books'
client = AsyncIOMotorClient(MONGO_CONNECTION_STRING)
db = client[MONGO_DB_NAME]
collection = db[MONGO_COLLECTION_NAME]
#async_mongodb
#TODO
async def save_data(data):
logging.info("saving data %s", data)
await collection.update_one(
{'id': data.get('id')},
{'$set': data},
upsert=True,
)
async def scrape_index(page):
params = {
'limit': 18,
'offset': int(page) * 18,
}
url = INDEX_URL + urlencode(params)
async with semaphore:
try:
logging.info('scraping %s', url)
async with session.get(url) as response:
result = await response.text()
logging.info('page %s result %s ', page, result)
#将Str类型的数据转成Dict
result = json.loads(result)
return result
except aiohttp.ClientError:
logging.error('error occurred while scraping %s', url, exc_info=True)
async def scrape_detail(id):
url = "https://dynamic5.scrape.cuiqingcai.com/api/book/" + str(id)
async with semaphore:
try:
logging.info('scraping %s', url)
async with session.get(url) as response:
result = await response.json()
print(type(result))
print(result)
await save_data(result)
except aiohttp.ClientError:
logging.error('error occurred while scraping %s', url, exc_info=True)
async def main():
global session, article_id
session = aiohttp.ClientSession()
#抓取索引页,获取每页所有书的ID,以进行进一步的抓取
scrape_index_tasks = [asyncio.ensure_future(scrape_index(page))for page in range(1, 3)]
results = await asyncio.gather(*scrape_index_tasks)
for result in results:
for item in result["results"]:
article_id.append((item["id"]))
print(article_id)
#对详情页进行抓取
scrape_detail_tasks = [asyncio.ensure_future(scrape_detail(id))for id in article_id]
await asyncio.gather(*scrape_detail_tasks)
await session.close()
if __name__ == '__main__':
asyncio.get_event_loop().run_until_complete(main()) | [
"lichunxiao0404"
] | lichunxiao0404 |
4f52dc228911b03ffbd14435ca8e9b100afda4c2 | 63862adffd94fae2d43c2ba6130181e1a66b7367 | /v0/main.py | e38e9725e2fad75cc616aaa43d6ae6bf4b2f8fd1 | [
"MIT"
] | permissive | josiemundi/fastapi-twofactor | 023144d771ce939d1500df8d7218714f959adb53 | d64e6081374a94810e6a61d4a70596cd71c64eec | refs/heads/main | 2023-01-20T11:29:36.915120 | 2020-11-17T20:24:14 | 2020-11-17T20:24:14 | 311,323,757 | 11 | 6 | null | null | null | null | UTF-8 | Python | false | false | 4,186 | py | # code originally from - https://fastapi.tiangolo.com/tutorial/security/oauth2-jwt/
from datetime import datetime, timedelta
from typing import Optional
from fastapi import Depends, FastAPI, HTTPException, status
from fastapi.security import OAuth2PasswordBearer, OAuth2PasswordRequestForm
from jose import JWTError, jwt
from passlib.context import CryptContext
from pydantic import BaseModel
# to get a string like this run:
# openssl rand -hex 32
SECRET_KEY = "09d25e094faa6ca2556c818166b7a9563b93f7099f6f0f4caa6cf63b88e8d3e7"
ALGORITHM = "HS256"
ACCESS_TOKEN_EXPIRE_MINUTES = 30
fake_users_db = {
"johndoe": {
"username": "johndoe",
"full_name": "John Doe",
"email": "johndoe@example.com",
"hashed_password": "$2b$12$EixZaYVK1fsbw1ZfbX3OXePaWxn96p36WQoeG6Lruj3vjPGga31lW",
"disabled": False,
}
}
class Token(BaseModel):
access_token: str
token_type: str
class TokenData(BaseModel):
username: Optional[str] = None
class User(BaseModel):
username: str
email: Optional[str] = None
full_name: Optional[str] = None
disabled: Optional[bool] = None
class UserInDB(User):
hashed_password: str
pwd_context = CryptContext(schemes=["bcrypt"], deprecated="auto")
oauth2_scheme = OAuth2PasswordBearer(tokenUrl="token")
app = FastAPI()
def verify_password(plain_password, hashed_password):
return pwd_context.verify(plain_password, hashed_password)
def get_password_hash(password):
return pwd_context.hash(password)
def get_user(db, username: str):
if username in db:
user_dict = db[username]
return UserInDB(**user_dict)
def authenticate_user(fake_db, username: str, password: str):
user = get_user(fake_db, username)
if not user:
return False
if not verify_password(password, user.hashed_password):
return False
return user
def create_access_token(data: dict, expires_delta: Optional[timedelta] = None):
to_encode = data.copy()
if expires_delta:
expire = datetime.utcnow() + expires_delta
else:
expire = datetime.utcnow() + timedelta(minutes=15)
to_encode.update({"exp": expire})
encoded_jwt = jwt.encode(to_encode, SECRET_KEY, algorithm=ALGORITHM)
return encoded_jwt
async def get_current_user(token: str = Depends(oauth2_scheme)):
credentials_exception = HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Could not validate credentials",
headers={"WWW-Authenticate": "Bearer"},
)
try:
payload = jwt.decode(token, SECRET_KEY, algorithms=[ALGORITHM])
username: str = payload.get("sub")
if username is None:
raise credentials_exception
token_data = TokenData(username=username)
except JWTError:
raise credentials_exception
user = get_user(fake_users_db, username=token_data.username)
if user is None:
raise credentials_exception
return user
async def get_current_active_user(current_user: User = Depends(get_current_user)):
if current_user.disabled:
raise HTTPException(status_code=400, detail="Inactive user")
return current_user
@app.post("/token", response_model=Token)
async def login_for_access_token(form_data: OAuth2PasswordRequestForm = Depends()):
user = authenticate_user(fake_users_db, form_data.username, form_data.password)
if not user:
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Incorrect username or password",
headers={"WWW-Authenticate": "Bearer"},
)
access_token_expires = timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES)
access_token = create_access_token(
data={"sub": user.username}, expires_delta=access_token_expires
)
return {"access_token": access_token, "token_type": "bearer"}
@app.get("/users/me/", response_model=User)
async def read_users_me(current_user: User = Depends(get_current_active_user)):
return current_user
@app.get("/users/me/items/")
async def read_own_items(current_user: User = Depends(get_current_active_user)):
return [{"item_id": "Foo", "owner": current_user.username}]
| [
"johannasaladas@gmail.com"
] | johannasaladas@gmail.com |
d33d903e7de59d03eac8b1c9b2af624e056b3328 | b8faf65ea23a2d8b119b9522a0aa182e9f51d8b1 | /vmraid/website/doctype/social_link_settings/social_link_settings.py | 35954b6ce718f192fa921627f23f3e2a83b1b277 | [
"MIT"
] | permissive | vmraid/vmraid | a52868c57b1999a8d648441eb9cd05815204345d | 3c2e2a952003ba7ea2cf13673b9e79e127f4166e | refs/heads/main | 2022-07-29T18:59:28.585133 | 2022-04-22T08:02:52 | 2022-04-22T08:02:52 | 372,473,120 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 215 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2020, VMRaid Technologies and contributors
# License: MIT. See LICENSE
# import vmraid
from vmraid.model.document import Document
class SocialLinkSettings(Document):
pass
| [
"sowrisurya@outlook.com"
] | sowrisurya@outlook.com |
ca1bff906f0e6227e84c313b67eef7c7274ad1c1 | 95ac1e3c58ba4db29b411bd49ab6877df204858f | /database.py | 45b8de8bd58d742c9ee02ad438d6672ea7bf3566 | [] | no_license | vina7/PythonServerSimulation | 18f0bf8cc29999bbc56b2b234887c7aa49ef2da7 | 9f281f2ec34557ad1b8670b90f55485082c1ee51 | refs/heads/master | 2021-01-23T06:49:32.555668 | 2017-03-28T04:20:43 | 2017-03-28T04:20:43 | 86,402,393 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,071 | py | #!/usr/bin/python
import sqlite3
conn = sqlite3.connect("switchgear.db");
conn.execute('''CREATE TABLE TC100(
NAME CHAR(50) PRIMARY KEY NOT NULL,
ALARM INT NOT NULL,
TEMP INT NOT NULL
);''')
conn.execute('''CREATE TABLE NAME(
NAME CHAR(50) PRIMARY KEY NOT NULL
);''')
conn.execute('''CREATE TABLE PXM8000(
NAME CHAR(50) PRIMARY KEY NOT NULL,
LINE1C INT NOT NULL,
LINE1V INT NOT NULL,
LINE2C INT NOT NULL,
LINE2V INT NOT NULL,
LINE3C INT NOT NULL,
LINE3V INT NOT NULL
); ''')
conn.execute('''
CREATE TABLE METER(
NAME CHAR(50) PRIMARY KEY NOT NULL,
LINE1C INT NOT NULL,
LINE1V INT NOT NULL,
LINE2C INT NOT NULL,
LINE2V INT NOT NULL,
LINE3C INT NOT NULL,
LINE3V INT NOT NULL
); ''')
conn.execute('''
CREATE TABLE MAGNUM(
NAME CHAR(50) PRIMARY KEY NOT NULL,
REASON CHAR(200) NOT NULL,
MAINTENANCE INT NOT NULL,
STATUS INT NOT NULL,
LINE1C INT NOT NULL,
LINE2C INT NOT NULL,
LINE3C INT NOT NULL
); ''')
print "Table created successfully"
conn.close()
| [
"van7@pitt.edu"
] | van7@pitt.edu |
f819a142bd8930f08e51e57ed6af15a211801e81 | 4bcae7ca3aed842d647d9112547522cffa805d51 | /0674.最长连续递增序列.py | 43854333b238384701a6a84adb3ed71f0d9e3655 | [] | no_license | SLKyrim/vscode-leetcode | fd5a163f801661db0dfae1d4fdfa07b79fdb82b6 | 65a271c05258f447d3e56755726f02179780eb8a | refs/heads/master | 2021-07-03T03:15:28.883786 | 2021-02-23T06:19:18 | 2021-02-23T06:19:18 | 226,062,540 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,286 | py | #
# @lc app=leetcode.cn id=674 lang=python3
#
# [674] 最长连续递增序列
#
# https://leetcode-cn.com/problems/longest-continuous-increasing-subsequence/description/
#
# algorithms
# Easy (45.18%)
# Likes: 89
# Dislikes: 0
# Total Accepted: 30.7K
# Total Submissions: 68K
# Testcase Example: '[1,3,5,4,7]'
#
# 给定一个未经排序的整数数组,找到最长且连续的的递增序列,并返回该序列的长度。
#
#
#
# 示例 1:
#
# 输入: [1,3,5,4,7]
# 输出: 3
# 解释: 最长连续递增序列是 [1,3,5], 长度为3。
# 尽管 [1,3,5,7] 也是升序的子序列, 但它不是连续的,因为5和7在原数组里被4隔开。
#
#
# 示例 2:
#
# 输入: [2,2,2,2,2]
# 输出: 1
# 解释: 最长连续递增序列是 [2], 长度为1。
#
#
#
#
# 注意:数组长度不会超过10000。
#
#
# @lc code=start
class Solution:
def findLengthOfLCIS(self, nums: List[int]) -> int:
res = 0
n = len(nums)
if n == 0:
return 0
if n == 1:
return 1
cnt = 1
for i in range(1, n):
if nums[i] > nums[i-1]:
cnt += 1
else:
res = max(res, cnt)
cnt = 1
return max(res, cnt)
# @lc code=end
| [
"623962644@qq.com"
] | 623962644@qq.com |
cad75d0eb80bd26d71d27053160c2de0b316bf00 | bf6f4b7b8aebb247a944ba553a6a02bd1a22a8fe | /VVehicular/VVehicular/settings.py | cdecd4fef39ce493dfcbb5a6f3b36f37df52b95f | [] | no_license | jesusbarron/proyectonube | 186c45986f7069650c94f12b5d660dccff8ac0f5 | 961fcdf46b9132777fe4f8a446409e48f2aeb147 | refs/heads/master | 2020-12-24T15:58:57.819272 | 2014-10-22T08:08:20 | 2014-10-22T08:08:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,175 | py | # Django settings for VVehicular project.
import os
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'cloudcomp$proyectovv', # Or path to database file if using sqlite3.
# The following settings are not used with sqlite3:
'USER': 'cloudcomp',
'PASSWORD': 'cloud123',
'HOST': '', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'PORT': '', # Set to empty string for default.
}
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = os.path.normpath(os.path.join(os.path.dirname(__file__),'media/'))
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '*yok!e2gk+zg1lp!b%o3&6wfqna=c8tl$6x5^=e&@1mov(^1vx'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
TEMPLATE_CONTEXT_PROCESSORS=(
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.static",
"django.core.context_processors.tz",
"django.contrib.messages.context_processors.messages",
"VVehicular.apps.context_processors.my_processor",
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'VVehicular.urls'
#identificar el perfil de los usuarios
AUTH_PROFILE_MODULE = 'home.userProfile'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'VVehicular.wsgi.application'
TEMPLATE_DIRS = (
os.path.join(os.path.dirname(__file__),'templates'),
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django_admin_bootstrapped',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
'VVehicular.apps.home',
# Uncomment the next line to enable admin documentation:
'django.contrib.admindocs',
)
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer'
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
| [
"jesus_barron25@hotmail.com"
] | jesus_barron25@hotmail.com |
d0f14525b0f398cc139c7786f3749726bfd3cd9b | f6c84feac4d29034cfb9d34d82d1599d81cd2e15 | /contacts/models/person.py | 67720691a1c16599fa55c4ad9790f5f9b4dda689 | [] | no_license | agorsk1/django-WAS | 557c118ef70883008ccb3175e760cd4e7687bc3a | 4481228f13550e5f787e76061539b5f516c5ada7 | refs/heads/master | 2020-09-14T03:42:03.876154 | 2019-12-12T09:09:07 | 2019-12-12T09:09:07 | 223,006,297 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,832 | py | from django.core.validators import MinValueValidator, MaxValueValidator
from django.db import models
from django.utils.translation import gettext_lazy as _
class Person(models.Model):
MAX_HEIGHT = 220
MIN_HEIGHT = 100
GENDER_MALE = 'male'
GENDER_FEMALE = 'female'
GENDER_UNSPECIFIED = None
GENDER_CHOICES = [
(GENDER_MALE, _('Male')),
(GENDER_FEMALE, _('Female')),
(GENDER_UNSPECIFIED, _('Unspecified'))
]
first_name = models.CharField(verbose_name=_("First Name"), max_length=30)
last_name = models.CharField(verbose_name=_("Last Name"), max_length=30)
date_of_birth = models.DateField(verbose_name=_("Date of Birth"), blank=True, null=True, default=None)
pesel = models.PositiveIntegerField(verbose_name=_('PESEL'), help_text=_("Type your PESEL number"), blank=True, null=True, default=None)
image = models.ImageField(verbose_name=_('Image'), upload_to='image/', blank=True, null=True, default=None)
homepage = models.URLField(verbose_name=_('Homepage'), blank=True, null=True, default=None)
notes = models.TextField(verbose_name=_('Notes'), blank=True, null=True, default=None)
height = models.DecimalField(verbose_name=_('Height'), help_text=_('Please enter height in cm'), max_digits=4, decimal_places=1, validators=[MinValueValidator(MIN_HEIGHT), MaxValueValidator(MAX_HEIGHT)], blank=True, null=True, default=None)
gender = models.CharField(verbose_name=_('Gender'), max_length=30, choices=GENDER_CHOICES, blank=True, null=True, default=None)
friends = models.ManyToManyField(verbose_name=_('Friends'), to='contacts.Person', blank=True, default=None)
def __str__(self) -> str:
return f'{self.first_name} {self.last_name}'
class Meta:
verbose_name = _('Person')
verbose_name_plural = _('People')
| [
"artq1406@gmail.com"
] | artq1406@gmail.com |
b26dfc2cc4ffb4aa822cac635d3e83c1522e9304 | 04b3a30ca30c3a9cc459b06fe1842a500dd5ab51 | /addresss/views.py | 06c24539f8cc82d589e80f97797e2431e41d5162 | [] | no_license | rahulsayon/Final-Ecommerce | 17b7830e44ab86b7513f48d80fc1bb7f12c36516 | ca0c860653ec1b80f0a3f012e338ecc2189019ac | refs/heads/master | 2022-12-11T01:12:03.500783 | 2020-09-13T20:09:40 | 2020-09-13T20:09:40 | 295,228,975 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,454 | py | from django.shortcuts import render
from .forms import AddressForm
from django.shortcuts import redirect
from billing.models import BillingProfile
from django.utils.http import is_safe_url
from addresss.models import Address
# Create your views here.
def checkout_address_create_view(request):
form = AddressForm(request.POST or None)
context = { "form" : form }
next_ = request.GET.get('next')
next_post = request.POST.get('next')
redirect_path = next_ or next_post or None
if form.is_valid():
print(form.cleaned_data)
instance = form.save(commit=False)
billing_profile , billing_profile_created = BillingProfile.objects.new_or_get(request)
if billing_profile is not None:
address_type = request.POST.get('address_type' , 'shipping')
print("billinf profile" , billing_profile)
instance.billing_profile = billing_profile
instance.address_type = request.POST.get('address_type' , 'shipping')
instance.save()
request.session[address_type + "_address_id"] = instance.id
print(address_type +"_address_id")
else:
print("error")
return redirect("cart:checkout")
if is_safe_url(redirect_path , request.get_host()):
return redirect(redirect_path)
else:
return redirect("cart:checkout")
return redirect("cart:checkout")
def checkout_address_reuse_view(request):
if request.user.is_authenticated:
context = {}
next_ = request.GET.get('next')
next_post = request.POST.get('next')
redirect_path = next_ or next_post or None
if request.method == "POST":
print(request.POST)
shipping_address = request.POST.get('shipping_address', None)
address_type = request.POST.get('address_type', 'shipping')
billing_profile, billing_profile_created = BillingProfile.objects.new_or_get(request)
if shipping_address is not None:
qs = Address.objects.filter(billing_profile=billing_profile, id=shipping_address)
if qs.exists():
request.session[address_type + "_address_id"] = shipping_address
if is_safe_url(redirect_path, request.get_host()):
return redirect(redirect_path)
return redirect("cart:checkout")
| [
"rahulsayon95@gmail.com"
] | rahulsayon95@gmail.com |
35e1a01f77365a4404f8d1a72b7faa481b8b2689 | 9235b96ac3dd60ec2fac03154f7aa59cb46e301e | /heap/Main.py | 45e2a46714d3ae3ad416288a89d25dcb59f0b146 | [] | no_license | DaDa0013/data_structure_python | 2fe51d499cfeb4a6f0b090c5b5ed3d08b3adab14 | 1c1f32dbaa954bf489203afb4193ffe3764af0fc | refs/heads/main | 2023-05-14T06:45:54.063459 | 2021-06-13T05:29:09 | 2021-06-13T05:29:09 | 357,013,979 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,018 | py | class AdaptedHeap: # min_heap으로 정의함!
def __init__(self):
self.A = []
self.D = {} # dictionary D[key] = index
def __str__(self):
return str(self.A)
def __len__(self):
return len(self.A)
def insert(self, key):
self.A.append(key)
self.D[key]=len(self.A)-1
self.heapify_up(len(self.A)-1)
return self.D[key]
# code here
# key 값이 최종 저장된 index를 리턴한다!
def heapify_up(self, k):
while k > 0 and self.A[(k-1)//2] > self.A[k]:
a=self.A[k]
b=self.A[(k-1)//2]
self.A[k], self.A[(k-1)//2] = self.A[(k-1)//2], self.A[k]
self.D[a],self.D[b] = (k-1)//2, k
k = (k-1)//2
# code here: key 값의 index가 변경되면 그에 따라 D 변경 필요
def heapify_down(self, k):
n=len(self.A)
while n>= 2*k +1:# 자식 노드가 있는가?
L, R= 2*k + 1, 2*k + 2
m = k # m = (A[k], A[L], A[R]) 중 작은 값을 가지는 index
if self.A[k] > self.A[L]:
m = L
if n > R:
if self.A[m] > self.A[R]:
m = R
if k == m:
break
else:
a=self.A[k]
b=self.A[m]
self.A[k], self.A[m]= self.A[m], self.A[k],
self.D[a], self.D[b] = m, k
k = m
# code here: key 값의 index가 변경되면 그에 따라 D 변경 필요
def find_min(self):
# 빈 heap이면 None 리턴, 아니면 min 값 리턴
# code here
if len(self.A) == 0:
return None
return self.A[0]
def delete_min(self):
# 빈 heap이면 None 리턴, 아니면 min 값 지운 후 리턴
# code here
if len(self.A)==0:
return None
key=self.A[0]
a=self.A[-1]
self.A[0],self.A[-1], = self.A[-1],self.A[0]
self.D[key],self.D[a] = len(self.A)-1,0
self.A.pop()
del self.D[a]
self.heapify_down(0)
return key
def update_key(self, old_key, new_key):
# old_key가 힙에 없으면 None 리턴
# 아니면, new_key 값이 최종 저장된 index 리턴
# code here
for i in range(len(self.A)):
if self.A[i]==old_key:
n=self.D[old_key]
del self.D[old_key]
self.A[n]=new_key
self.D[new_key]=n
if old_key < new_key:
self.heapify_down(n)
else:
self.heapify_up(n)
return n
return None
# 아래 명령 처리 부분은 수정하지 말 것!
H = AdaptedHeap()
while True:
cmd = input().split()
if cmd[0] == 'insert':
key = int(cmd[1])
loc = H.insert(key)
print(f"+ {int(cmd[1])} is inserted")
elif cmd[0] == 'find_min':
m_key = H.find_min()
if m_key != None:
print(f"* {m_key} is the minimum")
else:
print(f"* heap is empty")
elif cmd[0] == 'delete_min':
m_key = H.delete_min()
if m_key != None:
print(f"* {m_key} is the minimum, then deleted")
else:
print(f"* heap is empty")
elif cmd[0] == 'update':
old_key, new_key = int(cmd[1]), int(cmd[2])
idx = H.update_key(old_key, new_key)
if idx == None:
print(f"* {old_key} is not in heap")
else:
print(f"~ {old_key} is updated to {new_key}")
elif cmd[0] == 'print':
print(H)
elif cmd[0] == 'exit':
break
else:
print("* not allowed command. enter a proper command!") | [
"yoon351200@naver.com"
] | yoon351200@naver.com |
a75d04852aca116b804d4a5aa819b764cddff608 | 5d9636dcae2471d700da5583cfc0359644c7322d | /pugsley/auth/routes.py | 78e16175c4ac581d44b4ba571f9a66b393c72966 | [
"MIT"
] | permissive | kfields/pugsley-lite | 93a4c7c334fd9b4f3ab68acc565b1f29a4a31b99 | 9fdd4868895b38fb81855952f19bdf9cca1635b3 | refs/heads/master | 2023-01-24T18:29:15.338112 | 2019-08-11T20:33:30 | 2019-08-11T20:33:30 | 179,791,236 | 1 | 0 | MIT | 2023-01-09T22:22:33 | 2019-04-06T05:09:26 | CSS | UTF-8 | Python | false | false | 4,521 | py | from flask import render_template, redirect, url_for, flash, request, jsonify
from werkzeug.urls import url_parse
from flask_login import login_user, logout_user, current_user
from flask_babel import _
from pugsley import db
from pugsley.jwt import encode_auth_token
from pugsley.auth import bp
from pugsley.auth.forms import LoginForm, RegistrationForm, \
ResetPasswordRequestForm, ResetPasswordForm
from pugsley.models.users import User
from pugsley.auth.emails import send_password_reset_email
@bp.route('/login', methods=['GET', 'POST'])
def login():
if current_user.is_authenticated:
return redirect(url_for('main.index'))
form = LoginForm()
if form.validate_on_submit():
email = form.email.data
if '@' in email:
user = User.query.filter_by(email=form.email.data).first()
else:
user = User.query.filter_by(username=form.email.data).first()
if user is None or not user.check_password(form.password.data):
flash(_('Invalid email or password'))
return redirect(url_for('auth.login'))
login_user(user, remember=form.remember_me.data)
next_page = request.args.get('next')
if not next_page or url_parse(next_page).netloc != '':
next_page = url_for('main.index')
return redirect(next_page)
# return render_template('login.html', title=_('Log In'), form=form)
return render_template('layouts/auth-default.html',
content=render_template( 'pages/login.html', form=form ) )
@bp.route('/logout')
def logout():
logout_user()
return redirect(url_for('main.index'))
@bp.route('/register', methods=['GET', 'POST'])
def register():
if current_user.is_authenticated:
return redirect(url_for('main.index'))
form = RegistrationForm()
if form.validate_on_submit():
# user = User(first_name=form.first_name.data, last_name=form.last_name.data, username=form.username.data, email=form.email.data)
user = User(username=form.email.data, email=form.email.data)
user.set_password(form.password.data)
db.session.add(user)
db.session.commit()
flash(_('Congratulations, you are now a registered user!'))
return redirect(url_for('auth.login'))
# return render_template('register.html', title=_('Register'), form=form)
return render_template('layouts/auth-default.html',
content=render_template( 'pages/register.html', form=form ) )
@bp.route('/reset_password_request', methods=['GET', 'POST'])
def reset_password_request():
if current_user.is_authenticated:
return redirect(url_for('main.index'))
form = ResetPasswordRequestForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user:
send_password_reset_email(user)
flash(
_('Check your email for the instructions to reset your password'))
return redirect(url_for('auth.login'))
return render_template('reset_password_request.html',
title=_('Reset Password'), form=form)
@bp.route('/reset_password/<token>', methods=['GET', 'POST'])
def reset_password(token):
if current_user.is_authenticated:
return redirect(url_for('main.index'))
user = User.verify_reset_password_token(token)
if not user:
return redirect(url_for('main.index'))
form = ResetPasswordForm()
if form.validate_on_submit():
user.set_password(form.password.data)
db.session.commit()
flash(_('Your password has been reset.'))
return redirect(url_for('auth.login'))
return render_template('reset_password.html', form=form)
@bp.route('/token', methods=['POST'])
def token():
if not request.is_json:
return jsonify({"msg": "Missing JSON in request"}), 400
username = request.json.get('username', None)
password = request.json.get('password', None)
if not username:
return jsonify({"msg": "Missing username parameter"}), 400
if not password:
return jsonify({"msg": "Missing password parameter"}), 400
user = User.query.filter_by(username=username).first()
if user is None or not user.check_password(password):
return jsonify({"msg": "Bad username or password"}), 401
# Identity can be any data that is json serializable
access_token = encode_auth_token(sub=username, id=user.id)
print(access_token)
return jsonify({"token": access_token.decode('utf-8')}), 200 | [
"kurtisfields@gmail.com"
] | kurtisfields@gmail.com |
e98b340a8750c4ecbc896b144ce74e599fad301b | 891ea89e1b2c5e437f30d1dc6889a52b2566010c | /src/command_modules/azure-cli-keyvault/azure/cli/command_modules/keyvault/keyvaultclient/generated/models/organization_details.py | 5b8435485f0274d1d67d2ef1b59ef47aaa0c238e | [
"MIT"
] | permissive | biggorog01/azure-cli | f201da449e21cf6bdebd0deaede843d8f3ab942b | eb316bce7d59c36fcce72e5289d243a2c3dd4cd1 | refs/heads/master | 2020-12-24T12:04:46.451850 | 2016-11-07T03:56:11 | 2016-11-07T03:56:11 | 73,080,322 | 0 | 1 | null | 2017-06-20T23:12:53 | 2016-11-07T13:21:23 | Python | UTF-8 | Python | false | false | 1,343 | py | #---------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#---------------------------------------------------------------------------------------------
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
#pylint: skip-file
from msrest.serialization import Model
class OrganizationDetails(Model):
"""Details of the organization of the certificate issuer.
:param id: Id of the organization.
:type id: str
:param admin_details: Details of the organization administrator.
:type admin_details: list of :class:`AdministratorDetails
<KeyVault.models.AdministratorDetails>`
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'admin_details': {'key': 'admin_details', 'type': '[AdministratorDetails]'},
}
def __init__(self, id=None, admin_details=None):
self.id = id
self.admin_details = admin_details
| [
"noreply@github.com"
] | biggorog01.noreply@github.com |
df81a46fb1daeb5393ea26cdc1f3659bb8189a8e | 8aff0742b1dd2cbb5a693807f1d3eafce5406890 | /mysite/settings.py | 4c855b6b963b7f70a43ff49fea8d5e656016032b | [] | no_license | ijk5554234/CameraChoose_in_Django | 052952e29036f2b2029d49658c04c8a31e27f4d4 | dcf118c730bf295c8490bfef8c85d6d85abfd3e1 | refs/heads/master | 2020-12-25T19:04:04.696864 | 2015-03-11T02:43:54 | 2015-03-11T02:43:54 | 31,746,651 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,446 | py | """
Django settings for mysite project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'a2vvud(^1m#1nfl1@8s3f090^qpbsbd%395s*(mhckl8-f2pp#'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'camerachoose',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'mysite.urls'
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'dj',
'USER': 'jikel',
'PASSWORD': '12345678',
'HOST': 'localhost',
'PORT': '3306'
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static"),
'/static/',
)
# Template dir
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, 'templates/'),
os.path.join(BASE_DIR, 'templates/camerachoose/'),
)
PROJECT_PATH = os.path.dirname(globals()["__file__"])
STATIC_PATH = PROJECT_PATH + '/static' | [
"ljk5980@gmail.com"
] | ljk5980@gmail.com |
ed6396e69d37f3562b6de9bc2f2fe68e0391bda3 | faf21bbc7d98519283b5c4d6d56fe8d7bb0a2175 | /edley/urls.py | 7cc795a385bf38604e6e03eddd3b475277bbcc0e | [] | no_license | Samiel999/edley | 358949f6ee827985a61e22f46ebefd73c7044268 | 01399e31f90457cad011156b564fa0e8f9f4b206 | refs/heads/master | 2022-11-13T16:27:13.373005 | 2020-07-10T09:32:06 | 2020-07-10T09:32:06 | 278,592,958 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 799 | py |
"""edley URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('telefonbuch.urls'))
]
| [
"samuel@MacBook-Pro-6.fritz.box"
] | samuel@MacBook-Pro-6.fritz.box |
d8982a501517e741145cac724e03b59326021d7d | 09e5cfe06e437989a2ccf2aeecb9c73eb998a36c | /modules/dxtbx/command_line/print_header.py | 0927cef0df1adb502a68d0f8709b4377dcad155a | [
"BSD-3-Clause"
] | permissive | jorgediazjr/dials-dev20191018 | b81b19653624cee39207b7cefb8dfcb2e99b79eb | 77d66c719b5746f37af51ad593e2941ed6fbba17 | refs/heads/master | 2020-08-21T02:48:54.719532 | 2020-01-25T01:41:37 | 2020-01-25T01:41:37 | 216,089,955 | 0 | 1 | BSD-3-Clause | 2020-01-25T01:41:39 | 2019-10-18T19:03:17 | Python | UTF-8 | Python | false | false | 1,633 | py | from __future__ import absolute_import, division, print_function
import sys
from scitbx.array_family import flex
from dxtbx.format.FormatMultiImage import FormatMultiImage
from dxtbx.format.Registry import Registry
def print_header():
# this will do the lookup for every frame - this is strictly not needed
# if all frames are from the same instrument
for arg in sys.argv[1:]:
print("=== %s ===" % arg)
format_class = Registry.find(arg)
print("Using header reader: %s" % format_class.__name__)
i = format_class(arg)
beam = i.get_beam()
goniometer = i.get_goniometer()
detector = i.get_detector()
scan = i.get_scan()
if beam is None:
print("No beam model found")
else:
print(beam)
if detector is None:
print("No detector model found")
else:
print(detector)
if goniometer is None:
print("No goniometer model found")
else:
print(goniometer)
if scan is None:
print("No scan model found")
else:
print(scan)
if not issubclass(format_class, FormatMultiImage):
try:
raw_data = i.get_raw_data()
if not isinstance(raw_data, tuple):
raw_data = (raw_data,)
d = [p.as_1d() for p in raw_data]
print("Total Counts: %d" % sum([flex.sum(p.select(p >= 0)) for p in d]))
except AttributeError:
print("Could not read image data")
if __name__ == "__main__":
print_header()
| [
"jorge7soccer@gmail.com"
] | jorge7soccer@gmail.com |
0ac4e9e793ef3967e9ce8b67ea5f79fa5e4ddbe1 | 8aa529d576f0aec174bbca38c076d7d41e26d703 | /poo/JeuVoiture/exemple.py | 33a895f44d59f9e8c064b8c5fa3ab0dc44ab0de8 | [] | no_license | Manutereva/StagePython | 165c66a93a193d5603ed5f97d7a48da6225d246d | 2192e4f3ce24bffc08e8628227b755ff9f1f9173 | refs/heads/master | 2020-08-19T05:05:05.565901 | 2019-10-17T15:23:43 | 2019-10-17T15:23:43 | 215,881,200 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 415 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Oct 15 14:41:26 2019
@author: miguel
"""
def dire_bonjour():
print("Bonjour")
def say_hello():
print("Bonjour")
def fala_bom_dia():
print("Bonjour")
dico = {
"FR": dire_bonjour,
"EN": say_hello,
"PT": fala_bom_dia
}
dico["FR"]()
func = lambda arg : arg * 2
print(func(4))
liste = [4,8,1,3]
l = map(type, liste)
print(list(l))
| [
"mickael@ermes.ai"
] | mickael@ermes.ai |
c8262174a413858ac535c1546e354b1373bc5eb6 | 4072f7068837cbaeb6cb6984434d9b911d00f81c | /list_and_dict.py | fc58621d073c3ed4806d9bcda9806a51b946712e | [] | no_license | 8cH9azbsFifZ/utilities | 18b486a5e9540c24142ca646b40fe7e7df8ac637 | 7072698c7d479a56c7d14970e242c7c47ca7c91a | refs/heads/master | 2020-09-07T14:50:40.914707 | 2019-11-05T00:33:32 | 2019-11-05T00:33:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,092 | py | """Utility functions."""
__author__ = "Tom Goetz"
__copyright__ = "Copyright Tom Goetz"
__license__ = "GPL"
def list_in_list(list1, list2):
"""Test if all items in list1 are present in list2."""
for list_item in list1:
if list_item not in list2:
return False
return True
def list_intersection(list1, list2):
"""Return a list of the items present both in list1 and list2."""
return [list_item for list_item in list1 if list_item in list2]
def list_intersection_count(list1, list2):
"""Return the count of items present in both lists."""
return len(list_intersection(list1, list2))
def dict_filter_none_values(in_dict):
"""Given a dictionary, return a new dictionary with only the dict items with non-None values."""
return {key : value for key, value in in_dict.iteritems() if value is not None}
def filter_dict_by_list(in_dict, keep_list):
"""Return a dictionary with all items from the input dictionary who's keys appear in the list."""
return {key : value for key, value in in_dict.iteritems() if key in keep_list}
| [
"tcgoetz@gmail.com"
] | tcgoetz@gmail.com |
6b5929e0f8c6f7ca0c6f96456f18f0bb57b58342 | 84b397f6ec1780a917c1e4865459a3118c25b6dd | /blog/migrations/0001_initial.py | 7416e505d5fbe1ff32f943b26f5700c33dc55dad | [] | no_license | MrArabboy/Blogsite | 4167cb9f7030e9bb3feb240d0b2560b3cacde9c3 | 8d80d08b418476bcaad87f3f479f0ffe55af00c5 | refs/heads/master | 2023-06-14T04:16:28.347506 | 2021-07-10T14:54:21 | 2021-07-10T14:54:21 | 384,720,448 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 802 | py | # Generated by Django 3.2.3 on 2021-06-22 14:13
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Blog',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=50)),
('context', models.TextField(max_length=200)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"mr.arabboy@gmail.com"
] | mr.arabboy@gmail.com |
f02c935f46b416c5992dd469b7e5961998b00fda | bbce5ff1bec789af2ddf55e4af4e14e11fdfc3d2 | /app.py | 74675f5eccb17ce082d01e7b938b1ec66c875162 | [] | no_license | SnehilSinha/Fake-Currency-Detection-using-Deep-Convolutional-Networks | 597b8c14e8d9538ef1f38c6c9707dd3cc09005fb | 3fffc6a8fd075bcb44a63ef1525a6ff3ae4d45f7 | refs/heads/main | 2023-05-30T18:11:53.129080 | 2021-07-04T07:23:00 | 2021-07-04T07:23:00 | 360,904,405 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,303 | py | import os
import sys
import cv2
# Flask
from flask import Flask, redirect, url_for, request, render_template, Response, jsonify, redirect
from werkzeug.utils import secure_filename
from gevent.pywsgi import WSGIServer
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.applications.imagenet_utils import preprocess_input, decode_predictions
from tensorflow.keras.models import load_model
import numpy as np
from util import base64_to_pil
# Declare a flask app
app = Flask(__name__)
MODEL_PATH = 'models/counterfeit.h5' # Model saved with Keras model.save()
model = load_model(MODEL_PATH)
print('Model loaded. Start serving...')
def model_predict(img, model):
#img = img.convert('RGB')
gray = cv2.imread(".\\uploads\\image.png", cv2.IMREAD_GRAYSCALE)
high_thresh, thresh_im = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
lowThresh = 0.5*high_thresh
blur = cv2.medianBlur(gray, 17)
edge_img = cv2.Canny(blur,lowThresh*1.5,high_thresh*1.5)
pts = np.argwhere(edge_img>0)
try:
y1,x1 = pts.min(axis=0)
y2,x2 = pts.max(axis=0)
except ValueError:
print (img)
pass
cropped = gray[y1:y2, x1:x2]
blur_cropped = cv2.medianBlur(cropped, 7)
blur_cropped = cv2.resize(blur_cropped , (224, 224))
img=np.array(blur_cropped)
x = np.array(img).reshape(-1,224,224,1)
x = preprocess_input(x, mode='tf')
preds = model.predict(x)
return preds
@app.route('/', methods=['GET'])
def index():
# Main page
return render_template('index.html')
@app.route('/predict', methods=['GET', 'POST'])
def predict():
if request.method == 'POST':
img = base64_to_pil(request.json)
img.save("./uploads/image.png")
# Make prediction
preds = model_predict(img, model)
#preds[0][0]=0.00031593
print("pred= ",preds)
print("pred[0][0]= ",preds[0][0])
if (preds[0][0]) == 0.0:
return jsonify(result="Fake!")
else:
return jsonify(result="Real")
return None
if __name__ == '__main__':
app.run(port=5002, threaded=False)
# Serve the app with gevent
#http_server = WSGIServer(('0.0.0.0', 5000), app)
#http_server.serve_forever()
| [
"noreply@github.com"
] | SnehilSinha.noreply@github.com |
fd297696413587ed4cb669225d6afaab95af0cd3 | da82dda955093f4a8143d77075970cecd462c46e | /기본수학1/2869.py | d0a7b37e924494a14dd4543331eb99af9e402271 | [] | no_license | damagejun19/baekjoon | 36d2ab5917e08874e23690533d221de6c5747dae | 644c05807665cc60573b0a41858e395e964c365b | refs/heads/main | 2023-05-25T06:10:35.977178 | 2021-06-10T14:45:20 | 2021-06-10T14:45:20 | 372,837,519 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 98 | py | import math
A, B, V = map(int, input().split())
day = math.ceil((V - B)/(A - B))
print(day)
| [
"noreply@github.com"
] | damagejun19.noreply@github.com |
473ad805640993a7478b24d1975950e372158851 | d2f55652fe0cbacc5729f596f4232261a0823156 | /union_logistic回归.py | dde87d787631c8746b768195308b75382ef714e1 | [] | no_license | airuibel/td_bair_union_model | 2f3b8657e99e5d809baf6c3db02b372852873824 | 189530561f8af4d3b8e5ed62b90a27b40bc6cf73 | refs/heads/master | 2022-02-28T14:08:54.779708 | 2019-09-24T08:14:01 | 2019-09-24T08:14:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 50,227 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Jun 4 15:59:34 2019
@author: wj56740
"""
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 30 11:02:54 2019
@author: wj56740
#针对客户:所有新增客户
#观察期时间窗:2017.07.01-2018.09.28
#表现期:2019年4月15日
## 坏客户定义:
# 1) 曾经逾期16+
## 好客户的定义:
# 1) 已结清且未发生逾期
# 2) 已还至少6期且未发生逾期
"""
import os
import pandas as pd
import numpy as np
from scipy import stats
from sklearn.metrics import roc_curve, roc_auc_score
from sklearn.ensemble import GradientBoostingClassifier,RandomForestClassifier
from sklearn.model_selection import train_test_split
import warnings
import matplotlib.pyplot as plt
from matplotlib import rcParams
import datetime
from xgboost.sklearn import XGBClassifier
import lightgbm as lgb
from sklearn.externals import joblib
import pickle
import pandas as pd
import numpy as np
pd.set_option('display.width', 1000)
from c_tools import v2_cat_woe_iv,v2_equif_bin
from c_tools import ChiMerge,AssignBin, CalcWOE, BinBadRate
from itertools import *
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LassoCV
from sklearn.model_selection import cross_val_score
from sklearn.ensemble import RandomForestClassifier
from statsmodels.stats.outliers_influence import variance_inflation_factor as vif
from scipy import stats
from sklearn.externals import joblib
from c_tools import Vif,score_situation,score_situation_test,auc_ruce
import statsmodels.api as sm
import time
import gc
warnings.filterwarnings("ignore")
rcParams['font.sans-serif'] = ['Microsoft YaHei']
## 修改路径到数据存储的文件夹
os.chdir('D:/llm/联合建模算法开发/数据/')
## 定义一些函数用于模型评估,计算KS
def r_p(y_test, answer_p, idx, low=0, high=150):
a = answer_p
b = y_test
idx = idx[low:high]
recall = (b.iloc[idx] == 1).sum() / (b == 1).sum()
precision = (b.iloc[idx] == 1).sum() / (high - low)
return (recall, precision)
def r_p_chart(y_test, answer_p, part=20):
print('分段 平均分 最小分 最大分 客户数 逾期数 逾期数/客户数 KS值 GAIN_INDEX 累计召回率')
a = answer_p
b = y_test
idx = sorted(range(len(a)), key=lambda k: a[k], reverse=True)
total_label_RATE = (y_test == 1).sum() / len(y_test)
ths = []
cum = 0
if len(np.unique(a)) < part:
for unq_a in np.unique(a)[::-1]:
ths.append(cum)
cum = cum + (a == unq_a).sum()
ths.append(cum)
else:
for i in np.arange(0, len(a), (len(a) / part)):
ths.append(int(round(i)))
ths.append(len(a))
min_scores = []
for idx_ths, _ in enumerate(ths):
if idx_ths == 0:
continue
# idx_ths = 1
low = ths[idx_ths - 1]
high = ths[idx_ths]
r, p = r_p(y_test, answer_p, idx, low, high)
cum_r, cum_p = r_p(y_test, answer_p, idx, 0, high)
max_score = answer_p[idx[low]]
min_score = answer_p[idx[high - 1]]
min_scores.append(min_score)
mean_score = (max_score + min_score) / 2
len_ = high - low
idx_tmp = idx[low:high]
bad_num = (b.iloc[idx_tmp] == 1).sum()
INTERVAL_label_RATE = bad_num / len_
idx_tmp = idx[0:high]
tpr = (b.iloc[idx_tmp] == 1).sum() / (b == 1).sum()
fpr = (b.iloc[idx_tmp] == 0).sum() / (b == 0).sum()
ks = tpr - fpr
gain_index = INTERVAL_label_RATE / total_label_RATE
print('%d %10.3f %10.3f %10.3f %7d %7d %10.2f %10.2f %10.2f %10.2f'
% (idx_ths, mean_score * 100, min_score * 100, max_score * 100, len_, bad_num, INTERVAL_label_RATE * 100,
ks * 100, gain_index, cum_r * 100))
return min_scores
def r_p_chart2(y_test, answer_p, min_scores, part=20):
print('分段 平均分 最小分 最大分 客户数 逾期数 逾期数/客户数 KS值 GAIN_INDEX 累计召回率')
a = answer_p
b = y_test
idx = sorted(range(len(a)), key=lambda k: a[k], reverse=True)
ths = []
ths.append(0)
min_scores_idx = 0
for num, i in enumerate(idx):
# print(a[i])
if a[i] < min_scores[min_scores_idx]:
ths.append(num)
min_scores_idx = min_scores_idx + 1
ths.append(len(idx))
total_label_RATE = (y_test == 1).sum() / len(y_test)
min_scores = []
for idx_ths, _ in enumerate(ths):
if idx_ths == 0:
continue
low = ths[idx_ths - 1]
high = ths[idx_ths]
r, p = r_p(y_test, answer_p, idx, low, high)
cum_r, cum_p = r_p(y_test, answer_p, idx, 0, high)
max_score = answer_p[idx[low]]
min_score = answer_p[idx[high - 1]]
min_scores.append(min_score)
mean_score = (max_score + min_score) / 2
len_ = high - low
idx_tmp = idx[low:high]
bad_num = (b.iloc[idx_tmp] == 1).sum()
INTERVAL_label_RATE = bad_num / len_
idx_tmp = idx[0:high]
tpr = (b.iloc[idx_tmp] == 1).sum() / (b == 1).sum()
fpr = (b.iloc[idx_tmp] == 0).sum() / (b == 0).sum()
ks = tpr - fpr
gain_index = INTERVAL_label_RATE / total_label_RATE
print('%d %10.3f %10.3f %10.3f %7d %7d %10.2f %10.2f %10.2f %10.2f'
% (idx_ths, mean_score * 100, min_score * 100, max_score * 100, len_, bad_num, INTERVAL_label_RATE * 100,
ks * 100, gain_index, cum_r * 100))
'''
##2.2 变量预处理--针对不同的数据类型进行预处理
'''
final_data=pd.read_excel('所有建模样本数据_20190802.xlsx')
touna_cd_score_for=pd.read_csv('touna_cd_score_for.csv')
touna_cd_score_for['applydate']=touna_cd_score_for['apply_date'].str.slice(0,10)
final_data=pd.merge(final_data,touna_cd_score_for,on=['credentials_no_md5', 'cust_name_md5', 'mobile_md5', 'applydate'],how='inner')
final_data=final_data.drop_duplicates(['credentials_no_md5', 'cust_name_md5', 'mobile_md5', 'applydate'])
columns_transform={'通用分':'rational_score','小额现金贷多期分':'small_creditmoney_multiplyterm_score','小额现金贷单期分':'small_creditmoney_singleterm_score',
'银行分':'bank_score','消费金融分':'consumerloan_score','大额现金贷分':'big_creditmoney_singleterm_score'}
final_data=final_data.rename(columns=columns_transform)
vars_count_table=pd.read_excel('D:/llm/联合建模算法开发/逻辑回归结果/de_dict_vars_20190722.xlsx')
choose_columns_table = vars_count_table[vars_count_table['是否选用'].isin(['是'])]
numeric_columns = choose_columns_table.loc[choose_columns_table.var_type.isin(['\ufeff数据型','数据型', '数字型']), 'var_name'].values.tolist()
str_columns = choose_columns_table.loc[choose_columns_table.var_type.isin(['字符型', '\ufeff字符型','字符型 ' ]), 'var_name'].values.tolist()
date_columns = choose_columns_table.loc[choose_columns_table.var_type.isin(['\ufeff日期型','日期型']), 'var_name'].values.tolist()
final_data=final_data.replace('\\N',np.nan)
'''
## 4.变量筛选
'''
"""
## 4.1 初步筛选
等频分箱:
类别大于10的变量进行等频分箱,然后计算IV
类别小于10 的变量直接进行计算IV
"""
model_data_new=final_data.loc[final_data.app_applydate<='2018-10-31 23:59:59',choose_columns_table.var_name.tolist()+
['apply_y','rational_score','small_creditmoney_multiplyterm_score','small_creditmoney_singleterm_score',
'bank_score','consumerloan_score','big_creditmoney_singleterm_score','app_applydate']].copy()
'''
## 处理日期型变量,将日期变量转为距离申请日期的天数
'''
for col in date_columns: # 去除异常的时间
try:
model_data_new.loc[model_data_new[col] >= '2030-01-01', col] = np.nan
except:
pass
def date_cal(x, app_applydate): # 计算申请日期距离其他日期的天数
days_dt = pd.to_datetime(app_applydate) - pd.to_datetime(x)
return days_dt.dt.days
for col in date_columns:
if col != 'app_applydate':
try:
if col not in ['vehicle_minput_drivinglicensevaliditydate']:
model_data_new[col] = date_cal(model_data_new[col], model_data_new['app_applydate'])
else:
model_data_new[col] = date_cal(model_data_new['app_applydate'], model_data_new[col]) # 计算行驶证有效期限距离申请日期的天数
except:
pass
#x=model_data_new.drop('apply_y',axis=1)
#y=model_data_new['apply_y']
#x_train,x_test,y_train,y_test=train_test_split(x,y,test_size=0.3,random_state=0)
#test_data=pd.concat([x_test,y_test],axis=1)
#
#df = pd.concat([x_train,y_train],axis=1)
df=model_data_new.copy()
for cn in df.columns:
try:
df[cn]=df[cn].astype('float64')
df[cn]=df[cn].fillna(-99)
except:
df[cn]=df[cn].fillna('-99')
df[cn]=df[cn].astype('str')
def main(df):
cols = df.columns.tolist()
col_removed = ['apply_y']
cols = list(set(cols)-set(col_removed))
cat_over20 = []
cat_less20 = []
for col in cols:
if len(df[col].value_counts())> 10:
cat_over20.append(col)
else:
cat_less20.append(col)
print("类别》10 的变量个数是:",len(cat_over20))
print("类别《10的变量个数是:",len(cat_less20))
target = "apply_y"
# cat_less20 = ["deci_apt_facetrial_housetype","deci_cont02_relationship","deci_jxl_contact2_rel","deci_vehicle_minput_lastmortgagerinfo"]
high_iv_name = v2_cat_woe_iv(df,cat_less20,target) # 计算woe 和IV值
group = []
for i in high_iv_name.index.tolist():
group.append(len(df[i].value_counts()))
less_iv_name = pd.DataFrame({"var_name":high_iv_name.index,"IV":high_iv_name.values,"group":group})
print(less_iv_name)
less_iv_name.to_excel("D:/llm/联合建模算法开发/逻辑回归结果/cat_less_iv_1.xlsx")
df = v2_equif_bin(df, cat_over20, target)
cols_name = [i+"_Bin" for i in cat_over20]
over_iv_name = v2_cat_woe_iv(df, cols_name, target) # 计算woe 和IV值
over_iv_name.to_excel("D:/llm/联合建模算法开发/逻辑回归结果/cat_more_iv_2.xlsx")
iv_name=less_iv_name.var_name.tolist()+[cn.split('_Bin')[0] for cn in over_iv_name.index]
return iv_name
if __name__ == '__main__':
iv_name=main(df)
"""
## 4.2 对连续型变量用卡方分箱
卡方分箱:
max_bin = 5
分箱后需要手动调节:badrate
"""
iv_st_name=[cn.split('_Bin')[0] for cn in iv_name]
init_choose_name_desc=choose_columns_table.set_index('var_name').loc[iv_st_name,:].reset_index()
#init_choose_name_desc.to_excel('D:/llm/联合建模算法开发/逻辑回归结果/init_choose_name_desc_20190805.xlsx',index=False)
init_choose_table=init_choose_name_desc.loc[init_choose_name_desc['是否选用'].isin(['是']),:]#手动去除一些不能用的变量
numeric_columns = init_choose_table.loc[init_choose_table.var_type.isin(['\ufeff数据型','数据型', '数字型','数值型']), 'var_name'].values.tolist()
str_columns = init_choose_table.loc[init_choose_table.var_type.isin(['字符型', '\ufeff字符型','字符型 ' ]), 'var_name'].values.tolist()
date_columns = init_choose_table.loc[init_choose_table.var_type.isin(['\ufeff日期型','日期型']), 'var_name'].values.tolist()
print(len(numeric_columns)+len(str_columns)+len(date_columns))
df_choose1=df[init_choose_table.var_name.tolist()+['apply_y']].copy()
df_choose1.to_csv('D:/llm/联合建模算法开发/逻辑回归结果/df_choose1.csv',index=False)
target ="apply_y"
max_interval = 5
num_var=numeric_columns+date_columns
def v3_Chimerge(df,target,max_interval,num_var):
num_list=[]
for col in num_var:
print("{}开始进行分箱".format(col))
if -99 not in set(df[col]):
print("{}没有特殊取值".format(col))
cutOff = ChiMerge(df, col, target, max_interval=max_interval, special_attribute=[])
if len(cutOff)!=0:
df[col + '_Bin'] = df[col].map(lambda x: AssignBin(x, cutOff, special_attribute=[]))
dicts, regroup = BinBadRate(df, col + "_Bin", target, grantRateIndicator=0)
regroup.columns = ["group", "total", "bad", "bad_rate"]
regroup["var_name"] = col+"_Bin"
regroup = regroup.sort_values(by="group")
regroup.to_csv("D:/llm/联合建模算法开发/逻辑回归结果/regroup_Chi_cutbin__.csv", mode="a", header=True)
print(regroup)
num_list.append(col)
else:
max_interval = 5
cutOff = ChiMerge(df, col, target, max_interval=max_interval, special_attribute=[-99])
if len(cutOff)!=0:
df[col + '_Bin'] = df[col].map(lambda x: AssignBin(x, cutOff, special_attribute=[-99]))
dicts, regroup = BinBadRate(df, col + "_Bin", target, grantRateIndicator=0)
regroup.columns = ["group", "total", "bad", "bad_rate"]
regroup["var_name"] = col+"_Bin"
regroup = regroup.sort_values(by="group")
regroup.to_csv("D:/llm/联合建模算法开发/逻辑回归结果/regroup_Chi_cutbin__.csv", mode="a", header=True)
print(regroup)
num_list.append(col)
return df,num_list
df_choose1,num_list = v3_Chimerge(df_choose1,target,max_interval, num_var)
df_choose1.to_csv("D:/llm/联合建模算法开发/逻辑回归结果/cut_bin_train.csv")
print(df_choose1.columns.tolist())
print(df_choose1.shape[0])
# 计算woe 和IV
cols_list = [i+"_Bin" for i in num_list]
df_hig_iv = v2_cat_woe_iv(df_choose1,cols_list,target)
df_hig_iv.to_excel("D:/llm/联合建模算法开发/逻辑回归结果/high_iv_chi.xlsx")
# 经卡方分箱完筛选后的最终变量
iv_name1=[cn.split('_Bin')[0] for cn in df_hig_iv.index.tolist()]+str_columns
init_choose_name_desc=choose_columns_table.set_index('var_name').loc[iv_name1,:].reset_index()
#init_choose_name_desc.to_excel('D:/llm/联合建模算法开发/逻辑回归结果/init_choose_name_desc_20190805v1.xlsx',index=False)
"""
## 4.3 手动调整分箱,依据相关性进一步筛选变量
手动调节不满足单调性的变量
使其满足单调性
并计算IV
转换为woe值
时间外样本转换为woe值
连续变量的相关性分析
"""
init_choose_name_desc=pd.read_excel('D:/llm/联合建模算法开发/逻辑回归结果/init_choose_name_desc_20190805v1.xlsx')
init_choose_table=init_choose_name_desc.loc[init_choose_name_desc['是否选用'].isin(['是']),:]#手动去除一些不能用的变量
numeric_columns = init_choose_table.loc[init_choose_table.var_type.isin(['\ufeff数据型','数据型', '数字型','数值型']), 'var_name'].values.tolist()
str_columns = init_choose_table.loc[init_choose_table.var_type.isin(['字符型', '\ufeff字符型','字符型 ' ]), 'var_name'].values.tolist()
date_columns = init_choose_table.loc[init_choose_table.var_type.isin(['\ufeff日期型','日期型']), 'var_name'].values.tolist()
print(len(numeric_columns)+len(str_columns)+len(date_columns))
target ="apply_y"
col=[i+"_Bin" for i in numeric_columns+date_columns]+str_columns
df_choose2=df_choose1.copy()
def func(x,dict_woe):
return dict_woe[x]
""""把分箱好的变量转为woe值"""
def main(df,col,address):
target = "apply_y"
error_cols=[]
for var in col:
try:
woe_iv,regroup = CalcWOE(df, var, target)
bin = regroup.iloc[:, 0].tolist()
woe_value = regroup.iloc[:, -3].tolist()
dict_woe = dict(zip(bin, woe_value))
print(dict_woe)
df[var] = df[var].map(lambda x: func(x, dict_woe))
dicts, regroup_badrate = BinBadRate(df, var, target, grantRateIndicator=0)
regroup_badrate=regroup_badrate.rename(columns={var:var+'_woe'})
regroup=pd.merge(regroup,regroup_badrate,on=['total','bad'],how='left')
regroup=regroup[[var,var+'_woe',"total","bad","good",'bad_rate',"bad_pcnt","good_pcnt","WOE","IV","var_name"]]
regroup=regroup.rename(columns={var:'var_bin'})
print(regroup)
regroup.to_csv("D:/llm/联合建模算法开发/逻辑回归结果/num_bin_badrate.csv",mode="a",header=True)
except:
error_cols.append(var)
df.to_csv(address+"data_woe_value_num.csv",header=True) # 包含分类变量和连续变量
return error_cols,df
error_cols,df_choose3=main(df_choose2,col,'D:/llm/联合建模算法开发/逻辑回归结果/')
""" 相关性分析:大于0.5的变量进行筛选"""
#df_choose3= pd.read_csv("D:/llm/联合建模算法开发/逻辑回归结果/data_woe_value_num.csv")
df_choose4 = df_choose3[col].copy()
def corr_analysis(df):
cor = df.corr()
filter=0.5
# 列出高于阈值的正相关或者负相关的系数矩阵
cor.loc[:,:] = np.triu(cor, k=1)
cor = cor.stack()
high_cors = cor[(cor > filter) | (cor < -filter)]
# df_cor = pd.DataFrame(cor)
df_high_cor1 = pd.DataFrame(high_cors)
# df_cor.to_excel('./data/savefile_v1.xlsx', header=True)
# df_high_cor1.to_excel('./data/savefile_v2.xlsx', header=True)
return df,df_high_cor1
df_choose4, df_high_cor1= corr_analysis(df_choose4)
df_high_cor1=df_high_cor1.reset_index()
df_high_cor1.columns=['var_name1','var_name2','corref']
df_high_cor1.to_excel("D:/llm/联合建模算法开发/逻辑回归结果/df_high_cor.xlsx")
## 相关性去除iv更低的变量
cat_less_iv=pd.read_excel('D:/llm/联合建模算法开发/逻辑回归结果/cat_less_iv_1.xlsx')
cat_less_iv=cat_less_iv.set_index('var_name').loc[str_columns,'IV'].reset_index().rename(columns={'IV':'iv'})
df_hig_iv=pd.read_excel("D:/llm/联合建模算法开发/逻辑回归结果/high_iv_chi.xlsx")
df_hig_iv=pd.concat([df_hig_iv,cat_less_iv],axis=0)
df_hig_iv.to_csv('D:/llm/联合建模算法开发/逻辑回归结果/df_hig_iv_all.csv',index=False)
df_high_cor1=pd.read_excel("D:/llm/联合建模算法开发/逻辑回归结果/df_high_cor.xlsx")
del_cor=[]
for i in range(len(df_high_cor1)):
if df_hig_iv.loc[df_hig_iv.var_name.isin([df_high_cor1.var_name1.values[i]]),'iv'].values[0]>=\
df_hig_iv.loc[df_hig_iv.var_name.isin([df_high_cor1.var_name2.values[i]]),'iv'].values[0]:
del_cor.append(df_high_cor1.var_name2.values[i])
del_cor=list(set(del_cor))
remain_col=[cn for cn in col if cn not in del_cor] ##剔除一些最好不能用的变量
remain_col.remove('apt_ec_currentoverdue')
remain_col.remove('tx_cardid')
#剩72个
## 经相关性后去除的分箱情况
target ="apply_y"
df_choose5=df_choose1.copy()
""""把分箱好的变量转为woe值"""
def main(df,col,address):
target = "apply_y"
for var in col:
woe_iv,regroup = CalcWOE(df, var, target)
bin = regroup.iloc[:, 0].tolist()
woe_value = regroup.iloc[:, -3].tolist()
dict_woe = dict(zip(bin, woe_value))
print(dict_woe)
df[var] = df[var].map(lambda x: func(x, dict_woe))
dicts, regroup_badrate = BinBadRate(df, var, target, grantRateIndicator=0)
regroup_badrate=regroup_badrate.rename(columns={var:var+'_woe'})
regroup=pd.merge(regroup,regroup_badrate,on=['total','bad'],how='left')
regroup=regroup[[var,var+'_woe',"total","bad","good",'bad_rate',"bad_pcnt","good_pcnt","WOE","IV","var_name"]]
regroup=regroup.rename(columns={var:'var_bin'})
print(regroup)
regroup.to_csv(address+".csv",mode="a",header=True)
main(df_choose5,remain_col,"D:/llm/联合建模算法开发/逻辑回归结果/final_test_num_bin_badrate1")
"""
## 4.4 进行交叉验证,进一步筛选变量
分箱之后:
调整单调性
调整分箱稳定性
"""
df_choose7=df_choose5[remain_col+['apply_y']].copy()
for cn in df_choose7.columns:
print(cn,df_choose7[cn].unique())
remain_col.remove('m12_apply_platform_cnt_Bin')
remain_col.remove('vehicle_evtrpt_mileage_Bin')
remain_col.remove('jxl_120_record')
remain_col.remove('jxl_id_operator')
remain_col.remove('d1_id_relate_device_num_Bin')
remain_col.remove('contact_court_cnt_Bin')
x = df_choose7[remain_col]
y = df["apply_y"]
rf = RandomForestClassifier()
rf.fit(x,y)
print(pd.DataFrame({"var_name": remain_col, "importance_": rf.feature_importances_}).sort_values("importance_",ascending=False))
"""
交叉验证,挑选剔除后使模型性能下降的变量
"""
def cross_val(df,finally_columns_name):
init_score=1
del_cols=[]
col = finally_columns_name.copy()
for i in finally_columns_name:
col.remove(i)
if len(col)>=1:
print(i)
x = df[col]
y = df["apply_y"]
lr = LogisticRegression()
scores = cross_val_score(lr,x,y,cv=10,scoring="roc_auc")
score = scores.mean()
if score>init_score:
del_cols.append(i)
break
init_score=score
print(score)
return del_cols
## 迭代,使得不在剔除变量为止为停止准则,剩余48个变量
i=0
temp1=remain_col
while i<=100:
del_cols=cross_val(df_choose7,remain_col)
remain_col=[cn for cn in remain_col if cn not in del_cols ]
if len(del_cols)==0:
break
print(del_cols)
i=i+1
## 利用lasso回归继续变量筛选
def lasscv(df,finally_columns_name):
col = []
init_score=0
del_cols=[]
for i in finally_columns_name:
col.append(i)
x = df[col]
x = np.matrix(x)
y = df["apply_y"]
la = LassoCV( cv=10)
la.fit(x,y)
print(i)
print(la.score(x,y))
score=la.score(x,y)
if score<init_score:
del_cols.append(i)
break
init_score=score
return del_cols
## 迭代,使得不在增加变量为止为停止准则
remain_col=['accu_loan_amt_Bin',
'agreement_month_repay_Bin',
'apt_ec_overduephasetotallastyear_Bin',
'avg_sms_cnt_l6m_Bin',
'big_creditmoney_singleterm_score_Bin',
'consumerloan_score_Bin',
'contact_bank_call_cnt_Bin',
'contact_car_contact_afternoon_Bin',
'contact_unknown_contact_early_morning_Bin',
'contacts_class1_cnt_Bin',
'i_cnt_grp_partner_loan_all_all_Bin',
'i_cnt_mobile_all_all_180day_Bin',
'i_cv_cnt_30daypartner_all_all_360day_Bin',
'i_freq_record_loan_thirdservice_365day_Bin',
'i_max_length_record_loan_p2pweb_365day_Bin',
'i_mean_freq_node_seq_partner_loan_all_all_Bin',
'i_mean_length_event_loan_imbank_30day_Bin',
'i_pctl_cnt_ic_partner_loan_insurance_60day_Bin',
'i_pctl_cnt_ic_partner_loan_p2pweb_365day_Bin',
'i_ratio_freq_day_loan_imbank_365day_Bin',
'i_ratio_freq_day_login_p2pweb_365day_Bin',
'i_ratio_freq_record_loan_consumerfinance_365day_Bin',
'i_ratio_freq_record_loan_offloan_180day_Bin',
'jxl_id_comb_othertel_num_Bin',
'max_call_in_cnt_l6m_Bin',
'max_overdue_amount_Bin',
'max_overdue_terms_Bin',
'max_total_amount_l6m_Bin',
'other_org_count_Bin',
'phone_used_time_Bin',
'qtorg_query_orgcnt_Bin',
'rational_score_Bin',
'times_by_current_org_Bin',
'vehicle_evtrpt_b2bprice_Bin',
'cell_reg_time_Bin',
'email_info_date_Bin',
'prof_title_info_date_Bin',
'vehicle_minput_lastreleasedate_Bin',
'apt_lastloanmode',
'vehicle_minput_lastmortgagerinfo',
'high_acade_qua']
##lasso迭代,筛选变量,剩余38个
i=0
del_cols=[]
temp=remain_col
while i<=100:
del_cols=lasscv(df_choose7,remain_col)
if len(del_cols)==0:
break
remain_col=set(remain_col)-set(del_cols)
remain_col=list(remain_col)
print(del_cols)
i=i+1
## 共线性和p值去除变量,剩余38个
df_choose6=df_choose1[remain_col+['apply_y']].copy()
main(df_choose6,remain_col,"D:/llm/联合建模算法开发/逻辑回归结果/final_test_num_bin_badrate2")
while 1:
df_choose7=df_choose6[remain_col+['apply_y']].copy()
x = df_choose7[remain_col]
y = df_choose7["apply_y"]
lgt=sm.Logit(y,x[remain_col])
result=lgt.fit()
print( result.summary2())
p_value=pd.DataFrame(result.pvalues).reset_index().rename(columns={'index':'var_name',0:'p_value'})
p_value_max=p_value['p_value'].max()
p_value_max_cols=p_value.loc[p_value['p_value']>=p_value_max,'var_name'].values[0]
print(p_value_max_cols)
if p_value_max<=0.05:
break
remain_col.remove(p_value_max_cols)
while 1:
max_vif ,vif_df= Vif(df_choose7, remain_col)
print(vif_df)
if max_vif['vif'].values[0]<=3:
break
remain_col.remove(max_vif['var_name'].values[0])
vif_df.to_excel('D:/llm/联合建模算法开发/逻辑回归结果/vif_df_20190729.xlsx')
df_choose7=df_choose6[remain_col+['apply_y']].copy()
x = df_choose7[remain_col]
y = df_choose7["apply_y"]
x_train,x_test,y_train,y_test=train_test_split(x,y)
lr=LogisticRegression(penalty='l2',C=1,n_jobs=-1,verbose=0,random_state=0)
lr.fit(x_train,y_train)
columns_coef=pd.DataFrame(lr.coef_[0].tolist(),index=remain_col).reset_index().rename(columns={'index':'var_name',0:'coef'})
final_columns=columns_coef.loc[columns_coef['coef']>0,'var_name'].values.tolist()
while 1:
lgt=sm.Logit(y_train,x_train[final_columns])
result=lgt.fit()
print( result.summary2())
p_value=pd.DataFrame(result.pvalues).reset_index().rename(columns={'index':'var_name',0:'p_value'})
p_value_max=p_value['p_value'].max()
p_value_max_cols=p_value.loc[p_value['p_value']>=p_value_max,'var_name'].values[0]
print(p_value_max_cols)
if p_value_max<=0.05:
break
final_columns.remove(p_value_max_cols)
df_hig_iv = v2_cat_woe_iv(df_choose7,final_columns,'apply_y')
df_hig_iv=df_hig_iv.reset_index().rename(columns={'index':'var_name',0.0:'iv'}).sort_values('iv',ascending=False)
## 相关性去除
final_columns.remove('consumerloan_score_Bin')
final_columns.remove('big_creditmoney_singleterm_score_Bin')
final_columns.remove('phone_used_time_Bin')
##最终剩下的变量
final_columns=['vehicle_minput_lastmortgagerinfo',
'i_mean_freq_node_seq_partner_loan_all_all_Bin',
'rational_score_Bin',
'contact_unknown_contact_early_morning_Bin',
'vehicle_minput_lastreleasedate_Bin',
'i_pctl_cnt_ic_partner_loan_insurance_60day_Bin',
'cell_reg_time_Bin',
'i_cnt_grp_partner_loan_all_all_Bin',
'i_ratio_freq_record_loan_offloan_180day_Bin',
'prof_title_info_date_Bin',
'apt_ec_overduephasetotallastyear_Bin',
'i_cnt_mobile_all_all_180day_Bin',
'contact_bank_call_cnt_Bin',
'apt_lastloanmode',
'max_total_amount_l6m_Bin',
'contact_car_contact_afternoon_Bin',
'i_freq_record_loan_thirdservice_365day_Bin',
'vehicle_evtrpt_b2bprice_Bin',
'avg_sms_cnt_l6m_Bin',
'times_by_current_org_Bin',
'max_call_in_cnt_l6m_Bin',
'max_overdue_terms_Bin']
"""
##
分箱之后:
调整单调性
调整分箱稳定性
"""
df_choose6=df_choose1[final_columns+['apply_y']].copy()
df_choose6['vehicle_minput_lastmortgagerinfo']=df_choose6['vehicle_minput_lastmortgagerinfo'].replace({'-99':'NA&3,4'})
main(df_choose6,final_columns,"D:/llm/联合建模算法开发/逻辑回归结果/final_test_num_bin_badrate3")
df_hig_iv = v2_cat_woe_iv(df_choose6,final_columns,'apply_y')
df_hig_iv=df_hig_iv.reset_index().rename(columns={'index':'var_name',0.0:'iv'})
choose_column=df_hig_iv.loc[df_hig_iv.iv>=0.02,'var_name'].values.tolist()
## 训练模型
df_choose7=df_choose6[choose_column+['apply_y']].copy()
#df_choose7.to_excel('D:/llm/联合建模算法开发/逻辑回归结果/训练集数据_20190806.xlsx')
x = df_choose7[choose_column]
y = df_choose7["apply_y"]
x_train,x_test,y_train,y_test=train_test_split(x,y)
lgt=sm.Logit(y_train,x_train)
result=lgt.fit()
print( result.summary2())
choose_column.remove('apt_ec_overduephasetotallastyear_Bin')
choose_column.remove('apt_lastloanmode')
lr=LogisticRegression(penalty='l2',C=1,n_jobs=-1,verbose=0,random_state=0)
lr.fit(x_train[choose_column],y_train)
score=lr.predict_proba(x_train[choose_column])[:,1]
min_scores = r_p_chart(y_train, score, part=20)
min_scores = [round(i, 5) for i in min_scores]
min_scores[19] = 0
cuts = [round(min_scores[i] * 100.0, 3) for i in range(20)[::-1]] + [100.0]
joblib.dump(lr,'D:/llm/联合建模算法开发/逻辑回归结果/lr_alldata_20190806.pkl')
columns_coef=pd.DataFrame(lr.intercept_.tolist()+lr.coef_[0].tolist(),index=['intercept']+choose_column).reset_index().rename(columns={'index':'var_name',0:'coef'})
columns_coef.to_excel('D:/llm/联合建模算法开发/逻辑回归结果/columns_coef_20190806.xlsx',index=False)
print('训练集')
pred_p = lr.predict_proba(x_train[choose_column])[:, 1]
fpr, tpr, th = roc_curve(y_train, pred_p)
ks = tpr - fpr
print('train ks: ' + str(max(ks)))
print(roc_auc_score(y_train, pred_p))
r_p_chart2(y_train, pred_p, min_scores, part=20)
print('测试集')
pred_p2 = lr.predict_proba(x_test[choose_column])[:, 1]
fpr, tpr, th = roc_curve(y_test, pred_p2)
ks2 = tpr - fpr
print('test ks: ' + str(max(ks2)))
print(roc_auc_score(y_test, pred_p2))
r_p_chart2(y_test, pred_p2, min_scores, part=20)
print('建模全集')
pred_p3 = lr.predict_proba(x[choose_column])[:, 1]
fpr, tpr, th = roc_curve(y, pred_p3)
ks3 = tpr - fpr
print('all ks: ' + str(max(ks3)))
print(roc_auc_score(y, pred_p3))
r_p_chart2(y, pred_p3, min_scores, part=20)
## 入模变量分箱choose_column
def avg_sms_cnt_l6m(x):
if x == -99:
return 0.0463
elif x < 5.833333333333332:
return -0.2544
elif x < 21.166666666666668:
return -0.1303
elif x < 68.66666666666667:
return 0.2065
elif x>= 68.66666666666667:
return 0.0893
def cell_reg_time(x):
if x == -99:
return 0.0359
elif x < 923.0:
return 0.3336
elif x< 3958.0:
return 0.0005
elif x < 5959.0:
return -0.353
elif x>= 5959.0:
return -0.7457
def contact_bank_call_cnt(x):
if x == -99:
return 0.0463
elif x < 7.0:
return -0.1887
elif x< 21.0:
return 0.1478
elif x < 36.0:
return 0.2769
elif x>= 36.0:
return 0.5728
def contact_car_contact_afternoon(x):
if x == -99:
return 0.0463
elif x < 5.0:
return -0.089
elif x< 11.0:
return 0.2716
elif x>= 11.0:
return 0.3853
def contact_unknown_contact_early_morning(x):
if x == -99:
return 0.0463
elif x < 45.0:
return -0.1839
elif x < 143.0:
return 0.2056
elif x< 253.0:
return 0.3757
elif x>= 253.0:
return 0.4198
def i_cnt_grp_partner_loan_all_all(x):
if x == -99:
return -1.6913
elif x < 1.0:
return 0.2569
elif x< 3.0:
return -0.9927
elif x< 63.0:
return -0.2717
elif x>= 63.0:
return 0.1975
def i_cnt_mobile_all_all_180day(x):
if x == -99:
return -1.6913
elif x< 2.0:
return -0.2916
elif x< 3.0:
return 0.3239
elif x>= 3.0:
return 0.789
def i_freq_record_loan_thirdservice_365day(x):
if x == -99:
return -1.6913
elif x < 1.0:
return -0.0709
elif x< 2.0:
return 0.6482
elif x>= 2.0:
return 1.0176
def i_mean_freq_node_seq_partner_loan_all_all(x):
if x == -99:
return -1.6913
elif x < -999.0:
return -0.0222
elif x< 1.3889:
return 0.104
elif x< 2.0:
return 0.1767
elif x>= 2.0:
return -0.326
def i_pctl_cnt_ic_partner_loan_insurance_60day(x):
if x == -99:
return -1.6913
elif x < 0.7752:
return -0.7926
elif x < 0.9598:
return -0.147
elif x>= 0.9598:
return 0.4604
def i_ratio_freq_record_loan_offloan_180day(x):
if x == -99:
return -1.6913
elif x < -999.0:
return -0.3641
elif x< 0.1316:
return -0.0927
elif x< 0.2131:
return 0.6413
elif x>= 0.2131:
return 0.3469
def max_call_in_cnt_l6m(x):
if x == -99:
return 0.0463
elif x < 134.0:
return -0.2253
elif x< 337.0:
return -0.1184
elif x< 570.0:
return 0.2266
elif x>=570.0:
return 0.5305
def max_overdue_terms(x):
if x == -99:
return -0.2221
elif x < 1.0:
return 0.162
elif x< 2.0:
return 1.2763
elif x>= 2.0:
return 1.3173
def max_total_amount_l6m(x):
if x == -99:
return 0.0463
elif x < 110.39:
return -0.2687
elif x< 298.8:
return -0.0599
elif x< 462.91:
return 0.2699
elif x>= 462.91:
return 0.4125
def prof_title_info_date(x):
if x == -99:
return -0.188
elif x < -29.0:
return 3.2165
elif x < -6.0:
return 2.6882
elif x < 145.0:
return 0.3216
elif x>=145.0:
return 0.0506
def rational_score(x):
if x < 424.0:
return -0.2805
elif x< 467.0:
return 1.0683
elif x < 492.0:
return 0.8485
elif x< 601.0:
return 0.0829
elif x>= 601.0:
return -0.9137
def times_by_current_org(x):
if x == -99:
return -0.1334
elif x < 2.0:
return -0.0704
elif x< 3.0:
return 0.2697
elif x < 4.0:
return 0.5512
elif x>= 4.0:
return 1.5033
def vehicle_evtrpt_b2bprice(x):
if x == -99:
return 0.3172
elif x < 3.33:
return 0.3671
elif x< 4.3:
return 0.3085
elif x < 10.6:
return -0.0114
elif x>= 10.6:
return -0.2644
def vehicle_minput_lastmortgagerinfo(x):
if x == -99:
return -0.2394
elif x==1:
return 0.4462
elif x ==2:
return 0.2031
elif x ==3:
return -0.6617
elif x ==4:
return -0.225
elif x ==5:
return 0.2546
def vehicle_minput_lastreleasedate(x):
if x == -99:
return -0.2392
elif x< 1.0:
return 0.1718
elif x <6.0:
return 0.6836
elif x < 164.0:
return 0.0425
elif x>= 164.0:
return -0.2837
## 时间外验证进行重新评估
outtime_testdata=pd.read_table('outtime_test_20190806.txt',dtype={'app_applycode':str},sep='\u0001')
outtime_testdata=outtime_testdata.replace('\\N',np.nan)
ylabel_data=pd.read_table('contractno_ylabel_data.txt',sep='\u0001',dtype={'applycode':str}) #车贷申请表
outtime_testdata=pd.merge(outtime_testdata,ylabel_data,left_on=['app_applycode','contractno'],right_on=['applycode','contractno'],how='inner')
del ylabel_data
gc.collect()
outtime_testdata=outtime_testdata[outtime_testdata.apply_y.isin([0,1])].copy()
td_union_data=pd.read_excel('CarLoanData_BringToTounawang.xlsx')
outtime_testdata['applydate']=outtime_testdata['app_applydate'].str.slice(0,10)
outtime_testdata=pd.merge(outtime_testdata,td_union_data,left_on=['credentials_no_md5','cust_name_md5','mobile_md5','applydate'],right_on=
['credentials_no_md5_x','cust_name_md5_x','mobile_md5_x','applydate'],how='inner')
del td_union_data
gc.collect()
outtime_testdata.columns=[cn.lower() for cn in outtime_testdata.columns]
touna_cd_score_for=pd.read_csv('touna_cd_score_for.csv')
touna_cd_score_for['applydate']=touna_cd_score_for['apply_date'].str.slice(0,10)
outtime_testdata=pd.merge(outtime_testdata,touna_cd_score_for,on=['credentials_no_md5', 'cust_name_md5', 'mobile_md5', 'applydate'],how='inner')
outtime_testdata=outtime_testdata.drop_duplicates(['credentials_no_md5', 'cust_name_md5', 'mobile_md5', 'applydate'])
columns_transform={'通用分':'rational_score','小额现金贷多期分':'small_creditmoney_multiplyterm_score','小额现金贷单期分':'small_creditmoney_singleterm_score',
'银行分':'bank_score','消费金融分':'consumerloan_score','大额现金贷分':'big_creditmoney_singleterm_score'}
outtime_testdata=outtime_testdata.rename(columns=columns_transform)
de_dict_var = pd.read_excel('de_dict_vars_20190722.xlsx')
for i, _ in de_dict_var.iterrows():
name = de_dict_var.loc[i, 'var_name']
default = de_dict_var.loc[i, 'default']
if default != '""' and name in set(outtime_testdata.columns) and name!='app_applycode':
try:
outtime_testdata[name] = outtime_testdata[name].astype('float64')
if (outtime_testdata[name] == float(default)).sum() > 0:
outtime_testdata.loc[outtime_testdata[name] == float(default), name] = np.nan
except:
pass
elif default == '""' and name in set(outtime_testdata.columns) and name!='app_applycode':
try:
outtime_testdata[name] = outtime_testdata[name].astype('float64')
if (outtime_testdata[name] == float(-99)).sum() > 0:
outtime_testdata.loc[outtime_testdata[name] == float(-99), name] = np.nan
if (outtime_testdata[name] == '-99').sum() > 0:
outtime_testdata.loc[outtime_testdata[name] == '-99', name] = np.nan
except:
pass
for col in ['vehicle_minput_lastreleasedate','prof_title_info_date','cell_reg_time']: # 去除异常的时间
try:
outtime_testdata.loc[outtime_testdata[col] >= '2030-01-01', col] = np.nan
except:
pass
def date_cal(x, app_applydate): # 计算申请日期距离其他日期的天数
days_dt = pd.to_datetime(app_applydate) - pd.to_datetime(x)
return days_dt.dt.days
for col in ['prof_title_info_date','vehicle_minput_lastreleasedate','cell_reg_time']:
if col != 'app_applydate':
try:
if col not in ['vehicle_minput_drivinglicensevaliditydate']:
outtime_testdata[col] = date_cal(outtime_testdata[col], outtime_testdata['app_applydate'])
else:
outtime_testdata[col] = date_cal(outtime_testdata['app_applydate'], outtime_testdata[col]) # 计算行驶证有效期限距离申请日期的天数
except:
pass
outtime_testdata=outtime_testdata.fillna(-99)
outtime_testdata["avg_sms_cnt_l6m"+'_Bin'] = outtime_testdata["avg_sms_cnt_l6m"].map(lambda x: avg_sms_cnt_l6m(x))
outtime_testdata["cell_reg_time"+'_Bin'] = outtime_testdata["cell_reg_time"].map(lambda x: cell_reg_time(x))
outtime_testdata["contact_bank_call_cnt"+'_Bin'] = outtime_testdata["contact_bank_call_cnt"].map(lambda x: contact_bank_call_cnt(x))
outtime_testdata["contact_car_contact_afternoon"+'_Bin'] = outtime_testdata["contact_car_contact_afternoon"].map(lambda x: contact_car_contact_afternoon(x))
outtime_testdata["contact_unknown_contact_early_morning"+'_Bin'] = outtime_testdata["contact_unknown_contact_early_morning"].map(lambda x: contact_unknown_contact_early_morning(x))
outtime_testdata["i_cnt_grp_partner_loan_all_all"+'_Bin'] = outtime_testdata["i_cnt_grp_partner_loan_all_all"].map(lambda x: i_cnt_grp_partner_loan_all_all(x))
outtime_testdata["i_cnt_mobile_all_all_180day"+'_Bin'] = outtime_testdata["i_cnt_mobile_all_all_180day"].map(lambda x: i_cnt_mobile_all_all_180day(x))
outtime_testdata["i_freq_record_loan_thirdservice_365day"+'_Bin'] = outtime_testdata["i_freq_record_loan_thirdservice_365day"].map(lambda x: i_freq_record_loan_thirdservice_365day(x))
outtime_testdata["i_mean_freq_node_seq_partner_loan_all_all"+'_Bin'] = outtime_testdata["i_mean_freq_node_seq_partner_loan_all_all"].map(lambda x: i_mean_freq_node_seq_partner_loan_all_all(x))
outtime_testdata["i_pctl_cnt_ic_partner_loan_insurance_60day"+'_Bin'] = outtime_testdata["i_pctl_cnt_ic_partner_loan_insurance_60day"].map(lambda x: i_pctl_cnt_ic_partner_loan_insurance_60day(x))
outtime_testdata["i_ratio_freq_record_loan_offloan_180day"+'_Bin'] = outtime_testdata["i_ratio_freq_record_loan_offloan_180day"].map(lambda x: i_ratio_freq_record_loan_offloan_180day(x))
outtime_testdata["max_call_in_cnt_l6m"+'_Bin'] = outtime_testdata["max_call_in_cnt_l6m"].map(lambda x: max_call_in_cnt_l6m(x))
outtime_testdata["max_overdue_terms"+'_Bin'] = outtime_testdata["max_overdue_terms"].map(lambda x: max_overdue_terms(x))
outtime_testdata["max_total_amount_l6m"+'_Bin'] = outtime_testdata["max_total_amount_l6m"].map(lambda x: max_total_amount_l6m(x))
outtime_testdata["prof_title_info_date"+'_Bin'] = outtime_testdata["prof_title_info_date"].map(lambda x: prof_title_info_date(x))
outtime_testdata["rational_score"+'_Bin'] = outtime_testdata["rational_score"].map(lambda x: rational_score(x))
outtime_testdata["times_by_current_org"+'_Bin'] = outtime_testdata["times_by_current_org"].map(lambda x: times_by_current_org(x))
outtime_testdata["vehicle_evtrpt_b2bprice"+'_Bin'] = outtime_testdata["vehicle_evtrpt_b2bprice"].map(lambda x: vehicle_evtrpt_b2bprice(x))
outtime_testdata["vehicle_minput_lastmortgagerinfo"] = outtime_testdata["vehicle_minput_lastmortgagerinfo"].map(lambda x: vehicle_minput_lastmortgagerinfo(x))
outtime_testdata["vehicle_minput_lastreleasedate"+'_Bin'] = outtime_testdata["vehicle_minput_lastreleasedate"].map(lambda x: vehicle_minput_lastreleasedate(x))
for cn in choose_column:
print(cn,outtime_testdata[cn].unique())
print('时间外验证')
pred_p4= lr.predict_proba(outtime_testdata[choose_column])[:, 1]
fpr, tpr, th = roc_curve(outtime_testdata.apply_y, pred_p4)
ks3 = tpr - fpr
print('all ks: ' + str(max(ks3)))
print(roc_auc_score(outtime_testdata.apply_y, pred_p4))
r_p_chart2(outtime_testdata.apply_y, pred_p4, min_scores, part=20)
## 入模变量稳定性评估
df_choose7['app_applydate']=model_data_new['app_applydate']
full_data=pd.concat([df_choose7[choose_column+['apply_y','app_applydate']],outtime_testdata[choose_column+['apply_y','app_applydate']]])
full_data['applymonth']=full_data['app_applydate'].str.slice(0,7)
full_data=full_data[full_data['applymonth']!='2018-12']
#full_data.to_csv('D:/llm/联合建模算法开发/逻辑回归结果/full_data_20190722.csv',index=False)
def judge_stable_analyze(data,col,address):
num_counts=pd.crosstab(data['applymonth'],data[col],margins=True)
num_counts_percents=num_counts.div(num_counts['All'],axis=0)
num_counts_percents=num_counts_percents.drop('All',axis=1)
num_counts_percents=num_counts_percents.drop('All',axis=0)
bad_percents=pd.crosstab(index=data['applymonth'],columns=data[col],values=data['apply_y'],aggfunc='mean',margins=True)
bad_percents=bad_percents.drop('All',axis=1)
bad_percents=bad_percents.drop('All',axis=0)
plt.cla()
plt.figure(2)
ax1 = plt.subplot(121)
ax2 = plt.subplot(122)
for cn in num_counts_percents.columns:
plt.sca(ax1)
plt.plot(num_counts_percents[cn])
plt.xticks(rotation=90)
plt.legend()
plt.title(col)
plt.sca(ax2)
plt.plot(bad_percents[cn])
plt.xticks(rotation=90)
plt.legend()
plt.title(col)
plt.savefig(address+col+'.jpg')
plt.show()
count_data=pd.concat([num_counts_percents,bad_percents],axis=0)
count_data.to_csv(address+col+'.csv')
for cn in choose_column:
judge_stable_analyze(full_data,cn,'D:/llm/联合建模算法开发/逻辑回归结果/逾期分析_加时间外验证/')
## 申请样本稳定性psi计算
psi_data=pd.read_table('psi_alldata_20190718.txt',dtype={'app_applycode':str},sep='\u0001')
psi_data=psi_data.replace('\\N',np.nan)
psi_data=pd.merge(psi_data,final_data[['app_applycode','y']],on='app_applycode',how='left')
newloan_label=pd.read_excel('./2019年3月7日之前的新增贷款客户申请编号.xlsx',converters={'applycode':str}) # 选择新增客户
psi_data=pd.merge(psi_data,newloan_label,left_on='app_applycode',right_on='applycode',how='inner')
del newloan_label
cn=[cn.split('_Bin')[0] for cn in choose_column]
psi_data=psi_data[cn+['y','app_applydate']].copy()
psi_data.loc[psi_data.y.isnull(),'y']=0
de_dict_var = pd.read_excel('DE_Var_Select_20190613.xlsx')
for i, _ in de_dict.iterrows():
name = de_dict_var.loc[i, 'var_name']
default = de_dict_var.loc[i, 'default']
if default != '""' and name in set(psi_data.columns) and name!='app_applycode':
try:
psi_data[name] = psi_data[name].astype('float64')
if (psi_data[name] == float(default)).sum() > 0:
psi_data.loc[psi_data[name] == float(default), name] = np.nan
except:
pass
elif default == '""' and name in set(psi_data.columns) and name!='app_applycode':
try:
psi_data[name] = psi_data[name].astype('float64')
if (psi_data[name] == float(-99)).sum() > 0:
psi_data.loc[psi_data[name] == float(-99), name] = np.nan
if (psi_data[name] == '-99').sum() > 0:
psi_data.loc[psi_data[name] == '-99', name] = np.nan
except:
pass
for col in ['email_info_date','vehicle_minput_lastreleasedate']: # 去除异常的时间
try:
psi_data.loc[psi_data[col] >= '2030-01-01', col] = np.nan
except:
pass
def date_cal(x, app_applydate): # 计算申请日期距离其他日期的天数
days_dt = pd.to_datetime(app_applydate) - pd.to_datetime(x)
return days_dt.dt.days
for col in ['email_info_date','vehicle_minput_lastreleasedate']:
if col != 'app_applydate':
try:
if col not in ['vehicle_minput_drivinglicensevaliditydate']:
psi_data[col] = date_cal(psi_data[col], psi_data['app_applydate'])
else:
psi_data[col] = date_cal(psi_data['app_applydate'], psi_data[col]) # 计算行驶证有效期限距离申请日期的天数
except:
pass
psi_data=psi_data.fillna(-998)
psi_data["avg_sms_cnt_l6m"+'_Bin'] = psi_data["avg_sms_cnt_l6m"].map(lambda x: avg_sms_cnt_l6m(x))
psi_data["class2_black_cnt"+'_Bin'] = psi_data["class2_black_cnt"].map(lambda x: class2_black_cnt(x))
psi_data["coll_contact_total_sms_cnt"+'_Bin'] = psi_data["coll_contact_total_sms_cnt"].map(lambda x: coll_contact_total_sms_cnt(x))
psi_data["contact_bank_call_in_cnt"+'_Bin'] = psi_data["contact_bank_call_in_cnt"].map(lambda x: contact_bank_call_in_cnt(x))
psi_data["contact_bank_contact_weekday"+'_Bin'] = psi_data["contact_bank_contact_weekday"].map(lambda x: contact_bank_contact_weekday(x))
psi_data["contact_unknown_contact_early_morning"+'_Bin'] = psi_data["contact_unknown_contact_early_morning"].map(lambda x: contact_unknown_contact_early_morning(x))
psi_data["jxl_id_comb_othertel_num"+'_Bin'] = psi_data["jxl_id_comb_othertel_num"].map(lambda x: jxl_id_comb_othertel_num(x))
psi_data["m12_apply_platform_cnt"+'_Bin'] = psi_data["m12_apply_platform_cnt"].map(lambda x: m12_apply_platform_cnt(x))
psi_data["m3_id_relate_email_num"+'_Bin'] = psi_data["m3_id_relate_email_num"].map(lambda x: m3_id_relate_email_num(x))
#psi_data["m3_id_relate_homeaddress_num"+'_Bin'] = psi_data["m3_id_relate_homeaddress_num"].map(lambda x: m3_id_relate_homeaddress_num(x))
psi_data["max_call_cnt_l6m"+'_Bin'] = psi_data["max_call_cnt_l6m"].map(lambda x: max_call_cnt_l6m(x))
psi_data["max_overdue_terms"+'_Bin'] = psi_data["max_overdue_terms"].map(lambda x: max_overdue_terms(x))
psi_data["max_total_amount_l6m"+'_Bin'] = psi_data["max_total_amount_l6m"].map(lambda x: max_total_amount_l6m(x))
psi_data["phone_used_time"+'_Bin'] = psi_data["phone_used_time"].map(lambda x: phone_used_time(x))
psi_data["qtorg_query_orgcnt"+'_Bin'] = psi_data["qtorg_query_orgcnt"].map(lambda x: qtorg_query_orgcnt(x))
psi_data["times_by_current_org"+'_Bin'] = psi_data["times_by_current_org"].map(lambda x: times_by_current_org(x))
#psi_data["email_info_date"+'_Bin'] = psi_data["email_info_date"].map(lambda x: email_info_date(x))
psi_data["vehicle_minput_lastreleasedate"+'_Bin'] = psi_data["vehicle_minput_lastreleasedate"].map(lambda x: vehicle_minput_lastreleasedate(x))
for cn in choose_column:
print(cn,psi_data[cn].unique())
pred_p5= lr.predict_proba(psi_data[choose_column])[:, 1]
psi_data['newloan_score3']=pred_p5*100
## 建模器申请样本集
modeltime_applydata=psi_data[(pd.to_datetime(psi_data.app_applydate)>=pd.to_datetime('2018-03-01 00:00:00')) & (pd.to_datetime(psi_data.app_applydate)<=pd.to_datetime('2018-10-31 23:59:59'))]
modeltime_applydata.loc[modeltime_applydata.y.isnull(),'y']=0
r_p_chart2(modeltime_applydata.y, (modeltime_applydata.newloan_score3.values/100).tolist(), min_scores, part=20)
fpr, tpr, th = roc_curve(modeltime_applydata.y, (modeltime_applydata.newloan_score3.values/100).tolist())
ks3 = tpr - fpr
print('all ks: ' + str(max(ks3)))
## 时间外样本
outtime_psidata=modeltime_applydata=psi_data[(pd.to_datetime(psi_data.app_applydate)>=pd.to_datetime('2018-11-01 00:00:00')) & (pd.to_datetime(psi_data.app_applydate)<=pd.to_datetime('2018-11-30 23:59:59'))]
outtime_psidata['newloan_score3_segment']=pd.cut(outtime_psidata['newloan_score3'],cuts,right=False)
num_count=outtime_psidata['newloan_score3_segment'].value_counts().sort_index(ascending=False)
num_count_percents=num_count/num_count.sum()
| [
"noreply@github.com"
] | airuibel.noreply@github.com |
5f180caa46869257a5cd62e3e7ef0bdb9717cd32 | 1ebe19998b19fcda64c030f2ae783d31c084cb5a | /projects/NoteBook/run_client.py | 150adadfece777ae85167c1eea17c8a6a15eae7d | [
"MIT"
] | permissive | jskyzero/Python.Playground | f287583c6d2eefc1c7ccda421aa669e4cdd4e533 | 6a6d815d2307d17657b7622201874e4ee7f2324a | refs/heads/master | 2023-04-05T11:42:04.362850 | 2020-01-12T14:37:58 | 2020-01-12T14:37:58 | 359,559,533 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 120 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from notebook.client import Client
CLIENT = Client()
CLIENT.main_func()
| [
"jskyzero@outlook.com"
] | jskyzero@outlook.com |
8becb8342ce4e4a5d50e6184ef347e3199559245 | 172f51a6d83a47f52f40fad7bc1009bb1a31dfe2 | /cuip/plumes/extract1pixel.py | 9330ce26841dbff5c48aa9e8323e6de4aab98d29 | [] | no_license | gdobler/cuip | 29e2fc00d034f9efc980d8ba9bb7c43cf38533e0 | 9c2c2e62a0e5219244e40a0c370c72e6d5f9accd | refs/heads/master | 2021-01-19T04:32:14.780033 | 2018-02-22T22:00:45 | 2018-02-22T22:00:45 | 50,519,966 | 1 | 2 | null | 2017-02-26T16:19:17 | 2016-01-27T16:15:06 | Python | UTF-8 | Python | false | false | 546 | py | import glob
import pylab as pl
import numpy as np
import os
from findImageSize import findsize
from utils import RawImages
files = (glob.glob(os.getenv("PLUMESDIR") + "/*raw"))
imsize = findsize(files[0], outputdir=os.getenv("PLUMESDIR"),
imsizefile = "oct08_2013-10-25_imgsize.txt")
pixels = np.array([(50,50), (100,20)])
pixvalues = np.zeros((len(files), pixels.shape[0], 3)) * np.nan
for i, f in enumerate(files):
pixvalues[i] = RawImages(fl=[f], lim=1, imsize=imsize, pixels=pixels).pixvals
print (pixvalues)
| [
"fedhere@gmail.com"
] | fedhere@gmail.com |
7492f69aa14995983da604ca9f0147a8ac31dcef | 4c74f80edca6e83df90b95f92c9bcd209d88fb10 | /Lab1.3/snmp.py | 28eaa949f1e34b1f1e8c46a401bfe49201da7248 | [] | no_license | ramilius/p4ne | ca5b7b79cfbcb43830f4e7949ec680e55d1210e2 | be03170d3c2e6c34abc515173bbcf7dde27ea575 | refs/heads/master | 2021-06-25T10:35:29.189830 | 2017-08-30T13:59:15 | 2017-08-30T13:59:15 | 100,274,920 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 723 | py | from pysnmp.hlapi import *
result = getCmd(SnmpEngine(),CommunityData('public', mpModel=0),
UdpTransportTarget(('10.31.70.107',161)),
ContextData(),
ObjectType(ObjectIdentity('SNMPv2-MIB', 'sysDescr', 0)))
for x in result:
v = x[3]
print(v)
for y in v:
for z in y:
print(z)
result2 = nextCmd(SnmpEngine(),CommunityData('public', mpModel=0),
UdpTransportTarget(('10.31.70.107',161)),
ContextData(),
ObjectType(ObjectIdentity('1.3.6.1.2.1.2.2.1.2')),
lexicographicMode=False)
for x in result2:
v = x[3]
print(v)
for y in v:
for z in y:
print(z) | [
"moonfbwer@mail.ru"
] | moonfbwer@mail.ru |
bf5ed36b3a44df2a5ecb1cbda005d48baca2dc60 | 5a6e6741fe7c64cb99b6fbff66ab332d3563ffa9 | /src/interaction/speech.py | 8dc98b998edeb21336dc0f7093c938f63709eb47 | [] | no_license | gowhale/braille-pi | 06c9fdf05f0bc5a8ae3f2c18cb74e2c487e103dc | d184e7cc2f4cc6f339186a85057922706fc69a8a | refs/heads/dev | 2023-07-18T05:27:21.156461 | 2021-08-27T14:01:08 | 2021-08-27T14:01:08 | 320,604,946 | 0 | 0 | null | 2021-08-16T12:52:21 | 2020-12-11T15:02:02 | Python | UTF-8 | Python | false | false | 4,909 | py | from subprocess import call
from sys import platform
try:
from os import system
except ModuleNotFoundError:
"OS Note found"
import string
import os
import fnmatch
from playsound import playsound
from string import punctuation
import pygame
class Speech:
"""The purpose of this class it to verbally speak out strings.
Attributes:
operating_system (String) text which defines what OS the code is running on.
cmd_start (String) start of the linux speak command.
cmd_finish (String) end of the linux speak command.
"""
operating_system = ""
cmd_start = 'espeak '
cmd_finish = '>/dev/null'
sound_effects_directory = 'sounds/sound_effects'
sound_effects_file_names = []
voice_file_directory = 'sounds/voice_files'
voice_file_file_names = []
sound_file_extension = ".mp3"
mute = False
def __init__(self):
"""Sets the operating system of the object."""
print("""Speech object initiated!'""")
# Finds all sound and voiceover files matching sound file extension
pattern = "*" + self.sound_file_extension
sound_effects = os.listdir(self.sound_effects_directory)
for entry in sound_effects:
if fnmatch.fnmatch(entry, pattern):
self.sound_effects_file_names.append(entry)
voice_files = os.listdir(self.voice_file_directory)
for entry in voice_files:
if fnmatch.fnmatch(entry, pattern):
self.voice_file_file_names.append(entry)
self.operating_system = platform
def convert_to_file_name(self, text):
sentence = str(text).replace(" ", "_")
my_punctuation = punctuation.replace("_", "")
sentence = (sentence.translate(
str.maketrans("", "", my_punctuation))).lower()
file_name = sentence + self.sound_file_extension
return (file_name)
def play_sound(self, sound):
"""Plays a sound effect."""
current_os = self.operating_system
sound_file_name = sound + self.sound_file_extension
if sound_file_name in self.sound_effects_file_names:
sound_file_address = "{}/{}".format(
self.sound_effects_directory, sound_file_name)
if current_os == "linux" or current_os == "linux2":
# Raspberry Pi
pygame.mixer.init()
pygame.mixer.music.load(sound_file_address)
pygame.mixer.music.play()
while pygame.mixer.music.get_busy() == True:
continue
elif current_os == "darwin":
# OS X (Macbook)
playsound(sound_file_address)
elif current_os == "win32":
# Windows...
print("Windows edition coming soon.")
else:
print("SOUND FILE NEEDED")
def say(self, text):
"""This method says the text which is passed through the function.
Arguments:
text (String) text which will be spoken."""
print("SAYING -> {}".format(text))
if (not self.mute):
current_os = self.operating_system
snake_case_text = text.replace(" ", "_")
file_name = self.convert_to_file_name(snake_case_text)
if file_name in self.voice_file_file_names:
# If sound file available play it
voice_file_address = "{}/{}".format(
self.voice_file_directory, file_name)
if current_os == "linux" or current_os == "linux2":
# Raspberry Pi
pygame.mixer.init()
pygame.mixer.music.load(voice_file_address)
pygame.mixer.music.play()
while pygame.mixer.music.get_busy() == True:
continue
elif current_os == "darwin":
# OS X (Macbook)
playsound(voice_file_address)
elif current_os == "win32":
# Windows...
print("Windows edition coming soon.")
else:
print("VOICE FILE NEEDED")
with open("automation_scripts/script_saver/voice_files_needed.txt", "a") as myfile:
myfile.write("-{}\n".format(text))
# Narrating the passed text
if current_os == "linux" or current_os == "linux2":
call([self.cmd_start+snake_case_text+self.cmd_finish], shell=True)
elif current_os == "darwin":
# OS X (Macbook)
system('say {}'.format(snake_case_text))
elif current_os == "win32":
# Windows...
print("Windows edition coming soon.")
def set_mute(self):
self.mute = True
| [
"gabewhale@gmail.com"
] | gabewhale@gmail.com |
03be06c5198c90f540920ca80d4ed95b562765cc | 4ac330ae846ac3d593dca27ae34a5712a6de3a6c | /mail/excl_1.0.py | 71ad2d608809f02d320fa4581df171e392a7b827 | [] | no_license | gaoshang18/python | 5999c420be0bf020759fa0bb0bc2251532d5c779 | e73aa15e71eea54699215f0ff6279ad9b8417be2 | refs/heads/master | 2020-05-01T03:35:32.480839 | 2019-08-26T02:02:55 | 2019-08-26T02:02:55 | 177,248,778 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 736 | py | import xlrd
import xlwt
def readexcel():
# 这部分是读取内容
workbook=xlrd.open_workbook(r'E:\用户新增表.xlsx')
print(workbook.sheet_names())
sheet2=workbook.sheet_by_name('用户新增表')
nrows=sheet2.nrows #行数excl_1.0.py
ncols=sheet2.ncols #列数
print(nrows,ncols)
cell_A=sheet2.cell(1,1).value #取第二行第二列内容
print(cell_A)
sheet = '用户新增表'
EXcel = '用户新增表'
row = nrows + 1
col = 1
s = '你好'
print(row)
#下面是写入内容 部门 账号 姓名 外包oa账号 岗位角色 创建时间 共享盘组
sheet.write(row, col, s)
EXcel.save('用户新增表.xlsx')
if __name__ == '__main__':
readexcel() | [
"gaoshang18@qq.com"
] | gaoshang18@qq.com |
e18dc8ce6aee0d6274af6f5c1d9f1a26f9ed3c9c | 3996d7cbc3a41bc0d38ce7826b96cbeeb750d7f3 | /Bucles/numeros_primos.py | 83423fbedd870bc515cd901cba3cd0ce957e48e6 | [] | no_license | jpmontoya182/python-basic | 7f1eaa38e6630c62e59c9e64f0f84420907ef196 | 67ea95fa7f1f4f594a508010aee90c3c684a37c8 | refs/heads/master | 2022-11-30T05:03:49.866785 | 2020-08-16T16:12:42 | 2020-08-16T16:12:42 | 287,760,808 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 704 | py | def es_primo(numero):
result = True
# contador = 0
for i in range(1, numero + 1):
print(i)
if i == 1 or i == numero:
continue
if numero % i == 0:
result = False
break
# if i == 1 or i == numero:
# continue
# if numero % i == 0:
# contador += 1
# if contador == 0:
# return True
# else:
# return False
return result
def run():
numero = int(input('Ingrese un numero para el calculo : '))
if es_primo(numero):
print('El numero ingresado es primo')
else:
print('El numero ingresado no es primo')
if __name__ == '__main__':
run()
| [
"jpmontoya182@gmail.com"
] | jpmontoya182@gmail.com |
d3b3df7beaf26702d254fe115ef7f7f3966b570c | d0d4cdb5c01cef60ad70305436d097da020920fa | /src/Models/AccountingModels.py | 4402471ed42c49b564dce1b989a8852b2f5a9c57 | [] | no_license | Halicea/VlachYouthWeb | 5e0c76b6ba60736e95e9bb995e6d5c7c85c3e9d0 | c067c3dca0b11627522732472c333da4c7f76bf9 | refs/heads/master | 2021-01-20T02:02:08.285673 | 2010-05-13T12:44:26 | 2010-05-13T12:44:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,102 | py | '''
Created on Sep 8, 2009
@author: kosta halicea
'''
###########
from google.appengine.ext import db
from Models.BaseModels import Person
import datetime as dt
from google.appengine.ext.db import djangoforms
###########
class Account(db.Model):
AccountNumber = db.StringProperty(required=True)
BankName = db.StringProperty(required=True)
Owner = db.ReferenceProperty(reference_class=Person, required=True, collection_name='owner_accounts')
DateCreated = db.DateTimeProperty()
Balance = db.FloatProperty()
def getRecentTransactions(self):
return self.owner_account_transactions.order('-EntryTime').fetch(100)
RecentTransactions = property(getRecentTransactions)
@classmethod
def CreateNew(cls, accountNumber, bankName, owner, balance, _isAutoSave=False):
result = cls(AccountNumber=accountNumber, BankName=bankName, Owner=owner, DateCreated=dt.datetime.now(), Balance=float(balance))
if _isAutoSave:
result.put()
return result
class TransactionModes(object):
debit = 'debit'
credit = 'credit'
class TransactionTypeGroup(db.Model):
Code = db.StringProperty()
Description = db.TextProperty()
UsedId = db.StringListProperty()
DateCreated = db.DateTimeProperty()
@classmethod
def CreateNew(cls, code, description, userId, _isAutoInsert=False):
result = cls(Code=code, Description=description, UserId=userId, dateCreated=dt.datetime.now())
if _isAutoInsert: result.put()
return result
class TransactionType(db.Model):
Code = db.StringProperty(required=True)
TypeGroup = db.ReferenceProperty(reference_class=TransactionTypeGroup, collection_name='type_group_transaction_types')
Description = db.TextProperty()
DateCreated = db.DateTimeProperty()
class TransactionBatch(db.Model):
DateCreated = db.DateTimeProperty(required=True)
Owner = db.ReferenceProperty(Person, collection_name='owner_transaction_batches')
Account = db.ReferenceProperty(Account, collection_name='account_transaction_batches')
class Transaction(db.Model):
OwnerAccount = db.ReferenceProperty(Account, required=True, collection_name='owner_account_transactions')
ReferentAccount = db.ReferenceProperty(Account, required=True, collection_name='referent_account_transactions')
TransactionMode = db.StringProperty(required=True, choices=set([TransactionModes.debit, TransactionModes.credit]))
TransactionType = db.ReferenceProperty(TransactionType, collection_name='transaction_type_transactions')
Batch = db.ReferenceProperty(TransactionBatch, collection_name='batch_transactions')
Ammount = db.FloatProperty(required=True, default=0.0)
Description = db.TextProperty()
TransactionTime = db.DateTimeProperty(required=True)
EntryTime = db.DateTimeProperty(required=True)
VerifiedByReferentEntityStatus = db.StringProperty()
TimeOfVerification = db.DateTimeProperty()
def put(self):
super(Transaction, self).put()
ref_financial_card = FinancialCard.GetByOwnerByUser(self.OwnerAccount.Owner, self.ReferentAccount.Owner)
if ref_financial_card.Balance == None: ref_financial_card.Balance = 0.0
if self.TransactionMode == TransactionModes.credit:
self.OwnerAccount.Balance += self.Ammount
ref_financial_card.Balance -= self.Ammount
elif self.TransactionMode == TransactionModes.debit:
self.OwnerAccount.Balance -= self.Ammount
ref_financial_card.Balance += self.Ammount
else:
raise Exception("Not Valid Transaction Mode")
self.OwnerAccount.save()
ref_financial_card.save()
@classmethod
def CreateNew(cls, ownerAccount, referentAccount, transactionMode, transactionType,
ammount, description, transactionTime, verifiedByReferentEntityStatus=False,
timeOfVerification=None, _isAutoInsert=False):
result = cls(
OwnerAccount=ownerAccount,
ReferentAccount=referentAccount,
TransactionMode=transactionMode,
TransactionType=transactionType,
Ammount=float(ammount),
Description=description,
TransactionTime=transactionTime,
EntryTime=dt.datetime.now(),
VerifiedByReferentEntityStatus=verifiedByReferentEntityStatus,
TimeOfVerification=timeOfVerification
)
if _isAutoInsert:
result.put()
return result
class FinancialCard (db.Model):
DateCreated = db.DateTimeProperty (required=True)
Owner = db.ReferenceProperty(Person, required=True, collection_name='owner_financial_cards')
ReferentEntity = db.ReferenceProperty(Person, required=True, collection_name='referent_entity_financial_cards')
Balance = db.FloatProperty(default=0.0)
def Recalculate(self, _isAutoUpdate=False):
refAccounts = self.ReferentEntity.owner_accounts
result = []
#Potential Big Loop
for acc in self.Owner.owner_accounts:
for ref_acc in refAccounts:
result.extend(Transaction.gql("WHERE OwnerAccount =:oa AND ReferentAccount =:ra", oa=acc, ra=ref_acc))
balance = 0.0
for trans in result:
if trans.TransactionMode == TransactionModes.debit:
balance -= trans.Ammount
elif trans.TransactionMode == TransactionModes.credit:
balance += trans.Ammount
self.Balance = float(balance)
if _isAutoUpdate:
self.save()
return self
@classmethod
def CreateNew(cls, owner, referentEntity, balance=0.0, _autoInsert=False):
result = cls(
DateCreated=dt.datetime.now(),
Owner=owner,
ReferentEntity=referentEntity,
Balance=float(balance)
)
# set None to 0
if result.Balance == None: result.Balance = 0.0
if _autoInsert:
result.put()
return result
@classmethod
def GetByOwnerByUser(cls, owner, referentEntity):
result = cls.all().filter('Owner =', owner).filter('ReferentEntity =', referentEntity).get()
if result == None:
result = cls.CreateNew(owner, referentEntity, True)
return result
@classmethod
def RecalculateFinancialCard(cls, owner, referentEntity, _isAutoUpdate=False):
fcard = cls.gql("WHERE Owner =:o AND ReferentEntity =:re", o=owner, re=referentEntity)
return fcard.Recalculate(_isAutoUpdate=_isAutoUpdate)
#----------------------
class TransactionVerificationRequest(db.Model):
''' Object that is stored when a mail is sent to a person that need to verify some transaction'''
UrlCode = db.StringProperty(required=True)
Sender = db.ReferenceProperty(reference_class=Person, collection_name='sender_transaction_verification_requests')
Reciever = db.ReferenceProperty(reference_class=Person, collection_name='reciever_transaction_verification_requests')
Transaction = db.ReferenceProperty(Transaction, collection_name='transaction_transaction_verification_requests')
SendDate = db.DateTimeProperty()
@classmethod
def CreateNew(cls, transaction, _autoInsert=False):
# @type cls TransactionVerificationRequest
url_code = transaction.key().__str__()
result = cls(
UrlCode=url_code,
Sender=transaction.OwnerAccount.Owner,
Reciever=transaction.ReferentAccount.Owner,
Transaction=transaction,
SendDate=dt.datetime.now()
)
if _autoInsert:
result.put()
return result
| [
"costa.halicea@gmail.com"
] | costa.halicea@gmail.com |
9e5a35985bca4d00ccfa7344e42777168701d67c | 20144ae2d9fbe8c6aa67c53aa0e93f3027d870a5 | /movies_correlation.py | 24818022cf1602b82ff1ca9725741dd7ecef44d9 | [] | no_license | scc23/data-science-movies | 989d28974cbf55852a1877cead3041b4b6665b56 | a59dbcee0b6857e7abbf592d5a562a6194e61390 | refs/heads/master | 2020-07-02T11:49:57.343484 | 2019-08-09T18:20:04 | 2019-08-09T18:20:04 | 201,518,734 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,129 | py | # Question:
# Do the various criteria for success (critic reviews, audience reviews, profit/loss) correlate with each other?
import sys
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import re
from scipy import stats
def main():
# Read JSON files into Pandas DataFrames
omdb_filename = "./movies/data/omdb-data.json.gz"
rotten_filename = "./movies/data/rotten-tomatoes.json.gz"
wikidata_filename = "./movies/data/wikidata-movies.json.gz"
omdb = pd.read_json(omdb_filename, lines=True)
rotten = pd.read_json(rotten_filename, lines=True)
wikidata = pd.read_json(wikidata_filename, lines=True)
# Clean data by extracting only the necessary columns.
# print(omdb.columns.values)
omdb = omdb[
['imdb_id',
'omdb_awards' # awards won: text describing awards won by the movie
]
]
omdb = omdb.sort_values(by=['imdb_id'])
omdb = omdb.set_index('imdb_id')
# print(rotten.columns.values)
rotten = rotten[
['imdb_id',
'audience_ratings', # audience ratings: the count of audience reviews
'audience_average', # audience average rating (out of 5)
'audience_percent', # audience percent who "liked it" (out of 100)
'critic_average', # critic average rating (out of 10)
'critic_percent' # critic percent who gave a positive review (out of 100)
]
]
rotten = rotten.sort_values(by=['imdb_id'])
rotten = rotten.set_index('imdb_id')
# print(wikidata.columns.values)
wikidata = wikidata[
['imdb_id',
'made_profit' # made profit? Boolean calculated from 'cost' and 'box office'
]
]
wikidata = wikidata.sort_values(by=['imdb_id'])
wikidata = wikidata.set_index('imdb_id')
# Join the DataFrames by index (imdb_id)
movies = omdb.join(rotten).join(wikidata)
movies['critic_average'] = movies['critic_average'].apply(lambda x: x / 2)
# movies['audience_reviews'] = movies[['audience_average', 'audience_percent']].apply(tuple, axis=1) # tuple of [audience_average, audience_percent]
# movies['critic_reviews'] = movies[['critic_average', 'critic_percent']].apply(tuple, axis=1) # tuple of [critic_average, critic_percent]
# movies = movies.drop(columns=['audience_ratings', 'audience_average', 'audience_percent', 'critic_average', 'critic_percent'])
# critic_reviews_vs_audience_reviews = movies[['critic_reviews', 'audience_reviews']]
# Remove rows with NaN values
# critic_reviews_vs_audience_reviews = critic_reviews_vs_audience_reviews[~critic_reviews_vs_audience_reviews.critic_reviews.apply(lambda x: np.isnan(x[0]) & np.isnan(x[1]))]
# critic_reviews_vs_audience_reviews = critic_reviews_vs_audience_reviews[~critic_reviews_vs_audience_reviews.audience_reviews.apply(lambda x: np.isnan(x[0]) & np.isnan(x[1]))]
# critic_review_vs_profit = movies[['critic_reviews', 'audience_reviews']]
# audience_reviews_vs_profit = movies[['audience_reviews', 'made_profit']]
# Compute the correlation coefficients
critic_percent_vs_audience_reviews = movies[['critic_percent', 'audience_percent']].dropna()
critic_percent_vs_made_profit = movies[['critic_percent', 'made_profit']].dropna()
audience_percent_vs_made_profit = movies[['audience_percent', 'made_profit']].dropna()
critic_reviews_vs_audience_reviews = stats.linregress(critic_percent_vs_audience_reviews['critic_percent'], critic_percent_vs_audience_reviews['audience_percent']).rvalue
critic_reviews_vs_profit = stats.linregress(critic_percent_vs_made_profit['critic_percent'], critic_percent_vs_made_profit['made_profit']).rvalue
audience_reviews_vs_profit = stats.linregress(audience_percent_vs_made_profit['audience_percent'], audience_percent_vs_made_profit['made_profit']).rvalue
print("Correlation coefficient between critic reviews and audience reviews: {}".format(critic_reviews_vs_audience_reviews))
print("Correlation coefficient between critic reviews and profit/loss: {}".format(critic_reviews_vs_profit))
print("Correlation coefficient between audience reviews and profit/loss: {}".format(audience_reviews_vs_profit))
# Plot the data
plt.title('Critic Reviews VS Audience Reviews')
plt.xlabel('Critic Reviews')
plt.ylabel('Audience Reviews')
plt.plot(critic_percent_vs_audience_reviews['critic_percent'], critic_percent_vs_audience_reviews['audience_percent'], 'b.')
# plt.plot(critic_percent_vs_made_profit['critic_percent'], critic_percent_vs_made_profit['made_profit'], 'r.')
# plt.plot(audience_percent_vs_made_profit['audience_percent'], audience_percent_vs_made_profit['made_profit'], 'g.')
plt.show()
# By looking at the correlation coefficients, and plotting the data, we can observe that there is a strong correlation between the critic reviews and audience reviews.
# However, the other criteria for success such as critic reviews, audience reviews, and profit/loss do not correlate as much.
if __name__ == "__main__":
main()
| [
"sherman_chow@sfu.ca"
] | sherman_chow@sfu.ca |
1c930c629d264c1b02af2492b5b962be70f570d9 | d6589ff7cf647af56938a9598f9e2e674c0ae6b5 | /nlp-automl-20191111/setup.py | 7541638ca97742a773686465bb0c04174993e7bc | [
"Apache-2.0"
] | permissive | hazho/alibabacloud-python-sdk | 55028a0605b1509941269867a043f8408fa8c296 | cddd32154bb8c12e50772fec55429a9a97f3efd9 | refs/heads/master | 2023-07-01T17:51:57.893326 | 2021-08-02T08:55:22 | 2021-08-02T08:55:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,647 | py | # -*- coding: utf-8 -*-
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
import os
from setuptools import setup, find_packages
"""
setup module for alibabacloud_nlp-automl20191111.
Created on 30/12/2020
@author: Alibaba Cloud SDK
"""
PACKAGE = "alibabacloud_nlp_automl20191111"
NAME = "alibabacloud_nlp-automl20191111" or "alibabacloud-package"
DESCRIPTION = "Alibaba Cloud nlp-automl (20191111) SDK Library for Python"
AUTHOR = "Alibaba Cloud SDK"
AUTHOR_EMAIL = "sdk-team@alibabacloud.com"
URL = "https://github.com/aliyun/alibabacloud-python-sdk"
VERSION = __import__(PACKAGE).__version__
REQUIRES = [
"alibabacloud_tea_util>=0.3.1, <1.0.0",
"alibabacloud_tea_openapi>=0.1.0, <1.0.0",
"alibabacloud_openapi_util>=0.0.3, <1.0.0",
"alibabacloud_endpoint_util>=0.0.3, <1.0.0"
]
LONG_DESCRIPTION = ''
if os.path.exists('./README.md'):
with open("README.md", encoding='utf-8') as fp:
LONG_DESCRIPTION = fp.read()
setup(
name=NAME,
version=VERSION,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
long_description_content_type='text/markdown',
author=AUTHOR,
author_email=AUTHOR_EMAIL,
license="Apache License 2.0",
url=URL,
keywords=["alibabacloud","nlp","automl20191111"],
packages=find_packages(exclude=["tests*"]),
include_package_data=True,
platforms="any",
install_requires=REQUIRES,
python_requires=">=3.6",
classifiers=(
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
"Topic :: Software Development"
)
)
| [
"sdk-team@alibabacloud.com"
] | sdk-team@alibabacloud.com |
a09d630b0a1ce6cb0085ac88aa8b844f88f05b60 | 97ff998f266f4a504df90d90a38b982a8a7b2f16 | /model.py | 37cc68751b628daa679b3c5efe4829b390aec0b2 | [] | no_license | saidul-islam98/Fruits-Rotten-or-Fresh-Classification | fe2f8809d4f97a29e77a47c719c70d74a14390b1 | e7795c4226b9222e8766024f9075f82560a8a5b0 | refs/heads/main | 2023-04-04T06:12:18.947280 | 2021-04-15T09:34:03 | 2021-04-15T09:34:03 | 357,995,241 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,173 | py | import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Dense, Flatten, Dropout
from tensorflow.keras.utils import plot_model
def model_definition():
model = Sequential()
model.add(Conv2D(64, (3,3), activation='relu', input_shape=(228, 228, 3)))
model.add(MaxPooling2D(2, 2))
# The second convolution
model.add(Conv2D(128, (3,3), activation='relu'))
model.add(MaxPooling2D(2,2))
# The third convolution
model.add(Conv2D(256, (3,3), activation='relu'))
model.add(MaxPooling2D(2,2))
# The fourth convolution
model.add(Conv2D(512, (3,3), activation='relu'))
model.add(MaxPooling2D(2,2))
# Flatten the results to feed into a DNN
model.add(Conv2D(512, (3,3), activation='relu'))
model.add(MaxPooling2D(2,2))
model.add(Flatten())
model.add(Dropout(0.5))
model.add(Dropout(0.5))
model.add(Dense(512, activation='relu'))
model.add(Dense(64, activation="relu"))
model.add(Dense(6, activation='softmax'))
return model
def model_summary(model):
model.summary()
# def model_plot(model):
# plot_model(model) | [
"saidulislam143.si@gmail.com"
] | saidulislam143.si@gmail.com |
c4039809bdf3379983925eac6c165225ee0d6921 | ce3901b7a4a7b05106f125eab75e3a22ae3ab506 | /app/app/tests.py | 4ed7e8fb1d9b08a232c0fadccef82f5ca1be3ebc | [
"MIT"
] | permissive | skancoder/Django-Recipe-RESTAPI | f4d5b56c92239d46a1bfe3d8d5220fac3917f415 | d19618424575c2db2a46d7771eabac4d9fb3d5c5 | refs/heads/master | 2023-01-03T14:22:14.579315 | 2020-05-18T09:09:18 | 2020-05-18T09:09:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 568 | py | #Django looks for any folder or file that begins with test for unit testing
#for testing: all the files,folders,test function must begin with 'test'
from django.test import TestCase
from app.calculator import add,subtract
class CalcTests(TestCase):
#test function must begin with name 'test'
def test_add_numbers(self):
"""Test that two numbers are added together"""
self.assertEqual(add(3,8),11)
def test_subtract_numbers(self):
"""Test that values are subtracted and returned"""
self.assertEqual(subtract(5,11),6) | [
"kandagatlas@gmail.com"
] | kandagatlas@gmail.com |
48692f6bb82436458dcda51926e85f92d86ed1ad | 589b5eedb71d83c15d44fedf60c8075542324370 | /project/stock_project/alpha_model/alpha_factor/ARAPIncomeTTM.py | 605a216d31fef56f1a88b39a3a9a2b23dfa799dd | [] | no_license | rlcjj/quant | 4c2be8a8686679ceb675660cb37fad554230e0d4 | c07e8f0f6e1580ae29c78c1998a53774a15a67e1 | refs/heads/master | 2020-03-31T07:15:48.111511 | 2018-08-27T05:29:00 | 2018-08-27T05:29:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,685 | py | import pandas as pd
from quant.stock.stock import Stock
from quant.stock.date import Date
from quant.stock.stock_factor_operate import StockFactorOperate
def ARAPIncomeTTM(beg_date, end_date):
"""
因子说明:(预收账款 + 应付账款) / 营业总收入 TTM
最近一期财报 实时更新
若有一个为负值 结果为负值
"""
# param
#################################################################################
factor_name = 'ARAPIncomeTTM'
ipo_num = 90
# read data
#################################################################################
income = Stock().get_factor_h5("OperatingIncome", None, "primary_mfc")
advance = Stock().get_factor_h5("AdvanceReceipts", None, "primary_mfc")
payable = Stock().get_factor_h5("AccountsPayable", None, "primary_mfc")
# data precessing
#################################################################################
[advance, payable, income] = Stock().make_same_index_columns([advance, payable, income])
add = advance.add(payable)
ratio = add.div(income)
ratio = StockFactorOperate().change_quarter_to_daily_with_report_date(ratio, beg_date, end_date)
res = ratio.T.dropna(how='all').T
# save data
#############################################################################
Stock().write_factor_h5(res, factor_name, "alpha_dfc")
return res
#############################################################################
if __name__ == '__main__':
from datetime import datetime
beg_date = '2004-01-01'
end_date = datetime.today()
data = ARAPIncomeTTM(beg_date, end_date)
print(data)
| [
"1119332482@qq.com"
] | 1119332482@qq.com |
d1744ebbc97977eaa70c4613205308c9689750e2 | 28051ef0461a8928917e42917cf9517838d02b11 | /morfo_analyzer_3.py | 8d2a2059849b995da42967055c8c50fac710231e | [] | no_license | decourtenay/polimorfologik | d4ef34f21b6ecf6329cfe3c47e8c57d0383869eb | 3f2398b7f26de56949ece3a056108716d7027b58 | refs/heads/master | 2023-02-25T02:05:33.791397 | 2021-02-03T21:29:31 | 2021-02-03T21:29:31 | 335,761,492 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,563 | py | import re
import csv
polimorfologik = 'polimorfologik-2.1.txt'
input_file = 'input.txt'
output_file = 'output.txt'
def get_input(phrase):
entry = phrase
entry_list = entry.split()
return entry_list
def find_pp(phrase):
list_of_words = get_input(phrase)
prepositions = ['o','od','ode','na','nad','nade','do','przy','przed','przede','po','pod','za','zza','ze','w','we','obok','około','koło','bez','beze','dla','poza','u']
first_word = {'no.': '1', 'form': list_of_words[0], 'pos': ''}
if len(list_of_words) > 1:
second_word = {'no.': '2', 'form': list_of_words[1], 'pos': ''}
else:
second_word = {'no.': '2', 'form': '', 'pos': ''}
if len(list_of_words) > 2:
third_word = {'no.': '3', 'form': list_of_words[2], 'pos': ''}
else:
third_word = {'no.': '3', 'form': '', 'pos': ''}
if list_of_words[0] in prepositions:
first_word['pos'] = 'prep'
if len(list_of_words) > 1:
second_word['pos'] = 'prep_comp'
elif len(list_of_words) > 1:
if list_of_words[1] in prepositions:
second_word['pos'] = 'prep'
if len(list_of_words) > 2:
third_word['pos'] = 'prep_comp'
list_of_words_pos = [first_word, second_word, third_word]
print(first_word)
print(second_word)
print(third_word)
return list_of_words_pos
def find_head(phrase):
list_of_words_pos = find_pp(phrase)
for word in list_of_words_pos:
if word['pos'] not in ['prep', 'prep_comp']:
word_search_string = r".*;" + (re.escape(word['form'])) + r";.*"
f1 = open(polimorfologik, 'r', encoding='utf-8')
word_features = re.findall(word_search_string, f1.read())
print(word_features)
for feature_set in word_features:
feature_set_analysis = re.search(r"(.*?);(.*?);([a-z]*):?(..)?:?([a-zA-Z.]*)?:?([a-z0-9]*)?.*?", feature_set)
if feature_set_analysis.group(3) == 'subst':
if feature_set_analysis.group(1) == feature_set_analysis.group(2):
word['pos'] = 'head_noun'
word['number'] = feature_set_analysis.group(4)
word['gender'] = feature_set_analysis.group(6)
print(word)
elif feature_set_analysis.group(3) == 'adj':
word['form'] = feature_set_analysis.group(1)
word['pos'] = 'adj_agreeing'
word['number'] = feature_set_analysis.group(4)
word['gender'] = feature_set_analysis.group(6)
else:
word['pos'] = 'compl'
word['number'] = feature_set_analysis.group(4)
word['gender'] = feature_set_analysis.group(6)
print(list_of_words_pos)
return list_of_words_pos
def agreement(phrase):
list_of_words_pos = find_head(phrase)
for word in list_of_words_pos:
if word['pos'] == 'head_noun':
gender = word['gender']
number = word['number']
for adj in list_of_words_pos:
if adj['pos'] == 'adj_agreeing':
adj['gender'] = gender
adj['number'] = number
return list_of_words_pos
def find_forms(phrase):
list_of_words_pos = agreement(phrase)
plural_words = []
print(list_of_words_pos)
for word in list_of_words_pos:
forms_search_string = r"\b" + (re.escape(word['form'])) + r";.*"
if word['pos'] in ['prep', 'prep_comp', '', 'compl']:
word['number'] = 'sg'
for case in ['nom','gen','dat','acc','inst','loc','voc']:
word[case] = word['form']
word_pl = dict(word)
word_pl['number'] = 'pl'
plural_words.append(word_pl)
elif word['pos'] == 'head_noun':
f1 = open(polimorfologik, 'r', encoding='utf-8')
case_forms = re.findall(forms_search_string,f1.read())
for case in ['nom','gen','dat','acc','inst','loc','voc']:
word[case] = word['form']
case_form_search_string = r".*?;(.*?);subst:.*?" + word['number'] + r":.*?" + case + r":.*"
for form in case_forms:
case_form = re.search(case_form_search_string,form)
if case_form is not None:
word[case] = case_form.group(1)
if word['number'] == 'sg':
word_pl = {}
word_pl['no.'] = word['no.']
word_pl['form'] = word['form']
word_pl['nom'] = ''
else:
word_pl = dict(word)
word_pl['number'] = 'pl'
word = {}
word['no.'] = word_pl['no.']
word['form'] = word_pl['form']
word['nom'] = ''
if word_pl['nom'] == '':
for case in ['nom', 'gen', 'dat', 'acc', 'inst', 'loc', 'voc']:
word_pl[case] = word['form']
case_form_search_string = r".*?;(.*?);subst:.*?pl:" + case + r":.*"
for form in case_forms:
case_form = re.search(case_form_search_string, form)
if case_form is not None:
word_pl[case] = case_form.group(1)
elif word['nom'] == '':
for case in ['nom', 'gen', 'dat', 'acc', 'inst', 'loc', 'voc']:
word[case] = word['form']
case_form_search_string = r".*?;(.*?);subst:sg:" + case + r":.*"
for form in case_forms:
case_form = re.search(case_form_search_string, form)
if case_form is not None:
word[case] = case_form.group(1)
word_pl['pos'] = 'head_noun'
word['pos'] = 'head_noun'
word_pl['number'] = 'pl'
word['number'] = 'sg'
plural_words.append(word_pl)
elif word['pos'] == 'adj_agreeing':
word['number'] = 'sg'
word_pl = {}
word_pl['number'] = 'pl'
word_pl['gender'] = word['gender']
word_pl['no.'] = word['no.']
f1 = open(polimorfologik, 'r', encoding='utf-8')
case_forms = re.findall(forms_search_string,f1.read())
for case in ['nom','gen','dat','acc','inst','loc','voc']:
word[case] = word['form']
case_form_search_string = r".*?;(.*?);.*?adj:sg:.*?" + case + r".*?" + word['gender'] + r".*?:pos"
print(case_form_search_string)
for form in case_forms:
case_form = re.search(case_form_search_string,form)
if case_form is not None:
word[case] = case_form.group(1)
for case in ['nom','gen','dat','acc','inst','loc','voc']:
word_pl[case] = word['form']
case_form_search_string = r".*?;(.*?);.*?adj:pl:[a-z.]*?" + case + r"[a-z.]*?:[a-z0-9.]*?" + word['gender'] + r"[a-z0-9.]*?:pos"
# .* ?;(.*?);.*?adj:pl:[a-z.]*?) nom [a-z.]*?:[a-z0-9.]*? f [a-z0-9.]*?:pos
# case_form_search_string = r".*?;(.*?);.*?adj:pl:.*?" + case + r".*?" + word['gender'] + r".*?:pos"
for form in case_forms:
case_form = re.search(case_form_search_string,form)
if case_form is not None:
word_pl[case] = case_form.group(1)
plural_words.append(word_pl)
print(list_of_words_pos)
print(plural_words)
final_forms_sg = {}
final_forms_pl = {}
first_sg = list_of_words_pos[0]
second_sg = list_of_words_pos[1]
third_sg = list_of_words_pos[2]
first_pl = plural_words[0]
second_pl = plural_words[1]
third_pl = plural_words[2]
for case in ['nom', 'gen', 'dat', 'acc', 'inst', 'loc', 'voc']:
if second_sg[case] == '':
final_forms_sg[case] = first_sg[case]
elif third_sg[case] == '':
final_forms_sg[case] = first_sg[case] + ' ' + second_sg[case]
else:
final_forms_sg[case] = first_sg[case] + ' ' + second_sg[case] + ' ' + third_sg[case]
for case in ['nom', 'gen', 'dat', 'acc', 'inst', 'loc', 'voc']:
if second_pl[case] == '':
final_forms_pl[case] = first_pl[case]
elif third_pl[case] == '':
final_forms_pl[case] = first_pl[case] + ' ' + second_pl[case]
else:
final_forms_pl[case] = first_pl[case] + ' ' + second_pl[case] + ' ' + third_pl[case]
print(final_forms_sg)
print(final_forms_pl)
final_forms_sg_str = final_forms_sg['nom'] + ';' + 'singular' + ';' + final_forms_sg[
'nom'] + ';' + final_forms_sg['gen'] + ';' + final_forms_sg['dat'] + ';' + final_forms_sg['acc'] + ';' + \
final_forms_sg['inst'] + ';' + final_forms_sg['loc'] + ';' + final_forms_sg['voc'] + '\n'
final_forms_pl_str = final_forms_sg['nom'] + ';' + 'plural' + ';' + final_forms_pl[
'nom'] + ';' + final_forms_pl['gen'] + ';' + final_forms_pl['dat'] + ';' + final_forms_pl['acc'] + ';' + \
final_forms_pl['inst'] + ';' + final_forms_pl['loc'] + ';' + final_forms_pl['voc'] + '\n'
with open(output_file, 'a', encoding='utf-8') as output_data:
output_data.write(final_forms_sg_str)
output_data.write(final_forms_pl_str)
def morpho_analyzer():
processed_list = []
with open(input_file, 'r+', encoding='utf-8') as input_data:
for phrase in input_data.readlines():
if phrase not in processed_list:
find_forms(phrase)
morpho_analyzer()
# TODO
# git save??
# ensure that acc = nom for not m1 in plural
# ensure that forms are provided when not in dictionary (=starting form
# cleanup find_forms for head_noun
# światowy;światowe;adj:pl:acc:m2.m3.f.n1.n2.p2.p3:pos+adj:pl:nom.voc:m2.m3.f.n1.n2.p2.p3:pos+adj:sg:acc:n1.n2:pos+adj:sg:nom.voc:n1.n2:pos
# światowa organizacja zdrowia
# problem with regex in line 49
# światowy;światowa;adj:sg:nom.voc:f:pos
# INSTRUMENTAL IN ADJ AGREEING GIVES A KEY ERROR, check "mały" sg. instrumental
#
# mały;małym;adj:pl:dat:m1.m2.m3.f.n1.n2.p1.p2.p3:pos+adj:sg:inst:m1.m2.m3.n1.n2:pos+adj:sg:loc:m1.m2.m3.n1.n2:pos+subst:pl:dat:m1+subst:sg:inst:m1+subst:sg:loc:m1
#
# może wtedy defoltować do nominativu line 143??
| [
"nowakpm@gmail.com"
] | nowakpm@gmail.com |
9dffdb97ad4a5e34ac9104a9dd6f11c0cc395e5c | e694047b179ffb6af741a5652b83a39e45224afc | /ormQueries/store/models.py | 130e4e00b6aac2fac7a630989c32af935684ec6a | [] | no_license | LiorA1/Django | 976cec553038cb0a411b2cee8e509f64e16947ad | 17dfba26057868a1bee12d39399dea393e95726d | refs/heads/main | 2023-06-21T20:52:31.431643 | 2021-07-18T18:22:27 | 2021-07-18T18:22:27 | 345,341,097 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,564 | py | from django.db import models
from datetime import datetime
# Create your models here.
# This is an auto-generated Django model module.
# You'll have to do the following manually to clean this up:
# * Rearrange models' order
# * Make sure each model has one field with primary_key=True
# * Make sure each ForeignKey and OneToOneField has `on_delete` set to the desired behavior
# * Remove `managed = False` lines if you wish to allow Django to create, modify, and delete the table
# Feel free to rename the models, but don't rename db_table values or field names.
class Contact(models.Model):
id = models.IntegerField(primary_key=True)
last_name = models.CharField(max_length=15)
first_name = models.CharField(max_length=15)
title = models.CharField(max_length=2)
street = models.CharField(max_length=30)
city = models.CharField(max_length=20)
state = models.CharField(max_length=2)
zip_code = models.CharField(max_length=5)
phone = models.CharField(max_length=10)
fax = models.CharField(max_length=10)
class Meta:
# managed = False
db_table = 'contact'
class Customer(models.Model):
id = models.IntegerField(primary_key=True)
fname = models.CharField(max_length=15)
lname = models.CharField(max_length=20)
address = models.CharField(max_length=35)
city = models.CharField(max_length=20)
state = models.CharField(max_length=2)
zip_code = models.CharField(max_length=10)
phone = models.CharField(max_length=12)
company_name = models.CharField(max_length=35)
class Meta:
# managed = False
db_table = 'customer'
class Employee(models.Model):
emp_id = models.IntegerField(primary_key=True)
manager = models.ForeignKey("Employee", on_delete=models.PROTECT, null=True) # ForeignKey
emp_fname = models.CharField(max_length=20)
emp_lname = models.CharField(max_length=20)
# dept_id = models.OneToOneField("Department", on_delete=models.PROTECT) ## ForeignKey
street = models.CharField(max_length=40)
city = models.CharField(max_length=20)
state = models.CharField(max_length=4)
zip_code = models.CharField(max_length=9)
phone = models.CharField(max_length=10)
status = models.CharField(max_length=1)
ss_number = models.CharField(max_length=11)
salary = models.FloatField(max_length=20) # This field type is a guess.
start_date = models.DateField(default=datetime.now)
termination_date = models.DateField(null=True)
birth_date = models.DateField(default=datetime.now)
bene_health_ins = models.CharField(max_length=1)
bene_life_ins = models.CharField(max_length=1)
bene_day_care = models.CharField(max_length=1)
sex = models.CharField(max_length=1)
class Meta:
# managed = False
db_table = 'employee'
class EmpToDep(models.Model):
departmentKey = models.ForeignKey("Department", on_delete=models.PROTECT) ## foreignkey
employeeKey = models.ForeignKey("Employee", on_delete=models.PROTECT) ## foreignkey
start_date = models.DateField(default=datetime.now)
termination_date = models.DateField(null=True)
ismanager = models.BooleanField(default=False)
class Department(models.Model):
dept_id = models.IntegerField(primary_key=True)
dept_name = models.CharField(max_length=40)
# dept_head_id = models.OneToOneField(Employee, on_delete=models.PROTECT) # ForeignKey
class Meta:
# managed = False
db_table = 'department'
class FinCode(models.Model):
code = models.CharField(primary_key=True, max_length=2)
typeOf = models.CharField(max_length=10)
description = models.CharField(max_length=50)
class Meta:
# managed = False
db_table = 'fin_code'
class FinData(models.Model):
year = models.CharField(max_length=4)
quarter = models.CharField(max_length=2)
code = models.ForeignKey(FinCode, on_delete=models.PROTECT)
amount = models.FloatField(max_length=9) # This field type is a guess.
class Meta:
# managed = False
constraints = [models.UniqueConstraint(fields=['year', 'quarter', 'code'], name='fin_data_pk')]
db_table = 'fin_data'
class Product(models.Model):
id = models.IntegerField(primary_key=True)
name = models.CharField(max_length=15)
description = models.CharField(max_length=30)
size = models.CharField(max_length=18)
color = models.CharField(max_length=6)
quantity = models.IntegerField()
unit_price = models.FloatField(max_length=15) # This field type is a guess.
class Meta:
# managed = False
db_table = 'product'
class SalesOrder(models.Model):
id = models.IntegerField(primary_key=True)
customer = models.ForeignKey(Customer, on_delete=models.PROTECT)
order_date = models.DateField()
fin_code = models.ForeignKey(FinCode, on_delete=models.PROTECT)
region = models.CharField(max_length=7)
sales_rep = models.ForeignKey(Employee, on_delete=models.PROTECT)
class Meta:
# managed = False
db_table = 'sales_order'
class SalesOrderItems(models.Model):
sale_order = models.ForeignKey(SalesOrder, on_delete=models.PROTECT)
line_id = models.IntegerField()
product = models.ForeignKey(Product, on_delete=models.PROTECT)
quantity = models.IntegerField()
ship_date = models.DateField()
class Meta:
# managed = False
constraints = [models.UniqueConstraint(fields=['sale_order', 'line_id'], name='sales_order_items_pk')]
db_table = 'sales_order_items'
| [
"liorbm1@gmail.com"
] | liorbm1@gmail.com |
77082cacfa0573b473b851f43f273315feb2e7e5 | 40b64cc2005de68b691665fcb7049f5e5f6ec273 | /setup.py | d13a00b4913c83b37465dee667cfe4eb1c63f2bd | [
"MIT"
] | permissive | bnmrrs/django-twilio-utils | abf251d420891c347e248021ff8b5f75228d9dda | 99ebc10ffbe2bf224833952248078d890eb5b914 | refs/heads/master | 2021-01-16T00:16:45.926536 | 2010-09-04T14:25:03 | 2010-09-04T14:25:03 | 846,211 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,757 | py | #!/usr/bin/env python
#
# The MIT License
#
# Copyright (c) 2010 Ben Morris
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import distutils.core
import sys
# Importing setuptools adds some features like "setup.py develop", but
# it's optional so swallow the error if it's not there.
try:
import setuptools
except ImportError:
pass
distutils.core.setup(
name="django-twilio-utils",
version="0.1.1",
packages = ["django_twilio_utils"],
author="Ben Morris",
author_email="ben@bnmrrs.com",
url="http://www.github.com/bnmrrs/django-twilio-utils",
license="http://www.opensource.org/licenses/mit-license.php",
description="A set of utilities for interacting with Twilio from within Django",
)
| [
"ben@bnmrrs.com"
] | ben@bnmrrs.com |
7814cce7e5c9092e9c732fb61fdfb4a542218980 | 507a94242c44a8bbb09a9f1529f0cf1329a01bb5 | /NLO/Ppp/fchn6C.py | 173d64a2fb786e65738f4ff41514404fe917e957 | [] | no_license | JeffersonLab/BigTMD | 92661894ccc59e8cb164a773f830ddc03d75bae2 | 6e97635d21a63b7975b2e7f5891edc0c35c4dc0c | refs/heads/master | 2021-07-15T01:15:45.367325 | 2020-05-18T01:01:38 | 2020-05-18T01:01:38 | 152,819,316 | 4 | 4 | null | null | null | null | UTF-8 | Python | false | false | 9,532 | py | #!/usr/bin/env python
import numpy as np
from mpmath import fp
from numba import jit
import numpy as np
EulerGamma=np.euler_gamma
@jit(cache=True)
def _PolyLOG(s, z):
tol = 1e-10
l = 0
k = 1
zk = z
while 1:
term = zk / k**s
l += term
if abs(term) < tol:
break
zk *= z
k += 1
return l
@jit(cache=True)
def PolyLOG(s, z):
#return fp.polylog(s,z)
#if abs(z) > 0.75:
# return -PolyLOG(s,1-z) + np.pi**2/6 - np.log(z)*np.log(1-z)
if abs(z) >1:
return -PolyLOG(s, 1/z) - np.pi**2/6 - 0.5*np.log(-z)**2
return _PolyLOG(s, z)
@jit(cache=True)
def regular(g=None,gp=None,s=None,t=None,Q=None,s23=None,mu=None,nf=None):
return 0.0208333333333333*(2*Q**4*s - 8*Q**4*s23 + 8*Q**4*t + 3*Q**2*s**2 - 16*Q**2*s*s23 + 17*Q**2*s*t + 12*Q**2*s23**2 - 28*Q**2*s23*t + 16*Q**2*t**2 + s**3 - 6*s**2*s23 + 7*s**2*t + 6*s*s23**2 - 18*s*s23*t + 12*s*t**2 + 6*s23**2*t - 12*s23*t**2 + 6*t**3)*np.log((2*Q**2 + s + t + np.sqrt(4*Q**2*s23 + s**2 + 2*s*t + t**2))/(2*Q**2 + s + t - np.sqrt(4*Q**2*s23 + s**2 + 2*s*t + t**2)))/(np.pi**5*t*(2*Q**2 + s + t)*np.sqrt(4*Q**2*s23 + s**2 + 2*s*t + t**2)) + 0.000868055555555556*(s23 - t)*(-(24*Q**4*s23 + 12*Q**2*(s*(2*s23 + t) - 2*s23**2 + 2*s23*t + t**2) + 12*t*(s + t)*(s - s23 + t))*np.log((2*Q**2 + s + t - np.sqrt(4*Q**2*s23 + (s + t)**2))/(2*Q**2 + s + t + np.sqrt(4*Q**2*s23 + (s + t)**2)))/((-s23 + t)*(4*Q**2*s23 + (s + t)**2)) + (-24*Q**2*s23 - 12*s*(s23 + t) - 12*t*(-s23 + t))/((-s23 + t)*np.sqrt(4*Q**2*s23 + (s + t)**2)))*(4*Q**4*s - 8*Q**4*s23 + 8*Q**4*t + 2*Q**2*s**2 - 4*Q**2*s*s23 + 10*Q**2*s*t - 12*Q**2*s23*t + 12*Q**2*t**2 + 2*s**2*t - 4*s*s23*t + 6*s*t**2 - 4*s23*t**2 + 4*t**3)/(np.pi**5*t*(Q**2 + t)*(2*Q**2 + s + t)*np.sqrt(4*Q**2*s23 + s**2 + 2*s*t + t**2)) + 0.0833333333333333*(s23 - t)*(-Q**2 - s + s23 - t)/(np.pi**5*t**2) + 0.0208333333333333*(-2*np.log(mu) + np.log(s23) - 2*np.log(2) - np.log(np.pi) + EulerGamma)*(-2*Q**2*s23 + Q**2*t - 2*s*s23 + s*t + 2*s23**2 - 4*s23*t + 2*t**2)/(np.pi**5*t**2) + 0.0208333333333333*(2*np.log(mu) - np.log(s23) + np.log(Q**2*(s23 - t)**2/(t**2*(Q**2 + s - s23 + t))) - EulerGamma + np.log(np.pi) + 2*np.log(2))*(-4*Q**6*s23 + 6*Q**6*t - 10*Q**4*s*s23 + 13*Q**4*s*t + 16*Q**4*s23**2 - 38*Q**4*s23*t + 25*Q**4*t**2 - 4*Q**2*s**2*s23 + 5*Q**2*s**2*t + 8*Q**2*s*s23**2 - 30*Q**2*s*s23*t + 26*Q**2*s*t**2 + 24*Q**2*s23**2*t - 50*Q**2*s23*t**2 + 27*Q**2*t**3 - 4*s**2*s23*t + 5*s**2*t**2 + 8*s*s23**2*t - 20*s*s23*t**2 + 13*s*t**3 + 8*s23**2*t**2 - 16*s23*t**3 + 8*t**4)/(np.pi**5*t**2*(Q**2 + t)*(2*Q**2 + s + t)) + 0.0208333333333333*(Q**6*s23 + 2*Q**4*s*s23 - Q**4*s23**2 + 3*Q**4*s23*t - Q**4*t**2 + Q**2*s**2*s23 - Q**2*s*s23**2 + 4*Q**2*s*s23*t - Q**2*s*t**2 - Q**2*s23**2*t + 2*Q**2*s23*t**2 - Q**2*t**3 + s**2*s23*t - s*s23**2*t + 2*s*s23*t**2 - s*t**3)/(np.pi**5*t**2*(Q**2 + s)*(Q**2 + t)) + 0.333333333333333*(s23 - t)*(-(0.5*np.log(mu)/np.pi**5 - 0.25*np.log(s23)/np.pi**5 - 0.25*EulerGamma/np.pi**5 + 0.25*np.log(np.pi)/np.pi**5 + 0.5*np.log(2)/np.pi**5)*(2*Q**2*s23**2 - 2*Q**2*s23*t + s*s23*t - s*t**2 + s23*t**2 - t**3) - 0.25*(4*Q**2*s23**2 - 2*Q**2*t**2 + 4*s*s23*t - 2*s*t**2 + 2*s23*t**2 - 2*t**3 + (s23 - t)*(2*Q**2*s23 + t*(s + t))*np.log(Q**2*(s23 - t)**2/(t**2*(Q**2 + s - s23 + t))))/np.pi**5)*(-Q**2 - s + s23 - t)**2/(t**4*(Q**2 + s - s23 + t)**2) - 0.0416666666666667*(Q**2*s23 + s*t)*(-EulerGamma + 1 + np.log(4*np.pi))*(2*s23**2 - 2*s23*t + t**2)/(np.pi**5*t**4) - 0.00260416666666667*(s23 - t)**2*((-1 + (2*Q**2 + s + t)/np.sqrt(4*Q**2*s23 + (s + t)**2))*(1 + (2*Q**2 + s + t)/np.sqrt(4*Q**2*s23 + (s + t)**2))*(4*Q**2*s23 + (s + t)**2)**(-1.5)*(-(4*Q**2*s23 + (s + t)**2)*(2*Q**2 + s + t) + (2*Q**2*s23 + s*(s23 + t) + t*(-s23 + t))*(12*Q**4*s23 + 2*Q**2*(2*s23 + t)*(3*s - 2*s23 + 3*t) + (s + t)*(s*(s23 + 5*t) + 5*t*(-s23 + t)))/(s23 - t)**2)*np.log((2*Q**2 + s + t - np.sqrt(4*Q**2*s23 + (s + t)**2))/(2*Q**2 + s + t + np.sqrt(4*Q**2*s23 + (s + t)**2))) + 4 - 2*(2*Q**2 + s + t)**2/(4*Q**2*s23 + (s + t)**2) - (8*Q**2 + 4*s + 4*t)*(2*Q**2*s23 + s*(s23 + t) + t*(-s23 + t))/((s23 - t)*(4*Q**2*s23 + (s + t)**2)) + 2*(12*Q**4 + 4*Q**2*(3*s - 2*s23 + 3*t) + (s + t)**2)*(2*Q**2*s23 + s*(s23 + t) + t*(-s23 + t))**2/((s23 - t)**2*(4*Q**2*s23 + (s + t)**2)**2))/(np.pi**5*Q**2*(Q**2 + s - s23 + t)) - 0.000217013888888889*(s23 - t)*(24 - (48*Q**2 + 24*s + 24*t)*(2*Q**2*s23 + s*(s23 + t) + t*(-s23 + t))/((s23 - t)*(4*Q**2*s23 + (s + t)**2)) + 12*(4*Q**2*s23 + (s + t)**2)**(-1.5)*(2*Q**2 + s + t)**2*(2*Q**2*s23 + s*(s23 + t) + t*(-s23 + t))*np.log((2*Q**2 + s + t - np.sqrt(4*Q**2*s23 + (s + t)**2))/(2*Q**2 + s + t + np.sqrt(4*Q**2*s23 + (s + t)**2)))/(-s23 + t) - (24*Q**2*s23 + 12*s*(s23 + t) + 12*t*(-s23 + t))*np.log((2*Q**2 + s + t - np.sqrt(4*Q**2*s23 + (s + t)**2))/(2*Q**2 + s + t + np.sqrt(4*Q**2*s23 + (s + t)**2)))/((-s23 + t)*np.sqrt(4*Q**2*s23 + (s + t)**2)))*(6*Q**2 + 6*s - 8*s23 + 8*t)/(np.pi**5*Q**2*(Q**2 + s - s23 + t)) - 0.0104166666666667*(8*Q**2 + 8*s - 14*s23 + 14*t)/(np.pi**5*Q**2) + 0.333333333333333*(s23 - t)*((-6*Q**4*s23**2 + 4*Q**4*s23*t + 2*Q**2*t*(-2*s*s23 + s*t - s23*t + t**2))*(0.25*np.log(mu)/np.pi**5 - 0.125*np.log(s23)/np.pi**5 - 0.125*EulerGamma/np.pi**5 + 0.125*np.log(np.pi)/np.pi**5 + 0.25*np.log(2)/np.pi**5) + 0.125*(-16*Q**4*s23**2 + 2*Q**4*t**2 + 2*Q**2*t*(-8*s*s23 + s*t - 3*s23*t + t**2) + 2*Q**2*(Q**2*s23*(3*s23 - 2*t) - t*(s*(-2*s23 + t) + t*(-s23 + t)))*np.log(t**2*(Q**2 + s - s23 + t)/(Q**2*(s23 - t)**2)) - t**2*(s + t)**2)/np.pi**5)*(-Q**2 - s + s23 - t)**3/(Q**2*t**4*(Q**2 + s - s23 + t)**3)
@jit(cache=True)
def delta(g=None,gp=None,s=None,t=None,Q=None,s23=None,mu=None,B=None,nf=None):
return 0.111111111111111*(s23 - t)*(4*Q**2*s23 + (s + t)**2)*(Q**2 + s - 3*s23 + 3*t)*(12*EulerGamma*(0.03125*np.log(mu)/np.pi**5 + 0.015625*np.log(np.pi)/np.pi**5 + 0.03125*np.log(2)/np.pi**5) - 6*EulerGamma*(0.03125*np.log(mu)/np.pi**5 - 0.03125*EulerGamma/np.pi**5 + 0.015625*np.log(np.pi)/np.pi**5 + 0.03125*np.log(2)/np.pi**5) - (-0.1875*np.log(mu)/np.pi**5 - 0.1875*np.log(2)/np.pi**5 - 0.09375*np.log(np.pi)/np.pi**5 + 0.09375*EulerGamma/np.pi**5 + 0.03125*(-6*Q**2*s23 + 3*Q**2*(s23 - t)*np.log(t**2*(Q**2 + s - s23 + t)/(Q**2*(s23 - t)**2)) - 3*t*(s + t))/(np.pi**5*Q**2*(s23 - t)))*np.log(B) - 0.046875*np.log(B)**2/np.pi**5 - 0.1875*np.log(mu)**2/np.pi**5 - 12*(0.015625*np.log(np.pi)/np.pi**5 + 0.03125*np.log(2)/np.pi**5)*np.log(mu) - 0.1875*np.log(2)*np.log(np.pi)/np.pi**5 - 0.046875*(EulerGamma**2 + 0.166666666666667*np.pi**2)/np.pi**5 - 0.1875*np.log(2)**2/np.pi**5 - 0.046875*np.log(np.pi)**2/np.pi**5 - 0.09375*(-0.333333333333333*np.pi**2 + 2*EulerGamma**2)/np.pi**5 - 0.015625*(4*Q**2*s23 + (s + t)**2)*(2*Q**2*(6*(np.log(-2*t*(Q**2 + s - s23 + t)/(2*Q**2*s23 + s*(s23 + t) - (-s23 + t)*(-t + np.sqrt(4*Q**2*s23 + (s + t)**2)))) - 2)*np.log(2*t*(Q**2 + s - s23 + t)/((-s23 + t)*np.sqrt(4*Q**2*s23 + (s + t)**2))) + np.pi**2)*(Q**2 + s - s23 + t)/(4*Q**2*s23 + (s + t)**2) + 12*Q**2*(PolyLOG(2, (2*Q**2*s23 + s*(s23 + t) + (-s23 + t)*(t + np.sqrt(4*Q**2*s23 + (s + t)**2)))/((s23 - t)*(2*Q**2 + s + t - np.sqrt(4*Q**2*s23 + (s + t)**2)))) - PolyLOG(2, 2*t*(Q**2 + s - s23 + t)/((-s23 + t)*(2*Q**2 + s + t + np.sqrt(4*Q**2*s23 + (s + t)**2)))))*(Q**2 + s - s23 + t)/(4*Q**2*s23 + (s + t)**2) + 3*Q**2*(Q**2 + s - s23 + t)*np.log((2*Q**2 + s + t - np.sqrt(4*Q**2*s23 + (s + t)**2))/(2*Q**2 + s + t + np.sqrt(4*Q**2*s23 + (s + t)**2)))*np.log((2*Q**2 + s + t - np.sqrt(4*Q**2*s23 + (s + t)**2))*(2*Q**2 + s + t + np.sqrt(4*Q**2*s23 + (s + t)**2))**3/(4*Q**2*s23 + (s + t)**2)**2)/(4*Q**2*s23 + (s + t)**2) + (-12*Q**2*(Q**2 + s - s23 + t)*np.log(2*t*(Q**2 + s - s23 + t)/((-s23 + t)*np.sqrt(4*Q**2*s23 + (s + t)**2)))/(4*Q**2*s23 + (s + t)**2) - 3 + 3*(2*Q**2 + s + t)*(2*Q**2 + s + t + np.sqrt(4*Q**2*s23 + (s + t)**2))/(4*Q**2*s23 + (s + t)**2) - 3*(-2*Q**2*s23 - s*(s23 + t) + t*(s23 - t))/((-s23 + t)*np.sqrt(4*Q**2*s23 + (s + t)**2)))*np.log(-1 + (2*Q**2 + s + t)/np.sqrt(4*Q**2*s23 + (s + t)**2)) + 3*(4*Q**2*(Q**2 + s - s23 + t)*np.log(1 + (-2*Q**2*s23 - s*(s23 + t) + t*(s23 - t))/((-s23 + t)*np.sqrt(4*Q**2*s23 + (s + t)**2)))/(4*Q**2*s23 + (s + t)**2) - 1 + (2*Q**2 + s + t)*(2*Q**2 + s + t - np.sqrt(4*Q**2*s23 + (s + t)**2))/(4*Q**2*s23 + (s + t)**2) + (-2*Q**2*s23 - s*(s23 + t) + t*(s23 - t))/((-s23 + t)*np.sqrt(4*Q**2*s23 + (s + t)**2)))*np.log(1 + (2*Q**2 + s + t)/np.sqrt(4*Q**2*s23 + (s + t)**2)))/(np.pi**5*Q**2*(Q**2 + s - s23 + t)) + (-2*Q**2*s23 + Q**2*(s23 - t)*np.log(t**2*(Q**2 + s - s23 + t)/(Q**2*(s23 - t)**2)) - t*(s + t))*(0.1875*np.log(mu)/np.pi**5 - 0.09375*EulerGamma/np.pi**5 + 0.09375*np.log(np.pi)/np.pi**5 + 0.1875*np.log(2)/np.pi**5)/(Q**2*(s23 - t)))/(t**2*(Q**2 + 0.25*(s + t)**2/s23))
@jit(cache=True)
def plus1B(g=None,gp=None,s=None,t=None,Q=None,s23=None,mu=None,B=None,nf=None):
return 0.0833333333333333*s23*(s23 - t)*(Q**2 + s - 3*s23 + 3*t)*np.log(mu)/(np.pi**5*t**2) - 0.0416666666666667*s23*(s23 - t)*(Q**2 + s - 3*s23 + 3*t)*np.log(t**2*(Q**2 + s - s23 + t)/(Q**2*(s23 - t)**2))/(np.pi**5*t**2) + 0.0416666666666667*s23*(s23 - t)*(Q**2 + s - 3*s23 + 3*t)*np.log(np.pi)/(np.pi**5*t**2) + 0.0833333333333333*s23*(s23 - t)*(Q**2 + s - 3*s23 + 3*t)*np.log(2)/(np.pi**5*t**2) - 0.0416666666666667*s23*(Q**2*(s23*(-2 + EulerGamma) - EulerGamma*t) - t*(s + t))*(Q**2 + s - 3*s23 + 3*t)/(np.pi**5*Q**2*t**2)
@jit(cache=True)
def plus2B(g=None,gp=None,s=None,t=None,Q=None,s23=None,mu=None,B=None,nf=None):
return -0.0416666666666667*s23*(s23 - t)*(Q**2 + s - 3*s23 + 3*t)/(np.pi**5*t**2)
| [
"nobuosato@gmail.com"
] | nobuosato@gmail.com |
cf7aaa8675a60a7a5f79d429c7ca6db548e2bc01 | a5e60d6bdc5ffa932e9d2638ee56e88ed4c07fc7 | /df_websockets/tasks.py | dd4e7dfa2164adc5397a54f207d9030465a96516 | [
"LicenseRef-scancode-cecill-b-en"
] | permissive | webclinic017/df_websockets | c5d82afe90e475731e61186fa8df39897c64252e | f0d072c4d6f2ddfa8bf17dd408236553fc154731 | refs/heads/master | 2023-04-16T05:45:42.293330 | 2021-04-16T08:50:42 | 2021-04-16T08:50:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,116 | py | # ##############################################################################
# This file is part of df_websockets #
# #
# Copyright (C) 2020 Matthieu Gallet <github@19pouces.net> #
# All Rights Reserved #
# #
# You may use, distribute and modify this code under the #
# terms of the (BSD-like) CeCILL-B license. #
# #
# You should have received a copy of the CeCILL-B license with #
# this file. If not, please visit: #
# https://cecill.info/licences/Licence_CeCILL-B_V1-en.txt (English) #
# or https://cecill.info/licences/Licence_CeCILL-B_V1-fr.txt (French) #
# #
# ##############################################################################
"""Define Celery tasks and functions for calling signals
=====================================================
This module is automatically imported by Celery.
Use these functions for:
* setting websocket channels allowed for a given :class:`django.http.response.HttpResponse`,
* calling signals, with a full function (:meth:`df_websockets.tasks.call`) and a
shortcut (:meth:`df_websockets.tasks.trigger`)
"""
import json
import logging
import os
import uuid
from functools import lru_cache
from importlib import import_module
from asgiref.sync import async_to_sync
from celery import shared_task
from channels import DEFAULT_CHANNEL_LAYER
from channels.layers import get_channel_layer
from django.apps import apps
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.utils.module_loading import import_string
from redis import ConnectionPool, StrictRedis
from df_websockets import ws_settings
from df_websockets.decorators import (
REGISTERED_FUNCTIONS,
REGISTERED_SIGNALS,
DynamicQueueName,
FunctionConnection,
SignalConnection,
)
from df_websockets.load import load_celery
from df_websockets.utils import valid_topic_name
from df_websockets.window_info import WindowInfo
logger = logging.getLogger("df_websockets.signals")
class Constant:
"""Allow to define constants that can be nicely printed to stdout"""
def __init__(self, name):
self.name = name
def __str__(self):
return self.name
def __repr__(self):
return self.name
# special values for the "to" argument
SERVER = Constant("SERVER")
SESSION = Constant("SESSION")
WINDOW = Constant("WINDOW")
USER = Constant("USER")
BROADCAST = Constant("BROADCAST")
# special value for the "queue" argument
SYNC = Constant("SYNC")
_signal_encoder = import_string(ws_settings.WEBSOCKET_SIGNAL_ENCODER)
_topic_serializer = import_string(ws_settings.WEBSOCKET_TOPIC_SERIALIZER)
__values = {
"host": settings.WEBSOCKET_REDIS_CONNECTION.get("host", "localhost"),
"port": ":%s" % settings.WEBSOCKET_REDIS_CONNECTION["port"]
if settings.WEBSOCKET_REDIS_CONNECTION.get("port")
else "",
"db": settings.WEBSOCKET_REDIS_CONNECTION.get("db", 1),
"password": ":%s@" % settings.WEBSOCKET_REDIS_CONNECTION["password"]
if settings.WEBSOCKET_REDIS_CONNECTION.get("password")
else "",
}
redis_connection_pool = ConnectionPool.from_url(
"redis://%(password)s%(host)s%(port)s/%(db)s" % __values,
retry_on_timeout=True,
socket_keepalive=True,
)
def get_websocket_redis_connection():
"""Return a valid Redis connection, using a connection pool."""
return StrictRedis(
connection_pool=redis_connection_pool,
retry_on_timeout=True,
socket_keepalive=True,
)
def set_websocket_topics(request, *topics):
"""Use it in a Django view for setting websocket topics. Any signal sent to one of these topics will be received
by the client.
:param request: :class:`django.http.request.HttpRequest`
:param topics: list of topics that will be subscribed by the websocket (can be any Python object).
"""
# noinspection PyTypeChecker
if not hasattr(request, "window_key"):
raise ImproperlyConfigured("You should use the WebsocketMiddleware middleware")
token = request.window_key
request.has_websocket_topics = True
prefix = ws_settings.WEBSOCKET_REDIS_PREFIX
request = WindowInfo.from_request(request)
topic_strings = {_topic_serializer(request, x) for x in topics if x is not SERVER}
# noinspection PyUnresolvedReferences,PyTypeChecker
if getattr(request, "user", None) and request.user.is_authenticated:
topic_strings.add(_topic_serializer(request, USER))
topic_strings.add(_topic_serializer(request, WINDOW))
topic_strings.add(_topic_serializer(request, BROADCAST))
connection = get_websocket_redis_connection()
redis_key = "%s%s" % (prefix, token)
connection.delete(redis_key)
for topic in topic_strings:
if topic is not None:
connection.rpush(redis_key, prefix + topic)
connection.expire(redis_key, ws_settings.WEBSOCKET_REDIS_EXPIRE)
def trigger(window_info, signal_name, to=None, **kwargs):
"""Shortcut to :meth:`df_websockets.tasks.call`, allowing to directly pass arguments of the signal to this function.
Your signal cannot use `window_info`, `signal_name` and `to` as argument names.
These two successive calls are strictly equivalent:
.. code-block:: python
from df_websockets.tasks import call, trigger, WINDOW, SERVER
def my_python_view(request):
trigger(request, 'my.signal.name', to=[WINDOW, SERVER], arg1=12, arg2='Hello')
trigger_signal(request, 'my.signal.name', to=[WINDOW, SERVER], kwargs={'arg1': 12, 'arg2': 'Hello'})
"""
return _trigger_signal(
window_info, signal_name, to=to, kwargs=kwargs, from_client=False
)
# noinspection PyIncorrectDocstring
def trigger_signal(
window_info,
signal_name,
to=None,
kwargs=None,
countdown=None,
expires=None,
eta=None,
):
"""Call a df_websockets signal.
:param window_info: either a :class:`django.http.request.HttpRequest` or
a :class:`df_websockets.window_info.WindowInfo`
:param signal_name: name of the called signal (:class:`str`)
:param to: :class:`list` of the topics that should receive the signal
:param kwargs: dict with all arguments of your signal. Will be encoded to JSON with
`settings.WEBSOCKET_SIGNAL_ENCODER` and decoded with `settings.WEBSOCKET_SIGNAL_DECODER`.
:param countdown: check the Celery doc (in a nutshell: number of seconds before executing the signal)
:param expires: check the Celery doc (in a nutshell: if this signal is not executed before this number of seconds,
it is cancelled)
:param eta: check the Celery doc (in a nutshell: datetime of running this signal)
"""
return _trigger_signal(
window_info,
signal_name,
to=to,
kwargs=kwargs,
countdown=countdown,
expires=expires,
eta=eta,
from_client=False,
)
def _trigger_signal(
window_info,
signal_name,
to=None,
kwargs=None,
countdown=None,
expires=None,
eta=None,
from_client=False,
):
"""actually calls a DF signal, dispatching them to their destination:
* only calls Celery tasks if a delay is required (`coutdown` argument)
* write messages to websockets if no delay is required
"""
import_signals_and_functions()
window_info = WindowInfo.from_request(
window_info
) # ensure that we always have a true WindowInfo object
if kwargs is None:
kwargs = {}
for k in (SERVER, WINDOW, USER, BROADCAST):
if to is k:
to = [k]
if to is None:
to = [USER]
serialized_client_topics = []
to_server = False
logger.debug('received signal "%s" to %r' % (signal_name, to))
for topic in to:
if topic is SERVER:
if signal_name not in REGISTERED_SIGNALS:
logger.debug('Signal "%s" is unknown by the server.' % signal_name)
to_server = True
else:
serialized_topic = _topic_serializer(window_info, topic)
if serialized_topic is not None:
serialized_client_topics.append(serialized_topic)
celery_kwargs = {}
if expires:
celery_kwargs["expires"] = expires
if eta:
celery_kwargs["eta"] = eta
if countdown:
celery_kwargs["countdown"] = countdown
queues = {
x.get_queue(window_info, kwargs)
for x in REGISTERED_SIGNALS.get(signal_name, [])
}
window_info_as_dict = None
if window_info:
window_info_as_dict = window_info.to_dict()
if celery_kwargs and serialized_client_topics:
celery_client_topics = serialized_client_topics
queues.add(ws_settings.CELERY_DEFAULT_QUEUE)
to_server = True
else:
celery_client_topics = []
if to_server:
for queue in queues:
topics = (
celery_client_topics
if queue == ws_settings.CELERY_DEFAULT_QUEUE
else []
)
_server_signal_call.apply_async(
[
signal_name,
window_info_as_dict,
kwargs,
from_client,
topics,
to_server,
queue,
],
queue=queue,
**celery_kwargs,
)
if serialized_client_topics and not celery_kwargs:
signal_id = str(uuid.uuid4())
for topic in serialized_client_topics:
_call_ws_signal(signal_name, signal_id, topic, kwargs)
def _call_ws_signal(signal_name, signal_id, serialized_topic, kwargs):
serialized_message = json.dumps(
{"signal": signal_name, "opts": kwargs, "signal_id": signal_id},
cls=_signal_encoder,
)
topic = ws_settings.WEBSOCKET_REDIS_PREFIX + serialized_topic
channel_layer = get_channel_layer(DEFAULT_CHANNEL_LAYER)
logger.debug("send message to topic %r" % topic)
topic_valid = valid_topic_name(topic)
# noinspection PyTypeChecker
async_to_sync(channel_layer.group_send)(
topic_valid,
{"type": "ws_message", "message": serialized_message},
)
def _return_ws_function_result(window_info, result_id, result, exception=None):
connection = get_websocket_redis_connection()
json_msg = {
"result_id": result_id,
"result": result,
"exception": str(exception) if exception else None,
}
serialized_message = json.dumps(json_msg, cls=_signal_encoder)
serialized_topic = _topic_serializer(window_info, WINDOW)
if serialized_topic:
topic = ws_settings.WEBSOCKET_REDIS_PREFIX + serialized_topic
logger.debug("send function result to topic %r" % topic)
connection.publish(topic, serialized_message.encode("utf-8"))
@lru_cache()
def import_signals_and_functions():
"""Import all `signals.py`, 'forms.py' and `functions.py` files to register signals and WS functions
(tries these files for all Django apps).
"""
def try_import(module):
try:
import_module(module)
except ImportError as e:
if package_dir and os.path.isfile(
os.path.join(package_dir, "%s.py" % module_name)
):
logger.exception(e)
except Exception as e:
logger.exception(e)
load_celery()
for app_config in apps.app_configs.values():
app = app_config.name
package_dir = app_config.path
for module_name in ("signals", "forms", "functions"):
if os.path.isfile(os.path.join(package_dir, "%s.py" % module_name)):
try_import("%s.%s" % (app, module_name))
elif os.path.isdir(os.path.join(package_dir, module_name)):
for f in os.listdir(os.path.join(package_dir, module_name)):
f = os.path.splitext(f)[0]
try_import("%s.%s.%s" % (app, module_name, f))
logger.debug(
"Found signals: %s"
% ", ".join(["%s (%d)" % (k, len(v)) for (k, v) in REGISTERED_SIGNALS.items()])
)
logger.debug(
"Found functions: %s" % ", ".join([str(k) for k in REGISTERED_FUNCTIONS])
)
@shared_task(serializer="json", bind=True)
def _server_signal_call(
self,
signal_name,
window_info_dict,
kwargs=None,
from_client=False,
serialized_client_topics=None,
to_server=False,
queue=None,
):
logger.info(
'Signal "%s" called on queue "%s" to topics %s (from client?: %s, to server?: %s)'
% (signal_name, queue, serialized_client_topics, from_client, to_server)
)
try:
if kwargs is None:
kwargs = {}
if serialized_client_topics:
signal_id = str(uuid.uuid4())
for topic in serialized_client_topics:
_call_ws_signal(signal_name, signal_id, topic, kwargs)
window_info = WindowInfo.from_dict(window_info_dict)
import_signals_and_functions()
window_info.celery_request = self.request
if not to_server or signal_name not in REGISTERED_SIGNALS:
return
for connection in REGISTERED_SIGNALS[signal_name]:
assert isinstance(connection, SignalConnection)
if connection.get_queue(window_info, kwargs) != queue or (
from_client
and not connection.is_allowed_to(connection, window_info, kwargs)
):
continue
new_kwargs = connection.check(kwargs)
if new_kwargs is None:
continue
connection(window_info, **new_kwargs)
except Exception as e:
logger.exception(e)
@shared_task(serializer="json", bind=True)
def _server_function_call(
self, function_name, window_info_dict, result_id, kwargs=None
):
logger.info("Function %s called from client." % function_name)
e, result, window_info = None, None, None
try:
if kwargs is None:
kwargs = {}
window_info = WindowInfo.from_dict(window_info_dict)
import_signals_and_functions()
window_info.celery_request = self.request
connection = REGISTERED_FUNCTIONS[function_name]
assert isinstance(connection, FunctionConnection)
if not connection.is_allowed_to(connection, window_info, kwargs):
raise ValueError("Unauthorized function call %s" % connection.path)
kwargs = connection.check(kwargs)
if kwargs is not None:
# noinspection PyBroadException
result = connection(window_info, **kwargs)
except Exception as e:
logger.exception(e)
result = None
if window_info:
_return_ws_function_result(window_info, result_id, result, exception=e)
def get_expected_queues():
expected_queues = set()
import_signals_and_functions()
for connection in REGISTERED_FUNCTIONS.values():
if isinstance(connection.queue, DynamicQueueName):
for queue_name in connection.queue.get_available_queues():
expected_queues.add(queue_name)
elif not callable(connection.queue):
expected_queues.add(connection.queue)
for connections in REGISTERED_SIGNALS.values():
for connection in connections:
if isinstance(connection.queue, DynamicQueueName):
for queue_name in connection.queue.get_available_queues():
expected_queues.add(queue_name)
elif not callable(connection.queue):
expected_queues.add(connection.queue)
return expected_queues
| [
"github@19pouces.net"
] | github@19pouces.net |
68496ff621471723f8f5bfa29b64fc2080d909b4 | 10933f33099b423c5971d12993de07cc6f7d0f07 | /python_scripts/speaker250_rec.py | 19c7a182bda1152db09eed322fea844bc5cca4d8 | [] | no_license | Joako360/Voice-Identification | ed521d3fe41c6d862ab72e4585b1600742295847 | 744cb2276097c2839e7bd5f5db9f461d44e48b25 | refs/heads/master | 2023-03-15T15:10:30.219968 | 2019-04-29T02:56:09 | 2019-04-29T02:56:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,361 | py | import os
import sys
import librosa
import tflearn
import pydub
import wave
import pickle
import speech_data
import segment_data
import tensorflow as tf
import librosa.display
import numpy as np
# load constants - training directory, testing directory
training_data = '/home/cc/Data/Full-100/'
training_seg = '/home/cc/Data/Segment-100-Two/'
# calculate the mfcc matrices for training from the segmented data
#X = []
#Y = []
speakers = speech_data.get_speakers(training_seg)
#for f in os.listdir(training_seg):
# Y.append(speech_data.one_hot_from_item(speech_data.speaker(f), speakers))
# y, sr = librosa.load(training_seg + f)
# X.append(librosa.feature.mfcc(y=y, sr=sr, n_mfcc=20))
#pickle.dump(X, open('/home/cc/Data/pickle_files/speaker250_2secX.p', 'wb'))
#pickle.dump(Y, open('/home/cc/Data/pickle_files/speaker250_2secY.p', 'wb'))
X = pickle.load(open('/home/cc/Data/pickle_files/speaker250_2secX.p', 'rb'))
Y = pickle.load(open('/home/cc/Data/pickle_files/speaker250_2secY.p', 'rb'))
# size of fully connected layers
n = sys.argv[1]
# define the network and the model for training
tflearn.init_graph(num_cores=8, gpu_memory_fraction=0.5)
net = tflearn.input_data(shape=[None, 20, 87])
net = tflearn.fully_connected(net, n)
net = tflearn.fully_connected(net, n)
net = tflearn.dropout(net, 0.8)
net = tflearn.fully_connected(net, n)
net = tflearn.fully_connected(net, n)
net = tflearn.dropout(net, 0.8)
net = tflearn.fully_connected(net, n)
net = tflearn.fully_connected(net, n)
net = tflearn.dropout(net, 0.8)
net = tflearn.fully_connected(net, n)
net = tflearn.fully_connected(net, n)
net = tflearn.dropout(net, 0.8)
net = tflearn.fully_connected(net, n)
net = tflearn.fully_connected(net, n)
net = tflearn.dropout(net, 0.8)
net = tflearn.fully_connected(net, n)
net = tflearn.fully_connected(net, n)
net = tflearn.dropout(net, 0.8)
net = tflearn.fully_connected(net, n)
net = tflearn.fully_connected(net, n)
net = tflearn.dropout(net, 0.8)
net = tflearn.fully_connected(net, n)
net = tflearn.fully_connected(net, n)
net = tflearn.dropout(net, 0.5)
net = tflearn.fully_connected(net, len(speakers), activation='softmax')
net = tflearn.regression(net, optimizer='adam', loss='categorical_crossentropy')
# now train the model!
model = tflearn.DNN(net)
model.fit(X, Y, n_epoch=200, show_metric=True, snapshot_step=1000, run_id='SpeakerRec')
| [
"drew.boles88@gmail.com"
] | drew.boles88@gmail.com |
39ba0f48eff89e8b494be8e685767d654f52cf3f | 8380e98943ca6b17909907cdfcca24b70f8b581b | /qiskit/pulse/instructions/directives.py | ee3d7955dbfa7c804d4f0d3733bd5cbcb057579c | [
"Apache-2.0"
] | permissive | JohanNicander/qiskit-terra | 97fbd6465223463660cbb06812516db0a00a458c | a67440aabef1f87b8c600e6952109d5b647a94b4 | refs/heads/master | 2022-12-11T04:22:17.428403 | 2020-08-06T12:40:53 | 2020-08-06T12:40:53 | 280,187,204 | 0 | 0 | Apache-2.0 | 2020-07-16T15:17:26 | 2020-07-16T15:17:25 | null | UTF-8 | Python | false | false | 1,742 | py | # -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Directives are hints to the pulse compiler for how to process its input programs."""
from abc import ABC
from typing import Optional
from qiskit.pulse import channels as chans
from qiskit.pulse.instructions import instruction
class Directive(instruction.Instruction, ABC):
"""A compiler directive.
This is a hint to the pulse compiler and is not loaded into hardware.
"""
class RelativeBarrier(Directive):
"""Pulse ``RelativeBarrier`` directive."""
def __init__(self,
*channels: chans.Channel,
name: Optional[str] = None):
"""Create a relative barrier directive.
The barrier directive blocks instructions within the same schedule
as the barrier on channels contained within this barrier from moving
through the barrier in time.
Args:
channels: The channel that the barrier applies to.
name: Name of the directive for display purposes.
"""
super().__init__(tuple(channels), 0, tuple(channels), name=name)
def __eq__(self, other):
"""Verify two barriers are equivalent."""
return (isinstance(other, type(self)) and
set(self.channels) == set(other.channels))
| [
"noreply@github.com"
] | JohanNicander.noreply@github.com |
ba5fe81af0632687c14d963ae372ba1b8ee5503f | a8750439f200e4efc11715df797489f30e9828c6 | /CodeForces/EC_46_2_C_1.py | 8ee3122998960e839a22312f1db953f98a96581f | [] | no_license | rajlath/rkl_codes | f657174305dc85c3fa07a6fff1c7c31cfe6e2f89 | d4bcee3df2f501349feed7a26ef9828573aff873 | refs/heads/master | 2023-02-21T10:16:35.800612 | 2021-01-27T11:43:34 | 2021-01-27T11:43:34 | 110,989,354 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 310 | py | n = int(input())
a = []
for i in range(n):
l, r = [int(x) for x in input().split()]
a.append([l, 1])
a.append([r+1, -1])
a = sorted(a)
ans = [0] * (n + 1)
idx = 0
for i in range(len(a) - 1):
idx += a[i][1]
ans[idx] += a[i+1][0] - a[i][0]
for i in range(1, n+1):
print(ans[i], end = " ") | [
"raj.lath@gmail.com"
] | raj.lath@gmail.com |
9b05b73cd5f0370491f151c54c36a981422be0f9 | 16b567ed93c10287f7b9e90ddc819512aadbcaf5 | /filters/stopwords_filter.py | c69ebfd10592f81b4efc0b75d78a5a7c9c1a54df | [] | no_license | Rigel772/python-keyword-density | b3bdfb70e06e53264be7507e4111a923b40ea51a | c3a4469360de3d7c02dd9b8de2dc7eac45a3253a | refs/heads/master | 2020-05-19T11:28:23.854324 | 2018-11-02T13:22:51 | 2018-11-02T13:22:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 647 | py | #-*- coding: utf-8 -*-
import os.path
from .base_filter import BaseFilter
class StopwordsFilter(BaseFilter):
def __init__(self, country):
super(StopwordsFilter, self).__init__()
self.country = country
stopword_fname = '%s.txt' % self.country
folder_name = os.path.dirname(__file__)
self.fname = os.path.join(folder_name, 'stopwords', stopword_fname)
with open(self.fname, 'rb') as f:
self.stopwords = {l.strip().decode('utf8') for l in f if l}
def predicate(self, tok):
"""Returns True if tok not in stopwords else False"""
return tok not in self.stopwords
| [
"you@example.com"
] | you@example.com |
0944a6584dec0a46eaffce3109f6738e8ef1de9b | d27152dda9c4e42c75239ef8c38dcaaed7c88c64 | /pchtrakt/mediaparser.py | f5c1ac80451826567d411ab1ab7659605c35f175 | [] | no_license | cptjhmiller/pchtrakt | f379731066191ea428b8ec1e72bf9bad5c67e3cc | e7a13d5ffbd4eb5e4d6038c179c85024bc4e0f89 | refs/heads/dvp | 2021-01-10T20:51:12.480210 | 2015-08-31T10:58:47 | 2015-08-31T10:58:47 | 6,009,369 | 4 | 3 | null | 2015-04-14T01:21:45 | 2012-09-29T13:48:39 | Python | UTF-8 | Python | false | false | 12,775 | py | # -*- coding: utf-8 -*-
#
# Authors: Jonathan Lauwers / Frederic Haumont
# URL: http://github.com/pchtrakt/pchtrakt
#
# This file is part of pchtrakt.
#
# pchtrakt is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pchtrakt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pchtrakt. If not, see <http://www.gnu.org/licenses/>.
#from os.path import basename, isfile
from urllib import quote_plus
from urllib2 import urlopen, HTTPError, URLError, Request
import json, re
from lib import parser
from movieparser import *
from lib.tvdb_api import tvdb_exceptions
from pchtrakt.config import *
from lib.tvdb_api import tvdb_api,tvdb_exceptions
from lib.utilities import Debug, sp, getNfo, getIDFromNFO
from xml.etree import ElementTree
import os
tvdb = tvdb_api.Tvdb()
class MediaParserResult():
def __init__(self,file_name):
self.file_name = file_name
class MediaParserResultTVShow(MediaParserResult):
def __init__(self,file_name,name,dirty,season_number,episode_numbers,air_by_date):
self.file_name = file_name
self.path = os.path.dirname(file_name)
self.name = name
self.air_by_date = air_by_date
self.dirty = dirty
self.id = ''
if self.air_by_date:
if self.name in pchtrakt.dictSerie:
self.id = pchtrakt.dictSerie[self.name]['TvDbId']
else:
self.id = tvdb[self.name]['id']
season_number = -1
episode_numbers = [self.air_by_date]
url = ('http://thetvdb.com/api/GetEpisodeByAirDate.php?apikey=0629B785CE550C8D&seriesid={0}&airdate={1}'.format(quote_plus(self.id), self.air_by_date))
Debug('[The TvDB] GET EPISODE USING: ' + url)
oResponse = ElementTree.parse(urlopen(url,None,5))
for movie in oResponse.findall('./'):
season_number = movie.find('SeasonNumber').text
episode_numbers = movie.find('EpisodeNumber').text
self.season_number = season_number
self.episode_numbers = episode_numbers
if self.name in pchtrakt.dictSerie:
self.id = pchtrakt.dictSerie[self.name]['TvDbId']
self.year = pchtrakt.dictSerie[self.name]['Year']
else:
if parseNFO:
files = []
if (self.file_name.split(".")[-1] == "DVD" or self.file_name.split(".")[-1].lower() == "iso"):
if isfile(self.path.rsplit('/', 2)[0] + '/tvshow.nfo'):
pchtrakt.logger.info(' [Pchtrakt] found ../../tvshow.nfo')
files.extend([(self.path.rsplit('/', 1)[0] + '/tvshow.nfo')])
elif isfile(self.path.rsplit('/', 1)[0] + '/tvshow.nfo'):
pchtrakt.logger.info(' [Pchtrakt] found ../tvshow.nfo')
files.extend([(self.path.rsplit('/', 1)[0] + '/tvshow.nfo')])
else:
for root, dirs, walk_files in os.walk(self.path):
files.extend([(os.path.join(root, file)) for file in walk_files])
for file in getNfo(files):
pchtrakt.logger.info(' [Pchtrakt] parsing %s' % file)
self.id = getIDFromNFO('TV', file)
if self.id != '':
try:
if (re.match("tt\d{5,10}", self.id)):
pchtrakt.logger.info(' [Pchtrakt] Using IMDB ID to find match')
self.id = tvdb[self.id]['id']
self.name = tvdb[int(self.id)]['seriesname']
pchtrakt.online = 1
if tvdb[self.name]['firstaired'] != None:
self.year = tvdb[self.name]['firstaired'].split('-')[0]
else:
self.year = None
pchtrakt.dictSerie[self.name]={'Year':self.year, 'TvDbId':self.id}
with open('cache.json','w') as f:
json.dump(pchtrakt.dictSerie, f, separators=(',',':'), indent=4)
except tvdb_exceptions.tvdb_error, e:
pchtrakt.online = 0
break
if self.id == '':
try:
self.id = tvdb[self.name]['id']
pchtrakt.online = 1
if tvdb[self.name]['firstaired'] != None:
self.year = tvdb[self.name]['firstaired'].split('-')[0]
else:
self.year = None
pchtrakt.dictSerie[self.name]={'Year':self.year, 'TvDbId':self.id}
with open('cache.json','w') as f:
json.dump(pchtrakt.dictSerie, f, separators=(',',':'), indent=4)
except tvdb_exceptions.tvdb_error, e:
pchtrakt.online = 0
class MediaParserResultMovie(MediaParserResult):
def imdbapi1(self):
try:
if self.year is None:
ImdbAPIurl = ('http://www.imdbapi.com/?t={0}'.format(quote_plus(self.name.encode('utf-8', 'replace'))))
else:
ImdbAPIurl = ('http://www.imdbapi.com/?t={0}&y={1}'.format(quote_plus(self.name.encode('utf-8', 'replace')), self.year))
Debug('[IMDB api] Trying search 1: ' + ImdbAPIurl)
oResponse = urlopen(ImdbAPIurl,None,10)
myMovieJson = json.loads(oResponse.read())
if myMovieJson['Response'] == "True":
self.id = myMovieJson['imdbID']
Debug('[IMDB api] Movie match using: ' + ImdbAPIurl)
return self.id
except Exception as e:
Debug('[IMDB api] ' + str(e))
def imdbapi2(self):
try:
if self.id == None:
if self.year is None:
ImdbAPIurl = ('http://www.deanclatworthy.com/imdb/?q={0}'.format(quote_plus(self.name.encode('utf-8', 'replace'))))
else:
ImdbAPIurl = ('http://www.deanclatworthy.com/imdb/?q={0}&year={1}'.format(quote_plus(self.name.encode('utf-8', 'replace')), self.year))
Debug('[IMDB api] Trying search 2: ' + ImdbAPIurl)
oResponse = urlopen(ImdbAPIurl,None,10)
myMovieJson = json.loads(oResponse.read())
if "title" in myMovieJson.keys():
self.id = myMovieJson['imdbid']
Debug('[IMDB api] Found Movie match using: ' + ImdbAPIurl)
return self.id
except Exception as e:
Debug('[IMDB api] ' + str(e))
def imdbapi3(self):
try:
if self.id == None:
if self.year is None:
ImdbAPIurl = ('http://www.google.com/search?q=www.imdb.com:site+{0})&num=1&start=0'.format(quote_plus(self.name.encode('utf-8', 'replace'))))
else:
ImdbAPIurl = ('http://www.google.com/search?q=www.imdb.com:site+{0}+({1})&num=1&start=0'.format(quote_plus(self.name.encode('utf-8', 'replace')), self.year))
Debug('[IMDB api] Trying search 3: ' + ImdbAPIurl)
request = Request(ImdbAPIurl, None, {'User-Agent':'Mosilla/5.0 (Macintosh; Intel Mac OS X 10_7_4) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11'})
urlfile = urlopen(request)
page = urlfile.read()
entries = re.findall("/title/tt(\d{7})/", page)
self.id = "tt"+str(entries[0])
Debug('[IMDB api] Search address = ' + ImdbAPIurl + ' ID = ' + self.id)
return self.id
except Exception as e:
Debug('[IMDB api] ' + str(e))
def __init__(self,fullpath,file_name,name,year,imdbid):
self.file_name = file_name
self.path = os.path.dirname(fullpath)
self.name = name
self.year = year
self.id = imdbid
if parseNFO and self.id == None:
files = []
for root, dirs, walk_files in os.walk(self.path):
files.extend([sp(os.path.join(root, file)) for file in walk_files]) #not sure if sp is needed
for file in getNfo(files):
self.id = getIDFromNFO('MOVIE', file)
if self.id != '':
break
if pchtrakt.online and (self.id == None or self.id == ''):
retries = 0
while self.id == None:
self.id = self.imdbapi1()
if (self.id != None and self.id != ''):
break
#self.id = self.imdbapi2()
#if (self.id != None and self.id != ''):
# break
self.id = self.imdbapi3()
if (self.id != None and self.id != ''):
break
if retries >= 1:
raise MovieResultNotFound(file_name)
break
else:
msg = ('[IMDB api] First lookup failed, trying 1 more time')
pchtrakt.logger.warning(msg)
retries += 1
sleep(60)
continue
class MediaParserResultMoviebackup(MediaParserResult):
def __init__(self,file_name,name,year,imdbid):
self.file_name = file_name
self.name = name
if year == None:
self.year = ""
else:
self.year = year
if pchtrakt.online:
ImdbAPIurl = ('http://www.imdbapi.com/?t={0}&y={1}'.format(quote_plus(self.name.encode('utf-8', 'replace')), self.year))
Debug('[IMDB api] Trying search 1: ' + ImdbAPIurl)
try:
oResponse = urlopen(ImdbAPIurl,None,10)
myMovieJson = json.loads(oResponse.read())
self.id = myMovieJson['imdbID']
Debug('[IMDB api] Movie match using: ' + ImdbAPIurl)
except URLError, HTTPError:
pass
except KeyError:
ImdbAPIurl = ('http://www.deanclatworthy.com/imdb/?q={0}&year={1}'.format(quote_plus(self.name.encode('utf-8', 'replace')), self.year))
Debug('[IMDB api] Trying search 2: ' + ImdbAPIurl)
try:
oResponse = urlopen(ImdbAPIurl,None,10)
myMovieJson = json.loads(oResponse.read())
self.id = myMovieJson['imdbid']
Debug('[IMDB api] Found Movie match using: ' + ImdbAPIurl)
except:
try:
address = ('http://www.google.com/search?q=www.imdb.com:site+{0}&num=1&start=0'.format(quote_plus(self.name.encode('utf-8', 'replace'))))
Debug('[IMDB api] Trying search 3: ' + address)
request = Request(address, None, {'User-Agent':'Mosilla/5.0 (Macintosh; Intel Mac OS X 10_7_4) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11'})
urlfile = urlopen(request)
page = urlfile.read()
entries = re.findall("/title/tt(\d{7})/", page)
self.id = "tt"+str(entries[0])
Debug('[IMDB api] Search address = ' + address + ' ID = ' + self.id)
except:
raise MovieResultNotFound(file_name)
else:
self.id = '0'
class MediaParserUnableToParse(Exception):
def __init__(self, file_name):
self.file_name = file_name
class MediaParser():
def __init__(self):
self.TVShowParser = parser.NameParser()
self.MovieParser = MovieParser()
def parse(self, file_name):
try:
parsedResult = self.TVShowParser.parse(file_name)
oResultTVShow = MediaParserResultTVShow(file_name,parsedResult.series_name,parsedResult.series_name_dirty,parsedResult.season_number,parsedResult.episode_numbers,parsedResult.air_by_date)
return oResultTVShow
except parser.InvalidNameException as e:
oMovie = self.MovieParser.parse(file_name)
return oMovie
raise MediaParserUnableToParse(' [Pchtrakt] Unable to parse the filename and detecte an movie or a tv show') | [
"cpt.miller@fun4gamers.co.uk"
] | cpt.miller@fun4gamers.co.uk |
5216eb7ed2b3564a1c9820eed4716c5dd256f187 | dd3fce1dcc928e7c95e1c203ef214eb0159614d0 | /mysite/settings.py | c59b01a6439757b0ac5baf0156fb93b07386ae2d | [] | no_license | sorbg/my-first-blog | 2c31f86797b2ee6d40e1481dffa6222a45096f4a | 4781262320e0f5d425d11a2cd6b3d56e61d39fb6 | refs/heads/master | 2021-05-06T17:38:15.678120 | 2017-11-24T16:19:23 | 2017-11-24T16:19:23 | 111,898,556 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,210 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 1.11.7.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '%5f7#t*a)mk_$_jaf975a2ucx^=61@26zexz_j&*1v$a@n(*!1'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1', 'sorbg.pythonanywhere.com']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
# STATIC_ROOT:
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
| [
"seanoreillybg@gmail.com"
] | seanoreillybg@gmail.com |
3d514da561570f82bc381f126db86de33619319c | 3e4a921135ebbf299cfb1bfca7c037516d517bc0 | /KNNImplementation.py | 552ae65e6085669e9b5adf0abdadf5bb2847631b | [] | no_license | Priyanka554/Heart-Disease-Prediction | 1bacd4971437e95543530663629d1a2c3d82b408 | 65e3fe22259ca4cb3b55084480fad6b184f8504a | refs/heads/main | 2023-02-25T15:58:53.431201 | 2021-02-01T20:48:56 | 2021-02-01T20:48:56 | 335,079,576 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,377 | py | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from collections import Counter
def kNNClassifier(dataset):
print()
print("######################## KNN Classifier ############################")
print()
y = dataset['target']
X = dataset.drop(['target'], axis = 1)
def Euclidean_distance(a, b):
# No.of dimensions of point a
length = len(a)
# intialising the distance
distance = 0
# Calculating the euclidean distance between points a and b
for i in range(length):
distance += abs(a[i] - b[i])**2
distance = distance**(1/2)
#returning the distance
return distance
#Splitting the data into train and test data by assigning the percentage to test_size
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, random_state=0)
# Scaling the train set and then the test set
standaradScaler = StandardScaler()
X_train = standaradScaler.fit_transform(X_train)
X_test = standaradScaler.transform(X_test)
def knn_implement(X_train, X_test, y_train, y_test, k):
yImpTest = []
for testPoint in X_test:
#intialising the distances
distances = []
for trainPoint in X_train:
distance = Euclidean_distance(testPoint, trainPoint)
distances.append(distance)
#Storing the calculated euclidean distance in Data Frames
DFDistance = pd.DataFrame(data=distances, columns=['dist'],
index=y_train.index)
#Sorting the Distances and getting the k closest points
DFClosest = DFDistance.sort_values(by=['dist'], axis=0)[:k]
# Creating counter to track the closest points
counter = Counter(y_train[DFClosest.index])
#Getting the common among the closest points
predict = counter.most_common()[0][0]
#Appending all the predicted list
yImpTest.append(predict)
return yImpTest
#intiliasing the scores
scores = []
#Looping the k values from 1 to 10
for k in range(1,10):
yImpTest = knn_implement(X_train, X_test, y_train, y_test, k)
#Getting the accuracy score
scores.append(accuracy_score(y_test, yImpTest))
print(scores)
#[1.0, 0.9557522123893806, 0.9292035398230089, 0.9026548672566371, 0.8790560471976401, 0.8967551622418879,
#0.8790560471976401, 0.8967551622418879, 0.8967551622418879]
# [1.0, 0.9675324675324676, 0.9383116883116883, 0.9090909090909091, 0.8766233766233766, 0.8928571428571429,
# 0.8733766233766234, 0.8928571428571429, 0.8993506493506493]
#plotting the graph for k and scores calculated
plt.plot([k for k in range(1, 10)], scores, color = 'green')
for i in range(1,10):
plt.text(i, scores[i-1], (i, scores[i-1]))
#x-axis and y-axis lables and titles for graph
plt.title('Graph for K Neighbors classifier scores')
plt.xlabel('Neighbors (K)')
plt.ylabel('Scores') | [
"noreply@github.com"
] | Priyanka554.noreply@github.com |
10f6b5b97469d394300ef6b65037975b33dec662 | 3fb1d9b6c97d97db2922d1ae32c177f02a8559ef | /Lesson 8 - Project Prep/6_PostAndAnswerLengthExercise_reducer.py | 3926190d5e0b5d494f7817e9ef5112b8710d9e8d | [] | no_license | nanfengpo/udacity_hadoop_mapreduce | cdaa692b8fca1eed2c7b51171bb36cdeaa421da3 | ffba21dbd32568995e9f2b1366b1338a103f4a6f | refs/heads/master | 2021-01-21T15:03:58.541937 | 2017-04-16T22:01:00 | 2017-04-16T22:01:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,091 | py | #!/usr/bin/python
import sys
def reducer():
answerLength = 0
answerCount = 0
questionLength = 0
oldKey = None
for line in sys.stdin:
data_mapped = line.strip().split("\t")
if len(data_mapped) != 3:
# Something has gone wrong. Skip this line.
continue
thisKey, thisNodeType, thisBodyLength = data_mapped
if oldKey and oldKey != thisKey:
print '{0}\t{1}\t{2}'.format(oldKey, questionLength, float(answerLength) / float((answerCount if answerCount > 0 else 1)))
answerCount = 0
answerLength = 0
questionLength = 0
if thisNodeType == "answer":
answerLength += int(thisBodyLength)
answerCount += 1
elif thisNodeType == "question":
questionLength = int(thisBodyLength)
oldKey = thisKey
if oldKey != None:
print '{0}\t{1}\t{2}'.format(oldKey, questionLength, float(answerLength) / float((answerCount if answerCount > 0 else 1)))
def main():
reducer()
if __name__ == '__main__':
main()
| [
"180726@edu.p.lodz.pl"
] | 180726@edu.p.lodz.pl |
34969a057bca7dfae9547d8cf1ae6495fda27954 | be20fb78227fd9d3fbad2f1da77586130325763b | /gift_cards_django/gift_cards_app/admin.py | 0874163ab693677f755b10ed89fec54c23302401 | [] | no_license | chad-dickens/Memphis-Blue | 2ddb1372b7ed5038fcf059e448a43e3d83c2ebbd | aa49dc9ee821073f8e6344df681fbf9010b6fc4a | refs/heads/master | 2023-05-28T19:16:57.294773 | 2021-06-21T12:21:01 | 2021-06-21T12:21:01 | 281,660,322 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 132 | py | from django.contrib import admin
from gift_cards_app.models import Orders
# Register your models here.
admin.site.register(Orders)
| [
"chad@192-168-1-108.tpgi.com.au"
] | chad@192-168-1-108.tpgi.com.au |
c6c7206c12803b9da1445ff393a4414592a8abb7 | f43b23b981ad5c5206bd43b1891cf21328cef412 | /shop_services/peewee_test/more_define.py | 7b1a5e3b588cc885fc4d1d21939c9a4dae1b66ce | [
"MIT"
] | permissive | CodePrometheus/Starry-Supermarket | 518a8910eeba82ac4c2c0ac2c4387533cc241e03 | c4a98ce563e7e21fc55bc1749417dc4b9d1529f7 | refs/heads/master | 2023-08-24T20:26:02.252809 | 2021-10-25T15:25:53 | 2021-10-25T15:25:53 | 413,009,093 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,995 | py | import datetime
import logging
from peewee import *
logger = logging.getLogger("peewee")
logger.setLevel(logging.DEBUG)
logger.addHandler(logging.StreamHandler())
db = MySQLDatabase('starry-supermarket', host='localhost',
user='root', passwd='root')
class BaseModel(Model):
add_time = DateTimeField(default=datetime.datetime.now, verbose_name="添加时间")
class Meta:
database = db # 这里是数据库链接,为了方便建立多个表,可以把这个部分提炼出来形成一个新的类
class Person(BaseModel):
first = CharField()
last = CharField()
class Meta:
primary_key = CompositeKey('first', 'last')
class Pet(BaseModel):
owner_first = CharField()
owner_last = CharField()
pet_name = CharField()
class Meta:
constraints = [SQL('FOREIGN KEY(owner_first, owner_last) REFERENCES person(first, last)')]
class Blog(BaseModel):
pass
class Tag(BaseModel):
pass
# 复合主键
class BlogToTag(BaseModel):
"""A simple "through" table for many-to-many relationship."""
blog = ForeignKeyField(Blog)
tag = ForeignKeyField(Tag)
class Meta:
primary_key = CompositeKey('blog', 'tag')
class User(BaseModel):
# 如果没有设置主键,那么自动生成一个id的主键
username = CharField(max_length=20)
age = CharField(default=18, max_length=20, verbose_name="年龄")
class Meta:
table_name = 'new_user' # 这里可以自定义表名
if __name__ == "__main__":
db.connect()
db.create_tables([Person, Pet, Blog, Tag, BlogToTag, User])
# id = Person.insert({
# 'first': 'star',
# 'last': '2021'
# }).execute()
#
# for i in range(10):
# User.create(username=f"2021-{i}", age=random.randint(18, 40))
#
# id = Blog.insert({}).execute()
# print(id)
# and
person = Person.select().where((Person.first == "li") & (Person.first == "li1"))
print(person.sql())
| [
"Code_Prince@outlook.com"
] | Code_Prince@outlook.com |
d2abe9a6ced4dbf9bd2576aa1d42042ffe66e940 | 6f5db07830d3eed9498362d493990b098d745c9c | /CloudServer.py | f20c796e19813bbff8b7fded79dd54a9c4bf6352 | [] | no_license | harshitachoradia/delivery-verification-service | 82545675f1ce12dd9fb50a5b0ad79c06f6b92b88 | 8906c422733d289ccdbbf279a20d3907aea2345c | refs/heads/main | 2023-05-24T07:34:56.062498 | 2021-06-13T07:13:27 | 2021-06-13T07:13:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,484 | py | from flask import request, url_for
from flask_api import FlaskAPI
from flask import jsonify
from flask import json
from flask import Response
from flask import send_from_directory, send_file
from flask_cors import CORS
import cv2
import tensorflow as tf
from tensorflow.keras import datasets, layers, models, regularizers
import numpy as np
import os
import sys
import datetime
sys.path.append(os.path.abspath("/home/verifier/database"))
import database
app = FlaskAPI(__name__)
CORS(app)
cassandraOperations = database.CassandraOperations()
#Title each block, clean and format code
IMG_SIZE = 100
CHECKPOINT_PATH = '/home/verifier/cloud/TrainedModel/cp.ckpt'
CATEGORIES = ['NandiniBlue', 'NandiniGoodLife', 'NandiniGreen', 'NandiniOrange', 'NandiniSlim']
def createModel():
global cnn
cnn = models.Sequential([
layers.Conv2D(filters=32, kernel_size=(3, 3), activation='relu', input_shape=(IMG_SIZE, IMG_SIZE, 3)),
layers.MaxPooling2D((2, 2)),
layers.Conv2D(filters=64, kernel_size=(3, 3), activation='relu'),
layers.MaxPooling2D((2, 2)),
layers.Conv2D(filters=32, kernel_size=(3, 3), activation='relu'),
layers.MaxPooling2D((2, 2)),
layers.Flatten(),
layers.Dense(
units=32,
kernel_regularizer=regularizers.l1_l2(l1=1e-5, l2=1e-4),
bias_regularizer=regularizers.l2(1e-4),
activity_regularizer=regularizers.l2(1e-5)
),
layers.Dense(5, activation='softmax')
])
cnn.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
cnn.load_weights(CHECKPOINT_PATH)
return cnn
def classify(imgPath):
global cnn
img = cv2.imread(imgPath)
img = cv2.resize(img, (IMG_SIZE, IMG_SIZE))
img = np.array(img).reshape(-1, IMG_SIZE, IMG_SIZE, 3)
res = CATEGORIES[np.argmax(cnn.predict(img))]
return res
def calculateVolume(weight):
if weight >= 450 and weight <= 550:
volume = 0.5
else:
volume = 1
return volume
def calculateCost(milktype, volume):
if milktype == 'NandiniBlue':
cost = 18
elif milktype == 'NandiniOrange':
cost = 20
elif milktype == 'NandiniGreen':
cost = 21
elif milktype == 'NandiniGoodLife':
cost = 23
elif milktype == 'NandiniSlim':
cost = 25
else:
cost = 0
return cost*volume*2
@app.route('/receiveImages', methods=['POST'])
def receiveImages():
print('Retreiving Image')
if request is None:
print("Invalid request from Client")
else:
print("Received request: ",request.method)
if request.method == 'POST':
if request.files is None:
print("No Image sent")
else:
print("- - - - ",request.files,"- - - -")
receivedFiles = request.files['upload']
receivedFiles.save(receivedFiles.filename)
print("Image Saved successfully")
return jsonify({'status': "success"})
else:
print("Not POST request")
return jsonify({'status': 'Invalid method'})
@app.route('/getAllData', methods=['GET'])
def getAllData():
cache = {}
print('fetching all data')
data = cassandraOperations.executeQuery("select * from milkdata.delivery")
for item in data:
cache[str(item.timestamp)] = str(item.prediction)
return jsonify(cache)
@app.route('/verify',methods=['POST'])
def verify():
print("Detection started...")
data = request.get_json()
print(data)
deviceName = data['devicename']
timeData = str(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
imageData = data['imagename']
weightData = data['weight']
volumeData = calculateVolume(weightData)
#run the model
try:
cnn = createModel()
predictionResult = classify(imageData)
costData = calculateCost(predictionResult, volumeData)
except:
cassandraOperations.executeQuery("INSERT INTO delivery(timestamp,device,image,weight,prediction,cost,volume) VALUES('{time}','{device}','{image}',{weight},'{prediction}','{cost}','{volume}');".format(time=timeData,device=deviceName,image=imageData,weight=weightData,prediction='Error',cost='Error',volume=volumeData,status='NotPaid'))
return jsonify({'status': "unsuccessful","device":deviceName,"time":timeData,"Type":"Error"})
#Do DB operations here
#Add all items to table
cassandraOperations.executeQuery("INSERT INTO delivery(timestamp,device,image,weight,prediction,cost,volume) VALUES('{time}','{device}','{image}',{weight},'{prediction}','{cost}','{volume}');".format(time=timeData,device=deviceName,image=imageData,weight=weightData,prediction=str(predictionResult),cost=costData,volume=volumeData,status='NotPaid'))
#Handle exceptions for each case
return jsonify({'status': "success","device":deviceName,"time":timeData,"Type":predictionResult})
#Create mode end points to retreive from DB (GET methods) or use DB directly in your application
if __name__ == '__main__':
cassandraOperations.createSession('34.89.206.52')
cassandraOperations.setLogger('INFO')
cassandraOperations.createKeyspaceIfNotExists('milkdata')
cassandraOperations.executeQuery('CREATE TABLE IF NOT EXISTS delivery(timestamp timestamp PRIMARY KEY, device text, image text, weight float,prediction text, cost float, volume float, status text);')
app.run(host='0.0.0.0', port=50053)
| [
"vmadyasta@gmail.com"
] | vmadyasta@gmail.com |
486042156b03a721b93efdb0f698cf3f62fe2f93 | 2e85b35497e7d97a3f887e2dd82e25d4e9338937 | /introducao_ao_python/topico_5/importando_modulos.py | 118a930dd232c71d2f5cc036244125d29c0b3275 | [] | no_license | magnologan/introducao-a-pentest-com-python | bad95932dc96c1a18fe1cdd2f799bc893c6b9c32 | 1a8ff1744ec28e6dd92fa127d1042f467e6f48a7 | refs/heads/master | 2021-05-15T10:04:13.056901 | 2017-10-02T14:41:45 | 2017-10-02T14:41:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 154 | py | ### importando o modulo inteiro de tempo
import time
time.sleep(2)
### importando apenas a funcao especifica de sleep
from time import sleep
sleep(2)
| [
"samukasmk@gmail.com"
] | samukasmk@gmail.com |
e638da845ef167b11b3122f120cd0e44acefa0de | e93c6e93f612bca7f192adf539b4f489ad114ab5 | /m01_basics/l_07_nested_data.py | 6739ccff73e049c4db47923a80292b1b7f11a800 | [
"MIT"
] | permissive | be1iever/python-52-weeks | 8d57a10af9c0f5309ba21a9503a8fdf4bd82840c | 185d8b3147c6bfb069d58e4933b74792081bf8f2 | refs/heads/main | 2023-08-19T08:21:45.330447 | 2021-09-21T15:00:28 | 2021-09-21T15:00:28 | 409,847,518 | 1 | 0 | MIT | 2021-09-24T05:51:14 | 2021-09-24T05:51:13 | null | UTF-8 | Python | false | false | 2,506 | py | from pprint import pprint
from random import choice
import copy
from util.create_utils import create_network
device = {
"name": "r3-L-n7",
"vendor": "cisco",
"model": "catalyst 2960",
"os": "ios",
"interfaces": [
]
}
print("\n\n----- device with no interfaces --------------------")
for key, value in device.items():
print(f"{key:>16s} : {value}")
interfaces = list()
for index in range(0, 8):
interface = {
"name": "g/0/0/" + str(index),
"speed": choice(["10", "100", "1000"])
}
interfaces.append(interface)
device["interfaces"] = interfaces
print("\n\n----- device with interfaces --------------------")
for key, value in device.items():
if key != "interfaces":
print(f"{key:>16s} : {value}")
else:
print(f"{key:>16s} :")
for interface in device["interfaces"]:
print(f"\t\t\t\t\t{interface}")
print()
print("\n\n----- device with interfaces using pprint--------------------")
pprint(device)
print("\n\n----- network with devices and interfaces --------------------")
network = create_network(num_devices=4, num_subnets=4)
pprint(network)
print("\n----- information about network --------------------")
print(f"-- number of subnets: {len(network['subnets'])}")
print(f"-- list of subnets: {network['subnets'].keys()}")
print(f"-- list of subnets w/o extraneous: {', '.join(network['subnets'])}")
print("\n----- network and devices nicely formatted --------------------")
for subnet_address, subnet in network["subnets"].items():
print(f"\n-- subnet: {subnet_address}")
for device in subnet["devices"]:
print(f" |-- device: {device['name']:8} {device['ip']:10} {device['vendor']:>10} : {device['os']}")
print("\n\n----- remember assignment vs shallow copy vs deep copy --------------------")
print(" modify 'network' only, and see if assign/copy/deepcopy versions reflect that change")
network_assign = network
network["subnets"]["10.0.1.0"]["devices"][0]["name"] = "different name assigned"
print(f" --- network == network_assign : {network==network_assign}")
network_copy = copy.copy(network)
network["subnets"]["10.0.1.0"]["devices"][0]["name"] = "another different name, copy this time"
print(f" --- network == network_copy : {network==network_copy}")
network_deepcopy = copy.deepcopy(network)
network["subnets"]["10.0.1.0"]["devices"][0]["name"] = "this time with deep copy"
print(f" --- network == network_deepcopy : {network==network_deepcopy}")
| [
"chuck.a.black@gmail.com"
] | chuck.a.black@gmail.com |
ce2469650940b0fa5dfceaad6a4836793f0f23b9 | 30fd01dbae99721069d936d5daa6a8050488a248 | /hacker/FirefoxSQLite.py | 7da8415a2e85749f5c5b4f1f6d446bc2933e030b | [] | no_license | chenshuo666/mypython | 6b334ad42b117c2750129028e82037643d99ab6a | 3cfcf49f2d6cc3733d244cc7eb212a4dba6a439a | refs/heads/master | 2020-03-10T04:04:35.530485 | 2018-04-17T04:02:16 | 2018-04-17T04:02:16 | 129,182,623 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,999 | py | #!/usr/bin/python
# coding=utf-8
import re
import optparse
import os
import sqlite3
# 解析打印downloads.sqlite文件的内容,输出浏览器下载的相关信息
def printDownloads(downloadDB):
conn = sqlite3.connect(downloadDB)
c = conn.cursor()
c.execute('SELECT name, source, datetime(endTime/1000000, \'unixepoch\') FROM moz_downloads;')
print('\n[*] --- Files Downloaded --- ')
for row in c:
print('[+] File: ' + str(row[0]) + ' from source: ' + str(row[1]) + ' at: ' + str(row[2]))
# 解析打印cookies.sqlite文件的内容,输出cookie相关信息
def printCookies(cookiesDB):
try:
conn = sqlite3.connect(cookiesDB)
c = conn.cursor()
c.execute('SELECT host, name, value FROM moz_cookies')
print('\n[*] -- Found Cookies --')
for row in c:
host = str(row[0])
name = str(row[1])
value = str(row[2])
print('[+] Host: ' + host + ', Cookie: ' + name + ', Value: ' + value)
except Exception as e:
if 'encrypted' in str(e):
print('\n[*] Error reading your cookies database.')
print('[*] Upgrade your Python-Sqlite3 Library')
# 解析打印places.sqlite文件的内容,输出历史记录
def printHistory(placesDB):
try:
conn = sqlite3.connect(placesDB)
c = conn.cursor()
c.execute("SELECT url, datetime(visit_date/1000000, 'unixepoch') FROM moz_places, moz_historyvisits WHERE visit_count > 0 AND moz_places.id==moz_historyvisits.place_id;")
print('\n[*] -- Found History --')
for row in c:
url = str(row[0])
date = str(row[1])
print('[+] ' + date + ' - Visited: ' + url)
except Exception as e:
if 'encrypted' in str(e):
print('\n[*] Error reading your places database.')
print('[*] Upgrade your Python-Sqlite3 Library')
exit(0)
# 解析打印places.sqlite文件的内容,输出百度的搜索记录
def printBaidu(placesDB):
conn = sqlite3.connect(placesDB)
c = conn.cursor()
c.execute( "SELECT url, datetime(visit_date/1000000, 'unixepoch') FROM moz_places, moz_historyvisits WHERE visit_count > 0 AND moz_places.id==moz_historyvisits.place_id;")
print('\n[*] -- Found Baidu --')
for row in c:
url = str(row[0])
date = str(row[1])
if 'baidu' in url.lower():
r = re.findall(r'wd=.*?\&', url)
if r:
search = r[0].split('&')[0]
search = search.replace('wd=', '').replace('+', ' ')
print('[+] ' + date + ' - Searched For: ' + search)
def main():
parser = optparse.OptionParser("[*]Usage: firefoxParse.py -p <firefox profile path> ")
#C:\Users\用户名\AppData\Roaming\Mozilla\Firefox\Profiles\e28nsous.default,SQLite缓存的地址
parser.add_option('-p', dest='pathName', type='string', help='specify skype profile path')
(options, args) = parser.parse_args()
pathName = options.pathName
if pathName == None:
print(parser.usage)
exit(0)
elif os.path.isdir(pathName) == False:
print('[!] Path Does Not Exist: ' + pathName)
exit(0)
else:
downloadDB = os.path.join(pathName, 'downloads.sqlite')
if os.path.isfile(downloadDB):
printDownloads(downloadDB)
else:
print('[!] Downloads Db does not exist: ' + downloadDB)
cookiesDB = os.path.join(pathName, 'cookies.sqlite')
if os.path.isfile(cookiesDB):
pass
printCookies(cookiesDB)
else:
print('[!] Cookies Db does not exist:' + cookiesDB)
placesDB = os.path.join(pathName, 'places.sqlite')
if os.path.isfile(placesDB):
printHistory(placesDB)
printBaidu(placesDB)
else:
print('[!] PlacesDb does not exist: ' + placesDB)
if __name__ == '__main__':
main() | [
"929387146@qq.com"
] | 929387146@qq.com |
086f919dc5d77d92ce256911cf93cd83d411d684 | e5f194129752f3f89eed53478416d2c92cde0259 | /.cache/Microsoft/Python Language Server/stubs.v4/PW5N1gWcYNUaFmNEjFpBbn4_TkxeV53eiQaZBrpg6xw=/python3.pyi | 8befe0f027be53bb4d55f3d4c9c1399a04b4cd3d | [] | no_license | stepin-s/st | 1677fc25cb42c36afd76d2e3a48a1c0a5daf1b93 | b4cf346a446d57210197ee7f6f809cbc0a5b8799 | refs/heads/master | 2023-07-27T17:37:39.268414 | 2021-05-25T12:08:10 | 2021-05-25T12:08:10 | 405,090,749 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 230,782 | pyi | class NotImplementedType(object):
__class__ = NotImplementedType
def __init__(self, *args, **kwargs):
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
def __reduce__(self):
return ''; return ()
def __repr__(self):
'Return repr(self).'
return ''
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
class object:
'The base class of the class hierarchy.\n\nWhen called, it accepts no arguments and returns a new featureless\ninstance that has no instance attributes and cannot be given any.\n'
__class__ = object
def __delattr__(self, name):
'Implement delattr(self, name).'
return None
def __dir__(self):
'Default dir() implementation.'
return ['']
def __eq__(self, value):
'Return self==value.'
return False
def __format__(self, format_spec):
'Default object formatter.'
return ''
def __ge__(self, value):
'Return self>=value.'
return False
def __getattribute__(self, name):
'Return getattr(self, name).'
pass
def __gt__(self, value):
'Return self>value.'
return False
def __hash__(self):
'Return hash(self).'
return 0
def __init__(self):
'The base class of the class hierarchy.\n\nWhen called, it accepts no arguments and returns a new featureless\ninstance that has no instance attributes and cannot be given any.\n'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
def __le__(self, value):
'Return self<=value.'
return False
def __lt__(self, value):
'Return self<value.'
return False
def __ne__(self, value):
'Return self!=value.'
return False
def __reduce__(self):
'Helper for pickle.'
return ''; return ()
def __reduce_ex__(self, protocol):
'Helper for pickle.'
return ''; return ()
def __repr__(self):
'Return repr(self).'
return ''
def __setattr__(self, name, value):
'Implement setattr(self, name, value).'
return None
def __sizeof__(self):
'Size of object in memory, in bytes.'
return 0
def __str__(self):
'Return str(self).'
return ''
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
__Object__ = object
class type(object):
"type(object_or_name, bases, dict)\ntype(object) -> the object's type\ntype(name, bases, dict) -> a new type"
__base__ = object
__bases__ = ()
__basicsize__ = 880
def __call__(self, *args, **kwargs):
'Call self as a function.'
return cls()
__class__ = type
def __delattr__(self, name):
'Implement delattr(self, name).'
return None
__dict__ = {}
__dictoffset__ = 264
def __dir__(self):
'Specialized __dir__ implementation for types.'
return ['']
__flags__ = 2148291584
def __getattribute__(self, name):
'Return getattr(self, name).'
pass
def __init__(self, object_or_name, bases, dict):
"type(object_or_name, bases, dict)\ntype(object) -> the object's type\ntype(name, bases, dict) -> a new type"
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
def __instancecheck__(self, instance):
'Check if an object is an instance.'
return False
__itemsize__ = 40
__mro__ = ()
__name__ = 'type'
@classmethod
def __prepare__(cls, name, bases, **kwds):
'__prepare__() -> dict\nused to create the namespace for the class statement'
return None
__qualname__ = 'type'
def __repr__(self):
'Return repr(self).'
return ''
def __setattr__(self, name, value):
'Implement setattr(self, name, value).'
return None
def __sizeof__(self):
'Return memory consumption of the type object.'
return 0
def __subclasscheck__(self, subclass):
'Check if a class is a subclass.'
return False
def __subclasses__(self):
'Return a list of immediate subclasses.'
return (cls,)
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
__text_signature__ = None
__weakrefoffset__ = 368
def mro(self):
"Return a type's method resolution order."
return [__Type__()]
__Type__ = type
class int(object):
"int([x]) -> integer\nint(x, base=10) -> integer\n\nConvert a number or string to an integer, or return 0 if no arguments\nare given. If x is a number, return x.__int__(). For floating point\nnumbers, this truncates towards zero.\n\nIf x is not a number or if base is given, then x must be a string,\nbytes, or bytearray instance representing an integer literal in the\ngiven base. The literal can be preceded by '+' or '-' and be surrounded\nby whitespace. The base defaults to 10. Valid bases are 0 and 2-36.\nBase 0 means to interpret the base from the string as an integer literal.\n>>> int('0b100', base=0)\n4"
def __abs__(self):
'abs(self)'
return int()
def __add__(self, value):
'Return self+value.'
return int()
def __and__(self, value):
'Return self&value.'
return int()
def __bool__(self):
'self != 0'
return False
def __ceil__(self):
'Ceiling of an Integral returns itself.'
return int()
__class__ = int
def __divmod__(self, value):
'Return divmod(self, value).'
return (0, 0)
def __eq__(self, value):
'Return self==value.'
return False
def __float__(self):
'float(self)'
return 0.0
def __floor__(self):
'Flooring an Integral returns itself.'
return int()
def __floordiv__(self, value):
'Return self//value.'
return 0
def __format__(self, format_spec):
return ''
def __ge__(self, value):
'Return self>=value.'
return False
def __getattribute__(self, name):
'Return getattr(self, name).'
pass
def __getnewargs__(self):
return ()
def __gt__(self, value):
'Return self>value.'
return False
def __hash__(self):
'Return hash(self).'
return 0
def __index__(self):
'Return self converted to an integer, if self is suitable for use as an index into a list.'
return 0
def __init__(self, x, base=10):
"int([x]) -> integer\nint(x, base=10) -> integer\n\nConvert a number or string to an integer, or return 0 if no arguments\nare given. If x is a number, return x.__int__(). For floating point\nnumbers, this truncates towards zero.\n\nIf x is not a number or if base is given, then x must be a string,\nbytes, or bytearray instance representing an integer literal in the\ngiven base. The literal can be preceded by '+' or '-' and be surrounded\nby whitespace. The base defaults to 10. Valid bases are 0 and 2-36.\nBase 0 means to interpret the base from the string as an integer literal.\n>>> int('0b100', base=0)\n4"
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
def __int__(self):
'int(self)'
return 0
def __invert__(self):
'~self'
return int()
def __le__(self, value):
'Return self<=value.'
return False
def __lshift__(self, value):
'Return self<<value.'
return int()
def __lt__(self, value):
'Return self<value.'
return False
def __mod__(self, value):
'Return self%value.'
return int()
def __mul__(self, value):
'Return self*value.'
return int()
def __ne__(self, value):
'Return self!=value.'
return False
def __neg__(self):
'-self'
return int()
def __or__(self, value):
'Return self|value.'
return int()
def __pos__(self):
'+self'
return int()
def __pow__(self, value, mod):
'Return pow(self, value, mod).'
return int()
def __radd__(self, value):
'Return value+self.'
return int()
def __rand__(self, value):
'Return value&self.'
return int()
def __rdivmod__(self, value):
'Return divmod(value, self).'
return (0, 0)
def __repr__(self):
'Return repr(self).'
return ''
def __rfloordiv__(self, value):
'Return value//self.'
return int()
def __rlshift__(self, value):
'Return value<<self.'
return int()
def __rmod__(self, value):
'Return value%self.'
return int()
def __rmul__(self, value):
'Return value*self.'
return int()
def __ror__(self, value):
'Return value|self.'
return int()
def __round__(self, ndigits=0):
'Rounding an Integral returns itself.\nRounding with an ndigits argument also returns an integer.'
return int()
def __rpow__(self, value, mod):
'Return pow(value, self, mod).'
return int()
def __rrshift__(self, value):
'Return value>>self.'
return int()
def __rshift__(self, value):
'Return self>>value.'
return int()
def __rsub__(self, value):
'Return value-self.'
return int()
def __rtruediv__(self, value):
'Return value/self.'
return int()
def __rxor__(self, value):
'Return value^self.'
return int()
def __sizeof__(self):
'Returns size in memory, in bytes.'
return 0
def __sub__(self, value):
'Return self-value.'
return int()
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
def __truediv__(self, value):
'Return self/value.'
return __Float__()
def __trunc__(self):
'Truncating an Integral returns itself.'
return int()
def __xor__(self, value):
'Return self^value.'
return int()
def as_integer_ratio(self):
'Return integer ratio.\n\nReturn a pair of integers, whose ratio is exactly equal to the original int\nand with a positive denominator.\n\n>>> (10).as_integer_ratio()\n(10, 1)\n>>> (-10).as_integer_ratio()\n(-10, 1)\n>>> (0).as_integer_ratio()\n(0, 1)'
pass
def bit_length(self):
"Number of bits necessary to represent self in binary.\n\n>>> bin(37)\n'0b100101'\n>>> (37).bit_length()\n6"
return 0
def conjugate(self):
'Returns self, the complex conjugate of any int.'
return __Complex__()
@property
def denominator(self):
'the denominator of a rational number in lowest terms'
pass
@classmethod
def from_bytes(cls, type, bytes, byteorder):
"Return the integer represented by the given array of bytes.\n\n bytes\n Holds the array of bytes to convert. The argument must either\n support the buffer protocol or be an iterable object producing bytes.\n Bytes and bytearray are examples of built-in objects that support the\n buffer protocol.\n byteorder\n The byte order used to represent the integer. If byteorder is 'big',\n the most significant byte is at the beginning of the byte array. If\n byteorder is 'little', the most significant byte is at the end of the\n byte array. To request the native byte order of the host system, use\n `sys.byteorder' as the byte order value.\n signed\n Indicates whether two's complement is used to represent the integer."
return 0
@property
def imag(self):
'the imaginary part of a complex number'
pass
@property
def numerator(self):
'the numerator of a rational number in lowest terms'
pass
@property
def real(self):
'the real part of a complex number'
pass
def to_bytes(self, length, byteorder):
"Return an array of bytes representing an integer.\n\n length\n Length of bytes object to use. An OverflowError is raised if the\n integer is not representable with the given number of bytes.\n byteorder\n The byte order used to represent the integer. If byteorder is 'big',\n the most significant byte is at the beginning of the byte array. If\n byteorder is 'little', the most significant byte is at the end of the\n byte array. To request the native byte order of the host system, use\n `sys.byteorder' as the byte order value.\n signed\n Determines whether two's complement is used to represent the integer.\n If signed is False and a negative integer is given, an OverflowError\n is raised."
return b''
__Int__ = int
class bool(int):
'bool(x) -> bool\n\nReturns True when the argument x is true, False otherwise.\nThe builtins True and False are the only two instances of the class bool.\nThe class bool is a subclass of the class int, and cannot be subclassed.'
def __and__(self, value):
'Return self&value.'
return bool()
__class__ = bool
def __init__(self, x):
'bool(x) -> bool\n\nReturns True when the argument x is true, False otherwise.\nThe builtins True and False are the only two instances of the class bool.\nThe class bool is a subclass of the class int, and cannot be subclassed.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
def __or__(self, value):
'Return self|value.'
return bool()
def __rand__(self, value):
'Return value&self.'
return bool()
def __repr__(self):
'Return repr(self).'
return ''
def __ror__(self, value):
'Return value|self.'
return bool()
def __rxor__(self, value):
'Return value^self.'
return bool()
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
def __xor__(self, value):
'Return self^value.'
return bool()
@classmethod
def from_bytes(cls, type, bytes, byteorder):
"Return the integer represented by the given array of bytes.\n\n bytes\n Holds the array of bytes to convert. The argument must either\n support the buffer protocol or be an iterable object producing bytes.\n Bytes and bytearray are examples of built-in objects that support the\n buffer protocol.\n byteorder\n The byte order used to represent the integer. If byteorder is 'big',\n the most significant byte is at the beginning of the byte array. If\n byteorder is 'little', the most significant byte is at the end of the\n byte array. To request the native byte order of the host system, use\n `sys.byteorder' as the byte order value.\n signed\n Indicates whether two's complement is used to represent the integer."
return False
__Bool__ = bool
__Long__ = __Int__
class float(object):
'Convert a string or number to a floating point number, if possible.'
def __abs__(self):
'abs(self)'
return float()
def __add__(self, value):
'Return self+value.'
return float()
def __bool__(self):
'self != 0'
return False
__class__ = float
def __divmod__(self, value):
'Return divmod(self, value).'
return (0, 0)
def __eq__(self, value):
'Return self==value.'
return False
def __float__(self):
'float(self)'
return 0.0
def __floordiv__(self, value):
'Return self//value.'
return 0
def __format__(self, format_spec):
'Formats the float according to format_spec.'
return ''
def __ge__(self, value):
'Return self>=value.'
return False
def __getattribute__(self, name):
'Return getattr(self, name).'
pass
@classmethod
def __getformat__(cls, type, typestr):
"You probably don't want to use this function.\n\n typestr\n Must be 'double' or 'float'.\n\nIt exists mainly to be used in Python's test suite.\n\nThis function returns whichever of 'unknown', 'IEEE, big-endian' or 'IEEE,\nlittle-endian' best describes the format of floating point numbers used by the\nC type named by typestr."
return ''
def __getnewargs__(self):
return ()
def __gt__(self, value):
'Return self>value.'
return False
def __hash__(self):
'Return hash(self).'
return 0
def __init__(self, *args, **kwargs):
'Convert a string or number to a floating point number, if possible.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
def __int__(self):
'int(self)'
return 0
def __le__(self, value):
'Return self<=value.'
return False
def __lt__(self, value):
'Return self<value.'
return False
def __mod__(self, value):
'Return self%value.'
return float()
def __mul__(self, value):
'Return self*value.'
return float()
def __ne__(self, value):
'Return self!=value.'
return False
def __neg__(self):
'-self'
return float()
def __pos__(self):
'+self'
return float()
def __pow__(self, value, mod):
'Return pow(self, value, mod).'
return float()
def __radd__(self, value):
'Return value+self.'
return float()
def __rdivmod__(self, value):
'Return divmod(value, self).'
return (0, 0)
def __repr__(self):
'Return repr(self).'
return ''
def __rfloordiv__(self, value):
'Return value//self.'
return float()
def __rmod__(self, value):
'Return value%self.'
return float()
def __rmul__(self, value):
'Return value*self.'
return float()
def __round__(self, ndigits):
'Return the Integral closest to x, rounding half toward even.\n\nWhen an argument is passed, work like built-in round(x, ndigits).'
return float()
def __rpow__(self, value, mod):
'Return pow(value, self, mod).'
return float()
def __rsub__(self, value):
'Return value-self.'
return float()
def __rtruediv__(self, value):
'Return value/self.'
return float()
@classmethod
def __set_format__(cls, type, typestr, fmt):
"You probably don't want to use this function.\n\n typestr\n Must be 'double' or 'float'.\n fmt\n Must be one of 'unknown', 'IEEE, big-endian' or 'IEEE, little-endian',\n and in addition can only be one of the latter two if it appears to\n match the underlying C reality.\n\nIt exists mainly to be used in Python's test suite.\n\nOverride the automatic determination of C-level floating point type.\nThis affects how floats are converted to and from binary strings."
pass
def __sub__(self, value):
'Return self-value.'
return float()
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
def __truediv__(self, value):
'Return self/value.'
return __Float__()
def __trunc__(self):
'Return the Integral closest to x between 0 and x.'
return float()
def as_integer_ratio(self):
'Return integer ratio.\n\nReturn a pair of integers, whose ratio is exactly equal to the original float\nand with a positive denominator.\n\nRaise OverflowError on infinities and a ValueError on NaNs.\n\n>>> (10.0).as_integer_ratio()\n(10, 1)\n>>> (0.0).as_integer_ratio()\n(0, 1)\n>>> (-.25).as_integer_ratio()\n(-1, 4)'
return (0, 0)
def conjugate(self):
'Return self, the complex conjugate of any float.'
return __Complex__()
@classmethod
def fromhex(cls, type, string):
"Create a floating-point number from a hexadecimal string.\n\n>>> float.fromhex('0x1.ffffp10')\n2047.984375\n>>> float.fromhex('-0x1p-1074')\n-5e-324"
return 0.0
def hex(self):
"Return a hexadecimal representation of a floating-point number.\n\n>>> (-0.1).hex()\n'-0x1.999999999999ap-4'\n>>> 3.14159.hex()\n'0x1.921f9f01b866ep+1'"
return ''
@property
def imag(self):
'the imaginary part of a complex number'
pass
def is_integer(self):
'Return True if the float is an integer.'
return False
@property
def real(self):
'the real part of a complex number'
pass
__Float__ = float
class complex(object):
'Create a complex number from a real part and an optional imaginary part.\n\nThis is equivalent to (real + imag*1j) where imag defaults to 0.'
def __abs__(self):
'abs(self)'
return complex()
def __add__(self, value):
'Return self+value.'
return complex()
def __bool__(self):
'self != 0'
return False
__class__ = complex
def __divmod__(self, value):
'Return divmod(self, value).'
return (0, 0)
def __eq__(self, value):
'Return self==value.'
return False
def __float__(self):
'float(self)'
return 0.0
def __floordiv__(self, value):
'Return self//value.'
return 0
def __format__(self, format_spec):
'complex.__format__() -> str\n\nConvert to a string according to format_spec.'
return ''
def __ge__(self, value):
'Return self>=value.'
return False
def __getattribute__(self, name):
'Return getattr(self, name).'
pass
def __getnewargs__(self):
return ()
def __gt__(self, value):
'Return self>value.'
return False
def __hash__(self):
'Return hash(self).'
return 0
def __init__(self, *args, **kwargs):
'Create a complex number from a real part and an optional imaginary part.\n\nThis is equivalent to (real + imag*1j) where imag defaults to 0.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
def __int__(self):
'int(self)'
return 0
def __le__(self, value):
'Return self<=value.'
return False
def __lt__(self, value):
'Return self<value.'
return False
def __mod__(self, value):
'Return self%value.'
return complex()
def __mul__(self, value):
'Return self*value.'
return complex()
def __ne__(self, value):
'Return self!=value.'
return False
def __neg__(self):
'-self'
return complex()
def __pos__(self):
'+self'
return complex()
def __pow__(self, value, mod):
'Return pow(self, value, mod).'
return complex()
def __radd__(self, value):
'Return value+self.'
return complex()
def __rdivmod__(self, value):
'Return divmod(value, self).'
return (0, 0)
def __repr__(self):
'Return repr(self).'
return ''
def __rfloordiv__(self, value):
'Return value//self.'
return complex()
def __rmod__(self, value):
'Return value%self.'
return complex()
def __rmul__(self, value):
'Return value*self.'
return complex()
def __rpow__(self, value, mod):
'Return pow(value, self, mod).'
return complex()
def __rsub__(self, value):
'Return value-self.'
return complex()
def __rtruediv__(self, value):
'Return value/self.'
return complex()
def __sub__(self, value):
'Return self-value.'
return complex()
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
def __truediv__(self, value):
'Return self/value.'
return __Float__()
def conjugate(self):
'complex.conjugate() -> complex\n\nReturn the complex conjugate of its argument. (3-4j).conjugate() == 3+4j.'
return __Complex__()
@property
def imag(self):
'the imaginary part of a complex number'
pass
@property
def real(self):
'the real part of a complex number'
pass
__Complex__ = complex
class tuple(object):
"Built-in immutable sequence.\n\nIf no argument is given, the constructor returns an empty tuple.\nIf iterable is specified the tuple is initialized from iterable's items.\n\nIf the argument is a tuple, the return value is the same object."
def __add__(self, value):
'Return self+value.'
return tuple()
__class__ = tuple
def __contains__(self, key):
'Return key in self.'
return False
def __eq__(self, value):
'Return self==value.'
return False
def __ge__(self, value):
'Return self>=value.'
return False
def __getattribute__(self, name):
'Return getattr(self, name).'
pass
def __getitem__(self, key):
'Return self[key].'
pass
def __getnewargs__(self):
return ()
def __gt__(self, value):
'Return self>value.'
return False
def __hash__(self):
'Return hash(self).'
return 0
def __init__(self, *args, **kwargs):
"Built-in immutable sequence.\n\nIf no argument is given, the constructor returns an empty tuple.\nIf iterable is specified the tuple is initialized from iterable's items.\n\nIf the argument is a tuple, the return value is the same object."
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
def __iter__(self):
'Implement iter(self).'
return __TupleIterator__()
def __le__(self, value):
'Return self<=value.'
return False
def __len__(self):
'Return len(self).'
return 0
def __lt__(self, value):
'Return self<value.'
return False
def __mul__(self, value):
'Return self*value.'
return tuple()
def __ne__(self, value):
'Return self!=value.'
return False
def __repr__(self):
'Return repr(self).'
return ''
def __rmul__(self, value):
'Return value*self.'
return tuple()
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
def count(self, value):
'Return number of occurrences of value.'
return 0
def index(self, value, start, stop):
'Return first index of value.\n\nRaises ValueError if the value is not present.'
return 0
__Tuple__ = tuple
class list(object):
'Built-in mutable sequence.\n\nIf no argument is given, the constructor creates a new empty list.\nThe argument must be an iterable if specified.'
def __add__(self, value):
'Return self+value.'
return list()
__class__ = list
def __contains__(self, key):
'Return key in self.'
return False
def __delitem__(self, key):
'Delete self[key].'
return None
def __eq__(self, value):
'Return self==value.'
return False
def __ge__(self, value):
'Return self>=value.'
return False
def __getattribute__(self, name):
'Return getattr(self, name).'
pass
def __getitem__(self, index):
'x.__getitem__(y) <==> x[y]'
pass
def __gt__(self, value):
'Return self>value.'
return False
__hash__ = None
def __iadd__(self, value):
'Implement self+=value.'
return None
def __imul__(self, value):
'Implement self*=value.'
return None
def __init__(self, *args, **kwargs):
'Built-in mutable sequence.\n\nIf no argument is given, the constructor creates a new empty list.\nThe argument must be an iterable if specified.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
def __iter__(self):
'Implement iter(self).'
return __ListIterator__()
def __le__(self, value):
'Return self<=value.'
return False
def __len__(self):
'Return len(self).'
return 0
def __lt__(self, value):
'Return self<value.'
return False
def __mul__(self, value):
'Return self*value.'
return list()
def __ne__(self, value):
'Return self!=value.'
return False
def __repr__(self):
'Return repr(self).'
return ''
def __reversed__(self):
'Return a reverse iterator over the list.'
return __ListIterator__()
def __rmul__(self, value):
'Return value*self.'
return list()
def __setitem__(self, key, value):
'Set self[key] to value.'
return None
def __sizeof__(self):
'Return the size of the list in memory, in bytes.'
return 0
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
def append(self, object):
'Append object to the end of the list.'
return None
def clear(self):
'Remove all items from list.'
return None
def copy(self):
'Return a shallow copy of the list.'
return list()
def count(self, value):
'Return number of occurrences of value.'
return 0
def extend(self, iterable):
'Extend list by appending elements from the iterable.'
return None
def index(self, value, start, stop):
'Return first index of value.\n\nRaises ValueError if the value is not present.'
return 0
def insert(self, index, object):
'Insert object before index.'
return None
def pop(self, index):
'Remove and return item at index (default last).\n\nRaises IndexError if list is empty or index is out of range.'
return self[0]
def remove(self, value):
'Remove first occurrence of value.\n\nRaises ValueError if the value is not present.'
return None
def reverse(self):
'Reverse *IN PLACE*.'
return None
def sort(self):
'Sort the list in ascending order and return None.\n\nThe sort is in-place (i.e. the list itself is modified) and stable (i.e. the\norder of two equal elements is maintained).\n\nIf a key function is given, apply it once to each list item and sort them,\nascending or descending, according to their function values.\n\nThe reverse flag can be set to sort in descending order.'
return None
__List__ = list
class dict(object):
"dict() -> new empty dictionary\ndict(mapping) -> new dictionary initialized from a mapping object's\n (key, value) pairs\ndict(iterable) -> new dictionary initialized as if via:\n d = {}\n for k, v in iterable:\n d[k] = v\ndict(**kwargs) -> new dictionary initialized with the name=value pairs\n in the keyword argument list. For example: dict(one=1, two=2)"
__class__ = dict
def __contains__(self, key):
'True if the dictionary has the specified key, else False.'
return False
def __delitem__(self, key):
'Delete self[key].'
return None
def __eq__(self, value):
'Return self==value.'
return False
def __ge__(self, value):
'Return self>=value.'
return False
def __getattribute__(self, name):
'Return getattr(self, name).'
pass
def __getitem__(self, key):
'x.__getitem__(y) <==> x[y]'
pass
def __gt__(self, value):
'Return self>value.'
return False
__hash__ = None
def __init__(self, iterable):
"dict() -> new empty dictionary\ndict(mapping) -> new dictionary initialized from a mapping object's\n (key, value) pairs\ndict(iterable) -> new dictionary initialized as if via:\n d = {}\n for k, v in iterable:\n d[k] = v\ndict(**kwargs) -> new dictionary initialized with the name=value pairs\n in the keyword argument list. For example: dict(one=1, two=2)"
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
def __iter__(self):
'Implement iter(self).'
return __DictKeys__()
def __le__(self, value):
'Return self<=value.'
return False
def __len__(self):
'Return len(self).'
return 0
def __lt__(self, value):
'Return self<value.'
return False
def __ne__(self, value):
'Return self!=value.'
return False
def __repr__(self):
'Return repr(self).'
return ''
def __reversed__(self):
'Return a reverse iterator over the dict keys.'
pass
def __setitem__(self, key, value):
'Set self[key] to value.'
return None
def __sizeof__(self):
'D.__sizeof__() -> size of D in memory, in bytes'
return 0
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
def clear(self):
'D.clear() -> None. Remove all items from D.'
return None
def copy(self):
'D.copy() -> a shallow copy of D'
return dict()
@classmethod
def fromkeys(cls, type, iterable, value):
'Create a new dictionary with keys from iterable and values set to value.'
return {}
def get(self, key, default):
'Return the value for key if key is in the dictionary, else default.'
return self[0]
def items(self):
"D.items() -> a set-like object providing a view on D's items"
return __DictItems__()
def keys(self):
"D.keys() -> a set-like object providing a view on D's keys"
return __DictKeys__()
def pop(self, k, d=None):
'D.pop(k[,d]) -> v, remove specified key and return the corresponding value.\nIf key is not found, d is returned if given, otherwise KeyError is raised'
return self.keys()[0]
def popitem(self):
'Remove and return a (key, value) pair as a 2-tuple.\n\nPairs are returned in LIFO (last-in, first-out) order.\nRaises KeyError if the dict is empty.'
return self.items()[0]
def setdefault(self, key, default):
'Insert key with a value of default if key is not in the dictionary.\n\nReturn the value for key if key is in the dictionary, else default.'
return self[0]
def update(self, d):
'D.update([E, ]**F) -> None. Update D from dict/iterable E and F.\nIf E is present and has a .keys() method, then does: for k in E: D[k] = E[k]\nIf E is present and lacks a .keys() method, then does: for k, v in E: D[k] = v\nIn either case, this is followed by: for k in F: D[k] = F[k]'
return None
def values(self):
"D.values() -> an object providing a view on D's values"
return __DictValues__()
__Dict__ = dict
class set(object):
'set() -> new empty set object\nset(iterable) -> new set object\n\nBuild an unordered collection of unique elements.'
def __and__(self, value):
'Return self&value.'
return set()
__class__ = set
def __contains__(self, value):
'x.__contains__(y) <==> y in x.'
return False
def __eq__(self, value):
'Return self==value.'
return False
def __ge__(self, value):
'Return self>=value.'
return False
def __getattribute__(self, name):
'Return getattr(self, name).'
pass
def __gt__(self, value):
'Return self>value.'
return False
__hash__ = None
def __iand__(self, value):
'Return self&=value.'
return None
def __init__(self, iterable):
'set() -> new empty set object\nset(iterable) -> new set object\n\nBuild an unordered collection of unique elements.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
def __ior__(self, value):
'Return self|=value.'
return None
def __isub__(self, value):
'Return self-=value.'
return None
def __iter__(self):
'Implement iter(self).'
return __SetIterator__()
def __ixor__(self, value):
'Return self^=value.'
return None
def __le__(self, value):
'Return self<=value.'
return False
def __len__(self):
'Return len(self).'
return 0
def __lt__(self, value):
'Return self<value.'
return False
def __ne__(self, value):
'Return self!=value.'
return False
def __or__(self, value):
'Return self|value.'
return set()
def __rand__(self, value):
'Return value&self.'
return set()
def __reduce__(self):
'Return state information for pickling.'
return ''; return ()
def __repr__(self):
'Return repr(self).'
return ''
def __ror__(self, value):
'Return value|self.'
return set()
def __rsub__(self, value):
'Return value-self.'
return set()
def __rxor__(self, value):
'Return value^self.'
return set()
def __sizeof__(self):
'S.__sizeof__() -> size of S in memory, in bytes'
return 0
def __sub__(self, value):
'Return self-value.'
return set()
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
def __xor__(self, value):
'Return self^value.'
return set()
def add(self, value):
'Add an element to a set.\n\nThis has no effect if the element is already present.'
return None
def clear(self):
'Remove all elements from this set.'
return None
def copy(self):
'Return a shallow copy of a set.'
return set()
def difference(self, other):
'Return the difference of two or more sets as a new set.\n\n(i.e. all elements that are in this set but not the others.)'
return set()
def difference_update(self, *others):
'Remove all elements of another set from this set.'
return None
def discard(self, elem):
'Remove an element from a set if it is a member.\n\nIf the element is not a member, do nothing.'
return None
def intersection(self, other):
'Return the intersection of two sets as a new set.\n\n(i.e. all elements that are in both sets.)'
return set()
def intersection_update(self, *others):
'Update a set with the intersection of itself and another.'
return None
def isdisjoint(self, other):
'Return True if two sets have a null intersection.'
return False
def issubset(self, other):
'Report whether another set contains this set.'
return False
def issuperset(self, other):
'Report whether this set contains another set.'
return False
def pop(self):
'Remove and return an arbitrary set element.\nRaises KeyError if the set is empty.'
pass
def remove(self, elem):
'Remove an element from a set; it must be a member.\n\nIf the element is not a member, raise a KeyError.'
return None
def symmetric_difference(self, other):
'Return the symmetric difference of two sets as a new set.\n\n(i.e. all elements that are in exactly one of the sets.)'
return set()
def symmetric_difference_update(self, *others):
'Update a set with the symmetric difference of itself and another.'
return None
def union(self, *others):
'Return the union of sets as a new set.\n\n(i.e. all elements that are in either set.)'
return set()
def update(self, *others):
'Update a set with the union of itself and others.'
return None
__Set__ = set
class frozenset(object):
'frozenset() -> empty frozenset object\nfrozenset(iterable) -> frozenset object\n\nBuild an immutable unordered collection of unique elements.'
def __and__(self, value):
'Return self&value.'
return frozenset()
__class__ = frozenset
def __contains__(self, value):
'x.__contains__(y) <==> y in x.'
return False
def __eq__(self, value):
'Return self==value.'
return False
def __ge__(self, value):
'Return self>=value.'
return False
def __getattribute__(self, name):
'Return getattr(self, name).'
pass
def __gt__(self, value):
'Return self>value.'
return False
def __hash__(self):
'Return hash(self).'
return 0
def __init__(self, iterable):
'frozenset() -> empty frozenset object\nfrozenset(iterable) -> frozenset object\n\nBuild an immutable unordered collection of unique elements.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
def __iter__(self):
'Implement iter(self).'
return __SetIterator__()
def __le__(self, value):
'Return self<=value.'
return False
def __len__(self):
'Return len(self).'
return 0
def __lt__(self, value):
'Return self<value.'
return False
def __ne__(self, value):
'Return self!=value.'
return False
def __or__(self, value):
'Return self|value.'
return frozenset()
def __rand__(self, value):
'Return value&self.'
return frozenset()
def __reduce__(self):
'Return state information for pickling.'
return ''; return ()
def __repr__(self):
'Return repr(self).'
return ''
def __ror__(self, value):
'Return value|self.'
return frozenset()
def __rsub__(self, value):
'Return value-self.'
return frozenset()
def __rxor__(self, value):
'Return value^self.'
return frozenset()
def __sizeof__(self):
'S.__sizeof__() -> size of S in memory, in bytes'
return 0
def __sub__(self, value):
'Return self-value.'
return frozenset()
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
def __xor__(self, value):
'Return self^value.'
return frozenset()
def copy(self):
'Return a shallow copy of a set.'
return frozenset()
def difference(self, other):
'Return the difference of two or more sets as a new set.\n\n(i.e. all elements that are in this set but not the others.)'
return frozenset()
def intersection(self, other):
'Return the intersection of two sets as a new set.\n\n(i.e. all elements that are in both sets.)'
return frozenset()
def isdisjoint(self, other):
'Return True if two sets have a null intersection.'
return False
def issubset(self, other):
'Report whether another set contains this set.'
return False
def issuperset(self, other):
'Report whether this set contains another set.'
return False
def symmetric_difference(self, other):
'Return the symmetric difference of two sets as a new set.\n\n(i.e. all elements that are in exactly one of the sets.)'
return frozenset()
def union(self, *others):
'Return the union of sets as a new set.\n\n(i.e. all elements that are in either set.)'
return frozenset()
__FrozenSet__ = frozenset
class bytes(object):
'bytes(iterable_of_ints) -> bytes\nbytes(string, encoding[, errors]) -> bytes\nbytes(bytes_or_buffer) -> immutable copy of bytes_or_buffer\nbytes(int) -> bytes object of size given by the parameter initialized with null bytes\nbytes() -> empty bytes object\n\nConstruct an immutable array of bytes from:\n - an iterable yielding integers in range(256)\n - a text string encoded using the specified encoding\n - any object implementing the buffer API.\n - an integer'
def __add__(self, value):
'Return self+value.'
return bytes()
__class__ = bytes
def __contains__(self, key):
'Return key in self.'
return False
def __eq__(self, value):
'Return self==value.'
return False
def __ge__(self, value):
'Return self>=value.'
return False
def __getattribute__(self, name):
'Return getattr(self, name).'
pass
def __getitem__(self, key):
'Return self[key].'
return bytes()
def __getnewargs__(self):
return ()
def __gt__(self, value):
'Return self>value.'
return False
def __hash__(self):
'Return hash(self).'
return 0
def __init__(self, string, encoding, errors=None):
'bytes(iterable_of_ints) -> bytes\nbytes(string, encoding[, errors]) -> bytes\nbytes(bytes_or_buffer) -> immutable copy of bytes_or_buffer\nbytes(int) -> bytes object of size given by the parameter initialized with null bytes\nbytes() -> empty bytes object\n\nConstruct an immutable array of bytes from:\n - an iterable yielding integers in range(256)\n - a text string encoded using the specified encoding\n - any object implementing the buffer API.\n - an integer'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
def __iter__(self):
'Implement iter(self).'
return __BytesIterator__()
def __le__(self, value):
'Return self<=value.'
return False
def __len__(self):
'Return len(self).'
return 0
def __lt__(self, value):
'Return self<value.'
return False
def __mod__(self, value):
'Return self%value.'
return bytes()
def __mul__(self, value):
'Return self*value.'
return bytes()
def __ne__(self, value):
'Return self!=value.'
return False
def __repr__(self):
'Return repr(self).'
return ''
def __rmod__(self, value):
'Return value%self.'
return bytes()
def __rmul__(self, value):
'Return value*self.'
return bytes()
def __str__(self):
'Return str(self).'
return ''
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
def capitalize(self):
'B.capitalize() -> copy of B\n\nReturn a copy of B with only its first character capitalized (ASCII)\nand the rest lower-cased.'
return bytes()
def center(self, width, fillchar):
'Return a centered string of length width.\n\nPadding is done using the specified fill character.'
return bytes()
def count(self, sub, start=0, end=-1):
'B.count(sub[, start[, end]]) -> int\n\nReturn the number of non-overlapping occurrences of subsection sub in\nbytes B[start:end]. Optional arguments start and end are interpreted\nas in slice notation.'
return 0
def decode(self, encoding, errors):
"Decode the bytes using the codec registered for encoding.\n\n encoding\n The encoding with which to decode the bytes.\n errors\n The error handling scheme to use for the handling of decoding errors.\n The default is 'strict' meaning that decoding errors raise a\n UnicodeDecodeError. Other possible values are 'ignore' and 'replace'\n as well as any other name registered with codecs.register_error that\n can handle UnicodeDecodeErrors."
return ''
def endswith(self, suffix, start=0, end=-1):
'B.endswith(suffix[, start[, end]]) -> bool\n\nReturn True if B ends with the specified suffix, False otherwise.\nWith optional start, test B beginning at that position.\nWith optional end, stop comparing B at that position.\nsuffix can also be a tuple of bytes to try.'
return False
def expandtabs(self, tabsize):
'Return a copy where all tab characters are expanded using spaces.\n\nIf tabsize is not given, a tab size of 8 characters is assumed.'
return bytes()
def find(self, sub, start=0, end=-1):
'B.find(sub[, start[, end]]) -> int\n\nReturn the lowest index in B where subsection sub is found,\nsuch that sub is contained within B[start,end]. Optional\narguments start and end are interpreted as in slice notation.\n\nReturn -1 on failure.'
return 0
@classmethod
def fromhex(cls, type, string):
"Create a bytes object from a string of hexadecimal numbers.\n\nSpaces between two numbers are accepted.\nExample: bytes.fromhex('B9 01EF') -> b'\\\\xb9\\\\x01\\\\xef'."
return b''
def hex(self):
"Create a str of hexadecimal numbers from a bytes object.\n\n sep\n An optional single character or byte to separate hex bytes.\n bytes_per_sep\n How many bytes between separators. Positive values count from the\n right, negative values count from the left.\n\nExample:\n>>> value = b'\\xb9\\x01\\xef'\n>>> value.hex()\n'b901ef'\n>>> value.hex(':')\n'b9:01:ef'\n>>> value.hex(':', 2)\n'b9:01ef'\n>>> value.hex(':', -2)\n'b901:ef'"
return ''
def index(self, sub, start=0, end=-1):
'B.index(sub[, start[, end]]) -> int\n\nReturn the lowest index in B where subsection sub is found,\nsuch that sub is contained within B[start,end]. Optional\narguments start and end are interpreted as in slice notation.\n\nRaises ValueError when the subsection is not found.'
return 0
def isalnum(self):
'B.isalnum() -> bool\n\nReturn True if all characters in B are alphanumeric\nand there is at least one character in B, False otherwise.'
return False
def isalpha(self):
'B.isalpha() -> bool\n\nReturn True if all characters in B are alphabetic\nand there is at least one character in B, False otherwise.'
return False
def isascii(self):
'B.isascii() -> bool\n\nReturn True if B is empty or all characters in B are ASCII,\nFalse otherwise.'
return True
def isdigit(self):
'B.isdigit() -> bool\n\nReturn True if all characters in B are digits\nand there is at least one character in B, False otherwise.'
return False
def islower(self):
'B.islower() -> bool\n\nReturn True if all cased characters in B are lowercase and there is\nat least one cased character in B, False otherwise.'
return False
def isspace(self):
'B.isspace() -> bool\n\nReturn True if all characters in B are whitespace\nand there is at least one character in B, False otherwise.'
return False
def istitle(self):
'B.istitle() -> bool\n\nReturn True if B is a titlecased string and there is at least one\ncharacter in B, i.e. uppercase characters may only follow uncased\ncharacters and lowercase characters only cased ones. Return False\notherwise.'
return False
def isupper(self):
'B.isupper() -> bool\n\nReturn True if all cased characters in B are uppercase and there is\nat least one cased character in B, False otherwise.'
return False
def join(self, iterable_of_bytes):
"Concatenate any number of bytes objects.\n\nThe bytes whose method is called is inserted in between each pair.\n\nThe result is returned as a new bytes object.\n\nExample: b'.'.join([b'ab', b'pq', b'rs']) -> b'ab.pq.rs'."
return b''
def ljust(self, width, fillchar):
'Return a left-justified string of length width.\n\nPadding is done using the specified fill character.'
return bytes()
def lower(self):
'B.lower() -> copy of B\n\nReturn a copy of B with all ASCII characters converted to lowercase.'
return bytes()
def lstrip(self, bytes):
'Strip leading bytes contained in the argument.\n\nIf the argument is omitted or None, strip leading ASCII whitespace.'
return bytes()
@classmethod
def maketrans(cls, frm, to):
'Return a translation table useable for the bytes or bytearray translate method.\n\nThe returned table will be one where each byte in frm is mapped to the byte at\nthe same position in to.\n\nThe bytes objects frm and to must be of the same length.'
return b''
def partition(self, sep):
'Partition the bytes into three parts using the given separator.\n\nThis will search for the separator sep in the bytes. If the separator is found,\nreturns a 3-tuple containing the part before the separator, the separator\nitself, and the part after it.\n\nIf the separator is not found, returns a 3-tuple containing the original bytes\nobject and two empty bytes objects.'
return (bytes(), bytes(), bytes())
def replace(self, old, new, count):
'Return a copy with all occurrences of substring old replaced by new.\n\n count\n Maximum number of occurrences to replace.\n -1 (the default value) means replace all occurrences.\n\nIf the optional argument count is given, only the first count occurrences are\nreplaced.'
return bytes()
def rfind(self, sub, start=0, end=-1):
'B.rfind(sub[, start[, end]]) -> int\n\nReturn the highest index in B where subsection sub is found,\nsuch that sub is contained within B[start,end]. Optional\narguments start and end are interpreted as in slice notation.\n\nReturn -1 on failure.'
return 0
def rindex(self, sub, start=0, end=-1):
'B.rindex(sub[, start[, end]]) -> int\n\nReturn the highest index in B where subsection sub is found,\nsuch that sub is contained within B[start,end]. Optional\narguments start and end are interpreted as in slice notation.\n\nRaise ValueError when the subsection is not found.'
return 0
def rjust(self, width, fillchar):
'Return a right-justified string of length width.\n\nPadding is done using the specified fill character.'
return bytes()
def rpartition(self, sep):
'Partition the bytes into three parts using the given separator.\n\nThis will search for the separator sep in the bytes, starting at the end. If\nthe separator is found, returns a 3-tuple containing the part before the\nseparator, the separator itself, and the part after it.\n\nIf the separator is not found, returns a 3-tuple containing two empty bytes\nobjects and the original bytes object.'
return (bytes(), bytes(), bytes())
def rsplit(self, sep, maxsplit):
'Return a list of the sections in the bytes, using sep as the delimiter.\n\n sep\n The delimiter according which to split the bytes.\n None (the default value) means split on ASCII whitespace characters\n (space, tab, return, newline, formfeed, vertical tab).\n maxsplit\n Maximum number of splits to do.\n -1 (the default value) means no limit.\n\nSplitting is done starting at the end of the bytes and working to the front.'
return [bytes()]
def rstrip(self, bytes):
'Strip trailing bytes contained in the argument.\n\nIf the argument is omitted or None, strip trailing ASCII whitespace.'
return bytes()
def split(self, sep, maxsplit):
'Return a list of the sections in the bytes, using sep as the delimiter.\n\n sep\n The delimiter according which to split the bytes.\n None (the default value) means split on ASCII whitespace characters\n (space, tab, return, newline, formfeed, vertical tab).\n maxsplit\n Maximum number of splits to do.\n -1 (the default value) means no limit.'
return [bytes()]
def splitlines(self, keepends):
'Return a list of the lines in the bytes, breaking at line boundaries.\n\nLine breaks are not included in the resulting list unless keepends is given and\ntrue.'
return [self()]
def startswith(self, prefix, start=0, end=-1):
'B.startswith(prefix[, start[, end]]) -> bool\n\nReturn True if B starts with the specified prefix, False otherwise.\nWith optional start, test B beginning at that position.\nWith optional end, stop comparing B at that position.\nprefix can also be a tuple of bytes to try.'
return False
def strip(self, bytes):
'Strip leading and trailing bytes contained in the argument.\n\nIf the argument is omitted or None, strip leading and trailing ASCII whitespace.'
return bytes()
def swapcase(self):
'B.swapcase() -> copy of B\n\nReturn a copy of B with uppercase ASCII characters converted\nto lowercase ASCII and vice versa.'
return bytes()
def title(self):
'B.title() -> copy of B\n\nReturn a titlecased version of B, i.e. ASCII words start with uppercase\ncharacters, all remaining cased characters have lowercase.'
return bytes()
def translate(self, table, delete):
'Return a copy with each character mapped by the given translation table.\n\n table\n Translation table, which must be a bytes object of length 256.\n\nAll characters occurring in the optional argument delete are removed.\nThe remaining characters are mapped through the given translation table.'
return bytes()
def upper(self):
'B.upper() -> copy of B\n\nReturn a copy of B with all ASCII characters converted to uppercase.'
return bytes()
def zfill(self, width):
'Pad a numeric string with zeros on the left, to fill a field of the given width.\n\nThe original string is never truncated.'
return bytes()
__Bytes__ = bytes
class bytes_iterator(object):
__class__ = bytes_iterator
def __getattribute__(self, name):
'Return getattr(self, name).'
pass
def __init__(self, *args, **kwargs):
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
def __iter__(self):
'Implement iter(self).'
return bytes_iterator()
def __length_hint__(self):
'Private method returning an estimate of len(list(it)).'
return 0
def __next__(self):
'Implement next(self).'
return 0
def __reduce__(self):
'Return state information for pickling.'
return ''; return ()
def __setstate__(self, state):
'Set state information for unpickling.'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
__BytesIterator__ = bytes_iterator
class str(object):
"str(object='') -> str\nstr(bytes_or_buffer[, encoding[, errors]]) -> str\n\nCreate a new string object from the given object. If encoding or\nerrors is specified, then the object must expose a data buffer\nthat will be decoded using the given encoding and error handler.\nOtherwise, returns the result of object.__str__() (if defined)\nor repr(object).\nencoding defaults to sys.getdefaultencoding().\nerrors defaults to 'strict'."
def __add__(self, value):
'Return self+value.'
return str()
__class__ = str
def __contains__(self, key):
'Return key in self.'
return False
def __eq__(self, value):
'Return self==value.'
return False
def __format__(self, format_spec):
'Return a formatted version of the string as described by format_spec.'
return ''
def __ge__(self, value):
'Return self>=value.'
return False
def __getattribute__(self, name):
'Return getattr(self, name).'
pass
def __getitem__(self, key):
'Return self[key].'
return str()
def __getnewargs__(self):
return ()
def __gt__(self, value):
'Return self>value.'
return False
def __hash__(self):
'Return hash(self).'
return 0
def __init__(self, bytes_or_buffer, encoding=None, errors=None):
"str(object='') -> str\nstr(bytes_or_buffer[, encoding[, errors]]) -> str\n\nCreate a new string object from the given object. If encoding or\nerrors is specified, then the object must expose a data buffer\nthat will be decoded using the given encoding and error handler.\nOtherwise, returns the result of object.__str__() (if defined)\nor repr(object).\nencoding defaults to sys.getdefaultencoding().\nerrors defaults to 'strict'."
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
def __iter__(self):
'Implement iter(self).'
return __UnicodeIterator__()
def __le__(self, value):
'Return self<=value.'
return False
def __len__(self):
'Return len(self).'
return 0
def __lt__(self, value):
'Return self<value.'
return False
def __mod__(self, value):
'Return self%value.'
return str()
def __mul__(self, value):
'Return self*value.'
return str()
def __ne__(self, value):
'Return self!=value.'
return False
def __repr__(self):
'Return repr(self).'
return ''
def __rmod__(self, value):
'Return value%self.'
return str()
def __rmul__(self, value):
'Return value*self.'
return str()
def __sizeof__(self):
'Return the size of the string in memory, in bytes.'
return 0
def __str__(self):
'Return str(self).'
return ''
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
def capitalize(self):
'Return a capitalized version of the string.\n\nMore specifically, make the first character have upper case and the rest lower\ncase.'
return str()
def casefold(self):
'Return a version of the string suitable for caseless comparisons.'
return str()
def center(self, width, fillchar):
'Return a centered string of length width.\n\nPadding is done using the specified fill character (default is a space).'
return str()
def count(self, sub, start=0, end=-1):
'S.count(sub[, start[, end]]) -> int\n\nReturn the number of non-overlapping occurrences of substring sub in\nstring S[start:end]. Optional arguments start and end are\ninterpreted as in slice notation.'
return 0
def encode(self, encoding, errors):
"Encode the string using the codec registered for encoding.\n\n encoding\n The encoding in which to encode the string.\n errors\n The error handling scheme to use for encoding errors.\n The default is 'strict' meaning that encoding errors raise a\n UnicodeEncodeError. Other possible values are 'ignore', 'replace' and\n 'xmlcharrefreplace' as well as any other name registered with\n codecs.register_error that can handle UnicodeEncodeErrors."
return b''
def endswith(self, suffix, start=0, end=-1):
'S.endswith(suffix[, start[, end]]) -> bool\n\nReturn True if S ends with the specified suffix, False otherwise.\nWith optional start, test S beginning at that position.\nWith optional end, stop comparing S at that position.\nsuffix can also be a tuple of strings to try.'
return False
def expandtabs(self, tabsize):
'Return a copy where all tab characters are expanded using spaces.\n\nIf tabsize is not given, a tab size of 8 characters is assumed.'
return str()
def find(self, sub, start=0, end=-1):
'S.find(sub[, start[, end]]) -> int\n\nReturn the lowest index in S where substring sub is found,\nsuch that sub is contained within S[start:end]. Optional\narguments start and end are interpreted as in slice notation.\n\nReturn -1 on failure.'
return 0
def format(self, *args, **kwargs):
"S.format(*args, **kwargs) -> str\n\nReturn a formatted version of S, using substitutions from args and kwargs.\nThe substitutions are identified by braces ('{' and '}')."
return str()
def format_map(self, mapping):
"S.format_map(mapping) -> str\n\nReturn a formatted version of S, using substitutions from mapping.\nThe substitutions are identified by braces ('{' and '}')."
return str()
def index(self, sub, start=0, end=-1):
'S.index(sub[, start[, end]]) -> int\n\nReturn the lowest index in S where substring sub is found,\nsuch that sub is contained within S[start:end]. Optional\narguments start and end are interpreted as in slice notation.\n\nRaises ValueError when the substring is not found.'
return 0
def isalnum(self):
'Return True if the string is an alpha-numeric string, False otherwise.\n\nA string is alpha-numeric if all characters in the string are alpha-numeric and\nthere is at least one character in the string.'
return False
def isalpha(self):
'Return True if the string is an alphabetic string, False otherwise.\n\nA string is alphabetic if all characters in the string are alphabetic and there\nis at least one character in the string.'
return False
def isascii(self):
'Return True if all characters in the string are ASCII, False otherwise.\n\nASCII characters have code points in the range U+0000-U+007F.\nEmpty string is ASCII too.'
pass
def isdecimal(self):
'Return True if the string is a decimal string, False otherwise.\n\nA string is a decimal string if all characters in the string are decimal and\nthere is at least one character in the string.'
return False
def isdigit(self):
'Return True if the string is a digit string, False otherwise.\n\nA string is a digit string if all characters in the string are digits and there\nis at least one character in the string.'
return False
def isidentifier(self):
'Return True if the string is a valid Python identifier, False otherwise.\n\nCall keyword.iskeyword(s) to test whether string s is a reserved identifier,\nsuch as "def" or "class".'
return False
def islower(self):
'Return True if the string is a lowercase string, False otherwise.\n\nA string is lowercase if all cased characters in the string are lowercase and\nthere is at least one cased character in the string.'
return False
def isnumeric(self):
'Return True if the string is a numeric string, False otherwise.\n\nA string is numeric if all characters in the string are numeric and there is at\nleast one character in the string.'
return False
def isprintable(self):
'Return True if the string is printable, False otherwise.\n\nA string is printable if all of its characters are considered printable in\nrepr() or if it is empty.'
return False
def isspace(self):
'Return True if the string is a whitespace string, False otherwise.\n\nA string is whitespace if all characters in the string are whitespace and there\nis at least one character in the string.'
return False
def istitle(self):
'Return True if the string is a title-cased string, False otherwise.\n\nIn a title-cased string, upper- and title-case characters may only\nfollow uncased characters and lowercase characters only cased ones.'
return False
def isupper(self):
'Return True if the string is an uppercase string, False otherwise.\n\nA string is uppercase if all cased characters in the string are uppercase and\nthere is at least one cased character in the string.'
return False
def join(self, iterable):
"Concatenate any number of strings.\n\nThe string whose method is called is inserted in between each given string.\nThe result is returned as a new string.\n\nExample: '.'.join(['ab', 'pq', 'rs']) -> 'ab.pq.rs'"
return ''
def ljust(self, width, fillchar):
'Return a left-justified string of length width.\n\nPadding is done using the specified fill character (default is a space).'
return str()
def lower(self):
'Return a copy of the string converted to lowercase.'
return str()
def lstrip(self, chars):
'Return a copy of the string with leading whitespace removed.\n\nIf chars is given and not None, remove characters in chars instead.'
return str()
@classmethod
def maketrans(x, y, z):
'Return a translation table usable for str.translate().\n\nIf there is only one argument, it must be a dictionary mapping Unicode\nordinals (integers) or characters to Unicode ordinals, strings or None.\nCharacter keys will be then converted to ordinals.\nIf there are two arguments, they must be strings of equal length, and\nin the resulting dictionary, each character in x will be mapped to the\ncharacter at the same position in y. If there is a third argument, it\nmust be a string, whose characters will be mapped to None in the result.'
return {}
def partition(self, sep):
'Partition the string into three parts using the given separator.\n\nThis will search for the separator in the string. If the separator is found,\nreturns a 3-tuple containing the part before the separator, the separator\nitself, and the part after it.\n\nIf the separator is not found, returns a 3-tuple containing the original string\nand two empty strings.'
return (str(), str(), str())
def replace(self, old, new, count):
'Return a copy with all occurrences of substring old replaced by new.\n\n count\n Maximum number of occurrences to replace.\n -1 (the default value) means replace all occurrences.\n\nIf the optional argument count is given, only the first count occurrences are\nreplaced.'
return str()
def rfind(self, sub, start=0, end=-1):
'S.rfind(sub[, start[, end]]) -> int\n\nReturn the highest index in S where substring sub is found,\nsuch that sub is contained within S[start:end]. Optional\narguments start and end are interpreted as in slice notation.\n\nReturn -1 on failure.'
return 0
def rindex(self, sub, start=0, end=-1):
'S.rindex(sub[, start[, end]]) -> int\n\nReturn the highest index in S where substring sub is found,\nsuch that sub is contained within S[start:end]. Optional\narguments start and end are interpreted as in slice notation.\n\nRaises ValueError when the substring is not found.'
return 0
def rjust(self, width, fillchar):
'Return a right-justified string of length width.\n\nPadding is done using the specified fill character (default is a space).'
return str()
def rpartition(self, sep):
'Partition the string into three parts using the given separator.\n\nThis will search for the separator in the string, starting at the end. If\nthe separator is found, returns a 3-tuple containing the part before the\nseparator, the separator itself, and the part after it.\n\nIf the separator is not found, returns a 3-tuple containing two empty strings\nand the original string.'
return (str(), str(), str())
def rsplit(self, sep, maxsplit):
'Return a list of the words in the string, using sep as the delimiter string.\n\n sep\n The delimiter according which to split the string.\n None (the default value) means split according to any whitespace,\n and discard empty strings from the result.\n maxsplit\n Maximum number of splits to do.\n -1 (the default value) means no limit.\n\nSplits are done starting at the end of the string and working to the front.'
return [str()]
def rstrip(self, chars):
'Return a copy of the string with trailing whitespace removed.\n\nIf chars is given and not None, remove characters in chars instead.'
return str()
def split(self, sep, maxsplit):
'Return a list of the words in the string, using sep as the delimiter string.\n\n sep\n The delimiter according which to split the string.\n None (the default value) means split according to any whitespace,\n and discard empty strings from the result.\n maxsplit\n Maximum number of splits to do.\n -1 (the default value) means no limit.'
return [str()]
def splitlines(self, keepends):
'Return a list of the lines in the string, breaking at line boundaries.\n\nLine breaks are not included in the resulting list unless keepends is given and\ntrue.'
return [self()]
def startswith(self, prefix, start=0, end=-1):
'S.startswith(prefix[, start[, end]]) -> bool\n\nReturn True if S starts with the specified prefix, False otherwise.\nWith optional start, test S beginning at that position.\nWith optional end, stop comparing S at that position.\nprefix can also be a tuple of strings to try.'
return False
def strip(self, chars):
'Return a copy of the string with leading and trailing whitespace removed.\n\nIf chars is given and not None, remove characters in chars instead.'
return str()
def swapcase(self):
'Convert uppercase characters to lowercase and lowercase characters to uppercase.'
return str()
def title(self):
'Return a version of the string where each word is titlecased.\n\nMore specifically, words start with uppercased characters and all remaining\ncased characters have lower case.'
return str()
def translate(self, table):
'Replace each character in the string using the given translation table.\n\n table\n Translation table, which must be a mapping of Unicode ordinals to\n Unicode ordinals, strings, or None.\n\nThe table must implement lookup/indexing via __getitem__, for instance a\ndictionary or list. If this operation raises LookupError, the character is\nleft untouched. Characters mapped to None are deleted.'
return str()
def upper(self):
'Return a copy of the string converted to uppercase.'
return str()
def zfill(self, width):
'Pad a numeric string with zeros on the left, to fill a field of the given width.\n\nThe string is never truncated.'
return str()
__Unicode__ = str
class str_iterator(object):
__class__ = str_iterator
def __getattribute__(self, name):
'Return getattr(self, name).'
pass
def __init__(self, *args, **kwargs):
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
def __iter__(self):
'Implement iter(self).'
return str_iterator()
def __length_hint__(self):
'Private method returning an estimate of len(list(it)).'
return 0
def __next__(self):
'Implement next(self).'
return __Unicode__()
def __reduce__(self):
'Return state information for pickling.'
return ''; return ()
def __setstate__(self, state):
'Set state information for unpickling.'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
__UnicodeIterator__ = str_iterator
__Str__ = __Unicode__
__StrIterator__ = __UnicodeIterator__
class module(object):
'Create a module object.\n\nThe name must be a string; the optional doc argument can have any type.'
__class__ = module
def __delattr__(self, name):
'Implement delattr(self, name).'
return None
__dict__ = {}
def __dir__(self):
'__dir__() -> list\nspecialized dir() implementation'
return ['']
def __getattribute__(self, name):
'Return getattr(self, name).'
pass
def __init__(self, *args, **kwargs):
'Create a module object.\n\nThe name must be a string; the optional doc argument can have any type.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
def __repr__(self):
'Return repr(self).'
return ''
def __setattr__(self, name, value):
'Implement setattr(self, name, value).'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
__Module__ = module
class function(object):
'Create a function object.\n\n code\n a code object\n globals\n the globals dictionary\n name\n a string that overrides the name from the code object\n argdefs\n a tuple that specifies the default argument values\n closure\n a tuple that supplies the bindings for free variables'
@property
def __annotations__(self):
return {}
def __call__(self, *args, **kwargs):
'Call self as a function.'
pass
__class__ = function
@property
def __closure__(self):
pass
@property
def __code__(self):
return object()
@property
def __defaults__(self):
pass
__dict__ = {}
def __get__(self, instance, owner):
'Return an attribute of instance, which is of type owner.'
return function()
@property
def __globals__(self):
return {}
def __init__(self, *args, **kwargs):
'Create a function object.\n\n code\n a code object\n globals\n the globals dictionary\n name\n a string that overrides the name from the code object\n argdefs\n a tuple that specifies the default argument values\n closure\n a tuple that supplies the bindings for free variables'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@property
def __kwdefaults__(self):
pass
__name__ = 'function'
__qualname__ = 'function'
def __repr__(self):
'Return repr(self).'
return ''
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
__Function__ = function
class wrapper_descriptor(object):
def __call__(self, *args, **kwargs):
'Call self as a function.'
pass
__class__ = wrapper_descriptor
def __get__(self, instance, owner):
'Return an attribute of instance, which is of type owner.'
return wrapper_descriptor()
def __getattribute__(self, name):
'Return getattr(self, name).'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
__name__ = 'wrapper_descriptor'
@property
def __objclass__(self):
pass
__qualname__ = 'wrapper_descriptor'
def __reduce__(self):
return ''; return ()
def __repr__(self):
'Return repr(self).'
return ''
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
__text_signature__ = None
__BuiltinMethodDescriptor__ = wrapper_descriptor
class builtin_function_or_method(object):
def __call__(self, *args, **kwargs):
'Call self as a function.'
pass
__class__ = builtin_function_or_method
def __eq__(self, value):
'Return self==value.'
return False
def __ge__(self, value):
'Return self>=value.'
return False
def __getattribute__(self, name):
'Return getattr(self, name).'
pass
def __gt__(self, value):
'Return self>value.'
return False
def __hash__(self):
'Return hash(self).'
return 0
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
def __le__(self, value):
'Return self<=value.'
return False
def __lt__(self, value):
'Return self<value.'
return False
__name__ = 'builtin_function_or_method'
def __ne__(self, value):
'Return self!=value.'
return False
__qualname__ = 'builtin_function_or_method'
def __reduce__(self):
return ''; return ()
def __repr__(self):
'Return repr(self).'
return ''
@property
def __self__(self):
pass
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
__text_signature__ = None
__BuiltinFunction__ = builtin_function_or_method
class generator(object):
__class__ = generator
def __del__(self):
return None
def __getattribute__(self, name):
'Return getattr(self, name).'
pass
def __init__(self, *args, **kwargs):
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
def __iter__(self):
'Implement iter(self).'
return generator()
__name__ = 'generator'
def __next__(self):
'Implement next(self).'
pass
__qualname__ = 'generator'
def __repr__(self):
'Return repr(self).'
return ''
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
def close(self):
'close() -> raise GeneratorExit inside generator.'
return None
@property
def gi_code(self):
pass
@property
def gi_frame(self):
pass
@property
def gi_running(self):
pass
@property
def gi_yieldfrom(self):
'object being iterated by yield from, or None'
pass
def send(self, value):
"send(arg) -> send 'arg' into generator,\nreturn next yielded value or raise StopIteration."
return self.__next__()
def throw(self, type, value=None, traceback=None):
'throw(typ[,val[,tb]]) -> raise exception in generator,\nreturn next yielded value or raise StopIteration.'
return None
__Generator__ = generator
class property(object):
'Property attribute.\n\n fget\n function to be used for getting an attribute value\n fset\n function to be used for setting an attribute value\n fdel\n function to be used for del\'ing an attribute\n doc\n docstring\n\nTypical use is to define a managed attribute x:\n\nclass C(object):\n def getx(self): return self._x\n def setx(self, value): self._x = value\n def delx(self): del self._x\n x = property(getx, setx, delx, "I\'m the \'x\' property.")\n\nDecorators make defining new properties or modifying existing ones easy:\n\nclass C(object):\n @property\n def x(self):\n "I am the \'x\' property."\n return self._x\n @x.setter\n def x(self, value):\n self._x = value\n @x.deleter\n def x(self):\n del self._x'
__class__ = property
def __delete__(self, instance):
'Delete an attribute of instance.'
return None
def __get__(self, instance, owner):
'Return an attribute of instance, which is of type owner.'
return property()
def __getattribute__(self, name):
'Return getattr(self, name).'
pass
def __init__(self, *args, **kwargs):
'Property attribute.\n\n fget\n function to be used for getting an attribute value\n fset\n function to be used for setting an attribute value\n fdel\n function to be used for del\'ing an attribute\n doc\n docstring\n\nTypical use is to define a managed attribute x:\n\nclass C(object):\n def getx(self): return self._x\n def setx(self, value): self._x = value\n def delx(self): del self._x\n x = property(getx, setx, delx, "I\'m the \'x\' property.")\n\nDecorators make defining new properties or modifying existing ones easy:\n\nclass C(object):\n @property\n def x(self):\n "I am the \'x\' property."\n return self._x\n @x.setter\n def x(self, value):\n self._x = value\n @x.deleter\n def x(self):\n del self._x'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@property
def __isabstractmethod__(self):
pass
def __set__(self, instance, value):
'Set an attribute of instance to value.'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
def deleter(self, func):
'Descriptor to change the deleter on a property.'
return func
@property
def fdel(self):
pass
@property
def fget(self):
pass
@property
def fset(self):
pass
def getter(self, func):
'Descriptor to change the getter on a property.'
return func
def setter(self, func):
'Descriptor to change the setter on a property.'
return func
__Property__ = property
class classmethod(object):
'classmethod(function) -> method\n\nConvert a function to be a class method.\n\nA class method receives the class as implicit first argument,\njust like an instance method receives the instance.\nTo declare a class method, use this idiom:\n\n class C:\n @classmethod\n def f(cls, arg1, arg2, ...):\n ...\n\nIt can be called either on the class (e.g. C.f()) or on an instance\n(e.g. C().f()). The instance is ignored except for its class.\nIf a class method is called for a derived class, the derived class\nobject is passed as the implied first argument.\n\nClass methods are different than C++ or Java static methods.\nIf you want those, see the staticmethod builtin.'
__class__ = classmethod
__dict__ = {}
@property
def __func__(self):
pass
def __get__(self, instance, owner):
'Return an attribute of instance, which is of type owner.'
return classmethod()
def __init__(self, function):
'classmethod(function) -> method\n\nConvert a function to be a class method.\n\nA class method receives the class as implicit first argument,\njust like an instance method receives the instance.\nTo declare a class method, use this idiom:\n\n class C:\n @classmethod\n def f(cls, arg1, arg2, ...):\n ...\n\nIt can be called either on the class (e.g. C.f()) or on an instance\n(e.g. C().f()). The instance is ignored except for its class.\nIf a class method is called for a derived class, the derived class\nobject is passed as the implied first argument.\n\nClass methods are different than C++ or Java static methods.\nIf you want those, see the staticmethod builtin.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@property
def __isabstractmethod__(self):
pass
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
__ClassMethod__ = classmethod
class staticmethod(object):
'staticmethod(function) -> method\n\nConvert a function to be a static method.\n\nA static method does not receive an implicit first argument.\nTo declare a static method, use this idiom:\n\n class C:\n @staticmethod\n def f(arg1, arg2, ...):\n ...\n\nIt can be called either on the class (e.g. C.f()) or on an instance\n(e.g. C().f()). Both the class and the instance are ignored, and\nneither is passed implicitly as the first argument to the method.\n\nStatic methods in Python are similar to those found in Java or C++.\nFor a more advanced concept, see the classmethod builtin.'
__class__ = staticmethod
__dict__ = {}
@property
def __func__(self):
pass
def __get__(self, instance, owner):
'Return an attribute of instance, which is of type owner.'
return staticmethod()
def __init__(self, function):
'staticmethod(function) -> method\n\nConvert a function to be a static method.\n\nA static method does not receive an implicit first argument.\nTo declare a static method, use this idiom:\n\n class C:\n @staticmethod\n def f(arg1, arg2, ...):\n ...\n\nIt can be called either on the class (e.g. C.f()) or on an instance\n(e.g. C().f()). Both the class and the instance are ignored, and\nneither is passed implicitly as the first argument to the method.\n\nStatic methods in Python are similar to those found in Java or C++.\nFor a more advanced concept, see the classmethod builtin.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@property
def __isabstractmethod__(self):
pass
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
__StaticMethod__ = staticmethod
class ellipsis(object):
__class__ = ellipsis
def __getattribute__(self, name):
'Return getattr(self, name).'
pass
def __init__(self, *args, **kwargs):
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
def __reduce__(self):
return ''; return ()
def __repr__(self):
'Return repr(self).'
return ''
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
__Ellipsis__ = ellipsis
class tuple_iterator(object):
__class__ = tuple_iterator
def __getattribute__(self, name):
'Return getattr(self, name).'
pass
def __init__(self, *args, **kwargs):
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
def __iter__(self):
'Implement iter(self).'
return tuple_iterator()
def __length_hint__(self):
'Private method returning an estimate of len(list(it)).'
return 0
def __next__(self):
'Implement next(self).'
pass
def __reduce__(self):
'Return state information for pickling.'
return ''; return ()
def __setstate__(self, state):
'Set state information for unpickling.'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
__TupleIterator__ = tuple_iterator
class list_iterator(object):
__class__ = list_iterator
def __getattribute__(self, name):
'Return getattr(self, name).'
pass
def __init__(self, *args, **kwargs):
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
def __iter__(self):
'Implement iter(self).'
return list_iterator()
def __length_hint__(self):
'Private method returning an estimate of len(list(it)).'
return 0
def __next__(self):
'Implement next(self).'
pass
def __reduce__(self):
'Return state information for pickling.'
return ''; return ()
def __setstate__(self, state):
'Set state information for unpickling.'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
__ListIterator__ = list_iterator
class dict_keys(object):
def __and__(self, value):
'Return self&value.'
return dict_keys()
__class__ = dict_keys
def __contains__(self, key):
'Return key in self.'
return False
def __eq__(self, value):
'Return self==value.'
return False
def __ge__(self, value):
'Return self>=value.'
return False
def __getattribute__(self, name):
'Return getattr(self, name).'
pass
def __gt__(self, value):
'Return self>value.'
return False
__hash__ = None
def __init__(self, *args, **kwargs):
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
def __iter__(self):
'Implement iter(self).'
return dict_keys()
def __le__(self, value):
'Return self<=value.'
return False
def __len__(self):
'Return len(self).'
return 0
def __lt__(self, value):
'Return self<value.'
return False
def __ne__(self, value):
'Return self!=value.'
return False
def __or__(self, value):
'Return self|value.'
return dict_keys()
def __rand__(self, value):
'Return value&self.'
return dict_keys()
def __repr__(self):
'Return repr(self).'
return ''
def __reversed__(self):
'Return a reverse iterator over the dict keys.'
pass
def __ror__(self, value):
'Return value|self.'
return dict_keys()
def __rsub__(self, value):
'Return value-self.'
return dict_keys()
def __rxor__(self, value):
'Return value^self.'
return dict_keys()
def __sub__(self, value):
'Return self-value.'
return dict_keys()
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
def __xor__(self, value):
'Return self^value.'
return dict_keys()
def isdisjoint(self, other):
'Return True if the view and the given iterable have a null intersection.'
return False
__DictKeys__ = dict_keys
class dict_values(object):
__class__ = dict_values
def __getattribute__(self, name):
'Return getattr(self, name).'
pass
def __init__(self, *args, **kwargs):
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
def __iter__(self):
'Implement iter(self).'
return dict_values()
def __len__(self):
'Return len(self).'
return 0
def __repr__(self):
'Return repr(self).'
return ''
def __reversed__(self):
'Return a reverse iterator over the dict values.'
pass
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
__DictValues__ = dict_values
class dict_items(object):
def __and__(self, value):
'Return self&value.'
return dict_items()
__class__ = dict_items
def __contains__(self, key):
'Return key in self.'
return False
def __eq__(self, value):
'Return self==value.'
return False
def __ge__(self, value):
'Return self>=value.'
return False
def __getattribute__(self, name):
'Return getattr(self, name).'
pass
def __gt__(self, value):
'Return self>value.'
return False
__hash__ = None
def __init__(self, *args, **kwargs):
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
def __iter__(self):
'Implement iter(self).'
return dict_items()
def __le__(self, value):
'Return self<=value.'
return False
def __len__(self):
'Return len(self).'
return 0
def __lt__(self, value):
'Return self<value.'
return False
def __ne__(self, value):
'Return self!=value.'
return False
def __or__(self, value):
'Return self|value.'
return dict_items()
def __rand__(self, value):
'Return value&self.'
return dict_items()
def __repr__(self):
'Return repr(self).'
return ''
def __reversed__(self):
'Return a reverse iterator over the dict items.'
pass
def __ror__(self, value):
'Return value|self.'
return dict_items()
def __rsub__(self, value):
'Return value-self.'
return dict_items()
def __rxor__(self, value):
'Return value^self.'
return dict_items()
def __sub__(self, value):
'Return self-value.'
return dict_items()
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
def __xor__(self, value):
'Return self^value.'
return dict_items()
def isdisjoint(self, other):
'Return True if the view and the given iterable have a null intersection.'
return False
__DictItems__ = dict_items
class set_iterator(object):
__class__ = set_iterator
def __getattribute__(self, name):
'Return getattr(self, name).'
pass
def __init__(self, *args, **kwargs):
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
def __iter__(self):
'Implement iter(self).'
return set_iterator()
def __length_hint__(self):
'Private method returning an estimate of len(list(it)).'
return 0
def __next__(self):
'Implement next(self).'
pass
def __reduce__(self):
'Return state information for pickling.'
return ''; return ()
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
__SetIterator__ = set_iterator
class callable_iterator(object):
__class__ = callable_iterator
def __getattribute__(self, name):
'Return getattr(self, name).'
pass
def __init__(self, *args, **kwargs):
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
def __iter__(self):
'Implement iter(self).'
return callable_iterator()
def __next__(self):
'Implement next(self).'
pass
def __reduce__(self):
'Return state information for pickling.'
return ''; return ()
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
__CallableIterator__ = callable_iterator
__builtin_module_names__ = "_abc,_ast,_bisect,_blake2,_codecs,_collections,_csv,_datetime,_elementtree,_functools,_heapq,_imp,_io,_locale,_md5,_operator,_pickle,_posixsubprocess,_random,_sha1,_sha256,_sha3,_sha512,_signal,_socket,_sre,_stat,_statistics,_string,_struct,_symtable,_thread,_tracemalloc,_warnings,_weakref,array,atexit,binascii,builtins,cmath,errno,faulthandler,fcntl,gc,grp,itertools,marshal,math,posix,pwd,pyexpat,select,spwd,sys,syslog,time,unicodedata,xxsubtype,zlib"
class ArithmeticError(Exception):
'Base class for arithmetic errors.'
__class__ = ArithmeticError
__dict__ = {}
def __init__(self, *args, **kwargs):
'Base class for arithmetic errors.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
class AssertionError(Exception):
'Assertion failed.'
__class__ = AssertionError
__dict__ = {}
def __init__(self, *args, **kwargs):
'Assertion failed.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
class AttributeError(Exception):
'Attribute not found.'
__class__ = AttributeError
__dict__ = {}
def __init__(self, *args, **kwargs):
'Attribute not found.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
class BaseException(object):
'Common base class for all exceptions'
@property
def __cause__(self):
'exception cause'
pass
__class__ = BaseException
@property
def __context__(self):
'exception context'
pass
def __delattr__(self, name):
'Implement delattr(self, name).'
return None
__dict__ = {}
def __getattribute__(self, name):
'Return getattr(self, name).'
pass
def __init__(self, *args, **kwargs):
'Common base class for all exceptions'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
def __reduce__(self):
return ''; return ()
def __repr__(self):
'Return repr(self).'
return ''
def __setattr__(self, name, value):
'Implement setattr(self, name, value).'
return None
def __setstate__(self, state):
return None
def __str__(self):
'Return str(self).'
return ''
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
@property
def __suppress_context__(self):
pass
@property
def __traceback__(self):
pass
@property
def args(self):
pass
def with_traceback(self):
'Exception.with_traceback(tb) --\n set self.__traceback__ to tb and return self.'
pass
class BlockingIOError(OSError):
'I/O operation would block.'
__class__ = BlockingIOError
__dict__ = {}
def __init__(self, *args, **kwargs):
'I/O operation would block.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
class BrokenPipeError(ConnectionError):
'Broken pipe.'
__class__ = BrokenPipeError
__dict__ = {}
def __init__(self, *args, **kwargs):
'Broken pipe.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
class BufferError(Exception):
'Buffer error.'
__class__ = BufferError
__dict__ = {}
def __init__(self, *args, **kwargs):
'Buffer error.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
class BytesWarning(Warning):
'Base class for warnings about bytes and buffer related problems, mostly\nrelated to conversion from str or comparing to str.'
__class__ = BytesWarning
__dict__ = {}
def __init__(self, *args, **kwargs):
'Base class for warnings about bytes and buffer related problems, mostly\nrelated to conversion from str or comparing to str.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
class ChildProcessError(OSError):
'Child process error.'
__class__ = ChildProcessError
__dict__ = {}
def __init__(self, *args, **kwargs):
'Child process error.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
class ConnectionAbortedError(ConnectionError):
'Connection aborted.'
__class__ = ConnectionAbortedError
__dict__ = {}
def __init__(self, *args, **kwargs):
'Connection aborted.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
class ConnectionError(OSError):
'Connection error.'
__class__ = ConnectionError
__dict__ = {}
def __init__(self, *args, **kwargs):
'Connection error.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
class ConnectionRefusedError(ConnectionError):
'Connection refused.'
__class__ = ConnectionRefusedError
__dict__ = {}
def __init__(self, *args, **kwargs):
'Connection refused.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
class ConnectionResetError(ConnectionError):
'Connection reset.'
__class__ = ConnectionResetError
__dict__ = {}
def __init__(self, *args, **kwargs):
'Connection reset.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
class DeprecationWarning(Warning):
'Base class for warnings about deprecated features.'
__class__ = DeprecationWarning
__dict__ = {}
def __init__(self, *args, **kwargs):
'Base class for warnings about deprecated features.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
class EOFError(Exception):
'Read beyond end of file.'
__class__ = EOFError
__dict__ = {}
def __init__(self, *args, **kwargs):
'Read beyond end of file.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
Ellipsis = ellipsis()
EnvironmentError = OSError
class Exception(BaseException):
'Common base class for all non-exit exceptions.'
__class__ = Exception
__dict__ = {}
def __init__(self, *args, **kwargs):
'Common base class for all non-exit exceptions.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
class FileExistsError(OSError):
'File already exists.'
__class__ = FileExistsError
__dict__ = {}
def __init__(self, *args, **kwargs):
'File already exists.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
class FileNotFoundError(OSError):
'File not found.'
__class__ = FileNotFoundError
__dict__ = {}
def __init__(self, *args, **kwargs):
'File not found.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
class FloatingPointError(ArithmeticError):
'Floating point operation failed.'
__class__ = FloatingPointError
__dict__ = {}
def __init__(self, *args, **kwargs):
'Floating point operation failed.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
class FutureWarning(Warning):
'Base class for warnings about constructs that will change semantically\nin the future.'
__class__ = FutureWarning
__dict__ = {}
def __init__(self, *args, **kwargs):
'Base class for warnings about constructs that will change semantically\nin the future.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
class GeneratorExit(BaseException):
'Request that a generator exit.'
__class__ = GeneratorExit
__dict__ = {}
def __init__(self, *args, **kwargs):
'Request that a generator exit.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
IOError = OSError
class ImportError(Exception):
"Import can't find module, or can't find name in module."
__class__ = ImportError
__dict__ = {}
def __init__(self, *args, **kwargs):
"Import can't find module, or can't find name in module."
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
def __reduce__(self):
return ''; return ()
def __str__(self):
'Return str(self).'
return ''
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
@property
def msg(self):
'exception message'
pass
@property
def name(self):
'module name'
pass
@property
def path(self):
'module path'
pass
class ImportWarning(Warning):
'Base class for warnings about probable mistakes in module imports'
__class__ = ImportWarning
__dict__ = {}
def __init__(self, *args, **kwargs):
'Base class for warnings about probable mistakes in module imports'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
class IndentationError(SyntaxError):
'Improper indentation.'
__class__ = IndentationError
__dict__ = {}
def __init__(self, *args, **kwargs):
'Improper indentation.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
class IndexError(LookupError):
'Sequence index out of range.'
__class__ = IndexError
__dict__ = {}
def __init__(self, *args, **kwargs):
'Sequence index out of range.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
class InterruptedError(OSError):
'Interrupted by signal.'
__class__ = InterruptedError
__dict__ = {}
def __init__(self, *args, **kwargs):
'Interrupted by signal.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
class IsADirectoryError(OSError):
"Operation doesn't work on directories."
__class__ = IsADirectoryError
__dict__ = {}
def __init__(self, *args, **kwargs):
"Operation doesn't work on directories."
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
class KeyError(LookupError):
'Mapping key not found.'
__class__ = KeyError
__dict__ = {}
def __init__(self, *args, **kwargs):
'Mapping key not found.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
def __str__(self):
'Return str(self).'
return ''
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
class KeyboardInterrupt(BaseException):
'Program interrupted by user.'
__class__ = KeyboardInterrupt
__dict__ = {}
def __init__(self, *args, **kwargs):
'Program interrupted by user.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
class LookupError(Exception):
'Base class for lookup errors.'
__class__ = LookupError
__dict__ = {}
def __init__(self, *args, **kwargs):
'Base class for lookup errors.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
class MemoryError(Exception):
'Out of memory.'
__class__ = MemoryError
__dict__ = {}
def __init__(self, *args, **kwargs):
'Out of memory.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
class ModuleNotFoundError(ImportError):
'Module not found.'
__class__ = ModuleNotFoundError
__dict__ = {}
def __init__(self, *args, **kwargs):
'Module not found.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
class NameError(Exception):
'Name not found globally.'
__class__ = NameError
__dict__ = {}
def __init__(self, *args, **kwargs):
'Name not found globally.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
class NotADirectoryError(OSError):
'Operation only works on directories.'
__class__ = NotADirectoryError
__dict__ = {}
def __init__(self, *args, **kwargs):
'Operation only works on directories.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
NotImplemented = NotImplementedType()
class NotImplementedError(RuntimeError):
"Method or function hasn't been implemented yet."
__class__ = NotImplementedError
__dict__ = {}
def __init__(self, *args, **kwargs):
"Method or function hasn't been implemented yet."
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
class OSError(Exception):
'Base class for I/O related errors.'
__class__ = OSError
__dict__ = {}
def __init__(self, *args, **kwargs):
'Base class for I/O related errors.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
def __reduce__(self):
return ''; return ()
def __str__(self):
'Return str(self).'
return ''
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
@property
def characters_written(self):
pass
@property
def errno(self):
'POSIX exception code'
pass
@property
def filename(self):
'exception filename'
pass
@property
def filename2(self):
'second exception filename'
pass
@property
def strerror(self):
'exception strerror'
pass
class OverflowError(ArithmeticError):
'Result too large to be represented.'
__class__ = OverflowError
__dict__ = {}
def __init__(self, *args, **kwargs):
'Result too large to be represented.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
class PendingDeprecationWarning(Warning):
'Base class for warnings about features which will be deprecated\nin the future.'
__class__ = PendingDeprecationWarning
__dict__ = {}
def __init__(self, *args, **kwargs):
'Base class for warnings about features which will be deprecated\nin the future.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
class PermissionError(OSError):
'Not enough permissions.'
__class__ = PermissionError
__dict__ = {}
def __init__(self, *args, **kwargs):
'Not enough permissions.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
class ProcessLookupError(OSError):
'Process not found.'
__class__ = ProcessLookupError
__dict__ = {}
def __init__(self, *args, **kwargs):
'Process not found.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
class RecursionError(RuntimeError):
'Recursion limit exceeded.'
__class__ = RecursionError
__dict__ = {}
def __init__(self, *args, **kwargs):
'Recursion limit exceeded.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
class ReferenceError(Exception):
'Weak ref proxy used after referent went away.'
__class__ = ReferenceError
__dict__ = {}
def __init__(self, *args, **kwargs):
'Weak ref proxy used after referent went away.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
class ResourceWarning(Warning):
'Base class for warnings about resource usage.'
__class__ = ResourceWarning
__dict__ = {}
def __init__(self, *args, **kwargs):
'Base class for warnings about resource usage.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
class RuntimeError(Exception):
'Unspecified run-time error.'
__class__ = RuntimeError
__dict__ = {}
def __init__(self, *args, **kwargs):
'Unspecified run-time error.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
class RuntimeWarning(Warning):
'Base class for warnings about dubious runtime behavior.'
__class__ = RuntimeWarning
__dict__ = {}
def __init__(self, *args, **kwargs):
'Base class for warnings about dubious runtime behavior.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
class StopAsyncIteration(Exception):
'Signal the end from iterator.__anext__().'
__class__ = StopAsyncIteration
__dict__ = {}
def __init__(self):
'Signal the end from iterator.__anext__().'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
class StopIteration(Exception):
'Signal the end from iterator.__next__().'
__class__ = StopIteration
__dict__ = {}
def __init__(self):
'Signal the end from iterator.__next__().'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
@property
def value(self):
'generator return value'
pass
class SyntaxError(Exception):
'Invalid syntax.'
__class__ = SyntaxError
__dict__ = {}
def __init__(self, *args, **kwargs):
'Invalid syntax.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
def __str__(self):
'Return str(self).'
return ''
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
@property
def filename(self):
'exception filename'
pass
@property
def lineno(self):
'exception lineno'
pass
@property
def msg(self):
'exception msg'
pass
@property
def offset(self):
'exception offset'
pass
@property
def print_file_and_line(self):
'exception print_file_and_line'
pass
@property
def text(self):
'exception text'
pass
class SyntaxWarning(Warning):
'Base class for warnings about dubious syntax.'
__class__ = SyntaxWarning
__dict__ = {}
def __init__(self, *args, **kwargs):
'Base class for warnings about dubious syntax.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
class SystemError(Exception):
'Internal error in the Python interpreter.\n\nPlease report this to the Python maintainer, along with the traceback,\nthe Python version, and the hardware/OS platform and version.'
__class__ = SystemError
__dict__ = {}
def __init__(self, *args, **kwargs):
'Internal error in the Python interpreter.\n\nPlease report this to the Python maintainer, along with the traceback,\nthe Python version, and the hardware/OS platform and version.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
class SystemExit(BaseException):
'Request to exit from the interpreter.'
__class__ = SystemExit
__dict__ = {}
def __init__(self, *args, **kwargs):
'Request to exit from the interpreter.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
@property
def code(self):
'exception code'
pass
class TabError(IndentationError):
'Improper mixture of spaces and tabs.'
__class__ = TabError
__dict__ = {}
def __init__(self, *args, **kwargs):
'Improper mixture of spaces and tabs.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
class TimeoutError(OSError):
'Timeout expired.'
__class__ = TimeoutError
__dict__ = {}
def __init__(self, *args, **kwargs):
'Timeout expired.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
class TypeError(Exception):
'Inappropriate argument type.'
__class__ = TypeError
__dict__ = {}
def __init__(self, *args, **kwargs):
'Inappropriate argument type.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
class UnboundLocalError(NameError):
'Local name referenced but not bound to a value.'
__class__ = UnboundLocalError
__dict__ = {}
def __init__(self, *args, **kwargs):
'Local name referenced but not bound to a value.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
class UnicodeDecodeError(UnicodeError):
'Unicode decoding error.'
__class__ = UnicodeDecodeError
__dict__ = {}
def __init__(self, *args, **kwargs):
'Unicode decoding error.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
def __str__(self):
'Return str(self).'
return ''
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
@property
def encoding(self):
'exception encoding'
pass
@property
def end(self):
'exception end'
pass
@property
def object(self):
'exception object'
pass
@property
def reason(self):
'exception reason'
pass
@property
def start(self):
'exception start'
pass
class UnicodeEncodeError(UnicodeError):
'Unicode encoding error.'
__class__ = UnicodeEncodeError
__dict__ = {}
def __init__(self, *args, **kwargs):
'Unicode encoding error.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
def __str__(self):
'Return str(self).'
return ''
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
@property
def encoding(self):
'exception encoding'
pass
@property
def end(self):
'exception end'
pass
@property
def object(self):
'exception object'
pass
@property
def reason(self):
'exception reason'
pass
@property
def start(self):
'exception start'
pass
class UnicodeError(ValueError):
'Unicode related error.'
__class__ = UnicodeError
__dict__ = {}
def __init__(self, *args, **kwargs):
'Unicode related error.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
class UnicodeTranslateError(UnicodeError):
'Unicode translation error.'
__class__ = UnicodeTranslateError
__dict__ = {}
def __init__(self, *args, **kwargs):
'Unicode translation error.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
def __str__(self):
'Return str(self).'
return ''
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
@property
def encoding(self):
'exception encoding'
pass
@property
def end(self):
'exception end'
pass
@property
def object(self):
'exception object'
pass
@property
def reason(self):
'exception reason'
pass
@property
def start(self):
'exception start'
pass
class UnicodeWarning(Warning):
'Base class for warnings about Unicode related problems, mostly\nrelated to conversion problems.'
__class__ = UnicodeWarning
__dict__ = {}
def __init__(self, *args, **kwargs):
'Base class for warnings about Unicode related problems, mostly\nrelated to conversion problems.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
class UserWarning(Warning):
'Base class for warnings generated by user code.'
__class__ = UserWarning
__dict__ = {}
def __init__(self, *args, **kwargs):
'Base class for warnings generated by user code.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
class ValueError(Exception):
'Inappropriate argument value (of correct type).'
__class__ = ValueError
__dict__ = {}
def __init__(self, ofcorrecttype):
'Inappropriate argument value (of correct type).'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
class Warning(Exception):
'Base class for warning categories.'
__class__ = Warning
__dict__ = {}
def __init__(self, *args, **kwargs):
'Base class for warning categories.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
class ZeroDivisionError(ArithmeticError):
'Second argument to a division or modulo operation was zero.'
__class__ = ZeroDivisionError
__dict__ = {}
def __init__(self, *args, **kwargs):
'Second argument to a division or modulo operation was zero.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
def __build_class__(func, name, *bases, metaclass=None, **kwds):
'__build_class__(func, name, /, *bases, [metaclass], **kwds) -> class\n\nInternal helper function used by the class statement.'
pass
__doc__ = "Built-in functions, exceptions, and other objects.\n\nNoteworthy: None is the `nil' object; Ellipsis represents `...' in slices."
def __import__(name, globals=None, locals=None, fromlist=(), level=0):
"__import__(name, globals=None, locals=None, fromlist=(), level=0) -> module\n\nImport a module. Because this function is meant for use by the Python\ninterpreter and not for general use, it is better to use\nimportlib.import_module() to programmatically import a module.\n\nThe globals argument is only used to determine the context;\nthey are not modified. The locals argument is unused. The fromlist\nshould be a list of names to emulate ``from name import ...'', or an\nempty list to emulate ``import name''.\nWhen importing a module from a package, note that __import__('A.B', ...)\nreturns package A when fromlist is empty, but its submodule B when\nfromlist is not empty. The level argument is used to determine whether to\nperform absolute or relative imports: 0 is absolute, while a positive number\nis the number of parent directories to search relative to the current module."
pass
__name__ = 'builtins'
__package__ = ''
def abs(x):
'Return the absolute value of the argument.'
pass
def all(iterable):
'Return True if bool(x) is True for all values x in the iterable.\n\nIf the iterable is empty, return True.'
return False
def any(iterable):
'Return True if bool(x) is True for any x in the iterable.\n\nIf the iterable is empty, return False.'
return False
def ascii(obj):
'Return an ASCII-only representation of an object.\n\nAs repr(), return a string containing a printable representation of an\nobject, but escape the non-ASCII characters in the string returned by\nrepr() using \\\\x, \\\\u or \\\\U escapes. This generates a string similar\nto that returned by repr() in Python 2.'
return ''
def bin(number):
"Return the binary representation of an integer.\n\n >>> bin(2796202)\n '0b1010101010101010101010'"
return ''
def breakpoint(*args, **kws):
'breakpoint(*args, **kws)\n\nCall sys.breakpointhook(*args, **kws). sys.breakpointhook() must accept\nwhatever arguments are passed.\n\nBy default, this drops you into the pdb debugger.'
pass
class bytearray(object):
'bytearray(iterable_of_ints) -> bytearray\nbytearray(string, encoding[, errors]) -> bytearray\nbytearray(bytes_or_buffer) -> mutable copy of bytes_or_buffer\nbytearray(int) -> bytes array of size given by the parameter initialized with null bytes\nbytearray() -> empty bytes array\n\nConstruct a mutable bytearray object from:\n - an iterable yielding integers in range(256)\n - a text string encoded using the specified encoding\n - a bytes or a buffer object\n - any object implementing the buffer API.\n - an integer'
def __add__(self, value):
'Return self+value.'
return bytearray()
def __alloc__(self):
'B.__alloc__() -> int\n\nReturn the number of bytes actually allocated.'
return 1
__class__ = bytearray
def __contains__(self, key):
'Return key in self.'
return False
def __delitem__(self, key):
'Delete self[key].'
return None
def __eq__(self, value):
'Return self==value.'
return False
def __ge__(self, value):
'Return self>=value.'
return False
def __getattribute__(self, name):
'Return getattr(self, name).'
pass
def __getitem__(self, key):
'Return self[key].'
pass
def __gt__(self, value):
'Return self>value.'
return False
__hash__ = None
def __iadd__(self, value):
'Implement self+=value.'
return None
def __imul__(self, value):
'Implement self*=value.'
return None
def __init__(self, string, encoding, errors=None):
'bytearray(iterable_of_ints) -> bytearray\nbytearray(string, encoding[, errors]) -> bytearray\nbytearray(bytes_or_buffer) -> mutable copy of bytes_or_buffer\nbytearray(int) -> bytes array of size given by the parameter initialized with null bytes\nbytearray() -> empty bytes array\n\nConstruct a mutable bytearray object from:\n - an iterable yielding integers in range(256)\n - a text string encoded using the specified encoding\n - a bytes or a buffer object\n - any object implementing the buffer API.\n - an integer'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
def __iter__(self):
'Implement iter(self).'
return bytearray()
def __le__(self, value):
'Return self<=value.'
return False
def __len__(self):
'Return len(self).'
return 0
def __lt__(self, value):
'Return self<value.'
return False
def __mod__(self, value):
'Return self%value.'
return bytearray()
def __mul__(self, value):
'Return self*value.'
return bytearray()
def __ne__(self, value):
'Return self!=value.'
return False
def __reduce__(self):
'Return state information for pickling.'
return ''; return ()
def __reduce_ex__(self, proto):
'Return state information for pickling.'
return ''; return ()
def __repr__(self):
'Return repr(self).'
return ''
def __rmod__(self, value):
'Return value%self.'
return bytearray()
def __rmul__(self, value):
'Return value*self.'
return bytearray()
def __setitem__(self, key, value):
'Set self[key] to value.'
return None
def __sizeof__(self):
'Returns the size of the bytearray object in memory, in bytes.'
return 0
def __str__(self):
'Return str(self).'
return ''
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
def append(self, item):
'Append a single item to the end of the bytearray.\n\n item\n The item to be appended.'
pass
def capitalize(self):
'B.capitalize() -> copy of B\n\nReturn a copy of B with only its first character capitalized (ASCII)\nand the rest lower-cased.'
return bytearray()
def center(self, width, fillchar):
'Return a centered string of length width.\n\nPadding is done using the specified fill character.'
return bytearray()
def clear(self):
'Remove all items from the bytearray.'
return None
def copy(self):
'Return a copy of B.'
return bytearray()
def count(self, x):
'B.count(sub[, start[, end]]) -> int\n\nReturn the number of non-overlapping occurrences of subsection sub in\nbytes B[start:end]. Optional arguments start and end are interpreted\nas in slice notation.'
return 0
def decode(self, encoding, errors):
"Decode the bytearray using the codec registered for encoding.\n\n encoding\n The encoding with which to decode the bytearray.\n errors\n The error handling scheme to use for the handling of decoding errors.\n The default is 'strict' meaning that decoding errors raise a\n UnicodeDecodeError. Other possible values are 'ignore' and 'replace'\n as well as any other name registered with codecs.register_error that\n can handle UnicodeDecodeErrors."
pass
def endswith(self, suffix, start=0, end=-1):
'B.endswith(suffix[, start[, end]]) -> bool\n\nReturn True if B ends with the specified suffix, False otherwise.\nWith optional start, test B beginning at that position.\nWith optional end, stop comparing B at that position.\nsuffix can also be a tuple of bytes to try.'
return False
def expandtabs(self, tabsize):
'Return a copy where all tab characters are expanded using spaces.\n\nIf tabsize is not given, a tab size of 8 characters is assumed.'
return bytearray()
def extend(self, iterable_of_ints):
'Append all the items from the iterator or sequence to the end of the bytearray.\n\n iterable_of_ints\n The iterable of items to append.'
pass
def find(self, sub, start=0, end=-1):
'B.find(sub[, start[, end]]) -> int\n\nReturn the lowest index in B where subsection sub is found,\nsuch that sub is contained within B[start,end]. Optional\narguments start and end are interpreted as in slice notation.\n\nReturn -1 on failure.'
return 0
@classmethod
def fromhex(cls, type, string):
"Create a bytearray object from a string of hexadecimal numbers.\n\nSpaces between two numbers are accepted.\nExample: bytearray.fromhex('B9 01EF') -> bytearray(b'\\\\xb9\\\\x01\\\\xef')"
pass
def hex(self):
"Create a str of hexadecimal numbers from a bytearray object.\n\n sep\n An optional single character or byte to separate hex bytes.\n bytes_per_sep\n How many bytes between separators. Positive values count from the\n right, negative values count from the left.\n\nExample:\n>>> value = bytearray([0xb9, 0x01, 0xef])\n>>> value.hex()\n'b901ef'\n>>> value.hex(':')\n'b9:01:ef'\n>>> value.hex(':', 2)\n'b9:01ef'\n>>> value.hex(':', -2)\n'b901:ef'"
return ''
def index(self, v):
'B.index(sub[, start[, end]]) -> int\n\nReturn the lowest index in B where subsection sub is found,\nsuch that sub is contained within B[start,end]. Optional\narguments start and end are interpreted as in slice notation.\n\nRaises ValueError when the subsection is not found.'
return 0
def insert(self, index, item):
'Insert a single item into the bytearray before the given index.\n\n index\n The index where the value is to be inserted.\n item\n The item to be inserted.'
pass
def isalnum(self):
'B.isalnum() -> bool\n\nReturn True if all characters in B are alphanumeric\nand there is at least one character in B, False otherwise.'
return False
def isalpha(self):
'B.isalpha() -> bool\n\nReturn True if all characters in B are alphabetic\nand there is at least one character in B, False otherwise.'
return False
def isascii(self):
'B.isascii() -> bool\n\nReturn True if B is empty or all characters in B are ASCII,\nFalse otherwise.'
return True
def isdigit(self):
'B.isdigit() -> bool\n\nReturn True if all characters in B are digits\nand there is at least one character in B, False otherwise.'
return False
def islower(self):
'B.islower() -> bool\n\nReturn True if all cased characters in B are lowercase and there is\nat least one cased character in B, False otherwise.'
return False
def isspace(self):
'B.isspace() -> bool\n\nReturn True if all characters in B are whitespace\nand there is at least one character in B, False otherwise.'
return False
def istitle(self):
'B.istitle() -> bool\n\nReturn True if B is a titlecased string and there is at least one\ncharacter in B, i.e. uppercase characters may only follow uncased\ncharacters and lowercase characters only cased ones. Return False\notherwise.'
return False
def isupper(self):
'B.isupper() -> bool\n\nReturn True if all cased characters in B are uppercase and there is\nat least one cased character in B, False otherwise.'
return False
def join(self, iterable_of_bytes):
'Concatenate any number of bytes/bytearray objects.\n\nThe bytearray whose method is called is inserted in between each pair.\n\nThe result is returned as a new bytearray object.'
pass
def ljust(self, width, fillchar):
'Return a left-justified string of length width.\n\nPadding is done using the specified fill character.'
return bytearray()
def lower(self):
'B.lower() -> copy of B\n\nReturn a copy of B with all ASCII characters converted to lowercase.'
return bytearray()
def lstrip(self, bytes):
'Strip leading bytes contained in the argument.\n\nIf the argument is omitted or None, strip leading ASCII whitespace.'
return bytearray()
@classmethod
def maketrans(cls, frm, to):
'Return a translation table useable for the bytes or bytearray translate method.\n\nThe returned table will be one where each byte in frm is mapped to the byte at\nthe same position in to.\n\nThe bytes objects frm and to must be of the same length.'
pass
def partition(self, sep):
'Partition the bytearray into three parts using the given separator.\n\nThis will search for the separator sep in the bytearray. If the separator is\nfound, returns a 3-tuple containing the part before the separator, the\nseparator itself, and the part after it as new bytearray objects.\n\nIf the separator is not found, returns a 3-tuple containing the copy of the\noriginal bytearray object and two empty bytearray objects.'
return (bytearray(), bytearray(), bytearray())
def pop(self, index):
'Remove and return a single item from B.\n\n index\n The index from where to remove the item.\n -1 (the default value) means remove the last item.\n\nIf no index argument is given, will pop the last item.'
pass
def remove(self, value):
'Remove the first occurrence of a value in the bytearray.\n\n value\n The value to remove.'
return None
def replace(self, old, new, count):
'Return a copy with all occurrences of substring old replaced by new.\n\n count\n Maximum number of occurrences to replace.\n -1 (the default value) means replace all occurrences.\n\nIf the optional argument count is given, only the first count occurrences are\nreplaced.'
return bytearray()
def reverse(self):
'Reverse the order of the values in B in place.'
pass
def rfind(self, sub, start=0, end=-1):
'B.rfind(sub[, start[, end]]) -> int\n\nReturn the highest index in B where subsection sub is found,\nsuch that sub is contained within B[start,end]. Optional\narguments start and end are interpreted as in slice notation.\n\nReturn -1 on failure.'
return 0
def rindex(self, sub, start=0, end=-1):
'B.rindex(sub[, start[, end]]) -> int\n\nReturn the highest index in B where subsection sub is found,\nsuch that sub is contained within B[start,end]. Optional\narguments start and end are interpreted as in slice notation.\n\nRaise ValueError when the subsection is not found.'
return 0
def rjust(self, width, fillchar):
'Return a right-justified string of length width.\n\nPadding is done using the specified fill character.'
return bytearray()
def rpartition(self, sep):
'Partition the bytearray into three parts using the given separator.\n\nThis will search for the separator sep in the bytearray, starting at the end.\nIf the separator is found, returns a 3-tuple containing the part before the\nseparator, the separator itself, and the part after it as new bytearray\nobjects.\n\nIf the separator is not found, returns a 3-tuple containing two empty bytearray\nobjects and the copy of the original bytearray object.'
return (bytearray(), bytearray(), bytearray())
def rsplit(self, sep, maxsplit):
'Return a list of the sections in the bytearray, using sep as the delimiter.\n\n sep\n The delimiter according which to split the bytearray.\n None (the default value) means split on ASCII whitespace characters\n (space, tab, return, newline, formfeed, vertical tab).\n maxsplit\n Maximum number of splits to do.\n -1 (the default value) means no limit.\n\nSplitting is done starting at the end of the bytearray and working to the front.'
return [bytearray()]
def rstrip(self, bytes):
'Strip trailing bytes contained in the argument.\n\nIf the argument is omitted or None, strip trailing ASCII whitespace.'
return bytearray()
def split(self, sep, maxsplit):
'Return a list of the sections in the bytearray, using sep as the delimiter.\n\n sep\n The delimiter according which to split the bytearray.\n None (the default value) means split on ASCII whitespace characters\n (space, tab, return, newline, formfeed, vertical tab).\n maxsplit\n Maximum number of splits to do.\n -1 (the default value) means no limit.'
return [bytearray()]
def splitlines(self, keepends):
'Return a list of the lines in the bytearray, breaking at line boundaries.\n\nLine breaks are not included in the resulting list unless keepends is given and\ntrue.'
return [self()]
def startswith(self, prefix, start=0, end=-1):
'B.startswith(prefix[, start[, end]]) -> bool\n\nReturn True if B starts with the specified prefix, False otherwise.\nWith optional start, test B beginning at that position.\nWith optional end, stop comparing B at that position.\nprefix can also be a tuple of bytes to try.'
return False
def strip(self, bytes):
'Strip leading and trailing bytes contained in the argument.\n\nIf the argument is omitted or None, strip leading and trailing ASCII whitespace.'
return bytearray()
def swapcase(self):
'B.swapcase() -> copy of B\n\nReturn a copy of B with uppercase ASCII characters converted\nto lowercase ASCII and vice versa.'
return bytearray()
def title(self):
'B.title() -> copy of B\n\nReturn a titlecased version of B, i.e. ASCII words start with uppercase\ncharacters, all remaining cased characters have lowercase.'
return bytearray()
def translate(self, table, delete):
'Return a copy with each character mapped by the given translation table.\n\n table\n Translation table, which must be a bytes object of length 256.\n\nAll characters occurring in the optional argument delete are removed.\nThe remaining characters are mapped through the given translation table.'
pass
def upper(self):
'B.upper() -> copy of B\n\nReturn a copy of B with all ASCII characters converted to uppercase.'
return bytearray()
def zfill(self, width):
'Pad a numeric string with zeros on the left, to fill a field of the given width.\n\nThe original string is never truncated.'
return bytearray()
def callable(obj):
'Return whether the object is callable (i.e., some kind of function).\n\nNote that classes are callable, as are instances of classes with a\n__call__() method.'
return False
def chr(i):
'Return a Unicode string of one character with ordinal i; 0 <= i <= 0x10ffff.'
return ''
def compile(source, filename, mode, flags, dont_inherit, optimize):
"Compile source into a code object that can be executed by exec() or eval().\n\nThe source code may represent a Python module, statement or expression.\nThe filename will be used for run-time error messages.\nThe mode must be 'exec' to compile a module, 'single' to compile a\nsingle (interactive) statement, or 'eval' to compile an expression.\nThe flags argument, if present, controls which future statements influence\nthe compilation of the code.\nThe dont_inherit argument, if true, stops the compilation inheriting\nthe effects of any future statements in effect in the code calling\ncompile; if absent or false these statements do influence the compilation,\nin addition to any features explicitly specified."
pass
def copyright(self):
'interactive prompt objects for printing the license text, a list of\n contributors and the copyright notice.'
pass
def credits(self):
'interactive prompt objects for printing the license text, a list of\n contributors and the copyright notice.'
pass
def delattr(obj, name):
"Deletes the named attribute from the given object.\n\ndelattr(x, 'y') is equivalent to ``del x.y''"
pass
def dir(object=None):
"dir([object]) -> list of strings\n\nIf called without an argument, return the names in the current scope.\nElse, return an alphabetized list of names comprising (some of) the attributes\nof the given object, and of attributes reachable from it.\nIf the object supplies a method named __dir__, it will be used; otherwise\nthe default dir() logic is used and returns:\n for a module object: the module's attributes.\n for a class object: its attributes, and recursively the attributes\n of its bases.\n for any other object: its attributes, its class's attributes, and\n recursively the attributes of its class's base classes."
return list()
def divmod(x, y):
'Return the tuple (x//y, x%y). Invariant: div*y + mod == x.'
return (0, 0)
class enumerate(object):
'Return an enumerate object.\n\n iterable\n an object supporting iteration\n\nThe enumerate object yields pairs containing a count (from start, which\ndefaults to zero) and a value yielded by the iterable argument.\n\nenumerate is useful for obtaining an indexed list:\n (0, seq[0]), (1, seq[1]), (2, seq[2]), ...'
__class__ = enumerate
def __getattribute__(self, name):
'Return getattr(self, name).'
pass
def __init__(self, *args, **kwargs):
'Return an enumerate object.\n\n iterable\n an object supporting iteration\n\nThe enumerate object yields pairs containing a count (from start, which\ndefaults to zero) and a value yielded by the iterable argument.\n\nenumerate is useful for obtaining an indexed list:\n (0, seq[0]), (1, seq[1]), (2, seq[2]), ...'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
def __iter__(self):
'Implement iter(self).'
return enumerate()
def __next__(self):
'Implement next(self).'
pass
def __reduce__(self):
'Return state information for pickling.'
return ''; return ()
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
def eval(source, globals, locals):
'Evaluate the given source in the context of globals and locals.\n\nThe source may be a string representing a Python expression\nor a code object as returned by compile().\nThe globals must be a dictionary and locals can be any mapping,\ndefaulting to the current globals and locals.\nIf only globals is given, locals defaults to it.'
pass
def exec(source, globals, locals):
'Execute the given source in the context of globals and locals.\n\nThe source may be a string representing one or more Python statements\nor a code object as returned by compile().\nThe globals must be a dictionary and locals can be any mapping,\ndefaulting to the current globals and locals.\nIf only globals is given, locals defaults to it.'
pass
def exit(self, code):
pass
class filter(object):
'filter(function or None, iterable) --> filter object\n\nReturn an iterator yielding those items of iterable for which function(item)\nis true. If function is None, return the items that are true.'
__class__ = filter
def __getattribute__(self, name):
'Return getattr(self, name).'
pass
def __init__(self, functionorNone, iterable):
'filter(function or None, iterable) --> filter object\n\nReturn an iterator yielding those items of iterable for which function(item)\nis true. If function is None, return the items that are true.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
def __iter__(self):
'Implement iter(self).'
return filter()
def __next__(self):
'Implement next(self).'
pass
def __reduce__(self):
'Return state information for pickling.'
return ''; return ()
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
def format(value, format_spec):
"Return value.__format__(format_spec)\n\nformat_spec defaults to the empty string.\nSee the Format Specification Mini-Language section of help('FORMATTING') for\ndetails."
return ''
def getattr(object, name, default=None):
"getattr(object, name[, default]) -> value\n\nGet a named attribute from an object; getattr(x, 'y') is equivalent to x.y.\nWhen a default argument is given, it is returned when the attribute doesn't\nexist; without it, an exception is raised in that case."
pass
def globals():
"Return the dictionary containing the current scope's global variables.\n\nNOTE: Updates to this dictionary *will* affect name lookups in the current\nglobal scope and vice-versa."
return __Dict__()
def hasattr(obj, name):
'Return whether the object has an attribute with the given name.\n\nThis is done by calling getattr(obj, name) and catching AttributeError.'
return False
def hash(obj):
'Return the hash value for the given object.\n\nTwo objects that compare equal must also have the same hash value, but the\nreverse is not necessarily true.'
return 0
def help(self, *args, **kwds):
"Define the builtin 'help'.\n\n This is a wrapper around pydoc.help that provides a helpful message\n when 'help' is typed at the Python interactive prompt.\n\n Calling help() at the Python prompt starts an interactive help session.\n Calling help(thing) prints help for the python object 'thing'.\n "
pass
def hex(number):
"Return the hexadecimal representation of an integer.\n\n >>> hex(12648430)\n '0xc0ffee'"
return ''
def id(obj):
"Return the identity of an object.\n\nThis is guaranteed to be unique among simultaneously existing objects.\n(CPython uses the object's memory address.)"
return 0
def input(prompt):
'Read a string from standard input. The trailing newline is stripped.\n\nThe prompt string, if given, is printed to standard output without a\ntrailing newline before reading input.\n\nIf the user hits EOF (*nix: Ctrl-D, Windows: Ctrl-Z+Return), raise EOFError.\nOn *nix systems, readline is used if available.'
return ''
def isinstance(obj, class_or_tuple):
'Return whether an object is an instance of a class or of a subclass thereof.\n\nA tuple, as in ``isinstance(x, (A, B, ...))``, may be given as the target to\ncheck against. This is equivalent to ``isinstance(x, A) or isinstance(x, B)\nor ...`` etc.'
pass
def issubclass(cls, class_or_tuple):
"Return whether 'cls' is a derived from another class or is the same class.\n\nA tuple, as in ``issubclass(x, (A, B, ...))``, may be given as the target to\ncheck against. This is equivalent to ``issubclass(x, A) or issubclass(x, B)\nor ...`` etc."
pass
def iter(callable, sentinel):
'iter(iterable) -> iterator\niter(callable, sentinel) -> iterator\n\nGet an iterator from an object. In the first form, the argument must\nsupply its own iterator, or be a sequence.\nIn the second form, the callable is called until it returns the sentinel.'
pass
def len(obj):
'Return the number of items in a container.'
return 0
def license(self):
'interactive prompt objects for printing the license text, a list of\n contributors and the copyright notice.'
pass
def locals():
"Return a dictionary containing the current scope's local variables.\n\nNOTE: Whether or not updates to this dictionary will affect name lookups in\nthe local scope and vice-versa is *implementation dependent* and not\ncovered by any backwards compatibility guarantees."
return __Dict__()
class map(object):
'map(func, *iterables) --> map object\n\nMake an iterator that computes the function using arguments from\neach of the iterables. Stops when the shortest iterable is exhausted.'
__class__ = map
def __getattribute__(self, name):
'Return getattr(self, name).'
pass
def __init__(self, func, *iterables):
'map(func, *iterables) --> map object\n\nMake an iterator that computes the function using arguments from\neach of the iterables. Stops when the shortest iterable is exhausted.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
def __iter__(self):
'Implement iter(self).'
return map()
def __next__(self):
'Implement next(self).'
pass
def __reduce__(self):
'Return state information for pickling.'
return ''; return ()
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
def max(iterable, *, default=obj, key=func):
'max(iterable, *[, default=obj, key=func]) -> value\nmax(arg1, arg2, *args, *[, key=func]) -> value\n\nWith a single iterable argument, return its biggest item. The\ndefault keyword-only argument specifies an object to return if\nthe provided iterable is empty.\nWith two or more arguments, return the largest argument.'
pass
class memoryview(object):
'Create a new memoryview object which references the given object.'
__class__ = memoryview
def __delitem__(self, key):
'Delete self[key].'
return None
def __enter__(self):
return self
def __eq__(self, value):
'Return self==value.'
return False
def __exit__(self):
pass
def __ge__(self, value):
'Return self>=value.'
return False
def __getattribute__(self, name):
'Return getattr(self, name).'
pass
def __getitem__(self, key):
'Return self[key].'
pass
def __gt__(self, value):
'Return self>value.'
return False
def __hash__(self):
'Return hash(self).'
return 0
def __init__(self, *args, **kwargs):
'Create a new memoryview object which references the given object.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
def __le__(self, value):
'Return self<=value.'
return False
def __len__(self):
'Return len(self).'
return 0
def __lt__(self, value):
'Return self<value.'
return False
def __ne__(self, value):
'Return self!=value.'
return False
def __repr__(self):
'Return repr(self).'
return ''
def __setitem__(self, key, value):
'Set self[key] to value.'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
@property
def c_contiguous(self):
'A bool indicating whether the memory is C contiguous.'
pass
def cast(self, format):
'Cast a memoryview to a new format or shape.'
pass
@property
def contiguous(self):
'A bool indicating whether the memory is contiguous.'
pass
@property
def f_contiguous(self):
'A bool indicating whether the memory is Fortran contiguous.'
pass
@property
def format(self):
'A string containing the format (in struct module style)\n for each element in the view.'
return ''
def hex(self):
"Return the data in the buffer as a str of hexadecimal numbers.\n\n sep\n An optional single character or byte to separate hex bytes.\n bytes_per_sep\n How many bytes between separators. Positive values count from the\n right, negative values count from the left.\n\nExample:\n>>> value = memoryview(b'\\xb9\\x01\\xef')\n>>> value.hex()\n'b901ef'\n>>> value.hex(':')\n'b9:01:ef'\n>>> value.hex(':', 2)\n'b9:01ef'\n>>> value.hex(':', -2)\n'b901:ef'"
return ''
@property
def itemsize(self):
'The size in bytes of each element of the memoryview.'
pass
@property
def nbytes(self):
'The amount of space in bytes that the array would use in\n a contiguous representation.'
pass
@property
def ndim(self):
'An integer indicating how many dimensions of a multi-dimensional\n array the memory represents.'
pass
@property
def obj(self):
'The underlying object of the memoryview.'
pass
@property
def readonly(self):
'A bool indicating whether the memory is read only.'
pass
def release(self):
'Release the underlying buffer exposed by the memoryview object.'
pass
@property
def shape(self):
'A tuple of ndim integers giving the shape of the memory\n as an N-dimensional array.'
pass
@property
def strides(self):
'A tuple of ndim integers giving the size in bytes to access\n each element for each dimension of the array.'
pass
@property
def suboffsets(self):
'A tuple of integers used internally for PIL-style arrays.'
pass
def tobytes(self, order):
"Return the data in the buffer as a byte string. Order can be {'C', 'F', 'A'}.\nWhen order is 'C' or 'F', the data of the original array is converted to C or\nFortran order. For contiguous views, 'A' returns an exact copy of the physical\nmemory. In particular, in-memory Fortran order is preserved. For non-contiguous\nviews, the data is converted to C first. order=None is the same as order='C'."
pass
def tolist(self):
'Return the data in the buffer as a list of elements.'
pass
def toreadonly(self):
'Return a readonly version of the memoryview.'
pass
def min(iterable, *, default=obj, key=func):
'min(iterable, *[, default=obj, key=func]) -> value\nmin(arg1, arg2, *args, *[, key=func]) -> value\n\nWith a single iterable argument, return its smallest item. The\ndefault keyword-only argument specifies an object to return if\nthe provided iterable is empty.\nWith two or more arguments, return the smallest argument.'
pass
def next(iterator, default=None):
'next(iterator[, default])\n\nReturn the next item from the iterator. If default is given and the iterator\nis exhausted, it is returned instead of raising StopIteration.'
pass
def oct(number):
"Return the octal representation of an integer.\n\n >>> oct(342391)\n '0o1234567'"
return ''
def open(file, mode, buffering, encoding, errors, newline, closefd, opener):
'Open file and return a stream. Raise OSError upon failure.\n\nfile is either a text or byte string giving the name (and the path\nif the file isn\'t in the current working directory) of the file to\nbe opened or an integer file descriptor of the file to be\nwrapped. (If a file descriptor is given, it is closed when the\nreturned I/O object is closed, unless closefd is set to False.)\n\nmode is an optional string that specifies the mode in which the file\nis opened. It defaults to \'r\' which means open for reading in text\nmode. Other common values are \'w\' for writing (truncating the file if\nit already exists), \'x\' for creating and writing to a new file, and\n\'a\' for appending (which on some Unix systems, means that all writes\nappend to the end of the file regardless of the current seek position).\nIn text mode, if encoding is not specified the encoding used is platform\ndependent: locale.getpreferredencoding(False) is called to get the\ncurrent locale encoding. (For reading and writing raw bytes use binary\nmode and leave encoding unspecified.) The available modes are:\n\n========= ===============================================================\nCharacter Meaning\n--------- ---------------------------------------------------------------\n\'r\' open for reading (default)\n\'w\' open for writing, truncating the file first\n\'x\' create a new file and open it for writing\n\'a\' open for writing, appending to the end of the file if it exists\n\'b\' binary mode\n\'t\' text mode (default)\n\'+\' open a disk file for updating (reading and writing)\n\'U\' universal newline mode (deprecated)\n========= ===============================================================\n\nThe default mode is \'rt\' (open for reading text). For binary random\naccess, the mode \'w+b\' opens and truncates the file to 0 bytes, while\n\'r+b\' opens the file without truncation. The \'x\' mode implies \'w\' and\nraises an `FileExistsError` if the file already exists.\n\nPython distinguishes between files opened in binary and text modes,\neven when the underlying operating system doesn\'t. Files opened in\nbinary mode (appending \'b\' to the mode argument) return contents as\nbytes objects without any decoding. In text mode (the default, or when\n\'t\' is appended to the mode argument), the contents of the file are\nreturned as strings, the bytes having been first decoded using a\nplatform-dependent encoding or using the specified encoding if given.\n\n\'U\' mode is deprecated and will raise an exception in future versions\nof Python. It has no effect in Python 3. Use newline to control\nuniversal newlines mode.\n\nbuffering is an optional integer used to set the buffering policy.\nPass 0 to switch buffering off (only allowed in binary mode), 1 to select\nline buffering (only usable in text mode), and an integer > 1 to indicate\nthe size of a fixed-size chunk buffer. When no buffering argument is\ngiven, the default buffering policy works as follows:\n\n* Binary files are buffered in fixed-size chunks; the size of the buffer\n is chosen using a heuristic trying to determine the underlying device\'s\n "block size" and falling back on `io.DEFAULT_BUFFER_SIZE`.\n On many systems, the buffer will typically be 4096 or 8192 bytes long.\n\n* "Interactive" text files (files for which isatty() returns True)\n use line buffering. Other text files use the policy described above\n for binary files.\n\nencoding is the name of the encoding used to decode or encode the\nfile. This should only be used in text mode. The default encoding is\nplatform dependent, but any encoding supported by Python can be\npassed. See the codecs module for the list of supported encodings.\n\nerrors is an optional string that specifies how encoding errors are to\nbe handled---this argument should not be used in binary mode. Pass\n\'strict\' to raise a ValueError exception if there is an encoding error\n(the default of None has the same effect), or pass \'ignore\' to ignore\nerrors. (Note that ignoring encoding errors can lead to data loss.)\nSee the documentation for codecs.register or run \'help(codecs.Codec)\'\nfor a list of the permitted encoding error strings.\n\nnewline controls how universal newlines works (it only applies to text\nmode). It can be None, \'\', \'\\n\', \'\\r\', and \'\\r\\n\'. It works as\nfollows:\n\n* On input, if newline is None, universal newlines mode is\n enabled. Lines in the input can end in \'\\n\', \'\\r\', or \'\\r\\n\', and\n these are translated into \'\\n\' before being returned to the\n caller. If it is \'\', universal newline mode is enabled, but line\n endings are returned to the caller untranslated. If it has any of\n the other legal values, input lines are only terminated by the given\n string, and the line ending is returned to the caller untranslated.\n\n* On output, if newline is None, any \'\\n\' characters written are\n translated to the system default line separator, os.linesep. If\n newline is \'\' or \'\\n\', no translation takes place. If newline is any\n of the other legal values, any \'\\n\' characters written are translated\n to the given string.\n\nIf closefd is False, the underlying file descriptor will be kept open\nwhen the file is closed. This does not work when a file name is given\nand must be True in that case.\n\nA custom opener can be used by passing a callable as *opener*. The\nunderlying file descriptor for the file object is then obtained by\ncalling *opener* with (*file*, *flags*). *opener* must return an open\nfile descriptor (passing os.open as *opener* results in functionality\nsimilar to passing None).\n\nopen() returns a file object whose type depends on the mode, and\nthrough which the standard file operations such as reading and writing\nare performed. When open() is used to open a file in a text mode (\'w\',\n\'r\', \'wt\', \'rt\', etc.), it returns a TextIOWrapper. When used to open\na file in a binary mode, the returned class varies: in read binary\nmode, it returns a BufferedReader; in write binary and append binary\nmodes, it returns a BufferedWriter, and in read/write mode, it returns\na BufferedRandom.\n\nIt is also possible to use a string or bytearray as a file for both\nreading and writing. For strings StringIO can be used like a file\nopened in a text mode, and for bytes a BytesIO can be used like a file\nopened in a binary mode.'
pass
def ord(c):
'Return the Unicode code point for a one-character string.'
pass
def pow(base, exp, mod):
'Equivalent to base**exp with 2 arguments or base**exp % mod with 3 arguments\n\nSome types, such as ints, are able to use a more efficient algorithm when\ninvoked using the three argument form.'
pass
def print():
"print(value, ..., sep=' ', end='\\n', file=sys.stdout, flush=False)\n\nPrints the values to a stream, or to sys.stdout by default.\nOptional keyword arguments:\nfile: a file-like object (stream); defaults to the current sys.stdout.\nsep: string inserted between values, default a space.\nend: string appended after the last value, default a newline.\nflush: whether to forcibly flush the stream."
pass
def quit(self, code):
pass
class range(object):
'range(stop) -> range object\nrange(start, stop[, step]) -> range object\n\nReturn an object that produces a sequence of integers from start (inclusive)\nto stop (exclusive) by step. range(i, j) produces i, i+1, i+2, ..., j-1.\nstart defaults to 0, and stop is omitted! range(4) produces 0, 1, 2, 3.\nThese are exactly the valid indices for a list of 4 elements.\nWhen step is given, it specifies the increment (or decrement).'
def __bool__(self):
'self != 0'
return False
__class__ = range
def __contains__(self, key):
'Return key in self.'
return False
def __eq__(self, value):
'Return self==value.'
return False
def __ge__(self, value):
'Return self>=value.'
return False
def __getattribute__(self, name):
'Return getattr(self, name).'
pass
def __getitem__(self, key):
'Return self[key].'
pass
def __gt__(self, value):
'Return self>value.'
return False
def __hash__(self):
'Return hash(self).'
return 0
def __init__(self, start, stop, step=None):
'range(stop) -> range object\nrange(start, stop[, step]) -> range object\n\nReturn an object that produces a sequence of integers from start (inclusive)\nto stop (exclusive) by step. range(i, j) produces i, i+1, i+2, ..., j-1.\nstart defaults to 0, and stop is omitted! range(4) produces 0, 1, 2, 3.\nThese are exactly the valid indices for a list of 4 elements.\nWhen step is given, it specifies the increment (or decrement).'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
def __iter__(self):
'Implement iter(self).'
return range()
def __le__(self, value):
'Return self<=value.'
return False
def __len__(self):
'Return len(self).'
return 0
def __lt__(self, value):
'Return self<value.'
return False
def __ne__(self, value):
'Return self!=value.'
return False
def __reduce__(self):
return ''; return ()
def __repr__(self):
'Return repr(self).'
return ''
def __reversed__(self):
'Return a reverse iterator.'
pass
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
def count(self, x):
'rangeobject.count(value) -> integer -- return number of occurrences of value'
return 0
def index(self, v):
'rangeobject.index(value) -> integer -- return index of value.\nRaise ValueError if the value is not present.'
return 0
@property
def start(self):
pass
@property
def step(self):
pass
@property
def stop(self):
pass
def repr(obj):
'Return the canonical string representation of the object.\n\nFor many object types, including most builtins, eval(repr(obj)) == obj.'
return ''
class reversed(object):
'Return a reverse iterator over the values of the given sequence.'
__class__ = reversed
def __getattribute__(self, name):
'Return getattr(self, name).'
pass
def __init__(self, *args, **kwargs):
'Return a reverse iterator over the values of the given sequence.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
def __iter__(self):
'Implement iter(self).'
return reversed()
def __length_hint__(self):
'Private method returning an estimate of len(list(it)).'
return 0
def __next__(self):
'Implement next(self).'
pass
def __reduce__(self):
'Return state information for pickling.'
return ''; return ()
def __setstate__(self, state):
'Set state information for unpickling.'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
def round(number, ndigits):
'Round a number to a given precision in decimal digits.\n\nThe return value is an integer if ndigits is omitted or None. Otherwise\nthe return value has the same type as the number. ndigits may be negative.'
return 0.0
def setattr(obj, name, value):
"Sets the named attribute on the given object to the specified value.\n\nsetattr(x, 'y', v) is equivalent to ``x.y = v''"
pass
class slice(object):
'slice(stop)\nslice(start, stop[, step])\n\nCreate a slice object. This is used for extended slicing (e.g. a[0:10:2]).'
__class__ = slice
def __eq__(self, value):
'Return self==value.'
return False
def __ge__(self, value):
'Return self>=value.'
return False
def __getattribute__(self, name):
'Return getattr(self, name).'
pass
def __gt__(self, value):
'Return self>value.'
return False
__hash__ = None
def __init__(self, start, stop, step=None):
'slice(stop)\nslice(start, stop[, step])\n\nCreate a slice object. This is used for extended slicing (e.g. a[0:10:2]).'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
def __le__(self, value):
'Return self<=value.'
return False
def __lt__(self, value):
'Return self<value.'
return False
def __ne__(self, value):
'Return self!=value.'
return False
def __reduce__(self):
'Return state information for pickling.'
return ''; return ()
def __repr__(self):
'Return repr(self).'
return ''
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
def indices(self):
'S.indices(len) -> (start, stop, stride)\n\nAssuming a sequence of length len, calculate the start and stop\nindices, and the stride length of the extended slice described by\nS. Out of bounds indices are clipped in a manner consistent with the\nhandling of normal slices.'
return tuple()
@property
def start(self):
pass
@property
def step(self):
pass
@property
def stop(self):
pass
def sorted(iterable):
'Return a new list containing all items from the iterable in ascending order.\n\nA custom key function can be supplied to customize the sort order, and the\nreverse flag can be set to request the result in descending order.'
return __List__()
def sum(iterable, start):
"Return the sum of a 'start' value (default: 0) plus an iterable of numbers\n\nWhen the iterable is empty, return the start value.\nThis function is intended specifically for use with numeric values and may\nreject non-numeric types."
pass
class super(object):
'super() -> same as super(__class__, <first argument>)\nsuper(type) -> unbound super object\nsuper(type, obj) -> bound super object; requires isinstance(obj, type)\nsuper(type, type2) -> bound super object; requires issubclass(type2, type)\nTypical use to call a cooperative superclass method:\nclass C(B):\n def meth(self, arg):\n super().meth(arg)\nThis works for class methods too:\nclass C(B):\n @classmethod\n def cmeth(cls, arg):\n super().cmeth(arg)\n'
__class__ = super
def __get__(self, instance, owner):
'Return an attribute of instance, which is of type owner.'
return super()
def __getattribute__(self, name):
'Return getattr(self, name).'
pass
def __init__(self, type, type2):
'super() -> same as super(__class__, <first argument>)\nsuper(type) -> unbound super object\nsuper(type, obj) -> bound super object; requires isinstance(obj, type)\nsuper(type, type2) -> bound super object; requires issubclass(type2, type)\nTypical use to call a cooperative superclass method:\nclass C(B):\n def meth(self, arg):\n super().meth(arg)\nThis works for class methods too:\nclass C(B):\n @classmethod\n def cmeth(cls, arg):\n super().cmeth(arg)\n'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
def __repr__(self):
'Return repr(self).'
return ''
@property
def __self__(self):
'the instance invoking super(); may be None'
pass
@property
def __self_class__(self):
'the type of the instance invoking super(); may be None'
pass
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
@property
def __thisclass__(self):
'the class invoking super()'
pass
def vars(object=None):
'vars([object]) -> dictionary\n\nWithout arguments, equivalent to locals().\nWith an argument, equivalent to object.__dict__.'
return dict()
class zip(object):
'zip(*iterables) --> zip object\n\nReturn a zip object whose .__next__() method returns a tuple where\nthe i-th element comes from the i-th iterable argument. The .__next__()\nmethod continues until the shortest iterable in the argument sequence\nis exhausted and then it raises StopIteration.'
__class__ = zip
def __getattribute__(self, name):
'Return getattr(self, name).'
pass
def __init__(self, *iterables):
'zip(*iterables) --> zip object\n\nReturn a zip object whose .__next__() method returns a tuple where\nthe i-th element comes from the i-th iterable argument. The .__next__()\nmethod continues until the shortest iterable in the argument sequence\nis exhausted and then it raises StopIteration.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
def __iter__(self):
'Implement iter(self).'
return zip()
def __next__(self):
'Implement next(self).'
pass
def __reduce__(self):
'Return state information for pickling.'
return ''; return ()
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
| [
"stpn.s@yandex.ru"
] | stpn.s@yandex.ru |
03f04ed8e4882c523b67905057a762d097be0037 | 35e3e103e95128d6398ee14ce0a1b46d2033b688 | /proxy_source/__init__.py | 1186ed6da179a5002fec1b180a92b1302a9d2b16 | [] | no_license | Godofbush/easy_proxy | 819558195f355a441a82a2d38fbee1cb9d3a6867 | 60bb04a812ff9e522b019e0968d2d0e4b26d72d7 | refs/heads/master | 2023-01-15T12:42:17.290896 | 2020-11-23T15:58:30 | 2020-11-23T15:58:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 172 | py | from .goubanjia import get_goubanjia
from .kuaidaili import get_kuaidaili
from .kxdaili import get_kxdaili
PROXY_SOURCE_LIST = [get_goubanjia, get_kuaidaili, get_kxdaili]
| [
"zhuyan265@gmail.com"
] | zhuyan265@gmail.com |
7e39d9636d8d51231c8e255ea73707f11e4c337e | 56ffce29f0d27f83206e11870d95982c38524aae | /apweb/site/view/session_test.py | a77c127ed42f7fbac3078f43a773ba651e4786d4 | [] | no_license | adamandpaul/apweb | cce365085e2ee58cfbc31544c5a7414e67ad56b4 | b1bb81fa7d7b39f19e187462aa3447ff482b46af | refs/heads/master | 2022-10-19T02:09:52.437906 | 2021-05-21T06:10:08 | 2021-05-21T06:10:08 | 201,398,036 | 0 | 3 | null | 2022-09-21T21:39:41 | 2019-08-09T05:41:06 | Python | UTF-8 | Python | false | false | 691 | py | # -*- coding:utf-8 -*-
from . import session
from unittest import TestCase
from unittest.mock import MagicMock
from unittest.mock import patch
class TestSessionView(TestCase):
def setUp(self):
self.request = MagicMock()
self.context = MagicMock()
self.view = session.SessionView(self.context, self.request)
@patch("apweb.site.view.session.UserView")
def test_user(self, UserView):
self.assertEqual(self.view.user, UserView.return_value.info_manage)
UserView.assert_called_with(self.request.user, self.request)
def test_info(self):
self.view.__dict__["user"] = "foo"
self.assertEqual(self.view.info["user"], "foo")
| [
"arterrey@gmail.com"
] | arterrey@gmail.com |
a38e5b8ea1259ceb3d591026ad3ca10564a3d8fe | fc1dc477569b8c9676d824556f6df796ea10ee47 | /net_16_ldap/vip_ldap3_1_get_user_info.py | d69f62aa45d09a420391290db308389b823d4ffa | [] | no_license | pengpeng2100/python_code_classic | 4007873fef426aa3b609beff19c4b66ff9ed746d | 121a028cb24e217988f0800792e946f540cf1a59 | refs/heads/master | 2023-03-30T04:27:22.942432 | 2021-03-30T16:00:28 | 2021-03-30T16:00:28 | 353,030,129 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,821 | py | #!/usr/bin/env python3
# -*- coding=utf-8 -*-
# 本脚由亁颐堂现任明教教主编写,用于乾颐盾Python课程!
# 教主QQ:605658506
# 亁颐堂官网www.qytang.com
# 教主技术进化论拓展你的技术新边疆
# https://ke.qq.com/course/271956?tuin=24199d8a
from ldap3 import Connection
from net_16_ldap.vip_ldap3_0_login_info import server, ad_admin_username, ad_admin_password
def get_user_info(username):
# 返回用户属于的组与用户名
try:
# 连接服务器
c = Connection(server, auto_bind=True, user="qytang\\"+ad_admin_username, password=ad_admin_password)
# 提取域qytang.com, 用户的memberOf,sn和department信息
c.search(search_base='dc=qytang,dc=com',
search_filter='(&(samAccountName=' + username + '))',
attributes=['memberOf',
'sn',
'department',
'createTimeStamp',
'accountExpires',
'userAccountControl',
'objectClass',
'pwdLastSet'],
paged_size=5)
# 返回获取的memberOf,sn和department信息
return {'dn': c.response[0]['dn'],
'memberOf': c.response[0]['attributes']['memberOf'],
'sn': c.response[0]['attributes']['sn'],
'department': c.response[0]['attributes']['department'],
'createTimeStamp': c.response[0]['attributes']['createTimeStamp'],
'accountExpires': c.response[0]['attributes']['accountExpires'],
'userAccountControl': c.response[0]['attributes']['userAccountControl'],
'objectClass': c.response[0]['attributes']['objectClass'],
'pwdLastSet': c.response[0]['attributes']['pwdLastSet'],}
except Exception:
return None
def get_user_self_info(username, password):
# 返回用户属于的组与用户名
try:
# 连接服务器
c = Connection(server, auto_bind=True, user="qytang\\"+username, password=password)
# 提取域qytang.com, 用户的memberOf,sn和department信息
c.search(search_base='dc=qytang,dc=com',
search_filter='(&(samAccountName=' + username + '))',
attributes=['memberOf',
'sn',
'department',
'createTimeStamp',
'accountExpires',
'userAccountControl',
'objectClass',
'pwdLastSet'],
paged_size=5)
# 返回获取的memberOf,sn和department信息
return {'dn': c.response[0]['dn'],
'memberOf': c.response[0]['attributes']['memberOf'],
'sn': c.response[0]['attributes']['sn'],
'department': c.response[0]['attributes']['department'],
'createTimeStamp': c.response[0]['attributes']['createTimeStamp'],
'accountExpires': c.response[0]['attributes']['accountExpires'],
'userAccountControl': c.response[0]['attributes']['userAccountControl'],
'objectClass': c.response[0]['attributes']['objectClass'],
'pwdLastSet': c.response[0]['attributes']['pwdLastSet'],}
except Exception as e:
print(e)
return None
if __name__ == '__main__':
# 可以查詢用戶
from pprint import pprint
pprint(get_user_info('qyt-qink'))
pprint(get_user_self_info('qyt-qink', 'Cisc0123'))
# 可以查詢組
# print(get_user_info('vipgroup'))
# userAccountControl
# https://lesca.me/archives/common-useraccountcontrol-values.html
| [
"pengpeng2100@gmail.com"
] | pengpeng2100@gmail.com |
973cb04e9006b61f124b9eec5ee4e4edb2e8c86d | 66246d9b5f98203fe1a2532e32757dea922d798a | /data_clean.py | 1c4edf642dd332831f99d2ff4689fb98ec8aa22a | [] | no_license | hexinuser/data-analysis-of-resold-apartment | 418dcd710fd8015da5471c9c50651f233cf86e09 | fec90fb8ba668a6375e27ab266d02572b1979bdb | refs/heads/master | 2020-04-06T14:37:06.867501 | 2018-11-14T13:55:13 | 2018-11-14T13:55:13 | 157,547,689 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,159 | py | # -*- coding: utf-8 -*-
"""
Created on Sat Sep 15 13:07:16 2018
@author: Evan_He
"""
import pandas as pd
import re
#data = pd.read_excel('C:/Users/Evan_He/Desktop/scrap_ershoufang/data/data.xlsx',encoding="utf8",index_col='链家id')
#data.to_csv('C:/Users/Evan_He/Desktop/scrap_ershoufang/data/house_data.csv',encoding="utf8")
data = pd.read_csv('C:/Users/Evan_He/Desktop/scrap_ershoufang/data/house_data.csv',encoding="utf8",index_col='链家id')
data['备注']=data['备注'].fillna(0)
data.dropna(axis=0, how='any', inplace=True)
data['小区名称']=data['小区名称'].apply(lambda x: x.strip())
data['房屋朝向']=data['房屋朝向'].apply(lambda x: x.strip())
data['电梯'] = data['电梯'].apply(lambda x: x.strip()[0])
data['装修'] =data['装修'].apply(lambda x: x.strip())
data['区域'] =data['区域'].apply(lambda x: x.strip())
data['布局']=data['布局'].apply(lambda x: x.strip())
data = data[data['布局']!='车位']
data['卧室个数']=data['布局'].apply(lambda x: x[0])
data['客厅数']=data['布局'].apply(lambda x: x[2])
data.drop('布局',axis=1,inplace=True)
data.rename(columns={'总价':'总价(万元)','备注':'地铁'},inplace =True)
data['单价']=data['单价'].apply(lambda x: int(re.compile('单价(.*?)元').findall(x)[0]))
def bieshu(x):
if len(x)==1:
return 0
else:
return 1
data['别墅'] = data['别墅'].apply(bieshu)
data['面积']=data['面积'].apply(lambda x: float(x.strip()[:-2]))
data = data[data['面积']<=600]
def ele_or(x):
if x=='无':
return 0
else:
return 1
data['电梯'] = data['电梯'].apply(ele_or) #0表示无电梯,1表示有电梯
def floor_sum(x):
try:
return int(re.compile('共(.*?)层').findall(x)[0])
except:
return int(re.compile('(.*?)层').findall(x)[0])
data['总楼层'] = data['地址'].apply(floor_sum)
data.drop(['装修','地址'],axis=1,inplace=True)
data = data[data['单价']<=60000]
data.to_csv('C:/Users/Evan_He/Desktop/scrap_ershoufang/data/clean_data.csv',encoding="utf8")
| [
"noreply@github.com"
] | hexinuser.noreply@github.com |
99bc5f810433c2c56027c7cadd2f629bb37f2406 | 7f33d68323240d66e610e5a89efc516915a11a96 | /manage.py | cd6b58f7934e4cd956b6d3cad8298609c08f1d21 | [
"Apache-2.0"
] | permissive | dbca-wa/observations | 100df2765ef0f6f62aaf45fc13fbb4af4395f519 | 48b2ad17afa9f0019524cb22a9a0bba74850b87f | refs/heads/master | 2021-05-31T16:29:30.906717 | 2016-04-06T02:42:05 | 2016-04-06T02:42:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 297 | py | #!/usr/bin/env python
import os
import sys
import confy
confy.read_environment_file()
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "incredibus.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| [
"asi@dpaw.wa.gov.au"
] | asi@dpaw.wa.gov.au |
d3ebcf3dec9963fff0f23664aaf16564a0ce8009 | d8bc4d108baa770455ea294498be87b5f51f0b23 | /open_price_strategy/deal_judge_direction_result_file.py | 07d10d7228e01988c8959e09e91bfb0f446ae007 | [] | no_license | dxcv/pratice_project | 852ccde684a05ff4f17f5d17bea0cfddc4c3015c | f4b78bf9f6ed196ff926036a51bfb638bb240369 | refs/heads/master | 2020-05-26T19:01:42.983442 | 2018-11-23T08:36:43 | 2018-11-23T08:36:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,745 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Mar 14 14:49:45 2017
# 该脚本用于对judge_direction_after_auction_total_level进行分析
# 首先我根据脚本判断,如果持有一天到收盘,最后能不能盈利?
@author: Tracy Zhu
"""
### 导入系统库
import sys, time
import logging
from collections import deque
### 导入用户库
sys.path.append("C:\\Users\\Tracy Zhu\\Desktop\\tool\\open_price_strategy")
from plot_depth_quote import *
open_time = "20:59:00"
target_result_file_name = "C:\\Users\\Tracy Zhu\\Desktop\\tool\\open_price_strategy\\judge_direction_after_auction_total_level.csv"
target_result_file = open(target_result_file_name, 'r')
result_lines = target_result_file.readlines()
target_result_file.close()
output_file_name = "deal_judge_direction_result_file.csv"
out_file = open(output_file_name, 'wb')
print>>out_file, "instrument_id, trading_day, open_direction, open_price, close_price, profit"
profit = 0
for one_line in result_lines[1:]:
one_list = one_line.split(',')
instrument_id = one_list[0]
trading_day = one_list[1]
open_direction = one_list[5]
variety_id = get_variety_id(instrument_id)
_, unit, _ = get_variety_information(variety_id)
close_price = get_close_price(instrument_id, trading_day)
open_price = get_open_price_from_quote_data(instrument_id, trading_day)
print instrument_id, trading_day
if open_direction == 'long':
profit = close_price - open_price
profit = profit * unit
elif open_direction == 'short':
profit = open_price - close_price
profit = profit * unit
print >> out_file, instrument_id, ',', trading_day, ',', open_direction, ',', open_price, ',', close_price, ',', profit
out_file.close() | [
"tracy.zhu7@gmail.com"
] | tracy.zhu7@gmail.com |
c538fb5cbdac74431e65498c5bb4964e8dcd47c5 | 63768dc92cde5515a96d774a32facb461a3bf6e9 | /jacket/compute/cloud/vm_mode.py | 7ca85fbd53edb92822a9d5b0385735b37fb28c03 | [
"Apache-2.0"
] | permissive | ljZM33nd/jacket | 6fe9156f6f5789e5c24425afa7ce9237c302673d | d7ad3147fcb43131098c2a5210847634ff5fb325 | refs/heads/master | 2023-04-16T11:02:01.153751 | 2016-11-15T02:48:12 | 2016-11-15T02:48:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,212 | py | # Copyright 2012 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Possible vm modes for instances.
Compute instance vm modes represent the host/guest ABI used for the
virtual machine / container. Individual hypervisors may support
multiple different vm modes per host. Available vm modes for a hypervisor
driver may also vary according to the architecture it is running on.
The 'vm_mode' parameter can be set against an instance to
choose what sort of VM to boot.
"""
from jacket.compute import exception
HVM = "hvm" # Native ABI (aka fully virtualized)
XEN = "xen" # Xen 3.0 paravirtualized
UML = "uml" # User Mode Linux paravirtualized
EXE = "exe" # Executables in containers
ALL = [HVM, XEN, UML, EXE]
def get_from_instance(instance):
"""Get the vm mode for an instance
:param instance: instance object to query
:returns: canonicalized vm mode for the instance
"""
mode = instance.vm_mode
return canonicalize(mode)
def is_valid(name):
"""Check if a string is a valid vm mode
:param name: vm mode name to validate
:returns: True if @name is valid
"""
return name in ALL
def canonicalize(mode):
"""Canonicalize the vm mode
:param name: vm mode name to canonicalize
:returns: a canonical vm mode name
"""
if mode is None:
return None
mode = mode.lower()
# For compatibility with pre-Folsom deployments
if mode == "pv":
mode = XEN
if mode == "hv":
mode = HVM
if mode == "baremetal":
mode = HVM
if not is_valid(mode):
raise exception.InvalidVirtualMachineMode(vmmode=mode)
return mode
| [
"nkapotoxin@gmail.com"
] | nkapotoxin@gmail.com |
8beeae688c7148ebe2715f0ca83ccfd8f6ce9996 | 6b9084d234c87d7597f97ec95808e13f599bf9a1 | /data/tracking/sampler/SiamFC/_deprecated/sampler.py | db5571b4db36b29aa180d356235ddcd410d4e57c | [] | no_license | LitingLin/ubiquitous-happiness | 4b46234ce0cb29c4d27b00ec5a60d3eeb52c26fc | aae2d764e136ca4a36c054212b361dd7e8b22cba | refs/heads/main | 2023-07-13T19:51:32.227633 | 2021-08-03T16:02:03 | 2021-08-03T16:02:03 | 316,664,903 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,797 | py | import numpy as np
from Dataset.SOT.Storage.MemoryMapped.dataset import SingleObjectTrackingDataset_MemoryMapped
from Dataset.MOT.Storage.MemoryMapped.dataset import MultipleObjectTrackingDataset_MemoryMapped
from Dataset.DET.Storage.MemoryMapped.dataset import DetectionDataset_MemoryMapped
from data.tracking.sampler._sampler.sequence.SiamFC.DET import \
do_sampling_in_detection_dataset_image, get_one_random_sample_in_detection_dataset_image
from data.tracking.sampler._sampler.sequence.SiamFC.SOT import \
do_sampling_in_single_object_tracking_dataset_sequence, \
do_negative_sampling_in_single_object_tracking_dataset_sequence, \
get_one_random_sample_in_single_object_tracking_dataset_sequence
from data.tracking.sampler._sampler.sequence.SiamFC.MOT import \
do_sampling_in_multiple_object_tracking_dataset_sequence, \
do_negative_sampling_in_multiple_object_tracking_dataset_sequence, \
get_one_random_sample_in_multiple_object_tracking_dataset_sequence
from data.tracking.sampler.SiamFC.type import SiamesePairSamplingMethod
class SOTTrackingSiameseIterableDatasetSampler:
def __init__(self, datasets, negative_sample_ratio, enforce_fine_positive_sample, sampling_method: SiamesePairSamplingMethod, datasets_sampling_parameters=None, datasets_sampling_weight=None, data_processor=None):
self.datasets = datasets
self.dataset_lengths = [len(dataset) for dataset in datasets]
self.datasets_sampling_weight = datasets_sampling_weight
self.negative_sample_ratio = negative_sample_ratio
self.enforce_fine_positive_sample = enforce_fine_positive_sample
raise NotImplementedError
self.sampling_method = sampling_method
self.data_processor = data_processor
self.datasets_sampling_parameters = datasets_sampling_parameters
self.current_index_of_dataset = None
self.current_index_of_sequence = None
self.current_is_sampling_positive_sample = None
def move_next(self, rng_engine: np.random.Generator):
index_of_dataset = rng_engine.choice(np.arange(len(self.datasets)), p=self.datasets_sampling_weight)
if self.negative_sample_ratio == 0:
is_negative = False
else:
is_negative = rng_engine.random() < self.negative_sample_ratio
index_of_sequence = rng_engine.integers(0, self.dataset_lengths[index_of_dataset])
self.current_index_of_dataset = index_of_dataset
self.current_is_sampling_positive_sample = not is_negative
self.current_index_of_sequence = index_of_sequence
def _pick_random_object_as_negative_sample(self, rng_engine: np.random.Generator):
index_of_dataset = rng_engine.choice(np.arange(len(self.datasets)), p=self.datasets_sampling_weight)
dataset = self.datasets[index_of_dataset]
index_of_sequence = rng_engine.integers(0, len(dataset))
sequence = dataset[index_of_sequence]
if isinstance(dataset, DetectionDataset_MemoryMapped):
data = get_one_random_sample_in_detection_dataset_image(sequence, rng_engine)
elif isinstance(dataset, SingleObjectTrackingDataset_MemoryMapped):
data = get_one_random_sample_in_single_object_tracking_dataset_sequence(sequence, rng_engine)
elif isinstance(dataset, MultipleObjectTrackingDataset_MemoryMapped):
data = get_one_random_sample_in_multiple_object_tracking_dataset_sequence(sequence, rng_engine)
else:
raise NotImplementedError
return data
def do_sampling(self, rng_engine: np.random.Generator):
dataset = self.datasets[self.current_index_of_dataset]
sequence = dataset[self.current_index_of_sequence]
frame_range = 100
if self.datasets_sampling_parameters is not None:
sampling_parameter = self.datasets_sampling_parameters[self.current_index_of_dataset]
if 'frame_range' in sampling_parameter:
frame_range = sampling_parameter['frame_range']
if isinstance(dataset, (SingleObjectTrackingDataset_MemoryMapped, MultipleObjectTrackingDataset_MemoryMapped)):
if sequence.has_fps():
fps = sequence.get_fps()
frame_range = int(round(fps / 30 * frame_range))
if self.current_is_sampling_positive_sample:
if isinstance(dataset, DetectionDataset_MemoryMapped):
z_image, z_bbox = do_sampling_in_detection_dataset_image(sequence, rng_engine)
data = (z_image, z_bbox, z_image, z_bbox, True)
elif isinstance(dataset, (SingleObjectTrackingDataset_MemoryMapped, MultipleObjectTrackingDataset_MemoryMapped)):
if isinstance(dataset, SingleObjectTrackingDataset_MemoryMapped):
sampled_data, is_positive = do_sampling_in_single_object_tracking_dataset_sequence(sequence, frame_range, rng_engine)
else:
sampled_data, is_positive = do_sampling_in_multiple_object_tracking_dataset_sequence(sequence, frame_range, rng_engine)
if is_positive == 0:
data = (sampled_data[0][0], sampled_data[0][1], sampled_data[0][0], sampled_data[0][1], True)
else:
data = (sampled_data[0][0], sampled_data[0][1], sampled_data[1][0], sampled_data[1][1], is_positive == 1)
else:
raise NotImplementedError
else:
if isinstance(dataset, DetectionDataset_MemoryMapped):
z_image, z_bbox = do_sampling_in_detection_dataset_image(sequence, rng_engine)
x_image, x_bbox = self._pick_random_object_as_negative_sample(rng_engine)
data = (z_image, z_bbox, x_image, x_bbox, False)
elif isinstance(dataset, (SingleObjectTrackingDataset_MemoryMapped, MultipleObjectTrackingDataset_MemoryMapped)):
if isinstance(dataset, SingleObjectTrackingDataset_MemoryMapped):
sampled_data = do_negative_sampling_in_single_object_tracking_dataset_sequence(sequence, frame_range, rng_engine)
else:
sampled_data = do_negative_sampling_in_multiple_object_tracking_dataset_sequence(sequence, frame_range, rng_engine)
if len(sampled_data) == 1:
x_image, x_bbox = self._pick_random_object_as_negative_sample(rng_engine)
data = (sampled_data[0][0], sampled_data[0][1], x_image, x_bbox, False)
else:
data = (sampled_data[0][0], sampled_data[0][1], sampled_data[1][0], sampled_data[1][1], False)
else:
raise NotImplementedError
if self.data_processor is not None:
data = self.data_processor(*data)
return data
| [
"linliting06@live.com"
] | linliting06@live.com |
34fe61172c2f5e69c0655b2388e2cb932655b39b | 15d29e054857b808ce28be78aeeb9604160dcdd2 | /biota/biota/calibrate.py | 94371ce6089d4cf82ec0b7337500029e505d1343 | [
"GPL-3.0-only",
"MIT"
] | permissive | eriklindquist/smfm_biota | 1b1c7196599623d726c8dda335d1ada54ff0fe26 | 23653cb590683c7668b1e3a4c4f1a4b6a6944cdb | refs/heads/main | 2023-03-25T22:55:12.315701 | 2021-03-05T15:00:05 | 2021-03-05T15:00:05 | 345,615,223 | 0 | 0 | MIT | 2021-03-08T10:21:53 | 2021-03-08T10:21:53 | null | UTF-8 | Python | false | false | 4,714 | py | #!/usr/bin/env python
# This is a set of scripts for calibration of a biomass-backscatter curve
import argparse
import csv
import numpy as np
import scipy.stats
import biota
import biota.mask
import pdb
def extractGamma0(dataloc, year, shp, plot_field, agb_field, buffer_size = 0, verbose = False, units = 'natural'):
'''
Extract gamma0 from ALOS tiles.
Args:
shp: A shapefile containing plot data
year: Year to load
plot_field: The shapefile field containing plot names
agb_field: The shapefile field containing AGB estimates
Returns:
A dictionary containing plot names, AGB and gamma0 values
'''
# Use example data to get processing steps
#downsample_factor = tile_example.downsample_factor
#lee_filter = tile_example.lee_filter
#year = tile_example.year
#dataloc = tile_example.dataloc
assert (year >= 2007 and year <= 2010) or year >= 2015, "Invalid year (%s) input"%str(year)
downsample_factor = 1
lee_filter = True
window_size = 3
# Extract relevant info from shapefile
plot_names = biota.mask.getField(shp, plot_field)
if agb_field is not None:
agb = biota.mask.getField(shp, agb_field).astype(np.float)
# Identify tiles that contain gamma0 data for the shapefile
tiles = biota.mask.getTilesInShapefile(shp)
# Check whether all the necessary tiles are all present. Decide what to do if they aren't.
# TODO
# Generate output array for backscatter
gamma0_mean_hv = np.empty_like(plot_names, dtype = np.float32)
gamma0_mean_hv[:] = np.nan
gamma0_mean_hh = gamma0_mean_hv.copy()
gamma0_std_hv = gamma0_mean_hv.copy()
gamma0_std_hh = gamma0_mean_hv.copy()
doy = gamma0_mean_hv.copy()
# For each tile covered by shapefile
for lat, lon in tiles:
if verbose: print('Doing lat: %s, lon: %s'%(str(lat), str(lon)))
# Load tile
try:
tile = biota.LoadTile(dataloc, lat, lon, year, downsample_factor = downsample_factor, lee_filter = lee_filter, window_size = window_size)
except:
continue
# Get backscatter (both polarisations) and DOY
data_gamma0_hv = tile.getGamma0(polarisation = 'HV', units = units)
data_gamma0_hh = tile.getGamma0(polarisation = 'HH', units = units)
data_doy = tile.getDOY()
# Mask out each plot
plot_mask = biota.mask.maskShapefile(tile, shp, location_id = True, buffer_size = buffer_size)
# Extract values for each plot
for n in np.unique(plot_mask[plot_mask != 0]):
# Get mask for plot and tile
this_mask = np.logical_and(plot_mask == n, tile.mask == False)
if this_mask.sum() == 0: continue
# Add metrics to output array
gamma0_mean_hv[n-1] = np.nanmean(data_gamma0_hv[this_mask])
gamma0_mean_hh[n-1] = np.nanmean(data_gamma0_hh[this_mask])
gamma0_std_hv[n-1] = np.nanstd(data_gamma0_hv[this_mask])
gamma0_std_hh[n-1] = np.nanstd(data_gamma0_hh[this_mask])
doy[n-1] = np.nanmedian(data_doy[this_mask])
if np.isnan(np.nanmean(data_gamma0_hv[this_mask])): pdb.set_trace()
# Return data as a dictionary
data_dict = {}
data_dict['plot_name'] = plot_names.tolist()
data_dict['gamma0_mean_HH'] = gamma0_mean_hh.tolist()
data_dict['gamma0_mean_HV'] = gamma0_mean_hv.tolist()
data_dict['gamma0_std_HH'] = gamma0_std_hh.tolist()
data_dict['gamma0_std_HV'] = gamma0_std_hv.tolist()
data_dict['DOY'] = doy.tolist()
if agb_field is not None:
data_dict['plot_AGB'] = agb.tolist()
with open('gamma0_%s_by_plot.csv'%str(year), 'wb') as f: # Just use 'w' mode in 3.x
writer = csv.writer(f, delimiter = ',')
writer.writerow(list(data_dict.keys()))
for row in range(len(plot_names)):
writer.writerow([data_dict[k][row] for k in list(data_dict.keys())])
return data_dict
def fitLinearModel(data_dict):
'''
Fit a linear model to relate AGB to gamma0 backscatter.
Args:
data_dict: Dictionary output from extractGamma0() function.
Returns:
model slope, model intercept
'''
# Select only data that have values. NaN can arise in cases of masked areas in ALOS tiles
sel = np.logical_and(np.isfinite(data_dict['gamma0_HV']), np.isfinite(data_dict['plot_AGB']))
assert sel.sum() > 0, "No usable data in data_dict."
slope, intercept, r_value, p_value, stderr = scipy.stats.linregress(data_dict['gamma0_HV'][sel], data_dict['AGB'][sel])
print("r-squared:", r_value**2)
print("p value:", p_value)
return slope, intercept
| [
"dfgm2006@gmail.com"
] | dfgm2006@gmail.com |
4497578546be8b9a91eb065eca3349fb21651b5c | e20a119de04049ba75ec2f845daa0a218cc63101 | /py_deviare_directsound.py | 88e27652873c0039399b3bb1bc434bce8d83af7e | [] | no_license | wx1yyn/DirectSound_Capture_With_Deviare | cc499b25d44cd87df6a2542e32481ed0fb949b85 | 250a294e2efc0325c070123984b45b53a6bc24f7 | refs/heads/master | 2021-01-20T00:58:51.758899 | 2013-12-02T07:18:00 | 2013-12-02T07:18:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,464 | py | from py_deviare_objects import *
from py_deviare_wavtools import *
import ctypes
#Globals
g_Listener = None
g_ISoundBuffer_Hooked = False
g_ISound_Hooked = False
g_Target_Process = ''
g_Handlers = []
g_BuffersData = dict()
g_UsePolling = False
#Constants
VT_ISOUND_CREATE = 3
VT_ISBUFFER_GETPOS = 4
VT_ISBUFFER_INIT = 10
VT_ISBUFFER_SETFORMAT = 14
VT_ISBUFFER_UNLOCK = 19
S_OK = 0
DS_OK = 0
IDSOUND_ID = "{0F0F0F0F-113A-4832-9104-000000000002}"
IDSOUNDBUFFER_ID = "{0F0F0F0F-113A-4832-9104-000000000003}"
#Storage class
class BufferData(object):
def __init__(self, address = 0, buffer_size = 0):
self.buffer_address = address
self.buffer_size = buffer_size
self.chunks = []
#Inherit class to recieve DirectSound events:
class Listener(object):
def OnBufferCreated(self, buffer_id, is_primary, flags, buffer_size):
print "Sound buffer created: ", is_primary, " ", buffer_id
def OnBufferInit(self, buffer_id, wave_format_ex):
print "Sound buffer initialized"
def OnBufferWritten(self, buffer_id, wav_data):
print "Sound buffer written"
def RaiseBufferWritten(self, buffer_id, chunk_list):
for chunk in chunk_list:
if (chunk == None):
continue
self.OnBufferWritten(buffer_id, chunk)
#Hook IDirectSoundBuffer Init, Unlock, SetFormat, GetCurrentPosition
def HookISoundBuffer(proc, vt):
global g_ISoundBuffer_Hooked
global g_Handlers
if (g_ISoundBuffer_Hooked):
return
table = [[IDSOUNDBUFFER_ID, vt, VT_ISBUFFER_UNLOCK, CALL_DEFAULT, DSBufferUnlockHandler],
[IDSOUNDBUFFER_ID, vt, VT_ISBUFFER_INIT, CALL_AFTER, DSBufferInitHandler],
[IDSOUNDBUFFER_ID, vt, VT_ISBUFFER_SETFORMAT, CALL_AFTER, DSBufferSetFormatHandler],
[IDSOUNDBUFFER_ID, vt, VT_ISBUFFER_GETPOS, CALL_AFTER, DSBufferGetPositionHandler]]
for ti in table:
h = HookMember(proc, ti[0], ti[1], ti[2], ti[3], ti[4])
g_Handlers.append(h)
g_ISoundBuffer_Hooked = True
#Hook IDirectSound CreateBuffer
def HookISound(proc_name, vt):
global g_ISound_Hooked
global g_Handlers
if (g_ISound_Hooked):
return
h = HookMember(proc_name, IDSOUND_ID, vt, VT_ISOUND_CREATE, CALL_AFTER, DSoundCreateBufferHandler)
g_Handlers.append(h)
g_ISound_Hooked = True
#IDirectSoundBuffer::GetCurrentPosition handler
class DSBufferGetPositionHandler(HookHandler):
def __init__(self):
super(self.__class__, self).__init__()
self.__last_pos = None
self.__proc_mem = None
self.__dump_ix = 0
def PyFunctionCalled(self, proc, ci, rc):
if (not g_UsePolling):
return
if self.__proc_mem == None:
self.__proc_mem = proc.RawMemory
pms = ci.Params
pThis = pms.Item(0).Value
pos_read = self.__read_pointer(pms.Item(1))
pos_write = self.__read_pointer(pms.Item(2))
self.__update_pointers(pThis, pos_read, pos_write)
def __update_pointers(self, buffer_id, pos_read, pos_write):
global g_Listener
global g_BuffersData
#Init last_pos with the current write pointer
curr_pos = pos_write
if self.__last_pos == None:
self.__last_pos = curr_pos
return
last_pos = self.__last_pos
#Skip if nothing was written
if curr_pos == last_pos:
return
dsbuffers = g_BuffersData
if (not dsbuffers.has_key(buffer_id)):
print "unable to update buffer data: %X" %(buffer_id)
return
base_ptr = dsbuffers[buffer_id].buffer_address
buffer_size = dsbuffers[buffer_id].buffer_size
#Calc necesary chunks:
chunk_sz = (buffer_size >> 5)
assert(chunk_sz > 0)
chunk_mx = buffer_size/chunk_sz
#Skip small changes in offsets
if abs(curr_pos - last_pos) < chunk_sz:
return
chunk_ix = last_pos / chunk_sz
chunk_ex = curr_pos / chunk_sz
chunk_rg = None
if chunk_ex > chunk_ix:
chunk_rg = range(chunk_ix, chunk_ex)
else:
r1 = range(chunk_ix, chunk_mx)
r2 = range(chunk_ex)
r1.extend(r2)
chunk_rg = r1
#Read chunks
for ix in chunk_rg:
mem = self.__proc_mem
ptr = base_ptr + (ix * chunk_sz)
wave = ReadBuffer(mem, ptr, chunk_sz)
dsbuffers[buffer_id].chunks.append(wave)
#Flush the cache
chunks = dsbuffers[buffer_id].chunks
if len(chunks) > buffer_size/chunk_sz - 1:
g_Listener.RaiseBufferWritten(buffer_id, chunks)
dsbuffers[buffer_id].chunks = []
#Store last offset
self.__last_pos = curr_pos
def __read_pointer(self, pm):
ret = None
pm = pm.CastTo("int*")
if (pm.Value != 0):
pm = pm.Evaluated
ret = pm.Value
return ret
#IDirectSoundBuffer::Initialize handler
class DSBufferInitHandler(HookHandler):
def PyFunctionCalled(self, proc, ci, rc):
global g_Listener
pm = ci.Params.Item(2)
pm = pm.CastTo("LPCDSBUFFERDESC")
pm = pm.Evaluated
pm = pm.Fields.Item(4) # LPWAVEFORMATEX
if (pm.Value == 0):
return
pm = pm.Evaluated
raw_mem = pm.RawMemory
buffer = ReadBuffer(raw_mem, 0, pm.Size)
wf = WaveFormatEx.Create(buffer)
if (wf != None):
g_Listener.OnBufferInit(buffer_id, wf)
#IDirectSoundBuffer::SetFormat handler
class DSBufferSetFormatHandler(HookHandler):
def PyFunctionCalled(self, proc, ci, rc):
global g_Listener
pms = ci.Params
buffer_id = pms.Item(0).Value
pm = pms.Item(1)
pm = pm.CastTo("LPCWAVEFORMATEX")
pm = pm.Evaluated
buffer = ReadBuffer(pm.RawMemory, 0, pm.Size)
wf = WaveFormatEx.Create(buffer)
if (wf != None):
g_Listener.OnBufferInit(buffer_id, wf)
#IDirectSoundBuffer::Unlock handler
class DSBufferUnlockHandler(HookHandler):
def __init__(self):
super(self.__class__, self).__init__()
self.proc_mem = None
self.calls = dict()
def PyFunctionCalled(self, proc, ci, rc):
global g_Listener
global g_BuffersData
global g_UsePolling
cookie = ci.Cookie
pms = ci.Params
buffer_id = pms.Item(0).Value
#Store buffer address and size:
if g_UsePolling:
buffer_1 = pms.Item(1).Value
buffer_1_len = pms.Item(2).Value
dsbuffers = g_BuffersData
if (not dsbuffers.has_key(buffer_id) ):
#On the first call the full buffer is referenced
dsbuffers[buffer_id] = BufferData(buffer_1, buffer_1_len)
return
if (ci.HookFlags == CALL_BEFORE):
buffer_1 = pms.Item(1).Value
buffer_1_len = pms.Item(2).Value
buffer_2 = pms.Item(3).Value
buffer_2_len = pms.Item(4).Value
raw_mem = proc.RawMemory
cache_1 = ReadBuffer(raw_mem, buffer_1, buffer_1_len)
cache_2 = ReadBuffer(raw_mem, buffer_2, buffer_2_len)
self.calls[cookie] = [ cache_1, cache_2 ]
else:
ret = ci.ReturnValue
call_data = self.calls.pop(cookie, None)
if (ret == DS_OK) and (call_data != None):
g_Listener.RaiseBufferWritten(buffer_id, [call_data[0], call_data[1]])
#IDirectSound::CreateSoundBuffer
class DSoundCreateBufferHandler(HookHandler):
def PyFunctionCalled(self, proc, ci, rc):
if (ci.ReturnValue != S_OK):
return
#Read Settings:
is_primary = True
waveFormat = None
pm = ci.Params.Item(1)
pm = pm.CastTo("LPCDSBUFFERDESC") #pm.Evaluated
pm = pm.Evaluated
flags = pm.Fields.Item(1).Value # dwFlags
buffer_size = pm.Fields.Item(2).Value #
pm = pm.Fields.Item(4) # LPWAVEFORMATEX
if (pm.Value != 0):
pm = pm.Evaluated
raw_mem = pm.RawMemory
buffer = ReadBuffer(raw_mem, 0, pm.Size)
wf = WaveFormatEx.Create(buffer)
waveFormat = wf
is_primary = False
#Get buffer 'id'
pms = ci.Params
pm = pms.Item(2) #DirectSoundBuffer**
pm = pm.CastTo("int*")
pm = pm.Evaluated
buffer_id = pm.Value
#Hook ISoundBuffer
pm = pm.CastTo("int*")
pm = pm.Evaluated
vt = pm.Value
HookISoundBuffer(proc.Name, vt)
#Report Event:
g_Listener.OnBufferCreated(buffer_id, is_primary, flags, buffer_size)
if (waveFormat != None):
g_Listener.OnBufferInit(buffer_id, waveFormat)
#dsound.dll!CreateDirectSound() handler
class DSoundHandler(HookHandler):
def PyFunctionCalled(self, proc, ci, rc):
retVal = ci.ReturnValue
if (retVal != S_OK): #success from DirectSoundCreate
return
pms = ci.Params
pm = pms.Item(1).Evaluated
pm = pm.CastTo("int*")
pm = pm.Evaluated
vt = pm.Value
HookISound(proc.Name, vt)
#Load directsound and find vtables to use in remote process:
def FindAndHookVTables(proc_name):
helper = ctypes.windll.DeviareDirectSoundHelper
vt_IS = helper.GetISoundVTable()
vt_ISB = helper.GetISoundBufferVTable()
if (vt_IS == 0 or vt_ISB == 0):
print "Failed to obtain DirectSound vtables"
return
HookISound(proc_name, vt_IS)
HookISoundBuffer(proc_name, vt_ISB)
#Hook Interfaces as they are discovered:
def HookOnDemand(proc_name):
h = HookProcess(proc_name, "dsound.dll!DirectSoundCreate", CALL_AFTER, DSoundHandler)
g_Handlers.append(h)
h = HookProcess(proc_name, "dsound.dll!DirectSoundCreate8", CALL_AFTER, DSoundHandler)
g_Handlers.append(h)
## Hook direct sound and start monitoring
def run_dispatch(proc_name, listener = Listener(), use_polling = False):
assert( issubclass(listener.__class__, Listener) )
global g_Handlers
global g_Target_Process
global g_Listener
global g_UsePolling
g_Target_Process = proc_name
g_Listener = listener
g_UsePolling = use_polling
#This is a faster way to intercept calls right away. If this is not working for you,
#comment this call and use the one below
FindAndHookVTables(proc_name)
#HookOnDemand(proc_name)
| [
"flypig17v@sina.com"
] | flypig17v@sina.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.