blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c70f0cbea76fd3fa267753a6fd35353a26b79129 | 939ca419e7bcfda248bdc6636f88971a14bd1a1f | /pytests/tuqquery/tuq_auto_prepare.py | f12406bce593df3258583bd2eb0ebeff245adb02 | [] | no_license | ritamcouchbase/secrets-mgmt | 061515c4a3c100f42ca2ce57f63d4a1acf85745c | 03641bdb63e298a7c966789559ea22e7129fb7d3 | refs/heads/master | 2022-11-20T06:22:09.954115 | 2019-10-03T10:12:26 | 2019-10-03T10:12:26 | 98,027,382 | 0 | 1 | null | 2020-07-24T07:11:59 | 2017-07-22T11:35:37 | Python | UTF-8 | Python | false | false | 15,437 | py | from membase.api.rest_client import RestConnection, RestHelper
from tuq import QueryTests
from remote.remote_util import RemoteMachineShellConnection
from membase.api.exception import CBQError
import logger
class QueryAutoPrepareTests(QueryTests):
    """Tests for N1QL auto-prepare behaviour: prepared statements should be
    distributed to all query nodes and re-prepared automatically when the
    underlying indexes/buckets change."""

    def setUp(self):
        # Per-test setup: clear any prepared statements left over from a
        # previous test so counts asserted in the tests start from zero.
        super(QueryAutoPrepareTests, self).setUp()
        self.log.info("============== QueryAutoPrepareTests setup has started ==============")
        self.run_cbq_query('delete from system:prepareds')
        self.log.info("============== QueryAutoPrepareTests setup has completed ==============")
        self.log_config_info()

    def suite_setUp(self):
        # One-time suite setup; all real work is done by the base class.
        super(QueryAutoPrepareTests, self).suite_setUp()
        self.log.info("============== QueryAutoPrepareTests suite_setup has started ==============")
        self.log.info("============== QueryAutoPrepareTests suite_setup has completed ==============")
        self.log_config_info()
    def tearDown(self):
        # Per-test teardown; cleanup is delegated to the base class.
        self.log_config_info()
        self.log.info("============== QueryAutoPrepareTests tearDown has started ==============")
        self.log.info("============== QueryAutoPrepareTests tearDown has completed ==============")
        super(QueryAutoPrepareTests, self).tearDown()

    def suite_tearDown(self):
        # One-time suite teardown; cleanup is delegated to the base class.
        self.log_config_info()
        self.log.info("============== QueryAutoPrepareTests suite_tearDown has started ==============")
        self.log.info("============== QueryAutoPrepareTests suite_tearDown has completed ==============")
        super(QueryAutoPrepareTests, self).suite_tearDown()
'''Test auto-prepare, prepare on first node, check if it is prepared on both nodes and that it can be executed on
both nodes'''
def test_basic_auto_prepare(self):
self.run_cbq_query(query="PREPARE P1 FROM select * from default limit 5", server=self.servers[0])
self.sleep(2)
prepared_results = self.run_cbq_query(query="select * from system:prepareds")
self.assertEqual(prepared_results['metrics']['resultCount'], 2)
query_results = self.run_cbq_query(query="execute P1", server=self.servers[0])
self.assertEqual(query_results['metrics']['resultCount'], 5)
query_results2 = self.run_cbq_query(query="execute P1", server=self.servers[1])
self.assertEqual(query_results2['metrics']['resultCount'], 5)
'''Test if you can execute a prepared statement by its name in clustered format [ip:port]<prepared_name> , if a node
doesn't have the prepared statement it should be able to pull it from a node that does'''
def test_pull_prepare(self):
prepared_result = self.run_cbq_query(query="PREPARE P1 FROM select * from default limit 5", server=self.servers[0])
self.sleep(2)
self.query = "delete from system:prepareds where node = '%s:%s'" \
% (self.servers[1].ip,self.servers[1].port)
self.run_cbq_query()
query_results = self.run_cbq_query(query="execute P1", server=self.servers[0])
self.assertEqual(query_results['metrics']['resultCount'], 5)
query_results2 = self.run_cbq_query(query="execute '[%s:%s]P1'"
% (self.servers[0].ip, self.servers[0].port), server=self.servers[1])
self.assertEqual(query_results2['metrics']['resultCount'], 5)
'''Delete docs to change the index that the index is using, should be able to execute prepareds without repreparing'''
def test_change_index_delete_docs(self):
try:
self.run_cbq_query(query= "CREATE INDEX idx on default(join_day)")
self.run_cbq_query(query="PREPARE P1 FROM select * from default WHERE join_day = 10 limit 5",
server=self.servers[0])
self.sleep(2)
prepared_results = self.run_cbq_query(query="select * from system:prepareds")
self.assertEqual(prepared_results['metrics']['resultCount'], 2)
query_results = self.run_cbq_query(query="execute P1", server=self.servers[0])
self.assertEqual(query_results['metrics']['resultCount'], 5)
query_results2 = self.run_cbq_query(query="execute P1", server=self.servers[1])
self.assertEqual(query_results2['metrics']['resultCount'], 5)
self.run_cbq_query(query="DELETE FROM default LIMIT 10")
self.assertEqual(prepared_results['metrics']['resultCount'], 2)
query_results = self.run_cbq_query(query="execute P1", server=self.servers[0])
self.assertEqual(query_results['metrics']['resultCount'], 5)
query_results2 = self.run_cbq_query(query="execute P1", server=self.servers[1])
self.assertEqual(query_results2['metrics']['resultCount'], 5)
finally:
self.run_cbq_query(query="DROP INDEX default.idx")
'''Drop an index and create a new index on the same field, this is a new index that the prepared needs to use,
this should trigger a re-prepare'''
def test_recreate_index(self):
try:
self.run_cbq_query(query="CREATE INDEX idx on default(join_day)")
self.run_cbq_query(query="PREPARE P1 FROM select * from default WHERE join_day = 10 limit 5",
server=self.servers[0])
self.sleep(2)
prepared_results = self.run_cbq_query(query="select * from system:prepareds")
self.assertEqual(prepared_results['metrics']['resultCount'], 2)
query_results = self.run_cbq_query(query="execute P1", server=self.servers[0])
self.assertEqual(query_results['metrics']['resultCount'], 5)
query_results2 = self.run_cbq_query(query="execute P1", server=self.servers[1])
self.assertEqual(query_results2['metrics']['resultCount'], 5)
self.run_cbq_query(query="DROP INDEX default.idx")
self.sleep(5)
self.run_cbq_query(query="CREATE INDEX idx2 on default(join_day)")
query_results = self.run_cbq_query(query="execute P1", server=self.servers[0])
self.assertEqual(query_results['metrics']['resultCount'], 5)
query_results2 = self.run_cbq_query(query="execute P1", server=self.servers[1])
self.assertEqual(query_results2['metrics']['resultCount'], 5)
finally:
self.run_cbq_query(query="DROP INDEX default.idx2")
'''Run a prepared statement using primary index, then drop primary index and create a new index that the query will
use instead'''
def test_new_index(self):
try:
self.run_cbq_query(query="PREPARE P1 FROM select * from default WHERE join_day = 10 limit 5",
server=self.servers[0])
self.sleep(2)
prepared_results = self.run_cbq_query(query="select * from system:prepareds")
self.assertEqual(prepared_results['metrics']['resultCount'], 2)
query_results = self.run_cbq_query(query="execute P1", server=self.servers[0])
self.assertEqual(query_results['metrics']['resultCount'], 5)
query_results2 = self.run_cbq_query(query="execute P1", server=self.servers[1])
self.assertEqual(query_results2['metrics']['resultCount'], 5)
self.run_cbq_query(query="DROP PRIMARY INDEX on default")
self.sleep(5)
self.run_cbq_query(query="CREATE INDEX idx on default(join_day)")
query_results = self.run_cbq_query(query="execute P1", server=self.servers[0])
self.assertEqual(query_results['metrics']['resultCount'], 5)
query_results2 = self.run_cbq_query(query="execute P1", server=self.servers[1])
self.assertEqual(query_results2['metrics']['resultCount'], 5)
finally:
self.run_cbq_query(query="DROP INDEX default.idx")
self.run_cbq_query(query="CREATE PRIMARY INDEX ON default")
'''Alter the node the index is present on to trigger a re-prepare'''
def test_alter_index(self):
try:
self.run_cbq_query(query="CREATE INDEX idx on default(join_day)")
self.run_cbq_query(query="PREPARE P1 FROM select * from default WHERE join_day = 10 limit 5",
server=self.servers[0])
self.sleep(2)
prepared_results = self.run_cbq_query(query="select * from system:prepareds")
self.assertEqual(prepared_results['metrics']['resultCount'], 2)
query_results = self.run_cbq_query(query="execute P1", server=self.servers[0])
self.assertEqual(query_results['metrics']['resultCount'], 5)
query_results2 = self.run_cbq_query(query="execute P1", server=self.servers[1])
self.assertEqual(query_results2['metrics']['resultCount'], 5)
self.run_cbq_query(query="ALTER INDEX default.idx WITH {'action':'move','nodes':['%s:%s']}" % (self.servers[0].ip, self.servers[0].port))
self.sleep(5)
query_results = self.run_cbq_query(query="execute P1", server=self.servers[0])
self.assertEqual(query_results['metrics']['resultCount'], 5)
query_results2 = self.run_cbq_query(query="execute P1", server=self.servers[1])
self.assertEqual(query_results2['metrics']['resultCount'], 5)
finally:
self.run_cbq_query(query="DROP INDEX default.idx")
    def test_delete_recreate_bucket(self):
        """Delete and recreate the bucket; executing the prepared statement
        afterwards should succeed but return no rows (the bucket is empty)."""
        try:
            self.run_cbq_query(query="CREATE INDEX idx on default(join_day)")
            self.run_cbq_query(query="PREPARE P1 FROM select * from default WHERE join_day = 10 limit 5",
                               server=self.servers[0])
            self.sleep(2)
            prepared_results = self.run_cbq_query(query="select * from system:prepareds")
            # NOTE(review): this test expects 3 prepareds where the others
            # expect 2 — presumably a third query node in this config; confirm.
            self.assertEqual(prepared_results['metrics']['resultCount'], 3)
            query_results = self.run_cbq_query(query="execute P1", server=self.servers[0])
            self.assertEqual(query_results['metrics']['resultCount'], 5)
            query_results2 = self.run_cbq_query(query="execute P1", server=self.servers[1])
            self.assertEqual(query_results2['metrics']['resultCount'], 5)
            # Recreate the (now empty) bucket and its index.
            self.rest.delete_bucket("default")
            self.sleep(5)
            self.rest.create_bucket(bucket="default", ramQuotaMB=100)
            self.sleep(5)
            self.run_cbq_query(query="CREATE INDEX idx on default(join_day)")
            # The prepared statement still executes, but over an empty bucket.
            query_results = self.run_cbq_query(query="execute P1", server=self.servers[0])
            self.assertEqual(query_results['metrics']['resultCount'], 0)
            query_results2 = self.run_cbq_query(query="execute P1", server=self.servers[1])
            self.assertEqual(query_results2['metrics']['resultCount'], 0)
        finally:
            self.run_cbq_query(query="DROP INDEX default.idx")
def test_add_node_no_rebalance(self):
services_in = ["index", "n1ql", "kv"]
# rebalance in a node
rest = RestConnection(self.master)
rest.add_node(self.master.rest_username, self.master.rest_password, self.servers[self.nodes_init].ip,
self.servers[self.nodes_init].port, services=services_in)
self.sleep(30)
self.run_cbq_query(query="PREPARE p1 from select * from default limit 5", server=self.servers[0])
self.sleep(5)
nodes = rest.node_statuses()
rest.rebalance(otpNodes=[node.id for node in nodes], ejectedNodes=[])
self.sleep(30)
for i in range(self.nodes_init + 1):
try:
self.run_cbq_query(query="execute p1", server=self.servers[i])
except CBQError,ex:
self.assertTrue("No such prepared statement: p1" in str(ex), "There error should be no such prepared "
"statement, it really is %s" % ex)
self.log.info(ex)
self.log.info("node: %s:%s does not have the statement" % (self.servers[i].ip, self.servers[i].port))
def test_server_drop(self):
remote = RemoteMachineShellConnection(self.servers[1])
remote.stop_server()
self.sleep(30)
try:
self.run_cbq_query(query="PREPARE p1 from select * from default limit 5", server=self.servers[1])
self.sleep(5)
finally:
remote.start_server()
self.sleep(30)
for i in range(self.nodes_init + 1):
try:
self.run_cbq_query(query="execute p1", server=self.servers[i])
except CBQError,ex:
self.assertTrue("No such prepared statement: p1" in str(ex), "There error should be no such prepared "
"statement, it really is %s" % ex)
self.log.info(ex)
self.log.info("node: %s:%s does not have the statement" % (self.servers[i].ip, self.servers[i].port))
def test_rebalance_in_query_node(self):
self.run_cbq_query(query="PREPARE p1 from select * from default limit 5", server=self.servers[0])
self.sleep(5)
for i in range(self.nodes_init):
self.run_cbq_query(query="execute p1", server=self.servers[i])
services_in = ["n1ql", "index", "data"]
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [self.servers[self.nodes_init + 1]],[],
services=services_in)
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
self.sleep(30)
for i in range(self.nodes_init + 2):
self.run_cbq_query(query="execute '[%s:%s]p1'" % (self.servers[0].ip, self.servers[0].port), server=self.servers[i])
    def test_query_swap_rebalance(self):
        """Swap-rebalance out the index node and in a replacement, then verify
        remaining nodes can still execute p1 via its clustered name."""
        self.run_cbq_query(query="PREPARE p1 from select * from default limit 5", server=self.servers[0])
        self.sleep(5)
        # Baseline execution on every initial node except servers[1].
        for i in range(self.nodes_init):
            if not self.servers[i] == self.servers[1]:
                self.run_cbq_query(query="execute p1", server=self.servers[i])
        # Pick the index node to remove and the replacement to add.
        nodes_out_list = self.get_nodes_from_services_map(service_type="index", get_all_nodes=False)
        to_add_nodes = [self.servers[self.nodes_init + 2]]
        to_remove_nodes = [nodes_out_list]
        services_in = ["index", "n1ql", "data"]
        self.log.info(self.servers[:self.nodes_init])
        # do a swap rebalance
        rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], to_add_nodes, [], services=services_in)
        reached = RestHelper(self.rest).rebalance_reached()
        self.assertTrue(reached, "rebalance failed, stuck or did not complete")
        rebalance.result()
        # Second rebalance ejects the old index node.
        rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init + 2], [], to_remove_nodes)
        reached = RestHelper(self.rest).rebalance_reached()
        self.assertTrue(reached, "rebalance failed, stuck or did not complete")
        rebalance.result()
        self.sleep(30)
        # NOTE(review): executes via servers[2]'s clustered name — presumably
        # the node that retained the plan after the swap; confirm.
        for i in range(self.nodes_init):
            if not self.servers[i] == self.servers[1]:
                self.n1ql_helper.run_cbq_query(query="execute '[%s:%s]p1'" % (self.servers[2].ip, self.servers[2].port),
                                               server=self.servers[i])
| [
"ritamcouchbase@gmail.com"
] | ritamcouchbase@gmail.com |
aad36f672042a87b37bbd6ff1583f40ff6faaeab | ad80f3e8b139cc3a02e5287b2ad56ffeb94a5263 | /hkextools/fAnalysis.py | f84330e1da0dad0c29486790184b1200842a3328 | [
"MIT"
] | permissive | jefflai0101/Stock-Analysis | f4bebc23bb90773a1a873e33481ac203d31cd93e | fc87b5f56aaaf3dc04f05e2451f219df160b694c | refs/heads/master | 2020-12-21T19:50:41.375104 | 2017-01-15T09:21:59 | 2017-01-15T09:21:59 | 57,353,923 | 7 | 1 | null | null | null | null | UTF-8 | Python | false | false | 21,065 | py | #imports
from bs4 import BeautifulSoup
import urllib.request
import os
import re
import csv
import socket
import requests
import threading
import xlsxwriter
#===============================================================================================================================================
#Obtain current folder path
# Module-level state populated by the caller (e.g. main) before use:
# run date components, working/output folders, FX conversion table and
# the row labels written to the Ratios.csv for each company category.
thisDay = ''
thisMonth = ''
thisYear = ''
folderPath = ''
outputPath = ''
# Maps currency code -> HKD conversion rate; an example snapshot is kept
# commented out below.
fxRate = {}
#outputPath = 'D:\\Dropbox\\Station\\HKEx'
#fxRate = {'USD' : 7.78, 'RMB' : 1.1, 'HKD' : 1, 'RM' : 1.74, 'SGD' : 5.43, 'JPY' : 0.067, 'EUR' : 8.23, 'CAD' : 5.90, 'GBP' : 9.47}
# Ratio row labels per company type: 'NFI' = non-financial-institution,
# 'FI' = financial institution (banks etc.).
labelDict = {'NFI' : ['Currency', 'Unit', 'Year End', 'Gross Profit Margin','EBITDA Margin','Net Profit Margin','EBITDA Coverage','Current Ratio','Quick Ratio','NAV','Debt to Assets','Debt to Equity','Average Total Assets','Average Total Equity','Assets Turnover','Leverage Ratio','ROE','Z-Score', 'PE', '3 Months Average', 'Latest Price'], 'FI' : ['Currency', 'Unit', 'Year End', 'EBITDA Margin', 'NAV', 'Average Total Assets','Average Total Equity', 'Assets Turnover', 'Leverage Ratio', 'ROE', 'ROA', 'Loan/Deposit', 'Efficieny Ratio', 'Bad Debts Provision', 'Net Interest Spread', 'CAR', '3 Months Average', 'Latest Price']}
#===============================================================================================================================================
#Imports from HKEx Tools
from hkextools import nettools
from hkextools import utiltools
#===============================================================================================================================================
#*********************************** Connect and Scrape ***********************************
#===============================================================================================================================================
#Function in scraping company financial info on Aastocks
def cNS(coInfo):
    """Scrape a company's P&L, balance sheet and cash-flow tables from
    AAStocks and write them to Companies/<code>/Financial/Figures.csv.

    Returns coInfo with coInfo[14] updated to the latest closing date and
    coInfo[15] set to 'FI'/'NFI'. Returns early (no rewrite) when the
    scraped closing date matches the one already stored in coInfo[14].
    """
    print('Collecting Financials for %s' %coInfo[0])
    # Ensure the per-company output folders exist.
    if (os.path.isdir(os.path.join(folderPath, 'Companies', coInfo[0])) == False): os.system ('mkdir ' + os.path.join(folderPath, 'Companies', coInfo[0]))
    if (os.path.isdir(os.path.join(folderPath, 'Companies', coInfo[0], 'Financial')) == False): os.system ('mkdir ' + os.path.join('Companies', coInfo[0], 'Financial'))
    # The three statement pages scraped per company.
    mode = ['profit-loss?symbol=', 'balance-sheet?symbol=', 'cash-flow?symbol=']
    coData = []
    for i in range(0,3):
        link = 'http://www.aastocks.com/en/stocks/analysis/company-fundamental/' + mode[i] + coInfo[0]
        content = nettools.tryConnect(link)
        coSoup = BeautifulSoup(content, 'html.parser')
        # Each data row carries a ref="<PAGE>_Field..." attribute.
        for trInfo in coSoup.find_all('tr', {'ref' : re.compile(r'[A-Z]+_Field(.*)+')}):
            fData = []
            fData.append(trInfo.find('td', {'class' : re.compile(r'fieldWithoutBorder')}).get_text().strip())
            # Skip duplicated header rows on the 2nd/3rd pages so each label
            # appears only once in the combined CSV.
            if not ((i != 2) and (fData[0] in ['Auditor\'s Opinion', 'Unit', 'Currency'])) and not ((i != 0) and (fData[0] == 'Closing Date')):
                for tdInfo in trInfo.find_all('td', {'class' : re.compile(r'(.*)cls(.*)')}):
                    if (tdInfo.get_text() != ''): fData.append(tdInfo.get_text().split('/')[0]) if (fData[0] == 'Closing Date') else fData.append(tdInfo.get_text())
            # Already up to date: latest closing date matches the stored one.
            if (fData[0] == 'Closing Date') and (fData[-1] == coInfo[14] and fData[-1] != ''): return coInfo
            if len(fData) > 1 : coData.append(fData)
    with open(os.path.join(folderPath,'Companies', coInfo[0], 'Financial', 'Figures.csv'), "w+", newline='', encoding='utf-8') as csvfile:
        csvwriter = csv.writer(csvfile)
        for fData in coData: csvwriter.writerow(fData)
    coInfo[14] = coData[0][-1]
    # NOTE(review): row count 78 is presumably the signature of an FI
    # (bank) statement layout — confirm against AAStocks page structure.
    coInfo[15] = 'FI' if len(coData) == 78 else 'NFI'
    return coInfo
#Add case for adding onto existing financial data e.g. [2011~2015] + 2016
#===============================================================================================================================================
def stockPriceInfo(coCode):
    """Scrape Yahoo Finance daily history for coCode (leading digit dropped)
    and return (three-month average close, latest close). Falls back to the
    live ticker price when no history rows are found."""
    link = 'https://hk.finance.yahoo.com/q/hp?s='+coCode[1:]+'.HK&a=00&b=4&c='+str(int(thisYear)-2)+'&d=00&e=13&f='+thisYear+'&g=d'
    soup = BeautifulSoup(nettools.tryConnect(link), 'html.parser')
    closes = []
    for row in soup.find_all('tr'):
        cells = row.find_all('td', {'class' : 'yfnc_tabledata1'})
        # Only complete 7-column data rows hold price history; the close
        # price sits in column index 4.
        if cells is None or len(cells) != 7:
            continue
        closes.append(float(cells[4].get_text()))
    if not closes:
        # No history table: use the real-time ticker for both values.
        live = float(soup.find('span', {'class' : 'time_rtq_ticker'}).get_text())
        return live, live
    average = round(sum(closes) / len(closes), 3)
    return average, closes[0]
#===============================================================================================================================================
def collectData(coList):
    """Refresh financial figures for every company (skipping the header row)
    and persist the updated listing back to coList.csv."""
    for company in coList[1:]:
        company = cNS(company)
    with open(os.path.join(folderPath, 'coList.csv'), 'w+', newline='', encoding='utf-8') as handle:
        csv.writer(handle).writerows(coList)
    return coList
#===============================================================================================================================================
def collectCAR(coCode):
    """Scrape the 'Capital Adequacy (%)' row for a bank from the AAStocks
    financial-ratios page and return it as [label, value, value, ...].

    Returns None implicitly if the row is not found.
    """
    link = 'http://www.aastocks.com/en/stocks/analysis/company-fundamental/financial-ratios?symbol=' + coCode
    content = nettools.tryConnect(link)
    coSoup = BeautifulSoup(content, 'html.parser')
    for trInfo in coSoup.find_all('tr', {'ref' : re.compile(r'[A-Z]+_Field(.*)+')}):
        fData = []
        fData.append(trInfo.find('td', {'class' : re.compile(r'fieldWithoutBorder')}).get_text().strip())
        if (fData[0] == 'Capital Adequacy (%)'):
            # The Closing-Date branch of this conditional can never fire here
            # (fData[0] is 'Capital Adequacy (%)'); kept as in the sibling
            # scraper cNS.
            for tdInfo in trInfo.find_all('td', {'class' : re.compile(r'(.*)cls(.*)')}):
                fData.append(tdInfo.get_text().split('/')[0]) if (fData[0] == 'Closing Date') else fData.append(tdInfo.get_text())
            return fData
#===============================================================================================================================================
def copyFiles():
    """Copy coList.csv and IndustryIndex.csv from the working directory into
    outputPath (e.g. a Dropbox folder)."""
    import shutil
    # FIX: use shutil.copy instead of shelling out to the Windows-only
    # 'copy' command — portable and avoids quoting problems in paths.
    for name in ('coList.csv', 'IndustryIndex.csv'):
        shutil.copy(name, os.path.join(outputPath, name))
#===============================================================================================================================================
#*********************************** Calculation ***********************************
#===============================================================================================================================================
def calRatio(coList):
    """Compute and write the ratio file for every company in coList."""
    #Exclude '02277' and '08346'?
    for coInfo in coList:
        print('Calculating ratios for %s' %coInfo[0])
        # FIX: the original did threading.Thread(target=calNOut(coInfo)),
        # which CALLS calNOut immediately on the main thread and then starts
        # a thread whose target is its return value (None) — i.e. a no-op
        # thread per company. The work was therefore always serial; run it
        # serially and explicitly. (If real parallelism is wanted, use
        # threading.Thread(target=calNOut, args=(coInfo,)) and join.)
        calNOut(coInfo)
#===============================================================================================================================================
def calNOut(coInfo):
    """Read a company's Figures.csv, compute its ratio rows (layout depends
    on coInfo[15]: 'FI' vs 'NFI'), and write them to Ratios.csv.

    The bracketed [n] comments give each ratio's final row index in the
    output; NFI-only rows are insert()-ed into the shared list, which is why
    the general rows are appended first and positions shift afterwards.
    """
    allRatios = []
    fData = utiltools.readCSV(os.path.join(folderPath, 'Companies', coInfo[0], 'Financial', 'Figures.csv'))
    # Normalise an alternate label so fKey lookups below are uniform.
    if (fData[18][0]=='Interest Paid'): fData[18][0]='Interest Expense'
    # fKey maps row label -> row index; fData keeps only the value columns.
    fKey = {dataPoint[0] : key for key, dataPoint in enumerate(fData)}
    fData = [row[1:] for row in fData]
    figUnit = 1
    if (fData[fKey['Unit']][0]=='Thousand'): figUnit = 1000
    if (fData[fKey['Unit']][0]=='Million'): figUnit = 1000000
    #----------------------------------------------------------------------------------------------------------
    #General
    #----------------------------------------------------------------------------------------------------------
    #[0] Currency - Unit
    allRatios.append(fData[fKey['Currency']])
    #[1] Unit
    allRatios.append(fData[fKey['Unit']])
    #[2] Ratio for Year ended
    allRatios.append(fData[fKey['Closing Date']])
    #[4] EBITDA Margin
    if (coInfo[15]=='FI'): allRatios.append(ratioFormula(1, a=fData[fKey['Profit Before Taxation']], b=fData[fKey['Total Turnover']]))
    if (coInfo[15]=='NFI'): allRatios.append(ratioFormula(1, a=fData[fKey['EBITDA']], b=fData[fKey['Total Turnover']]))
    #[12] Average Total Assets
    allRatios.append(ratioFormula(2, a=fData[fKey['Total Assets']]))
    #[13] Average Total Equity
    allRatios.append(ratioFormula(2, a=fData[fKey['Total Equity']]))
    #[14] Assets Turnover
    allRatios.append(ratioFormula(1, a=fData[fKey['Net Profit']], b=allRatios[4]))
    #[15] Leverage Ratio
    allRatios.append(ratioFormula(1, a=allRatios[4], b=allRatios[5]))
    #[16] ROE  (DuPont: margin * turnover * leverage)
    allRatios.append(ratioFormula(3, a=allRatios[3], b=allRatios[6], c=allRatios[7]))
    #[19] 3 Months Average
    threeMonthAverage, lastPrice = stockPriceInfo(coInfo[0])
    allRatios.append(toList(len(allRatios[0]), threeMonthAverage))
    #[20] Latest Price
    allRatios.append(toList(len(allRatios[0]), lastPrice))
    #----------------------------------------------------------------------------------------------------------
    #Non-FI
    #----------------------------------------------------------------------------------------------------------
    if (coInfo[15]=='NFI'):
        #[3] Gross Profit Margin
        allRatios.insert(3, ratioFormula(1, a=fData[fKey['Gross Profit']], b=fData[fKey['Total Turnover']]))
        #[5] Net Profit Margin
        allRatios.insert(5, ratioFormula(1, a=fData[fKey['Net Profit']], b=fData[fKey['Total Turnover']]))
        #[6] EBITDA Coverage
        allRatios.insert(6, ratioFormula(1, a=fData[fKey['EBITDA']], b=fData[fKey['Interest Expense']]))
        #[7] Current Ratio
        allRatios.insert(7, ratioFormula(1, a=fData[fKey['Current Assets']], b=fData[fKey['Current Liabilities']]))
        #[8] Quick Ratio
        allRatios.insert(8, ratioFormula(1, a=fData[fKey['Cash On Hand']], b=fData[fKey['Current Liabilities']]))
        #[9] NAV
        allRatios.insert(9, fData[fKey['Owner\'s Equity']])
        #[10] Debt to Assets
        allRatios.insert(10, ratioFormula(1, a=fData[fKey['Total Liabilities']], b=fData[fKey['Total Assets']]))
        #[11] Debt to Equity
        allRatios.insert(11, ratioFormula(1, a=fData[fKey['Total Liabilities']], b=fData[fKey['Owner\'s Equity']]))
        #[17] Z-Score — inputs are the latest-period figures ([-1]).
        CA = cleanVar(fData[fKey['Current Assets']])[-1]
        CL = cleanVar(fData[fKey['Current Liabilities']])[-1]
        reserves = cleanVar(fData[fKey['Reserves']])[-1]
        EBITDA = cleanVar(fData[fKey['EBITDA']])[-1]
        depN = cleanVar(fData[fKey['Depreciation']])[-1]
        averageTD = sum(cleanVar(fData[fKey['Total Liabilities']][-2:]))/2
        netProfit = cleanVar(fData[fKey['Net Profit']])[-1]
        allRatios.insert(17, findZscore(len(allRatios[0]), CA, CL, allRatios[12][-1], reserves, EBITDA, depN, int(coInfo[9].replace(',', '')), allRatios[17][-1], figUnit, averageTD, netProfit))
        #[18] PE Ratio  (market cap in HKD / net profit converted to HKD)
        # allRatios.append(toList(len(allRatios[0]), 0 if (int(fData[fKey['Net Profit']][-1].replace(',', '')) < 0) else ((allRatios[9] * int(coInfo[9].replace(',', ''))) / (int(fData[fKey['Net Profit']][-1]) * figUnit))))
        allRatios.insert(18, toList(len(allRatios[0]), ((allRatios[18][-1] * int(coInfo[9].replace(',', ''))) / (int(netProfit) * figUnit * float(fxRate[coInfo[8]])))))
    #----------------------------------------------------------------------------------------------------------
    #FI
    #----------------------------------------------------------------------------------------------------------
    if (coInfo[15]=='FI'):
        #[3] NAV
        allRatios.insert(6, fData[fKey['Equity']])
        #[10] ROA
        allRatios.insert(10, ratioFormula(1, a=fData[fKey['Net Profit']], b=allRatios[4]))
        #[11] Loan/Deposit Ratio
        allRatios.insert(11, ratioFormula(1, a=fData[fKey['Loans']], b=fData[fKey['Deposits']]))
        #[12] Efficiency Ratio
        allRatios.insert(12, ratioFormula(1, a=fData[fKey['Operating Expenses']], b=fData[fKey['Total Turnover']]))
        #[13] Bad Debts Provision
        allRatios.insert(13, fData[fKey['Bad Debt Provisions']])
        #[14] Net Interest Spread
        allRatios.insert(14, ratioFormula(1, a=fData[fKey['Net Interest Income']], b=ratioFormula(4, a=fData[fKey['Loans']], b=fData[fKey['Financial Asset']], c=fData[fKey['Other Assets']])))
        #[15] Capital Adequacy Ratio (CAR) — scraped live from AAStocks.
        allRatios.insert(15, collectCAR(coInfo[0])[1:])
    # Prefix each row with its label and write the final Ratios.csv.
    allRatios = [[labelDict[coInfo[15]][i]]+allRatio for i, allRatio in enumerate(allRatios)]
    with open(os.path.join(folderPath, 'Companies', coInfo[0], 'Financial', 'Ratios.csv'), 'w+', newline='', encoding='utf-8') as csvfile:
        csvwriter = csv.writer(csvfile)
        for allRatio in allRatios: csvwriter.writerow(allRatio)
    del allRatio
#Fix Positions
#===============================================================================================================================================
def ratioFormula(mode, a=None, b=None, c=None):
    """Compute a ratio series from cleaned figure series.

    mode 1: element-wise a/b ('-' where b is 0)
    mode 2: trailing two-period averages of a ('-' for the first period)
    mode 3: element-wise a*b*c ('-' where the product is 0)
    mode 4: element-wise a+b+c
    Unknown modes return an empty list.
    """
    if a is not None:
        a = cleanVar(a)
    if b is not None:
        b = cleanVar(b)
    if c is not None:
        c = cleanVar(c)
    if mode == 1:
        return ['-' if denom == 0 else numer / denom for numer, denom in zip(a, b)]
    if mode == 2:
        return ['-'] + [(curr + prev) / 2 for curr, prev in zip(a[1:], a[:-1])]
    if mode == 3:
        return ['-' if (x * y * z) == 0 else (x * y * z) for x, y, z in zip(a, b, c)]
    if mode == 4:
        return [x + y + z for x, y, z in zip(a, b, c)]
    return []
#===============================================================================================================================================
def findZscore(listLen, CA, CL, averageTA, reserves, EBITDA, depN, issuedShares, threeMonthAverage, figUnit, averageTD, netProfit):
    """Compute an Altman Z-score from the latest-period figures and return it
    as a toList() row of length listLen (value in the last slot)."""
    working_capital = (CA - CL) / averageTA
    retained_earnings = reserves / averageTA
    operating_return = (EBITDA - depN) / averageTA
    market_to_debt = (issuedShares * threeMonthAverage) / (figUnit * averageTD)
    profitability = netProfit / averageTA
    z_score = round((1.2 * working_capital) + (1.4 * retained_earnings)
                    + (3.3 * operating_return) + (0.6 * market_to_debt) + profitability, 3)
    return toList(listLen, z_score)
#===============================================================================================================================================
def cleanVar(dataSet):
    """Normalise a series of scraped figures to numbers.

    '0', '' and '-' become 0; values that are already int/float pass through;
    any other string has its thousands separators stripped and is parsed as
    an int.
    """
    cleaned = []
    for value in dataSet:
        if value == '0' or value == '' or value == '-':
            cleaned.append(0)
        elif type(value) is float or type(value) is int:
            cleaned.append(value)
        else:
            cleaned.append(int(value.replace(',', '')))
    return cleaned
#===============================================================================================================================================
def toList(listLen, inputValue):
    """Return a list of listLen entries: '-' placeholders everywhere except
    the final slot, which holds inputValue."""
    return ['-'] * (listLen - 1) + [inputValue]
#===============================================================================================================================================
#********************************* Classification ********************************
#===============================================================================================================================================
def classifyList(coList):
    """Group companies by industry and write IndustryIndex.csv, one row per
    industry: [industry_name, code, code, ...].

    Industry comes from the last ' - ' segment of coInfo[7]; companies
    labelled 'Banks' are kept as 'Banks' only when coInfo[15] == 'FI',
    otherwise they become 'Other Financials'.
    """
    # classList = {item[0] : item[7].split(' (')[0].split(' - ')[-1] for item in coList if item[15] != 'FI'}
    # classList.update({item[0] : 'Financial Services' for item in coList if item[15] == 'FI'})
    classList = {item[0] : item[7].split(' (')[0].split(' - ')[-1] if (item[7].split(' (')[0].split(' - ')[-1] != 'Banks') else item[7].split(' (')[0].split(' - ')[-1] if (item[15] == 'FI') else 'Other Financials' for item in coList}
    uniqueList = sorted(set(classList.values()))
    with open(os.path.join(folderPath, 'IndustryIndex.csv'), 'w+', newline='', encoding='utf-8') as csvfile:
        csvwriter = csv.writer(csvfile)
        for industry in uniqueList:
            listToWrite = [industry] + [coCode for coCode in classList if classList[coCode] == industry]
            csvwriter.writerow(listToWrite)
#===============================================================================================================================================
#************************************* Output ***************************************
#===============================================================================================================================================
#Extracts ratios and export as one industry
def industryRatio(sectorList, fileName, mode):
labelKey = {0 : 'NFI', 1 : 'FI'}
sectorRatios = list(labelDict[labelKey[mode]])
sectorRatios.insert(2, 'Code')
sectorRatios.insert(3, 'Name')
keyDict = {label : i for i, label in enumerate(sectorRatios)}
sectorRatios = [sectorRatios]
for coInfo in sectorList:
adjustFactor = 1
coRatios = utiltools.readCSV(os.path.join(folderPath, 'Companies', coInfo[0], 'Financial', 'Ratios.csv'))
coRatios = [coRatio[-1] for coRatio in coRatios]
coRatios.insert(2, coInfo[0])
coRatios.insert(3, coInfo[1])
fxAdjust = float(fxRate[coRatios[keyDict['Currency']]])
coRatios[keyDict['Currency']] = 'HKD'
unitAdjust = 1000 if coRatios[keyDict['Unit']]=='Thousand' else 1
coRatios[keyDict['Unit']] = 'Million'
toHKD = ['NAV', 'Average Total Assets', 'Average Total Equity']
if (mode): toHKD.extend(['Bad Debts Provision'])
for item in toHKD: coRatios[keyDict[item]] = round(float(coRatios[keyDict[item]].replace(',', '')) * fxAdjust / unitAdjust, 2)
dtoP = ['EBITDA Margin', 'Assets Turnover', 'Leverage Ratio', 'ROE']
dtoP.extend(['ROA', 'Loan/Deposit', 'Efficieny Ratio', 'Net Interest Spread']) if (mode) else dtoP.extend(['Gross Profit Margin', 'Net Profit Margin', 'Debt to Assets', 'Debt to Equity'])
for item in dtoP: coRatios[keyDict[item]] = 0 if coRatios[keyDict[item]] == '-' else round(float(coRatios[keyDict[item]].replace(',',''))*100, 2)
toRound = [] if (mode) else ['EBITDA Coverage','Current Ratio','Quick Ratio', 'PE']
for item in toRound: coRatios[keyDict[item]] = 0 if coRatios[keyDict[item]] == '-' else round(float(coRatios[keyDict[item]].replace(',','')), 2)
coRatios = [0 if item=='-' else round(float(item), 2) if (type(item) is float or type(item) is int) else round(float(item.replace(',','')), 2) if (',' in item) else item for i, item in enumerate(coRatios)]
sectorRatios.append(coRatios)
sectorRatios = list(map(list, zip(*sectorRatios)))
with open(os.path.join(folderPath, 'Industries', fileName + '.csv'), 'w+', newline='', encoding='utf-8') as csvfile:
csvwriter = csv.writer(csvfile)
for allRatio in sectorRatios: csvwriter.writerow(allRatio)
#Separate 'FI' companies
#Group others by industry
if (os.path.isdir(os.path.join(folderPath, 'ExcelRatios')) == False): os.system ('mkdir ' + os.path.join(folderPath, 'ExcelRatios'))
workbook = xlsxwriter.Workbook(os.path.join(folderPath, 'ExcelRatios', fileName + '.xlsx'))
worksheet = workbook.add_worksheet()
bold = workbook.add_format({'bold': True})
for i, rowItems in enumerate(sectorRatios):
for j, item in enumerate(rowItems):
worksheet.write(i, j, item, bold) if (j==0) else worksheet.write(i, j, item)
worksheet.set_column(0, 0, 20)
worksheet.set_column(1, len(sectorRatios[0])-1, 15)
worksheet.freeze_panes(0,1)
workbook.close()
#===============================================================================================================================================
#Extracts ratios and export as one industry, in excel format
def consolExcel(sectorNames):
    """Collect every sector's ratio CSV into one multi-sheet Excel workbook.

    One worksheet per sector, read from ``Industries/<sector>.csv``; the
    first column is bolded, widened, and frozen as a label column.
    """
    workbook = xlsxwriter.Workbook('consolRatios.xlsx')
    bold = workbook.add_format({'bold': True})
    # Registered for parity with the original workbook output, even though
    # the writes below do not reference them.
    percent_format = workbook.add_format({'num_format': '0.00"%"'})
    twodp_format = workbook.add_format({'num_format': '0.00""'})
    for sector in sectorNames:
        # xlsx sheet names are length-limited, so truncate to 30 chars.
        sheet = workbook.add_worksheet(sector[0:30])
        ratios = utiltools.readCSV(os.path.join('Industries', sector + '.csv'))
        for row_idx, row in enumerate(ratios):
            for col_idx, cell in enumerate(row):
                if col_idx == 0:
                    sheet.write(row_idx, col_idx, cell, bold)
                else:
                    sheet.write(row_idx, col_idx, cell)
        sheet.set_column(0, 0, 20)
        sheet.set_column(1, len(ratios[0]), 15)
        sheet.freeze_panes(0, 1)
    workbook.close()
#===============================================================================================================================================
#*********************************** Main Part ***********************************
#===============================================================================================================================================
def main(mode):
    """Drive the full pipeline: scrape -> ratios -> classify -> export."""
    # Master company list; the first row is treated as a header below.
    coList = utiltools.readCSV(os.path.join(folderPath, 'coList.csv'))
    # Re-collect data when explicitly requested (mode == 1) or when any row
    # has an empty column 15.  NOTE(review): assumes column 15 marks row
    # completeness -- confirm against collectData's output layout.
    if (mode == 1) or (sum([True if item[15] == '' else False for item in coList])): coList = collectData(coList)
    calRatio(coList[1:])
    classifyList(coList[1:])
    if (os.path.isdir(os.path.join(folderPath, 'Industries')) == False): os.system ('mkdir ' + os.path.join(folderPath, 'Industries'))
    sectorLists = utiltools.readCSV(os.path.join(folderPath, 'IndustryIndex.csv'))
    for sectorList in sectorLists:
        # Strip characters that are unsafe or noisy in file names.
        sectorName = sectorList[0].replace('/', '').replace('(HSIC*)','')
        print ('Working on : ' + sectorName)
        sectorCoInfo = [coInfo[0:2] for coInfo in coList if coInfo[0] in sectorList]
        # Banks use the financial-institution ratio layout (mode flag 1).
        industryRatio(sectorCoInfo, sectorName, 1) if (sectorName == 'Banks') else industryRatio(sectorCoInfo, sectorName, 0)
    consolExcel([sectorList[0].replace('/', '').replace('(HSIC*)','') for sectorList in sectorLists])
    copyFiles()
#=============================================================================================================================================== | [
"jefflai0101@gmail.com"
] | jefflai0101@gmail.com |
4e5832c5c5b4c8807e7bcdabe9568f504fedc426 | 67b8ea7f463e76a74d5144202952e6c8c26a9b75 | /cluster-env/bin/undill | 455df88dec71f58462d5ce48b4a02c95dad99b63 | [
"MIT"
] | permissive | democrazyx/elecsim | 5418cb99962d7ee2f9f0510eb42d27cd5254d023 | e5b8410dce3cb5fa2e869f34998dfab13161bc54 | refs/heads/master | 2023-06-27T09:31:01.059084 | 2021-07-24T19:39:36 | 2021-07-24T19:39:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 617 | #!/home/alexkell/elecsim/cluster-env/bin/python3
#
# Author: Mike McKerns (mmckerns @caltech and @uqfoundation)
# Copyright (c) 2008-2016 California Institute of Technology.
# Copyright (c) 2016-2019 The Uncertainty Quantification Foundation.
# License: 3-clause BSD. The full license text is available at:
# - https://github.com/uqfoundation/dill/blob/master/LICENSE
"""
unpickle the contents of a pickled object file
Examples::
$ undill hello.pkl
['hello', 'world']
"""
if __name__ == '__main__':
    import sys
    import dill
    # Unpickle and print each file named on the command line.
    for path in sys.argv[1:]:
        # Fixes: the original leaked the handle returned by open() and
        # shadowed the (Python 2) builtin name `file`.
        with open(path, 'rb') as stream:
            print(dill.load(stream))
| [
"alexander@kell.es"
] | alexander@kell.es | |
c4990568bf1c185dddb218412e9dedc79ddb809c | 27dc7899caf408ed3a1996d5bdd21c7e37b420ac | /GBFVI/tetris.py | e877885d27dd11d74653f67aff5dc85520583995 | [] | no_license | kkroy36/Non-parametric-Fitted-Relational-VI | b62803ce1767dab665caa410cfe8e51730493152 | f4d2c692db88bae5ba3fbf9caaa338f8640f40e3 | refs/heads/master | 2021-06-26T20:01:48.442679 | 2019-03-11T21:24:56 | 2019-03-11T21:24:56 | 118,942,103 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 14,514 | py | #!/usr/bin/env python2
#-*- coding: utf-8 -*-
import random
from pykeyboard import PyKeyboard
def send_key(key):
    """Simulate one key press-and-release via PyKeyboard."""
    kb = PyKeyboard()
    kb.press_key(key)
    kb.release_key(key)
# NOTE FOR WINDOWS USERS:
# You can download a "exefied" version of this game at:
# http://hi-im.laria.me/progs/tetris_py_exefied.zip
# If a DLL is missing or something like this, write an E-Mail (me@laria.me)
# or leave a comment on this gist.
# Very simple tetris implementation
#
# Control keys:
# Down - Drop stone faster
# Left/Right - Move stone
# Up - Rotate Stone clockwise
# Escape - Quit game
# P - Pause game
# Return - Instant drop
#
# Have fun!
# Copyright (c) 2010 "Laria Carolin Chabowski"<me@laria.me>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from random import randrange as rand
import pygame, sys
# The configuration
cell_size = 18
cols = 10
rows = 22
maxfps = 30
colors = [
(0, 0, 0 ),
(255, 85, 85),
(100, 200, 115),
(120, 108, 245),
(255, 140, 50 ),
(50, 120, 52 ),
(146, 202, 73 ),
(150, 161, 218 ),
(35, 35, 35) # Helper color for background grid
]
# Define the shapes of the single parts
tetris_shapes = [
[[1, 1, 1],
[0, 1, 0]],
[[0, 2, 2],
[2, 2, 0]],
[[3, 3, 0],
[0, 3, 3]],
[[4, 0, 0],
[4, 4, 4]],
[[0, 0, 5],
[5, 5, 5]],
[[6, 6, 6, 6]],
[[7, 7],
[7, 7]]
]
def rotate_clockwise(shape):
    """Return *shape* (a list of rows) rotated 90 degrees clockwise.

    Uses ``range`` instead of the Python-2-only ``xrange`` so the helper
    works under both interpreters; output is an unchanged new list of lists.
    """
    return [[shape[y][x] for y in range(len(shape))]
            for x in range(len(shape[0]) - 1, -1, -1)]
def check_collision(board, shape, offset):
    """True when *shape* placed at *offset* overlaps a filled board cell
    or extends past the board; False otherwise."""
    off_x, off_y = offset
    for row_idx, row in enumerate(shape):
        y = row_idx + off_y
        for col_idx, cell in enumerate(row):
            if not cell:
                continue
            try:
                if board[y][col_idx + off_x]:
                    return True
            except IndexError:
                # Running off the end of the board counts as a collision.
                return True
    return False
def remove_row(board, row):
    """Delete row index *row* in place and push a fresh empty row on top.

    ``[[0] * cols]`` replaces the Python-2-only ``xrange`` comprehension;
    the returned structure is identical.
    """
    del board[row]
    return [[0] * cols] + board
def join_matrixes(mat1, mat2, mat2_off):
    """Stamp *mat2* onto *mat1* in place at *mat2_off* and return *mat1*.

    Note the historical one-row upward shift (``off_y - 1``) that the rest
    of the game relies on.
    """
    off_x, off_y = mat2_off
    for row_idx, row in enumerate(mat2):
        target = mat1[row_idx + off_y - 1]
        for col_idx, val in enumerate(row):
            target[col_idx + off_x] += val
    return mat1
def new_board():
    """Build an empty playfield plus a solid sentinel row at the bottom.

    ``range``/list-repeat replace the Python-2-only ``xrange``
    comprehensions; the resulting structure is identical (each playfield
    row is a distinct list).
    """
    board = [[0] * cols for _ in range(rows)]
    board.append([1] * cols)
    return board
class TetrisApp(object):
    """Interactive pygame Tetris front end.

    Owns the pygame window, the falling/next stones, the board and the
    score/level bookkeeping; ``run()`` is the blocking event loop.
    """
    def __init__(self):
        pygame.init()
        # Held keys auto-repeat: 250ms initial delay, then every 25ms.
        pygame.key.set_repeat(250,25)
        # Board occupies `cols` cells; 6 extra columns host the sidebar.
        self.width = cell_size*(cols+6)
        self.height = cell_size*rows
        self.rlim = cell_size*cols
        self.gameover = False
        self.paused = False
        self.n_stones = 0
        # Checkerboard background (color index 8 on alternating cells).
        self.bground_grid = [[ 8 if x%2==y%2 else 0 for x in xrange(cols)] for y in xrange(rows)]
        # Per-shape spawn counters, keyed by the shape as a tuple of tuples.
        self.shape_counts = {}
        for shape in tetris_shapes:
            self.shape_counts[tuple([tuple(item) for item in shape])] = 0
        self.default_font = pygame.font.Font(
            pygame.font.get_default_font(), 12)
        self.screen = pygame.display.set_mode((self.width, self.height))
        # We do not need mouse movement events, so we block them.
        pygame.event.set_blocked(pygame.MOUSEMOTION)
        self.next_stone = tetris_shapes[rand(len(tetris_shapes))]
        self.init_game()
    def new_stone(self):
        # Promote the preview stone to the active one and pick a new preview.
        self.stone = self.next_stone[:]
        if self.stone in tetris_shapes:
            stone_tuple = tuple([tuple(item) for item in self.stone])
            self.shape_counts[stone_tuple] += 1
            self.n_stones += 1
        self.next_stone = tetris_shapes[rand(len(tetris_shapes))]
        # Spawn horizontally centered at the top of the board.
        self.stone_x = int(cols / 2 - len(self.stone[0])/2)
        self.stone_y = 0
        # Spawning into an occupied cell means the stack reached the top.
        if check_collision(self.board,
                           self.stone,
                           (self.stone_x, self.stone_y)):
            self.gameover = True
    def init_game(self):
        # Fresh board, first stone, reset counters; USEREVENT+1 is the
        # gravity timer, initially one tick per second.
        self.board = new_board()
        self.new_stone()
        self.level = 1
        self.score = 0
        self.lines = 0
        pygame.time.set_timer(pygame.USEREVENT+1, 1000)
    def disp_msg(self, msg, topleft):
        # Render msg line by line starting at topleft (14px line spacing).
        x,y = topleft
        for line in msg.splitlines():
            self.screen.blit(
                self.default_font.render(
                    line,
                    False,
                    (255,255,255),
                    (0,0,0)),
                (x,y))
            y+=14
    def center_msg(self, msg):
        # Render msg centered on screen, one line per 22px.
        for i, line in enumerate(msg.splitlines()):
            msg_image = self.default_font.render(line, False,
                (255,255,255), (0,0,0))
            msgim_center_x, msgim_center_y = msg_image.get_size()
            msgim_center_x //= 2
            msgim_center_y //= 2
            self.screen.blit(msg_image, (
                self.width // 2-msgim_center_x,
                self.height // 2-msgim_center_y+i*22))
    def draw_matrix(self, matrix, offset):
        # Draw every non-zero cell as a colored square; offset is in cells.
        off_x, off_y = offset
        for y, row in enumerate(matrix):
            for x, val in enumerate(row):
                if val:
                    pygame.draw.rect(
                        self.screen,
                        colors[val],
                        pygame.Rect(
                            (off_x+x) *
                            cell_size,
                            (off_y+y) *
                            cell_size,
                            cell_size,
                            cell_size),0)
    def add_cl_lines(self, n):
        # Classic Tetris scoring for n simultaneously cleared lines.
        linescores = [0, 40, 100, 300, 1200]
        self.lines += n
        self.score += linescores[n] * self.level
        # Level up every 6 lines; each level shortens the gravity timer,
        # floored at 100ms.
        if self.lines >= self.level*6:
            self.level += 1
            newdelay = 1000-50*(self.level-1)
            newdelay = 100 if newdelay < 100 else newdelay
            pygame.time.set_timer(pygame.USEREVENT+1, newdelay)
    def move(self, delta_x):
        # Shift the falling stone horizontally, clamped to the board edges;
        # ignored when blocked, paused, or after game over.
        if not self.gameover and not self.paused:
            new_x = self.stone_x + delta_x
            if new_x < 0:
                new_x = 0
            if new_x > cols - len(self.stone[0]):
                new_x = cols - len(self.stone[0])
            if not check_collision(self.board,
                                   self.stone,
                                   (new_x, self.stone_y)):
                self.stone_x = new_x
    def quit(self):
        # Show a farewell message and terminate the process.
        self.center_msg("Exiting...")
        pygame.display.update()
        sys.exit()
    def drop(self, manual):
        # Advance the stone one row; on landing, merge it into the board,
        # spawn the next stone and clear completed rows.  Returns True
        # when the stone landed, False otherwise.  Manual (soft) drops
        # score one point per row.
        if not self.gameover and not self.paused:
            self.score += 1 if manual else 0
            self.stone_y += 1
            if check_collision(self.board,
                               self.stone,
                               (self.stone_x, self.stone_y)):
                self.board = join_matrixes(
                    self.board,
                    self.stone,
                    (self.stone_x, self.stone_y))
                self.new_stone()
                cleared_rows = 0
                # Repeatedly remove the first full row (the sentinel bottom
                # row is excluded by [:-1]) until none remain.
                while True:
                    for i, row in enumerate(self.board[:-1]):
                        if 0 not in row:
                            self.board = remove_row(
                                self.board, i)
                            cleared_rows += 1
                            break
                    else:
                        break
                self.add_cl_lines(cleared_rows)
                return True
        return False
    def insta_drop(self):
        # Hard drop: repeat single-row drops until the stone lands.
        if not self.gameover and not self.paused:
            while(not self.drop(True)):
                pass
    def rotate_stone(self):
        # Rotate clockwise only when the rotated shape fits.
        if not self.gameover and not self.paused:
            new_stone = rotate_clockwise(self.stone)
            if not check_collision(self.board,
                                   new_stone,
                                   (self.stone_x, self.stone_y)):
                self.stone = new_stone
    def toggle_pause(self):
        self.paused = not self.paused
    def start_game(self):
        # Restart after a game over; no-op mid-game.
        if self.gameover:
            self.init_game()
            self.gameover = False
    def run(self):
        """Blocking main loop: render, pump events, apply gravity."""
        key_actions = {
            'ESCAPE': self.quit,
            'a': lambda:self.move(-1),
            'd': lambda:self.move(+1),
            's': lambda:self.drop(True),
            'w': self.rotate_stone,
            'p': self.toggle_pause,
            'SPACE': self.start_game,
            'RETURN': self.insta_drop
        }
        self.gameover = False
        self.paused = False
        dont_burn_my_cpu = pygame.time.Clock()
        i = 0
        while 1:
            self.screen.fill((0,0,0))
            if self.gameover:
                self.center_msg("""Game Over!\nYour score: %d
Press space to continue""" % self.score)
            else:
                if self.paused:
                    self.center_msg("Paused")
                else:
                    # Sidebar separator, preview, and scoreboard.
                    pygame.draw.line(self.screen,
                        (255,255,255),
                        (self.rlim+1, 0),
                        (self.rlim+1, self.height-1))
                    self.disp_msg("Next:", (
                        self.rlim+cell_size,
                        2))
                    self.disp_msg("Score: %d\n\nLevel: %d\
\nLines: %d" % (self.score, self.level, self.lines),
                        (self.rlim+cell_size, cell_size*5))
                    self.draw_matrix(self.bground_grid, (0,0))
                    self.draw_matrix(self.board, (0,0))
                    self.draw_matrix(self.stone,
                        (self.stone_x, self.stone_y))
                    self.draw_matrix(self.next_stone,
                        (cols+1,2))
            pygame.display.update()
            for event in pygame.event.get():
                #send_key('s')
                if event.type == pygame.USEREVENT+1:
                    # Gravity timer tick: automatic (unscored) drop.
                    self.drop(False)
                elif event.type == pygame.QUIT:
                    self.quit()
                elif event.type == pygame.KEYDOWN:
                    for key in key_actions:
                        if event.key == eval("pygame.K_"
                        +key):
                            key_actions[key]()
            dont_burn_my_cpu.tick(maxfps)
            i += 1
class Tetris(object):
    """RL-style wrapper around TetrisApp: states, actions, transitions.

    An instance *is* a state: ``execute_action`` advances the underlying
    game one frame and returns ``self`` with refreshed counters.
    """
    # Background knowledge / target predicates for the relational learner.
    bk = ["shape_count(+state,+shape,[high;low])",
          "value(state)"]

    def __init__(self, number=1, start=False):
        """Create a state; with start=True, boot a fresh pygame game."""
        if start:
            self.nstones = 0
            self.state_number = 1
            self.goal_state = False
            self.shape_counts = [0 for i in range(7)]
            self.run = TetrisApp()
        # Legal keys: rotate / left / soft-drop / right.
        self.all_actions = ['w', 'a', 's', 'd']

    def goal(self):
        """True once the underlying game reported game-over."""
        if self.goal_state:
            return True
        return False

    def execute_action(self, action):
        """Advance the game one frame by injecting *action* as a key press.

        Unknown actions leave the game untouched (but still consume a
        state number).  Returns self, updated with new stone counters.
        """
        self.state_number += 1
        if action not in self.all_actions:
            # BUG FIX: the original returned the undefined name ``state``
            # (a NameError); the current state object is what callers expect.
            return self
        key_actions = {
            'ESCAPE': self.run.quit,
            'a': lambda: self.run.move(-1),
            'd': lambda: self.run.move(+1),
            's': lambda: self.run.drop(True),
            'w': self.run.rotate_stone,
            'p': self.run.toggle_pause,
            'SPACE': self.run.start_game,
            'RETURN': self.run.insta_drop
        }
        dont_burn_my_cpu = pygame.time.Clock()
        i = 0
        while i < 1:  # run exactly one frame of the game loop
            self.run.screen.fill((0, 0, 0))
            if self.run.gameover:
                self.goal_state = True
                return self
            else:
                if self.run.paused:
                    self.run.center_msg("Paused")
                else:
                    # Same drawing sequence as TetrisApp.run().
                    pygame.draw.line(self.run.screen,
                                     (255, 255, 255),
                                     (self.run.rlim + 1, 0),
                                     (self.run.rlim + 1, self.run.height - 1))
                    self.run.disp_msg("Next:", (
                        self.run.rlim + cell_size,
                        2))
                    self.run.disp_msg("Score: %d\n\nLevel: %d\\nLines: %d" % (self.run.score, self.run.level, self.run.lines), (self.run.rlim + cell_size, cell_size * 5))
                    self.run.draw_matrix(self.run.bground_grid, (0, 0))
                    self.run.draw_matrix(self.run.board, (0, 0))
                    self.run.draw_matrix(self.run.stone,
                                         (self.run.stone_x, self.run.stone_y))
                    self.run.draw_matrix(self.run.next_stone,
                                         (cols + 1, 2))
            pygame.display.update()
            for event in pygame.event.get():
                # Inject the chosen action as a synthetic key press so the
                # normal KEYDOWN handling below picks it up.
                send_key(action)
                if event.type == pygame.USEREVENT + 1:
                    self.run.drop(False)
                elif event.type == pygame.QUIT:
                    self.run.quit()
                elif event.type == pygame.KEYDOWN:
                    for key in key_actions:
                        if event.key == eval("pygame.K_" + key):
                            key_actions[key]()
            dont_burn_my_cpu.tick(maxfps)
            i += 1
        # Mirror the game's counters into this state object.  list() keeps
        # this a real list under Python 3 too (it already is on Python 2).
        self.nstones = self.run.n_stones
        self.shape_counts = list(self.run.shape_counts.values())
        return self

    def get_state_facts(self):
        """Relational facts for this state: one shape_count/3 per shape."""
        facts = []
        for i, count in enumerate(self.shape_counts):
            # More than two spawns of a shape counts as 'high'.
            level = 'high' if count > 2 else 'low'
            facts.append("shape_count(s" + str(self.state_number) +
                         ",sh" + str(i + 1) + "," + level + ")")
        return facts

    def sample(self, pdf):
        """Draw one item from (item, probability) pairs in *pdf*.

        Builds a discrete CDF and picks the largest item whose cumulative
        mass does not exceed a single uniform draw (scheme kept verbatim).
        """
        cdf = [(i, sum(p for j, p in pdf if j < i)) for i, _ in pdf]
        R = max(i for r in [random.random()] for i, c in cdf if c <= r)
        return R

    def execute_random_action(self, N=4):
        """Sample an action from a random ad-hoc distribution and run it.

        Returns (new_state, [sampled_action], actions_not_executed).
        """
        random_actions = []
        action_potentials = []
        for i in range(N):
            random_action = random.choice(self.all_actions)
            random_actions.append(random_action)
            action_potentials.append(random.randint(1, 9))
        action_probabilities = [potential / float(sum(action_potentials))
                                for potential in action_potentials]
        # NOTE(review): this excludes the *last drawn* candidate rather than
        # the action actually sampled below -- preserved as-is to keep
        # behavior identical; confirm the intended semantics.
        actions_not_executed = [action for action in self.all_actions
                                if action != random_action]
        probability_distribution_function = zip(random_actions,
                                                action_probabilities)
        sampled_action = self.sample(probability_distribution_function)
        new_state = self.execute_action(sampled_action)
        return (new_state, [sampled_action], actions_not_executed)

    def factored(self, state):
        """Numeric feature vector: [total stones] + per-shape counts."""
        return [self.nstones] + self.shape_counts

    def __repr__(self):
        """Comma-separated counters: nstones followed by shape counts."""
        output_string = str(self.nstones) + ","
        output_string += ",".join([str(x) for x in self.shape_counts])
        return output_string
'''
with open("tetris_out.txt","a") as f:
i = 0
while i < 2:
state = Tetris(start = True)
f.write("start state: "+str(state.get_state_facts())+"\n")
while not state.goal():
f.write("="*80+"\n")
state_action_pair = state.execute_random_action()
state = state_action_pair[0]
f.write(str(state.get_state_facts())+"\n")
i += 1
'''
| [
"kkroy36@gmail.com"
] | kkroy36@gmail.com |
76e7db842526119014fe55bbb2a46e55b8298270 | 6db200ea5cc6bf077559c052cd76764e41aa9735 | /06_default_dict/default_dict.py | 55456dc5c4b2ea2a8a69b0c5e1cdc92bf17d31db | [] | no_license | beeva-manueldepaz/ninja_taller_python_2 | d948d7017284541dc9c2df7518985b47c370f4ff | 6b7cb0ed8843c062bb8a7a2828261f30fe4afc57 | refs/heads/master | 2021-01-20T01:56:58.228134 | 2017-05-16T10:19:48 | 2017-05-16T10:19:48 | 89,349,731 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 226 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Demo: group keys into a dict of lists, alternating between two keys.
a = ["hola", "mundo"]
results = {}
for i in range(1000):
    k = a[0] if i % 2 == 0 else a[1]
    # setdefault replaces the `k not in results.keys()` anti-pattern
    # (which did a linear membership scan via .keys() in Python 2 and an
    # extra lookup either way); the unused enumerate value is gone too.
    results.setdefault(k, [])
print(results)
| [
"meusebio.depaz@bbva.com"
] | meusebio.depaz@bbva.com |
16ab1b9d3b85f79ef340f774f64f000adccb6a0b | b0ebf50ac10a43b8dcc56bdc107fd7dcb371a97c | /cloud-init-0.7.9/usr/lib/python3/dist-packages/cloudinit/config/cc_snappy.py | a9682f1966619879d982f4cafb75640b02b2cda9 | [] | no_license | rom1212/cloud-init | 2d73d904332be436612b36c74c94cf83c6949928 | 0becf0f79c72037a6e02de90c83b9c0995cdb36d | refs/heads/master | 2021-07-05T00:06:16.989437 | 2017-09-26T17:05:58 | 2017-09-26T17:05:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,598 | py | # This file is part of cloud-init. See LICENSE file for license information.
"""
Snappy
------
**Summary:** the snappy module allows configuration of snappy.
The example config below would install ``etcd``, and then install
``pkg2.smoser`` with a ``<config-file>`` argument where ``config-file`` has
``config-blob`` inside it. If ``pkgname`` is installed already, then
``snappy config pkgname <file>``
will be called where ``file`` has ``pkgname-config-blob`` as its content.
Entries in ``config`` can be namespaced or non-namespaced for a package.
In either case, the config provided to snappy command is non-namespaced.
The package name is provided as it appears.
If ``packages_dir`` has files in it that end in ``.snap``, then they are
installed. Given 3 files:
- <packages_dir>/foo.snap
- <packages_dir>/foo.config
- <packages_dir>/bar.snap
cloud-init will invoke:
- snappy install <packages_dir>/foo.snap <packages_dir>/foo.config
- snappy install <packages_dir>/bar.snap
.. note::
that if provided a ``config`` entry for ``ubuntu-core``, then
cloud-init will invoke: snappy config ubuntu-core <config>
Allowing you to configure ubuntu-core in this way.
The ``ssh_enabled`` key controls the system's ssh service. The default value
is ``auto``. Options are:
- **True:** enable ssh service
- **False:** disable ssh service
- **auto:** enable ssh service if either ssh keys have been provided
or user has requested password authentication (ssh_pwauth).
**Internal name:** ``cc_snappy``
**Module frequency:** per instance
**Supported distros:** ubuntu
**Config keys**::
#cloud-config
snappy:
system_snappy: auto
ssh_enabled: auto
packages: [etcd, pkg2.smoser]
config:
pkgname:
key2: value2
pkg2:
key1: value1
packages_dir: '/writable/user-data/cloud-init/snaps'
"""
from cloudinit import log as logging
from cloudinit.settings import PER_INSTANCE
from cloudinit import util
import glob
import os
import tempfile
LOG = logging.getLogger(__name__)
frequency = PER_INSTANCE
SNAPPY_CMD = "snappy"
NAMESPACE_DELIM = '.'
BUILTIN_CFG = {
'packages': [],
'packages_dir': '/writable/user-data/cloud-init/snaps',
'ssh_enabled': "auto",
'system_snappy': "auto",
'config': {},
}
distros = ['ubuntu']
def parse_filename(fname):
    """Split a snap file path into (name, shortname, filename-without-ext).

    e.g. ``/x/pkg.origin_1.0.snap`` -> (``pkg.origin``, ``pkg``,
    ``pkg.origin_1.0``).
    """
    base = os.path.basename(fname)
    no_ext, _sep, _ext = base.rpartition(".")
    name, _sep, _version = no_ext.partition("_")
    short, _sep, _origin = name.partition(".")
    return (name, short, no_ext)
def get_fs_package_ops(fspath):
    """Build install ops for every *.snap file under *fspath*.

    Each snap is paired with a sidecar ``.config`` file when one exists;
    returns a list of op dicts (see makeop), empty for a falsy path.
    """
    if not fspath:
        return []
    ops = []
    for snapfile in sorted(glob.glob(os.path.sep.join([fspath, '*.snap']))):
        (name, shortname, fname_noext) = parse_filename(snapfile)
        cfg = None
        # Prefer the most specific config file name: full versioned name,
        # then the namespaced name, then the short name.
        for cand in (fname_noext, name, shortname):
            fpcand = os.path.sep.join([fspath, cand]) + ".config"
            if os.path.isfile(fpcand):
                cfg = fpcand
                break
        ops.append(makeop('install', name, config=None,
                   path=snapfile, cfgfile=cfg))
    return ops
def makeop(op, name, config=None, path=None, cfgfile=None):
    """Build the dict describing a single snappy operation."""
    return {
        'op': op,
        'name': name,
        'config': config,
        'path': path,
        'cfgfile': cfgfile,
    }
def get_package_config(configs, name):
    """Look up *name*'s config, preferring the fully namespaced entry
    (e.g. ``config-example.canonical``) over the short one
    (``config-example``); returns None when neither key is present."""
    try:
        return configs[name]
    except KeyError:
        shortname = name.partition(NAMESPACE_DELIM)[0]
        return configs.get(shortname)
def get_package_ops(packages, configs, installed=None, fspath=None):
    """Compute the install and config operations that should be done.

    Merges three sources: snaps found on disk under *fspath*, names listed
    in *packages*, and config-only entries for already-installed snaps.
    """
    if installed is None:
        installed = read_installed_packages()
    short_installed = [p.partition(NAMESPACE_DELIM)[0] for p in installed]
    if not packages:
        packages = []
    if not configs:
        configs = {}
    ops = []
    ops += get_fs_package_ops(fspath)
    for name in packages:
        ops.append(makeop('install', name, get_package_config(configs, name)))
    to_install = [f['name'] for f in ops]
    short_to_install = [f['name'].partition(NAMESPACE_DELIM)[0] for f in ops]
    # Config entries that match no pending install become standalone
    # 'config' ops -- but only for packages that are already installed.
    for name in configs:
        if name in to_install:
            continue
        shortname = name.partition(NAMESPACE_DELIM)[0]
        if shortname in short_to_install:
            continue
        if name in installed or shortname in short_installed:
            ops.append(makeop('config', name,
                              config=get_package_config(configs, name)))
    # Prefer inline config entries to sidecar config-file paths.
    for op in ops:
        if op['op'] != 'install' or not op['cfgfile']:
            continue
        name = op['name']
        fromcfg = get_package_config(configs, op['name'])
        if fromcfg:
            LOG.debug("preferring configs[%(name)s] over '%(cfgfile)s'", op)
            op['cfgfile'] = None
            op['config'] = fromcfg
    return ops
def render_snap_op(op, name, path=None, cfgfile=None, config=None):
    """Execute one snappy operation ('install' or 'config') as a subprocess.

    Inline *config* data is written to a temp file which is always cleaned
    up; raises ValueError for any other op value.
    """
    if op not in ('install', 'config'):
        raise ValueError("cannot render op '%s'" % op)
    shortname = name.partition(NAMESPACE_DELIM)[0]
    try:
        cfg_tmpf = None
        if config is not None:
            # input to 'snappy config packagename' must have nested data. odd.
            # config:
            #   packagename:
            #     config
            # Note, however, we do not touch config files on disk.
            nested_cfg = {'config': {shortname: config}}
            (fd, cfg_tmpf) = tempfile.mkstemp()
            os.write(fd, util.yaml_dumps(nested_cfg).encode())
            os.close(fd)
            cfgfile = cfg_tmpf
        cmd = [SNAPPY_CMD, op]
        if op == 'install':
            if path:
                # Local snap files cannot be store-verified.
                cmd.append("--allow-unauthenticated")
                cmd.append(path)
            else:
                cmd.append(name)
            if cfgfile:
                cmd.append(cfgfile)
        elif op == 'config':
            cmd += [name, cfgfile]
        util.subp(cmd)
    finally:
        # Remove the temp config file even when the command failed.
        if cfg_tmpf:
            os.unlink(cfg_tmpf)
def read_installed_packages():
    """Return installed snap names, namespaced with their dev/origin
    (e.g. 'pkg.origin') when one is reported."""
    ret = []
    for (name, date, version, dev) in read_pkg_data():
        if dev:
            ret.append(NAMESPACE_DELIM.join([name, dev]))
        else:
            ret.append(name)
    return ret
def read_pkg_data():
    """Parse `snappy list` output into (name, date, version, dev) tuples."""
    out, err = util.subp([SNAPPY_CMD, "list"])
    pkg_data = []
    # Skip the header row; data rows carry 3 or 4 whitespace-separated
    # columns depending on whether a dev/origin column is present.
    for line in out.splitlines()[1:]:
        toks = line.split(sep=None, maxsplit=3)
        if len(toks) == 3:
            (name, date, version) = toks
            dev = None
        else:
            (name, date, version, dev) = toks
        pkg_data.append((name, date, version, dev,))
    return pkg_data
def disable_enable_ssh(enabled):
    """Start or stop the ssh service; a marker file keeps it disabled
    across boots when *enabled* is falsy."""
    LOG.debug("setting enablement of ssh to: %s", enabled)
    not_to_be_run = "/etc/ssh/sshd_not_to_be_run"
    if enabled:
        util.del_file(not_to_be_run)
        # this is an idempotent operation
        util.subp(["systemctl", "start", "ssh"])
    else:
        # this is an idempotent operation
        util.subp(["systemctl", "stop", "ssh"])
        util.write_file(not_to_be_run, "cloud-init\n")
def set_snappy_command():
    """Pick the first available snappy CLI on PATH, preferring
    snappy-go, then snappy, falling back to snap."""
    global SNAPPY_CMD
    if util.which("snappy-go"):
        SNAPPY_CMD = "snappy-go"
    elif util.which("snappy"):
        SNAPPY_CMD = "snappy"
    else:
        SNAPPY_CMD = "snap"
    LOG.debug("snappy command is '%s'", SNAPPY_CMD)
def handle(name, cfg, cloud, log, args):
    """cloud-init module entry point: install/configure snaps, toggle ssh."""
    cfgin = cfg.get('snappy')
    if not cfgin:
        cfgin = {}
    mycfg = util.mergemanydict([cfgin, BUILTIN_CFG])

    # Bail out unless the host is (or is forced to be) a snappy system.
    sys_snappy = str(mycfg.get("system_snappy", "auto"))
    if util.is_false(sys_snappy):
        LOG.debug("%s: System is not snappy. disabling", name)
        return
    if sys_snappy.lower() == "auto" and not(util.system_is_snappy()):
        LOG.debug("%s: 'auto' mode, and system not snappy", name)
        return

    set_snappy_command()

    pkg_ops = get_package_ops(packages=mycfg['packages'],
                              configs=mycfg['config'],
                              fspath=mycfg['packages_dir'])
    fails = []
    # Attempt every operation; collect failures so one bad snap does not
    # stop the rest, then raise at the end if anything failed.
    for pkg_op in pkg_ops:
        try:
            render_snap_op(**pkg_op)
        except Exception as e:
            fails.append((pkg_op, e,))
            LOG.warning("'%s' failed for '%s': %s",
                        pkg_op['op'], pkg_op['name'], e)

    # Default to disabling SSH
    ssh_enabled = mycfg.get('ssh_enabled', "auto")

    # If the user has not explicitly enabled or disabled SSH, then enable it
    # when password SSH authentication is requested or there are SSH keys
    if ssh_enabled == "auto":
        user_ssh_keys = cloud.get_public_ssh_keys() or None
        password_auth_enabled = cfg.get('ssh_pwauth', False)
        if user_ssh_keys:
            LOG.debug("Enabling SSH, ssh keys found in datasource")
            ssh_enabled = True
        elif cfg.get('ssh_authorized_keys'):
            LOG.debug("Enabling SSH, ssh keys found in config")
            # BUG FIX: the original logged "Enabling SSH" here but never set
            # the flag, leaving ssh_enabled == "auto" -- which only enabled
            # ssh by accident because the string "auto" is truthy.
            ssh_enabled = True
        elif password_auth_enabled:
            LOG.debug("Enabling SSH, password authentication requested")
            ssh_enabled = True
    elif ssh_enabled not in (True, False):
        LOG.warning("Unknown value '%s' in ssh_enabled", ssh_enabled)

    disable_enable_ssh(ssh_enabled)

    if fails:
        raise Exception("failed to install/configure snaps")
# vi: ts=4 expandtab
| [
"romans1212notes@gmail.com"
] | romans1212notes@gmail.com |
4b1da50fcbe42f0e5c38e138a1564ff4cb5a39a4 | 64bdfbaa89e7a923ac71d8a03f40b8ab0bb12fae | /523 함수.py | 0d2b9123a94974924f9ea17973b8c6dd423b55d7 | [] | no_license | m01am01a/py | 02779bb6d30ef23895c9dd7426050a8d17804d0c | 8efa1e48e337dd4205bba5b43f4188ea0272d137 | refs/heads/master | 2020-03-18T20:55:08.093621 | 2018-05-29T07:41:39 | 2018-05-29T07:41:39 | 135,247,256 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 477 | py | def cal_apple(a, b, c):
apple=0
i=1
while i<=b:
if i%c==0:
apple=apple+i
i=i+1
return apple
def cal_peer(d,e):
peer=0
i=d
while i<=end:
peer=peer+i
i=i+1
return peer
start=int(input("시작 값: "))
end=int(input("끝 값: "))
baesu=int(input("배수 값으로 지정할 정수: "))
applepie=cal_apple(start, end, baesu)
print(applepie)
print(cal_peer(start, end))
| [
"noreply@github.com"
] | m01am01a.noreply@github.com |
e0f57e9c630c3093a29c0bce6df465d92b60aeaf | afde7810ec72531f6e47c9c1554f44c6c416234e | /templated_docs_adecuated/templatetags/templated_docs_tags.py | 8ff78175ef63b9c413694991e7cb65c25dbd894b | [
"MIT"
] | permissive | jejimenez/invetronic | f95224769fc8c91cb9b31de5de618125dbc8a027 | 999d58bc0224b6056b16d4e54fefcc81a22e334c | refs/heads/master | 2021-09-10T08:41:51.352441 | 2018-03-23T04:15:39 | 2018-03-23T04:15:39 | 106,342,085 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,274 | py | # --coding: utf8--
import os.path
from django.db.models.fields.files import ImageFieldFile
from django.utils.safestring import mark_safe
from django.utils.html import escape
from django import template
register = template.Library()
PIXEL_TO_CM = 0.00846666
class ImageNode(template.Node):
    """Template node that embeds an ImageField's image as an ODF draw:frame.

    Renders to an empty string when the variable does not resolve to an
    ImageFieldFile.
    """

    def __init__(self, value):
        self.value = template.Variable(value)

    def render(self, context):
        try:
            # BUG FIX: resolve into a local.  Template nodes are reused
            # across renders, and the original overwrote self.value with
            # the resolved object, breaking every subsequent render.
            image = self.value.resolve(context)
            if not isinstance(image, ImageFieldFile):
                raise template.VariableDoesNotExist(
                    'Image argument should be an ImageField')
            # Collect images in the shared render context so a
            # post-processing step can copy them into Pictures/.
            images = context.dicts[0].setdefault('ootemplate_imgs', {})
            id = len(images)
            z_index = id + 3  # Magic
            width = image.width * PIXEL_TO_CM
            height = image.height * PIXEL_TO_CM
            filename = os.path.basename(image.name)
            basename = os.path.splitext(filename)[0]
            images[image.path] = image
            img_frame = '<draw:frame draw:style-name="gr%(z_index)s" ' \
                'draw:name="%(basename)s" ' \
                'draw:id="id%(id)s" ' \
                'text:anchor-type="paragraph" svg:width="%(width)fcm" ' \
                'svg:height="%(height)fcm" draw:z-index="%(z_index)s">' \
                '<draw:image xlink:href="Pictures/%(filename)s" ' \
                'xlink:type="simple" xlink:show="embed" ' \
                'xlink:actuate="onLoad"/></draw:frame>'
            return (img_frame) % locals()
        except template.VariableDoesNotExist:
            return ''
@register.tag
def image(parser, token):
    """
    Insert an image from a ImageField into a document.
    """
    try:
        tag_name, value = token.split_contents()
    except ValueError:
        # BUG FIX: on a failed unpack ``tag_name`` was never bound, so the
        # original error message itself raised NameError.  Recover the tag
        # name from the raw token contents instead.
        raise template.TemplateSyntaxError(
            '%r tag requires a file as an argument' %
            token.contents.split()[0])
    return ImageNode(value)
return ImageNode(value)
@register.filter
def lolinebreaks(value):
    """
    LibreOffice-flavored ``linebreaks`` filter.

    HTML-escapes *value* and joins its lines with ODF line-break tags;
    falsy input yields ''.
    """
    if not value:
        return ''
    # splitlines() already returns a list; the original wrapped it in a
    # redundant identity list comprehension.
    paragraphs = escape(value).splitlines()
    return mark_safe('<text:line-break/>'.join(paragraphs))
| [
"jimenez.ing.sis@gmail.com"
] | jimenez.ing.sis@gmail.com |
eeec76ad93eefcfb23ca1865f19b7685215db38b | 5ccbc3bb70f38962fd31f325154346eb4749088c | /debug.py | e3776b6038d61c1ff5768e1bde4a02b51be2bb17 | [] | no_license | cephalopodMD/graffiti | d4a2d4b6b63c112cbb4c19d88bb1af3954e79779 | 5281c1b7e3b93f0b84b61c77ff6d2fdc38652791 | refs/heads/master | 2021-01-22T15:50:51.850844 | 2017-01-18T23:32:52 | 2017-01-18T23:32:52 | 48,558,938 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 137 | py | #env python
__author__ = 'cephalopodMD'
if __name__ == '__main__':
    # Development entry point: run the Flask app on all interfaces with the
    # interactive debugger enabled (not suitable for production use).
    from api import app
    app.run(host='0.0.0.0', port=5000, debug=True)
| [
"acl3qb@virginia.edu"
] | acl3qb@virginia.edu |
0c37f55467dce8f368e6bef2f9fc7049e65164ad | 954409857ef789aa0d759ea13b80e4ca3d610a8a | /SL_LAB/Lab external practice/iris/iris.py | 307e22f675d7184e2682081e5a9b30e4718b00d4 | [] | no_license | AkarshSimha007/ISE-SEM_5-Labs | 488f00d7b8e78317a75b5b87c3256e38ef4fa4c6 | 34e34da22d2d3a646fc175da942682deedf36980 | refs/heads/master | 2023-03-09T11:42:41.972159 | 2021-02-09T13:16:19 | 2021-02-09T13:16:19 | 323,646,435 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 938 | py | import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
# Load the iris measurements; note all columns except the first carry a
# leading space in their names (e.g. ' Sepal_Width'), hence the odd keys.
df = pd.read_csv("iris.csv")
# BUG FIX: describe() must be *called* -- the original printed the bound
# method object instead of the summary statistics.
print(df.describe())
print(df.head())
df.drop(['Sepal_Length'], inplace=True, axis=1)
print(df.groupby('Class').agg(['mean']))
# Count of flowers per sepal-width value, split by species.
ax = sns.countplot(data=df, hue='Class', palette="Set1", x=' Sepal_Width')
ax.set(title="Flowers of each specie", xlabel="Sepal Width", ylabel="No.of Flowers")
plt.show()
# Bucket petal widths into three coarse categories.
interval = (0, 1, 2, 4)
category = ['<1', '1 to 2', '>2']
df['Petal_Catg'] = pd.cut(df[' Petal_Width'], interval, labels=category)
ax = sns.countplot(data=df, x='Petal_Catg', hue='Class', palette='YlOrRd')
ax.set(title='Petal Width', xlabel='Category of Petals', ylabel='No. of flowers')
plt.show()
# Same sepal-width plot with the easily separable setosa excluded.
ax = sns.countplot(data=df[df['Class'] != 'Iris-setosa'], hue='Class', x=' Sepal_Width', palette='Set1')
ax.set(xlabel='Sepal Width', ylabel='No. of flowers')
plt.show()
print(pd.crosstab(df["Class"], df[" Sepal_Width"]))
"akarsh07simha@gmail.com"
] | akarsh07simha@gmail.com |
63f504cacbf389261088e79f4286f5b03181374b | 1314496f23da33e7227a38c46825fd67ed9c6c48 | /Google Code Jam 2008/Round 1A/A/A.py | e401133988958d0d6a70da33a4fb1d8b40530101 | [] | no_license | Ungsik-Yun/codejam | 8bf60beb2c96f4366b0f31cb4c2ac91a04b42f7d | de1e094acd5a75365c37891496f6f608f89f49f0 | refs/heads/master | 2021-01-10T20:59:25.953603 | 2014-07-25T15:11:56 | 2014-07-25T15:11:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,214 | py | '''
Created on 2014. 4. 16.
@author: Alice
'''
import multiprocessing
from pprint import pprint
def file_into_problem(file_name):
with open(file_name, 'r+') as f:
case_number = int(f.readline())
problem = {'case_number':case_number}
problem['cases'] = list()
for i in xrange(case_number):
numbers = int(f.readline())
x = [int(n) for n in f.readline().split()]
y = [int(n) for n in f.readline().split()]
case = {}
case['numbers'] = numbers
case['x'] = x
case['y'] = y
case['no'] = i + 1
problem['cases'].append(case)
return problem
def solve_problem(problem):
case_number = problem['case_number']
with open('A result.out', 'w+') as f:
for case in problem['cases']:
result = 0
x = sorted(case['x'])
y = sorted(case['y'])
for i in xrange(len(x)):
result += x.pop(0) * y.pop()
f.write("Case #%d: %d\n" % (case['no'], result))
if __name__ == '__main__':
p = file_into_problem('A-large-practice.in')
solve_problem(p)
| [
"ungsik.yun@gmail.com"
] | ungsik.yun@gmail.com |
84467444eded16b73a6db76ccfbdbaf9d0894be4 | dfd1edf11947cfe2e04144939447ff983c08db46 | /src/models/trip.py | 1a10e922248c0f77de6f75a32872623a86ba5ec1 | [] | no_license | kousiknath/fuber | e497cf6b56a4eacb062c6098915c956d4de1368b | 0865a8d9aeb9e87937ed52f0d614e3f77dd74ac2 | refs/heads/master | 2021-01-21T05:23:44.402489 | 2017-02-26T06:03:51 | 2017-02-26T06:03:51 | 83,186,955 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,687 | py | import sys
from src.fuber_core import utils
import datetime
from src.registry import trip_status
import copy
from src.response import standard_response
from src.response.standard_response import ApplicationErrorCodes
from src.models.order import Order
class Trip(object):
def __init__(self, trip_request, cab_action, trip_registry, *args, **kwargs):
self.__request = trip_request
self.__cab_action = cab_action
self.__order = None
self.__trip_id = utils.get_id()
self.__trip_start_time = None
self.__trip_end_time = None
self.__trip_duration = None
self.__trip_status = trip_status.TRIP_DISPATCHED # Initial stage of the trip
self.__trip_allocation_time = datetime.datetime.now()
self.__trip_distance = 0.0 # Assumed to be in Kilometers
self.__args = args
self.__kwargs = kwargs
trip_registry.register_trip(self)
def get_trip_id(self):
return self.__trip_id
def start_trip(self):
if self.__trip_status != trip_status.TRIP_DISPATCHED:
return standard_response.get_standard_api_response(False, "Trip not requested / dispatched", ApplicationErrorCodes.REQUEST_NOT_FULFILLED)
self.__trip_start_time = datetime.datetime.now()
self.__trip_status = trip_status.TRIP_STARTED
return standard_response.get_standard_api_response(True, "", ApplicationErrorCodes.SUCCESS, self.to_dict())
def cancel_trip(self):
if not self.__is_trip_cancellable():
return standard_response.get_standard_api_response(False, "Trip not requested, not dispatched or already started", ApplicationErrorCodes.REQUEST_NOT_FULFILLED)
self.__trip_status = trip_status.TRIP_CANCELLED
self.__cab_action.deallocate_cab()
return standard_response.get_standard_api_response(True, "", ApplicationErrorCodes.SUCCESS, self.to_dict())
def __is_trip_cancellable(self):
if self.__trip_status == trip_status.TRIP_DISPATCHED:
return True
return False
def end_trip(self):
if self.__trip_status != trip_status.TRIP_STARTED:
return standard_response.get_standard_api_response(False, "Trip not started", ApplicationErrorCodes.REQUEST_NOT_FULFILLED)
self.__trip_status = trip_status.TRIP_COMPLETED
self.__cab_action.deallocate_cab()
self.__cab_action.update_cab_location(self.__request.get_trip_end_location())
self.__trip_distance = utils.get_trip_distance(self.__request.get_trip_start_location(), self.__request.get_trip_end_location())
self.__trip_duration = utils.get_trip_timing(self.__trip_distance) # self.__trip_end_time - self.__trip_start_time
self.__trip_end_time = self.__trip_start_time + datetime.timedelta(minutes = int(self.__trip_duration)) # datetime.now()
self.__order = Order(self.__trip_id, self.__request.get_trip_customer(), self.__trip_distance, self.__trip_duration, self.__request.get_trip_preference().get_cab_color())
return standard_response.get_standard_api_response(True, "", ApplicationErrorCodes.SUCCESS, self.to_dict())
def generate_order_details(self):
pass
def to_dict(self):
order_summary = {}
if self.__order is not None:
order_summary = self.__order.to_dict()
return {"trip_request" : self.__request.to_dict(), "trip_id" : str(self.__trip_id), "trip_start_time" : str(self.__trip_start_time), "trip_end_time" : str(self.__trip_end_time), "trip_duration" : str(self.__trip_duration), "trip_status" : trip_status.get_trip_status(self.__trip_status), "trip_allocation_time: " : repr(self.__trip_allocation_time), "trip_distance" : str(self.__trip_distance), "cab_info" : self.__cab_action.get_cab_info(), "order_summary" : order_summary}
def get_standard_trip_response(self):
return standard_response.get_standard_api_response(True, "", ApplicationErrorCodes.SUCCESS, self.to_dict())
| [
"dnkousik1992@gmail.com"
] | dnkousik1992@gmail.com |
812b28e3b3c48e776cf45e6ca228686798776c28 | a649a23d823797b6824cee4c635f9595a5fb0a90 | /data/optFileProcess.py | faeab0e8101747f4e3d81e75976434e591191f11 | [] | no_license | dzynin/golfdbPipeline | f5c19dd3b0e94823a0fa73decf0134c3843ed573 | fc587d48bced02eaee7b0636423626e0c8b882db | refs/heads/master | 2023-04-23T08:43:42.415530 | 2021-05-13T02:43:38 | 2021-05-13T02:43:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,465 | py | import pandas as pd
import os
import cv2
from multiprocessing import Pool
import numpy as np
import sys
import numpy
from config import cfg
optFilesFolder = cfg.OPT_ORI_FILE_PATH
videosBboxPath = cfg.BBOX_INFO_PATH
optFilesFolder_160 = cfg.OPT_RESIZE_FILE_PATH
inputDim = cfg.INPUT_DIM
def preprocess_optFiles(optFilesFolder_160, dim=160):
bboxMap = {}
with open(videosBboxPath) as f:
for line in f.readlines():
bbox = []
line = line.rstrip()
bboxKey = line.split(':')[0]
tmp = line.split(':', 1)[1]
tmp = tmp.rstrip(']')
tmp = tmp.strip('[')
a1 = tmp.split(',', 3)[0]
a2 = tmp.split(',', 3)[1]
a3 = tmp.split(',', 3)[2]
a4 = tmp.split(',', 3)[3]
bbox.append(int(a1))
bbox.append(int(a2))
bbox.append(int(a3))
bbox.append(int(a4))
bboxMap[bboxKey] = bbox
count = 0 # 记录处理了多少文件夹
for optFolder in os.listdir(optFilesFolder):
bbox = bboxMap[optFolder]
path = optFilesFolder_160
if not os.path.exists(path):
os.mkdir(path)
print('Processing folder id {}'.format(optFolder))
optFilesResizeFoler = os.path.join(path, optFolder)
if not os.path.exists(optFilesResizeFoler):
os.mkdir(optFilesResizeFoler)
x = int(bbox[0])
y = int(bbox[1])
w = int(bbox[2])
h = int(bbox[3])
folderPath = os.path.join(optFilesFolder, optFolder)
fileNum = 0 # 记录resize了多少文件
for optFile in os.listdir(folderPath):
# 这个图片尺寸需要根据需要更改
filePath = os.path.join(folderPath, optFile)
# opticalOri = np.fromfile(
# filePath, np.float32, offset=12).reshape(960, 544, 2)
# opticalArray = np.empty([960, 544, 3], np.float32)
# opticalArray[..., 0] = 255
# opticalArray[..., 1] = opticalOri[:, :, 0]
# opticalArray[..., 2] = opticalOri[:, :, 1]
opticalArray = cv2.imread(filePath)
# if count >= events[0] and count <= events[-1]:
crop_img = opticalArray[y:y + h, x:x + w]
crop_size = crop_img.shape[:2]
ratio = dim / max(crop_size)
new_size = tuple([int(x*ratio) for x in crop_size])
resized = cv2.resize(crop_img, (new_size[1], new_size[0]))
delta_w = dim - new_size[1]
delta_h = dim - new_size[0]
top, bottom = delta_h // 2, delta_h - (delta_h // 2)
left, right = delta_w // 2, delta_w - (delta_w // 2)
# b_img = cv2.copyMakeBorder(resized, top, bottom, left, right, cv2.BORDER_CONSTANT,
# value=[0.406*255, 0.456*255, 0.485*255]) # ImageNet means (BGR)
b_img = cv2.copyMakeBorder(resized, top, bottom, left, right, cv2.BORDER_CONSTANT,
value=[0, 0, 0]) # ImageNet means (BGR)
# b_img = cv2.copyMakeBorder(
# resized, top, bottom, left, right, cv2.BORDER_REPLICATE)
opt_160_file_path = os.path.join(optFilesResizeFoler, optFile)
# objOutput = open(opt_160_file_path, 'wb')
# numpy.array([80, 73, 69, 72], numpy.uint8).tofile(objOutput)
# opticalArray = np.empty([160, 160, 2], np.float32)
# opticalArray[..., 0] = b_img[..., 1]
# opticalArray[..., 1] = b_img[..., 2]
# numpy.array([opticalArray.shape[2], opticalArray.shape[1]],
# numpy.int32).tofile(objOutput)
# numpy.array(opticalArray, numpy.float32).tofile(objOutput)
# objOutput.close()
cv2.imwrite(opt_160_file_path, b_img)
fileNum += 1
count += 1
print("resize {} files".format(fileNum))
print("resize {} filefolders".format(count))
if __name__ == '__main__':
# optFilesFolder原始光流图所在位置
# if len(sys.argv) != 4:
# print("not enough param in optFileProcess")
# sys.exit(1)
# videosBboxPath = sys.argv[1]
# optFilesFolder_160 = sys.argv[2]
# optFilesFolder = sys.argv[3]
# videosBboxPath = "/home/zqr/codes/data/videosBbox.txt"
# optFilesFolder = "/home/zqr/codes/data/optOri"
preprocess_optFiles(optFilesFolder_160, inputDim)
| [
"zhangqingrui@bupt.edu.cn"
] | zhangqingrui@bupt.edu.cn |
b942953197ccc99a62c1a5662a82598a509014dc | e9530f45e1a9ada1cd6aa87be132ab5764435b9b | /common/consts.py | e0bd31674add7b5d89cb669a1f04872a30fe535c | [] | no_license | tfluo/iplocation | e8ba0461bf7bbe615aae04d19f350543a0286d4c | 9ff998804bf3fa65f1f86dd8c550ab8cfd8aabc0 | refs/heads/master | 2021-05-17T19:24:46.115732 | 2020-03-29T03:09:03 | 2020-03-29T03:09:03 | 250,936,813 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 68 | py |
MSGID_INVALID_FUNC = 1
MSGID_INVALID_BODY = 2
MSGID_INVALID_IP = 3
| [
"noreply@github.com"
] | tfluo.noreply@github.com |
9aa16588702b2ee3774b4f8e69495a406cb8b76d | 27153d5c0b9aec1688662bd7baff5f5cf580c739 | /venv/Scripts/pip3.7-script.py | 51175ac05ba8f9d2393ee9efce037480d49d0df6 | [] | no_license | thecatcher/module_demo | 08ea203dce58a205475669b3c527b24ea3c1d0ec | a1dd0ac4928853a2fb9ef9a5f4a1a3a06bb8cad5 | refs/heads/master | 2020-12-21T15:37:53.793173 | 2020-01-27T11:27:54 | 2020-01-27T11:27:54 | 236,475,842 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 421 | py | #!C:\Users\Brady\PycharmProjects\module_demo\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3.7'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip3.7')()
)
| [
"gaoyang1019@hotmail.com"
] | gaoyang1019@hotmail.com |
7e1dc70fd767643e21a2a3d78d6d72df7ecced09 | ed1253f32f347c23e84b62f2ee2c1bef2c151d73 | /Butterfly/manage.py | dc43cf3990eda25633ce959fc74b4f19d92d6554 | [] | no_license | Yashkrity/Butterfly_Deploy | 8529cda257cc05890822afebaa7544d73701025d | 09fcadb8b54e0304d1ae44715c2a4c7f4c32ef0b | refs/heads/master | 2020-04-27T20:15:20.392002 | 2019-03-09T05:11:35 | 2019-03-09T05:11:35 | 174,650,968 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 541 | py | #!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Butterfly.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| [
"36617931+Yashkrity@users.noreply.github.com"
] | 36617931+Yashkrity@users.noreply.github.com |
5de533ddfe0f0b2b99bd1efc8f9d4f81d624faf1 | e944bc381145fd22d2f7df2f77fc8771ff2d8b64 | /Week_02/889.根据前序和后序遍历构造二叉树.medium.py | 1b55b07a6bfe52559f3bd3526e59d7e70a1fd37f | [] | no_license | yipuwu-001/AlgorithmQIUZHAO | e6dcf7c641cfab76e9e545aefd8069680282e942 | 45b1e5cf989f65224de5f462cfa5e42a448ae780 | refs/heads/master | 2022-12-03T21:20:51.441669 | 2020-08-23T10:09:23 | 2020-08-23T10:09:23 | 280,621,918 | 0 | 0 | null | 2020-07-18T09:13:30 | 2020-07-18T09:13:29 | null | UTF-8 | Python | false | false | 2,068 | py | #
# @lc app=leetcode.cn id=889 lang=python
#
# [889] 根据前序和后序遍历构造二叉树
#
# https://leetcode-cn.com/problems/construct-binary-tree-from-preorder-and-postorder-traversal/description/
#
# algorithms
# Medium (65.55%)
# Likes: 99
# Dislikes: 0
# Total Accepted: 5.8K
# Total Submissions: 8.9K
# Testcase Example: '[1,2,4,5,3,6,7]\n[4,5,2,6,7,3,1]'
#
# 返回与给定的前序和后序遍历匹配的任何二叉树。
#
# pre 和 post 遍历中的值是不同的正整数。
#
#
#
# 示例:
#
# 输入:pre = [1,2,4,5,3,6,7], post = [4,5,2,6,7,3,1]
# 输出:[1,2,3,4,5,6,7]
#
#
#
#
# 提示:
#
#
# 1 <= pre.length == post.length <= 30
# pre[] 和 post[] 都是 1, 2, ..., pre.length 的排列
# 每个输入保证至少有一个答案。如果有多个答案,可以返回其中一个。
#
#
#
#ref: https://leetcode-cn.com/problems/construct-binary-tree-from-preorder-and-postorder-traversal/solution/tu-jie-889-gen-ju-qian-xu-he-hou-xu-bian-li-gou-2/
# @lc code=start
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def constructFromPrePost(self, preorder, postorder):
"""
:type pre: List[int]
:type post: List[int]
:rtype: TreeNode
"""
if len(preorder) == 0 or len(postorder) == 0:
return None
root = TreeNode(preorder[0])
if len(preorder) == 1:
return root
mid_idx = postorder.index(preorder[1])
root.left = self.constructFromPrePost(preorder[1:mid_idx+2], postorder[:mid_idx+1])
root.right = self.constructFromPrePost(preorder[mid_idx+2:], postorder[mid_idx+1:-1])
return root
# @lc code=end
# if __name__ == "__main__":
# preorder = [1,2,4,5,3,6,7]
# postoreder = [4,5,2,6,7,3,1]
# t = Solution().constructFromPrePost(preorder, postoreder)
# import print_tree
# print_tree.printTree(t)
| [
"shuwen@email.com"
] | shuwen@email.com |
6fb1658a55c61d4601482e56c7317174b44d907c | 78ecc329b2d429550c2931afc0d138e79cedf6b0 | /shortener/migrations/0003_auto_20190418_1341.py | 4d0b09b473d2e989070ffe4ca1928060bda85b9c | [] | no_license | NikitaMasand/URLShorteningService | eb2b93e670680e208befd9134314c2053be1db92 | acd538865d6dddf73cb04984e729523c419230a2 | refs/heads/master | 2020-05-15T11:28:33.509777 | 2019-04-19T08:27:30 | 2019-04-19T08:27:30 | 182,057,276 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 567 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2019-04-18 08:11
from __future__ import unicode_literals
from django.db import migrations, models
import shortener.validators
class Migration(migrations.Migration):
dependencies = [
('shortener', '0002_auto_20190417_1808'),
]
operations = [
migrations.AlterField(
model_name='kirrurl',
name='url',
field=models.CharField(max_length=220, validators=[shortener.validators.validate_url, shortener.validators.validate_dot_com]),
),
]
| [
"masandnikita@gmail.com"
] | masandnikita@gmail.com |
63655078077b3e9d9b986b5bf295f5aae86a05c0 | 7b26ead5cca82bc8ec8cec01505435db06959284 | /spider.py | 8ecd93d5e9b67fa07028d64a1ee83625d4721952 | [] | no_license | mnahm5/Web-Crawler | dffa8725f56a1c4c9265c120b9ac5500a497bff3 | 552ca54fd13e4fc30e1315b6a22fb511d2aaf345 | refs/heads/master | 2021-01-19T00:52:17.912824 | 2016-06-30T12:45:25 | 2016-06-30T12:45:25 | 60,887,947 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,375 | py | from urllib.request import urlopen
from link_finder import LinkFinder
from general import *
class Spider:
# Class variables (shared among all instances)
project_name = ""
base_url = ""
domain_name = ""
queue_file = ""
crawled_file = ""
queue = set()
crawled = set()
def __init__(self, project_name, base_url, domain_name):
Spider.project_name = project_name
Spider.base_url = base_url
Spider.domain_name = domain_name
Spider.queue_file = Spider.project_name + "/queue.txt"
Spider.crawled_file = Spider.project_name + "/crawled.txt"
self.boot()
self.crawl_page("First Spider", Spider.base_url)
@staticmethod
def boot():
create_project_dir(Spider.project_name)
create_data_files(Spider.project_name, Spider.base_url)
Spider.queue = file_to_set(Spider.queue_file)
Spider.crawled = file_to_set(Spider.crawled_file)
@staticmethod
def crawl_page(thread_name, page_url):
if page_url not in Spider.crawled:
print(thread_name + ' now crawling ' + page_url)
print("Queue " + str(len(Spider.queue)) + " | Crawled " + str(len(Spider.crawled)))
Spider.add_links_to_queue(Spider.gather_links(page_url))
Spider.queue.remove(page_url)
Spider.crawled.add(page_url)
Spider.update_files()
@staticmethod
def gather_links(page_url):
html_string = ""
try:
response = urlopen(page_url)
if response.getheader("Content-Type") == "text/html":
html_bytes = response.read()
html_string = html_bytes.decode("utf-8")
finder = LinkFinder(Spider.base_url, page_url)
finder.feed(html_string)
except:
print("Error: cannot crawl page")
return set()
return finder.page_links()
@staticmethod
def add_links_to_queue(links):
for url in links:
if url in Spider.queue:
continue
if url in Spider.crawled:
continue
if Spider.domain_name not in url:
continue
Spider.queue.add(url)
@staticmethod
def update_files():
set_to_file(Spider.queue, Spider.queue_file)
set_to_file(Spider.crawled, Spider.crawled_file)
| [
"ahmed.nadim59@gmail.com"
] | ahmed.nadim59@gmail.com |
5266a6809c1e8319da15b99a9970f7de2ac3ce80 | 204de6a777338c091c9347be51d2bd8640a8dd7f | /day_3.py | ba81b396414b726f1669aab6668ce34de59ccd23 | [] | no_license | ktgnair/Python | 24755a77a3ad3426c324f24a7b58686d4d56e890 | 3e3110546846a93558027cca9e742ae75fd6aba5 | refs/heads/master | 2023-05-14T08:46:10.568410 | 2021-06-13T17:00:04 | 2021-06-13T17:00:04 | 361,490,338 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 230 | py | #Control Flow with if/else and conditional operators
print("Voter Criteria Checker")
age = int(input("Please enter your age: "))
if age >= 18:
print("You are eligible to vote")
else:
print("You are still a minor!")
| [
"ktgnair95@gmail.com"
] | ktgnair95@gmail.com |
df179dd939d9a1bc4dd1aff053b8f46f39796c0a | bfde55d192317b1d55538288a8dca991c3561eee | /4_Human_readable_duration_format.py | 34445ae18641dcfd265eb9fa3925f56db93ec2d7 | [] | no_license | QuinnFargen/CodeWarKata | b03f58b7a865c638e770b7a63f3d3f1379766045 | 6fe72e970cb5525bf9c6ca7f78107683a96c3b05 | refs/heads/master | 2023-04-04T03:43:17.684432 | 2021-04-17T00:08:19 | 2021-04-17T00:08:19 | 275,986,133 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,223 | py |
# years, days, hours, minutes and seconds.
min = 60;hour = 3600;day = 86400;year = 31536000
seconds = 315360004
years = round(seconds / year); seconds = seconds - (year * years)
days = round(seconds / day); seconds = seconds - (day * days)
hours = round(seconds / hour); seconds = seconds - (hour * hours)
mins = round(seconds / min); seconds = seconds - (min * mins)
secs = seconds
from math import floor
def str_duration(time, label):
if time == 0:
return ''
if time == 1:
return '1 ' + label
else :
return str(time) + ' ' + label + 's'
def str_final(TimeList):
if len(TimeList) == 2:
return TimeList[0] + ' and ' + TimeList[1]
if len(TimeList) == 3:
return TimeList[0] + ', ' + TimeList[1] + ' and ' + TimeList[2]
if len(TimeList) == 4:
return TimeList[0] + ', ' + TimeList[1] + ', ' + TimeList[2] + ' and ' + TimeList[3]
if len(TimeList) == 5:
return TimeList[0] + ', ' + TimeList[1] + ', ' + TimeList[2] + ', ' + TimeList[3] + ' and ' + TimeList[4]
def format_duration(seconds):
if seconds == 0:
return 'now'
min = 60;hour = 3600;day = 86400;year = 31536000
years = floor(seconds / year); seconds = seconds - (year * years)
days = floor(seconds / day); seconds = seconds - (day * days)
hours = floor(seconds / hour); seconds = seconds - (hour * hours)
mins = floor(seconds / min); seconds = seconds - (min * mins)
secs = seconds
year_str = str_duration(years,'year')
day_str = str_duration(days,'day')
hour_str = str_duration(hours,'hour')
min_str = str_duration(mins,'minute')
sec_str = str_duration(secs,'second')
AllTimeList = [year_str,day_str,hour_str,min_str,sec_str]
TimeList = [i for i in AllTimeList if i != '']
if len(TimeList) == 1:
return TimeList[0]
return str_final(TimeList)
seconds = 3153600042
format_duration(seconds)
test.assert_equals(format_duration(1), "1 second")
test.assert_equals(format_duration(62), "1 minute and 2 seconds")
test.assert_equals(format_duration(120), "2 minutes")
test.assert_equals(format_duration(3600), "1 hour")
test.assert_equals(format_duration(3662), "1 hour, 1 minute and 2 seconds") | [
"QuinnFargen@gmail.com"
] | QuinnFargen@gmail.com |
3d813f8a0cbbb45aa3784cf49c34898f49e65bb6 | 53e01918468149a3d54697c36e1ef332ae31502a | /challenges/challenge_find_the_duplicate.py | 595bc3c71bf6d66cca82197ab8e1dab315c5d33d | [] | no_license | pedrobth/python-algorythms | 991832d4281162699d7bc4e5d083aee4e709fdac | 086134b4b01f58b05c7be2479cc33ffb62023a1a | refs/heads/master | 2023-07-09T21:48:21.511362 | 2021-08-04T21:56:12 | 2021-08-04T21:56:12 | 391,405,646 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 251 | py | def find_duplicate(nums):
nums.sort()
for (index, number) in enumerate(nums[:-1]):
if not isinstance(number, int) or number < 0:
return False
if number == nums[index + 1]:
return number
return False
| [
"pmerchiorattor@gmail.com"
] | pmerchiorattor@gmail.com |
dd4794a957a343b972762813cfc69bd534871682 | 2cb19ab2c4ee527436dfdbe69010a82d508dba61 | /Prac_1/Graphic recognition.py | 3f6dc9a7aada591cd56f1323bcd787901e5a55a2 | [] | no_license | wuyongzhi/Deeplearning | d823c1b7d2bfbae536ac7f8d0122e2f109cd6127 | ba7982ca4595c58a0af6fff6c376f12f2ad8b83c | refs/heads/master | 2020-06-22T14:41:32.667961 | 2018-04-07T13:08:27 | 2018-04-07T13:08:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,749 | py | # -*- coding: utf-8 -*-
from keras.callbacks import TensorBoard
from keras.layers import Dense, Dropout, MaxPooling2D, Flatten, Convolution2D
from keras.models import Sequential
from keras import backend as K
import load_data as ld
import matplotlib.pyplot as plt
import csv
import numpy as np
def f1(y_true, y_pred):
def recall(y_true, y_pred):
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
recall = true_positives / (possible_positives + K.epsilon())
return recall
def precision(y_true, y_pred):
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
precision = true_positives / (predicted_positives + K.epsilon())
return precision
precision = precision(y_true, y_pred)
recall = recall(y_true, y_pred)
return 2 * ((precision * recall) / (precision + recall))
def built_model():
model = Sequential()
# first layers
model.add(Convolution2D(filters=8,
kernel_size=(5, 5),
input_shape=(40, 40, 1),
activation='relu'))
model.add(Convolution2D(filters=16,
kernel_size=(3, 3),
activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
# second layers
model.add(Convolution2D(filters=16,
kernel_size=(3, 3),
activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
# third layers
model.add(Dense(units=128,
activation='relu'))
model.add(Dropout(0.5))
# fourth layers
model.add(Dense(units=1,
activation='sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy', f1])
model.summary()
return model
def train_model(batch_size=64, epochs=20, model=None):
train_x, train_y, test_x, test_y, t = ld.load_train_test_data()
if model is None:
model = built_model()
history = model.fit(train_x, train_y,
batch_size=batch_size,
epochs=epochs,
verbose=2,
validation_split=0.1,
callbacks=[TensorBoard(log_dir='/tmp/autoencoder')])
print "刻画损失函数在训练与验证集的变化"
plt.plot(history.history['loss'], label='train')
plt.plot(history.history['val_loss'], label='valid')
plt.legend()
plt.show()
predicted = model.predict(t,
batch_size=batch_size,
verbose=1)
predicted = np.array([round(w) for w in predicted])
score = model.evaluate(test_x, test_y,
batch_size=batch_size)
print score
print "刻画预测结果与测试集结果"
# count = 0
# for i in range(len(predicted)):
# print [predicted[i], test_y[i]]
# if predicted[i] == test_y[i]:
# count += 1
# print "正确个数:" + str(count)
# print "正确率:" + str(count * 1.0 /len(predicted))
model.save('my_model.h5')
return predicted
if __name__ == '__main__':
predicted = train_model()
num = 4000
csvFile = open('test_y.csv', 'w')
write = csv.writer(csvFile)
write.writerow(['id', 'y'])
for i in predicted:
write.writerow([num, int(i)])
num += 1
print predicted
| [
"liaochuntao@live.com"
] | liaochuntao@live.com |
a4296797b1ed56b7ceddf621cb693714d0bff378 | f0813328ea567d640d574e9ddcfd0be77c3627cb | /qa/rpc-tests/test_framework/util.py | 956d77a71d09c306960ba470a57e4fccf20fe9af | [
"MIT"
] | permissive | freelancerstudio/nyancoin-client | 6e2a25e6bb71be2683e7cdfb822beb335bc488eb | a4ecd6ca374f62ff69d58c6b991c65cd3707577e | refs/heads/master | 2023-09-04T07:25:54.443490 | 2021-10-02T23:12:57 | 2021-10-02T23:12:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 26,868 | py | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Helpful routines for regression testing
#
import math
import os
import sys
from binascii import hexlify, unhexlify
from base64 import b64encode
from decimal import Decimal, ROUND_DOWN
import json
import http.client
import random
import shutil
import subprocess
import time
import re
import errno
from . import coverage
from .authproxy import AuthServiceProxy, JSONRPCException
# Directory where RPC-call coverage logs are written; None disables coverage
# logging (set by enable_coverage(), read by get_rpc_proxy()).
COVERAGE_DIR = None

# The maximum number of nodes a single test can spawn
MAX_NODES = 8
# Don't assign rpc or p2p ports lower than this
PORT_MIN = 11000
# The number of ports to "reserve" for p2p and rpc, each
PORT_RANGE = 5000

# Timeout in seconds — presumably how long to wait for a nyancoind process to
# terminate; TODO confirm against the node-stop helpers elsewhere in this file.
BITCOIND_PROC_WAIT_TIMEOUT = 60


class PortSeed:
    # Per-process seed mixed into p2p_port()/rpc_port() so that concurrently
    # running test processes do not collide on port numbers.
    # Must be initialized with a unique integer for each process
    n = None
# Set Mocktime default to OFF.
# MOCKTIME is only needed for scripts that use the cached version of the
# blockchain. If the cached blockchain is used without MOCKTIME, the mempools
# will not sync, due to IBD (initial block download).
MOCKTIME = 0
def enable_mocktime():
    # For backward compatibility of the python scripts with previous versions
    # of the cache: pin mock time to Jan 1, 2014 (1388534400) plus 201 blocks
    # at 10-minute (600 s) spacing.
    global MOCKTIME
    MOCKTIME = 1388534400 + 201 * 600
def disable_mocktime():
    # Reset MOCKTIME to 0, i.e. turn mock time off so nodes use real time.
    global MOCKTIME
    MOCKTIME = 0
def get_mocktime():
    # Current mock time (unix timestamp); 0 means mock time is disabled.
    return MOCKTIME
def enable_coverage(dirname):
    """Maintain a log of which RPC calls are made during testing.

    Stores *dirname* in the module-global COVERAGE_DIR, which
    get_rpc_proxy() reads to decide where to write coverage logs.
    """
    global COVERAGE_DIR
    COVERAGE_DIR = dirname
def get_rpc_proxy(url, node_number, timeout=None):
    """Build an RPC client for one node, wrapped for coverage logging.

    Args:
        url (str): URL of the RPC server to call
        node_number (int): the node number (or id) that this calls to

    Kwargs:
        timeout (int): HTTP timeout in seconds

    Returns:
        AuthServiceProxy. convenience object for making RPC calls.

    """
    extra_kwargs = {}
    if timeout is not None:
        extra_kwargs['timeout'] = timeout

    proxy = AuthServiceProxy(url, **extra_kwargs)
    proxy.url = url  # store URL on proxy for info

    if COVERAGE_DIR:
        coverage_logfile = coverage.get_filename(COVERAGE_DIR, node_number)
    else:
        coverage_logfile = None

    return coverage.AuthServiceProxyWrapper(proxy, coverage_logfile)
def p2p_port(n):
    # p2p listen port for node n; shifted by a per-process seed so that
    # concurrently running test processes do not collide on ports.
    assert(n <= MAX_NODES)
    seed_offset = (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
    return PORT_MIN + n + seed_offset
def rpc_port(n):
    # rpc listen port for node n; sits one PORT_RANGE above the p2p ports.
    seed_offset = (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
    return PORT_MIN + PORT_RANGE + n + seed_offset
def check_json_precision():
    """Make sure json library being used does not lose precision converting BTC values"""
    value = Decimal("20000000.00000003")
    # Round-trip through JSON as a float and scale to satoshis; any precision
    # loss in the json library would change the final integer.
    round_tripped = json.loads(json.dumps(float(value)))
    satoshis = int(round_tripped * 1.0e8)
    if satoshis != 2000000000000003:
        raise RuntimeError("JSON encode/decode loses precision")
def count_bytes(hex_string):
    # Number of raw bytes encoded by a hex string.
    return len(bytes.fromhex(hex_string))
def bytes_to_hex_str(byte_str):
    # Render raw bytes as a lowercase ASCII hex string.
    encoded = hexlify(byte_str)
    return encoded.decode('ascii')
def hex_str_to_bytes(hex_str):
    # Decode an ASCII hex string into raw bytes.
    # (unhexlify, unlike bytes.fromhex, rejects embedded whitespace.)
    ascii_bytes = hex_str.encode('ascii')
    return unhexlify(ascii_bytes)
def str_to_b64str(string):
    # UTF-8 encode, base64 encode, and return the result as ASCII text.
    utf8_bytes = string.encode('utf-8')
    return b64encode(utf8_bytes).decode('ascii')
def sync_blocks(rpc_connections, *, wait=1, timeout=60):
    """
    Wait until everybody has the same tip.

    sync_blocks needs to be called with an rpc_connections set that has at
    least one node already synced to the latest, stable tip, otherwise there's
    a chance it might return before all nodes are stably synced.

    rpc_connections: iterable of node RPC proxies to synchronize
    wait:            per-poll wait in seconds (forwarded to waitforblockheight
                     as milliseconds)
    timeout:         overall time budget in seconds before AssertionError
    """
    # Use getblockcount() instead of waitforblockheight() to determine the
    # initial max height because the two RPCs look at different internal global
    # variables (chainActive vs latestBlock) and the former gets updated
    # earlier.
    maxheight = max(x.getblockcount() for x in rpc_connections)
    start_time = cur_time = time.time()
    while cur_time <= start_time + timeout:
        # Each node blocks (up to wait*1000 ms) until its tip reaches
        # maxheight, then reports its current tip as {"height", "hash", ...}.
        tips = [r.waitforblockheight(maxheight, int(wait * 1000)) for r in rpc_connections]
        if all(t["height"] == maxheight for t in tips):
            if all(t["hash"] == tips[0]["hash"] for t in tips):
                return
            # Same height but different hashes: the nodes are on a fork.
            raise AssertionError("Block sync failed, mismatched block hashes:{}".format(
                "".join("\n  {!r}".format(tip) for tip in tips)))
        cur_time = time.time()
    raise AssertionError("Block sync to height {} timed out:{}".format(
        maxheight, "".join("\n  {!r}".format(tip) for tip in tips)))
def sync_chain(rpc_connections, *, wait=1, timeout=60):
    """Block until every connected node reports the same best block hash.

    Polls getbestblockhash() on each node every `wait` seconds and raises
    AssertionError if the hashes still differ after `timeout` seconds.
    """
    remaining = timeout
    while remaining > 0:
        hashes = [node.getbestblockhash() for node in rpc_connections]
        if all(h == hashes[0] for h in hashes):
            return
        time.sleep(wait)
        remaining -= wait
    raise AssertionError("Chain sync failed: Best block hashes don't match")
def sync_mempools(rpc_connections, *, wait=1, timeout=60):
    """Block until every node's mempool holds the same set of transactions.

    Polls getrawmempool() on each node every `wait` seconds and raises
    AssertionError if the pools still differ after `timeout` seconds.
    """
    remaining = timeout
    while remaining > 0:
        # Compare every other node's mempool against node 0's.
        reference = set(rpc_connections[0].getrawmempool())
        matches = [set(node.getrawmempool()) == reference
                   for node in rpc_connections[1:]]
        if all(matches):
            return
        time.sleep(wait)
        remaining -= wait
    raise AssertionError("Mempool sync failed")
bitcoind_processes = {}
def initialize_datadir(dirname, n):
    """Create (if needed) the datadir for node n and write its nyancoin.conf.

    The config enables regtest mode and sets per-node RPC credentials and
    p2p/rpc ports. Returns the datadir path.
    """
    datadir = os.path.join(dirname, "node" + str(n))
    if not os.path.isdir(datadir):
        os.makedirs(datadir)
    rpc_u, rpc_p = rpc_auth_pair(n)
    config_lines = [
        "regtest=1\n",
        "rpcuser=" + rpc_u + "\n",
        "rpcpassword=" + rpc_p + "\n",
        "port=" + str(p2p_port(n)) + "\n",
        "rpcport=" + str(rpc_port(n)) + "\n",
        "listenonion=0\n",
    ]
    with open(os.path.join(datadir, "nyancoin.conf"), 'w', encoding='utf8') as f:
        f.writelines(config_lines)
    return datadir
def rpc_auth_pair(n):
    # Per-node RPC credentials: (username, password) for node n.
    user = 'rpcuser💻{}'.format(n)
    password = 'rpcpass🔑{}'.format(n)
    return user, password
def rpc_url(i, rpchost=None):
    """Build the authenticated RPC URL for node i.

    rpchost may be None (defaults to 127.0.0.1 and the node's rpc_port),
    a bare "host", or "host:port".
    """
    rpc_u, rpc_p = rpc_auth_pair(i)
    host = '127.0.0.1'
    port = rpc_port(i)
    if rpchost:
        head, sep, tail = rpchost.partition(':')
        if sep and ':' not in tail:
            # Exactly one colon: split into host and explicit port.
            host, port = head, tail
        else:
            host = rpchost
    return "http://%s:%s@%s:%d" % (rpc_u, rpc_p, host, int(port))
def wait_for_bitcoind_start(process, url, i):
    '''
    Wait for the node daemon to start. This means that RPC is accessible and
    fully initialized. Raise an exception if the process exits during
    initialization.

    process: subprocess.Popen handle for the spawned daemon
    url:     authenticated RPC URL for the node (see rpc_url())
    i:       node index, forwarded to get_rpc_proxy()
    '''
    while True:
        # If the daemon died, fail immediately instead of polling forever.
        if process.poll() is not None:
            raise Exception('bitcoind exited with status %i during initialization' % process.returncode)
        try:
            rpc = get_rpc_proxy(url, i)
            blocks = rpc.getblockcount()
            break # break out of loop on success
        except IOError as e:
            # ECONNREFUSED just means the RPC port isn't open yet; keep polling.
            if e.errno != errno.ECONNREFUSED: # Port not yet open?
                raise # unknown IO error
        except JSONRPCException as e: # Initialization phase
            # Error code -28 means "RPC in warmup": the server is up but still
            # initializing, so keep polling.
            if e.error['code'] != -28: # RPC in warmup?
                raise # unknown JSON RPC exception
        time.sleep(0.25)
def initialize_chain(test_dir, num_nodes, cachedir):
    """
    Create a cache of a 120-block-long chain (with wallet) for MAX_NODES
    nodes, then create *num_nodes* per-test copies from that cache.

    Args:
        test_dir: directory the per-test node datadirs are copied into.
        num_nodes: number of nodes the test will use (must be <= MAX_NODES).
        cachedir: directory holding (or to be filled with) the cached datadirs.
    """
    assert num_nodes <= MAX_NODES
    create_cache = False
    for i in range(MAX_NODES):
        if not os.path.isdir(os.path.join(cachedir, 'node' + str(i))):
            create_cache = True
            break
    if create_cache:
        # find and delete old cache directories if any exist
        for i in range(MAX_NODES):
            if os.path.isdir(os.path.join(cachedir, "node" + str(i))):
                shutil.rmtree(os.path.join(cachedir, "node" + str(i)))
        # Create cache directories, run bitcoinds:
        for i in range(MAX_NODES):
            datadir = initialize_datadir(cachedir, i)
            args = [os.getenv("NYANCOIND", "nyancoind"), "-server", "-keypool=1", "-datadir=" + datadir, "-discover=0"]
            if i > 0:
                # Later nodes connect to node 0 so generated blocks propagate.
                args.append("-connect=127.0.0.1:" + str(p2p_port(0)))
            bitcoind_processes[i] = subprocess.Popen(args)
            if os.getenv("PYTHON_DEBUG", ""):
                print("initialize_chain: nyancoind started, waiting for RPC to come up")
            wait_for_bitcoind_start(bitcoind_processes[i], rpc_url(i), i)
            if os.getenv("PYTHON_DEBUG", ""):
                print("initialize_chain: RPC successfully started")
        rpcs = []
        for i in range(MAX_NODES):
            # Bug fix: the old code referenced an undefined name `url` inside
            # the except handler (NameError); bind it before use and catch
            # only Exception instead of a bare except.
            url = rpc_url(i)
            try:
                rpcs.append(get_rpc_proxy(url, i))
            except Exception:
                sys.stderr.write("Error connecting to " + url + "\n")
                sys.exit(1)
        # Create a 120-block-long chain; each of the 4 first nodes
        # gets 15 mature blocks and 15 immature.
        # Note: To preserve compatibility with older versions of
        # initialize_chain, only 4 nodes will generate coins.
        #
        # blocks are created with timestamps 1 minute apart
        # starting from 2010 minutes in the past
        enable_mocktime()
        block_time = get_mocktime() - (121 * 60)
        for i in range(2):
            for peer in range(4):
                for j in range(15):
                    set_node_times(rpcs, block_time)
                    rpcs[peer].generate(1)
                    block_time += 60
                # Must sync before next peer starts generating blocks
                sync_blocks(rpcs)
        # Shut them down, and clean up cache directories:
        stop_nodes(rpcs)
        disable_mocktime()
        for i in range(MAX_NODES):
            os.remove(log_filename(cachedir, i, "debug.log"))
            os.remove(log_filename(cachedir, i, "db.log"))
            os.remove(log_filename(cachedir, i, "peers.dat"))
            os.remove(log_filename(cachedir, i, "fee_estimates.dat"))
    for i in range(num_nodes):
        from_dir = os.path.join(cachedir, "node" + str(i))
        to_dir = os.path.join(test_dir, "node" + str(i))
        shutil.copytree(from_dir, to_dir)
        initialize_datadir(test_dir, i)  # Overwrite port/rpcport in bitcoin.conf
def initialize_chain_clean(test_dir, num_nodes):
    """
    Create an empty blockchain and num_nodes wallets.
    Useful if a test case wants complete control over initialization.
    """
    # Only datadirs + config files are created here; no nodes are started and
    # no blocks are mined. (The unused `datadir` local was removed.)
    for i in range(num_nodes):
        initialize_datadir(test_dir, i)
def _rpchost_to_args(rpchost):
'''Convert optional IP:port spec to rpcconnect/rpcport args'''
if rpchost is None:
return []
match = re.match('(\[[0-9a-fA-f:]+\]|[^:]+)(?::([0-9]+))?$', rpchost)
if not match:
raise ValueError('Invalid RPC host spec ' + rpchost)
rpcconnect = match.group(1)
rpcport = match.group(2)
if rpcconnect.startswith('['): # remove IPv6 [...] wrapping
rpcconnect = rpcconnect[1:-1]
rv = ['-rpcconnect=' + rpcconnect]
if rpcport:
rv += ['-rpcport=' + rpcport]
return rv
def start_node(i, dirname, extra_args=None, rpchost=None, timewait=None, binary=None):
    """
    Start a bitcoind and return RPC connection to it

    i: node index (selects datadir "node<i>" and the default ports)
    dirname: parent directory containing the node datadirs
    extra_args: extra command-line arguments for the daemon
    rpchost: optional host[:port] override for the RPC endpoint
    timewait: RPC proxy timeout in seconds
    binary: path to the daemon binary (defaults to $NYANCOIND or "nyancoind")
    """
    datadir = os.path.join(dirname, "node"+str(i))
    if binary is None:
        binary = os.getenv("NYANCOIND", "nyancoind")
    args = [ binary, "-datadir="+datadir, "-server", "-keypool=1", "-discover=0", "-rest", "-mocktime="+str(get_mocktime()) ]
    if extra_args is not None: args.extend(extra_args)
    bitcoind_processes[i] = subprocess.Popen(args)
    if os.getenv("PYTHON_DEBUG", ""):
        print("start_node: nyancoind started, waiting for RPC to come up")
    url = rpc_url(i, rpchost)
    # Block until the daemon's RPC interface is fully initialized.
    wait_for_bitcoind_start(bitcoind_processes[i], url, i)
    if os.getenv("PYTHON_DEBUG", ""):
        print("start_node: RPC successfully started")
    proxy = get_rpc_proxy(url, i, timeout=timewait)
    if COVERAGE_DIR:
        coverage.write_all_rpc_commands(COVERAGE_DIR, proxy)
    return proxy
def start_nodes(num_nodes, dirname, extra_args=None, rpchost=None, timewait=None, binary=None):
    """
    Start multiple bitcoinds and return the list of RPC connections to them.
    """
    if extra_args is None:
        extra_args = [None] * num_nodes
    if binary is None:
        binary = [None] * num_nodes
    rpcs = []
    try:
        for index in range(num_nodes):
            rpcs.append(start_node(index, dirname, extra_args[index], rpchost,
                                   timewait=timewait, binary=binary[index]))
    except:
        # If one node failed to start, stop the others before re-raising.
        stop_nodes(rpcs)
        raise
    return rpcs
def log_filename(dirname, n_node, logname):
    """Return the path of *logname* inside node *n_node*'s regtest datadir."""
    node_dir = "node" + str(n_node)
    return os.path.join(dirname, node_dir, "regtest", logname)
def stop_node(node, i):
    """Stop node *i*: issue the `stop` RPC, wait for the process to exit
    cleanly, then drop its process handle."""
    try:
        node.stop()
    except http.client.CannotSendRequest as e:
        # The daemon may already be shutting down; log and still wait below.
        print("WARN: Unable to stop node: " + repr(e))
    return_code = bitcoind_processes[i].wait(timeout=BITCOIND_PROC_WAIT_TIMEOUT)
    assert_equal(return_code, 0)
    del bitcoind_processes[i]
def stop_nodes(nodes):
    """Stop every node in *nodes* and verify all managed processes are gone."""
    for index, node in enumerate(nodes):
        stop_node(node, index)
    assert not bitcoind_processes.values() # All connections must be gone now
def set_node_times(nodes, t):
    """Set the mock time of every node in *nodes* to *t*."""
    for single_node in nodes:
        single_node.setmocktime(t)
def connect_nodes(from_connection, node_num):
    """One-way connect: ask *from_connection* to peer with local node *node_num*."""
    ip_port = "127.0.0.1:"+str(p2p_port(node_num))
    from_connection.addnode(ip_port, "onetry")
    # poll until version handshake complete to avoid race conditions
    # with transaction relaying
    while any(peer['version'] == 0 for peer in from_connection.getpeerinfo()):
        time.sleep(0.1)
def connect_nodes_bi(nodes, a, b):
    """Connect nodes[a] and nodes[b] to each other (both directions)."""
    for src, dst in ((a, b), (b, a)):
        connect_nodes(nodes[src], dst)
def find_output(node, txid, amount):
    """
    Return the index of the output of *txid* whose value equals *amount*.
    Raises RuntimeError if no such output exists.
    """
    txdata = node.getrawtransaction(txid, 1)
    for index, out in enumerate(txdata["vout"]):
        if out["value"] == amount:
            return index
    raise RuntimeError("find_output txid %s : %s not found"%(txid,str(amount)))
def gather_inputs(from_node, amount_needed, confirmations_required=1):
    """
    Return (total_in, inputs): a random selection of unspent txouts from
    *from_node* whose combined value covers *amount_needed*.
    Raises RuntimeError if the wallet cannot cover the amount.
    """
    assert(confirmations_required >=0)
    utxo = from_node.listunspent(confirmations_required)
    random.shuffle(utxo)
    total_in = Decimal("0.00000000")
    inputs = []
    while utxo and total_in < amount_needed:
        candidate = utxo.pop()
        total_in += candidate["amount"]
        inputs.append({"txid": candidate["txid"],
                       "vout": candidate["vout"],
                       "address": candidate["address"]})
    if total_in < amount_needed:
        raise RuntimeError("Insufficient funds: need %d, have %d"%(amount_needed, total_in))
    return (total_in, inputs)
def make_change(from_node, amount_in, amount_out, fee):
    """
    Create change output(s) for a transaction and return them as
    {address: amount}. Returns {} when there is no change.
    """
    outputs = {}
    spent = amount_out + fee
    change = amount_in - spent
    if change > spent * 2:
        # Create an extra change output to break up big inputs.
        extra_address = from_node.getnewaddress()
        # Split change in two, being careful of rounding:
        outputs[extra_address] = Decimal(change/2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
        change = amount_in - spent - outputs[extra_address]
    if change > 0:
        outputs[from_node.getnewaddress()] = change
    return outputs
def send_zeropri_transaction(from_node, to_node, amount, fee):
    """
    Create&broadcast a zero-priority transaction.
    Returns (txid, hex-encoded-txdata)
    Ensures transaction is zero-priority by first creating a send-to-self,
    then using its output
    """
    # Create a send-to-self with confirmed inputs:
    self_address = from_node.getnewaddress()
    # Gather enough to cover the payment plus two fees (one per transaction).
    (total_in, inputs) = gather_inputs(from_node, amount+fee*2)
    outputs = make_change(from_node, total_in, amount+fee, fee)
    outputs[self_address] = float(amount+fee)
    self_rawtx = from_node.createrawtransaction(inputs, outputs)
    self_signresult = from_node.signrawtransaction(self_rawtx)
    self_txid = from_node.sendrawtransaction(self_signresult["hex"], True)
    # Locate the send-to-self output so the second tx can spend it.
    vout = find_output(from_node, self_txid, amount+fee)
    # Now immediately spend the output to create a 1-input, 1-output
    # zero-priority transaction:
    inputs = [ { "txid" : self_txid, "vout" : vout } ]
    outputs = { to_node.getnewaddress() : float(amount) }
    rawtx = from_node.createrawtransaction(inputs, outputs)
    signresult = from_node.signrawtransaction(rawtx)
    txid = from_node.sendrawtransaction(signresult["hex"], True)
    return (txid, signresult["hex"])
def random_zeropri_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
    """
    Create a zero-priority transaction between two randomly chosen nodes.
    Returns (txid, hex-encoded-transaction-data, fee).
    """
    sender = random.choice(nodes)
    receiver = random.choice(nodes)
    fee = min_fee + fee_increment * random.randint(0, fee_variants)
    (txid, txhex) = send_zeropri_transaction(sender, receiver, amount, fee)
    return (txid, txhex, fee)
def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
    """
    Create and broadcast a transaction between two randomly chosen nodes.
    Returns (txid, hex-encoded-transaction-data, fee).
    """
    sender = random.choice(nodes)
    receiver = random.choice(nodes)
    fee = min_fee + fee_increment * random.randint(0, fee_variants)
    (total_in, inputs) = gather_inputs(sender, amount + fee)
    outputs = make_change(sender, total_in, amount, fee)
    outputs[receiver.getnewaddress()] = float(amount)
    raw_tx = sender.createrawtransaction(inputs, outputs)
    sign_result = sender.signrawtransaction(raw_tx)
    txid = sender.sendrawtransaction(sign_result["hex"], True)
    return (txid, sign_result["hex"], fee)
def assert_fee_amount(fee, tx_size, fee_per_kB):
    """Assert the fee was in range"""
    target_fee = round_tx_size(tx_size) * fee_per_kB / 1000
    if fee < target_fee:
        raise AssertionError("Fee of %s BTC too low! (Should be %s BTC)"%(str(fee), str(target_fee)))
    # allow the wallet's estimation to be at most 2 bytes off
    upper_bound = round_tx_size(tx_size + 2) * fee_per_kB / 1000
    if fee > upper_bound:
        raise AssertionError("Fee of %s BTC too high! (Should be %s BTC)"%(str(fee), str(target_fee)))
def round_tx_size(tx_size):
    """Round *tx_size* up to the next multiple of 1000 bytes."""
    kilobytes = math.ceil(tx_size / 1000.0)
    return int(kilobytes) * 1000
def assert_equal(thing1, thing2, *args):
    """Assert that every supplied value compares equal to the first one."""
    values = (thing1, thing2) + args
    if any(thing1 != other for other in values[1:]):
        raise AssertionError("not(%s)" % " == ".join(str(v) for v in values))
def assert_greater_than(thing1, thing2):
    """Assert thing1 > thing2; raises AssertionError otherwise."""
    if thing1 <= thing2:
        raise AssertionError("%s <= %s"%(str(thing1),str(thing2)))
def assert_greater_than_or_equal(thing1, thing2):
    """Assert thing1 >= thing2; raises AssertionError otherwise."""
    if thing1 < thing2:
        raise AssertionError("%s < %s"%(str(thing1),str(thing2)))
def assert_raises(exc, fun, *args, **kwds):
    """Assert that fun(*args, **kwds) raises *exc* (the message is not checked)."""
    assert_raises_message(exc, None, fun, *args, **kwds)
def assert_raises_message(exc, message, fun, *args, **kwds):
    """
    Assert that calling fun(*args, **kwds) raises *exc* and, when *message*
    is given, that it is a substring of the exception's error message
    (read from the exception's .error['message'] field).
    """
    try:
        fun(*args, **kwds)
    except exc as err:
        if message is not None and message not in err.error['message']:
            raise AssertionError("Expected substring not found:"+err.error['message'])
    except Exception as err:
        raise AssertionError("Unexpected exception raised: "+type(err).__name__)
    else:
        raise AssertionError("No exception raised")
def assert_raises_jsonrpc(code, message, fun, *args, **kwds):
    """Run an RPC and verify that a specific JSONRPC exception code and message is raised.
    Calls function `fun` with arguments `args` and `kwds`. Catches a JSONRPCException
    and verifies that the error code and message are as expected. Throws AssertionError if
    no JSONRPCException was returned or if the error code/message are not as expected.
    Args:
        code (int), optional: the error code returned by the RPC call (defined
        in src/rpc/protocol.h). Set to None if checking the error code is not required.
        message (string), optional: [a substring of] the error string returned by the
        RPC call. Set to None if checking the error string is not required
        fun (function): the function to call. This should be the name of an RPC.
        args*: positional arguments for the function.
        kwds**: named arguments for the function.
    """
    try:
        fun(*args, **kwds)
    except JSONRPCException as e:
        # JSONRPCException was thrown as expected. Check the code and message values are correct.
        if (code is not None) and (code != e.error["code"]):
            raise AssertionError("Unexpected JSONRPC error code %i" % e.error["code"])
        if (message is not None) and (message not in e.error['message']):
            raise AssertionError("Expected substring not found:"+e.error['message'])
    except Exception as e:
        # Any non-JSONRPC exception is a test failure in its own right.
        raise AssertionError("Unexpected exception raised: "+type(e).__name__)
    else:
        raise AssertionError("No exception raised")
def assert_is_hex_string(string):
    """Assert that *string* parses as a hexadecimal number."""
    try:
        int(string, 16)
    except Exception as err:
        raise AssertionError(
            "Couldn't interpret %r as hexadecimal; raised: %s" % (string, err))
def assert_is_hash_string(string, length=64):
    """Assert *string* looks like a lowercase hex hash of *length* characters.

    Pass a falsy *length* to skip the length check.
    """
    if not isinstance(string, str):
        raise AssertionError("Expected a string, got type %r" % type(string))
    if length and len(string) != length:
        raise AssertionError(
            "String of length %d expected; got %d" % (length, len(string)))
    if not re.match('[abcdef0-9]+$', string):
        raise AssertionError(
            "String %r contains invalid characters for a hash." % string)
def assert_array_result(object_array, to_match, expected, should_not_find = False):
    """
    Pass in array of JSON objects, a dictionary with key/value pairs
    to match against, and another dictionary with expected key/value
    pairs.
    If the should_not_find flag is true, to_match should not be found
    in object_array
    """
    if should_not_find == True:
        # When asserting absence there is nothing to compare against.
        assert_equal(expected, { })
    num_matched = 0
    for item in object_array:
        # Does this item satisfy every to_match key/value pair?
        all_match = True
        for key,value in to_match.items():
            if item[key] != value:
                all_match = False
        if not all_match:
            continue
        elif should_not_find == True:
            # Count the unwanted match; failure is reported after the loop.
            num_matched = num_matched+1
        # Verify the expected key/value pairs on every matching item.
        for key,value in expected.items():
            if item[key] != value:
                raise AssertionError("%s : expected %s=%s"%(str(item), str(key), str(value)))
            num_matched = num_matched+1
    if num_matched == 0 and should_not_find != True:
        raise AssertionError("No objects matched %s"%(str(to_match)))
    if num_matched > 0 and should_not_find == True:
        raise AssertionError("Objects were found %s"%(str(to_match)))
def satoshi_round(amount):
    """Round *amount* down to 8 decimal places (one satoshi)."""
    one_satoshi = Decimal('0.00000001')
    return Decimal(amount).quantize(one_satoshi, rounding=ROUND_DOWN)
# Helper to create at least "count" utxos
# Pass in a fee that is sufficient for relay and mining new transactions.
def create_confirmed_utxos(fee, node, count):
    """
    Ensure *node* owns at least *count* confirmed utxos by repeatedly
    splitting existing outputs in two; return the resulting utxo list.
    """
    # Mine enough blocks to have spendable (mature) coinbase outputs.
    node.generate(int(0.5*count)+61)
    utxos = node.listunspent()
    iterations = count - len(utxos)
    addr1 = node.getnewaddress()
    addr2 = node.getnewaddress()
    if iterations <= 0:
        return utxos
    for i in range(iterations):
        t = utxos.pop()
        inputs = [{ "txid" : t["txid"], "vout" : t["vout"]}]
        outputs = {}
        send_value = t['amount'] - fee
        # Split the value across two outputs (net +1 utxo per iteration).
        outputs[addr1] = satoshi_round(send_value/2)
        outputs[addr2] = satoshi_round(send_value/2)
        raw_tx = node.createrawtransaction(inputs, outputs)
        signed_tx = node.signrawtransaction(raw_tx)["hex"]
        # (The returned txid was previously stored in an unused local.)
        node.sendrawtransaction(signed_tx)
    # Confirm everything still sitting in the mempool.
    while (node.getmempoolinfo()['size'] > 0):
        node.generate(1)
    utxos = node.listunspent()
    assert(len(utxos) >= count)
    return utxos
# Create large OP_RETURN txouts that can be appended to a transaction
# to make it large (helper for constructing large transactions).
def gen_return_txouts():
    """Return a hex blob of 128 zero-value OP_RETURN txouts (~516 bytes each)."""
    # One script_pubkey: OP_RETURN OP_PUSHDATA2 <512 bytes of 0x01>.
    script_pubkey = "6a4d0200" + "01" * 512
    # "81" leads the blob, followed by the 128 txouts to be inserted before
    # the change txout of the target transaction.
    pieces = ["81"]
    for _ in range(128):
        pieces.append("0000000000000000")  # 8-byte txout value (0)
        pieces.append("fd0402")            # script length (0x0204 = 516 bytes)
        pieces.append(script_pubkey)
    return "".join(pieces)
def create_tx(node, coinbase, to_address, amount):
    """Build and sign a 1-in/1-out tx spending output 0 of *coinbase*; return its hex."""
    tx_inputs = [{"txid": coinbase, "vout": 0}]
    tx_outputs = {to_address: amount}
    raw = node.createrawtransaction(tx_inputs, tx_outputs)
    signed = node.signrawtransaction(raw)
    assert_equal(signed["complete"], True)
    return signed["hex"]
# Create a spend of each passed-in utxo, splicing in "txouts" to each raw
# transaction to make it large. See gen_return_txouts() above.
def create_lots_of_big_transactions(node, txouts, utxos, num, fee):
    """Broadcast *num* large transactions built from *utxos*; return their txids."""
    addr = node.getnewaddress()
    txids = []
    for _ in range(num):
        t = utxos.pop()
        inputs=[{ "txid" : t["txid"], "vout" : t["vout"]}]
        outputs = {}
        change = t['amount'] - fee
        outputs[addr] = satoshi_round(change)
        rawtx = node.createrawtransaction(inputs, outputs)
        # Splice the OP_RETURN txouts into the serialized transaction.
        # NOTE(review): offsets 92/94 assume the fixed-size one-input template
        # produced by createrawtransaction above - confirm if the
        # serialization format ever changes.
        newtx = rawtx[0:92]
        newtx = newtx + txouts
        newtx = newtx + rawtx[94:]
        # Sign with the "NONE" sighash type (presumably so the spliced-in
        # outputs do not invalidate the signature - confirm).
        signresult = node.signrawtransaction(newtx, None, None, "NONE")
        txid = node.sendrawtransaction(signresult["hex"], True)
        txids.append(txid)
    return txids
def mine_large_block(node, utxos=None):
    """Fill a block with 14 large OP_RETURN transactions and mine it."""
    # generate a 66k transaction,
    # and 14 of them is close to the 1MB block limit
    num = 14
    txouts = gen_return_txouts()
    utxos = utxos if utxos is not None else []
    if len(utxos) < num:
        # Not enough utxos supplied: refresh the list from the wallet.
        utxos.clear()
        utxos.extend(node.listunspent())
    fee = 100 * node.getnetworkinfo()["relayfee"]
    create_lots_of_big_transactions(node, txouts, utxos, num, fee=fee)
    node.generate(1)
def get_bip9_status(node, key):
    """Return the BIP9 softfork status entry for *key* from *node*."""
    return node.getblockchaininfo()['bip9_softforks'][key]
| [
"btcinfo@sdf.org"
] | btcinfo@sdf.org |
47bc8bcd02dd3777219c11a328028ebd9c5b01bd | dc5dcca6bab3205b62f2c3de90f256f2933a52aa | /dijkstra.py | 778944d73edd58e44dac6d456ed540d67031b69f | [] | no_license | Megatron8010/dijkstra | 4773f72bbe79b969ea688549d014bced53554a28 | bc4865ecd063632e0c74a60649036d406aae7d7c | refs/heads/master | 2020-03-09T06:23:10.630514 | 2018-04-08T12:16:33 | 2018-04-08T12:16:33 | 128,637,750 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,959 | py |
# Example weighted, undirected graph: adjacency dict mapping each vertex to
# {neighbour: edge cost}.
graph = {
    'B': {'A': 5, 'D': 1, 'G': 2},
    'A': {'B': 5, 'D': 3, 'E': 12, 'F' :5},
    'D': {'B': 1, 'G': 1, 'E': 1, 'A': 3},
    'G': {'B': 2, 'D': 1, 'C': 2},
    'C': {'G': 2, 'E': 1, 'F': 16},
    'E': {'A': 12, 'D': 1, 'C': 1, 'F': 2},
    'F': {'A': 5, 'E': 2, 'C': 16}}
def dijkstra(graph, start, end, visited=None, distances=None, predecessors=None):
    """Recursively compute the shortest path from *start* to *end* in a
    weighted graph (adjacency dict of dicts) and print it with its cost.

    Bug fix: the original used mutable default arguments ([], {}, {}), so
    visited/distance state leaked between successive top-level calls and a
    second call could print stale costs or crash. Defaults are now None and
    fresh containers are created per call.
    """
    if visited is None:
        visited = []
    if distances is None:
        distances = {}
    if predecessors is None:
        predecessors = {}
    # a few sanity checks
    if start not in graph:
        raise TypeError('The root of the shortest path tree cannot be found')
    if end not in graph:
        raise TypeError('The target of the shortest path cannot be found')
    # ending condition
    if start == end:
        # We reached the target: rebuild the path by walking predecessors.
        path = []
        pred = end
        while pred is not None:
            path.append(pred)
            pred = predecessors.get(pred, None)
        print('shortest path: ' + str(path) + " cost=" + str(distances[end]))
    else:
        # if it is the initial run, initialize the cost of the root
        if not visited:
            distances[start] = 0
        # relax the edges to every unvisited neighbor
        for neighbor in graph[start]:
            if neighbor not in visited:
                new_distance = distances[start] + graph[start][neighbor]
                if new_distance < distances.get(neighbor, float('inf')):
                    distances[neighbor] = new_distance
                    predecessors[neighbor] = start
        # mark as visited
        visited.append(start)
        # recurse from the cheapest not-yet-visited node
        unvisited = {}
        for k in graph:
            if k not in visited:
                unvisited[k] = distances.get(k, float('inf'))
        x = min(unvisited, key=unvisited.get)
        dijkstra(graph, x, end, visited, distances, predecessors)
| [
"aditya.singh8010@gmail.com"
] | aditya.singh8010@gmail.com |
b74c791aeb0e95343badaa728ecd212a3b2023e8 | 9cc39b5cac2e3ba326f913af98867d60357d120e | /osdf/utils/cipherUtils.py | 169f1a1a96d51da284a09988b28b414ecc3d5c97 | [
"Apache-2.0"
] | permissive | onap/optf-osdf | 148deee7b7968bbe8bdc7b82b911f382fab552c8 | 61c838971a4a962fc74d2187be13f2b7561fd627 | refs/heads/master | 2023-07-24T21:29:45.091918 | 2023-07-03T12:39:06 | 2023-07-03T12:46:24 | 115,063,339 | 4 | 0 | NOASSERTION | 2021-06-29T18:41:53 | 2017-12-22T01:34:17 | Python | UTF-8 | Python | false | false | 2,017 | py | #
# -------------------------------------------------------------------------
# Copyright (C) 2020 Wipro Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -------------------------------------------------------------------------
from Crypto.Cipher import AES
from osdf.config.base import osdf_config
from Crypto.Util.Padding import unpad
from Crypto.Util.Padding import pad
class AESCipher(object):
    """Singleton wrapper around AES-CBC encryption/decryption.

    The key comes either from the caller or from
    osdf_config.deployment["appkey"]; ciphertext is returned as
    hex(iv) + hex(encrypted payload).
    """
    # Holds the single shared instance (name-mangled to _AESCipher__instance).
    __instance = None
    @staticmethod
    def get_instance(key = None):
        """Return the shared instance, creating it on first use."""
        if AESCipher.__instance is None:
            print("Creating the singleton instance")
            AESCipher(key)
        return AESCipher.__instance
    def __init__(self, key=None):
        """Initialize the singleton; raises if an instance already exists."""
        if AESCipher.__instance is not None:
            raise Exception("This class is a singleton!")
        else:
            AESCipher.__instance = self
            # NOTE(review): self.bs is never read anywhere in this class -
            # confirm it can be removed.
            self.bs = 32
            if key is None:
                key = osdf_config.deployment["appkey"]
            self.key = key.encode()
    def encrypt(self, data):
        """Encrypt *data* (str); return hex(iv) + hex(ciphertext)."""
        data = data.encode()
        # No IV is passed, so AES.new generates a fresh random one (cipher.iv).
        cipher = AES.new(self.key, AES.MODE_CBC)
        ciphered_data = cipher.encrypt(pad(data, AES.block_size))
        enc = (cipher.iv.hex())+(ciphered_data.hex())
        return enc
    def decrypt(self, enc):
        """Decrypt a hex string produced by encrypt(); return the plaintext str."""
        # First 32 hex chars = the 16-byte IV; the remainder is the ciphertext.
        iv = bytes.fromhex(enc[:32])
        ciphered_data = bytes.fromhex(enc[32:])
        cipher = AES.new(self.key, AES.MODE_CBC, iv=iv)
        original_data = unpad(cipher.decrypt(ciphered_data), AES.block_size).decode()
        return original_data
| [
"dhebeha.mj71@wipro.com"
] | dhebeha.mj71@wipro.com |
46b946382e840f6f5e41cde85dc11a51b39a6a5e | 8283ed41a009b07a3b0ac20c5771f10dc9e97164 | /100_exercicios_guanabara/Concluídos/exe85.py | 7e366587c12ad2b9e5a51f3b50853085f791b6b6 | [] | no_license | carlosalbertoestrela/CursoEmVideoPython | 77c7f22a2e97f0b319f903d1e14897359084c50c | 8acf95e464853c3ee3fb6aaf8281e5b8aec2fd25 | refs/heads/master | 2022-12-17T12:20:37.690105 | 2020-09-14T16:42:40 | 2020-09-14T16:42:40 | 286,602,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 560 | py | """
Exercício Python 085:
Crie um programa onde o usuário possa digitar sete valores numéricos e cadastre-os em uma lista única que mantenha
separados os valores pares e ímpares. No final, mostre os valores pares e ímpares em ordem crescente.
"""
lista = [[], []]
for i in range(1,8):
n = int(input(f'Digite o {i}º número: '))
if n % 2 == 0:
lista[0].append(n)
else:
lista[1].append(n)
print('-='*30)
print(f'Os números impáres que você digitou foram: {sorted(lista[1])}')
print(f'E os pares foram: {sorted(lista[0])}!')
| [
"carlos.alberto-estrela@hotmail.com"
] | carlos.alberto-estrela@hotmail.com |
585b630d916df8728e69377596489e4d48f1320c | 9fb41006fd391b2beeea64836e3c289fea36d05c | /myapi/config.py | bc461c00a2d21fe5aab14142e4fb737591d54bae | [] | no_license | harjiwiga/restful_api | 6a5527d2dd3577dad4bbe520d92cdc0a6a6073e1 | 950d124d488cc34850c4f40887e049f2418a9de4 | refs/heads/master | 2020-04-14T09:19:56.804407 | 2019-02-03T19:14:50 | 2019-02-03T19:14:50 | 163,757,517 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 602 | py | import os
basedir = os.path.abspath(os.path.dirname(__file__))
"""Default configuration
Use env var to override
"""
DEBUG = True
SECRET_KEY = "changeme"
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL')# or 'sqlite:///' + os.path.join(basedir, 'myapi.db')
# "sqlite:////tmp/myapi.db"
SQLALCHEMY_TRACK_MODIFICATIONS = False
JWT_BLACKLIST_ENABLED = True
JWT_BLACKLIST_TOKEN_CHECKS = ['access', 'refresh']
CELERY_BROKER_URL = "amqp://guest:guest@localhost/"
CELERY_RESULT_BACKEND = "amqp://guest:guest@localhost/"
LOG_TO_STDOUT = os.environ.get('LOG_TO_STDOUT') or True
ALLOW_NO_VALUE = True | [
"harjiwigaasmoko@localhost.localdomain"
] | harjiwigaasmoko@localhost.localdomain |
1c2e6fc89feaef8003cf91c6e3db19398008dde5 | fa5070498f31026b662053d1d5d91282cb1f68b9 | /test01/tapp/views.py | 36f742b4db34a7c1ddbfc041c48b401998010c04 | [
"Apache-2.0"
] | permissive | jinguangzhu/the_first_python | f074c4943028421f96285a2f772e7ccf102248e5 | d9d035b44652a4cd6ecd1834dd9930d1c78bf360 | refs/heads/master | 2020-03-19T14:16:35.860167 | 2018-06-19T13:58:25 | 2018-06-19T13:58:25 | 136,615,947 | 0 | 3 | Apache-2.0 | 2018-06-19T13:31:00 | 2018-06-08T12:27:09 | Python | UTF-8 | Python | false | false | 270 | py | from django.shortcuts import render
from tapp.models import *
# Create your views here.
def my_student(req):
    """Render the student list template with every Student row."""
    students = Student.objects.all()
    context = {"student": students}
    return render(req, "student.html", context=context)
def first(req):
    """Render the static hello-django page."""
    return render(req,"hellodjango.html")
"ubuntu@localhost.localdomain"
] | ubuntu@localhost.localdomain |
717f391d6f42eb4573a62df0e77f06f1bca837f6 | c4b618ae721abc13862c617f91c6ccf0f86fc01b | /neutron/plugins/ibm/common/exceptions.py | d2e5e7ed8e5f1e03a4a01fac73cf5f91012b4ce8 | [
"Apache-2.0"
] | permissive | virtualopensystems/neutron | 2c3938375d02e3b80a0a32640573e3ed0dffa11d | 067acd95ab6042ca5d123342abd420a2a938acd2 | refs/heads/master | 2020-07-02T03:22:36.448097 | 2019-04-18T09:46:34 | 2019-04-18T09:46:34 | 22,715,796 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,008 | py | # Copyright 2014 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Mohammad Banikazemi, IBM Corp.
from neutron.common import exceptions
class SdnveException(exceptions.NeutronException):
    """Generic error raised by the SDN-VE plugin; %(msg)s carries the detail."""
    message = _("An unexpected error occurred in the SDN-VE Plugin. "
                "Here is the error message: %(msg)s")
class BadInputException(exceptions.BadRequest):
    """Raised when a request is missing required information."""
    # Typo fix in the user-visible message: "nececessary" -> "necessary".
    message = _("The input does not contain necessary info: %(msg)s")
| [
"mb@us.ibm.com"
] | mb@us.ibm.com |
4031931c37ca21b115f5abfba4f5ad8ca4b791a6 | aab9a5082ae8875821f7c15fdaa3c43013f0a906 | /ur5_ws/src/pick_place.py | 76928ffadda8279fd2044b827fff72c319701153 | [] | no_license | Raouf-sawas/Gazebo | 9255e65a908067f40937bc261b63d58bfcca8a0c | a1310c32c430c64bd921c545c4a2a725490b6128 | refs/heads/main | 2023-03-15T09:18:12.592359 | 2021-03-04T18:43:36 | 2021-03-04T18:43:36 | 302,495,661 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,926 | py | #!/usr/bin/env python33
import sys
import rospy
import moveit_commander
import geometry_msgs.msg
import random
from trajectory_msgs.msg import JointTrajectory
from std_msgs.msg import Header
from trajectory_msgs.msg import JointTrajectoryPoint
import rospy
import argparse
import actionlib
import control_msgs.msg
def gripper_client(value):
    """Send a GripperCommand goal with position *value* to the gripper
    action server and block until the result is available."""
    # Create an action client
    client = actionlib.SimpleActionClient(
        '/gripper_controller/gripper_cmd', # namespace of the action topics
        control_msgs.msg.GripperCommandAction # action type
    )
    # Wait until the action server has been started and is listening for goals
    client.wait_for_server()
    # Create a goal to send (to the action server)
    goal = control_msgs.msg.GripperCommandGoal()
    goal.command.position = value
    goal.command.max_effort = 6.0 # Do not limit the effort
    print(goal.command.max_effort)
    client.send_goal(goal)
    client.wait_for_result()
    return client.get_result()
# --- MoveIt setup: initialize ROS/MoveIt and the "arm" planning group. ---
moveit_commander.roscpp_initialize(sys.argv)
rospy.init_node('moveit_group_python_interface',anonymous=True)
robot = moveit_commander.RobotCommander()
arm_group = moveit_commander.MoveGroupCommander("arm")
arm_group.set_max_velocity_scaling_factor(1)
arm_group.set_max_acceleration_scaling_factor(0.01)
# Note this is not the speed of the end-effector point.
# group.setMaxVelocityScalingFactor(0.005);
# group.setMaxAccelerationScalingFactor(0.001);
# --- Approach pose above the object (fixed end-effector orientation). ---
# NOTE(review): Pose() is constructed twice on consecutive lines; the first
# object is discarded - confirm the duplicate can be removed.
pose_targe = geometry_msgs.msg.Pose()
pose_targe = geometry_msgs.msg.Pose()
pose_targe.orientation.w = -0.5
pose_targe.orientation.x = 0.5
pose_targe.orientation.y = -0.5
pose_targe.orientation.z = 0.5
#pose_targe.position.x = 0.4
pose_targe.position.x = 0.42111
pose_targe.position.y = 0.0
pose_targe.position.z = 0.515018
arm_group.set_pose_target(pose_targe)
plan1 =arm_group.go()
print("first")
# Close the gripper fully to grasp the object.
gripper_client(0.0)
# --- Lower then lift slightly to complete the pick. ---
pose_targe.position.x = 0.4218
pose_targe.position.y = 0.0
pose_targe.position.z = 0.5
arm_group.set_pose_target(pose_targe)
plan1 =arm_group.go()
print("sec")
pose_targe.position.x = 0.4218
pose_targe.position.y = 0.0
pose_targe.position.z = 0.523
arm_group.set_pose_target(pose_targe)
plan1 =arm_group.go()
#pick done
#arm_group.set_max_acceleration_scaling_factor(0.001)
rospy.sleep(1)
# --- Carry the object through two intermediate waypoints to the place pose. ---
pose_targe.position.x = 0.43
pose_targe.position.y = 0.0
pose_targe.position.z = 0.522
arm_group.set_pose_target(pose_targe)
plan1 =arm_group.go()
print("thirfd")
arm_group.set_max_acceleration_scaling_factor(0.2)
rospy.sleep(2)
pose_targe.position.x = 0.43
pose_targe.position.y = 0.0
pose_targe.position.z = 0.522
arm_group.set_pose_target(pose_targe)
plan1 =arm_group.go()
pose_targe.position.x = 0.46
pose_targe.position.y = 0.0
pose_targe.position.z = 0.522
arm_group.set_pose_target(pose_targe)
plan1 =arm_group.go()
print("fourth")
moveit_commander.roscpp_shutdown()
| [
"a.sawas.92@gmail.com"
] | a.sawas.92@gmail.com |
e19038d708892904b6a8cdebe1b6472274db5e99 | 24ec47fff2ee8916a5a0d9c2cd5d0188bd66c17b | /src/common/common.py | ec584fb0680448ade828ad6a004513ae898eca22 | [] | no_license | excitingmvr/flk_second_2 | 2eab91d5164bb8c229113dc113ccc4e14008c2c9 | 190ffd518ce2ec04edf600a4417ce7d44d09a108 | refs/heads/main | 2023-01-10T12:04:32.553775 | 2020-11-09T05:38:43 | 2020-11-09T05:38:43 | 311,227,614 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,999 | py | # -- coding: utf-8 --
from flask import request
from random import *
import datetime, os
from src.common.constants import Constants
from src.common.util import Util
from src.common.database import Database
from src.common.commonSql import CommonSql
# Module-level singletons shared by every request handler in this module.
# NOTE(review): `dt` is captured once at import time, yet uploadFile uses it
# to build date-based upload paths - a long-running server keeps writing to
# the start-up date's folder. Confirm whether per-call datetime.now() was
# intended.
dt = datetime.datetime.now()
constantsObj = Constants()
utilObj = Util()
databaseObj = Database()
commonSqlObj = CommonSql()
class Common():
def __init__(self, module):
self.moduleListUrl = "/" + module + "/list"
self.moduleFormUrl = "/" + module + "/form"
self.moduleViewUrl = "/" + module + "/view"
self.moduleListHtml = "/" + module + "/" + module + "List.html"
self.moduleFormHtml = "/" + module + "/" + module + "Form.html"
self.moduleViewHtml = "/" + module + "/" + module + "View.html"
def uploadFile(self, inputName, module, lastrowid, tablePrefix):
inputName = inputName
fileKindCd = inputName[-1:]
files = request.files.getlist(inputName)
i = 0
fileDefaultNy = 1
filePathFull = constantsObj.UPLOAD_FOLDER_FULL + "/" + module + "/" + inputName + "/" + str(dt.year) + "/" + str(dt.month) + "/" + str(dt.day) + "/" + str(dt.hour)
filePathForLink = constantsObj.UPLOAD_FOLDER_FOR_LINK + "/" + module + "/" + inputName + "/" + str(dt.year) + "/" + str(dt.month) + "/" + str(dt.day) + "/" + str(dt.hour)
if os.path.isdir(filePathFull):
pass
else:
os.makedirs(filePathFull)
for file in files:
if file.filename:
file.save(os.path.join(filePathFull, file.filename))
fileExtension = os.path.splitext(file.filename)[1]
fileNameSystem = str(randint(1000, 9999)) + str(dt.year)[0:2] + utilObj.setZerofill(str(dt.month)) + utilObj.setZerofill(str(dt.day)) + utilObj.setZerofill(str(dt.hour)) + str(dt.microsecond)
os.rename(filePathFull + "/" + file.filename, filePathFull + "/" + fileNameSystem + fileExtension)
if i > 0:
defaultNy = 0
else:
pass
dicInsertFile = {
"deftFileKindCd":fileKindCd,
"deftFileDefaultNy":fileDefaultNy,
"deftFilePath":filePathForLink,
"deftFileNameOriginal":file.filename,
"deftFileNameSystem":fileNameSystem,
"deftFileExtension":fileExtension[1:],
"deftFileSize":os.path.getsize(filePathFull + "/" + fileNameSystem + fileExtension),
"deftFileOrder":i,
"deftRegIp":request.environ["REMOTE_ADDR"],
"deftRegSeq":0,
"deftRegOffset":0,
"deftRegDatetime":datetime.datetime.today(),
"deftRegDeviceCd":utilObj.getDeviceCd(),
"deftModIp":request.environ["REMOTE_ADDR"],
"deftModSeq":0,
"deftModOffset":0,
"deftModDatetime":datetime.datetime.today(),
"deftModDeviceCd":utilObj.getDeviceCd(),
"deftSys":constantsObj.SYS_NUMBER,
"deftDelNy":0,
"deftUpperSeq":lastrowid
}
i = i + 1
commonSqlObj.uploadFile(tablePrefix, (dicInsertFile))
else:
pass
def getTotalFileSize(self, files):
    """Return the combined deftFileSize of every file dict in *files*."""
    return sum(entry["deftFileSize"] for entry in files)
| [
"excitingmvr@gmail.com"
] | excitingmvr@gmail.com |
d3bb92b4fb27224e68766a3ebc2ca6285c54634f | b1bbbceca05fc36d010174a95c9d18892fe29448 | /excel_move/test/test_scanning_as.py | e6a11230b595fdc7e29e8891b25936282748b21c | [] | no_license | JimmyDaSilva/excel-util | 87801d12ca5e94ab746d03945b0c50e24a1b2cf3 | 985c4e8c3a8bef07ca32f582082e555e03876a3b | refs/heads/hydro | 2021-01-21T02:20:39.836788 | 2015-01-12T20:47:50 | 2015-01-12T20:47:50 | 39,127,250 | 0 | 1 | null | 2015-07-15T09:08:54 | 2015-07-15T09:08:54 | null | UTF-8 | Python | false | false | 902 | py | #! /usr/bin/python
import sys
import actionlib
import rospy
from excel_move.msg import ScanningAction, ScanningGoal
def main():
    # Python 2 / ROS test client: sends one hard-coded ScanningGoal to the
    # 'scan_parts' action server and prints the outcome and result.
    rospy.init_node('test_scanning_as')
    act_goal = ScanningGoal()
    # Earlier test fixtures, kept for reference:
    # act_goal.good_bins = ["333", "555", "999"]
    # act_goal.bad_bins = ["111", "777"]
    # act_goal.good_bins = ["111", "555"]
    # act_goal.bad_bins = ["333", "777", "999"]
    act_goal.good_bins = ["333", "777", "999"]
    act_goal.bad_bins = ["111", "555"]
    print "Waiting for action server 'scan_parts' ..."
    scanning_ac = actionlib.SimpleActionClient('scan_parts', ScanningAction)
    # Blocks until the action server is up.
    scanning_ac.wait_for_server()
    print "Found action server."
    print "Sending action goal:", act_goal
    # Blocks until the goal finishes; outcome is the terminal goal state.
    outcome = scanning_ac.send_goal_and_wait(act_goal)
    result = scanning_ac.get_result()
    print "Outcome:", outcome
    print "Result:", result

if __name__ == "__main__":
    main()
| [
"kphawkins@gmail.com"
] | kphawkins@gmail.com |
79d9d6cab1424a8f758f9bc427220aa90cc5ea9a | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/bob/40dea17339b24a50970c59a9ab7f2661.py | 2b91b02b1c565cfce002da2177d0c653c3e0759a | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 365 | py | #
# Skeleton file for the Python "Bob" exercise.
#
def hey(phrase):
    """Answer *phrase* the way Bob would.

    Empty/whitespace-only input      -> 'Fine. Be that way!'
    Shouting (letters, none lower)   -> 'Whoa, chill out!'
    Question (ends with '?')         -> 'Sure.'
    Anything else                    -> 'Whatever.'
    """
    if phrase.strip() == '':
        return 'Fine. Be that way!'
    has_letters = any(ch.isalpha() for ch in phrase)
    is_shouting = has_letters and not any(ch.islower() for ch in phrase)
    if is_shouting:
        return 'Whoa, chill out!'
    if phrase.endswith('?'):
        return 'Sure.'
    return 'Whatever.'
| [
"rrc@berkeley.edu"
] | rrc@berkeley.edu |
6830b269b52a966fce72fbe2d08cf234965d0a86 | 24aa49a08c844762b5efee160d1f807aaabe2882 | /route.py | afe72e31c8108b7094ebed0332481f5f34801a5c | [] | no_license | Hezam20/flask_web_app | 516c49495a92f8249a50161993e337e389e2ff0f | 45eb7c8902ff714101b33af2219d487d97eaefce | refs/heads/master | 2023-01-25T05:37:20.717790 | 2020-11-22T16:05:43 | 2020-11-22T16:05:43 | 314,378,022 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,746 | py | from flask import Flask, render_template, request, redirect, flash, url_for, jsonify
from forms import RegistrationForm, LoginForm
import psycopg2
import os
from werkzeug.security import check_password_hash, generate_password_hash
app = Flask(__name__)
# NOTE(review): hard-coded secret key checked into source control -- move it
# to an environment variable before deploying.
app.config['SECRET_KEY'] = '0e31e03058818ff1d3db6ec620545dcb'
# Single module-level connection/cursor shared by every request handler.
conn = psycopg2.connect(dbname=os.getenv("dbname"), user=os.getenv("user"), port=os.getenv("port"), password=os.getenv('user_auth'))
cur = conn.cursor()
# Static demo content rendered on the home page.
posts = [
    {
        'author': 'Lola ',
        'title': 'Blog Post 1',
        'content': 'First post content',
        'date_posted': 'April 20, 2020'
    },
    {
        'author': 'Ms piggy',
        'title': 'Blog Post 2',
        'content': 'Second post content',
        'date_posted': 'April 21, 2018'
    }
]
@app.route("/")
@app.route("/home")
def home(name=None):
    """Landing page: render the module-level demo posts."""
    # NOTE(review): 'name' is never supplied by either route -- confirm it is needed.
    return render_template("home.html", title='Home', posts=posts)
@app.route("/about")
def about(name=None):
    """Static 'about' page."""
    # NOTE(review): 'name' is never supplied by the route -- confirm it is needed.
    return render_template("about.html", title="about")
@app.route("/register", methods=["GET", "POST"])
def register():
    """Render the registration form (GET) and create a new account (POST).

    Rejects duplicate e-mail addresses. On success the password is stored as
    a werkzeug hash (never plain text) and the user is sent to /login.
    """
    form = RegistrationForm()
    if request.method == "GET":
        return render_template("register_form.html", title='Register', form=form)
    if form.validate_on_submit():
        # Reject duplicate registrations for the same e-mail address.
        cur.execute("SELECT email FROM people WHERE email = %s", (form.email.data,))
        is_email_exist = cur.fetchone()
        if is_email_exist:
            return render_template("register_form.html", form=form, is_email_exist=is_email_exist[0])
        # BUGFIX: the hash was computed but discarded -- the old code inserted
        # form.password.data (plain text). Store the hash instead.
        password = generate_password_hash(form.password.data)
        cur.execute("INSERT INTO people (first_name, last_name, email, password, confirm_password) VALUES (%s, %s, %s, %s, %s)",
                    (form.first_name.data, form.last_name.data, form.email.data, password, password))
        conn.commit()
        # BUGFIX: do NOT close the module-level cursor/connection here -- the
        # old cur.close()/conn.close() broke every request after the first
        # successful registration.
        flash(f"Account created for {form.first_name.data}!", "success")
        return redirect("/login")
    return render_template("register_form.html", form=form)
@app.route("/login", methods=["GET", "Post"])
def login():
    """Render the login form (GET) and look the user up by e-mail (POST)."""
    form = LoginForm()
    if request.method == "GET":
        return render_template("login_form.html", title='Login', form=form)
    cur.execute("SELECT * FROM people WHERE email = %s", (form.email.data,))
    is_email_exist = cur.fetchone()
    if not is_email_exist:
        flash(f"This user does not exist!. Please create an accout!", "danger")
        return render_template("login_form.html", title="Login", form=form)
    # TODO(review): the submitted password is never verified against the
    # stored hash (check_password_hash is imported but unused) -- fix.
    # BUGFIX: redirect() does not accept render_template keyword arguments;
    # the old call 'redirect("/home", title=..., posts=...)' raised TypeError
    # on every successful lookup.
    return redirect(url_for("home"))
"hezam.hezam20@gmail.com"
] | hezam.hezam20@gmail.com |
d38d4a1e39d4e23ccfdf9b20604cbc85ea00c538 | d198498188c4bbc3840da495e4124bb32f48a055 | /spark/EMRScripts/Dimensions/ProductCSVToParquet.py | 5caf9eff325888fe6c8f138c6624b81b406b18fb | [] | no_license | SrikarNimmagadda/datalake | b34354292246deedd06630a344d88430899ae360 | 35c9bccdbf2df67a9fd361362674023d3acc5632 | refs/heads/master | 2020-03-10T02:42:36.775143 | 2018-04-17T19:05:02 | 2018-04-17T19:05:02 | 129,143,433 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,932 | py | import sys
from pyspark.sql import SparkSession
from pyspark.sql.functions import year, from_unixtime, unix_timestamp, substring
import boto3
import os
import csv
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
class ProductCSVToParquet(object):
    """Convert the RQ4 product, coupons and product-identifier CSV extracts on
    S3 to Parquet, writing each dataset to a flat 'Working' folder and to a
    year/month-partitioned folder in the discovery bucket.

    NOTE(review): constructing this class has side effects -- it reads
    sys.argv, starts a SparkSession, and queries S3.
    """

    def __init__(self):
        # Positional CLI arguments: the three source CSV paths, the discovery
        # working path, and the root of the data-processing error area.
        self.productRQ4FilePath = sys.argv[1]
        self.couponsFilePath = sys.argv[2]
        self.productIdentifierFilePath = sys.argv[3]
        self.discoveryProductWorkingPath = sys.argv[4]
        self.dataProcessingErrorPath = sys.argv[5] + '/discovery'
        # Bucket and dataset names are recovered from the S3 URLs; each path
        # is assumed to contain a bucket name beginning with 'tb'.
        self.discoveryBucket = self.discoveryProductWorkingPath[self.discoveryProductWorkingPath.index('tb'):].split("/")[0]
        self.productRQ4Name = self.discoveryProductWorkingPath[self.discoveryProductWorkingPath.index('tb'):].split("/")[1]
        self.couponsName = self.couponsFilePath[self.couponsFilePath.index('tb'):].split("/")[1]
        self.productIdentifierName = self.productIdentifierFilePath[self.productIdentifierFilePath.index('tb'):].split("/")[1]
        self.workingName = self.discoveryProductWorkingPath[self.discoveryProductWorkingPath.index('tb'):].split("/")[2]
        # Output locations: a flat 'Working' copy plus a partitioned copy.
        self.productRQ4OutputWorkingPath = 's3://' + self.discoveryBucket + '/' + self.productRQ4Name + '/Working'
        self.couponsOutputWorkingPath = 's3://' + self.discoveryBucket + '/' + self.couponsName + '/Working'
        self.productIdentifierOutputPartitionPath = 's3://' + self.discoveryBucket + '/' + self.productIdentifierName
        self.productRQ4OutputPartitionPath = 's3://' + self.discoveryBucket + '/' + self.productRQ4Name
        self.couponsOutputPartitionPath = 's3://' + self.discoveryBucket + '/' + self.couponsName
        self.productIdentifierOutputWorkingPath = 's3://' + self.discoveryBucket + '/' + self.productIdentifierName + '/Working'
        self.appName = self.__class__.__name__
        self.sparkSession = SparkSession.builder.appName(self.appName).getOrCreate()
        self.log4jLogger = self.sparkSession.sparkContext._jvm.org.apache.log4j
        self.log = self.log4jLogger.LogManager.getLogger(self.appName)
        self.s3 = boto3.resource('s3')
        self.client = boto3.client('s3')
        self.fileFormat = ".csv"
        # Minimum expected column counts used by isValidSchemaInSource().
        self.productRQ4FileColumnCount = 61
        self.couponsFileColumnCount = 5
        self.productIdentifierFileColumnCount = 4
        # Locate each source file on S3 and capture its header row.
        self.productRQ4File, self.productRQ4Header = self.searchFile(self.productRQ4FilePath)
        self.log.info(self.productRQ4File)
        self.log.info("Product RQ4 Columns:" + ','.join(self.productRQ4Header))
        self.couponsFile, self.couponsHeader = self.searchFile(self.couponsFilePath)
        self.log.info(self.couponsFile)
        self.log.info("Coupons Columns:" + ','.join(self.couponsHeader))
        self.productIdentifierFile, self.productIdentifierHeader = self.searchFile(self.productIdentifierFilePath)
        self.log.info(self.productIdentifierFile)
        self.log.info("ProductIdentifier Columns:" + ','.join(self.productIdentifierHeader))

    def isValidFormatInSource(self):
        """Return True when all three source files have a .csv extension."""
        productRQ4FileName, productRQ4FileExtension = os.path.splitext(os.path.basename(self.productRQ4File))
        couponsFileName, couponsFileExtension = os.path.splitext(os.path.basename(self.couponsFile))
        productIdentifierFileName, productIdentifierFileExtension = os.path.splitext(os.path.basename(self.productIdentifierFile))
        isValidProductRQ4Format = self.fileFormat in productRQ4FileExtension
        isValidCouponsFormat = self.fileFormat in couponsFileExtension
        isValidProductIdentifierFormat = self.fileFormat in productIdentifierFileExtension
        if all([isValidProductRQ4Format, isValidCouponsFormat, isValidProductIdentifierFormat]):
            return True
        return False

    def isValidSchemaInSource(self):
        """Return True when every header has at least the expected column count."""
        self.log.info("Product RQ4 column count " + str(self.productRQ4Header.__len__()))
        self.log.info("Coupons column count " + str(self.couponsHeader.__len__()))
        self.log.info("ProductIdentifier column count " + str(self.productIdentifierHeader.__len__()))
        isValidProductRQ4Schema = False
        if self.productRQ4Header.__len__() >= self.productRQ4FileColumnCount:
            isValidProductRQ4Schema = True
        isValidCouponsSchema = False
        if self.couponsHeader.__len__() >= self.couponsFileColumnCount:
            isValidCouponsSchema = True
        isValidProductIdentifierSchema = False
        if self.productIdentifierHeader.__len__() >= self.productIdentifierFileColumnCount:
            isValidProductIdentifierSchema = True
        self.log.info("isValidProductRQ4Schema " + isValidProductRQ4Schema.__str__() + "isValidCouponsSchema " +
                      isValidCouponsSchema.__str__() + "isValidProductIdentifierSchema " + isValidProductIdentifierSchema.__str__())
        if all([isValidProductRQ4Schema, isValidCouponsSchema, isValidProductIdentifierSchema]):
            return True
        return False

    def copyFile(self, strS3url, newS3PathURL):
        """Server-side copy of one S3 object from *strS3url* to *newS3PathURL*."""
        newBucketWithPath = urlparse(newS3PathURL)
        newBucket = newBucketWithPath.netloc
        newPath = newBucketWithPath.path.lstrip('/')
        bucketWithPath = urlparse(strS3url)
        bucket = bucketWithPath.netloc
        originalName = bucketWithPath.path.lstrip('/')
        self.client.copy_object(Bucket=newBucket, CopySource=bucket + '/' + originalName, Key=newPath)
        self.log.info('File name ' + originalName + ' within path ' + bucket + " copied to new path " + newS3PathURL)

    def searchFile(self, strS3url):
        """Return (s3_url, header_row) for the last object under *strS3url*.

        NOTE(review): if several objects share the prefix, only the last one
        listed is kept; if none match, the final loop raises on the empty
        'body' -- confirm whether a missing file should be handled explicitly.
        """
        bucketWithPath = urlparse(strS3url)
        bucket = bucketWithPath.netloc
        path = bucketWithPath.path.lstrip('/')
        mybucket = self.s3.Bucket(bucket)
        objs = mybucket.objects.filter(Prefix=path)
        filePath = ''
        fileName = ''
        file = ''
        body = ''
        header = ''
        for s3Object in objs:
            path, filename = os.path.split(s3Object.key)
            filePath = path
            fileName = filename
            file = "s3://" + bucket + "/" + s3Object.key
            body = s3Object.get()['Body'].read()
            self.log.info('File name ' + fileName + ' exists in path ' + filePath)
        # Extract the header (first row) from the CSV body.
        for i, line in enumerate(csv.reader(body.splitlines(), delimiter=',', quotechar='"')):
            if i == 0:
                header = line
        return file, header

    def loadParquet(self):
        """Validate the three sources, then convert them to Parquet.

        On validation failure the sources are copied to the error path and the
        method returns without writing any Parquet output. On success each
        dataset is written twice: overwritten into its 'Working' folder, and
        appended into a year/month-partitioned folder.
        """
        self.log.info('Exception Handling starts')
        validSourceFormat = self.isValidFormatInSource()
        if not validSourceFormat:
            self.log.error("Product Source files not in csv format.")
        validSourceSchema = self.isValidSchemaInSource()
        if not validSourceSchema:
            self.log.error("Product Source schema does not have all the required columns.")
        if not validSourceFormat or not validSourceSchema:
            self.log.info("Copy the source files to data processing error path and return.")
            self.copyFile(self.productRQ4File, self.dataProcessingErrorPath + '/' + self.productRQ4Name +
                          self.fileFormat)
            self.copyFile(self.couponsFile, self.dataProcessingErrorPath + '/' + self.couponsName + self.fileFormat)
            self.copyFile(self.productIdentifierFile, self.dataProcessingErrorPath + '/' + self.productIdentifierName + self.fileFormat)
            return
        self.log.info('Source format and schema validation successful.')
        self.log.info('Reading the input parquet file')
        dfProduct = self.sparkSession.read.format("com.databricks.spark.csv"). \
            option("encoding", "UTF-8"). \
            option("ignoreLeadingWhiteSpace", "true"). \
            option("ignoreTrailingWhiteSpace", "true"). \
            option("header", "true"). \
            option("treatEmptyValuesAsNulls", "true"). \
            option("inferSchema", "true"). \
            option("escape", '"'). \
            option("quote", "\""). \
            option("multiLine", "true"). \
            load(self.productRQ4FilePath).toDF(*self.productRQ4Header)
        dfProductIden = self.sparkSession.read.format("com.databricks.spark.csv"). \
            option("encoding", "UTF-8"). \
            option("ignoreLeadingWhiteSpace", "true"). \
            option("ignoreTrailingWhiteSpace", "true"). \
            option("header", "true"). \
            option("treatEmptyValuesAsNulls", "true"). \
            option("inferSchema", "true"). \
            option("escape", '"'). \
            option("quote", "\""). \
            option("multiLine", "true"). \
            load(self.productIdentifierFilePath).toDF(*self.productIdentifierHeader)
        dfCoupons = self.sparkSession.read.format("com.databricks.spark.csv"). \
            option("encoding", "UTF-8"). \
            option("ignoreLeadingWhiteSpace", "true"). \
            option("ignoreTrailingWhiteSpace", "true"). \
            option("header", "true"). \
            option("treatEmptyValuesAsNulls", "true"). \
            option("inferSchema", "true"). \
            option("escape", '"'). \
            option("quote", "\""). \
            option("multiLine", "true"). \
            load(self.couponsFilePath).toDF(*self.couponsHeader)
        # Flat 'Working' copies (overwritten on every run).
        dfProduct.coalesce(1).write.mode('overwrite').format('parquet').save(self.productRQ4OutputWorkingPath)
        dfCoupons.coalesce(1).write.mode('overwrite').format('parquet').save(self.couponsOutputWorkingPath)
        dfProductIden.coalesce(1).write.mode('overwrite').format('parquet').save(self.productIdentifierOutputWorkingPath)
        # Partitioned copies (appended), partitioned by run year/month.
        dfProduct.coalesce(1).withColumn("year", year(from_unixtime(unix_timestamp()))).\
            withColumn("month", substring(from_unixtime(unix_timestamp()), 6, 2)).\
            write.mode('append').partitionBy('year', 'month').format('parquet').\
            save(self.productRQ4OutputPartitionPath)
        dfCoupons.coalesce(1).withColumn("year", year(from_unixtime(unix_timestamp()))).\
            withColumn("month", substring(from_unixtime(unix_timestamp()), 6, 2)).\
            write.mode('append').partitionBy('year', 'month').format('parquet').\
            save(self.couponsOutputPartitionPath)
        dfProductIden.coalesce(1).withColumn("year", year(from_unixtime(unix_timestamp()))).\
            withColumn("month", substring(from_unixtime(unix_timestamp()), 6, 2)).\
            write.mode('append').partitionBy('year', 'month').format('parquet').\
            save(self.productIdentifierOutputPartitionPath)
        self.sparkSession.stop()
# Script entry point: the constructor reads sys.argv; loadParquet() does the work.
if __name__ == "__main__":
    ProductCSVToParquet().loadParquet()
| [
"noreply@github.com"
] | SrikarNimmagadda.noreply@github.com |
2246a9d2f683dbff192646de38c732a7d8245055 | c7d5fdbf4cbc39da9639a6a6867297b244ac8cef | /Deep_Learning_A_Z/Supervised/CNN/my_cnn.py | df32e733be97d35f344a9c9c0fdcda367923a046 | [] | no_license | mehmetgoren/machine_learning | d2e924c44387be6b673eccc0e1cc9ce10d206374 | d9779deb8a3d53357e24679881d774bab9a19903 | refs/heads/master | 2020-03-24T15:23:56.781443 | 2018-11-07T06:35:49 | 2018-11-07T06:35:49 | 142,788,355 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,781 | py | """
CNN
"""
from keras.models import Sequential
from keras.layers import Conv2D
from keras.layers import MaxPool2D
from keras.layers import Flatten
from keras.layers import Dense
# Build a binary cat/dog classifier: three Conv/Pool stages, then a dense head.
classifier = Sequential()
# step 1 Convolution (input is 128x128 RGB, channels-last)
classifier.add(Conv2D(64, (3, 3), input_shape=(128, 128, 3), activation="relu"))
# step 2 Pooling
# BUGFIX: data_format="channels_first" conflicted with the channels-last
# input_shape above, so pooling ran over the wrong axes. Use the default
# (channels_last) format.
classifier.add(MaxPool2D(pool_size=(2, 2)))
# additional conv/pool stages to improve accuracy
classifier.add(Conv2D(64, (3, 3), activation="relu"))
classifier.add(MaxPool2D(pool_size=(2, 2)))
classifier.add(Conv2D(64, (3, 3), activation="relu"))
classifier.add(MaxPool2D(pool_size=(2, 2)))
# step 3 Flattening (one big feature vector)
classifier.add(Flatten())
# step 4 Full connection: tapering dense layers down to one sigmoid output
classifier.add(Dense(units=128, activation="relu"))
classifier.add(Dense(units=64, activation="relu"))
classifier.add(Dense(units=32, activation="relu"))
classifier.add(Dense(units=16, activation="relu"))
classifier.add(Dense(units=8, activation="relu"))
classifier.add(Dense(units=1, activation="sigmoid"))
# Compiling the CNN
classifier.compile(optimizer="adam", loss='binary_crossentropy', metrics=['accuracy'])
# Augment the training images to avoid overfitting.
from keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(rescale = 1./255,
                                   shear_range = 0.2,
                                   zoom_range = 0.2,
                                   horizontal_flip = True)
test_datagen = ImageDataGenerator(rescale = 1./255)
training_set = train_datagen.flow_from_directory('dataset/training_set',
                                                 target_size = (128, 128),
                                                 batch_size = 32,
                                                 class_mode = 'binary')
test_set = test_datagen.flow_from_directory('dataset/test_set',
                                            target_size = (128, 128),
                                            batch_size = 32,
                                            class_mode = 'binary')
classifier.fit_generator(training_set,
                         steps_per_epoch = 8000,
                         epochs = 1,
                         validation_data = test_set,
                         validation_steps = 2000)
import numpy as np
from keras.preprocessing import image
# BUGFIX: the model expects 128x128 input but the prediction image was loaded
# at 64x64, which fails at predict time. Load at the training resolution.
test_image = image.load_img('dataset/single_prediction/cat_or_dog_1.jpg', target_size = (128, 128))
test_image = image.img_to_array(test_image)
test_image = np.expand_dims(test_image, axis = 0)
result = classifier.predict(test_image)
# Class index mapping (0/1 -> cat/dog), as assigned by flow_from_directory.
training_set.class_indices
if result[0][0] == 1:
    prediction = 'dog'
else:
    prediction = 'cat'
"mehmetgoren@outlook.com"
] | mehmetgoren@outlook.com |
2adb684b2926de62dd1a877cb43f45be5c107ae7 | 7e7a2872907d3dd7a8139278b321c4a6a7129e87 | /go_fluent_app/migrations/0007_remove_result_user.py | 031d05254cf5f7425bebe4cd6f13c21e8641315f | [] | no_license | xmaanall/go-fluent-psi | 47bdfbc39e56c66fd42486cbd589e2227da57e30 | 67ce5a0579101b073b0ea35085ee9e8b6944bccb | refs/heads/master | 2023-05-28T23:08:19.440928 | 2021-06-17T07:04:23 | 2021-06-17T07:04:23 | 377,732,347 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 331 | py | # Generated by Django 3.2.2 on 2021-05-18 05:50
from django.db import migrations
class Migration(migrations.Migration):
    """Remove the 'user' field from the Result model (follows 0006)."""

    dependencies = [
        ('go_fluent_app', '0006_alter_result_user'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='result',
            name='user',
        ),
    ]
| [
"xmaanall@hotmail.com"
] | xmaanall@hotmail.com |
454c1c3281c38e34c9c619121d72d13f1a258149 | f0b26c711fdf1f230c0937f3b3999a6549503937 | /sandbox/.old0/sockets_tcp_server.py | 7465f77611550f264e91437eda6f4e2d356de69a | [] | no_license | ntwong0/sjsu-cmpe-2014-2015-07-card | fb19351d266f8351446cc926dc9e64a34692fd2b | eda08e388929ae37e5c3646287b8f711eb543369 | refs/heads/master | 2021-01-25T09:00:19.800627 | 2015-05-06T05:11:36 | 2015-05-06T05:11:36 | 34,148,029 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 911 | py | import socket
import sys
import time
#from _thread import *
from thread import *
# --- Server socket setup (Python 2 script; see 'from thread import *' above) ---
host = ''      # bind on all interfaces
port = 5555
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Allow quick restarts of the server on the same port.
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
try:
    s.bind((host, port))
except socket.error as e:
    # NOTE(review): the bind error is printed but the script continues and
    # will fail at listen/accept -- confirm whether it should exit here.
    print(str(e))
connlist = []  # addresses of every client that has connected
s.listen(5)
print('Waiting for a connection.')
def threaded_client(connlist,conn):
    # Per-connection handler, run on its own thread (Python 2 'thread' module).
    # Sends a greeting, then echoes an incrementing counter back to the client
    # every 5 seconds until the client disconnects (empty recv).
    #conn.send(str.encode('Welcome, type your info\n'))
    conn.send('Welcome, type your info\n')
    message = 0
    while True:
        data = conn.recv(2048)
        #reply = 'Server output: '+ data.decode('utf-8')
        #reply = 'Server output: '+ data
        if not data:
            break
        conn.sendall(str(message))
        print "hello"
        message = message + 1
        time.sleep(5)
    conn.close()
# Accept loop: hand each new client off to its own thread.
while True:
    conn, addr = s.accept()
    print('connected to: '+addr[0]+':'+str(addr[1]))
    connlist.append(addr)
    start_new_thread(threaded_client,(connlist,conn,))
| [
"ntwong0@gmail.com"
] | ntwong0@gmail.com |
556857a334ad7fac92fe1a66d4fa579c83a1adbb | e62127fe1f0269844e7ee1c7dcf3ac93644f9733 | /sudoku.py | 9871a3b8446f83ce46b5e96bc681250794d4482f | [] | no_license | mlanghinrichs/sudoku | 2220c35f8154f0a97ad409bafe4f089f3cf0841d | b1dbb8bde699897e1e829f338c5314e310b5d789 | refs/heads/master | 2020-04-09T02:04:25.266964 | 2018-12-13T20:41:25 | 2018-12-13T20:41:25 | 159,927,615 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,010 | py | import argparse
class Sudoku():
    """A 9x9 Sudoku grid with algorithmic and backtracking solvers.

    The grid is stored as a list of nine row-lists of ints; 0 marks an
    empty cell. Cells are addressed as self[row, col].
    """

    def __init__(self, puzzle):
        # Takes a list of row-lists containing 9 ints from 1-9,
        # or 0 for empty cells
        self.raw = puzzle

    def __getitem__(self, key):
        # Allow for self[r, c] indexing
        try:
            a, b = key
            return self.raw[a][b]
        except IndexError:
            raise IndexError(f"Nothing exists at {a}, {b}")
        except TypeError:
            raise TypeError("Sudoku indexing requires int, int coordinates")

    def __setitem__(self, key, value):
        # Allow for self[r, c] index setting
        try:
            a, b = key
            self.raw[a][b] = value
        except IndexError:
            raise IndexError(f"{a}, {b} is out of Sudoku bounds")
        except TypeError:
            raise TypeError("Sudoku indexing requires int, int coordinates")

    def __iter__(self):
        """Yields each cell in Sudoku as a dict with val, r(ow), and c(ol)."""
        for r in range(9):
            for c in range(9):
                yield {"val": self.raw[r][c], "r": r, "c": c}

    def __len__(self):
        """Return the number of FILLED (non-zero) cells in the puzzle."""
        # NOTE: the original docstring said "unfilled", but the count below
        # includes only cells with val > 0; full() relies on len == 81.
        length = 0
        for cell in self:
            if cell["val"] > 0:
                length += 1
        return length

    def __str__(self):
        string = ""
        # Count which # cell you're on to track where to add square
        i = 0
        for cell in self:
            if cell["val"] != 0:
                string += str(cell['val'])
            else:
                # Print '-' for a blank cell
                string += "-"
            i += 1
            # Check for being at the end of column 3 or 6
            if i%9 == 3 or i%9 == 6:
                string += "|"
            # If not at the end of a line, add " " to pad #s
            elif i % 9:
                string += " "
            # If at the end of the line but not the end of the puzzle, add \n
            elif i % 81 and not i % 9:
                string += "\n"
            # At end of row 3 and 6, add horizontal bar
            if i == 27 or i == 54:
                string += "-----------------\n"
        return string

    def percent_done(self):
        """Return % completion (filled cells) rounded to 2 decimal points."""
        return round((100*len(self)) / 81, 2)

    def full(self):
        """Return True if all cells are full, False if not."""
        return len(self) == 81

    def validate(self):
        """Return whether the sudoku values appear to be valid."""
        valid = True
        # Check for non-0 duplicates in col_ and row_
        for i in range(9):
            col_ = [val for val in self.column(i) if val != 0]
            row_ = [val for val in self.row(i) if val != 0]
            if len(col_) != len(set(col_)) or len(row_) != len(set(row_)):
                valid = False
        # Check for non-0 duplicates in sqr_
        for sqx in (0, 3, 6):
            for sqy in (0, 3, 6):
                sqr_ = [val for val in self.square(sqx, sqy) if val != 0]
                if len(sqr_) != len(set(sqr_)):
                    valid = False
        return valid

    def column(self, col_):
        """Iterate over col_'s contents."""
        for row in self.raw:
            yield row[col_]

    def others_in_column(self, row_, col_):
        """Iterate over col_'s contents except for row_."""
        for r in range(9):
            if r != row_:
                yield self[r, col_]

    def row(self, row_):
        """Iterate over row_'s contents."""
        for cell_val in self.raw[row_]:
            yield cell_val

    def others_in_row(self, row_, col_):
        """Iterate over row_'s contents except for col_."""
        for c in range(9):
            if c != col_:
                yield self[row_, c]

    def square(self, row_, col_):
        """Iterate over the square containing row_, col_."""
        for r in range(9):
            for c in range(9):
                if r//3 == row_//3 and c//3 == col_//3:
                    yield self[r, c]

    def others_in_square(self, row_, col_):
        """Iterate over the square containing row_, col_ except for that cell."""
        for r in range(9):
            for c in range(9):
                # if it's in the same square and not in the same cell...
                if (r//3 == row_//3 and c//3 == col_//3
                        and not (r == row_ and c == col_)):
                    yield self[r, c]

    def possibles(self, row_, col_, none_if_full=True):
        """Return set of the possible values for self[row_, col_]."""
        if self[row_, col_] and none_if_full:
            return set()
        else:
            # Start with 1-9 and discard everything visible from this cell.
            poss = set([i+1 for i in range(9)])
            for val in self.others_in_column(row_, col_):
                poss.discard(val)
            for val in self.others_in_row(row_, col_):
                poss.discard(val)
            for val in self.others_in_square(row_, col_):
                poss.discard(val)
            return poss

    def other_row_possibles(self, row_, col_):
        """Return set of possible values for other cells in row."""
        orp = set()
        for c in range(9):
            if c != col_:
                orp = orp | self.possibles(row_, c)
        return orp

    def other_column_possibles(self, row_, col_):
        """Return set of possible values for other cells in column."""
        ocp = set()
        for r in range(9):
            if r != row_:
                ocp = ocp | self.possibles(r, col_)
        return ocp

    def other_square_possibles(self, row_, col_):
        """Return set of possible values for other cells in square."""
        osp = set()
        for r in range(9):
            for c in range(9):
                if row_//3 == r//3 and col_//3 == c//3 and (r, c) != (row_, col_):
                    osp = osp | self.possibles(r, c)
        return osp

    def fill_possibles(self, verbose=False):
        """Fill cells if they have only one possible value and return full()."""
        if verbose: print("running fill_possibles()")
        for cell in self:
            r = cell["r"]
            c = cell["c"]
            poss = self.possibles(r, c)
            if len(poss) == 1:
                val = poss.pop()
                self[r, c] = val
                if verbose: print(f"({r}, {c}) -> {val}")
        return self.full()

    def only_possibles(self, verbose=False):
        """Fill cell if it has a unique possibility, return full()."""
        if verbose: print("running only_possibles()")
        for cell in self:
            r, c = cell["r"], cell["c"]
            check = self.possibles(r, c)
            if not check:
                continue
            for val in range(1, 10):
                # A value possible here but impossible everywhere else in the
                # same row, column or square must go here.
                if (val in check and
                        (not val in self.other_row_possibles(r, c)
                         or not val in self.other_column_possibles(r, c)
                         or not val in self.other_square_possibles(r, c))):
                    if verbose: print(f"({r}, {c}) -> {val}")
                    self[r, c] = val
        return self.full()

    def guess_dicts(self):
        """Return list of dicts containing empty cells and their possibilities."""
        out = []
        for cell in self:
            r, c = cell["r"], cell["c"]
            if cell["val"] == 0:
                out.append({"val": cell["val"],
                            "r": r,
                            "c": c,
                            "poss": self.possibles(r, c)
                            })
        return out

    def guess(self, verbose=False, *dicts):
        """Recursively guess and validate options from a set of cell dicts."""
        # given {val, r, c, poss}
        if not dicts:
            return True
        for cell in dicts:
            r, c = cell["r"], cell["c"]
            for p in cell["poss"]:
                self[r, c] = p
                if verbose: print(f"Guessing ({r}, {c}) -> {p}")
                if self.validate() and self.guess(verbose, *dicts[1:]):
                    return True
                elif not self.validate():
                    if verbose: print(f"{p} was wrong")
                    continue
                else:
                    if verbose:
                        print("Valid, but all sub-guesses wrong - retreating")
                    pass
            # No possibility worked: reset this cell and backtrack.
            self[r, c] = 0
            return False

    def solve_funcs(self, verbose=False, *args):
        """Recur args until none of them change self.raw, return full()."""
        current = len(self)
        while not args[0](verbose):
            # First strategy made no progress: fall through to the next one.
            if current == len(self) and len(args) > 1:
                self.solve_funcs(verbose, *args[1:])
            if current == len(self):
                return self.full()
            current = len(self)
        return self.full()

    def solve(self, verbose=False, guess_last=True):
        """Call solve_funcs() with solve algorithms and return solution."""
        if self.solve_funcs(verbose, self.fill_possibles, self.only_possibles):
            print("Puzzle complete!")
        else:
            print("Incomplete.")
            if guess_last:
                self.guess(verbose, *self.guess_dicts())
        print(self)
        return self

    def solve_by_guessing(self, verbose=False):
        """Run guess() until the puzzle is solved, then return self."""
        self.guess(verbose, *self.guess_dicts())
        print(self)
        return self

    @classmethod
    def process_str(cls, path, src=False):
        """Create a Sudoku from text file located at (path) or in ./src/(path)"""
        if src == True:
            name = f"./src/{path}"
        else:
            name = path
        with open(name, "r") as f:
            source = f.read().split("\n")
        # Check len(line) to not accidentally import blank lines
        source = [list(map(int, list(line)))
                  for line in source if len(line) > 0]
        return cls(source)
def main():
    """CLI entry point: parse arguments, load the puzzle file, and solve it."""
    parser = argparse.ArgumentParser()
    parser.add_argument("path", help="Path to the Sudoku to solve; use --src for included puzzles")
    # --guess and --algorithmic are mutually exclusive solver modes.
    group = parser.add_mutually_exclusive_group()
    group.add_argument("-g", "--guess", help="Solve only by guessing",
                       action="store_true")
    group.add_argument("-a", "--algorithmic", help="Solve only by algorithm",
                       action="store_true")
    parser.add_argument("-v", "--verbose", help="Log solver actions",
                        action="store_true")
    parser.add_argument("-s", "--src", action="store_true",
                        help="Pull named file from src instead of specifying a path")
    args = parser.parse_args()
    to_do = Sudoku.process_str(args.path, args.src)
    if args.guess:
        to_do.solve_by_guessing(args.verbose)
    elif args.algorithmic:
        to_do.solve(args.verbose, guess_last=False)
    else:
        # Default: algorithmic passes first, then guessing for the remainder.
        to_do.solve(args.verbose)


if __name__ == "__main__":
    main()
| [
"mlanghinrichs@gmail.com"
] | mlanghinrichs@gmail.com |
e445ccceccf4ae3a1c1d7ba154ec46249a0dd4e5 | f24a567ca7caf6188dd3fe37696aa5ad55530bde | /writeStory.py | dba073746ed6d04c07f0f827ae6b20058df1c34a | [] | no_license | chris-langfield/MessengerMarkov | b74240554fe098bc0459726c0c8ff3b36f7054ee | 76a9960d24ab499a2ce0fac8b2130ee33613c3b3 | refs/heads/master | 2021-06-07T04:51:22.060206 | 2021-05-28T15:12:46 | 2021-05-28T15:12:46 | 149,949,604 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,642 | py | import string
import random
import operator
FILENAME = "messages.txt"  # corpus the Markov chain is built from
NUM_WORDS = 500            # length of the generated story, in words
def contentsAsStringList(FILENAME):
    """Read FILENAME and return its whitespace-separated tokens as a list."""
    with open(FILENAME, "r", encoding="utf-8") as handle:
        return handle.read().split()
def nextWordProbabilities(word, FILENAME):
    """Return [(successor, percent), ...] for every word following *word*.

    Reads the corpus in FILENAME, matches *word* case-insensitively, and
    returns the lower-cased successors with their relative frequency as a
    percentage, sorted by ascending percentage. Returns [] when *word*
    never appears with a successor.

    BUGFIX: an occurrence of *word* as the very last token of the corpus has
    no successor; the old code indexed contentsList[i+1] there and raised
    IndexError. Such occurrences are now ignored entirely.
    """
    with open(FILENAME, "r", encoding="utf-8") as f:
        contentsList = f.read().split()
    totalInstances = 0
    nextWordInstances = {}
    # Stop one short of the end: the final token has no successor.
    for i in range(len(contentsList) - 1):
        if contentsList[i].lower() == word.lower():
            totalInstances += 1
            successor = contentsList[i + 1].lower()
            nextWordInstances[successor] = nextWordInstances.get(successor, 0) + 1
    probabilities = {}
    for entry in nextWordInstances:
        probabilities[entry] = nextWordInstances[entry] * (100.0 / totalInstances)
    sortedProbabilities = sorted(probabilities.items(), key=operator.itemgetter(1))
    return sortedProbabilities
def getNextWord(word, FILENAME):
    """Return the most likely successor of *word* according to the corpus."""
    ranked = nextWordProbabilities(word, FILENAME)
    # The ranking is sorted ascending, so the best candidate is last.
    return ranked[-1][0]
# --- Story generation --------------------------------------------------------
# BUGFIX: the call parentheses were missing ('contents = contentsAsStringList'),
# which bound the function object itself, so random.choice(contents) raised
# TypeError the first time a dead end was reached.
contents = contentsAsStringList(FILENAME)
startingWord = input("Enter starting word: ")
word = startingWord
story = ""
k = 0
while(True):
    if (k == NUM_WORDS):
        break
    story += word
    story += " "
    # Dead end (word never seen with a successor): restart from a random word.
    if not nextWordProbabilities(word, FILENAME):
        word = random.choice(contents)
    else:
        word = getNextWord(word, FILENAME)
    k += 1
print(story)
| [
"chris@Chriss-MBP.fios-router.home"
] | chris@Chriss-MBP.fios-router.home |
07a682cfee239063d69eebd3dc3dfd0b5bb9833f | 9d6f76fc4c5f56582628757afd8205e8e0c5909d | /python.py | 1d511752dad9e2cae5bc320508693dbc13c38629 | [] | no_license | leyla-demir/sprint-5_letter_in_string | 132375908e377cb05164671700b954919e5a9ebe | 776b2960647d2b7488edbbf2e9eff639e8f70f8c | refs/heads/main | 2023-02-03T19:16:09.824820 | 2020-12-15T07:39:34 | 2020-12-15T07:39:34 | 321,584,681 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 205 | py | string = input('Enter A String : ')
# Prompt for a letter and report, case-insensitively, how many times it occurs
# in the previously entered string (the message is in Turkish).
# NOTE(review): 'string' is assigned on the preceding line and shadows the
# stdlib 'string' module.
letter = input('Enter A Letter : ')
count = string.lower().count(letter.lower())
print(f'{string} kelimesinin içerisinde {letter} kelimesi {count} defa geçmektedir')
"ladylilith.139@gmail.com"
] | ladylilith.139@gmail.com |
84c6065c23e583682f49658ed98aca5e051d5d52 | 3b94bc467617a0272a80917abee39f37acd81a8b | /rseqflow2/ExpressionEstimation/SplitByChromosome_for_transcriptomeSamFile.py | 64fcc278e1aacad46a1e275e34f880fb34c65e76 | [] | no_license | falconer502/RseqFlow | b5f47ca2371b43798bf61be1de1d000010a0fde5 | fcb94a6f4ae2e01b9759292a8f85d25503d7f99f | refs/heads/master | 2021-01-15T10:24:57.343473 | 2014-09-23T21:36:51 | 2014-09-23T21:36:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,258 | py | #!/usr/bin/python
'''
Created on 2012-08-19
@author: linliu
Modified by J.Herstein 2013-04-01
'''
import os
import sys
import optparse
import string
import re
# Program name (basename of the invoked script), used in error messages.
prog_base = os.path.split(sys.argv[0])[1]
parser = optparse.OptionParser()
parser.add_option("-i", "--input-file", action = "store", type = "string", dest = "input_sam_file",
                  help = "input file to be split (sam format)")
parser.add_option("-p", "--prefix-output", action = "store", type = "string", dest = "output_prefix",
                  help = "prefix of output files")
parser.add_option("-o", "--output-chrList", action = "store", type = "string", dest = "output_chromosome_list",
                  help = "chromosome list output file")
(options, args) = parser.parse_args()
# All three options are required; bail out with usage text otherwise.
# NOTE: Python 2 print statement.
if (options.input_sam_file is None or
    options.output_prefix is None or
    options.output_chromosome_list is None):
    print prog_base + ": error: missing required command-line arguments"
    parser.print_help()
    sys.exit(1)
fname_input=options.input_sam_file
fname_output=options.output_chromosome_list
prefix_output=options.output_prefix
#JSH 2013-08-26
############ create initial empty files (required for PEGASUS workflow) #######
# Pre-create one empty "<prefix>_<chr>_alignment.sam" per expected human
# chromosome so downstream workflow steps always find their input files.
complete_chr_list=[]
# range endpoint is never included so use 23 to get 22 chromosomes.
for v in range(1,23):
    complete_chr_list.append("chr"+str(v))
# don't know if gtf will have chrM or chrMT, so including both
complete_chr_list.append("chrX")
complete_chr_list.append("chrY")
complete_chr_list.append("chrM")
complete_chr_list.append("chrMT")
# Touch (truncate) each per-chromosome output file.
for v in range(0,len(complete_chr_list)):
    open(prefix_output+"_"+complete_chr_list[v]+"_alignment.sam", 'w').close()
bk_input=open(fname_input)
#sh_input=bk_input.readlines()
#row_number_input=len(sh_input)
#bk_input.close()
############split input file##############
#chr_fileLine={}
# chr_fileLine records chromosome ids in first-seen order (written to the
# chromosome-list file at the end); chr_out maps id -> open output file.
chr_fileLine=[]
chr_out={}
samline=0
chrlist_output=open(fname_output, 'w')
# Stream the SAM file line by line (py2 xreadlines) and append each
# alignment line to the output file of the chromosome named in column 3.
for v in bk_input.xreadlines():
    samline+=1
    temp=v[0:-1].split('\t')
#    temp1=temp[2]
#    if (temp1 is not "*"):
#        temp2=temp1.split('_',2)
#        temp3=temp2[2].split('=',1)
#        temp4=temp3[1].split(':',1)
#        chromosome=temp4[0]
#JSH
    # Extract "chr..." up to the first ':' from the reference-name column.
    temp1=re.search(r'(chr.*?):',temp[2])
    if temp1:
        chromosome=temp1.group(1)
        # First time this chromosome is seen: remember it and open (truncate)
        # its per-chromosome output file.
        if (not chromosome in chr_fileLine):
            chr_fileLine.append(chromosome)
            chr_out[chromosome]=open(prefix_output+'_'+chromosome+'_alignment.sam', 'w')
        chr_out[chromosome].writelines(v)
#    try:
#        chr_fileLine[chromosome].append(v)
#    except:
#        chr_fileLine[chromosome]=[]
#        chr_fileLine[chromosome].append(v)
bk_input.close()
for chr in chr_out:
    chr_out[chr].close()
# One chromosome id per line, in first-seen order.
for chr in chr_fileLine:
    chrlist_output.writelines(chr+'\n')
chrlist_output.close()
##########write to files#######################
#chrlist_output=open(fname_output, 'w')
#for chr in chr_fileLine:
#    chr_out=open(prefix_output+'_'+chr+'_alignment.sam', 'w')
#    for v in range(0, len(chr_fileLine[chr])):
#        insertline=chr_fileLine[chr][v]
#        chr_out.writelines(insertline)
#    chr_out.close()
#    chrlist_output.writelines(chr+'\n')
#chrlist_output.close()
#print 'Split Done for Input Sam Files'
print "\nSplit done for", fname_input
| [
"herstein@med.usc.edu"
] | herstein@med.usc.edu |
d01205e2e9c7605be9f7761f16014e29f037ed6b | 7360444bf23da0fd9cd7ad3b9e96d665a983a42e | /apps/user/adminx.py | 8b98355c54055ba8aedef6e406bd6d8b2f9d6ae4 | [] | no_license | seasailz/blog | 7f93edb8a5aa7886be3cf0b5c2839549fc1c7deb | fb3691c0cacebf9fa013c408bf7fb9c5ce3ce3ea | refs/heads/master | 2020-08-07T19:34:14.682764 | 2019-10-10T07:57:03 | 2019-10-10T07:57:03 | 213,565,441 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 552 | py | # _*_ coding: utf-8 _*_
__date__ = '2019/9/18 13:43'
import xadmin
from xadmin import views
# Admin site global configuration -- theme switching
class BaseSetting(object):
    """Xadmin base-view settings: enable the admin theme switcher."""
    enable_themes = True  # show the theme-switching menu in the admin UI
    use_bootswatch = True  # presumably pulls extra themes from Bootswatch -- see xadmin docs
# Admin site global configuration -- header and footer
class GlobalSetting(object):
    """Xadmin site-wide settings: page title, footer text and menu style."""
    site_title = '个人博客'  # admin page title ("Personal Blog")
    site_footer = '该网站由海涵制作'  # footer text ("This site was made by Haihan")
    # Collapse the navigation bar into an accordion style.
    menu_style = 'accordion'
# Attach the settings classes to xadmin's base and common admin views.
xadmin.site.register(views.BaseAdminView, BaseSetting)
xadmin.site.register(views.CommAdminView, GlobalSetting)
| [
"244590638@qq.com"
] | 244590638@qq.com |
ad8b74a84db6612b31d409da2c5d8f57bb36440a | d2076692e4042408742de1996c702eea71ca49e8 | /openfmri/openfmri.py | c4ca4d3937f02415b3689e341f694b902d916300 | [] | no_license | fabianp/pypreprocess | eaba1e6e6be53c3b37e48ce392d47f7a276cbfd5 | 47d99c0daab6ca71abb1614ef4ff005c7aab6e82 | refs/heads/master | 2021-01-18T20:42:45.628490 | 2014-01-08T10:14:38 | 2014-01-08T10:14:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 32,321 | py | import os
import sys
import re
import glob
import json
import shutil
import tempfile
import urllib2
import multiprocessing
from optparse import OptionParser
import numpy as np
import nibabel as nb
from nipy.modalities.fmri.design_matrix import make_dmtx
from nipy.modalities.fmri.experimental_paradigm import EventRelatedParadigm
from nipy.modalities.fmri.experimental_paradigm import BlockParadigm
from nipy.modalities.fmri.glm import FMRILinearModel
# import externals
# Derive the repository root (two levels above the running script) and put
# it plus its 'external' folder on sys.path so the project-local modules
# below can be imported.
fpath = os.path.abspath(sys.argv[0])
module_dir = os.path.join(os.path.sep, *fpath.split(os.path.sep)[:-2])
sys.path.append(module_dir)
sys.path.append(os.path.join(module_dir, 'external'))
from nilearn.datasets import _fetch_dataset
from datasets_extras import unzip_nii_gz
import nipype_preproc_spm_utils
from datasets import dataset_names, dataset_files, map_id
from datasets import dataset_ignore_list
# wildcard defining directory structure
subject_id_wildcard = "sub*"
# DARTEL ?
DO_DARTEL = False
# NOTE(review): this rebinds the os module's curdir constant -- looks
# intentional for this pipeline, but verify it is actually needed.
os.curdir = '..'
# HTML snippet used as the default dataset description in reports.
DATASET_DESCRIPTION = """\
<p><a href="https://openfmri.org/data-sets">openfmri.org datasets</a>.</p>
"""
def normalize_name(name):
    """Normalize *name* to a lowercase identifier.

    Every run of characters outside ``[0-9a-zA-Z-]`` is collapsed to a
    single underscore, the result is lower-cased, and leading/trailing
    underscores are stripped.
    """
    # Raw string: '\-' in a plain literal is an invalid escape sequence
    # (SyntaxWarning/DeprecationWarning on modern Pythons). The compiled
    # pattern is byte-identical to the original.
    return re.sub(r"[^0-9a-zA-Z\-]+", '_', name).lower().strip('_')
# ----------------------------------------------------------------------------
# download openfmri data
# ----------------------------------------------------------------------------
def fetch_openfmri(dataset_id, data_dir, redownload=False, verbose=1):
    """ Downloads and extract datasets from www.openfmri.org

    Parameters
    ----------
    dataset_id: str
        Dataset identifier, short version of https://openfmri.org/data-sets
    data_dir: str
        Destination directory.
    redownload: boolean
        Set to True to force redownload of already available data.
        Defaults to False.

    Datasets
    --------
    {accession_number}: {dataset name}
    ds001: Balloon Analog Risk-taking Task
    ds002: Classification learning
    ds003: Rhyme judgment
    ds005: Mixed-gambles task
    ds007: Stop-signal task with spoken & manual responses
    ds008: Stop-signal task with unselective and selective stopping
    ds011: Classification learning and tone-counting
    ds017: Classification learning and stop-signal (1 year test-retest)
    ds051: Cross-language repetition priming
    ds052: Classification learning and reversal
    ds101: Simon task dataset
    ds102: Flanker task (event-related)
    ds105: Visual object recognition
    ds107: Word and object processing

    Returns
    -------
    ds_path: str
        Path of the dataset.
    """
    ds_url = 'https://openfmri.org/system/files/%s.tgz'
    # Human-readable name (normalized) drives the download folder; the
    # short accession number is the final on-disk folder name.
    ds_name = normalize_name(dataset_names[dataset_id])
    ds_urls = [ds_url % name for name in dataset_files[dataset_id]]
    dl_path = os.path.join(data_dir, ds_name)
    ds_path = os.path.join(data_dir, dataset_id)
    if verbose > 0:
        print 'Download dataset to: %s' % (ds_path)
    # Download only if missing, unless a redownload is forced.
    if not os.path.exists(ds_path) or redownload:
        if os.path.exists(ds_path):
            shutil.rmtree(ds_path)
        _fetch_dataset(ds_name, ds_urls, data_dir, verbose=1)
        # Move the extracted "<dl_path>/<dataset_id>" folder into place and
        # drop the temporary download directory.
        shutil.move(os.path.join(dl_path, dataset_id), ds_path)
        shutil.rmtree(dl_path)
    return ds_path
# ----------------------------------------------------------------------------
# parse openfmri layout
# ----------------------------------------------------------------------------
def get_study_tr(study_dir):
    """Return the repetition time (TR, as a float) of a study.

    Reads the second whitespace-separated token of
    ``<study_dir>/scan_key.txt`` (a line of the form ``TR <value>``).
    """
    # Context manager closes the handle deterministically; the original
    # left the file open until garbage collection.
    with open(os.path.join(study_dir, 'scan_key.txt')) as f:
        return float(f.read().split()[1])
def get_study_conditions(study_dir, model_id='model001'):
    """Map each session id to the set of condition ids seen for any subject.

    Scans every ``sub*`` directory of *study_dir* and collects the names of
    the ``cond*.txt`` onset files under
    ``<subject>/model/<model_id>/onsets/<session>``.
    """
    conditions = {}
    for subj_dir in glob.glob(os.path.join(study_dir, 'sub*')):
        for session in get_subject_sessions(subj_dir):
            found = conditions.setdefault(session, set())
            onset_dir = os.path.join(subj_dir, 'model', model_id,
                                     'onsets', session)
            for path in glob.glob(os.path.join(onset_dir, 'cond*.txt')):
                fname = os.path.split(path)[1]
                found.add(fname.split('.txt')[0])
    return conditions
def get_subject_task_per_session(subject_dir):
    """Return the task id (e.g. 'task001') of each BOLD session,
    ordered by session directory name."""
    bold_glob = os.path.join(subject_dir, 'BOLD', '*')
    tasks = []
    for session_path in sorted(glob.glob(bold_glob)):
        session_name = os.path.split(session_path)[1]
        tasks.append(session_name.split('_')[0])
    return tasks
def get_subject_sessions(subject_dir):
    """Return the sorted session ids that have both BOLD data and onsets.

    A session is kept only if it appears under ``BOLD/`` and under
    ``model/model001/onsets/`` with at least a ``cond001.txt`` file.
    """
    bold = set()
    for path in glob.glob(os.path.join(subject_dir, 'BOLD', '*')):
        bold.add(os.path.split(path)[1])
    onsets = set()
    onset_glob = os.path.join(subject_dir, 'model', 'model001', 'onsets', '*')
    for path in glob.glob(onset_glob):
        if os.path.exists(os.path.join(path, 'cond001.txt')):
            onsets.add(os.path.split(path)[1])
    return sorted(bold & onsets)
def get_subject_motion_per_session(subject_dir):
    """Return one float array of motion parameters per session.

    For every session returned by :func:`get_subject_sessions`, parses
    ``BOLD/<session>/motion.txt`` (whitespace-separated values, one scan
    per line) into a 2D float array.
    """
    sessions = get_subject_sessions(subject_dir)
    motion = []
    for session_id in sessions:
        session_dir = os.path.join(subject_dir, 'BOLD', session_id)
        # Context manager: the original leaked the file handle.
        with open(os.path.join(session_dir, 'motion.txt')) as f:
            motion_s = f.read()
        # Drop the trailing entry produced by the final newline ([:-1]).
        motion_s = np.array([l.split() for l in motion_s.split('\n')][:-1])
        motion.append(np.array(motion_s).astype('float'))
    return motion
def get_subject_bold_images(subject_dir):
    """Load the normalized BOLD image of every session.

    Returns
    -------
    (images, n_scans): the loaded ``normalized_bold.nii.gz`` image of each
    session, and for each image the size of its last (time) dimension.
    """
    images = [
        nb.load(os.path.join(subject_dir, 'BOLD', session_id,
                             'normalized_bold.nii.gz'))
        for session_id in get_subject_sessions(subject_dir)
    ]
    n_scans = [image.shape[-1] for image in images]
    return images, n_scans
def get_subject_events(study_dir, subject_dir):
    """Return, per session, the (onsets, condition_ids) event arrays.

    For every session of the subject and every condition known at the study
    level, loads ``model/model001/onsets/<session>/<cond>.txt`` (columns:
    onset, duration, amplitude); a missing condition file contributes a
    single all-zero row so every session covers the same condition set.
    """
    conditions = get_study_conditions(study_dir)
    sessions = get_subject_sessions(subject_dir)
    # sessions = os.path.join(subject_dir, 'model', 'model001', 'onsets', '*')
    events = []
    for session_id in sessions:
        session_dir = os.path.join(subject_dir,
                                   'model', 'model001', 'onsets', session_id)
        # conditions = glob.glob(os.path.join(session_dir, 'cond*.txt'))
        onsets = []
        cond_id = []
        for condition_id in sorted(conditions[session_id]):
            # cond_onsets = open(path, 'rb').read().split('\n')
            # cond_onsets = [l.split() for l in cond_onsets[:-1]]
            # cond_onsets = np.array(cond_onsets).astype('float')
            path = os.path.join(session_dir, '%s.txt' % condition_id)
            if os.path.exists(path):
                cond_onsets = np.loadtxt(path)
            else:
                # Placeholder row for a condition absent from this session.
                cond_onsets = np.array([[.0, .0, .0]])
            onsets.append(cond_onsets)
            # Numeric condition id ('cond003' -> 3), repeated per event row.
            cond_id.append([
                int(condition_id.split('cond')[1])] * cond_onsets.shape[0])
        events.append((np.vstack(onsets), np.concatenate(cond_id)))
    return events
def get_task_contrasts(study_dir, subject_dir, model_id):
    """Parse ``models/<model_id>/task_contrasts.txt`` into per-session
    contrast vectors.

    Two line formats are supported: ``<task_id> <contrast_id> w1 w2 ...``
    and the alternative ``<contrast_id> w1 w2 ...`` (no task column), in
    which case the contrasts are replicated for every task of the subject.

    Returns a dict mapping contrast_id to one weight vector per session
    (all-zero vectors for sessions whose task does not match).
    """
    contrasts_path = os.path.join(
        study_dir, 'models', model_id, 'task_contrasts.txt')
    do_all_tasks = False
    task_contrasts = {}
    for line in open(contrasts_path, 'rb').read().split('\n')[:-1]:
        line = line.split()
        task_id = line[0]
        contrast_id = line[1]
        con_val = np.array(line[2:]).astype('float')
        if 'task' not in task_id:  # alternative format
            do_all_tasks = True
            task_id = 'task001'
            contrast_id = line[0]
            con_val = np.array(line[1:]).astype('float')
        task_contrasts.setdefault(task_id, {}).setdefault(contrast_id, con_val)
    # Alternative format: the same contrasts apply to all of the
    # subject's tasks.
    if do_all_tasks:
        tasks = set([run.split('_')[0]
                     for run in get_subject_task_per_session(subject_dir)])
        for task_id in tasks:
            task_contrasts[task_id] = task_contrasts['task001']
    ordered = {}
    # Number of conditions per task, used to size the zero-filled vectors.
    tasks_conditions = dict(
        [(k.split('_')[0], sorted(v))
         for k, v in get_study_conditions(study_dir).items()])
    for task_id in sorted(task_contrasts.keys()):
        for contrast_id in task_contrasts[task_id]:
            for session_task_id in get_subject_task_per_session(subject_dir):
                if session_task_id == task_id:
                    con_val = task_contrasts[task_id][contrast_id]
                else:
                    n_conds = len(tasks_conditions[task_id])
                    con_val = np.array([0] * n_conds)
                ordered.setdefault(contrast_id, []).append(con_val)
    return ordered
def make_design_matrices(events, n_scans, tr, hrf_model='canonical',
                         drift_model='cosine', motion=None):
    """Build one GLM design matrix (as a plain array) per session.

    Parameters
    ----------
    events: list of (onsets_array, condition_ids) pairs, one per session,
        where onsets_array columns are (onset, duration, amplitude).
    n_scans: list of int, number of scans per session.
    tr: float, repetition time in seconds.
    motion: optional list of per-session motion-parameter arrays, added as
        extra 'motion_<i>' regressors.
    """
    design_matrices = []
    n_sessions = len(n_scans)
    for i in range(n_sessions):
        onsets = events[i][0][:, 0]
        duration = events[i][0][:, 1]
        amplitude = events[i][0][:, 2]
        cond_id = events[i][1]
        # Paradigm constructors expect events sorted by onset time.
        order = np.argsort(onsets)
        # make a block or event paradigm depending on stimulus duration
        if duration.sum() == 0:
            paradigm = EventRelatedParadigm(cond_id[order],
                                            onsets[order],
                                            amplitude[order])
        else:
            paradigm = BlockParadigm(cond_id[order], onsets[order],
                                     duration[order], amplitude[order])
        frametimes = np.linspace(0, (n_scans[i] - 1) * tr, n_scans[i])
        if motion is not None:
            add_regs = np.array(motion[i]).astype('float')
            add_reg_names = ['motion_%i' % r
                             for r in range(add_regs.shape[1])]
            design_matrix = make_dmtx(
                frametimes, paradigm, hrf_model=hrf_model,
                drift_model=drift_model,
                add_regs=add_regs, add_reg_names=add_reg_names)
        else:
            design_matrix = make_dmtx(
                frametimes, paradigm, hrf_model=hrf_model,
                drift_model=drift_model)
        design_matrices.append(design_matrix.matrix)
    return design_matrices
# ----------------------------------------------------------------------------
# dump openfmri layout from spm
# ----------------------------------------------------------------------------
def write_new_model(study_dir, model_id, contrasts):
    """Create ``models/<model_id>`` from model001's condition key plus the
    given contrasts.

    Parameters
    ----------
    study_dir: str
        Study root containing a ``models/model001`` reference model.
    model_id: str
        Identifier of the model to create (e.g. 'model002').
    contrasts: dict
        Maps '<task_id>__<contrast_id>' to a sequence of contrast weights.
    """
    model_dir = os.path.join(study_dir, 'models', model_id)
    if not os.path.exists(model_dir):
        os.makedirs(model_dir)
    # Reuse model001's condition key verbatim.
    cond_src = os.path.join(study_dir, 'models', 'model001',
                            'condition_key.txt')
    shutil.copyfile(cond_src, os.path.join(model_dir, 'condition_key.txt'))
    contrasts_path = os.path.join(model_dir, 'task_contrasts.txt')
    # Text mode + per-element str(): joining the bytes produced by
    # astype('|S32') raises TypeError on Python 3, and writing str to a
    # 'wb' file does too.  np.array() is kept so numeric upcasting (and
    # thus the formatted values) matches the original output.
    with open(contrasts_path, 'w') as f:
        for contrast in contrasts:
            task_id, contrast_id = contrast.split('__')
            weights = ' '.join(str(v) for v in np.array(contrasts[contrast]))
            f.write('%s %s %s\n' % (task_id, contrast_id, weights))
def spm_to_openfmri(out_dir, preproc_docs, intra_docs, metadata=None,
                    n_jobs=-1, verbose=1):
    """Dump paired SPM preproc/intra documents into the openfmri layout.

    Completes *metadata* from the first document pair, writes the
    study-level metadata files, then dumps each subject's preproc and
    intra-subject stats, sequentially (n_jobs == 1) or via a
    multiprocessing pool (n_jobs == -1 means one job per CPU).
    """
    metadata = _check_metadata(metadata, preproc_docs[0], intra_docs[0])
    _openfmri_metadata(os.path.join(out_dir, metadata['study_id']), metadata)
    n_jobs = multiprocessing.cpu_count() if n_jobs == -1 else n_jobs
    docs = zip(preproc_docs, intra_docs)
    if n_jobs == 1:
        for i, (preproc_doc, intra_doc) in enumerate(docs):
            _openfmri_preproc(out_dir, preproc_doc, metadata, verbose)
            _openfmri_intra(out_dir, intra_doc, metadata, verbose)
    else:
        pool = multiprocessing.Pool(processes=n_jobs)
        for i, (preproc_doc, intra_doc) in enumerate(docs):
            pool.apply_async(_openfmri_preproc,
                             args=(out_dir, preproc_doc, metadata, verbose))
            pool.apply_async(_openfmri_intra,
                             args=(out_dir, intra_doc, metadata, verbose))
        pool.close()
        pool.join()
def _check_metadata(metadata, preproc_doc, intra_doc):
if metadata is None:
metadata = {}
if not 'run_key' in metadata:
metadata['run_key'] = ['task%03i run%03i' % (1, i + 1)
for i in range(len(preproc_doc['n_scans']))]
if not 'condition_key' in metadata:
metadata['condition_key'] = {}
for run_key, conditions in zip(metadata['run_key'],
intra_doc['condition_key']):
metadata['condition_key'][run_key] = conditions
if not 'scan_key' in metadata:
metadata['scan_key'] = {}
metadata['scan_key']['TR'] = intra_doc['tr']
if 'study_id' in intra_doc:
metadata['study_id'] = intra_doc['study_id']
else:
metadata['study_id'] = ''
return metadata
def _openfmri_preproc(out_dir, doc, metadata=None, verbose=1):
    """Dump one subject's preprocessing results into the openfmri layout.

    Writes the raw/normalized anatomy, the slice-timed and final BOLD
    series (one 4D image per run) and the motion parameters.

    Parameters
    ----------
    metadata: dict
        - run_key: naming the sessions

    Examples
    --------
    {'run_key': ['task001 run001', 'task001 run002',
                 'task002 run001', 'task002 run002']}
    """
    if 'study_id' in doc:
        study_dir = os.path.join(out_dir, doc['study_id'])
    else:
        study_dir = out_dir
    if verbose > 0:
        print '%s@%s: dumping preproc' % (doc['subject_id'], doc['study_id'])
    subject_dir = os.path.join(study_dir, doc['subject_id'])
    anatomy_dir = os.path.join(subject_dir, 'anatomy')
    if not os.path.exists(anatomy_dir):
        os.makedirs(anatomy_dir)
    # Raw ('preproc') and warped/normalized ('final') anatomical images.
    anatomy = doc['preproc']['anatomy']
    wm_anatomy = doc['final']['anatomy']
    anatomy = nb.load(anatomy)
    wm_anatomy = nb.load(wm_anatomy)
    nb.save(anatomy, os.path.join(anatomy_dir, 'highres001.nii.gz'))
    nb.save(wm_anatomy, os.path.join(anatomy_dir,
                                     'normalized_highres001.nii.gz'))
    bold_dir = os.path.join(subject_dir, 'BOLD')
    # Slice-timed BOLD: concatenate each run's volumes into one 4D image.
    for session, run_key in zip(
            doc['slice_timing']['bold'], metadata['run_key']):
        bold = nb.concat_images(session)
        session_dir = os.path.join(bold_dir, run_key.replace(' ', '_'))
        if not os.path.exists(session_dir):
            os.makedirs(session_dir)
        nb.save(bold, os.path.join(session_dir, 'bold.nii.gz'))
    # Final (normalized) BOLD plus the realignment motion file per run.
    for session, motion, run_key in zip(doc['final']['bold'],
                                        doc['realign']['motion'],
                                        metadata['run_key']):
        bold = nb.concat_images(session)
        session_dir = os.path.join(bold_dir, run_key.replace(' ', '_'))
        if not os.path.exists(session_dir):
            os.makedirs(session_dir)
        nb.save(bold, os.path.join(session_dir, 'normalized_bold.nii.gz'))
        shutil.copyfile(motion, os.path.join(session_dir, 'motion.txt'))
def _openfmri_intra(out_dir, doc, metadata=None, verbose=1):
    """Dump one subject's intra-subject stats into the openfmri layout.

    Writes the study-level condition/contrast specifications, the
    subject's onset files, the contrast/t maps, the mask and the raw SPM
    document (JSON).

    Parameters
    ----------
    metadata: dict
        - condition_key
          https://openfmri.org/content/metadata-condition-key

    Examples
    --------
    {'condition_key': {'task001 cond001': 'task',
                       'task001 cond002': 'parametric gain'}}
    """
    if 'study_id' in doc:
        study_dir = os.path.join(out_dir, doc['study_id'])
    else:
        study_dir = out_dir
    if verbose > 0:
        print '%s@%s: dumping stats intra' % (doc['subject_id'],
                                              doc['study_id'])
    subject_dir = os.path.join(study_dir, doc['subject_id'])
    model_dir = os.path.join(study_dir, 'models', 'model001')
    if not os.path.exists(model_dir):
        os.makedirs(model_dir)
    # conditions specification: '<task_id> cond<NNN> <name>' lines,
    # de-duplicated and sorted.
    conditions_spec = []
    for key, val in sorted(metadata['condition_key'].iteritems()):
        for i, name in enumerate(val):
            conditions_spec.append(
                '%s cond%03i %s\n' % (key.split(' ')[0], i + 1, name))
    with open(os.path.join(model_dir, 'condition_key.txt'), 'wb') as f:
        f.write(''.join(sorted(set(conditions_spec))))
    # contrasts specification
    contrasts_spec = []
    for key, val in doc['task_contrasts'].iteritems():
        if 'task_contrasts' in metadata:
            key = doc['task_contrasts'][key]
        for i, session_contrast in enumerate(val):
            task_id = metadata['run_key'][i].split(' ')[0]
            # check not null and 1d
            if (np.abs(session_contrast).sum() > 0
                and len(np.array(session_contrast).shape) == 1):
                con = ' '.join(np.array(session_contrast).astype('|S32'))
                contrasts_spec.append('%s %s %s\n' % (task_id, key, con))
    with open(os.path.join(model_dir, 'task_contrasts.txt'), 'wb') as f:
        f.write(''.join(sorted(set(contrasts_spec))))
    # dump onsets: one '<condition>.txt' per run, one
    # 'onset duration amplitude' line per event.
    model_dir = os.path.join(subject_dir, 'model', 'model001')
    onsets_dir = os.path.join(model_dir, 'onsets')
    for onsets, run_key in zip(doc['onsets'], metadata['run_key']):
        run_dir = os.path.join(onsets_dir, run_key.replace(' ', '_'))
        if not os.path.exists(run_dir):
            os.makedirs(run_dir)
        for condition_id, values in onsets.iteritems():
            cond = os.path.join(run_dir, '%s.txt' % condition_id)
            with open(cond, 'wb') as f:
                for timepoint in values:
                    f.write('%s %s %s\n' % timepoint)
    # analyses: save each contrast/t map under a normalized file name.
    for dtype in ['c_maps', 't_maps']:
        data_dir = os.path.join(model_dir, dtype)
        if not os.path.exists(data_dir):
            os.makedirs(data_dir)
        if isinstance(doc[dtype], dict):
            for contrast_id in doc[dtype].keys():
                fname = normalize_name(contrast_id)
                img = nb.load(doc[dtype][contrast_id])
                nb.save(img, os.path.join(data_dir, '%s.nii.gz' % fname))
    # general data for analysis
    img = nb.load(doc['mask'])
    nb.save(img, os.path.join(model_dir, 'mask.nii.gz'))
    json.dump(doc, open(os.path.join(model_dir, 'SPM.json'), 'wb'))
def _openfmri_metadata(out_dir, metadata):
""" General dataset information
Parameters
----------
metadata: dict
- task_key -- https://openfmri.org/content/metadata-task-key
- scan_key -- https://openfmri.org/content/metadata-scan-key
Examples
--------
{'task_key': {'task001': 'stop signal with manual response',
'task002': 'stop signal with letter naming'}}
{'scan_key': {'TR': 2.0}
"""
if not os.path.exists(out_dir):
os.makedirs(out_dir)
# naming the tasks
if 'task_key' in metadata:
with open(os.path.join(out_dir, 'task_key.txt'), 'wb') as f:
for key, val in sorted(metadata['task_key'].iteritems()):
f.write('%s %s\n' % (key, val))
# scanning info
if 'scan_key' in metadata:
with open(os.path.join(out_dir, 'scan_key.txt'), 'wb') as f:
for key, val in sorted(metadata['scan_key'].iteritems()):
f.write('%s %s\n' % (key, val))
# extra info, for example subject_id mapping etc...
if 'extras' in metadata:
meta_dir = os.path.join(out_dir, 'metadata')
if not os.path.exists(meta_dir):
os.makedirs(meta_dir)
for key, val in metadata['extras'].iteritems():
with open(os.path.join(meta_dir, '%s.txt' % key), 'wb') as f:
for k, v in sorted(val.iteritems()):
f.write('%s %s\n' % (k, v))
# ----------------------------------------------------------------------------
# GLM on openfmri layout
# ----------------------------------------------------------------------------
def first_level_glm(study_dir, subjects_id, model_id,
                    hrf_model='canonical', drift_model='cosine',
                    glm_model='ar1', mask='compute', n_jobs=-1, verbose=1):
    """Run the first-level GLM for every subject, sequentially or in a
    multiprocessing pool (n_jobs == -1 means one job per CPU)."""
    n_jobs = multiprocessing.cpu_count() if n_jobs == -1 else n_jobs
    print study_dir, subjects_id
    if n_jobs == 1:
        for subject_id in subjects_id:
            _first_level_glm(study_dir, subject_id, model_id,
                             hrf_model=hrf_model,
                             drift_model=drift_model,
                             glm_model=glm_model, mask=mask, verbose=verbose)
    else:
        pool = multiprocessing.Pool(processes=n_jobs)
        for subject_id in subjects_id:
            pool.apply_async(
                _first_level_glm,
                args=(study_dir, subject_id, model_id),
                kwds={'hrf_model': hrf_model,
                      'drift_model': drift_model,
                      'glm_model': glm_model, 'mask': mask, 'verbose': verbose})
        pool.close()
        pool.join()
def _first_level_glm(study_dir, subject_id, model_id,
                     hrf_model='canonical', drift_model='cosine',
                     glm_model='ar1', mask='compute', verbose=1):
    """Fit one subject's GLM and save z/t/contrast/variance maps.

    Loads the subject's normalized BOLD images, motion parameters,
    contrasts and events, builds one design matrix per session, fits an
    FMRILinearModel and writes the maps under
    ``<subject>/model/<model_id>/{z,t,c,var}_maps/``.
    """
    study_id = os.path.split(study_dir)[1]
    subject_dir = os.path.join(study_dir, subject_id)
    if verbose > 0:
        print '%s@%s: first level glm' % (subject_id, study_id)
    tr = get_study_tr(study_dir)
    images, n_scans = get_subject_bold_images(subject_dir)
    motion = get_subject_motion_per_session(subject_dir)
    contrasts = get_task_contrasts(study_dir, subject_dir, model_id)
    events = get_subject_events(study_dir, subject_dir)
    design_matrices = make_design_matrices(events, n_scans, tr,
                                           hrf_model, drift_model, motion)
    glm = FMRILinearModel(images, design_matrices, mask=mask)
    glm.fit(do_scaling=True, model=glm_model)
    for contrast_id in contrasts:
        con_val = []
        # Pad each session's contrast with zeros up to the design matrix
        # width (drift/motion regressors get zero weight).
        for session_con, session_dm in zip(contrasts[contrast_id],
                                           design_matrices):
            con = np.zeros(session_dm.shape[1])
            con[:len(session_con)] = session_con
            con_val.append(con)
        z_map, t_map, c_map, var_map = glm.contrast(
            con_val,
            con_id=contrast_id,
            output_z=True,
            output_stat=True,
            output_effects=True,
            output_variance=True,)
        model_dir = os.path.join(subject_dir, 'model', model_id)
        for dtype, img in zip(['z', 't', 'c', 'var'],
                              [z_map, t_map, c_map, var_map]):
            map_dir = os.path.join(model_dir, '%s_maps' % dtype)
            if not os.path.exists(map_dir):
                os.makedirs(map_dir)
            path = os.path.join(
                map_dir, '%s.nii.gz' % normalize_name(contrast_id))
            nb.save(img, path)
    # NOTE(review): model_dir is only bound inside the loop above, so this
    # raises NameError when `contrasts` is empty -- verify intended.
    nb.save(glm.mask, os.path.join(model_dir, 'mask.nii.gz'))
# ----------------------------------------------------------------------------
# preprocessing
# ----------------------------------------------------------------------------
def dataset_preprocessing(dataset_id, data_dir, output_dir, ignore_list=None,
                          dataset_description=None):
    """Main function for preprocessing (and analysis ?)

    Globs the dataset's subjects, feeds them to the SPM preprocessing
    pipeline (nipype_preproc_spm_utils.do_subjects_preproc) and writes the
    results (motion files, normalized anatomy and BOLD) back into the
    dataset's openfmri layout.

    Parameters
    ----------
    dataset_id: str, dataset folder name under both data_dir and output_dir.
    ignore_list: optional list of subject ids to skip.
    dataset_description: optional HTML snippet for the report
        (DATASET_DESCRIPTION by default).
    """
    data_dir = os.path.join(data_dir, dataset_id)
    if not os.path.exists(data_dir):
        os.makedirs(data_dir)
    output_dir = os.path.join(output_dir, dataset_id)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    dataset_description = DATASET_DESCRIPTION if \
        dataset_description is None else dataset_description
    ignore_list = [] if ignore_list is None else ignore_list
    # glob for subject ids
    subjects_id = [
        os.path.basename(x)
        for x in glob.glob(os.path.join(data_dir, subject_id_wildcard))]
    subjects_id.sort()
    # Filled by subject_factory; maps subject_id -> its session ids, used
    # later when dumping the per-session results.
    sessions_id = {}
    # producer subject data
    def subject_factory():
        for subject_id in subjects_id:
            if subject_id in ignore_list:
                continue
            sessions = get_subject_sessions(os.path.join(data_dir, subject_id))
            sessions_id[subject_id] = sessions
            # construct subject data structure
            subject_data = nipype_preproc_spm_utils.SubjectData()
            subject_data.session_id = sessions
            subject_data.subject_id = subject_id
            subject_data.func = []
            assert sessions != []
            # glob for bold data
            has_bad_sessions = False
            for session_id in subject_data.session_id:
                bold_dir = os.path.join(
                    data_dir,
                    "%s/BOLD/%s" % (subject_id, session_id))
                # extract .nii.gz to .nii
                unzip_nii_gz(bold_dir)
                # glob bold data for this session
                func = glob.glob(os.path.join(bold_dir, "bold.nii"))
                # check that this session is OK (has bold data, etc.)
                if not func:
                    has_bad_sessions = True
                    break
                subject_data.func.append(func[0])
            # exclude subject if necessary
            if has_bad_sessions:
                continue
            # glob for anatomical data
            anat_dir = os.path.join(
                data_dir,
                "%s/anatomy" % subject_id)
            # extract .nii.gz to .nii
            unzip_nii_gz(anat_dir)
            # glob anatomical data proper
            subject_data.anat = glob.glob(
                os.path.join(
                    data_dir,
                    "%s/anatomy/highres001_brain.nii" % subject_id))[0]
            # set subject output dir (all calculations for
            # this subject go here)
            subject_data.output_dir = os.path.join(
                output_dir,
                subject_id)
            yield subject_data
    # do preprocessing proper
    report_filename = os.path.join(output_dir, "_report.html")
    for results in nipype_preproc_spm_utils.do_subjects_preproc(
        subject_factory(),
        n_jobs=1,
        output_dir=output_dir,
        do_deleteorient=True,  # some openfmri data have garbage orientation
        do_dartel=DO_DARTEL,
        dataset_id=dataset_id,
        # do_cv_tc=False,
        dataset_description=dataset_description,
        # do_report=False,
        report_filename=report_filename,
        do_shutdown_reloaders=True,  # XXX rm this if u want to chain GLM QA
        ):
        pass
        subject_id = results['subject_id']
        # dump results in openfmri layout
        # Normalize scalar results to lists so the zip below always works.
        if not isinstance(results['estimated_motion'], list):
            results['estimated_motion'] = [results['estimated_motion']]
        if not isinstance(results['func'], list):
            results['func'] = [results['func']]
        img = nb.load(results['anat'])
        nb.save(img, os.path.join(
            data_dir, subject_id, 'anatomy',
            'normalized_highres001.nii.gz'))
        for session_id, motion, func in zip(sessions_id[subject_id],
                                            results['estimated_motion'],
                                            results['func']):
            # estimated motion
            shutil.copyfile(motion, os.path.join(
                data_dir, subject_id, 'BOLD', session_id, 'motion.txt'))
            # preprocessed bold
            img = nb.load(func)
            nb.save(img, os.path.join(
                data_dir, subject_id, 'BOLD',
                session_id, 'normalized_bold.nii.gz'))
# ----------------------------------------------------------------------------
# launcher stuff
# ----------------------------------------------------------------------------
def get_dataset_description(dataset_id):
    """Scrape the free-text description of a dataset from its
    openfmri.org page."""
    full_id = map_id[dataset_id]
    url = 'https://openfmri.org/dataset/%s' % full_id
    html = urllib2.urlopen(url).read()
    start_marker = ('<div class="field-item even" '
                    'property="content:encoded"><p>')
    end_marker = ('</p>\n</div></div></div><div class="field '
                  'field-name-field-mixedformattasksconditions')
    description = html.split(start_marker)[1]
    return description.split(end_marker)[0]
def get_options(args):
    """Parse the command line with optparse; the dataset id (-t) and
    dataset directory (-d) are mandatory, everything else has defaults."""
    parser = OptionParser()
    parser.add_option(
        '-t', '--dataset', dest='dataset_id',
        help='The openfmri dataset id.\nSee https://openfmri.org/data-sets')
    parser.add_option('-d', '--dataset-dir', dest='dataset_dir',
                      help='Parent path for the dataset.')
    parser.add_option('-m', '--model', dest='model_id', default='model001',
                      help='The model to be used from the GLM.')
    parser.add_option(
        '-p', '--preprocessing', dest='preproc_dir',
        default=tempfile.gettempdir(),
        help='Parent path for preprocessing.')
    # NOTE(review): option string '--force-dowload' is misspelled but is
    # part of the CLI contract -- renaming it would break existing callers.
    parser.add_option(
        '-f', '--force-dowload', dest='force_download',
        action='store_true', default=False,
        help='Force redownload the dataset.')
    parser.add_option(
        '-v', '--verbose', dest='verbose',
        type='int', default=1,
        help='Verbosity level.')
    parser.add_option(
        '-a', '--skip-preprocessing', dest='skip_preprocessing',
        action='store_true', default=False,
        help='Force preprocessing skipping.')
    parser.add_option(
        '-u', '--subject_id', dest='subject_id',
        help='If defined, only this subject is processed.')
    options, args = parser.parse_args(args)
    dataset_dir = options.dataset_dir
    dataset_id = options.dataset_id
    if dataset_id is None:
        parser.error("The dataset id is mandatory.")
    if dataset_dir is None:
        parser.error("The data directory is mandatory.")
    return options
def setup_dataset(dataset_id, dataset_dir, preproc_dir,
                  force_download, verbose=1):
    """Fetch the dataset and install any local model definitions.

    Local files named 'models/<dataset_id>__<model_id>__<filename>' are
    copied into the dataset's 'models/<model_id>' folder, which is first
    cloned from model001 if it does not exist yet.
    """
    if not os.path.exists(preproc_dir):
        os.makedirs(preproc_dir)
    if verbose > 0:
        print 'Fetching data...'
    dataset_dir = fetch_openfmri(dataset_id, dataset_dir,
                                 redownload=force_download)
    if verbose > 0:
        print 'Copying models...'
    # update and/or create models
    model001_dir = os.path.join(dataset_dir, 'models', 'model001')
    for task_contrasts_path in glob.glob(os.path.join('models', '*')):
        # File name convention: '<ds_id>__<model_id>__<target filename>'.
        ds_id, model_id, f = os.path.split(task_contrasts_path)[1].split('__')
        if ds_id == dataset_id:
            model_dir = os.path.join(dataset_dir, 'models', model_id)
            if not os.path.exists(model_dir):
                shutil.copytree(model001_dir, model_dir)
            shutil.copyfile(task_contrasts_path, os.path.join(model_dir, f))
def process_dataset(dataset_id, model_id, dataset_dir, preproc_dir,
                    force_download, subject_id=None,
                    skip_preprocessing=False, verbose=1):
    """End-to-end pipeline: fetch/setup, (optionally) preprocess, then run
    the first-level GLM.

    When *subject_id* is given, every other subject is put on the ignore
    list so only that subject is processed.
    """
    ignore_list = dataset_ignore_list[dataset_id]
    description = get_dataset_description(dataset_id)
    study_dir = os.path.join(dataset_dir, dataset_id)
    if subject_id is not None:
        ignore_list = [os.path.split(p)[1]
                       for p in glob.glob(os.path.join(study_dir, 'sub*'))
                       if os.path.split(p)[1] != subject_id]
    subjects_id = [os.path.split(p)[1]
                   for p in glob.glob(os.path.join(study_dir, 'sub*'))
                   if os.path.split(p)[1] not in ignore_list]
    setup_dataset(dataset_id, dataset_dir, preproc_dir, force_download)
    if verbose > 0:
        print 'Preprocessing data...'
    if not skip_preprocessing:
        dataset_preprocessing(dataset_id, dataset_dir, preproc_dir,
                              ignore_list, description)
    first_level_glm(study_dir, subjects_id, model_id, n_jobs=6,
                    verbose=verbose)
| [
"yannick.schwartz@cea.fr"
] | yannick.schwartz@cea.fr |
f8b7da1386f587a5f1d838d09e70728bd518972a | 1522a576fe53c79d44a61cce481325889080f6db | /1.4 循环.py | dd5fa9cbac8e926d4904a9bfa381e35e2ba6e4b2 | [] | no_license | conroewuhao/Python- | 8ac2c0c7db851d650b0b748b033533d30e776e7c | 52118c73c9d19ad8912b86cbe7f340dd0ed3ca99 | refs/heads/master | 2016-08-11T15:32:13.540566 | 2016-04-22T08:17:08 | 2016-04-22T08:17:08 | 53,392,849 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 523 | py | #coding=utf-8
# Iterate over the list and print each element.
newArray=["wuhao","ligang","micheal"];
for name in newArray:
    print(name);
# Simple accumulation: sum the literal list 1..10.
a=0;
for x in [1,2,3,4,5,6,7,8,9,10]:
    a+=x;
print(a);
# Simple summation of 0..100.
b=0;
# range(101) stands for [0, 1, 2, 3, ..., 100].
# The range() function generates an integer sequence.
for x in range(101):
    b+=x;
print(b);
# Sum of even numbers below 100 using a while loop.
newsum=0;
n=0;
while n<100:
    newsum+=n;
    n+=2;
print(newsum);
# while 1:
#     print("你是一个好人");
def hello(x):
    return -x
a=hello(10)
print(a)
| [
"416916723@qq.com"
] | 416916723@qq.com |
1fb64e2989fe6c0d4984b1cd7d2f3f3ce235e1b9 | 44ebaa3b4863528e7c87fade68e9ef69820406db | /2_1.py | 0cc467f785d23c05ea521814773b1856776dbc99 | [] | no_license | marzukr/google-foobar | 4637273cd011a7568f77d47bcb6746f4b48a752a | e126b558d0bb9f243615d0f29f11276cb5692a78 | refs/heads/master | 2020-03-21T21:53:15.888202 | 2019-06-16T00:18:35 | 2019-06-16T00:18:35 | 139,090,902 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,666 | py | def answer(xs):
if len(xs) >= 2:
mostPos = 0
mostNeg = 0
nextNeg = 1
for i in xrange(len(xs) - 1, -1, -1):
if xs[i] > xs[mostPos]:
mostPos = i
elif xs[i] < xs[mostNeg]:
mostNeg = i
elif xs[i] < xs[nextNeg]:
nextNeg = i
if xs[mostPos] > 0:
newList = xs[:]
del newList[mostPos]
result = answer(newList)
if int(result) <= 0:
result = "1"
return stringMultiply([str(xs[mostPos]), result])
elif xs[mostNeg] < 0 and xs[nextNeg] < 0 and mostNeg != nextNeg:
newList = [element for i, element in enumerate(xs) if i not in [mostNeg, nextNeg]]
result = answer(newList)
if int(result) <= 0:
result = "1"
return stringMultiply([str(xs[mostNeg]), str(xs[nextNeg]), result])
else:
return "0"
return str(xs[0])
# Multiply a list of base-10 integer strings; accepts an optional leading
# '-' on any factor.
def stringMultiply(xm):
    """Return the product of the decimal integer strings in ``xm`` as a string.

    ``xm`` holds two or more decimal strings, each optionally prefixed with
    '-'.

    BUG FIX: the original only treated a factor as negative when the whole
    element equalled the bare string "-" (``xm[0] == "-"``), so real negative
    values such as "-3" silently lost their sign, and the recursive step for
    three or more factors could prepend a second '-'.  This version tracks
    the overall sign across *all* factors and multiplies magnitudes with
    schoolbook long multiplication on little-endian digit arrays.
    """
    negative = False
    factors = []
    for term in xm:
        if term.startswith("-"):
            negative = not negative        # each '-' flips the overall sign
            term = term[1:]
        # Store digits least-significant first; simplifies the carry loop.
        factors.append([int(ch) for ch in reversed(term)])
    acc = factors[0]
    for factor in factors[1:]:
        # Schoolbook multiply: digit i * digit j contributes at position i+j.
        partial = [0] * (len(acc) + len(factor))
        for i, a in enumerate(acc):
            for j, b in enumerate(factor):
                partial[i + j] += a * b
        # Normalise carries into single decimal digits.
        carry = 0
        for k in range(len(partial)):
            carry, partial[k] = divmod(partial[k] + carry, 10)
        # Strip leading zeros (stored at the tail), keeping at least one digit.
        while len(partial) > 1 and partial[-1] == 0:
            partial.pop()
        acc = partial
    magnitude = "".join(str(d) for d in reversed(acc))
    # Never emit "-0".
    if negative and magnitude != "0":
        return "-" + magnitude
    return magnitude
# assumes only positive (non-negative) numbers
def stringAdd(xa):
    """Column-wise addition of non-negative base-10 integer strings.

    Returns the sum as a decimal string; an empty input list yields "".
    """
    out = ""
    carry = 0
    position = 0
    digits_remain = True
    while digits_remain:
        digits_remain = False
        column = carry
        # Collect the digit at this position (counted from the right) of
        # every addend long enough to still have one.
        for addend in xa:
            if position < len(addend):
                column += int(addend[len(addend) - 1 - position])
                digits_remain = True
        if digits_remain:
            out += str(column % 10)       # least-significant digits first
            carry = column // 10
        position += 1
    if carry:
        out += str(carry)[::-1]           # final carry may be multi-digit
    return out[::-1]
# print answer([1000] * 50)
# print answer([2,-3,1,0,-5])
# print answer([-2, -3, 4, -5]) | [
"marzukr@gmail.com"
] | marzukr@gmail.com |
0e7da854cd08df8257225916f68692919fa366eb | 6c0e18ecd73fc65d65d2afbe481bff03beab8e2c | /SendThread.py | efa19ec86b10ddca5e5a0de5dd2a9caaecfcb169 | [] | no_license | Matt45D/BCRegMed_Hackathon | 694356a91d49a2a9f7ef4f3378b4cc34ff84de74 | debe5b8fe78272816c77b1c87c576b74ec79b87f | refs/heads/master | 2020-08-06T09:04:29.095831 | 2019-10-07T04:29:02 | 2019-10-07T04:29:02 | 212,917,215 | 2 | 3 | null | 2019-10-07T04:30:52 | 2019-10-04T23:21:20 | Python | UTF-8 | Python | false | false | 216 | py | import json
import logging
import os
from queue import Queue
from threading import Thread
from time import time
def SendThread():
    """Start-up stub for the sender thread: announce initialization.

    Returns None; currently only prints a banner.
    """
    print("Initializing the Sender Thread...")


if __name__ == '__main__':
    # BUG FIX: the original called an undefined main() (NameError when run
    # as a script); invoke the one entry point this module defines.
    SendThread()
"matt45davison@gmail.com"
] | matt45davison@gmail.com |
23c3f5b37a37960f637d8a881b7102947760ec7d | 5d4fd4b3174a9bf95bd2d6a5c07b4ea03d39c88e | /model/src/GCN.py | 70522a32cb9c7e672490bd12c254afccf27f89d3 | [] | no_license | MKSwaminathan/gcn-restaurant-classifier | 99c7891bedd1d1f0ae0938ced54b45d20a45b410 | 148708925f0135bfb6ed3aa100d64d1b7dd7dc3b | refs/heads/master | 2022-06-28T20:09:44.626018 | 2020-05-13T23:34:04 | 2020-05-13T23:34:04 | 256,064,501 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,142 | py | import numpy as np
import math
import torch
import torch.nn
import torch.nn.functional as F
import torch.nn.init as tinit
from torch.autograd import Variable
dtype = torch.FloatTensor
class GCNLayer(torch.nn.Module):
    """One graph-convolution layer.

    For each node i, the feature rows of its neighbours (columns j with
    A[i, j] != 0) are combined by an aggregation policy and projected
    through a learned (out_features x in_features) kernel.
    """

    def __init__(self, nodes, in_features, out_features, isrelu=True):
        """
        nodes        -- number of nodes in the (fixed-size) graph
        in_features  -- width of each input node-feature row
        out_features -- width of each output node-feature row
        isrelu       -- True selects He init (suited to ReLU); False, Xavier
        """
        super(GCNLayer, self).__init__()
        self.kernel = torch.nn.Parameter(torch.Tensor(out_features, in_features))
        self.out_features = out_features
        self.nodes = nodes
        if isrelu:
            self.init_params_relu()
        else:
            self.init_params()

    def init_params_relu(self):
        # He initializations are generally better for ReLU activations:
        # Source: He, K. et al. (2015)
        tinit.kaiming_uniform_(self.kernel, a=math.sqrt(5))

    def init_params(self):
        # Xavier initializations are universally good - He is better for relu.
        # BUG FIX: the original referenced nn.init.calculate_gain, but no
        # ``nn`` alias exists in this module (NameError whenever
        # isrelu=False); use the imported ``tinit`` alias instead.
        tinit.xavier_uniform_(self.kernel, gain=tinit.calculate_gain('sigmoid'))

    def forward(self, A, H):
        """Aggregate each node's neighbourhood, then apply the linear kernel.

        A -- (nodes, nodes) adjacency matrix; any non-zero entry is an edge
        H -- (nodes, in_features) node-feature matrix
        Returns a (nodes, out_features) tensor.
        """
        H_out = torch.zeros(self.nodes, self.out_features, dtype=torch.float32)
        for index in range(self.nodes):
            # NOTE: here you can define which aggregate policy to use
            aggregate = self.make_aggregate_mean(index, A, H)
            H_out[index] = F.linear(aggregate, self.kernel)
        return H_out

    def make_aggregate_mean(self, index, A, H):
        """Mean of the neighbours' feature rows.

        The divisor starts at 1 even though the node's own features are not
        added in -- presumably a stand-in for an implicit self-loop; kept
        as-is to match the original behaviour (TODO confirm intent).
        """
        aggregate = torch.zeros(1, H.size(1), dtype=torch.float32)
        n_neighbors = 1
        for neighbor_index in range(self.nodes):
            if A[index, neighbor_index] != 0:
                aggregate = aggregate + H[neighbor_index]
                n_neighbors += 1
        return aggregate / n_neighbors

    def make_aggregate_weighted_mean(self, index, A, H):
        """Neighbour mean with the node's own features weighted in.

        The self row is scaled by the neighbour count before averaging, so
        well-connected nodes keep more of their own signal.
        """
        aggregate = torch.zeros(1, H.size(1), dtype=torch.float32)
        n_neighbors = 1
        for neighbor_index in range(self.nodes):
            if A[index, neighbor_index] != 0:
                aggregate = aggregate + H[neighbor_index]
                n_neighbors += 1
        # This is a param that can be changed
        self_weight_factor = n_neighbors
        aggregate = aggregate + self_weight_factor * H[index]
        return aggregate / (n_neighbors + self_weight_factor)

    def make_aggregate_sum(self, index, A, H):
        """Plain sum of the neighbours' feature rows (no normalisation)."""
        aggregate = torch.zeros(1, H.size(1), dtype=torch.float32)
        for neighbor_index in range(self.nodes):
            if A[index, neighbor_index] != 0:
                aggregate = aggregate + H[neighbor_index]
        return aggregate
class GCN(torch.nn.Module):
    """Four-layer graph convolutional network.

    Three hidden GCN layers with ReLU activations feed a final GCN layer
    that emits ``classes`` raw scores per node (no output activation).
    """

    def __init__(self, nodes, x_dim, h1_dim, h2_dim, h3_dim, classes):
        super(GCN, self).__init__()
        self.gcn1 = GCNLayer(nodes, x_dim, h1_dim)
        self.gcn2 = GCNLayer(nodes, h1_dim, h2_dim)
        self.gcn3 = GCNLayer(nodes, h2_dim, h3_dim)
        self.gcn4 = GCNLayer(nodes, h3_dim, classes)
        # Kept for state compatibility; the forward pass leaves it unused.
        self.sigmoid = torch.nn.Sigmoid()

    def forward(self, A, X):
        """Run the stack: ReLU between hidden layers, raw output from gcn4."""
        hidden = X
        for layer in (self.gcn1, self.gcn2, self.gcn3):
            hidden = torch.relu(layer(A, hidden))
        return self.gcn4(A, hidden)
from load_dataset import *
# NOTE(review): this CUDA/CPU selection is immediately overwritten by the
# hard-coded CPU device on the next line, and ``device`` is never used below.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
device = torch.device("cpu")
#
# DEFINE MODEL
#
# GCN(nodes, input width, three hidden widths, output classes); N and
# num_features come from the star-import above -- presumably the dataset's
# node count and feature width (verify against load_dataset).
model = GCN(N,num_features,10,5,2,1)
#
# HYPERPARAMS
#
lr = 0.01
betas = (0.9,0.99)
weight_decay = 0
epochs = 20
# NOTE(review): loss_fn is defined but never used -- the loop below optimises
# the hand-rolled hinge-style penalty instead.
loss_fn = torch.nn.MSELoss(size_average=False)
#loss_fn = torch.nn.CrossEntropyLoss(size_average=False)
#loss_fn = torch.nn.NLLLoss()
optimizer = torch.optim.Adam(model.parameters(),
                             lr=lr,
                             betas=betas,
                             weight_decay=0)
best_auc = 0
best_model = GCN(N,num_features,10,5,2,1)
import tqdm
from sklearn import metrics
# Training loop: full-batch forward pass, one-sided penalty, Adam step,
# then training-set AUC to track the best-performing epoch.
for t in range(epochs):
    print('Epoch: ',t,'/',epochs)
    y_pred = model(A,X)
    #loss = loss_fn(y_pred, y)
    #print(y.size())
    #print(y_pred.size())
    #loss = torch.sum((y - y_pred)**2))
    # One-sided penalty max(0, y - y_pred + 1): punishes predictions that
    # fall more than a unit margin below the label.
    hinge_loss = y - y_pred + 1
    hinge_loss[hinge_loss < 0] = 0
    loss = torch.sum(hinge_loss)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    # Find AUC
    #y_predi = torch.abs(1-y_pred)
    y_hat = y_pred.data.numpy()
    y_true = y.data.numpy()
    fpr, tpr, thresholds = metrics.roc_curve(y_true, y_hat)
    AUC = metrics.auc(fpr, tpr)
    if AUC > best_auc:
        best_auc = AUC
        # NOTE(review): this stores a *reference*, not a snapshot -- the
        # optimizer keeps mutating the same object, so best_model ends up
        # identical to the final model; copy.deepcopy(model) or saving a
        # state_dict is probably what was intended.
        best_model = model
    print('AUC SCORE: ',AUC, ' Loss: ',loss.item())
# Find AUC for Test Set
y_pred = best_model(A_test,X_test)
y_true = y_test.data.numpy()
y_hat = y_pred.data.numpy()
fpr, tpr, thresholds = metrics.roc_curve(y_true, y_hat)
AUC = metrics.auc(fpr, tpr)
print('Test AUC SCORE: ',AUC)
| [
"mksn@login1.maverick2.tacc.utexas.edu"
] | mksn@login1.maverick2.tacc.utexas.edu |
5455dbfa3f6bdb95fbe0d82fe40400246f03ff85 | d5beb80c402954d1b66f765b5d5c93d28491324d | /evtstrd_test/filter.py | 854a71c52f33622147a05d857a11213a9abf3fb8 | [
"MIT"
] | permissive | srittau/eventstreamd | 978d822d7dd504f91aebdf11091733a04bb4c5c2 | 688ee94aea704230e2d0693195062cea8ba3eb73 | refs/heads/main | 2023-08-18T21:27:23.962517 | 2023-08-17T09:55:24 | 2023-08-17T09:55:24 | 85,480,241 | 0 | 0 | MIT | 2023-09-08T06:45:51 | 2017-03-19T14:00:49 | Python | UTF-8 | Python | false | false | 3,121 | py | from unittest import TestCase
from asserts import assert_equal, assert_false, assert_raises, assert_true
from evtstrd.filters import parse_filter
class FilterTest(TestCase):
    """Behavioural tests for the filter callables built by parse_filter."""

    def test_str(self) -> None:
        # A filter's str() round-trips the expression it was parsed from.
        filter_ = parse_filter("foo.bar<='ABC'")
        assert_equal("foo.bar<='ABC'", str(filter_))

    def test_string_filter__path_not_found(self) -> None:
        # A missing nested key never matches.
        filter_ = parse_filter("foo.bar<='ABC'")
        assert_false(filter_({"foo": {}}))

    def test_string_filter__wrong_type(self) -> None:
        # A string filter applied to a non-string value never matches.
        filter_ = parse_filter("foo.bar<='50'")
        assert_false(filter_({"foo": {"bar": 13}}))

    def test_string_filter__compare(self) -> None:
        # <= matches values lexicographically below or equal to the operand.
        filter_ = parse_filter("foo.bar<='ABC'")
        assert_true(filter_({"foo": {"bar": "AAA"}}))
        assert_true(filter_({"foo": {"bar": "ABC"}}))
        assert_false(filter_({"foo": {"bar": "CAA"}}))

    def test_string_filter__lt(self) -> None:
        # < is strict: the boundary value itself does not match.
        filter_ = parse_filter("foo.bar<'ABC'")
        assert_true(filter_({"foo": {"bar": "AAA"}}))
        assert_false(filter_({"foo": {"bar": "ABC"}}))
        assert_false(filter_({"foo": {"bar": "CAA"}}))

    def test_string_filter__gt(self) -> None:
        # > is strict in the other direction.
        filter_ = parse_filter("foo.bar>'ABC'")
        assert_false(filter_({"foo": {"bar": "AAA"}}))
        assert_false(filter_({"foo": {"bar": "ABC"}}))
        assert_true(filter_({"foo": {"bar": "CAA"}}))
class ParseFilterTest(TestCase):
    """Tests for parse_filter itself: rejection of malformed expressions and
    the comparison semantics of the filters it returns."""

    def test_invalid_filter(self) -> None:
        with assert_raises(ValueError):
            parse_filter("INVALID")

    def test_invalid_values(self) -> None:
        # Unquoted, unterminated, empty, and out-of-range operands are all
        # rejected at parse time.
        with assert_raises(ValueError):
            parse_filter("foo=bar")
        with assert_raises(ValueError):
            parse_filter("foo='bar")
        with assert_raises(ValueError):
            parse_filter("foo='")
        with assert_raises(ValueError):
            parse_filter("foo=2000-12-32")

    def test_no_such_field(self) -> None:
        # A missing field simply fails to match (no exception).
        f = parse_filter("foo<=10")
        assert_false(f({}))

    def test_wrong_type(self) -> None:
        # An int filter applied to a non-int value never matches.
        f = parse_filter("foo<=10")
        assert_false(f({"foo": ""}))

    def test_eq_int(self) -> None:
        f = parse_filter("foo=10")
        assert_false(f({"foo": 9}))
        assert_true(f({"foo": 10}))
        assert_false(f({"foo": 11}))

    def test_le_int(self) -> None:
        f = parse_filter("foo<=10")
        assert_true(f({"foo": 9}))
        assert_true(f({"foo": 10}))
        assert_false(f({"foo": 11}))

    def test_ge_int(self) -> None:
        f = parse_filter("foo>=10")
        assert_false(f({"foo": 9}))
        assert_true(f({"foo": 10}))
        assert_true(f({"foo": 11}))

    def test_eq_str(self) -> None:
        f = parse_filter("foo='bar'")
        assert_false(f({"foo": "baz"}))
        assert_true(f({"foo": "bar"}))

    def test_eq_date(self) -> None:
        # ISO dates compare as dates, not as raw strings.
        f = parse_filter("foo=2016-03-24")
        assert_false(f({"foo": "2000-01-01"}))
        assert_true(f({"foo": "2016-03-24"}))

    def test_nested_value(self) -> None:
        # Dotted paths descend into nested dictionaries.
        f = parse_filter("foo.bar<=10")
        assert_true(f({"foo": {"bar": 10}}))
| [
"srittau@rittau.biz"
] | srittau@rittau.biz |
710e513735d3a7a12064bbc906c33166fe70075d | 3d5542c95f1bf847d37cdd4a82a6bbe71c70bcb5 | /add.py | 856634fa9b20956d9c0fb0b4d6d0e46e5a81ed85 | [
"MIT"
] | permissive | anilsth915/Sentiment-analysis-of-tweet-data- | edb81f114f7e7233da2886da7fde74c847e285da | 6f9fce8e8e9c3983d6c5cd75213eada481400d56 | refs/heads/master | 2021-08-08T03:26:39.547296 | 2017-11-09T13:23:01 | 2017-11-09T13:23:01 | 77,144,572 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12 | py | print(3+3)
| [
"noreply@github.com"
] | anilsth915.noreply@github.com |
9ae524b8f9026f3c5eb13a7e510c2d2990ec4ec4 | afe5177105808480a216e9b547a935be0e74a978 | /TestAutomation/remote_server_db.py | 2daf63e8bd423e639d3447a36a5a7bc8dd507f0a | [] | no_license | martinwr57/DjangoProject | 5ddd7a98debfb6fa1d59ca0c0257e2fb61fb15e0 | ebdc586b71d30fd1f96707fb81100903d40c5b72 | refs/heads/master | 2021-09-10T04:42:01.830609 | 2018-03-21T02:29:27 | 2018-03-21T02:29:27 | 126,110,782 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 50,049 | py | #!/usr/bin/env python
__author__ = "Willie Martin"
__version__ = "$Revision: 1.5 $"
import binascii
from binascii import crc32
from optparse import OptionParser
import os, os.path, sys, json, pprint, datetime
import string, time, datetime
from datetime import datetime
import adodbapi
from AutoPerfAnalysis3 import analysis
#from AutoPerfAnalysis import perf_d2d
import shelve
from twisted.protocols import basic
from twisted.application import service, internet
from twisted.internet import reactor, protocol
#LOGGING
from twisted.python import log
from twisted.python import components
from twisted.internet.protocol import ServerFactory
from twisted.internet.protocol import ClientFactory
from twisted.protocols.basic import FileSender
from twisted.internet.defer import Deferred
from twisted.spread import pb
from zope.interface import interface, implements
from twisted.cred import checkers, portal
from twisted.enterprise import adbapi
import wmi
from sqlalchemy import create_engine
from sqlalchemy import sql, schema, types, exc, pool
from sqlalchemy import Table, Integer, Sequence, Column, String, MetaData
from sqlalchemy.orm import sessionmaker
#engine = create_engine('mssql+pyodbc://@DATABASE\SQLEXPRESS/RADatabase')
engine = create_engine('mssql+pyodbc://@DATABASE\AUTOMATION/Automation')
connection = engine.connect()
metadata = MetaData(bind=engine)
Session = sessionmaker(bind=engine)
sep = os.sep
paths = '%s%sResults%s' % (os.environ['USERPROFILE'], sep,sep)
paths = '%s%shddlab%sFileShare%sPerformance_Results%s' % (sep,sep,sep,sep,sep)
dir_name = '%s%shddlab%sFileShare%sPerformance_Results%sMetrics_outputs%s' % (sep,sep, sep,sep,sep, sep)
pp = pprint.PrettyPrinter(indent=1)
keys = ['VenderID', 'ProductID', 'FirmwareRevisionLevel', 'ProductSerialNumber', 'TargetDeviceName', 'TargetPortIdentifier1',
'TargetPortIdentifier2', 'FormFactorWidth', 'FormFactorHeight', 'DeviceID', 'ServoCodeLevel', 'PCBASerialNumber', 'PCBAPartNumber',
'DiskMediaVendor', 'MotorSerialNumber', 'FlexCircuitAssemblySerialNumber', 'HeadVendor', 'HDCRevision', 'ActuatorSerialNumber',
'HeadDiskAssembly', 'YearofManufacture', 'WeekofManufacture', 'DayofManufacture', 'LocationofManufacture', 'DellPPID', 'MediumRotationRate', 'Diff', 'SED']
code_name= { 'Vendor':'SEAGATE','SerialNumber':"ST3500620SS", 'MarketName' : "Seagate Moose SAS_DUP",
'Vendor':'SEAGATE','SerialNumber':"ST3750630SS", 'MarketName' : "Seagate Moose SAS_DUP",
'Vendor':'SEAGATE','SerialNumber':"ST31000640SS" , 'MarketName' : "Seagate Moose SAS_DUP",
'Vendor':'SEAGATE','SerialNumber':"ST3146356SS" , 'MarketName' : "Seagate Hurricane_DUP",
'Vendor':'SEAGATE','SerialNumber':"ST3300656SS" , 'MarketName' : "Seagate Hurricane_DUP",
'Vendor':'SEAGATE','SerialNumber':"ST3450856SS" , 'MarketName' : "Seagate Hurricane_DUP",
'Vendor':'SEAGATE','SerialNumber':"ST3250310NS" , 'MarketName' : "Seagate Moose SATA_DUP",
'Vendor':'SEAGATE','SerialNumber':"ST3500320NS" , 'MarketName' : "Seagate Moose SATA_DUP",
'Vendor':'SEAGATE','SerialNumber':"ST3750330NS" , 'MarketName' : "Seagate Moose SATA_DUP",
'Vendor':'SEAGATE','SerialNumber':"ST31000340NS" , 'MarketName' : "Seagate Moose SATA_DUP",
'Vendor':'SEAGATE','SerialNumber':"ST3400755SS" , 'MarketName' : "Seagate Timberland NS_DUP",
'Vendor':'FUJITSU','SerialNumber':"MBA3073RC" , 'MarketName' : "Fujitsu Allegro 10LX_DUP",
'Vendor':'FUJITSU','SerialNumber':"MBA3147RC" , 'MarketName' : "Fujitsu Allegro 10LX_DUP",
'Vendor':'FUJITSU','SerialNumber':"MBA3300RC" , 'MarketName' : "Fujitsu Allegro 10LX_DUP",
'Vendor':'WESTERN DIGITAL','SerialNumber':"WDCWD1602ABKS-1" , 'MarketName' : "WD Pinnacle (XL320 RE)",
'Vendor':'WESTERN DIGITAL','SerialNumber':"WDCWD2502ABYS-1" , 'MarketName' : "WD Pinnacle (XL320 RE)",
'Vendor':'WESTERN DIGITAL','SerialNumber':"WDCWD5002ABYS-1" , 'MarketName' : "WD Pinnacle (XL320 RE)",
'Vendor':'WESTERN DIGITAL','SerialNumber':"WDCWD7502ABYS-1" , 'MarketName' : "WD Mars (XL333 RE)",
'Vendor':'WESTERN DIGITAL','SerialNumber':"WDCWD1002FBYS-1" , 'MarketName' : "WD Mars (XL333 RE)",
'Vendor':'FUJITSU','SerialNumber':"MHZ2080BK" , 'MarketName' : "Fujitsu Aries (A160-ED)",
'Vendor':'FUJITSU','SerialNumber':"MHZ2160BK" , 'MarketName' : "Fujitsu Aries (A160-ED)",
'Vendor':'FUJITSU','SerialNumber':"MHZ2250BK" , 'MarketName' : "Fujitsu Aries (A160-ED)",
'Vendor':'SEAGATE','SerialNumber':"ST973452SS" , 'MarketName' : "Seagate Hornet_DUP",
'Vendor':'SEAGATE','SerialNumber':"ST9146852SS" , 'MarketName' : "Seagate Hornet_DUP",
'Vendor':'SEAGATE','SerialNumber':"ST9146752SS" , 'MarketName' : "Seagate Hornet_DUP, SED",
'Vendor':'SEAGATE','SerialNumber':"ST9146803SS" , 'MarketName' : "Seagate Firefly_DUP",
'Vendor':'SEAGATE','SerialNumber':"ST9300603SS" , 'MarketName' : "Seagate Firefly_DUP",
'Vendor':'SEAGATE','SerialNumber':"ST9300503SS" , 'MarketName' : "Seagate Firefly_DUP, SED",
'Vendor':'SEAGATE','SerialNumber':"ST9500430SS" , 'MarketName' : "Seagate Dragonfly_DUP",
'Vendor':'SEAGATE','SerialNumber':"ST9500431SS" , 'MarketName' : "Seagate Dragonfly_DUP, SED",
'Vendor':'SEAGATE','SerialNumber':"ST3300657SS" , 'MarketName' : "Seagate Eagle_DUP",
'Vendor':'SEAGATE','SerialNumber':"ST3450857SS" , 'MarketName' : "Seagate Eagle_DUP",
'Vendor':'SEAGATE','SerialNumber':"ST3600057SS" , 'MarketName' : "Seagate Eagle_DUP",
'Vendor':'SEAGATE','SerialNumber':"ST3450757SS" , 'MarketName' : "Seagate Eagle_DUP, SED",
'Vendor':'SEAGATE','SerialNumber':"ST3600957SS" , 'MarketName' : "Seagate Eagle_DUP, SED",
'Vendor':'SEAGATE','SerialNumber':"ST3600002SS" , 'MarketName' : "Seagate Eagle RP_DUP",
'Vendor':'SEAGATE','SerialNumber':"ST3500414SS" , 'MarketName' : "Seagate Muskie_DUP",
'Vendor':'SEAGATE','SerialNumber':"ST31000424SS" , 'MarketName' : "Seagate Muskie_DUP",
'Vendor':'SEAGATE','SerialNumber':"ST32000444SS" , 'MarketName' : "Seagate Muskie_DUP",
'Vendor':'SEAGATE','SerialNumber':"ST31000425SS" , 'MarketName' : "Seagate Muskie_DUP, SED",
'Vendor':'SEAGATE','SerialNumber':"ST32000445SS" , 'MarketName' : "Seagate Muskie_DUP, SED",
'Vendor':'SEAGATE','SerialNumber':"ST9500530NS" , 'MarketName' : "Seagate Dragonfly ES_DUP",
'Vendor':'SEAGATE','SerialNumber':"ST3500514NS" , 'MarketName' : "Seagate Muskie ES_DUP",
'Vendor':'SEAGATE','SerialNumber':"ST31000524NS" , 'MarketName' : "Seagate Muskie ES_DUP",
'Vendor':'SEAGATE','SerialNumber':"ST32000544NS" , 'MarketName' : "Seagate Muskie ES_DUP",
'Vendor':'FUJITSU','SerialNumber':"MBD2147RC" , 'MarketName' : "Fujitsu Allegro 11SE_DUP",
'Vendor':'FUJITSU','SerialNumber':"MBD2300RC" , 'MarketName' : "Fujitsu Allegro 11SE_DUP",
'Vendor':'FUJITSU','SerialNumber':"MBE2073RC" , 'MarketName' : "Fujitsu Allegro 11SX_DUP",
'Vendor':'FUJITSU','SerialNumber':"MBE2147RC" , 'MarketName' : "Fujitsu Allegro 11SX_DUP",
'Vendor':'HITACHI','SerialNumber':"HUC103014CSS600" , 'MarketName' : "Hitachi Cobra C_DUP",
'Vendor':'HITACHI','SerialNumber':"HUC103030CSS600" , 'MarketName' : "Hitachi Cobra C_DUP",
'Vendor':'HITACHI','SerialNumber':"HUC151473CSS600" , 'MarketName' : "Hitachi King Cobra C_DUP",
'Vendor':'HITACHI','SerialNumber':"HUC151414CSS600" , 'MarketName' : "Hitachi King Cobra C_DUP",
'Vendor':'HITACHI','SerialNumber':"HUS156030VLS600" , 'MarketName' : "Hitachi Viper C_DUP",
'Vendor':'HITACHI','SerialNumber':"HUS156045VLS600" , 'MarketName' : "Hitachi Viper C_DUP",
'Vendor':'HITACHI','SerialNumber':"HUS156060VLS600" , 'MarketName' : "Hitachi Viper C_DUP",
'Vendor':'WESTERN DIGITAL','SerialNumber':"WDCWD1460BKFG-1" , 'MarketName' : "WD Rigel (SL150)_DUP",
'Vendor':'WESTERN DIGITAL','SerialNumber':"WDCWD3000BKFG-1" , 'MarketName' : "WD Rigel (SL150)_DUP",
'Vendor':'WESTERN DIGITAL','SerialNumber':"WDCWD1460BKFG-1" , 'MarketName' : "WD Rigel (SL150) 6Gb_DUP",
'Vendor':'WESTERN DIGITAL','SerialNumber':"WDCWD3000BKFG-1" , 'MarketName' : "WD Rigel (SL150) 6Gb_DUP",
'Vendor':'WESTERN DIGITAL','SerialNumber':"WDCWD3000BKHG-1" , 'MarketName' : "WD Vega_DUP",
'Vendor':'WESTERN DIGITAL','SerialNumber':"WDCWD6000BKHG-1" , 'MarketName' : "WD Vega_DUP",
'Vendor':'WESTERN DIGITAL','SerialNumber':"WDCWD2000FYYG-1" , 'MarketName' : "WD Bach_DUP",
'Vendor':'SAMSUNG','SerialNumber':"HE161HJ" , 'MarketName' : "Samsung F1R",
'Vendor':'WESTERN DIGITAL','SerialNumber':"WDCWD2002FYPS-1" , 'MarketName' : "WD Sparta ES_DUP",
'Vendor':'WESTERN DIGITAL','SerialNumber':"WDCWD2003FYYS-1" , 'MarketName' : "WD Mantis ES",
'Vendor':'WESTERN DIGITAL','SerialNumber':"WDCWD1003FBYX-1" , 'MarketName' : "WD Vulcan_DUP_1TB",
'Vendor':'WESTERN DIGITAL','SerialNumber':"WDCWD2503ABYX-1" , 'MarketName' : "WD Summit_DUP_250GB",
'Vendor':'WESTERN DIGITAL','SerialNumber':"WDCWD5003ABYX-1" , 'MarketName' : "WD Summit_DUP_500GB",
'Vendor':'HITACHI','SerialNumber':"HUA722020ALA330" , 'MarketName' : "Hitachi Jupiter K ES_DUP",
'Vendor':'SEAGATE','SerialNumber':"ST9600204SS" , 'MarketName' : "Seagate Firestorm_DUP",
'Vendor':'SEAGATE','SerialNumber':"ST9600104SS" , 'MarketName' : "Seagate Firestorm_DUP, SED",
'Vendor':'TOSHIBA','SerialNumber':"MBF2300RC" , 'MarketName' : "Toshiba Allegro 12SE_DUP",
'Vendor':'TOSHIBA','SerialNumber':"MBF2600RC" , 'MarketName' : "Toshiba Allegro 12SE_DUP",
'Vendor':'SAMSUNG','SerialNumber':"HE253GJ" , 'MarketName' : "Samsung F3R ES",
'Vendor':'SAMSUNG','SerialNumber':"HE502HJ" , 'MarketName' : "Samsung F3R ES",
'Vendor':'SAMSUNG','SerialNumber':"HE103SJ" , 'MarketName' : "Samsung F3R ES",
'Vendor':'HITACHI','SerialNumber':"HUC106030CSS600" , 'MarketName' : "Hitachi Cobra D_DUP",
'Vendor':'HITACHI','SerialNumber':"HUC106060CSS600" , 'MarketName' : "Hitachi Cobra D_DUP",
'Vendor':'SEAGATE','SerialNumber':"ST936751SS" , 'MarketName' : "Seagate Maverick_non-DUP",
'Vendor':'SEAGATE','SerialNumber':"ST973451SS" , 'MarketName' : "Seagate Maverick_non-DUP",
'Vendor':'WESTERN DIGITAL','SerialNumber':"WDCWD1000FYPS-1" , 'MarketName' : "WD Hulk (GP250RE2)",
'Vendor':'HITACHI','SerialNumber':"HUS154530VLS300" , 'MarketName' : "Hitachi Viper B+_DUP",
'Vendor':'HITACHI','SerialNumber':"HUS154545VLS300" , 'MarketName' : "Hitachi Viper B+_DUP",
'Vendor':'HITACHI','SerialNumber':"HUS153073VLS300" , 'MarketName' : "Hitachi Viper B_DUP",
'Vendor':'HITACHI','SerialNumber':"HUS153014VLS300" , 'MarketName' : "Hitachi Viper B_DUP",
'Vendor':'HITACHI','SerialNumber':"HUS153030VLS300" , 'MarketName' : "Hitachi Viper B_DUP",
'Vendor':'HITACHI','SerialNumber':"HUC101473CSS300" , 'MarketName' : "Hitachi Cobra B_DUP",
'Vendor':'HITACHI','SerialNumber':"HUC101414CSS300" , 'MarketName' : "Hitachi Cobra B_DUP",
'Vendor':'HITACHI','SerialNumber':"HUA721050KLA330" , 'MarketName' : "Hitachi Gemini K",
'Vendor':'HITACHI','SerialNumber':"HUA721075KLA330" , 'MarketName' : "Hitachi Gemini K",
'Vendor':'HITACHI','SerialNumber':"HUA721010KLA330" , 'MarketName' : "Hitachi Gemini K",
'Vendor':'SEAGATE','SerialNumber':"ST973452SS" , 'MarketName' : "Seagate Hornet_DUP",
'Vendor':'SEAGATE','SerialNumber':"ST9146852SS" , 'MarketName' : "Seagate Hornet_DUP",
'Vendor':'SEAGATE','SerialNumber':"ST9146803SS-H" , 'MarketName' : "Seagate Firefly_DUP",
'Vendor':'SEAGATE','SerialNumber':"ST9146803SS" , 'MarketName' : "Seagate Firefly_DUP",
'Vendor':'SEAGATE','SerialNumber':"ST9300603SS" , 'MarketName' : "Seagate Firefly_DUP",
'Vendor':'SEAGATE','SerialNumber':"ST9500430SS" , 'MarketName' : "Seagate Dragonfly_DUP",
'Vendor':'SEAGATE','SerialNumber':"ST3300657SS" , 'MarketName' : "Seagate Eagle_DUP",
'Vendor':'SEAGATE','SerialNumber':"ST3450857SS" , 'MarketName' : "Seagate Eagle_DUP",
'Vendor':'SEAGATE','SerialNumber':"ST3600057SS" , 'MarketName' : "Seagate Eagle_DUP",
'Vendor':'SEAGATE','SerialNumber':"ST3600002SS" , 'MarketName' : "Seagate Eagle RP_DUP",
'Vendor':'FUJITSU','SerialNumber':"MBD2147RC" , 'MarketName' : "Fujitsu Allegro 11SE_DUP",
'Vendor':'FUJITSU','SerialNumber':"MBD2147RC" , 'MarketName' : "Fujitsu Allegro 11SE_DUP",
'Vendor':'FUJITSU','SerialNumber':"MBD2300RC" , 'MarketName' : "Fujitsu Allegro 11SE_DUP",
'Vendor':'FUJITSU','SerialNumber':"MBE2073RC" , 'MarketName' : "Fujitsu Allegro 11SX_DUP",
'Vendor':'FUJITSU','SerialNumber':"MBE2147RC" , 'MarketName' : "Fujitsu Allegro 11SX_DUP",
'Vendor':'HITACHI','SerialNumber':"HUC103014CSS600" , 'MarketName' : "Hitachi Cobra C_DUP",
'Vendor':'HITACHI','SerialNumber':"HUC103030CSS600" , 'MarketName' : "Hitachi Cobra C_DUP",
'Vendor':'WESTERN DIGITAL','SerialNumber':"WD5000YS-18MPB1" , 'MarketName' : "WD Zeus ES",
'Vendor':'HITACHI','SerialNumber':"HDS725050KLA360" , 'MarketName' : "Hitachi Kurofune II",
'Vendor':'FUJITSU','SerialNumber':"MAX3036RC" , 'MarketName' : "Fujitsu Allegro 9LX",
'Vendor':'FUJITSU','SerialNumber':"MAX3073RC" , 'MarketName' : "Fujitsu Allegro 9LX",
'Vendor':'FUJITSU','SerialNumber':"MAX3147RC" , 'MarketName' : "Fujitsu Allegro 9LX",
'Vendor':'HITACHI','SerialNumber':"HUS151436VLS300" , 'MarketName' : "Hitachi Viper A'",
'Vendor':'HITACHI','SerialNumber':"HUS151473VLS300" , 'MarketName' : "Hitachi Viper A'",
'Vendor':'HITACHI','SerialNumber':"HUS151414VLS300" , 'MarketName' : "Hitachi Viper A'",
'Vendor':'FUJITSU','SerialNumber':"MAY2036RC" , 'MarketName' : "Fujitsu Allegro 9SE+",
'Vendor':'FUJITSU','SerialNumber':"MAY2073RC" , 'MarketName' : "Fujitsu Allegro 9SE+",
'Vendor':'MAXTOR','SerialNumber':"ATLAS10K5-073SAS" , 'MarketName' : "Maxtor Genesis",
'Vendor':'MAXTOR','SerialNumber':"ATLAS10K5-147SAS" , 'MarketName' : "Maxtor Genesis",
'Vendor':'MAXTOR','SerialNumber':"ATLAS10K5-300SAS" , 'MarketName' : "Maxtor Genesis",
'Vendor':'MAXTOR','SerialNumber':"ATLAS15K2-036SAS" , 'MarketName' : "Maxtor Blackbird ",
'Vendor':'MAXTOR','SerialNumber':"ATLAS15K2-073SAS" , 'MarketName' : "Maxtor Blackbird ",
'Vendor':'MAXTOR','SerialNumber':"ATLAS15K2-147SAS" , 'MarketName' : "Maxtor Blackbird ",
'Vendor':'SEAGATE','SerialNumber':"ST336754SS" , 'MarketName' : "Seagate Cheetah 15K.4",
'Vendor':'SEAGATE','SerialNumber':"ST373454SS" , 'MarketName' : "Seagate Cheetah 15K.4",
'Vendor':'SEAGATE','SerialNumber':"ST3146854SS" , 'MarketName' : "Seagate Cheetah 15K.4",
'Vendor':'WESTERN DIGITAL','SerialNumber':"WD800JD-75MSA3" , 'MarketName' : "WD Unicorn II",
'Vendor':'WESTERN DIGITAL','SerialNumber':"WD1600JS-75NCB3" , 'MarketName' : "WD Hawk II",
'Vendor':'WESTERN DIGITAL','SerialNumber':"WD2500JS-75NCB3" , 'MarketName' : "WD Hawk II",
'Vendor':'SEAGATE','SerialNumber':"ST380819AS" , 'MarketName' : "Seagate Puma II",
'Vendor':'SEAGATE','SerialNumber':"ST3160828AS" , 'MarketName' : "Seagate Puma II",
'Vendor':'SEAGATE','SerialNumber':"ST3808110AS" , 'MarketName' : "Seagate Tonka II",
'Vendor':'SEAGATE','SerialNumber':"ST3160812AS" , 'MarketName' : "Seagate Tonka II",
'Vendor':'SEAGATE','SerialNumber':"ST3250824AS" , 'MarketName' : "Seagate Tonka Plus",
'Vendor':'SEAGATE','SerialNumber':"ST936701SS" , 'MarketName' : "Seagate Savvio (10K.1)",
'Vendor':'SEAGATE','SerialNumber':"ST973401SS" , 'MarketName' : "Seagate Savvio (10K.1)",
'Vendor':'FUJITSU','SerialNumber':"MHV2040BS" , 'MarketName' : "Fujitsu Mercury 60-ED",
'Vendor':'FUJITSU','SerialNumber':"MHW2040BS" , 'MarketName' : "Fujitsu Mercury 80-ED",
'Vendor':'FUJITSU','SerialNumber':"MHW2080BS" , 'MarketName' : "Fujitsu Mercury 80-ED",
'Vendor':'SEAGATE','SerialNumber':"ST3250620NS" , 'MarketName' : "Seagate Galaxy ES",
'Vendor':'SEAGATE','SerialNumber':"ST3500630NS" , 'MarketName' : "Seagate Galaxy ES",
'Vendor':'SEAGATE','SerialNumber':"ST3750640NS" , 'MarketName' : "Seagate Galaxy ES",
'Vendor':'WESTERN DIGITAL','SerialNumber':"WD1600YS-18SHB2" , 'MarketName' : "WD Hawk ES",
'Vendor':'WESTERN DIGITAL','SerialNumber':"WD2500YS-18SHB2" , 'MarketName' : "WD Hawk ES",
'Vendor':'SEAGATE','SerialNumber':"ST373455SS" , 'MarketName' : "Seagate Timberland 15K non-DUP (Field only)",
'Vendor':'SEAGATE','SerialNumber':"ST3146855SS" , 'MarketName' : "Seagate Timberland 15K non-DUP (Field only)",
'Vendor':'SEAGATE','SerialNumber':"ST3300655SS" , 'MarketName' : "Seagate Timberland 15K non-DUP (Field only)",
'Vendor':'SEAGATE','SerialNumber':"ST373355SS" , 'MarketName' : "Seagate Timberland T10 non-DUP",
'Vendor':'SEAGATE','SerialNumber':"ST3146755SS" , 'MarketName' : "Seagate Timberland T10 non-DUP",
'Vendor':'SEAGATE','SerialNumber':"ST3300555SS" , 'MarketName' : "Seagate Timberland T10 non-DUP",
'Vendor':'SEAGATE','SerialNumber':"ST973402SS" , 'MarketName' : "Seagate Firebird non-DUP",
'Vendor':'SEAGATE','SerialNumber':"ST9146802SS" , 'MarketName' : "Seagate Firebird non-DUP",
'Vendor':'FUJITSU','SerialNumber':"MHW2080BK" , 'MarketName' : "Fujitsu Aries (A80-ED)",
'Vendor':'FUJITSU','SerialNumber':"MHW2120BK" , 'MarketName' : "Fujitsu Aries (A80-ED)",
'Vendor':'WESTERN DIGITAL','SerialNumber':"WDCWD800AAJS-1" , 'MarketName' : "WD Unicorn ES",
'Vendor':'WESTERN DIGITAL','SerialNumber':"WDCWD5001ABYS-1" , 'MarketName' : "WD Tornado (XL160M)",
'Vendor':'SEAGATE','SerialNumber':"ST373355SS" , 'MarketName' : "Seagate Timberland T10_DUP",
'Vendor':'SEAGATE','SerialNumber':"ST3146755SS" , 'MarketName' : "Seagate Timberland T10_DUP",
'Vendor':'SEAGATE','SerialNumber':"ST3300555SS" , 'MarketName' : "Seagate Timberland T10_DUP",
'Vendor':'WESTERN DIGITAL','SerialNumber':"WD1601ABYS-18C0A0" , 'MarketName' : "WD Sequoia (XL160)",
'Vendor':'SAMSUNG','SerialNumber':"HE160HJ" , 'MarketName' : "Samsung S166R",
'Vendor':'SEAGATE','SerialNumber':"ST973402SS" , 'MarketName' : "Seagate Firebird_DUP",
'Vendor':'SEAGATE','SerialNumber':"ST9146802SS" , 'MarketName' : "Seagate Firebird_DUP",
'Vendor':'SEAGATE','SerialNumber':"ST373455SS" , 'MarketName' : "Seagate Timberland 15K_DUP",
'Vendor':'SEAGATE','SerialNumber':"ST3146855SS" , 'MarketName' : "Seagate Timberland 15K_DUP",
'Vendor':'SEAGATE','SerialNumber':"ST3300655SS" , 'MarketName' : "Seagate Timberland 15K_DUP",
'Vendor':'FUJITSU','SerialNumber':"MBB2073RC" , 'MarketName' : "Fujitsu Allegro 10SE_DUP",
'Vendor':'FUJITSU','SerialNumber':"MBB2147RC" , 'MarketName' : "Fujitsu Allegro 10SE_DUP",
'Vendor':'FUJITSU','SerialNumber':"MBC2036RC" , 'MarketName' : "Fujitsu Allegro 10SX_DUP",
'Vendor':'FUJITSU','SerialNumber':"MBC2073RC" , 'MarketName' : "Fujitsu Allegro 10SX_DUP",
'Vendor':'SAMSUNG','SerialNumber':"MCBQE25G5MPQ-0VAD3" , 'MarketName' : "Samsung RBX",
'Vendor':'SAMSUNG','SerialNumber':"MCCOE50G5MPQ-0VAD3" , 'MarketName' : "Samsung RBX",
'Vendor':'SAMSUNG','SerialNumber':"MCB4E50G5MXP-0VBD3" , 'MarketName' : "Samsung SS805",
'Vendor':'SAMSUNG','SerialNumber':"MCCOE1HG5MXP-0VBD3" , 'MarketName' : "Samsung SS805",
'Vendor':'PLIANT','SerialNumber':"LB150S" , 'MarketName' : "LB150S",
'Vendor':'WESTERN DIGITAL','SerialNumber':"WDCWD6000BKHG-1" , 'MarketName' : "WD Vega_600GB",
'Vendor':'HITACHI','SerialNumber':"HitachiHUA72202" , 'MarketName' : "Hitachi JupiterK",
'Vendor':'SEAGATE','SerialNumber':"ST9300605SS" , 'MarketName' : "Compass ST9300605SS ",
'Vendor':'SEAGATE','SerialNumber':"ST9900805SS" , 'MarketName' : "Compass ST9900805SS ",
'Vendor':'SEAGATE','SerialNumber':"ST91000640NS" , 'MarketName' : "Airwalker ST91000640NS "}
class MessageLogger:
    """Write timestamped messages to a file-like object.

    An independent logger class (because separation of application
    and protocol logic is a good thing).
    """

    def __init__(self, file):
        # Any object with write()/flush()/close() will do.
        self.file = file

    def log(self, message):
        """Append *message* to the file, prefixed with an [HH:MM:SS] stamp."""
        stamp = time.strftime("[%H:%M:%S]", time.localtime(time.time()))
        line = '%s %s\n' % (stamp, message)
        self.file.write(line)
        self.file.flush()

    def close(self):
        """Close the underlying file object."""
        self.file.close()
class Receiver(pb.Root):
"""Contains Twisted Server methods for transferring Test System Client data.
"""
    def __init__(self):
        """Set up per-server state and open the append-mode activity log."""
        self.clients = []       # connected client references
        self.fpath=''           # current transfer target path
        self.outfile= None      # open file handle for an in-progress transfer
        self.dataQueue={}       # pending per-client data
        self.drives = None      # cached drive listing
        self.device_data ={}    # device metadata awaiting DB insertion
        # NOTE(review): 'log\server_logger.txt' works only because '\s' is not
        # a recognised escape sequence; a raw string or os.path.join would be
        # safer.
        self.logger = MessageLogger(open('log\server_logger.txt', "a"))
        self.logger.log("Server is awake now at %s" % time.asctime(time.localtime(time.time())) )
        #self.logger.close()
    def remote_shutdown(self):
        """Remotely requestable shutdown: stop the Twisted reactor."""
        reactor.stop()
# def remote_addDeviceData(self, data): No longer being used
#
# if self.device_data:
# drive_info = self.device_data
# else:
# drive_info = data
#
# drive = Table('Device', metadata, autoload=True)
# info ={}
# s = drive.select((drive.columns.ModelNumber != drive_info['ModelNumber']) & (drive.columns.SerialNumber != drive_info['SerialNumber']) )
# #s = drive.select(drive.columns.SerialNumber == drive_info['SerialNumber'])
#
# val = connection.execute(s)
# if val.rowcount == 0:
# for d in drive.columns:
# col = str(d).split('.')[1]
# if col in drive_info.keys():
# info[col]=drive_info[col]
# if col == 'Shipping':
# info[col]=0
# if col == 'SupportDUP':
# info[col]=0
# try:
# i = drive.insert()
# i.execute(info)
# except Exception:
# print 'Entry not made'
# pass
# else:
# driverev = Table('DeviceRev', metadata, autoload=True)
# info.clear()
# s = driverev.select((driverev.columns.ModelNumber == drive_info['ModelNumber']) & (driverev.columns.FWRev != drive_info['Firmware']))
#
# #s = driverev.select(driverev.columns.FWRev != drive_info['Firmware'])
# val = connection.execute(s)
# if val.rowcount:
# try:
# info = {'ModelNumber':drive_info['ModelNumber'], 'FWRev':drive_info['Firmware']}
# i = driverev.insert()
# i.execute(info)
# except Exception:
# print 'Entry not made'
# pass
#
#
# return
def remote_addDeviceData_batch(self, data):
    """Insert a batch of drive records into the Device/DeviceRev tables.

    :param data: dict keyed by an arbitrary batch key; each value is a
        dict of drive attributes whose keys mirror the Device table
        columns (plus 'Firmware' for the DeviceRev revision).
    Uses the module-level SQLAlchemy ``metadata``/``connection`` objects.
    """
    drive = Table('Device', metadata, autoload=True)
    driverev = Table('DeviceRev', metadata, autoload=True)
    for k in data.keys():
        drive_info = data[k]
        info = {}
        # NOTE(review): this filter uses '!=' on both columns, so
        # rowcount == 0 means every existing row matches this exact
        # model AND serial; it reads like '==' (a plain existence
        # check) was intended -- confirm against the original intent.
        s = drive.select((drive.columns.ModelNumber != drive_info['ModelNumber']) & (drive.columns.SerialNumber != drive_info['SerialNumber']))
        val = connection.execute(s)
        if val.rowcount == 0:
            # Copy only the attributes that map onto Device columns;
            # Shipping/SupportDUP default to 0 for new drives.
            for d in drive.columns:
                col = str(d).split('.')[1]
                if col in drive_info.keys():
                    info[col] = str(drive_info[col])
                if col == 'Shipping':
                    info[col] = 0
                if col == 'SupportDUP':
                    info[col] = 0
            try:
                i = drive.insert()
                i.execute(info)
                self.logger.log("[Data Entry Successful] at %s" % time.asctime(time.localtime(time.time())))
                self.logger.log("Drive information has been entered into the database")
                self.logger.log("Dataset: %s" % drive_info)
            except Exception:
                # Broad catch: any DB error is treated as "already present".
                self.logger.log("[Data Entry Was Not Made] at %s" % time.asctime(time.localtime(time.time())))
                self.logger.log("Drive information already in the database")
                pass
        else:
            # Known drive: record a new firmware revision if one appeared.
            info.clear()
            s = driverev.select((driverev.columns.ModelNumber == str(drive_info['ModelNumber'])) & (driverev.columns.FWRev != str(drive_info['Firmware'])))
            val = connection.execute(s)
            if val.rowcount:
                try:
                    info = {'ModelNumber': str(drive_info['ModelNumber']), 'FWRev': str(drive_info['Firmware'])}
                    i = driverev.insert()
                    i.execute(info)
                except Exception:
                    self.logger.log("[Data Entry Was Not Made] at %s" % time.asctime(time.localtime(time.time())))
                    self.logger.log("Information already in the database")
                    pass
    #self.logger.close()
# def remote_addResultData(self, data): No longer being used
# drive_info = data
# info ={}
# result = Table('Result', metadata, autoload=True)
# info.clear()
# for d in result.columns:
# col = str(d).split('.')[1]
# if col in drive_info.keys():
# info[col]=drive_info[col]
#
# try:
# i = result.insert()
# i.execute(info)
# except Exception:
# print 'Entry not made'
# pass
#
# return
def remote_PostAnalysis(self, data):
    """Run post-test D2D metrics analysis on a client's result files.

    :param data: two-element sequence; ``data[0]`` is the csv output
        results file from iometer and ``data[1]`` is the file name
        generated for the D2D output.
    Output is written under the module-level ``dir_name`` directory by
    the module-level ``analysis`` helper.
    """
    self.logger.log("[D2D Metrics Analysis] at %s" % time.asctime(time.localtime(time.time())))
    self.logger.log(data)
    analysis.run(data[0], dir_name, data[1])
    print data
    #perf_d2d.run(data[0], dir_name, data[1])
def remote_addResultData_batch(self, data):
    """Store one test result (plus a derived D2D-metrics row) in 'Result'.

    :param data: dict of result attributes keyed like the Result table
        columns; must include 'RawData' (path to the raw results file,
        which is inlined into the record) and 'ModelNumber'.
    Uses the module-level SQLAlchemy ``metadata`` object.
    """
    result = Table('Result', metadata, autoload=True)
    drive = Table('Device', metadata, autoload=True)
    metrics = {}
    input = {}
    info = {}
    drive_info = data
    # Inline the raw results file so it is stored with the DB record.
    f = open(drive_info['RawData'], 'r')
    rawd = f.read()
    f.close()
    info['Raw'] = repr(rawd)
    for d in result.columns:
        col = str(d).split('.')[1]
        if col in drive_info.keys():
            if col == 'Manufacturer':
                # Result table stores the manufacturer under 'Vendor'.
                info['Vendor'] = str(drive_info['Manufacturer']).strip()
            else:
                info[col] = str(drive_info[col]).strip()
        if col == 'Timestamp':
            # Always stamp the record with the server's current time.
            info[col] = time.strftime("%Y-%m-%d %H:%M:%S")
    dD = drive.select(drive.columns.ModelNumber == str(drive_info['ModelNumber']).strip())
    val = dD.execute()
    # NOTE(review): some DB-API drivers report rowcount == -1 for
    # SELECTs regardless of matches -- confirm this branch condition is
    # really "drive found" and not driver-dependent.
    if val.rowcount == -1:
        row = val.fetchone()
        # Enrich the result with attributes from the registered drive.
        input = {'Timestamp': time.strftime("%Y-%m-%d %H:%M:%S"), 'RPM': row['RPM'], 'Firmware': row['Firmware'], 'FormFactor': row['FormFactor'], 'Capacity': str(row['Capacity']).strip(), 'PPID': row['DellPPID'], 'MemoryType': str(row['MemoryType']).strip(), 'Vendor': str(row['Manufacturer']).strip()}
        # NOTE(review): dict.update() returns None, so new_info is
        # always None; info itself is mutated in place.
        new_info = info.update(input)
        try:
            #-------Create metrics analysis dictionary for data entry--
            m = metrics.update(info)
            del metrics['RawData']
            del info['Reports']
            metrics['TestName'] = 'D2D Metrics'
            #----------------------------------------------------------
            i = result.insert()
            i.execute(info)
            i.execute(metrics) #Enter metrics analysis data
            self.logger.log("[Entry Made] at %s" % time.asctime(time.localtime(time.time())))
        except Exception:
            self.logger.log("[Data Entry Was Not Made] at %s" % time.asctime(time.localtime(time.time())))
            #self.logger.log("Drive data not present to update data")
            pass
    else:
        # Drive not registered: store the result row only (the metrics
        # dict is built but never inserted in this branch).
        try:
            #-------Create metrics analysis dictionary for data entry--
            m = metrics.update(info)
            del metrics['RawData']
            del info['Reports']
            metrics['TestName'] = 'D2D Metrics'
            #----------------------------------------------------------
            i = result.insert()
            i.execute(info)
            self.logger.log("[Entry Made] at %s" % time.asctime(time.localtime(time.time())))
        except:
            self.logger.log("[Entry Not Made] at %s" % time.asctime(time.localtime(time.time())))
            pass
# def remote_addTestData(self, data): No longer being used
#
# test = Table('Test', metadata, autoload=True)
# drive_info = data
#
# info ={}
# s = test.select((test.columns.TestName == drive_info['TestName']) & (test.columns.Script == drive_info['Script']) )
# val = connection.execute(s)
# if val.rowcount:
#
# testrev = Table('TestRev', metadata, autoload=True)
# info.clear()
# for d in testrev.columns:
# col = str(d).split('.')[1]
# if col in drive_info.keys():
# info[col]=drive_info[col]
# try:
# i = testrev.insert()
# i.execute(info)
# except Exception:
# print 'Entry not made'
# pass
#
# else:
# info.clear()
# for d in test.columns:
# col = str(d).split('.')[1]
# if col in drive_info.keys():
# info[col]=drive_info[col]
# try:
# i = test.insert()
# i.execute(info)
# except Exception:
# print 'Entry not made'
# pass
#
#
#
# return
#
def remote_addTestData_batch(self, data):
    """Register a test definition in the Test/TestRev tables.

    :param data: dict of test attributes keyed like the Test/TestRev
        table columns; must include 'TestName' and 'Script'.
    Uses the module-level SQLAlchemy ``metadata``/``connection`` objects.
    """
    test = Table('Test', metadata, autoload=True)
    testrev = Table('TestRev', metadata, autoload=True)
    drive_info = data
    info = {}
    s = test.select((test.columns.TestName == str(drive_info['TestName'])) & (test.columns.Script == str(drive_info['Script'])))
    val = connection.execute(s)
    if val.rowcount == 0:
        # New test: insert into both Test and TestRev, using the
        # TestRev column set to filter the attributes.
        info.clear()
        for d in testrev.columns:
            col = str(d).split('.')[1]
            if col in drive_info.keys():
                info[col] = str(drive_info[col])
        try:
            i = test.insert()
            i.execute(info)
            self.logger.log("[Entry Made] at %s" % time.asctime(time.localtime(time.time())))
        except Exception:
            self.logger.log("[Data Entry Was Not Made] at %s" % time.asctime(time.localtime(time.time())))
            self.logger.log("Drive information already in the database")
            pass
        try:
            i = testrev.insert()
            i.execute(info)
            self.logger.log("[Entry Made] at %s" % time.asctime(time.localtime(time.time())))
        except Exception:
            self.logger.log("[Data Entry Was Not Made] at %s" % time.asctime(time.localtime(time.time())))
            self.logger.log("Drive information already in the database")
            pass
    else:
        # NOTE(review): when the test already exists this still inserts
        # into Test again (likely a duplicate); it looks like the two
        # branches may be inverted or this should target TestRev --
        # confirm against the schema's uniqueness constraints.
        info.clear()
        for d in test.columns:
            col = str(d).split('.')[1]
            if col in drive_info.keys():
                info[col] = str(drive_info[col])
        try:
            i = test.insert()
            i.execute(info)
            self.logger.log("[Entry Made] at %s" % time.asctime(time.localtime(time.time())))
        except Exception:
            self.logger.log("[Data Entry Was Not Made] at %s" % time.asctime(time.localtime(time.time())))
            self.logger.log("Drive information already in the database")
            pass
def remote_addInquiryData(self, data):
    """Insert a drive's parsed inquiry record and refresh the Device row.

    :param data: dict of inquiry attributes keyed like the DELLInqData
        columns; must include 'ProductSerialNumber' plus the form-factor
        / rotation-rate fields used for the Device update.
    Uses the module-level SQLAlchemy ``metadata``/``connection`` objects.
    """
    inquiry_info = data
    inq = Table('DELLInqData', metadata, autoload=True)
    driverev = Table('DeviceRev', metadata, autoload=True)
    drive = Table('Device', metadata, autoload=True)
    info = {}
    s = inq.select(inq.columns.ProductSerialNumber == inquiry_info['ProductSerialNumber'])
    val = connection.execute(s)
    if val.rowcount == 0:
        # Unseen serial: copy the attributes that map onto DELLInqData
        # columns and insert a new row.
        for d in inq.columns:
            col = str(d).split('.')[1]
            if col in inquiry_info.keys():
                info[col] = str(inquiry_info[col]).strip()
        try:
            i = inq.insert()
            i.execute(info)
            self.logger.log("[Entry Made] at %s" % time.asctime(time.localtime(time.time())))
        except Exception:
            print 'Entry not made'
            self.logger.log("[Data Entry Was Not Made] at %s" % time.asctime(time.localtime(time.time())))
            #self.logger.log("Drive information already in the database")
            pass
    #update section
    # Push the physical attributes from the inquiry onto the matching
    # Device row (keyed by serial number).
    input = {'FormFactor': inquiry_info['FormFactorWidth'], 'Height': inquiry_info['FormFactorHeight'], 'RPM': inquiry_info['MediumRotationRate'], 'DellPPID': inquiry_info['DellPPID']}
    d = drive.select(drive.columns.SerialNumber == inquiry_info['ProductSerialNumber'])
    val = d.execute()
    # NOTE(review): rowcount == -1 is driver-dependent for SELECTs --
    # confirm this really means "device row found".
    if val.rowcount == -1:
        ss = drive.update(whereclause=((drive.columns.SerialNumber == inquiry_info['ProductSerialNumber'])), values=input)
        ss.execute()
    return
def remote_addModeData(self, data):
    """Insert a drive's inquiry record into 'DELLInqData' if unseen.

    NOTE(review): despite the name, this duplicates the insert path of
    remote_addInquiryData without the Device-table update -- confirm
    whether mode-page data was meant to go to a different table.
    """
    inquiry_info = data
    inq = Table('DELLInqData', metadata, autoload=True)
    driverev = Table('DeviceRev', metadata, autoload=True)
    drive = Table('Device', metadata, autoload=True)
    info = {}
    s = inq.select(inq.columns.ProductSerialNumber == inquiry_info['ProductSerialNumber'])
    val = connection.execute(s)
    if val.rowcount == 0:
        for d in inq.columns:
            col = str(d).split('.')[1]
            if col in inquiry_info.keys():
                info[col] = str(inquiry_info[col]).strip()
        try:
            i = inq.insert()
            i.execute(info)
            self.logger.log("[Entry Made] at %s" % time.asctime(time.localtime(time.time())))
        except Exception:
            self.logger.log("[Data Entry Was Not Made] at %s" % time.asctime(time.localtime(time.time())))
            #self.logger.log("Drive information already in the database")
            pass
def remote_addServerData(self, data):
    """Register a test system (server) in the 'Systems' table if new.

    :param data: dict of system attributes keyed like the Systems
        columns; must include 'SystemName'.
    Note: not yet wired into automatic server registration.
    """
    system_info = data
    system = Table('Systems', metadata, autoload=True)
    info = {}
    s = system.select(system.columns.SystemName == system_info['SystemName'])
    val = connection.execute(s)
    if val.rowcount == 0:
        # Copy only the attributes that map onto Systems columns.
        for d in system.columns:
            col = str(d).split('.')[1]
            if col in system_info.keys():
                info[col] = system_info[col]
        try:
            i = system.insert()
            i.execute(info)
            self.logger.log("[Entry Made] at %s" % time.asctime(time.localtime(time.time())))
        except Exception:
            self.logger.log("[Data Entry Was Not Made] at %s" % time.asctime(time.localtime(time.time())))
            #self.logger.log("Drive information already in the database")
            pass
    return
def remote_LoadDellInqData(self, dell_files):
    """Parse raw DELL inquiry hex dumps and register each drive.

    :param dell_files: dict keyed by file name; each value is a list of
        two-character hex byte strings (the raw inquiry page).

    For every file the fixed byte ranges below are decoded into a
    ``data`` dict, cached in ``self.check_in`` keyed by the drive's
    ProductSerialNumber, and forwarded to remote_addInquiryData().

    Fixes a defect in the previous version: a single ``data`` dict was
    reused across iterations, so every ``self.check_in`` entry aliased
    the same object and ended up holding the *last* drive's fields.
    A fresh dict is now built per file.
    """
    # (field name, start byte, end byte) -- decoded as hex-encoded ASCII
    # via binascii.unhexlify, then stripped. Ranges copied verbatim from
    # the original extraction stanzas (some deliberately overlap).
    ascii_fields = (
        ('VenderID', 4, 12),
        ('ProductID', 12, 27),
        ('FirmwareRevisionLevel', 28, 32),
        ('ProductSerialNumber', 32, 51),
        ('TargetDeviceName', 52, 60),
        ('DeviceID', 84, 92),
        ('ServoCodeLevel', 92, 99),
        ('PCBASerialNumber', 100, 116),
        ('PCBAPartNumber', 117, 131),
        ('DiskMediaVendor', 132, 147),
        ('MotorSerialNumber', 148, 163),
        ('FlexCircuitAssemblySerialNumber', 164, 180),
        ('HeadVendor', 180, 196),
        ('HDCRevision', 196, 211),
        ('ActuatorSerialNumber', 212, 227),
        ('HeadDiskAssembly', 228, 244),
        ('YearofManufacture', 244, 248),
        ('WeekofManufacture', 247, 249),
        ('DayofManufacture', 250, 252),
        ('LocationofManufacture', 251, 260),
        ('DellPPID', 261, 284),
    )
    # Port identifiers are binhex-encoded rather than plain ASCII.
    # NOTE(review): binascii.b2a_hqx was removed in Python 3.9; this
    # file targets Python 2, where it is still available.
    hqx_fields = (
        ('TargetPortIdentifier1', 60, 68),
        ('TargetPortIdentifier2', 68, 76),
    )
    # Numeric fields: decoded ASCII digits scaled to physical units
    # (inches); a parse failure falls back to 0.0, as before.
    scaled_fields = (
        ('FormFactorWidth', 76, 80, 0.001),
        ('FormFactorHeight', 80, 85, 0.01),
    )

    def hex_span(inquiry, start, end):
        # Concatenate the byte tokens for [start, end) and drop any
        # embedded whitespace, matching ''.join(values.split()).
        return ''.join(''.join(inquiry[start:end]).split())

    self.check_in = {}
    for key in dell_files.keys():
        inquiry = dell_files[key]
        data = {}  # fresh dict per drive (see docstring)
        for field, start, end in ascii_fields:
            data[field] = binascii.unhexlify(hex_span(inquiry, start, end)).strip()
        for field, start, end in hqx_fields:
            data[field] = binascii.b2a_hqx(hex_span(inquiry, start, end)).strip()
        for field, start, end, scale in scaled_fields:
            out = binascii.unhexlify(hex_span(inquiry, start, end))
            try:
                data[field] = int(out.strip()) * scale
            except Exception:
                print(out)
                data[field] = 0.0
        # Rotation rate is a plain big-endian hex integer, not ASCII.
        data['MediumRotationRate'] = int(hex_span(inquiry, 284, 286), 16)
        self.check_in[data['ProductSerialNumber']] = data
        self.remote_addInquiryData(data)
def remote_LoadModePages(self, drive):
    """Decode drive mode pages into per-page flag dicts and shelve them.

    :param drive: two-element sequence; ``drive[0]`` is a dict keyed by
        file/page name (containing '0x00', '0x01', ... to identify the
        page) whose values are lists of hex byte strings; ``drive[1]``
        is either 'CheckIn' or 'CheckOut' and selects the shelf DB.

    For each recognised page, byte 6 is expanded into individual named
    bits (LSB first). On 'CheckOut' a pass/fail report comparing the
    CheckIn and CheckOut shelves is produced via dict_diff().
    """
    self.checkin = shelve.open('CheckIn.db', flag='c', writeback=True)
    self.checkout = shelve.open('CheckOut.db', flag='c', writeback=True)
    dell_files = drive[0]
    # NOTE: shadows the 'type' builtin within this method.
    type = drive[1]
    hex_keys = {}
    inquiry = []
    #filelist = os.listdir(path)
    for d in dell_files.keys():
        inquiry = dell_files[d]
        if '0x00' in d:
            # Vendor-specific mode page: store raw byte 6 as 'UEAR'.
            VSM = {}
            value = ''.join(('0x', inquiry[6]))
            val = int(value, 16)
            VSM['UEAR'] = val
            #print 'VSM', VSM
            if type == 'CheckIn':
                self.checkin[d] = VSM #add the writing to the shelf db
            elif type == 'CheckOut':
                self.checkout[d] = VSM #add the writing to the shelf db
        if '0x01' in d:
            # Read-Write Error Recovery page: byte 6 bit flags plus the
            # recovery time limit (RTL) from bytes 14-15.
            RWER = {}
            RW = {7:'AWRE',6:'ARRE',5:'TB',4:'RC',3:'EER',2:'PER',1:'DTE',0:'DCR'}
            RTL = ''.join((inquiry[14], inquiry[15]))
            RWER['RTL'] = RTL
            #print inquiry
            value = ''.join(('0x', inquiry[6]))
            val = int(value, 16)
            try:
                val = int(value, 16)
                # Expand byte 6 into bits, LSB first.
                out = bin(val)
                out = out.split('0b')
                op = out[1]
                bin_list = list(op)
                bin_list.reverse()
                # NOTE(review): no zero-padding here -- if fewer than 8
                # bits the IndexError is swallowed by the bare except,
                # leaving a partial flag dict. Confirm this is intended.
                for x in range(0, 8):
                    RWER[RW[x]] = bin_list[x]
                #print RWER
            except:
                pass
            #print 'RWER', RWER
            if type == 'CheckIn':
                self.checkin[d] = RWER #add the writing to the shelf db
            elif type == 'CheckOut':
                self.checkout[d] = RWER #add the writing to the shelf db
        if '0x07' in d:
            # Verify Error Recovery page (same bit names as page 0x01).
            VERM = {}
            ERM = {7:'AWRE',6:'ARRE',5:'TB',4:'RC',3:'EER',2:'PER',1:'DTE',0:'DCR'}
            RTL = ''.join((inquiry[14], inquiry[15]))
            VERM['VRTL'] = RTL
            value = ''.join(('0x', inquiry[6]))
            val = int(value, 16)
            try:
                val = int(value, 16)
                if not val:
                    # Byte 6 is zero: only the low 4 flags are cleared.
                    for x in range(0, 4):
                        try:
                            VERM[ERM[x]] = '0'
                        except:
                            pass
                else:
                    out = bin(val)
                    out = out.split('0b')
                    op = out[1]
                    bin_list = list(op)
                    bin_list.reverse()
                    for x in range(0, 8):
                        VERM[ERM[x]] = bin_list[x]
            except:
                pass
            #print 'VERM', VERM
            if type == 'CheckIn':
                self.checkin[d] = VERM #add the writing to the shelf db
            elif type == 'CheckOut':
                self.checkout[d] = VERM #add the writing to the shelf db
        if '0x08' in d:
            # Caching page.
            CACHE = {}
            CP = {7:'IC',6:'ABPF',5:'CAP',4:'DISC',3:'SIZE',2:'WCE',1:'MF',0:'RCD'}
            value = ''.join(('0x', inquiry[6]))
            val = int(value, 16)
            try:
                val = int(value, 16)
                if not val:
                    for x in range(0, 8):
                        try:
                            CACHE[CP[x]] = '0'
                        except:
                            pass
                else:
                    out = bin(val)
                    out = out.split('0b')
                    op = out[1]
                    bin_list = list(op)
                    bin_list.reverse()
                    for x in range(0, 8):
                        CACHE[CP[x]] = bin_list[x]
            except:
                pass
            #print 'CACHE', CACHE
            if type == 'CheckIn':
                self.checkin[d] = CACHE #add the writing to the shelf db
            elif type == 'CheckOut':
                self.checkout[d] = CACHE #add the writing to the shelf db
        if '0x0A' in d:
            # Control Mode page: only bits 0-2 are named.
            CMP = {}
            CM = {2:'D_SENSE',1:'GLTSD',0:'RLEC'}
            value = ''.join(('0x', inquiry[6]))
            try:
                val = int(value, 16)
                if not val:
                    for x in range(0, 3):
                        try:
                            CMP[CM[x]] = '0'
                        except:
                            pass
                else:
                    out = bin(val)
                    out = out.split('0b')
                    op = out[1]
                    bin_list = list(op)
                    bin_list.reverse()
                    # NOTE(review): CM has keys 0-2 only, so x == 3
                    # raises KeyError which the outer bare except
                    # swallows -- flags past the failure are lost.
                    for x in range(0, 8):
                        CMP[CM[x]] = bin_list[x]
            except:
                pass
            #print 'CMP', CMP
            if type == 'CheckIn':
                self.checkin[d] = CMP #add the writing to the shelf db
            elif type == 'CheckOut':
                self.checkout[d] = CMP #add the writing to the shelf db
        if '0x18' in d:
            # Protocol Specific Logical Unit page: single TLR bit.
            PSLUM = {}
            SLU = {4:'TLR'}
            value = ''.join(('0x', inquiry[6]))
            try:
                val = int(value, 16)
                if not val:
                    PSLUM['TLR'] = '0'
                else:
                    out = bin(val)
                    out = out.split('0b')
                    op = out[1]
                    bin_list = list(op)
                    # Left-pad to 8 bits before reversing.
                    while len(bin_list) <= 7:
                        op = '0' + op
                        bin_list = list(op)
                    bin_list.reverse()
                    for x in range(0, 8):
                        try:
                            PSLUM[SLU[x]] = bin_list[x]
                        except:
                            pass
            except:
                pass
            #print 'PSLUM', PSLUM
            if type == 'CheckIn':
                self.checkin[d] = PSLUM #add the writing to the shelf db
            elif type == 'CheckOut':
                self.checkout[d] = PSLUM #add the writing to the shelf db
        if '0x19' in d:
            # Protocol Specific Port page: flags plus NLT/IRT timers.
            PSPMP = {}
            PM = {6:'CAWT', 5:'BAE',4:'RLM'}
            NLT = ''.join((inquiry[8], inquiry[9]))
            IRT = ''.join((inquiry[10], inquiry[11]))
            PSPMP['NLT'] = NLT
            PSPMP['IRT'] = IRT
            value = ''.join(('0x', inquiry[6]))
            val = int(value, 16)
            #print inquiry
            try:
                val = int(value, 16)
                if not val:
                    for x in range(0, 4):
                        try:
                            PSPMP[PM[x]] = '0'
                        except:
                            pass
                else:
                    out = bin(val)
                    out = out.split('0b')
                    op = out[1]
                    bin_list = list(op)
                    while len(bin_list) <= 7:
                        op = '0' + op
                        bin_list = list(op)
                    bin_list.reverse()
                    for x in range(0, 8):
                        try:
                            PSPMP[PM[x]] = bin_list[x]
                        except:
                            pass
            except:
                pass
            #print 'PSPMP', PSPMP
            if type == 'CheckIn':
                self.checkin[d] = PSPMP #add the writing to the shelf db
            elif type == 'CheckOut':
                self.checkout[d] = PSPMP #add the writing to the shelf db
        if '0x1C' in d:
            # Informational Exceptions Control page: flags plus the
            # interval timer (IT) and report count (RC).
            IECP = {}
            IEC = {7:'PERF',6:'RESERVED2',5:'EBF',4:'EWASC',3:'DEXCPT',2:'TEST',1:'RESERVED1',0:'LOGERR'}
            IT = ''.join((inquiry[8], inquiry[11]))
            RC = ''.join((inquiry[12], inquiry[15]))
            IECP['IT'] = IT
            IECP['RC'] = RC
            value = ''.join(('0x', inquiry[6]))
            val = int(value, 16)
            #print inquiry
            try:
                val = int(value, 16)
                if not val:
                    for x in range(0, 4):
                        try:
                            IECP[IEC[x]] = '0'
                        except:
                            pass
                else:
                    out = bin(val)
                    out = out.split('0b')
                    op = out[1]
                    bin_list = list(op)
                    while len(bin_list) <= 7:
                        op = '0' + op
                        bin_list = list(op)
                    bin_list.reverse()
                    for x in range(0, 8):
                        IECP[IEC[x]] = bin_list[x]
            except:
                pass
            #print 'IECP', IECP
            if type == 'CheckIn':
                self.checkin[d] = IECP #add the writing to the shelf db
            elif type == 'CheckOut':
                self.checkout[d] = IECP #add the writing to the shelf db
        inquiry = []
    if type == 'CheckOut':
        # Produce the check-in vs check-out comparison report.
        stamp = time.asctime(time.localtime(time.time()))
        # NOTE(review): asctime output contains ':' and spaces, which
        # is not a valid file name on Windows -- confirm.
        f_name = 'Check_Out_Report_' + '(' + str(stamp) + ')'
        fp = open(f_name, 'w')
        # NOTE(review): the same whole-shelf diff is recomputed for
        # every key and the header is written once per key; 'out' does
        # not depend on k, so pass/fail is identical each iteration.
        for k in self.checkout.keys():
            out = dict_diff(self.checkin, self.checkout)
            fp.write('Check In & Check Out Report \n\r')
            print 'drive mode page', k
            if out:
                print 'Failed'
            else:
                print 'Passed'
        fp.close()
    self.checkin.close()
    self.checkout.close()
def dict_diff(first, second):
    """Return a dict describing how *first* and *second* differ.

    Keys present in only one of the two dicts map to the module-level
    KEYNOTFOUNDIN2 / KEYNOTFOUNDIN1 markers; keys present in both but
    with unequal values map to the tuple (first_value, second_value).
    An empty result means the dicts are equivalent.

    @param first: first dictionary to diff.
    @param second: second dictionary to diff.
    @return: dict of key => difference marker or (first.val, second.val)
    """
    first_keys = set(first)
    second_keys = set(second)
    diff = {}
    # Keys that exist only in the first dict.
    for key in first_keys - second_keys:
        diff[key] = KEYNOTFOUNDIN2
    # Keys that exist only in the second dict.
    for key in second_keys - first_keys:
        diff[key] = KEYNOTFOUNDIN1
    # Keys present in both: record value pairs that disagree.
    for key in first_keys & second_keys:
        if first[key] != second[key]:
            diff[key] = (first[key], second[key])
    return diff
# Start the Perspective Broker service on TCP port 1331 and run the
# reactor until the process exits (e.g. via remote_shutdown()).
reactor.listenTCP(1331, pb.PBServerFactory(Receiver()))
print 'Server is running and waiting.....'
#reactor.listenTCP(8007, pb.PBServerFactory(Receiver()))
reactor.run()
"wmartin@softlayer.com"
] | wmartin@softlayer.com |
43f0bebabdb065d073d007cdb38e6616326d91c0 | c4b9678308d9480339126342c8f88a9b03a6b8cf | /forms/ui.py | 3a07e05ed75ebaabbb3b06181f9df41710448cd0 | [] | no_license | ljurk/mailcow-register | 308fabebf13c5aec6205efd4b1edd5f1ded800bc | 3b77fada3aa908f8eea88f2334b8055bf5711d95 | refs/heads/master | 2023-03-11T03:37:08.020979 | 2021-02-22T10:30:15 | 2021-02-22T10:30:15 | 340,334,942 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 930 | py | from os import getenv
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, SubmitField, BooleanField, IntegerField
from wtforms.validators import Optional, DataRequired
class BaseForm(FlaskForm):
token = StringField('Token', description='will be regenerated on page reload', validators=[DataRequired()])
username = StringField('Username', validators=[DataRequired()])
submit = SubmitField('Submit')
message = "~"
class Admin(BaseForm):
domain = StringField('Domain', default=getenv("REGISTER_DOMAIN"), validators=[DataRequired()])
tlsIn = BooleanField('TLS in', default=True)
tlsOut = BooleanField('TLS out', default=True)
quota = IntegerField("Quota", default=getenv("REGISTER_QUOTA"), validators=[DataRequired()])
class Ui(BaseForm):
fullname = StringField('Fullname', validators=[Optional()])
password = PasswordField('Password', validators=[Optional()])
| [
"lukas.jurk@tu-dresden.de"
] | lukas.jurk@tu-dresden.de |
ebf11ef210f67ab5fbd898517e6e67a3b68caeba | 754b7a2ab760b9b8ced3702af7b429b5bf30c7ff | /blogging/admin.py | f17c893fbbd74ca1c044b49995b14803dbe8b474 | [] | no_license | shadlestewart/06-django-blog | 6820e1f1e53715e03c8ec3a8b32dc230fe5c46cc | 14cc2e9b29d04d6bfccf69f04dffb8f75ccb4704 | refs/heads/master | 2023-05-14T07:38:17.047690 | 2021-06-04T23:04:59 | 2021-06-04T23:04:59 | 366,571,719 | 0 | 0 | null | 2021-06-04T23:05:00 | 2021-05-12T02:40:52 | Python | UTF-8 | Python | false | false | 396 | py | from django.contrib import admin
from blogging.models import Post, Category
class CategoryInLine(admin.StackedInline):
model = Post.category.through
exclude = (
"name",
"description",
)
class PostAdmin(admin.ModelAdmin):
inlines = [
CategoryInLine,
]
exclude = ("category",)
admin.site.register(Post, PostAdmin)
admin.site.register(Category)
| [
"shadlestewart@gmail.com"
] | shadlestewart@gmail.com |
07ce7f49027559761be8dd0379236dc5cff3011f | 9de6bf623a469e8ccf5f191ce985330dd93dde30 | /stacks/django/projects/orm/books_authors_v1/books_authors/books_authors/urls.py | c871e62b45be1f412e98ce000ae6b5bb191f9eb7 | [] | no_license | tjf2015/assignments | 001f3f7072341d7b0a6437ec316a8993fcf206f0 | 18573c6b9bf533609f5f684a26daba17728e32d3 | refs/heads/main | 2023-02-28T19:39:05.510493 | 2021-02-12T03:19:04 | 2021-02-12T03:19:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 731 | py | """books_authors URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.urls import path, include
urlpatterns = [
path('', include('main.urls')),
]
| [
"73617242+timfoltz@users.noreply.github.com"
] | 73617242+timfoltz@users.noreply.github.com |
3dedf611bc54472811b3f467db4eb932c8506bf7 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03971/s127085901.py | 34b5ed2240205dc9db4f33c7ab3dd930dddf6d8a | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 407 | py | n,a,b=map(int,input().split())
s=input()
passed=0
abroad_passed=0
for i in s:
if i=="a":
if passed<a+b:
print("Yes")
passed+=1
else:
print("No")
elif i=="b":
if passed<a+b and abroad_passed<=b-1:
print("Yes")
passed+=1
abroad_passed+=1
else:
print("No")
else:
print("No") | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
0f8ebdd234606243284b482e06e4083e1328c38d | 3ef0fd7ff4ab98da91de28b4a3ae6bbd55a38361 | /wxrobot-host/wxrobot/baidu_shibie.py | 7c2115361e6ee26f68503dd32ffd03c7d4f6470f | [] | no_license | nudepig/wxrobot | d0cbcbe0b1fb0a69532bb2c45630bc01ded8c2af | 82bd8f68d3163d8dddf1b9a8ccc14532f040fbab | refs/heads/master | 2020-12-27T13:35:24.043856 | 2020-02-03T09:30:04 | 2020-02-03T09:30:04 | 237,920,035 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,770 | py | # from aip import AipOcr # 如果已安装pip,执行pip install baidu-aip即可
# import os
# """ 你的 APPID AK SK """
# APP_ID = '16802142'
# API_KEY = 'FcIxTPz25FZOSjOfgTKfAWIn'
# SECRET_KEY = 'GKIvG4tFqqyzisDCY81ASkMihg3LHrwx'
#
# client = AipOcr(APP_ID, API_KEY, SECRET_KEY)
# """ 读取图片 """
# def get_file_content(filePath): # 读取图片
# with open(filePath, 'rb') as fp:
# return fp.read()
#
# def image_identify(picture):
# image = get_file_content(picture)
# # print(image)
# # time_one = time.time()
# result = client.basicAccurate(image) # 获取百度识别的结果
# # time_two = time.time()
# # print(time_two - time_one)
# # if time_two - time_one > 6:
# # else:
# if os.path.exists('result.txt'):
# os.remove('result.txt')
# for result_words in list(result['words_result']): # 提取返回结果
# with open('result.txt', 'a+', encoding='utf-8') as file:
# file.write(result_words['words'] + '\n')
# with open('result.txt', 'r', encoding='utf-8') as file:
# result_input = file.read()
# return result_input # 返回识别的文字结果,文字分行
#
# picture = r'f43a9ae3508254911d9b551d3b0a2d5.png'
# image_identify(picture)
# encoding:utf-8
# 旧版api
import requests
import base64
import os
'''
通用文字识别(高精度版)
'''
def image_identify(picture):
# client_id 为官网获取的AK, client_secret 为官网获取的SK
host = 'https://aip.baidubce.com/oauth/2.0/token?grant_type=client_credentials&client_id=v6ChGHmbOGNu5yyP1bchGYmF&client_secret=RSLGkQm44tYEti0m7dfg2GGgAibFKkZ2'
access_token = requests.get(host)
request_url = "https://aip.baidubce.com/rest/2.0/ocr/v1/accurate_basic"
# 二进制方式打开图片文件
f = open(picture, 'rb')
img = base64.b64encode(f.read())
access_token = access_token.json()
access_token = access_token['access_token']
params = {"image": img}
# access_token = '[调用鉴权接口获取的token]'
request_url = '{}?access_token={}'.format(request_url, access_token)
headers = {'content-type': 'application/x-www-form-urlencoded'}
response = requests.post(request_url, data=params, headers=headers)
response = response.json()
if os.path.exists('result.txt'):
os.remove('result.txt')
for result_words in list(response['words_result']): # 提取返回结果
with open('result.txt', 'a+', encoding='utf-8') as file:
file.write(result_words['words'] + '\n')
with open('result.txt', 'r', encoding='utf-8') as file:
result_input = file.read()
return result_input
| [
"ubuntu@localhost.localdomain"
] | ubuntu@localhost.localdomain |
526a36db003f6b888927cfb7031603fc97188b7a | 2c143ba64032f65c7f7bf1cbd567a1dcf13d5bb1 | /整数转罗马数字.py | 2a08f7032b28b37c736f253256397e561ff86593 | [] | no_license | tx991020/MyLeetcode | 5b6121d32260fb30b12cc8146e44e6c6da03ad89 | cfe4f087dfeb258caebbc29fc366570ac170a68c | refs/heads/master | 2020-04-09T21:43:41.403553 | 2019-03-27T18:54:35 | 2019-03-27T18:54:35 | 160,611,089 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,205 | py | '''
罗马数字包含以下七种字符: I, V, X, L,C,D 和 M。
字符 数值
I 1
V 5
X 10
L 50
C 100
D 500
M 1000
例如, 罗马数字 2 写做 II ,即为两个并列的 1。12 写做 XII ,即为 X + II 。 27 写做 XXVII, 即为 XX + V + II 。
通常情况下,罗马数字中小的数字在大的数字的右边。但也存在特例,例如 4 不写做 IIII,而是 IV。数字 1 在数字 5 的左边,所表示的数等于大数 5 减小数 1 得到的数值 4 。同样地,数字 9 表示为 IX。这个特殊的规则只适用于以下六种情况:
I 可以放在 V (5) 和 X (10) 的左边,来表示 4 和 9。
X 可以放在 L (50) 和 C (100) 的左边,来表示 40 和 90。
C 可以放在 D (500) 和 M (1000) 的左边,来表示 400 和 900。
给定一个整数,将其转为罗马数字。输入确保在 1 到 3999 的范围内。
示例 1:
输入: 3
输出: "III"
示例 2:
输入: 4
输出: "IV"
示例 3:
输入: 9
输出: "IX"
'''
class Solution:
def intToRoman(self, num):
"""
:type num: int
:rtype: str
"""
| [
"wudi@hetao101.com"
] | wudi@hetao101.com |
598ca595b5b250d8d69b8ba997f84854b199709b | 65317d5b1c001bdcb6aa8e09dc9632257a73fafa | /week2/d5/server/shirts/views.py | 78f612d6c4f475ce907ac8a46660b83a730ae83b | [] | no_license | neilm813/python_demos_12-2019 | 30eb6f4f31fde28bfb815a51df529613589424c5 | 1ea7d06ca05b1ba5bc30a0204bed6d2a223f468e | refs/heads/master | 2021-06-20T17:24:06.729215 | 2019-12-23T01:11:31 | 2019-12-23T01:11:31 | 224,014,029 | 1 | 2 | null | 2021-06-10T19:32:08 | 2019-11-25T18:22:13 | Python | UTF-8 | Python | false | false | 3,262 | py | from django.shortcuts import render, redirect
from .models import *
def index(request):
# if already logged in
if request.session.get('uid'):
return redirect('/home')
else:
return render(request, 'index.html')
def login(request):
# when added to session, user is considered 'logged in'
# .filter ALWAYS returns a query set LIST 0 or more items
# (need to index list)
found_users = User.objects.filter(email=request.POST['email'])
if len(found_users) > 0:
user_from_db = found_users[0]
if user_from_db.password == request.POST['password']:
request.session['uid'] = user_from_db.id
return redirect('/home')
else:
print('password incorrect')
else:
print('no user found')
return redirect('/')
def register(request):
# add validations later
new_user = User.objects.create(first_name=request.POST['first_name'],
last_name=request.POST['last_name'],
email=request.POST['email'],
password=request.POST['password'])
request.session['uid'] = new_user.id
return redirect('/home')
def home(request):
uid = request.session.get('uid')
if uid is not None:
user_from_db = User.objects.get(id=uid)
context = {
'user': user_from_db
}
return render(request, 'home.html', context)
else:
return redirect('/')
def users_profile(request, id):
uid = request.session.get('uid')
if uid is not None:
user_from_db = User.objects.get(id=uid)
context = {
'user': user_from_db
}
return render(request, 'profile.html', context)
else:
return redirect('/')
def logout(request):
request.session.clear()
return redirect('/')
# SHIRTS Section
def new_shirt(request):
if request.session.get('uid') is None:
return redirect('/')
return render(request, 'shirts/new.html')
def create_shirt(request):
uid = request.session.get('uid')
if uid is None:
return redirect('/')
logged_in_user = User.objects.get(id=uid)
new_shirt = Shirt.objects.create(
phrase=request.POST['phrase'],
price=request.POST['price'],
uploaded_by=logged_in_user)
return redirect('/shirts')
def all_shirts(request):
uid = request.session.get('uid')
if uid is None:
return redirect('/')
logged_in_user = User.objects.get(id=uid)
context = {
'all_shirts': Shirt.objects.all(),
'logged_in_user': logged_in_user,
}
return render(request, 'shirts/all.html', context)
def like_shirt(request, shirt_id):
uid = request.session.get('uid')
if uid is None:
return redirect('/')
logged_in_user = User.objects.get(id=uid)
found_shirts = Shirt.objects.filter(id=shirt_id)
if len(found_shirts) > 0:
shirt_to_like = found_shirts[0]
if logged_in_user in shirt_to_like.users_who_liked.all():
shirt_to_like.users_who_liked.remove(logged_in_user)
else:
shirt_to_like.users_who_liked.add(logged_in_user)
return redirect('/shirts')
| [
"neilm813@gmail.com"
] | neilm813@gmail.com |
eb4b171f444cfa4fedb0289438eb8266b5152894 | b63fafc8b7818d0f6c420ef7de5a17dada162a10 | /swagger_client/api_client.py | 2440bfb0aaf7b5067b81a09c992ad723146d1999 | [] | no_license | optimumtact/tgs-python-client | 8f153945d8370d875652ecc7b754e24eec2f92a2 | 75b3ebe1e58b99cc440746ee6f505d258431e0ce | refs/heads/master | 2022-07-14T10:40:04.376078 | 2020-05-19T01:00:23 | 2020-05-19T01:00:31 | 265,103,499 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24,770 | py | # coding: utf-8
"""
TGS API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 6.4.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import datetime
import json
import mimetypes
from multiprocessing.pool import ThreadPool
import os
import re
import tempfile
# python 2 and python 3 compatibility library
import six
from six.moves.urllib.parse import quote
from swagger_client.configuration import Configuration
import swagger_client.models
from swagger_client import rest
class ApiClient(object):
"""Generic API client for Swagger client library builds.
Swagger generic API client. This client handles the client-
server communication, and is invariant across implementations. Specifics of
the methods and models for each application are generated from the Swagger
templates.
NOTE: This class is auto generated by the swagger code generator program.
Ref: https://github.com/swagger-api/swagger-codegen
Do not edit the class manually.
:param configuration: .Configuration object for this client
:param header_name: a header to pass when making calls to the API.
:param header_value: a header value to pass when making calls to
the API.
:param cookie: a cookie to include in the header when making calls
to the API
"""
PRIMITIVE_TYPES = (float, bool, bytes, six.text_type) + six.integer_types
NATIVE_TYPES_MAPPING = {
'int': int,
'long': int if six.PY3 else long, # noqa: F821
'float': float,
'str': str,
'bool': bool,
'date': datetime.date,
'datetime': datetime.datetime,
'object': object,
}
def __init__(self, configuration=None, header_name=None, header_value=None,
cookie=None):
if configuration is None:
configuration = Configuration()
self.configuration = configuration
self.pool = ThreadPool()
self.rest_client = rest.RESTClientObject(configuration)
self.default_headers = {}
if header_name is not None:
self.default_headers[header_name] = header_value
self.cookie = cookie
# Set default User-Agent.
self.user_agent = 'Swagger-Codegen/1.0.0/python'
def __del__(self):
self.pool.close()
self.pool.join()
@property
def user_agent(self):
"""User agent for this API client"""
return self.default_headers['User-Agent']
@user_agent.setter
def user_agent(self, value):
self.default_headers['User-Agent'] = value
def set_default_header(self, header_name, header_value):
self.default_headers[header_name] = header_value
def __call_api(
self, resource_path, method, path_params=None,
query_params=None, header_params=None, body=None, post_params=None,
files=None, response_type=None, auth_settings=None,
_return_http_data_only=None, collection_formats=None,
_preload_content=True, _request_timeout=None):
config = self.configuration
# header parameters
header_params = header_params or {}
header_params.update(self.default_headers)
if self.cookie:
header_params['Cookie'] = self.cookie
if header_params:
header_params = self.sanitize_for_serialization(header_params)
header_params = dict(self.parameters_to_tuples(header_params,
collection_formats))
# path parameters
if path_params:
path_params = self.sanitize_for_serialization(path_params)
path_params = self.parameters_to_tuples(path_params,
collection_formats)
for k, v in path_params:
# specified safe chars, encode everything
resource_path = resource_path.replace(
'{%s}' % k,
quote(str(v), safe=config.safe_chars_for_path_param)
)
# query parameters
if query_params:
query_params = self.sanitize_for_serialization(query_params)
query_params = self.parameters_to_tuples(query_params,
collection_formats)
# post parameters
if post_params or files:
post_params = self.prepare_post_parameters(post_params, files)
post_params = self.sanitize_for_serialization(post_params)
post_params = self.parameters_to_tuples(post_params,
collection_formats)
# auth setting
self.update_params_for_auth(header_params, query_params, auth_settings)
# body
if body:
body = self.sanitize_for_serialization(body)
# request url
url = self.configuration.host + resource_path
# perform request and return response
response_data = self.request(
method, url, query_params=query_params, headers=header_params,
post_params=post_params, body=body,
_preload_content=_preload_content,
_request_timeout=_request_timeout)
self.last_response = response_data
return_data = response_data
if _preload_content:
# deserialize response data
if response_type:
return_data = self.deserialize(response_data, response_type)
else:
return_data = None
if _return_http_data_only:
return (return_data)
else:
return (return_data, response_data.status,
response_data.getheaders())
def sanitize_for_serialization(self, obj):
"""Builds a JSON POST object.
If obj is None, return None.
If obj is str, int, long, float, bool, return directly.
If obj is datetime.datetime, datetime.date
convert to string in iso8601 format.
If obj is list, sanitize each element in the list.
If obj is dict, return the dict.
If obj is swagger model, return the properties dict.
:param obj: The data to serialize.
:return: The serialized form of data.
"""
if obj is None:
return None
elif isinstance(obj, self.PRIMITIVE_TYPES):
return obj
elif isinstance(obj, list):
return [self.sanitize_for_serialization(sub_obj)
for sub_obj in obj]
elif isinstance(obj, tuple):
return tuple(self.sanitize_for_serialization(sub_obj)
for sub_obj in obj)
elif isinstance(obj, (datetime.datetime, datetime.date)):
return obj.isoformat()
if isinstance(obj, dict):
obj_dict = obj
else:
# Convert model obj to dict except
# attributes `swagger_types`, `attribute_map`
# and attributes which value is not None.
# Convert attribute name to json key in
# model definition for request.
obj_dict = {obj.attribute_map[attr]: getattr(obj, attr)
for attr, _ in six.iteritems(obj.swagger_types)
if getattr(obj, attr) is not None}
return {key: self.sanitize_for_serialization(val)
for key, val in six.iteritems(obj_dict)}
def deserialize(self, response, response_type):
"""Deserializes response into an object.
:param response: RESTResponse object to be deserialized.
:param response_type: class literal for
deserialized object, or string of class name.
:return: deserialized object.
"""
# handle file downloading
# save response body into a tmp file and return the instance
if response_type == "file":
return self.__deserialize_file(response)
# fetch data from response object
try:
data = json.loads(response.data)
except ValueError:
data = response.data
return self.__deserialize(data, response_type)
def __deserialize(self, data, klass):
"""Deserializes dict, list, str into an object.
:param data: dict, list or str.
:param klass: class literal, or string of class name.
:return: object.
"""
if data is None:
return None
if type(klass) == str:
if klass.startswith('list['):
sub_kls = re.match(r'list\[(.*)\]', klass).group(1)
return [self.__deserialize(sub_data, sub_kls)
for sub_data in data]
if klass.startswith('dict('):
sub_kls = re.match(r'dict\(([^,]*), (.*)\)', klass).group(2)
return {k: self.__deserialize(v, sub_kls)
for k, v in six.iteritems(data)}
# convert str to class
if klass in self.NATIVE_TYPES_MAPPING:
klass = self.NATIVE_TYPES_MAPPING[klass]
else:
klass = getattr(swagger_client.models, klass)
if klass in self.PRIMITIVE_TYPES:
return self.__deserialize_primitive(data, klass)
elif klass == object:
return self.__deserialize_object(data)
elif klass == datetime.date:
return self.__deserialize_date(data)
elif klass == datetime.datetime:
return self.__deserialize_datatime(data)
else:
return self.__deserialize_model(data, klass)
def call_api(self, resource_path, method,
path_params=None, query_params=None, header_params=None,
body=None, post_params=None, files=None,
response_type=None, auth_settings=None, async_req=None,
_return_http_data_only=None, collection_formats=None,
_preload_content=True, _request_timeout=None):
"""Makes the HTTP request (synchronous) and returns deserialized data.
To make an async request, set the async_req parameter.
:param resource_path: Path to method endpoint.
:param method: Method to call.
:param path_params: Path parameters in the url.
:param query_params: Query parameters in the url.
:param header_params: Header parameters to be
placed in the request header.
:param body: Request body.
:param post_params dict: Request post form parameters,
for `application/x-www-form-urlencoded`, `multipart/form-data`.
:param auth_settings list: Auth Settings names for the request.
:param response: Response data type.
:param files dict: key -> filename, value -> filepath,
for `multipart/form-data`.
:param async_req bool: execute request asynchronously
:param _return_http_data_only: response data without head status code
and headers
:param collection_formats: dict of collection formats for path, query,
header, and post parameters.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return:
If async_req parameter is True,
the request will be called asynchronously.
The method will return the request thread.
If parameter async_req is False or missing,
then the method will return the response directly.
"""
if not async_req:
return self.__call_api(resource_path, method,
path_params, query_params, header_params,
body, post_params, files,
response_type, auth_settings,
_return_http_data_only, collection_formats,
_preload_content, _request_timeout)
else:
thread = self.pool.apply_async(self.__call_api, (resource_path,
method, path_params, query_params,
header_params, body,
post_params, files,
response_type, auth_settings,
_return_http_data_only,
collection_formats,
_preload_content, _request_timeout))
return thread
def request(self, method, url, query_params=None, headers=None,
post_params=None, body=None, _preload_content=True,
_request_timeout=None):
"""Makes the HTTP request using RESTClient."""
if method == "GET":
return self.rest_client.GET(url,
query_params=query_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
headers=headers)
elif method == "HEAD":
return self.rest_client.HEAD(url,
query_params=query_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
headers=headers)
elif method == "OPTIONS":
return self.rest_client.OPTIONS(url,
query_params=query_params,
headers=headers,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
elif method == "POST":
return self.rest_client.POST(url,
query_params=query_params,
headers=headers,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
elif method == "PUT":
return self.rest_client.PUT(url,
query_params=query_params,
headers=headers,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
elif method == "PATCH":
return self.rest_client.PATCH(url,
query_params=query_params,
headers=headers,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
elif method == "DELETE":
return self.rest_client.DELETE(url,
query_params=query_params,
headers=headers,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
else:
raise ValueError(
"http method must be `GET`, `HEAD`, `OPTIONS`,"
" `POST`, `PATCH`, `PUT` or `DELETE`."
)
def parameters_to_tuples(self, params, collection_formats):
"""Get parameters as list of tuples, formatting collections.
:param params: Parameters as dict or list of two-tuples
:param dict collection_formats: Parameter collection formats
:return: Parameters as list of tuples, collections formatted
"""
new_params = []
if collection_formats is None:
collection_formats = {}
for k, v in six.iteritems(params) if isinstance(params, dict) else params: # noqa: E501
if k in collection_formats:
collection_format = collection_formats[k]
if collection_format == 'multi':
new_params.extend((k, value) for value in v)
else:
if collection_format == 'ssv':
delimiter = ' '
elif collection_format == 'tsv':
delimiter = '\t'
elif collection_format == 'pipes':
delimiter = '|'
else: # csv is the default
delimiter = ','
new_params.append(
(k, delimiter.join(str(value) for value in v)))
else:
new_params.append((k, v))
return new_params
def prepare_post_parameters(self, post_params=None, files=None):
"""Builds form parameters.
:param post_params: Normal form parameters.
:param files: File parameters.
:return: Form parameters with files.
"""
params = []
if post_params:
params = post_params
if files:
for k, v in six.iteritems(files):
if not v:
continue
file_names = v if type(v) is list else [v]
for n in file_names:
with open(n, 'rb') as f:
filename = os.path.basename(f.name)
filedata = f.read()
mimetype = (mimetypes.guess_type(filename)[0] or
'application/octet-stream')
params.append(
tuple([k, tuple([filename, filedata, mimetype])]))
return params
def select_header_accept(self, accepts):
"""Returns `Accept` based on an array of accepts provided.
:param accepts: List of headers.
:return: Accept (e.g. application/json).
"""
if not accepts:
return
accepts = [x.lower() for x in accepts]
if 'application/json' in accepts:
return 'application/json'
else:
return ', '.join(accepts)
def select_header_content_type(self, content_types):
"""Returns `Content-Type` based on an array of content_types provided.
:param content_types: List of content-types.
:return: Content-Type (e.g. application/json).
"""
if not content_types:
return 'application/json'
content_types = [x.lower() for x in content_types]
if 'application/json' in content_types or '*/*' in content_types:
return 'application/json'
else:
return content_types[0]
def update_params_for_auth(self, headers, querys, auth_settings):
"""Updates header and query params based on authentication setting.
:param headers: Header parameters dict to be updated.
:param querys: Query parameters tuple list to be updated.
:param auth_settings: Authentication setting identifiers list.
"""
if not auth_settings:
return
for auth in auth_settings:
auth_setting = self.configuration.auth_settings().get(auth)
if auth_setting:
if not auth_setting['value']:
continue
elif auth_setting['in'] == 'header':
headers[auth_setting['key']] = auth_setting['value']
elif auth_setting['in'] == 'query':
querys.append((auth_setting['key'], auth_setting['value']))
else:
raise ValueError(
'Authentication token must be in `query` or `header`'
)
def __deserialize_file(self, response):
"""Deserializes body to file
Saves response body into a file in a temporary folder,
using the filename from the `Content-Disposition` header if provided.
:param response: RESTResponse.
:return: file path.
"""
fd, path = tempfile.mkstemp(dir=self.configuration.temp_folder_path)
os.close(fd)
os.remove(path)
content_disposition = response.getheader("Content-Disposition")
if content_disposition:
filename = re.search(r'filename=[\'"]?([^\'"\s]+)[\'"]?',
content_disposition).group(1)
path = os.path.join(os.path.dirname(path), filename)
with open(path, "wb") as f:
f.write(response.data)
return path
def __deserialize_primitive(self, data, klass):
"""Deserializes string to primitive type.
:param data: str.
:param klass: class literal.
:return: int, long, float, str, bool.
"""
try:
return klass(data)
except UnicodeEncodeError:
return six.text_type(data)
except TypeError:
return data
def __deserialize_object(self, value):
"""Return a original value.
:return: object.
"""
return value
def __deserialize_date(self, string):
"""Deserializes string to date.
:param string: str.
:return: date.
"""
try:
from dateutil.parser import parse
return parse(string).date()
except ImportError:
return string
except ValueError:
raise rest.ApiException(
status=0,
reason="Failed to parse `{0}` as date object".format(string)
)
def __deserialize_datatime(self, string):
"""Deserializes string to datetime.
The string should be in iso8601 datetime format.
:param string: str.
:return: datetime.
"""
try:
from dateutil.parser import parse
return parse(string)
except ImportError:
return string
except ValueError:
raise rest.ApiException(
status=0,
reason=(
"Failed to parse `{0}` as datetime object"
.format(string)
)
)
def __hasattr(self, object, name):
return name in object.__class__.__dict__
def __deserialize_model(self, data, klass):
"""Deserializes list or dict to model.
:param data: dict, list.
:param klass: class literal.
:return: model object.
"""
if not klass.swagger_types and not self.__hasattr(klass, 'get_real_child_model'):
return data
kwargs = {}
if klass.swagger_types is not None:
for attr, attr_type in six.iteritems(klass.swagger_types):
if (data is not None and
klass.attribute_map[attr] in data and
isinstance(data, (list, dict))):
value = data[klass.attribute_map[attr]]
kwargs[attr] = self.__deserialize(value, attr_type)
instance = klass(**kwargs)
if (isinstance(instance, dict) and
klass.swagger_types is not None and
isinstance(data, dict)):
for key, value in data.items():
if key not in klass.swagger_types:
instance[key] = value
if self.__hasattr(instance, 'get_real_child_model'):
klass_name = instance.get_real_child_model(data)
if klass_name:
instance = self.__deserialize(data, klass_name)
return instance
| [
"email@oranges.net.nz"
] | email@oranges.net.nz |
5ee73a5d7a4d21e8ff1b542b13b9828616bbdac6 | e02dbefe9f362c3e9b2849c1e22c0ab27e010164 | /이것이 코딩 테스트다 - 연습문제/19. 1로 만들기.py | 18fba447785ec8766aa41a48e8bf4090b6b8e8c1 | [] | no_license | hoyeoon/CodingTest | ac77574539a7a96cbdb64eb1768ba20ab6ad3b4f | 4d34b422f0dc85f3d506a6c997f3fa883b7162ab | refs/heads/master | 2023-06-05T17:43:38.348537 | 2021-06-28T10:05:22 | 2021-06-28T10:05:22 | 378,081,127 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 738 | py | d = [0] * 30000
d[1], d[2], d[3], d[4], d[5] = 0, 1, 1, 2, 1
x = int(input())
for i in range(6, x + 1):
if i % 2 == 0 and i % 3 == 0 and i % 5 == 0:
d[i] = min(d[i - 1], d[i // 2], d[i // 3], d[i // 5]) + 1
elif i % 2 == 0 and i % 3 == 0:
d[i] = min(d[i - 1], d[i // 2], d[i // 3]) + 1
elif i % 2 == 0 and i % 5 == 0:
d[i] = min(d[i - 1], d[i // 2], d[i // 5]) + 1
elif i % 3 == 0 and i % 5 == 0:
d[i] = min(d[i - 1], d[i // 3], d[i // 5]) + 1
elif i % 5 == 0:
d[i] = min(d[i - 1], d[i // 5]) + 1
elif i % 3 == 0:
d[i] = min(d[i - 1], d[i // 3]) + 1
elif i % 2 == 0:
d[i] = min(d[i - 1], d[i // 2]) + 1
else:
d[i] = d[i - 1] + 1
print(d[x]) | [
"chy1995@ajou.ac.kr"
] | chy1995@ajou.ac.kr |
25b4323fe523d4977277bb6f893c5d68dbf95c17 | f019a4de949777d0de2f1e2bb8f15bd1f13e3e46 | /manage.py | ca6c8b532d3eced4e542cc09b0c3de25c7c858a3 | [] | no_license | csula-kejjy/pineapple-chatbot | 9f1530dc5ce4ebd210ea913175a37ff97c16fde9 | 37d2d78411d0cf4683fc321e39e898f377d5974f | refs/heads/master | 2022-12-11T09:56:21.261441 | 2020-04-29T17:56:29 | 2020-04-29T17:56:29 | 208,667,321 | 1 | 0 | null | 2020-02-08T21:42:44 | 2019-09-15T22:48:10 | CSS | UTF-8 | Python | false | false | 647 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'system.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"enrique.castillo.rosales64@gmail.com"
] | enrique.castillo.rosales64@gmail.com |
26f6fadab64cb8da35caf9c3bf1d9d4f2218b7d8 | 47911ab146aa23fcb957b871bc8978839affe3da | /substring/3_longest_no_repeat_substring.py | 5f503750a45b0bece2c8fc66e38fdb04d2ec86ca | [] | no_license | shineNEVERLAND/-3000- | 7c04638008dd5b58b26ac51c4a7a89b4880e1cd2 | 7cdade045accd8fb98650afb1483bad5848c45d1 | refs/heads/master | 2020-07-30T00:36:28.109769 | 2020-03-24T02:12:33 | 2020-03-24T02:12:33 | 210,020,893 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 691 | py | # coding: utf-8
class Solution(object):
def lengthOfLongestSubstring(self, s):
"""
:type s: str
:rtype: int
"""
res = 0 # 记录当前无重复的最长子串
left = -1 # 记录最长子串 最左 的前一个位置 的坐标
position = dict() # 记录每个字符 最近一次出现 的坐标
for i in xrange(0, len(s), 1):
if s[i] in position and position[s[i]] > left:
left = position[s[i]]
position[s[i]] = i
res = max(res, i-left)
return res
if __name__ == '__main__':
s = 'abcabcbb'
res = Solution().lengthOfLongestSubstring(s)
print res
| [
"chengwenjing@bytedance.com"
] | chengwenjing@bytedance.com |
6071605bf6432368de7e86b3ecd72ac4a7aaadda | dc47465f93a5d9acfac86b16d7241870de80f9ca | /micropython/main.py | 0aa80b8f5b22e653bbf04e3aed38639cd9c55cef | [] | no_license | SERC-IoT/ESP8266-Starter-Wifi | 4d513a1aa01eafbdb621c7a7677805ba85cb2d5b | 629383f6ba25eb26c0e84d4f6d89a4931550533d | refs/heads/master | 2022-05-17T16:31:55.418628 | 2020-07-11T13:53:10 | 2020-07-11T13:53:10 | 239,703,577 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31 | py | # main.py is run after boot.py
| [
"35724907+markpatterson27@users.noreply.github.com"
] | 35724907+markpatterson27@users.noreply.github.com |
f2874514f35d1723a5d681cb9058a4e30cd19e15 | f40f39211238259871b8f46819b025cd5d3a924b | /django_tutorial_1/urls.py | 6c16eb9e52e745efdbbbaa323efc8d05cc856edf | [] | no_license | Madenden/django-heroku-first-lesson | 8e92cb25be571abe0a760afedea750dc12105f58 | 59e2f6b1e382049409028508ce7ed7731afcb3f6 | refs/heads/master | 2020-03-22T19:54:45.368997 | 2018-07-13T08:54:35 | 2018-07-13T08:54:35 | 140,559,852 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,025 | py | """django_tutorial_1 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.conf.urls import url
from todo.views import open_to_do, create_an_item, edit_an_item, toggle_status
urlpatterns = [
path('admin/', admin.site.urls),
path('', open_to_do),
path('add', create_an_item),
url(r'^edit/(?P<id>\d+)$', edit_an_item),
url(r'^toggle/(?P<id>\d+)$', toggle_status),
]
| [
"mden7785@gmail.com"
] | mden7785@gmail.com |
8e4736cc849b4a87f5c2726bb16f2d0248a5cf0a | 2c4755c6cdd621509504e93e5a074ade5fc25c31 | /ex7.py | 56b802de782e873354fce7dc44d980ab3089b5a7 | [] | no_license | adiputra524/learnPython | 923eb1cd4371b71e2e8360820489865d5fe3d223 | 1828b1e00cb677b5bd4805e39da9b6d56e5991d1 | refs/heads/master | 2021-05-25T16:59:39.806193 | 2015-12-04T12:06:10 | 2015-12-04T12:06:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 390 | py | print("Mary had a little lamb.")
print("It's fleece was white as %s." % 'snow')
print("And everywhere that Mary went.")
print("." * 10) # what'd that do?
end1 = "C"
end2 = "h"
end3 = "e"
end4 = "e"
end5 = "s"
end6 = "e"
end7 = "B"
end8 = "u"
end9 = "r"
end10 = "g"
end11 = "e"
end12 = "r"
print(end1 + end2 + end3 + end4 + end5 + end6,)
print(end7 + end8 + end9 + end10 + end11 +end12)
| [
"gsedubun@gmail.com"
] | gsedubun@gmail.com |
9188de8b93d646effe4f71b0136eaa1bccec6e28 | 50143ef62cc9b7172b1bd71c3524053ddc1797dc | /Challenge2/name_ranker.py | 9af12e6201f563e0a45f1b6efd351026eb5c772f | [] | no_license | jonbloom/GoogleIO2015-CodeChallenge | cd82357a340a98faab55ca4834489be01da825f5 | d169be839cd7af152e93bf5eb80d5d764adb500a | refs/heads/master | 2021-01-24T14:33:16.899348 | 2015-05-29T14:57:03 | 2015-05-29T14:57:03 | 36,451,590 | 0 | 0 | null | 2015-05-28T16:30:00 | 2015-05-28T16:30:00 | null | UTF-8 | Python | false | false | 500 | py | from collections import Counter
print '\n\n\nChallenge 2\n'
class NameRanker():
def __init__(self):
self._counter = Counter()
def process_file(self,filename):
with open(filename, 'r') as f:
for name in f:
self._counter[name.strip()] += 1
def top_n(self,n):
return self._counter.most_common(n)
def number_of_names(self, name):
return self._counter[name]
ranker = NameRanker()
ranker.process_file('test_input.txt')
for name in ranker.top_n(10):
print "{0} - {1}".format(*name) | [
"git@jon.tw"
] | git@jon.tw |
54ceb5d460c811307a4e5e8a7f54e6b990c302b3 | 0fbd56d4a2ee512cb47f557bea310618249a3d2e | /official/vision/image_classification/configs/base_configs.py | efdcdc0b4327871dd04a854f057cbcdf84a9db9e | [
"Apache-2.0"
] | permissive | joppemassant/models | 9968f74f5c48096f3b2a65e6864f84c0181465bb | b2a6712cbe6eb9a8639f01906e187fa265f3f48e | refs/heads/master | 2022-12-10T01:29:31.653430 | 2020-09-11T11:26:59 | 2020-09-11T11:26:59 | 294,675,920 | 1 | 1 | Apache-2.0 | 2020-09-11T11:21:51 | 2020-09-11T11:21:51 | null | UTF-8 | Python | false | false | 7,936 | py | # Lint as: python3
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Definitions for high level configuration groups.."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Any, List, Mapping, Optional
import dataclasses
from official.modeling import hyperparams
from official.modeling.hyperparams import config_definitions
# Re-export shared config classes from `config_definitions` so downstream
# image-classification configs can reference them from this module directly.
CallbacksConfig = config_definitions.CallbacksConfig
TensorboardConfig = config_definitions.TensorboardConfig
RuntimeConfig = config_definitions.RuntimeConfig
@dataclasses.dataclass
class ExportConfig(hyperparams.Config):
  """Configuration for exports.

  Attributes:
    checkpoint: the path to the checkpoint to export.
    destination: the path to where the checkpoint should be exported.
  """
  checkpoint: str = None  # Source checkpoint path; None means export is not configured.
  destination: str = None  # Target path for the exported artifact.
@dataclasses.dataclass
class MetricsConfig(hyperparams.Config):
  """Configuration for Metrics.

  Attributes:
    accuracy: Whether or not to track accuracy as a Callback. Defaults to None.
    top_5: Whether or not to track top_5_accuracy as a Callback. Defaults to
      None.
  """
  # None (as opposed to False) means "not specified"; the consumer decides
  # the effective default.
  accuracy: bool = None
  top_5: bool = None
@dataclasses.dataclass
class TimeHistoryConfig(hyperparams.Config):
  """Configuration for the TimeHistory callback.

  Attributes:
    log_steps: Interval of steps between logging of batch level stats.
  """
  log_steps: int = None  # None means "not specified" (no interval configured here).
@dataclasses.dataclass
class TrainConfig(hyperparams.Config):
  """Configuration for training.

  Attributes:
    resume_checkpoint: Whether or not to enable load checkpoint loading.
      Defaults to None.
    epochs: The number of training epochs to run. Defaults to None.
    steps: The number of steps to run per epoch. If None, then this will be
      inferred based on the number of images and batch size. Defaults to None.
    callbacks: An instance of CallbacksConfig.
    metrics: An instance of MetricsConfig.
    tensorboard: An instance of TensorboardConfig.
    set_epoch_loop: Whether or not to set `experimental_steps_per_execution` to
      equal the number of training steps in `model.compile`. This reduces the
      number of callbacks run per epoch which significantly improves end-to-end
      TPU training time.
  """
  resume_checkpoint: bool = None
  epochs: int = None
  steps: int = None
  # NOTE: instance defaults are acceptable here because hyperparams.Config
  # defines how sub-config defaults are instantiated (unlike plain
  # dataclasses, which reject mutable defaults).
  callbacks: CallbacksConfig = CallbacksConfig()
  metrics: MetricsConfig = None
  tensorboard: TensorboardConfig = TensorboardConfig()
  time_history: TimeHistoryConfig = TimeHistoryConfig()
  set_epoch_loop: bool = False
set_epoch_loop: bool = False
@dataclasses.dataclass
class EvalConfig(hyperparams.Config):
  """Configuration for evaluation.

  Attributes:
    epochs_between_evals: The number of train epochs to run between evaluations.
      Defaults to None.
    steps: The number of eval steps to run during evaluation. If None, this will
      be inferred based on the number of images and batch size. Defaults to
      None.
    skip_eval: Whether or not to skip evaluation entirely. Defaults to False.
  """
  epochs_between_evals: int = None
  steps: int = None
  skip_eval: bool = False


@dataclasses.dataclass
class LossConfig(hyperparams.Config):
  """Configuration for Loss.

  Attributes:
    name: The name of the loss. Defaults to None.
    label_smoothing: Whether or not to apply label smoothing to the loss. This
      only applies to 'categorical_cross_entropy'. Defaults to None.
  """
  name: str = None
  label_smoothing: float = None
@dataclasses.dataclass
class OptimizerConfig(hyperparams.Config):
  """Configuration for Optimizers.

  Attributes:
    name: The name of the optimizer. Defaults to None.
    decay: Decay or rho, discounting factor for gradient. Defaults to None.
    epsilon: Small value used to avoid 0 denominator. Defaults to None.
    momentum: Plain momentum constant. Defaults to None.
    nesterov: Whether or not to apply Nesterov momentum. Defaults to None.
    moving_average_decay: The amount of decay to apply. If 0 or None, then
      exponential moving average is not used. Defaults to None.
    lookahead: Whether or not to apply the lookahead optimizer. Defaults to
      None.
    beta_1: The exponential decay rate for the 1st moment estimates. Used in the
      Adam optimizers. Defaults to None.
    beta_2: The exponential decay rate for the 2nd moment estimates. Used in the
      Adam optimizers. Defaults to None.
  """
  # Bug fix: `epsilon` was declared twice (the later declaration silently
  # replaced the earlier one); the duplicate field and its second, conflicting
  # docstring entry ("Defaults to 1e-7") are removed. Both declared None, so
  # behavior is unchanged.
  name: str = None
  decay: float = None
  epsilon: float = None
  momentum: float = None
  nesterov: bool = None
  moving_average_decay: Optional[float] = None
  lookahead: Optional[bool] = None
  beta_1: float = None
  beta_2: float = None
@dataclasses.dataclass
class LearningRateConfig(hyperparams.Config):
  """Configuration for learning rates.

  Attributes:
    name: The name of the learning rate. Defaults to None.
    initial_lr: The initial learning rate. Defaults to None.
    decay_epochs: The number of decay epochs. Defaults to None.
    decay_rate: The rate of decay. Defaults to None.
    warmup_epochs: The number of warmup epochs. Defaults to None.
    examples_per_epoch: the number of examples in a single epoch. Defaults to
      None.
    boundaries: boundaries used in piecewise constant decay with warmup.
    multipliers: multipliers used in piecewise constant decay with warmup.
    scale_by_batch_size: Scale the learning rate by a fraction of the batch
      size. Set to 0 for no scaling (default).
    staircase: Apply exponential decay at discrete values instead of continuous.
  """
  # Docstring fix: removed the stale `batch_lr_multiplier` attribute entry --
  # no field of that name exists on this class.
  name: str = None
  initial_lr: float = None
  decay_epochs: float = None
  decay_rate: float = None
  warmup_epochs: int = None
  examples_per_epoch: int = None
  boundaries: List[int] = None
  multipliers: List[float] = None
  scale_by_batch_size: float = 0.
  staircase: bool = None


@dataclasses.dataclass
class ModelConfig(hyperparams.Config):
  """Configuration for Models.

  Attributes:
    name: The name of the model. Defaults to None.
    model_params: The parameters used to create the model. Defaults to None.
    num_classes: The number of classes in the model. Defaults to None.
    loss: A `LossConfig` instance. Defaults to None.
    optimizer: An `OptimizerConfig` instance. Defaults to None.
  """
  name: str = None
  model_params: hyperparams.Config = None
  num_classes: int = None
  loss: LossConfig = None
  optimizer: OptimizerConfig = None
@dataclasses.dataclass
class ExperimentConfig(hyperparams.Config):
  """Base configuration for an image classification experiment.

  Attributes:
    model_dir: The directory to use when running an experiment.
    model_name: The name of the model architecture to run.
    mode: e.g. 'train_and_eval', 'export'
    runtime: A `RuntimeConfig` instance.
    train_dataset: Configuration of the training dataset (typed `Any`;
      presumably a dataset config/builder -- confirm against the runner).
    validation_dataset: Configuration of the validation dataset (typed `Any`;
      same caveat as train_dataset).
    train: A `TrainConfig` instance.
    evaluation: An `EvalConfig` instance.
    model: A `ModelConfig` instance.
    export: An `ExportConfig` instance.
  """
  model_dir: str = None
  model_name: str = None
  mode: str = None
  runtime: RuntimeConfig = None
  train_dataset: Any = None
  validation_dataset: Any = None
  train: TrainConfig = None
  evaluation: EvalConfig = None
  model: ModelConfig = None
  export: ExportConfig = None
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
22d0f0f167daf2f54af3d8a0969674c1730daff3 | 45877df08d3d60f327f0620fa402a816e58b00ee | /helloworld/helloworld/asgi.py | 9b7c9998d56bcc397f2301a39241290b117579b4 | [
"MIT"
] | permissive | leelightman/swe1-app | 8b83a7d6c1a254661b5dc14ed9352b9e15b3a7fb | 20787cb0bf664e2f7692a2d540401fafa54e1e48 | refs/heads/main | 2022-12-29T03:41:55.379020 | 2020-10-20T03:30:11 | 2020-10-20T03:30:11 | 304,467,830 | 0 | 2 | MIT | 2020-10-19T16:13:14 | 2020-10-15T23:08:50 | Python | UTF-8 | Python | false | false | 397 | py | """
ASGI config for helloworld project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Point Django at the project settings before the application is built.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "helloworld.settings")
# Module-level ASGI callable that servers (daphne/uvicorn) import.
application = get_asgi_application()
| [
"leelightman@gmail.com"
] | leelightman@gmail.com |
55e3fff99a6a53657ed6aa3797ba4ebd66cd1a7a | be84495751737bbf0a8b7d8db2fb737cbd9c297c | /renlight/tests/renderer/test_sampler.py | b751d8606bc7eb66c363b055ea2f3a538bd86591 | [] | no_license | mario007/renmas | 5e38ff66cffb27b3edc59e95b7cf88906ccc03c9 | bfb4e1defc88eb514e58bdff7082d722fc885e64 | refs/heads/master | 2021-01-10T21:29:35.019792 | 2014-08-17T19:11:51 | 2014-08-17T19:11:51 | 1,688,798 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,747 | py |
import unittest
from tdasm import Runtime
from renlight.sdl.shader import Shader
from renlight.sdl import FloatArg, IntArg
from renlight.renderer.sampler import Sampler
class SamplerTest(unittest.TestCase):
    """Integration test: compile the 'regular' sampler and walk a 2x2 grid."""

    def test_sampler(self):
        # Build and compile a regular-grid sampler at 2x2 resolution.
        sam = Sampler()
        sam.set_resolution(2, 2)
        sam.load('regular')
        sam.compile()
        runtimes = [Runtime()]
        sam.prepare(runtimes)

        # SDL shader: pull one sample per execution and expose its fields.
        code = """
sample = Sample()
r1 = generate_sample(sample)
p1 = sample.x
p2 = sample.y
p3 = sample.ix
p4 = sample.iy
"""
        # Output slots; the initial values are sentinels that the shader
        # must overwrite on every successful generate_sample call.
        p1 = FloatArg('p1', 566.6)
        p2 = FloatArg('p2', 566.6)
        p3 = IntArg('p3', 5655)
        p4 = IntArg('p4', 5655)
        r1 = IntArg('r1', 5655)
        args = [p1, p2, p3, p4, r1]
        shader = Shader(code=code, args=args)
        shader.compile([sam.shader])
        shader.prepare(runtimes)

        # The four pixels of the 2x2 grid, visited row by row.
        shader.execute()
        self._check_result(shader, -0.5, -0.5, 0, 0, 1)
        shader.execute()
        self._check_result(shader, 0.5, -0.5, 1, 0, 1)
        shader.execute()
        self._check_result(shader, -0.5, 0.5, 0, 1, 1)
        shader.execute()
        self._check_result(shader, 0.5, 0.5, 1, 1, 1)
        # A fifth call must report exhaustion: r1 == 0.
        shader.execute()
        ret = shader.get_value('r1')
        self.assertEqual(ret, 0)

    def _check_result(self, shader, p1, p2, p3, p4, r1):
        # Compare each exported shader value with the expected sample fields.
        t1 = shader.get_value('p1')
        self.assertEqual(t1, p1)
        t2 = shader.get_value('p2')
        self.assertEqual(t2, p2)
        t3 = shader.get_value('p3')
        self.assertAlmostEqual(t3, p3)
        t4 = shader.get_value('p4')
        self.assertAlmostEqual(t4, p4)
        k1 = shader.get_value('r1')
        self.assertEqual(k1, r1)

if __name__ == "__main__":
    unittest.main()
| [
"mvidov@yahoo.com"
] | mvidov@yahoo.com |
a90b77944cf50883447f4adbe557e5b102d1cc3d | 760307b23f604d4e433ad76d9bd699095b5ff351 | /boards/models.py | 020eb06136f01d2002925738a5160749f99683f7 | [] | no_license | onyedikachi-david/django-boards | 4ebfb1bdc193157df847c35aadc341e947d27555 | e6fbe324d3a1c2a6ec01dee15b89de5592292657 | refs/heads/main | 2023-03-29T21:35:53.060659 | 2021-03-30T14:20:46 | 2021-03-30T14:20:46 | 352,997,411 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,129 | py | from django.db import models
# Create your models here.
from django.db import models
from django.contrib.auth.models import User
class Board(models.Model):
    """A discussion board: a uniquely named container for topics."""
    name = models.CharField(max_length=30, unique=True)  # board title, unique
    description = models.CharField(max_length=100)
    def __str__(self):
        # Human-readable name shown in the admin and the shell.
        return str(self.name)
class Topic(models.Model):
    """A thread of posts inside a board."""
    subject = models.CharField(max_length=255)
    # NOTE(review): auto_now_add only stamps the creation time; a field named
    # "last_updated" likely wants auto_now=True or explicit updates -- confirm.
    last_updated = models.DateTimeField(auto_now_add=True)
    board = models.ForeignKey(Board, related_name='topics', on_delete=models.SET_NULL, null=True)
    starter = models.ForeignKey(User, related_name='topics', on_delete=models.SET_NULL, null=True)
class Post(models.Model):
    """A single message inside a topic."""
    message = models.TextField(max_length=4000)
    topic = models.ForeignKey(Topic, related_name='posts', on_delete=models.SET_NULL, null=True)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(null=True)  # left NULL until first edit
    created_by = models.ForeignKey(User, related_name='posts', on_delete=models.SET_NULL, null=True)
    # related_name='+' suppresses the reverse accessor on User.
    updated_by = models.ForeignKey(User, null=True, related_name='+', on_delete=models.SET_NULL)
| [
"davidanyatonwu@gmail.com"
] | davidanyatonwu@gmail.com |
39bc1ed404e3a26ca5acbf101e00f0ab77e03d6d | 7360252cf1c347a096fa57fe430f4685d3d037a8 | /big_data_py/쇼핑외업종/쇼핑외업종01_Train및Test데이터분할.py | bc0c09cbd617b92f87312194d36d32c417c0c2cd | [] | no_license | suwonY/SURISURI_IT | 126f4caaf717bb24126d0dd0ac1687d74b304413 | 6c45106e4ffedb5c1e64456e17d76fc26a62de2d | refs/heads/master | 2021-12-30T12:07:02.700251 | 2021-12-02T01:59:49 | 2021-12-02T01:59:49 | 112,294,179 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,097 | py | import pandas as pd
import numpy as np

# Load the non-shopping-sector visit counts; keep customer IDs as strings.
date_train = pd.read_csv('C:/Users/조만재/Desktop/lpoint/lpoint/date_else_train_cnt.csv', sep=',', engine='python',
                         dtype={'ID': str})

from sklearn.model_selection import train_test_split

# Fixed-seed split: exactly 10,000 rows for training, the rest for testing.
date_else_train, date_else_test = train_test_split(date_train, train_size=10000, random_state=42)

BASE_DIR = 'C:/Users/조만재/Desktop/lpoint/lpoint/'
CHUNK_SIZE = 2000

# Export the full train/test sets, as .csv and as .txt (same CSV content).
date_else_train.to_csv(BASE_DIR + 'date_else_trainset.csv', index=False)
date_else_test.to_csv(BASE_DIR + 'date_else_testset.csv', index=False)
date_else_train.to_csv(BASE_DIR + 'date_else_train.txt', index=False)
date_else_test.to_csv(BASE_DIR + 'date_else_test.txt', index=False)

# Export the training set in five 2,000-row chunks (train1..train5).
# Replaces the original five hand-unrolled slice/export blocks; the output
# file names and contents are identical.
for part in range(5):
    chunk = date_else_train[part * CHUNK_SIZE:(part + 1) * CHUNK_SIZE]
    chunk.to_csv(BASE_DIR + 'date_else_train{}.csv'.format(part + 1), index=False)
    chunk.to_csv(BASE_DIR + 'date_else_train{}.txt'.format(part + 1), index=False)
"dongpeeee@gmail.com"
] | dongpeeee@gmail.com |
8288b545fc62302fbb329f74acb6af3c632c8387 | 27083d668b348c08e2e8c97cede9eeecd50cf8ab | /src/sensors.py | 15d0d66e1877f4d348afc88175fe7fb5e07b584c | [] | no_license | BobAfwata/rpi-sensors | 6c3e3ef7e40ef1817c9c1b42f0c6f47ec7693076 | 313892306fb5a4889416886522cce91d5823d571 | refs/heads/master | 2020-06-14T05:58:23.538814 | 2019-07-03T19:31:41 | 2019-07-03T19:31:41 | 194,926,298 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 182 | py | import RPi.GPIO as GPIO #Import GPIO library
import time
class sensor():
    """Skeleton for the GPIO sensor driver.

    Bug fix: the original stub did not parse -- `read_values` was missing its
    colon, two methods had no body, and none of them took `self`. The method
    names are kept; bodies are explicit no-op placeholders.
    """

    def init_sensors(self):
        # TODO: configure GPIO pins for the attached sensors.
        pass

    def read_values(self):
        # TODO: sample the sensors and return their readings.
        pass

    def deinit_sensors(self):
        # TODO: release GPIO resources (e.g. GPIO.cleanup()).
        pass
| [
"bobraphtoneafwata@gmail.com"
] | bobraphtoneafwata@gmail.com |
06a22bd4d2ef980a8bf8ceb2a13a88b006b28f39 | 5a82795c3860745112b7410d9060c5ef671adba0 | /leetcode/Kth Smallest Element in a BST.py | b169e7cff32636d2f2a3af72ff6449ae26da5f4b | [] | no_license | ashishvista/geeks | 8e09d0f3a422c1c9a1c1b19d879ebafa31b62f44 | 1677a304fc7857a3054b574e8702491f5ce01a04 | refs/heads/master | 2023-03-05T12:01:03.911096 | 2021-02-15T03:00:56 | 2021-02-15T03:00:56 | 336,996,558 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,268 | py | # Definition for a binary tree node.
from collections import deque
class TreeNode:
    """Plain binary tree node (LeetCode style)."""
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right


def deserialize(arr):
    """Build a binary tree from a LeetCode-style level-order token list.

    Args:
        arr: list of string tokens; "null" marks a missing child.

    Returns:
        The root TreeNode, or None for an empty / "null" root.

    Robustness fixes: tokens are stripped of surrounding whitespace (input
    split from "[3, 1, 4]" leaves padded tokens), and an empty or null root
    no longer raises (the original indexed arr[0] unconditionally).
    """
    tokens = [t.strip() for t in arr]
    if not tokens or not tokens[0] or tokens[0] == "null":
        return None
    root = TreeNode(int(tokens[0]))
    queue = deque([root])
    n = len(tokens)
    i = 1
    while queue and i < n:
        parent = queue.popleft()
        # Children come in pairs: tokens[i] is the left, tokens[i+1] the right.
        if tokens[i] != "null":
            parent.left = TreeNode(int(tokens[i]))
            queue.append(parent.left)
        if (i + 1) < n and tokens[i + 1] != "null":
            parent.right = TreeNode(int(tokens[i + 1]))
            queue.append(parent.right)
        i += 2
    return root
class Solution:
    def kthSmallest(self, root: TreeNode, k: int) -> int:
        """Return the k-th smallest value in a BST via an iterative in-order walk."""
        if root is None:
            return []
        stack = []
        visited = 0
        node = root
        while True:
            # Descend to the leftmost not-yet-visited node.
            while node:
                stack.append(node)
                node = node.left
            node = stack.pop()
            visited += 1
            if visited == k:
                return node.val
            # Continue the in-order traversal in the right subtree.
            node = node.right
if __name__ == "__main__":
    # stdin: a bracketed level-order list like [3,1,4,null,2], then k.
    arr = input().strip()[1:-1].split(",")
    k = int(input())
    root = deserialize(arr)
    res = Solution().kthSmallest(root, k)
    print(res)
| [
"ashish@groomefy.com"
] | ashish@groomefy.com |
b0b8a70942642d468f3b8671790e6597fac7f896 | c9835c348a22e9f6873e9b8dfa988a3b68d7169e | /1.py | 8c2ec1873dbf0d45232281ac253323b3b54e0263 | [] | no_license | Kathia1996/Python | e6fd02555959711616748c5faa8dff6c8013fe09 | efbd06639d5608beca02dc208510c6e95c6b744b | refs/heads/master | 2020-05-18T07:08:16.699336 | 2015-03-05T16:24:15 | 2015-03-05T16:24:15 | 31,723,077 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 93 | py | a = "martina"
# Python 2 script: compare two identical string literals (a is defined on the
# previous line) and report the result in Spanish.
b = "martina"
if (a == b):
    print "si lo son"  # "yes, they are (equal)"
else:
    print "no soy iguales"  # "we are not equal" (sic)
"mbolanosa@alumno.unsm.edu.pe"
] | mbolanosa@alumno.unsm.edu.pe |
35e2c03b3cf5d9fe9bb8622e5bfe3740d8af73ba | dba12d9289c6d3198dde722f4475c4e76529819c | /Drawing1.py | d25442128a3cb60e982aebc5a04bd40ef35173cd | [] | no_license | jeromepeng183/Warehouse | dd348b2fcc147720a0a6b9a846cc05d52c942d05 | 584d3bf2b11d62f1d74bbc17eed02387258d4c1a | refs/heads/master | 2022-12-11T03:08:24.549661 | 2020-09-07T23:10:13 | 2020-09-07T23:10:13 | 285,953,110 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 848 | py | #!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
import numpy as np
from pandas import Series,DataFrame
import matplotlib.pyplot as plt
# Jupyter-only: render matplotlib output inline in the notebook.
get_ipython().run_line_magic('matplotlib', 'inline')

# In[2]: line plot of 50 evenly spaced values in [0, 100].
nd = np.linspace(0,100, num=50)
s = Series(nd)
s.plot()
# In[4]: cumulative sum of the same series.
s.cumsum().plot()
# In[3]: 10x4 random-integer DataFrame, letter index / A-D columns.
df = DataFrame(np.random.randint(0,30,size=(10,4)),index=list('abcdefghij'),
               columns=list('ABCD'))
df  # notebook cell echo -- no effect when run as a script
# In[4]: default multi-column line plot.
df.plot(title='DataFrame')
# In[5]: vertical bar chart.
df.plot(kind = 'bar')
# In[10]: horizontal bar chart.
df.plot(kind = 'barh')
# In[6]: small random series for a histogram.
nd = np.random.randint(0,5,size=10)
s=Series(nd)
# In[9]:
nd  # notebook cell echo
# In[7]:
s.hist()
# In[18]: scatter of X vs Y from a 50x5 random frame.
nd1 = np.random.randint(0,50,size=(50,5))
df1=DataFrame(nd1,columns=list('XYABC'))
df1.plot(x='X',y='Y',kind='scatter')
# In[19]: pairwise scatter matrix with KDE plots on the diagonal.
pd.plotting.scatter_matrix(df1,diagonal='kde')
| [
"noreply@github.com"
] | jeromepeng183.noreply@github.com |
47123b87521e562cfa0581409f0dd2b40037d1de | 52930840bcb6dcc4a1a3017123c8b07e0cfef8c8 | /NodeData.py | 486535180b7974b80f0a9c6551575ba0cff18781 | [] | no_license | iAmMortos/SudokuApp | 8a0a73277fc01439d2353ade4441989fea04e543 | ca5f6752790b27a8b8fe5c1848e00721b304b278 | refs/heads/master | 2020-08-10T03:49:59.910467 | 2019-10-10T17:49:20 | 2019-10-10T17:49:20 | 214,248,828 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 306 | py | class NodeData (object):
    def __init__(self, r=-1, c=-1, n=-1):
        # Funnel through set() so construction and later updates share one path.
        self.set(r,c,n)
def set(self, r=-1, c=-1, n=-1):
self.r = r
self.c = c
self.n = n
def equals(self, o):
return self.r == o.r and self.c == o.c and self.n == o.n
def __repr__(self):
return '[%s,%s]:%s' % (self.c, self.r, self.n)
| [
"iAmMortos@users.noreply.github.com"
] | iAmMortos@users.noreply.github.com |
a49e3ed005188518b84eb367a76afe8c6aed96d3 | 2a5f67db7dfe10c21ee5a148731c4e95cf5f613a | /30 Days of Code/Day 24 - More Linked Lists.py | 7fe94449c1d6e0bf7eeac377f35f9172729ebeb4 | [] | no_license | bayoishola20/HackerRank | b8d49da0ff648463fda4e590b662b8914550402c | 466b17c326ccaf208239fa40dee014efeb9b8561 | refs/heads/master | 2021-09-06T09:19:51.979879 | 2018-02-04T23:40:02 | 2018-02-04T23:40:02 | 64,167,170 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,270 | py | #==================== GIVEN CODE ======================#
class Node:
    """Singly linked list node."""
    def __init__(self,data):
        self.data = data
        self.next = None
class Solution:
def insert(self,head,data):
p = Node(data)
if head==None:
head=p
elif head.next==None:
head.next=p
else:
start=head
while(start.next!=None):
start=start.next
start.next=p
return head
def display(self,head):
current = head
while current:
print current.data,
current = current.next
#===================== END =========================#
def removeDuplicates(self,head):
#Write your code here
node = head
while node and node.next:
while node.next and node.data is node.next.data:
node.next = node.next.next
node = node.next
return head # return head
#==================== GIVEN CODE ======================#
mylist= Solution()
# Read T values from stdin, build the list, dedupe, then print it.
T=int(input())
head=None
for i in range(T):
    data=int(input())
    head=mylist.insert(head,data)
head=mylist.removeDuplicates(head)
mylist.display(head);
#===================== END =========================# | [
"bayoishola20@yahoo.com"
] | bayoishola20@yahoo.com |
74ffb4e03fbb9173c7f3efc2ce1835325cef73f7 | e94d771aac72df4105f1cc6dbe40b9123f098d92 | /portfolio_server/src/main/resources/static/pythonHelper/indexFetcher.py | 4235a382d4aa0926d76536c9e9659c0fe56e1ecd | [] | no_license | HarperLiu/Portfolio | d28147afb84c20386397ab90d57a67b6105267c6 | 03949a2273010fe032c7c6c5164ffe7361cd2472 | refs/heads/master | 2022-07-07T02:21:12.779705 | 2019-05-23T14:39:53 | 2019-05-23T14:39:53 | 181,262,652 | 0 | 1 | null | 2022-01-15T04:11:05 | 2019-04-14T05:04:21 | Python | UTF-8 | Python | false | false | 184 | py | import tushare as ts
# Fetch the current market index table via tushare.
df = ts.get_index()
# Dump as a JSON array of records for the portfolio front-end to consume.
df.to_json('/Users/apple/Desktop/Portfolio/portfolio_server/src/main/resources/static/jsonData/index.json',orient='records',force_ascii=False)
"161250080@smail.nju.edu.cn"
] | 161250080@smail.nju.edu.cn |
890c3a5b6925857914d18cbd4d4b232142dc2998 | 8f2c77bb51dd171f45972e5c0fecc952add05c9e | /usb_testing.py | 71c00afd4960bc6550382fb6991493c5d70cc780 | [] | no_license | nibezg/H-Lamb-shift-measurement | 6ec99ae50af8a7a9dcebbc94f210a4a904dfbbce | 97919c44df0f1707f0a4e710b0629e3a097e92df | refs/heads/master | 2020-03-28T16:43:38.581461 | 2019-05-16T15:04:15 | 2019-05-16T15:04:15 | 148,722,331 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,243 | py | from __future__ import print_function
import argparse
import string
import struct
import sys
import win32api
import win32file
import pywintypes
#%%
help(win32file.DeviceIoControl)
#%%
def CTL_CODE(DeviceType, Function, Method, Access):
    """Pack a Windows IOCTL control code from its four fields (winioctl.h layout)."""
    code = DeviceType << 16
    code |= Access << 14
    code |= Function << 2
    code |= Method
    return code
def USB_CTL(id):
    """IOCTL for FILE_DEVICE_USB with METHOD_BUFFERED and FILE_ANY_ACCESS."""
    FILE_DEVICE_USB = 0x22
    return CTL_CODE(FILE_DEVICE_USB, id, 0, 0)
# IOCTL function numbers as used below; original inline names kept verbatim.
IOCTL_USB_GET_ROOT_HUB_NAME = USB_CTL(258) # HCD_GET_ROOT_HUB_NAME
IOCTL_USB_GET_NODE_INFORMATION = USB_CTL(258) # USB_GET_NODE_INFORMATION
IOCTL_USB_GET_NODE_CONNECTION_INFORMATION = USB_CTL(259) # USB_GET_NODE_CONNECTION_INFORMATION
IOCTL_USB_GET_NODE_CONNECTION_DRIVERKEY_NAME = USB_CTL(264) # USB_GET_NODE_CONNECTION_DRIVERKEY_NAME
IOCTL_USB_GET_NODE_CONNECTION_NAME = USB_CTL(261) # USB_GET_NODE_CONNECTION_NAME
IOCTL_USB_GET_DESCRIPTOR_FROM_NODE_CONNECTION = USB_CTL(260) # USB_GET_DESCRIPTOR_FROM_NODE_CONNECTION
print(IOCTL_USB_GET_DESCRIPTOR_FROM_NODE_CONNECTION)  # NOTE(review): debug leftover
# USB descriptor type codes used when building descriptor requests below.
USB_CONFIGURATION_DESCRIPTOR_TYPE = 2
USB_STRING_DESCRIPTOR_TYPE = 3
USB_INTERFACE_DESCRIPTOR_TYPE = 4
MAXIMUM_USB_STRING_LENGTH = 255
def open_dev(name):
    """Open a device path for writing; return a handle, or None on failure.

    Used for both host-controller paths (\\.\HCDn) and hub device paths.
    """
    try:
        handle = win32file.CreateFile(name,
                                      win32file.GENERIC_WRITE,
                                      win32file.FILE_SHARE_WRITE,
                                      None,
                                      win32file.OPEN_EXISTING,
                                      0,
                                      None)
    except pywintypes.error as e:
        # Device not present or access denied -- callers check for None.
        return None
    return handle
def get_root_hub_name(handle):
    """Return the root hub's symbolic name for an open host-controller handle.

    Issues IOCTL_USB_GET_ROOT_HUB_NAME twice: first with a minimal 6-byte
    buffer to learn the actual length, then with a buffer of that size.
    The payload after the 4-byte header is a UTF-16LE string.
    """
    buf = win32file.DeviceIoControl(handle,
                                    IOCTL_USB_GET_ROOT_HUB_NAME,
                                    None,
                                    6,
                                    None)
    act_len, _ = struct.unpack('LH', buf)
    buf = win32file.DeviceIoControl(handle,
                                    IOCTL_USB_GET_ROOT_HUB_NAME,
                                    None,
                                    act_len,
                                    None)
    return buf[4:].decode('utf-16le')
def get_driverkey_name(handle, index):
    """Return the registry driver-key name of the device on hub port `index`.

    Two-pass query (length probe, then full read); the payload after the
    8-byte header is UTF-16LE. Exits the process on ioctl failure.
    """
    key_name = chr(index) + '\0'*9
    try:
        buf = win32file.DeviceIoControl(handle,
                                        IOCTL_USB_GET_NODE_CONNECTION_DRIVERKEY_NAME,
                                        key_name,
                                        10,
                                        None)
    except pywintypes.error as e:
        print(e.strerror, index)
        sys.exit(1)
    _, act_len, _ = struct.unpack('LLH', buf)
    buf = win32file.DeviceIoControl(handle,
                                    IOCTL_USB_GET_NODE_CONNECTION_DRIVERKEY_NAME,
                                    key_name,
                                    act_len,
                                    None)
    return buf[8:].decode('utf-16le')
def get_ext_hub_name(handle, index):
    """Return the device name of the external hub attached at port `index`.

    Same two-pass length-probe pattern as get_driverkey_name; the payload
    after the 8-byte header is UTF-16LE.
    """
    hub_name = chr(index) + '\0'*9
    buf = win32file.DeviceIoControl(handle,
                                    IOCTL_USB_GET_NODE_CONNECTION_NAME,
                                    hub_name,
                                    10,
                                    None)
    _, act_len, _ = struct.unpack('LLH', buf)
    buf = win32file.DeviceIoControl(handle,
                                    IOCTL_USB_GET_NODE_CONNECTION_NAME,
                                    hub_name,
                                    act_len,
                                    None)
    return buf[8:].decode('utf-16le')
def get_str_desc(handle, conn_idx, str_idx):
    """Fetch USB string descriptor `str_idx` from the device on port `conn_idx`.

    Builds a descriptor request by hand (connection index + setup packet) and
    returns the decoded UTF-16LE string, '' when the reply is too short, or
    an error message string on ioctl failure.
    """
    req = struct.pack('LBBHHH',
                      conn_idx,
                      0,
                      0,
                      (USB_STRING_DESCRIPTOR_TYPE<<8) | str_idx,
                      win32api.GetSystemDefaultLangID(),
                      12+MAXIMUM_USB_STRING_LENGTH)
    try:
        buf = win32file.DeviceIoControl(handle,
                                        IOCTL_USB_GET_DESCRIPTOR_FROM_NODE_CONNECTION,
                                        req,
                                        200+MAXIMUM_USB_STRING_LENGTH,
                                        None)
    except pywintypes.error as e:
        return 'ERROR: no String Descriptor for index {}'.format(str_idx)
    if len(buf) > 16:
        # Skip the 12-byte request header plus the 2-byte descriptor header.
        return buf[14:].decode('utf-16le')
    return ''
def exam_hub(name, verbose, level):
    """Open hub `name`, query its node information, and list its ports.

    NOTE(review): `ord(buf[6])` assumes Python 2 string semantics for the
    DeviceIoControl result; on Python 3 buf[6] is already an int. The byte at
    offset 6 is read as the hub's port count -- confirm against the
    USB_NODE_INFORMATION layout.
    """
    handle = open_dev(r'\\.\{}'.format(name))
    if not handle:
        print('Failed to open device {}'.format(name))
        return
    buf = win32file.DeviceIoControl(handle,
                                    IOCTL_USB_GET_NODE_INFORMATION,
                                    None,
                                    76,
                                    None)
    print_hub_ports(handle, ord(buf[6]), verbose, level)
    handle.close()
def print_str_or_hex(to_be_print):
    """Print a descriptor string verbatim when printable, else as a hex dump."""
    if all(ch in string.printable for ch in to_be_print):
        print('"{}"'.format(to_be_print))
    else:
        # One "xx " pair per character, trailing space preserved.
        dump = ''.join('{:02x} '.format(ord(ch)) for ch in to_be_print)
        print('Hex: ' + dump)
def print_hub_ports(handle, num_ports, verbose, level):
    """Print every port of a hub, recursing into downstream hubs.

    For each port, queries the connection information and unpacks vendor id,
    product id, device BCD, the three string-descriptor indices, a hub flag
    and the connection status. Python 2 assumptions throughout (chr()-built
    ioctl input, struct on str buffers).
    """
    for idx in range(1, num_ports+1):
        # Input buffer: connection index byte followed by zero padding.
        info = chr(idx) + '\0'*34
        try:
            buf = win32file.DeviceIoControl(handle,
                                            IOCTL_USB_GET_NODE_CONNECTION_INFORMATION,
                                            info,
                                            34 + 11*30,
                                            None)
        except pywintypes.error as e:
            print(e.winerror, e.funcname, e.strerror)
            return
        _, vid, pid, vers, manu, prod, seri, _, ishub, _, stat = struct.unpack('=12sHHHBBB3s?6sL', buf[:35])
        if ishub:
            if verbose:
                print('{} [Port{}] {}'.format(' '*level, idx, 'USB Hub'))
            # Recurse into the downstream hub by its device name.
            exam_hub(get_ext_hub_name(handle, idx), verbose, level)
        elif stat == 0 and verbose:
            # Status 0: nothing attached to this port.
            print('{} [Port{}] {}'.format(' '*level, idx, 'NoDeviceConnected'))
        elif stat == 1:
            # Status 1: a device is connected; dump its identifiers.
            print(prod)
            if verbose or (manu != 0 or prod != 0 or seri != 0):
                print('{} [Port{}] {}'.format(' '*level, idx, get_driverkey_name(handle, idx)))
                print('{} Vendor ID: 0x{:04X}'.format(' '*level, vid))
                print('{} Product ID: 0x{:04X}'.format(' '*level, pid))
                print('{} Device BCD: 0x{:04X}'.format(' '*level, vers))
                # A zero descriptor index means "no such string descriptor".
                if manu != 0:
                    print('{} Manufacturer (0x{:x}) -> '.format(' '*level, manu), end='')
                    print_str_or_hex(get_str_desc(handle, idx, manu))
                if prod != 0:
                    print('{} Product (0x{:x}) -> '.format(' '*level, prod), end='')
                    print_str_or_hex(get_str_desc(handle, idx, prod))
                if seri != 0:
                    print('{} Serial No (0x{:x}) -> '.format(' '*level, seri), end='')
                    print_str_or_hex(get_str_desc(handle, idx, seri))
    # NOTE(review): debug leftovers -- `descr` is unused and this re-prints the
    # last port's manufacturer descriptor after the loop.
    descr=500
    print(get_str_desc(handle, idx, manu))
def main():
    """Enumerate USB host controllers HCD0..HCD9 and print their device trees."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-v', '--verbose', action='store_true',
                        help="Increase output verbosity.")
    args = parser.parse_args()
    for i in range(10):
        name = r"\\.\HCD{}".format(i)
        handle = open_dev(name)
        if not handle:
            # Controller index not present on this machine.
            continue
        root = get_root_hub_name(handle)
        print('{}RootHub: {}'.format('\n' if i != 0 else '', root))
        dev_name = r'\\.\{}'.format(root)
        dev_handle = open_dev(dev_name)
        if not dev_handle:
            print('Failed to open device {}'.format(dev_name))
            continue
        buf = win32file.DeviceIoControl(dev_handle,
                                        IOCTL_USB_GET_NODE_INFORMATION,
                                        None,
                                        76,
                                        None)
        # ord(buf[6]): port count of the root hub (Python 2 string semantics).
        print_hub_ports(dev_handle, ord(buf[6]), args.verbose, 0)
        dev_handle.close()
        handle.close()
if __name__ == '__main__':
    main()
#%%
print(win32api.GetSystemDefaultLangID())  # NOTE(review): stray debug cell
"43257670+nibezg@users.noreply.github.com"
] | 43257670+nibezg@users.noreply.github.com |
79c75af234d1c9f5c3700b0968c7cc65893723c8 | e88465b61a8d779bb940b666cacf9672aa04ddc8 | /setup.py | cf2ba481f352b6c62fe4795af2402e3a7a0f7ddb | [] | no_license | upgraddev/django-event-logger | 24877df11a6d4f69d5592bd8f05bc39fca452da2 | 02e08e64391eac5ae5f31f3cfa8565bbb92404e9 | refs/heads/master | 2020-03-23T01:34:54.613763 | 2018-04-09T06:20:24 | 2018-04-09T06:20:24 | 140,925,332 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 392 | py | try:
from setuptools import setup
except ImportError:
from distutils.core import setup
setup(
name='django-event-logger',
version='0.0.0',
description="""Django app to log events based on model changes.""",
long_description='...',
author='UpGrad',
packages=[
'event_logger',
],
include_package_data=True,
keywords='django-event-logger',
)
| [
"aman.mathur@upgrad.com"
] | aman.mathur@upgrad.com |
7c2934973f8736399141a0d706ccc8f9a2b9f54e | d7a05d71ef7f4d3faa7c63436ec47ab7a548cc72 | /com/ww/taobao/top/api/rest/TopSecretGetRequest.py | 20224b6d7d0080f2254af9f26184c74bc9d4e428 | [] | no_license | banfish/python_sign | 22515ddb075c2ffe27705ae257ebf1fa46a2a456 | 459e8c3250e94cf206e945cb95fe14686f738863 | refs/heads/master | 2023-08-23T13:13:29.503047 | 2021-10-20T03:33:05 | 2021-10-20T03:33:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 330 | py | '''
Created by auto_sdk on 2016.04.06
'''
from com.ww.taobao.top.api.base import RestApi
class TopSecretGetRequest(RestApi):
    """Auto-generated request object for the 'taobao.top.secret.get' TOP API."""
    def __init__(self,domain='gw.api.taobao.com',port=80):
        RestApi.__init__(self,domain, port)
        # Request parameters -- populated by the caller before dispatch.
        self.random_num = None
        self.secret_version = None
    def getapiname(self):
        # API method identifier RestApi uses when building/signing the request.
        return 'taobao.top.secret.get'
| [
"1249217495@qq.com"
] | 1249217495@qq.com |
59be0bf880afb7289bde3428351fe26aef1322ec | bda892fd07e3879df21dcd1775c86269587e7e07 | /leetcode/0058_E_最后一个单词的长度.py | 140ca073f044a5177a84e26a03f62afdfde003a6 | [] | no_license | CrzRabbit/Python | 46923109b6e516820dd90f880f6603f1cc71ba11 | 055ace9f0ca4fb09326da77ae39e33173b3bde15 | refs/heads/master | 2021-12-23T15:44:46.539503 | 2021-09-23T09:32:42 | 2021-09-23T09:32:42 | 119,370,525 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,021 | py | '''
给你一个字符串 s,由若干单词组成,单词之间用空格隔开。返回字符串中最后一个单词的长度。如果不存在最后一个单词,请返回 0 。
单词 是指仅由字母组成、不包含任何空格字符的最大子字符串。
示例 1:
输入:s = "Hello World"
输出:5
示例 2:
输入:s = " "
输出:0
提示:
1 <= s.length <= 104
s 仅有英文字母和空格 ' ' 组成
'''
class Solution:
    def _lengthOfLastWord(self, s: str) -> int:
        """Manual right-to-left scan (kept for reference; prints debug indices)."""
        left = -1
        right = -1
        index = len(s) - 1
        while index >= 0:
            # `right` marks the last non-space; `left` the space before the word.
            if right < 0 and s[index] != ' ':
                right = index
            if right > 0 and s[index] == ' ':
                left = index
                break
            index -= 1
        print(right, left)
        return right - left

    def lengthOfLastWord(self, s: str) -> int:
        """Length of the last whitespace-separated word, or 0 if none.

        Bug fix: the old one-liner `len(s.split()[-1:][0])` indexed [0] on a
        possibly empty slice and raised IndexError for all-whitespace input
        such as " " (the documented case that must return 0).
        """
        words = s.split()
        return len(words[-1]) if words else 0
so = Solution()
# Quick manual check: trailing spaces must not affect the result (expect 1).
print(so.lengthOfLastWord('b a '))
"1016864609@qq.com"
] | 1016864609@qq.com |
bfe8d9dc08a0ca72c67db74573a132bcf6fe4f11 | 5a76df64c8e53e1e846d459a5584fb0d78b694f2 | /code/frame_level_models.py | 40896078c7674ee97f51d8edf88bc11f33a781bf | [
"Apache-2.0"
] | permissive | msu-ml/17spr_chugh_dey | db8c528be9820b6353c8af5d94fc40922ca8ff15 | 4019d135680509624d7377f3e986d92b125b9599 | refs/heads/master | 2021-03-27T11:57:20.309036 | 2017-04-30T03:45:48 | 2017-04-30T04:03:37 | 81,892,047 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 16,033 | py | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains a collection of models which operate on variable-length sequences.
"""
import math
import models
import video_level_models
import tensorflow as tf
import model_utils as utils
import tensorflow.contrib.slim as slim
from tensorflow import flags
FLAGS = flags.FLAGS
# ---- Command-line flags shared by the frame-level models in this module. ----
flags.DEFINE_integer("iterations", 30,
                     "Number of frames per batch for DBoF.")
flags.DEFINE_bool("dbof_add_batch_norm", True,
                  "Adds batch normalization to the DBoF model.")
flags.DEFINE_bool(
    "sample_random_frames", True,
    "If true samples random frames (for frame level models). If false, a random"
    "sequence of frames is sampled instead.")
flags.DEFINE_integer("dbof_cluster_size", 8192,
                     "Number of units in the DBoF cluster layer.")
flags.DEFINE_integer("dbof_hidden_size", 1024,
                     "Number of units in the DBoF hidden layer.")
flags.DEFINE_string("dbof_pooling_method", "max",
                    "The pooling method used in the DBoF cluster layer. "
                    "Choices are 'average' and 'max'.")
flags.DEFINE_string("video_level_classifier_model", "MoeModel",
                    "Some Frame-Level models can be decomposed into a "
                    "generalized pooling operation followed by a "
                    "classifier layer")
# Sizes for the recurrent (LSTM / GRU) model variants.
flags.DEFINE_integer("lstm_cells", 512, "Number of LSTM cells.")
flags.DEFINE_integer("lstm_layers", 2, "Number of LSTM layers.")
flags.DEFINE_integer("gru_cells", 256, "Number of GRU cells.")
flags.DEFINE_integer("gru_layers", 2, "Number of GRU layers.")
class FrameLevelLogisticModel(models.BaseModel):
  """Logistic regression over mean-pooled frame-level features."""

  def create_model(self, model_input, vocab_size, num_frames, **unused_params):
    """Average frame features over time, then apply a sigmoid classifier.

    Intended as an example for implementors of frame level models; if you
    want to train over averaged features it is more efficient to average
    them beforehand than on the fly.

    Args:
      model_input: A 'batch_size' x 'max_frames' x 'num_features' matrix of
        input features.
      vocab_size: The number of classes in the dataset.
      num_frames: A vector of length 'batch' which indicates the number of
        frames for each video (before padding).

    Returns:
      A dictionary with a tensor containing the probability predictions of
      the model in the 'predictions' key, shaped 'batch_size' x 'num_classes'.
    """
    feature_size = model_input.get_shape().as_list()[2]
    frame_counts = tf.cast(tf.expand_dims(num_frames, 1), tf.float32)
    # Per-example frame count, broadcast across the feature dimension, so the
    # sum over frames becomes a true mean over the unpadded frames.
    divisors = tf.reshape(
        tf.tile(frame_counts, [1, feature_size]), [-1, feature_size])
    mean_features = tf.reduce_sum(model_input, axis=[1]) / divisors
    probabilities = slim.fully_connected(
        mean_features, vocab_size, activation_fn=tf.nn.sigmoid,
        weights_regularizer=slim.l2_regularizer(1e-8))
    return {"predictions": probabilities}
class NeuralNetworkModel(models.BaseModel):
  def create_model(self, model_input, vocab_size, num_frames, **unused_params):
    """Two fully connected ReLU layers over mean-pooled frame features.

    The frame features are mean-pooled over time (dividing by the true frame
    count, not the padded length), pushed through a hidden layer of size
    2 * vocab_size, and projected down to vocab_size outputs.

    NOTE(review): the output layer uses a ReLU activation, so 'predictions'
    is unbounded rather than a probability in [0, 1]; downstream loss/eval
    code that expects sigmoid outputs may misbehave -- confirm intentional.

    Args:
      model_input: A 'batch_size' x 'max_frames' x 'num_features' matrix of
        input features.
      vocab_size: The number of classes in the dataset.
      num_frames: A vector of length 'batch' which indicates the number of
        frames for each video (before padding).

    Returns:
      A dictionary with a tensor containing the predictions of the
      model in the 'predictions' key. The dimensions of the tensor are
      'batch_size' x 'num_classes'.
    """
    num_frames = tf.cast(tf.expand_dims(num_frames, 1), tf.float32)
    feature_size = model_input.get_shape().as_list()[2]
    denominators = tf.reshape(
        tf.tile(num_frames, [1, feature_size]), [-1, feature_size])
    avg_pooled = tf.reduce_sum(model_input,
                               axis=[1]) / denominators
    layer1 = slim.fully_connected(
        avg_pooled, vocab_size*2, activation_fn=tf.nn.relu,
        weights_regularizer=slim.l2_regularizer(1e-8))
    output = slim.fully_connected(
        layer1, vocab_size, activation_fn=tf.nn.relu,
        weights_regularizer=slim.l2_regularizer(1e-8))
    return {"predictions": output}
class DbofModel(models.BaseModel):
  """Creates a Deep Bag of Frames model.

  The model projects the features for each frame into a higher dimensional
  'clustering' space, pools across frames in that space, and then
  uses a configurable video-level model to classify the now aggregated
  features. The model will randomly sample either frames or sequences of
  frames during training to speed up convergence.

  Args:
    model_input: A 'batch_size' x 'max_frames' x 'num_features' matrix of
      input features.
    vocab_size: The number of classes in the dataset.
    num_frames: A vector of length 'batch' which indicates the number of
      frames for each video (before padding).

  Returns:
    A dictionary with a tensor containing the probability predictions of the
    model in the 'predictions' key. The dimensions of the tensor are
    'batch_size' x 'num_classes'.
  """

  def create_model(self,
                   model_input,
                   vocab_size,
                   num_frames,
                   iterations=None,
                   add_batch_norm=None,
                   sample_random_frames=None,
                   cluster_size=None,
                   hidden_size=None,
                   is_training=True,
                   **unused_params):
    # Any hyperparameter not supplied explicitly falls back to the
    # command-line flags.
    iterations = iterations or FLAGS.iterations
    add_batch_norm = add_batch_norm or FLAGS.dbof_add_batch_norm
    random_frames = sample_random_frames or FLAGS.sample_random_frames
    cluster_size = cluster_size or FLAGS.dbof_cluster_size
    hidden1_size = hidden_size or FLAGS.dbof_hidden_size

    num_frames = tf.cast(tf.expand_dims(num_frames, 1), tf.float32)
    # Subsample 'iterations' frames per video: either independently at
    # random, or as a random contiguous sequence.
    if random_frames:
      model_input = utils.SampleRandomFrames(model_input, num_frames,
                                             iterations)
    else:
      model_input = utils.SampleRandomSequence(model_input, num_frames,
                                               iterations)
    max_frames = model_input.get_shape().as_list()[1]
    feature_size = model_input.get_shape().as_list()[2]
    # Collapse (batch, frames) so every frame is projected independently.
    reshaped_input = tf.reshape(model_input, [-1, feature_size])
    tf.summary.histogram("input_hist", reshaped_input)

    if add_batch_norm:
      reshaped_input = slim.batch_norm(
          reshaped_input,
          center=True,
          scale=True,
          is_training=is_training,
          scope="input_bn")

    # Projection into the 'cluster' space; batch norm replaces the bias
    # term when enabled.
    cluster_weights = tf.Variable(tf.random_normal(
        [feature_size, cluster_size],
        stddev=1 / math.sqrt(feature_size)))
    tf.summary.histogram("cluster_weights", cluster_weights)
    activation = tf.matmul(reshaped_input, cluster_weights)
    if add_batch_norm:
      activation = slim.batch_norm(
          activation,
          center=True,
          scale=True,
          is_training=is_training,
          scope="cluster_bn")
    else:
      cluster_biases = tf.Variable(
          tf.random_normal(
              [cluster_size], stddev=1 / math.sqrt(feature_size)))
      tf.summary.histogram("cluster_biases", cluster_biases)
      activation += cluster_biases
    activation = tf.nn.relu6(activation)
    tf.summary.histogram("cluster_output", activation)

    # Pool across the frame axis ('average' or 'max' per flag).
    activation = tf.reshape(activation, [-1, max_frames, cluster_size])
    activation = utils.FramePooling(activation, FLAGS.dbof_pooling_method)

    # Hidden layer on the pooled representation.
    hidden1_weights = tf.Variable(tf.random_normal(
        [cluster_size, hidden1_size],
        stddev=1 / math.sqrt(cluster_size)))
    tf.summary.histogram("hidden1_weights", hidden1_weights)
    activation = tf.matmul(activation, hidden1_weights)
    if add_batch_norm:
      activation = slim.batch_norm(
          activation,
          center=True,
          scale=True,
          is_training=is_training,
          scope="hidden1_bn")
    else:
      hidden1_biases = tf.Variable(
          tf.random_normal(
              [hidden1_size], stddev=0.01))
      tf.summary.histogram("hidden1_biases", hidden1_biases)
      activation += hidden1_biases
    activation = tf.nn.relu6(activation)
    tf.summary.histogram("hidden1_output", activation)

    # Delegate classification to the configured video-level model.
    aggregated_model = getattr(video_level_models,
                               FLAGS.video_level_classifier_model)
    return aggregated_model().create_model(
        model_input=activation,
        vocab_size=vocab_size,
        **unused_params)
class LstmModel(models.BaseModel):
  """Stacked-LSTM frame aggregator followed by a video-level classifier."""

  def create_model(self, model_input, vocab_size, num_frames, **unused_params):
    """Creates a model which uses a stack of LSTMs to represent the video.

    The final LSTM state (not the per-step outputs) is fed to the
    video-level classifier selected by FLAGS.video_level_classifier_model.

    Args:
      model_input: A 'batch_size' x 'max_frames' x 'num_features' matrix of
        input features.
      vocab_size: The number of classes in the dataset.
      num_frames: A vector of length 'batch' which indicates the number of
        frames for each video (before padding).

    Returns:
      A dictionary with a tensor containing the probability predictions of
      the model in the 'predictions' key. The dimensions of the tensor are
      'batch_size' x 'num_classes'.
    """
    lstm_size = FLAGS.lstm_cells
    number_of_layers = FLAGS.lstm_layers

    # state_is_tuple=False keeps (c, h) concatenated in one tensor so the
    # whole stacked state can be passed downstream as a single matrix.
    stacked_lstm = tf.contrib.rnn.MultiRNNCell(
        [
            tf.contrib.rnn.BasicLSTMCell(
                lstm_size, forget_bias=1.0, state_is_tuple=False)
            for _ in range(number_of_layers)
        ],
        state_is_tuple=False)

    loss = 0.0  # NOTE(review): unused, as are the per-step `outputs` below.
    with tf.variable_scope("RNN"):
      outputs, state = tf.nn.dynamic_rnn(stacked_lstm, model_input,
                                         sequence_length=num_frames,
                                         dtype=tf.float32)

    aggregated_model = getattr(video_level_models,
                               FLAGS.video_level_classifier_model)
    return aggregated_model().create_model(
        model_input=state,
        vocab_size=vocab_size,
        **unused_params)
class GruModel(models.BaseModel):
  """Stacked-GRU frame aggregator followed by a video-level classifier."""

  def create_model(self, model_input, vocab_size, num_frames, **unused_params):
    """Creates a model which uses a stack of GRUs to represent the video.

    The final GRU state (not the per-step outputs) is fed to the
    video-level classifier selected by FLAGS.video_level_classifier_model.

    Args:
      model_input: A 'batch_size' x 'max_frames' x 'num_features' matrix of
        input features.
      vocab_size: The number of classes in the dataset.
      num_frames: A vector of length 'batch' which indicates the number of
        frames for each video (before padding).

    Returns:
      A dictionary with a tensor containing the probability predictions of
      the model in the 'predictions' key. The dimensions of the tensor are
      'batch_size' x 'num_classes'.
    """
    gru_size = FLAGS.gru_cells
    number_of_layers = FLAGS.gru_layers

    # state_is_tuple=False concatenates the per-layer states into a single
    # tensor that can be handed to the downstream classifier.
    stacked_gru = tf.contrib.rnn.MultiRNNCell(
        [
            tf.contrib.rnn.GRUCell(
                gru_size)
            for _ in range(number_of_layers)
        ],
        state_is_tuple=False)

    loss = 0.0  # NOTE(review): unused, as are the per-step `outputs` below.
    with tf.variable_scope("RNN"):
      outputs, state = tf.nn.dynamic_rnn(stacked_gru, model_input,
                                         sequence_length=num_frames,
                                         dtype=tf.float32)

    aggregated_model = getattr(video_level_models,
                               FLAGS.video_level_classifier_model)
    return aggregated_model().create_model(
        model_input=state,
        vocab_size=vocab_size,
        **unused_params)
class SVMModel(models.BaseModel):
  """Kernel-SVM-style model over time-averaged frame features.

  NOTE(review): the hard-coded 3 in ``b_1`` / ``reshape_matmul`` suggests
  this was written for a 3-class problem -- confirm before reusing.
  """

  def create_model(self,
                   model_input,
                   vocab_size,
                   num_frames,
                   labels,
                   batch_size=1024,
                   iterations=None,
                   add_batch_norm=None,
                   sample_random_frames=None,
                   cluster_size=None,
                   hidden_size=None,
                   is_training=True,
                   **unused_params):
    """Average frame features per video, then build an RBF-kernel SVM dual
    loss plus a kernel-based prediction matrix.

    Args:
      model_input: A 'batch_size' x 'max_frames' x 'num_features' matrix of
        input features.
      vocab_size: The number of classes in the dataset (unused here).
      num_frames: A vector of length 'batch' which indicates the number of
        frames for each video (before padding).
      labels: per-example targets; cast to float and used in the dual loss.
      batch_size: number of examples; must match the runtime batch size,
        since ``b_1`` is shaped [3, batch_size].

    Returns:
      {'predictions': kernel prediction matrix, 'loss': SVM dual objective}
    """
    # Mean over the unpadded time axis (divide by true frame count).
    num_frames = tf.cast(tf.expand_dims(num_frames, 1), tf.float32)
    labels = tf.cast(labels, tf.float32)
    feature_size = model_input.get_shape().as_list()[2]
    denominators = tf.reshape(
        tf.tile(num_frames, [1, feature_size]), [-1, feature_size])
    avg_pooled = tf.reduce_sum(model_input,
                               axis=[1]) / denominators

    # SVM dual variables (one row per class) and RBF width.
    # batch_size = model_input.get_shape().as_list()[0]
    b_1 = tf.Variable(tf.random_normal(shape=[3,batch_size]))
    gamma = tf.constant(-10.0)
    # NOTE(review): `dist` is computed but never used below -- the kernel is
    # built from `sq_dists` alone, so this is not the full squared distance.
    dist = tf.reduce_sum(tf.square(avg_pooled), 1)
    dist = tf.reshape(dist, [-1,1])
    sq_dists = tf.multiply(2., tf.matmul(avg_pooled, tf.transpose(avg_pooled)))
    my_kernel = tf.exp(tf.multiply(gamma, tf.abs(sq_dists)))

    # SVM dual objective: sum(alpha) - sum(K * (alpha alpha^T) * (y y^T)).
    first_term = tf.reduce_sum(b_1)
    b_vec_cross = tf.matmul(tf.transpose(b_1), b_1)
    y_target_cross = self.reshape_matmul(labels, batch_size)
    second_term = tf.reduce_sum(tf.multiply(my_kernel, tf.multiply(b_vec_cross, y_target_cross)),[1,2])
    loss = tf.reduce_sum(tf.negative(tf.subtract(first_term, second_term)))

    # Gaussian (RBF) prediction kernel over the same batch.
    rA = tf.reshape(tf.reduce_sum(tf.square(avg_pooled), 1),[-1,1])
    rB = tf.reshape(tf.reduce_sum(tf.square(avg_pooled), 1),[-1,1])  # prediction_grid
    pred_sq_dist = tf.add(tf.subtract(rA, tf.multiply(2., tf.matmul(avg_pooled, tf.transpose(avg_pooled)))), tf.transpose(rB))
    pred_kernel = tf.exp(tf.multiply(gamma, tf.abs(pred_sq_dist)))
    prediction_output = tf.matmul(tf.multiply(labels, b_1), pred_kernel)
    # NOTE(review): `prediction` (argmax of centred scores) is computed but
    # not returned; tf.arg_max is also deprecated in favour of tf.argmax.
    prediction = tf.arg_max(prediction_output-tf.expand_dims(tf.reduce_mean(prediction_output,1), 1), 0)
    return {"predictions": prediction_output, "loss": loss}

  # Reshape/batch multiplication helper: builds the y y^T cross terms for
  # each of the 3 classes -> shape [3, batch_size, batch_size].
  def reshape_matmul(self, mat, batch_size):
    v1 = tf.expand_dims(mat, 1)
    v2 = tf.reshape(v1, [3, batch_size, 1])
    return(tf.matmul(v2, v1))
| [
"rahuldey91@gmail.com"
] | rahuldey91@gmail.com |
b0e913589f473a5ac5d506981477d63f7353ca05 | 90eddb2a9afd7fc07a4898923ad345c5ddb190b6 | /BillPayer/billpayer/main/views.py | d93fc0490fc5db12c7950a1d6250a53054bfc533 | [] | no_license | nate032889/development | 2a361cc4b3b5b084cd5376b72fbb77ecfb910dd2 | 3b7b1a72841a720ba53e88c12ea25f0faf09de10 | refs/heads/master | 2020-04-21T05:25:04.057856 | 2019-04-01T00:34:09 | 2019-04-01T00:34:09 | 169,339,824 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 611 | py | from billpayer.main.forms import ItemTable
from flask import render_template, url_for, flash, redirect, request, Blueprint
main = Blueprint('main', __name__)
class Item(object):
    """Plain value object for one table row.

    Presumably consumed by ItemTable, which looks columns up by attribute
    name -- verify against billpayer.main.forms.ItemTable.
    """

    def __init__(self, name, description):
        self.name = name
        self.description = description
@main.route("/", methods=['GET', 'POST'])
@main.route("/payer", methods=['GET', 'POST'])
def payer():
    """Render the payer page with a demo table of three placeholder items."""
    demo_rows = [Item('Name%d' % idx, 'Description%d' % idx)
                 for idx in (1, 2, 3)]
    table = ItemTable(demo_rows)
    return render_template('main.html', tables=table)
| [
"nate@point3.net"
] | nate@point3.net |
0559ab4c7cf0e944357e9bf4c8cd88a217bc8e7d | 6f24adbcf57c01a59d47f9a6c669b10190c2bb25 | /Variables.py | 80ba34d337c90c9516e0dc007e403cd2c63c2e43 | [] | no_license | fredrikstahre/Python | 63799b5a188d92c5811c3d5cc2e0b8c14bb4d88b | b2ce28dc370da58ecf1c24e6909ce7545c48d983 | refs/heads/master | 2021-05-02T17:44:18.680094 | 2018-02-09T13:24:32 | 2018-02-09T13:24:32 | 120,651,304 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 367 | py | # greeting = 'Hello'
# _myName = 'Fredrik'
# age = 35
#
# print(_myName + ' is' + age + ' years old.')
#
# a = 12
# b = 3
# print(a + b)
# print(a - b)
# print(a * b)
# print(a / b)
# print(a // b)
# print(a % b)
#
# for i in range(1, a//b):
# print(i)
# Operator-precedence demo: // and * bind tighter than + and -.
a = 12
b = 3
print(a + b // 3 -4 * 12)  # 12 + (3 // 3) - (4 * 12) = -35
print(8 // 2 *3)  # (8 // 2) * 3 = 12
print(8 * 3 // 2)  # (8 * 3) // 2 = 12
print(((((a + b) // 3) - 4) * 12))  # explicit grouping: ((15 // 3) - 4) * 12 = 12
"fredrik.stahre@gunnebo.com"
] | fredrik.stahre@gunnebo.com |
c7a5b0998019519f661840d674ebc333f1d41f3c | 73ce1820039bf9034876997e0dfbd48db7953731 | /first/fb_post/views.py | 2cab759b17db3ca4c24eff344f23a2a7c02f5323 | [] | no_license | DurgaPrasadWebDev/practies-projects-on-django | 28b047895d8c19951c067410678ba20d8a938237 | 456317fd717c63253a086d22b85564ecb4fbbc63 | refs/heads/master | 2020-07-07T06:16:11.777377 | 2019-08-20T01:29:32 | 2019-08-20T01:29:32 | 203,275,413 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,234 | py | from django.db.models import *
from .models import *
class PostException(Exception):
    """Raised when a Post lookup fails or a user has no posts."""

    def __init__(self, error):
        self.error = error
class CommentException(Exception):
    """Raised when a Comment lookup fails.

    Bug fix: the constructor was misspelled ``__int__``, so it was never
    invoked -- ``self.error`` was never set and instantiation fell through
    to ``Exception.__init__``.
    """

    def __init__(self, error):
        self.error = error
class UserException(Exception):
    """Raised when a User lookup fails or a post has no associated user."""

    def __init__(self, error):
        self.error = error
class ReactionException(Exception):
    """Raised for Reaction-related failures (currently unused in this module)."""

    def __init__(self, error):
        self.error = error
def user_details_fetch(user_details):
    """Serialize a User model instance into the plain dict used in payloads."""
    return {
        "user_id": user_details.id,
        "name": user_details.user_name,
        "profile_pic_url": user_details.user_profile_pic_url,
    }
def reaction_details_fetch(reactions_details):
    """Summarize an iterable of Reaction objects for API payloads.

    Returns a dict with:
      * "count": the total number of reactions received, and
      * "type": the distinct reaction display names, in first-seen order.

    Bug fix: the original incremented the counter only when a *new*
    reaction type was seen, so "count" reported the number of distinct
    types rather than the number of reactions.
    """
    reaction_types = []
    total_reactions = 0
    for each_reaction in reactions_details:
        display_name = each_reaction.get_reaction_type_display()
        if display_name not in reaction_types:
            reaction_types.append(display_name)
        total_reactions += 1
    return {"count": total_reactions, "type": reaction_types}
def comment_convert_to_dictionary(comment, no_replies, replies):
    """Serialize a top-level Comment plus its already-serialized replies."""
    return {
        "comment_id": comment.id,
        "commenter": user_details_fetch(comment.user),
        "commented_at": time_convert_to_string(comment.comment_at_create),
        "comment_content": comment.comment_description,
        "reactions": reaction_details_fetch(comment.reactions.all()),
        "replies_count": no_replies,
        "replies": replies,
    }
def reply_convert_to_dictionary(reply):
    """Serialize a reply Comment (no nested replies of its own)."""
    return {
        "comment_id": reply.id,
        "commenter": user_details_fetch(reply.user),
        "commented_at": time_convert_to_string(reply.comment_at_create),
        "comment_content": reply.comment_description,
        "reactions": reaction_details_fetch(reply.reactions.all()),
    }
def post_convert_to_dictionary(post, comments, no_comments):
    """Serialize *post* into the response dict, embedding pre-built comments.

    Raises:
        UserException: if the post has no associated user.

    Bug fix: the original called user_details_fetch() *before* checking for
    a missing user, so a post without a user raised AttributeError instead
    of the intended UserException.
    """
    user_details = post.user
    if user_details is None:
        raise UserException('User Detail Not Found')
    post_details = dict()
    post_details["post_id"] = post.id
    post_details["posted_by"] = user_details_fetch(user_details)
    post_details["posted_at"] = time_convert_to_string(post.post_at_create)
    post_details["post_content"] = post.post_description
    post_details["reactions"] = reaction_details_fetch(
        Reaction.objects.select_related().filter(post=post))
    post_details["comments"] = comments
    post_details["comments_count"] = no_comments
    return post_details
def time_convert_to_string(time):
    """Format a datetime as e.g. '02-Jan-2020 03:04:05.000006'."""
    return "{:%d-%b-%Y %H:%M:%S.%f}".format(time)
def fetch_post_object(post_id):
    """Return the Post with the given primary key.

    Raises:
        PostException: if no Post with ``post_id`` exists.
    """
    try:
        return Post.objects.get(id=post_id)
    except ObjectDoesNotExist:
        raise PostException('The Post Object Does Not Found with given Post_id')
def fetch_user_object(user_id):
    """Return the User with the given primary key.

    Raises:
        UserException: if no User with ``user_id`` exists.
    """
    try:
        return User.objects.select_related().get(id=user_id)
    except ObjectDoesNotExist:
        raise UserException('The User Object Does Not Found with given User_id')
def fetch_comment_object(comment_id):
    """Return the Comment with the given primary key.

    Raises:
        CommentException: if no Comment with ``comment_id`` exists.
    """
    try:
        return Comment.objects.select_related().get(id=comment_id)
    except ObjectDoesNotExist:
        raise CommentException('The Comment Object Does Not Found with given Comment_id')
def get_post_details(post):
    """Build the fully nested dict for *post* (comments, replies, reactions).

    Improvements over the original: dead commented-out code removed, and the
    O(n^2) nested scan (every top-level comment re-walking all comments for
    its replies) replaced by a single pass that groups replies by their
    parent comment's id. Queryset order is preserved for both comments and
    replies.
    """
    comments_of_post = post.comments.all()
    top_level_comments = []
    replies_by_parent = {}
    for each_comment in comments_of_post:
        if each_comment.parent_comment is None:
            top_level_comments.append(each_comment)
        else:
            replies_by_parent.setdefault(
                each_comment.parent_comment.id, []
            ).append(reply_convert_to_dictionary(each_comment))
    total_comments = []
    for each_comment in top_level_comments:
        replies = replies_by_parent.get(each_comment.id, [])
        total_comments.append(
            comment_convert_to_dictionary(each_comment, len(replies), replies))
    return post_convert_to_dictionary(post, total_comments, len(total_comments))
def get_post(post_id):
    """Return the fully serialized post with comments, replies and reactions.

    Existence is validated first (raising PostException if missing), then the
    post is re-queried with select_related/prefetch_related so serialization
    does not trigger per-comment queries.
    """
    post_details = fetch_post_object(post_id)
    posts = Post.objects.filter(id=post_details.id).select_related('user').prefetch_related('comments', 'comments__reactions',
                                                                            'comments__user', 'reactions',
                                                                            'comments__parent_comment',
                                                                            'comments__parent_comment')
    post = get_post_details(posts[0])
    return post
def create_post(user_id, post_content):
    """Create a new post authored by *user_id* and return the new post's id.

    Raises:
        UserException: if the user does not exist.
    """
    author = fetch_user_object(user_id)
    created = author.posts.create(post_description=post_content)
    return created.id
def add_comment(post_id, comment_user_id, comment_text):
    """Attach a new top-level comment to a post and return the comment's id.

    Raises:
        PostException / UserException: if the post or user does not exist.
    """
    target_post = fetch_post_object(post_id)
    author = fetch_user_object(comment_user_id)
    new_comment = target_post.comments.create(comment_description=comment_text,
                                              user=author)
    return new_comment.id
def reply_to_comment(comment_id, reply_user_id, reply_text):
    """Create a reply to *comment_id* authored by *reply_user_id*.

    The comment tree is kept one level deep: replying to a reply re-attaches
    the new comment to the original top-level comment. Returns the new
    comment's id.
    """
    comment = fetch_comment_object(comment_id)
    user = fetch_user_object(reply_user_id)
    post = comment.post
    if comment.parent_comment is not None:
        # Flatten: a reply to a reply becomes a reply to the root comment.
        comment = comment.parent_comment
    reply_comment = Comment.objects.create(comment_description=reply_text, parent_comment=comment, post=post,
                                           user=user)
    return reply_comment.id
def react_to_comment(user_id, comment_id, reaction_type):
    """Toggle/update the user's reaction on a comment.

    Behaviour:
      * no existing reaction  -> create one with ``reaction_type``
      * same type exists      -> delete it (toggle off)
      * different type exists -> update it to ``reaction_type``

    Returns the reaction's id.
    """
    user = fetch_user_object(user_id)
    comment = fetch_comment_object(comment_id)
    try:
        reaction = Reaction.objects.get(comment=comment, user=user)
        if reaction.reaction_type == reaction_type:
            reaction.delete()
        else:
            reaction.reaction_type = reaction_type
            reaction.save()
        # NOTE(review): after delete() Django clears the instance's pk, so the
        # toggle-off branch likely returns None here -- confirm intended.
        return reaction.id
    except ObjectDoesNotExist:
        reaction = Reaction.objects.create(reaction_type=reaction_type, comment=comment, user=user)
        return reaction.id
def react_to_post(user_id, post_id, reaction_type):
    """Toggle/update the user's reaction on a post.

    Behaviour mirrors react_to_comment:
      * no existing reaction  -> create one with ``reaction_type``
      * same type exists      -> delete it (toggle off)
      * different type exists -> update it to ``reaction_type``

    Returns the reaction's id.
    """
    user = fetch_user_object(user_id)
    post = fetch_post_object(post_id)
    try:
        reaction = Reaction.objects.get(post=post, user=user)
        if reaction.reaction_type == reaction_type:
            reaction.delete()
        else:
            reaction.reaction_type = reaction_type
            reaction.save()
        # NOTE(review): after delete() Django clears the instance's pk, so the
        # toggle-off branch likely returns None here -- confirm intended.
        return reaction.id
    except ObjectDoesNotExist:
        reaction = Reaction.objects.create(reaction_type=reaction_type, post=post, user=user)
        return reaction.id
def get_user_posts(user_id):
    """Return the serialized posts authored by *user_id*.

    Raises:
        UserException: if the user does not exist.
        PostException: if the user has not created any posts.

    Improvements over the original: dead commented-out code removed,
    ``.exists()`` instead of ``.count() == 0`` (cheaper emptiness check),
    list comprehension for the result, and the duplicated
    'comments__parent_comment' prefetch lookup dropped.
    """
    user = fetch_user_object(user_id)
    posts = Post.objects.filter(user=user).select_related('user').prefetch_related(
        'comments', 'comments__reactions', 'comments__user', 'reactions',
        'comments__parent_comment')
    if not posts.exists():
        raise PostException('The User Does Not Create Any Posts So Their is Empty Posts')
    return [get_post_details(each_post) for each_post in posts]
def get_posts_with_more_positive_reactions():
    """Return ids of posts with at least as many positive as negative reactions.

    Positive = LI/LO/HA/WO, negative = SA/AN, counted with conditional
    ``Count`` aggregates in one query. Note ``gte`` means ties -- including
    posts with no reactions at all -- are included.
    """
    positive_reaction_count = Count('reaction', filter=Q(reaction__reaction_type__in=['LI', 'LO', 'HA', 'WO']))
    negative_reaction_count = Count('reaction', filter=Q(reaction__reaction_type__in=['SA', 'AN']))
    posts_with_reactions = Post.objects.annotate(positive_reaction_count=positive_reaction_count,
                                                 negative_reaction_count=negative_reaction_count)
    posts_with_more_positive_reactions = posts_with_reactions.filter(
        positive_reaction_count__gte=F('negative_reaction_count'))
    list_of_posts_id = posts_with_more_positive_reactions.values_list('id', flat=True)
    return list_of_posts_id
def get_posts_reacted_by_user(user_id):
    """Return the ids of posts the given user has reacted to.

    Comment-only reactions (post is NULL) are skipped.
    """
    user = fetch_user_object(user_id)
    reactions = Reaction.objects.filter(user=user).select_related('post')
    return [each_reaction.post_id
            for each_reaction in reactions
            if each_reaction.post is not None]
def get_reactions_to_post(post_id):
    """Return the users who reacted to a post, with their reaction type.

    Yields dicts of user_id, name, profile_pic_url and reaction (the raw
    reaction_type code), built via annotated F() aliases in a single query.
    """
    post = fetch_post_object(post_id)
    reactions = Reaction.objects.filter(post=post)
    reactions_with_user = reactions.annotate(
        name=F('user__user_name'), profile_pic_url=F('user__user_profile_pic_url'), reaction=F('reaction_type'))
    user_details = reactions_with_user.values('user_id', 'name', 'profile_pic_url', 'reaction')
    return user_details
def get_reaction_metrics(post_id):
    """Return per-type reaction counts for a post.

    Yields dicts like {'reaction_type': 'LI', 'reaction_count': 3} via a
    values()/annotate() GROUP BY query.
    """
    post = fetch_post_object(post_id)
    reaction_metrics = Reaction.objects.filter(post=post).values('reaction_type').annotate(
        reaction_count=Count('reaction_type'))
    return reaction_metrics
def get_total_reaction_count():
    """Return the total number of post reactions.

    ``postReactions`` is presumably a custom manager on Reaction restricted
    to post (not comment) reactions -- verify against the model definition.
    """
    reaction_count = Reaction.postReactions.count()
    return reaction_count
def delete_post(post_id):
    """Delete the post with *post_id* and return Django's deletion summary.

    Raises:
        PostException: if the post does not exist.
    """
    return fetch_post_object(post_id).delete()
def get_replies_for_comment(comment_id):
    """Return the serialized direct replies of the given comment.

    Raises:
        CommentException: if the comment does not exist.
    """
    parent = fetch_comment_object(comment_id)
    reply_objects = Comment.objects.filter(parent_comment=parent).annotate(
        reaction_type_count=Count('reaction')).select_related('user').prefetch_related('reactions')
    return [reply_convert_to_dictionary(each_reply)
            for each_reply in reply_objects]
"r141632@rguktrkv.ac.in"
] | r141632@rguktrkv.ac.in |
3f006e7288b20ee04ed3cd9979855e75f941bfc2 | 2dd560dc468af0af4ca44cb4cd37a0b807357063 | /Leetcode/1441. Build an Array With Stack Operations/solution2.py | 6a4545eaa5d00ee6fae207f89c0238a4684c2e0d | [
"MIT"
] | permissive | hi0t/Outtalent | 460fe4a73788437ba6ce9ef1501291035c8ff1e8 | 8a10b23335d8e9f080e5c39715b38bcc2916ff00 | refs/heads/master | 2023-02-26T21:16:56.741589 | 2021-02-05T13:36:50 | 2021-02-05T13:36:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 255 | py | class Solution:
def buildArray(self, target: List[int], n: int) -> List[str]:
result = []
for i in range(1, max(target) + 1):
result.append("Push")
if i not in target: result.append("Pop")
return result
| [
"info@crazysquirrel.ru"
] | info@crazysquirrel.ru |
aefb1957461ebf14dd5915d6e04a9049a4724b04 | 6861813a4bad39413d43189625bdd2b872fdaa7c | /doubanj/settings.py | 34cc65a09be1e9d09a0f629c2d9160febad0367b | [] | no_license | skadai/doubanj_movie | eeddfffe00741043bbe3eba62f4a02a4f575996c | bff7f3a04745dbc46abbf7e8ff7153b86b61c950 | refs/heads/master | 2020-03-11T13:07:27.390207 | 2018-05-14T06:50:08 | 2018-05-14T06:50:08 | 130,016,023 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,817 | py | # -*- coding: utf-8 -*-
# Scrapy settings for doubanj project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://doc.scrapy.org/en/latest/topics/settings.html
# https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'doubanj'
SPIDER_MODULES = ['doubanj.spiders']
NEWSPIDER_MODULE = 'doubanj.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'doubanj (+http://www.yourdomain.com)'
# Deliberately ignore robots.txt for this crawl.
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Fixed per-request delay (seconds); AutoThrottle below adjusts on top of it.
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
DOWNLOAD_DELAY = 2
# The download delay setting will honor only one of:
# CONCURRENT_REQUESTS_PER_DOMAIN = 16
CONCURRENT_REQUESTS_PER_IP = 4
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#   'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    'doubanj.middlewares.DoubanjSpiderMiddleware': 543,
#}
# Downloader middlewares; note ProxyMiddleware is currently disabled, so the
# PROXIES list below is unused unless it is re-enabled.
DOWNLOADER_MIDDLEWARES = {
    'doubanj.middlewares.DoubanjDownloaderMiddleware': 1,
    # 'doubanj.middlewares.ProxyMiddleware': 100,
}
# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    'doubanj.pipelines.DoubanjPipeline': 300,
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
AUTOTHROTTLE_ENABLED = True
# The initial download delay
AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
AUTOTHROTTLE_TARGET_CONCURRENCY = 2.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
# Proxy pool -- presumably consumed by middlewares.ProxyMiddleware (disabled
# above); these public proxies are likely stale and should be refreshed.
PROXIES = [
    {'ip_port': '183.158.206.15:61234', 'user_pass': ''},
    {'ip_port': '60.190.199.68:808', 'user_pass': ''},
    {'ip_port': '111.170.104.101:61234', 'user_pass': ''},
    {'ip_port': '116.226.112.20:9000', 'user_pass': ''},
    {'ip_port': '117.36.103.170:8118', 'user_pass': ''},
]
# User-agent pool -- presumably consumed by DoubanjDownloaderMiddleware for
# per-request UA rotation; verify against middlewares.py.
MY_USER_AGENT = [
    "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; AcooBrowser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0; Acoo Browser; SLCC1; .NET CLR 2.0.50727; Media Center PC 5.0; .NET CLR 3.0.04506)",
    "Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.5; AOLBuild 4337.35; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
    "Mozilla/5.0 (Windows; U; MSIE 9.0; Windows NT 9.0; en-US)",
    "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 2.0.50727; Media Center PC 6.0)",
    "Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 1.0.3705; .NET CLR 1.1.4322)",
    "Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 5.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 3.0.04506.30)",
    "Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN) AppleWebKit/523.15 (KHTML, like Gecko, Safari/419.3) Arora/0.3 (Change: 287 c9dfb30)",
    "Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527+ (KHTML, like Gecko, Safari/419.3) Arora/0.6",
    "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.2pre) Gecko/20070215 K-Ninja/2.1.1",
    "Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9) Gecko/20080705 Firefox/3.0 Kapiko/3.0",
    "Mozilla/5.0 (X11; Linux i686; U;) Gecko/20070322 Kazehakase/0.4.5",
    "Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.8) Gecko Fedora/1.9.0.8-1.fc10 Kazehakase/0.5.6",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/535.20 (KHTML, like Gecko) Chrome/19.0.1036.7 Safari/535.20",
    "Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; fr) Presto/2.9.168 Version/11.52",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.11 TaoBrowser/2.0 Safari/536.11",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.71 Safari/537.1 LBBROWSER",
    "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E; LBBROWSER)",
    "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E; LBBROWSER)",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.84 Safari/535.11 LBBROWSER",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E)",
    "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E; QQBrowser/7.0.3698.400)",
    "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SV1; QQDownload 732; .NET4.0C; .NET4.0E; 360SE)",
    "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E)",
    "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.89 Safari/537.1",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.89 Safari/537.1",
    "Mozilla/5.0 (iPad; U; CPU OS 4_2_1 like Mac OS X; zh-cn) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8C148 Safari/6533.18.5",
    "Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:2.0b13pre) Gecko/20110307 Firefox/4.0b13pre",
    "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:16.0) Gecko/20100101 Firefox/16.0",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11",
    "Mozilla/5.0 (X11; U; Linux x86_64; zh-CN; rv:1.9.2.10) Gecko/20100922 Ubuntu/10.10 (maverick) Firefox/3.6.10",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36",
]
"skchang08@126.com"
] | skchang08@126.com |
db3b6353b6a685819d237c58f1e1af3c774e7fa3 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03272/s087307449.py | fcf22c1051c98aacd570a62fca520414b8ef122a | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 62 | py | n,i = map(int,input().split())
count = n - i + 1
print(count) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
377301dcd7d3b8090ad1e3763400d0f830c70790 | 6820a16eef223f31648ad66b1b77cf03f71b7248 | /Project/SVM/5x5-weighted.py | 715fb1a6ec4031969920217304365df05d823aa4 | [] | no_license | usmankiet98/Ai394 | e1a4daf9f283d260a1ba0f932781d729be4b68f7 | 86d7c4a35da5d216374bb1a654e677c1004b53fb | refs/heads/main | 2023-04-11T12:17:54.372365 | 2021-04-26T15:22:10 | 2021-04-26T15:22:10 | 350,694,786 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,910 | py | import numpy as np
import sklearn as sk
import pandas as pd
from sklearn.naive_bayes import MultinomialNB
from sklearn.model_selection import train_test_split
from sklearn import svm
from sklearn.svm import SVC
from sklearn import metrics
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import classification_report, accuracy_score
import math
train = pd.read_csv( "train.csv" )
A = train.drop( 'label',axis=1 )
B = train['label']
filter = np.array(
[[1,1,1,1,1],
[1,2,2,2,1],
[1,2,3,2,1],
[1,2,2,2,1],
[1,1,1,1,1]]
)
A = A.to_numpy( )
print( A.shape )
size = np.empty( ( 0,576 ), int )
def complexity( image, filter ):
filter_a, filter_b = filter.shape
filter_len_by_2 = ( filter_a//2 )
n = 28
nn = n - ( filter_len_by_2 *2 )
new_image = np.zeros( ( nn,nn ) )
for i in range( 0,nn ):
for j in range( 0,nn ):
new_image[i][j] = np.sum( image[i:i+filter_a, j:j+filter_b]*filter )//25
return new_image
subset = 500
for img in A[0:subset,:]:
image_2d = np.reshape( img, ( 28,28 ) )
len_image = complexity( image_2d,filter )
len_image1D = np.reshape( len_image, ( -1,576 ) )
s_a = np.append( s_a, len_image1D, axis=0 ) #size
B = B.to_numpy( )
s_b = B[0:subset]
print( s_b.shape )
print( s_a.shape )
s_aTrain, s_aTest, yTrain, yTest = train_test_split( s_a,s_b,test_size=0.2,random_state=0 )
print( s_aTest.shape,", ",yTest.shape )
print( s_aTrain.shape,", ",yTrain.shape )
verify_svn = SVC( kernel="rbf", random_state=42, verbose=3,C=9 )
verify_svn.fit( sizeTrain,yTrain )
y_test_pred_svm = verify_svn.predict( s_aTest )
ans=metrics.accuracy_score( yTest, y_test_pred_svm )
print( "SVMACCURACB IS " )
print( ans )
| [
"noreply@github.com"
] | usmankiet98.noreply@github.com |
d32acff7d71fa483984ad59c17545f8b7e1f5a07 | f7b6867d090e353f2f24421b660f2548af5aca86 | /MaotujiudianSpider/settings.py | 91131f6e7a8e6546b1cfe353f9a5fb06e917a717 | [] | no_license | WITWEI/maotujiudian | 83159b81326d72ab1e0f68fce637504df520a5a8 | 74c070507ed6c8b99facdda5334f40611d99c168 | refs/heads/master | 2020-05-16T04:32:56.782872 | 2019-04-22T12:34:11 | 2019-04-22T12:34:11 | 182,782,013 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,505 | py | # -*- coding: utf-8 -*-
# Scrapy settings for MaotumeishiSpider project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://doc.scrapy.org/en/latest/topics/settings.html
# https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'MaotumeishiSpider'
SPIDER_MODULES = ['MaotumeishiSpider.spiders']
NEWSPIDER_MODULE = 'MaotumeishiSpider.spiders'
# DOWNLOADER_MIDDLEWARES = {
# 'MaotumeishiSpider.middlewares.ProxyMiddleWare': 100,
#
# 'scrapy.downloadermiddlewares.httpproxy.HttpProxyMiddleware': None,
# 'scrapy.downloadermiddlewares.defaultheaders.DefaultHeadersMiddleware': None,
# }
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'MaotumeishiSpider (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'MaotumeishiSpider.middlewares.MaotumeishispiderSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# DOWNLOADER_MIDDLEWARES = {
# 'MaotumeishiSpider.middlewares.MaotumeishispiderDownloaderMiddleware': 543,
# }
# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
'MaotumeishiSpider.pipelines.CityItemPipeline': 300,
# 'MaotumeishiSpider.pipelines.RestaurantItemPipeline': 300,
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
| [
"544735084@qq.com"
] | 544735084@qq.com |
178900f468bbceb57c8dc5c3908eb0a354523df7 | 98ebc4eb9761a9188c66449c28f225db28e7fd85 | /stemmer.py | 81c90c7bc5e8f2b1597d9ac2e0b4f88600407b03 | [] | no_license | kalwargupta/nltk | 58501e26f99355e01c4f6d4fa3aa8c2e683809b9 | fe3e3d94321375b830b8b3e28fbf75367c6f2828 | refs/heads/master | 2020-04-18T02:05:55.635335 | 2019-02-07T08:47:11 | 2019-02-07T08:47:11 | 167,147,238 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 583 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 23 15:16:39 2019
@author: jeetu
"""
#importing stemming library
from nltk.stem import PorterStemmer
from nltk.tokenize import word_tokenize
ps=PorterStemmer()
example_words=["python","pythoned","pythoning","pythoner","pythonly"]
#stemming the example words
#for w in example_words:
# print (ps.stem(w))
new_text="It is very important to be pythonly while you are pyhtoning with python. All pythoners have pythoned poorly at least once."
words=word_tokenize(new_text)
for w in words:
print(ps.stem(w)) | [
"kalwar.gupta@gmail.com"
] | kalwar.gupta@gmail.com |
e0cc1664020a79f7d3bbcb9840db8d7c3914b9ad | 8c6ecf8a78d1439e1d5cdf7a67656852676c7376 | /backend/backend/settings.py | bc51a2ddefed674459ad32370d7d20d8ef57e45f | [] | no_license | triangle959/JoJoZu_backend | d58366363c20fc0009f7117e25a0c1dffeedd52a | 5f66db7fcc3818a53193cd838850709bd56e6226 | refs/heads/master | 2021-01-06T10:31:11.071068 | 2020-05-09T01:38:40 | 2020-05-09T01:38:40 | 241,296,548 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,491 | py | """
Django settings for backend project.
Generated by 'django-admin startproject' using Django 2.1.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
from .config import DB_NAME,DB_IP,DB_PORT,USERNAME,PASSWORD
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '@**f20fy%wugc$h*n2g=n6s#t#f9lu@9h#yat4i@4q=jr5gyor'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'corsheaders',
'rest_framework',
'rest_framework_mongoengine',
'zufang',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
# 'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'backend.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'backend.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
import mongoengine
# 连接mongodb中数据库名称为mongotest5的数据库
conn = mongoengine.connect(DB_NAME, host=DB_IP, port=DB_PORT, username=USERNAME, password=PASSWORD,
authentication_source='admin', authentication_mechanism='SCRAM-SHA-1')
CACHES = {
"default": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": "redis://127.0.0.1:6379/1",
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
}
}
}
SESSION_ENGINE = "django.contrib.sessions.backends.cache"
SESSION_CACHE_ALIAS = "default"
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'zh-Hans'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
REST_FRAMEWORK = {
'PAGE_SIZE': 10,
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',
}
# 跨域增加忽略
CORS_ALLOW_CREDENTIALS = True
CORS_ORIGIN_ALLOW_ALL = True
# CORS_ORIGIN_WHITELIST = (
# '*'
# )
CORS_ALLOW_METHODS = (
'DELETE',
'GET',
'OPTIONS',
'PATCH',
'POST',
'PUT',
'VIEW',
)
CORS_ALLOW_HEADERS = (
'XMLHttpRequest',
'X_FILENAME',
'accept-encoding',
'authorization',
'content-type',
'dnt',
'origin',
'user-agent',
'x-csrftoken',
'x-requested-with',
'Pragma',
) | [
"976264593@qq.com"
] | 976264593@qq.com |
9443737a9ce5e37cccc42c6d00b4967de41f1ab4 | 1358a2450ec6c499ad1f67b38e42a21278857561 | /Hello/urls.py | d83b7cd99eba0d892b6dfaa8533d44c38d9a74df | [] | no_license | atharvparamane/School_Admission_App_using_Django | 4926c90351558cccd462f8ab13fa1f018c457b06 | 803ea27f699b2b0d5040616960ab18eae4c70713 | refs/heads/master | 2023-05-04T05:43:10.612064 | 2021-05-29T05:39:10 | 2021-05-29T05:39:10 | 371,888,525 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 918 | py | """Hello URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
admin.site.site_header = "KIDZEE"
admin.site.site_title = "KIDZEE Admin Portal"
admin.site.index_title = "Welcome to KIDZEE"
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('home.urls'))
]
| [
"atharvparamane111@gmail.com"
] | atharvparamane111@gmail.com |
cbb72df0e716aafe771b5a5fbf84c2dd9cc544ea | fde9a4f3db85d2f093618d4a790868674c6fbac0 | /test/sample.py | 79489942d8221991e8235e3c171fa2248774f80f | [] | no_license | CJey/py-skel | b8f2254004bea87ad9d34d3cd0a5d733b3b59c65 | 95e6ddb4baf0fdbe597ec10dc9d4be9526fd03e1 | refs/heads/master | 2020-03-18T23:35:14.903378 | 2018-05-30T10:19:59 | 2018-05-30T10:19:59 | 135,412,426 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 776 | py | #! /usr/bin/env python3.5
# -*- coding: utf-8 -*-
import os, sys
__py_dir__ = os.path.split(os.path.realpath(__file__))[0]
__top_dir__ = os.path.dirname(__py_dir__)
sys.path.insert(0, os.path.join(__top_dir__, '3rd'))
sys.path.insert(0, os.path.join(__top_dir__, 'src'))
import config
config.Debug = True
config.DebugFlask = True
import json, time
Cases = [
'Hello, world',
]
def main():
for v in Cases:
_ts = time.time()
data = v
_te = time.time()
data = json.dumps(data, ensure_ascii=False, sort_keys=True, indent=4)
print(data)
print('Assumed: {}ms'.format(round((_te - _ts) * 1000)))
print('---')
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt as ki:
pass
| [
"code@cjey.me"
] | code@cjey.me |
67588e159550a7f2fe20b4131a6aa69ebbcb1372 | c87f80872d810afbd23e512bf25541bda891935f | /apps/travel/migrations/0011_auto_20161122_0247.py | 64c36f32e4ca0595b693ff1c3a85fd07a7b6c0df | [] | no_license | JonStults/travelBuddy | 2713588cd56611cb18cc1da0e7820995000e1fb8 | 73ee9c6e478efb7219f40f710df923518e1bb310 | refs/heads/master | 2020-06-29T11:00:25.003318 | 2016-11-22T04:01:24 | 2016-11-22T04:01:24 | 74,432,821 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,349 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-11-22 02:47
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('travel', '0010_auto_20161122_0247'),
]
operations = [
migrations.CreateModel(
name='Join',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
),
migrations.CreateModel(
name='Location',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('destination', models.CharField(blank=True, max_length=45, null=True)),
('description', models.CharField(blank=True, max_length=255, null=True)),
('start_date', models.CharField(blank=True, max_length=45, null=True)),
('end_date', models.CharField(blank=True, max_length=45, null=True)),
],
),
migrations.CreateModel(
name='Users',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=45)),
('username', models.CharField(blank=True, max_length=45, null=True)),
('password', models.CharField(max_length=45)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
),
migrations.AddField(
model_name='location',
name='user_id',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='travel.Users'),
),
migrations.AddField(
model_name='join',
name='location_id',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='travel.Location'),
),
migrations.AddField(
model_name='join',
name='user_id',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='travel.Users'),
),
]
| [
"jdstults1@gmail.com"
] | jdstults1@gmail.com |
f64ad220ad893eaa4dd04efc33033e1616faa93b | 9c7b2965396867b7d1459fafacd87b0ed14959c3 | /StatisticalAnalysis/14_analyse_change_in_f+s_types.py | 578050b154d3613a6fd7014c93129f525a6de595 | [] | no_license | clejae/forland_repo | 461cd7fcd85615c2d33b0a5985d5d8ee37164032 | 3387eed69fc3a60e1d3a948b12fe23538f0b79da | refs/heads/master | 2023-08-17T06:30:37.571058 | 2023-08-09T08:46:49 | 2023-08-09T08:46:49 | 241,071,590 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 45,693 | py | #
# github Repo: https://github.com/clejae
# ------------------------------------------ LOAD PACKAGES ---------------------------------------------------#
import os
import numpy as np
from osgeo import gdal
import time
import json
import pandas as pd
import operator
import plotly.graph_objects as go
from functools import reduce
# ------------------------------------------ START TIME ------------------------------------------------------#
stime = time.strftime("%a, %d %b %Y %H:%M:%S", time.localtime())
print("start: " + stime)  # log the script start time
# ------------------------------------------ USER INPUT ------------------------------------------------#
# Working directory; all relative data paths in this script are resolved against it.
WD = r'C:\Users\IAMO\Documents\work_data\cst_paper'
# Federal states to process: file-name abbreviation and the analysis periods for
# which crop-sequence-type (CST) rasters exist.
# NOTE(review): 'periods' are presumably 1-based indices into PERIODS below — confirm.
FS_DICT = {'Brandenburg': {'abbreviation': 'BB',
                           'periods': [1, 2, 3]},
           'Bavaria': {'abbreviation': 'BV',
                       'periods': [1, 2, 3]},
           'Lower Saxony': {'abbreviation': 'LS',
                            'periods': [3]},
           'Saxony-Anhalt': {'abbreviation': 'SA',
                             'periods': [2, 3]}}
# Year ranges of the three analysis periods.
PERIODS = ['2005-2011', '2008-2014', '2012-2018']
# ------------------------------------------ DEFINE FUNCTIONS ------------------------------------------------#
os.chdir(WD)
## 1. Loop over periods
## 2. For each federal state (and tile), check if CSTs are available (for the current period)
## 3. If not, use 14 as indication for no available CSTs in this period (create array); if yes, use the existing array
## 4. Reclassify 255 to 15 as indication "no CSTs classified"
## 5. Multiply period1 * 10000, period2 * 100 and period3 * 1
## 6. Add all arrays up
## 7. Count occurrences
def count_function(fs_abbr, periods):
    """Count how often every concatenated crop-sequence-type (CST) code occurs.

    For each 15 km tile of the federal state, the per-period CST rasters are
    stacked into a single integer code (period1 * 10000 + period2 * 100 +
    period3 * 1), so every period contributes two digits. A missing raster is
    encoded as 14 ("no CSTs available for this state/period"); the nodata
    value 255 is re-encoded as 15 ("no CST classified"). The per-code pixel
    counts, accumulated over all tiles, are written to
    data/tables/changes_in_sequences/<fs_abbr>_changes_in_sequence.json.

    :param fs_abbr: federal-state abbreviation used in all file names.
    :param periods: list of period identifiers (expected length <= 3).
    """
    with open(r'data\raster\tile_list_{}.txt'.format(fs_abbr)) as file:
        tiles_lst = [line.strip() for line in file.readlines()]

    counts_total = {}

    for tile in tiles_lst:
        period_arrays = []
        for i, period in enumerate(periods):
            # Digit position of this period inside the concatenated code.
            factor = {0: 10000, 1: 100, 2: 1}[i]
            cst_pth = rf'data\raster\grid_15km\{tile}\{fs_abbr}_{period}_CropSeqType_clean.tif'
            if os.path.exists(cst_pth):
                cst_arr = gdal.Open(cst_pth).ReadAsArray()
            else:
                # 14 is a CST value that cannot occur naturally; it marks
                # "no CSTs calculated for this state/period".
                # NOTE(review): assumes every tile raster is 3000x3000 px — confirm.
                cst_arr = np.full((3000, 3000), 14)
            # 15 cannot occur either; it replaces nodata (255) so that every
            # period contributes exactly two digits to the code.
            cst_arr[cst_arr == 255] = 15
            period_arrays.append(cst_arr.astype(np.int32) * factor)

        conc_arr = np.sum(period_arrays, axis=0)
        # Tally each unique concatenated code and fold it into the state total.
        values, pixel_counts = np.unique(conc_arr, return_counts=True)
        for value, count in zip(values, pixel_counts):
            counts_total[str(value)] = counts_total.get(str(value), 0) + count
        print(fs_abbr, tile, "done!")

    # JSON output stores the counts as strings (kept for downstream parsing).
    counts_total = {key: str(val) for key, val in counts_total.items()}
    json_out = rf'data\tables\changes_in_sequences\{fs_abbr}_changes_in_sequence.json'
    with open(json_out, "w") as outfile:
        json.dump(counts_total, outfile, indent=4)
def analyze_function(pth, fs_abbr, out_folder=None):
with open(pth) as json_file:
count_dict = json.load(json_file)
## for each count-dictionary:
## delete key 151515
for k in ['151515', '141515']:
if k in count_dict:
count_dict.pop(k)
ana_dict = {}
## This is the area of each period for which a cst could be identified
area_per1 = 0
area_per2 = 0
area_per3 = 0
## separate each keys at 2. and 4. digit (xx-xx-xx)
## in each key: for each two number digit replace 15 with 00 and 14 with xx
for key in count_dict:
k1 = key[:2]
k2 = key[2:4]
k3 = key[4:]
if k1 == '15':
k1 = '00'
elif k1 == '14':
k1 = 'xx'
else:
area_per1 += int(count_dict[key])
if k2 == '15':
k2 = '00'
elif k2 == '14':
k2 = 'xx'
else:
area_per2 += int(count_dict[key])
if k3 == '15':
k3 = '00'
elif k3 == '14':
k3 = 'xx'
else:
area_per3 += int(count_dict[key])
newkey = k1 + '-' + k2 + '-' + k3
ana_dict[newkey] = int(count_dict[key])
## create ranking of all change sequences
ana_dict = dict(sorted(ana_dict.items(), key=operator.itemgetter(1), reverse=True))
## calculate shares of change sequences
total_area = sum(ana_dict.values())
share_dict = {key: ana_dict[key] / total_area for key in ana_dict}
## identify biggest change sequences that make up > 50/75% --> create table
big_dict = {}
cum_share = 0
for i, key in enumerate(share_dict):
if cum_share > .5:
break
cum_share += share_dict[key]
big_dict[key] = cum_share
## calculate area of structural types (first digit of each two number digits)
## for each change sequence
str_dict = {}
fct_dict = {}
ana_l_dict1 = {}
str_l_dict1 = {}
fct_l_dict1 = {}
ana_l_dict2 = {}
str_l_dict2 = {}
fct_l_dict2 = {}
ana_l_dict3 = {}
str_l_dict3 = {}
fct_l_dict3 = {}
for key in ana_dict:
ks = key.split('-')
k1 = ks[0]
k2 = ks[1]
k3 = ks[2]
strkey = k1[0] + '-' + k2[0] + '-' + k3[0]
fctkey = k1[1] + '-' + k2[1] + '-' + k3[1]
if strkey not in str_dict:
str_dict[strkey] = ana_dict[key]
else:
str_dict[strkey] += ana_dict[key]
if fctkey not in fct_dict:
fct_dict[fctkey] = ana_dict[key]
else:
fct_dict[fctkey] += ana_dict[key]
## for plotting
kl1 = k1 + 'a' + '-' + k2 + 'b'
kl2 = k2 + 'b' + '-' + k3 + 'c'
kl3 = k1 + 'a' + '-' + k3 + 'c'
if kl1 not in ana_l_dict1:
ana_l_dict1[kl1] = ana_dict[key]
else:
ana_l_dict1[kl1] += ana_dict[key]
if kl2 not in ana_l_dict2:
ana_l_dict2[kl2] = ana_dict[key]
else:
ana_l_dict2[kl2] += ana_dict[key]
if kl3 not in ana_l_dict3:
ana_l_dict3[kl3] = ana_dict[key]
else:
ana_l_dict3[kl3] += ana_dict[key]
## for plotting
kstr1 = k1[0] + 'a' + '-' + k2[0] + 'b'
kstr2 = k2[0] + 'b' + '-' + k3[0] + 'c'
kstr3 = k1[0] + 'a' + '-' + k3[0] + 'c'
if kstr1 not in str_l_dict1:
str_l_dict1[kstr1] = ana_dict[key]
else:
str_l_dict1[kstr1] += ana_dict[key]
if kstr2 not in str_l_dict2:
str_l_dict2[kstr2] = ana_dict[key]
else:
str_l_dict2[kstr2] += ana_dict[key]
if kstr3 not in str_l_dict3:
str_l_dict3[kstr3] = ana_dict[key]
else:
str_l_dict3[kstr3] += ana_dict[key]
## for plotting
kfct1 = k1[1] + 'a' + '-' + k2[1] + 'b'
kfct2 = k2[1] + 'b' + '-' + k3[1] + 'c'
kfct3 = k1[1] + 'a' + '-' + k3[1] + 'c'
if kfct1 not in fct_l_dict1:
fct_l_dict1[kfct1] = ana_dict[key]
else:
fct_l_dict1[kfct1] += ana_dict[key]
if kfct2 not in fct_l_dict2:
fct_l_dict2[kfct2] = ana_dict[key]
else:
fct_l_dict2[kfct2] += ana_dict[key]
if kfct3 not in fct_l_dict3:
fct_l_dict3[kfct3] = ana_dict[key]
else:
fct_l_dict3[kfct3] += ana_dict[key]
def create_df_from_dict(in_dict, out_pth=None):
df = pd.DataFrame.from_dict(data=in_dict, orient='index')
if df.empty:
return df
df.reset_index(inplace=True)
df.columns = ['change sequence', 'px_count']
df.sort_values(by='px_count', inplace=True, ascending=False)
df['share'] = round(df['px_count'] / df['px_count'].sum() * 100, 1)
df['cum_share'] = df['share'].cumsum()
if out_pth:
df.to_csv(out_pth, sep=';')
return df
out_pth = out_folder + f'/{fs_abbr}-complete_change_sequences.csv'
df_orig = create_df_from_dict(ana_dict, out_pth)
out_pth = out_folder + f'/{fs_abbr}-structural_change_sequences.csv'
df_str = create_df_from_dict(str_dict, out_pth)
out_pth = out_folder + f'/{fs_abbr}-functional_change_sequences.csv'
df_fct = create_df_from_dict(fct_dict, out_pth)
## get total area, including areas that are newly cultivated or abandoned
ana_l_dict1_area = ana_l_dict1.copy()
ana_l_dict2_area = ana_l_dict2.copy()
ana_l_dict3_area = ana_l_dict3.copy()
for key in ["00a-00b", "xxa-xxb", "xxa-00b"]:
if key in ana_l_dict1_area:
ana_l_dict1_area.pop(key)
for key in ["00b-00c", "xxb-xxc", "xxb-00c"]:
if key in ana_l_dict2_area:
ana_l_dict2_area.pop(key)
for key in ["00a-00c", "xxa-xxc", "xxa-00c"]:
if key in ana_l_dict3_area:
ana_l_dict3_area.pop(key)
total_area1 = sum(ana_l_dict1_area.values())
total_area2 = sum(ana_l_dict2_area.values())
total_area3 = sum(ana_l_dict3_area.values())
def create_plot_lists_from_dict(in_dict):
labels = []
source = []
target = []
value = []
label_to_index = {}
i = 0
for key in in_dict:
ks = key.split('-')
k1 = ks[0]
k2 = ks[1]
if k1 not in labels:
labels.append(k1)
label_to_index[k1] = i
i += 1
if k2 not in labels:
labels.append(k2)
label_to_index[k2] = i
i += 1
source.append(label_to_index[k1])
target.append(label_to_index[k2])
value.append(in_dict[key])
link = {'source': source,
'target': target,
'value': value}
return link, labels
def plot_sankey(link, labels, title):
fig = go.Figure(data=[go.Sankey(
node=dict(
pad=15,
thickness=20,
line=dict(color="black", width=0.5),
label=labels,
color="blue"
),
link=link)])
fig.update_layout(title_text=title, font_size=10)
fig.show()
## Exclude all change sequence where in one period there was no cst
# ana_l_dict2 = {key: ana_l_dict2[key] for key in ana_l_dict2 if '0' not in key}
# str_l_dict2 = {key: str_l_dict2[key] for key in str_l_dict2 if '0' not in key}
# fct_l_dict2 = {key: fct_l_dict2[key] for key in fct_l_dict2 if '0' not in key}
## exclude stable change sequences
def exclude_stable_change_sequences(in_dict):
out_dict = {}
stable_dict = {"stable_total_area": 0}
for key in in_dict:
ks = key.split('-')
ks = [k[:-1] for k in ks]
ks = list(set(ks))
if len(ks) > 1:
out_dict[key] = in_dict[key]
else:
if ks != ['00']:
stable_dict["stable_total_area"] += in_dict[key]
if ks[0] not in stable_dict:
stable_dict["stable_" + ks[0]] = in_dict[key]
else:
stable_dict["stable_" + ks[0]] += in_dict[key]
return out_dict, stable_dict
ana_l_dict1_stable_excl, ana_stable_dict1 = exclude_stable_change_sequences(ana_l_dict1)
str_l_dict1_stable_excl, str_stable_dict1 = exclude_stable_change_sequences(str_l_dict1)
fct_l_dict1_stable_excl, fct_stable_dict1 = exclude_stable_change_sequences(fct_l_dict1)
ana_l_dict2_stable_excl, ana_stable_dict2 = exclude_stable_change_sequences(ana_l_dict2)
str_l_dict2_stable_excl, str_stable_dict2 = exclude_stable_change_sequences(str_l_dict2)
fct_l_dict2_stable_excl, fct_stable_dict2 = exclude_stable_change_sequences(fct_l_dict2)
ana_l_dict3_stable_excl, ana_stable_dict3 = exclude_stable_change_sequences(ana_l_dict3)
str_l_dict3_stable_excl, str_stable_dict3 = exclude_stable_change_sequences(str_l_dict3)
fct_l_dict3_stable_excl, fct_stable_dict3 = exclude_stable_change_sequences(fct_l_dict3)
## for complete sequences identify 10 or 20 largest changes
# link_ana, labels_ana = create_plot_lists_from_dict(ana_l_dict2)
# plot_sankey(link_ana, labels_ana, "Complete CSTs")
# link_str, labels_str = create_plot_lists_from_dict(str_l_dict1)
# plot_sankey(link_str, labels_str, f"{fs_abbr}-Structural diversity - all changes - 1.-2.")
# link_fct, labels_fct = create_plot_lists_from_dict(fct_l_dict1)
# plot_sankey(link_fct, labels_fct, f"{fs_abbr}-Functional diversity - all changes - 1.-2.")
# link_str, labels_str = create_plot_lists_from_dict(str_l_dict2)
# plot_sankey(link_str, labels_str, f"{fs_abbr}-Structural diversity - all changes - 2.-3.")
# link_fct, labels_fct = create_plot_lists_from_dict(fct_l_dict2)
# plot_sankey(link_fct, labels_fct, f"{fs_abbr}-Functional diversity - all changes - 2.-3.")
# link_str, labels_str = create_plot_lists_from_dict(str_l_dict3)
# plot_sankey(link_str, labels_str, f"{fs_abbr}-Structural diversity - all changes - 1.-3.")
# link_fct, labels_fct = create_plot_lists_from_dict(fct_l_dict3)
# plot_sankey(link_fct, labels_fct, f"{fs_abbr}-Functional diversity - all changes - 1.-3.")
## identifiy largest losses and largest gains
def identify_largest_changes(in_dict):
out_dict = {}
loss_key_lst = []
gain_key_lst = []
for key in in_dict:
ks = key.split('-')
k_loss = ks[0]
k_gain = ks[1]
if k_loss not in loss_key_lst:
loss_key_lst.append(k_loss)
if k_gain not in gain_key_lst:
gain_key_lst.append(k_gain)
for k in loss_key_lst:
loss_dict = {}
for key in in_dict:
ks = key.split('-')
k_loss = ks[0]
if k_loss == k:
loss_dict[key] = in_dict[key]
max_loss_key = max(loss_dict, key=loss_dict.get)
out_dict[max_loss_key] = loss_dict[max_loss_key]
for k in gain_key_lst:
gain_dict = {}
for key in in_dict:
ks = key.split('-')
k_gain = ks[-1]
if k_gain == k:
gain_dict[key] = in_dict[key]
max_gain_key = max(gain_dict, key=gain_dict.get)
out_dict[max_gain_key] = gain_dict[max_gain_key]
return out_dict
str_l_dict1b = identify_largest_changes(str_l_dict1)
fct_l_dict1b = identify_largest_changes(fct_l_dict1)
str_l_dict2b = identify_largest_changes(str_l_dict2)
fct_l_dict2b = identify_largest_changes(fct_l_dict2)
str_l_dict3b = identify_largest_changes(str_l_dict3)
fct_l_dict3b = identify_largest_changes(fct_l_dict3)
# link_str, labels_str = create_plot_lists_from_dict(str_l_dict1b)
# plot_sankey(link_str, labels_str, f'{fs_abbr}-Structural diversity - largest changes - 1.-2.')
# link_fct, labels_fct = create_plot_lists_from_dict(fct_l_dict1b)
# plot_sankey(link_fct, labels_fct, f'{fs_abbr}-Functional diversity - largest changes - 1.-2.')
# link_str, labels_str = create_plot_lists_from_dict(str_l_dict2b)
# plot_sankey(link_str, labels_str, f'{fs_abbr}-Structural diversity - largest changes - 2.-3.')
# link_fct, labels_fct = create_plot_lists_from_dict(fct_l_dict2b)
# plot_sankey(link_fct, labels_fct, f'{fs_abbr}-Functional diversity - largest changes - 2.-3.')
# link_str, labels_str = create_plot_lists_from_dict(str_l_dict3b)
# plot_sankey(link_str, labels_str, f'{fs_abbr}-Structural diversity - largest changes - 1.-3.')
# link_fct, labels_fct = create_plot_lists_from_dict(fct_l_dict3b)
# plot_sankey(link_fct, labels_fct, f'{fs_abbr}-Functional diversity - largest changes - 1.-3.')
## Calculate net fluxes between csts
def calculate_net_fluxes(in_dict):
out_dict = {}
done_lst = []
for key in in_dict:
if key not in done_lst:
ks = key.split('-')
keys = [k[:-1] for k in ks]
exts = [k[-1] for k in ks]
newkey = keys[1]+exts[0] + '-' + keys[0]+exts[1]
if newkey in in_dict:
change = in_dict[key] - in_dict[newkey]
if change > 0:
out_dict[key] = change
else:
out_dict[newkey] = -change
else:
out_dict[key] = in_dict[key]
done_lst.append(key)
done_lst.append(newkey)
return out_dict
ana_flux_dict1 = calculate_net_fluxes(ana_l_dict1)
str_flux_dict1 = calculate_net_fluxes(str_l_dict1)
fct_flux_dict1 = calculate_net_fluxes(fct_l_dict1)
ana_flux_dict2 = calculate_net_fluxes(ana_l_dict2)
str_flux_dict2 = calculate_net_fluxes(str_l_dict2)
fct_flux_dict2 = calculate_net_fluxes(fct_l_dict2)
ana_flux_dict3 = calculate_net_fluxes(ana_l_dict3)
str_flux_dict3 = calculate_net_fluxes(str_l_dict3)
fct_flux_dict3 = calculate_net_fluxes(fct_l_dict3)
# link_str, labels_str = create_plot_lists_from_dict(str_flux_dict1)
# plot_sankey(link_str, labels_str, "Structural diversity - net changes - 1.-2.")
# link_fct, labels_fct = create_plot_lists_from_dict(fct_flux_dict1)
# plot_sankey(link_fct, labels_fct, "Functional diversity - net changes - 1.-2.")
# link_str, labels_str = create_plot_lists_from_dict(str_flux_dict2)
# plot_sankey(link_str, labels_str, "Structural diversity - net changes - 2.-3.")
# link_fct, labels_fct = create_plot_lists_from_dict(fct_flux_dict2)
# plot_sankey(link_fct, labels_fct, "Functional diversity - net changes - 2.-3.")
# link_str, labels_str = create_plot_lists_from_dict(str_flux_dict3)
# plot_sankey(link_str, labels_str, "Structural diversity - net changes - 1.-3.")
# link_fct, labels_fct = create_plot_lists_from_dict(fct_flux_dict3)
# plot_sankey(link_fct, labels_fct, "Functional diversity - net changes - 1.-3.")
df_orig1 = create_df_from_dict(ana_flux_dict1)
df_str1 = create_df_from_dict(str_flux_dict1)
df_fct1 = create_df_from_dict(fct_flux_dict1)
df_orig2 = create_df_from_dict(ana_flux_dict2)
df_str2 = create_df_from_dict(str_flux_dict2)
df_fct2 = create_df_from_dict(fct_flux_dict2)
df_orig3 = create_df_from_dict(ana_flux_dict3)
df_str3 = create_df_from_dict(str_flux_dict3)
df_fct3 = create_df_from_dict(fct_flux_dict3)
    def classify_structural_and_functional_change(df, out_pth):
        """Classify each change sequence into structural and functional change types.

        Expects *df* to carry a "change sequence" column of the form
        "SF-SF" (source/target pairs of one structural digit and one
        functional digit, "x" = unclassified) plus a "px_count" column.
        Adds the columns "structural_change", "sprwin_change",
        "leacer_change", "functional_change" and "overall_change" and
        returns the (mutated) frame; an empty frame is returned unchanged.

        NOTE(review): *out_pth* is never used inside this function — the
        Excel writing happens in calc_stats_from_change_sequences via a
        closure variable of the same name. Confirm before removing the
        parameter, since all call sites pass it.
        """
        if df.empty:
            return df
        # Split "SF-SF" into source/target structural (1st char) and
        # functional (2nd char) codes; "x" (unclassified) becomes "0".
        df["source_str"] = df["change sequence"].apply(lambda row: row.split('-')[0][0])
        df["source_str"] = df["source_str"].str.replace("x", "0")
        df["target_str"] = df["change sequence"].apply(lambda row: row.split('-')[1][0])
        df["target_str"] = df["target_str"].str.replace("x", "0")
        df["source_fct"] = df["change sequence"].apply(lambda row: row.split('-')[0][1])
        df["source_fct"] = df["source_fct"].str.replace("x", "0")
        df["target_fct"] = df["change sequence"].apply(lambda row: row.split('-')[1][1])
        df["target_fct"] = df["target_fct"].str.replace("x", "0")
        # Structural change: sign of the code difference -> declining /
        # stable / increasing; code "0" overrides (order of the .loc
        # assignments matters: target "0" wins over source "0").
        df["change_str"] = df["target_str"].astype(int) - df["source_str"].astype(int)
        bins = [-99, -1, 0, 99]
        labels = ["declining", "stable", "increasing"]
        df["structural_change"] = pd.cut(df["change_str"], bins, labels=labels)
        df["structural_change"] = df["structural_change"].astype(str)
        df.loc[df["source_str"] == "0", "structural_change"] = "newly classified"
        df.loc[df["target_str"] == "0", "structural_change"] = "no 2nd classification"
        df["change_fct"] = df["target_fct"].astype(int) - df["source_fct"].astype(int)
        # Two-digit key "source_fct + target_fct" drives the functional lookups below.
        df["change_fct_str"] = df["source_fct"] + df["target_fct"]
        # Lookup table: functional code pair -> spring/winter (s/w) balance change.
        sprwin_change_classification = {
            '15': 'Increasing spring shares to sw balance',
            '25': 'Stable sw balance',
            '35': 'Decreasing spring shares to sw balance',
            '45': 'Increasing spring shares to sw balance',
            '65': 'Decreasing spring shares to sw balance',
            '75': 'Increasing spring shares to sw balance',
            '85': 'Stable sw balance',
            '95': 'Decreasing spring shares to sw balance',
            '13': 'Increasing spring shares to s dominance',
            '23': 'Increasing spring shares to s dominance',
            '43': 'Increasing spring shares to s dominance',
            '53': 'Increasing spring shares to s dominance',
            '73': 'Increasing spring shares to s dominance',
            '83': 'Increasing spring shares to s dominance',
            '16': 'Increasing spring shares to s dominance',
            '26': 'Increasing spring shares to s dominance',
            '46': 'Increasing spring shares to s dominance',
            '56': 'Increasing spring shares to s dominance',
            '76': 'Increasing spring shares to s dominance',
            '86': 'Increasing spring shares to s dominance',
            '19': 'Increasing spring shares to s dominance',
            '29': 'Increasing spring shares to s dominance',
            '49': 'Increasing spring shares to s dominance',
            '59': 'Increasing spring shares to s dominance',
            '79': 'Increasing spring shares to s dominance',
            '89': 'Increasing spring shares to s dominance',
            '21': 'Decreasing spring shares to w only',
            '31': 'Decreasing spring shares to w only',
            '51': 'Decreasing spring shares to w only',
            '61': 'Decreasing spring shares to w only',
            '81': 'Decreasing spring shares to w only',
            '91': 'Decreasing spring shares to w only',
            '24': 'Decreasing spring shares to w only',
            '34': 'Decreasing spring shares to w only',
            '54': 'Decreasing spring shares to w only',
            '64': 'Decreasing spring shares to w only',
            '84': 'Decreasing spring shares to w only',
            '94': 'Decreasing spring shares to w only',
            '27': 'Decreasing spring shares to w only',
            '37': 'Decreasing spring shares to w only',
            '57': 'Decreasing spring shares to w only',
            '67': 'Decreasing spring shares to w only',
            '87': 'Decreasing spring shares to w only',
            '97': 'Decreasing spring shares to w only',
            '12': 'Increasing spring shares to sw balance',
            '32': 'Increasing spring shares to sw balance',
            '42': 'Increasing spring shares to sw balance',
            '62': 'Increasing spring shares to sw balance',
            '72': 'Increasing spring shares to sw balance',
            '92': 'Increasing spring shares to sw balance',
            '52': 'Stable sw balance',
            '58': 'Stable sw balance',
            '14': 'Stable w only',
            '41': 'Stable w only',
            '74': 'Stable w only',
            '47': 'Stable w only',
            '36': 'Stable s dominance',
            '96': 'Stable s dominance',
            '69': 'Stable s dominance',
            '63': 'Stable s dominance',
            '18': 'Decreasing spring shares to sw balance',
            '38': 'Decreasing spring shares to sw balance',
            '48': 'Decreasing spring shares to sw balance',
            '68': 'Decreasing spring shares to sw balance',
            '78': 'Decreasing spring shares to sw balance',
            '98': 'Decreasing spring shares to sw balance',
            '11': 'Stable w only',
            '22': 'Stable sw balance',
            '33': 'Stable s dominance',
            '44': 'Stable w only',
            '55': 'Stable sw balance',
            '66': 'Stable s dominance',
            '77': 'Stable w only',
            '88': 'Stable sw balance',
            '99': 'Stable s dominance',
            '28': 'Stable sw balance',
            '82': 'Stable sw balance',
            '17': 'Stable w only',
            '71': 'Stable w only',
            '39': 'Stable s dominance',
            '93': 'Stable s dominance',
            '01': 'Newly cultivated to w only',
            '02': 'Newly cultivated to sw balance',
            '03': 'Newly cultivated to s dominance',
            '04': 'Newly cultivated to w only',
            '05': 'Newly cultivated to sw balance',
            '06': 'Newly cultivated to s dominance',
            '07': 'Newly cultivated to w only',
            '08': 'Newly cultivated to sw balance',
            '09': 'Newly cultivated to s dominance',
        }
        # Lookup table: functional code pair -> leaf/cereal (l/c) balance change.
        leacer_change_classification = {
            '41': 'Decreasing leaf shares to c only',
            '51': 'Decreasing leaf shares to c only',
            '61': 'Decreasing leaf shares to c only',
            '71': 'Decreasing leaf shares to c only',
            '81': 'Decreasing leaf shares to c only',
            '91': 'Decreasing leaf shares to c only',
            '42': 'Decreasing leaf shares to c only',
            '52': 'Decreasing leaf shares to c only',
            '62': 'Decreasing leaf shares to c only',
            '72': 'Decreasing leaf shares to c only',
            '82': 'Decreasing leaf shares to c only',
            '92': 'Decreasing leaf shares to c only',
            '43': 'Decreasing leaf shares to c only',
            '53': 'Decreasing leaf shares to c only',
            '63': 'Decreasing leaf shares to c only',
            '73': 'Decreasing leaf shares to c only',
            '83': 'Decreasing leaf shares to c only',
            '93': 'Decreasing leaf shares to c only',
            '17': 'Increasing leaf shares to l dominance',
            '27': 'Increasing leaf shares to l dominance',
            '37': 'Increasing leaf shares to l dominance',
            '47': 'Increasing leaf shares to l dominance',
            '57': 'Increasing leaf shares to l dominance',
            '67': 'Increasing leaf shares to l dominance',
            '18': 'Increasing leaf shares to l dominance',
            '28': 'Increasing leaf shares to l dominance',
            '38': 'Increasing leaf shares to l dominance',
            '48': 'Increasing leaf shares to l dominance',
            '58': 'Increasing leaf shares to l dominance',
            '68': 'Increasing leaf shares to l dominance',
            '19': 'Increasing leaf shares to l dominance',
            '29': 'Increasing leaf shares to l dominance',
            '39': 'Increasing leaf shares to l dominance',
            '49': 'Increasing leaf shares to l dominance',
            '59': 'Increasing leaf shares to l dominance',
            '69': 'Increasing leaf shares to l dominance',
            '14': 'Increasing leaf shares to lc balance',
            '24': 'Increasing leaf shares to lc balance',
            '34': 'Increasing leaf shares to lc balance',
            '74': 'Decreasing leaf shares to lc balance',
            '84': 'Decreasing leaf shares to lc balance',
            '94': 'Decreasing leaf shares to lc balance',
            '16': 'Increasing leaf shares to lc balance',
            '26': 'Increasing leaf shares to lc balance',
            '36': 'Increasing leaf shares to lc balance',
            '76': 'Decreasing leaf shares to lc balance',
            '86': 'Decreasing leaf shares to lc balance',
            '96': 'Decreasing leaf shares to lc balance',
            '15': 'Increasing leaf shares to lc balance',
            '25': 'Increasing leaf shares to lc balance',
            '35': 'Increasing leaf shares to lc balance',
            '45': 'Stable lc balance',
            '65': 'Stable lc balance',
            '75': 'Decreasing leaf shares to lc balance',
            '85': 'Decreasing leaf shares to lc balance',
            '95': 'Decreasing leaf shares to lc balance',
            '11': 'Stable c only',
            '22': 'Stable c only',
            '33': 'Stable c only',
            '44': 'Stable lc balance',
            '55': 'Stable lc balance',
            '66': 'Stable lc balance',
            '77': 'Stable l dominance',
            '88': 'Stable l dominance',
            '99': 'Stable l dominance',
            '23': 'Stable c only',
            '56': 'Stable lc balance',
            '12': 'Stable c only',
            '21': 'Stable c only',
            '89': 'Stable l dominance',
            '87': 'Stable l dominance',
            '78': 'Stable l dominance',
            '98': 'Stable l dominance',
            '32': 'Stable c only',
            '54': 'Stable lc balance',
            '46': 'Stable lc balance',
            '64': 'Stable lc balance',
            '13': 'Stable c only',
            '31': 'Stable c only',
            '79': 'Stable l dominance',
            '97': 'Stable l dominance',
            '01': 'Newly cultivated to c only',
            '02': 'Newly cultivated to c only',
            '03': 'Newly cultivated to c only',
            '04': 'Newly cultivated to lc balance',
            '05': 'Newly cultivated to lc balance',
            '06': 'Newly cultivated to lc balance',
            '07': 'Newly cultivated to l dominance',
            '08': 'Newly cultivated to l dominance',
            '09': 'Newly cultivated to l dominance',
        }
        # Map code pairs to labels; pairs missing from the tables (e.g. codes
        # ending in "0") fall back to "No change", and a missing second
        # classification overrides both functional labels.
        df["sprwin_change"] = df["change_fct_str"].map(sprwin_change_classification)
        df["leacer_change"] = df["change_fct_str"].map(leacer_change_classification)
        # t = df.loc[df["sprwin_change"].isna()].copy()
        # missed = t["change_fct_str"].unique()
        df.loc[df["sprwin_change"].isna(), "sprwin_change"] = "No change"
        # t = df.loc[df["leacer_change"].isna()].copy()
        # missed = t["change_fct_str"].unique()
        df.loc[df["leacer_change"].isna(), "leacer_change"] = "No change"
        # df.loc[df["source_str"] == "0", "sprwin_change"] = "newly classified"
        # df.loc[df["source_str"] == "0", "leacer_change"] = "newly classified"
        df.loc[df["target_str"] == "0", "sprwin_change"] = "no 2nd classification"
        df.loc[df["target_str"] == "0", "leacer_change"] = "no 2nd classification"
        # Combine the two functional labels, drop the helper columns and
        # build the combined structural+functional label.
        df["functional_change"] = df["sprwin_change"] + ' - ' + df["leacer_change"]
        df.drop(columns=["source_str", "target_str", "source_fct", "target_fct", "change_str", "change_fct", "change_fct_str"], inplace=True)
        df["overall_change"] = df["structural_change"] + '-' + df["functional_change"]
        return df
def calc_stats_from_change_sequences(df, stable_dict, total_area):
if df.empty:
writer = pd.ExcelWriter(out_pth)
for c, df in enumerate([df, df, df, df, df, df]):
name = ['Changes_classified', 'combined_change_stats', 'struct_change_stats', 'funct_change_stats',
'stable_area', 'summary'][c]
df.to_excel(writer, sheet_name=name, index=False)
writer.save()
return df, df, df, df, df
stable_area = stable_dict["stable_total_area"]
change_area = total_area - stable_area
net_change_area1 = df["px_count"].sum()
newly_cultivated = df.loc[df["structural_change"] == "newly classified", "px_count"].sum()
abandoned = df.loc[df["structural_change"] == "no 2nd classification", "px_count"].sum()
net_change_area2 = net_change_area1 - newly_cultivated - abandoned
df_stable = pd.DataFrame.from_dict(data=stable_dict, orient='index').reset_index()
df_stable.rename(columns={"index": "class", 0: "px_count"}, inplace=True)
df_stable.sort_values(by="px_count", ascending=False, inplace=True)
df_stable.loc[df_stable["class"] == 'stable_00', "class"] = "no_cst_in_both_periods"
df_stable["share_of_total_area"] = round(df_stable["px_count"] / total_area * 100, 2)
df_stable["share_of_stable_area"] = round(df_stable["px_count"] / stable_area * 100, 2)
df_stable.loc[df_stable["class"] == 'no_cst_in_both_periods', "share_of_stable_area"] = 0
df_stat = df[["overall_change", "px_count"]].groupby("overall_change").sum().reset_index()
df_stat.sort_values(by="px_count", ascending=False, inplace=True)
df_stat["share_of_total_area"] = round(df_stat["px_count"] / total_area * 100, 2)
df_stat["share_of_change_area"] = round(df_stat["px_count"] / change_area * 100, 2)
df_stat["share_of_net_change_area_incl_new"] = round(df_stat["px_count"] / net_change_area1 * 100, 2)
df_stat["share_of_net_change_area_excl_new"] = round(df_stat["px_count"] / net_change_area2 * 100, 2)
df_str_stat = df[["structural_change", "px_count"]].groupby("structural_change").sum().reset_index()
df_str_stat.sort_values(by="px_count", ascending=False, inplace=True)
df_str_stat["share_of_total_area"] = round(df_str_stat["px_count"] / total_area * 100, 2)
df_str_stat["share_of_change_area"] = round(df_str_stat["px_count"] / change_area * 100, 2)
df_str_stat["share_of_net_change_area_incl_new"] = round(df_str_stat["px_count"] / net_change_area1 * 100, 2)
df_str_stat["share_of_net_change_area_excl_new"] = round(df_str_stat["px_count"] / net_change_area2 * 100, 2)
df_fct_stat = df[["functional_change", "px_count"]].groupby("functional_change").sum().reset_index()
df_fct_stat.sort_values(by="px_count", ascending=False, inplace=True)
df_fct_stat["share_of_total_area"] = round(df_fct_stat["px_count"] / total_area * 100, 2)
df_fct_stat["share_of_change_area"] = round(df_fct_stat["px_count"] / change_area * 100, 2)
df_fct_stat["share_of_net_change_area_incl_new"] = round(df_fct_stat["px_count"] / net_change_area1 * 100, 2)
df_fct_stat["share_of_net_change_area_excl_new"] = round(df_fct_stat["px_count"] / net_change_area2 * 100, 2)
df_lcc_stat = df[["leacer_change", "px_count"]].groupby("leacer_change").sum().reset_index()
df_lcc_stat.sort_values(by="px_count", ascending=False, inplace=True)
df_lcc_stat["share_of_total_area"] = round(df_lcc_stat["px_count"] / total_area * 100, 2)
df_lcc_stat["share_of_change_area"] = round(df_lcc_stat["px_count"] / change_area * 100, 2)
df_lcc_stat["share_of_net_change_area_incl_new"] = round(df_lcc_stat["px_count"] / net_change_area1 * 100, 2)
df_lcc_stat["share_of_net_change_area_excl_new"] = round(df_lcc_stat["px_count"] / net_change_area2 * 100, 2)
df_swc_stat = df[["sprwin_change", "px_count"]].groupby("sprwin_change").sum().reset_index()
df_swc_stat.sort_values(by="px_count", ascending=False, inplace=True)
df_swc_stat["share_of_total_area"] = round(df_swc_stat["px_count"] / total_area * 100, 2)
df_swc_stat["share_of_change_area"] = round(df_swc_stat["px_count"] / change_area * 100, 2)
df_swc_stat["share_of_net_change_area_incl_new"] = round(df_swc_stat["px_count"] / net_change_area1 * 100, 2)
df_swc_stat["share_of_net_change_area_excl_new"] = round(df_swc_stat["px_count"] / net_change_area2 * 100, 2)
df_summary = pd.DataFrame(data={
"class": ["total_area", "stable_area", "change_area", "net_change_area", "newly_cultivated", "abandoned"],
"area": [total_area, stable_area, change_area, net_change_area2, newly_cultivated, abandoned]})
writer = pd.ExcelWriter(out_pth)
for c, df in enumerate([df, df_stat, df_str_stat, df_fct_stat, df_stable, df_summary, df_lcc_stat, df_swc_stat]):
name = ['Changes_classified',
'combined_change_stats',
'struct_change_stats',
'funct_change_stats',
'stable_area',
'summary',
'leacer_change_stats',
'sprwin_change_stats'
][c]
df.to_excel(writer, sheet_name=name, index=False)
writer.save()
## use area of second period as it is the reference for
out_pth = out_folder + f'/{fs_abbr}-complete_change_sequences-net_changes-1_2.xlsx'
df_orig1 = classify_structural_and_functional_change(df_orig1, out_pth)
calc_stats_from_change_sequences(df_orig1, ana_stable_dict1, total_area1)
out_pth = out_folder + f'/{fs_abbr}-complete_change_sequences-net_changes-2_3.xlsx'
df_orig2 = classify_structural_and_functional_change(df_orig2, out_pth)
calc_stats_from_change_sequences(df_orig2, ana_stable_dict2, total_area2)
out_pth = out_folder + f'/{fs_abbr}-complete_change_sequences-net_changes-1_3.xlsx'
df_orig3 = classify_structural_and_functional_change(df_orig3, out_pth)
calc_stats_from_change_sequences(df_orig3, ana_stable_dict3, total_area3)
df_orig1 = create_df_from_dict(ana_l_dict1)
df_orig2 = create_df_from_dict(ana_l_dict2)
df_orig3 = create_df_from_dict(ana_l_dict3)
out_pth = out_folder + f'/{fs_abbr}-complete_change_sequences-total_changes-1_2.xlsx'
df_orig1 = classify_structural_and_functional_change(df_orig1, out_pth)
calc_stats_from_change_sequences(df_orig1, ana_stable_dict1,total_area1)
out_pth = out_folder + f'/{fs_abbr}-complete_change_sequences-total_changes-2_3.xlsx'
df_orig2 = classify_structural_and_functional_change(df_orig2, out_pth)
calc_stats_from_change_sequences(df_orig2, ana_stable_dict2, total_area2)
out_pth = out_folder + f'/{fs_abbr}-complete_change_sequences-total_changes-1_3.xlsx'
df_orig3 = classify_structural_and_functional_change(df_orig3, out_pth)
calc_stats_from_change_sequences(df_orig3, ana_stable_dict3, total_area3)
print(fs_abbr, "done!")
def combine_analysis_of_federal_states(fs_dict, changes='net_changes'):
    """Merge the per-federal-state change statistics into one summary workbook.

    Reads the per-state Excel outputs for each period comparison, converts
    pixel counts to area values, outer-merges all states on the common
    "class" column and writes summary_<changes>.xlsx.

    :param fs_dict: mapping of federal-state names to metadata carrying an
        'abbreviation' entry (typically the module-level FS_DICT).
    :param changes: 'net_changes' or 'total_changes' — selects which
        per-state result files are read.
    """
    sheet_keys = ["summary", "struct_change_stats", "funct_change_stats",
                  "combined_change_stats", "leacer_change_stats", "sprwin_change_stats"]
    all_periods = ['1_2', '2_3', '1_3']
    # One list of per-state frames per (period, sheet) combination.
    ana_dict = {f"periods_{period}": {key: [] for key in sheet_keys} for period in all_periods}
    for fs in fs_dict:
        # Consistency fix: look the abbreviation up in the passed-in mapping
        # instead of the module-level FS_DICT (callers pass FS_DICT anyway).
        fs_abbr = fs_dict[fs]['abbreviation']
        if fs_abbr == "LS":
            continue
        # "SA" only has results for the 2nd-3rd period comparison.
        periods = ['2_3'] if fs_abbr == "SA" else all_periods
        for period in periods:
            pth = rf'data\tables\changes_in_sequences\{fs_abbr}\{fs_abbr}-complete_change_sequences-{changes}-{period}.xlsx'
            for key in ana_dict[f"periods_{period}"]:
                df = pd.read_excel(pth, sheet_name=key)
                # Normalise the class column name and label the value column
                # with the state abbreviation so frames can be merged later.
                df.rename(columns={"area": f"{fs_abbr}",
                                   "px_count": f"{fs_abbr}",
                                   "structural_change": "class",
                                   "functional_change": "class",
                                   "overall_change": "class",
                                   "leacer_change": "class",
                                   "sprwin_change": "class"},
                          inplace=True)
                if df.empty:
                    df = pd.DataFrame({"class": [], f"{fs_abbr}": []})
                else:
                    df = df[["class", fs_abbr]]
                    # px * 25 / 10000 — presumably 5 m pixels converted to
                    # hectares; TODO confirm against the raster resolution.
                    df[fs_abbr] = round((df[fs_abbr] * 25) / 10000, 0)
                ana_dict[f"periods_{period}"][key].append(df)
    # Merge all states per (period, sheet) and add a cross-state total column.
    coll_dict = {key: [] for key in sheet_keys}
    for period in all_periods:
        for key in coll_dict:
            df = reduce(lambda df1, df2: pd.merge(df1, df2, on='class', how='outer'),
                        ana_dict[f"periods_{period}"][key])
            # numeric_only=True keeps the string "class" column out of the row
            # sums (plain df.sum(axis=1) raises on mixed dtypes in pandas >= 2.0).
            df["total_area"] = df.sum(axis=1, numeric_only=True)
            df.sort_values(by="total_area", ascending=False, inplace=True)
            # df.columns = pd.MultiIndex.from_product([[period], list(df.columns)])
            coll_dict[key].append(df)
    # Merge the three period comparisons per sheet into one frame each.
    merged = {key: reduce(lambda df1, df2: pd.merge(df1, df2, on='class', how='outer'),
                          coll_dict[key])
              for key in sheet_keys}
    out_pth = rf'data\tables\changes_in_sequences\summary_{changes}.xlsx'
    # Context manager replaces the deprecated writer.save() (removed in pandas 2.0).
    with pd.ExcelWriter(out_pth) as writer:
        for key, sheet_name in [("summary", 'summary'),
                                ("struct_change_stats", 'structural_change'),
                                ("funct_change_stats", 'functional_change'),
                                ("leacer_change_stats", 'leaf_cereal_change'),
                                ("sprwin_change_stats", 'spring_winter_change'),
                                ("combined_change_stats", 'combined_change')]:
            merged[key].to_excel(writer, sheet_name=sheet_name, index=True)
def main():
    """Run the change-in-sequence analysis for every federal state, then build the combined summary."""
    for state in FS_DICT:
        print(state)
        abbr = FS_DICT[state]['abbreviation']
        # count_function(fs_abbr=abbr, periods=PERIODS)
        json_pth = rf'data\tables\changes_in_sequences\{abbr}_changes_in_sequence.json'
        state_out_folder = rf'data\tables\changes_in_sequences\{abbr}'
        analyze_function(pth=json_pth, fs_abbr=abbr, out_folder=state_out_folder)
    # combine_analysis_of_federal_states(FS_DICT, changes='net_changes')
    combine_analysis_of_federal_states(FS_DICT, changes='total_changes')
if __name__ == '__main__':
main()
# ------------------------------------------ END TIME --------------------------------------------------------#
etime = time.strftime("%a, %d %b %Y %H:%M:%S", time.localtime())
print("start: " + stime)
print("end: " + etime)
# ------------------------------------------ UNUSED BUT USEFUL CODE SNIPPETS ---------------------------------#
## Old change classification
# change_classification = {
# "11": "no_change", "12": "to_spr_bal", "13": "to_spr_dom", "14": "to_leaf_bal",
# "15": "to_gen_bal",
# "16": "to_spr_dom_leaf_bal", "17": "to_leaf_dom", "18": "to_spr_bal_leaf_dom", "19": "to_spr_dom_leaf_dom",
#
# "21": "to_win_dom", "22": "no_change", "23": "to_spr_dom", "24": "to_win_dom_leaf_bal",
# "25": "to_gen_bal",
# "26": "to_spr_dom_leaf_bal", "27": "to_win_dom_leaf_dom", "28": "to_leaf_dom", "29": "to_spr_dom_leaf_dom",
#
# "31": "to_win_dom", "32": "to_spr_bal", "33": "no_change", "34": "to_win_dom_leaf_bal",
# "35": "to_gen_bal",
# "36": "to_leaf_bal", "37": "to_win_dom_leaf_dom", "38": "to_spr_bal_leaf_dom", "39": "to_leaf_dom",
#
# "41": "to_cer_dom", "42": "to_spr_bal_cer_dom", "43": "to_spr_dom_cer_dom", "44": "no_change",
# "45": "to_gen_bal",
# "46": "to_spr_dom", "47": "to_win_dom_leaf_dom", "48": "to_spr_bal_leaf_dom", "49": "to_spr_dom_leaf_dom",
#
# "51": "to_win_dom_cer_dom", "52": "to_cer_dom", "53": "to_spr_dom_cer_dom", "54": "to_win_dom",
# "55": "no_change",
# "56": "to_spr_dom", "57": "to_win_dom_leaf_dom", "58": "to_leaf_dom", "59": "to_spr_dom_leaf_dom",
#
# "61": "to_win_dom_cer_dom", "62": "to_spr_bal_cer_dom", "63": "to_cer_dom", "64": "to_win_dom",
# "65": "to_gen_bal",
# "66": "no_change", "67": "to_win_dom_leaf_dom", "68": "to_spr_bal_leaf_dom", "69": "to_leaf_dom",
#
# "71": "to_cer_dom", "72": "to_spr_bal_cer_dom", "73": "to_spr_dom_cer_dom", "74": "to_leaf_bal",
# "75": "to_gen_bal",
# "76": "to_spr_dom_leaf_bal", "77": "no_change", "78": "to_spr_bal", "79": "to_spr_dom",
#
# "81": "to_win_dom_cer_dom", "82": "to_cer_dom", "83": "to_spr_dom_cer_dom", "84": "to_win_dom_leaf_bal",
# "85": "to_gen_bal",
# "86": "to_spr_dom_leaf_bal", "87": "to_win_dom", "88": "no_change", "89": "to_spr_dom",
#
# "91": "to_win_dom_cer_dom", "92": "to_spr_bal_cer_dom", "93": "to_cer_dom", "94": "to_win_dom_leaf_bal",
# "95": "to_gen_bal",
# "96": "to_leaf_bal", "97": "to_win_dom", "98": "to_spr_bal", "99": "no_change",
# } | [
"clemens.jaenicke.1@hu-berlin.de"
] | clemens.jaenicke.1@hu-berlin.de |
109a2de5ccb07bcc2e29ac1703a55732ae42bf29 | 61a6ac24167f33133e4d0afe8a62f2144e4d8067 | /backend/api/views.py | 118ce64d7c80f004fa9763e9518620b8b0a92490 | [] | no_license | raccoonyy/select-fix | 2b10c069ba57450555c57af53a79dff4230bee9e | 96e2409f4512b849329edde88825b8e7f4f2455e | refs/heads/master | 2023-01-07T06:00:58.836969 | 2019-12-10T07:45:04 | 2019-12-10T07:45:04 | 101,131,765 | 3 | 2 | null | 2022-12-30T18:24:12 | 2017-08-23T03:06:03 | HTML | UTF-8 | Python | false | false | 333 | py | from rest_framework.decorators import api_view
from rest_framework.response import Response
from .hannanum import hannanum
@api_view(['GET', ])
def analyse(request):
    """Analyse the 'sentence' GET parameter with the hannanum tagger; 400 if missing."""
    sentence = request.GET.get('sentence')
    if sentence is not None:
        return Response(hannanum(sentence))
    return Response('request failed.', status=400)
| [
"raccoonyy@gmail.com"
] | raccoonyy@gmail.com |
890aeede8e1d1e255219d91c99a7e8adecb953b0 | 3dff4f6477398ba4b087c0b0fbac7a08bc7b5317 | /Challenge/lcof/lcof-16.py | 3e543e643219a1017e403eda115abfde51fda5a5 | [] | no_license | frozen007/algo-study | 1e8a45e0b9d3110e6eeceb7ee142acffc7d6faef | f13d820bf46eca423ac4bee6b71ca08c04466a29 | refs/heads/master | 2021-01-19T06:50:35.327045 | 2020-10-22T03:58:53 | 2020-10-22T03:58:53 | 7,502,073 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 683 | py | """
剑指 Offer(第 2 版)
https://leetcode-cn.com/problemset/lcof/
剑指 Offer 16. 数值的整数次方
https://leetcode-cn.com/problems/shu-zhi-de-zheng-shu-ci-fang-lcof/
"""
class Solution(object):
    def myPow(self, x, n):
        """
        Return x raised to the integer power n (Pow(x, n)).

        :type x: float
        :type n: int
        :rtype: float

        Iterative binary exponentiation, O(log |n|) multiplications.
        Fixes: the original delegated to module-level powx() and computed
        1/t for n < 0, which truncates to 0 for integer t under Python 2;
        it also kept a dead `t = 0` initializer.
        """
        if n == 0:
            return 1
        exp = abs(n)
        result = 1.0  # float accumulator avoids integer truncation for n < 0
        base = x
        while exp:
            if exp & 1:
                result *= base
            base *= base
            exp >>= 1
        return 1 / result if n < 0 else result
def powx(x, n):
    """
    Return x**n for integer n >= 1 via recursive exponentiation by squaring.

    Fix: use floor division (//) so the exponent stays an int under
    Python 3 — the original used true division (/), which turned the
    exponent into a float on every recursion level. Also drops the
    dead `t = 0` initializer.
    """
    if n == 1:
        return x
    if n == 2:
        return x * x
    if n % 2 == 0:
        half = powx(x, n // 2)
        return half * half
    half = powx(x, (n - 1) // 2)
    return half * half * x
"zhaomingyu@meituan.com"
] | zhaomingyu@meituan.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.