index int64 | repo_name string | branch_name string | path string | content string | import_graph string |
|---|---|---|---|---|---|
73,496 | CMU-IDS-2021/fp--05839-abby-jeff-kyle-will | refs/heads/main | /support/policy_analysis.py | # policy_analysis.py
# Data pre-processing for policy analysis visualization.
import altair as alt
import pandas as pd
import streamlit as st
def create_stacked_bar():
    """Render two linked Altair bar charts of country sentiment on AI regulation.

    Reads data/CountryVotesonAI.csv, collapses the four survey answer levels
    into binary Agree/Disagree totals, and draws (a) an overview bar chart of
    the Agree percentage per country and (b) a stacked Agree/Disagree
    breakdown for whichever country is clicked in the overview.
    """
    # read in data as csv
    COUNTRY_VOTES = pd.read_csv("data/CountryVotesonAI.csv")
    # combine totally agree/tend agree to one agree column for user clarity
    COUNTRY_VOTES["Agree"] = (
        COUNTRY_VOTES["Tend_Agree"] + COUNTRY_VOTES["Totally_Agree"]
    )
    # combine totally disagree/tend disagree to one disagree column for user clarity
    COUNTRY_VOTES["Disagree"] = (
        COUNTRY_VOTES["Tend_Disagree"] + COUNTRY_VOTES["Totally_Disagree"]
    )
    COUNTRY_VOTES = COUNTRY_VOTES.rename(
        columns={
            "Totally_Agree": "Totally Agree",
            "Tend_Agree": "Tend Agree",
            "Tend_Disagree": "Tend Disagree",
            "Totally_Disagree": "Totally Disagree",
        }
    )
    # Click-to-select a country in the overview chart; Hungary starts selected.
    country_selector = alt.selection_multi(
        fields=["Country"], init=[{"Country": "Hungary"}]
    )
    # transform fold is conducted so that we can utilize the stacked bar approach
    # and analyze the huge discrepancy between what world powers think
    st.title("World Power Belief on AI Safety and Regulation")
    all_country_data = (
        alt.Chart(COUNTRY_VOTES)
        .mark_bar()
        .encode(
            x=alt.X(
                "Agree:Q",
                title="Percent Representatives that Want AI Regulation",
                scale=alt.Scale(domain=[0, 100]),
            ),
            y=alt.Y("Country:N", title="Country"),
            # Selected country is highlighted; everything else is greyed out.
            color=alt.condition(
                country_selector, alt.value("#714141"), alt.value("#d9d9d9")
            ),
        )
        .add_selection(country_selector)
        .interactive()
    )
    by_country2 = (
        alt.Chart(COUNTRY_VOTES)
        .transform_fold(["Agree", "Disagree"], as_=["Sentiment", "Agree/Disagree"])
        .mark_bar()
        .encode(
            x=alt.X("Country:N", title="Country"),
            y=alt.Y(
                "Agree/Disagree:Q",
                title="Is AI Regulation Needed?",
                scale=alt.Scale(domain=[0, 100]),
            ),
            tooltip=[alt.Tooltip("Country:N", title="Country")],
            color=alt.Color(
                "Sentiment:N",
                scale=alt.Scale(
                    domain=["Agree", "Disagree"], range=["#714141", "#d9d9de"]
                ),
            ),
        )
        .transform_filter(country_selector)
        .interactive()
    )
    joint_chart = all_country_data | by_country2
    st.write(joint_chart)
    # Typos fixed in the displayed narrative: "begning" -> "beginning",
    # "in increasingly" -> "an increasingly".
    st.write(
        "From the chart above we can see that nearly every major world power agrees"
        + " that AI imposes enough risk that it should be regulated. There is not one outlier nation "
        + "that does not think that AI and Robotics are beginning to play an increasingly "
        + "dangerous or volatile role."
    )


create_stacked_bar()
| {"/streamlit_app.py": ["/support/topic_modeling.py", "/support/sentiment_analysis.py"]} |
73,497 | CMU-IDS-2021/fp--05839-abby-jeff-kyle-will | refs/heads/main | /support/sentiment_analysis.py | # sentiment_analysis.py
# Data pre-processing for sentiment analysis of Nature articles.
import numpy as np
import pandas as pd
import altair as alt
import streamlit as st
def getTopByYear(data):
    """Expand the top-words table into one full column per year.

    Each source column's first cell holds the list of top words for that
    year; the result turns those lists into proper DataFrame columns.
    """
    columns = {}
    # Iterating a DataFrame (here, its head) yields its column labels.
    for year in data.head():
        columns[str(year)] = data[year][0]
    return pd.DataFrame(columns)
def getSentiment(data):
    """Reshape the sentiment frame into long form for Altair plotting.

    Transposes so years become rows, labels the two measure columns (the
    source is expected to have exactly two rows), and melts to
    (Year, Legend, Percent) records.  The leading space in
    " Positive Articles" is intentional and kept for the chart legend.
    """
    newData = data.T
    newData["Year"] = np.asarray(data.columns)
    newData.columns = ["Average Sentiment", " Positive Articles", "Year"]
    # The original also called newData.set_index("Year") and discarded the
    # result; melt() needs "Year" as a column, so that no-op is removed.
    return newData.melt("Year", var_name="Legend", value_name="Percent")
def public():
    """Render the media-sentiment page: year slider, trend chart, top words, movies."""
    # Load pre-computed article statistics.
    top10 = pd.read_json("data/top10_nature.json")
    sentiment = pd.read_json("data/natureArticlesSentiment.json")
    # Shape the data for display.
    topByYear = getTopByYear(top10)
    topByYear.index += 1  # show word ranks as 1..10 instead of 0..9
    sentByYear = getSentiment(sentiment)
    # Year slider spanning the covered range (2000-2020 inclusive).
    yearOptions = [str(year) for year in range(2000, 2021)]
    tops = st.select_slider(
        "Select the year for the 'Top 10 Words' found in news articles",
        yearOptions,
    )
    # Sentiment trend lines, one per measure.
    trend = (
        alt.Chart(sentByYear)
        .mark_line()
        .encode(
            x=alt.X("Year:N"),
            y=alt.Y("Percent:Q", axis=alt.Axis(format="%")),
            color=alt.Color(
                "Legend:N",
                legend=alt.Legend(orient="bottom"),
                scale=alt.Scale(scheme="redyellowblue"),
            ),
        )
        .properties(title="What is the perception of AI in the media?")
    )
    # Vertical red rule marking the slider-selected year.
    marker = alt.Chart(pd.DataFrame({" ": [tops]})).mark_rule(color="red").encode(x=" ")
    combined = trend + marker
    col1, col2 = st.beta_columns([6, 2])
    with col1:
        st.altair_chart(combined, use_container_width=True)
    with col2:
        st.dataframe(topByYear[tops])
    movies(tops)
def movies(year):
    """Show the poster, blurb, and narrative for the given year's AI movie, if any."""
    blurbs = {
        "2003": "The human city of Zion defends itself against the massive invasion of the machines as Neo fights to end the war at another front while also opposing the rogue Agent Smith.",
        "2004": "In 2035, a technophobic cop investigates a crime that may have been perpetrated by a robot, which leads to a larger threat to humanity.",
        "2014": "In 2028 Detroit, when Alex Murphy, a loving husband, father and good cop, is critically injured in the line of duty, the multinational conglomerate OmniCorp sees their chance for a part-man, part-robot police officer.",
        "2015": "In the near future, crime is patrolled by a mechanized police force. When one police droid, Chappie, is stolen and given new programming, he becomes the first robot with the ability to think and feel for himself.",
        "2016": "The rise of technology means that humans will have to be very clever not to be left behind as robots take over. But as human labor loses its value and challenges our purpose, people may find that they aren't wanted by the future at all.",
        "2017": "Young Blade Runner K's discovery of a long-buried secret leads him to track down former Blade Runner Rick Deckard, who's been missing for thirty years.",
        "2020": "Artificial Intelligence has permeated every aspect of planetary life in 2030. Tokyoites experience AI in every aspect of their lives whether medical, finance, transportation or their personal and day-to-day interactions. Many would tell you that AI is indispensable. Yet, as is often the case with technology jumping the gun on ethics and rules AI spins out of control and causes calamity after calamity. The city and the country are in chaos.",
    }
    # Poster image file for each featured year.
    posterFiles = {
        "2003": "data/2003 - Matrix Reloaded.jpg",
        "2004": "data/2004 - i Robot.jpg",
        "2014": "data/2014 - RoboCop.jpg",
        "2015": "data/2015 - chappie.jpg",
        "2016": "data/2016 - Obsolete.jpg",
        "2017": "data/2017 -Blade_Runner_2049.png",
        "2020": "data/2020 - AI Amok.jpg",
    }
    narrative = {
        "2003": "In 2003, the movie The Matrix Reloaded debuted. This is a story of a dystopian future where machines have taken over and supplanted humanity. As you can see in the line chart, public perception shifted from positve to neutral and media articles were only 50% positive. Did this movie affect the perception of AI?",
        "2004": "In 2004, the blockbuster movie i, Robot premiered. This story, while not wholely positive for AI, did have a robot protagonist that helped save the day with Will Smith. Did the perception of AI change again?",
        "2014": "2014 was the year the robots turned on the populace. In the movie, RoboCop, robots patrol the streets and have run amok, it is a cyborg that helped keep the streets safe. Are movies and popular media influencing the public about AI?",
        "2015": "Another shift in perception as the feelgood movie, Chappie, came out in theaters. If a robot can feel, do we feel safer around them?",
        "2016": "An indie film about how technology is supplanting humans. Did the small scale of this movie mean that the perception only mildly drop?",
        "2017": "Cult classic, BladeRunner, came out in the early 80's and was a movie about a dystopian future. This sequel came out in 2017 and increased public perception. Did the love of a continuation of a cult classic offset fear toward AI?",
        "2020": "",
    }
    # Only these years have a panel (2020 has data above but is not shown).
    featuredYears = ("2003", "2004", "2014", "2015", "2016", "2017")
    leftCol, rightCol = st.beta_columns([3, 5])
    if year not in featuredYears:
        return
    with leftCol:
        st.image(posterFiles[year], width=300)
    with rightCol:
        st.subheader("Movie Description")
        st.write(blurbs[year])
        st.write()
        st.subheader("Was there an impact on perception?")
        st.write(narrative[year])
| {"/streamlit_app.py": ["/support/topic_modeling.py", "/support/sentiment_analysis.py"]} |
73,499 | bohorqux/My_Messenger_Analysis | refs/heads/master | /unit_test.py | #!/usr/bin/python3
from threadfunctions import *
from construct_data import *
from stringformatting import *
def main():
    """Exercise lead_zeros() and the timestamp pipeline; output is inspected by eye.

    The five copy-pasted lead_zeros test sections of the original are replaced
    by one data-driven loop that prints identical output.
    """
    threads = initThreads("messages.htm")
    chat = threads[313]
    original_timestamps = [t.string for t in chat.find_all("span", class_ = "meta")]
    original_timestamps.reverse()
    original_timestamps = original_timestamps[30:55]
    print("Testing lead_zeros() function...")
    zeros = 4
    # (input string, expected zero-padded result) pairs.
    cases = [
        ("", "0000"),
        ("1", "0001"),
        ("10", "0010"),
        ("100", "0100"),
        ("1000", "1000"),
    ]
    for case_no, (value, expected) in enumerate(cases, start=1):
        print("\n************")
        print("Test Case: %d" % case_no)
        print("String = %s\tNum Zeros = %d" % (value, zeros))
        print("\nexpected: %s\nresult: %s\n" % (expected, lead_zeros(value, zeros)))
        print("***********")
    print("\nTesting miliTime() function...")
    print("***************")
    print("Test Case: 6")
    # Plain loops instead of list comprehensions used only for side effects.
    for timestamp in original_timestamps:
        print(timestamp)
    print()
    for timestamp in original_timestamps:
        print(string_to_date(timestamp))
    print()
    for timestamp in original_timestamps:
        print(format_date(string_to_date(timestamp)))


main()
| {"/unit_test.py": ["/threadfunctions.py", "/construct_data.py", "/stringformatting.py"], "/threadfunctions.py": ["/stringformatting.py"], "/construct_data.py": ["/threadfunctions.py"]} |
73,500 | bohorqux/My_Messenger_Analysis | refs/heads/master | /threadfunctions.py | #!/usr/bin/python3
from collections import Counter

from bs4 import BeautifulSoup

from stringformatting import *
def initThreads(messenger_html_doc):
    """ devises soup object out of specified messenger file """
    print("Opening file: %s..." % messenger_html_doc)
    # "with" guarantees the handle is closed even if read() raises; the
    # original open/close pair leaked the file descriptor on error.
    with open(messenger_html_doc, "r") as fileIn:
        print("File opened!")
        feed = fileIn.read()
    print("File closed!")
    print("Converting %s to soup object..." % messenger_html_doc)
    soup = BeautifulSoup(feed, 'lxml')
    threads = soup.find_all("div", class_ = "thread")
    print("Threads parsed successfully!\n************************\n")
    return threads
###################################### DATA RETRIEVAL FUNCTIONS #################################################
def getUniqueUsers(chat):
    """ return distinct list of users that exist in a single thread/chat """
    distinct = {tag.string for tag in chat.find_all("span", class_ = "user")}
    return list(distinct)
def getPostingFreq(chat, user):
    """ returns number of times user has posted in a chat """
    return sum(
        1 for tag in chat.find_all("span", class_ = "user") if tag.string == user
    )
def getTotalPosts(chat):
    """ returns number of posts in chat """
    # The original built a list of .string values first; the length of the
    # tag list is the same number.
    return len(chat.find_all("p"))
def getStringMatches(chat, string):
    """ returns number of times string is found in a chat """
    hits = 0
    for text in (tag.string for tag in chat.find_all("p")):
        # Tags with non-string content yield None; skip them.
        if text is not None and string in text:
            hits += 1
    return hits
def getStringMatchesByUser(chat, string, user):
    """Return the number of posts by *user* in *chat* that contain *string*.

    Bug fix: the running count was never initialised, so every call raised
    NameError (at the first match, or at the return when nothing matched).
    """
    # users and posts are parallel lists scraped from the same thread markup.
    users = [u.string for u in chat.find_all("span", class_ = "user")]
    posts = [p.string for p in chat.find_all("p")]
    counter = 0
    for i in range(len(posts)):
        if posts[i] is None:
            continue
        if string in posts[i] and users[i] == user:
            counter += 1
    return counter
def getStringMatchPercentage(chat, string):
    """ returns percentage of chat that consists of string """
    matches = getStringMatches(chat, string)
    return matches / getTotalPosts(chat)
def getUserPercentage(chat, user):
    """ returns percentage of chat that consists of user """
    posted = getPostingFreq(chat, user)
    return posted / getTotalPosts(chat)
def getAllUserPercentage(chat):
    """ returns dictionary that corresponds user with posting percentage """
    return {user: getUserPercentage(chat, user) for user in getUniqueUsers(chat)}
def getUserStringPercentage(chat, string, user):
    """Return the fraction of *user*'s posts in *chat* that contain *string*.

    Bug fix: the denominator called getPostringFreq (a typo), which raised
    NameError on every invocation; the intended helper is getPostingFreq.
    """
    return getStringMatchesByUser(chat, string, user) / getPostingFreq(chat, user)
def getTotalWords(chat):
    """ returns total amount of words in chat """
    texts = [tag.string for tag in chat.find_all("p")]
    # Word count = number of space-separated tokens, skipping non-text posts.
    return sum(len(text.split(" ")) for text in texts if text is not None)
def getAverageMessageLength(chat):
    """Return the average number of words per post in *chat*.

    Bug fix: the original computed len(posts) / totalWords * 100 (posts per
    word, scaled by 100), contradicting its own docstring; the intended value
    is total words divided by the number of posts.
    """
    posts = [p.string for p in chat.find_all("p")]
    if not posts:
        return 0.0  # avoid ZeroDivisionError on an empty thread
    return getTotalWords(chat) / len(posts)
##################################### DATA RETRIEVAL FUNCTIONS ####################################################
##################################### MIDDLEWARE FUNCTIONS ####################################################
def getPosts(chat):
    """ returns list of all posts in a chat """
    return [tag.string for tag in chat.find_all("p") if tag.string is not None]
def mostCommonPosts(chat):
    """ returns tuple list of most common posts/messages that exists in a chat """
    texts = [tag.string for tag in chat.find_all("p") if tag.string is not None]
    return createFrequencyList(texts)
def createFrequencyList(aList):
    """Return (element, count) tuples for every element of aList occurring
    more than once, sorted by ascending count.

    Returns the int 0 (after printing a notice) when every element is unique;
    callers historically rely on that sentinel, so it is preserved.  Counting
    uses collections.Counter instead of the hand-rolled dict loop.
    """
    counts = Counter(aList)
    # Keep only elements that repeat; Counter preserves first-seen order,
    # matching the original dict-based accumulation.
    repeated = [(element, count) for element, count in counts.items() if count != 1]
    if not repeated:
        print("No distinct frequencies found...terminating")
        return 0
    # Stable ascending sort by frequency, as before.
    return sorted(repeated, key=lambda pair: pair[1])
def collectWords(chat):
    """ returns a list of all words in a chat """
    texts = [tag.string for tag in chat.find_all('p') if tag.string is not None]
    # Flatten every post into its space-separated tokens.
    return [word for text in texts for word in text.split(" ")]
def mostCommonWords(chat):
    """ returns tuple list of most common words that exist in a chat """
    return createFrequencyList(collectWords(chat))
def debugTimeStamp(chat, user, istart, iend):
    """Print original, parsed, and re-formatted timestamps for posts istart..iend-1.

    Bug fix: the loop iterated over the literal tuple (istart, iend, 1)
    instead of range(istart, iend, 1), so it printed entries istart, iend and
    1 rather than the requested span.  The *user* parameter is unused but kept
    for signature compatibility; the unused parallel users list is removed.
    """
    timestamps = [t.string for t in chat.find_all("span", class_ = "meta")]
    timestamps.reverse()
    toDate = [string_to_date(t) for t in timestamps]
    formatted = [format_date(t) for t in toDate]
    for i in range(istart, iend):
        print("original timestamp:\t%s" % timestamps[i])
        print("string_to_date timestamp:\t%s" % toDate[i])
        print("formatted timestamp:\t%s" % formatted[i])
        print()
def getUserTimestamps(chat, user):
    """ returns tuple list of specified user with their timestamps """
    users = [tag.string for tag in chat.find_all("span", class_ = "user")]
    stamps = [
        format_date(string_to_date(tag.string))
        for tag in chat.find_all("span", class_ = "meta")
    ]
    # Messenger exports list newest-first; flip to chronological order.
    users.reverse()
    stamps.reverse()
    return [(users[i], stamps[i]) for i in range(len(stamps)) if users[i] == user]
################################# MIDDLEWARE FUNCTIONS ####################################################
################################# THREAD/CHAT VISUAL FUNCTIONS ####################################################
def displayStringMatches(chat, string):
    """ prints frequency of string found in chat """
    count = getStringMatches(chat, string)
    banner = "\n####################\nFound %d instances of phrase - %s - in this thread...\n####################"
    print(banner % (count, string))
def displayPostingFreq(chat):
    """ prints frequency of all user posts in chat """
    for name in getUniqueUsers(chat):
        print("%s: %d posts" % (name, getPostingFreq(chat, name)))
    print("\nTotal post amount: %d\n" % getTotalPosts(chat))
    return 0
def displayUsers(chat):
    """ prints users involved in a chat """
    print("\n--------------- USERS ---------------")
    names = getUniqueUsers(chat)
    for idx, name in enumerate(names):
        # Bare facebook.com profile handles are paired with the neighbouring
        # display name as a best guess.
        if "facebook.com" in name:
            print(name, " ...Possibly: %s" % names[idx - 1])
        else:
            print(name)
    print("----------------- USERS ---------------\n")
    return 0
def displayAllUsers(threads):
    """ displays all users involved in each thread """
    for idx, thread in enumerate(threads):
        print("thread[%d]:----" % idx)
        displayUsers(thread)
        print("\n")
    return 0
def displaySpecified(threads, user):
    """ prints the users in a chat if a specified user is found """
    found = 0
    for idx, thread in enumerate(threads):
        if user in getUniqueUsers(thread):
            print("thread[%d]:---" % idx)
            displayUsers(thread)
            found += 1
    print("Involved in %d message(s)...\n" % found)
    return 0
def displayChat(chat):
    """ prints the conversation stored in chat """
    users = [tag.string for tag in chat.find_all("span", class_ = "user")]
    posts = [tag.string for tag in chat.find_all("p")]
    stamps = [
        format_date(string_to_date(tag.string))
        for tag in chat.find_all("span", class_ = "meta")
    ]
    # The export lists messages newest-first; flip all three parallel lists.
    for sequence in (users, posts, stamps):
        sequence.reverse()
    for i in range(len(users)):
        print("%s: %s\n\t%s\n" % (users[i], stamps[i], posts[i]))
    return 0
def displayChatData(chat):
    """ prints a bunch of data about a chat """
    print("--------------- U S E R S ---------------")
    displayUsers(chat)
    print("--------------- U S E R S ---------------")
    print("\n*************** D A T A ***************")
    displayPostingFreq(chat)
    print("Total posts in chat: %d" % getTotalPosts(chat))
    print("Total words in chat: %d" % getTotalWords(chat))
    print("AverageMessageLength: %.3f" % getAverageMessageLength(chat))
    shares = getAllUserPercentage(chat)
    print("\n%%%%%%%%%%%%%%% F R E Q %%%%%%%%%%%%%%%")
    for name, fraction in shares.items():
        print("%s percentage:\t%.2f" % (name, fraction * 100))
    print("%%%%%%%%%%%%%%% F R E Q %%%%%%%%%%%%%%%")
    print("\n*************** D A T A ***************")
def displayFrequencyList(aList, titlestring, columnstring):
    """ prints out the frequency """
    print("^^^^^^^^^^^^^^^^^^^^ %s ^^^^^^^^^^^^^^^^^^^^^^^^^" % titlestring)
    print("\n%s:\t\tScore:\n--------------------\n" % columnstring)
    for element, score in aList:
        print("%s\t\t%d" % (element, score))
########################## THREAD/CHAT VISUAL FUNCTIONS ####################################################
###################################### MAIN ################################################################
def main():
    """Ad-hoc driver: parse messages.htm and dump stats for thread 313."""
    chats = initThreads("messages.htm")
    displayChatData(chats[313])
    return 0
###################################### MAIN #################################################################
#main()
| {"/unit_test.py": ["/threadfunctions.py", "/construct_data.py", "/stringformatting.py"], "/threadfunctions.py": ["/stringformatting.py"], "/construct_data.py": ["/threadfunctions.py"]} |
73,501 | bohorqux/My_Messenger_Analysis | refs/heads/master | /stringformatting.py | #!/usr/bin/python3
import re
def lead_zeros(string, num_zeros):
    """ pads zero(s) to front of specified string """
    # str.rjust pads on the left to the target width and returns the string
    # unchanged when it is already long enough — exactly what the manual
    # "0" * (num_zeros - len(string)) + string computed.
    return string.rjust(num_zeros, "0")
def miliTime(time):
    """ converts meridian time string into military time string """
    digits = re.sub(":", "", time)
    afternoon = "pm" in digits
    # Drop the trailing am/pm marker so arithmetic can be done on the digits.
    value = int(digits[:-2])
    if not afternoon and 1200 <= value <= 1259:
        value -= 1200  # 12:xxam is 00:xx in military time
    if afternoon and 100 <= value <= 1159:
        value += 1200  # 1:00pm-11:59pm shift by 12 hours
    return str(value)
def string_to_date(timestamp):
    """ converts messenger timestamp string into its integer representation

    Keyword Arguments:
    string -- input must be as follows: {DayofWeek, MonthandDay, Year at Time EDT}
    """
    names = ["January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"]
    months = {name: index + 1 for index, name in enumerate(names)}
    # Drop the weekday, leaving [Month, "Day,", Year, "at", Time, Zone].
    parts = timestamp.split(" ")[1:]
    parts[0] = str(months[parts[0]])
    parts[1] = re.sub(",", "", parts[1])
    # Assemble yyyy + mm + dd + tttt so the integer sorts chronologically.
    pieces = [
        parts[2],
        lead_zeros(parts[0], 2),
        lead_zeros(parts[1], 2),
        lead_zeros(miliTime(parts[4]), 4),
    ]
    return int("".join(pieces))
def format_date(timestamp):
    """Render an integer yyyymmddtttt timestamp as "mm:dd:yyyy:tttt"."""
    digits = str(timestamp)
    return ":".join([digits[4:6], digits[6:8], digits[:4], digits[8:]])
| {"/unit_test.py": ["/threadfunctions.py", "/construct_data.py", "/stringformatting.py"], "/threadfunctions.py": ["/stringformatting.py"], "/construct_data.py": ["/threadfunctions.py"]} |
73,502 | bohorqux/My_Messenger_Analysis | refs/heads/master | /construct_data.py | #!/usr/bin/python3
from threadfunctions import *
# m m : d d : y y y y : t t t t
# 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14
def incrementMinute(timestamp):
    """Advance the minute field (last two chars) of an mm:dd:yyyy:hhmm string by one."""
    assert isinstance(timestamp, str)
    minute = int(timestamp[-2:]) + 1
    # Everything before the minute digits is left untouched.
    return timestamp[:-2] + lead_zeros(str(minute), 2)
def incrementHour(timestamp):
    """Advance the hour field (chars -4:-2) of an mm:dd:yyyy:hhmm string by one."""
    assert isinstance(timestamp, str)
    hour = int(timestamp[-4:-2]) + 1
    return timestamp[:-4] + lead_zeros(str(hour), 2) + timestamp[-2:]
def incrementDay(timestamp):
    """Advance the day field (chars 3:5) of an mm:dd:yyyy:hhmm string by one."""
    day = int(timestamp[3:5]) + 1
    return timestamp[:3] + lead_zeros(str(day), 2) + timestamp[5:]
def incrementMonth(timestamp):
    """Advance the month field (first two chars) of an mm:dd:yyyy:hhmm string by one."""
    month = int(timestamp[:2]) + 1
    return lead_zeros(str(month), 2) + timestamp[2:]
def incrementYear(timestamp):
    """Advance the year field (chars 6:10) of an mm:dd:yyyy:hhmm string by one."""
    year = int(timestamp[6:10]) + 1
    return timestamp[:6] + lead_zeros(str(year), 4) + timestamp[10:]
def incrementTime(timestamp):
    """Advance an mm:dd:yyyy:hhmm timestamp by one minute, with field rollover.

    Bug fixes vs. the original:
    * hours rolled over at 25 instead of 24, generating a phantom 24:00-24:59
      hour each day that could never match a real (00-23h) timestamp;
    * the checks were an elif chain, so a rollover at e.g. 23:59 on the last
      day of a month could not cascade into day/month/year in the same call.
    NOTE: every month is still treated as 31 days (day rollover at 32), as in
    the original; calendar-accurate month lengths would need datetime.
    """
    timestamp = incrementMinute(timestamp)
    if int(timestamp[13:]) == 60:
        timestamp = incrementHour(timestamp)
        timestamp = timestamp[:13] + "00"
    if int(timestamp[11:13]) == 24:
        timestamp = incrementDay(timestamp)
        timestamp = timestamp[:11] + "0000"
    if int(timestamp[3:5]) == 32:
        timestamp = incrementMonth(timestamp)
        timestamp = timestamp[:3] + "01" + timestamp[5:]
    if int(timestamp[:2]) == 13:
        timestamp = incrementYear(timestamp)
        timestamp = "01" + timestamp[2:]
    return timestamp
def timeStampCounter(start, end, outFile, timearray, val):
    """Write one line per simulated minute from start to end, tagging message minutes.

    Minutes matching the next entry of timearray emit "counter<TAB>val";
    other minutes emit just the counter.  Fixes vs. the original: the unused
    variable `t` is removed, and the loop bound incrementTime(end) is computed
    once instead of on every iteration.
    """
    counter = 0
    timeindice = 0
    stop = incrementTime(end)  # loop terminates one minute past `end`
    while start != stop and timeindice < len(timearray):
        if start == timearray[timeindice]:
            # Consume one matching timestamp without advancing the clock, so
            # duplicate timestamps each get their own tagged line.
            timeindice += 1
            outFile.write("%d\t%d\n" % (counter, val))
        else:
            start = incrementTime(start)
            outFile.write("%d\n" % counter)
            counter += 1
    print("time indice = %d\tarray length = %d\n" % (timeindice, len(timearray)))
def createPlot(filename, chat, user, val):
    """Write per-minute activity plot data for *user* in *chat* to *filename*.

    Bug fix: the output file is now managed by a "with" block, so it is
    closed even if timeStampCounter raises.
    """
    timearray = [stamp for _, stamp in getUserTimestamps(chat, user)]
    with open(filename, "w") as plot:
        # Span from the user's first message to their last.
        timeStampCounter(timearray[0], timearray[-1], plot, timearray, val)
def main():
    """Generate per-user activity plot data for thread 313 of messages.htm."""
    threads = initThreads("messages.htm")
    chat = threads[313]
    createPlot("Xlog.dat", chat, "Xavier Bohorquez", 1)
    createPlot("Dlog.dat", chat, "Dan Palacio", 2)


main()
| {"/unit_test.py": ["/threadfunctions.py", "/construct_data.py", "/stringformatting.py"], "/threadfunctions.py": ["/stringformatting.py"], "/construct_data.py": ["/threadfunctions.py"]} |
73,503 | xueyeelong/Amonitor | refs/heads/master | /dataCalculate/flow/flow.py | # -*- coding: utf-8 -*-
#
import MySQLdb
import pandas as pd
import numpy
import json
import sys
import os
import datetime
# Add the parent directory to the import path so the sibling packages resolve.
sys.path.insert(0,os.path.abspath(os.path.dirname(os.path.dirname(__file__))))
from libfile import pysql
from libfile import logger
import config
# Module-wide logger writing to the configured log path.
log = logger.Logger('flow',os.path.abspath(os.path.dirname(os.path.dirname(__file__))) + "/" + config.log_path)
# Build the list of dates the data covers.
def timeScale(startTime = "2017-03-01"):
    """Return the ISO date strings from startTime through today, ascending."""
    today = datetime.date.today()
    offset = 0
    dates = []
    while True:
        day = str(today - datetime.timedelta(days=offset))
        dates.insert(0, day)  # prepend so the result ends up oldest-first
        if day == startTime:
            break
        offset += 1
    return dates
# Loan amount statistics
def loan():
    """Backfill daily and cumulative loan amounts per product.

    For every day since the start of the scale that is not already present in
    dayAddApi_flowloanmoney, sums repayMoney per product (plus an 'All' row)
    and inserts it; then refreshes the per-product cumulative totals table.
    Python 2 module: uses print statements and the shared pysql/log helpers.
    """
    timeList = timeScale()
    sql = 'select distinct createDate from dayAddApi_flowloanmoney'
    tmRest = pysql.dbInfoLocal(sql)
    tmRest = tmRest.fillna(0)
    tmwait = []
    if not tmRest.empty:
        # Dates already recorded locally; skip them below.
        tmwait = [str(x)[:10] for x in tmRest['createDate']]
    for i in range(len(timeList)-1):
        stTime = timeList[i]
        edTime = timeList[i+1]
        if stTime in tmwait:
            continue
        # Daily loan amounts for [stTime, edTime).
        print '借贷金额数据更新:' + stTime + '~' + edTime
        sql = """
        select productId,repayMoney
        from loan_repaying
        where compatibleStatus <> 'CANCEL' and productId != 1001
        and createdTime >= '{}' and createdTime < '{}';
        """.format(stTime,edTime)
        data = pysql.dbInfo(sql)
        data = data.fillna(0)
        product = config.product
        for key in product.keys():
            tp = data[data['productId']==int(key)]
            if not tp.empty:
                money = int(sum(tp['repayMoney']))
            else:
                money = 0
            sql = """ insert into dayAddApi_flowloanmoney(product,money,createDate) values (%s,%s,%s) """
            dset = [(product[key],money,stTime)]
            status = pysql.insertData(sql,dset)
            log.log('每日借贷金额更新状态-{}! ({})'.format(status,stTime),'info')
        # Aggregate row across all products for the same day.
        allLoan = int(sum(data['repayMoney']))
        sql = """ insert into dayAddApi_flowloanmoney(product,money,createDate) values (%s,%s,%s) """
        dset = [('All',allLoan,stTime)]
        status = pysql.insertData(sql,dset)
        log.log('每日借贷金额更新状态-{}! ({})'.format(status,stTime),'info')
    # Cumulative totals per product (snapshot dated today).
    sql = """
    select productId,sum(repayMoney) 'repayMoney'
    from loan_repaying
    where compatibleStatus <> 'CANCEL' and productId != 1001
    group by productId
    """
    data = pysql.dbInfo(sql)
    data = data.fillna(0)
    product = config.product
    for key in product.keys():
        proname = product[key]
        tp = data[data['productId']==int(key)]
        if not tp.empty:
            money = int(sum(tp['repayMoney']))
        else:
            money = 0
        sql = """ insert into dayAddApi_flowloanmoneysum(product,money,createDate) values (%s,%s,%s) """
        dset = [(proname,money,str(datetime.datetime.today())[:10])]
        status = pysql.insertData(sql,dset)
        # NOTE(review): stTime here is left over from the daily loop above
        # (and undefined if that loop never ran) — probably meant today's
        # date; confirm before relying on this log field.
        log.log('借贷总金额更新状态-{}!({})!'.format(status,stTime),'info')
# Repayment amount statistics
def paid():
    """Backfill the daily repaid-amount table.

    For every day not yet present in dayAddApi_flowpaidmoney, sums repayMoney
    of rows repaid within [stTime, edTime) and inserts one record.
    Python 2 module: uses print statements and the shared pysql/log helpers.
    """
    timeList = timeScale()
    sql = 'select distinct createDate from dayAddApi_flowpaidmoney'
    tmRest = pysql.dbInfoLocal(sql)
    tmRest = tmRest.fillna(0)
    tmwait = []
    if not tmRest.empty:
        # Dates already recorded locally; skip them below.
        tmwait = [str(x)[:10] for x in tmRest['createDate']]
    for i in range(len(timeList)-1):
        stTime = timeList[i]
        edTime = timeList[i+1]
        if stTime in tmwait:
            continue
        # Repaid amount for [stTime, edTime).
        print '还款金额数据更新:' + stTime + '~' + edTime
        sql = """
        select sum(repayMoney)
        from loan_repaying
        where compatibleStatus <> 'CANCEL' and productId != 1001
        and repaidTime >= '{}' and repaidTime < '{}';
        """.format(stTime,edTime)
        data = pysql.dbInfo(sql)
        data = data.fillna(0)
        money = data.values[0][0]
        sql = """ insert into dayAddApi_flowpaidmoney(paidMoney, createDate) values (%s, %s) """
        dset = [(money,stTime)]
        status = pysql.insertData(sql,dset)
        log.log('每日还款金额更新状态-{}! ({})'.format(status,stTime),'info')
def loanNO():
    """Backfill daily loan amounts split between returning and new borrowers.

    "Old" borrowers are userSids that already appear in loan_repaying before
    stTime; "new" are those that do not.  One row per missing day is inserted
    into dayAddApi_flowloanmoneyno.  Python 2 module.
    """
    timeList = timeScale()
    sql = 'select distinct createDate from dayAddApi_flowloanmoneyno'
    tmRest = pysql.dbInfoLocal(sql)
    tmRest = tmRest.fillna(0)
    tmwait = []
    if not tmRest.empty:
        # Dates already recorded locally; skip them below.
        tmwait = [str(x)[:10] for x in tmRest['createDate']]
    for i in range(len(timeList)-1):
        stTime = timeList[i]
        edTime = timeList[i+1]
        if stTime in tmwait:
            continue
        # Loan amounts split by new vs. returning borrowers.
        print '借贷金额(新老)数据更新:' + stTime + '~' + edTime
        # Returning borrowers: seen in loan_repaying before stTime.
        sql = """
        select sum(repayMoney)
        from loan_repaying
        where compatibleStatus <> 'CANCEL' and productId != 1001
        and createdTime >= '{}' and createdTime < '{}'
        and userSid in (select distinct userSid from loan_repaying where createdTime < '{}');
        """.format(stTime,edTime,stTime)
        data = pysql.dbInfo(sql)
        data = data.fillna(0)
        loanOld = data.values[0][0]
        # First-time borrowers: no loan_repaying rows before stTime.
        sql = """
        select sum(repayMoney)
        from loan_repaying
        where compatibleStatus <> 'CANCEL' and productId != 1001
        and createdTime >= '{}' and createdTime < '{}'
        and userSid not in (select distinct userSid from loan_repaying where createdTime < '{}');
        """.format(stTime,edTime,stTime)
        data = pysql.dbInfo(sql)
        data = data.fillna(0)
        loanNew = data.values[0][0]
        # Persist the day's split.
        sql = """ insert into dayAddApi_flowloanmoneyno(loanOld,loanNew,createDate) values (%s,%s,%s) """
        dset = [(loanOld,loanNew,stTime)]
        status = pysql.insertData(sql,dset)
        log.log('借贷金额(新老)更新状态-{}!({})!'.format(status,stTime),'info')
def actRepayment():
    """Backfill daily due-vs-actually-repaid amounts and the on-time rate.

    For each missing day: allRepayMoney = total due that day; acRepayMoney =
    the part repaid on or before the due date; repayRate = on-time percentage.
    Python 2 module.
    """
    timeList = timeScale()
    sql = 'select distinct createDate from dayAddApi_indexacrepay'
    tmRest = pysql.dbInfoLocal(sql)
    tmRest = tmRest.fillna(0)
    tmwait = []
    if not tmRest.empty:
        # Dates already recorded locally; skip them below.
        tmwait = [str(x)[:10] for x in tmRest['createDate']]
    for i in range(len(timeList)-1):
        stTime = timeList[i]
        edTime = timeList[i+1]
        if stTime in tmwait:
            continue
        print '应还实还数据更新:' + stTime
        # Total amount due on stTime.
        sql = """
        select sum(repayMoney) from loan_repaying
        where termDate='{}' and compatibleStatus not in ('CANCEL')
        """.format(stTime)
        data = pysql.dbInfo(sql)
        data = data.fillna(0)
        allRepayMoney = int(data.values[0][0])
        # Amount actually repaid on or before the due date.
        sql = """
        select sum(repayMoney) from loan_repaying
        where termDate='{}' and compatibleStatus not in ('CANCEL')
        and repaidTime is not null and DATE_FORMAT(termDate,'%Y-%m-%d') >= DATE_FORMAT(repaidTime,'%Y-%m-%d')
        """.format(stTime)
        data = pysql.dbInfo(sql)
        data = data.fillna(0)
        acRepayMoney = int(data.values[0][0])
        # NOTE(review): raises ZeroDivisionError when nothing is due on
        # stTime (allRepayMoney == 0) — confirm whether that can occur.
        repayRate = int(acRepayMoney/float(allRepayMoney)*100)
        sql = """ insert into dayAddApi_indexacrepay(allRepayMoney,acRepayMoney,repayRate,createDate) values (%s,%s,%s,%s) """
        dset = [(allRepayMoney,acRepayMoney,repayRate,stTime)]
        status = pysql.insertData(sql,dset)
        log.log('每日应还实还更新状态-{}!({})!'.format(status,stTime),'info')
def c2c():
    """Backfill per-member daily C2C funding stats (product 7, status 6 loans).

    For each configured c2c member and each missing day, records: completed
    loan count/amount, amount due that day, amount not repaid on time,
    same-day delinquency rate, cumulative amount due, and the 7-day-late
    amount and rate.  Python 2 module.
    """
    timeList = timeScale(startTime='2017-12-10')
    sql = 'select distinct createDate from dayAddApi_flowc2cfund'
    tmRest = pysql.dbInfoLocal(sql)
    tmRest = tmRest.fillna(0)
    tmwait = []
    if not tmRest.empty:
        # Dates already recorded locally; skip them below.
        tmwait = set([str(x)[:10] for x in tmRest['createDate']])
    for i in range(len(timeList)-1):
        stTime = timeList[i]
        edTime = timeList[i+1]
        if stTime in tmwait:
            continue
        print 'c2c数据更新:' + stTime
        c2c_member = config.c2c_member
        for name in c2c_member:
            # First element holds the comma-separated loaner ids for the member.
            ids = c2c_member[name][0]
            # Loans completed (status 6) during the day.
            sql = """
            select count(*) 'num', sum(repayMoney) 'summoney' from loan
            where status = 6 and productId = 7
            and lastUpdated >= '{}' and lastUpdated < '{}'
            and loanerId in ({})
            """.format(stTime,edTime,ids)
            data = pysql.dbInfo(sql)
            data = data.fillna(0)
            loancount = data['num'].values[0]
            loanmoney = data['summoney'].values[0]
            # Amount falling due on stTime.
            sql = """
            select count(*) 'num', sum(ll.repayMoney) 'summoney' from loan l,loan_repaying ll
            where l.id = ll.loanId
            and l.status = 6 and l.productId = 7
            and ll.termDate = '{}'
            and l.loanerId in ({})
            """.format(stTime,ids)
            data = pysql.dbInfo(sql)
            data = data.fillna(0)
            loanCountTerm = data['summoney'].values[0]
            # Amount due on stTime but repaid late or not at all.
            sql = """
            select count(*) 'num', sum(lt.repayMoney) 'summoney' from (
            select ll.loanId, ll.repayMoney, ll.repaidTime, ll.termDate from loan l,loan_repaying ll
            where l.id = ll.loanId
            and l.status = 6 and l.productId = 7
            and ll.termDate = '{}'
            and l.loanerId in ({})) lt
            where DATE_FORMAT(lt.repaidTime,'%Y-%m-%d') > DATE_FORMAT(lt.termDate,'%Y-%m-%d') or lt.repaidTime is null
            """.format(stTime,ids)
            data = pysql.dbInfo(sql)
            data = data.fillna(0)
            loanCountTermNo = data['summoney'].values[0]
            # Same-day delinquency rate (0 when nothing was late).
            delayRate0 = 0 if loanCountTermNo == 0 else round(loanCountTermNo/float(loanCountTerm)*100,2)
            # Cumulative amount due before edTime.
            sql = """
            select count(*) 'num', sum(l.repayMoney) 'summoney' from loan l,loan_repaying ll
            where l.id = ll.loanId
            and l.status = 6 and l.productId = 7 and ll.termDate < '{}'
            and l.loanerId in ({})
            """.format(edTime, ids)
            data = pysql.dbInfo(sql)
            data = data.fillna(0)
            allCountTerm = data['summoney'].values[0]
            # Amount repaid (or outstanding) 7+ days past the due date.
            sql = """
            select count(*) 'num', sum(ll.repayMoney) 'summoney' from loan l,loan_repaying ll
            where l.id = ll.loanId
            and l.status = 6 and l.productId = 7
            and l.loanerId in ({})
            and if(ll.repaidTime is null, DATEDIFF(DATE_FORMAT(now(), "%Y-%m-%d"), DATE_FORMAT(ll.termDate, "%Y-%m-%d")), DATEDIFF(DATE_FORMAT(ll.repaidTime, "%Y-%m-%d"), DATE_FORMAT(ll.termDate, "%Y-%m-%d"))) >= 7
            """.format(ids)
            data = pysql.dbInfo(sql)
            data = data.fillna(0)
            countTerm7 = data['summoney'].values[0]
            delayRate7 = 0 if countTerm7==0 else round(countTerm7/float(allCountTerm)*100,2)
            sql = """ insert into dayAddApi_flowc2cfund(member, loanCount, loanMoney, loanCountTerm, loanCountTermNo, delayRate0, allCountTerm, delayRate7, countTerm7, createDate) values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s) """
            dset = [(name, loancount, loanmoney, loanCountTerm, loanCountTermNo, delayRate0, allCountTerm, delayRate7, countTerm7,stTime)]
            status = pysql.insertData(sql,dset)
            log.log('c2c更新状态-{}!({})!'.format(status,stTime),'info')
def main():
    """Run every daily loan-statistics job in sequence.

    Order is kept as-is deliberately: each step inserts its day's rows
    into its own dayAddApi_* table, and later steps may assume earlier
    tables are already refreshed (cannot confirm from here -- the
    callees are defined above this chunk).
    """
    loan()
    loanNO()
    actRepayment()
    paid()
    c2c()
if __name__ == '__main__':
    # Entry point for the daily batch (run by cron, presumably -- verify).
    main()
| {"/RiskMonitor/apps/login/admin.py": ["/RiskMonitor/apps/login/models.py"]} |
73,504 | xueyeelong/Amonitor | refs/heads/master | /RiskMonitor/api/dayAddApi/migrations/0011_flowc2cfund.py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2018-01-09 02:07
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Auto-generated: adds the FlowC2CFund daily-snapshot table.

    NOTE(review): applied migrations must never be hand-edited; the
    comments here are annotations only.  Field 'CountTerm7' starts with
    a capital letter unlike every sibling field -- the model mirrors the
    same name, so it is consistent, just unconventional.
    """
    dependencies = [
        ('dayAddApi', '0010_flowpaidmoney'),
    ]
    operations = [
        migrations.CreateModel(
            name='FlowC2CFund',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('member', models.CharField(default=b'credan', max_length=128)),
                ('loanCount', models.IntegerField(default=0)),
                ('loanMoney', models.FloatField(default=0.0)),
                ('loanCountTerm', models.IntegerField(default=0)),
                ('loanCountTermNo', models.IntegerField(default=0)),
                ('delayRate0', models.FloatField(default=0.0)),
                ('allCountTerm', models.IntegerField(default=0)),
                ('delayRate7', models.FloatField(default=0.0)),
                ('CountTerm7', models.IntegerField(default=0)),
                ('createDate', models.DateField(default=django.utils.timezone.now)),
            ],
            options={
                'ordering': ('createDate',),
            },
        ),
    ]
| {"/RiskMonitor/apps/login/admin.py": ["/RiskMonitor/apps/login/models.py"]} |
73,505 | xueyeelong/Amonitor | refs/heads/master | /RiskMonitor/RiskMonitor/urls.py | """RiskMonitor URL Configuration"""
from django.conf.urls import url,include
from django.contrib import admin
# Root URL table.  Every app is mounted at '' with no prefix, so route
# resolution depends entirely on each app's own url regexes not
# colliding; first match wins in list order.
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'^', include('apps.login.urls')),
    url(r'^', include('apps.index.urls')),
    url(r'^', include('apps.userInfo.urls')),
    url(r'^', include('apps.flow.urls')),
    url(r'^', include('apps.finance.urls')),
    url(r'^', include('apps.aeye.urls')),
    url(r'^', include('apps.market.urls')),
    url(r'^', include('apps.collect.urls')),
    url(r'^', include('api.dayAddApi.urls')),
    url(r'^', include('api.actualApi.urls')),
]
| {"/RiskMonitor/apps/login/admin.py": ["/RiskMonitor/apps/login/models.py"]} |
73,506 | xueyeelong/Amonitor | refs/heads/master | /RiskMonitor/apps/userInfo/views.py | # -*- coding: utf-8 -*-
from django.shortcuts import render,render_to_response
from django.http import HttpResponse,HttpResponseRedirect
from django.template import RequestContext
from django.contrib.auth.decorators import login_required
from django.contrib.auth.decorators import permission_required
@permission_required('part_admin.userPage')
def userIncrease_view(request):
    """Render the daily user-growth dashboard page."""
    return render(request,'userInfo/userIncrease.html')
@permission_required('part_admin.userPage')
def userInfoSex_view(request):
    """Render the user gender-distribution page."""
    return render(request,'userInfo/userSex.html')
@permission_required('part_admin.userPage')
def userInfoAge_view(request):
    """Render the user age-distribution page."""
    return render(request,'userInfo/userAge.html')
@permission_required('part_admin.userPage')
def userInfoLocation_view(request):
    """Render the user geographic-distribution page."""
    return render(request,'userInfo/userLocation.html')
@permission_required('part_admin.userPage')
def userRest_view(request):
    """Render the user-retention (cohort) page."""
    return render(request,'userInfo/userRest.html')
@permission_required('part_admin.userPage')
def userRestAll_view(request):
    """Render the all-users retention summary page."""
    return render(request,'userInfo/userRestAll.html')
| {"/RiskMonitor/apps/login/admin.py": ["/RiskMonitor/apps/login/models.py"]} |
73,507 | xueyeelong/Amonitor | refs/heads/master | /RiskMonitor/api/actualApi/actualCal/finance/finance.py | # -*- coding: utf-8 -*-
#
import MySQLdb
import pandas as pd
import numpy
import json
import sys
import os
import datetime
import time
#添加路径
sys.path.insert(0,os.path.abspath(os.path.dirname(os.path.dirname(__file__))))
from libfile import pysql
from libfile import logger
import config
#日志
log = logger.Logger('finance',os.path.abspath(os.path.dirname(__file__)) + "/" + config.log_path)
#数字生成千分位
def intothousand(num):
    """Format a non-negative integer with comma thousands separators.

    Examples: 0 -> '0', 5 -> '5', 1000 -> '1,000', 1234567 -> '1,234,567'.

    The previous hand-rolled digit-grouping loop was buggy: any input
    below 1000 came back with a spurious ',000' suffix (5 -> '5,000')
    and 0 came back as ''.  Delegating to str.format's ',' option fixes
    both while producing identical output for values >= 1000.
    """
    # int() also normalises numpy integer types passed in by callers.
    return '{:,}'.format(int(num))
def todayLoan():
    """Today's paid-out loan total per funding partner.

    Returns a dict shaped like
        {'fundId': {<partner display name>: <today's payout, int>},
         'paidAll': <grand total as a comma-grouped string>}

    NOTE(review): numpy.sum(dict.values()) relies on Python 2 returning
    a list from values(); under Python 3 this needs list(...) first.
    """
    # Display name -> list of fundPayAccountId values.  Commented-out
    # entries are partners intentionally excluded from the dashboard.
    fundloan = {
        u'快乐达连连账户':['10001'],
        u'纵横新创30':['10002'],
        u'纵横新创14':['10004'],
        u'口袋理财v2':['10003'],
        u'柚子理财':['10005'],
        u'魔贷金融1':['10006'],
        u'魔贷金融2':['10010'],
        u'小袋理财':['10007'],
        u'爱多贷':['10008'],
        # u'暴雷科技':['10009'],
        u'拿点花':['10011'],
        u'付呗零用钱':['10012'],
        u'星火钱包1':['20001'],
        u'星火钱包2':['20002'],
        u'魔贷资金资产':['10017'],
        u'钱好借':['10016'],
        u'速融超':['10015'],
        # u'小桥钱包':['10014'],
        u'马上有钱':['10018'],
        u'有钱来了':['10013'],
        u'借袋钱':['10019'],
        u'点点花':['10020'],
    }
    fundloanToday = {'fundId':{}}
    for key in fundloan.keys():
        # NOTE(review): the "','".join only yields valid SQL because every
        # list currently holds a single id -- a two-id list would produce
        # fundPayAccountId='a','b', which is broken; an IN (...) clause
        # was presumably intended.  Confirm before adding multi-id funds.
        fundId = "','".join(fundloan[key])
        sql = """
        select sum(payMoney) from loan where status=6 and fundPayAccountId='{}' and createdTime >= DATE_FORMAT(NOW(),'%Y-%m-%d')
        """.format(fundId)
        data = getdata(sql)
        data = data.fillna(0)
        fundloanToday['fundId'][key] = int(data.values[0][0])
    fundloanToday['paidAll'] = intothousand(numpy.sum(fundloanToday['fundId'].values()))
    return fundloanToday
def todayLoanDetail():
    """Hour-by-hour payout totals for today.

    Returns a dict of three parallel lists:
      'hours'    -- hour-of-day strings that had payouts ('00'..'23')
      'money'    -- payout total for that hour (int)
      'cumMoney' -- running total up to and including that hour (int)
    All lists are empty when the query returns no rows (e.g. DB error).
    """
    sql = """
    select DATE_FORMAT(createdTime,'%H') 'hour',sum(payMoney) 'loanMoney' from loan
    where status=6 and createdTime >= DATE_FORMAT(NOW(),'%Y-%m-%d')
    group by DATE_FORMAT(createdTime,'%H')
    """
    data = getdata(sql)
    data = data.fillna(0)
    hours = []
    money = []
    cumMoney = []
    running = 0
    # One pass with a running total instead of re-summing the whole
    # prefix (numpy.sum(data[:i+1])) on every iteration, which was
    # O(n^2) in the number of rows.
    for i in range(len(data)):
        hours.append(data['hour'][i])
        amount = data['loanMoney'][i]
        money.append(int(amount))
        # Accumulate the raw (possibly float) value and truncate only at
        # append time, matching the original int(sum(floats)) results.
        running += amount
        cumMoney.append(int(running))
    return {'hours': hours, 'money': money, 'cumMoney': cumMoney}
def main():
    """Smoke-test entry point: run both real-time finance aggregations
    against the live DB and discard the results."""
    todayLoan()
    todayLoanDetail()
def getdata(sql):
    """Execute *sql* against the business DB and return a DataFrame.

    Returns an empty DataFrame when the connection or query fails --
    swallowing the error is deliberate (best-effort dashboard job) and
    every caller guards with .fillna(0) / emptiness checks.
    """
    data = pd.DataFrame()
    conn = None
    try:
        conn = pysql.conn_mysql()
        data = pd.read_sql_query(sql, con = conn)
    except MySQLdb.Error:
        log.log('数据库链接错误!','warning')
    finally:
        # Bug fix: the original closed the connection only on success,
        # leaking it whenever read_sql_query raised.  (Also replaces the
        # Python-2-only "except MySQLdb.Error,e" syntax; the bound
        # exception was unused anyway.)
        if conn is not None:
            conn.close()
    return data
if __name__ == '__main__':
    # Manual run: exercises the live database through getdata().
    main()
| {"/RiskMonitor/apps/login/admin.py": ["/RiskMonitor/apps/login/models.py"]} |
73,508 | xueyeelong/Amonitor | refs/heads/master | /RiskMonitor/apps/finance/urls.py | # -*- coding: utf-8 -*-
from django.conf.urls import url
import views
# Finance app routes; the route name is referenced from templates.
urlpatterns = [
    url(r'^financetodayloan/$', views.finance_view ,name='financetodayloan'),
]
| {"/RiskMonitor/apps/login/admin.py": ["/RiskMonitor/apps/login/models.py"]} |
73,509 | xueyeelong/Amonitor | refs/heads/master | /RiskMonitor/apps/index/views.py | # -*- coding: utf-8 -*-
from django.shortcuts import render,render_to_response
from django.http import HttpResponse,HttpResponseRedirect
from django.template import RequestContext
from django.contrib.auth.decorators import login_required
from django.contrib.auth.decorators import permission_required
@permission_required('part_admin.indexPage')
def index_view(request):
    """Render the main dashboard page (requires indexPage permission)."""
    return render(request,'index.html')
73,510 | xueyeelong/Amonitor | refs/heads/master | /RiskMonitor/apps/login/admin.py | # -*- coding: utf-8 -*-
from django.contrib import admin
from .models import User
@admin.register(User)
class UserAdmin(admin.ModelAdmin):
    """Admin list/search configuration for login.User.

    SECURITY(review): showing the raw password column in list_display
    implies passwords are stored in plain text -- confirm and fix at the
    model/auth level rather than here.
    """
    list_display = ('email','username','password')
    search_fields = ('email', 'username')
| {"/RiskMonitor/apps/login/admin.py": ["/RiskMonitor/apps/login/models.py"]} |
73,511 | xueyeelong/Amonitor | refs/heads/master | /RiskMonitor/api/dayAddApi/migrations/0001_initial.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-11-13 08:34
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial auto-generated schema for the dayAddApi reporting app.

    NOTE(review): applied migrations must never be hand-edited; the
    comments below are annotations only.
    """
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='CollectDis',
            fields=[
                ('dayto3', models.IntegerField(default=0)),
                ('dayto10', models.IntegerField(default=0)),
                ('dayto20', models.IntegerField(default=0)),
                ('dayto30', models.IntegerField(default=0)),
                ('dayto60', models.IntegerField(default=0)),
                ('dayto90', models.IntegerField(default=0)),
                ('dayover90', models.IntegerField(default=0)),
                ('currentNum', models.IntegerField(default=0)),
                ('createDate', models.DateField(default=django.utils.timezone.now, primary_key=True, serialize=False)),
            ],
            options={
                'ordering': ('createDate',),
            },
        ),
        migrations.CreateModel(
            name='CollectNum',
            fields=[
                ('newAdd', models.IntegerField(default=0)),
                ('newCollectMl1', models.IntegerField(default=0)),
                ('newCollectMu1', models.IntegerField(default=0)),
                ('threeDayCollect', models.IntegerField(default=0)),
                ('threeDayCollectRate', models.FloatField(default=0.0)),
                ('createDate', models.DateField(default=django.utils.timezone.now, primary_key=True, serialize=False)),
            ],
            options={
                'ordering': ('createDate',),
            },
        ),
        migrations.CreateModel(
            name='CollectRate',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('month', models.CharField(default=b'2017-04', max_length=128)),
                ('day4Rate', models.FloatField(default=0.0)),
                ('day7Rate', models.FloatField(default=0.0)),
                ('day15Rate', models.FloatField(default=0.0)),
                ('day30Rate', models.FloatField(default=0.0)),
                ('day60Rate', models.FloatField(default=0.0)),
                ('day90Rate', models.FloatField(default=0.0)),
                ('day90Ratem', models.FloatField(default=0.0)),
                ('createDate', models.DateField(default=django.utils.timezone.now)),
            ],
            options={
                'ordering': ('createDate',),
            },
        ),
        migrations.CreateModel(
            name='FlowDelayRate',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('fundName', models.CharField(default=b'credan', max_length=128)),
                ('delayRate0', models.FloatField(default=0.0)),
                ('delayRate3', models.FloatField(default=0.0)),
                ('delayRate7', models.FloatField(default=0.0)),
                ('delayRate10', models.FloatField(default=0.0)),
                ('delayRate20', models.FloatField(default=0.0)),
                ('delayRateM1', models.FloatField(default=0.0)),
                ('delayRateM2', models.FloatField(default=0.0)),
                ('delayRateM3', models.FloatField(default=0.0)),
                ('createDate', models.DateField(default=django.utils.timezone.now)),
            ],
            options={
                'ordering': ('createDate',),
            },
        ),
        migrations.CreateModel(
            name='FlowDelayRateNO',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('fundName', models.CharField(default=b'credan', max_length=128)),
                ('newDelayRate3', models.FloatField(default=0.0)),
                ('newRepaySum', models.IntegerField(default=0)),
                # NOTE(review): newPaySum/oldPaySum are absent from the
                # current models.py FlowDelayRateNO -- presumably removed
                # by a later migration; verify migration history.
                ('newPaySum', models.IntegerField(default=0)),
                ('newPaid', models.IntegerField(default=0)),
                ('oldDelayRate3', models.FloatField(default=0.0)),
                ('oldRepaySum', models.IntegerField(default=0)),
                ('oldPaySum', models.IntegerField(default=0)),
                ('oldPaid', models.IntegerField(default=0)),
                ('createDate', models.DateField(default=django.utils.timezone.now)),
            ],
            options={
                'ordering': ('createDate',),
            },
        ),
        migrations.CreateModel(
            name='FlowLoanMoney',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('product', models.CharField(default=b'flash', max_length=32)),
                ('money', models.IntegerField(default=0)),
                ('createDate', models.DateField(default=django.utils.timezone.now)),
            ],
            options={
                'ordering': ('createDate',),
            },
        ),
        migrations.CreateModel(
            name='FlowLoanMoneyNO',
            fields=[
                ('loanOld', models.IntegerField(default=0)),
                ('loanNew', models.IntegerField(default=0)),
                ('createDate', models.DateField(default=django.utils.timezone.now, primary_key=True, serialize=False)),
            ],
            options={
                'ordering': ('createDate',),
            },
        ),
        migrations.CreateModel(
            name='FlowLoanMoneySum',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('product', models.CharField(default=b'flash', max_length=32)),
                ('money', models.IntegerField(default=0)),
                ('createDate', models.DateField(default=django.utils.timezone.now)),
            ],
            options={
                'ordering': ('createDate',),
            },
        ),
        migrations.CreateModel(
            name='IndexAcrepay',
            fields=[
                ('allRepayMoney', models.IntegerField(default=0)),
                ('acRepayMoney', models.IntegerField(default=0)),
                ('repayRate', models.IntegerField(default=0)),
                ('createDate', models.DateField(default=django.utils.timezone.now, primary_key=True, serialize=False)),
            ],
            options={
                'ordering': ('createDate',),
            },
        ),
        migrations.CreateModel(
            name='IndexCity',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('cityName', models.CharField(default=b'china', max_length=32)),
                ('numInCity', models.IntegerField(default=0)),
                ('createDate', models.DateField(default=django.utils.timezone.now)),
            ],
            options={
                'ordering': ('createDate',),
            },
        ),
        migrations.CreateModel(
            name='IndexDash',
            fields=[
                ('avgTermNum', models.IntegerField(default=0)),
                ('avgMoney', models.IntegerField(default=0)),
                ('avgServiceMoney', models.IntegerField(default=0)),
                ('createDate', models.DateField(default=django.utils.timezone.now, primary_key=True, serialize=False)),
            ],
            options={
                'ordering': ('createDate',),
            },
        ),
        migrations.CreateModel(
            name='IndexHead',
            fields=[
                ('tradeMoney', models.IntegerField(default=0)),
                ('tradeNum', models.IntegerField(default=0)),
                ('activeUser', models.IntegerField(default=0)),
                ('sumUser', models.IntegerField(default=0)),
                ('createDate', models.DateField(default=django.utils.timezone.now, primary_key=True, serialize=False)),
            ],
            options={
                'ordering': ('createDate',),
            },
        ),
        migrations.CreateModel(
            name='IndexHopper',
            fields=[
                ('register', models.IntegerField(default=0)),
                ('applys', models.IntegerField(default=0)),
                ('passs', models.IntegerField(default=0)),
                ('loan', models.IntegerField(default=0)),
                ('reloan', models.IntegerField(default=0)),
                ('createDate', models.DateField(default=django.utils.timezone.now, primary_key=True, serialize=False)),
            ],
            options={
                'ordering': ('createDate',),
            },
        ),
        migrations.CreateModel(
            name='MarketNum',
            fields=[
                ('applyPass', models.IntegerField(default=0)),
                ('firstDayT', models.IntegerField(default=0)),
                ('firstDay', models.IntegerField(default=0)),
                ('firstDayRate', models.FloatField(default=0.0)),
                ('tryRate', models.IntegerField(default=0)),
                ('secondDay', models.IntegerField(default=0)),
                ('secondDayRate', models.FloatField(default=0.0)),
                ('thirdDay', models.IntegerField(default=0)),
                ('thirdDayRate', models.FloatField(default=0.0)),
                ('paidNum', models.IntegerField(default=0)),
                ('paidRate', models.FloatField(default=0.0)),
                ('auditTime', models.IntegerField(default=0)),
                ('auditTimeWit', models.IntegerField(default=0)),
                ('auditTimeToday', models.IntegerField(default=0)),
                ('createDate', models.DateField(default=django.utils.timezone.now, primary_key=True, serialize=False)),
            ],
            options={
                'ordering': ('createDate',),
            },
        ),
        migrations.CreateModel(
            name='UserAge',
            fields=[
                ('age1', models.IntegerField(default=0)),
                ('age2', models.IntegerField(default=0)),
                ('age3', models.IntegerField(default=0)),
                ('age4', models.IntegerField(default=0)),
                ('age5', models.IntegerField(default=0)),
                ('createDate', models.DateField(default=django.utils.timezone.now, primary_key=True, serialize=False)),
            ],
            options={
                'ordering': ('createDate',),
            },
        ),
        migrations.CreateModel(
            name='UserAgeAll',
            fields=[
                ('age1', models.IntegerField(default=0)),
                ('age2', models.IntegerField(default=0)),
                ('age3', models.IntegerField(default=0)),
                ('age4', models.IntegerField(default=0)),
                ('age5', models.IntegerField(default=0)),
                ('createDate', models.DateField(default=django.utils.timezone.now, primary_key=True, serialize=False)),
            ],
            options={
                'ordering': ('createDate',),
            },
        ),
        migrations.CreateModel(
            name='UserSex',
            fields=[
                ('male', models.IntegerField(default=0)),
                # NOTE(review): 'demale' looks like a typo for 'female'
                # (the current models.py declares 'female'); do not edit
                # this applied migration -- presumably a later migration
                # renamed the column.  Verify migration history.
                ('demale', models.IntegerField(default=0)),
                ('createDate', models.DateField(default=django.utils.timezone.now, primary_key=True, serialize=False)),
            ],
            options={
                'ordering': ('createDate',),
            },
        ),
        migrations.CreateModel(
            name='UserSexAll',
            fields=[
                ('male', models.IntegerField(default=0)),
                ('female', models.IntegerField(default=0)),
                ('createDate', models.DateField(default=django.utils.timezone.now, primary_key=True, serialize=False)),
            ],
            options={
                'ordering': ('createDate',),
            },
        ),
    ]
| {"/RiskMonitor/apps/login/admin.py": ["/RiskMonitor/apps/login/models.py"]} |
73,512 | xueyeelong/Amonitor | refs/heads/master | /dataCalculate/libfile/logger.py | #!/usr/bin/python
#coding=utf-8
import logging
import sys
import os
class Logger:
    """Thin wrapper over the stdlib logging module that writes
    timestamped messages to a dedicated file.

    Only two levels are exposed -- 'info' (default) and 'warning' --
    matching how the batch scripts in this project call log().
    """

    def __init__(self, logName, logFile):
        self._logger = logging.getLogger(logName)
        # logging.getLogger() returns a process-wide singleton per name.
        # The original unconditionally attached a new FileHandler on
        # every Logger() construction, so constructing the wrapper twice
        # duplicated every subsequent log line.  Attach only once.
        if not self._logger.handlers:
            handler = logging.FileHandler(logFile)
            formatter = logging.Formatter('%(asctime)s %(filename)s[line:%(lineno)d] %(name)s %(message)s')
            handler.setFormatter(formatter)
            self._logger.addHandler(handler)
        self._logger.setLevel(logging.INFO)

    def log(self, msg, level='info'):
        """Write *msg*; any level other than 'warning' is logged as info
        (same contract as the original)."""
        if self._logger is not None:
            if level == 'warning':
                self._logger.warning(msg)
            else:
                self._logger.info(msg)
73,513 | xueyeelong/Amonitor | refs/heads/master | /RiskMonitor/api/dayAddApi/models.py | # -*- coding: utf-8 -*-
from django.db import models
import django.utils.timezone as timezone
# ---- index dashboard models (one snapshot row per day; createDate is
# ---- the primary key unless the model needs several rows per day) ----
class IndexHead(models.Model):
    """Daily headline totals shown at the top of the index page."""
    tradeMoney = models.IntegerField(default=0)
    tradeNum = models.IntegerField(default=0)
    activeUser = models.IntegerField(default=0)
    sumUser = models.IntegerField(default=0)
    createDate = models.DateField(primary_key=True,default=timezone.now)
    class Meta:
        ordering = ('createDate',)
class IndexDash(models.Model):
    """Daily per-loan averages (term count, amount, service fee)."""
    avgTermNum = models.IntegerField(default=0)
    avgMoney = models.IntegerField(default=0)
    avgServiceMoney = models.IntegerField(default=0)
    createDate = models.DateField(primary_key=True,default=timezone.now)
    class Meta:
        ordering = ('createDate',)
class IndexHopper(models.Model):
    """Daily conversion-funnel counts (register -> apply -> pass -> loan -> reloan)."""
    register = models.IntegerField(default=0)
    applys = models.IntegerField(default=0)
    passs = models.IntegerField(default=0)
    loan = models.IntegerField(default=0)
    reloan = models.IntegerField(default=0)
    createDate = models.DateField(primary_key=True,default=timezone.now)
    class Meta:
        ordering = ('createDate',)
class IndexCity(models.Model):
    """Per-city user counts; several rows per day (city, date) unique."""
    cityName = models.CharField(max_length=32,default='china')
    numInCity = models.IntegerField(default=0)
    createDate = models.DateField(default=timezone.now)
    class Meta:
        ordering = ('createDate',)
        unique_together = ('cityName', 'createDate')
class IndexAcrepay(models.Model):
    """Daily actual-vs-due repayment totals and rate."""
    allRepayMoney = models.IntegerField(default=0)
    acRepayMoney = models.IntegerField(default=0)
    repayRate = models.IntegerField(default=0)
    createDate = models.DateField(primary_key=True,default=timezone.now)
    class Meta:
        ordering = ('createDate',)
# ---- userInfo models: demographics and retention snapshots ----
class UserAge(models.Model):
    """Daily age-bucket counts (bucket boundaries defined by the ETL job)."""
    age1 = models.IntegerField(default=0)
    age2 = models.IntegerField(default=0)
    age3 = models.IntegerField(default=0)
    age4 = models.IntegerField(default=0)
    age5 = models.IntegerField(default=0)
    createDate = models.DateField(primary_key=True,default=timezone.now)
    class Meta:
        ordering = ('createDate',)
class UserAgeAll(models.Model):
    """All-time age-bucket counts, snapshotted daily."""
    age1 = models.IntegerField(default=0)
    age2 = models.IntegerField(default=0)
    age3 = models.IntegerField(default=0)
    age4 = models.IntegerField(default=0)
    age5 = models.IntegerField(default=0)
    createDate = models.DateField(primary_key=True,default=timezone.now)
    class Meta:
        ordering = ('createDate',)
class UserSex(models.Model):
    """Daily gender counts.

    NOTE(review): migration 0001 created this table with a 'demale'
    column; a later migration presumably renamed it to 'female' --
    verify before relying on raw SQL against this table.
    """
    male = models.IntegerField(default=0)
    female = models.IntegerField(default=0)
    createDate = models.DateField(primary_key=True,default=timezone.now)
    class Meta:
        ordering = ('createDate',)
class UserSexAll(models.Model):
    """All-time gender counts, snapshotted daily."""
    male = models.IntegerField(default=0)
    female = models.IntegerField(default=0)
    createDate = models.DateField(primary_key=True,default=timezone.now)
    class Meta:
        ordering = ('createDate',)
class UserIncrease(models.Model):
    """Daily new-user funnel: registered, approved, new/repeat applicants."""
    register = models.IntegerField(default=0)
    allow = models.IntegerField(default=0)
    newApply = models.IntegerField(default=0)
    oldApply = models.IntegerField(default=0)
    createDate = models.DateField(primary_key=True,default=timezone.now)
    class Meta:
        ordering = ('createDate',)
class UserRest(models.Model):
    """Retention cohort cell: users registered in registerDate still
    active in currentDate (month strings, e.g. '2017-12')."""
    registerDate = models.CharField(max_length=32,default='2017-12')
    currentDate = models.CharField(max_length=32,default='2017-12')
    allPass = models.IntegerField(default=0)
    currentActive = models.IntegerField(default=0)
    currentActiveRate = models.FloatField(default=0.0)
    createDate = models.DateField(default=timezone.now)
    class Meta:
        ordering = ('registerDate', 'currentDate',)
        unique_together = ('registerDate', 'currentDate')
# ---- flow models: daily loan-volume, repayment and overdue snapshots ----
class FlowLoanMoney(models.Model):
    """Daily payout per product; (product, date) unique."""
    product = models.CharField(max_length=32,default='flash')
    money = models.IntegerField(default=0)
    createDate = models.DateField(default=timezone.now)
    class Meta:
        ordering = ('createDate',)
        unique_together = ('product', 'createDate')
class FlowLoanMoneyNO(models.Model):
    """Daily payout split between repeat (old) and first-time (new) borrowers."""
    loanOld = models.IntegerField(default=0)
    loanNew = models.IntegerField(default=0)
    createDate = models.DateField(primary_key=True,default=timezone.now)
    class Meta:
        ordering = ('createDate',)
class FlowLoanMoneySum(models.Model):
    """Cumulative payout per product, snapshotted daily."""
    product = models.CharField(max_length=32,default='flash')
    money = models.IntegerField(default=0)
    createDate = models.DateField(default=timezone.now)
    class Meta:
        ordering = ('createDate',)
        unique_together = ('product', 'createDate')
class FlowPaidMoney(models.Model):
    """Daily repaid amount."""
    paidMoney = models.IntegerField(default=0)
    createDate = models.DateField(primary_key=True,default=timezone.now)
    class Meta:
        ordering = ('createDate',)
class FlowDelayRate(models.Model):
    """Overdue rates per funding partner at several day/month horizons."""
    fundName = models.CharField(max_length=128,default='credan')
    delayRate0 = models.FloatField(default=0.0)
    delayRate3 = models.FloatField(default=0.0)
    delayRate7 = models.FloatField(default=0.0)
    delayRate10 = models.FloatField(default=0.0)
    delayRate20 = models.FloatField(default=0.0)
    delayRateM1 = models.FloatField(default=0.0)
    delayRateM2 = models.FloatField(default=0.0)
    delayRateM3 = models.FloatField(default=0.0)
    createDate = models.DateField(default=timezone.now)
    class Meta:
        ordering = ('createDate',)
        unique_together = ('fundName', 'createDate')
class FlowDelayRateNO(models.Model):
    """3-day overdue rate per partner, split new vs repeat borrowers."""
    fundName = models.CharField(max_length=128,default='credan')
    newDelayRate3 = models.FloatField(default=0.0)
    newRepaySum = models.IntegerField(default=0)
    newPaid = models.IntegerField(default=0)
    oldDelayRate3 = models.FloatField(default=0.0)
    oldRepaySum = models.IntegerField(default=0)
    oldPaid = models.IntegerField(default=0)
    createDate = models.DateField(default=timezone.now)
    class Meta:
        ordering = ('createDate',)
        unique_together = ('fundName', 'createDate')
class FlowLoanFund(models.Model):
    """Daily payout total per funding partner."""
    fundName = models.CharField(max_length=128,default='credan')
    sumMoney = models.FloatField(default=0.0)
    createDate = models.DateField(default=timezone.now)
    class Meta:
        ordering = ('createDate',)
        unique_together = ('fundName', 'createDate')
class FlowC2CFund(models.Model):
    """Daily C2C member snapshot (written by the c2c() batch job).

    NOTE(review): 'CountTerm7' breaks the lowerCamel field convention
    but matches migration 0011 -- renaming would require a migration.
    """
    member = models.CharField(max_length=128,default='credan')
    loanCount = models.IntegerField(default=0)
    loanMoney = models.FloatField(default=0.0)
    loanCountTerm = models.IntegerField(default=0)
    loanCountTermNo = models.IntegerField(default=0)
    delayRate0 = models.FloatField(default=0.0)
    allCountTerm = models.IntegerField(default=0)
    delayRate7 = models.FloatField(default=0.0)
    CountTerm7 = models.IntegerField(default=0)
    createDate = models.DateField(default=timezone.now)
    class Meta:
        ordering = ('createDate',)
        unique_together = ('member', 'createDate')
# ---- collect models: collections (debt recovery) statistics ----
class CollectRate(models.Model):
    """Recovery rates per loan-origination month at several day horizons."""
    month = models.CharField(max_length=128,default='2017-04')
    day4Rate = models.FloatField(default=0.0)
    day7Rate = models.FloatField(default=0.0)
    day15Rate = models.FloatField(default=0.0)
    day30Rate = models.FloatField(default=0.0)
    day60Rate = models.FloatField(default=0.0)
    day90Rate = models.FloatField(default=0.0)
    day90Ratem = models.FloatField(default=0.0)
    createDate = models.DateField(default=timezone.now)
    class Meta:
        ordering = ('createDate',)
        unique_together = ('month', 'createDate')
class CollectNum(models.Model):
    """Daily collections workload counts and 3-day recovery rate."""
    newAdd = models.IntegerField(default=0)
    newCollectMl1 = models.IntegerField(default=0)
    newCollectMu1 = models.IntegerField(default=0)
    threeDayCollect = models.IntegerField(default=0)
    threeDayCollectRate = models.FloatField(default=0.0)
    createDate = models.DateField(primary_key=True,default=timezone.now)
    class Meta:
        ordering = ('createDate',)
class CollectDis(models.Model):
    """Daily distribution of overdue loans by days-past-due bucket."""
    dayto3 = models.IntegerField(default=0)
    dayto10 = models.IntegerField(default=0)
    dayto20 = models.IntegerField(default=0)
    dayto30 = models.IntegerField(default=0)
    dayto60 = models.IntegerField(default=0)
    dayto90 = models.IntegerField(default=0)
    dayover90 = models.IntegerField(default=0)
    currentNum = models.IntegerField(default=0)
    createDate = models.DateField(primary_key=True,default=timezone.now)
    class Meta:
        ordering = ('createDate',)
# ---- market models ----
class MarketNum(models.Model):
    """Daily marketing-funnel snapshot: approvals, first/second/third-day
    borrow conversion, repayment counts and audit-time metrics."""
    applyPass = models.IntegerField(default=0)
    firstDayT = models.IntegerField(default=0)
    firstDay = models.IntegerField(default=0)
    firstDayRate = models.FloatField(default=0.0)
    tryRate = models.IntegerField(default=0)
    secondDay = models.IntegerField(default=0)
    secondDayRate = models.FloatField(default=0.0)
    thirdDay = models.IntegerField(default=0)
    thirdDayRate = models.FloatField(default=0.0)
    paidNum = models.IntegerField(default=0)
    paidRate = models.FloatField(default=0.0)
    auditTime = models.IntegerField(default=0)
    auditTimeWit = models.IntegerField(default=0)
    auditTimeToday = models.IntegerField(default=0)
    createDate = models.DateField(primary_key=True,default=timezone.now)
    class Meta:
        ordering = ('createDate',)
# ---- aeye models: audit pipeline statistics (written by aeye.py) ----
class AeyePassRate(models.Model):
    """Daily audit volume, pass count and pass rate."""
    applyNum = models.IntegerField(default=0)
    passNum = models.IntegerField(default=0)
    passRate = models.FloatField(default=0.0)
    createDate = models.DateField(primary_key=True,default=timezone.now)
    class Meta:
        ordering = ('createDate',)
class AeyeGetRate(models.Model):
    """Daily attempt/success counts and success rate."""
    tryNum = models.IntegerField(default=0)
    sucNum = models.IntegerField(default=0)
    sucRate = models.FloatField(default=0.0)
    createDate = models.DateField(primary_key=True,default=timezone.now)
    class Meta:
        ordering = ('createDate',)
class AeyeDelayRate(models.Model):
    """Daily overdue rates at several day/month horizons."""
    delayRate0 = models.FloatField(default=0.0)
    delayRate3 = models.FloatField(default=0.0)
    delayRate7 = models.FloatField(default=0.0)
    delayRate10 = models.FloatField(default=0.0)
    delayRate20 = models.FloatField(default=0.0)
    delayRateM1 = models.FloatField(default=0.0)
    delayRateM2 = models.FloatField(default=0.0)
    delayRateM3 = models.FloatField(default=0.0)
    createDate = models.DateField(primary_key=True,default=timezone.now)
    class Meta:
        ordering = ('createDate',)
class AeyeDelayRateNO(models.Model):
    """Daily 3-day overdue rate, split new vs repeat borrowers."""
    newDelayRate3 = models.FloatField(default=0.0)
    newRepaySum = models.IntegerField(default=0)
    newPaid = models.IntegerField(default=0)
    oldDelayRate3 = models.FloatField(default=0.0)
    oldRepaySum = models.IntegerField(default=0)
    oldPaid = models.IntegerField(default=0)
    createDate = models.DateField(primary_key=True,default=timezone.now)
    class Meta:
        ordering = ('createDate',)
| {"/RiskMonitor/apps/login/admin.py": ["/RiskMonitor/apps/login/models.py"]} |
73,514 | xueyeelong/Amonitor | refs/heads/master | /RiskMonitor/api/actualApi/views.py | # -*- coding: utf-8 -*-
from rest_framework import status
from rest_framework.decorators import api_view
from rest_framework.response import Response
from django.contrib.auth.decorators import permission_required
#添加路径
# sys.path.insert(0,os.path.abspath(os.path.dirname(os.path.dirname(__file__))))
from actualCal.finance import finance
@permission_required('part_admin.dayapi')
@api_view(['GET'])
def actime_item(request):
    """Real-time finance API.

    GET params:
      table   -- only 'finance' is currently supported
      content -- 'list' returns per-fund payout totals,
                 'item' returns today's hour-by-hour detail
    Any other combination answers HTTP 400.
    """
    if request.method == 'GET':
        tables = request.GET.get('table', None)
        if tables == 'finance':
            content = request.GET.get('content', None)
            if content == 'list':
                # (The throwaway pre-initialised dicts of the original
                # were dead stores; build the payload directly.)
                return Response({'todayLoan': finance.todayLoan()})
            elif content == 'item':
                return Response(finance.todayLoanDetail())
    # Bug fix: the original returned `serializer.errors` here, but no
    # `serializer` was ever defined, so every unsupported request died
    # with a NameError (HTTP 500) instead of a clean 400.
    return Response({'detail': 'unsupported table/content'},
                    status=status.HTTP_400_BAD_REQUEST)
| {"/RiskMonitor/apps/login/admin.py": ["/RiskMonitor/apps/login/models.py"]} |
73,515 | xueyeelong/Amonitor | refs/heads/master | /RiskMonitor/apps/login/views.py | # -*- coding: utf-8 -*-
from django.shortcuts import render,render_to_response
from django.http import HttpResponse,HttpResponseRedirect
from django.template import RequestContext
from django.contrib.auth.decorators import login_required
from django.contrib.auth import login,logout,authenticate
from django.contrib.auth.models import User as authuser
from django import forms
from .models import User
class UserForm(forms.Form):
    """Login form: e-mail (max 20 chars) plus a masked password field."""
    email = forms.CharField(label='邮箱',max_length=20)
    password = forms.CharField(label='密码',widget=forms.PasswordInput())
def login_view(request):
    """Render the login form and authenticate submitted credentials.

    The posted email/password pair is first checked against the local
    login.User table, then against Django's auth framework; both must
    match before the session is logged in.
    """
    if request.method == 'POST':
        uf = UserForm(request.POST)
        if uf.is_valid():
            email = uf.cleaned_data['email']
            password = uf.cleaned_data['password']
            # SECURITY(review): filtering on the raw password column
            # implies plain-text password storage -- should use hashed
            # passwords via Django's auth machinery.
            user = User.objects.filter(email=email, password=password)
            if user:
                user = authenticate(username=email, password=password)
                if user is not None and user.is_active:
                    login(request,user)
                    return HttpResponseRedirect("/index/")
                # NOTE(review): if authenticate() rejects a user that
                # exists in login_user we fall through to the plain
                # login page with no error message -- confirm intended.
            else:
                return render(request,'login/login.html',{'sign': '账号或密码输入错误!'})
        else:
            return render(request,'login/login.html',{'sign': '输入无效!'})
    else:
        uf = UserForm()
    return render(request,'login/login.html')
@login_required(login_url='/login/')
def logout_view(request):
    """End the current session and redirect back to the login page."""
    logout(request)
    return HttpResponseRedirect("/login/")
| {"/RiskMonitor/apps/login/admin.py": ["/RiskMonitor/apps/login/models.py"]} |
73,516 | xueyeelong/Amonitor | refs/heads/master | /RiskMonitor/apps/collect/urls.py | # -*- coding: utf-8 -*-
from django.conf.urls import url
import views
# Collections-app routes; names are referenced from templates.
urlpatterns = [
    url(r'^collectRate/$', views.collectRate_view ,name='collectRate'),
    url(r'^collectNum/$', views.collectNum_view ,name='collectNum'),
]
| {"/RiskMonitor/apps/login/admin.py": ["/RiskMonitor/apps/login/models.py"]} |
73,517 | xueyeelong/Amonitor | refs/heads/master | /RiskMonitor/api/dayAddApi/migrations/0007_flowloanfund.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-11-30 05:15
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Auto-generated: adds the FlowLoanFund per-partner payout table.

    NOTE(review): applied migration -- annotate only, never edit.
    """
    dependencies = [
        ('dayAddApi', '0006_auto_20171121_1111'),
    ]
    operations = [
        migrations.CreateModel(
            name='FlowLoanFund',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('fundName', models.CharField(default=b'credan', max_length=128)),
                ('sumMoney', models.FloatField(default=0.0)),
                ('createDate', models.DateField(default=django.utils.timezone.now)),
            ],
            options={
                'ordering': ('createDate',),
            },
        ),
    ]
| {"/RiskMonitor/apps/login/admin.py": ["/RiskMonitor/apps/login/models.py"]} |
73,518 | xueyeelong/Amonitor | refs/heads/master | /RiskMonitor/api/dayAddApi/migrations/0009_auto_20171207_1450.py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2017-12-07 06:50
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated: changes UserRest's default ordering to the
    cohort pair (registerDate, currentDate)."""
    dependencies = [
        ('dayAddApi', '0008_userrest'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='userrest',
            options={'ordering': ('registerDate', 'currentDate')},
        ),
    ]
| {"/RiskMonitor/apps/login/admin.py": ["/RiskMonitor/apps/login/models.py"]} |
73,519 | xueyeelong/Amonitor | refs/heads/master | /dataCalculate/aeye/aeye.py | # -*- coding: utf-8 -*-
#
import MySQLdb
import pandas as pd
import numpy
import json
import sys
import os
import logging
import datetime
#添加路径
sys.path.insert(0,os.path.abspath(os.path.dirname(os.path.dirname(__file__))))
from libfile import pysql
from libfile import logger
import config
#日志
log = logger.Logger('aeye',os.path.abspath(os.path.dirname(os.path.dirname(__file__))) + "/" + config.log_path)
#数据包含时间
def timeScale(startTime = "2017-04-15"):
    """Return every date from *startTime* through today as 'YYYY-MM-DD'
    strings, ascending, both endpoints included.

    Bug fix: the original walked backwards one day at a time until the
    generated string happened to equal startTime, so a startTime in the
    future (or any string not in ISO format) made the loop spin
    forever.  This version computes the span arithmetically; a future
    startTime yields [] and a malformed one raises ValueError up front.
    """
    start = datetime.datetime.strptime(startTime, "%Y-%m-%d").date()
    today = datetime.date.today()
    span = (today - start).days
    # range() is empty when span is negative, giving [] for future dates.
    return [str(start + datetime.timedelta(days=i)) for i in range(span + 1)]
#通过率
def passRate():
    """Back-fill the daily loan-approval-rate table (dayAddApi_aeyepassrate).

    For every day since the default start date not yet stored locally,
    count distinct audited users and distinct approved users in the
    source DB and insert the derived pass rate.

    Fix vs. the original: a day with zero audited applications no longer
    raises ZeroDivisionError — its rate is stored as 0.
    """
    timeList = timeScale()
    # Days already present locally are skipped below.
    sql = 'select distinct createDate from dayAddApi_aeyepassrate'
    tmRest = pysql.dbInfoLocal(sql)
    tmRest = tmRest.fillna(0)
    tmwait = []
    if not tmRest.empty:
        tmwait = [str(x)[:10] for x in tmRest['createDate']]
    for i in range(len(timeList)-1):
        stTime = timeList[i]
        edTime = timeList[i+1]
        if stTime in tmwait:
            continue
        print('通过率' + stTime)
        # Daily audited applications.
        sql = """
            select count(distinct user_id) from ci_cash_apply_info where audit_date >= '{}' and audit_date < '{}'
        """.format(stTime,edTime)
        data = pysql.dbInfo(sql)
        data = data.fillna(0)
        applyNum = data.values[0][0]
        # Daily approved applications.
        sql = """
            select count(distinct user_id) from ci_cash_apply_info where audit_date >= '{}' and audit_date < '{}' and status = 'SUCCESS'
        """.format(stTime,edTime)
        data = pysql.dbInfo(sql)
        data = data.fillna(0)
        passNum = data.values[0][0]
        # Pass rate (%), 0 when nothing was audited that day.
        rate = round(passNum/float(applyNum)*100,2) if applyNum else 0
        sql = """ insert into dayAddApi_aeyepassrate(applyNum,passNum,passRate,createDate) values (%s,%s,%s,%s) """
        dset = [(applyNum,passNum,rate,stTime)]
        status = pysql.insertData(sql,dset)
        log.log('通过率数据更新状态-{}({})!'.format(status,stTime),'info')
#通过率新老
def passRateNO():
    """Back-fill daily approval rates split into new vs. returning users
    (dayAddApi_aeyepassrateno).

    "New" = no successful audit before the day in question; "old" = at
    least one earlier success.

    Bug fixes vs. the original:
    - the "new pass" and "old pass" SQL each contain three '{}'
      placeholders but were formatted with only two arguments
      (IndexError at runtime);
    - the rate computations referenced undefined names
      (``passNum``/``applyNum``);
    - the final insert wrote undefined values into
      ``dayAddApi_aeyepassrate``; it now targets
      ``dayAddApi_aeyepassrateno`` with per-segment columns.
      TODO(review): confirm the dayAddApi_aeyepassrateno column names
      against the Django model, which is not visible here.
    """
    timeList = timeScale()
    sql = 'select distinct createDate from dayAddApi_aeyepassrateno'
    tmRest = pysql.dbInfoLocal(sql)
    tmRest = tmRest.fillna(0)
    tmwait = []
    if not tmRest.empty:
        tmwait = [str(x)[:10] for x in tmRest['createDate']]
    for i in range(len(timeList)-1):
        stTime = timeList[i]
        edTime = timeList[i+1]
        if stTime in tmwait:
            continue
        print('通过率' + stTime)
        # Daily audited applications from first-time (new) users.
        sql = """
            select count(distinct user_id) from ci_cash_apply_info
            where audit_date >= '{}' and audit_date < '{}'
            and user_id not in (
            select distinct user_id from ci_cash_apply_info
            where audit_date < '{}' and status = 'SUCCESS'
            )
        """.format(stTime,edTime,stTime)
        data = pysql.dbInfo(sql)
        data = data.fillna(0)
        newApplyNum = data.values[0][0]
        # Daily approved applications from new users (third format arg was missing).
        sql = """
            select count(distinct user_id) from ci_cash_apply_info
            where audit_date >= '{}' and audit_date < '{}'
            and user_id not in (
            select distinct user_id from ci_cash_apply_info
            where audit_date < '{}' and status = 'SUCCESS'
            ) and status = 'SUCCESS'
        """.format(stTime,edTime,stTime)
        data = pysql.dbInfo(sql)
        data = data.fillna(0)
        newPassNum = data.values[0][0]
        # New-user pass rate (%); 0 when there were no new applications.
        newPassRate = round(newPassNum/float(newApplyNum)*100,2) if newApplyNum else 0
        # Daily audited applications from returning (old) users.
        sql = """
            select count(distinct user_id) from ci_cash_apply_info
            where audit_date >= '{}' and audit_date < '{}'
            and user_id in (
            select distinct user_id from ci_cash_apply_info
            where audit_date < '{}' and status = 'SUCCESS'
            )
        """.format(stTime,edTime,stTime)
        data = pysql.dbInfo(sql)
        data = data.fillna(0)
        oldApplyNum = data.values[0][0]
        # Daily approved applications from returning users (third format arg was missing).
        sql = """
            select count(distinct user_id) from ci_cash_apply_info
            where audit_date >= '{}' and audit_date < '{}'
            and user_id in (
            select distinct user_id from ci_cash_apply_info
            where audit_date < '{}' and status = 'SUCCESS'
            ) and status = 'SUCCESS'
        """.format(stTime,edTime,stTime)
        data = pysql.dbInfo(sql)
        data = data.fillna(0)
        oldPassNum = data.values[0][0]
        # Returning-user pass rate (%); 0 when there were no returning applications.
        oldPassRate = round(oldPassNum/float(oldApplyNum)*100,2) if oldApplyNum else 0
        sql = """ insert into dayAddApi_aeyepassrateno(newApplyNum,newPassNum,newPassRate,oldApplyNum,oldPassNum,oldPassRate,createDate) values (%s,%s,%s,%s,%s,%s,%s) """
        dset = [(newApplyNum,newPassNum,newPassRate,oldApplyNum,oldPassNum,oldPassRate,stTime)]
        status = pysql.insertData(sql,dset)
        log.log('通过率数据更新状态-{}({})!'.format(status,stTime),'info')
#逾期率
def delayDay():
    """Rebuild the dayAddApi_aeyedelayrate table: per due-date overdue-money
    ratios at several ageing thresholds (0/3/7/10/20/30/60/90 days).
    The table is wiped and fully re-inserted on every run.
    """
    # Scope: funded loans (status=6), cancelled terms excluded,
    # product ids not in (3,4,5,6,1001), due dates strictly before today.
    sql = """
        select DATE_FORMAT(b.termDate,'%Y-%m-%d') 'date',sum(b.repayMoney) 'allMoney' from loan a,loan_repaying b
        where a.id=b.loanId and a.status=6 and b.compatibleStatus not in ('CANCEL') and b.productId not in (3,4,5,6,1001)
        and b.termDate < DATE_FORMAT(now(),'%Y-%m-%d')
        GROUP BY DATE_FORMAT(b.termDate,'%Y-%m-%d');
    """
    alldata = pysql.dbInfo(sql)
    # Ageing thresholds in days; 0 = first-day overdue rate.
    delayPoint = [0,3,7,10,20,30,60,90]
    pp = []
    for day in delayPoint:
        # Money repaid within `day` days of the due date, grouped by due date.
        sql = """
            select DATE_FORMAT(c.termDate,'%Y-%m-%d') 'date',sum(c.repayMoney) 'payMoney' from (
            select a.payMoney,b.* from loan a,loan_repaying b
            where a.id=b.loanId and a.status=6 and b.compatibleStatus not in ('CANCEL') and b.productId not in (3,4,5,6,1001)
            and b.termDate < DATE_FORMAT(now(),'%Y-%m-%d')
            HAVING if(b.repaidTime is NULL,TO_DAYS(now()) - TO_DAYS(b.termDate),TO_DAYS(b.repaidTime) - TO_DAYS(b.termDate)) <= {}) c
            GROUP BY DATE_FORMAT(c.termDate,'%Y-%m-%d');
        """.format(day)
        plan = pysql.dbInfo(sql)
        repay = pd.merge(alldata,plan)
        # Overdue ratio (%) = (amount due - amount repaid in time) / amount due.
        pp.append(pd.Series([round(x*100,2) for x in (repay['allMoney']-repay['payMoney'])/repay['allMoney']],index=repay['date']))
    # NOTE(review): `join_axes` was removed in pandas 1.0 — this code
    # requires an older pandas release; confirm the deployed version.
    pt = pd.concat(pp, axis=1, join_axes=[pp[0].index])
    pt.columns = ['首逾率','逾期率3+','逾期率7+','逾期率10+','逾期率20+','逾期率M1','逾期率M2','逾期率M3']
    pt = pt.fillna(0)
    pt['times'] = list(pt.index)
    # One list per ageing threshold, plus the due-date index (stt).
    s0 = list(pt['首逾率'])
    s3 = list(pt['逾期率3+'])
    s7 = list(pt['逾期率7+'])
    s10 = list(pt['逾期率10+'])
    s20 = list(pt['逾期率20+'])
    sM1 = list(pt['逾期率M1'])
    sM2 = list(pt['逾期率M2'])
    sM3 = list(pt['逾期率M3'])
    stt = list(pt['times'])
    # Full rebuild: delete everything, then re-insert every row.
    sql = "delete from dayAddApi_aeyedelayrate"
    status = pysql.deletetData(sql)
    log.log(u'逾期数据删除状态-{}!'.format(status),'info')
    sql = """ insert into dayAddApi_aeyedelayrate(delayRate0,delayRate3,delayRate7,delayRate10,delayRate20,delayRateM1,delayRateM2,delayRateM3,createDate) values (%s,%s,%s,%s,%s,%s,%s,%s,%s) """
    dset = zip(s0,s3,s7,s10,s20,sM1,sM2,sM3,stt)
    status = pysql.insertData(sql,dset)
    log.log(u'逾期数据更新状态-{}!'.format(status),'info')
#逾期情况(新老)
def delayDayNO():
    """Back-fill 3-day overdue rates split into new vs. returning borrowers
    (dayAddApi_aeyedelayrateno), one row per due date.

    "New" = no repayment term due before the day; "old" = at least one.

    Fix vs. the original: a day whose due amount sums to zero no longer
    raises ZeroDivisionError — its rate is stored as 0.
    """
    # Stop three days short of today so the 3-day repayment window is complete.
    timeList = timeScale('2017-08-30')[:-3]
    sql = "select distinct createDate from dayAddApi_aeyedelayrateno"
    tmRest = pysql.dbInfoLocal(sql)
    tmRest = tmRest.fillna(0)
    tmwait = []
    if not tmRest.empty:
        tmwait = [str(x)[:10] for x in tmRest['createDate']]
    for i in range(len(timeList)-1):
        stTime = timeList[i]
        edTime = timeList[i+1]
        if stTime in tmwait:
            continue
        print('逾期(新老)3天逾期率' + stTime)
        # Total due that day from new borrowers.
        sql = """
            select sum(a.repayMoney)
            from loan a,loan_repaying b
            where a.id=b.loanId and a.status=6 and b.compatibleStatus not in ('CANCEL') and b.productId not in (3,4,5,6,1001)
            and b.termDate >= '{}' and b.termDate < '{}'
            and a.userSid not in (select distinct userSid from loan_repaying where termDate < '{}')
        """.format(stTime,edTime,stTime)
        data = pysql.dbInfo(sql)
        data = data.fillna(0)
        newRepaySum = data.values[0][0]
        # Amount repaid within 3 days of the due date by new borrowers.
        sql = """
            select sum(a.repayMoney)
            from loan a,loan_repaying b
            where a.id=b.loanId and a.status=6 and b.compatibleStatus not in ('CANCEL') and b.productId not in (3,4,5,6,1001)
            and b.termDate >= '{}' and b.termDate < '{}'
            and if(b.repaidTime is NULL,TO_DAYS(now()) - TO_DAYS(b.termDate),TO_DAYS(b.repaidTime) - TO_DAYS(b.termDate)) <= 3
            and a.userSid not in (
            select distinct userSid from loan_repaying where termDate < '{}'
            )
        """.format(stTime,edTime,stTime)
        data = pysql.dbInfo(sql)
        data = data.fillna(0)
        newPaid = data.values[0][0]
        # 3-day overdue rate (%); 0 when nothing was due (avoids division by zero).
        newDelayRate = round((newRepaySum - newPaid)/newRepaySum*100,2) if newRepaySum else 0
        # Total due that day from returning borrowers.
        sql = """
            select sum(a.repayMoney)
            from loan a,loan_repaying b
            where a.id=b.loanId and a.status=6 and b.compatibleStatus not in ('CANCEL') and b.productId not in (3,4,5,6,1001)
            and b.termDate >= '{}' and b.termDate < '{}'
            and a.userSid in (
            select distinct userSid from loan_repaying where termDate < '{}'
            )
        """.format(stTime,edTime,stTime)
        data = pysql.dbInfo(sql)
        data = data.fillna(0)
        oldRepaySum = data.values[0][0]
        # Amount repaid within 3 days of the due date by returning borrowers.
        sql = """
            select sum(a.repayMoney)
            from loan a,loan_repaying b
            where a.id=b.loanId and a.status=6 and b.compatibleStatus not in ('CANCEL') and b.productId not in (3,4,5,6,1001)
            and b.termDate >= '{}' and b.termDate < '{}'
            and if(b.repaidTime is NULL,TO_DAYS(now()) - TO_DAYS(b.termDate),TO_DAYS(b.repaidTime) - TO_DAYS(b.termDate)) <= 3
            and a.userSid in (
            select distinct userSid from loan_repaying where termDate < '{}'
            )
        """.format(stTime,edTime,stTime)
        data = pysql.dbInfo(sql)
        data = data.fillna(0)
        oldPaid = data.values[0][0]
        oldDelayRate = round((oldRepaySum - oldPaid)/oldRepaySum*100,2) if oldRepaySum else 0
        sql = """ insert into dayAddApi_aeyedelayrateno(newRepaySum,newPaid,newDelayRate3,oldRepaySum,oldPaid,oldDelayRate3,createDate) values (%s,%s,%s,%s,%s,%s,%s) """
        dset = [(newRepaySum,newPaid,newDelayRate,oldRepaySum,oldPaid,oldDelayRate,stTime)]
        status = pysql.insertData(sql,dset)
        log.log(u'逾期3天(新老)数据更新状态-{}!({})!'.format(status,stTime),'info')
#成功贷款率
def getRate():
    """Back-fill the daily successful-loan-rate table (dayAddApi_aeyegetrate).

    Per day: distinct users who created a loan vs. distinct users whose
    loan reached status 6.

    Fix vs. the original: a day with no loan attempts no longer raises
    ZeroDivisionError — its rate is stored as 0.
    """
    timeList = timeScale('2017-05-01')
    sql = "select distinct createDate from dayAddApi_aeyegetrate"
    tmRest = pysql.dbInfoLocal(sql)
    tmRest = tmRest.fillna(0)
    tmwait = []
    if not tmRest.empty:
        tmwait = [str(x)[:10] for x in tmRest['createDate']]
    for i in range(len(timeList)-1):
        stTime = timeList[i]
        edTime = timeList[i+1]
        if stTime in tmwait:
            continue
        print('成功贷款率' + stTime)
        # Users who attempted a loan that day.
        sql = """
            select count(distinct userSid) from loan where createdTime >= '{}' and createdTime < '{}'
        """.format(stTime,edTime)
        data = pysql.dbInfo(sql)
        data = data.fillna(0)
        tryNum = data.values[0][0]
        # Users whose loan succeeded (status 6).
        sql = """
            select count(distinct userSid) from loan where createdTime >= '{}' and createdTime < '{}' and status = 6
        """.format(stTime,edTime)
        data = pysql.dbInfo(sql)
        data = data.fillna(0)
        sucNum = data.values[0][0]
        # Success rate (%); 0 when nobody tried that day.
        sucRate = round(sucNum/float(tryNum)*100,2) if tryNum else 0
        sql = """ insert into dayAddApi_aeyegetrate(tryNum,sucNum,sucRate,createDate) values (%s,%s,%s,%s) """
        dset = [(tryNum,sucNum,sucRate,stTime)]
        status = pysql.insertData(sql,dset)
        log.log(u'成功贷款数据更新状态-{}!({})!'.format(status,stTime),'info')
def main():
    """Run every aeye metric back-fill job, in order."""
    jobs = (passRate, passRateNO, delayDay, delayDayNO, getRate)
    for job in jobs:
        job()

if __name__ == '__main__':
    main()
| {"/RiskMonitor/apps/login/admin.py": ["/RiskMonitor/apps/login/models.py"]} |
73,520 | xueyeelong/Amonitor | refs/heads/master | /dataCalculate/market/market.py | # -*- coding: utf-8 -*-
#
import MySQLdb
import pandas as pd
import numpy as np
import json
import sys
import os
import logging
import datetime
import gc
#添加路径
sys.path.insert(0,os.path.abspath(os.path.dirname(os.path.dirname(__file__))))
from libfile import pysql
from libfile import logger
import config
#日志
log = logger.Logger('market',os.path.abspath(os.path.dirname(os.path.dirname(__file__))) + "/" + config.log_path)
#路径
def getTheFile(filename):
    """Return *filename*'s absolute path inside this script's directory."""
    base = os.path.dirname(__file__)
    return os.path.abspath(base) + "/" + filename
#数据包含时间
def timeScale(startTime = "2017-05-01"):
    """Return every ISO date from *startTime* through today, ascending.

    *startTime* must be today or earlier in 'YYYY-MM-DD' form, otherwise
    the backwards scan never terminates.
    """
    today = datetime.date.today()
    # First find how many days back startTime lies ...
    span = 0
    while str(today - datetime.timedelta(days=span)) != startTime:
        span += 1
    # ... then emit the range oldest-first.
    return [str(today - datetime.timedelta(days=d)) for d in range(span, -1, -1)]
def passRateloan():
    """Back-fill the daily withdrawal-funnel table (dayAddApi_marketnum).

    For each day since 2017-05-01:
    - days already stored and older than three days only get their
      cumulative withdrawal numbers (paidNum/paidRate) refreshed in place;
    - days not stored yet get a full row: approvals, same-day withdrawal
      attempts/successes, cumulative withdrawals, and trimmed-mean audit
      durations.

    Fix vs. the original: the refreshed ``paidRate`` guarded on the
    numerator (``paidNum > 2``) while every other rate in this function
    guards on the denominator; it now checks ``applyPass > 2``, which is
    consistent and prevents ZeroDivisionError when a stored day has no
    approvals.

    NOTE(review): a stored day younger than three days falls through to
    the insert path and would create a duplicate row — confirm whether a
    delete/update was intended there.
    """
    endTime = str(datetime.date.today()-datetime.timedelta(days=3))
    timeList = timeScale('2017-05-01')
    sql = 'select distinct createDate from dayAddApi_marketnum'
    tmRest = pysql.dbInfoLocal(sql)
    tmRest = tmRest.fillna(0)
    tmwait = []
    if not tmRest.empty:
        tmwait = [str(x)[:10] for x in tmRest['createDate']]
    for i in range(len(timeList)-1):
        stTime = timeList[i]
        edTime1 = timeList[i+1]
        if stTime in tmwait and stTime < endTime:
            # Refresh cumulative withdrawal success for an already-stored day.
            sql = """
                select count(DISTINCT b.userSid) from ci_cash_apply_info a,loan b
                where a.user_id=b.userSid and a.product_id = b.productId
                and a.audit_date >= '{}' and a.audit_date < '{}'
                and b.createdTime > '{}'
                and a.status in ('SUCCESS') and b.status=6;
            """.format(stTime,edTime1,stTime)
            data = pysql.dbInfo(sql)
            data = data.fillna(0)
            paidNum = data.values[0][0]
            sql = """
                select applyPass from dayAddApi_marketnum
                where createDate >= '{}' and createDate < '{}'
            """.format(stTime,edTime1)
            data = pysql.dbInfoLocal(sql)
            data = data.fillna(0)
            applyPass = data.values[0][0]
            # Cumulative withdrawal-success rate (%); guard on the denominator.
            paidRate = round(paidNum/float(applyPass)*100,2) if applyPass > 2 else 0
            # Persist the refreshed numbers.
            sql = """
                update dayAddApi_marketnum
                set paidRate = {} ,paidNum = {}
                where createDate >= '{}' and createDate < '{}'
            """.format(paidRate,paidNum,stTime,edTime1)
            status = pysql.updateData(sql)
            log.log('当前申请提现成功率更新状态-{}! ({})'.format(status,stTime),'info')
            continue
        print('提现情况' + stTime)
        # Template row. secondDay/thirdDay stay 0 in this job — presumably
        # filled elsewhere (TODO confirm).
        dayNum = {
            'applyPass':0,
            'firstDayT':0,
            'firstDay':0,
            'firstDayRate':0,
            'tryRate':0,
            'secondDay':0,
            'secondDayRate':0,
            'thirdDay':0,
            'thirdDayRate':0,
            'paidNum':0,
            'paidRate':0,
            'auditTime':0,
            'auditTimeWit':0,
            'auditTimeToday':0
        }
        # Approvals audited that day.
        sql = """
            select count(distinct user_id) from ci_cash_apply_info
            where audit_date > '{}' and audit_date < '{}' and status in ('SUCCESS','FA_SUCCESS')
        """.format(stTime,edTime1)
        data = pysql.dbInfo(sql)
        data = data.fillna(0)
        dayNum['applyPass'] = data.values[0][0]
        # Same-day withdrawal attempts among the day's approvals.
        sql = """
            select count(DISTINCT b.userSid) from ci_cash_apply_info a,loan b
            where a.user_id=b.userSid and a.product_id = b.productId
            and a.audit_date > '{}' and a.audit_date < '{}'
            and b.createdTime > '{}' and b.createdTime < '{}'
            and a.status in ('SUCCESS','FA_SUCCESS');
        """.format(stTime,edTime1,stTime,edTime1)
        data = pysql.dbInfo(sql)
        data = data.fillna(0)
        dayNum['firstDayT'] = data.values[0][0]
        # Same-day successful withdrawals (loan status 6).
        sql = """
            select count(DISTINCT b.userSid) from ci_cash_apply_info a,loan b
            where a.user_id=b.userSid and a.product_id = b.productId
            and a.audit_date > '{}' and a.audit_date < '{}'
            and b.createdTime > '{}' and b.createdTime < '{}'
            and a.status in ('SUCCESS','FA_SUCCESS') and b.status=6;
        """.format(stTime,edTime1,stTime,edTime1)
        data = pysql.dbInfo(sql)
        data = data.fillna(0)
        dayNum['firstDay'] = data.values[0][0]
        # Same-day attempt and success rates (%); rates forced to 0 for tiny denominators.
        dayNum['tryRate'] = round(dayNum['firstDayT']/float(dayNum['applyPass'])*100,2) if dayNum['applyPass'] > 2 else 0
        dayNum['firstDayRate'] = round(dayNum['firstDay']/float(dayNum['applyPass'])*100,2) if dayNum['applyPass'] > 2 else 0
        # Cumulative successful withdrawals to date for the day's approvals.
        sql = """
            select count(DISTINCT b.userSid) from ci_cash_apply_info a,loan b
            where a.user_id=b.userSid and a.product_id = b.productId
            and a.audit_date > '{}' and a.audit_date < '{}'
            and b.createdTime > '{}'
            and a.status in ('SUCCESS','FA_SUCCESS') and b.status=6;
        """.format(stTime,edTime1,stTime)
        data = pysql.dbInfo(sql)
        data = data.fillna(0)
        dayNum['paidNum'] = data.values[0][0]
        dayNum['paidRate'] = round(dayNum['paidNum']/float(dayNum['applyPass'])*100,2) if dayNum['applyPass'] > 2 else 0
        # Audit durations (seconds between creation and audit), trimmed to
        # the 15th–85th percentile before averaging, stored as minutes.
        sql = """
            select DATE_FORMAT(create_time,'%H') 'hour',(UNIX_TIMESTAMP(audit_date) - UNIX_TIMESTAMP(create_time)) 'wait_second'
            from ci_cash_apply_info
            where audit_date is not null and status in ('SUCCESS','FA_SUCCESS')
            and audit_date > '{}' and audit_date < '{}'
        """.format(stTime,edTime1)
        data = pysql.dbInfo(sql)
        data = data.fillna(0)
        if len(data) > 10:
            lp = data['wait_second']
            lp = lp[lp>np.percentile(data['wait_second'],15)]
            lp = lp[lp<np.percentile(data['wait_second'],85)]
            dayNum['auditTime'] = int(np.mean(lp)/60)
        # Drop night-hour applications for the working-hours figure.
        data = data[data['hour'].map(lambda x: x not in ['22','23','00','01','02','03','04','05','06'])]
        if not data.empty:
            if len(data) > 10:
                lp = data['wait_second']
                lp = lp[lp>np.percentile(data['wait_second'],15)]
                lp = lp[lp<np.percentile(data['wait_second'],85)]
                dayNum['auditTimeWit'] = int(np.mean(lp)/60)
        # Same-day audits only (created and audited on the same date).
        sql = """
            select DATE_FORMAT(create_time,'%H') 'hour',(UNIX_TIMESTAMP(audit_date) - UNIX_TIMESTAMP(create_time)) 'wait_second'
            from ci_cash_apply_info
            where audit_date is not null and status in ('SUCCESS','FA_SUCCESS') and DATE_FORMAT(audit_date,'%Y-%m-%d')=DATE_FORMAT(create_time,'%Y-%m-%d')
            and audit_date > '{}' and audit_date < '{}'
        """.format(stTime,edTime1)
        data = pysql.dbInfo(sql)
        data = data.fillna(0)
        data = data[data['hour'].map(lambda x: x not in ['22','23','00','01','02','03','04','05','06'])]
        if len(data) > 10:
            lp = data['wait_second']
            lp = lp[lp>np.percentile(data['wait_second'],15)]
            lp = lp[lp<np.percentile(data['wait_second'],85)]
            dayNum['auditTimeToday'] = int(np.mean(lp)/60)
        sql = """ insert into dayAddApi_marketnum(applyPass,firstDayT,firstDay,firstDayRate,tryRate,secondDay,secondDayRate,thirdDay,thirdDayRate,paidNum,paidRate,auditTime,auditTimeWit,auditTimeToday,createDate) values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s) """
        dset = [(
            dayNum['applyPass'] ,
            dayNum['firstDayT'] ,
            dayNum['firstDay'] ,
            dayNum['firstDayRate'] ,
            dayNum['tryRate'] ,
            dayNum['secondDay'] ,
            dayNum['secondDayRate'] ,
            dayNum['thirdDay'] ,
            dayNum['thirdDayRate'] ,
            dayNum['paidNum'] ,
            dayNum['paidRate'] ,
            dayNum['auditTime'] ,
            dayNum['auditTimeWit'] ,
            dayNum['auditTimeToday'] ,
            stTime
        )]
        status = pysql.insertData(sql,dset)
        log.log(u'逾审核时间数据更新更新状态-{}!({})!'.format(status,stTime),'info')
def main():
    """Entry point: refresh the daily withdrawal-funnel metrics."""
    passRateloan()
def strtotime(strtime):
    """Return *strtime*, substituting the current local datetime when it
    is pandas' missing-timestamp marker NaT; any other value (including
    None) is passed through unchanged.

    Fix vs. the original: ``pd.tslib.NaTType`` was removed in pandas
    0.25+; ``type(pd.NaT)`` is the portable spelling of the same check.
    """
    if isinstance(strtime, type(pd.NaT)):
        strtime = datetime.datetime.now()
    return strtime

# Script entry point: recompute the withdrawal-funnel metrics when run directly.
if __name__ == '__main__':
    main()
| {"/RiskMonitor/apps/login/admin.py": ["/RiskMonitor/apps/login/models.py"]} |
73,521 | xueyeelong/Amonitor | refs/heads/master | /RiskMonitor/apps/aeye/urls.py | # -*- coding: utf-8 -*-
from django.conf.urls import url
import views
# URL routes for the aeye (risk-control) dashboard views.
urlpatterns = [
    url(r'^aeyePassRate/$', views.aeyePassRate_view ,name='aeyepassrate'),  # approval-rate page
    url(r'^aeyeDelayDay/$', views.aeyeDelayDay_view ,name='aeyedelayday'),  # overdue-rate page
    url(r'^aeyeGetRate/$', views.aeyeGetRate_view ,name='aeyegetrate'),  # successful-loan-rate page
]
| {"/RiskMonitor/apps/login/admin.py": ["/RiskMonitor/apps/login/models.py"]} |
73,522 | xueyeelong/Amonitor | refs/heads/master | /RiskMonitor/apps/flow/urls.py | # -*- coding: utf-8 -*-
from django.conf.urls import url
import views
# URL routes for the funding-flow dashboard views.
urlpatterns = [
    url(r'^flowLoan/$', views.flowLoan_view ,name='loan'),  # loan amounts
    url(r'^flowRepayment/$', views.flowRepayment_view ,name='repayment'),  # repayments
    url(r'^flowIncome/$', views.flowIncome_view ,name='income'),  # income
    url(r'^flowDelayRateFund/$', views.flowDelayRateFund_view ,name='delayRateFund'),  # overdue by fund
    url(r'^flowLoanFund/$', views.flowLoanFund_view ,name='loanFund'),  # loans by fund
    url(r'^flowC2C/$', views.flowC2C_view ,name='c2c'),  # C2C funding
]
]
| {"/RiskMonitor/apps/login/admin.py": ["/RiskMonitor/apps/login/models.py"]} |
73,523 | xueyeelong/Amonitor | refs/heads/master | /RiskMonitor/api/dayAddApi/serializers.py | from rest_framework import serializers
# #index
from models import IndexHead, IndexDash, IndexHopper, IndexCity, IndexAcrepay
class IndexHeadSerializer(serializers.ModelSerializer):
    """Serializer exposing the IndexHead headline fields.

    Fix vs. the original: ``fields`` listed 'tradeMoney' twice; the
    duplicate is dropped. TODO(review): confirm the second entry was not
    meant to be a different IndexHead model field.
    """
    class Meta:
        model = IndexHead
        fields = ('tradeMoney', 'tradeNum', 'activeUser', 'sumUser', 'createDate')
class IndexDashSerializer(serializers.ModelSerializer):
    """Serializer exposing the listed IndexDash fields."""
    class Meta:
        model = IndexDash
        fields = ('avgTermNum', 'avgMoney', 'avgServiceMoney', 'createDate')

class IndexHopperSerializer(serializers.ModelSerializer):
    """Serializer exposing the listed IndexHopper (funnel-count) fields."""
    class Meta:
        model = IndexHopper
        fields = ('register', 'applys', 'passs', 'loan', 'reloan', 'createDate')

class IndexCitySerializer(serializers.ModelSerializer):
    """Serializer exposing the listed IndexCity fields."""
    class Meta:
        model = IndexCity
        fields = ('cityName', 'numInCity', 'createDate')

class IndexAcrepaySerializer(serializers.ModelSerializer):
    """Serializer exposing the listed IndexAcrepay (repayment) fields."""
    class Meta:
        model = IndexAcrepay
        fields = ('allRepayMoney', 'acRepayMoney', 'repayRate', 'createDate')
#userInfo
from models import UserAge, UserAgeAll, UserSex, UserSexAll, UserIncrease, UserRest
class UserAgeSerializer(serializers.ModelSerializer):
    """Serializer exposing the daily UserAge bucket counts."""
    class Meta:
        model = UserAge
        fields = ('age1', 'age2', 'age3', 'age4', 'age5', 'createDate')

class UserAgeAllSerializer(serializers.ModelSerializer):
    """Serializer exposing the all-time UserAgeAll bucket counts."""
    class Meta:
        model = UserAgeAll
        fields = ('age1', 'age2', 'age3', 'age4', 'age5', 'createDate')

class UserSexSerializer(serializers.ModelSerializer):
    """Serializer exposing the daily UserSex counts."""
    class Meta:
        model = UserSex
        fields = ('male', 'female', 'createDate')

class UserSexAllSerializer(serializers.ModelSerializer):
    """Serializer exposing the all-time UserSexAll counts."""
    class Meta:
        model = UserSexAll
        fields = ('male', 'female', 'createDate')

class UserIncreaseSerializer(serializers.ModelSerializer):
    """Serializer exposing the listed UserIncrease fields."""
    class Meta:
        model = UserIncrease
        fields = ('register', 'allow', 'newApply', 'oldApply', 'createDate')

class UserRestSerializer(serializers.ModelSerializer):
    """Serializer exposing the listed UserRest (retention) fields."""
    class Meta:
        model = UserRest
        fields = ('registerDate', 'currentDate', 'allPass', 'currentActive', 'currentActiveRate', 'createDate')
#flow
from models import FlowLoanMoney, FlowLoanMoneyNO, FlowLoanMoneySum, FlowDelayRate, FlowDelayRateNO, FlowLoanFund, FlowPaidMoney, FlowC2CFund
class FlowLoanMoneySerializer(serializers.ModelSerializer):
    """Serializer exposing the listed FlowLoanMoney fields."""
    class Meta:
        model = FlowLoanMoney
        fields = ('product', 'money', 'createDate')

class FlowLoanMoneyNOSerializer(serializers.ModelSerializer):
    """Serializer exposing FlowLoanMoneyNO (new vs. old loan amounts)."""
    class Meta:
        model = FlowLoanMoneyNO
        fields = ('loanOld', 'loanNew', 'createDate')

class FlowLoanMoneySumSerializer(serializers.ModelSerializer):
    """Serializer exposing the listed FlowLoanMoneySum fields."""
    class Meta:
        model = FlowLoanMoneySum
        fields = ('product', 'money', 'createDate')

class FlowDelayRateSerializer(serializers.ModelSerializer):
    """Serializer exposing FlowDelayRate (per-fund overdue rates)."""
    class Meta:
        model = FlowDelayRate
        fields = ('fundName', 'delayRate0', 'delayRate3', 'delayRate7', 'delayRate10', 'delayRate20', 'delayRateM1', 'delayRateM2', 'delayRateM3', 'createDate')
class FlowDelayRateNOSerializer(serializers.ModelSerializer):
    """Serializer exposing FlowDelayRateNO (overdue rates split by
    new vs. returning borrowers, per fund).

    Fix vs. the original: ``fields`` listed 'createDate' twice; the
    duplicate is dropped.
    """
    class Meta:
        model = FlowDelayRateNO
        fields = ('fundName', 'newDelayRate3', 'newRepaySum', 'newPaid', 'oldDelayRate3', 'oldRepaySum', 'oldPaid', 'createDate')
class FlowLoanFundSerializer(serializers.ModelSerializer):
    """Serializer exposing the listed FlowLoanFund fields."""
    class Meta:
        model = FlowLoanFund
        fields = ('fundName', 'sumMoney', 'createDate')

class FlowPaidMoneySerializer(serializers.ModelSerializer):
    """Serializer exposing the listed FlowPaidMoney fields."""
    class Meta:
        model = FlowPaidMoney
        fields = ('paidMoney', 'createDate')

class FlowC2CFundSerializer(serializers.ModelSerializer):
    """Serializer exposing the listed FlowC2CFund fields."""
    class Meta:
        model = FlowC2CFund
        fields = ('member', 'loanCount', 'loanMoney', 'loanCountTerm', 'loanCountTermNo', 'delayRate0', 'allCountTerm', 'delayRate7', 'CountTerm7', 'createDate')
#collect
from models import CollectRate, CollectNum, CollectDis
class CollectRateSerializer(serializers.ModelSerializer):
    """Serializer exposing CollectRate (monthly collection-rate) fields."""
    class Meta:
        model = CollectRate
        fields = ('month', 'day4Rate', 'day7Rate', 'day15Rate', 'day30Rate', 'day60Rate', 'day90Rate', 'day90Ratem','createDate')

class CollectNumSerializer(serializers.ModelSerializer):
    """Serializer exposing the listed CollectNum fields."""
    class Meta:
        model = CollectNum
        fields = ('newAdd', 'newCollectMl1', 'newCollectMu1', 'threeDayCollect', 'threeDayCollectRate', 'createDate')

class CollectDisSerializer(serializers.ModelSerializer):
    """Serializer exposing CollectDis (overdue ageing distribution)."""
    class Meta:
        model = CollectDis
        fields = ('dayto3', 'dayto10', 'dayto20', 'dayto30', 'dayto60', 'dayto90', 'dayover90', 'currentNum','createDate')
#market
from models import MarketNum
class MarketNumSerializer(serializers.ModelSerializer):
    """Serializer exposing the daily withdrawal-funnel (MarketNum) fields."""
    class Meta:
        model = MarketNum
        fields = ('applyPass', 'firstDayT', 'firstDay', 'firstDayRate', 'tryRate', 'secondDay', 'secondDayRate', 'thirdDay', 'thirdDayRate', 'paidNum', 'paidRate', 'auditTime', 'auditTimeWit','auditTimeToday','createDate')
#aeye
from models import AeyePassRate, AeyeGetRate, AeyeDelayRate, AeyeDelayRateNO
class AeyePassRateSerializer(serializers.ModelSerializer):
    """Serializer exposing the daily approval-rate (AeyePassRate) fields."""
    class Meta:
        model = AeyePassRate
        fields = ('applyNum', 'passNum', 'passRate', 'createDate')

class AeyeGetRateSerializer(serializers.ModelSerializer):
    """Serializer exposing the daily successful-loan-rate (AeyeGetRate) fields."""
    class Meta:
        model = AeyeGetRate
        fields = ('tryNum', 'sucNum', 'sucRate', 'createDate')

class AeyeDelayRateSerializer(serializers.ModelSerializer):
    """Serializer exposing the overdue-rate (AeyeDelayRate) fields."""
    class Meta:
        model = AeyeDelayRate
        fields = ('delayRate0', 'delayRate3', 'delayRate7', 'delayRate10', 'delayRate20', 'delayRateM1', 'delayRateM2', 'delayRateM3', 'createDate')
class AeyeDelayRateNOSerializer(serializers.ModelSerializer):
    """Serializer exposing AeyeDelayRateNO (3-day overdue rates split by
    new vs. returning borrowers).

    Fix vs. the original: ``fields`` listed 'createDate' twice; the
    duplicate is dropped.
    """
    class Meta:
        model = AeyeDelayRateNO
        fields = ('newDelayRate3', 'newRepaySum', 'newPaid', 'oldDelayRate3', 'oldRepaySum', 'oldPaid', 'createDate')
73,524 | xueyeelong/Amonitor | refs/heads/master | /dataCalculate/userInfo/userInfo.py | # -*- coding: utf-8 -*-
#
import MySQLdb
import pandas as pd
import numpy as np
import sys
import os
import datetime
import dateutil
#添加路径
sys.path.insert(0,os.path.abspath(os.path.dirname(os.path.dirname(__file__))))
from libfile import pysql
from libfile import logger
import config
#日志
log = logger.Logger('userInfo',os.path.abspath(os.path.dirname(os.path.dirname(__file__))) + "/" + config.log_path)
#数据包含时间
def timeScale(startTime = "2017-03-01"):
    """Return every date from *startTime* through today (inclusive),
    oldest first, as 'YYYY-MM-DD' strings.

    *startTime* must be today or earlier in that exact format, otherwise
    the backwards walk never terminates.
    """
    day = datetime.date.today()
    out = []
    step = datetime.timedelta(days=1)
    while True:
        text = str(day)
        out.append(text)
        if text == startTime:
            break
        day = day - step
    out.reverse()
    return out
#数据包含时间 month
def timeScaleMonth(startTime = "2017-03-01"):
    """Return ascending 'YYYY-MM' strings from *startTime*'s month up to
    the current month (inclusive).

    Fix vs. the original: the file only does ``import dateutil``, which
    does not load the ``relativedelta`` submodule, so the attribute
    access ``dateutil.relativedelta`` can raise AttributeError unless
    something else happened to import it first. The submodule is now
    imported explicitly here.
    """
    from dateutil.relativedelta import relativedelta
    nowTime = datetime.date.today()
    i = 0
    timeList = []
    while True:
        # Step back i whole months; stop once we pass the start date.
        endTime = str(nowTime - relativedelta(months=i))
        if endTime < startTime:
            break
        timeList.insert(0, endTime[:7])
        i = i + 1
    return timeList
def userSex():
    """Refresh gender-split user counts: one all-time snapshot row
    (dayAddApi_usersexall) plus one daily row (dayAddApi_usersex) for
    every day not yet stored locally.
    """
    # Registered users by gender, all time. sex: '1' = male, '2' = female
    # in the source schema.
    sql = """
        select count(sex) 'num' from user where sex = '1';
    """
    data = pysql.dbInfo(sql)
    data = data.fillna(0)
    male = data.values[0][0]
    sql = """
        select count(sex) 'num' from user where sex = '2';
    """
    data = pysql.dbInfo(sql)
    data = data.fillna(0)
    female = data.values[0][0]
    sql = """ insert into dayAddApi_usersexall(male,female,createDate) values (%s,%s,%s) """
    dset = [(male,female,str(datetime.datetime.now())[:10])]
    status = pysql.insertData(sql,dset)
    log.log('用户性别(总)更新状态-{}!'.format(status),'info')
    # Daily increments; days already present locally are skipped.
    timeList = timeScale()
    sql = 'select distinct createDate from dayAddApi_usersex'
    tmRest = pysql.dbInfoLocal(sql)
    tmRest = tmRest.fillna(0)
    tmwait = []
    if not tmRest.empty:
        tmwait = [str(x)[:10] for x in tmRest['createDate']]
    for i in range(len(timeList)-1):
        stTime = timeList[i]
        edTime = timeList[i+1]
        if stTime in tmwait:
            continue
        print '性别' + stTime
        # New male registrations that day.
        sql = """
            select count(*) from user where date_created > '{}' and date_created < '{}' and sex = '1'
        """ .format(stTime,edTime)
        data = pysql.dbInfo(sql)
        data = data.fillna(0)
        male = data.values[0][0]
        # New female registrations that day.
        sql = """
            select count(*) from user where date_created > '{}' and date_created < '{}' and sex = '2'
        """ .format(stTime,edTime)
        data = pysql.dbInfo(sql)
        data = data.fillna(0)
        female = data.values[0][0]
        sql = """ insert into dayAddApi_usersex(male,female,createDate) values (%s,%s,%s) """
        dset = [(male,female,stTime)]
        status = pysql.insertData(sql,dset)
        log.log('用户性别(日)更新状态-{}!({})'.format(status,stTime),'info')
def userAge():
    """Refresh age-bucket user counts: one all-time snapshot row
    (dayAddApi_userageall) plus one daily row (dayAddApi_userage) for
    every day not yet stored locally.
    """
    # Registered users bucketed by age, all time; age 0 treated as unknown.
    sql = """
        select age,count(age) 'num' from user where age <> 0 group by age;
    """
    data = pysql.dbInfo(sql)
    # Bucket boundaries and labels come from config.age_label.
    age_label = config.age_label
    data['age'] = pd.cut(data['age'],age_label['point'],labels=age_label['label'])
    data = pd.pivot_table(data,index=["age"],values=["num"],aggfunc='sum')
    data['age'] = data.index
    # NOTE(review): data['num'][0..4] assumes exactly five buckets are
    # present in the pivot result — confirm for days with sparse data.
    sql = """ insert into dayAddApi_userageall(age1,age2,age3,age4,age5,createDate) values (%s,%s,%s,%s,%s,%s) """
    dset = [(data['num'][0],data['num'][1],data['num'][2],data['num'][3],data['num'][4],str(datetime.datetime.now())[:10])]
    status = pysql.insertData(sql,dset)
    log.log('用户年龄(总)更新状态-{}!'.format(status),'info')
    # Daily increments; days already present locally are skipped.
    timeList = timeScale()
    sql = 'select distinct createDate from dayAddApi_userage'
    tmRest = pysql.dbInfoLocal(sql)
    tmRest = tmRest.fillna(0)
    tmwait = []
    if not tmRest.empty:
        tmwait = [str(x)[:10] for x in tmRest['createDate']]
    for i in range(len(timeList)-1):
        stTime = timeList[i]
        edTime = timeList[i+1]
        if stTime in tmwait:
            continue
        print '年龄' + stTime
        sql = """
            select age,count(age) 'num' from user where age <> 0 and date_created > '{}' and date_created < '{}' group by age;
        """.format(stTime,edTime)
        data = pysql.dbInfo(sql)
        age_label = config.age_label
        data['age'] = pd.cut(data['age'],age_label['point'],labels=age_label['label'])
        data = pd.pivot_table(data,index=["age"],values=["num"],aggfunc='sum')
        data['age'] = data.index
        sql = """ insert into dayAddApi_userage(age1,age2,age3,age4,age5,createDate) values (%s,%s,%s,%s,%s,%s) """
        dset = [(data['num'][0],data['num'][1],data['num'][2],data['num'][3],data['num'][4],stTime)]
        status = pysql.insertData(sql,dset)
        log.log('用户年龄(日)更新状态-{}!({})'.format(status,stTime),'info')
def userRest():
    """Rebuild monthly retention rows (dayAddApi_userrest): for each
    registration-month cohort, how many approved users were still
    borrowing in each later month, and the resulting activity rate.
    """
    timeList = timeScaleMonth()
    sql = 'select distinct createDate from dayAddApi_userrest'
    tmRest = pysql.dbInfoLocal(sql)
    tmRest = tmRest.fillna(0)
    tmwait = []
    if not tmRest.empty:
        # Months already stored are skipped ('YYYY-MM' prefixes).
        tmwait = [str(x)[:7] for x in tmRest['createDate']]
    for i in range(len(timeList)-1):
        stTime = timeList[i]
        edTime = timeList[i+1]
        if stTime in tmwait:
            continue
        print u'留存' + stTime
        # Approved users from this registration cohort, grouped by the
        # month of each user's first successful audit.
        sql = """
            select DATE_FORMAT(a.audit_date,'%Y-%m') 'month',count(distinct a.user_id) 'allrest' from (
            select c.user_id,c.audit_date from ci_cash_apply_info c left join user u
            on c.user_id=u.id
            where c.status='SUCCESS' and u.date_created > '{}' and u.date_created < '{}'
            ) a
            where a.audit_date = (
            select min(b.audit_date) from ci_cash_apply_info b where a.user_id=b.user_id and b.status='SUCCESS' and b.create_time > '{}'
            )
            group by DATE_FORMAT(a.audit_date,'%Y-%m')
        """.format(stTime + '-01',edTime + '-01',stTime + '-01')
        allrest = pysql.dbInfo(sql)
        allrest = allrest.fillna(0)
        # Active (funded, status 6) borrowers from the cohort, by activity month.
        sql = """
            select DATE_FORMAT(a.createdTime,'%Y-%m') 'month',count(distinct a.userSid) 'currentactive' from (
            select l.userSid,l.createdTime from loan l left join user u
            on l.userSid=u.id
            where l.status=6 and u.date_created > '{}' and u.date_created < '{}' ) a
            group by DATE_FORMAT(a.createdTime,'%Y-%m')
        """.format(stTime + '-01',edTime + '-01')
        cactive = pysql.dbInfo(sql)
        cactive = cactive.fillna(0)
        aad = pd.merge(allrest,cactive,how='outer')
        aad = aad.fillna(0)
        # Running total of approved users up to and including each month.
        aad['allrest'] = [sum(aad['allrest'][:(i+1)]) for i in range(len(aad))]
        aad['rtime'] = stTime
        # Activity rate (%); pandas yields inf/NaN rather than raising when
        # allrest is 0.
        aad['activerate'] = aad['currentactive']/aad['allrest']*100
        aad['createdTime'] = str(datetime.datetime.now())[:10]
        rtime = list(aad['rtime'])
        cmonth = list(aad['month'])
        allrest = list(aad['allrest'])
        currentactive = list(aad['currentactive'])
        currentActiveRate = list(aad['activerate'])
        ctime = list(aad['createdTime'])
        # Replace any existing rows for this cohort before inserting.
        sql = "delete from dayAddApi_userrest where registerDate='{}'".format(stTime)
        status = pysql.deletetData(sql)
        log.log(u'留存数据删除状态-{}!'.format(status),'info')
        sql = """ insert into dayAddApi_userrest(registerDate,currentDate,allPass,currentActive,currentActiveRate,createDate) values (%s,%s,%s,%s,%s,%s) """
        dset = zip(rtime,cmonth,allrest,currentactive,currentActiveRate,ctime)
        status = pysql.insertData(sql,dset)
        log.log('留存数据更新状态-{}!({})'.format(status,stTime),'info')
def main():
    """Refresh all user-demographic tables, in order."""
    for job in (userSex, userAge, userRest):
        job()

if __name__ == '__main__':
    main()
| {"/RiskMonitor/apps/login/admin.py": ["/RiskMonitor/apps/login/models.py"]} |
73,525 | xueyeelong/Amonitor | refs/heads/master | /RiskMonitor/api/dayAddApi/migrations/0008_userrest.py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2017-12-07 06:36
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Auto-generated migration: creates the UserRest table
    (monthly retention rows keyed by registration month and activity month)."""

    dependencies = [
        ('dayAddApi', '0007_flowloanfund'),
    ]

    operations = [
        migrations.CreateModel(
            name='UserRest',
            fields=[
                # Implicit auto-increment primary key.
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # 'YYYY-MM' month strings (Python 2 bytes defaults kept from the generator).
                ('registerDate', models.CharField(default=b'2017-12', max_length=32)),
                ('currentDate', models.CharField(default=b'2017-12', max_length=32)),
                ('allPass', models.IntegerField(default=0)),
                ('currentActive', models.IntegerField(default=0)),
                ('currentActiveRate', models.FloatField(default=0.0)),
                ('createDate', models.DateField(default=django.utils.timezone.now)),
            ],
            options={
                'ordering': ('createDate',),
            },
        ),
    ]
| {"/RiskMonitor/apps/login/admin.py": ["/RiskMonitor/apps/login/models.py"]} |
73,526 | xueyeelong/Amonitor | refs/heads/master | /RiskMonitor/apps/login/models.py | # -*- coding: utf-8 -*-
from django.db import models
class User(models.Model):
    """Dashboard login account; the email address is the primary key."""
    email = models.EmailField('邮箱',max_length=20,primary_key=True)
    username = models.CharField('姓名',max_length=20)
    # NOTE(review): stored as a plain CharField — confirm the password is
    # hashed before it reaches this model.
    password = models.CharField('密码',max_length=20)
    # Access level used by the login app.
    level = models.IntegerField('等级')
    def __unicode__(self):
        # Python 2 string representation (shown in the admin).
        return self.username
| {"/RiskMonitor/apps/login/admin.py": ["/RiskMonitor/apps/login/models.py"]} |
73,527 | xueyeelong/Amonitor | refs/heads/master | /dataCalculate/collect/collect.py | # -*- coding: utf-8 -*-
#
import MySQLdb
import pandas as pd
import numpy as np
import json
import sys
import os
import logging
import datetime
#添加路径
sys.path.insert(0,os.path.abspath(os.path.dirname(os.path.dirname(__file__))))
from libfile import pysql
from libfile import logger
import config
#日志
log = logger.Logger('collect',os.path.abspath(os.path.dirname(os.path.dirname(__file__))) + "/" + config.log_path)
#数据包含时间
def timeScale(startTime = "2017-04-15"):
    """Return every ISO date from *startTime* through today, ascending.

    Decrements a cursor date until it matches *startTime*, so the
    argument must be today or earlier in 'YYYY-MM-DD' form.
    """
    cursor = datetime.date.today()
    collected = []
    while True:
        stamp = str(cursor)
        collected.insert(0, stamp)
        if stamp == startTime:
            return collected
        cursor -= datetime.timedelta(days=1)
def check():
#催收情况
sql = """
select month, round(D4催回金额/D4逾期金额*100,2) 'day4Rate',
round(D7催回金额/D7逾期金额*100,2) 'day7Rate',
round(D15催回金额/D15逾期金额*100,2) 'day15Rate',
round(D30催回金额/D30逾期金额*100,2) 'day30Rate',
round(D60催回金额/D60逾期金额*100,2) 'day60Rate',
round(D90催回金额/D90逾期金额*100,2) 'day90Rate',
round(D90Plus催回金额/D90逾期金额*100,2) 'day90Ratem'
from (
select date_format(a.lendTime,'%Y-%m') 'month',
sum(case when datediff(now(), b.termDate) >= 4 and b.compatibleStatus in ('OVERDUE','OVERDUE_PAID')
then a.payMoney else 0 end) 'D4逾期金额',
sum(case when datediff(now(), b.termDate) >= 7 and b.compatibleStatus in ('OVERDUE','OVERDUE_PAID')
then a.payMoney else 0 end) 'D7逾期金额',
sum(case when datediff(now(), b.termDate) >= 15 and b.compatibleStatus in ('OVERDUE','OVERDUE_PAID')
then a.payMoney else 0 end) 'D15逾期金额',
sum(case when datediff(now(), b.termDate) >= 30 and b.compatibleStatus in ('OVERDUE','OVERDUE_PAID')
then a.payMoney else 0 end) 'D30逾期金额',
sum(case when datediff(now(), b.termDate) >= 60 and b.compatibleStatus in ('OVERDUE','OVERDUE_PAID')
then a.payMoney else 0 end) 'D60逾期金额',
sum(case when datediff(now(), b.termDate) >= 90 and b.compatibleStatus in ('OVERDUE','OVERDUE_PAID')
then a.payMoney else 0 end) 'D90逾期金额',
sum(case when datediff(now(), b.termDate) >= 4 and b.compatibleStatus = 'OVERDUE_PAID' and b.repaidTime <= adddate(b.termDate, interval+4 day)
then a.payMoney else 0 end) 'D4催回金额',
sum(case when datediff(now(), b.termDate) >= 7 and b.compatibleStatus = 'OVERDUE_PAID' and b.repaidTime <= adddate(b.termDate, interval+7 day)
then a.payMoney else 0 end) 'D7催回金额',
sum(case when datediff(now(), b.termDate) >= 15 and b.compatibleStatus = 'OVERDUE_PAID' and b.repaidTime <= adddate(b.termDate, interval+15 day)
then a.payMoney else 0 end) 'D15催回金额',
sum(case when datediff(now(), b.termDate) >= 30 and b.compatibleStatus = 'OVERDUE_PAID' and b.repaidTime <= adddate(b.termDate, interval+30 day)
then a.payMoney else 0 end) 'D30催回金额',
sum(case when datediff(now(), b.termDate) >= 60 and b.compatibleStatus = 'OVERDUE_PAID' and b.repaidTime <= adddate(b.termDate, interval+60 day)
then a.payMoney else 0 end) 'D60催回金额',
sum(case when datediff(now(), b.termDate) >= 90 and b.compatibleStatus = 'OVERDUE_PAID' and b.repaidTime <= adddate(b.termDate, interval+90 day)
then a.payMoney else 0 end) 'D90催回金额',
sum(case when datediff(now(), b.termDate) >= 90 and b.compatibleStatus = 'OVERDUE_PAID'
then a.payMoney else 0 end) 'D90Plus催回金额'
from loan a, loan_repaying b
where a.id = b.loanId and a.status = 6 and b.termDate < curdate() and a.productId not in (3,4)
#and not exists (select 1 from loan g where g.userSid = a.userSid and g.status = 6 and g.lendTime < a.lendTime)
group by month) a;
"""
data = pysql.dbInfo(sql)
data = data.fillna(0)
month = list(data['month'])
day4Rate = list(data['day4Rate'])
day7Rate = list(data['day7Rate'])
day15Rate = list(data['day15Rate'])
day30Rate = list(data['day30Rate'])
day60Rate = list(data['day60Rate'])
day90Rate = list(data['day90Rate'])
day90Ratem = list(data['day90Ratem'])
updateDate = [str(datetime.datetime.today())[:10]] * len(month)
sql = "delete from dayAddApi_collectrate"
status = pysql.deletetData(sql)
log.log(u'催回率数据删除状态-{}!({})'.format(status,str(datetime.date.today())),'info')
sql = """ insert into dayAddApi_collectrate(month,day4Rate,day7Rate,day15Rate,day30Rate,day60Rate,day90Rate,day90Ratem,createDate) values (%s,%s,%s,%s,%s,%s,%s,%s,%s) """
dset = zip(month,day4Rate,day7Rate,day15Rate,day30Rate,day60Rate,day90Rate,day90Ratem,updateDate)
status = pysql.insertData(sql,dset)
log.log('催回率数据更新状态-{}!({})!'.format(status,str(datetime.date.today())),'info')
def collectDisYesterday():
#案件数量情
#每日数据 当前逾期天数的分布
timeList = timeScale()
sql = 'select distinct createDate from dayAddApi_collectdis'
tmRest = pysql.dbInfoLocal(sql)
tmRest = tmRest.fillna(0)
tmwait = []
if not tmRest.empty:
tmwait = [str(x)[:10] for x in tmRest['createDate']]
for i in range(len(timeList)-1):
stTime = timeList[i]
edTime = timeList[i+1]
if stTime in tmwait:
continue
print '案件数量' + stTime
curDisct = {}
#待催收的案件数(未完成的催回+在规定时间外催回的)
sql = """
select count(*) from t_loan_case
where firm_id = 1 and create_date < '{}' and repaid_date > '{}' and loan_status = 'PAID'
""".format(edTime,edTime)
data = pysql.dbInfoCollect(sql)
data = data.fillna(0)
sql = """
select count(*) from t_loan_case
where loan_status = 'OVERDUE' and firm_id = 1 and create_date < '{}'
""".format(edTime)
data1 = pysql.dbInfoCollect(sql)
data1 = data1.fillna(0)
currentNum = data.values[0][0] + data1.values[0][0]
curDisct['currentNum'] = currentNum
#待催收的案件逾期天数分布
sql = """
select DATEDIFF('{}',overdue_date) 'overdue_day' from t_loan_case
where firm_id = 1 and create_date < '{}' and repaid_date > '{}' and loan_status = 'PAID'
""".format(edTime,edTime,edTime)
data = pysql.dbInfoCollect(sql)
sql = """
select DATEDIFF('{}',overdue_date) 'overdue_day' from t_loan_case
where loan_status = 'OVERDUE' and firm_id = 1 and create_date < '{}'
""".format(edTime,edTime)
data1 = pysql.dbInfoCollect(sql)
overdue_day_list = []
for x in data['overdue_day']:
overdue_day_list.append(x)
for x in data1['overdue_day']:
overdue_day_list.append(x)
bins = [0,3,10,20,30,60,90,max(overdue_day_list)+1]
labels = ['1-3','4-10','11-20','21-30','31-60','61-90','90-']
df = pd.cut(overdue_day_list,bins=bins,labels=labels)
df = df.value_counts()
for i in range(len(df)):
curDisct[df.index[i]] = df.values[i]
sql = """ insert into dayAddApi_collectdis(dayto3,dayto10,dayto20,dayto30,dayto60,dayto90,dayover90,currentNum,createDate) values (%s,%s,%s,%s,%s,%s,%s,%s,%s) """
dset = [(curDisct['1-3'],curDisct['4-10'],curDisct['11-20'],curDisct['21-30'],curDisct['31-60'],curDisct['61-90'],curDisct['90-'],curDisct['currentNum'],stTime)]
status = pysql.insertData(sql,dset)
log.log('每日案件逾期天数更新状态-{}! ({})'.format(status,stTime),'info')
def collectNumYesterday():
#每日数据
timeList = timeScale()
sql = 'select distinct createDate from dayAddApi_collectnum'
tmRest = pysql.dbInfoLocal(sql)
tmRest = tmRest.fillna(0)
tmwait = []
if not tmRest.empty:
tmwait = [str(x)[:10] for x in tmRest['createDate']]
for i in range(len(timeList)-1):
stTime = timeList[i]
edTime = timeList[i+1]
if stTime in tmwait:
continue
print '催收每日数据' + stTime
#新增案件数
sql = """
select count(*) from t_loan_case
where create_date like '{}%' and firm_id = 1
""".format(stTime)
data = pysql.dbInfoCollect(sql)
data = data.fillna(0)
yesterdayNew = data.values[0][0]
#催回案件数(30天以上委外 30天以下自催)
sql = """
select count(*) from t_loan_case
where repaid_date like '{}%' and firm_id = 1 and overdue_day >= 30
""".format(stTime)
data = pysql.dbInfoCollect(sql)
data = data.fillna(0)
yesterdayPaidl30 = data.values[0][0]
sql = """
select count(*) from t_loan_case
where repaid_date like '{}%' and firm_id = 1 and overdue_day < 30
""".format(stTime)
data = pysql.dbInfoCollect(sql)
data = data.fillna(0)
yesterdayPaidu30 = data.values[0][0]
#催回 三日前的应催案件数
threeDay = str(datetime.datetime.strptime(stTime, '%Y-%m-%d') - datetime.timedelta(days=3))[:10]
sql = """
select count(*) from t_loan_case
where create_date like '{}%' and firm_id = 1
""".format(threeDay)
data = pysql.dbInfoCollect(sql)
data = data.fillna(0)
threeDayPaing = data.values[0][0]
#催回 三日前的催回案件数
sql = """
select count(*) from t_loan_case
where create_date like '{}%' and repaid_date <= '{}' and firm_id = 1
""".format(threeDay,edTime)
data = pysql.dbInfoCollect(sql)
data = data.fillna(0)
threeDayPaid = data.values[0][0]
#昨日 三日催回率
if threeDayPaing != 0:
NewPaidRate = round(threeDayPaid/float(threeDayPaing)*100,2)
else:
NewPaidRate = 0
sql = """ insert into dayAddApi_collectnum(newAdd,newCollectMl1,newCollectMu1,threeDayCollect,threeDayCollectRate,createDate) values (%s,%s,%s,%s,%s,%s) """
dset = [(yesterdayNew,yesterdayPaidl30,yesterdayPaidu30,threeDayPaing,NewPaidRate,stTime)]
status = pysql.insertData(sql,dset)
log.log('催回基本数据更新状态-{}! ({})'.format(status,stTime),'info')
def main():
check()
collectDisYesterday()
collectNumYesterday()
if __name__ == '__main__':
main()
| {"/RiskMonitor/apps/login/admin.py": ["/RiskMonitor/apps/login/models.py"]} |
73,528 | xueyeelong/Amonitor | refs/heads/master | /RiskMonitor/api/dayAddApi/migrations/0002_auto_20171113_1720.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-11-13 09:20
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('dayAddApi', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='UserIncrease',
fields=[
('register', models.IntegerField(default=0)),
('allow', models.IntegerField(default=0)),
('newApply', models.IntegerField(default=0)),
('oldApply', models.IntegerField(default=0)),
('createDate', models.DateField(default=django.utils.timezone.now, primary_key=True, serialize=False)),
],
options={
'ordering': ('createDate',),
},
),
migrations.RemoveField(
model_name='usersex',
name='demale',
),
migrations.AddField(
model_name='usersex',
name='female',
field=models.IntegerField(default=0),
),
]
| {"/RiskMonitor/apps/login/admin.py": ["/RiskMonitor/apps/login/models.py"]} |
73,529 | xueyeelong/Amonitor | refs/heads/master | /dataCalculate/index/index.py | # -*- coding: utf-8 -*-
#
import MySQLdb
import pandas as pd
import numpy
import json
import sys
import os
import datetime
import time
reload(sys)
sys.setdefaultencoding('utf8')
#添加路径
sys.path.insert(0,os.path.abspath(os.path.dirname(os.path.dirname(__file__))))
from libfile import pysql
from libfile import logger
import config
#日志
log = logger.Logger('index',os.path.abspath(os.path.dirname(os.path.dirname(__file__))) + "/" + config.log_path)
#路径
def getTheFile(filename):
return os.path.abspath(os.path.dirname(__file__)) +"/"+filename
#用户漏斗
def hopperHead():
sql = """select count(*) from user;"""
data = pysql.dbInfo(sql)
registerNum = data.values[0][0]
sql = """select count(distinct user_id) from ci_cash_apply_info;"""
data = pysql.dbInfo(sql)
applyNum = data.values[0][0]
sql = """select count(DISTINCT user_id) from ci_cash_apply_info where status in ('FA_SUCCESS','SUCCESS');"""
data = pysql.dbInfo(sql)
passNum = data.values[0][0]
sql = """select count(*) from batch_acc_mgnt_account_info;"""
data = pysql.dbInfo(sql)
loanNum = data.values[0][0]
sql = """select count(*) from batch_acc_mgnt_account_info where loan_times >=2;"""
data = pysql.dbInfo(sql)
reloanNum = data.values[0][0]
#数据插入
sql = """ insert into dayAddApi_indexhopper(register,applys,passs,loan,reloan,createDate) values (%s,%s,%s,%s,%s,%s) """
dset = [(registerNum,applyNum,passNum,loanNum,reloanNum,str(datetime.datetime.now()-datetime.timedelta(days=1))[:10])]
status = pysql.insertData(sql,dset)
log.log('漏斗数据更新状态-{}'.format(status),'info')
#基本总量情况
sumUser = registerNum
activeUser = loanNum
sql = """select count(*) from loan where status=6;"""
data = pysql.dbInfo(sql)
tradeNum = data.values[0][0]
sql = """select sum(lendMoney) from loan where status=6;"""
data = pysql.dbInfo(sql)
tradeMoney = int(data.values[0][0])
#数据插入
sql = """ insert into dayAddApi_indexhead(sumUser,activeUser,tradeNum,tradeMoney,createDate) values (%s,%s,%s,%s,%s) """
dset = [(sumUser,activeUser,tradeNum,tradeMoney,str(datetime.datetime.now()-datetime.timedelta(days=1))[:10])]
status = pysql.insertData(sql,dset)
log.log('首页标题数据更新状态-{}!'.format(status),'info')
#用户地区分布
def userPlace():
sql = """
select aes_decrypt(a.id_num,'1zhida**') 'id_num' from _user a,batch_acc_mgnt_account_info b where a.id=b.user_id;
"""
# sql = """
# select aes_decrypt(a.id_num,'1zhida**') 'id_num' from _user a,batch_acc_mgnt_account_info b where a.id=b.user_id and b.membership in (3,4,5);
# """
# sql = """
# select distinct aes_decrypt(a.id_num,'1zhida**') 'id_num'
# from _user a,loan_repaying b
# where a.id=b.userSid and b.compatibleStatus = 'OVERDUE';
# """
data = pysql.dbInfo(sql)
id_init = pd.read_csv(getTheFile('../data/t_id_card_init.csv'))
id_init['code'] = id_init['code'].map(str)
province = id_init[id_init['code'].map(lambda x:str(x)[-4:]=='0000')]
province1 = province[province['name'].map(lambda x: '北京'in x or '上海' in x or '天津'in x or '重庆' in x)]
province2 = province[province['name'].map(lambda x: '市' not in x)]
province = pd.concat([province1,province2])
city = id_init[id_init['code'].map(lambda x:str(x)[-2:]=='00')]
data['province_t'] = data['id_num'].map(lambda x:str(x)[:2]+'0000')
data['city_t'] = data['id_num'].map(lambda x:str(x)[:4]+'00')
data['country_t'] = data['id_num'].map(lambda x:str(x)[:6])
data = pd.merge(data,province,left_on='province_t',right_on='code',how='left')
data['省'] = data['name']
del data['code']
del data['name']
data = pd.merge(data,city,left_on='city_t',right_on='code',how='left')
data['市'] = data['name']
del data['code']
del data['name']
del data['province_t']
del data['city_t']
del data['country_t']
data["人数"] = 1
tp = pd.pivot_table(data,index=['省','市'],values=["人数"],aggfunc='count',fill_value=0)
tp['省'] = tp.index.map(lambda x :x[0])
tp['市'] = tp.index.map(lambda x :x[1])
tp = tp.sort_values(by="人数",ascending=False)
tp = tp.reset_index(drop=True)
# tp.to_csv('C:\Users\Amon\Desktop\ct.csv',index=False,encoding='utf_8_sig')
# exit(0)
#生成city
# gg = {}
# for i in range(len(tp)):
# item = tp.ix[i]
# key = item['省'] + item['市']
# gg[key] = item['市'].replace("地区","").replace("市","")
# if i > 71:
# break
# json.dump(gg, open(getTheFile('data/city.json'), 'w'), default=config.set_default)
city = json.load(open(getTheFile('../data/city.json')))
cityName = []
cityNum = []
for i in range(50,-1,-1):
item = tp.ix[i]
key = item['省'].decode('utf-8') + item['市'].decode('utf-8')
if city.get(key,None) is not None:
cityName.append(city.get(key))
cityNum.append(item['人数'])
ctime = [str(datetime.datetime.now()-datetime.timedelta(days=1))[:10]]*len(cityName)
sql = """ insert into dayAddApi_indexcity(cityName,numInCity,createDate) values (%s,%s,%s) """
cityName = [x.decode("utf-8") for x in cityName]
dset = zip(cityName,cityNum,ctime)
status = pysql.insertData(sql,dset)
log.log('用户地区分布数据更新状态-{}!'.format(status),'info')
# city = json.load(open(getTheFile('../static/data/index/city_value.json')))
# data_dict = {}
# import requests
# import string
# for key in city.keys():
# values = {'place' : key}
# url = string.Template('http://maps.googleapis.com/maps/api/geocode/json?address=$place&sensor=false&language=zh-CN')
# url = url.safe_substitute(values)
# r = requests.get(url=url,headers=None,timeout=10)
# rj = json.loads(r.text)
# if rj.get('status') == 'OK':
# rj = rj['results'][0]
# data_dict[key] = [rj['geometry']['location']['lng'],rj['geometry']['location']['lat']]
# json.dump(data_dict, open(getTheFile('../static/data/index/city_zuobiao.json'), 'w'), default=config.set_default)
# city = json.load(open(getTheFile('../static/data/index/city_value.json')))
# data_dict = []
# for key in city.keys():
# temp = {}
# temp['name'] = key
# temp['value'] = 'dateset["'+ key+'""]'
# data_dict.append(temp)
# json.dump(data_dict, open(getTheFile('../static/data/index/city_test.json'), 'w'), default=config.set_default)
#仪表盘数据 平均借贷金额 平均借贷天数 平均服务费用
def dashbook():
dashBook = {}
sql = """select avg(a.repayMoney) 'avgMoney' from loan_repaying a left join loan b
on a.loanId = b.id
where DateDiff(a.createdTime,now())=-1"""
data = pysql.dbInfo(sql)
data = data.fillna(0)
avgMoney = round(data.values[0][0],2)
sql = """
select avg(b.termNum) 'avgTermNum' from loan_repaying a left join loan b
on a.loanId = b.id
where DateDiff(a.createdTime,now())=-1
"""
data = pysql.dbInfo(sql)
data = data.fillna(0)
avgTermNum = round(data.values[0][0],2)
sql = """
select avg(b.repayMoney - b.payMoney) 'avgServiceMoney' from loan_repaying a left join loan b
on a.loanId = b.id
where DateDiff(a.createdTime,now())=-1
"""
data = pysql.dbInfo(sql)
data = data.fillna(0)
avgServiceMoney = round(data.values[0][0],2)
sql = """ insert into dayAddApi_indexdash(avgTermNum,avgMoney,avgServiceMoney,createDate) values (%s,%s,%s,%s) """
dset = [(avgTermNum,avgMoney,avgServiceMoney,str(datetime.datetime.now()-datetime.timedelta(days=1))[:10])]
status = pysql.insertData(sql,dset)
log.log('仪表盘数据更新状态-{}!'.format(status),'info')
def main():
#hopperHead()
userPlace()
#dashbook()
if __name__ == '__main__':
main()
| {"/RiskMonitor/apps/login/admin.py": ["/RiskMonitor/apps/login/models.py"]} |
73,530 | xueyeelong/Amonitor | refs/heads/master | /RiskMonitor/api/dayAddApi/views.py | # -*- coding: utf-8 -*-
from rest_framework import status
from rest_framework.decorators import api_view
from rest_framework.response import Response
from django.contrib.auth.decorators import permission_required
# #index
from models import IndexHead, IndexDash, IndexHopper, IndexCity, IndexAcrepay
from serializers import IndexHeadSerializer, IndexDashSerializer, IndexHopperSerializer, IndexCitySerializer, IndexAcrepaySerializer
#userInfo
from models import UserAge, UserAgeAll, UserSex, UserSexAll, UserIncrease, UserRest
from serializers import UserAgeSerializer, UserAgeAllSerializer, UserSexSerializer, UserSexAllSerializer, UserIncreaseSerializer, UserRestSerializer
#flow
from models import FlowLoanMoney, FlowLoanMoneyNO, FlowLoanMoneySum, FlowDelayRate, FlowDelayRateNO, FlowLoanFund, FlowPaidMoney, FlowC2CFund
from serializers import FlowLoanMoneySerializer, FlowLoanMoneyNOSerializer, FlowLoanMoneySumSerializer, FlowDelayRateSerializer, FlowDelayRateNOSerializer, FlowLoanFundSerializer, FlowPaidMoneySerializer, FlowC2CFundSerializer
#collect
from models import CollectRate, CollectNum, CollectDis
from serializers import CollectRateSerializer, CollectNumSerializer, CollectDisSerializer
#market
from models import MarketNum
from serializers import MarketNumSerializer
#aeya
from models import AeyePassRate, AeyeGetRate, AeyeDelayRate, AeyeDelayRateNO
from serializers import AeyePassRateSerializer, AeyeGetRateSerializer, AeyeDelayRateSerializer, AeyeDelayRateNOSerializer
#model dict
tableModel = {
'indexhead': {
'models': IndexHead,
'serializers': IndexHeadSerializer,
},
'indexdash': {
'models': IndexDash,
'serializers': IndexDashSerializer,
},
'indexhopper': {
'models': IndexHopper,
'serializers': IndexHopperSerializer,
},
'indexcity': {
'models': IndexCity,
'serializers': IndexCitySerializer,
},
'indexacrepay': {
'models': IndexAcrepay,
'serializers': IndexAcrepaySerializer,
},
'userage': {
'models': UserAge,
'serializers': UserAgeSerializer,
},
'userageall': {
'models': UserAgeAll,
'serializers': UserAgeAllSerializer,
},
'usersex': {
'models': UserSex,
'serializers': UserSexSerializer,
},
'usersexall': {
'models': UserSexAll,
'serializers': UserSexAllSerializer,
},
'userincrease': {
'models': UserIncrease,
'serializers': UserIncreaseSerializer,
},
'userrest': {
'models': UserRest,
'serializers': UserRestSerializer,
},
'flowloanmoney': {
'models': FlowLoanMoney,
'serializers': FlowLoanMoneySerializer,
},
'flowloanmoneyno': {
'models': FlowLoanMoneyNO,
'serializers': FlowLoanMoneyNOSerializer,
},
'flowloanmoneysum': {
'models': FlowLoanMoneySum,
'serializers': FlowLoanMoneySumSerializer,
},
'flowdelayrate': {
'models': FlowDelayRate,
'serializers': FlowDelayRateSerializer,
},
'flowdelayrateno': {
'models': FlowDelayRateNO,
'serializers': FlowDelayRateNOSerializer,
},
'flowloanfund': {
'models': FlowLoanFund,
'serializers': FlowLoanFundSerializer,
},
'flowpaidmoney': {
'models': FlowPaidMoney,
'serializers': FlowPaidMoneySerializer,
},
'flowc2c': {
'models': FlowC2CFund,
'serializers': FlowC2CFundSerializer,
},
'collectrate': {
'models': CollectRate,
'serializers': CollectRateSerializer,
},
'collectnum': {
'models': CollectNum,
'serializers': CollectNumSerializer,
},
'collectdis': {
'models': CollectDis,
'serializers': CollectDisSerializer,
},
'marketnum': {
'models': MarketNum,
'serializers': MarketNumSerializer,
},
'aeyepassrate': {
'models': AeyePassRate,
'serializers': AeyePassRateSerializer,
},
'aeyegetrate': {
'models': AeyeGetRate,
'serializers': AeyeGetRateSerializer,
},
'aeyedelayrate': {
'models': AeyeDelayRate,
'serializers': AeyeDelayRateSerializer,
},
'aeyedelayrateno': {
'models': AeyeDelayRateNO,
'serializers': AeyeDelayRateNOSerializer,
},
}
import datetime
from django.db.models import Max
#@permission_required('part_admin.dayapi')
@api_view(['POST'])
def indexhead_item(request):
if request.method == 'POST':
paralist = eval(request.POST.get('para',None))
tables = paralist.get('table',None)
content = paralist.get('content',None)
if tables and content:
objectModel = tableModel[tables]['models']
objectSerializer = tableModel[tables]['serializers']
para = paralist.get('para',[])
print para
if para:
temp = objectModel.objects.all()
filterstrtemp = "temp.filter({}{}='{}')"
for xkey in para:
key = xkey.get('key','')
value = xkey.get('value','')
way = xkey.get('way','')
way = '__' + way if way else ''
filterstr = filterstrtemp.format(key,way,value)
temp = eval(filterstr)
serializer = objectSerializer(temp, many=True)
return Response(serializer.data)
else:
if content == 'item':
#yesterday = str(datetime.datetime.now() - datetime.timedelta(days=1))[:10]
yesterday = str(objectModel.objects.all().aggregate(Max('createDate')).values()[0])[:10]
temp = objectModel.objects.filter(createDate=yesterday)
serializer = objectSerializer(temp, many=True)
return Response(serializer.data)
elif content == 'list':
temp = objectModel.objects.all()
serializer = objectSerializer(temp, many=True)
return Response(serializer.data)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) | {"/RiskMonitor/apps/login/admin.py": ["/RiskMonitor/apps/login/models.py"]} |
73,531 | xueyeelong/Amonitor | refs/heads/master | /dataCalculate/flow/flowDelayRate.py | # -*- coding: utf-8 -*-
#
import MySQLdb
import pandas as pd
import numpy
import json
import sys
import os
import logging
import datetime
import gc
#添加路径
sys.path.insert(0,os.path.abspath(os.path.dirname(os.path.dirname(__file__))))
from libfile import pysql
from libfile import logger
import config
#日志
log = logger.Logger('flow',os.path.abspath(os.path.dirname(os.path.dirname(__file__))) + "/" + config.log_path)
#路径
def getTheFile(filename):
return os.path.abspath(os.path.dirname(__file__)) +"/"+filename
#数据包含时间
def timeScale(startTime = "2017-09-01"):
nowTime = datetime.date.today()
i = 0
timeList = []
while True:
endTime = str(nowTime-datetime.timedelta(days=i))
timeList.insert(0,endTime)
if endTime == startTime:
break
i = i + 1
return timeList
#逾期情况 根据资金方区分
def delayDayFund():
fundId = config.fundloanId
for fundName in fundId.keys():
ids = fundId[fundName][0]
#逾期情况 not in (3,4,5,6,1001)
sql = """
select DATE_FORMAT(b.termDate,'%Y-%m-%d') 'date',sum(b.repayMoney) 'allMoney' from loan a,loan_repaying b
where a.id=b.loanId and a.status=6 and b.compatibleStatus not in ('CANCEL') and a.fundPayAccountId in ({})
and b.productId not in (3,4,5,6,1001)
and b.termDate < DATE_FORMAT(now(),'%Y-%m-%d')
GROUP BY DATE_FORMAT(b.termDate,'%Y-%m-%d');
""".format(ids)
alldata = pysql.dbInfo(sql)
alldata = alldata.fillna(0)
delayPoint = [0,3,7,10,20,30,60,90]
pp = []
for day in delayPoint:
sql = """
select DATE_FORMAT(c.termDate,'%Y-%m-%d') 'date',sum(c.repayMoney) 'payMoney' from (
select a.payMoney,b.* from loan a,loan_repaying b
where a.id=b.loanId and a.status=6 and b.compatibleStatus not in ('CANCEL') and b.productId not in (3,4,5,6,1001)
and a.fundPayAccountId in ({})
and b.termDate < DATE_FORMAT(now(),'%Y-%m-%d')
HAVING if(b.repaidTime is NULL,TO_DAYS(now()) - TO_DAYS(b.termDate),TO_DAYS(b.repaidTime) - TO_DAYS(b.termDate)) <= {}) c
GROUP BY DATE_FORMAT(c.termDate,'%Y-%m-%d');
""".format(ids,day)
data = pysql.dbInfo(sql)
data = data.fillna(0)
repay = pd.merge(alldata,data)
pp.append(pd.Series([round(x*100,2) for x in (repay['allMoney']-repay['payMoney'])/repay['allMoney']],index=repay['date']))
pt = pd.concat(pp, axis=1, join_axes=[pp[0].index])
pt.columns = ['首逾率','逾期率3+','逾期率7+','逾期率10+','逾期率20+','逾期率M1','逾期率M2','逾期率M3']
pt = pt.fillna(0)
pt['times'] = list(pt.index)
s0 = list(pt['首逾率'])
s3 = list(pt['逾期率3+'])
s7 = list(pt['逾期率7+'])
s10 = list(pt['逾期率10+'])
s20 = list(pt['逾期率20+'])
sM1 = list(pt['逾期率M1'])
sM2 = list(pt['逾期率M2'])
sM3 = list(pt['逾期率M3'])
stt = list(pt['times'])
fnanme = [fundName] * len(pt)
sql = "delete from dayAddApi_flowdelayrate where fundName='" + fundName + "'"
status = pysql.deletetData(sql)
log.log(u'逾期数据删除状态-{}!(资金方{})'.format(status,fundName),'info')
sql = """ insert into dayAddApi_flowdelayrate(fundName,delayRate0,delayRate3,delayRate7,delayRate10,delayRate20,delayRateM1,delayRateM2,delayRateM3,createDate) values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s) """
dset = zip(fnanme,s0,s3,s7,s10,s20,sM1,sM2,sM3,stt)
status = pysql.insertData(sql,dset)
log.log(u'逾期数据更新状态-{}!(资金方{})'.format(status,fundName),'info')
#逾期情况 分新老 根据资金方区分
def delayDayNOFund():
fundId = config.fundloanId
for fundName in fundId.keys():
ids = fundId[fundName][0]
timeList = timeScale('2017-08-30')[:-3]
sql = "select distinct createDate from dayAddApi_flowdelayrateno where fundName='" + fundName + "'";
tmRest = pysql.dbInfoLocal(sql)
tmRest = tmRest.fillna(0)
tmwait = []
if not tmRest.empty:
tmwait = [str(x)[:10] for x in tmRest['createDate']]
for i in range(len(timeList)-1):
stTime = timeList[i]
edTime = timeList[i+1]
if stTime in tmwait:
continue
print u'逾期(新老)3天逾期率' + fundName + ' ' + stTime
#分新老首逾情况
#new
sql = """
select sum(a.repayMoney)
from loan a,loan_repaying b
where a.id=b.loanId and a.status=6 and b.compatibleStatus not in ('CANCEL') and b.productId not in (3,4,5,6,1001)
and b.termDate >= '{}' and b.termDate < '{}'
and a.userSid not in (select distinct userSid from loan_repaying where termDate < '{}')
and a.fundPayAccountId in ({})
""".format(stTime,edTime,stTime,ids)
data = pysql.dbInfo(sql)
data = data.fillna(0)
newRepaySum = data.values[0][0]
sql = """
select sum(a.repayMoney)
from loan a,loan_repaying b
where a.id=b.loanId and a.status=6 and b.compatibleStatus not in ('CANCEL') and b.productId not in (3,4,5,6,1001)
and b.termDate >= '{}' and b.termDate < '{}'
and if(b.repaidTime is NULL,TO_DAYS(now()) - TO_DAYS(b.termDate),TO_DAYS(b.repaidTime) - TO_DAYS(b.termDate)) <= 3
and a.userSid not in (
select distinct userSid from loan_repaying where termDate < '{}'
)
and a.fundPayAccountId in ({})
""".format(stTime,edTime,stTime,ids)
data = pysql.dbInfo(sql)
data = data.fillna(0)
newPaid = data.values[0][0]
newDelayRate = round((newRepaySum - newPaid)/newRepaySum*100,2)
#old
sql = """
select sum(a.repayMoney)
from loan a,loan_repaying b
where a.id=b.loanId and a.status=6 and b.compatibleStatus not in ('CANCEL') and b.productId not in (3,4,5,6,1001)
and b.termDate >= '{}' and b.termDate < '{}'
and a.userSid in (
select distinct userSid from loan_repaying where termDate < '{}'
)
and a.fundPayAccountId in ({})
""".format(stTime,edTime,stTime,ids)
data = pysql.dbInfo(sql)
data = data.fillna(0)
oldRepaySum = data.values[0][0]
sql = """
select sum(a.repayMoney)
from loan a,loan_repaying b
where a.id=b.loanId and a.status=6 and b.compatibleStatus not in ('CANCEL') and b.productId not in (3,4,5,6,1001)
and b.termDate >= '{}' and b.termDate < '{}'
and if(b.repaidTime is NULL,TO_DAYS(now()) - TO_DAYS(b.termDate),TO_DAYS(b.repaidTime) - TO_DAYS(b.termDate)) <= 3
and a.userSid in (
select distinct userSid from loan_repaying where termDate < '{}'
)
and a.fundPayAccountId in ({})
""".format(stTime,edTime,stTime,ids)
data = pysql.dbInfo(sql)
data = data.fillna(0)
oldPaid = data.values[0][0]
oldDelayRate = round((oldRepaySum - oldPaid)/oldRepaySum*100,2)
sql = """ insert into dayAddApi_flowdelayrateno(fundName,newRepaySum,newPaid,newDelayRate3,oldRepaySum,oldPaid,oldDelayRate3,createDate) values (%s,%s,%s,%s,%s,%s,%s,%s) """
dset = [(fundName,newRepaySum,newPaid,newDelayRate,oldRepaySum,oldPaid,oldDelayRate,stTime)]
status = pysql.insertData(sql,dset)
log.log(u'逾期3天(新老)数据更新状态-{}!({})(资金方{})!'.format(status,stTime,fundName),'info')
#贷款情况 根据资金方区分
def loanFund():
fundId = config.fundloanId
for fundName in fundId.keys():
ids = fundId[fundName][0]
timeList = timeScale('2017-08-30')
sql = "select distinct createDate from dayAddApi_flowloanfund where fundName='" + fundName + "'";
tmRest = pysql.dbInfoLocal(sql)
tmRest = tmRest.fillna(0)
tmwait = []
if not tmRest.empty:
tmwait = [str(x)[:10] for x in tmRest['createDate']]
for i in range(len(timeList)-1):
stTime = timeList[i]
edTime = timeList[i+1]
if stTime in tmwait:
continue
print u'贷款数据' + fundName + ' ' + stTime
sql = """
select sum(lendMoney) from loan
where status=6 and productId not in (3,4,5,6,1001)
and createdTime >= '{}' and createdTime < '{}'
and fundPayAccountId in ({})
""".format(stTime,edTime,ids)
data = pysql.dbInfo(sql)
data = data.fillna(0)
lendMoney = data.values[0][0]
sql = """ insert into dayAddApi_flowloanfund(fundName,sumMoney,createDate) values (%s,%s,%s) """
dset = [(fundName,lendMoney,stTime)]
status = pysql.insertData(sql,dset)
log.log(u'每日贷款数据更新状态-{}!({})(资金方{})!'.format(status,stTime,fundName),'info')
def main():
delayDayFund()
delayDayNOFund()
loanFund()
if __name__ == '__main__':
main()
| {"/RiskMonitor/apps/login/admin.py": ["/RiskMonitor/apps/login/models.py"]} |
73,532 | xueyeelong/Amonitor | refs/heads/master | /RiskMonitor/api/dayAddApi/urls.py | # -*- coding: utf-8 -*-
from django.conf.urls import url
from rest_framework.urlpatterns import format_suffix_patterns
import views
urlpatterns = [
url(r'^api/v1/day/$', views.indexhead_item),
]
urlpatterns = format_suffix_patterns(urlpatterns) | {"/RiskMonitor/apps/login/admin.py": ["/RiskMonitor/apps/login/models.py"]} |
73,533 | xueyeelong/Amonitor | refs/heads/master | /RiskMonitor/api/dayAddApi/migrations/0003_aeyedelayrate_aeyedelayrateno_aeyegetrate_aeyepassrate.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-11-15 06:34
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('dayAddApi', '0002_auto_20171113_1720'),
]
operations = [
migrations.CreateModel(
name='AeyeDelayRate',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('delayRate0', models.FloatField(default=0.0)),
('delayRate3', models.FloatField(default=0.0)),
('delayRate7', models.FloatField(default=0.0)),
('delayRate10', models.FloatField(default=0.0)),
('delayRate20', models.FloatField(default=0.0)),
('delayRateM1', models.FloatField(default=0.0)),
('delayRateM2', models.FloatField(default=0.0)),
('delayRateM3', models.FloatField(default=0.0)),
('createDate', models.DateField(default=django.utils.timezone.now)),
],
options={
'ordering': ('createDate',),
},
),
migrations.CreateModel(
name='AeyeDelayRateNO',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('newDelayRate3', models.FloatField(default=0.0)),
('newRepaySum', models.IntegerField(default=0)),
('newPaySum', models.IntegerField(default=0)),
('newPaid', models.IntegerField(default=0)),
('oldDelayRate3', models.FloatField(default=0.0)),
('oldRepaySum', models.IntegerField(default=0)),
('oldPaySum', models.IntegerField(default=0)),
('oldPaid', models.IntegerField(default=0)),
('createDate', models.DateField(default=django.utils.timezone.now)),
],
options={
'ordering': ('createDate',),
},
),
migrations.CreateModel(
name='AeyeGetRate',
fields=[
('tryNum', models.IntegerField(default=0)),
('sucNum', models.IntegerField(default=0)),
('sucRate', models.FloatField(default=0.0)),
('createDate', models.DateField(default=django.utils.timezone.now, primary_key=True, serialize=False)),
],
options={
'ordering': ('createDate',),
},
),
migrations.CreateModel(
name='AeyePassRate',
fields=[
('applyNum', models.IntegerField(default=0)),
('passNum', models.IntegerField(default=0)),
('passRate', models.FloatField(default=0.0)),
('createDate', models.DateField(default=django.utils.timezone.now, primary_key=True, serialize=False)),
],
options={
'ordering': ('createDate',),
},
),
]
| {"/RiskMonitor/apps/login/admin.py": ["/RiskMonitor/apps/login/models.py"]} |
73,534 | xueyeelong/Amonitor | refs/heads/master | /RiskMonitor/api/actualApi/urls.py | # -*- coding: utf-8 -*-
from django.conf.urls import url
from rest_framework.urlpatterns import format_suffix_patterns
import views
urlpatterns = [
url(r'^api/v1/actime/$', views.actime_item),
]
urlpatterns = format_suffix_patterns(urlpatterns) | {"/RiskMonitor/apps/login/admin.py": ["/RiskMonitor/apps/login/models.py"]} |
73,535 | xueyeelong/Amonitor | refs/heads/master | /RiskMonitor/apps/aeye/views.py | # -*- coding: utf-8 -*-
from django.shortcuts import render,render_to_response
from django.http import HttpResponse,HttpResponseRedirect
from django.template import RequestContext
from django.contrib.auth.decorators import permission_required
@permission_required('part_admin.riskPage')
def aeyePassRate_view(request):
    """Render the A-eye pass-rate dashboard (requires the risk-page permission)."""
    template = 'aeye/aeyePassRate.html'
    return render(request, template)
@permission_required('part_admin.riskPage')
def aeyeDelayDay_view(request):
    """Render the A-eye delay-day dashboard (requires the risk-page permission)."""
    template = 'aeye/aeyeDelayDay.html'
    return render(request, template)
@permission_required('part_admin.riskPage')
def aeyeGetRate_view(request):
    """Render the A-eye get-rate dashboard (requires the risk-page permission)."""
    template = 'aeye/aeyeGetRate.html'
    return render(request, template)
| {"/RiskMonitor/apps/login/admin.py": ["/RiskMonitor/apps/login/models.py"]} |
73,536 | xueyeelong/Amonitor | refs/heads/master | /dataCalculate/config.py | # -*- coding: utf-8 -*-
import datetime
import numpy
#Path of the update log file
log_path = 'update.log'
#Normalize values for JSON serialization (json.dumps default= hook)
def set_default(obj):
    """Coerce a value for json.dumps(default=set_default).

    datetimes become 'YYYY-MM-DD HH:MM:SS' strings, numeric values become
    floats rounded to 2 decimals, and anything else falls back to str(obj).
    """
    if isinstance(obj, datetime.datetime):
        return obj.strftime('%Y-%m-%d %H:%M:%S')
    # bool is a subclass of int, so True/False become 1.0/0.0 (as before).
    if isinstance(obj, (int, numpy.int64, float)):
        return round(float(obj), 2)
    # The original ended with `elif isinstance(obj, object)`, which matches
    # every value, so its trailing `raise TypeError` was unreachable dead
    # code.  The observable behavior — everything else -> str(obj) — is kept.
    return str(obj)
#Gender code -> label
sex_label = {
'1':'男',
'2':'女'
}
#Age bucket boundaries ('point') and their display labels
age_label = {
'point':[-1,18,25,33,41,100],
'label':['18岁及以下','19-25岁','26-33岁','34-41岁','42岁及以上']
}
#Region code -> province/municipality name
local_label = {
'55566':u'四川',
'45463':u'广东',
'45465':u'湖北',
'45464':u'湖南',
'45462':u'广西',
'35362':u'福建',
'35365':u'江苏',
'35363':u'安徽',
'45466':u'河南',
'35361':u'江西',
'35360':u'山东',
'35364':u'浙江',
'15164':u'河北',
'15163':u'山西',
'55567':u'重庆',
'25266':u'辽宁',
'55564':u'云南',
'55565':u'贵州',
'65666':u'陕西',
'25264':u'黑龙江',
'25265':u'吉林',
'15162':u'内蒙古',
'65665':u'甘肃',
'45461':u'海南',
'35366':u'上海',
'65662':u'新疆',
'65663':u'宁夏',
'15165':u'天津',
'15166':u'北京',
'65664':u'青海',
'55563':u'西藏'
}
#Loan-count bucket boundaries ('point') and their display labels
loan_label = {
'point':[-1,1,2,4,7,100],
'label':['1次','2次','3-4次','5-7次','8次以上']
}
#Product id -> product name (commented entries are retired products)
product = {
'1':u'及时雨',
'2':u'闪电贷',
'3':u'商品贷',
'4':u'现金分期',
'5':u'闪小贷',
'101':u'魔贷',
'102':u'五斗米',
'103':u'仟元贷',
'104':u'爱多贷',
# '105':u'王者钱贷',
'107':u'拿点花',
'108':u'付呗零用钱',
'109':u'有钱来了',
'111':u'速融超',
'112':u'钱好借',
'113':u'马上有钱',
'114':u'借袋钱',
'115':u'点点花',
}
#Overdue-rate series labels and the default (zeroed) trend structure
delayRate = {
'label':['首逾率','逾期率3+','逾期率7+','逾期率10+','逾期率20+','逾期率M1','逾期率M2','逾期率M3'],
'trend':{
'times':['2017-09-19'],
'首逾率':0,
'逾期率3+':0,
'逾期率7+':0,
'逾期率10+':0,
'逾期率20+':0,
'逾期率M1':0,
'逾期率M2':0,
'逾期率M3':0
}
}
#Debt-collection staff ids -> names, grouped by overdue stage (M1/M2) and agency
collectMember = {
'M1':{
'self': {
'2017041018074105e80eed8f884b97a1c550a148548c80':'朱启阳(geoff)',
'201705121519023e53977fd48f43a1a1d267da76bc2400':'董学浩(Wade)',
'201704121314437cc90d73101240438eeb7826f6937700':'王聿松(Lison)',
'20170803145229cfc305df5fe647eea281ec7fc538119c':'李金龙(Kevin)',
'2017080314544787e2b651e0f341ad8b7a409dc68ee24d':'李静萍(Morgana)',
'20170803145300378be2322ca24848948b93847673e7a5':'胡军(Zed)',
'20170523153116e295a60d5df2405c94e91363b91358ac':'何谦(Alisa)',
'20170901100439af50120e72f145fcb8c776fe14581336':'郑伟(Colin)',
'20170901100510e79414cfb09a45499a81019ad4159dec':'施弘祥(Aaron)',
'20170901100553a3689b61a6fb47f3b1a7c2a34d52efbb':'庹正寰(Wilson)',
'20170901100348c0bf4de584d14b028cc33e419e716ac3':'郭锐(Rick)',
'201709070924396645c21d7b144f11ab60d8673c5672cc':'阎苗苗(Nemo)',
'20170907093103e8e663499a3a49b0aed363bfc9ba3e73':'王振国(Apple)',
'20171005092012837194f3a00245b3ba4c30658c68a4e0':'胡红(Ruby)',
'20171005092151e5d2ef5b7927419182cbe211accfe144':'齐峰(Asa)',
'201710050922271a6cebe806a149d68daa6a2a7084b923':'石鹏飞(Bing)'
},
'changxin': {
'2017041213111369c58a9ac1e54c02908008462c0e15d8':'昌信01(changxin01)',
'201704121312417673248b382c4c3ead357d57e89c804d':'昌信02(changxin02)',
'201704121313001c752fce2bd94c0496fbd22791ec9c5f':'昌信03(changxin03)',
'201704121313268898c8a4ce7c4819a80ad6defd7164b9':'昌信04(changxin04)',
'2017041720014778ec6f83bd894f818f01b07b7629b313':'昌信05(changxin05)',
'2017041720023461ecf7effddf43b2b83676b9df5b8a49':'昌信06(changxin06)',
'20170429110239f727d36ac11946e897d65a3cefa141f1':'昌信07(changxin07)',
'20170531202949424d301638644fa1bbbfb1821c7e9ed4':'昌信08(changxin08)',
'20170531203022fe15181ee94244d7801d42ee2cb251f7':'昌信09(changxin09)',
'201705312030594efbffe8cf3b44dda07a1baefcdc52f3':'昌信10(changxin10)',
},
'pancheng': {
'20170901101507106f08a45dc94c1795c8f1fabb09b7cd':'磐辰01(pancheng01)',
'2017090110152347be1c3daec947c987f929b1161d0317':'磐辰02(pancheng02)',
'201709011015571271f59e02b7475abe04092b072c0842':'磐辰03(pancheng03)',
'201709011016128055b6c73dca4a63a47356591c808533':'磐辰04(pancheng04)',
'20170901101628e2bc0fb3e54641e086b7c9487519efc3':'磐辰05(pancheng05)',
'2017090110165043679a0b36e74600874b867c7e5cfc3c':'磐辰06(pancheng06)',
'20170907085950f7d634157f0b44a39f2c602d7b3328a6':'磐辰07(pancheng07)',
'20170907090026e937b37b73c14bb38a11ce0ae57fd708':'磐辰08(pancheng08)',
'20170907090050c42b27e0bbd6424f8a5e639778875904':'磐辰09(pancheng09)',
'201709070901102fa4a776e38d4231aaf7c3445ef3b5ee':'磐辰10(pancheng10)',
},
'yunchi': {
'201708171002468c027414059746c68d2e2238fe7d02b9':'云驰11(yunchi11)',
'2017081710030328c79c7045274273b2ef48a9f5d75ecb':'云驰12(yunchi12)',
'20170817100325cbcc8acc810942c48174f0d7ff61cd81':'云驰13(yunchi13)',
'20170817100344bb6145e6348046c09780e1a1ac616821':'云驰14(yunchi14)',
'201708171004023848b2ac1cd1492ab160bc5bd43eff4c':'云驰15(yunchi15)',
'20170817100422d8254350a2b44de5b7c1e80c52360859':'云驰16(yunchi16)',
'2017090709090197226378b7cf4d75b7a38eb9bd035a23':'云驰21(yunchi21)',
'201709070909206f5010620ff4494ea7aec0b5f9d9278f':'云驰22(yunchi22)',
'20170907090943371b396332af412b93d36c4135f5edb9':'云驰23(yunchi23)',
'20170907091009168e3837c5c141daa2a82f1233360512':'云驰24(yunchi24)',
},
},
'M2':{
'yunchi':{
'201704121146481799db69609e4cebb3982c00a3deadb2':'云驰01(yunchi01)',
'20170412115459e080f05aeb084bc3958345f6832108dc':'云驰02(yunchi02)',
'20170412115900c23a4d6ddd404684bbb0399e2dde4857':'云驰03(yunchi03)',
'2017041213071471ddbcd91b3d4bf28dd5a0a4e00e7d6f':'云驰04(yunchi04)',
'20170531203227f94fd74d0c734cd1a5ea2a57a9965f37':'云驰05(yunchi05)',
'20170531203259cd59c4f96c8e49a0baf92761e3ed1fda':'云驰06(yunchi06)',
'201705312033328c5eb9e0f9e74b64a7e4de5a5a4e73d9':'云驰07(yunchi07)',
'20170607182813c5b29facefad499f8222396887f78325':'云驰08(yunchi08)',
'201706071828497c758785f3a04fc5859e88ae7bf9db93':'云驰09(yunchi09)',
'20170607182912e00fb4b443774f6bbad73d55e60928e1':'云驰10(yunchi10)',
'20170901105504fd30fcfa105f4f28ae243f03218ab08c':'云驰17(yunchi17)',
'201709011055202449f5154bb74e4db992c984630b34f5':'云驰18(yunchi18)',
'20170901105543728941f485e5429b8941218b4b7c7f15':'云驰19(yunchi19)',
},
}
}
#Funding-partner name -> loan-source ids (pre-quoted for use in SQL IN clauses)
fundloanId = {
u'快乐达连连账户':["'10001'"],
u'纵横新创':["'10002','10004'"],
u'口袋理财':["'10003'"],
u'柚子理财':["'10005'"],
u'魔贷金融':["'10006','10010','10017'"],
u'小袋理财':["'10007'"],
u'爱多贷':["'10008'"],
# u'暴雷科技':["'10009'"],
u'拿点花':["'10011'"],
u'付呗零用钱':["'10012'"],
u'星火钱包':["'20002','20001'"],
u'钱好借':["'10016'"],
u'速融超':["'10015'"],
# u'小桥钱包':["'10014'"],
u'马上有钱':["'10018'"],
u'有钱来了':["'10013'"],
u'借袋钱':["'10019'"],
u'点点花':["'10020'"],
}
#C2C member name -> member ids (pre-quoted for use in SQL IN clauses)
c2c_member = {
u'terry':["'1028'"],
u'gaffey':["'1029'"],
u'yujiahui':["'1030'"],
u'emily':["'1033'"],
u'ailsa':["'1036'"],
u'yulanda':["'1037'"],
u'haoshiduo':["'1040'"],
u'zhonglishi':["'1041'"],
u'vicky':["'1044'"]
}
| {"/RiskMonitor/apps/login/admin.py": ["/RiskMonitor/apps/login/models.py"]} |
73,537 | xueyeelong/Amonitor | refs/heads/master | /RiskMonitor/apps/userInfo/urls.py | # -*- coding: utf-8 -*-
from django.conf.urls import url
import views
# User-info dashboard routes: one page per demographic/behavioral breakdown.
urlpatterns = [
    url(r'^userIncrease/$', views.userIncrease_view ,name='increase'),
    url(r'^userSex/$', views.userInfoSex_view ,name='sex'),
    url(r'^userAge/$', views.userInfoAge_view ,name='age'),
    url(r'^userRest/$', views.userRest_view ,name='rest'),
    url(r'^userRestAll/$', views.userRestAll_view ,name='restall'),
    url(r'^userLocation/$', views.userInfoLocation_view ,name='location'),
]
| {"/RiskMonitor/apps/login/admin.py": ["/RiskMonitor/apps/login/models.py"]} |
73,538 | xueyeelong/Amonitor | refs/heads/master | /RiskMonitor/api/dayAddApi/migrations/0005_auto_20171121_1003.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-11-21 02:03
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated by Django: drops the unused newPaySum/oldPaySum columns
    # from the flowdelayrateno model. Generated migrations should not be
    # hand-edited.
    dependencies = [
        ('dayAddApi', '0004_auto_20171115_1703'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='flowdelayrateno',
            name='newPaySum',
        ),
        migrations.RemoveField(
            model_name='flowdelayrateno',
            name='oldPaySum',
        ),
    ]
| {"/RiskMonitor/apps/login/admin.py": ["/RiskMonitor/apps/login/models.py"]} |
73,539 | xueyeelong/Amonitor | refs/heads/master | /dataCalculate/userInfo/increase.py | # -*- coding: utf-8 -*-
#
import MySQLdb
import pandas as pd
import numpy
import json
import sys
import os
import datetime
import time
#Make the parent directory importable (for libfile and config)
sys.path.insert(0,os.path.abspath(os.path.dirname(os.path.dirname(__file__))))
from libfile import pysql
from libfile import logger
import config
#Logger writing to the shared update log
log = logger.Logger('flow-increase',os.path.abspath(os.path.dirname(os.path.dirname(__file__))) + "/" + config.log_path)
#Helper: build an absolute path relative to this script's directory
def getTheFile(filename):
    """Return the absolute path of *filename* inside this script's directory."""
    here = os.path.abspath(os.path.dirname(__file__))
    return here + "/" + filename
#Date range covered by the data
def timeScale(startTime = "2017-03-01"):
    """Return ISO date strings from *startTime* through today, ascending, inclusive.

    The original implementation walked backwards one day at a time from today
    until the string matched startTime, which never terminates when startTime
    is in the future or not an exact YYYY-MM-DD string; it was also quadratic
    because of list.insert(0, ...).  This version validates the input and
    computes the span directly.

    Returns:
        list of 'YYYY-MM-DD' strings; empty when startTime is after today.
    Raises:
        ValueError: when startTime is not formatted as YYYY-MM-DD.
    """
    start = datetime.datetime.strptime(startTime, "%Y-%m-%d").date()
    today = datetime.date.today()
    if start > today:
        return []
    span = (today - start).days
    return [str(start + datetime.timedelta(days=k)) for k in range(span + 1)]
#User growth back-fill
def userNum():
    """Backfill the dayAddApi_userincrease table, one row per missing day.

    For each day between 2017-03-01 and yesterday that has no row in the
    local monitoring DB yet, counts registrations, new/returning loan
    applications and credit approvals from the business DB and inserts a
    summary row.  The last element of timeList (today) is only used as the
    exclusive upper bound, so the current (incomplete) day is never written.
    """
    timeList = timeScale()
    # Days already present in the local table are skipped below.
    sql = 'select distinct createDate from dayAddApi_userincrease'
    tmRest = pysql.dbInfoLocal(sql)
    tmRest = tmRest.fillna(0)
    tmwait = []
    if not tmRest.empty:
        # Normalize to 'YYYY-MM-DD' strings for comparison with timeList.
        tmwait = [str(x)[:10] for x in tmRest['createDate']]
    for i in range(len(timeList)-1):
        stTime = timeList[i]
        edTime = timeList[i+1]
        if stTime in tmwait:
            continue
        # Daily user growth for the window [stTime, edTime).
        # NOTE(review): the date values interpolated into the SQL below are
        # generated internally by timeScale(), so .format() is not
        # user-injectable here.
        print '用户增长数据更新:' + stTime + '~' + edTime
        # Registrations
        sql = """
        select sum(1) from user where date_created > '{}' and date_created < '{}'
        """.format(stTime,edTime)
        data = pysql.dbInfo(sql)
        data = data.fillna(0)
        registerNum = data.values[0][0]
        # Applications: users with no application before stTime count as new...
        sql = """
        select count(distinct user_id) from ci_cash_apply_info
        where create_time > '{}' and create_time < '{}'
        and user_id not in (select distinct user_id from ci_cash_apply_info where create_time < '{}');
        """.format(stTime,edTime,stTime)
        data = pysql.dbInfo(sql)
        data = data.fillna(0)
        newApplyNum = data.values[0][0]
        # ...and users with a prior application count as returning.
        sql = """
        select count(distinct user_id) from ci_cash_apply_info
        where create_time > '{}' and create_time < '{}'
        and user_id in (select distinct user_id from ci_cash_apply_info where create_time < '{}');
        """.format(stTime,edTime,stTime)
        data = pysql.dbInfo(sql)
        data = data.fillna(0)
        oldApplyNum = data.values[0][0]
        # Credit approvals (successfully audited applications).
        sql = """
        select count(distinct user_id) from ci_cash_apply_info where audit_date > '{}' and audit_date < '{}' and status in ('SUCCESS')
        """.format(stTime,edTime)
        data = pysql.dbInfo(sql)
        data = data.fillna(0)
        allowNum = data.values[0][0]
        # Insert the day's summary row into the local monitoring DB.
        sql = """ insert into dayAddApi_userincrease(register,allow,newApply,oldApply,createDate) values (%s,%s,%s,%s,%s) """
        dset = [(registerNum,allowNum,newApplyNum,oldApplyNum,stTime)]
        status = pysql.insertData(sql,dset)
        log.log('用户增长数据更新状态{}({})!'.format(status,stTime),'info')
def main():
    # Entry point: refresh the daily user-growth table.
    userNum()
if __name__ == '__main__':
    main()
| {"/RiskMonitor/apps/login/admin.py": ["/RiskMonitor/apps/login/models.py"]} |
73,540 | xueyeelong/Amonitor | refs/heads/master | /RiskMonitor/apps/flow/views.py | # -*- coding: utf-8 -*-
from django.shortcuts import render,render_to_response
from django.http import HttpResponse,HttpResponseRedirect
from django.template import RequestContext
from django.contrib.auth.decorators import login_required
from django.contrib.auth.decorators import permission_required
@permission_required('part_admin.businessPage')
def flowLoan_view(request):
return render(request,'flow/flowLoan.html')
@permission_required('part_admin.businessPage')
def flowRepayment_view(request):
return render(request,'flow/flowRepayment.html')
@permission_required('part_admin.businessPage')
def flowIncome_view(request):
return render(request,'flow/flowIncome.html')
@permission_required('part_admin.businessPage')
def flowDelayRateFund_view(request):
return render(request,'flow/flowDelayRateFund.html')
@permission_required('part_admin.businessPage')
def flowLoanFund_view(request):
return render(request,'flow/flowLoanFund.html')
@permission_required('part_admin.businessPage')
def flowC2C_view(request):
return render(request,'flow/flowc2c.html')
| {"/RiskMonitor/apps/login/admin.py": ["/RiskMonitor/apps/login/models.py"]} |
73,542 | mpascucci/AST-image-processing | refs/heads/master | /tests/benchmark/benchmark.py | # Lint as: python3
"""
Benchmarks improc performance against a golden set of known results.
See benchmark/README.md for more documentation.
"""
import astimp
from imageio import imread
import matplotlib.pyplot as plt
import numpy as np
from argparse import ArgumentParser
from benchmark_utils import *
from preprocess_img import *
from tqdm import tqdm
from multiprocessing import Pool, cpu_count
from functools import partial
STOP_DISPLAY = False
def run_improc_analysis(img_dir, img_filename, pickle_preproc, method):
    """
    Runs improc analysis:
    1. Preprocesses the image using 1 of 2 methods:
        Fast option: If pickle_preproc is provided, reads preprocessing data
            from the pickle file.
        Slow option: If pickle_preproc is not provided, reads the image and
            runs preprocessing.
    2. Finds inhibition zone diameters with the requested measuring method.

    Args:
        img_dir: Directory containing the original AST images.
        img_filename: Name of the AST image. Used either to read the original
            image from img_dir or as the lookup key into pickle_preproc.
        pickle_preproc: PreprocResults (or None). Pre-computed preprocessing
            data keyed by image filename.
        method: One of "vote-count", "minmax" or "student".

    Returns:
        Tuple of:
        1. list of ImprocAtbResult — all antibiotics improc found in the image.
        2. Cropped AST image.

    Raises:
        ValueError: if *method* is not one of the recognized methods.
    """
    if pickle_preproc:
        img_preproc = pickle_preproc[img_filename]
    else:
        img_path = os.path.join(img_dir, img_filename)
        img_preproc = preprocess_one_image(img_path)
    circles = img_preproc['circles']
    labels = img_preproc['labels']
    preproc = img_preproc['preproc']
    if not circles or not preproc:
        # NOTE(review): this failure path returns the whole preproc dict as
        # the second element, while the success path below returns
        # img_preproc['crop'] — confirm callers handle both shapes.
        return [], img_preproc
    #* VOTE-COUNT
    if method == "vote-count":
        inhibs = astimp.measureDiameters(preproc)
        # uncomment this to switch back to the python version of the measuring function
        # inhibs = [inhib_diameter_modes.count.measureOneDiameter(preproc, i) for i in range(len(circles))]
    #* MIN-MAX
    elif method == "minmax":
        inhibs = [astimp.measureOneDiameterMinMax(preproc, i) for i in range(len(circles))]
    #* STUDENT
    elif method == "student":
        # Fixed: the module was misspelled "inhib_diameter_modes.sudent",
        # which raised ImportError; the package on disk is ".student".
        from inhib_diameter_modes.student import measureOneDiameter
        inhibs = []
        for idx in range(len(circles)):
            inhibs.append(measureOneDiameter(preproc,idx))
    else:
        raise ValueError("{} is not a valid method".format(method))
    results = [ImprocAtbResult(c, l, i)
               for c, l, i in zip(circles, labels, inhibs)]
    return results, img_preproc['crop']
def find_diam_diffs_for_atb(atb_results, expected_diams):
    """
    For a single antibiotic within an AST analysis, finds the differences
    between expected inhibition zone diameters and the inhibition zone
    diameters returned by improc.

    Results and annotations are paired in increasing-diameter order, and each
    matched ImprocAtbResult gets its .expected_diam attribute filled in.

    This method returns empty if the number of pellets found for the antibiotic
    differs between improc's results and the annotation (which indicates that
    a pellet was mislabeled).

    For example, if:
        atb_results = [ImprocAtbResult(diam=4.0), ImprocAtbResult(diam=5.6)]
        expected_diams = [4.5, 6.0]
    then:
        returns [-0.5, -0.4]   # actual - expected (the previous docstring's
                               # [0.5, 0.4] had the sign flipped)
        atb_results' expected_diams updated to 4.5 and 6.0

    Returns:
        List of diffs between actual and expected diameters (actual - expected)."""
    if len(atb_results) != len(expected_diams):
        # A pellet-count mismatch means at least one pellet was mislabeled;
        # we don't know which diameters to compare, so report nothing and
        # exclude this antibiotic from the diameter statistics.
        return []
    diam_diffs = []
    expected_diams = sorted(expected_diams)
    sorted_results = sorted(atb_results, key=lambda result: result.inhib_diam)
    for result, expected in zip(sorted_results, expected_diams):
        result.expected_diam = expected
        diam_diffs.append(result.inhib_diam - expected)
    return diam_diffs
def find_diam_diffs_for_ast(improc_results, expected_atbs):
    """
    For an entire AST analysis, finds the differences between expected
    inhibition zone diameters and the diameters returned by improc.

    Results are grouped by antibiotic label; within each label the diameters
    are matched in increasing order (see find_diam_diffs_for_atb), and each
    matched ImprocAtbResult has its .expected_diam attribute set.

    For example, if:
        expected_atbs = {'ATB1': [13.1, 9.7], 'ATB2': [5.0], 'ATB3': [6.0]}
        improc_results = [
            ImprocAtbResult(label=ATB1, inhib_diam=10.0),
            ImprocAtbResult(label=ATB1, inhib_diam=12.5),
            ImprocAtbResult(label=ATB2, inhib_diam=4.3),
            ImprocAtbResult(label=ATB3, inhib_diam=5.0),
        ]
    then it returns:
        [0.3, -0.6, -0.7, -1.0]   # actual - expected, per matched pellet
    and e.g. the ATB1 result with inhib_diam=10.0 gets expected_diam=9.7.
    (The previous docstring's worked example was internally inconsistent.)

    Args:
        improc_results: list(ImprocAtbResult).
        expected_atbs: mapping of antibiotic label -> list of annotated
            diameters.
            NOTE(review): a label found by improc but absent from
            expected_atbs raises KeyError here unless expected_atbs is a
            defaultdict — confirm against the annotation loaders.

    Returns:
        List of diffs between actual and expected diameters.
    """
    diam_diffs = []
    improc_results_per_atb = defaultdict(list)
    for result in improc_results:
        improc_results_per_atb[result.label].append(result)
    # For each antibiotic that improc found, attempt to match it up with
    # the closest expected antibiotic with the same label.
    for improc_label, atb_results in improc_results_per_atb.items():
        expected_diams = expected_atbs[improc_label]
        diam_diffs += find_diam_diffs_for_atb(atb_results, expected_diams)
    return diam_diffs
def run_one_benchmark(item, img_dir, results, preproc_results, method, use_multiproc=True):
    """Process a single (filename, annotation) pair from the benchmark config.

    Args:
        item: (filename, annotation) tuple.
        img_dir: directory with the AST images.
        results: BenchmarkResult used to record preprocessing exceptions.
        preproc_results: PreprocResults or None (see run_improc_analysis).
        method: diameter measuring method name.
        use_multiproc: True when called through a process pool; suppresses
            progress messages and drops the cropped image from the result.

    Returns:
        (improc_results, annotation, ast_image), or (None, None, None) when
        the file is missing or analysis failed.
    """
    filename, annotation = item
    if not use_multiproc: tqdm.write("processing file: {}".format(filename))
    try:
        improc_results, ast_image = run_improc_analysis(
            img_dir, filename, preproc_results, method)
    except FileNotFoundError as e:
        if not use_multiproc: tqdm.write("File not found: {}".format(e))
        return (None,None,None)
    except ErrorInPreproc as e:
        # Fixed typo in the log message ("expection" -> "exception").
        if not use_multiproc: tqdm.write("Improc threw exception on file {} {}".format(filename, e))
        results.record_exception()
        return (None,None,None)
    except Exception as e:
        # raise ## UNCOMMENT FOR DEBUG
        if not use_multiproc: tqdm.write("Error {} {}".format(filename, e))
        return (None,None,None)
    if use_multiproc:
        # makes benchmark run faster in multiprocess mode — presumably by
        # not shipping the large cropped image back through the Pool.
        ast_image = None
    return improc_results, annotation, ast_image
def run_benchmark(config, display, img_dir, preproc_dir, jobs=1, method="vote-count", show_result=False):
    """
    Runs AST benchmark as described at the top of this file.
    Args:
        config: Config data derived from benchmark_golden_results.yml,
            parsed by parse_and_validate_config().
        display: bool. Whether to display an image of each AST.
            If true, img_dir is required, even if preproc_dir is provided.
        Provide one of:
            image_dir: Directory containing AST images. May be None if
                display=false and preproc_dir != None.
            preproc_dir: Directory containing preprocessing results
                generated by preproc.py.
        jobs: number of jobs for multiprocessing (display forces 1 process).
        method: diameter measuring method (see run_improc_analysis).
        show_result: bool. Whether to print the aggregated metrics.
    Returns:
        List of (actual - expected) diameter diffs over all processed ASTs.

    NOTE(review): in multiprocess mode, results.record_exception() runs in
    the worker process on a pickled copy of `results`, so exception counts
    are only accurate with jobs=1 — confirm whether this is intended.
    """
    # Counters to accumulate metrics across all AST files.
    results = BenchmarkResult()
    preproc_results = None
    if preproc_dir:
        preproc_results = PreprocResults(preproc_dir)
    # Process each file individually, incrementing the counters above.
    f = partial(run_one_benchmark, img_dir=img_dir,
                results=results,preproc_results=preproc_results,method=method)
    if jobs > 1 and not display:
        # use multiprocessing
        print("Running in parallel on {} processes".format(jobs))
        with Pool(jobs) as p:
            out = list(tqdm(p.imap(f,config.items()), total=len(config.items())))
        for improc_results, annotation, _ in out:
            if improc_results is None and annotation is None:
                continue
            results.record_ast_result(improc_results, annotation)
            results.record_diam_diffs(
                find_diam_diffs_for_ast(improc_results, annotation))
    else:
        # use only one process
        for item in tqdm(config.items()):
            filename, annotation = item
            improc_results, annotation, ast_image = f(item, use_multiproc=False)
            if improc_results is None and annotation is None:
                continue
            results.record_ast_result(improc_results, annotation)
            diffs = find_diam_diffs_for_ast(improc_results, annotation)
            results.record_diam_diffs(diffs)
            if display:
                plot_ast_result(improc_results, ast_image, filename, annotation)
                if STOP_DISPLAY:
                    break
    if show_result:
        results.show_results()
    return results.diam_diffs
def plot_atb_result(result, plt, ax, scale):
    """ Plots the label and inhibition zone for a single antibiotic.

    Args:
        result: ImprocAtbResult to draw.
        plt: the matplotlib.pyplot module, passed in by the caller.
        ax: matplotlib Axes to draw on.
        scale: mm per pixel, used to convert diameters (mm) to pixel radii.

    The label box is green when the result was matched with an annotation
    (expected_diam set), red otherwise; a matched expected diameter is also
    drawn as a light-green dashed circle.
    NOTE(review): an expected diameter of exactly 0 is falsy and therefore
    rendered as unmatched.
    """
    center = result.pellet.center
    d_inhib_mm = round(result.inhib_diam, 2)
    r_inhib_px = result.inhib_diam/scale/2
    text_offset = result.pellet.radius + 10
    text_loc = np.array(center)+[text_offset, text_offset]
    s = f"{result.label}\nd={d_inhib_mm}"
    box_color = 'green' if result.expected_diam else 'red'
    bbox = dict(boxstyle="square", ec=box_color, fc=box_color, alpha=0.4)
    text = plt.Text(*text_loc, s, color='k', bbox=bbox)
    ax.add_artist(text)
    ax.add_artist(plt.Circle(center, r_inhib_px, ec='r', fc='none', ls='--'))
    if result.expected_diam:
        expected_inhib_px = result.expected_diam/scale/2
        ax.add_artist(plt.Circle(
            center, expected_inhib_px, ec='lightgreen', fc='none', ls='--'))
def key_press_callback(event):
    """Matplotlib key handler: ESC stops the display loop and closes the figure."""
    global STOP_DISPLAY
    if event.key != 'escape':
        return
    STOP_DISPLAY = True
    plt.close()
def plot_ast_result(results, image, filename, annotation):
    """
    Displays the original AST image, with improc's found antibiotic labels,
    inhibition zone diameters, and golden inhibition zone diameters if found.
    Args:
        results: List of ImprocAtbResult to be overlaid on image.
        image: ndarray, AST image to display.
        filename: String, name of the image file being processed. Used to add a
            title to the display.
        annotation: mapping of antibiotic label -> list of expected diameters.
            Antibiotics that were annotated but not found by improc are listed
            in a subtitle below the image.
            (The previous docstring documented a non-existent
            `missing_expected_atbs` parameter.)
    """
    fig = plt.figure(figsize=(7, 7))
    # ESC in the figure window sets STOP_DISPLAY (see key_press_callback).
    fig.canvas.mpl_connect('key_press_event', key_press_callback)
    plt.imshow(image)
    plt.title(filename)
    plt.suptitle("press any key to continue, ESC to stop.")
    ax = plt.gca()
    # The mm-per-pixel scale is estimated from the detected pellets.
    scale = astimp.get_mm_per_px([result.pellet for result in results])
    for result in results:
        plot_atb_result(result, plt, ax, scale)
    missing_expected_atbs = set(annotation.keys()) - set([r.label for r in results])
    if len(missing_expected_atbs) > 0:
        missing_atb_strs = []
        for label in missing_expected_atbs:
            diams = annotation[label]
            missing_atb_strs += ["%s:%s" % (label, diam) for diam in diams]
        subtitle = "These antibiotics were expected, but not found: %s" % \
            ", ".join(missing_atb_strs)
        plt.annotate(subtitle, (0, 0), (0, -20), xycoords='axes fraction',
                     textcoords='offset points', wrap=True)
    plt.plot()
    # Legend entries mirroring the two dashed-circle colors drawn above.
    red_line = plt.Line2D(range(1), range(1), color="r", linewidth=0,
                          marker='o', markersize=15, markerfacecolor="none")
    green_line = plt.Line2D(range(1), range(1), color="lightgreen",
                            linewidth=0, marker='o', markersize=15,
                            markerfacecolor="none")
    plt.legend((red_line, green_line),
               ('Improc inhib diam', 'Expected inhib diam'), numpoints=1,
               framealpha=0.6, bbox_to_anchor=(0, 0, 0.1, 0.2),
               loc="lower left", borderaxespad=0, ncol=2)
    plt.axis('off')
    plt.show()
def main():
    """Command-line entry point: parse arguments and run the benchmark."""
    parser = ArgumentParser()
    parser.add_argument("-d", "--display", dest="display", action='store_true',
                        help="Display each AST processing result.")
    parser.add_argument("-c", "--config_file", dest="config_file",
                        default="annotations/amman/amman_golden.yml",
                        help="Path to the file containing expected AST results.")
    parser.add_argument("-i", "--image_dir", dest="image_dir", action='store',
                        default="images/",
                        help="""Path of the directory containing AST images.
                        At least 1 of image_dir or preproc_dir is
                        required.""")
    parser.add_argument("-p", "--preproc_dir", dest="preproc_dir", action='store',
                        default=None,
                        help="""Path of the directory containing pickle files
                        generated by preproc.py. If provided, the
                        script will not run preprocessing, and will run faster.
                        If not provided, the script will preprocess each image,
                        and will run slower. At least 1 of image_dir or
                        preproc_dir is required.""")
    parser.add_argument("-m", "--method", dest="method", action='store',
                        help="Diameter measuring method.", default="vote-count")
    parser.add_argument("-j", "--jobs-number", dest="jobs_number", action='store',
                        default=cpu_count(),
                        help="""The number of parallel processes to run for the benchmark.""")
    args = parser.parse_args()
    jobs = int(args.jobs_number)
    config = parse_and_validate_config(args.config_file)
    print("############## ASTApp benchmarking starting on %d files\n" % len(config))
    # show_result=True prints the aggregated metrics at the end.
    run_benchmark(config, args.display, args.image_dir, args.preproc_dir, jobs=jobs, method=args.method, show_result=True)
if __name__ == '__main__':
    main()
| {"/tests/benchmark/benchmark_tools/usecases/amman/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py"], "/tests/benchmark/benchmark_tools/usecases/creteil/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py"], "/tests/benchmark/benchmark_tools/usecases/whonet_nada/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py", "/tests/benchmark/benchmark_tools/atb_names/__init__.py"], "/tests/benchmark/inhib_diameter_modes/op/op.py": ["/tests/benchmark/inhib_diameter_modes/shared/__init__.py"], "/tests/benchmark/inhib_diameter_modes/count/__init__.py": ["/tests/benchmark/inhib_diameter_modes/count/count.py"], "/tests/benchmark/preprocess_img/__init__.py": ["/tests/benchmark/preprocess_img/preproc.py"], "/tests/benchmark/benchmark_tools/atb_names/__init__.py": ["/tests/benchmark/benchmark_tools/atb_names/usecases/__init__.py"], "/python-module/astimp_tools/datamodels.py": ["/python-module/astimp_tools/image.py"], "/tests/benchmark/inhib_diameter_modes/nlclsq/__init__.py": ["/tests/benchmark/inhib_diameter_modes/nlclsq/nlclsq.py"], "/tests/benchmark/inhib_diameter_modes/slopeOP/__init__.py": ["/tests/benchmark/inhib_diameter_modes/slopeOP/slopeOP.py"], "/tests/benchmark/inhib_diameter_modes/student/__init__.py": ["/tests/benchmark/inhib_diameter_modes/student/student.py"], "/python-module/astimp_tools/artist.py": ["/python-module/astimp_tools/image.py"], "/tests/benchmark/inhib_diameter_modes/op/__init__.py": ["/tests/benchmark/inhib_diameter_modes/op/op.py"], "/tests/benchmark/inhib_diameter_modes/slopeOP/slopeOP.py": ["/tests/benchmark/inhib_diameter_modes/shared/__init__.py"], "/tests/benchmark/preprocess_img/__main__.py": ["/tests/benchmark/preprocess_img/preproc.py"], "/tests/benchmark/benchmark_tools/atb_names/usecases/__init__.py": ["/tests/benchmark/benchmark_tools/atb_names/interfaces/__init__.py"]} |
73,543 | mpascucci/AST-image-processing | refs/heads/master | /tests/benchmark/benchmark_tools/usecases/amman/__init__.py | # coding: utf8
# benchmark_tools.amman
# Copyright 2019 Fondation Medecins Sans Frontières https://fondation.msf.fr/
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This file is part of the ASTapp image processing library
# Author: Ellen Sebastian
from ...interfaces import *
from collections import Mapping
from os import path
import pandas as pd
class AST_annotation_Amman(AST_annotation):
    """Class to access one line (an AST) of the annotation files of amman.
    This class behaves like a dictionary which keys are extended antibiotic names.
    """
    def __init__(self, guiID, df_slice):
        # df_slice: one row of the annotation DataFrame (a pandas Series).
        self.ast_id = guiID
        self.species = df_slice['Organism']
        # The Amman annotations carry no expert-system test data.
        self.expert_system_status = []
        self.sample_date = df_slice['Specimen date']
        # Columns after the first 4 metadata fields are antibiotic names.
        self.atb_names = df_slice.index[4:].tolist()
        # SIR values are not annotated in the Amman dataset.
        self.sir_values = None
        self.raw_sir_values = None
        self.diameters = [float(i) for i in df_slice[4:].tolist()]
        self.sample_type = df_slice['Specimen type']
class Annotations_set_Amman(Annotations_set):
    """Utility class for reading and accessing AST annotations from Amman dataset.
    It behaves like a dictionary whose keys are the guiID (id of the pictures)."""
    def __init__(self, annotations_file):
        self.file = annotations_file
        self.diam_df = Annotations_set_Amman.read_annotation_file(
            self.file)
        # The first 4 columns are metadata; the rest are antibiotic names.
        self.atb_names = self.diam_df.keys()[4:]
        self.ast_ids = list(self.diam_df.index)
    @staticmethod
    def read_annotation_file(path):
        """Load the comma-separated annotation CSV, indexed by 'AST Image'."""
        df = pd.read_csv(path, sep=',', index_col="AST Image")
        return df
    def get_ast(self, guiID):
        """Return the AST_annotation_Amman for one image id.

        Raises:
            KeyError: when the id is not present in the annotation file.
        """
        try:
            df_slice = self.diam_df.loc[guiID]
        except KeyError as exc:
            # Narrowed from a bare `except:`, which also masked unrelated
            # errors (e.g. TypeError on a bad key) as "ID not found".
            raise KeyError("ID not found") from exc
        return AST_annotation_Amman(guiID, df_slice)
    def get_ast_slice(self, slice_instance):
        """Return annotations for ids in range(start, stop, step) that exist."""
        out = []
        start = slice_instance.start
        stop = slice_instance.stop
        step = slice_instance.step if (slice_instance.step is not None) else 1
        for i in range(start, stop, step):
            if i in self.ast_ids:
                out.append(self.get_ast(i))
        return out
| {"/tests/benchmark/benchmark_tools/usecases/amman/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py"], "/tests/benchmark/benchmark_tools/usecases/creteil/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py"], "/tests/benchmark/benchmark_tools/usecases/whonet_nada/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py", "/tests/benchmark/benchmark_tools/atb_names/__init__.py"], "/tests/benchmark/inhib_diameter_modes/op/op.py": ["/tests/benchmark/inhib_diameter_modes/shared/__init__.py"], "/tests/benchmark/inhib_diameter_modes/count/__init__.py": ["/tests/benchmark/inhib_diameter_modes/count/count.py"], "/tests/benchmark/preprocess_img/__init__.py": ["/tests/benchmark/preprocess_img/preproc.py"], "/tests/benchmark/benchmark_tools/atb_names/__init__.py": ["/tests/benchmark/benchmark_tools/atb_names/usecases/__init__.py"], "/python-module/astimp_tools/datamodels.py": ["/python-module/astimp_tools/image.py"], "/tests/benchmark/inhib_diameter_modes/nlclsq/__init__.py": ["/tests/benchmark/inhib_diameter_modes/nlclsq/nlclsq.py"], "/tests/benchmark/inhib_diameter_modes/slopeOP/__init__.py": ["/tests/benchmark/inhib_diameter_modes/slopeOP/slopeOP.py"], "/tests/benchmark/inhib_diameter_modes/student/__init__.py": ["/tests/benchmark/inhib_diameter_modes/student/student.py"], "/python-module/astimp_tools/artist.py": ["/python-module/astimp_tools/image.py"], "/tests/benchmark/inhib_diameter_modes/op/__init__.py": ["/tests/benchmark/inhib_diameter_modes/op/op.py"], "/tests/benchmark/inhib_diameter_modes/slopeOP/slopeOP.py": ["/tests/benchmark/inhib_diameter_modes/shared/__init__.py"], "/tests/benchmark/preprocess_img/__main__.py": ["/tests/benchmark/preprocess_img/preproc.py"], "/tests/benchmark/benchmark_tools/atb_names/usecases/__init__.py": ["/tests/benchmark/benchmark_tools/atb_names/interfaces/__init__.py"]} |
73,544 | mpascucci/AST-image-processing | refs/heads/master | /tests/benchmark/benchmark_tools/usecases/creteil/__init__.py | # benchmark_tools.creteil
# Copyright 2019 Fondation Medecins Sans Frontières https://fondation.msf.fr/
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This file is part of the ASTapp image processing library
# Author: Marco Pascucci
from ...interfaces import *
from collections import Mapping
from os import path
import pandas as pd
class AST_annotation_Creteil(AST_annotation):
    """One annotated AST (one row of the Creteil annotation files).

    Behaves like a dictionary whose keys are extended antibiotic names
    (see the AST_annotation base class).
    """

    def __init__(self, guiID, diams, sirs):
        # Fixed column layout of the Creteil csv files:
        #   0-2 : metadata, 3 : species, 4-8 : expert-system tests,
        #   9-.. : one column per antibiotic.
        metadata = sirs[0:3]
        es_tests = diams[4:9]
        self.ast_id = guiID
        self.species = sirs[3]
        self.sample_date = metadata[2]
        # Keep only the expert-system tests that were actually filled in.
        self.expert_system_status = [
            {'name': test_name, 'value': es_tests[test_name], 'input': False}
            for test_name in es_tests.index
            if not pd.isna(es_tests[test_name])
        ]
        self.atb_names = diams.index[9:].tolist()
        self.sir_values = sirs[9:].tolist()
        self.raw_sir_values = None
        self.diameters = diams[9:].tolist()
        self.sample_type = None
class Annotations_set_Creteil(Annotations_set):
    """Help accessing and combining SIR annotation results.

    Behaves like a dictionary whose keys are the guiID (id of the pictures).
    Expects `annotations_diam.csv` and `annotations_SIR.csv` in the given
    folder, with one row per AST indexed by guiID.
    """

    def __init__(self, annotation_folder):
        self.files = {
            "diam": path.join(annotation_folder, "annotations_diam.csv"),
            "sir": path.join(annotation_folder, "annotations_SIR.csv")
        }
        self.diam_df = Annotations_set_Creteil.read_annotation_file(
            self.files["diam"])
        self.sir_df = Annotations_set_Creteil.read_annotation_file(
            self.files["sir"])
        # Both files must describe the same set of ASTs.
        assert len(self.diam_df) == len(self.sir_df)
        # Antibiotic columns start after the 9 metadata columns.
        self.atb_names = self.diam_df.keys()[9:]
        self.ast_ids = list(self.diam_df.index)

    @staticmethod
    def read_annotation_file(path):
        """Read one semicolon-separated annotation file indexed by guiID."""
        df = pd.read_csv(path, sep=';', index_col="guiID")
        return df

    def get_ast(self, guiID):
        """Return the AST_annotation_Creteil for one picture id.

        Raises:
            KeyError: if guiID is not present in the annotation files.
        """
        try:
            diams = self.diam_df.loc[guiID]
            sirs = self.sir_df.loc[guiID]
        except KeyError as exc:
            # Narrowed from a bare `except:` so unrelated errors propagate;
            # chain the cause so the original lookup failure stays visible.
            raise KeyError("ID not found") from exc
        return AST_annotation_Creteil(guiID, diams, sirs)

    def get_ast_slice(self, slice_instance):
        """Return the annotations whose ids fall inside the slice.

        Ids missing from the annotation files are silently skipped.
        Open-ended slices (None start/stop/step) are handled instead of
        raising TypeError.
        """
        start = 0 if slice_instance.start is None else slice_instance.start
        if slice_instance.stop is None:
            stop = max(self.ast_ids) + 1 if self.ast_ids else 0
        else:
            stop = slice_instance.stop
        step = 1 if slice_instance.step is None else slice_instance.step
        known_ids = set(self.ast_ids)  # O(1) membership tests
        return [self.get_ast(i) for i in range(start, stop, step)
                if i in known_ids]
def get_annotations_set(annotation_folder):
    """Factory: build the Creteil annotation set from its data folder.

    annotation_folder -- directory holding annotations_diam.csv and
    annotations_SIR.csv.
    """
    return Annotations_set_Creteil(annotation_folder)
| {"/tests/benchmark/benchmark_tools/usecases/amman/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py"], "/tests/benchmark/benchmark_tools/usecases/creteil/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py"], "/tests/benchmark/benchmark_tools/usecases/whonet_nada/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py", "/tests/benchmark/benchmark_tools/atb_names/__init__.py"], "/tests/benchmark/inhib_diameter_modes/op/op.py": ["/tests/benchmark/inhib_diameter_modes/shared/__init__.py"], "/tests/benchmark/inhib_diameter_modes/count/__init__.py": ["/tests/benchmark/inhib_diameter_modes/count/count.py"], "/tests/benchmark/preprocess_img/__init__.py": ["/tests/benchmark/preprocess_img/preproc.py"], "/tests/benchmark/benchmark_tools/atb_names/__init__.py": ["/tests/benchmark/benchmark_tools/atb_names/usecases/__init__.py"], "/python-module/astimp_tools/datamodels.py": ["/python-module/astimp_tools/image.py"], "/tests/benchmark/inhib_diameter_modes/nlclsq/__init__.py": ["/tests/benchmark/inhib_diameter_modes/nlclsq/nlclsq.py"], "/tests/benchmark/inhib_diameter_modes/slopeOP/__init__.py": ["/tests/benchmark/inhib_diameter_modes/slopeOP/slopeOP.py"], "/tests/benchmark/inhib_diameter_modes/student/__init__.py": ["/tests/benchmark/inhib_diameter_modes/student/student.py"], "/python-module/astimp_tools/artist.py": ["/python-module/astimp_tools/image.py"], "/tests/benchmark/inhib_diameter_modes/op/__init__.py": ["/tests/benchmark/inhib_diameter_modes/op/op.py"], "/tests/benchmark/inhib_diameter_modes/slopeOP/slopeOP.py": ["/tests/benchmark/inhib_diameter_modes/shared/__init__.py"], "/tests/benchmark/preprocess_img/__main__.py": ["/tests/benchmark/preprocess_img/preproc.py"], "/tests/benchmark/benchmark_tools/atb_names/usecases/__init__.py": ["/tests/benchmark/benchmark_tools/atb_names/interfaces/__init__.py"]} |
73,545 | mpascucci/AST-image-processing | refs/heads/master | /tests/benchmark/benchmark_tools/usecases/whonet_nada/__init__.py | # benchmark_tools.whonet_nada
# Copyright 2019 Fondation Medecins Sans Frontières https://fondation.msf.fr/
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This file is part of the ASTapp image processing library
from ...interfaces import *
from ...atb_names import *
from collections import Mapping
from os import path
import pandas as pd
import datetime
class AST_annotation_WhonetNada_es_test(AST_annotation):
    """Class to access one line (an AST) of the Whonet/Nada annotation
    export (Amman data). This class behaves like a dictionary which keys
    are extended antibiotic names.
    """
    # NOTE: the docstring previously said "Creteil" — copy-paste error fixed.

    def __init__(self, guiID, diams, sirs):
        """Build one annotation from a row of each excel sheet.

        guiID -- zero-based row index of the AST
        diams -- row of the 'diametre' sheet (pandas Series)
        sirs  -- row of the 'SRI' sheet (pandas Series)
        """
        # Fixed column layout: 2 = sample date, 3 = sample type,
        # 4 = species code, 6-.. = one column per antibiotic (whonet codes).
        species = sirs[4]
        diameters = diams[6:]
        raw_sir_values = sirs[6:]
        atb_names = diams.index[6:]
        sample_date = diams[2]
        sample_type = diams[3]
        # Normalise the excel timestamp to dd/mm/YYYY (raises ValueError if
        # the sheet does not use the expected timestamp format).
        sample_date = datetime.datetime.strptime(
            str(sample_date), '%Y-%m-%d %H:%M:%S').strftime('%d/%m/%Y')
        # Organism: expand whonet species codes; unknown codes kept as-is.
        species_dict = {
            "eco": "Escherichia coli",
            "sau": "Staphylococcus aureus",
            "kpn": "Klebsiella pneumoniae",
        }
        species = species_dict.get(species, species)
        # Specimen type: map whonet sample codes to ASTapp sample names
        # (None when there is no known equivalent).
        sample_dict = {
            "sa": "BLOOD",
            "ur": "URINE",
            "lc": "CRM",
            "ab": None,  # abcès or pus ?
            "bl": "BLOOD",
            "sb": None,
            "bo": "BONE",
            "ti": "TISSUE",
            "ps": None,
            "fb": None
        }
        sample_type = sample_dict.get(sample_type, None)
        # Add an offset to match original excel line number (because of headers and index start at 0)
        self.ast_id = guiID + 2
        self.species = species
        # No expert-system test columns in this export.
        self.expert_system_status = []
        self.sample_date = sample_date
        self.atb_names = list(map(
            lambda whonet_code: self.getI2Ashortname(whonet_code),
            atb_names.tolist()
        ))
        self.sir_values = None
        self.raw_sir_values = raw_sir_values.tolist()
        self.diameters = diameters.tolist()
        self.sample_type = sample_type

    def getI2Ashortname(self, whonet_code):
        """Translate a whonet antibiotic code to its i2a short name.

        Unknown codes are returned unchanged instead of raising.
        """
        try:
            return i2a.whonet_code2short(whonet_code)
        except Exception:
            # Narrowed from a bare `except:` (which also swallowed
            # SystemExit/KeyboardInterrupt); fall back to the raw code.
            return whonet_code
class Annotations_set_WhonetNada_es_test(Annotations_set):
    """Help accessing and combining SIR annotation results.

    Behaves like a dictionary whose keys are the guiID (id of the pictures).
    Expects 'SRI_and_diameter_amman.xlsx' (sheets 'diametre' and 'SRI')
    in the given folder.
    """

    def __init__(self, annotation_folder):
        self.files = {
            "sir_and_diam": path.join(annotation_folder, "SRI_and_diameter_amman.xlsx"),
        }
        self.xl = Annotations_set_WhonetNada_es_test.read_annotation_file(
            self.files["sir_and_diam"])
        self.diam_df = pd.read_excel(self.xl, 'diametre')
        self.sir_df = pd.read_excel(self.xl, 'SRI')
        # Both sheets must describe the same set of ASTs.
        assert len(self.diam_df) == len(self.sir_df)
        # Antibiotic columns start after the 6 metadata columns.
        self.atb_names = self.diam_df.keys()[6:]
        self.ast_ids = list(self.diam_df.index)

    @staticmethod
    def read_annotation_file(path):
        """Open the excel workbook holding the annotation sheets."""
        xl = pd.ExcelFile(path)
        return xl

    def get_ast(self, numero):
        """Return the annotation of one AST by its row index.

        Raises:
            KeyError: if `numero` is not a row of the annotation sheets.
        """
        try:
            diams = self.diam_df.loc[numero]
            sirs = self.sir_df.loc[numero]
        except KeyError as exc:
            # Narrowed from a bare `except:` so unrelated errors propagate;
            # chain the cause so the original failure stays visible.
            raise KeyError("ID not found") from exc
        return AST_annotation_WhonetNada_es_test(numero, diams, sirs)

    def get_ast_slice(self, slice_instance):
        """Return the annotations whose row index falls inside the slice.

        Missing ids are silently skipped. Open-ended slices (None bounds)
        are handled instead of raising TypeError.
        """
        start = 0 if slice_instance.start is None else slice_instance.start
        if slice_instance.stop is None:
            stop = max(self.ast_ids) + 1 if self.ast_ids else 0
        else:
            stop = slice_instance.stop
        step = 1 if slice_instance.step is None else slice_instance.step
        known_ids = set(self.ast_ids)  # O(1) membership tests
        return [self.get_ast(i) for i in range(start, stop, step)
                if i in known_ids]
def get_annotations_set(annotation_folder):
    """Factory: build the Whonet/Nada annotation set from its data folder.

    annotation_folder -- directory holding SRI_and_diameter_amman.xlsx.
    """
    return Annotations_set_WhonetNada_es_test(annotation_folder)
| {"/tests/benchmark/benchmark_tools/usecases/amman/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py"], "/tests/benchmark/benchmark_tools/usecases/creteil/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py"], "/tests/benchmark/benchmark_tools/usecases/whonet_nada/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py", "/tests/benchmark/benchmark_tools/atb_names/__init__.py"], "/tests/benchmark/inhib_diameter_modes/op/op.py": ["/tests/benchmark/inhib_diameter_modes/shared/__init__.py"], "/tests/benchmark/inhib_diameter_modes/count/__init__.py": ["/tests/benchmark/inhib_diameter_modes/count/count.py"], "/tests/benchmark/preprocess_img/__init__.py": ["/tests/benchmark/preprocess_img/preproc.py"], "/tests/benchmark/benchmark_tools/atb_names/__init__.py": ["/tests/benchmark/benchmark_tools/atb_names/usecases/__init__.py"], "/python-module/astimp_tools/datamodels.py": ["/python-module/astimp_tools/image.py"], "/tests/benchmark/inhib_diameter_modes/nlclsq/__init__.py": ["/tests/benchmark/inhib_diameter_modes/nlclsq/nlclsq.py"], "/tests/benchmark/inhib_diameter_modes/slopeOP/__init__.py": ["/tests/benchmark/inhib_diameter_modes/slopeOP/slopeOP.py"], "/tests/benchmark/inhib_diameter_modes/student/__init__.py": ["/tests/benchmark/inhib_diameter_modes/student/student.py"], "/python-module/astimp_tools/artist.py": ["/python-module/astimp_tools/image.py"], "/tests/benchmark/inhib_diameter_modes/op/__init__.py": ["/tests/benchmark/inhib_diameter_modes/op/op.py"], "/tests/benchmark/inhib_diameter_modes/slopeOP/slopeOP.py": ["/tests/benchmark/inhib_diameter_modes/shared/__init__.py"], "/tests/benchmark/preprocess_img/__main__.py": ["/tests/benchmark/preprocess_img/preproc.py"], "/tests/benchmark/benchmark_tools/atb_names/usecases/__init__.py": ["/tests/benchmark/benchmark_tools/atb_names/interfaces/__init__.py"]} |
73,546 | mpascucci/AST-image-processing | refs/heads/master | /python-module/test.py | # Copyright 2019 The ASTapp Consortium
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# <http://www.apache.org/licenses/LICENSE-2.0>
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Marco Pascucci <marpas.paris@gmail.com>.
import sys, glob, os, time
from imageio import imread, imwrite
import matplotlib.pyplot as plt
import numpy as np
from importlib import reload
import time
log = sys.stdout.write
##################
# import astimp
try:
import opencv_mat, astimp
except ImportError:
extra_modules="/Users/marco/Dev/ASTapp/cpy_improc/"
sys.path.append(extra_modules)
import opencv_mat, astimp
log("astimp not installed, importing from specific folder")
from contextlib import contextmanager
@contextmanager
def logged_action(before, after='done'):
    """Log `before`, run the body, then log `after` with the elapsed time.

    Failures inside the body are wrapped in an Exception naming the action,
    with the original error chained as the cause. The timing line is
    printed even when the body raises.
    """
    start = time.time()
    try:
        log(before + '...')
        yield
    except Exception as exc:
        # Was a bare `except:` that re-raised without a cause, hiding the
        # real traceback and also swallowing KeyboardInterrupt/SystemExit.
        raise Exception("An error occurred in: " + before) from exc
    finally:
        end = time.time()
        log("{}, [{:.2f}s]\n".format(after, end - start))
# Smoke test: run every stage of the astimp pipeline on one sample image,
# logging the wall-clock time of each step.
img_path = "../tests/images/test0.jpg"
#### Test Exception
# astimp.throw_custom_exception("test")
with logged_action("read image"):
    im_np = np.array(imread(img_path))
# High-level, one-shot analysis: crop, pellets, labels and diameters.
with logged_action("AST object"):
    ast = astimp.AST(im_np)
for i in range(len(ast.circles)):
    print(ast.labels[i].text, ast.inhibitions[i].diameter)
# The remaining steps re-run the pipeline stage by stage.
with logged_action("crop Petri dish"):
    crop = astimp.cropPetriDish(im_np)
with logged_action("find pellets"):
    circles = astimp.find_atb_pellets(crop)
    pellets = [astimp.cutOnePelletInImage(crop, circle) for circle in circles]
with logged_action("standardize pellet"):
    # Single channel of the first pellet only — a representative sample.
    astimp.standardizePelletImage(pellets[0][:,:,0])
with logged_action("read labels"):
    labels = [astimp.getOnePelletText(pellet) for pellet in pellets]
with logged_action("preprocessing"):
    preproc = astimp.inhib_diam_preprocessing(crop, circles)
with logged_action("measure diameters"):
    disks = astimp.measureDiameters(preproc)
# print()
# for disk in disks:
#     print(disk.diameter, disk.confidence)
| {"/tests/benchmark/benchmark_tools/usecases/amman/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py"], "/tests/benchmark/benchmark_tools/usecases/creteil/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py"], "/tests/benchmark/benchmark_tools/usecases/whonet_nada/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py", "/tests/benchmark/benchmark_tools/atb_names/__init__.py"], "/tests/benchmark/inhib_diameter_modes/op/op.py": ["/tests/benchmark/inhib_diameter_modes/shared/__init__.py"], "/tests/benchmark/inhib_diameter_modes/count/__init__.py": ["/tests/benchmark/inhib_diameter_modes/count/count.py"], "/tests/benchmark/preprocess_img/__init__.py": ["/tests/benchmark/preprocess_img/preproc.py"], "/tests/benchmark/benchmark_tools/atb_names/__init__.py": ["/tests/benchmark/benchmark_tools/atb_names/usecases/__init__.py"], "/python-module/astimp_tools/datamodels.py": ["/python-module/astimp_tools/image.py"], "/tests/benchmark/inhib_diameter_modes/nlclsq/__init__.py": ["/tests/benchmark/inhib_diameter_modes/nlclsq/nlclsq.py"], "/tests/benchmark/inhib_diameter_modes/slopeOP/__init__.py": ["/tests/benchmark/inhib_diameter_modes/slopeOP/slopeOP.py"], "/tests/benchmark/inhib_diameter_modes/student/__init__.py": ["/tests/benchmark/inhib_diameter_modes/student/student.py"], "/python-module/astimp_tools/artist.py": ["/python-module/astimp_tools/image.py"], "/tests/benchmark/inhib_diameter_modes/op/__init__.py": ["/tests/benchmark/inhib_diameter_modes/op/op.py"], "/tests/benchmark/inhib_diameter_modes/slopeOP/slopeOP.py": ["/tests/benchmark/inhib_diameter_modes/shared/__init__.py"], "/tests/benchmark/preprocess_img/__main__.py": ["/tests/benchmark/preprocess_img/preproc.py"], "/tests/benchmark/benchmark_tools/atb_names/usecases/__init__.py": ["/tests/benchmark/benchmark_tools/atb_names/interfaces/__init__.py"]} |
73,547 | mpascucci/AST-image-processing | refs/heads/master | /pellet_labels/trainer/pellet_list.py | PELLET_LIST = [
'AK 30', 'AM 2', 'AMC 30', 'AMP 10', 'AMP 2', 'ATM 30', 'AUG 30',
'AX 20', 'C 30', 'CAZ 10', 'CD 2', 'CFM 5', 'CFR 30', 'CIP 5',
'CN 10', 'CN 30', 'CN 500', 'CRO 30', 'CT 50', 'CTX 5', 'E 15',
'ERY 15', 'ETP 10', 'FC 10', 'F 100', 'FEC 40', 'FEP 30', 'FF 200',
'FOX 30', 'IMI 10', 'IPM 10', 'L 15', 'LEV 5', 'LNZ 10', 'LVX 5',
'MEC 10', 'MEM 10', 'MRP 10', 'MXF 5', 'NA 30', 'NET 10', 'NOR 10',
'OX 1', 'P 1', 'PRL 30', 'PT 15', 'RA 5', 'RD 5', 'S 300',
'SXT 25', 'TC 75', 'TEC 30', 'TEM 30', 'TET 30', 'TGC 15', 'TIC 75',
'TIM 85', 'TOB 10', 'TPZ 36', 'TTC 85', 'TZP 36', 'VA 30', 'VA 5']
REMOVED_CLASSES = ['AK 30', 'RD 5', 'FEC 40', 'FC 10', 'TGC 15'] | {"/tests/benchmark/benchmark_tools/usecases/amman/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py"], "/tests/benchmark/benchmark_tools/usecases/creteil/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py"], "/tests/benchmark/benchmark_tools/usecases/whonet_nada/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py", "/tests/benchmark/benchmark_tools/atb_names/__init__.py"], "/tests/benchmark/inhib_diameter_modes/op/op.py": ["/tests/benchmark/inhib_diameter_modes/shared/__init__.py"], "/tests/benchmark/inhib_diameter_modes/count/__init__.py": ["/tests/benchmark/inhib_diameter_modes/count/count.py"], "/tests/benchmark/preprocess_img/__init__.py": ["/tests/benchmark/preprocess_img/preproc.py"], "/tests/benchmark/benchmark_tools/atb_names/__init__.py": ["/tests/benchmark/benchmark_tools/atb_names/usecases/__init__.py"], "/python-module/astimp_tools/datamodels.py": ["/python-module/astimp_tools/image.py"], "/tests/benchmark/inhib_diameter_modes/nlclsq/__init__.py": ["/tests/benchmark/inhib_diameter_modes/nlclsq/nlclsq.py"], "/tests/benchmark/inhib_diameter_modes/slopeOP/__init__.py": ["/tests/benchmark/inhib_diameter_modes/slopeOP/slopeOP.py"], "/tests/benchmark/inhib_diameter_modes/student/__init__.py": ["/tests/benchmark/inhib_diameter_modes/student/student.py"], "/python-module/astimp_tools/artist.py": ["/python-module/astimp_tools/image.py"], "/tests/benchmark/inhib_diameter_modes/op/__init__.py": ["/tests/benchmark/inhib_diameter_modes/op/op.py"], "/tests/benchmark/inhib_diameter_modes/slopeOP/slopeOP.py": ["/tests/benchmark/inhib_diameter_modes/shared/__init__.py"], "/tests/benchmark/preprocess_img/__main__.py": ["/tests/benchmark/preprocess_img/preproc.py"], "/tests/benchmark/benchmark_tools/atb_names/usecases/__init__.py": ["/tests/benchmark/benchmark_tools/atb_names/interfaces/__init__.py"]} |
73,548 | mpascucci/AST-image-processing | refs/heads/master | /pellet_labels/eval_ensemble.py | import argparse
import os
import pickle
import numpy as np
import tensorflow as tf
import pandas as pd
from trainer import model
from trainer.pellet_list import PELLET_LIST, REMOVED_CLASSES
from trainer import task
from util import gcs_util as util
# Directory where input data is staged and output files are written.
WORKING_DIR = os.getcwd()
# Sub-folder (inside the GCS job dir) holding the ensemble model files.
MODEL_FOLDER = 'uncertainty_pellet_labels_model'
def get_args():
    """Parse the command line arguments of the ensemble evaluation job.

    Returns:
        argparse.Namespace with job_dir, train_files, destination_file,
        n_ensemble, img_size and threshold attributes.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--job-dir',
        type=str,
        required=True,
        help='GCS location to the ensemble models')
    parser.add_argument(
        '--train-files',
        type=str,
        required=True,
        nargs='*',
        help='Dataset training file local or GCS')
    parser.add_argument(
        '--destination-file',
        type=str,
        # BUG FIX: this argument had both required=True and a default,
        # which made the default unreachable. It is now optional and
        # falls back to the default file name.
        default='uncertainty_data.pickle',
        help='File name to write uncertainty data to, '
             'default=uncertainty_data.pickle')
    parser.add_argument(
        '--n-ensemble',
        type=int,
        default=10,
        help='Number of ensemble models that were trained')
    parser.add_argument(
        '--img-size',
        type=int,
        default=64,
        help='square size to resize input images to in pixel, default=64')
    parser.add_argument(
        '--threshold',
        type=str,
        default='std_dev',
        choices=['std_dev', 'max_p', 'entropy'],
        help='which type of threshold to use to calculate uncertainty_data')
    args, _ = parser.parse_known_args()
    return args
class Evaluator():
    """Dispatches uncertainty evaluation to one of three threshold strategies.

    Each strategy sweeps a range of threshold values and reports, for every
    threshold t, the fraction of in-distribution (valid) samples accepted
    and the fraction of out-of-distribution (unknown) samples accepted.
    """

    def __init__(self, threshold):
        # Bind `evaluate` once to the chosen strategy; an unknown name
        # raises KeyError immediately at construction time.
        self.threshold = threshold
        strategies = {
            'entropy': self._evaluate_entropy,
            'max_p': self._evaluate_max_p,
            'std_dev': self._evaluate_std_dev
        }
        self.evaluate = strategies[threshold]

    @staticmethod
    def _accept_rate(scores, limit, keep_low):
        # Fraction of scores on the accepted side of `limit`.
        if keep_low:
            kept = scores[scores <= limit]
        else:
            kept = scores[scores >= limit]
        return len(kept) / len(scores)

    def _evaluate_entropy(self, valid_predictions, ukn_predictions):
        # Average over ensemble members first, then threshold on the
        # entropy of the averaged prediction.
        valid_entropy = compute_entropies(np.mean(valid_predictions, axis=0))
        ukn_entropy = compute_entropies(np.mean(ukn_predictions, axis=0))
        return [
            [t,
             self._accept_rate(valid_entropy, t, keep_low=True),
             self._accept_rate(ukn_entropy, t, keep_low=True)]
            for t in np.linspace(0., np.log(10), 1000, endpoint=True)
        ]

    def _evaluate_max_p(self, valid_predictions, ukn_predictions):
        # Average over ensemble members first, then threshold on the
        # top-class probability.
        valid_confidence = np.amax(np.mean(valid_predictions, axis=0), axis=-1)
        ukn_confidence = np.amax(np.mean(ukn_predictions, axis=0), axis=-1)
        return [
            [t,
             self._accept_rate(valid_confidence, t, keep_low=False),
             self._accept_rate(ukn_confidence, t, keep_low=False)]
            for t in np.arange(0.5, 1, 0.001)
        ]

    def _evaluate_std_dev(self, valid_predictions, ukn_predictions):
        # Threshold on the disagreement (std-dev of top-class probability)
        # between ensemble members.
        valid_deviations = compute_deviations(valid_predictions)
        ukn_deviations = compute_deviations(ukn_predictions)
        return [
            [t,
             self._accept_rate(valid_deviations, t, keep_low=True),
             self._accept_rate(ukn_deviations, t, keep_low=True)]
            for t in np.arange(0.0001, 0.1, 0.0001)
        ]
def compute_entropies(predictions):
    """Shannon entropy of each probability distribution along the last axis.

    A small epsilon keeps log() finite when a class probability is zero.
    For a reference on distribution entropy, see [1].
    [1]: https://peltarion.com/knowledge-center/documentation/modeling-view/build-an-ai-model/loss-functions/categorical-crossentropy
    """
    eps = 1e-10
    log_p = np.log(predictions + eps)
    return -(predictions * log_p).sum(axis=-1)
def compute_deviations(predictions):
    """Per-sample std-dev, across ensemble members, of the top-class probability.

    `predictions` is indexed as (ensemble member, sample, class); the result
    has one deviation per sample.
    """
    top_probs = np.max(predictions, axis=-1)
    return top_probs.std(axis=0)
def eval_ensemble(args):
    """Evaluate a gcloud-trained model ensemble on in/out-of-distribution data.

    Takes an ensemble of models trained on gcloud and evaluates their
    accuracy in classifying in- and out-of-distribution data. The
    evaluation can use 3 types of threshold: 'max_p', 'entropy', 'std_dev'.
    Pickles a pandas DataFrame with accuracy metrics at different
    threshold values to `args.destination_file` in the working directory.
    """
    # NOTE(review): assert is stripped under `python -O`; raise ValueError
    # instead if this check must always run.
    assert(args.job_dir.startswith('gs://'))
    # Classes the models were trained on (removed classes excluded).
    class_list = [pellet_class for pellet_class in PELLET_LIST
        if pellet_class not in REMOVED_CLASSES]
    # Accumulate data from every training file, then concatenate.
    train_images = []
    train_labels = []
    valid_images = []
    valid_labels = []
    ukn_images = []
    for path in args.train_files:
        input_data = model.load_and_preprocess_data(
            path,
            WORKING_DIR,
            args.img_size,
            class_list,
            REMOVED_CLASSES)
        train_images.append(input_data.train_data)
        train_labels.append(input_data.train_labels)
        valid_images.append(input_data.valid_data)
        valid_labels.append(input_data.valid_labels)
        ukn_images.append(input_data.ukn_data)
    train_images = np.concatenate(train_images, axis=0)
    train_labels = np.concatenate(train_labels, axis=0)
    valid_images = np.concatenate(valid_images, axis=0)
    valid_labels = np.concatenate(valid_labels, axis=0)
    ukn_images = np.concatenate(ukn_images, axis=0)
    # Load models
    model_paths = util.load_models_from_gcs(
        args.job_dir, MODEL_FOLDER, task.MODEL_NAME, WORKING_DIR, args.n_ensemble)
    models = []
    for path in model_paths:
        # 'sin' is registered as a custom object used by the saved models.
        models.append(tf.keras.models.load_model(path, {'sin': tf.sin}))
    # Generate predictions
    image_gen = model.get_data_generator()
    # shuffle=False keeps predictions aligned across ensemble members.
    valid_flow = image_gen.flow(valid_images, valid_labels, shuffle=False)
    ukn_flow = image_gen.flow(ukn_images, shuffle=False)
    valid_predictions = []
    ukn_predictions = []
    for m in models:
        valid_predictions.append(m.predict(valid_flow))
        ukn_predictions.append(m.predict(ukn_flow))
    evaluator = Evaluator(args.threshold)
    uncertainty_data = evaluator.evaluate(valid_predictions, ukn_predictions)
    uncertainty_data = pd.DataFrame(uncertainty_data,
        columns=[args.threshold, 'in_set_acc', 'out_set_mis'])
    uncertainty_path = os.path.join(WORKING_DIR, args.destination_file)
    with open(uncertainty_path, 'wb') as file:
        pickle.dump(uncertainty_data, file)
if __name__ == '__main__':
args = get_args()
eval_ensemble(args) | {"/tests/benchmark/benchmark_tools/usecases/amman/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py"], "/tests/benchmark/benchmark_tools/usecases/creteil/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py"], "/tests/benchmark/benchmark_tools/usecases/whonet_nada/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py", "/tests/benchmark/benchmark_tools/atb_names/__init__.py"], "/tests/benchmark/inhib_diameter_modes/op/op.py": ["/tests/benchmark/inhib_diameter_modes/shared/__init__.py"], "/tests/benchmark/inhib_diameter_modes/count/__init__.py": ["/tests/benchmark/inhib_diameter_modes/count/count.py"], "/tests/benchmark/preprocess_img/__init__.py": ["/tests/benchmark/preprocess_img/preproc.py"], "/tests/benchmark/benchmark_tools/atb_names/__init__.py": ["/tests/benchmark/benchmark_tools/atb_names/usecases/__init__.py"], "/python-module/astimp_tools/datamodels.py": ["/python-module/astimp_tools/image.py"], "/tests/benchmark/inhib_diameter_modes/nlclsq/__init__.py": ["/tests/benchmark/inhib_diameter_modes/nlclsq/nlclsq.py"], "/tests/benchmark/inhib_diameter_modes/slopeOP/__init__.py": ["/tests/benchmark/inhib_diameter_modes/slopeOP/slopeOP.py"], "/tests/benchmark/inhib_diameter_modes/student/__init__.py": ["/tests/benchmark/inhib_diameter_modes/student/student.py"], "/python-module/astimp_tools/artist.py": ["/python-module/astimp_tools/image.py"], "/tests/benchmark/inhib_diameter_modes/op/__init__.py": ["/tests/benchmark/inhib_diameter_modes/op/op.py"], "/tests/benchmark/inhib_diameter_modes/slopeOP/slopeOP.py": ["/tests/benchmark/inhib_diameter_modes/shared/__init__.py"], "/tests/benchmark/preprocess_img/__main__.py": ["/tests/benchmark/preprocess_img/preproc.py"], "/tests/benchmark/benchmark_tools/atb_names/usecases/__init__.py": ["/tests/benchmark/benchmark_tools/atb_names/interfaces/__init__.py"]} |
73,549 | mpascucci/AST-image-processing | refs/heads/master | /tests/benchmark/benchmark_tools/astscript/__init__.py | # benchmark_tools.astscript
# Copyright 2019 Fondation Medecins Sans Frontières https://fondation.msf.fr/
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This file is part of the ASTapp image processing library
# Author: Marco Pascucci
import time
from pandas import isna
# Line templates of the ASTscript text format, keyed by record type.
ASTscript_format = {
    "atb": "ATB : {name}, {diam}, {cmi}, {raw_sir}, {interp_sir}",
    "test_in": "STATUS_IN : {name}, {value}",
    "test_out": "STATUS_OUT : {name}, {value}",
    "species": "SPECIES : {name}",
    "sample_type": "SAMPLE : {sample_type}"
}


def annotation_to_ASTscript(AST_annotation):
    """Convert the annotation of an AST to an ASTscript.

    Antibiotics with neither a SIR value nor a diameter are skipped;
    missing individual fields are rendered as the literal "NA".
    """
    fmt = ASTscript_format

    def _na_or(value, convert=lambda v: v):
        # Render missing (NaN/None) values as "NA", otherwise convert.
        return "NA" if isna(value) else convert(value)

    # Build the antibiotic lines first; they go at the end of the script.
    atb_lines = []
    for name, results in AST_annotation.iteritems():
        if isna(results.sir) and isna(results.diam):
            continue  # nothing measured for this antibiotic
        atb_lines.append(fmt['atb'].format(
            name=name,
            diam=_na_or(results.diam, int),
            cmi='NA',
            raw_sir=_na_or(results.raw_sir),
            interp_sir=_na_or(results.sir)))

    script = ["# ASTscript automatically generated from annotations",
              "# script creation date : " + time.strftime("%d/%m/%Y")]
    if AST_annotation.sample_date is not None:
        script.append("# sample date : " + str(AST_annotation.sample_date))
    script.append('')
    script.append(fmt['species'].format(name=AST_annotation.species))
    if AST_annotation.sample_type is not None:
        script.append(fmt['sample_type'].format(
            sample_type=AST_annotation.sample_type))
    script.append('')
    status = AST_annotation.expert_system_status
    if status:
        # Expert-system inputs first, then outputs, preserving order.
        script.extend(fmt['test_in'].format(**test)
                      for test in status if test['input'] is True)
        script.extend(fmt['test_out'].format(**test)
                      for test in status if test['input'] is False)
        script.append('')
    script.extend(atb_lines)
    return '\n'.join(script)
| {"/tests/benchmark/benchmark_tools/usecases/amman/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py"], "/tests/benchmark/benchmark_tools/usecases/creteil/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py"], "/tests/benchmark/benchmark_tools/usecases/whonet_nada/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py", "/tests/benchmark/benchmark_tools/atb_names/__init__.py"], "/tests/benchmark/inhib_diameter_modes/op/op.py": ["/tests/benchmark/inhib_diameter_modes/shared/__init__.py"], "/tests/benchmark/inhib_diameter_modes/count/__init__.py": ["/tests/benchmark/inhib_diameter_modes/count/count.py"], "/tests/benchmark/preprocess_img/__init__.py": ["/tests/benchmark/preprocess_img/preproc.py"], "/tests/benchmark/benchmark_tools/atb_names/__init__.py": ["/tests/benchmark/benchmark_tools/atb_names/usecases/__init__.py"], "/python-module/astimp_tools/datamodels.py": ["/python-module/astimp_tools/image.py"], "/tests/benchmark/inhib_diameter_modes/nlclsq/__init__.py": ["/tests/benchmark/inhib_diameter_modes/nlclsq/nlclsq.py"], "/tests/benchmark/inhib_diameter_modes/slopeOP/__init__.py": ["/tests/benchmark/inhib_diameter_modes/slopeOP/slopeOP.py"], "/tests/benchmark/inhib_diameter_modes/student/__init__.py": ["/tests/benchmark/inhib_diameter_modes/student/student.py"], "/python-module/astimp_tools/artist.py": ["/python-module/astimp_tools/image.py"], "/tests/benchmark/inhib_diameter_modes/op/__init__.py": ["/tests/benchmark/inhib_diameter_modes/op/op.py"], "/tests/benchmark/inhib_diameter_modes/slopeOP/slopeOP.py": ["/tests/benchmark/inhib_diameter_modes/shared/__init__.py"], "/tests/benchmark/preprocess_img/__main__.py": ["/tests/benchmark/preprocess_img/preproc.py"], "/tests/benchmark/benchmark_tools/atb_names/usecases/__init__.py": ["/tests/benchmark/benchmark_tools/atb_names/interfaces/__init__.py"]} |
73,550 | mpascucci/AST-image-processing | refs/heads/master | /tests/benchmark/inhib_diameter_modes/op/op.py | import astimp
import optimalpartitioning
import numpy as np
from ..shared import no_inhibition, total_inhibition, varianceHall, varianceHallDiff
def measureOneDiameter(preproc, pellet_idx, dimensions, penalty=None):
    """Measure one inhibition diameter with the optimal-partitioning method.

    Thin wrapper around _measureOneDiameter that keeps only the diameter
    and packs it into an astimp.InhibDisk (confidence is not estimated
    by this method, so it is reported as 0).
    """
    result = _measureOneDiameter(preproc, pellet_idx, dimensions, penalty)
    return astimp.InhibDisk(diameter=result[0], confidence=0)
def _measureOneDiameter(preproc, pellet_idx, dimensions, penalty=None):
    """Locate the inhibition boundary on one radial profile with OP.

    preproc    -- astimp preprocessing result for the cropped plate
    pellet_idx -- index of the pellet to measure
    dimensions -- 1 or 2: selects the optimal-partitioning variant
                  (NOTE(review): any other value leaves `f` unbound and
                  raises NameError — confirm callers only pass 1 or 2)
    penalty    -- change-point penalty; auto-estimated from the profile
                  noise when None
    Returns (diameter_mm, change_point_px, opresult); opresult is None
    when no partitioning was run (no/total inhibition shortcut).
    """
    # Radial intensity profile around the pellet centre.
    y = np.array(astimp.radial_profile(
        preproc, pellet_idx, astimp.profile_type["maxaverage"]))
    px_per_mm = preproc.px_per_mm
    # Tolerance: 1 mm; assumed pellet radius: 3 mm (both in pixels).
    tolerance = int(np.round(px_per_mm))
    pellet_radius_in_px = int(np.round(px_per_mm)*3)
    # Skip the pellet itself plus the tolerance band.
    offset = pellet_radius_in_px+tolerance
    opresult = None
    if no_inhibition(y[offset:], preproc, pellet_idx):
        # Bacteria grow up to the pellet: boundary at the profile end.
        cp = len(y)
    elif total_inhibition(y[offset:], preproc, pellet_idx):
        # No bacteria at all: boundary collapses to the pellet radius.
        cp = pellet_radius_in_px
    else:
        op_data = y[offset:]
        if penalty is None:
            # Heuristic penalty scaled by the profile noise (Hall variance
            # estimate on the normalised profile).
            temp_max = np.max(op_data)
            y_temp = op_data/temp_max
            sigma = np.sqrt(varianceHallDiff(y_temp))
            penalty = sigma*temp_max*np.log(len(y_temp)) * 2*100
        if dimensions == 1:
            f = optimalpartitioning.op1D
        if dimensions == 2:
            f = optimalpartitioning.op2D
        opresult = f(list(range(len(op_data))), op_data, penalty)
        # Shift change points back to whole-profile coordinates.
        opresult.cp = [cp + offset for cp in opresult.cp]
        if len(opresult.cp) > 0:
            # cp = opresult.cp[0]
            # Average the detected change points into a single boundary.
            cp = np.mean(opresult.cp)
        else:
            cp = len(y)
    # Convert the boundary radius (px) to a diameter in mm.
    diameter = cp/preproc.px_per_mm*2
    return diameter, cp, opresult
| {"/tests/benchmark/benchmark_tools/usecases/amman/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py"], "/tests/benchmark/benchmark_tools/usecases/creteil/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py"], "/tests/benchmark/benchmark_tools/usecases/whonet_nada/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py", "/tests/benchmark/benchmark_tools/atb_names/__init__.py"], "/tests/benchmark/inhib_diameter_modes/op/op.py": ["/tests/benchmark/inhib_diameter_modes/shared/__init__.py"], "/tests/benchmark/inhib_diameter_modes/count/__init__.py": ["/tests/benchmark/inhib_diameter_modes/count/count.py"], "/tests/benchmark/preprocess_img/__init__.py": ["/tests/benchmark/preprocess_img/preproc.py"], "/tests/benchmark/benchmark_tools/atb_names/__init__.py": ["/tests/benchmark/benchmark_tools/atb_names/usecases/__init__.py"], "/python-module/astimp_tools/datamodels.py": ["/python-module/astimp_tools/image.py"], "/tests/benchmark/inhib_diameter_modes/nlclsq/__init__.py": ["/tests/benchmark/inhib_diameter_modes/nlclsq/nlclsq.py"], "/tests/benchmark/inhib_diameter_modes/slopeOP/__init__.py": ["/tests/benchmark/inhib_diameter_modes/slopeOP/slopeOP.py"], "/tests/benchmark/inhib_diameter_modes/student/__init__.py": ["/tests/benchmark/inhib_diameter_modes/student/student.py"], "/python-module/astimp_tools/artist.py": ["/python-module/astimp_tools/image.py"], "/tests/benchmark/inhib_diameter_modes/op/__init__.py": ["/tests/benchmark/inhib_diameter_modes/op/op.py"], "/tests/benchmark/inhib_diameter_modes/slopeOP/slopeOP.py": ["/tests/benchmark/inhib_diameter_modes/shared/__init__.py"], "/tests/benchmark/preprocess_img/__main__.py": ["/tests/benchmark/preprocess_img/preproc.py"], "/tests/benchmark/benchmark_tools/atb_names/usecases/__init__.py": ["/tests/benchmark/benchmark_tools/atb_names/interfaces/__init__.py"]} |
73,551 | mpascucci/AST-image-processing | refs/heads/master | /tests/benchmark/inhib_diameter_modes/nlclsq/nlclsq.py | import numpy as np
from scipy import optimize
import astimp
# from logging import getLogger
# log = getLogger(__name__)
# log.warning("regression module reloaded.")
def measureOneDiameter(preproc, pellet_idx):
    """Measure a single inhibition diameter and wrap it in an astimp.InhibDisk.

    Only the diameter and the fit cost (used as confidence) are kept;
    the remaining values returned by _measureOneDiameter are plotting aids.
    """
    result = _measureOneDiameter(preproc, pellet_idx)
    diameter, fit_cost = result[0], result[2]
    return astimp.InhibDisk(diameter=diameter, confidence=fit_cost)
def _measureOneDiameter(preproc, pellet_idx):
    """find the inhibition diameter via non linear constrained least_square.

    :param preproc: astimp preprocessing result (provides px_per_mm, radial
        profiles and per-pellet k-means thresholds)
    :param pellet_idx: index of the antibiotic pellet to measure
    :return: (diameter_mm, idx_px, fit_cost, (left, right), fit, th_v)
    """
    px_per_mm = preproc.px_per_mm
    # pellet radius: np.round(px_per_mm) is ~1 mm in px, times 3 (3 mm radius)
    pellet_radius_in_px = int(np.round(px_per_mm)*3)
    # tolerance of about 1 mm, expressed in pixels
    tolerance = int(np.round(px_per_mm))
    # radial intensity profile ("maxaverage" flavor) around this pellet
    y = np.array(astimp.radial_profile(
        preproc, pellet_idx, astimp.profile_type["maxaverage"]))
    kmc = preproc.km_centers  # NOTE(review): unused here
    # local intensity threshold separating inhibition from bacterial growth
    th_v = preproc.km_thresholds_local[pellet_idx]
    # NOTE: the following line does the real fit. The important return values are `idx` and `fit_cost`
    # The other return values are needed only here for plotting
    idx, fit_cost, (left, right), fit = inhib_model_fit(y,
                                                        bt=tolerance,
                                                        pellet_r=pellet_radius_in_px,
                                                        th_v=th_v)
    # radius in px -> diameter in mm
    diameter = idx/preproc.px_per_mm*2
    return diameter, idx, fit_cost, (left, right), fit, th_v
def inhib_model_fit(p, *, bt, pellet_r, th_v, pellet_v=255):
    """Fit the two-sigmoid inhibition model to an intensity profile.

    :param p: intensity profile (1D array of pixel intensities)
    :param bt: tolerance distance (int). Usually the value of 1mm in px
    :param pellet_r: presumed pellet radius (int, pixels)
    :param th_v: local intensity threshold separating inhibition from bacteria
    :param pellet_v: pellet intensity value in the profile
    :return: (idx, fit_cost, (left, right), fit) where idx is the inhibition
        radius in px and fit_cost the RMS of the model residuals
    :raises RuntimeError: if the least-squares fit does not converge
    """
    left = pellet_r
    # uncomment these lines to detect inner colonies
    # pel_end = left+bt
    # right = min(len(p)-1, pel_end + max(np.argmax(p[pel_end:]), bt) + 1)
    right = len(p)-1
    y = p[left:right]
    x = np.arange(len(y))
    vmin = p.min()
    vmax = max(th_v, 200)
    # parameter vector layout:
    #        inhib bact  breakpoint slope pellet_end pellet_slope
    bds_min = [vmin, vmin, 0,        0,    0,         1]
    v0 =      [vmin, vmax, bt/2,     1,    0,         1]
    bds_max = [vmax, vmax, len(y),   3,    bt,        50]
    # check inconsistent boundaries
    assert all([a < b for a, b in zip(bds_min, bds_max)]
               ), "boundaries error\nmin : {}\nmax : {}".format(bds_min, bds_max)
    # check unfeasible initial guess (v0 must lie within the bounds)
    assert all([a >= b for a, b in zip(v0, bds_min)]) and all(
        [a <= b for a, b in zip(v0, bds_max)]), \
        "initial guess v0 outside bounds: {}".format(v0)
    # alternatives to scipy.least_squares: DLIB, ALGLIB, CERES
    fit = optimize.least_squares(inhib_model_cost, v0,
                                 args=(x, y, pellet_v),
                                 bounds=[bds_min, bds_max],
                                 jac='3-point')
    if not fit.success:
        # RuntimeError (subclass of Exception) instead of bare Exception
        raise RuntimeError("fit failed")
    # NOTE: the original code carried two `if False:` branches meant to detect
    # "no inhibition" / "total bacteria" special cases; they were dead code and
    # have been removed.  The breakpoint parameter is the inhibition radius.
    idx = int(np.round(fit.x[2]))
    # RMS of the residuals over the whole profile right of the pellet
    fit_cost = np.sqrt(inhib_model_cost(fit.x, range(
        len(p[left:])), p[left:], pellet_v).mean())
    idx += pellet_r
    return idx, fit_cost, (left, right), fit
def inhib_model(v, x, val_pel):
    """Two-sigmoid model of a radial profile: a falling edge at the pellet
    border followed by a rising edge at the inhibition/bacteria boundary."""
    pellet_edge = sigmoid((val_pel, v[0], v[4], v[5]), x)
    inhib_edge = sigmoid((v[0], v[1], v[2], v[3]), x)
    # the shared baseline v[0] is counted twice by the sum; subtract it once
    return pellet_edge + inhib_edge - v[0]
def inhib_model_cost(v, x, y, *args):
    """Squared residuals of inhib_model against data y, with weighting."""
    residuals = (inhib_model(v, x, *args) - y) ** 2
    # NOTE : the value 5 hereafter is arbitrary and depends on the image size.
    residuals[0:5] /= 50  # down-weight the error on the first part (pellet)
    residuals += 1e5*(v[1] < v[0])  # penalize an inverted step (v[1] < v[0])
    return residuals
def sigmoid(v, x):
    """Generalized logistic curve.

    v = (low, high, midpoint, slope): rises from `low` to `high`,
    crossing the middle at x == midpoint with the given slope.
    """
    low, high, mid, slope = v[0], v[1], v[2], v[3]
    return (high - low) / (1 + np.exp((mid - x) * slope)) + low
def sigmoid_inverse(v, y, *, int_output=True):
    """
    Return the x value that gives y with the given parameters vector v.

    :param v: (low, high, midpoint, slope) sigmoid parameters
    :param y: target value; must lie strictly between v[0] and v[1]
    :param int_output: if True (default), round the result to the nearest int
    :raises ValueError: if the inverse could not be evaluated
        (y outside the open interval (v[0], v[1]) makes the log NaN)
    """
    x = -np.log((v[1] - v[0]) / (y - v[0]) - 1) / v[3] + v[2]
    try:
        if int_output:
            # int() of NaN raises ValueError when y is out of range
            x = int(np.round(x))
    except ValueError as err:
        # BUG FIX: the original raised `astErrors.ValueError`, but `astErrors`
        # is undefined in this module, turning the error into a NameError.
        raise ValueError("The inverse could not be evaluated.") from err
    return x
def cost_sigmoid(v, x, y):
    """Squared residual between the sigmoid model and the data y."""
    diff = sigmoid(v, x) - y
    return diff ** 2
| {"/tests/benchmark/benchmark_tools/usecases/amman/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py"], "/tests/benchmark/benchmark_tools/usecases/creteil/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py"], "/tests/benchmark/benchmark_tools/usecases/whonet_nada/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py", "/tests/benchmark/benchmark_tools/atb_names/__init__.py"], "/tests/benchmark/inhib_diameter_modes/op/op.py": ["/tests/benchmark/inhib_diameter_modes/shared/__init__.py"], "/tests/benchmark/inhib_diameter_modes/count/__init__.py": ["/tests/benchmark/inhib_diameter_modes/count/count.py"], "/tests/benchmark/preprocess_img/__init__.py": ["/tests/benchmark/preprocess_img/preproc.py"], "/tests/benchmark/benchmark_tools/atb_names/__init__.py": ["/tests/benchmark/benchmark_tools/atb_names/usecases/__init__.py"], "/python-module/astimp_tools/datamodels.py": ["/python-module/astimp_tools/image.py"], "/tests/benchmark/inhib_diameter_modes/nlclsq/__init__.py": ["/tests/benchmark/inhib_diameter_modes/nlclsq/nlclsq.py"], "/tests/benchmark/inhib_diameter_modes/slopeOP/__init__.py": ["/tests/benchmark/inhib_diameter_modes/slopeOP/slopeOP.py"], "/tests/benchmark/inhib_diameter_modes/student/__init__.py": ["/tests/benchmark/inhib_diameter_modes/student/student.py"], "/python-module/astimp_tools/artist.py": ["/python-module/astimp_tools/image.py"], "/tests/benchmark/inhib_diameter_modes/op/__init__.py": ["/tests/benchmark/inhib_diameter_modes/op/op.py"], "/tests/benchmark/inhib_diameter_modes/slopeOP/slopeOP.py": ["/tests/benchmark/inhib_diameter_modes/shared/__init__.py"], "/tests/benchmark/preprocess_img/__main__.py": ["/tests/benchmark/preprocess_img/preproc.py"], "/tests/benchmark/benchmark_tools/atb_names/usecases/__init__.py": ["/tests/benchmark/benchmark_tools/atb_names/interfaces/__init__.py"]} |
73,552 | mpascucci/AST-image-processing | refs/heads/master | /python-module/setup.py | #!/usr/bin/env python
# coding: utf-8
# Copyright 2019 The ASTapp Consortium
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# <http://www.apache.org/licenses/LICENSE-2.0>
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Marco Pascucci <marpas.paris@gmail.com>.
import os
cwd = os.getcwd()
import re
import sys
import platform
import subprocess
from setuptools import setup, Extension
from setuptools.command.build_ext import build_ext
from distutils.version import LooseVersion
from Cython.Build import cythonize
import glob
import numpy
import setuptools.command.build_py
# ---------------------------------------------------------------------------- #
# CYTHON MODULES #
# ---------------------------------------------------------------------------- #
# ------------------------------ INCLUDE FOLDERS ----------------------------- #
# Specify here the lib and include folder of OpenCV installed on your System
# ON UBUNTU
# Edit these two paths to match the OpenCV installation on the build machine.
cv_include_folder = "/usr/include/opencv4"
cv_lib_folder = "/usr/lib/x86_64-linux-gnu"
# ON MAC
# cv_include_folder = "/usr/local/include/opencv4"
# cv_lib_folder = "/usr/local/lib"
# fail fast if the OpenCV folders configured above do not exist
assert os.path.isdir(cv_include_folder)
assert os.path.isdir(cv_lib_folder)
# astimp native library: headers in the source tree, binary in the cmake build dir
astimp_include_folder = os.path.join(cwd, "../astimplib/include")
astimp_lib_folder = os.path.join(cwd, "../build/astimplib")
# --------------------- check that include folders exist --------------------- #
for path in [cv_lib_folder, cv_include_folder, astimp_lib_folder, astimp_include_folder]:
    if not os.path.exists(path):
        raise FileNotFoundError(path, "not found")
# -------------------- Find opencv libraries in lib_folder ------------------- #
# collect "libopencv_*" file paths, stripped of extension, de-duplicated
cvlibs = list()
for file in glob.glob(os.path.join(cv_lib_folder, 'libopencv_*')):
    cvlibs.append(file.split('.')[0])
cvlibs = list(set(cvlibs))
# cvlibs = ['-L{}'.format(cv_lib_folder)] + \
# strip the directory and the "libopencv_" prefix: the linker wants e.g. "opencv_core"
cvlibs = ['opencv_{}'.format(
    lib.split(os.path.sep)[-1].split('libopencv_')[-1]) for lib in cvlibs]
# --------------------------- Extensions definition -------------------------- #
# Cython wrapper exposing cv::Mat <-> numpy conversion
ext_opencv_mat = Extension("opencv_mat",
                           sources=["opencv_mat.pyx",
                                    "opencv_mat.pxd"],
                           language="c++",
                           extra_compile_args=["-std=c++11"],
                           extra_link_args=[],
                           include_dirs=[numpy.get_include(),
                                         cv_include_folder,
                                         os.path.join(
                                             cv_include_folder, "opencv2")
                                         ],
                           library_dirs=[cv_lib_folder],
                           libraries=cvlibs,
                           )
# Cython wrapper around the astimp C++ library (links OpenCV + libastimp)
ext_astimp = Extension("astimp",
                       sources=["astimp.pyx"],
                       language="c++",
                       extra_compile_args=["-std=c++11"],
                       extra_link_args=[],
                       include_dirs=[numpy.get_include(),
                                     cv_include_folder,
                                     astimp_include_folder,
                                     os.path.join(cv_include_folder, "opencv2")
                                     ],
                       library_dirs=[cv_lib_folder, astimp_lib_folder],
                       libraries=cvlibs + ["astimp"],
                       )
# ---------------------------------------------------------------------------- #
#                                     SETUP                                    #
# ---------------------------------------------------------------------------- #
setup(
    name='astimp',
    version='0.0.3',
    author='Marco Pascucci',
    author_email='marpas.paris@gmail.com',
    description='image processing fot antibiotic susceptibility testing',
    long_description='',
    packages= ['astimp_tools'],
    ext_modules=cythonize([ext_opencv_mat, ext_astimp]),
    # cmdclass=dict(build_astimplib=CMakeBuild),
    zip_safe=False,
)
| {"/tests/benchmark/benchmark_tools/usecases/amman/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py"], "/tests/benchmark/benchmark_tools/usecases/creteil/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py"], "/tests/benchmark/benchmark_tools/usecases/whonet_nada/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py", "/tests/benchmark/benchmark_tools/atb_names/__init__.py"], "/tests/benchmark/inhib_diameter_modes/op/op.py": ["/tests/benchmark/inhib_diameter_modes/shared/__init__.py"], "/tests/benchmark/inhib_diameter_modes/count/__init__.py": ["/tests/benchmark/inhib_diameter_modes/count/count.py"], "/tests/benchmark/preprocess_img/__init__.py": ["/tests/benchmark/preprocess_img/preproc.py"], "/tests/benchmark/benchmark_tools/atb_names/__init__.py": ["/tests/benchmark/benchmark_tools/atb_names/usecases/__init__.py"], "/python-module/astimp_tools/datamodels.py": ["/python-module/astimp_tools/image.py"], "/tests/benchmark/inhib_diameter_modes/nlclsq/__init__.py": ["/tests/benchmark/inhib_diameter_modes/nlclsq/nlclsq.py"], "/tests/benchmark/inhib_diameter_modes/slopeOP/__init__.py": ["/tests/benchmark/inhib_diameter_modes/slopeOP/slopeOP.py"], "/tests/benchmark/inhib_diameter_modes/student/__init__.py": ["/tests/benchmark/inhib_diameter_modes/student/student.py"], "/python-module/astimp_tools/artist.py": ["/python-module/astimp_tools/image.py"], "/tests/benchmark/inhib_diameter_modes/op/__init__.py": ["/tests/benchmark/inhib_diameter_modes/op/op.py"], "/tests/benchmark/inhib_diameter_modes/slopeOP/slopeOP.py": ["/tests/benchmark/inhib_diameter_modes/shared/__init__.py"], "/tests/benchmark/preprocess_img/__main__.py": ["/tests/benchmark/preprocess_img/preproc.py"], "/tests/benchmark/benchmark_tools/atb_names/usecases/__init__.py": ["/tests/benchmark/benchmark_tools/atb_names/interfaces/__init__.py"]} |
73,553 | mpascucci/AST-image-processing | refs/heads/master | /tests/benchmark/inhib_diameter_modes/shared/__init__.py | import numpy as np
def total_inhibition(data, preproc, pellet_idx):
    """Return True if the whole profile is estimated to be inhibition.

    True when every value of `data` lies above the pellet's local k-means
    threshold.  (The original docstring said "no inhibition", a copy/paste
    from `no_inhibition` that contradicted the function name.)

    :param data: radial profile values (array-like supporting `>` broadcast)
    :param preproc: astimp preprocessing result with `km_thresholds_local`
    :param pellet_idx: index of the pellet whose threshold to use
    """
    # `all` already yields a bool; no need for an explicit if/else.
    return all(data > preproc.km_thresholds_local[pellet_idx])
def no_inhibition(data, preproc, pellet_idx):
    """Return true if estimate that there is no inhibition in this profile.

    True when every value of `data` lies below the pellet's local k-means
    threshold.

    :param data: radial profile values (array-like supporting `<` broadcast)
    :param preproc: astimp preprocessing result with `km_thresholds_local`
    :param pellet_idx: index of the pellet whose threshold to use
    """
    # `all` already yields a bool; no need for an explicit if/else.
    return all(data < preproc.km_thresholds_local[pellet_idx])
def varianceHall(y):
    """Hall's difference-based estimator of the noise variance of profile y."""
    n = len(y)
    # Hall difference-sequence coefficients
    d = [0.1942, 0.2809, 0.3832, - 0.8582]
    # sum of squared local difference combinations (same accumulation
    # order as a plain loop starting from 0)
    sq = sum(
        (d[0] * y[j] + d[1] * y[j + 1] + d[2] * y[j + 2] + d[3] * y[j + 3]) ** 2
        for j in range(1, n - 3)
    )
    return 1/(n-3)*sq
def varianceHallDiff(y):
n = len(y)
d = [0.1942, 0.2809, 0.3832, - 0.8582]
sigma2 = 0
corrector = np.sqrt(d[3]**2 + (d[2]-d[3])**2 + (d[1]-d[2])**2 + (d[0]-d[1])**2 + d[0]**2)
for j in range(1,n-4):
sigma2 += (d[0]*(y[j+0+1]-y[j+0]) + d[1]*(y[j+1+1]-y[j+1]) + d[2]*(y[j+2+1]-y[j+2]) + d[3]*(y[j+3+1]-y[j+3]))**2
return 1/(n-4)/corrector*sigma2 | {"/tests/benchmark/benchmark_tools/usecases/amman/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py"], "/tests/benchmark/benchmark_tools/usecases/creteil/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py"], "/tests/benchmark/benchmark_tools/usecases/whonet_nada/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py", "/tests/benchmark/benchmark_tools/atb_names/__init__.py"], "/tests/benchmark/inhib_diameter_modes/op/op.py": ["/tests/benchmark/inhib_diameter_modes/shared/__init__.py"], "/tests/benchmark/inhib_diameter_modes/count/__init__.py": ["/tests/benchmark/inhib_diameter_modes/count/count.py"], "/tests/benchmark/preprocess_img/__init__.py": ["/tests/benchmark/preprocess_img/preproc.py"], "/tests/benchmark/benchmark_tools/atb_names/__init__.py": ["/tests/benchmark/benchmark_tools/atb_names/usecases/__init__.py"], "/python-module/astimp_tools/datamodels.py": ["/python-module/astimp_tools/image.py"], "/tests/benchmark/inhib_diameter_modes/nlclsq/__init__.py": ["/tests/benchmark/inhib_diameter_modes/nlclsq/nlclsq.py"], "/tests/benchmark/inhib_diameter_modes/slopeOP/__init__.py": ["/tests/benchmark/inhib_diameter_modes/slopeOP/slopeOP.py"], "/tests/benchmark/inhib_diameter_modes/student/__init__.py": ["/tests/benchmark/inhib_diameter_modes/student/student.py"], "/python-module/astimp_tools/artist.py": ["/python-module/astimp_tools/image.py"], "/tests/benchmark/inhib_diameter_modes/op/__init__.py": ["/tests/benchmark/inhib_diameter_modes/op/op.py"], "/tests/benchmark/inhib_diameter_modes/slopeOP/slopeOP.py": ["/tests/benchmark/inhib_diameter_modes/shared/__init__.py"], "/tests/benchmark/preprocess_img/__main__.py": ["/tests/benchmark/preprocess_img/preproc.py"], "/tests/benchmark/benchmark_tools/atb_names/usecases/__init__.py": ["/tests/benchmark/benchmark_tools/atb_names/interfaces/__init__.py"]} |
73,554 | mpascucci/AST-image-processing | refs/heads/master | /tests/benchmark/inhib_diameter_modes/count/__init__.py | from .count import measureOneDiameter, _measureOneDiameter
| {"/tests/benchmark/benchmark_tools/usecases/amman/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py"], "/tests/benchmark/benchmark_tools/usecases/creteil/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py"], "/tests/benchmark/benchmark_tools/usecases/whonet_nada/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py", "/tests/benchmark/benchmark_tools/atb_names/__init__.py"], "/tests/benchmark/inhib_diameter_modes/op/op.py": ["/tests/benchmark/inhib_diameter_modes/shared/__init__.py"], "/tests/benchmark/inhib_diameter_modes/count/__init__.py": ["/tests/benchmark/inhib_diameter_modes/count/count.py"], "/tests/benchmark/preprocess_img/__init__.py": ["/tests/benchmark/preprocess_img/preproc.py"], "/tests/benchmark/benchmark_tools/atb_names/__init__.py": ["/tests/benchmark/benchmark_tools/atb_names/usecases/__init__.py"], "/python-module/astimp_tools/datamodels.py": ["/python-module/astimp_tools/image.py"], "/tests/benchmark/inhib_diameter_modes/nlclsq/__init__.py": ["/tests/benchmark/inhib_diameter_modes/nlclsq/nlclsq.py"], "/tests/benchmark/inhib_diameter_modes/slopeOP/__init__.py": ["/tests/benchmark/inhib_diameter_modes/slopeOP/slopeOP.py"], "/tests/benchmark/inhib_diameter_modes/student/__init__.py": ["/tests/benchmark/inhib_diameter_modes/student/student.py"], "/python-module/astimp_tools/artist.py": ["/python-module/astimp_tools/image.py"], "/tests/benchmark/inhib_diameter_modes/op/__init__.py": ["/tests/benchmark/inhib_diameter_modes/op/op.py"], "/tests/benchmark/inhib_diameter_modes/slopeOP/slopeOP.py": ["/tests/benchmark/inhib_diameter_modes/shared/__init__.py"], "/tests/benchmark/preprocess_img/__main__.py": ["/tests/benchmark/preprocess_img/preproc.py"], "/tests/benchmark/benchmark_tools/atb_names/usecases/__init__.py": ["/tests/benchmark/benchmark_tools/atb_names/interfaces/__init__.py"]} |
73,555 | mpascucci/AST-image-processing | refs/heads/master | /tests/benchmark/preprocess_img/__init__.py | from .preproc import preprocess, preprocess_one_image, PreprocResults, ErrorInPreproc
| {"/tests/benchmark/benchmark_tools/usecases/amman/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py"], "/tests/benchmark/benchmark_tools/usecases/creteil/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py"], "/tests/benchmark/benchmark_tools/usecases/whonet_nada/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py", "/tests/benchmark/benchmark_tools/atb_names/__init__.py"], "/tests/benchmark/inhib_diameter_modes/op/op.py": ["/tests/benchmark/inhib_diameter_modes/shared/__init__.py"], "/tests/benchmark/inhib_diameter_modes/count/__init__.py": ["/tests/benchmark/inhib_diameter_modes/count/count.py"], "/tests/benchmark/preprocess_img/__init__.py": ["/tests/benchmark/preprocess_img/preproc.py"], "/tests/benchmark/benchmark_tools/atb_names/__init__.py": ["/tests/benchmark/benchmark_tools/atb_names/usecases/__init__.py"], "/python-module/astimp_tools/datamodels.py": ["/python-module/astimp_tools/image.py"], "/tests/benchmark/inhib_diameter_modes/nlclsq/__init__.py": ["/tests/benchmark/inhib_diameter_modes/nlclsq/nlclsq.py"], "/tests/benchmark/inhib_diameter_modes/slopeOP/__init__.py": ["/tests/benchmark/inhib_diameter_modes/slopeOP/slopeOP.py"], "/tests/benchmark/inhib_diameter_modes/student/__init__.py": ["/tests/benchmark/inhib_diameter_modes/student/student.py"], "/python-module/astimp_tools/artist.py": ["/python-module/astimp_tools/image.py"], "/tests/benchmark/inhib_diameter_modes/op/__init__.py": ["/tests/benchmark/inhib_diameter_modes/op/op.py"], "/tests/benchmark/inhib_diameter_modes/slopeOP/slopeOP.py": ["/tests/benchmark/inhib_diameter_modes/shared/__init__.py"], "/tests/benchmark/preprocess_img/__main__.py": ["/tests/benchmark/preprocess_img/preproc.py"], "/tests/benchmark/benchmark_tools/atb_names/usecases/__init__.py": ["/tests/benchmark/benchmark_tools/atb_names/interfaces/__init__.py"]} |
73,556 | mpascucci/AST-image-processing | refs/heads/master | /tests/benchmark/inhib_diameter_modes/count/count.py | import astimp
import numpy as np
PELLET_DIAM_IN_MM = 6
def measureOneDiameter(preproc, pellet_idx, th_v=None):
    """Measure one inhibition diameter (count-profile method) and wrap it
    in an astimp.InhibDisk; the MSE of the changepoint fit is reported as
    confidence."""
    res = _measureOneDiameter(preproc, pellet_idx, th_v)
    # res = (diameter, cp, pellet_end, y_count, data, mse, confidence)
    return astimp.InhibDisk(diameter=res[0], confidence=res[5])
def _measureOneDiameter(preproc, pellet_idx, th_v=None):
    """Measure one inhibition diameter with the "count" radial profile.

    The count profile gives, for each radius, how much of the circle at that
    radius is above the intensity threshold th_v.  The changepoint between
    the low (inhibition) and high (bacteria) plateaus is found by brute-force
    MSE minimization over all candidate positions.

    :return: (diameter, cp, pellet_end, y_count, data, mse, confidence)
    """
    if th_v is None:
        # km_centers = preproc.km_centers_local[pellet_idx]
        th_v = preproc.km_thresholds_local[pellet_idx]
        # the count profile expects a normalized threshold in [0, 1]
        th_v /= 255
    y_count = np.array(
        astimp.radial_profile(preproc, pellet_idx, astimp.profile_type["count"], th_v))
    hv = y_count.max()  # high plateau value (bacteria)
    lv = y_count.min()  # low plateau value (inhibition)
    pellet_radius_in_px = int(np.round(preproc.px_per_mm)*3)
    pellet_end = 0
    mse = 0
    if all(y_count > 0):
        # evident case of no inhibition
        cp = pellet_radius_in_px
        data = y_count
    else:
        # while (y_count[pellet_end] > 0 and pellet_end < len(y_count)):
        #     pellet_end += 1
        # skip the pellet itself (fixed 6 mm diameter)
        pellet_end = int(round(preproc.px_per_mm * PELLET_DIAM_IN_MM/2))
        cp = 0
        mses = []
        data = y_count[pellet_end:]
        if all(data == 0):
            # evident case of full inhibition
            cp = len(y_count)
        else:
            # baseline cost: "no changepoint", everything on the high plateau
            mse = np.mean((data - hv)**2)
            # brute-force search of the single changepoint minimizing the MSE
            # against a low-then-high step model
            for i in range(1, len(data)):
                before = data[:i]
                after = data[i:]
                err_b = before - lv
                err_a = after - hv
                err = np.concatenate((err_a,err_b))
                this_mse = np.mean(err**2)
                # print(i,this_mse)
                mses.append(this_mse)
                if this_mse < mse:
                    mse = this_mse
                    cp = i
            cp += pellet_end
    # changepoint radius in px -> diameter in mm
    diameter = (cp)/preproc.px_per_mm*2
    if mse != 0:
        # heuristic confidence in (0, 1]; the 250*1.5 factor looks
        # empirical -- TODO confirm its origin
        confidence = min(preproc.px_per_mm*250*1.5/mse,1)
    else:
        confidence = 1
    return diameter, cp, pellet_end, y_count, data, mse, confidence
# An alternative way that uses linear algebra
# def mse(signal):
# return np.mean((signal-signal.mean() )**2)
# def cost(signal, cpts):
# cost = mse(signal[0:cpts[0]])
# for i in range(len(cpts)-1):
# cost += mse(signal[cpts[i]:cpts[i+1]])
# cost += mse(signal[cpts[-1]:])
# return cost
# def min_MSE_n_spline_piecewise_constant(x,y,cp):
# """ Find the spline that minimizes the MSE with the signal (x,y).
# The x coordinates of the nodes are in the cp vetor."""
# # Build X matrix
# X = np.empty((len(x),len(cp)+1))
# ones = np.ones(len(y))
# X[:,0] = ones
# X[:,0][cp[0]:] = 0
# for i,p in enumerate(cp):
# xt = np.ones(len(y)) - X[:,i]
# xt[:p] = 0
# X[:,i+1] = xt
# # linear regression to find vector b
# inv = np.linalg.inv(np.dot(X.T,X))
# b = np.dot(inv,np.dot(X.T,y))
# # Calculate y = Xb
# cpx = x[np.hstack([[0], cp, [len(x)-1]])]
# cpy = np.dot(X,b)[cpx]
# return cpx, cpy, cost(y,cp)
# cp = [57]
# signal = np.hstack([np.random.randn(56)+5, np.random.randn(74)+20])
# plt.plot(signal)
# gmse = mse(signal)
# for cp in range(5,len(y)-5):
# try:
# cpx, cpy, this_mse = min_MSE_n_spline_piecewise_constant(np.arange(len(signal)),signal,[cp])
# except np.linalg.LinAlgError:
# continue
# if this_mse < gmse:
# gmse = this_mse
# final_cp = cpx
# # plot
# for cp in final_cp:
# plt.axvline(cp, color='r')
| {"/tests/benchmark/benchmark_tools/usecases/amman/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py"], "/tests/benchmark/benchmark_tools/usecases/creteil/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py"], "/tests/benchmark/benchmark_tools/usecases/whonet_nada/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py", "/tests/benchmark/benchmark_tools/atb_names/__init__.py"], "/tests/benchmark/inhib_diameter_modes/op/op.py": ["/tests/benchmark/inhib_diameter_modes/shared/__init__.py"], "/tests/benchmark/inhib_diameter_modes/count/__init__.py": ["/tests/benchmark/inhib_diameter_modes/count/count.py"], "/tests/benchmark/preprocess_img/__init__.py": ["/tests/benchmark/preprocess_img/preproc.py"], "/tests/benchmark/benchmark_tools/atb_names/__init__.py": ["/tests/benchmark/benchmark_tools/atb_names/usecases/__init__.py"], "/python-module/astimp_tools/datamodels.py": ["/python-module/astimp_tools/image.py"], "/tests/benchmark/inhib_diameter_modes/nlclsq/__init__.py": ["/tests/benchmark/inhib_diameter_modes/nlclsq/nlclsq.py"], "/tests/benchmark/inhib_diameter_modes/slopeOP/__init__.py": ["/tests/benchmark/inhib_diameter_modes/slopeOP/slopeOP.py"], "/tests/benchmark/inhib_diameter_modes/student/__init__.py": ["/tests/benchmark/inhib_diameter_modes/student/student.py"], "/python-module/astimp_tools/artist.py": ["/python-module/astimp_tools/image.py"], "/tests/benchmark/inhib_diameter_modes/op/__init__.py": ["/tests/benchmark/inhib_diameter_modes/op/op.py"], "/tests/benchmark/inhib_diameter_modes/slopeOP/slopeOP.py": ["/tests/benchmark/inhib_diameter_modes/shared/__init__.py"], "/tests/benchmark/preprocess_img/__main__.py": ["/tests/benchmark/preprocess_img/preproc.py"], "/tests/benchmark/benchmark_tools/atb_names/usecases/__init__.py": ["/tests/benchmark/benchmark_tools/atb_names/interfaces/__init__.py"]} |
73,557 | mpascucci/AST-image-processing | refs/heads/master | /tests/benchmark/benchmark_tools/atb_names/__init__.py | from .usecases import i2a, amman, whonet | {"/tests/benchmark/benchmark_tools/usecases/amman/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py"], "/tests/benchmark/benchmark_tools/usecases/creteil/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py"], "/tests/benchmark/benchmark_tools/usecases/whonet_nada/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py", "/tests/benchmark/benchmark_tools/atb_names/__init__.py"], "/tests/benchmark/inhib_diameter_modes/op/op.py": ["/tests/benchmark/inhib_diameter_modes/shared/__init__.py"], "/tests/benchmark/inhib_diameter_modes/count/__init__.py": ["/tests/benchmark/inhib_diameter_modes/count/count.py"], "/tests/benchmark/preprocess_img/__init__.py": ["/tests/benchmark/preprocess_img/preproc.py"], "/tests/benchmark/benchmark_tools/atb_names/__init__.py": ["/tests/benchmark/benchmark_tools/atb_names/usecases/__init__.py"], "/python-module/astimp_tools/datamodels.py": ["/python-module/astimp_tools/image.py"], "/tests/benchmark/inhib_diameter_modes/nlclsq/__init__.py": ["/tests/benchmark/inhib_diameter_modes/nlclsq/nlclsq.py"], "/tests/benchmark/inhib_diameter_modes/slopeOP/__init__.py": ["/tests/benchmark/inhib_diameter_modes/slopeOP/slopeOP.py"], "/tests/benchmark/inhib_diameter_modes/student/__init__.py": ["/tests/benchmark/inhib_diameter_modes/student/student.py"], "/python-module/astimp_tools/artist.py": ["/python-module/astimp_tools/image.py"], "/tests/benchmark/inhib_diameter_modes/op/__init__.py": ["/tests/benchmark/inhib_diameter_modes/op/op.py"], "/tests/benchmark/inhib_diameter_modes/slopeOP/slopeOP.py": ["/tests/benchmark/inhib_diameter_modes/shared/__init__.py"], "/tests/benchmark/preprocess_img/__main__.py": ["/tests/benchmark/preprocess_img/preproc.py"], "/tests/benchmark/benchmark_tools/atb_names/usecases/__init__.py": 
["/tests/benchmark/benchmark_tools/atb_names/interfaces/__init__.py"]} |
73,558 | mpascucci/AST-image-processing | refs/heads/master | /python-module/astimp_tools/datamodels.py | from .image import subimage_by_roi
import astimp
class Antibiotic():
    """An antibiotic tested in an AST.

    Bundles the pellet circle, the measured inhibition, the cropped
    sub-image around the pellet and the image scale.
    """

    def __init__(self, short_name, pellet_circle, inhibition, image, roi, px_per_mm):
        self.short_name = short_name
        self.pellet_circle = pellet_circle
        self.inhibition = inhibition
        self.img = image
        self.px_per_mm = px_per_mm
        self.roi = roi
        # lazily-computed cache for center_in_roi
        self._center_in_roi = None

    @property
    def center_in_roi(self):
        """center relative to the roi coordinate"""
        if self._center_in_roi is None:
            x, y = self.pellet_circle.center
            self._center_in_roi = (x - self.roi.left, y - self.roi.top)
        return self._center_in_roi

    def __repr__(self):
        return "ATB : {n}, inhibition diameter: {d:.1f}mm".format(
            n=self.short_name, d=self.inhibition.diameter)
class AST():
    """Represent an AST (antibiotic susceptibility test) image.

    Every derived quantity (crop, pellet circles, labels, inhibition
    measurements, ...) is computed lazily by the astimp library on first
    access and memoized in the matching ``_attribute`` cache.
    """
    def __init__(self, ast_image):
        # original (uncropped) photo of the AST
        self.img = ast_image
        # lazy caches for the properties below; None means "not computed yet"
        self._crop = None
        self._petriDish = None
        self._circles = None
        self._rois = None
        self._mm_per_px = None
        self._px_per_mm = None
        self._pellets = None
        self._labels = None
        self._labels_text = None
        self._preproc = None
        self._inhibitions = None
    @property
    def crop(self):
        """cropped image of Petri dish"""
        if self._crop is None:
            self._crop = self.petriDish.img
        return self._crop
    @crop.setter
    def crop(self, image):
        # allow callers to override the automatically-computed crop
        self._crop = image
    @property
    def petriDish(self):
        """Petri dish"""
        if self._petriDish is None:
            self._petriDish = astimp.getPetriDish(self.img)
        return self._petriDish
    @property
    def circles(self):
        """circles representing pellets"""
        if self._circles is None:
            self._circles = astimp.find_atb_pellets(self.crop)
        return self._circles
    @property
    def rois(self):
        """regions of interest around each pellet (one per circle)"""
        if self._rois is None:
            max_diam_mm = 40  # TODO: get this from config
            self._rois = astimp.inhibition_disks_ROIs(
                self.circles, self.crop, max_diam_mm*self.px_per_mm)
        return self._rois
    @property
    def mm_per_px(self):
        """image scale"""
        if self._mm_per_px is None:
            self._mm_per_px = astimp.get_mm_per_px(self.circles)
        return self._mm_per_px
    @property
    def px_per_mm(self):
        """image scale (inverse of mm_per_px)"""
        if self._px_per_mm is None:
            self._px_per_mm = 1/astimp.get_mm_per_px(self.circles)
        return self._px_per_mm
    @property
    def pellets(self):
        """subimages of the found pellets"""
        if self._pellets is None:
            self._pellets = [astimp.cutOnePelletInImage(
                self.crop, circle) for circle in self.circles]
        return self._pellets
    @property
    def labels(self):
        """label objects (OCR of the pellet text)"""
        if self._labels is None:
            self._labels = [astimp.getOnePelletText(
                pellet) for pellet in self.pellets]
        return self._labels
    @property
    def labels_text(self):
        """label texts"""
        if self._labels_text is None:
            self._labels_text = tuple(label.text for label in self.labels)
        return self._labels_text
    @property
    def preproc(self):
        """preproc object for inhib diameter measurement"""
        if self._preproc is None:
            self._preproc = astimp.inhib_diam_preprocessing(
                self.petriDish, self.circles)
        return self._preproc
    @property
    def inhibitions(self):
        """inhibition measurements from astimp.measureDiameters (lazy)"""
        if self._inhibitions is None:
            self._inhibitions = astimp.measureDiameters(self.preproc)
        return self._inhibitions
    def get_atb_by_idx(self, idx):
        """Build an Antibiotic object for the pellet at index `idx`."""
        return Antibiotic(short_name=self.labels[idx].text,
                          pellet_circle=self.circles[idx],
                          roi=self.rois[idx],
                          inhibition=self.inhibitions[idx],
                          image=subimage_by_roi(self.crop, self.rois[idx]),
                          px_per_mm=self.px_per_mm)
    def get_atb_idx_by_name(self, short_name):
        """Return the index of the first pellet labelled `short_name`.

        :raises ValueError: if no label matches (tuple.index behavior)
        """
        return self.labels_text.index(short_name)
| {"/tests/benchmark/benchmark_tools/usecases/amman/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py"], "/tests/benchmark/benchmark_tools/usecases/creteil/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py"], "/tests/benchmark/benchmark_tools/usecases/whonet_nada/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py", "/tests/benchmark/benchmark_tools/atb_names/__init__.py"], "/tests/benchmark/inhib_diameter_modes/op/op.py": ["/tests/benchmark/inhib_diameter_modes/shared/__init__.py"], "/tests/benchmark/inhib_diameter_modes/count/__init__.py": ["/tests/benchmark/inhib_diameter_modes/count/count.py"], "/tests/benchmark/preprocess_img/__init__.py": ["/tests/benchmark/preprocess_img/preproc.py"], "/tests/benchmark/benchmark_tools/atb_names/__init__.py": ["/tests/benchmark/benchmark_tools/atb_names/usecases/__init__.py"], "/python-module/astimp_tools/datamodels.py": ["/python-module/astimp_tools/image.py"], "/tests/benchmark/inhib_diameter_modes/nlclsq/__init__.py": ["/tests/benchmark/inhib_diameter_modes/nlclsq/nlclsq.py"], "/tests/benchmark/inhib_diameter_modes/slopeOP/__init__.py": ["/tests/benchmark/inhib_diameter_modes/slopeOP/slopeOP.py"], "/tests/benchmark/inhib_diameter_modes/student/__init__.py": ["/tests/benchmark/inhib_diameter_modes/student/student.py"], "/python-module/astimp_tools/artist.py": ["/python-module/astimp_tools/image.py"], "/tests/benchmark/inhib_diameter_modes/op/__init__.py": ["/tests/benchmark/inhib_diameter_modes/op/op.py"], "/tests/benchmark/inhib_diameter_modes/slopeOP/slopeOP.py": ["/tests/benchmark/inhib_diameter_modes/shared/__init__.py"], "/tests/benchmark/preprocess_img/__main__.py": ["/tests/benchmark/preprocess_img/preproc.py"], "/tests/benchmark/benchmark_tools/atb_names/usecases/__init__.py": ["/tests/benchmark/benchmark_tools/atb_names/interfaces/__init__.py"]} |
73,559 | mpascucci/AST-image-processing | refs/heads/master | /tests/benchmark/inhib_diameter_modes/nlclsq/__init__.py | from .nlclsq import measureOneDiameter, _measureOneDiameter, inhib_model
| {"/tests/benchmark/benchmark_tools/usecases/amman/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py"], "/tests/benchmark/benchmark_tools/usecases/creteil/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py"], "/tests/benchmark/benchmark_tools/usecases/whonet_nada/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py", "/tests/benchmark/benchmark_tools/atb_names/__init__.py"], "/tests/benchmark/inhib_diameter_modes/op/op.py": ["/tests/benchmark/inhib_diameter_modes/shared/__init__.py"], "/tests/benchmark/inhib_diameter_modes/count/__init__.py": ["/tests/benchmark/inhib_diameter_modes/count/count.py"], "/tests/benchmark/preprocess_img/__init__.py": ["/tests/benchmark/preprocess_img/preproc.py"], "/tests/benchmark/benchmark_tools/atb_names/__init__.py": ["/tests/benchmark/benchmark_tools/atb_names/usecases/__init__.py"], "/python-module/astimp_tools/datamodels.py": ["/python-module/astimp_tools/image.py"], "/tests/benchmark/inhib_diameter_modes/nlclsq/__init__.py": ["/tests/benchmark/inhib_diameter_modes/nlclsq/nlclsq.py"], "/tests/benchmark/inhib_diameter_modes/slopeOP/__init__.py": ["/tests/benchmark/inhib_diameter_modes/slopeOP/slopeOP.py"], "/tests/benchmark/inhib_diameter_modes/student/__init__.py": ["/tests/benchmark/inhib_diameter_modes/student/student.py"], "/python-module/astimp_tools/artist.py": ["/python-module/astimp_tools/image.py"], "/tests/benchmark/inhib_diameter_modes/op/__init__.py": ["/tests/benchmark/inhib_diameter_modes/op/op.py"], "/tests/benchmark/inhib_diameter_modes/slopeOP/slopeOP.py": ["/tests/benchmark/inhib_diameter_modes/shared/__init__.py"], "/tests/benchmark/preprocess_img/__main__.py": ["/tests/benchmark/preprocess_img/preproc.py"], "/tests/benchmark/benchmark_tools/atb_names/usecases/__init__.py": ["/tests/benchmark/benchmark_tools/atb_names/interfaces/__init__.py"]} |
73,560 | mpascucci/AST-image-processing | refs/heads/master | /tests/benchmark/preprocess_img/preproc.py | from tqdm import tqdm
import os
import glob
import pickle
import numpy as np
from imageio import imread, imwrite
import astimp
from multiprocessing import Pool, cpu_count
from functools import partial
class ErrorInPreproc(Exception):
    """Raised when the preprocessing of an image recorded an error."""
class Dataset():
    """A dataset made of the files in a folder that match some glob patterns."""
    def __init__(self, base_path, glob_patterns=('*.jpg', '*.JPG', '*.png', "*.PNG")):
        """base_path : path to the folder where the files are stored
        glob_patterns : a list of patterns for selecting files (e.g. ['*.jpg'])"""
        assert os.path.exists(
            base_path), "input folder '{}' not found".format(base_path)
        self.base_path = base_path
        matched = []
        for pattern in glob_patterns:
            matched.extend(glob.glob(os.path.join(base_path, pattern)))
        # paths holds the matched file paths; names are the base names
        # truncated at the first dot (e.g. "img.jpg" -> "img").
        self.paths = matched
        self.names = []
        for file_path in self.paths:
            self.names.append(os.path.basename(file_path).split('.')[0])
class PreprocResults():
    """Access to preprocessed pickled AST images.

    Pickles are read from `pickles_folder`.  An optional "error_log.txt"
    in that folder (lines of the form "<name>, <message>") is parsed into
    the `errors` dict.
    """
    def __init__(self, pickles_folder):
        """pickles_folder : folder containing the *.pickle files."""
        if not os.path.exists(pickles_folder):
            raise FileNotFoundError("{} does not exist".format(pickles_folder))
        self.pf = pickles_folder
        self.ds = Dataset(self.pf, glob_patterns=("*.pickle",))
        self.names = self.ds.names
        errorlog_path = os.path.join(pickles_folder, "error_log.txt")
        # Map the text before the first comma (image name) to everything
        # after it (the error message).  partition(',') keeps messages that
        # themselves contain commas intact, unlike the old split(',')[1].
        # errors is always a dict (the original left it as [] when no log
        # existed, which made the attribute's type inconsistent).
        self.errors = {}
        if os.path.exists(errorlog_path):
            with open(errorlog_path, 'r') as f:
                for line in f:
                    name, _, message = line.partition(',')
                    self.errors[name] = message
    def get_by_name(self, name):
        """Load a pickle by name.
        Pickles have the same name than images
        example:
        234_SLR_ESBL.jpg <-> 234_SLR_ESBL.jpg.pickle

        Raises ErrorInPreproc if an error (other than an informational
        'INFO' entry) was logged for this name, FileNotFoundError if the
        pickle file is missing."""
        # BUG FIX: the original compared the *list* returned by split(" ")
        # with the string 'INFO', which is always True; compare the first
        # word instead so informational entries do not raise.
        if name in self.errors and self.errors[name].strip().split(" ")[0] != 'INFO':
            raise ErrorInPreproc(self.errors[name].strip())
        path = os.path.join(self.pf, name+'.pickle')
        if not os.path.exists(path):
            raise FileNotFoundError("Pickle {} not found.".format(path))
        with open(path, 'rb') as f:
            p = pickle.load(f)
        return p
    def __getitem__(self, name):
        return self.get_by_name(name)
    def get_all(self):
        """Load all pickles in input folder"""
        output = []
        for path in tqdm(self.ds.paths, desc="Loading pickles"):
            with open(path, 'rb') as f:
                p = pickle.load(f)
            output.append(p)
        return output
def preprocess_one_image(path):
    """Run the astimp pipeline on the image at *path* and bundle the results.

    Returns a dict with the AST object, its preprocessing data, the detected
    circles/pellets/labels, the cropped image, the image file name and the
    inhibition measurements.
    """
    image = np.array(imread(path))  # load image from disk
    ast = astimp.AST(image)
    cropped = ast.crop
    found_circles = ast.circles
    found_pellets = ast.pellets
    label_texts = ast.labels_text
    # NOTE: the preprocessing object only exists when pellets were found.
    preprocessing = ast.preproc if len(found_circles) != 0 else None
    return {"ast": ast,
            "preproc": preprocessing,
            "circles": found_circles,
            "pellets": found_pellets,
            "labels": label_texts,
            "crop": cropped,
            "fname": os.path.basename(path),
            "inhibitions": ast.inhibitions}
def pickle_one_preproc(idx, output_path, image_paths, error_list, skip_existing=False, mute=True):
    """Preprocess image number *idx* of *image_paths* and pickle the result.

    Failures and "no pellet" cases are recorded in error_list[idx];
    when *mute* is False, progress messages go through tqdm.write.
    Always returns None.
    """
    log_function = (lambda x: x) if mute else tqdm.write
    path = image_paths[idx]
    try:
        fname = os.path.basename(path)  # file name from path
        # output file path
        ofpath = os.path.join(output_path, f"{fname}.pickle")
        if skip_existing and os.path.exists(ofpath):
            # skip if output file exists already
            return None
        # WARNING for an unknown reason the pickle call must be inside this function
        pobj = preprocess_one_image(path)
        with open(ofpath, 'wb') as f:
            pickle.dump(pobj, f)
        if not pobj['circles']:
            # if no pellet found, record an informational entry
            error_list[idx] = "INFO : {}, No pellets found".format(fname)
            log_function("No pellet found in {}".format(fname))
    except Exception as e:
        ex_text = ', '.join(str(arg) for arg in e.args)
        error_list[idx] = "{}, {}".format(fname, ex_text)
        log_function("Failed images: {} - {}".format(len(error_list), ex_text))
    return None
def preprocess(img_paths, output_path, skip_existing=False, parallel=True):
    """preprocess images and pickle the preproc object.

    img_paths : a list of paths of the image files.
    output_path : folder where the "<image name>.pickle" files are written
        (created if it does not exist).
    skip_existing : if True, images whose pickle already exists are skipped.
    parallel : if True, distribute the work over all CPU cores.

    Returns the list of non-empty error messages recorded while processing.
    """
    if not os.path.exists(output_path):
        os.mkdir(output_path)
    if parallel:
        # BUG FIX: a plain list is *copied* into each worker process, so
        # error messages written by the workers were silently lost.  A
        # Manager list is shared with the workers and mutations propagate
        # back to the parent process.
        from multiprocessing import Manager
        jobs = cpu_count()
        print("Running in parallel on {} processes".format(jobs))
        with Manager() as manager:
            errors = manager.list([""]*len(img_paths))
            f = partial(pickle_one_preproc,
                        image_paths=img_paths,
                        output_path=output_path,
                        error_list=errors,
                        skip_existing=skip_existing
                        )
            with Pool(jobs) as p:
                # imap drives the pool lazily; list(tqdm(...)) consumes it
                # while showing a progress bar.
                list(tqdm(p.imap(f, range(len(img_paths))), total=len(img_paths)))
            # copy out of the proxy before the manager shuts down
            errors = [e for e in errors if e != ""]
    else:
        errors = [""]*len(img_paths)
        for idx in tqdm(range(len(img_paths)), desc="Preprocessing"):
            pickle_one_preproc(idx, output_path, img_paths, errors,
                               skip_existing, mute=False)
        # CONSISTENCY FIX: the serial branch used to return the raw list
        # (including "" placeholders) while the parallel branch filtered
        # them; both now return only actual error messages.
        errors = [e for e in errors if e != ""]
    return errors
| {"/tests/benchmark/benchmark_tools/usecases/amman/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py"], "/tests/benchmark/benchmark_tools/usecases/creteil/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py"], "/tests/benchmark/benchmark_tools/usecases/whonet_nada/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py", "/tests/benchmark/benchmark_tools/atb_names/__init__.py"], "/tests/benchmark/inhib_diameter_modes/op/op.py": ["/tests/benchmark/inhib_diameter_modes/shared/__init__.py"], "/tests/benchmark/inhib_diameter_modes/count/__init__.py": ["/tests/benchmark/inhib_diameter_modes/count/count.py"], "/tests/benchmark/preprocess_img/__init__.py": ["/tests/benchmark/preprocess_img/preproc.py"], "/tests/benchmark/benchmark_tools/atb_names/__init__.py": ["/tests/benchmark/benchmark_tools/atb_names/usecases/__init__.py"], "/python-module/astimp_tools/datamodels.py": ["/python-module/astimp_tools/image.py"], "/tests/benchmark/inhib_diameter_modes/nlclsq/__init__.py": ["/tests/benchmark/inhib_diameter_modes/nlclsq/nlclsq.py"], "/tests/benchmark/inhib_diameter_modes/slopeOP/__init__.py": ["/tests/benchmark/inhib_diameter_modes/slopeOP/slopeOP.py"], "/tests/benchmark/inhib_diameter_modes/student/__init__.py": ["/tests/benchmark/inhib_diameter_modes/student/student.py"], "/python-module/astimp_tools/artist.py": ["/python-module/astimp_tools/image.py"], "/tests/benchmark/inhib_diameter_modes/op/__init__.py": ["/tests/benchmark/inhib_diameter_modes/op/op.py"], "/tests/benchmark/inhib_diameter_modes/slopeOP/slopeOP.py": ["/tests/benchmark/inhib_diameter_modes/shared/__init__.py"], "/tests/benchmark/preprocess_img/__main__.py": ["/tests/benchmark/preprocess_img/preproc.py"], "/tests/benchmark/benchmark_tools/atb_names/usecases/__init__.py": ["/tests/benchmark/benchmark_tools/atb_names/interfaces/__init__.py"]} |
73,561 | mpascucci/AST-image-processing | refs/heads/master | /tests/benchmark/benchmark_tools/atb_names/interfaces/__init__.py | from abc import ABC
from collections import namedtuple
WhonetAndConcentration = namedtuple("WHOnet_atb", ["code", "concentration"])
class AtbNamesTranslator(ABC):
    """Translate between full/short antibiotic names, WHOnet codes and
    concentrations.

    The four constructor lists are parallel: index i of each list refers
    to the same antibiotic.
    """
    def __init__(self, full_names, short_names, whonet_codes, concentrations):
        assert len(short_names) == len(
            full_names), "names list must have same length"
        assert all([name not in full_names for name in short_names]) and all(
            [name not in short_names for name in full_names]), "the parameter lists should have void interception"
        self.full_names = full_names
        self.short_names = short_names
        self.whonet_codes = whonet_codes
        self.concentrations = concentrations
    def full2short(self, full_name):
        """Translate a full antibiotic name into its short version"""
        return self.short_names[self.full_names.index(full_name)]
    def short2full(self, short_name):
        """Translate a short antibiotic name into its long version"""
        return self.full_names[self.short_names.index(short_name)]
    def short2whonet_code(self, short_name):
        """Return the WHOnet code for the antibiotic with this short name."""
        idx = self.short_names.index(short_name)
        return self.whonet_codes[idx]
    def short2whonet(self, short_name):
        """Return the WHOnet code concatenated with the concentration."""
        idx = self.short_names.index(short_name)
        return "{}{}".format(self.whonet_codes[idx], self.concentrations[idx])
    def short2concentration(self, short_name):
        """Return the concentration for the antibiotic with this short name."""
        idx = self.short_names.index(short_name)
        # BUG FIX: the original returned concentrations.index(short_name),
        # i.e. it searched the concentration list for the *short name*
        # (raising ValueError); return the concentration at idx instead.
        return self.concentrations[idx]
    def short2whonet_tuple(self, short_name):
        """Return a (code, concentration) namedtuple for this short name."""
        idx = self.short_names.index(short_name)
        return WhonetAndConcentration(self.whonet_codes[idx], self.concentrations[idx])
    def whonet_code2short(self, whonet_code):
        """Translate a WHOnet antibiotic code its i2a short version"""
        idx = self.whonet_codes.index(whonet_code)
        return self.short_names[idx]
| {"/tests/benchmark/benchmark_tools/usecases/amman/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py"], "/tests/benchmark/benchmark_tools/usecases/creteil/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py"], "/tests/benchmark/benchmark_tools/usecases/whonet_nada/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py", "/tests/benchmark/benchmark_tools/atb_names/__init__.py"], "/tests/benchmark/inhib_diameter_modes/op/op.py": ["/tests/benchmark/inhib_diameter_modes/shared/__init__.py"], "/tests/benchmark/inhib_diameter_modes/count/__init__.py": ["/tests/benchmark/inhib_diameter_modes/count/count.py"], "/tests/benchmark/preprocess_img/__init__.py": ["/tests/benchmark/preprocess_img/preproc.py"], "/tests/benchmark/benchmark_tools/atb_names/__init__.py": ["/tests/benchmark/benchmark_tools/atb_names/usecases/__init__.py"], "/python-module/astimp_tools/datamodels.py": ["/python-module/astimp_tools/image.py"], "/tests/benchmark/inhib_diameter_modes/nlclsq/__init__.py": ["/tests/benchmark/inhib_diameter_modes/nlclsq/nlclsq.py"], "/tests/benchmark/inhib_diameter_modes/slopeOP/__init__.py": ["/tests/benchmark/inhib_diameter_modes/slopeOP/slopeOP.py"], "/tests/benchmark/inhib_diameter_modes/student/__init__.py": ["/tests/benchmark/inhib_diameter_modes/student/student.py"], "/python-module/astimp_tools/artist.py": ["/python-module/astimp_tools/image.py"], "/tests/benchmark/inhib_diameter_modes/op/__init__.py": ["/tests/benchmark/inhib_diameter_modes/op/op.py"], "/tests/benchmark/inhib_diameter_modes/slopeOP/slopeOP.py": ["/tests/benchmark/inhib_diameter_modes/shared/__init__.py"], "/tests/benchmark/preprocess_img/__main__.py": ["/tests/benchmark/preprocess_img/preproc.py"], "/tests/benchmark/benchmark_tools/atb_names/usecases/__init__.py": ["/tests/benchmark/benchmark_tools/atb_names/interfaces/__init__.py"]} |
73,562 | mpascucci/AST-image-processing | refs/heads/master | /pellet_labels/test_model.py | import argparse
import os
import time
import numpy as np
import tensorflow as tf
from trainer import pellet_list
from trainer.model import get_data_generator
from trainer import model
from package_ensemble import EntropyThresholdLayer
# Known pellet classes the classifier was trained on.
PELLET_LIST = pellet_list.PELLET_LIST
# Data files given with relative paths are resolved against the CWD.
WORKING_DIR = os.getcwd()
parser = argparse.ArgumentParser(
    description='Test the ensemble model accuracy',
    formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
    '--data-files',
    type=str,
    required=True,
    nargs='*',
    help='Dataset including valid files local or GCS')
parser.add_argument(
    '--img-size', type=int, default=64,
    help='square size to resize input images to in pixel')
parser.add_argument(
    '--model', default='models/ensemble_model.h5',
    help='path to keras model')
def test_accuracy(args):
    """
    For the tested ensemble model, will print a few performance metrics

    args : parsed CLI namespace with data_files, img_size and model
        (see the argparse setup at module level).
    """
    class_list = PELLET_LIST
    # Gather the validation split of every provided data file into a
    # single pair of arrays (images, one-hot labels).
    valid_images = []
    valid_labels = []
    for path in args.data_files:
        input_data = model.load_and_preprocess_data(
            path,
            WORKING_DIR,
            args.img_size,
            class_list)
        valid_images.append(input_data.valid_data)
        valid_labels.append(input_data.valid_labels)
    valid_images = np.concatenate(valid_images, axis=0)
    valid_labels = np.concatenate(valid_labels, axis=0)
    # shuffle=False keeps predictions aligned with valid_labels.
    inputs_gen = get_data_generator().flow(valid_images, shuffle=False)
    # The ensemble model uses a custom layer, so it must be registered
    # when loading.
    classifier = tf.keras.models.load_model(
        args.model, {'EntropyThresholdLayer': EntropyThresholdLayer})
    predictions = classifier.predict(inputs_gen)
    results = []
    results_within_threshold = []
    results_under_tresholds = []
    # 0.5 is the confidence threshold separating "high confidence"
    # predictions from low-confidence ones.
    for i, prediction in enumerate(predictions):
        results.append(int(np.argmax(prediction) == np.argmax(valid_labels[i])))
        if max(prediction) > 0.5:
            results_under_tresholds.append(1)
            results_within_threshold.append(
                int(np.argmax(prediction) == np.argmax(valid_labels[i])))
        else:
            results_under_tresholds.append(0)
    # results is a binary array with 1 for accurate prediction, 0 for false
    print("Accuracy of the ensemble model on the valid set: %f"
          % (sum(results) / len(results)))
    # results_within_threshold is a binary array with 1 for accurate prediction
    # of high confidence, 0 for false prediction with high confidence
    print("Percentage of images for which the model was highly confident yet\
 returned the wrong value: %f" % (
        1 - sum(results_within_threshold) / len(results_within_threshold)))
    # results_under_threshold is a binary array with 1 for high confidence
    # prediction, 0 for low confidence predictions
    print("Percentage of images for which the model was low confidence: %f" % (
        1 - sum(results_under_tresholds) / len(results_under_tresholds)))
if __name__ == '__main__':
args = parser.parse_args()
test_accuracy(args)
| {"/tests/benchmark/benchmark_tools/usecases/amman/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py"], "/tests/benchmark/benchmark_tools/usecases/creteil/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py"], "/tests/benchmark/benchmark_tools/usecases/whonet_nada/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py", "/tests/benchmark/benchmark_tools/atb_names/__init__.py"], "/tests/benchmark/inhib_diameter_modes/op/op.py": ["/tests/benchmark/inhib_diameter_modes/shared/__init__.py"], "/tests/benchmark/inhib_diameter_modes/count/__init__.py": ["/tests/benchmark/inhib_diameter_modes/count/count.py"], "/tests/benchmark/preprocess_img/__init__.py": ["/tests/benchmark/preprocess_img/preproc.py"], "/tests/benchmark/benchmark_tools/atb_names/__init__.py": ["/tests/benchmark/benchmark_tools/atb_names/usecases/__init__.py"], "/python-module/astimp_tools/datamodels.py": ["/python-module/astimp_tools/image.py"], "/tests/benchmark/inhib_diameter_modes/nlclsq/__init__.py": ["/tests/benchmark/inhib_diameter_modes/nlclsq/nlclsq.py"], "/tests/benchmark/inhib_diameter_modes/slopeOP/__init__.py": ["/tests/benchmark/inhib_diameter_modes/slopeOP/slopeOP.py"], "/tests/benchmark/inhib_diameter_modes/student/__init__.py": ["/tests/benchmark/inhib_diameter_modes/student/student.py"], "/python-module/astimp_tools/artist.py": ["/python-module/astimp_tools/image.py"], "/tests/benchmark/inhib_diameter_modes/op/__init__.py": ["/tests/benchmark/inhib_diameter_modes/op/op.py"], "/tests/benchmark/inhib_diameter_modes/slopeOP/slopeOP.py": ["/tests/benchmark/inhib_diameter_modes/shared/__init__.py"], "/tests/benchmark/preprocess_img/__main__.py": ["/tests/benchmark/preprocess_img/preproc.py"], "/tests/benchmark/benchmark_tools/atb_names/usecases/__init__.py": ["/tests/benchmark/benchmark_tools/atb_names/interfaces/__init__.py"]} |
73,563 | mpascucci/AST-image-processing | refs/heads/master | /tests/benchmark/inhib_diameter_modes/slopeOP/__init__.py | from .slopeOP import measureOneDiameter, _measureOneDiameter
| {"/tests/benchmark/benchmark_tools/usecases/amman/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py"], "/tests/benchmark/benchmark_tools/usecases/creteil/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py"], "/tests/benchmark/benchmark_tools/usecases/whonet_nada/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py", "/tests/benchmark/benchmark_tools/atb_names/__init__.py"], "/tests/benchmark/inhib_diameter_modes/op/op.py": ["/tests/benchmark/inhib_diameter_modes/shared/__init__.py"], "/tests/benchmark/inhib_diameter_modes/count/__init__.py": ["/tests/benchmark/inhib_diameter_modes/count/count.py"], "/tests/benchmark/preprocess_img/__init__.py": ["/tests/benchmark/preprocess_img/preproc.py"], "/tests/benchmark/benchmark_tools/atb_names/__init__.py": ["/tests/benchmark/benchmark_tools/atb_names/usecases/__init__.py"], "/python-module/astimp_tools/datamodels.py": ["/python-module/astimp_tools/image.py"], "/tests/benchmark/inhib_diameter_modes/nlclsq/__init__.py": ["/tests/benchmark/inhib_diameter_modes/nlclsq/nlclsq.py"], "/tests/benchmark/inhib_diameter_modes/slopeOP/__init__.py": ["/tests/benchmark/inhib_diameter_modes/slopeOP/slopeOP.py"], "/tests/benchmark/inhib_diameter_modes/student/__init__.py": ["/tests/benchmark/inhib_diameter_modes/student/student.py"], "/python-module/astimp_tools/artist.py": ["/python-module/astimp_tools/image.py"], "/tests/benchmark/inhib_diameter_modes/op/__init__.py": ["/tests/benchmark/inhib_diameter_modes/op/op.py"], "/tests/benchmark/inhib_diameter_modes/slopeOP/slopeOP.py": ["/tests/benchmark/inhib_diameter_modes/shared/__init__.py"], "/tests/benchmark/preprocess_img/__main__.py": ["/tests/benchmark/preprocess_img/preproc.py"], "/tests/benchmark/benchmark_tools/atb_names/usecases/__init__.py": ["/tests/benchmark/benchmark_tools/atb_names/interfaces/__init__.py"]} |
73,564 | mpascucci/AST-image-processing | refs/heads/master | /tests/benchmark/benchmark_unit_test.py | # Lint as: python3
"""
Unit tests for some internal helper methods of benchmark.py and benchmark_utils.py.
This test does not run actual benchmarking. Use benchmark.py for that.
"""
import unittest
import warnings
import astimp
from benchmark import find_diam_diffs_for_ast
from benchmark_utils import *
from collections import defaultdict
"""
Constants & helpers
"""
def buildAtbResult(label, inhib_diam):
    """Build an ImprocAtbResult for the tests.

    pellet geometry and confidence do not matter for these tests, so
    fixed dummy values are used for both.
    """
    dummy_confidence = 1.0
    dummy_pellet = astimp.Circle((1, 2), 6.0)
    match = astimp.Pellet_match(label, dummy_confidence)
    disk = astimp.InhibDisk(inhib_diam, dummy_confidence)
    return ImprocAtbResult(dummy_pellet, match, disk)
# Antibiotic names and diameters shared by the test cases below.
atb1 = "atb1"
atb2 = "atb2"
# "improc" diameters: what the image-processing code measured.
atb1_small_improc_diam = 10.0
atb1_large_improc_diam = 15.0
atb2_small_improc_diam = 10.0
atb2_large_improc_diam = 20.0
# "expected" diameters: the golden (annotated) values.
atb1_small_expected_diam = 12.0
atb1_large_expected_diam = 12.6
atb2_small_expected_diam = 11.0
atb2_large_expected_diam = 19.0
class BenchmarkUnitTest(unittest.TestCase):
    """Unit tests for the benchmark helper functions (no real benchmarking)."""
    def test_parse_valid_config(self):
        # The golden YAML maps image names to {antibiotic: [diameters]}.
        config = parse_and_validate_config("unit_test_golden_results.yaml")
        img_1_atbs = {'ATB1': [10.1, 10.1, 15.0], 'ATB2': [4.5]}
        img_2_atbs = {'ATB1': [1.1], 'ATB2': [2.2], 'ATB3': [3.3]}
        expected_config = {
            'IMG_20180107_184023.jpg': defaultdict(list, img_1_atbs),
            'IMG_20180107_184052.jpg': defaultdict(list, img_2_atbs),
        }
        self.assertEqual(config, expected_config)
    def test_find_nearest_value_larger_than_all_options(self):
        # When the query exceeds every option, the maximum is returned.
        array = [1.0, 3.0, 2.0]
        value = 3.3
        self.assertEqual(find_nearest(array, value), 3.0)
        self.assertEqual(find_nearest_index(array, value), 1)
    def test_find_nearest_value_in_middle_of_options(self):
        array = [100.3, 200.5, 150, 120]
        value = 170
        self.assertEqual(find_nearest(array, value), 150)
        self.assertEqual(find_nearest_index(array, value), 2)
    def test_flatten(self):
        # flatten removes exactly one level of nesting.
        list_of_lists = [[], [1, 2], ['a'], [3, 4, 5]]
        expected_flattened = [1, 2, 'a', 3, 4, 5]
        self.assertEqual(flatten(list_of_lists), expected_flattened)
    def test_find_diams_all_atb_matched(self):
        # Every improc diameter should be paired with the closest expected
        # diameter of the same antibiotic.
        improc_result = [
            buildAtbResult(atb1, atb1_small_improc_diam),
            buildAtbResult(atb1, atb1_large_improc_diam),
            buildAtbResult(atb2, atb2_large_improc_diam),
            buildAtbResult(atb2, atb2_small_improc_diam)
        ]
        expected_diams = {
            atb1: [atb1_small_expected_diam, atb1_large_expected_diam],
            atb2: [atb2_small_expected_diam, atb2_large_expected_diam]
        }
        diam_diffs = find_diam_diffs_for_ast(improc_result, expected_diams)
        expected_diffs = [
            atb1_small_improc_diam - atb1_small_expected_diam,
            atb1_large_improc_diam - atb1_large_expected_diam,
            atb2_small_improc_diam - atb2_small_expected_diam,
            atb2_large_improc_diam - atb2_large_expected_diam,
        ]
        # order does not matter
        self.assertListEqual(sorted(diam_diffs), sorted(expected_diffs))
        # all improc diameters are matched to their closest expected diameter.
        self.assertEqual(
            improc_result[0].expected_diam, atb1_small_expected_diam)
        self.assertEqual(
            improc_result[1].expected_diam, atb1_large_expected_diam)
        self.assertEqual(
            improc_result[2].expected_diam, atb2_large_expected_diam)
        self.assertEqual(
            improc_result[3].expected_diam, atb2_small_expected_diam)
    def test_find_diam_diffs_for_name_mismatch(self):
        improc_result = [buildAtbResult(atb1, atb1_small_improc_diam)]
        expected_diams = {
            atb1: [atb1_small_expected_diam, atb1_large_expected_diam]
        }
        diam_diffs = find_diam_diffs_for_ast(improc_result, expected_diams)
        # No diffs found because there are a different number of ATB1 in the
        # golden data vs. improc result.
        self.assertListEqual(diam_diffs, [])
        self.assertEqual(improc_result[0].expected_diam, None)
    def test_record_results_label_match(self):
        # Label-match counters accumulate across successive calls to
        # record_ast_result.
        all_results = BenchmarkResult()
        annotation = defaultdict(list, {'ATB1': [1,2,3], 'ATB2': [4]} )
        improc_results = [
            buildAtbResult('ATB1', 2),
            buildAtbResult('ATB1', 3),
            buildAtbResult('ATB3', 4),
        ]
        all_results.record_ast_result(improc_results, annotation)
        self.assertEqual(all_results.n_label_match, 2)
        self.assertEqual(all_results.label_match_denom, 3)
        annotation = defaultdict(list, {'ATB1': [10.1], 'ATB3': [2.0], 'ATB4': [6.0]} )
        improc_results = [
            buildAtbResult('ATB1', 9.0),
            buildAtbResult('ATB2', 11.0),
            buildAtbResult('ATB3', 5.0),
        ]
        all_results.record_ast_result(improc_results, annotation)
        self.assertEqual(all_results.n_label_match, 4)
        self.assertEqual(all_results.label_match_denom, 6)
if __name__ == '__main__':
unittest.main()
| {"/tests/benchmark/benchmark_tools/usecases/amman/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py"], "/tests/benchmark/benchmark_tools/usecases/creteil/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py"], "/tests/benchmark/benchmark_tools/usecases/whonet_nada/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py", "/tests/benchmark/benchmark_tools/atb_names/__init__.py"], "/tests/benchmark/inhib_diameter_modes/op/op.py": ["/tests/benchmark/inhib_diameter_modes/shared/__init__.py"], "/tests/benchmark/inhib_diameter_modes/count/__init__.py": ["/tests/benchmark/inhib_diameter_modes/count/count.py"], "/tests/benchmark/preprocess_img/__init__.py": ["/tests/benchmark/preprocess_img/preproc.py"], "/tests/benchmark/benchmark_tools/atb_names/__init__.py": ["/tests/benchmark/benchmark_tools/atb_names/usecases/__init__.py"], "/python-module/astimp_tools/datamodels.py": ["/python-module/astimp_tools/image.py"], "/tests/benchmark/inhib_diameter_modes/nlclsq/__init__.py": ["/tests/benchmark/inhib_diameter_modes/nlclsq/nlclsq.py"], "/tests/benchmark/inhib_diameter_modes/slopeOP/__init__.py": ["/tests/benchmark/inhib_diameter_modes/slopeOP/slopeOP.py"], "/tests/benchmark/inhib_diameter_modes/student/__init__.py": ["/tests/benchmark/inhib_diameter_modes/student/student.py"], "/python-module/astimp_tools/artist.py": ["/python-module/astimp_tools/image.py"], "/tests/benchmark/inhib_diameter_modes/op/__init__.py": ["/tests/benchmark/inhib_diameter_modes/op/op.py"], "/tests/benchmark/inhib_diameter_modes/slopeOP/slopeOP.py": ["/tests/benchmark/inhib_diameter_modes/shared/__init__.py"], "/tests/benchmark/preprocess_img/__main__.py": ["/tests/benchmark/preprocess_img/preproc.py"], "/tests/benchmark/benchmark_tools/atb_names/usecases/__init__.py": ["/tests/benchmark/benchmark_tools/atb_names/interfaces/__init__.py"]} |
73,565 | mpascucci/AST-image-processing | refs/heads/master | /tests/benchmark/inhib_diameter_modes/__init__.py | from . import nlclsq
from . import slopeOP #! REQUIRES THIS PACKAGE https://github.com/vrunge/slopeOP
from . import op #! REQUIRES THIS PACKAGE https://github.com/mpascucci/optimalpartitioning
from . import count
__doc__ = "functions for inhibition diameter measurement"
| {"/tests/benchmark/benchmark_tools/usecases/amman/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py"], "/tests/benchmark/benchmark_tools/usecases/creteil/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py"], "/tests/benchmark/benchmark_tools/usecases/whonet_nada/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py", "/tests/benchmark/benchmark_tools/atb_names/__init__.py"], "/tests/benchmark/inhib_diameter_modes/op/op.py": ["/tests/benchmark/inhib_diameter_modes/shared/__init__.py"], "/tests/benchmark/inhib_diameter_modes/count/__init__.py": ["/tests/benchmark/inhib_diameter_modes/count/count.py"], "/tests/benchmark/preprocess_img/__init__.py": ["/tests/benchmark/preprocess_img/preproc.py"], "/tests/benchmark/benchmark_tools/atb_names/__init__.py": ["/tests/benchmark/benchmark_tools/atb_names/usecases/__init__.py"], "/python-module/astimp_tools/datamodels.py": ["/python-module/astimp_tools/image.py"], "/tests/benchmark/inhib_diameter_modes/nlclsq/__init__.py": ["/tests/benchmark/inhib_diameter_modes/nlclsq/nlclsq.py"], "/tests/benchmark/inhib_diameter_modes/slopeOP/__init__.py": ["/tests/benchmark/inhib_diameter_modes/slopeOP/slopeOP.py"], "/tests/benchmark/inhib_diameter_modes/student/__init__.py": ["/tests/benchmark/inhib_diameter_modes/student/student.py"], "/python-module/astimp_tools/artist.py": ["/python-module/astimp_tools/image.py"], "/tests/benchmark/inhib_diameter_modes/op/__init__.py": ["/tests/benchmark/inhib_diameter_modes/op/op.py"], "/tests/benchmark/inhib_diameter_modes/slopeOP/slopeOP.py": ["/tests/benchmark/inhib_diameter_modes/shared/__init__.py"], "/tests/benchmark/preprocess_img/__main__.py": ["/tests/benchmark/preprocess_img/preproc.py"], "/tests/benchmark/benchmark_tools/atb_names/usecases/__init__.py": ["/tests/benchmark/benchmark_tools/atb_names/interfaces/__init__.py"]} |
73,566 | mpascucci/AST-image-processing | refs/heads/master | /tests/benchmark/inhib_diameter_modes/student/student.py | import numpy as np
import astimp
def r_matrix(preproc, idx):
    """Radial distance matrix: distance (in pixels) of every pixel of
    preproc.img from the center of pellet number *idx*."""
    image = preproc.img
    center_x, center_y = preproc.circles[idx].center
    xs = np.arange(0, image.shape[1]) - center_x
    ys = np.arange(0, image.shape[0]) - center_y
    X, Y = np.meshgrid(xs, ys)
    return np.sqrt(X**2 + Y**2)
def measureOneDiameter(preproc, pellet_idx):
    """Return an inhibition diameter object with the measured diameter"""
    measured = _measureOneDiameter(preproc, pellet_idx)
    return astimp.InhibDisk(diameter=2 * measured["radius"], confidence=1)
def _measureOneDiameter(preproc, pellet_idx):
    """Measure inhibition radius with the student test method

    Scans candidate radii and picks the one that best separates the pixel
    intensities inside vs. outside the circle, as measured by a two-sample
    t statistic (Welch form).

    Returns a dict with the scanned radii ("x"), the normalized t values
    ("y"), the chosen "radius" in mm, and "full_inhib" (True when no
    inhibition ring was detected and the minimum radius is returned).
    """
    idx = pellet_idx
    img = preproc.img
    px_per_mm = preproc.px_per_mm
    # radial distance of every pixel from the pellet center, converted to mm
    R = r_matrix(preproc, idx)/px_per_mm
    # scan radii between 3 mm and 20 mm
    Rmax = 20
    Rmin = 3
    # radii values to scan
    rs = np.arange(Rmin,Rmax,0.25)
    # check if no inhibition: if the thin ring just outside the pellet is
    # already brighter than the k-means threshold, bacteria grow right up
    # to the pellet.  (img>=0 masks out pixels flagged as invalid;
    # presumably negative values mark masked regions — TODO confirm.)
    area_in = img[(R>Rmin)&(R<rs[2])&(img>=0)]
    km_centers = preproc.km_centers_local[pellet_idx]
    # threshold halfway between the two local k-means intensity centers
    th = km_centers[0] + (km_centers[1] - km_centers[0])/2
    if area_in.mean()*255 > th:
        return {"x":None, "y":None, "radius":3, "full_inhib":True}
    # calculate the T value for each radius
    t = [0]
    for r in rs[1:]:
        # NOTE: t-test can be calculated with scipy.stats.ttest_ind(area_in, area_out)
        area_in = img[(R>Rmin)&(R<r)&(img>=0)]
        area_out = img[(R>r)&(img>=0)]
        m_in = area_in.mean()
        v_in = area_in.var()
        m_out = area_out.mean()
        v_out = area_out.var()
        n_in = len(area_in)
        n_out = len(area_out)
        t.append(abs((m_in - m_out)/np.sqrt(v_in/n_in + v_out/n_out)))
    t = np.array(t)
    # the first slot was a placeholder 0; copy the neighbor so it cannot
    # distort the argmax/normalization below
    t[0] = t[1]
    # normalize t to [0, 255] for plotting ("y" output)
    t = t/np.max(t)*255
    # the best-separating radius maximizes the t statistic
    return {"x":rs, "y":t, "radius":rs[np.argmax(t)], "full_inhib":False}
73,567 | mpascucci/AST-image-processing | refs/heads/master | /tests/python-module.py | # Copyright 2019 Fondation Medecins Sans Frontières https://fondation.msf.fr/
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This file is part of the ASTapp image processing library
# Author: Marco Pascucci
from contextlib import contextmanager
import astimp
import sys
import glob
import os
import time
from imageio import imread, imwrite
import matplotlib.pyplot as plt
import numpy as np
from importlib import reload
log = sys.stdout.write
##################
# import astimp
@contextmanager
def logged_action(before, after='done'):
    """Log *before* on entry and *after* once the managed block finishes,
    even when it raises."""
    try:
        log("{}...".format(before))
        yield
    finally:
        log("{}\n".format(after))
# Smoke-test of the astimp pipeline, run end-to-end on one sample image.
img_path = "tests/images/test0.jpg"
# Test Exception
# astimp.throw_custom_exception("test")
with logged_action("read image"):
    im_np = np.array(imread(img_path))
with logged_action("crop Petri dish"):
    crop = astimp.cropPetriDish(im_np)
with logged_action("find pellets"):
    circles = astimp.find_atb_pellets(crop)
    pellets = [astimp.cutOnePelletInImage(crop, circle) for circle in circles]
with logged_action("standardize pellet"):
    # only the first channel of the first pellet is exercised here
    astimp.standardizePelletImage(pellets[0][:, :, 0])
with logged_action("read labels"):
    labels = [astimp.getOnePelletText(pellet) for pellet in pellets]
with logged_action("preprocessing"):
    preproc = astimp.inhib_diam_preprocessing(crop, circles)
with logged_action("measure diameters"):
    disks = astimp.measureDiameters(preproc)
# print()
# for disk in disks:
#     print(disk.diameter, disk.confidence)
| {"/tests/benchmark/benchmark_tools/usecases/amman/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py"], "/tests/benchmark/benchmark_tools/usecases/creteil/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py"], "/tests/benchmark/benchmark_tools/usecases/whonet_nada/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py", "/tests/benchmark/benchmark_tools/atb_names/__init__.py"], "/tests/benchmark/inhib_diameter_modes/op/op.py": ["/tests/benchmark/inhib_diameter_modes/shared/__init__.py"], "/tests/benchmark/inhib_diameter_modes/count/__init__.py": ["/tests/benchmark/inhib_diameter_modes/count/count.py"], "/tests/benchmark/preprocess_img/__init__.py": ["/tests/benchmark/preprocess_img/preproc.py"], "/tests/benchmark/benchmark_tools/atb_names/__init__.py": ["/tests/benchmark/benchmark_tools/atb_names/usecases/__init__.py"], "/python-module/astimp_tools/datamodels.py": ["/python-module/astimp_tools/image.py"], "/tests/benchmark/inhib_diameter_modes/nlclsq/__init__.py": ["/tests/benchmark/inhib_diameter_modes/nlclsq/nlclsq.py"], "/tests/benchmark/inhib_diameter_modes/slopeOP/__init__.py": ["/tests/benchmark/inhib_diameter_modes/slopeOP/slopeOP.py"], "/tests/benchmark/inhib_diameter_modes/student/__init__.py": ["/tests/benchmark/inhib_diameter_modes/student/student.py"], "/python-module/astimp_tools/artist.py": ["/python-module/astimp_tools/image.py"], "/tests/benchmark/inhib_diameter_modes/op/__init__.py": ["/tests/benchmark/inhib_diameter_modes/op/op.py"], "/tests/benchmark/inhib_diameter_modes/slopeOP/slopeOP.py": ["/tests/benchmark/inhib_diameter_modes/shared/__init__.py"], "/tests/benchmark/preprocess_img/__main__.py": ["/tests/benchmark/preprocess_img/preproc.py"], "/tests/benchmark/benchmark_tools/atb_names/usecases/__init__.py": ["/tests/benchmark/benchmark_tools/atb_names/interfaces/__init__.py"]} |
73,568 | mpascucci/AST-image-processing | refs/heads/master | /tests/benchmark/inhib_diameter_modes/student/__init__.py | ## MEASURE DIAMETERS WITH THE STUDENT TEST METHOD PRESENTED IN
## MEASUREMENT OF INHIBITION ZONE DIAMETER IN DISK SUSCEPTIBILITY TESTS BY COMPUTERIZED
## by ANDRE GAVOILLE et al. 1984
from .student import measureOneDiameter, _measureOneDiameter
| {"/tests/benchmark/benchmark_tools/usecases/amman/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py"], "/tests/benchmark/benchmark_tools/usecases/creteil/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py"], "/tests/benchmark/benchmark_tools/usecases/whonet_nada/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py", "/tests/benchmark/benchmark_tools/atb_names/__init__.py"], "/tests/benchmark/inhib_diameter_modes/op/op.py": ["/tests/benchmark/inhib_diameter_modes/shared/__init__.py"], "/tests/benchmark/inhib_diameter_modes/count/__init__.py": ["/tests/benchmark/inhib_diameter_modes/count/count.py"], "/tests/benchmark/preprocess_img/__init__.py": ["/tests/benchmark/preprocess_img/preproc.py"], "/tests/benchmark/benchmark_tools/atb_names/__init__.py": ["/tests/benchmark/benchmark_tools/atb_names/usecases/__init__.py"], "/python-module/astimp_tools/datamodels.py": ["/python-module/astimp_tools/image.py"], "/tests/benchmark/inhib_diameter_modes/nlclsq/__init__.py": ["/tests/benchmark/inhib_diameter_modes/nlclsq/nlclsq.py"], "/tests/benchmark/inhib_diameter_modes/slopeOP/__init__.py": ["/tests/benchmark/inhib_diameter_modes/slopeOP/slopeOP.py"], "/tests/benchmark/inhib_diameter_modes/student/__init__.py": ["/tests/benchmark/inhib_diameter_modes/student/student.py"], "/python-module/astimp_tools/artist.py": ["/python-module/astimp_tools/image.py"], "/tests/benchmark/inhib_diameter_modes/op/__init__.py": ["/tests/benchmark/inhib_diameter_modes/op/op.py"], "/tests/benchmark/inhib_diameter_modes/slopeOP/slopeOP.py": ["/tests/benchmark/inhib_diameter_modes/shared/__init__.py"], "/tests/benchmark/preprocess_img/__main__.py": ["/tests/benchmark/preprocess_img/preproc.py"], "/tests/benchmark/benchmark_tools/atb_names/usecases/__init__.py": ["/tests/benchmark/benchmark_tools/atb_names/interfaces/__init__.py"]} |
73,569 | mpascucci/AST-image-processing | refs/heads/master | /python-module/astimp_tools/artist.py | import matplotlib.pyplot as plt
import astimp
from .image import subimage_by_roi
# Default keyword arguments shared by the drawing helpers in this module,
# keyed by category ("artist" = matplotlib patch defaults).
default = {
    "artist": {
        'fill': False,
        'ls': '--'},
}


def apply_custom_defaults(d, default_key):
    """Fill dict ``d`` in place with the defaults registered under ``default_key``.

    Keys already present in ``d`` are left untouched.
    """
    for option, fallback in default[default_key].items():
        d.setdefault(option, fallback)
def draw_petri_dish(petri, ax=None):
    """Outline the detected petri dish on a matplotlib axis.

    Round dishes are drawn as a dashed red circle, rectangular ones as the
    bounding-box ROI. ``ax`` defaults to the current axis.
    """
    bbox = petri.boundingBox
    # DRAW CENTER
    # ax.plot(*bbox.center, 'or')
    if ax is None:
        ax = plt.gca()
    if petri.isRound:
        outline = plt.Circle(petri.center, petri.radius, fill=None, ec='r', ls='--')
        ax.add_artist(outline)
    else:
        draw_roi(bbox, ax)
def draw_roi(roi, ax, text="", ec='r', text_color='w', text_args=None, rect_args=None):
    """Draw a rectangle outlining ``roi`` on ``ax``, with an optional caption.

    Params:
    - roi: object exposing x, y, width, height, bottom attributes
    - ax: matplotlib axis
    - text: optional caption drawn at the bottom-left corner of the roi
    - ec: rectangle edge color
    - text_color: caption color
    - text_args: extra kwargs for plt.text (mutated in place when passed, as before)
    - rect_args: extra kwargs for plt.Rectangle (mutated in place when passed, as before)

    Bug fix: the defaults used to be shared mutable dicts (``{}``); since the
    function writes into them (apply_custom_defaults, ``rect_args["ec"]``),
    the styling of one call leaked into every later call. ``None`` sentinels
    give each call a fresh dict while keeping the same behavior for callers
    that pass their own dicts.
    """
    if text_args is None:
        text_args = {}
    if rect_args is None:
        rect_args = {}
    apply_custom_defaults(rect_args, "artist")
    rect_args["ec"] = ec
    text_args["color"] = text_color
    rect = plt.Rectangle((roi.x, roi.y), roi.width, roi.height, **rect_args)
    ax.add_artist(rect)
    if text != "":
        # Renamed from `text` to avoid shadowing the parameter.
        caption = plt.text(roi.x, roi.bottom, text, **text_args)
        ax.add_artist(caption)
def draw_ast(ast, ax, **kwargs):
    """Draw an AST and the analysis results.

    options:
    atb_labels   draw a label for each antibiotic disk
        values:
        'atb'    : display antibiotic label
        'number' : display antibiotic number
        'all'    (default) both atb and number
        'off'    : do not draw labels
    """
    label_mode = kwargs.get("atb_labels", 'all')
    ax.imshow(ast.crop)
    # Pellet radius in mm is a loop invariant; hoist the division.
    pellet_radius_mm = astimp.config.Pellets_DiamInMillimeters / 2
    for idx in range(len(ast.circles)):
        center = ast.circles[idx].center
        # Label anchor: top-left of the pellet (center shifted by its radius).
        anchor = (center[0] - pellet_radius_mm * ast.px_per_mm,
                  center[1] - pellet_radius_mm * ast.px_per_mm)
        if label_mode != 'off':
            box_style = dict(boxstyle="square", ec=(0, 1, 0.5), fc=(0.2, 0.6, 0.2, 0.7))
            if label_mode == 'number':
                label = f"{idx}"
            elif label_mode == 'atb':
                label = f"{ast.labels[idx].text}"
            elif label_mode == 'all':
                label = f"{idx}:{ast.labels[idx].text}"
            else:
                # Unknown mode: keep the original behavior of drawing an
                # empty label box.
                label = ""
            ax.add_artist(plt.Text(*anchor, label, color='w', bbox=box_style))
        # Measure and overlay the inhibition-zone circle; red marks a
        # low-confidence measurement.
        inhib_disk = astimp.measureOneDiameter(ast.preproc, idx)
        edge_color = 'r' if inhib_disk.confidence < 1 else 'c'
        ring = plt.Circle(center, inhib_disk.diameter / 2 * ast.px_per_mm,
                          ec=edge_color, fc='none', ls='--', alpha=1)
        ax.add_artist(ring)
def draw_antibiotic(atb, ax):
    """Show an antibiotic crop and overlay its inhibition-zone circle."""
    ax.imshow(atb.img)
    # Convert the measured diameter (mm) to a radius in pixels.
    radius_px = atb.inhibition.diameter / 2 * atb.px_per_mm
    cx, cy = atb.center_in_roi
    ring = plt.Circle((cx, cy), radius_px, fill=False, ec='c', ls='--')
    ax.add_artist(ring)
def draw(obj, ax=None, **kwargs):
    """Dispatch drawing to the helper matching the type of ``obj``."""
    if ax is None:
        ax = plt.gca()
    # Type -> handler table; these three forward the extra kwargs.
    handlers = (
        (astimp.Roi, draw_roi),
        (astimp.AST, draw_ast),
        (astimp.Antibiotic, draw_antibiotic),
    )
    for kind, handler in handlers:
        if isinstance(obj, kind):
            handler(obj, ax, **kwargs)
            return
    if isinstance(obj, astimp.PetriDish):
        # Petri-dish drawing takes no extra options (as in the original code).
        draw_petri_dish(obj, ax)
        return
    raise AttributeError("Don't know how to draw: {}.".format(str(obj)))
| {"/tests/benchmark/benchmark_tools/usecases/amman/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py"], "/tests/benchmark/benchmark_tools/usecases/creteil/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py"], "/tests/benchmark/benchmark_tools/usecases/whonet_nada/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py", "/tests/benchmark/benchmark_tools/atb_names/__init__.py"], "/tests/benchmark/inhib_diameter_modes/op/op.py": ["/tests/benchmark/inhib_diameter_modes/shared/__init__.py"], "/tests/benchmark/inhib_diameter_modes/count/__init__.py": ["/tests/benchmark/inhib_diameter_modes/count/count.py"], "/tests/benchmark/preprocess_img/__init__.py": ["/tests/benchmark/preprocess_img/preproc.py"], "/tests/benchmark/benchmark_tools/atb_names/__init__.py": ["/tests/benchmark/benchmark_tools/atb_names/usecases/__init__.py"], "/python-module/astimp_tools/datamodels.py": ["/python-module/astimp_tools/image.py"], "/tests/benchmark/inhib_diameter_modes/nlclsq/__init__.py": ["/tests/benchmark/inhib_diameter_modes/nlclsq/nlclsq.py"], "/tests/benchmark/inhib_diameter_modes/slopeOP/__init__.py": ["/tests/benchmark/inhib_diameter_modes/slopeOP/slopeOP.py"], "/tests/benchmark/inhib_diameter_modes/student/__init__.py": ["/tests/benchmark/inhib_diameter_modes/student/student.py"], "/python-module/astimp_tools/artist.py": ["/python-module/astimp_tools/image.py"], "/tests/benchmark/inhib_diameter_modes/op/__init__.py": ["/tests/benchmark/inhib_diameter_modes/op/op.py"], "/tests/benchmark/inhib_diameter_modes/slopeOP/slopeOP.py": ["/tests/benchmark/inhib_diameter_modes/shared/__init__.py"], "/tests/benchmark/preprocess_img/__main__.py": ["/tests/benchmark/preprocess_img/preproc.py"], "/tests/benchmark/benchmark_tools/atb_names/usecases/__init__.py": ["/tests/benchmark/benchmark_tools/atb_names/interfaces/__init__.py"]} |
73,570 | mpascucci/AST-image-processing | refs/heads/master | /python-module/astimp_tools/image.py | import numpy as np
import cv2
import astimp
def subimage_by_roi(img, roi):
    """Return the rectangular sub-image of ``img`` delimited by ``roi``.

    ``roi`` must expose left/top/right/bottom; bounds are cast to int and
    used as half-open slice limits (a view, not a copy, is returned).
    """
    rows = slice(int(roi.top), int(roi.bottom))
    cols = slice(int(roi.left), int(roi.right))
    return img[rows, cols]
def angular_profile(image, r, d_r=2, d_angle=1, a=0, b=359, f=np.mean):
    """Extract an angular profile from image.

    Params:
    - image: a grayscale image (2D array)
    - r : radius of the sampled ring
    - d_r: ring width. All pixels within [r-d_r//2, r+d_r//2) are selected
    - d_angle: the angular resolution of the profile (degrees)
    - a: start angle (degrees)
    - b: end angle (degrees)
    - f: aggregation function applied to the selected pixels (e.g. np.mean)

    Returns a (2, 360//d_angle) array: row 0 holds the bin angle, row 1 the
    aggregated pixel value at that angle. Bins outside [a, b] contain the
    aggregate of an empty selection (NaN for np.mean).
    """
    Y, X = np.indices(image.shape)
    cx = image.shape[1]//2
    cy = image.shape[0]//2
    X -= cx
    Y -= cy
    R = np.sqrt(X**2 + Y**2).astype(np.uint64)
    # Per-pixel angle in [0, 360), with the y axis flipped to match image
    # coordinates.
    Z = X - 1j*Y
    theta = np.angle(Z*np.exp(1j*np.pi)) + np.pi
    theta = theta*180/np.pi
    a = a % 360
    b = b % 360
    ring = np.logical_and(R >= r - d_r//2, R < r + d_r//2)
    if a > b:
        # The angular interval wraps around 0 degrees.
        points = np.where(np.logical_or(theta >= a, theta <= b) * ring)
    else:
        points = np.where(np.logical_and(theta >= a, theta <= b) * ring)
    key = np.argsort(theta[points])
    x = theta[points][key]
    y = image[points][key]
    ux = np.arange(0, 360, d_angle)
    profile = np.zeros((2, len(ux)))
    # Fix: the loop variable used to shadow the `r` (radius) parameter.
    for i, ang in enumerate(ux):
        # NOTE: this is approximative because when x<d_angle the selected
        # interval is smaller
        profile[:, i] = (ang, f(y[abs(x - ang) < d_angle]))
    return profile
def radial_profile(image, angles, d_angle=1, r_min=0, r_max=np.inf, f=np.mean, interpolation=True):
    """Extract radial profiles from image.

    Params:
    - image: a grayscale image (2D array)
    - angles: angles (degrees) at which radial profiles are extracted (iterable)
    - d_angle: angular width. Pixels are selected if their angle lies within
      [angle-d_angle/2, angle+d_angle/2]
    - r_min: start radius for the profile extraction
    - r_max: end radius (clipped to the largest radius contained in the image)
    - f: function applied to pixels at the same radius
    - interpolation: if True, the profile is interpolated from r_min to r_max,
      resulting in a constant length vector (r_max+1-r_min)

    Return: a list of profiles. Each profile is a np.array of shape (2, d)
    where d may change according to the other parameters. If
    interpolation=True then d = (r_max+1-r_min).

    Fix: the default r_max used ``np.infty``, an alias removed in NumPy 2.0;
    ``np.inf`` is the identical value under the supported name.
    """
    Y, X = np.indices(image.shape)
    cx = image.shape[1]//2
    cy = image.shape[0]//2
    X -= cx
    Y -= cy
    R = np.sqrt(X**2+Y**2).astype(np.uint64)
    # Clip r_max to the distance from the center to the nearest mid-edge.
    r_max = min(r_max, int(max(R[cy, 0], R[0, cx])))
    r_min = int(r_min)
    # Per-pixel angle in [0, 360), y axis flipped to match image coordinates.
    Z = X - 1j*Y
    theta = np.angle(Z*np.exp(1j*np.pi)) + np.pi
    theta = theta*180/np.pi
    profiles = []
    for alpha in angles:
        a = (alpha - d_angle/2) % 360
        b = (alpha + d_angle/2) % 360
        if a > b:
            # The angular wedge wraps around 0 degrees.
            points = np.where(
                np.logical_or(theta >= a, theta <= b) * np.logical_and(R >= r_min, R < r_max)
            )
        else:
            points = np.where(
                np.logical_and(theta >= a, theta <= b) * np.logical_and(R >= r_min, R < r_max)
            )
        key = np.argsort(R[points])
        x = R[points][key]
        y = image[points][key]
        ux = np.unique(x)
        profile_temp = np.zeros((2, len(ux)))
        # Aggregate all selected pixels sharing the same integer radius.
        for i, rad in enumerate(ux):
            profile_temp[:, i] = (rad, f(y[x == rad]))
        if interpolation:
            profile = np.empty((2, r_max + 1 - r_min))
            profile[0] = np.arange(r_min, r_max + 1)
            profile[1] = np.interp(profile[0], *profile_temp)
        else:
            profile = profile_temp
        profiles.append(profile)
    return profiles
class InvalidPelletIndex(Exception):
    """Error type for invalid pellet indices.

    NOTE(review): no raise site is visible in this file chunk — presumably
    raised by callers elsewhere in the package; confirm before removing.
    """
    pass
def get_atb_angle(ast, pivot_atb_idx, other_atb_idx):
    """Return the angle (degrees) of the other antibiotic as seen from the pivot.

    Measured from the pellet-circle centers, counter-clockwise from the
    positive x axis, in (-180, 180].
    """
    pivot_center = ast.get_atb_by_idx(pivot_atb_idx).pellet_circle.center
    other_center = ast.get_atb_by_idx(other_atb_idx).pellet_circle.center
    dx = other_center[0] - pivot_center[0]
    dy = other_center[1] - pivot_center[1]
    return np.degrees(np.arctan2(dy, dx))
def rotate_image(img, center=None, angle=0, adjust_size=False):
    """Rotate ``img`` by ``angle`` degrees around ``center``.

    Params:
    - img: source image (numpy array, HxW[xC])
    - center: (cx, cy) rotation center in pixels; defaults to the image center
    - angle: rotation angle in degrees (cv2.getRotationMatrix2D convention)
    - adjust_size: if True, enlarge the output canvas so the whole rotated
      image stays visible

    Returns (rotated_image, rotation_matrix).

    Bug fixes:
    - in the adjust_size branch the new width was written into ``w`` and then
      used to compute the height; the new dimensions are now computed from
      the ORIGINAL h/w before anything is overwritten;
    - cv2.warpAffine expects dsize as (width, height) but was given (h, w),
      silently swapping the axes for non-square images.
    """
    h, w = img.shape[:2]
    if center is not None:
        cx, cy = center
    else:
        cx = w//2
        cy = h//2
    rot_mat = cv2.getRotationMatrix2D((cx, cy), angle, 1)
    if adjust_size:
        cos = np.abs(rot_mat[0, 0])
        sin = np.abs(rot_mat[0, 1])
        # compute the new bounding dimensions of the image
        new_w = int((h * sin) + (w * cos))
        new_h = int((h * cos) + (w * sin))
        # adjust the rotation matrix to take into account translation
        rot_mat[0, 2] += (new_w / 2) - cx
        rot_mat[1, 2] += (new_h / 2) - cy
        w, h = new_w, new_h
    rot_img = cv2.warpAffine(img, rot_mat, (w, h))
    return rot_img, rot_mat
def rotate_ast_and_rois(ast, angle=np.pi):
    """Rotate the ast's Petri-dish image around its center.
    Output the rotated image, the updated pellets circles and ROIs

    NOTE(review): rotate_image forwards `angle` to cv2.getRotationMatrix2D,
    which expects DEGREES, yet the default here is np.pi (radians) — confirm
    the intended unit and default value.
    """
    # Extra 180 degrees on top of the requested angle — presumably to flip
    # the dish upright; verify against callers.
    rot, M = rotate_image(ast.crop, angle=angle+180)
    h, w = ast.crop.shape[:2]
    cx = w//2
    cy = h//2
    # Build a fresh AST shell and fill its private fields with rotated data.
    ast1 = astimp.AST(np.array(0))
    ast1._crop = rot
    ast1._px_per_mm = ast.px_per_mm
    ast1._circles = []
    for c in ast.circles:
        px,py = c.center
        # Pellet coordinates relative to the crop center.
        x = (px - cx)
        y = (py - cy)
        # Apply the 2x2 rotation part of M, then translate into the rotated
        # image frame.
        # NOTE(review): shape[0] (height) is used for the x offset and
        # shape[1] (width) for y — correct only for square crops; confirm.
        px = x*M[0,0] + y*M[0,1] + rot.shape[0]//2
        py = x*M[1,0] + y*M[1,1] + rot.shape[1]//2
        # Circle radius: 3 mm expressed in pixels — TODO confirm this matches
        # the configured pellet radius.
        ast1._circles.append(astimp.Circle((px,py),ast.px_per_mm*3))
    return rot, ast1.circles, ast1.rois
def draw_circle(img, radius, center=None, bgr_color=(255,255,255), thickness=-1):
    """Draw a circle on ``img`` in place (default: centered, filled).

    ``center`` defaults to the image center; thickness=-1 fills the disk.
    Returns ``img`` for convenience.
    """
    if center is None:
        center_xy = (int(img.shape[1]//2), int(img.shape[0]//2))
    else:
        cx, cy = center
        center_xy = (int(cx), int(cy))
    cv2.circle(img, center_xy, int(radius), bgr_color, thickness=thickness)
    return img
def mask_pellet_in_atb_image(atb, bgr_color=(255,255,255)):
    """return a copy of the atb image with the pellet masked with a
    circle of the specified color."""
    out = atb.img.copy()
    # Pellet radius in pixels, derived from the configured physical diameter.
    radius_px = int(atb.px_per_mm * astimp.config.Pellets_DiamInMillimeters/2)
    draw_circle(out, radius=radius_px, bgr_color=bgr_color, thickness=-1)
    return out
def mask_pellets_in_ast_image(ast, pellet_indices=(), bgr_color=(255,255,255), all=False):
    """Return an image of the petri dish with masked pellets.

    The selected pellets are replaced with a disk of the specified color.
    If all=True, every pellet is masked and pellet_indices is ignored.

    Notes:
    - the default of ``pellet_indices`` is now an immutable tuple instead of
      a shared mutable list (idiom fix; behavior unchanged);
    - the ``all`` parameter shadows the builtin of the same name; the name is
      kept for backward compatibility with existing callers.
    """
    masked_image = ast.crop.copy()
    # Pellet radius in pixels, derived from the configured physical diameter.
    pellet_r = int(ast.px_per_mm * astimp.config.Pellets_DiamInMillimeters/2)
    if all:
        pellet_indices = range(len(ast.circles))
    for pellet_idx in pellet_indices:
        draw_circle(masked_image, radius=pellet_r,
                    center=ast.circles[pellet_idx].center, bgr_color=bgr_color, thickness=-1)
    return masked_image
73,571 | mpascucci/AST-image-processing | refs/heads/master | /tests/benchmark/benchmark_tools/interfaces/__init__.py | # benchmark_tool.interfaces
# Copyright 2019 Fondation Medecins Sans Frontières https://fondation.msf.fr/
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This file is part of the ASTapp image processing library
# Author: Marco Pascucci
from abc import ABC, abstractmethod, abstractproperty
from .better_abc import ABCMeta, abstract_attribute
# Fix: the Mapping ABC alias was removed from `collections` in Python 3.10;
# it has lived in collections.abc since Python 3.3.
from collections.abc import Mapping
from pandas import DataFrame
import yaml
class AST_annotation(ABC, Mapping, metaclass=ABCMeta):
    """Annotation of the results of one AST.
    This class behaves like a dictionary which keys are extended antibiotic names.

    Subclasses must provide the abstract attributes below; the ``results``
    property then assembles them into a pandas DataFrame indexed by
    antibiotic name, which backs the Mapping interface.
    NOTE(review): ABCMeta/abstract_attribute come from the local better_abc
    module — presumably an abc.ABCMeta subclass; confirm before refactoring.
    """
    # --- abstract attributes every concrete annotation must define ---
    @abstract_attribute
    def ast_id(self): pass  # identifier of this AST (shown in __repr__)
    @abstract_attribute
    def species(self): pass  # species string (shown in __repr__)
    @abstract_attribute
    def sample_date(self): pass  # sample date (shown in __repr__)
    @abstract_attribute
    def atb_names(self): pass  # antibiotic names; index of the results DataFrame
    @abstract_attribute
    def sir_values(self): pass  # per-antibiotic SIR values ('S'/'R'/None per write_yaml example)
    @abstract_attribute
    def raw_sir_values(self): pass  # per-antibiotic raw SIR values ("raw_sir" column)
    @abstract_attribute
    def diameters(self): pass  # per-antibiotic inhibition diameters ("diam" column)
    @abstract_attribute
    def sample_type(self): pass  # sample type (shown in __repr__)
    @property
    def results(self):
        # Assemble the per-antibiotic columns into a single DataFrame,
        # indexed by antibiotic name. Rebuilt on every access.
        d = {
            "diam": self.diameters,
            "sir": self.sir_values,
            "raw_sir": self.raw_sir_values
        }
        return DataFrame(d, index=self.atb_names)
    @property
    def tested_atbs(self):
        # Keep only rows with at least one non-NA value among
        # diam/sir/raw_sir (dropna with thresh=1).
        data = self.results
        return data.dropna(thresh=1)
    @abstract_attribute
    def expert_system_status(self):
        """A list of expert system's status variables.
        each test is represented as a dict {'name':test-name, 'value':test-value}"""
    def iteritems(self):
        # (key, value) iterator mirroring the old dict/pandas iteritems API.
        for atb in self:
            yield atb, self[atb]
    def __getitem__(self, key):
        # Row of the results DataFrame for antibiotic `key`.
        return self.results.loc[key]
    def __iter__(self):
        # Iterate over antibiotic names (the DataFrame index).
        for atb in self.results.index:
            yield atb
    def __len__(self):
        return len(self.results)
    def __repr__(self):
        # Human-readable multi-line summary of the annotation.
        lines = (
            "AST annotation : {}".format(self.ast_id),
            "species : {}".format(self.species),
            "sample type : {}".format(self.sample_type),
            "sample date : {}".format(self.sample_date),
            "tested atbs: {}".format(len(self.tested_atbs))
        )
        text = "\n".join(lines)
        return text
class Annotations_set(ABC, Mapping, metaclass=ABCMeta):
    """Collection of AST annotations.

    Mapping-like: keys are ast_ids, values are AST_annotation instances.
    NOTE(review): __iter__ yields the annotation VALUES, while Mapping
    conventionally iterates keys — inherited Mapping helpers (keys/items)
    may therefore behave unexpectedly; confirm intended semantics.
    """
    @abstract_attribute
    def ast_ids(self): pass  # identifiers of the annotations in this set
    @abstractmethod
    def get_ast(self, ast_id):
        """Should return an instance of AST_annotation corresponding to ast_id by looking in self.df"""
        pass
    def get_ast_slice(self, slice_instance):
        """Should return a list of AST_annotation instances."""
        # Opt-in feature: subclasses override this to support obj[a:b].
        raise(NotImplementedError("Slicing (list[a:b]) is not allowed by default. \
            Override get_ast_slice(self, slice_instance) if you want to use it"))
    def __getitem__(self, selection):
        # Plain key -> one annotation; slice -> list of annotations.
        if isinstance(selection, slice):
            return self.get_ast_slice(selection)
        else:
            return self.get_ast(selection)
    def __iter__(self):
        # Yields the annotations themselves, not their ids (see class note).
        for ast_id in self.ast_ids:
            yield self[ast_id]
    def __len__(self):
        return len(self.ast_ids)
    def write_yaml(self, outfile):
        """
        Writes a YAML file representing the AST annotations.
        This YAML file can be used as input to benchmark.py.
        YAML Format is:
        ast_image_filename1.jpg:
        - {diameter: 1.0, label: atb1, sir: S}
        - {diameter: 2.0, label: atb1, sir: null}
        - {diameter: 3.0, label: atb2, sir: R}
        ast_image_filename2.jpg:
        - {diameter: 1.0, label: atb2, sir: S}
        - {diameter: 2.0, label: atb3, sir: S}
        Which corresponds to a python object like:
        {'ast_image_filename1.jpg':
        [{'label': 'atb1', 'diameter': 1.0, 'sir': 'S'},
        {'label': 'atb1', 'diameter': 2.0, 'sir': None}]
        {'label': 'atb2', 'diameter': 3.0, 'sir': 'R'}],
        'ast_image_filename2.jpg':
        [{'label': 'atb2', 'diameter': 1.0, 'sir': 'S'},
        {'label': 'atb3', 'diameter': 2.0, 'sir': 'S'}]}
        """
        with open(outfile, 'w') as file:
            yaml.dump(self.create_yaml(), file)
    def create_yaml(self):
        # Build the {ast_id: [{label, diameter, sir}, ...]} structure
        # described in write_yaml's docstring, keeping only tested atbs.
        yaml_data = {}
        for ast_id in self.ast_ids:
            ast_yaml = []
            for _i, atb in self[ast_id].tested_atbs.iterrows():
                ast_yaml.append(
                    {'label': atb.name, 'diameter': atb.diam, 'sir': atb.sir})
            yaml_data[ast_id] = ast_yaml
        return yaml_data
| {"/tests/benchmark/benchmark_tools/usecases/amman/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py"], "/tests/benchmark/benchmark_tools/usecases/creteil/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py"], "/tests/benchmark/benchmark_tools/usecases/whonet_nada/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py", "/tests/benchmark/benchmark_tools/atb_names/__init__.py"], "/tests/benchmark/inhib_diameter_modes/op/op.py": ["/tests/benchmark/inhib_diameter_modes/shared/__init__.py"], "/tests/benchmark/inhib_diameter_modes/count/__init__.py": ["/tests/benchmark/inhib_diameter_modes/count/count.py"], "/tests/benchmark/preprocess_img/__init__.py": ["/tests/benchmark/preprocess_img/preproc.py"], "/tests/benchmark/benchmark_tools/atb_names/__init__.py": ["/tests/benchmark/benchmark_tools/atb_names/usecases/__init__.py"], "/python-module/astimp_tools/datamodels.py": ["/python-module/astimp_tools/image.py"], "/tests/benchmark/inhib_diameter_modes/nlclsq/__init__.py": ["/tests/benchmark/inhib_diameter_modes/nlclsq/nlclsq.py"], "/tests/benchmark/inhib_diameter_modes/slopeOP/__init__.py": ["/tests/benchmark/inhib_diameter_modes/slopeOP/slopeOP.py"], "/tests/benchmark/inhib_diameter_modes/student/__init__.py": ["/tests/benchmark/inhib_diameter_modes/student/student.py"], "/python-module/astimp_tools/artist.py": ["/python-module/astimp_tools/image.py"], "/tests/benchmark/inhib_diameter_modes/op/__init__.py": ["/tests/benchmark/inhib_diameter_modes/op/op.py"], "/tests/benchmark/inhib_diameter_modes/slopeOP/slopeOP.py": ["/tests/benchmark/inhib_diameter_modes/shared/__init__.py"], "/tests/benchmark/preprocess_img/__main__.py": ["/tests/benchmark/preprocess_img/preproc.py"], "/tests/benchmark/benchmark_tools/atb_names/usecases/__init__.py": ["/tests/benchmark/benchmark_tools/atb_names/interfaces/__init__.py"]} |
73,572 | mpascucci/AST-image-processing | refs/heads/master | /tests/benchmark/inhib_diameter_modes/op/__init__.py | from .op import measureOneDiameter, _measureOneDiameter
| {"/tests/benchmark/benchmark_tools/usecases/amman/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py"], "/tests/benchmark/benchmark_tools/usecases/creteil/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py"], "/tests/benchmark/benchmark_tools/usecases/whonet_nada/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py", "/tests/benchmark/benchmark_tools/atb_names/__init__.py"], "/tests/benchmark/inhib_diameter_modes/op/op.py": ["/tests/benchmark/inhib_diameter_modes/shared/__init__.py"], "/tests/benchmark/inhib_diameter_modes/count/__init__.py": ["/tests/benchmark/inhib_diameter_modes/count/count.py"], "/tests/benchmark/preprocess_img/__init__.py": ["/tests/benchmark/preprocess_img/preproc.py"], "/tests/benchmark/benchmark_tools/atb_names/__init__.py": ["/tests/benchmark/benchmark_tools/atb_names/usecases/__init__.py"], "/python-module/astimp_tools/datamodels.py": ["/python-module/astimp_tools/image.py"], "/tests/benchmark/inhib_diameter_modes/nlclsq/__init__.py": ["/tests/benchmark/inhib_diameter_modes/nlclsq/nlclsq.py"], "/tests/benchmark/inhib_diameter_modes/slopeOP/__init__.py": ["/tests/benchmark/inhib_diameter_modes/slopeOP/slopeOP.py"], "/tests/benchmark/inhib_diameter_modes/student/__init__.py": ["/tests/benchmark/inhib_diameter_modes/student/student.py"], "/python-module/astimp_tools/artist.py": ["/python-module/astimp_tools/image.py"], "/tests/benchmark/inhib_diameter_modes/op/__init__.py": ["/tests/benchmark/inhib_diameter_modes/op/op.py"], "/tests/benchmark/inhib_diameter_modes/slopeOP/slopeOP.py": ["/tests/benchmark/inhib_diameter_modes/shared/__init__.py"], "/tests/benchmark/preprocess_img/__main__.py": ["/tests/benchmark/preprocess_img/preproc.py"], "/tests/benchmark/benchmark_tools/atb_names/usecases/__init__.py": ["/tests/benchmark/benchmark_tools/atb_names/interfaces/__init__.py"]} |
73,573 | mpascucci/AST-image-processing | refs/heads/master | /pellet_labels/tests/trainer/test_model.py | import unittest
import numpy as np
import cv2
from trainer import model
class TestFormatPill(unittest.TestCase):
    """format_pill_for_inference must always yield a square target-size
    image, whatever the input shape."""

    def test_bigger(self):
        # Input larger than the target size is reduced to 4x4.
        resized = model.format_pill_for_inference(np.ones((5, 5, 3)), 4)
        self.assertEqual((4, 4, 3), resized.shape)

    def test_smaller(self):
        # Input smaller than the target size is grown to 4x4.
        resized = model.format_pill_for_inference(np.ones((3, 3, 3)), 4)
        self.assertEqual((4, 4, 3), resized.shape)

    def test_non_square(self):
        # A rectangular input also ends up square at the target size.
        resized = model.format_pill_for_inference(np.ones((3, 5, 3)), 4)
        self.assertEqual((4, 4, 3), resized.shape)
class TestMapToCommon(unittest.TestCase):
    """map_to_common_space must output a single-channel (H, W) image."""

    def test_color(self):
        # A 3-channel input collapses to a 2D array.
        mapped = model.map_to_common_space(np.ones((4, 4, 3), dtype=np.uint8))
        self.assertEqual((4, 4), mapped.shape)

    def test_not_color(self):
        # An already single-channel input keeps its 2D shape.
        mapped = model.map_to_common_space(np.ones((4, 4), dtype=np.uint8))
        self.assertEqual((4, 4), mapped.shape)
class TestOversample(unittest.TestCase):
    """oversample_rare_classes must duplicate samples of under-represented
    classes up to the requested minimum, extending the weights accordingly."""

    @staticmethod
    def _dataset():
        # 10 one-hot-labelled samples: 9 of class 0 and a single one of
        # class 1 (the rare class), all with unit sample weight.
        imgs = np.ones((10, 5, 5, 1))
        labels = np.ones((10, 2))
        labels[:, 1] = 0
        labels[-1] = [0, 1]
        weights = np.ones(10)
        return imgs, labels, weights

    def test_rare(self):
        imgs, labels, weights = self._dataset()
        # min_samples=3 forces the rare class (1 sample) to be topped up.
        (out_imgs, out_labels), out_weights = model.oversample_rare_classes(
            imgs, labels, weights, 3)
        # Two copies of the rare sample are appended: 10 -> 12.
        self.assertEqual(12, len(out_imgs))
        # The appended samples belong to the rare class.
        self.assertEqual(1, out_labels[-2, 1])
        self.assertEqual(1, out_labels[-1, 1])
        # Weights were extended to the new length and not rescaled.
        self.assertEqual(12, sum(out_weights))
        self.assertEqual(1, max(out_weights))

    def test_no_rare(self):
        imgs, labels, weights = self._dataset()
        # min_samples=1 is already satisfied: nothing should be added.
        (out_imgs, out_labels), out_weights = model.oversample_rare_classes(
            imgs, labels, weights, 1)
        self.assertEqual(10, len(out_imgs))
        self.assertEqual(0, out_labels[-2, 1])
        self.assertEqual(1, out_labels[-1, 1])
        self.assertEqual(10, sum(out_weights))
        self.assertEqual(1, max(out_weights))
if __name__ == '__main__':
unittest.main() | {"/tests/benchmark/benchmark_tools/usecases/amman/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py"], "/tests/benchmark/benchmark_tools/usecases/creteil/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py"], "/tests/benchmark/benchmark_tools/usecases/whonet_nada/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py", "/tests/benchmark/benchmark_tools/atb_names/__init__.py"], "/tests/benchmark/inhib_diameter_modes/op/op.py": ["/tests/benchmark/inhib_diameter_modes/shared/__init__.py"], "/tests/benchmark/inhib_diameter_modes/count/__init__.py": ["/tests/benchmark/inhib_diameter_modes/count/count.py"], "/tests/benchmark/preprocess_img/__init__.py": ["/tests/benchmark/preprocess_img/preproc.py"], "/tests/benchmark/benchmark_tools/atb_names/__init__.py": ["/tests/benchmark/benchmark_tools/atb_names/usecases/__init__.py"], "/python-module/astimp_tools/datamodels.py": ["/python-module/astimp_tools/image.py"], "/tests/benchmark/inhib_diameter_modes/nlclsq/__init__.py": ["/tests/benchmark/inhib_diameter_modes/nlclsq/nlclsq.py"], "/tests/benchmark/inhib_diameter_modes/slopeOP/__init__.py": ["/tests/benchmark/inhib_diameter_modes/slopeOP/slopeOP.py"], "/tests/benchmark/inhib_diameter_modes/student/__init__.py": ["/tests/benchmark/inhib_diameter_modes/student/student.py"], "/python-module/astimp_tools/artist.py": ["/python-module/astimp_tools/image.py"], "/tests/benchmark/inhib_diameter_modes/op/__init__.py": ["/tests/benchmark/inhib_diameter_modes/op/op.py"], "/tests/benchmark/inhib_diameter_modes/slopeOP/slopeOP.py": ["/tests/benchmark/inhib_diameter_modes/shared/__init__.py"], "/tests/benchmark/preprocess_img/__main__.py": ["/tests/benchmark/preprocess_img/preproc.py"], "/tests/benchmark/benchmark_tools/atb_names/usecases/__init__.py": ["/tests/benchmark/benchmark_tools/atb_names/interfaces/__init__.py"]} |
73,574 | mpascucci/AST-image-processing | refs/heads/master | /tests/benchmark/inhib_diameter_modes/slopeOP/slopeOP.py | import astimp
import slopeOP
import numpy as np
from ..shared import no_inhibition, total_inhibition, varianceHall, varianceHallDiff
def measureOneDiameter(preproc, pellet_idx, penalty=None, constraint='null'):
    """Measure one inhibition diameter with slopeOP and wrap it in an
    astimp.InhibDisk.

    Bug fix: the unimodal variant returns 8 values while the isotonic one
    returns 6, so unpacking a fixed 6-tuple here raised ValueError whenever
    constraint="unimodal". The diameter is the first element of either
    tuple, so index it instead of unpacking.
    """
    result = _measureOneDiameter(preproc, pellet_idx, penalty, constraint)
    diameter = result[0]
    # This method does not estimate a confidence; report 0.
    return astimp.InhibDisk(diameter=diameter, confidence=0)
def measureOneDiameter_isotonic(preproc, pellet_idx, penalty=None):
    """Measure one inhibition diameter using slopeOP with an isotonic constraint.

    Params:
    - preproc: astimp preprocessed image (provides radial_profile and px_per_mm)
    - pellet_idx: index of the antibiotic pellet to measure
    - penalty: slopeOP penalty; if None it is estimated from the data

    Returns (diameter, cp, cpts, offset, slopeOPdata, penalty) — 6 values.
    """
    # Radial intensity profile around the pellet ("maxaverage" flavor).
    y = np.array(astimp.radial_profile(
        preproc, pellet_idx, astimp.profile_type["maxaverage"]))
    px_per_mm = preproc.px_per_mm
    # Skip the pellet itself plus ~1 mm of tolerance at the profile start.
    tolerance = int(np.round(px_per_mm))
    pellet_radius_in_px = int(np.round(px_per_mm)*3)  # 3 mm pellet radius — TODO confirm
    offset = pellet_radius_in_px+tolerance
    slopeOPdata = []
    cpts = []
    if no_inhibition(y[offset:], preproc, pellet_idx):
        # No inhibition zone: place the "edge" at the profile end.
        cp = len(y)
    elif total_inhibition(y[offset:], preproc, pellet_idx):
        # Total inhibition: the edge collapses onto the pellet border.
        cp = pellet_radius_in_px
    else:
        # Quantize intensities to shrink slopeOP's finite state space.
        reduce_factor = 4
        slopeOPdata = np.array(np.round(y[offset:]/reduce_factor), dtype=int)
        if penalty is None:
            # Penalty heuristic: noise level (Hall difference estimator)
            # scaled by the signal amplitude and the log profile length.
            temp_max = np.max(slopeOPdata)
            y_temp = slopeOPdata
            sigma = np.sqrt(varianceHallDiff(y_temp))
            penalty = sigma*temp_max*np.log(len(y_temp)) * 2
        states = sorted(list(set(slopeOPdata)))
        omega = slopeOP.slopeOP(slopeOPdata, states,
                                penalty, constraint="isotonic")
        # Change-points are relative to the truncated profile; shift back.
        cpts = np.array(omega.GetChangepoints()) + offset
        cpts_filtered = cpts[cpts > offset]
        # Zone edge: mean of the first two retained change-points.
        cp = np.mean(cpts_filtered[0:2])
    # Convert the edge radius (px) to a diameter in mm.
    diameter = (cp)/preproc.px_per_mm*2
    return diameter, cp, cpts, offset, slopeOPdata, penalty
def measureOneDiameter_unimodal(preproc, pellet_idx, penalty=None):
    """Measure one inhibition diameter using slopeOP with a unimodal constraint.

    Returns (diameter, cp, cpts, offset, slopeOPdata, penalty, omega,
    reduce_factor) — 8 values, two more than the isotonic variant.
    """
    # radial profile data
    y = np.array(astimp.radial_profile(
        preproc, pellet_idx, astimp.profile_type["max"]))
    # image scale
    px_per_mm = preproc.px_per_mm
    # Skip the pellet itself plus ~1 mm of tolerance at the profile start.
    tolerance = int(np.round(px_per_mm))
    pellet_radius_in_px = int(np.round(px_per_mm)*3)
    offset = pellet_radius_in_px+tolerance
    slopeOPdata = []
    cpts = []
    reduce_factor = None
    # NOTE(review): both early-exit branches are disabled (`if False`) in
    # this variant, so the else branch always runs.
    if False: # no_inhibition(y[offset:], preproc, pellet_idx):
        cp = len(y)
    elif False: #total_inhibition(y[offset:], preproc, pellet_idx):
        cp = pellet_radius_in_px
    else:
        # Quantization factor for slopeOP's finite state space.
        reduce_factor = 4
        if penalty is None:
            # penalty estimation
            y_temp = np.array(np.round(y[offset:]/reduce_factor), dtype=int)
            temp_max = np.max(y_temp)
            y_temp = y_temp  # (no-op kept from the original code)
            sigma = np.sqrt(varianceHallDiff(y_temp))
            penalty = 2*sigma*np.log(len(y_temp)) * temp_max
        # data prepared for slopeOP (negated profile for the unimodal fit)
        slopeOPdata = np.array(-np.round(y/reduce_factor), dtype=int)
        # finite states
        states = sorted(list(set(slopeOPdata)))
        # run slopeOP with unimodal constraint
        omega = slopeOP.slopeOP(slopeOPdata, states,
                                penalty, constraint="unimodal")
        cpts = np.array(omega.GetChangepoints())
        # Keep only change-points past the pellet+tolerance region.
        cpts_filtered = cpts[cpts > offset]
        # assert len(cpts_filtered) > 1, "found {} cpts < 1".format(len(cpts))
        cp = np.mean(cpts_filtered[0:2])
    # Convert the edge radius (px) to a diameter in mm.
    diameter = (cp)/preproc.px_per_mm*2
    return diameter, cp, cpts, offset, slopeOPdata, penalty, omega, reduce_factor
def _measureOneDiameter(preproc, pellet_idx, penalty=None, constraint='null'):
    """Dispatch the diameter measurement to the isotonic or unimodal variant.

    Raises AttributeError for an unknown constraint value and ValueError
    when the default 'null' placeholder is left unchanged.
    """
    if constraint not in ("unimodal", "isotonic", 'null'):
        raise(AttributeError("constraint must be either isotonic or unimodal"))
    if constraint == "unimodal":
        return measureOneDiameter_unimodal(preproc, pellet_idx, penalty)
    if constraint == "isotonic":
        return measureOneDiameter_isotonic(preproc, pellet_idx, penalty)
    # 'null' passes the validation above but selects no actual method.
    raise ValueError("a constraint must be chosen")
| {"/tests/benchmark/benchmark_tools/usecases/amman/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py"], "/tests/benchmark/benchmark_tools/usecases/creteil/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py"], "/tests/benchmark/benchmark_tools/usecases/whonet_nada/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py", "/tests/benchmark/benchmark_tools/atb_names/__init__.py"], "/tests/benchmark/inhib_diameter_modes/op/op.py": ["/tests/benchmark/inhib_diameter_modes/shared/__init__.py"], "/tests/benchmark/inhib_diameter_modes/count/__init__.py": ["/tests/benchmark/inhib_diameter_modes/count/count.py"], "/tests/benchmark/preprocess_img/__init__.py": ["/tests/benchmark/preprocess_img/preproc.py"], "/tests/benchmark/benchmark_tools/atb_names/__init__.py": ["/tests/benchmark/benchmark_tools/atb_names/usecases/__init__.py"], "/python-module/astimp_tools/datamodels.py": ["/python-module/astimp_tools/image.py"], "/tests/benchmark/inhib_diameter_modes/nlclsq/__init__.py": ["/tests/benchmark/inhib_diameter_modes/nlclsq/nlclsq.py"], "/tests/benchmark/inhib_diameter_modes/slopeOP/__init__.py": ["/tests/benchmark/inhib_diameter_modes/slopeOP/slopeOP.py"], "/tests/benchmark/inhib_diameter_modes/student/__init__.py": ["/tests/benchmark/inhib_diameter_modes/student/student.py"], "/python-module/astimp_tools/artist.py": ["/python-module/astimp_tools/image.py"], "/tests/benchmark/inhib_diameter_modes/op/__init__.py": ["/tests/benchmark/inhib_diameter_modes/op/op.py"], "/tests/benchmark/inhib_diameter_modes/slopeOP/slopeOP.py": ["/tests/benchmark/inhib_diameter_modes/shared/__init__.py"], "/tests/benchmark/preprocess_img/__main__.py": ["/tests/benchmark/preprocess_img/preproc.py"], "/tests/benchmark/benchmark_tools/atb_names/usecases/__init__.py": ["/tests/benchmark/benchmark_tools/atb_names/interfaces/__init__.py"]} |
73,575 | mpascucci/AST-image-processing | refs/heads/master | /pellet_labels/tflite_converter.py | import argparse
import tensorflow as tf
from package_ensemble import EntropyThresholdLayer
def get_args():
    """Parse the command-line options for the model conversion script.

    Returns:
        argparse.Namespace with `source` (path of the Keras model to
        convert) and `destination` (path where the TFLite model is saved).
    """
    arg_parser = argparse.ArgumentParser()
    # Both options are mandatory: there is no sensible default location
    # for either the input model or the converted output.
    for flag, description in (
            ('--source', 'path to model to convert'),
            ('--destination', 'full path to save the converted model')):
        arg_parser.add_argument(flag, type=str, required=True, help=description)
    parsed, _ = arg_parser.parse_known_args()
    return parsed
def convert(source, destination):
    """Convert a saved Keras model file to TFLite format.

    Args:
        source: path to the Keras (.h5) model file to convert.
        destination: path where the serialized TFLite flatbuffer is written.
    """
    # NOTE(review): `from_keras_model_file` is the TF 1.x converter entry
    # point — confirm the pinned TensorFlow version before upgrading.
    # The ensemble model contains a custom layer, so the converter must be
    # told how to deserialize it.
    converter = tf.lite.TFLiteConverter.from_keras_model_file(
        source, custom_objects={'EntropyThresholdLayer': EntropyThresholdLayer})
    tflite_model = converter.convert()
    with open(destination, 'wb') as f:
        f.write(tflite_model)
if __name__=='__main__':
args = get_args()
convert(args.source, args.destination) | {"/tests/benchmark/benchmark_tools/usecases/amman/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py"], "/tests/benchmark/benchmark_tools/usecases/creteil/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py"], "/tests/benchmark/benchmark_tools/usecases/whonet_nada/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py", "/tests/benchmark/benchmark_tools/atb_names/__init__.py"], "/tests/benchmark/inhib_diameter_modes/op/op.py": ["/tests/benchmark/inhib_diameter_modes/shared/__init__.py"], "/tests/benchmark/inhib_diameter_modes/count/__init__.py": ["/tests/benchmark/inhib_diameter_modes/count/count.py"], "/tests/benchmark/preprocess_img/__init__.py": ["/tests/benchmark/preprocess_img/preproc.py"], "/tests/benchmark/benchmark_tools/atb_names/__init__.py": ["/tests/benchmark/benchmark_tools/atb_names/usecases/__init__.py"], "/python-module/astimp_tools/datamodels.py": ["/python-module/astimp_tools/image.py"], "/tests/benchmark/inhib_diameter_modes/nlclsq/__init__.py": ["/tests/benchmark/inhib_diameter_modes/nlclsq/nlclsq.py"], "/tests/benchmark/inhib_diameter_modes/slopeOP/__init__.py": ["/tests/benchmark/inhib_diameter_modes/slopeOP/slopeOP.py"], "/tests/benchmark/inhib_diameter_modes/student/__init__.py": ["/tests/benchmark/inhib_diameter_modes/student/student.py"], "/python-module/astimp_tools/artist.py": ["/python-module/astimp_tools/image.py"], "/tests/benchmark/inhib_diameter_modes/op/__init__.py": ["/tests/benchmark/inhib_diameter_modes/op/op.py"], "/tests/benchmark/inhib_diameter_modes/slopeOP/slopeOP.py": ["/tests/benchmark/inhib_diameter_modes/shared/__init__.py"], "/tests/benchmark/preprocess_img/__main__.py": ["/tests/benchmark/preprocess_img/preproc.py"], "/tests/benchmark/benchmark_tools/atb_names/usecases/__init__.py": ["/tests/benchmark/benchmark_tools/atb_names/interfaces/__init__.py"]} |
73,576 | mpascucci/AST-image-processing | refs/heads/master | /pellet_labels/infer.py | import argparse
import os
import time
import numpy as np
import tensorflow as tf
from trainer.model import get_data_generator, get_model_inputs, get_model_output
from trainer.pellet_list import PELLET_LIST
from package_ensemble import EntropyThresholdLayer
# Default locations of the two packaged pellet-label models.
MODEL_TFLITE = "models/ab_recognition_i2a_and_amman_data.tflite"
MODEL_KERAS = "models/ab_recognition_i2a_and_amman_data.h5"

# Command-line interface: one positional image path plus optional knobs.
parser = argparse.ArgumentParser(
    description='Infer the label using the saved model')
parser.add_argument(
    'image', type=str,
    help='path to the image file showing one pellet with label')
parser.add_argument(
    '--img-size', type=int, default=64,
    help='square size to resize input images to in pixel, default=64')
parser.add_argument(
    '--model-type', choices=['keras', 'tflite'], default='tflite',
    help='keras or tflite')
parser.add_argument(
    # Use the constant instead of repeating the path literal so the default
    # cannot drift out of sync with MODEL_TFLITE.
    '--model', default=MODEL_TFLITE,
    help='path to model file')
def timeit(method):
    """Decorator that prints the wall-clock duration of each call.

    Args:
        method: the callable to instrument.

    Returns:
        A wrapper that forwards all arguments to `method`, prints the
        elapsed time in milliseconds, and returns `method`'s result.
    """
    # Local import so the module's top-of-file import block stays untouched.
    import functools

    @functools.wraps(method)  # preserve __name__/__doc__ of the wrapped callable
    def timed(*args, **kw):
        ts = time.time()
        result = method(*args, **kw)
        te = time.time()
        print('%r %2.2f ms' % (method.__name__, (te - ts) * 1000))
        return result
    return timed
@timeit
def infer_keras(image_path, img_size, path):
    """Run pellet-label inference with the Keras model at `path`.

    The single image is preprocessed, fed through the (non-shuffling)
    data generator, classified, and the prediction is reported via
    `get_model_output`.
    """
    batch = np.array([get_model_inputs(image_path, img_size)])
    flow = get_data_generator().flow(batch, shuffle=False)
    # The saved graph contains a custom layer; tell Keras how to rebuild it.
    custom = {'EntropyThresholdLayer': EntropyThresholdLayer}
    model = tf.keras.models.load_model(path, custom)
    get_model_output(model.predict(flow), PELLET_LIST)
@timeit
def infer_tflite(image_path, img_size, path):
    """Run pellet-label inference with the TFLite model at `path`.

    Mirrors `infer_keras`, but drives the TFLite interpreter directly:
    the standardized input tensor is copied in, the graph is invoked,
    and the output tensor is reported via `get_model_output`.
    """
    raw = np.array([get_model_inputs(image_path, img_size)], dtype=np.float32)
    standardized = get_data_generator().standardize(raw)
    runner = tf.lite.Interpreter(model_path=path)
    runner.allocate_tensors()
    in_index = runner.get_input_details()[0]['index']
    out_index = runner.get_output_details()[0]['index']
    runner.set_tensor(in_index, standardized)
    runner.invoke()
    get_model_output(runner.get_tensor(out_index), PELLET_LIST)
if __name__ == '__main__':
    # Script entry point: parse the CLI and dispatch to the requested backend.
    args = parser.parse_args()
    print("Using model", args.model)
    if args.model_type == 'keras':
        infer_keras(args.image, args.img_size, args.model)
    elif args.model_type == 'tflite':
        infer_tflite(args.image, args.img_size, args.model)
| {"/tests/benchmark/benchmark_tools/usecases/amman/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py"], "/tests/benchmark/benchmark_tools/usecases/creteil/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py"], "/tests/benchmark/benchmark_tools/usecases/whonet_nada/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py", "/tests/benchmark/benchmark_tools/atb_names/__init__.py"], "/tests/benchmark/inhib_diameter_modes/op/op.py": ["/tests/benchmark/inhib_diameter_modes/shared/__init__.py"], "/tests/benchmark/inhib_diameter_modes/count/__init__.py": ["/tests/benchmark/inhib_diameter_modes/count/count.py"], "/tests/benchmark/preprocess_img/__init__.py": ["/tests/benchmark/preprocess_img/preproc.py"], "/tests/benchmark/benchmark_tools/atb_names/__init__.py": ["/tests/benchmark/benchmark_tools/atb_names/usecases/__init__.py"], "/python-module/astimp_tools/datamodels.py": ["/python-module/astimp_tools/image.py"], "/tests/benchmark/inhib_diameter_modes/nlclsq/__init__.py": ["/tests/benchmark/inhib_diameter_modes/nlclsq/nlclsq.py"], "/tests/benchmark/inhib_diameter_modes/slopeOP/__init__.py": ["/tests/benchmark/inhib_diameter_modes/slopeOP/slopeOP.py"], "/tests/benchmark/inhib_diameter_modes/student/__init__.py": ["/tests/benchmark/inhib_diameter_modes/student/student.py"], "/python-module/astimp_tools/artist.py": ["/python-module/astimp_tools/image.py"], "/tests/benchmark/inhib_diameter_modes/op/__init__.py": ["/tests/benchmark/inhib_diameter_modes/op/op.py"], "/tests/benchmark/inhib_diameter_modes/slopeOP/slopeOP.py": ["/tests/benchmark/inhib_diameter_modes/shared/__init__.py"], "/tests/benchmark/preprocess_img/__main__.py": ["/tests/benchmark/preprocess_img/preproc.py"], "/tests/benchmark/benchmark_tools/atb_names/usecases/__init__.py": ["/tests/benchmark/benchmark_tools/atb_names/interfaces/__init__.py"]} |
73,577 | mpascucci/AST-image-processing | refs/heads/master | /pellet_labels/package_ensemble.py | import argparse
import os
import numpy as np
import tensorflow as tf
from tensorflow.keras import layers
import tensorflow.keras.backend as K
from util import gcs_util as util
from trainer import task
from trainer.pellet_list import PELLET_LIST
WORKING_DIR = os.getcwd()
MODEL_FOLDER = 'pellet_labels_model'
def get_args():
    """Parse CLI options for packaging the ensemble model.

    Returns:
        argparse.Namespace with `job_dir` (required GCS path holding the
        trained ensemble members), `destination`, `n_ensemble`,
        `threshold_value` and `img_size`.
    """
    cli = argparse.ArgumentParser()
    cli.add_argument(
        '--job-dir', type=str, required=True,
        help='GCS location to the ensemble models')
    cli.add_argument(
        '--destination', type=str, default='./models/ensemble_model.h5',
        help='full path to save the ensemble model')
    cli.add_argument(
        '--n-ensemble', type=int, default=10,
        help='Number of ensemble models that were trained')
    cli.add_argument(
        '--threshold-value', type=float, default=0.023049,
        help='threshold value to determine whether '
             'an input is out of distribution')
    cli.add_argument(
        '--img-size', type=int, default=64,
        help='square size to resize input images to in pixel, default=64')
    parsed, _ = cli.parse_known_args()
    return parsed
class EntropyThresholdLayer(layers.Layer):
    """Keras layer that suppresses low-confidence softmax predictions.

    Given a probability vector `x`, the layer computes its Shannon entropy.
    Predictions whose entropy is at or below `threshold` pass through
    unchanged; predictions above it are replaced by a "flattened"
    near-uniform distribution so downstream consumers see the uncertainty.
    """
    def __init__(self, threshold, n_classes, **kwargs):
        # threshold: entropy cutoff above which a prediction is treated as
        #            out of distribution.
        # n_classes: dimensionality of the softmax output.
        self.threshold = threshold
        self.n_classes = n_classes
        super(EntropyThresholdLayer, self).__init__(**kwargs)
    def build(self, input_shape):
        # No trainable weights; defer to the base implementation.
        super(EntropyThresholdLayer, self).build(input_shape)
    def call(self, x):
        # Shannon entropy per sample; the 1e-10 guards against log(0).
        entropy = -K.sum(K.log(x + 1e-10) * x, axis=-1)
        # The predictions that don't pass the threshold are set to 0
        mask1 = K.cast(K.less_equal(entropy, self.threshold), x.dtype)
        mask1 = K.expand_dims(mask1, 1)
        mask1 = K.tile(mask1, (1, self.n_classes))
        y = x * mask1
        # Build a flattened prediction array for items that don't pass the threshold
        # in order to reflect greater uncertainty
        mask2 = K.cast(K.greater(entropy, self.threshold), x.dtype)
        mask2 = K.expand_dims(mask2, 1)
        mask2 = K.tile(mask2, (1, self.n_classes))
        # (x + 1) / (n_classes + 1) is close to uniform while remaining
        # monotone in x, so the class ranking is preserved.
        flattened_pred = (x + 1) / (self.n_classes + 1)
        flattened_pred *= mask2
        # mask1 and mask2 are complementary, so exactly one of the two
        # terms is non-zero for each sample.
        return y + flattened_pred
    def get_config(self):
        # Serialize the constructor arguments so the layer survives
        # save/load round trips (used by the converter and infer scripts).
        config = {
            'threshold': self.threshold,
            'n_classes': self.n_classes,
        }
        base_config = super(EntropyThresholdLayer, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
def package_model(args):
    """Repackage N separately-trained ensemble members as one Keras graph.

    Load ensemble models trained on gcloud and rebuild them on a shared
    input layer, average their softmax outputs, and append an
    EntropyThresholdLayer so the packaged model can better handle
    uncertainty. The result is saved to `args.destination`.

    Args:
        args: namespace from `get_args` (job_dir, n_ensemble,
            threshold_value, img_size, destination).

    Raises:
        ValueError: if `args.job_dir` is not a GCS path.
    """
    # `assert` is stripped under `python -O`; validate input explicitly.
    if not args.job_dir.startswith('gs://'):
        raise ValueError(
            '--job-dir must be a GCS path (gs://...), got %r' % args.job_dir)
    # Load models
    model_paths = util.load_models_from_gcs(
        args.job_dir, MODEL_FOLDER, task.MODEL_NAME, WORKING_DIR, args.n_ensemble)
    models = []
    for path in model_paths:
        models.append(tf.keras.models.load_model(path, {'sin': tf.sin}))
    # Create the input for the ensemble
    input_layer = layers.Input(shape=(args.img_size, args.img_size, 1))
    prediction_layers = []
    for i, model in enumerate(models):
        # Get rid of the input of the N models
        model.layers.pop(0)
        x = input_layer
        for layer in model.layers:
            # Suffix layer names so the merged graph has no name collisions.
            layer._name += '_' + str(i)
            # Rebuild the graph for each model starting from the same input
            x = layer(x)
        # Collect the final softmax layers
        prediction_layers.append(x)
    ensemble_prediction = layers.Average()(prediction_layers)
    output_layer = EntropyThresholdLayer(
        args.threshold_value, len(PELLET_LIST))(ensemble_prediction)
    ensemble_model = tf.keras.Model(inputs=input_layer, outputs=output_layer)
    # Need parameters to compile the model but they are meaningless outside of training
    ensemble_model.compile(optimizer=tf.keras.optimizers.Adam(lr=0.001),
        loss=('categorical_crossentropy'))
    ensemble_model.save(args.destination)
if __name__ == '__main__':
args = get_args()
package_model(args) | {"/tests/benchmark/benchmark_tools/usecases/amman/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py"], "/tests/benchmark/benchmark_tools/usecases/creteil/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py"], "/tests/benchmark/benchmark_tools/usecases/whonet_nada/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py", "/tests/benchmark/benchmark_tools/atb_names/__init__.py"], "/tests/benchmark/inhib_diameter_modes/op/op.py": ["/tests/benchmark/inhib_diameter_modes/shared/__init__.py"], "/tests/benchmark/inhib_diameter_modes/count/__init__.py": ["/tests/benchmark/inhib_diameter_modes/count/count.py"], "/tests/benchmark/preprocess_img/__init__.py": ["/tests/benchmark/preprocess_img/preproc.py"], "/tests/benchmark/benchmark_tools/atb_names/__init__.py": ["/tests/benchmark/benchmark_tools/atb_names/usecases/__init__.py"], "/python-module/astimp_tools/datamodels.py": ["/python-module/astimp_tools/image.py"], "/tests/benchmark/inhib_diameter_modes/nlclsq/__init__.py": ["/tests/benchmark/inhib_diameter_modes/nlclsq/nlclsq.py"], "/tests/benchmark/inhib_diameter_modes/slopeOP/__init__.py": ["/tests/benchmark/inhib_diameter_modes/slopeOP/slopeOP.py"], "/tests/benchmark/inhib_diameter_modes/student/__init__.py": ["/tests/benchmark/inhib_diameter_modes/student/student.py"], "/python-module/astimp_tools/artist.py": ["/python-module/astimp_tools/image.py"], "/tests/benchmark/inhib_diameter_modes/op/__init__.py": ["/tests/benchmark/inhib_diameter_modes/op/op.py"], "/tests/benchmark/inhib_diameter_modes/slopeOP/slopeOP.py": ["/tests/benchmark/inhib_diameter_modes/shared/__init__.py"], "/tests/benchmark/preprocess_img/__main__.py": ["/tests/benchmark/preprocess_img/preproc.py"], "/tests/benchmark/benchmark_tools/atb_names/usecases/__init__.py": ["/tests/benchmark/benchmark_tools/atb_names/interfaces/__init__.py"]} |
73,578 | mpascucci/AST-image-processing | refs/heads/master | /pellet_labels/trainer/task.py | import argparse
import os
import tensorflow as tf
from tensorflow.keras import layers
import numpy as np
from . import model
from . import pellet_list
from util import gcs_util as util
PELLET_LIST = pellet_list.PELLET_LIST
WORKING_DIR = os.getcwd()
MODEL_NAME = 'pellet_labels_model.h5'
def get_args():
    """Parse the training CLI flags.

    Returns:
        argparse.Namespace with the hyper-parameters, augmentation settings
        and data locations consumed by `train_and_evaluate`.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--job-dir',
        type=str,
        required=True,
        help='GCS location to write checkpoints and export models')
    parser.add_argument(
        '--train-files',
        type=str,
        required=True,
        nargs='*',
        help='Dataset training file local or GCS')
    parser.add_argument(
        '--num-epochs',
        type=int,
        default=120,
        # Help text previously advertised default=500; keep it in sync with
        # the actual default value.
        help='number of times to go through the data, default=120')
    parser.add_argument(
        '--batch-size',
        type=int,
        default=64,
        # Help text previously advertised default=128; keep it in sync.
        help='number of records to read during each training step, default=64')
    parser.add_argument(
        '--learning-rate',
        type=float,
        default=.001,
        help='learning rate for gradient descent, default=.001')
    parser.add_argument(
        '--dropout-rate',
        type=float,
        default=.5,
        help='rate for dropout layer, default=.5')
    parser.add_argument(
        '--img-size',
        type=int,
        default=64,
        help='square size to resize input images to in pixel, default=64')
    parser.add_argument(
        '--rotation-range',
        type=int,
        default=360,
        help='range of rotation to use for image augmentation, default=360')
    parser.add_argument(
        '--image-shift',
        type=float,
        default=0.03,
        help='shift to use for image augmentation, default=.03')
    parser.add_argument(
        '--image-zoom',
        type=float,
        default=0.05,
        help='zoom to use for image augmentation, default=.05')
    parser.add_argument(
        '--brightness-range-min',
        type=float,
        default=0.5,
        help='brightness range minimum to use for image augmentation, '
             'default=.5')
    parser.add_argument(
        '--brightness-range-max',
        type=float,
        default=1.2,
        help='brightness range maximum to use for image augmentation, '
             'default=1.2')
    parser.add_argument(
        '--min-samples-per-class',
        type=int,
        default=100,
        help='minimum sample to use per class (if lower, oversample '
             'distribution), default=100')
    parser.add_argument(
        '--remove-class',
        type=bool,
        nargs='?',
        const=True,
        help='whether to remove 5 classes from the training set (enabling to '
             'assess the model ability to handle uncertainty')
    parser.add_argument(
        '--weights',
        type=int,
        nargs='*',
        help='weight to attribute to each training set, '
             'there should be as many value as there are training folders')
    args, _ = parser.parse_known_args()
    return args
# TODO(Guillaume): add conversion to tflite
# TODO(Guillaume): add support for other keras backend
def train_and_evaluate(args):
    """Train the pellet-label classifier and export the best checkpoint.

    Loads every training archive in `args.train_files`, concatenates their
    train/validation splits, optionally weights and oversamples them,
    trains a Keras model with image augmentation, and saves the best
    checkpoint (`save_best_only`) — copying it to GCS when `args.job_dir`
    is a gs:// path.

    Args:
        args: argparse.Namespace produced by `get_args`.
    """
    if args.weights:
        # One weight per training file is required for per-set weighting.
        assert(len(args.weights) == len(args.train_files))
    if args.remove_class:
        # Hold out REMOVED_CLASSES so the model's behavior on unseen
        # classes can be assessed.
        class_list = [pellet_class for pellet_class in PELLET_LIST
            if pellet_class not in pellet_list.REMOVED_CLASSES]
        removed_list = pellet_list.REMOVED_CLASSES
    else:
        class_list = PELLET_LIST
        removed_list = []
    # Accumulate the splits of each training archive before concatenating.
    train_images = []
    train_labels = []
    valid_images = []
    valid_labels = []
    train_sets_len = []
    for path in args.train_files:
        input_data = model.load_and_preprocess_data(
            path,
            WORKING_DIR,
            args.img_size,
            class_list,
            ukn_classes=removed_list)
        train_images.append(input_data.train_data)
        train_labels.append(input_data.train_labels)
        valid_images.append(input_data.valid_data)
        valid_labels.append(input_data.valid_labels)
        train_sets_len.append(len(input_data.train_data))
    train_images = np.concatenate(train_images, axis=0)
    train_labels = np.concatenate(train_labels, axis=0)
    valid_images = np.concatenate(valid_images, axis=0)
    valid_labels = np.concatenate(valid_labels, axis=0)
    if args.weights:
        # Expand the per-file weights into one weight per training sample.
        sample_weights = []
        for w, l in zip(args.weights, train_sets_len):
            sample_weights.append(np.array([w] * l))
        sample_weights = np.concatenate(sample_weights, axis=0)
    else:
        sample_weights = np.array([1] * len(train_images))
    # Presumably duplicates rare-class samples up to min_samples_per_class —
    # see model.oversample_rare_classes for the exact policy.
    train_data, sample_weights = model.oversample_rare_classes(
        train_images,
        train_labels,
        sample_weights,
        args.min_samples_per_class)
    (train_images, train_labels) = train_data
    classifier = model.create_keras_model(
        (args.img_size, args.img_size, 1),
        len(class_list),
        args.dropout_rate,
        args.learning_rate)
    # Training-time augmentation: rotation, shift, zoom and brightness,
    # on top of per-sample normalization.
    train_generator = tf.keras.preprocessing.image.ImageDataGenerator(
        samplewise_center=True,
        samplewise_std_normalization=True,
        rotation_range=args.rotation_range,
        width_shift_range=args.image_shift,
        height_shift_range=args.image_shift,
        zoom_range=args.image_zoom,
        brightness_range=(args.brightness_range_min, args.brightness_range_max),
        dtype='uint8')
    train_flow = train_generator.flow(train_images, train_labels,
        sample_weight=sample_weights,
        batch_size=args.batch_size)
    # Validation data uses model.get_data_generator() — no augmentation
    # parameters here; presumably normalization only (confirm in model.py).
    valid_generator = model.get_data_generator()
    valid_flow = valid_generator.flow(valid_images, valid_labels)
    reduce_lr = tf.keras.callbacks.ReduceLROnPlateau(monitor='val_loss',
        factor=0.2, patience=5, min_lr=0.0001)
    # Unhappy hack to workaround h5py not being able to write to GCS.
    # Force snapshots and saves to local filesystem, then copy them over to GCS.
    if args.job_dir.startswith('gs://'):
        checkpoint_path = MODEL_NAME
    else:
        checkpoint_path = os.path.join(args.job_dir, MODEL_NAME)
    model_checkpoint = tf.keras.callbacks.ModelCheckpoint(
        checkpoint_path,
        save_best_only=True)
    tensorboard_cb = tf.keras.callbacks.TensorBoard(
        os.path.join(args.job_dir, 'keras_tensorboard'),
        histogram_freq=1)
    classifier.fit_generator(
        generator=train_flow,
        epochs=args.num_epochs,
        validation_data=valid_flow,
        callbacks=[reduce_lr, model_checkpoint, tensorboard_cb])
    # Unhappy hack to workaround h5py not being able to write to GCS.
    # Force snapshots and saves to local filesystem, then copy them over to GCS.
    if args.job_dir.startswith('gs://'):
        gcs_path = os.path.join(args.job_dir, MODEL_NAME)
        util.copy_file_to_gcs(checkpoint_path, gcs_path)
if __name__ == '__main__':
    # Script entry point: parse flags and run the full training pipeline.
    args = get_args()
    train_and_evaluate(args)
| {"/tests/benchmark/benchmark_tools/usecases/amman/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py"], "/tests/benchmark/benchmark_tools/usecases/creteil/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py"], "/tests/benchmark/benchmark_tools/usecases/whonet_nada/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py", "/tests/benchmark/benchmark_tools/atb_names/__init__.py"], "/tests/benchmark/inhib_diameter_modes/op/op.py": ["/tests/benchmark/inhib_diameter_modes/shared/__init__.py"], "/tests/benchmark/inhib_diameter_modes/count/__init__.py": ["/tests/benchmark/inhib_diameter_modes/count/count.py"], "/tests/benchmark/preprocess_img/__init__.py": ["/tests/benchmark/preprocess_img/preproc.py"], "/tests/benchmark/benchmark_tools/atb_names/__init__.py": ["/tests/benchmark/benchmark_tools/atb_names/usecases/__init__.py"], "/python-module/astimp_tools/datamodels.py": ["/python-module/astimp_tools/image.py"], "/tests/benchmark/inhib_diameter_modes/nlclsq/__init__.py": ["/tests/benchmark/inhib_diameter_modes/nlclsq/nlclsq.py"], "/tests/benchmark/inhib_diameter_modes/slopeOP/__init__.py": ["/tests/benchmark/inhib_diameter_modes/slopeOP/slopeOP.py"], "/tests/benchmark/inhib_diameter_modes/student/__init__.py": ["/tests/benchmark/inhib_diameter_modes/student/student.py"], "/python-module/astimp_tools/artist.py": ["/python-module/astimp_tools/image.py"], "/tests/benchmark/inhib_diameter_modes/op/__init__.py": ["/tests/benchmark/inhib_diameter_modes/op/op.py"], "/tests/benchmark/inhib_diameter_modes/slopeOP/slopeOP.py": ["/tests/benchmark/inhib_diameter_modes/shared/__init__.py"], "/tests/benchmark/preprocess_img/__main__.py": ["/tests/benchmark/preprocess_img/preproc.py"], "/tests/benchmark/benchmark_tools/atb_names/usecases/__init__.py": ["/tests/benchmark/benchmark_tools/atb_names/interfaces/__init__.py"]} |
73,579 | mpascucci/AST-image-processing | refs/heads/master | /pellet_labels/generate_cpp_model.py | #! /usr/bin/python3
# Copyright 2019 Fondation Medecins Sans Frontieres https://fondation.msf.fr/
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This file is part of the ASTapp image processing library
import os, sys
from trainer.pellet_list import PELLET_LIST
TFLITE_MODEL = sys.argv[1]
FILE_HEADER = """
#include "pellet_label_tflite_model.hpp"
"""
def write_bytes(name, contents):
    """Emit `contents` to stdout as an aligned C byte array named `name`.

    Needs to be aligned in order not to crash on Samsung A10 and other
    phones: 4-byte alignment is required (for execution), and 16-byte is
    recommended (for performance).
    """
    print("__attribute__((__aligned__(16))) const unsigned char %s_UNSIGNED[] = {\n " % name, end='')
    for position, byte in enumerate(contents, start=1):
        # Format as 0xAB,
        print('%-06s ' % ('0x%0X,' % byte), end='')
        # Break the initializer into rows of 11 values.
        if position % 11 == 0:
            print("\n ", end='')
    # Write footer
    print('\n};')
    print("const char *%s = (const char *) %s_UNSIGNED;" % (name, name))
    print("size_t %s_SIZE = sizeof(%s_UNSIGNED);" % (name, name))
def print_strings(strings):
    """Print `strings` as wrapped, quoted, comma-separated C literals.

    A line break is emitted before any string that would push the running
    width past 80 columns; every output line is indented by two spaces.
    The width counter starts at 100, so the first string always begins on
    a fresh line.
    """
    column = 100  # deliberately > 80: forces a break before the first item
    for item in strings:
        if column + len(item) > 80:
            # Start a new, two-space-indented output line.
            print()
            print("  ", end='')
            column = 2
        chunk = '"%s", ' % item
        print(chunk, end='')
        column += len(chunk)
if __name__ == '__main__':
    # Emit the complete C++ source on stdout: file header, the label list,
    # then the TFLite model weights as an aligned byte array.
    print(FILE_HEADER)
    print("const vector<string> PELLET_LABELS = {", end='')
    # NOTE(review): spaces are stripped from pellet names — presumably so
    # the C++ labels match the classifier's output; confirm with consumers.
    print_strings([pellet.replace(" ", "") for pellet in PELLET_LIST])
    print("};")
    print()
    with open(TFLITE_MODEL, 'rb') as in_file:
        file_content = in_file.read()
    write_bytes("PELLET_LABEL_TFLITE_MODEL", file_content)
| {"/tests/benchmark/benchmark_tools/usecases/amman/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py"], "/tests/benchmark/benchmark_tools/usecases/creteil/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py"], "/tests/benchmark/benchmark_tools/usecases/whonet_nada/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py", "/tests/benchmark/benchmark_tools/atb_names/__init__.py"], "/tests/benchmark/inhib_diameter_modes/op/op.py": ["/tests/benchmark/inhib_diameter_modes/shared/__init__.py"], "/tests/benchmark/inhib_diameter_modes/count/__init__.py": ["/tests/benchmark/inhib_diameter_modes/count/count.py"], "/tests/benchmark/preprocess_img/__init__.py": ["/tests/benchmark/preprocess_img/preproc.py"], "/tests/benchmark/benchmark_tools/atb_names/__init__.py": ["/tests/benchmark/benchmark_tools/atb_names/usecases/__init__.py"], "/python-module/astimp_tools/datamodels.py": ["/python-module/astimp_tools/image.py"], "/tests/benchmark/inhib_diameter_modes/nlclsq/__init__.py": ["/tests/benchmark/inhib_diameter_modes/nlclsq/nlclsq.py"], "/tests/benchmark/inhib_diameter_modes/slopeOP/__init__.py": ["/tests/benchmark/inhib_diameter_modes/slopeOP/slopeOP.py"], "/tests/benchmark/inhib_diameter_modes/student/__init__.py": ["/tests/benchmark/inhib_diameter_modes/student/student.py"], "/python-module/astimp_tools/artist.py": ["/python-module/astimp_tools/image.py"], "/tests/benchmark/inhib_diameter_modes/op/__init__.py": ["/tests/benchmark/inhib_diameter_modes/op/op.py"], "/tests/benchmark/inhib_diameter_modes/slopeOP/slopeOP.py": ["/tests/benchmark/inhib_diameter_modes/shared/__init__.py"], "/tests/benchmark/preprocess_img/__main__.py": ["/tests/benchmark/preprocess_img/preproc.py"], "/tests/benchmark/benchmark_tools/atb_names/usecases/__init__.py": ["/tests/benchmark/benchmark_tools/atb_names/interfaces/__init__.py"]} |
73,580 | mpascucci/AST-image-processing | refs/heads/master | /tests/benchmark/preprocess_img/__main__.py | import os, argparse
import astimp
from .preproc import preprocess, Dataset
PICKLE_PATH = "pickled_preprocs"
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("input_folder", help="The images folder")
    parser.add_argument('-o', '--output-folder',
                        help="The folder where the pickles will be stored. "
                             "Defaults to {}".format(PICKLE_PATH),
                        required=False)
    # The correctly-spelled flag comes first; the historical misspelling
    # '--diamter-sensibility' is kept as an alias for backward compatibility.
    parser.add_argument('-s', '--diameter-sensibility', '--diamter-sensibility',
                        dest='diameter_sensibility',
                        help="The sensibility of the diameter reading, in (0,1). "
                             "Defaults to {}".format(
                                 astimp.config.Inhibition_diameterReadingSensibility),
                        required=False)
    args = parser.parse_args()
    print()
    ds = Dataset(args.input_folder)
    if args.output_folder is not None:
        PICKLE_PATH = args.output_folder
    if args.diameter_sensibility is not None:
        astimp.config.Inhibition_diameterReadingSensibility = float(
            args.diameter_sensibility)
    print("Preprocessing results will be stored in {}".format(PICKLE_PATH))
    errors = preprocess(ds.paths, PICKLE_PATH, parallel=True)
    if errors:
        print("Impossible to process {} files".format(len(errors)))
        # save filenames of error images
        with open(os.path.join(PICKLE_PATH, 'error_log.txt'), 'w') as f:
            f.write('\n'.join(errors))
        print("log saved in {}/error_log.txt".format(PICKLE_PATH))
| {"/tests/benchmark/benchmark_tools/usecases/amman/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py"], "/tests/benchmark/benchmark_tools/usecases/creteil/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py"], "/tests/benchmark/benchmark_tools/usecases/whonet_nada/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py", "/tests/benchmark/benchmark_tools/atb_names/__init__.py"], "/tests/benchmark/inhib_diameter_modes/op/op.py": ["/tests/benchmark/inhib_diameter_modes/shared/__init__.py"], "/tests/benchmark/inhib_diameter_modes/count/__init__.py": ["/tests/benchmark/inhib_diameter_modes/count/count.py"], "/tests/benchmark/preprocess_img/__init__.py": ["/tests/benchmark/preprocess_img/preproc.py"], "/tests/benchmark/benchmark_tools/atb_names/__init__.py": ["/tests/benchmark/benchmark_tools/atb_names/usecases/__init__.py"], "/python-module/astimp_tools/datamodels.py": ["/python-module/astimp_tools/image.py"], "/tests/benchmark/inhib_diameter_modes/nlclsq/__init__.py": ["/tests/benchmark/inhib_diameter_modes/nlclsq/nlclsq.py"], "/tests/benchmark/inhib_diameter_modes/slopeOP/__init__.py": ["/tests/benchmark/inhib_diameter_modes/slopeOP/slopeOP.py"], "/tests/benchmark/inhib_diameter_modes/student/__init__.py": ["/tests/benchmark/inhib_diameter_modes/student/student.py"], "/python-module/astimp_tools/artist.py": ["/python-module/astimp_tools/image.py"], "/tests/benchmark/inhib_diameter_modes/op/__init__.py": ["/tests/benchmark/inhib_diameter_modes/op/op.py"], "/tests/benchmark/inhib_diameter_modes/slopeOP/slopeOP.py": ["/tests/benchmark/inhib_diameter_modes/shared/__init__.py"], "/tests/benchmark/preprocess_img/__main__.py": ["/tests/benchmark/preprocess_img/preproc.py"], "/tests/benchmark/benchmark_tools/atb_names/usecases/__init__.py": ["/tests/benchmark/benchmark_tools/atb_names/interfaces/__init__.py"]} |
73,581 | mpascucci/AST-image-processing | refs/heads/master | /tests/benchmark/benchmark_utils.py | # Lint as: python3
"""
Utility functions and methods to help with benchmark.py.
"""
import numpy as np
import os
import warnings
import yaml
from collections import defaultdict
import matplotlib.pyplot as plt
class ImprocAtbResult:
    """
    Utility class to encapsulate information about a single antibiotic disk.
    """
    def __init__(self, pellet, label_result, inhib):
        # pellet: the image-processing pellet object for this disk.
        self.pellet = pellet
        # label: the antibiotic label determined for the disk.
        self.label = label_result
        # inhib_diam: diameter of the inhibition zone (from `inhib.diameter`).
        self.inhib_diam = inhib.diameter
        # expected_diam: golden diameter; filled in later when the disk is
        # matched against reference data.
        self.expected_diam = None

    def __repr__(self):
        # Debug-friendly representation; purely additive, callers relying
        # on the default object repr are unaffected functionally.
        return ('{}(label={!r}, inhib_diam={!r}, expected_diam={!r})'
                .format(type(self).__name__, self.label, self.inhib_diam,
                        self.expected_diam))
class BenchmarkResult:
"""
Utility class to encapsulate summary statistics about an AST benchmark run.
"""
    def __init__(self):
        # Per-image difference between detected and expected disk counts.
        self.num_atb_diffs = []
        # Diameter differences (mm) for pellets matched by label.
        self.diam_diffs = []
        # Running totals of golden vs. image-processing antibiotic disks.
        self.total_expected_atb = 0
        self.total_improc_atb = 0
        # File-level counters: processed, raised, and no-pellet-found.
        self.n_files_processed = 0
        self.n_exception = 0
        self.n_no_pellet = 0
        # Label matching: matches found vs. maximum possible matches.
        self.n_label_match = 0
        self.label_match_denom = 0
    def record_exception(self):
        # A file whose processing raised still counts as processed.
        self.n_files_processed += 1
        self.n_exception += 1
    def record_ast_result(self, improc_results, expected_ast):
        """Accumulate statistics for one processed AST image.

        Args:
            improc_results: list of per-disk results detected in the image.
            expected_ast: golden mapping of antibiotic label to the list of
                expected entries for that label.
        """
        self.n_files_processed += 1
        if len(improc_results) == 0:
            self.n_no_pellet += 1
        expected_num_atb = len(flatten(expected_ast.values()))
        actual_num_atb = len(improc_results)
        self.num_atb_diffs.append(actual_num_atb - expected_num_atb)
        self.total_expected_atb += expected_num_atb
        # Credit at most the expected number of disks so that spurious extra
        # detections do not inflate the "found" total.
        self.total_improc_atb += min(expected_num_atb, actual_num_atb)
        # Count ATB label matches: e.g. if improc returned AUG30, AUG30, P1 and
        # golden expected AUG30, C30, P1, P1,
        # then n_label_match=2, label_match_denom=4.
        improc_labels = [result.label for result in improc_results]
        shared_labels = set(improc_labels).intersection(expected_ast.keys())
        label_matches = sum([min(improc_labels.count(label), len(expected_ast[label])) for label in shared_labels])
        self.n_label_match += label_matches
        # Maximum possible number of matches if atb-matching was perfect.
        self.label_match_denom += min(len(flatten(expected_ast.values())), len(improc_results))
    def record_diam_diffs(self, diam_diffs):
        # Append this image's per-pellet diameter deltas to the global list.
        self.diam_diffs += diam_diffs
def show_results(self):
print("------------ Summary Results -----------")
perc_disks_found = self.total_improc_atb / self.total_expected_atb
abs_diam_diffs = [abs(i) for i in self.diam_diffs]
n_matched_pellets = len(self.diam_diffs)
diffs_lt_1mm = sum([diff < 1.0 for diff in abs_diam_diffs])
diffs_lt_2mm = sum([diff < 2.0 for diff in abs_diam_diffs])
diffs_lt_3mm = sum([diff < 3.0 for diff in abs_diam_diffs])
diffs_lt_6mm = sum([diff < 6.0 for diff in abs_diam_diffs])
diffs_more_mm = sum([diff >= 6.0 for diff in abs_diam_diffs])
perc_1mm_diffs = diffs_lt_1mm / n_matched_pellets
perc_2mm_diffs = diffs_lt_2mm / n_matched_pellets
perc_3mm_diffs = diffs_lt_3mm / n_matched_pellets
perc_6mm_diffs = diffs_lt_6mm / n_matched_pellets
perc_more_diffs = diffs_more_mm / n_matched_pellets
error_classes = {
"< 1mm" : perc_1mm_diffs,
"1 to 2mm" : perc_2mm_diffs-perc_1mm_diffs,
"2 to 3mm" : perc_3mm_diffs - perc_2mm_diffs,
"3 to 6mm" : perc_6mm_diffs - perc_3mm_diffs,
"> 6mm" : perc_more_diffs
}
perc_name_match = n_matched_pellets / self.total_improc_atb
perc_exception = self.n_exception / self.n_files_processed
print("% of disks found: {0:.2%} ({1} / {2})"
.format(perc_disks_found, self.total_improc_atb, self.total_expected_atb))
print("% of diameter diffs <1mm: {0:.2%} ({1} / {2})"
.format(perc_1mm_diffs, diffs_lt_1mm, n_matched_pellets))
print("% of diameter diffs <2mm: {0:.2%} ({1} / {2})"
.format(perc_2mm_diffs, diffs_lt_2mm, n_matched_pellets))
print("% of diameter diffs <3mm: {0:.2%} ({1} / {2})"
.format(perc_3mm_diffs, diffs_lt_3mm, n_matched_pellets))
print("% of diameter diffs <6mm: {0:.2%} ({1} / {2})"
.format(perc_6mm_diffs, diffs_lt_6mm, n_matched_pellets))
print("% of diameter diffs >=6mm: {0:.2%} ({1} / {2})"
.format(perc_more_diffs, diffs_more_mm, n_matched_pellets))
print("% of antibiotic name matches: {0:.2%} ({1} / {2})"
.format(self.n_label_match / self.label_match_denom, self.n_label_match, self.label_match_denom))
print("% of exceptions: {0:.2%} ({1} / {2})"
.format(perc_exception, self.n_exception, self.n_files_processed))
print("Diameter diff percentiles:")
for percentile in [.25, .5, .75, .9, .95]:
print(" {0:.0%}ile: {1:.2f}mm".format(
percentile, np.quantile(abs_diam_diffs, percentile)))
# Plot 4 pie charts repeating the same data as above
fig, axs = plt.subplots(2, 2)
patches, _ = axs[0, 0].pie(
[self.n_exception,
self.n_no_pellet,
self.n_files_processed - self.n_no_pellet - self.n_exception])
axs[0, 0].legend(patches,
["Exception", "No pellet found", "Pellets found"],
loc="lower left")
axs[0, 0].set_title("result categorization")
axs[0, 0].axis('equal')
# patches, _ = axs[0, 1].pie([
# sum([diff < 1.0 for diff in abs_diam_diffs]),
# sum([diff >= 1.0 for diff in abs_diam_diffs])])
# axs[0, 1].legend(patches, ["<1mm", ">=1mm"], loc="lower left")
# axs[0, 1].set_title("diameter diffs; median= {0:.2}mm".format(
# np.median(abs_diam_diffs)))
# axs[0, 1].axis('equal')
keys = error_classes.keys()
values = tuple(error_classes[key] for key in keys)
patches, _ = axs[0, 1].pie(values,colors=["#0f9e0b","#60b334","#f9ae52","#db3f34",'gray'])
pieBox = axs[0, 1].get_position()
axs[0, 1].set_position([pieBox.x0, pieBox.y0, pieBox.width*0.6, pieBox.height])
axs[0, 1].legend(patches, keys, bbox_to_anchor=(1, 0), loc="lower left")
axs[0, 1].set_title("diameter diffs; median= {0:.2}mm".format(
np.median(abs_diam_diffs)))
axs[0, 1].axis('equal')
patches, _ = axs[1, 0].pie(
[n_matched_pellets, self.total_improc_atb - n_matched_pellets])
axs[1, 0].legend(patches,
["atb name matches", "name mismatches"],
loc="lower left")
axs[1, 0].set_title("Antibiotic name matches")
axs[1, 0].axis('equal')
patches, _ = axs[1, 1].pie(
[self.total_improc_atb,
self.total_expected_atb - self.total_improc_atb])
axs[1, 1].legend(
patches, ["Disk found", "Disk missing"], loc="lower left")
axs[1, 1].set_title("Antibiotic disks found")
axs[1, 1].axis('equal')
plt.show()
fig.savefig("last_benchmark.jpg")
# ---------------- Plot number of antibiotics found per-plate ---------------- #
plt.hist(self.num_atb_diffs, bins=20)
## In the image, suptitle actually looks like title,
## title looks like subtitle.
plt.suptitle("Golden vs. improc number of pellets found per-AST")
plt.title("Left = too few pellets, Right = too many pellets",
fontsize=10)
plt.xlabel(
"Number of pellets found by improc - golden number of pellets")
plt.ylabel("Frequency of ASTs")
plt.show()
# -------------------- Plot diameter diffs per-antibiotic -------------------- #
plt.hist(self.diam_diffs, bins=30)
plt.suptitle("Diameter diffs per-antibiotic")
plt.title("Among pellets with matched antibiotic names\n" +
"Left = improc too small, right = improc too large",
fontsize=8)
plt.xlabel("Diameter found by improc - golden diameter")
plt.ylabel("Frequency of ASTs")
plt.show()
def parse_and_validate_config(config_path):
    """
    Parses expected_results.yml (present in the same directory as benchmark.py).
    yaml library will raise errors if the file is malformed; a KeyError is
    raised if the required 'label' or 'diameter' fields are missing.
    NOTE(review): an earlier docstring claimed missing image files are skipped
    with a warning — no such check is performed here; callers must handle
    nonexistent image paths themselves (confirm intended behavior).
    Returns a map of the form:
    {
        "path/to/filename1.jpg": {
            # antibiotic name -> diameters of its inhibition zones.
            # multiple diameters means the antibiotic is present multiple times
            # on the AST plate.
            "ATB1": [25.0, 23.0],
            "ATB2": [6.0],
            ...
        }
        "path/to/filename2.jpg": {
            "ATB1": [27.0],
            "ATB3": [10.0],
        }
    }
    """
    # Context manager guarantees the handle is closed even if parsing raises
    # (the original opened/closed manually and leaked the handle on error).
    with open(config_path) as f:
        config = yaml.safe_load(f)
    config_map = {}
    for filename, expected_result in config.items():
        # defaultdict(list) groups repeated antibiotics on the same plate.
        expected_atbs_in_file = defaultdict(list)
        for entry in expected_result:
            expected_atbs_in_file[entry['label']].append(entry['diameter'])
        config_map[filename] = expected_atbs_in_file
    return config_map
def flatten(list_of_lists):
    """
    Returns a list consisting of the elements in the list of lists.
    e.g. [[1,2,3],[3,4],[5,6]] -> [1,2,3,3,4,5,6]
    """
    flat = []
    for sublist in list_of_lists:
        flat.extend(sublist)
    return flat
def find_nearest(array, value):
    """Finds the entry in array which is closest to value. """
    # Inline nearest-index computation: smallest absolute distance wins.
    distances = np.abs(np.asarray(array) - value)
    return array[distances.argmin()]
def find_nearest_index(array, value):
    """Finds the index of the entry in array which is closest to value. """
    distances = np.abs(np.asarray(array) - value)
    # argmin returns the first index in case of ties.
    return distances.argmin()
| {"/tests/benchmark/benchmark_tools/usecases/amman/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py"], "/tests/benchmark/benchmark_tools/usecases/creteil/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py"], "/tests/benchmark/benchmark_tools/usecases/whonet_nada/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py", "/tests/benchmark/benchmark_tools/atb_names/__init__.py"], "/tests/benchmark/inhib_diameter_modes/op/op.py": ["/tests/benchmark/inhib_diameter_modes/shared/__init__.py"], "/tests/benchmark/inhib_diameter_modes/count/__init__.py": ["/tests/benchmark/inhib_diameter_modes/count/count.py"], "/tests/benchmark/preprocess_img/__init__.py": ["/tests/benchmark/preprocess_img/preproc.py"], "/tests/benchmark/benchmark_tools/atb_names/__init__.py": ["/tests/benchmark/benchmark_tools/atb_names/usecases/__init__.py"], "/python-module/astimp_tools/datamodels.py": ["/python-module/astimp_tools/image.py"], "/tests/benchmark/inhib_diameter_modes/nlclsq/__init__.py": ["/tests/benchmark/inhib_diameter_modes/nlclsq/nlclsq.py"], "/tests/benchmark/inhib_diameter_modes/slopeOP/__init__.py": ["/tests/benchmark/inhib_diameter_modes/slopeOP/slopeOP.py"], "/tests/benchmark/inhib_diameter_modes/student/__init__.py": ["/tests/benchmark/inhib_diameter_modes/student/student.py"], "/python-module/astimp_tools/artist.py": ["/python-module/astimp_tools/image.py"], "/tests/benchmark/inhib_diameter_modes/op/__init__.py": ["/tests/benchmark/inhib_diameter_modes/op/op.py"], "/tests/benchmark/inhib_diameter_modes/slopeOP/slopeOP.py": ["/tests/benchmark/inhib_diameter_modes/shared/__init__.py"], "/tests/benchmark/preprocess_img/__main__.py": ["/tests/benchmark/preprocess_img/preproc.py"], "/tests/benchmark/benchmark_tools/atb_names/usecases/__init__.py": ["/tests/benchmark/benchmark_tools/atb_names/interfaces/__init__.py"]} |
73,582 | mpascucci/AST-image-processing | refs/heads/master | /tests/benchmark/benchmark_tools/atb_names/usecases/__init__.py | from ..interfaces import AtbNamesTranslator
import os, csv
class CSVBasedAtbNamesTranslator(AtbNamesTranslator):
    """Use a csv based file which has two columns: "short_name; full_name"""
    def __init__(self, acronym_file_path):
        # Accumulate one list per column of interest; insertion order here
        # matches the positional order expected by the superclass.
        columns = {'full_name': [], 'short_name': [],
                   'whonet_code': [], 'concentration': []}
        with open(acronym_file_path, 'r') as csvfile:
            for row in csv.DictReader(csvfile, delimiter=';'):
                for field, values in columns.items():
                    values.append(row[field])
        super().__init__(columns['full_name'], columns['short_name'],
                         columns['whonet_code'], columns['concentration'])
# Module-level translator singletons, one per pellet naming scheme.
# Paths are relative to the benchmark working directory.
# ------------------------------------ i2a ----------------------------------- #
i2a = CSVBasedAtbNamesTranslator(os.path.join("atb_names", "i2a.csv"))
# ----------------------------------- amman ---------------------------------- #
amman = CSVBasedAtbNamesTranslator(os.path.join("atb_names", "amman.csv"))
# ---------------------------------- whonet ---------------------------------- #
whonet = CSVBasedAtbNamesTranslator(os.path.join("atb_names", "whonet.csv"))
| {"/tests/benchmark/benchmark_tools/usecases/amman/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py"], "/tests/benchmark/benchmark_tools/usecases/creteil/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py"], "/tests/benchmark/benchmark_tools/usecases/whonet_nada/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py", "/tests/benchmark/benchmark_tools/atb_names/__init__.py"], "/tests/benchmark/inhib_diameter_modes/op/op.py": ["/tests/benchmark/inhib_diameter_modes/shared/__init__.py"], "/tests/benchmark/inhib_diameter_modes/count/__init__.py": ["/tests/benchmark/inhib_diameter_modes/count/count.py"], "/tests/benchmark/preprocess_img/__init__.py": ["/tests/benchmark/preprocess_img/preproc.py"], "/tests/benchmark/benchmark_tools/atb_names/__init__.py": ["/tests/benchmark/benchmark_tools/atb_names/usecases/__init__.py"], "/python-module/astimp_tools/datamodels.py": ["/python-module/astimp_tools/image.py"], "/tests/benchmark/inhib_diameter_modes/nlclsq/__init__.py": ["/tests/benchmark/inhib_diameter_modes/nlclsq/nlclsq.py"], "/tests/benchmark/inhib_diameter_modes/slopeOP/__init__.py": ["/tests/benchmark/inhib_diameter_modes/slopeOP/slopeOP.py"], "/tests/benchmark/inhib_diameter_modes/student/__init__.py": ["/tests/benchmark/inhib_diameter_modes/student/student.py"], "/python-module/astimp_tools/artist.py": ["/python-module/astimp_tools/image.py"], "/tests/benchmark/inhib_diameter_modes/op/__init__.py": ["/tests/benchmark/inhib_diameter_modes/op/op.py"], "/tests/benchmark/inhib_diameter_modes/slopeOP/slopeOP.py": ["/tests/benchmark/inhib_diameter_modes/shared/__init__.py"], "/tests/benchmark/preprocess_img/__main__.py": ["/tests/benchmark/preprocess_img/preproc.py"], "/tests/benchmark/benchmark_tools/atb_names/usecases/__init__.py": ["/tests/benchmark/benchmark_tools/atb_names/interfaces/__init__.py"]} |
73,583 | mpascucci/AST-image-processing | refs/heads/master | /tests/benchmark/test_benchmark_tools.py | # coding=utf8
import unittest
from benchmark_tools import atb_names
import benchmark_tools as bt
import os
from pandas import DataFrame
import numpy as np
# Shared annotation-set fixtures, loaded once at import time for all tests.
creteil_set = bt.creteil.Annotations_set_Creteil("annotations/creteil")
amman_set = bt.amman.Annotations_set_Amman('annotations/amman/amman_test.csv')
class BenchmarkToolsUnitTest(unittest.TestCase):
    """Unit tests for the benchmark_tools annotation-set helpers."""
    def test_amman_annotations(self):
        """Amman sets expose per-AST metadata and per-antibiotic rows."""
        self.assertEqual(len(amman_set.ast_ids), 2)
        amman_atb = amman_set.get_ast(amman_set.ast_ids[0])
        self.assertEqual(amman_atb.ast_id, "20181024-SAU-20.jpg")
        self.assertEqual(amman_atb.species, "sau")
        self.assertEqual(amman_atb.sample_date, "7/25/2019 12:00:00 AM")
        self.assertEqual(amman_atb.sir_values, None)
        self.assertEqual(amman_atb.sample_type, "sb")
        tested_atb_names = [row.name for i,
                            row in amman_atb.tested_atbs.iterrows()]
        self.assertEqual(tested_atb_names, ['AK30', 'FOX30', 'C30'])
        tested_atb = amman_atb.tested_atbs.loc['AK30']
        self.assertEqual(tested_atb.name, "AK30")
        self.assertEqual(tested_atb.diam, 7.0)
        self.assertEqual(tested_atb.sir, None)
        # Antibiotics absent from the plate still resolve, with NaN diameter.
        not_tested_atb = amman_atb['AUG30']
        self.assertEqual(not_tested_atb.name, 'AUG30')
        self.assertTrue(np.isnan(not_tested_atb.diam))
        # NOTE(review): this re-asserts tested_atb.sir (already checked
        # above); presumably not_tested_atb.sir was intended — confirm.
        self.assertEqual(tested_atb.sir, None)
    def test_create_yaml_amman(self):
        """create_yaml serializes the whole set to the benchmark YAML shape."""
        expected_yaml = {'20181024-SAU-20.jpg':
                         [{'label': 'AK30', 'diameter': 7.0, 'sir': None},
                          {'label': 'FOX30', 'diameter': 6.0, 'sir': None},
                          {'label': 'C30', 'diameter': 25.0, 'sir': None}],
                         '20190102-SAU-11.jpg':
                         [{'label': 'AK30', 'diameter': 6.0, 'sir': None},
                          {'label': 'FOX30', 'diameter': 6.0, 'sir': None},
                          {'label': 'C30', 'diameter': 25.0, 'sir': None}]}
        self.assertEqual(amman_set.create_yaml(), expected_yaml)
    def test_creteil_annotations(self):
        """Creteil sets are indexable by AST id, then by full antibiotic name."""
        self.assertEqual(
            creteil_set[545]["PIPERACILLINE 30µg"].diam, 24)
        self.assertEqual(
            creteil_set[545]["PIPERACILLINE 30µg"].sir, 'S')
        self.assertEqual(creteil_set[500].species, "Escherichia coli")
    def test_atb_names_translation(self):
        """i2a translator maps full names to short codes and back."""
        self.assertEqual(atb_names.i2a.full2short(
            'CIPROFLOXACINE 5µg'), "CIP5")
        self.assertEqual(atb_names.i2a.short2full(
            "CIP5"), 'CIPROFLOXACINE 5µg')
    def test_ast_script(self):
        """annotation_to_ASTscript renders an annotation as an ASTscript."""
        expected_ast_script = """# ASTscript automatically generated from Creteil annotations
# script creation date : 11/11/2019
# sample date : 12/04/2017
SPECIES : Streptococcus constellatus
ATB : PENICILLINE G 1U, 20, NA, NA, S
ATB : OXACILLINE 1µg, 6, NA, NA, PASVAL
ATB : AMPICILLINE 2µg, 16, NA, NA, R
ATB : AMOXICILLINE 20µg, NA, NA, NA, S
ATB : CEFOTAXIME 5µg, 6, NA, NA, S
ATB : STREPTOMYCINE 300µg, 24, NA, NA, S
ATB : GENTAMICINE 30µg, NA, NA, NA, S
ATB : GENTAMICINE 500µg, 23, NA, NA, S
ATB : NORFLOXACINE 10µg, 19, NA, NA, PASVAL
ATB : LEVOFLOXACINE 5µg, 25, NA, NA, S
ATB : MOXIFLOXACINE 5µg, 25, NA, NA, S
ATB : TRIMETHOPRIME + SULFAMIDES 1.25-23.75µg, 24, NA, NA, S
ATB : LINCOMYCINE 15µg, 23, NA, NA, S
ATB : PRISTINAMYCINE 15µg, 22, NA, NA, S
ATB : LINEZOLIDE 10µg, 26, NA, NA, S
ATB : RIFAMPICINE 5µg, 25, NA, NA, S
ATB : TETRACYCLINE 30µg, 24, NA, NA, S
ATB : TIGECYCLINE 15µg, 24, NA, NA, PASVAL
ATB : VANCOMYCINE 5µg, 19, NA, NA, S
ATB : TEICOPLANINE 30µg, 22, NA, NA, S"""
        actual_ast_script = bt.astscript.annotation_to_ASTscript(
            creteil_set[2])
        # Skip the first two lines: they contain the volatile creation date.
        self.assertEqual(actual_ast_script.split(
            '\n')[2:], expected_ast_script.split('\n')[2:])
if __name__ == '__main__':
    # Allow running this test module directly (outside a test runner).
    unittest.main()
| {"/tests/benchmark/benchmark_tools/usecases/amman/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py"], "/tests/benchmark/benchmark_tools/usecases/creteil/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py"], "/tests/benchmark/benchmark_tools/usecases/whonet_nada/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py", "/tests/benchmark/benchmark_tools/atb_names/__init__.py"], "/tests/benchmark/inhib_diameter_modes/op/op.py": ["/tests/benchmark/inhib_diameter_modes/shared/__init__.py"], "/tests/benchmark/inhib_diameter_modes/count/__init__.py": ["/tests/benchmark/inhib_diameter_modes/count/count.py"], "/tests/benchmark/preprocess_img/__init__.py": ["/tests/benchmark/preprocess_img/preproc.py"], "/tests/benchmark/benchmark_tools/atb_names/__init__.py": ["/tests/benchmark/benchmark_tools/atb_names/usecases/__init__.py"], "/python-module/astimp_tools/datamodels.py": ["/python-module/astimp_tools/image.py"], "/tests/benchmark/inhib_diameter_modes/nlclsq/__init__.py": ["/tests/benchmark/inhib_diameter_modes/nlclsq/nlclsq.py"], "/tests/benchmark/inhib_diameter_modes/slopeOP/__init__.py": ["/tests/benchmark/inhib_diameter_modes/slopeOP/slopeOP.py"], "/tests/benchmark/inhib_diameter_modes/student/__init__.py": ["/tests/benchmark/inhib_diameter_modes/student/student.py"], "/python-module/astimp_tools/artist.py": ["/python-module/astimp_tools/image.py"], "/tests/benchmark/inhib_diameter_modes/op/__init__.py": ["/tests/benchmark/inhib_diameter_modes/op/op.py"], "/tests/benchmark/inhib_diameter_modes/slopeOP/slopeOP.py": ["/tests/benchmark/inhib_diameter_modes/shared/__init__.py"], "/tests/benchmark/preprocess_img/__main__.py": ["/tests/benchmark/preprocess_img/preproc.py"], "/tests/benchmark/benchmark_tools/atb_names/usecases/__init__.py": ["/tests/benchmark/benchmark_tools/atb_names/interfaces/__init__.py"]} |
73,584 | mpascucci/AST-image-processing | refs/heads/master | /pellet_labels/tests/test_package_ensemble.py | import unittest
import numpy as np
import package_ensemble
class TestEntropyTresholdLayer(unittest.TestCase):
    """Tests for package_ensemble.EntropyThresholdLayer.

    NOTE(review): the class name misspells "Threshold"; renaming is harmless
    for unittest discovery but is left to a dedicated change.
    """
    def test_call(self):
        # Expects call function to flatten prediction with low conf (test_data[0])
        # and to leave intact prediction with high confidence (test_data[1])
        test_data = np.array([[0.9, 0.05, 0.05], [1, 0, 0]])
        threshold_layer = package_ensemble.EntropyThresholdLayer(
            0.23, test_data.shape[1])
        result = threshold_layer.call(test_data)
        # Element-wise comparison collapsed via np.all for a single assert.
        self.assertEqual(np.all(result == [[0.475, 0.2625, 0.2625], [1, 0, 0]]), True)
if __name__ == '__main__':
    # Allow running this test module directly (outside a test runner).
    unittest.main()
73,585 | mpascucci/AST-image-processing | refs/heads/master | /pellet_labels/setup.py | from setuptools import find_packages
from setuptools import setup
# Third-party packages installed together with the package.
# NOTE(review): h5py is pinned to the very old 2.7.0 release — presumably to
# match the Keras/TF model-serialization format in use; confirm before bumping.
REQUIRED_PACKAGES = [
    'opencv-python',
    'pillow',
    'scipy',
    'h5py==2.7.0']
setup(
    name='pellets_label',
    version='0.1',
    install_requires=REQUIRED_PACKAGES,
    packages=find_packages(),
    # Ship non-Python data files found inside the packages.
    include_package_data=True
)
73,586 | mpascucci/AST-image-processing | refs/heads/master | /pellet_labels/trainer/model.py | import tensorflow as tf
from tensorflow.keras import layers
import numpy as np
import cv2
import zipfile
import os
from util import gcs_util as util
class InputData():
    """Plain container grouping the arrays produced by data loading."""
    def __init__(self, train_data, train_labels, valid_data, valid_labels, ukn_data):
        # Training set and its one-hot labels.
        self.train_data = train_data
        self.train_labels = train_labels
        # Validation set and its one-hot labels.
        self.valid_data = valid_data
        self.valid_labels = valid_labels
        # Samples belonging to classes withheld from training ("unknown").
        self.ukn_data = ukn_data
def format_pill_for_inference(pill, img_size):
    """Square-crop *pill* (top-left anchored) and resize to img_size.

    The square side is the shorter image dimension; channels are preserved.
    Returns an array of shape (img_size, img_size, channels).
    """
    side = min(pill.shape[0], pill.shape[1])
    pill = pill[:side, :side]
    if side != img_size:
        pill = cv2.resize(pill, (img_size, img_size))
    return pill.reshape((img_size, img_size) + (pill.shape[-1],))
def map_to_common_space(pill):
    """Map a 3-channel BGR pill image to grayscale; pass others through."""
    if pill.shape[-1] != 3:
        return pill
    return cv2.cvtColor(pill, cv2.COLOR_BGR2GRAY)
def load_and_preprocess_data(path, working_dir, img_size,
    class_list, ukn_classes=()):
    """Load and preprocess the data for training or inference.

    Args:
        path: local path or gs:// URI of a zip archive whose root folder is
            named after the zip file and contains 'train' and 'valid'
            sub-folders, each with one directory per class.
        working_dir: scratch directory used for download and extraction.
        img_size: target square size for each pellet image.
        class_list: ordered class names; a class's index is its label.
        ukn_classes: classes that might have been removed from class_list
            to enable assessing the way the model handles uncertainty. The
            data matching those classes will be returned in ukn_data.
            (Default changed from a mutable [] to an immutable () — same
            behavior, avoids the shared-mutable-default pitfall.)

    Returns:
        An InputData holding train/valid arrays, one-hot labels and ukn_data.

    Raises:
        ValueError: if no path is given or the archive layout is wrong.
    """
    if not path:
        raise ValueError('No dataset file defined')
    # Getting the file name
    file_name = os.path.split(path)[1]
    # Fetch remote datasets first so the rest of the function is purely local.
    if path.startswith('gs://'):
        util.download_file_from_gcs(path, os.path.join(working_dir, file_name))
        path = os.path.join(working_dir, file_name)
    with zipfile.ZipFile(path, 'r') as zip_ref:
        zip_ref.extractall(working_dir)
    # Expects extracted folder to have same name as zip file
    local_path = os.path.join(working_dir, file_name.split('.')[0])
    if not os.path.exists(local_path):
        raise ValueError('Unzipped folder wasn\'t named after zip file')
    if not os.path.exists(os.path.join(local_path, 'train')):
        raise ValueError('No train folder under unzipped folder')
    if not os.path.exists(os.path.join(local_path, 'valid')):
        raise ValueError('No valid folder under unzipped folder')
    (train_data, train_labels) = ([], [])
    (valid_data, valid_labels) = ([], [])
    ukn_data = []
    for (images, labels, folder) in [(train_data, train_labels, 'train'),
            (valid_data, valid_labels, 'valid')]:
        # Named split_path (the original reused and shadowed `path` here).
        split_path = os.path.join(local_path, folder)
        for d in os.listdir(split_path):
            if d[0] == '.':  # skip hidden entries such as .DS_Store
                continue
            for f in os.listdir(os.path.join(split_path, d)):
                if f[0] == '.':
                    continue
                pill = cv2.imread(os.path.join(split_path, d, f))
                pill = format_pill_for_inference(pill, img_size)
                pill = map_to_common_space(pill)
                if d not in ukn_classes:
                    label = class_list.index(d)
                    labels.append(label)
                    images.append(pill.reshape(pill.shape + (1,)))
                else:
                    # Withheld classes feed the uncertainty-evaluation set.
                    ukn_data.append(pill.reshape(pill.shape + (1,)))
    train_data = np.array(train_data)
    train_labels = np.array(tf.keras.utils.to_categorical(
        train_labels, len(class_list)))
    valid_data = np.array(valid_data)
    valid_labels = np.array(tf.keras.utils.to_categorical(
        valid_labels, len(class_list)))
    ukn_data = np.array(ukn_data)
    input_data = InputData(train_data, train_labels, valid_data,
        valid_labels, ukn_data)
    return input_data
def oversample_rare_classes(train_images, train_labels,
    sample_weights, min_samples_per_class):
    """Duplicate training sample of classes with population <
    min_samples_per_class"""
    # One-hot labels -> integer class index per sample (index of the max
    # output, since there is one output per category).
    labels_idx = np.argmax(train_labels, axis=1)
    extra_images, extra_labels, extra_weights = [], [], []
    for label, count in zip(*np.unique(labels_idx, return_counts=True)):
        if count >= min_samples_per_class:
            continue
        mask = labels_idx == label
        pool_images = train_images[mask]
        pool_labels = train_labels[mask]
        pool_weights = sample_weights[mask]
        pool_size = len(pool_images)
        # Cycle through the existing samples until the class is big enough.
        for p in range(min_samples_per_class - count):
            idx = p % pool_size
            extra_images.append(pool_images[idx])
            extra_labels.append(pool_labels[idx])
            extra_weights.append(pool_weights[idx])
    if len(extra_images):
        train_images = np.concatenate((train_images, extra_images), axis=0)
        train_labels = np.concatenate((train_labels, extra_labels), axis=0)
        sample_weights = np.concatenate((sample_weights,
            extra_weights), axis=0)
    return (train_images, train_labels), sample_weights
def get_model_inputs(image_path, img_size):
    """Read an image from disk and preprocess it into model-input format.

    Returns an array shaped for the Keras classifier, with a trailing
    channel axis of size 1 (grayscale).
    """
    pill = cv2.imread(image_path)
    # Square-crop/resize, then collapse color images to grayscale.
    pill = format_pill_for_inference(pill, img_size)
    pill = map_to_common_space(pill)
    # Add the trailing channel axis expected by the model.
    pill = pill.reshape(pill.shape + (1,))
    return pill
def get_model_output(predictions, class_list):
    """Return the class name with the highest predicted probability.

    Also prints the raw predictions and the chosen class (debug output).
    """
    print("Predictions:", predictions)
    best_idx = np.argmax(predictions, axis=1)
    output = class_list[best_idx[0]]
    print("Prediction:", output)
    return output
def get_data_generator():
    """Build the Keras image generator used for sample-wise normalization.

    NOTE(review): dtype='uint8' alongside per-sample mean/std normalization
    looks surprising (normalized values are not integral) — confirm the
    generator's internal casting behaves as intended for this model.
    """
    return tf.keras.preprocessing.image.ImageDataGenerator(
        samplewise_center=True,
        samplewise_std_normalization=True,
        dtype='uint8')
def create_keras_model(input_shape, n_classes, dropout_rate, learning_rate):
    """Build and compile the CNN classifier for pellet-label recognition.

    Args:
        input_shape: (height, width, channels) of the input images.
        n_classes: number of output categories (softmax units).
        dropout_rate: dropout applied before the final dense layer.
        learning_rate: Adam learning rate.

    Returns:
        A compiled tf.keras Sequential model.
    """
    classifier = tf.keras.models.Sequential()
    # Three conv blocks with shrinking kernels (8 -> 4 -> 2) and growing
    # filter counts (32 -> 64 -> 128), each followed by pooling + batch norm.
    classifier.add(layers.Conv2D(32, 8, activation='relu',
        input_shape=input_shape, data_format="channels_last"))
    classifier.add(layers.MaxPool2D(2))
    classifier.add(layers.BatchNormalization())
    classifier.add(layers.Conv2D(64, 4, activation='relu'))
    classifier.add(layers.MaxPool2D(2))
    classifier.add(layers.BatchNormalization())
    classifier.add(layers.Conv2D(128, 2, activation='relu'))
    classifier.add(layers.MaxPool2D(2))
    classifier.add(layers.BatchNormalization())
    classifier.add(layers.Flatten())
    classifier.add(layers.Dropout(dropout_rate))
    classifier.add(layers.Dense(n_classes, activation='softmax'))
    # NOTE(review): the `lr` keyword is deprecated in newer Keras in favor of
    # `learning_rate`; kept as-is to match the TF version this repo pins.
    classifier.compile(optimizer=tf.keras.optimizers.Adam(lr=learning_rate),
        loss='categorical_crossentropy',
        metrics=['accuracy'])
    return classifier
| {"/tests/benchmark/benchmark_tools/usecases/amman/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py"], "/tests/benchmark/benchmark_tools/usecases/creteil/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py"], "/tests/benchmark/benchmark_tools/usecases/whonet_nada/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py", "/tests/benchmark/benchmark_tools/atb_names/__init__.py"], "/tests/benchmark/inhib_diameter_modes/op/op.py": ["/tests/benchmark/inhib_diameter_modes/shared/__init__.py"], "/tests/benchmark/inhib_diameter_modes/count/__init__.py": ["/tests/benchmark/inhib_diameter_modes/count/count.py"], "/tests/benchmark/preprocess_img/__init__.py": ["/tests/benchmark/preprocess_img/preproc.py"], "/tests/benchmark/benchmark_tools/atb_names/__init__.py": ["/tests/benchmark/benchmark_tools/atb_names/usecases/__init__.py"], "/python-module/astimp_tools/datamodels.py": ["/python-module/astimp_tools/image.py"], "/tests/benchmark/inhib_diameter_modes/nlclsq/__init__.py": ["/tests/benchmark/inhib_diameter_modes/nlclsq/nlclsq.py"], "/tests/benchmark/inhib_diameter_modes/slopeOP/__init__.py": ["/tests/benchmark/inhib_diameter_modes/slopeOP/slopeOP.py"], "/tests/benchmark/inhib_diameter_modes/student/__init__.py": ["/tests/benchmark/inhib_diameter_modes/student/student.py"], "/python-module/astimp_tools/artist.py": ["/python-module/astimp_tools/image.py"], "/tests/benchmark/inhib_diameter_modes/op/__init__.py": ["/tests/benchmark/inhib_diameter_modes/op/op.py"], "/tests/benchmark/inhib_diameter_modes/slopeOP/slopeOP.py": ["/tests/benchmark/inhib_diameter_modes/shared/__init__.py"], "/tests/benchmark/preprocess_img/__main__.py": ["/tests/benchmark/preprocess_img/preproc.py"], "/tests/benchmark/benchmark_tools/atb_names/usecases/__init__.py": ["/tests/benchmark/benchmark_tools/atb_names/interfaces/__init__.py"]} |
73,587 | mpascucci/AST-image-processing | refs/heads/master | /pellet_labels/util/gcs_util.py | from tensorflow.python.lib.io import file_io
import os
import subprocess
def download_file_from_gcs(source, destination):
    """Copy *source* (a gs:// URI) to *destination* using the gsutil CLI.

    Skips the copy when the destination file is already present locally.
    Raises subprocess.CalledProcessError if gsutil fails.
    """
    # Guard clause: nothing to do when the file is already cached locally.
    if os.path.exists(destination):
        print('File %s already present locally, not downloading' % destination)
        return
    subprocess.check_call([
        'gsutil',
        'cp',
        source, destination])
# h5py workaround: copy local models over to GCS if the job_dir is GCS.
def copy_file_to_gcs(local_path, gcs_path):
    """Copy one local file to a GCS path using TensorFlow's file_io.

    NOTE(review): the source is opened binary ('rb') while the destination
    uses mode 'w+'; file_io appears to tolerate writing bytes here, but
    'wb+' would be the symmetric choice — confirm before changing.
    """
    with file_io.FileIO(local_path, mode='rb') as input_f:
        with file_io.FileIO(gcs_path, mode='w+') as output_f:
            output_f.write(input_f.read())
def load_models_from_gcs(
	job_dir, model_folder, model_name, working_dir, n_ensemble):
	"""Download an ensemble of models from GCS into *working_dir*.

	Fetches ``<job_dir>/<model_folder><i>/<model_name>`` for i in
	1..n_ensemble (already-downloaded files are skipped by
	download_file_from_gcs) and collects the local paths.
	"""
	model_paths = []
	# Ensemble members are numbered starting at 1, hence the shifted range.
	for i in range(1, n_ensemble + 1):
		gcs_path = os.path.join(job_dir, model_folder + str(i), model_name)
		local_path = os.path.join(working_dir,
			model_folder + str(i), model_name)
		download_file_from_gcs(gcs_path, local_path)
		model_paths.append(local_path)
return model_paths | {"/tests/benchmark/benchmark_tools/usecases/amman/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py"], "/tests/benchmark/benchmark_tools/usecases/creteil/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py"], "/tests/benchmark/benchmark_tools/usecases/whonet_nada/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py", "/tests/benchmark/benchmark_tools/atb_names/__init__.py"], "/tests/benchmark/inhib_diameter_modes/op/op.py": ["/tests/benchmark/inhib_diameter_modes/shared/__init__.py"], "/tests/benchmark/inhib_diameter_modes/count/__init__.py": ["/tests/benchmark/inhib_diameter_modes/count/count.py"], "/tests/benchmark/preprocess_img/__init__.py": ["/tests/benchmark/preprocess_img/preproc.py"], "/tests/benchmark/benchmark_tools/atb_names/__init__.py": ["/tests/benchmark/benchmark_tools/atb_names/usecases/__init__.py"], "/python-module/astimp_tools/datamodels.py": ["/python-module/astimp_tools/image.py"], "/tests/benchmark/inhib_diameter_modes/nlclsq/__init__.py": ["/tests/benchmark/inhib_diameter_modes/nlclsq/nlclsq.py"], "/tests/benchmark/inhib_diameter_modes/slopeOP/__init__.py": ["/tests/benchmark/inhib_diameter_modes/slopeOP/slopeOP.py"], "/tests/benchmark/inhib_diameter_modes/student/__init__.py": ["/tests/benchmark/inhib_diameter_modes/student/student.py"], "/python-module/astimp_tools/artist.py": ["/python-module/astimp_tools/image.py"], "/tests/benchmark/inhib_diameter_modes/op/__init__.py": ["/tests/benchmark/inhib_diameter_modes/op/op.py"], "/tests/benchmark/inhib_diameter_modes/slopeOP/slopeOP.py": ["/tests/benchmark/inhib_diameter_modes/shared/__init__.py"], "/tests/benchmark/preprocess_img/__main__.py": ["/tests/benchmark/preprocess_img/preproc.py"], "/tests/benchmark/benchmark_tools/atb_names/usecases/__init__.py": ["/tests/benchmark/benchmark_tools/atb_names/interfaces/__init__.py"]} |
73,588 | mpascucci/AST-image-processing | refs/heads/master | /tests/benchmark/benchmark_tools/__init__.py | # coding: utf8
# benchmark_tools.interfaces
# Copyright 2019 Fondation Medecins Sans Frontières https://fondation.msf.fr/
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This file is part of the ASTapp image processing library
# Author: Marco Pascucci
from .usecases import creteil, creteil_es_test, amman, whonet_nada
from . import astscript
__doc__ = "This module contains abstract classes and implementations to easily " \
"access AST annotations for benchmarking."
| {"/tests/benchmark/benchmark_tools/usecases/amman/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py"], "/tests/benchmark/benchmark_tools/usecases/creteil/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py"], "/tests/benchmark/benchmark_tools/usecases/whonet_nada/__init__.py": ["/tests/benchmark/benchmark_tools/interfaces/__init__.py", "/tests/benchmark/benchmark_tools/atb_names/__init__.py"], "/tests/benchmark/inhib_diameter_modes/op/op.py": ["/tests/benchmark/inhib_diameter_modes/shared/__init__.py"], "/tests/benchmark/inhib_diameter_modes/count/__init__.py": ["/tests/benchmark/inhib_diameter_modes/count/count.py"], "/tests/benchmark/preprocess_img/__init__.py": ["/tests/benchmark/preprocess_img/preproc.py"], "/tests/benchmark/benchmark_tools/atb_names/__init__.py": ["/tests/benchmark/benchmark_tools/atb_names/usecases/__init__.py"], "/python-module/astimp_tools/datamodels.py": ["/python-module/astimp_tools/image.py"], "/tests/benchmark/inhib_diameter_modes/nlclsq/__init__.py": ["/tests/benchmark/inhib_diameter_modes/nlclsq/nlclsq.py"], "/tests/benchmark/inhib_diameter_modes/slopeOP/__init__.py": ["/tests/benchmark/inhib_diameter_modes/slopeOP/slopeOP.py"], "/tests/benchmark/inhib_diameter_modes/student/__init__.py": ["/tests/benchmark/inhib_diameter_modes/student/student.py"], "/python-module/astimp_tools/artist.py": ["/python-module/astimp_tools/image.py"], "/tests/benchmark/inhib_diameter_modes/op/__init__.py": ["/tests/benchmark/inhib_diameter_modes/op/op.py"], "/tests/benchmark/inhib_diameter_modes/slopeOP/slopeOP.py": ["/tests/benchmark/inhib_diameter_modes/shared/__init__.py"], "/tests/benchmark/preprocess_img/__main__.py": ["/tests/benchmark/preprocess_img/preproc.py"], "/tests/benchmark/benchmark_tools/atb_names/usecases/__init__.py": ["/tests/benchmark/benchmark_tools/atb_names/interfaces/__init__.py"]} |
73,600 | paco-ambilab/animalcrossing_server | refs/heads/master | /animalcrossing_server/sells/migrations/0004_auto_20200410_1548.py | # Generated by Django 3.0.3 on 2020-04-10 15:48
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django: relaxes several Sell columns to allow NULL
    # (null=True) while keeping their existing defaults.
    dependencies = [
        ('sells', '0003_auto_20200410_1533'),
    ]
    operations = [
        migrations.AlterField(
            model_name='sell',
            name='close',
            field=models.BooleanField(default=False, null=True),
        ),
        migrations.AlterField(
            model_name='sell',
            name='createTime',
            field=models.DateTimeField(auto_now=True, null=True),
        ),
        migrations.AlterField(
            model_name='sell',
            name='islandPassCode',
            field=models.TextField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='sell',
            name='itemName',
            field=models.TextField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='sell',
            name='numberOfItem',
            field=models.IntegerField(default=1, null=True),
        ),
        migrations.AlterField(
            model_name='sell',
            name='reportCount',
            field=models.IntegerField(default=0, null=True),
        ),
        migrations.AlterField(
            model_name='sell',
            name='unitPrice',
            field=models.IntegerField(default=0, null=True),
        ),
    ]
| {"/animalcrossing_server/buys/schema.py": ["/animalcrossing_server/buys/models.py"], "/animalcrossing_server/islandReservations/schema.py": ["/animalcrossing_server/islandReservations/models.py"], "/animalcrossing_server/accountInfos/schema.py": ["/animalcrossing_server/accountInfos/models.py"], "/animalcrossing_server/islands/schema.py": ["/animalcrossing_server/islands/models.py"]} |
73,601 | paco-ambilab/animalcrossing_server | refs/heads/master | /animalcrossing_server/buys/schema.py | import graphene
import datetime
from graphene_django import DjangoObjectType
from .models import Buy
from accountInfos.models import AccountInfo
from accountInfos.schema import AccountInfoType
from django.db.models import Q
class BuyType(DjangoObjectType):
    """GraphQL object type derived automatically from the Buy model."""
    class Meta:
        model = Buy
class CreateBuy(graphene.Mutation):
    """Create a new Buy listing owned by the calling (authenticated) user.

    Output fields mirror the saved Buy row; raises if the caller is not
    logged in or has no AccountInfo profile.
    """
    id = graphene.Int()
    accountInfo = graphene.Field(AccountInfoType)
    islandPassCode = graphene.String()
    itemName = graphene.String()
    numberOfItem = graphene.Int()
    unitPrice = graphene.Int()
    reportCount = graphene.Int()
    createTime = graphene.DateTime()
    close = graphene.Boolean()

    class Arguments:
        islandPassCode = graphene.String()
        itemName = graphene.String()
        numberOfItem = graphene.Int()
        unitPrice = graphene.Int()

    def mutate(self, info, islandPassCode, itemName, numberOfItem, unitPrice):
        user = info.context.user or None
        if user is None or user.is_anonymous:
            raise Exception('You must be logged first!')
        # Bug fix: user__id__contains performs a string LIKE match, so user
        # id 1 would also match ids 10, 11, ... Use an exact id lookup.
        accountInfo = AccountInfo.objects.filter(user__id=user.id).first()
        if accountInfo is None:
            # Bug fix: message previously said 'CreateIsland' (copy-paste).
            raise Exception('CreateBuy Fail -> cannot find accountInfo')
        buy = Buy(
            accountInfo=accountInfo,
            islandPassCode=islandPassCode,
            itemName=itemName,
            numberOfItem=numberOfItem,
            unitPrice=unitPrice,
            createTime=datetime.datetime.now(),
        )
        buy.save()
        return CreateBuy(
            id=buy.id,
            accountInfo=buy.accountInfo,
            islandPassCode=buy.islandPassCode,
            itemName=buy.itemName,
            numberOfItem=buy.numberOfItem,
            unitPrice=buy.unitPrice,
            reportCount=buy.reportCount,
            createTime=buy.createTime,
            close=buy.close,
        )
class ChangeBuy(graphene.Mutation):
    """Update an existing Buy listing; only its owner may edit it."""
    id = graphene.Int()
    accountInfo = graphene.Field(AccountInfoType)
    islandPassCode = graphene.String()
    itemName = graphene.String()
    numberOfItem = graphene.Int()
    unitPrice = graphene.Int()
    reportCount = graphene.Int()
    createTime = graphene.DateTime()
    close = graphene.Boolean()

    class Arguments:
        id = graphene.Int()
        islandPassCode = graphene.String()
        itemName = graphene.String()
        numberOfItem = graphene.Int()
        unitPrice = graphene.Int()
        close = graphene.Boolean()

    def mutate(self, info, id, islandPassCode, itemName, numberOfItem, unitPrice, close):
        user = info.context.user or None
        # Reject anonymous callers before touching the database.
        if user is None or user.is_anonymous:
            raise Exception('You must be logged first!')
        buy = Buy.objects.get(id=id)
        owner = buy.accountInfo
        if owner is None:
            raise Exception('CreateIsland Fail -> cannot find accountInfo')
        # Only the listing's owner is allowed to modify it.
        if user.id != owner.user.id:
            raise Exception('You are not the correct user!')
        # Apply the requested changes and persist them in one save.
        buy.islandPassCode = islandPassCode
        buy.itemName = itemName
        buy.numberOfItem = numberOfItem
        buy.unitPrice = unitPrice
        buy.close = close
        buy.save()
        return ChangeBuy(
            id=buy.id,
            accountInfo=buy.accountInfo,
            islandPassCode=buy.islandPassCode,
            itemName=buy.itemName,
            numberOfItem=buy.numberOfItem,
            unitPrice=buy.unitPrice,
            reportCount=buy.reportCount,
            createTime=buy.createTime,
            close=buy.close,
        )
class DeleteBuy(graphene.Mutation):
    """Delete a Buy listing; only its owner may delete it.

    Returns a snapshot of the deleted row's fields.
    """
    id = graphene.Int()
    accountInfo = graphene.Field(AccountInfoType)
    islandPassCode = graphene.String()
    itemName = graphene.String()
    numberOfItem = graphene.Int()
    unitPrice = graphene.Int()
    reportCount = graphene.Int()
    createTime = graphene.DateTime()
    close = graphene.Boolean()

    class Arguments:
        id = graphene.Int()

    def mutate(self, info, id):
        user = info.context.user or None
        if user is None or user.is_anonymous:
            raise Exception('You must be logged first!')
        buy = Buy.objects.get(id=id)
        # Bug fix: previously any logged-in user could delete any listing.
        # Mirror ChangeBuy's ownership check.
        if buy.accountInfo is None or user.id != buy.accountInfo.user.id:
            raise Exception('You are not the correct user!')
        # Bug fix: Django clears the instance pk on delete(), so read the id
        # before deleting or the payload would report id=None.
        deleted_id = buy.id
        buy.delete()
        return DeleteBuy(
            id=deleted_id,
            accountInfo=buy.accountInfo,
            islandPassCode=buy.islandPassCode,
            itemName=buy.itemName,
            numberOfItem=buy.numberOfItem,
            unitPrice=buy.unitPrice,
            reportCount=buy.reportCount,
            createTime=buy.createTime,
            close=buy.close,
        )
class Mutation(graphene.ObjectType):
    """Root mutations for Buy listings: create, change, delete."""
    create_buy = CreateBuy.Field()
    change_buy = ChangeBuy.Field()
    delete_buy = DeleteBuy.Field()
class Query(graphene.ObjectType):
    """Root query exposing Buy listings with optional name/close filters."""
    buys = graphene.List(BuyType, search=graphene.String(), close=graphene.Boolean())

    def resolve_buys(self, info, search=None, close=None, **kwargs):
        """Return Buy rows matching *close* (default: open listings only),
        optionally restricted to item names containing *search*."""
        if close is None:
            close = False
        # Compose one Q object instead of duplicated branches; the original
        # also had an unreachable `return Buy.objects.all()` after two
        # exhaustive branches, and shadowed the builtin name `filter`.
        criteria = Q(close=close)
        if search is not None:
            criteria &= Q(itemName__icontains=search)
        return Buy.objects.filter(criteria)
| {"/animalcrossing_server/buys/schema.py": ["/animalcrossing_server/buys/models.py"], "/animalcrossing_server/islandReservations/schema.py": ["/animalcrossing_server/islandReservations/models.py"], "/animalcrossing_server/accountInfos/schema.py": ["/animalcrossing_server/accountInfos/models.py"], "/animalcrossing_server/islands/schema.py": ["/animalcrossing_server/islands/models.py"]} |
73,602 | paco-ambilab/animalcrossing_server | refs/heads/master | /animalcrossing_server/islandReservations/schema.py | import graphene
import datetime
from graphene_django import DjangoObjectType
from .models import IslandReservation
from accountInfos.models import AccountInfo
from islands.models import Island
from accountInfos.schema import AccountInfoType
from islands.schema import IslandType
from django.db.models import Q
class IslandReservationType(DjangoObjectType):
    """GraphQL object type derived from the IslandReservation model."""
    class Meta:
        model = IslandReservation
class CreateIslandReservation(graphene.Mutation):
    """Reserve a visit to an island for the calling user."""
    id = graphene.Int()
    island = graphene.Field(IslandType)
    accountInfo = graphene.Field(AccountInfoType)
    createTime = graphene.DateTime()

    class Arguments:
        islandID = graphene.String()

    def mutate(self, info, islandID):
        user = info.context.user or None
        if user is None or user.is_anonymous:
            raise Exception('You must be logged first!')
        # Bug fix: __contains lookups perform string LIKE matching (id 1
        # would also match 10, 11, ...); use exact lookups instead.
        accountInfo = AccountInfo.objects.filter(user__id=user.id).first()
        if accountInfo is None:
            raise Exception('CreateIslandReservation Fail -> cannot find accountInfo')
        if islandID is None:
            raise Exception('CreateIslandReservation Fail -> islandID is null')
        island = Island.objects.filter(id=islandID).first()
        if island is None:
            raise Exception('CreateIslandReservation Fail -> cannot find island')
        islandReservation = IslandReservation(
            island=island,
            accountInfo=accountInfo,
            createTime=datetime.datetime.now(),
        )
        # Bug fix: the original called island.save() (a no-op re-save), so the
        # reservation was never persisted and the returned id was None.
        islandReservation.save()
        return CreateIslandReservation(
            id=islandReservation.id,
            island=islandReservation.island,
            accountInfo=islandReservation.accountInfo,
            createTime=islandReservation.createTime,
        )
class DeleteIslandReservation(graphene.Mutation):
    """Cancel the calling user's reservation on the given island."""
    id = graphene.Int()

    class Arguments:
        islandID = graphene.Int()

    def mutate(self, info, islandID):
        user = info.context.user or None
        if user is None or user.is_anonymous:
            raise Exception('You must be logged first!')
        # Exact id lookup (the original user__id__contains did a LIKE match).
        accountInfo = AccountInfo.objects.filter(user__id=user.id).first()
        if accountInfo is None:
            raise Exception('DeleteIslandReservation Fail -> cannot find accountInfo')
        # filter().first() returns None when absent, making the guard below
        # live; the original objects.get() would raise DoesNotExist first.
        island = Island.objects.filter(id=islandID).first()
        if island is None:
            raise Exception('DeleteIslandReservation Fail -> cannot find island')
        reservation = IslandReservation.objects.filter(
            Q(island=island) & Q(accountInfo=accountInfo)
        ).first()
        # Bug fix: a missing reservation previously fell through to
        # `islandReservation.id` on None and crashed with AttributeError.
        if reservation is None:
            raise Exception('DeleteIslandReservation Fail -> cannot find reservation')
        # Bug fix: delete() clears the pk, so capture the id beforehand.
        deleted_id = reservation.id
        reservation.delete()
        return DeleteIslandReservation(id=deleted_id)
class Mutation(graphene.ObjectType):
    """Root mutations for island reservations: create and delete."""
    create_island_reservation = CreateIslandReservation.Field()
    delete_island_reservation = DeleteIslandReservation.Field()
| {"/animalcrossing_server/buys/schema.py": ["/animalcrossing_server/buys/models.py"], "/animalcrossing_server/islandReservations/schema.py": ["/animalcrossing_server/islandReservations/models.py"], "/animalcrossing_server/accountInfos/schema.py": ["/animalcrossing_server/accountInfos/models.py"], "/animalcrossing_server/islands/schema.py": ["/animalcrossing_server/islands/models.py"]} |
73,603 | paco-ambilab/animalcrossing_server | refs/heads/master | /animalcrossing_server/islandReservations/migrations/0001_initial.py | # Generated by Django 3.0.3 on 2020-04-12 12:54
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the IslandReservation table
    # linking an AccountInfo (visitor) to an Island.
    initial = True
    dependencies = [
        ('islands', '0005_island_rule'),
        ('accountInfos', '0006_auto_20200410_1548'),
    ]
    operations = [
        migrations.CreateModel(
            name='IslandReservation',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('createTime', models.DateTimeField(auto_now=True, null=True)),
                ('accountInfo', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='reservedIslands', to='accountInfos.AccountInfo')),
                ('island', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='reservation', to='islands.Island')),
            ],
        ),
    ]
| {"/animalcrossing_server/buys/schema.py": ["/animalcrossing_server/buys/models.py"], "/animalcrossing_server/islandReservations/schema.py": ["/animalcrossing_server/islandReservations/models.py"], "/animalcrossing_server/accountInfos/schema.py": ["/animalcrossing_server/accountInfos/models.py"], "/animalcrossing_server/islands/schema.py": ["/animalcrossing_server/islands/models.py"]} |
73,604 | paco-ambilab/animalcrossing_server | refs/heads/master | /animalcrossing_server/islands/models.py | import datetime
from django.db import models
from django.conf import settings
class Island(models.Model):
    """An island listing posted by a player for others to visit."""
    # Owning profile; cascade-deletes this listing with the account.
    accountInfo = models.ForeignKey('accountInfos.AccountInfo', related_name='postedIslands', on_delete=models.CASCADE)
    # Entry code visitors use to fly in.  NOTE(review): stored as plain text —
    # presumably only revealed to reserved visitors; confirm in the API layer.
    islandPassCode = models.TextField(blank=True, null=True)
    location = models.CharField(max_length=50, null=True)
    hashTagDescription = models.TextField(blank=True, null=True)
    rule = models.TextField(blank=True, null=True)
    # Number of times this listing has been reported.
    reportCount = models.IntegerField(default=0, null=True)
    # NOTE(review): auto_now updates on every save, so despite the name this
    # behaves as "last modified"; auto_now_add would pin the creation time.
    createTime = models.DateTimeField(auto_now=True, null=True)
    close = models.BooleanField(default=False, null=True)
| {"/animalcrossing_server/buys/schema.py": ["/animalcrossing_server/buys/models.py"], "/animalcrossing_server/islandReservations/schema.py": ["/animalcrossing_server/islandReservations/models.py"], "/animalcrossing_server/accountInfos/schema.py": ["/animalcrossing_server/accountInfos/models.py"], "/animalcrossing_server/islands/schema.py": ["/animalcrossing_server/islands/models.py"]} |
73,605 | paco-ambilab/animalcrossing_server | refs/heads/master | /animalcrossing_server/accountInfos/migrations/0003_auto_20200410_1147.py | # Generated by Django 3.0.3 on 2020-04-10 11:47
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated: drops AccountInfo.islandOwnerID and adds the
    # createTime and switchID fields.
    dependencies = [
        ('accountInfos', '0002_auto_20200410_0923'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='accountinfo',
            name='islandOwnerID',
        ),
        migrations.AddField(
            model_name='accountinfo',
            name='createTime',
            field=models.TimeField(auto_now=True),
        ),
        migrations.AddField(
            model_name='accountinfo',
            name='switchID',
            field=models.TextField(blank=True),
        ),
    ]
| {"/animalcrossing_server/buys/schema.py": ["/animalcrossing_server/buys/models.py"], "/animalcrossing_server/islandReservations/schema.py": ["/animalcrossing_server/islandReservations/models.py"], "/animalcrossing_server/accountInfos/schema.py": ["/animalcrossing_server/accountInfos/models.py"], "/animalcrossing_server/islands/schema.py": ["/animalcrossing_server/islands/models.py"]} |
73,606 | paco-ambilab/animalcrossing_server | refs/heads/master | /animalcrossing_server/accountInfos/schema.py | import graphene
import datetime
import graphql_jwt
from django.contrib.auth import get_user_model
from graphene_django import DjangoObjectType
from .models import AccountInfo
class UserType(DjangoObjectType):
    """GraphQL type for the Django auth user; the password hash is excluded."""
    class Meta:
        model = get_user_model()
        exclude = ['password']
class AccountInfoType(DjangoObjectType):
    """GraphQL object type derived from the AccountInfo model."""
    class Meta:
        model = AccountInfo
class CreateAccount(graphene.Mutation):
    """Register a new Django user together with its AccountInfo profile."""
    accountInfo = graphene.Field(AccountInfoType)

    class Arguments:
        username = graphene.String(required=True)
        password = graphene.String(required=True)
        email = graphene.String(required=True)
        switchID = graphene.String(required=True)

    def mutate(self, info, username, password, email, switchID):
        # Create the auth user first; set_password stores the hash, never
        # the raw password.
        new_user = get_user_model()(username=username, email=email)
        new_user.set_password(password)
        new_user.save()
        # Attach the game-specific profile to the fresh user record.
        profile = AccountInfo(
            user=new_user,
            createTime=datetime.datetime.now(),
            switchID=switchID,
        )
        profile.save()
        return CreateAccount(accountInfo=profile)
class Mutation(graphene.ObjectType):
    """Auth mutations: JWT obtain/verify/refresh plus account creation."""
    token_auth = graphql_jwt.ObtainJSONWebToken.Field()
    verify_token = graphql_jwt.Verify.Field()
    refresh_token = graphql_jwt.Refresh.Field()
    create_account = CreateAccount.Field()
class Query(graphene.ObjectType):
    """Root query exposing the authenticated caller's AccountInfo."""
    account = graphene.Field(AccountInfoType)

    def resolve_account(self, info):
        """Return the AccountInfo of the logged-in user, or raise."""
        user = info.context.user
        if user.is_anonymous:
            raise Exception('You must be logged first!')
        # Bug fix: user__id__contains performs a string LIKE match, so user
        # id 1 would also match ids 10, 11, ... Use an exact id lookup.
        accountInfo = AccountInfo.objects.filter(user__id=user.id).first()
        if accountInfo is None:
            raise Exception('Get accountInfo fail -> null accountInfo!')
        return accountInfo
| {"/animalcrossing_server/buys/schema.py": ["/animalcrossing_server/buys/models.py"], "/animalcrossing_server/islandReservations/schema.py": ["/animalcrossing_server/islandReservations/models.py"], "/animalcrossing_server/accountInfos/schema.py": ["/animalcrossing_server/accountInfos/models.py"], "/animalcrossing_server/islands/schema.py": ["/animalcrossing_server/islands/models.py"]} |
73,607 | paco-ambilab/animalcrossing_server | refs/heads/master | /animalcrossing_server/buys/migrations/0003_auto_20200410_1533.py | # Generated by Django 3.0.3 on 2020-04-10 15:33
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated: makes Buy.createTime a DateTimeField with auto_now
    # (updated on every save).
    dependencies = [
        ('buys', '0002_auto_20200410_1147'),
    ]
    operations = [
        migrations.AlterField(
            model_name='buy',
            name='createTime',
            field=models.DateTimeField(auto_now=True),
        ),
    ]
| {"/animalcrossing_server/buys/schema.py": ["/animalcrossing_server/buys/models.py"], "/animalcrossing_server/islandReservations/schema.py": ["/animalcrossing_server/islandReservations/models.py"], "/animalcrossing_server/accountInfos/schema.py": ["/animalcrossing_server/accountInfos/models.py"], "/animalcrossing_server/islands/schema.py": ["/animalcrossing_server/islands/models.py"]} |
73,608 | paco-ambilab/animalcrossing_server | refs/heads/master | /animalcrossing_server/accountInfos/migrations/0006_auto_20200410_1548.py | # Generated by Django 3.0.3 on 2020-04-10 15:48
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated: allows NULL for AccountInfo.switchID.
    dependencies = [
        ('accountInfos', '0005_auto_20200410_1545'),
    ]
    operations = [
        migrations.AlterField(
            model_name='accountinfo',
            name='switchID',
            field=models.TextField(blank=True, null=True),
        ),
    ]
| {"/animalcrossing_server/buys/schema.py": ["/animalcrossing_server/buys/models.py"], "/animalcrossing_server/islandReservations/schema.py": ["/animalcrossing_server/islandReservations/models.py"], "/animalcrossing_server/accountInfos/schema.py": ["/animalcrossing_server/accountInfos/models.py"], "/animalcrossing_server/islands/schema.py": ["/animalcrossing_server/islands/models.py"]} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.