index int64 | repo_name string | branch_name string | path string | content string | import_graph string |
|---|---|---|---|---|---|
58,308 | we-chatter/wechatter | refs/heads/main | /wechatter/shared/dm/trackers.py | # -*- coding: utf-8 -*-
"""
@Author : Xu
@Software: PyCharm
@File : trackers.py
@Time : 2021/4/1 5:05 下午
@Desc : 对话状态跟踪
"""
import abc
import json
import logging
import re
from abc import ABC
import jsonpickle
import time
import uuid
from dateutil import parser
from datetime import datetime
from typing import (
List,
Dict,
Text,
Any,
Type,
Optional,
TYPE_CHECKING,
Iterable,
cast,
Tuple,
)
import wechatter.shared.utils.common
from typing import Union
from enum import Enum
from wechatter.shared.dialogue_config import DOCS_URL_TRAINING_DATA
from wechatter.shared.dm.dm_config import (
LOOP_NAME,
EXTERNAL_MESSAGE_PREFIX,
ACTION_NAME_SENDER_ID_CONNECTOR_STR,
IS_EXTERNAL,
USE_TEXT_FOR_FEATURIZATION,
LOOP_INTERRUPTED,
ENTITY_LABEL_SEPARATOR,
ACTION_SESSION_START_NAME,
ACTION_LISTEN_NAME,
)
from wechatter.shared.dm.slots import Slot
from wechatter.shared.exceptions import UnsupportedFeatureException
from wechatter.shared.nlu.nlu_config import (
ENTITY_ATTRIBUTE_TYPE,
INTENT,
TEXT,
ENTITIES,
ENTITY_ATTRIBUTE_VALUE,
ACTION_TEXT,
ACTION_NAME,
INTENT_NAME_KEY,
ENTITY_ATTRIBUTE_ROLE,
ENTITY_ATTRIBUTE_GROUP,
)
logger = logging.getLogger(__name__)
class EventVerbosity(Enum):
    """Filter on which events to include in tracker dumps."""

    # no events will be included
    NONE = 1

    # all events, that contribute to the trackers state are included
    # these are all you need to reconstruct the tracker state
    APPLIED = 2

    # include even more events, in this case everything that comes
    # after the most recent restart event. this will also include
    # utterances that got reverted and actions that got undone.
    AFTER_RESTART = 3

    # include every logged event
    ALL = 4
class AnySlotDict(dict):
    """A slot dictionary that pretends every slot exists, by creating slots on demand.

    Only the generic slot type is used, so certain functionality won't work,
    e.g. properly featurizing the slot."""

    def __missing__(self, key) -> Slot:
        # Lazily create a generic Slot the first time a key is looked up,
        # and remember it so subsequent lookups return the same object.
        new_slot = Slot(key)
        self[key] = new_slot
        return new_slot

    def __contains__(self, key) -> bool:
        # Every key is reported as present; access creates it on demand.
        return True
class DialogueStateTracker:
"""
dst实现
"""
| {"/wechatter/shared/dm/slots.py": ["/wechatter/shared/utils/io.py"], "/wechatter/server/run_server.py": ["/wechatter/config/__init__.py", "/wechatter/__init__.py", "/wechatter/shared/utils/io.py", "/wechatter/model_training.py"], "/wechatter/model.py": ["/wechatter/shared/utils/io.py", "/wechatter/utils/io.py", "/wechatter/exceptions.py"], "/wechatter/dm/tracker_store.py": ["/wechatter/shared/dm/conversation.py"], "/wechatter/model_training.py": ["/wechatter/shared/importers/importer.py"], "/wechatter/shared/dm/trackers.py": ["/wechatter/shared/dm/slots.py"], "/wechatter/config/__init__.py": ["/wechatter/config/config.py"]} |
58,309 | we-chatter/wechatter | refs/heads/main | /wechatter/dm/interpreter.py | # -*- coding: utf-8 -*-
"""
@Author : Xu
@Software: PyCharm
@File : interpreter.py
@Time : 2021/4/6 2:14 下午
@Desc : 对话管理解释器
"""
import aiohttp
import logging
import os
from typing import Text, Dict, Any, Union, Optional
from wechatter.dm import dm_config
from wechatter.utils.endpoints import EndpointConfig
logger = logging.getLogger(__name__)
class WechatterNLUHttpInterpreter():
    """NLU interpreter that delegates parsing to a remote HTTP endpoint."""

    def __init__(self, endpoint_config: Optional[EndpointConfig] = None) -> None:
        # Fall back to the default local NLU server when no (truthy)
        # endpoint configuration is supplied.
        if not endpoint_config:
            self.endpoint_config = EndpointConfig(dm_config.DEFAULT_SERVER_URL)
        else:
            self.endpoint_config = endpoint_config
| {"/wechatter/shared/dm/slots.py": ["/wechatter/shared/utils/io.py"], "/wechatter/server/run_server.py": ["/wechatter/config/__init__.py", "/wechatter/__init__.py", "/wechatter/shared/utils/io.py", "/wechatter/model_training.py"], "/wechatter/model.py": ["/wechatter/shared/utils/io.py", "/wechatter/utils/io.py", "/wechatter/exceptions.py"], "/wechatter/dm/tracker_store.py": ["/wechatter/shared/dm/conversation.py"], "/wechatter/model_training.py": ["/wechatter/shared/importers/importer.py"], "/wechatter/shared/dm/trackers.py": ["/wechatter/shared/dm/slots.py"], "/wechatter/config/__init__.py": ["/wechatter/config/config.py"]} |
58,310 | we-chatter/wechatter | refs/heads/main | /wechatter/dm/dm_config.py | # -*- coding: utf-8 -*-
"""
@Author : Xu
@Software: PyCharm
@File : dm_config.py
@Time : 2021/4/1 5:26 下午
@Desc :
"""
DEFAULT_SERVER_PORT = 9004
DEFAULT_SERVER_FORMAT = "{}://localhost:{}"
DEFAULT_SERVER_URL = DEFAULT_SERVER_FORMAT.format("http", DEFAULT_SERVER_PORT)
DEFAULT_NLU_FALLBACK_THRESHOLD = 0.3
DEFAULT_NLU_FALLBACK_AMBIGUITY_THRESHOLD = 0.1
DEFAULT_CORE_FALLBACK_THRESHOLD = 0.3
DEFAULT_REQUEST_TIMEOUT = 60 * 5 # 5 minutes
DEFAULT_RESPONSE_TIMEOUT = 60 * 60 # 1 hour
DEFAULT_LOCK_LIFETIME = 60 # in seconds
BEARER_TOKEN_PREFIX = "Bearer "
# the lowest priority intended to be used by machine learning policies
DEFAULT_POLICY_PRIORITY = 1
# the priority intended to be used by mapping policies
MAPPING_POLICY_PRIORITY = 2
# the priority intended to be used by memoization policies
# it is higher than default and mapping to prioritize training stories
MEMOIZATION_POLICY_PRIORITY = 3
# the priority intended to be used by fallback policies
# it is higher than memoization to prioritize fallback
FALLBACK_POLICY_PRIORITY = 4
# the priority intended to be used by form policies
# it is the highest to prioritize form to the rest of the policies
FORM_POLICY_PRIORITY = 5
# The priority of the `RulePolicy` is higher than the priorities for `FallbackPolicy`,
# `TwoStageFallbackPolicy` and `FormPolicy` to make it possible to use the
# `RulePolicy` in conjunction with these deprecated policies.
RULE_POLICY_PRIORITY = 6
DIALOGUE = "dialogue"
# RabbitMQ message property header added to events published using `rasa export`
RASA_EXPORT_PROCESS_ID_HEADER_NAME = "rasa-export-process-id"
# Name of the environment variable defining the PostgreSQL schema to access. See
# https://www.postgresql.org/docs/9.1/ddl-schemas.html for more details.
POSTGRESQL_SCHEMA = "POSTGRESQL_SCHEMA"
# Names of the environment variables defining PostgreSQL pool size and max overflow
POSTGRESQL_POOL_SIZE = "SQL_POOL_SIZE"
POSTGRESQL_MAX_OVERFLOW = "SQL_MAX_OVERFLOW"
| {"/wechatter/shared/dm/slots.py": ["/wechatter/shared/utils/io.py"], "/wechatter/server/run_server.py": ["/wechatter/config/__init__.py", "/wechatter/__init__.py", "/wechatter/shared/utils/io.py", "/wechatter/model_training.py"], "/wechatter/model.py": ["/wechatter/shared/utils/io.py", "/wechatter/utils/io.py", "/wechatter/exceptions.py"], "/wechatter/dm/tracker_store.py": ["/wechatter/shared/dm/conversation.py"], "/wechatter/model_training.py": ["/wechatter/shared/importers/importer.py"], "/wechatter/shared/dm/trackers.py": ["/wechatter/shared/dm/slots.py"], "/wechatter/config/__init__.py": ["/wechatter/config/config.py"]} |
58,311 | we-chatter/wechatter | refs/heads/main | /wechatter/config/__init__.py | # -*- coding: utf-8 -*-
"""
@Author : Xu
@Software: PyCharm
@File : __init__.py.py
@Time : 2020/11/10 9:04 上午
@Desc :
"""
from wechatter.config.config import Config
def load_config():
    """Return the application's Config class."""
    return Config
CONFIG = load_config() | {"/wechatter/shared/dm/slots.py": ["/wechatter/shared/utils/io.py"], "/wechatter/server/run_server.py": ["/wechatter/config/__init__.py", "/wechatter/__init__.py", "/wechatter/shared/utils/io.py", "/wechatter/model_training.py"], "/wechatter/model.py": ["/wechatter/shared/utils/io.py", "/wechatter/utils/io.py", "/wechatter/exceptions.py"], "/wechatter/dm/tracker_store.py": ["/wechatter/shared/dm/conversation.py"], "/wechatter/model_training.py": ["/wechatter/shared/importers/importer.py"], "/wechatter/shared/dm/trackers.py": ["/wechatter/shared/dm/slots.py"], "/wechatter/config/__init__.py": ["/wechatter/config/config.py"]} |
58,312 | we-chatter/wechatter | refs/heads/main | /wechatter/utils/io.py | # -*- coding: utf-8 -*-
"""
@Author : Xu
@Software: PyCharm
@File : io.py
@Time : 2021/4/15 4:17 下午
@Desc :
"""
| {"/wechatter/shared/dm/slots.py": ["/wechatter/shared/utils/io.py"], "/wechatter/server/run_server.py": ["/wechatter/config/__init__.py", "/wechatter/__init__.py", "/wechatter/shared/utils/io.py", "/wechatter/model_training.py"], "/wechatter/model.py": ["/wechatter/shared/utils/io.py", "/wechatter/utils/io.py", "/wechatter/exceptions.py"], "/wechatter/dm/tracker_store.py": ["/wechatter/shared/dm/conversation.py"], "/wechatter/model_training.py": ["/wechatter/shared/importers/importer.py"], "/wechatter/shared/dm/trackers.py": ["/wechatter/shared/dm/slots.py"], "/wechatter/config/__init__.py": ["/wechatter/config/config.py"]} |
58,322 | saharmilis/StartApp-DataScienceChallenge | refs/heads/master | /draft.py | import numpy as np
import pandas as pd
from sklearn import model_selection
from sklearn.svm import SVC
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
import pickle
# Load the training data. pd.DataFrame.from_csv was removed in pandas 1.0;
# read_csv with index_col=0 is the documented replacement (column 0 as index).
df = pd.read_csv('train.csv', index_col=0)
print(df.shape)
print()

array_values = df.values
# Renamed from `input`, which shadowed the builtin of the same name.
raw_features = array_values[:, [0, 1]]
output = array_values[:, 2]

###
# vectorizer = CountVectorizer(min_df=1, stop_words='english')
vectorizer = TfidfVectorizer(min_df=1, stop_words='english')
descriptions = array_values[:, 1]
bag_of_words = vectorizer.fit_transform(descriptions)
# vectorizer.vocabulary_.get("you");
# print(bag_of_words)
# print(vectorizer)
# print(len(vectorizer.get_feature_names()))
# print(bag_of_words.toarray())
###

validation_size = 0.2
seed = 7
scoring = 'accuracy'
X_train, X_validation, Y_train, Y_validation = model_selection.train_test_split(
    bag_of_words.toarray(), output,
    test_size=validation_size, random_state=seed, shuffle=True)
print(X_train.shape)
print(Y_train.shape)
print()
print(X_validation.shape)
print(Y_validation.shape)
# print('-----------------------------')
print('-----------------------------')
#
# # Spot Check Algorithms
models = []
models.append(('LR', LogisticRegression()))
models.append(('LDA', LinearDiscriminantAnalysis()))
models.append(('KNN', KNeighborsClassifier()))
models.append(('CART', DecisionTreeClassifier()))
models.append(('NB', GaussianNB()))
models.append(('SVM', SVC()))
# evaluate each model in turn
results = []
names = []
for name, model in models:
    # shuffle=True is required when passing random_state: modern scikit-learn
    # raises a ValueError for KFold(random_state=...) with shuffle=False
    # (and without shuffling the seed had no effect anyway).
    kfold = model_selection.KFold(n_splits=10, shuffle=True, random_state=seed)
    cv_results = model_selection.cross_val_score(model, X_train, Y_train, cv=kfold, scoring=scoring)
    results.append(cv_results)
    names.append(name)
    msg = "%s: %f (%f)" % (name, cv_results.mean(), cv_results.std())
    print(msg)
print("-----------------------------------")
# model = KNeighborsClassifier()
# model.fit(X_train, Y_train)
# predictions = model.predict(X_validation)
# print(accuracy_score(Y_validation, predictions))
# print(confusion_matrix(Y_validation, predictions))
# print(classification_report(Y_validation, predictions))
# model = KNeighborsClassifier()
# # model = GaussianNB()
# kfold = model_selection.KFold(n_splits=10, random_state=seed)
# cv_results = model_selection.cross_val_score(model, X_train, Y_train, cv=kfold, scoring=scoring)
# msg = "%s: %f (%f)" % ('KNN', cv_results.mean(), cv_results.std())
# print(msg)
# # save the model to disk
# filename = 'finalized_model.sav'
# pickle.dump(model, open(filename, 'wb'))
#
# # load the model from disk
# loaded_model = pickle.load(open(filename, 'rb'))
# predictions = loaded_model.predict(X_validation)
# # result = loaded_model.score(X_validation, Y_validation)
# print(accuracy_score(Y_validation, predictions))
# # print(result)
# from sklearn.externals import joblib
# from sklearn.datasets import load_digits
# from sklearn.linear_model import SGDClassifier
# filename = 'digits_classifier.joblib.pkl'
# save the classifier
# _ = joblib.dump(model, filename, compress=9)
# load it again
# clf2 = joblib.load(filename)
# print(clf2.score(X_validation, Y_validation))
| {"/data_preprocess.py": ["/language_helper.py", "/model.py"]} |
58,323 | saharmilis/StartApp-DataScienceChallenge | refs/heads/master | /data_preprocess.py | import language_helper
# clear unused columns
# clear non english apps
# clear all non english words
# clear all stopwords english words - not a must
def clear_colums():
    # TODO: drop unused columns from the raw dataset. (Name has a typo --
    # "colums" -- but renaming would break any existing callers.)
    pass

def clear_non_english_apps():
    # TODO: remove rows whose app description is not in English.
    pass

def clear_all_non_enghlish_words():
    # TODO: strip non-English words from each description. (Typo "enghlish"
    # kept for caller compatibility.)
    pass

def clear_all_stopwords():
    # TODO: remove English stopwords from descriptions (optional step,
    # per the module header: "not a must").
    pass
def predict_all_data_requested():
    """Predict a segment for every app in the request spreadsheet and save the result.

    Reads 'appDescriptions2.xlsx' (sheet "Classify"), fills the segment
    column (index 3) with the prediction for each description (index 4),
    and writes the updated frame to 'PythonExport.xlsx'.
    """
    import pandas as pd
    from pandas import ExcelWriter
    # Importing `model` also triggers model_initialize() as a side effect,
    # so keep this import even though model_predict is unused here.
    from model import model_predict
    from server import prediction

    # from excel to DF
    file_name = 'appDescriptions2.xlsx'
    xl = pd.ExcelFile(file_name)
    df = xl.parse("Classify")
    print(df.shape)
    print(df.values[:, 3])  # segment
    print(df.values[:, 4])  # description

    # NOTE(review): the row count 1100 is hard-coded to match this particular
    # spreadsheet; confirm it still matches before reuse.
    for x in range(1100):
        pre = prediction(df.values[x, 4])
        print(pre)
        # DataFrame.set_value was removed in pandas 1.0; .iat is the
        # positional single-cell setter that replaces it.
        df.iat[x, 3] = pre

    # from DF to excel; the context manager saves and closes the writer.
    with ExcelWriter('PythonExport.xlsx') as writer:
        df.to_excel(writer, sheet_name='Sheet5')
predict_all_data_requested() | {"/data_preprocess.py": ["/language_helper.py", "/model.py"]} |
58,324 | saharmilis/StartApp-DataScienceChallenge | refs/heads/master | /language_helper.py | import langdetect
def detect(sentence):
    # Returns True when the detected language is NOT English -- despite the
    # generic name, this is a "non-English" check.
    return langdetect.detect(sentence) != 'en'
def clear_nonenglish_words(sentence):
    """Lowercase a sentence and keep only its ASCII-encodable words.

    The result carries a leading space before each retained word and has
    all commas stripped, matching the original output format exactly.
    """
    lowered = sentence.lower()
    kept = ''.join(' ' + token for token in lowered.split() if _is_english(token))
    return kept.replace(',', '')
def _is_english(s):
try:
s.encode('ascii')
except UnicodeEncodeError:
return False
else:
return True
| {"/data_preprocess.py": ["/language_helper.py", "/model.py"]} |
58,325 | saharmilis/StartApp-DataScienceChallenge | refs/heads/master | /model.py | import numpy as np
import pandas as pd
from sklearn import model_selection
from sklearn.neighbors import KNeighborsClassifier
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.externals import joblib
import os.path
import time
file_name_data_train = 'train.csv'
file_name_model_trained = 'model_trained.joblib.pkl'
file_name_model_vectorizer = 'model_vectorizer.joblib.pkl'
model = KNeighborsClassifier()
vectorizer = TfidfVectorizer(stop_words='english');
def model_initialize():
    """Load the persisted model if one exists, otherwise train a new one."""
    print('model_initialize')
    if os.path.isfile(file_name_model_trained):
        # model exists - load
        try:
            _model_load()
            return
        except Exception:
            # A bare `except:` here would also swallow KeyboardInterrupt /
            # SystemExit; catch Exception and fall through to retraining
            # when the saved artifacts cannot be loaded.
            pass
    # model does not exist (or failed to load) - train and persist a fresh one
    _model_train()
def model_predict(description):
    """Return the classifier's segment prediction for a single description."""
    # Vectorize with the fitted TF-IDF vocabulary, then classify.
    # (No `global` needed: the module-level model/vectorizer are only read.)
    features = vectorizer.transform([description])
    return model.predict(features.toarray())
def model_insert_new_data(appId, segment, description):
    """Append one labelled example to the training CSV and retrain the model.

    NOTE(review): per the original author's comment, this retrains from the
    full CSV rather than feeding new data to the classifier incrementally.
    """
    # Use the current timestamp as a unique row index for the new line.
    indexing_lines = int(time.time())
    # `with` guarantees the file handle is closed even if the write fails.
    with open(file_name_data_train, 'a') as f:
        f.write('\n' + str(indexing_lines) + ',' + str(appId) + ',' + str(segment) + ',' + str(description))
    # train
    _model_train()
def _model_train():
    """Fit the TF-IDF vectorizer and KNN classifier on the training CSV, then save them."""
    print('model_train')
    global model
    global vectorizer
    # (Removed the bogus `global indexing_lines` / `global segments`
    # declarations: neither name is assigned or read in this function.)

    # get dataframe. pd.DataFrame.from_csv was removed in pandas 1.0;
    # read_csv(index_col=0) preserves the old column-0-as-index behaviour.
    df = pd.read_csv(file_name_data_train, index_col=0)

    # split to X&Y
    array_values = df.values
    descriptions = array_values[:, 1]
    output = array_values[:, 2]

    # from strings to numbers & normalize
    bag_of_words = vectorizer.fit_transform(descriptions)

    # set training parameters
    validation_size = 0  # 0.2 for validation. 0 in production
    seed = 7

    # split data; train_test_split rejects test_size=0 in modern
    # scikit-learn, so use everything for training in that case.
    if validation_size:
        x_train, x_validation, y_train, y_validation = model_selection.train_test_split(
            bag_of_words.toarray(), output,
            test_size=validation_size, random_state=seed, shuffle=True)
    else:
        x_train, y_train = bag_of_words.toarray(), output

    # set classifier
    model = KNeighborsClassifier()
    model.fit(x_train, y_train)

    # save model (classifier + fitted vectorizer)
    _model_save()
def _model_load():
    """Restore the classifier and vectorizer from their pickled files instead of training."""
    global model
    global vectorizer
    # Loading replaces the untrained module-level instances.
    model = joblib.load(file_name_model_trained)
    vectorizer = joblib.load(file_name_model_vectorizer)
def _model_save():
    """Persist the trained classifier and its vectorizer to disk."""
    # compress=9 trades save speed for the smallest files.
    # (No `global` needed: the module-level objects are only read.)
    joblib.dump(model, file_name_model_trained, compress=9)
    joblib.dump(vectorizer, file_name_model_vectorizer, compress=9)
def get_segments():
    """Return the unique segment labels present in the training CSV."""
    # pd.DataFrame.from_csv was removed in pandas 1.0; read_csv(index_col=0)
    # is the equivalent replacement.
    df = pd.read_csv(file_name_data_train, index_col=0)
    segments = df.segment.unique()
    print(segments)
    return segments
def get_n_neighbors(description, n):
    """Return (segment, relative_score) pairs for the n nearest training examples.

    Scores are each neighbour's distance divided by the closest neighbour's
    distance, so the closest always scores 1.0 and larger means "further".
    """
    # get a representation of the app as a TF-IDF vector
    x_vector = vectorizer.transform([description])
    # get the n NN to the vector - sorted by 'closeness';
    # results is a (distances, indices) pair.
    results = model.kneighbors(X=x_vector.toarray(), n_neighbors=n, return_distance=True)

    # pd.DataFrame.from_csv was removed in pandas 1.0; use read_csv instead.
    df = pd.read_csv(file_name_data_train, index_col=0)
    # translate the neighbour row indices back to their segment labels
    results_segments = df.values[results[1][0], 2]

    # NOTE(review): if the closest neighbour is an exact match its distance
    # is 0 and this division yields inf/nan -- confirm whether that can
    # happen with this data before relying on the scores.
    results_score = [x / results[0][0][0] for x in results[0][0]]
    return zip(results_segments, results_score)
model_initialize();
if __name__ == '__main__':
print('main')
model_initialize();
| {"/data_preprocess.py": ["/language_helper.py", "/model.py"]} |
58,329 | rpb/Skittles-Bayes | refs/heads/master | /Skittles.py | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import scipy.stats as stat
# prior = stat.norm.pdf(range(-50,50),0.9, 2)
# Model = np.linspace(0, 1, 100)
def runLoop(Theta = np.array([0.1, 0.2]), pTheta = np.array([0.5, 0.5]), nLoop=10, plotSpace = 2):
    """
    Plots prior, likelihood and posterior probability over nLoop draws.
    Each draw does NOT updates the prior probability. Old data is kept and
    the posterior and likelihood are computed from the initial priors and
    cumulative data
    :Theta: array of possible outcomes
    :pTheta: array of prior probabilities of outcomes
    :nLoop: number of trials to run
    :plotSpace: number of trials between plot updates
    """
    # NOTE(review): array default arguments are shared across calls; safe here
    # only because pTheta is rebound (not mutated in place) below.
    # Dr. Lynch's two models
    # 10% Green skittles
    # 20% Green skittles
    # Prior belief:
    # Equal belief in either
    # Prior belief 2:
    # I believe there is only a 10% chance the bag was altered
    # pTheta = [0.1, 0.9]
    pTheta = pTheta/np.sum(pTheta)  # normalize the prior so it sums to 1
    # Initial belief
    pTheta0 = pTheta
    pTheta = np.array(pTheta)
    print (pTheta)
    data = []
    for i in range(0, nLoop):
        # Draw one skittle: green (1) with probability 0.1, since bagV >= 0.9.
        bagV = np.random.uniform(0,1)
        if bagV >= 0.9:
            data.append(1)
        else:
            data.append(0)
        plotSpace = int(plotSpace)
        nGreens = np.sum( data )
        nOther = len( data ) - nGreens
        # Recompute and plot every plotSpace draws (NOTE: Python 2 print
        # statements below -- this module is Python 2 only as written).
        if i % plotSpace == 0:
            print "Cumulative number of green Skittles: ", nGreens
            # I've left out the factor of N choose nGreen that is common
            # to pData and pDataGivenTheta
            # Compute the likelihood of the data for each value of theta:
            pDataGivenTheta = Theta**nGreens * (1-Theta)**nOther
            # Compute the posterior:
            pData = sum( pDataGivenTheta * pTheta )
            # Use Bayes' rule!
            pThetaGivenData = pDataGivenTheta * pTheta / pData
            checkNorm = sum(pThetaGivenData)  # sanity check: should be ~1
            hBins = np.linspace(0,1, 101)
            # Four stacked panels: starting prior, current prior,
            # likelihood, posterior.
            fig = plt.figure()
            ax1 = fig.add_subplot(4,1,1)
            ax1.hist(Theta, weights = pTheta0, bins = hBins, alpha = 0.4, label = "Starting Prior")
            ax1.legend()
            plt.xlim(0, 1)
            ax2 = fig.add_subplot(4,1,2)
            ax2.hist(Theta, weights = pTheta, bins = hBins, alpha = 0.4, label = "Current Prior")
            ax2.legend()
            plt.xlim(0, 1)
            ax3 = fig.add_subplot(4,1,3)
            ax3.hist(Theta, weights = pDataGivenTheta, bins = hBins, alpha = 0.4, label = "Likelihood")
            ax3.legend()
            plt.xlim(0, 1)
            ax4 = fig.add_subplot(4,1,4)
            ax4.hist(Theta, weights = pThetaGivenData, bins = hBins, alpha = 0.4, label = "Posterior")
            ax4.legend()
            plt.xlim(0, 1)
            plt.show()
    #pTheta = pThetaGivenData
    # Final summary after all draws: recompute from the cumulative counts.
    print "Cumulative number of green Skittles: ", nGreens
    # Compute the likelihood of the data for each value of theta:
    pDataGivenTheta = Theta**nGreens * (1-Theta)**nOther
    # Compute the posterior:
    pData = sum( pDataGivenTheta * pTheta )
    # Use Bayes' rule!
    pThetaGivenData = pDataGivenTheta * pTheta / pData
    checkNorm = sum(pThetaGivenData)
    hBins = np.linspace(0,1, 101)
    fig = plt.figure()
    ax1 = fig.add_subplot(3,1,1)
    ax1.hist(Theta, weights = pTheta0, bins = hBins, alpha = 0.4, label = "Prior")
    ax1.legend()
    plt.xlim(0, 1)
    ax2 = fig.add_subplot(4,1,2)
    ax2.hist(Theta, weights = pTheta, bins = hBins, alpha = 0.4, label = "Current Prior")
    ax2.legend()
    plt.xlim(0, 1)
    ax3 = fig.add_subplot(4,1,3)
    ax3.hist(Theta, weights = pDataGivenTheta, bins = hBins, alpha = 0.4, label = "Likelihood")
    ax3.legend()
    plt.xlim(0, 1)
    ax4 = fig.add_subplot(4,1,4)
    ax4.hist(Theta, weights = pThetaGivenData, bins = hBins, alpha = 0.4, label = "Posterior")
    ax4.legend()
    plt.xlim(0, 1)
    plt.show()
def runLoop_2(Theta = np.array([0.1, 0.2]), pTheta = np.array([0.5, 0.5]), nLoop=10, plotSpace = 1):
    """
    Plots prior, likelihood and posterior probability over nLoop draws.
    Each draw updates the prior probability i.e. prior = posterior and
    clears the 'old' data
    :Theta: array of possible outcomes
    :pTheta: array of prior probabilities of outcomes
    :nLoop: number of trials to run
    :plotSpace: number of trials between plot updates
    """
    # NOTE(review): array default arguments are shared across calls; safe here
    # only because pTheta is rebound (not mutated in place) below.
    # Dr. Lynch's two models
    # 10% Green skittles
    # 20% Green skittles
    # Prior belief:
    # Equal belief in either
    # Prior belief 2:
    # I believe there is only a 10% chance the bag was altered
    # pTheta = [0.1, 0.9]
    pTheta = pTheta/np.sum(pTheta)  # normalize the prior so it sums to 1
    # Initial belief
    pTheta0 = pTheta
    pTheta = np.array(pTheta)
    print (pTheta)
    data = []
    cumulativeG = 0
    for i in range(0, nLoop):
        # Draw one skittle: green (1) with probability 0.1, since bagV >= 0.9.
        bagV = np.random.uniform(0,1)
        if bagV >= 0.9:
            data.append(1)
        else:
            data.append(0)
        # Every plotSpace draws: update, plot, promote posterior to prior,
        # and discard the accumulated data ("each bag" is independent).
        # (NOTE: Python 2 print statements -- this module is Python 2 only.)
        if i % plotSpace == 0:
            plotSpace = int(plotSpace)
            nGreens = np.sum( data )
            nOther = len( data ) - nGreens
            cumulativeG += nGreens
            print "Number of green Skittles in this bag of ", plotSpace , " = ", nGreens
            # I've left out the factor of N choose nGreen that is common
            # to pData and pDataGivenTheta
            # Compute the likelihood of the data for each value of theta:
            pDataGivenTheta = Theta**nGreens * (1-Theta)**nOther
            # Compute the posterior:
            pData = sum( pDataGivenTheta * pTheta )
            # Use Bayes' rule!
            pThetaGivenData = pDataGivenTheta * pTheta / pData
            checkNorm = sum(pThetaGivenData)  # sanity check: should be ~1
            hBins = np.linspace(0,1, 101)
            # Four stacked panels: starting prior, current prior,
            # likelihood, posterior.
            fig = plt.figure()
            ax1 = fig.add_subplot(4,1,1)
            ax1.hist(Theta, weights = pTheta0, bins = hBins, alpha = 0.4, label = "Starting Prior")
            ax1.legend()
            plt.xlim(0, 1)
            ax2 = fig.add_subplot(4,1,2)
            ax2.hist(Theta, weights = pTheta, bins = hBins, alpha = 0.4, label = "Current Prior")
            ax2.legend()
            plt.xlim(0, 1)
            ax3 = fig.add_subplot(4,1,3)
            ax3.hist(Theta, weights = pDataGivenTheta, bins = hBins, alpha = 0.4, label = "Likelihood")
            ax3.legend()
            plt.xlim(0, 1)
            ax4 = fig.add_subplot(4,1,4)
            ax4.hist(Theta, weights = pThetaGivenData, bins = hBins, alpha = 0.4, label = "Posterior")
            ax4.legend()
            plt.xlim(0, 1)
            plt.show()
            # Sequential updating: the posterior becomes the next prior and
            # the data buffer is cleared.
            pTheta = pThetaGivenData
            data = []
    # Final summary and one last update/plot from the last bag's counts.
    print "Number of green Skittles in this bag: ", nGreens
    print "Total number of green skittles found: ", cumulativeG
    # Compute the likelihood of the data for each value of theta:
    pDataGivenTheta = Theta**nGreens * (1-Theta)**nOther
    # Compute the posterior:
    pData = sum( pDataGivenTheta * pTheta )
    # Use Bayes' rule!
    pThetaGivenData = pDataGivenTheta * pTheta / pData
    checkNorm = sum(pThetaGivenData)
    hBins = np.linspace(0,1, 100)
    fig = plt.figure()
    ax1 = fig.add_subplot(3,1,1)
    ax1.hist(Theta, weights = pTheta0, bins = hBins, alpha = 0.4, label = "Prior")
    ax1.legend()
    plt.xlim(0, 1)
    ax2 = fig.add_subplot(4,1,2)
    ax2.hist(Theta, weights = pTheta, bins = hBins, alpha = 0.4, label = "Current Prior")
    ax2.legend()
    plt.xlim(0, 1)
    ax3 = fig.add_subplot(4,1,3)
    ax3.hist(Theta, weights = pDataGivenTheta, bins = hBins, alpha = 0.4, label = "Likelihood")
    ax3.legend()
    plt.xlim(0, 1)
    ax4 = fig.add_subplot(4,1,4)
    ax4.hist(Theta, weights = pThetaGivenData, bins = hBins, alpha = 0.4, label = "Posterior")
    ax4.legend()
    plt.xlim(0, 1)
    plt.show()
    pTheta = pThetaGivenData
def SingleShot(N):
    """Run a single Bayesian update for a bag of N skittles.

    Assumes exactly 10% of the N draws were green (rounded), compares the
    10%-green and 20%-green models under an equal prior, and returns the
    posterior probabilities of the two models.

    :N: number of skittles drawn
    :returns: np.ndarray of posterior probabilities, aligned with Theta
    """
    # Dr. Lynch's two models: 10% green vs. 20% green skittles.
    Theta = np.array([0.1, 0.2])
    # Prior belief: equal belief in either model.
    pTheta = np.array([0.5, 0.5])
    # Prior belief 2: only a 10% chance the bag was altered.
    # pTheta = [0.1, 0.9]
    pTheta = pTheta/np.sum(pTheta)
    # Initial belief (kept for reference).
    pTheta0 = pTheta
    pTheta = np.array(pTheta)
    # The original used Python 2 print statements (a SyntaxError under
    # Python 3); use the function form instead.
    print("Prior: ", pTheta)
    print("Models: ", Theta)
    nGreens = round(N*1.0/10.0)
    nOther = N - nGreens
    # Likelihood under each model (binomial coefficient omitted; it is
    # common to numerator and denominator and cancels in the posterior).
    pDataGivenTheta = Theta**nGreens * (1-Theta)**nOther
    # Compute the evidence:
    pData = sum( pDataGivenTheta * pTheta )
    # Use Bayes' rule!
    pThetaGivenData = pDataGivenTheta * pTheta / pData
    checkNorm = sum(pThetaGivenData)  # sanity check: should sum to ~1
    # Return the posterior (the original computed it but discarded it;
    # callers that ignored the previous None return are unaffected).
    return pThetaGivenData
| {"/runSkittles.py": ["/Skittles.py"]} |
58,330 | rpb/Skittles-Bayes | refs/heads/master | /runSkittles.py | from Skittles import *
#Flat prior
Model1 = np.linspace(0, 1, 100)
prior1 = [0.01]*100
#normal distribution
Model2 = np.linspace(0, 1, 100)
# NOTE(review): the pdf is evaluated at the integers -50..49 with mean 0.5,
# so nearly all probability mass falls on the points closest to 0 -- confirm
# whether the Model2/Model3 grid values were intended here instead of range().
prior2 = stat.norm.pdf(range(-50,50),0.5, 2)
Model3 = np.linspace(0, 1, 1000)
prior3 = stat.norm.pdf(range(-500,500),0.5, 2)
#Update prob using cumulative data
runLoop(Model1, prior1, 10, 1)
#Update prob using last 1 draws
runLoop_2(Model1, prior1, 10, 1)
#Update prob using last 5 draws
runLoop_2(Model1, prior1, 10, 5)
| {"/runSkittles.py": ["/Skittles.py"]} |
58,373 | mingluzhao/Coord-Hunting-With-RL | refs/heads/master | /exec/evaluate.py | import os
import sys
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
dirName = os.path.dirname(__file__)
sys.path.append(os.path.join(dirName, '..'))
sys.path.append(os.path.join(dirName, '..', '..'))
import logging
import argparse
logging.getLogger('tensorflow').setLevel(logging.ERROR)
from src.environment.multiAgentEnv import *
from src.functionTools.loadSaveModel import *
from src.functionTools.trajectory import SampleTrajectory
from src.visualize.drawDemo import *
from src.environment.reward import *
from pygame.color import THECOLORS
from src.maddpg.trainer.MADDPG import *
maxEpisode = 60000
maxRunningStepsToSample = 75 # num of timesteps in one eps
class CalcPredatorsTrajKills:
    """Compute how many kills the predators scored over one trajectory.

    Sums the predators' rewards across all timesteps and divides by the
    per-kill reward.
    """
    def __init__(self, predatorsID, killReward):
        self.predatorsID = predatorsID
        self.killReward = killReward
        # Position of the per-agent reward list within each timestep tuple.
        self.rewardIDinTraj = 2

    def __call__(self, traj):
        # Total predator reward at each timestep, summed over the trajectory.
        stepRewards = []
        for timeStepInfo in traj:
            allAgentsReward = timeStepInfo[self.rewardIDinTraj]
            stepRewards.append(np.sum([allAgentsReward[pid] for pid in self.predatorsID]))
        trajReward = np.sum(stepRewards)
        # One kill corresponds to killReward units of reward.
        return trajReward / self.killReward
def parse_args(args=None):
    """Parse command line options for the chasing-experiment evaluation.

    :args: optional list of argument strings; defaults to sys.argv (the
        standard argparse behaviour), so existing no-argument callers keep
        working unchanged while tests can pass an explicit list.
    """
    parser = argparse.ArgumentParser("Multi-agent chasing experiment evaluation")
    parser.add_argument("--num-predators", type=int, default=3, help="number of predators")
    parser.add_argument("--speed", type=float, default=1.0, help="prey speed multiplier")
    parser.add_argument("--cost", type=float, default=0.0, help="cost-action ratio")
    parser.add_argument("--selfish", type=float, default=0.0, help="selfish index")
    parser.add_argument("--num-traj", type=int, default=10, help="number of trajectories to sample")
    parser.add_argument("--visualize", type=int, default=1, help="generate demo = 1, otherwise 0")
    parser.add_argument("--save-images", type=int, default=1, help="save demo images = 1, otherwise 0")
    return parser.parse_args(args)
def main():
arglist = parse_args()
numPredators = arglist.num_predators
preySpeedMultiplier = arglist.speed
costActionRatio = arglist.cost
selfishIndex = arglist.selfish
numTrajToSample = arglist.num_traj
visualize = arglist.visualize
saveImage = arglist.save_images
numPrey = 1
numBlocks = 2
maxTimeStep = 75
killReward = 10
killProportion = 0.2
biteReward = 0.0
print("evaluate: {} predators, {} prey, {} blocks, {} episodes with {} steps each eps, preySpeed: {}x, cost: {}, selfish: {}, sample {} trajectories, demo: {}, save: {}".
format(numPredators, numPrey, numBlocks, maxEpisode, maxTimeStep, preySpeedMultiplier, costActionRatio, selfishIndex, numTrajToSample, visualize, saveImage))
numAgents = numPredators + numPrey
numEntities = numAgents + numBlocks
predatorsID = list(range(numPredators))
preyGroupID = list(range(numPredators, numAgents))
blocksID = list(range(numAgents, numEntities))
predatorSize = 0.075
preySize = 0.05
blockSize = 0.2
entitiesSizeList = [predatorSize] * numPredators + [preySize] * numPrey + [blockSize] * numBlocks
predatorMaxSpeed = 1.0
blockMaxSpeed = None
preyMaxSpeedOriginal = 1.3
preyMaxSpeed = preyMaxSpeedOriginal * preySpeedMultiplier
entityMaxSpeedList = [predatorMaxSpeed] * numPredators + [preyMaxSpeed] * numPrey + [blockMaxSpeed] * numBlocks
entitiesMovableList = [True] * numAgents + [False] * numBlocks
massList = [1.0] * numEntities
collisionReward = 10 # originalPaper = 10*3
isCollision = IsCollision(getPosFromAgentState)
punishForOutOfBound = PunishForOutOfBound()
rewardPrey = RewardPrey(predatorsID, preyGroupID, entitiesSizeList, getPosFromAgentState, isCollision,
punishForOutOfBound, collisionPunishment = collisionReward)
collisionDist = predatorSize + preySize
getAgentsPercentageOfRewards = GetAgentsPercentageOfRewards(selfishIndex, collisionDist)
terminalCheck = TerminalCheck()
getCollisionPredatorReward = GetCollisionPredatorReward(biteReward, killReward, killProportion, sampleFromDistribution, terminalCheck)
getPredatorPreyDistance = GetPredatorPreyDistance(computeVectorNorm, getPosFromAgentState)
rewardPredator = RewardPredatorsWithKillProb(predatorsID, preyGroupID, entitiesSizeList, isCollision, terminalCheck, getPredatorPreyDistance,
getAgentsPercentageOfRewards, getCollisionPredatorReward)
reshapeAction = ReshapeAction()
getActionCost = GetActionCost(costActionRatio, reshapeAction, individualCost=True)
getPredatorsAction = lambda action: [action[predatorID] for predatorID in predatorsID]
rewardPredatorWithActionCost = lambda state, action, nextState: np.array(rewardPredator(state, action, nextState)) - np.array(getActionCost(getPredatorsAction(action)))
rewardFunc = lambda state, action, nextState: \
list(rewardPredatorWithActionCost(state, action, nextState)) + list(rewardPrey(state, action, nextState))
reset = ResetMultiAgentChasing(numAgents, numBlocks)
observeOneAgent = lambda agentID: Observe(agentID, predatorsID, preyGroupID, blocksID, getPosFromAgentState,
getVelFromAgentState)
observe = lambda state: [observeOneAgent(agentID)(state) for agentID in range(numAgents)]
getCollisionForce = GetCollisionForce()
applyActionForce = ApplyActionForce(predatorsID, preyGroupID, entitiesMovableList)
applyEnvironForce = ApplyEnvironForce(numEntities, entitiesMovableList, entitiesSizeList,
getCollisionForce, getPosFromAgentState)
integrateState = IntegrateState(numEntities, entitiesMovableList, massList,
entityMaxSpeedList, getVelFromAgentState, getPosFromAgentState)
transit = TransitMultiAgentChasing(numEntities, reshapeAction, applyActionForce, applyEnvironForce, integrateState)
isTerminal = lambda state: terminalCheck.terminal
initObsForParams = observe(reset())
obsShape = [initObsForParams[obsID].shape[0] for obsID in range(len(initObsForParams))]
sampleTrajectory = SampleTrajectory(maxRunningStepsToSample, transit, isTerminal, rewardFunc, reset)
worldDim = 2
actionDim = worldDim * 2 + 1
layerWidth = [128, 128]
# model ------------------------
buildMADDPGModels = BuildMADDPGModels(actionDim, numAgents, obsShape)
modelsList = [buildMADDPGModels(layerWidth, agentID) for agentID in range(numAgents)]
dirName = os.path.dirname(__file__)
fileName = "model{}predators{}prey{}blocks{}episodes{}stepPreySpeed{}PredatorActCost{}sensitive{}biteReward{}killPercent{}_agent".format(
numPredators, numPrey, numBlocks, maxEpisode, maxTimeStep, preySpeedMultiplier, costActionRatio, selfishIndex, biteReward, killProportion)
modelPaths = [os.path.join(dirName, '..', 'trainedModels', fileName + str(i) ) for i in range(numAgents)]
[restoreVariables(model, path) for model, path in zip(modelsList, modelPaths)]
actOneStepOneModel = ActOneStep(actByPolicyTrainNoisy)
policy = lambda allAgentsStates: [actOneStepOneModel(model, observe(allAgentsStates)) for model in modelsList]
# generate trajectories ------------
numKillsList = []
trajToRender = []
trajList = []
calcPredatorsTrajKills = CalcPredatorsTrajKills(predatorsID, killReward)
for i in range(numTrajToSample):
traj = sampleTrajectory(policy)
numKills = calcPredatorsTrajKills(traj)
numKillsList.append(numKills)
trajToRender = trajToRender + list(traj)
trajList.append(traj)
meanTrajKill = np.mean(numKillsList)
seTrajKill = np.std(numKillsList) / np.sqrt(len(numKillsList) - 1)
print('meanTrajKill', meanTrajKill, 'se ', seTrajKill)
# save trajectories ------------
trajectoryDirectory = os.path.join(dirName, '..', 'trajectories')
if not os.path.exists(trajectoryDirectory):
os.makedirs(trajectoryDirectory)
trajFileName = "model{}predators{}prey{}blocks{}episodes{}stepPreySpeed{}PredatorActCost{}sensitive{}biteReward{}killPercent{}_Traj".format(
numPredators, numPrey, numBlocks, maxEpisode, maxTimeStep, preySpeedMultiplier, costActionRatio,
selfishIndex, biteReward, killProportion)
trajSavePath = os.path.join(trajectoryDirectory, trajFileName)
saveToPickle(trajList, trajSavePath)
# visualize ------------
if visualize:
trajList = loadFromPickle(trajSavePath)
screenWidth = 700
screenHeight = 700
screen = pg.display.set_mode((screenWidth, screenHeight))
screenColor = THECOLORS['black']
xBoundary = [0, 700]
yBoundary = [0, 700]
lineColor = THECOLORS['white']
lineWidth = 4
drawBackground = DrawBackground(screen, screenColor, xBoundary, yBoundary, lineColor, lineWidth)
FPS = 10
numBlocks = 2
predatorColor = THECOLORS['white']
preyColor = THECOLORS['green']
blockColor = THECOLORS['grey']
circleColorSpace = [predatorColor] * numPredators + [preyColor] * numPrey + [blockColor] * numBlocks
viewRatio = 1.5
preySize = int(0.05 * screenWidth / (2 * viewRatio))
predatorSize = int(0.075 * screenWidth / (3 * viewRatio)) # without boarder
blockSize = int(0.2 * screenWidth / (2 * viewRatio))
circleSizeSpace = [predatorSize] * numPredators + [preySize] * numPrey + [blockSize] * numBlocks
positionIndex = [0, 1]
agentIdsToDraw = list(range(numPredators + numPrey + numBlocks))
conditionName = "model{}predators{}prey{}blocks{}episodes{}stepPreySpeed{}PredatorActCost{}sensitive{}biteReward{}killPercent{}".format(
numPredators, numPrey, numBlocks, maxEpisode, maxTimeStep, preySpeedMultiplier, costActionRatio,
selfishIndex, biteReward, killProportion)
imageSavePath = os.path.join(dirName, '..', 'trajectories', conditionName)
if not os.path.exists(imageSavePath):
os.makedirs(imageSavePath)
imageFolderName = str('forDemo')
saveImageDir = os.path.join(os.path.join(imageSavePath, imageFolderName))
if not os.path.exists(saveImageDir):
os.makedirs(saveImageDir)
outsideCircleColor = [THECOLORS['red']] * numPredators
outsideCircleSize = int(predatorSize * 1.5)
drawCircleOutside = DrawCircleOutside(screen, predatorsID, positionIndex,
outsideCircleColor, outsideCircleSize, viewRatio= viewRatio)
drawState = DrawState(FPS, screen, circleColorSpace, circleSizeSpace, agentIdsToDraw,
positionIndex, saveImage, saveImageDir, preyGroupID, predatorsID,
drawBackground, drawCircleOutside=drawCircleOutside, viewRatio= viewRatio)
# MDP Env
stateID = 0
nextStateID = 3
predatorSizeForCheck = 0.075
preySizeForCheck = 0.05
checkStatus = CheckStatus(predatorsID, preyGroupID, isCollision, predatorSizeForCheck, preySizeForCheck, stateID, nextStateID)
chaseTrial = ChaseTrialWithKillNotation(stateID, drawState, checkStatus)
[chaseTrial(trajectory) for trajectory in np.array(trajList[:20])]
# Script entry point. (A dataset-extraction artifact — an import-graph JSON
# blob fused onto the `main()` line — has been removed; it was not valid Python.)
if __name__ == '__main__':
    main()
58,374 | mingluzhao/Coord-Hunting-With-RL | refs/heads/master | /src/visualize/drawDemo.py | import pygame as pg
import numpy as np
import os
class DrawBackground:
    """Clears the screen and draws the arena border, plus optional rectangular obstacles."""

    def __init__(self, screen, screenColor, xBoundary, yBoundary, lineColor, lineWidth, xObstacles=None, yObstacles=None):
        self.screen = screen
        self.screenColor = screenColor
        self.xBoundary = xBoundary
        self.yBoundary = yBoundary
        self.lineColor = lineColor
        self.lineWidth = lineWidth
        self.xObstacles = xObstacles
        self.yObstacles = yObstacles

    def __call__(self):
        # Drain the pygame event queue so the window stays responsive;
        # quit on window-close, exit on ESC.
        for event in pg.event.get():
            if event.type == pg.QUIT:
                pg.quit()
            if event.type == pg.KEYDOWN:
                if event.key == pg.K_ESCAPE:
                    exit()
        self.screen.fill(self.screenColor)
        borderRect = [self.xBoundary[0], self.yBoundary[0], self.xBoundary[1], self.yBoundary[1]]
        pg.draw.rect(self.screen, self.lineColor, borderRect, self.lineWidth)
        if self.xObstacles and self.yObstacles:
            for obstacleX, obstacleY in zip(self.xObstacles, self.yObstacles):
                obstacleRect = [obstacleX[0], obstacleY[0], obstacleX[1] - obstacleX[0], obstacleY[1] - obstacleY[0]]
                pg.draw.rect(self.screen, self.lineColor, obstacleRect)
        return
class DrawCircleOutside:
    """Draws highlight circles behind selected agents (e.g. halos around predators).

    World coordinates (scaled by viewRatio) are mapped linearly onto the
    screen: world 0 maps to screen center.
    """

    def __init__(self, screen, outsideCircleAgentIds, positionIndex, circleColors, circleSize, viewRatio = 1):
        self.screen = screen
        self.viewRatio = viewRatio
        self.screenX, self.screenY = self.screen.get_width(), self.screen.get_height()
        self.outsideCircleAgentIds = outsideCircleAgentIds
        self.xIndex, self.yIndex = positionIndex
        self.circleColors = circleColors
        self.circleSize = circleSize

    def __call__(self, state):
        for agentIndex in self.outsideCircleAgentIds:
            # Fix: np.int was deprecated in NumPy 1.20 and removed in 1.24;
            # the builtin int() is the exact equivalent here.
            agentPos = [int((state[agentIndex][self.xIndex] / self.viewRatio + 1) * (self.screenX / 2)),
                        int((state[agentIndex][self.yIndex] / self.viewRatio + 1) * (self.screenY / 2))]
            agentColor = tuple(self.circleColors[list(self.outsideCircleAgentIds).index(agentIndex)])
            pg.draw.circle(self.screen, agentColor, agentPos, self.circleSize)
        return
class DrawState:
    """Renders one frame: background, optional halo circles, then every agent
    and block as a colored circle, with special colors for bite/kill events.

    The bite/kill counters hold the event color for two consecutive frames so
    the event is visible at playback speed.
    """

    def __init__(self, fps, screen, colorSpace, circleSizeSpace, agentIdsToDraw, positionIndex, saveImage,
                 imagePath, preyGroupID, predatorsID, drawBackGround, drawCircleOutside = None, viewRatio = 1):
        self.fps = fps
        self.screen = screen
        self.viewRatio = viewRatio
        self.screenX, self.screenY = self.screen.get_width(), self.screen.get_height()
        self.colorSpace = colorSpace
        self.circleSizeSpace = circleSizeSpace
        self.agentIdsToDraw = agentIdsToDraw
        self.xIndex, self.yIndex = positionIndex
        self.saveImage = saveImage
        self.imagePath = imagePath
        self.drawBackGround = drawBackGround
        self.drawCircleOutside = drawCircleOutside
        self.preyGroupID = preyGroupID
        self.predatorsID = predatorsID
        self.biteCount = 0   # frames remaining to keep showing the bite color
        self.killCount = 0   # frames remaining to keep showing the kill color

    def __call__(self, state, agentsStatus, posterior = None):
        fpsClock = pg.time.Clock()
        self.drawBackGround()
        circleColors = self.colorSpace
        if self.drawCircleOutside:
            self.drawCircleOutside(state)
        for agentIndex in self.agentIdsToDraw:
            # Fix: np.int was removed in NumPy >= 1.24; builtin int() is equivalent.
            agentPos = [int((state[agentIndex][self.xIndex] / self.viewRatio + 1) * (self.screenX / 2)),
                        int((state[agentIndex][self.yIndex] / self.viewRatio + 1) * (self.screenY / 2))]
            agentColor = tuple(circleColors[agentIndex])
            circleSize = self.circleSizeSpace[agentIndex]
            if agentIndex in self.preyGroupID:
                agentStatus = agentsStatus[agentIndex]
                if agentStatus == 'kill' or self.killCount != 0:
                    killPreyColor = [0, 120, 0]
                    pg.draw.circle(self.screen, killPreyColor, agentPos, circleSize)
                    # hold the kill color for two frames
                    self.killCount += 1
                    if self.killCount == 2:
                        self.killCount = 0
                elif agentStatus == 'bite' or self.biteCount != 0:
                    bitePreyColor = [200, 255, 200]
                    pg.draw.circle(self.screen, bitePreyColor, agentPos, circleSize)
                    # hold the bite color for two frames
                    self.biteCount += 1
                    if self.biteCount == 2:
                        self.biteCount = 0
                else:
                    pg.draw.circle(self.screen, agentColor, agentPos, circleSize)
            elif agentIndex in self.predatorsID:
                agentStatus = agentsStatus[agentIndex]
                agentColorToDraw = [100, 0, 0] if agentStatus == 'bite' else agentColor
                pg.draw.circle(self.screen, agentColorToDraw, agentPos, circleSize)
            else:
                pg.draw.circle(self.screen, agentColor, agentPos, circleSize)

        pg.display.flip()
        # Fix: idiomatic truthiness test instead of `== True`.
        if self.saveImage:
            # name each frame by the current count of files in the folder
            filenameList = os.listdir(self.imagePath)
            pg.image.save(self.screen, self.imagePath + '/' + str(len(filenameList)) + '.png')
        fpsClock.tick(self.fps)
        return self.screen
class ChaseTrialWithKillNotation:
    """Replays a recorded trajectory frame by frame, annotating agents with
    the bite/kill statuses derived by ``checkStatus``."""

    def __init__(self, stateIndex, drawState, checkStatus):
        self.stateIndex = stateIndex
        self.drawState = drawState
        self.checkStatus = checkStatus

    def __call__(self, trajectory):
        lastIndex = len(trajectory) - 1
        for stepIndex, timeStep in enumerate(trajectory):
            # the final step has no successor; checkStatus handles None
            followingStep = trajectory[stepIndex + 1] if stepIndex != lastIndex else None
            agentsStatus = self.checkStatus(timeStep, followingStep)
            state = timeStep[self.stateIndex]
            self.drawState(state, agentsStatus, None)
        return
class CheckStatus:
    """Derives per-agent 'bite'/'kill' annotations for one trajectory time step."""

    def __init__(self, predatorsID, preyGroupID, isCollision, predatorSize, preySize, stateID, nextStateID):
        self.predatorsID = predatorsID
        self.preyGroupID = preyGroupID
        self.isCollision = isCollision
        self.predatorSize = predatorSize
        self.preySize = preySize
        self.stateID = stateID
        self.nextStateID = nextStateID

    def __call__(self, timeStep, nextTimeStep):
        numAgents = len(self.predatorsID) + len(self.preyGroupID)
        agentsStatus = [0] * numAgents
        # A mismatch between this step's recorded next-state and the following
        # step's state means the episode was reset in between — i.e. a kill.
        if nextTimeStep is None:
            killed = False
        else:
            killed = np.any([tuple(a) != tuple(b)
                             for a, b in zip(nextTimeStep[self.stateID], timeStep[self.nextStateID])])
        nextStates = timeStep[self.nextStateID]
        for predatorID in self.predatorsID:
            for preyID in self.preyGroupID:
                if self.isCollision(nextStates[predatorID], nextStates[preyID], self.predatorSize, self.preySize):
                    agentsStatus[predatorID] = 'bite'
                    agentsStatus[preyID] = 'kill' if killed else 'bite'
        return agentsStatus
| {"/exec/evaluate.py": ["/src/functionTools/loadSaveModel.py", "/src/visualize/drawDemo.py", "/src/environment/reward.py", "/src/maddpg/trainer/MADDPG.py"], "/exec/train.py": ["/src/maddpg/trainer/MADDPG.py", "/src/maddpg/rlTools/RLrun.py", "/src/functionTools/loadSaveModel.py", "/src/environment/reward.py"]} |
58,375 | mingluzhao/Coord-Hunting-With-RL | refs/heads/master | /src/functionTools/loadSaveModel.py | import pickle
def saveVariables(model, path):
    """Persist a TF session's variables to *path* via the Saver registered in
    the graph's "saver" collection."""
    saver = model.graph.get_collection_ref("saver")[0]
    saver.save(model, path)
    print("Model saved in {}".format(path))
def saveToPickle(data, path):
    """Pickle *data* to the file at *path*.

    Fix: uses a ``with`` block so the file handle is closed even if
    ``pickle.dump`` raises (the original leaked the handle on error).
    """
    with open(path, "wb") as pklFile:
        pickle.dump(data, pklFile)
def loadFromPickle(path):
    """Load and return the pickled object stored at *path*.

    Fixes: uses a ``with`` block (handle was leaked on error) and no longer
    shadows the ``object`` builtin with a local name.
    """
    with open(path, 'rb') as pickleIn:
        loaded = pickle.load(pickleIn)
    return loaded
def restoreVariables(model, path):
    """Restore a TF session's variables from the checkpoint at *path* and
    return the same session for chaining.

    Fix: a dataset-extraction artifact (an import-graph JSON blob fused onto
    the ``return model`` line) has been removed; it was not valid Python.
    """
    graph = model.graph
    saver = graph.get_collection_ref("saver")[0]
    saver.restore(model, path)
    print("Model restored from {}".format(path))
    return model
58,376 | mingluzhao/Coord-Hunting-With-RL | refs/heads/master | /exec/train.py | import os
import sys
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
os.environ['KMP_DUPLICATE_LIB_OK']='True'
dirName = os.path.dirname(__file__)
sys.path.append(os.path.join(dirName, '..'))
sys.path.append(os.path.join(dirName, '..', '..'))
import logging
logging.getLogger('tensorflow').setLevel(logging.ERROR)
import argparse
from src.maddpg.trainer.MADDPG import BuildMADDPGModels, TrainCritic, TrainActor, TrainCriticBySASR, \
TrainActorFromSA, TrainMADDPGModelsWithBuffer, ActOneStep, actByPolicyTrainNoisy, actByPolicyTargetNoisyForNextState
from src.maddpg.rlTools.RLrun import UpdateParameters, SampleOneStep, SampleFromMemory,\
RunTimeStep, RunEpisode, RunAlgorithm, getBuffer, SaveModel, StartLearn
from src.functionTools.loadSaveModel import saveVariables
from src.environment.multiAgentEnv import TransitMultiAgentChasing, ApplyActionForce, ApplyEnvironForce, \
ResetMultiAgentChasing, ReshapeAction, Observe, GetCollisionForce, IntegrateState, \
IsCollision, PunishForOutOfBound, getPosFromAgentState, getVelFromAgentState, GetActionCost
from src.environment.reward import *
# fixed training parameters
maxEpisode = 60000            # total training episodes
learningRateActor = 0.01      # Adam learning rate for each actor network
learningRateCritic = 0.01     # Adam learning rate for each critic network
gamma = 0.95                  # reward discount factor
tau=0.01                      # soft target-network update rate
bufferSize = 1e6              # replay buffer capacity (transitions)
minibatchSize = 1024          # transitions sampled per gradient update
def parse_args(argv=None):
    """Parse command-line options for the chasing experiment.

    Args:
        argv: optional list of argument strings. Defaults to ``None``, which
            makes argparse read ``sys.argv[1:]`` exactly as before — the new
            parameter is backward compatible and allows testing without
            touching the real command line.

    Returns:
        argparse.Namespace with num_predators, speed, cost, selfish.
    """
    parser = argparse.ArgumentParser("Multi-agent chasing experiment training")
    parser.add_argument("--num-predators", type=int, default=3, help="number of predators")
    parser.add_argument("--speed", type=float, default=1.0, help="prey speed multiplier")
    parser.add_argument("--cost", type=float, default=0.0, help="cost-action ratio")
    parser.add_argument("--selfish", type=float, default=0.0, help="selfish index")
    return parser.parse_args(argv)
def main():
    """Train one MADDPG model per agent on the predator-prey chasing task.

    Reads experiment options from the command line, wires up the environment
    (transition, reward, observation functions), builds per-agent actor-critic
    models, then runs the replay-buffer training loop, saving model
    checkpoints via SaveModel.
    """
    arglist = parse_args()
    numPredators = arglist.num_predators
    preySpeedMultiplier = arglist.speed
    costActionRatio = arglist.cost
    selfishIndex = arglist.selfish

    # fixed episode/reward settings for this experiment
    numPrey = 1
    numBlocks = 2
    saveAllmodels = 0 # save all models during training
    maxTimeStep = 75
    killReward = 10
    killProportion = 0.2
    biteReward = 0.0

    print("train: {} predators, {} prey, {} blocks, {} episodes with {} steps each eps, preySpeed: {}x, cost: {}, selfish: {}".
        format(numPredators, numPrey, numBlocks, maxEpisode, maxTimeStep, preySpeedMultiplier, costActionRatio, selfishIndex))

    # entity indexing convention: predators first, then prey, then static blocks
    numAgents = numPredators + numPrey
    numEntities = numAgents + numBlocks
    predatorsID = list(range(numPredators))
    preyGroupID = list(range(numPredators, numAgents))
    blocksID = list(range(numAgents, numEntities))

    predatorSize = 0.075
    preySize = 0.05
    blockSize = 0.2
    entitiesSizeList = [predatorSize] * numPredators + [preySize] * numPrey + [blockSize] * numBlocks
    predatorMaxSpeed = 1.0
    blockMaxSpeed = None
    preyMaxSpeedOriginal = 1.3
    preyMaxSpeed = preyMaxSpeedOriginal * preySpeedMultiplier
    entityMaxSpeedList = [predatorMaxSpeed] * numPredators + [preyMaxSpeed] * numPrey + [blockMaxSpeed] * numBlocks
    entitiesMovableList = [True] * numAgents + [False] * numBlocks
    massList = [1.0] * numEntities

    # prey reward: boundary punishment plus punishment per predator collision
    collisionReward = 10 # originalPaper = 10*3
    isCollision = IsCollision(getPosFromAgentState)
    punishForOutOfBound = PunishForOutOfBound()
    rewardPrey = RewardPrey(predatorsID, preyGroupID, entitiesSizeList, getPosFromAgentState, isCollision,
                            punishForOutOfBound, collisionPunishment = collisionReward)

    # predator reward: bites kill with probability killProportion; kill reward
    # is split among predators by distance (sharpness set by selfishIndex)
    collisionDist = predatorSize + preySize
    getAgentsPercentageOfRewards = GetAgentsPercentageOfRewards(selfishIndex, collisionDist)
    terminalCheck = TerminalCheck()
    getCollisionPredatorReward = GetCollisionPredatorReward(biteReward, killReward, killProportion, sampleFromDistribution, terminalCheck)
    getPredatorPreyDistance = GetPredatorPreyDistance(computeVectorNorm, getPosFromAgentState)
    rewardPredator = RewardPredatorsWithKillProb(predatorsID, preyGroupID, entitiesSizeList, isCollision, terminalCheck, getPredatorPreyDistance,
                                                 getAgentsPercentageOfRewards, getCollisionPredatorReward)

    reshapeAction = ReshapeAction()
    getActionCost = GetActionCost(costActionRatio, reshapeAction, individualCost=True)
    getPredatorsAction = lambda action: [action[predatorID] for predatorID in predatorsID]
    rewardPredatorWithActionCost = lambda state, action, nextState: np.array(rewardPredator(state, action, nextState)) - np.array(getActionCost(getPredatorsAction(action)))

    # joint reward vector: predator rewards (net of action cost) then prey rewards
    rewardFunc = lambda state, action, nextState: \
        list(rewardPredatorWithActionCost(state, action, nextState)) + list(rewardPrey(state, action, nextState))

    reset = ResetMultiAgentChasing(numAgents, numBlocks)
    observeOneAgent = lambda agentID: Observe(agentID, predatorsID, preyGroupID, blocksID, getPosFromAgentState,
                                              getVelFromAgentState)
    observe = lambda state: [observeOneAgent(agentID)(state) for agentID in range(numAgents)]

    # physics pipeline: action forces -> collision forces -> state integration
    getCollisionForce = GetCollisionForce()
    applyActionForce = ApplyActionForce(predatorsID, preyGroupID, entitiesMovableList)
    applyEnvironForce = ApplyEnvironForce(numEntities, entitiesMovableList, entitiesSizeList,
                                          getCollisionForce, getPosFromAgentState)
    integrateState = IntegrateState(numEntities, entitiesMovableList, massList,
                                    entityMaxSpeedList, getVelFromAgentState, getPosFromAgentState)
    transit = TransitMultiAgentChasing(numEntities, reshapeAction, applyActionForce, applyEnvironForce, integrateState)
    isTerminal = lambda state: terminalCheck.terminal

    # probe one reset to obtain each agent's observation dimensionality
    initObsForParams = observe(reset())
    obsShape = [initObsForParams[obsID].shape[0] for obsID in range(len(initObsForParams))]

    worldDim = 2
    actionDim = worldDim * 2 + 1  # presumably no-op + signed force per axis — confirm against ReshapeAction
    layerWidth = [128, 128]

    #------------ models ------------------------
    buildMADDPGModels = BuildMADDPGModels(actionDim, numAgents, obsShape)
    modelsList = [buildMADDPGModels(layerWidth, agentID) for agentID in range(numAgents)]

    trainCriticBySASR = TrainCriticBySASR(actByPolicyTargetNoisyForNextState, learningRateCritic, gamma)
    trainCritic = TrainCritic(trainCriticBySASR)
    trainActorFromSA = TrainActorFromSA(learningRateActor)
    trainActor = TrainActor(trainActorFromSA)

    paramUpdateInterval = 1 #
    updateParameters = UpdateParameters(paramUpdateInterval, tau)
    sampleBatchFromMemory = SampleFromMemory(minibatchSize)

    # begin learning only after the buffer holds enough transitions
    learnInterval = 100
    learningStartBufferSize = minibatchSize * maxTimeStep
    startLearn = StartLearn(learningStartBufferSize, learnInterval)

    trainMADDPGModels = TrainMADDPGModelsWithBuffer(updateParameters, trainActor, trainCritic, sampleBatchFromMemory, startLearn, modelsList)

    actOneStepOneModel = ActOneStep(actByPolicyTrainNoisy)
    actOneStep = lambda allAgentsStates, runTime: [actOneStepOneModel(model, allAgentsStates) for model in modelsList]

    sampleOneStep = SampleOneStep(transit, rewardFunc)
    runTimeStep = RunTimeStep(actOneStep, sampleOneStep, trainMADDPGModels, observe = observe)

    runEpisode = RunEpisode(reset, runTimeStep, maxTimeStep, isTerminal)

    # periodic checkpointing: one SaveModel per agent
    getAgentModel = lambda agentId: lambda: trainMADDPGModels.getTrainedModels()[agentId]
    getModelList = [getAgentModel(i) for i in range(numAgents)]
    modelSaveRate = 1000
    fileName = "model{}predators{}prey{}blocks{}episodes{}stepPreySpeed{}PredatorActCost{}sensitive{}biteReward{}killPercent{}_agent".format(
        numPredators, numPrey, numBlocks, maxEpisode, maxTimeStep, preySpeedMultiplier, costActionRatio, selfishIndex, biteReward, killProportion)

    modelDir = os.path.join(dirName, '..', 'trainedModels')
    if not os.path.exists(modelDir):
        os.makedirs(modelDir)
    modelPath = os.path.join(modelDir, fileName)

    saveModels = [SaveModel(modelSaveRate, saveVariables, getTrainedModel, modelPath + str(i), saveAllmodels) for i, getTrainedModel in enumerate(getModelList)]

    maddpg = RunAlgorithm(runEpisode, maxEpisode, saveModels, numAgents)
    replayBuffer = getBuffer(bufferSize)
    meanRewardList = maddpg(replayBuffer)
# script entry point: run training when executed directly
if __name__ == '__main__':
    main()
| {"/exec/evaluate.py": ["/src/functionTools/loadSaveModel.py", "/src/visualize/drawDemo.py", "/src/environment/reward.py", "/src/maddpg/trainer/MADDPG.py"], "/exec/train.py": ["/src/maddpg/trainer/MADDPG.py", "/src/maddpg/rlTools/RLrun.py", "/src/functionTools/loadSaveModel.py", "/src/environment/reward.py"]} |
58,377 | mingluzhao/Coord-Hunting-With-RL | refs/heads/master | /src/environment/reward.py | import numpy as np
import random
def sampleFromDistribution(distribution):
    """Draw one key from *distribution* with probability proportional to its value.

    *distribution* maps hypotheses to non-negative weights; weights are
    normalized before sampling.
    """
    hypotheses = list(distribution.keys())
    weights = list(distribution.values())
    totalWeight = sum(weights)
    probs = [weight / totalWeight for weight in weights]
    drawn = np.random.multinomial(1, probs)
    return hypotheses[list(drawn).index(1)]
def computeVectorNorm(vector):
    """Return the Euclidean (L2) norm of *vector*."""
    return np.sqrt(np.sum(np.square(vector)))
class GetAgentsPercentageOfRewards:
    """Splits a kill reward among predators based on their distances to the prey.

    A larger selfishIndex concentrates the reward on closer predators.
    """

    def __init__(self, selfishIndex, collisionMinDist):
        self.selfishIndex = selfishIndex
        self.collisionMinDist = collisionMinDist
        self.getPercent = lambda dist: (dist + 1 - self.collisionMinDist) ** (-self.selfishIndex)
        # for computational purposes, selfish index > 100 is taken as purely selfish
        self.individualReward = (self.selfishIndex > 100)

    def __call__(self, agentsDistanceList, predatorID):
        if self.individualReward:
            # winner-takes-all: only the biting predator gets a share
            shares = np.zeros(len(agentsDistanceList))
            shares[predatorID] = 1
            return shares
        rawShares = [self.getPercent(dist) for dist in agentsDistanceList]
        return np.array(rawShares) / np.sum(rawShares)
class GetCollisionPredatorReward:
    """Predator reward vector for one predator-prey contact.

    Each bite kills the prey with probability killProportion. On a kill,
    killReward is distributed by killRewardPercent and the episode is flagged
    terminal; otherwise only the biting predator receives biteReward.
    """

    def __init__(self, biteReward, killReward, killProportion, sampleFromDistribution, terminalCheck):
        self.biteReward = biteReward
        self.killReward = killReward
        self.killProportion = killProportion
        self.sampleFromDistribution = sampleFromDistribution
        self.terminalCheck = terminalCheck

    def __call__(self, numPredators, killRewardPercent, collisionID):
        # prey already killed earlier in this step: nothing more to pay out
        if self.terminalCheck.terminal:
            return [0] * numPredators
        isKill = self.sampleFromDistribution({1: self.killProportion, 0: 1 - self.killProportion})
        if not isKill:
            reward = [0] * numPredators
            reward[collisionID] = self.biteReward
            return reward
        self.terminalCheck.isTerminal()
        return self.killReward * np.array(killRewardPercent)
class GetPredatorPreyDistance:
    """Computes each predator's distance (via the injected norm) to one prey."""

    def __init__(self, computeVectorNorm, getPosFromState):
        self.computeVectorNorm = computeVectorNorm
        self.getPosFromState = getPosFromState

    def __call__(self, predatorsStates, preyState):
        preyPos = np.array(self.getPosFromState(preyState))
        return [self.computeVectorNorm(preyPos - np.array(self.getPosFromState(predatorState)))
                for predatorState in predatorsStates]
class TerminalCheck(object):
    """Mutable flag recording whether the prey has been killed this step/episode."""

    def __init__(self):
        # start in the non-terminal state
        self.terminal = False

    def reset(self):
        """Clear the kill flag."""
        self.terminal = False

    def isTerminal(self):
        """Mark the prey as killed (terminal)."""
        self.terminal = True
class RewardPredatorsWithKillProb:
    """Per-step predator rewards where each bite probabilistically kills the prey."""

    def __init__(self, predatorsID, preyGroupID, entitiesSizeList, isCollision, terminalCheck,
                 getPredatorPreyDistance, getAgentsPercentageOfRewards, getCollisionPredatorReward):
        self.predatorsID = predatorsID
        self.preyGroupID = preyGroupID
        self.entitiesSizeList = entitiesSizeList
        self.isCollision = isCollision
        self.terminalCheck = terminalCheck
        self.getPredatorPreyDistance = getPredatorPreyDistance
        self.getAgentsPercentageOfRewards = getAgentsPercentageOfRewards
        self.getCollisionPredatorReward = getCollisionPredatorReward

    def __call__(self, state, action, nextState):
        self.terminalCheck.reset()
        numPredators = len(self.predatorsID)
        predatorsNextState = [nextState[pid] for pid in self.predatorsID]
        totalReward = np.zeros(numPredators)
        for preyID in self.preyGroupID:
            preyNextState = nextState[preyID]
            preySize = self.entitiesSizeList[preyID]
            distances = self.getPredatorPreyDistance(predatorsNextState, preyNextState)
            # Visit predators in random order so that, when several touch the
            # prey simultaneously, a random one rolls the kill sample first.
            shuffledPredators = self.predatorsID.copy()
            random.shuffle(shuffledPredators)
            for predatorID in shuffledPredators:
                predatorSize = self.entitiesSizeList[predatorID]
                predatorNextState = nextState[predatorID]
                if self.isCollision(predatorNextState, preyNextState, predatorSize, preySize):
                    shares = self.getAgentsPercentageOfRewards(distances, predatorID)
                    totalReward = totalReward + np.array(self.getCollisionPredatorReward(numPredators, shares, predatorID))
        return totalReward
class RewardPrey:
    """Per-step reward for each prey: a boundary punishment plus a fixed
    penalty for every predator currently in contact.

    Fix: a dataset-extraction artifact (an import-graph JSON blob fused onto
    the ``return reward`` line) has been removed; it was not valid Python.
    """

    def __init__(self, predatorsID, preyGroupID, entitiesSizeList, getPosFromState, isCollision, punishForOutOfBound,
                 collisionPunishment):
        self.predatorsID = predatorsID
        self.getPosFromState = getPosFromState
        self.entitiesSizeList = entitiesSizeList
        self.preyGroupID = preyGroupID
        self.isCollision = isCollision
        self.collisionPunishment = collisionPunishment
        self.punishForOutOfBound = punishForOutOfBound

    def __call__(self, state, action, nextState):
        reward = []
        for preyID in self.preyGroupID:
            preyReward = 0
            preyNextState = nextState[preyID]
            preyNextPos = self.getPosFromState(preyNextState)
            preySize = self.entitiesSizeList[preyID]
            # penalty for leaving the arena
            preyReward -= self.punishForOutOfBound(preyNextPos)
            # fixed punishment per predator touching this prey
            for predatorID in self.predatorsID:
                predatorSize = self.entitiesSizeList[predatorID]
                predatorNextState = nextState[predatorID]
                if self.isCollision(predatorNextState, preyNextState, predatorSize, preySize):
                    preyReward -= self.collisionPunishment
            reward.append(preyReward)
        return reward
58,378 | mingluzhao/Coord-Hunting-With-RL | refs/heads/master | /src/maddpg/trainer/MADDPG.py | import tensorflow as tf
import numpy as np
import os
os.environ['KMP_DUPLICATE_LIB_OK']='True'
import tensorflow.contrib.layers as layers
import src.maddpg.rlTools.tf_util as U
class BuildMADDPGModels:
    """Factory for per-agent MADDPG TensorFlow (v1) graphs.

    Each call builds a fresh tf.Graph containing: an actor (train + target
    network) acting on the agent's own observation, a centralized critic
    (train + target network) that sees every agent's observation and action,
    soft/hard parameter-update ops, actor/critic training ops with gradient
    clipping, a summary op, and a Saver. It returns a tf.Session on that
    graph with variables initialized. All tensors/ops needed later are
    registered in named graph collections so callers retrieve them by name.
    """

    def __init__(self, actionDim, numAgents, obsShapeList, actionRange = 1):
        self.actionDim = actionDim          # size of each agent's action vector
        self.numAgents = numAgents          # total number of agents (critic sees all)
        self.obsShapeList = obsShapeList    # per-agent observation dimensions
        self.actionRange = actionRange      # scale applied to the raw actor output
        self.gradNormClipping = 0.5         # max gradient norm for both optimizers

    def __call__(self, layersWidths, agentID):
        agentStr = 'Agent' + str(agentID)
        graph = tf.Graph()
        with graph.as_default():
            # --- placeholders for states, next states, actions, rewards ---
            with tf.variable_scope("inputs/" + agentStr):
                allAgentsStates_ = [tf.placeholder(dtype=tf.float32, shape=[None, agentObsDim], name="state" + str(i)) for i, agentObsDim in enumerate(self.obsShapeList)]
                allAgentsNextStates_ = [tf.placeholder(dtype=tf.float32, shape=[None, agentObsDim], name="nextState" + str(i)) for i, agentObsDim in enumerate(self.obsShapeList)]
                allAgentsActions_ = [tf.placeholder(dtype=tf.float32, shape=[None, self.actionDim], name="action" + str(i)) for i in range(self.numAgents)]
                allAgentsNextActionsByTargetNet_ = [tf.placeholder(dtype=tf.float32, shape=[None, self.actionDim], name="actionTarget" + str(i)) for i in range(self.numAgents)]
                agentReward_ = tf.placeholder(tf.float32, [None, 1], name='reward_')

                tf.add_to_collection("allAgentsStates_", allAgentsStates_)
                tf.add_to_collection("allAgentsNextStates_", allAgentsNextStates_)
                tf.add_to_collection("allAgentsActions_", allAgentsActions_)
                tf.add_to_collection("allAgentsNextActionsByTargetNet_", allAgentsNextActionsByTargetNet_)
                tf.add_to_collection("agentReward_", agentReward_)

            # --- hyperparameter tensors (constants; presumably overridden via
            # the collections by the training code — confirm in trainers) ---
            with tf.variable_scope("trainingParams" + agentStr):
                learningRate_ = tf.constant(0, dtype=tf.float32)
                tau_ = tf.constant(0, dtype=tf.float32)
                gamma_ = tf.constant(0, dtype=tf.float32)

                tf.add_to_collection("learningRate_", learningRate_)
                tf.add_to_collection("tau_", tau_)
                tf.add_to_collection("gamma_", gamma_)

            # --- actor train network (MLP on this agent's own observation) ---
            with tf.variable_scope("actor/trainHidden/" + agentStr):  # act by personal observation
                currentAgentState_ = allAgentsStates_[agentID]
                actorTrainActivation_ = currentAgentState_

                for i in range(len(layersWidths)):
                    actorTrainActivation_ = layers.fully_connected(actorTrainActivation_, num_outputs=layersWidths[i],
                                                                   activation_fn=tf.nn.relu)
                actorTrainActivation_ = layers.fully_connected(actorTrainActivation_, num_outputs=self.actionDim,
                                                               activation_fn=None)

            # --- actor target network (same shape, fed the next state) ---
            with tf.variable_scope("actor/targetHidden/" + agentStr):
                currentAgentNextState_ = allAgentsNextStates_[agentID]
                actorTargetActivation_ = currentAgentNextState_

                for i in range(len(layersWidths)):
                    actorTargetActivation_ = layers.fully_connected(actorTargetActivation_, num_outputs=layersWidths[i],
                                                                    activation_fn=tf.nn.relu)
                actorTargetActivation_ = layers.fully_connected(actorTargetActivation_, num_outputs=self.actionDim,
                                                                activation_fn=None)

            # --- actor outputs: scaled actions plus Gumbel-noise-softmaxed
            # (-log(-log(U)) is the Gumbel sample) exploration actions ---
            with tf.variable_scope("actorNetOutput/" + agentStr):
                trainAction_ = tf.multiply(actorTrainActivation_, self.actionRange, name='trainAction_')
                targetAction_ = tf.multiply(actorTargetActivation_, self.actionRange, name='targetAction_')

                sampleNoiseTrain_ = tf.random_uniform(tf.shape(trainAction_))
                noisyTrainAction_ = U.softmax(trainAction_ - tf.log(-tf.log(sampleNoiseTrain_)), axis=-1)

                sampleNoiseTarget_ = tf.random_uniform(tf.shape(targetAction_))
                noisyTargetAction_ = U.softmax(targetAction_ - tf.log(-tf.log(sampleNoiseTarget_)), axis=-1)

                tf.add_to_collection("trainAction_", trainAction_)
                tf.add_to_collection("targetAction_", targetAction_)
                tf.add_to_collection("noisyTrainAction_", noisyTrainAction_)
                tf.add_to_collection("noisyTargetAction_", noisyTargetAction_)

            # --- centralized critic on given (placeholder) actions: Q(s, a) ---
            with tf.variable_scope("critic/trainHidden/" + agentStr):
                criticTrainActivationOfGivenAction_ = tf.concat(allAgentsStates_ + allAgentsActions_, axis=1)
                for i in range(len(layersWidths)):
                    criticTrainActivationOfGivenAction_ = layers.fully_connected(criticTrainActivationOfGivenAction_, num_outputs=layersWidths[i], activation_fn=tf.nn.relu)
                criticTrainActivationOfGivenAction_ = layers.fully_connected(criticTrainActivationOfGivenAction_, num_outputs=1, activation_fn=None)

            # --- same critic (reused weights) with this agent's action replaced
            # by its noisy policy output, for the actor's policy gradient ---
            with tf.variable_scope("critic/trainHidden/" + agentStr, reuse=True):
                criticInputActionList = allAgentsActions_ + []
                criticInputActionList[agentID] = noisyTrainAction_
                criticTrainActivation_ = tf.concat(allAgentsStates_ + criticInputActionList, axis=1)
                for i in range(len(layersWidths)):
                    criticTrainActivation_ = layers.fully_connected(criticTrainActivation_, num_outputs=layersWidths[i], activation_fn=tf.nn.relu)
                criticTrainActivation_ = layers.fully_connected(criticTrainActivation_, num_outputs=1, activation_fn=None)

            # --- critic target network on next states and target actions ---
            with tf.variable_scope("critic/targetHidden/" + agentStr):
                criticTargetActivation_ = tf.concat(allAgentsNextStates_ + allAgentsNextActionsByTargetNet_, axis=1)
                for i in range(len(layersWidths)):
                    criticTargetActivation_ = layers.fully_connected(criticTargetActivation_, num_outputs=layersWidths[i], activation_fn=tf.nn.relu)
                criticTargetActivation_ = layers.fully_connected(criticTargetActivation_, num_outputs=1, activation_fn=None)

            # --- soft (tau-weighted) and hard target-network update ops ---
            with tf.variable_scope("updateParameters/" + agentStr):
                actorTrainParams_ = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='actor/trainHidden/' + agentStr)
                actorTargetParams_ = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='actor/targetHidden/' + agentStr)
                actorUpdateParam_ = [actorTargetParams_[i].assign((1 - tau_) * actorTargetParams_[i] + tau_ * actorTrainParams_[i]) for i in range(len(actorTargetParams_))]

                tf.add_to_collection("actorTrainParams_", actorTrainParams_)
                tf.add_to_collection("actorTargetParams_", actorTargetParams_)
                tf.add_to_collection("actorUpdateParam_", actorUpdateParam_)

                hardReplaceActorTargetParam_ = [tf.assign(trainParam, targetParam) for trainParam, targetParam in zip(actorTrainParams_, actorTargetParams_)]
                tf.add_to_collection("hardReplaceActorTargetParam_", hardReplaceActorTargetParam_)

                criticTrainParams_ = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='critic/trainHidden/' + agentStr)
                criticTargetParams_ = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='critic/targetHidden/' + agentStr)
                criticUpdateParam_ = [criticTargetParams_[i].assign((1 - tau_) * criticTargetParams_[i] + tau_ * criticTrainParams_[i]) for i in range(len(criticTargetParams_))]

                tf.add_to_collection("criticTrainParams_", criticTrainParams_)
                tf.add_to_collection("criticTargetParams_", criticTargetParams_)
                tf.add_to_collection("criticUpdateParam_", criticUpdateParam_)

                hardReplaceCriticTargetParam_ = [tf.assign(trainParam, targetParam) for trainParam, targetParam in zip(criticTrainParams_, criticTargetParams_)]
                tf.add_to_collection("hardReplaceCriticTargetParam_", hardReplaceCriticTargetParam_)

                updateParam_ = actorUpdateParam_ + criticUpdateParam_
                hardReplaceTargetParam_ = hardReplaceActorTargetParam_ + hardReplaceCriticTargetParam_
                tf.add_to_collection("updateParam_", updateParam_)
                tf.add_to_collection("hardReplaceTargetParam_", hardReplaceTargetParam_)

            # --- actor loss: maximize Q under own policy, plus L2 regularizer
            # on the pre-softmax activations ---
            with tf.variable_scope("trainActorNet/" + agentStr):
                trainQ = criticTrainActivation_[:, 0]
                pg_loss = -tf.reduce_mean(trainQ)
                p_reg = tf.reduce_mean(tf.square(actorTrainActivation_))
                actorLoss_ = pg_loss + p_reg * 1e-3
                actorOptimizer = tf.train.AdamOptimizer(learningRate_, name='actorOptimizer')
                actorTrainOpt_ = U.minimize_and_clip(actorOptimizer, actorLoss_, actorTrainParams_, self.gradNormClipping)
                tf.add_to_collection("actorLoss_", actorLoss_)
                tf.add_to_collection("actorTrainOpt_", actorTrainOpt_)

            # --- critic loss: TD error against the target network's value ---
            with tf.variable_scope("trainCriticNet/" + agentStr):
                yi_ = agentReward_ + gamma_ * criticTargetActivation_
                criticLoss_ = tf.reduce_mean(tf.squared_difference(tf.squeeze(yi_), tf.squeeze(criticTrainActivationOfGivenAction_)))
                tf.add_to_collection("yi_", yi_)
                tf.add_to_collection("valueLoss_", criticLoss_)
                criticOptimizer = tf.train.AdamOptimizer(learningRate_, name='criticOptimizer')
                crticTrainOpt_ = U.minimize_and_clip(criticOptimizer, criticLoss_, criticTrainParams_, self.gradNormClipping)
                tf.add_to_collection("crticTrainOpt_", crticTrainOpt_)

            with tf.variable_scope("summary" + agentStr):
                criticLossSummary = tf.identity(criticLoss_)
                tf.add_to_collection("criticLossSummary", criticLossSummary)
                tf.summary.scalar("criticLossSummary", criticLossSummary)

            fullSummary = tf.summary.merge_all()
            tf.add_to_collection("summaryOps", fullSummary)

            saver = tf.train.Saver(max_to_keep=None)
            tf.add_to_collection("saver", saver)

            model = tf.Session(graph=graph)
            model.run(tf.global_variables_initializer())

        return model
class ActOneStep:
    """Adapter that lets a batched policy be queried with one unbatched state."""

    def __init__(self, actByTrainNoisy):
        # Batched action function: (model, states[batch, ...]) -> actions[batch, ...]
        self.actByTrain = actByTrainNoisy

    def __call__(self, model, allAgentsStatesBatch):
        """Return actions for a single joint state by adding/stripping a batch axis."""
        batchedStates = np.expand_dims(np.array(allAgentsStatesBatch), axis=0)
        batchedActions = self.actByTrain(model, batchedStates)
        return batchedActions[0]
def actByPolicyTrainNoisy(model, allAgentsStatesBatch):
    """Run the noisy *training* policies of all agents on a batch of joint states.

    The per-agent state placeholders and the joint noisy-action op are looked up
    from the graph collections populated at build time.
    """
    graph = model.graph
    statePlaceholders = graph.get_collection_ref("allAgentsStates_")[0]
    noisyActionOp = graph.get_collection_ref("noisyTrainAction_")[0]
    # Transpose the batch of joint states into one column per agent.
    feed = {}
    for agentIndex, placeholder in enumerate(statePlaceholders):
        feed[placeholder] = [jointState[agentIndex] for jointState in allAgentsStatesBatch]
    return model.run(noisyActionOp, feed_dict=feed)
def actByPolicyTargetNoisyForNextState(model, allAgentsNextStatesBatch):
    """Run the noisy *target* policies of all agents on a batch of next states.

    Mirrors actByPolicyTrainNoisy but feeds the next-state placeholders and
    evaluates the target-network action op.
    """
    graph = model.graph
    nextStatePlaceholders = graph.get_collection_ref("allAgentsNextStates_")[0]
    noisyTargetActionOp = graph.get_collection_ref("noisyTargetAction_")[0]
    feed = {}
    for agentIndex, placeholder in enumerate(nextStatePlaceholders):
        feed[placeholder] = [jointState[agentIndex] for jointState in allAgentsNextStatesBatch]
    return model.run(noisyTargetActionOp, feed_dict=feed)
class TrainCriticBySASR:
    """One centralized-critic training step from explicit S/A/S'/R batches.

    MADDPG-style update: each agent's critic is fed every agent's states and
    actions; next actions for the TD target come from each agent's *target*
    policy network.
    """

    def __init__(self, actByPolicyTargetNoisyForNextState, criticLearningRate, gamma):
        # Function (model, nextStatesBatch) -> that agent's target-net actions.
        self.actByPolicyTargetNoisyForNextState = actByPolicyTargetNoisyForNextState
        self.criticLearningRate = criticLearningRate
        self.gamma = gamma  # discount factor fed into the TD target
        self.runCount = 0  # number of critic updates performed so far

    def __call__(self, agentID, allAgentsModels, allAgentsStateBatch, allAgentsActionsBatch, allAgentsNextStatesBatch, allAgentsRewardBatch):
        """Run one critic optimisation step for agent ``agentID``.

        Returns:
            (criticLoss, agentModel) — the scalar loss value and the (mutated
            in place) session/model of the trained agent.
        """
        agentModel = allAgentsModels[agentID]
        # This agent's scalar reward per sample, wrapped to shape [batch, 1].
        agentReward = [[reward[agentID]] for reward in allAgentsRewardBatch]
        graph = agentModel.graph
        # Placeholders and ops were stashed in named graph collections at build time.
        allAgentsStates_ = graph.get_collection_ref("allAgentsStates_")[0]
        allAgentsNextStates_ = graph.get_collection_ref("allAgentsNextStates_")[0]
        allAgentsNextActionsByTargetNet_ = graph.get_collection_ref("allAgentsNextActionsByTargetNet_")[0]
        agentReward_ = graph.get_collection_ref("agentReward_")[0]
        allAgentsActions_ = graph.get_collection_ref("allAgentsActions_")[0]
        learningRate_ = graph.get_collection_ref("learningRate_")[0]
        gamma_ = graph.get_collection_ref("gamma_")[0]
        valueLoss_ = graph.get_collection_ref("valueLoss_")[0]
        crticTrainOpt_ = graph.get_collection_ref("crticTrainOpt_")[0]
        criticSummary_ = graph.get_collection_ref("summaryOps")[0]
        valueDict = {agentReward_: agentReward, learningRate_: self.criticLearningRate, gamma_: self.gamma}
        # Transpose batches of joint observations/actions into per-agent feed lists.
        stateDict = {agentState_: [states[i] for states in allAgentsStateBatch] for i, agentState_ in enumerate(allAgentsStates_)}
        actionDict = {agentAction_: [actions[i] for actions in allAgentsActionsBatch] for i, agentAction_ in enumerate(allAgentsActions_)}
        nextStateDict = {agentNextState_: [states[i] for states in allAgentsNextStatesBatch] for i, agentNextState_ in enumerate(allAgentsNextStates_)}
        # Next actions are produced by each agent's own target policy.
        getAgentNextAction = lambda agentID: self.actByPolicyTargetNoisyForNextState(allAgentsModels[agentID], allAgentsNextStatesBatch)
        nextActionDict = {nextAction_: getAgentNextAction(i) for i, nextAction_ in enumerate(allAgentsNextActionsByTargetNet_)}
        criticSummary, criticLoss, crticTrainOpt = agentModel.run([criticSummary_, valueLoss_, crticTrainOpt_],
                                                                  feed_dict={**stateDict, **nextStateDict, **nextActionDict, **actionDict, **valueDict} )
        self.runCount += 1
        return criticLoss, agentModel
class TrainCritic:
    """Adapter: unpack a replay mini-batch and delegate to the SASR critic trainer."""

    def __init__(self, trainCriticBySASR):
        self.trainCriticBySASR = trainCriticBySASR

    def __call__(self, agentID, allAgentsModels, miniBatch):
        """Train agent ``agentID``'s critic on one mini-batch of (s, a, r, s') tuples."""
        states, actions, rewards, nextStates = zip(*miniBatch)
        loss, updatedModel = self.trainCriticBySASR(
            agentID, allAgentsModels, states, actions, nextStates, rewards)
        return updatedModel
class TrainActorFromSA:
    """Run one actor gradient step from batches of joint states and actions."""

    def __init__(self, actorLearningRatte):
        # NOTE(review): the (misspelled) parameter name is part of the public
        # interface and is kept so keyword callers are not broken.
        self.actorLearningRate = actorLearningRatte

    def __call__(self, agentID, agentModel, allAgentsStateBatch, allAgentsActionsBatch):
        """Train the actor held by ``agentModel`` and return the model."""
        graph = agentModel.graph
        statePlaceholders = graph.get_collection_ref("allAgentsStates_")[0]
        actionPlaceholders = graph.get_collection_ref("allAgentsActions_")[0]
        learningRatePlaceholder = graph.get_collection_ref("learningRate_")[0]
        trainOp = graph.get_collection_ref("actorTrainOpt_")[0]
        # Transpose the joint batches into one feed column per agent.
        feed = {learningRatePlaceholder: self.actorLearningRate}
        for index, placeholder in enumerate(statePlaceholders):
            feed[placeholder] = [states[index] for states in allAgentsStateBatch]
        for index, placeholder in enumerate(actionPlaceholders):
            feed[placeholder] = [actions[index] for actions in allAgentsActionsBatch]
        agentModel.run(trainOp, feed_dict=feed)
        return agentModel
class TrainActor:
    """Adapter: unpack a replay mini-batch and delegate to the S/A actor trainer."""

    def __init__(self, trainActorFromSA):
        self.trainActorFromSA = trainActorFromSA

    def __call__(self, agentID, allAgentsModels, miniBatch):
        """Train agent ``agentID``'s actor on one mini-batch; rewards/next states unused."""
        states, actions, _rewards, _nextStates = zip(*miniBatch)
        return self.trainActorFromSA(agentID, allAgentsModels, states, actions)
class TrainMADDPGModelsWithBuffer:
    """Per-call MADDPG learning step driven by a shared replay buffer.

    When the start/interval gate allows it, each agent in turn gets a fresh
    mini-batch, a critic update, an actor update and a target-net parameter
    update; the refreshed model is written back into ``allModels``.
    """

    def __init__(self, updateParameters, trainActor, trainCritic, sampleFromBuffer, startLearn, allModels):
        self.updateParameters = updateParameters
        self.trainActor = trainActor
        self.trainCritic = trainCritic
        self.sampleFromBuffer = sampleFromBuffer
        self.startLearn = startLearn
        self.allModels = allModels

    def __call__(self, buffer, runTime):
        # Gate on warm-up size / learning interval.
        if not self.startLearn(runTime):
            return
        for agentID in range(len(self.allModels)):
            miniBatch = self.sampleFromBuffer(buffer)
            model = self.trainCritic(agentID, self.allModels, miniBatch)
            model = self.trainActor(agentID, model, miniBatch)
            self.allModels[agentID] = self.updateParameters(model)

    def getTrainedModels(self):
        """Return the (possibly updated) list of per-agent models."""
        return self.allModels
58,379 | mingluzhao/Coord-Hunting-With-RL | refs/heads/master | /src/maddpg/rlTools/RLrun.py | import numpy as np
import random
import os
# Allow duplicate OpenMP runtimes to coexist — a common workaround for hard
# crashes when several libraries each bundle libiomp (e.g. numpy/MKL on macOS).
os.environ['KMP_DUPLICATE_LIB_OK']='True'
from collections import deque
class UpdateParameters:
    """Trigger the graph's target-network parameter update every N calls.

    With ``tau`` set, the graph's soft-update path is fed the mixing factor;
    otherwise the parameter-copy op is run as-is.
    """

    def __init__(self, paramUpdateInterval, tau=None):
        self.paramUpdateInterval = paramUpdateInterval
        self.tau = tau
        self.runTime = 0  # counts calls; update fires when divisible by interval

    def __call__(self, model):
        due = (self.runTime % self.paramUpdateInterval == 0)
        self.runTime += 1
        if not due:
            return model
        graph = model.graph
        updateOp = graph.get_collection_ref("updateParam_")[0]
        if self.tau is None:
            model.run(updateOp)
        else:
            tauPlaceholder = graph.get_collection_ref("tau_")[0]
            model.run(updateOp, feed_dict={tauPlaceholder: self.tau})
        return model
class SampleOneStep:
    """Advance the environment one step and score the transition."""

    def __init__(self, transit, getReward):
        self.transit = transit      # (state, action) -> next state
        self.getReward = getReward  # (state, action, next state) -> reward

    def __call__(self, state, action):
        """Return (reward, nextState) for taking ``action`` in ``state``."""
        successor = self.transit(state, action)
        return self.getReward(state, action, successor), successor
class SampleFromMemory:
    """Uniformly sample (with replacement) a fixed-size mini-batch from a buffer."""

    def __init__(self, minibatchSize):
        self.minibatchSize = minibatchSize

    def __call__(self, memoryBuffer):
        """Return ``minibatchSize`` entries drawn uniformly with replacement."""
        lastIndex = len(memoryBuffer) - 1
        return [memoryBuffer[random.randint(0, lastIndex)]
                for _ in range(self.minibatchSize)]
class RunTimeStep:
    """Act, step the environment, record the transition and trigger learning."""

    def __init__(self, actOneStep, sampleOneStep, learnFromBuffer, observe=None):
        self.actOneStep = actOneStep
        self.sampleOneStep = sampleOneStep
        # Either a single learner or a list of per-agent learners.
        self.learnFromBuffer = learnFromBuffer
        self.observe = observe  # optional state -> observation mapping
        self.runTime = 0

    def __call__(self, state, replayBuffer):
        """Run one time step; returns (reward, nextState, replayBuffer)."""
        observation = state if self.observe is None else self.observe(state)
        action = self.actOneStep(observation, self.runTime)
        reward, nextState = self.sampleOneStep(state, action)
        nextObservation = nextState if self.observe is None else self.observe(nextState)
        replayBuffer.append((observation, action, reward, nextObservation))
        if isinstance(self.learnFromBuffer, list):
            # Multi-agent: each agent's learner gets its own id.
            for agentId, learn in enumerate(self.learnFromBuffer):
                learn(replayBuffer, self.runTime, agentId)
        else:
            self.learnFromBuffer(replayBuffer, self.runTime)
        self.runTime += 1
        return reward, nextState, replayBuffer
class StartLearn:
    """Gate that decides when learning is allowed to run."""

    def __init__(self, learningStartBufferSize, learnInterval):
        self.learningStartBufferSize = learningStartBufferSize
        self.learnInterval = learnInterval

    def __call__(self, runTime):
        """True once warm-up is over and ``runTime`` hits the interval."""
        warmedUp = runTime >= self.learningStartBufferSize
        onInterval = runTime % self.learnInterval == 0
        return warmedUp and onInterval
def getBuffer(bufferSize):
    """Create an empty FIFO replay buffer capped at ``bufferSize`` transitions."""
    capacity = int(bufferSize)
    return deque(maxlen=capacity)
class RunEpisode:
    """Run one episode: reset, then step until terminal or the step cap."""

    def __init__(self, reset, runTimeStep, maxTimeStep, isTerminal):
        self.reset = reset
        self.runTimeStep = runTimeStep
        self.maxTimeStep = maxTimeStep
        self.isTerminal = isTerminal

    def __call__(self, replayBuffer):
        """Return (replayBuffer, episodeReward) after at most maxTimeStep steps."""
        state = self.reset()
        # The first step is taken unconditionally and is never terminal-checked.
        reward, state, replayBuffer = self.runTimeStep(state, replayBuffer)
        episodeReward = np.array(reward)
        for _ in range(self.maxTimeStep - 1):
            reward, state, replayBuffer = self.runTimeStep(state, replayBuffer)
            episodeReward = episodeReward + np.array(reward)
            # isTerminal may return a scalar or a per-agent collection; any
            # nonzero entry ends the episode.
            if np.sum(np.array(self.isTerminal(state))) != 0:
                break
        return replayBuffer, episodeReward
class SaveModel:
    """Checkpoint the current model every ``modelSaveRate`` episodes."""

    def __init__(self, modelSaveRate, saveVariables, getCurrentModel, modelSavePath, saveAllmodels=False):
        self.modelSaveRate = modelSaveRate
        self.saveVariables = saveVariables      # (model, path) -> writes checkpoint
        self.getCurrentModel = getCurrentModel  # () -> model to checkpoint
        self.epsNum = 0                         # episodes seen so far
        self.modelSavePath = modelSavePath
        # When True, keep every checkpoint by suffixing the episode count.
        self.saveAllmodels = saveAllmodels

    def __call__(self):
        self.epsNum += 1
        if self.epsNum % self.modelSaveRate != 0:
            return
        if self.saveAllmodels:
            savePath = self.modelSavePath + str(self.epsNum) + "eps"
        else:
            savePath = self.modelSavePath
        model = self.getCurrentModel()
        with model.as_default():
            self.saveVariables(model, savePath)
class RunAlgorithm:
    """Top-level training loop: run episodes, checkpoint, and log rewards.

    In multi-agent mode ``saveModels`` is a list of per-agent savers and
    per-agent reward traces are tracked; in single-agent mode it is a single
    callable and only the total episode reward is tracked.
    """

    def __init__(self, runEpisode, maxEpisode, saveModels, numAgents = 1, printEpsFrequency = 1000):
        self.runEpisode = runEpisode
        self.maxEpisode = maxEpisode
        self.saveModels = saveModels
        self.numAgents = numAgents
        self.printEpsFrequency = printEpsFrequency
        # Multi-agent behaviour (list-valued saveModels, per-agent logging)
        # is switched on purely by the agent count.
        self.multiAgent = (self.numAgents > 1)

    def __call__(self, replayBuffer):
        """Run ``maxEpisode`` episodes and return the per-episode reward list."""
        episodeRewardList = []
        meanRewardList = []
        agentsEpsRewardList = [list() for agentID in range(self.numAgents)] if self.multiAgent else []
        for episodeID in range(self.maxEpisode):
            replayBuffer, episodeReward = self.runEpisode(replayBuffer)
            # Side-effecting comprehension: invoke every per-agent saver.
            [saveModel() for saveModel in self.saveModels] if self.multiAgent else self.saveModels()
            if self.multiAgent:
                # episodeReward is per-agent here; log the joint total plus
                # each agent's own trace.
                episodeRewardList.append(np.sum(episodeReward))
                [agentRewardList.append(agentEpsReward) for agentRewardList, agentEpsReward in zip(agentsEpsRewardList, episodeReward)]
                meanRewardList.append(np.mean(episodeRewardList))
                if episodeID % self.printEpsFrequency == 0:
                    # Report the running mean over the most recent window.
                    lastTimeSpanMeanReward = np.mean(episodeRewardList[-self.printEpsFrequency:])
                    print("episodes: {}, last {} eps mean episode reward: {}, agent mean reward: {}".format(
                        episodeID, self.printEpsFrequency, lastTimeSpanMeanReward,
                        [np.mean(rew[-self.printEpsFrequency:]) for rew in agentsEpsRewardList]))
            else:
                episodeRewardList.append(episodeReward)
                print('episode {}: mean eps reward {}'.format(len(episodeRewardList), np.mean(episodeRewardList)))
        return episodeRewardList
| {"/exec/evaluate.py": ["/src/functionTools/loadSaveModel.py", "/src/visualize/drawDemo.py", "/src/environment/reward.py", "/src/maddpg/trainer/MADDPG.py"], "/exec/train.py": ["/src/maddpg/trainer/MADDPG.py", "/src/maddpg/rlTools/RLrun.py", "/src/functionTools/loadSaveModel.py", "/src/environment/reward.py"]} |
58,418 | intake/intake-spark | refs/heads/master | /tests/test_spark.py | import os
import pandas as pd
from intake_spark import SparkDataFrame, SparkRDD, SparkTablesCatalog
from intake_spark.base import SparkHolder
import intake
# Location of the CSV fixture used throughout this test module.
TEST_DATA_DIR = 'tests'
TEST_DATA = 'sample1.csv'
fn = os.path.join(TEST_DATA_DIR, TEST_DATA)
# Pandas copy of the fixture; the tests below treat it as ground truth.
df = pd.read_csv(fn)
def test_rdd():
    """SparkRDD should apply a chained textFile -> map(len) pipeline."""
    text = SparkRDD(
        [('textFile', (fn,)),
         ('map', (len,))])
    # Spark's textFile yields lines without the trailing newline, so the local
    # reference drops one character per line. Use a context manager so the
    # fixture file handle is not leaked (the original left it open).
    with open(fn) as handle:
        expected = [len(line) - 1 for line in handle]
    assert text.read() == expected
def test_readtext():
    """intake's textfiles source should round-trip its content via to_spark()."""
    source = intake.open_textfiles(fn)
    rdd = source.to_spark()
    out = rdd.collect()
    # Close the fixture file deterministically (the original leaked the handle).
    with open(fn) as handle:
        expected = handle.read().rstrip('\n')
    assert '\n'.join(out) == expected
def test_df():
    """A SparkDataFrame built from a header-aware CSV read should match pandas."""
    source = SparkDataFrame(
        [('read', ),
         ('format', ('csv', )),
         ('option', ('header', 'true')),
         ('load', (fn, ))],
        {})
    result = source.read()
    # Spark parses everything as strings by default; align dtypes first.
    assert result.astype(df.dtypes).equals(df)
def test_cat():
    """A table registered in Hive should be discoverable via SparkTablesCatalog."""
    import pyspark
    holder = SparkHolder(True, [('catalog', )], {})
    holder.setup()  # create the shared spark session early
    session = holder.session[0]
    frame = session.createDataFrame(df)
    hive = pyspark.HiveContext(session.sparkContext)
    hive.registerDataFrameAsTable(frame, 'temp')
    cat = SparkTablesCatalog()
    assert 'temp' in list(cat)
    entry = cat.temp()
    assert isinstance(entry, SparkDataFrame)
    result = entry.read()
    assert result.astype(df.dtypes).equals(df)
| {"/tests/test_spark.py": ["/intake_spark/base.py"], "/intake_spark/spark_cat.py": ["/intake_spark/spark_sources.py", "/intake_spark/base.py"], "/intake_spark/spark_sources.py": ["/intake_spark/base.py"]} |
58,419 | intake/intake-spark | refs/heads/master | /tests/util.py |
def verify_plugin_interface(plugin):
    """Assert that a plugin exposes correctly-typed metadata attributes."""
    expectations = (('version', str),
                    ('container', str),
                    ('partition_access', bool))
    for attribute, expected_type in expectations:
        assert isinstance(getattr(plugin, attribute), expected_type)
def verify_datasource_interface(source):
    """Assert that a datasource exposes the expected attributes and methods."""
    attributes = ('container', 'description', 'datashape', 'dtype', 'shape',
                  'npartitions', 'metadata')
    methods = ('discover', 'read', 'read_chunked', 'read_partition',
               'to_dask', 'close')
    for name in attributes + methods:
        assert hasattr(source, name)
| {"/tests/test_spark.py": ["/intake_spark/base.py"], "/intake_spark/spark_cat.py": ["/intake_spark/spark_sources.py", "/intake_spark/base.py"], "/intake_spark/spark_sources.py": ["/intake_spark/base.py"]} |
58,420 | intake/intake-spark | refs/heads/master | /tests/__init__.py | import os
import sys
# Make Spark worker processes use the same interpreter that runs the tests.
os.environ['PYSPARK_PYTHON'] = sys.executable
| {"/tests/test_spark.py": ["/intake_spark/base.py"], "/intake_spark/spark_cat.py": ["/intake_spark/spark_sources.py", "/intake_spark/base.py"], "/intake_spark/spark_sources.py": ["/intake_spark/base.py"]} |
58,421 | intake/intake-spark | refs/heads/master | /intake_spark/spark_cat.py | from intake.catalog.local import LocalCatalogEntry, Catalog
from .spark_sources import SparkDataFrame
from ._version import get_versions
from .base import SparkHolder
class SparkTablesCatalog(Catalog):
    """Intake automatically-generate catalog for tables stored in Spark

    This driver will query Spark's Catalog object for any tables, and
    create an entry for each which, when accessed, will instantiate
    SparkDataFrame sources. Commonly, these table definitions will come
    from Hive.
    """
    name = 'spark_cat'
    version = get_versions()['version']

    def __init__(self, database=None, context_kwargs=None, metadata=None):
        """
        Parameters
        ----------
        database: str or None
            If using a specific database, the name should be given here.
            If not given, will attempt to query all defined databases.
        context_kwargs: dict
            Passed to SparkHolder for establishing the context on which to
            communicate with Spark.
        metadata: dict or None
            Arbitrary metadata forwarded to the base Catalog.
        """
        self.database = database
        self.context_args = context_kwargs
        # Lazily-created handle to the Spark session's Catalog object.
        self.spark_cat = None
        super(SparkTablesCatalog, self).__init__(metadata=metadata)

    def _load(self):
        """Populate ``self._entries`` with one catalog entry per Spark table."""
        if self.spark_cat is None:
            # Attribute chain ('catalog',) resolves to session.catalog.
            self.spark_cat = SparkHolder(True, [('catalog', )],
                                         self.context_args).setup()
        self._entries = {}
        # NOTE(review): when ``database`` is given it is a str, but ``db.name``
        # below expects a Database object — confirm against intended usage.
        dbs = (self.spark_cat.listDatabases()
               if self.database is None else [self.database])
        for db in dbs:
            tables = self.spark_cat.listTables(dbName=db.name)
            for table in tables:
                if db.name:
                    description = ('Spark table %s in database %s'
                                   '' % (table.name, db.name))
                else:
                    description = ('Spark table %s in default database'
                                   '' % table.name)
                # Accessing the entry will rebuild the frame via
                # session.table(<name>).
                args = {'args': [
                    ('table', (table.name, ))
                ]}
                e = LocalCatalogEntry(
                    table.name, description, 'spark_dataframe', True,
                    args, cache=[], parameters=[], metadata={}, catalog_dir="",
                    getenv=False, getshell=False)
                e._plugin = [SparkDataFrame]
                self._entries[table.name] = e
58,422 | intake/intake-spark | refs/heads/master | /intake_spark/base.py | import threading
class SparkHolder(object):
    """Shared holder for the process-global SparkContext and SparkSession.

    The live context/session are kept in class-level one-element lists so
    every instance (including unpickled copies) shares the same Spark
    objects; instances themselves only carry plain configuration.
    """

    sc = [None]        # singleton SparkContext, shared across all instances
    session = [None]   # singleton SparkSession, shared across all instances
    lock = threading.Lock()

    def set_context(self, sc=None, conf=None, master=None, app_name=None,
                    executor_env=None, spark_home=None, **kw):
        """Establish spark context for this session

        Parameters
        ----------
        sc: SparkContext instance or None
            If given, this is the context that will be sued and all other
            parameters are ignored
        conf: dict or None
            If given, the key/values of the SparkConf to set. Some values
            may be over-written by the other kwargs given
        master: str or None
            The connection to establish
        app_name: str or None
            Identifies this usage
        executor_env: dict or None
            If given, environment variables values passed on
        spark_home: str
            Location of spark installation
        """
        import pyspark
        with self.lock:
            if sc is None:
                # Respect an already-established global context.
                if self.sc[0] is not None:
                    return
                config = pyspark.SparkConf()
                config.setAll((conf or self.context_kwargs).items())
                if master is not None:
                    config.setMaster(master)
                if app_name is not None:
                    config.setAppName(app_name)
                if executor_env is not None:
                    config.setExecutorEnv(pairs=list(executor_env.items()))
                if spark_home is not None:
                    config.setSparkHome(spark_home)
                sc = pyspark.SparkContext.getOrCreate(config)
            self.sc[0] = sc

    def set_session(self, session=None, hive=None, **kw):
        """Set global SQL SparkSession

        Parameters
        ----------
        session: SparkSession or None
            Explicitly provide a session object, if you have one
        hive: bool
            Whether to enable Hive support when creating session
        """
        with self.lock:
            if session is None:
                # Respect an already-established global session.
                if self.session[0] is not None:
                    return
                from pyspark import sql
                if hive or self.hive:
                    session = sql.SparkSession.builder.enableHiveSupport(
                        ).getOrCreate()
                else:
                    session = sql.SparkSession.getOrCreate()
            self.session[0] = session

    @classmethod
    def set_class_session(cls, **context_kwargs):
        """Create or use spark session/context for instances of this class

        See the input parameters of ``set_context`` and ``set_session``.
        """
        inst = cls(True, (), context_kwargs)
        inst.set_context(**context_kwargs)
        inst.set_session(**context_kwargs)

    def __init__(self, sql, args, context_kwargs):
        """Create reference to spark resource

        Parameters
        ----------
        sql: bool
            If True, will use SQLContext (i.e., Session), returning a dataframe,
            if False, will use bare SparkContext, returning RDD (list-like)
        args: list or tuples
            Details of a sequence of spark methods to invoke.
            Each element is must be a tuple tuple, (method_name, args, kwargs),
            where method_name is a string corresponding to the attribute lookup
            to perform on the result of the previous stage. args and kwargs,
            if given, will be passed when calling the method; if not given,
            it is assumes to be a simple attribute.
            The starting object for the lookups is a SparkContext is sql=False,
            or a Spark Session if True.
        context_kwargs: dict
            Used to create spark context and session *if* they do not already
            exist globally on this class.
        """
        self.sql = sql
        self.args = args
        self.context_kwargs = context_kwargs or {}
        self.hive = self.context_kwargs.pop('hive', True)

    def __getstate__(self):
        # Pickle only plain configuration; the live context/session live on
        # the class and are recreated on demand after unpickling.
        # BUGFIX: the previous field list named attributes that are never set
        # ('method', 'kwargs'), so pickling raised AttributeError, and it
        # omitted 'hive', leaving unpickled instances broken.
        fields = ['sql', 'args', 'context_kwargs', 'hive']
        return {m: getattr(self, m) for m in fields}

    def __setstate__(self, state):
        self.__dict__.update(state)

    def setup(self):
        """Call spark to instantiate resource"""
        if self.sc[0] is None:
            self.set_context(**self.context_kwargs)
        if self.sql:
            if self.session[0] is None:
                self.set_session(**self.context_kwargs)
            m = self.session[0]
        else:
            m = self.sc[0]
        # Walk the declared method/attribute chain from the context/session.
        for state in self.args:
            method = state[0]
            if len(state) == 1:
                # Bare attribute lookup, e.g. ('catalog', ).
                m = getattr(m, method)
            else:
                args = state[1]
                if len(state) > 2:
                    kwargs = state[2]
                else:
                    kwargs = {}
                m = getattr(m, method)(*args, **kwargs)
        return m
| {"/tests/test_spark.py": ["/intake_spark/base.py"], "/intake_spark/spark_cat.py": ["/intake_spark/spark_sources.py", "/intake_spark/base.py"], "/intake_spark/spark_sources.py": ["/intake_spark/base.py"]} |
58,423 | intake/intake-spark | refs/heads/master | /intake_spark/spark_sources.py | from intake.source.base import DataSource, Schema
from .base import SparkHolder
from ._version import get_versions
# Resolve the package version once at import time, then drop the helper so it
# is not exported from this module.
__version__ = get_versions()['version']
del get_versions
class SparkRDD(DataSource):
    """A reference to an RDD definition in Spark

    RDDs are list-of-things objects, evaluated lazily in Spark.

    Examples
    --------
    >>> args = [('textFile', ('text.*.files', )),
    ...         ('map', (len,))]
    >>> context = {'master': 'spark://master.node:7077'}
    >>> source = SparkRDD(args, context)

    The output of `source.to_spark()` is an RDD object holding the lengths of
    the lines of the input files.
    """
    container = 'python'
    version = __version__
    name = 'spark_rdd'
    partition_access = True

    def __init__(self, args, context_kwargs=None, metadata=None):
        """
        Parameters
        ----------
        args, context_kwargs:
            Passed on to SparkHolder, see its docstrings and the examples.
        metadata: dict
            Arbitrary data to associate with this source.
        """
        super(SparkRDD, self).__init__(metadata)
        # sql=False: chain starts from the bare SparkContext, yielding an RDD.
        self.holder = SparkHolder(False, args, context_kwargs)
        self.ref = None  # live RDD handle, created lazily by _get_schema

    def _get_schema(self):
        # Materialise the RDD handle on first use; partition count is the
        # only schema information an RDD exposes.
        if self.ref is None:
            self.ref = self.holder.setup()
        self.npartitions = self.ref.getNumPartitions()
        return Schema(npartitions=self.npartitions,
                      extra_metadata=self.metadata)

    def read_partition(self, i):
        """Returns one of the partitions of the RDD as a list of objects"""
        self._get_schema()
        sc = self.holder.sc[0]
        # runJob with an identity mapper collects just partition i.
        return sc.runJob(self.ref, lambda x: x, partitions=[i])

    def to_spark(self):
        """Return the spark object for this data, an RDD"""
        self._get_schema()
        return self.ref

    def read(self):
        """Materialise the whole RDD into a list of objects"""
        self._get_schema()
        return self.ref.collect()

    def _close(self):
        # Drop the handle; a subsequent read re-runs holder.setup().
        self.ref = None
class SparkDataFrame(DataSource):
    """A reference to a DataFrame definition in Spark

    DataFrames are tabular spark objects containing a heterogeneous set of
    columns and potentially a large number of rows. They are similar in concept
    to Pandas or Dask data-frames. The Spark variety produced by this driver
    will be a handle to a lazy object, where computation will be managed by
    Spark.

    Examples
    --------
    >>> args = [
    ...    ['read', ],
    ...    ['format', ['csv', ]],
    ...    ['option', ['header', 'true']],
    ...    ['load', ['data.*.csv', ]]
    ... ]
    >>> context = {'master': 'spark://master.node:7077'}
    >>> source = SparkDataFrame(args, context)

    The output of `source.to_spark()` contains a spark object pointing to the
    parsed contents of the indicated CSV files
    """
    container = 'dataframe'
    version = __version__
    name = 'spark_dataframe'
    partition_access = True

    def __init__(self, args, context_kwargs=None, metadata=None):
        """
        Parameters
        ----------
        args, context_kwargs:
            Passed on to SparkHolder, see its docstrings and the examples.
        metadata: dict
            Arbitrary data to associate with this source.
        """
        super(SparkDataFrame, self).__init__(metadata)
        # sql=True: chain starts from the SparkSession, yielding a DataFrame.
        self.holder = SparkHolder(True, args, context_kwargs)
        self.ref = None  # live DataFrame handle, created lazily

    def _get_schema(self):
        # Infer dtypes from a small sample; the row count is unknown until a
        # full read, hence shape (None, ncols).
        if self.ref is None:
            self.ref = self.holder.setup()
            self.npartitions = self.ref.rdd.getNumPartitions()
            rows = self.ref.take(10)
            self.dtype = pandas_dtypes(self.ref.schema, rows)
            self.shape = (None, len(self.dtype))
        return Schema(npartitions=self.npartitions,
                      extra_metadata=self.metadata,
                      dtype=self.dtype,
                      shape=self.shape)

    def read_partition(self, i):
        """Returns one partition of the data as a pandas data-frame"""
        import pandas as pd
        self._get_schema()
        sc = self.holder.sc[0]
        out = sc.runJob(self.ref.rdd, lambda x: x, partitions=[i])
        df = pd.DataFrame.from_records(out)
        df.columns = list(self.dtype)
        return df

    def to_spark(self):
        """Return the Spark object for this data, a DataFrame"""
        self._get_schema()
        return self.ref

    def read(self):
        """Read all of the data into an in-memory Pandas data-frame"""
        self._get_schema()
        return self.ref.toPandas()

    def _close(self):
        # Drop the handle; a subsequent read re-runs holder.setup().
        self.ref = None
def _to_corrected_pandas_type(dt):
    # Copied from pyspark 2.3
    """
    When converting Spark SQL records to Pandas DataFrame,
    the inferred data type may be wrong. This method gets the corrected
    data type for Pandas if that type may be inferred uncorrectly.
    """
    import numpy as np
    from pyspark.sql.types import ByteType, ShortType, IntegerType, FloatType
    # Exact-type dispatch table; anything unlisted yields None (no correction).
    corrections = {
        ByteType: np.int8,
        ShortType: np.int16,
        IntegerType: np.int32,
        FloatType: np.float32,
    }
    return corrections.get(type(dt))
def pandas_dtypes(schema, rows):
    """Rough dtype for the given pyspark schema

    Builds a small pandas frame from ``rows`` and narrows its column dtypes
    according to the Spark schema, returning {column name: dtype string}.
    """
    import pandas as pd
    from pyspark.sql.types import IntegralType
    # copied from toPandas() method
    df = pd.DataFrame.from_records(rows)
    df.columns = [s.name for s in schema]
    for field in schema:
        pandas_type = _to_corrected_pandas_type(field.dataType)
        # Nullable integral columns are left alone: they may contain NaN,
        # which numpy integer dtypes cannot represent.
        if pandas_type is not None and not(
                isinstance(field.dataType, IntegralType) and field.nullable):
            df[field.name] = df[field.name].astype(pandas_type)
    return {k: str(v) for k, v in df.dtypes.to_dict().items()}
| {"/tests/test_spark.py": ["/intake_spark/base.py"], "/intake_spark/spark_cat.py": ["/intake_spark/spark_sources.py", "/intake_spark/base.py"], "/intake_spark/spark_sources.py": ["/intake_spark/base.py"]} |
58,426 | IvanIvanov/Hack-Assembler | refs/heads/master | /assembler_test.py | #!/usr/bin/python
#
# Copyright (c) 2011 Ivan Vladimirov Ivanov (ivan.vladimirov.ivanov@gmail.com)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
"""
Test cases for the assembler module.
"""
__author__ = "Ivan Vladimirov Ivanov (ivan.vladimirov.ivanov@gmail.com)"
import unittest
import assembler
class TestAssembler(unittest.TestCase):
    """Unit tests for the parsing and assembling helpers in assembler.py."""

    def testParseAInstruction(self):
        """@-instructions accept numeric and symbolic values; bad forms fail."""
        result1 = assembler.AInstruction.Parse("@123")
        self.assertTrue(result1 != False)
        self.assertTrue(result1.value == 123)
        result2 = assembler.AInstruction.Parse("@llama")
        self.assertTrue(result2 != False)
        self.assertTrue(result2.value == "llama")
        # Too large to fit the 15-bit address field.
        result3 = assembler.AInstruction.Parse("@123456789")
        self.assertFalse(result3)
        # Symbols must not start with a digit.
        result4 = assembler.AInstruction.Parse("@1llama")
        self.assertFalse(result4)

    def testParseLInstruction(self):
        """(label) pseudo-instructions parse symbols; digit-led/bare text fails."""
        result1 = assembler.LInstruction.Parse("(foo)")
        self.assertTrue(result1 != False)
        self.assertTrue(result1.value == "foo")
        result2 = assembler.LInstruction.Parse("(foo123..$$:_)")
        self.assertTrue(result2 != False)
        self.assertTrue(result2.value == "foo123..$$:_")
        result3 = assembler.LInstruction.Parse("(1abc)")
        self.assertFalse(result3)
        result4 = assembler.LInstruction.Parse("foo")
        self.assertFalse(result4)

    def testParseCInstruction(self):
        """C-instructions split into optional dest, required comp, optional jump."""
        result1 = assembler.CInstruction.Parse("D=M;JMP")
        self.assertTrue(result1 != False)
        self.assertTrue(result1.dest == "D")
        self.assertTrue(result1.comp == "M")
        self.assertTrue(result1.jump == "JMP")
        result2 = assembler.CInstruction.Parse("M=M+1")
        self.assertTrue(result2 != False)
        self.assertTrue(result2.dest == "M")
        self.assertTrue(result2.comp == "M+1")
        self.assertTrue(result2.jump == "")
        result3 = assembler.CInstruction.Parse("0;JMP")
        self.assertTrue(result3 != False)
        self.assertTrue(result3.dest == "")
        self.assertTrue(result3.comp == "0")
        self.assertTrue(result3.jump == "JMP")
        result4 = assembler.CInstruction.Parse("")
        self.assertFalse(result4)
        result5 = assembler.CInstruction.Parse("@1234")
        self.assertFalse(result5)
        result6 = assembler.CInstruction.Parse("llama")
        self.assertFalse(result6)

    def testParseEmptyInstruction(self):
        """Blank lines and pure comments parse as empty instructions."""
        result1 = assembler.EmptyInstruction.Parse("")
        self.assertTrue(result1 != False)
        result2 = assembler.EmptyInstruction.Parse(" ")
        self.assertTrue(result2 != False)
        result3 = assembler.EmptyInstruction.Parse("// I like pie!")
        self.assertTrue(result3 != False)
        result4 = assembler.EmptyInstruction.Parse("@123")
        self.assertFalse(result4)

    def testParseErrorInstruction(self):
        """ErrorInstruction captures the offending line verbatim."""
        result1 = assembler.ErrorInstruction.Parse("foo!")
        self.assertTrue(result1 != False)
        self.assertTrue(result1.line == "foo!")

    def testParseInstruction(self):
        """ParseInstruction dispatches each line to the right instruction class."""
        result1 = assembler.ParseInstruction("@123")
        self.assertTrue(
            result1 and result1.__class__.__name__ == "AInstruction")
        result2 = assembler.ParseInstruction("@loop")
        self.assertTrue(
            result2 and result2.__class__.__name__ == "AInstruction")
        result3 = assembler.ParseInstruction("M=M+1")
        self.assertTrue(
            result3 and result3.__class__.__name__ == "CInstruction")
        result4 = assembler.ParseInstruction("// I like pie!")
        self.assertTrue(
            result4 and result4.__class__.__name__ == "EmptyInstruction")
        result5 = assembler.ParseInstruction("(loop)")
        self.assertTrue(
            result5 and result5.__class__.__name__ == "LInstruction")
        result6 = assembler.ParseInstruction("Blah")
        self.assertTrue(
            result6 and result6.__class__.__name__ == "ErrorInstruction")

    def testAssemblerError(self):
        """An unparsable line makes Assemble raise AssemblerError."""
        program = [ "I like pie!" ]
        self.assertRaises(assembler.AssemblerError, assembler.Assemble, program)

    def testAssembleAdd(self):
        """End-to-end assembly of the canonical Add program."""
        program = [
            "// Computes R0 = 2 + 3",
            "@2",
            "D=A",
            "@3",
            "D=D+A",
            "@0",
            "M=D"]
        result = [
            "0000000000000010",
            "1110110000010000",
            "0000000000000011",
            "1110000010010000",
            "0000000000000000",
            "1110001100001000"]
        self.assertEqual(assembler.Assemble(program), result)

    def testAssembleMax(self):
        """End-to-end assembly of Max, exercising labels and jumps."""
        program = [
            "// Computes M[2] = max(M[0], M[1]) where M stands for RAM",
            "@0",
            "D=M // D=first number",
            "@1",
            "D=D-M // D=first number - second number",
            "@OUTPUT_FIRST",
            "D;JGT // if D>0 (first is greater) goto output_first",
            "@1",
            "D=M // D=second number",
            "@OUTPUT_D",
            "0;JMP // goto output_d",
            "(OUTPUT_FIRST)",
            "@0",
            "D=M // D=first number",
            "(OUTPUT_D)",
            "@2",
            "M=D // M[2]=D (greatest number)",
            "(INFINITE_LOOP)",
            "@INFINITE_LOOP",
            "0;JMP // infinite loop"
            ]
        result = [
            "0000000000000000",
            "1111110000010000",
            "0000000000000001",
            "1111010011010000",
            "0000000000001010",
            "1110001100000001",
            "0000000000000001",
            "1111110000010000",
            "0000000000001100",
            "1110101010000111",
            "0000000000000000",
            "1111110000010000",
            "0000000000000010",
            "1110001100001000",
            "0000000000001110",
            "1110101010000111"
            ]
        self.assertEqual(assembler.Assemble(program), result)

    def testAssembleRect(self):
        """End-to-end assembly of Rect, exercising user-defined variables."""
        program = [
            "// Draws a rectangle at the top left corner of the screen.",
            "// The rectangle is 16 pixels wide and R0 pixels high.",
            "@0",
            "D=M",
            "@INFINITE_LOOP",
            "D;JLE",
            "@counter",
            "M=D",
            "@SCREEN",
            "D=A",
            "@address",
            "M=D",
            "(LOOP)",
            "@address",
            "A=M",
            "M=-1",
            "@address",
            "D=M",
            "@32",
            "D=D+A",
            "@address",
            "M=D",
            "@counter",
            "MD=M-1",
            "@LOOP",
            "D;JGT",
            "(INFINITE_LOOP)",
            "@INFINITE_LOOP",
            "0;JMP"
            ]
        result = [
            "0000000000000000",
            "1111110000010000",
            "0000000000010111",
            "1110001100000110",
            "0000000000010000",
            "1110001100001000",
            "0100000000000000",
            "1110110000010000",
            "0000000000010001",
            "1110001100001000",
            "0000000000010001",
            "1111110000100000",
            "1110111010001000",
            "0000000000010001",
            "1111110000010000",
            "0000000000100000",
            "1110000010010000",
            "0000000000010001",
            "1110001100001000",
            "0000000000010000",
            "1111110010011000",
            "0000000000001010",
            "1110001100000001",
            "0000000000010111",
            "1110101010000111"
            ]
        self.assertEqual(assembler.Assemble(program), result)
# Run the test suite when this file is executed directly.
if __name__ == "__main__":
    unittest.main()
| {"/assembler_test.py": ["/assembler.py"]} |
58,427 | IvanIvanov/Hack-Assembler | refs/heads/master | /assembler.py | #!/usr/bin/python
#
# Copyright (c) 2011 Ivan Vladimirov Ivanov (ivan.vladimirov.ivanov@gmail.com)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
"""
This module implements the Assembler for the Hack platform described in
Chapter 6 of the book "The Elements of Computing Systems: Building a
Modern Computer from First Principles" (http://www1.idc.ac.il/tecs/).
"""
__author__ = "Ivan Vladimirov Ivanov (ivan.vladimirov.ivanov@gmail.com)"
import os
import re
import sys
_RE_SYMBOL = r"[a-zA-Z_\$\.:][a-zA-Z0-9_\$\.:]*"
def RemoveTrailingComment(line):
  """Strips everything from the first "//" onward; returns line unchanged otherwise."""
  comment_start = line.find("//")
  if comment_start == -1:
    return line
  return line[:comment_start]
class AInstruction(object):
  """Responsible for parsing and binary encoding of Addressing Instructions."""

  # An A-instruction is "@" followed by a decimal constant or a symbol.
  # \d+ (not \d*) so a bare "@" is rejected as invalid instead of being
  # silently accepted as a variable with an empty name.
  _RE_AINSTRUCTION = re.compile(r"^@(\d+|" + _RE_SYMBOL + ")$")

  def __init__(self, value):
    # value is an int (resolved address) or a str (unresolved symbol).
    self.value = value

  def ToBinary(self):
    """Returns the binary encoding of the AInstruction instance.

    value must already be numeric (symbols resolved beforehand).
    """
    return "0" + self._ToBinary15(self.value)

  def _ToBinary15(self, number):
    """Returns a 15-bit binary representation of number."""
    result = ""
    for _ in range(15):
      result = str(number % 2) + result
      # Floor division stays correct under both Python 2 and 3; plain "/"
      # yields a float in Python 3 and corrupts the remaining digits.
      number = number // 2
    return result

  @staticmethod
  def Parse(line):
    """Tries to parse a line of Hack assembly into an Addressing Instruction.
    Args:
      line: The line of Hack assembly to parse.
    Returns:
      On success an instance of AInstruction, on failure - False.
    """
    match = re.match(
        AInstruction._RE_AINSTRUCTION, RemoveTrailingComment(line).strip())
    if match:
      return AInstruction._ParseValue(match.group(1))
    else:
      return False

  @staticmethod
  def _ParseValue(value):
    # Numeric constants must fit in 15 bits; anything else is a symbol.
    if value.isdigit():
      int_value = int(value)
      return AInstruction(int_value) if 0 <= int_value < (1 << 15) else False
    else:
      return AInstruction(value)
class CInstruction(object):
  """Responsible for parsing and binary encoding of Compute Instructions."""

  # Optional "dest=", mandatory computation, optional ";jump".
  _RE_DEST = r"(?:(M|D|MD|A|AM|AD|AMD)=)?"
  _RE_JUMP = r"(?:;(JGT|JEQ|JGE|JLT|JNE|JLE|JMP))?"
  _RE_COMP = (
      r"(0|1|-1|D|A|!D|!A|-D|-A|D\+1|A\+1|D-1|A-1|D\+A|D-A|A-D|D&A|D\|A|"
      r"M|!M|-M|M\+1|M-1|D\+M|D-M|M-D|D&M|D\|M)")
  _RE_CINSTRUCTION = re.compile(r"^%s%s%s$" % (_RE_DEST, _RE_COMP, _RE_JUMP))

  # Bit patterns from the Hack machine-language tables.
  _COMP_TABLE = {
      "0": "0101010",
      "1": "0111111",
      "-1": "0111010",
      "D": "0001100",
      "A": "0110000",
      "!D": "0001101",
      "!A": "0110001",
      "-D": "0001111",
      "D+1": "0011111",
      "A+1": "0110111",
      "D-1": "0001110",
      "A-1": "0110010",
      "D+A": "0000010",
      "D-A": "0010011",
      "A-D": "0000111",
      "D&A": "0000000",
      "D|A": "0010101",
      "M": "1110000",
      "!M": "1110001",
      "-M": "1110011",
      "M+1": "1110111",
      "M-1": "1110010",
      "D+M": "1000010",
      "D-M": "1010011",
      "M-D": "1000111",
      "D&M": "1000000",
      "D|M": "1010101"
  }
  _DEST_TABLE = {
      "": "000",
      "M": "001",
      "D": "010",
      "MD": "011",
      "A": "100",
      "AM": "101",
      "AD": "110",
      "AMD": "111"
  }
  _JUMP_TABLE = {
      "": "000",
      "JGT": "001",
      "JEQ": "010",
      "JGE": "011",
      "JLT": "100",
      "JNE": "101",
      "JLE": "110",
      "JMP": "111"
  }

  def __init__(self, dest, comp, jump):
    self.dest = dest
    self.comp = comp
    self.jump = jump

  def ToBinary(self):
    """Returns the 16-bit binary encoding of the CInstruction instance."""
    comp_bits = CInstruction._COMP_TABLE[self.comp]
    dest_bits = CInstruction._DEST_TABLE[self.dest]
    jump_bits = CInstruction._JUMP_TABLE[self.jump]
    return "111" + comp_bits + dest_bits + jump_bits

  @staticmethod
  def Parse(line):
    """Tries to parse a line of Hack assembly into a Compute Instruction.
    Args:
      line: The line of Hack assembly to parse.
    Returns:
      On success an instance of CInstruction, on failure - False.
    """
    cleaned = RemoveTrailingComment(line).strip()
    match = CInstruction._RE_CINSTRUCTION.match(cleaned)
    if not match:
      return False
    return CInstruction._ParseMatch(match)

  @staticmethod
  def _ParseMatch(match):
    # Groups 1 (dest) and 3 (jump) are optional; normalize None to "".
    return CInstruction(match.group(1) or "", match.group(2), match.group(3) or "")
class LInstruction(object):
  """Responsible for parsing and storing Hack assembly labels."""

  # A label is a symbol wrapped in parentheses, e.g. "(LOOP)".
  _RE_LINSTRUCTION = re.compile(r"^\((" + _RE_SYMBOL + ")\)$")

  def __init__(self, value):
    self.value = value

  @staticmethod
  def Parse(line):
    """Tries to parse a line of Hack assembly into a Label.
    Args:
      line: The line of Hack assembly to parse.
    Returns:
      On success an instance of LInstruction, on failure - False.
    """
    cleaned = RemoveTrailingComment(line).strip()
    match = LInstruction._RE_LINSTRUCTION.match(cleaned)
    if not match:
      return False
    return LInstruction(match.group(1))
class EmptyInstruction(object):
  """Represents a no op line of Hack assembly - empty line or comment."""

  @staticmethod
  def Parse(line):
    """Tries to parse a line of Hack assembly into an EmptyInstruction.
    Args:
      line: The line of Hack assembly to parse.
    Returns:
      On success an instance of EmptyInstruction, on failure - False.
    """
    remainder = RemoveTrailingComment(line).strip()
    return EmptyInstruction() if remainder == "" else False
class ErrorInstruction(object):
  """Represents an invalid Hack assembly instruction."""

  def __init__(self, line):
    # Keep the offending source line for error reporting.
    self.line = line

  @staticmethod
  def Parse(line):
    """Always succeeds in creating an ErrorInstruction instance."""
    return ErrorInstruction(line)
class AssemblerError(Exception):
  """Represents an error specific to the assembly process."""

  def __init__(self, error_message):
    # Forward the message to Exception so str(err) and uncaught-exception
    # tracebacks actually show it (previously they printed nothing).
    Exception.__init__(self, error_message)
    self.error_message = error_message
# This list specifies the order in which the assembler will try to parse
# instructions in a line of Hack assembly.
# ErrorInstruction must stay last: its Parse always succeeds, so it is the
# catch-all for lines no other instruction type accepts.
_INSTRUCTIONS = [
    AInstruction,
    CInstruction,
    LInstruction,
    EmptyInstruction,
    ErrorInstruction]
def ParseInstruction(line):
  """Given a line of Hack assembly, match it with the correct instruction type.
  The order in which the various instruction types are tried is determined by
  the _INSTRUCTIONS table.
  Args:
    line: The line of Hack assembly that is to be parsed.
  Returns:
    An instance of one of the instruction types. In case none of the valid
    instructions are matched an instance of ErrorInstruction is returned.
  """
  for instruction_type in _INSTRUCTIONS:
    parsed = instruction_type.Parse(line)
    if parsed:
      return parsed
def Parse(program_lines):
  """Transforms a list of program lines into a list of parsed Instructions.
  Args:
    program_lines: A list of strings representing the list of lines in a
      Hack assembly program.
  Returns:
    A list of parsed program Instructions. There is one instruction for
    each line in the Hack assembly program.
  Raises:
    AssemblerError: At least one of the instructions is an ErrorInstruction.
  """
  # Materialize a real list: under Python 3, map() returns a one-shot
  # iterator that the error scan below would exhaust before returning it.
  program_instructions = [ParseInstruction(line) for line in program_lines]
  errors = []
  for line_number, instruction in enumerate(program_instructions, start=1):
    if isinstance(instruction, ErrorInstruction):
      errors.append("Error at line %d: %s" % (line_number, instruction.line))
  if errors:
    raise AssemblerError(os.linesep.join(errors))
  return program_instructions
def InitialSymbolTable():
  """Returns a dictionary filled with the initial symbol table values."""
  # Predefined pointers and memory-mapped I/O addresses.
  table = {
      "SP": 0,
      "LCL": 1,
      "ARG": 2,
      "THIS": 3,
      "THAT": 4,
      "SCREEN": 16384,
      "KBD": 24576
  }
  # Virtual registers R0-R15 alias RAM addresses 0-15.
  table.update(("R%d" % (address,), address) for address in range(16))
  return table
def AnalyzeSymbols(program_instructions):
  """Creates a symbol table with all variables and labels resolved.
  Args:
    program_instructions: A list of Hack assembly instructions from which
      to create the symbol table.
  Returns:
    A dictionary representing the symbol table with mappings for all variables
    and labels found in the program_instructions, as well as the initial
    symbol mappings for the hack platform.
  """
  symbol_table = InitialSymbolTable()
  # First pass: each label binds to the address of the next real instruction.
  next_address = 0
  for instruction in program_instructions:
    kind = instruction.__class__.__name__
    if kind == "LInstruction":
      symbol_table[instruction.value] = next_address
    elif kind in ("AInstruction", "CInstruction"):
      next_address += 1
  # Second pass: unseen symbols in A-instructions become variables at 16+.
  next_variable = 16
  for instruction in program_instructions:
    if instruction.__class__.__name__ != "AInstruction":
      continue
    symbol = instruction.value
    if isinstance(symbol, str) and symbol not in symbol_table:
      symbol_table[symbol] = next_variable
      next_variable += 1
  return symbol_table
def StripSymbols(program_instructions):
  """Removes all symbolic references from the program_instructions.
  This function not only removes all symbolic references, but also
  removes all instruction types except AInstruction and CInstruction.
  No actual removals are done on the input parameter, instead a new
  list of instructions is returned.
  Args:
    program_instructions: A list of Hack assembly instructions.
  Returns:
    A new list of Hack assembly instructions with all symbolic references
    substituted with their numerical equivalents, and all non-AInstruction
    and non-CInstruction instances removed.
  """
  symbol_table = AnalyzeSymbols(program_instructions)
  stripped = []
  for instruction in program_instructions:
    kind = instruction.__class__.__name__
    if kind == "CInstruction":
      stripped.append(instruction)
    elif kind == "AInstruction":
      if isinstance(instruction.value, str):
        # Replace the symbolic A-instruction with a numeric one.
        stripped.append(AInstruction(symbol_table[instruction.value]))
      else:
        stripped.append(instruction)
  return stripped
def TranslateToBinary(program_instructions):
  """Transforms a list of instructions into a list of their binary codes.
  Args:
    program_instructions: A list of Hack assembly instructions.
  Returns:
    A list of the binary machine codes for the given instructions.
  """
  # A list comprehension (not map) so Python 3 callers also get a real
  # list rather than a single-use iterator.
  return [instruction.ToBinary() for instruction in program_instructions]
def Assemble(program_lines):
  """Transforms the lines of a program into a list of binary instructions.
  Args:
    program_lines: A list of strings representing the lines of a Hack program.
  Returns:
    A list of binary instructions for the assembled program.
  """
  # Pipeline: parse -> resolve/strip symbols -> encode to binary.
  parsed_instructions = Parse(program_lines)
  resolved_instructions = StripSymbols(parsed_instructions)
  return TranslateToBinary(resolved_instructions)
def main():
  """Command-line entry point: assembles <name>.asm into <name>.hack."""
  if len(sys.argv) != 2:
    # Parenthesized print is valid under both Python 2 (as a parenthesized
    # expression) and Python 3; the old statement form broke Python 3.
    print("Please Specify exactly one argument, the program name.")
    return
  asm_file = sys.argv[1]
  if not asm_file.endswith(".asm"):
    print("The file must end with: .asm")
    return
  try:
    with open(asm_file, "r") as asm_program:
      binary_lines = Assemble(asm_program.readlines())
    # Output file keeps the input's base name with a .hack extension.
    with open(asm_file[:-4] + ".hack", "w") as hack_program:
      hack_program.write(os.linesep.join(binary_lines))
  except AssemblerError as error:
    print(error.error_message)
  except IOError as error:
    print(error)
# Run the assembler only when invoked as a script, not on import.
if __name__ == "__main__":
  main()
| {"/assembler_test.py": ["/assembler.py"]} |
58,470 | hackforchange/GreatDebate | refs/heads/master | /greatdebate/apps/decisionMakers/models.py | from django.db import models
class DecisionMaker(models.Model):
    """A decision maker targeted by campaigns (name and title only)."""
    name = models.CharField(max_length=255)
    title = models.CharField(max_length=255)
    def __unicode__(self):
        # Truncated so admin/list displays stay short.
        return self.name[:10]
class DecisionMakerResponse(models.Model):
    """A decision maker's response, linked to one or more campaigns."""
    # Set automatically when the row is first saved.
    date = models.DateField(auto_now_add=True)
    decision_maker = models.ForeignKey(DecisionMaker)
    campaign = models.ManyToManyField('campaigns.Campaign')
    # Presumably a URL to the response content -- confirm against the
    # responses widget which renders these objects.
    response_url = models.TextField()
    def __unicode__(self):
        return self.response_url[:10]
| {"/greatdebate/apps/organizers/admin.py": ["/greatdebate/apps/organizers/models.py"], "/greatdebate/apps/campaigns/models.py": ["/greatdebate/apps/decisionMakers/models.py", "/greatdebate/apps/organizers/models.py"], "/greatdebate/apps/campaigns/tests.py": ["/greatdebate/apps/campaigns/models.py", "/greatdebate/apps/decisionMakers/models.py"], "/greatdebate/apps/campaigns/views.py": ["/greatdebate/apps/campaigns/models.py", "/greatdebate/apps/organizers/models.py", "/greatdebate/apps/decisionMakers/models.py", "/greatdebate/apps/activists/models.py"], "/greatdebate/apps/activists/tests.py": ["/greatdebate/apps/activists/models.py", "/greatdebate/apps/campaigns/models.py"], "/greatdebate/apps/activists/views.py": ["/greatdebate/apps/activists/models.py", "/greatdebate/apps/campaigns/models.py", "/greatdebate/apps/decisionMakers/models.py"], "/greatdebate/apps/decisionMakers/tests.py": ["/greatdebate/apps/campaigns/models.py", "/greatdebate/apps/decisionMakers/models.py"], "/greatdebate/apps/decisionMakers/admin.py": ["/greatdebate/apps/decisionMakers/models.py"], "/greatdebate/apps/activists/admin.py": ["/greatdebate/apps/activists/models.py"], "/greatdebate/apps/decisionMakers/views.py": ["/greatdebate/apps/campaigns/models.py", "/greatdebate/apps/decisionMakers/models.py"], "/greatdebate/apps/campaigns/admin.py": ["/greatdebate/apps/campaigns/models.py"], "/greatdebate/apps/organizers/views.py": ["/greatdebate/apps/organizers/models.py"]} |
58,471 | hackforchange/GreatDebate | refs/heads/master | /greatdebate/apps/organizers/admin.py | from django.contrib import admin
from greatdebate.apps.organizers.models import Organizer
# Expose Organizer in the Django admin with the default ModelAdmin options.
admin.site.register(Organizer)
| {"/greatdebate/apps/organizers/admin.py": ["/greatdebate/apps/organizers/models.py"], "/greatdebate/apps/campaigns/models.py": ["/greatdebate/apps/decisionMakers/models.py", "/greatdebate/apps/organizers/models.py"], "/greatdebate/apps/campaigns/tests.py": ["/greatdebate/apps/campaigns/models.py", "/greatdebate/apps/decisionMakers/models.py"], "/greatdebate/apps/campaigns/views.py": ["/greatdebate/apps/campaigns/models.py", "/greatdebate/apps/organizers/models.py", "/greatdebate/apps/decisionMakers/models.py", "/greatdebate/apps/activists/models.py"], "/greatdebate/apps/activists/tests.py": ["/greatdebate/apps/activists/models.py", "/greatdebate/apps/campaigns/models.py"], "/greatdebate/apps/activists/views.py": ["/greatdebate/apps/activists/models.py", "/greatdebate/apps/campaigns/models.py", "/greatdebate/apps/decisionMakers/models.py"], "/greatdebate/apps/decisionMakers/tests.py": ["/greatdebate/apps/campaigns/models.py", "/greatdebate/apps/decisionMakers/models.py"], "/greatdebate/apps/decisionMakers/admin.py": ["/greatdebate/apps/decisionMakers/models.py"], "/greatdebate/apps/activists/admin.py": ["/greatdebate/apps/activists/models.py"], "/greatdebate/apps/decisionMakers/views.py": ["/greatdebate/apps/campaigns/models.py", "/greatdebate/apps/decisionMakers/models.py"], "/greatdebate/apps/campaigns/admin.py": ["/greatdebate/apps/campaigns/models.py"], "/greatdebate/apps/organizers/views.py": ["/greatdebate/apps/organizers/models.py"]} |
58,472 | hackforchange/GreatDebate | refs/heads/master | /greatdebate/apps/campaigns/models.py | from django.db import models
from greatdebate.apps.decisionMakers.models import DecisionMaker
from greatdebate.apps.organizers.models import Organizer
class Campaign(models.Model):
    """Stores Description of campaigns"""
    name = models.CharField(max_length=255, null=True, blank=True)
    organizer = models.ForeignKey(Organizer, null=True, blank=True)
    # A campaign can target several decision makers, and vice versa.
    decision_maker = models.ManyToManyField(DecisionMaker)
    # Treated as unique by save_campaign, though not enforced at the DB level.
    campaign_url = models.TextField()
    def __unicode__(self):
        return u'%s' % (self.campaign_url[:10])
| {"/greatdebate/apps/organizers/admin.py": ["/greatdebate/apps/organizers/models.py"], "/greatdebate/apps/campaigns/models.py": ["/greatdebate/apps/decisionMakers/models.py", "/greatdebate/apps/organizers/models.py"], "/greatdebate/apps/campaigns/tests.py": ["/greatdebate/apps/campaigns/models.py", "/greatdebate/apps/decisionMakers/models.py"], "/greatdebate/apps/campaigns/views.py": ["/greatdebate/apps/campaigns/models.py", "/greatdebate/apps/organizers/models.py", "/greatdebate/apps/decisionMakers/models.py", "/greatdebate/apps/activists/models.py"], "/greatdebate/apps/activists/tests.py": ["/greatdebate/apps/activists/models.py", "/greatdebate/apps/campaigns/models.py"], "/greatdebate/apps/activists/views.py": ["/greatdebate/apps/activists/models.py", "/greatdebate/apps/campaigns/models.py", "/greatdebate/apps/decisionMakers/models.py"], "/greatdebate/apps/decisionMakers/tests.py": ["/greatdebate/apps/campaigns/models.py", "/greatdebate/apps/decisionMakers/models.py"], "/greatdebate/apps/decisionMakers/admin.py": ["/greatdebate/apps/decisionMakers/models.py"], "/greatdebate/apps/activists/admin.py": ["/greatdebate/apps/activists/models.py"], "/greatdebate/apps/decisionMakers/views.py": ["/greatdebate/apps/campaigns/models.py", "/greatdebate/apps/decisionMakers/models.py"], "/greatdebate/apps/campaigns/admin.py": ["/greatdebate/apps/campaigns/models.py"], "/greatdebate/apps/organizers/views.py": ["/greatdebate/apps/organizers/models.py"]} |
58,473 | hackforchange/GreatDebate | refs/heads/master | /greatdebate/apps/organizers/models.py | from django.db import models
class Organizer(models.Model):
    """A campaign organizer, identified only by an email address."""
    email = models.TextField()
    def __unicode__(self):
        return u'%s' % (self.email[:10])
| {"/greatdebate/apps/organizers/admin.py": ["/greatdebate/apps/organizers/models.py"], "/greatdebate/apps/campaigns/models.py": ["/greatdebate/apps/decisionMakers/models.py", "/greatdebate/apps/organizers/models.py"], "/greatdebate/apps/campaigns/tests.py": ["/greatdebate/apps/campaigns/models.py", "/greatdebate/apps/decisionMakers/models.py"], "/greatdebate/apps/campaigns/views.py": ["/greatdebate/apps/campaigns/models.py", "/greatdebate/apps/organizers/models.py", "/greatdebate/apps/decisionMakers/models.py", "/greatdebate/apps/activists/models.py"], "/greatdebate/apps/activists/tests.py": ["/greatdebate/apps/activists/models.py", "/greatdebate/apps/campaigns/models.py"], "/greatdebate/apps/activists/views.py": ["/greatdebate/apps/activists/models.py", "/greatdebate/apps/campaigns/models.py", "/greatdebate/apps/decisionMakers/models.py"], "/greatdebate/apps/decisionMakers/tests.py": ["/greatdebate/apps/campaigns/models.py", "/greatdebate/apps/decisionMakers/models.py"], "/greatdebate/apps/decisionMakers/admin.py": ["/greatdebate/apps/decisionMakers/models.py"], "/greatdebate/apps/activists/admin.py": ["/greatdebate/apps/activists/models.py"], "/greatdebate/apps/decisionMakers/views.py": ["/greatdebate/apps/campaigns/models.py", "/greatdebate/apps/decisionMakers/models.py"], "/greatdebate/apps/campaigns/admin.py": ["/greatdebate/apps/campaigns/models.py"], "/greatdebate/apps/organizers/views.py": ["/greatdebate/apps/organizers/models.py"]} |
58,474 | hackforchange/GreatDebate | refs/heads/master | /greatdebate/apps/campaigns/tests.py | from django.test import TestCase
from django.test.client import Client
from greatdebate.apps.campaigns.models import Campaign
from greatdebate.apps.decisionMakers.models import DecisionMaker
class CampaignsTestCase(TestCase):
    """Integration tests for the campaign-creation endpoints."""
    def setUp(self):
        # Fresh test client for every test method.
        self.client = Client()
    def test_save_campaign_success_no_dm(self):
        """
        Tests we can successfully save a campaign with no dm.
        """
        post_params = {
            'campaign_url': 'uniquetestcampaign.com',
            'decision_makers': '1',
            'email': 'test@test.com',
        }
        response = self.client.post('/save_campaign/', post_params)
        # Exactly one campaign should now exist with the posted URL.
        new_campaign_count = Campaign.objects.filter(campaign_url=post_params['campaign_url']).count()
        self.assertEqual(new_campaign_count, 1)
        #import ipdb; ipdb.set_trace()
    def test_save_campaign_success_with_dm(self):
        """
        Tests we can successfully save a campaign with a dm.
        """
        post_params = {
            'campaign_url': 'uniquetestcampaign.com',
            'decision_makers': '1',
            'email': 'test@test.com',
        }
        # 'decision_makers' above refers to this DM's primary key (1).
        new_dm = DecisionMaker(name='Barak Obama', title='Commander and Chef')
        new_dm.save()
        response = self.client.post('/save_campaign/', post_params)
        new_campaign = Campaign.objects.filter(campaign_url=post_params['campaign_url'])
        self.assertEqual(new_campaign.count(), 1)
        # The saved campaign must be linked to the DM created above.
        self.assertEqual(new_campaign[0].decision_maker.all()[0], new_dm)
    def test_create_campaign_template(self):
        """tests we can render create campaign page"""
        response = self.client.get('/create_campaign/')
        self.assertEqual(response.templates[0].name, 'create_campaign.html')
| {"/greatdebate/apps/organizers/admin.py": ["/greatdebate/apps/organizers/models.py"], "/greatdebate/apps/campaigns/models.py": ["/greatdebate/apps/decisionMakers/models.py", "/greatdebate/apps/organizers/models.py"], "/greatdebate/apps/campaigns/tests.py": ["/greatdebate/apps/campaigns/models.py", "/greatdebate/apps/decisionMakers/models.py"], "/greatdebate/apps/campaigns/views.py": ["/greatdebate/apps/campaigns/models.py", "/greatdebate/apps/organizers/models.py", "/greatdebate/apps/decisionMakers/models.py", "/greatdebate/apps/activists/models.py"], "/greatdebate/apps/activists/tests.py": ["/greatdebate/apps/activists/models.py", "/greatdebate/apps/campaigns/models.py"], "/greatdebate/apps/activists/views.py": ["/greatdebate/apps/activists/models.py", "/greatdebate/apps/campaigns/models.py", "/greatdebate/apps/decisionMakers/models.py"], "/greatdebate/apps/decisionMakers/tests.py": ["/greatdebate/apps/campaigns/models.py", "/greatdebate/apps/decisionMakers/models.py"], "/greatdebate/apps/decisionMakers/admin.py": ["/greatdebate/apps/decisionMakers/models.py"], "/greatdebate/apps/activists/admin.py": ["/greatdebate/apps/activists/models.py"], "/greatdebate/apps/decisionMakers/views.py": ["/greatdebate/apps/campaigns/models.py", "/greatdebate/apps/decisionMakers/models.py"], "/greatdebate/apps/campaigns/admin.py": ["/greatdebate/apps/campaigns/models.py"], "/greatdebate/apps/organizers/views.py": ["/greatdebate/apps/organizers/models.py"]} |
58,475 | hackforchange/GreatDebate | refs/heads/master | /greatdebate/apps/campaigns/views.py | from csv import writer
from django.conf import settings
from django.http import HttpResponse
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.views.decorators.http import require_POST, require_GET
from greatdebate.apps.campaigns.models import Campaign
from greatdebate.apps.organizers.models import Organizer
from greatdebate.apps.decisionMakers.models import DecisionMaker, DecisionMakerResponse
from greatdebate.apps.activists.models import Activist, ActivistResponse
from StringIO import StringIO
from django.db.models import Q
from json import dumps
def create_campaign_template(request):
    """Render the blank campaign-creation form."""
    return render_to_response('create_campaign.html')
@require_POST
def save_campaign(request):
    """Creates a new campaign.

    Validates required POST fields, reuses or creates the Organizer for the
    posted email, creates the Campaign if its URL is new, attaches the
    requested decision makers, and renders the embed snippets for the result.
    """
    required_fields_list = [
        'campaign_url',
        'decision_makers',
        'email',
    ]
    for field in required_fields_list:
        if field not in request.POST:
            return HttpResponse('Missing %s param in request' % (field))
    email = request.POST.get('email', None)
    name = request.POST.get('name', None)
    new_organizer = None
    if email is not None:
        # Reuse an existing organizer with this email, else create one.
        try:
            new_organizer = Organizer.objects.get(email=email)
        except Organizer.DoesNotExist:
            new_organizer = Organizer(email=email)
            new_organizer.save()
    # Campaign URLs act as a uniqueness key; refuse duplicates.
    try:
        Campaign.objects.get(campaign_url=request.POST['campaign_url'])
        return HttpResponse('Campaign Already Exists with that url')
    except Campaign.DoesNotExist:
        new_campaign_params = {
            'campaign_url': request.POST['campaign_url'],
            'organizer': new_organizer,
            'name': name,
        }
        campaign = Campaign(**new_campaign_params)
        campaign.save()
    decision_makers = request.POST['decision_makers'].split(',')  # this will be a list of ids from the form
    for dm_id in decision_makers:
        # Unknown decision-maker ids are silently skipped.
        try:
            dm = DecisionMaker.objects.get(pk=dm_id)
        except DecisionMaker.DoesNotExist:
            continue
        campaign.decision_maker.add(dm)
    # BUG FIX: the responses iframe previously declared height="200" twice
    # and no width; the second attribute is meant to be width (compare the
    # take-action iframe below, which has both).
    response_iframe = '<iframe src="%sresponses/?campaign_id=%s" height="200" width="200" scrolling="no" frameborder="0"></iframe>' % (settings.URL_ROOT, campaign.id)
    takeaction_iframe = '<iframe src="%sbutton/?campaign_id=%s" height="90" width="150" scrolling="no" frameborder="0"></iframe>' % (settings.URL_ROOT, campaign.id)
    return render_to_response('create_campaign.html', {'takeaction_iframe': takeaction_iframe, 'response_iframe': response_iframe}, context_instance=RequestContext(request))
def button_html(request):
    # Returns markup for take action button
    base_url = settings.URL_ROOT
    campaign_id = request.GET.get('campaign_id', None)
    if campaign_id is None:
        return HttpResponse('No campaign id in request')
    # Count of activists who already took action on this campaign; rendered
    # on the button itself by the template.
    resp_count = ActivistResponse.objects.filter(campaign=campaign_id).count()
    context = {
        'campaign_id': campaign_id,
        'base_url': base_url,
        'resp_count': resp_count,
    }
    return render_to_response('button.html',context)
def current_campaigns(request):
    """Render the campaigns page listing every campaign."""
    return render_to_response('campaigns.html', {'campaigns': Campaign.objects.all()})
def campaign_responses(request):
    #Returns all the responses by decision makers for a given campaign
    base_url = settings.URL_ROOT
    # NOTE(review): raises if 'campaign_id' is absent or unknown -- this view
    # appears to be embedded in an iframe that always supplies it; confirm.
    campaign = Campaign.objects.get(pk=request.GET['campaign_id'])
    responses = DecisionMakerResponse.objects.filter(campaign=campaign)
    #import ipdb; ipdb.set_trace();
    context = {
        'base_url': base_url,
        'responses': responses,
    }
    return render_to_response('responses_widget.html',context)
def export_data(request, campaign_id):
    """Exports a campaign's activist responses as a downloadable CSV.

    Returns a plain HttpResponse error message if the campaign id is unknown.
    """
    try:
        campaign = Campaign.objects.get(pk=campaign_id)
    except Campaign.DoesNotExist:
        return HttpResponse('No campaign exists with id %s' % (campaign_id))
    activist_responses = ActivistResponse.objects.filter(campaign__id=campaign_id)
    tmp_file = StringIO()
    response_csv = writer(tmp_file)
    response_csv.writerow(['First Name', 'Last Name', 'Email', 'Address', 'City', 'Zip', 'Message'])
    for activist_response in activist_responses:
        # Empty/NULL fields are exported as the literal string 'None'.
        response_csv.writerow([
            activist_response.activist.first_name or 'None',
            activist_response.activist.last_name or 'None',
            activist_response.activist.email or 'None',
            activist_response.activist.address or 'None',
            activist_response.activist.city or 'None',
            activist_response.activist.zip or 'None',
            activist_response.message or 'None',
        ])
    tmp_file.seek(0)
    response = HttpResponse(tmp_file)
    response['Content-type'] = 'application/force-download'
    # BUG FIX: 'attachement' is a misspelling; RFC 6266 defines the
    # disposition type 'attachment', and browsers ignore unknown types.
    response['Content-Disposition'] = 'attachment; filename=%s.csv' % (campaign.campaign_url[:10])
    return response
def campaign_lookup(request, limit=5):
    """Autocomplete lookup: campaigns whose name contains the GET 'term'.

    Returns at most `limit` matches as a JSON list of {label, id} dicts.
    """
    term = request.GET['term']
    matches = Campaign.objects.filter(Q(name__icontains=term))[:limit]
    results = [{"label": match.name, "id": match.id} for match in matches]
    return HttpResponse(dumps(results), mimetype='application/javascript')
@require_GET
def campaign_responses_get(request, campaign_id):
    """Exports a campaign's activist responses as JSON.

    Returns a plain HttpResponse error message if the campaign id is unknown.
    """
    try:
        campaign = Campaign.objects.get(pk=campaign_id)
    except Campaign.DoesNotExist:
        return HttpResponse('No campaign exists with id %s' % (campaign_id))
    activist_responses = ActivistResponse.objects.filter(campaign__id=campaign_id)
    output_list = []
    for activist_response in activist_responses:
        # Empty/NULL fields are serialized as the literal string 'None'.
        activist = activist_response.activist
        output_list.append({
            'first_name': activist.first_name or 'None',
            'last_name': activist.last_name or 'None',
            'email': activist.email or 'None',
            'address': activist.address or 'None',
            'city': activist.city or 'None',
            'zip': activist.zip or 'None',
            'message': activist_response.message or 'None',
        })
    return HttpResponse(dumps(output_list), mimetype='application/javascript')
| {"/greatdebate/apps/organizers/admin.py": ["/greatdebate/apps/organizers/models.py"], "/greatdebate/apps/campaigns/models.py": ["/greatdebate/apps/decisionMakers/models.py", "/greatdebate/apps/organizers/models.py"], "/greatdebate/apps/campaigns/tests.py": ["/greatdebate/apps/campaigns/models.py", "/greatdebate/apps/decisionMakers/models.py"], "/greatdebate/apps/campaigns/views.py": ["/greatdebate/apps/campaigns/models.py", "/greatdebate/apps/organizers/models.py", "/greatdebate/apps/decisionMakers/models.py", "/greatdebate/apps/activists/models.py"], "/greatdebate/apps/activists/tests.py": ["/greatdebate/apps/activists/models.py", "/greatdebate/apps/campaigns/models.py"], "/greatdebate/apps/activists/views.py": ["/greatdebate/apps/activists/models.py", "/greatdebate/apps/campaigns/models.py", "/greatdebate/apps/decisionMakers/models.py"], "/greatdebate/apps/decisionMakers/tests.py": ["/greatdebate/apps/campaigns/models.py", "/greatdebate/apps/decisionMakers/models.py"], "/greatdebate/apps/decisionMakers/admin.py": ["/greatdebate/apps/decisionMakers/models.py"], "/greatdebate/apps/activists/admin.py": ["/greatdebate/apps/activists/models.py"], "/greatdebate/apps/decisionMakers/views.py": ["/greatdebate/apps/campaigns/models.py", "/greatdebate/apps/decisionMakers/models.py"], "/greatdebate/apps/campaigns/admin.py": ["/greatdebate/apps/campaigns/models.py"], "/greatdebate/apps/organizers/views.py": ["/greatdebate/apps/organizers/models.py"]} |
58,476 | hackforchange/GreatDebate | refs/heads/master | /greatdebate/apps/activists/tests.py | from django.test import TestCase
from django.test.client import Client
from greatdebate.apps.activists.models import Activist, ActivistResponse
from greatdebate.apps.campaigns.models import Campaign
class ActivistsTestCase(TestCase):
    """Integration tests for the activist take-action endpoints."""
    def setUp(self):
        # Fresh test client for every test method.
        self.client = Client()
    def test_proccess_takeaction_success(self):
        """
        Tests that we can successfully process a take action.
        """
        new_campaign = Campaign(campaign_url='test.com')
        new_campaign.save()
        post_params = {
            'email': 'test@test.com',
            'campaign_id': new_campaign.id,
        }
        response = self.client.post('/process_takeaction/', post_params)
        # Posting should have created exactly one Activist ...
        activists = Activist.objects.all()
        self.assertEqual(activists.count(), 1)
        # ... and one ActivistResponse tied to that activist.
        responses = ActivistResponse.objects.filter(activist=activists[0])
        self.assertEqual(responses.count(), 1)
    def test_take_action_template(self):
        """
        tests we can serve take_action_template
        """
        new_campaign = Campaign(campaign_url='test.com')
        new_campaign.save()
        response = self.client.get('/takeaction/?campaign_id=%s' % (new_campaign.id))
        self.assertEqual('takeaction.html', response.templates[0].name)
    def test_button_html_success(self):
        """Test we can render button.html"""
        new_campaign = Campaign(campaign_url='test.com')
        new_campaign.save()
        response = self.client.get('/button/?campaign_id=%s' % (new_campaign.id))
        #import ipdb; ipdb.set_trace()
        self.assertEqual('button.html', response.templates[0].name)
| {"/greatdebate/apps/organizers/admin.py": ["/greatdebate/apps/organizers/models.py"], "/greatdebate/apps/campaigns/models.py": ["/greatdebate/apps/decisionMakers/models.py", "/greatdebate/apps/organizers/models.py"], "/greatdebate/apps/campaigns/tests.py": ["/greatdebate/apps/campaigns/models.py", "/greatdebate/apps/decisionMakers/models.py"], "/greatdebate/apps/campaigns/views.py": ["/greatdebate/apps/campaigns/models.py", "/greatdebate/apps/organizers/models.py", "/greatdebate/apps/decisionMakers/models.py", "/greatdebate/apps/activists/models.py"], "/greatdebate/apps/activists/tests.py": ["/greatdebate/apps/activists/models.py", "/greatdebate/apps/campaigns/models.py"], "/greatdebate/apps/activists/views.py": ["/greatdebate/apps/activists/models.py", "/greatdebate/apps/campaigns/models.py", "/greatdebate/apps/decisionMakers/models.py"], "/greatdebate/apps/decisionMakers/tests.py": ["/greatdebate/apps/campaigns/models.py", "/greatdebate/apps/decisionMakers/models.py"], "/greatdebate/apps/decisionMakers/admin.py": ["/greatdebate/apps/decisionMakers/models.py"], "/greatdebate/apps/activists/admin.py": ["/greatdebate/apps/activists/models.py"], "/greatdebate/apps/decisionMakers/views.py": ["/greatdebate/apps/campaigns/models.py", "/greatdebate/apps/decisionMakers/models.py"], "/greatdebate/apps/campaigns/admin.py": ["/greatdebate/apps/campaigns/models.py"], "/greatdebate/apps/organizers/views.py": ["/greatdebate/apps/organizers/models.py"]} |
58,477 | hackforchange/GreatDebate | refs/heads/master | /greatdebate/apps/activists/views.py | from django.http import HttpResponse
from django.shortcuts import render_to_response
from django.views.decorators.http import require_POST, require_GET
from greatdebate.apps.activists.models import Activist, ActivistResponse
from greatdebate.apps.campaigns.models import Campaign
from greatdebate.apps.decisionMakers.models import DecisionMakerResponse
@require_POST
def process_takeaction(request):
    """
    Allows activist to fillout form and take action
    """
    if 'email' not in request.POST or 'campaign_id' not in request.POST:
        return HttpResponse('Invalid Data. Email and Campaign Required.')
    try:
        campaign = Campaign.objects.get(pk=request.POST['campaign_id'])
    except (Campaign.DoesNotExist, ValueError):
        return HttpResponse('Invalid Campaign ID')
    expected_params = [
        'email',
        'first_name',
        'last_name',
        'address',
        'city',
        'zip',
        'message',
    ]
    # Missing or empty form fields are stored as None.
    activist_fields = {}
    for param in expected_params:
        value = request.POST.get(param, False)
        activist_fields[param] = value if value else None
    # 'message' belongs to the response, not the activist record.
    message = activist_fields.pop('message')
    new_activist = Activist(**activist_fields)
    new_activist.save()
    new_activist_response = ActivistResponse(
        campaign=campaign,
        activist=new_activist,
        message=message,
    )
    new_activist_response.save()
    # Show any decision-maker responses on the confirmation page.
    dm_responses = DecisionMakerResponse.objects.filter(campaign=campaign)
    return render_to_response('takeaction_complete.html', {'dm_responses': dm_responses})
def take_action_template(request):
    """
    Render the take-action form for the campaign identified by the
    'campaign_id' GET parameter.
    """
    if 'campaign_id' not in request.GET:
        return HttpResponse('campaign id not in request')
    campaign_id = request.GET['campaign_id']
    try:
        campaign = Campaign.objects.get(pk=campaign_id)
    except (Campaign.DoesNotExist, ValueError):
        return HttpResponse('Campaign does not exist for %s' % (request.GET['campaign_id']))
    return render_to_response('takeaction.html', {
        'dms': campaign.decision_maker.all(),
        'campaign': campaign,
    })
| {"/greatdebate/apps/organizers/admin.py": ["/greatdebate/apps/organizers/models.py"], "/greatdebate/apps/campaigns/models.py": ["/greatdebate/apps/decisionMakers/models.py", "/greatdebate/apps/organizers/models.py"], "/greatdebate/apps/campaigns/tests.py": ["/greatdebate/apps/campaigns/models.py", "/greatdebate/apps/decisionMakers/models.py"], "/greatdebate/apps/campaigns/views.py": ["/greatdebate/apps/campaigns/models.py", "/greatdebate/apps/organizers/models.py", "/greatdebate/apps/decisionMakers/models.py", "/greatdebate/apps/activists/models.py"], "/greatdebate/apps/activists/tests.py": ["/greatdebate/apps/activists/models.py", "/greatdebate/apps/campaigns/models.py"], "/greatdebate/apps/activists/views.py": ["/greatdebate/apps/activists/models.py", "/greatdebate/apps/campaigns/models.py", "/greatdebate/apps/decisionMakers/models.py"], "/greatdebate/apps/decisionMakers/tests.py": ["/greatdebate/apps/campaigns/models.py", "/greatdebate/apps/decisionMakers/models.py"], "/greatdebate/apps/decisionMakers/admin.py": ["/greatdebate/apps/decisionMakers/models.py"], "/greatdebate/apps/activists/admin.py": ["/greatdebate/apps/activists/models.py"], "/greatdebate/apps/decisionMakers/views.py": ["/greatdebate/apps/campaigns/models.py", "/greatdebate/apps/decisionMakers/models.py"], "/greatdebate/apps/campaigns/admin.py": ["/greatdebate/apps/campaigns/models.py"], "/greatdebate/apps/organizers/views.py": ["/greatdebate/apps/organizers/models.py"]} |
58,478 | hackforchange/GreatDebate | refs/heads/master | /greatdebate/apps/decisionMakers/tests.py | from django.test import TestCase
from django.test.client import Client
from greatdebate.apps.campaigns.models import Campaign
from greatdebate.apps.decisionMakers.models import DecisionMaker
from json import loads
class DecisionMakersTestCase(TestCase):
    """Integration tests for the decisionMakers app views."""

    def setUp(self):
        # A fresh test client for each test method.
        self.client = Client()

    def test_response_template(self):
        """Tests that /respond/ renders the response.html template"""
        response = self.client.get('/respond/')
        self.assertEqual(response.templates[0].name, 'response.html')

    def test_dm_lookup_success(self):
        """Tests we can look up a decision maker via the autocomplete endpoint"""
        new_dm = DecisionMaker(name='barak obama', title='president of the us')
        new_dm.save()
        # 'us' matches the title via the icontains filter in the view.
        response = self.client.get('/decision_maker_lookup/?term=us')
        resp_list = loads(response.content)
        self.assertEqual(resp_list[0]['id'], new_dm.id)
        self.assertTrue(new_dm.title in resp_list[0]['label'])

    def test_post_response_success(self):
        """Tests we can successfully post a response as a decision maker"""
        new_campaign = Campaign(campaign_url='test.com')
        new_campaign.save()
        new_dm = DecisionMaker(name='barak', title='pres')
        new_dm.save()
        new_campaign.decision_maker.add(new_dm)
        post_params = {
            'campaign_ids': new_campaign.id,
            'response_url': 'test.com',
        }
        response = self.client.post('/post_response/', post_params)
        #import ipdb; ipdb.set_trace()
        # NOTE(review): the view renders "Response Posted Successfully." — this
        # expected substring 'POST SUCCESSFUL' looks out of sync; confirm.
        self.assertTrue('POST SUCCESSFUL' in response.content)
| {"/greatdebate/apps/organizers/admin.py": ["/greatdebate/apps/organizers/models.py"], "/greatdebate/apps/campaigns/models.py": ["/greatdebate/apps/decisionMakers/models.py", "/greatdebate/apps/organizers/models.py"], "/greatdebate/apps/campaigns/tests.py": ["/greatdebate/apps/campaigns/models.py", "/greatdebate/apps/decisionMakers/models.py"], "/greatdebate/apps/campaigns/views.py": ["/greatdebate/apps/campaigns/models.py", "/greatdebate/apps/organizers/models.py", "/greatdebate/apps/decisionMakers/models.py", "/greatdebate/apps/activists/models.py"], "/greatdebate/apps/activists/tests.py": ["/greatdebate/apps/activists/models.py", "/greatdebate/apps/campaigns/models.py"], "/greatdebate/apps/activists/views.py": ["/greatdebate/apps/activists/models.py", "/greatdebate/apps/campaigns/models.py", "/greatdebate/apps/decisionMakers/models.py"], "/greatdebate/apps/decisionMakers/tests.py": ["/greatdebate/apps/campaigns/models.py", "/greatdebate/apps/decisionMakers/models.py"], "/greatdebate/apps/decisionMakers/admin.py": ["/greatdebate/apps/decisionMakers/models.py"], "/greatdebate/apps/activists/admin.py": ["/greatdebate/apps/activists/models.py"], "/greatdebate/apps/decisionMakers/views.py": ["/greatdebate/apps/campaigns/models.py", "/greatdebate/apps/decisionMakers/models.py"], "/greatdebate/apps/campaigns/admin.py": ["/greatdebate/apps/campaigns/models.py"], "/greatdebate/apps/organizers/views.py": ["/greatdebate/apps/organizers/models.py"]} |
58,479 | hackforchange/GreatDebate | refs/heads/master | /greatdebate/apps/decisionMakers/admin.py | from django.contrib import admin
from greatdebate.apps.decisionMakers.models import DecisionMaker, DecisionMakerResponse
class DecisionMakerAdmin(admin.ModelAdmin):
    """Admin config for DecisionMaker: edit and list only name and title."""
    fields = ('name','title')
    list_display = ('name','title')
admin.site.register(DecisionMaker,DecisionMakerAdmin)
class DecisionMakerResponseAdmin(admin.ModelAdmin):
    """Admin config for DecisionMakerResponse; list omits the campaign column."""
    fields = ('decision_maker', 'campaign', 'response_url')
    list_display = ('decision_maker', 'response_url')
admin.site.register(DecisionMakerResponse, DecisionMakerResponseAdmin)
| {"/greatdebate/apps/organizers/admin.py": ["/greatdebate/apps/organizers/models.py"], "/greatdebate/apps/campaigns/models.py": ["/greatdebate/apps/decisionMakers/models.py", "/greatdebate/apps/organizers/models.py"], "/greatdebate/apps/campaigns/tests.py": ["/greatdebate/apps/campaigns/models.py", "/greatdebate/apps/decisionMakers/models.py"], "/greatdebate/apps/campaigns/views.py": ["/greatdebate/apps/campaigns/models.py", "/greatdebate/apps/organizers/models.py", "/greatdebate/apps/decisionMakers/models.py", "/greatdebate/apps/activists/models.py"], "/greatdebate/apps/activists/tests.py": ["/greatdebate/apps/activists/models.py", "/greatdebate/apps/campaigns/models.py"], "/greatdebate/apps/activists/views.py": ["/greatdebate/apps/activists/models.py", "/greatdebate/apps/campaigns/models.py", "/greatdebate/apps/decisionMakers/models.py"], "/greatdebate/apps/decisionMakers/tests.py": ["/greatdebate/apps/campaigns/models.py", "/greatdebate/apps/decisionMakers/models.py"], "/greatdebate/apps/decisionMakers/admin.py": ["/greatdebate/apps/decisionMakers/models.py"], "/greatdebate/apps/activists/admin.py": ["/greatdebate/apps/activists/models.py"], "/greatdebate/apps/decisionMakers/views.py": ["/greatdebate/apps/campaigns/models.py", "/greatdebate/apps/decisionMakers/models.py"], "/greatdebate/apps/campaigns/admin.py": ["/greatdebate/apps/campaigns/models.py"], "/greatdebate/apps/organizers/views.py": ["/greatdebate/apps/organizers/models.py"]} |
58,480 | hackforchange/GreatDebate | refs/heads/master | /greatdebate/apps/activists/admin.py | from django.contrib import admin
from greatdebate.apps.activists.models import Activist, ActivistResponse
class ActivistAdmin(admin.ModelAdmin):
    """Admin config for Activist: all contact fields editable and listed."""
    fields = ('first_name', 'last_name', 'email', 'address', 'city', 'zip')
    list_display = ('first_name', 'last_name', 'email', 'address', 'city', 'zip')
admin.site.register(Activist, ActivistAdmin)
class ActivistResponseAdmin(admin.ModelAdmin):
    """Admin config for ActivistResponse: campaign, activist and message."""
    fields = ('campaign', 'activist', 'message')
    list_display = ('campaign', 'activist', 'message')
admin.site.register(ActivistResponse, ActivistResponseAdmin)
| {"/greatdebate/apps/organizers/admin.py": ["/greatdebate/apps/organizers/models.py"], "/greatdebate/apps/campaigns/models.py": ["/greatdebate/apps/decisionMakers/models.py", "/greatdebate/apps/organizers/models.py"], "/greatdebate/apps/campaigns/tests.py": ["/greatdebate/apps/campaigns/models.py", "/greatdebate/apps/decisionMakers/models.py"], "/greatdebate/apps/campaigns/views.py": ["/greatdebate/apps/campaigns/models.py", "/greatdebate/apps/organizers/models.py", "/greatdebate/apps/decisionMakers/models.py", "/greatdebate/apps/activists/models.py"], "/greatdebate/apps/activists/tests.py": ["/greatdebate/apps/activists/models.py", "/greatdebate/apps/campaigns/models.py"], "/greatdebate/apps/activists/views.py": ["/greatdebate/apps/activists/models.py", "/greatdebate/apps/campaigns/models.py", "/greatdebate/apps/decisionMakers/models.py"], "/greatdebate/apps/decisionMakers/tests.py": ["/greatdebate/apps/campaigns/models.py", "/greatdebate/apps/decisionMakers/models.py"], "/greatdebate/apps/decisionMakers/admin.py": ["/greatdebate/apps/decisionMakers/models.py"], "/greatdebate/apps/activists/admin.py": ["/greatdebate/apps/activists/models.py"], "/greatdebate/apps/decisionMakers/views.py": ["/greatdebate/apps/campaigns/models.py", "/greatdebate/apps/decisionMakers/models.py"], "/greatdebate/apps/campaigns/admin.py": ["/greatdebate/apps/campaigns/models.py"], "/greatdebate/apps/organizers/views.py": ["/greatdebate/apps/organizers/models.py"]} |
58,481 | hackforchange/GreatDebate | refs/heads/master | /greatdebate/apps/decisionMakers/views.py | from django.db.models import Q
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render_to_response
from django.views.decorators.http import require_POST
from greatdebate.apps.campaigns.models import Campaign
from greatdebate.apps.decisionMakers.models import DecisionMaker, DecisionMakerResponse
from json import dumps
def decision_maker_lookup(request, limit=5):
    """
    Autocomplete endpoint for the organizer search box: return decision
    makers whose name or title contains the GET parameter 'term'.

    Parameters: 'term' GET parameter (search text); limit caps the number of
    suggestions. Returns a JSON list of {"label": "<name>, <title>", "id": pk}.
    """
    # Robustness fix: request.GET['term'] raised a 500 when the parameter was
    # absent; default to the empty string (which matches broadly) instead.
    term = request.GET.get('term', '')
    decision_makers = DecisionMaker.objects.filter(
        Q(name__icontains=term) | Q(title__icontains=term))[:limit]
    results = [{"label": dm.name + ', ' + dm.title, "id": dm.id}
               for dm in decision_makers]
    # mimetype= is correct for the pre-1.6 Django this project targets
    # (urls.py still imports django.conf.urls.defaults).
    return HttpResponse(dumps(results), mimetype='application/javascript')
@require_POST
def post_response(request):
    """
    Save a decision maker's response URL against a set of campaigns.

    Expects POST params 'campaign_ids' (repeatable) and 'response_url'.
    Renders response.html with a status message.
    """
    campaign_ids = request.POST.getlist('campaign_ids')
    campaigns_list = []
    for campaign_id in campaign_ids:
        try:
            campaigns_list.append(Campaign.objects.get(pk=campaign_id))
        except (Campaign.DoesNotExist, ValueError):
            # Robustness fix: a bad id previously produced a 500.
            return render_to_response('response.html',
                                      {"response_msg": "Invalid campaign id."})
    if not campaigns_list:
        # Robustness fix: an empty selection previously crashed on an unbound
        # 'campaign' variable leaked from the loop above.
        return render_to_response('response.html',
                                  {"response_msg": "No campaigns selected."})
    # NOTE(review): as in the original code, the decision maker is taken from
    # the *last* campaign in the list — confirm all selected campaigns are
    # meant to share one decision maker.
    response_add_params = {
        'response_url': request.POST['response_url'],
        'decision_maker': campaigns_list[-1].decision_maker.all()[0],
    }
    new_response = DecisionMakerResponse(**response_add_params)
    new_response.save()
    for campaign in campaigns_list:
        new_response.campaign.add(campaign)
    return render_to_response('response.html',{"response_msg":"Response Posted Successfully."})
def response_template(request):
    """Render the blank decision-maker response page."""
    # Renders DM response page
    return render_to_response('response.html')
| {"/greatdebate/apps/organizers/admin.py": ["/greatdebate/apps/organizers/models.py"], "/greatdebate/apps/campaigns/models.py": ["/greatdebate/apps/decisionMakers/models.py", "/greatdebate/apps/organizers/models.py"], "/greatdebate/apps/campaigns/tests.py": ["/greatdebate/apps/campaigns/models.py", "/greatdebate/apps/decisionMakers/models.py"], "/greatdebate/apps/campaigns/views.py": ["/greatdebate/apps/campaigns/models.py", "/greatdebate/apps/organizers/models.py", "/greatdebate/apps/decisionMakers/models.py", "/greatdebate/apps/activists/models.py"], "/greatdebate/apps/activists/tests.py": ["/greatdebate/apps/activists/models.py", "/greatdebate/apps/campaigns/models.py"], "/greatdebate/apps/activists/views.py": ["/greatdebate/apps/activists/models.py", "/greatdebate/apps/campaigns/models.py", "/greatdebate/apps/decisionMakers/models.py"], "/greatdebate/apps/decisionMakers/tests.py": ["/greatdebate/apps/campaigns/models.py", "/greatdebate/apps/decisionMakers/models.py"], "/greatdebate/apps/decisionMakers/admin.py": ["/greatdebate/apps/decisionMakers/models.py"], "/greatdebate/apps/activists/admin.py": ["/greatdebate/apps/activists/models.py"], "/greatdebate/apps/decisionMakers/views.py": ["/greatdebate/apps/campaigns/models.py", "/greatdebate/apps/decisionMakers/models.py"], "/greatdebate/apps/campaigns/admin.py": ["/greatdebate/apps/campaigns/models.py"], "/greatdebate/apps/organizers/views.py": ["/greatdebate/apps/organizers/models.py"]} |
58,482 | hackforchange/GreatDebate | refs/heads/master | /greatdebate/apps/campaigns/admin.py | from django.contrib import admin
from greatdebate.apps.campaigns.models import Campaign
class CampaignAdmin(admin.ModelAdmin):
    """Admin config for Campaign; decision_maker is editable but not listed."""
    list_display = ('name', 'organizer', 'campaign_url',)
    fields = ('name', 'organizer', 'decision_maker', 'campaign_url',)
admin.site.register(Campaign, CampaignAdmin)
| {"/greatdebate/apps/organizers/admin.py": ["/greatdebate/apps/organizers/models.py"], "/greatdebate/apps/campaigns/models.py": ["/greatdebate/apps/decisionMakers/models.py", "/greatdebate/apps/organizers/models.py"], "/greatdebate/apps/campaigns/tests.py": ["/greatdebate/apps/campaigns/models.py", "/greatdebate/apps/decisionMakers/models.py"], "/greatdebate/apps/campaigns/views.py": ["/greatdebate/apps/campaigns/models.py", "/greatdebate/apps/organizers/models.py", "/greatdebate/apps/decisionMakers/models.py", "/greatdebate/apps/activists/models.py"], "/greatdebate/apps/activists/tests.py": ["/greatdebate/apps/activists/models.py", "/greatdebate/apps/campaigns/models.py"], "/greatdebate/apps/activists/views.py": ["/greatdebate/apps/activists/models.py", "/greatdebate/apps/campaigns/models.py", "/greatdebate/apps/decisionMakers/models.py"], "/greatdebate/apps/decisionMakers/tests.py": ["/greatdebate/apps/campaigns/models.py", "/greatdebate/apps/decisionMakers/models.py"], "/greatdebate/apps/decisionMakers/admin.py": ["/greatdebate/apps/decisionMakers/models.py"], "/greatdebate/apps/activists/admin.py": ["/greatdebate/apps/activists/models.py"], "/greatdebate/apps/decisionMakers/views.py": ["/greatdebate/apps/campaigns/models.py", "/greatdebate/apps/decisionMakers/models.py"], "/greatdebate/apps/campaigns/admin.py": ["/greatdebate/apps/campaigns/models.py"], "/greatdebate/apps/organizers/views.py": ["/greatdebate/apps/organizers/models.py"]} |
58,483 | hackforchange/GreatDebate | refs/heads/master | /greatdebate/urls.py | from django.conf import settings
from django.conf.urls.defaults import patterns, include, url
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'greatdebate.views.home', name='home'),
    # url(r'^greatdebate/', include('greatdebate.foo.urls')),

    # Campaigns
    url(r'^create_campaign/', 'greatdebate.apps.campaigns.views.create_campaign_template'),
    url(r'^save_campaign/', 'greatdebate.apps.campaigns.views.save_campaign'),
    url(r'^button/', 'greatdebate.apps.campaigns.views.button_html'),
    url(r'^current_campaigns/', 'greatdebate.apps.campaigns.views.current_campaigns'),
    url(r'^responses/', 'greatdebate.apps.campaigns.views.campaign_responses'),
    url(r'^export_campaign_data/(?P<campaign_id>\d+)/', 'greatdebate.apps.campaigns.views.export_data'),
    url(r'^api/campaign/responses/get/(?P<campaign_id>\d+)/', 'greatdebate.apps.campaigns.views.campaign_responses_get'),

    # Activists
    url(r'^takeaction/', 'greatdebate.apps.activists.views.take_action_template'),
    url(r'^process_takeaction/', 'greatdebate.apps.activists.views.process_takeaction'),
    # Bug fix: a duplicate url(r'^button/$', ...) entry was removed here — the
    # unanchored r'^button/' pattern above always matched first, so the
    # duplicate was unreachable.
    url(r'^campaign_lookup/$', 'greatdebate.apps.campaigns.views.campaign_lookup'),

    # Decision Makers
    url(r'^respond/', 'greatdebate.apps.decisionMakers.views.response_template'),
    url(r'^decision_maker_lookup/', 'greatdebate.apps.decisionMakers.views.decision_maker_lookup'),
    url(r'^post_response/', 'greatdebate.apps.decisionMakers.views.post_response'),

    # Organizers (home page)
    url(r'^$', 'greatdebate.apps.organizers.views.render_home'),

    # Uncomment the admin/doc line below to enable admin documentation:
    # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),

    # Django admin
    url(r'^admin/', include(admin.site.urls)),
)
if settings.SERVE_MEDIA:
    # Development-only static serving straight from MEDIA_ROOT.
    urlpatterns += patterns("", url(r'^media/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.MEDIA_ROOT, 'show_indexes': True,}),)
| {"/greatdebate/apps/organizers/admin.py": ["/greatdebate/apps/organizers/models.py"], "/greatdebate/apps/campaigns/models.py": ["/greatdebate/apps/decisionMakers/models.py", "/greatdebate/apps/organizers/models.py"], "/greatdebate/apps/campaigns/tests.py": ["/greatdebate/apps/campaigns/models.py", "/greatdebate/apps/decisionMakers/models.py"], "/greatdebate/apps/campaigns/views.py": ["/greatdebate/apps/campaigns/models.py", "/greatdebate/apps/organizers/models.py", "/greatdebate/apps/decisionMakers/models.py", "/greatdebate/apps/activists/models.py"], "/greatdebate/apps/activists/tests.py": ["/greatdebate/apps/activists/models.py", "/greatdebate/apps/campaigns/models.py"], "/greatdebate/apps/activists/views.py": ["/greatdebate/apps/activists/models.py", "/greatdebate/apps/campaigns/models.py", "/greatdebate/apps/decisionMakers/models.py"], "/greatdebate/apps/decisionMakers/tests.py": ["/greatdebate/apps/campaigns/models.py", "/greatdebate/apps/decisionMakers/models.py"], "/greatdebate/apps/decisionMakers/admin.py": ["/greatdebate/apps/decisionMakers/models.py"], "/greatdebate/apps/activists/admin.py": ["/greatdebate/apps/activists/models.py"], "/greatdebate/apps/decisionMakers/views.py": ["/greatdebate/apps/campaigns/models.py", "/greatdebate/apps/decisionMakers/models.py"], "/greatdebate/apps/campaigns/admin.py": ["/greatdebate/apps/campaigns/models.py"], "/greatdebate/apps/organizers/views.py": ["/greatdebate/apps/organizers/models.py"]} |
58,484 | hackforchange/GreatDebate | refs/heads/master | /greatdebate/apps/organizers/views.py | from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render_to_response
from django.views.decorators.http import require_POST
from greatdebate.apps.organizers.models import Organizer
def new_organizer(request):
    """
    Create an Organizer from POSTed form data, then redirect to the
    campaign-creation page.

    Bug fix: required_fields_list was declared but never used, so a missing
    'email' raised a KeyError (HTTP 500); the list is now actually checked
    and a plain error response is returned instead, matching the style of
    the other views (e.g. activists.views.process_takeaction).
    """
    required_fields_list = [
        'email',
    ]
    for field in required_fields_list:
        if field not in request.POST:
            return HttpResponse('Missing required field: %s' % field)
    organizer_insert_dict = {'email': request.POST['email']}
    organizer = Organizer(**organizer_insert_dict)
    organizer.save()
    return HttpResponseRedirect('/create_campaign/')
def render_home(request):
    """Render the public landing page."""
    return render_to_response('home.html')
| {"/greatdebate/apps/organizers/admin.py": ["/greatdebate/apps/organizers/models.py"], "/greatdebate/apps/campaigns/models.py": ["/greatdebate/apps/decisionMakers/models.py", "/greatdebate/apps/organizers/models.py"], "/greatdebate/apps/campaigns/tests.py": ["/greatdebate/apps/campaigns/models.py", "/greatdebate/apps/decisionMakers/models.py"], "/greatdebate/apps/campaigns/views.py": ["/greatdebate/apps/campaigns/models.py", "/greatdebate/apps/organizers/models.py", "/greatdebate/apps/decisionMakers/models.py", "/greatdebate/apps/activists/models.py"], "/greatdebate/apps/activists/tests.py": ["/greatdebate/apps/activists/models.py", "/greatdebate/apps/campaigns/models.py"], "/greatdebate/apps/activists/views.py": ["/greatdebate/apps/activists/models.py", "/greatdebate/apps/campaigns/models.py", "/greatdebate/apps/decisionMakers/models.py"], "/greatdebate/apps/decisionMakers/tests.py": ["/greatdebate/apps/campaigns/models.py", "/greatdebate/apps/decisionMakers/models.py"], "/greatdebate/apps/decisionMakers/admin.py": ["/greatdebate/apps/decisionMakers/models.py"], "/greatdebate/apps/activists/admin.py": ["/greatdebate/apps/activists/models.py"], "/greatdebate/apps/decisionMakers/views.py": ["/greatdebate/apps/campaigns/models.py", "/greatdebate/apps/decisionMakers/models.py"], "/greatdebate/apps/campaigns/admin.py": ["/greatdebate/apps/campaigns/models.py"], "/greatdebate/apps/organizers/views.py": ["/greatdebate/apps/organizers/models.py"]} |
58,485 | hackforchange/GreatDebate | refs/heads/master | /greatdebate/apps/activists/models.py | from django.db import models
class Activist(models.Model):
    """
    Activists are people that take action on certain campaigns
    """
    # Every field is optional: the take-action form fills in None for any
    # value the activist leaves blank (see activists/views.py).
    first_name = models.CharField(max_length=20, null=True, blank=True)
    last_name = models.CharField(max_length=20, null=True, blank=True)
    email = models.CharField(max_length=255, null=True, blank=True)
    address = models.CharField(max_length=255, null=True, blank=True)
    city = models.CharField(max_length=255, null=True, blank=True)
    # NOTE(review): IntegerField cannot preserve leading zeros in postal
    # codes — consider a CharField.
    zip = models.IntegerField(null=True, blank=True)

    def __unicode__(self):
        # "first-last" display name for the admin.
        return u'%s-%s' % (self.first_name,self.last_name)
class ActivistResponse(models.Model):
    """
    This is the object for an activist's response for a specific campaign
    """
    # String reference avoids a circular import with the campaigns app.
    campaign = models.ForeignKey('campaigns.Campaign')
    activist = models.ForeignKey(Activist)
    message = models.TextField(null=True, blank=True)

    def __unicode__(self):
        # Truncated campaign URL plus activist first name for admin listings.
        return u'%s-%s' % (self.campaign.campaign_url[:8], self.activist.first_name)
| {"/greatdebate/apps/organizers/admin.py": ["/greatdebate/apps/organizers/models.py"], "/greatdebate/apps/campaigns/models.py": ["/greatdebate/apps/decisionMakers/models.py", "/greatdebate/apps/organizers/models.py"], "/greatdebate/apps/campaigns/tests.py": ["/greatdebate/apps/campaigns/models.py", "/greatdebate/apps/decisionMakers/models.py"], "/greatdebate/apps/campaigns/views.py": ["/greatdebate/apps/campaigns/models.py", "/greatdebate/apps/organizers/models.py", "/greatdebate/apps/decisionMakers/models.py", "/greatdebate/apps/activists/models.py"], "/greatdebate/apps/activists/tests.py": ["/greatdebate/apps/activists/models.py", "/greatdebate/apps/campaigns/models.py"], "/greatdebate/apps/activists/views.py": ["/greatdebate/apps/activists/models.py", "/greatdebate/apps/campaigns/models.py", "/greatdebate/apps/decisionMakers/models.py"], "/greatdebate/apps/decisionMakers/tests.py": ["/greatdebate/apps/campaigns/models.py", "/greatdebate/apps/decisionMakers/models.py"], "/greatdebate/apps/decisionMakers/admin.py": ["/greatdebate/apps/decisionMakers/models.py"], "/greatdebate/apps/activists/admin.py": ["/greatdebate/apps/activists/models.py"], "/greatdebate/apps/decisionMakers/views.py": ["/greatdebate/apps/campaigns/models.py", "/greatdebate/apps/decisionMakers/models.py"], "/greatdebate/apps/campaigns/admin.py": ["/greatdebate/apps/campaigns/models.py"], "/greatdebate/apps/organizers/views.py": ["/greatdebate/apps/organizers/models.py"]} |
58,486 | realduke2000/PublicAddressBook | refs/heads/master | /contact.py | import web,os,re
import globals
from web import form
from globals import auth_cache, render
if globals.platform=='sae':
import sae_model as model
from sae_model import contact_data
else:
import model
from model import contact_data
# Routes for the contact sub-application (mounted under /contact by main.py).
urls = (
    "/", "contact",
    "/del/(\d+)",'delete_contact',
    "/vcf/(\d+|all)\.vcf",'contact_vcf',
)
class contact:
    """web.py handler that lists (GET) and creates (POST) contacts."""

    # Form used both to render the entry fields and to validate POSTed data.
    contact_form = form.Form(
        form.Hidden("Contact_Id", description="contact_id"),
        form.Textbox("Name", form.notnull, description="name"),
        form.Textbox("Telephone1", description="telephone1"),
        form.Textbox("Telephone2", description="telephone2"),
        form.Textbox("Location", description="location"),
        form.Textbox("Industry", description="industry")
    )

    def GET(self):
        """Render the contact list for authenticated users."""
        f = contact.contact_form()
        if globals.has_loggedin():
            # render contact data
            data = model.get_all_contact()
            return render.contact(f, data)
        else:
            raise web.seeother("/", True)

    def POST(self):
        '''
        Add a new contact
        '''
        if not globals.has_loggedin():
            raise web.seeother("/contact/", True)
        f = contact.contact_form()
        if not f.validates():
            raise web.seeother('/contact/', True)
        data = contact_data(f['Contact_Id'].value, f['Name'].value, f['Telephone1'].value, f['Telephone2'].value, f['Location'].value, f['Industry'].value)
        # TODO: update-in-place for an existing Contact_Id is not implemented;
        # every successful POST inserts a new row.
        model.new_contact(data)
        # Bug fix: web.seeother() only *returns* the redirect exception — it
        # must be raised (as every other handler in this module does), or the
        # client receives an empty 200 response instead of a redirect.
        raise web.seeother("/contact/", True)
class delete_contact:
    """web.py handler: POST /contact/del/<id> removes a single contact."""

    def POST(self, id):
        # Only authenticated sessions may delete; either way the client is
        # redirected back to the contact list.
        if globals.has_loggedin():
            model.del_contact(id)
        raise web.seeother('/contact/', True)
class contact_vcf:
    """web.py handler that exports one contact (or all of them) as a .vcf."""

    def GET(self, id):
        """Serve vCard data for /contact/vcf/<id>.vcf or /contact/vcf/all.vcf.

        ``id`` is either the literal 'all' or a decimal primary key (the URL
        regex in `urls` guarantees one of the two).
        """
        if globals.has_loggedin():
            # Bug fix: the header name was misspelled 'Contect-Type', so the
            # vCard MIME type was never actually sent.
            web.header('Content-Type', 'text/x-vcard;charset=utf-8')
            web.header("Content-Disposition", 'attachment')
            if id == 'all':
                # Concatenate every contact into one multi-entry vCard file.
                vcfs = ''
                for d in model.get_all_contact():
                    vcfs = vcfs + d.toVcard()
                if vcfs is not None and vcfs != '':
                    return vcfs
            elif re.match('^(\d+)$', id) is not None:
                # The match above guarantees id is purely digits, so it can be
                # passed to the model directly (the previous re.findall
                # round-trip produced the same string).
                data = model.get_contact(id)
                if data is not None and data != '':
                    return data.toVcard()
            else:
                print("id error, id=" + id)
                raise web.seeother('/contact/', True)
            # NOTE(review): as before, an empty/unknown result falls through
            # and returns None (empty body) — confirm intended.
        else:
            raise web.seeother('/', True)
app_contact = web.application(urls, locals())
| {"/contact.py": ["/globals.py", "/model.py"], "/main.py": ["/login.py", "/contact.py"]} |
58,487 | realduke2000/PublicAddressBook | refs/heads/master | /login.py | import web
from web import form
import os
import hashlib
import datetime
import globals
from globals import token_md5, auth_cache, render
# Routes for the login sub-application; the empty pattern maps the mount root.
urls = (
    "", "login"
)
class login:
    """web.py handler for the single-token login page."""

    # One password-style field; the shared token is the only credential.
    login_form = form.Form(
        form.Password("token", form.notnull, description="Token")
    )

    def GET(self):
        """Render the login form, or skip to /contact/ if already authed."""
        # check auth status
        if globals.has_loggedin():
            raise web.seeother("/contact/",True)
        f = login.login_form()
        return render.login(f)

    def POST(self):
        """Validate the submitted token and set the auth cookie on success."""
        f = login.login_form()
        if not f.validates():
            return render.login(f)
        token = f['token'].value
        try:
            # NOTE(review): plain '==' on md5 hex digests is not a
            # constant-time comparison; consider hmac.compare_digest.
            if token and token_md5 == hashlib.md5(token).hexdigest():
                # set auth cookie
                # Cookie value mixes in the host so it cannot be replayed
                # verbatim against another deployment.
                encryption = hashlib.md5(web.ctx.host + token).hexdigest()
                web.setcookie('auth', encryption, 60*60*24*7,path='/') #cookie expired in one week
                auth_cache[encryption]=str(datetime.datetime.today()) # for clean up cache
                raise web.seeother('/contact/',True)
            else:
                return render.login(f)
        except TypeError as ex:
            # NOTE(review): on TypeError this prints and implicitly returns
            # None (empty response) instead of re-rendering the form — confirm
            # this is intended.
            print ex
app_login = web.application(urls, locals())
| {"/contact.py": ["/globals.py", "/model.py"], "/main.py": ["/login.py", "/contact.py"]} |
58,488 | realduke2000/PublicAddressBook | refs/heads/master | /globals.py | #platform='sae'
platform=''
import os
import web
import shelve
import datetime
import threading
import atexit
if platform=='sae':
import sae.const
import MySQLdb as mdb
# md5 hex digest of the shared login token (compared in login.POST).
token_md5 = "e191efb684695f634db7004986c81487"
# NOTE(review): 'global' at module scope is a no-op — the name is
# module-global anyway; kept for byte-compatibility.
global auth_cache
app_root = os.path.dirname(__file__)
templates_root = os.path.join(app_root, 'templates')
# Shared web.py template renderer for the whole application.
render = web.template.render(templates_root)
class shelf_cache:
    """Dict-like persistent cache of auth cookies backed by a `shelve` file.

    Values are timestamp strings ("YYYY-MM-DD ..."); entries older than 7
    days are purged at startup and lazily on lookup via has_key().
    """

    def __init__(self,slef_path):
        # (sic: parameter name typo preserved) path of the shelf file on disk.
        self.__cache = shelve.open(slef_path)
        self.__cache.sync()
        # Startup sweep: drop every entry whose date portion is > 7 days old.
        for k in self.__cache.keys():
            cache_date = datetime.datetime.strptime(self.__cache[k][:10], "%Y-%m-%d")
            now_date = datetime.datetime.now()
            if (now_date - cache_date).days > 7:
                del self.__cache[k]

    def __del__(self):
        # Best-effort close; shelve flushes pending writes on close.
        self.__cache.close()

    def has_key(self, k):
        # Lazy expiry: a hit older than 7 days is deleted and reported absent.
        if self.__cache.has_key(k):
            cache_date = datetime.datetime.strptime(self.__cache[k][:10], "%Y-%m-%d")
            now_date = datetime.datetime.now()
            if (now_date - cache_date).days > 7:# remove expired cache
                del self.__cache[k]
                return False
            else:
                return True
        else:
            return False

    def __getitem__(self, index):
        return self.__cache[index]

    def __setitem__(self, index, value):
        self.__cache[index] = value

    def __delitem__(self, index):
        del self.__cache[index]
class db_cache:
    """Dict-like auth-cookie cache backed by the SAE MySQL `auth_cache` table.

    Keeps an in-memory mirror (self.__cache) of the table and writes through
    on mutation; expiry (> 7 days) is handled lazily in has_key().
    """

    def __init__(self):
        # Connection parameters come from the SAE platform constants.
        self.__conn = mdb.connect(charset='utf8', port=int(sae.const.MYSQL_PORT), host=sae.const.MYSQL_HOST, user=sae.const.MYSQL_USER, passwd=sae.const.MYSQL_PASS, db=sae.const.MYSQL_DB)
        self.__cursor = self.__conn.cursor()
        self.__cache={}
        # Preload the whole table into the in-memory mirror.
        self.__cursor.execute("select auth_cookie, cookie_set_time from auth_cache")
        results = self.__cursor.fetchall()
        if results is not None and len(results) > 0:
            for r in results:
                self.__cache[r[0]] = r[1]

    def __del__(self):
        # Best-effort close of the long-lived connection.
        self.__conn.close()

    def has_key(self, k):
        # Lazy expiry: stale hits are removed from both memory and the table.
        if self.__cache.has_key(k):
            cache_date = datetime.datetime.strptime(self.__cache[k][:10], "%Y-%m-%d")
            now_date = datetime.datetime.now()
            if (now_date - cache_date).days > 7 :
                del self.__cache[k]
                self.__cursor.execute("delete from auth_cache where auth_cookie=%s", (k,))
                self.__conn.commit()
                return False
            else:
                return True
        else:
            return False # Assume the cache in memory is the same as it in DB

    def __getitem__(self, k):
        return self.__cache[k]

    def __setitem__(self, k, v):
        # Replace-then-insert keeps the row unique per cookie.
        if self.__cache.has_key(k):
            self.__cursor.execute("delete from auth_cache where auth_cookie=%s", (k,)) # update cache
        self.__cache[k] = v
        self.__cursor.execute('insert into auth_cache(auth_cookie, cookie_set_time) values(%s, %s)', (k, self.__cache[k],))
        self.__conn.commit()

    def __delitem__(self, k):
        del self.__cache[k]
        self.__cursor.execute('delete from auth_cache where auth_cookie=%s',(k,))
        self.__conn.commit()
# Pick the auth-cookie cache backend: MySQL on SAE, a local shelve file
# otherwise.
if platform=='sae':
    auth_cache = db_cache()
else:
    auth_cache = shelf_cache(os.path.join(app_root, 'shelf'))
def has_loggedin():
    """Return True when the request carries a still-valid 'auth' cookie."""
    token = web.cookies().get('auth')
    # has_key() also performs lazy expiry of stale cache entries.
    return bool(token and auth_cache.has_key(token))
| {"/contact.py": ["/globals.py", "/model.py"], "/main.py": ["/login.py", "/contact.py"]} |
58,489 | realduke2000/PublicAddressBook | refs/heads/master | /main.py | import web
import login
import contact
# Top-level routing: /contact/* goes to the contact app, the root serves the
# login app.
urls = (
    '/contact', contact.app_contact,
    '/', login.app_login,
)
def notfound():
    """web.py 404 hook: send unknown paths back to the login page."""
    raise web.seeother('/', True)
# Top-level web.py application; unknown URLs fall through to notfound().
app_main = web.application(urls, locals())
app_main.notfound = notfound

if __name__=='__main__':
    # Bug fix: removed a leftover `dir(app_main)` debug call whose return
    # value was discarded.
    app_main.run()
| {"/contact.py": ["/globals.py", "/model.py"], "/main.py": ["/login.py", "/contact.py"]} |
58,490 | realduke2000/PublicAddressBook | refs/heads/master | /model.py | import web, os, datetime, sqlite3
import globals
class contact_data:
    """A single address-book entry plus (de)serialization helpers."""

    def __init__(self, id='', name='', tel1='', tel2='', loc='', industry='', lastupdate=''):
        self.id = id
        self.name = name
        self.telephone1 = tel1
        self.telephone2 = tel2
        self.location = loc
        self.industry = industry
        # Default the timestamp to "now" when the caller does not supply one.
        if lastupdate == '':
            self.lastupdate = str(datetime.datetime.now())
        else:
            self.lastupdate = lastupdate

    def totuple(self):
        """Return the fields (minus id) in DB column order for SQL inserts."""
        return (self.name, self.telephone1, self.telephone2, self.location, self.industry, self.lastupdate)

    def toVcard(self):
        """Serialize this contact as one vCard entry (CRLF line endings).

        Bug fixes: the vCard format requires CRLF line separators, but the
        first properties were emitted with bare '\\n' plus the source file's
        literal indentation; and the ADR property was missing the ':'
        separator before its value.
        """
        lines = [
            'BEGIN:VCARD',
            'FN;CHARSET=UTF-8:{0}'.format(self.name),
            'TEL;TYPE=cell:{0}'.format(self.telephone1),
        ]
        # The work number is optional.
        if (self.telephone2 is not None) and (self.telephone2 != ''):
            lines.append('TEL;TYPE=WORK:{0}'.format(self.telephone2))
        lines.append('ORG;CHARSET=UTF-8:' + self.industry)
        # Bug fix: ':' between the ADR property name and its value was missing.
        lines.append('ADR;TYPE=home;CHARSET=UTF-8:' + self.location)
        lines.append('END:VCARD')
        return '\r\n'.join(lines) + '\r\n'
def get_all_contact():
    """Fetch every contact row from the SQLite DB as contact_data objects.

    Returns an empty list when the query fails; errors are printed, matching
    the module's best-effort error-handling style.
    """
    conn = sqlite3.connect(os.path.join(globals.app_root, 'contact.db'))
    conn.text_factory = str  # return byte strings instead of unicode
    data = []
    try:
        cursor = conn.cursor()
        cursor.execute("select id, name, telephone1, telephone2, location, industry, lastupdate from contact")
        for r in cursor.fetchall():
            data.append(contact_data(r[0], r[1], r[2], r[3], r[4], r[5], r[6]))
    except Exception as e:
        print(e)
    finally:
        # Robustness fix: close the connection even when the query raises.
        conn.close()
    return data
def get_contact(id):
    """Fetch a single contact row by primary key.

    Returns a contact_data instance, or None when id is empty, unknown, or
    the query fails (errors are printed, matching the module style).
    """
    if (id is None) or (id == ''):
        return None
    conn = sqlite3.connect(os.path.join(globals.app_root, 'contact.db'))
    conn.text_factory = str  # byte strings, not unicode
    data = None
    try:
        cursor = conn.cursor()
        cursor.execute("select id, name, telephone1, telephone2, location, industry, lastupdate from contact where id=?", (id,))
        r = cursor.fetchone()
        if r:
            data = contact_data(r[0], r[1], r[2], r[3], r[4], r[5], r[6])
    except Exception as e:
        print(e)
    finally:
        # Robustness fix: close the connection even when the query raises.
        conn.close()
    return data
def new_contact(data):
    """Insert a contact_data row; return True on success, False on failure.

    Returns None (no-op) when data is None, matching the original contract.
    """
    if data is None:
        return
    conn = sqlite3.connect(os.path.join(globals.app_root, 'contact.db'))
    conn.text_factory = str
    result = False
    try:
        cursor = conn.cursor()
        cursor.execute(
            "insert into contact(name, telephone1, telephone2, location, industry, lastupdate) values(?,?,?,?,?,?)",
            (data.name, data.telephone1, data.telephone2, data.location, data.industry, data.lastupdate))
        conn.commit()
        result = True
    except Exception as e:
        print(e)
    finally:
        # Robustness fix: close the connection even when the insert raises.
        conn.close()
    return result
def del_contact(contact_id):
    """Delete a contact row by primary key; return True on success.

    Bug fix: the except block previously executed `raise e` *before* the
    print, which leaked the connection, skipped the return, and made the
    print unreachable. It now follows the same best-effort
    print-and-continue style as the other functions in this module.
    """
    if (contact_id is None) or (contact_id == ''):
        return
    conn = sqlite3.connect(os.path.join(globals.app_root, 'contact.db'))
    conn.text_factory = str
    result = False
    try:
        cursor = conn.cursor()
        cursor.execute("delete from contact where id = ?", (contact_id,))
        conn.commit()
        result = True
    except Exception as e:
        print(e)
    finally:
        # Always release the connection, even when the delete raises.
        conn.close()
    return result
| {"/contact.py": ["/globals.py", "/model.py"], "/main.py": ["/login.py", "/contact.py"]} |
58,495 | zasexton/tetgen | refs/heads/master | /tetgen/__init__.py | from tetgen import _tetgen
from tetgen.pytetgen import TetGen
| {"/tetgen/__init__.py": ["/tetgen/pytetgen.py"], "/tetgen/pytetgen.py": ["/tetgen/__init__.py"], "/tests/test_tetgen.py": ["/tetgen/__init__.py"]} |
58,496 | zasexton/tetgen | refs/heads/master | /tetgen/_version.py | """ tetgen version """
# Version components: (major, minor, patch)
version_info = 0, 2, 2

# Human-readable dotted version string, e.g. '0.2.2'
__version__ = '%d.%d.%d' % version_info
| {"/tetgen/__init__.py": ["/tetgen/pytetgen.py"], "/tetgen/pytetgen.py": ["/tetgen/__init__.py"], "/tests/test_tetgen.py": ["/tetgen/__init__.py"]} |
58,497 | zasexton/tetgen | refs/heads/master | /tetgen/pytetgen.py | """
Python module to interface with wrapped TetGen C++ code
"""
import sys
import os
import logging
import ctypes
import numpy as np
import vtki
from tetgen import _tetgen
# Module-level logger; CRITICAL keeps library use silent by default.
log = logging.getLogger(__name__)
log.setLevel('CRITICAL')

# Shared exception raised by TetGen() when the constructor arguments are
# neither a vtki.PolyData mesh nor a (vertex, face) array pair.
invalid_input = Exception('Invalid input. Must be either a vtki.PolyData\n' +
                          'object or vertex and face arrays')
class TetGen(object):
"""
Class to input, clean, and tetrahedralize surface meshes using TetGen
Parameters
----------
args : vtki.PolyData or (np.ndarray, np.ndarray)
Either a vtki surface mesh or a nx3 vertex array and nx3 face
array.
"""
_updated = None
def __init__(self, *args):
""" initializes MeshFix using a mesh """
if not args:
raise invalid_input
elif isinstance(args[0], vtki.PolyData):
mesh = args[0]
self.v = mesh.points
faces = mesh.faces
if faces.size % 4:
raise Exception('Invalid mesh. Must be an all triangular mesh.')
self.f = np.ascontiguousarray(faces.reshape(-1 , 4)[:, 1:])
elif isinstance(args[0], np.ndarray):
self._load_arrays(args[0], args[1])
else:
raise invalid_input
def _load_arrays(self, v, f):
"""
Loads triangular mesh from vertex and face arrays
Face arrays/lists are v and f. Both vertex and face arrays should be
2D arrays with each vertex containing XYZ data and each face containing
three points
"""
# Check inputs
if not isinstance(v, np.ndarray):
try:
v = np.asarray(v, np.float)
if v.ndim != 2 and v.shape[1] != 3:
raise Exception(
'Invalid vertex format. Shape should be (npoints, 3)')
except BaseException:
raise Exception(
'Unable to convert vertex input to valid numpy array')
if not isinstance(f, np.ndarray):
try:
f = np.asarray(f, ctypes.c_int)
if f.ndim != 2 and f.shape[1] != 3:
raise Exception(
'Invalid face format. Shape should be (nfaces, 3)')
except BaseException:
raise Exception(
'Unable to convert face input to valid numpy array')
# Store to self
self.v = v
self.f = f
def make_manifold(self, verbose=False):
"""
Reconstruct a manifold clean surface from input mesh. Updates
mesh in-place.
Requires pymeshfix
Parameters
----------
verbose : bool, optional
Controls output printing. Default False.
"""
if 'pymeshfix' not in sys.modules:
raise Exception('pymeshfix not installed. Please run: \n' +
'pip install pymeshfix')
# Run meshfix
import pymeshfix
meshfix = pymeshfix.meshfix(self.v, self.f)
meshfix.repair(verbose)
# overwrite this object with cleaned mesh
self.v = meshfix.v
self.f = meshfix.f
def plot(self, **kwargs):
"""
Displays input mesh
See help(vtki.Plot) for available arguments.
"""
self.mesh.plot(**kwargs)
@property
def mesh(self):
""" Return the surface mesh """
triangles = np.empty((self.f.shape[0], 4))
triangles[:, -3:] = self.f
triangles[:, 0] = 3
return vtki.PolyData(self.v, triangles, deep=False)
    def tetrahedralize(self,
                       switches='',
                       plc=1,
                       psc=0,
                       refine=0,
                       quality=1,
                       nobisect=True,
                       coarsen=0,
                       metric=0,
                       weighted=0,
                       brio_hilbert=1,
                       incrflip=0,
                       flipinsert=0,
                       varvolume=0,
                       fixedvolume=0,
                       noexact=0,
                       nostaticfilter=0,
                       insertaddpoints=0,
                       regionattrib=0,
                       cdtrefine=0,
                       diagnose=0,
                       convex=0,
                       zeroindex=0,
                       facesout=0,
                       edgesout=0,
                       neighout=0,
                       voroout=0,
                       meditview=0,
                       vtkview=0,
                       nobound=0,
                       nonodewritten=1,
                       noelewritten=1,
                       nofacewritten=1,
                       noiterationnum=0,
                       nomergefacet=0,
                       nomergevertex=0,
                       nojettison=0,
                       docheck=0,
                       quiet=0,
                       verbose=0,
                       vertexperblock=4092,
                       tetrahedraperblock=8188,
                       shellfaceperblock=4092,
                       nobisect_nomerge=1,
                       supsteiner_level=2,
                       addsteiner_algo=1,
                       coarsen_param=0,
                       weighted_param=0,
                       fliplinklevel=-1,
                       flipstarsize=-1,
                       fliplinklevelinc=1,
                       reflevel=3,
                       optscheme=7,
                       optlevel=2,
                       delmaxfliplevel=1,
                       order=2,
                       reversetetori=0,
                       steinerleft=10000,
                       no_sort=0,
                       hilbert_order=52,
                       hilbert_limit=8,
                       brio_threshold=64,
                       brio_ratio=0.125,
                       facet_separate_ang_tol=179.9,
                       facet_overlap_ang_tol=0.001,
                       facet_small_ang_tol=15.0,
                       maxvolume=-1.0,
                       minratio=2.0,
                       mindihedral=0.0,
                       optmaxdihedral=165.0,
                       optminsmtdihed=179.0,
                       optminslidihed=179.0,
                       epsilon=1.0e-8,
                       coarsen_percent=1.0):
        """
        Generates tetrahedra interior to the surface mesh described by the
        vertex and face arrays already loaded.  Returns nodes and elements
        belonging to the all-tetrahedral mesh.

        The tetrahedral generator uses the C++ library TetGen and can be
        configured either through a string of 'switches' or by changing the
        underlying behavior using the optional inputs.

        Should the user desire more control over the mesh tetrahedralization
        or wish to control the tetrahedralization in a more pythonic manner,
        use the optional inputs rather than inputting switches.

        Parameters
        ----------
        switches : string, optional
            String containing the same switches as in the C++ standalone
            implementation:

            -p Tetrahedralizes a piecewise linear complex (PLC).
            -Y Preserves the input surface mesh (does not modify it).
            -q Refines mesh (to improve mesh quality).
            -R Mesh coarsening (to reduce the mesh elements).
            -A Assigns attributes to tetrahedra in different regions.
            -a Applies a maximum tetrahedron volume constraint.
            -m Applies a mesh sizing function.
            -O Specifies the level of mesh optimization.
            -S Specifies maximum number of added points.
            -T Sets a tolerance for coplanar test (default 1E-8)
            -X Suppresses use of exact arithmetic.
            -M No merge of coplanar facets or very close vertices.
            -w Generates weighted Delaunay (regular) triangulation.
            -c Retains the convex hull of the PLC.
            -d Detects self-intersections of facets of the PLC.
            -z Numbers all output items starting from zero.
            -f Outputs all faces to .face file.
            -e Outputs all edges to .edge file.
            -n Outputs tetrahedra neighbors to .neigh file.
            -v Outputs Voronoi diagram to files.
            -g Outputs mesh to .mesh file for viewing by Medit.
            -k Outputs mesh to .vtk file for viewing by Paraview.
            -J No jettison of unused vertices from output .node file.
            -B Suppresses output of boundary information.
            -N Suppresses output of .node file.
            -E Suppresses output of .ele file.
            -F Suppresses output of .face and .edge file.
            -I Suppresses mesh iteration numbers.
            -C Checks the consistency of the final mesh.
            -Q Quiet: No terminal output except errors.
            -V Verbose: Detailed information, more terminal output.
            -h Help: A brief instruction for using TetGen.

        plc : bool, optional
            Enables/disables tetrahedral generation.  Default True.

        facet_overlap_ang_tol : double, optional
            Threshold angle at which TetGen will consider two faces
            overlapping.  Raising this will require a higher quality mesh
            input and may cause tetrahedralize to fail.  Default 0.001.

        quality : bool, optional
            Enables/disables mesh improvement.  Enabled by default.  Disable
            this to speed up mesh generation while sacrificing quality.
            Default True.

        minratio : double, optional
            Maximum allowable radius-edge ratio.  Must be greater than 1.0;
            the closer to 1.0, the higher the quality of the mesh.  Be sure
            to raise steinerleft to allow for the addition of points to
            improve the quality of the mesh.  Avoid overly restrictive
            requirements, otherwise, meshing will appear to hang.
            Default 2.0.  Testing has shown that 1.1 is a reasonable input
            for a high quality mesh.

        mindihedral : double, optional
            Minimum allowable dihedral angle.  The larger this number, the
            higher the quality of the resulting mesh.  Be sure to raise
            steinerleft to allow for the addition of points to improve the
            quality of the mesh.  Avoid overly restrictive requirements,
            otherwise, meshing will appear to hang.  Default 0.0.
            Testing has shown that 10 is a reasonable input.

        verbose : int, optional
            Controls the underlying TetGen library's output to console.
            Users using iPython will not see this output.  Setting to 1
            enables some information about the mesh generation while setting
            verbose to 2 enables more debug output.  Default 0, or no output.

        nobisect : bool, optional
            Maps to the TetGen ``-Y`` switch, which preserves the input
            surface mesh; when False, Steiner points may be added on the
            surface and the surface mesh may be modified.  Default True
            (NOTE: the original docstring incorrectly said False).  If your
            input surface mesh is already well shaped, keeping this enabled
            improves meshing speed and mesh quality.

        steinerleft : int, optional
            Steiner points are points added to the original surface mesh to
            create a valid tetrahedral mesh.  Setting this to -1 will allow
            TetGen to create an unlimited number of Steiner points, but the
            program will likely hang if this is used in combination with
            narrow quality requirements.  Default 10000 (NOTE: the original
            docstring claimed 100000; the signature default is 10000).

            The first type of Steiner points are used in creating an initial
            tetrahedralization of the PLC.  These Steiner points are
            mandatory in order to create a valid tetrahedralization.

            The second type of Steiner points are used in creating quality
            tetrahedral meshes of PLCs.  These Steiner points are optional,
            while they may be necessary in order to improve the mesh quality
            or to conform the size of mesh elements.

        optmaxdihedral : double, optional
            Setting unreachable using switches.  Controls the optimal
            maximum dihedral.  Settings closer, but not exceeding, 180
            degrees result in a lower quality mesh.  Should be between 135
            and 180 degrees.  Default 165.0.

        order : int, optional
            Controls whether TetGen creates linear or quadratic tetrahedra.
            Set order to 2 to output quadratic tetrahedra.  Default 2.

        Examples
        --------
        >>> node, elem = tet.tetrahedralize(switches='pq1.1/10Y')
        >>> node, elem = tet.tetrahedralize(plc=1, nobisect=True, quality=True,
        ...                                 minratio=1.1, mindihedral=10)

        Notes
        -----
        There are many other options, and the TetGen documentation contains
        descriptions only for the switches of the original C++ program.
        This is the relationship between TetGen switches and python optional
        inputs:

        PYTHON OPTION              TETGEN SWITCH
        int plc;                   // -p
        int psc;                   // -s
        int refine;                // -r
        int quality;               // -q
        int nobisect;              // -Y
        int coarsen;               // -R
        int weighted;              // -w
        int brio_hilbert;          // -b
        int incrflip;              // -l
        int flipinsert;            // -L
        int metric;                // -m
        int varvolume;             // -a
        int fixedvolume;           // -a
        int regionattrib;          // -A
        int cdtrefine;             // -D
        int insertaddpoints;       // -i
        int diagnose;              // -d
        int convex;                // -c
        int nomergefacet;          // -M
        int nomergevertex;         // -M
        int noexact;               // -X
        int nostaticfilter;        // -X
        int zeroindex;             // -z
        int facesout;              // -f
        int edgesout;              // -e
        int neighout;              // -n
        int voroout;               // -v
        int meditview;             // -g
        int vtkview;               // -k
        int nobound;               // -B
        int nonodewritten;         // -N
        int noelewritten;          // -E
        int nofacewritten;         // -F
        int noiterationnum;        // -I
        int nojettison;            // -J
        int docheck;               // -C
        int quiet;                 // -Q
        int verbose;               // -V

        PYTHON OPTION              TETGEN SWITCH (C++ default)
        int vertexperblock;        // '-x', 4092.
        int tetrahedraperblock;    // '-x', 8188.
        int shellfaceperblock;     // '-x', 2044.
        int nobisect_nomerge;      // '-Y', 1.
        int supsteiner_level;      // '-Y/', 2.
        int addsteiner_algo;       // '-Y//', 1.
        int coarsen_param;         // '-R', 0.
        int weighted_param;        // '-w', 0.
        int fliplinklevel;         // -1.
        int flipstarsize;          // -1.
        int fliplinklevelinc;      // 1.
        int reflevel;              // '-D', 3.
        int optlevel;              // '-O', 2.
        int optscheme;             // '-O', 7.
        int delmaxfliplevel;       // 1.
        int order;                 // '-o', 1.
        int reversetetori;         // '-o/', 0.
        int steinerleft;           // '-S', 0.
        int no_sort;               // 0.
        int hilbert_order;         // '-b///', 52.
        int hilbert_limit;         // '-b//' 8.
        int brio_threshold;        // '-b' 64.
        REAL brio_ratio;           // '-b/' 0.125.
        REAL facet_separate_ang_tol;  // '-p', 179.9.
        REAL facet_overlap_ang_tol;   // '-p/', 0.1.
        REAL facet_small_ang_tol;     // '-p//', 15.0.
        REAL maxvolume;            // '-a', -1.0.
        REAL minratio;             // '-q', 0.0.
        REAL mindihedral;          // '-q', 5.0.
        REAL optmaxdihedral;       // 165.0.
        REAL optminsmtdihed;       // 179.0.
        REAL optminslidihed;       // 179.0.
        REAL epsilon;              // '-T', 1.0e-8.
        REAL coarsen_percent;      // -R1/#, 1.0.
        """
        # Python 2/3 compatibility: the C wrapper expects switches as bytes
        if not isinstance(switches, bytes):
            switches = switches.encode()

        # check verbose switch: suppress all library output unless the user
        # explicitly requested verbosity
        if verbose == 0:
            quiet = 1

        # Call the wrapped C++ library.  The argument order here must match
        # the _tetgen.Tetrahedralize signature exactly.
        try:
            self.node, self.elem = _tetgen.Tetrahedralize(self.v,
                                                          self.f,
                                                          switches,
                                                          plc,
                                                          psc,
                                                          refine,
                                                          quality,
                                                          nobisect,
                                                          coarsen,
                                                          metric,
                                                          weighted,
                                                          brio_hilbert,
                                                          incrflip,
                                                          flipinsert,
                                                          varvolume,
                                                          fixedvolume,
                                                          noexact,
                                                          nostaticfilter,
                                                          insertaddpoints,
                                                          regionattrib,
                                                          cdtrefine,
                                                          diagnose,
                                                          convex,
                                                          zeroindex,
                                                          facesout,
                                                          edgesout,
                                                          neighout,
                                                          voroout,
                                                          meditview,
                                                          vtkview,
                                                          nobound,
                                                          nonodewritten,
                                                          noelewritten,
                                                          nofacewritten,
                                                          noiterationnum,
                                                          nomergefacet,
                                                          nomergevertex,
                                                          nojettison,
                                                          docheck,
                                                          quiet,
                                                          verbose,
                                                          vertexperblock,
                                                          tetrahedraperblock,
                                                          shellfaceperblock,
                                                          nobisect_nomerge,
                                                          supsteiner_level,
                                                          addsteiner_algo,
                                                          coarsen_param,
                                                          weighted_param,
                                                          fliplinklevel,
                                                          flipstarsize,
                                                          fliplinklevelinc,
                                                          reflevel,
                                                          optscheme,
                                                          optlevel,
                                                          delmaxfliplevel,
                                                          order,
                                                          reversetetori,
                                                          steinerleft,
                                                          no_sort,
                                                          hilbert_order,
                                                          hilbert_limit,
                                                          brio_threshold,
                                                          brio_ratio,
                                                          facet_separate_ang_tol,
                                                          facet_overlap_ang_tol,
                                                          facet_small_ang_tol,
                                                          maxvolume,
                                                          minratio,
                                                          mindihedral,
                                                          optmaxdihedral,
                                                          optminsmtdihed,
                                                          optminslidihed,
                                                          epsilon,
                                                          coarsen_percent)
        except RuntimeError:
            raise Exception('Failed to tetrahedralize.\n' +
                            'May need to repair surface by making it manifold')

        # check if a mesh was actually generated (an all-zero node array
        # indicates failure)
        if not np.any(self.node):
            raise Exception('Failed to tetrahedralize.\n' +
                            'May need to repair surface by making it manifold')

        # Return nodes and elements
        info = (self.node.shape[0], self.elem.shape[0])
        log.info('Generated mesh with %d nodes and %d elements' % info)

        # mark the cached ``grid`` property as stale
        self._updated = True

        return self.node, self.elem
@property
def grid(self):
""" Returns a vtkInterface unstructured grid """
if not hasattr(self, 'node'):
raise Exception('Run Tetrahedralize first')
if hasattr(self, '_grid') and not self._updated:
return self._grid
buf = np.empty((self.elem.shape[0], 1), np.int64)
cell_type = np.empty(self.elem.shape[0], dtype='uint8')
if self.elem.shape[1] == 4: # linear
buf[:] = 4
cell_type[:] = 10
elif self.elem.shape[1] == 10: # quadradic
buf[:] = 10
cell_type[:] = 24
else:
raise Exception('Invalid element array shape %s' % str(self.elem.shape))
offset = np.cumsum(buf + 1) - (buf[0] + 1)
cells = np.hstack((buf, self.elem))
self._grid = vtki.UnstructuredGrid(offset, cells, cell_type, self.node)
self._updated = False
return self._grid
def write(self, filename, binary=False):
"""
Writes an unstructured grid to disk.
Parameters
----------
filename : str
Filename of grid to be written. The file extension will select the
type of writer to use.
- ".vtk" will use the vtk legacy writer, while
- ".vtu" will select the VTK XML writer.
- ".cdb" will write an ANSYS APDL archive file.
binary : bool, optional
Writes as a binary file by default. Set to False to write ASCII.
Ignored when output is a cdb.
Notes
-----
Binary files write much faster than ASCII, but binary files written on
one system may not be readable on other systems. Binary can be used
only with the legacy writer.
"""
self.grid.write(filename, binary)
| {"/tetgen/__init__.py": ["/tetgen/pytetgen.py"], "/tetgen/pytetgen.py": ["/tetgen/__init__.py"], "/tests/test_tetgen.py": ["/tetgen/__init__.py"]} |
58,498 | zasexton/tetgen | refs/heads/master | /tests/test_tetgen.py | import vtki
import tetgen
import numpy as np
def test_load_arrays():
    """TetGen should accept raw (vertex, face) arrays."""
    surf = vtki.Sphere()
    verts = surf.points
    # strip the leading per-face point count from the flat face array
    tris = surf.faces.reshape(-1, 4)[:, 1:]
    tetgen.TetGen(verts, tris)
def test_vtk_tetrahedralize():
    """Tetrahedralizing a vtki sphere should yield a non-empty grid."""
    surface = vtki.Sphere(theta_resolution=10, phi_resolution=10)
    mesher = tetgen.TetGen(surface)
    mesher.tetrahedralize(order=1, mindihedral=20, minratio=1.5)
    result = mesher.grid
    assert result.n_cells
    assert result.n_points
def functional_tet_example():
    """Interactive demo: tetrahedralize a sphere and visualize the result.

    Not collected by pytest (no ``test_`` prefix); opens plotting windows,
    so it is meant to be run manually.
    """
    sphere = vtki.Sphere(theta_resolution=10, phi_resolution=10)
    tet = tetgen.TetGen(sphere)
    tet.tetrahedralize(order=1, mindihedral=20, minratio=1.5)
    grid = tet.grid
    assert grid.n_cells
    assert grid.n_points

    # drop the leading per-cell point count to get the 4 node indices,
    # then average the node coordinates to locate each cell center
    cells = grid.cells.reshape(-1, 5)[:, 1:]
    cell_center = grid.points[cells].mean(1)

    # extract cells below the 0 xy plane
    mask = cell_center[:, 2] < 0
    cell_ind = mask.nonzero()[0]
    subgrid = grid.extract_cells(cell_ind)

    # plot this half, colored by cell quality
    subgrid.plot(scalars=subgrid.quality, stitle='quality', cmap='bwr',
                 flip_scalars=True)

    # advanced plotting: overlay the cut grid on the input wireframe
    plotter = vtki.Plotter()
    plotter.set_background('w')
    plotter.add_mesh(subgrid, 'lightgrey', lighting=True)
    plotter.add_mesh(grid, 'r', 'wireframe')
    plotter.add_legend([[' Input Mesh ', 'r'],
                        [' Tesselated Mesh ', 'black']])
    plotter.show()

    # incrementally display the first 500 cells one at a time
    plotter = vtki.Plotter()
    plotter.set_background('w')
    plotter.add_mesh(grid, 'r', 'wireframe')
    plotter.plot(auto_close=False, interactive_update=True)
    for i in range(500):
        single_cell = grid.extract_cells([i])
        plotter.add_mesh(single_cell)
        plotter.update()
    plotter.close()
| {"/tetgen/__init__.py": ["/tetgen/pytetgen.py"], "/tetgen/pytetgen.py": ["/tetgen/__init__.py"], "/tests/test_tetgen.py": ["/tetgen/__init__.py"]} |
58,499 | zasexton/tetgen | refs/heads/master | /setup.py | import os
import sys
import numpy
from setuptools import setup, Extension
from setuptools.command.build_ext import build_ext as _build_ext
from io import open as io_open
# Python 2/3 compatible access to the builtins module (used by the custom
# build_ext class below to clear numpy's setup flag)
if sys.version_info[0] < 3:
    import __builtin__ as builtins
else:
    import builtins

# Version from file: executing tetgen/_version.py defines __version__ here
# without importing the (not yet compiled) package
__version__ = None
version_file = os.path.join(os.path.dirname(__file__), 'tetgen', '_version.py')
with io_open(version_file, mode='r') as fd:
    exec(fd.read())
# for: the cc1plus: warning: command line option '-Wstrict-prototypes'
class build_ext(_build_ext):
    """Custom build_ext that injects numpy's include directory late and
    strips a C-only warning flag from the C++ compile line."""

    def finalize_options(self):
        _build_ext.finalize_options(self)
        # prevent numpy from thinking it is still in its setup process:
        try:
            del builtins.__NUMPY_SETUP__
        except AttributeError:
            # flag was never set; nothing to clean up
            pass
        # import here so setup.py can be parsed before numpy is installed
        import numpy
        self.include_dirs.append(numpy.get_include())

    def build_extensions(self):
        # '-Wstrict-prototypes' only applies to C; removing it silences the
        # cc1plus warning mentioned above when compiling the C++ sources
        try:
            self.compiler.compiler_so.remove("-Wstrict-prototypes")
        except (AttributeError, ValueError):
            # compiler has no such flag list / flag not present
            pass
        _build_ext.build_extensions(self)
# compiler args, selected per platform
# (``macros`` is defined but not referenced by the setup() call below)
macros = []
_PLATFORM_FLAGS = {
    'nt': ['/openmp', '/O2', '/w', '/GS'],      # windows (MSVC)
    'posix': ['-std=gnu++11', '-O3', '-w'],     # linux or mac os
}
if os.name not in _PLATFORM_FLAGS:
    raise Exception('Unsupported OS %s' % os.name)
extra_compile_args = _PLATFORM_FLAGS[os.name]
# Package metadata plus the Cython-wrapped TetGen C++ extension
setup(
    name='tetgen',
    packages = ['tetgen'],
    version=__version__,
    description='Python interface to tetgen',
    long_description=open('README.rst').read(),
    author='Alex Kaszynski',
    author_email='akascap@gmail.com',
    url = 'https://github.com/akaszynski/tetgen',
    license='MIT',
    classifiers=[
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: MIT License',
        'Development Status :: 4 - Beta',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
    ],

    # Build cython modules with the customized build_ext defined above
    cmdclass={'build_ext': build_ext},
    ext_modules = [Extension("tetgen._tetgen",
                             ['tetgen/cython/tetgen/_tetgen.pyx',
                              'tetgen/cython/tetgen/tetgen.cxx',
                              'tetgen/cython/tetgen/predicates.cxx',
                              'tetgen/cython/tetgen/tetgen_wrap.cxx'],
                             language='c++',
                             extra_compile_args=extra_compile_args,
                             # TETLIBRARY: compile TetGen as a library
                             define_macros=[('TETLIBRARY', None)]),
                   ],

    keywords='TetGen',
    install_requires=['numpy>1.9.3',
                      'vtki>=0.16.1']
)
| {"/tetgen/__init__.py": ["/tetgen/pytetgen.py"], "/tetgen/pytetgen.py": ["/tetgen/__init__.py"], "/tests/test_tetgen.py": ["/tetgen/__init__.py"]} |
58,500 | lucas-deschamps/Django-Lista-de-Tarefas | refs/heads/master | /App/todolist/models.py | from django.db import models
class Lista(models.Model):
    """A single to-do list entry."""

    # Task description shown to the user (up to 200 characters)
    item = models.CharField(max_length=200)
    # True once the task has been marked as done
    completo = models.BooleanField(default=False)

    def __str__(self):
        # Human-readable representation (used by the Django admin and shell)
        return self.item | {"/App/todolist/forms.py": ["/App/todolist/models.py"], "/App/todolist/admin.py": ["/App/todolist/models.py"]} |
58,501 | lucas-deschamps/Django-Lista-de-Tarefas | refs/heads/master | /App/todolist/urls.py | from django.urls import path
from . import views
# URL routes for the to-do app; <list_id> captures the target item's key
urlpatterns = [
    path('', views.home, name='home'),
    path('sobre/', views.about, name='about'),
    path('deletar/<list_id>', views.delete, name='delete'),
    path('riscar/<list_id>', views.cross_off, name='cross_off'),
    path('desfazer/<list_id>', views.uncross, name='uncross'),
    path('editar/<list_id>', views.edit, name='edit'),
]
| {"/App/todolist/forms.py": ["/App/todolist/models.py"], "/App/todolist/admin.py": ["/App/todolist/models.py"]} |
58,502 | lucas-deschamps/Django-Lista-de-Tarefas | refs/heads/master | /App/todolist/forms.py | from django import forms
from .models import Lista
class Formulario(forms.ModelForm):
    """ModelForm exposing the editable fields of a Lista entry."""

    class Meta:
        # Build the form from the Lista model, editing only these two fields
        model = Lista
        fields = ["item", "completo"] | {"/App/todolist/forms.py": ["/App/todolist/models.py"], "/App/todolist/admin.py": ["/App/todolist/models.py"]} |
58,503 | lucas-deschamps/Django-Lista-de-Tarefas | refs/heads/master | /App/todolist/admin.py | from django.contrib import admin
from .models import Lista
# Make Lista manageable through the Django admin site
admin.site.register(Lista)
| {"/App/todolist/forms.py": ["/App/todolist/models.py"], "/App/todolist/admin.py": ["/App/todolist/models.py"]} |
58,515 | butyuhao/Rake_For_Chinese | refs/heads/master | /src/Rake.py | import operator
from typing import List, Tuple, Optional
import os
import jieba
import jieba.posseg as pseg
from .word import Word
from .utils import notNumStr
class Rake:
    """RAKE (Rapid Automatic Keyword Extraction) adapted for Chinese text.

    Initialize with a stop-word list and a delimiter-word list -- either
    directly (``initializeFromList``) or from one-word-per-line text files
    (``initializeFromPath``) -- then call one of the extractKeyword*
    methods.
    """

    def __init__(self):  # , stopwordPath: str = None, delimWordPath: str = None):
        # becomes True only once both word lists are successfully loaded
        self.initialized = False
        self.stopWordList = list()
        self.delimWordList = list()

    def initializeFromPath(self, stopwordPath: str = "", delimWordPath: str = ""):
        """Load the stop-word and delimiter-word lists from text files
        containing one word per line."""
        if not os.path.exists(stopwordPath):
            print("Stop Word Path invalid")
            return
        if not os.path.exists(delimWordPath):
            print("Delim Word Path Invalid")
            return
        # ``with`` guarantees the file handles are closed (the original
        # left both files open)
        with open(stopwordPath, 'r') as f:
            swLibList = [line.rstrip('\n') for line in f]
        # BUG FIX: the original validated delimWordPath above but then read
        # a hard-coded "data/stoplist/..." file; honor the argument instead.
        with open(delimWordPath, 'r') as f:
            conjLibList = [line.rstrip('\n') for line in f]
        self.initializeFromList(swLibList, conjLibList)
        return

    def initializeFromList(self, swList: List = None, dwList: List = None):
        """Load the stop-word and delimiter-word lists directly from lists.

        Passing None (the declared defaults) is treated as an empty list
        instead of crashing on ``len(None)`` as the original did.
        """
        self.stopWordList = swList if swList is not None else []
        self.delimWordList = dwList if dwList is not None else []
        if len(self.stopWordList) == 0 or len(self.delimWordList) == 0:
            print("Empty Stop word list or deliminator word list, uninitialized")
            return
        else:
            self.initialized = True

    def extractKeywordFromPath(self, text: str, num_kw: int = 10):
        """Extract the top ``num_kw`` keywords from the text file ``text``."""
        if not self.initialized:
            print("Not initialized")
            return
        with open(text, 'r') as fp:
            text = fp.read()
        return self.extractKeywordFromString(text, num_kw=num_kw)

    def extractKeywordFromString(self, text: str, num_kw: int = 10):
        """Extract the top ``num_kw`` (keyword, score) pairs from ``text``.

        Words are POS-tagged with jieba; stop words, delimiter words,
        alphanumeric tokens and several POS classes act as phrase
        boundaries.  Candidate phrases are scored with the RAKE
        degree/frequency metric.
        """
        rawtextList = pseg.cut(text)

        # Construct list of phrases and preliminary textList
        textList = []
        listofSingleWord = dict()
        lastWord = ''
        # POS tags treated as phrase boundaries (numerals, particles,
        # verbs, localizers, ...)
        poSPrty = ['m', 'x', 'uj', 'ul', 'mq', 'u', 'v', 'f']
        meaningfulCount = 0
        checklist = []
        for eachWord, flag in rawtextList:
            checklist.append([eachWord, flag])
            if eachWord in self.delimWordList or not notNumStr(eachWord) \
                    or eachWord in self.stopWordList or flag in poSPrty \
                    or eachWord == '\n':
                # collapse consecutive boundary tokens into a single '|'
                if lastWord != '|':
                    textList.append("|")
                    lastWord = "|"
            elif eachWord not in self.stopWordList and eachWord != '\n':
                textList.append(eachWord)
                meaningfulCount += 1
                if eachWord not in listofSingleWord:
                    listofSingleWord[eachWord] = Word(eachWord)
                lastWord = ''

        # Construct list of lists that has phrases as words
        newList = []
        tempList = []
        for everyWord in textList:
            if everyWord != '|':
                tempList.append(everyWord)
            else:
                newList.append(tempList)
                tempList = []

        # register each phrase (keyed as "w1|w2|...") in the word table
        tempStr = ''
        for everyWord in textList:
            if everyWord != '|':
                tempStr += everyWord + '|'
            else:
                if tempStr[:-1] not in listofSingleWord:
                    listofSingleWord[tempStr[:-1]] = Word(tempStr[:-1])
                tempStr = ''

        # Update occurrence counts and degrees over the entire phrase list
        for everyPhrase in newList:
            res = ''
            for everyWord in everyPhrase:
                listofSingleWord[everyWord].updateOccur(len(everyPhrase))
                res += everyWord + '|'
            phraseKey = res[:-1]
            if phraseKey not in listofSingleWord:
                listofSingleWord[phraseKey] = Word(phraseKey)
            else:
                listofSingleWord[phraseKey].updateFreq()

        # Score every candidate phrase
        outputList = dict()
        for everyPhrase in newList:
            # ignore overly long phrases
            if len(everyPhrase) > 5:
                continue
            score = 0
            phraseString = ''
            outStr = ''
            for everyWord in everyPhrase:
                # RAKE: phrase score is the sum of member word deg/freq
                score += listofSingleWord[everyWord].returnScore()
                phraseString += everyWord + '|'
                outStr += everyWord
            phraseKey = phraseString[:-1]
            freq = listofSingleWord[phraseKey].getFreq()
            # drop phrases that are both relatively and absolutely rare
            if freq / meaningfulCount < 0.01 and freq < 3:
                continue
            outputList[outStr] = score

        sorted_list = sorted(outputList.items(), key=operator.itemgetter(1),
                             reverse=True)
        return sorted_list[:num_kw]
| {"/src/Rake.py": ["/src/word.py", "/src/utils.py"], "/example.py": ["/src/Rake.py"]} |
58,516 | butyuhao/Rake_For_Chinese | refs/heads/master | /example.py | from src.Rake import Rake
# Minimal end-to-end demo of the Chinese RAKE keyword extractor
obj = Rake()
# stop-word and phrase-delimiter word lists shipped with the repository
stop_path = "data/stoplist/中文停用词表(1208个).txt"
conj_path = "data/stoplist/中文分隔词词库.txt"
obj.initializeFromPath(stop_path, conj_path)
# extract the default top-10 keywords from a sample document
path = 'data/testCase/文本1.txt'
result = obj.extractKeywordFromPath(path)
print(result) | {"/src/Rake.py": ["/src/word.py", "/src/utils.py"], "/example.py": ["/src/Rake.py"]} |
58,517 | butyuhao/Rake_For_Chinese | refs/heads/master | /src/word.py |
# Data structure for holding data
class Word():
    """Bookkeeping record for RAKE scoring.

    Tracks how many phrases a token appears in (``freq``) and the summed
    length of those phrases (``deg``); the RAKE score is their ratio.
    """

    def __init__(self, char, freq=0, deg=0):
        self.freq = freq
        self.deg = deg
        self.char = char

    def updateOccur(self, phraseLength):
        """Record one occurrence inside a phrase of ``phraseLength`` words."""
        self.freq, self.deg = self.freq + 1, self.deg + phraseLength

    def updateFreq(self):
        """Bump the raw frequency without touching the degree."""
        self.freq = self.freq + 1

    def returnScore(self):
        """RAKE metric: degree divided by frequency."""
        return self.deg / self.freq

    def getChar(self):
        """The token this record describes."""
        return self.char

    def getFreq(self):
        """Current frequency count."""
        return self.freq
58,518 | butyuhao/Rake_For_Chinese | refs/heads/master | /src/utils.py | import json
# Check whether a token contains any ASCII letters or digits
def notNumStr(instr):
    """Return False when any character of ``instr`` is an ASCII letter
    (A-Z / a-z) or a digit; otherwise True (e.g. for pure CJK text or the
    empty string)."""
    has_alnum = any(ch.isdigit() or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z'
                    for ch in instr)
    return not has_alnum
# Read a target test case stored as JSON
def readSingleTestCases(testFile):
    """Concatenate the 'text' (or fallback 'statement') fields of a JSON
    transcript file and return the combined string.

    Two malformed-input cases are handled: transcripts that use single
    quotes instead of valid JSON double quotes, and empty/unparseable
    transcripts (which yield '').
    """
    with open(testFile) as json_data:
        try:
            testData = json.load(json_data)
        except ValueError:
            # Incorrect json format that uses ' instead of ".
            # BUG FIX: json.load consumed the stream, so the original's
            # read() here always returned '' -- rewind before retrying.
            json_data.seek(0)
            data = json_data.read().replace("'", '"')
            try:
                testData = json.loads(data)
            except ValueError:
                # empty or irreparably malformed transcript
                return ""
    returnString = ""
    for item in testData:
        try:
            returnString += item['text']
        except KeyError:
            # some transcripts label the field 'statement' instead
            returnString += item['statement']
    return returnString
| {"/src/Rake.py": ["/src/word.py", "/src/utils.py"], "/example.py": ["/src/Rake.py"]} |
58,519 | KevinHuang8/COVID19-prediction | refs/heads/master | /models/utils/clustering.py | import pickle
import numpy as np
from collections import deque
from ..utils import dataloader as loader
def bfs(start, visited, n, adjacency_list):
    """Breadth-first search from ``start`` collecting at most ``n`` nodes.

    Only nodes actually placed in the cluster are added to the
    caller-supplied ``visited`` set; nodes already in ``visited`` are never
    enqueued.  Returns the cluster as a list in visit order.
    """
    queue = deque([start])
    seen = {start}          # enqueued during *this* search
    cluster = []
    while queue:
        node = queue.popleft()
        cluster.append(node)
        visited.add(node)
        if len(cluster) >= n:
            break
        for neighbor in adjacency_list.get(node, []):
            if neighbor not in seen and neighbor not in visited:
                queue.append(neighbor)
                seen.add(neighbor)
    return cluster
def fips_to_index(fips, info=None):
    """Map a county FIPS code to its row index in the info table.

    ``info`` defaults to the fips-info table loaded from disk.  Returns
    None when the code is not present.
    """
    if info is None:
        info = loader.load_info_raw(fips_info=True)
    matches = info[info['FIPS'] == fips].index
    if len(matches) == 0:
        return None
    return matches[0]
def to_indices(cluster, info=None):
    """Convert a list of FIPS codes to info-table row indices, silently
    dropping codes that cannot be resolved."""
    converted = (fips_to_index(code, info) for code in cluster)
    return [idx for idx in converted if idx is not None]
def cluster_counties(k=4, min_size=4, return_indices=True, save_file=True):
    '''
    Partition each state's counties into clusters of adjacent counties.

    k - target number of clusters per state
    min_size - minimum cluster size
    return_indices - translate FIPS codes to info-table row indices
    save_file - persist the results via loader.save_to_otherdata

    Returns (clusters, cluster_id): a list of county clusters and a mapping
    from each county to the index of its cluster.
    '''
    info = loader.load_info_raw(fips_info=True)
    data = loader.load_info_raw()
    # keep the FIPS column plus the last (most recent) deaths column
    deaths = data.iloc[:, [1,-1]]
    adjacency_list = loader.load_instate_adjacency_list()

    # group county FIPS codes by state
    all_states = info['State'].unique()
    states_to_fips = {}
    for state in all_states:
        fips = info[info['State'] == state]['FIPS'].to_list()
        states_to_fips[state] = fips

    # sort each adjacency list by descending deaths so BFS grows clusters
    # toward the hardest-hit neighbors first
    for node in adjacency_list:
        adj = adjacency_list[node]
        try:
            adj = sorted(adj, key=lambda fips: deaths[
                deaths['FIPS'] == fips].iloc[:, -1].to_list()[0], reverse=True)
        except IndexError:
            # county missing from the deaths table; keep the original order
            pass
        adjacency_list[node] = adj

    clusters = []
    cluster_id = {}
    count = 0
    for state in states_to_fips:
        counties = states_to_fips[state]
        # seed clusters starting from the counties with the most deaths
        sorted_counties = sorted(counties, key=lambda fips: deaths[
            deaths['FIPS'] == fips].iloc[:, -1].to_list()[0], reverse=True)
        s = len(counties) // k
        size = max(s, min_size)
        visited = set()
        for county in sorted_counties:
            remaining = len(counties) - len(visited)
            # absorb the remainder into the final cluster rather than
            # leaving a tiny leftover cluster
            if 2*size > remaining:
                size = remaining
            if county not in visited:
                cluster = bfs(county, visited, size, adjacency_list)
                clusters.append(cluster)
                for c in cluster:
                    cluster_id[c] = count
                count += 1

    if return_indices:
        # re-key everything from FIPS codes to info-table row indices
        info = loader.load_info_raw(fips_info=True)
        clusters = list(map(lambda clu: to_indices(clu, info), clusters))
        new_cluster_id = {}
        for fips in cluster_id:
            i = cluster_id[fips]
            new_cluster_id[fips_to_index(fips, info)] = i
        cluster_id = new_cluster_id

    if save_file:
        loader.save_to_otherdata(clusters, 'clusters.dat')
        loader.save_to_otherdata(cluster_id, 'cluster_id.dat')

    return clusters, cluster_id | {"/models/gaussianprocess/gp_pipeline.py": ["/models/gaussianprocess/gp_model.py", "/models/utils/clustering.py"]} |
58,520 | KevinHuang8/COVID19-prediction | refs/heads/master | /models/curvefit/curvefit_models.py | import numpy as np
import pymc3 as pm
import scipy.stats as stats
from scipy.optimize import curve_fit
class CurvefitModel:
    '''
    Base model class for least squares curve fitting.
    '''
    def __init__(self, func, bounds):
        '''
        func - the function to fit
        bounds - bounds for the parameters, in the format
        to pass into scipy.optimize.curve_fit
        '''
        self.func = func
        self.bounds = bounds
        # Degenerate flag signifies not enough data
        # Degenerate models should predict all zeros
        self.degenerate = False

    def fit(self, X_train, y_train):
        '''
        X_train, y_train - 1D np arrays
        Estimate parameters of the function given data.
        self.popt will contain the parameter estimates that minimize
        mean squared error
        self.pcov will contain the estimated parameter covariances
        '''
        # all-zero targets cannot be fit meaningfully; mark degenerate
        if not y_train.any():
            self.degenerate = True
            return
        try:
            self.popt, self.pcov = curve_fit(self.func, X_train, y_train,
                                             bounds=self.bounds)
        except RuntimeError:
            # curve_fit raises RuntimeError when least squares fails to
            # converge; treat as degenerate (predicts zeros)
            self.degenerate = True
            return

    def predict(self, x, params=None):
        '''
        x - 1D np array
        Evaluates the function at x.
        If params is not None, then params are used as the parameters
        of the function, instead of the fitted parameters.
        '''
        if self.degenerate:
            return np.zeros((x.shape[0],))
        if params is None:
            return self.func(x, *self.popt)
        else:
            return self.func(x, *params)

    def predict_quantiles(self, x, quantiles, samples=100):
        '''
        x - 1D np array. Gives the locations to predict at
        quantiles - percentiles to compute
        samples - number of samples to take to estimate the quantiles

        Returns a 2D np array where axis 0 represents each x value
        and axis 1 represents each predicted quantile.

        We estimate quantiles by taking the error in the
        parameter estimates and creating a parameter distribution.
        We then sample the distribution, and take percentiles
        '''
        # degenerate model: every quantile is identically zero
        if self.degenerate:
            return np.array([np.zeros(x.shape[0])
                             for i in range(len(quantiles))]).T
        # 1-sigma parameter uncertainties from the covariance diagonal
        errors = np.sqrt(np.diag(self.pcov))
        # High errors for pre-peak/mid-peak counties
        # Make sure the uncertainty does not exceed
        # threshold * actual value; halve repeatedly, giving up (zero
        # uncertainty) after 50 halvings
        count = 0
        threshold = 2
        while np.any(errors > threshold * self.popt):
            errors = errors / 2
            count += 1
            if count > 50:
                errors = np.zeros(self.popt.shape[0])
                break
        all_samples = []
        for i in range(samples):
            # draw one parameter vector from the estimated distribution
            sample_params = np.random.normal(loc=self.popt, scale=errors)
            # NOTE(review): the inner loop reuses ``i``, shadowing the outer
            # sample index; harmless here since the outer i is not used below
            for i, param in enumerate(sample_params):
                # clip each sampled parameter back into its fit bounds
                lower_bound = self.bounds[0][i]
                upper_bound = self.bounds[1][i]
                if param < lower_bound:
                    sample_params[i] = lower_bound
                elif param > upper_bound:
                    sample_params[i] = upper_bound
            y = self.predict(x, sample_params)
            # discard numerically invalid samples
            if (np.any(np.isnan(y))):
                continue
            all_samples.append(y)
        all_samples = np.array(all_samples)
        quantile_predictions = np.array([np.percentile(all_samples, p, axis=0)
                                         for p in quantiles])
        quantile_predictions = quantile_predictions.T
        # Remove spurious (negative) predictions
        quantile_predictions[quantile_predictions < 0] = 0
        return quantile_predictions
class ExpNormModel(CurvefitModel):
    '''
    Curve-fit model based on an exponentially modified Gaussian (EMG).

    Two modes are supported: fitting the EMG density directly (meant for
    daily death data) or fitting its cumulative form (meant for cumulative
    death data).  In the latter case results are always differenced back to
    the non-cumulative scale.
    '''
    def exp_model(x, max_val, loc, scale, K):
        # EMG density scaled by max_val
        return max_val * stats.exponnorm.pdf(x, K, loc, scale)

    def exp_model_cdf(x, max_val, loc, scale, K):
        # cumulative counterpart of exp_model
        return max_val * stats.exponnorm.cdf(x, K, loc, scale)

    def __init__(self, data_max):
        # Parameter bounds, ordered (max, loc, scale, K):
        #   * the curve total lies between the observed max and 100x it
        #   * K is empirically capped at 10 -- a hyperparameter that was
        #     found to need a small (but not too small) upper bound
        lower = [data_max, 0, 0, 0]
        upper = [100 * data_max, np.inf, np.inf, 10]
        # True -> fit the cumulative curve instead of the density
        self.is_cumulative = False
        super().__init__(ExpNormModel.exp_model, (lower, upper))

    def set_cumulative(self, use_cumulative):
        '''
        use_cumulative - a boolean

        Switch between cumulative (cdf) and non-cumulative (pdf) modes.
        '''
        self.is_cumulative = bool(use_cumulative)
        if self.is_cumulative:
            self.func = ExpNormModel.exp_model_cdf
        else:
            self.func = ExpNormModel.exp_model

    def predict(self, x, params=None):
        '''
        x - 1D np array

        Evaluate the fitted curve at x.  Cumulative results are always
        differenced to the non-cumulative scale, so in cumulative mode the
        output has one fewer entry than x; include one extra trailing point
        in x to keep the two modes aligned.
        '''
        values = super().predict(x, params)
        if self.is_cumulative:
            values = np.diff(values)
        return values
class GPModel(ExpNormModel):
    '''
    This model uses a Gaussian process to refine the quantile estimates
    from the exponentially modified gaussian curve fit. It still uses the
    same curve, but then simulates data points based on that curve fit and then
    trains a GP on those simulated data points.
    '''

    def fit(self, X_train, y_train, unsmoothed_y_train, **gp_params):
        '''
        unsmoothed_y_train - 1D array of the ground truth data points without
        any smoothing
        gp_params - to be passed into the GP, can contain (all optional)
            - horizon: how many steps to predict in the future
            - draw/tune: parameters for GP marginal likelihood sampling
            - samples: number of samples to take when estimating quantiles
            - trials: number of GP runs to average over
        '''
        super().fit(X_train, y_train)
        if self.degenerate:
            # Curve fit failed; nothing for the GP to refine.
            return
        y = self.predict(X_train)
        residuals = unsmoothed_y_train - y
        # We average results across 'trials' runs.
        # dict.get replaces the original try/except KeyError blocks.
        trials = gp_params.get('trials', 5)
        outputs = []
        for i in range(trials):
            output = self.fit_gp(X_train, residuals, **gp_params)
            outputs.append(output)
        outputs = np.array(outputs)
        # quantiles[q] is the predicted curve for the q-th decile.
        self.quantiles = np.mean(outputs, axis=0)

    def fit_gp(self, X_train, residuals, **gp_params):
        '''
        We use the residuals to create a distribution of the noise of the
        exponentially modified Guassian prediction. We then hallucinate
        data points based on our curve fit prediction with added noise, then
        fit a GP.
        '''
        s = np.std(residuals)
        m = np.mean(residuals)
        horizon = gp_params.get('horizon', 30)
        size = 100
        # A cumulative model's predict() drops one point via np.diff, so
        # generate one fewer noise sample to match.
        if self.is_cumulative:
            noise = np.random.normal(loc=m, scale=s, size=size - 1)
        else:
            noise = np.random.normal(loc=m, scale=s, size=size)
        X_train2 = np.linspace(X_train[0], X_train[-1] + horizon, size)
        y_train2 = self.predict(X_train2) + noise
        if self.is_cumulative:
            X_train2 = X_train2[:-1]
        draw = gp_params.get('draw', 500)
        tune = gp_params.get('tune', 500)
        samples = gp_params.get('samples', 100)
        with pm.Model() as gp_model:
            ρ = pm.HalfCauchy('ρ', 5)
            η = pm.HalfCauchy('η', 5)
            # Mean prior: the fitted exponentially modified Gaussian.
            M = ExponentialGaussianMean(*self.popt)
            K = (η**2) * pm.gp.cov.ExpQuad(1, ρ)
            σ = pm.HalfNormal('σ', 50)
            expnorm_gp = pm.gp.Marginal(mean_func=M, cov_func=K)
            expnorm_gp.marginal_likelihood('expnorm', X=X_train2.reshape(-1,1),
                y=y_train2, noise=σ)
        with gp_model:
            expnorm_gp_trace = pm.sample(draw, tune=tune, cores=1,
                random_seed=42)
        self.X_pred = np.arange(0, np.max(X_train2) + 30)
        with gp_model:
            expnorm_deaths_pred = expnorm_gp.conditional('expnorm_pred',
                self.X_pred.reshape(-1, 1), pred_noise=True)
            gp_samples = pm.sample_posterior_predictive(expnorm_gp_trace,
                vars=[expnorm_deaths_pred], samples=samples, random_seed=42)
        percentiles = [p for p in range(10, 100, 10)]
        quantile_gp = [np.percentile(gp_samples['expnorm_pred'], q,
            axis=0) for q in percentiles]
        quantile_gp = np.array(quantile_gp)
        return quantile_gp

    def predict_quantiles(self, x, quantiles, *args):
        """Look up the precomputed GP quantile curves at integer locations x.

        Extra positional args are accepted (and ignored) for interface
        compatibility with CurvefitModel.predict_quantiles.
        """
        if self.degenerate:
            return np.array([np.zeros(x.shape[0])
                for i in range(len(quantiles))]).T
        quantile_predictions = []
        for q in range(len(quantiles)):
            y = self.quantiles[q][x]
            quantile_predictions.append(y)
        quantile_predictions = np.array(quantile_predictions)
        quantile_predictions = quantile_predictions.T
        # Clamp spurious negative predictions to zero.
        quantile_predictions[quantile_predictions < 0] = 0
        return quantile_predictions
class ExponentialGaussianMean(pm.gp.mean.Mean):
    '''
    GP mean function: an exponentially modified Gaussian curve scaled by
    max_val, parameterized exactly like ExpNormModel.exp_model.
    '''

    def __init__(self, max_val, loc, scale, K):
        super().__init__()
        self.max_val = max_val
        self.loc = loc
        self.scale = scale
        self.K = K

    def __call__(self, X):
        # NOTE(review): X[0] evaluates only the first row of the (n, 1)
        # input; a GP mean is normally evaluated at every row (X[:, 0]) —
        # confirm this is intended.
        density = stats.exponnorm.pdf(X[0], self.K, self.loc, self.scale)
        return self.max_val * density
58,521 | KevinHuang8/COVID19-prediction | refs/heads/master | /models/LSTM/lstm_data.py | from ..utils import dataloader as loader
import numpy as np
class TimeFeature:
    """A single time-varying feature (e.g. daily deaths) plus the metadata
    describing how it was pre-processed.

    Keyword options:
        norm - population-normalization factor, or False (default)
        diff_order - number of times the series was differenced (default 0)
        target - whether this feature is a prediction target (default True)
    """

    def __init__(self, name, series, **kwargs):
        # Bug fix: this was `*kwargs`, which collected the options into a
        # tuple — any keyword argument raised TypeError and the option
        # lookups below could never succeed.
        self.series = series
        self.name = name
        self.norm = kwargs.get('norm', False)
        self.diff_order = kwargs.get('diff_order', 0)
        # Initial conditions are filled in by Data.difference().
        self.initial_cond = []
        self.target = kwargs.get('target', True)

    def __repr__(self):
        return f'[ name: {self.name}, norm: {self.norm}, ' + \
            f'diff: {self.diff_order}, target: {self.target} ]'
class TimeIndependentFeature:
    """A per-county feature with no time dimension (e.g. a demographic
    column); indexing by county delegates to the underlying series."""

    def __init__(self, name, series):
        self.name = name
        self.series = series

    def __getitem__(self, index):
        return self.series[index]

    def __repr__(self):
        return 'Feature: {}'.format(self.name)
class Data:
    """Loads COVID time series plus demographics and prepares feature
    objects and LSTM training tensors according to a declarative
    data_format configuration dict."""

    def __init__(self, data_format, loader_args=None):
        # None default avoids the shared-mutable-default-argument pitfall.
        if loader_args is None:
            loader_args = {}
        self.datadict = loader.load_covid_timeseries(**loader_args)
        self.demographics = loader.load_demographics_data()
        self.data_format(data_format)

    def data_format(self, config):
        '''
        Parse the configuration dict. Recognized keys:

        time_features - list of dicts, each with:
            - name (required, must exist in the loaded data dict)
            - norm
            - diff_order
            - target
        time_independent_features - list of demographic column names
        time_context - bool; append the time step index as a feature
        '''
        if 'time_features' not in config:
            raise ValueError('Must include time features')
        self.time_features = []
        for feature in config['time_features']:
            series = None
            # Get time feature by name
            if 'name' not in feature:
                raise ValueError('Must specify name of time feature')
            if feature['name'] in self.datadict.keys():
                series = self.datadict[feature['name']]
            #elif in 'mobility', etc.
            else:
                raise ValueError(f'{feature["name"]} is not a recognized ' +
                    'time feature name')
            feature_obj = TimeFeature(feature['name'], series)
            if 'target' in feature:
                feature_obj.target = feature['target']
            # Normalize by population?
            if 'norm' in feature and feature['norm']:
                series = self.norm(series, feature['norm'])
                feature_obj.norm = feature['norm']
                # Bug fix: keep the normalized series even when no
                # differencing is requested below (it was previously
                # dropped unless 'diff_order' was also set).
                feature_obj.series = series
            # Difference
            if 'diff_order' in feature:
                diff_order = feature['diff_order']
                differenced, initial_cond = self.difference(series, diff_order)
                feature_obj.series = differenced
                feature_obj.diff_order = diff_order
                feature_obj.initial_cond = initial_cond
            self.time_features.append(feature_obj)
        self.n_counties = self.time_features[0].series.shape[0]
        for feature in self.time_features:
            if feature.series.shape[0] != self.n_counties:
                raise ValueError('Every time feature must have the same number' +
                    ' of counties')
        self.targets = [feature for feature in self.time_features if
            feature.target]
        self.n_targets = len(self.targets)
        self.time_independent_features = []
        if 'time_independent_features' in config:
            for feature_name in config['time_independent_features']:
                if feature_name in self.demographics.columns:
                    self.time_independent_features.append(
                        TimeIndependentFeature(feature_name,
                            self.demographics[feature_name]))
                else:
                    # Bug fix: the f-string referenced the misspelled
                    # 'feautre_name', raising NameError instead of the
                    # intended ValueError.
                    raise ValueError(f'{feature_name} is not a recognized ' +
                        'time independent feature name')
        if 'time_context' in config:
            self.time_context = config['time_context']
        else:
            self.time_context = False
        self.n_time_features = len(self.time_features)
        self.n_features = len(self.time_features) + len(
            self.time_independent_features) + int(self.time_context)
        self.tsteps = self.series_rep().shape[1]

    def norm(self, series, val):
        """Scale each county's series to a per-`val`-people rate."""
        pop = np.expand_dims(self.demographics['total_pop'].values, axis=1)
        series = series / pop * val
        return series

    def unnorm(self, series, val):
        """Inverse of norm()."""
        pop = np.expand_dims(self.demographics['total_pop'].values, axis=1)
        series = series * pop / val
        return series

    def difference(self, series, order):
        """Difference `series` along time `order` times, returning the
        differenced series and the saved initial conditions needed to
        invert the operation."""
        if order >= series.shape[1]:
            raise ValueError('cannot difference that many times')
        initial_conditions = []
        for i in range(order):
            # First column at this differencing level; needed to undo.
            d0 = np.expand_dims(series[:, 0], axis=1)
            initial_conditions.append(d0)
            series = np.diff(series)
        return series, initial_conditions

    def undifference(self, series, initial_conditions, order, axis=1):
        """Invert difference(): prepend the saved initial conditions and
        cumulatively sum, once per differencing level."""
        if len(initial_conditions) != order:
            print(len(initial_conditions), order)
            raise ValueError('Invalid initial conditions')
        for i in range(order):
            d0 = initial_conditions[-1]
            series = np.hstack([d0, series])
            series = np.cumsum(series, axis=axis)
            # Only rebinds the local name; caller's list is untouched.
            initial_conditions = initial_conditions[:-1]
        return series

    def series_rep(self):
        """Representative series used for shape/NaN bookkeeping."""
        return self.time_features[0].series

    def get_training_data(self, lag=7, k=7, val_steps=9, dense=False):
        '''
        Assumes calibrated time series

        lag - number of lag features to include
        k - time steps to predict
        val_steps - number of steps from the end to save for validation
        dense - predict one step per input step instead of a k-step block
        '''
        if dense:
            k = 1
        X_train = []
        y_train = []
        X_test = []
        y_test = []
        for county in range(self.n_counties):
            # Skip counties whose series are entirely nan.
            # Bug fix: the check previously tested the whole 2D array
            # (np.all over every county), so it never skipped per county.
            if any([np.all(np.isnan(feature.series[county])) for feature
                    in self.time_features]):
                continue
            ### Assumption: all time series have been calibrated together, and
            ### nans are at the end. (Result of calibration in dataloader)
            f = self.series_rep()[county]
            # index of first nan for that county
            if np.all(~np.isnan(f)):
                s = f.shape[0]
            else:
                s = np.argmax(np.isnan(f))
            # t + lag is the prediction horizon (i.e. f[t + lag] is the first
            # step to predict, with lag context from f[t])
            for t in range(s):
                if np.any(np.isnan(f[t:t+lag+k])) or t + lag + k > s:
                    break
                x = np.vstack([feature.series[county, t:t + lag] for feature
                    in self.time_features])
                x = np.vstack([x] + [np.full((lag, ), feature[county])
                    for feature in self.time_independent_features])
                if self.time_context:
                    x = np.vstack([x, np.full((lag, ), t)])
                x = x.T
                if dense:
                    y = np.vstack([feature.series[county, t + 1:t + lag + 1]
                        for feature in self.targets])
                else:
                    y = np.vstack([feature.series[county, t + lag:t + lag + k]
                        for feature in self.targets])
                y = y.T
                # t + lag + k <= s - v for training
                if t <= s - val_steps - lag - k:
                    X_train.append(x)
                    y_train.append(y)
                else:
                    X_test.append(x)
                    y_test.append(y)
        X_train = np.array(X_train)
        y_train = np.array(y_train)
        X_test = np.array(X_test)
        y_test = np.array(y_test)
        return X_train, y_train, X_test, y_test

    def original(self):
        """Undo differencing and normalization for every time feature,
        returning the reconstructed raw-scale series."""
        undifferenced = []
        for feature in self.time_features:
            undiff = self.undifference(feature.series, feature.initial_cond,
                feature.diff_order)
            if feature.norm:
                undiff = self.unnorm(undiff, feature.norm)
            undifferenced.append(undiff)
        return undifferenced

    def original_with_predictions(self, prediction, t):
        """Like original(), but first splices model predictions into each
        county's series starting t steps before that county's first NaN."""
        undifferenced = []
        for i, feature in enumerate(self.time_features):
            combined_feature = np.copy(feature.series)
            horizon = prediction.shape[1]
            combined_feature = np.concatenate([combined_feature,
                np.full((self.n_counties, horizon), np.nan)], axis=1)
            for county in range(self.n_counties):
                f = combined_feature[county]
                s = np.argmax(np.isnan(f))
                if np.all(np.isnan(f)):
                    continue
                if s - t < 0:
                    continue
                combined_feature[county, (s - t):(s - t + horizon)] = \
                    prediction[county, :, i]
            undiff = self.undifference(combined_feature, feature.initial_cond,
                feature.diff_order)
            if feature.norm:
                undiff = self.unnorm(undiff, feature.norm)
            undifferenced.append(undiff)
        return undifferenced
58,522 | KevinHuang8/COVID19-prediction | refs/heads/master | /models/gaussianprocess/gp_model.py | import numpy as np
import pymc3 as pm
from ..utils import dataloader as loader
from scipy.optimize import curve_fit
from scipy.interpolate import interp1d
import random
# import warnings
# warnings.simplefilter('ignore')
def linear(x, m):
    """Line through the origin with slope m; used as a curve_fit model."""
    return m * x
def degenerate(x):
    """Fallback predictor: all-zero float output with the same shape as x."""
    return np.zeros_like(x, dtype=float)
class GPCasesDeathsModel:
    '''
    A simple Gaussian process for predicting deaths today given cases in the
    past.
    '''

    def __init__(self, **params):
        """params may contain 'draws', 'tune' and 'samples' (ints)
        controlling the GP sampling; defaults are used otherwise."""
        # dict.get replaces the original repeated try/except KeyError blocks.
        self.draws = params.get('draws', 100)
        self.tune = params.get('tune', 200)
        self.samples = params.get('samples', 100)

    def scale_data(self, cases_past, deaths_curr):
        '''
        Reduce X and y by a factor of 100, to make GP faster
        '''
        self.scale_factor = np.max(cases_past) / 100
        deaths_curr2 = np.array(deaths_curr) / self.scale_factor
        cases_past2 = np.array(cases_past) / self.scale_factor
        return cases_past2, deaths_curr2

    def fit(self, cases_past, deaths_curr,
            quantiles=(10, 20, 30, 40, 50, 60, 70, 80, 90)):
        '''
        Use a GP to find the relationship between cases in the past and
        deaths today. Stores one interpolated prediction function per
        quantile in self.quantile_gp.
        '''
        # (default changed from a list to a tuple: avoids the
        # mutable-default-argument pitfall; callers may still pass a list)
        # If not enough data, return all zeros.
        if len(cases_past) < 5:
            self.quantile_gp = []
            for q in quantiles:
                self.quantile_gp.append(degenerate)
            return self.quantile_gp
        cases_past2, deaths_curr2 = self.scale_data(cases_past, deaths_curr)
        # First, we do a simple linear fit, and use this as our mean prior.
        mfit = curve_fit(linear, cases_past2, deaths_curr2)
        slope = mfit[0]
        with pm.Model() as gp_model:
            ρ = pm.HalfCauchy('ρ', 5)
            η = pm.HalfCauchy('η', 5)
            M = pm.gp.mean.Linear(coeffs=slope)
            K = (η**2) * pm.gp.cov.ExpQuad(1, ρ)
            σ = pm.HalfNormal('σ', 50)
            deaths_gp = pm.gp.Marginal(mean_func=M, cov_func=K)
            deaths_gp.marginal_likelihood('deaths', X=cases_past2.reshape(-1,1),
                y=deaths_curr2, noise=σ)
        with gp_model:
            gp_trace = pm.sample(self.draws, tune=self.tune, cores=1,
                random_seed=random.randint(30, 80))
        X_pred = np.arange(0, np.max(cases_past2)*5)
        with gp_model:
            deaths_pred = deaths_gp.conditional("deaths_pred_noise",
                X_pred.reshape(-1, 1), pred_noise=True)
            gp_samples = pm.sample_posterior_predictive(gp_trace,
                vars=[deaths_pred], samples=self.samples)
        # Rescale back to raw counts before taking percentiles.
        quantile_gp = [np.percentile(
            gp_samples['deaths_pred_noise'] * self.scale_factor, q, axis=0)
            for q in quantiles]
        # We interpolate our predicted function
        X_pred2 = X_pred * self.scale_factor
        self.quantile_gp = []
        for i in range(len(quantiles)):
            f = interp1d(X_pred2, quantile_gp[i], bounds_error=False,
                fill_value='extrapolate')
            self.quantile_gp.append(f)

    def predict(self, cases_past):
        """Evaluate every fitted quantile function at cases_past; returns a
        2D array (quantiles x inputs)."""
        deaths_curr = []
        for f in self.quantile_gp:
            deaths_curr.append(f(cases_past))
        return np.array(deaths_curr)

    def save(self, filename, folder):
        """Persist the fitted quantile functions via the project loader."""
        loader.save_to_otherdata(self.quantile_gp, filename, folder)

    def load(self, filename, folder):
        """Restore quantile functions previously written by save()."""
        self.quantile_gp = loader.load_from_otherdata(filename, folder)
58,523 | KevinHuang8/COVID19-prediction | refs/heads/master | /models/curvefit/curvefit_pipeline.py | import csv
import pickle
import datetime as dt
import numpy as np
import pandas as pd
from . import curvefit_models as models
from ..utils import dataloader as loader
class Data:
    '''
    Class that stores and processes the data.
    '''

    def __init__(self, data_format):
        '''
        data_format - a dict with the following entries
            - 'name': cumulative data to load using
                dataloader.load_covid_timeseries()
                - either 'cases_raw' or 'deaths_raw'
            - 'smoothing': size of moving average to smooth timeseries
            - 'val_steps': number of data points from end to withold for
                validation
            - international: international country to use
        '''
        if 'international' in data_format:
            self.raw_data = loader.load_international_data(
                data_format['international'])
        else:
            datadict = loader.load_covid_timeseries()
            self.raw_data = datadict[data_format['name']]
        self.n_counties = self.raw_data.shape[0]
        window = data_format['smoothing']
        self.val_steps = data_format['val_steps']
        # Pre-processing steps:
        # - only take cumulative data > 0
        # - smooth
        # - exclude counties with not enough >0 datapoints
        self.cumulative_series = {}
        self.daily_change = {}
        self.daily_smoothed = {}
        for idx, series in enumerate(self.raw_data):
            positive = series[series > 0]
            if positive.shape[0] < self.val_steps + 2:
                continue
            self.cumulative_series[idx] = positive
            self.daily_change[idx] = np.diff(positive)
            self.daily_smoothed[idx] = loader.smooth_timeseries(
                self.daily_change[idx], window, axis=0)

    def get_training_data(self, county, cumulative=False):
        '''
        county - a county index
        cumulative - whether to use cumulative or daily data

        Return a train/test split for the county.
        '''
        if cumulative:
            series = self.cumulative_series[county]
        else:
            series = self.daily_smoothed[county]
        end = series.shape[0]
        split = end - self.val_steps
        return (np.arange(split), series[:split],
                np.arange(split, end), series[split:])
class Pipeline:
    '''
    Encapsulates the entire curvefit model training process.
    '''

    def __init__(self, data_format, model_params, horizon, use_cumulative=None):
        '''
        data_format - see Data
        model_params - a dict with the following elements:
            - 'name': curvefit model to use [required]
            - 'params': parameters to pass into model constructor
            - 'use_gp':
                - 'all' means to use GP step on all counties
                - None means to skip GP step for all counties
                - a list of county indices to apply GP step to
            - 'gp_params': parameters to pass into GP model, see curvefit_models
        horizon - number of steps to predict, integer
        use_cumulative - a dict that maps county indices to a boolean,
        determining whether each county should use a cumulative model or not.
        If not provided, it is automatically determined based on validation
        set performance.
        '''
        self.data = Data(data_format)
        self.data_format = data_format
        try:
            name = model_params['name']
        except KeyError:
            raise ValueError('Must specify model name')
        try:
            model_creator = getattr(models, name)
        except AttributeError:
            raise ValueError(f'{name} is not a valid model name')
        gp_model = getattr(models, 'GPModel')
        try:
            params = model_params['params']
        except KeyError:
            params = {}
        try:
            self.use_gp = model_params['use_gp']
            if self.use_gp is None:
                self.use_gp = []
        except KeyError:
            self.use_gp = []
        try:
            self.gp_params = model_params['gp_params']
        except KeyError:
            self.gp_params = {}
        self.horizon = horizon
        # We have a separate model for each county
        self.models = {}
        for county in range(self.data.n_counties):
            try:
                data_max = self.data.cumulative_series[county].max()
            except KeyError:
                # County was excluded during pre-processing (too few points)
                continue
            # Bug fix: 'params' is a dict, so it must be unpacked with **;
            # the original *params unpacked the dict's KEYS as positional
            # arguments whenever params was non-empty.
            if self.use_gp == 'all' or county in self.use_gp:
                model = gp_model(data_max=data_max, **params)
            else:
                model = model_creator(data_max=data_max, **params)
            self.models[county] = model
        if use_cumulative:
            self.use_cumulative = use_cumulative
        else:
            self.use_cumulative = None

    def run(self):
        '''
        Train each model.

        If use_cumulative is not determined yet, we need to run both models
        for each county and determine which one is better, based on validation
        set performance.
        '''
        if not self.use_cumulative:
            self.blind_run()
        else:
            self.warm_run()

    def blind_run(self):
        '''
        Train each county with a cumulative and non-cumulative model, keeping
        the better one.

        Note: We cannot use predict() or get_combined_predictions()
        if doing a blind run. One blind run should be done to determine
        use_cumulative, and then another run should be made for the actual
        predictions.
        '''
        self.predictions = {}
        self.use_cumulative = {}
        for county in range(self.data.n_counties):
            print(f'Fitting {county}/{self.data.n_counties - 1}', end='\r')
            try:
                model = self.models[county]
            except KeyError:
                # No model for this county: fall back to a constant
                # prediction equal to the last smoothed daily change.
                shape = (self.horizon, )
                s = loader.smooth_timeseries(
                    np.diff(self.data.raw_data[county]), axis=0)
                self.predictions[county] = np.full(shape,
                    s[-1])
                continue
            ### First train on differenced data
            X_train1, y_train1, X_test1, y_test1 = \
                self.data.get_training_data(county, cumulative = False)
            model.set_cumulative(False)
            model.fit(X_train1, y_train1)
            y_pred1 = model.predict(X_test1)
            series = self.data.daily_smoothed[county]
            end = series.shape[0]
            pred_x = np.arange(end - self.data.val_steps,
                end - self.data.val_steps + self.horizon)
            prediction1 = model.predict(pred_x)
            ### Then train on cumulative data
            X_train2, y_train2, X_test2, y_test2 = \
                self.data.get_training_data(county, cumulative = True)
            model.set_cumulative(True)
            model.fit(X_train2, y_train2)
            series = self.data.cumulative_series[county]
            end = series.shape[0]
            # One extra leading step: a cumulative model diffs internally,
            # dropping one point.
            x = np.concatenate([[X_test2[0] - 1], X_test2])
            y_pred2 = model.predict(x)
            pred_x = np.arange(end - self.data.val_steps,
                end - self.data.val_steps + self.horizon + 1)
            prediction2 = model.predict(pred_x)
            ### Then compare the two on the validation data
            ### and choose the better method
            # Both predictions are on the daily scale, so both are scored
            # against y_test1.
            err1 = np.sum((y_test1 - y_pred1)**2)
            err2 = np.sum((y_test1 - y_pred2)**2)
            if err1 < err2:
                self.use_cumulative[county] = False
                self.predictions[county] = prediction1
            else:
                self.use_cumulative[county] = True
                self.predictions[county] = prediction2

    def warm_run(self):
        '''
        Train each model, with use_cumulative already determined.
        '''
        for county in range(self.data.n_counties):
            print(f'Fitting {county}/{self.data.n_counties - 1}', end='\r')
            try:
                model = self.models[county]
            except KeyError:
                continue
            try:
                use_cumulative = self.use_cumulative[county]
            except KeyError:
                continue
            if use_cumulative:
                X_train, y_train, X_test, y_test = \
                    self.data.get_training_data(county, cumulative = True)
                model.set_cumulative(True)
                unsmoothed = self.data.cumulative_series[county][X_train]
                unsmoothed = np.diff(unsmoothed)
            else:
                X_train, y_train, X_test, y_test = \
                    self.data.get_training_data(county, cumulative = False)
                model.set_cumulative(False)
                unsmoothed = self.data.daily_change[county][X_train]
            # GP models additionally need the unsmoothed ground truth to
            # estimate residual noise.
            if self.use_gp == 'all' or county in self.use_gp:
                model.fit(X_train, y_train, unsmoothed, **self.gp_params)
            else:
                model.fit(X_train, y_train)
        self.predict()

    def predict(self, quantiles=False, samples=100):
        '''
        quantiles - either False, or a list of quantiles to predict for
        samples - number of samples to take when determining quantiles
        (does not apply to GP step)

        Predict into the future, either a single value or quantiles as
        specified.

        Results stored in self.predictions, which is a dict that maps a
        county index to a 2D np array. Axis 0 is time, and axis 1 is quantiles.
        '''
        self.predictions = {}
        print('')
        for county in range(self.data.n_counties):
            print(f'Predicting {county}/{self.data.n_counties - 1}', end='\r')
            # For any counties with not enough data (no model), then
            # we simply predict a constant value, based on the last
            # value known.
            try:
                model = self.models[county]
            except KeyError:
                if quantiles:
                    shape = (self.horizon, len(quantiles))
                else:
                    shape = (self.horizon, )
                s = loader.smooth_timeseries(
                    np.diff(self.data.raw_data[county]), axis=0)
                self.predictions[county] = np.full(shape,
                    s[-(1 + self.data.val_steps)])
                continue
            try:
                use_cumulative = self.use_cumulative[county]
            except KeyError:
                if quantiles:
                    shape = (self.horizon, len(quantiles))
                else:
                    shape = (self.horizon, )
                s = loader.smooth_timeseries(
                    np.diff(self.data.raw_data[county]), axis=0)
                self.predictions[county] = np.full(shape,
                    s[-(1 + self.data.val_steps)])
                continue
            if use_cumulative:
                series = self.data.cumulative_series[county]
                end = series.shape[0]
                # cumulative models need an extra step at the end, because
                # the data is differenced
                x = np.arange(end - self.data.val_steps,
                    end - self.data.val_steps + self.horizon + 1)
            else:
                series = self.data.daily_smoothed[county]
                end = series.shape[0]
                x = np.arange(end - self.data.val_steps,
                    end - self.data.val_steps + self.horizon)
            if quantiles:
                y_pred = model.predict_quantiles(x, quantiles, samples)
            else:
                y_pred = model.predict(x)
            self.predictions[county] = y_pred

    def get_combined_predictions(self, quantiles=False, samples=100):
        '''
        Computes predictions, but concatenates the future predicted values
        with the past time series. This is mainly used for visualization
        purposes.
        '''
        ## note: rn, only works correctly w/ warm runs, otherwise models aren't
        ## correct versions
        combined = {}
        for county in range(self.data.n_counties):
            print(f'Predicting {county}/{self.data.n_counties - 1}', end='\r')
            n = self.data.raw_data[county].shape[0]
            try:
                model = self.models[county]
            except KeyError:
                if quantiles:
                    shape = (n, len(quantiles))
                else:
                    shape = (n, )
                s = loader.smooth_timeseries(
                    np.diff(self.data.raw_data[county]), axis=0)
                combined[county] = np.full(shape,
                    s[-1])
                continue
            try:
                use_cumulative = self.use_cumulative[county]
            except KeyError:
                if quantiles:
                    shape = (n, len(quantiles))
                else:
                    shape = (n, )
                s = loader.smooth_timeseries(
                    np.diff(self.data.raw_data[county]), axis=0)
                combined[county] = np.full(shape,
                    s[-1])
                continue
            if use_cumulative:
                series = self.data.cumulative_series[county]
                end = series.shape[0]
                x = np.arange(0, end - self.data.val_steps + self.horizon + 1)
            else:
                model.set_cumulative(False)
                series = self.data.daily_smoothed[county]
                end = series.shape[0]
                x = np.arange(0, end - self.data.val_steps + self.horizon)
            if quantiles:
                y_pred = model.predict_quantiles(x, quantiles, samples)
            else:
                y_pred = model.predict(x)
            combined[county] = y_pred
        self.combined = combined
        return combined

    def write_to_file(self, filename, sample_dir, quantiles):
        '''
        filename - place to save predictions to, in csv format
        sample_dir - the sample submission file. This will match all rows
        present in the sample submission
        quantiles - list of quantiles to report
            - this must match quantiles used for prediction

        Creates the submission file. Must have run .predict() before writing
        to file.
        '''
        # Ensure that the created file has the same counties as the sample
        df = pd.read_csv(sample_dir)
        info = loader.load_info_raw()
        i = df.set_index('id').sort_index().index
        # sub_fips - all FIPS in the sample (id format is YYYY-MM-DD-FIPS)
        sub_fips = np.unique([s[11:] for s in i])
        # data_fips - all FIPS predicted
        data_fips = [s.lstrip('0') for s in info['FIPS']]
        # Get all FIPS that we have predicted for, but are not in the sample
        dont_include = []
        for fips in data_fips:
            if fips not in sub_fips:
                dont_include.append(fips)
        # Get all FIPS that are in the sample, but we haven't predicted for
        must_include = []
        for fips in sub_fips:
            if fips not in data_fips:
                must_include.append(fips)
        start_date = '04/01/2020'
        end_date = '06/30/2020'
        # %Y or %y randomly work sometimes for some reason
        try:
            predict_start = dt.datetime.strptime(info.columns[-1], '%m/%d/%Y') \
                - dt.timedelta(days=self.data.val_steps) + dt.timedelta(days=1)
        except ValueError:
            predict_start = dt.datetime.strptime(info.columns[-1], '%m/%d/%y') \
                - dt.timedelta(days=self.data.val_steps) + dt.timedelta(days=1)
        predictions = self.predictions
        # Write predictions to file, making sure to include must_include and
        # excluding counties in dont_include
        to_write = [['id'] + [str(q) for q in quantiles]]
        for county in predictions:
            print(f'writing {county}/{len(predictions) - 1}', end='\r')
            fips = info.iloc[county]['FIPS'].lstrip('0')
            if fips in dont_include:
                continue
            county_pred = predictions[county]
            date = dt.datetime.strptime(start_date, '%m/%d/%Y')
            end = dt.datetime.strptime(end_date, '%m/%d/%Y')
            predict_end = predict_start + dt.timedelta(days=self.horizon - 1)
            while date <= end:
                id_ = date.strftime('%Y-%m-%d-') + fips
                # NYC borough FIPS are zeroed (reported jointly elsewhere)
                if predict_start <= date <= predict_end and (
                        fips not in ['36005', '36047', '36081', '36085']):
                    index = (date - predict_start).days
                    p = list(county_pred[index, :])
                else:
                    p = [0 for i in range(len(quantiles))]
                to_write.append([id_] + p)
                date = date + dt.timedelta(days=1)
        for fips in must_include:
            date = dt.datetime.strptime(start_date, '%m/%d/%Y')
            end = dt.datetime.strptime(end_date, '%m/%d/%Y')
            while date <= end:
                id_ = date.strftime('%Y-%m-%d-') + fips
                p = [0 for i in range(len(quantiles))]
                to_write.append([id_] + p)
                date = date + dt.timedelta(days=1)
        with open(filename, "w+", newline='') as f:
            csv_writer = csv.writer(f, delimiter = ",")
            csv_writer.writerows(to_write)

    def save(self, filename):
        '''
        Save the pipeline to filename.
        '''
        with open(filename, 'wb') as file:
            pickle.dump(self, file)

    @staticmethod
    def load(filename):
        """Load a pipeline previously written by save()."""
        with open(filename, 'rb') as file:
            saved = pickle.load(file)
        return saved
| {"/models/gaussianprocess/gp_pipeline.py": ["/models/gaussianprocess/gp_model.py", "/models/utils/clustering.py"]} |
58,524 | KevinHuang8/COVID19-prediction | refs/heads/master | /models/LSTM/lstm_models.py | import numpy as np
import tensorflow as tf
from tensorflow.keras.models import Model, Sequential, load_model
from tensorflow.keras.layers import Embedding, LSTM, Dense, TimeDistributed
from tensorflow.keras.layers import Activation, Dropout, Input
from tensorflow.keras.regularizers import L1L2
import tensorflow.keras.backend as K
def pinball_loss(q, pred, true):
    """Mean pinball (quantile) loss at quantile q, via the Keras backend."""
    err = true - pred
    return K.mean(K.maximum(q * err, (q - 1) * err))
def quantile_head(inputs, timesteps, target_features, reg=(0.1, 0.1)):
    """Dense output head emitting target_features values, wrapped in
    TimeDistributed when there is more than one output timestep.

    reg - (l1, l2) kernel regularization strengths.
    """
    dense = Dense(target_features, kernel_regularizer=L1L2(*reg))
    if timesteps == 1:
        return dense(inputs)
    return TimeDistributed(dense)(inputs)
def standard_LSTM(X_train, y_train, quantiles=False, loss='mse', optimizer='adam',
    units=64, layers=1, dropout=0, reg=(0.1, 0.1)):
    """Build and compile an LSTM forecaster.

    quantiles - False for a single point-forecast head, or an int N for N
        quantile heads trained with pinball losses at 0.1, 0.2, ...
    loss/optimizer - passed to compile (loss ignored when quantiles is set)
    units/layers/dropout/reg - architecture hyperparameters
    """
    timesteps = y_train.shape[1]
    target_features = y_train.shape[2]
    # A single output step uses a plain head; multiple steps need the LSTM
    # to return the whole sequence.
    if timesteps == 1:
        return_seq = False
    else:
        return_seq = True
    inputs = Input(shape=X_train.shape[1:])
    x = inputs
    for i in range(layers):
        x = LSTM(units, input_shape=X_train.shape[1:],
            return_sequences=return_seq, activation='relu',
            recurrent_regularizer=L1L2(0, 0), kernel_regularizer=L1L2(0, 0))(x)
    if dropout:
        x = Dropout(dropout)(x)
    if not quantiles:
        x = quantile_head(x, timesteps, target_features, reg)
        model = Model(inputs=inputs, outputs=x)
    else:
        qheads = []
        for q in range(quantiles):
            qheads.append(quantile_head(x, timesteps, target_features, reg))
        model = Model(inputs=inputs, outputs=qheads)
    if quantiles:
        # Bug fix: Keras invokes a loss as loss(y_true, y_pred); the
        # previous lambda bound the arguments in the opposite order, which
        # flips the asymmetric pinball loss to quantile (1 - q). The q=q
        # default binds q early, avoiding the late-binding closure pitfall.
        loss = [lambda y_true, y_pred, q=q: pinball_loss(q, y_pred, y_true)
                for q in np.linspace(0.1, 1, quantiles, endpoint=False)]
    model.compile(loss=loss, optimizer=optimizer)
    return model
58,525 | KevinHuang8/COVID19-prediction | refs/heads/master | /models/gaussianprocess/gp_pipeline.py | import numpy as np
import scipy
from scipy.interpolate import interp1d
from scipy.stats import norm
from ..utils import dataloader as loader
from .gp_model import GPCasesDeathsModel
from ..utils.clustering import cluster_counties
class Pipeline:
    '''
    Encapsulates the process of using our cases-deaths GP on the data.

    Steps: cluster counties, find the per-cluster lag between cases and
    deaths that maximizes correlation, fit one GP per cluster on
    (lagged smoothed cases -> smoothed deaths) pairs, then produce
    per-county quantile forecasts of daily deaths, bias-corrected
    against recent history.
    '''
    def __init__(self, cluster_params, gp_params, smoothing=7, min_delay=7,
            val_steps=0, quantiles=None):
        '''
        cluster_params: kwargs forwarded to cluster_counties().
        gp_params: kwargs forwarded to GPCasesDeathsModel().
        smoothing: moving-average window (days) for the time series.
        min_delay: smallest cases->deaths lag (days) considered valid.
        val_steps: number of trailing time steps held out for validation.
        quantiles: quantile levels in percent (default 10, 20, ..., 90).
        '''
        self.min_delay = min_delay
        self.smoothing = smoothing
        # Bug fix: this was hard-coded to 0, silently ignoring the
        # val_steps argument.
        self.val_steps = val_steps
        self.gp_params = gp_params
        datadict = loader.load_covid_timeseries(cases_cutoff=0)
        self.cases_all = datadict['cases_raw']
        self.deaths_all = datadict['deaths_raw']
        # Daily (diffed) series, smoothed, for every county.
        self.county_cases_smooth = loader.smooth_timeseries(
            np.diff(self.cases_all, axis=1), smoothing, axis=1)
        self.county_deaths_smooth = loader.smooth_timeseries(
            np.diff(self.deaths_all, axis=1), smoothing, axis=1)
        if quantiles is None:
            self.quantiles = [10, 20, 30, 40, 50, 60, 70, 80, 90]
        else:
            self.quantiles = quantiles
        print('Clustering...', end='')
        try:
            # Reuse cached clustering results when available.
            self.clusters = loader.load_from_otherdata('clusters.dat')
            self.cluster_ids = loader.load_from_otherdata('cluster_id.dat')
        except FileNotFoundError:
            self.clusters, self.cluster_ids = cluster_counties(**cluster_params)
        print('...done clustering.')
        self.get_cluster_data()
        self.calc_best_delay()
        self.get_training_data()

    def get_cluster_data(self):
        '''Slice the national cases/deaths arrays into per-cluster raw,
        daily (diffed), and smoothed series, keyed by cluster index.'''
        self.cases_raw = {}
        self.deaths_raw = {}
        self.cases = {}
        self.deaths = {}
        self.cases_smooth = {}
        self.deaths_smooth = {}
        for i, cluster in enumerate(self.clusters):
            print(f'Processing data for cluster {i + 1}/{len(self.clusters)}',
                end='\r')
            self.cases_raw[i] = np.array([self.cases_all[county]
                for county in cluster])
            self.deaths_raw[i] = np.array([self.deaths_all[county]
                for county in cluster])
            self.cases[i] = np.diff(self.cases_raw[i], axis=1)
            self.deaths[i] = np.diff(self.deaths_raw[i], axis=1)
            self.cases_smooth[i] = loader.smooth_timeseries(self.cases[i],
                self.smoothing)
            self.deaths_smooth[i] = loader.smooth_timeseries(self.deaths[i],
                self.smoothing)
        print()

    def calc_best_delay(self):
        '''For each cluster, pick the cases->deaths lag (>= min_delay) whose
        lagged smoothed cases correlate best with smoothed deaths.'''
        self.best_delays = {}
        delays = np.arange(1, 15)
        for c in range(len(self.clusters)):
            print(f'Calculating best delay for cluster ' + \
                f'{c + 1}/{len(self.clusters)}', end='\r')
            corr = []
            for delay in delays:
                cases_past = []
                deaths_curr = []
                for county in range(self.cases_smooth[c].shape[0]):
                    for i in range(delay,
                            self.cases_smooth[c].shape[1] - self.val_steps):
                        # Only use the later half of each series, and skip
                        # uninformative (near-zero) points.
                        if i < 0.5 * self.cases_smooth[c].shape[1]:
                            continue
                        if (self.deaths[c][county, i] == 0
                                and self.cases[c][county, i - delay] < 5):
                            continue
                        cases_past.append(
                            self.cases_smooth[c][county, i - delay])
                        deaths_curr.append(self.deaths_smooth[c][county, i])
                co = np.corrcoef(cases_past, deaths_curr)[0, 1]
                corr.append(co)
            # Rank delays by correlation, then take the best one that also
            # satisfies the minimum-delay constraint.
            best_delays = np.argsort(corr)[::-1] + 1
            best_delay = best_delays[best_delays >= self.min_delay][0]
            self.best_delays[c] = best_delay
        print()

    def get_training_data(self):
        '''Collect (lagged smoothed cases, smoothed deaths) training pairs
        for each cluster using that cluster's best delay.'''
        self.cases_past = {}
        self.deaths_curr = {}
        for c in range(len(self.clusters)):
            print(f'Calculating training data for cluster ' +
                f'{c + 1}/{len(self.clusters)}', end='\r')
            delay = self.best_delays[c]
            cases_past = []
            deaths_curr = []
            for county in range(self.cases_smooth[c].shape[0]):
                for i in range(delay + 1,
                        self.cases_smooth[c].shape[1] - self.val_steps):
                    if i < 0.5 * self.cases_smooth[c].shape[1]:
                        continue
                    if (self.deaths[c][county, i] == 0
                            and self.cases[c][county, i - delay] < 10):
                        continue
                    cases_past.append(self.cases_smooth[c][county, i - delay])
                    deaths_curr.append(self.deaths_smooth[c][county, i])
            self.cases_past[c] = cases_past
            self.deaths_curr[c] = deaths_curr
        print()

    def run(self, start=0, load=True, folder=None):
        '''Fit (or load cached) one GP per cluster and save each model.

        start: cluster index to resume from.
        load: try to load a previously saved model before refitting.
        folder: save/load directory (defaults to 'gp_cluster').
        '''
        if folder is None:
            folder = 'gp_cluster'
        self.models = {}
        for c in range(start, len(self.clusters)):
            print(f'fitting cluster {c + 1}/{len(self.clusters)}', end='\r')
            self.models[c] = GPCasesDeathsModel(**self.gp_params)
            if load:
                try:
                    self.models[c].load(f'{c}.dat', folder)
                    continue
                except FileNotFoundError:
                    pass
            try:
                self.models[c].fit(self.cases_past[c], self.deaths_curr[c])
            except Exception:
                # Bug fix: previously `raise e` was followed by unreachable
                # code; re-raise preserving the original traceback.
                raise
            self.models[c].save(f'{c}.dat', folder)
        print()

    def interpolate_percentiles(self, point, quantile_values, reverse=True):
        '''
        Linearly interpolate between quantile levels and quantile values.

        reverse=True maps a value back to its quantile level; reverse=False
        maps a quantile level to a value. Extrapolates outside the range.
        '''
        y = quantile_values
        x = self.quantiles
        if reverse:
            f = interp1d(y, x, bounds_error=False, fill_value='extrapolate')
        else:
            f = interp1d(x, y, bounds_error=False, fill_value='extrapolate')
        return f(point)

    def get_bias(self, county, c, delay, cases_smooth, deaths_smooth, t):
        '''
        Estimate which quantile levels of the GP's predictive distribution
        the county's recent observed deaths actually fell on, fit a normal
        to those levels, and return its quantiles (on the percent scale).
        '''
        # Look back over the last k days of observed deaths.
        k = 10
        # NOTE: the clamp below writes through to the caller's cases_smooth
        # (this slice is a view), matching the original behavior.
        cases_past = cases_smooth[county, t - k - delay:t - delay]
        cases_past[cases_past < 0] = 0
        try:
            deaths_pred = self.models[c].predict(cases_past)
        except ValueError:
            deaths_pred = np.zeros((len(self.quantiles), cases_past.shape[0]))
        deaths_curr = deaths_smooth[county, t - k:t]
        biased_values = []
        for i in range(deaths_curr.shape[0]):
            actual = deaths_curr[i]
            pred = deaths_pred[:, i]
            biased_quantile = self.interpolate_percentiles(actual, pred, True)
            biased_values.append(biased_quantile)
        m = np.mean(biased_values)
        s = np.std(biased_values)
        biased_quantiles = []
        for q in self.quantiles:
            # Quantile levels are percentages (10..90): convert to a
            # probability for the normal PPF. Bug fix: this used q / 10
            # (giving 1..9, outside [0, 1]) and then appended q itself,
            # discarding the computed quantile bq.
            bq = norm.ppf(q / 100, loc=m, scale=s)
            biased_quantiles.append(bq)
        return np.array(biased_quantiles)

    def adjust_quantiles(self, pred_quantiles, biased_quantiles):
        '''Average the raw predicted quantile values with the bias-shifted
        ones, one forecast step at a time.'''
        adjusted_quantiles = []
        for i in range(pred_quantiles.shape[1]):
            pred_values = pred_quantiles[:, i]
            biased = self.interpolate_percentiles(biased_quantiles,
                pred_values, False)
            adjusted = np.vstack([biased, pred_values])
            adjusted = np.mean(adjusted, axis=0)
            adjusted_quantiles.append(adjusted)
        return np.array(adjusted_quantiles)

    def extend_cases_smooth(self, cases_predictions):
        '''Append per-county case forecasts to the smoothed case series so
        deaths can be predicted past the end of the observed cases.'''
        end = self.county_cases_smooth.shape[1] - self.val_steps
        h = end + cases_predictions[0].shape[0]
        extended = np.zeros((self.cases_all.shape[0], h))
        for county in range(self.cases_all.shape[0]):
            extended[county] = np.concatenate([self.county_cases_smooth[county,:end],
                cases_predictions[county]])
        self.county_cases_smooth = extended

    def predict(self, horizon, d=0):
        '''Produce bias-adjusted death-quantile forecasts for every county.

        horizon: number of days to forecast.
        d: additional days to back off from the end of the data.
        Returns {county_index: array of shape (n_quantiles, horizon)}.
        '''
        cases_smooth = self.county_cases_smooth
        deaths_smooth = self.county_deaths_smooth
        self.predictions = {}
        t = self.cases_all.shape[1] - self.val_steps - d
        for county in range(self.cases_all.shape[0]):
            print(f'Predicting county {county + 1}/{self.cases_all.shape[0]}',
                end='\r')
            c = self.cluster_ids[county]
            delay = self.best_delays[c]
            cases_past = cases_smooth[county, t - delay:t - delay + horizon]
            try:
                deaths_pred_quantiles = self.models[c].predict(cases_past)
            except ValueError:
                # cases_past contains negative numbers
                cases_past[cases_past < 0] = 0
                try:
                    deaths_pred_quantiles = self.models[c].predict(cases_past)
                except ValueError:
                    deaths_pred_quantiles = np.zeros((len(self.quantiles),
                        cases_past.shape[0]))
            deaths_pred_quantiles = loader.smooth_timeseries(
                deaths_pred_quantiles, self.smoothing, axis=1)
            biased_quantiles = self.get_bias(county, c, delay, cases_smooth,
                deaths_smooth, t)
            adjusted_pred = self.adjust_quantiles(deaths_pred_quantiles,
                biased_quantiles)
            adjusted_pred[adjusted_pred < 0] = 0
            self.predictions[county] = adjusted_pred
        return self.predictions
import os
import datetime as dt
from datetime import date, timedelta
import pandas as pd
import numpy as np
import censusdata as cd
import json
from pathlib import Path
MAIN_DIR = Path(__file__).parent.parent.parent.absolute()
DATA_DIR = MAIN_DIR / 'upstream' / 'data'
OTHER_DATA_DIR = MAIN_DIR / 'other data'
### FIPS changes: 51515 > 51019, 46113 > 46102, 2158 > 2270
def fix_county_FIPS(df):
    """Normalize a table's county FIPS codes.

    Zero-pads 4-digit codes to 5 digits, drops single-digit (state-level)
    rows and codes absent from the population data, and returns a new frame
    sorted by FIPS with a fresh index. The input frame is not modified.
    """
    fixed = df.copy()
    four_digit = fixed['FIPS'].str.len() == 4
    fixed.loc[four_digit, 'FIPS'] = '0' + fixed.loc[four_digit, 'FIPS']
    # Single-character codes are state rows, not counties.
    fixed = fixed[fixed['FIPS'].str.len() != 1]
    # These codes have no match in the population data.
    fixed = fixed[~fixed['FIPS'].isin(['02158', '06000'])]
    fixed = fixed.sort_values('FIPS').reset_index(drop=True)
    return fixed
def calibrate_timeseries(t, *s, cutoff=50):
    '''
    all positional arguments are arrays of shape (n_counties, n_timesteps)
    that represent time series
    Shifts each time series in s & t so that they begin at the same index
    as when the corresponding time series in t reaches the cutoff. For example,
    only take data after the day where deaths reaches 50, and sets that day as
    time = 0. Deaths would be passed in as t, and all the other series
    (for example cases) that you want to calibrate with deaths is passed in
    as s. The end of each time series becomes padded with nan.
    **Assumes that all values are non-decreasing with time, or
    else the order gets messed up for that row**
    - There are a few instances where cumulative deaths
    decreases in the data, which is impossible in real life

    Returns a list of float arrays [t_calibrated, *s_calibrated]. Rows of t
    that never exceed the cutoff become all-nan in every returned array.
    '''
    row, col = np.indices(t.shape)
    # Index of first day when value > cutoff for each row
    calibrated_start = np.argmax(t > cutoff, axis=1)
    calibrated_start = np.expand_dims(calibrated_start, axis=1)
    # Rows that contain values all < cutoff
    to_remove = np.all(t <= cutoff, axis=1)
    # For each row, get all values after the calibrated
    # start date, and move them to the front
    mask = col >= calibrated_start
    flipped_mask = mask[:,::-1]
    calibrated = []
    for x in ((t,) + s):
        a = np.copy(x).astype(float)
        # Boolean-mask assignment reads and writes in row-major order, so
        # this shifts each row's post-cutoff values to the row's front.
        a[flipped_mask] = a[mask]
        # Everything not meeting cutoff is nan
        a[~flipped_mask] = np.nan
        a[to_remove] = np.nan
        calibrated.append(a)
    return calibrated
def smooth_timeseries(t, size=5, axis=1):
    """Smooth each series in *t* with a centered moving average of *size*.

    The array is median-padded (stat_length=size) at both ends so the output
    has the same length as the input along *axis* (0 or 1).
    """
    kernel = np.full(size, 1.0 / size)
    # For even window sizes one extra pad element is needed on the left.
    left = size // 2 + (size % 2 == 0)
    right = size // 2
    if axis == 0:
        pad_widths = [(left, right)]
    elif axis == 1:
        pad_widths = [(0, 0), (left, right)]
    padded = np.pad(t, pad_widths, mode='median', stat_length=size)
    return np.apply_along_axis(
        lambda row: np.convolve(row, kernel, mode='valid'),
        axis=axis, arr=padded)
def load_covid_raw():
    """Load the raw cumulative USAFacts cases/deaths tables.

    Returns (df_cases, df_deaths) with normalized 5-digit FIPS codes and
    descriptive columns dropped, leaving 'FIPS' plus one column per date.
    """
    df_cases = pd.read_csv(DATA_DIR / 'us' / 'covid' / 'confirmed_cases.csv',
        dtype={'countyFIPS':str})
    df_cases = df_cases.rename(columns={'countyFIPS' : 'FIPS'})
    df_deaths = pd.read_csv(DATA_DIR / 'us' / 'covid' / 'deaths.csv',
        dtype={'countyFIPS':str})
    df_deaths = df_deaths.rename(columns={'countyFIPS' : 'FIPS'})
    df_deaths = fix_county_FIPS(df_deaths)
    df_cases = fix_county_FIPS(df_cases)
    df_cases.drop(['County Name', 'State', 'stateFIPS'], axis=1, inplace=True)
    df_deaths.drop(['County Name', 'State', 'stateFIPS'], axis=1, inplace=True)
    return df_cases, df_deaths
def reload_nyt_data(windows):
    """Rebuild the cached NYT wide-format county time-series CSVs.

    Pivots the long-format nyt_us_counties.csv into one-row-per-county
    tables (one column per date), falls back to the USAFacts row for
    counties the NYT data lacks, and writes nyt_cases.csv / nyt_deaths.csv
    to OTHER_DATA_DIR.

    windows: True to use Windows strftime no-pad flags ('%#m') for the date
    column headers, False for the POSIX form ('%-m').
    """
    print('Reloading NYT data... May take a minute...')
    rawdata = load_covid_raw()
    rawcases = rawdata[0]
    rawdeaths = rawdata[1]
    dat = pd.read_csv(DATA_DIR / "us" / "covid" / "nyt_us_counties.csv",
        parse_dates=[0],
        dtype={'fips':str})
    # NYT reports all of NYC under one FIPS-less row; assign Manhattan's
    # code. Guam rows likewise carry no FIPS in the raw data.
    dat.loc[dat['county'] == 'New York City', 'fips'] = '36061'
    dat.loc[dat['state'] == 'Guam', 'fips'] = '66010'
    if windows:
        dat['date'] = dat['date'].dt.strftime('%#m/%#d/%y')
    else:
        dat['date'] = dat['date'].dt.strftime('%-m/%-d/%y')
    dat = dat.astype({'date' : str})
    data_cases = pd.DataFrame()
    data_deaths = pd.DataFrame()
    # Pre-create one column per date, 1/22/20 through the last NYT date.
    curr = dt.datetime.strptime('1/21/2020', '%m/%d/%Y')
    last = dt.datetime.strptime(dat.iloc[-1].date, '%m/%d/%y')
    data_cases['FIPS'] = np.nan
    while curr != last:
        curr = curr + timedelta(days=1)
        if windows:
            data_cases[curr.strftime('%#m/%#d/%y')] = np.nan
        else:
            data_cases[curr.strftime('%-m/%-d/%y')] = np.nan
    curr = dt.datetime.strptime('1/21/2020', '%m/%d/%Y')
    last = dt.datetime.strptime(dat.iloc[-1].date, '%m/%d/%y')
    data_deaths['FIPS'] = np.nan
    while curr != last:
        curr = curr + timedelta(days=1)
        if windows:
            data_deaths[curr.strftime('%#m/%#d/%y')] = np.nan
        else:
            data_deaths[curr.strftime('%-m/%-d/%y')] = np.nan
    NYT_fips = dat['fips'].unique()
    # NOTE(review): DataFrame.append was deprecated in pandas 1.4 and removed
    # in 2.0; these loops need pd.concat on newer pandas versions.
    for index, row in rawcases.iterrows():
        fips = row['FIPS']
        if fips not in NYT_fips:
            # County absent from NYT: keep the USAFacts row as-is.
            data_cases = data_cases.append(row, ignore_index=True)
            continue
        # Transpose this county's long-format rows into one wide row.
        r = dat[dat['fips'] == fips].drop(['fips', 'county', 'state',
            'deaths'], axis=1).T
        r.columns = r.iloc[0]
        r.drop('date', axis=0, inplace=True)
        r['FIPS'] = fips
        data_cases = data_cases.append(r, ignore_index=True, sort=False)
        #print('cases: ' + str(index))
    for index, row in rawdeaths.iterrows():
        fips = row['FIPS']
        if fips not in NYT_fips:
            data_deaths = data_deaths.append(row, ignore_index=True)
            continue
        r = dat[dat['fips'] == fips].drop(['fips', 'county', 'state',
            'cases'], axis=1).T
        r.columns = r.iloc[0]
        r.drop('date', axis=0, inplace=True)
        r['FIPS'] = fips
        data_deaths = data_deaths.append(r, ignore_index=True, sort=False)
        #print('deaths: ' + str(index))
    # Guam is not in the USAFacts county list, so append it explicitly.
    r = dat[dat['fips'] == '66010'].drop(['fips', 'county', 'state',
        'deaths'], axis=1).T
    r.columns = r.iloc[0]
    r.drop('date', axis=0, inplace=True)
    r['FIPS'] = '66010'
    data_cases = data_cases.append(r, ignore_index=True, sort=False)
    r = dat[dat['fips'] == '66010'].drop(['fips', 'county', 'state',
        'cases'], axis=1).T
    r.columns = r.iloc[0]
    r.drop('date', axis=0, inplace=True)
    r['FIPS'] = '66010'
    data_deaths = data_deaths.append(r, ignore_index=True, sort=False)
    # Drop the pre-epidemic first column and write out the caches.
    data_deaths.drop('1/21/20', axis=1, inplace=True)
    data_deaths.fillna(0, inplace=True)
    data_deaths.to_csv(OTHER_DATA_DIR / 'nyt_deaths.csv')
    data_cases.drop('1/21/20', axis=1, inplace=True)
    data_cases.fillna(0, inplace=True)
    data_cases.to_csv(OTHER_DATA_DIR / 'nyt_cases.csv')
def load_info_raw(fips_info=False):
    """Return county identification info.

    fips_info=True: a FIPS / County Name / State table (including Guam)
    derived from the static snapshot, with the numeric columns dropped.
    Otherwise returns the cached NYT deaths table, whose rows are in the
    canonical county order.
    """
    if fips_info:
        data = load_covid_static()
        data = data.append({'FIPS' : '66010', 'County Name' : 'Guam',
            'State': 'GU'}, ignore_index=True)
        data.drop(['cases', 'deaths', 'log_cases', 'log_deaths'], axis=1,
            inplace=True)
        return data
    return pd.read_csv(OTHER_DATA_DIR / 'nyt_deaths.csv', dtype={'FIPS':str})
def get_index_from_fips(fips):
    """Map a county FIPS code to its row index in the canonical county table.

    Raises ValueError if *fips* is not present in the data.
    """
    info = load_info_raw(True)
    matches = info[info['FIPS'] == fips].index
    if len(matches) == 0:
        raise ValueError(f'{fips} is not a valid FIPS code in the data')
    return matches[0]
def load_covid_timeseries(source='nytimes', smoothing=5, cases_cutoff=200, log=False,
    deaths_cutoff=50, interval_change=1, reload_data=False, force_no_reload=False,
    windows=True):
    """Load county-level COVID time series plus derived transforms.

    source: 'nytimes' (cached wide CSVs, refreshed automatically when stale
        unless force_no_reload) or 'usafacts' (raw cumulative tables).
    smoothing: moving-average window for the smoothed outputs.
    cases_cutoff: cumulative-cases threshold used to calibrate (time-shift)
        the series; see calibrate_timeseries.
    log: take log10 of the raw tables before processing.
    interval_change: stride (days) used for percent-change series.
    windows: passed through to reload_nyt_data for date formatting.

    Returns a dict of (n_counties, n_timesteps) float arrays: raw,
    calibrated, calibrated+smoothed, and percent-change (raw and smoothed)
    versions of both cases and deaths.
    """
    if source == 'nytimes':
        if not reload_data and not force_no_reload:
            # Refresh the cache when its last column lags the raw NYT dump.
            df_cases = pd.read_csv(OTHER_DATA_DIR / 'nyt_cases.csv', dtype={'FIPS':str})
            nyt_raw = pd.read_csv(DATA_DIR / "us" / "covid" / "nyt_us_counties.csv"
                , dtype={'countyFIPS':str})
            last_date_available = dt.datetime.strptime(nyt_raw.iloc[-1].date,
                '%Y-%m-%d')
            last_date_checked = dt.datetime.strptime(df_cases.columns[-1],
                '%m/%d/%y')
            if last_date_checked != last_date_available:
                reload_data = True
        if reload_data:
            reload_nyt_data(windows)
        df_cases = pd.read_csv(OTHER_DATA_DIR / 'nyt_cases.csv', dtype={'FIPS':str})
        df_deaths = pd.read_csv(OTHER_DATA_DIR / 'nyt_deaths.csv', dtype={'FIPS':str})
        # Drop the index and FIPS columns, keeping only the date columns.
        df_deaths = df_deaths.iloc[:, 2:]
        df_cases = df_cases.iloc[:, 2:]
    elif source =='usafacts':
        df_cases = pd.read_csv(DATA_DIR / 'us' / 'covid' / 'confirmed_cases.csv',
            dtype={'countyFIPS':str})
        df_cases = df_cases.rename(columns={'countyFIPS' : 'FIPS'})
        df_deaths = pd.read_csv(DATA_DIR / 'us' / 'covid' / 'deaths.csv',
            dtype={'countyFIPS':str})
        df_deaths = df_deaths.rename(columns={'countyFIPS' : 'FIPS'})
        df_deaths = fix_county_FIPS(df_deaths)
        df_cases = fix_county_FIPS(df_cases)
        # Get rid of every column except for time series
        df_deaths = df_deaths.iloc[:, 4:]
        df_cases = df_cases.iloc[:, 4:]
    else:
        raise ValueError('Invalid Source. Must be "nytimes" or "usafacts".')
    if log:
        df_cases = np.log10(df_cases).replace([np.inf, -np.inf], 0)
        df_deaths = np.log10(df_deaths).replace([np.inf, -np.inf], 0)
    # Calibrate cases based on a cases cutoff
    cases, deaths = calibrate_timeseries(df_cases.values,
        df_deaths.values, cutoff=cases_cutoff)
    # This below does deaths calibration independently
    # deaths = calibrate_timeseries(df_deaths.values, cutoff=deaths_cutoff)
    # Get percentage change between 'interval_change' days
    with np.errstate(divide='ignore', invalid='ignore'):
        d = deaths[:, ::interval_change]
        c = cases[:, ::interval_change]
        deaths_pchange = np.diff(d) / d[:, :-1]
        cases_pchange = np.diff(c) / c[:, :-1]
    # all invalid percentage changes should be set to nan
    deaths_pchange = np.nan_to_num(deaths_pchange, nan=np.nan, posinf=np.nan, neginf=np.nan)
    cases_pchange = np.nan_to_num(cases_pchange, nan=np.nan, posinf=np.nan, neginf=np.nan)
    d_smoothed = smooth_timeseries(deaths_pchange, smoothing)
    c_smoothed = smooth_timeseries(cases_pchange, smoothing)
    cases_calib_smooth = smooth_timeseries(cases, smoothing)
    deaths_calib_smooth = smooth_timeseries(deaths, smoothing)
    return {'deaths_pc' : deaths_pchange,
        'deaths_pc_smoothed' : d_smoothed,
        'deaths_calibrated' : deaths,
        'deaths_raw' : df_deaths.values.astype(float),
        'deaths_calibrated_smoothed' : deaths_calib_smooth,
        'cases_pc' : cases_pchange,
        'cases_pc_smoothed' : c_smoothed,
        'cases_calibrated' : cases,
        'cases_raw' : df_cases.values.astype(float),
        'cases_calibrated_smoothed' : cases_calib_smooth}
def load_covid_static(source='usafacts', days_ago=2):
    """Load a single-day snapshot of cumulative cases and deaths per county.

    source: only 'usafacts' is implemented; 'nytimes' raises
        NotImplementedError, anything else ValueError.
    days_ago: how many days before today to snapshot (data lags the
        present by a couple of days).
    Returns a frame with FIPS, County Name, State, cases, deaths and the
    log10 columns, sorted by FIPS.
    """
    yesterday = date.today() - timedelta(days=days_ago)
    if source == 'usafacts':
        # Path fix: os.path.join(DATA_DIR, 'us\\covid\\...') embedded
        # literal backslashes in the filename on POSIX; use pathlib joins
        # like the rest of this module.
        df_cases = pd.read_csv(DATA_DIR / 'us' / 'covid' / 'confirmed_cases.csv',
            dtype={'countyFIPS':str})
        df_cases = df_cases.rename(columns={'countyFIPS' : 'FIPS'})
        df_deaths = pd.read_csv(DATA_DIR / 'us' / 'covid' / 'deaths.csv',
            dtype={'countyFIPS':str})
        df_deaths = df_deaths.rename(columns={'countyFIPS' : 'FIPS'})
        # Get data from most recent day
        yesterday = f'{yesterday.month}/{yesterday.day}/{yesterday.strftime("%y")}'
        yesterday_cases = df_cases.loc[:, ['FIPS', 'County Name', 'State', yesterday]]
        yesterday_deaths = df_deaths.loc[:, ['FIPS', 'County Name', 'State', yesterday]]
        yesterday_cases = yesterday_cases.rename(columns={yesterday: 'cases'})
        yesterday_deaths = yesterday_deaths.rename(columns={yesterday: 'deaths'})
        # Combine cases and deaths into one table for easy access
        cols_to_use = yesterday_deaths.columns.difference(yesterday_cases.columns)
        cases_deaths = pd.merge(yesterday_cases, yesterday_deaths[cols_to_use], how='outer',
            left_index=True, right_index=True)
        cases_deaths = fix_county_FIPS(cases_deaths)
        # Add log data for better graphing
        logcases = np.log10(cases_deaths['cases']).replace([np.inf, -np.inf], 0)
        logdeaths = np.log10(cases_deaths['deaths']).replace([np.inf, -np.inf], 0)
        cases_deaths['log_cases'] = pd.Series(logcases, index=cases_deaths.index)
        cases_deaths['log_deaths'] = pd.Series(logdeaths, index=cases_deaths.index)
        cases_deaths.sort_values('FIPS', inplace=True)
        cases_deaths = cases_deaths.reset_index(drop=True)
        return cases_deaths
    elif source == 'nytimes':
        raise NotImplementedError
    else:
        raise ValueError('Source not recognized. Options are: usafacts, nytimes')
def load_international_data(country_name='Italy'):
    """Return a country's cumulative COVID death series, shape (1, T).

    Bug fix: the location filter was hard-coded to 'Italy', silently
    ignoring the country_name argument.
    """
    df = pd.read_csv(DATA_DIR / 'international' / 'covid' / 'our_world_in_data' / 'full_data.csv')
    deaths = df[df['location'] == country_name]['total_deaths'].to_list()
    deaths = np.expand_dims(np.array(deaths), axis=0)
    return deaths
def load_demographics_data(include_guam=True):
    """Load the cached county demographics table with derived columns.

    Adds pop_density (total_pop / area) and p60_plus (fraction of the
    population aged 60+). include_guam=False drops the final row, which is
    Guam in the cached file.
    """
    demographics = pd.read_csv(os.path.join(OTHER_DATA_DIR, 'county_demographics.csv'), dtype={'FIPS':str})
    demographics.drop(['NAME'], axis=1, inplace=True)
    if not include_guam:
        demographics = demographics.iloc[:-1]
    demographics['pop_density'] = demographics['total_pop'] / demographics['area']
    demographics['p60_plus'] = demographics['60plus'] / demographics['total_pop']
    return demographics
def save_to_otherdata(obj, filename, folder=None):
    """Pickle *obj* (protocol 4) into the "other data" directory.

    folder, if given, is a subdirectory of OTHER_DATA_DIR.
    """
    import pickle
    if folder is None:
        target = OTHER_DATA_DIR / filename
    else:
        target = OTHER_DATA_DIR / folder / filename
    with open(target, 'wb+') as handle:
        pickle.dump(obj, handle, protocol=4)
def load_from_otherdata(filename, folder=None):
    """Unpickle and return an object saved in the "other data" directory.

    folder, if given, is a subdirectory of OTHER_DATA_DIR.
    """
    import pickle
    if folder is None:
        target = OTHER_DATA_DIR / filename
    else:
        target = OTHER_DATA_DIR / folder / filename
    with open(target, 'rb') as handle:
        return pickle.load(handle)
def load_instate_adjacency_list():
    """Unpickle the cached {FIPS: [in-state neighbor FIPS, ...]} mapping."""
    import pickle
    with open(OTHER_DATA_DIR /'instate_adjacency_list.dat', 'rb') as handle:
        return pickle.load(handle)
def generate_instate_adjacency_list():
    """Build and cache the {FIPS: [in-state neighbor FIPS, ...]} mapping.

    Reads neighborcounties.csv and keeps only neighbors flagged instate,
    pickling the result to instate_adjacency_list.dat under OTHER_DATA_DIR.
    """
    import pickle
    neighbor_df = pd.read_csv(DATA_DIR / 'us' / 'geolocation' /
        'neighborcounties.csv', dtype={'orgfips':str, 'adjfips': str})
    unique_fips = neighbor_df['orgfips'].unique()
    adjacency_list = {}
    # One frame filter per county; O(counties * rows) but only run offline.
    for fips in unique_fips:
        neighbors = neighbor_df[(neighbor_df['orgfips'] == fips) &
            (neighbor_df['instate'] == 1)]['adjfips'].to_list()
        adjacency_list[fips] = neighbors
    with open(OTHER_DATA_DIR / 'instate_adjacency_list.dat', 'wb') as handle:
        pickle.dump(adjacency_list, handle, protocol=4)
def generate_demographics_data():
    '''Don't really need to call this again after the data is already generated, since demographics data
    doesn't change. Except if you want to change the demographics data.

    Downloads age/race percentages from the ACS 5-year profile tables,
    merges in county population and land area, and writes
    county_demographics.csv to OTHER_DATA_DIR.
    '''
    d = cd.download('acs5', 2018, cd.censusgeo([('county', '*')]),
        ['DP05_0018E', 'DP05_0037PE', 'DP05_0038PE', 'DP05_0071PE',],
        tabletype='profile')
    #Find variable names for data you want here:
    #https://api.census.gov/data/2018/acs/acs1/profile/groups/DP05.html
    d = d.rename(columns={'DP05_0018E': 'median_age', 'DP05_0037PE':'pop_white', 'DP05_0038PE':'pop_black','DP05_0071PE':'pop_hispanic'})
    d = d[['median_age', 'pop_white', 'pop_black', 'pop_hispanic']]
    # str() in case cd.exportcsv does not accept a Path object.
    cd.exportcsv(str(OTHER_DATA_DIR / 'county_demographics_temp.csv'), d)
    df = pd.read_csv(OTHER_DATA_DIR / 'county_demographics_temp.csv', dtype={'state':str,'county':str})
    df['FIPS'] = df['state'] + df['county']
    df.drop(['state', 'county'], axis=1, inplace=True)
    df = df[['FIPS', 'NAME', 'median_age', 'pop_white', 'pop_black', 'pop_hispanic']]
    df.replace('02158', '02270', inplace=True)
    # Remove puerto rico
    df = df.drop(df[df['FIPS'].str[:2] == '72'].index)
    df.sort_values('FIPS', inplace=True)
    df.reset_index(drop=True, inplace=True)
    # Get population data
    # Path fix: os.path.join with 'us\\demographics\\...' embedded literal
    # backslashes in the filename on POSIX; use pathlib joins like the rest
    # of this module.
    population = pd.read_csv(DATA_DIR / 'us' / 'demographics' / 'county_populations.csv', dtype={'FIPS':str})
    population.loc[population['FIPS'].str.len() == 4, 'FIPS'] = '0' + population.loc[population['FIPS'].str.len() == 4, 'FIPS']
    population.replace('02158', '02270', inplace=True)
    population.sort_values('FIPS', inplace=True)
    population.reset_index(drop=True, inplace=True)
    # Get land area data
    land = pd.read_csv(DATA_DIR / 'us' / 'demographics' / 'county_land_areas.csv', dtype={'County FIPS':str}, engine='python')
    land.rename(columns={'County FIPS': 'FIPS'}, inplace=True)
    land = land.loc[:, ['FIPS', 'Area in square miles - Land area']]
    land.rename(columns={'Area in square miles - Land area' : 'area'}, inplace=True)
    land.loc[land['FIPS'].str.len() == 4, 'FIPS'] = '0' + land.loc[land['FIPS'].str.len() == 4, 'FIPS']
    land.replace('46113', '46102', inplace=True)
    land.drop(land[land['FIPS'] == '51515'].index, inplace=True)
    # Remove Puerto Rican data
    land.drop(land[land['FIPS'].str[:2] == '72'].index, inplace=True)
    land.sort_values('FIPS', inplace=True)
    land.reset_index(drop=True, inplace=True)
    demographics = population.merge(land)
    demographics = pd.merge(demographics, df, on=['FIPS'], how='outer')
    demographics.to_csv(OTHER_DATA_DIR / 'county_demographics.csv', index=False, sep=',')
def generate_demographics_data2(include_age_breakdown=False):
    '''Use the other one. Here for reference

    Legacy variant that builds the demographics table from a local ACS CSV
    instead of the Census API, filling missing race data from state
    averages. Writes demographics_raw.csv and demographics.csv.

    NOTE(review): the 'us\\demographics\\...' joins embed literal
    backslashes on POSIX, and '../other data/...' is relative to the
    current working directory -- both break outside Windows/the original
    run layout. Left as-is since this function is kept only for reference.
    '''
    # Get population data
    population = pd.read_csv(os.path.join(DATA_DIR, 'us\\demographics\\county_populations.csv'), dtype={'FIPS':str})
    population.loc[population['FIPS'].str.len() == 4, 'FIPS'] = '0' + population.loc[population['FIPS'].str.len() == 4, 'FIPS']
    population.replace('02158', '02270', inplace=True)
    population.sort_values('FIPS', inplace=True)
    population.reset_index(drop=True, inplace=True)
    # Get land area data
    land = pd.read_csv(os.path.join(DATA_DIR, 'us\\demographics\\county_land_areas.csv'), dtype={'County FIPS':str}, engine='python')
    land.rename(columns={'County FIPS': 'FIPS'}, inplace=True)
    land = land.loc[:, ['FIPS', 'Area in square miles - Land area']]
    land.rename(columns={'Area in square miles - Land area' : 'area'}, inplace=True)
    land.loc[land['FIPS'].str.len() == 4, 'FIPS'] = '0' + land.loc[land['FIPS'].str.len() == 4, 'FIPS']
    land.replace('46113', '46102', inplace=True)
    land.drop(land[land['FIPS'] == '51515'].index, inplace=True)
    # Remove Puerto Rican data
    land.drop(land[land['FIPS'].str[:2] == '72'].index, inplace=True)
    land.sort_values('FIPS', inplace=True)
    land.reset_index(drop=True, inplace=True)
    # Get age/gender/race data (note, many counties missing)
    df = pd.read_csv(os.path.join(DATA_DIR, 'us\\demographics\\acs_2018.csv'), dtype={'FIPS':str}, engine='python')
    df.loc[df['FIPS'].str.len() == 4, 'FIPS'] = '0' + df.loc[df['FIPS'].str.len() == 4, 'FIPS']
    df.sort_values('FIPS', inplace=True)
    df.rename(columns={'Estimate!!SEX AND AGE!!Total population!!Sex ratio (males per 100 females)':'mf_ratio',
        'Estimate!!SEX AND AGE!!Total population!!Median age (years)' : 'median_age',
        'Percent Estimate!!SEX AND AGE!!Total population!!Under 5 years' : 'pop_under5',
        'Percent Estimate!!SEX AND AGE!!Total population!!5 to 9 years' : 'pop_5to9',
        'Percent Estimate!!SEX AND AGE!!Total population!!10 to 14 years' : 'pop_10to14',
        'Percent Estimate!!SEX AND AGE!!Total population!!15 to 19 years' : 'pop_15to19',
        'Percent Estimate!!SEX AND AGE!!Total population!!20 to 24 years' : 'pop_20to24',
        'Percent Estimate!!SEX AND AGE!!Total population!!25 to 34 years' : 'pop_25to34',
        'Percent Estimate!!SEX AND AGE!!Total population!!35 to 44 years' : 'pop_35to44',
        'Percent Estimate!!SEX AND AGE!!Total population!!45 to 54 years' : 'pop_45to54',
        'Percent Estimate!!SEX AND AGE!!Total population!!55 to 59 years' : 'pop_55to59',
        'Percent Estimate!!SEX AND AGE!!Total population!!60 to 64 years' : 'pop_60to64',
        'Percent Estimate!!SEX AND AGE!!Total population!!65 to 74 years' : 'pop_65to74',
        'Percent Estimate!!SEX AND AGE!!Total population!!75 to 84 years' : 'pop_75to84',
        'Percent Estimate!!SEX AND AGE!!Total population!!85 years and over' : 'pop_over85',
        'Percent Estimate!!RACE!!Total population!!One race!!White' : 'pop_white',
        'Percent Estimate!!RACE!!Total population!!One race!!Black or African American' : 'pop_black',
        'Percent Estimate!!HISPANIC OR LATINO AND RACE!!Total population!!Hispanic or Latino (of any race)' : 'pop_hispanic'}, inplace=True)
    if include_age_breakdown:
        df = df.loc[:, ['FIPS', 'mf_ratio', 'median_age', 'pop_under5', 'pop_5to9', 'pop_15to19', 'pop_20to24', 'pop_25to34', 'pop_35to44', 'pop_45to54',
            'pop_55to59', 'pop_60to64', 'pop_65to74', 'pop_75to84', 'pop_over85', 'pop_white', 'pop_black', 'pop_hispanic']]
    else:
        df = df.loc[:, ['FIPS', 'mf_ratio', 'median_age', 'pop_white', 'pop_black', 'pop_hispanic']]
    df = df.drop(df[df['FIPS'].str[:2] == '72'].index)
    demographics = population.merge(land)
    demographics = pd.merge(demographics, df, on=['FIPS'], how='outer')
    demographics[['pop_white', 'pop_black', 'pop_hispanic']] = demographics[['pop_white', 'pop_black', 'pop_hispanic']].apply(pd.to_numeric, errors='coerce')
    d = demographics.copy()
    # Fill in NaN values from acs data
    # age breakdown and mf_ratio are taken as national average, while race is taken as state average
    # more advanced/accurate technique would be to replace NaNs by average of similar counties, found through clustering
    statedemo = pd.read_csv('../other data/state_demographics.csv', dtype={'stateFIPS':str})
    statedemo.loc[:, ['White', 'Black', 'Hispanic']] *= 100
    if include_age_breakdown:
        demographics[['mf_ratio', 'median_age', 'pop_under5', 'pop_5to9', 'pop_15to19', 'pop_20to24', 'pop_25to34', 'pop_35to44', 'pop_45to54',
            'pop_55to59', 'pop_60to64', 'pop_65to74', 'pop_75to84', 'pop_over85']] = \
            demographics[['mf_ratio', 'median_age', 'pop_under5', 'pop_5to9', 'pop_15to19', 'pop_20to24', 'pop_25to34', 'pop_35to44', 'pop_45to54',
            'pop_55to59', 'pop_60to64', 'pop_65to74', 'pop_75to84', 'pop_over85']].fillna(value=demographics.mean().round(1))
    else:
        demographics[['mf_ratio', 'median_age']] = demographics[['mf_ratio', 'median_age']].fillna(value=demographics.mean().round(1))
    demographics['stateFIPS'] = demographics['FIPS'].str[:2]
    t = pd.merge(demographics, statedemo, on=['stateFIPS'])
    demographics['pop_white'].fillna(t['White'], inplace=True)
    demographics['pop_hispanic'].fillna(t['Hispanic'], inplace=True)
    demographics['pop_black'].fillna(t['Black'], inplace=True)
    del demographics['stateFIPS']
    d.to_csv(os.path.join(OTHER_DATA_DIR, 'demographics_raw.csv'), sep=',')
    demographics.to_csv(os.path.join(OTHER_DATA_DIR, 'demographics.csv'), sep=',')
| {"/models/gaussianprocess/gp_pipeline.py": ["/models/gaussianprocess/gp_model.py", "/models/utils/clustering.py"]} |
import numpy as np
#import tensorflow as tf
from . import lstm_data as dt
from . import lstm_models as models
from ..utils import dataloader as loader
class Pipeline:
def __init__(self, data_format, model_params, training_params, horizon,
loader_args={}):
'''
data_format,
model_params
- name
- lag_features
- prediction_horizon
- arguments passed into model function
- quantiles
- loss
- optimizer
training_params
- val_steps
- predict_time (when to start making predictions, in time from end)
- epochs
- batch_size
- dense
horizon: total horizon predictions needed for
'''
self.data = dt.Data(data_format, loader_args=loader_args)
try:
self.lag = model_params['lag_features']
self.k = model_params['prediction_horizon']
except KeyError:
raise ValueError('Musty specify lag features and prediction ' +
'horizon in model specification')
try:
val_steps = training_params['val_steps']
except KeyError:
raise ValueError('Must specify validation steps in training params')
if 'dense' in training_params:
self.dense = training_params['dense']
else:
self.dense = False
self.X_train, self.y_train, self.X_test, self.y_test = \
self.data.get_training_data(lag=self.lag, k=self.k,
val_steps=val_steps, dense=self.dense)
try:
name = model_params['name']
except KeyError:
raise ValueError('Must specify model name')
try:
model_creator = getattr(models, name)
except AttributeError:
raise ValueError(f'{name} is not a recognized model name')
self.model_params = model_params
model_params = {key: model_params[key] for key in model_params if key
not in ['name', 'lag_features', 'prediction_horizon']}
self.model = model_creator(self.X_train, self.y_train, **model_params)
if 'quantiles' in self.model_params and self.model_params['quantiles']:
self.quantiles = True
self.n_quantiles = self.model_params['quantiles']
else:
self.quantiles = False
if 'epochs' not in training_params:
raise ValueError('Must specify epochs in training params')
if 'batch_size' not in training_params:
raise ValueError('Must specify batch size in training params')
self.training_params = training_params
self.horizon = horizon
def run(self):
training_params = {key: self.training_params[key] for key in
self.training_params if key not in ['val_steps', 'predict_time',
'dense']}
self.model.fit(self.X_train, self.y_train,
validation_data=(self.X_test, self.y_test), shuffle=True,
**training_params)
if 'predict_time' not in self.training_params:
self.training_params['predict_time'] = 0
self.t = self.lag + self.training_params['predict_time']
self.predictions = self.predict(t=self.t, dense=self.dense)
def predict(self, t=0, dense=False):
if t < self.lag:
raise ValueError('Cannot predict from point less than number of lags')
if self.quantiles:
predictions = np.zeros((self.n_quantiles, self.data.n_counties,
self.horizon, self.data.n_targets))
else:
predictions = np.zeros((self.data.n_counties,
self.horizon, self.data.n_targets))
combined_features = np.full((self.data.n_time_features,
self.data.n_counties, self.data.tsteps), np.nan)
for i, feature in enumerate(self.data.time_features):
combined_features[i, :, :] = feature.series
combined_features = np.concatenate([combined_features,
np.full((self.data.n_time_features, self.data.n_counties,
self.horizon), np.nan)], axis=2)
# for county in range(self.data.n_counties):
# f = combined_features[0, county]
# if np.all(np.isnan(f)):
# continue
# # s is the end of the series
# s = np.argmax(np.isnan(f))
# combined_features[:, (s - t + self.lag):] = np.nan
for j in range(self.horizon // self.k):
X = np.zeros((self.data.n_counties, self.lag, self.data.n_features))
for county in range(self.data.n_counties):
f = combined_features[0, county]
if np.all(np.isnan(f)):
continue
# s is the end of the series
s = np.argmax(np.isnan(f))
#i = s - self.lag
i = s - t
if i < 0:
continue
x = combined_features[:, county, i:i + self.lag]
x = np.vstack([x] + [np.full((self.lag, ), feature[county])
for feature in self.data.time_independent_features])
if self.data.time_context:
x = np.vstack([x, np.full((self.lag, ), i)])
x = x.T
X[county, :] = x
y_pred = self.model.predict(X)
y_pred = np.array(y_pred)
if dense:
if self.quantiles:
y_pred = y_pred[:, :, -1:, :]
else:
y_pred = y_pred[:, -1:, :]
if self.quantiles:
# y_pred = y_pred.reshape(self.n_quantiles,
# self.data.n_counties, self.k, self.data.n_targets)
predictions[:, :, (j*self.k):((j + 1)*self.k), :] = y_pred
else:
# y_pred = y_pred.reshape(
# self.data.n_counties, self.k, self.data.n_targets)
predictions[:, (j*self.k):((j + 1)*self.k), :] = y_pred
for county in range(self.data.n_counties):
f = combined_features[0, county]
if np.all(np.isnan(f)):
continue
# s is the end of the series
s = np.argmax(np.isnan(f))
i = s
if self.quantiles:
combined_features[:, county, i:i + self.k] = \
np.moveaxis(y_pred, [1, 2, 3], [2, 3, 1])[4, :, county, :]
else:
combined_features[:, county, i:i + self.k] = \
np.moveaxis(y_pred, [0, 1, 2], [1, 2, 0])[:, county, :]
return predictions
def get_predictions(self):
    """Map the raw forecast tensor(s) back onto the original data.

    Returns a list with one mapped result per quantile when quantile
    forecasting is enabled, otherwise a single mapped result.
    """
    predict_time = self.training_params['predict_time']
    if not self.quantiles:
        return self.data.original_with_predictions(self.predictions, predict_time)
    # One pass of original_with_predictions per quantile slice.
    return [
        self.data.original_with_predictions(self.predictions[q], predict_time)
        for q in range(self.n_quantiles)
    ]
58,635 | panda002/Python-Beginner | refs/heads/master | /MesoJarvis/user.py | from MesoJarvis.library1 import Base
from MesoJarvis.test_cal import isPrime
# first we need to check if the function
# is actually present in the base class
# before we can implement the same in our
# derived class
assert hasattr(Base, 'foo')
class Derived(Base):
    """Concrete subclass supplying the `bar` member that Base expects."""

    def bar(self):
        """Return the constant tag identifying this subclass."""
        return 'bar'
a = int(input("Enter a number: "))


# NOTE(review): isPrime(a) returns a bool, and `class Drived2(True)` /
# `class Drived2(False)` raises TypeError ("bool is not an acceptable base
# type"), so this class statement fails at runtime for any input —
# presumably a conditional base class was intended; confirm with the author.
class Drived2(isPrime(a)):
    def isPrime(self):
        # NOTE(review): calling self.isPrime() here recurses without end.
        self.isPrime()
| {"/MesoJarvis/user.py": ["/MesoJarvis/library1.py", "/MesoJarvis/test_cal.py"], "/python-unit-testing/Checkout/test_Checkout.py": ["/python-unit-testing/Checkout/checkout.py"], "/python-unit-testing/Checkout/test_testdoubles.py": ["/python-unit-testing/Checkout/LineReader.py"]} |
58,636 | panda002/Python-Beginner | refs/heads/master | /MesoJarvis/Primelist.py | c = int(input('Enter No :'))
# Trial-division primality report for every num in [0, c).
# NOTE(review): the `else` is reconstructed as a for/else clause (runs only
# when no divisor was found) — the dump's indentation is ambiguous; confirm.
for num in range(0,c):
    if num > 1:
        for i in range(2,num):
            if (num % i) == 0:
                print(num,'is not prime')
                break
        else:
            print(num, 'is prime')

print('enter the range')
lower = int(input("lower range"))
upper = int(input("Upper range"))

# Same trial-division test, restricted to the user-supplied range.
for a in range(lower, upper + 1):
    if a > 1:
        for b in range(2, a):
            if a % b == 0:
                print(a, 'number is not prime')
                break
        else:
            print(a, 'number is prime')
| {"/MesoJarvis/user.py": ["/MesoJarvis/library1.py", "/MesoJarvis/test_cal.py"], "/python-unit-testing/Checkout/test_Checkout.py": ["/python-unit-testing/Checkout/checkout.py"], "/python-unit-testing/Checkout/test_testdoubles.py": ["/python-unit-testing/Checkout/LineReader.py"]} |
58,637 | panda002/Python-Beginner | refs/heads/master | /MesoJarvis/sum.py | num = [1, 3, 4, 5, 7, 8, 2, 4]
target = 5

# Print every pair of distinct elements of `num` that sums to `target`.
# Fixed: the original inner loop started at index 1, so it compared an
# element with itself (i == j) and reported each pair twice; starting the
# inner index at i + 1 visits each unordered pair exactly once.
for i in range(len(num)):
    for j in range(i + 1, len(num)):
        if num[i] + num[j] == target:
            print(num[i], num[j])
| {"/MesoJarvis/user.py": ["/MesoJarvis/library1.py", "/MesoJarvis/test_cal.py"], "/python-unit-testing/Checkout/test_Checkout.py": ["/python-unit-testing/Checkout/checkout.py"], "/python-unit-testing/Checkout/test_testdoubles.py": ["/python-unit-testing/Checkout/LineReader.py"]} |
58,638 | panda002/Python-Beginner | refs/heads/master | /MesoJarvis/calculator.py | from tkinter import *
def btnClick(numbers):
    """Append the pressed key to the running expression and refresh display."""
    global operator
    operator += str(numbers)
    text_Input.set(operator)
def btnEquals():
    """Evaluate the accumulated expression, show the result, reset state.

    Fixed: the original crashed (unhandled exception) on a malformed
    expression or division by zero; the display now shows "Error" instead.
    """
    global operator
    # NOTE(review): eval() on arbitrary strings is unsafe in general; it is
    # tolerable here only because input comes solely from the calculator
    # buttons — do not reuse this pattern for free-form user input.
    try:
        result = str(eval(operator))
    except (SyntaxError, ZeroDivisionError, NameError, TypeError):
        result = "Error"
    text_Input.set(result)
    operator = ""
cal = Tk()
cal.title("Calculator")

operator = ""
text_Input = StringVar()

# Display showing the running expression / result.
txtDisplay = Entry(cal, font=('arial', 20, 'bold'), textvariable=text_Input, bd=30, insertwidth=4,
                   bg="red", justify='right').grid(columnspan=4)

# (label, row, column) for every key. Building the keypad in one loop
# replaces the 14 near-identical Button(...) statements of the original;
# positions are unchanged. btnClick str()'s its argument, so passing the
# label string is equivalent to the original's integer arguments.
_BUTTONS = [
    ('1', 1, 0), ('2', 1, 1), ('3', 1, 2), ('+', 1, 3),
    ('4', 2, 0), ('5', 2, 1), ('6', 2, 2), ('-', 2, 3),
    ('7', 3, 0), ('8', 3, 1), ('9', 3, 2), ('*', 3, 3),
    ('0', 4, 1), ('/', 4, 3),
]
for label, row, col in _BUTTONS:
    # Bind the label as a default argument so each closure keeps its own key.
    Button(cal, padx=16, bd=8, fg='black', font=('arial', 20, 'bold'),
           text=label, command=lambda v=label: btnClick(v)).grid(row=row, column=col)

# "=" evaluates the expression rather than appending to it.
Button(cal, padx=16, bd=8, fg='black', font=('arial', 20, 'bold'),
       text="=", command=btnEquals).grid(row=4, column=2)

cal.mainloop()
| {"/MesoJarvis/user.py": ["/MesoJarvis/library1.py", "/MesoJarvis/test_cal.py"], "/python-unit-testing/Checkout/test_Checkout.py": ["/python-unit-testing/Checkout/checkout.py"], "/python-unit-testing/Checkout/test_testdoubles.py": ["/python-unit-testing/Checkout/LineReader.py"]} |
58,639 | panda002/Python-Beginner | refs/heads/master | /MesoJarvis/calc.py | x=int(input("enter a value"))
print(x)
# Classify x relative to 10.
# NOTE(review): values greater than 10 print nothing — confirm whether an
# else branch ("x is greater") was intended.
if(x==10):
    print("x is 10")
elif(x<10):
    print("x is less")
| {"/MesoJarvis/user.py": ["/MesoJarvis/library1.py", "/MesoJarvis/test_cal.py"], "/python-unit-testing/Checkout/test_Checkout.py": ["/python-unit-testing/Checkout/checkout.py"], "/python-unit-testing/Checkout/test_testdoubles.py": ["/python-unit-testing/Checkout/LineReader.py"]} |
58,640 | panda002/Python-Beginner | refs/heads/master | /MesoJarvis/test.py | a = {1, 2, 3, 4, 5, 6}
print(len(a))


def sum1():
    """Return the sum of the elements of the module-level set `a`."""
    total = 0
    for i in a:
        total = total + i
    return total


b = sum1()
print(b)


def avg():
    """Return the mean of `a`, using the module-level total `b`."""
    return b / len(a)


c = avg()
print(c)
| {"/MesoJarvis/user.py": ["/MesoJarvis/library1.py", "/MesoJarvis/test_cal.py"], "/python-unit-testing/Checkout/test_Checkout.py": ["/python-unit-testing/Checkout/checkout.py"], "/python-unit-testing/Checkout/test_testdoubles.py": ["/python-unit-testing/Checkout/LineReader.py"]} |
58,641 | panda002/Python-Beginner | refs/heads/master | /PythonOOP/polymorphism.py | class India:
def language(self):
print("We speak Hindi in India")
class USA:
    """Country demo class; reports its spoken language on stdout."""

    def language(self):
        # Message text is the demo's observable output — kept byte-identical.
        print("We speak English in US")
obj_india = India()
obj_usa = USA()

# Duck typing: each object supplies its own language(); no common base class
# is required for this loop to work.
for country in obj_india, obj_usa:
    country.language()
| {"/MesoJarvis/user.py": ["/MesoJarvis/library1.py", "/MesoJarvis/test_cal.py"], "/python-unit-testing/Checkout/test_Checkout.py": ["/python-unit-testing/Checkout/checkout.py"], "/python-unit-testing/Checkout/test_testdoubles.py": ["/python-unit-testing/Checkout/LineReader.py"]} |
58,642 | panda002/Python-Beginner | refs/heads/master | /MesoJarvis/test_cal.py | a = 0
def isPrime(value):
    """Return True if `value` is a prime number, else False.

    Fixed: the original returned True for every even number and False for
    every odd number (an even/odd test, not a primality test) — e.g.
    isPrime(4) was True and isPrime(3) was False.
    """
    if value < 2:
        return False
    if value % 2 == 0:
        # 2 is the only even prime.
        return value == 2
    # Trial division by odd candidates up to sqrt(value).
    d = 3
    while d * d <= value:
        if value % d == 0:
            return False
        d += 2
    return True
# Demo: primality of the module-level `a`, then the modulo / true-division /
# floor-division operators on 11.
print(isPrime(a))

a = 11
print("%", a % 2)
print("/", a / 2)
print("//", a // 2)
| {"/MesoJarvis/user.py": ["/MesoJarvis/library1.py", "/MesoJarvis/test_cal.py"], "/python-unit-testing/Checkout/test_Checkout.py": ["/python-unit-testing/Checkout/checkout.py"], "/python-unit-testing/Checkout/test_testdoubles.py": ["/python-unit-testing/Checkout/LineReader.py"]} |
58,643 | panda002/Python-Beginner | refs/heads/master | /MesoJarvis/substring.py | def ExactSame(str1, str2):
size1 = len(str1)
size2 = len(str2)
if size1 == 0:
return False
if size2 == 0:
return True
if str1[0] == str2[0]:
return ExactSame(str1[1:], str2[1:])
return False
def issubstring(str1, str2):
    """Return True if `str2` occurs as a contiguous substring of `str1`.

    Fixes two defects in the original recursive version: (1) an exact
    whole-string match returned False, because the prefix comparison recursed
    until both strings were empty and then hit the "str1 empty -> False"
    guard first; (2) when the first characters matched but the full prefix
    did not (e.g. 'aab' / 'ab'), the search stopped instead of sliding one
    character forward.
    """
    # An empty pattern matches everywhere (checked before the str1 guard so
    # that issubstring('', '') is True).
    if len(str2) == 0:
        return True
    if len(str1) < len(str2):
        return False
    if str1[:len(str2)] == str2:
        return True
    # Slide the window one character and keep searching.
    return issubstring(str1[1:], str2)
# Demo: 'SID' is a prefix (hence a substring) of 'SIDHARTH'.
str1 = 'SIDHARTH'
str2 = 'SID'
print(issubstring(str1, str2))
| {"/MesoJarvis/user.py": ["/MesoJarvis/library1.py", "/MesoJarvis/test_cal.py"], "/python-unit-testing/Checkout/test_Checkout.py": ["/python-unit-testing/Checkout/checkout.py"], "/python-unit-testing/Checkout/test_testdoubles.py": ["/python-unit-testing/Checkout/LineReader.py"]} |
58,644 | panda002/Python-Beginner | refs/heads/master | /MesoJarvis/Factorization.py | a = int(input('Enter No :'))
b = 1
# Iterative factorial of the value read above: multiplies a, a-1, ..., 2,
# leaving the product in b (a itself is consumed by the loop).
while a > 1:
    b = b*a
    a = a - 1
print(b)
# 5 = 5*4*3*2
| {"/MesoJarvis/user.py": ["/MesoJarvis/library1.py", "/MesoJarvis/test_cal.py"], "/python-unit-testing/Checkout/test_Checkout.py": ["/python-unit-testing/Checkout/checkout.py"], "/python-unit-testing/Checkout/test_testdoubles.py": ["/python-unit-testing/Checkout/LineReader.py"]} |
58,645 | panda002/Python-Beginner | refs/heads/master | /MesoJarvis/lc1.py | nums = [3,2,4]
target = 6


def twoSum(nums, target):
    """Return indices (i, j), i < j, of two distinct entries of `nums`
    summing to `target`, or False when no such pair exists.

    Fixes in this revision: the original's `else: return False` sat inside
    the first loop iteration (so only nums[0] was ever checked), an element
    could be paired with itself, and `nums.index(...)` returned the wrong
    position for duplicate values.
    """
    seen = {}  # value -> index of its first occurrence
    for j, value in enumerate(nums):
        complement = target - value
        if complement in seen:
            return seen[complement], j
        seen[value] = j
    return False
print(twoSum(nums,target)) | {"/MesoJarvis/user.py": ["/MesoJarvis/library1.py", "/MesoJarvis/test_cal.py"], "/python-unit-testing/Checkout/test_Checkout.py": ["/python-unit-testing/Checkout/checkout.py"], "/python-unit-testing/Checkout/test_testdoubles.py": ["/python-unit-testing/Checkout/LineReader.py"]} |
58,646 | panda002/Python-Beginner | refs/heads/master | /MesoJarvis/Pallindrome.py | num = str(input('enter a number : '))
print('the number entered is - ', num)
# print (len(num))
j = len(num)
# a= (list(range(j-1,-1,-1)))
# print (a)

# Walk the digits backwards; after this loop num2 holds only the FIRST digit.
for i in (list(range(j - 1, -1, -1))):
    num2 = num[i]
    print(num2)

# Walk forwards; after this loop num3 holds only the LAST digit.
for a in (list(range(0, j))):
    num3 = num[a]
    print("reverse is : ", num[a])

# NOTE(review): only the final loop values of num2/num3 survive, so this
# compares just the first digit against the last digit — '1231' would pass.
# A real palindrome check must compare every mirrored pair of characters.
if num3 == num2:
    print('Number is Pallindrome')
else:
    print('Number is NOT Pallindrome')
| {"/MesoJarvis/user.py": ["/MesoJarvis/library1.py", "/MesoJarvis/test_cal.py"], "/python-unit-testing/Checkout/test_Checkout.py": ["/python-unit-testing/Checkout/checkout.py"], "/python-unit-testing/Checkout/test_testdoubles.py": ["/python-unit-testing/Checkout/LineReader.py"]} |
58,647 | panda002/Python-Beginner | refs/heads/master | /MesoJarvis/AutomateYoutube.py | age=int(input('Please enter your age :'))
# Age bands: 1-15 -> cartoons, 16-34 -> movies/anime, otherwise a fallback.
a=list(range(1,16))
b=list(range(16,35))

if age in a:
    print('What type of cartoon would you like to watch ?')
    print('doremon\nnaruto\nchota bheem')
    cart=str(input(''))
    print('You have Choosen :',cart)
    # The chosen name is interpolated directly into a URL string.
    print('Please find the links below\nhttps://%s.com/'%(cart))
elif age in b:
    print('Movies or Anime ?')
    adult=str(input())
    # NOTE(review): comparison is case-sensitive — 'Movies' falls to anime.
    if adult == 'movies':
        print('transformers\nmatrix\navengers')
        mov=str(input(''))
        print('You have Choosen :',mov)
        print('Please find the links below\nhttps://%s.com/'%(mov))
    else:
        print('naruto\nbleach\nfullmetalalchemist')
        anime=str(input(''))
        print('You have Choosen :',anime)
        print('Please find the links below\nhttps://%s.com/\nhttps://en.wikipedia.org/wiki/Bleach_(TV_series)'%(anime))
else:
    print('Ohh!! we have something great for you\nhttps://www.youtube.com/watch?v=gLPrStGNfq4&list=RDgLPrStGNfq4&start_radio=1')
| {"/MesoJarvis/user.py": ["/MesoJarvis/library1.py", "/MesoJarvis/test_cal.py"], "/python-unit-testing/Checkout/test_Checkout.py": ["/python-unit-testing/Checkout/checkout.py"], "/python-unit-testing/Checkout/test_testdoubles.py": ["/python-unit-testing/Checkout/LineReader.py"]} |
58,648 | panda002/Python-Beginner | refs/heads/master | /python-unit-testing/Checkout/test_Checkout.py | from .checkout import Checkout
import pytest
@pytest.fixture()
def checkout():
    # Fresh Checkout preloaded with two unit-price items for every test.
    checkout = Checkout()
    checkout.add_item_price("a", 1)
    checkout.add_item_price("b", 1)
    return checkout


# def test_canadditemprice(checkout):
#     checkout.addItemPrice("a", 1)
#
#
# def test_canadditem(checkout):
#     checkout.addItem("a")
#

def test_can_calculate_total(checkout):
    checkout.add_item("a")
    assert checkout.calculate_total() == 1


def test_get_correct_total_for_multiple_items(checkout):
    checkout.add_item("a")
    checkout.add_item("b")
    assert checkout.calculate_total() == 2


def test_can_add_discount_rule(checkout):
    # Only checks that registering a rule does not raise.
    checkout.add_discount("a", 3, 2)


def test_can_apply_discount_rule(checkout):
    # Three "a" items under a 3-for-2 rule should cost 2.
    checkout.add_discount("a", 3, 2)
    checkout.add_item("a")
    checkout.add_item("a")
    checkout.add_item("a")
    assert checkout.calculate_total() == 2


def test_exception_with_bad_item(checkout):
    with pytest.raises(Exception):
        checkout.add_item("c")
| {"/MesoJarvis/user.py": ["/MesoJarvis/library1.py", "/MesoJarvis/test_cal.py"], "/python-unit-testing/Checkout/test_Checkout.py": ["/python-unit-testing/Checkout/checkout.py"], "/python-unit-testing/Checkout/test_testdoubles.py": ["/python-unit-testing/Checkout/LineReader.py"]} |
58,649 | panda002/Python-Beginner | refs/heads/master | /PythonOOP/stringtupledict.py | a = [9, 2, 3, 4, 6, 7]
print(a[2:4])
print(len(a))
b = range(6)
for i in b:
print(i)
for i in range(len(a)):
"""
len(a) return 6 and range will return 0 - 5
i will iterate from 0 - 5
"""
print(a[i])
a.sort()
print(a)
b = []
print("empty list: ", b)
for j in range(10):
b.append(j)
print("List after appending", b)
# Tuple
# Python Tuple is used to store the sequence of immutable python objects.
# Tuple is similar to lists since the value of the items stored in the list
# can be changed whereas the tuple is immutable and the value of the items
# stored in the tuple can not be changed.
# Set
# The set in python can be defined as the unordered collection of various items
# enclosed within the curly braces. The elements of the set can not be duplicate.
# The elements of the python set must be immutable.
# Unlike other collections in python, there is no index attached to the elements
# of the set, i.e., we cannot directly access any element of the set by the index.
# However, we can print them all together or we can get the list of elements by looping
# through the set.
set1 = {"Ayush","John", "David", "Martin"}
set2 = {"Steave","Milan","David", "Martin"}
print(set1.intersection(set2)) # prints the intersection of the two sets
# Dictionary
# Dictionary is used to implement the key-value pair in python. The dictionary
# is the data type in python which can simulate the real-life data arrangement
# where some specific value exists for some particular key.
# In other words, we can say that a dictionary is the collection of key-value pairs
# where the value can be any python object whereas the keys are the immutable python
# object, i.e., Numbers, string or tuple.
# Dictionary simulates Java hash-map in python.
Employee = {"Name": "John", "Age": 29, "salary": 25000, "Company": "GOOGLE"}
dict1 = {k: v for k, v in Employee.items()}
print(dict1)
print("After Update")
print(dict1)
# Let's create a dictionary, the functional way
class Dictionary(dict):
    """dict subclass exposing an explicit add(key, value) mutator.

    Fixed: the original __init__ executed `self = dict()`, which merely
    rebinds the local name and has no effect on the instance; it is replaced
    with a proper call to the base-class initialiser.
    """

    def __init__(self):
        super().__init__()

    def add(self, key, value):
        """Insert (or overwrite) `key` with `value`."""
        self[key] = value
# Demo: populate the custom dict subclass; it prints like a plain dict.
obj = Dictionary()
obj.add("name", "Sid")
obj.add("Mandu", "Aish")
print(obj)
58,650 | panda002/Python-Beginner | refs/heads/master | /MesoJarvis/linkedList.py | class Node:
def __init__(self, data):
self.data = data
self.next = None
self.previous = None
class LinkedList:
    """Minimal singly linked list (nodes must expose .data and .next)."""

    def __init__(self):
        self.head = None

    def length(self):
        """Return the number of nodes in the list."""
        count = 0
        node = self.head
        while node is not None:
            count += 1
            node = node.next
        return count

    def insert(self, newnode):
        """Append `newnode` at the tail of the list."""
        if self.head is None:
            self.head = newnode
            return
        tail = self.head
        while tail.next is not None:
            tail = tail.next
        tail.next = newnode

    def insertatpos(self, newnode, position):
        """Insert `newnode` so it ends up at 0-based index `position`.

        Fixed: the original advanced past the head before comparing, so its
        `while currentpos == position` guard was already False for position 0
        and the node was silently dropped (never linked). Positions past the
        tail now append at the end.
        """
        if position <= 0 or self.head is None:
            self.inserthead(newnode)
            return
        prev = self.head
        index = 0
        # Stop at the node that will precede the new one (or at the tail).
        while prev.next is not None and index < position - 1:
            prev = prev.next
            index += 1
        newnode.next = prev.next
        prev.next = newnode

    def inserthead(self, newnode):
        """Insert `newnode` before the current head."""
        newnode.next = self.head
        self.head = newnode

    def printlist(self):
        """Print each node's data, head to tail."""
        if self.head is None:
            print("the list is empty")
            return
        node = self.head
        while node is not None:
            print(node.data)
            node = node.next
# Build and print the demo list.
# Fixed: the original wrote `LinkedList = LinkedList()`, rebinding the class
# name to an instance (shadowing) and making any later instantiation
# impossible; the instance now has its own name.
firstnode = Node(10)
linked_list = LinkedList()
linked_list.insert(firstnode)
secondnode = Node(20)
linked_list.insert(secondnode)
thirdnode = Node(30)
linked_list.insert(thirdnode)
fourthnode = Node(40)
linked_list.insertatpos(fourthnode, 0)
linked_list.printlist()
print(linked_list.length())
| {"/MesoJarvis/user.py": ["/MesoJarvis/library1.py", "/MesoJarvis/test_cal.py"], "/python-unit-testing/Checkout/test_Checkout.py": ["/python-unit-testing/Checkout/checkout.py"], "/python-unit-testing/Checkout/test_testdoubles.py": ["/python-unit-testing/Checkout/LineReader.py"]} |
58,651 | panda002/Python-Beginner | refs/heads/master | /PythonOOP/classesnobjects.py | class Employee: # here Employee is a class adn we can create as many employees we want
id = 1
name = 'Sid'
dept = "CS"
def display_prof(self):
print("ID: %d\nName: %s" % (self.id, self.name))
def display_dept(self):
dept = self.dept
print("Dept: ", dept)
emp = Employee()
emp.display_prof()
emp.display_dept()
| {"/MesoJarvis/user.py": ["/MesoJarvis/library1.py", "/MesoJarvis/test_cal.py"], "/python-unit-testing/Checkout/test_Checkout.py": ["/python-unit-testing/Checkout/checkout.py"], "/python-unit-testing/Checkout/test_testdoubles.py": ["/python-unit-testing/Checkout/LineReader.py"]} |
58,652 | panda002/Python-Beginner | refs/heads/master | /python-unit-testing/Checkout/checkout.py | class Checkout(object):
class Discount:
    # Value object describing a "buy `quantity` for `price`" bundle rule.
    def __init__(self, quantity, price):
        self.quantity = quantity
        self.price = price
def __init__(self):
    # Per-checkout state:
    #   discount: item -> Discount rule (bundle quantity, bundle price)
    #   prices:   item -> unit price
    #   items:    item -> scanned count
    self.discount = {}
    self.prices = {}
    self.items = {}
def add_discount(self, item, quantity, price):
discount = self.Discount(quantity, price)
self.discount[item] = discount
def add_item_price(self, item, price):
self.prices[item] = price
def add_item(self, item):
if item not in self.prices:
raise Exception("Bad Item")
if item in self.items:
self.items[item] += 1
else:
self.items[item] = 1
def calculate_total(self, a: int):
"""
:param a:
:return:
"""
total = 0
for item, cnt in self.items.items():
total += self.calculate_item_total(item, cnt)
return total
def calculate_item_total(self, item, cnt):
total = 0
if item in self.discount:
discount = self.discount[item]
if cnt >= discount.quantity:
nbrdiscount = cnt / discount.quantity
total += nbrdiscount * discount.price
remaining = cnt % discount.quantity
total += remaining * self.prices[item]
else:
total += self.price[item] * cnt
else:
total += self.prices[item] * cnt
return total
| {"/MesoJarvis/user.py": ["/MesoJarvis/library1.py", "/MesoJarvis/test_cal.py"], "/python-unit-testing/Checkout/test_Checkout.py": ["/python-unit-testing/Checkout/checkout.py"], "/python-unit-testing/Checkout/test_testdoubles.py": ["/python-unit-testing/Checkout/LineReader.py"]} |
58,653 | panda002/Python-Beginner | refs/heads/master | /python-unit-testing/test_file.py | import pytest
@pytest.fixture(autouse=True)
def setup1():
    # autouse: wraps every test in this module; code after `yield` runs as
    # teardown.
    print("\nSetup 1")
    yield
    print("\nTeardown 1")


@pytest.fixture()
def setup2(request):
    print("\nSetup 2")

    def teardown_a():
        print("\nTeardown A")

    def teardown_b():
        print("\nTeardown B")

    # Finalizers run in reverse registration order: teardown_b, teardown_a.
    request.addfinalizer(teardown_a)
    request.addfinalizer(teardown_b)


def test1(setup1):
    print("Executing test1!")
    assert True


def test2(setup2):
    print("Executing test2")
    assert True
| {"/MesoJarvis/user.py": ["/MesoJarvis/library1.py", "/MesoJarvis/test_cal.py"], "/python-unit-testing/Checkout/test_Checkout.py": ["/python-unit-testing/Checkout/checkout.py"], "/python-unit-testing/Checkout/test_testdoubles.py": ["/python-unit-testing/Checkout/LineReader.py"]} |
58,654 | panda002/Python-Beginner | refs/heads/master | /MesoJarvis/library1.py | class Base:
def foo(self):
    # Returns the `bar` attribute without calling it; with the Derived
    # subclass in user.py this yields a bound method object, not the string
    # 'bar' — presumably intentional for the hasattr demo, but confirm.
    return self.bar
| {"/MesoJarvis/user.py": ["/MesoJarvis/library1.py", "/MesoJarvis/test_cal.py"], "/python-unit-testing/Checkout/test_Checkout.py": ["/python-unit-testing/Checkout/checkout.py"], "/python-unit-testing/Checkout/test_testdoubles.py": ["/python-unit-testing/Checkout/LineReader.py"]} |
58,655 | panda002/Python-Beginner | refs/heads/master | /MesoJarvis/prime.py | from builtins import sum
prices = [1, 3, 3, 2, 5]
def finalPrice(prices):
    """Stack-based accumulation over `prices` (behaviour-preserving rewrite).

    Maintains a non-decreasing stack; whenever an incoming price is not
    larger than the stack top, the popped differences (top - price) are
    added to the running result. The final answer is that accumulated
    amount plus the sum of whatever remains on the stack.
    """
    result = 0
    pending = []
    for price in prices:
        # Pop every stacked value the new price undercuts (or equals).
        while pending and price <= pending[-1]:
            result += pending.pop() - price
        pending.append(price)
    return result + sum(pending)
if __name__ == '__main__':
    prices_count = 5
    prices = [1, 3, 3, 2, 5]
    # NOTE(review): this loop appends prices_count (5) five times, growing
    # prices to [1, 3, 3, 2, 5, 5, 5, 5, 5, 5], and the return value of
    # finalPrice is discarded — confirm whether a print was intended.
    for _ in range(prices_count):
        prices_item = prices_count
        prices.append(prices_item)
    finalPrice(prices)
| {"/MesoJarvis/user.py": ["/MesoJarvis/library1.py", "/MesoJarvis/test_cal.py"], "/python-unit-testing/Checkout/test_Checkout.py": ["/python-unit-testing/Checkout/checkout.py"], "/python-unit-testing/Checkout/test_testdoubles.py": ["/python-unit-testing/Checkout/LineReader.py"]} |
58,656 | panda002/Python-Beginner | refs/heads/master | /python-unit-testing/FizzBuzzTest.py | # Test-Driven Development
# always start a test suite with test_
def fizzBuzz(param):
if (param % 3) == 0 and (param % 5) == 0:
return "FizzBuzz"
elif (param % 3) == 0:
return "Fizz"
elif (param % 5) == 0:
return "Buzz"
else:
return str(param)
def checkfizzBuzz(param, expectedvalue):
retval = fizzBuzz(param)
assert retval == expectedvalue
# not needed anymore since the below testcase
# confirms that the fizzbuzz() can be called
# def test_canCallFizzBuzz():
# fizzBuzz(1)
#
def test_get1whenpass1():
checkfizzBuzz(1, "1")
def test_get2whenpass2():
checkfizzBuzz(2, "2")
def test_getFizzwhenpass3():
retval = fizzBuzz(3)
assert retval == "Fizz"
def test_getBuzzwhenpass5():
retval = fizzBuzz(5)
assert retval == "Buzz"
def test_getFizzwhenpass6():
retval = fizzBuzz(6)
assert retval == "Fizz"
def test_getBuzzwhenpass10():
retval = fizzBuzz(10)
assert retval == "Buzz"
def test_getFizzBuzzwhenpass15():
retval = fizzBuzz(15)
assert retval == "FizzBuzz"
| {"/MesoJarvis/user.py": ["/MesoJarvis/library1.py", "/MesoJarvis/test_cal.py"], "/python-unit-testing/Checkout/test_Checkout.py": ["/python-unit-testing/Checkout/checkout.py"], "/python-unit-testing/Checkout/test_testdoubles.py": ["/python-unit-testing/Checkout/LineReader.py"]} |
class Shape:
    """Base shape carrying an optional colour."""

    def __init__(self, color=None):
        self.color = color

    def get_color(self):
        """Return the colour passed at construction (may be None)."""
        return self.color

    def __str__(self):
        # NOTE(review): concatenation raises TypeError when color is None.
        return self.get_color() + ' Shape'


# Inheritance at play: Rectangle extends Shape with dimensions.
class Rectangle(Shape):
    """Rectangle with a colour, length and width."""

    def __init__(self, color, length, width):
        super().__init__(color)
        self.length = length
        self.width = width

    def get_area(self):
        """Return length * width."""
        return self.width * self.length

    def get_perimeter(self):
        """Return 2 * (length + width)."""
        return 2 * (self.length + self.width)

    def __str__(self):
        # Original formatting kept byte-for-byte (no separators by design?).
        return self.get_color() + '' + str(self.length) + str(self.width) + ' ' + type(self).__name__

    def print_shape_data(self):
        """Print a small report (type, colour, area, perimeter)."""
        print('Shape: ', type(self).__name__)
        print('Color: ', self.get_color())
        print('Area : ', self.get_area())
        print('Perimeter:', self.get_perimeter())
shape = Shape('red')
rectangle = Rectangle('red', 2, 4)
# Fixed: the original called print_shape_data(rectangle) as a free function,
# which raises NameError — it is a method of Rectangle.
rectangle.print_shape_data()
print('Shape is ', shape.get_color())
print('area is ', rectangle.get_area())
| {"/MesoJarvis/user.py": ["/MesoJarvis/library1.py", "/MesoJarvis/test_cal.py"], "/python-unit-testing/Checkout/test_Checkout.py": ["/python-unit-testing/Checkout/checkout.py"], "/python-unit-testing/Checkout/test_testdoubles.py": ["/python-unit-testing/Checkout/LineReader.py"]} |
58,658 | panda002/Python-Beginner | refs/heads/master | /MesoJarvis/venv/Lib/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/certifi/__main__.py | from pip import where
print(where())
| {"/MesoJarvis/user.py": ["/MesoJarvis/library1.py", "/MesoJarvis/test_cal.py"], "/python-unit-testing/Checkout/test_Checkout.py": ["/python-unit-testing/Checkout/checkout.py"], "/python-unit-testing/Checkout/test_testdoubles.py": ["/python-unit-testing/Checkout/LineReader.py"]} |
58,659 | panda002/Python-Beginner | refs/heads/master | /PythonOOP/constructor.py | class Employee:
count = 0
def __init__(self, id, name, dept):
self.id = id
self.name = name
self.dept = dept
def display(self):
"""
dfsdfsdfs
:return: sdfdsfsdf
"""
print("ID: %d\nName: %s\nDept: %s" % (self.id, self.name, self.dept))
Employee.count = Employee.count + 1
emp1 = Employee(1, "Sid", "CS") # here emp1 is the object of the class Employee
emp2 = Employee(2, "Aish", "EE")
emp1.display()
emp2.display()
print("Employee added = ", Employee.count)
print(emp1.__dict__)
print(emp1.__doc__)
| {"/MesoJarvis/user.py": ["/MesoJarvis/library1.py", "/MesoJarvis/test_cal.py"], "/python-unit-testing/Checkout/test_Checkout.py": ["/python-unit-testing/Checkout/checkout.py"], "/python-unit-testing/Checkout/test_testdoubles.py": ["/python-unit-testing/Checkout/LineReader.py"]} |
class ListNode:
    """Singly linked list node."""

    def __init__(self, x):
        self.val = x
        self.next = None


class Solution:
    def removeElements(self, head: ListNode, val: int) -> ListNode:
        """Remove every node whose value equals `val`; return the new head.

        Fixes: the original immediately overwrote both parameters with
        hard-coded values (and `head` with a plain Python list), never used
        its dummy node, and looped on `while currentnode.next is None` —
        an inverted condition that meant the loop body never ran.
        """
        # Dummy node simplifies removal of a matching head.
        dummy = ListNode(-1)
        dummy.next = head
        node = dummy
        while node.next is not None:
            if node.next.val == val:
                # Unlink the matching node; do not advance, the new
                # node.next may match as well.
                node.next = node.next.next
            else:
                node = node.next
        return dummy.next
58,661 | panda002/Python-Beginner | refs/heads/master | /python-unit-testing/Checkout/test_testdoubles.py | from .LineReader import readfromfile
from unittest.mock import MagicMock
# def test_cancallreadfromfile():
# readfromfile("blah")
def test_returnscorrectstring(monkeypatch):
mock_file = MagicMock()
mock_file.readline = MagicMock(return_value="test line")
mock_open = MagicMock(return_value=mock_file)
monkeypatch.setattr("builtins.open", mock_open)
result = readfromfile("blah")
mock_open.assert_called_once_with("blah", "r")
assert result == "test line"
| {"/MesoJarvis/user.py": ["/MesoJarvis/library1.py", "/MesoJarvis/test_cal.py"], "/python-unit-testing/Checkout/test_Checkout.py": ["/python-unit-testing/Checkout/checkout.py"], "/python-unit-testing/Checkout/test_testdoubles.py": ["/python-unit-testing/Checkout/LineReader.py"]} |
58,662 | panda002/Python-Beginner | refs/heads/master | /MesoJarvis/todo.py | from tkinter import *
def onclick():
    # NOTE(review): Day1/Day2/Day3 are the return values of Entry(...).grid(),
    # and grid() returns None — so all three buttons print "None". To obtain
    # the typed text, read the StringVars (text_input.get(), ...) instead.
    print(Day1)
    print(Day2)
    print(Day3)
cal = Tk()
cal.title("ToDo List")

# One StringVar per entry row.
text_input = StringVar()
text_input1 = StringVar()
text_input2 = StringVar()

# Three submit buttons; NOTE(review): all share the same onclick callback.
btn1 = Button(cal, padx=10, bd=4, fg='black', font=('arial', 20, 'bold'),
              text="Submit", command=onclick).grid(row=0, column=5)
btn2 = Button(cal, padx=10, bd=4, fg='black', font=('arial', 20, 'bold'),
              text="Submit", command=onclick).grid(row=1, column=5)
btn3 = Button(cal, padx=10, bd=4, fg='black', font=('arial', 20, 'bold'),
              text="Submit", command=onclick).grid(row=2, column=5)

# NOTE(review): Entry(...).grid() returns None, so Day1/Day2/Day3 are None
# (see onclick above) — keep a reference to the Entry widget separately if
# the widgets themselves are needed.
Day1 = Entry(cal, text="1. ", font=('arial', 20, 'bold'), textvariable=text_input, bd=5, insertwidth=10,
             bg="light blue", justify='left').grid(columnspan=4)
Day2 = Entry(cal, font=('arial', 20, 'bold'), textvariable=text_input1, bd=5, insertwidth=10,
             bg="light blue", justify='left').grid(columnspan=4)
Day3 = Entry(cal, font=('arial', 20, 'bold'), textvariable=text_input2, bd=5, insertwidth=10,
             bg="light blue", justify='left').grid(columnspan=4)

var1 = IntVar()
Checkbutton(cal, text=Day1, variable=var1).grid(row=4, sticky=W)
var2 = IntVar()
Checkbutton(cal, text=Day2, variable=var2).grid(row=5, sticky=W)
var3 = IntVar()
# NOTE(review): row=4 repeats the first checkbutton's row — likely meant 6.
Checkbutton(cal, text=Day3, variable=var3).grid(row=4, sticky=W)

cal.mainloop()
| {"/MesoJarvis/user.py": ["/MesoJarvis/library1.py", "/MesoJarvis/test_cal.py"], "/python-unit-testing/Checkout/test_Checkout.py": ["/python-unit-testing/Checkout/checkout.py"], "/python-unit-testing/Checkout/test_testdoubles.py": ["/python-unit-testing/Checkout/LineReader.py"]} |
58,663 | panda002/Python-Beginner | refs/heads/master | /PythonOOP/inheritance.py | class Animal:
def speak(self):
print("Animal Speaking")
class Dog(Animal):
    """A Dog is an Animal that can additionally bark."""

    def bark(self):
        print("Dog Barking")


class DogActivity(Dog):
    """Adds an eating behaviour on top of Dog (three-level hierarchy)."""

    def eat(self):
        print("Dog is eating")


# Demonstrate behaviour inherited across all three levels.
d = DogActivity()
d.bark()
d.speak()
d.eat()
class Arithmetic:
    """Basic binary arithmetic helpers."""

    def sum(self, a, b):
        """Return a + b."""
        return a + b

    def mul(self, a, b):
        """Return a * b."""
        return a * b

    def div(self, a, b):
        """Return a / b (true division)."""
        return a / b


class Derived(Arithmetic):
    """Demonstrates method overriding: its sum() multiplies instead."""

    def sum(self, a, b):
        return a * b


d = Derived()
print(d.div(1, 2))
print(d.mul(1, 2))
# Derived.sum shadows Arithmetic.sum, so this prints 1*2, not 1+2.
print(d.sum(1, 2))
| {"/MesoJarvis/user.py": ["/MesoJarvis/library1.py", "/MesoJarvis/test_cal.py"], "/python-unit-testing/Checkout/test_Checkout.py": ["/python-unit-testing/Checkout/checkout.py"], "/python-unit-testing/Checkout/test_testdoubles.py": ["/python-unit-testing/Checkout/LineReader.py"]} |
def readfromfile(filename):
    """Return the first line of `filename` (trailing newline included).

    Fixed: the original never closed the file handle (resource leak). A
    try/finally is used rather than a `with` block so the MagicMock-based
    double in test_testdoubles.py (which stubs open's return value directly)
    keeps working.
    """
    infile = open(filename, "r")
    try:
        return infile.readline()
    finally:
        infile.close()
| {"/MesoJarvis/user.py": ["/MesoJarvis/library1.py", "/MesoJarvis/test_cal.py"], "/python-unit-testing/Checkout/test_Checkout.py": ["/python-unit-testing/Checkout/checkout.py"], "/python-unit-testing/Checkout/test_testdoubles.py": ["/python-unit-testing/Checkout/LineReader.py"]} |
58,666 | hungpham2511/NeuroKit | refs/heads/master | /neurokit2/ecg/ecg_ecgsynth.py | # -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
import scipy
import math
from ..signal import signal_resample
def _ecg_simulate_ecgsynth(sfecg=256, N=256, Anoise=0, hrmean=60, hrstd=1, lfhfratio=0.5, sfint=256, ti=(-70, -15, 0, 15, 100), ai=(1.2, -5, 30, -7.5, 0.75), bi=(0.25, 0.1, 0.1, 0.1, 0.4)):
    """Simulate an ECG signal with the ECGSYN dynamical model.

    Python translation of the MATLAB script by Patrick McSharry & Gari
    Clifford; all credit goes to them.

    Interface-compatible fixes in this revision:
    - mutable list defaults replaced with tuples;
    - the rr-process length is 2**ceil(log2(...)) as in MATLAB ecgsyn.m
      (the previous code squared the exponent instead);
    - the measurement noise draws len(z) samples (`size=`) — previously
      np.random.uniform(len(z)) produced a SINGLE draw on [0, len(z)).

    Parameters follow ECGSYN: sfecg/sfint sampling rates, N beat count,
    Anoise additive noise amplitude, hrmean/hrstd heart-rate mean and std,
    lfhfratio LF/HF spectral ratio, ti/ai/bi the PQRST extrema angles
    (degrees), amplitudes and widths.
    """
    ti = np.array(ti)*np.pi/180

    # Adjust extrema parameters for mean heart rate
    hrfact = np.sqrt(hrmean/60)
    hrfact2 = np.sqrt(hrfact)
    bi = hrfact*np.array(bi)
    ti = np.array([hrfact2, hrfact, 1, hrfact, hrfact2])*ti

    # Check that sfint is an integer multiple of sfecg
    q = np.round(sfint/sfecg)
    qd = sfint/sfecg
    if q != qd:
        raise ValueError('Internal sampling frequency (sfint) must be an integer multiple of the ECG sampling frequency (sfecg). Your current choices are: sfecg = ' + str(sfecg) + ' and sfint = ' + str(sfint) + '.')

    # Frequency parameters for the rr process; flo and fhi correspond to the
    # Mayer waves and respiratory rate respectively
    flo = 0.1
    fhi = 0.25
    flostd = 0.01
    fhistd = 0.01

    # Calculate time scales for rr and total output
    sfrr = 1
    trr = 1/sfrr
    rrmean = 60/hrmean
    # Power-of-two series length for the spectral rr generator
    # (MATLAB: 2^ceil(log2(N*rrmean/trr))).
    n = 2**int(np.ceil(np.log2(N*rrmean/trr)))

    rr0 = _ecg_simulate_rrprocess(flo, fhi, flostd, fhistd, lfhfratio, hrmean, hrstd, sfrr, n)

    # Upsample rr time series from 1 Hz to sfint Hz
    rr = signal_resample(rr0, sampling_rate=1, desired_sampling_rate=sfint)

    # Make the rrn time series: spread each rr interval over the samples it
    # covers at the internal sampling rate.
    dt = 1/sfint
    rrn = np.zeros(len(rr))
    tecg = 0
    i = 0
    while i < len(rr):
        tecg = tecg+rr[i]
        ip = int(np.round(tecg/dt))
        rrn[i:ip] = rr[i]
        i = ip+1
    Nt = ip

    # Integrate system using fourth order Runge-Kutta
    x0 = [1, 0, 0.04]
    Tspan = np.linspace(0, (Nt-1)*dt, len(rrn))
    dxdt = _ecg_simulate_derivsecgsyn(Tspan=Tspan, rrn=rrn, ti=ti, x0=x0, sfint=sfint, ai=ai, bi=bi)

    # downsample to required sfecg
    X = dxdt[np.arange(0, len(dxdt), q).astype(int)]

    # Scale signal to lie between -0.4 and 1.2 mV
    z = X.copy()
    zmin = np.min(z)
    zmax = np.max(z)
    zrange = zmax - zmin
    z = (z - zmin)*(1.6)/zrange - 0.4

    # include additive uniformly distributed measurement noise on [-1, 1]
    eta = 2*np.random.uniform(size=len(z)) - 1
    s = z + Anoise*eta
    return(s)
def _ecg_simulate_derivsecgsyn(Tspan, rrn, ti, x0=[1, 0, 0.04], sfint=512, ai=[1.2, -5, 30, -7.5, 0.75], bi=[0.25, 0.1 , 0.1 , 0.1 , 0.4 ]):
xi = np.cos(ti)
yi = np.sin(ti)
ta = math.atan2(x0[1], x0[0])
r0 = 1
a0 = 1.0 - np.sqrt(x0[0]**2 + x0[1]**2)/r0
ip = np.floor(Tspan*sfint).astype(int)
# w0 = 2*np.pi/rrn[ip.astype(int)]
w0 = 2*np.pi/rrn[ip[ip <= np.max(rrn)]]
fresp = 0.25
zbase = 0.005*np.sin(2*np.pi*fresp*Tspan)
dx1dt = a0*x0[0] - w0*x0[1]
dx2dt = a0*x0[1] + w0*x0[0]
dti = np.remainder(ta - ti, 2*np.pi)
dx3dt = -np.sum(ai * dti * np.exp(-0.5*(dti/bi)**2)) - 1*(x0[2] - zbase)
dxdt = np.concatenate([dx1dt, dx2dt, dx3dt])
return(dxdt)
def _ecg_simulate_rrprocess(flo=0.1, fhi=0.25, flostd=0.01, fhistd=0.01, lfhfratio=0.5, hrmean=60, hrstd=1, sfrr=1, n=64):
w1 = 2*np.pi*flo
w2 = 2*np.pi*fhi
c1 = 2*np.pi*flostd
c2 = 2*np.pi*fhistd
sig2 = 1
sig1 = lfhfratio
rrmean = 60/hrmean
rrstd = 60*hrstd/(hrmean*hrmean)
df = sfrr/n
w = np.arange(n-1)*2*np.pi*df
dw1 = w-w1
dw2 = w-w2
Hw1 = sig1*np.exp(-0.5*(dw1/c1)**2)/np.sqrt(2*np.pi*c1**2)
Hw2 = sig2*np.exp(-0.5*(dw2/c2)**2)/np.sqrt(2*np.pi*c2**2)
Hw = Hw1 + Hw2
Hw0 = np.concatenate((Hw[0:int(n/2)], Hw[int(n/2)-1::-1]))
Sw = (sfrr/2)*np.sqrt(Hw0)
ph0 = 2*np.pi*np.random.uniform(size=int(n/2-1))
ph = np.concatenate([[0], ph0, [0], -np.flipud(ph0)])
SwC = Sw * np.exp(1j*ph)
x = (1/n)*np.real(np.fft.ifft(SwC))
xstd = np.std(x)
ratio = rrstd/xstd
rr = rrmean + x*ratio
return(rr)
| {"/neurokit2/ecg/ecg_ecgsynth.py": ["/neurokit2/signal/__init__.py"], "/tests/tests_eeg.py": ["/neurokit2/__init__.py"], "/neurokit2/stats/__init__.py": ["/neurokit2/stats/standardize.py"], "/neurokit2/ecg/__init__.py": ["/neurokit2/ecg/ecg_simulate.py"], "/neurokit2/events/__init__.py": ["/neurokit2/events/plot_events_in_signal.py", "/neurokit2/events/events_to_mne.py"], "/neurokit2/ecg/ecg_simulate.py": ["/neurokit2/signal/__init__.py"], "/neurokit2/ppg/ppg_simulate.py": ["/neurokit2/signal/__init__.py"], "/tests/tests_events.py": ["/neurokit2/__init__.py"], "/docs/img/README_examples.py": ["/neurokit2/__init__.py"], "/tests/tests_ecg.py": ["/neurokit2/__init__.py"], "/tests/test_data.py": ["/neurokit2/__init__.py"], "/neurokit2/ppg/__init__.py": ["/neurokit2/ppg/ppg_simulate.py"], "/tests/tests_signal.py": ["/neurokit2/__init__.py"], "/tests/tests_emg.py": ["/neurokit2/__init__.py"], "/neurokit2/__init__.py": ["/neurokit2/stats/__init__.py", "/neurokit2/signal/__init__.py", "/neurokit2/events/__init__.py", "/neurokit2/ecg/__init__.py", "/neurokit2/emg/__init__.py", "/neurokit2/ppg/__init__.py"], "/neurokit2/signal/__init__.py": ["/neurokit2/signal/signal_binarize.py"]} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.