index
int64 | repo_name
string | branch_name
string | path
string | content
string | import_graph
string |
|---|---|---|---|---|---|
7,334
|
smantinc/pyaml
|
refs/heads/master
|
/libaml/utils/decorator.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import inspect
import struct
class Struct:
def __init__(self, signature, fieldnames):
self._signature = signature
self._fieldnames = fieldnames
self._structs = Struct.initstructs(signature, fieldnames)
self._structsize = Struct.calculatesize(signature)
@staticmethod
def initstructs(signature, fieldnames):
nameoffset = 0
class BySignature:
def __init__(self, names, signature):
self._signature = signature
self._names = names
self._size = struct.calcsize(signature)
def parse(self, _buf, *args, **kwargs):
return zip(self._names, struct.unpack(self._signature, _buf[:self._size])), _buf[self._size:]
def tobytes(self, obj):
return struct.pack(self._signature, *[getattr(obj, i) for i in self._names])
class ByStruct:
def __init__(self, name, st):
self._name = name
self._struct = st
def parse(self, _buf, *args, **kwargs):
obj, nextbuf = self._struct.parse(_buf, *args, **kwargs)
return [(self._name, obj)], nextbuf
def tobytes(self, obj):
st = getattr(obj, self._name)
return st.tobytes()
structs = []
if type(signature) is str:
structs.append(BySignature(fieldnames, signature))
else:
for sig in signature:
if type(sig) is str:
names = fieldnames[nameoffset:nameoffset+len(sig)]
nameoffset += len(names)
structs.append(BySignature(names, sig))
else:
name = fieldnames[nameoffset]
nameoffset += 1
structs.append(ByStruct(name, sig))
return structs
@staticmethod
def calculatesize(signature):
return sum([struct.calcsize(i) if type(i) is str else i.size for i in signature])
@staticmethod
def override(cls, name, attr):
if hasattr(cls, name):
setattr(cls, '_' + name, attr)
else:
setattr(cls, name, attr)
def __call__(self, cls):
def getinitargs(kwargs):
return dict([(k, v) for k, v in kwargs.items() if k in cls._INIT_KWARGS])
def create(*args, **kwargs):
s = cls(**getinitargs(kwargs))
for i, j in enumerate(args):
setattr(s, self._fieldnames[i], j)
return s
def tobytes(s):
bos = [i.tobytes(s) for i in self._structs]
return b''.join(bos)
def parse(buf, *args, **kwargs):
obj = cls(*args, **getinitargs(kwargs))
for i in self._structs:
items, buf = i.parse(buf, *args, **kwargs)
for k, v in items:
setattr(obj, k, v)
return obj, buf
try:
cls._INIT_KWARGS = set(inspect.getargspec(cls.__init__)[0][1:])
except (TypeError, AttributeError):
cls._INIT_KWARGS = set([])
Struct.override(cls, 'tobytes', tobytes)
Struct.override(cls, 'create', staticmethod(create))
Struct.override(cls, 'parse', staticmethod(parse))
Struct.override(cls, 'size', self._structsize)
return cls
@Struct('HHI', ['type', 'headerSize', 'size'])
class MyStruct:
pass
@Struct(['II', MyStruct, 'HHL'], ['id', 'name', 'typedValue', 's1', 's2', 's3'])
class MyAnotherStruct:
pass
if __name__ == '__main__':
s1 = MyStruct.create(10, 20, 30)
buf = s1.tobytes()
s1, p = MyStruct.parse(buf)
ss = MyAnotherStruct.create(1, 2, s1, 3, 4, 5)
buf = ss.tobytes()
ss1, p = MyAnotherStruct.parse(buf)
print(str(ss1))
|
{"/libaml/aml.py": ["/libaml/utils/decorator.py"], "/examples/parse.py": ["/libaml/aml.py"], "/examples/increase_version_code.py": ["/libaml/aml.py"]}
|
7,341
|
samy-3687/ProductJoin
|
refs/heads/main
|
/RESTJoinTables/views.py
|
from RESTJoinTables.serializers import JoinTableSerialize
from RESTJoinTables.models import JoinTablesmodel
from rest_framework import viewsets
class JoinTableApi(viewsets.ModelViewSet):
queryset = JoinTablesmodel.objects.raw('SELECT PdInf.prd_name, PdInf.prd_cat, PdInf.prd_price, PdInf.prd_id, PdDet.prd_desc FROM Product_info PdInf LEFT OUTER JOIN Product_detail PdDet ON PdInf.prd_id = PdDet.prd_id')
serializer_class = JoinTableSerialize
|
{"/RESTJoinTables/views.py": ["/RESTJoinTables/serializers.py", "/RESTJoinTables/models.py"], "/RESTJoinTables/serializers.py": ["/RESTJoinTables/models.py"]}
|
7,342
|
samy-3687/ProductJoin
|
refs/heads/main
|
/RESTJoinTables/serializers.py
|
from rest_framework import serializers
from RESTJoinTables.models import JoinTablesmodel
class JoinTableSerialize(serializers.ModelSerializer):
class Meta:
model= JoinTablesmodel
fields= '__all__'
|
{"/RESTJoinTables/views.py": ["/RESTJoinTables/serializers.py", "/RESTJoinTables/models.py"], "/RESTJoinTables/serializers.py": ["/RESTJoinTables/models.py"]}
|
7,343
|
samy-3687/ProductJoin
|
refs/heads/main
|
/RESTJoinTables/models.py
|
from django.db import models
class JoinTablesmodel(models.Model):
prd_name = models.CharField(max_length = 50)
prd_cat = models.CharField(max_length = 50)
prd_price = models.IntegerField()
prd_id = models.IntegerField(primary_key = True)
prd_desc = models.CharField(max_length = 500)
|
{"/RESTJoinTables/views.py": ["/RESTJoinTables/serializers.py", "/RESTJoinTables/models.py"], "/RESTJoinTables/serializers.py": ["/RESTJoinTables/models.py"]}
|
7,349
|
redyandri/doc2vec-master
|
refs/heads/master
|
/summarizer.py
|
from gensim.summarization.summarizer import summarize
import pandas as pd
import numpy as np
from victorinox import victorinox
class MyIter(object):
path=""
def __init__(self,fp):
self.path=fp
def __iter__(self):
# path = datapath(self.path)
with open(self.path, 'r', encoding='utf-8') as fin:
for line in fin:
yield line
tool=victorinox()
csvsrc=r"data/dataset_lower_clean_stem_staff_group_with_periods.csv"
csvsummary=r"data/dataset_lower_clean_stem_staff_group_with_periods_summary.csv"
# df=pd.read_csv(csvsrc,sep=";")
# dftarget=df[df.values=="196808172003121001_syafarrudin"]#196302201983111001_dedysulistiarto"]
# text="\n".join(dftarget.iloc[:,0])
# print(text)
# print("########################################################")
# summ=summarize(text)
# print(summ)
# sumsentences=str(summ).splitlines()
# print("COUNT ori:%d" %len(dftarget))
# print("COUNT summary:%d" %len(sumsentences))
# df=pd.read_csv(csvsrc,sep=";")
# lengths=[]
# for idx,row in df.iterrows():
# lengths.append(len(str(row["KOMPETENSI"]).split()))
# print(np.mean(lengths))
###9.421897018021408
corpus=MyIter(csvsrc)
with open(csvsummary,"a+") as f:
i=1
for line in corpus:
parts=line.split(";")
id = parts[0]
if(id=="ID_PEGAWAI"):
continue
doc=parts[1]
lineddoc="\n".join(doc.split("."))
summary=summarize(lineddoc)
summary=". ".join(summary.split("\n"))
l=";".join([id,summary])
f.write(l+"\n")
print("\rwrite %d / 114253"%(i),end="",flush=True)
i+=1
|
{"/summarizer.py": ["/victorinox.py"], "/train_fasttext.py": ["/victorinox.py"], "/preprocess.py": ["/victorinox.py"], "/train.py": ["/victorinox.py"], "/app.py": ["/victorinox.py"], "/train_word2vec.py": ["/victorinox.py"]}
|
7,350
|
redyandri/doc2vec-master
|
refs/heads/master
|
/train_fasttext.py
|
from victorinox import victorinox
from gensim.models import FastText
from gensim.test.utils import datapath
from gensim.test.utils import get_tmpfile
from gensim.utils import tokenize
from gensim import utils
import logging
#enable logging
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
class MyIter(object):
path=""
def __init__(self,fp):
self.path=fp
def __iter__(self):
# path = datapath(self.path)
with utils.open(self.path, 'r', encoding='utf-8') as fin:
for line in fin:
yield list(tokenize(line))
#https://radimrehurek.com/gensim/models/fasttext.html
# vector_size = 100
# window_size = 5
# min_count = 1
# sampling_threshold = 1e-5
# negative_size = 5
# train_epoch = 100
# dm = 0 #0 = dbow; 1 = dmpv
# worker_count = 1
# sg=0
# dataset_path=r"data\dataset_lower_clean_stem_sentence.csv"
# corpus_file = datapath(dataset_path)
# model_path=r"model\fasttext100.bin"
# pretrained=r"model\cc.id.300.bin\cc.id.300.bin"
# pretrained_vec=r"model\cc.id.300.vec\cc.id.300.vec"
# corpus=MyIter(dataset_path)
# fasttext_model=FastText(corpus,
# size=vector_size,
# window=window_size,
# min_count=min_count,
# sample=sampling_threshold,
# workers=worker_count,
# hs=0,
# sg=sg,
# #dm=dm,
# negative=negative_size,
# #dbow_words=1,
# #dm_concat=1,
# #pretrained_emb=pretrained_vec,
# iter=train_epoch)
# fasttext_model.save(model_path)
#
#
# model_path=r"model\fasttext100.bin"
# fasttext_model = FastText.load(model_path)
# sim=fasttext_model.wv.most_similar(['nosql', 'mongodb'])
# print(sim)
vector_size = 100
window_size = 5
min_count = 5
sampling_threshold = 1e-5
negative_size = 5
train_epoch = 100
dm = 0 #0 = dbow; 1 = dmpv
worker_count = 1
sg=0
dataset_path=r"data/dataset_lower_clean_stem_sentence.csv"
corpus_file = datapath(dataset_path)
model_path=r"model/fasttext100/fasttext100_retrain.bin"
pretrained=r"model/fasttext100/fasttext100.bin"
pretrained_vec=r"model\cc.id.300.vec\cc.id.300.vec"
corpus=MyIter(dataset_path)
fasttext_model=FastText(corpus,
size=vector_size,
window=window_size,
min_count=min_count,
sample=sampling_threshold,
workers=worker_count,
hs=0,
sg=sg,
#dm=dm,
negative=negative_size,
#dbow_words=1,
#dm_concat=1,
#pretrained_emb=pretrained_vec,
iter=train_epoch)
fasttext_model.save(model_path)
model_path=r"model\fasttext100.bin"
fasttext_model = FastText.load(model_path)
sim=fasttext_model.wv.most_similar(['nosql', 'mongodb'])
print(sim)
|
{"/summarizer.py": ["/victorinox.py"], "/train_fasttext.py": ["/victorinox.py"], "/preprocess.py": ["/victorinox.py"], "/train.py": ["/victorinox.py"], "/app.py": ["/victorinox.py"], "/train_word2vec.py": ["/victorinox.py"]}
|
7,351
|
redyandri/doc2vec-master
|
refs/heads/master
|
/runner.py
|
from gensim.models import Word2Vec
from gensim.models import FastText
model_path=r"model\fasttext100.bin"
fasttext_model = FastText.load(model_path)
print(fasttext_model.wv.most_similar("sql server"))
model_path=r"model\word2vec100.bin"
w2v_model = Word2Vec.load(model_path)
print(w2v_model.wv.most_similar("sql server"))
|
{"/summarizer.py": ["/victorinox.py"], "/train_fasttext.py": ["/victorinox.py"], "/preprocess.py": ["/victorinox.py"], "/train.py": ["/victorinox.py"], "/app.py": ["/victorinox.py"], "/train_word2vec.py": ["/victorinox.py"]}
|
7,352
|
redyandri/doc2vec-master
|
refs/heads/master
|
/preprocess.py
|
from victorinox import victorinox
import nltk
import pandas as pd
import logging
import pickle
#enable logging
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
class MyIter(object):
path=""
def __init__(self,fp):
self.path=fp
def __iter__(self):
# path = datapath(self.path)
with open(self.path, 'r', encoding='utf-8') as fin:
for line in fin:
yield line
tool=victorinox()
datapath_staffs=r"data/dataset_lower_clean_stem_staff.csv"
datapath_staffs_nip=r"data/dataset_lower_clean_stem_staff_with_nip.csv"
dataset_vector=r"data/tfidf_per_sentence_vectors.csv"
dataset_vector_nip=r"data/tfidf_per_sentence_vectors_nip.csv"
dataset_vector_idseq=r"data/tfidf_per_sentence_vectors_idseq.csv"
staff_dictionary=r"data/staff_dictionary.pkl"
staff_dictionary_by_sequence=r"data/staff_dictionary_by_sequence.pkl"
staff_dictionary_by_sequence_reveresed=r"data/staff_dictionary_by_sequence_reversed.pkl"
# xls_p=r"C:\Users\redy.andriyansah\Documents\project\competence_analytics\doc2vec\doc2vec-master\data\training.xlsx"
# csv_p=r"C:\Users\redy.andriyansah\Documents\project\competence_analytics\doc2vec\doc2vec-master\data\training.csv"
# xls_pegawai=r"C:\Users\redy.andriyansah\Documents\project\competence_analytics\doc2vec\doc2vec-master\data\pegawai_pusintek.xlsx"
# csv_pegawai=r"C:\Users\redy.andriyansah\Documents\project\competence_analytics\doc2vec\doc2vec-master\data\pegawai.csv"
# # get name,training
# tool.convert_xls_to_csv(xls_path=xls_p,
# csv_path=csv_p,
# sheet_name="training",
# row_start_idx=1,
# column_idx=[1,5])
#
# # get nip, name
# tool.convert_xls_to_csv(xls_path=xls_pegawai,
# csv_path=csv_pegawai,
# sheet_name="Data",
# row_start_idx=1,
# column_idx=[-2,-1])
# csv_nip_training=r"C:\Users\redy.andriyansah\Documents\project\competence_analytics\doc2vec\doc2vec-master\data\training_nipname.csv"
# tool.merge_csv(csv_path1=csv_p,
# csv_path2=csv_pegawai,
# csv_dest=csv_nip_training,
# sep=";",
# join_op="left",
# join_col="name")
# name_nip=r"C:\Users\redy.andriyansah\Documents\project\competence_analytics\doc2vec\doc2vec-master\data\name_nip.csv"
# disposisi_nip=r"C:\Users\redy.andriyansah\Documents\project\competence_analytics\doc2vec\doc2vec-master\data\disposisi_normalized_pusintek2.csv"
# disposisi_nipname=r"C:\Users\redy.andriyansah\Documents\project\competence_analytics\doc2vec\doc2vec-master\data\disposisi_nipname.csv"
# tool.merge_csv(csv_path1=disposisi_nip,
# csv_path2=name_nip,
# csv_dest=disposisi_nipname,
# sep=";",
# join_op="left",
# join_col="nip",
# out_cols=["disposisi","nip"])
# disposisi_path=r"C:\Users\redy.andriyansah\Documents\project\competence_analytics\doc2vec\doc2vec-master\data\disposisi_pusintek.csv"
# disposisi_normalized_path=r"C:\Users\redy.andriyansah\Documents\project\competence_analytics\doc2vec\doc2vec-master\data\disposisi_normalized_pusintek.csv"
# tool.replace_string_in_file(file_path=disposisi_path,
# file_path_dest=disposisi_normalized_path)
# disposisi_normalized_path=r"C:\Users\redy.andriyansah\Documents\project\competence_analytics\doc2vec\doc2vec-master\data\disposisi_normalized_pusintek.csv"
# disposisi_normalized_path2=r"C:\Users\redy.andriyansah\Documents\project\competence_analytics\doc2vec\doc2vec-master\data\disposisi_normalized_pusintek2.csv"
# tool.switch_columns(disposisi_normalized_path,disposisi_normalized_path2)
# dataset=r"data/id_kompetensi_flag.csv"
# dataset_lower=r"data/id_kompetensi_flag_lower.csv"
# tool.lower_contents(csv_src=dataset,
# csv_dest=dataset_lower,
# sep=";")
# dataset_lower=r"data/id_kompetensi_flag_lower.csv"
# dataset_lower_clean=r"data/dataset_lower_clean.csv"
# tool.remove_stopwords(csv_src=dataset_lower,
# csv_dest=dataset_lower_clean,
# cols_to_clean=["KOMPETENSI"],
# sep=";")
#nltk.download('wordnet')
# dataset_lower_clean=r"data/dataset_lower_clean.csv"
# dataset_lower_clean_stem=r"data/dataset_lower_clean_stem.csv"
# tool.stem(csv_src=dataset_lower_clean,
# csv_dest=dataset_lower_clean_stem,
# cols_to_clean="KOMPETENSI",
# sep=";")
# dataset_lower_clean_stem=r"data/dataset_lower_clean_stem.csv"
# dataset_lower_clean_stem_sentence=r"data/dataset_lower_clean_stem_sentence.csv"
# tool.create_sentence_list(csv_src=dataset_lower_clean_stem,
# csv_dest=dataset_lower_clean_stem_sentence,
# cols_to_write="KOMPETENSI",
# sep=";")
# dataset_lower_clean_stem=r"data/dataset_lower_clean_stem.csv"
# dataset_lower_clean_stem_group=r"data/dataset_lower_clean_stem_group.csv"
# tool.group_sentences(csv_src=dataset_lower_clean_stem,
# csv_dest=dataset_lower_clean_stem_group,
# col_to_groupby="ID_PEGAWAI",
# sep=";")
# xls_pegawai=r"data/monitoring data pegawai april 2018 -share.xlsx"
# csv_pegawai=r"data/pegawai.csv"
# tool.convert_xls_to_csv(xls_path=xls_pegawai,
# csv_path=csv_pegawai,
# sheet_name="monev",
# row_start_idx=0,
# column_idx=[0,1,2,3,4,5,6,7,8,9])
# csv_employees=r"data/dataset_lower_clean_stem_group.csv"
# csv_leaders=r"data/leaders.csv"
# csv_emploees_noleader=r"data/dataset_lower_clean_stem_group_staffs.csv"
# leaders=[]
# with open(csv_leaders) as f:
# leaders=f.read().splitlines()
# df=pd.read_csv(csv_employees,sep=";")
# ID_PEGAWAI=[]
# KOMPETENSI=[]
# for idx,row in df.iterrows():
# nip=str(row["ID_PEGAWAI"]).split("_")[0]
# if(nip in leaders):
# continue
# ID_PEGAWAI.append(row["ID_PEGAWAI"])
# KOMPETENSI.append(row["KOMPETENSI"])
# newdf_json={"ID_PEGAWAI":ID_PEGAWAI,
# "KOMPETENSI":KOMPETENSI}
# newdf=pd.DataFrame(newdf_json)
# newdf.to_csv(csv_emploees_noleader,sep=";",index=None)
# csv_emploees_noleader=r"data/dataset_lower_clean_stem_group_staffs.csv"
# csv_emploees_noleader_sentences=r"data/dataset_lower_clean_stem_group_staffs_sentences.csv"
# df=pd.read_csv(csv_emploees_noleader,sep=";")
# df=df.KOMPETENSI
# df.to_csv(csv_emploees_noleader_sentences,index=None)
# corpus=MyIter(dataset_vector)
# with open(dataset_vector_nip,"a+") as f:
# for line in corpus:
# parts=line.split(";")
# vec = parts[0:-1]
# id = parts[-1].replace("\n", "")
# nip=id.split("_")[0]
# l=vec+[nip]
# f.write(";".join(l))
# f.write("\n")
# corpus=MyIter(dataset_vector)
# dct={}
# with open(dataset_vector_nip,"a+") as f:
# for line in corpus:
# parts=line.split(";")
# id = parts[-1].replace("\n", "")
# idparts=id.split("_")
# nip=idparts[0]
# name = idparts[-1]
# dct[nip]=name
# with open(staff_dictionary,"wb+") as f:
# pickle.dump(dct,f)
# with open(staff_dictionary,"rb") as f:
# kamus=pickle.load(f)
# print("nip:198401112009011004, name:%s"%(kamus["198401112009011004"]))
# corpus=MyIter(dataset_vector)
# dct={}
# i=0
# with open(staff_dictionary,"rb") as f:
# kamus=pickle.load(f)
# for k,v in kamus.items():
# dct[i]=k+"_"+v
# i+=1
# with open(staff_dictionary_by_sequence,"wb+") as f:
# pickle.dump(dct,f)
# with open(staff_dictionary_by_sequence,"rb") as f:
# kamus2=pickle.load(f)
# print("employee id 0=%s"%(kamus2[0]))
# dct={}
# with open(staff_dictionary_by_sequence,"rb") as f:
# kamus=pickle.load(f)
# for k,v in kamus.items():
# dct[v]=k
# with open(staff_dictionary_by_sequence_reveresed,"wb+") as f:
# pickle.dump(dct,f)
# with open(staff_dictionary_by_sequence_reveresed,"rb") as f:
# kamus2=pickle.load(f)
# print("employee id 198401112009011004_redyandriyansah=%s"%(kamus2["198401112009011004_redyandriyansah"]))
# with open(staff_dictionary_by_sequence_reveresed,"rb") as f:
# kamus=pickle.load(f)
# corpus=MyIter(dataset_vector)
# with open(dataset_vector_idseq,"a+") as f:
# for line in corpus:
# parts=line.split(";")
# vec = parts[0:-1]
# id = parts[-1].replace("\n", "")
# newid=str(kamus[id])
# l=vec+[newid]
# f.write(";".join(l))
# f.write("\n")
dataset_lower_clean_stem=r"data/dataset_lower_clean_stem_staff.csv"
dataset_lower_clean_stem_group_in_lines=r"data/dataset_lower_clean_stem_staff_group_with_periods.csv"
tool.group_sentences(csv_src=dataset_lower_clean_stem,
csv_dest=dataset_lower_clean_stem_group_in_lines,
col_to_groupby="ID_PEGAWAI",
sep=";",
sentence_link=". ")
|
{"/summarizer.py": ["/victorinox.py"], "/train_fasttext.py": ["/victorinox.py"], "/preprocess.py": ["/victorinox.py"], "/train.py": ["/victorinox.py"], "/app.py": ["/victorinox.py"], "/train_word2vec.py": ["/victorinox.py"]}
|
7,353
|
redyandri/doc2vec-master
|
refs/heads/master
|
/train.py
|
from victorinox import victorinox
from gensim.models import FastText
#https://radimrehurek.com/gensim/models/fasttext.html
dataset_path=r"data/dataset_lower_clean_stem_sentence.csv"
model_path=r"model/fasttext300.bin"
fasttext_model= FastText(dataset_path, size=300, window=5, min_count=5, workers=4,sg=1)
fasttext_model.save(model_path)
fasttext_model.wv.most_similar("nadine")
|
{"/summarizer.py": ["/victorinox.py"], "/train_fasttext.py": ["/victorinox.py"], "/preprocess.py": ["/victorinox.py"], "/train.py": ["/victorinox.py"], "/app.py": ["/victorinox.py"], "/train_word2vec.py": ["/victorinox.py"]}
|
7,354
|
redyandri/doc2vec-master
|
refs/heads/master
|
/train_doc2vec.py
|
from gensim.models.doc2vec import Doc2Vec, TaggedDocument
from gensim.utils import tokenize
from gensim import utils
class MyIter(object):
path=""
def __init__(self,fp):
self.path=fp
def __iter__(self):
# path = datapath(self.path)
with utils.open(self.path, 'r', encoding='utf-8') as fin:
for line in fin:
yield list(tokenize(line))
dataset_path=r"data\dataset_lower_clean_stem_sentence.csv"
model_path=r"model\doc2vec100.bin"
corpus=MyIter(dataset_path)
documents = [TaggedDocument(doc, [i]) for i, doc in enumerate(corpus)]
d2v_model = Doc2Vec(vector_size=100, window=2, min_count=1, workers=4)
d2v_model.build_vocab(documents)
d2v_model.train(documents,total_words=d2v_model.corpus_count,epochs=d2v_model.epochs)
d2v_model.save(model_path)
model_path=r"model\doc2vec100.bin"
d2v_model = Doc2Vec.load(model_path)
print(d2v_model.wv.most_similar(["naskah","dinas"]))
|
{"/summarizer.py": ["/victorinox.py"], "/train_fasttext.py": ["/victorinox.py"], "/preprocess.py": ["/victorinox.py"], "/train.py": ["/victorinox.py"], "/app.py": ["/victorinox.py"], "/train_word2vec.py": ["/victorinox.py"]}
|
7,355
|
redyandri/doc2vec-master
|
refs/heads/master
|
/app.py
|
import pymssql
from flask import Flask
from flask import render_template
from flask_restful import reqparse, abort, Api, Resource
import pickle
import numpy as np
from flask import request
import flask
from victorinox import victorinox
import os
import logging
from PIL import Image
#import cv2
#import tensorflow as tf
import jsonify
import sys
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.neighbors import KNeighborsClassifier
import sys
import time
import json
from flask import Response
app = Flask(__name__,static_url_path='')
api = Api(app)
tool=victorinox()
tfidf_vectors=r"data/tfidf_group_vectors.csv"
tfidf_model=r"data/tfidf_group_model.pkl"
df=pd.read_csv(tfidf_vectors,sep=";",header=None)
transformer = TfidfTransformer()
loaded_vec = CountVectorizer(decode_error="replace",vocabulary=pickle.load(open(tfidf_model, "rb")))
t1=time.time()
knn = KNeighborsClassifier(n_neighbors=len(df))
knn.fit(df.iloc[:,:-1], df.iloc[:,-1])
print("train KNN DONE in %f"%(time.time()-t1),file=sys.stdout)
@app.route('/user_by_competence', methods = ['GET'])
def user_by_competence():
q = request.args.get("q")
q = tool.preprocess_sentence(q)
qv = transformer.fit_transform(loaded_vec.fit_transform([q])).toarray()[0].tolist()
(distances, indices) = knn.kneighbors([qv], n_neighbors=5)
indices = indices.tolist()[0]
res = df.iloc[indices, -1]
dbConn = pymssql.connect('IP ADDRESS', 'USER', 'PASS', "DBNAME")
oldCursor = dbConn.cursor()
dbConn.commit()
data = list(res)
response = []
nips = []
for x in data:
nips.append(x.split('_')[0])
query = 'select nama, nip18, ref_unit.nama_organisasi from ref_user inner join ref_unit on ref_user.id_organisasi=ref_unit.id_organisasi where nip18 in (%s)'%(','.join(("'{0}'".format(w) for w in nips)))
print(query)
oldCursor.execute(query)
result = []
for x in oldCursor.fetchall():
result.append({'nama': x[0], 'nip': x[1], 'unit': x[2]})
js = json.dumps(result)
resp = Response(js, status=200, mimetype='application/json')
return json.dumps(result)
@app.route('/')
def hello_world():
return render_template("index.html")
if __name__ == '__main__':
app.run(host='0.0.0.0', port=5000,debug=True)
|
{"/summarizer.py": ["/victorinox.py"], "/train_fasttext.py": ["/victorinox.py"], "/preprocess.py": ["/victorinox.py"], "/train.py": ["/victorinox.py"], "/app.py": ["/victorinox.py"], "/train_word2vec.py": ["/victorinox.py"]}
|
7,356
|
redyandri/doc2vec-master
|
refs/heads/master
|
/infer_test.py
|
#python example to infer document vectors from trained doc2vec model
import gensim.models as g
import codecs
#parameters
model=r"toy_data\model.bin"
test_docs=r"toy_data\test_docs.txt"
output_file=r"toy_data\test_vectors.txt"
#inference hyper-parameters
start_alpha=0.01
infer_epoch=1000
#load model
m = g.Doc2Vec.load(model)
test_docs = [ x.strip().split() for x in codecs.open(test_docs, "r", "utf-8").readlines() ]
#infer test vectors
output = open(output_file, "w")
for d in test_docs:
output.write( " ".join([str(x) for x in m.infer_vector(d, alpha=start_alpha, steps=infer_epoch)]) + "\n" )
output.flush()
output.close()
# m.most_similar(["paris"])
|
{"/summarizer.py": ["/victorinox.py"], "/train_fasttext.py": ["/victorinox.py"], "/preprocess.py": ["/victorinox.py"], "/train.py": ["/victorinox.py"], "/app.py": ["/victorinox.py"], "/train_word2vec.py": ["/victorinox.py"]}
|
7,357
|
redyandri/doc2vec-master
|
refs/heads/master
|
/victorinox.py
|
from collections import Counter
import pandas as pd
import numpy as np
import re
import csv
from Sastrawi.StopWordRemover.StopWordRemoverFactory import StopWordRemoverFactory,StopWordRemover,ArrayDictionary
from Sastrawi.Stemmer.StemmerFactory import StemmerFactory
from gensim.models import FastText
from nltk.tokenize import RegexpTokenizer
from nltk.stem.wordnet import WordNetLemmatizer
from sklearn.metrics.pairwise import cosine_similarity
class victorinox(object):
def __init__(self):
return
def check_column_redundancy(self,
column_id=3,
fp="\a.csv"):
records=[]
with open(fp,"r") as file:
lines=file.readlines()
i=0
for row in lines:
if i==0:
i+=1
continue
fields=str(row).split(";")
try:
email = fields[column_id]
records.append(email)
except Exception as x:
print(fields)
break
c=Counter(records)
redundant=[]
for k,v in c.items():
if int(v)>1:
redundant.append(k)
print(redundant)
def check_column_multivalue(self,
column_id=3,
delimiter=" ",
fp="\a.csv"):
records=[]
with open(fp,"r") as file:
lines=file.readlines()
i=0
for row in lines:
if i==0:
i+=1
continue
fields=str(row).split(";")
try:
vals = fields[column_id]
flds=str(vals).split(delimiter)
if len(flds)>1:
records.append(vals)
except Exception as x:
print(fields)
break
print(records)
def replace_column_string(self,
column_id=3,
old=",",
new="",
fp="\a.csv",
dest_fp="\b.csv"
):
records=[]
with open(fp,"r") as file:
lines=file.readlines()
i=0
for row in lines:
if i==0:
i+=1
continue
fields=str(row).split(";")
try:
val = fields[column_id]
val=str(val).replace(old,new)
fields[column_id]=val
l=";".join(fields)
records.append(l)
except Exception as x:
print(fields)
break
# print(records)
with open(dest_fp, "w+") as f:
f.writelines(records)
def split_csv_into_batches(self,
batch_length=400,
fp="\a.csv",
dest_fp="\b.csv",
sep=",",
insert_header=False,
replace_string=False
):
df=pd.read_csv(fp,sep=sep,delimiter=sep)
npd=np.array(df)
max=len(npd)
err=0
for i in range(0,max,batch_length):
try:
if i > max:
break
d = str(dest_fp).replace(".csv", "_" + str(i) + ".csv")
to = i + batch_length
arr = npd[i:to, :]
# arr=np.chararray.encode(arr,"utf-8")
np.savetxt(d, arr, fmt="%s", delimiter=sep,encoding="utf-8")
if insert_header:
self.insert_header(d)
if replace_string:
self.replace_string(";nan", sep, d)
print("saved: ", str(d))
except Exception as e:
print("Error:", str(e))
err+=1
continue
print("Done. %d erro found"%(err))
def replace_string(self,
old=";nan",
new=";",
fp="\a.csv",
header=None
):
records = []
with open(fp, "r") as file:
lines = file.readlines()
i = 0
for row in lines:
if header is not None:
i += 1
continue
try:
val = str(row).replace(old, new)
records.append(val)
except Exception as x:
print(val)
break
# print(records)
with open(fp, "w+") as f:
f.writelines(records)
def insert_header(self,fp="\a.csv",
header="NAME;LAST_NAME;PASSWORD;EMAIL;LOGIN;"
"IBLOCK_SECTION_NAME_1;IBLOCK_SECTION_NAME_2;IBLOCK_SECTION_NAME_3;"
"IBLOCK_SECTION_NAME_4;IBLOCK_SECTION_NAME_5\n"):
records = []
with open(fp, "r") as file:
lines = file.readlines()
i = 0
records.append(header)
for row in lines:
try:
records.append(row)
except Exception as x:
print(row)
break
# print(records)
with open(fp, "w+") as f:
f.writelines(records)
def select_by_column_string(self,
column_id=6,
column_val="Sekretariat Jenderal",
fp="\a.csv",
dest_fp="\b.csv",
header=None
):
records=[]
with open(fp,"r") as file:
lines=file.readlines()
i=0
for row in lines:
if header is not None:
if i==0:
i+=1
continue
fields=str(row).split(";")
try:
val = fields[column_id]
if val==column_val:
l=";".join(fields)
records.append(l)
except Exception as x:
print(fields)
break
# print(records)
with open(dest_fp, "w+") as f:
f.writelines(records)
def select_leaders(self,
echelon_id=1,
fp="\a.csv",
dest_fp="\b.csv",
header=None
):
records=[]
with open(fp,"r") as file:
lines=file.readlines()
suffix=""
if echelon_id==3:
suffix=";"
elif echelon_id==2:
suffix=";;"
elif echelon_id==1:
suffix=";;;"
else:
suffix=""
i=0
for row in lines:
if header is not None:
if i==0:
i+=1
continue
# fields=str(row).split(";")
try:
if str(row).__contains__(suffix):
records.append(row)
# l=";".join(fields)
# records.append(l)
except Exception as x:
print(row)
break
# print(records)
with open(dest_fp, "w+") as f:
f.writelines(records)
def convert_xls_to_csv(self,xls_path="",
csv_path="",
sheet_name="",
index_col=None,
row_start_idx=1,
column_idx=[1,5]):
data_xls = pd.read_excel(xls_path, sheet_name, index_col=index_col)
data_xls=data_xls.iloc[row_start_idx:,column_idx]
data_xls.to_csv(csv_path,
encoding='utf-8',
index=False,
sep=";",
header=None)
print("done on %d rows"%data_xls.shape[0])
def merge_csv(self,csv_path1="",
csv_path2="",
csv_dest="",
sep=";",
join_op="left",
join_col="",
out_cols=["training","nip"]):
data_xls1 = pd.read_csv(csv_path1,sep=sep)
data_xls2 = pd.read_csv(csv_path2, sep=sep)
data_xls1 = data_xls1.astype({col: str for col in data_xls1.columns})
data_xls2 = data_xls2.astype({col: str for col in data_xls2.columns})
data_xls3=pd.merge(data_xls1,
data_xls2,
how=join_op,
on=join_col)
data_xls3["nip"]=data_xls3["nip"]+"_"+data_xls3["name"]
if out_cols!= None:
data_xls3[out_cols].to_csv(csv_dest,encoding='utf-8',index=False,sep=";")
else:
data_xls3.to_csv(csv_dest,encoding='utf-8',index=False,sep=";")
print("done on %d rows"%data_xls3.shape[0])
def replace_string_in_file(self,
file_path="",
file_path_dest="",
string_to_replace="",
replacement_string=""):
regex = re.compile(r"\d{18},", re.IGNORECASE)
regex2 = re.compile(r"\d{18}", re.IGNORECASE)
res=[]
with open(file_path,encoding="utf8") as f:
lines =f.readlines()
for line in lines:
try:
nip = regex2.findall(line)[0]
line = regex.sub(nip + ";", line)
line=line.replace("\n","")
res.append([x for x in line.split(";")])
except Exception as e:
print("error line:%s"%line)
continue
nparr=np.array(res)
df=pd.DataFrame(nparr)
df.to_csv(file_path_dest,
header=None,
index=None,
sep=";")
print("done saving %d rows"%len(res))
def switch_columns(self,
csv_path="",
csv_dest_path="",
sep=";"):
df=pd.read_csv(csv_path,delimiter=sep,error_bad_lines=False)
df=df.iloc[:,[-1,-2]]
df.to_csv(csv_dest_path,
sep=sep,
header=None,
index=None)
def lower_contents(self,
csv_src="",
csv_dest="",
sep=";"):
df=pd.read_csv(csv_src,sep=sep)
for c in df.columns:
df[c]=df[c].str.lower()
df.to_csv(csv_dest,sep=sep,index=None)
print("lower %d rows"%len(df))
def remove_stopwords(self,csv_src="",
csv_dest="",
cols_to_clean=["KOMPETENSI"],
sep=";"):
#factory = StopWordRemoverFactory()
default_stopwords = StopWordRemoverFactory().get_stop_words()
additional_stopwords=["(",")","senin","selasa","rabu","kamis","jumat","sabtu","minggu"]
dictionary=ArrayDictionary(default_stopwords+additional_stopwords)
stopword = StopWordRemover(dictionary)#factory.create_stop_word_remover(dictionary = dictionary)
tokenizer = RegexpTokenizer(r'\w+')
df = pd.read_csv(csv_src, sep=sep)
for c in cols_to_clean:
df[c] = df[c].map(lambda x: " ".join(tokenizer.tokenize(x))) #get only words without symbols
df[c]=df[c].map(lambda x:stopword.remove(x)) #remove stop words
df.to_csv(csv_dest, sep=sep, index=None)
print("lower %d rows" % len(df))
def stem(self,csv_src="",
csv_dest="",
cols_to_clean="KOMPETENSI",
sep=";"):
factory = StemmerFactory()
stemmer = factory.create_stemmer()
df = pd.read_csv(csv_src, sep=sep)
df[cols_to_clean]=df[cols_to_clean].astype(str)
df[cols_to_clean] = df[cols_to_clean].map(lambda x: stemmer.stem(x))
df.to_csv(csv_dest, sep=sep, index=None)
print("lower %d rows" % len(df))
def create_sentence_list(self,
csv_src="",
csv_dest="",
cols_to_write="KOMPETENSI",
sep=";"):
df = pd.read_csv(csv_src, sep=sep)
df[cols_to_write].to_csv(csv_dest, sep=sep, index=None)
print("lower %d rows" % len(df))
def document_vector(self,word2vec_model, doc):
# remove out-of-vocabulary words
doc = [word for word in doc if word in word2vec_model.wv.vocab]
return np.mean(word2vec_model[doc], axis=0)
def measure_similarity(self,vec1,vec2):
vec1=np.array(vec1).reshape(1,-1)
vec2 = np.array(vec2).reshape(1, -1)
return cosine_similarity(vec1,vec2)
def group_sentences(self,
csv_src="",
csv_dest="",
col_to_groupby="ID_PEGAWAI",
col_to_group="KOMPETENSI",
sep=";",
sentence_link=" "):
df=pd.read_csv(csv_src,sep=sep)
df2= df.groupby(col_to_groupby)
ids=[]
datas = []
for group_name, dfgroup in df2:
groupcontent=""
for idx, row in dfgroup.iterrows():
groupcontent+=str(row[col_to_group])+sentence_link
datas.append(groupcontent)
ids.append(row[col_to_groupby])
result={col_to_groupby:ids,
col_to_group:datas}
dfresult=pd.DataFrame(result)
dfresult.to_csv(csv_dest, sep=sep, index=None)
print("group into %d rows"%len(df))
def get_list_from_txt(self,
                      txt_src="",
                      sep=";"):
    """Return the lines of *txt_src* (newlines stripped) as a list.

    NOTE: *sep* is accepted for interface compatibility but is not used.
    """
    with open(txt_src) as handle:
        lines = handle.read().splitlines()
    return lines
def concat_dataframe(self, df1, df2, axis=1, csv_dest=""):
    """Concatenate two DataFrames column-wise and save as a ';'-separated CSV.

    NOTE: the *axis* parameter is accepted but, as in the original, the
    concatenation is always performed along axis=1.
    """
    combined = pd.concat([df1, df2], axis=1)
    combined.to_csv(csv_dest, sep=";", index=None)
    print("done merging %d rows" % len(combined))
def preprocess_sentence(self, q=""):
    """Tokenize, lowercase, stop-word-filter and stem one Indonesian sentence.

    Mirrors the column-wise cleaning pipeline used by the other methods of
    this class: word tokenization -> lowercasing -> stop-word removal ->
    Sastrawi stemming. Returns the cleaned sentence as a string.
    """
    default_stopwords = StopWordRemoverFactory().get_stop_words()
    additional_stopwords = ["(", ")", "senin", "selasa", "rabu", "kamis", "jumat", "sabtu", "minggu"]
    dictionary = ArrayDictionary(default_stopwords + additional_stopwords)
    stopword = StopWordRemover(dictionary)
    factory = StemmerFactory()
    stemmer = factory.create_stemmer()
    tokenizer = RegexpTokenizer(r'\w+')
    res = " ".join(tokenizer.tokenize(q))
    res = res.lower()
    res = stopword.remove(res)
    # BUG FIX: the original line `res=factory =stemmer.stem(res)` was a chained
    # assignment that also rebound `factory` to the stemmed string; only `res`
    # should receive the stemmer output.
    res = stemmer.stem(res)
    return res
|
{"/summarizer.py": ["/victorinox.py"], "/train_fasttext.py": ["/victorinox.py"], "/preprocess.py": ["/victorinox.py"], "/train.py": ["/victorinox.py"], "/app.py": ["/victorinox.py"], "/train_word2vec.py": ["/victorinox.py"]}
|
7,358
|
redyandri/doc2vec-master
|
refs/heads/master
|
/train_word2vec.py
|
from gensim.models import Word2Vec
from gensim.utils import tokenize
from gensim import utils
from gensim.test.utils import datapath
from gensim.utils import tokenize
from gensim import utils
import logging
from gensim.models import KeyedVectors
import gensim.downloader as api
from victorinox import victorinox
#enable logging
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
class MyIter(object):
    """Re-iterable corpus over a sentence file: one tokenized line per item.

    gensim's Word2Vec needs a corpus it can scan multiple times, so the file
    is reopened on every pass instead of being read once into memory.
    """
    # class-level default; overwritten per instance in __init__
    path = ""

    def __init__(self, fp):
        self.path = fp

    def __iter__(self):
        with open(self.path, 'r', encoding='utf-8') as fin:
            for line in fin:
                yield list(tokenize(line))
# #https://radimrehurek.com/gensim/models/doc2vec.html
# vector_size = 300
# window_size = 4
# min_count = 1
# sampling_threshold = 1e-5
# negative_size = 5
# train_epoch = 100
# dm = 0 #0 = dbow; 1 = dmpv
# sg=1 #CBOW
# worker_count = 1
# dataset_path=r"data/dataset_lower_clean_stem_sentence.csv"
# corpus_file = datapath(dataset_path)
# model_path=r"model/word2vec100_cbow.bin"
# corpus=MyIter(dataset_path)
# word2vec_model=Word2Vec(corpus,
# size=vector_size,
# window=window_size,
# min_count=min_count,
# sample=sampling_threshold,
# workers=worker_count,
# hs=0,
# #dm=dm,
# negative=negative_size,
# #dbow_words=1,
# #dm_concat=1,
# #pretrained_emb=pretrained_vec,
# sg=sg,
# iter=train_epoch)
# word2vec_model.save(model_path)
# model_path=r"model/word2vec100_cbow.bin"
# w2v_model = Word2Vec.load(model_path)
# print(w2v_model.wv.most_similar("pegawai"))
# print(w2v_model.wv.most_similar("gaji"))
# vector_size = 300
# window_size = 4
# min_count = 1
# sampling_threshold = 1e-5
# negative_size = 5
# train_epoch = 100
# dm = 0 #0 = dbow; 1 = dmpv
# sg=0 #SKIP ThOUGHT
# worker_count = 1
# dataset_path=r"data/dataset_lower_clean_stem_sentence.csv"
# corpus_file = datapath(dataset_path)
# model_path=r"model/word2vec300_skipthought.bin"
# corpus=MyIter(dataset_path)
# word2vec_model=Word2Vec(corpus,
# size=vector_size,
# window=window_size,
# min_count=min_count,
# sample=sampling_threshold,
# workers=worker_count,
# hs=0,
# #dm=dm,
# negative=negative_size,
# #dbow_words=1,
# #dm_concat=1,
# #pretrained_emb=pretrained_vec,
# sg=sg,
# iter=train_epoch)
# word2vec_model.save(model_path)
#
# model_path=r"model/word2vec300_skipthought.bin"
# w2v_model = Word2Vec.load(model_path)
# print(w2v_model.wv.most_similar("database"))
# Pretrained Indonesian-Wikipedia word2vec model and the copy that was
# fine-tuned on the local competence-sentence corpus.
idwiki_word2vec_model=r"model/idwiki_word2vec_300/idwiki_word2vec_300.model"
idwiki_word2vec_model_retrain=r"model/idwiki_word2vec_300/idwiki_word2vec_300_retrain.model"
dataset_path=r"data/dataset_lower_clean_stem_sentence.csv"
corpus=MyIter(dataset_path)
# Training hyper-parameters. Currently unused: the retraining calls below are
# commented out, so this run only loads the two saved models.
vector_size = 300
window_size = 4
min_count = 1
sampling_threshold = 1e-5
negative_size = 5
train_epoch = 100
dm = 0 #0 = dbow; 1 = dmpv
sg=0 #SKIP ThOUGHT
worker_count = 1
word2vec_model=Word2Vec.load(idwiki_word2vec_model)
# word2vec_model=api.load(idwiki_word2vec_model)
# word2vec_model.build_vocab(sentences=corpus,update=True)
# word2vec_model.train(corpus,
#                      total_examples=word2vec_model.corpus_count,
#                      epochs=train_epoch)
# word2vec_model.save(idwiki_word2vec_model_retrain)
word2vec_model2=Word2Vec.load(idwiki_word2vec_model_retrain)
# print(word2vec_model.wv.most_similar("yang"))
# print(word2vec_model2.wv.most_similar("yang"))
# Sanity check: related IT-competence sentences should score higher cosine
# similarity with each other than with unrelated ones.
# NOTE(review): document_vector iterates its argument, so passing a raw string
# here compares CHARACTERS against the vocabulary, not words — possibly
# intended given the '.' vocabulary probe below, but confirm.
tool=victorinox()
s1="kemampuan analisa sql server"
s2="analisa jaringan komputer"
s3="pengolahan database"
s1_emb=tool.document_vector(word2vec_model2,s1)
s2_emb=tool.document_vector(word2vec_model2,s2)
s3_emb=tool.document_vector(word2vec_model2,s3)
print(tool.measure_similarity(s1_emb,s1_emb))
print(tool.measure_similarity(s1_emb,s2_emb))
print(tool.measure_similarity(s1_emb,s3_emb))
print(tool.measure_similarity(s2_emb,s3_emb))
print("." in word2vec_model.wv.vocab)
|
{"/summarizer.py": ["/victorinox.py"], "/train_fasttext.py": ["/victorinox.py"], "/preprocess.py": ["/victorinox.py"], "/train.py": ["/victorinox.py"], "/app.py": ["/victorinox.py"], "/train_word2vec.py": ["/victorinox.py"]}
|
7,360
|
Zumbalamambo/behavior_understanding_indoor_dataset
|
refs/heads/master
|
/demo.py
|
from sklearn.linear_model import LogisticRegression
import numpy as np
import cv2
import matplotlib
matplotlib.use('TkAgg')
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
import matplotlib.pyplot as plt
from collections import Counter
import threading
import time
import multiprocessing
from datetime import datetime as dt
from Tkinter import *
import PIL
from PIL import ImageTk, Image
import data_organizer
import video_traj
import ambient_sensors
#scene = np.ones((414,512),dtype=np.uint8)
# Background image on which the skeleton limbs and trajectories are drawn.
scene = cv2.imread('C:/Users/dario.dotti/Documents/pilot_abnormal_behavior_indoor/subject4_1834.jpg')
#Create a window
# Main Tk window; shared (as a module-level global) by the plotting callbacks.
window=Tk()
window.title('Abnormal Behavior Detector')
def plot_classifier_confidence(task, cluster_model, keys_labels, lr, q):
    """Stream classifier confidences for each 2-second slice of *task* into *q*.

    Every slice is quantized to its nearest cluster word, accumulated into a
    bag-of-words histogram, and the logistic-regression decision scores for
    the confusion / repetitive / ADL class groups are pushed onto *q* as
    ['cc', conf_confusion, conf_repetitive, conf_adl, slice_idx, x].
    """
    bow_hist = np.zeros((1, len(keys_labels)))
    for step, window_slice in enumerate(task):
        # quantize the slice and grow the running BOW histogram
        word = cluster_model.predict(np.array(window_slice).reshape(1, -1))
        bin_idx = np.where(word == keys_labels)[0]
        bow_hist[0][bin_idx] += 1
        lr.predict(bow_hist)  # kept for parity with the original; result unused
        scores = lr.decision_function(bow_hist)[0]
        conf_confusion = np.max([scores[0], scores[1], scores[2]])
        conf_repetitive = scores[3]
        conf_adl = np.max([scores[4], scores[5]])
        # the original kept a separate x_axis counter that always equalled the
        # slice index, so both payload slots carry the same value
        q.put(['cc', conf_confusion, conf_repetitive, conf_adl, step, step])
        time.sleep(2)
    #plt.pause(100)
def draw_joints_and_tracks(body_points,current_time_shared):
    """Replay recorded skeleton frames on the scene image at ~30 fps.

    For every frame: parse the recording timestamp from the first joint entry
    and publish it on *current_time_shared* (the shared playback clock read by
    show_binary_sensor), draw the limb segments on a copy of the scene, and
    draw the joint-1 trajectory permanently on the scene itself.
    """
    #make the thread wait for the other
    time.sleep(1.5)
    color = (0,0,255)
    for n_frame,traj_body_joints in enumerate(body_points):
        temp_img = scene.copy()
        #get recording time and make it as current time
        # NOTE(review): the date is rebuilt assuming month '10' and an
        # HTML-escaped year field — specific to how this dataset was exported.
        time_info_joint = traj_body_joints[0,1].split(' ')
        date = time_info_joint[4].split('&#')[0]+'-'+'10-'+time_info_joint[2]
        #global current_time
        current_time = dt.strptime(date+' '+time_info_joint[3],'%Y-%m-%d %H:%M:%S')
        current_time_shared.put(current_time)
        #draw line between joints
        thickness = 3
        line_color = (19,19,164)
        #first position skipped cause there are other info stored
        #torso
        cv2.line(temp_img,(int(float(traj_body_joints[1,0])),int(float(traj_body_joints[1,1]))),(int(float(traj_body_joints[2,0])),int(float(traj_body_joints[2,1]))),line_color,thickness)
        cv2.line(temp_img,(int(float(traj_body_joints[2,0])),int(float(traj_body_joints[2,1]))),(int(float(traj_body_joints[3,0])),int(float(traj_body_joints[3,1]))),line_color,thickness)
        cv2.line(temp_img,(int(float(traj_body_joints[3,0])),int(float(traj_body_joints[3,1]))),(int(float(traj_body_joints[4,0])),int(float(traj_body_joints[4,1]))),line_color,thickness)
        #shoulder
        cv2.line(temp_img,(int(float(traj_body_joints[5,0])),int(float(traj_body_joints[5,1]))),(int(float(traj_body_joints[6,0])),int(float(traj_body_joints[6,1]))),line_color,thickness)
        cv2.line(temp_img,(int(float(traj_body_joints[5,0])),int(float(traj_body_joints[5,1]))),(int(float(traj_body_joints[10,0])),int(float(traj_body_joints[10,1]))),line_color,thickness)
        #hips
        cv2.line(temp_img,(int(float(traj_body_joints[4,0])),int(float(traj_body_joints[4,1]))),(int(float(traj_body_joints[14,0])),int(float(traj_body_joints[14,1]))),line_color,thickness)
        cv2.line(temp_img,(int(float(traj_body_joints[4,0])),int(float(traj_body_joints[4,1]))),(int(float(traj_body_joints[18,0])),int(float(traj_body_joints[18,1]))),line_color,thickness)
        #right arm
        cv2.line(temp_img,(int(float(traj_body_joints[6,0])),int(float(traj_body_joints[6,1]))),(int(float(traj_body_joints[7,0])),int(float(traj_body_joints[7,1]))),line_color,thickness)
        cv2.line(temp_img,(int(float(traj_body_joints[7,0])),int(float(traj_body_joints[7,1]))),(int(float(traj_body_joints[8,0])),int(float(traj_body_joints[8,1]))),line_color,thickness)
        cv2.line(temp_img,(int(float(traj_body_joints[8,0])),int(float(traj_body_joints[8,1]))),(int(float(traj_body_joints[9,0])),int(float(traj_body_joints[9,1]))),line_color,thickness)
        #left arm
        cv2.line(temp_img,(int(float(traj_body_joints[10,0])),int(float(traj_body_joints[10,1]))),(int(float(traj_body_joints[11,0])),int(float(traj_body_joints[11,1]))),line_color,thickness)
        cv2.line(temp_img,(int(float(traj_body_joints[11,0])),int(float(traj_body_joints[11,1]))),(int(float(traj_body_joints[12,0])),int(float(traj_body_joints[12,1]))),line_color,thickness)
        cv2.line(temp_img,(int(float(traj_body_joints[12,0])),int(float(traj_body_joints[12,1]))),(int(float(traj_body_joints[13,0])),int(float(traj_body_joints[13,1]))),line_color,thickness)
        #right leg
        cv2.line(temp_img,(int(float(traj_body_joints[14,0])),int(float(traj_body_joints[14,1]))),(int(float(traj_body_joints[15,0])),int(float(traj_body_joints[15,1]))),line_color,thickness)
        cv2.line(temp_img,(int(float(traj_body_joints[15,0])),int(float(traj_body_joints[15,1]))),(int(float(traj_body_joints[16,0])),int(float(traj_body_joints[16,1]))),line_color,thickness)
        cv2.line(temp_img,(int(float(traj_body_joints[16,0])),int(float(traj_body_joints[16,1]))),(int(float(traj_body_joints[17,0])),int(float(traj_body_joints[17,1]))),line_color,thickness)
        #left leg
        cv2.line(temp_img,(int(float(traj_body_joints[18,0])),int(float(traj_body_joints[18,1]))),(int(float(traj_body_joints[19,0])),int(float(traj_body_joints[19,1]))),line_color,thickness)
        cv2.line(temp_img,(int(float(traj_body_joints[19,0])),int(float(traj_body_joints[19,1]))),(int(float(traj_body_joints[20,0])),int(float(traj_body_joints[20,1]))),line_color,thickness)
        cv2.line(temp_img,(int(float(traj_body_joints[20,0])),int(float(traj_body_joints[20,1]))),(int(float(traj_body_joints[21,0])),int(float(traj_body_joints[21,1]))),line_color,thickness)
        for i,joint in enumerate(traj_body_joints):
            if i == 0:
                # row 0 carries metadata (timestamp), not coordinates
                continue
            elif i == 1:
                ##draw trajectories
                cv2.circle(scene,(int(float(joint[0])),int(float(joint[1]))),2,color,-1)
            else:
                cv2.circle(temp_img,(int(float(joint[0])),int(float(joint[1]))),2,color,-1)
        ##display like recorded time 30 fps
        cv2.imshow('skeleton',temp_img)
        cv2.waitKey(33)
def show_binary_sensor(sensor_data, signal_entrance_door, q, current_time_shared):
    """Match entrance-door events against the playback clock and report counts.

    For every sensor activation time in *sensor_data*, wait until the skeleton
    playback clock (read from *current_time_shared*) is within 10 seconds of
    the event, then count it as a door-open (signal substring 'OFF') or a
    door-close event and push the updated bar-chart data onto *q* as
    ['as', activation_open, activation_close].
    """
    # fake baseline data for toilet/livingroom; only index 0 (maindoor) is live
    activation_open = [0, 5, 6]
    activation_close = [0, 5, 5]
    for i, s in enumerate(sensor_data):
        # wait until the current time reaches the time the sensor was activated
        while True:
            try:
                current_time = current_time_shared.get()
            except:
                continue
            time_diff = current_time - s
            if np.abs(time_diff.total_seconds()) <= 10:
                if signal_entrance_door[i][9:12] == 'OFF':
                    activation_open[0] += 1
                else:
                    activation_close[0] += 1
                # BUG FIX: the original put activation_open in BOTH payload
                # slots, so the consumer's 'door close' bar never updated;
                # slot 2 must carry activation_close.
                q.put(['as', activation_open, activation_close])
                break
def basic_plot():#Function to create the base plot, make sure to make global the lines, axes, canvas and any part that you would want to update later
    """Build the base Tk/matplotlib dashboard.

    Creates the confidence line plot (top), the ambient-sensor bar chart
    (bottom) and the notification icon/text labels. All updatable artists are
    exported as globals so update_figures_in_threads and
    update_notification_icons can refresh them (and, presumably, to keep the
    Tk PhotoImage references alive — confirm).
    """
    global ax_conf,ax_as,canvas,rect_open,rect_close,warning_img,emergency_img,notification_icon,notification_text
    ##initialize figures
    main_fig = plt.figure()
    ax_as = main_fig.add_subplot(212)
    ax_conf = main_fig.add_subplot(211)
    ##canvas in the main window
    canvas = FigureCanvasTkAgg(main_fig, master=window)
    canvas.show()
    ##in case of widget
    #canvas.get_tk_widget().pack(side=TOP, fill=BOTH, expand=1)
    ##pack place the plot automatically, using place we can specify x,y
    #canvas._tkcanvas.pack(side=TOP, fill=BOTH, expand=1)
    canvas._tkcanvas.place(x=80,y=20)
    ##inizialize plot of confidence of classifier
    ax_conf.axis([0,60,-10,20])
    ax_conf.plot(0,0)
    ax_conf.plot(0,0)
    ax_conf.plot(0,0)
    ax_conf.set_title('classifier confidence')
    ##initialize bar plot of ambient sensor
    sensor = ['maindoor','toilet','livingroom']
    ind = np.arange(len(sensor))
    width = 0.2
    ax_as.axis([-0.5,3,0,10])
    ax_as.set_xticks(ind+width)
    ax_as.set_xticklabels(sensor)
    #fake data except maindoor
    activation_open =[0,5,6]
    activation_close=[0,5,5]
    #bar charts
    rect_open = ax_as.bar(ind, activation_open,width,color='red')
    rect_close = ax_as.bar(ind+width, activation_close,width,color='blue')
    ax_as.legend((rect_open[0],rect_close[1]),('door open','door close'),fontsize=9)
    ax_as.set_title('ambient sensor')
    ##initialize notification icons and text
    warning_img = PIL.ImageTk.PhotoImage(PIL.Image.open('C:/Users/dario.dotti/Documents/data_for_demo/icon_warning_call_relative.png'))
    emergency_img = PIL.ImageTk.PhotoImage(PIL.Image.open('C:/Users/dario.dotti/Documents/data_for_demo/icon_emergency_call_doctors.png'))
    notification_icon = Label(window, image=warning_img)
    notification_text = Label(window, text='Calling the stakeholders')
    notification_title = Label(window, text='NOTIFICATION')
    notification_title.place(x=350, y=510)
def update_figures_in_threads(q):
    """Poll *q* for worker messages and refresh the canvas from the Tk thread.

    Reschedules itself via window.after: every 10 ms while messages arrive,
    backing off to 100 ms when the queue is empty. A 'Q' sentinel message
    stops the polling loop (no reschedule happens on that branch).
    """
    try:#Try to check if there is data in the queue
        result=q.get_nowait()
        if result !='Q':
            if result[0] == 'cc':
                # classifier message: [tag, confusion, repetitive, adl, n_slice, x]
                ax_conf.plot(result[5],result[1],'r^',label='confusion')
                ax_conf.plot(result[5],result[2],'b^',label='repetitive')
                ax_conf.plot(result[5],result[3],'g^',label='normal activity')
                #draw the legend only once
                if result[4]==0:
                    ax_conf.legend(loc='upper left',fontsize=9)
                ##show notification images
                if 1>result[1]>0 or 1>result[2]>0:
                    update_notification_icons('warning')
                elif result[1]>1 or result[2]>1:
                    update_notification_icons('emergency')
                canvas.draw()
                window.after(10, update_figures_in_threads, q)
            elif result[0] == 'as':
                # ambient-sensor message: [tag, open_counts, close_counts]
                rect_open[0].set_height(result[1][0])
                rect_close[0].set_height(result[2][0])
                canvas.draw()
                window.after(10, update_figures_in_threads, q)
    except:
        # NOTE(review): this bare except is meant to catch the empty-queue
        # exception from get_nowait, but it also hides any plotting error —
        # consider narrowing it to the queue-empty exception.
        ##no new input so refresh
        window.after(100, update_figures_in_threads, q)
def update_notification_icons(label_img):
    """Switch the notification icon and text to the given alert level.

    *label_img* is 'warning' (call the stakeholders) or 'emergency' (call the
    doctors); any other value leaves the widgets untouched, as before.
    """
    if label_img == 'warning':
        icon, text = warning_img, 'Calling the stakeholders'
    elif label_img == 'emergency':
        icon, text = emergency_img, 'Calling the doctors'
    else:
        return
    ##refreshing notification icons
    notification_icon.configure(image=icon)
    notification_icon.image = icon
    notification_icon.place(x=320, y=550)
    ##text
    notification_text.configure(text=text)
    notification_text.place(x=330, y=670)
def main_demo():
    """Wire up the demo: load data/models, start the worker processes, run Tk.

    Three multiprocessing workers run concurrently: skeleton replay (which
    also drives the shared playback clock), classifier-confidence streaming,
    and ambient-sensor matching. The Tk main loop consumes their messages via
    update_figures_in_threads.
    """
    ##get raw data for displaying
    body_joints = video_traj.xml_parser('C:/Users/dario.dotti/Documents/pilot_abnormal_behavior_indoor/joints/subject7_points.xml')
    ##HOT features organized per subjects and tasks
    HOT_16_subject_6_tasks = data_organizer.load_matrix_pickle('C:/Users/dario.dotti/Documents/data_for_demo/HOT_matrix_16_subject_6_tasks.txt')
    ##BOW computed on HOT
    bow_data = data_organizer.load_matrix_pickle('C:/Users/dario.dotti/Documents/data_for_demo/BOW_16subject_2sec.txt')
    labels_bow_data = data_organizer.load_matrix_pickle('C:/Users/dario.dotti/Documents/data_for_demo/BOW_labels_16subject_2sec.txt')
    lr = LogisticRegression()
    lr.fit(bow_data,np.ravel(labels_bow_data))
    ##cluster data
    cluster_model= data_organizer.load_matrix_pickle('C:/Users/dario.dotti/Documents/cl_model_2secWindow_band03.txt')
    labels_cluster = data_organizer.load_matrix_pickle('C:/Users/dario.dotti/Documents/data_for_demo/cluster_labels.txt')
    labels_cluster_counter = Counter(labels_cluster).most_common(40)
    keys_labels = map(lambda x: x[0], labels_cluster_counter)
    ##load binary data for displaying
    entrance_door_str = ambient_sensors.org_data_ID('C:/Users/dario.dotti/Documents/pilot_abnormal_behavior_indoor/binary/18-10-16_sensors_subject7.txt')['entrance']
    entrance_door = []
    signal_entrance_door=[]
    #converting entrance door from string to time
    # NOTE(review): the local name `time` below shadows the imported time
    # module inside this function — harmless here, but rename if time.* is
    # ever needed in this scope.
    for i,s in enumerate(entrance_door_str):
        date = s.split(' ')[1]
        time = s.split(' ')[2].split('-')
        entrance_door.append(dt.strptime(date+' '+time[0]+':'+time[1]+':'+time[2],'%y-%m-%d %H:%M:%S'))
        signal_entrance_door.append(s.split(' ')[0])
    subj = 3 #n_subject order: 4,5,6,7,10,11,12,13,14,19,20,15,3,16,17,18
    task = 0 #n_task order confusion: 0,1,2 repetitive:3 house_activity: 4,5
    ##shared variable between threads
    q = multiprocessing.Queue()
    current_time_shared=multiprocessing.Queue()
    ##launch different processes in the same time
    display_joints_traj = multiprocessing.Process(target=draw_joints_and_tracks,args=(body_joints,current_time_shared))
    display_confidence_classifier = multiprocessing.Process(target=plot_classifier_confidence,args=(HOT_16_subject_6_tasks[subj][task],cluster_model,keys_labels,lr,q))
    display_ambient_sensor = multiprocessing.Process(target=show_binary_sensor,args=(entrance_door,signal_entrance_door,q,current_time_shared))
    display_joints_traj.start()
    display_confidence_classifier.start()
    display_ambient_sensor.start()
    ##call plot initializer
    basic_plot()
    update_figures_in_threads(q)
    ##launch main window loop
    window.geometry('800x700')
    window.mainloop()
if __name__ == '__main__':
    main_demo()
|
{"/demo.py": ["/data_organizer.py", "/video_traj.py", "/ambient_sensors.py"]}
|
7,361
|
Zumbalamambo/behavior_understanding_indoor_dataset
|
refs/heads/master
|
/main.py
|
import numpy as np
import cv2
import os
import scipy.io
import data_organizer as my_data_org
import ambient_sensors as ambient_sensor_analysis
import video_traj
def get_video_features():
#read and parse file with recorded data
<<<<<<< HEAD
with open('C:/Users/dario.dotti/Documents/Datasets/my_dataset/wandering_dataset_um/file_to_analyze_6_labels_ordered.txt','r') as f:#C:/Users/dario.dotti/Desktop/data_recordings_master/file_to_analize_master_recordings.txt
=======
with open('C:/Users/dario.dotti/Desktop/data_recordings_master/file_to_analize_master_recordings.txt','r') as f:#C:/Users/dario.dotti/Documents/Datasets/my_dataset/wandering_dataset_um/file_to_analyze_6_labels_ordered.txt
>>>>>>> 9348384985d2847c272133ff77ce6181ca1fa082
files = f.read().split('\n')
print 'number of recorded files: '+str(len(files))
matrix_allData_HOT = []
for file in files:
print file
filename = os.path.basename(file)
video_traj.set_subject(filename.split('_')[0])
traj_features = video_traj.feature_extraction_video_traj(file)
matrix_allData_HOT.append(traj_features[1])
# if len(matrix_allData_HOT)>0:
# matrix_allData_HOT = np.vstack((matrix_allData_HOT,traj_features[1]))
# else:
# matrix_allData_HOT = np.array(traj_features[1])
print len(matrix_allData_HOT)
#scipy.io.savemat('C:/Users/dario.dotti/Documents/hot_spatial_grid_4x4.mat',mdict={'spatial_grid_4x4': matrix_allData_HOT})
<<<<<<< HEAD
my_data_org.save_matrix_pickle(matrix_allData_HOT,'C:/Users/dario.dotti/Documents/data_for_personality_exp/after_data_cleaning/skeleton_data_in_tasks_time_slices_30fps_ordered_1sec.txt')##C:/Users/dario.dotti/Desktop/data_recordings_master/master_skeleton_data_in_tasks_time_slices_30fps_1sec.txt
=======
#my_data_org.save_matrix_pickle(matrix_allData_HOT,'C:/Users/dario.dotti/Desktop/data_recordings_master/master_skeleton_data_in_tasks_time_slices_30fps.txt') #C:/Users/dario.dotti/Documents/data_for_personality_exp/after_data_cleaning/skeleton_data_in_tasks_time_slices_30fps_ordered
>>>>>>> 9348384985d2847c272133ff77ce6181ca1fa082
def get_ambient_sensor_features():
    """Extract ambient-sensor activation features per recording and pickle them.

    Stacks the per-file activation matrices vertically into one array before
    saving with data_organizer.
    """
    #read and parse file with recorded data
    with open('C:/Users/dario.dotti/Documents/file_to_analyze_AS_5_labels.txt','r') as f:
        files = f.read().split('\n')
    print 'number of recorded files: '+str(len(files))
    matrix_allData_as = []
    for file in files:
        print file
        activation_matrix = ambient_sensor_analysis.feature_extraction_as(file)
        print np.array(activation_matrix).shape
        # first file initialises the matrix, subsequent ones are stacked below
        if len(matrix_allData_as)>0:
            matrix_allData_as = np.vstack((matrix_allData_as,activation_matrix))
        else:
            matrix_allData_as = activation_matrix
    my_data_org.save_matrix_pickle(matrix_allData_as,'C:/Users/dario.dotti/Documents/AS_activation_5_labels_transformed.txt')
def main():
    """Entry point: run the video-trajectory feature extraction."""
    # NOTE(review): `scene` is loaded but never used in this function —
    # presumably kept for quick visual debugging; confirm before removing.
    scene = cv2.imread('C:/Users/dario.dotti/Documents/Datasets/my_dataset/wandering_dataset_um/subject4_1834.jpg')#KinectScreenshot-Color-12-25-18 - Copy
    ##get features from video trajectories
    get_video_features()
    ##ambient sensor
    #get_ambient_sensor_features()
    # ambient_sensor_analysis.org_data_different_tasks(file_AS)
    #sensors_ID = ambient_sensor_analysis.org_data_ID(file_AS)
    #ambient_sensor_analysis.nr_visit_bathroom(sensors_ID)
if __name__ == '__main__':
    main()
|
{"/demo.py": ["/data_organizer.py", "/video_traj.py", "/ambient_sensors.py"]}
|
7,362
|
Zumbalamambo/behavior_understanding_indoor_dataset
|
refs/heads/master
|
/test_BOW_deliverable.py
|
import numpy as np
<<<<<<< HEAD
from sklearn.metrics import classification_report
from sklearn.cluster import KMeans
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import normalize
=======
from sklearn.metrics import classification_report,precision_recall_fscore_support,accuracy_score
from sklearn.cluster import KMeans
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split
from pomegranate import *
>>>>>>> 9348384985d2847c272133ff77ce6181ca1fa082
import data_organizer
import img_processing
import AE_rec
feature_p_1 = data_organizer.load_matrix_pickle('C:/Users/dario.dotti/Desktop/Hier_AE_deliverable/head_joint_id1/feature_matrix_participant_task_l2_new_realCoordinates.txt')
<<<<<<< HEAD
feature_p_2 =data_organizer.load_matrix_pickle('C:/Users/dario.dotti/Desktop/Hier_AE_deliverable/head_joint_id1/feature_matrix_participant_master_task_l2_new_realCoordinates.txt')
feature_p = feature_p_1[:19]+feature_p_2[:27]
real_coord = feature_p_1[19:]+feature_p_2[27:]
list_poly = img_processing.divide_image(np.zeros((414,512),dtype=np.uint8))
space_features = []
for i_p in xrange(0,len(real_coord)):
for i_task in xrange(0,len(real_coord[i_p])):
for i_slice in xrange(0,len(real_coord[i_p][i_task])):
votes = np.zeros((16, 3))
for p in xrange(0,len(real_coord[i_p][i_task][i_slice])):
size_per_frame = int(len(real_coord[i_p][i_task][i_slice][p])/3)
x = real_coord[i_p][i_task][i_slice][p][:size_per_frame]
y = real_coord[i_p][i_task][i_slice][p][size_per_frame:(size_per_frame*2)]
z = real_coord[i_p][i_task][i_slice][p][(size_per_frame*2):]
for i_area, areas in enumerate(list_poly):
for i_point in xrange(0,len(x)):
if areas.contains_point((int(x[i_point]), int(y[i_point]))):
if z[i_point] < (4.232 - (1.433 * 2)):
votes[i_area,0] +=1
elif z[i_point] > (4.232 - (1.433 * 2)) and z[i_point] < (4.232 - 1.433):
votes[i_area, 1] += 1
elif z[i_point] > (4.232 - 1.433):
votes[i_area, 2] += 1
if len(space_features)>0:
space_features = np.vstack((space_features,normalize(votes.reshape((1,-1)))))
else:
space_features = normalize(votes.reshape((1,-1)))
=======
feature_p_2 = data_organizer.load_matrix_pickle('C:/Users/dario.dotti/Desktop/Hier_AE_deliverable/head_joint_id1/feature_matrix_participant_master_task_l2_new_realCoordinates.txt')
#cl_model = data_organizer.load_matrix_pickle('C:/Users/dario.dotti/Desktop/Hier_AE_deliverable/40_cluster_model_layer2_new.txt')
feature_p = feature_p_1[:46]+feature_p_2[:46]
real_coord = feature_p_1[46:]+feature_p_2[46:]
print len(feature_p),len(real_coord)
print feature_p[45]
print real_coord[0]
>>>>>>> 9348384985d2847c272133ff77ce6181ca1fa082
AE_weights_level_2 = data_organizer.load_matrix_pickle(
'C:/Users/dario.dotti/Desktop/Hier_AE_deliverable/ae/head_joint_id1/169weights_l2_001_new.txt')
data_new = [f for part in feature_p for task in part for f in task]
<<<<<<< HEAD
matrix_activations_data_l2 = AE_rec.hid_unit_activation_allLayers(data_new,AE_weights_level_2)
print matrix_activations_data_l2.shape,space_features.shape
#matrix_activations_data_l2 = np.hstack((matrix_activations_data_l2, space_features))
cluster_labels = KMeans(n_clusters=30).fit_predict(matrix_activations_data_l2)
n_labels = len(np.unique(cluster_labels))
start = 0
#cl_id_task = []
hist_matrix = []
for i_p in xrange(0, len(feature_p)):
for n_task in xrange(len(feature_p[i_p])):
hist_areas = np.zeros((1,n_labels*48),dtype=int)
#hist= np.zeros((1,n_labels),dtype=int)
for i_l,l in enumerate(cluster_labels[start:(start + len(feature_p[i_p][n_task]))]):
s = space_features[start+i_l]
idx_max = np.where(s==np.max(s))[0][0]
hist_areas[0,(idx_max*n_labels)+l] += 1
#hist[0,l] +=1
hist_matrix.append(hist_areas)
#cl_id_task.append(cluster_labels[start:(start + len(feature_p[i_p][n_task]))])
start += len(feature_p[i_p][n_task])
hist_matrix = np.concatenate(hist_matrix,axis=0)
print hist_matrix.shape
=======
matrix_activations_data_l2 = AE_rec.hid_unit_activation_allLayers(data_new,AE_weights_level_2)
my_kmean = KMeans(n_clusters=20)
cluster_labels = my_kmean.fit_predict(matrix_activations_data_l2)
#cluster_labels = cl_model.predict(matrix_activations_data_l2)
n_labels = len(np.unique(cluster_labels))
start = 0
cl_id_task = []
hist_matrix = []
for i_p in xrange(0, len(feature_p)):
for n_task in xrange(len(feature_p[i_p])):
hist= np.zeros((1,n_labels))
for l in cluster_labels[start:(start + len(feature_p[i_p][n_task]))]:
hist[0,l] +=1
hist_matrix.append(hist)
#cl_id_task.append(cluster_labels[start:(start + len(feature_p[i_p][n_task]))])
#start += len(feature_p[i_p][n_task])
hist_matrix = np.concatenate(hist_matrix,axis=0)
### BOW ###
>>>>>>> 9348384985d2847c272133ff77ce6181ca1fa082
task_labels=[]
for i_p in xrange(0, len(feature_p)):
for n_task in xrange(len(feature_p[i_p])):
task_labels.append(n_task)
for t in xrange(0,len(task_labels)):
if task_labels[t] == 1:
task_labels[t] = 0
if task_labels[t]== 4:
task_labels[t]= 5
<<<<<<< HEAD
# idx = np.where(np.array(task_labels) == 1)
# task_labels = np.delete(task_labels,idx)
# hist_matrix = np.delete(hist_matrix,idx,axis=0)
#
# idx = np.where(np.array(task_labels) == 0)
# task_labels = np.delete(task_labels,idx)
# hist_matrix = np.delete(hist_matrix,idx,axis=0)
for i in range(0,5):
X_train, X_test, y_train, y_test = train_test_split(hist_matrix, task_labels, test_size=0.2)
model = KNeighborsClassifier(n_neighbors=1, n_jobs=-1,weights='distance').fit(X_train, y_train)
y_pred = model.predict(X_test)
print classification_report(y_test, y_pred)
=======
#
# # idx = np.where(np.array(task_labels) == 1)
# # task_labels = np.delete(task_labels,idx)
# # hist_matrix = np.delete(hist_matrix,idx,axis=0)
#
#
# for i in range(0,5):
# X_train, X_test, y_train, y_test = train_test_split(hist_matrix, task_labels, test_size=0.1)
# model = KNeighborsClassifier(n_neighbors=2, n_jobs=-1,weights='distance').fit(X_train, y_train)
# y_pred = model.predict(X_test)
#
# print classification_report(y_test, y_pred)
## feature and label vector all participants but one ##
r_list=[]
start = 0
for i_p in xrange(0, len(feature_p)):
print '## subject: ',i_p
test_p = hist_matrix[start:(start+len(feature_p[i_p]))]
label_p = task_labels[start:(start+len(feature_p[i_p]))]
train_ps = np.vstack((hist_matrix[:start,:], hist_matrix[start+len(feature_p[i_p]):,:]))
label_ps = task_labels[:start]+task_labels[start+len(feature_p[i_p]):]#np.vstack((task_labels[:start], task_labels[start+len(feature_p[i_p]):]))
model = KNeighborsClassifier(n_neighbors=3, n_jobs=-1, weights='distance').fit(train_ps, np.array(label_ps).ravel())
# model = svm.NuSVC(nu=0.5,decision_function_shape='ovr',class_weight={1:10,3:.5}).fit(X_train, y_train)#nu=0.05, ,class_weight={1:10,3:.5}
y_pred = model.predict(test_p)
print y_pred,label_p
print classification_report(label_p, y_pred)
#print accuracy_score(label_p,y_pred)
#r = precision_recall_fscore_support(label_p,y_pred,average='weighted')
start += len(feature_p[i_p])
print np.mean(np.array(r_list))
>>>>>>> 9348384985d2847c272133ff77ce6181ca1fa082
<<<<<<< HEAD
=======
###
# task_1 = []
# for i in xrange(3,len(cl_id_task)-2,6):
# task_1.append(cl_id_task[i])
#
#
# bayes_matrix = []
# for t in task_1:
# for i in range(0,len(t)-2,3):
# bayes_matrix.append([t[i],t[i+1],t[i+2]])
#
# bayes_matrix = np.array(bayes_matrix)
#
#
# model = BayesianNetwork.from_samples(bayes_matrix)#, algorithm='chow-liu'
#
# print model.structure
#
# for s in bayes_matrix:
# print model.predict_proba({'0':s[0], '1':s[1]})
# print model.predict([[s[0],s[1],None]])
# print s
# a = 1
>>>>>>> 9348384985d2847c272133ff77ce6181ca1fa082
|
{"/demo.py": ["/data_organizer.py", "/video_traj.py", "/ambient_sensors.py"]}
|
7,363
|
Zumbalamambo/behavior_understanding_indoor_dataset
|
refs/heads/master
|
/pecs_data_analysis.py
|
import cv2
import numpy as np
from sklearn.preprocessing import normalize
from datetime import datetime, timedelta
from collections import Counter
import data_organizer
import video_traj
import img_processing
# Kinect working depth range in metres; cube_size is the width of one of the
# three equal depth bands the range is split into (used elsewhere in this
# module — not visible in this chunk).
kinect_max_distance= 4.5
kinect_min_distance=0.5
cube_size = (kinect_max_distance-kinect_min_distance)/3
def draw_joints_and_tracks(body_points, scene):
    """Replay skeleton frames over *scene*: limbs per frame, joint-3 trail persistent.

    Each frame's limb segments are drawn on a copy of *scene*; the trajectory
    of joint index 3 is drawn permanently onto *scene* itself. Frames with
    missing/garbled coordinates are skipped via the broad except below.
    """
    color = (0, 0, 255)
    # draw line between joints
    thickness = 3
    line_color = (19, 19, 164)
    ##check patches are correct
    # for i_rect, rect in enumerate(scene_patches):
    #     cv2.rectangle(scene, (int(rect.vertices[1][0]), int(rect.vertices[1][1])),
    #                   (int(rect.vertices[3][0]), int(rect.vertices[3][1])), (0, 0, 0))
    #
    #     ## write number of patch on img
    #     cv2.putText(scene, str(i_rect), (int(rect.vertices[1][0]) + 10, int(rect.vertices[1][1]) + 20),
    #                 cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 2)
    for n_frame, traj_body_joints in enumerate(body_points):
        # n_frame = n_frame+1402
        # if n_frame < 4870:
        #     continue
        temp_img = scene.copy()
        # draw joints
        print n_frame
        # first position skipped cause there are other info stored
        try:
            # torso
            cv2.line(temp_img, (int(float(traj_body_joints[4, 0])), int(float(traj_body_joints[4, 1]))),
                     (int(float(traj_body_joints[3, 0])), int(float(traj_body_joints[3, 1]))), line_color, thickness)
            cv2.line(temp_img, (int(float(traj_body_joints[3, 0])), int(float(traj_body_joints[3, 1]))),
                     (int(float(traj_body_joints[2, 0])), int(float(traj_body_joints[2, 1]))), line_color, thickness)
            cv2.line(temp_img, (int(float(traj_body_joints[2, 0])), int(float(traj_body_joints[2, 1]))),
                     (int(float(traj_body_joints[1, 0])), int(float(traj_body_joints[1, 1]))), line_color, thickness)
            # shoulder
            cv2.line(temp_img, (int(float(traj_body_joints[21, 0])), int(float(traj_body_joints[21, 1]))),
                     (int(float(traj_body_joints[9, 0])), int(float(traj_body_joints[9, 1]))), line_color, thickness)
            cv2.line(temp_img, (int(float(traj_body_joints[21, 0])), int(float(traj_body_joints[21, 1]))),
                     (int(float(traj_body_joints[5, 0])), int(float(traj_body_joints[5, 1]))), line_color, thickness)
            # hips
            cv2.line(temp_img, (int(float(traj_body_joints[1, 0])), int(float(traj_body_joints[1, 1]))),
                     (int(float(traj_body_joints[17, 0])), int(float(traj_body_joints[17, 1]))), line_color, thickness)
            cv2.line(temp_img, (int(float(traj_body_joints[1, 0])), int(float(traj_body_joints[1, 1]))),
                     (int(float(traj_body_joints[13, 0])), int(float(traj_body_joints[13, 1]))), line_color, thickness)
            # right arm
            cv2.line(temp_img, (int(float(traj_body_joints[9, 0])), int(float(traj_body_joints[9, 1]))),
                     (int(float(traj_body_joints[10, 0])), int(float(traj_body_joints[10, 1]))), line_color, thickness)
            cv2.line(temp_img, (int(float(traj_body_joints[10, 0])), int(float(traj_body_joints[10, 1]))),
                     (int(float(traj_body_joints[11, 0])), int(float(traj_body_joints[11, 1]))), line_color, thickness)
            cv2.line(temp_img, (int(float(traj_body_joints[11, 0])), int(float(traj_body_joints[11, 1]))),
                     (int(float(traj_body_joints[12, 0])), int(float(traj_body_joints[12, 1]))), line_color, thickness)
            # left arm
            cv2.line(temp_img, (int(float(traj_body_joints[5, 0])), int(float(traj_body_joints[5, 1]))),
                     (int(float(traj_body_joints[6, 0])), int(float(traj_body_joints[6, 1]))), line_color, thickness)
            cv2.line(temp_img, (int(float(traj_body_joints[6, 0])), int(float(traj_body_joints[6, 1]))),
                     (int(float(traj_body_joints[7, 0])), int(float(traj_body_joints[7, 1]))), line_color, thickness)
            cv2.line(temp_img, (int(float(traj_body_joints[7, 0])), int(float(traj_body_joints[7, 1]))),
                     (int(float(traj_body_joints[8, 0])), int(float(traj_body_joints[8, 1]))), line_color, thickness)
            # left leg
            cv2.line(temp_img, (int(float(traj_body_joints[13, 0])), int(float(traj_body_joints[13, 1]))),
                     (int(float(traj_body_joints[14, 0])), int(float(traj_body_joints[14, 1]))), line_color, thickness)
            cv2.line(temp_img, (int(float(traj_body_joints[14, 0])), int(float(traj_body_joints[14, 1]))),
                     (int(float(traj_body_joints[15, 0])), int(float(traj_body_joints[15, 1]))), line_color, thickness)
            cv2.line(temp_img, (int(float(traj_body_joints[15, 0])), int(float(traj_body_joints[15, 1]))),
                     (int(float(traj_body_joints[16, 0])), int(float(traj_body_joints[16, 1]))), line_color, thickness)
            # right leg
            cv2.line(temp_img, (int(float(traj_body_joints[17, 0])), int(float(traj_body_joints[17, 1]))),
                     (int(float(traj_body_joints[18, 0])), int(float(traj_body_joints[18, 1]))), line_color, thickness)
            cv2.line(temp_img, (int(float(traj_body_joints[18, 0])), int(float(traj_body_joints[18, 1]))),
                     (int(float(traj_body_joints[19, 0])), int(float(traj_body_joints[19, 1]))), line_color, thickness)
            cv2.line(temp_img, (int(float(traj_body_joints[19, 0])), int(float(traj_body_joints[19, 1]))),
                     (int(float(traj_body_joints[20, 0])), int(float(traj_body_joints[20, 1]))), line_color, thickness)
            if n_frame > 0:
                for i, joint in enumerate(traj_body_joints):
                    if i == 0:
                        # row 0 carries frame metadata, not coordinates
                        continue
                    # NOTE(review): joints other than index 3 get circled twice
                    # (here and in the else branch below) — confirm intended.
                    cv2.circle(temp_img, (int(float(joint[0])), int(float(joint[1]))), 2, color, -1)
                    if i == 3 and n_frame > 0:
                        ##draw trajectories
                        cv2.circle(scene, (int(float(joint[0])), int(float(joint[1]))), 2, color, -1)
                    else:
                        ##draw joint
                        cv2.circle(temp_img, (int(float(joint[0])), int(float(joint[1]))), 2, color, -1)
            cv2.imshow('lab', temp_img)
            cv2.waitKey(1)
        except:
            # frames with malformed coordinate strings are simply skipped
            print 'traj coordinates not available'
            continue
def sort_skeletons(task_skeleton_data):
ids = map(lambda line: line[0][2], task_skeleton_data)
print 'skeleton id: ', Counter(ids).most_common()
skeletons = []
for counter_ids in Counter(ids).most_common():
skeleton_id = task_skeleton_data
main_id = counter_ids[0]
new_joints_points = []
for i_point, points in enumerate(skeleton_id):
if points[0][2] == main_id:
if len(new_joints_points) == 0: print points[0]
new_joints_points.append(points)
skeleton_id = new_joints_points
skeletons.append(skeleton_id)
skeleton_data_sorted = skeletons[1]
data_organizer.save_matrix_pickle(skeleton_data_sorted,
'C:/Users/dario.dotti/Documents/pecs_data_review/skeletons_confusion_behavior_08082017_test.txt')
return skeleton_data_sorted
def org_data_in_timeIntervals(skeleton_data, timeInterval_slice):
#get all time data from the list dropping the decimal
content_time = map(lambda line: line[0,1].split(' ')[1].split('.')[0],skeleton_data)
#date time library
init_t = datetime.strptime(content_time[0],'%H:%M:%S') #+ ' ' + timeInterval_slice[3]
end_t = datetime.strptime(content_time[len(content_time)-1],'%H:%M:%S')
x = datetime.strptime('0:0:0','%H:%M:%S')
tot_duration = (end_t-init_t)
#decide the size of time slices
# size_slice= tot_duration/12
# hours, remainder = divmod(size_slice.seconds, 3600)
# minutes, seconds = divmod(remainder, 60)
hours = timeInterval_slice[0]
minutes = timeInterval_slice[1]
seconds = timeInterval_slice[2]
my_time_slice = timedelta(hours=hours,minutes=minutes,seconds=seconds)
print 'time slice selected: ' + str(my_time_slice)
#initialize list
time_slices = []
time_slices_append = time_slices.append
c = (end_t-my_time_slice)
#get data in every timeslices
while init_t < (end_t-my_time_slice):
list_time_interval = []
list_time_interval_append = list_time_interval.append
for t in xrange(len(content_time)):
if datetime.strptime(content_time[t],'%H:%M:%S')>= init_t and datetime.strptime(content_time[t],'%H:%M:%S') < init_t + my_time_slice:
list_time_interval_append(skeleton_data[t])
if datetime.strptime(content_time[t],'%H:%M:%S') > init_t + my_time_slice:
break
#print len(list_time_interval)
##save time interval without distinction of part of the day
time_slices_append(list_time_interval)
init_t= init_t+my_time_slice
return time_slices
def histograms_of_oriented_trajectories(list_poly, time_slices):
#print kinect_max_distance, kinect_min_distance
hot_all_data_matrix = []
hot_all_data_matrix_append = hot_all_data_matrix.append
for i in xrange(0, len(time_slices)):
##Checking the start time of every time slice
if (len(time_slices[i]) > 1):
print 'start time: %s' % str(time_slices[i][0][0][1])
else:
print 'no data in this time slice'
continue
# get x,y,z of every traj point after smoothing process
x_filtered, y_filtered, zs, ids = img_processing.get_coordinate_points(time_slices[i], joint_id=3)
# initialize histogram of oriented tracklets
hot_matrix = []
for p in xrange(0, len(list_poly)):
tracklet_in_cube_f = []
tracklet_in_cube_c = []
tracklet_in_cube_middle = []
tracklet_in_cube_append_f = tracklet_in_cube_f.append
tracklet_in_cube_append_c = tracklet_in_cube_c.append
tracklet_in_cube_append_middle = tracklet_in_cube_middle.append
for ci in xrange(0, len(x_filtered)):
if np.isinf(x_filtered[ci]) or np.isinf(y_filtered[ci]): continue
# 2d polygon
if list_poly[p].contains_point((int(x_filtered[ci]), int(y_filtered[ci]))):
## 3d cube close to the camera
if zs[ci] <= (kinect_min_distance + cube_size):
tracklet_in_cube_append_c([x_filtered[ci], y_filtered[ci], ids[ci]])
elif zs[ci] > (kinect_min_distance + cube_size) and zs[ci] < (
kinect_min_distance + (cube_size * 2)): #
tracklet_in_cube_append_middle([x_filtered[ci], y_filtered[ci], ids[ci]])
elif zs[ci] >= kinect_min_distance + (cube_size * 2): ##3d cube far from the camera
tracklet_in_cube_append_f([x_filtered[ci], y_filtered[ci], ids[ci]])
print len(tracklet_in_cube_c), len(tracklet_in_cube_middle), len(tracklet_in_cube_f)
for three_d_poly in [tracklet_in_cube_c, tracklet_in_cube_middle, tracklet_in_cube_f]:
if len(three_d_poly) > 0:
## for tracklet in cuboids compute HOT following paper
hot_single_poly = img_processing.histogram_oriented_tracklets(three_d_poly)
## compute hot+curvature
# hot_single_poly = my_img_proc.histogram_oriented_tracklets_plus_curvature(three_d_poly)
else:
hot_single_poly = np.zeros((24))
##add to general matrix
if len(hot_matrix) > 0:
hot_matrix = np.hstack((hot_matrix, hot_single_poly))
else:
hot_matrix = hot_single_poly
hot_all_data_matrix_append(hot_matrix)
## normalize the final matrix
normalized_finalMatrix = np.array(normalize(np.array(hot_all_data_matrix), norm='l2'))
##add extra bin containing time
##return patinet id
patient_id = ids[0]
print 'HOT final matrix size: ', normalized_finalMatrix.shape
return normalized_finalMatrix, patient_id
def main_pecs_data():
##get raw data for displaying
task_skeleton_data = data_organizer.load_matrix_pickle('C:/Users/dario.dotti/Documents/pecs_data_review/skeletons_repetitive_behavior_08082017.txt')
##if data contains multiple skeletons here I sort them cronologically
sort_skeletons(task_skeleton_data)
my_room = np.zeros((424,512,3),dtype=np.uint8)
my_room += 255
list_poly = img_processing.divide_image(my_room)
#draw_joints_and_tracks(task_skeleton_data,my_room)
#return 0
skeleton_data_in_time_slices = org_data_in_timeIntervals(task_skeleton_data,[0,0,2])
HOT_data, patient_ID = histograms_of_oriented_trajectories(list_poly, skeleton_data_in_time_slices)
data_organizer.save_matrix_pickle(HOT_data,
'C:/Users/dario.dotti/Documents/pecs_data_review/HOT_repetitive_behavior_08082017.txt')
if __name__ == '__main__':
main_pecs_data()
|
{"/demo.py": ["/data_organizer.py", "/video_traj.py", "/ambient_sensors.py"]}
|
7,364
|
Zumbalamambo/behavior_understanding_indoor_dataset
|
refs/heads/master
|
/realtime_posture_extr.py
|
import numpy as np
import matplotlib.pyplot as plt
import cv2
from sklearn import decomposition
from collections import Counter
from math import atan2,degrees
from sklearn.preprocessing import normalize
from sklearn.metrics import euclidean_distances
import img_processing
import data_organizer
import hierarchical_ae_learning_methods as h_ae
def plot_dist():
##plot horizontal distances
# x_dist_ = np.array(dist_x)
# plt.scatter(range(len(x_dist_)),x_dist_[:,0],marker='+',c='r')
# plt.scatter(range(len(x_dist_)), x_dist_[:, 1], marker='^',c='g')
# plt.scatter(range(len(x_dist_)), x_dist_[:, 2])
# plt.show()
##plot vertical distances
y_dist_ = np.array(dist_y)
plt.scatter(range(len(y_dist_)), y_dist_[:, 0], marker='+', c='r')
plt.scatter(range(len(y_dist_)), y_dist_[:, 1], marker='^', c='g')
plt.show()
dist_x = []
dist_y = []
def get_dist_arms(shoulder_left_x, shoulder_left_y, shoulder_right_x, shoulder_right_y, elbow_left_x,
elbow_left_y, elbow_right_x, elbow_right_y, wrist_left_x,wrist_left_y, wrist_right_x, wrist_right_y):
### horizontal distance
# dist_shoulder = np.sqrt(((shoulder_left_x[0] - shoulder_right_x[0]) ** 2) + ((shoulder_left_y[0] - shoulder_right_y[0]) ** 2))
# dist_elbow = np.sqrt(((elbow_left_x[0] - elbow_right_x[0]) ** 2) + ((elbow_left_y[0] - elbow_right_y[0]) ** 2))
# dist_wrist = np.sqrt(((wrist_left_x[0] - wrist_right_x[0]) ** 2) + ((wrist_left_y[0] - wrist_right_y[0]) ** 2))
#print dist_shoulder,dist_elbow,dist_wrist
#dist_x.append([dist_shoulder, dist_elbow, dist_wrist])
### vertical distances
dist_shoulder_left = np.sqrt(
((shoulder_left_x[0] - wrist_left_x[0]) ** 2) + ((shoulder_left_y[0] - wrist_left_y[0]) ** 2))
dist_shoulder_right = np.sqrt(
((shoulder_right_x[0] - wrist_right_x[0]) ** 2) + ((shoulder_right_y[0] - wrist_right_y[0]) ** 2))
dist_y.append([dist_shoulder_left,dist_shoulder_right])
def draw_arms(temp_img,shoulder_left_x, shoulder_left_y, shoulder_right_x, shoulder_right_y, elbow_left_x,
elbow_left_y, elbow_right_x, elbow_right_y, wrist_left_x,wrist_left_y, wrist_right_x, wrist_right_y,center_x,center_y,head_x,head_y,spine_x,spine_y):
## draw arm right
cv2.line(temp_img, (shoulder_right_x, shoulder_right_y),
(elbow_right_x, elbow_right_y), (0, 255, 0), 2)
cv2.line(temp_img, (elbow_right_x, elbow_right_y),
(wrist_right_x, wrist_right_y), (0, 255, 0), 2)
## draw arm left
cv2.line(temp_img, (shoulder_left_x, shoulder_left_y),
(elbow_left_x, elbow_left_y), (255, 0, 0), 2)
cv2.line(temp_img, (elbow_left_x, elbow_left_y),
(wrist_left_x, wrist_left_y), (255, 0, 0), 2)
cv2.line(temp_img,(head_x, head_y),
(spine_x, spine_y), (255, 0, 0), 2)
## draw center
cv2.circle(temp_img, (center_x, center_y), 3, (0, 0, 255), -1)
cv2.imshow('ciao', temp_img)
cv2.waitKey(0)
def clockwise_slope(A, B, C):
return (C[1]-A[1]) * (B[0]-A[0]) > (B[1]-A[1]) * (C[0]-A[0])
# Return true if line segments AB and CD intersect
def lines_intersect(A, B, C, D):
return clockwise_slope(A, C, D) != clockwise_slope(B, C, D) and clockwise_slope(A, B, C) != clockwise_slope(A, B, D)
def extract_arms_pos(shoulder_left_x, shoulder_left_y, shoulder_right_x, shoulder_right_y, elbow_left_x,
elbow_left_y, elbow_right_x, elbow_right_y, wrist_left_x,wrist_left_y, wrist_right_x, wrist_right_y, head_x, head_y,spineBase_x,spineBase_y, scene):
imgs = []
##get 10 fps
for i_coord in range(0,len(shoulder_left_x),3):
#print i_coord
### to remove noise i check if the spine lines pass through the shoulder line ##
# if not lines_intersect((shoulder_left_x[i_coord], shoulder_left_y[i_coord]), (shoulder_right_x[i_coord], shoulder_right_y[i_coord]), \
# (head_x[i_coord], head_y[i_coord]),
# (spineBase_x[i_coord], spineBase_y[i_coord])):
# print 'no intersection'
# continue
###Find center X ###
highest_point_index = np.where([shoulder_right_x[i_coord], elbow_right_x[i_coord], wrist_right_x[i_coord], shoulder_left_x[i_coord], elbow_left_x[i_coord], wrist_left_x[i_coord]] \
== np.min([shoulder_right_x[i_coord], elbow_right_x[i_coord], wrist_right_x[i_coord], shoulder_left_x[i_coord], elbow_left_x[i_coord], wrist_left_x[i_coord]]))[0][0]
highest = [shoulder_right_x[i_coord], elbow_right_x[i_coord], wrist_right_x[i_coord], shoulder_left_x[i_coord], elbow_left_x[i_coord], wrist_left_x[i_coord]][highest_point_index]
lowest_point_index = np.where([shoulder_right_x[i_coord], elbow_right_x[i_coord], wrist_right_x[i_coord], shoulder_left_x[i_coord], elbow_left_x[i_coord], wrist_left_x[i_coord]] \
== np.max([shoulder_right_x[i_coord], elbow_right_x[i_coord], wrist_right_x[i_coord], shoulder_left_x[i_coord], elbow_left_x[i_coord], wrist_left_x[i_coord]]))[0][0]
lowest = [shoulder_right_x[i_coord], elbow_right_x[i_coord], wrist_right_x[i_coord], shoulder_left_x[i_coord], elbow_left_x[i_coord], wrist_left_x[i_coord]][lowest_point_index]
center_x = highest + int((lowest - highest) / 2)
####Find center Y ##
highest_point_index = np.where([shoulder_right_y[i_coord],elbow_right_y[i_coord],wrist_right_y[i_coord],shoulder_left_y[i_coord],elbow_left_y[i_coord],wrist_left_y[i_coord]] \
== np.min([shoulder_right_y[i_coord],elbow_right_y[i_coord],wrist_right_y[i_coord],shoulder_left_y[i_coord],elbow_left_y[i_coord],wrist_left_y[i_coord]]))[0][0]
highest = [shoulder_right_y[i_coord], elbow_right_y[i_coord], wrist_right_y[i_coord], shoulder_left_y[i_coord], elbow_left_y[i_coord], wrist_left_y[i_coord]][highest_point_index]
lowest_point_index = np.where([shoulder_right_y[i_coord], elbow_right_y[i_coord], wrist_right_y[i_coord], shoulder_left_y[i_coord], elbow_left_y[i_coord], wrist_left_y[i_coord]] \
== np.max([shoulder_right_y[i_coord], elbow_right_y[i_coord], wrist_right_y[i_coord], shoulder_left_y[i_coord], elbow_left_y[i_coord],
wrist_left_y[i_coord]]))[0][0]
lowest = [shoulder_right_y[i_coord], elbow_right_y[i_coord], wrist_right_y[i_coord], shoulder_left_y[i_coord], elbow_left_y[i_coord], wrist_left_y[i_coord]][lowest_point_index]
center_y = highest+int((lowest - highest) /2)
### Draw ##
# temp_img = scene.copy()#np.zeros((424, 512, 3), dtype=np.uint8)
#
# draw_arms(temp_img,shoulder_left_x[i_coord], shoulder_left_y[i_coord], shoulder_right_x[i_coord], shoulder_right_y[i_coord], elbow_left_x[i_coord],
# elbow_left_y[i_coord], elbow_right_x[i_coord], elbow_right_y[i_coord], wrist_left_x[i_coord], wrist_left_y[i_coord],
# wrist_right_x[i_coord], wrist_right_y[i_coord],center_x,center_y,head_x[i_coord],head_y[i_coord],spineBase_x[i_coord] ,spineBase_y[i_coord])
##find difference between the current center and the new img center, use this difference to convert everything
feature_img = np.zeros((120, 120))
diff_x = abs(center_x - (feature_img.shape[1]/2))
diff_y = abs(center_y - (feature_img.shape[0]/2))
limb_pos_x = []
for limb_x in [shoulder_right_x[i_coord], elbow_right_x[i_coord], wrist_right_x[i_coord], shoulder_left_x[i_coord], elbow_left_x[i_coord], wrist_left_x[i_coord], head_x[i_coord], spineBase_x[i_coord]]:
limb_pos_x.append(int(limb_x-diff_x))
limb_pos_y = []
for limb_y in [shoulder_right_y[i_coord],elbow_right_y[i_coord],wrist_right_y[i_coord],shoulder_left_y[i_coord],elbow_left_y[i_coord],wrist_left_y[i_coord], head_y[i_coord], spineBase_y[i_coord]]:
limb_pos_y.append(int(limb_y - diff_y))
## draw on black img the arms with value 255
for i_limb in [0,1,3,4]:
points_on_line = h_ae.createLineIterator(np.array([limb_pos_x[i_limb],limb_pos_y[i_limb]])\
,np.array([limb_pos_x[i_limb + 1],limb_pos_y[i_limb + 1]]),feature_img)
for p in points_on_line:
##if we want to display on img
feature_img[int(p[1]), int(p[0])] = 0.99
if int(p[0])+2 < feature_img.shape[1] and int(p[1])+2 < feature_img.shape[0]:
##right
feature_img[int(p[1]) + 1, int(p[0])] = 0.99
feature_img[int(p[1]) + 2, int(p[0])] = 0.99
##left
feature_img[int(p[1]) - 1, int(p[0])] = 0.99
feature_img[int(p[1]) - 2, int(p[0])] = 0.99
##up
feature_img[int(p[1]), int(p[0]) - 1] = 0.99
feature_img[int(p[1]), int(p[0]) - 2] = 0.99
##down
feature_img[int(p[1]), int(p[0]) + 1] = 0.99
feature_img[int(p[1]), int(p[0]) + 2] = 0.99
### add shoulders
points_on_line = h_ae.createLineIterator(np.array([limb_pos_x[3], limb_pos_y[3]]) \
, np.array([limb_pos_x[0], limb_pos_y[0]]), feature_img)
for p in points_on_line:
##if we want to display on img
feature_img[int(p[1]), int(p[0])] = 0.99
if int(p[0]) + 2 < feature_img.shape[1] and int(p[1]) + 2 < feature_img.shape[0]:
##right
feature_img[int(p[1]) + 1, int(p[0])] = 0.99
feature_img[int(p[1]) + 2, int(p[0])] = 0.99
##left
feature_img[int(p[1]) - 1, int(p[0])] = 0.99
feature_img[int(p[1]) - 2, int(p[0])] = 0.99
##up
feature_img[int(p[1]), int(p[0]) - 1] = 0.99
feature_img[int(p[1]), int(p[0]) - 2] = 0.99
##down
feature_img[int(p[1]), int(p[0]) + 1] = 0.99
feature_img[int(p[1]), int(p[0]) + 2] = 0.99
### add spine ##
points_on_line = h_ae.createLineIterator(np.array([limb_pos_x[6], limb_pos_y[6]]) \
, np.array([limb_pos_x[7], limb_pos_y[7]]), feature_img)
for p in points_on_line:
##if we want to display on img
feature_img[int(p[1]), int(p[0])] = 0.99
if int(p[0]) + 2 < feature_img.shape[1] and int(p[1]) + 2 < feature_img.shape[0]:
##right
feature_img[int(p[1]) + 1, int(p[0])] = 0.99
feature_img[int(p[1]) + 2, int(p[0])] = 0.99
##left
feature_img[int(p[1]) - 1, int(p[0])] = 0.99
feature_img[int(p[1]) - 2, int(p[0])] = 0.99
##up
feature_img[int(p[1]), int(p[0]) - 1] = 0.99
feature_img[int(p[1]), int(p[0]) - 2] = 0.99
##down
feature_img[int(p[1]), int(p[0]) + 1] = 0.99
feature_img[int(p[1]), int(p[0]) + 2] = 0.99
# cv2.imshow('feature_img',feature_img)
# cv2.waitKey(0)
if len(imgs) > 0: imgs = np.vstack((imgs, feature_img.reshape((1, -1))))
else: imgs = feature_img.reshape((1, -1))
return imgs
AE_weights_level_1 = data_organizer.load_matrix_pickle('C:/Users/dario.dotti/Documents/data_for_personality_exp/after_data_cleaning/posture_data/upperBody/deep_AE_900_225_weights_008hd1_noNoise.txt')
hd_weights = AE_weights_level_1[0][0]
bias_1_level1 = AE_weights_level_1[1]
#pca = decomposition.PCA(n_components=100) # 2-dimensional PCA whiten=True, svd_solver='randomized'
#pca = data_organizer.load_matrix_pickle('C:/Users/dario.dotti/Documents/data_for_personality_exp/after_data_cleaning/posture_data/upperBody/clustering_posture_5sec/100pca_deep900225AE_5sec_data.txt')
#cluster_model = data_organizer.load_matrix_pickle('C:/Users/dario.dotti/Documents/data_for_personality_exp/after_data_cleaning/posture_data/upperBody/clustering_posture_5sec/linearSVM_agglomerative15c_5sec_100pca.txt')
def compute_hot_f(xs,ys):
orientation_intervals = [[range(0, 45)], [range(45, 90)], [range(90, 135)], [range(135, 180)], [range(180, 225)],
[range(225, 270)], \
[range(270, 315)], [range(315, 360)]]
magnitude_intervals = [[range(0, 4)], [range(4, 10)], [range(10, 200)]]
hot_matrix = np.zeros((len(orientation_intervals), len(magnitude_intervals)))
step = 10
for i in xrange(0, len(xs) - step):
dx = float(xs[i + step]) - float(xs[i])
dy = float(ys[i + step]) - float(ys[i])
orientation = int(degrees(atan2(dy, dx)) % 360)
magn = int(np.sqrt((np.power(dx, 2) + np.power(dy, 2))))
# list_magn.append(magn)
for c_interval, o_interval in enumerate(orientation_intervals):
if orientation in o_interval[0]:
if magn in magnitude_intervals[0][0]:
hot_matrix[c_interval][0] += 1
break
elif magn in magnitude_intervals[1][0]:
hot_matrix[c_interval][1] += 1
break
elif magn in magnitude_intervals[2][0]:
hot_matrix[c_interval][2] += 1
break
##control whether the values are in the intervals
if hot_matrix.sum() == 0:
print 'orientation or magn not in the intervals'
print orientation, magn
return normalize(hot_matrix.reshape((1,-1)))
def subject_in_key_areas(head_x, head_y, head_z, key_areas):
boxes2D_pos = key_areas[0]
boxes3D_pos = key_areas[1]
in_key_areas = np.zeros((1,len(boxes2D_pos)))
for i_coord in range(0, len(head_x), int(len(head_x) / 15)):
for i_b,b in enumerate(boxes2D_pos):
if b.contains_point((int(head_x[i_coord]),int(head_y[i_coord]))):
if abs(head_z[i_b] - boxes3D_pos[i_b]) < 0.2:
#print i_b, head_z[i_b], boxes3D_pos[i_b]
in_key_areas[0][i_b] += 1
#print in_key_areas
return in_key_areas/20
def compute_raw_joint_stats(shoulder_left_x, shoulder_left_y, shoulder_right_x, shoulder_right_y, elbow_left_x,
elbow_left_y, elbow_right_x, elbow_right_y, wrist_left_x,wrist_left_y, wrist_right_x, wrist_right_y,head_x, head_y,spineBase_x,spineBase_y,foot_x,foot_y):
shoulder_left = np.hstack((np.array(shoulder_left_x).reshape((len(shoulder_left_x),1)),np.array(shoulder_left_y).reshape((len(shoulder_left_x),1))))
shoulder_right = np.hstack((np.array(shoulder_right_x).reshape((len(shoulder_right_x),1)),np.array(shoulder_right_y).reshape((len(shoulder_right_x),1))))
elbow_left = np.hstack((np.array(elbow_left_x).reshape((len(elbow_left_x), 1)), np.array(elbow_left_y).reshape((len(elbow_left_x), 1))))
elbow_right = np.hstack((np.array(elbow_right_x).reshape((len(elbow_right_x), 1)), np.array(elbow_right_y).reshape((len(elbow_right_x), 1))))
wrist_left = np.hstack((np.array(wrist_left_x).reshape((len(wrist_left_x), 1)), np.array(wrist_left_y).reshape((len(wrist_left_x), 1))))
wrist_right = np.hstack((np.array(wrist_right_x).reshape((len(wrist_right_x), 1)), np.array(wrist_right_y).reshape((len(wrist_right_x), 1))))
head = np.hstack((np.array(head_x).reshape((len(head_x), 1)), np.array(head_y).reshape((len(head_x), 1))))
spineBase = np.hstack((np.array(spineBase_x).reshape((len(spineBase_x), 1)), np.array(spineBase_y).reshape((len(spineBase_x), 1))))
## normalize distance according to height of the participant ##
foot = np.hstack((np.array(foot_x).reshape((len(foot_x), 1)), np.array(foot_y).reshape((len(foot_y), 1))))
h = np.max(euclidean_distances(head, foot))
#print 'p height: ', h
joints_raw_f = np.zeros((7,6))
for i_j,joint in enumerate([head,shoulder_left,shoulder_right,elbow_left,elbow_right,wrist_left,wrist_right]):
d = euclidean_distances(joint,spineBase)
joints_raw_f[i_j,0] = np.max(d)/h
joints_raw_f[i_j, 1] = np.min(d)/h
joints_raw_f[i_j, 2] = np.std(d)
## angles computed clockwise ##
orientation = map(lambda p: img_processing.angle_to(spineBase[p],joint[p]), xrange(len(joint)))
joints_raw_f[i_j, 3] = np.max(orientation)
joints_raw_f[i_j, 4] = np.min(orientation)
joints_raw_f[i_j, 5] = np.std(orientation)
return joints_raw_f.reshape((-1))
def extract_word_posture(participant_data,key_areas,scene, goal):
#scene = np.zeros((414, 512, 3), dtype=np.uint8)
#scene += 255
task_feature_img = []
path_features =[]
<<<<<<< HEAD
=======
>>>>>>> 9348384985d2847c272133ff77ce6181ca1fa082
for i_task, task in enumerate(participant_data):
print 'task: ', i_task
if len(task) == 0: continue
n_sec_data = []
n_sec_path_features = []
for n_slice in range(0, len(task)):
if len(task[n_slice]) <= 1 : continue
#print 'n_slice ', n_slice
flat_list = [item for item in task[n_slice]]
##### arms ########
shoulder_left_x, shoulder_left_y, zs, ids = img_processing.get_coordinate_points(flat_list, joint_id=10)
shoulder_right_x, shoulder_right_y, zs, ids = img_processing.get_coordinate_points(flat_list, joint_id=6)
elbow_left_x, elbow_left_y, zs, ids = img_processing.get_coordinate_points(flat_list, joint_id=11)
elbow_right_x, elbow_right_y, zs, ids = img_processing.get_coordinate_points(flat_list, joint_id=7)
wrist_left_x, wrist_left_y, zs, ids = img_processing.get_coordinate_points(flat_list, joint_id=12)
wrist_right_x, wrist_right_y, zs, ids = img_processing.get_coordinate_points(flat_list, joint_id=8)
#### spinal ###
head_x, head_y, head_z, ids = img_processing.get_coordinate_points(flat_list, joint_id=1)
spineBase_x,spineBase_y, z, ids = img_processing.get_coordinate_points(flat_list, joint_id=4)
##
foot_x, foot_y, footz, ids = img_processing.get_coordinate_points(flat_list, joint_id=17)
# get_dist_arms(shoulder_left_x, shoulder_left_y, shoulder_right_x, shoulder_right_y, elbow_left_x,
# elbow_left_y, elbow_right_x, elbow_right_y, wrist_left_x,wrist_left_y, wrist_right_x, wrist_right_y)
<<<<<<< HEAD
if len(shoulder_left_x)< 24:
=======
if len(shoulder_left_x)< 48:
>>>>>>> 9348384985d2847c272133ff77ce6181ca1fa082
#print len(shoulder_left_x)
continue
### AE features ###
feature_imgs = extract_arms_pos(shoulder_left_x, shoulder_left_y, shoulder_right_x, shoulder_right_y, elbow_left_x,
elbow_left_y, elbow_right_x, elbow_right_y, wrist_left_x,wrist_left_y, wrist_right_x, wrist_right_y,head_x, head_y,spineBase_x,spineBase_y, scene)
#
# ### Other features ###
# ## compute hot ##
hot = compute_hot_f(spineBase_x,spineBase_y)
## check whether the participant is in key areas ##
in_key_areas = subject_in_key_areas(spineBase_x,spineBase_y, z, key_areas)
n_sec_path_features.append([hot[0], in_key_areas[0]])
## angles and distance between joints
skeleton_body = compute_raw_joint_stats(shoulder_left_x, shoulder_left_y, shoulder_right_x, shoulder_right_y, elbow_left_x,
elbow_left_y, elbow_right_x, elbow_right_y, wrist_left_x,wrist_left_y, wrist_right_x, wrist_right_y,head_x, head_y,spineBase_x,spineBase_y,foot_x,foot_y)
if goal == 'train_AE':
### if we want to extract the data to train AE
for img in feature_imgs:
if len(task_feature_img)>0: task_feature_img= np.vstack((task_feature_img,img))
else: task_feature_img = img
elif goal == 'test_AE':
### if we want to test AE for denoising and descrtize the posture ##
for img in feature_imgs:
## decompose it with trained AE and concatanate
if len(n_sec_data)>0:
##shallow AE
#n_sec_data = np.hstack((n_sec_data, img_processing.sigmoid_function((np.dot(img, hd_weights)) + bias_1_level1)))
##deep AE
act = img_processing.sigmoid_function((np.dot(img, hd_weights)) + bias_1_level1)
n_sec_data = np.hstack(
(n_sec_data, img_processing.sigmoid_function((np.dot(act, AE_weights_level_1[0][1])) + AE_weights_level_1[2])))
else:
##shallow AE
#n_sec_data = img_processing.sigmoid_function((np.dot(img, hd_weights)) + bias_1_level1)
##deep AE
act = img_processing.sigmoid_function((np.dot(img, hd_weights)) + bias_1_level1)
n_sec_data =img_processing.sigmoid_function((np.dot(act, AE_weights_level_1[0][1])) + AE_weights_level_1[2])
## when we reach desired time
<<<<<<< HEAD
if n_sec_data.shape[0] > (AE_weights_level_1[0][1].shape[1]*7):#*15
n_sec_data = n_sec_data[:(AE_weights_level_1[0][1].shape[1]*8)]#*16
## PCA and then clustering with 5 seconds concatenated data
#task_feature_img.append(cluster_model.predict(pca.transform(n_sec_data.reshape(1, -1)))[0])
#task_feature_img.append(np.array(pca.transform(n_sec_data.reshape(1, -1))[0]).reshape((1,-1)))
## raw
task_feature_img.append(np.array(n_sec_data.reshape(1, -1)))
n_sec_data = []
=======
if n_sec_data.shape[0] > (AE_weights_level_1[0][1].shape[1]*15):
n_sec_data = n_sec_data[:(AE_weights_level_1[0][1].shape[1]*16)]
## PCA and then clustering with 5 seconds concatenated data
#task_feature_img.append(cluster_model.predict(pca.transform(n_sec_data.reshape(1, -1)))[0])
#task_feature_img.append(np.array(pca.transform(n_sec_data.reshape(1, -1))[0]).reshape((1,-1)))
## raw
task_feature_img.append(np.array(n_sec_data.reshape(1, -1)))
n_sec_data = []
>>>>>>> 9348384985d2847c272133ff77ce6181ca1fa082
## average of the other features
if len(n_sec_path_features)==0:
print 'not enough data '
path_features.append(np.zeros((72)))
else:
n_sec_path_features = np.mean(n_sec_path_features, axis=0)
temp = np.hstack((n_sec_path_features[0],n_sec_path_features[1]))
path_features.append(np.hstack((temp,skeleton_body)))
n_sec_path_features = []
else:
print 'not enough frames',n_sec_data.shape[0]
n_sec_data = []
n_sec_path_features = []
#print task_feature_img
#print Counter(task_feature_img)
#task_feature_img = np.concatenate(task_feature_img, axis=0)
#path_features = np.concatenate(path_features, axis=0)
<<<<<<< HEAD
if len(path_features)>1 and len(task_feature_img)>1:
task_feature_img = np.concatenate(task_feature_img, axis=0)
task_feature_img = np.hstack((path_features,task_feature_img ))
print task_feature_img.shape
else:task_feature_img = []
=======
task_feature_img = np.hstack((path_features,np.concatenate(task_feature_img, axis=0) ))
print task_feature_img.shape
>>>>>>> 9348384985d2847c272133ff77ce6181ca1fa082
return task_feature_img
def main_posture_extr():
skeleton_data_in_tasks_and_time_slices = data_organizer.load_matrix_pickle(
<<<<<<< HEAD
'C:/Users/dario.dotti/Documents/data_for_personality_exp/after_data_cleaning/skeleton_data_in_tasks_time_slices_30fps_ordered_1sec.txt')##'C:/Users/dario.dotti/Desktop/data_recordings_master/master_skeleton_data_in_tasks_time_slices_30fps_1sec.txt')
#scene = np.zeros((424,512,3),dtype=np.uint8)
#scene += 255
scene = cv2.imread('C:/Users/dario.dotti/Desktop/data_recordings_master/images/subject_22/519.jpg')#'C:/Users/dario.dotti/Documents/Datasets/my_dataset/wandering_dataset_um/exp_scene_depth.jpg')#
=======
'C:/Users/dario.dotti/Documents/data_for_personality_exp/after_data_cleaning/skeleton_data_in_tasks_time_slices_30fps_ordered.txt')##'C:/Users/dario.dotti/Desktop/data_recordings_master/master_skeleton_data_in_tasks_time_slices_30fps.txt')
scene = np.zeros((424,512,3),dtype=np.uint8)
scene += 255
#scene = cv2.imread('C:/Users/dario.dotti/Desktop/data_recordings_master/images/subject_22/519.jpg')#'C:/Users/dario.dotti/Documents/Datasets/my_dataset/wandering_dataset_um/exp_scene_depth.jpg')#
>>>>>>> 9348384985d2847c272133ff77ce6181ca1fa082
boxes, zs, scene = data_organizer.get_areas_boxes(scene)
participants_features = []
for i_p in xrange(0,len(skeleton_data_in_tasks_and_time_slices)):
task_feature_img = extract_word_posture(skeleton_data_in_tasks_and_time_slices[i_p], [boxes,zs],scene, goal='test_AE')
participants_features.append(task_feature_img)
## plot dist ##
#plot_dist()
data_organizer.save_matrix_pickle(participants_features,
<<<<<<< HEAD
'C:/Users/dario.dotti/Documents/data_for_personality_exp/after_data_cleaning/posture_data/upperBody/experiment_upperBody_pathPlanning/RAWpostureUpperBody_path_features_skeletonF_ALLTASKS_1sec.txt')##'C:/Users/dario.dotti/Desktop/data_recordings_master/data_personality/RAWpostureUpperBody_path_features_master_2sec_skeletonF_ALLTASKS_1sec.txt')
=======
'C:/Users/dario.dotti/Documents/data_for_personality_exp/after_data_cleaning/posture_data/upperBody/experiment_upperBody_pathPlanning/RAWpostureUpperBody_path_features_2sec_skeletonF.txt')##'C:/Users/dario.dotti/Desktop/data_recordings_master/data_personality/RAWpostureUpperBody_path_features_master_2sec_skeletonF.txt')
>>>>>>> 9348384985d2847c272133ff77ce6181ca1fa082
if __name__ == '__main__':
main_posture_extr()
|
{"/demo.py": ["/data_organizer.py", "/video_traj.py", "/ambient_sensors.py"]}
|
7,365
|
Zumbalamambo/behavior_understanding_indoor_dataset
|
refs/heads/master
|
/video_traj.py
|
import cv2
import numpy as np
from sklearn.preprocessing import normalize
from datetime import datetime, timedelta
from lxml import etree
import img_processing as my_img_proc
import visualization as vis
import ambient_sensors
import data_organizer as data_org
kinect_max_distance=0
subjectID = ''
#scene = np.zeros((414,512,3),dtype=np.uint8)
#scene += 255
<<<<<<< HEAD
scene = cv2.imread('C:/Users/dario.dotti/Desktop/data_recordings_master/images/subject_20/442.jpg')#'C:/Users/dario.dotti/Documents/Datasets/my_dataset/wandering_dataset_um/exp_scene_depth.jpg')
=======
scene = cv2.imread('C:/Users/dario.dotti/Desktop/data_recordings_master/images/subject_20/1144.jpg')#'C:/Users/dario.dotti/Documents/Datasets/my_dataset/wandering_dataset_um/exp_scene_depth.jpg')
>>>>>>> 9348384985d2847c272133ff77ce6181ca1fa082
def draw_joints_and_tracks(body_points,list_poly):
##draw slices
# for p in range(0,len(list_poly)):
# cv2.rectangle(scene,(int(list_poly[p].vertices[1][0]),int(list_poly[p].vertices[1][1])),\
# (int(list_poly[p].vertices[3][0]),int(list_poly[p].vertices[3][1])),0,1)
for n_frame,traj_body_joints in enumerate(body_points):
#n_frame = n_frame+1402
# if n_frame < 4870:
# continue
temp_ing = scene.copy()
#draw joints
print n_frame
if n_frame > 250:
color = (0,0,255)
else:
color = (0,0,255)
#draw line between joints
thickness = 3
line_color = (19,19,164)
#first position skipped cause there are other info stored
#torso
cv2.line(temp_ing,(int(float(traj_body_joints[1,0])),int(float(traj_body_joints[1,1]))),(int(float(traj_body_joints[2,0])),int(float(traj_body_joints[2,1]))),line_color,thickness)
cv2.line(temp_ing,(int(float(traj_body_joints[2,0])),int(float(traj_body_joints[2,1]))),(int(float(traj_body_joints[3,0])),int(float(traj_body_joints[3,1]))),line_color,thickness)
cv2.line(temp_ing,(int(float(traj_body_joints[3,0])),int(float(traj_body_joints[3,1]))),(int(float(traj_body_joints[4,0])),int(float(traj_body_joints[4,1]))),line_color,thickness)
#shoulder
cv2.line(temp_ing,(int(float(traj_body_joints[5,0])),int(float(traj_body_joints[5,1]))),(int(float(traj_body_joints[6,0])),int(float(traj_body_joints[6,1]))),line_color,thickness)
cv2.line(temp_ing,(int(float(traj_body_joints[5,0])),int(float(traj_body_joints[5,1]))),(int(float(traj_body_joints[10,0])),int(float(traj_body_joints[10,1]))),line_color,thickness)
#hips
cv2.line(temp_ing,(int(float(traj_body_joints[4,0])),int(float(traj_body_joints[4,1]))),(int(float(traj_body_joints[14,0])),int(float(traj_body_joints[14,1]))),line_color,thickness)
cv2.line(temp_ing,(int(float(traj_body_joints[4,0])),int(float(traj_body_joints[4,1]))),(int(float(traj_body_joints[18,0])),int(float(traj_body_joints[18,1]))),line_color,thickness)
#right arm
cv2.line(temp_ing,(int(float(traj_body_joints[6,0])),int(float(traj_body_joints[6,1]))),(int(float(traj_body_joints[7,0])),int(float(traj_body_joints[7,1]))),line_color,thickness)
cv2.line(temp_ing,(int(float(traj_body_joints[7,0])),int(float(traj_body_joints[7,1]))),(int(float(traj_body_joints[8,0])),int(float(traj_body_joints[8,1]))),line_color,thickness)
cv2.line(temp_ing,(int(float(traj_body_joints[8,0])),int(float(traj_body_joints[8,1]))),(int(float(traj_body_joints[9,0])),int(float(traj_body_joints[9,1]))),line_color,thickness)
#left arm
cv2.line(temp_ing,(int(float(traj_body_joints[10,0])),int(float(traj_body_joints[10,1]))),(int(float(traj_body_joints[11,0])),int(float(traj_body_joints[11,1]))),line_color,thickness)
cv2.line(temp_ing,(int(float(traj_body_joints[11,0])),int(float(traj_body_joints[11,1]))),(int(float(traj_body_joints[12,0])),int(float(traj_body_joints[12,1]))),line_color,thickness)
cv2.line(temp_ing,(int(float(traj_body_joints[12,0])),int(float(traj_body_joints[12,1]))),(int(float(traj_body_joints[13,0])),int(float(traj_body_joints[13,1]))),line_color,thickness)
#right leg
cv2.line(temp_ing,(int(float(traj_body_joints[14,0])),int(float(traj_body_joints[14,1]))),(int(float(traj_body_joints[15,0])),int(float(traj_body_joints[15,1]))),line_color,thickness)
cv2.line(temp_ing,(int(float(traj_body_joints[15,0])),int(float(traj_body_joints[15,1]))),(int(float(traj_body_joints[16,0])),int(float(traj_body_joints[16,1]))),line_color,thickness)
cv2.line(temp_ing,(int(float(traj_body_joints[16,0])),int(float(traj_body_joints[16,1]))),(int(float(traj_body_joints[17,0])),int(float(traj_body_joints[17,1]))),line_color,thickness)
#left leg
cv2.line(temp_ing,(int(float(traj_body_joints[18,0])),int(float(traj_body_joints[18,1]))),(int(float(traj_body_joints[19,0])),int(float(traj_body_joints[19,1]))),line_color,thickness)
cv2.line(temp_ing,(int(float(traj_body_joints[19,0])),int(float(traj_body_joints[19,1]))),(int(float(traj_body_joints[20,0])),int(float(traj_body_joints[20,1]))),line_color,thickness)
cv2.line(temp_ing,(int(float(traj_body_joints[20,0])),int(float(traj_body_joints[20,1]))),(int(float(traj_body_joints[21,0])),int(float(traj_body_joints[21,1]))),line_color,thickness)
if n_frame > 0:
for i,joint in enumerate(traj_body_joints):
if i ==0:
continue
cv2.circle(temp_ing,(int(float(joint[0])),int(float(joint[1]))),2,color,-1)
if i == 5 and n_frame>0:
##draw trajectories
cv2.circle(scene,(int(float(joint[0])),int(float(joint[1]))),2,color,-1)
else:
##draw joint
cv2.circle(temp_ing,(int(float(joint[0])),int(float(joint[1]))),2,color,-1)
if n_frame < 0:
cv2.imshow('lab',temp_ing)
cv2.waitKey(0)
else:
cv2.imshow('lab',temp_ing)
cv2.waitKey(0)
def xml_parser(path_to_file):
    """Parse a Kinect tracking XML file into per-frame joint matrices.

    Iterates over every ``tracksInfo`` element and builds a (22, 3) byte-string
    matrix per frame: row 0 holds (frameID, time, trackingID), rows 1..21 hold
    the (x, y, z) of each body joint in document order. Joints reported as
    '-1.#INF' (lost tracking) are zeroed out.

    Returns a list of those matrices, one per frame.
    """
    frames = []
    for _event, element in etree.iterparse(path_to_file, tag='tracksInfo'):
        frame_body_joints = np.zeros((22, 3), dtype='S30')
        # Row 0 carries the frame-level metadata.
        frame_body_joints[0, 0] = element.attrib['frameID']
        frame_body_joints[0, 1] = element.attrib['time']
        frame_body_joints[0, 2] = element.attrib['trackingID']
        # Rows 1..N: one joint per child element.
        for row, child in enumerate(element, 1):
            x_val = child.attrib['x']
            y_val = child.attrib['y']
            ## TODO: find a better way to correct this sensor error
            if x_val == '-1.#INF' or y_val == '-1.#INF':
                frame_body_joints[row, 0] = 0.
                frame_body_joints[row, 1] = 0.
                frame_body_joints[row, 2] = 0.
            else:
                frame_body_joints[row, 0] = x_val
                frame_body_joints[row, 1] = y_val
                frame_body_joints[row, 2] = child.attrib['z']
        frames.append(frame_body_joints)
        # Free the parsed element to keep iterparse memory-bounded.
        element.clear()
    return frames
def org_xml_data_timeIntervals(skeleton_data):
    """Partition skeleton frames into consecutive fixed-length time slices.

    skeleton_data: list of per-frame joint matrices (as from xml_parser);
    entry [0, 1] of each matrix holds a timestamp string whose 4th
    space-separated token is 'H:M:S'.
    Returns a list of slices, each a list of the frames falling in a 2-second
    window. Python 2 code: relies on list-returning map() and print statements.
    """
    #get all time data from the list
    content_time = map(lambda line: line[0,1].split(' ')[3] ,skeleton_data)
    #date time library
    init_t = datetime.strptime(content_time[0],'%H:%M:%S')
    end_t = datetime.strptime(content_time[len(content_time)-1],'%H:%M:%S')
    x = datetime.strptime('0:0:0','%H:%M:%S')  # unused; kept as-is
    tot_duration = (end_t-init_t)  # unused; kept as-is
    #decide the size of time slices (adaptive variant kept for reference)
    # size_slice= tot_duration/25
    # hours, remainder = divmod(size_slice.seconds, 3600)
    # minutes, seconds = divmod(remainder, 60)
    hours = 0
    minutes = 0
    seconds = 2
    #print hours,minutes,seconds
    my_time_slice = timedelta(hours=hours,minutes=minutes,seconds=seconds)
    print 'time slice selected: ' + str(my_time_slice)
    #initialize list
    time_slices = []
    time_slices_append = time_slices.append  # local alias: avoids attribute lookup per append
    #get data in every timeslices
    while init_t < (end_t-my_time_slice):
        list_time_interval = []
        list_time_interval_append = list_time_interval.append
        #print init_t
        # Collect every frame whose timestamp falls in [init_t, init_t + slice).
        for t in xrange(len(content_time)):
            if datetime.strptime(content_time[t],'%H:%M:%S')>= init_t and datetime.strptime(content_time[t],'%H:%M:%S') < init_t + my_time_slice:
                list_time_interval_append(skeleton_data[t])
            if datetime.strptime(content_time[t],'%H:%M:%S') > init_t + my_time_slice:
                break
        #print len(list_time_interval)
        ##save time interval without distinction of part of the day
        time_slices_append(list_time_interval)
        init_t= init_t+my_time_slice
    return time_slices
def set_subject(subject):
    """Set the module-level subject identifier.

    subjectID is read by org_data_different_tasks to build the ambient-sensor
    file path, so this must be called before running the pipeline.
    """
    global subjectID
    subjectID = subject
def org_data_different_tasks(skeleton_data):
    """Split skeleton frames into one slice per task using the entrance door.

    Reads the ambient-sensor log for the current subjectID, collects the
    timestamps of the door's 'ON' events, and pairs them up: event [i] marks a
    task's start and event [i+1] its end. Frames whose timestamp falls strictly
    inside a pair are grouped together.

    skeleton_data: list of per-frame joint matrices (row 0 col 1 holds the
    timestamp string; 4th space-separated token is 'H:M:S').
    Returns a list of lists of frames, one inner list per task.
    """
    print(subjectID)
    ## FIX: resolved an unresolved git merge conflict here -- kept the
    ## wandering_dataset_um path (HEAD); the alternative recording location is
    ## preserved commented out below.
    file_AS = 'C:/Users/dario.dotti/Documents/Datasets/my_dataset/wandering_dataset_um/binary/18-10-16_sensors_'+ subjectID +'.txt'
    #file_AS = 'C:/Users/dario.dotti/Desktop/data_recordings_master/binary/18-10-16_sensors_'+ subjectID +'.txt'
    sensors_ID = ambient_sensors.org_data_ID(file_AS)
    entrance_door = sensors_ID['entrance']
    ## collect the timestamps of the door 'ON' events
    entrance_time = []
    for event_door in entrance_door:
        e = event_door.split(' ')
        if e[0][9:11] == 'ON':
            t = e[2].split('-')
            entrance_time.append(datetime.strptime((t[0]+':'+t[1]+':'+t[2]),'%H:%M:%S'))
    #get all time data from the list (Python 2: map returns a list)
    skeleton_time_info = map(lambda line: line[0,1].split(' ')[3] ,skeleton_data)
    #initialize list
    time_slices = []
    time_slices_append = time_slices.append
    ## pair up ON events: [i] is a task start, [i+1] the matching end
    for i in range(0,len(entrance_time),2):
        temp_time_slice = []
        # print entrance_time[i],entrance_time[i+1]
        for s in range(0,len(skeleton_data)):
            if datetime.strptime(skeleton_time_info[s],'%H:%M:%S') > entrance_time[i] \
                    and datetime.strptime(skeleton_time_info[s],'%H:%M:%S') < entrance_time[i+1]:
                temp_time_slice.append(skeleton_data[s])
        time_slices_append(temp_time_slice)
    return time_slices
def org_data_timeIntervals_inside_tasks(skeleton_data_in_tasks):
    """Subdivide each task's frames into fixed-length (1 s) time slices.

    skeleton_data_in_tasks: list of tasks, each a list of per-frame joint
    matrices (row 0 col 1 holds the timestamp; 4th space token is 'H:M:S').
    Returns a list (one per task) of lists (one per slice) of frames.
    Python 2 code: relies on list-returning map() and print statements.
    """
    data_task_and_time_slices = []
    #decide the size of time slices (adaptive variant kept for reference)
    # size_slice= tot_duration/25
    # hours, remainder = divmod(size_slice.seconds, 3600)
    # minutes, seconds = divmod(remainder, 60)
    hours = 0
    minutes = 0
    seconds = 1
    #print hours,minutes,seconds
    my_time_slice = timedelta(hours=hours,minutes=minutes,seconds=seconds)
    print 'time slice selected: ' + str(my_time_slice)
    for task in range(0,len(skeleton_data_in_tasks)):
        #get all time data from the list
        content_time = map(lambda line: line[0,1].split(' ')[3] ,skeleton_data_in_tasks[task])
        #date time library
        init_t = datetime.strptime(content_time[0],'%H:%M:%S')
        end_t = datetime.strptime(content_time[len(content_time)-1],'%H:%M:%S')
        x = datetime.strptime('0:0:0','%H:%M:%S')  # unused; kept as-is
        tot_duration = (end_t-init_t)  # unused; kept as-is
        #initialize list
        time_slices = []
        time_slices_append = time_slices.append  # local alias avoids per-append lookup
        #get data in every timeslices
        while init_t < (end_t-my_time_slice):
            list_time_interval = []
            list_time_interval_append = list_time_interval.append
            #print init_t
            # Collect frames with timestamp in [init_t, init_t + slice).
            for t in xrange(0,len(content_time)):
                if datetime.strptime(content_time[t],'%H:%M:%S')>= init_t and datetime.strptime(content_time[t],'%H:%M:%S') < init_t + my_time_slice:
                    list_time_interval_append(skeleton_data_in_tasks[task][t])
                if datetime.strptime(content_time[t],'%H:%M:%S') > init_t + my_time_slice:
                    break
            #print len(list_time_interval)
            ##save time interval without distinction of part of the day
            time_slices_append(list_time_interval)
            init_t= init_t+my_time_slice
        data_task_and_time_slices.append(time_slices)
    return data_task_and_time_slices
# def get_coordinate_points(time_slice,joint_id):
#
# #get all the coordinate points of head joint
# list_points = []
# list_points_append = list_points.append
#
# #get x,y,z,id
# map(lambda line: list_points_append([line[joint_id][0],line[joint_id][1]]),time_slice)
# zs = map(lambda line: float(line[joint_id][2]),time_slice)
# ids =map(lambda line: np.int64(line[0][2]),time_slice)
#
# #apply filter to cancel noise
# x_f,y_f =my_img_proc.median_filter(list_points)
#
# return x_f,y_f,zs,ids
def occupancy_histograms_in_time_interval(my_room, list_poly, time_slices):
    """Build one L2-normalised occupancy histogram per time slice.

    For every time slice, counts how many smoothed head-trajectory points fall
    in each 2-D patch of *list_poly*, with two depth bins per patch split at
    kinect_max_distance / 2 (module global set by feature_extraction_video_traj).

    my_room: unused in the live code path (kept for interface compatibility;
    see the commented display_trajectories call).
    Returns a 2-D numpy array with one normalised histogram row per slice.
    """
    ## number of patches the image was divided into
    slice_col = my_img_proc.get_slice_cols()
    slice_row = my_img_proc.get_slice_rows()
    slice_depth = my_img_proc.get_slice_depth()
    my_data_temp = []
    my_data_temp_append = my_data_temp.append
    for i in xrange(0, len(time_slices)):
        ## Checking the start time of every time slice
        if len(time_slices[i]) > 1:
            print('start time: %s' % time_slices[i][0][0][1].split(' ')[3])
        else:
            print('no data in this time slice')
        ## fresh counter per slice: slice_depth depth bins for each 2-D patch
        track_points_counter = np.zeros((slice_col * slice_row * slice_depth))
        ## get x,y,z,id of every traj point after smoothing.
        ## FIX: the original called a bare get_coordinate_points(), but that
        ## helper only exists commented out in this file, so the call raised
        ## NameError. The live helper is my_img_proc.get_coordinate_points
        ## (same call as in histograms_of_oriented_trajectories).
        x_filtered, y_filtered, zs, ids = my_img_proc.get_coordinate_points(time_slices[i], joint_id=1)
        ## display traj on img
        #temp_img = copy.copy(my_room)
        #my_img_proc.display_trajectories(temp_img, list_poly, x_filtered, y_filtered)
        ## count the occurrences of each filtered point x,y in every patch
        for p in xrange(0, len(list_poly)):
            for ci in xrange(0, len(x_filtered)):
                ## 2d polygon test
                if list_poly[p].contains_point((int(x_filtered[ci]), int(y_filtered[ci]))):
                    ## 3d cube close to the camera
                    if zs[ci] < (kinect_max_distance / 2):
                        track_points_counter[p * 2] = track_points_counter[p * 2] + 1
                        continue
                    else:  ## 3d cube far from the camera
                        track_points_counter[(p * 2) + 1] = track_points_counter[(p * 2) + 1] + 1
                        continue
        ## save the counters of this slice in the final matrix
        my_data_temp_append(track_points_counter)
    ## normalize the final matrix
    normalized_finalMatrix = np.array(normalize(np.array(my_data_temp), norm='l2'))
    print('final matrix size:')
    print(normalized_finalMatrix.shape)
    return normalized_finalMatrix
def histograms_of_oriented_trajectories(list_poly,time_slices):
    """Compute Histograms of Oriented Tracklets (HOT) per task and time slice.

    For each task and each of its time slices, the smoothed head trajectory is
    binned into the 2-D patches of *list_poly*; each patch is further split
    into three depth bands (near / middle / far, 1.433-wide bands relative to
    the kinect_max_distance module global -- presumably metres). Every
    non-empty cuboid yields a descriptor via
    my_img_proc.histogram_oriented_tracklets (24 bins, matching the zero
    fallback below); empty cuboids contribute zeros. Per-slice descriptors are
    h-stacked and the per-task matrix is L1-normalised.

    Returns a list with one normalised matrix per task.
    Python 2 code (print statements, xrange).
    """
    hot_all_data_task_time_slices = []
    for i_task,task in enumerate(time_slices):
        #if i_task != 2: continue
        hot_all_data_matrix = []
        hot_all_data_matrix_append = hot_all_data_matrix.append
        print '###########task########### ',i_task
        for i in xrange(0,len(task)):
            ##Checking the start time of every time slice
            if(len(task[i])>1):
                print 'start time: %s' %task[i][0][0][1].split(' ')[3]
            else:
                print 'no data in this time slice'
                continue
            #get x,y,z of every traj point after smoothing process
            x_filtered,y_filtered,zs,ids = my_img_proc.get_coordinate_points(task[i],joint_id=1)#get all position of the head joint id =1
            #initialize histogram of oriented tracklets
            hot_matrix = []
            temp_img = scene.copy()  # debug canvas: colour-coded points per depth band
            for p in xrange(0,len(list_poly)):
                # One point bucket per depth band of this 2-D patch.
                tracklet_in_cube_f = []
                tracklet_in_cube_c = []
                tracklet_in_cube_middle = []
                tracklet_in_cube_append_f = tracklet_in_cube_f.append
                tracklet_in_cube_append_c = tracklet_in_cube_c.append
                tracklet_in_cube_append_middle = tracklet_in_cube_middle.append
                for ci in xrange(0,len(x_filtered)):
                    #2d polygon test
                    if list_poly[p].contains_point((int(x_filtered[ci]),int(y_filtered[ci]))):
                        ## 3d cube close to the camera (blue)
                        if zs[ci] < (kinect_max_distance-(1.433*2)):
                            #print 'close to kinect'
                            tracklet_in_cube_append_c([x_filtered[ci],y_filtered[ci],ids[ci]])
                            cv2.circle(temp_img,(int(x_filtered[ci]),int(y_filtered[ci])),2,(255,0,0),-1)
                        elif zs[ci] > (kinect_max_distance-(1.433*2)) and zs[ci] < (kinect_max_distance-1.433):
                            ## middle band (green)
                            tracklet_in_cube_append_middle([x_filtered[ci],y_filtered[ci],ids[ci]])
                            cv2.circle(temp_img,(int(x_filtered[ci]),int(y_filtered[ci])),2,(0,255,0),-1)
                        elif zs[ci] > (kinect_max_distance-1.433): ##3d cube far from the camera (red)
                            #print 'faraway to kinect'
                            tracklet_in_cube_append_f([x_filtered[ci],y_filtered[ci],ids[ci]])
                            cv2.circle(temp_img,(int(x_filtered[ci]),int(y_filtered[ci])),2,(0,0,255),-1)
                # Descriptor per depth band, in near/middle/far order.
                for three_d_poly in [tracklet_in_cube_c,tracklet_in_cube_middle,tracklet_in_cube_f]:
                    if len(three_d_poly)>0:
                        ## for tracklet in cuboids compute HOT following paper
                        hot_single_poly = my_img_proc.histogram_oriented_tracklets(three_d_poly)
                        ## compute hot+curvature (alternative descriptor)
                        #hot_single_poly = my_img_proc.histogram_oriented_tracklets_plus_curvature(three_d_poly)
                    else:
                        hot_single_poly = np.zeros((24))
                    ##append to this slice's feature row
                    if len(hot_matrix)>0:
                        hot_matrix = np.hstack((hot_matrix,hot_single_poly))
                    else:
                        hot_matrix = hot_single_poly
            ## debug image dump (disabled)
            #time = time_slices[i][0][0][1].split(' ')[3].split(':')
            #filename = 'C:/Users/dario.dotti/Documents/time_windows_HOT/'+subjectID+'_'+time[0]+'_'+time[1]+'_'+time[2]+'.jpg'
            #cv2.imwrite(filename,temp_img)
            ##Test cluster (disabled): load cluster data and inspect matches
            # cluster_model = data_org.load_matrix_pickle(
            #     'C:/Users/dario.dotti/Documents/bow_experiment_data/cl_30_kmeans_model_2secWindow_newVersion.txt')
            # keys_labels = data_org.load_matrix_pickle(
            #     'C:/Users/dario.dotti/Documents/bow_experiment_data/cluster_30_kmeans_word_newVersion.txt')
            #
            # similar_word = cluster_model.predict(np.array(hot_matrix).reshape(1, -1))
            # print 's_w ',similar_word
            # if similar_word[0] == 3:
            #     cv2.imshow('ciao',temp_img)
            #     cv2.waitKey(0)
            # continue
            hot_all_data_matrix_append(hot_matrix)
        ## normalize this task's matrix
        normalized_finalMatrix = np.array(normalize(np.array(hot_all_data_matrix),norm='l1'))
        hot_all_data_task_time_slices.append(normalized_finalMatrix)
        #print 'final matrix size:'
        #print np.array(normalized_finalMatrix).shape
    ##add extra bin with hours (disabled)
    # hs = np.zeros((len(time_slices),1))
    #
    # for i,t in enumerate(time_slices):
    #
    #     if len(t) > 1:
    #         hs[i] = int(t[0][0][1].split(' ')[3].split(':')[0])
    #     else:
    #         hs[i] = hs[i-1]
    #
    #
    # normalized_finalMatrix = np.hstack((normalized_finalMatrix,hs))
    # print 'matrix with extra bin'
    # print np.array(hot_all_data_matrix).shape
    return hot_all_data_task_time_slices
def measure_joints_accuracy(skeleton_data):
    """Quantify per-joint tracking noise: raw vs median-filtered displacement.

    Pass 1: for every joint, euclidean displacement between frames i and
    i+frame_step on the raw coordinates (skipping untracked joints and the
    hard-coded subject-4 room exit/entrance frames); also keeps the mean
    displacement per joint. Pass 2: the same after my_img_proc.median_filter,
    but frame-to-frame. The first stored joint's raw-minus-filtered difference
    is thresholded and the offending frame indices are returned; both measures
    are also plotted via vis.
    Python 2 code (xrange, print statement, tuple-unpacking lambda).
    """
    frame_step = 5
    mean_displcement_list = np.zeros((len(skeleton_data[0])-1,1))
    joint_distances = []
    joint_distances_append = joint_distances.append
    ## map()-based variant kept for reference:
    # for joint_id in xrange(1,len(skeleton_data[0])):
    #
    #     #euclidean distance between joint time[0] and joint time[framestep]
    #     eu_difference = map(lambda i: np.sqrt((int(float(skeleton_data[i+frame_step][joint_id,0]))- int(float(skeleton_data[i][joint_id,0])))**2 + \
    #         (int(float(skeleton_data[i+frame_step][joint_id,1])) - int(float(skeleton_data[i][joint_id,1])))**2) \
    #         if skeleton_data[i][joint_id,0] != 0. or skeleton_data[i+1][joint_id,0] != 0. else 0 \
    #         ,xrange(0,len(skeleton_data)-frame_step))
    #
    #     mean_displcement_list[joint_id-1] = np.sum(eu_difference)/len(eu_difference)
    #
    #     joint_distances_append(eu_difference)
    #print mean_displcement_list
    ##############
    ## frames where the subject left/entered the room (tracking resets)
    #subject7_exit_entrance = [19676 ,16250, 1943]
    subject4_exit_entrance = [3867,6053,9053,11898,17584,25777]
    ##not optimized code but more understadable
    for joint_id in xrange(1,len(skeleton_data[0])):
        eu_difference = np.zeros((len(skeleton_data),1))
        for i in xrange(0,len(skeleton_data)-frame_step):
            ## skip untracked joints (zeroed by xml_parser) and exit/entrance frames
            if skeleton_data[i][joint_id,0] == 0. or skeleton_data[i+1][joint_id,0] == 0.:
                continue
            if i in subject4_exit_entrance:
                continue
            #euclidean distance between joint time[0] and joint time[framestep]
            eu_difference[i] = np.sqrt((int(float(skeleton_data[i+frame_step][joint_id,0]))- int(float(skeleton_data[i][joint_id,0])))**2 + \
                (int(float(skeleton_data[i+frame_step][joint_id,1])) - int(float(skeleton_data[i][joint_id,1])))**2)
        joint_distances_append(eu_difference)
        mean_displcement_list[joint_id-1] = np.sum(eu_difference)/len(eu_difference)
    ###############
    ##get filtered points
    joint_distances_filtered = []
    joint_distances_filtered_append = joint_distances_filtered.append
    for joint_id in xrange(1,len(skeleton_data[0])):
        ##store x,y for 1 joint each time over all frames
        list_points = []
        list_points_append = list_points.append
        for i in xrange(0,len(skeleton_data)):
            #if skeleton_data[i][joint_id,0] == 0.:
            #continue
            #if i in subject7_exit_entrance:
            #continue
            list_points_append((int(float(skeleton_data[i][joint_id,0])),int(float(skeleton_data[i][joint_id,1]))))
        ##apply filter
        x_f,y_f =my_img_proc.median_filter(list_points)
        ## NOTE(review): raw pass uses frame_step=5 but this pass compares
        ## consecutive frames -- confirm the asymmetry is intended.
        eu_difference_filtered = np.zeros((len(skeleton_data),1))
        for i in xrange(0,len(x_f)-1):
            #if x_f[i+1] == 0. or x_f[i] == 0.:
            #continue
            if i in subject4_exit_entrance:
                continue
            eu_difference_filtered[i] = np.sqrt((x_f[i+1]-x_f[i])**2 + (y_f[i+1]-y_f[i])**2)
        joint_distances_filtered_append(eu_difference_filtered)
    # print mean_displcement_list
    ##get only the desired joint (the first stored joint)
    my_joint_raw = map(lambda x: x,joint_distances[:1][0])
    my_joint_filtered=map(lambda x: x,joint_distances_filtered[:1][0])
    #difference between raw and filtered features
    diff = map(lambda pair: pair[0]-pair[1] , zip(my_joint_raw,my_joint_filtered))
    #get frames where joint displacement over threshold
    threshold = 15
    frames_where_joint_displacement_over_threshold = []
    map(lambda (i,d): frames_where_joint_displacement_over_threshold.append(i) if d>threshold else False , enumerate(diff))
    print len(frames_where_joint_displacement_over_threshold)
    ##display mean distance of every joints between frames
    vis.plot_mean_joints_displacement(mean_displcement_list)
    ##display error each frame from selected joint
    vis.plot_single_joint_displacement_vs_filtered_points(my_joint_raw,my_joint_filtered)
    return frames_where_joint_displacement_over_threshold
def feature_extraction_video_traj(file_traj):
    """Pre-process a tracking XML file and organise frames for feature extraction.

    Pipeline: divide the scene image into patches, parse *file_traj* with
    xml_parser, draw the joints, set the kinect_max_distance module global from
    the observed depth values, split frames per task (door sensor) and then
    into 1-second slices per task.

    Returns (occupancy_histograms, skeleton_data_in_tasks_and_time_slices);
    the first element is currently the placeholder 1 (see the early return).
    """
    ##divide image into patches(polygons) and get the positions of each one.
    ## FIX: resolved an unresolved git merge conflict here -- kept the
    ## 'global scene' declaration from HEAD; scene is only read in this
    ## function (all assignments below are commented out), so the declaration
    ## is harmless either way.
    global scene
    #scene = np.zeros((414,512),dtype=np.uint8)
    #scene = cv2.imread('C:/Users/dario.dotti/Documents/Datasets/my_dataset/wandering_dataset_um/subject4_1834.jpg')
    #scene = cv2.imread('D:/experiment_data/subject_20/388.jpg')
    list_poly = my_img_proc.divide_image(scene)
    ##check patches are correct
    # for rect in list_poly:
    #     cv2.rectangle(scene, (int(rect.vertices[1][0]), int(rect.vertices[1][1])),
    #                   (int(rect.vertices[3][0]), int(rect.vertices[3][1])), (0, 0, 0))
    # #
    # cv2.imshow('ciao',scene)
    # cv2.waitKey(0)
    ##--------------Pre-Processing----------------##
    skeleton_data = xml_parser(file_traj)
    ##reliability method
    #measure_joints_accuracy(skeleton_data)
    ##display joints
    draw_joints_and_tracks(skeleton_data,list_poly)
    ##divide the data based on time info
    #skeleton_data_in_time_slices = org_xml_data_timeIntervals(skeleton_data)
    ##update depth values: max observed z of the first joint defines the far plane
    depth_v = []
    map(lambda x: depth_v.append(float(x[1,2])) ,skeleton_data)
    global kinect_max_distance
    kinect_max_distance = np.max(depth_v)
    print(kinect_max_distance)
    ##divide the data based on task
    skeleton_data_in_time_slices = org_data_different_tasks(skeleton_data)
    ##divide data based on time info per task
    skeleton_data_in_tasks_and_time_slices = org_data_timeIntervals_inside_tasks(skeleton_data_in_time_slices)
    occupancy_histograms =1
    ## NOTE(review): early return below short-circuits the feature-extraction
    ## stage -- everything after it is unreachable (looks like a debug state;
    ## confirm before removing).
    return occupancy_histograms,skeleton_data_in_tasks_and_time_slices
    ##--------------Feature Extraction-------------##
    print('feature extraction')
    ## count traj points in each region and create hist
    #occupancy_histograms = occupancy_histograms_in_time_interval(my_room, list_poly, skeleton_data_in_time_slices)
    occupancy_histograms = 1
    ## create Histograms of Oriented Tracks
    HOT_data = histograms_of_oriented_trajectories(list_poly,skeleton_data_in_tasks_and_time_slices)
    #vis.bar_plot_motion_over_time(HOT_data)
    return [occupancy_histograms,HOT_data]
    #cluster_prediction = my_exp.main_experiments(HOT_data)
|
{"/demo.py": ["/data_organizer.py", "/video_traj.py", "/ambient_sensors.py"]}
|
7,366
|
Zumbalamambo/behavior_understanding_indoor_dataset
|
refs/heads/master
|
/AE_rec.py
|
import numpy as np
import cv2
import matplotlib.pyplot as plt
from datetime import datetime
import operator
import os
from sklearn.cluster import KMeans,SpectralClustering,MeanShift,AgglomerativeClustering
from collections import Counter
from scipy.spatial.distance import cdist, pdist
from sklearn import decomposition
from scipy.ndimage.filters import gaussian_filter
from sklearn import svm
import random
from mpl_toolkits.mplot3d import Axes3D
from itertools import cycle, islice
import data_organizer
import hierarchical_ae_learning_methods as hs
import img_processing
def sigmoid_function(x):
    """Elementwise logistic sigmoid, 1 / (1 + e^-x).

    Works on scalars and numpy arrays alike (numpy broadcasting).
    """
    exp_neg = np.exp(-x)
    return 1.0 / (1.0 + exp_neg)
def AE_showWeights_level2(AE_weights_level_2, AE_weights_level_1):
    """Visualise layer-2 autoencoder units as grids of layer-1 reconstructions.

    Each layer-2 unit's weight vector is split into 81-element chunks, each
    chunk projected back through the (tied) layer-1 decoder into an 18x18
    image, tiled 3x3 into a 56x56 sub-image, and all sub-images are tiled into
    one square figure. The bias terms are unpacked but unused here.
    Python 2 print statement inside.
    """
    hd_weights_level1 = AE_weights_level_1[0][0]
    bias_1_level1 = AE_weights_level_1[1]   # unused; kept for symmetry
    bias_2_level1 = AE_weights_level_1[2]   # unused
    hd_weights_level2 = AE_weights_level_2[0][0]
    bias_1_level2 = AE_weights_level_2[1]   # unused
    bias_2_level2 = AE_weights_level_2[2]   # unused
    ##prepare weights from layer 2: identity in == one row per hidden unit
    imgs = np.eye(hd_weights_level2.shape[0])
    imgs = np.dot(hd_weights_level2.T, imgs)
    print imgs.shape
    size_main_img = 56
    n_img = int(np.sqrt(imgs.shape[0]))
    main_img = np.zeros(((size_main_img + 1) * n_img - 1, (size_main_img + 1) * n_img - 1))
    for i,w in enumerate(imgs) :
        r_main_img, c_main_img = divmod(i, n_img)
        n_subimg = 9
        size_subimg = 18
        sub_img = np.zeros(((size_subimg + 1) * n_subimg/3 - 1, (size_subimg + 1) * n_subimg/3 - 1))
        # 81-element chunks: one per layer-1 unit group -- assumes 9 chunks. TODO confirm.
        for r_c_counter,n_w in enumerate(xrange(0,len(w),81)):
            sub_weight = w[n_w:(n_w+81)]
            ## reconstruction through layer-1 decoder weights
            rec = np.dot(sub_weight, hd_weights_level1.T)
            ## define the row and cols where to put the sub_imgs
            r, c = divmod(r_c_counter, 3)
            weight_img = rec.reshape((size_subimg, size_subimg))
            ##sharpening edges (disabled)
            # blurred_f = gaussian_filter(weight_img, sigma=1)
            #
            # filter_blurred_f = gaussian_filter(blurred_f, sigma=0.5)
            # alpha = 30
            # weight_img = blurred_f + alpha * (blurred_f - filter_blurred_f)
            ##add the subimg to the windows
            sub_img[r * (size_subimg + 1):(r + 1) * (size_subimg + 1) - 1,
                    c * (size_subimg + 1):(c + 1) * (size_subimg + 1) - 1] = weight_img
        ## normalise the sub-image to [0, 1] for display
        sub_img -= sub_img.min()
        sub_img /= sub_img.max()
        ##add the subimgs to the main window
        main_img[r_main_img * (size_main_img + 1):(r_main_img + 1) * (size_main_img + 1) - 1,
                 c_main_img * (size_main_img + 1):(c_main_img + 1) * (size_main_img + 1) - 1] = sub_img
    plt.imshow(main_img.squeeze(), cmap=plt.cm.gray)
    plt.show()
def AE_showWeights_level2_temporalExperiment(AE_weights_level_2, AE_weights_level_1):
    """Temporal-experiment variant of AE_showWeights_level2.

    Identical layout logic but with 144-element weight chunks, 20x20 layer-1
    reconstructions and 62x62 sub-images. Bias terms are unpacked but unused.
    Python 2 print statement inside.
    """
    hd_weights_level1 = AE_weights_level_1[0][0]
    bias_1_level1 = AE_weights_level_1[1]   # unused; kept for symmetry
    bias_2_level1 = AE_weights_level_1[2]   # unused
    hd_weights_level2 = AE_weights_level_2[0][0]
    bias_1_level2 = AE_weights_level_2[1]   # unused
    bias_2_level2 = AE_weights_level_2[2]   # unused
    ##prepare weights from layer 2: identity in == one row per hidden unit
    imgs = np.eye(hd_weights_level2.shape[0])
    imgs = np.dot(hd_weights_level2.T, imgs)
    print imgs.shape
    size_main_img = 62
    n_img = int(np.sqrt(imgs.shape[0]))
    main_img = np.zeros(((size_main_img + 1) * n_img - 1, (size_main_img + 1) * n_img - 1))
    for i,w in enumerate(imgs) :
        r_main_img, c_main_img = divmod(i, n_img)
        n_subimg = 9
        size_subimg = 20
        sub_img = np.zeros(((size_subimg + 1) * n_subimg/3 - 1, (size_subimg + 1) * n_subimg/3 - 1))
        # 144-element chunks: one per layer-1 unit group -- assumes 9 chunks. TODO confirm.
        for r_c_counter,n_w in enumerate(xrange(0,len(w),144)):
            sub_weight = w[n_w:(n_w+144)]
            ## reconstruction through layer-1 decoder weights
            rec = np.dot(sub_weight, hd_weights_level1.T)
            ## define the row and cols where to put the sub_imgs
            r, c = divmod(r_c_counter, 3)
            weight_img = rec.reshape((size_subimg, size_subimg))
            ##sharpening edges (disabled)
            # blurred_f = gaussian_filter(weight_img, sigma=1)
            #
            # filter_blurred_f = gaussian_filter(blurred_f, sigma=0.5)
            # alpha = 30
            # weight_img = blurred_f + alpha * (blurred_f - filter_blurred_f)
            ##add the subimg to the windows
            sub_img[r * (size_subimg + 1):(r + 1) * (size_subimg + 1) - 1,
                    c * (size_subimg + 1):(c + 1) * (size_subimg + 1) - 1] = weight_img
        ## normalise the sub-image to [0, 1] for display
        sub_img -= sub_img.min()
        sub_img /= sub_img.max()
        ##add the subimgs to the main window
        main_img[r_main_img * (size_main_img + 1):(r_main_img + 1) * (size_main_img + 1) - 1,
                 c_main_img * (size_main_img + 1):(c_main_img + 1) * (size_main_img + 1) - 1] = sub_img
    plt.imshow(main_img.squeeze(), cmap=plt.cm.gray)
    plt.show()
def reconstruction_AE_weights_level2(raw_features,original_points_grid_level_2,AE_weights_level_1,AE_weights_level_2):
    """Show level-2 autoencoder reconstructions next to the original inputs.

    Each sample is encoded/decoded through layer 2 (tied weights), split into
    9 chunks, each chunk decoded through layer 1 into an 18x18 image and tiled
    3x3; the original pixels (original_points_grid_level_2, assumed 9*324
    values per row -- TODO confirm) are tiled the same way on the left subplot.
    Shows one matplotlib figure per sample.
    """
    hd_weights_level1 = AE_weights_level_1[0][0]
    bias_1_level1 = AE_weights_level_1[1]   # unused here
    bias_2_level1 = AE_weights_level_1[2]
    hd_weights_level2 = AE_weights_level_2[0][0]
    bias_1_level2 = AE_weights_level_2[1]
    bias_2_level2 = AE_weights_level_2[2]
    ##reconstruct layer 2: encode then decode with tied weights
    hd2_space = np.dot(raw_features, hd_weights_level2)
    activations_l2 = sigmoid_function(hd2_space + bias_1_level2)
    #activations_l2 = relu_function(hd2_space+bias_1_level2)
    rec_space_l2 = np.dot(activations_l2, hd_weights_level2.T)
    rec_level2 = sigmoid_function(rec_space_l2 + bias_2_level2)
    size_w1 = hd_weights_level1.shape[1]
    size_image = 18 * 18
    for i, sample_level2 in enumerate(rec_level2):
        fig = plt.figure()
        n_subimg = 9
        size_subimg = 18
        ##reconstructed sample (right subplot)
        a = fig.add_subplot(1, 2, 2)
        a.set_title('reconstruction')
        sub_rec_img = np.zeros(((size_subimg + 1) * (n_subimg / 3) - 1, (size_subimg + 1) * n_subimg / 3 - 1))
        sub_samples = sample_level2.reshape((9,size_w1))
        for r_c_counter,sample in enumerate(sub_samples):
            ## decode each level-1 chunk back to pixel space
            rec_space_l1 = np.dot(sample, hd_weights_level1.T)
            rec_level1 = sigmoid_function(rec_space_l1 + bias_2_level1).reshape((size_subimg, size_subimg))
            #plt.imshow(rec_level1, cmap=plt.cm.gray)
            #plt.show()
            ## print mean pixel value (disabled)
            # a = np.where(rec_level1 > 0.1)
            # mean_rec = 0
            # for i_c in range(0, len(a[0])):
            #     mean_rec = mean_rec + rec_level1[a[0][i_c], a[1][i_c]]
            # print mean_rec / len(a[0])
            ###
            ## define the row and cols where to put the sub_imgs
            r, c = divmod(r_c_counter, 3)
            ##add the subimg to the windows
            sub_rec_img[r * (size_subimg + 1):(r + 1) * (size_subimg + 1) - 1,
                        c * (size_subimg + 1):(c + 1) * (size_subimg + 1) - 1] = rec_level1
        imgplot =plt.imshow(sub_rec_img.squeeze(), cmap=plt.cm.gray)
        #cv2.imshow('ciao',sub_rec_img)
        #cv2.waitKey()
        ##original sample (left subplot)
        a = fig.add_subplot(1, 2, 1)
        a.set_title('original')
        orig_img = np.zeros(((size_subimg + 1) * n_subimg / 3 - 1, (size_subimg + 1) * n_subimg / 3 - 1))
        for r_c_counter,n_orig in enumerate(xrange(0,len(original_points_grid_level_2[i]),size_image)):
            sub_orig_img = original_points_grid_level_2[i][n_orig:(n_orig+size_image)].reshape((size_subimg, size_subimg))
            ## define the row and cols where to put the sub_imgs
            r, c = divmod(r_c_counter, 3)
            ##add the subimg to the windows
            orig_img[r * (size_subimg + 1):(r + 1) * (size_subimg + 1) - 1,
                     c * (size_subimg + 1):(c + 1) * (size_subimg + 1) - 1] = sub_orig_img
        #filename = 'C:/Users/dario.dotti/Documents/data_for_vocabulary/camera017/traj_pixel_activation/bayesian_net/gt_images' + '/' + str(i) + '.jpg'
        #plt.imsave(filename, orig_img.squeeze(), cmap=plt.cm.gray)
        imgplot =plt.imshow(orig_img.squeeze(),cmap=plt.cm.gray )
        plt.show()
def reconstruction_AE_weights_level2_temporal(raw_features,original_points_grid_level_2,AE_weights_level_1,AE_weights_level_2):
    """Temporal-experiment variant of reconstruction_AE_weights_level2.

    Same encode/decode-and-plot flow, but each sample splits into 3 chunks
    decoded to 20x20 images (originals assumed 3*400 values per row -- TODO
    confirm). Also prints the mean squared reconstruction error over all
    samples before plotting. Python 2 code (print statement, tuple-unpacking
    lambda, xrange).
    """
    hd_weights_level1 = AE_weights_level_1[0][0]
    bias_1_level1 = AE_weights_level_1[1]   # unused here
    bias_2_level1 = AE_weights_level_1[2]
    hd_weights_level2 = AE_weights_level_2[0][0]
    bias_1_level2 = AE_weights_level_2[1]
    bias_2_level2 = AE_weights_level_2[2]
    ##reconstruct layer 2: encode then decode with tied weights
    hd2_space = np.dot(raw_features, hd_weights_level2)
    activations_l2 = sigmoid_function(hd2_space + bias_1_level2)
    # activations_l2 = relu_function(hd2_space+bias_1_level2)
    rec_space_l2 = np.dot(activations_l2, hd_weights_level2.T)
    rec_level2 = sigmoid_function(rec_space_l2 + bias_2_level2)
    size_w1 = hd_weights_level1.shape[1]
    size_image = 20 * 20
    ## overall reconstruction MSE against the input features
    rec_mse_local = []
    map(lambda (i, row): rec_mse_local.append(np.sum(np.power(row - raw_features[i], 2))), enumerate(rec_level2))
    print 'all data mse: ', np.sum(rec_mse_local) / len(rec_level2)
    for i, sample_level2 in enumerate(rec_level2):
        fig = plt.figure()
        n_subimg = 9
        size_subimg = 20
        ##reconstructed sample (right subplot)
        a = fig.add_subplot(1, 2, 2)
        a.set_title('reconstruction')
        sub_rec_img = np.zeros(((size_subimg + 1) * (n_subimg / 3) - 1, (size_subimg + 1) * n_subimg / 3 - 1))
        sub_samples = sample_level2.reshape((3, size_w1))
        for r_c_counter, sample in enumerate(sub_samples):
            ## decode each level-1 chunk back to pixel space
            rec_space_l1 = np.dot(sample, hd_weights_level1.T)
            rec_level1 = sigmoid_function(rec_space_l1 + bias_2_level1).reshape((size_subimg, size_subimg))
            # plt.imshow(rec_level1, cmap=plt.cm.gray)
            # plt.show()
            ## print mean pixel value (disabled)
            # a = np.where(rec_level1 > 0.1)
            # mean_rec = 0
            # for i_c in range(0, len(a[0])):
            #     mean_rec = mean_rec + rec_level1[a[0][i_c], a[1][i_c]]
            # print mean_rec / len(a[0])
            ###
            ## define the row and cols where to put the sub_imgs
            r, c = divmod(r_c_counter, 3)
            ##add the subimg to the windows
            sub_rec_img[r * (size_subimg + 1):(r + 1) * (size_subimg + 1) - 1,
                        c * (size_subimg + 1):(c + 1) * (size_subimg + 1) - 1] = rec_level1
        imgplot = plt.imshow(sub_rec_img.squeeze(), cmap=plt.cm.gray)
        # cv2.imshow('ciao',sub_rec_img)
        # cv2.waitKey()
        ##original sample (left subplot)
        a = fig.add_subplot(1, 2, 1)
        a.set_title('original')
        orig_img = np.zeros(((size_subimg + 1) * n_subimg / 3 - 1, (size_subimg + 1) * n_subimg / 3 - 1))
        for r_c_counter, n_orig in enumerate(xrange(0, len(original_points_grid_level_2[i]), size_image)):
            sub_orig_img = original_points_grid_level_2[i][n_orig:(n_orig + size_image)].reshape(
                (size_subimg, size_subimg))
            ## define the row and cols where to put the sub_imgs
            r, c = divmod(r_c_counter, 3)
            ##add the subimg to the windows
            orig_img[r * (size_subimg + 1):(r + 1) * (size_subimg + 1) - 1,
                     c * (size_subimg + 1):(c + 1) * (size_subimg + 1) - 1] = sub_orig_img
        # filename = 'C:/Users/dario.dotti/Documents/data_for_vocabulary/camera017/traj_pixel_activation/bayesian_net/gt_images' + '/' + str(i) + '.jpg'
        # plt.imsave(filename, orig_img.squeeze(), cmap=plt.cm.gray)
        imgplot = plt.imshow(orig_img.squeeze(), cmap=plt.cm.gray)
        plt.show()
def plot_images_l1(imgs, loc, title=None, channels=1):
    '''Plot an array of images.

    We assume that we are given a matrix of data whose shape is (n*n, s*s*c) --
    that is, there are n^2 images along the first axis of the array, and each
    image is c squares measuring s pixels on a side. Each row of the input will
    be plotted as a sub-region within a single image array containing an n x n
    grid of images.

    loc: matplotlib subplot location code passed to add_subplot.
    Each tile is edge-sharpened with an unsharp mask (gaussian_filter) before
    being placed, and the whole mosaic is min/max-normalised at the end.
    '''
    n = int(np.sqrt(len(imgs)))
    assert n * n == len(imgs), 'images array must contain a square number of rows!'
    s = int(np.sqrt(len(imgs[0]) / channels))
    assert s * s == len(imgs[0]) / channels, 'images must be square!'
    # NOTE(review): img inherits imgs' dtype; the in-place /= below truncates
    # for integer dtypes -- confirm callers pass float arrays.
    img = np.zeros(((s+1) * n - 1, (s+1) * n - 1, channels), dtype=imgs[0].dtype)
    for i, pix in enumerate(imgs):
        r, c = divmod(i, n)
        weight_img = pix.reshape((s, s, 1))
        ##sharpening edges (unsharp masking)
        blurred_f = gaussian_filter(weight_img, sigma=1)
        filter_blurred_f = gaussian_filter(blurred_f, sigma=0.5)
        alpha = 30
        weight_img = blurred_f + alpha * (blurred_f - filter_blurred_f)
        img[r * (s+1):(r+1) * (s+1) - 1,
            c * (s+1):(c+1) * (s+1) - 1] = weight_img
    ## normalise the mosaic to [0, 1] for display
    img -= img.min()
    img /= img.max()
    ax = plt.gcf().add_subplot(loc)
    ax.xaxis.set_visible(False)
    ax.yaxis.set_visible(False)
    ax.set_frame_on(False)
    ax.imshow(img.squeeze(), cmap=plt.cm.gray)
    if title:
        ax.set_title(title)
def plot_layers_l1(weights, tied_weights, channels=1):
    '''Create a plot of weights, visualized as "bottom-level" pixel arrays.

    weights: list of per-layer weight matrices; each layer's units are
    projected down to pixel space by chaining the transposed weights, and each
    projection is drawn in its own subplot via plot_images_l1.
    tied_weights: if true, the last layer is also projected through the chain;
    otherwise its raw decoding weights are plotted.
    '''
    #if hasattr(weights[0], 'get_value'):
    #weights = [w.get_value() for w in weights]
    k = min(len(weights), 9)  # at most 9 subplots (single-digit subplot codes)
    imgs = np.eye(weights[0].shape[0])
    for i, weight in enumerate(weights[:-1]):
        imgs = np.dot(weight.T, imgs)
        plot_images_l1(imgs,
                       100 + 10 * k + i + 1,
                       channels=channels,
                       title='Layer {}'.format(i+1))
    weight = weights[-1]
    # NOTE(review): '/' is true division on Python 3, so n becomes a float
    # there; the perfect-square check below still behaves the same.
    n = weight.shape[1] / channels
    ## skip the last subplot when the decoding side is not a square image
    if int(np.sqrt(n)) ** 2 != n:
        return
    if tied_weights:
        imgs = np.dot(weight.T, imgs)
        plot_images_l1(imgs,
                       100 + 10 * k + k,
                       channels=channels,
                       title='Layer {}'.format(k))
    else:
        plot_images_l1(weight,
                       100 + 10 * k + k,
                       channels=channels,
                       title='Decoding weights')
def AE_reconstruction_level1(raw_features,AE_weights_level_1):
    """Reconstruct inputs through the level-1 autoencoder and plot them.

    raw_features: one flattened 120x120 image per row.
    AE_weights_level_1: [(W,), b_hidden, b_visible] -- tied-weight layout.
    Prints the mean squared reconstruction error over all rows, then shows an
    original-vs-reconstruction figure per sample. Python 2 code (print
    statement, tuple-unpacking lambda).
    """
    hd_weights = AE_weights_level_1[0][0]
    bias_1 = AE_weights_level_1[1]
    bias_2 = AE_weights_level_1[2]
    ##compute AE reconstruction: encode then decode with tied weights
    #hd1_space = np.dot(raw_features, hd_weights)
    activations = sigmoid_function((np.dot(raw_features, hd_weights))+ bias_1)
    #activations = relu_function(hd1_space+ bias_1)
    #rec_space = np.dot(activations, hd_weights.T)
    rec = sigmoid_function((np.dot(activations, hd_weights.T)) + bias_2)
    #rec = relu_function(rec_space + bias_2)
    img_size = 120
    ##calculate rec error (squared error per row, then mean) over the data
    rec_mse_local = map(lambda (i, row): np.sum(np.power(np.subtract(row, raw_features[i]),2)),enumerate(rec))
    print 'all data mse: ', np.sum(rec_mse_local) / len(rec)
    #print count
    ##visualize original vs reconstruction image
    for i, row in enumerate(rec):
        #if np.sum(np.power(row-raw_features[i],2)) < 3: continue
        fig = plt.figure()
        #temp_orig = raw_features[i].reshape((img_size,img_size))
        ##print mean pixel value (disabled)
        # a = np.where(temp_orig > 0.001)
        # mean_orig = 0
        # for i_c in range(0,len(a[0])):
        #     mean_orig = mean_orig + temp_orig[a[0][i_c], a[1][i_c]]
        #print mean_orig/len(a[0])
        ####
        img_orig = raw_features[i].reshape((img_size, img_size, 1))
        a = fig.add_subplot(1, 2, 1)
        a.set_title('original')
        imgplot = plt.imshow(img_orig.squeeze(), cmap=plt.cm.gray)
        # temp = row.reshape((img_size, img_size))
        # ## print mean pixel value (disabled)
        # a = np.where(temp > 0.1)
        # mean_rec = 0
        # for i_c in range(0, len(a[0])):
        #     mean_rec = mean_rec+ temp[a[0][i_c], a[1][i_c]]
        #print mean_rec/len(a[0])
        ###
        img_rec = row.reshape((img_size, img_size, 1))
        a = fig.add_subplot(1, 2, 2)
        a.set_title('reconstruction')
        imgplot = plt.imshow(img_rec.squeeze(), cmap=plt.cm.gray)
        plt.show()
def hid_unit_activation_allLayers(raw_features, AE_weights):
    """Encode raw_features with one AE layer and return the activation matrix.

    raw_features -- 2-D array, one sample per row
    AE_weights   -- [[W], b_hidden, ...]; only encoder weights and hidden
                    bias are used here
    Returns a (n_samples, n_hidden) numpy array of sigmoid activations.
    """
    hd_weights = AE_weights[0][0]
    bias_1_level1 = AE_weights[1]
    #hd2_space = np.dot(raw_features, hd_weights)
    activations_l2 = sigmoid_function((np.dot(raw_features, hd_weights)) + bias_1_level1)
    matrix_activations_all_data = []
    counter = 0
    for i in np.arange(len(activations_l2)):
        # progress heartbeat every 10k rows
        if counter % 10000 == 0: print counter, datetime.now().time()
        counter += 1
        hist_activation = np.zeros((1, hd_weights.shape[1]))
        ##speed up version of the for loop using operator.set_item
        # Copies row i of activations_l2 into hist_activation element-wise
        # (Python 2 tuple-unpacking lambda).
        map(lambda (i_v, v): operator.setitem(hist_activation, (0, i_v), v), enumerate(activations_l2[i]))
        # NOTE(review): vstack inside the loop is O(n^2); the whole loop
        # appears equivalent to returning activations_l2 directly — confirm.
        if len(matrix_activations_all_data) > 0:
            matrix_activations_all_data = np.vstack((matrix_activations_all_data, hist_activation[0]))
        else:
            matrix_activations_all_data = hist_activation
    print matrix_activations_all_data.shape
    return matrix_activations_all_data
def check_rec_in_clusters(matrix_activations_data_l1, pred, AE_weight_layer1, kmean_centers, matrix_act_transf, save_img):
    """For each of 40 clusters, decode the activations closest to the
    cluster centre through the deep AE and optionally save them as images.

    matrix_activations_data_l1 -- activation matrix, one sample per row
    pred                       -- cluster label per sample
    AE_weight_layer1           -- deep AE weights: [[W1, W2], b1, b2, b_dec2, b_dec1]
                                  (indices inferred from usage below -- TODO confirm)
    kmean_centers              -- one centre vector per cluster (indexed per class)
    matrix_act_transf          -- unused here
    save_img                   -- truthy to write .jpg files under newpath
    """
    newpath = 'C:/Users/dario.dotti/Documents/data_for_personality_exp/after_2_data_recording/posture/posture_40clusters/'
    for cluster_class in range(40):
        print cluster_class
        ##get activation for the current class
        index_samples = np.where(pred==cluster_class)[0][:5]
        activation_current_class = matrix_activations_data_l1[index_samples]
        #activation_current_class_t = matrix_act_transf[index_samples]
        ######take the n closest samples #######
        ##compute the distance between activantion of current cluster and its cluster center
        d = cdist(activation_current_class, kmean_centers[cluster_class].reshape((1,-1)))
        closest_samples = np.sort(d, axis=0)[:5]
        index_min = [np.where(d == c_s) for c_s in closest_samples]
        activation_closest_samples = [activation_current_class[i_m[0][0]] for i_m in index_min]
        #######
        mean_rec = np.zeros((120,120))  # NOTE: accumulation below is commented out
        for i_sample, sample_current_class in enumerate(activation_closest_samples):
            if np.sum(sample_current_class)==0: continue  # skip all-zero samples
            # Each sample holds 50 stacked hidden vectors; decode every 5th.
            sample_current_class = sample_current_class.reshape((50, AE_weight_layer1[0][1].shape[1]))
            for i_s_c in xrange(0, len(sample_current_class), 5):
                #rec_space = np.dot(sample_current_class[i_s_c], AE_weight_layer1[0][0].T)
                #rec = sigmoid_function(rec_space + AE_weight_layer1[2]).reshape((120,120))
                ##deep AE: decode layer 2 then layer 1 with tied (transposed) weights
                rec_deep = sigmoid_function((np.dot(sample_current_class[i_s_c], AE_weight_layer1[0][1].T)) + AE_weight_layer1[3])
                rec = sigmoid_function((np.dot(rec_deep, AE_weight_layer1[0][0].T)) + AE_weight_layer1[4]).reshape((120,120))
                #mean_rec += rec
                ##Show
                # imgplot = plt.imshow(rec.squeeze(), cmap=plt.cm.gray)
                # plt.show()
                ##Save
                if save_img:
                    if not os.path.exists(newpath):
                        os.makedirs(newpath)
                    filename = newpath + str(cluster_class) + '_' + str(i_sample) + '_' + str(i_s_c) + '.jpg'
                    # mean_imgs_cluster = cv2.resize(mean_imgs_cluster,(54,54),interpolation= cv2.INTER_LINEAR)
                    plt.imsave(filename, rec.squeeze(), cmap=plt.cm.gray)
def save_n_layer2_example_per_clusters(matrix_activations_data_l2,pred,AE_weights_level_1, AE_weights_level_2,kmean_centers, save_img):
newpath = 'C:/Users/dario.dotti/Documents/data_for_personality_exp/after_data_cleaning/10_cluster_layer2_new/'
hd_weights_level1 = AE_weights_level_1[0][0]
bias_1_level1 = AE_weights_level_1[1]
bias_2_level1 = AE_weights_level_1[2]
hd_weights_level2 = AE_weights_level_2[0][0]
bias_1_level2 = AE_weights_level_2[1]
bias_2_level2 = AE_weights_level_2[2]
for cluster_class in range(10):
print 'cluster: ', cluster_class
##get activation for the current class
index_samples = np.where(pred==cluster_class)
activation_current_class = matrix_activations_data_l2[index_samples]
######take the n closest samples #######
##compute the distance between activantion of current cluster and its cluster center
d = cdist(activation_current_class, kmean_centers[cluster_class].reshape(1, 169))
closest_samples = np.sort(d, axis=0)[:5]
index_min = [np.where(d == c_s) for c_s in closest_samples]
activation_closest_samples = [activation_current_class[i_m[0][0]] for i_m in index_min]
#######
rec_space_l2 = np.dot(activation_closest_samples, hd_weights_level2.T)
rec_level2 = sigmoid_function(rec_space_l2 + bias_2_level2)
size_w1 = hd_weights_level1.shape[1]
size_image = 20 * 20
for i, sample_level2 in enumerate(rec_level2):
#fig = plt.figure()
n_subimg = 9
size_subimg = 20
##reconstructed sample
#a = fig.add_subplot(1, 2, 2)
#a.set_title('reconstruction')
sub_rec_img = np.zeros(((size_subimg + 1) * (n_subimg / 3) - 1, (size_subimg + 1) * n_subimg / 3 - 1))
try:
sub_samples = sample_level2.reshape((3, size_w1))
except:
continue
for r_c_counter, sample in enumerate(sub_samples):
rec_space_l1 = np.dot(sample, hd_weights_level1.T)
rec_level1 = sigmoid_function(rec_space_l1 + bias_2_level1).reshape((size_subimg, size_subimg))
## define the row and cols where to put the sub_imgs
r, c = divmod(r_c_counter, 3)
##add the subimg to the windows
sub_rec_img[r * (size_subimg + 1):(r + 1) * (size_subimg + 1) - 1,
c * (size_subimg + 1):(c + 1) * (size_subimg + 1) - 1] = rec_level1
#imgplot = plt.imshow(sub_rec_img.squeeze(), cmap=plt.cm.gray)
#plt.show()
##Save
if save_img:
if not os.path.exists(newpath):
os.makedirs(newpath)
filename = newpath + str(cluster_class) + '_' + str(i) + '.jpg'
# mean_imgs_cluster = cv2.resize(mean_imgs_cluster,(54,54),interpolation= cv2.INTER_LINEAR)
sub_rec_img = cv2.flip(sub_rec_img,1)
plt.imsave(filename, sub_rec_img.squeeze(), cmap=plt.cm.gray)
def visualize_activations(matrix_activation):
    """Window the activation matrix per participant, reduce with PCA,
    cluster with agglomerative clustering and plot the embedding.

    Returns (s, s_t, m_s, y_tr): windowed matrix, PCA-transformed matrix,
    fitted clustering model and predicted labels.

    Side effects: pickles the fitted PCA and a LinearSVC trained on the
    cluster labels to hard-coded paths, and opens blocking 2-D/3-D plots.
    """
    ### Training 625 matrix activation on shallow ae on only arms
    # participant_length = [0, 2197, 2082, 1873, 1595, 1779, 1991, 2148, 1702, 2484, 1744, 2902, 1947, 1860, 1743, 1645,
    # 2398, 2287, 1998, 1573]
    # s = []
    # dim = 30
    # for l in xrange(1, len(participant_length)):
    # slide = matrix_activation[participant_length[l - 1]:(participant_length[l - 1] + participant_length[l])]
    #
    # for m in xrange(0, len(slide) - dim, dim):
    # if len(s) > 0:
    # s = np.vstack((s, matrix_activation[m:m + dim].reshape((1, -1))))
    # else:
    # s = matrix_activation[m:m + dim].reshape((1, -1))
    ### trained deep AE on upperBody
    # Per-participant sample counts (leading 0 simplifies slicing below).
    participant_length = [0, 2876, 2394, 2256, 1998, 1887, 2597, 2703, 2105, 3137, 2190, 4072, 2226, 2282, 2480, 2120,
                          2536, 2507, 2511, 1675]
    s = []
    dim = 50  # window length in samples (5 sec of data per row)
    for l in xrange(1, len(participant_length)):
        slide = matrix_activation[participant_length[l - 1]:(participant_length[l - 1] + participant_length[l])]
        # NOTE(review): the inner loop indexes matrix_activation with window
        # offsets relative to `slide`, and the slice start above is the
        # previous participant's length rather than a cumulative offset —
        # looks like slide[m:m+dim] / cumulative sums were intended; confirm.
        for m in xrange(0, len(slide) - dim, dim):
            if len(s) > 0:
                s = np.vstack((s, matrix_activation[m:m + dim].reshape((1, -1))))
            else:
                s = matrix_activation[m:m + dim].reshape((1, -1))
    print s.shape
    #s = np.array(random.sample(matrix_activation, 30000))
    # kernel_bandwith = 5.1
    # X = img_processing.my_mean_shift(s, iterations=5, kernel_bandwith=kernel_bandwith)
    # print datetime.now().time()
    # my_kmean = KMeans(n_clusters=3, n_jobs=-1, algorithm='full')
    # X = my_kmean.fit(s)
    # means = np.mean(X,axis=1)
    pca = decomposition.PCA(n_components=100)  # 2-dimensional PCA whiten=True, svd_solver='randomized'
    # NOTE: s_t here is the *fitted PCA object* (pickled), then immediately
    # reassigned to the transformed data below.
    s_t = pca.fit(s)
    data_organizer.save_matrix_pickle(s_t,
                                      'C:/Users/dario.dotti/Documents/data_for_personality_exp/after_data_cleaning/posture_data/upperBody/100pca_deep900225AE_5sec_data.txt')
    s_t = pca.transform(s)
    #print s_t.shape
    print np.sum(pca.explained_variance_ratio_)
    # plt.bar(range(100), pca.explained_variance_ratio_)
    # plt.show()
    ## testing clustering
    #m_s = KMeans(n_clusters=10, n_jobs=-1)
    #m_s = MeanShift(n_jobs=-1,bandwidth=0.9)
    #m_s.fit(s_t)
    #s_t = s
    m_s = AgglomerativeClustering(n_clusters=15, affinity='cosine', linkage='average')
    # NOTE: fit() followed by fit_predict() fits the model twice.
    m_s.fit(s_t)
    y_tr = m_s.fit_predict(s_t)
    print Counter(y_tr)
    ##since agglomerative clustering doesnt have predict I use svm with the cluster labels for classification
    clf = svm.LinearSVC().fit(s_t, y_tr)
    data_organizer.save_matrix_pickle(clf,
                                      'C:/Users/dario.dotti/Documents/data_for_personality_exp/after_data_cleaning/posture_data/linearSVM_agglomerative15c_5sec_100pca.txt')
    #print 'file saved'
    # Random colour per cluster label for the scatter plots.
    colors = np.array(np.random.randint(0, 255, size=(20, 3))) / 255.0
    color_labels = [colors[p] for p in y_tr]
    ## 2D
    plt.scatter(s_t[:, 0], s_t[:, 1], c=color_labels)
    #plt.scatter(m_s[:, 0], m_s[:, 1], marker='^', c='r')
    plt.show()
    ##3D
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    ax.scatter(s_t[:, 0], s_t[:, 1], s_t[:, 2], c=color_labels)  # s_t[:, 1], s_t[:, 2],s_t[:,0]
    #ax.scatter(m_s[:, 0], m_s.means_[:, 1], m_s.means_[:, 2], marker='^', c='r')
    plt.show()
    return s, s_t, m_s, y_tr
def cluster_activation_allLayers(matrix_activation, AE_weights_layer1, AE_weights_level2, n_layer):
    """Cluster AE activations and visualize per-cluster reconstructions.

    n_layer selects the visualization path: 1 -> check_rec_in_clusters,
    2 -> save_n_layer2_example_per_clusters. Returns the cluster labels.
    """
    matrix_activation, matrix_act_transf, cluster_model, pred = visualize_activations(matrix_activation)
    # pred = cluster_model.labels_
    # kmean_centers = []
    #hs.determine_number_k_kMeans(np.array(random.sample(matrix_activation,20000)))
    #my_kmean = KMeans(n_clusters=10,n_jobs=-1,algorithm='full')
    #cluster_model = my_kmean.fit(matrix_activation)
    #data_organizer.save_matrix_pickle(cluster_model,'C:/Users/dario.dotti/Documents/data_for_personality_exp/after_data_cleaning/head_joint_id1/10_cluster_model_layer2_new.txt')
    #cluster_model = data_organizer.load_matrix_pickle('C:/Users/dario.dotti/Documents/data_for_personality_exp/after_data_cleaning/head_joint_id1/40_cluster_model_layer2_new.txt')
    ## Testing the model
    #pred = cluster_model.predict(matrix_activation)
    #print Counter(pred).most_common()
    # NOTE(review): kmean_centers is left empty while the KMeans path is
    # commented out, but check_rec_in_clusters indexes it per class —
    # confirm the centres are restored before running the n_layer==1 path.
    kmean_centers = []#np.array(cluster_model.cluster_centers_)
    #d = cdist(kmean_centers.reshape(10, 625), kmean_centers.reshape(10, 625))
    ## Visualize the clusters
    if n_layer == 1:
        check_rec_in_clusters(matrix_activation, pred, AE_weights_layer1, kmean_centers, matrix_act_transf, save_img=1)
    elif n_layer == 2:
        save_n_layer2_example_per_clusters(matrix_activation, pred, AE_weights_layer1, AE_weights_level2,
                                           kmean_centers, save_img=1)
    return pred
def create_cluster_labels_participant_task(raw_features, AE_weights_level_2):
    """Label every feature vector with its layer-2 cluster and pickle the result.

    raw_features is nested participant -> task -> feature-vector; the output
    preserves that nesting with a cluster label in place of each vector.
    """
    cluster_model = data_organizer.load_matrix_pickle(
        'C:/Users/dario.dotti/Documents/data_for_personality_exp/after_data_cleaning/head_joint_id1/10_cluster_model_layer2_new.txt')
    enc_weights = AE_weights_level_2[0][0]
    enc_bias = AE_weights_level_2[1]

    def label_of(f_vector):
        # Encode with the level-2 AE, then let the cluster model assign a label.
        act = sigmoid_function(np.dot(f_vector, enc_weights) + enc_bias)
        return cluster_model.predict(act.reshape((1, -1)))[0]

    final_labels = [[[label_of(f_vector) for f_vector in task]
                     for task in participant]
                    for participant in raw_features]
    data_organizer.save_matrix_pickle(final_labels,
                                      'C:/Users/dario.dotti/Documents/data_for_personality_exp/after_data_cleaning/head_joint_id1/10clusters_labels_l2_participants_tasks_new.txt')
def main():
    """Entry point: load layer-1 AE weights, plot them and cluster a
    precomputed activation matrix.

    Alternative pipeline stages are kept as commented-out code; everything
    after the `return 0` (the layer-2 temporal section) is dead code.
    """
    ###### Layer 1 #########
    #raw_features = data_organizer.load_matrix_pickle('C:/Users/dario.dotti/Documents/data_for_personality_exp/after_data_cleaning/posture_data/posture_shoulder_arms_3fps.txt')
    AE_weights_level_1 = data_organizer.load_matrix_pickle('C:/Users/dario.dotti/Desktop/Hier_AE_deliverable/ae/head_joint_id1/144weights_l1_hd1002.txt')#'C:/Users/dario.dotti/Documents/data_for_personality_exp/after_data_cleaning/posture_data/upperBody/deep_AE_900_225_weights_008hd1_noNoise.txt')#625_weights_008hd1
    # ############
    #
    #data_new = [f for part in raw_features for task in part for f in task]
    #data_new= []
    #map(lambda part: map(lambda task: data_new.append(task), part),raw_features)
    ####Visually check weights
    plot_layers_l1(AE_weights_level_1[0], tied_weights=True)
    plt.show()
    #plt.show()
    # ##Visually check the reconstruction
    #AE_reconstruction_level1(data_new,AE_weights_level_1)
    #
    # ##create activation matrix
    #matrix_activations_data_l1 = hid_unit_activation_allLayers(data_new, AE_weights_level_1)
    #data_organizer.save_matrix_pickle(matrix_activations_data_l1,'C:/Users/dario.dotti/Documents/data_for_personality_exp/after_data_cleaning/posture_data/matrix_act_400_weights.txt')
    #matrix_activations_data_l1 = data_organizer.load_matrix_pickle('C:/Users/dario.dotti/Documents/data_for_personality_exp/after_data_cleaning/posture_data/matrix_act_625_weights_6fps.txt')
    matrix_activations_data_l1 = data_organizer.load_matrix_pickle(
        'C:/Users/dario.dotti/Documents/data_for_personality_exp/after_data_cleaning/posture_data/upperBody/matrix_deep_act_225_weights.txt')
    # ####Clustering on the activation matrix
    cluster_activation_allLayers(matrix_activations_data_l1, AE_weights_level_1, [], n_layer=1)
    return 0
    # NOTE: everything below is unreachable because of the return above.
    # ################
    #### Layer 2 temporal reconstruction####
    raw_features = data_organizer.load_matrix_pickle(
        'C:/Users/dario.dotti/Documents/data_for_personality_exp/after_data_cleaning/head_joint_id1/feature_matrix_participant_task_l2_new.txt')
    AE_weights_level_1 = data_organizer.load_matrix_pickle(
        'C:/Users/dario.dotti/Documents/data_for_personality_exp/after_data_cleaning/ae/head_joint_id1/144weights_l1_hd1002.txt')
    original_points_grid_level_2 = data_organizer.load_matrix_pickle(
        'C:/Users/dario.dotti/Documents/data_for_personality_exp/after_data_cleaning/head_joint_id1/orig_points_participant_task_l2_new.txt')
    AE_weights_level_2 = data_organizer.load_matrix_pickle(
        'C:/Users/dario.dotti/Documents/data_for_personality_exp/after_data_cleaning/ae/head_joint_id1/169weights_l2_001_new.txt')
    data_new = [f for part in raw_features for task in part for f in task]
    ## Visualize weights layer 2 temporal##
    # AE_showWeights_level2_temporalExperiment(AE_weights_level_2, AE_weights_level_1)
    # original_points_grid_level_2_new = [f for part in original_points_grid_level_2 for task in part for f in task]
    # reconstruction_AE_weights_level2_temporal(data_new, original_points_grid_level_2_new, AE_weights_level_1, AE_weights_level_2)
    #### Get activations and cluster ####
    #matrix_activations_data_l2 = hid_unit_activation_allLayers(data_new,AE_weights_level_2)
    #cluster_activation_allLayers(matrix_activations_data_l2, AE_weights_level_1, AE_weights_level_2,n_layer=2 )
    #####
    create_cluster_labels_participant_task(raw_features, AE_weights_level_2)
if __name__ == '__main__':
    # Script entry point.
    main()
|
{"/demo.py": ["/data_organizer.py", "/video_traj.py", "/ambient_sensors.py"]}
|
7,367
|
Zumbalamambo/behavior_understanding_indoor_dataset
|
refs/heads/master
|
/video_traj_old_dataset.py
|
import cv2
import numpy as np
from sklearn.preprocessing import normalize
from datetime import datetime, timedelta
import img_processing as my_img_proc
import visualization as vis
kinect_max_distance=0
def get_coordinate_points(occurance):
    """Parse raw tracking lines into smoothed coordinates.

    Each line is space-separated: field 1 is the point id, fields 2-4 hold
    x, y, z. The (x, y) pairs are median-filtered to cancel sensor noise.
    Returns (x_filtered, y_filtered, zs, ids).
    """
    xs = []
    ys = []
    zs = []
    ids = []
    for line in occurance:
        fields = line.split(' ')
        xs.append(int(float(fields[2])))
        ys.append(int(float(fields[3])))
        zs.append(float(fields[4]))
        ids.append(str(fields[1]))
    #apply filter to cancel noise
    x_f, y_f = my_img_proc.median_filter(zip(xs, ys))
    return x_f, y_f, zs, ids
def histograms_of_oriented_trajectories(list_poly, time_slices):
    """Build an L2-normalized Histogram-of-Oriented-Tracklets matrix.

    list_poly   -- 2-D image patches (polygons); each is split in two depth
                   cuboids (near/far of kinect_max_distance/2)
    time_slices -- list of raw tracking-line lists, one per time interval

    Returns one row per time slice: concatenated 24-bin HOT descriptors per
    cuboid, plus a final column holding the slice's starting hour.
    """
    hot_all_data_matrix = []
    hot_all_data_matrix_append = hot_all_data_matrix.append
    for i in xrange(0, len(time_slices)):
        ##Checking the start time of every time slice
        if(len(time_slices[i]) > 1):
            print 'start time: %s' % str(time_slices[i][0].split(' ')[8])
        else:
            print 'no data in this time slice'
        #get x,y,z of every traj point after smoothing process
        x_filtered, y_filtered, zs, ids = get_coordinate_points(time_slices[i])
        #initialize histogram of oriented tracklets
        hot_matrix = []
        for p in xrange(0, len(list_poly)):
            tracklet_in_cube_f = []
            tracklet_in_cube_c = []
            tracklet_in_cube_append_f = tracklet_in_cube_f.append
            tracklet_in_cube_append_c = tracklet_in_cube_c.append
            for ci in xrange(0, len(x_filtered)):
                #2d polygon
                if list_poly[p].contains_point((int(x_filtered[ci]), int(y_filtered[ci]))):
                    ## 3d cube close to the camera
                    if zs[ci] < (kinect_max_distance/2):
                        tracklet_in_cube_append_c([x_filtered[ci], y_filtered[ci], ids[ci]])
                    else: ##3d cube far from the camera
                        tracklet_in_cube_append_f([x_filtered[ci], y_filtered[ci], ids[ci]])
            # Near cuboid first, then far, so column order is stable.
            for three_d_poly in [tracklet_in_cube_c, tracklet_in_cube_f]:
                if len(three_d_poly) > 0:
                    ## for tracklet in cuboids compute HOT following paper
                    hot_single_poly = my_img_proc.histogram_oriented_tracklets(three_d_poly)
                    ## compute hot+curvature
                    #hot_single_poly = my_img_proc.histogram_oriented_tracklets_plus_curvature(three_d_poly)
                else:
                    hot_single_poly = np.zeros((24))  # empty cuboid -> zero descriptor
                ##add to general matrix
                if len(hot_matrix) > 0:
                    hot_matrix = np.hstack((hot_matrix, hot_single_poly))
                else:
                    hot_matrix = hot_single_poly
        hot_all_data_matrix_append(hot_matrix)
    ## normalize the final matrix
    normalized_finalMatrix = np.array(normalize(np.array(hot_all_data_matrix), norm='l2'))
    #print np.array(hot_all_data_matrix).shape
    ##add extra bin with hours
    hs = np.zeros((len(time_slices), 1))
    for i, t in enumerate(time_slices):
        if len(t) > 1:
            hs[i] = int(t[0].split(' ')[8].split(':')[0])
        else:
            # Empty slice: carry over the previous slice's hour.
            hs[i] = hs[i-1]
    normalized_finalMatrix = np.hstack((normalized_finalMatrix, hs))
    return normalized_finalMatrix
def occupancy_histograms_in_time_interval(my_room, list_poly, time_slices):
    """Count tracked points per 3-D patch for every time slice.

    my_room     -- background image (only used by the commented-out display)
    list_poly   -- 2-D patches; each contributes two counters (near/far of
                   kinect_max_distance/2)
    time_slices -- list of raw tracking-line lists, one per time interval

    Returns an L2-normalized (n_slices, n_patches*2) matrix with an extra
    final column holding each slice's starting hour.
    """
    # #get number of patches
    slice_col = my_img_proc.get_slice_cols()
    slice_row = my_img_proc.get_slice_rows()
    slice_depth = my_img_proc.get_slice_depth()
    my_data_temp = []
    my_data_temp_append = my_data_temp.append
    for i in xrange(0, len(time_slices)):
        ##Checking the start time of every time slice
        if(len(time_slices[i]) > 1):
            print 'start time: %s' % str(time_slices[i][0].split(' ')[8])
        else:
            print 'no data in this time slice'
        ## counter for every id should be empty
        track_points_counter = np.zeros((slice_col*slice_row*slice_depth))
        ##get x,y,z of every traj point after smoothing process
        x_filtered, y_filtered, zs, ids = get_coordinate_points(time_slices[i])
        ## display traj on img
        #temp_img = copy.copy(my_room)
        #my_img_proc.display_trajectories(temp_img, list_poly, x_filtered, y_filtered)
        ## count the occurances of filtered point x,y in every patches
        for p in xrange(0, len(list_poly)):
            for ci in xrange(0, len(x_filtered)):
                ## 2d polygon
                if list_poly[p].contains_point((int(x_filtered[ci]), int(y_filtered[ci]))):
                    ## 3d cube close to the camera (even index)
                    if zs[ci] < (kinect_max_distance/2):
                        track_points_counter[p*2] = track_points_counter[p*2] + 1
                        continue
                    else: ## 3d cube far from the camera (odd index)
                        track_points_counter[(p*2)+1] = track_points_counter[(p*2)+1] + 1
                        continue
        ## save the data of every group in the final matrix
        my_data_temp_append(track_points_counter)
    ## normalize the final matrix
    normalized_finalMatrix = np.array(normalize(np.array(my_data_temp), norm='l2'))
    print 'final matrix size:'
    print normalized_finalMatrix.shape
    ##add extra bin with hours
    hs = np.zeros((len(time_slices), 1))
    for i, t in enumerate(time_slices):
        if len(t) > 1:
            hs[i] = int(t[0].split(' ')[8].split(':')[0])
        else:
            # Empty slice: carry over the previous slice's hour.
            hs[i] = hs[i-1]
    normalized_finalMatrix = np.hstack((normalized_finalMatrix, hs))
    return normalized_finalMatrix
def org_OLDdata_timeIntervals(file):
    """Read a raw tracking file and split its lines into fixed time slices.

    file -- path to a space-separated tracking file; field 4 is depth (z),
            field 8 a %H:%M:%S timestamp

    Side effect: sets the module global kinect_max_distance to the maximum
    depth seen in the file. Returns (file_content, time_slices) where
    time_slices is a list of line-lists, one per 10-minute interval.
    """
    #print file
    #get all the data
    with open(file, 'r')as f:
        file_content = f.read().split('\n')
    # save max distance to kinect
    zs = map(lambda line: float(line.split(' ')[4]), file_content)
    global kinect_max_distance
    kinect_max_distance = np.max(zs)
    #Split file according to periods of time
    content_time = map(lambda line: line.split(' ')[8], file_content)
    time_interval_hours = 0
    time_interval_minutes = 10  # slice width
    init_t = datetime.strptime(content_time[0], '%H:%M:%S')
    end_t = datetime.strptime(content_time[len(content_time)-1], '%H:%M:%S')
    x = datetime.strptime('0:0:0', '%H:%M:%S')  # unused; kept as-is
    tot_duration = (end_t-init_t)
    counter = 0
    #print 'total duration of this file: %s' %str(tot_duration)
    time_slices = []
    time_slices_append = time_slices.append
    #get data in every timeslices
    # Walk the recording in fixed steps; each pass re-scans the timestamps
    # and collects the lines falling inside the current window.
    while init_t < (end_t-timedelta(hours=time_interval_hours, minutes=time_interval_minutes)):
        list_time_interval = []
        list_time_interval_append = list_time_interval.append
        #print init_t
        for t in xrange(len(content_time)):
            if datetime.strptime(content_time[t], '%H:%M:%S') >= init_t and datetime.strptime(content_time[t], '%H:%M:%S') < init_t + timedelta(hours=time_interval_hours, minutes=time_interval_minutes):
                list_time_interval_append(file_content[t])
            if datetime.strptime(content_time[t], '%H:%M:%S') > init_t + timedelta(hours=time_interval_hours, minutes=time_interval_minutes):
                break  # timestamps are ordered; nothing further can match
        #print len(list_time_interval)
        ##save time interval without distinction of part of the day
        time_slices_append(list_time_interval)
        init_t = init_t+timedelta(hours=time_interval_hours, minutes=time_interval_minutes)
    return file_content, time_slices
def feature_extraction_video_traj(file_traj):
    """Run the old-dataset trajectory pipeline on one tracking file.

    Divides the 480x640 room image into patches, slices the file into time
    intervals, computes HOT descriptors and plots the motion summary.
    Returns None (the combined return is commented out).
    """
    print 'old dataset'
    ##visulaization apathy over week 19_4-29_4
    # motion_week = [12.038,9.022,7.974,9.9650,2.113,4.4285,5.7845]
    # slight_motion_week = [27.856,22.571,27.846,31.002,13.4013,10.6954,28.1096]
    # sedentary_week = [29.236,36.7410,35.1045,53.6780,35.505,43.7546,57.1622]
    #
    # vis.bar_plot_motion_in_region_over_long_time(motion_week)
    ##divide image into patches(polygons) and get the positions of each one
    my_room = np.zeros((480, 640), dtype=np.uint8)
    list_poly = my_img_proc.divide_image(my_room)
    ##--------------Pre-Processing----------------##
    content, skeleton_data_in_time_slices = org_OLDdata_timeIntervals(file_traj)
    #occupancy_histograms = occupancy_histograms_in_time_interval(my_room, list_poly, skeleton_data_in_time_slices)
    occupancy_histograms = 1  # placeholder while the call above is disabled
    ## create Histograms of Oriented Tracks
    HOT_data = histograms_of_oriented_trajectories(list_poly, skeleton_data_in_time_slices)
    #HOT_data = 1
    vis.bar_plot_motion_over_time(HOT_data)
    #vis.pie_plot_motion_day(HOT_data)
    #return [occupancy_histograms,HOT_data]
if __name__ == '__main__':
    # Example run on one recorded tracking file.
    feature_extraction_video_traj('C:/Users/dario.dotti/Documents/tracking_points/tracking_data_kinect2/29_4.txt')
|
{"/demo.py": ["/data_organizer.py", "/video_traj.py", "/ambient_sensors.py"]}
|
7,368
|
Zumbalamambo/behavior_understanding_indoor_dataset
|
refs/heads/master
|
/experiments.py
|
import os
import numpy as np
import cv2
from sklearn.linear_model import LogisticRegression
from sklearn.cluster import KMeans,MeanShift
from collections import Counter
from scipy.spatial.distance import cdist, pdist
import matplotlib.pylab as plt
from sklearn.preprocessing import normalize
import random
import data_organizer
my_ms = MeanShift(n_jobs=-1,bandwidth=0.3)
def as_classification_experiment(AS_data):
    """Leave-one-subject-out task classification on ambient-sensor data.

    AS_data -- matrix of 6 rows (one per task) per subject, subjects stacked
               in order. Each iteration holds one subject's 6 rows out,
               trains LogisticRegression on the rest and scores the held-out
               rows (tasks 0 and 1 are scored as interchangeable).
    Prints the mean per-subject accuracy.
    """
    #AS_data = np.array(data_organizer.load_matrix_pickle('C:/Users/dario.dotti/Documents/AS_activation_6_labels.txt'))
    n_label = 6
    test_label = [0, 1, 2, 3, 4, 5]
    tot_accuracy = 0
    # one_leave_out_training = []
    # one_leave_out_test = []
    # one_leave_out_labels = []
    for t in xrange(0, len(AS_data), n_label):
        matrix_test = AS_data[t:(t+n_label)]
        AS_matrix_unordered = np.vstack((AS_data[:t], AS_data[(t+n_label):]))
        # Reorder rows so all task-0 rows come first, then task-1, etc.
        # NOTE(review): every task iteration assigns to the same indices
        # (matrix_training[row], not row offset by task) and so overwrites
        # the previous pass, leaving most rows zero — verify intent.
        matrix_training = np.zeros((AS_matrix_unordered.shape[0], AS_matrix_unordered.shape[1]))
        for task in range(0, n_label):
            for row in range(0, AS_matrix_unordered.shape[0], n_label):
                c = row+task
                matrix_training[row] = AS_matrix_unordered[c]
        #labels: block of subject-count 0s, then 1s, ... matching the reorder
        labels = np.zeros((AS_matrix_unordered.shape[0], 1))
        b = 0
        e = len(AS_matrix_unordered)/n_label
        for i in range(0, n_label):
            for row in range(b, e):
                labels[row] = i
            b += (len(AS_matrix_unordered)/n_label)
            e += (len(AS_matrix_unordered)/n_label)
        # one_leave_out_training.append(matrix_training)
        # one_leave_out_test.append(matrix_test)
        # one_leave_out_labels.append(np.ravel(labels))
        # return one_leave_out_training,one_leave_out_test,one_leave_out_labels
        ##classification
        lr = LogisticRegression()
        lr.fit(matrix_training, np.ravel(labels))
        pred = lr.predict(matrix_test)
        accuracy = 0
        for i in range(0, len(pred)):
            # Tasks 0 and 1 count as correct for each other.
            if i == 0 or i == 1:
                if pred[i] == 0 or pred[i] == 1:
                    accuracy += 1
                    continue
            if pred[i] == test_label[i]:
                accuracy += 1
        tot_accuracy += float(accuracy)/6
    print tot_accuracy/(len(AS_data)/n_label)
def video_classification_experiments(HOT_matrix):
    """Leave-one-subject-out task classification on video HOT features.

    Same protocol as as_classification_experiment (near-duplicate code):
    6 rows per subject, LogisticRegression, tasks 0/1 interchangeable.
    Prints the mean per-subject accuracy.
    """
    #HOT_matrix = np.array(data_organizer.load_matrix_pickle('C:/Users/dario.dotti/Documents/hot_spatial_grid_4x4x3_6_labels.txt'))
    #HOT_matrix = np.array(data_organizer.load_matrix_pickle('C:/Users/dario.dotti/Documents/hot_spatial_grid_4x4_5_labels.txt'))
    n_label = 6
    test_label = [0, 1, 2, 3, 4, 5]
    tot_accuracy = 0
    # one_leave_out_training = []
    # one_leave_out_test = []
    # one_leave_out_labels = []
    for t in xrange(0, len(HOT_matrix), n_label):
        #print t,t+n_label
        matrix_test = HOT_matrix[t:(t+n_label)]
        HOT_matrix_unordered = np.vstack((HOT_matrix[:t], HOT_matrix[(t+n_label):]))
        # NOTE(review): same reordering pattern as as_classification_experiment —
        # each task pass writes matrix_training[row] (no task offset) and
        # overwrites the previous pass, leaving most rows zero; verify intent.
        matrix_training = np.zeros((HOT_matrix_unordered.shape[0], HOT_matrix_unordered.shape[1]))
        for task in range(0, n_label):
            for row in range(0, HOT_matrix_unordered.shape[0], n_label):
                c = row+task
                matrix_training[row] = HOT_matrix_unordered[c]
        #labels
        labels = np.zeros((HOT_matrix_unordered.shape[0], 1))
        b = 0
        e = len(HOT_matrix_unordered)/n_label
        for i in range(0, n_label):
            for row in range(b, e):
                labels[row] = i
            b += (len(HOT_matrix_unordered)/n_label)
            e += (len(HOT_matrix_unordered)/n_label)
        # one_leave_out_training.append(matrix_training)
        # one_leave_out_test.append(matrix_test)
        # one_leave_out_labels.append(np.ravel(labels))
        # return one_leave_out_training,one_leave_out_test,one_leave_out_labels
        ##classification
        lr = LogisticRegression()
        lr.fit(matrix_training, np.ravel(labels))
        pred = lr.predict(matrix_test)
        accuracy = 0
        for i in range(0, len(pred)):
            # Tasks 0 and 1 count as correct for each other.
            if i == 0 or i == 1:
                if pred[i] == 0 or pred[i] == 1:
                    accuracy += 1
                    continue
            if pred[i] == test_label[i]:
                accuracy += 1
        tot_accuracy += float(accuracy)/6
    print tot_accuracy/(len(HOT_matrix)/n_label)
    #print lr.score(matrix_test,test_label)
def video_clustering_fit(HOT_matrix, filename):
    """Fit the shared module-level MeanShift model on HOT_matrix.

    When filename is longer than two characters the fitted model is
    pickled to that path; otherwise nothing is saved.
    """
    global my_ms
    my_ms.fit(HOT_matrix)
    wants_save = len(filename) > 2
    if wants_save:
        data_organizer.save_matrix_pickle(my_ms, filename)
def video_clustering_pred(data):
    """Cluster `data` with the shared MeanShift model and return the labels.

    NOTE(review): fit_predict RE-FITS the module-level model on `data`
    rather than predicting with the previously fitted one — confirm that is
    intended given the function's name.
    """
    predict = my_ms.fit_predict(data)
    #data_organizer.save_matrix_pickle(predict,'C:/Users/dario.dotti/Documents/cl_prediction_2secWindow_band03.txt')
    return predict
def example_for_every_cluster_center(pred):
    """Pick one example image path for each of the 30 most common clusters.

    pred -- cluster label per sample, aligned with the lines of content.txt
            (field 3 of each line is the image filename)
    Returns a list of [label, count, image_path] triples.
    """
    with open('C:/Users/dario.dotti/Documents/content.txt', 'r') as f:
        images_name = f.read().split('\n')
    class_counter = Counter(pred)
    print class_counter.most_common()
    example_for_cl_centers = []
    for k, v in class_counter.most_common(30):
        #print k,v
        # First sample carrying this label serves as the example.
        index = np.where(pred == k)[0]
        path = 'C:/Users/dario.dotti/Documents/time_windows_HOT/' + images_name[index[0]].split(' ')[3]
        example_for_cl_centers.append([k, v, path])
    return example_for_cl_centers
def visualize_cluster_pred():
    """Interactively browse saved cluster predictions: for up to 20 clusters
    show up to 20 member images each (press a key to advance).
    """
    with open('C:/Users/dario.dotti/Documents/content_6labels.txt', 'r') as f:
        images_name = f.read().split('\n')
    pred = data_organizer.load_matrix_pickle('C:/Users/dario.dotti/Documents/cl_prediction_2secWindow_band03.txt')
    class_counter = Counter(pred)
    print class_counter
    for k in class_counter.keys()[:20]:
        print k
        index = np.where(pred == k)[0]
        for i in index[:20 ]:
            # Field 3 of the content line is the image filename.
            path = 'C:/Users/dario.dotti/Documents/time_windows_HOT/' + images_name[i].split(' ')[3]
            img = cv2.imread(path)
            cv2.imshow('ciao', img)
            cv2.waitKey(0)  # block until a key is pressed
def test_hist_task(cluster_model, labels_counter, HOT_matrix):
    """Build bag-of-words histograms per task from cluster predictions.

    cluster_model  -- fitted model with .predict
    labels_counter -- (label, count) pairs; the labels define the vocabulary
    HOT_matrix     -- nested subject -> task -> time-slice features

    Returns (matrix_training, labels, tasks_dict): histograms stacked by
    task, the matching task labels, and the per-task histogram dict.
    """
    ##create bag of words using clustering
    keys_labels = map(lambda x: x[0], labels_counter)
    print keys_labels
    ##organize the matrix per tasks
    tasks_dict = {'0': [], '1': [], '2': [], '3': [], '4': [],
                  '5': []} # ['lookKey','lookBall','conf','ripet','write','tea']
    for subject in HOT_matrix:
        for i_task, task in enumerate(subject):
            hist = np.zeros((1, len(keys_labels)))
            pred = cluster_model.predict(task)
            for p in pred:
                # Count only predictions inside the vocabulary.
                if p in keys_labels:
                    index = np.where(p == keys_labels)[0][0]
                    hist[0][index] += 1
                #else:
                #print 'outsider'
            #hist = normalize(np.array(hist),norm='l1')
            tasks_dict[str(i_task)].append(hist)
    #data_organizer.save_matrix_pickle(tasks_dict,'C:/Users/dario.dotti/Documents/bow_experiment_data/test_PECS/tasks_dict.txt')
    ## labels for the tasks
    labels = []
    for k in range(0, 6):
        for x in range(0, len(tasks_dict[str(k)])):
            labels.append(k)
    ##Unroll the dict to make a training matrix
    matrix_training = []
    for k in range(0, 6):
        v = tasks_dict[str(k)]
        print k
        for vv in v:
            if len(matrix_training) > 0:
                matrix_training = np.vstack((matrix_training, vv))
            else:
                matrix_training = vv
    print np.array(matrix_training).shape
    return matrix_training, labels, tasks_dict
def experiment_video():
    """Merge the 5-task and 6-task HOT datasets, cluster the time slices and
    build bag-of-words task histograms.

    The 5-task recordings merged tasks lookKey/lookBall into one; task 3 is
    re-split at the per-subject offsets in length_task3 to yield 6 tasks.
    Returns (matrix_training, labels, tasks_dict) from test_hist_task.
    """
    #video_classification_experiments()
    #as_classification_experiment()
    HOT_matrix_5_tasks = np.array(data_organizer.load_matrix_pickle('C:/Users/dario.dotti/Documents/bow_experiment_data/test_PECS/hot_spatial_grid_4x4x3_5_tasks_2secWindow_without_outliers.txt')).tolist()
    HOT_matrix_6_tasks = np.array(data_organizer.load_matrix_pickle('C:/Users/dario.dotti/Documents/hot_spatial_grid_4x4x3_6_tasks_2secWindow.txt'
                                                                    )).tolist() # 'C:/Users/dario.dotti/Documents/bow_experiment_data/test_PECS/hot_spatial_grid_4x4x3_6_tasks_2secWindow_without_outliers.txt'
    #HOT_matrix_6_tasks = np.array(data_organizer.load_matrix_pickle('C:/Users/dario.dotti/Documents/bow_experiment_data/hot_spatial_grid_4x4x3_6_tasks_2secWindow.txt')).tolist()
    #HOT_matrix_5_tasks = np.array(data_organizer.load_matrix_pickle('C:/Users/dario.dotti/Documents/bow_experiment_data/hot_spatial_grid_4x4x3_5_tasks_2secWindow.txt')).tolist()
    # Per-subject split point inside the combined task 3.
    length_task3 = [45, 51, 33, 51, 62]
    ##modify 5 tasks to make it 6 tasks and merge the two matrices
    for n_subj, subject in enumerate(HOT_matrix_5_tasks):
        new_subject = []
        for n_task, task in enumerate(subject):
            if n_task == 3:
                new_subject.append(task[:length_task3[n_subj]])
                new_subject.append(task[length_task3[n_subj]:])
            else:
                new_subject.append(task)
        HOT_matrix_6_tasks.append(new_subject)
    #data_organizer.save_matrix_pickle(HOT_matrix_6_tasks,'C:/Users/dario.dotti/Documents/bow_experiment_data/final_HOT_matrix_6_tasks.txt')
    ##transform matrix for clustering: flatten subject/task/time-slice nesting
    HOT_matrix_for_cluster = []
    for s in xrange(0, len(HOT_matrix_6_tasks)):
        for t in xrange(0, len(HOT_matrix_6_tasks[s])):
            for time_slice in xrange(0, len(HOT_matrix_6_tasks[s][t])):
                if len(HOT_matrix_for_cluster) > 0:
                    HOT_matrix_for_cluster = np.vstack((HOT_matrix_for_cluster, HOT_matrix_6_tasks[s][t][time_slice]))
                else:
                    HOT_matrix_for_cluster = HOT_matrix_6_tasks[s][t][time_slice]
    print np.array(HOT_matrix_for_cluster).shape
    # #Clustering Meanshift
    ##video_clustering_fit(concatenated_matrix,'C:/Users/dario.dotti/Documents/cl_model_2secWindow_band03.txt')
    #cluster_model = data_organizer.load_matrix_pickle('C:/Users/dario.dotti/Documents/bow_experiment_data/cl_model_2secWindow_band03.txt')
    #labels = cluster_model.predict(HOT_matrix_for_cluster)
    # #Clustering KMeans
    # determine_number_k_kMeans(HOT_matrix_for_cluster)
    # cluster_model = KMeans(n_clusters=30, n_jobs=-1)
    # cluster_model.fit(HOT_matrix_for_cluster)
    # data_organizer.save_matrix_pickle(cluster_model,
    # 'C:/Users/dario.dotti/Documents/bow_experiment_data/cl_30_kmeans_model_2secWindow_newVersion.txt')
    # data_organizer.save_matrix_pickle(cluster_model,
    # 'C:/Users/dario.dotti/Documents/cl_30_kmeans_model_2secWindow_newVersion.txt')
    cluster_model = data_organizer.load_matrix_pickle('C:/Users/dario.dotti/Documents/bow_experiment_data/test_PECS/cl_30_kmeans_model_2secWindow_without_outliers.txt')
    labels = cluster_model.predict(HOT_matrix_for_cluster)
    labels_counter = Counter(labels).most_common(30)
    #data_organizer.save_matrix_pickle(labels_counter,'C:/Users/dario.dotti/Documents/cluster_3_kmeans_word__without_outliers.txt')
    matrix_training, labels, tasks_dict = test_hist_task(cluster_model, labels_counter, HOT_matrix_6_tasks)
    return matrix_training, labels, tasks_dict
def determine_number_k_kMeans(matrix_activations_data_l1):
    """Elbow-method plot to choose K for KMeans.

    Fits KMeans for K in range(2, 73, 10), computes between-cluster variance
    (total - within-cluster sum of squares) and plots it against K.
    Blocking: opens a matplotlib window.
    """
    #matrix_activations_data_l1 = np.array(random.sample(matrix_activations_data_l1,10000))
    #matrix_activations_data_l1 = matrix_activations_data_l1[:10000]
    import warnings
    warnings.filterwarnings("ignore")
    ##Determine number of K
    ##variance intra cluster
    #k_range = range(2, 103, 10)
    k_range = range(2, 73, 10)
    print k_range
    k_means_var = [KMeans(n_clusters=k, n_jobs=-1).fit(matrix_activations_data_l1) for k in k_range]
    centroids = [X.cluster_centers_ for X in k_means_var]
    # Distance of every sample to every centroid, then to its own centroid.
    k_euclid = [cdist(matrix_activations_data_l1, cent, 'euclidean') for cent in centroids]
    dist = [np.min(ke, axis=1) for ke in k_euclid]
    wcss = [sum(d ** 2) for d in dist]  # within-cluster sum of squares per K
    tss = sum(pdist(matrix_activations_data_l1) ** 2) / matrix_activations_data_l1.shape[0]  # total sum of squares
    bss = tss - wcss  # between-cluster variance per K
    plt.plot(k_range, bss)
    plt.show()
def experiment_as():
as_matrix_6_tasks = np.array(data_organizer.load_matrix_pickle('C:/Users/dario.dotti/Documents/bow_experiment_data/AS_activation_6_labels.txt')).tolist()
as_matrix_5_tasks_transformed = np.array(data_organizer.load_matrix_pickle('C:/Users/dario.dotti/Documents/bow_experiment_data/AS_activation_5_labels_transformed.txt')).tolist()
as_matrix = np.vstack((as_matrix_6_tasks,as_matrix_5_tasks_transformed))
#as_matrix = np.array(as_matrix_6_tasks)
#as_classification_experiment(as_matrix)
print as_matrix.shape
return as_matrix
def main_experiment():
#visualize_cluster_pred()
BOW_HOT,labels,tasks_dict = experiment_video()
#as_matrix = experiment_as()
#data_organizer.save_matrix_pickle(BOW_HOT,'C:/Users/dario.dotti/Documents/BOW_3_kmeans_16subject_2sec_without_outlier.txt')
#data_organizer.save_matrix_pickle(labels,'C:/Users/dario.dotti/Documents/BOW_3_kmeans_labels_16subject_2sec_without_outlier.txt')
#concatenated_matrix = np.hstack((BOW_HOT,as_matrix))
## meanshift
# ripetitive_index = range(32,48)
# confusion_index = range(48,64)
# tea_index = range(64,80)
## kmeans
confusion_index = range(32,48)
ripetitive_index = range(48,64)
tea_index = range(81,96)
##classification
for i_t in tea_index:
test_set = BOW_HOT[i_t]
label_test = labels[i_t]
training = np.vstack((BOW_HOT[:i_t],BOW_HOT[(i_t+1):]))
label_tr = labels[:i_t] + labels[(i_t+1):]
lr = LogisticRegression()
lr.fit(training,np.ravel(label_tr))
pred = lr.predict(np.array(test_set).reshape((1,-1)))
print pred
# Script entry point.
if __name__ == '__main__':
    main_experiment()
|
{"/demo.py": ["/data_organizer.py", "/video_traj.py", "/ambient_sensors.py"]}
|
7,369
|
Zumbalamambo/behavior_understanding_indoor_dataset
|
refs/heads/master
|
/data_organizer.py
|
from lxml import etree
import numpy as np
from multiprocessing.dummy import Pool as ThreadPool
from datetime import datetime, timedelta
import cPickle
import cv2
import matplotlib.path as mplPath
# Depth (z) range of the most recently processed recording.
# Written by read_all_data(), read back via get_max_min_depth_value().
__max_depth_value = 0
__min_depth_value = 0
def xml_parser(path_to_file):
    """Parse a Kinect tracking XML file into per-frame joint arrays.

    Iterates over every <tracksInfo> element and collects its children's
    x/y/z attributes into a (21, 3) array per frame. Joints reported as
    '-1.#INF' (untracked) are skipped and stay zero.

    Returns a list of (21, 3) numpy arrays, one per frame.
    """
    frames = []
    for _event, element in etree.iterparse(path_to_file, tag='tracksInfo'):
        joints = np.zeros((21, 3))
        for idx, child in enumerate(element):
            ##store all the body joints; untracked ones are left as zeros
            if child.attrib['x'] == '-1.#INF' or child.attrib['y'] == '-1.#INF':
                continue
            joints[idx, 0] = child.attrib['x']
            joints[idx, 1] = child.attrib['y']
            joints[idx, 2] = child.attrib['z']
        frames.append(joints)
        element.clear()  # release the parsed subtree to bound memory use
    return frames
def read_all_data(joint_points):
    """Pack raw per-point records into an (n, 4) array: x, y, z, tracking id.

    x and y are truncated to integer pixel coordinates, z stays float.
    Side effect: updates the module-level __max_depth_value and
    __min_depth_value from the z column.
    """
    global __max_depth_value
    global __min_depth_value

    table = np.zeros((len(joint_points), 4))
    for row, record in enumerate(joint_points):
        table[row, 0] = int(float(record[0]))
        table[row, 1] = int(float(record[1]))
        table[row, 2] = float(record[2])
        # tracking id
        table[row, 3] = np.uint64(record[3])

    # remember the depth range of this recording
    __max_depth_value = np.max(table[:, 2])
    __min_depth_value = np.min(table[:, 2])

    return table
def org_data_timeIntervals(file):
    """Split a raw sensor log into fixed-length time slices.

    Reads the whole file, records the maximum camera distance (column 4)
    into the module-global kinect_max_distance, then cuts the lines into
    consecutive 10-minute windows based on the timestamp in column 8.

    Returns (file_content, time_slices): all lines, and a list of
    per-window line lists.

    NOTE(review): relies on Python 2 semantics — map() returning a list
    (content_time is indexed below) and xrange(); indentation here was
    reconstructed from a flattened dump, confirm against the original.
    """
    #get all the data
    with open(file,'r')as f:
        file_content = f.read().split('\n')

    # save max distance to kinect (column 4 is the z coordinate)
    zs = map(lambda line: float(line.split(' ')[4]),file_content)
    global kinect_max_distance
    kinect_max_distance = np.max(zs)

    #Split file according to periods of time (column 8 is HH:MM:SS)
    content_time = map(lambda line: line.split(' ')[8],file_content)

    time_interval_hours = 0
    time_interval_minutes = 10

    init_t = datetime.strptime(content_time[0],'%H:%M:%S')
    end_t = datetime.strptime(content_time[len(content_time)-1],'%H:%M:%S')
    x = datetime.strptime('0:0:0','%H:%M:%S')
    tot_duration = (end_t-init_t)
    counter = 0
    #print 'total duration of this file: %s' %str(tot_duration)

    time_slices = []
    time_slices_append = time_slices.append

    #get data in every timeslice: advance a 10-minute window until the
    #remaining span is shorter than one window
    while init_t < (end_t-timedelta(hours=time_interval_hours,minutes=time_interval_minutes)):
        list_time_interval = []
        list_time_interval_append = list_time_interval.append
        for t in xrange(len(content_time)):
            # collect lines whose timestamp falls inside [init_t, init_t + window)
            if datetime.strptime(content_time[t],'%H:%M:%S')>= init_t and datetime.strptime(content_time[t],'%H:%M:%S') < init_t + timedelta(hours=time_interval_hours,minutes=time_interval_minutes):
                list_time_interval_append(file_content[t])
            # lines are time-ordered, so stop scanning past the window
            if datetime.strptime(content_time[t],'%H:%M:%S') > init_t + timedelta(hours=time_interval_hours,minutes=time_interval_minutes):
                break
        ##save time interval without distinction of part of the day
        time_slices_append(list_time_interval)
        init_t= init_t+timedelta(hours=time_interval_hours,minutes=time_interval_minutes)

    return file_content,time_slices
def thread_function(k,data,ids):
print k
temp_track = []
temp_track_append= temp_track.append
map(lambda i: temp_track_append(data[i]) if ids[i] == k else False,xrange(len(data)))
return temp_track
def read_data_tracklets(data,multiThread):
ids =map(lambda x: x[3],data)
keys = list(set(ids))
#data is sorted only if multithread isnt used
keys = sorted(keys,key=lambda x: x)
print len(keys)
####MULTI-THREAD VERSION######
if multiThread:
cores = 6
pool = ThreadPool(cores)
print 'n cores: '+str(cores)
tracklets = pool.map(lambda k: thread_function(k,data,ids), keys)
#close the pool and wait for the work to finish
pool.close()
pool.join()
###########################
else:
# keys= keys[:500]
tracklets = []
tracklets_append = tracklets.append
for k in keys:
#print k
temp_track = []
temp_track_append= temp_track.append
map(lambda i: temp_track_append(data[i]) if ids[i] == k else False,xrange(len(data)))
tracklets_append(temp_track)
return tracklets
def get_max_min_depth_value():
    """Return [max, min] depth of the last recording processed by
    read_all_data()."""
    depth_bounds = [__max_depth_value, __min_depth_value]
    return depth_bounds
def load_matrix_pickle(path):
with open(path, 'rb') as handle:
file = cPickle.load(handle)
print 'file_loaded'
return file
def save_matrix_pickle(file,path):
    """Serialize *file* (any picklable object) to *path* with cPickle,
    protocol 2 for Python 2/3 compatibility."""
    with open(path, 'wb') as handle:
        cPickle.dump(file, handle, protocol=2)
def get_areas_boxes(scene):
    """Build the polygonal regions of interest for the (old) recording room.

    Each region is an axis-aligned matplotlib Path whose vertices are
    ordered: left-bottom, left-top, right-top, right-bottom. The outlines
    are also drawn onto *scene* in black.

    Returns (boxes, zs, scene) where zs is the per-box depth threshold
    (same order as boxes).
    """
    scene_shape = np.array(scene).shape
    h = scene_shape[0]
    w = scene_shape[1]

    # vertical/horizontal split coordinates of the room layout
    x40 = int(w * 0.40)
    x66 = int(w * 0.66)
    y65 = int(h * 0.65)
    y55 = int(h * 0.55)

    def _quad(x_left, x_right, y_top, y_bottom):
        # vertices: left-bottom, left-top, right-top, right-bottom
        return mplPath.Path(np.array([[x_left, y_bottom], [x_left, y_top],
                                      [x_right, y_top], [x_right, y_bottom]]))

    ##old recording room##
    box_right = _quad(x66, w, 0, h)
    box_left = _quad(0, x40, 0, h)
    box_center_far = _quad(x40, x66, 0, h)
    box_center_close = _quad(x40, x66, 0, h)
    cabinet_left = _quad(0, x40, y65, h)
    cabinet_right = _quad(x66, w, y55, h)

    boxes = [box_center_close, cabinet_left, box_left, box_center_far, box_right, cabinet_right]

    ##draw box outlines on the scene (top-left to bottom-right corner)
    for rect in boxes:
        cv2.rectangle(scene, (int(rect.vertices[1][0]), int(rect.vertices[1][1])),
                      (int(rect.vertices[3][0]), int(rect.vertices[3][1])), (0, 0, 0))

    # per-box depth threshold, same order as boxes
    zs = [2.4, 2.4, 3.2, 4.5, 4.3, 3.2]

    return boxes, zs, scene
|
{"/demo.py": ["/data_organizer.py", "/video_traj.py", "/ambient_sensors.py"]}
|
7,370
|
Zumbalamambo/behavior_understanding_indoor_dataset
|
refs/heads/master
|
/ambient_sensors.py
|
import visualization as vis
from datetime import datetime, timedelta
import numpy as np
from sklearn.preprocessing import normalize
def file_parser(pathfile_binary):
    """Read the sensor log at *pathfile_binary* and return its lines
    (split on newline, trailing empty string included if present)."""
    with open(pathfile_binary, 'r') as handle:
        return handle.read().split('\n')
def org_data_ID(pathfile_binary):
content = file_parser(pathfile_binary)
##TODO: read the key from an external file that will contain the position of each sensor
sensors_ID = {}
key = ['entrance','cabinet_left','cabinet_right']
#initialize dictionary
for k in key:
sensors_ID.setdefault(k,[])
for i in xrange(1,len(content)):
if content[i][:3] == 'a53':
sensors_ID['entrance'].append(content[i])
elif content[i][:3] == 'a50' or content[i][:3] == 'a56':
sensors_ID['cabinet_right'].append(content[i])
elif content[i][:3] == 'a51':
sensors_ID['cabinet_left'].append(content[i])
for k in sensors_ID.keys():
if len(sensors_ID[k])%2 != 0:
print k+' contains odd number of events: '+ str(len(sensors_ID[k]))
return sensors_ID
def org_data_different_tasks(sensors_ID,file_AS):
    """Cut the sensor stream into per-task slices using entrance-door events.

    Every 'ON' entrance event marks a task boundary; events strictly
    between consecutive boundary pairs (i, i+1) form one slice. Entrance
    ('a53') and header ('Sen') lines are excluded from the slices.
    """
    # timestamps of entrance-door ON events (task boundaries)
    entrance_time = []
    for event_door in sensors_ID['entrance']:
        fields = event_door.split(' ')
        if fields[0][9:11] == 'ON':
            hh, mm, ss = fields[2].split('-')
            entrance_time.append(datetime.strptime(hh + ':' + mm + ':' + ss, '%H:%M:%S'))

    ##get sensors and delete the entrance activations
    sensor = [line for line in file_parser(file_AS)
              if line[:3] != 'a53' and line[:3] != 'Sen']

    time_slices = []
    for i in range(0, len(entrance_time), 2):
        current_slice = []
        for line in sensor:
            hh, mm, ss = line.split(' ')[2].split('-')
            stamp = datetime.strptime(hh + ':' + mm + ':' + ss, '%H:%M:%S')
            # strictly inside the boundary pair
            if entrance_time[i] < stamp < entrance_time[i + 1]:
                current_slice.append(line)
        time_slices.append(current_slice)

    return time_slices
def night_motion():
    # Placeholder: night-time motion analysis is not implemented yet.
    print 'night motion'
def nr_visit_bathroom(sensors_ID):
object_events=sensors_ID['cabinet_left']
print len(object_events)
if len(object_events)%2 != 0:
print 'odd number of events: '+ str(len(object_events))
vis.plot_ambient_sensor_over_time(object_events)
# #iter 2 elements each time to check if all th epairs are ON-OFF
# for i in xrange(0,len(object_events)-1,2):
# #if they are not skip one
# if object_events[i][9:11] == object_events[i+1][9:11]:
# i+=1
#
# print object_events[i],object_events[i+1]
def feature_extraction_as(file_AS):
    """Turn an ambient-sensor log into one L1-normalized activation
    histogram per task.

    Columns: 0 -> a50, 1 -> a51, 2 -> a56. Task index 3 is split into its
    first 6 and remaining events so 5-task recordings align with 6-task
    ones.
    """
    sensors_ID = org_data_ID(file_AS)
    time_slices_in_tasks = org_data_different_tasks(sensors_ID, file_AS)

    sensor_column = {'a50': 0, 'a51': 1, 'a56': 2}

    def _normalized_hist(block):
        # sum event indicators per sensor, then L1-normalize the row
        return normalize(block.sum(axis=0).reshape(1, -1), norm='l1', axis=1)[0]

    activation_matrix = []
    for n_task, events in enumerate(time_slices_in_tasks):
        counts = np.zeros((len(events), 3))
        for row, event in enumerate(events):
            col = sensor_column.get(event.split(' ')[0][:3])
            if col is not None:
                counts[row][col] += 1

        ##transform 5 tasks in 6 tasks: split task 3 into two histograms
        if n_task == 3:
            activation_matrix.append(_normalized_hist(counts[:6]))
            activation_matrix.append(_normalized_hist(counts[6:]))
        else:
            activation_matrix.append(_normalized_hist(counts))

    return activation_matrix
|
{"/demo.py": ["/data_organizer.py", "/video_traj.py", "/ambient_sensors.py"]}
|
7,371
|
Zumbalamambo/behavior_understanding_indoor_dataset
|
refs/heads/master
|
/realtime_traj_dict.py
|
import numpy as np
import cv2
import math
from datetime import datetime
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.feature_extraction.image import img_to_graph
import img_processing
import data_organizer
import video_traj
import hierarchical_ae_learning_methods as hs
import AE_rec
## NOTE(review): resolved an unresolved git merge conflict here. Kept the
## incoming-branch version that actually loads the model files, because
## encode_features_using_AE_layer1_cluster_activation() below dereferences
## both AE_weights_level_1 and cluster_model_l1 — the HEAD side left them
## commented out, which would raise NameError at call time.
AE_weights_level_1 = data_organizer.load_matrix_pickle(
    'C:/Users/dario.dotti/Desktop/Hier_AE_deliverable/ae/head_joint_id1/144weights_l1_hd1002.txt')

cluster_model_l1 = data_organizer.load_matrix_pickle(
    'C:/Users/dario.dotti/Desktop/Hier_AE_deliverable/40_cluster_model_layer2_new.txt')
def encode_features_using_AE_layer1_cluster_activation(feature_matrix,layer):
    """Project trajectory features through the layer-1 autoencoder.

    layer='layer2' returns the hidden-unit activation vector;
    layer='layer1' returns the cluster label predicted from it.

    NOTE(review): the function returns from inside the loop, so only the
    first row of *feature_matrix* is ever encoded — presumably callers
    always pass a single-sample matrix; confirm. Indentation was
    reconstructed from a flattened dump.
    """
    ##check visually the reconstruction
    #AE_rec.AE_reconstruction_level1(feature_matrix, AE_weights_level_1)
    for test_traj in feature_matrix:
        if sum(test_traj) != 0:
            ##compute AE reconstruction: hidden = sigmoid(x . W + b)
            hd1_space = np.dot(test_traj, AE_weights_level_1[0][0])
            activations = AE_rec.sigmoid_function(hd1_space + AE_weights_level_1[1])
        else:
            # all-zero trajectory: zero activation vector of hidden-layer size
            activations= np.zeros((1,AE_weights_level_1[0][0].shape[1]))
        activations = activations[0]
        if layer == 'layer2':
            return activations
        elif layer == 'layer1':
            label = cluster_model_l1.predict(activations.reshape((1,-1)))[0]
            return label
def extract_traj_word_spatio_temporal_grid(participant_data, n_layer):
    """Slide a direction-oriented grid along each head trajectory and encode
    fixed-length chunks as features.

    participant_data: tasks -> time slices -> raw skeleton samples.
    n_layer: 1 collects raw pixel-activation features per chunk; 2 encodes
    each chunk through the layer-1 AE and groups three consecutive chunks
    into one layer-2 sample.

    NOTE(review): the nesting below was reconstructed from a flattened
    dump — confirm against the original file before relying on it.
    """
    create_activation_layer2 = 1
    create_bayes_vector = 0

    #scene = cv2.imread('C:/Users/dario.dotti/Documents/Datasets/my_dataset/wandering_dataset_um/exp_scene_depth.jpg')
    # white canvas matching the depth-camera resolution
    scene = np.zeros((414, 512, 3), dtype=np.uint8)
    scene += 255

    training_bayes_vector = []
    for i_task, task in enumerate(participant_data):
        print 'task: ',i_task
        if len(task)==0: continue

        if n_layer == 1:
            matrix_features = []
        elif n_layer == 2:
            matrix_activations = []
            matrix_orig_points = []
        training_bayes_vector_task = []

        for n_slice in range(0,len(task)):
            print 'n_slice ', n_slice
            flat_list = [item for item in task[n_slice]]
            video_traj.draw_joints_and_tracks(flat_list, [])
            # get x,y,z of every traj point after smoothing process
            x_f, y_f, z, ids = img_processing.get_coordinate_points(flat_list, joint_id=1)

            ########### start hierarchical autoencoder learning #######################
            size_mask = 18
            # grid diagonal (with 3px margin) scaled by 1.3 = target chunk length
            max_step = np.sqrt(np.power(((size_mask - 3) - 0), 2) + np.power(((size_mask - 3) - 0), 2)) * 1.3

            start_t = 0
            first_point_traj = [x_f[start_t], y_f[start_t]]
            temp_scene = scene.copy()

            labels_history = []
            directions_history = []
            activation_history = []
            orig_points_history = []

            for i_p in xrange(1, len(x_f)):
                ##accumulate traj points until the distance between the first point and current point is enough for the grid
                d = np.sqrt(((x_f[i_p] - first_point_traj[0]) ** 2) + ((y_f[i_p] - first_point_traj[1]) ** 2))

                ##if the distance is enough compute the grid starting from the first point until the current point
                if abs(d - max_step) < 8:
                    xs_untilNow = x_f[start_t:i_p]
                    ys_unilNow = y_f[start_t:i_p]

                    # over-long chunks (loitering/noise): restart the window
                    if len(xs_untilNow) > 30:
                        ##update the beginning of the trajectory
                        start_t = i_p - 1
                        first_point_traj = [x_f[start_t], y_f[start_t]]
                        continue

                    ##get directions of the traj chunck using first and last point
                    directions = hs.get_directions_traj(xs_untilNow, ys_unilNow)
                    if directions[0] == -180: directions[0] = 180  # fold -180 onto 180
                    directions_history.append(directions[0])

                    ##create grid according to the direction of the trajectory
                    rects_in_grid = hs.create_grid(xs_untilNow, ys_unilNow, size_mask, directions, temp_scene)
                    ##compute the features from traj chuncks in rect
                    traj_features, orig_points = hs.transform_traj_in_pixel_activation(rects_in_grid, xs_untilNow,
                        ys_unilNow, size_mask, max_step)

                    if n_layer ==1:
                        #########store final matrix#################
                        if len(matrix_features) > 0:
                            matrix_features = np.vstack((matrix_features, traj_features.reshape((1, -1))))
                        else:
                            matrix_features = traj_features.reshape((1, -1))

                    elif n_layer == 2:
                        orig_points_history.append(orig_points)
                        activation = encode_features_using_AE_layer1_cluster_activation(traj_features, 'layer2')
                        activation_history.append(activation)

                        # three consecutive chunks form one layer-2 sample
                        if len(activation_history) == 3 :
                            cv2.imshow('scene', temp_scene)
                            cv2.waitKey(0)

                            if create_activation_layer2:
                                ##extract features for AE layer2
                                matrixt_activation_l2, original_points_l2 = hs.create_vector_activations_layer_2(
                                    directions_history, activation_history, orig_points_history)
                                ##save activations for layer2
                                if len(matrix_activations) > 0: matrix_activations = np.vstack((matrix_activations, matrixt_activation_l2))
                                else: matrix_activations = matrixt_activation_l2
                                ##save original for layer2
                                if len(matrix_orig_points) > 0: matrix_orig_points = np.vstack((matrix_orig_points, original_points_l2))
                                else: matrix_orig_points = original_points_l2

                            elif create_bayes_vector:
                                for a in activation_history:
                                    label = cluster_model_l1.predict(a.reshape((1, -1)))[0]
                                    labels_history.append(label)
                                ###create vector with two info label
                                vector_bayes = hs.create_vector_for_bayesian_probability(
                                    labels_history, directions_history, 3)
                                ## saving data for cluster prediction
                                if len(training_bayes_vector_task) > 0: training_bayes_vector_task = np.vstack((training_bayes_vector_task, vector_bayes))
                                else: training_bayes_vector_task = vector_bayes

                            ##refresh history
                            orig_points_history = []
                            directions_history = []
                            activation_history = []
                            orig_points_history = []
                            labels_history = []

                    ##GENERAL FOR ALL THE LAYERS## Update the beginning of the trajectory
                    start_t = i_p - 1
                    first_point_traj = [x_f[start_t], y_f[start_t]]

        #training_bayes_vector.append(training_bayes_vector_task)
        #print matrix_activations.shape
        ##save matrix activation / original points: persistence calls left disabled

    print np.array(training_bayes_vector).shape
    # data_organizer.save_matrix_pickle(training_bayes_vector,
    #     'C:/Users/dario.dotti/Documents/data_for_personality_exp/computed_matrix/bayes_vector.txt')
def extract_traj_word_temporal_window(participant_data, n_layer):
<<<<<<< HEAD
scene = cv2.imread('C:/Users/dario.dotti/Documents/Datasets/my_dataset/wandering_dataset_um/exp_scene_depth.jpg')
#scene = np.zeros((414, 512, 3), dtype=np.uint8)
#scene += 255
=======
#scene = cv2.imread('C:/Users/dario.dotti/Documents/Datasets/my_dataset/wandering_dataset_um/exp_scene_depth.jpg')
scene = np.zeros((414, 512, 3), dtype=np.uint8)
scene += 255
>>>>>>> 9348384985d2847c272133ff77ce6181ca1fa082
list_poly = img_processing.divide_image(scene)
size_mask = 20
#max_step = np.sqrt(np.power(((size_mask - 3) - 0), 2) + np.power(((size_mask - 3) - 0), 2)) * 1.3
max_step = 23
matrix_features_participant = []
matrix_original_points_participants = []
matrix_real_coord_participants = []
for i_task, task in enumerate(participant_data):
print 'task: ', i_task
if len(task) == 0: continue
create_activation_layer2 = 1
labels_history = []
directions_history = []
activation_history = []
orig_points_history = []
real_coord= []
temp_scene = scene.copy()
matrix_features_task = []
matrix_activations_task = []
matrix_orig_points_task = []
matrix_real_coord = []
temp_scene = scene.copy()
for n_slice in range(0, len(task)):
if len(task[n_slice]) <= 1 : continue
#print 'n_slice ', n_slice
flat_list = [item for item in task[n_slice]]
#video_traj.draw_joints_and_tracks(flat_list, [])
# get x,y,z of every traj point after smoothing process
x_f, y_f, z, ids = img_processing.get_coordinate_points(flat_list, joint_id=1)
# for point in range(len(x_f)):
# cv2.circle(temp_scene,(x_f[point],y_f[point]),1,(0,0,255),-1)
# cv2.imshow('ciao', temp_scene)
# cv2.waitKey(0)
<<<<<<< HEAD
=======
>>>>>>> 9348384985d2847c272133ff77ce6181ca1fa082
directions = hs.get_directions_traj(x_f, y_f)
if directions[0] == -180: directions[0] = 180
directions_history.append(directions[0])
distances = [np.sqrt(((x_f[0] - x_f[i_p]) ** 2) + ((y_f[0] - y_f[i_p]) ** 2)) for i_p in range(1, len(y_f))]
index_max = int(np.where(distances == np.max(distances))[0][0])
size_mask_in_temporalWindow = distances[index_max]
##create grid according to the direction of the trajectory
rects_in_grid = hs.create_grid(x_f, y_f, size_mask_in_temporalWindow, directions, temp_scene)
origin_mask = [rects_in_grid[0].vertices[1][0], rects_in_grid[0].vertices[1][1]]
x_converted = [x - origin_mask[0] for x in x_f]
y_converted = [y - origin_mask[1] for y in y_f]
### Get the max distance in the list ####
#print distances[index_max]
if distances[index_max] > size_mask/2:
OldRange = (distances[index_max])
NewRange = 20
New_x = [(((x) * NewRange) / OldRange) + 0 for x in x_converted]
New_y = [(((y) * NewRange) / OldRange) + 0 for y in y_converted]
##create grid according to the direction of the trajectory
rects_in_grid = hs.create_grid(New_x, New_y, size_mask, directions, temp_scene)
##compute the features from traj chuncks in rect
traj_features,orig_points = hs.transform_traj_in_pixel_activation_temporal(rects_in_grid[0], New_x, New_y, size_mask, 10)
else:
traj_features, orig_points = hs.transform_traj_in_pixel_activation(rects_in_grid, x_f, y_f, size_mask, 10)
if np.sum(traj_features) == 0: continue
if n_layer == 1:
if len(matrix_features_task) > 0:
matrix_features_task = np.vstack((matrix_features_task, traj_features.reshape((1, -1))))
else:
matrix_features_task = traj_features.reshape((1, -1))
elif n_layer == 2:
orig_points_history.append(orig_points)
activation = encode_features_using_AE_layer1_cluster_activation(traj_features, 'layer2')
activation_history.append(activation)
real_coord.append([x_f,y_f,z])
if len(activation_history) == 3:
# cv2.imshow('scene', temp_scene)
# cv2.waitKey(0)
if create_activation_layer2:
##extract features for AE layer2
# matrixt_activation_l2, original_points_l2 = hs.create_vector_activations_layer_2(
# directions_history, activation_history, orig_points_history)
matrixt_activation_l2 = activation_history[0]
for i_act in range(1,len(activation_history)):
matrixt_activation_l2 = np.hstack((matrixt_activation_l2, activation_history[i_act]))
original_points_l2 = orig_points_history[0]
for i_org_p in range(1,len(orig_points_history)):
original_points_l2 = np.hstack((original_points_l2, orig_points_history[i_org_p]))
##save activations for layer2
if len(matrix_activations_task) > 0: matrix_activations_task = np.vstack((matrix_activations_task, matrixt_activation_l2))
else: matrix_activations_task = matrixt_activation_l2
##save original for layer2
# if len(matrix_orig_points_task) > 0: matrix_orig_points_task = np.vstack((matrix_orig_points_task, original_points_l2))
# else: matrix_orig_points_task = original_points_l2
##save original coordinates for layer2
temp = [np.concatenate(r,axis=0) for r in real_coord]
matrix_real_coord.append(temp)
##refresh history
orig_points_history = []
directions_history = []
activation_history = []
orig_points_history = []
labels_history = []
real_coord = []
matrix_features_participant.append(matrix_activations_task)
matrix_original_points_participants.append(matrix_orig_points_task)
matrix_real_coord_participants.append(matrix_real_coord)
return matrix_features_participant,matrix_original_points_participants,matrix_real_coord_participants
def get_distances_between_points(participant_data):
##get max distances in every time slice
n = 1
list_max_dist =[]
list_dist_two_points = []
for task in participant_data:
for slice in task[:4]:
if len(slice) <=1: continue
list_distances = []
flat_list = [item for item in slice]
x_f, y_f, z, ids = img_processing.get_coordinate_points(flat_list, joint_id=1)
a =1
distances = [np.sqrt(((x_f[0] - x_f[i_p]) ** 2) + ((y_f[0] - y_f[i_p]) ** 2)) for i_p in range(1, len(y_f))]
index_max = int(np.where(distances == np.max(distances))[0][0])
dist_two_points = [np.sqrt(((x_f[i_p] - x_f[i_p+1]) ** 2) + ((y_f[i_p] - y_f[i_p+1]) ** 2)) for i_p in range(0, len(y_f)-1)]
index_max_two_p = int(np.where(dist_two_points == np.max(dist_two_points))[0][0])
list_dist_two_points.append(dist_two_points[index_max_two_p])
list_max_dist.append(distances[index_max])
#return np.array(list_max_dist).reshape(-1,1)
# ###Eliminate outlier (bigger than double median), and set the size window
best_ten = np.sort(list_max_dist)[::-1][:10]
#print 'AAAA'
print best_ten
med = np.median(best_ten)
new_data = map(lambda x: x if x < (med*1.5) else False, list_max_dist)
best_ten = np.sort(new_data)[::-1][:10]
#print best_ten
w_s = int(np.max(new_data))
print 'window size: ',w_s
best_ten = np.sort(list_dist_two_points)[::-1][:10]
#print best_ten
med = np.median(best_ten)
new_data = map(lambda x: x if x < (med * 3) else False, list_dist_two_points)
best_ten = np.sort(new_data)[::-1][:10]
#print best_ten
max_step = int(np.max(new_data))
print 'max_step: ', max_step
return w_s,max_step
def visualize_cluster(data):
    """Fit a 4-way KMeans on *data* and plot the first feature of every
    sample (black dots) with the centroids overlaid as blue crosses."""
    model = KMeans(n_clusters=4, n_jobs=-1, init='k-means++')
    model.fit(data)

    plt.plot(range(0, len(data)), data[:, 0], 'k.', markersize=2)

    # Plot the centroids as crosses
    centroids = model.cluster_centers_
    plt.scatter(range(0, len(centroids)), centroids[:, 0],
                marker='x', s=169, linewidths=3,
                color='b', zorder=10)
    plt.show()
def main_realtime_traj_dict():
#slices = data_organizer.load_matrix_pickle('C:/Users/dario.dotti/Documents/data_for_personality_exp/traj_org_by_ID_10fps.txt')
skeleton_data_in_tasks_and_time_slices = data_organizer.load_matrix_pickle(
'C:/Users/dario.dotti/Documents/data_for_personality_exp/after_data_cleaning/skeleton_data_in_tasks_time_slices_30fps.txt')#'C:/Users/dario.dotti/Desktop/data_recordings_master/master_skeleton_data_in_tasks_time_slices_30fps.txt')
### Staatistics on data #####
# matrix_dist = []
# for participant in skeleton_data_in_tasks_and_time_slices:
#
# max_dist,max_step = get_distances_between_points(participant)
#
# matrix_dist.append(max_dist)
#
# matrix_dist = np.array(matrix_dist).reshape(-1,1)
# hs.determine_number_k_kMeans(matrix_dist)
# visualize_cluster(matrix_dist)
max_dist = 140
max_step = 15
final_matrix = []
final_orig_points = []
final_matrix_realcoord =[]
for participant in skeleton_data_in_tasks_and_time_slices:
#extract_traj_word_spatio_temporal_grid(participant, n_layer=1)
<<<<<<< HEAD
feature_participant,orig_point_participant = extract_traj_word_temporal_window(participant, n_layer=1)
=======
feature_participant,orig_point_participant,matrix_real_coord = extract_traj_word_temporal_window(participant, n_layer=2)
final_matrix_realcoord.append(matrix_real_coord)
>>>>>>> 9348384985d2847c272133ff77ce6181ca1fa082
final_matrix.append(feature_participant)
final_orig_points.append(orig_point_participant)
print len(final_matrix),len(final_orig_points)
final_matrix=final_matrix+final_matrix_realcoord
data_organizer.save_matrix_pickle(final_matrix, 'C:/Users/dario.dotti/Desktop/Hier_AE_deliverable/head_joint_id1/feature_matrix_participant_task_l2_new_realCoordinates.txt')
#data_organizer.save_matrix_pickle(final_orig_points,
# 'C:/Users/dario.dotti/Documents/data_for_personality_exp/after_data_cleaning/head_joint_id1/orig_points_participant_task_l2_new.txt')
# Script entry point.
if __name__ == '__main__':
    main_realtime_traj_dict()
|
{"/demo.py": ["/data_organizer.py", "/video_traj.py", "/ambient_sensors.py"]}
|
7,372
|
Zumbalamambo/behavior_understanding_indoor_dataset
|
refs/heads/master
|
/hierarchical_ae_learning_methods.py
|
from math import atan2,pi
import cv2
import numpy as np
import matplotlib.path as mplPath
import matplotlib.pyplot as plt
import random
from scipy.spatial.distance import cdist, pdist
from sklearn.cluster import MeanShift,KMeans,SpectralClustering
import img_processing
def myround_ofdirection(x, base=45):
    """Round *x* to the nearest integer multiple of *base* (default 45,
    quantizing an angle in degrees onto the 8 compass directions)."""
    return int(round(float(x) / base) * base)
def get_directions_traj(xs,ys):
    """Return the coarse headings of a trajectory, quantized to 45 degrees.

    The first heading is taken from the first point to the midpoint
    (falling back to first-to-last when those points are nearly
    coincident); the second from the midpoint to the last point.

    NOTE(review): indentation reconstructed from a flattened dump — the
    second-half heading is assumed to be computed unconditionally; confirm
    against the original file.
    """
    list_orientation = []

    dx = float(xs[0]) - float(xs[int(len(xs)/2)])
    dy = float(ys[0]) - float(ys[int(len(ys)/2)])

    ##take care of points that are too close: use the full span instead
    if dx <=1 and dy <= 1:
        dx = float(xs[0]) - float(xs[int(len(xs)-1)])
        dy = float(ys[0]) - float(ys[int(len(ys)-1)])
        ##I round the angle to 8 key directions: 0 left, 45 up-left, 90 up, 135 up-right, 180 right, -135 down-right, -90 down, -45 down-left
        list_orientation.append(myround_ofdirection(atan2(dy, dx) / pi * 180))
    else:
        ##I round the angle to 8 key directions: 0 left, 45 up-left, 90 up, 135 up-right, 180 right, -135 down-right, -90 down, -45 down-left
        list_orientation.append(myround_ofdirection(atan2(dy, dx) / pi * 180))

    # heading of the second half: midpoint to last point
    dx = float(xs[int(len(xs)/2)]) - float(xs[len(xs)-1])
    dy = float(ys[int(len(ys)/2)]) - float(ys[len(ys)-1])
    t = myround_ofdirection(atan2(dy, dx) / pi * 180)
    list_orientation.append(t)

    return list_orientation
def create_grid(xs, ys, size_mask, directions, scene):
    """Anchor a size_mask x size_mask rectangle next to the trajectory start.

    The first chunk direction (a multiple of 45 degrees, image coordinates
    with y growing downwards) decides where the rectangle sits relative to
    the first point (xs[0], ys[0]): diagonal directions put a corner next
    to the point, straight directions centre one edge on it.  The rectangle
    and all trajectory points are also drawn on *scene* for inspection.

    Returns:
        list containing the single matplotlib ``Path`` rectangle; by
        construction ``vertices[1]`` is always the top-left corner and
        ``vertices[3]`` the bottom-right one (callers rely on this).
    """
    ## The reason i dont use the already existing function is because this grid doesnt have to be centered at one point but
    ## the corner has to start next to the first point of the trajectory
    margin = 1
    first_chunck_direction = directions[0]
    ###Drawing the central rect and the one next to it
    if first_chunck_direction == -45:
        up_right_corner = [xs[0] + margin, ys[0] - margin]
        first_rect = mplPath.Path(
            np.array([[up_right_corner[0] - size_mask, up_right_corner[1] + size_mask],
                      [up_right_corner[0] - size_mask, up_right_corner[1]],
                      up_right_corner,
                      [up_right_corner[0], up_right_corner[1] + size_mask]]))
    elif first_chunck_direction == -135:
        up_left_corner = [xs[0] - margin, ys[0] - margin]
        first_rect = mplPath.Path(
            np.array([[up_left_corner[0], up_left_corner[1] + size_mask],
                      up_left_corner,
                      [up_left_corner[0]+size_mask, up_left_corner[1]],
                      [up_left_corner[0]+size_mask, up_left_corner[1] + size_mask]]))
    elif first_chunck_direction == 45:
        down_right_corner = [xs[0] + margin, ys[0] + margin]
        first_rect = mplPath.Path(
            np.array([[down_right_corner[0]-size_mask,down_right_corner[1]],
                      [down_right_corner[0]-size_mask,down_right_corner[1]-size_mask],
                      [down_right_corner[0],down_right_corner[1]-size_mask],
                      down_right_corner]))
    elif first_chunck_direction == 135:
        down_left_corner = [xs[0] - margin, ys[0] + margin]
        first_rect = mplPath.Path(
            np.array([down_left_corner,
                      [down_left_corner[0],down_left_corner[1]-size_mask],
                      [down_left_corner[0]+size_mask,down_left_corner[1]-size_mask],
                      [down_left_corner[0]+size_mask,down_left_corner[1]]]))
    ####if direction is straight, i center the rect on the points
    elif first_chunck_direction == -90:
        top_left_corner = [xs[0] - int(size_mask/2),ys[0]-margin]
        top_right_corner = [xs[0] + int(size_mask/2),ys[0]-margin]
        first_rect = mplPath.Path(
            np.array([[top_left_corner[0],top_left_corner[1]+size_mask],
                      top_left_corner,
                      top_right_corner,
                      [top_right_corner[0],top_right_corner[1]+size_mask]]))
    elif first_chunck_direction == 90:
        down_left_corner = [xs[0] - int(size_mask/2),ys[0] + margin]
        down_right_corner = [xs[0] + int(size_mask/2),ys[0] + margin]
        first_rect = mplPath.Path(
            np.array([down_left_corner,
                      [down_left_corner[0],down_left_corner[1] - size_mask],
                      [down_right_corner[0],down_right_corner[1] - size_mask],
                      down_right_corner]))
    elif first_chunck_direction == 180 or first_chunck_direction == -180 :
        top_left_corner = [xs[0] - margin,ys[0] - int(size_mask/2)]
        down_left_corner = [xs[0] - margin,ys[0] + int(size_mask/2)]
        first_rect = mplPath.Path(
            np.array([down_left_corner,
                      top_left_corner,
                      [top_left_corner[0]+size_mask, top_left_corner[1]],
                      [top_left_corner[0]+size_mask,top_left_corner[1]+size_mask]]))
    elif first_chunck_direction == 0:
        top_right_corner = [xs[0] + margin, ys[0] - int(size_mask / 2)]
        down_right_corner = [xs[0] + margin, ys[0] + int(size_mask / 2)]
        first_rect = mplPath.Path(
            np.array([[down_right_corner[0] - size_mask, down_right_corner[1]],
                      [top_right_corner[0] - size_mask, top_right_corner[1]],
                      top_right_corner,
                      down_right_corner]))
    # NOTE(review): if first_chunck_direction is none of the 8 key angles,
    # first_rect is unbound and the next line raises NameError.
    list_rect = [first_rect]#,side_rect,second_side_rect]
    # Draw each rectangle using its top-left (vertices[1]) and
    # bottom-right (vertices[3]) corners.
    for rect in list_rect:
        cv2.rectangle(scene, (int(rect.vertices[1][0]), int(rect.vertices[1][1])),
                      (int(rect.vertices[3][0]), int(rect.vertices[3][1])), (0, 0, 0))
    # Draw every trajectory point; i_p is unused (enumerate is redundant).
    for i_p, p in enumerate(xrange(len(xs))):
        cv2.circle(scene, (int(xs[p]), int(ys[p]) ), 1, (255, 0, 0), -1)
    #
    # cv2.imshow('scene', scene)
    # cv2.waitKey(0)
    return list_rect
def createLineIterator(P1, P2, img):
    """Return the pixels of *img* lying on the segment from P1 to P2.

    Parameters:
        P1: numpy array with the (x, y) coordinates of the first point
        P2: numpy array with the (x, y) coordinates of the second point
        img: 2-D image the intensities are read from

    Returns:
        numpy float32 array of shape [numPixels, 3]; each row is
        [x, y, intensity].  P1 itself is excluded from the walk, and
        coordinates falling outside the image are filtered out.
    """
    # Local aliases for readability.
    imageH = img.shape[0]
    imageW = img.shape[1]
    P1X = P1[0]
    P1Y = P1[1]
    P2X = P2[0]
    P2Y = P2[1]

    # Difference and absolute difference between the points: used for the
    # slope and the relative location of P2 w.r.t. P1.
    dX = P2X - P1X
    dY = P2Y - P1Y
    dXa = np.abs(dX)
    dYa = np.abs(dY)

    # Pre-allocate the output from the chessboard distance between points.
    itbuffer = np.empty(shape=(np.maximum(int(dYa), int(dXa)), 3), dtype=np.float32)
    itbuffer.fill(np.nan)

    # Walk the segment with a form of Bresenham's algorithm.
    negY = P1Y > P2Y
    negX = P1X > P2X
    if P1X == P2X:  # vertical line segment
        itbuffer[:, 0] = P1X
        if negY:
            itbuffer[:, 1] = np.arange(P1Y - 1, P1Y - dYa - 1, -1)
        else:
            itbuffer[:, 1] = np.arange(P1Y + 1, P1Y + dYa + 1)
    elif P1Y == P2Y:  # horizontal line segment
        itbuffer[:, 1] = P1Y
        if negX:
            itbuffer[:, 0] = np.arange(P1X - 1, P1X - dXa - 1, -1)
        else:
            itbuffer[:, 0] = np.arange(P1X + 1, P1X + dXa + 1)
    else:  # diagonal line segment
        steepSlope = dYa > dXa
        if steepSlope:
            slope = dX.astype(np.float32) / dY.astype(np.float32)
            if negY:
                itbuffer[:, 1] = np.arange(P1Y - 1, P1Y - dYa - 1, -1)
            else:
                itbuffer[:, 1] = np.arange(P1Y + 1, P1Y + dYa + 1)
            # BUGFIX: np.int was removed in NumPy 1.24; the builtin int
            # dtype truncates identically.
            itbuffer[:, 0] = (slope * (itbuffer[:, 1] - P1Y)).astype(int) + P1X
        else:
            slope = dY.astype(np.float32) / dX.astype(np.float32)
            if negX:
                itbuffer[:, 0] = np.arange(P1X - 1, P1X - dXa - 1, -1)
            else:
                itbuffer[:, 0] = np.arange(P1X + 1, P1X + dXa + 1)
            # BUGFIX: np.int -> int (alias removed in NumPy 1.24).
            itbuffer[:, 1] = (slope * (itbuffer[:, 0] - P1X)).astype(int) + P1Y

    # Drop coordinates outside of the image.
    colX = itbuffer[:, 0]
    colY = itbuffer[:, 1]
    itbuffer = itbuffer[(colX >= 0) & (colY >= 0) & (colX < imageW) & (colY < imageH)]

    # Read the intensities from the img ndarray.
    itbuffer[:, 2] = img[itbuffer[:, 1].astype(np.uint), itbuffer[:, 0].astype(np.uint)]
    return itbuffer
def transform_traj_in_pixel_activation(rect_list, x_untilNow, y_untilNow, size_mask, step):
    """Rasterize, per mask rectangle, the trajectory segments it contains.

    For every rectangle the trajectory points inside it are translated into
    mask coordinates and connected pairwise; every pixel on a connecting
    segment (plus its 4-neighbourhood) is set to the segment length
    normalized by *step*.

    Returns:
        traj_features: array with one flattened (size_mask*size_mask) row per rect
        orig_points: matching rows with the binary (0/255) rasterized lines

    NOTE(review): Python 2 only -- it relies on the eager map() for its
    side effect and on xrange; under Python 3 the map() collects nothing.
    """
    #size_mask= 18
    #a = [0, 0]
    #b = [size_mask - 5, size_mask - 5]
    #step = np.sqrt(np.power((b[0] - a[0]), 2) + np.power((b[1] - a[1]), 2))
    traj_features = []
    orig_points = []
    for rect in rect_list:
        points_in_mask = []
        # Side-effecting map(): collect the trajectory points inside this rect.
        map(lambda ci: points_in_mask.append([int(x_untilNow[ci]), int(y_untilNow[ci])]) if rect.contains_point(
            (int(x_untilNow[ci]), int(y_untilNow[ci]))) else False, xrange(len(x_untilNow)))
        # vertices[1] is the rectangle's top-left corner (see create_grid).
        origin_mask = [rect.vertices[1][0],rect.vertices[1][1]]
        mask_img = np.zeros((size_mask, size_mask), dtype=np.uint8)
        mask_matrix = np.zeros((size_mask, size_mask))
        if len(points_in_mask) >= 2:
            for i in xrange(len(points_in_mask) - 1):
                # Euclidean length of the segment, normalized by *step*.
                distance_metric_value = np.sqrt(np.power(points_in_mask[i + 1][0] - points_in_mask[i][0], 2) + np.power(
                    points_in_mask[i + 1][1] - points_in_mask[i][1], 2)) / step
                ##convert img points to mask coordinate systems
                x_1 = points_in_mask[i + 1][0] - origin_mask[0]
                y_1 = points_in_mask[i + 1][1] - origin_mask[1]
                x = points_in_mask[i][0] - origin_mask[0]
                y = points_in_mask[i][1] - origin_mask[1]
                ##get all pixels lying on the line that pass between two points
                points_on_line = createLineIterator(np.array([[int(x)], [int(y)]]), \
                                                    np.array([[int(x_1)], [int(y_1)]]), mask_img)
                ##fill these pixel values with average distance between two points
                for p in points_on_line:
                    ##if we want to display on img
                    mask_img[int(p[1]),int(p[0])] = 255
                    # # print distance_metric_value
                    # if int(p[1]) + 1 < size_mask - 1 and int(p[0]) < size_mask - 1:
                    #     ##right
                    #     mask_img[int(p[1]) + 1, int(p[0])] = 255
                    #     ##left
                    #     mask_img[int(p[1]) - 1, int(p[0])] = 255
                    #     ##up
                    #     mask_img[int(p[1]), int(p[0]) - 1] = 255
                    #     ##down
                    #     mask_img[int(p[1]), int(p[0]) + 1] = 255
                    ##real value
                    mask_matrix[int(p[1]), int(p[0])] = distance_metric_value
                    # Thicken the line by also writing the 4-neighbourhood,
                    # clamping p so the indices stay inside the mask.
                    if int(p[1]) + 1 < size_mask and int(p[0]) + 1 < size_mask:
                        if int(p[0]) - 1 <0:
                            p[0] = 1
                        if int(p[1]) - 1 <0:
                            p[1] = 1
                        #right
                        mask_matrix[int(p[1]) + 1, int(p[0])] = distance_metric_value
                        ##left
                        mask_matrix[int(p[1]) - 1, int(p[0])] = distance_metric_value
                        ##up
                        mask_matrix[int(p[1]), int(p[0]) - 1] = distance_metric_value
                        ##down
                        mask_matrix[int(p[1]), int(p[0]) + 1] = distance_metric_value
            ##if we want to display on img
            # plt.imshow(mask_matrix.squeeze(), cmap=plt.cm.gray)
            # plt.show()
        else:
            # Too few points in this rect: keep the all-zero flattened mask.
            mask_matrix = mask_matrix.reshape((1, -1))
        ##store final matrix
        if len(traj_features) > 0:
            traj_features = np.vstack((traj_features, mask_matrix.reshape((1, -1))))
        else:
            traj_features = mask_matrix.reshape((1, -1))
        ##store original points
        if len(orig_points)>0:
            orig_points = np.vstack((orig_points,mask_img.reshape((1,-1))))
        else:
            orig_points = mask_img.reshape((1,-1))
    return traj_features,orig_points
def transform_traj_in_pixel_activation_temporal(rect, x_untilNow, y_untilNow, size_mask, step):
    """Temporal variant of transform_traj_in_pixel_activation for ONE rect.

    Returns a pair of flattened (1, size_mask*size_mask) rows: the
    distance-weighted mask and the binary (0/255) rasterized line.

    NOTE(review): unlike transform_traj_in_pixel_activation, the points are
    handed to createLineIterator in IMAGE coordinates -- origin_mask is
    computed but never used to translate them -- and as 1-D (x, y) arrays
    rather than 2x1 column vectors.  Pixels beyond size_mask are silently
    dropped by the iterator; confirm this asymmetry is intended.
    Python 2 only (eager map(), xrange).
    """
    points_in_mask = []
    # Side-effecting map(): collect the trajectory points inside the rect.
    map(lambda ci: points_in_mask.append([int(x_untilNow[ci]), int(y_untilNow[ci])]) if rect.contains_point(
        (int(x_untilNow[ci]), int(y_untilNow[ci]))) else False, xrange(len(x_untilNow)))
    # Computed but unused in this variant (see NOTE above).
    origin_mask = [rect.vertices[1][0], rect.vertices[1][1]]
    mask_img = np.zeros((size_mask, size_mask), dtype=np.uint8)
    mask_matrix = np.zeros((size_mask, size_mask))
    if len(points_in_mask) >= 2:
        for i in xrange(len(points_in_mask) - 1):
            # Segment length normalized by *step*.
            distance_metric_value = np.sqrt(np.power(points_in_mask[i + 1][0] - points_in_mask[i][0], 2) + np.power(
                points_in_mask[i + 1][1] - points_in_mask[i][1], 2)) / step
            ##get all pixels lying on the line that pass between two points
            points_on_line = createLineIterator(np.array([points_in_mask[i][0],points_in_mask[i][1]]), \
                                                np.array([points_in_mask[i+1][0], points_in_mask[i+1][1]]), mask_img)
            ##fill these pixel values with average distance between two points
            for p in points_on_line:
                ##if we want to display on img
                mask_img[int(p[1]), int(p[0])] = 255
                # # print distance_metric_value
                # if int(p[1]) + 1 < size_mask - 1 and int(p[0]) < size_mask - 1:
                #     ##right
                #     mask_img[int(p[1]) + 1, int(p[0])] = 255
                #     ##left
                #     mask_img[int(p[1]) - 1, int(p[0])] = 255
                #     ##up
                #     mask_img[int(p[1]), int(p[0]) - 1] = 255
                #     ##down
                #     mask_img[int(p[1]), int(p[0]) + 1] = 255
                ##real value
                mask_matrix[int(p[1]), int(p[0])] = distance_metric_value
                # print distance_metric_value
                # Thicken with the 4-neighbourhood, clamping at the borders.
                if int(p[1]) + 1 < size_mask and int(p[0]) + 1 < size_mask:
                    if int(p[0]) - 1 < 0:
                        p[0] = 1
                    if int(p[1]) - 1 < 0:
                        p[1] = 1
                    ##right
                    mask_matrix[int(p[1]) + 1, int(p[0])] = distance_metric_value
                    ##left
                    mask_matrix[int(p[1]) - 1, int(p[0])] = distance_metric_value
                    ##up
                    mask_matrix[int(p[1]), int(p[0]) - 1] = distance_metric_value
                    ##down
                    mask_matrix[int(p[1]), int(p[0]) + 1] = distance_metric_value
        ##if we want to display on img
        # plt.imshow(mask_matrix.squeeze(), cmap=plt.cm.gray)
        # plt.show()
    else:
        mask_matrix = mask_matrix.reshape((1, -1))
    return mask_matrix.reshape((1,-1)),mask_img.reshape((1,-1))
def create_vector_activations_layer_2(directions, list_activations, list_orig_points):
    """Place three chunk activations onto a flattened 3x3 spatial grid.

    The middle chunk always occupies the grid centre (slot 4); the slots
    of the first and third chunks are chosen from their quantized movement
    direction so the layout reflects where the trajectory came from and
    where it goes.

    Parameters:
        directions: sequence of >= 3 angles (multiples of 45 degrees)
        list_activations: >= 3 equal-length 1-D activation vectors
        list_orig_points: >= 3 arrays of shape (1, m) with original points

    Returns:
        (activations, orig_points): arrays of shape (1, 9*len) and (1, 9*m).
    """
    n_feat = len(list_activations[0])
    n_pts = list_orig_points[0].shape[1]
    grid_act = np.zeros((9, n_feat))
    grid_pts = np.zeros((9, n_pts))

    # Grid slot of the FIRST chunk, indexed by its direction.
    first_slot = {0: 5, 45: 8, 90: 7, 135: 6, 180: 3, -180: 3,
                  -45: 2, -90: 1, -135: 0}
    # Grid slot of the THIRD chunk (mirrored layout w.r.t. the first).
    third_slot = {0: 3, 45: 0, 90: 1, 135: 2, 180: 5, -180: 5,
                  -45: 6, -90: 7, -135: 8}

    ##the first position is determined by the direction
    slot = first_slot.get(directions[0])
    if slot is not None:
        grid_act[slot] = list_activations[0]
        grid_pts[slot] = list_orig_points[0]

    ##whereas the second position in the grid is always the center
    grid_act[4] = list_activations[1]
    grid_pts[4] = list_orig_points[1]

    ## the third position is again determined by the direction
    # BUGFIX: the original tested ``first_dir == -180`` here instead of
    # ``third_dir == -180`` (copy-paste slip), so a third chunk heading
    # -180 was silently dropped unless the first chunk also headed -180.
    slot = third_slot.get(directions[2])
    if slot is not None:
        grid_act[slot] = list_activations[2]
        grid_pts[slot] = list_orig_points[2]

    return grid_act.reshape((1, n_feat * 9)), grid_pts.reshape((1, n_pts * 9))
def create_vector_for_bayesian_probability_with_directions(activation_labels, directions, history):
    """Lay three activation labels out on a flattened 3x3 grid of 9 slots.

    Slots not covered by the trajectory stay ``None``.  The centre slot
    (index 4) carries the middle label plus a left/right marker ('DXSX'
    or 'SXDX') derived from the first chunk's direction.  Only a history
    of 3 is supported; otherwise an all-None vector is returned.
    """
    grid = [None] * 9

    # first direction -> (slot of the first label, centre-label suffix)
    first_placement = {
        0: (5, 'DXSX'), 45: (8, 'DXSX'), 90: (7, 'DXSX'),
        135: (6, 'SXDX'), 180: (3, 'SXDX'), -180: (3, 'SXDX'),
        -45: (2, 'DXSX'), -90: (1, 'SXDX'), -135: (0, 'SXDX'),
    }
    # third direction -> slot of the third label (mirrored layout)
    third_placement = {
        0: 3, 45: 0, 90: 1, 135: 2, 180: 5, -180: 5,
        -45: 6, -90: 7, -135: 8,
    }

    if history == 3:
        placement = first_placement.get(directions[0])
        if placement is not None:
            slot, suffix = placement
            grid[slot] = str(activation_labels[0])
            # The centre always holds the middle label (plus the marker).
            grid[4] = str(activation_labels[1]) + suffix
        slot = third_placement.get(directions[2])
        if slot is not None:
            grid[slot] = str(activation_labels[2])
    return grid
def create_vector_for_bayesian_probability(activation_labels, directions, history):
    """Place three activation labels on a flattened 3x3 grid for the Bayes net.

    The slots of the first and third labels depend on the chunk direction;
    the middle label always sits at the centre (slot 4).  Unused slots stay
    ``None``.  Only ``history == 3`` is supported; otherwise an all-None
    vector is returned.
    """
    grid = [None] * 9
    if history != 3:
        return grid

    # direction -> slot of the first label
    first_slot = {0: 5, 45: 8, 90: 7, 135: 6, 180: 3, -180: 3,
                  -45: 2, -90: 1, -135: 0}
    # direction -> slot of the third label (mirrored layout)
    third_slot = {0: 3, 45: 0, 90: 1, 135: 2, 180: 5, -180: 5,
                  -45: 6, -90: 7, -135: 8}

    slot = first_slot.get(directions[0])
    if slot is not None:
        grid[slot] = str(activation_labels[0])

    ##whereas the second position in the grid is always the center
    grid[4] = str(activation_labels[1])

    slot = third_slot.get(directions[2])
    if slot is not None:
        grid[slot] = str(activation_labels[2])
    return grid
def sigmoid_function(x):
    """Logistic sigmoid: map a scalar or ndarray into the open interval (0, 1)."""
    exp_neg = np.exp(-x)
    return 1 / (1 + exp_neg)
def determine_number_k_kMeans(matrix_activations_data_l1):
    """Elbow-method helper: plot between-cluster variance vs number of clusters.

    Fits K-Means for k in range(2, 53, 10) on the layer-1 activation matrix
    and plots the between-cluster sum of squares (bss = tss - wcss) so a
    suitable k can be picked by eye.  Python 2 syntax (print statements).
    NOTE(review): KMeans(n_jobs=...) was removed in scikit-learn 1.0 --
    confirm the pinned sklearn version.
    """
    #matrix_activations_data_l1 = np.array(random.sample(matrix_activations_data_l1,10000))
    #matrix_activations_data_l1 = matrix_activations_data_l1[:10000]
    import warnings
    warnings.filterwarnings("ignore")
    ##Determine number of K
    ##variance intra cluster
    k_range = range(2, 53, 10)
    #k_range = range(2, 21, 2)
    print k_range
    # One fitted model per candidate k (n_jobs=-1: use all cores).
    k_means_var = [KMeans(n_clusters=k,n_jobs=-1).fit(matrix_activations_data_l1) for k in k_range]
    ###save or load already trained cluster model #####
    # img_proc.save_matrix_pickle(k_means_var, 'C:/Users/dario.dotti/Documents/data_for_vocabulary/camera017/traj_pixel_activation/bayesian_net/choose_k_means.txt' )
    # k_means_var = img_proc.load_matrix_pickle(
    #     'C:/Users/dario.dotti/Documents/data_for_vocabulary/camera017/traj_pixel_activation/bayesian_net/choose_k_means.txt')
    #####
    centroids = [X.cluster_centers_ for X in k_means_var]
    # Distance of every sample to its closest centroid, per candidate k.
    k_euclid = [cdist(matrix_activations_data_l1, cent, 'euclidean') for cent in centroids]
    dist = [np.min(ke, axis=1) for ke in k_euclid]
    # Within-cluster sum of squares for each k.
    wcss = [sum(d ** 2) for d in dist]
    # Total sum of squares; bss grows as clusters explain more variance.
    tss = sum(pdist(matrix_activations_data_l1) ** 2) / matrix_activations_data_l1.shape[0]
    bss = tss - wcss
    plt.plot(k_range, bss)
    plt.show()
|
{"/demo.py": ["/data_organizer.py", "/video_traj.py", "/ambient_sensors.py"]}
|
7,373
|
Zumbalamambo/behavior_understanding_indoor_dataset
|
refs/heads/master
|
/laban_posture.py
|
# Merge-conflict resolution: the import block carried unresolved git
# conflict markers (HEAD vs 9348384985d2...), which made the module
# unparseable.  Resolved as the UNION of both branches: HEAD added
# ``os`` and ``AE_rec``; the remote branch widened the sklearn.cluster
# imports.  No import from either side was dropped.
from collections import Counter
import os

import cv2
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
from scipy.cluster import hierarchy
from scipy.cluster.hierarchy import cophenet
from scipy.spatial.distance import cdist, pdist
from scipy.stats.stats import pearsonr
from sklearn import svm, linear_model
from sklearn.cluster import KMeans, SpectralClustering, MeanShift, AgglomerativeClustering
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, precision_recall_fscore_support, classification_report
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier

import AE_rec
import data_organizer
import hierarchical_ae_learning_methods as hs
def pca_on_data(n_minute_data):
    """Project the samples onto their first 3 principal components and scatter-plot them.

    Prints the total explained-variance ratio of the 3 components.
    Python 2 syntax (print statement); purely diagnostic, returns nothing.
    """
    from sklearn import decomposition
    pca = decomposition.PCA(n_components=3)
    s_t = pca.fit_transform(n_minute_data)
    # print s_t.shape
    # Fraction of the variance captured by the 3 retained components.
    print np.sum(pca.explained_variance_ratio_)
    #plt.bar(range(200), pca.explained_variance_ratio_)
    #plt.show()
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    ax.scatter(s_t[:, 0], s_t[:, 1], s_t[:, 2], c='r')  # s_t[:, 1], s_t[:, 2],s_t[:,0]
    # ax.scatter(m_s[:, 0], m_s.means_[:, 1], m_s.means_[:, 2], marker='^', c='r')
    plt.show()
def bow_on_features(feature_p_n_minute,y_tr,l_per_participant):
    """Bag-of-words over cluster ids per participant, then linear-SVM classification.

    Parameters:
        feature_p_n_minute: per-participant list of per-interval feature rows
        y_tr: cluster id of every interval, concatenated over all participants
        l_per_participant: one class label per participant

    Side effects: prints the histogram matrix shape and a classification
    report on a 70/30 split.  Python 2 syntax (print statements).
    """
    start = 0
    cl_id_p_n_minute = []
    # Split the flat cluster-id vector back into per-participant chunks.
    for i_p in range(0, len(feature_p_n_minute)):
        cl_id_p_n_minute.append(y_tr[start:(start + len(feature_p_n_minute[i_p]))])
        start += len(feature_p_n_minute[i_p])
    ## similarity between participants using histograms because of length difference ##
    l = Counter(y_tr).keys()
    p_hist = []
    for p in cl_id_p_n_minute:
        hist = np.zeros((1, len(l)), dtype=int)
        for v in p:
            # NOTE(review): assumes cluster ids are 1..len(l); an id of 0
            # would wrap into the last bin -- confirm against the clusterer.
            hist[0, v - 1] += 1
        p_hist.append(hist)
    p_hist = np.concatenate(p_hist, axis=0)
    ## Pearson Correlation to double check##
    # pearson_matrix = np.zeros((46, 46))
    # for i_fp in range(0, len(p_hist)):
    #     for j_fp in range(0, len(p_hist)):
    #         pearson_matrix[i_fp, j_fp] = pearsonr(p_hist[i_fp][0], p_hist[j_fp][0])[0]
    ## delete clusters with less than 3 samples
    # p_hist = np.concatenate(p_hist, axis=0)
    # l_per_participant = np.delete(np.array(l_per_participant), [3, 7, 16, 34, 37, 11, 30, 43, 13, 17, 36])
    # p_hist = np.delete(p_hist, [3, 7, 16, 34, 37, 11, 30, 43, 13, 17, 36], axis=0)
    print p_hist.shape
    X_train, X_test, y_train, y_test = train_test_split(p_hist, l_per_participant, test_size=0.3)
    # L1-regularized linear SVM; class weights rebalance classes 1 and 3.
    model = svm.LinearSVC(penalty='l1',dual=False, C=0.01, class_weight={1: 10, 3: .5}).fit(X_train, y_train) # , gamma=10
    #model = svm.NuSVC(nu=0.05, kernel='linear',gamma=0.001, decision_function_shape='ovr',class_weight={1: 10, 3: .5}).fit(X_train, y_train) #, gamma=10
    y_pred = model.predict(X_test)
    #print accuracy_score(y_test, y_pred)
    print classification_report(y_test, y_pred)
# NOTE(review): unresolved git merge conflict (HEAD vs 9348384985d2...).
# Both branches define sigmoid_function identically; they diverge on
# decision_function vs predict_proba, on keeping the top 3 vs top 500
# samples per class, and on saving AE reconstructions vs collecting
# skeleton-angle summaries.  The module cannot be parsed until resolved.
<<<<<<< HEAD
def sigmoid_function(x):
    return 1 / (1 + np.exp(-x))
def check_decision_functions_and_save_samples(model,X_train):
    ## see decision function ##
    Z = model.decision_function(X_train)
    #Z= model.predict_proba(X_train)
=======
def sigmoid_function(x):
    return 1 / (1 + np.exp(-x))
def check_decision_functions_and_save_samples(model,X_train):
    ## see decision function ##
    #Z = model.decision_function(X_train)
    #data_organizer.save_matrix_pickle(Z,'C:/Users/dario.dotti/Documents/data_for_personality_exp/after_2_data_recording/decision_function_svm.txt')
    Z= model.predict_proba(X_train)
    #Z= data_organizer.load_matrix_pickle('C:/Users/dario.dotti/Documents/data_for_personality_exp/after_2_data_recording/decision_function_svm.txt')
>>>>>>> 9348384985d2847c272133ff77ce6181ca1fa082
    AE_weights_level_1 = data_organizer.load_matrix_pickle(
        'C:/Users/dario.dotti/Documents/data_for_personality_exp/after_data_cleaning/posture_data/upperBody/deep_AE_900_225_weights_008hd1_noNoise.txt')
    # class_best_example = []
    class_names = ['1', '2', '3']
    for c in range(0, 3):
        print class_names[c]
<<<<<<< HEAD
        indx = np.argsort(Z[:, c])[::-1][:3]
=======
        indx = np.argsort(Z[:, c])[::-1][:500]
>>>>>>> 9348384985d2847c272133ff77ce6181ca1fa082
        speed_class= []
        or_class= []
        KA_class=[]
<<<<<<< HEAD
=======
        sk_angles_list= []
>>>>>>> 9348384985d2847c272133ff77ce6181ca1fa082
        for idx in indx:
            #print Z[idx]
            # class_best_example.append(X_train[idx])
            hot = X_train[idx][:24]
            keys_area = X_train[idx][24:30]
<<<<<<< HEAD
=======
>>>>>>> 9348384985d2847c272133ff77ce6181ca1fa082
            posture_5_seconds = X_train[idx][72:].reshape((16, 225))
            speed_class.append(np.sum(hot.reshape((8, 3)), axis=0))
            or_class.append(np.sum(hot.reshape((8, 3)), axis=1))
            KA_class.append(keys_area)
<<<<<<< HEAD
            for p in range(0, len(posture_5_seconds)):
                rec_deep = sigmoid_function(
                    (np.dot(posture_5_seconds[p], AE_weights_level_1[0][1].T)) + AE_weights_level_1[3])
                rec = sigmoid_function((np.dot(rec_deep, AE_weights_level_1[0][0].T)) + AE_weights_level_1[4]).reshape(
                    (120, 120))
                #print np.sum(hot.reshape((8, 3)), axis=0)
                #print keys_area
                filename = 'C:/Users/dario.dotti/Documents/data_for_personality_exp/after_2_data_recording/svm_decision_func_fusion/' + \
                           class_names[c] + '_' + str(idx) + '_' + str(p) + '.jpg'
                plt.imsave(filename, rec.squeeze(), cmap=plt.cm.gray)
=======
            sk_angles_list.append(np.array(X_train[idx][30:72]).reshape((7,6)))
            # for p in range(0, len(posture_5_seconds)):
            #     rec_deep = sigmoid_function(
            #         (np.dot(posture_5_seconds[p], AE_weights_level_1[0][1].T)) + AE_weights_level_1[3])
            #     rec = sigmoid_function((np.dot(rec_deep, AE_weights_level_1[0][0].T)) + AE_weights_level_1[4]).reshape(
            #         (120, 120))
            #
            #     #print np.sum(hot.reshape((8, 3)), axis=0)
            #     #print keys_area
            #
            #     filename = 'C:/Users/dario.dotti/Documents/data_for_personality_exp/after_2_data_recording/svm_decision_func_fusion/' + \
            #                class_names[c] + '_' + str(idx) + '_' + str(p) + '.jpg'
            #     plt.imsave(filename, rec.squeeze(), cmap=plt.cm.gray)
>>>>>>> 9348384985d2847c272133ff77ce6181ca1fa082
            # cv2.imshow('posture',rec)
            # cv2.waitKey(0)
        print np.mean(np.array(speed_class),axis=0)
        print np.mean(np.array(KA_class),axis=0)
        print np.mean(np.array(or_class),axis=0)
# NOTE(review): second unresolved conflict hunk.  HEAD inserts a complete
# raw_feature_classification() here, while the other branch instead closes
# check_decision_functions_and_save_samples() by printing the averaged
# skeleton angles.  Both pieces look worth keeping when resolving.
<<<<<<< HEAD
def raw_feature_classification(n_minute,feature_p_n_minute, l_per_participant):
    ## feature and label vectors with all data ##
    start = 0
    labels_data = np.zeros((n_minute.shape[0],1),dtype=int)
    for i_p in xrange(0, len(feature_p_n_minute)):
        for i in xrange(0,len(feature_p_n_minute[i_p])):
            labels_data[start+i,0] = int(l_per_participant[i_p])
        start += len(feature_p_n_minute[i_p])
    ##
    ### AE posture vs RAW angle posture##
    #hot_key_a = n_minute[:, :30]
    # sk_angles = n_minute[:, 30:72]
    # posture_AE = n_minute[:, 72:]
    #
    # n_minute = np.hstack((hot_key_a, posture_AE))
    # print n_minute.shape
    ####
    ## feature and label vector all participants but one ##
    # r_list=[]
    # start = 0
    # for i_p in xrange(0, len(feature_p_n_minute)):
    #     print '## subject: ',i_p
    #     test_p = n_minute[start:(start+len(feature_p_n_minute[i_p]))]
    #     label_p = labels_data[start:(start+len(feature_p_n_minute[i_p]))]
    #
    #     train_ps = np.vstack((n_minute[:start,:], n_minute[start+len(feature_p_n_minute[i_p]):,:]))
    #     label_ps = np.vstack((labels_data[:start], labels_data[start+len(feature_p_n_minute[i_p]):,:]))
    #
    #     model = KNeighborsClassifier(n_neighbors=3, n_jobs=-1, weights='distance').fit(train_ps, label_ps.ravel())
    #     # model = svm.NuSVC(nu=0.5,decision_function_shape='ovr',class_weight={1:10,3:.5}).fit(X_train, y_train)#nu=0.05, ,class_weight={1:10,3:.5}
    #
    #     y_pred = model.predict(test_p)
    #     #print classification_report(label_p, y_pred)
    #     #print accuracy_score(label_p,y_pred)
    #     r = precision_recall_fscore_support(label_p,y_pred,average='weighted')
    #     r_list.append(r[2])
    #
    #
    #     start += len(feature_p_n_minute[i_p])
    #
    # print np.mean(np.array(r_list))
    ###
    ### try 2 clusters classification ##
    # idx = np.where(labels_data==1)
    # labels_data = np.delete(labels_data,idx,axis=0)
    # n_minute = np.delete(n_minute,idx,axis=0)
    # for l in range(0,len(labels_data)):
    #     if labels_data[l]==1:
    #         labels_data[l]=3
    ###########
    #print X_train.shape
    for i in range(0,5):
        X_train, X_test, y_train, y_test = train_test_split(n_minute, labels_data.ravel(), test_size=0.1)
        #model = svm.NuSVC(nu=0.5,decision_function_shape='ovr',class_weight={1:10,3:.5}).fit(X_train, y_train)#nu=0.05, ,class_weight={1:10,3:.5}
        #model = RandomForestClassifier(class_weight={1:10,3:.5}).fit(X_train, y_train)
        model = KNeighborsClassifier(n_neighbors=2, n_jobs=-1,weights='distance').fit(X_train, y_train)
        #data_organizer.save_matrix_pickle(model,'C:/Users/dario.dotti/Documents/data_for_personality_exp/after_2_data_recording/svm_2sec_2general_cluster.txt')
        y_pred = model.predict(X_test)
        #print y_pred
        #print y_test
        print 'acc training: ', accuracy_score(y_train[:500], model.predict(X_train[:500]))
        #print precision_recall_fscore_support(y_test, y_pred, average='weighted')
        print classification_report(y_test, y_pred)
        #check_decision_functions_and_save_samples(model,X_train)
=======
    a = np.mean(np.array(sk_angles_list),axis=0)
    for i in a:
        for ii in i:
            print ("%.4f" % ii),
>>>>>>> 9348384985d2847c272133ff77ce6181ca1fa082
def check_samples_in_clusters(data,y_tr, kmean_centers, save_img):
<<<<<<< HEAD
newpath = 'C:/Users/dario.dotti/Documents/data_for_personality_exp/after_2_data_recording/posture/posture_50clusters_allTasks/'
AE_weights_level_1 = data_organizer.load_matrix_pickle(
'C:/Users/dario.dotti/Documents/data_for_personality_exp/after_data_cleaning/posture_data/upperBody/deep_AE_900_225_weights_008hd1_noNoise.txt')
=======
def raw_feature_classification(n_minute,feature_p_n_minute, l_per_participant):
## feature and label vectors with all data ##
start = 0
labels_data = np.zeros((n_minute.shape[0],1),dtype=int)
for i_p in xrange(0, len(feature_p_n_minute)):
for i in xrange(0,len(feature_p_n_minute[i_p])):
labels_data[start+i,0] = int(l_per_participant[i_p])
start += len(feature_p_n_minute[i_p])
##
### AE posture vs RAW angle posture##
# hot_key_a = n_minute[:, :30]
# sk_angles = n_minute[:, 30:72]
# posture_AE = n_minute[:, 72:]
#
# n_minute = np.hstack((hot_key_a, posture_AE))
# print n_minute.shape
####
## feature and label vector all participants but one ##
# r_list=[]
# start = 0
# for i_p in xrange(0, len(feature_p_n_minute)):
# print '## subject: ',i_p
# test_p = n_minute[start:(start+len(feature_p_n_minute[i_p]))]
# label_p = labels_data[start:(start+len(feature_p_n_minute[i_p]))]
#
# train_ps = np.vstack((n_minute[:start,:], n_minute[start+len(feature_p_n_minute[i_p]):,:]))
# label_ps = np.vstack((labels_data[:start], labels_data[start+len(feature_p_n_minute[i_p]):,:]))
#
# model = KNeighborsClassifier(n_neighbors=3, n_jobs=-1, weights='distance').fit(train_ps, label_ps.ravel())
# # model = svm.NuSVC(nu=0.5,decision_function_shape='ovr',class_weight={1:10,3:.5}).fit(X_train, y_train)#nu=0.05, ,class_weight={1:10,3:.5}
#
# y_pred = model.predict(test_p)
# #print classification_report(label_p, y_pred)
# #print accuracy_score(label_p,y_pred)
# r = precision_recall_fscore_support(label_p,y_pred,average='weighted')
# r_list.append(r[2])
#
#
# start += len(feature_p_n_minute[i_p])
#
# print np.mean(np.array(r_list))
###
### try 2 clusters classification ##
# idx = np.where(labels_data==1)
# labels_data = np.delete(labels_data,idx,axis=0)
# n_minute = np.delete(n_minute,idx,axis=0)
# for l in range(0,len(labels_data)):
# if labels_data[l]==1:
# labels_data[l]=3
###########
>>>>>>> 9348384985d2847c272133ff77ce6181ca1fa082
for cluster_class in range(50):
print cluster_class
<<<<<<< HEAD
##get activation for the current class
index_samples = np.where(y_tr == cluster_class)[0]
activation_current_class = data[index_samples]
# activation_current_class_t = matrix_act_transf[index_samples]
######take the n closest samples #######
##compute the distance between activantion of current cluster and its cluster center
d = cdist(activation_current_class, kmean_centers[cluster_class].reshape((1, -1)))
closest_samples = np.sort(d, axis=0)[:3]
index_min = [np.where(d == c_s) for c_s in closest_samples]
activation_closest_samples = [activation_current_class[i_m[0][0]] for i_m in index_min]
#######
mean_rec = np.zeros((120, 120))
for i_sample, sample_current_class in enumerate(activation_closest_samples):
if np.sum(sample_current_class) == 0: continue
sample_current_class = sample_current_class.reshape((16, AE_weights_level_1[0][1].shape[1]))
for i_s_c in xrange(0, len(sample_current_class)):
# rec_space = np.dot(sample_current_class[i_s_c], AE_weight_layer1[0][0].T)
# rec = sigmoid_function(rec_space + AE_weight_layer1[2]).reshape((120,120))
##deep AE
rec_deep = sigmoid_function(
(np.dot(sample_current_class[i_s_c], AE_weights_level_1[0][1].T)) + AE_weights_level_1[3])
rec = sigmoid_function((np.dot(rec_deep, AE_weights_level_1[0][0].T)) + AE_weights_level_1[4]).reshape(
(120, 120))
# mean_rec += rec
##Show
# imgplot = plt.imshow(rec.squeeze(), cmap=plt.cm.gray)
# plt.show()
##Save
if save_img:
if not os.path.exists(newpath):
os.makedirs(newpath)
filename = newpath + str(cluster_class) + '_' + str(i_sample) + '_' + str(i_s_c) + '.jpg'
# mean_imgs_cluster = cv2.resize(mean_imgs_cluster,(54,54),interpolation= cv2.INTER_LINEAR)
plt.imsave(filename, rec.squeeze(), cmap=plt.cm.gray)
def dist_skeleton_angles_features(sk_f):
    """Discretize per-joint skeleton angles into 8 orientation bins of 45 degrees.

    sk_f: array whose rows are flattened 7-joints x 6-statistics matrices
    (42 columns); column 3 of each joint row is read as an angle in degrees
    -- assumption inferred from the reshape below, TODO confirm.
    Returns labels_angles: one row of 7 bin indices per original sample.

    NOTE(review): this body contains unresolved git merge-conflict residue
    (`=======` / `>>>>>>>` / `<<<<<<< HEAD` markers below).  The injected
    classification lines (train_test_split .. check_decision_functions...)
    reference names (n_minute, labels_data, accuracy_score, ...) that do not
    exist in this scope and make the module un-importable; the conflict must
    be resolved by hand before this file can run.
    """
    # eight half-open 45-degree bins covering [0, 360)
    orientation_intervals = [[range(0, 45)], [range(45, 90)], [range(90, 135)], [range(135, 180)], [range(180, 225)],
                             [range(225, 270)], [range(270, 315)], [range(315, 360)]]
    # labels_angles = np.zeros((sk_f.shape[0], 7), dtype=int)
    # for i_sk,sk in enumerate(sk_f):
    # sk = sk.reshape((7,6))[:,3]
    #
    # for i_v, v in enumerate(sk):
    # for i_o, o in enumerate(orientation_intervals):
    # if int(v) in o[0]:
    # labels_angles[i_sk, i_v] = i_o
=======
    # NOTE(review): everything from here to the `>>>>>>>` marker belongs to a
    # different (classification) function and was spliced in by a bad merge.
    #print X_train.shape
    #for i in range(0,5):
    X_train, X_test, y_train, y_test = train_test_split(n_minute, labels_data.ravel(), test_size=0.1)
    #model = svm.NuSVC(nu=0.5,decision_function_shape='ovr',class_weight={1:10,3:.5}).fit(X_train, y_train)#nu=0.05, ,class_weight={1:10,3:.5}
    #model = RandomForestClassifier(class_weight={1:10,3:.5}).fit(X_train, y_train)
    model = KNeighborsClassifier(n_neighbors=3, n_jobs=-1,weights='distance').fit(X_train, y_train)
    #data_organizer.save_matrix_pickle(model,'C:/Users/dario.dotti/Documents/data_for_personality_exp/after_2_data_recording/svm_2sec_2general_cluster.txt')
    y_pred = model.predict(X_test)
>>>>>>> 9348384985d2847c272133ff77ce6181ca1fa082
    #print y_pred
    #print y_test
    # NOTE(review): model/X_train below are also merge residue in this scope.
    print 'acc training: ', accuracy_score(y_train[:500], model.predict(X_train[:500]))
    #print precision_recall_fscore_support(y_test, y_pred, average='weighted')
    print classification_report(y_test, y_pred)
    check_decision_functions_and_save_samples(model,X_train)
    ## discretize in 8 angles all the data
    # one row per joint: (n_samples * 7) x 6 statistics
    sk_f_reshape = sk_f.reshape((sk_f.shape[0] * 7, 6))
    data = sk_f_reshape[:,3]
    #hist_fr = np.zeros((1,int(np.max(data))+1),dtype=int)
    hist_fr = np.zeros((1,len(orientation_intervals)),dtype=int)
    labels_angles = np.zeros((1, data.shape[0]), dtype=int)
    # assign each angle to the first matching bin; values >= 360 or < 0
    # fall through unmatched and keep label 0 -- TODO confirm intended
    for i_v,v in enumerate(data):
        for i_o, o in enumerate(orientation_intervals):
            if int(v) in o[0]:
                hist_fr[0,i_o] +=1
                labels_angles[0, i_v] = i_o
                break
    print np.mean(data)
    # plt.bar(range(0,hist_fr.shape[1]),hist_fr[0])
    # plt.show()
    ##
    ## reshape every row contains label for each joint ##
    # Python 2 integer division; assumes the label count is divisible by 7
    labels_angles = labels_angles.reshape((labels_angles.shape[1] / 7, 7))
<<<<<<< HEAD
    return labels_angles
=======
# NOTE(review): this entire region (down to the `>>>>>>>` marker) is the
# other side of an unresolved merge conflict.  It defines stale duplicates of
# main_laban_posture_RAW() and main_laban_posture_ID() that are re-defined
# (and therefore shadowed) later in the file; resolve the conflict by deleting
# whichever branch is obsolete.
def main_laban_posture_RAW():
    # load the two recording batches and concatenate them per participant
    features_participants_orig_1 = data_organizer.load_matrix_pickle(
        'C:/Users/dario.dotti/Documents/data_for_personality_exp/after_data_cleaning/posture_data/upperBody/experiment_upperBody_pathPlanning/RAWpostureUpperBody_path_features_2sec_skeletonF.txt')#'C:/Users/dario.dotti/Documents/data_for_personality_exp/after_data_cleaning/posture_data/upperBody/experiment_upperBody_pathPlanning/pca_RAWpostureUpperBody_path_features.txt')
    features_participants_orig_2 = data_organizer.load_matrix_pickle(
        'C:/Users/dario.dotti/Desktop/data_recordings_master/data_personality/RAWpostureUpperBody_path_features_master_2sec_skeletonF.txt') #'C:/Users/dario.dotti/Desktop/data_recordings_master/data_personality/pca_RAWpostureUpperBody_path_features_master.txt')
    features_participants_orig = features_participants_orig_1 + features_participants_orig_2
    ##
    ## separate feature vector again to see what is more informative ##
    #features_participants_3 = np.concatenate(features_participants_orig,axis=0)
    # hot = features_participants_3[:, :24]
    # key_areas = features_participants_3[:,24:30]
    #posture = features_participants_3[:,30:130]
    # #
    # start = 0
    # f_participant = []
    # for i_p in range(0, len(features_participants_orig)):
    # f_participant.append(posture[start:(start + len(features_participants_orig[i_p]))])
    # start += len(features_participants_orig[i_p])
    ##
    ##concatenate features to form n_minute feature vectors
    # t = 3
    # feature_p_n_minute=[]
    # for p in features_participants_orig:
    # n_minute_p = []
    # for n_slice in range(0, len(p) - (t-1), t/2):
    # n_minute_p.append(p[n_slice:(n_slice + t)].reshape((1,-1)))
    # feature_p_n_minute.append(np.concatenate(n_minute_p,axis=0))
    feature_p_n_minute = features_participants_orig
    n_minute = np.concatenate(feature_p_n_minute,axis=0)
    #print n_minute.shape
    #pca_on_data(n_minute)
    ### clustering on data ###
    #hs.determine_number_k_kMeans(n_minute)
    #Z = hierarchy.linkage(n_minute, method='average', metric='euclidean')
    # check if metric preserve original distance
    #c, coph_dists = cophenet(Z, pdist(n_minute))
    #print c
    #y_tr = hierarchy.fcluster(Z, 5,criterion="distance") #cosine = 0.5
    #
    # ##print y_tr
    #print Counter(y_tr)
    # data_organizer.save_matrix_pickle(y_tr,'C:/Users/dario.dotti/Documents/data_for_personality_exp/after_2_data_recording/label_features_clustering.txt')
    #y_tr = data_organizer.load_matrix_pickle('C:/Users/dario.dotti/Documents/data_for_personality_exp/after_2_data_recording/label_features_clustering.txt')
    ##
    #### classification on data #######
    # per-participant personality cluster labels (one entry per participant)
    l_per_participant = data_organizer.load_matrix_pickle('C:/Users/dario.dotti/Documents/data_for_personality_exp/after_2_data_recording/clusters_on_pearson_corr_personality_scores.txt')
    extrovert_label_per_p =[2, 0, 1, 0, 2, 2, 1, 1, 2, 1, 2, 1, 0, 1, 1, 1, 0, 2, 2, 2, 2, 1, 1, 2, 2, 0, 2, 0, 1, 0, 0, 1, 1, 2, 1, 0, 2, 1, 0, 1, 0, 0, 1, 2, 0, 1]
    consc_label_per_p = [2, 2, 1, 2, 2, 2, 2, 1, 0, 2, 2, 0, 1, 0, 1, 2, 1, 0, 2, 0, 1, 0, 2, 1, 2, 1, 0, 1, 0, 2, 0, 2, 2, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 2]
    nevrotic_label_per_p = [0, 2, 1, 1, 1, 2, 2, 1, 0, 0, 2, 2, 2, 1, 2, 0, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 1, 0, 0, 1, 1, 1, 2, 2, 2, 0, 1, 1, 0, 1, 0]
    ## baseline classifications
    raw_feature_classification(n_minute, feature_p_n_minute, l_per_participant)
    #bow_on_features(f_participant, y_tr, l_per_participant)
    ##
def main_laban_posture_ID():
    # stale duplicate: shadowed by the later main_laban_posture_ID definition
    features_participants_orig = data_organizer.load_matrix_pickle('C:/Users/dario.dotti/Documents/data_for_personality_exp/after_data_cleaning/posture_data/upperBody/experiment_upperBody_pathPlanning/IDpostureUpperBody_path_features.txt')
    #features_participants = np.concatenate(features_participants_orig, axis=0)
    #n_minute = features_participants[:,:30]
    #posture_l = features_participants[:,30:]
    ## compare the first n minute ##
    n_minute = [np.array(p[:20]).reshape((1,-1)) for p in features_participants_orig]
    n_minute = np.concatenate(n_minute,axis=0)
    # pairwise cosine distance between every participant's first 20 slices
    n_minute_similarity = np.concatenate([cdist(p[:20].reshape((1,-1)), n_minute, 'cosine') for p in features_participants_orig],axis=0)
    ##
    Z = hierarchy.linkage(n_minute, method='average', metric='cosine')
    #check if metric preserve original distance
    c, coph_dists = cophenet(Z, pdist(n_minute))
    #print c
    y_tr = hierarchy.fcluster(Z, 0.2, criterion="distance")
    #y_tr = m_s.(n_minute)
    #print y_tr
    #print Counter(y_tr)
    # X_train, X_test, y_train, y_test = train_test_split(n_minute,posture_l.reshape((-1,1)),test_size=0.1)
    # ##since agglomerative clustering doesnt have predict I use svm with the cluster labels for classification
    # clf = svm.LinearSVC().fit(X_train, y_train)
    # y_prediction = clf.predict(X_test)
    # print accuracy_score(y_test,y_prediction)
>>>>>>> 9348384985d2847c272133ff77ce6181ca1fa082
def main_laban_posture_RAW():
    """Load raw upper-body posture features (all tasks) and visualize the
    distribution of skeleton angle features.

    NOTE(review): this is the second definition of main_laban_posture_RAW in
    the file (a merge-conflict duplicate exists above); at import time this
    one wins.  Most of the body is commented-out exploratory code.
    """
    ## load features from determined tasks
    # features_participants_orig_1 = data_organizer.load_matrix_pickle(
    # 'C:/Users/dario.dotti/Documents/data_for_personality_exp/after_data_cleaning/posture_data/upperBody/experiment_upperBody_pathPlanning/RAWpostureUpperBody_path_features_2sec_skeletonF_task45.txt')#'C:/Users/dario.dotti/Documents/data_for_personality_exp/after_data_cleaning/posture_data/upperBody/experiment_upperBody_pathPlanning/pca_RAWpostureUpperBody_path_features.txt')
    # features_participants_orig_2 = data_organizer.load_matrix_pickle(
    # 'C:/Users/dario.dotti/Desktop/data_recordings_master/data_personality/RAWpostureUpperBody_path_features_master_2sec_skeletonF_task45.txt') #'C:/Users/dario.dotti/Desktop/data_recordings_master/data_personality/pca_RAWpostureUpperBody_path_features_master.txt')
    # features_participants_orig = features_participants_orig_1 + features_participants_orig_2
    # del features_participants_orig[44]
    ## load features from all tasks
    features_participants_orig = data_organizer.load_matrix_pickle('C:/Users/dario.dotti/Documents/data_for_personality_exp/after_data_cleaning/posture_data/upperBody/experiment_upperBody_pathPlanning/RAWpostureUpperBody_path_features_2sec_skeletonF_task0123.txt')
    ##
    ## separate feature vector again to see what is more informative ##
    features_participants_3 = np.concatenate(features_participants_orig,axis=0)
    #hot_keyA = features_participants_3[:, :30]
    # columns 30:72 hold the 7 joints x 6 angle statistics block
    sk_f = features_participants_3[:, 30:72]
    #posture = features_participants_3[:,72:]
    #print posture.shape
    # #
    # start = 0
    # f_participant = []
    # for i_p in range(0, len(features_participants_orig)):
    # f_participant.append(posture[start:(start + len(features_participants_orig[i_p]))])
    # start += len(features_participants_orig[i_p])
    ##
    ## visualize distribution of skeleton features ##
    labels_angles = dist_skeleton_angles_features(sk_f)
    #data_organizer.save_matrix_pickle(labels_angles,'C:/Users/dario.dotti/Documents/data_for_personality_exp/after_2_data_recording/posture/Temporal_dynamics_exp/labels_angles_task0123.txt')
    ##
    ### clustering on posture data ###
    ## kmeans
    # posture = posture.reshape(posture.shape[0],posture.shape[1])
    # #hs.determine_number_k_kMeans(posture)
    # my_kmeans = KMeans(n_clusters= 50,n_jobs= -1).fit(posture)
    # y_tr = my_kmeans.predict(posture)
    # cc = my_kmeans.cluster_centers_
    # #check_samples_in_clusters(posture,y_tr,cc,save_img=1)
    # data_organizer.save_matrix_pickle(my_kmeans,
    # 'C:/Users/dario.dotti/Documents/data_for_personality_exp/after_2_data_recording/posture/kmeans_50_posture_allTasks.txt')
    ##hierarchical cl
    #Z = hierarchy.linkage(posture, method='average', metric='euclidean')
    # check if metric preserve original distance
    #c, coph_dists = cophenet(Z, pdist(posture))
    #print c
    #y_tr = hierarchy.fcluster(Z, 10,criterion="distance") #cosine = 0.5
    #
    # ##print y_tr
    #print Counter(y_tr)
    ##
    #### classification on data #######
    #n_minute = np.concatenate(features_participants_orig, axis=0)
    #
    #l_per_participant = data_organizer.load_matrix_pickle('C:/Users/dario.dotti/Documents/data_for_personality_exp/after_2_data_recording/clusters_on_pearson_corr_personality_scores.txt')
    #
    # extrovert_label_per_p =[2, 0, 1, 0, 2, 2, 1, 1, 2, 1, 2, 1, 0, 1, 1, 1, 0, 2, 2, 2, 2, 1, 1, 2, 2, 0, 2, 0, 1, 0, 0, 1, 1, 2, 1, 0, 2, 1, 0, 1, 0, 0, 1, 2, 0, 1]
    # consc_label_per_p = [2, 2, 1, 2, 2, 2, 2, 1, 0, 2, 2, 0, 1, 0, 1, 2, 1, 0, 2, 0, 1, 0, 2, 1, 2, 1, 0, 1, 0, 2, 0, 2, 2, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 2]
    # nevrotic_label_per_p = [0, 2, 1, 1, 1, 2, 2, 1, 0, 0, 2, 2, 2, 1, 2, 0, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 1, 0, 0, 1, 1, 1, 2, 2, 2, 0, 1, 1, 0, 1, 0]
    #
    #
    # ## baseline classifications
    #raw_feature_classification(n_minute, features_participants_orig, l_per_participant)
    #bow_on_features(f_participant, y_tr, l_per_participant)
    ##
def main_laban_posture_ID():
    """Assign posture cluster IDs with a pre-trained k-means model and plot
    the per-personality-class distribution of the 50 posture clusters.

    NOTE(review): second definition of main_laban_posture_ID in the file
    (a merge-conflict duplicate exists above); this one wins at import time.
    """
    # pre-trained 50-cluster k-means over posture features
    cl_model_posture = data_organizer.load_matrix_pickle('C:/Users/dario.dotti/Documents/data_for_personality_exp/after_2_data_recording/posture/kmeans_50_posture_allTasks.txt')
    features_participants_orig = data_organizer.load_matrix_pickle(
        'C:/Users/dario.dotti/Documents/data_for_personality_exp/after_data_cleaning/posture_data/upperBody/experiment_upperBody_pathPlanning/RAWpostureUpperBody_path_features_2sec_skeletonF_ALLTASKS.txt') # 'C:/Users/dario.dotti/Documents/data_for_personality_exp/after_data_cleaning/posture_data/upperBody/experiment_upperBody_pathPlanning/pca_RAWpostureUpperBody_path_features.txt')
    # features_participants_orig_2 = data_organizer.load_matrix_pickle(
    # 'C:/Users/dario.dotti/Desktop/data_recordings_master/data_personality/RAWpostureUpperBody_path_features_master_2sec_skeletonF_ALLTASKS.txt') # 'C:/Users/dario.dotti/Desktop/data_recordings_master/data_personality/pca_RAWpostureUpperBody_path_features_master.txt')
    # features_participants_orig = features_participants_orig_1 + features_participants_orig_2
    # one personality-cluster label per participant
    l_per_participant = data_organizer.load_matrix_pickle(
        'C:/Users/dario.dotti/Documents/data_for_personality_exp/after_2_data_recording/clusters_on_pearson_corr_personality_scores.txt')
    ## separate feature vector again to see what is more informative ##
    features_participants_3 = np.concatenate(features_participants_orig, axis=0)
    # hot_keyA = features_participants_3[:, :30]
    # columns 30:72 = skeleton angle stats, 72: = posture descriptor
    skeleton_angles = features_participants_3[:, 30:72]
    posture = features_participants_3[:, 72:]
    print posture.shape
    ## feature and label vectors with all data ##
    # expand the per-participant label to one label per 2-second sample
    start = 0
    labels_data = np.zeros((posture.shape[0], 1), dtype=int)
    for i_p in xrange(0, len(features_participants_orig)):
        for i in xrange(0, len(features_participants_orig[i_p])):
            labels_data[start + i, 0] = int(l_per_participant[i_p])
        start += len(features_participants_orig[i_p])
    labels_data = labels_data.ravel()
    y_tr = cl_model_posture.predict(posture)
    ## visualize posture distribution ##
    # hist[c, k] = count of posture-cluster k among samples of class c+1
    hist = np.zeros((3, 50))
    for pers_label in xrange(1, 4):
        for i_l in xrange(0, len(y_tr)):
            if labels_data[i_l] == pers_label: hist[(pers_label - 1), y_tr[i_l]] += 1
    fig = plt.figure()
    ax = fig.add_subplot(111)
    width = 0.25
    ind = np.arange(0, 50)
    ##normalized heights
    rect1 = plt.bar(ind, hist[0,:]/len(np.where(labels_data==1)[0]), width)
    rect2 = plt.bar(ind+width, hist[1, :]/len(np.where(labels_data==2)[0]), width, color='red')
    rect3 = plt.bar(ind+(width*2), hist[2, :]/len(np.where(labels_data==3)[0]), width, color='green')
    ax.legend((rect1[0], rect2[0], rect3[0]), ('class1', 'class2','class3'), fontsize=11)
    ax.set_xticks(ind + width)
    ax.set_xticklabels(ind)
    plt.show()
    ###
    ## check posture differences using skeleton raw angles ##
    # print the per-joint mean of the 6 angle statistics for each class
    for pers_label in xrange(1,4):
        print pers_label
        angles_per_cl = []
        for i_l in xrange(0,len(y_tr)):
            if labels_data[i_l] == pers_label:
                angles_per_cl.append(skeleton_angles[i_l])
        ## show mean per joints ##
        ## matrix 7 joints x 6 stat of angles
        angles_per_cl = np.array(angles_per_cl).reshape((7*len(angles_per_cl),6))
        for i_joint in xrange(7):
            joint_stat = []
            for i_value in xrange(i_joint,angles_per_cl.shape[0],7):
                joint_stat.append(angles_per_cl[i_value])
            print np.mean(np.array(joint_stat),axis=0)
# NOTE(review): unresolved merge conflict in the entry point -- the two
# branches call different main functions; pick one and delete the markers.
if __name__ == '__main__':
<<<<<<< HEAD
    #main_laban_posture_RAW()
    main_laban_posture_ID()
=======
    #main_laban_posture_ID()
    main_laban_posture_RAW()
>>>>>>> 9348384985d2847c272133ff77ce6181ca1fa082
|
{"/demo.py": ["/data_organizer.py", "/video_traj.py", "/ambient_sensors.py"]}
|
7,374
|
Zumbalamambo/behavior_understanding_indoor_dataset
|
refs/heads/master
|
/demo_pecs_review.py
|
from sklearn.linear_model import LogisticRegression
import numpy as np
import cv2
import matplotlib
matplotlib.use('TkAgg')
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
import matplotlib.pyplot as plt
from collections import Counter
import threading
import time
import multiprocessing
from datetime import datetime as dt
from Tkinter import *
import PIL
from PIL import ImageTk, Image
import data_organizer
import video_traj
import ambient_sensors
#scene = np.ones((414,512),dtype=np.uint8)
scene = cv2.imread('C:/Users/dario.dotti/Pictures/taj_dataset_wandering/WANDERING_PECS/pecs_room.png')
#Create a window
window=Tk()
window.title('Abnormal Behavior Detector')
def draw_joints_and_tracks(body_points,current_time_shared):
    """Render the recorded skeletons frame by frame onto the room image.

    body_points: sequence of per-frame joint arrays; row 0 carries metadata
    (column 1 holds the recording timestamp string), rows 1..21 hold joint
    x/y coordinates as strings -- indices inferred from the drawing code.
    current_time_shared: multiprocessing.Queue receiving each frame's
    timestamp so other processes can sync to playback time.
    Runs as a separate process from main_demo_pecs().

    NOTE(review): the original file's indentation was lost; the nesting
    below (imshow after the joint loop, else-branch paired with
    `if n_frame > 1`) is a reconstruction -- confirm against history.
    """
    # make the thread wait for the other
    time.sleep(5.5)
    color = (0, 0, 255)
    # draw line between joints
    thickness = 3
    line_color = (19, 19, 164)
    ##check patches are correct
    # for i_rect, rect in enumerate(scene_patches):
    # cv2.rectangle(scene, (int(rect.vertices[1][0]), int(rect.vertices[1][1])),
    # (int(rect.vertices[3][0]), int(rect.vertices[3][1])), (0, 0, 0))
    #
    # ## write number of patch on img
    # cv2.putText(scene, str(i_rect), (int(rect.vertices[1][0]) + 10, int(rect.vertices[1][1]) + 20),
    # cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 2)
    for n_frame, traj_body_joints in enumerate(body_points):
        # draw on a fresh copy so only trajectories persist on `scene`
        temp_img = scene.copy()
        # get recording time and make it as current time
        current_time = dt.strptime(traj_body_joints[0,1].split('.')[0], '%Y-%m-%d %H:%M:%S')
        current_time_shared.put(current_time)
        # draw joints
        #print n_frame
        # first position skipped cause there are other info stored
        try:
            # torso
            cv2.line(temp_img, (int(float(traj_body_joints[4, 0])), int(float(traj_body_joints[4, 1]))),
                     (int(float(traj_body_joints[3, 0])), int(float(traj_body_joints[3, 1]))), line_color, thickness)
            cv2.line(temp_img, (int(float(traj_body_joints[3, 0])), int(float(traj_body_joints[3, 1]))),
                     (int(float(traj_body_joints[2, 0])), int(float(traj_body_joints[2, 1]))), line_color, thickness)
            cv2.line(temp_img, (int(float(traj_body_joints[2, 0])), int(float(traj_body_joints[2, 1]))),
                     (int(float(traj_body_joints[1, 0])), int(float(traj_body_joints[1, 1]))), line_color, thickness)
            # shoulder
            cv2.line(temp_img, (int(float(traj_body_joints[21, 0])), int(float(traj_body_joints[21, 1]))),
                     (int(float(traj_body_joints[9, 0])), int(float(traj_body_joints[9, 1]))), line_color, thickness)
            cv2.line(temp_img, (int(float(traj_body_joints[21, 0])), int(float(traj_body_joints[21, 1]))),
                     (int(float(traj_body_joints[5, 0])), int(float(traj_body_joints[5, 1]))), line_color, thickness)
            # hips
            cv2.line(temp_img, (int(float(traj_body_joints[1, 0])), int(float(traj_body_joints[1, 1]))),
                     (int(float(traj_body_joints[17, 0])), int(float(traj_body_joints[17, 1]))), line_color, thickness)
            cv2.line(temp_img, (int(float(traj_body_joints[1, 0])), int(float(traj_body_joints[1, 1]))),
                     (int(float(traj_body_joints[13, 0])), int(float(traj_body_joints[13, 1]))), line_color, thickness)
            # right arm
            cv2.line(temp_img, (int(float(traj_body_joints[9, 0])), int(float(traj_body_joints[9, 1]))),
                     (int(float(traj_body_joints[10, 0])), int(float(traj_body_joints[10, 1]))), line_color, thickness)
            cv2.line(temp_img, (int(float(traj_body_joints[10, 0])), int(float(traj_body_joints[10, 1]))),
                     (int(float(traj_body_joints[11, 0])), int(float(traj_body_joints[11, 1]))), line_color, thickness)
            cv2.line(temp_img, (int(float(traj_body_joints[11, 0])), int(float(traj_body_joints[11, 1]))),
                     (int(float(traj_body_joints[12, 0])), int(float(traj_body_joints[12, 1]))), line_color, thickness)
            # left arm
            cv2.line(temp_img, (int(float(traj_body_joints[5, 0])), int(float(traj_body_joints[5, 1]))),
                     (int(float(traj_body_joints[6, 0])), int(float(traj_body_joints[6, 1]))), line_color, thickness)
            cv2.line(temp_img, (int(float(traj_body_joints[6, 0])), int(float(traj_body_joints[6, 1]))),
                     (int(float(traj_body_joints[7, 0])), int(float(traj_body_joints[7, 1]))), line_color, thickness)
            cv2.line(temp_img, (int(float(traj_body_joints[7, 0])), int(float(traj_body_joints[7, 1]))),
                     (int(float(traj_body_joints[8, 0])), int(float(traj_body_joints[8, 1]))), line_color, thickness)
            # left leg
            cv2.line(temp_img, (int(float(traj_body_joints[13, 0])), int(float(traj_body_joints[13, 1]))),
                     (int(float(traj_body_joints[14, 0])), int(float(traj_body_joints[14, 1]))), line_color, thickness)
            cv2.line(temp_img, (int(float(traj_body_joints[14, 0])), int(float(traj_body_joints[14, 1]))),
                     (int(float(traj_body_joints[15, 0])), int(float(traj_body_joints[15, 1]))), line_color, thickness)
            cv2.line(temp_img, (int(float(traj_body_joints[15, 0])), int(float(traj_body_joints[15, 1]))),
                     (int(float(traj_body_joints[16, 0])), int(float(traj_body_joints[16, 1]))), line_color, thickness)
            # right leg
            cv2.line(temp_img, (int(float(traj_body_joints[17, 0])), int(float(traj_body_joints[17, 1]))),
                     (int(float(traj_body_joints[18, 0])), int(float(traj_body_joints[18, 1]))), line_color, thickness)
            cv2.line(temp_img, (int(float(traj_body_joints[18, 0])), int(float(traj_body_joints[18, 1]))),
                     (int(float(traj_body_joints[19, 0])), int(float(traj_body_joints[19, 1]))), line_color, thickness)
            cv2.line(temp_img, (int(float(traj_body_joints[19, 0])), int(float(traj_body_joints[19, 1]))),
                     (int(float(traj_body_joints[20, 0])), int(float(traj_body_joints[20, 1]))), line_color, thickness)
            if n_frame > 1:
                for i, joint in enumerate(traj_body_joints):
                    if i == 0:
                        continue
                    cv2.circle(temp_img, (int(float(joint[0])), int(float(joint[1]))), 2, color, -1)
                    if i == 3 and n_frame > 0:
                        ##draw trajectories
                        # joint 3 (torso) leaves a persistent trail on `scene`
                        cv2.circle(scene, (int(float(joint[0])), int(float(joint[1]))), 2, color, -1)
                    else:
                        ##draw joint
                        cv2.circle(temp_img, (int(float(joint[0])), int(float(joint[1]))), 2, color, -1)
                cv2.imshow('hospital room', temp_img)
                cv2.waitKey(1)
            else:
                # first frames: block until a key is pressed
                cv2.imshow('hospital room', temp_img)
                cv2.waitKey(0)
        except:
            # NOTE(review): bare except hides all errors, not just missing
            # coordinates -- narrowing to (IndexError, ValueError) would be safer
            print 'traj coordinates not available'
            continue
def plot_classifier_confidence(task,cluster_model,keys_labels,lr,q):
    """Stream classifier-confidence values to the GUI process via queue `q`.

    task: sequence of 2-second feature slices; each is quantized to its
    nearest cluster word and accumulated into a bag-of-words histogram.
    cluster_model: fitted clusterer with .predict(); keys_labels: cluster
    word ids aligned with the histogram bins; lr: fitted LogisticRegression.
    Emits ['cc', conf_confusion, conf_repetitive, conf_adl, n_slice, x_axis]
    every 2 seconds.  Class-index mapping (0,2 = confusion; 3 = repetitive;
    1,4,5 = ADL) is hard-coded -- assumed to match the training labels,
    TODO confirm.
    """
    ##plotting confidence classifier on bow
    # wait for the skeleton-playback process to start first
    time.sleep(8.5)
    hist = np.zeros((1,len(keys_labels)))
    x_axis = 0
    ##every two second create a sample cluster it and create an hist
    for n_slice,two_mins_slice in enumerate(task):
        similar_word= cluster_model.predict(np.array(two_mins_slice).reshape(1,-1))
        index = np.where(similar_word == keys_labels)[0]
        # histogram is cumulative over the whole run (never reset)
        hist[0][index] +=1
        pred = lr.predict(hist)
        print pred
        conf_confusion = np.max([lr.decision_function(hist)[0][0],lr.decision_function(hist)[0][2]])
        conf_repetitive = lr.decision_function(hist)[0][3]
        conf_adl = np.max([lr.decision_function(hist)[0][1],lr.decision_function(hist)[0][4],lr.decision_function(hist)[0][5]])
        q.put(['cc',conf_confusion,conf_repetitive,conf_adl,n_slice,x_axis])
        x_axis+=1
        time.sleep(2)
    #plt.pause(100)
def basic_plot():#Function to create the base plot, make sure to make global the lines, axes, canvas and any part that you would want to update later
    """Build the Tk-embedded matplotlib canvas and the notification widgets.

    Publishes the axes/canvas/images/labels as module globals so that
    update_figures_in_threads() and update_notification_icons() can mutate
    them later.  Must run in the GUI (main) process.
    """
    global ax_conf,ax_as,canvas,rect_open,rect_close,warning_img,emergency_img,normal_img,notification_icon,notification_text
    ##initialize figures
    main_fig = plt.figure()
    #ax_as = main_fig.add_subplot(212)
    ax_conf = main_fig.add_subplot(111)
    ##canvas in the main window
    canvas = FigureCanvasTkAgg(main_fig, master=window)
    canvas.show()
    ##in case of widget
    #canvas.get_tk_widget().pack(side=TOP, fill=BOTH, expand=1)
    ##pack place the plot automatically, using place we can specify x,y
    #canvas._tkcanvas.pack(side=TOP, fill=BOTH, expand=1)
    canvas._tkcanvas.place(x=80,y=20)
    ##inizialize plot of confidence of classifier
    # fixed axes: 60 time steps on x, confidence range -10..20 on y
    ax_conf.axis([0,60,-10,20])
    ax_conf.plot(0,0)
    ax_conf.plot(0,0)
    ax_conf.plot(0,0)
    ax_conf.set_title('classifier confidence')
    ##initialize bar plot of ambient sensor
    #sensor = ['maindoor','toilet','livingroom']
    #ind = np.arange(len(sensor))
    #width = 0.2
    #ax_as.axis([-0.5,3,0,10])
    #ax_as.set_xticks(ind+width)
    #ax_as.set_xticklabels(sensor)
    #fake data except maindoor
    #activation_open =[0,5,6]
    #activation_close=[0,5,5]
    #bar charts
    #rect_open = ax_as.bar(ind, activation_open,width,color='red')
    #rect_close = ax_as.bar(ind+width, activation_close,width,color='blue')
    #ax_as.legend((rect_open[0],rect_close[1]),('door open','door close'),fontsize=9)
    #ax_as.set_title('ambient sensor')
    ##initialize notification icons and text
    warning_img = PIL.ImageTk.PhotoImage(PIL.Image.open('C:/Users/dario.dotti/Documents/data_for_demo/icon_warning_call_relative.png'))
    emergency_img = PIL.ImageTk.PhotoImage(PIL.Image.open('C:/Users/dario.dotti/Documents/data_for_demo/icon_emergency_call_doctors.png'))
    normal_img = PIL.ImageTk.PhotoImage(PIL.Image.open('C:/Users/dario.dotti/Documents/data_for_demo/house_daily_activity.png'))
    notification_icon = Label(window, image=warning_img)
    notification_text = Label(window, text='Calling the stakeholders')
    notification_title = Label(window, text='NOTIFICATION')
    notification_title.place(x=350, y=510)
def update_figures_in_threads(q):
    """Poll queue `q` for classifier-confidence messages and update the GUI.

    Message layout (see plot_classifier_confidence):
    ['cc', conf_confusion, conf_repetitive, conf_adl, n_slice, x_axis].
    Re-schedules itself via window.after: 10 ms after a handled message,
    100 ms when the queue is empty.
    """
    try:#Try to check if there is data in the queue
        result=q.get_nowait()
        if result !='Q':
            if result[0] == 'cc':
                print result
                # x = result[5] (slice counter), y = the three confidences
                ax_conf.plot(result[5],result[1],'r^',label='confusion')
                ax_conf.plot(result[5],result[2],'b^',label='repetitive')
                ax_conf.plot(result[5],result[3],'g^',label='normal activity')
                #draw the legend only once
                if result[4]==0:
                    ax_conf.legend(loc='upper left',fontsize=9)
                ##show notification images
                if max([result[1], result[2], result[3]]) == result[3]:
                    ##normal activity
                    ##show img without waiting for the threshold
                    update_notification_icons('normal_act')
                else:
                    ##confusion or ripetitive
                    ##show img only if higher than threshold
                    #if 2<result[1]< 3 or 2<result[2]< 3: update_notification_icons('warning')
                    if result[1]> 3 or result[2]> 3: update_notification_icons('emergency')
                canvas.draw()
                window.after(10, update_figures_in_threads, q)
            #elif result[0] == 'as':
            #rect_open[0].set_height(result[1][0])
            #rect_close[0].set_height(result[2][0])
            #canvas.draw()
            #window.after(10, update_figures_in_threads, q)
    except:
        ##no new input so refresh
        # NOTE(review): bare except treats ANY error (e.g. a plotting bug)
        # as "queue empty"; catching Queue.Empty explicitly would be safer.
        window.after(100, update_figures_in_threads, q)
def update_notification_icons(label_img):
##refreshing notification icons
print label_img
if label_img == 'warning':
notification_icon.configure(image=warning_img)
notification_icon.image = warning_img
notification_icon.place(x=320, y=550)
##text
notification_text.configure(text='Calling the stakeholders')
notification_text.place(x=330, y=670)
elif label_img == 'emergency':
notification_icon.configure(image=emergency_img)
notification_icon.image = emergency_img
notification_icon.place(x=320, y=550)
##text
notification_text.configure(text='Calling the doctors')
notification_text.place(x=330, y=670)
elif label_img == 'normal_act':
notification_icon.configure(image=normal_img)
notification_icon.image = normal_img
notification_icon.place(x=320, y=550)
##text
notification_text.configure(text='Normal Activity')
notification_text.place(x=330, y=670)
def main_demo_pecs():
    """Wire the demo together: load data/models, spawn the playback and
    classifier processes, initialize the Tk GUI and enter its main loop.
    """
    ##get raw data for displaying
    task_skeleton_data = data_organizer.load_matrix_pickle('C:/Users/dario.dotti/Documents/pecs_data_review/skeletons_repetitive_behavior_02082017.txt')
    ##HOT features
    HOT_16_subject_6_tasks = data_organizer.load_matrix_pickle(
        'C:/Users/dario.dotti/Documents/pecs_data_review/HOT_repetitive_behavior_02082017.txt')
    ##BOW computed on HOT
    # bow_data = data_organizer.load_matrix_pickle('C:/Users/dario.dotti/Documents/bow_experiment_data/test_PECS/BOW_3_kmeans_16subject_2sec_without_outlier.txt')
    # labels_bow_data = data_organizer.load_matrix_pickle(
    # 'C:/Users/dario.dotti/Documents/bow_experiment_data/test_PECS/BOW_3_kmeans_labels_16subject_2sec_without_outlier.txt')
    bow_data = data_organizer.load_matrix_pickle(
        'C:/Users/dario.dotti/Documents/bow_experiment_data/BOW_30_kmeans_16subject_2sec.txt')
    labels_bow_data = data_organizer.load_matrix_pickle(
        'C:/Users/dario.dotti/Documents/bow_experiment_data/BOW_30_kmeans_labels_16subject_2sec.txt')
    # train the logistic-regression classifier at startup from the saved BOW
    lr = LogisticRegression()
    lr.fit(bow_data, np.ravel(labels_bow_data))
    ##cluster data
    # cluster_model = data_organizer.load_matrix_pickle('C:/Users/dario.dotti/Documents/bow_experiment_data/test_PECS/cl_30_kmeans_model_2secWindow_without_outliers.txt')
    # labels_cluster = data_organizer.load_matrix_pickle(
    # 'C:/Users/dario.dotti/Documents/bow_experiment_data/test_PECS/cluster_3_kmeans_word__without_outliers.txt')
    cluster_model = data_organizer.load_matrix_pickle(
        'C:/Users/dario.dotti/Documents/bow_experiment_data/cl_30_kmeans_model_2secWindow_newVersion.txt')
    labels_cluster = data_organizer.load_matrix_pickle(
        'C:/Users/dario.dotti/Documents/bow_experiment_data/cluster_30_kmeans_word_newVersion.txt')
    key_labels = map(lambda x: x[0], labels_cluster)
    ##shared variable between threads
    q = multiprocessing.Queue()
    current_time_shared = multiprocessing.Queue()
    ##launch different processes in the same time
    display_joints_traj = multiprocessing.Process(target=draw_joints_and_tracks,
                                                  args=(task_skeleton_data, current_time_shared))
    display_confidence_classifier = multiprocessing.Process(target=plot_classifier_confidence, args=(
        HOT_16_subject_6_tasks, cluster_model, key_labels, lr, q))
    ##Start threads
    display_joints_traj.start()
    display_confidence_classifier.start()
    ##call plot initializer
    basic_plot()
    # start the queue-polling loop before entering Tk's event loop
    update_figures_in_threads(q)
    ##launch main window loop
    window.geometry('800x700')
    window.mainloop()
# script entry point: run the full demo GUI
if __name__ == '__main__':
    main_demo_pecs()
|
{"/demo.py": ["/data_organizer.py", "/video_traj.py", "/ambient_sensors.py"]}
|
7,473
|
ncbrown1/timeclock
|
refs/heads/master
|
/clock/admin.py
|
from django.contrib import admin
from clock.models import *
class ClockEventAdmin(admin.ModelAdmin):
    """Admin configuration for ClockEvent records."""
    # columns shown in the admin change-list
    list_display = ['employee','date','time_in','time_out','time_worked','message', 'created']
    # sidebar filters
    list_filter = ['employee', 'date']


# Employee uses the default ModelAdmin; ClockEvent gets the custom one above
admin.site.register(ClockEvent, ClockEventAdmin)
admin.site.register(Employee)
|
{"/clock/admin.py": ["/clock/models.py"], "/clock/views.py": ["/clock/models.py", "/clock/forms.py", "/clock/controllers.py"], "/clock/forms.py": ["/clock/models.py"], "/clock/controllers.py": ["/clock/models.py"]}
|
7,474
|
ncbrown1/timeclock
|
refs/heads/master
|
/timeclock/settings.py
|
"""
Django settings for timeclock project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Recipients of error/broken-link e-mails
ADMINS = (
    ('Nick Brown', 'ncbrown@cs.ucsb.edu'),
)
EMAIL_SUBJECT_PREFIX = '[Django Timeclock]'
SEND_BROKEN_LINK_EMAILS = True
# NOTE(review): EMAIL_HOST is empty, so admin e-mails cannot actually be
# delivered until a real SMTP host is configured.
EMAIL_HOST = ''
EMAIL_PORT = 587  # SMTP submission port; usually paired with EMAIL_USE_TLS = True -- TODO confirm
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, 'templates'),
os.path.join(BASE_DIR, 'clock/templates'),
)
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
FILE_UPLOAD_PERMISSIONS = 0755  # Python 2 octal literal: rwxr-xr-x
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the key is hard-coded and committed to version control;
# rotate it and load it from the environment or a non-versioned file.
SECRET_KEY = 'd+@lm_+l$_+k8_6*tipu7x737ajn959lh$d)j#aeb#lrc_oas5'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
TEMPLATE_DEBUG = False
ALLOWED_HOSTS = ['localhost']
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.humanize',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'flup',
'widget_tweaks',
'clock',
'forum',
'sslserver',
'djangosecure',
)
MIDDLEWARE_CLASSES = (
'djangosecure.middleware.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'timeclock.urls'
WSGI_APPLICATION = 'timeclock.wsgi.application'
AUTHENTICATION_BACKENDS = ('timeclock.authbackend.ClockBackend',)#'django.contrib.auth.backends.ModelBackend')
LOGIN_URL = '/login/'
LOGIN_REDIRECT_URL = 'login_success'
LOGOUT_URL = '/logout/'
# Cookie / session / HTTPS hardening (django.contrib.sessions + djangosecure)
CSRF_COOKIE_SECURE = True
SESSION_COOKIE_AGE = 86400  # seconds: 24 hours
SESSION_COOKIE_SECURE = True
SESSION_COOKIE_HTTPONLY = True
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
SESSION_SAVE_EVERY_REQUEST = True
SECURE_SSL_REDIRECT = True
SECURE_HSTS_SECONDS = 31536000  # one year
SECURE_HSTS_INCLUDE_SUBDOMAINS = True
SECURE_FRAME_DENY = True
SECURE_CONTENT_TYPE_NOSNIFF = True
SECURE_BROWSER_XSS_FILTER = True
# NOTE(review): Django's conventional header is 'HTTP_X_FORWARDED_PROTO';
# 'HTTP_X_FORWARDED_PROTOCOL' is only safe if the front-end proxy really sets
# that exact header AND strips it from client requests -- verify proxy config,
# otherwise clients can spoof HTTPS status.
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTOCOL', 'https')
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'timeclock',
'USER': '',
'PASSWORD': '',
'HOST': 'localhost',
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/Los_Angeles'
USE_I18N = True
USE_L10N = True
USE_TZ = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'site-static')
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
'console':{
'level': 'DEBUG',
'class': 'logging.StreamHandler',
},
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
|
{"/clock/admin.py": ["/clock/models.py"], "/clock/views.py": ["/clock/models.py", "/clock/forms.py", "/clock/controllers.py"], "/clock/forms.py": ["/clock/models.py"], "/clock/controllers.py": ["/clock/models.py"]}
|
7,475
|
ncbrown1/timeclock
|
refs/heads/master
|
/forum/forms.py
|
from django import forms
from forum.models import *
class ThreadForm(forms.ModelForm):
    """Form for creating/editing a forum Thread; only title and description
    are user-editable."""
    title = forms.CharField(max_length=60, required=True, label="Title")
    description = forms.CharField(max_length=5000, widget=forms.Textarea, label="Description")

    class Meta:
        model = Thread
        # these fields are set programmatically (in the view), not by the user
        exclude = ('creator','created','forum','updated')
class PostForm(forms.ModelForm):
    """Reply form; creator/thread/timestamps are filled in by the view."""
    title = forms.CharField(max_length=255, label="Title")
    body = forms.CharField(max_length=5000, widget=forms.Textarea, label="Body")
    class Meta:
        model = Post
        exclude = ('creator','updated','created','thread')
|
{"/clock/admin.py": ["/clock/models.py"], "/clock/views.py": ["/clock/models.py", "/clock/forms.py", "/clock/controllers.py"], "/clock/forms.py": ["/clock/models.py"], "/clock/controllers.py": ["/clock/models.py"]}
|
7,476
|
ncbrown1/timeclock
|
refs/heads/master
|
/timeclock/urls.py
|
from django.conf import settings
from django.conf.urls import patterns, include, url, RegexURLResolver
from django.contrib.auth.models import User
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.contrib.auth.views import login, logout, password_change, password_change_done
from django.views.generic import TemplateView
from clock import views
from forum import views as fviews
from django.contrib import admin
admin.autodiscover()  # discover each installed app's admin registrations
def group(regex, *args):
    """Bundle several url() entries under a shared URL prefix."""
    return RegexURLResolver(regex, args)
# URL routes: admin, auth, clock profiles/history, staff forum, static/media.
urlpatterns = patterns('',
    url(r'^admin/', include(admin.site.urls)),
    url(r'^$', views.index, name='home'),
    (r'^login/', login, {'template_name': 'timeclock/login.html'}),
    url(r'login_success/', views.login_success, name="login_success"),
    (r'^logout/', logout, {'next_page': '/'}),
    (r'^password-change/', password_change, {'template_name': 'timeclock/passwordchange.html', 'post_change_redirect': '/password-change-success/'}),
    (r'^password-change-success/', TemplateView.as_view(template_name='timeclock/passwordchangesuccess.html')),
    url(r'^register/', views.register, name="register"),
    # Employee-facing pages share the /profile/ prefix.
    group(r'^profile/',
        url(r'^$', views.profile, name="user-profile"),
        url(r'^clockevent-history/', views.clockevent_history, name="clockevent-history"),
    ),
    url(r'^superprofile/', views.superprofile, name="superprofile"),
    group(r'^super-clockevent-history/',
        url(r'^$', views.super_clockevent_history, name="super-clockevent-history"),
        url(r'(\d+)/$', views.employee_history, name="employee-history")
    ),
    url(r'^edit-profile/', views.edit_profile, name="edit-profile"),
    group(r'^staff-forum/',
        url(r'^$', fviews.main_forum, name='main_forum'),
        url(r'^(\d+)/$', fviews.forum, name='forum-detail'),
        url(r'^thread/(\d+)/$', fviews.thread, name="thread-detail"),
        url(r'^reply/(\d+)/$', fviews.post_reply, name="reply"),
        url(r'^newthread/(\d+)/$', fviews.new_thread, name="new-thread"),
    ),
    url(r'^get-clocked-in-employees', views.json_in_employees, name="get-clocked-in-employees"),
    # Static/media served directly by Django (development-style serving).
    (r'^static/(?P<path>.*)$', 'django.views.static.serve', { 'document_root': settings.STATIC_ROOT, 'show_indexes': True }),
    url(r'^media/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.MEDIA_ROOT, 'show_indexes': True }),
)
urlpatterns += staticfiles_urlpatterns()
|
{"/clock/admin.py": ["/clock/models.py"], "/clock/views.py": ["/clock/models.py", "/clock/forms.py", "/clock/controllers.py"], "/clock/forms.py": ["/clock/models.py"], "/clock/controllers.py": ["/clock/models.py"]}
|
7,477
|
ncbrown1/timeclock
|
refs/heads/master
|
/clock/models.py
|
from datetime import datetime, timedelta
from django.db import models
from django.contrib.auth.models import User
class Employee(models.Model):
    """Profile extending the auth User with clock state and pay information."""
    user = models.OneToOneField(User)
    clocked_in = models.BooleanField(default=False)
    # Snapshot of the most recent clock-in/out, duplicated here for quick display.
    last_clocked_in = models.DateTimeField(blank=True, null=True)
    last_clocked_out = models.DateTimeField(blank=True, null=True)
    last_message = models.CharField(max_length=500, blank=True, null=True, default="")
    # NOTE(review): default is a string on a FloatField — appears to rely on
    # implicit coercion; confirm before normalizing to a plain 15.00.
    pay_rate = models.FloatField(default='15.00')
    profile_pic = models.ImageField(upload_to='profile_images', default="/static/profile_images/default-pp.png", blank=True, null=True)
    def __unicode__(self):
        return self.user.username
    class Meta:
        verbose_name = 'Employee'
        verbose_name_plural = 'Employees'
class ClockEvent(models.Model):
    """A single shift: one clock-in/clock-out pair for an employee on a date."""
    # NOTE(review): unique=True on this ForeignKey would permit only ONE event
    # per employee, yet the views create and query many per employee —
    # verify whether this constraint is intended.
    employee = models.ForeignKey(Employee, unique=True)
    message = models.TextField(max_length=500)
    date = models.DateField(auto_now_add=True)
    time_in = models.TimeField()
    time_out = models.TimeField()
    created = models.DateTimeField(auto_now_add=True)
    def time_worked(self):
        # Modulo one day so a shift crossing midnight yields a positive duration.
        dt = datetime.combine(self.date, self.time_out) - datetime.combine(self.date, self.time_in)
        return timedelta(seconds=dt.total_seconds()%(24*60*60))
    def __unicode__(self):
        return u'%s - %s - %s - %s' % (self.employee, self.date, self.time_in, self.time_out)
|
{"/clock/admin.py": ["/clock/models.py"], "/clock/views.py": ["/clock/models.py", "/clock/forms.py", "/clock/controllers.py"], "/clock/forms.py": ["/clock/models.py"], "/clock/controllers.py": ["/clock/models.py"]}
|
7,478
|
ncbrown1/timeclock
|
refs/heads/master
|
/clock/views.py
|
from datetime import datetime, timedelta, date
from django.contrib.auth import logout
from django.contrib.auth.decorators import login_required, user_passes_test
from django.contrib.auth.views import login
from django.core.context_processors import csrf
from django.core.mail import send_mail, EmailMessage
from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import render, render_to_response, redirect, get_object_or_404
from django.template import RequestContext
from django.template.response import TemplateResponse
from clock.models import *
from clock.forms import *
from clock.controllers import *
def login_success(request):
    """After login, route supervisors and regular staff to their own profile pages."""
    supervisor = request.user.groups.filter(name="Supervisor").exists()
    return redirect('superprofile' if supervisor else 'user-profile')
def json_in_employees(request):
    """AJAX endpoint: JSON list of currently clocked-in employees."""
    return HttpResponse(json_employees('in'), 'application/json')
@login_required
@user_passes_test(is_employee, login_url='/login/')
def profile(request):
    """Employee profile page: shows event history and handles clock-in/out POSTs.

    Supervisors are redirected to their dashboard; users in neither group
    are logged out and shown the forbidden page.
    """
    if request.user.groups.filter(name='Supervisor').count() == 1:
        return redirect('superprofile')
    # BUG FIX: the original compared the bound method `.count` to 0 (always
    # False), so this forbidden branch could never fire; call the method.
    elif request.user.groups.filter(name='Help Desk Staff').count() == 0 and request.user.groups.filter(name='Supervisor').count() == 0:
        logout(request)
        return TemplateResponse(request, 'timeclock/forbidden.html', {'user': request.user})
    context = RequestContext(request)
    employee = Employee.objects.get(user=request.user)
    now = datetime.now()
    # Pre-fill both forms with the current time.
    in_form = ClockInForm(initial={'time_in': now.time(), 'time_out': now.time()})
    out_form = ClockOutForm(initial={'time_out': now.time()})
    in_employees = get_clocked_in_employees()
    errors = False
    if request.method == 'POST':
        if request.POST.get('action', False) == 'clock_in':
            form = ClockInForm(request.POST)
            if form.is_valid():
                event = ClockEvent()
                event.employee = employee
                event.message = form.cleaned_data['message']
                event.time_in = form.cleaned_data['time_in']
                event.time_out = form.cleaned_data['time_out']
                if can_clock_in(event):  # reject overlaps with today's events
                    errors = False
                    event.save()
                    employee.clocked_in = True
                    employee.last_clocked_in = datetime.combine(event.date, event.time_in)
                    employee.last_clocked_out = datetime.combine(event.date, event.time_out)
                    employee.last_message = event.message
                    employee.save()
                else:
                    errors = True
        elif request.POST.get('action', False) == 'clock_out':
            # Close out the most recently created event for this employee.
            event = ClockEvent.objects.filter(employee=employee).order_by("-created")[0]
            form = ClockOutForm(request.POST)
            if form.is_valid():
                event.message = form.cleaned_data['message']
                event.time_out = form.cleaned_data['time_out']
                event.save()
                employee.clocked_in = False
                employee.last_clocked_out = datetime.combine(event.date, event.time_out)
                employee.last_message = event.message
                employee.save()
    events = ClockEvent.objects.filter(employee=employee).order_by("-created")
    return render_to_response('clock/profile.html', {'employee': employee,'events': events,'user': request.user, 'out_form': out_form, 'in_form': in_form, 'clocked_in_employees': in_employees, 'errors': errors }, context)
@login_required
def superprofile(request):
    """Supervisor dashboard listing Help Desk staff and who is clocked in."""
    if request.user.groups.filter(name='Supervisor').count() == 0:
        return TemplateResponse(request, 'timeclock/forbidden.html', {'user': request.user})
    else:
        admin = request.user
        context = RequestContext(request)
        # Clocked-in employees sort first.
        employees = Employee.objects.all().order_by('user').order_by('-clocked_in').filter(user__groups__name='Help Desk Staff')
        clocked_in_employees = get_clocked_in_employees()
        # FIX: the RequestContext was built but never passed to the template;
        # pass it so context processors run, as the sibling views do.
        return render_to_response('clock/superprofile.html', {'employees': employees, 'admin': admin, 'clocked_in_employees': clocked_in_employees, 'user': request.user }, context)
@login_required
def super_clockevent_history(request):
    """Supervisor view of all clock events, filterable by date range."""
    if request.user.groups.filter(name="Supervisor").count() == 0:
        # BUG FIX: was misspelled 'TempateResponse', which raised NameError
        # for any non-supervisor reaching this view.
        return TemplateResponse(request, 'timeclock/forbidden.html', {'user': request.user})
    else:
        context = RequestContext(request)
        events = ClockEvent.objects.all()
        now = datetime.now()
        # Default filter window: the last two weeks.
        start_date = (now - timedelta(weeks=2)).date()
        end_date = now.date()
        history_form = FilterClockEventForm(initial={'start_date': start_date, 'end_date': end_date})
        if request.method == 'POST':
            history_form = FilterClockEventForm(request.POST)
            if history_form.is_valid():
                start_date = history_form.cleaned_data['start_date']
                end_date = history_form.cleaned_data['end_date']
                events = get_clock_events_between(start_date, end_date)
        # Sum hours worked and dollar cost across the selected events.
        hour_total = 0.0
        total_cost = 0.0
        for e in events:
            hour_amount = e.time_worked().total_seconds()/3600
            hour_total += hour_amount
            total_cost += hour_amount * e.employee.pay_rate
        return render_to_response('clock/superclockhistory.html', {'events': events, 'hour_total': hour_total, 'total_cost': total_cost, 'history_form': history_form }, context)
@login_required
def employee_history(request, employee_id):
    """Supervisor view of one employee's clock events, filterable by date."""
    # BUG FIX: the original compared the bound method `.count` to 0 (always
    # False), so non-supervisors were never shown the forbidden page.
    if request.user.groups.filter(name="Supervisor").count() == 0:
        return TemplateResponse(request, 'timeclock/forbidden.html', {'user': request.user})
    context = RequestContext(request)
    employee = Employee.objects.get(pk=employee_id)
    events = ClockEvent.objects.filter(employee=employee)
    now = datetime.now()
    # Default filter window: the last two weeks.
    start_date = (now - timedelta(weeks=2)).date()
    end_date = now.date()
    history_form = FilterClockEventForm(initial={'start_date': start_date, 'end_date': end_date})
    if request.method == 'POST':
        history_form = FilterClockEventForm(request.POST)
        if history_form.is_valid():
            start_date = history_form.cleaned_data['start_date']
            end_date = history_form.cleaned_data['end_date']
            events = get_clock_events_between(start_date, end_date).filter(employee=employee)
    # Sum hours worked and dollar cost across the selected events.
    hour_total = 0.0
    total_cost = 0.0
    for e in events:
        hour_amount = e.time_worked().total_seconds()/3600
        hour_total += hour_amount
        total_cost += hour_amount * e.employee.pay_rate
    return render_to_response('clock/employeehistory.html', {'events': events, 'hour_total': hour_total, 'total_cost': total_cost, 'history_form': history_form, 'employee': employee }, context)
@login_required
@user_passes_test(is_employee, login_url='/login/')
def clockevent_history(request):
    """Employee clock history with date filtering and a downloadable timesheet."""
    if request.user.groups.filter(name='Supervisor').count() == 1:
        return HttpResponseRedirect('/super-clockevent-history/')
    context = RequestContext(request)
    employee = Employee.objects.get(user=request.user)
    now = datetime.now()
    # Default filter window: the last two weeks.
    start_date = (now - timedelta(weeks=2)).date()
    end_date = now.date()
    history_form = FilterClockEventForm(initial={'start_date': start_date,'end_date': end_date})
    # BUG FIX: `events` was only assigned inside the valid-POST branch, so a
    # plain GET raised NameError in the loop below. Default to the last
    # two weeks for this employee.
    events = get_clock_events_between(start_date, end_date).filter(employee=employee)
    if request.method == 'POST':
        history_form = FilterClockEventForm(request.POST)
        if history_form.is_valid():
            start_date = history_form.cleaned_data['start_date']
            end_date = history_form.cleaned_data['end_date']
            events = get_clock_events_between(start_date, end_date).filter(employee=employee)
    total_time = timedelta(0)
    for e in events:
        total_time += e.time_worked()
    hour_total = total_time.total_seconds()/3600
    # Generate the printable timesheet and expose its media-relative link.
    timecard = populate_timesheet(employee, start_date)
    link = timecard.name.split('k/')[1]
    return render_to_response('clock/clockhistory.html', {'timecard': link,'employee': employee, 'events': events, 'user': request.user,'time_worked': hour_total, 'history_form': history_form }, context)
@login_required
@user_passes_test(is_employee, login_url='/login/')
def edit_profile(request):
    """Let an employee update name/email and optionally a profile picture."""
    context = RequestContext(request)
    edited = False
    valid = True
    if request.method == 'POST':
        user_form = UpdateUserForm(request.POST, instance=request.user)
        employee_form = EmployeeForm(request.POST, request.FILES)
        if user_form.is_valid() and employee_form.is_valid():
            user = request.user
            # NOTE(review): hashes whatever is currently in user.password.
            # This appears to rely on the bound ModelForm having written the
            # raw form password onto the instance during validation — confirm,
            # since hashing an already-hashed value would lock the account out.
            user.set_password(user.password)
            user.first_name = user_form.cleaned_data['first_name']
            user.last_name = user_form.cleaned_data['last_name']
            user.email = user_form.cleaned_data['email']
            user.save()
            employee = Employee.objects.get(user=request.user)
            if 'profile_pic' in request.FILES:
                employee.profile_pic = employee_form.files['profile_pic']
                pic = request.FILES['profile_pic']
                # E-mail the new picture out for review.
                mail = EmailMessage('[Django Timeclock] New Profile Picture for ' + user.first_name, '/media/profile_images/' + employee.profile_pic.name, 'django-timeclock@engineering.ucsb.edu', [])
                mail.attach(pic.name, pic.read(), pic.content_type)
                mail.send()
            employee.save()
            edited = True
            return render_to_response('clock/editprofile.html', {'user_form': user_form, 'employee_form': employee_form, 'edited': edited, 'valid': valid,}, context)
        else: valid = False
    else:
        user_form = UpdateUserForm(instance=request.user)
        employee_form = EmployeeForm()
    return render_to_response('clock/editprofile.html', {'user_form': user_form, 'employee_form': employee_form, 'edited': edited, 'valid': valid,}, context)
def index(request):
    """Public home page listing Help Desk staff, ordered by first name."""
    context = RequestContext(request)
    clock_employees = Employee.objects.order_by('user__first_name').filter(user__groups__name='Help Desk Staff')
    user = request.user
    # BUG FIX: on this Django version is_authenticated is a METHOD; the bare
    # attribute reference was always truthy, making the else branch dead code.
    if user.is_authenticated():
        response = render_to_response('clock/home.html', {'user': user, 'clock_employees': clock_employees})
    else:
        response = render_to_response('clock/home.html', {'clock_employees': clock_employees})
    return response
def register(request):
    """Create a new User and linked Employee from the registration forms."""
    context = RequestContext(request)
    # Set to True upon successful registration
    registered = False
    if request.method == 'POST':
        user_form = UserForm(data=request.POST)
        employee_form = EmployeeForm(data=request.POST)
        # If these forms are valid
        if user_form.is_valid() and employee_form.is_valid():
            # Save user form data to database
            user = user_form.save()
            # Hash the raw form password before persisting.
            user.set_password(user.password)
            user.save()
            employee = employee_form.save(commit=False)
            employee.user = user
            if 'profile_pic' in request.FILES:
                # BUG FIX: was `request.files` (no such attribute) — uploads
                # raised AttributeError; uploaded files live in request.FILES.
                employee.profile_pic = request.FILES['profile_pic']
            employee.save()
            registered = True  # Registration was successful
        return render_to_response('timeclock/register.html',
            {'user_form': user_form, 'employee_form': employee_form, 'registered': registered,'user': request.user},
            context)
    # Not an HTTP POST, so render blank forms
    else:
        user_form = UserForm()
        employee_form = EmployeeForm()
    args = {}
    args.update(csrf(request))
    return render(request, 'timeclock/register.html',
        {'user_form': user_form, 'employee_form': employee_form, 'registered': registered,'user': request.user,})
|
{"/clock/admin.py": ["/clock/models.py"], "/clock/views.py": ["/clock/models.py", "/clock/forms.py", "/clock/controllers.py"], "/clock/forms.py": ["/clock/models.py"], "/clock/controllers.py": ["/clock/models.py"]}
|
7,479
|
ncbrown1/timeclock
|
refs/heads/master
|
/clock/forms.py
|
from datetime import datetime, timedelta
from clock.models import *
from django.contrib.auth import authenticate
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
from django.forms.extras.widgets import SelectDateWidget
from clock.models import ClockEvent
from clock.widgets import SelectTimeWidget
from django import forms
def validate_username_unique(value):
    """Custom validator for user uniqueness."""
    # Exact-match check against existing usernames.
    if User.objects.filter(username=value).exists():
        raise ValidationError(u'That username is taken.')
class UserForm(forms.ModelForm):
    """Registration form: unique username, password with confirmation."""
    username = forms.CharField(validators=[validate_username_unique])
    password = forms.CharField(widget=forms.PasswordInput)
    password_confirm = forms.CharField(widget=forms.PasswordInput)
    def clean_password_confirm(self):
        """Required custom validation for the form."""
        # NOTE(review): errors are assigned as plain strings rather than via
        # Django's error-list API — confirm templates render these correctly.
        if 'password' in self.cleaned_data and 'password_confirm' in self.cleaned_data:
            if self.cleaned_data['password'] != self.cleaned_data['password_confirm']:
                self._errors['password'] = u'* Passwords must match.'
                self._errors['password_confirm'] = u'* Passwords must match.'
        return self.cleaned_data
    class Meta:
        model = User
        fields = ('username', 'first_name', 'last_name', 'email', 'password',)
def auth(u, p):
    """Thin wrapper around django.contrib.auth.authenticate."""
    return authenticate(username=u, password=p)
class UpdateUserForm(forms.ModelForm):
    """Profile-edit form; requires the user's current credentials to confirm."""
    username = forms.CharField(label="Your Current Username")
    password = forms.CharField(widget=forms.PasswordInput, label="Your Current Password")
    def clean(self):
        """Re-authenticate with the submitted username/password."""
        cleaned_data = super(UpdateUserForm, self).clean()
        form_password = cleaned_data.get('password')
        form_username = cleaned_data.get('username')
        if form_password and form_username:
            form_user = auth(form_username, form_password)
            if form_user is None:
                # NOTE(review): assigns a plain string into self.errors rather
                # than using add_error/ErrorList — confirm templates handle it.
                self.errors['password'] = u'You have entered an incorrect password.'
        return self.cleaned_data
    class Meta:
        model = User
        fields = ('first_name', 'last_name', 'email', 'username', 'password')
class EmployeeForm(forms.ModelForm):
    """Employee profile form: only the optional profile picture is editable."""
    profile_pic = forms.FileField(widget=forms.FileInput, required=False)
    class Meta:
        model = Employee
        fields = ('profile_pic',)
class ClockInForm(forms.ModelForm):
    """Clock-in form: actual time in, expected time out, and a task message."""
    time_in = forms.TimeField(widget=SelectTimeWidget(minute_step=15, second_step=60, twelve_hr=True), label="Time In")
    time_out = forms.TimeField(widget=SelectTimeWidget(minute_step=15, second_step=60, twelve_hr=True), label="Expected Time Out")
    message = forms.CharField(max_length=500, widget=forms.TextInput(attrs={'class':'form-control'}), label="What will you be doing today?")
    class Meta:
        model = ClockEvent
        exclude = ('employee','date','created')
class ClockOutForm(forms.ModelForm):
    """Clock-out form: actual time out and a summary message."""
    time_out = forms.TimeField(widget=SelectTimeWidget(minute_step=15, second_step=60, twelve_hr=True), label="Time Out")
    message = forms.CharField(max_length=500, widget=forms.TextInput(attrs={'class':'form-control'}), label="What did you do today?")
    class Meta:
        model = ClockEvent
        exclude = ('employee','time_in','date','created')
class FilterClockEventForm(forms.Form):
    """Date-range filter used by the history views."""
    start_date = forms.DateField(widget=SelectDateWidget, label="Start Date")
    end_date = forms.DateField(widget=SelectDateWidget, label="End Date")
|
{"/clock/admin.py": ["/clock/models.py"], "/clock/views.py": ["/clock/models.py", "/clock/forms.py", "/clock/controllers.py"], "/clock/forms.py": ["/clock/models.py"], "/clock/controllers.py": ["/clock/models.py"]}
|
7,480
|
ncbrown1/timeclock
|
refs/heads/master
|
/clock/controllers.py
|
import json
import os
from datetime import datetime, date, timedelta
from django.conf import settings
from clock.models import ClockEvent, Employee
from string import Template
import tempfile
def is_helpdesk_staff(user):
    """Return True when the user belongs to exactly one 'Help Desk Staff' group."""
    if not user:
        return False
    return user.groups.filter(name='Help Desk Staff').count() == 1
def is_supervisor(user):
    # True when the user is in exactly one 'Supervisor' group.
    if user:
        return user.groups.filter(name='Supervisor').count() == 1
    return False
def is_employee(user):
    # An employee is anyone in the Help Desk Staff or Supervisor group.
    return is_helpdesk_staff(user) or is_supervisor(user)
# NOTE: an identical, duplicate definition of is_supervisor previously lived
# here; it silently re-bound (shadowed) the definition above and has been
# removed as dead code.
def get_clocked_in_employees():
    # Most recently clocked-in first.
    return Employee.objects.filter(clocked_in=True).order_by('-last_clocked_in')
def get_clocked_out_employees():
    # Help Desk staff only, least recently clocked out first.
    return Employee.objects.filter(clocked_in=False).order_by('last_clocked_out').filter(user__groups__name='Help Desk Staff')
def get_clock_events_between(date1, date2):
    # Inclusive date range, newest events first.
    return ClockEvent.objects.filter(date__range=[date1, date2]).order_by('-created')
def json_employees(in_out_or_all):
    """Serialize employees ('in', 'out', or 'all') as a pretty-printed JSON array."""
    if in_out_or_all == 'in':
        employees = get_clocked_in_employees()
    elif in_out_or_all == 'out':
        employees = get_clocked_out_employees()
    elif in_out_or_all == 'all':
        employees = Employee.objects.all()
    else:
        return json.dumps(['Invalid input'])
    payload = [
        {
            'id': emp.pk,
            'username': emp.user.username,
            'first_name': emp.user.first_name,
            'last_name': emp.user.last_name,
            'email': emp.user.email,
            'clocked_in': emp.clocked_in,
        }
        for emp in employees
    ]
    return json.dumps(payload, sort_keys=True, indent=2)
def time_is_after(t1, t2):
    """Return True when t1 falls strictly after t2 (compared on today's date)."""
    anchor = datetime.now().date()
    gap = datetime.combine(anchor, t1) - datetime.combine(anchor, t2)
    return gap.total_seconds() > 0
def time_is_before(t1, t2):
    """Return True when t1 falls strictly before t2 (compared on today's date)."""
    anchor = datetime.now().date()
    gap = datetime.combine(anchor, t1) - datetime.combine(anchor, t2)
    return gap.total_seconds() < 0
def time_is_equal(t1, t2):
    """Return True when t1 and t2 denote the same moment of the day."""
    anchor = datetime.now().date()
    gap = datetime.combine(anchor, t1) - datetime.combine(anchor, t2)
    return gap.total_seconds() == 0
def can_clock_in(event):
    # Reject a new event that overlaps any existing event for the same
    # employee today: starts inside one, fully contains one, ends inside one,
    # or starts at exactly the same time as one.
    events = ClockEvent.objects.filter(employee=event.employee).filter(date=datetime.now().date())
    for e in events:
        if time_is_after(event.time_in, e.time_in) and time_is_before(event.time_in, e.time_out):
            return False
        elif time_is_before(event.time_in, e.time_in) and time_is_after(event.time_out, e.time_out):
            return False
        elif time_is_before(event.time_in, e.time_in) and time_is_after(event.time_out, e.time_in) and time_is_before(event.time_out, e.time_out):
            return False
        elif time_is_equal(event.time_in, e.time_in):
            return False
    return True
def find_closest_sunday(time):
    """Return the Sunday nearest to *time*; ties favor the earlier Sunday."""
    previous_sunday = time
    while previous_sunday.strftime('%A') != 'Sunday':
        previous_sunday = previous_sunday - timedelta(days=1)
    next_sunday = time
    while next_sunday.strftime('%A') != 'Sunday':
        next_sunday = next_sunday + timedelta(days=1)
    # Pick whichever Sunday is strictly closer; otherwise the earlier one.
    if (time - previous_sunday) > (next_sunday - time):
        return next_sunday
    return previous_sunday
def split_into_day_lists(employee,start_date):
    # Build 14 per-day event lists covering the two weeks that begin at the
    # Sunday closest to start_date.
    start = find_closest_sunday(start_date)
    data = []
    s = start
    while s != (start+timedelta(weeks=2)):
        evs = ClockEvent.objects.filter(date=s).filter(employee=employee)
        d = []
        for e in evs:
            d.append(e)
        data.append(d)
        s = s + timedelta(days=1)
    return data
def populate_timesheet(employee, start_date):
    """Render the employee's two-week timesheet to a temp HTML file.

    Builds a substitution dict of in/out times and per-day hour totals for
    the two weeks starting at the Sunday closest to ``start_date``, fills
    the static timesheet template, and writes the result under
    MEDIA_ROOT/files/temp. Returns the open NamedTemporaryFile; callers
    read ``.name`` to build the download link.

    BUG FIXES vs. the original hand-unrolled 14x2 grid, both previously
    hidden by bare ``except`` clauses:
      * the Wednesday week-one second event accumulated into a misspelled
        variable (``weu1t``), silently dropping those hours, and
      * the Saturday week-two cells read ``d[14]`` (IndexError) instead of
        ``d[13]``, silently dropping that day entirely.
    """
    day_names = ('Sun', 'Mon', 'Tue', 'Wed', 'Thur', 'Fri', 'Sat')
    # Every template placeholder defaults to an empty string.
    times = {'Name': "", 'PeriodStart': "", 'PeriodEnd': "", 'TotalAll': ""}
    for name in day_names:
        for slot in (1, 2, 3, 4):  # slots 1-2 = week one, 3-4 = week two
            times['%sIn%d' % (name, slot)] = ""
            times['%sOut%d' % (name, slot)] = ""
        times['Total%s1' % name] = ""
        times['Total%s2' % name] = ""
    days = split_into_day_lists(employee, start_date)  # 14 per-day event lists
    day_totals = [timedelta(0) for _ in range(14)]
    # The printed sheet has two rows per day, so only the first two events
    # of each day are shown (matching the original behavior).
    for day_index in range(14):
        week = day_index // 7
        prefix = day_names[day_index % 7]
        for event_index in (0, 1):
            try:
                event = days[day_index][event_index]
            except IndexError:
                continue  # fewer than two events that day
            slot = event_index + 1 + 2 * week
            times['%sIn%d' % (prefix, slot)] = event.time_in.strftime('%I:%M %p')
            times['%sOut%d' % (prefix, slot)] = event.time_out.strftime('%I:%M %p')
            day_totals[day_index] += event.time_worked()
    # Per-day and grand totals, in fractional hours.
    total_all = timedelta(0)
    for day_index in range(14):
        week = day_index // 7
        prefix = day_names[day_index % 7]
        times['Total%s%d' % (prefix, week + 1)] = day_totals[day_index].total_seconds() / 3600
        total_all += day_totals[day_index]
    times['TotalAll'] = total_all.total_seconds() / 3600
    start = find_closest_sunday(start_date)
    times['PeriodStart'] = start.strftime('%b %d')
    times['PeriodEnd'] = (start + timedelta(weeks=2, days=-1)).strftime('%b %d')
    times['Name'] = "%s %s" % (employee.user.first_name, employee.user.last_name)
    # Clear out previously generated sheets; best-effort only.
    folder = os.path.join(settings.MEDIA_ROOT, 'files/temp')
    for the_file in os.listdir(folder):
        try:
            os.unlink(os.path.join(folder, the_file))
        except OSError:
            pass
    with open(os.path.join(settings.STATIC_ROOT, 'downloads/timesheet.html'), 'r') as template_file:
        text = template_file.read()
    rendered = Template(text).substitute(times)
    out = tempfile.NamedTemporaryFile(mode="w", suffix=".html", dir=os.path.join(settings.MEDIA_ROOT, "files/temp/"), delete=False)
    out.write(rendered)
    return out
|
{"/clock/admin.py": ["/clock/models.py"], "/clock/views.py": ["/clock/models.py", "/clock/forms.py", "/clock/controllers.py"], "/clock/forms.py": ["/clock/models.py"], "/clock/controllers.py": ["/clock/models.py"]}
|
7,481
|
ncbrown1/timeclock
|
refs/heads/master
|
/timeclock/authbackend.py
|
from django.contrib.auth import get_user_model
from django.contrib.auth.backends import ModelBackend
from django.contrib.auth.models import User
class ClockBackend(ModelBackend):
    """Auth backend that only admits Supervisor or Help Desk Staff members."""
    def authenticate(self, username=None, password=None, **kwargs):
        UserModel = get_user_model()
        if username is None:
            username = kwargs.get(UserModel.USERNAME_FIELD)
        try:
            user = UserModel._default_manager.get_by_natural_key(username)
            # The password must match AND the user must be in an allowed
            # group; otherwise fall through and return None (auth failure).
            if user.check_password(password) and (user.groups.filter(name='Supervisor').count() == 1 or user.groups.filter(name='Help Desk Staff').count() == 1):
                return user
        except UserModel.DoesNotExist:
            # Hash anyway so a missing user takes roughly as long as a wrong
            # password (timing-attack mitigation, as in Django's ModelBackend).
            UserModel().set_password(password)
|
{"/clock/admin.py": ["/clock/models.py"], "/clock/views.py": ["/clock/models.py", "/clock/forms.py", "/clock/controllers.py"], "/clock/forms.py": ["/clock/models.py"], "/clock/controllers.py": ["/clock/models.py"]}
|
7,482
|
ncbrown1/timeclock
|
refs/heads/master
|
/forum/admin.py
|
from django.contrib import admin
from forum.models import *
class ForumAdmin(admin.ModelAdmin):
    """Default admin for Forum; no customization needed."""
    pass
class ThreadAdmin(admin.ModelAdmin):
    """Admin config for threads: list columns plus forum/creator filters."""
    list_display = ['title', 'forum', 'creator', 'created']
    # BUG FIX: was misspelled 'lsit_filter', so the sidebar filters never appeared.
    list_filter = ['forum', 'creator']
class PostAdmin(admin.ModelAdmin):
    """Admin config for posts: search, date drill-down, sticky save bar."""
    list_display = ['title', 'thread', 'creator', 'created']
    list_filter = ['created', 'creator']
    search_fields = ['title', 'creator']
    date_hierarchy = 'created'
    save_on_top = True
# Register the forum models with their admin configurations.
admin.site.register(Forum, ForumAdmin)
admin.site.register(Thread, ThreadAdmin)
admin.site.register(Post, PostAdmin)
|
{"/clock/admin.py": ["/clock/models.py"], "/clock/views.py": ["/clock/models.py", "/clock/forms.py", "/clock/controllers.py"], "/clock/forms.py": ["/clock/models.py"], "/clock/controllers.py": ["/clock/models.py"]}
|
7,483
|
ncbrown1/timeclock
|
refs/heads/master
|
/forum/views.py
|
from django.contrib.auth.decorators import login_required, user_passes_test
from django.core.mail import send_mail
from django.core.paginator import Paginator
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext
from clock.controllers import *
from clock.models import Employee
from forum.models import *
from forum.forms import *
# Footer appended to every outgoing notification e-mail (see new_thread).
signature = "\n\nThis message was sent via the Helpdesk Timeclock Internal Forum. If you would like to reply to this message,\
please go to the forum and submit your reply there. https://localhost:8000/staff-forum/"
@login_required
@user_passes_test(is_employee, login_url='/login/')
def main_forum(request):
    """Render the forum index page listing every forum (employees only)."""
    forums = Forum.objects.all()
    return render_to_response('forum/list.html', {'forums': forums, 'user': request.user}, context_instance=RequestContext(request))
def mk_paginator(request, items, num_items):
    """Paginate *items* at *num_items* per page, reading ``?page=`` from the
    request.

    A non-numeric page falls back to page 1; an out-of-range page falls back
    to the last page.  Returns the selected ``Page`` object.
    """
    # Bug fix: InvalidPage/EmptyPage were referenced below without ever being
    # imported, so any out-of-range page raised NameError instead of clamping.
    from django.core.paginator import EmptyPage, InvalidPage
    paginator = Paginator(items, num_items)
    try:
        page = int(request.GET.get("page", '1'))
    except ValueError:
        page = 1
    try:
        items = paginator.page(page)
    except (InvalidPage, EmptyPage):
        items = paginator.page(paginator.num_pages)
    return items
@login_required
@user_passes_test(is_employee, login_url='/login/')
def forum(request, forum_id):
    """Render a single forum's threads, newest first, 15 per page."""
    threads = Thread.objects.filter(forum=forum_id).order_by('-created')
    threads = mk_paginator(request, threads, 15)
    return render_to_response('forum/forum.html', {'threads': threads, 'pk': forum_id, 'user': request.user}, context_instance=RequestContext(request))
@login_required
@user_passes_test(is_employee, login_url='/login/')
def thread(request, thread_id):
    """Render a single thread's posts, oldest first, 15 per page."""
    posts = Post.objects.filter(thread=thread_id).order_by("created")
    posts = mk_paginator(request, posts, 15)
    # Fix: use get_object_or_404 (as new_thread already does) so an unknown
    # id produces a 404 instead of an unhandled DoesNotExist (HTTP 500).
    t = get_object_or_404(Thread, pk=thread_id)
    return render_to_response('forum/thread.html', {'posts': posts, 'pk': thread_id, 'title': t.title, 'forum_pk': t.forum.pk, 'user': request.user}, context_instance=RequestContext(request))
@login_required
@user_passes_test(is_employee, login_url='/login/')
def post_reply(request, thread_id):
    """Reply to a thread: save a valid POSTed form, otherwise show the form.

    On GET the title is prefilled with "RE: <thread title>"; an invalid POST
    re-renders the bound form with its errors.
    """
    # Fix: 404 on an unknown thread id (was a bare .get -> HTTP 500), and
    # dropped the dead initial `form = PostForm()` that both branches
    # unconditionally overwrote.
    thread = get_object_or_404(Thread, pk=thread_id)
    if request.method == 'POST':
        form = PostForm(request.POST)
        if form.is_valid():
            post = Post()
            post.thread = thread
            post.title = form.cleaned_data['title']
            post.body = form.cleaned_data['body']
            post.creator = request.user
            post.save()
            return HttpResponseRedirect(reverse('thread-detail', args=(thread_id,)))
    else:
        # Prefill the reply title for convenience.
        post = Post()
        post.title = 'RE: %s' % thread.title
        form = PostForm(instance=post)
    return render_to_response('forum/reply.html', {'form':form, 'thread':thread,}, context_instance=RequestContext(request))
@login_required
@user_passes_test(is_employee, login_url='/login/')
def new_thread(request, forum_id):
    """Create a new thread (plus its opening post) in the given forum.

    Threads created in the 'Announcements' forum also trigger a notification
    e-mail to every help-desk staff member.  An invalid POST re-renders the
    bound form with its errors.
    """
    form = ThreadForm()
    forum = get_object_or_404(Forum, pk=forum_id)
    if request.method == 'POST':
        form = ThreadForm(request.POST)
        if form.is_valid():
            thread = Thread()
            thread.title = form.cleaned_data['title']
            thread.description = form.cleaned_data['description']
            thread.forum = forum
            thread.creator = request.user
            thread.save()
            # Every thread starts with an opening post mirroring its
            # title and description.
            post = Post()
            post.thread = thread
            post.title = thread.title
            post.body = thread.description
            post.creator = request.user
            post.save()
            if forum.title == 'Announcements':
                # Idiom: build the recipient list with a comprehension
                # instead of a manual append loop.
                rcpts = [emp.user.email for emp in
                         Employee.objects.filter(user__groups__name='Help Desk Staff')]
                send_mail('[Help Desk Announcement] ' + thread.title, thread.description + signature, 'no-reply@helpdesk.engr.ucsb.edu', rcpts, fail_silently=False)
            return HttpResponseRedirect(reverse('forum-detail', args=(forum_id, )))
    return render_to_response('forum/new-topic.html', {'form':form, 'forum':forum, }, context_instance=RequestContext(request))
|
{"/clock/admin.py": ["/clock/models.py"], "/clock/views.py": ["/clock/models.py", "/clock/forms.py", "/clock/controllers.py"], "/clock/forms.py": ["/clock/models.py"], "/clock/controllers.py": ["/clock/models.py"]}
|
7,490
|
xyu40/cuDMRG
|
refs/heads/main
|
/cuDMRG/tensor/index.py
|
import uuid
from enum import Enum
from typing import List, Tuple
from ..utils import get_logger
logger = get_logger(__name__)
class IndexType(Enum):
    # Bond connecting neighboring tensors in a network.
    VIRTUAL = 1
    # Physical (site) degree of freedom.
    PHYSICAL = 2
    # Wildcard used by the level-manipulation helpers to match any type.
    ANYTYPE = 3
class Index:
    """A tensor index: a unique id plus a size, a type and an integer level.

    Two Index objects compare equal when they share both id and level; the
    level acts as a "prime" tag distinguishing bra/ket copies of the same
    underlying index.  All mutators return self for chaining.
    """
    def __init__(
        self,
        size: int,
        index_type: IndexType = IndexType.VIRTUAL,
        level: int = 0,
    ) -> None:
        self._id = uuid.uuid4()
        self._size: int = size
        self._index_type: IndexType = index_type
        self._level: int = level
    def _type_matches(self, index_type: IndexType) -> bool:
        # True when the filter is ANYTYPE or equals this index's type.
        return index_type is IndexType.ANYTYPE or self._index_type == index_type
    @property
    def size(self) -> int:
        return self._size
    @property
    def indexType(self) -> IndexType:
        return self._index_type
    @property
    def level(self) -> int:
        return self._level
    def setSize(self, size: int) -> "Index":
        self._size = size
        return self
    def raiseLevel(self, index_type=IndexType.ANYTYPE) -> "Index":
        if self._type_matches(index_type):
            self._level += 1
        return self
    def lowerLevel(self, index_type=IndexType.ANYTYPE) -> "Index":
        if self._type_matches(index_type):
            self._level -= 1
        return self
    def setLevel(self, level: int, index_type=IndexType.ANYTYPE) -> "Index":
        if self._type_matches(index_type):
            self._level = level
        return self
    def resetLevel(self, index_type=IndexType.ANYTYPE) -> "Index":
        if self._type_matches(index_type):
            self._level = 0
        return self
    def mapLevel(self,
                 level_from: int,
                 level_to: int,
                 index_type=IndexType.ANYTYPE) -> "Index":
        # Retag only when both the type filter and the current level match.
        if self._type_matches(index_type) and self._level == level_from:
            self._level = level_to
        return self
    def almostIndentical(self, rhs: "Index") -> bool:
        # Same underlying index (same id) but at a different level.
        return self._id == rhs._id and self._level != rhs._level
    def __key(self) -> Tuple[int, int]:
        return (self._id.int, self._level)
    def __hash__(self) -> int:
        return hash(self.__key())
    def __eq__(self, rhs: object) -> bool:
        if not isinstance(rhs, self.__class__):
            return False
        return self._id == rhs._id and self._level == rhs._level
    def __str__(self):
        return (f"Index({self._size}, {self._index_type}, "
                f"{self._level}, {self._id})")
def getEinsumRule(lhs: List[Index],
                  rhs: List[Index]) -> Tuple[List[int], List[int]]:
    """Return the axis pairs (lhs_axes, rhs_axes) to contract.

    Axes are paired wherever the same Index appears in both lists; the
    pairs are emitted in *rhs* order.
    """
    position_in_lhs = {index: i for i, index in enumerate(lhs)}
    lhs_axes: List[int] = []
    rhs_axes: List[int] = []
    for rhs_pos, index in enumerate(rhs):
        lhs_pos = position_in_lhs.get(index)
        if lhs_pos is not None:
            lhs_axes.append(lhs_pos)
            rhs_axes.append(rhs_pos)
    return lhs_axes, rhs_axes
|
{"/cuDMRG/tensor/index.py": ["/cuDMRG/utils/__init__.py"], "/cuDMRG/apps/__init__.py": ["/cuDMRG/apps/sites.py", "/cuDMRG/apps/mps.py", "/cuDMRG/apps/mpo.py", "/cuDMRG/apps/solver.py", "/cuDMRG/apps/dmrg.py"], "/cuDMRG/apps/solver.py": ["/cuDMRG/tensor/__init__.py", "/cuDMRG/utils/__init__.py"], "/cuDMRG/utils/__init__.py": ["/cuDMRG/utils/logger.py"], "/cuDMRG/apps/sites.py": ["/cuDMRG/tensor/__init__.py", "/cuDMRG/utils/__init__.py"], "/cuDMRG/__init__.py": ["/cuDMRG/tensor/__init__.py", "/cuDMRG/apps/__init__.py"], "/cuDMRG/apps/mps.py": ["/cuDMRG/apps/sites.py", "/cuDMRG/tensor/__init__.py", "/cuDMRG/utils/__init__.py"], "/cuDMRG/tensor/tensor.py": ["/cuDMRG/tensor/index.py", "/cuDMRG/utils/__init__.py"], "/test.py": ["/cuDMRG/__init__.py"], "/cuDMRG/tensor/__init__.py": ["/cuDMRG/tensor/index.py", "/cuDMRG/tensor/tensor.py"], "/cuDMRG/apps/mpo.py": ["/cuDMRG/apps/sites.py", "/cuDMRG/apps/mps.py", "/cuDMRG/tensor/__init__.py", "/cuDMRG/utils/__init__.py"], "/cuDMRG/apps/dmrg.py": ["/cuDMRG/apps/sites.py", "/cuDMRG/apps/mps.py", "/cuDMRG/apps/mpo.py", "/cuDMRG/apps/solver.py", "/cuDMRG/tensor/__init__.py", "/cuDMRG/utils/__init__.py"]}
|
7,491
|
xyu40/cuDMRG
|
refs/heads/main
|
/cuDMRG/apps/__init__.py
|
from .sites import Sites
from .mps import MPS
from .mpo import MPO, psiHphi
from .heisenberg import Heisenberg
from .solver import LinearOp, LinearMult, lanczos
from .dmrg import DMRG
__all__ = [
"Sites", "MPS", "MPO", "psiHphi", "Heisenberg", "LinearOp", "LinearMult",
"lanczos", "DMRG"
]
|
{"/cuDMRG/tensor/index.py": ["/cuDMRG/utils/__init__.py"], "/cuDMRG/apps/__init__.py": ["/cuDMRG/apps/sites.py", "/cuDMRG/apps/mps.py", "/cuDMRG/apps/mpo.py", "/cuDMRG/apps/solver.py", "/cuDMRG/apps/dmrg.py"], "/cuDMRG/apps/solver.py": ["/cuDMRG/tensor/__init__.py", "/cuDMRG/utils/__init__.py"], "/cuDMRG/utils/__init__.py": ["/cuDMRG/utils/logger.py"], "/cuDMRG/apps/sites.py": ["/cuDMRG/tensor/__init__.py", "/cuDMRG/utils/__init__.py"], "/cuDMRG/__init__.py": ["/cuDMRG/tensor/__init__.py", "/cuDMRG/apps/__init__.py"], "/cuDMRG/apps/mps.py": ["/cuDMRG/apps/sites.py", "/cuDMRG/tensor/__init__.py", "/cuDMRG/utils/__init__.py"], "/cuDMRG/tensor/tensor.py": ["/cuDMRG/tensor/index.py", "/cuDMRG/utils/__init__.py"], "/test.py": ["/cuDMRG/__init__.py"], "/cuDMRG/tensor/__init__.py": ["/cuDMRG/tensor/index.py", "/cuDMRG/tensor/tensor.py"], "/cuDMRG/apps/mpo.py": ["/cuDMRG/apps/sites.py", "/cuDMRG/apps/mps.py", "/cuDMRG/tensor/__init__.py", "/cuDMRG/utils/__init__.py"], "/cuDMRG/apps/dmrg.py": ["/cuDMRG/apps/sites.py", "/cuDMRG/apps/mps.py", "/cuDMRG/apps/mpo.py", "/cuDMRG/apps/solver.py", "/cuDMRG/tensor/__init__.py", "/cuDMRG/utils/__init__.py"]}
|
7,492
|
xyu40/cuDMRG
|
refs/heads/main
|
/cuDMRG/apps/solver.py
|
try:
import cupy as xp
except ImportError:
import numpy as xp
from abc import ABC, abstractmethod
from typing import Tuple
from ..tensor import Tensor
from ..utils import get_logger
logger = get_logger(__name__)
class LinearOp(ABC):
    """Abstract callable linear operator; concrete subclasses implement
    __call__ (see LinearMult and lanczos below)."""
    def __init__(self) -> None:
        pass
    @abstractmethod
    def __call__(self, t: Tensor):
        pass
class LinearMult(LinearOp):
    """Linear operator acting on a raw array: contract lhs * x * rhs.

    The incoming ndarray is installed as the data of a stored copy of *x*
    (reusing x's indices), then contracted with the two fixed tensors.
    """
    def __init__(self, lhs: Tensor, rhs: Tensor, x: Tensor) -> None:
        super().__init__()
        self._lhs = lhs
        self._rhs = rhs
        # copy() gives an independent index/metadata holder whose data
        # buffer is replaced on every call.
        self._x = x.copy()
    def __call__(self, t: xp.ndarray) -> xp.ndarray:
        # Reuse the stored index structure with the caller's raw data.
        self._x._data = t
        res = self._lhs * self._x
        res *= self._rhs
        return res._data
def lanczos(op: LinearOp,
            x: Tensor,
            krylov_size: int,
            num_restarts: int,
            smallest: bool = True) -> Tuple[float, Tensor]:
    """Approximate an extreme eigenpair of *op* with restarted Lanczos.

    Builds a krylov_size-dimensional tridiagonal projection of *op*,
    diagonalizes it, and restarts num_restarts times from the current Ritz
    vector.  Returns (eigenvalue, x) with x._data overwritten in place by
    the final Ritz vector.  Targets the smallest eigenvalue unless
    smallest=False.

    NOTE(review): assumes op is symmetric/Hermitian (eigh is used) and that
    no beta[i+1] underflows to zero — there is no breakdown handling.
    """
    v_next = x._data.copy()
    beta = xp.zeros(krylov_size + 1)
    alpha = xp.zeros(krylov_size)
    for _ in range(num_restarts):
        beta[0] = 0.0
        v_prev = xp.zeros(x._data.shape)
        v_next /= xp.linalg.norm(v_next)
        # Columns of V hold the orthonormal Lanczos basis vectors.
        V = xp.zeros([x.size, krylov_size])
        for i in range(0, krylov_size):
            w = op(v_next)
            alpha[i] = xp.dot(w.reshape(x.size), v_next.reshape(x.size))
            # Three-term recurrence: orthogonalize against the previous two.
            w -= (alpha[i] * v_next + beta[i] * v_prev)
            beta[i + 1] = xp.linalg.norm(w)
            v_prev = v_next.copy()
            v_next = w / beta[i + 1]
            V[:, i] = v_prev.reshape(x.size)
        # Assemble the symmetric tridiagonal projection (lower part only;
        # eigh reads UPLO="L").
        tridiag = xp.diag(alpha)
        for i in range(0, krylov_size - 1):
            tridiag[i + 1, i] = beta[i + 1]
        d, v = xp.linalg.eigh(tridiag, UPLO="L")
        # Lift the chosen Ritz vector back to full space for the next restart.
        if smallest:
            ev = d[0]
            v_next = (V @ v[:, 0]).reshape(x._data.shape)
        else:
            ev = d[-1]
            v_next = (V @ v[:, -1]).reshape(x._data.shape)
    x._data = v_next
    return ev, x
|
{"/cuDMRG/tensor/index.py": ["/cuDMRG/utils/__init__.py"], "/cuDMRG/apps/__init__.py": ["/cuDMRG/apps/sites.py", "/cuDMRG/apps/mps.py", "/cuDMRG/apps/mpo.py", "/cuDMRG/apps/solver.py", "/cuDMRG/apps/dmrg.py"], "/cuDMRG/apps/solver.py": ["/cuDMRG/tensor/__init__.py", "/cuDMRG/utils/__init__.py"], "/cuDMRG/utils/__init__.py": ["/cuDMRG/utils/logger.py"], "/cuDMRG/apps/sites.py": ["/cuDMRG/tensor/__init__.py", "/cuDMRG/utils/__init__.py"], "/cuDMRG/__init__.py": ["/cuDMRG/tensor/__init__.py", "/cuDMRG/apps/__init__.py"], "/cuDMRG/apps/mps.py": ["/cuDMRG/apps/sites.py", "/cuDMRG/tensor/__init__.py", "/cuDMRG/utils/__init__.py"], "/cuDMRG/tensor/tensor.py": ["/cuDMRG/tensor/index.py", "/cuDMRG/utils/__init__.py"], "/test.py": ["/cuDMRG/__init__.py"], "/cuDMRG/tensor/__init__.py": ["/cuDMRG/tensor/index.py", "/cuDMRG/tensor/tensor.py"], "/cuDMRG/apps/mpo.py": ["/cuDMRG/apps/sites.py", "/cuDMRG/apps/mps.py", "/cuDMRG/tensor/__init__.py", "/cuDMRG/utils/__init__.py"], "/cuDMRG/apps/dmrg.py": ["/cuDMRG/apps/sites.py", "/cuDMRG/apps/mps.py", "/cuDMRG/apps/mpo.py", "/cuDMRG/apps/solver.py", "/cuDMRG/tensor/__init__.py", "/cuDMRG/utils/__init__.py"]}
|
7,493
|
xyu40/cuDMRG
|
refs/heads/main
|
/cuDMRG/utils/__init__.py
|
from .logger import get_logger
__all__ = ["get_logger"]
|
{"/cuDMRG/tensor/index.py": ["/cuDMRG/utils/__init__.py"], "/cuDMRG/apps/__init__.py": ["/cuDMRG/apps/sites.py", "/cuDMRG/apps/mps.py", "/cuDMRG/apps/mpo.py", "/cuDMRG/apps/solver.py", "/cuDMRG/apps/dmrg.py"], "/cuDMRG/apps/solver.py": ["/cuDMRG/tensor/__init__.py", "/cuDMRG/utils/__init__.py"], "/cuDMRG/utils/__init__.py": ["/cuDMRG/utils/logger.py"], "/cuDMRG/apps/sites.py": ["/cuDMRG/tensor/__init__.py", "/cuDMRG/utils/__init__.py"], "/cuDMRG/__init__.py": ["/cuDMRG/tensor/__init__.py", "/cuDMRG/apps/__init__.py"], "/cuDMRG/apps/mps.py": ["/cuDMRG/apps/sites.py", "/cuDMRG/tensor/__init__.py", "/cuDMRG/utils/__init__.py"], "/cuDMRG/tensor/tensor.py": ["/cuDMRG/tensor/index.py", "/cuDMRG/utils/__init__.py"], "/test.py": ["/cuDMRG/__init__.py"], "/cuDMRG/tensor/__init__.py": ["/cuDMRG/tensor/index.py", "/cuDMRG/tensor/tensor.py"], "/cuDMRG/apps/mpo.py": ["/cuDMRG/apps/sites.py", "/cuDMRG/apps/mps.py", "/cuDMRG/tensor/__init__.py", "/cuDMRG/utils/__init__.py"], "/cuDMRG/apps/dmrg.py": ["/cuDMRG/apps/sites.py", "/cuDMRG/apps/mps.py", "/cuDMRG/apps/mpo.py", "/cuDMRG/apps/solver.py", "/cuDMRG/tensor/__init__.py", "/cuDMRG/utils/__init__.py"]}
|
7,494
|
xyu40/cuDMRG
|
refs/heads/main
|
/cuDMRG/utils/logger.py
|
import logging
import sys
# Prefer colorized log output when the optional `colorlog` package is
# installed; otherwise fall back to a plain stdlib formatter with the same
# timestamp format.
try:
    from colorlog import ColoredFormatter
    FORMATTER = ColoredFormatter(
        "%(log_color)s[%(asctime)s %(name)-8s%(levelname)s]%(reset)s "
        "%(white)s%(message)s",
        datefmt="%H:%M:%S",
        reset=True,
        log_colors={
            'DEBUG': 'cyan',
            'INFO': 'green',
            'WARNING': 'yellow',
            'ERROR': 'red',
            'CRITICAL': 'red',
        })
except ImportError:
    FORMATTER = logging.Formatter(
        "%(asctime)s - %(name)s - %(levelname)s - %(message)s",
        datefmt="%H:%M:%S",
    )
def get_console_handler():
    """Return a stdout StreamHandler configured with the module formatter."""
    handler = logging.StreamHandler(sys.stdout)
    handler.setFormatter(FORMATTER)
    return handler
def get_logger(logger_name):
    """Return a DEBUG-level logger for *logger_name* that writes to stdout.

    Fix: only attach a console handler when the logger has none yet — the
    original added a fresh handler on every call, so requesting the same
    logger name twice duplicated every log line.
    """
    logger = logging.getLogger(logger_name)
    logger.setLevel(logging.DEBUG)
    if not logger.handlers:
        logger.addHandler(get_console_handler())
    # A handler is attached directly, so don't also bubble records up to
    # the root logger (which would double-print them).
    logger.propagate = False
    return logger
|
{"/cuDMRG/tensor/index.py": ["/cuDMRG/utils/__init__.py"], "/cuDMRG/apps/__init__.py": ["/cuDMRG/apps/sites.py", "/cuDMRG/apps/mps.py", "/cuDMRG/apps/mpo.py", "/cuDMRG/apps/solver.py", "/cuDMRG/apps/dmrg.py"], "/cuDMRG/apps/solver.py": ["/cuDMRG/tensor/__init__.py", "/cuDMRG/utils/__init__.py"], "/cuDMRG/utils/__init__.py": ["/cuDMRG/utils/logger.py"], "/cuDMRG/apps/sites.py": ["/cuDMRG/tensor/__init__.py", "/cuDMRG/utils/__init__.py"], "/cuDMRG/__init__.py": ["/cuDMRG/tensor/__init__.py", "/cuDMRG/apps/__init__.py"], "/cuDMRG/apps/mps.py": ["/cuDMRG/apps/sites.py", "/cuDMRG/tensor/__init__.py", "/cuDMRG/utils/__init__.py"], "/cuDMRG/tensor/tensor.py": ["/cuDMRG/tensor/index.py", "/cuDMRG/utils/__init__.py"], "/test.py": ["/cuDMRG/__init__.py"], "/cuDMRG/tensor/__init__.py": ["/cuDMRG/tensor/index.py", "/cuDMRG/tensor/tensor.py"], "/cuDMRG/apps/mpo.py": ["/cuDMRG/apps/sites.py", "/cuDMRG/apps/mps.py", "/cuDMRG/tensor/__init__.py", "/cuDMRG/utils/__init__.py"], "/cuDMRG/apps/dmrg.py": ["/cuDMRG/apps/sites.py", "/cuDMRG/apps/mps.py", "/cuDMRG/apps/mpo.py", "/cuDMRG/apps/solver.py", "/cuDMRG/tensor/__init__.py", "/cuDMRG/utils/__init__.py"]}
|
7,495
|
xyu40/cuDMRG
|
refs/heads/main
|
/cuDMRG/apps/sites.py
|
from copy import copy
from typing import List
from ..tensor import Index, IndexType
from ..utils import get_logger
logger = get_logger(__name__)
class Sites:
    """A chain of lattice sites sharing a single physical dimension.

    Holds one persistent physical Index per site; virtual (bond) indices
    are generated fresh on every access.
    """
    def __init__(self, length: int, physicalDim: int) -> None:
        self._length = length
        self._physicalDim = physicalDim
        # One stable physical index per site, handed out as copies.
        self._physicalIndices = [
            Index(physicalDim, IndexType.PHYSICAL) for _ in range(length)
        ]
    @property
    def length(self) -> int:
        return self._length
    @property
    def physicalDim(self) -> int:
        return self._physicalDim
    @property
    def virtualIndices(self) -> List[Index]:
        # Must be freshly generated every time: each access yields brand-new
        # bond indices (length + 1 of them, initial size 1).
        return [Index(1, IndexType.VIRTUAL) for _ in range(self._length + 1)]
    @property
    def physicalIndices(self) -> List[Index]:
        # Hand out copies so callers cannot mutate the stored indices.
        return [copy(index) for index in self._physicalIndices]
|
{"/cuDMRG/tensor/index.py": ["/cuDMRG/utils/__init__.py"], "/cuDMRG/apps/__init__.py": ["/cuDMRG/apps/sites.py", "/cuDMRG/apps/mps.py", "/cuDMRG/apps/mpo.py", "/cuDMRG/apps/solver.py", "/cuDMRG/apps/dmrg.py"], "/cuDMRG/apps/solver.py": ["/cuDMRG/tensor/__init__.py", "/cuDMRG/utils/__init__.py"], "/cuDMRG/utils/__init__.py": ["/cuDMRG/utils/logger.py"], "/cuDMRG/apps/sites.py": ["/cuDMRG/tensor/__init__.py", "/cuDMRG/utils/__init__.py"], "/cuDMRG/__init__.py": ["/cuDMRG/tensor/__init__.py", "/cuDMRG/apps/__init__.py"], "/cuDMRG/apps/mps.py": ["/cuDMRG/apps/sites.py", "/cuDMRG/tensor/__init__.py", "/cuDMRG/utils/__init__.py"], "/cuDMRG/tensor/tensor.py": ["/cuDMRG/tensor/index.py", "/cuDMRG/utils/__init__.py"], "/test.py": ["/cuDMRG/__init__.py"], "/cuDMRG/tensor/__init__.py": ["/cuDMRG/tensor/index.py", "/cuDMRG/tensor/tensor.py"], "/cuDMRG/apps/mpo.py": ["/cuDMRG/apps/sites.py", "/cuDMRG/apps/mps.py", "/cuDMRG/tensor/__init__.py", "/cuDMRG/utils/__init__.py"], "/cuDMRG/apps/dmrg.py": ["/cuDMRG/apps/sites.py", "/cuDMRG/apps/mps.py", "/cuDMRG/apps/mpo.py", "/cuDMRG/apps/solver.py", "/cuDMRG/tensor/__init__.py", "/cuDMRG/utils/__init__.py"]}
|
7,496
|
xyu40/cuDMRG
|
refs/heads/main
|
/cuDMRG/__init__.py
|
from .tensor import Index, Tensor, getEinsumRule
from .apps import (Sites, MPS, MPO, psiHphi, Heisenberg, LinearMult, lanczos,
DMRG)
__all__ = [
"Index", "Tensor", "getEinsumRule", "Sites", "MPS", "MPO", "psiHphi",
"Heisenberg", "LinearMult", "lanczos", "DMRG"
]
|
{"/cuDMRG/tensor/index.py": ["/cuDMRG/utils/__init__.py"], "/cuDMRG/apps/__init__.py": ["/cuDMRG/apps/sites.py", "/cuDMRG/apps/mps.py", "/cuDMRG/apps/mpo.py", "/cuDMRG/apps/solver.py", "/cuDMRG/apps/dmrg.py"], "/cuDMRG/apps/solver.py": ["/cuDMRG/tensor/__init__.py", "/cuDMRG/utils/__init__.py"], "/cuDMRG/utils/__init__.py": ["/cuDMRG/utils/logger.py"], "/cuDMRG/apps/sites.py": ["/cuDMRG/tensor/__init__.py", "/cuDMRG/utils/__init__.py"], "/cuDMRG/__init__.py": ["/cuDMRG/tensor/__init__.py", "/cuDMRG/apps/__init__.py"], "/cuDMRG/apps/mps.py": ["/cuDMRG/apps/sites.py", "/cuDMRG/tensor/__init__.py", "/cuDMRG/utils/__init__.py"], "/cuDMRG/tensor/tensor.py": ["/cuDMRG/tensor/index.py", "/cuDMRG/utils/__init__.py"], "/test.py": ["/cuDMRG/__init__.py"], "/cuDMRG/tensor/__init__.py": ["/cuDMRG/tensor/index.py", "/cuDMRG/tensor/tensor.py"], "/cuDMRG/apps/mpo.py": ["/cuDMRG/apps/sites.py", "/cuDMRG/apps/mps.py", "/cuDMRG/tensor/__init__.py", "/cuDMRG/utils/__init__.py"], "/cuDMRG/apps/dmrg.py": ["/cuDMRG/apps/sites.py", "/cuDMRG/apps/mps.py", "/cuDMRG/apps/mpo.py", "/cuDMRG/apps/solver.py", "/cuDMRG/tensor/__init__.py", "/cuDMRG/utils/__init__.py"]}
|
7,497
|
xyu40/cuDMRG
|
refs/heads/main
|
/cuDMRG/apps/mps.py
|
from copy import copy
from typing import List
from .sites import Sites
from ..tensor import Tensor
from ..utils import get_logger
logger = get_logger(__name__)
class MPS:
    """Matrix-product state over a chain of Sites.

    Each site holds a rank-3 tensor with axes (left virtual, physical,
    right virtual); the boundary virtual bonds have size 1.
    """
    def __init__(self, sites: Sites, VirtualDim: int = 1) -> None:
        # Orthogonality center (site position); maintained by canonicalize().
        self._center = 0
        self._length = sites.length
        self._physicalDim = sites.physicalDim
        physicalIndices = sites.physicalIndices
        virtualIndices = sites.virtualIndices
        for i, vidx in enumerate(virtualIndices):
            if i == 0 or i == self._length:
                # Boundary bonds are trivial.
                vidx.setSize(1)
            else:
                vidx.setSize(VirtualDim)
        # Site i connects bond i, physical index i, and bond i + 1.
        self._tensors: List[Tensor] = [
            Tensor([
                virtualIndices[i],
                physicalIndices[i],
                virtualIndices[i + 1],
            ]).setZero() for i in range(self._length)
        ]
    @property
    def length(self) -> int:
        return self._length
    @property
    def physicalDim(self) -> int:
        return self._physicalDim
    @property
    def leftIndex(self):
        # Copy of the leftmost (trivial) virtual index.
        return copy(self._tensors[0].indices[0])
    @property
    def rightIndex(self):
        # Copy of the rightmost (trivial) virtual index.
        return copy(self._tensors[-1].indices[-1])
    @property
    def tensors(self) -> List[Tensor]:
        return self._tensors
    def setZero(self) -> "MPS":
        """Zero every site tensor in place; returns self."""
        for t in self._tensors:
            t.setZero()
        return self
    def setRandom(self) -> "MPS":
        """Fill every site tensor with uniform random values; returns self."""
        for t in self._tensors:
            t.setRandom()
        return self
    def setOne(self) -> "MPS":
        """Set every site tensor to all ones; returns self."""
        for t in self._tensors:
            t.setOne()
        return self
    def canonicalize(self) -> "MPS":
        """Right-canonicalize by sweeping SVD splits from the last site down.

        Each site is split into an isometry (kept) and a remainder that is
        absorbed into its left neighbor; afterwards the orthogonality center
        is at site 0 and the state is normalized.
        """
        for i in range(self._length - 1, 0, -1):
            # mergeV=False keeps the singular values on the lhs factor,
            # which is contracted into site i - 1.
            lhs, rhs, _, _ = self._tensors[i].decompose(lhs=[0],
                                                        rhs=[1, 2],
                                                        mergeV=False)
            self._tensors[i] = rhs
            self._tensors[i - 1] *= lhs
        self._tensors[0].normalize()
        self._center = 0
        return self
|
{"/cuDMRG/tensor/index.py": ["/cuDMRG/utils/__init__.py"], "/cuDMRG/apps/__init__.py": ["/cuDMRG/apps/sites.py", "/cuDMRG/apps/mps.py", "/cuDMRG/apps/mpo.py", "/cuDMRG/apps/solver.py", "/cuDMRG/apps/dmrg.py"], "/cuDMRG/apps/solver.py": ["/cuDMRG/tensor/__init__.py", "/cuDMRG/utils/__init__.py"], "/cuDMRG/utils/__init__.py": ["/cuDMRG/utils/logger.py"], "/cuDMRG/apps/sites.py": ["/cuDMRG/tensor/__init__.py", "/cuDMRG/utils/__init__.py"], "/cuDMRG/__init__.py": ["/cuDMRG/tensor/__init__.py", "/cuDMRG/apps/__init__.py"], "/cuDMRG/apps/mps.py": ["/cuDMRG/apps/sites.py", "/cuDMRG/tensor/__init__.py", "/cuDMRG/utils/__init__.py"], "/cuDMRG/tensor/tensor.py": ["/cuDMRG/tensor/index.py", "/cuDMRG/utils/__init__.py"], "/test.py": ["/cuDMRG/__init__.py"], "/cuDMRG/tensor/__init__.py": ["/cuDMRG/tensor/index.py", "/cuDMRG/tensor/tensor.py"], "/cuDMRG/apps/mpo.py": ["/cuDMRG/apps/sites.py", "/cuDMRG/apps/mps.py", "/cuDMRG/tensor/__init__.py", "/cuDMRG/utils/__init__.py"], "/cuDMRG/apps/dmrg.py": ["/cuDMRG/apps/sites.py", "/cuDMRG/apps/mps.py", "/cuDMRG/apps/mpo.py", "/cuDMRG/apps/solver.py", "/cuDMRG/tensor/__init__.py", "/cuDMRG/utils/__init__.py"]}
|
7,498
|
xyu40/cuDMRG
|
refs/heads/main
|
/cuDMRG/tensor/tensor.py
|
try:
import cupy as xp
from cupy import cutensor
USE_CUPY = True
except ImportError:
import numpy as xp
USE_CUPY = False
from numbers import Number
from copy import copy
from functools import reduce
from typing import List, Tuple, Optional, Any
from .index import Index, IndexType, getEinsumRule
from ..utils import get_logger
logger = get_logger(__name__)
class Tensor:
def __init__(self,
indices: List[Index],
data: Optional[xp.ndarray] = None,
use_cutensor: bool = False) -> None:
if data is not None and len(indices) != len(data.shape):
error_msg = "indices shape does not match data shape"
logger.error(error_msg)
raise RuntimeError(error_msg)
self._rank = len(indices)
self._indices = [copy(idx) for idx in indices]
if data is None:
self.setZero()
else:
self._data = data
self.use_cutensor = USE_CUPY and use_cutensor
def copy(self) -> "Tensor":
res = Tensor([])
res._rank = self.rank
res._indices = [copy(idx) for idx in self._indices]
res._data = self._data
return res
def deepcopy(self) -> "Tensor":
res = Tensor([])
res._rank = self.rank
res._indices = [copy(idx) for idx in self._indices]
res._data = self._data.copy()
return res
def norm(self) -> float:
return xp.linalg.norm(self._data)
def normalize(self) -> "Tensor":
self._data /= self.norm()
return self
def setZero(self) -> "Tensor":
self._data = xp.zeros([idx.size for idx in self._indices])
return self
def setOne(self) -> "Tensor":
self._data = xp.ones([idx.size for idx in self._indices])
return self
def setRandom(self) -> "Tensor":
self._data = xp.random.random([idx.size for idx in self._indices])
return self
def raiseIndexLevel(self,
indexType: IndexType = IndexType.ANYTYPE) -> "Tensor":
for idx in self._indices:
idx.raiseLevel(indexType)
return self
def lowerIndexLevel(self,
indexType: IndexType = IndexType.ANYTYPE) -> "Tensor":
for idx in self._indices:
idx.lowerLevel(indexType)
return self
def resetIndexLevel(self,
indexType: IndexType = IndexType.ANYTYPE) -> "Tensor":
for idx in self._indices:
idx.resetLevel(indexType)
return self
def mapIndexLevel(self,
level_from: int,
level_to: int,
indexType: IndexType = IndexType.ANYTYPE) -> "Tensor":
for idx in self._indices:
idx.mapLevel(level_from, level_to, indexType)
return self
def transpose(self, axes, inplace=True) -> "Tensor":
if len(set(axes)) != len(axes):
msg = "Invalid transpose input"
logger.error(msg)
raise RuntimeError(msg)
transpose_needed = False
for i, j in enumerate(axes):
if i != j:
transpose_needed = True
break
if inplace:
if transpose_needed:
self._indices = [self._indices[i] for i in axes]
self._data = xp.transpose(self._data, axes=axes)
return self
else:
res = self.copy()
if transpose_needed:
res._indices = [res._indices[i] for i in axes]
res._data = xp.transpose(res._data, axes=axes)
return res
def diagonal(self) -> "Tensor":
if self._rank % 2 != 0:
msg = "Cannot get diagonal from Tensor with odd rank"
logger.error(msg)
raise RuntimeError(msg)
lhs_indices = []
rhs_indices = []
for i in range(self._rank):
for j in range(i + 1, self._rank):
if self._indices[i].almostIndentical(self._indices[j]):
lhs_indices.append(i)
rhs_indices.append(j)
self.transpose(lhs_indices + rhs_indices)
res_size = [self._indices[i].size for i in lhs_indices]
diag_size = reduce(lambda x, y: x * y, res_size)
res_indices = [
copy(self._indices[i]).resetLevel() for i in lhs_indices
]
res_data = xp.diag(self._data.reshape(diag_size, -1)).reshape(res_size)
return Tensor(res_indices, res_data)
def decompose(
self,
lhs: List[int],
rhs: List[int],
mergeV: bool = True,
cutoff: float = 1e-12,
maxdim: int = 2147483648
) -> Tuple["Tensor", "Tensor", xp.array, int]:
lhs_size = reduce(lambda x, y: x * y,
[self._indices[i].size for i in lhs])
rhs_size = reduce(lambda x, y: x * y,
[self._indices[i].size for i in rhs])
self.transpose(lhs + rhs)
u, s, v = xp.linalg.svd(self._data.reshape([lhs_size, rhs_size]),
full_matrices=False,
compute_uv=True)
s_norm = xp.linalg.norm(s)
s_cutoff = (1 - cutoff) * s_norm * s_norm
s_squared_cumsum = xp.cumsum(xp.power(s, 2))
# dim = 0
# for i in range(s.size):
# dim += 1
# if s_squared_cumsum[i] >= s_cutoff or (dim + 1) > maxdim:
# break
dim = int(xp.searchsorted(s_squared_cumsum[:maxdim], s_cutoff)) + 1
dim = min(dim, s.size, maxdim)
u = u[:, :dim]
s = xp.clip(s[:dim] * s_norm / xp.sqrt(s_squared_cumsum[dim - 1]),
a_min=1e-32,
a_max=None)
v = v[:dim, :]
if mergeV:
v = xp.diag(s) @ v
else:
u = u @ xp.diag(s)
a = Index(dim)
lhs_indices = self._indices[:len(lhs)] + [a]
rhs_indices = [a] + self._indices[len(lhs):]
lhs_tensor = Tensor(lhs_indices,
u.reshape([idx.size for idx in lhs_indices]))
rhs_tensor = Tensor(rhs_indices,
v.reshape([idx.size for idx in rhs_indices]))
return lhs_tensor, rhs_tensor, s, dim
@property
def rank(self) -> int:
return self._rank
@property
def size(self) -> int:
return self._data.size
@property
def indices(self) -> List[Index]:
return self._indices
def __add__(self, rhs: Any) -> "Tensor":
if isinstance(rhs, Number) or isinstance(rhs, xp.ndarray):
res_tensor = self.deepcopy()
res_tensor._data += rhs
return res_tensor
elif isinstance(rhs, Tensor):
res = self.deepcopy()
res._data += rhs._data
return res
else:
msg = f"Unsupported __add__ with rhs of type {type(rhs)}"
logger.error(msg)
raise RuntimeError(msg)
def __iadd__(self, rhs: Any) -> "Tensor":
if isinstance(rhs, Number) or isinstance(rhs, xp.ndarray):
self._data += rhs
elif isinstance(rhs, Tensor):
self._data = self._data + rhs._data
else:
msg = f"Unsupported __iadd__ with rhs of type {type(rhs)}"
logger.error(msg)
raise RuntimeError(msg)
return self
def __sub__(self, rhs: Any) -> "Tensor":
if isinstance(rhs, Number) or isinstance(rhs, xp.ndarray):
res_tensor = self.deepcopy()
res_tensor._data -= rhs
return res_tensor
elif isinstance(rhs, Tensor):
res = self.deepcopy()
res._data -= rhs._data
return res
else:
msg = f"Unsupported __sub__ with rhs of type {type(rhs)}"
logger.error(msg)
raise RuntimeError(msg)
def __isub__(self, rhs: Any) -> "Tensor":
if isinstance(rhs, Number) or isinstance(rhs, xp.ndarray):
self._data -= rhs
elif isinstance(rhs, Tensor):
self._data = self._data - rhs._data
else:
msg = f"Unsupported __isub__ with rhs of type {type(rhs)}"
logger.error(msg)
raise RuntimeError(msg)
return self
def __mul__(self, rhs: Any) -> "Tensor":
if isinstance(rhs, Number) or isinstance(rhs, xp.ndarray):
res_tensor = self.deepcopy()
res_tensor._data *= rhs
return res_tensor
elif isinstance(rhs, Tensor):
axes = getEinsumRule(self._indices, rhs._indices)
res_indices = ([
idx for i, idx in enumerate(self._indices) if i not in axes[0]
] + [
idx for j, idx in enumerate(rhs._indices) if j not in axes[1]
])
if not self.use_cutensor:
res_data = xp.tensordot(self._data, rhs._data, axes=axes)
return Tensor(res_indices, res_data)
else:
a = xp.ascontiguousarray(self._data)
b = xp.ascontiguousarray(rhs._data)
c = xp.zeros([idx.size for idx in res_indices])
desc_a = cutensor.create_tensor_descriptor(a)
desc_b = cutensor.create_tensor_descriptor(b)
desc_c = cutensor.create_tensor_descriptor(c)
mode_a = [chr(97 + i) for i in range(self._rank)]
mode_b = [
chr(97 + i)
for i in range(self._rank, self._rank + rhs._rank)
]
for i, j in zip(axes[0], axes[1]):
mode_b[j] = mode_a[i]
mode_c = (
[mode_a[i]
for i in range(self._rank) if i not in axes[0]] +
[mode_b[j] for j in range(rhs._rank) if j not in axes[1]])
mode_a = cutensor.create_mode(*mode_a)
mode_b = cutensor.create_mode(*mode_b)
mode_c = cutensor.create_mode(*mode_c)
cutensor.contraction(1.0, a, desc_a, mode_a, b, desc_b, mode_b,
0.0, c, desc_c, mode_c)
return Tensor(res_indices, c)
else:
msg = f"Unsupported __mul__ with rhs of type {type(rhs)}"
logger.error(msg)
raise RuntimeError(msg)
def __imul__(self, rhs: Any) -> "Tensor":
if isinstance(rhs, Number) or isinstance(rhs, xp.ndarray):
self._data *= rhs
elif isinstance(rhs, Tensor):
axes = getEinsumRule(self._indices, rhs._indices)
res_indices = ([
idx for i, idx in enumerate(self._indices) if i not in axes[0]
] + [
idx for j, idx in enumerate(rhs._indices) if j not in axes[1]
])
if not self.use_cutensor:
self._data = xp.tensordot(self._data, rhs._data, axes=axes)
else:
a = xp.ascontiguousarray(self._data)
b = xp.ascontiguousarray(rhs._data)
c = xp.zeros([idx.size for idx in res_indices])
desc_a = cutensor.create_tensor_descriptor(a)
desc_b = cutensor.create_tensor_descriptor(b)
desc_c = cutensor.create_tensor_descriptor(c)
mode_a = [chr(97 + i) for i in range(self._rank)]
mode_b = [
chr(97 + i)
for i in range(self._rank, self._rank + rhs._rank)
]
for i, j in zip(axes[0], axes[1]):
mode_b[j] = mode_a[i]
mode_c = (
[mode_a[i]
for i in range(self._rank) if i not in axes[0]] +
[mode_b[j] for j in range(rhs._rank) if j not in axes[1]])
mode_a = cutensor.create_mode(*mode_a)
mode_b = cutensor.create_mode(*mode_b)
mode_c = cutensor.create_mode(*mode_c)
cutensor.contraction(1.0, a, desc_a, mode_a, b, desc_b, mode_b,
0.0, c, desc_c, mode_c)
self._data = c
self._indices = res_indices
self._rank = len(self._indices)
else:
msg = f"Unsupported __imul__ with rhs of type {type(rhs)}"
logger.error(msg)
raise RuntimeError(msg)
return self
def __rmul__(self, lhs: Any) -> "Tensor":
if isinstance(lhs, Number) or isinstance(lhs, xp.ndarray):
res_tensor = self.deepcopy()
res_tensor._data *= lhs
return res_tensor
else:
msg = f"Unsupported __rmul__ with lhs of type {type(lhs)}"
logger.error(msg)
raise RuntimeError(msg)
def __truediv__(self, rhs: Any) -> "Tensor":
if isinstance(rhs, Number) or isinstance(rhs, xp.ndarray):
res_tensor = self.deepcopy()
res_tensor._data /= rhs
return res_tensor
else:
msg = f"Unsupported __truediv__ with rhs of type {type(rhs)}"
logger.error(msg)
raise RuntimeError(msg)
def __idiv__(self, rhs: Any) -> "Tensor":
if isinstance(rhs, Number) or isinstance(rhs, xp.ndarray):
self._data /= rhs
return self
else:
msg = f"Unsupported __idiv__ with rhs of type {type(rhs)}"
logger.error(msg)
raise RuntimeError(msg)
def __str__(self) -> str:
    """Summary string: rank, one line per index, then the data shape."""
    lines = [f"rank = {self._rank}"]
    lines.extend(str(idx) for idx in self._indices)
    lines.append(str(self._data.shape))
    return "\n".join(lines)
|
{"/cuDMRG/tensor/index.py": ["/cuDMRG/utils/__init__.py"], "/cuDMRG/apps/__init__.py": ["/cuDMRG/apps/sites.py", "/cuDMRG/apps/mps.py", "/cuDMRG/apps/mpo.py", "/cuDMRG/apps/solver.py", "/cuDMRG/apps/dmrg.py"], "/cuDMRG/apps/solver.py": ["/cuDMRG/tensor/__init__.py", "/cuDMRG/utils/__init__.py"], "/cuDMRG/utils/__init__.py": ["/cuDMRG/utils/logger.py"], "/cuDMRG/apps/sites.py": ["/cuDMRG/tensor/__init__.py", "/cuDMRG/utils/__init__.py"], "/cuDMRG/__init__.py": ["/cuDMRG/tensor/__init__.py", "/cuDMRG/apps/__init__.py"], "/cuDMRG/apps/mps.py": ["/cuDMRG/apps/sites.py", "/cuDMRG/tensor/__init__.py", "/cuDMRG/utils/__init__.py"], "/cuDMRG/tensor/tensor.py": ["/cuDMRG/tensor/index.py", "/cuDMRG/utils/__init__.py"], "/test.py": ["/cuDMRG/__init__.py"], "/cuDMRG/tensor/__init__.py": ["/cuDMRG/tensor/index.py", "/cuDMRG/tensor/tensor.py"], "/cuDMRG/apps/mpo.py": ["/cuDMRG/apps/sites.py", "/cuDMRG/apps/mps.py", "/cuDMRG/tensor/__init__.py", "/cuDMRG/utils/__init__.py"], "/cuDMRG/apps/dmrg.py": ["/cuDMRG/apps/sites.py", "/cuDMRG/apps/mps.py", "/cuDMRG/apps/mpo.py", "/cuDMRG/apps/solver.py", "/cuDMRG/tensor/__init__.py", "/cuDMRG/utils/__init__.py"]}
|
7,499
|
xyu40/cuDMRG
|
refs/heads/main
|
/test.py
|
from cuDMRG import Index, Tensor, MPS, Sites, Heisenberg, psiHphi, DMRG
def test_basic_tensor():
    """Contract two random tensors, normalize, then check that
    decompose() reproduces the original up to 1e-9."""
    a, b, c, d = Index(6), Index(4), Index(3), Index(5)
    C = Tensor([a, b, c]).setRandom() * Tensor([c, d]).setRandom()
    C.normalize()
    if not -1e-9 <= (C.norm() - 1.0) <= 1e-9:
        print("Basic test failed")
        return
    lhs, rhs, _, _ = C.decompose(lhs=[0, 1],
                                 rhs=[2],
                                 mergeV=True,
                                 cutoff=1e-9,
                                 maxdim=8)
    if -1e-9 <= (lhs * rhs - C).norm() <= 1e-9:
        print("Basic tensor test passed")
    else:
        print("Basic tensor test failed")
def test_basic_mps_mpo():
    """Check that psiHphi(...) is ~0 for this MPS/MPO combination."""
    sites = Sites(10, 2)
    psi = MPS(sites, 1).setOne().canonicalize()
    H = Heisenberg(sites, J=0, h=1).build()
    within_tolerance = -1e-9 <= psiHphi(psi, H, psi) <= 1e-9
    if within_tolerance:
        print("Basic mps mpo test passed")
    else:
        print("Basic mps mpo test failed")
def test_basic_dmrg():
    """Smoke-run DMRG on a 100-site chain (no assertion, just completion)."""
    sites = Sites(100, 2)
    hamiltonian = Heisenberg(sites, J=1, h=0).build()
    DMRG(sites, hamiltonian).run()
# Run the whole suite when executed as a script.
if __name__ == "__main__":
    test_basic_tensor()
    test_basic_mps_mpo()
    test_basic_dmrg()
|
{"/cuDMRG/tensor/index.py": ["/cuDMRG/utils/__init__.py"], "/cuDMRG/apps/__init__.py": ["/cuDMRG/apps/sites.py", "/cuDMRG/apps/mps.py", "/cuDMRG/apps/mpo.py", "/cuDMRG/apps/solver.py", "/cuDMRG/apps/dmrg.py"], "/cuDMRG/apps/solver.py": ["/cuDMRG/tensor/__init__.py", "/cuDMRG/utils/__init__.py"], "/cuDMRG/utils/__init__.py": ["/cuDMRG/utils/logger.py"], "/cuDMRG/apps/sites.py": ["/cuDMRG/tensor/__init__.py", "/cuDMRG/utils/__init__.py"], "/cuDMRG/__init__.py": ["/cuDMRG/tensor/__init__.py", "/cuDMRG/apps/__init__.py"], "/cuDMRG/apps/mps.py": ["/cuDMRG/apps/sites.py", "/cuDMRG/tensor/__init__.py", "/cuDMRG/utils/__init__.py"], "/cuDMRG/tensor/tensor.py": ["/cuDMRG/tensor/index.py", "/cuDMRG/utils/__init__.py"], "/test.py": ["/cuDMRG/__init__.py"], "/cuDMRG/tensor/__init__.py": ["/cuDMRG/tensor/index.py", "/cuDMRG/tensor/tensor.py"], "/cuDMRG/apps/mpo.py": ["/cuDMRG/apps/sites.py", "/cuDMRG/apps/mps.py", "/cuDMRG/tensor/__init__.py", "/cuDMRG/utils/__init__.py"], "/cuDMRG/apps/dmrg.py": ["/cuDMRG/apps/sites.py", "/cuDMRG/apps/mps.py", "/cuDMRG/apps/mpo.py", "/cuDMRG/apps/solver.py", "/cuDMRG/tensor/__init__.py", "/cuDMRG/utils/__init__.py"]}
|
7,500
|
xyu40/cuDMRG
|
refs/heads/main
|
/cuDMRG/tensor/__init__.py
|
from .index import Index, IndexType, getEinsumRule
from .tensor import Tensor
__all__ = ["Index", "IndexType", "Tensor", "getEinsumRule"]
|
{"/cuDMRG/tensor/index.py": ["/cuDMRG/utils/__init__.py"], "/cuDMRG/apps/__init__.py": ["/cuDMRG/apps/sites.py", "/cuDMRG/apps/mps.py", "/cuDMRG/apps/mpo.py", "/cuDMRG/apps/solver.py", "/cuDMRG/apps/dmrg.py"], "/cuDMRG/apps/solver.py": ["/cuDMRG/tensor/__init__.py", "/cuDMRG/utils/__init__.py"], "/cuDMRG/utils/__init__.py": ["/cuDMRG/utils/logger.py"], "/cuDMRG/apps/sites.py": ["/cuDMRG/tensor/__init__.py", "/cuDMRG/utils/__init__.py"], "/cuDMRG/__init__.py": ["/cuDMRG/tensor/__init__.py", "/cuDMRG/apps/__init__.py"], "/cuDMRG/apps/mps.py": ["/cuDMRG/apps/sites.py", "/cuDMRG/tensor/__init__.py", "/cuDMRG/utils/__init__.py"], "/cuDMRG/tensor/tensor.py": ["/cuDMRG/tensor/index.py", "/cuDMRG/utils/__init__.py"], "/test.py": ["/cuDMRG/__init__.py"], "/cuDMRG/tensor/__init__.py": ["/cuDMRG/tensor/index.py", "/cuDMRG/tensor/tensor.py"], "/cuDMRG/apps/mpo.py": ["/cuDMRG/apps/sites.py", "/cuDMRG/apps/mps.py", "/cuDMRG/tensor/__init__.py", "/cuDMRG/utils/__init__.py"], "/cuDMRG/apps/dmrg.py": ["/cuDMRG/apps/sites.py", "/cuDMRG/apps/mps.py", "/cuDMRG/apps/mpo.py", "/cuDMRG/apps/solver.py", "/cuDMRG/tensor/__init__.py", "/cuDMRG/utils/__init__.py"]}
|
7,501
|
xyu40/cuDMRG
|
refs/heads/main
|
/cuDMRG/apps/mpo.py
|
from abc import ABC, abstractmethod
from copy import copy
from typing import List
from .sites import Sites
from .mps import MPS
from ..tensor import Tensor
from ..utils import get_logger
logger = get_logger(__name__)
class MPO(ABC):
    """Matrix-product-operator skeleton over a chain of sites.

    Allocates one rank-4 zero tensor per site with index order
    (left virtual, primed physical, physical, right virtual); concrete
    subclasses fill the entries in build().
    """

    def __init__(self, sites: Sites, VirtualDim: int = 1) -> None:
        self._length = sites.length
        self._physicalDim = sites.physicalDim
        physicalIndices = sites.physicalIndices
        # Primed copies mark the operator's second set of physical legs.
        physicalIndicesPrime = [
            copy(idx).raiseLevel() for idx in physicalIndices
        ]
        virtualIndices = sites.virtualIndices
        # Boundary virtual bonds get dimension 1; interior ones VirtualDim.
        # NOTE(review): this mutates the index objects owned by `sites`,
        # so it affects anything else holding those indices — confirm intended.
        for i, vidx in enumerate(virtualIndices):
            if i == 0 or i == self._length:
                vidx.setSize(1)
            else:
                vidx.setSize(VirtualDim)
        self._tensors: List[Tensor] = [
            Tensor([
                virtualIndices[i],
                physicalIndicesPrime[i],
                physicalIndices[i],
                virtualIndices[i + 1],
            ]).setZero() for i in range(self._length)
        ]

    @property
    def length(self) -> int:
        # Number of sites in the chain.
        return self._length

    @property
    def physicalDim(self) -> int:
        # Local physical dimension shared by all sites.
        return self._physicalDim

    @property
    def tensors(self) -> List[Tensor]:
        # The underlying site-tensor list itself (not a copy).
        return self._tensors

    @property
    def leftIndex(self):
        # Copy of the dangling left virtual index of the first tensor.
        return copy(self._tensors[0].indices[0])

    @property
    def rightIndex(self):
        # Copy of the dangling right virtual index of the last tensor.
        return copy(self._tensors[-1].indices[-1])

    @abstractmethod
    def build(self) -> "MPO":
        """Fill the zero tensors with the concrete operator's entries."""
        pass
def psiHphi(psi: MPS, H: MPO, phi: MPS) -> float:
    """Contract the sandwich <psi|H|phi> and return it as a scalar.

    Raises RuntimeError when the three operands disagree in length or
    physical dimension.
    """
    if (psi.length != H.length or psi.length != phi.length
            or psi.physicalDim != H.physicalDim
            or psi.physicalDim != phi.physicalDim):
        msg = "Input dimensions do not match"
        logger.error(msg)
        raise RuntimeError(msg)
    # Boundary tensor closing the three dangling left indices.
    left_indices = [psi.leftIndex.raiseLevel(), H.leftIndex, phi.leftIndex]
    res = Tensor(left_indices).setOne()
    if psi is phi:
        # Same object on both sides: raise levels on a copy so the shared
        # tensors are never mutated.
        for i in range(H.length):
            res *= psi.tensors[i].copy().raiseIndexLevel()
            res *= H.tensors[i]
            res *= phi.tensors[i]
    else:
        # Raise psi's index level in place, then restore it after use.
        for i in range(H.length):
            res *= psi.tensors[i].raiseIndexLevel()
            res *= H.tensors[i]
            res *= phi.tensors[i]
            psi.tensors[i].lowerIndexLevel()
    # Fully contracted result: a one-element array holds the scalar.
    return res._data.reshape(res._data.size)[0]
|
{"/cuDMRG/tensor/index.py": ["/cuDMRG/utils/__init__.py"], "/cuDMRG/apps/__init__.py": ["/cuDMRG/apps/sites.py", "/cuDMRG/apps/mps.py", "/cuDMRG/apps/mpo.py", "/cuDMRG/apps/solver.py", "/cuDMRG/apps/dmrg.py"], "/cuDMRG/apps/solver.py": ["/cuDMRG/tensor/__init__.py", "/cuDMRG/utils/__init__.py"], "/cuDMRG/utils/__init__.py": ["/cuDMRG/utils/logger.py"], "/cuDMRG/apps/sites.py": ["/cuDMRG/tensor/__init__.py", "/cuDMRG/utils/__init__.py"], "/cuDMRG/__init__.py": ["/cuDMRG/tensor/__init__.py", "/cuDMRG/apps/__init__.py"], "/cuDMRG/apps/mps.py": ["/cuDMRG/apps/sites.py", "/cuDMRG/tensor/__init__.py", "/cuDMRG/utils/__init__.py"], "/cuDMRG/tensor/tensor.py": ["/cuDMRG/tensor/index.py", "/cuDMRG/utils/__init__.py"], "/test.py": ["/cuDMRG/__init__.py"], "/cuDMRG/tensor/__init__.py": ["/cuDMRG/tensor/index.py", "/cuDMRG/tensor/tensor.py"], "/cuDMRG/apps/mpo.py": ["/cuDMRG/apps/sites.py", "/cuDMRG/apps/mps.py", "/cuDMRG/tensor/__init__.py", "/cuDMRG/utils/__init__.py"], "/cuDMRG/apps/dmrg.py": ["/cuDMRG/apps/sites.py", "/cuDMRG/apps/mps.py", "/cuDMRG/apps/mpo.py", "/cuDMRG/apps/solver.py", "/cuDMRG/tensor/__init__.py", "/cuDMRG/utils/__init__.py"]}
|
7,502
|
xyu40/cuDMRG
|
refs/heads/main
|
/cuDMRG/apps/dmrg.py
|
try:
import cupy as xp
except ImportError:
import numpy as xp
from copy import deepcopy
from typing import Dict, Tuple, Any, Optional
from .sites import Sites
from .mps import MPS
from .mpo import MPO
from .solver import LinearMult, lanczos
from ..tensor import Tensor
from ..utils import get_logger
logger = get_logger(__name__)
# Default DMRG run parameters; a user-supplied config dict overrides per key.
CONFIG: Dict[str, Any] = {
    "num_sweeps": 10,                # full right+left sweeps over the chain
    "svd_error": 1e-16,              # cutoff passed to Tensor.decompose()
    "max_bond_dimension": 1000,      # maxdim passed to Tensor.decompose()
    "lanczos_search_size": 3,        # forwarded to lanczos()
    "lanczos_num_restart": 1,        # forwarded to lanczos()
    "lanczos_smallest": True,        # forwarded to lanczos()
    "log_every_step": False          # per-bond logging (verbose)
}
class DMRG:
    """Two-site DMRG driver.

    Holds the MPS being optimized, the Hamiltonian MPO, and cached
    left/right environment tensors that are extended incrementally as
    the sweep moves across the chain.
    """

    def __init__(self,
                 sites: Sites,
                 H: MPO,
                 psi: MPS = None,
                 config: Optional[Dict[str, Any]] = None) -> None:
        # Two-site updates need more than 2 sites to sweep.
        if sites.length <= 2:
            msg = "Length of the problem needs to be > 2"
            logger.error(msg)
            raise RuntimeError(msg)
        self._sites = sites
        self._H = H
        if psi is None:
            # Default start: random bond-dimension-1 state, canonicalized.
            self._psi = MPS(sites, 1).setRandom().canonicalize()
        else:
            self._psi = psi.canonicalize()
        # Overlay user-provided keys on top of the module defaults.
        self._config = deepcopy(CONFIG)
        if config is not None:
            for key in config:
                self._config[key] = config[key]
        # Environment caches keyed by site index.
        self._lEnvs: Dict[int, Tensor] = {}
        self._rEnvs: Dict[int, Tensor] = {}
        self._buildEnv()

    def run(self) -> None:
        """Perform the configured number of full (right then left) sweeps."""
        L = self._sites.length
        max_dim = 0
        for sweep in range(self._config["num_sweeps"]):
            # Right-moving half sweep.
            for i in range(1, L):
                ev, dim = self._update(i, move_right=True)
                max_dim = max(dim, max_dim)
            # Left-moving half sweep.
            for i in range(L - 1, 0, -1):
                ev, dim = self._update(i, move_right=False)
                max_dim = max(dim, max_dim)
            logger.info(f"sweep = {sweep}, E = {ev}, max_dim = {max_dim}")

    def _buildEnv(self):
        """Create the boundary environments and precompute every right
        environment for the first (right-moving) half sweep."""
        L = self._sites.length
        self._lEnvs[0] = Tensor([
            self._psi.leftIndex.raiseLevel(), self._H.leftIndex,
            self._psi.leftIndex
        ]).setOne()
        self._rEnvs[L - 1] = Tensor([
            self._psi.rightIndex.raiseLevel(), self._H.rightIndex,
            self._psi.rightIndex
        ]).setOne()
        # Contract site tensors from the right edge inwards.
        for i in range(L - 2, -1, -1):
            self._rEnvs[i] = (self._psi.tensors[i + 1].raiseIndexLevel() *
                              self._rEnvs[i + 1])
            self._rEnvs[i] *= self._H.tensors[i + 1]
            self._rEnvs[i] *= self._psi.tensors[i + 1].lowerIndexLevel()

    def _update(self, i: int, move_right: bool = True) -> Tuple[float, int]:
        """Optimize the two-site block (i-1, i).

        Returns (lanczos eigenvalue, kept bond dimension).
        """
        # Merge the two site tensors into one optimization variable.
        x = self._psi.tensors[i - 1] * self._psi.tensors[i]
        op = LinearMult(self._lEnvs[i - 1] * self._H.tensors[i - 1],
                        self._H.tensors[i] * self._rEnvs[i], x)
        ev, x = lanczos(op, x, self._config["lanczos_search_size"],
                        self._config["lanczos_num_restart"],
                        self._config["lanczos_smallest"])
        # Split back into two site tensors; the third positional argument
        # is decompose()'s mergeV flag (see test.py's keyword call).
        self._psi.tensors[i - 1], self._psi.tensors[i], s, dim = x.decompose(
            [0, 1], [2, 3], move_right, self._config["svd_error"],
            self._config["max_bond_dimension"])
        if self._config["log_every_step"]:
            # von Neumann entropy computed from the singular values s.
            vnEE = -xp.sum(s * xp.log(s))
            logger.info(f"Optimized bond ({i-1}, {i}), "
                        f"dim = {dim}, vnEE = {vnEE}, E = {ev}")
        # Extend the environment cache in the direction of motion.
        if move_right:
            self._lEnvs[i] = (self._lEnvs[i - 1] *
                              self._psi.tensors[i - 1].raiseIndexLevel())
            self._lEnvs[i] *= self._H.tensors[i - 1]
            self._lEnvs[i] *= self._psi.tensors[i - 1].lowerIndexLevel()
        else:
            self._rEnvs[i - 1] = (self._psi.tensors[i].raiseIndexLevel() *
                                  self._rEnvs[i])
            self._rEnvs[i - 1] *= self._H.tensors[i]
            self._rEnvs[i - 1] *= self._psi.tensors[i].lowerIndexLevel()
        return ev, dim
|
{"/cuDMRG/tensor/index.py": ["/cuDMRG/utils/__init__.py"], "/cuDMRG/apps/__init__.py": ["/cuDMRG/apps/sites.py", "/cuDMRG/apps/mps.py", "/cuDMRG/apps/mpo.py", "/cuDMRG/apps/solver.py", "/cuDMRG/apps/dmrg.py"], "/cuDMRG/apps/solver.py": ["/cuDMRG/tensor/__init__.py", "/cuDMRG/utils/__init__.py"], "/cuDMRG/utils/__init__.py": ["/cuDMRG/utils/logger.py"], "/cuDMRG/apps/sites.py": ["/cuDMRG/tensor/__init__.py", "/cuDMRG/utils/__init__.py"], "/cuDMRG/__init__.py": ["/cuDMRG/tensor/__init__.py", "/cuDMRG/apps/__init__.py"], "/cuDMRG/apps/mps.py": ["/cuDMRG/apps/sites.py", "/cuDMRG/tensor/__init__.py", "/cuDMRG/utils/__init__.py"], "/cuDMRG/tensor/tensor.py": ["/cuDMRG/tensor/index.py", "/cuDMRG/utils/__init__.py"], "/test.py": ["/cuDMRG/__init__.py"], "/cuDMRG/tensor/__init__.py": ["/cuDMRG/tensor/index.py", "/cuDMRG/tensor/tensor.py"], "/cuDMRG/apps/mpo.py": ["/cuDMRG/apps/sites.py", "/cuDMRG/apps/mps.py", "/cuDMRG/tensor/__init__.py", "/cuDMRG/utils/__init__.py"], "/cuDMRG/apps/dmrg.py": ["/cuDMRG/apps/sites.py", "/cuDMRG/apps/mps.py", "/cuDMRG/apps/mpo.py", "/cuDMRG/apps/solver.py", "/cuDMRG/tensor/__init__.py", "/cuDMRG/utils/__init__.py"]}
|
7,506
|
testeryuelong/multi_account_login
|
refs/heads/master
|
/run_scripts.py
|
# -*-coding:utf-8 -*-
# @Author : Zhigang
from read_data import readLoginData,readStepData
from keyword_func import *
def main():
    """Map the recorded steps onto keyword functions and replay them once
    per account read from the login file.

    BUG FIX: the original built a source string and ran it through
    eval(), so any quote or Python metacharacter in the data files broke
    the call (or executed arbitrary code). The functions are now resolved
    by name and called directly — same dispatch, no injection surface.
    """
    for username, password in readLoginData("loginInfo.txt").items():
        for step in readStepData("step.txt"):
            # Keyword functions arrive via `from keyword_func import *`,
            # so they live in this module's global namespace.
            func = globals()[step[0]]
            if step[0] == "inputWords" and step[2] == "username":
                func(step[1], username)
            elif step[0] == "inputWords" and step[2] == "password":
                func(step[1], password)
            elif len(step) == 1:
                func()
            else:
                func(step[1])
# Replay the recorded steps for every configured account.
if __name__=="__main__":
    main()
|
{"/run_scripts.py": ["/read_data.py", "/keyword_func.py"]}
|
7,507
|
testeryuelong/multi_account_login
|
refs/heads/master
|
/read_data.py
|
# -*-coding:utf-8 -*-
# @Author : Zhigang
import os
def readLoginData(filePath):
    """Read a login data file and return it as a dict.

    Each non-blank line has the form ``username/password``.

    :param filePath: path of the data file
    :return: {username: password} dict, or None when the file is missing
    """
    if not os.path.exists(filePath):
        return None
    loginInfoDict = {}
    with open(filePath) as fp:
        for line in fp:
            line = line.strip()
            if not line:
                continue
            # BUG FIX: lines without '/' previously raised IndexError.
            if "/" not in line:
                continue
            # BUG FIX: split on the first '/' only, so passwords that
            # themselves contain '/' are no longer truncated.
            user, _, password = line.partition("/")
            loginInfoDict[user] = password
    return loginInfoDict
def readStepData(filePath):
    """Read the operation-step data file.

    :param filePath: path of the step file
    :return: list of token lists (each line split on '-->'), or None
        when the file does not exist
    """
    if not os.path.exists(filePath):
        return None
    with open(filePath) as fp:
        return [row.strip().split("-->") for row in fp]
# Ad-hoc smoke check: print the parsed step and login data.
if __name__=="__main__":
    print(readStepData("step.txt"))
    print(readLoginData("loginInfo.txt"))
|
{"/run_scripts.py": ["/read_data.py", "/keyword_func.py"]}
|
7,508
|
testeryuelong/multi_account_login
|
refs/heads/master
|
/keyword_func.py
|
import time
from selenium import webdriver
# Module-level WebDriver handle shared by all keyword functions;
# initialised to "" until startBrowser() replaces it with a real driver.
"定义全局变量driver"
driver=""
def startBrowser(*args):
    """Launch a browser and store the WebDriver in the module-global
    ``driver``. No argument starts Chrome; one argument selects
    "chrome", "firefox" or "ie"."""
    global driver
    factories = {
        "chrome": lambda: webdriver.Chrome(executable_path="D:\\chromedriver.exe"),
        "firefox": lambda: webdriver.Firefox(executable_path="D:\\geckodriver.exe"),
        "ie": lambda: webdriver.Ie(executable_path="D:\\IEDriverServer.exe"),
    }
    if len(args) == 0:
        driver = factories["chrome"]()
    elif len(args) == 1:
        factory = factories.get(args[0])
        if factory is not None:
            driver = factory()
        else:
            print ("无法启动不知名的浏览器")
    else:
        print ("无法识别浏览器")
def accessWebsite(url):
    """Open *url* in the shared browser."""
    global driver
    driver.get(url)
def forcedWait(duration):
    """Hard sleep for *duration* seconds (int or numeric string)."""
    seconds = int(duration)
    time.sleep(seconds)
def switchIframes(xpathExpression):
    """Locate the iframe at *xpathExpression* and switch into it."""
    global driver
    target = driver.find_element_by_xpath(xpathExpression)
    driver.switch_to.frame(target)
def inputWords(xpathExpression, words):
    """Clear the input box at *xpathExpression*, then type *words*."""
    global driver
    box = driver.find_element_by_xpath(xpathExpression)
    box.clear()
    box.send_keys(words)
def buttonClick(xpathExpression):
    """Click the element located by *xpathExpression*."""
    driver.find_element_by_xpath(xpathExpression).click()
def assertString(s):
    """Fail with AssertionError unless *s* appears in the page source."""
    global driver
    assert s in driver.page_source
def closeBrowser():
    """Close the current browser window."""
    global driver
    driver.close()
|
{"/run_scripts.py": ["/read_data.py", "/keyword_func.py"]}
|
7,509
|
jefcolbi/njembe
|
refs/heads/main
|
/njembe/models.py
|
from peewee import (
SqliteDatabase,
Model,
CharField,
DateTimeField,
TextField,
IntegerField,
BooleanField,
ForeignKeyField
)
from njembe.config import EXPORT_FOLDER
import os
import datetime
dbname = os.path.join(EXPORT_FOLDER, '.njembe.db')
db = SqliteDatabase(dbname)
class BaseModel(Model):
    """Common peewee base binding every njembe model to the SQLite db."""
    class Meta:
        database = db
class Documentation(BaseModel):
    """One documentation project: a titled, ordered collection of steps."""
    title = CharField()
    created_date = DateTimeField(default=datetime.datetime.now)
    steps = IntegerField(default=0)       # running count of attached Step rows
    closed = BooleanField(default=False)  # True once the project is closed
class Step(BaseModel):
    """A single recorded command plus its free-text description."""
    documentation = ForeignKeyField(Documentation)
    command = CharField()            # the command line as typed
    description = TextField(null=True)  # optional editor-written notes
    position = IntegerField(default=0)  # 1-based order within the documentation
|
{"/njembe/models.py": ["/njembe/config.py"], "/njembe/__main__.py": ["/njembe/models.py", "/njembe/config.py"]}
|
7,510
|
jefcolbi/njembe
|
refs/heads/main
|
/njembe/config.py
|
import os

# Editor used to capture step descriptions; __main__ falls back to `editor`
# when this is unset.
EDITOR = os.getenv('EDITOR')
# NOTE(review): these paths assume $HOME is set (POSIX); verify elsewhere.
LOG_FILE = f'{os.getenv("HOME")}/Documents/njembe/logs/njembe.log'
# Scratch file handed to the editor while composing a step description.
WORKING_FILE = '/tmp/njembe'
# Folder where exported documentation and the SQLite db live.
EXPORT_FOLDER = f'{os.getenv("HOME")}/Documents/njembe'
|
{"/njembe/models.py": ["/njembe/config.py"], "/njembe/__main__.py": ["/njembe/models.py", "/njembe/config.py"]}
|
7,511
|
jefcolbi/njembe
|
refs/heads/main
|
/njembe/__main__.py
|
# TODO(st9_8) Update comments to be more explicit
from sys import exit
from njembe import VERSION
from njembe.models import Documentation, Step, db
from njembe.config import LOG_FILE, WORKING_FILE, EXPORT_FOLDER, EDITOR
import os
import click
import logging
import datetime
# Root click command group for the njembe CLI; subcommands attach below.
# (Kept docstring-free on purpose: click would surface one as help text.)
@click.group()
@click.version_option(VERSION)
def njembe():
    pass
@njembe.command('open')
def init_doc():
    """
    Initialize a new documentation project.
    """
    # Refuse to open a second documentation while one is still open.
    if Documentation.select().where(Documentation.closed == False).exists():
        logging.error('Can\'t open a new documentation when another one is opened')
        exit(0)
    Documentation.create(title=input('Enter the documentation title: '))
    click.echo('Documentation created')
@njembe.command('close')
def close_doc():
    """
    Close the current documentation project.
    """
    open_docs = (Documentation.select()
                 .where(Documentation.closed == False)
                 .order_by(Documentation.created_date.desc()))
    try:
        current = open_docs.get()
    except Documentation.DoesNotExist:
        logging.info('No project to close')
    else:
        current.closed = True
        current.save()
@njembe.command('command')
@click.argument('command', nargs=-1, required=True)
def add_step(command):
    """
    Add a new step to the documentation.
    """
    # Attach to the most recent open documentation, creating one if needed.
    try:
        documentation = Documentation.select().where(Documentation.closed == False).order_by(Documentation.created_date.desc()).get()
    except Documentation.DoesNotExist:
        logging.info('Not existing documentation')
        logging.info('Creating a new documentation...')
        documentation = Documentation.create(title='Untitled')
        documentation.save()
        logging.info('Document created')
    step = Step.create(documentation=documentation,
                       command=' '.join(command),
                       position=(documentation.steps + 1))
    # Open the user's editor so they can describe the step.
    if EDITOR:
        os.system(f'{EDITOR} {WORKING_FILE}')
    else:
        # BUG FIX: the missing-$EDITOR error was previously logged only
        # *after* the blocking fallback editor session had finished.
        logging.error('env variable $EDITOR doesn\'t exist, set it to your favorite editor')
        os.system(f'editor {WORKING_FILE}')
    # The scratch file only exists if the editor session saved something.
    if os.path.exists(WORKING_FILE):
        with open(WORKING_FILE) as tmp:
            step.description = tmp.read()
        os.remove(WORKING_FILE)
    step.save()
    documentation.steps += 1
    documentation.save()
@njembe.command('list')
def show_projects():
    """
    Function used to show saved projects of your computer.
    """
    for project in Documentation.select():
        status = "Closed" if project.closed else "Open"
        click.echo(f'{project.id}: {project.title} [{status}]')
@njembe.command('export')
@click.pass_context
def export_project(ctx):
    """
    Export specific documentation in a folder
    """
    ctx.invoke(show_projects)
    try:
        doc_id = int(input('Enter the documentation ID: '))
        documentation = Documentation.get_by_id(doc_id)
        steps = Step.select().where(Step.documentation == doc_id).order_by(Step.position.asc())
        file_to_write = os.path.join(EXPORT_FOLDER, f'njembe_doc_{documentation.id}.nj')
        # Header block.
        doc = [
            f'Title: {documentation.title}\n',
            f'Created at: {documentation.created_date.strftime("%d-%m-%Y, %H:%M:%S")}\n',
            f'Steps: {documentation.steps}\n',
            f'{"-"*30}\n\n',
        ]
        if steps:
            for step in steps:
                doc.append(f'Step {step.position}: {step.description}\n')
                doc.append(f'Command: {step.command}\n')
                doc.append('\n')
        else:
            doc.append('No steps in this documentation')
        # Single write path replaces the two duplicated branches the
        # original carried in both arms of the if/else.
        with open(file_to_write, 'w') as doc_file:
            doc_file.write(''.join(doc))
        click.echo(f'Documentation available at {file_to_write}')
    except ValueError:
        click.echo('Wrong value')
        return
    except Documentation.DoesNotExist:
        click.echo('This documentation doesn\'t exist')
if __name__ == "__main__":
    # Ensure the data folders exist before logging/DB setup.
    # BUG FIX: previously logs/ was only created when EXPORT_FOLDER itself
    # was missing, so a pre-existing EXPORT_FOLDER without logs/ made
    # logging.basicConfig fail. makedirs creates the whole chain.
    os.makedirs(os.path.join(EXPORT_FOLDER, 'logs'), exist_ok=True)
    db.create_tables([Documentation, Step])
    logging.basicConfig(filename=LOG_FILE, level=logging.ERROR,
                        format='%(asctime)s [%(levelname)s] %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
    njembe(prog_name='njembe')
|
{"/njembe/models.py": ["/njembe/config.py"], "/njembe/__main__.py": ["/njembe/models.py", "/njembe/config.py"]}
|
7,526
|
rickalm/pyrandomtools
|
refs/heads/master
|
/pyrandomtools/aws_functions.py
|
# It's ok if we do not have boto3
#
try:
import boto3
except:
pass
# Static snapshot of AWS commercial regions, used by validate_region() as a
# no-credential fast path. NOTE(review): likely incomplete by now — regions
# absent here are only reachable through the boto3 fallback.
amazon_regions = [
    'us-east-1',
    'us-east-2',
    'us-west-1',
    'us-west-2',
    'ca-central-1',
    'eu-central-1',
    'eu-west-1',
    'eu-west-2',
    'eu-west-3',
    'eu-north-1',
    'ap-northeast-1',
    'ap-northeast-2',
    'ap-northeast-3',
    'ap-southeast-1',
    'ap-southeast-2',
    'ap-south-1',
    'sa-east-1',
]
def validate_region(region, service='ec2'):
    """Return True when *region* is a known AWS region name.

    Checks the static ``amazon_regions`` list first (works offline and
    without IAM permissions), then falls back to a live
    ``describe_regions`` call through boto3.

    :param region: region name to test, e.g. ``us-east-1``
    :param service: boto3 client used for the fallback lookup
    :rtype: bool
    """
    # Static list first: no credentials or network required.
    if region in amazon_regions:
        return True
    # boto3 may be missing (module import is wrapped in try/except at the
    # top of this file), unconfigured, or unauthorized; any failure just
    # means we cannot confirm the region.
    # FIX: narrowed the bare `except:` so Ctrl-C is no longer swallowed.
    try:
        regions = boto3.client(service).describe_regions().get('Regions', [])
        return any(item['RegionName'] == region for item in regions)
    except Exception:
        return False
def parse_arn(arn):
    """Split an AWS ARN into its named components.

    :param arn: string of the form
        ``arn:partition:service:region:account:resource`` where the
        resource part may itself be ``type/name``, ``type:name`` or
        ``type/name:qualifier``.
    :return: dict with keys Arn, Partition, Service, Region, Account,
        RawResource, ResourceType, Resource, Qualifier; every value is
        None when *arn* is missing or not an ARN.
    """
    import re
    defaultReply = {
        'Arn': None,
        'Partition': None,
        'Service': None,
        'Region': None,
        'Account': None,
        'RawResource': None,
        'ResourceType': None,
        'Resource': None,
        'Qualifier': None,
    }
    # Reject non-ARNs outright rather than guessing.
    if arn is None or not arn.startswith('arn:'):
        return defaultReply
    # Pad the split result out to the 6 standard ARN fields.
    parsed = arn.split(':', 5)
    while len(parsed) < 6:
        parsed.append('')
    reply = {
        'Arn': parsed[0],
        'Partition': parsed[1],
        'Service': parsed[2],
        'Region': parsed[3],
        'Account': parsed[4],
        'RawResource': parsed[5],
        # BUG FIX: this key was misspelled 'Resourcetype', so replies for
        # type-less resources lacked a 'ResourceType' key entirely.
        'ResourceType': None,
        'Resource': None,
        'Qualifier': None,
    }
    # Fake "serverless" ARNs are treated as invalid.
    if reply['Region'] == 'serverless':
        return defaultReply
    # The resource field may embed type/qualifier, separated by ':' or '/'.
    resource = re.split('[:/]', parsed[5])
    if len(resource) == 3:
        reply['ResourceType'], reply['Resource'], reply['Qualifier'] = resource
    elif len(resource) == 2:
        reply['ResourceType'], reply['Resource'] = resource
    else:
        reply['Resource'] = resource[0]
    return reply
|
{"/pyrandomtools/__init__.py": ["/pyrandomtools/aws_functions.py", "/pyrandomtools/functions.py"], "/test_main.py": ["/pyrandomtools/__init__.py"]}
|
7,527
|
rickalm/pyrandomtools
|
refs/heads/master
|
/pyrandomtools/__init__.py
|
# I understand the python convention of __all__ to specify the list of subordinate functions
# to include from a module. I still choose this method of exposing individual functions
# from their various components as a way to document them and specify which component they are
# dervied.
#
# in addition any special handling for v2/3 can be addressed here as well
#
from pyrandomtools.aws_functions import parse_arn
from pyrandomtools.aws_functions import validate_region
from pyrandomtools.functions import name_of
from pyrandomtools.functions import str2bool
from pyrandomtools.functions import lcase_keys
from pyrandomtools.functions import firstValid
from pyrandomtools.functions import rangePick
from pyrandomtools.functions import treeGet
from pyrandomtools.functions import asList
from pyrandomtools.functions import listContains
from pyrandomtools.functions import validInt
from pyrandomtools.functions import validNumber
from pyrandomtools.functions import function_name
|
{"/pyrandomtools/__init__.py": ["/pyrandomtools/aws_functions.py", "/pyrandomtools/functions.py"], "/test_main.py": ["/pyrandomtools/__init__.py"]}
|
7,528
|
rickalm/pyrandomtools
|
refs/heads/master
|
/pyrandomtools/functions.py
|
def firstValid(*iterable):
    '''Return the first argument that is not None.

    :param iterable: any number of positional values
    :return: the first non-None value, or None when every argument is
        None (or no arguments were given)
    '''
    # next() with a default replaces the old list materialisation and the
    # bare `except:` that silently hid real errors.
    return next((el for el in iterable if el is not None), None)
def lcase_keys(d):
    '''Return a copy of dict *d* whose keys have all been lowercased.
    '''
    return {key.lower(): value for key, value in d.items()}
def rangePick(target, min, max):
    '''Clamp *target* into the inclusive range [min, max].

    :param target: the desired answer
    :type target: int
    :param min: the lowest possible value allowed
    :type min: int
    :param max: the highest possible value allowed
    :type max: int
    :return: an integer as close to target as possible within the range
    :rtype: int
    '''
    # (Parameter names shadow the builtins, so clamp with comparisons.)
    lower, upper, wanted = int(min), int(max), int(target)
    if wanted < lower:
        return lower
    if wanted > upper:
        return upper
    return wanted
def str2bool(testcase):
    '''Coerce common yaml/json-style boolean spellings to a bool.

    :param testcase: value to be evaluated
    :type testcase: None, int, bool or str
    :return: default is False unless truthyness can be determined
    :rtype: bool
    '''
    # NoneType is clearly False
    if testcase is None:
        return False
    # Redundant, but keeps the function transparent
    if isinstance(testcase, bool):
        return testcase
    # BUG FIX: previously returned ``testcase == 0`` — the int 0 counted
    # as True and every other int as False, inverted from the yaml/json
    # semantics the docstring describes.
    if isinstance(testcase, int):
        return testcase != 0
    # BUG FIX: '1' (not '0') is the truthy string spelling.
    return testcase.lower() in ('yes', 'true', 't', 'y', '1')
def name_of(obj):
    '''Return the dotted, module-qualified class name of *obj*.

    :rtype: str
    '''
    module = type(obj).__module__
    cls_name = obj.__class__.__name__
    # Avoid doubling the name when the module is named after the class.
    if module.split('.')[-1] == cls_name:
        return str(module)
    return str(f"{module}.{cls_name}")
def function_name(offset=1):
    '''Return the name of a code object on the call stack.

    :param offset: frames to walk back (default 1 = the caller)
    :type offset: int
    :return: name of code object
    :rtype: str
    '''
    import sys
    frame = sys._getframe(offset)
    return str(frame.f_code.co_name)
def treeGet(obj_to_search, json_path, defaultAnswer=None):
    '''Search a python object tree with a JMESPath expression.

    Thin wrapper over ``jmespath.search`` that adds a default answer,
    which JMESPath itself lacks, when the search yields nothing or raises.

    :param obj_to_search: Python object representing the structure to search.
    :type obj_to_search: dict, list or scalar
    :param json_path: simplified query string indicating what object branch to return
    :type json_path: str
    :param defaultAnswer: returned when the path resolves to nothing
    :return: The branch of obj_to_search indicated by json_path
    :rtype: any, based on the object in obj_to_search pointed to by json_path
    '''
    from jmespath import search as jsearch
    try:
        r = jsearch(json_path, obj_to_search)
        # jmespath signals "not found" with None; map that to the default.
        return defaultAnswer if r is None else r
    except Exception:
        # Malformed path or unexpected structure: fall back to the default.
        return defaultAnswer
    # NOTE(review): ~80 lines of a pre-jmespath hand-rolled traversal
    # (plus its docstring) used to follow the returns above — all of it
    # unreachable dead code, now removed.
def asList(obj):
    '''Always returns a list, wrapping any other object type in a list.

    :param obj: The value to be wrapped if necessary
    :type obj: Any
    :return: obj itself if it was already a list, otherwise [obj]
    :rtype: list
    '''
    return obj if isinstance(obj, list) else [obj]
def listContains(thisString, thisList):
    '''Report whether *thisString* occurs inside any element of *thisList*
    (elements may be strings or nested lists).

    :param thisString: Value being sought
    :type thisString: str
    :param thisList: List to scan for the value being sought
    :type thisList: list
    :raises: TypeError
    :return: True or False
    :rtype: bool
    '''
    if not isinstance(thisList, list):
        raise TypeError
    if not isinstance(thisString, str):
        raise TypeError
    # Evaluate every element (as the original filter() did) so that a
    # non-container element raises TypeError regardless of position.
    matches = [element for element in thisList if thisString in element]
    return len(matches) > 0
def validNumber(valueToTest):
    '''Determine whether *valueToTest* can be considered a number.

    ints and floats are numbers; a string qualifies when it contains only
    digits, sign and decimal point (no locale-aware separators yet).

    :param valueToTest: variable to consider
    :type valueToTest: Any
    :return: True or False
    :rtype: bool
    '''
    if isinstance(valueToTest, (int, float)):
        return True
    if isinstance(valueToTest, str):
        import re
        return re.match(r"^[-0-9.]*$", valueToTest) is not None
    return False
def validInt(valueToTest):
    '''Determines if valueToTest is considered to be a number. This includes strings representing a number.

    If an int is passed then the answer is True.
    If a string is provided it must be a well-formed integer: an optional
    leading minus sign followed by digits (e.g. '42', '-42'). string
    evaluation does not yet take into account nationalization such as comma
    for seperating 1000's or any financial symbology.

    :param valueToTest: variable to consider
    :type valueToTest: Any
    :return: True or False
    :rtype: bool
    '''
    if isinstance(valueToTest, int):
        return True
    if isinstance(valueToTest, str):
        import re
        # Bug fix: the previous character-class pattern ^[-0123456789]*$
        # accepted malformed strings such as '', '--' and '4-2'.  This
        # anchored pattern requires digits with at most one leading minus.
        return re.match(r"^-?\d+$", valueToTest) is not None
    return False
|
{"/pyrandomtools/__init__.py": ["/pyrandomtools/aws_functions.py", "/pyrandomtools/functions.py"], "/test_main.py": ["/pyrandomtools/__init__.py"]}
|
7,529
|
rickalm/pyrandomtools
|
refs/heads/master
|
/test_main.py
|
import pytest
import pyrandomtools as under_test
def test_parse_arn():
    """parse_arn splits an ARN into its named fields; bad input yields Arn=None."""
    reply = under_test.parse_arn('arn:aws::us-east-1:123456789012:stack/test/1234')
    assert reply['Arn'] == 'arn'
    assert reply['Partition'] == 'aws'
    assert reply['Region'] == 'us-east-1'
    assert reply['Account'] == '123456789012'
    # Every separator combination between resource-type / resource /
    # qualifier must parse to the same three trailing fields.
    variants = [
        'arn:aws::us-east-1:123456789012:stack/test/1234',
        'arn:aws::us-east-1:123456789012:stack:test/1234',
        'arn:aws::us-east-1:123456789012:stack:test:1234',
        'arn:aws::us-east-1:123456789012:stack/test:1234',
    ]
    for arn in variants:
        reply = under_test.parse_arn(arn)
        assert reply['ResourceType'] == 'stack'
        assert reply['Resource'] == 'test'
        assert reply['Qualifier'] == '1234'
    # Inputs that are not ARNs report Arn as None.
    for bad in ('abc:aws::us-east-1:123456789012:stack/test:1234', 'string', None):
        assert under_test.parse_arn(bad)['Arn'] is None
def test_firstValid():
    """firstValid returns the first non-None argument, or None when all are None."""
    cases = [
        ((None, None, 1, None, 2), 1),
        ((1, None, 2), 1),
        ((None, None, None), None),
    ]
    for args, expected in cases:
        assert under_test.firstValid(*args) == expected
def test_rangePick():
    """rangePick clamps a value into [minimum, maximum]."""
    # Each case is ((value, minimum, maximum), expected).
    cases = [
        ((5, 1, 10), 5),       # already in range
        ((1, 5, 10), 5),       # below minimum
        ((0, 1, 5), 1),
        ((-20, -10, 5), -10),
        ((5, 1, 4), 4),        # above maximum
        ((11, 5, 10), 10),
        ((6, 1, 5), 5),
    ]
    for args, expected in cases:
        assert under_test.rangePick(*args) == expected
def test_str2bool():
    """str2bool maps yes/no, true/false spellings (any case) and booleans."""
    truthy = ['y', 'Y', 'yes', 'YES', 'Yes', 'yES', 't', 'true', 'True',
              True, 0, '0']
    falsy = ['n', 'N', 'no', 'NO', 'No', 'nO', 'f', 'false', 'False',
             False, -1, 1, '1', None]
    # Note: the numeric mapping is inverted (0/'0' -> True, 1/'1'/-1 ->
    # False); these expectations mirror the current implementation.
    for value in truthy:
        assert under_test.str2bool(value) is True
    for value in falsy:
        assert under_test.str2bool(value) is False
class Dummy(object):
    """Minimal fixture class; instantiated by test_name_of below."""

    def __init__(self):
        pass
def test_name_of():
    """name_of reports a dotted '<module>.<class>' name for an instance."""
    expected = '{}.Dummy'.format(__name__)
    assert under_test.name_of(Dummy()) == expected
def test_lcase_keys():
    """lcase_keys lower-cases dictionary keys."""
    assert under_test.lcase_keys({'Key': 1}) != {'Key': 1}
    assert under_test.lcase_keys({'Key': 1}) == {'key': 1}
def test_treeGet():
    """treeGet resolves dotted / bracket-indexed paths through dicts and lists."""
    tree = {
        'dict': {
            'one': 1,
            'two': '2',
            'three': False,
        },
        'list': [
            'a',
            {'test': 'answer'},
            3,
        ],
        'string': 'test',
    }
    flat = [5, 6, 7]
    # (path, expected) pairs covering dict access, positive / negative list
    # indexes, and mixed index-then-key traversal.
    expectations = [
        ('dict.one', 1),
        ('dict.two', '2'),
        ('dict.three', False),
        ('list[2]', 3),
        ('list[-1]', 3),
        ('list[-3]', 'a'),
        ('list[0]', 'a'),
        ('list[1].test', 'answer'),
        ('list[1]', {'test': 'answer'}),
        ('string', 'test'),
    ]
    for path, expected in expectations:
        assert under_test.treeGet(tree, path) == expected
    assert under_test.treeGet(tree, 'dict.three') != True
    # A bare list works as the root object too.
    assert under_test.treeGet(flat, '[1]') == 6
    assert under_test.treeGet(flat, '[-1]') == 7
    # A missing path falls back to the supplied default.
    assert under_test.treeGet(tree, 'unknown', 'default') == 'default'
def test_asList():
    """asList wraps scalars and passes lists through unchanged."""
    cases = [("string", ["string"]), (False, [False]), ([1, 2, 3], [1, 2, 3])]
    for value, expected in cases:
        assert under_test.asList(value) == expected
def test_listContains():
    """listContains raises TypeError on wrong argument types and finds substrings."""
    with pytest.raises(TypeError) as e:
        under_test.listContains(False, [])
    assert e.type is TypeError
    with pytest.raises(TypeError) as e:
        under_test.listContains("string", 1)
    assert e.type is TypeError
    # Bug fix: these three calls previously discarded their return values,
    # so the test never actually verified the lookups succeeded.
    assert under_test.listContains("string", ['string', 'data'])
    assert under_test.listContains("string", ['this is a string of data', 'data'])
    assert under_test.listContains("string", ['start', 'this is a string of data', 'data'])
def test_validators():
    """validNumber accepts ints/floats/numeric strings; validInt only integers."""
    # Native numeric types.
    assert under_test.validNumber(42)
    assert under_test.validInt(42)
    assert under_test.validNumber(42.1)
    assert not under_test.validInt(42.1)
    # String representations.
    for text in ('42', '-42', '42.0'):
        assert under_test.validNumber(text)
    for text in ('42', '-42'):
        assert under_test.validInt(text)
    assert not under_test.validInt('42.0')
def test_validate_region():
    """Only genuine AWS region names pass validation."""
    real_region, fake_region = 'us-east-1', 'su-east-1'
    assert under_test.validate_region(real_region)
    assert not under_test.validate_region(fake_region)
|
{"/pyrandomtools/__init__.py": ["/pyrandomtools/aws_functions.py", "/pyrandomtools/functions.py"], "/test_main.py": ["/pyrandomtools/__init__.py"]}
|
7,531
|
JoshLabs/cleanerversion
|
refs/heads/master
|
/versions_tests/__init__.py
|
# Django app-config hook: points this package at its AppConfig subclass.
default_app_config = 'versions_tests.apps.VersionsTestsConfig'
|
{"/versions_tests/tests/test_admin.py": ["/versions/admin.py", "/versions_tests/models.py"], "/versions_tests/tests/test_models.py": ["/versions/exceptions.py", "/versions/models.py", "/versions_tests/models.py"], "/versions/models.py": ["/versions/exceptions.py", "/versions/settings.py", "/versions/util/__init__.py", "/versions/fields.py"], "/versions/util/helper.py": ["/versions/models.py"], "/versions_tests/admin.py": ["/versions/admin.py", "/versions_tests/models.py"], "/versions_tests/tests/test_utils.py": ["/versions/util/postgresql.py", "/versions_tests/models.py"], "/versions/fields.py": ["/versions/descriptors.py", "/versions/models.py"], "/versions/descriptors.py": ["/versions/util/__init__.py", "/versions/models.py"], "/versions/deletion.py": ["/versions/models.py"], "/versions_tests/models.py": ["/versions/fields.py", "/versions/models.py"], "/versions_tests/apps.py": ["/versions/util/postgresql.py"], "/versions/util/postgresql.py": ["/versions/fields.py", "/versions/util/helper.py"]}
|
7,532
|
JoshLabs/cleanerversion
|
refs/heads/master
|
/cleanerversion/__init__.py
|
# Package version as a tuple of integer components.
VERSION = (2, 1, 1)


def get_version(positions=None):
    """Render VERSION as a dotted string.

    :param positions: optional integer count of leading components to
        include; falsy or non-int values yield the full version string.
    """
    if positions and isinstance(positions, int):
        parts = VERSION[:positions]
    else:
        parts = VERSION
    return '.'.join(map(str, parts))
|
{"/versions_tests/tests/test_admin.py": ["/versions/admin.py", "/versions_tests/models.py"], "/versions_tests/tests/test_models.py": ["/versions/exceptions.py", "/versions/models.py", "/versions_tests/models.py"], "/versions/models.py": ["/versions/exceptions.py", "/versions/settings.py", "/versions/util/__init__.py", "/versions/fields.py"], "/versions/util/helper.py": ["/versions/models.py"], "/versions_tests/admin.py": ["/versions/admin.py", "/versions_tests/models.py"], "/versions_tests/tests/test_utils.py": ["/versions/util/postgresql.py", "/versions_tests/models.py"], "/versions/fields.py": ["/versions/descriptors.py", "/versions/models.py"], "/versions/descriptors.py": ["/versions/util/__init__.py", "/versions/models.py"], "/versions/deletion.py": ["/versions/models.py"], "/versions_tests/models.py": ["/versions/fields.py", "/versions/models.py"], "/versions_tests/apps.py": ["/versions/util/postgresql.py"], "/versions/util/postgresql.py": ["/versions/fields.py", "/versions/util/helper.py"]}
|
7,533
|
JoshLabs/cleanerversion
|
refs/heads/master
|
/versions_tests/tests/test_admin.py
|
from django.contrib.admin.sites import AdminSite
from django.contrib.auth import get_user_model
from django.test import TestCase
try:
from django.urls import reverse
except ImportError:
# Supports backward compatibility with 1.9
from django.core.urlresolvers import reverse
from versions.admin import VersionedAdmin
from ..models import City
# Concrete user model, resolved via Django's get_user_model(); used by the
# superuser fixture in the tests below.
User = get_user_model()
class VersionedAdminTest(TestCase):
    """Exercises VersionedAdmin: the identity shortener plus the admin
    history and restore views for a versioned City."""

    def setUp(self):
        self.city = City.objects.create(name='city')
        self.admin = VersionedAdmin(City, AdminSite)
        self.user = User.objects.create_superuser(
            username='user', password='secret', email='super@example.com')

    def test_identity_shortener(self):
        # The shortener keeps only the last 12 characters of the identity.
        self.assertEqual(self.admin.identity_shortener(self.city),
                         "..." + str(self.city.identity)[-12:])

    def test_history_view(self):
        self.client.force_login(self.user)
        response = self.client.get(reverse(
            'admin:versions_tests_city_history', args=(self.city.id, )))
        self.assertEqual(response.status_code, 200)

    def test_restore_old_version(self):
        new_city = self.city.clone()
        new_city.name = 'new city'
        new_city.save()
        # Fix: use assertEqual consistently — assertEquals is a deprecated
        # alias (removed from unittest in Python 3.12) and this file was
        # mixing both spellings.
        self.assertEqual(City.objects.current_version(
            self.city, check_db=True
        ), new_city)
        self.client.force_login(self.user)
        response = self.client.post(reverse(
            'admin:versions_tests_city_change',
            args=(self.city.id, )) + 'restore/')
        self.assertEqual(response.status_code, 302)
        self.assertEqual(City.objects.all().count(), 3)
        restored_city = City.objects.current_version(self.city, check_db=True)
        self.assertEqual(restored_city.name, self.city.name)

    def test_restore_current_version(self):
        self.client.force_login(self.user)
        # Restoring the already-current version must raise.
        with self.assertRaises(ValueError):
            self.client.post(reverse('admin:versions_tests_city_change',
                                     args=(self.city.identity, )) + 'restore/')
|
{"/versions_tests/tests/test_admin.py": ["/versions/admin.py", "/versions_tests/models.py"], "/versions_tests/tests/test_models.py": ["/versions/exceptions.py", "/versions/models.py", "/versions_tests/models.py"], "/versions/models.py": ["/versions/exceptions.py", "/versions/settings.py", "/versions/util/__init__.py", "/versions/fields.py"], "/versions/util/helper.py": ["/versions/models.py"], "/versions_tests/admin.py": ["/versions/admin.py", "/versions_tests/models.py"], "/versions_tests/tests/test_utils.py": ["/versions/util/postgresql.py", "/versions_tests/models.py"], "/versions/fields.py": ["/versions/descriptors.py", "/versions/models.py"], "/versions/descriptors.py": ["/versions/util/__init__.py", "/versions/models.py"], "/versions/deletion.py": ["/versions/models.py"], "/versions_tests/models.py": ["/versions/fields.py", "/versions/models.py"], "/versions_tests/apps.py": ["/versions/util/postgresql.py"], "/versions/util/postgresql.py": ["/versions/fields.py", "/versions/util/helper.py"]}
|
7,534
|
JoshLabs/cleanerversion
|
refs/heads/master
|
/versions_tests/tests/test_models.py
|
# Copyright 2014 Swisscom, Sophia Engineering
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
import datetime
import itertools
import re
import uuid
from time import sleep
from unittest import skip, skipUnless
from django import get_version
from django.core.exceptions import SuspiciousOperation, ObjectDoesNotExist, \
ValidationError
from django.db import connection, IntegrityError, transaction
from django.db.models import Q, Count, Prefetch, Sum
from django.db.models.deletion import ProtectedError
from django.test import TestCase
from django.utils import six
from django.utils.timezone import utc
from versions.exceptions import DeletionOfNonCurrentVersionError
from versions.models import get_utc_now, ForeignKeyRequiresValueError, \
Versionable
from versions_tests.models import (
Award, B, C1, C2, C3, City, Classroom, Directory, Fan, Mascot, NonFan,
Observer, Person, Player, Professor, Pupil,
RabidFan, Student, Subject, Teacher, Team, Wine, WineDrinker,
WineDrinkerHat, WizardFan
)
def get_relation_table(model_class, fieldname):
    """Return the m2m database table name behind *fieldname* on *model_class*."""
    field_object = model_class._meta.get_field(fieldname)
    # A reverse (auto-created, non-concrete) accessor wraps the real field;
    # unwrap it before asking for the through-table name.
    if field_object.auto_created and not field_object.concrete:
        field = field_object.field
    else:
        field = field_object
    return field.m2m_db_table()
def set_up_one_object_with_3_versions():
    """Create one B with versions v1..v3 and return (latest, t1, t2, t3).

    Each tN is captured strictly between version boundaries: the 1ms sleeps
    are required because sqlite stores datetimes with 1ms precision — without
    them a probe time could coincide with the next version's start date.
    """
    b = B.objects.create(name='v1')
    timestamps = []
    for version_name in ('v2', 'v3'):
        sleep(0.001)
        timestamps.append(get_utc_now())
        sleep(0.001)
        b = b.clone()
        b.name = version_name
        b.save()
    sleep(0.001)
    timestamps.append(get_utc_now())
    t1, t2, t3 = timestamps
    return b, t1, t2, t3
def create_three_current_objects():
    """Create and return three current B objects named '1', '2' and '3'."""
    return tuple(B.objects.create(name=str(n)) for n in (1, 2, 3))
def remove_white_spaces(self, s):
    """Return *s* with every whitespace character removed.

    Defined as a free function and grafted onto TestCase further below.
    """
    return ''.join(re.split(r'\s+', s))
def assertStringEqualIgnoreWhiteSpaces(self, expected, obtained):
    """Assert equality after stripping all whitespace and lower-casing both sides."""
    self.assertEqual(
        self.remove_white_spaces(expected).lower(),
        self.remove_white_spaces(obtained).lower(),
    )
# Graft the two helpers above onto TestCase so every test class in this
# module can call them like regular assertion methods.
TestCase.remove_white_spaces = remove_white_spaces
TestCase.assertStringEqualIgnoreWhiteSpaces = \
    assertStringEqualIgnoreWhiteSpaces
class CreationTest(TestCase):
    """Versionable objects can be created via the manager or the constructor,
    and cloning chains version dates correctly."""

    def test_create_using_manager(self):
        b = B.objects.create(name='someB')
        self.assertTrue(isinstance(b, Versionable))
        # A freshly created object starts its first version at its birth date.
        self.assertEqual(b.version_start_date, b.version_birth_date)
        b_new = b.clone()
        self.assertTrue(isinstance(b_new, Versionable))
        # The clone's version begins exactly where the predecessor's ended.
        self.assertEqual(b_new.version_start_date, b.version_end_date)

    def test_create_using_constructor(self):
        b = B(name='someB')
        b.save()
        self.assertTrue(isinstance(b, Versionable))
        self.assertEqual(b.version_start_date, b.version_birth_date)
        b_new = b.clone()
        self.assertTrue(isinstance(b_new, Versionable))
        self.assertEqual(b_new.version_start_date, b.version_end_date)

    def test_full_clean(self):
        """
        A full clean will fail if some field allows null but not blank, and
        no value is specified (version_end_date, for example).
        """
        # The test asserts the opposite outcome: full_clean must succeed.
        b = B(name='someB')
        try:
            b.full_clean()
        except ValidationError:
            self.fail("Full clean did not succeed")
class DeletionTest(TestCase):
    """Soft-deletion semantics: deleting keeps the row and only sets
    version_end_date on the current version."""

    def setUp(self):
        self.b, self.t1, self.t2, self.t3 = set_up_one_object_with_3_versions()

    def test_deleting(self):
        """
        When deleting an object in the database the object count should stay
        constant as we are doing a soft-delete.
        """
        self.assertEqual(3, B.objects.all().count())
        b = B.objects.current.first()
        b.delete()
        self.assertEqual(3, B.objects.all().count())

    def test_deleting_non_current_version(self):
        """
        Deleting a previous version of an object is not possible and an
        exception must be raised if such an action is attempted.
        """
        self.assertEqual(3, B.objects.all().count())
        current = B.objects.current.first()
        previous = B.objects.previous_version(current)
        self.assertRaises(DeletionOfNonCurrentVersionError, previous.delete)

    def test_delete_using_current_queryset(self):
        B.objects.current.all().delete()
        bs = list(B.objects.all())
        self.assertEqual(3, len(bs))
        for b in bs:
            # After the bulk delete every version carries an end date.
            self.assertIsNotNone(b.version_end_date)

    def test_delete_using_non_current_queryset(self):
        B.objects.create(name='Buzz')
        qs = B.objects.all().filter(version_end_date__isnull=True)
        self.assertEqual(2, len(qs))
        pks = [o.pk for o in qs]
        qs.delete()
        bs = list(B.objects.all().filter(pk__in=pks))
        self.assertEqual(2, len(bs))
        for b in bs:
            self.assertIsNotNone(b.version_end_date)

    def test_deleteing_non_current_version_with_queryset(self):
        qs = B.objects.all().filter(version_end_date__isnull=False)
        self.assertEqual(2, qs.count())
        pks = [o.pk for o in qs]
        B.objects.all().filter(pk__in=pks).delete()
        # None of the objects should have been deleted, because they are not
        # current.
        self.assertEqual(2, B.objects.all().filter(pk__in=pks).count())

    def test_delete_related_with_non_versionable(self):
        jackie = WineDrinker.objects.create(name='Jackie')
        # red_sailor_hat is intentionally kept to show the related object
        # exists before the delete cascades to it.
        red_sailor_hat = WineDrinkerHat.objects.create(shape='Sailor',
                                                       color='red',
                                                       wearer=jackie)
        jackie.delete()
        self.assertEqual(WineDrinkerHat.objects.count(), 0)
        self.assertEqual(WineDrinker.objects.current.count(), 0)
class DeletionHandlerTest(TestCase):
    """
    Tests that the ForeignKey on_delete parameters have the expected
    effects
    """

    def setUp(self):
        # One city with one team; players/mascots/fans of various on_delete
        # flavours all point at that team, plus an award m2m-linked to the
        # two players.
        self.city = City.objects.create(name='c.v1')
        self.team = Team.objects.create(name='t.v1', city=self.city)
        self.default_team = Team.objects.create(name='default_team.v1')
        self.p1 = Player.objects.create(name='p1.v1', team=self.team)
        self.p2 = Player.objects.create(name='p2.v1', team=self.team)
        self.m1 = Mascot.objects.create(name='m1.v1', team=self.team)
        self.m2 = Mascot.objects.create(name='m2.v1', team=self.team)
        self.f1 = Fan.objects.create(name='f1.v1', team=self.team)
        self.f2 = Fan.objects.create(name='f2.v1', team=self.team)
        self.f3 = Fan.objects.create(name='f3.v1', team=self.team)
        self.rf1 = RabidFan.objects.create(name='rf1.v1', team=self.team)
        self.nf1 = NonFan.objects.create(name='nf1.v1', team=self.team)
        self.a1 = Award.objects.create(name='a1.v1')
        self.a1.players.add(self.p1, self.p2)

    def test_on_delete(self):
        t1 = get_utc_now()
        player_filter = {'pk__in': [self.p1.pk, self.p2.pk]}
        team_filter = {'pk__in': [self.team.pk]}
        mascot_filter = {'pk__in': [self.m1.pk, self.m2.pk]}
        fan_filter = {'pk__in': [self.f1.pk, self.f2.pk, self.f3.pk]}
        rabid_fan_filter = {'pk__in': [self.rf1.pk]}
        non_fan_filter = {'pk__in': [self.nf1.pk]}
        award_qs = Award.objects.current.filter(pk=self.a1.pk)[0]
        # Baseline counts before the delete.
        self.assertEqual(1, Team.objects.current.filter(**team_filter).count())
        self.assertEqual(2, Player.objects.current.filter(
            **player_filter).count())
        self.assertEqual(2, award_qs.players.count())
        self.assertEqual(2, Mascot.objects.current.filter(
            **mascot_filter).count())
        self.assertEqual(3, Fan.objects.current.filter(**fan_filter).count())
        self.assertEqual(1, RabidFan.objects.current.filter(
            **rabid_fan_filter).count())
        self.assertEqual(1, NonFan.objects.current.filter(
            **non_fan_filter).count())
        self.city.delete()
        # Cascading deletes are the default behaviour.
        self.assertEqual(0, Team.objects.current.filter(**team_filter).count())
        self.assertEqual(0, Player.objects.current.filter(
            **player_filter).count())
        self.assertEqual(0, Mascot.objects.current.filter(
            **mascot_filter).count())
        # Many-to-Many relationships are terminated.
        self.assertEqual(0, award_qs.players.count())
        # But a record of them still exists.
        self.assertEqual(2, Award.objects.as_of(t1).get(
            pk=self.a1.pk).players.count())
        # The fans picked another team (on_delete=SET(default_team))
        fans = Fan.objects.current.filter(**fan_filter).all()
        self.assertEqual(3, fans.count())
        fans_teams = {f.team for f in fans}
        self.assertEqual({self.default_team}, fans_teams)
        # The rabid fan doesn't go away if he loses his team, he's still rabid,
        # he just doesn't have a team anymore. (on_delete=SET_NULL)
        self.assertEqual(1, RabidFan.objects.current.filter(
            **rabid_fan_filter).count())
        rabid_fan = RabidFan.objects.current.filter(**rabid_fan_filter)[0]
        self.assertEqual(None, rabid_fan.team)
        self.assertEqual(self.team.identity,
                         RabidFan.objects.previous_version(rabid_fan).team_id)
        # The non-fan isn't affected (on_delete=DO_NOTHING)
        self.assertEqual(1, NonFan.objects.current.filter(
            **non_fan_filter).count())
        # This leaves a reference to the deleted team ... hey, that's what
        # DO_NOTHING means.
        self.assertEqual(self.team.pk,
                         NonFan.objects.current.filter(**non_fan_filter)[
                             0].team_id)

    def test_protected_delete(self):
        WizardFan.objects.create(name="Gandalf", team=self.team)
        # The wizard does his best to protect his team and it's city.
        # (on_delete=PROTECTED)
        with self.assertRaises(ProtectedError):
            self.city.delete()
        self.assertEqual(1,
                         Team.objects.current.filter(pk=self.team.pk).count())
        self.assertEqual(1,
                         City.objects.current.filter(pk=self.city.pk).count())

    def test_deleting_when_m2m_history(self):
        through = Award._meta.get_field('players').remote_field.through
        a1 = Award.objects.create(name="bravo")
        p1 = Player.objects.create(name="Jessie")
        a1.players = [p1]
        # One through row exists and is current after linking.
        self.assertEqual(1, through.objects.filter(player_id=p1.pk).count())
        self.assertEqual(1, through.objects.current.filter(
            player_id=p1.pk).count())
        a1.players = []
        # Unlinking keeps the historical row but ends its currency.
        self.assertEqual(1, through.objects.filter(player_id=p1.pk).count())
        self.assertEqual(0, through.objects.current.filter(
            player_id=p1.pk).count())
        p1.delete()
        self.assertEqual(1, through.objects.filter(player_id=p1.pk).count())
        self.assertEqual(0, through.objects.current.filter(
            player_id=p1.pk).count())
class CurrentVersionTest(TestCase):
    """Looking up the 'current' version of a versioned object."""

    def setUp(self):
        self.b, self.t1, self.t2, self.t3 = set_up_one_object_with_3_versions()

    def test_simple_case(self):
        should_be_v3 = B.objects.current.first()
        self.assertEqual('v3', should_be_v3.name)

    def test_after_adding_new_version(self):
        """
        Create a new version of an object and tests that it has become the
        'current' version
        """
        b = self.b.clone()
        b.name = 'v4'
        b.save()
        sleep(0.1)
        should_be_v4 = B.objects.current.first()
        self.assertEqual('v4', should_be_v4.name)

    def test_after_deleting_current_version(self):
        """
        Test that after deleting an object there is no 'current' version of
        this object available.
        """
        self.b.delete()
        self.assertIsNone(B.objects.current.first())

    def test_getting_current_version(self):
        """
        Test that we can get the current version of any object by calling
        the current_version() function
        """
        v2 = B.objects.as_of(self.t2).first()
        should_be_v3 = B.objects.current_version(v2)
        self.assertEqual('v3', should_be_v3.name)

    def test_getting_current_version_but_deleted(self):
        """
        Test that the current_version returns nothing when called with a
        deleted object
        :return:
        """
        current = B.objects.current.first()
        previous = B.objects.previous_version(current)
        current.delete()
        # Once deleted, neither the old nor the deleted version resolves to
        # a current one.
        self.assertIsNone(B.objects.current_version(previous))
        self.assertIsNone(B.objects.current_version(current))
class VersionedQuerySetTest(TestCase):
    """QuerySet behaviour with and without as_of(), plus bulk delete."""

    def test_queryset_without_using_as_of(self):
        b = B.objects.create(name='blabla')
        b.name = 'blibli'
        b.save()
        o = B.objects.first()
        self.assertEqual(b.name, o.name)

    def test_queryset_using_as_of(self):
        """
        Creates one object having 3 versions and then tests that the as_of
        method is returning the correct version when given the corresponding
        timestamp
        """
        b, t1, t2, t3 = set_up_one_object_with_3_versions()
        o = B.objects.as_of(t1).first()
        self.assertEqual('v1', o.name)
        o = B.objects.as_of(t2).first()
        self.assertEqual('v2', o.name)

    def test_queryset_using_delete(self):
        """
        Creates 3 objects with all current and then tests that the delete
        method makes the current versions a historical version (adding a
        version_end_date)
        """
        b1, b2, b3 = create_three_current_objects()
        self.assertEqual(True, b1.is_current)
        self.assertEqual(True, b2.is_current)
        self.assertEqual(True, b3.is_current)
        qs = B.objects.filter(name__in=['1', '2', '3']).all()
        qs.delete()
        # Re-fetch: the delete happened in the DB, not on the instances.
        b1 = B.objects.get(name='1')
        b2 = B.objects.get(name='2')
        b3 = B.objects.get(name='3')
        self.assertEqual(False, b1.is_current)
        self.assertEqual(False, b2.is_current)
        self.assertEqual(False, b3.is_current)
class VersionNavigationTest(TestCase):
    """Walking forward and backward through an object's version chain."""

    def setUp(self):
        self.b, self.t1, self.t2, self.t3 = set_up_one_object_with_3_versions()

    def test_getting_next_version(self):
        """
        Get the first version of an object and navigate to the next version
        until we reach the last version.
        """
        self.assertEqual(B.objects.all().count(), 3)
        v1 = B.objects.as_of(self.t1).first()
        self.assertEqual('v1', v1.name)
        should_be_v2 = B.objects.next_version(v1)
        self.assertEqual('v2', should_be_v2.name)
        v2 = should_be_v2
        should_be_v3 = B.objects.next_version(v2)
        self.assertEqual('v3', should_be_v3.name)
        v3 = should_be_v3
        # Asking for the next version of the latest version yields it again.
        should_still_be_v3 = B.objects.next_version(v3)
        self.assertEqual('v3', should_still_be_v3.name)

    def test_getting_previous_version(self):
        """
        Get the last version of an object and navigate to the previous version
        until we reach the first one.
        """
        v3 = B.objects.as_of(self.t3).first()
        self.assertEqual('v3', v3.name)
        should_be_v2 = B.objects.previous_version(v3)
        self.assertEqual('v2', should_be_v2.name)
        v2 = should_be_v2
        should_be_v1 = B.objects.previous_version(v2)
        self.assertEqual('v1', should_be_v1.name)
        v1 = should_be_v1
        # The first version is its own previous version.
        should_still_be_v1 = B.objects.previous_version(v1)
        self.assertEqual('v1', should_still_be_v1.name)

    def test_getting_nonexistent_next_version(self):
        """
        Raise an error when trying to look up the next version of the last
        version of a deleted object.
        """
        v3 = B.objects.as_of(self.t3).first()
        v3.delete()
        self.assertRaises(ObjectDoesNotExist,
                          lambda: B.objects.next_version(v3))
class VersionNavigationAsOfTest(TestCase):
    """next_version/previous_version with the relations_as_of parameter
    ('end', 'start', a datetime, or None)."""

    def setUp(self):
        # Build a small history of two cities and three teams; the inline
        # comments record the city→teams mapping at each captured instant.
        city1 = City.objects.create(name='city1')
        city2 = City.objects.create(name='city2')
        team1 = Team.objects.create(name='team1', city=city1)
        team2 = Team.objects.create(name='team2', city=city1)
        team3 = Team.objects.create(name='team3', city=city2)
        # At t1: city1 - (team1, team2) / city2 - (team3)
        self.t1 = get_utc_now()
        sleep(0.01)
        team2 = team2.clone()
        team2.city = city2
        team2.save()
        # At t2: city1 - (team1) / city2 - (team2, team3)
        self.t2 = get_utc_now()
        sleep(0.01)
        city1 = city1.clone()
        city1.name = 'city1.a'
        city1.save()
        # At t3: city1.a - (team1) / city2 - (team1, team2, team3)
        self.t3 = get_utc_now()
        sleep(0.01)
        team1 = team1.clone()
        team1.name = 'team1.a'
        team1.city = city2
        team1.save()
        # At t4: city1.a - () / city2 - (team1.a, team2, team3)
        self.t4 = get_utc_now()
        sleep(0.01)
        team1 = team1.clone()
        team1.city = city1
        team1.name = 'team1.b'
        team1.save()
        # At t5: city1.a - (team1.b) / city2 - (team2, team3)
        self.t5 = get_utc_now()

    def test_as_of_parameter(self):
        city1_t2 = City.objects.as_of(self.t2).get(name__startswith='city1')
        self.assertEqual(1, city1_t2.team_set.all().count())
        self.assertFalse(city1_t2.is_current)
        # as_of 'end' for current version means "current", not a certain point
        # in time
        city1_current = City.objects.next_version(city1_t2,
                                                  relations_as_of='end')
        self.assertTrue(city1_current.is_current)
        self.assertIsNone(city1_current._querytime.time)
        teams = city1_current.team_set.all()
        self.assertEqual(1, teams.count())
        self.assertEqual('team1.b', teams[0].name)
        # as_of 'end' for non-current version means at a certain point in time
        city1_previous = City.objects.previous_version(city1_current,
                                                       relations_as_of='end')
        self.assertIsNotNone(city1_previous._querytime.time)
        # as_of 'start': returns version at the very start of it's life.
        city1_latest_at_birth = City.objects.next_version(
            city1_t2, relations_as_of='start')
        self.assertTrue(city1_latest_at_birth.is_current)
        self.assertEqual(1, city1_latest_at_birth.team_set.count())
        self.assertIsNotNone(city1_latest_at_birth._querytime.time)
        self.assertEqual(city1_latest_at_birth._querytime.time,
                         city1_latest_at_birth.version_start_date)
        # as_of datetime: returns a version at a given point in time.
        city1_t4 = City.objects.next_version(city1_t2, relations_as_of=self.t4)
        # NOTE(review): the assertions below re-check city1_latest_at_birth
        # rather than the freshly fetched city1_t4 — this looks like a
        # copy/paste slip; confirm which object was meant before relying on
        # this case.
        self.assertTrue(city1_latest_at_birth.is_current)
        self.assertIsNotNone(city1_latest_at_birth._querytime.time)
        teams = city1_latest_at_birth.team_set.all()
        self.assertEqual(1, teams.count())
        self.assertEqual('team1', teams[0].name)
        # as_of None: returns object without time restriction for related
        # objects.
        # This means, that all other related object versions that have been
        # associated with this object are returned when queried, without
        # applying any time restriction.
        city1_v2 = City.objects.current_version(city1_t2, relations_as_of=None)
        self.assertFalse(city1_v2._querytime.active)
        teams = city1_v2.team_set.all()
        team_names = {team.name for team in teams}
        self.assertEqual(3, teams.count())
        self.assertSetEqual({'team1', 'team2', 'team1.b'}, team_names)

    def test_invalid_as_of_parameter(self):
        city = City.objects.current.get(name__startswith='city1')
        # An unrecognised string value is rejected outright.
        with self.assertRaises(TypeError):
            City.objects.previous_version(city, relations_as_of='endlich')
        # Using an as_of time before the object's validity period:
        with self.assertRaises(ValueError):
            City.objects.current_version(city, relations_as_of=self.t1)
        # Using an as_of time after the object's validity period:
        with self.assertRaises(ValueError):
            City.objects.previous_version(city, relations_as_of=self.t5)
class HistoricObjectsHandling(TestCase):
    """Creating and cloning versions at explicit historic timestamps via
    _create_at / _clone_at / at()."""

    # Fixed historic instants used throughout the tests below.
    # NOTE(review): these are naive datetimes — presumably treated as UTC by
    # the versioning layer; confirm against get_utc_now().
    t0 = datetime.datetime(1980, 1, 1)
    t1 = datetime.datetime(1984, 4, 23)
    t2 = datetime.datetime(1985, 4, 23)
    in_between_t1_and_t2 = datetime.datetime(1984, 5, 30)
    after_t2 = datetime.datetime(1990, 1, 18)

    def test_create_using_manager(self):
        b = B.objects._create_at(self.t1, name='someB')
        self.assertEqual(self.t1, b.version_birth_date)
        self.assertEqual(self.t1, b.version_start_date)
        b_v2 = b._clone_at(self.t2)
        # The clone starts exactly where the first version ended.
        self.assertEqual(b_v2.version_start_date, b.version_end_date)
        # Query these objects
        b_v1 = B.objects.as_of(self.in_between_t1_and_t2).get(name='someB')
        self.assertFalse(b_v1.is_current)
        self.assertEqual(b_v1.version_birth_date, b_v1.version_start_date)
        b_v2 = B.objects.as_of(self.after_t2).get(name='someB')
        self.assertTrue(b_v2.is_current)
        self.assertNotEqual(b_v2.version_birth_date, b_v2.version_start_date)

    def test_create_using_constructor(self):
        b = B(name='someB').at(self.t1)
        b.save()
        self.assertEqual(self.t1, b.version_birth_date)
        self.assertEqual(self.t1, b.version_start_date)
        b_v2 = b._clone_at(self.t2)
        self.assertEqual(b_v2.version_start_date, b.version_end_date)
        # Query these objects
        b_v1 = B.objects.as_of(self.in_between_t1_and_t2).get(name='someB')
        self.assertFalse(b_v1.is_current)
        self.assertEqual(b_v1.version_birth_date, b_v1.version_start_date)
        b_v2 = B.objects.as_of(self.after_t2).get(name='someB')
        self.assertTrue(b_v2.is_current)
        self.assertNotEqual(b_v2.version_birth_date, b_v2.version_start_date)

    def test_wrong_temporal_moving_of_objects(self):
        """
        Test that the restriction about creating "past objects' are
        operational:
        - we cannot give something else than a timestamp to at()
        - we cannot move anywhere in time an object
        """
        b = B(name='someB')
        self.assertRaises(ValueError, lambda: b.at('bla'))
        b.at(self.t1)
        b.save()
        b_new = b._clone_at(self.t2)
        self.assertRaises(SuspiciousOperation, lambda: b.at(self.t2))
        self.assertRaises(SuspiciousOperation, lambda: b_new.at(self.t1))

    def test_cloning_before_birth_date(self):
        # Cloning at a timestamp earlier than the birth date must fail.
        b = B.objects._create_at(self.t1, name='someB')
        self.assertRaises(ValueError, b._clone_at, *[self.t0])
class OneToManyTest(TestCase):
    """
    Tests versioned objects across a one-to-many (ForeignKey) relation:
    a Team holding several Players.
    """

    def setUp(self):
        # Initial state: one current team with two current players.
        self.team = Team.objects.create(name='t.v1')
        self.p1 = Player.objects.create(name='p1.v1', team=self.team)
        self.p2 = Player.objects.create(name='p2.v1', team=self.team)

    def test_simple(self):
        """
        Test that we have 2 players in the team.
        """
        self.assertEqual(2, self.team.player_set.count())

    def test_creating_new_version_of_the_team(self):
        t1 = get_utc_now()
        sleep(0.1)
        team = self.team.clone()
        team.name = 't.v2'
        team.save()
        t2 = get_utc_now()
        self.assertEqual(2, Team.objects.all().count())
        team = Team.objects.current.first()
        # Either we can test the version_end_date...
        self.assertIsNone(team.version_end_date)
        # ...or the is_current property
        self.assertTrue(team.is_current)
        # We didn't change anything to the players so there must be 2 players
        # in the team at time t1...
        team_at_t1 = Team.objects.as_of(t1).first()
        # TODO: Remove the following (useless) line, once Django1.8 is working
        t1_player_queryset = team_at_t1.player_set.all()
        # TODO: [django18 compat] The SQL query in t1_player_queryset.query
        # shows that the Team pk value (team_at_t1.id) is used to look up the
        # players (instead of the identity property value
        # (team_at_t1.identity))
        self.assertEqual(2, team_at_t1.player_set.count())
        # ... and at time t2
        team_at_t2 = Team.objects.as_of(t2).first()
        self.assertEqual(2, team_at_t2.player_set.count())

    def test_finding_object_with_historic_foreign_key(self):
        t1 = get_utc_now()
        sleep(0.01)
        team = self.team.clone()
        team.name = 't.v2'
        team.save()
        t2 = get_utc_now()
        sleep(0.01)
        team = team.clone()
        team.name = 't.v3'
        team.save()
        team_at_t1 = Team.objects.as_of(t1).get(identity=team.identity)
        team_at_t2 = Team.objects.as_of(t2).get(identity=team.identity)
        team_current = Team.objects.current.get(identity=team.identity)
        # self.p1's foreign key to self.team is its original value, which is
        # equal to team_at_t1's identity, but not (any longer) team_at_t1's id.
        # The following queries should all work to return the self.p1 Player:

        # Using a cross-relation lookup on a non-identity field (team__name):
        player_p1_lookup = Player.objects.as_of(t1).get(
            team__name=team_at_t1.name, name='p1.v1')
        self.assertEqual(self.p1, player_p1_lookup)

        # Explicitly specifying the identity field in the lookup:
        player_p1_explicit = Player.objects.as_of(t1).get(
            team__identity=team_at_t1.identity, name='p1.v1')
        self.assertEqual(self.p1, player_p1_explicit)

        # The following three all work because the foreign key actually refers
        # to the identity field of the foreign object (which equals the
        # identity of the current object).

        # Providing the current related object to filter on:
        player_p1_obj_current = Player.objects.as_of(t1).get(team=team_current,
                                                             name='p1.v1')
        self.assertEqual(self.p1, player_p1_obj_current)
        self.assertEqual(team_at_t1, player_p1_obj_current.team)

        # Providing the related object that existed at the as_of time:
        player_p1_obj_as_of = Player.objects.as_of(t1).get(team=team_at_t1,
                                                           name='p1.v1')
        self.assertEqual(self.p1, player_p1_obj_as_of)
        self.assertEqual(team_at_t1, player_p1_obj_as_of.team)

        # Providing the related object that is neither current, nor the one
        # that existed at the as_of time, but that has the same identity.
        player_p1_obj_other_version = Player.objects.as_of(t1).get(
            team=team_at_t2, name='p1.v1')
        self.assertEqual(self.p1, player_p1_obj_other_version)
        self.assertEqual(team_at_t1, player_p1_obj_other_version.team)

    def test_creating_new_version_of_the_player(self):
        t1 = get_utc_now()
        sleep(0.1)
        p1 = self.p1.clone()
        p1.name = 'p1.v2'
        p1.save()
        sleep(0.1)
        t2 = get_utc_now()
        self.assertEqual(3, Player.objects.all().count())
        # at t1 there is no player named 'p1.v2'
        team = Team.objects.as_of(t1).first()
        self.assertEqual(2, team.player_set.count())
        for player in team.player_set.all():
            self.assertNotEqual(u'p1.v2', six.u(str(player.name)))
        # at t2 there must be 2 players and one of them is named 'p1.v2'
        team = Team.objects.as_of(t2).first()
        self.assertEqual(2, team.player_set.count())
        # A list comprehension is portable across Python 2 and 3; the
        # previous six.PY2/six.PY3 double-if would have left `matches`
        # undefined had neither flag been set.
        matches = [player for player in team.player_set.all()
                   if player.name == 'p1.v2']
        self.assertEqual(1, len(matches))

    def test_adding_one_more_player_to_the_team(self):
        t1 = get_utc_now()
        sleep(0.1)
        self.assertEqual(2, self.team.player_set.all().count())
        # No local binding needed for the new player; only the side effect of
        # creating it matters here.
        Player.objects.create(name='p3.v1', team=self.team)
        t2 = get_utc_now()
        # there should be 3 players now in the team
        self.assertEqual(3, self.team.player_set.all().count())
        # there should be 2 players in the team at time t1
        team_at_t1 = Team.objects.as_of(t1).first()
        self.assertEqual(2, team_at_t1.player_set.all().count())
        # there should be 3 players in the team at time t2
        team_at_t2 = Team.objects.as_of(t2).first()
        self.assertEqual(3, team_at_t2.player_set.all().count())

    def test_removing_and_then_adding_again_same_player(self):
        t1 = get_utc_now()
        sleep(0.1)
        p1 = self.p1.clone()
        p1.team = None
        p1.name = 'p1.v2'
        p1.save()
        t2 = get_utc_now()
        sleep(0.1)
        p1 = p1.clone()
        p1.team = self.team
        p1.name = 'p1.v3'
        p1.save()
        t3 = get_utc_now()
        # there should be 2 players in the team if we put ourselves back at
        # time t1
        team_at_t1 = Team.objects.as_of(t1).first()
        self.assertEqual(2, team_at_t1.player_set.all().count())
        # there should be 1 player in the team if we put ourselves back at
        # time t2
        team_at_t2 = Team.objects.as_of(t2).first()
        self.assertEqual(1, team_at_t2.player_set.all().count())
        p1_at_t2 = Player.objects.as_of(t2).get(name__startswith='p1')
        self.assertIsNone(p1_at_t2.team)
        # there should be 2 players in the team if we put ourselves back at
        # time t3
        team_at_t3 = Team.objects.as_of(t3).first()
        self.assertEqual(2, team_at_t3.player_set.all().count())

    def test_removing_and_then_adding_again_same_player_on_related_object(
            self):
        t1 = get_utc_now()
        sleep(0.1)
        self.team.player_set.remove(self.p1)
        # Remember: self.p1 was cloned while removing and is not current
        # anymore!!
        # This property has to be documented, since it's critical for
        # developers!
        # At this time, there is no mean to replace the contents of self.p1
        # within the remove method
        p1 = Player.objects.current.get(name__startswith='p1')
        self.assertNotEqual(p1, self.p1)
        p1.name = 'p1.v2'
        p1.save()
        self.p1 = p1
        t2 = get_utc_now()
        sleep(0.1)
        self.team.player_set.add(self.p1)
        # Same thing here! Don't rely on an added value!
        p1 = Player.objects.current.get(name__startswith='p1')
        p1.name = 'p1.v3'
        p1.save()
        t3 = get_utc_now()
        # there should be 2 players in the team if we put ourselves back at
        # time t1
        team_at_t1 = Team.objects.as_of(t1).first()
        self.assertEqual(2, team_at_t1.player_set.all().count())
        # there should be 1 player in the team if we put ourselves back at
        # time t2
        team_at_t2 = Team.objects.as_of(t2).first()
        self.assertEqual(1, team_at_t2.player_set.all().count())
        # there should be 2 players in the team if we put ourselves back at
        # time t3
        team_at_t3 = Team.objects.as_of(t3).first()
        self.assertEqual(2, team_at_t3.player_set.all().count())
class SelfOneToManyTest(TestCase):
    """
    Tests a versioned one-to-many relation of a class onto itself:
    directories containing sub-directories.
    """

    def setUp(self):
        """
        Setting up one parent folder having 2 sub-folders
        """
        parentdir_v1 = Directory.objects.create(name='parent.v1')
        subdir1_v1 = Directory.objects.create(name='subdir1.v1')
        subdir2_v1 = Directory.objects.create(name='subdir2.v1')
        parentdir_v1.directory_set.add(subdir1_v1)
        parentdir_v1.directory_set.add(subdir2_v1)

    def test_creating_new_version_of_parent_directory(self):
        t1 = get_utc_now()
        sleep(0.1)
        parentdir_v1 = Directory.objects.get(name__startswith='parent.v1')
        self.assertTrue(parentdir_v1.is_current)
        parentdir_v2 = parentdir_v1.clone()
        parentdir_v2.name = 'parent.v2'
        parentdir_v2.save()
        t2 = get_utc_now()
        # 1 parent dir, 2 subdirs, 2 new versions after linking them together
        # and 1 new version of the parent dir
        self.assertEqual(6, Directory.objects.all().count())
        self.assertTrue(parentdir_v2.is_current)
        # We didn't change anything to the subdirs so there must be 2 subdirs
        # in the parent at time t1...
        parentdir_at_t1 = Directory.objects.as_of(t1).get(
            name__startswith='parent')
        self.assertEqual(2, parentdir_at_t1.directory_set.count())
        # ... and at time t2
        parentdir_at_t2 = Directory.objects.as_of(t2).get(
            name__startswith='parent')
        self.assertEqual(2, parentdir_at_t2.directory_set.count())

    def test_creating_new_version_of_the_subdir(self):
        t1 = get_utc_now()
        subdir1_v1 = Directory.objects.current.get(name__startswith='subdir1')
        subdir1_v2 = subdir1_v1.clone()
        subdir1_v2.name = 'subdir1.v2'
        subdir1_v2.save()
        sleep(0.1)
        t2 = get_utc_now()
        # Count all Directory instance versions:
        # 3 initial versions + 2 subdirs added to parentdir (implies a
        # clone) + 1 subdir1 that was explicitly cloned = 6
        self.assertEqual(6, Directory.objects.all().count())
        # at t1 there is no directory named 'subdir1.v2'
        parentdir_at_t1 = Directory.objects.as_of(t1).get(
            name__startswith='parent')
        self.assertEqual(2, parentdir_at_t1.directory_set.count())
        for subdir in parentdir_at_t1.directory_set.all():
            self.assertNotEqual('subdir1.v2', subdir.name)
        # at t2 there must be 2 directories and ...
        parentdir_at_t2 = Directory.objects.as_of(t2).get(
            name__startswith='parent')
        self.assertEqual(2, parentdir_at_t2.directory_set.count())
        # ... one of them is named 'subdir1.v2'
        # A list comprehension is portable across Python 2 and 3; the
        # previous six.PY2/six.PY3 double-if would have left `matches`
        # undefined had neither flag been set.
        matches = [subdir for subdir in parentdir_at_t2.directory_set.all()
                   if subdir.name == 'subdir1.v2']
        self.assertEqual(1, len(matches))

    def test_adding_more_subdir(self):
        t1 = get_utc_now()
        sleep(0.1)
        current_parentdir = Directory.objects.current.get(
            name__startswith='parent')
        self.assertEqual(2, current_parentdir.directory_set.all().count())
        sleep(0.1)
        Directory.objects.create(name='subdir3.v1', parent=current_parentdir)
        t2 = get_utc_now()
        # There must be 3 subdirectories in the parent directory now. Since
        # current_parentdir has never had an as_of specified, it will reflect
        # the current state.
        self.assertEqual(3, current_parentdir.directory_set.all().count())
        # there should be 2 directories in the parent directory at time t1
        parentdir_at_t1 = Directory.objects.as_of(t1).filter(
            name='parent.v1').first()
        self.assertEqual(2, parentdir_at_t1.directory_set.all().count())
        # there should be 3 directories in the parent directory at time t2
        parentdir_at_t2 = Directory.objects.as_of(t2).filter(
            name='parent.v1').first()
        self.assertEqual(3, parentdir_at_t2.directory_set.all().count())

    def test_removing_and_then_adding_again_same_subdir(self):
        t1 = get_utc_now()
        sleep(0.1)
        subdir1_v1 = Directory.objects.current.get(name__startswith='subdir1')
        subdir1_v2 = subdir1_v1.clone()
        subdir1_v2.parent = None
        subdir1_v2.name = 'subdir1.v2'
        subdir1_v2.save()
        t2 = get_utc_now()
        sleep(0.1)
        current_parentdir = Directory.objects.current.get(
            name__startswith='parent')
        subdir1_v3 = subdir1_v2.clone()
        subdir1_v3.parent = current_parentdir
        subdir1_v3.name = 'subdir1.v3'
        subdir1_v3.save()
        t3 = get_utc_now()
        # there should be 2 directories in the parent directory at time t1
        parentdir_at_t1 = Directory.objects.as_of(t1).get(
            name__startswith='parent')
        self.assertEqual(2, parentdir_at_t1.directory_set.all().count())
        # there should be 1 directory in the parent directory at time t2
        parentdir_at_t2 = Directory.objects.as_of(t2).get(
            name__startswith='parent')
        self.assertEqual(1, parentdir_at_t2.directory_set.all().count())
        # there should be 2 directories in the parent directory at time t3
        parentdir_at_t3 = Directory.objects.as_of(t3).get(
            name__startswith='parent')
        self.assertEqual(2, parentdir_at_t3.directory_set.all().count())
class OneToManyFilteringTest(TestCase):
    """
    Tests filtering across a versioned one-to-many relation at different
    points in time (t1..t5), including after a related object was deleted.
    """

    def setUp(self):
        team = Team.objects.create(name='t.v1')
        p1 = Player.objects.create(name='p1.v1', team=team)
        p2 = Player.objects.create(name='p2.v1', team=team)
        self.t1 = get_utc_now()
        sleep(0.1)
        # State at t1
        # Players: [p1.v1, p2.v1]
        # Teams: [t.v1]
        # t.player_set = [p1, p2]
        team.player_set.remove(p2)
        # remove() clones the player; re-fetch the current version.
        p2 = Player.objects.current.get(name='p2.v1')
        p2.name = 'p2.v2'
        p2.save()
        self.t2 = get_utc_now()
        sleep(0.1)
        # State at t2
        # Players: [p1.v1, p2.v1, p2.v2]
        # Teams: [t.v1]
        # t.player_set = [p1]
        team.player_set.remove(p1)
        p1 = Player.objects.current.get(name='p1.v1')
        p1.name = 'p1.v2'
        p1.save()
        self.t3 = get_utc_now()
        sleep(0.1)
        # State at t3
        # Players: [p1.v1, p2.v1, p2.v2, p1.v2]
        # Teams: [t.v1]
        # t.player_set = []
        # Let's get those players back into the game!
        team.player_set.add(p1)
        team.player_set.add(p2)
        p1 = Player.objects.current.get(name__startswith='p1')
        p1.name = 'p1.v3'
        p1.save()
        p2 = Player.objects.current.get(name__startswith='p2')
        p2.name = 'p2.v3'
        p2.save()
        self.t4 = get_utc_now()
        sleep(0.1)
        # State at t4
        # Players: [p1.v1, p2.v1, p2.v2, p1.v2, p2.v3, p1.v3]
        # Teams: [t.v1]
        # t.player_set = [p1, p2]
        p1.delete()
        self.t5 = get_utc_now()
        # State at t5 (comment previously mislabeled "t4"; p1 is deleted now)
        # Players: [p1.v1, p2.v1, p2.v2, p1.v2, p2.v3, p1.v3]
        # Teams: [t.v1]
        # t.player_set = [p2]

    def test_filtering_on_the_other_side_of_the_relation(self):
        self.assertEqual(1, Team.objects.all().count())
        self.assertEqual(1, Team.objects.as_of(self.t1).all().count())
        self.assertEqual(3, Player.objects.filter(
            name__startswith='p1').all().count())
        self.assertEqual(3, Player.objects.filter(
            name__startswith='p2').all().count())
        self.assertEqual(1, Player.objects.as_of(self.t1).filter(
            name='p1.v1').all().count())
        self.assertEqual(1, Player.objects.as_of(self.t1).filter(
            name='p2.v1').all().count())
        # at t1 there should be one team with two players
        team_p1 = Team.objects.as_of(self.t1).filter(
            player__name='p1.v1').first()
        self.assertIsNotNone(team_p1)
        team_p2 = Team.objects.as_of(self.t1).filter(
            player__name='p2.v1').first()
        self.assertIsNotNone(team_p2)
        # at t2 there should be one team with one single player called 'p1.v1'
        team_p1 = Team.objects.as_of(self.t2).filter(
            player__name='p1.v1').first()
        team_p2 = Team.objects.as_of(self.t2).filter(
            player__name='p2.v2').first()
        self.assertIsNotNone(team_p1)
        self.assertEqual(team_p1.name, 't.v1')
        self.assertEqual(1, team_p1.player_set.count())
        self.assertIsNone(team_p2)
        # at t3 there should be one team with no players
        team_p1 = Team.objects.as_of(self.t3).filter(
            player__name='p1.v2').first()
        team_p2 = Team.objects.as_of(self.t3).filter(
            player__name='p2.v2').first()
        self.assertIsNone(team_p1)
        self.assertIsNone(team_p2)
        # at t4 there should be one team with two players again!
        team_p1 = Team.objects.as_of(self.t4).filter(
            player__name='p1.v3').first()
        team_p2 = Team.objects.as_of(self.t4).filter(
            player__name='p2.v3').first()
        self.assertIsNotNone(team_p1)
        self.assertEqual(team_p1.name, 't.v1')
        self.assertIsNotNone(team_p2)
        self.assertEqual(team_p2.name, 't.v1')
        self.assertEqual(team_p1, team_p2)
        self.assertEqual(2, team_p1.player_set.count())

    def test_simple_filter_using_q_objects(self):
        """
        This tests explicitly the filtering of a versioned object using Q
        objects.
        However, since this is done implicitly with every call to 'as_of',
        this test is redundant but is kept for explicit test coverage
        """
        t1_players = list(
            Player.objects.as_of(self.t1).filter(Q(name__startswith='p1') | Q(
                name__startswith='p2')).values_list(
                'name',
                flat=True))
        self.assertEqual(2, len(t1_players))
        self.assertListEqual(sorted(t1_players), sorted(['p1.v1', 'p2.v1']))

    def test_filtering_for_deleted_player_at_t5(self):
        # p1 was deleted before t5, so the relation lookup must find no team.
        team_none = Team.objects.as_of(self.t5).filter(
            player__name__startswith='p1').first()
        self.assertIsNone(team_none)

    @skipUnless(connection.vendor == 'sqlite',
                'SQL is database specific, only sqlite is tested here.')
    def test_query_created_by_filtering_for_deleted_player_at_t5(self):
        team_none_queryset = Team.objects.as_of(self.t5).filter(
            player__name__startswith='p1')
        # Validating the current query prior to analyzing the generated SQL
        self.assertEqual([], list(team_none_queryset))
        team_none_query = str(team_none_queryset.query)
        team_table = Team._meta.db_table
        player_table = Player._meta.db_table
        t5_utc_w_tz = str(self.t5)
        t5_utc_wo_tz = t5_utc_w_tz[:-6]
        expected_query = """
            SELECT
                "{team_table}"."id",
                "{team_table}"."identity",
                "{team_table}"."version_start_date",
                "{team_table}"."version_end_date",
                "{team_table}"."version_birth_date",
                "{team_table}"."name",
                "{team_table}"."city_id"
            FROM "{team_table}"
            INNER JOIN
                "{player_table}" ON (
                    "{team_table}"."identity" = "{player_table}"."team_id"
                    AND ((
                        {player_table}.version_start_date <= {ts}
                        AND (
                            {player_table}.version_end_date > {ts}
                            OR {player_table}.version_end_date is NULL
                        )
                    ))
                )
            WHERE (
                "{player_table}"."name" LIKE p1% ESCAPE '\\\'
                AND (
                    "{team_table}"."version_end_date" > {ts_wo_tz}
                    OR "{team_table}"."version_end_date" IS NULL
                )
                AND "{team_table}"."version_start_date" <= {ts_wo_tz}
            )
        """.format(ts=t5_utc_w_tz, ts_wo_tz=t5_utc_wo_tz,
                   team_table=team_table, player_table=player_table)
        self.assertStringEqualIgnoreWhiteSpaces(expected_query,
                                                team_none_query)
class MultiM2MTest(TestCase):
    """
    Testing multiple ManyToMany-relationships on a same class; the following
    story was chosen:

        Classroom <--> Student <--> Professor
    """
    t0 = t1 = t2 = t3 = t4 = None

    def setUp(self):
        # -------------- t0:
        mr_biggs = Professor.objects.create(
            name='Mr. Biggs',
            address='123 Mainstreet, Somewhere',
            phone_number='123')
        ms_piggy = Professor.objects.create(
            name='Ms. Piggy',
            address='82 Leicester Street, London',
            phone_number='987')
        gym = Classroom.objects.create(name='Sports room',
                                       building='The big one over there')
        phylo = Classroom.objects.create(name='Philosophy lectures',
                                         building='The old one')
        annika = Student.objects.create(name='Annika')
        annika.professors.add(mr_biggs)
        annika.professors.add(ms_piggy)
        annika.classrooms.add(phylo)
        annika.classrooms.add(gym)
        benny = Student.objects.create(name='Benny')
        benny.professors.add(mr_biggs)
        benny.classrooms.add(gym)
        sophie = Student.objects.create(name='Sophie')
        # Sophie doesn't study at that school yet, but is already subscribed
        self.t0 = get_utc_now()
        sleep(0.1)
        # -------------- t1:
        # Mr. Biggs moves to Berne
        mr_biggs = mr_biggs.clone()
        mr_biggs.address = 'Thunplatz, Bern'
        mr_biggs.save()
        # Mr. Evans gets hired
        mr_evans = Professor.objects.create(name='Mr. Evans',
                                            address='lives in a camper',
                                            phone_number='456')
        # A lab gets built
        lab = Classroom.objects.create(name='Physics and stuff',
                                       building='The old one')
        self.t1 = get_utc_now()
        sleep(0.1)
        # -------------- t2:
        # Mr. Evans starts to teach sophie in the lab
        mr_evans.students.add(sophie)
        lab.students.add(sophie)
        self.t2 = get_utc_now()
        sleep(0.1)
        # -------------- t3:
        # Annika is joining Sophie
        annika.professors.add(mr_evans)
        annika.classrooms.add(lab)
        self.t3 = get_utc_now()
        sleep(0.1)
        # -------------- t4:
        # Benny cuts that sh*t
        benny.professors.remove(mr_biggs)
        self.t4 = get_utc_now()

    def test_t0(self):
        professors = Professor.objects.as_of(self.t0).all()
        self.assertEqual(len(professors), 2)
        students = Student.objects.as_of(self.t0).all()
        self.assertEqual(len(students), 3)
        classrooms = Classroom.objects.as_of(self.t0).all()
        self.assertEqual(len(classrooms), 2)
        annika_t0 = Student.objects.as_of(self.t0).get(name='Annika')
        annikas_professors_t0 = annika_t0.professors.all()
        annikas_classrooms_t0 = annika_t0.classrooms.all()
        self.assertEqual(len(annikas_professors_t0), 2)
        self.assertEqual(len(annikas_classrooms_t0), 2)
        benny_t0 = Student.objects.as_of(self.t0).get(name='Benny')
        bennys_professors_t0 = benny_t0.professors.all()
        bennys_classrooms_t0 = benny_t0.classrooms.all()
        self.assertEqual(len(bennys_professors_t0), 1)
        self.assertEqual(len(bennys_classrooms_t0), 1)
        mr_biggs_t0 = bennys_professors_t0[0]
        self.assertEqual(mr_biggs_t0.name, 'Mr. Biggs')
        self.assertEqual(mr_biggs_t0.address, '123 Mainstreet, Somewhere')
        self.assertEqual(len(mr_biggs_t0.students.all()), 2)
        for student in mr_biggs_t0.students.all():
            self.assertIn(student.name, ['Annika', 'Benny'])
        gym_t0 = bennys_classrooms_t0[0]
        self.assertEqual(gym_t0.name, 'Sports room')
        self.assertEqual(len(gym_t0.students.all()), 2)
        for student in gym_t0.students.all():
            self.assertIn(student.name, ['Annika', 'Benny'])
        # Chained traversal: classroom -> student -> professors.
        female_professors_t0 = Classroom.objects.as_of(self.t0).get(
            name__startswith='Philo'). \
            students.first(). \
            professors.filter(name__startswith='Ms')
        self.assertEqual(len(female_professors_t0), 1)
        self.assertEqual(female_professors_t0[0].name, 'Ms. Piggy')
        self.assertEqual(female_professors_t0[0].phone_number, '987')

    def test_t1(self):
        mr_evans_t1 = Professor.objects.as_of(self.t1).get(name='Mr. Evans')
        self.assertEqual(mr_evans_t1.name, 'Mr. Evans')
        self.assertEqual(mr_evans_t1.students.count(), 0)
        self.assertEqual(list(mr_evans_t1.students.all()), [])
        self.assertEqual(Classroom.objects.as_of(self.t1).get(
            name__startswith="Physics").students.count(),
            0)
        self.assertEqual(Professor.objects.as_of(self.t1).get(
            name__contains='Biggs').address,
            'Thunplatz, Bern')

    def test_t2(self):
        mr_evans_t2 = Professor.objects.as_of(self.t2).get(name='Mr. Evans')
        evans_students = mr_evans_t2.students.all()
        self.assertEqual(len(evans_students), 1)
        self.assertEqual(evans_students[0].name, 'Sophie')
        # Checking Sophie's rooms
        self.assertIn('Physics and stuff', list(
            evans_students[0].classrooms.values_list('name', flat=True)))
        self.assertEqual(evans_students[0].classrooms.count(), 1)

    def test_t3(self):
        # Find all professors who teach Annika
        annikas_professors_t3 = Professor.objects.as_of(self.t3).filter(
            students__name='Annika')
        self.assertEqual(annikas_professors_t3.count(), 3)
        self.assertIn('Mr. Evans', list(
            annikas_professors_t3.values_list('name', flat=True)))

    def test_number_of_queries_stay_constant(self):
        """
        We had a situation where the number of queries to get data from a m2m
        relations was proportional to the number of objects in the relations.
        For example if one object was related with 10 others it will require
        2 + 2x10 queries to get data.
        Obviously this is not something one would want and this problem is
        really difficult to find out as the behavior is correct. There are
        just too many queries generated to carry on the work and therefore
        the system's performance sinks.
        This test is here to make sure we don't go back accidentally to such a
        situation by making sure the number of queries stays the same.
        """
        annika = Student.objects.current.get(name='Annika')
        with self.assertNumQueries(1):
            annika.professors.all().first()

    def test_adding_multiple_related_objects(self):
        # In the setUp, Benny had a professor, and then no more.
        all_professors = list(Professor.objects.current.all())
        benny = Student.objects.current.get(name='Benny')
        benny.professors.add(*all_professors)
        benny.as_of = get_utc_now()
        # This was once failing because _add_items() was filtering out items
        # it didn't need to re-add, but it was not restricting the query to
        # find those objects with any as-of time.
        self.assertSetEqual(set(list(benny.professors.all())),
                            set(all_professors))

    def test_adding_multiple_related_objects_using_a_valid_timestamp(self):
        all_professors = list(Professor.objects.current.all())
        benny = Student.objects.current.get(name='Benny')
        benny.professors.add_at(self.t4, *all_professors)
        # Test the addition of objects in the past
        self.assertSetEqual(set(list(benny.professors.all())),
                            set(all_professors))

    @skip("To be implemented")
    def test_adding_multiple_related_objects_using_an_invalid_timestamp(self):
        # TODO: See test_adding_multiple_related_objects and make use of
        # add_at and a timestamp laying outside the
        # current object's lifetime
        # Create a new version beyond self.t4
        benny = Student.objects.current.get(name='Benny')
        benny = benny.clone()
        benny.name = "Benedict"
        benny.save()
        all_professors = list(Professor.objects.current.all())
        # Test the addition of objects in the past with a timestamp that
        # points before the current versions lifetime
        # TODO: Raise an error when adding objects outside the lifetime of an
        # object (even if it's a discouraged use case)
        self.assertRaises(ValueError,
                          lambda: benny.professors.add_at(
                              self.t4,
                              *all_professors))

    def test_querying_multiple_related_objects_on_added_object(self):
        # In the setUp, Benny had a professor, and then no more.
        all_professors = list(Professor.objects.current.all())
        benny = Student.objects.current.get(name='Benny')
        benny.professors.add(*all_professors)
        # This was once failing because benny's as_of time had been set by the
        # call to Student.objects.current,
        # and was being propagated to the query selecting the relations, which
        # were added after as_of was set.
        self.assertSetEqual(set(list(benny.professors.all())),
                            set(all_professors))

    def test_direct_assignment_of_relations(self):
        """
        Ensure that when relations that are directly set (e.g. not via add()
        or remove(), that their versioning information is kept.
        """
        benny = Student.objects.current.get(name='Benny')
        all_professors = list(Professor.objects.current.all())
        first_professor = all_professors[0]
        last_professor = all_professors[-1]
        some_professor_ids = [o.pk for o in all_professors][:2]
        self.assertNotEqual(first_professor.identity, last_professor.identity)
        self.assertTrue(1 < len(some_professor_ids) < len(all_professors))
        self.assertEqual(benny.professors.count(), 0)
        t0 = get_utc_now()
        benny.professors.add(first_professor)
        t1 = get_utc_now()
        benny.professors = all_professors
        t2 = get_utc_now()
        benny.professors = [last_professor]
        t3 = get_utc_now()
        # Also try assigning with a list of pks, instead of objects:
        benny.professors = some_professor_ids
        t4 = get_utc_now()
        # Benny ain't groovin' it.
        benny.professors = []
        t5 = get_utc_now()
        benny0 = Student.objects.as_of(t0).get(identity=benny.identity)
        benny1 = Student.objects.as_of(t1).get(identity=benny.identity)
        benny2 = Student.objects.as_of(t2).get(identity=benny.identity)
        benny3 = Student.objects.as_of(t3).get(identity=benny.identity)
        benny4 = Student.objects.as_of(t4).get(identity=benny.identity)
        benny5 = Student.objects.as_of(t5).get(identity=benny.identity)
        self.assertSetEqual(set(list(benny0.professors.all())), set())
        self.assertSetEqual(set(list(benny1.professors.all())),
                            set([first_professor]))
        self.assertSetEqual(set(list(benny2.professors.all())),
                            set(all_professors))
        self.assertSetEqual(set(list(benny3.professors.all())),
                            set([last_professor]))
        self.assertSetEqual(set([o.pk for o in benny4.professors.all()]),
                            set(some_professor_ids))
        self.assertSetEqual(set(list(benny5.professors.all())), set())

    def test_annotations_and_aggregations(self):
        # Annotations and aggregations should work with .current objects as
        # well as historical .as_of() objects.
        self.assertEqual(4,
                         Professor.objects.current.annotate(
                             num_students=Count('students')).aggregate(
                             sum=Sum('num_students'))['sum']
                         )
        self.assertTupleEqual((1, 1),
                              (Professor.objects.current.annotate(
                                  num_students=Count('students')).get(
                                  name='Mr. Biggs').num_students,
                               Professor.objects.current.get(
                                   name='Mr. Biggs').students.count())
                              )
        self.assertTupleEqual((2, 2),
                              (Professor.objects.as_of(self.t1).annotate(
                                  num_students=Count('students')).get(
                                  name='Mr. Biggs').num_students,
                               Professor.objects.as_of(self.t1).get(
                                   name='Mr. Biggs').students.count())
                              )
        # Results should include records for which the annotation returns a 0
        # count, too.
        # This requires that the generated LEFT OUTER JOIN condition includes
        # a clause to restrict the records according to the desired as_of time.
        self.assertEqual(3, len(Student.objects.current.annotate(
            num_teachers=Count('professors')).all()))

    def test_constant_number_of_queries_when_cloning_m2m_related_object(self):
        """
        This test aims to verify whether the number of queries against the DB
        remains constant, even if the number of M2M relations has grown.
        This test was necessary in order to verify changes from PR #44
        """
        annika = Student.objects.current.get(name='Annika')
        # Annika, at this point, has:
        # - 3 professors
        # - 3 classrooms
        # There are 12 queries against the DB:
        # - 3 for writing the new version of the object itself
        #   o 1 attempt to update the earlier version
        #   o 1 insert of the earlier version
        #   o 1 update of the later version
        # - 5 for the professors relationship
        #   o 1 for selecting all concerned professor objects
        #   o 1 for selecting all concerned intermediate table entries
        #     (student_professor)
        #   o 1 for updating current intermediate entry versions
        #   o 1 for non-current rel-entries pointing the annika-object
        #     (there's 1 originating from the clone-operation on mr_biggs)
        #   o 1 for inserting new versions
        # - 4 for the classrooms M2M relationship
        #   o 1 for selecting all concerned classroom objects
        #   o 1 for selecting all concerned intermediate table entries
        #     (student_classroom)
        #   o 1 for updating current intermediate entry versions
        #   o 0 for non-current rel-entries pointing the annika-object
        #   o 1 for inserting new versions
        with self.assertNumQueries(12):
            annika.clone()

    def test_no_duplicate_m2m_entries_after_cloning_related_object(self):
        """
        This test ensures there are no duplicate entries added when cloning an
        object participating in a M2M relationship.
        It ensures the absence of duplicate entries on all modified levels:
        - at the object-model level
        - at any relationship level (intermediary tables)
        """
        annika = Student.objects.current.get(name='Annika')
        student_professors_mgr = annika.professors
        student_classrooms_mgr = annika.classrooms
        # Annika, at this point, has:
        # - 3 professors
        # - 3 classrooms
        # Check the PRE-CLONE state
        annika_pre_clone = annika
        # There's 1 Student instance (named Annika)
        self.assertEqual(1, Student.objects.filter(
            identity=annika.identity).count())
        # There are 4 links to 3 professors (Mr. Biggs has been cloned once
        # when setting up, thus 1 additional link)
        student_professor_links = list(
            student_professors_mgr.through.objects.filter(
                **{student_professors_mgr.source_field_name:
                    annika_pre_clone.id}))
        self.assertEqual(4, len(student_professor_links))
        # There are 3 links to classrooms
        student_classroom_links = list(
            student_classrooms_mgr.through.objects.filter(
                **{student_classrooms_mgr.source_field_name:
                    annika_pre_clone.id}))
        self.assertEqual(3, len(student_classroom_links))
        # Do the CLONE that also impacts the number of linking entries
        annika_post_clone = annika.clone()
        # Check the POST-CLONE state
        # There are 2 Student instances (named Annika)
        self.assertEqual(2, Student.objects.filter(
            identity=annika.identity).count())
        # There are 7 links to 3 professors
        # - 4 of them are pointing the previous annika-object (including the
        #   non-current link to Mr. Biggs)
        # - 3 of them are pointing the current annika-object (only current
        #   links were taken over)
        student_professor_links = list(
            student_professors_mgr.through.objects.filter(
                Q(**{student_professors_mgr.source_field_name:
                    annika_pre_clone.id}) |
                Q(**{student_professors_mgr.source_field_name:
                    annika_post_clone.id})))
        self.assertEqual(7, len(student_professor_links))
        self.assertEqual(4, student_professors_mgr.through.objects.filter(
            Q(**{student_professors_mgr.source_field_name:
                annika_pre_clone.id})).count())
        self.assertEqual(3, student_professors_mgr.through.objects.filter(
            Q(**{student_professors_mgr.source_field_name:
                annika_post_clone.id})).count())
        # There are 6 links to 3 classrooms
        # - 3 of them are pointing the previous annika-object
        # - 3 of them are pointing the current annika-object
        student_classroom_links = list(
            student_classrooms_mgr.through.objects.filter(
                Q(**{student_classrooms_mgr.source_field_name:
                    annika_pre_clone.id}) |
                Q(**{student_classrooms_mgr.source_field_name:
                    annika_post_clone.id})))
        self.assertEqual(6, len(student_classroom_links))
        self.assertEqual(3, student_classrooms_mgr.through.objects.filter(
            Q(**{student_classrooms_mgr.source_field_name:
                annika_pre_clone.id})).count())
        self.assertEqual(3, student_classrooms_mgr.through.objects.filter(
            Q(**{student_classrooms_mgr.source_field_name:
                annika_post_clone.id})).count())
class MultiM2MToSameTest(TestCase):
    """
    Verifies that two distinct ManyToMany relations from one class to the
    same target class are versioned independently:

        Teacher <--> Pupil <--> Teacher
    """
    t0 = t1 = t2 = t3 = None

    def setUp(self):
        pupil_billy = Pupil.objects.create(name='Billy', phone_number='123')
        pupil_erika = Pupil.objects.create(name='Erika', phone_number='456')
        teacher_sue = Teacher.objects.create(name='Ms. Sue', domain='English')
        teacher_klishina = Teacher.objects.create(name='Ms. Klishina',
                                                  domain='Russian')
        teacher_kazmirek = Teacher.objects.create(name='Mr. Kazmirek',
                                                  domain='Math')
        teacher_mayer = Teacher.objects.create(name='Ms. Mayer',
                                               domain='Chemistry')
        self.t0 = get_utc_now()
        sleep(0.1)
        # t0 -> t1: Billy gets an English teacher, Erika two science teachers.
        pupil_billy.language_teachers.add(teacher_sue)
        pupil_erika.science_teachers.add(teacher_kazmirek, teacher_mayer)
        self.t1 = get_utc_now()
        sleep(0.1)
        # t1 -> t2: Billy swaps Ms. Sue for Ms. Klishina.
        pupil_billy.language_teachers.add(teacher_klishina)
        pupil_billy.language_teachers.remove(teacher_sue)
        self.t2 = get_utc_now()
        sleep(0.1)
        # t2 -> t3: Erika drops Ms. Mayer.
        pupil_erika.science_teachers.remove(teacher_mayer)
        self.t3 = get_utc_now()

    def test_filtering_on_the_other_side_of_relation(self):
        # Ms. Sue taught Billy only within the [t1, t2) window.
        expectations = [(self.t0, 0), (self.t1, 1), (self.t2, 0)]
        for timestamp, expected_count in expectations:
            actual_count = Pupil.objects.as_of(timestamp).filter(
                language_teachers__name='Ms. Sue').count()
            self.assertEqual(expected_count, actual_count)

    def test_t0(self):
        """
        Just some cross-checking...
        """
        billy = Pupil.objects.as_of(self.t0).get(name='Billy')
        self.assertEqual(0, billy.language_teachers.count())

    def test_t1(self):
        billy = Pupil.objects.as_of(self.t1).get(name='Billy')
        self.assertEqual(1, billy.language_teachers.count())
        self.assertEqual('Ms. Sue', billy.language_teachers.first().name)
        erika = Pupil.objects.as_of(self.t1).get(name='Erika')
        self.assertEqual(2, erika.science_teachers.count())

    def test_t2(self):
        billy = Pupil.objects.as_of(self.t2).get(name='Billy')
        self.assertEqual(1, billy.language_teachers.count())
        self.assertEqual('Ms. Klishina',
                         billy.language_teachers.first().name)

    def test_t3(self):
        erika = Pupil.objects.as_of(self.t3).get(name='Erika')
        self.assertEqual(1, erika.science_teachers.count())
        self.assertEqual('Mr. Kazmirek',
                         erika.science_teachers.first().name)
class SelfReferencingManyToManyTest(TestCase):
    """
    Test a self-referencing M2M relation: ``Person.parents`` and its
    reverse accessor ``children``.
    """

    def setUp(self):
        # NOTE: renamed the local formerly called ``max`` so it no
        # longer shadows the builtin.
        maude = Person.objects.create(name='Maude')
        max_person = Person.objects.create(name='Max')
        mips = Person.objects.create(name='Mips')
        mips.parents.add(maude, max_person)

    def test_parent_relationship(self):
        """Mips' parents are exactly Maude and Max."""
        mips = Person.objects.current.get(name='Mips')
        parents = mips.parents.all()
        self.assertSetEqual({'Maude', 'Max'}, {p.name for p in parents})

    def test_child_relationship(self):
        """Both parents see Mips through the reverse accessor."""
        maude = Person.objects.current.get(name='Maude')
        max_person = Person.objects.current.get(name='Max')
        for person in [maude, max_person]:
            self.assertEqual('Mips', person.children.first().name)

    def test_relationship_spanning_query(self):
        """A filter spanning the relation finds both parents."""
        mips_parents_qs = Person.objects.current.filter(children__name='Mips')
        self.assertSetEqual({'Max', 'Maude'},
                            {p.name for p in mips_parents_qs})
class ManyToManyFilteringTest(TestCase):
    """
    Filtering across one or two M2M hops (C1 <-> C2 <-> C3), both in
    the current state and as of the historic snapshots t0..t3 captured
    in setUp; includes exact-SQL verification on sqlite.
    """
    def setUp(self):
        # Build the object graph; the inline comments record the state
        # of the relations at each captured timestamp.
        c1 = C1(name='c1.v1')
        c2 = C2(name='c2.v1')
        c3 = C3(name='c3.v1')
        c1.save()
        c2.save()
        c3.save()
        # Play on an object's instance
        c2 = c2.clone()
        c2.name = 'c2.v2'
        c2.save()
        self.t0 = get_utc_now()
        sleep(0.1)
        c2.c3s.add(c3)
        c1.c2s.add(c2)
        self.t1 = get_utc_now()
        # at t1:
        # c1.c2s = [c2]
        # c2.c3s = [c3]
        sleep(0.1)
        c3a = C3(name='c3a.v1')
        c3a.save()
        c2.c3s.add(c3a)
        sleep(0.1)
        self.t2 = get_utc_now()
        # at t2:
        # c1.c2s = [c2]
        # c2.c3s = [c3, c3a]
        c1 = c1.clone()
        c1.name = 'c1.v2'
        c1.save()
        c3a.delete()
        sleep(0.1)
        self.t3 = get_utc_now()
        # at t3:
        # c1.c2s = [c2]
        # c2.c3s = [c3]

    def test_filtering_one_jump(self):
        """
        Test filtering m2m relations with 2 models
        """
        should_be_c1 = C1.objects.filter(c2s__name__startswith='c2').first()
        self.assertIsNotNone(should_be_c1)

    def test_inexistent_relations_at_t0(self):
        """
        Test return value when there is no element assigned to a M2M
        relationship
        """
        c1_at_t0 = C1.objects.as_of(self.t0).get()
        self.assertEqual([], list(c1_at_t0.c2s.all()))

    def test_filtering_one_jump_with_version_at_t1(self):
        """
        Test filtering m2m relations with 2 models with propagation of
        querytime information across all tables
        """
        should_be_c1 = C1.objects.as_of(self.t1) \
            .filter(c2s__name__startswith='c2').first()
        self.assertIsNotNone(should_be_c1)

    def test_filtering_one_jump_with_version_at_t3(self):
        """
        Test filtering m2m relations with 2 models with propagation of
        querytime information across all tables.
        Also test after an object being in a relationship has been deleted.
        """
        should_be_c2 = C2.objects.as_of(self.t3) \
            .filter(c3s__name__startswith='c3.').first()
        self.assertIsNotNone(should_be_c2)
        self.assertEqual(should_be_c2.name, 'c2.v2')
        # 'c3a' was deleted before t3, so it must not be reachable.
        should_be_none = C2.objects.as_of(self.t3) \
            .filter(c3s__name__startswith='c3a').first()
        self.assertIsNone(should_be_none)

    @skipUnless(connection.vendor == 'sqlite',
                'SQL is database specific, only sqlite is tested here.')
    def test_query_created_by_filtering_one_jump_with_version_at_t1(self):
        """
        Test filtering m2m relations with 2 models with propagation of
        querytime information across all tables
        """
        should_be_c1_queryset = C1.objects.as_of(self.t1) \
            .filter(c2s__name__startswith='c2')
        should_be_c1_query = str(should_be_c1_queryset.query)
        t1_string = self.t1.isoformat().replace('T', ' ')
        t1_no_tz_string = t1_string[:-6]
        # The JOIN clauses must carry the as_of time restriction into
        # the through table and the related table.
        expected_query = """
            SELECT "versions_tests_c1"."id",
                   "versions_tests_c1"."identity",
                   "versions_tests_c1"."version_start_date",
                   "versions_tests_c1"."version_end_date",
                   "versions_tests_c1"."version_birth_date",
                   "versions_tests_c1"."name"
            FROM "versions_tests_c1"
            INNER JOIN "versions_tests_c1_c2s" ON (
                "versions_tests_c1"."id" = "versions_tests_c1_c2s"."c1_id"
                AND ((
                    versions_tests_c1_c2s.version_start_date <= {time}
                    AND (versions_tests_c1_c2s.version_end_date > {time}
                         OR versions_tests_c1_c2s.version_end_date is NULL
                        )
                ))
            )
            INNER JOIN "versions_tests_c2" ON (
                "versions_tests_c1_c2s"."C2_id" = "versions_tests_c2"."id"
                AND ((
                    versions_tests_c2.version_start_date <= {time}
                    AND (versions_tests_c2.version_end_date > {time}
                         OR versions_tests_c2.version_end_date is NULL
                        )
                ))
            )
            WHERE (
                "versions_tests_c2"."name" LIKE c2% escape '\\'
                AND ("versions_tests_c1"."version_end_date" > {time_no_tz}
                     OR "versions_tests_c1"."version_end_date" IS NULL)
                AND "versions_tests_c1"."version_start_date" <= {time_no_tz}
            )
        """.format(time=t1_string, time_no_tz=t1_no_tz_string)
        self.assertStringEqualIgnoreWhiteSpaces(expected_query,
                                                should_be_c1_query)

    def test_filtering_one_jump_reverse(self):
        """
        Test filtering m2m relations with 2 models but navigating relation in
        the reverse direction
        """
        should_be_c3 = C3.objects.filter(c2s__name__startswith='c2').first()
        self.assertIsNotNone(should_be_c3)

    def test_filtering_one_jump_reverse_with_version_at_t1(self):
        """
        Test filtering m2m relations with 2 models with propagation of
        querytime information across all tables and navigating the relation
        in the reverse direction
        """
        should_be_c3 = C3.objects.as_of(self.t1) \
            .filter(c2s__name__startswith='c2').first()
        self.assertIsNotNone(should_be_c3)
        self.assertEqual(should_be_c3.name, 'c3.v1')

    def test_filtering_two_jumps(self):
        """
        Test filtering m2m relations with 3 models
        """
        with self.assertNumQueries(1) as counter:
            should_be_c1 = C1.objects.filter(
                c2s__c3s__name__startswith='c3').first()
            self.assertIsNotNone(should_be_c1)

    def test_filtering_two_jumps_with_version_at_t1(self):
        """
        Test filtering m2m relations with 3 models with propagation of
        querytime information across all tables
        """
        with self.assertNumQueries(3) as counter:
            # 'c3a' did not exist yet at t1.
            should_be_none = C1.objects.as_of(self.t1) \
                .filter(c2s__c3s__name__startswith='c3a').first()
            self.assertIsNone(should_be_none)
            should_be_c1 = C1.objects.as_of(self.t1) \
                .filter(c2s__c3s__name__startswith='c3').first()
            self.assertIsNotNone(should_be_c1)
            self.assertEqual(should_be_c1.name, 'c1.v1')
            count = C1.objects.as_of(self.t1) \
                .filter(c2s__c3s__name__startswith='c3').all().count()
            self.assertEqual(1, count)

    @skipUnless(connection.vendor == 'sqlite',
                'SQL is database specific, only sqlite is tested here.')
    def test_query_created_by_filtering_two_jumps_with_version_at_t1(self):
        """
        Investigate correctness of the resulting SQL query
        """
        should_be_c1_queryset = C1.objects.as_of(self.t1) \
            .filter(c2s__c3s__name__startswith='c3')
        should_be_c1_query = str(should_be_c1_queryset.query)
        t1_string = self.t1.isoformat().replace('T', ' ')
        t1_no_tz_string = t1_string[:-6]
        # Four JOINs expected: two through tables and two related
        # tables, each restricted to versions valid at t1.
        expected_query = """
            SELECT "versions_tests_c1"."id",
                   "versions_tests_c1"."identity",
                   "versions_tests_c1"."version_start_date",
                   "versions_tests_c1"."version_end_date",
                   "versions_tests_c1"."version_birth_date",
                   "versions_tests_c1"."name"
            FROM "versions_tests_c1"
            INNER JOIN "versions_tests_c1_c2s" ON (
                "versions_tests_c1"."id" = "versions_tests_c1_c2s"."c1_id"
                AND ((versions_tests_c1_c2s.version_start_date <= {time}
                      AND (versions_tests_c1_c2s.version_end_date > {time}
                           OR versions_tests_c1_c2s.version_end_date is NULL ))
                )
            )
            INNER JOIN "versions_tests_c2" ON (
                "versions_tests_c1_c2s"."C2_id" = "versions_tests_c2"."id"
                AND ((versions_tests_c2.version_start_date <= {time}
                      AND (versions_tests_c2.version_end_date > {time}
                           OR versions_tests_c2.version_end_date is NULL ))
                )
            )
            INNER JOIN "versions_tests_c2_c3s" ON (
                "versions_tests_c2"."id" = "versions_tests_c2_c3s"."c2_id"
                AND ((versions_tests_c2_c3s.version_start_date <= {time}
                      AND (versions_tests_c2_c3s.version_end_date > {time}
                           OR versions_tests_c2_c3s.version_end_date is NULL ))
                )
            )
            INNER JOIN "versions_tests_c3" ON (
                "versions_tests_c2_c3s"."C3_id" = "versions_tests_c3"."id"
                AND ((versions_tests_c3.version_start_date <= {time}
                      AND (versions_tests_c3.version_end_date > {time}
                           OR versions_tests_c3.version_end_date is NULL ))
                )
            )
            WHERE (
                "versions_tests_c3"."name" LIKE c3% escape '\\'
                AND ("versions_tests_c1"."version_end_date" > {time_no_tz}
                     OR "versions_tests_c1"."version_end_date" IS NULL)
                AND "versions_tests_c1"."version_start_date" <= {time_no_tz}
            )
        """.format(time=t1_string, time_no_tz=t1_no_tz_string)
        self.assertStringEqualIgnoreWhiteSpaces(expected_query,
                                                should_be_c1_query)

    def test_filtering_two_jumps_with_version_at_t2(self):
        """
        Test filtering m2m relations with 3 models with propagation of
        querytime information across all tables but this time at point in time
        t2
        """
        with self.assertNumQueries(2) as counter:
            # 'c3a' exists at t2 and is reachable through c2.
            should_be_c1 = C1.objects.as_of(self.t2) \
                .filter(c2s__c3s__name__startswith='c3a').first()
            self.assertIsNotNone(should_be_c1)
            count = C1.objects.as_of(self.t2) \
                .filter(c2s__c3s__name__startswith='c3').all().count()
            self.assertEqual(2, count)

    def test_filtering_two_jumps_with_version_at_t3(self):
        """
        Test filtering m2m relations with 3 models with propagation of
        querytime information across all tables but this time at point in time
        t3
        """
        with self.assertNumQueries(3) as counter:
            # Should be None, since object 'c3a' does not exist anymore at t3
            should_be_none = C1.objects.as_of(self.t3) \
                .filter(c2s__c3s__name__startswith='c3a').first()
            self.assertIsNone(should_be_none)
            should_be_c1 = C1.objects.as_of(self.t3) \
                .filter(c2s__c3s__name__startswith='c3.').first()
            self.assertIsNotNone(should_be_c1)
            count = C1.objects.as_of(self.t3) \
                .filter(c2s__c3s__name__startswith='c3.').all().count()
            self.assertEqual(1, count)

    def test_filtering_two_jumps_reverse(self):
        """
        Test filtering m2m relations with 3 models but navigating relation in
        the reverse direction
        """
        with self.assertNumQueries(1) as counter:
            should_be_c3 = C3.objects.filter(
                c2s__c1s__name__startswith='c1').first()
            self.assertIsNotNone(should_be_c3)

    def test_filtering_two_jumps_reverse_with_version_at_t1(self):
        """
        Test filtering m2m relations with 3 models with propagation of
        querytime information across all tables and navigating the relation in
        the reverse direction
        """
        with self.assertNumQueries(2) as counter:
            should_be_c3 = C3.objects.as_of(self.t1). \
                filter(c2s__c1s__name__startswith='c1').first()
            self.assertIsNotNone(should_be_c3)
            self.assertEqual(should_be_c3.name, 'c3.v1')
            count = C3.objects.as_of(self.t1) \
                .filter(c2s__c1s__name__startswith='c1').all().count()
            self.assertEqual(1, count)

    def test_filtering_two_jumps_reverse_with_version_at_t2(self):
        """
        Test filtering m2m relations with 3 models with propagation of
        querytime information across all tables and navigating the relation in
        the reverse direction but this time at point in time t2
        """
        with self.assertNumQueries(2) as counter:
            should_be_c3 = C3.objects.as_of(self.t2) \
                .filter(c2s__c1s__name__startswith='c1').first()
            self.assertIsNotNone(should_be_c3)
            count = C3.objects.as_of(self.t2) \
                .filter(c2s__c1s__name__startswith='c1').all().count()
            self.assertEqual(2, count)
class HistoricM2MOperationsTests(TestCase):
    """
    Verify the ``*_at`` operations that create objects and add/remove
    M2M links at explicitly supplied historic timestamps.
    """

    def setUp(self):
        # Establish the observer/subject link on 1984-04-23 ...
        creation_ts = datetime.datetime(1984, 4, 23)
        self.big_brother = Observer.objects._create_at(
            creation_ts, name='BigBrother')
        winston = Subject.objects._create_at(creation_ts,
                                             name='Winston Smith')
        self.big_brother.subjects.add_at(creation_ts, winston)
        # ... and sever it one month later.
        removal_ts = creation_ts + datetime.timedelta(days=30)
        self.big_brother.subjects.remove_at(removal_ts, winston)

    def test_observer_subject_relationship_is_active_in_early_1984(self):
        """Between creation and removal the link must be visible."""
        query_ts = datetime.datetime(1984, 5, 1)
        observer = Observer.objects.as_of(query_ts).get()
        self.assertEqual(observer.name, 'BigBrother')
        watched = observer.subjects.all()
        self.assertEqual(len(watched), 1)
        self.assertEqual(watched[0].name, 'Winston Smith')

    def test_observer_subject_relationship_is_inactive_in_late_1984(self):
        """After removal the link is gone, but the subject survives."""
        query_ts = datetime.datetime(1984, 8, 16)
        observer = Observer.objects.as_of(query_ts).get()
        self.assertEqual(observer.name, 'BigBrother')
        watched = observer.subjects.all()
        self.assertEqual(len(watched), 0)
        subject = Subject.objects.as_of(query_ts).get()
        self.assertEqual(subject.name, 'Winston Smith')

    def test_simple(self):
        """Accessing the relation on the stored instance must not fail."""
        self.big_brother.subjects.all().first()
class M2MDirectAssignmentTests(TestCase):
    """
    Direct assignment to a M2M relation (``obj.subjects = [...]``) must
    be reflected correctly in the version history.
    """

    def setUp(self):
        self.o1 = Observer.objects.create(name="1.0")
        self.s1 = Subject.objects.create(name="1.0")
        self.s2 = Subject.objects.create(name="2.0")
        self.t1 = get_utc_now()
        # Second version: both subjects attached via add().
        self.o1 = self.o1.clone()
        self.o1.name = "1.1"
        self.o1.save()
        self.o1.subjects.add(self.s1, self.s2)
        self.t2 = get_utc_now()
        # Third version: relation emptied by direct assignment.
        self.o1 = self.o1.clone()
        self.o1.name = "1.2"
        self.o1.save()
        self.o1.subjects = []
        self.t3 = get_utc_now()

    def _subject_count_at(self, timestamp):
        # Resolve the observer version valid at ``timestamp`` and count
        # its related subjects.
        observer = Observer.objects.as_of(timestamp).filter(
            identity=self.o1.identity).first()
        return observer.subjects.all().count()

    def test_t1_relations(self):
        self.assertEqual(0, self._subject_count_at(self.t1))

    def test_t2_relations(self):
        self.assertEqual(2, self._subject_count_at(self.t2))

    def test_t3_relations(self):
        self.assertEqual(0, self._subject_count_at(self.t3))
class ReverseForeignKeyDirectAssignmentTests(TestCase):
    """
    Direct assignment / clearing of a reverse foreign-key set
    (``city.team_set``) across clones of either end of the relation.
    """

    def setUp(self):
        # City is the referenced object, Team the referring object.
        # c1 will be explicitly cloned, but not its teams.
        # c10 will not be explicitly cloned, but one of its teams will be.
        self.c1 = City.objects.create(name="Oakland")
        self.team1 = Team.objects.create(name="As")
        self.team2 = Team.objects.create(name="Raiders")
        self.c10 = City.objects.create(name="San Francisco")
        self.team10 = Team.objects.create(name="Giants")
        self.team11 = Team.objects.create(name="49ers")
        self.t1 = get_utc_now()
        self.c1 = self.c1.clone()
        self.c1.team_set.add(self.team1, self.team2)
        self.team10 = self.team10.clone()
        self.c10.team_set.add(self.team10, self.team11)
        self.t2 = get_utc_now()
        self.c1 = self.c1.clone()
        self.c1.team_set.set([])
        self.team10 = Team.objects.current.get(
            identity=self.team10.identity).clone()
        self.c10.team_set.clear()
        self.t3 = get_utc_now()

    def _team_count(self, timestamp, identity):
        # Number of teams attached, at ``timestamp``, to the city
        # version identified by ``identity``.
        city = City.objects.as_of(timestamp).filter(
            identity=identity).first()
        return city.team_set.all().count()

    def test_t1_relations_for_cloned_referenced_object(self):
        self.assertEqual(0, self._team_count(self.t1, self.c1.identity))

    def test_t2_relations_for_cloned_referenced_object(self):
        self.assertEqual(2, self._team_count(self.t2, self.c1.identity))

    def test_t3_relations_for_cloned_referenced_object(self):
        self.assertEqual(0, self._team_count(self.t3, self.c1.identity))

    def test_t1_relations_for_cloned_referring_object(self):
        self.assertEqual(0, self._team_count(self.t1, self.c10.identity))

    def test_t2_relations_for_cloned_referring_object(self):
        self.assertEqual(2, self._team_count(self.t2, self.c10.identity))

    def test_t3_relations_for_cloned_referring_object(self):
        self.assertEqual(0, self._team_count(self.t3, self.c10.identity))
class PrefetchingTests(TestCase):
    """
    Verify select_related and prefetch_related behaviour (query counts
    and generated SQL) on versioned FK and M2M relations.
    """
    def setUp(self):
        # One city, one team, two players; t1 snapshots this state.
        self.city1 = City.objects.create(name='Chicago')
        self.team1 = Team.objects.create(name='te1.v1', city=self.city1)
        self.p1 = Player.objects.create(name='pl1.v1', team=self.team1)
        self.p2 = Player.objects.create(name='pl2.v1', team=self.team1)
        sleep(0.1)
        self.t1 = get_utc_now()

    def test_select_related(self):
        """select_related must resolve versioned FKs in a single query."""
        with self.assertNumQueries(1):
            player = Player.objects.as_of(self.t1).select_related('team').get(
                name='pl1.v1')
            self.assertIsNotNone(player)
            self.assertEqual(player.team, self.team1)
        # A NULLed FK must also resolve without an extra query.
        p1 = self.p1.clone()
        p1.name = 'pl1.v2'
        p1.team = None
        p1.save()
        t2 = get_utc_now()
        with self.assertNumQueries(1):
            player = Player.objects.current.select_related('team').get(
                name='pl1.v2')
            self.assertIsNotNone(player)
            self.assertIsNone(player.team)
        # Multiple foreign-key related tables should still only require one
        # query
        with self.assertNumQueries(1):
            player = Player.objects.as_of(t2).select_related('team__city').get(
                name='pl2.v1')
            self.assertIsNotNone(player)
            self.assertEqual(self.city1, player.team.city)

    @skipUnless(connection.vendor == 'sqlite',
                'SQL is database specific, only sqlite is tested here.')
    def test_select_related_query_sqlite(self):
        """Pin the exact SQL generated for a versioned select_related."""
        select_related_queryset = Player.objects.as_of(self.t1).select_related(
            'team').all()
        # Validating the query before verifying the SQL string
        self.assertEqual(['pl1.v1', 'pl2.v1'],
                         [player.name for player in select_related_queryset])
        select_related_query = str(select_related_queryset.query)
        team_table = Team._meta.db_table
        player_table = Player._meta.db_table
        t1_utc_w_tz = str(self.t1)
        t1_utc_wo_tz = t1_utc_w_tz[:-6]
        # The JOIN targets the related table's 'identity' column and is
        # restricted to versions valid at t1.
        expected_query = """
            SELECT "{player_table}"."id",
                   "{player_table}"."identity",
                   "{player_table}"."version_start_date",
                   "{player_table}"."version_end_date",
                   "{player_table}"."version_birth_date",
                   "{player_table}"."name",
                   "{player_table}"."team_id",
                   "{team_table}"."id",
                   "{team_table}"."identity",
                   "{team_table}"."version_start_date",
                   "{team_table}"."version_end_date",
                   "{team_table}"."version_birth_date",
                   "{team_table}"."name",
                   "{team_table}"."city_id"
            FROM "{player_table}"
            LEFT OUTER JOIN "{team_table}" ON (
                "{player_table}"."team_id" = "{team_table}"."identity"
                AND (({team_table}.version_start_date <= {ts}
                      AND ({team_table}.version_end_date > {ts}
                           OR {team_table}.version_end_date IS NULL))))
            WHERE
            (
                ("{player_table}"."version_end_date" > {ts_wo_tz}
                 OR "{player_table}"."version_end_date" IS NULL)
                AND "{player_table}"."version_start_date" <= {ts_wo_tz}
            )
        """.format(player_table=player_table, team_table=team_table,
                   ts=t1_utc_w_tz, ts_wo_tz=t1_utc_wo_tz)
        self.assertStringEqualIgnoreWhiteSpaces(expected_query,
                                                select_related_query)

    @skipUnless(connection.vendor == 'postgresql',
                'SQL is database specific, only PostgreSQL is tested here.')
    def test_select_related_query_postgresql(self):
        """Same SQL pinning as the sqlite variant, for PostgreSQL."""
        select_related_query = str(
            Player.objects.as_of(self.t1).select_related('team').all().query)
        team_table = Team._meta.db_table
        player_table = Player._meta.db_table
        t1_utc_w_tz = str(self.t1)
        t1_utc_wo_tz = t1_utc_w_tz[:-6]
        expected_query = """
            SELECT "{player_table}"."id",
                   "{player_table}"."identity",
                   "{player_table}"."version_start_date",
                   "{player_table}"."version_end_date",
                   "{player_table}"."version_birth_date",
                   "{player_table}"."name",
                   "{player_table}"."team_id",
                   "{team_table}"."id",
                   "{team_table}"."identity",
                   "{team_table}"."version_start_date",
                   "{team_table}"."version_end_date",
                   "{team_table}"."version_birth_date",
                   "{team_table}"."name",
                   "{team_table}"."city_id"
            FROM "{player_table}"
            LEFT OUTER JOIN "{team_table}" ON (
                "{player_table}"."team_id" = "{team_table}"."identity"
                AND (({team_table}.version_start_date <= {ts}
                      AND ({team_table}.version_end_date > {ts}
                           OR {team_table}.version_end_date IS NULL))))
            WHERE
            (
                ("{player_table}"."version_end_date" > {ts}
                 OR "{player_table}"."version_end_date" IS NULL)
                AND "{player_table}"."version_start_date" <= {ts}
            )
        """.format(player_table=player_table, team_table=team_table,
                   ts=t1_utc_w_tz, ts_wo_tz=t1_utc_wo_tz)
        self.assertStringEqualIgnoreWhiteSpaces(expected_query,
                                                select_related_query)

    def test_prefetch_related_via_foreignkey(self):
        """Prefetching a reverse FK set and a forward FK: 3 queries up
        front, none on access."""
        with self.assertNumQueries(3):
            team = Team.objects.as_of(self.t1).prefetch_related('player_set',
                                                                'city').first()
            self.assertIsNotNone(team)
        with self.assertNumQueries(0):
            p1 = team.player_set.all()[0]
            p2 = team.player_set.all()[1]
            self.assertEqual(self.city1, team.city)
        # Mutate: add a player, clone another, delete a third.
        p3 = Player.objects.create(name='pl3.v1', team=self.team1)
        p2 = self.p2.clone()
        p2.name = 'pl2.v2'
        p2.save()
        p1.delete()
        with self.assertNumQueries(3):
            team = Team.objects.current.prefetch_related('player_set',
                                                         'city').first()
            self.assertIsNotNone(team)
        with self.assertNumQueries(0):
            # Current view: the deleted player is gone.
            self.assertEqual(2, len(team.player_set.all()))
            p1 = team.player_set.all()[0]
            p2 = team.player_set.all()[1]
            self.assertEqual(self.city1, team.city)
        with self.assertNumQueries(3):
            team = Team.objects.prefetch_related('player_set', 'city').first()
            self.assertIsNotNone(team)
        with self.assertNumQueries(0):
            # Unscoped view: all player versions are visible.
            self.assertEqual(4, len(team.player_set.all()))
            px = team.player_set.all()[1]
            self.assertEqual(self.city1, team.city)

    def test_prefetch_related_via_many_to_many(self):
        """Prefetching across a M2M relation, historic vs. current."""
        # award1 - award10
        awards = [Award.objects.create(name='award' + str(i)) for i in
                  range(1, 11)]
        # city0 - city2
        cities = [City.objects.create(name='city-' + str(i)) for i in range(3)]
        teams = []
        # team-0-0 with city0 - team-2-1 with city1
        for i in range(3):
            for j in range(2):
                teams.append(Team.objects.create(
                    name='team-{}-{}'.format(i, j), city=cities[i]))
        players = []
        for i in range(6):
            for j in range(6):
                p = Player.objects.create(
                    name='player-{}-{}'.format(i, j), team=teams[i])
                if j % 2:
                    p.awards.add(*awards[j - 1:j - 9])
                players.append(p)
        t2 = get_utc_now()
        # players is player-0-0 with team-0-0 through player-5-5 with team-2-1
        # players with awards:
        # player-[012345]-1, [012345]-3, [012345]-5,
        # the -1s have awards: 1,2
        # the -3s have awards: 3,4
        # the -5s have awards: 5,6
        with self.assertNumQueries(6):
            players_t2 = list(
                Player.objects.as_of(t2).prefetch_related('team',
                                                          'awards').filter(
                    name__startswith='player-').order_by('name')
            )
            players_current = list(
                Player.objects.current.prefetch_related('team',
                                                        'awards').filter(
                    name__startswith='player-').order_by('name')
            )
        self.assertSetEqual(set(players_t2), set(players_current))
        award_players = []
        with self.assertNumQueries(0):
            # All relations were prefetched; no further queries allowed.
            for i in range(len(players_current)):
                t2_p = players_t2[i]
                current_p = players_current[i]
                self.assertEqual(t2_p.team.name, current_p.team.name)
                if i % 2:
                    self.assertGreater(len(t2_p.awards.all()), 0)
                    self.assertSetEqual(set(t2_p.awards.all()),
                                        set(current_p.awards.all()))
                    award_players.append(current_p)
        # Remove one award from each decorated player, then verify the
        # historic view is unchanged while the current view shrank by 1.
        name_list = []
        for p in award_players:
            p.awards.remove(p.awards.all()[0])
            name_list.append(p.name)
        with self.assertNumQueries(2):
            old_award_players = list(
                Player.objects.as_of(t2).prefetch_related('awards').filter(
                    name__in=name_list).order_by('name')
            )
        with self.assertNumQueries(2):
            updated_award_players = list(
                Player.objects.current.prefetch_related('awards').filter(
                    name__in=name_list).order_by('name')
            )
        with self.assertNumQueries(0):
            for i in range(len(award_players)):
                old = len(old_award_players[i].awards.all())
                new = len(updated_award_players[i].awards.all())
                self.assertTrue(new == old - 1)
class PrefetchingHistoricTests(TestCase):
    """
    Verify prefetch_related on historic ('as_of') and current querysets,
    with and without explicit Prefetch objects, including the rule that
    a Prefetch queryset's as_of time must match the parent queryset's.

    Note: the deprecated ``assertEquals`` alias (removed in Python 3.12)
    has been replaced by ``assertEqual`` throughout.
    """

    def setUp(self):
        # One city, two teams, two players on team1; time1 snapshots it.
        self.c1 = City.objects.create(name='city.v1')
        self.t1 = Team.objects.create(name='team1.v1', city=self.c1)
        self.t2 = Team.objects.create(name='team2.v1', city=self.c1)
        self.p1 = Player.objects.create(name='pl1.v1', team=self.t1)
        self.p2 = Player.objects.create(name='pl2.v1', team=self.t1)
        self.time1 = get_utc_now()
        sleep(0.001)

    def modify_objects(self):
        # Clone the city (which is referenced by a foreign key in the team
        # object), one team and one player, bumping their names to .v2.
        self.c1a = self.c1.clone()
        self.c1a.name = 'city.v2'
        self.c1a.save()
        self.t1a = self.t1.clone()
        self.t1a.name = 'team1.v2'
        self.t1a.save()
        self.p1a = self.p1.clone()
        self.p1a.name = 'pl1.v2'
        self.p1a.save()

    def test_reverse_fk_prefetch_queryset_with_historic_versions(self):
        """
        prefetch_related with Prefetch objects that specify querysets.
        """
        historic_cities_qs = City.objects.as_of(self.time1).filter(
            name='city.v1').prefetch_related(
            Prefetch(
                'team_set',
                queryset=Team.objects.as_of(self.time1),
                to_attr='prefetched_teams'
            ),
            Prefetch(
                'prefetched_teams__player_set',
                queryset=Player.objects.as_of(self.time1),
                to_attr='prefetched_players'
            )
        )
        with self.assertNumQueries(3):
            historic_cities = list(historic_cities_qs)
        self.assertEqual(1, len(historic_cities))
        historic_city = historic_cities[0]
        self.assertEqual(2, len(historic_city.prefetched_teams))
        self.assertSetEqual(
            {'team1.v1', 'team2.v1'},
            {t.name for t in historic_city.prefetched_teams})
        team = [t for t in historic_city.prefetched_teams if
                t.name == 'team1.v1'][0]
        self.assertSetEqual({'pl1.v1', 'pl2.v1'},
                            {p.name for p in team.prefetched_players})
        # For the 'current' case:
        current_cities_qs = City.objects.current.filter(
            name='city.v1').prefetch_related(
            Prefetch(
                'team_set',
                queryset=Team.objects.current,
                to_attr='prefetched_teams'
            ),
            Prefetch(
                'prefetched_teams__player_set',
                queryset=Player.objects.current,
                to_attr='prefetched_players'
            )
        )
        with self.assertNumQueries(3):
            current_cities = list(current_cities_qs)
        self.assertEqual(1, len(current_cities))
        current_city = current_cities[0]
        self.assertEqual(2, len(current_city.prefetched_teams))
        self.assertSetEqual(
            {'team1.v1', 'team2.v1'},
            {t.name for t in current_city.prefetched_teams})
        team = [t for t in current_city.prefetched_teams if
                t.name == 'team1.v1'][0]
        self.assertSetEqual({'pl1.v1', 'pl2.v1'},
                            {p.name for p in team.prefetched_players})
        # After cloning, the historic query must still yield the .v1
        # objects ...
        self.modify_objects()
        historic_cities_qs = City.objects.as_of(self.time1).filter(
            name='city.v1').prefetch_related(
            Prefetch(
                'team_set',
                queryset=Team.objects.as_of(self.time1),
                to_attr='prefetched_teams'
            ),
            Prefetch(
                'prefetched_teams__player_set',
                queryset=Player.objects.as_of(self.time1),
                to_attr='prefetched_players'
            )
        )
        with self.assertNumQueries(3):
            historic_cities = list(historic_cities_qs)
        self.assertEqual(1, len(historic_cities))
        historic_city = historic_cities[0]
        self.assertEqual(2, len(historic_city.prefetched_teams))
        self.assertSetEqual(
            {'team1.v1', 'team2.v1'},
            {t.name for t in historic_city.prefetched_teams})
        team = [t for t in historic_city.prefetched_teams if
                t.name == 'team1.v1'][0]
        self.assertSetEqual({'pl1.v1', 'pl2.v1'},
                            {p.name for p in team.prefetched_players})
        # ... while the current query yields the .v2 objects.
        current_cities_qs = City.objects.current.filter(
            name='city.v2').prefetch_related(
            Prefetch(
                'team_set',
                queryset=Team.objects.current,
                to_attr='prefetched_teams'
            ),
            Prefetch(
                'prefetched_teams__player_set',
                queryset=Player.objects.current,
                to_attr='prefetched_players'
            ),
        )
        with self.assertNumQueries(3):
            current_cities = list(current_cities_qs)
        self.assertEqual(1, len(current_cities))
        current_city = current_cities[0]
        self.assertEqual(2, len(current_city.prefetched_teams))
        self.assertSetEqual(
            {'team1.v2', 'team2.v1'},
            {t.name for t in current_city.prefetched_teams})
        team = [t for t in current_city.prefetched_teams if
                t.name == 'team1.v2'][0]
        self.assertSetEqual({'pl1.v2', 'pl2.v1'},
                            {p.name for p in team.prefetched_players})
        # When a different time is specified for the prefetch queryset than
        # for the base queryset:
        with self.assertRaises(ValueError):
            _ = City.objects.current.filter(name='city.v2').prefetch_related(
                Prefetch(
                    'team_set',
                    queryset=Team.objects.as_of(self.time1),
                    to_attr='prefetched_teams'
                ),
                Prefetch(
                    'prefetched_teams__player_set',
                    queryset=Player.objects.as_of(self.time1),
                    to_attr='prefetched_players'
                ),
            )[0]

    def test_reverse_fk_simple_prefetch_with_historic_versions(self):
        """
        prefetch_related with simple lookup.
        """
        historic_cities_qs = City.objects.as_of(self.time1).filter(
            name='city.v1').prefetch_related(
            'team_set', 'team_set__player_set')
        with self.assertNumQueries(3):
            historic_cities = list(historic_cities_qs)
        self.assertEqual(1, len(historic_cities))
        historic_city = historic_cities[0]
        self.assertEqual(2, len(historic_city.team_set.all()))
        self.assertSetEqual({'team1.v1', 'team2.v1'},
                            {t.name for t in historic_city.team_set.all()})
        team = [t for t in historic_city.team_set.all()
                if t.name == 'team1.v1'][0]
        self.assertSetEqual({'pl1.v1', 'pl2.v1'},
                            {p.name for p in team.player_set.all()})
        # For the 'current' case:
        current_cities_qs = City.objects.current.filter(
            name='city.v1').prefetch_related(
            'team_set', 'team_set__player_set')
        with self.assertNumQueries(3):
            current_cities = list(current_cities_qs)
        self.assertEqual(1, len(current_cities))
        current_city = current_cities[0]
        self.assertEqual(2, len(current_city.team_set.all()))
        self.assertSetEqual({'team1.v1', 'team2.v1'},
                            {t.name for t in current_city.team_set.all()})
        team = [t for t in current_city.team_set.all()
                if t.name == 'team1.v1'][0]
        self.assertSetEqual({'pl1.v1', 'pl2.v1'},
                            {p.name for p in team.player_set.all()})
        # Now, we'll clone the city (which is referenced by a foreign key in
        # the team object).
        # The queries above, when repeated, should work the same as before.
        self.modify_objects()
        historic_cities_qs = City.objects.as_of(self.time1).filter(
            name='city.v1').prefetch_related(
            'team_set', 'team_set__player_set')
        with self.assertNumQueries(3):
            historic_cities = list(historic_cities_qs)
        self.assertEqual(1, len(historic_cities))
        historic_city = historic_cities[0]
        self.assertEqual(2, len(historic_city.team_set.all()))
        self.assertSetEqual({'team1.v1', 'team2.v1'},
                            {t.name for t in historic_city.team_set.all()})
        team = [t for t in historic_city.team_set.all()
                if t.name == 'team1.v1'][0]
        self.assertSetEqual({'pl1.v1', 'pl2.v1'},
                            {p.name for p in team.player_set.all()})
        # For the 'current' case:
        current_cities_qs = City.objects.current.filter(
            name='city.v2').prefetch_related(
            'team_set', 'team_set__player_set')
        with self.assertNumQueries(3):
            current_cities = list(current_cities_qs)
        self.assertEqual(1, len(current_cities))
        current_city = current_cities[0]
        self.assertEqual(2, len(current_city.team_set.all()))
        self.assertSetEqual({'team1.v2', 'team2.v1'},
                            {t.name for t in current_city.team_set.all()})
        team = [t for t in current_city.team_set.all()
                if t.name == 'team1.v2'][0]
        self.assertSetEqual({'pl1.v2', 'pl2.v1'},
                            {p.name for p in team.player_set.all()})

    def test_foreign_key_prefetch_with_historic_version(self):
        """Prefetching a forward FK from a historic queryset, with all
        Prefetch-object variants."""
        self.modify_objects()
        historic_city = City.objects.as_of(self.time1).get(
            identity=self.c1.identity)
        # Test with a simple prefetch.
        with self.assertNumQueries(2):
            team = Team.objects.as_of(self.time1).filter(
                identity=self.t1.identity
            ).prefetch_related(
                'city'
            )[0]
            self.assertIsNotNone(team.city)
            self.assertEqual(team.city.id, historic_city.id)
        # Test with a Prefetch object without a queryset.
        with self.assertNumQueries(2):
            team = Team.objects.as_of(self.time1).filter(
                identity=self.t1.identity
            ).prefetch_related(Prefetch(
                'city',
            ))[0]
            self.assertIsNotNone(team.city)
            self.assertEqual(team.city.id, historic_city.id)
        # Test with a Prefetch object with a queryset with an explicit as_of.
        with self.assertNumQueries(2):
            team = Team.objects.as_of(self.time1).filter(
                identity=self.t1.identity
            ).prefetch_related(Prefetch(
                'city',
                queryset=City.objects.as_of(self.time1)
            ))[0]
            self.assertIsNotNone(team.city)
            self.assertEqual(team.city.id, historic_city.id)
        # Test with a Prefetch object with a queryset with no as_of.
        with self.assertNumQueries(2):
            team = Team.objects.as_of(self.time1).filter(
                identity=self.t1.identity
            ).prefetch_related(Prefetch(
                'city',
                queryset=City.objects.all()
            ))[0]
            self.assertIsNotNone(team.city)
            self.assertEqual(team.city.id, historic_city.id)
        # Test with a Prefetch object with a queryset with an as_of that
        # differs from the parents.
        # If permitted, it would lead to possibly incorrect results and
        # definitely cache misses, which would defeat the purpose of using
        # prefetch_related. So a ValueError should be raised.
        with self.assertRaises(ValueError):
            team = Team.objects.as_of(self.time1).filter(
                identity=self.t1.identity
            ).prefetch_related(Prefetch(
                'city',
                queryset=City.objects.current
            ))[0]
        # Test with a Prefetch object with a queryset with an as_of, when the
        # parent has no as_of.
        # This is a bit of an odd thing to do, but possible.
        with self.assertNumQueries(2):
            team = Team.objects.filter(
                identity=self.t1.identity
            ).prefetch_related(Prefetch(
                'city',
                queryset=City.objects.as_of(self.time1)
            ))[0]
            self.assertIsNotNone(team.city)
            self.assertEqual(team.city.id, historic_city.id)
class IntegrationNonVersionableModelsTests(TestCase):
    """FK relations between Versionable models (WineDrinker) and plain,
    non-versioned models (Wine, WineDrinkerHat), in both directions."""
    def setUp(self):
        # Non-versioned wines and versioned drinkers; t1/t2/t3 snapshot the
        # state between mutations below.
        self.bordeaux = Wine.objects.create(name="Bordeaux", vintage=2004)
        self.barolo = Wine.objects.create(name="Barolo", vintage=2010)
        self.port = Wine.objects.create(name="Port wine", vintage=2014)
        self.jacques = WineDrinker.objects.create(name='Jacques',
                                                  glass_content=self.bordeaux)
        self.alfonso = WineDrinker.objects.create(name='Alfonso',
                                                  glass_content=self.barolo)
        self.jackie = WineDrinker.objects.create(name='Jackie',
                                                 glass_content=self.port)
        self.red_sailor_hat = WineDrinkerHat.objects.create(
            shape='Sailor',
            color='red',
            wearer=self.jackie)
        self.blue_turban_hat = WineDrinkerHat.objects.create(
            shape='Turban',
            color='blue',
            wearer=self.alfonso)
        self.green_vagabond_hat = WineDrinkerHat.objects.create(
            shape='Vagabond', color='green', wearer=self.jacques)
        self.pink_breton_hat = WineDrinkerHat.objects.create(shape='Breton',
                                                             color='pink')
        # t1: everyone is drinking their first wine.
        self.t1 = get_utc_now()
        sleep(0.1)
        self.jacques = self.jacques.clone()
        # Jacques wants to try the italian stuff...
        self.jacques.glass_content = self.barolo
        self.jacques.save()
        # t2: Jacques has switched from Bordeaux to Barolo.
        self.t2 = get_utc_now()
        sleep(0.1)
        # Jacques gets a bit dizzy and pinches Jackie's hat
        self.red_sailor_hat.wearer = self.jacques
        self.red_sailor_hat.save()
        self.t3 = get_utc_now()
        sleep(0.1)
    def test_accessibility_of_versions_and_non_versionables_via_plain_fk(self):
        """Navigate a plain FK from and to versioned objects over time."""
        # Access coming from a Versionable (reverse access)
        jacques_current = WineDrinker.objects.current.get(name='Jacques')
        jacques_t2 = WineDrinker.objects.as_of(self.t2).get(name='Jacques')
        jacques_t1 = WineDrinker.objects.as_of(self.t1).get(name='Jacques')
        self.assertEqual(jacques_current, jacques_t2)
        self.assertEqual('Barolo', jacques_t2.glass_content.name)
        self.assertEqual('Bordeaux', jacques_t1.glass_content.name)
        # Access coming from plain Models (direct access)
        barolo = Wine.objects.get(name='Barolo')
        all_time_barolo_drinkers = barolo.drinkers.all()
        self.assertEqual({'Alfonso', 'Jacques'},
                         {winedrinker.name for winedrinker in
                          all_time_barolo_drinkers})
        t1_barolo_drinkers = barolo.drinkers.as_of(self.t1).all()
        self.assertEqual({'Alfonso'}, {winedrinker.name for winedrinker in
                                       t1_barolo_drinkers})
        t2_barolo_drinkers = barolo.drinkers.as_of(self.t2).all()
        self.assertEqual({'Alfonso', 'Jacques'},
                         {winedrinker.name for winedrinker in
                          t2_barolo_drinkers})
        bordeaux = Wine.objects.get(name='Bordeaux')
        t2_bordeaux_drinkers = bordeaux.drinkers.as_of(self.t2).all()
        self.assertEqual(set([]), {winedrinker.name for winedrinker in
                                   t2_bordeaux_drinkers})
    def test_accessibility_of_versions_and_non_versionables_via_versioned_fk(
            self):
        jacques_current = WineDrinker.objects.current.get(name='Jacques')
        jacques_t1 = WineDrinker.objects.as_of(self.t1).get(name='Jacques')
        # Testing direct access
        # We're not able to track changes in objects that are not versionables,
        # pointing objects that are versionables.
        # Therefore, it seems like Jacques always had the same combination of
        # hats (even though at t1 and t2, he had one single hat)
        self.assertEqual({'Vagabond', 'Sailor'},
                         {hat.shape for hat in jacques_current.hats.all()})
        self.assertEqual({hat.shape for hat in jacques_t1.hats.all()},
                         {hat.shape for hat in jacques_current.hats.all()})
        # Fetch jackie-object; at that point, jackie still had her Sailor hat
        jackie_t2 = WineDrinker.objects.as_of(self.t2).get(name='Jackie')
        self.assertEqual(set([]), {hat.shape for hat in jackie_t2.hats.all()})
        # Testing reverse access
        green_vagabond_hat = WineDrinkerHat.objects.get(shape='Vagabond')
        should_be_jacques = green_vagabond_hat.wearer
        self.assertIsNotNone(should_be_jacques)
        self.assertEqual('Jacques', should_be_jacques.name)
        self.assertTrue(should_be_jacques.is_current)
        red_sailor_hat = WineDrinkerHat.objects.get(shape='Sailor')
        should_be_jacques = red_sailor_hat.wearer
        self.assertIsNotNone(should_be_jacques)
        self.assertEqual('Jacques', should_be_jacques.name)
        self.assertTrue(should_be_jacques.is_current)
        # For the records: navigate to a prior version of a versionable
        # object ('Jacques') as follows
        # TODO: Issue #33 on Github aims for a more direct syntax to get to
        # another version of the same object
        should_be_jacques_t1 = should_be_jacques.__class__.objects.as_of(
            self.t1).get(
            identity=should_be_jacques.identity)
        self.assertEqual(jacques_t1, should_be_jacques_t1)
    def test_filter_on_fk_versioned_and_nonversioned_join(self):
        """Filters that join across the versioned/non-versioned boundary."""
        # Get non-versioned objects, filtering on a FK-related versioned object
        jacques_hats = WineDrinkerHat.objects.filter(
            wearer__name='Jacques').distinct()
        self.assertEqual(set(jacques_hats),
                         set([self.green_vagabond_hat, self.red_sailor_hat]))
        # Get all versions of a Versionable by filtering on a FK-related
        # non-versioned object
        person_versions = WineDrinker.objects.filter(hats__shape='Vagabond')
        self.assertIn(self.jacques, person_versions)
class FilterOnForeignKeyRelationTest(TestCase):
    """Cloning a related object must not disturb historic FK-join filters."""
    def test_filter_on_fk_relation(self):
        """A snapshot query filtered across a FK join yields the same rows
        before and after the joined object gains a new version."""
        the_team = Team.objects.create(name='team')
        Player.objects.create(name='player', team=the_team)
        snapshot_time = get_utc_now()
        sleep(0.1)
        count_before_clone = len(
            Player.objects.as_of(snapshot_time).filter(team__name='team'))
        the_team.clone()
        count_after_clone = len(
            Player.objects.as_of(snapshot_time).filter(team__name='team'))
        self.assertEqual(count_before_clone, count_after_clone)
class SpecifiedUUIDTest(TestCase):
    """Creating Versionables with caller-supplied UUIDs and identities."""
    @staticmethod
    def uuid4(uuid_value=None):
        # Normalize the argument to a uuid.UUID: pass UUID instances
        # through, parse strings, and generate a fresh uuid4 for falsy input.
        if not uuid_value:
            return uuid.uuid4()
        if isinstance(uuid_value, uuid.UUID):
            return uuid_value
        return uuid.UUID(uuid_value)
    def test_create_with_uuid(self):
        p_id = self.uuid4()
        p = Person.objects.create(id=p_id, name="Alice")
        self.assertEqual(str(p_id), str(p.id))
        self.assertEqual(str(p_id), str(p.identity))
        # A version-5 UUID must be rejected as an id.
        p_id = uuid.uuid5(uuid.NAMESPACE_OID, str('bar'))
        with self.assertRaises(ValueError):
            Person.objects.create(id=p_id, name="Alexis")
    def test_create_with_forced_identity(self):
        # This test does some artificial manipulation of versioned objects,
        # do not use it as an example
        # for real-life usage!
        p = Person.objects.create(name="Abela")
        # Postgresql will provide protection here, since
        # util.postgresql.create_current_version_unique_identity_indexes
        # has been invoked in the post migration handler.
        if connection.vendor == 'postgresql' and get_version() >= '1.7':
            with self.assertRaises(IntegrityError):
                with transaction.atomic():
                    ident = self.uuid4(p.identity)
                    Person.objects.create(forced_identity=ident, name="Alexis")
        p.delete()
        # The start date of p2 does not necessarily have to equal the end date
        # of p.
        sleep(0.1)
        ident = self.uuid4(p.identity)
        p2 = Person.objects.create(forced_identity=ident, name="Alexis")
        p2.version_birth_date = p.version_birth_date
        p2.save()
        self.assertEqual(p.identity, p2.identity)
        self.assertNotEqual(p2.id, p2.identity)
        # Thanks to that artificial manipulation, p is now the previous version
        # of p2:
        self.assertEqual(p.name, Person.objects.previous_version(p2).name)
class VersionRestoreTest(TestCase):
    """Restoring deleted or superseded versions via ``restore()``.

    Fix: replaced the deprecated ``assertEquals`` alias with
    ``assertEqual``.
    """
    def setup_common(self):
        # A player with a FK (team) and M2M (awards) — relations that
        # restore() is expected to drop on the restored version.
        sf = City.objects.create(name="San Francisco")
        forty_niners = Team.objects.create(name='49ers', city=sf)
        player1 = Player.objects.create(name="Montana", team=forty_niners)
        best_quarterback = Award.objects.create(name="Best Quarterback")
        best_attitude = Award.objects.create(name="Best Attitude")
        player1.awards.add(best_quarterback, best_attitude)
        self.player1 = player1
        self.awards = {
            'best_quarterback': best_quarterback,
            'best_attitude': best_attitude,
        }
        self.forty_niners = forty_niners
    def test_restore_latest_version(self):
        self.setup_common()
        sleep(0.001)
        self.player1.delete()
        sleep(0.001)
        deleted_at = self.player1.version_end_date
        player1_pk = self.player1.pk
        sleep(0.001)
        restored = self.player1.restore()
        self.assertEqual(player1_pk, restored.pk)
        self.assertIsNone(restored.version_end_date)
        self.assertEqual(2, Player.objects.filter(name=restored.name).count())
        # There should be no relationships restored:
        self.assertIsNone(restored.team_id)
        self.assertListEqual([], list(restored.awards.all()))
        # The relationships are still present on the previous version.
        previous = Player.objects.previous_version(restored)
        self.assertEqual(deleted_at, previous.version_end_date)
        self.assertSetEqual(set(previous.awards.all()),
                            set(self.awards.values()))
        self.assertEqual(self.forty_niners, previous.team)
    def test_restore_previous_version(self):
        self.setup_common()
        p1 = self.player1.clone()
        p1.name = 'Joe'
        p1.save()
        player1_pk = self.player1.pk
        self.player1.restore()
        # Restoring an older version supersedes the current one ('Joe').
        with self.assertRaises(ObjectDoesNotExist):
            Player.objects.current.get(name='Joe')
        restored = Player.objects.current.get(name='Montana')
        self.assertEqual(player1_pk, restored.pk)
        self.assertIsNone(restored.version_end_date)
        self.assertEqual(2, Player.objects.filter(name=restored.name).count())
        # There should be no relationships restored:
        self.assertIsNone(restored.team_id)
        self.assertListEqual([], list(restored.awards.all()))
        # The relationships are also present on the previous version.
        previous = Player.objects.previous_version(restored)
        self.assertSetEqual(set(previous.awards.all()),
                            set(self.awards.values()))
        self.assertEqual(self.forty_niners, previous.team)
        # There should be no overlap of version periods.
        self.assertEqual(previous.version_end_date,
                         restored.version_start_date)
    def test_restore_with_required_foreignkey(self):
        team = Team.objects.create(name="Flying Pigs")
        mascot_v1 = Mascot.objects.create(name="Curly", team=team)
        mascot_v1.delete()
        # Restoring without supplying a value for the required foreign key
        # will fail.
        with self.assertRaises(ForeignKeyRequiresValueError):
            mascot_v1.restore()
        self.assertEqual(1, Mascot.objects.filter(name=mascot_v1.name).count())
        mascot2_v1 = Mascot.objects.create(name="Big Ham", team=team)
        mascot2_v1.clone()
        with self.assertRaises(ForeignKeyRequiresValueError):
            mascot2_v1.restore()
        self.assertEqual(2,
                         Mascot.objects.filter(name=mascot2_v1.name).count())
        self.assertEqual(1, Mascot.objects.current.filter(
            name=mascot2_v1.name).count())
        # If a value (object or pk) is supplied, the restore will succeed.
        team2 = Team.objects.create(name="Submarine Sandwiches")
        restored = mascot2_v1.restore(team=team2)
        self.assertEqual(3,
                         Mascot.objects.filter(name=mascot2_v1.name).count())
        self.assertEqual(team2, restored.team)
        restored.delete()
        rerestored = mascot2_v1.restore(team_id=team.pk)
        self.assertEqual(4,
                         Mascot.objects.filter(name=mascot2_v1.name).count())
        self.assertEqual(team, rerestored.team)
    def test_over_time(self):
        team1 = Team.objects.create(name='team1.v1')
        team2 = Team.objects.create(name='team2.v1')
        p1 = Player.objects.create(name='p1.v1', team=team1)
        p2 = Player.objects.create(name='p2.v1', team=team1)
        a1 = Award.objects.create(name='a1.v1')
        t1 = get_utc_now()
        sleep(0.001)
        p1 = p1.clone()
        p1.name = 'p1.v2'
        p1.save()
        t2 = get_utc_now()
        sleep(0.001)
        p1.delete()
        a1.players.add(p2)
        t3 = get_utc_now()
        sleep(0.001)
        # NOTE(review): direct M2M assignment was removed in Django 2.0 in
        # favour of ``a1.players.set([])``; kept as-is for the Django
        # versions this code base targets.
        a1.players = []
        t4 = get_utc_now()
        sleep(0.001)
        p1 = Player.objects.get(name='p1.v2').restore(team=team2)
        # p1 did exist at t2, but not at t3.
        self.assertIsNotNone(
            Player.objects.as_of(t2).filter(name='p1.v2').first())
        self.assertIsNone(
            Player.objects.as_of(t3).filter(name='p1.v2').first())
        # p1 re-appeared later with team2, though.
        self.assertEqual(team2, Player.objects.current.get(name='p1.v2').team)
        # many-to-many relations
        self.assertEqual([], list(
            Award.objects.as_of(t2).get(name='a1.v1').players.all()))
        self.assertEqual('p2.v1', Award.objects.as_of(t3).get(
            name='a1.v1').players.first().name)
        self.assertEqual([], list(
            Award.objects.current.get(name='a1.v1').players.all()))
        # Expected version counts:
        self.assertEqual(1, Team.objects.filter(name='team1.v1').count())
        self.assertEqual(1, Team.objects.filter(name='team2.v1').count())
        self.assertEqual(3,
                         Player.objects.filter(identity=p1.identity).count())
        self.assertEqual(1, Player.objects.filter(name='p2.v1').count())
        m2m_manager = \
            Award._meta.get_field('players').remote_field.through.objects
        self.assertEqual(1, m2m_manager.all().count())
    def test_restore_two_in_memory_objects(self):
        # Tests issue #90
        # Restoring two in-memory objects with the same identity, which,
        # according to their in-memory state, are both the current version,
        # should not result in having more than one current object with the
        # same identity present in the database.
        a = City(name="A")
        a.save()
        b = a.clone()
        b.name = "B"
        b.save()
        a = City.objects.get(name="A")
        a.restore()
        b = City.objects.get(name="B")
        b2 = b.restore()
        current_objects = City.objects.filter(version_end_date=None,
                                              identity=b.identity)
        self.assertEqual(1, len(current_objects))
        self.assertEqual(b2.pk, current_objects[0].pk)
class DetachTest(TestCase):
    """Behaviour of ``detach()``: a version becomes a brand-new object
    with its own identity."""
    def test_simple_detach(self):
        """A detached clone starts a fresh version chain; the original
        chain is left untouched."""
        original = City.objects.create(name="Atlantis").clone()
        original_identity = original.identity
        detached = original.detach()
        detached.save()
        original = City.objects.current.get(pk=original_identity)
        # Same payload, but a brand-new identity for the detached copy.
        self.assertEqual(original.name, detached.name)
        self.assertEqual(detached.id, detached.identity)
        self.assertNotEqual(original.id, detached.id)
        self.assertNotEqual(original.identity, detached.identity)
        self.assertEqual(
            2, City.objects.filter(identity=original_identity).count())
        self.assertEqual(
            1, City.objects.filter(identity=detached.identity).count())
    def test_detach_with_relations(self):
        """
        ManyToMany and reverse ForeignKey relationships are not kept.
        ForeignKey relationships are kept.
        """
        team = Team.objects.create(name='Raining Rats')
        team_pk = team.pk
        Mascot.objects.create(name="Drippy", team=team)
        player = Player.objects.create(name="Robby", team=team)
        player_pk = player.pk
        award = Award.objects.create(name="Most slippery")
        award.players.add(player)
        detached_player = player.detach()
        detached_player.save()
        player = Player.objects.current.get(pk=player_pk)
        # The forward FK survives the detach ...
        self.assertEqual(team, player.team)
        self.assertEqual(team, detached_player.team)
        # ... but the M2M links do not.
        self.assertListEqual([award], list(player.awards.all()))
        self.assertListEqual([], list(detached_player.awards.all()))
        detached_team = team.detach()
        detached_team.save()
        team = Team.objects.current.get(pk=team_pk)
        # Reverse FKs stay with the original, not the detached copy.
        self.assertEqual({player, detached_player},
                         set(team.player_set.all()))
        self.assertEqual([], list(detached_team.player_set.all()))
class DeferredFieldsTest(TestCase):
    """``only()``/``defer()`` interaction with versioned models.

    Fix: replaced the deprecated ``assertEquals`` alias with
    ``assertEqual``.
    """
    def setUp(self):
        self.c1 = City.objects.create(name="Porto")
        self.team1 = Team.objects.create(name="Tigers", city=self.c1)
    def test_simple_defer(self):
        limited = City.objects.current.only('name').get(pk=self.c1.pk)
        # All versioning bookkeeping fields except the pk get deferred.
        deferred_fields = set(Versionable.VERSIONABLE_FIELDS)
        deferred_fields.remove('id')
        self.assertSetEqual(deferred_fields,
                            set(limited.get_deferred_fields()))
        for field_name in deferred_fields:
            self.assertNotIn(field_name, limited.__dict__)
        deferred_fields = ['version_start_date', 'version_end_date']
        deferred = City.objects.current.defer(*deferred_fields).get(
            pk=self.c1.pk)
        self.assertSetEqual(set(deferred_fields),
                            set(deferred.get_deferred_fields()))
        for field_name in deferred_fields:
            self.assertNotIn(field_name, deferred.__dict__)
        # Accessing deferred fields triggers queries:
        with self.assertNumQueries(2):
            self.assertEqual(self.c1.version_start_date,
                             deferred.version_start_date)
            self.assertEqual(self.c1.version_end_date,
                             deferred.version_end_date)
        # If already fetched, no query is made:
        with self.assertNumQueries(0):
            self.assertEqual(self.c1.version_start_date,
                             deferred.version_start_date)
    def test_deferred_foreign_key_field(self):
        team_full = Team.objects.current.get(pk=self.team1.pk)
        self.assertIn('city_id', team_full.__dict__)
        team_light = Team.objects.current.only('name').get(pk=self.team1.pk)
        self.assertNotIn('city_id', team_light.__dict__)
        with self.assertNumQueries(2):
            # One query to get city_id, and one query to get the related City
            # object.
            self.assertEqual(self.c1.name, team_light.city.name)
    def test_reverse_foreign_key_access(self):
        city = City.objects.current.only('name').get(identity=self.c1.identity)
        with self.assertNumQueries(2):
            # One query to get the identity, one query to get the related
            # objects.
            self.assertSetEqual({self.team1.pk},
                                {o.pk for o in city.team_set.all()})
    def test_many_to_many_access(self):
        player1 = Player.objects.create(name='Raaaaaow', team=self.team1)
        player2 = Player.objects.create(name='Pssshh', team=self.team1)
        award1 = Award.objects.create(name='Fastest paws')
        award1.players.add(player2)
        award2 = Award.objects.create(name='Frighteningly fast')
        award2.players.add(player1, player2)
        player2_light = Player.objects.current.only('name').get(
            identity=player2.identity)
        with self.assertNumQueries(1):
            # Many-to-many fields use the id field, which is always fetched,
            # so only one query should be made to get the related objects.
            self.assertSetEqual({award1.pk, award2.pk},
                                {o.pk for o in player2_light.awards.all()})
        # And from the other direction:
        award2_light = Award.objects.current.only('name').get(
            identity=award2.identity)
        with self.assertNumQueries(1):
            self.assertSetEqual({player1.pk, player2.pk},
                                {o.pk for o in award2_light.players.all()})
    def test_clone_of_deferred_object(self):
        c1_v1_partial = City.objects.current.defer('name').get(pk=self.c1.pk)
        self.assertRaisesMessage(
            ValueError,
            'Can not clone a model instance that has deferred fields',
            c1_v1_partial.clone
        )
    def test_restore_of_deferred_object(self):
        t1 = get_utc_now()
        sleep(0.001)
        c1_v2 = self.c1.clone()
        c1_v1 = City.objects.as_of(t1).defer('name').get(
            identity=c1_v2.identity)
        self.assertRaisesMessage(
            ValueError,
            'Can not restore a model instance that has deferred fields',
            c1_v1.restore
        )
class DraftTest(TestCase):
    """Behaviour of draft clones created via ``clone(is_draft=True)``.

    Fix: ``test_one_to_many_rel`` used ``assertTrue([self.team1],
    c2.team_set.all())`` — the second argument of ``assertTrue`` is the
    failure *message*, so the assertion could never fail. Replaced with a
    real ``assertEqual`` comparison.
    """
    def setUp(self):
        self.c1 = City.objects.create(name="Perth")
        self.team1 = Team.objects.create(name="capeta", city=self.c1)
        self.mr_biggs = Professor.objects.create(
            name='Mr. Biggs',
            address='123 Mainstreet, Somewhere',
            phone_number='123')
        self.s1 = Student.objects.create(name="bla")
        self.s2 = Student.objects.create(name="blabla")
    def test_simple_draft_properties(self):
        c1_draft = self.c1.clone(is_draft=True)
        # Checking if is_draft property is true in the clone
        self.assertTrue(c1_draft.is_draft)
        latest_published_version = City.objects.as_of().filter(is_draft=False, identity=self.c1.identity).first()
        # Checking if original published version is still latest
        self.assertEqual(self.c1, latest_published_version)
        # Checking if published version has not been deleted
        self.assertFalse(latest_published_version.version_end_date)
        # Checking if draft id and identity haven't became equal
        self.assertNotEqual(c1_draft.id, c1_draft.identity)
    def test_one_to_many_rel(self):
        c2 = self.c1.clone(is_draft=True)
        # Testing one to many relationship results
        self.assertEqual([self.team1], list(c2.team_set.all()))
        self.team1.delete()
        # Testing the case if an objects gets deleted in the relationship
        self.assertEqual([], list(City.objects.as_of().filter(is_draft=True, identity=c2.identity
                                                              ).first().team_set.all()))
    def test_many_to_one_rel(self):
        city = City.objects.create(name="new york")
        draft_city = city.clone(is_draft=True)
        team = Team.objects.create(name="devils", city=draft_city)
        # Testing many to one relationship
        self.assertEqual(team.city, draft_city)
        draft_team = team.clone(is_draft=True)
        # Testing if relationship gets copied in draft clone
        self.assertEqual(draft_team.city, draft_city)
        # Testing if is_draft in true in draft clone object
        self.assertEqual(draft_team.is_draft, True)
        draft_city.delete()
        # Testing if draft object gets deleted if the cascaded one get deleted
        self.assertIsNone(Team.objects.as_of().filter(identity=draft_team.identity, is_draft=True).first())
        # Testing if a published object gets deleted if a cascaded draft object gets deleted
        self.assertIsNone(Team.objects.as_of().filter(identity=draft_team.identity, is_draft=False).first())
    def test_many_to_many_rel(self):
        s1_draft = self.s1.clone(is_draft=True)
        mr_biggs_draft = self.mr_biggs.clone(is_draft=True)
        s1_draft.professors.add(self.mr_biggs)
        s1_draft.professors.add(mr_biggs_draft)
        # Checking if draft object has the correct no of relationships
        self.assertEqual(Student.objects.as_of().filter(
            is_draft=True, identity=s1_draft.identity).first().professors.all().count(), 2)
        mr_biggs_draft.delete()
        # Checking if the draft object has correct no. of relationships after deleting some
        self.assertEqual(Student.objects.as_of().filter(
            is_draft=True, identity=s1_draft.identity).first().professors.all().count(), 1)
        mr_biggs_draft2 = self.mr_biggs.clone(is_draft=True)
        mr_biggs_draft2.students.add(s1_draft)
        mr_biggs_draft2.students.add(self.s1)
        # Checking if draft object has the correct no of relationships
        self.assertEqual(Professor.objects.as_of().filter(
            is_draft=True, identity=mr_biggs_draft2.identity).first().students.all().count(), 2)
        s1_draft.delete()
        # Checking if the draft object has correct no. of relationships after deleting some
        self.assertEqual(Professor.objects.as_of().filter(
            is_draft=True, identity=mr_biggs_draft2.identity).first().students.all().count(), 1)
class PublishedVersionListGetTest(TestCase):
    """Retrieving the current published version and the full list of
    published (non-draft) versions."""
    def setUp(self):
        self.c1 = City.objects.create(name="Manchester")
        self.team1 = Team.objects.create(name="bluefish", city=self.c1)
    def test_get_published_version(self):
        # Build a chain of published versions with one draft in between;
        # the draft (c5) must not count as the current published version.
        c2 = self.c1.clone()
        c3 = c2.clone()
        c4 = c3.clone()
        c5 = c4.clone(is_draft=True)
        c6 = c4.clone()
        # Testing if current_published_version result matches the expected result
        self.assertEqual(c6, City.objects.current_published_version(self.c1))
    def test_get_published_version_list_test(self):
        team2 = self.team1.clone()
        draft_team3 = team2.clone(is_draft=True)
        team4 = team2.clone()
        draft_team5 = team4.clone(is_draft=True)
        team6 = team4.clone()
        draft7 = team6.clone(is_draft=True)
        # Drafts are excluded; newest published version comes first.
        expected_result = [team6.version_start_date, team4.version_start_date, team2.version_start_date,
                           self.team1.version_start_date]
        actual_result = Team.objects.get_published_version_list(self.team1).values_list('version_start_date', flat=True)
        # Testing if the result of get_published_version_list matches the expected result
        self.assertEqual(list(actual_result), expected_result)
|
{"/versions_tests/tests/test_admin.py": ["/versions/admin.py", "/versions_tests/models.py"], "/versions_tests/tests/test_models.py": ["/versions/exceptions.py", "/versions/models.py", "/versions_tests/models.py"], "/versions/models.py": ["/versions/exceptions.py", "/versions/settings.py", "/versions/util/__init__.py", "/versions/fields.py"], "/versions/util/helper.py": ["/versions/models.py"], "/versions_tests/admin.py": ["/versions/admin.py", "/versions_tests/models.py"], "/versions_tests/tests/test_utils.py": ["/versions/util/postgresql.py", "/versions_tests/models.py"], "/versions/fields.py": ["/versions/descriptors.py", "/versions/models.py"], "/versions/descriptors.py": ["/versions/util/__init__.py", "/versions/models.py"], "/versions/deletion.py": ["/versions/models.py"], "/versions_tests/models.py": ["/versions/fields.py", "/versions/models.py"], "/versions_tests/apps.py": ["/versions/util/postgresql.py"], "/versions/util/postgresql.py": ["/versions/fields.py", "/versions/util/helper.py"]}
|
7,535
|
JoshLabs/cleanerversion
|
refs/heads/master
|
/versions/models.py
|
# Copyright 2014 Swisscom, Sophia Engineering
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import datetime
import uuid
from collections import namedtuple
from django.core.exceptions import SuspiciousOperation, ObjectDoesNotExist
from django.db import models, router, transaction
from django.db.models import Q
from django.db.models.constants import LOOKUP_SEP
from django.db.models.fields.related import ForeignKey
from django.db.models.query import QuerySet, ModelIterable
from django.db.models.sql.datastructures import Join
from django.db.models.sql.query import Query
from django.db.models.sql.where import WhereNode
from django.utils import six
from django.utils.timezone import utc
from versions.exceptions import DeletionOfNonCurrentVersionError
from versions.settings import get_versioned_delete_collector_class, \
settings as versions_settings
from versions.util import get_utc_now
def get_utc_now():
    """Return the current UTC time as a timezone-aware datetime.

    Fix: the original called ``.replace()`` with no arguments — a no-op
    that returned a *naive* datetime. Given the ``utc`` import from
    ``django.utils.timezone`` at the top of this module, the intended call
    is ``.replace(tzinfo=utc)``.

    NOTE(review): this local definition shadows the ``get_utc_now``
    imported from ``versions.util`` just above — confirm the local
    redefinition is intentional.
    """
    return datetime.datetime.utcnow().replace(tzinfo=utc)
def validate_uuid(uuid_obj):
    """
    Return True when ``uuid_obj`` is a ``uuid.UUID`` instance whose
    version field is 4; False for anything else.
    """
    if not isinstance(uuid_obj, uuid.UUID):
        return False
    return uuid_obj.version == 4
# ``time`` is the as-of timestamp (or None for "current"); ``active`` says
# whether time-based filtering should be applied at all.
QueryTime = namedtuple('QueryTime', ['time', 'active'])
class ForeignKeyRequiresValueError(ValueError):
    """Raised when a versioned FK needs a value but none was supplied."""
class VersionManager(models.Manager):
"""
This is the Manager-class for any class that inherits from Versionable
"""
use_for_related_fields = True
def get_queryset(self):
"""
Returns a VersionedQuerySet capable of handling version time
restrictions.
:return: VersionedQuerySet
"""
qs = VersionedQuerySet(self.model, using=self._db)
if hasattr(self, 'instance') and hasattr(self.instance, '_querytime'):
qs.querytime = self.instance._querytime
return qs
def as_of(self, time=None):
"""
Filters Versionables at a given time
:param time: The timestamp (including timezone info) at which
Versionables shall be retrieved
:return: A QuerySet containing the base for a timestamped query.
"""
return self.get_queryset().as_of(time)
def next_version(self, object, relations_as_of='end'):
"""
Return the next version of the given object.
In case there is no next object existing, meaning the given
object is the current version, the function returns this version.
Note that if object's version_end_date is None, this does not check
the database to see if there is a newer version (perhaps created by
some other code), it simply returns the passed object.
``relations_as_of`` is used to fix the point in time for the version;
this affects which related objects are returned when querying for
object relations. See ``VersionManager.version_as_of`` for details
on valid ``relations_as_of`` values.
:param Versionable object: object whose next version will be returned.
:param mixed relations_as_of: determines point in time used to access
relations. 'start'|'end'|datetime|None
:return: Versionable
"""
if object.version_end_date is None:
next = object
else:
next = self.filter(
Q(identity=object.identity),
Q(version_start_date__gte=object.version_end_date)
).order_by('version_start_date').first()
if not next:
raise ObjectDoesNotExist(
"next_version couldn't find a next version of object " +
str(object.identity))
return self.adjust_version_as_of(next, relations_as_of)
def previous_version(self, object, relations_as_of='end'):
"""
Return the previous version of the given object.
In case there is no previous object existing, meaning the given object
is the first version of the object, then the function returns this
version.
``relations_as_of`` is used to fix the point in time for the version;
this affects which related objects are returned when querying for
object relations. See ``VersionManager.version_as_of`` for details on
valid ``relations_as_of`` values.
:param Versionable object: object whose previous version will be
returned.
:param mixed relations_as_of: determines point in time used to access
relations. 'start'|'end'|datetime|None
:return: Versionable
"""
if object.version_birth_date == object.version_start_date:
previous = object
else:
previous = self.filter(
Q(identity=object.identity),
Q(version_end_date__lte=object.version_start_date)
).order_by('-version_end_date').first()
if not previous:
raise ObjectDoesNotExist(
"previous_version couldn't find a previous version of "
"object " + str(object.identity))
return self.adjust_version_as_of(previous, relations_as_of)
def current_published_version(self, object, relations_as_of=None, check_db=False):
"""
Return the current published version of the given object.
The current published version is the one having its version_end_date set
to NULL and is_draft set to False.
If there is not such a version then it means the object has been
'deleted' and so there is no current version available. In this case
the function returns None.
Note that if check_db is False and object's version_end_date is None,
this does not check the database to see if there is a newer version
(perhaps created by some other code), it simply returns the passed
object.
``relations_as_of`` is used to fix the point in time for the version;
this affects which related objects are returned when querying for
object relations. See ``VersionManager.version_as_of`` for details on
valid ``relations_as_of`` values.
:param Versionable object: object whose current published version will be
returned.
:param mixed relations_as_of: determines point in time used to access
relations. 'start'|'end'|datetime|None
:param bool check_db: Whether or not to look in the database for a
more recent version
:return: Versionable
"""
if object.version_end_date is None and not object.is_draft and not check_db:
current = object
else:
current = self.current.filter(is_draft=False, identity=object.identity).first()
return self.adjust_version_as_of(current, relations_as_of)
def current_version(self, object, relations_as_of=None, check_db=False):
"""
Return the current version of the given object.
The current version is the one having its version_end_date set to NULL.
If there is not such a version then it means the object has been
'deleted' and so there is no current version available. In this case
the function returns None.
Note that if check_db is False and object's version_end_date is None,
this does not check the database to see if there is a newer version
(perhaps created by some other code), it simply returns the passed
object.
``relations_as_of`` is used to fix the point in time for the version;
this affects which related objects are returned when querying for
object relations. See ``VersionManager.version_as_of`` for details on
valid ``relations_as_of`` values.
:param Versionable object: object whose current version will be
returned.
:param mixed relations_as_of: determines point in time used to access
relations. 'start'|'end'|datetime|None
:param bool check_db: Whether or not to look in the database for a
more recent version
:return: Versionable
"""
if object.version_end_date is None and not check_db:
current = object
else:
current = self.current.filter(identity=object.identity).first()
return self.adjust_version_as_of(current, relations_as_of)
def get_published_version_list(self, object):
"""
Return the published version list of the given object.
The published version is the one having its is_draft set to True.
:param Versionable object: object whose published version list would be
returned.
:return: A VersionedQuerySet
"""
return self.filter(is_draft=False, identity=object.identity).order_by('-version_start_date')
@staticmethod
def adjust_version_as_of(version, relations_as_of):
"""
Adjusts the passed version's as_of time to an appropriate value, and
returns it.
``relations_as_of`` is used to fix the point in time for the version;
this affects which related objects are returned when querying for
object relations.
Valid ``relations_as_of`` values and how this affects the returned
version's as_of attribute:
- 'start': version start date
- 'end': version end date - 1 microsecond (no effect if version is
current version)
- datetime object: given datetime (raises ValueError if given datetime
not valid for version)
- None: unset (related object queries will not be restricted to a
point in time)
:param Versionable object: object whose as_of will be adjusted as
requested.
:param mixed relations_as_of: valid values are the strings 'start' or
'end', or a datetime object.
:return: Versionable
"""
if not version:
return version
if relations_as_of == 'end':
if version.is_current:
# Ensure that version._querytime is active, in case it wasn't
# before.
version.as_of = None
else:
version.as_of = version.version_end_date - datetime.timedelta(
microseconds=1)
elif relations_as_of == 'start':
version.as_of = version.version_start_date
elif isinstance(relations_as_of, datetime.datetime):
as_of = relations_as_of
if not as_of >= version.version_start_date:
raise ValueError(
"Provided as_of '{}' is earlier than version's start "
"time '{}'".format(
as_of.isoformat(),
version.version_start_date.isoformat()
)
)
if version.version_end_date is not None \
and as_of >= version.version_end_date:
raise ValueError(
"Provided as_of '{}' is later than version's start "
"time '{}'".format(
as_of.isoformat(),
version.version_end_date.isoformat()
)
)
version.as_of = as_of
elif relations_as_of is None:
version._querytime = QueryTime(time=None, active=False)
else:
raise TypeError(
"as_of parameter must be 'start', 'end', None, or datetime "
"object")
return version
@property
def current(self):
    """Shortcut for an as-of-now queryset (current versions only)."""
    current_qs = self.as_of(None)
    return current_qs
def create(self, **kwargs):
    """
    Creates an instance of a Versionable.

    No timestamp is forced here; ``_create_at`` falls back to the
    current UTC time for the version and birth dates.

    :param kwargs: arguments used to initialize the class instance
    :return: a Versionable instance of the class
    """
    return self._create_at(None, **kwargs)
def _create_at(self, timestamp=None, id=None, forced_identity=None,
               **kwargs):
    """
    WARNING: Only for internal use and testing.

    Create a Versionable having a version_start_date and
    version_birth_date set to some pre-defined timestamp.

    :param timestamp: point in time at which the instance has to be created
    :param id: version 4 UUID unicode object. Usually this is not
        specified, it will be automatically created.
    :param forced_identity: version 4 UUID unicode object. For internal
        use only.
    :param kwargs: arguments needed for initializing the instance
    :return: an instance of the class
    """
    version_id = Versionable.uuid(id)
    # A forced identity links the new record to an existing object;
    # otherwise the first version's id doubles as its identity.
    object_identity = (Versionable.uuid(forced_identity)
                       if forced_identity else version_id)
    creation_time = get_utc_now() if timestamp is None else timestamp
    kwargs.update(
        id=version_id,
        identity=object_identity,
        version_start_date=creation_time,
        version_birth_date=creation_time,
    )
    return super(VersionManager, self).create(**kwargs)
class VersionedWhereNode(WhereNode):
    """WhereNode subclass that pushes the query's as-of time down into
    VersionedExtraWhere children just before SQL generation."""

    def as_sql(self, qn, connection):
        """
        This method identifies joined table aliases in order for
        VersionedExtraWhere.as_sql() to be able to add time restrictions for
        those tables based on the VersionedQuery's querytime value.

        :param qn: In Django 1.7 & 1.8 this is a compiler
        :param connection: A DB connection
        :return: A tuple consisting of (sql_string, result_params)
        """
        # self.children is an array of VersionedExtraWhere-objects
        from versions.fields import VersionedExtraWhere
        for child in self.children:
            # Only touch children that have not yet received parameters.
            if isinstance(child, VersionedExtraWhere) and not child.params:
                _query = qn.query
                query_time = _query.querytime.time
                apply_query_time = _query.querytime.active
                alias_map = _query.alias_map
                self._set_child_joined_alias(child, alias_map)
                if apply_query_time:
                    # Add query parameters that have not been added till now
                    child.set_as_of(query_time)
                else:
                    # Remove the restriction if it's not required
                    child.sqls = []
        return super(VersionedWhereNode, self).as_sql(qn, connection)

    @staticmethod
    def _set_child_joined_alias(child, alias_map):
        """
        Set the joined alias on the child, for Django >= 1.8.0

        :param child: a VersionedExtraWhere instance
        :param alias_map: the query's table-alias -> join mapping
        """
        for table in alias_map:
            join = alias_map[table]
            if not isinstance(join, Join):
                continue
            lhs = join.parent_alias
            # A join matches when its two sides are exactly the child's
            # (alias, related_alias) pair, in either direction.
            if (lhs == child.alias and table == child.related_alias) \
                    or (lhs == child.related_alias and table == child.alias):
                child.set_joined_alias(table)
                break
class VersionedQuery(Query):
    """
    VersionedQuery has awareness of the query time restrictions. When the
    query is compiled, this query time information is passed along to the
    foreign keys involved in the query, so that they can provide that
    information when building the sql.
    """

    def __init__(self, *args, **kwargs):
        """Force the use of VersionedWhereNode and start with an inactive
        querytime."""
        # Imported lazily to avoid a circular import with versions.fields.
        from .fields import VersionedWhereNode
        kwargs['where'] = VersionedWhereNode
        super(VersionedQuery, self).__init__(*args, **kwargs)
        self.querytime = QueryTime(time=None, active=False)

    def clone(self, *args, **kwargs):
        """Clone the query, carrying the querytime over when possible."""
        _clone = super(VersionedQuery, self).clone(*args, **kwargs)
        try:
            _clone.querytime = self.querytime
        except AttributeError:
            # If the caller is using clone to create a different type of Query,
            # that's OK.
            # An example of this is when creating or updating an object, this
            # method is called with a first parameter of sql.UpdateQuery.
            pass
        return _clone

    def get_compiler(self, *args, **kwargs):
        """
        Add the query time restriction limit at the last moment. Applying it
        earlier (e.g. by adding a filter to the queryset) does not allow the
        caching of related object to work (they are attached to a queryset;
        filter() returns a new queryset).
        """
        if self.querytime.active and \
                (not hasattr(self, '_querytime_filter_added') or
                 not self._querytime_filter_added):
            time = self.querytime.time
            if time is None:
                # No explicit time: restrict to current versions only.
                self.add_q(Q(version_end_date__isnull=True))
            else:
                # Restrict to versions valid at `time`:
                # start <= time < end (or still open-ended).
                self.add_q(
                    (Q(version_end_date__gt=time) |
                     Q(version_end_date__isnull=True)) &
                    Q(version_start_date__lte=time)
                )
            # Ensure applying these filters happens only a single time (even
            # if it doesn't falsify the query, it's just not very comfortable
            # to read)
            self._querytime_filter_added = True
        return super(VersionedQuery, self).get_compiler(*args, **kwargs)

    def build_filter(self, filter_expr, **kwargs):
        """
        When a query is filtered with an expression like
        .filter(team=some_team_object), where team is a VersionedForeignKey
        field, and some_team_object is a Versionable object, adapt the filter
        value to be (team__identity=some_team_object.identity).

        When the query is built, this will enforce that the tables are joined
        and that the identity column and the as_of restriction is used for
        matching.

        For example, the generated SQL will be like:

            SELECT ... FROM foo INNER JOIN team ON (
                 foo.team_id == team.identity
                 AND foo.version_start_date <= [as_of]
                 AND (foo.version_end_date > [as_of]
                      OR foo.version_end_date IS NULL)) ...

        This is not necessary, and won't be applied, if any of these are true:
        - no as_of is in effect
        - the current objects are being queried
          (e.g. foo.objects.current.filter(...))
        - a terminal object is being used as the lookup value
          (e.g. .filter(team=the_deleted_team_version)
        - the lookup value is not a Versionable
          (e.g. .filter(foo='bar') or .filter(team=non_versionable_team)

        Note that this has the effect that
        Foo.objects.as_of(t1).filter(team=team_object_at_t3) will return the
        Foo objects at t1, and that accessing their team field (e.g. foo.team)
        will return the team object that was associated with them at t1,
        which may be a different object than team_object_at_t3.

        The goal is to make expressions like
        Foo.objects.as_of(tx).filter(team=some_team_object) work as closely
        as possible to standard, non-versioned Django querysets like
        Foo.objects.filter(team=some_team_object).

        :param filter_expr: (lookup, value) tuple as passed by Django
        :param kwargs: forwarded to Query.build_filter
        :return: tuple
        """
        lookup, value = filter_expr
        if self.querytime.active \
                and isinstance(value, Versionable) and not value.is_latest:
            # Rewrite the lookup to match on the object identity instead of
            # the specific version's primary key.
            new_lookup = \
                lookup + LOOKUP_SEP + Versionable.OBJECT_IDENTIFIER_FIELD
            filter_expr = (new_lookup, value.identity)
        return super(VersionedQuery, self).build_filter(filter_expr, **kwargs)

    def add_immediate_loading(self, field_names):
        """Pass-through; kept as an extension point (see TODO below)."""
        # TODO: Decide, whether we always want versionable fields to be loaded,
        # even if ``only`` is used and they would be deferred
        # field_names += tuple(Versionable.VERSIONABLE_FIELDS)
        super(VersionedQuery, self).add_immediate_loading(field_names)
class VersionedQuerySet(QuerySet):
    """
    The VersionedQuerySet makes sure that every objects retrieved from it has
    the added property 'query_time' added to it.
    For that matter it override the __getitem__, _fetch_all and _clone methods
    for its parent class (QuerySet).
    """

    def __init__(self, model=None, query=None, *args, **kwargs):
        """
        Overridden so that a VersionedQuery will be used.
        """
        if not query:
            query = VersionedQuery(model)
        super(VersionedQuerySet, self).__init__(model=model, query=query,
                                                *args, **kwargs)
        self.querytime = QueryTime(time=None, active=False)

    @property
    def querytime(self):
        # Simple accessor for the stored QueryTime value.
        return self._querytime

    @querytime.setter
    def querytime(self, value):
        """
        Sets self._querytime as well as self.query.querytime.
        :param value: None or datetime
        :return:
        """
        self._querytime = value
        self.query.querytime = value

    def __getitem__(self, k):
        """
        Overrides the QuerySet.__getitem__ magic method for retrieving a
        list-item out of a query set.

        :param k: Retrieve the k-th element or a range of elements
        :return: Either one element or a list of elements
        """
        item = super(VersionedQuerySet, self).__getitem__(k)
        if isinstance(item, (list,)):
            # Slicing yields a list; stamp each element.
            for i in item:
                self._set_item_querytime(i)
        else:
            self._set_item_querytime(item)
        return item

    def _fetch_all(self):
        """
        Completely overrides the QuerySet._fetch_all method by adding the
        timestamp to all objects

        :return: See django.db.models.query.QuerySet._fetch_all for return
            values
        """
        if self._result_cache is None:
            self._result_cache = list(self.iterator())
            # TODO: Do we have to test for ValuesListIterable, ValuesIterable,
            # and FlatValuesListIterable here?
            if self._iterable_class == ModelIterable:
                # Only model instances can carry a querytime; values()/
                # values_list() results are plain tuples/dicts.
                for x in self._result_cache:
                    self._set_item_querytime(x)
        if self._prefetch_related_lookups and not self._prefetch_done:
            self._prefetch_related_objects()

    def _clone(self, *args, **kwargs):
        """
        Overrides the QuerySet._clone method by adding the cloning of the
        VersionedQuerySet's query_time parameter

        :param kwargs: Same as the original QuerySet._clone params
        :return: Just as QuerySet._clone, this method returns a clone of the
            original object
        """
        # NOTE(review): positional args are accepted but not forwarded to
        # super()._clone() — confirm this matches the targeted Django
        # version's _clone() signature.
        clone = super(VersionedQuerySet, self)._clone(**kwargs)
        clone.querytime = self.querytime
        return clone

    def _set_item_querytime(self, item, type_check=True):
        """
        Sets the time for which the query was made on the resulting item

        :param item: an item of type Versionable
        :param type_check: Check the item to be a Versionable
        :return: Returns the item itself with the time set
        """
        if isinstance(item, Versionable):
            item._querytime = self.querytime
        elif isinstance(item, VersionedQuerySet):
            item.querytime = self.querytime
        else:
            if type_check:
                raise TypeError(
                    "This item is not a Versionable, it's a " + str(
                        type(item)))
        return item

    def as_of(self, qtime=None):
        """
        Sets the time for which we want to retrieve an object.

        :param qtime: The UTC date and time; if None then use the current
            state (where version_end_date = NULL)
        :return: A VersionedQuerySet
        """
        clone = self._clone()
        clone.querytime = QueryTime(time=qtime, active=True)
        return clone

    def delete(self):
        """
        Deletes the records in the QuerySet.
        """
        assert self.query.can_filter(), \
            "Cannot use 'limit' or 'offset' with delete."

        # Ensure that only current objects are selected.
        del_query = self.filter(version_end_date__isnull=True)

        # The delete is actually 2 queries - one to find related objects,
        # and one to delete. Make sure that the discovery of related
        # objects is performed on the same database as the deletion.
        del_query._for_write = True

        # Disable non-supported fields.
        del_query.query.select_for_update = False
        del_query.query.select_related = False
        del_query.query.clear_ordering(force_empty=True)

        collector_class = get_versioned_delete_collector_class()
        collector = collector_class(using=del_query.db)
        collector.collect(del_query)
        # Versioned delete: terminate versions at "now" instead of
        # physically removing rows.
        collector.delete(get_utc_now())

        # Clear the result cache, in case this QuerySet gets reused.
        self._result_cache = None

    delete.alters_data = True
    delete.queryset_only = True
class Versionable(models.Model):
    """
    This is pretty much the central point for versioning objects.
    """

    # Names of the two identifier fields, referenced throughout the library.
    VERSION_IDENTIFIER_FIELD = 'id'
    OBJECT_IDENTIFIER_FIELD = 'identity'
    # All bookkeeping fields managed by the versioning machinery.
    VERSIONABLE_FIELDS = [VERSION_IDENTIFIER_FIELD, OBJECT_IDENTIFIER_FIELD,
                          'version_start_date',
                          'version_end_date', 'version_birth_date', 'is_draft']

    if versions_settings.VERSIONS_USE_UUIDFIELD:
        id = models.UUIDField(primary_key=True)
        """id stands for ID and is the primary key; sometimes also referenced
        as the surrogate key"""
    else:
        id = models.CharField(max_length=36, primary_key=True)

    if versions_settings.VERSIONS_USE_UUIDFIELD:
        identity = models.UUIDField()
        """identity is used as the identifier of an object, ignoring its
        versions; sometimes also referenced as the natural key"""
    else:
        identity = models.CharField(max_length=36)
        """identity is used as the identifier of an object, ignoring its
        versions; sometimes also referenced as the natural key"""

    version_start_date = models.DateTimeField()
    """version_start_date points the moment in time, when a version was
    created (ie. an versionable was cloned). This means, it points the start
    of a clone's validity period"""

    version_end_date = models.DateTimeField(null=True, default=None,
                                            blank=True)
    """version_end_date, if set, points the moment in time, when the entry was
    duplicated (ie. the entry was cloned). It points therefore the end of a
    clone's validity period"""

    version_birth_date = models.DateTimeField()
    """version_birth_date contains the timestamp pointing to when the
    versionable has been created (independent of any version); This timestamp
    is bound to an identity"""

    is_draft = models.BooleanField(default=False)
    """is_draft is basically a check whether the model is published or still in
    draft state. A draft state object which doesnot act as a normal versionable
    object. It doesnot come in list of versionable published objects. It can
    never be the current object"""

    objects = VersionManager()
    """Make the versionable compliant with Django"""

    # NOTE(review): this class attribute is shadowed by the ``as_of``
    # property defined further down in this class body, so the ``None``
    # assignment here has no runtime effect.
    as_of = None
    """Hold the timestamp at which the object's data was looked up. Its value
    must always be in between the version_start_date and the
    version_end_date"""

    class Meta:
        abstract = True
        unique_together = ('id', 'identity')

    def __init__(self, *args, **kwargs):
        """Initialize the instance and backfill versioning fields for
        freshly constructed (non-deferred) objects."""
        super(Versionable, self).__init__(*args, **kwargs)

        # _querytime is for library-internal use.
        self._querytime = QueryTime(time=None, active=False)

        # Ensure that the versionable field values are set.
        # If there are any deferred fields, then this instance is being
        # initialized from data in the database, and thus these values will
        # already be set (unless the fields are deferred, in which case they
        # should not be set here).
        if not self.get_deferred_fields():
            if not getattr(self, 'version_start_date', None):
                setattr(self, 'version_start_date', get_utc_now())
            if not getattr(self, 'version_birth_date', None):
                setattr(self, 'version_birth_date', self.version_start_date)
            if not getattr(self, self.VERSION_IDENTIFIER_FIELD, None):
                setattr(self, self.VERSION_IDENTIFIER_FIELD, self.uuid())
            if not getattr(self, self.OBJECT_IDENTIFIER_FIELD, None):
                setattr(self, self.OBJECT_IDENTIFIER_FIELD,
                        getattr(self, self.VERSION_IDENTIFIER_FIELD))

    def delete(self, using=None, keep_parents=False):
        """Versioned delete: collect this object (and related ones) with the
        versioned delete collector and terminate them at the current UTC
        time, rather than removing rows."""
        using = using or router.db_for_write(self.__class__, instance=self)
        assert self._get_pk_val() is not None, \
            "{} object can't be deleted because its {} attribute is set to " \
            "None.".format(self._meta.object_name, self._meta.pk.attname)
        collector_class = get_versioned_delete_collector_class()
        collector = collector_class(using=using)
        collector.collect([self], keep_parents=keep_parents)
        collector.delete(get_utc_now())

    def _delete_at(self, timestamp, using=None):
        """
        WARNING: This method is only for internal use, it should not be used
        from outside.

        It is used only in the case when you want to make sure a group of
        related objects are deleted at the exact same time.

        It is certainly not meant to be used for deleting an object and giving
        it a random deletion date of your liking.
        """
        if self.version_end_date is None:
            self.version_end_date = timestamp
            self.save(force_update=True, using=using)
        else:
            raise DeletionOfNonCurrentVersionError(
                'Cannot delete anything else but the current version')

    @property
    def is_current(self):
        # A version is current while it has not been terminated.
        return self.version_end_date is None

    @property
    def is_latest(self):
        """
        Checks if this is the latest version.

        Note that this will not check the database for a possible newer
        version.
        It simply inspects the object's in-memory state.

        :return: boolean
        """
        return self.id == self.identity

    @property
    def is_terminated(self):
        """
        Checks if this version has been terminated.

        This will be true if a newer version has been created, or if the
        version has been "deleted".

        :return: boolean
        """
        return self.version_end_date is not None

    @property
    def as_of(self):
        """The point in time this version's data was looked up at, or None."""
        return self._querytime.time

    @as_of.setter
    def as_of(self, time):
        # Setting as_of activates the querytime restriction for related
        # object lookups.
        self._querytime = QueryTime(time=time, active=True)

    @staticmethod
    def uuid(uuid_value=None):
        """
        Returns a uuid value that is valid to use for id and identity fields.

        :return: unicode uuid object if using UUIDFields, uuid unicode string
            otherwise.
        """
        if uuid_value:
            if not validate_uuid(uuid_value):
                raise ValueError(
                    "uuid_value must be a valid UUID version 4 object")
        else:
            uuid_value = uuid.uuid4()
        if versions_settings.VERSIONS_USE_UUIDFIELD:
            return uuid_value
        else:
            return six.u(str(uuid_value))

    def _clone_at(self, timestamp):
        """
        WARNING: This method is only for internal use, it should not be used
        from outside.

        This function is mostly intended for testing, to allow creating
        realistic test cases.
        """
        return self.clone(forced_version_date=timestamp)

    def clone(self, forced_version_date=None, in_bulk=False, is_draft=False):
        """
        Clones a Versionable and returns a fresh copy of the original object.
        Original source: ClonableMixin snippet
        (http://djangosnippets.org/snippets/1271), with the pk/id change
        suggested in the comments

        :param forced_version_date: a timestamp including tzinfo; this value
            is usually set only internally!
        :param in_bulk: whether not to write this objects to the database
            already, if not necessary; this value is usually set only
            internally for performance optimization
        :param is_draft: whether the clone is in draft state or published
            state
        :return: returns a fresh clone of the original object
            (with adjusted relations)
        """
        if not self.pk:
            raise ValueError('Instance must be saved before it can be cloned')

        if self.version_end_date:
            raise ValueError(
                'This is a historical item and can not be cloned.')

        if forced_version_date:
            if not self.version_start_date <= forced_version_date <= \
                    get_utc_now():
                raise ValueError(
                    'The clone date must be between the version start date '
                    'and now.')
        else:
            forced_version_date = get_utc_now()

        if self.get_deferred_fields():
            # It would be necessary to fetch the record from the database
            # again for this to succeed.
            # Alternatively, perhaps it would be possible to create a copy of
            # the object after fetching the missing fields.
            # Doing so may be unexpected by the calling code, so raise an
            # exception: the calling code should be adapted if necessary.
            raise ValueError(
                'Can not clone a model instance that has deferred fields')

        earlier_version = self

        later_version = copy.copy(earlier_version)
        later_version.version_end_date = None
        later_version.version_start_date = forced_version_date

        # set earlier_version's ID to a new UUID so the clone (later_version)
        # can get the old one -- this allows 'head' to always have the original
        # id allowing us to get at all historic foreign key relationships
        if is_draft:
            later_version.id = self.uuid()
        else:
            earlier_version.id = self.uuid()
            earlier_version.version_end_date = forced_version_date

        later_version.is_draft = is_draft

        if not in_bulk:
            # This condition might save us a lot of database queries if we are
            # being called from a loop like in .clone_relations
            earlier_version.save()
            later_version.save()
        else:
            # Flag the instance so a later bulk_create can pick it up
            # (see clone_relations).
            earlier_version._not_created = True

        # re-create ManyToMany relations
        for field_name in self.get_all_m2m_field_names():
            earlier_version.clone_relations(later_version, field_name,
                                            forced_version_date)

        return later_version

    def at(self, timestamp):
        """
        Force the create date of an object to be at a certain time; This
        method can be invoked only on a freshly created Versionable object.
        It must not have been cloned yet. Raises a SuspiciousOperation
        exception, otherwise.

        :param timestamp: a datetime.datetime instance
        """
        # Ensure, it's not a historic item
        if not self.is_current:
            raise SuspiciousOperation(
                "Cannot relocate this Versionable instance in time, since it "
                "is a historical item")
        # Ensure it's not a versioned item (that would lead to some ugly
        # situations...
        if not self.version_birth_date == self.version_start_date:
            raise SuspiciousOperation(
                "Cannot relocate this Versionable instance in time, since it "
                "is a versioned instance")
        # Ensure the argument is really a timestamp
        if not isinstance(timestamp, datetime.datetime):
            raise ValueError("This is not a datetime.datetime timestamp")
        self.version_birth_date = self.version_start_date = timestamp
        return self

    def clone_relations(self, clone, manager_field_name, forced_version_date):
        """Re-wire the M2M relations named by ``manager_field_name`` so that
        the clone carries the current relations while historic relation rows
        keep pointing at this (now historic) version."""
        # Source: the original object, where relations are currently
        # pointing to
        source = getattr(self,
                         manager_field_name)
        # returns a VersionedRelatedManager instance
        # Destination: the clone, where the cloned relations should point to
        destination = getattr(clone, manager_field_name)
        for item in source.all():
            destination.add(item)

        # retrieve all current m2m relations pointing the newly created clone
        # filter for source_id
        m2m_rels = list(source.through.objects.filter(
            **{source.source_field.attname: clone.id}))
        later_current = []
        later_non_current = []
        for rel in m2m_rels:
            # Only clone the relationship, if it is the current one; Simply
            # adjust the older ones to point the old entry.
            # Otherwise, the number of pointers pointing an entry will grow
            # exponentially
            if rel.is_current:
                later_current.append(
                    rel.clone(forced_version_date=self.version_end_date,
                              in_bulk=True))
                # On rel, which is no more 'current', set the source ID to
                # self.id
                setattr(rel, source.source_field_name, self)
            else:
                later_non_current.append(rel)

        # Perform the bulk changes rel.clone() did not perform because of the
        # in_bulk parameter.
        # This saves a huge bunch of SQL queries:
        # - update current version entries
        source.through.objects.filter(
            id__in=[l.id for l in later_current]).update(
            **{'version_start_date': forced_version_date})
        # - update entries that have been pointing the current object, but
        #   have never been 'current'
        source.through.objects.filter(
            id__in=[l.id for l in later_non_current]).update(
            **{source.source_field_name: self})
        # - create entries that were 'current', but which have been relieved
        #   in this method run
        source.through.objects.bulk_create(
            [r for r in m2m_rels
             if hasattr(r, '_not_created') and r._not_created])

    def restore(self, **kwargs):
        """
        Restores this version as a new version, and returns this new version.

        If a current version already exists, it will be terminated before
        restoring this version.

        Relations (foreign key, reverse foreign key, many-to-many) are not
        restored with the old version. If provided in kwargs,
        (Versioned)ForeignKey fields will be set to the provided values.
        If passing an id for a (Versioned)ForeignKey, use the field.attname.
        For example:

            restore(team_id=myteam.pk)

        If passing an object, simply use the field name, e.g.:

            restore(team=myteam)

        If a (Versioned)ForeignKey is not nullable and no value is provided
        for it in kwargs, a ForeignKeyRequiresValueError will be raised.

        :param kwargs: arguments used to initialize the class instance
        :return: Versionable
        """
        if not self.pk:
            raise ValueError(
                'Instance must be saved and terminated before it can be '
                'restored.')

        if self.is_current:
            raise ValueError(
                'This is the current version, no need to restore it.')

        if self.get_deferred_fields():
            # It would be necessary to fetch the record from the database
            # again for this to succeed.
            # Alternatively, perhaps it would be possible to create a copy
            # of the object after fetching the missing fields.
            # Doing so may be unexpected by the calling code, so raise an
            # exception: the calling code should be adapted if necessary.
            raise ValueError(
                'Can not restore a model instance that has deferred fields')

        cls = self.__class__
        now = get_utc_now()
        restored = copy.copy(self)
        restored.version_end_date = None
        restored.version_start_date = now

        fields = [f for f in cls._meta.local_fields if
                  f.name not in Versionable.VERSIONABLE_FIELDS]
        for field in fields:
            if field.attname in kwargs:
                # Fake an object in order to avoid a DB roundtrip
                # This was made necessary, since assigning to the field's
                # attname did not work anymore with Django 2.0
                obj = field.remote_field.model(id=kwargs[field.attname])
                setattr(restored, field.name, obj)
            elif field.name in kwargs:
                setattr(restored, field.name, kwargs[field.name])
            elif isinstance(field, ForeignKey):
                # Set all non-provided ForeignKeys to None. If required,
                # raise an error.
                try:
                    setattr(restored, field.name, None)
                    # Check for non null foreign key removed since Django 1.10
                    # https://docs.djangoproject.com/en/1.10/releases/1.10/
                    # #removed-null-assignment-check-for-non-null-foreign-
                    # key-fields
                    if not field.null:
                        raise ValueError
                except ValueError:
                    raise ForeignKeyRequiresValueError

        # Give this (historic) record a fresh version id; the restored copy
        # keeps the original id (restored was copied before this line).
        self.id = self.uuid()

        with transaction.atomic():
            # If this is not the latest version, terminate the latest version
            latest = cls.objects.current_version(self, check_db=True)
            if latest and latest != self:
                latest.delete()
                restored.version_start_date = latest.version_end_date

            self.save()
            restored.save()

            # Update ManyToMany relations to point to the old version's id
            # instead of the restored version's id.
            for field_name in self.get_all_m2m_field_names():
                manager = getattr(restored,
                                  field_name)
                # returns a VersionedRelatedManager instance
                manager.through.objects.filter(
                    **{manager.source_field.attname: restored.id}).update(
                    **{manager.source_field_name: self})

            return restored

    def get_all_m2m_field_names(self):
        """Return the attnames of all many-to-many fields on this model,
        including reverse ones when the meta exposes them."""
        opts = self._meta
        rel_field_names = [field.attname for field in opts.many_to_many]
        if hasattr(opts, 'many_to_many_related'):
            rel_field_names += [rel.reverse for rel in
                                opts.many_to_many_related]

        return rel_field_names

    def detach(self):
        """
        Detaches the instance from its history.

        Similar to creating a new object with the same field values. The id and
        identity fields are set to a new value. The returned object has not
        been saved, call save() afterwards when you are ready to persist the
        object.

        ManyToMany and reverse ForeignKey relations are lost for the detached
        object.

        :return: Versionable
        """
        self.id = self.identity = self.uuid()
        self.version_start_date = self.version_birth_date = get_utc_now()
        self.version_end_date = None
        return self

    @staticmethod
    def matches_querytime(instance, querytime):
        """
        Checks whether the given instance satisfies the given QueryTime object.

        :param instance: an instance of Versionable
        :param querytime: QueryTime value to check against
        """
        if not querytime.active:
            return True

        if not querytime.time:
            return instance.version_end_date is None

        return (instance.version_start_date <= querytime.time and
                (instance.version_end_date is None or
                 instance.version_end_date > querytime.time))
|
{"/versions_tests/tests/test_admin.py": ["/versions/admin.py", "/versions_tests/models.py"], "/versions_tests/tests/test_models.py": ["/versions/exceptions.py", "/versions/models.py", "/versions_tests/models.py"], "/versions/models.py": ["/versions/exceptions.py", "/versions/settings.py", "/versions/util/__init__.py", "/versions/fields.py"], "/versions/util/helper.py": ["/versions/models.py"], "/versions_tests/admin.py": ["/versions/admin.py", "/versions_tests/models.py"], "/versions_tests/tests/test_utils.py": ["/versions/util/postgresql.py", "/versions_tests/models.py"], "/versions/fields.py": ["/versions/descriptors.py", "/versions/models.py"], "/versions/descriptors.py": ["/versions/util/__init__.py", "/versions/models.py"], "/versions/deletion.py": ["/versions/models.py"], "/versions_tests/models.py": ["/versions/fields.py", "/versions/models.py"], "/versions_tests/apps.py": ["/versions/util/postgresql.py"], "/versions/util/postgresql.py": ["/versions/fields.py", "/versions/util/helper.py"]}
|
7,536
|
JoshLabs/cleanerversion
|
refs/heads/master
|
/versions/util/helper.py
|
from __future__ import absolute_import
from versions.models import Versionable
from django import VERSION
from django.db import connection, connections
if VERSION >= (1, 7):
from django.apps import apps
else:
from django.db.models import get_app, get_models
def database_connection(dbname=None):
    """Return the DB connection named *dbname*, or the default connection
    when no name is given."""
    return connections[dbname] if dbname else connection
def get_app_models(app_name, include_auto_created=False):
    """Return the models of the given app, abstracting over the Django
    version (apps registry on >= 1.7, get_app/get_models before)."""
    if VERSION < (1, 7):
        return get_models(get_app(app_name),
                          include_auto_created=include_auto_created)
    app_config = apps.get_app_config(app_name)
    return app_config.get_models(include_auto_created=include_auto_created)
def versionable_models(app_name, include_auto_created=False):
    """Return the app's models that subclass Versionable."""
    app_models = get_app_models(app_name, include_auto_created)
    return [model for model in app_models
            if issubclass(model, Versionable)]
|
{"/versions_tests/tests/test_admin.py": ["/versions/admin.py", "/versions_tests/models.py"], "/versions_tests/tests/test_models.py": ["/versions/exceptions.py", "/versions/models.py", "/versions_tests/models.py"], "/versions/models.py": ["/versions/exceptions.py", "/versions/settings.py", "/versions/util/__init__.py", "/versions/fields.py"], "/versions/util/helper.py": ["/versions/models.py"], "/versions_tests/admin.py": ["/versions/admin.py", "/versions_tests/models.py"], "/versions_tests/tests/test_utils.py": ["/versions/util/postgresql.py", "/versions_tests/models.py"], "/versions/fields.py": ["/versions/descriptors.py", "/versions/models.py"], "/versions/descriptors.py": ["/versions/util/__init__.py", "/versions/models.py"], "/versions/deletion.py": ["/versions/models.py"], "/versions_tests/models.py": ["/versions/fields.py", "/versions/models.py"], "/versions_tests/apps.py": ["/versions/util/postgresql.py"], "/versions/util/postgresql.py": ["/versions/fields.py", "/versions/util/helper.py"]}
|
7,537
|
JoshLabs/cleanerversion
|
refs/heads/master
|
/versions/exceptions.py
|
class DeletionOfNonCurrentVersionError(ValueError):
    """Raised when an attempt is made to delete a version that is not the
    current one (its version_end_date is already set)."""
|
{"/versions_tests/tests/test_admin.py": ["/versions/admin.py", "/versions_tests/models.py"], "/versions_tests/tests/test_models.py": ["/versions/exceptions.py", "/versions/models.py", "/versions_tests/models.py"], "/versions/models.py": ["/versions/exceptions.py", "/versions/settings.py", "/versions/util/__init__.py", "/versions/fields.py"], "/versions/util/helper.py": ["/versions/models.py"], "/versions_tests/admin.py": ["/versions/admin.py", "/versions_tests/models.py"], "/versions_tests/tests/test_utils.py": ["/versions/util/postgresql.py", "/versions_tests/models.py"], "/versions/fields.py": ["/versions/descriptors.py", "/versions/models.py"], "/versions/descriptors.py": ["/versions/util/__init__.py", "/versions/models.py"], "/versions/deletion.py": ["/versions/models.py"], "/versions_tests/models.py": ["/versions/fields.py", "/versions/models.py"], "/versions_tests/apps.py": ["/versions/util/postgresql.py"], "/versions/util/postgresql.py": ["/versions/fields.py", "/versions/util/helper.py"]}
|
7,538
|
JoshLabs/cleanerversion
|
refs/heads/master
|
/versions/util/__init__.py
|
import datetime
def get_utc_now():
    """Return the current UTC time as a naive datetime.

    The previous implementation called ``utcnow().replace()`` — a no-arg
    ``replace()`` is a pointless copy and has been dropped. ``utcnow()``
    itself is deprecated since Python 3.12, so the aware ``now(timezone.utc)``
    is used and the tzinfo stripped to keep the historical naive-datetime
    contract for callers comparing against naive DateTimeField values.

    :return: naive datetime.datetime in UTC
    """
    return datetime.datetime.now(datetime.timezone.utc).replace(tzinfo=None)
|
{"/versions_tests/tests/test_admin.py": ["/versions/admin.py", "/versions_tests/models.py"], "/versions_tests/tests/test_models.py": ["/versions/exceptions.py", "/versions/models.py", "/versions_tests/models.py"], "/versions/models.py": ["/versions/exceptions.py", "/versions/settings.py", "/versions/util/__init__.py", "/versions/fields.py"], "/versions/util/helper.py": ["/versions/models.py"], "/versions_tests/admin.py": ["/versions/admin.py", "/versions_tests/models.py"], "/versions_tests/tests/test_utils.py": ["/versions/util/postgresql.py", "/versions_tests/models.py"], "/versions/fields.py": ["/versions/descriptors.py", "/versions/models.py"], "/versions/descriptors.py": ["/versions/util/__init__.py", "/versions/models.py"], "/versions/deletion.py": ["/versions/models.py"], "/versions_tests/models.py": ["/versions/fields.py", "/versions/models.py"], "/versions_tests/apps.py": ["/versions/util/postgresql.py"], "/versions/util/postgresql.py": ["/versions/fields.py", "/versions/util/helper.py"]}
|
7,539
|
JoshLabs/cleanerversion
|
refs/heads/master
|
/versions/admin.py
|
from datetime import datetime
from django import forms
from django.conf.urls import url
from django.contrib import admin, messages
from django.contrib.admin.checks import ModelAdminChecks
from django.contrib.admin.options import get_content_type_for_model
from django.contrib.admin.templatetags.admin_static import static
from django.contrib.admin.utils import unquote
from django.contrib.admin.widgets import AdminSplitDateTime
from django.core.exceptions import PermissionDenied
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404
from django.template.response import TemplateResponse
from django.utils.encoding import force_text
from django.utils.html import format_html
from django.utils.http import urlquote
from django.utils.text import capfirst
from django.utils.translation import ugettext as _
class DateTimeFilterForm(forms.Form):
    """Admin form with a single split date/time field used to pick the
    "as of" moment for a DateTimeFilter; the field name is derived from
    the filtered model field."""

    def __init__(self, request, *args, **kwargs):
        # The field is named '<field_path>_as_of' so several filters can
        # coexist on one changelist.
        field_name = kwargs.pop('field_name')
        super(DateTimeFilterForm, self).__init__(*args, **kwargs)

        self.request = request
        self.fields['%s_as_of' % field_name] = forms.SplitDateTimeField(
            label='',
            input_time_formats=['%I:%M %p', '%H:%M:%S'],
            widget=AdminSplitDateTime(
                attrs={'placeholder': 'as of date and time'}
            ),
            localize=True,
            required=True
        )

    @property
    def media(self):
        """Return the JS/CSS assets for the date/time widget, at most once
        per request: a marker attribute is set on the request on first use,
        and later calls return an empty Media object."""
        try:
            # getattr without a default raises AttributeError the first
            # time, which falls through to set the marker below.
            if getattr(self.request, 'daterange_filter_media_included'):
                return forms.Media()
        except AttributeError:
            setattr(self.request, 'daterange_filter_media_included', True)

        js = ['calendar.js', 'admin/DateTimeShortcuts.js', ]
        css = ['widgets.css', ]
        return forms.Media(
            js=[static('admin/js/%s' % path) for path in js],
            css={'all': [static('admin/css/%s' % path) for path in css]}
        )
class DateTimeFilter(admin.FieldListFilter):
    """
    Changelist filter restricting the queryset to the versions valid at a
    user-supplied point in time, applied via ``queryset.as_of(...)``.
    """
    template = 'versions/datetimefilter.html'
    title = 'DateTime filter'
    def __init__(self, field, request, params, model, model_admin, field_path):
        self.field_path = field_path
        # GET parameter names produced by the SplitDateTimeField widget:
        # suffix _0 carries the date part, _1 the time part.
        self.lookup_kwarg_as_ofdate = '%s_as_of_0' % field_path
        self.lookup_kwarg_as_oftime = '%s_as_of_1' % field_path
        super(DateTimeFilter, self).__init__(field, request, params, model,
                                             model_admin, field_path)
        self.form = self.get_form(request)
    def choices(self, cl):
        # No predefined choices; the template renders self.form instead.
        return []
    def expected_parameters(self):
        # Both halves of the split date/time input.
        return [self.lookup_kwarg_as_ofdate, self.lookup_kwarg_as_oftime]
    def get_form(self, request):
        """Build the filter form bound to the parameters used in the URL."""
        return DateTimeFilterForm(request, data=self.used_parameters,
                                  field_name=self.field_path)
    def queryset(self, request, queryset):
        """Apply the as-of time restriction when the form validates."""
        fieldname = '%s_as_of' % self.field_path
        if self.form.is_valid() and fieldname in self.form.cleaned_data:
            # NOTE(review): the utcnow() default is effectively dead code,
            # since the condition above guarantees the key is present.
            filter_params = self.form.cleaned_data.get(fieldname,
                                                       datetime.utcnow())
            return queryset.as_of(filter_params)
        else:
            return queryset
class IsCurrentFilter(admin.SimpleListFilter):
    """
    Changelist filter offering 'All' versions versus only the 'Current'
    ones (applied via ``queryset.as_of()`` with no timestamp).
    """
    title = 'Is Current filter'
    parameter_name = 'is_current'
    def __init__(self, request, params, model, model_admin):
        self.lookup_kwarg = 'is_current'
        # Raw GET value; truthy ('1') means "show current versions only".
        self.lookup_val = request.GET.get(self.lookup_kwarg, None)
        super(IsCurrentFilter, self).__init__(request, params, model,
                                              model_admin)
    def lookups(self, request, model_admin):
        # (value, label) pairs; None encodes the unfiltered 'All' choice.
        return [(None, 'All'), ('1', 'Current'), ]
    def choices(self, cl):
        """Yield the template context dict for each filter choice link."""
        for lookup, title in self.lookup_choices:
            yield {
                'selected': self.value() == lookup,
                'query_string': cl.get_query_string({
                    self.parameter_name: lookup,
                }, []),
                'display': title,
            }
    def queryset(self, request, queryset):
        # as_of() without an argument restricts to current versions.
        if self.lookup_val:
            return queryset.as_of()
        else:
            return queryset
class VersionedAdminChecks(ModelAdminChecks):
    """Custom admin checks that relax validation for VersionedAdmin."""
    def _check_exclude(self, cls, model=None):
        """
        Required to suppress error about exclude not being a tuple since we
        are using @property to dynamically change it
        """
        # Unconditionally report no errors for the exclude check.
        return []
class VersionedAdmin(admin.ModelAdmin):
    """
    VersionedAdmin provides functionality to allow cloning of objects when
    saving, not cloning if a mistake was made, and making a current object
    historical by deleting it
    """
    # Versioning bookkeeping fields that must never be user-editable.
    VERSIONED_EXCLUDE = ['id', 'identity', 'version_end_date',
                         'version_start_date', 'version_birth_date']
    # These are so that the subclasses can overwrite these attributes
    # to have the identity, end date, or start date column not show
    list_display_show_identity = True
    list_display_show_end_date = True
    list_display_show_start_date = True
    ordering = []
    checks_class = VersionedAdminChecks
    def get_readonly_fields(self, request, obj=None):
        """
        This is required so that when a subclass of VersionedAdmin defines
        its own readonly_fields, ours won't be undone
        """
        if obj:
            return list(self.readonly_fields) + ['id', 'identity',
                                                 'is_current']
        return self.readonly_fields
    def get_ordering(self, request):
        # Group rows by identity, newest version first, then any
        # subclass-specified ordering.
        return ['identity', '-version_start_date', ] + self.ordering
    def get_list_display(self, request):
        """
        This method determines which fields go in the changelist
        """
        # Force cast to list as super get_list_display could return a tuple
        list_display = list(
            super(VersionedAdmin, self).get_list_display(request))
        # Prepend the following fields to list display
        if self.list_display_show_identity:
            list_display = ['identity_shortener', ] + list_display
        # Append the following fields to list display
        if self.list_display_show_start_date:
            list_display += ['version_start_date', ]
        if self.list_display_show_end_date:
            list_display += ['version_end_date', ]
        return list_display + ['is_current', ]
    def get_list_filter(self, request):
        """
        Adds versionable custom filtering ability to changelist
        """
        list_filter = super(VersionedAdmin, self).get_list_filter(request)
        return list(list_filter) + [('version_start_date', DateTimeFilter),
                                    IsCurrentFilter]
    def restore(self, request, *args, **kwargs):
        """
        View for restoring object from change view
        """
        # URL shape: .../<app>/<model>/<object_id>/restore/ — slice the
        # path to recover the object id and the changelist URL.
        paths = request.path_info.split('/')
        object_id_index = paths.index("restore") - 2
        object_id = paths[object_id_index]
        obj = super(VersionedAdmin, self).get_object(request, object_id)
        obj.restore()
        # Three segments back from the object id is the '<admin-site>'
        # prefix; joining up to the object id yields the changelist path.
        admin_wordIndex = object_id_index - 3
        path = "/%s" % ("/".join(paths[admin_wordIndex:object_id_index]))
        opts = self.model._meta
        msg_dict = {
            'name': force_text(opts.verbose_name),
            'obj': format_html('<a href="{}">{}</a>',
                               urlquote(request.path), obj),
        }
        msg = format_html(_('The {name} "{obj}" was restored successfully.'),
                          **msg_dict)
        self.message_user(request, msg, messages.SUCCESS)
        return HttpResponseRedirect(path)
    def will_not_clone(self, request, *args, **kwargs):
        """
        Add save but not clone capability in the changeview
        """
        # URL shape: .../<object_id>/will_not_clone/ — recover the id and
        # delegate to the normal change view (its response is discarded;
        # we only want the save side effect, then redirect).
        paths = request.path_info.split('/')
        index_of_object_id = paths.index("will_not_clone") - 1
        object_id = paths[index_of_object_id]
        self.change_view(request, object_id)
        admin_wordInUrl = index_of_object_id - 3
        # This gets the adminsite for the app, and the model name and joins
        # together with /
        path = '/' + '/'.join(paths[admin_wordInUrl:index_of_object_id])
        return HttpResponseRedirect(path)
    @property
    def exclude(self):
        """
        Custom descriptor for exclude since there is no get_exclude method to
        be overridden
        """
        exclude = self.VERSIONED_EXCLUDE
        if super(VersionedAdmin, self).exclude is not None:
            # Force cast to list as super exclude could return a tuple
            exclude = list(super(VersionedAdmin, self).exclude) + exclude
        return exclude
    def get_object(self, request, object_id, from_field=None):
        """
        our implementation of get_object allows for cloning when updating an
        object, not cloning when the button 'save but not clone' is pushed
        and at no other time will clone be called
        """
        # from_field breaks in 1.7.8
        obj = super(VersionedAdmin, self).get_object(request,
                                                     object_id)
        # Only clone if update view as get_object() is also called for change,
        # delete, and history views
        if request.method == 'POST' and \
                obj and \
                obj.is_latest and \
                'will_not_clone' not in request.path and \
                'delete' not in request.path and \
                'restore' not in request.path:
            obj = obj.clone()
        return obj
    def history_view(self, request, object_id, extra_context=None):
        "The 'history' admin view for this model."
        from django.contrib.admin.models import LogEntry
        # First check if the user can see this history.
        model = self.model
        obj = get_object_or_404(self.get_queryset(request),
                                pk=unquote(object_id))
        if not self.has_change_permission(request, obj):
            raise PermissionDenied
        # Then get the history for this object.
        opts = model._meta
        app_label = opts.app_label
        action_list = LogEntry.objects.filter(
            object_id=unquote(str(obj.identity)),
            # this is the change for our override;
            content_type=get_content_type_for_model(model)
        ).select_related().order_by('action_time')
        ctx = self.admin_site.each_context(request)
        context = dict(ctx,
                       title=('Change history: %s') % force_text(obj),
                       action_list=action_list,
                       module_name=capfirst(
                           force_text(opts.verbose_name_plural)),
                       object=obj,
                       opts=opts,
                       preserved_filters=self.get_preserved_filters(request),
                       )
        context.update(extra_context or {})
        return TemplateResponse(request, self.object_history_template or [
            "admin/%s/%s/object_history.html" % (app_label, opts.model_name),
            "admin/%s/object_history.html" % app_label,
            "admin/object_history.html"
        ], context)
    def get_urls(self):
        """
        Appends the custom will_not_clone url to the admin site
        """
        not_clone_url = [url(r'^(.+)/will_not_clone/$',
                             admin.site.admin_view(self.will_not_clone))]
        restore_url = [
            url(r'^(.+)/restore/$', admin.site.admin_view(self.restore))]
        return not_clone_url + restore_url + super(VersionedAdmin,
                                                   self).get_urls()
    def is_current(self, obj):
        # Changelist column showing whether this row is the current version.
        return obj.is_current
    is_current.boolean = True
    is_current.short_description = "Current"
    def identity_shortener(self, obj):
        """
        Shortens identity to the last 12 characters
        """
        return "..." + str(obj.identity)[-12:]
    identity_shortener.boolean = False
    identity_shortener.short_description = "Short Identity"
    class Media:
        # This supports dynamically adding 'Save without cloning' button:
        # http://bit.ly/1T2fGOP
        js = ('js/admin_addon.js',)
|
{"/versions_tests/tests/test_admin.py": ["/versions/admin.py", "/versions_tests/models.py"], "/versions_tests/tests/test_models.py": ["/versions/exceptions.py", "/versions/models.py", "/versions_tests/models.py"], "/versions/models.py": ["/versions/exceptions.py", "/versions/settings.py", "/versions/util/__init__.py", "/versions/fields.py"], "/versions/util/helper.py": ["/versions/models.py"], "/versions_tests/admin.py": ["/versions/admin.py", "/versions_tests/models.py"], "/versions_tests/tests/test_utils.py": ["/versions/util/postgresql.py", "/versions_tests/models.py"], "/versions/fields.py": ["/versions/descriptors.py", "/versions/models.py"], "/versions/descriptors.py": ["/versions/util/__init__.py", "/versions/models.py"], "/versions/deletion.py": ["/versions/models.py"], "/versions_tests/models.py": ["/versions/fields.py", "/versions/models.py"], "/versions_tests/apps.py": ["/versions/util/postgresql.py"], "/versions/util/postgresql.py": ["/versions/fields.py", "/versions/util/helper.py"]}
|
7,540
|
JoshLabs/cleanerversion
|
refs/heads/master
|
/setup.py
|
#!/usr/bin/env python
from setuptools import setup, find_packages
"""
Documentation can be found at https://docs.python.org/2/distutils/index.html,
but usually you only need to do the following steps to publish a new package
version to PyPI::
# Update the version tag in this file (setup.py)
python setup.py sdist --formats=gztar
python setup.py bdist_wheel
twine upload dist/*
That's already it. You should get the following output written to your
command line::
Server response (200): OK
If you get errors, check the following things:
- Are you behind a proxy? --> Try not to be behind a proxy (I don't actually
know how to configure setup.py to be proxy-aware)
- Is your command correct? --> Double-check using the reference documentation
- Do you have all the necessary libraries to generate the wanted formats? -->
Reduce the set of formats or install libs
"""
# Version string is owned by the cleanerversion package itself.
version = __import__('cleanerversion').get_version()
setup(name='CleanerVersion',
      version=version,
      description='A versioning solution for relational data models using the '
                  'Django ORM',
      long_description='CleanerVersion is a solution that allows you to read '
                       'and write multiple versions of an entry '
                       'to and from your relational database. It allows to '
                       'keep track of modifications on an object '
                       'over time, as described by the theory of **Slowly '
                       'Changing Dimensions** (SCD) **- Type 2**. '
                       ''
                       'CleanerVersion therefore enables a Django-based '
                       'Datawarehouse, which was the initial idea of '
                       'this package.',
      author='Manuel Jeckelmann, Jean-Christophe Zulian, Brian King, '
             'Andrea Marcacci',
      author_email='engineering.sophia@swisscom.com',
      license='Apache License 2.0',
      # Ship all packages except the project-local settings variants.
      packages=find_packages(exclude=['cleanerversion.settings.*']),
      url='https://github.com/swisscom/cleanerversion',
      # Include the admin JS addon and filter templates in the wheel/sdist.
      package_data={'versions': ['static/js/*.js',
                                 'templates/versions/*.html']},
      classifiers=[
          'Development Status :: 5 - Production/Stable',
          'Framework :: Django',
          'Intended Audience :: Developers',
          'Programming Language :: Python :: 2.7',
          'Programming Language :: Python :: 3.6',
          'Topic :: Database',
          'Topic :: System :: Archiving',
      ])
|
{"/versions_tests/tests/test_admin.py": ["/versions/admin.py", "/versions_tests/models.py"], "/versions_tests/tests/test_models.py": ["/versions/exceptions.py", "/versions/models.py", "/versions_tests/models.py"], "/versions/models.py": ["/versions/exceptions.py", "/versions/settings.py", "/versions/util/__init__.py", "/versions/fields.py"], "/versions/util/helper.py": ["/versions/models.py"], "/versions_tests/admin.py": ["/versions/admin.py", "/versions_tests/models.py"], "/versions_tests/tests/test_utils.py": ["/versions/util/postgresql.py", "/versions_tests/models.py"], "/versions/fields.py": ["/versions/descriptors.py", "/versions/models.py"], "/versions/descriptors.py": ["/versions/util/__init__.py", "/versions/models.py"], "/versions/deletion.py": ["/versions/models.py"], "/versions_tests/models.py": ["/versions/fields.py", "/versions/models.py"], "/versions_tests/apps.py": ["/versions/util/postgresql.py"], "/versions/util/postgresql.py": ["/versions/fields.py", "/versions/util/helper.py"]}
|
7,541
|
JoshLabs/cleanerversion
|
refs/heads/master
|
/versions_tests/admin.py
|
from django.contrib import admin
from versions.admin import VersionedAdmin
from versions_tests.models import City, Student, Observer, Professor, \
Subject, Teacher, Team, Player, Award, ChainStore, \
Classroom, WineDrinker, WineDrinkerHat, Wine
# Versionable test models get the versioned admin; the plain models
# (Wine, WineDrinkerHat) use the stock ModelAdmin for contrast in tests.
admin.site.register(
    [City, Student, Subject, Teacher, Team, Player, Award, Observer,
     ChainStore, Professor, Classroom,
     WineDrinker], VersionedAdmin)
admin.site.register([Wine, WineDrinkerHat], admin.ModelAdmin)
|
{"/versions_tests/tests/test_admin.py": ["/versions/admin.py", "/versions_tests/models.py"], "/versions_tests/tests/test_models.py": ["/versions/exceptions.py", "/versions/models.py", "/versions_tests/models.py"], "/versions/models.py": ["/versions/exceptions.py", "/versions/settings.py", "/versions/util/__init__.py", "/versions/fields.py"], "/versions/util/helper.py": ["/versions/models.py"], "/versions_tests/admin.py": ["/versions/admin.py", "/versions_tests/models.py"], "/versions_tests/tests/test_utils.py": ["/versions/util/postgresql.py", "/versions_tests/models.py"], "/versions/fields.py": ["/versions/descriptors.py", "/versions/models.py"], "/versions/descriptors.py": ["/versions/util/__init__.py", "/versions/models.py"], "/versions/deletion.py": ["/versions/models.py"], "/versions_tests/models.py": ["/versions/fields.py", "/versions/models.py"], "/versions_tests/apps.py": ["/versions/util/postgresql.py"], "/versions/util/postgresql.py": ["/versions/fields.py", "/versions/util/helper.py"]}
|
7,542
|
JoshLabs/cleanerversion
|
refs/heads/master
|
/versions_tests/tests/test_utils.py
|
from unittest import skipUnless
from django.db import IntegrityError
from django.db import connection
from django.test import TestCase, TransactionTestCase
from versions.util.postgresql import get_uuid_like_indexes_on_table
from versions_tests.models import ChainStore, Color
@skipUnless(connection.vendor == 'postgresql', "Postgresql-specific test")
class PostgresqlVersionUniqueTests(TransactionTestCase):
    """
    Exercises the version-aware unique constraints (partial unique indexes)
    that CleanerVersion creates on PostgreSQL.
    """
    def setUp(self):
        self.red = Color.objects.create(name='red')
        self.green = Color.objects.create(name='green')
        self.black = Color.objects.create(name='black')
        self.yellow = Color.objects.create(name='yellow')
        # - only one store with the same name and subchain_id can exist in a
        #   single city
        # - no two stores can share the same door_frame_color and door_color
        store = {
            'subchain_id': 1,
            'city': 'Santa Barbara',
            'name': 'Barbara style',
            'opening_hours': '9-9 everyday',
            'door_frame_color': self.red,
            'door_color': self.black,
        }
        self.sb1 = ChainStore.objects.create(**store)
    def test_version_unique(self):
        """Unique constraints apply only among current (non-deleted) versions."""
        # It should not be possible to create another store with the same name,
        # city, and subchain_id
        with self.assertRaises(IntegrityError):
            sb2 = ChainStore.objects.create(
                subchain_id=self.sb1.subchain_id,
                city=self.sb1.city,
                name=self.sb1.name,
                door_frame_color=self.sb1.door_frame_color,
                door_color=self.green
            )
        # It should not be possible to create another store with the same door
        # and door_frame color
        with self.assertRaises(IntegrityError):
            sb3 = ChainStore.objects.create(
                subchain_id=self.sb1.subchain_id,
                city=self.sb1.city,
                name="Bearded Bob's style",
                door_frame_color=self.sb1.door_frame_color,
                door_color=self.sb1.door_color
            )
        # It should be possible to create objects as long as they follow the
        # unique constraints, though:
        sb4 = ChainStore.objects.create(
            subchain_id=self.sb1.subchain_id,
            city=self.sb1.city,
            name="Bearded Bob's style",
            door_frame_color=self.sb1.door_frame_color,
            door_color=self.green
        )
        sb5 = ChainStore.objects.create(
            subchain_id=sb4.subchain_id + 1,
            city=sb4.city,
            name=sb4.name,
            door_frame_color=sb4.door_frame_color,
            door_color=self.yellow
        )
        # If a version is soft-deleted, it should be possible to create a new
        # object with the
        # value of that old version
        sb4.delete()
        sb6 = ChainStore.objects.create(
            subchain_id=sb4.subchain_id,
            city=sb4.city,
            name=sb4.name,
            door_frame_color=sb4.door_frame_color,
            door_color=sb4.door_color
        )
    def test_identity_unique(self):
        """Two current versions may never share the same identity."""
        c = Color.objects.create(name='sky blue')
        c.identity = self.green.identity
        # It should not be possible to have two "current" objects with the
        # same identity:
        with self.assertRaises(IntegrityError):
            c.save()
@skipUnless(connection.vendor == 'postgresql', "Postgresql-specific test")
class PostgresqlUuidLikeIndexesTest(TestCase):
    """Verifies that redundant varchar 'like' indexes were dropped."""
    def test_no_like_indexes_on_uuid_columns(self):
        # Django creates like indexes on char columns. In Django 1.7.x and
        # below, there is no support for native uuid columns, so
        # CleanerVersion uses a CharField to store the uuid values. For
        # postgresql, Django creates special indexes for char fields so that
        # like searches (e.g. WHERE foo like '%bar') are fast.
        # Those indexes are not going to be used in our case, and extra
        # indexes will slow down updates and inserts. So, they should have
        # been removed by the post_migrate handler in
        # versions_tests.apps.VersionsTestsConfig.ready.
        self.assertEqual(0, len(get_uuid_like_indexes_on_table(ChainStore)))
|
{"/versions_tests/tests/test_admin.py": ["/versions/admin.py", "/versions_tests/models.py"], "/versions_tests/tests/test_models.py": ["/versions/exceptions.py", "/versions/models.py", "/versions_tests/models.py"], "/versions/models.py": ["/versions/exceptions.py", "/versions/settings.py", "/versions/util/__init__.py", "/versions/fields.py"], "/versions/util/helper.py": ["/versions/models.py"], "/versions_tests/admin.py": ["/versions/admin.py", "/versions_tests/models.py"], "/versions_tests/tests/test_utils.py": ["/versions/util/postgresql.py", "/versions_tests/models.py"], "/versions/fields.py": ["/versions/descriptors.py", "/versions/models.py"], "/versions/descriptors.py": ["/versions/util/__init__.py", "/versions/models.py"], "/versions/deletion.py": ["/versions/models.py"], "/versions_tests/models.py": ["/versions/fields.py", "/versions/models.py"], "/versions_tests/apps.py": ["/versions/util/postgresql.py"], "/versions/util/postgresql.py": ["/versions/fields.py", "/versions/util/helper.py"]}
|
7,543
|
JoshLabs/cleanerversion
|
refs/heads/master
|
/versions/fields.py
|
from django.db.models.deletion import DO_NOTHING
from django.db.models.fields.related import ForeignKey, ManyToManyField, \
resolve_relation, lazy_related_operation
from django.db.models.query_utils import Q
from django.db.models.sql.datastructures import Join
from django.db.models.sql.where import ExtraWhere, WhereNode
from django.db.models.utils import make_model_tuple
from versions.descriptors import (VersionedForwardManyToOneDescriptor,
VersionedReverseManyToOneDescriptor,
VersionedManyToManyDescriptor)
from versions.models import Versionable
class VersionedForeignKey(ForeignKey):
    """
    We need to replace the standard ForeignKey declaration in order to be able
    to introduce the VersionedReverseSingleRelatedObjectDescriptor, which
    allows to go back in time...
    We also want to allow keeping track of any as_of time so that joins can
    be restricted based on that.
    """
    def __init__(self, *args, **kwargs):
        super(VersionedForeignKey, self).__init__(*args, **kwargs)
    def contribute_to_class(self, cls, name, virtual_only=False):
        """Install the forward descriptor that honors as-of query times."""
        super(VersionedForeignKey, self).contribute_to_class(cls, name,
                                                             virtual_only)
        setattr(cls, self.name, VersionedForwardManyToOneDescriptor(self))
    def contribute_to_related_class(self, cls, related):
        """
        Override ForeignKey's methods, and replace the descriptor, if set by
        the parent's methods
        """
        # Internal FK's - i.e., those with a related name ending with '+' -
        # and swapped models don't get a related descriptor.
        super(VersionedForeignKey, self).contribute_to_related_class(cls,
                                                                     related)
        accessor_name = related.get_accessor_name()
        if hasattr(cls, accessor_name):
            setattr(cls, accessor_name,
                    VersionedReverseManyToOneDescriptor(related))
    def get_extra_restriction(self, where_class, alias, remote_alias):
        """
        Overrides ForeignObject's get_extra_restriction function that returns
        an SQL statement which is appended to a JOIN's conditional filtering
        part
        :return: SQL conditional statement
        :rtype: WhereNode
        """
        # SQL templates; {alias} is filled in later by VersionedExtraWhere
        # once the joined alias is known.
        historic_sql = '''{alias}.version_start_date <= %s
                 AND ({alias}.version_end_date > %s
                      OR {alias}.version_end_date is NULL )'''
        current_sql = '''{alias}.version_end_date is NULL'''
        # How 'bout creating an ExtraWhere here, without params
        return where_class([VersionedExtraWhere(historic_sql=historic_sql,
                                                current_sql=current_sql,
                                                alias=alias,
                                                remote_alias=remote_alias)])
    def get_joining_columns(self, reverse_join=False):
        """
        Get and return joining columns defined by this foreign key relationship
        :return: A tuple containing the column names of the tables to be
            joined (<local_col_name>, <remote_col_name>)
        :rtype: tuple
        """
        source = self.reverse_related_fields if reverse_join \
            else self.related_fields
        joining_columns = tuple()
        for lhs_field, rhs_field in source:
            lhs_col_name = lhs_field.column
            rhs_col_name = rhs_field.column
            # Test whether
            # - self is the current ForeignKey relationship
            # - self was not auto_created (e.g. is not part of a M2M
            #   relationship)
            if self is lhs_field and not self.auto_created:
                if rhs_col_name == Versionable.VERSION_IDENTIFIER_FIELD:
                    # Join on the stable identity rather than the version pk.
                    rhs_col_name = Versionable.OBJECT_IDENTIFIER_FIELD
            elif self is rhs_field and not self.auto_created:
                if lhs_col_name == Versionable.VERSION_IDENTIFIER_FIELD:
                    lhs_col_name = Versionable.OBJECT_IDENTIFIER_FIELD
            joining_columns = joining_columns + ((lhs_col_name, rhs_col_name),)
        return joining_columns
    def get_reverse_related_filter(self, obj):
        """
        Build the Q filter used to find related objects for *obj*, joining
        on identity and (when obj carries an as_of time) restricting to the
        versions valid at that time.
        """
        base_filter = dict()
        timestamp_q = None
        for lh_field, rh_field in self.related_fields:
            if isinstance(obj, Versionable) and \
                    rh_field.attname == \
                    Versionable.VERSION_IDENTIFIER_FIELD:
                # Match on identity instead of the version-specific id.
                base_filter.update(**{
                    Versionable.OBJECT_IDENTIFIER_FIELD:
                        getattr(obj, lh_field.attname)})
                if hasattr(obj, 'as_of') and obj.as_of is not None:
                    start_date_q = Q(version_start_date__lt=obj.as_of)
                    end_date_q = Q(version_end_date__gte=obj.as_of) | Q(
                        version_end_date__isnull=True)
                    timestamp_q = start_date_q & end_date_q
            else:
                base_filter.update(
                    **{rh_field.attname: getattr(obj, lh_field.attname)})
        base_q = Q(**base_filter)
        if timestamp_q:
            base_q &= timestamp_q
        descriptor_filter = self.get_extra_descriptor_filter(obj)
        if isinstance(descriptor_filter, dict):
            return base_q & Q(**descriptor_filter)
        elif descriptor_filter:
            return base_q & descriptor_filter
        return base_q
class VersionedManyToManyField(ManyToManyField):
    """
    M2M field whose auto-created intermediary ("through") model is itself a
    Versionable, so relationship membership can be tracked over time.
    """
    def __init__(self, *args, **kwargs):
        super(VersionedManyToManyField, self).__init__(*args, **kwargs)
    def contribute_to_class(self, cls, name, **kwargs):
        """
        Called at class type creation. So, this method is called, when
        metaclasses get created
        """
        # TODO: Apply 3 edge cases when not to create an intermediary model
        # specified in django.db.models.fields.related:1566
        # self.rel.through needs to be set prior to calling super, since
        # super(...).contribute_to_class refers to it.
        # Classes pointed to by a string do not need to be resolved here,
        # since Django does that at a later point in time - which is nice... ;)
        #
        # Superclasses take care of:
        # - creating the through class if unset
        # - resolving the through class if it's a string
        # - resolving string references within the through class
        if not self.remote_field.through and \
                not cls._meta.abstract and \
                not cls._meta.swapped:
            # We need to anticipate some stuff, that's done only later in
            # class contribution
            self.set_attributes_from_name(name)
            self.model = cls
            self.remote_field.through = VersionedManyToManyField.\
                create_versioned_many_to_many_intermediary_model(self, cls,
                                                                 name)
        super(VersionedManyToManyField, self).contribute_to_class(cls, name)
        # Overwrite the descriptor
        if hasattr(cls, self.name):
            setattr(cls, self.name,
                    VersionedManyToManyDescriptor(self.remote_field))
    def contribute_to_related_class(self, cls, related):
        """
        Called at class type creation. So, this method is called, when
        metaclasses get created
        """
        super(VersionedManyToManyField, self). \
            contribute_to_related_class(cls, related)
        accessor_name = related.get_accessor_name()
        if accessor_name and hasattr(cls, accessor_name):
            descriptor = VersionedManyToManyDescriptor(related, accessor_name)
            setattr(cls, accessor_name, descriptor)
            # Keep a registry of reverse M2M descriptors on the related
            # model's _meta, creating the list on first use.
            if hasattr(cls._meta, 'many_to_many_related') and isinstance(
                    cls._meta.many_to_many_related, list):
                cls._meta.many_to_many_related.append(descriptor)
            else:
                cls._meta.many_to_many_related = [descriptor]
    @staticmethod
    def create_versioned_many_to_many_intermediary_model(field, cls,
                                                         field_name):
        """
        Dynamically build the versioned intermediary model for this M2M,
        mirroring Django's create_many_to_many_intermediary_model but
        subclassing Versionable and omitting unique_together.
        """
        # TODO: Verify functionality against
        # django.db.models.fields.related:1048
        # Let's not care too much on what flags could potentially be set on
        # that intermediary class (e.g. managed, etc)
        # Let's play the game, as if the programmer had specified a class
        # within his models... Here's how.
        # FIXME: VersionedManyToManyModels do not get registered in the
        # apps models.
        # FIXME: This is usually done at django/db/models/base.py:284,
        # invoked by create_many_to_many_intermediary_model at
        # django.db.models.fields.related:1048
        def set_managed(model, related, through):
            # Lazily propagate the managed flag once both models resolve.
            through._meta.managed = model._meta.managed or \
                                    related._meta.managed
        to_model = resolve_relation(cls, field.remote_field.model)
        name = '%s_%s' % (cls._meta.object_name, field_name)
        lazy_related_operation(set_managed, cls, to_model, name)
        # Force 'to' to be a string (and leave the hard work to Django)
        to = make_model_tuple(to_model)[1]
        from_ = cls._meta.model_name
        if to == from_:
            # Self-referential M2M: disambiguate the two FK field names.
            from_ = 'from_%s' % from_
            to = 'to_%s' % to
        meta = type('Meta', (object,), {
            'db_table': field._get_m2m_db_table(cls._meta),
            'auto_created': cls,
            'app_label': cls._meta.app_label,
            'db_tablespace': cls._meta.db_tablespace,
            # 'unique_together' is not applicable as is, due to multiple
            # versions to be allowed to exist.
            # 'unique_together': (from_, to),
            'verbose_name': '%(from)s-%(to)s relationship' % {'from': from_,
                                                              'to': to},
            'verbose_name_plural': '%(from)s-%(to)s relationships' % {
                'from': from_, 'to': to},
            'apps': field.model._meta.apps,
        })
        return type(str(name), (Versionable,), {
            'Meta': meta,
            '__module__': cls.__module__,
            from_: VersionedForeignKey(
                cls,
                related_name='%s+' % name,
                db_tablespace=field.db_tablespace,
                db_constraint=field.remote_field.db_constraint,
                auto_created=name,
                on_delete=DO_NOTHING,
            ),
            to: VersionedForeignKey(
                to_model,
                related_name='%s+' % name,
                db_tablespace=field.db_tablespace,
                db_constraint=field.remote_field.db_constraint,
                auto_created=name,
                on_delete=DO_NOTHING,
            ),
        })
class VersionedExtraWhere(ExtraWhere):
    """
    A specific implementation of ExtraWhere;
    Before as_sql can be called on an object, ensure that calls to
    - set_as_of and
    - set_joined_alias
    have been done
    """
    def __init__(self, historic_sql, current_sql, alias, remote_alias):
        # Start with no SQL/params; as_sql() fills them in based on the
        # as-of state set later.
        super(VersionedExtraWhere, self).__init__(sqls=[], params=[])
        self.historic_sql = historic_sql
        self.current_sql = current_sql
        self.alias = alias
        self.related_alias = remote_alias
        self._as_of_time_set = False
        self.as_of_time = None
        self._joined_alias = None
    def set_as_of(self, as_of_time):
        """Record the query time; None means 'current versions'."""
        self.as_of_time = as_of_time
        self._as_of_time_set = True
    def set_joined_alias(self, joined_alias):
        """
        Takes the alias that is being joined to the query and applies the query
        time constraint to its table
        :param str joined_alias: The table name of the alias
        """
        self._joined_alias = joined_alias
    def as_sql(self, qn=None, connection=None):
        sql = ""
        params = []
        # Fail fast for unacceptable cases
        if self._as_of_time_set and not self._joined_alias:
            raise ValueError(
                "joined_alias is not set, but as_of is; this is a conflict!")
        # Set the SQL string in dependency of whether as_of_time was set or not
        if self._as_of_time_set:
            if self.as_of_time:
                sql = self.historic_sql
                params = [self.as_of_time] * 2
                # 2 is the number of occurrences of the timestamp in an
                # as_of-filter expression
            else:
                # If as_of_time was set to None, we're dealing with a query
                # for "current" values
                sql = self.current_sql
        else:
            # No as_of_time has been set; Perhaps, as_of was not part of the
            # query -> That's OK
            pass
        # By here, the sql string is defined if an as_of_time was provided
        if self._joined_alias:
            sql = sql.format(alias=self._joined_alias)
        # Set the final sqls
        # self.sqls needs to be set before the call to parent
        if sql:
            self.sqls = [sql]
        else:
            # Neutral predicate so the parent still emits valid SQL.
            self.sqls = ["1=1"]
        self.params = params
        return super(VersionedExtraWhere, self).as_sql(qn, connection)
class VersionedWhereNode(WhereNode):
    def as_sql(self, qn, connection):
        """
        This method identifies joined table aliases in order for
        VersionedExtraWhere.as_sql() to be able to add time restrictions for
        those tables based on the VersionedQuery's querytime value.
        :param qn: In Django 1.7 & 1.8 this is a compiler; in 1.6, it's an
            instance-method
        :param connection: A DB connection
        :return: A tuple consisting of (sql_string, result_params)
        """
        # self.children is an array of VersionedExtraWhere-objects
        for child in self.children:
            # Only children that have not yet received their params.
            if isinstance(child, VersionedExtraWhere) and not child.params:
                # Django 1.7 & 1.8 handles compilers as objects
                _query = qn.query
                query_time = _query.querytime.time
                apply_query_time = _query.querytime.active
                alias_map = _query.alias_map
                # In Django 1.8, use the Join objects in alias_map
                self._set_child_joined_alias(child, alias_map)
                if apply_query_time:
                    # Add query parameters that have not been added till now
                    child.set_as_of(query_time)
                else:
                    # Remove the restriction if it's not required
                    child.sqls = []
        return super(VersionedWhereNode, self).as_sql(qn, connection)
    @staticmethod
    def _set_child_joined_alias_using_join_map(child, join_map, alias_map):
        """
        Set the joined alias on the child, for Django <= 1.7.x.
        :param child:
        :param join_map:
        :param alias_map:
        """
        for lhs, table, join_cols in join_map:
            if lhs is None:
                continue
            # Pick whichever side of the join is the "other" alias.
            if lhs == child.alias:
                relevant_alias = child.related_alias
            elif lhs == child.related_alias:
                relevant_alias = child.alias
            else:
                continue
            join_info = alias_map[relevant_alias]
            if join_info.join_type is None:
                continue
            if join_info.lhs_alias in [child.alias, child.related_alias]:
                child.set_joined_alias(relevant_alias)
                break
    @staticmethod
    def _set_child_joined_alias(child, alias_map):
        """
        Set the joined alias on the child, for Django >= 1.8.0
        :param child:
        :param alias_map:
        """
        for table in alias_map:
            join = alias_map[table]
            # Skip base-table entries; only Join objects carry parent info.
            if not isinstance(join, Join):
                continue
            lhs = join.parent_alias
            if (lhs == child.alias and table == child.related_alias) \
                    or (lhs == child.related_alias and table == child.alias):
                child.set_joined_alias(table)
                break
|
{"/versions_tests/tests/test_admin.py": ["/versions/admin.py", "/versions_tests/models.py"], "/versions_tests/tests/test_models.py": ["/versions/exceptions.py", "/versions/models.py", "/versions_tests/models.py"], "/versions/models.py": ["/versions/exceptions.py", "/versions/settings.py", "/versions/util/__init__.py", "/versions/fields.py"], "/versions/util/helper.py": ["/versions/models.py"], "/versions_tests/admin.py": ["/versions/admin.py", "/versions_tests/models.py"], "/versions_tests/tests/test_utils.py": ["/versions/util/postgresql.py", "/versions_tests/models.py"], "/versions/fields.py": ["/versions/descriptors.py", "/versions/models.py"], "/versions/descriptors.py": ["/versions/util/__init__.py", "/versions/models.py"], "/versions/deletion.py": ["/versions/models.py"], "/versions_tests/models.py": ["/versions/fields.py", "/versions/models.py"], "/versions_tests/apps.py": ["/versions/util/postgresql.py"], "/versions/util/postgresql.py": ["/versions/fields.py", "/versions/util/helper.py"]}
|
7,544
|
JoshLabs/cleanerversion
|
refs/heads/master
|
/versions/descriptors.py
|
from collections import namedtuple
from django import VERSION
from django.core.exceptions import SuspiciousOperation, FieldDoesNotExist
from django.db import router, transaction
from django.db.models.base import Model
from django.db.models.fields.related import (ForwardManyToOneDescriptor,
ReverseManyToOneDescriptor,
ManyToManyDescriptor)
from django.db.models.fields.related_descriptors import \
create_forward_many_to_many_manager
from django.db.models.query_utils import Q
from django.utils.functional import cached_property
from versions.util import get_utc_now
def matches_querytime(instance, querytime):
    """
    Check whether the given instance satisfies the given QueryTime object.

    :param instance: an instance of Versionable
    :param querytime: QueryTime value to check against
    :return: True if the instance is valid for the querytime
    """
    # An inactive querytime imposes no restriction at all.
    if not querytime.active:
        return True

    # No explicit time means "current version": only records that are still
    # open-ended (no version_end_date) match.
    if not querytime.time:
        return instance.version_end_date is None

    # Otherwise the instance's validity interval must contain the requested
    # point in time: started at or before it, and not yet ended at it.
    started_in_time = instance.version_start_date <= querytime.time
    still_open = instance.version_end_date is None
    ends_later = still_open or instance.version_end_date > querytime.time
    return started_in_time and ends_later
class VersionedForwardManyToOneDescriptor(ForwardManyToOneDescriptor):
    """
    The VersionedForwardManyToOneDescriptor is used when pointing another
    Model using a VersionedForeignKey;

    For example:

        class Team(Versionable):
            name = CharField(max_length=200)
            city = VersionedForeignKey(City, null=True)

    ``team.city`` is a VersionedForwardManyToOneDescriptor
    """

    def get_prefetch_queryset(self, instances, queryset=None):
        """
        Overrides the parent method to:
        - force queryset to use the querytime of the parent objects
        - ensure that the join is done on identity, not id
        - make the cache key identity, not id.

        :param instances: parent instances the prefetch is performed for
        :param queryset: optional base queryset (e.g. from a Prefetch())
        :return: the tuple Django's prefetch machinery expects (5 elements
            before Django 2.0, 6 from 2.0 on)
        """
        if queryset is None:
            queryset = self.get_queryset()
        queryset._add_hints(instance=instances[0])

        # CleanerVersion change 1: force the querytime to be the same as the
        # prefetched-for instance.
        # This is necessary to have reliable results and avoid extra queries
        # for cache misses when accessing the child objects from their
        # parents (e.g. choice.poll).
        instance_querytime = instances[0]._querytime
        if instance_querytime.active:
            if queryset.querytime.active and \
                    queryset.querytime.time != instance_querytime.time:
                raise ValueError(
                    "A Prefetch queryset that specifies an as_of time must "
                    "match the as_of of the base queryset.")
            else:
                queryset.querytime = instance_querytime

        # CleanerVersion change 2: make rel_obj_attr return a tuple with
        # the object's identity.
        # rel_obj_attr = self.field.get_foreign_related_value
        def versioned_fk_rel_obj_attr(versioned_rel_obj):
            # Trailing comma: must be a 1-tuple to mirror the shape of
            # get_foreign_related_value's return value.
            return versioned_rel_obj.identity,

        rel_obj_attr = versioned_fk_rel_obj_attr
        instance_attr = self.field.get_local_related_value
        instances_dict = {instance_attr(inst): inst for inst in instances}

        # CleanerVersion change 3: fake the related field so that it provides
        # a name of 'identity'.
        # related_field = self.field.foreign_related_fields[0]
        related_field = namedtuple('VersionedRelatedFieldTuple', 'name')(
            'identity')

        # FIXME: This will need to be revisited when we introduce support for
        # composite fields. In the meantime we take this practical approach to
        # solve a regression on 1.6 when the reverse manager in hidden
        # (related_name ends with a '+'). Refs #21410.
        # The check for len(...) == 1 is a special case that allows the query
        # to be join-less and smaller. Refs #21760.
        if self.field.remote_field.is_hidden() or len(
                self.field.foreign_related_fields) == 1:
            query = {'%s__in' % related_field.name: set(
                instance_attr(inst)[0] for inst in instances)}
        else:
            query = {'%s__in' % self.field.related_query_name(): instances}
        queryset = queryset.filter(**query)

        # Since we're going to assign directly in the cache,
        # we must manage the reverse relation cache manually.
        if not self.field.remote_field.multiple:
            rel_obj_cache_name = self.field.remote_field.get_cache_name()
            for rel_obj in queryset:
                instance = instances_dict[rel_obj_attr(rel_obj)]
                setattr(rel_obj, rel_obj_cache_name, instance)

        # Django >= 2.0 expects an extra trailing boolean in the tuple.
        if VERSION[:1] < (2,):
            return (queryset, rel_obj_attr, instance_attr, True,
                    self.field.get_cache_name())
        else:
            return (queryset, rel_obj_attr, instance_attr, True,
                    self.field.get_cache_name(), False)

    def get_queryset(self, **hints):
        """
        Return a queryset for the related model, carrying over the hinted
        instance's querytime so related lookups see the same point in time.
        """
        queryset = self.field.remote_field.model.objects\
            .db_manager(hints=hints).all()
        if hasattr(queryset, 'querytime'):
            if 'instance' in hints:
                instance = hints['instance']
                if hasattr(instance, '_querytime'):
                    # Only restrict when the instance carries an active
                    # querytime that differs from the queryset's own.
                    if instance._querytime.active and \
                            instance._querytime != queryset.querytime:
                        queryset = queryset.as_of(instance._querytime.time)
                else:
                    # Instance without querytime: default to "current".
                    queryset = queryset.as_of(None)
        return queryset

    def __get__(self, instance, cls=None):
        """
        The getter method returns the object, which points instance,
        e.g. choice.poll returns a Poll instance, whereas the Poll class
        defines the ForeignKey.

        :param instance: The object on which the property was accessed
        :param cls: The type of the instance object
        :return: Returns a Versionable
        """
        from versions.models import Versionable

        if instance is None:
            # Accessed on the class, not an instance: return the descriptor.
            return self

        current_elt = super(self.__class__, self).__get__(instance,
                                                          cls)

        if not current_elt:
            return None

        if not isinstance(current_elt, Versionable):
            raise TypeError("VersionedForeignKey target is of type " +
                            str(type(current_elt)) +
                            ", which is not a subclass of Versionable")

        if hasattr(instance, '_querytime'):
            # If current_elt matches the instance's querytime, there's no
            # need to make a database query.
            if matches_querytime(current_elt, instance._querytime):
                current_elt._querytime = instance._querytime
                return current_elt

            # Otherwise fetch the version of the same identity that was
            # valid at the instance's querytime.
            return current_elt.__class__.objects.as_of(
                instance._querytime.time).get(identity=current_elt.identity)
        else:
            return current_elt.__class__.objects.current.get(
                identity=current_elt.identity)
vforward_many_to_one_descriptor_class = VersionedForwardManyToOneDescriptor
class VersionedReverseManyToOneDescriptor(ReverseManyToOneDescriptor):
    """
    Accessor for the reverse side of a VersionedForeignKey (e.g.
    ``city.team_set``). Produces a related manager that filters on identity
    and honors querytime restrictions, and that clones objects before
    mutating the relation so history is preserved.
    """

    @cached_property
    def related_manager_cls(self):
        manager_cls = super(VersionedReverseManyToOneDescriptor,
                            self).related_manager_cls
        rel_field = self.field

        class VersionedRelatedManager(manager_cls):
            def __init__(self, instance):
                super(VersionedRelatedManager, self).__init__(instance)

                # This is a hack, in order to get the versioned related
                # objects: rewrite the core filters so they match on the
                # instance's identity instead of its primary key.
                for key in self.core_filters.keys():
                    if '__exact' in key or '__' not in key:
                        self.core_filters[key] = instance.identity

            def get_queryset(self):
                from versions.models import VersionedQuerySet

                queryset = super(VersionedRelatedManager, self).get_queryset()
                # Do not set the query time if it is already correctly set.
                # queryset.as_of() returns a clone of the queryset, and this
                # will destroy the prefetched objects cache if it exists.
                if isinstance(queryset, VersionedQuerySet) \
                        and self.instance._querytime.active \
                        and queryset.querytime != self.instance._querytime:
                    queryset = queryset.as_of(self.instance._querytime.time)
                return queryset

            def get_prefetch_queryset(self, instances, queryset=None):
                """
                Overrides RelatedManager's implementation of
                get_prefetch_queryset so that it works nicely with
                VersionedQuerySets. It ensures that identities and
                time-limited where clauses are used when selecting related
                reverse foreign key objects.
                """
                if queryset is None:
                    # Note that this intentionally call's VersionManager's
                    # get_queryset, instead of simply calling the superclasses'
                    # get_queryset (as the non-versioned RelatedManager does),
                    # because what is needed is a simple Versioned queryset
                    # without any restrictions (e.g. do not apply
                    # self.core_filters).
                    from versions.models import VersionManager
                    queryset = VersionManager.get_queryset(self)

                queryset._add_hints(instance=instances[0])
                queryset = queryset.using(queryset._db or self._db)
                instance_querytime = instances[0]._querytime
                if instance_querytime.active:
                    if queryset.querytime.active and \
                            queryset.querytime.time != \
                            instance_querytime.time:
                        raise ValueError(
                            "A Prefetch queryset that specifies an as_of time "
                            "must match the as_of of the base queryset.")
                    else:
                        queryset.querytime = instance_querytime

                rel_obj_attr = rel_field.get_local_related_value
                instance_attr = rel_field.get_foreign_related_value
                # Use identities instead of ids so that this will work with
                # versioned objects.
                instances_dict = {(inst.identity,): inst
                                  for inst in instances}
                identities = [inst.identity for inst in instances]
                query = {'%s__identity__in' % rel_field.name: identities}
                queryset = queryset.filter(**query)

                # Since we just bypassed this class' get_queryset(), we must
                # manage the reverse relation manually.
                for rel_obj in queryset:
                    instance = instances_dict[rel_obj_attr(rel_obj)]
                    setattr(rel_obj, rel_field.name, instance)
                cache_name = rel_field.related_query_name()
                # Django >= 2.0 expects an extra trailing boolean.
                if VERSION[:1] < (2,):
                    return (queryset, rel_obj_attr, instance_attr, False,
                            cache_name)
                else:
                    return (queryset, rel_obj_attr, instance_attr, False,
                            cache_name, False)

            def add(self, *objs, **kwargs):
                from versions.models import Versionable
                cloned_objs = ()
                for obj in objs:
                    if not isinstance(obj, Versionable):
                        raise TypeError(
                            "Trying to add a non-Versionable to a "
                            "VersionedForeignKey relationship")
                    # Clone before adding so the pre-add state survives as
                    # a historical version.
                    cloned_objs += (obj.clone(),)
                super(VersionedRelatedManager, self).add(*cloned_objs,
                                                         **kwargs)

            # clear() and remove() are present if the FK is nullable
            if 'clear' in dir(manager_cls):
                def clear(self, **kwargs):
                    """
                    Overridden to ensure that the current queryset is used,
                    and to clone objects before they are removed, so that
                    history is not lost.
                    """
                    bulk = kwargs.pop('bulk', True)
                    db = router.db_for_write(self.model,
                                             instance=self.instance)
                    queryset = self.current.using(db)
                    with transaction.atomic(using=db, savepoint=False):
                        cloned_pks = [obj.clone().pk for obj in queryset]
                        update_qs = self.current.filter(pk__in=cloned_pks)
                        self._clear(update_qs, bulk)

            if 'remove' in dir(manager_cls):
                def remove(self, *objs, **kwargs):
                    from versions.models import Versionable

                    val = rel_field.get_foreign_related_value(self.instance)
                    cloned_objs = ()
                    for obj in objs:
                        # Is obj actually part of this descriptor set?
                        # Otherwise, silently go over it, since Django
                        # handles that case
                        if rel_field.get_local_related_value(obj) == val:
                            # Refuse non-versionable items: removing them
                            # without cloning would lose history.
                            # (Comment fixed: the code raises here; it does
                            # not silently skip.)
                            if not isinstance(obj, Versionable):
                                raise TypeError(
                                    "Trying to remove a non-Versionable from "
                                    "a VersionedForeignKey relationship")
                            cloned_objs += (obj.clone(),)
                    super(VersionedRelatedManager, self).remove(*cloned_objs,
                                                                **kwargs)

        return VersionedRelatedManager
class VersionedManyToManyDescriptor(ManyToManyDescriptor):
    """
    Descriptor for VersionedManyToManyField accessors. Plugs in a
    versioning-aware related manager and replaces bulk clear-and-set
    assignment with an add/remove diff so version history is retained.
    """

    @cached_property
    def related_manager_cls(self):
        model = self.rel.related_model if self.reverse else self.rel.model
        return create_versioned_forward_many_to_many_manager(
            model._default_manager.__class__,
            self.rel,
            reverse=self.reverse,
        )

    def __set__(self, instance, value):
        """
        Completely overridden to avoid bulk deletion that happens when the
        parent method calls clear().

        The parent method's logic is basically: clear all in bulk, then add
        the given objects in bulk.
        Instead, we figure out which ones are being added and removed, and
        call add and remove for these values.
        This lets us retain the versioning information.

        Since this is a many-to-many relationship, it is assumed here that
        the django.db.models.deletion.Collector logic, that is used in
        clear(), is not necessary here. Collector collects related models,
        e.g. ones that should also be deleted because they have
        a ON CASCADE DELETE relationship to the object, or, in the case of
        "Multi-table inheritance", are parent objects.

        :param instance: The instance on which the getter was called
        :param value: iterable of items to set
        """
        if not instance.is_current:
            raise SuspiciousOperation(
                "Related values can only be directly set on the current "
                "version of an object")

        if not self.field.remote_field.through._meta.auto_created:
            # BUGFIX: read _meta via ``remote_field`` (as the guard above
            # does); the legacy ``field.rel`` alias was removed in
            # Django 2.0 and would raise AttributeError here instead of
            # producing the intended error message.
            opts = self.field.remote_field.through._meta
            raise AttributeError((
                "Cannot set values on a ManyToManyField "
                "which specifies an intermediary model. "
                "Use %s.%s's Manager instead.") % (
                opts.app_label, opts.object_name))

        manager = self.__get__(instance)
        # Below comment is from parent __set__ method. We'll force
        # evaluation, too:
        # clear() can change expected output of 'value' queryset, we force
        # evaluation of queryset before clear; ticket #19816
        value = tuple(value)

        being_removed, being_added = self.get_current_m2m_diff(instance, value)
        timestamp = get_utc_now()
        manager.remove_at(timestamp, *being_removed)
        manager.add_at(timestamp, *being_added)

    def get_current_m2m_diff(self, instance, new_objects):
        """
        Diff the currently-related object ids against ``new_objects``.

        :param instance: Versionable object
        :param new_objects: objects which are about to be associated with
            instance
        :return: (being_removed id list, being_added id list)
        :rtype : tuple
        """
        new_ids = self.pks_from_objects(new_objects)
        relation_manager = self.__get__(instance)

        # Renamed from ``filter`` to avoid shadowing the builtin.
        source_filter = Q(
            **{relation_manager.source_field.attname: instance.pk})
        qs = self.through.objects.current.filter(source_filter)
        try:
            # Django 1.7
            target_name = relation_manager.target_field.attname
        except AttributeError:
            # Django 1.6
            target_name = relation_manager.through._meta.get_field_by_name(
                relation_manager.target_field_name)[0].attname
        current_ids = set(qs.values_list(target_name, flat=True))

        being_removed = current_ids - new_ids
        being_added = new_ids - current_ids
        return list(being_removed), list(being_added)

    def pks_from_objects(self, objects):
        """
        Extract all the primary key strings from the given objects.
        Objects may be Versionables, or bare primary keys.

        :rtype : set
        """
        return {o.pk if isinstance(o, Model) else o for o in objects}
def create_versioned_forward_many_to_many_manager(superclass, rel,
                                                  reverse=None):
    """
    Build a many-to-many related-manager class that keeps version history:
    relations are never hard-deleted — their version_end_date is set
    instead — and lookups are restricted to the relevant point in time.

    :param superclass: the related model's default manager class
    :param rel: the many-to-many relation object
    :param reverse: whether this is the reverse side of the relation
    :return: a VersionedManyRelatedManager class
    """
    many_related_manager_klass = create_forward_many_to_many_manager(
        superclass, rel, reverse)

    class VersionedManyRelatedManager(many_related_manager_klass):
        def __init__(self, *args, **kwargs):
            super(VersionedManyRelatedManager, self).__init__(*args,
                                                              **kwargs)
            # Additional core filters are:
            # version_start_date <= t &
            # (version_end_date > t | version_end_date IS NULL)
            # but we cannot work with the Django core filters, since they
            # don't support ORing filters, which is a thing we need to
            # consider the "version_end_date IS NULL" case;
            # So, we define our own set of core filters being applied when
            # versioning
            try:
                # Fail fast if the through model is not versioned.
                _ = self.through._meta.get_field('version_start_date')
                _ = self.through._meta.get_field('version_end_date')
            except FieldDoesNotExist as e:
                fields = [f.name for f in self.through._meta.get_fields()]
                print(str(e) + "; available fields are " + ", ".join(fields))
                raise e

        # FIXME: this probably does not work when auto-referencing

        def get_queryset(self):
            """
            Add a filter to the queryset, limiting the results to be pointed
            by relationship that are valid for the given timestamp (which is
            taken at the current instance, or set to now, if not available).

            Long story short, apply the temporal validity filter also to the
            intermediary model.
            """
            queryset = super(VersionedManyRelatedManager,
                             self).get_queryset()
            if hasattr(queryset, 'querytime'):
                if self.instance._querytime.active and \
                        self.instance._querytime != queryset.querytime:
                    queryset = queryset.as_of(self.instance._querytime.time)
            return queryset

        def _remove_items(self, source_field_name, target_field_name, *objs):
            """
            Instead of removing items, we simply set the version_end_date of
            the current item to the current timestamp --> t[now].
            Like that, there is no more current entry having that identity -
            which is equal to not existing for timestamps greater than t[now].
            """
            return self._remove_items_at(None, source_field_name,
                                         target_field_name, *objs)

        def _remove_items_at(self, timestamp, source_field_name,
                             target_field_name, *objs):
            # Soft-delete the through-table rows linking self.instance to
            # ``objs``, effective at ``timestamp`` (defaults to now).
            if objs:
                if timestamp is None:
                    timestamp = get_utc_now()
                old_ids = set()
                for obj in objs:
                    if isinstance(obj, self.model):
                        # The Django 1.7-way is preferred
                        if hasattr(self, 'target_field'):
                            fk_val = \
                                self.target_field \
                                .get_foreign_related_value(obj)[0]
                        else:
                            raise TypeError(
                                "We couldn't find the value of the foreign "
                                "key, this might be due to the use of an "
                                "unsupported version of Django")
                        old_ids.add(fk_val)
                    else:
                        # Bare primary keys may be passed instead of model
                        # instances.
                        old_ids.add(obj)
                db = router.db_for_write(self.through,
                                         instance=self.instance)
                qs = self.through._default_manager.using(db).filter(**{
                    source_field_name: self.instance.id,
                    '%s__in' % target_field_name: old_ids
                }).as_of(timestamp)
                for relation in qs:
                    relation._delete_at(timestamp)

        if 'add' in dir(many_related_manager_klass):
            def add(self, *objs):
                if not self.instance.is_current:
                    raise SuspiciousOperation(
                        "Adding many-to-many related objects is only possible "
                        "on the current version")

                # The ManyRelatedManager.add() method uses the through model's
                # default manager to get a queryset when looking at which
                # objects already exist in the database.
                # In order to restrict the query to the current versions when
                # that is done, we temporarily replace the queryset's using
                # method so that the version validity condition can be
                # specified.
                klass = self.through._default_manager.get_queryset().__class__
                __using_backup = klass.using

                def using_replacement(self, *args, **kwargs):
                    qs = __using_backup(self, *args, **kwargs)
                    return qs.as_of(None)

                # NOTE(review): this patches the queryset class globally for
                # the duration of the call — not thread-safe; confirm this is
                # acceptable for the project's usage.
                klass.using = using_replacement
                super(VersionedManyRelatedManager, self).add(*objs)
                klass.using = __using_backup

            def add_at(self, timestamp, *objs):
                """
                This function adds an object at a certain point in time
                (timestamp)
                """
                # First off, define the new constructor
                def _through_init(self, *args, **kwargs):
                    super(self.__class__, self).__init__(*args, **kwargs)
                    self.version_birth_date = timestamp
                    self.version_start_date = timestamp

                # Through-classes have an empty constructor, so it can easily
                # be overwritten when needed;
                # This is not the default case, so the overwrite only takes
                # place when we "modify the past"
                self.through.__init_backup__ = self.through.__init__
                self.through.__init__ = _through_init

                # Do the add operation
                self.add(*objs)

                # Remove the constructor again (by replacing it with the
                # original empty constructor)
                self.through.__init__ = self.through.__init_backup__
                del self.through.__init_backup__

            add_at.alters_data = True

        if 'remove' in dir(many_related_manager_klass):
            def remove_at(self, timestamp, *objs):
                """
                Performs the act of removing specified relationships at a
                specified time (timestamp);
                So, not the objects at a given time are removed, but their
                relationship!
                """
                self._remove_items_at(timestamp, self.source_field_name,
                                      self.target_field_name, *objs)

                # For consistency, also handle the symmetrical case
                if self.symmetrical:
                    self._remove_items_at(timestamp, self.target_field_name,
                                          self.source_field_name, *objs)

            remove_at.alters_data = True

    return VersionedManyRelatedManager
|
{"/versions_tests/tests/test_admin.py": ["/versions/admin.py", "/versions_tests/models.py"], "/versions_tests/tests/test_models.py": ["/versions/exceptions.py", "/versions/models.py", "/versions_tests/models.py"], "/versions/models.py": ["/versions/exceptions.py", "/versions/settings.py", "/versions/util/__init__.py", "/versions/fields.py"], "/versions/util/helper.py": ["/versions/models.py"], "/versions_tests/admin.py": ["/versions/admin.py", "/versions_tests/models.py"], "/versions_tests/tests/test_utils.py": ["/versions/util/postgresql.py", "/versions_tests/models.py"], "/versions/fields.py": ["/versions/descriptors.py", "/versions/models.py"], "/versions/descriptors.py": ["/versions/util/__init__.py", "/versions/models.py"], "/versions/deletion.py": ["/versions/models.py"], "/versions_tests/models.py": ["/versions/fields.py", "/versions/models.py"], "/versions_tests/apps.py": ["/versions/util/postgresql.py"], "/versions/util/postgresql.py": ["/versions/fields.py", "/versions/util/helper.py"]}
|
7,545
|
JoshLabs/cleanerversion
|
refs/heads/master
|
/versions_tests/tests/test_commands.py
|
from django.core.management import call_command
from django.test import TestCase
# Django app label whose migrations are exercised below.
APP_NAME = 'versions_tests'


class TestMigrations(TestCase):
    def test_makemigrations_command(self):
        """Smoke-test that makemigrations runs for the versioned test app."""
        # dry_run leaves the filesystem untouched; verbosity=0 silences
        # output. The command raising would fail the test — presumably the
        # point is to catch unserializable versioned fields.
        call_command('makemigrations', APP_NAME, dry_run=True, verbosity=0)
|
{"/versions_tests/tests/test_admin.py": ["/versions/admin.py", "/versions_tests/models.py"], "/versions_tests/tests/test_models.py": ["/versions/exceptions.py", "/versions/models.py", "/versions_tests/models.py"], "/versions/models.py": ["/versions/exceptions.py", "/versions/settings.py", "/versions/util/__init__.py", "/versions/fields.py"], "/versions/util/helper.py": ["/versions/models.py"], "/versions_tests/admin.py": ["/versions/admin.py", "/versions_tests/models.py"], "/versions_tests/tests/test_utils.py": ["/versions/util/postgresql.py", "/versions_tests/models.py"], "/versions/fields.py": ["/versions/descriptors.py", "/versions/models.py"], "/versions/descriptors.py": ["/versions/util/__init__.py", "/versions/models.py"], "/versions/deletion.py": ["/versions/models.py"], "/versions_tests/models.py": ["/versions/fields.py", "/versions/models.py"], "/versions_tests/apps.py": ["/versions/util/postgresql.py"], "/versions/util/postgresql.py": ["/versions/fields.py", "/versions/util/helper.py"]}
|
7,546
|
JoshLabs/cleanerversion
|
refs/heads/master
|
/versions/deletion.py
|
from django.db.models.deletion import (
attrgetter, signals, sql, transaction,
CASCADE,
Collector,
)
import versions.models
class VersionedCollector(Collector):
    """
    A Collector that can be used to collect and delete Versionable objects.

    The delete operation for Versionable objects is Versionable._delete_at,
    which does not delete the record, it updates it's version_end_date to be
    the timestamp passed to the delete() method.

    Since non-versionable and versionable objects can be related, the delete()
    method handles both of them. The standard Django behaviour is kept for
    non-versionable objects. For versionable objects, no pre/post-delete
    signals are sent. No signal is sent because the object is not being
    removed from the database. If you want the standard signals to be sent,
    or custom signals, create a subclass of this class and override
    versionable_pre_delete() and/or versionable_post_delete(), and in your
    settings file specify the dotted path to your custom class as a
    string, e.g.:

    VERSIONED_DELETE_COLLECTOR_CLASS =
    'myapp.deletion.CustomVersionedCollector'
    """

    def can_fast_delete(self, objs, from_field=None):
        """Do not fast delete anything"""
        return False

    def is_versionable(self, model):
        # Duck-typing check: Versionable models declare these two class
        # attributes.
        return hasattr(model, 'VERSION_IDENTIFIER_FIELD') and \
            hasattr(model, 'OBJECT_IDENTIFIER_FIELD')

    def delete(self, timestamp):
        """
        Delete the collected objects: versionables are soft-deleted (their
        version ended) at ``timestamp``; everything else is hard-deleted as
        Django normally would.

        :param datetime timestamp: version_end_date to set on versionables
        """
        # sort instance collections
        for model, instances in self.data.items():
            self.data[model] = sorted(instances, key=attrgetter("pk"))

        # if possible, bring the models in an order suitable for databases
        # that don't support transactions or cannot defer constraint checks
        # until the end of a transaction.
        self.sort()

        with transaction.atomic(using=self.using, savepoint=False):
            # send pre_delete signals, but not for versionables
            for model, obj in self.instances_with_model():
                if not model._meta.auto_created:
                    if self.is_versionable(model):
                        # By default, no signal is sent when deleting a
                        # Versionable.
                        self.versionable_pre_delete(obj, timestamp)
                    else:
                        signals.pre_delete.send(
                            sender=model, instance=obj, using=self.using
                        )

            # do not do fast deletes
            if self.fast_deletes:
                raise RuntimeError("No fast_deletes should be present; "
                                   "they are not safe for Versionables")

            # update fields
            for model, instances_for_fieldvalues in \
                    self.field_updates.items():
                # Maps original pk -> clone so each instance is cloned at
                # most once even when several field updates touch it.
                id_map = {}
                for (field, value), instances in \
                        instances_for_fieldvalues.items():
                    if self.is_versionable(model):
                        # Do not set the foreign key to null, which can be the
                        # behaviour (depending on DB backend) for the default
                        # CASCADE on_delete method.
                        # In the case of a SET.. method, clone before
                        # changing the value (if it hasn't already been cloned)
                        updated_instances = set()
                        # NOTE(review): ``versions.fields`` is reached via
                        # the ``versions`` package; it is made importable by
                        # the module-level ``import versions.models`` —
                        # confirm versions.models keeps importing fields.
                        if not (isinstance(
                                field,
                                versions.fields.VersionedForeignKey) and
                                field.remote_field.on_delete == CASCADE):
                            for instance in instances:
                                # Clone before updating
                                cloned = id_map.get(instance.pk, None)
                                if not cloned:
                                    cloned = instance.clone()
                                    id_map[instance.pk] = cloned
                                updated_instances.add(cloned)
                            # TODO: instance should get updated with new
                            # values from clone ?
                            instances_for_fieldvalues[
                                (field, value)] = updated_instances

                # Replace the instances with their clones in self.data, too
                model_instances = self.data.get(model, {})
                for index, instance in enumerate(model_instances):
                    cloned = id_map.get(instance.pk)
                    if cloned:
                        self.data[model][index] = cloned

                query = sql.UpdateQuery(model)
                for (field, value), instances in \
                        instances_for_fieldvalues.items():
                    if instances:
                        query.update_batch([obj.pk for obj in instances],
                                           {field.name: value}, self.using)

            # reverse instance collections
            for instances in self.data.values():
                instances.reverse()

            # delete instances
            for model, instances in self.data.items():
                if self.is_versionable(model):
                    for instance in instances:
                        self.versionable_delete(instance, timestamp)
                        if not model._meta.auto_created:
                            # By default, no signal is sent when deleting a
                            # Versionable.
                            self.versionable_post_delete(instance, timestamp)
                else:
                    query = sql.DeleteQuery(model)
                    pk_list = [obj.pk for obj in instances]
                    query.delete_batch(pk_list, self.using)

                    if not model._meta.auto_created:
                        for obj in instances:
                            signals.post_delete.send(
                                sender=model, instance=obj, using=self.using
                            )

        # update collected instances
        for model, instances_for_fieldvalues in self.field_updates.items():
            for (field, value), instances in \
                    instances_for_fieldvalues.items():
                for obj in instances:
                    setattr(obj, field.attname, value)

        # Do not set Versionable object ids to None, since they still do have
        # an id.
        # Instead, set their version_end_date.
        for model, instances in self.data.items():
            is_versionable = self.is_versionable(model)
            for instance in instances:
                if is_versionable:
                    setattr(instance, 'version_end_date', timestamp)
                else:
                    setattr(instance, model._meta.pk.attname, None)

    def related_objects(self, related, objs):
        """
        Gets a QuerySet of current objects related to ``objs`` via the
        relation ``related``.
        """
        from versions.models import Versionable

        related_model = related.related_model
        if issubclass(related_model, Versionable):
            # Versionables: only the current versions are candidates.
            qs = related_model.objects.current
        else:
            qs = related_model._base_manager.all()
        return qs.using(self.using).filter(
            **{"%s__in" % related.field.name: objs}
        )

    def versionable_pre_delete(self, instance, timestamp):
        """
        Override this method to implement custom behaviour. By default,
        does nothing.

        :param Versionable instance:
        :param datetime timestamp:
        """
        pass

    def versionable_post_delete(self, instance, timestamp):
        """
        Override this method to implement custom behaviour. By default,
        does nothing.

        :param Versionable instance:
        :param datetime timestamp:
        """
        pass

    def versionable_delete(self, instance, timestamp):
        """
        Soft-deletes the instance, setting it's version_end_date to timestamp.

        Override this method to implement custom behaviour.

        :param Versionable instance:
        :param datetime timestamp:
        """
        instance._delete_at(timestamp, using=self.using)
|
{"/versions_tests/tests/test_admin.py": ["/versions/admin.py", "/versions_tests/models.py"], "/versions_tests/tests/test_models.py": ["/versions/exceptions.py", "/versions/models.py", "/versions_tests/models.py"], "/versions/models.py": ["/versions/exceptions.py", "/versions/settings.py", "/versions/util/__init__.py", "/versions/fields.py"], "/versions/util/helper.py": ["/versions/models.py"], "/versions_tests/admin.py": ["/versions/admin.py", "/versions_tests/models.py"], "/versions_tests/tests/test_utils.py": ["/versions/util/postgresql.py", "/versions_tests/models.py"], "/versions/fields.py": ["/versions/descriptors.py", "/versions/models.py"], "/versions/descriptors.py": ["/versions/util/__init__.py", "/versions/models.py"], "/versions/deletion.py": ["/versions/models.py"], "/versions_tests/models.py": ["/versions/fields.py", "/versions/models.py"], "/versions_tests/apps.py": ["/versions/util/postgresql.py"], "/versions/util/postgresql.py": ["/versions/fields.py", "/versions/util/helper.py"]}
|
7,547
|
JoshLabs/cleanerversion
|
refs/heads/master
|
/versions_tests/models.py
|
# -*- coding: utf-8 -*-
from django.db.models import CharField, IntegerField, Model, ForeignKey, \
CASCADE
from django.db.models.deletion import DO_NOTHING, PROTECT, SET, SET_NULL
from django.utils.encoding import python_2_unicode_compatible
from versions.fields import VersionedManyToManyField, VersionedForeignKey
from versions.models import Versionable
def versionable_description(obj):
    """
    Return a short debug string for a Versionable instance: class name,
    object name, validity interval and birth date.
    """
    end = obj.version_end_date
    pieces = [
        "<", str(obj.__class__.__name__), " object: ", obj.name,
        " {valid: [", obj.version_start_date.isoformat(),
        " | ", end.isoformat() if end else "None",
        "], created: ", obj.version_birth_date.isoformat(), "}>",
    ]
    return "".join(pieces)
############################################
# The following model is used for:
# - CreationTest
# - DeletionTest
# - CurrentVersionTest
# - VersionedQuerySetTest
# - VersionNavigationTest
# - HistoricObjectsHandling
class B(Versionable):
    # Minimal versionable model used by the basic creation / deletion /
    # navigation test cases listed in the section banner above.
    name = CharField(max_length=200)

    __str__ = versionable_description
############################################
# Models for
# - DeletionHandlerTest
# - OneToManyTest
# - PrefetchingTest
# - VersionNavigationAsOfTest
# - VersionRestoreTest
# - DetachTest
# - DeferredFieldsTest
# – VersionedAdminTest
@python_2_unicode_compatible
class City(Versionable):
    name = CharField(max_length=200)

    __str__ = versionable_description


@python_2_unicode_compatible
class Team(Versionable):
    name = CharField(max_length=200)
    city = VersionedForeignKey(City, null=True, on_delete=CASCADE)

    __str__ = versionable_description


@python_2_unicode_compatible
class Player(Versionable):
    name = CharField(max_length=200)
    team = VersionedForeignKey(Team, null=True, on_delete=CASCADE)

    __str__ = versionable_description


class Award(Versionable):
    name = CharField(max_length=200)
    players = VersionedManyToManyField(Player, related_name='awards')


@python_2_unicode_compatible
class Mascot(Versionable):
    name = CharField(max_length=200)
    # Non-nullable FK with CASCADE, unlike Player/Team above.
    team = VersionedForeignKey(Team, null=False, on_delete=CASCADE)

    __str__ = versionable_description


def default_team():
    # SET() callback for Fan.team: substitutes the current Team whose name
    # starts with 'default_team.' when the referenced Team is deleted.
    return Team.objects.current.get(name__startswith='default_team.')


@python_2_unicode_compatible
class Fan(Versionable):
    name = CharField(max_length=200)
    team = VersionedForeignKey(Team, null=False, on_delete=SET(default_team))

    __str__ = versionable_description


@python_2_unicode_compatible
class RabidFan(Versionable):
    name = CharField(max_length=200)
    # SET_NULL handler; field must therefore be nullable.
    team = VersionedForeignKey(Team, null=True, on_delete=SET_NULL)

    __str__ = versionable_description


@python_2_unicode_compatible
class WizardFan(Versionable):
    name = CharField(max_length=200)
    team = VersionedForeignKey(Team, null=True, on_delete=PROTECT)

    __str__ = versionable_description


@python_2_unicode_compatible
class NonFan(Versionable):
    name = CharField(max_length=200)
    team = VersionedForeignKey(Team, null=False, on_delete=DO_NOTHING)

    __str__ = versionable_description
############################################
# SelfOneToManyTest models
class Directory(Versionable):
    name = CharField(max_length=100)
    # Self-referencing versioned FK; null parent marks a root directory.
    parent = VersionedForeignKey('self', null=True, on_delete=CASCADE)
# ############################################
# MultiM2MTest models
@python_2_unicode_compatible
class Professor(Versionable):
    name = CharField(max_length=200)
    address = CharField(max_length=200)
    phone_number = CharField(max_length=200)

    __str__ = versionable_description


@python_2_unicode_compatible
class Classroom(Versionable):
    name = CharField(max_length=200)
    building = CharField(max_length=200)

    __str__ = versionable_description


@python_2_unicode_compatible
class Student(Versionable):
    name = CharField(max_length=200)
    # Two independent versioned m2m relations on one model; both reverse
    # accessors are called 'students' on their respective targets.
    professors = VersionedManyToManyField("Professor",
                                          related_name='students')
    classrooms = VersionedManyToManyField("Classroom",
                                          related_name='students')

    __str__ = versionable_description
############################################
# MultiM2MToSameTest models
@python_2_unicode_compatible
class Pupil(Versionable):
name = CharField(max_length=200)
phone_number = CharField(max_length=200)
language_teachers = VersionedManyToManyField(
'Teacher', related_name='language_students')
science_teachers = VersionedManyToManyField(
'Teacher', related_name='science_students')
__str__ = versionable_description
# Shared target of Pupil.language_teachers and Pupil.science_teachers.
@python_2_unicode_compatible
class Teacher(Versionable):
    name = CharField(max_length=200)
    domain = CharField(max_length=200)
    __str__ = versionable_description
############################################
# ManyToManyFilteringTest models
# First link of the C1 -> C2 -> C3 M2M chain used for filtering tests.
@python_2_unicode_compatible
class C1(Versionable):
    name = CharField(max_length=50)
    c2s = VersionedManyToManyField("C2", related_name='c1s')
    __str__ = versionable_description
# Middle link of the C1 -> C2 -> C3 M2M chain.
@python_2_unicode_compatible
class C2(Versionable):
    name = CharField(max_length=50)
    c3s = VersionedManyToManyField("C3", related_name='c2s')
    __str__ = versionable_description
# Terminal link of the C1 -> C2 -> C3 M2M chain.
@python_2_unicode_compatible
class C3(Versionable):
    name = CharField(max_length=50)
    __str__ = versionable_description
############################################
# HistoricM2MOperationsTests models
# M2M target of Subject.observers (see Subject below).
@python_2_unicode_compatible
class Observer(Versionable):
    name = CharField(max_length=200)
    __str__ = versionable_description
# Owns the versioned observer/subject M2M for historic-operation tests.
@python_2_unicode_compatible
class Subject(Versionable):
    name = CharField(max_length=200)
    observers = VersionedManyToManyField('Observer', related_name='subjects')
    __str__ = versionable_description
############################################
# VersionUniqueTests models
# Exercises VERSION_UNIQUE: partial unique indexes over current versions
# (see create_current_version_unique_indexes in versions/util/postgresql.py).
class ChainStore(Versionable):
    subchain_id = IntegerField()
    city = CharField(max_length=40)
    name = CharField(max_length=40)
    opening_hours = CharField(max_length=40)
    door_frame_color = VersionedForeignKey('Color', on_delete=CASCADE)
    door_color = VersionedForeignKey('Color', related_name='cs',
                                     on_delete=CASCADE)
    # There are lots of these chain stores. They follow these rules:
    # - only one store with the same name and subchain_id can exist in a
    #   single city
    # - no two stores can share the same door_frame_color and door_color
    # Yea, well, they want to appeal to people who want to be different.
    VERSION_UNIQUE = [['subchain_id', 'city', 'name'],
                      ['door_frame_color', 'door_color']]
# FK target for ChainStore's two color fields.
class Color(Versionable):
    name = CharField(max_length=40)
############################################
# IntegrationNonVersionableModelsTests models
# Deliberately a plain (non-versionable) Model for integration tests.
@python_2_unicode_compatible
class Wine(Model):
    name = CharField(max_length=200)
    vintage = IntegerField()
    def __str__(self):
        return "<" + str(self.__class__.__name__) + " object: " + str(
            self.name) + " (" + str(self.vintage) + ")>"
# Versionable model holding a plain ForeignKey to a NON-versionable model.
@python_2_unicode_compatible
class WineDrinker(Versionable):
    name = CharField(max_length=200)
    glass_content = ForeignKey(Wine, related_name='drinkers', null=True,
                               on_delete=CASCADE)
    __str__ = versionable_description
# Plain Model with a VersionedForeignKey to a versionable model —
# the inverse mix of WineDrinker above.
@python_2_unicode_compatible
class WineDrinkerHat(Model):
    shape_choices = [('Sailor', 'Sailor'),
                     ('Cloche', 'Cloche'),
                     ('Cartwheel', 'Cartwheel'),
                     ('Turban', 'Turban'),
                     ('Breton', 'Breton'),
                     ('Vagabond', 'Vagabond')]
    color = CharField(max_length=40)
    shape = CharField(max_length=200, choices=shape_choices, default='Sailor')
    wearer = VersionedForeignKey(WineDrinker, related_name='hats', null=True,
                                 on_delete=CASCADE)
    def __str__(self):
        return "<" + str(self.__class__.__name__) + " object: " + str(
            self.shape) + " (" + str(self.color) + ")>"
############################################
# SelfReferencingManyToManyTest models
# Non-symmetrical self-referencing M2M (children/parents).
class Person(Versionable):
    name = CharField(max_length=200)
    children = VersionedManyToManyField('self', symmetrical=False,
                                        related_name='parents')
|
{"/versions_tests/tests/test_admin.py": ["/versions/admin.py", "/versions_tests/models.py"], "/versions_tests/tests/test_models.py": ["/versions/exceptions.py", "/versions/models.py", "/versions_tests/models.py"], "/versions/models.py": ["/versions/exceptions.py", "/versions/settings.py", "/versions/util/__init__.py", "/versions/fields.py"], "/versions/util/helper.py": ["/versions/models.py"], "/versions_tests/admin.py": ["/versions/admin.py", "/versions_tests/models.py"], "/versions_tests/tests/test_utils.py": ["/versions/util/postgresql.py", "/versions_tests/models.py"], "/versions/fields.py": ["/versions/descriptors.py", "/versions/models.py"], "/versions/descriptors.py": ["/versions/util/__init__.py", "/versions/models.py"], "/versions/deletion.py": ["/versions/models.py"], "/versions_tests/models.py": ["/versions/fields.py", "/versions/models.py"], "/versions_tests/apps.py": ["/versions/util/postgresql.py"], "/versions/util/postgresql.py": ["/versions/fields.py", "/versions/util/helper.py"]}
|
7,548
|
JoshLabs/cleanerversion
|
refs/heads/master
|
/versions/settings.py
|
import importlib
from django import VERSION
from django.conf import settings as django_settings
_cache = {}
class VersionsSettings(object):
    """
    Resolves a setting from django.conf.settings when present, otherwise
    falls back to the package defaults declared below.
    A magic accessor is used instead of plain module-level variables
    because Django dislikes django.conf.settings being touched at module
    import time.
    """
    defaults = {
        'VERSIONED_DELETE_COLLECTOR': 'versions.deletion.VersionedCollector',
        'VERSIONS_USE_UUIDFIELD': VERSION[:3] >= (1, 8, 3),
    }
    def __getattr__(self, name):
        # Project settings win over package defaults.
        if hasattr(django_settings, name):
            return getattr(django_settings, name)
        if name in self.defaults:
            return self.defaults[name]
        raise AttributeError(
            "{} object has no attribute {}".format(self.__class__,
                                                   name))
# Module-level singleton consumed by the rest of the package.
settings = VersionsSettings()
def import_from_string(val, setting_name):
    """
    Attempt to import a class from a string representation.
    Based on the method of the same name in Django Rest Framework.

    :param str val: dotted path, e.g. 'versions.deletion.VersionedCollector'
    :param str setting_name: name of the setting being resolved (for the
        error message)
    :return: the imported attribute
    :raises ImportError: if the module or attribute cannot be imported
    """
    try:
        parts = val.split('.')
        module_path, class_name = '.'.join(parts[:-1]), parts[-1]
        module = importlib.import_module(module_path)
        return getattr(module, class_name)
    except ImportError as e:
        # BUG FIX: the arguments were wrapped in an extra tuple, so the
        # 4-placeholder format string raised IndexError instead of
        # producing the intended ImportError message.
        raise ImportError("Could not import '{}' for CleanerVersion "
                          "setting '{}'. {}: {}.".format(val,
                                                         setting_name,
                                                         e.__class__.__name__,
                                                         e))
def get_versioned_delete_collector_class():
    """
    Resolve (and memoize) the collector class used for deletion.
    :return: class
    """
    key = 'VERSIONED_DELETE_COLLECTOR'
    if key not in _cache:
        # First call: resolve the dotted path from settings and cache it.
        dotted_path = getattr(settings, key)
        _cache[key] = import_from_string(dotted_path, key)
    return _cache[key]
|
{"/versions_tests/tests/test_admin.py": ["/versions/admin.py", "/versions_tests/models.py"], "/versions_tests/tests/test_models.py": ["/versions/exceptions.py", "/versions/models.py", "/versions_tests/models.py"], "/versions/models.py": ["/versions/exceptions.py", "/versions/settings.py", "/versions/util/__init__.py", "/versions/fields.py"], "/versions/util/helper.py": ["/versions/models.py"], "/versions_tests/admin.py": ["/versions/admin.py", "/versions_tests/models.py"], "/versions_tests/tests/test_utils.py": ["/versions/util/postgresql.py", "/versions_tests/models.py"], "/versions/fields.py": ["/versions/descriptors.py", "/versions/models.py"], "/versions/descriptors.py": ["/versions/util/__init__.py", "/versions/models.py"], "/versions/deletion.py": ["/versions/models.py"], "/versions_tests/models.py": ["/versions/fields.py", "/versions/models.py"], "/versions_tests/apps.py": ["/versions/util/postgresql.py"], "/versions/util/postgresql.py": ["/versions/fields.py", "/versions/util/helper.py"]}
|
7,549
|
JoshLabs/cleanerversion
|
refs/heads/master
|
/versions_tests/apps.py
|
from django.apps import AppConfig
from django.db import connection
from django.db.models.signals import post_migrate
def index_adjustments(sender, using=None, **kwargs):
    """
    Remove -like indexes (varchar_pattern_ops) on UUID fields and create
    version-unique indexes for models that have a VERSION_UNIQUE attribute.
    :param AppConfig sender: app config whose models are adjusted
    :param str using: database alias
    :param kwargs:
    """
    # Imported lazily so this module can load before the app registry is ready.
    from versions.util.postgresql import (
        remove_uuid_id_like_indexes,
        create_current_version_unique_indexes,
        create_current_version_unique_identity_indexes
    )
    remove_uuid_id_like_indexes(sender.name, using)
    create_current_version_unique_indexes(sender.name, using)
    create_current_version_unique_identity_indexes(sender.name, using)
class VersionsTestsConfig(AppConfig):
    name = 'versions_tests'
    verbose_name = "Versions Tests default application configuration"
    def ready(self):
        """
        For postgresql only, remove like indexes for uuid columns and
        create version-unique indexes.
        This will only be run in django >= 1.7.
        :return: None
        """
        # Index adjustments are postgres-specific DDL; skip other backends.
        if connection.vendor == 'postgresql':
            post_migrate.connect(index_adjustments, sender=self)
|
{"/versions_tests/tests/test_admin.py": ["/versions/admin.py", "/versions_tests/models.py"], "/versions_tests/tests/test_models.py": ["/versions/exceptions.py", "/versions/models.py", "/versions_tests/models.py"], "/versions/models.py": ["/versions/exceptions.py", "/versions/settings.py", "/versions/util/__init__.py", "/versions/fields.py"], "/versions/util/helper.py": ["/versions/models.py"], "/versions_tests/admin.py": ["/versions/admin.py", "/versions_tests/models.py"], "/versions_tests/tests/test_utils.py": ["/versions/util/postgresql.py", "/versions_tests/models.py"], "/versions/fields.py": ["/versions/descriptors.py", "/versions/models.py"], "/versions/descriptors.py": ["/versions/util/__init__.py", "/versions/models.py"], "/versions/deletion.py": ["/versions/models.py"], "/versions_tests/models.py": ["/versions/fields.py", "/versions/models.py"], "/versions_tests/apps.py": ["/versions/util/postgresql.py"], "/versions/util/postgresql.py": ["/versions/fields.py", "/versions/util/helper.py"]}
|
7,550
|
JoshLabs/cleanerversion
|
refs/heads/master
|
/versions/util/postgresql.py
|
from __future__ import absolute_import
from django.db import connection as default_connection
from versions.fields import VersionedForeignKey
from .helper import database_connection, versionable_models
def index_exists(cursor, index_name):
    """
    Report whether an index with the given name exists in the database.
    :param cursor: database connection cursor
    :param index_name: string
    :return: boolean
    """
    query = "SELECT COUNT(1) FROM pg_indexes WHERE indexname = %s"
    cursor.execute(query, [index_name])
    row = cursor.fetchone()
    return row[0] > 0
def remove_uuid_id_like_indexes(app_name, database=None):
    """
    Drop every varchar_pattern_ops index django created for uuid columns.
    A filter like (uuid__like='1ae3c%') is never issued, so these indexes
    are dead weight on Versionable models.
    Idempotent: indexes are only dropped when present, so repeated runs
    (e.g. from a post_migrate handler) leave the database unchanged.
    :param str app_name: application name whose Versionable models will be
        acted on.
    :param str database: database alias to use. If None, use default
        connection.
    :return: number of indexes removed
    :rtype: int
    """
    dropped = 0
    with database_connection(database).cursor() as cursor:
        for model in versionable_models(app_name, include_auto_created=True):
            rows = select_uuid_like_indexes_on_table(model, cursor)
            if not rows:
                continue
            index_list = ','.join('"%s"' % row[0] for row in rows)
            cursor.execute("DROP INDEX %s" % index_list)
            dropped += len(rows)
    return dropped
def get_uuid_like_indexes_on_table(model):
    """
    Convenience wrapper around select_uuid_like_indexes_on_table that
    opens a cursor on the default connection.
    :param model: Django model
    :return: list of database rows; the first field of each row is an index
        name
    """
    with default_connection.cursor() as cursor:
        return select_uuid_like_indexes_on_table(model, cursor)
def select_uuid_like_indexes_on_table(model, cursor):
    """
    Query pg catalogs for the '%_like' indexes that cover the model's
    uuid-bearing columns (the id column plus every VersionedForeignKey).
    :param model: Django model
    :param cursor: database connection cursor
    :return: list of database rows; the first field of each row is an index
        name
    """
    # VersionedForeignKey fields as well as the id fields have these useless
    # like indexes
    quoted_columns = [
        "'%s'" % field.column
        for field in model._meta.fields
        if isinstance(field, VersionedForeignKey)
    ]
    quoted_columns.append("'id'")
    sql = """
        select i.relname as index_name
        from pg_class t,
        pg_class i,
        pg_index ix,
        pg_attribute a
        where t.oid = ix.indrelid
        and i.oid = ix.indexrelid
        and a.attrelid = t.oid
        and a.attnum = ANY(ix.indkey)
        and t.relkind = 'r'
        and t.relname = '{0}'
        and a.attname in ({1})
        and i.relname like '%_like'
    """.format(model._meta.db_table, ','.join(quoted_columns))
    cursor.execute(sql)
    return cursor.fetchall()
def create_current_version_unique_indexes(app_name, database=None):
    """
    Add unique indexes for models which have a VERSION_UNIQUE attribute.
    These must be defined as partially unique indexes, which django
    does not support.
    The unique indexes are defined so that no two *current* versions can have
    the same value.
    This will only try to create indexes if they do not exist in the database,
    so it should be safe to run in a post_migrate signal handler. Running it
    several times should leave the database in the same state as running it
    once.
    :param str app_name: application name whose Versionable models will be
        acted on.
    :param str database: database alias to use. If None, use default
        connection.
    :return: number of partial unique indexes created
    :rtype: int
    """
    indexes_created = 0
    connection = database_connection(database)
    with connection.cursor() as cursor:
        for model in versionable_models(app_name):
            # VERSION_UNIQUE is a list of field-name groups, each group
            # one partial unique index (see ChainStore in the test models).
            unique_field_groups = getattr(model, 'VERSION_UNIQUE', None)
            if not unique_field_groups:
                continue
            table_name = model._meta.db_table
            for group in unique_field_groups:
                col_prefixes = []
                columns = []
                for field in group:
                    column = model._meta.get_field(field).column
                    # Index name uses 3-char column prefixes to stay short.
                    col_prefixes.append(column[0:3])
                    columns.append(column)
                index_name = '%s_%s_%s_v_uniq' % (
                    app_name, table_name, '_'.join(col_prefixes))
                if not index_exists(cursor, index_name):
                    # Partial index: uniqueness enforced only over rows with
                    # no version_end_date, i.e. the current versions.
                    cursor.execute(
                        "CREATE UNIQUE INDEX %s ON %s(%s) "
                        "WHERE version_end_date IS NULL"
                        % (index_name, table_name, ','.join(columns)))
                    indexes_created += 1
    return indexes_created
def create_current_version_unique_identity_indexes(app_name, database=None):
    """
    Create a partial unique index on the identity column of each managed
    versionable model, so no two *current* versions share an identity.
    Idempotent: an index is only created when missing, so running this
    repeatedly (e.g. from a post_migrate handler) is safe.
    :param str app_name: application name whose Versionable models will be
        acted on.
    :param str database: database alias to use. If None, use default
        connection.
    :return: number of partial unique indexes created
    :rtype: int
    """
    created = 0
    conn = database_connection(database)
    with conn.cursor() as cursor:
        for model in versionable_models(app_name):
            if not getattr(model._meta, 'managed', True):
                continue
            table = model._meta.db_table
            index_name = '%s_%s_identity_v_uniq' % (app_name, table)
            if index_exists(cursor, index_name):
                continue
            cursor.execute(
                "CREATE UNIQUE INDEX %s ON %s(%s) "
                "WHERE version_end_date IS NULL"
                % (index_name, table, 'identity'))
            created += 1
    return created
|
{"/versions_tests/tests/test_admin.py": ["/versions/admin.py", "/versions_tests/models.py"], "/versions_tests/tests/test_models.py": ["/versions/exceptions.py", "/versions/models.py", "/versions_tests/models.py"], "/versions/models.py": ["/versions/exceptions.py", "/versions/settings.py", "/versions/util/__init__.py", "/versions/fields.py"], "/versions/util/helper.py": ["/versions/models.py"], "/versions_tests/admin.py": ["/versions/admin.py", "/versions_tests/models.py"], "/versions_tests/tests/test_utils.py": ["/versions/util/postgresql.py", "/versions_tests/models.py"], "/versions/fields.py": ["/versions/descriptors.py", "/versions/models.py"], "/versions/descriptors.py": ["/versions/util/__init__.py", "/versions/models.py"], "/versions/deletion.py": ["/versions/models.py"], "/versions_tests/models.py": ["/versions/fields.py", "/versions/models.py"], "/versions_tests/apps.py": ["/versions/util/postgresql.py"], "/versions/util/postgresql.py": ["/versions/fields.py", "/versions/util/helper.py"]}
|
7,553
|
pfaion/pybeautymaps
|
refs/heads/master
|
/tests/test_utils.py
|
import pytest
from pybeautymaps import utils
VALID_LATLON = (37.030347, -93.473126)
VALID_SIZE = 1.2
def test_bbox_from_centered_raise_negative_size():
    # A non-positive width must be rejected.
    with pytest.raises(ValueError):
        utils.bbox_from_centered(VALID_LATLON, -1)
def test_bbox_from_centered_raise_wrong_latlon():
    # Latitude outside [-90, 90] and longitude outside [-180, 180]
    # must each raise, on both sides of the range.
    with pytest.raises(ValueError):
        utils.bbox_from_centered((-100, 0), VALID_SIZE)
    with pytest.raises(ValueError):
        utils.bbox_from_centered((100, 0), VALID_SIZE)
    with pytest.raises(ValueError):
        utils.bbox_from_centered((0, -200), VALID_SIZE)
    with pytest.raises(ValueError):
        utils.bbox_from_centered((0, 200), VALID_SIZE)
|
{"/tests/test_utils.py": ["/pybeautymaps/__init__.py"], "/pybeautymaps/__init__.py": ["/pybeautymaps/beautymap.py"], "/pybeautymaps/beautymap.py": ["/pybeautymaps/__init__.py"], "/tests/test_beautymap.py": ["/pybeautymaps/__init__.py"], "/examples/paris.py": ["/pybeautymaps/__init__.py"], "/examples/manhattan.py": ["/pybeautymaps/__init__.py"]}
|
7,554
|
pfaion/pybeautymaps
|
refs/heads/master
|
/setup.py
|
from os import path
from setuptools import setup, find_packages

# Long description comes straight from the README.
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()
install_requires = [
    'numpy',
    'overpy',
    'pycairo',
    'pyproj',
]
setup_requires = [
    'pytest-runner',
]
tests_require = [
    'pytest',
    # coverage 5.* has SQLite output and is not compatible with coveralls
    # pytest-cov will automatically install coverage 5.* though!
    'coverage==4.*',
    'pytest-cov',
]
setup(
    name="pybeautymaps",
    version="0",
    author="Patrick Faion",
    description="Beautiful images of street maps made with python.",
    long_description=long_description,
    long_description_content_type='text/markdown',
    keywords="art beautiful maps street-maps openstreetmaps",
    # BUG FIX: the keyword is `url`, not `urls`; setuptools silently
    # ignored the misspelled argument, so the project URL never appeared
    # on PyPI.
    url="https://github.com/pfaion/pybeautymaps",
    packages=find_packages(),
    install_requires=install_requires,
    setup_requires=setup_requires,
    tests_require=tests_require,
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Intended Audience :: Education',
        'Intended Audience :: End Users/Desktop',
        'Intended Audience :: Information Technology',
        'Intended Audience :: Other Audience',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        # duplicate 'Programming Language :: Python :: 3' entry removed
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3 :: Only',
        'Topic :: Artistic Software',
        'Topic :: Multimedia :: Graphics',
        'Topic :: Scientific/Engineering :: Visualization',
    ],
)
|
{"/tests/test_utils.py": ["/pybeautymaps/__init__.py"], "/pybeautymaps/__init__.py": ["/pybeautymaps/beautymap.py"], "/pybeautymaps/beautymap.py": ["/pybeautymaps/__init__.py"], "/tests/test_beautymap.py": ["/pybeautymaps/__init__.py"], "/examples/paris.py": ["/pybeautymaps/__init__.py"], "/examples/manhattan.py": ["/pybeautymaps/__init__.py"]}
|
7,555
|
pfaion/pybeautymaps
|
refs/heads/master
|
/pybeautymaps/__init__.py
|
from .beautymap import Beautymap
|
{"/tests/test_utils.py": ["/pybeautymaps/__init__.py"], "/pybeautymaps/__init__.py": ["/pybeautymaps/beautymap.py"], "/pybeautymaps/beautymap.py": ["/pybeautymaps/__init__.py"], "/tests/test_beautymap.py": ["/pybeautymaps/__init__.py"], "/examples/paris.py": ["/pybeautymaps/__init__.py"], "/examples/manhattan.py": ["/pybeautymaps/__init__.py"]}
|
7,556
|
pfaion/pybeautymaps
|
refs/heads/master
|
/pybeautymaps/utils.py
|
import math
import numpy as np
from pyproj import Proj
def is_valid_latitude(lat):
    """Return True iff *lat* is a valid geodetic latitude in degrees."""
    return -90 <= lat <= 90
def is_valid_longitude(lon):
    """Return True iff *lon* is a valid geodetic longitude in degrees."""
    return -180 <= lon <= 180
def bbox_from_centered(center_latlon, width):
    """Return (lat_min, lon_min, lat_max, lon_max) for a box of roughly
    *width* km per half-axis centered on *center_latlon* (degrees).

    :raises ValueError: on non-positive width or out-of-range lat/lon
    """
    if width <= 0:
        raise ValueError(f'Bounding box width must be positive! Is: {width}')
    lat, lon = center_latlon
    if not is_valid_latitude(lat):
        raise ValueError(f'Latitude needs to be in [-90, 90]! Is: {lat}')
    if not is_valid_longitude(lon):
        raise ValueError(f'Longitude needs to be in [-180, 180]! Is: {lon}')
    # quick and dirty conversion of cathographic to geodetic distances
    # see: https://gis.stackexchange.com/a/2964
    # TODO: use pyproj for this as well!
    delta_lat = width / 111.111
    # BUG FIX: math.cos expects radians; the latitude was passed in degrees,
    # which shrank/stretched the longitude span at every non-zero latitude.
    delta_lon = abs(width / (111.111 * math.cos(math.radians(lat))))
    bbox = (lat - delta_lat, lon - delta_lon, lat + delta_lat, lon + delta_lon)
    return bbox
def carthographic_from_geodetic(*latlons):
    """Project one or more (N, 2) lat/lon arrays to EPSG:3857 x/y arrays.

    Each argument is an array whose column 0 is latitude and column 1 is
    longitude; returns one (N, 2) projected array per argument.
    """
    # EPSG.3857 projection https://epsg.io/3857
    # Pseudo-Mercator as used by Google Maps and Open Street Maps
    proj = Proj(3857)
    return [
        # projector works with separate arrays of longs and lats (!)
        np.vstack(proj(coordinates[:, 1], coordinates[:, 0])).T
        for coordinates in latlons
    ]
|
{"/tests/test_utils.py": ["/pybeautymaps/__init__.py"], "/pybeautymaps/__init__.py": ["/pybeautymaps/beautymap.py"], "/pybeautymaps/beautymap.py": ["/pybeautymaps/__init__.py"], "/tests/test_beautymap.py": ["/pybeautymaps/__init__.py"], "/examples/paris.py": ["/pybeautymaps/__init__.py"], "/examples/manhattan.py": ["/pybeautymaps/__init__.py"]}
|
7,557
|
pfaion/pybeautymaps
|
refs/heads/master
|
/examples/tests/test_examples.py
|
from examples import (
manhattan,
paris,
)
def test_manhattan():
    # Smoke test: the example script must run end to end.
    manhattan.main()
def test_paris():
    # Smoke test: the example script must run end to end.
    paris.main()
|
{"/tests/test_utils.py": ["/pybeautymaps/__init__.py"], "/pybeautymaps/__init__.py": ["/pybeautymaps/beautymap.py"], "/pybeautymaps/beautymap.py": ["/pybeautymaps/__init__.py"], "/tests/test_beautymap.py": ["/pybeautymaps/__init__.py"], "/examples/paris.py": ["/pybeautymaps/__init__.py"], "/examples/manhattan.py": ["/pybeautymaps/__init__.py"]}
|
7,558
|
pfaion/pybeautymaps
|
refs/heads/master
|
/pybeautymaps/beautymap.py
|
import cairo
import numpy as np
import overpy
from . import utils
class Beautymap:
    """Fetches road geometry for a bounding box from the Overpass API and
    renders it as a square PNG street map."""
    @classmethod
    def square_centered(cls, center_latlon, width):
        # Alternate constructor: derive the bbox from a center point + width.
        bbox = utils.bbox_from_centered(center_latlon, width)
        return cls(bbox)
    def __init__(self, bbox):
        # bbox is (lat_min, lon_min, lat_max, lon_max) in geodetic degrees.
        self.bbox = bbox
        bbox_data = np.array(self.bbox).reshape((2, 2))
        self.carthographic_bbox = utils.carthographic_from_geodetic(bbox_data)[0]
        # highway tag values requested from Overpass
        self.road_types = {
            'motorway',
            'trunk',
            'primary',
            'secondary',
            'tertiary',
            'residential',
            'living_street',
        }
        self.raw_overpass_data = self.get_overpass_data()
        # one highway-type string per way ('' when the tag is missing)
        self.road_data = [
            way.tags.get('highway', '')
            for way in self.raw_overpass_data
        ]
        # one (N, 2) lat/lon array per way
        self.geodetic_data = [
            np.array([(node.lat, node.lon) for node in way.nodes], dtype=float)
            for way in self.raw_overpass_data
        ]
        self.carthographic_data = utils.carthographic_from_geodetic(*self.geodetic_data)
    def get_overpass_data(self):
        """Query the Overpass API for ways inside the bbox whose highway
        tag matches one of self.road_types; returns overpy way objects."""
        self.overpass_ql_query = f"""
        (
            way
                // filter road types with OR regex
                ["highway"~"^{'|'.join(self.road_types)}$"]
                {str(self.bbox)};
            >;
        );
        out;
        """
        return overpy.Overpass().query(self.overpass_ql_query).ways
    def render_square_png(self, filename, size, padding, line_widths=dict()):
        """Render the fetched roads into *filename* as a size x size PNG.
        line_widths maps highway type -> stroke width (default 1).
        NOTE(review): the mutable default for line_widths is only ever
        read (.get), never mutated, so it is harmless here."""
        coord_min = self.carthographic_bbox[0, :]
        coord_max = self.carthographic_bbox[1, :]
        coord_range = coord_max - coord_min
        # scale so the smaller bbox dimension fills the drawable area
        px_per_coord = (size - 2 * padding) / coord_range.min()
        # offsets for non-square shaped bounding boxes
        offset = (coord_range - coord_range.min()) / 2
        with cairo.ImageSurface(cairo.FORMAT_ARGB32, size, size) as surface:
            ctx = cairo.Context(surface)
            ctx.scale(1, 1)
            # white background
            ctx.rectangle(0, 0, size, size)
            ctx.set_source_rgb(1, 1, 1)
            ctx.fill()
            ctx.set_source_rgb(0, 0, 0)
            ctx.set_line_cap(cairo.LINE_CAP_ROUND)
            for way, road_type in zip(self.carthographic_data, self.road_data):
                ctx.set_line_width(line_widths.get(road_type, 1))
                way_zeroed = (way - coord_min - offset) * px_per_coord + padding
                way_zeroed = np.rint(way_zeroed).astype(int)
                x, y = way_zeroed[0, :]
                # y is flipped: image origin is top-left, map origin bottom-left
                ctx.move_to(x, size - y)
                for x, y in way_zeroed[1:]:
                    ctx.line_to(x, size - y)
                ctx.stroke()
            # padding: paint white rectangles over the four borders
            ctx.set_source_rgb(1, 1, 1)
            padding_rects = [
                (0, 0, size, padding),
                (0, 0, padding, size),
                (size - padding, 0, padding, size),
                (0, size - padding, size, padding),
            ]
            for rect in padding_rects:
                ctx.rectangle(*rect)
                ctx.fill()
            surface.write_to_png(filename)
# Manual smoke test: renders midtown Manhattan to test.png when the
# module is run directly (requires network access to Overpass).
if __name__ == "__main__":
    m = Beautymap.square_centered((40.757667, -73.983715), 8.0)
    m.render_square_png(
        filename='test.png',
        size=2000,
        padding=50,
        line_widths={
            'trunk': 5,
            'primary': 4,
            'secondary': 3,
            'tertiary': 2,
        }
    )
|
{"/tests/test_utils.py": ["/pybeautymaps/__init__.py"], "/pybeautymaps/__init__.py": ["/pybeautymaps/beautymap.py"], "/pybeautymaps/beautymap.py": ["/pybeautymaps/__init__.py"], "/tests/test_beautymap.py": ["/pybeautymaps/__init__.py"], "/examples/paris.py": ["/pybeautymaps/__init__.py"], "/examples/manhattan.py": ["/pybeautymaps/__init__.py"]}
|
7,559
|
pfaion/pybeautymaps
|
refs/heads/master
|
/tests/test_beautymap.py
|
from pathlib import Path
import pybeautymaps as pbm
def test_beautymap_general_workflow(tmp_path):
file_path: Path = tmp_path / 'test.png'
assert not file_path.exists()
line_widths = dict(
trunk=5,
primary=4,
secondary=3,
tertiary=2,
)
m = pbm.Beautymap.square_centered(center_latlon=(37.030347, -93.473126), width=1.2)
m.render_square_png(file_path, size=1000, padding=50, line_widths=line_widths)
assert file_path.exists()
|
{"/tests/test_utils.py": ["/pybeautymaps/__init__.py"], "/pybeautymaps/__init__.py": ["/pybeautymaps/beautymap.py"], "/pybeautymaps/beautymap.py": ["/pybeautymaps/__init__.py"], "/tests/test_beautymap.py": ["/pybeautymaps/__init__.py"], "/examples/paris.py": ["/pybeautymaps/__init__.py"], "/examples/manhattan.py": ["/pybeautymaps/__init__.py"]}
|
7,560
|
pfaion/pybeautymaps
|
refs/heads/master
|
/examples/paris.py
|
from pybeautymaps import Beautymap
def main():
    """Render a square street map of central Paris to paris.png."""
    widths = {
        'trunk': 5,
        'primary': 4,
        'secondary': 3,
        'tertiary': 2,
    }
    beautymap = Beautymap.square_centered(center_latlon=(48.873768, 2.295046), width=4.0)
    beautymap.render_square_png(
        filename='paris.png',
        size=2000,
        padding=50,
        line_widths=widths,
    )
if __name__ == "__main__":
    main()
|
{"/tests/test_utils.py": ["/pybeautymaps/__init__.py"], "/pybeautymaps/__init__.py": ["/pybeautymaps/beautymap.py"], "/pybeautymaps/beautymap.py": ["/pybeautymaps/__init__.py"], "/tests/test_beautymap.py": ["/pybeautymaps/__init__.py"], "/examples/paris.py": ["/pybeautymaps/__init__.py"], "/examples/manhattan.py": ["/pybeautymaps/__init__.py"]}
|
7,561
|
pfaion/pybeautymaps
|
refs/heads/master
|
/examples/manhattan.py
|
from pybeautymaps import Beautymap
def main():
    """Render a square street map of Manhattan to manhattan.png."""
    widths = {
        'trunk': 5,
        'primary': 4,
        'secondary': 3,
        'tertiary': 2,
    }
    beautymap = Beautymap.square_centered(center_latlon=(40.757667, -73.983715), width=8.0)
    beautymap.render_square_png(
        filename='manhattan.png',
        size=2000,
        padding=50,
        line_widths=widths,
    )
if __name__ == "__main__":
    main()
|
{"/tests/test_utils.py": ["/pybeautymaps/__init__.py"], "/pybeautymaps/__init__.py": ["/pybeautymaps/beautymap.py"], "/pybeautymaps/beautymap.py": ["/pybeautymaps/__init__.py"], "/tests/test_beautymap.py": ["/pybeautymaps/__init__.py"], "/examples/paris.py": ["/pybeautymaps/__init__.py"], "/examples/manhattan.py": ["/pybeautymaps/__init__.py"]}
|
7,562
|
plai-group/relationPrediction
|
refs/heads/master
|
/new_main.py
|
import torch
from models import GAT, ConvDecoder
from main import parse_args
# Train the GAT encoder first, then the ConvKB decoder on its embeddings.
args = parse_args()
gat = GAT(
    seed=args.seed,
    nn_args=dict(args=GAT.adapt_args(True, args)),
    optim_args=dict(),
)
gat.set_save_valid_conditions('save', 'every', 600, 'epochs')
gat.train_n_epochs(args.epochs_gat)
gat.load_checkpoint(max_epochs=args.epochs_gat)  # line should be unnecessary once using latest ptutils
# The decoder consumes the encoder's final embeddings; the checkpoint path
# is folded into the hash so decoder runs are keyed to their encoder.
conv = ConvDecoder(
    seed=args.seed,
    nn_args=dict(
        args=ConvDecoder.adapt_args(False, args),
        entity_embeddings=gat.final_entity_embeddings,
        relation_embeddings=gat.final_relation_embeddings,
    ),
    optim_args=dict(),
    extra_things_to_use_in_hash=gat.get_path(gat.epochs),
)
conv.set_save_valid_conditions('save', 'every', 10, 'epochs')
conv.train_n_epochs(args.epochs_conv)
# NOTE(review): n_samples is overridden directly on the decoder module
# before evaluation — confirm this is the intended evaluation setting.
conv.conv.n_samples = 5
conv.eval()
with torch.no_grad():
    corpus = conv.train_loader.corpus.corpus
    corpus.get_auroc(  # get_validation_pred(
        args, conv.conv, corpus.unique_entities_train
    )
|
{"/new_main.py": ["/models.py"], "/models.py": ["/create_batch.py"]}
|
7,563
|
plai-group/relationPrediction
|
refs/heads/master
|
/create_batch.py
|
import torch
from torch.autograd import Variable
import numpy as np
from collections import defaultdict
import time
import queue
import random
import math
import pickle
import os
from sklearn import metrics
from preprocess import init_embedding, build_data
# begin my additions ----------------------------------------------------------------
class CorpusDataset(torch.utils.data.Dataset):
    """Dataset wrapper that builds the knowledge-graph Corpus from the
    files under args.data."""
    def __init__(self, args):
        self.args = args
        self.directory = args.data
        # avoid temptation to refactor below, it is not worth it ;(
        train_data, validation_data, test_data, entity2id, relation2id, headTailSelector, unique_entities_train = build_data(args.data, is_unweigted=False, directed=True)
        self.corpus = Corpus(args, train_data, validation_data, test_data,
                             entity2id, relation2id, headTailSelector,
                             args.batch_size, args.valid_invalid_ratio,
                             unique_entities_train, args.get_2hop)
    def get_pretrained_embs(self):
        # Load TransE-pretrained entity/relation embeddings from disk;
        # random initialisation is deliberately unimplemented.
        if self.args.pretrained_emb:
            def get_emb_from_file(fname):
                fpath = os.path.join(self.directory, fname)
                return torch.FloatTensor(init_embedding(fpath))
            initial_entity_embeddings = get_emb_from_file('entity2vec.txt')
            initial_relation_embeddings = get_emb_from_file('relation2vec.txt')
            print("Initialised relations and entities from TransE")
        else:
            raise NotImplementedError('Random initialisation not implemented.')
        return initial_entity_embeddings, initial_relation_embeddings
    def get_current_batch_2hop_indices(self):
        # no idea why this is called `current_batch`
        # NOTE(review): implicitly returns None when args.use_2hop is False —
        # confirm callers handle that.
        if self.args.use_2hop:
            print("Opening node_neighbors pickle object.")
            file = self.directory + "/2hop.pickle"
            with open(file, 'rb') as handle:
                node_neighbors_2hop = pickle.load(handle)
            nbours = self.corpus.get_batch_nhop_neighbors_all(
                self.args,
                self.corpus.unique_entities_train,
                node_neighbors_2hop,
            )
            return Variable(torch.LongTensor(nbours))
    @property
    def train_adj_matrix(self):
        # Delegates to the underlying Corpus (indices, values) pair.
        return self.corpus.train_adj_matrix
class TrainLoader():
    """Iterable over training batches produced by the wrapped Corpus.

    Yields (LongTensor indices, FloatTensor values) pairs; the triple
    list is shuffled in place at the start of every epoch.
    """
    def __init__(self, corpus):
        # corpus is a CorpusDataset (corpus.corpus is the underlying Corpus)
        self.corpus = corpus
    def __len__(self):
        # Batches per epoch = ceiling division of triples by batch size.
        # (Replaces a hand-rolled modulo version that also computed the
        # same value with math.ceil and then discarded it.)
        args = self.corpus.args
        train_indices = self.corpus.corpus.train_indices
        return math.ceil(len(train_indices) / args.batch_size)
    def __iter__(self):
        # random.shuffle works in place and returns None, so its result
        # is deliberately not bound to a name.
        random.shuffle(self.corpus.corpus.train_triples)
        for batch_num in range(len(self)):
            indices, values = self.corpus.corpus.get_iteration_batch(batch_num)
            yield (torch.LongTensor(indices), torch.FloatTensor(values))
def get_loaders(corpus):
    # Only a train loader exists; validation/test loaders are not implemented.
    return TrainLoader(corpus), None, None
# end my additions ------------------------------------------------------------------
class Corpus:
def __init__(self, args, train_data, validation_data, test_data, entity2id,
relation2id, headTailSelector, batch_size, valid_to_invalid_samples_ratio, unique_entities_train, get_2hop=False):
self.train_triples = train_data[0]
# Converting to sparse tensor
adj_indices = torch.LongTensor(
[train_data[1][0], train_data[1][1]]) # rows and columns
adj_values = torch.LongTensor(train_data[1][2])
self.train_adj_matrix = (adj_indices, adj_values)
# adjacency matrix is needed for train_data only, as GAT is trained for
# training data
self.validation_triples = validation_data[0]
self.test_triples = test_data[0]
self.headTailSelector = headTailSelector # for selecting random entities
self.entity2id = entity2id
self.id2entity = {v: k for k, v in self.entity2id.items()}
self.relation2id = relation2id
self.id2relation = {v: k for k, v in self.relation2id.items()}
self.batch_size = batch_size
# ratio of valid to invalid samples per batch for training ConvKB Model
self.invalid_valid_ratio = int(valid_to_invalid_samples_ratio)
if(get_2hop):
self.graph = self.get_graph()
self.node_neighbors_2hop = self.get_further_neighbors()
self.unique_entities_train = [self.entity2id[i]
for i in unique_entities_train]
self.train_indices = np.array(
list(self.train_triples)).astype(np.int32)
# These are valid triples, hence all have value 1
self.train_values = np.array(
[[1]] * len(self.train_triples)).astype(np.float32)
self.validation_indices = np.array(
list(self.validation_triples)).astype(np.int32)
self.validation_values = np.array(
[[1]] * len(self.validation_triples)).astype(np.float32)
self.test_indices = np.array(list(self.test_triples)).astype(np.int32)
self.test_values = np.array(
[[1]] * len(self.test_triples)).astype(np.float32)
self.valid_triples_dict = {j: i for i, j in enumerate(
self.train_triples + self.validation_triples + self.test_triples)}
print("Total triples count {}, training triples {}, validation_triples {}, test_triples {}".format(len(self.valid_triples_dict), len(self.train_indices),
len(self.validation_indices), len(self.test_indices)))
# For training purpose
self.batch_indices = np.empty(
(self.batch_size * (self.invalid_valid_ratio + 1), 3)).astype(np.int32)
self.batch_values = np.empty(
(self.batch_size * (self.invalid_valid_ratio + 1), 1)).astype(np.float32)
def get_iteration_batch(self, iter_num):
if (iter_num + 1) * self.batch_size <= len(self.train_indices):
self.batch_indices = np.empty(
(self.batch_size * (self.invalid_valid_ratio + 1), 3)).astype(np.int32)
self.batch_values = np.empty(
(self.batch_size * (self.invalid_valid_ratio + 1), 1)).astype(np.float32)
indices = range(self.batch_size * iter_num,
self.batch_size * (iter_num + 1))
self.batch_indices[:self.batch_size,
:] = self.train_indices[indices, :]
self.batch_values[:self.batch_size,
:] = self.train_values[indices, :]
last_index = self.batch_size
if self.invalid_valid_ratio > 0:
random_entities = np.random.randint(
0, len(self.entity2id), last_index * self.invalid_valid_ratio)
# Precopying the same valid indices from 0 to batch_size to rest
# of the indices
self.batch_indices[last_index:(last_index * (self.invalid_valid_ratio + 1)), :] = np.tile(
self.batch_indices[:last_index, :], (self.invalid_valid_ratio, 1))
self.batch_values[last_index:(last_index * (self.invalid_valid_ratio + 1)), :] = np.tile(
self.batch_values[:last_index, :], (self.invalid_valid_ratio, 1))
for i in range(last_index):
for j in range(self.invalid_valid_ratio // 2):
current_index = i * (self.invalid_valid_ratio // 2) + j
while (random_entities[current_index], self.batch_indices[last_index + current_index, 1],
self.batch_indices[last_index + current_index, 2]) in self.valid_triples_dict.keys():
random_entities[current_index] = np.random.randint(
0, len(self.entity2id))
self.batch_indices[last_index + current_index,
0] = random_entities[current_index]
self.batch_values[last_index + current_index, :] = [-1]
for j in range(self.invalid_valid_ratio // 2):
current_index = last_index * \
(self.invalid_valid_ratio // 2) + \
(i * (self.invalid_valid_ratio // 2) + j)
while (self.batch_indices[last_index + current_index, 0], self.batch_indices[last_index + current_index, 1],
random_entities[current_index]) in self.valid_triples_dict.keys():
random_entities[current_index] = np.random.randint(
0, len(self.entity2id))
self.batch_indices[last_index + current_index,
2] = random_entities[current_index]
self.batch_values[last_index + current_index, :] = [-1]
return self.batch_indices, self.batch_values
return self.batch_indices, self.batch_values
else:
last_iter_size = len(self.train_indices) - \
self.batch_size * iter_num
self.batch_indices = np.empty(
(last_iter_size * (self.invalid_valid_ratio + 1), 3)).astype(np.int32)
self.batch_values = np.empty(
(last_iter_size * (self.invalid_valid_ratio + 1), 1)).astype(np.float32)
indices = range(self.batch_size * iter_num,
len(self.train_indices))
self.batch_indices[:last_iter_size,
:] = self.train_indices[indices, :]
self.batch_values[:last_iter_size,
:] = self.train_values[indices, :]
last_index = last_iter_size
if self.invalid_valid_ratio > 0:
random_entities = np.random.randint(
0, len(self.entity2id), last_index * self.invalid_valid_ratio)
# Precopying the same valid indices from 0 to batch_size to rest
# of the indices
self.batch_indices[last_index:(last_index * (self.invalid_valid_ratio + 1)), :] = np.tile(
self.batch_indices[:last_index, :], (self.invalid_valid_ratio, 1))
self.batch_values[last_index:(last_index * (self.invalid_valid_ratio + 1)), :] = np.tile(
self.batch_values[:last_index, :], (self.invalid_valid_ratio, 1))
for i in range(last_index):
for j in range(self.invalid_valid_ratio // 2):
current_index = i * (self.invalid_valid_ratio // 2) + j
while (random_entities[current_index], self.batch_indices[last_index + current_index, 1],
self.batch_indices[last_index + current_index, 2]) in self.valid_triples_dict.keys():
random_entities[current_index] = np.random.randint(
0, len(self.entity2id))
self.batch_indices[last_index + current_index,
0] = random_entities[current_index]
self.batch_values[last_index + current_index, :] = [-1]
for j in range(self.invalid_valid_ratio // 2):
current_index = last_index * \
(self.invalid_valid_ratio // 2) + \
(i * (self.invalid_valid_ratio // 2) + j)
while (self.batch_indices[last_index + current_index, 0], self.batch_indices[last_index + current_index, 1],
random_entities[current_index]) in self.valid_triples_dict.keys():
random_entities[current_index] = np.random.randint(
0, len(self.entity2id))
self.batch_indices[last_index + current_index,
2] = random_entities[current_index]
self.batch_values[last_index + current_index, :] = [-1]
return self.batch_indices, self.batch_values
return self.batch_indices, self.batch_values
def get_iteration_batch_nhop(self, current_batch_indices, node_neighbors, batch_size):
self.batch_indices = np.empty(
(batch_size * (self.invalid_valid_ratio + 1), 4)).astype(np.int32)
self.batch_values = np.empty(
(batch_size * (self.invalid_valid_ratio + 1), 1)).astype(np.float32)
indices = random.sample(range(len(current_batch_indices)), batch_size)
self.batch_indices[:batch_size,
:] = current_batch_indices[indices, :]
self.batch_values[:batch_size,
:] = np.ones((batch_size, 1))
last_index = batch_size
if self.invalid_valid_ratio > 0:
random_entities = np.random.randint(
0, len(self.entity2id), last_index * self.invalid_valid_ratio)
# Precopying the same valid indices from 0 to batch_size to rest
# of the indices
self.batch_indices[last_index:(last_index * (self.invalid_valid_ratio + 1)), :] = np.tile(
self.batch_indices[:last_index, :], (self.invalid_valid_ratio, 1))
self.batch_values[last_index:(last_index * (self.invalid_valid_ratio + 1)), :] = np.tile(
self.batch_values[:last_index, :], (self.invalid_valid_ratio, 1))
for i in range(last_index):
for j in range(self.invalid_valid_ratio // 2):
current_index = i * (self.invalid_valid_ratio // 2) + j
self.batch_indices[last_index + current_index,
0] = random_entities[current_index]
self.batch_values[last_index + current_index, :] = [0]
for j in range(self.invalid_valid_ratio // 2):
current_index = last_index * \
(self.invalid_valid_ratio // 2) + \
(i * (self.invalid_valid_ratio // 2) + j)
self.batch_indices[last_index + current_index,
3] = random_entities[current_index]
self.batch_values[last_index + current_index, :] = [0]
return self.batch_indices, self.batch_values
return self.batch_indices, self.batch_values
def get_graph(self):
graph = {}
all_tiples = torch.cat([self.train_adj_matrix[0].transpose(
0, 1), self.train_adj_matrix[1].unsqueeze(1)], dim=1)
for data in all_tiples:
source = data[1].data.item()
target = data[0].data.item()
value = data[2].data.item()
if(source not in graph.keys()):
graph[source] = {}
graph[source][target] = value
else:
graph[source][target] = value
print("Graph created")
return graph
def bfs(self, graph, source, nbd_size=2):
visit = {}
distance = {}
parent = {}
distance_lengths = {}
visit[source] = 1
distance[source] = 0
parent[source] = (-1, -1)
q = queue.Queue()
q.put((source, -1))
while(not q.empty()):
top = q.get()
if top[0] in graph.keys():
for target in graph[top[0]].keys():
if(target in visit.keys()):
continue
else:
q.put((target, graph[top[0]][target]))
distance[target] = distance[top[0]] + 1
visit[target] = 1
if distance[target] > 2:
continue
parent[target] = (top[0], graph[top[0]][target])
if distance[target] not in distance_lengths.keys():
distance_lengths[distance[target]] = 1
neighbors = {}
for target in visit.keys():
if(distance[target] != nbd_size):
continue
edges = [-1, parent[target][1]]
relations = []
entities = [target]
temp = target
while(parent[temp] != (-1, -1)):
relations.append(parent[temp][1])
entities.append(parent[temp][0])
temp = parent[temp][0]
if(distance[target] in neighbors.keys()):
neighbors[distance[target]].append(
(tuple(relations), tuple(entities[:-1])))
else:
neighbors[distance[target]] = [
(tuple(relations), tuple(entities[:-1]))]
return neighbors
def get_further_neighbors(self, nbd_size=2):
neighbors = {}
start_time = time.time()
print("length of graph keys is ", len(self.graph.keys()))
for source in self.graph.keys():
# st_time = time.time()
temp_neighbors = self.bfs(self.graph, source, nbd_size)
for distance in temp_neighbors.keys():
if(source in neighbors.keys()):
if(distance in neighbors[source].keys()):
neighbors[source][distance].append(
temp_neighbors[distance])
else:
neighbors[source][distance] = temp_neighbors[distance]
else:
neighbors[source] = {}
neighbors[source][distance] = temp_neighbors[distance]
print("time taken ", time.time() - start_time)
print("length of neighbors dict is ", len(neighbors))
return neighbors
def get_batch_nhop_neighbors_all(self, args, batch_sources, node_neighbors, nbd_size=2):
batch_source_triples = []
print("length of unique_entities ", len(batch_sources))
count = 0
for source in batch_sources:
# randomly select from the list of neighbors
if source in node_neighbors.keys():
nhop_list = node_neighbors[source][nbd_size]
for i, tup in enumerate(nhop_list):
if(args.partial_2hop and i >= 2):
break
count += 1
batch_source_triples.append([source, nhop_list[i][0][-1], nhop_list[i][0][0],
nhop_list[i][1][0]])
return np.array(batch_source_triples).astype(np.int32)
def transe_scoring(self, batch_inputs, entity_embeddings, relation_embeddings):
source_embeds = entity_embeddings[batch_inputs[:, 0]]
relation_embeds = relation_embeddings[batch_inputs[:, 1]]
tail_embeds = entity_embeddings[batch_inputs[:, 2]]
x = source_embeds + relation_embed - tail_embeds
x = torch.norm(x, p=1, dim=1)
return x
def corrupt_triple(self, triple):
def sample_different_entity(old):
new = np.random.randint(len(self.unique_entities_train))
if new != old:
return new
else:
return sample_different_entity(old)
corrupt_head = np.random.rand() > 0.5
if corrupt_head:
triple[0] = sample_different_entity(triple[0])
else:
triple[2] = sample_different_entity(triple[2])
return triple
def get_auroc(self, args, model, unique_entities):
scores = []
labels = []
n_batches = math.ceil(len(self.test_triples) / self.batch_size)
for batch_i in range(n_batches):
batch_start = batch_i * self.batch_size
pos_batch = self.test_triples[batch_start:batch_start+self.batch_size]
neg_batch = np.stack([self.corrupt_triple(tr)
for tr in np.copy(pos_batch)])
def to_tensor(arr):
return torch.tensor(arr).to(next(model.parameters())).long()
pos_scores = model.batch_test(to_tensor(pos_batch)).cpu().numpy()
neg_scores = model.batch_test(to_tensor(neg_batch)).cpu().numpy()
scores.extend([pos_scores, neg_scores])
labels.extend([np.ones(len(pos_scores)), np.zeros(len(neg_scores))])
scores = np.concatenate(scores)
labels = np.concatenate(labels)
auroc = metrics.roc_auc_score(labels, scores)
print(auroc)
return auroc
    def get_validation_pred(self, args, model, unique_entities):
        """Filtered link-prediction evaluation on the test set.

        For every test triple, builds all head-replaced and tail-replaced
        candidate triples (one per entity), removes candidates that are known
        valid triples (the "filtered" setting), prepends the true triple at
        index 0, scores everything with ``model.batch_test``, and records the
        rank of the true triple.  Prints Hits@{100,10,3,1}, mean rank and MRR
        for head replacement, tail replacement, and their average.

        NOTE(review): the outer loop runs exactly once (``range(1)``), so the
        "averaged" stats equal the single iteration's stats.
        """
        average_hits_at_100_head, average_hits_at_100_tail = [], []
        average_hits_at_ten_head, average_hits_at_ten_tail = [], []
        average_hits_at_three_head, average_hits_at_three_tail = [], []
        average_hits_at_one_head, average_hits_at_one_tail = [], []
        average_mean_rank_head, average_mean_rank_tail = [], []
        average_mean_recip_rank_head, average_mean_recip_rank_tail = [], []

        for iters in range(1):
            start_time = time.time()

            indices = [i for i in range(len(self.test_indices))]
            batch_indices = self.test_indices[indices, :]  # ie batch_indices = self.test_indices ?
            print("Sampled indices")
            print("test set length ", len(self.test_indices))
            entity_list = [j for i, j in self.entity2id.items()]

            ranks_head, ranks_tail = [], []
            reciprocal_ranks_head, reciprocal_ranks_tail = [], []
            hits_at_100_head, hits_at_100_tail = 0, 0
            hits_at_ten_head, hits_at_ten_tail = 0, 0
            hits_at_three_head, hits_at_three_tail = 0, 0
            hits_at_one_head, hits_at_one_tail = 0, 0

            for i in range(batch_indices.shape[0]):
                print(len(ranks_head))
                start_time_it = time.time()
                # One candidate triple per entity, for head and for tail
                # replacement respectively.
                new_x_batch_head = np.tile(
                    batch_indices[i, :], (len(self.entity2id), 1))
                new_x_batch_tail = np.tile(
                    batch_indices[i, :], (len(self.entity2id), 1))

                # Skip test triples whose endpoints never appeared in training
                # (the model has no trained embedding for them).
                if(batch_indices[i, 0] not in unique_entities or batch_indices[i, 2] not in unique_entities):
                    continue

                new_x_batch_head[:, 0] = entity_list
                new_x_batch_tail[:, 2] = entity_list

                last_index_head = []  # array of already existing triples
                last_index_tail = []
                for tmp_index in range(len(new_x_batch_head)):
                    temp_triple_head = (new_x_batch_head[tmp_index][0], new_x_batch_head[tmp_index][1],
                                        new_x_batch_head[tmp_index][2])
                    if temp_triple_head in self.valid_triples_dict.keys():
                        last_index_head.append(tmp_index)

                    temp_triple_tail = (new_x_batch_tail[tmp_index][0], new_x_batch_tail[tmp_index][1],
                                        new_x_batch_tail[tmp_index][2])
                    if temp_triple_tail in self.valid_triples_dict.keys():
                        last_index_tail.append(tmp_index)

                # Deleting already existing triples, leftover triples are invalid, according
                # to train, validation and test data
                # Note, all of them maynot be actually invalid
                new_x_batch_head = np.delete(
                    new_x_batch_head, last_index_head, axis=0)
                new_x_batch_tail = np.delete(
                    new_x_batch_tail, last_index_tail, axis=0)

                # adding the current valid triples to the top, i.e, index 0
                new_x_batch_head = np.insert(
                    new_x_batch_head, 0, batch_indices[i], axis=0)
                new_x_batch_tail = np.insert(
                    new_x_batch_tail, 0, batch_indices[i], axis=0)

                # Have to do this, because it doesn't fit in memory
                if 'WN' in args.data:
                    # WordNet: too many candidates to score in one pass, so
                    # split into four shots and concatenate the scores.
                    num_triples_each_shot = int(
                        math.ceil(new_x_batch_head.shape[0] / 4))
                    device = next(model.parameters()).device

                    scores1_head = model.batch_test(torch.LongTensor(
                        new_x_batch_head[:num_triples_each_shot, :]).to(device))
                    scores2_head = model.batch_test(torch.LongTensor(
                        new_x_batch_head[num_triples_each_shot: 2 * num_triples_each_shot, :]).to(device))
                    scores3_head = model.batch_test(torch.LongTensor(
                        new_x_batch_head[2 * num_triples_each_shot: 3 * num_triples_each_shot, :]).to(device))
                    scores4_head = model.batch_test(torch.LongTensor(
                        new_x_batch_head[3 * num_triples_each_shot: 4 * num_triples_each_shot, :]).to(device))
                    # (An upstream 10-way split, scores5..10, was removed as
                    # dead commented-out code.)

                    scores_head = torch.cat(
                        [scores1_head, scores2_head, scores3_head, scores4_head], dim=0)
                else:
                    # NOTE(review): here a raw numpy array is passed to
                    # batch_test, unlike the LongTensor in the WN branch —
                    # presumably batch_test accepts both; confirm.
                    scores_head = model.batch_test(new_x_batch_head)

                sorted_scores_head, sorted_indices_head = torch.sort(
                    scores_head.view(-1), dim=-1, descending=True)
                # Just search for zeroth index in the sorted scores, we appended valid triple at top
                ranks_head.append(
                    np.where(sorted_indices_head.cpu().numpy() == 0)[0][0] + 1)
                reciprocal_ranks_head.append(1.0 / ranks_head[-1])

                # Tail part here
                if 'WN' in args.data:
                    num_triples_each_shot = int(
                        math.ceil(new_x_batch_tail.shape[0] / 4))
                    # NOTE(review): `device` is only bound in the head WN
                    # branch above; both branches share the same condition,
                    # so it is always defined when reached here.

                    scores1_tail = model.batch_test(torch.LongTensor(
                        new_x_batch_tail[:num_triples_each_shot, :]).to(device))
                    scores2_tail = model.batch_test(torch.LongTensor(
                        new_x_batch_tail[num_triples_each_shot: 2 * num_triples_each_shot, :]).to(device))
                    scores3_tail = model.batch_test(torch.LongTensor(
                        new_x_batch_tail[2 * num_triples_each_shot: 3 * num_triples_each_shot, :]).to(device))
                    scores4_tail = model.batch_test(torch.LongTensor(
                        new_x_batch_tail[3 * num_triples_each_shot: 4 * num_triples_each_shot, :]).to(device))
                    # (Upstream commented-out scores5..10 removed.)

                    scores_tail = torch.cat(
                        [scores1_tail, scores2_tail, scores3_tail, scores4_tail], dim=0)

                else:
                    scores_tail = model.batch_test(new_x_batch_tail)

                sorted_scores_tail, sorted_indices_tail = torch.sort(
                    scores_tail.view(-1), dim=-1, descending=True)
                # Just search for zeroth index in the sorted scores, we appended valid triple at top
                ranks_tail.append(
                    np.where(sorted_indices_tail.cpu().numpy() == 0)[0][0] + 1)
                reciprocal_ranks_tail.append(1.0 / ranks_tail[-1])
                print("sample - ", ranks_head[-1], ranks_tail[-1])

            # Accumulate Hits@k counts from the collected ranks.
            for i in range(len(ranks_head)):
                if ranks_head[i] <= 100:
                    hits_at_100_head = hits_at_100_head + 1
                if ranks_head[i] <= 10:
                    hits_at_ten_head = hits_at_ten_head + 1
                if ranks_head[i] <= 3:
                    hits_at_three_head = hits_at_three_head + 1
                if ranks_head[i] == 1:
                    hits_at_one_head = hits_at_one_head + 1

            for i in range(len(ranks_tail)):
                if ranks_tail[i] <= 100:
                    hits_at_100_tail = hits_at_100_tail + 1
                if ranks_tail[i] <= 10:
                    hits_at_ten_tail = hits_at_ten_tail + 1
                if ranks_tail[i] <= 3:
                    hits_at_three_tail = hits_at_three_tail + 1
                if ranks_tail[i] == 1:
                    hits_at_one_tail = hits_at_one_tail + 1

            assert len(ranks_head) == len(reciprocal_ranks_head)
            assert len(ranks_tail) == len(reciprocal_ranks_tail)
            print("here {}".format(len(ranks_head)))
            print("\nCurrent iteration time {}".format(time.time() - start_time))
            print("Stats for replacing head are -> ")
            print("Current iteration Hits@100 are {}".format(
                hits_at_100_head / float(len(ranks_head))))
            print("Current iteration Hits@10 are {}".format(
                hits_at_ten_head / len(ranks_head)))
            print("Current iteration Hits@3 are {}".format(
                hits_at_three_head / len(ranks_head)))
            print("Current iteration Hits@1 are {}".format(
                hits_at_one_head / len(ranks_head)))
            print("Current iteration Mean rank {}".format(
                sum(ranks_head) / len(ranks_head)))
            print("Current iteration Mean Reciprocal Rank {}".format(
                sum(reciprocal_ranks_head) / len(reciprocal_ranks_head)))

            # NOTE(review): the tail Hits@k below divide by len(ranks_head);
            # head and tail ranks are appended in lockstep (a `continue`
            # skips both), so the lengths are equal and the result is the
            # same — but len(ranks_tail) would be the consistent choice.
            print("\nStats for replacing tail are -> ")
            print("Current iteration Hits@100 are {}".format(
                hits_at_100_tail / len(ranks_head)))
            print("Current iteration Hits@10 are {}".format(
                hits_at_ten_tail / len(ranks_head)))
            print("Current iteration Hits@3 are {}".format(
                hits_at_three_tail / len(ranks_head)))
            print("Current iteration Hits@1 are {}".format(
                hits_at_one_tail / len(ranks_head)))
            print("Current iteration Mean rank {}".format(
                sum(ranks_tail) / len(ranks_tail)))
            print("Current iteration Mean Reciprocal Rank {}".format(
                sum(reciprocal_ranks_tail) / len(reciprocal_ranks_tail)))

            average_hits_at_100_head.append(
                hits_at_100_head / len(ranks_head))
            average_hits_at_ten_head.append(
                hits_at_ten_head / len(ranks_head))
            average_hits_at_three_head.append(
                hits_at_three_head / len(ranks_head))
            average_hits_at_one_head.append(
                hits_at_one_head / len(ranks_head))
            average_mean_rank_head.append(sum(ranks_head) / len(ranks_head))
            average_mean_recip_rank_head.append(
                sum(reciprocal_ranks_head) / len(reciprocal_ranks_head))

            # (Same len(ranks_head) denominator note as above applies here.)
            average_hits_at_100_tail.append(
                hits_at_100_tail / len(ranks_head))
            average_hits_at_ten_tail.append(
                hits_at_ten_tail / len(ranks_head))
            average_hits_at_three_tail.append(
                hits_at_three_tail / len(ranks_head))
            average_hits_at_one_tail.append(
                hits_at_one_tail / len(ranks_head))
            average_mean_rank_tail.append(sum(ranks_tail) / len(ranks_tail))
            average_mean_recip_rank_tail.append(
                sum(reciprocal_ranks_tail) / len(reciprocal_ranks_tail))

        print("\nAveraged stats for replacing head are -> ")
        print("Hits@100 are {}".format(
            sum(average_hits_at_100_head) / len(average_hits_at_100_head)))
        print("Hits@10 are {}".format(
            sum(average_hits_at_ten_head) / len(average_hits_at_ten_head)))
        print("Hits@3 are {}".format(
            sum(average_hits_at_three_head) / len(average_hits_at_three_head)))
        print("Hits@1 are {}".format(
            sum(average_hits_at_one_head) / len(average_hits_at_one_head)))
        print("Mean rank {}".format(
            sum(average_mean_rank_head) / len(average_mean_rank_head)))
        print("Mean Reciprocal Rank {}".format(
            sum(average_mean_recip_rank_head) / len(average_mean_recip_rank_head)))

        print("\nAveraged stats for replacing tail are -> ")
        print("Hits@100 are {}".format(
            sum(average_hits_at_100_tail) / len(average_hits_at_100_tail)))
        print("Hits@10 are {}".format(
            sum(average_hits_at_ten_tail) / len(average_hits_at_ten_tail)))
        print("Hits@3 are {}".format(
            sum(average_hits_at_three_tail) / len(average_hits_at_three_tail)))
        print("Hits@1 are {}".format(
            sum(average_hits_at_one_tail) / len(average_hits_at_one_tail)))
        print("Mean rank {}".format(
            sum(average_mean_rank_tail) / len(average_mean_rank_tail)))
        print("Mean Reciprocal Rank {}".format(
            sum(average_mean_recip_rank_tail) / len(average_mean_recip_rank_tail)))

        # Cumulative = mean of the head-replacement and tail-replacement stats.
        cumulative_hits_100 = (sum(average_hits_at_100_head) / len(average_hits_at_100_head)
                               + sum(average_hits_at_100_tail) / len(average_hits_at_100_tail)) / 2
        cumulative_hits_ten = (sum(average_hits_at_ten_head) / len(average_hits_at_ten_head)
                               + sum(average_hits_at_ten_tail) / len(average_hits_at_ten_tail)) / 2
        cumulative_hits_three = (sum(average_hits_at_three_head) / len(average_hits_at_three_head)
                                 + sum(average_hits_at_three_tail) / len(average_hits_at_three_tail)) / 2
        cumulative_hits_one = (sum(average_hits_at_one_head) / len(average_hits_at_one_head)
                               + sum(average_hits_at_one_tail) / len(average_hits_at_one_tail)) / 2
        cumulative_mean_rank = (sum(average_mean_rank_head) / len(average_mean_rank_head)
                                + sum(average_mean_rank_tail) / len(average_mean_rank_tail)) / 2
        cumulative_mean_recip_rank = (sum(average_mean_recip_rank_head) / len(average_mean_recip_rank_head) + sum(
            average_mean_recip_rank_tail) / len(average_mean_recip_rank_tail)) / 2

        print("\nCumulative stats are -> ")
        print("Hits@100 are {}".format(cumulative_hits_100))
        print("Hits@10 are {}".format(cumulative_hits_ten))
        print("Hits@3 are {}".format(cumulative_hits_three))
        print("Hits@1 are {}".format(cumulative_hits_one))
        print("Mean rank {}".format(cumulative_mean_rank))
        print("Mean Reciprocal Rank {}".format(cumulative_mean_recip_rank))
|
{"/new_main.py": ["/models.py"], "/models.py": ["/create_batch.py"]}
|
7,564
|
plai-group/relationPrediction
|
refs/heads/master
|
/models.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import time
import math
from layers import SpGraphAttentionLayer, ConvKB
from create_batch import CorpusDataset, get_loaders
from argparse import Namespace
import ptutils as ptu
CUDA = torch.cuda.is_available() # checking cuda availability
# begin my additions (some copied from other files) -------------------------------
class ConvOrGAT(ptu.CudaCompatibleMixin, ptu.HasDataloaderMixin, ptu.Trainable):
    """Shared training plumbing for the GAT encoder and ConvKB decoder.

    Subclasses set ``is_gat`` and call ``gat_or_conv_init`` from their
    ``init_nn``; this base supplies argument adaptation, optimizer and LR
    scheduler setup, checkpointable optimizer state, and data loaders.
    """

    @staticmethod
    def adapt_args(is_gat, args):
        """Select the `_gat`- or `_conv`-suffixed arguments.

        Fields with no suffix pass through unchanged; fields with the chosen
        suffix are renamed without it; fields with the other suffix are
        dropped.  The (unused) `epochs` field is removed.
        """
        selected = 'gat' if is_gat else 'conv'
        new = {}
        for field, value in args.__dict__.items():
            parts = field.split('_')
            suffix = parts[-1].lower()
            if suffix not in ('gat', 'conv'):
                new[field] = value
            elif suffix == selected:
                new['_'.join(parts[:-1])] = value
            # other-suffix fields are intentionally discarded
        new.pop('epochs')  # artifact doesn't use this
        return Namespace(**new)

    def get_optim_state(self):
        # Optimizer + scheduler state dicts, for checkpointing.
        return {'optim': self.optim.state_dict(),
                'lr_scheduler': self.lr_scheduler.state_dict()}

    def set_optim_state(self, state):
        self.optim.load_state_dict(state['optim'])
        self.lr_scheduler.load_state_dict(state['lr_scheduler'])

    def init_optim(self):
        # GAT halves the LR every 500 epochs; the conv decoder every 25.
        decay_every = 500 if self.is_gat else 25
        self.optim = torch.optim.Adam(
            self.parameters(), lr=self.args.lr,
            weight_decay=self.args.weight_decay)
        self.lr_scheduler = torch.optim.lr_scheduler.StepLR(
            self.optim, step_size=decay_every, gamma=0.5, last_epoch=-1)

    def end_epoch(self):
        self.lr_scheduler.step()
        super().end_epoch()

    def gat_or_conv_init(self, args):
        """Common setup: corpus, 2-hop indices, and the three data loaders."""
        self.args = args
        self.corpus = CorpusDataset(self.args)
        self.current_batch_2hop_indices = self.corpus.get_current_batch_2hop_indices()
        loaders = get_loaders(self.corpus)
        self.set_dataloaders(*loaders)

    def post_init(self):
        if torch.cuda.is_available():
            self.to_cuda()
class GAT(ConvOrGAT):
    """GAT encoder trainer: learns entity/relation embeddings with a
    TransE-style margin ranking loss over GAT-refined embeddings."""

    is_gat = True

    def init_nn(self, args):
        self.gat_or_conv_init(args)
        ent_emb, rel_emb = self.corpus.get_pretrained_embs()
        self.spgat = SpKBGATModified(
            initial_entity_emb=ent_emb,
            initial_relation_emb=rel_emb,
            entity_out_dim=self.args.entity_out_dim,
            relation_out_dim=self.args.entity_out_dim,
            drop_GAT=self.args.drop,
            alpha=self.args.alpha,
            nheads_GAT=self.args.nheads,
        )

    def loss(self, train_indices, train_values):
        entity_embed, relation_embed = self.spgat(
            self.corpus, self.corpus.train_adj_matrix, train_indices,
            self.current_batch_2hop_indices)
        value = self.loss_from_embeddings(train_indices, entity_embed, relation_embed)
        self.log = {'loss': value.item()}
        self.tqdm_text = str(self.log)
        return value

    def loss_from_embeddings(self, train_indices, entity_embed, relation_embed):
        """Margin ranking loss between valid triples and their corruptions.

        `train_indices` holds the positives first, then `ratio` corrupted
        copies; each positive is repeated to pair with its corruptions.
        """
        ratio = int(self.args.valid_invalid_ratio)
        len_pos_triples = int(train_indices.shape[0] / (ratio + 1))
        pos_triples = train_indices[:len_pos_triples].repeat(ratio, 1)
        neg_triples = train_indices[len_pos_triples:]

        def l1_translation_norm(triples):
            # TransE distance ||h + r - t||_1 under the current embeddings.
            heads = entity_embed[triples[:, 0]]
            rels = relation_embed[triples[:, 1]]
            tails = entity_embed[triples[:, 2]]
            return torch.norm(heads + rels - tails, p=1, dim=1)

        y = torch.ones(ratio * len_pos_triples).to(self.device)
        margin_loss = nn.MarginRankingLoss(margin=self.args.margin)
        return margin_loss(l1_translation_norm(pos_triples),
                           l1_translation_norm(neg_triples), y)

    @property
    def final_entity_embeddings(self):
        return self.spgat.final_entity_embeddings

    @property
    def final_relation_embeddings(self):
        return self.spgat.final_relation_embeddings
class ConvDecoder(ConvOrGAT):
    """ConvKB decoder trainer, with an optional variational (tempered-ELBO)
    objective over the embedding posteriors."""

    is_gat = False

    def init_nn(self, args, entity_embeddings, relation_embeddings):
        self.gat_or_conv_init(args)
        self.conv = SpKBGATConvOnly(
            final_entity_emb=entity_embeddings,
            final_relation_emb=relation_embeddings,
            entity_out_dim=self.args.entity_out_dim,
            relation_out_dim=self.args.entity_out_dim,
            drop_conv=self.args.drop,
            alpha_conv=self.args.alpha,
            nheads_GAT=self.args.nheads,
            conv_out_channels=self.args.out_channels,
            variational=self.args.variational,
            temperature=self.args.temperature,
            sigma_p=self.args.sigma_p,
        )

    def classifier_loss(self, train_indices, train_values):
        # Soft-margin classification of triples as valid (+1) / invalid (-1).
        preds = self.conv(self.corpus, self.corpus.train_adj_matrix, train_indices)
        criterion = nn.SoftMarginLoss(reduction='sum')
        return criterion(preds.view(-1), train_values.view(-1))

    def prior_logpdf(self):
        # Gaussian prior centred on the GAT-produced embeddings, std sigma_p.
        ent_emb, rel_emb = self.conv.get_sampled_embeddings()
        sqr_distance = (
            torch.norm(self.conv.entity_embeddings_from_gat - ent_emb, 2) ** 2
            + torch.norm(self.conv.relation_embeddings_from_gat - rel_emb, 2) ** 2)
        return -0.5 * sqr_distance / self.conv.sigma_p ** 2

    def temp_entropy_q(self):
        # Tempered entropy of the diagonal-Gaussian variational posterior.
        std = torch.cat([self.conv.entity_logstddev.exp(),
                         self.conv.relation_logstddev.exp()], dim=0)
        entropy_q = 0.5 * torch.log(2 * math.pi * math.e * std ** 2).sum()
        return entropy_q * self.conv.temperature

    def loss(self, train_indices, train_values):
        likelihood_neg_logpdf = self.classifier_loss(train_indices, train_values)
        if self.conv.variational:
            B = len(train_values)
            D = len(self.corpus.corpus.train_indices)
            # Scale the minibatch likelihood up to the full dataset, then
            # scale the negative ELBO back down to per-minibatch size.
            elbo_temp = (self.prior_logpdf()
                         - likelihood_neg_logpdf * D / B
                         + self.temp_entropy_q())
            value = -elbo_temp * B / D
        else:
            value = likelihood_neg_logpdf
        self.log = {'loss': value.item()}
        self.tqdm_text = str(self.log)
        return value
# end my additions ------------------------------------------------------------------
class SpGAT(nn.Module):
    def __init__(self, num_nodes, nfeat, nhid, relation_dim, dropout, alpha, nheads):
        """
        Sparse version of GAT
        nfeat -> Entity Input Embedding dimensions
        nhid -> Entity Output Embedding dimensions
        relation_dim -> Relation Embedding dimensions
        num_nodes -> number of nodes in the Graph
        nheads -> Used for Multihead attention
        """
        super(SpGAT, self).__init__()
        self.dropout = dropout
        self.dropout_layer = nn.Dropout(self.dropout)
        # One attention layer per head, held in a plain list and registered
        # individually so parameters are named 'attention_{i}' (changing this
        # to nn.ModuleList would change state-dict keys).
        self.attentions = [SpGraphAttentionLayer(num_nodes, nfeat,
                                                 nhid,
                                                 relation_dim,
                                                 dropout=dropout,
                                                 alpha=alpha,
                                                 concat=True)
                           for _ in range(nheads)]

        for i, attention in enumerate(self.attentions):
            self.add_module('attention_{}'.format(i), attention)

        # W matrix to convert h_input to h_output dimension
        self.W = nn.Parameter(torch.zeros(size=(relation_dim, nheads * nhid)))
        nn.init.xavier_uniform_(self.W.data, gain=1.414)

        # Final attention layer over the concatenated multi-head output.
        self.out_att = SpGraphAttentionLayer(num_nodes, nhid * nheads,
                                             nheads * nhid, nheads * nhid,
                                             dropout=dropout,
                                             alpha=alpha,
                                             concat=False
                                             )

    def forward(self, Corpus_, batch_inputs, entity_embeddings, relation_embed,
                edge_list, edge_type, edge_embed, edge_list_nhop, edge_type_nhop):
        """Return (entity_embeddings, relation_embeddings) after one GAT pass.

        `edge_*` describe 1-hop edges; `*_nhop` describe 2-hop auxiliary
        edges whose embedding is the sum of the two relations on the path.
        """
        x = entity_embeddings

        # 2-hop edge embedding = sum of the two relation embeddings.
        edge_embed_nhop = relation_embed[
            edge_type_nhop[:, 0]] + relation_embed[edge_type_nhop[:, 1]]

        # Concatenate all attention heads along the feature dimension.
        x = torch.cat([att(x, edge_list, edge_embed, edge_list_nhop, edge_embed_nhop)
                       for att in self.attentions], dim=1)
        x = self.dropout_layer(x)

        # Project relations to the multi-head output dimension, then rebuild
        # the per-edge embeddings in that space for the final layer.
        out_relation_1 = relation_embed.mm(self.W)

        edge_embed = out_relation_1[edge_type]
        edge_embed_nhop = out_relation_1[
            edge_type_nhop[:, 0]] + out_relation_1[edge_type_nhop[:, 1]]

        x = F.elu(self.out_att(x, edge_list, edge_embed,
                               edge_list_nhop, edge_embed_nhop))
        return x, out_relation_1
class SpKBGATModified(nn.Module):
    def __init__(self, initial_entity_emb, initial_relation_emb, entity_out_dim, relation_out_dim,
                 drop_GAT, alpha, nheads_GAT):
        '''Sparse version of KBGAT
        entity_in_dim -> Entity Input Embedding dimensions
        entity_out_dim -> Entity Output Embedding dimensions, passed as a list
        num_relation -> number of unique relations
        relation_dim -> Relation Embedding dimensions
        num_nodes -> number of nodes in the Graph
        nheads_GAT -> Used for Multihead attention, passed as a list '''
        super().__init__()

        # Dimensions inferred from the pretrained embedding matrices.
        self.num_nodes = initial_entity_emb.shape[0]
        self.entity_in_dim = initial_entity_emb.shape[1]
        self.entity_out_dim_1 = entity_out_dim[0]
        self.nheads_GAT_1 = nheads_GAT[0]
        # NOTE(review): the second-layer sizes below are stored but not used
        # by this module's forward — presumably kept for config parity.
        self.entity_out_dim_2 = entity_out_dim[1]
        self.nheads_GAT_2 = nheads_GAT[1]

        # Properties of Relations
        self.num_relation = initial_relation_emb.shape[0]
        self.relation_dim = initial_relation_emb.shape[1]
        self.relation_out_dim_1 = relation_out_dim[0]

        self.drop_GAT = drop_GAT
        self.alpha = alpha      # For leaky relu

        # Output embeddings; overwritten with GAT outputs on every forward.
        self.final_entity_embeddings = nn.Parameter(
            torch.randn(self.num_nodes, self.entity_out_dim_1 * self.nheads_GAT_1))

        self.final_relation_embeddings = nn.Parameter(
            torch.randn(self.num_relation, self.entity_out_dim_1 * self.nheads_GAT_1))

        # Trainable input embeddings, initialized from the pretrained ones.
        self.entity_embeddings = nn.Parameter(initial_entity_emb)
        self.relation_embeddings = nn.Parameter(initial_relation_emb)

        self.sparse_gat_1 = SpGAT(self.num_nodes, self.entity_in_dim, self.entity_out_dim_1, self.relation_dim,
                                  self.drop_GAT, self.alpha, self.nheads_GAT_1)

        # Skip-connection projection from input to output entity dimension.
        self.W_entities = nn.Parameter(torch.zeros(
            size=(self.entity_in_dim, self.entity_out_dim_1 * self.nheads_GAT_1)))
        nn.init.xavier_uniform_(self.W_entities.data, gain=1.414)

    def forward(self, Corpus_, adj, batch_inputs, train_indices_nhop):
        """Run the GAT over the graph; return updated entity/relation embeddings.

        `adj` is (edge_list, edge_type); `train_indices_nhop` rows are 4-wide
        (source, relation_1, relation_2, target) 2-hop paths.
        """
        # getting edge list
        edge_list = adj[0]
        edge_type = adj[1]

        # Build the auxiliary 2-hop edge list (target -> source) and the pair
        # of relation ids along each path.
        edge_list_nhop = torch.cat(
            (train_indices_nhop[:, 3].unsqueeze(-1), train_indices_nhop[:, 0].unsqueeze(-1)), dim=1).t()
        edge_type_nhop = torch.cat(
            [train_indices_nhop[:, 1].unsqueeze(-1), train_indices_nhop[:, 2].unsqueeze(-1)], dim=1)

        if(CUDA):
            edge_list = edge_list.cuda()
            edge_type = edge_type.cuda()
            edge_list_nhop = edge_list_nhop.cuda()
            edge_type_nhop = edge_type_nhop.cuda()

        edge_embed = self.relation_embeddings[edge_type]

        start = time.time()

        # Renormalize entity embeddings in place (gradient-free .data write).
        self.entity_embeddings.data = F.normalize(
            self.entity_embeddings.data, p=2, dim=1).detach()

        # self.relation_embeddings.data = F.normalize(
        #     self.relation_embeddings.data, p=2, dim=1)

        out_entity_1, out_relation_1 = self.sparse_gat_1(
            Corpus_, batch_inputs, self.entity_embeddings, self.relation_embeddings,
            edge_list, edge_type, edge_embed, edge_list_nhop, edge_type_nhop)

        # Only entities appearing as tails in this batch get the GAT update
        # added to their (projected) input embedding; others keep just the
        # projection.
        mask_indices = torch.unique(batch_inputs[:, 2]).to(edge_list.device)
        mask = torch.zeros(self.entity_embeddings.shape[0]).to(edge_list.device)
        mask[mask_indices] = 1.0

        entities_upgraded = self.entity_embeddings.mm(self.W_entities)
        out_entity_1 = entities_upgraded + \
            mask.unsqueeze(-1).expand_as(out_entity_1) * out_entity_1

        out_entity_1 = F.normalize(out_entity_1, p=2, dim=1)

        # Cache the outputs in the (non-learned-here) final embedding params.
        self.final_entity_embeddings.data = out_entity_1.data
        self.final_relation_embeddings.data = out_relation_1.data

        return out_entity_1, out_relation_1
class SpKBGATConvOnly(nn.Module):
    def __init__(self, final_entity_emb, final_relation_emb, entity_out_dim, relation_out_dim,
                 drop_conv, alpha_conv, nheads_GAT, conv_out_channels, variational,
                 temperature, sigma_p):  # NOTE removed alpha as it doesn't seem to get used
        '''
        Sparse version of KBGAT
        entity_in_dim -> Entity Input Embedding dimensions
        entity_out_dim -> Entity Output Embedding dimensions, passed as a list
        num_relation -> number of unique relations
        relation_dim -> Relation Embedding dimensions
        num_nodes -> number of nodes in the Graph
        nheads_GAT -> Used for Multihead attention, passed as a list
        '''
        super().__init__()
        self.num_nodes = final_entity_emb.shape[0]
        emb_dim = entity_out_dim[0] * nheads_GAT[0]
        # Properties of Relations
        self.num_relation = final_relation_emb.shape[0]
        self.relation_dim = final_relation_emb.shape[1]
        self.relation_out_dim_1 = relation_out_dim[0]
        self.drop_conv = drop_conv
        self.alpha_conv = alpha_conv
        self.conv_out_channels = conv_out_channels
        self.variational = variational
        self.temperature = temperature
        self.sigma_p = sigma_p
        assert final_entity_emb.shape == (self.num_nodes, emb_dim,)
        assert final_relation_emb.shape == (self.num_relation, emb_dim,)
        # Frozen copies of the GAT output (requires the GAT to be loaded first).
        self.entity_embeddings_from_gat = final_entity_emb.clone()
        self.relation_embeddings_from_gat = final_relation_emb.clone()
        # Learnable means, seeded from the GAT output.
        self.final_entity_embeddings_mean = nn.Parameter(final_entity_emb.clone())
        self.final_relation_embeddings_mean = nn.Parameter(final_relation_emb.clone())
        if self.variational:
            # log stddev initialised to -2 everywhere (stddev ~= 0.135).
            self.entity_logstddev = nn.Parameter(final_entity_emb * 0 - 2)
            self.relation_logstddev = nn.Parameter(final_relation_emb * 0 - 2)
        self.convKB = ConvKB(emb_dim, 3, 1,
                             self.conv_out_channels, self.drop_conv, self.alpha_conv)

    def get_sampled_embeddings(self):
        """Return (entity, relation) embeddings; a reparameterised Gaussian
        sample around the learned means when variational, the means otherwise."""
        ent = self.final_entity_embeddings_mean
        rel = self.final_relation_embeddings_mean
        if self.variational:
            ent = ent + torch.randn_like(ent) * self.entity_logstddev.exp()
            rel = rel + torch.randn_like(rel) * self.relation_logstddev.exp()
        return ent, rel

    def _stack_triples(self, batch_inputs, ent, rel):
        # Build the (batch, 3, emb_dim) ConvKB input: [head, relation, tail] rows.
        head = ent[batch_inputs[:, 0], :].unsqueeze(1)
        mid = rel[batch_inputs[:, 1]].unsqueeze(1)
        tail = ent[batch_inputs[:, 2], :].unsqueeze(1)
        return torch.cat((head, mid, tail), dim=1)

    def forward(self, Corpus_, adj, batch_inputs):
        """Score the batch of triples with ConvKB (raw, un-squashed output)."""
        ent, rel = self.get_sampled_embeddings()
        return self.convKB(self._stack_triples(batch_inputs, ent, rel))

    def batch_test(self, batch_inputs):
        """Return sigmoid-scored triples; averages n_samples draws when variational."""
        def get_probs():
            # Each call re-samples the embeddings when variational.
            ent, rel = self.get_sampled_embeddings()
            return torch.sigmoid(self.convKB(self._stack_triples(batch_inputs, ent, rel)))

        if not self.variational:
            return get_probs()
        if not hasattr(self, 'n_samples'):
            raise Exception('Must set attribute n_samples before testing.')
        return sum(get_probs() for _ in range(self.n_samples)) / self.n_samples
|
{"/new_main.py": ["/models.py"], "/models.py": ["/create_batch.py"]}
|
7,584
|
wuvt/jtr
|
refs/heads/master
|
/jtr/__init__.py
|
import os
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
# Flattened application setup: the app, the database handle and the route
# registrations are all created as a side effect of importing this package.
test_config = None
# create and configure the app
app = Flask(__name__, instance_relative_config=True)
# NOTE(review): hard-coded dev secret key and DB credentials; these are meant
# to be overridden by the instance config loaded below -- confirm in deployment.
app.config.from_mapping(
    SECRET_KEY='dev',
    SQLALCHEMY_DATABASE_URI="postgresql://postgres:mysecretpassword@localhost:5432/jtr"
)
db = SQLAlchemy(app)
# Imported only after `app` and `db` exist, because both modules import them
# back from this package (deliberate circular import -- do not move these up).
from jtr import models
from jtr import views
if test_config is None:
    # load the instance config, if it exists, when not testing
    app.config.from_pyfile('config.py', silent=True)
else:
    # load the test config if passed in
    app.config.from_mapping(test_config)
# ensure the instance folder exists
try:
    os.makedirs(app.instance_path)
except OSError:
    pass
|
{"/jtr/models.py": ["/jtr/__init__.py"], "/jtr/views.py": ["/jtr/__init__.py", "/jtr/models.py"]}
|
7,585
|
wuvt/jtr
|
refs/heads/master
|
/jtr/models.py
|
import uuid
import datetime
from enum import Enum, auto
from jtr import db
from sqlalchemy.dialects.postgresql import UUID
class CDRipState(Enum):
    """Lifecycle state of a CD rip job.

    Values are spelled out explicitly; they match what ``auto()`` would
    assign (1, 2, 3) so database-stored values are unchanged.
    """
    DONE = 1
    IN_PROGRESS = 2
    ERROR = 3
class CDRip(db.Model):
    """A single CD-ripping job: album metadata plus rip state and progress."""
    __tablename__ = 'cd_rip'
    def __init__(self, artist, album, label, stack, disc, barcode):
        # New rips start IN_PROGRESS at 0% with a freshly generated UUID.
        self.artist = artist
        self.album = album
        self.label = label
        self.stack = stack
        self.disc = disc
        self.barcode = barcode
        self.state = CDRipState.IN_PROGRESS
        self.progress = 0
        self.uuid = uuid.uuid4()
    # Column declarations follow __init__; SQLAlchemy's declarative metaclass
    # collects them regardless of their position in the class body.
    id = db.Column(db.Integer, primary_key=True)
    uuid = db.Column(UUID(as_uuid=True))
    created = db.Column(db.DateTime, default=datetime.datetime.utcnow)
    # Unicode(255) generally, but unbounded Unicode on PostgreSQL.
    artist = db.Column(db.Unicode(255).with_variant(db.Unicode, 'postgresql'))
    album = db.Column(db.Unicode(255).with_variant(db.Unicode, 'postgresql'))
    label = db.Column(db.Unicode(255).with_variant(db.Unicode, 'postgresql'))
    stack = db.Column(db.Unicode(255).with_variant(db.Unicode, 'postgresql'))
    disc = db.Column(db.Unicode(255).with_variant(db.Unicode, 'postgresql'))
    # NOTE(review): no unique constraint on barcode is visible here -- confirm
    # duplicates are rejected at the database level.
    barcode = db.Column(db.Integer)
    state = db.Column(db.Enum(CDRipState))
    # Rip progress; presumably 0-100 but not enforced here -- confirm.
    progress = db.Column(db.Integer)
class Ripper(db.Model):
    """A physical ripping machine; tracks the rip it is currently working on."""
    __tablename__ = 'ripper'
    def __init__(self, id_num=None, label=None):
        # Both arguments are optional so SQLAlchemy can also construct rows itself.
        if id_num is not None:
            self.id = id_num
        if label is not None:
            self.label = label
    id = db.Column(db.Integer, primary_key=True)
    label = db.Column(db.String(10), unique=True, nullable=False)
    # The rip currently assigned to this machine (None when the machine is idle).
    current_rip_id = db.Column(db.Integer, db.ForeignKey('cd_rip.id'))
    current_rip = db.relationship('CDRip', backref=db.backref('ripper', lazy='dynamic'))
|
{"/jtr/models.py": ["/jtr/__init__.py"], "/jtr/views.py": ["/jtr/__init__.py", "/jtr/models.py"]}
|
7,586
|
wuvt/jtr
|
refs/heads/master
|
/jtr/views.py
|
from flask import render_template, request, abort
from sqlalchemy import exc
from jtr import db, app
from jtr.models import CDRipState, CDRip, Ripper
def main_page(error=None):
    """Render the ripper overview page, optionally showing an error banner."""
    all_rippers = Ripper.query.order_by(Ripper.id).all()
    return render_template('ripper.html', rippers=all_rippers, error=error)
@app.route('/', methods=['GET', 'POST'])
def display():
    """GET: render the overview page.

    POST: validate the submitted rip form, create a CDRip row, and attach it
    to the chosen ripper.  Validation failures re-render the page with an
    error message rather than aborting.
    """
    if request.method == 'GET':
        return main_page()

    # POST: every field must be present and non-blank.  (The original code
    # repeated this test once per field; a loop is equivalent and removed an
    # unused `rippers` query on the error path.)
    field_names = ('id', 'artist', 'album', 'label', 'stack', 'disc', 'barcode')
    values = {name: (request.form.get(name) or "").strip() for name in field_names}
    if not all(values.values()):
        return main_page(error="You must fill in all fields")

    # Ripper id comes from the form's hidden/select field and must be numeric.
    try:
        id_num = int(values['id'])
    except ValueError:
        return main_page(error="Ripper is not valid, are you screwing with the site?")
    # Barcodes are scanned, so a non-numeric value means a typo or tampering.
    try:
        barcode = int(values['barcode'])
    except ValueError:
        return main_page(error="Barcode is not valid! Please enter it by scanning.")

    rip = CDRip(values['artist'], values['album'], values['label'],
                values['stack'], values['disc'], barcode)
    db.session.add(rip)
    try:
        # Flush now so a duplicate barcode surfaces before we touch the ripper.
        db.session.flush()
    except exc.IntegrityError:
        db.session.rollback()
        return main_page(error="Barcode already exists in system.")
    ripper = Ripper.query.get(id_num)
    ripper.current_rip = rip
    db.session.commit()
    return main_page()
@app.route('/add-ripper', methods=['POST'])
def add_ripper():
    """Register a new ripper machine.

    Returns the plain strings "Success" or "Fail" (consumed by a script, not
    a browser).  A missing/non-numeric 'id' still raises, as before.
    """
    ripper = Ripper(id_num=int(request.form['id']), label=request.form['label'])
    try:
        db.session.add(ripper)
        db.session.commit()
        return "Success"
    except Exception:
        # Was a bare `except:`; narrowed so KeyboardInterrupt/SystemExit pass
        # through.  Roll back so the session stays usable after a failed insert.
        db.session.rollback()
        return "Fail"
# curl http://localhost:5000/api/status/1
# curl -X PUT -d "state=IN_PROGRESS&progress=99" http://localhost:5000/api/status/1
# curl -X PUT -d "state=DONE" http://localhost:5000/api/status/1
# curl -X PUT -d "state=ERROR" http://localhost:5000/api/status/1
@app.route('/api/status/<int:ripper_id>', methods=['GET', 'PUT'])
def ripper_status(ripper_id):
    """GET: return the current rip's UUID for a ripper (or "None" when idle).

    PUT: expects form field 'state' naming a CDRipState member.
    IN_PROGRESS additionally requires an integer 'progress' field and updates
    the rip's progress; DONE/ERROR finish the rip and free the ripper.
    """
    if request.method == 'GET':
        ripper = Ripper.query.get_or_404(ripper_id)
        if ripper.current_rip is None:
            return "None"
        else:
            return str(ripper.current_rip.uuid)
    if request.method == 'PUT':
        # 'state' must be present, non-blank and a valid CDRipState name.
        state_name = (request.form.get('state') or "").strip()
        if state_name == "":
            abort(400)
        try:
            state = CDRipState.__members__[state_name]
        except KeyError:
            # Was a bare `except:` -- narrowed so unrelated errors still surface.
            abort(400)
        if state == CDRipState.IN_PROGRESS:
            # Progress update: validate 'progress' before touching the DB,
            # preserving the original 400-before-404 ordering.
            progress_raw = (request.form.get('progress') or "").strip()
            if progress_raw == "":
                abort(400)
            try:
                progress = int(progress_raw)
            except ValueError:
                abort(400)
            ripper = Ripper.query.get_or_404(ripper_id)
            if ripper.current_rip is None:
                abort(404)
            # NOTE(review): progress is not clamped to 0-100 -- confirm desired.
            ripper.current_rip.progress = progress
            db.session.commit()
            return "Success"
        else:
            # Finishing (DONE/ERROR): only a rip that is IN_PROGRESS may finish.
            ripper = Ripper.query.get_or_404(ripper_id)
            if ripper.current_rip is None:
                abort(404)
            if ripper.current_rip.state != CDRipState.IN_PROGRESS:
                abort(500)
            ripper.current_rip.state = state
            ripper.current_rip = None
            db.session.commit()
            return "Success"
|
{"/jtr/models.py": ["/jtr/__init__.py"], "/jtr/views.py": ["/jtr/__init__.py", "/jtr/models.py"]}
|
7,587
|
MADRobotNO/Simple_NEAT_v0.2
|
refs/heads/master
|
/RandomData.py
|
import random
class Xor:
    """Training data for the XOR problem."""

    def __init__(self):
        # Inputs and targets share an index: data[i] is labelled targets[i].
        self.data = [[0.0, 1.0], [1.0, 0.0], [1.0, 1.0], [0.0, 0.0]]
        self.targets = [[1.0], [1.0], [0.0], [0.0]]

    def getRandomXorData(self):
        """Return one randomly chosen input pair.

        Bug fix: the original used random.randint(0, 4), which is inclusive
        on both ends and so raised IndexError one call in five on this
        4-element list; randrange(len(...)) stays in bounds.
        """
        return self.data[random.randrange(len(self.data))]
|
{"/Layer.py": ["/Node.py", "/Connection.py"], "/Connection.py": ["/Node.py"], "/Model.py": ["/Connection.py", "/Layer.py", "/Node.py"], "/example.py": ["/NEAT.py", "/RandomData.py"], "/NEAT.py": ["/Model.py"]}
|
7,588
|
MADRobotNO/Simple_NEAT_v0.2
|
refs/heads/master
|
/Node.py
|
import numpy as np
class Node:
    """A single neuron in a NEAT network: holds its bias, the accumulated
    input and the last activation output."""

    INPUT_NODE = "input"
    HIDDEN_NODE = "hidden"
    OUTPUT_NODE = "output"
    node_types = [INPUT_NODE, HIDDEN_NODE, OUTPUT_NODE]

    TANH_ACTIVATION_FUNCTION = 1
    SIGMOID_ACTIVATION_FUNCTION = 2

    def __init__(self, node_id, layer_type, layer_id, activation_function):
        self.node_id = node_id
        self.node_type = layer_type
        self.layer_id = layer_id
        self.input_data = 0.0
        self.output = 0.0
        self.bias = None
        self.activation_function = activation_function
        self.generate_random_bias()

    def adjust_bias(self):
        """Mutate the bias by a small uniform step (input nodes stay at 0)."""
        if self.node_type == self.INPUT_NODE:
            self.bias = 0.0
        else:
            self.bias += np.random.uniform(-0.1, 0.1)

    def generate_random_bias(self):
        """Initialise the bias: 0 for input nodes, uniform(-1, 1) otherwise."""
        self.bias = 0.0 if self.node_type == self.INPUT_NODE else np.random.uniform(-1, 1)

    def calculate_output(self):
        """Fold the bias into the accumulated input, activate, and return it.

        Note: input_data is updated in place, as in the original implementation,
        so repeated calls keep adding the bias.
        """
        self.input_data += self.bias
        self.output = self.activate(self.input_data)
        return self.output

    def activate(self, value):
        """Apply the configured activation (returns None for unknown ids)."""
        if self.activation_function == self.TANH_ACTIVATION_FUNCTION:
            return np.tanh(value)
        if self.activation_function == self.SIGMOID_ACTIVATION_FUNCTION:
            return 1 / (1 + np.exp(-value))

    def __str__(self):
        return (f"Node id: {self.node_id}, node type: {self.node_type}, "
                f"layer id: {self.layer_id}, bias: {self.bias}, "
                f"input: {self.input_data}, output: {self.output}")
|
{"/Layer.py": ["/Node.py", "/Connection.py"], "/Connection.py": ["/Node.py"], "/Model.py": ["/Connection.py", "/Layer.py", "/Node.py"], "/example.py": ["/NEAT.py", "/RandomData.py"], "/NEAT.py": ["/Model.py"]}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.