/**
* @file
* @author Aapo Kyrola <akyrola@cs.cmu.edu>
* @version 1.0
*
* @section LICENSE
*
* Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @section DESCRIPTION
*
* Returns standard filenames for all the data files used by GraphChi.
* All functions expect a "basefilename".
* You can specify environment variable "GRAPHCHI_ROOT", which is the
* root directory for the GraphChi configuration and source directories.
*/
#ifndef GRAPHCHI_FILENAMES_DEF
#define GRAPHCHI_FILENAMES_DEF
#include <fstream>
#include <fcntl.h>
#include <string>
#include <sstream>
#include <stdlib.h>
#include <errno.h>
#include <unistd.h>
#include <vector>
#include <sys/stat.h>
#include "graphchi_types.hpp"
#include "logger/logger.hpp"
#ifdef DYNAMICEDATA
#include "shards/dynamicdata/dynamicblock.hpp"
#endif
namespace graphchi {
#ifdef __GNUC__
#define VARIABLE_IS_NOT_USED __attribute__ ((unused))
#else
#define VARIABLE_IS_NOT_USED
#endif
static int VARIABLE_IS_NOT_USED get_option_int(const char *option_name, int default_value);
/**
* Vertex data file
*/
/**
 * Name of the vertex-value output file: "<base>.<sizeof(VertexDataType)>B.vout".
 */
template <typename VertexDataType>
static std::string filename_vertex_data(std::string basefilename) {
    return basefilename + "." + std::to_string(sizeof(VertexDataType)) + "B.vout";
}
/** Name of the in/out-degree file shared by all shardings of a graph. */
static std::string filename_degree_data(std::string basefilename) {
    std::string fname = basefilename;
    fname += "_degs.bin";
    return fname;
}
/** Name of the vertex-interval file for a sharding with `nshards` shards. */
static std::string filename_intervals(std::string basefilename, int nshards) {
    return basefilename + "." + std::to_string(nshards) + ".intervals";
}
/** Suffix ".<p>_<nshards>" identifying shard p out of nshards. */
static std::string VARIABLE_IS_NOT_USED get_part_str(int p, int nshards) {
    char partstr[32];
    /* snprintf instead of sprintf: bounded write even if the formatting
       assumptions about int width ever change. */
    snprintf(partstr, sizeof(partstr), ".%d_%d", p, nshards);
    return std::string(partstr);
}
/**
 * Name of the edge-data file of shard p (out of nshards).
 * Dynamic-edge-data builds use a different marker so the two layouts
 * never share files.
 */
template <typename EdgeDataType>
static std::string filename_shard_edata(std::string basefilename, int p, int nshards) {
    std::string name = basefilename;
#ifdef DYNAMICEDATA
    name += ".dynamic.";
#else
    name += ".edata.";
#endif
    name += "e" + std::to_string(sizeof(EdgeDataType)) + "B.";
    name += std::to_string(p) + "_" + std::to_string(nshards);
    return name;
}
/** Directory that holds the compressed blocks of one edge-data shard. */
static std::string dirname_shard_edata_block(std::string edata_shardname, size_t blocksize) {
    return edata_shardname + "_blockdir_" + std::to_string(blocksize);
}
/**
 * Reads the logical size (in bytes) of an edge-data shard from the
 * companion "<shard>.size" text file written during preprocessing.
 * The EdgeDataType template parameter is not used in the body; it keeps
 * call sites uniform with the other shard-file helpers.
 */
template <typename EdgeDataType>
static size_t get_shard_edata_filesize(std::string edata_shardname) {
    size_t fsize;
    std::string fname = edata_shardname + ".size";
    std::ifstream ifs(fname.c_str());
    if (!ifs.good()) {
        // NOTE(review): LOG_FATAL presumably terminates inside the logger;
        // the assert below is a second guard for builds where it does not —
        // confirm logger semantics.
        logstream(LOG_FATAL) << "Could not load " << fname << ". Preprocessing forgotten?" << std::endl;
        assert(ifs.good());
    }
    ifs >> fsize;
    ifs.close();
    return fsize;
}
/**
 * Path of one block file inside a shard's block directory:
 * "<shard>_blockdir_<blocksize>/<blockid>".
 */
static std::string filename_shard_edata_block(std::string edata_shardname, int blockid, size_t blocksize) {
    return edata_shardname + "_blockdir_" + std::to_string(blocksize)
         + "/" + std::to_string(blockid);
}
/** Name of the adjacency (graph-structure) file of shard p out of nshards. */
static std::string filename_shard_adj(std::string basefilename, int p, int nshards) {
    std::string name = basefilename;
    name += ".edata_azv.";
    name += std::to_string(p) + "_" + std::to_string(nshards) + ".adj";
    return name;
}
/**
* Configuration file name
*/
static std::string filename_config();
/**
 * Path of the global GraphChi configuration file. When the GRAPHCHI_ROOT
 * environment variable is set the path is anchored there; otherwise it is
 * relative to the current working directory.
 */
static std::string filename_config() {
    const char * chi_root = getenv("GRAPHCHI_ROOT");
    std::string path = "conf/graphchi.cnf";
    if (chi_root != NULL) {
        path = std::string(chi_root) + "/" + path;
    }
    return path;
}
/**
* Configuration file name - local version which can
* override the version in the version control.
*/
static std::string filename_config_local();
/**
 * Path of the local configuration override file, resolved the same way as
 * filename_config(): anchored at GRAPHCHI_ROOT when that is set.
 */
static std::string filename_config_local() {
    const char * chi_root = getenv("GRAPHCHI_ROOT");
    std::string path = "conf/graphchi.local.cnf";
    if (chi_root != NULL) {
        path = std::string(chi_root) + "/" + path;
    }
    return path;
}
static bool file_exists(std::string sname);
/** True when the path can be opened for reading. */
static bool file_exists(std::string sname) {
    int fd = open(sname.c_str(), O_RDONLY);
    if (fd < 0) return false;
    close(fd);
    return true;
}
/**
* Returns the number of shards if a file has been already
* sharded or 0 if not found.
*/
template<typename EdgeDataType>
static int find_shards(std::string base_filename, std::string shard_string="auto") {
    // Detect how many shards exist on disk for base_filename.
    // shard_string is either "auto" (probe every count up to 2400) or an
    // explicit shard count, in which case only that count is checked.
    int try_shard_num;
    int start_num = 0;
    int last_shard_num = 2400;   // upper bound for the auto-probe
    if (shard_string == "auto") {
        start_num = 0;
    } else {
        start_num = atoi(shard_string.c_str());
    }
    if (start_num > 0) {
        // Explicit count requested: probe exactly that one value.
        last_shard_num = start_num;
    }
    // Block size must be a multiple of the edge-data size (the same rule the
    // sharder uses), otherwise the block file names would not match.
    size_t blocksize = 4096 * 1024;
    while (blocksize % sizeof(EdgeDataType) != 0) blocksize++;
    for(try_shard_num=start_num; try_shard_num <= last_shard_num; try_shard_num++) {
        // Cheap existence probe: first block of the LAST shard of this count.
        std::string last_shard_name = filename_shard_edata<EdgeDataType>(base_filename, try_shard_num - 1, try_shard_num);
        std::string last_block_name = filename_shard_edata_block(last_shard_name, 0, blocksize);
        int tryf = open(last_block_name.c_str(), O_RDONLY);
        if (tryf >= 0) {
            // Found!
            close(tryf);
            int nshards_candidate = try_shard_num;
            bool success = true;
            // Validate all relevant files exists
            for(int p=0; p < nshards_candidate; p++) {
                std::string sname = filename_shard_edata_block(
                    filename_shard_edata<EdgeDataType>(base_filename, p, nshards_candidate), 0, blocksize);
                if (!file_exists(sname)) {
                    logstream(LOG_DEBUG) << "Missing directory file: " << sname << std::endl;
                    success = false;
                    break;
                }
                sname = filename_shard_adj(base_filename, p, nshards_candidate);
                if (!file_exists(sname)) {
                    logstream(LOG_DEBUG) << "Missing shard file: " << sname << std::endl;
                    success = false;
                    break;
                }
            }
            // Check degree file
            std::string degreefname = filename_degree_data(base_filename);
            if (!file_exists(degreefname)) {
                logstream(LOG_ERROR) << "Missing degree file: " << degreefname << std::endl;
                logstream(LOG_ERROR) << "You need to preprocess (sharder) your file again!" << std::endl;
                return 0;
            }
            std::string intervalfname = filename_intervals(base_filename, nshards_candidate);
            if (!file_exists(intervalfname)) {
                logstream(LOG_ERROR) << "Missing intervals file: " << intervalfname << std::endl;
                logstream(LOG_ERROR) << "You need to preprocess (sharder) your file again!" << std::endl;
                return 0;
            }
            if (!success) {
                // Some per-shard file was missing: keep probing larger counts.
                continue;
            }
            logstream(LOG_INFO) << "Detected number of shards: " << nshards_candidate << std::endl;
            logstream(LOG_INFO) << "To specify a different number of shards, use command-line parameter 'nshards'" << std::endl;
            return nshards_candidate;
        }
    }
    if (last_shard_num == start_num) {
        // An explicit count was requested but not found on disk.
        logstream(LOG_WARNING) << "Could not find shards with nshards = " << start_num << std::endl;
        logstream(LOG_WARNING) << "Please define 'nshards 0' or 'nshards auto' to automatically detect." << std::endl;
    }
    return 0;
}
/**
* Delete the shard files
*/
template<typename EdgeDataType_>
static void delete_shards(std::string base_filename, int nshards) {
    // Remove every file belonging to a sharding of base_filename with
    // `nshards` shards: the intervals file, each shard's edge-data blocks
    // (plus the ".size" file and block directory), each adjacency file, and
    // the ".numvertices" file. The degree file is deliberately kept (shared
    // between shardings; see comment below).
#ifdef DYNAMICEDATA
    // Dynamic edge data stores int-sized block indices regardless of the
    // declared edge type, so file names are derived from int.
    typedef int EdgeDataType;
#else
    typedef EdgeDataType_ EdgeDataType;
#endif
    logstream(LOG_DEBUG) << "Deleting files for " << base_filename << " shards=" << nshards << std::endl;
    std::string intervalfname = filename_intervals(base_filename, nshards);
    if (file_exists(intervalfname)) {
        int err = remove(intervalfname.c_str());
        if (err != 0) logstream(LOG_ERROR) << "Error removing file " << intervalfname
            << ", " << strerror(errno) << std::endl;
    }
    /* Note: degree file is not removed, because same graph with different number
       of shards share the file. This should be probably change.
       std::string degreefname = filename_degree_data(base_filename);
       if (file_exists(degreefname)) {
       remove(degreefname.c_str());
       } */
    // Block size must match the one used when the shards were written
    // (smallest value >= 4 MB that is a multiple of the edge-data size).
    size_t blocksize = 4096 * 1024;
    while (blocksize % sizeof(EdgeDataType) != 0) blocksize++;
    for(int p=0; p < nshards; p++) {
        int blockid = 0;
        std::string filename_edata = filename_shard_edata<EdgeDataType>(base_filename, p, nshards);
        std::string fsizename = filename_edata + ".size";
        if (file_exists(fsizename)) {
            int err = remove(fsizename.c_str());
            if (err != 0) logstream(LOG_ERROR) << "Error removing file " << fsizename
                << ", " << strerror(errno) << std::endl;
        }
        // Block ids are consecutive: delete until the first missing file.
        while(true) {
            std::string block_filename = filename_shard_edata_block(filename_edata, blockid, blocksize);
            logstream(LOG_DEBUG) << "Deleting " << block_filename << " exists: " << file_exists(block_filename) << std::endl;
            if (file_exists(block_filename)) {
                int err = remove(block_filename.c_str());
                if (err != 0) logstream(LOG_ERROR) << "Error removing file " << block_filename
                    << ", " << strerror(errno) << std::endl;
            } else {
                break;
            }
#ifdef DYNAMICEDATA
            delete_block_uncompressed_sizefile(block_filename);
#endif
            blockid++;
        }
        // The (now empty) block directory itself.
        std::string dirname = dirname_shard_edata_block(filename_edata, blocksize);
        if (file_exists(dirname)) {
            int err = remove(dirname.c_str());
            if (err != 0) logstream(LOG_ERROR) << "Error removing directory " << dirname
                << ", " << strerror(errno) << std::endl;
        }
        std::string adjname = filename_shard_adj(base_filename, p, nshards);
        logstream(LOG_DEBUG) << "Deleting " << adjname << " exists: " << file_exists(adjname) << std::endl;
        if (file_exists(adjname)) {
            int err = remove(adjname.c_str());
            if (err != 0) logstream(LOG_ERROR) << "Error removing file " << adjname
                << ", " << strerror(errno) << std::endl;
        }
    }
    std::string numv_filename = base_filename + ".numvertices";
    if (file_exists(numv_filename)) {
        int err = remove(numv_filename.c_str());
        if (err != 0) logstream(LOG_ERROR) << "Error removing file " << numv_filename
            << ", " << strerror(errno) << std::endl;
    }
}
/**
* Loads vertex intervals.
*/
static void load_vertex_intervals(std::string base_filename, int nshards, std::vector<std::pair<vid_t, vid_t> > & intervals, bool allowfail);
static void load_vertex_intervals(std::string base_filename, int nshards, std::vector<std::pair<vid_t, vid_t> > & intervals, bool allowfail=false) {
    // Fill `intervals` with the inclusive [first, last] vertex-id range of
    // each shard, read from the ".intervals" file (one inclusive upper
    // bound per line). When allowfail is true a missing file returns
    // silently with `intervals` untouched; otherwise it is a hard error.
    std::string intervalsFilename = filename_intervals(base_filename, nshards);
    std::ifstream intervalsF(intervalsFilename.c_str());
    if (!intervalsF.good()) {
        if (allowfail) return; // Hack
        logstream(LOG_ERROR) << "Could not load intervals-file: " << intervalsFilename << std::endl;
    }
    assert(intervalsF.good());
    intervals.clear();
    vid_t st=0, en;
    for(int i=0; i < nshards; i++) {
        assert(!intervalsF.eof());
        intervalsF >> en;   // upper bound of shard i
        intervals.push_back(std::pair<vid_t,vid_t>(st, en));
        st = en + 1;        // next shard starts right after this one
    }
    for(int i=0; i < nshards; i++) {
        logstream(LOG_INFO) << "shard: " << intervals[i].first << " - " << intervals[i].second << std::endl;
    }
    intervalsF.close();
}
/**
* Returns the number of vertices in a graph. The value is stored in a separate file <graphname>.numvertices
*/
static VARIABLE_IS_NOT_USED size_t get_num_vertices(std::string basefilename);
static VARIABLE_IS_NOT_USED size_t get_num_vertices(std::string basefilename) {
    // Number of vertices in the graph, read from
    // "<basefilename>.numvertices" (a single integer written during
    // preprocessing). Aborts via assert if the file is missing.
    std::string numv_filename = basefilename + ".numvertices";
    std::ifstream vfileF(numv_filename.c_str());
    if (!vfileF.good()) {
        logstream(LOG_ERROR) << "Could not find file " << numv_filename << std::endl;
        logstream(LOG_ERROR) << "Maybe you have old shards - please recreate." << std::endl;
        assert(false);
    }
    size_t n;
    vfileF >> n;
    vfileF.close();
    return n;
}
/** Name of the preprocessed binary edge file: "<base>.<sizeof edge>B.bin". */
template <typename EdgeDataType>
std::string preprocess_filename(std::string basefilename) {
    return basefilename + "." + std::to_string(sizeof(EdgeDataType)) + "B.bin";
}
/**
* Checks if original file has more recent modification date
* than the shards. If it has, deletes the shards and returns false.
* Otherwise return true.
*/
template <typename EdgeDataType>
bool check_origfile_modification_earlier(std::string basefilename, int nshards) {
    /* Compare last modified dates of the original graph and the shards.
       Returns true when the shards are up-to-date (or the check is disabled
       via --disable-modtime-check=1, or the input file is absent); returns
       false after deleting stale shards and the preprocessed bin-file. */
    if (file_exists(basefilename) && get_option_int("disable-modtime-check", 0) == 0) {
        struct stat origstat, shardstat;
        int err1 = stat(basefilename.c_str(), &origstat);
        std::string adjfname = filename_shard_adj(basefilename, 0, nshards);
        int err2 = stat(adjfname.c_str(), &shardstat);
        if (err1 != 0 || err2 != 0) {
            logstream(LOG_ERROR) << "Error when checking file modification times: " << strerror(errno) << std::endl;
            /* FIX: this used to be `return nshards;` — an int returned from a
               bool function, silently converted to true for any nonzero
               count. Return true explicitly: keep the shards when the
               timestamps cannot be compared (same runtime behavior). */
            return true;
        }
        if (origstat.st_mtime > shardstat.st_mtime) {
            logstream(LOG_INFO) << "The input graph modification date was newer than of the shards." << std::endl;
            logstream(LOG_INFO) << "Going to delete old shards and recreate new ones. To disable " << std::endl;
            logstream(LOG_INFO) << "functionality, specify --disable-modtime-check=1" << std::endl;
            // Delete shards
            delete_shards<EdgeDataType>(basefilename, nshards);
            // Delete the bin-file
            std::string preprocfile = preprocess_filename<EdgeDataType>(basefilename);
            if (file_exists(preprocfile)) {
                logstream(LOG_DEBUG) << "Deleting: " << preprocfile << std::endl;
                int err = remove(preprocfile.c_str());
                if (err != 0) {
                    logstream(LOG_ERROR) << "Error deleting file: " << preprocfile << ", " <<
                        strerror(errno) << std::endl;
                }
            }
            return false;
        } else {
            return true;
        }
    }
    return true;
}
}
#endif
/**
* @file
* @author Aapo Kyrola <akyrola@cs.cmu.edu>
* @version 1.0
*
* @section LICENSE
*
* Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @section DESCRIPTION
*
* Simple vertex-aggregators/scanners which allows reductions over all vertices
* in an I/O efficient manner.
*/
#ifndef DEF_GRAPHCHI_VERTEX_AGGREGATOR
#define DEF_GRAPHCHI_VERTEX_AGGREGATOR
#include <errno.h>
#include <memory.h>
#include <string>
#include "graphchi_types.hpp"
#include "api/chifilenames.hpp"
#include "io/stripedio.hpp"
#include "util/ioutil.hpp"
#include "engine/auxdata/vertex_data.hpp"
namespace graphchi {
/**
* Abstract class for callbacks that are invoked for each
* vertex when foreach_vertices() is called (see below).
*/
template <typename VertexDataType>
class VCallback {
public:
    // FIX: callbacks are used polymorphically (see SumCallback below), so a
    // virtual destructor makes deletion through a VCallback* well-defined.
    virtual ~VCallback() {}
    // Invoked once per vertex with its id and a mutable reference to its value.
    virtual void callback(vid_t vertex_id, VertexDataType &value) = 0;
};
/**
* Foreach: a callback object is invoked for every vertex in the given range.
* See VCallback above.
* @param basefilename base filename
* @param fromv first vertex
* @param tov last vertex (exclusive)
* @param callback user-defined callback-object.
*/
template <typename VertexDataType>
void foreach_vertices(std::string basefilename, vid_t fromv, vid_t tov, VCallback<VertexDataType> &callback) {
    // Stream vertex values from disk in windows of `readwindow` vertices and
    // invoke the callback for vertices in [fromv, tov).
    // NOTE(review): `filename` is computed but never used below — presumably
    // vertex_data_store derives the same name itself; confirm and remove.
    std::string filename = filename_vertex_data<VertexDataType>(basefilename);
    metrics m("foreach");
    stripedio * iomgr = new stripedio(m);
    vid_t readwindow = 1024 * 1024;   // vertices loaded per I/O window
    size_t numvertices = get_num_vertices(basefilename);
    vertex_data_store<VertexDataType> * vertexdata =
        new vertex_data_store<VertexDataType>(basefilename, numvertices, iomgr);
    vid_t st = fromv;
    vid_t en = 0;
    while(st <= tov) {
        en = st + readwindow - 1;     // inclusive window end
        if (en >= tov) en = tov - 1;  // clamp to the exclusive upper bound
        // NOTE(review): the `st < en` guard skips a window that holds exactly
        // one vertex (st == en) — looks like an off-by-one; confirm intent
        // before relying on single-vertex ranges.
        if (st < en) {
            vertexdata->load(st, en);
            for(vid_t v=st; v<=en; v++) {
                VertexDataType * vptr = vertexdata->vertex_data_ptr(v);
                callback.callback(v, (VertexDataType&) *vptr);
            }
        }
        st += readwindow;
    }
    delete vertexdata;
    delete iomgr;
}
/**
* Callback for computing a sum.
* TODO: a functional version instead of imperative.
*/
template <typename VertexDataType, typename SumType>
class SumCallback : public VCallback<VertexDataType> {
public:
    SumType accum;   // running total, seeded by the constructor argument

    SumCallback(SumType initval) : VCallback<VertexDataType>(), accum(initval) {
    }

    // Adds the vertex's value to the running total.
    void callback(vid_t vertex_id, VertexDataType &value) {
        accum += value;
    }
};
/**
* Computes a sum over a range of vertices' values.
* Type SumType defines the accumulator type, which may be different
* than vertex type. For example, often vertex value is 32-bit
* integer, but the sum will need to be 64-bit integer.
* @param basefilename base filename
* @param fromv first vertex
* @param tov last vertex (exclusive)
*/
template <typename VertexDataType, typename SumType>
SumType sum_vertices(std::string base_filename, vid_t fromv, vid_t tov) {
SumCallback<VertexDataType, SumType> sumc(0);
foreach_vertices<VertexDataType>(base_filename, fromv, tov, sumc);
return sumc.accum;
}
}
#endif
/**
* @file
* @author Aapo Kyrola <akyrola@cs.cmu.edu>
* @version 1.0
*
* @section LICENSE
*
* Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @section DESCRIPTION
*
* Graph conversion and parsing routines.
*/
#ifndef GRAPHCHI_CONVERSIONS_DEF
#define GRAPHCHI_CONVERSIONS_DEF
#include <fcntl.h>
#include <unistd.h>
#include <sys/types.h>
#include <dirent.h>
#include <sys/stat.h>
#include "graphchi_types.hpp"
#include "logger/logger.hpp"
#include "preprocessing/sharder.hpp"
#include "preprocessing/formats/binary_adjacency_list.hpp"
/**
* GNU COMPILER HACK TO PREVENT WARNINGS "Unused variable", if
* the particular app being compiled does not use a function.
*/
#ifdef __GNUC__
#define VARIABLE_IS_NOT_USED __attribute__ ((unused))
#else
#define VARIABLE_IS_NOT_USED
#endif
namespace graphchi {
struct dummy {};
/* Simple string to number parsers */
static void VARIABLE_IS_NOT_USED parse(int &x, const char * s);
static void VARIABLE_IS_NOT_USED parse(unsigned int &x, const char * s);
static void VARIABLE_IS_NOT_USED parse(float &x, const char * s);
static void VARIABLE_IS_NOT_USED parse(long &x, const char * s);
static void VARIABLE_IS_NOT_USED parse(char &x, const char * s);
static void VARIABLE_IS_NOT_USED parse(bool &x, const char * s);
static void VARIABLE_IS_NOT_USED parse(double &x, const char * s);
static void VARIABLE_IS_NOT_USED parse(short &x, const char * s);
static void FIXLINE(char * s);
// Decimal text -> int (atoi: no error reporting, yields 0 on garbage).
static void parse(int &x, const char * s) {
    x = atoi(s);
}
// Decimal text -> unsigned int via strtoul (base 10).
static void parse(unsigned int &x, const char * s) {
    x = static_cast<unsigned int>(strtoul(s, NULL, 10));
}
// Decimal text -> float (parsed as double by atof, then narrowed).
static void parse(float &x, const char * s) {
    x = static_cast<float>(atof(s));
}
/**
* Special templated parser for PairContainers.
*/
template <typename T>
void parse(PairContainer<T> &x, const char * s) {
    // Both halves are parsed from the SAME token, so left == right after the
    // call. NOTE(review): looks intentional (one input value duplicated into
    // the pair) — confirm, since reading two distinct values would require
    // splitting s first.
    parse(x.left, s);
    parse(x.right, s);
}
// Decimal text -> long (atol: no error reporting, yields 0 on garbage).
static void parse(long &x, const char * s) {
    x = atol(s);
}
// Takes the first character of the token as the value.
static void parse(char &x, const char * s) {
    x = s[0];
}
// True only for the exact numeric value 1; anything else parses as false.
static void parse(bool &x, const char * s) {
    x = atoi(s) == 1;
}
// Decimal text -> double (atof: no error reporting, yields 0 on garbage).
static void parse(double &x, const char * s) {
    x = atof(s);
}
// Decimal text -> short (atoi result narrowed).
static void parse(short &x, const char * s) {
    x = static_cast<short>(atoi(s));
}
#ifdef DYNAMICEDATA
// Overload for the value-less marker type; reaching it is a logic error.
static void VARIABLE_IS_NOT_USED parse_multiple(std::vector<dummy> &values, char * s);
void parse_multiple(std::vector<dummy> & values, char * s) {
    assert(false);
}
/**
 * Parse ':' -delimited values into a vector.
 */
template <typename T>
static void parse_multiple(typename std::vector<T> & values, char * s) {
    // Tokenize s in place on ':' and parse each token with the scalar parse().
    char delims[] = ":";
    char * t;
    t = strtok(s, delims);
    // NOTE(review): t is NULL when s holds no token at all; parse(x, NULL)
    // would then dereference a null pointer — confirm callers always pass a
    // non-empty value field.
    T x;
    parse(x, (const char*) t);
    values.push_back(x);
    while((t = strtok(NULL, delims)) != NULL) {
        parse(x, (const char*) t);
        values.push_back(x);
    }
}
#endif
// Catch all
// Fallback for edge-value types that have no dedicated overload above:
// always a fatal error instructing the user to define their own parser.
template <typename T>
void parse(T &x, const char * s) {
    logstream(LOG_FATAL) << "You need to define parse<your-type>(your-type &x, const char *s) function"
    << " to support parsing the edge value." << std::endl;
    assert(false);
}
// Removes a trailing '\n' from the line, if present.
void FIXLINE(char * s) {
    /* FIX: guard against the empty string, which previously computed
       len = -1 and wrote to s[-1] (undefined behavior). */
    size_t len = strlen(s);
    if (len > 0 && s[len - 1] == '\n') s[len - 1] = 0;
}
// http://www.linuxquestions.org/questions/programming-9/c-list-files-in-directory-379323/
int getdir (std::string dir, std::vector<std::string> &files);
// Appends the name of every directory entry in `dir` (including "." and
// "..") to `files`. Returns 0 on success, errno on failure to open the dir.
// http://www.linuxquestions.org/questions/programming-9/c-list-files-in-directory-379323/
int getdir (std::string dir, std::vector<std::string> &files)
{
    DIR * dp = opendir(dir.c_str());
    if (dp == NULL) {
        std::cout << "Error(" << errno << ") opening " << dir << std::endl;
        return errno;
    }
    for (struct dirent * entry = readdir(dp); entry != NULL; entry = readdir(dp)) {
        files.push_back(std::string(entry->d_name));
    }
    closedir(dp);
    return 0;
}
std::string get_dirname(std::string arg);
// Directory component of a path: everything before the last '/'.
// Precondition: arg contains at least one '/'.
std::string get_dirname(std::string arg) {
    size_t a = arg.find_last_of("/");
    /* FIX: the original fell off the end of a value-returning function
       (undefined behavior) when no '/' was present and NDEBUG disabled the
       assert. The assert is kept; the return below is always reached. */
    assert(a != arg.npos);
    return arg.substr(0, a);
}
std::string get_filename(std::string arg);
// Filename component of a path: everything after the last '/'.
// Precondition: arg contains at least one '/'.
std::string get_filename(std::string arg) {
    size_t a = arg.find_last_of("/");
    /* FIX: the original had no return statement on the no-'/' path
       (undefined behavior with NDEBUG). The assert is kept; the return
       below is always reached. */
    assert(a != arg.npos);
    return arg.substr(a + 1);
}
/**
* Converts graph from an edge list format. Input may contain
* value for the edges. Self-edges are ignored.
*/
template <typename EdgeDataType>
void convert_edgelist(std::string inputfile, sharder<EdgeDataType> &sharderobj, bool multivalue_edges=false) {
    // Parse a tab/comma/space-separated edge-list text file and feed each
    // edge into the sharder's preprocessing phase. Lines starting with '#'
    // or '%' are comments; self-edges are dropped. An optional third column
    // is the edge value; with multivalue_edges (DYNAMICEDATA builds only) it
    // is a ':'-delimited list of values.
    FILE * inf = fopen(inputfile.c_str(), "r");
    size_t bytesread = 0;
    size_t linenum = 0;
    if (inf == NULL) {
        // NOTE(review): LOG_FATAL presumably aborts inside the logger; the
        // assert below is a second guard — confirm logger semantics.
        logstream(LOG_FATAL) << "Could not load :" << inputfile << " error: " << strerror(errno) << std::endl;
    }
    assert(inf != NULL);
    logstream(LOG_INFO) << "Reading in edge list format!" << std::endl;
    char s[1024];   // lines longer than 1023 chars are split by fgets
    while(fgets(s, 1024, inf) != NULL) {
        linenum++;
        if (linenum % 10000000 == 0) {
            logstream(LOG_DEBUG) << "Read " << linenum << " lines, " << bytesread / 1024 / 1024. << " MB" << std::endl;
        }
        FIXLINE(s);
        bytesread += strlen(s);
        if (s[0] == '#') continue; // Comment
        if (s[0] == '%') continue; // Comment
        char delims[] = "\t, ";
        char * t;
        t = strtok(s, delims);
        if (t == NULL) {
            logstream(LOG_ERROR) << "Input file is not in right format. "
            << "Expecting \"<from>\t<to>\". "
            << "Current line: \"" << s << "\"\n";
            assert(false);
        }
        vid_t from = atoi(t);
        t = strtok(NULL, delims);
        if (t == NULL) {
            logstream(LOG_ERROR) << "Input file is not in right format. "
            << "Expecting \"<from>\t<to>\". "
            << "Current line: \"" << s << "\"\n";
            assert(false);
        }
        vid_t to = atoi(t);
        /* Check if has value */
        t = strtok(NULL, delims);
        if (!multivalue_edges) {
            EdgeDataType val;
            if (t != NULL) {
                parse(val, (const char*) t);
            }
            if (from != to) {
                if (t != NULL) {
                    // Edge with explicit value.
                    sharderobj.preprocessing_add_edge(from, to, val);
                } else {
                    // Edge without value column.
                    sharderobj.preprocessing_add_edge(from, to);
                }
            }
        } else {
#ifdef DYNAMICEDATA
            std::vector<EdgeDataType> vals;
            parse_multiple(vals, (char*) t);
            if (from != to) {
                if (vals.size() == 0) {
                    // TODO: go around this problem
                    logstream(LOG_FATAL) << "Each edge needs at least one value." << std::endl;
                    assert(vals.size() > 0);
                }
                sharderobj.preprocessing_add_edge_multival(from, to, vals);
            }
#else
            logstream(LOG_FATAL) << "To support multivalue-edges, dynamic edge data needs to be used." << std::endl;
            assert(false);
#endif
        }
    }
    fclose(inf);
}
/**
* Converts a graph from adjacency list format. Edge values are not supported,
* and each edge gets the default value for the type. Self-edges are ignored.
*/
template <typename EdgeDataType>
void convert_adjlist(std::string inputfile, sharder<EdgeDataType> &sharderobj) {
    // Parse an adjacency-list text file where each line is
    //   <from> <count> <to_1> ... <to_count>
    // Every edge gets a default-constructed value; self-edges are dropped.
    FILE * inf = fopen(inputfile.c_str(), "r");
    if (inf == NULL) {
        logstream(LOG_FATAL) << "Could not load :" << inputfile << " error: " << strerror(errno) << std::endl;
    }
    assert(inf != NULL);
    logstream(LOG_INFO) << "Reading in adjacency list format!" << std::endl;
    int maxlen = 100000000;   // 100 MB buffer: a vertex's entire list must fit one line
    char * s = (char*) malloc(maxlen);
    // NOTE(review): malloc result is not checked; a failed 100 MB allocation
    // would crash inside fgets — consider verifying s != NULL.
    size_t bytesread = 0;
    char delims[] = " \t";
    size_t linenum = 0;
    size_t lastlog = 0;
    /*** PHASE 1 - count ***/
    while(fgets(s, maxlen, inf) != NULL) {
        linenum++;
        if (bytesread - lastlog >= 500000000) {
            logstream(LOG_DEBUG) << "Read " << linenum << " lines, " << bytesread / 1024 / 1024. << " MB" << std::endl;
            lastlog = bytesread;
        }
        FIXLINE(s);
        bytesread += strlen(s);
        if (s[0] == '#') continue; // Comment
        if (s[0] == '%') continue; // Comment
        char * t = strtok(s, delims);
        // NOTE(review): t is NULL on a blank line, and atoi(NULL) is
        // undefined — confirm inputs never contain empty lines.
        vid_t from = atoi(t);
        t = strtok(NULL,delims);
        if (t != NULL) {
            vid_t num = atoi(t);   // declared neighbor count
            vid_t i = 0;
            while((t = strtok(NULL,delims)) != NULL) {
                vid_t to = atoi(t);
                if (from != to) {
                    sharderobj.preprocessing_add_edge(from, to, EdgeDataType());
                }
                i++;
            }
            // The declared count must match the neighbors actually read.
            if (num != i)
                logstream(LOG_ERROR) << "Mismatch when reading adjacency list: " << num << " != " << i << " s: " << std::string(s)
                << " on line: " << linenum << std::endl;
            assert(num == i);
        }
    }
    free(s);
    fclose(inf);
}
/**
* Converts a graph from cassovary's (Twitter) format. Edge values are not supported,
* and each edge gets the default value for the type. Self-edges are ignored.
*/
template <typename EdgeDataType>
void convert_cassovary(std::string basefilename, sharder<EdgeDataType> &sharderobj) {
    // Parse cassovary (Twitter) format: a header line "<from> <count>"
    // followed by <count> lines that each hold one destination vertex id.
    // All files in basefilename's directory whose names start with its
    // prefix (and do not contain ".tmp") are processed; self-edges dropped.
    std::vector<std::string> parts;
    std::string dirname = get_dirname(basefilename);
    std::string prefix = get_filename(basefilename);
    std::cout << "dir=[" << dirname << "] prefix=[" << prefix << "]" << std::endl;
    getdir(dirname, parts);
    // First pass only reports which part-files will be processed.
    for(std::vector<std::string>::iterator it=parts.begin(); it != parts.end(); ++it) {
        std::string inputfile = *it;
        if (inputfile.find(prefix) == 0 && inputfile.find("tmp") == inputfile.npos) {
            std::cout << "Going to process: " << inputfile << std::endl;
        }
    }
    for(std::vector<std::string>::iterator it=parts.begin(); it != parts.end(); ++it) {
        std::string inputfile = *it;
        if (inputfile.find(prefix) == 0 && inputfile.find(".tmp") == inputfile.npos) {
            inputfile = dirname + "/" + inputfile;
            std::cout << "Process: " << inputfile << std::endl;
            FILE * inf = fopen(inputfile.c_str(), "r");
            if (inf == NULL) {
                logstream(LOG_FATAL) << "Could not load :" << inputfile << " error: " << strerror(errno) << std::endl;
            }
            assert(inf != NULL);
            logstream(LOG_INFO) << "Reading in cassovary format!" << std::endl;
            int maxlen = 100000000;   // 100 MB line buffer
            char * s = (char*) malloc(maxlen);
            size_t bytesread = 0;
            char delims[] = " \t";
            size_t linenum = 0;
            size_t lastlog = 0;
            while(fgets(s, maxlen, inf) != NULL) {
                linenum++;
                if (bytesread - lastlog >= 500000000) {
                    logstream(LOG_DEBUG) << "Read " << linenum << " lines, " << bytesread / 1024 / 1024. << " MB" << std::endl;
                    lastlog = bytesread;
                }
                FIXLINE(s);
                bytesread += strlen(s);
                if (s[0] == '#') continue; // Comment
                if (s[0] == '%') continue; // Comment
                char * t = strtok(s, delims);
                vid_t from = atoi(t);
                t = strtok(NULL,delims);
                if (t != NULL) {
                    vid_t num = atoi(t);   // number of destination lines that follow
                    // Read next line
                    linenum += num + 1;
                    for(vid_t i=0; i < num; i++) {
                        // NOTE(review): fgets returns NULL on a truncated
                        // file, which would make FIXLINE dereference NULL and
                        // leave `s` NULL for the later free() — confirm
                        // inputs are always well-formed.
                        s = fgets(s, maxlen, inf);
                        FIXLINE(s);
                        vid_t to = atoi(s);
                        if (from != to) {
                            sharderobj.preprocessing_add_edge(from, to, EdgeDataType());
                        }
                    }
                }
            }
            free(s);
            fclose(inf);
        }
    }
}
/**
* Converts a set of files in the binedgelist format (binary edge list)
*/
/**
 * Converts a set of files in the binedgelist format (binary edge list).
 * Each record is two vid_t values (from, to); records are read until
 * end-of-file. Self-edges are ignored; every edge gets a default value.
 */
template <typename EdgeDataType>
void convert_binedgelist(std::string basefilename, sharder<EdgeDataType> &sharderobj) {
    std::vector<std::string> parts;
    std::string dirname = get_dirname(basefilename);
    std::string prefix = get_filename(basefilename);
    std::cout << "dir=[" << dirname << "] prefix=[" << prefix << "]" << std::endl;
    getdir(dirname, parts);
    // First pass only reports which part-files will be processed.
    for(std::vector<std::string>::iterator it=parts.begin(); it != parts.end(); ++it) {
        std::string inputfile = *it;
        if (inputfile.find(prefix) == 0 && inputfile.find("tmp") == inputfile.npos) {
            std::cout << "Going to process: " << inputfile << std::endl;
        }
    }
    for(std::vector<std::string>::iterator it=parts.begin(); it != parts.end(); ++it) {
        std::string inputfile = *it;
        if (inputfile.find(prefix) == 0 && inputfile.find(".tmp") == inputfile.npos) {
            inputfile = dirname + "/" + inputfile;
            std::cout << "Process: " << inputfile << std::endl;
            FILE * inf = fopen(inputfile.c_str(), "r");
            assert(inf != NULL);   // FIX: was dereferenced unchecked via feof()
            while(true) {
                vid_t from;
                vid_t to;
                /* FIX: the original tested feof() BEFORE reading; feof() is
                   false until a read fails, so the loop always performed one
                   extra iteration at EOF, tripping the assert (or, with
                   NDEBUG, feeding garbage values to the sharder). A short
                   read on the first field is a clean end-of-file. */
                size_t res1 = fread(&from, sizeof(vid_t), 1, inf);
                if (res1 != 1) break;
                size_t res2 = fread(&to, sizeof(vid_t), 1, inf);
                assert(res2 == 1);   // truncated record
                if (from != to) {
                    sharderobj.preprocessing_add_edge(from, to, EdgeDataType());
                }
            }
            fclose(inf);
        }
    }
}
// TODO: remove code duplication.
// TODO: remove code duplication.
/**
 * Like convert_binedgelist, but each record additionally carries an
 * EdgeDataType value after the (from, to) pair.
 */
template <typename EdgeDataType>
void convert_binedgelistval(std::string basefilename, sharder<EdgeDataType> &sharderobj) {
    std::vector<std::string> parts;
    std::string dirname = get_dirname(basefilename);
    std::string prefix = get_filename(basefilename);
    std::cout << "dir=[" << dirname << "] prefix=[" << prefix << "]" << std::endl;
    getdir(dirname, parts);
    // First pass only reports which part-files will be processed.
    for(std::vector<std::string>::iterator it=parts.begin(); it != parts.end(); ++it) {
        std::string inputfile = *it;
        if (inputfile.find(prefix) == 0 && inputfile.find("tmp") == inputfile.npos) {
            std::cout << "Going to process: " << inputfile << std::endl;
        }
    }
    for(std::vector<std::string>::iterator it=parts.begin(); it != parts.end(); ++it) {
        std::string inputfile = *it;
        if (inputfile.find(prefix) == 0 && inputfile.find(".tmp") == inputfile.npos) {
            inputfile = dirname + "/" + inputfile;
            std::cout << "Process: " << inputfile << std::endl;
            FILE * inf = fopen(inputfile.c_str(), "r");
            assert(inf != NULL);   // FIX: was dereferenced unchecked via feof()
            while(true) {
                vid_t from;
                vid_t to;
                EdgeDataType edgeval;
                /* FIX: feof()-before-read asserted at every clean EOF (or
                   used garbage values with NDEBUG); a short read on the
                   first field is the normal end-of-file condition. */
                size_t res1 = fread(&from, sizeof(vid_t), 1, inf);
                if (res1 != 1) break;
                size_t res2 = fread(&to, sizeof(vid_t), 1, inf);
                size_t res3 = fread(&edgeval, sizeof(EdgeDataType), 1, inf);
                assert(res2 == 1 && res3 == 1);   // truncated record
                if (from != to) {
                    sharderobj.preprocessing_add_edge(from, to, edgeval);
                }
            }
            fclose(inf);
        }
    }
}
/**
* An abstract class for defining preprocessor objects
* that modify the preprocessed binary input prior
* to sharding.
*/
template <typename EdgeDataType>
class SharderPreprocessor {
public:
    virtual ~SharderPreprocessor() {}
    // Suffix appended to the base filename so the preprocessed variant's
    // files do not collide with the unprocessed graph's files.
    virtual std::string getSuffix() = 0;
    // Rewrites the preprocessed binary file (preprocFilename) in place
    // before sharding begins.
    virtual void reprocess(std::string preprocFilename, std::string basefileName) = 0;
};
/**
* Converts a graph input to shards. Preprocessing has several steps,
* see sharder.hpp for more information.
*/
template <typename EdgeDataType>
int convert(std::string basefilename, std::string nshards_string, SharderPreprocessor<EdgeDataType> * preprocessor = NULL) {
    // Full conversion pipeline: preprocess the input into a binary edge file
    // (unless one already exists), optionally post-process it, then shard.
    // Returns the number of shards created.
    std::string suffix = "";
    if (preprocessor != NULL) {
        suffix = preprocessor->getSuffix();
    }
    sharder<EdgeDataType> sharderobj(basefilename + suffix);
    if (!sharderobj.preprocessed_file_exists()) {
        // Input format comes from the "filetype" option (asked interactively
        // when not provided on the command line).
        std::string file_type_str = get_option_string_interactive("filetype", "edgelist, adjlist");
        if (file_type_str != "adjlist" && file_type_str != "edgelist" && file_type_str != "binedgelist" &&
            file_type_str != "multivalueedgelist") {
            logstream(LOG_ERROR) << "You need to specify filetype: 'edgelist' or 'adjlist'." << std::endl;
            assert(false);
        }
        /* Start preprocessing */
        sharderobj.start_preprocessing();
        if (file_type_str == "adjlist") {
            convert_adjlist<EdgeDataType>(basefilename, sharderobj);
        } else if (file_type_str == "edgelist") {
            convert_edgelist<EdgeDataType>(basefilename, sharderobj);
#ifdef DYNAMICEDATA
        } else if (file_type_str == "multivalueedgelist" ) {
            convert_edgelist<EdgeDataType>(basefilename, sharderobj, true);
#endif
        } else if (file_type_str == "binedgelist") {
            convert_binedgelistval<EdgeDataType>(basefilename, sharderobj);
        } else {
            assert(false);
        }
        /* Finish preprocessing */
        sharderobj.end_preprocessing();
        if (preprocessor != NULL) {
            preprocessor->reprocess(sharderobj.preprocessed_name(), basefilename);
        }
    }
    // Optional cap on the vertex id space (command-line "maxvertex").
    vid_t max_vertex_id = get_option_int("maxvertex", 0);
    if (max_vertex_id > 0) {
        sharderobj.set_max_vertex_id(max_vertex_id);
    }
    int nshards = sharderobj.execute_sharding(nshards_string);
    logstream(LOG_INFO) << "Successfully finished sharding for " << basefilename + suffix << std::endl;
    logstream(LOG_INFO) << "Created " << nshards << " shards." << std::endl;
    return nshards;
}
/**
* Converts a graph input to shards with no edge values. Preprocessing has several steps,
* see sharder.hpp for more information.
*/
int convert_none(std::string basefilename, std::string nshards_string);
/**
 * Converts a graph input to shards with no edge values. Preprocessing has
 * several steps, see sharder.hpp for more information.
 * @param basefilename path of the input graph
 * @param nshards_string number of shards as a string, or "auto"
 * @return number of shards created
 */
int convert_none(std::string basefilename, std::string nshards_string) {
    std::string suffix = "";
    sharder<dummy> sharderobj(basefilename + suffix);
    sharderobj.set_no_edgevalues();
    if (!sharderobj.preprocessed_file_exists()) {
        std::string file_type_str = get_option_string_interactive("filetype", "edgelist, adjlist, cassovary, binedgelist");
        if (file_type_str != "adjlist" && file_type_str != "edgelist" && file_type_str != "cassovary" && file_type_str != "binedgelist") {
            /* Bug fix: the old message omitted 'cassovary' and 'binedgelist', which are accepted. */
            logstream(LOG_ERROR) << "You need to specify filetype: 'edgelist', 'adjlist', 'cassovary' or 'binedgelist'." << std::endl;
            assert(false);
        }
        /* Start preprocessing */
        sharderobj.start_preprocessing();
        if (file_type_str == "adjlist") {
            convert_adjlist<dummy>(basefilename, sharderobj);
        } else if (file_type_str == "edgelist") {
            convert_edgelist<dummy>(basefilename, sharderobj);
        } else if (file_type_str == "cassovary") {
            convert_cassovary<dummy>(basefilename, sharderobj);
        } else if (file_type_str == "binedgelist") {
            convert_binedgelist<dummy>(basefilename, sharderobj);
        } else {
            /* Unreachable: all accepted types were handled above. */
            assert(false);
        }
        /* Finish preprocessing */
        sharderobj.end_preprocessing();
    }
    /* Debug switch: stop after preprocessing, before the sharding passes. */
    if (get_option_int("skipsharding", 0) == 1) {
        std::cout << "Skip sharding..." << std::endl;
        exit(0);
    }
    /* Optional cap on the maximum vertex id; edges beyond it are filtered. */
    vid_t max_vertex_id = get_option_int("maxvertex", 0);
    if (max_vertex_id > 0) {
        sharderobj.set_max_vertex_id(max_vertex_id);
    }
    int nshards = sharderobj.execute_sharding(nshards_string);
    logstream(LOG_INFO) << "Successfully finished sharding for " << basefilename + suffix << std::endl;
    logstream(LOG_INFO) << "Created " << nshards << " shards." << std::endl;
    return nshards;
}
template <typename EdgeDataType>
int convert_if_notexists(std::string basefilename, std::string nshards_string, bool &didexist,
SharderPreprocessor<EdgeDataType> * preprocessor = NULL) {
int nshards;
std::string suffix = "";
if (preprocessor != NULL) {
suffix = preprocessor->getSuffix();
}
/* Check if input file is already sharded */
if ((nshards = find_shards<EdgeDataType>(basefilename + suffix, nshards_string))) {
logstream(LOG_INFO) << "Found preprocessed files for " << basefilename << ", num shards=" << nshards << std::endl;
didexist = true;
if (check_origfile_modification_earlier<EdgeDataType>(basefilename + suffix, nshards)) {
return nshards;
}
}
didexist = false;
logstream(LOG_INFO) << "Did not find preprocessed shards for " << basefilename + suffix << std::endl;
logstream(LOG_INFO) << "(Edge-value size: " << sizeof(EdgeDataType) << ")" << std::endl;
logstream(LOG_INFO) << "Will try create them now..." << std::endl;
nshards = convert<EdgeDataType>(basefilename, nshards_string, preprocessor);
return nshards;
}
/** Convenience overload that discards the 'didexist' flag. */
template <typename EdgeDataType>
int convert_if_notexists(std::string basefilename, std::string nshards_string, SharderPreprocessor<EdgeDataType> * preprocessor = NULL) {
    bool ignored;
    return convert_if_notexists<EdgeDataType>(basefilename, nshards_string, ignored, preprocessor);
}
/** (degree, vertex-id) pair used when relabeling vertices by degree. */
struct vertex_degree {
    int deg;   // combined in+out degree accumulated in OrderByDegree phase 0
    vid_t id;  // original vertex id
    vertex_degree() {}
    vertex_degree(int deg, vid_t id) : deg(deg), id(id) {}
};
static bool vertex_degree_less(const vertex_degree &a, const vertex_degree &b);
/* Orders primarily by degree; ties broken by vertex id so the order is total. */
static bool vertex_degree_less(const vertex_degree &a, const vertex_degree &b) {
    if (a.deg != b.deg) return a.deg < b.deg;
    return a.id < b.id;
}
/**
 * Special preprocessor which relabels vertices in ascending order
 * of their degree.
 */
template <typename EdgeDataType>
class OrderByDegree : public SharderPreprocessor<EdgeDataType> {
    int phase;  // 0 = collect degrees, 1 = rewrite edges with new ids
public:
    typedef edge_with_value<EdgeDataType> edge_t;
    vid_t * translate_table;   // maps original vertex id -> degree-ordered id
    vid_t max_vertex_id;
    vertex_degree * degarray;
    binary_adjacency_list_writer<EdgeDataType> * writer;
    OrderByDegree() {
        degarray = NULL;
        writer = NULL;
        translate_table = NULL;  // Bug fix: was uninitialized
    }
    ~OrderByDegree() {
        if (degarray != NULL) free(degarray);
        degarray = NULL;
        if (writer != NULL) delete writer;
        writer = NULL;
        if (translate_table != NULL) free(translate_table);
        translate_table = NULL;
    }
    std::string getSuffix() {
        return "_degord";
    }
    /** Maps an original vertex id to its degree-ordered id. */
    vid_t translate(vid_t vid) {
        if (vid > max_vertex_id) return vid;
        return translate_table[vid];
    }
    /**
     * Callback function that binary_adjacency_list_reader
     * invokes. In the first phase, the degrees of vertices are collected.
     * In the next phase, the edges are written out degree-ordered.
     * Note: this version does not preserve edge values!
     */
    void receive_edge(vid_t from, vid_t to, EdgeDataType value, bool is_value) {
        if (phase == 0) {
            degarray[from].deg++;
            degarray[to].deg++;
        } else {
            writer->add_edge(translate(from), translate(to)); // Value is ignored
        }
    }
    void reprocess(std::string preprocessedFile, std::string baseFilename) {
        binary_adjacency_list_reader<EdgeDataType> reader(preprocessedFile);
        max_vertex_id = (vid_t) reader.get_max_vertex_id();
        degarray = (vertex_degree *) calloc(max_vertex_id + 1, sizeof(vertex_degree));
        vid_t nverts = max_vertex_id + 1;
        for(vid_t i=0; i < nverts; i++) {
            degarray[i].id = i;
        }
        phase = 0;
        /* Reader will invoke receive_edge() above */
        reader.read_edges(this);
        /* Now sort */
        quickSort(degarray, nverts, vertex_degree_less);
        /* Create translation table */
        translate_table = (vid_t*) calloc(nverts, sizeof(vid_t));
        for(vid_t i=0; i<nverts; i++) {
            translate_table[degarray[i].id] = i;
        }
        /* Bug fix: degarray was calloc()'d, so it must be released with free()
           ('delete degarray' was undefined behavior) and NULLed so the
           destructor does not free it a second time. */
        free(degarray);
        degarray = NULL;
        /* Write translate table */
        std::string translate_table_file = baseFilename + ".vertexmap";
        int df = open(translate_table_file.c_str(), O_RDWR | O_CREAT, S_IROTH | S_IWOTH | S_IWUSR | S_IRUSR);
        if (df < 0) logstream(LOG_ERROR) << "Could not write vertex map: " << translate_table_file <<
            " error: " << strerror(errno) << std::endl;
        assert(df >= 0);
        pwritea(df, translate_table, nverts * sizeof(vid_t), 0);
        close(df);
        /* Now recreate the processed file */
        std::string tmpfilename = preprocessedFile + ".old";
        rename(preprocessedFile.c_str(), tmpfilename.c_str());
        writer = new binary_adjacency_list_writer<EdgeDataType>(preprocessedFile);
        binary_adjacency_list_reader<EdgeDataType> reader2(tmpfilename);
        phase = 1;
        reader2.read_edges(this);  // calls translate(), so translate_table must still be alive here
        writer->finish();
        delete writer;
        writer = NULL;
        /* Bug fix: translate_table was calloc()'d -> free(), not delete. */
        free(translate_table);
        translate_table = NULL;
    }
};
} // end namespace
#endif
| C++ |
/**
* @file
* @author Aapo Kyrola <akyrola@cs.cmu.edu>
* @version 1.0
*
* @section LICENSE
*
* Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @section DESCRIPTION
*
* Class representing a binary adjacency list format used by the
* sharder. Note, this format does not comply with standard (if there are
* any) formats.
*
* File format supports edges with and without values.
*/
#ifndef DEF_GRAPHCHI_BINADJLIST_FORMAT
#define DEF_GRAPHCHI_BINADJLIST_FORMAT
#include <assert.h>
#include <stdio.h>
#include <stdint.h>
#include <unistd.h>
#include <errno.h>
#include <string>
#include "graphchi_types.hpp"
#include "logger/logger.hpp"
#include "util/ioutil.hpp"
namespace graphchi {
#define FORMAT_VERSION 20120705 // Format version is the date it was conceived
/**
 * On-disk header of the binary adjacency file. It is written at offset 0
 * and rewritten by the writer's finish() once the counts are known.
 * NOTE(review): the struct is serialized raw (its in-memory layout), so the
 * field order must not change; padding is compiler-dependent — confirm
 * files are only read back by the same build.
 */
struct bin_adj_header {
    int format_version;         // must equal FORMAT_VERSION
    uint64_t max_vertex_id;     // Note, use 64-bit to be future-proof.
    bool contains_edge_values;  // true when each edge is followed by a value
    uint32_t edge_value_size;   // sizeof(EdgeDataType) used by the writer
    uint64_t numedges;          // total number of edges in the file
};
/**
 * Internal container class: one buffered out-edge (destination + value)
 * of the vertex currently being accumulated by the writer.
 */
template <typename EdgeDataType>
struct edge_with_value_badj {
    vid_t vertex;        // destination vertex id
    EdgeDataType value;  // edge value (ignored when the file has no values)
    edge_with_value_badj() {}
    edge_with_value_badj(vid_t v, EdgeDataType x) : vertex(v), value(x) {}
};
/**
 * Reader for the binary adjacency list format. Streams the file through a
 * fixed-size buffer and invokes a callback object for every edge.
 */
template <typename EdgeDataType>
class binary_adjacency_list_reader {
    std::string filename;
    int fd;
    size_t fpos;              // logical (consumed) position in the file
    size_t blocklen;          // number of valid bytes currently in 'block'
    size_t blocksize;         // capacity of the read buffer
    size_t total_to_process;  // file size in bytes
    char * block;
    char * blockptr;          // next unread byte inside 'block'
    bin_adj_header header;

    /**
     * Reads one value of type U from the buffered stream, refilling the
     * buffer when fewer than sizeof(U) bytes remain in it.
     */
    template <typename U>
    inline U read_val() {
        if (blockptr + sizeof(U) > block + blocklen) {
            // Refill from the current logical position so a value that would
            // straddle the old buffer end is re-read whole.
            blocklen = std::min(blocksize, total_to_process - fpos);
            preada(fd, block, blocklen, fpos);
            blockptr = block;
        }
        U res = *((U*)blockptr);
        blockptr += sizeof(U);
        fpos += sizeof(U);
        return res;
    }

public:
    binary_adjacency_list_reader(std::string filename) : filename(filename) {
        fd = open(filename.c_str(), O_RDONLY);
        if (fd < 0) {
            logstream(LOG_FATAL) << "Could not open file: " << filename << " error: " <<
            strerror(errno) << std::endl;
        }
        assert(fd >= 0);
        blocksize = (size_t) get_option_long("preprocessing.bufsize", 64 * 1024 * 1024);
        block = (char*) malloc(blocksize);
        blockptr = block;
        total_to_process = get_filesize(filename);
        blocklen = 0;
        fpos = 0;
        /* The header is consumed immediately; read_edges() starts at the records. */
        header = read_val<bin_adj_header>();
        assert(header.format_version == FORMAT_VERSION);
    }

    ~binary_adjacency_list_reader() {
        if (block != NULL) free(block);
        close(fd);
    }

    /**
     * Invokes callback->receive_edge(from, to, value, has_values) for every
     * edge in the file. Records are: [src vid][count uint8]([dst vid][value]?)*count.
     */
    template <class Callback>
    void read_edges(Callback * callback) {
        size_t nedges = 0;
        /* Note, header has been read in the beginning */
        /* Bug fix: use a while-loop instead of do-while so an empty file
           (numedges == 0) does not try to read a non-existent record. */
        while (nedges < header.numedges) {
            if (nedges % 10000000 == 0) {
                logstream(LOG_DEBUG) << (fpos * 1.0 / total_to_process * 100) << "%" << std::endl;
            }
            vid_t from;
            vid_t to;
            int adjlen;
            EdgeDataType val = EdgeDataType();

            from = read_val<vid_t>();
            adjlen = (int) read_val<uint8_t>();
            assert(adjlen > 0);
            for(int i=0; i < adjlen; i++) {
                to = read_val<vid_t>();
                if (header.contains_edge_values) {
                    val = read_val<EdgeDataType>();
                }
                callback->receive_edge(from, to, val, header.contains_edge_values);
                nedges++;
            }
        }
    }

    bool has_edge_values() {
        return header.contains_edge_values;
    }

    size_t get_max_vertex_id() {
        return header.max_vertex_id;
    }

    size_t get_numedges() {
        return header.numedges;
    }
};
/**
 * Writer for the binary adjacency list format. Out-edges of the same source
 * vertex are accumulated into samev_buf and flushed as a single record:
 * [src vid][count uint8]([dst vid][value]?)*count. The header is rewritten
 * with final counts by finish().
 */
template <typename EdgeDataType>
class binary_adjacency_list_writer {
private:
    std::string filename;
    int fd;
    bin_adj_header header;
    int bufsize;
    char * buf;
    char * bufptr;
    bool initialized;   // set on the first add_edge(); fixes the value-mode
    edge_with_value_badj<EdgeDataType> samev_buf[256];  // pending edges of the current source
    vid_t lastid;       // source vertex of the pending edges
    uint8_t counter;    // number of pending edges; capped at 255 so it fits the count byte

public:
    binary_adjacency_list_writer(std::string filename) : filename(filename) {
        bufsize = (int) get_option_int("preprocessing.bufsize", 64 * 1024 * 1024);
        assert(bufsize > 1024 * 1024);

        fd = open(filename.c_str(), O_WRONLY | O_CREAT, S_IROTH | S_IWOTH | S_IWUSR | S_IRUSR);
        if (fd < 0) {
            logstream(LOG_FATAL) << "Could not open file " << filename << " for writing. " <<
            " Error: " << strerror(errno) << std::endl;
        }
        /* Truncate in case the file already existed. */
        int res = ftruncate(fd, 0);
        if (res != 0) {
            logstream(LOG_FATAL) << "Could not truncate file " << filename <<
            " Error: " << strerror(errno) << std::endl;
        }
        assert(res == 0);

        header.format_version = FORMAT_VERSION;
        header.max_vertex_id = 0;
        header.contains_edge_values = false;
        header.numedges = 0;
        header.edge_value_size = (uint32_t) sizeof(EdgeDataType);
        buf = (char*) malloc(bufsize);
        bufptr = buf;
        /* Write a placeholder header; finish() rewrites it with real counts. */
        bwrite<bin_adj_header>(fd, buf, bufptr, header);
        counter = 0;
        lastid = 0;
        initialized = false;
        assert(fd >= 0);
    }

    ~binary_adjacency_list_writer() {
        /* Bug fix: buf is allocated with malloc(), so it must be released
           with free() — 'delete buf' was undefined behavior. finish() sets
           buf to NULL, so there is no double free. */
        if (buf != NULL) free(buf);
    }

protected:
    /** Rewrites the header at offset 0 with the final counts. */
    void write_header() {
        logstream(LOG_DEBUG) << "Write header: max vertex: " << header.max_vertex_id << std::endl;
        pwritea(fd, &header, sizeof(bin_adj_header), 0);
    }

    /**
     * Write edges for the current vertex (lastid)
     */
    void flush() {
        if (counter != 0) {
            bwrite<vid_t>(fd, buf, bufptr, lastid);
            bwrite<uint8_t>(fd, buf, bufptr, counter);
            for(int i=0; i < counter; i++) {
                bwrite<vid_t>(fd, buf, bufptr, samev_buf[i].vertex);
                if (header.contains_edge_values) {
                    bwrite<EdgeDataType>(fd, buf, bufptr, samev_buf[i].value);
                }
            }
            header.numedges += (uint64_t)counter;
            counter = 0;
        }
    }

    void _addedge(vid_t from, vid_t to, EdgeDataType val) {
        if (from == to) return; // Filter self-edges
        if (from == lastid && counter > 0) {
            samev_buf[counter++] = edge_with_value_badj<EdgeDataType>(to, val);
        } else {
            /* New source vertex: emit the pending record first. */
            flush();
            lastid = from;
            samev_buf[counter++] = edge_with_value_badj<EdgeDataType>(to, val);
        }
        if (counter == 255) {
            /* Flush so the count always fits in the uint8 count field. */
            flush();
            counter = 0;
        }
        if (from > header.max_vertex_id || to > header.max_vertex_id) {
            header.max_vertex_id = std::max(from, to);
        }
    }

public:
    /** Adds an edge with a value; the file must be in "with values" mode. */
    void add_edge(vid_t from, vid_t to, EdgeDataType val) {
        if (!initialized) {
            header.contains_edge_values = true;
            initialized = true;
        }
        if (!header.contains_edge_values) {
            /* Bug fix: the old message said "with a value" twice. */
            logstream(LOG_ERROR) << "Tried to add edge with a value, although previously added one without a value!" << std::endl;
        }
        assert(header.contains_edge_values);
        _addedge(from, to, val);
    }

    /** Adds an edge without a value; the file must be in "no values" mode. */
    void add_edge(vid_t from, vid_t to) {
        if (!initialized) {
            header.contains_edge_values = false;
            initialized = true;
        }
        if (header.contains_edge_values) {
            logstream(LOG_ERROR) << "Tried to add edge without a value, although previously added edge with a value!" << std::endl;
        }
        assert(!header.contains_edge_values);
        _addedge(from, to, EdgeDataType());
    }

    bool has_edge_values() {
        return header.contains_edge_values;
    }

    /** Flushes all buffers, rewrites the header, and closes the file. */
    void finish() {
        flush();
        /* Write rest of the buffer out */
        writea(fd, buf, bufptr - buf);
        free(buf);
        buf = NULL;
        write_header();
        close(fd);
    }

    /** Buffered write function */
    template <typename T>
    void bwrite(int f, char * buf, char * &bufptr, T val) {
        if (bufptr + sizeof(T) - buf >= bufsize) {
            writea(f, buf, bufptr - buf);
            bufptr = buf;
        }
        *((T*)bufptr) = val;
        bufptr += sizeof(T);
    }
};
}
#endif
| C++ |
/**
* @file
* @author Aapo Kyrola <akyrola@cs.cmu.edu>
* @version 1.0
*
* @section LICENSE
*
* Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @section DESCRIPTION
*
* Splits shards into blocks. Experimental.
*/
#include <iostream>
#include <stdlib.h>
#include <string>
#include <assert.h>
#include <unistd.h>
#include <fstream>
#include <sys/stat.h>
#include "api/chifilenames.hpp"
#include "io/stripedio.hpp"
#include "logger/logger.hpp"
#include "util/ioutil.hpp"
#include "util/cmdopts.hpp"
#include "preprocessing/conversions.hpp"
#include "preprocessing/sharder.hpp"
using namespace graphchi;
typedef float EdgeDataType;
/**
 * Splits each edge-data shard of a graph into compressed fixed-size blocks
 * and writes a ".size" file recording the uncompressed shard size.
 */
int main(int argc, const char ** argv) {
    graphchi_init(argc, argv);
    global_logger().set_log_level(LOG_DEBUG);
    std::string filename = get_option_string("file");
    int nshards = convert_if_notexists<EdgeDataType>(filename, get_option_string("nshards", "auto"));

    size_t blocksize = get_option_long("blocksize", 4096 * 1024);
    char * buf = (char *) malloc(blocksize);
    assert(buf != NULL);

    for(int p=0; p < nshards; p++) {
        std::string shard_filename = filename_shard_edata<EdgeDataType>(filename, p, nshards);
        int f = open(shard_filename.c_str(), O_RDONLY);
        /* Bug fix: the descriptor was previously used without checking. */
        if (f < 0) {
            logstream(LOG_ERROR) << "Could not open shard: " << shard_filename <<
                " error: " << strerror(errno) << std::endl;
        }
        assert(f >= 0);
        size_t fsize = get_filesize(shard_filename);

        /* Number of blocks, rounding up for a partial last block. */
        size_t nblocks = fsize / blocksize + (fsize % blocksize != 0);
        size_t idx = 0;

        std::string block_dirname = dirname_shard_edata_block(shard_filename, blocksize);
        logstream(LOG_INFO) << "Going to create: " << block_dirname << std::endl;
        int err = mkdir(block_dirname.c_str(), 0777);
        if (err != 0) {
            /* Typically EEXIST when re-running; log and continue. */
            logstream(LOG_ERROR) << strerror(errno) << std::endl;
        }

        /* Bug fix: loop index was a signed int compared against size_t. */
        for(size_t i=0; i < nblocks; i++) {
            size_t len = std::min(blocksize, fsize - idx);
            preada(f, buf, len, idx);
            std::string block_filename = filename_shard_edata_block(shard_filename, (int)i, blocksize);
            int bf = open(block_filename.c_str(), O_RDWR | O_CREAT, S_IROTH | S_IWOTH | S_IWUSR | S_IRUSR);
            assert(bf >= 0);
            write_compressed(bf, buf, len);
            close(bf);
            idx += blocksize;
        }
        close(f);

        /* Record the uncompressed shard size for readers. */
        std::string sizefilename = shard_filename + ".size";
        std::ofstream ofs(sizefilename.c_str());
        ofs << fsize;
        ofs.close();
    }
    free(buf);  // Bug fix: buffer was leaked
    return 0;
}
/**
* @file
* @author Aapo Kyrola <akyrola@cs.cmu.edu>
* @version 1.0
*
* @section LICENSE
*
* Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @section DESCRIPTION
*
* Sharder_basic can convert graphs from the edgelist and adjacency
* list representations to shards used by the GraphChi system.
*/
#include <iostream>
#include <stdlib.h>
#include <string>
#include <assert.h>
#include "logger/logger.hpp"
#include "preprocessing/conversions.hpp"
#include "preprocessing/sharder.hpp"
#include "util/cmdopts.hpp"
using namespace graphchi;
/**
 * Command-line entry point: asks for the input file, edge-data type and
 * shard count, then dispatches to the appropriately-typed convert().
 */
int main(int argc, const char ** argv) {
    graphchi_init(argc, argv);
    global_logger().set_log_level(LOG_DEBUG);
    std::string basefile = get_option_string_interactive("file", "[path to the input graph]");
    std::string edge_data_type = get_option_string_interactive("edgedatatype", "int, uint, short, float, char, double, boolean, long, float-float, int-int, none");
    std::string nshards_str = get_option_string_interactive("nshards", "Number of shards to create, or 'auto'");

    if (edge_data_type == "float") {
        convert<float>(basefile, nshards_str);
    /* Bug fix: a missing 'else' here made the "float" case fall through
       the rest of the chain and hit the error branch (returning -1)
       even after a successful conversion. */
    } else if (edge_data_type == "float-float") {
        convert<PairContainer<float> >(basefile, nshards_str);
    } else if (edge_data_type == "int") {
        convert<int>(basefile, nshards_str);
    } else if (edge_data_type == "uint") {
        convert<unsigned int>(basefile, nshards_str);
    } else if (edge_data_type == "int-int") {
        convert<PairContainer<int> >(basefile, nshards_str);
    } else if (edge_data_type == "short") {
        convert<short>(basefile, nshards_str);
    } else if (edge_data_type == "double") {
        convert<double>(basefile, nshards_str);
    } else if (edge_data_type == "char") {
        convert<char>(basefile, nshards_str);
    } else if (edge_data_type == "boolean") {
        convert<bool>(basefile, nshards_str);
    } else if (edge_data_type == "long") {
        convert<long>(basefile, nshards_str);
    } else if (edge_data_type == "none") {
        convert_none(basefile, nshards_str);
    } else {
        /* Bug fix: message now lists all supported types. */
        logstream(LOG_ERROR) << "You need to specify edgedatatype. Currently supported: int, uint, short, float, char, double, boolean, long, float-float, int-int, none.";
        return -1;
    }
    return 0;
}
| C++ |
/**
* @file
* @author Aapo Kyrola <akyrola@cs.cmu.edu>
* @version 1.0
*
* @section LICENSE
*
* Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @section DESCRIPTION
*
* Sharder converts a graph into shards which the GraphChi engine
* can process.
*/
/**
* @section TODO
* Change all C-style IO to Unix-style IO.
*/
#ifndef GRAPHCHI_SHARDER_DEF
#define GRAPHCHI_SHARDER_DEF
#include <iostream>
#include <cstdio>
#include <fcntl.h>
#include <unistd.h>
#include <sys/stat.h>
#include <vector>
#include <omp.h>
#include <errno.h>
#include <sstream>
#include <string>
#include "api/chifilenames.hpp"
#include "api/graphchi_context.hpp"
#include "graphchi_types.hpp"
#include "io/stripedio.hpp"
#include "logger/logger.hpp"
#include "engine/auxdata/degree_data.hpp"
#include "metrics/metrics.hpp"
#include "metrics/reps/basic_reporter.hpp"
#include "preprocessing/formats/binary_adjacency_list.hpp"
#include "shards/memoryshard.hpp"
#include "shards/slidingshard.hpp"
#include "util/ioutil.hpp"
#include "util/qsort.hpp"
namespace graphchi {
#define SHARDER_BUFSIZE (64 * 1024 * 1024)
/* Sharding passes: 1) compute vertex intervals, 2) shovel edges to per-shard files. */
enum ProcPhase { COMPUTE_INTERVALS=1, SHOVEL=2 };
/** One edge (src, dst, value) as handled during sharding/shoveling. */
template <typename EdgeDataType>
struct edge_with_value {
    vid_t src;
    vid_t dst;
    EdgeDataType value;
#ifdef DYNAMICEDATA
    // For dynamic edge data, we need to know if the value needs to be added
    // to the vector, or are we storing an empty vector.
    bool is_chivec_value;
    uint16_t valindex;  // position of this value within the edge's value vector
#endif
    edge_with_value() {}
    edge_with_value(vid_t src, vid_t dst, EdgeDataType value) : src(src), dst(dst), value(value) {
#ifdef DYNAMICEDATA
        is_chivec_value = false;
        valindex = 0;
#endif
    }
    // A (0,0) edge acts as an end-of-stream marker in the shovel files.
    bool stopper() { return src == 0 && dst == 0; }
};
/* Comparator used when sorting shovel buffers: primary key src, secondary
   key dst (and for dynamic edge data, valindex as the final tiebreaker). */
template <typename EdgeDataType>
bool edge_t_src_less(const edge_with_value<EdgeDataType> &a, const edge_with_value<EdgeDataType> &b) {
    if (a.src != b.src) return a.src < b.src;
#ifdef DYNAMICEDATA
    if (a.dst == b.dst) {
        return a.valindex < b.valindex;
    }
#endif
    return a.dst < b.dst;
}
template <typename EdgeDataType>
class sharder {
typedef edge_with_value<EdgeDataType> edge_t;
protected:
std::string basefilename;
vid_t max_vertex_id;
/* Sharding */
int nshards;
std::vector< std::pair<vid_t, vid_t> > intervals;
std::vector< size_t > shovelsizes;
std::vector< int > shovelblocksidxs;
int phase;
int * edgecounts;
int vertexchunk;
size_t nedges;
std::string prefix;
int compressed_block_size;
edge_t ** bufs;
int * bufptrs;
size_t bufsize;
size_t edgedatasize;
size_t ebuffer_size;
size_t edges_per_block;
vid_t filter_max_vertex;
bool no_edgevalues;
#ifdef DYNAMICEDATA
edge_t last_added_edge;
#endif
metrics m;
binary_adjacency_list_writer<EdgeDataType> * preproc_writer;
public:
/* Constructor: records the base filename and initializes block sizing. */
sharder(std::string basefilename) : basefilename(basefilename), m("sharder"), preproc_writer(NULL) { bufs = NULL;
    edgedatasize = sizeof(EdgeDataType);
    no_edgevalues = false;
    compressed_block_size = 4096 * 1024;
    filter_max_vertex = 0;  // 0 = no filtering of vertex ids
    // Round the block size up so it holds a whole number of edge values.
    while (compressed_block_size % sizeof(EdgeDataType) != 0) compressed_block_size++;
    edges_per_block = compressed_block_size / sizeof(EdgeDataType);
}
/* Releases the preprocessing writer if a session was left open. */
virtual ~sharder() {
    delete preproc_writer;  // deleting NULL is a no-op
}
/* Caps vertex ids: edges referring beyond this id are filtered during sharding. */
void set_max_vertex_id(vid_t maxid) { filter_max_vertex = maxid; }
/* Disables writing of edge-value blocks (adjacency structure only). */
void set_no_edgevalues() { no_edgevalues = true; }
/* Name of the binary temporary (preprocessed) file for this graph. */
std::string preprocessed_name() { return preprocess_filename<EdgeDataType>(basefilename); }
/**
 * Checks if the preprocessed binary temporary file of a graph already exists,
 * so it does not need to be recreated.
 */
bool preprocessed_file_exists() {
    int f = open(preprocessed_name().c_str(), O_RDONLY);
    if (f < 0) return false;
    close(f);
    return true;
}
/**
 * Call to start a preprocessing session. Opens a writer to a ".tmp" file;
 * end_preprocessing() renames it to the final preprocessed name.
 */
void start_preprocessing() {
    // Guard against overlapping sessions.
    if (preproc_writer != NULL) {
        logstream(LOG_FATAL) << "start_preprocessing() already called! Aborting." << std::endl;
    }

    m.start_time("preprocessing");
    std::string tmpfilename = preprocessed_name() + ".tmp";
    preproc_writer = new binary_adjacency_list_writer<EdgeDataType>(tmpfilename);
    logstream(LOG_INFO) << "Started preprocessing: " << basefilename << " --> " << tmpfilename << std::endl;

    /* Write the maximum vertex id place holder - to be filled later */
    max_vertex_id = 0;
}
/**
 * Call to finish the preprocessing session. Finalizes the writer and
 * atomically renames the ".tmp" file to the preprocessed name.
 */
void end_preprocessing() {
    assert(preproc_writer != NULL);

    preproc_writer->finish();
    delete preproc_writer;
    preproc_writer = NULL;

    /* Rename temporary file */
    std::string tmpfilename = preprocessed_name() + ".tmp";
    rename(tmpfilename.c_str(), preprocessed_name().c_str());

    assert(preprocessed_file_exists());
    logstream(LOG_INFO) << "Finished preprocessing: " << basefilename << " --> " << preprocessed_name() << std::endl;
    m.stop_time("preprocessing");
}
/**
 * Add edge to be preprocessed with a value.
 */
void preprocessing_add_edge(vid_t from, vid_t to, EdgeDataType val) {
    preproc_writer->add_edge(from, to, val);
    if (from > max_vertex_id) max_vertex_id = from;
    if (to > max_vertex_id) max_vertex_id = to;
}
#ifdef DYNAMICEDATA
/* Adds one edge carrying several values (dynamic edge data): each value is
   written as its own record for the same (from, to) pair. */
void preprocessing_add_edge_multival(vid_t from, vid_t to, std::vector<EdgeDataType> & vals) {
    for(size_t j = 0; j < vals.size(); j++) {
        preproc_writer->add_edge(from, to, vals[j]);
    }
    if (from > max_vertex_id) max_vertex_id = from;
    if (to > max_vertex_id) max_vertex_id = to;
}
#endif
/**
 * Add edge without value to be preprocessed
 */
void preprocessing_add_edge(vid_t from, vid_t to) {
    preproc_writer->add_edge(from, to);
    if (from > max_vertex_id) max_vertex_id = from;
    if (to > max_vertex_id) max_vertex_id = to;
}
/** Buffered write function */
template <typename T>
void bwrite(int f, char * buf, char * &bufptr, T val) {
if (bufptr + sizeof(T) - buf >= SHARDER_BUFSIZE) {
writea(f, buf, bufptr - buf);
bufptr = buf;
}
*((T*)bufptr) = val;
bufptr += sizeof(T);
}
int blockid;  // running index of the next compressed edge-data block

/* Compresses and writes one edge-data block of the current shard, then
   advances blockid. (totbytes is unused here; kept for signature symmetry.) */
template <typename T>
void edata_flush(char * buf, char * bufptr, std::string & shard_filename, size_t totbytes) {
    int len = (int) (bufptr - buf);

    m.start_time("edata_flush");

    std::string block_filename = filename_shard_edata_block(shard_filename, blockid, compressed_block_size);
    int f = open(block_filename.c_str(), O_RDWR | O_CREAT, S_IROTH | S_IWOTH | S_IWUSR | S_IRUSR);
    write_compressed(f, buf, len);
    close(f);

    m.stop_time("edata_flush");

#ifdef DYNAMICEDATA
    // Write block's uncompressed size
    write_block_uncompressed_size(block_filename, len);
#endif

    blockid++;
}
/* Appends one edge value to the edge-data buffer; flushes a full block of
   edges_per_block values to disk, and grows the buffer when val would not
   fit. No-op when edge values are disabled. */
template <typename T>
void bwrite_edata(char * &buf, char * &bufptr, T val, size_t & totbytes, std::string & shard_filename,
                  size_t & edgecounter) {
    if (no_edgevalues) return;

    // A full block's worth of values: write it out compressed.
    if (edgecounter == edges_per_block) {
        edata_flush<T>(buf, bufptr, shard_filename, totbytes);
        bufptr = buf;
        edgecounter = 0;
    }

    // Check if buffer is big enough
    if (bufptr - buf + sizeof(T) > ebuffer_size) {
        ebuffer_size *= 2;
        logstream(LOG_DEBUG) << "Increased buffer size to: " << ebuffer_size << std::endl;
        size_t ptroff = bufptr - buf; // Remember the offset
        buf = (char *) realloc(buf, ebuffer_size);
        bufptr = buf + ptroff;  // realloc may move the buffer; restore position
    }

    totbytes += sizeof(T);
    *((T*)bufptr) = val;
    bufptr += sizeof(T);
}
bool try_load_intervals() {
std::vector<std::pair<vid_t, vid_t> > tmpintervals;
load_vertex_intervals(basefilename, nshards, tmpintervals, true);
if (tmpintervals.empty()) {
return false;
}
intervals = tmpintervals;
return true;
}
/**
 * Executes sharding: determines the shard count, then runs up to two passes
 * over the preprocessed file (1: compute vertex intervals, 2: shovel edges),
 * and finally writes the shards.
 * @param nshards_string the number of shards as a number, or "auto" for automatic determination
 * @return the number of shards created
 */
int execute_sharding(std::string nshards_string) {
    m.start_time("execute_sharding");
    determine_number_of_shards(nshards_string);

    // Single shard: the interval is trivially [0, max_vertex_id].
    if (nshards == 1) {
        binary_adjacency_list_reader<EdgeDataType> reader(preprocessed_name());
        max_vertex_id = (vid_t) reader.get_max_vertex_id();
        one_shard_intervals();
    }

    for(int phase=1; phase <= 2; ++phase) {
        if (nshards == 1 && phase == 1) continue; // No need for the first phase

        /* Start the sharing process */
        binary_adjacency_list_reader<EdgeDataType> reader(preprocessed_name());

        /* Read max vertex id */
        max_vertex_id = (vid_t) reader.get_max_vertex_id();
        if (filter_max_vertex > 0) {
            max_vertex_id = filter_max_vertex;
        }

        logstream(LOG_INFO) << "Max vertex id: " << max_vertex_id << std::endl;

        if (phase == 1) {
            if (try_load_intervals()) { // Hack: if intervals already computed, can skip that phase
                logstream(LOG_INFO) << "Found intervals-file, skipping that step!" << std::endl;
                continue;
            }
        }

        this->start_phase(phase);
        /* Reader streams every edge into this->receive_edge(). */
        reader.read_edges(this);
        this->end_phase();
    }
    /* Write the shards */
    write_shards();

    m.stop_time("execute_sharding");

    /* Print metrics */
    basic_reporter basicrep;
    m.report(basicrep);

    return nshards;
}
/**
* Sharding. This code might be hard to read - modify very carefully!
*/
protected:
/* Chooses the shard count: parses an explicit number, or for "auto"/"0"
   sizes shards so each fits in roughly 1/8 of the memory budget. */
virtual void determine_number_of_shards(std::string nshards_string) {
    assert(preprocessed_file_exists());
    if (nshards_string.find("auto") != std::string::npos || nshards_string == "0") {
        logstream(LOG_INFO) << "Determining number of shards automatically." << std::endl;

        int membudget_mb = get_option_int("membudget_mb", 1024);
        logstream(LOG_INFO) << "Assuming available memory is " << membudget_mb << " megabytes. " << std::endl;
        logstream(LOG_INFO) << " (This can be defined with configuration parameter 'membudget_mb')" << std::endl;

        binary_adjacency_list_reader<EdgeDataType> reader(preprocessed_name());
        size_t numedges = reader.get_numedges();

        double max_shardsize = membudget_mb * 1024. * 1024. / 8;
        logstream(LOG_INFO) << "Determining maximum shard size: " << (max_shardsize / 1024. / 1024.) << " MB." << std::endl;

        nshards = (int) ( 2 + (numedges * sizeof(EdgeDataType) / max_shardsize) + 0.5);
#ifdef DYNAMICEDATA
        // For dynamic edge data, more working memory is needed, thus the number of shards is larger.
        nshards = (int) ( 2 + 4 * (numedges * sizeof(EdgeDataType) / max_shardsize) + 0.5);
#endif
    } else {
        nshards = atoi(nshards_string.c_str());
    }
    assert(nshards > 0);
    logstream(LOG_INFO) << "Number of shards to be created: " << nshards << std::endl;
}
/* Splits the vertex-id range into nshards intervals of roughly equal edge
   count, using the per-chunk degree counts gathered in phase 1, and writes
   the intervals file plus a ".numvertices" meta-file. */
void compute_partitionintervals() {
    size_t edges_per_part = nedges / nshards + 1;

    logstream(LOG_INFO) << "Number of shards: " << nshards << std::endl;
    logstream(LOG_INFO) << "Edges per shard: " << edges_per_part << std::endl;
    logstream(LOG_INFO) << "Max vertex id: " << max_vertex_id << std::endl;

    vid_t cur_st = 0;
    size_t edgecounter=0;
    std::string fname = filename_intervals(basefilename, nshards);
    FILE * f = fopen(fname.c_str(), "w");

    if (f == NULL) {
        logstream(LOG_ERROR) << "Could not open file: " << fname << " error: " <<
        strerror(errno) << std::endl;
    }
    assert(f != NULL);

    vid_t i = 0;
    // NOTE(review): if the loop runs past max_vertex_id while fewer than
    // nshards intervals exist, edgecounts[i / vertexchunk] can index past
    // the array allocated in start_phase() — verify bounds for small graphs
    // with large nshards.
    while(nshards > (int) intervals.size()) {
        i += vertexchunk;
        edgecounter += edgecounts[i / vertexchunk];
        if (edgecounter >= edges_per_part || (i >= max_vertex_id)) {
            intervals.push_back(std::pair<vid_t,vid_t>(cur_st, std::min(i, max_vertex_id)));
            logstream(LOG_INFO) << "Interval: " << cur_st << " - " << i << std::endl;
            fprintf(f, "%u\n", std::min(i, max_vertex_id));
            cur_st = i + 1;
            edgecounter = 0;
        }
    }
    fclose(f);
    assert(nshards == (int)intervals.size());

    /* Write meta-file with the number of vertices */
    std::string numv_filename = basefilename + ".numvertices";
    f = fopen(numv_filename.c_str(), "w");
    fprintf(f, "%u\n", 1 + max_vertex_id);
    fclose(f);

    logstream(LOG_INFO) << "Computed intervals." << std::endl;
}
void one_shard_intervals() {
assert(nshards == 1);
std::string fname = filename_intervals(basefilename, nshards);
FILE * f = fopen(fname.c_str(), "w");
intervals.push_back(std::pair<vid_t,vid_t>(0, max_vertex_id));
fprintf(f, "%u\n", max_vertex_id);
fclose(f);
/* Write meta-file with the number of vertices */
std::string numv_filename = basefilename + ".numvertices";
f = fopen(numv_filename.c_str(), "w");
fprintf(f, "%u\n", 1 + max_vertex_id);
fclose(f);
assert(nshards == (int)intervals.size());
}
/* Name of the temporary "shovel" edge dump for the given shard. */
std::string shovel_filename(int shard) {
    std::stringstream name;
    name << basefilename;
    name << shard << "." << nshards << ".shovel";
    return name.str();
}
/* Allocates the working state for the given pass: per-chunk degree counters
   for COMPUTE_INTERVALS, per-shard shovel buffers for SHOVEL. */
void start_phase(int p) {
    phase = p;
    lastpart = 0;
    logstream(LOG_INFO) << "Starting phase: " << phase << std::endl;
    switch (phase) {
        case COMPUTE_INTERVALS:
            /* To compute the intervals, we need to keep track of the vertex degrees.
             If there is not enough memory to store degree for each vertex, we combine
             degrees of successive vertice. This results into less accurate shard split,
             but in practice it hardly matters. */
            vertexchunk = (int) (max_vertex_id * sizeof(int) / (1024 * 1024 * get_option_long("membudget_mb", 1024)));
            if (vertexchunk<1) vertexchunk = 1;
            edgecounts = (int*)calloc( max_vertex_id / vertexchunk + 1, sizeof(int));
            nedges = 0;
            break;

        case SHOVEL:
#ifdef DYNAMICEDATA
            last_added_edge = edge_t(-1, -1, EdgeDataType());
#endif
            shovelsizes.resize(nshards);
            shovelblocksidxs.resize(nshards);
            // One in-memory edge buffer per shard; flushed to numbered
            // shovel block files by swrite().
            bufs = new edge_t*[nshards];
            bufptrs = new int[nshards];

            // Divide the memory budget (capped) between the shard buffers.
            size_t membudget_mb = get_option_long("membudget_mb", 1024);
            if (membudget_mb > 3000) membudget_mb = 3000; // Cap to 3 gigs for this purpose
            bufsize = (1024 * 1024 * membudget_mb) / nshards / 4;
            // Round up so the buffer holds a whole number of edge structs.
            while(bufsize % sizeof(edge_t) != 0) bufsize++;

            logstream(LOG_DEBUG)<< "Shoveling bufsize: " << bufsize << std::endl;

            for(int i=0; i < nshards; i++) {
                shovelsizes[i] = 0;
                shovelblocksidxs[i] = 0;
                bufs[i] = (edge_t*) malloc(bufsize);
                bufptrs[i] = 0;
            }
            break;
    }
}
/**
 * Finalizes one preprocessing phase, flushing buffers and releasing the
 * memory allocated by start_phase().
 * Bug fix: bufs and bufptrs are allocated with new[] in start_phase(), so
 * releasing them with free() was undefined behavior; use delete[] instead.
 * (The per-shard buffers bufs[i] are malloc()ed, so free() is correct for them.)
 */
void end_phase() {
    logstream(LOG_INFO) << "Ending phase: " << phase << std::endl;
    switch (phase) {
        case COMPUTE_INTERVALS:
            compute_partitionintervals();
            free(edgecounts);
            edgecounts = NULL;
            break;
        case SHOVEL:
            for(int i=0; i<nshards; i++) {
                // Flush remaining buffered edges with a final forced write.
                swrite(i, edge_t(0, 0, EdgeDataType()), true);
                free(bufs[i]);   // malloc()ed in start_phase()
            }
            delete [] bufs;      // new[]ed in start_phase()
            bufs = NULL;
            delete [] bufptrs;   // new[]ed in start_phase()
            bufptrs = NULL;
            break;
    }
}
int lastpart;
void swrite(int shard, edge_t et, bool flush=false) {
if (!flush)
bufs[shard][bufptrs[shard]++] = et;
if (flush || bufptrs[shard] * sizeof(edge_t) >= bufsize) {
m.start_time("shovel_flush");
std::stringstream ss;
ss << shovel_filename(shard) << "." << shovelblocksidxs[shard];
std::string shovelfblockname = ss.str();
int bf = open(shovelfblockname.c_str(), O_WRONLY | O_CREAT, S_IROTH | S_IWOTH | S_IWUSR | S_IRUSR);
size_t len = sizeof(edge_t) * bufptrs[shard];
writea(bf, bufs[shard], len);
bufptrs[shard] = 0;
close(bf);
shovelsizes[shard] += len;
shovelblocksidxs[shard] ++;
m.stop_time("shovel_flush");
logstream(LOG_DEBUG) << "Flushed " << shovelfblockname << " bufsize: " << bufsize << std::endl;
}
}
/**
 * Called on the second and third phase of the preprocessing by binary_adjacency_list reader.
 * COMPUTE_INTERVALS phase: only increments the (chunked) in-degree counter
 * of the destination vertex. SHOVEL phase: appends the edge to the shovel
 * of the shard whose vertex interval contains the destination.
 * Self-edges are dropped with a warning.
 * @param from source vertex id
 * @param to destination vertex id
 * @param value edge value
 * @param input_value whether an explicit value was supplied (dynamic edge data only)
 */
void receive_edge(vid_t from, vid_t to, EdgeDataType value, bool input_value) {
    if (to == from) {
        logstream(LOG_WARNING) << "Tried to add self-edge " << from << "->" << to << std::endl;
        return;
    }
    if (from > max_vertex_id || to > max_vertex_id) {
        // NOTE(review): when max_vertex_id == 0 an out-of-range id is treated
        // as a fatal error, otherwise the edge is silently skipped —
        // presumably 0 means the maximum was never determined; verify.
        if (max_vertex_id == 0) {
            logstream(LOG_ERROR) << "Tried to add an edge with too large from/to values. From:" <<
            from << " to: "<< to << " max: " << max_vertex_id << std::endl;
            assert(false);
        } else {
            return;
        }
    }
    switch (phase) {
        case COMPUTE_INTERVALS:
            edgecounts[to / vertexchunk]++;
            nedges++;
            break;
        case SHOVEL:
            // Find the shard whose interval contains 'to', starting from the
            // shard used last time (edges for a vertex tend to be contiguous).
            bool found=false;
            for(int i=0; i < nshards; i++) {
                int shard = (lastpart + i) % nshards;

                if (to >= intervals[shard].first && to <= intervals[shard].second) {
                    edge_t e(from, to, value);
#ifdef DYNAMICEDATA
                    e.is_chivec_value = input_value;
                    // Keep track of multiple values for same edge
                    if (last_added_edge.src == e.src && last_added_edge.dst == to) {
                        e.valindex = last_added_edge.valindex + 1;
                    }
                    last_added_edge = e;
#endif
                    swrite(shard, e);
                    lastpart = shard;  // Small optimizations, which works if edges are in order for each vertex - not much though
                    found = true;
                    break;
                }
            }
            if(!found) {
                logstream(LOG_ERROR) << "Shard not found for : " << to << std::endl;
            }
            assert(found);
            break;
    }
}
/**
 * Reads the entire shovel of a shard back into one malloc()ed buffer by
 * concatenating the numbered shovel block files, which are deleted as they
 * are consumed.
 * @param shard shard index
 * @param data receives pointer to the malloc()ed buffer; caller must free()
 * @return total number of bytes read (== shovelsizes[shard])
 */
size_t read_shovel(int shard, char ** data) {
    m.start_time("read_shovel");
    size_t sz = shovelsizes[shard];
    *data = (char *) malloc(sz);
    char * ptr = * data;
    size_t nread = 0;
    int blockidx = 0;
    while(true) {
        // Every block written by swrite() is exactly bufsize bytes except
        // possibly the last (partial) one, so min() gives the block length.
        size_t len = std::min(bufsize, sz-nread);
        std::stringstream ss;
        ss << shovel_filename(shard) << "." << blockidx;
        std::string shovelfblockname = ss.str();
        int f = open(shovelfblockname.c_str(), O_RDONLY);
        if (f < 0) break;   // no more blocks
        m.start_time("shovel_read");
        preada(f, ptr, len, 0);
        m.stop_time("shovel_read");

        nread += len;
        ptr += len;
        close(f);
        blockidx++;
        remove(shovelfblockname.c_str());  // shovel blocks are temporary
    }
    m.stop_time("read_shovel");
    assert(nread == sz);
    return sz;
}
/**
 * Write the shard by sorting the shovel file and compressing the
 * adjacency information.
 * To support different shard types, override this function!
 *
 * For each shard: reads its shovel back, sorts the edges by source vertex,
 * and emits (a) the compact adjacency file, with out-edge counts and
 * run-length-encoded empty vertices, and (b) the edge-data block files.
 * Optionally counts vertex degrees in memory and writes the degree file.
 */
virtual void write_shards() {
    int membudget_mb = get_option_int("membudget_mb", 1024);

    // Check if we have enough memory to keep track
    // of the vertex degrees in-memory (heuristic)
    bool count_degrees_inmem = size_t(membudget_mb) * 1024 * 1024 / 3 > max_vertex_id * sizeof(degree);
#ifdef DYNAMICEDATA
    if (!count_degrees_inmem) {
        /* Temporary: force in-memory count of degrees because the PSW-based computation
         is not yet compatible with dynamic edge data.
         */
        logstream(LOG_WARNING) << "Dynamic edge data support only sharding when the vertex degrees can be computed in-memory." << std::endl;
        logstream(LOG_WARNING) << "If the program gets very slow (starts swapping), the data size is too big." << std::endl;
        count_degrees_inmem = true;
    }
#endif
    degree * degrees = NULL;
    if (count_degrees_inmem) {
        degrees = (degree *) calloc(1 + max_vertex_id, sizeof(degree));
    }

    for(int shard=0; shard < nshards; shard++) {
        m.start_time("shard_final");
        blockid = 0;
        size_t edgecounter = 0;

        logstream(LOG_INFO) << "Starting final processing for shard: " << shard << std::endl;

        std::string fname = filename_shard_adj(basefilename, shard, nshards);
        std::string edfname = filename_shard_edata<EdgeDataType>(basefilename, shard, nshards);
        std::string edblockdirname = dirname_shard_edata_block(edfname, compressed_block_size);

        /* Make the block directory */
        if (!no_edgevalues)
            mkdir(edblockdirname.c_str(), 0777);

        // Read the shovel back and sort edges by source vertex so that each
        // vertex's out-edges become contiguous.
        edge_t * shovelbuf;
        size_t shovelsize = read_shovel(shard, (char**) &shovelbuf);
        size_t numedges = shovelsize / sizeof(edge_t);

        logstream(LOG_DEBUG) << "Shovel size:" << shovelsize << " edges: " << numedges << std::endl;

        quickSort(shovelbuf, (int)numedges, edge_t_src_less<EdgeDataType>);

        // Create the final file
        int f = open(fname.c_str(), O_WRONLY | O_CREAT, S_IROTH | S_IWOTH | S_IWUSR | S_IRUSR);
        if (f < 0) {
            logstream(LOG_ERROR) << "Could not open " << fname << " error: " << strerror(errno) << std::endl;
        }
        assert(f >= 0);
        int trerr = ftruncate(f, 0);
        assert(trerr == 0);

        char * buf = (char*) malloc(SHARDER_BUFSIZE);
        char * bufptr = buf;

        char * ebuf = (char*) malloc(compressed_block_size);
        ebuffer_size = compressed_block_size;
        char * ebufptr = ebuf;

        vid_t curvid=0;
#ifdef DYNAMICEDATA
        vid_t lastdst = 0xffffffff;
        int jumpover = 0;
        size_t num_uniq_edges = 0;
        size_t last_edge_count = 0;
#endif
        size_t istart = 0;
        size_t tot_edatabytes = 0;
        // Iterates one past the end: the extra iteration injects a "stopper"
        // edge that flushes the adjacency list of the last vertex.
        for(size_t i=0; i <= numedges; i++) {
#ifdef DYNAMICEDATA
            i += jumpover;  // With dynamic values, there might be several values for one edge, and thus the edge repeated in the data.
            jumpover = 0;
#endif //DYNAMICEDATA
            edge_t edge = (i < numedges ? shovelbuf[i] : edge_t(0, 0, EdgeDataType())); // Last "element" is a stopper

#ifdef DYNAMICEDATA
            if (lastdst == edge.dst && edge.src == curvid) {
                // Currently not supported
                logstream(LOG_ERROR) << "Duplicate edge in the stream - aborting" << std::endl;
                assert(false);
            }
            lastdst = edge.dst;
#endif

            if (!edge.stopper()) {
#ifndef DYNAMICEDATA
                bwrite_edata<EdgeDataType>(ebuf, ebufptr, EdgeDataType(edge.value), tot_edatabytes, edfname, edgecounter);
#else
                /* If we have dynamic edge data, we need to write the header of chivector - if there are edge values */
                if (edge.is_chivec_value) {
                    // Need to check how many values for this edge
                    int count = 1;
                    while(shovelbuf[i + count].valindex == count) { count++; }
                    assert(count < 32768);

                    typename chivector<EdgeDataType>::sizeword_t szw;
                    ((uint16_t *) &szw)[0] = (uint16_t)count;  // Sizeword with length and capacity = count
                    ((uint16_t *) &szw)[1] = (uint16_t)count;
                    bwrite_edata<typename chivector<EdgeDataType>::sizeword_t>(ebuf, ebufptr, szw, tot_edatabytes, edfname, edgecounter);
                    for(int j=0; j < count; j++) {
                        bwrite_edata<EdgeDataType>(ebuf, ebufptr, EdgeDataType(shovelbuf[i + j].value), tot_edatabytes, edfname, edgecounter);
                    }
                    jumpover = count - 1; // Jump over
                } else {
                    // Just write size word with zero
                    bwrite_edata<int>(ebuf, ebufptr, 0, tot_edatabytes, edfname, edgecounter);
                }
                num_uniq_edges++;
#endif
                edgecounter++; // Increment edge counter here --- notice that dynamic edata case makes two or more calls to bwrite_edata before incrementing
            }
            if (degrees != NULL && edge.src != edge.dst) {
                degrees[edge.src].outdegree++;
                degrees[edge.dst].indegree++;
            }

            if ((edge.src != curvid) || edge.stopper()) {
                // New vertex: emit the out-edge list of the previous vertex.
#ifndef DYNAMICEDATA
                size_t count = i - istart;
#else
                size_t count = num_uniq_edges - 1 - last_edge_count;
                last_edge_count = num_uniq_edges - 1;
                if (edge.stopper()) count++;
#endif
                assert(count>0 || curvid==0);
                if (count>0) {
                    // Variable-length count: one byte if < 255, otherwise
                    // the 0xff marker followed by a 32-bit count.
                    if (count < 255) {
                        uint8_t x = (uint8_t)count;
                        bwrite<uint8_t>(f, buf, bufptr, x);
                    } else {
                        bwrite<uint8_t>(f, buf, bufptr, 0xff);
                        bwrite<uint32_t>(f, buf, bufptr, (uint32_t)count);
                    }
                }

#ifndef DYNAMICEDATA
                for(size_t j=istart; j < i; j++) {
                    bwrite(f, buf, bufptr,  shovelbuf[j].dst);
                }
#else
                // Special dealing with dynamic edata because some edges can be present multiple
                // times in the shovel.
                for(size_t j=istart; j < i; j++) {
                    if (j == istart || shovelbuf[j - 1].dst != shovelbuf[j].dst) {
                        bwrite(f, buf, bufptr,  shovelbuf[j].dst);
                    }
                }
#endif

                istart = i;
#ifdef DYNAMICEDATA
                istart += jumpover;
#endif

                // Handle zeros: run-length encode empty vertices as a zero
                // byte followed by the number of further skipped vertices
                // (at most 254 per pair).
                if (!edge.stopper()) {
                    if (edge.src - curvid > 1 || (i == 0 && edge.src>0)) {
                        int nz = edge.src - curvid - 1;
                        if (i == 0 && edge.src > 0) nz = edge.src; // border case with the first one
                        do {
                            bwrite<uint8_t>(f, buf, bufptr, 0);
                            nz--;
                            int tnz = std::min(254, nz);
                            bwrite<uint8_t>(f, buf, bufptr, (uint8_t) tnz);
                            nz -= tnz;
                        } while (nz>0);
                    }
                }
                curvid = edge.src;
            }
        }

        /* Flush buffers and free memory */
        writea(f, buf, bufptr - buf);
        free(buf);
        free(shovelbuf);
        close(f);

        /* Write edata size file */
        if (!no_edgevalues) {
            edata_flush<EdgeDataType>(ebuf, ebufptr, edfname, tot_edatabytes);

            std::string sizefilename = edfname + ".size";
            std::ofstream ofs(sizefilename.c_str());
#ifndef DYNAMICEDATA
            ofs << tot_edatabytes;
#else
            ofs << num_uniq_edges * sizeof(int); // For dynamic edge data, write the number of edges.
#endif
            ofs.close();
        }
        free(ebuf);

        m.stop_time("shard_final");
    }

    if (!count_degrees_inmem) {
#ifndef DYNAMICEDATA
        // Use memory-efficient (but slower) method to create degree-data
        create_degree_file();
#endif
    } else {
        // Degrees were counted in memory: just dump the array to disk.
        std::string degreefname = filename_degree_data(basefilename);
        int degreeOutF = open(degreefname.c_str(), O_RDWR | O_CREAT, S_IROTH | S_IWOTH | S_IWUSR | S_IRUSR);
        if (degreeOutF < 0) {
            logstream(LOG_ERROR) << "Could not create: " << degreeOutF << std::endl;
            assert(degreeOutF >= 0);
        }
        writea(degreeOutF, degrees, sizeof(degree) * (1 + max_vertex_id));
        free(degrees);
        close(degreeOutF);
    }
}
// Helper types for create_degree_file(): degree counting needs only the
// adjacency structure, so a one-byte dummy edge-data type is used.
typedef char dummy_t;
typedef sliding_shard<int, dummy_t> slidingshard_t;
typedef memory_shard<int, dummy_t> memshard_t;
#ifndef DYNAMICEDATA
/**
 * Computes the in/out degree of every vertex using the parallel-sliding-
 * windows mechanism (one memory shard + streaming shards per interval),
 * and writes the degrees as (indegree, outdegree) int pairs to the
 * degree-data file. Used when degrees cannot be counted in memory.
 */
void create_degree_file() {
    // Initialize IO
    stripedio * iomgr = new stripedio(m);
    std::vector<slidingshard_t * > sliding_shards;

    int subwindow = 5000000;
    m.set("subwindow", (size_t)subwindow);

    int loadthreads = 4;

    m.start_time("degrees.runtime");

    /* Initialize streaming shards */
    int blocksize = compressed_block_size;

    for(int p=0; p < nshards; p++) {
        logstream(LOG_INFO) << "Initialize streaming shard: " << p << std::endl;
        sliding_shards.push_back(
                                 new slidingshard_t(iomgr, filename_shard_edata<dummy_t>(basefilename, p, nshards),
                                                    filename_shard_adj(basefilename, p, nshards), intervals[p].first,
                                                    intervals[p].second,
                                                    blocksize, m, true, true));
    }

    graphchi_context ginfo;
    ginfo.nvertices = 1 + intervals[nshards - 1].second;
    ginfo.scheduler = NULL;

    std::string outputfname = filename_degree_data(basefilename);

    int degreeOutF = open(outputfname.c_str(), O_RDWR | O_CREAT, S_IROTH | S_IWOTH | S_IWUSR | S_IRUSR);
    if (degreeOutF < 0) {
        logstream(LOG_ERROR) << "Could not create: " << degreeOutF << std::endl;
    }
    assert(degreeOutF >= 0);
    // Preallocate the output: two ints (in/out degree) per vertex.
    int trerr = ftruncate(degreeOutF, ginfo.nvertices * sizeof(int) * 2);
    assert(trerr == 0);

    for(int window=0; window<nshards; window++) {
        metrics_entry mwi = m.start_time();

        vid_t interval_st = intervals[window].first;
        vid_t interval_en = intervals[window].second;

        /* Flush stream shard for the window */
        sliding_shards[window]->flush();

        /* Load shard[window] into memory */
        memshard_t memshard(iomgr, filename_shard_edata<EdgeDataType>(basefilename, window, nshards), filename_shard_adj(basefilename, window, nshards),
                            interval_st, interval_en, blocksize, m);
        memshard.only_adjacency = true;
        logstream(LOG_INFO) << "Interval: " << interval_st << " " << interval_en << std::endl;

        // Process the interval in sub-windows to bound memory use.
        for(vid_t subinterval_st=interval_st; subinterval_st <= interval_en; ) {
            vid_t subinterval_en = std::min(interval_en, subinterval_st + subwindow);
            logstream(LOG_INFO) << "(Degree proc.) Sub-window: [" << subinterval_st << " - " << subinterval_en << "]" << std::endl;
            assert(subinterval_en >= subinterval_st && subinterval_en <= interval_en);

            /* Preallocate vertices */
            metrics_entry men = m.start_time();
            int nvertices = subinterval_en - subinterval_st + 1;
            std::vector< graphchi_vertex<int, dummy_t> > vertices(nvertices, graphchi_vertex<int, dummy_t>()); // preallocate
            for(int i=0; i < nvertices; i++) {
                vertices[i] = graphchi_vertex<int, dummy_t>(subinterval_st + i, NULL, NULL, 0, 0);
                vertices[i].scheduled =  true;
            }

            metrics_entry me = m.start_time();
            omp_set_num_threads(loadthreads);
            // p == -1 loads the in-memory shard; p >= 0 streams the others.
#pragma omp parallel for
            for(int p=-1; p < nshards; p++) {
                if (p == (-1)) {
                    // if first window, now need to load the memshard
                    if (memshard.loaded() == false) {
                        memshard.load();
                    }

                    /* Load vertices from memshard (only inedges for now so can be done in parallel) */
                    memshard.load_vertices(subinterval_st, subinterval_en, vertices);
                } else {
                    /* Stream forward other than the window partition */
                    if (p != window) {
                        sliding_shards[p]->read_next_vertices(nvertices, subinterval_st, vertices, false);
                    }
                }
            }

            m.stop_time(me, "stream_ahead", window);

            metrics_entry mev = m.start_time();
            // Read first current values
            int * vbuf = (int*) malloc(nvertices * sizeof(int) * 2);

            for(int i=0; i<nvertices; i++) {
                vbuf[2 * i] = vertices[i].num_inedges();
                vbuf[2 * i +1] = vertices[i].num_outedges();
            }
            pwritea(degreeOutF, vbuf, nvertices * sizeof(int) * 2, subinterval_st * sizeof(int) * 2);

            free(vbuf);

            // Move window
            subinterval_st = subinterval_en+1;
        }
        /* Move the offset of the window-shard forward */
        sliding_shards[window]->set_offset(memshard.offset_for_stream_cont(), memshard.offset_vid_for_stream_cont(),
                                           memshard.edata_ptr_for_stream_cont());
    }
    close(degreeOutF);
    m.stop_time("degrees.runtime");
    delete iomgr;
}
#endif
friend class binary_adjacency_list_reader<EdgeDataType>;
}; // End class sharder
}; // namespace
#endif
| C++ |
/**
* @file
* @author Aapo Kyrola <akyrola@cs.cmu.edu>
* @version 1.0
*
* @section LICENSE
*
* Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @section DESCRIPTION
*
* Dynamic data version: manages a block.
*/
#ifndef graphchi_xcode_dynamicblock_hpp
#define graphchi_xcode_dynamicblock_hpp
#include <stdint.h>
namespace graphchi {
int get_block_uncompressed_size(std::string blockfilename, int defaultsize);

/**
 * Reads the uncompressed size of a dynamic-edge-data block from its
 * companion ".bsize" file.
 * Bug fix: the fread() return value was ignored; on a short read the
 * uninitialized sz would have been returned.
 * @param blockfilename block file name (".bsize" suffix is appended)
 * @param defaultsize value returned when no valid size file exists
 * @return the stored size, or defaultsize if the size file is missing
 *         or cannot be read completely
 */
int get_block_uncompressed_size(std::string blockfilename, int defaultsize) {
    std::string szfilename = blockfilename + ".bsize";
    FILE * f = fopen(szfilename.c_str(), "r");

    if (f != NULL) {
        int sz;
        size_t nread = fread(&sz, 1, sizeof(int), f);
        fclose(f);
        if (nread == sizeof(int)) {
            return sz;
        }
    }
    return defaultsize;
}
void write_block_uncompressed_size(std::string blockfilename, int size);
void write_block_uncompressed_size(std::string blockfilename, int size) {
std::string szfilename = blockfilename + ".bsize";
FILE * f = fopen(szfilename.c_str(), "w");
fwrite(&size, 1, sizeof(int), f);
fclose(f);
if (size > 20000000) {
logstream(LOG_DEBUG) << "Block " << blockfilename << " size:" << size << std::endl;
}
}
void delete_block_uncompressed_sizefile(std::string blockfilename);

/**
 * Removes the companion ".bsize" file of a block, if one exists.
 * A missing size file is not an error.
 */
void delete_block_uncompressed_sizefile(std::string blockfilename) {
    const std::string sizefile = blockfilename + ".bsize";
    if (remove(sizefile.c_str()) != 0) {
        /* The size file did not exist - that is fine. */
    }
}
/**
 * In-memory view of one dynamic-edge-data block. The raw serialized
 * format is a sequence of records: a sizeword (two uint16s: size and
 * capacity) followed by 'capacity' elements; the constructor deserializes
 * them into an array of chivectors (ET), and write() re-serializes.
 *
 * NOTE: the struct owns 'chivecs' but has no copy constructor/assignment,
 * so copying an instance would double-delete; it is used via pointers.
 */
template <typename ET>
struct dynamicdata_block {
    int nitems;        // number of chivectors in this block
    uint8_t * data;    // serialized source buffer (not owned)
    ET * chivecs;      // deserialized chivectors (owned; delete[]d in dtor)

    /* Bug fix: nitems and data were left uninitialized by the default
       constructor. */
    dynamicdata_block() : nitems(0), data(NULL), chivecs(NULL) {}

    /**
     * Deserializes 'nitems' chivectors from the given raw buffer.
     * @param nitems number of records in the buffer
     * @param data raw serialized block (must outlive this object)
     * @param datasize size of the raw buffer in bytes (bounds check only)
     */
    dynamicdata_block(int nitems, uint8_t * data, int datasize) : nitems(nitems), data(data) {
        chivecs = new ET[nitems];
        uint8_t * ptr = data;
        for(int i=0; i < nitems; i++) {
            assert(ptr - data <= datasize);
            typename ET::sizeword_t * sz = ((typename ET::sizeword_t *) ptr);
            ptr += sizeof(typename ET::sizeword_t);
            // First 16 bits: element count (size); second 16 bits: capacity.
            chivecs[i] = ET(((uint16_t *)sz)[0], ((uint16_t *)sz)[1], (typename ET::element_type_t *) ptr);
            ptr += (int) ((uint16_t *)sz)[1] * sizeof(typename ET::element_type_t);
        }
    }

    /** Returns a pointer to the i'th chivector (0 <= i < nitems). */
    ET * edgevec(int i) {
        assert(i >= 0 && i < nitems);
        assert(chivecs != NULL);
        return &chivecs[i];
    }

    /**
     * Serializes the block into a freshly malloc()ed buffer.
     * @param outdata receives the malloc()ed buffer; caller must free()
     * @param size receives the buffer size in bytes
     */
    void write(uint8_t ** outdata, int & size) {
        // First compute size
        size = 0;
        for(int i=0; i < nitems; i++) {
            size += chivecs[i].capacity() * sizeof(typename ET::element_type_t) + sizeof(typename ET::sizeword_t);
        }
        *outdata = (uint8_t *) malloc(size);
        uint8_t * ptr = *outdata;
        for(int i=0; i < nitems; i++) {
            ET & vec = chivecs[i];
            ((uint16_t *) ptr)[0] = vec.size();
            ((uint16_t *) ptr)[1] = vec.capacity();
            ptr += sizeof(typename ET::sizeword_t);
            vec.write((typename ET::element_type_t *) ptr);
            ptr += vec.capacity() * sizeof(typename ET::element_type_t);
        }
    }

    ~dynamicdata_block() {
        if (chivecs != NULL) {
            delete [] chivecs;
        }
    }
};
};
#endif
| C++ |
/**
* @file
* @author Aapo Kyrola <akyrola@cs.cmu.edu>
* @version 1.0
*
* @section LICENSE
*
* Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @section DESCRIPTION
*
* Dynamic data version: The sliding shard.
*/
#ifndef DEF_GRAPHCHI_SLIDINGSHARD
#define DEF_GRAPHCHI_SLIDINGSHARD
#include <iostream>
#include <cstdio>
#include <sstream>
#include <vector>
#include <fcntl.h>
#include <unistd.h>
#include <assert.h>
#include <string>
#include "api/graph_objects.hpp"
#include "metrics/metrics.hpp"
#include "logger/logger.hpp"
#include "io/stripedio.hpp"
#include "graphchi_types.hpp"
#include "api/dynamicdata/chivector.hpp"
#include "shards/dynamicdata/dynamicblock.hpp"
namespace graphchi {
/**
* A streaming block.
*/
/**
 * A streaming block.
 * Holds one block of the adjacency or edge-data file while a sliding shard
 * moves through it. For edge-data blocks (dynamic edge data) the raw bytes
 * are deserialized into a dynamicdata_block on read, and re-serialized
 * (updating the ".bsize" companion file) on commit.
 */
template <typename ET>
struct sblock {
    int writedesc;     // I/O session used for writing
    int readdesc;      // I/O session used for reading
    size_t offset;     // start byte offset of this block within the file
    size_t end;        // end byte offset (exclusive)
    uint8_t * data;    // raw block contents (NULL when not loaded)
    uint8_t * ptr;     // cursor within data
    bool active;
    bool is_edata_block;     // true = edge-data block, false = adjacency block
    std::string blockfilename;
    dynamicdata_block<ET> * dynblock;  // deserialized view (edge-data blocks only)

    // NOTE(review): is_edata_block is left uninitialized by this default
    // constructor — verify that callers only use the parameterized ones.
    sblock() : writedesc(0), readdesc(0), active(false) { data = NULL; dynblock = NULL; }
    sblock(int wdesc, int rdesc, bool is_edata_block=false) : writedesc(wdesc), readdesc(rdesc), active(false),
    is_edata_block(is_edata_block){ data = NULL; dynblock = NULL; }
    // Edge-data block constructor: the block file name is needed for the
    // companion ".bsize" size file.
    sblock(int wdesc, int rdesc, bool is_edata_block, std::string blockfilename) : writedesc(wdesc), readdesc(rdesc), active(false),
    is_edata_block(is_edata_block), blockfilename(blockfilename) {
        assert(is_edata_block == true);
        data = NULL;
        dynblock = NULL;
    }

    void commit_async(stripedio * iomgr) {
        commit_now(iomgr);  // TODO: async
        release(iomgr); // Note!
    }

    /** Writes the block back to disk (synchronously). */
    void commit_now(stripedio * iomgr) {
        if (active && data != NULL && writedesc >= 0) {
            // Clamp the written length to the block's extent.
            size_t len = ptr-data;
            if (len > end-offset) len = end-offset;
            if (is_edata_block) {
                // Re-serialize the dynamic data and record its new size.
                uint8_t * outdata = NULL;
                int realsize;
                dynblock->write(&outdata, realsize);
                write_block_uncompressed_size(blockfilename, realsize);
                iomgr->managed_pwritea_now(writedesc, &outdata, realsize, 0);  /* Need to write whole block in the compressed regime */
                free(outdata);
            } else {
                iomgr->managed_pwritea_now(writedesc, &data, len, offset);
            }
        }
    }

    // Asynchronous reads are not supported for dynamic edge data.
    void read_async(stripedio * iomgr) {
        assert(false);
    }

    /** Reads the block contents; edge-data blocks are also deserialized. */
    void read_now(stripedio * iomgr) {
        if (is_edata_block) {
            int realsize = get_block_uncompressed_size(blockfilename, end-offset);
            iomgr->managed_preada_now(readdesc, &data, realsize, 0);
            int nedges = (end - offset) / sizeof(int); // Ugly
            dynblock = new dynamicdata_block<ET>(nedges, (uint8_t *) data, realsize);
        } else {
            iomgr->managed_preada_now(readdesc, &data, end - offset, offset);
        }
    }

    /** Releases the data buffer, the I/O session (edata) and the dynblock. */
    void release(stripedio * iomgr) {
        if (data != NULL) {
            iomgr->managed_release(readdesc, &data);
        }
        if (is_edata_block) {
            iomgr->close_session(readdesc);
        }
        if (dynblock != NULL) {
            delete dynblock;
            dynblock = NULL;
        }
        data = NULL;
    }
};
/**
 * One entry of the sparse index: the byte offsets into the adjacency and
 * edge-data files at which a given vertex's data starts.
 */
struct indexentry {
    size_t adjoffset;
    size_t edataoffset;
    indexentry(size_t a, size_t e) : adjoffset(a), edataoffset(e) {}
};
/*
* Graph shard that is streamed. I.e, it can only read in one direction, a chunk
* a time.
*/
// ET must be a chivector<T>
/**
 * Graph shard that is streamed sequentially (dynamic edge data version).
 * Reads the adjacency and edge-data files block by block in one direction,
 * maintaining a sparse index so later passes can skip ahead quickly.
 * ET must be a chivector<T>.
 */
template <typename VT, typename ET, typename svertex_t = graphchi_vertex<VT, ET>, typename ETspecial = ET>
class sliding_shard {

    stripedio * iomgr;

    std::string filename_edata;
    std::string filename_adj;

    vid_t range_st, range_end;          // vertex interval covered by this shard
    size_t blocksize;
    vid_t curvid;                       // next vertex to be read
    size_t adjoffset, edataoffset, adjfilesize, edatafilesize;  // cursors and file sizes (bytes)

    size_t window_start_edataoffset;    // edata offset at the start of the current window

    std::vector<sblock<ET> > activeblocks;   // edge-data blocks currently held

    int adjfile_session;
    int writedesc;
    sblock<ET> * curblock;              // current edge-data block
    sblock<ET> * curadjblock;           // current adjacency block
    metrics &m;

    std::map<int, indexentry> sparse_index;  // Sparse index that can be created in the fly
    bool disable_writes;

    bool async_edata_loading;
    //  bool need_read_outedges;  // Disabled - does not work with compressed data: whole block needs to be read.

public:
    bool only_adjacency;    // if true, edge data is never loaded

    /**
     * @param _disable_writes NOTE(review): this argument is overridden to
     *        false in the constructor body below — verify intentional.
     * @param onlyadj load only the adjacency structure (no edge values)
     */
    sliding_shard(stripedio * iomgr, std::string _filename_edata, std::string _filename_adj, vid_t _range_st, vid_t _range_en, size_t _blocksize, metrics &_m,
                  bool _disable_writes=false, bool onlyadj = false) :
    iomgr(iomgr),
    filename_edata(_filename_edata),
    filename_adj(_filename_adj),
    range_st(_range_st),
    range_end(_range_en),
    blocksize(_blocksize),
    m(_m),
    disable_writes(_disable_writes) {
        curvid = 0;
        adjoffset = 0;
        edataoffset = 0;
        disable_writes = false;
        only_adjacency = onlyadj;
        curblock = NULL;
        curadjblock = NULL;
        window_start_edataoffset = 0;

        // Round the block size up to a multiple of sizeof(int).
        while(blocksize % sizeof(int) != 0) blocksize++;
        assert(blocksize % sizeof(int)==0);

        adjfilesize = get_filesize(filename_adj);
        edatafilesize = get_shard_edata_filesize<int>(filename_edata);
        if (!only_adjacency) {
            logstream(LOG_DEBUG) << "Total edge data size: " << edatafilesize << std::endl;
        } else {
            // Nothing
        }
        adjfile_session = iomgr->open_session(filename_adj, true);
        save_offset();
        async_edata_loading = false;  // With dynamic edge data size, do not load
    }

    ~sliding_shard() {
        release_prior_to_offset(true);
        if (curblock != NULL) {
            curblock->release(iomgr);
            delete curblock;
            curblock = NULL;
        }
        if (curadjblock != NULL) {
            curadjblock->release(iomgr);
            delete curadjblock;
            curadjblock = NULL;
        }
        iomgr->close_session(adjfile_session);
    }

    // NOTE(review): edatafilesize was obtained via
    // get_shard_edata_filesize<int>, so dividing by sizeof(ET) here looks
    // inconsistent for dynamic edge data — verify against callers.
    size_t num_edges() {
        return edatafilesize / sizeof(ET);
    }

protected:

    size_t get_adjoffset() { return adjoffset; }
    size_t get_edataoffset() { return edataoffset; }

    /** Records (curvid -> current offsets) in the sparse index. */
    void save_offset() {
        // Note, so that we can use the lower bound operation in map, we need
        // to insert indices in reverse order
        sparse_index.insert(std::pair<int, indexentry>(-((int)curvid), indexentry(adjoffset, edataoffset)));
    }

    /** Jumps the cursors forward to the closest indexed vertex <= v. */
    void move_close_to(vid_t v) {
        if (curvid >= v) return;
        // Keys are stored negated, so lower_bound(-v) finds the largest
        // indexed vertex that is <= v.
        std::map<int,indexentry>::iterator lowerbd_iter = sparse_index.lower_bound(-((int)v));
        int closest_vid = -((int)lowerbd_iter->first);
        assert(closest_vid>=0);
        indexentry closest_offset = lowerbd_iter->second;
        assert(closest_vid <= (int)v);
        if (closest_vid > (int)curvid) { /* Note: this will fail if we have over 2B vertices! */
            logstream(LOG_DEBUG)
            << "Sliding shard, start: " << range_st << " moved to: " << closest_vid << " " << closest_offset.adjoffset << ", asked for : " << v << " was in: curvid= " << curvid << " " << adjoffset <<  std::endl;
            if (curblock != NULL)  // Move the pointer - this may invalidate the curblock, but it is being checked later
                curblock->ptr += closest_offset.edataoffset - edataoffset;
            if (curadjblock != NULL)
                curadjblock->ptr += closest_offset.adjoffset - adjoffset;

            curvid = (vid_t)closest_vid;
            adjoffset = closest_offset.adjoffset;
            edataoffset = closest_offset.edataoffset;
            return;
        } else {
            // Do nothing - just continue from current pos.
            return;
        }
    }

    /**
     * Ensures the current edge-data block covers [edataoffset,
     * edataoffset+toread); loads the next block-aligned block if not.
     */
    inline void check_curblock(size_t toread) {
        if (curblock == NULL || curblock->end < edataoffset+toread) {
            if (curblock != NULL) {
                if (!curblock->active) {
                    curblock->release(iomgr);
                }
            }
            // Load next
            std::string blockfilename = filename_shard_edata_block(filename_edata, (int) (edataoffset / blocksize), blocksize);
            int edata_session = iomgr->open_session(blockfilename, false, true);
            sblock<ET> newblock(edata_session, edata_session, true, blockfilename);

            // We align blocks always to the blocksize, even if that requires
            // allocating and reading some unnecessary data.
            newblock.offset = (edataoffset / blocksize) * blocksize;  // Align
            size_t correction = edataoffset - newblock.offset;
            newblock.end = std::min(edatafilesize, newblock.offset + blocksize);
            assert(newblock.end >= newblock.offset);
            int realsize = get_block_uncompressed_size(blockfilename, newblock.end - newblock.offset);
            iomgr->managed_malloc(edata_session, &newblock.data, realsize, newblock.offset);
            newblock.ptr = newblock.data + correction;
            activeblocks.push_back(newblock);
            curblock = &activeblocks[activeblocks.size()-1];
            curblock->active = true;
            curblock->read_now(iomgr);
        }
    }

    /**
     * Ensures the current adjacency block covers [adjoffset,
     * adjoffset+toread); otherwise reads the next block synchronously.
     */
    inline void check_adjblock(size_t toread) {
        if (curadjblock == NULL || curadjblock->end <= adjoffset + toread) {
            if (curadjblock != NULL) {
                curadjblock->release(iomgr);
                delete curadjblock;
                curadjblock = NULL;
            }
            sblock<ET> * newblock = new sblock<ET>(0, adjfile_session);
            newblock->offset = adjoffset;
            newblock->end = std::min(adjfilesize, adjoffset+blocksize);
            assert(newblock->end > 0);
            assert(newblock->end >= newblock->offset);
            iomgr->managed_malloc(adjfile_session, &newblock->data, newblock->end - newblock->offset, adjoffset);
            newblock->ptr = newblock->data;
            metrics_entry me = m.start_time();
            iomgr->managed_preada_now(adjfile_session, &newblock->data, newblock->end - newblock->offset, adjoffset);
            m.stop_time(me, "blockload");
            curadjblock = newblock;
        }
    }

    /** Reads one value of type U from the adjacency stream. */
    template <typename U>
    inline U read_val() {
        check_adjblock(sizeof(U));
        U res = *((U*)curadjblock->ptr);
        adjoffset += sizeof(U);
        curadjblock->ptr += sizeof(U);
        return res;
    }

    /**
     * Returns a pointer to the next edge's chivector (advancing the
     * edge-data cursor), or NULL in adjacency-only mode. Each edge occupies
     * one sizeof(int) slot in the block's indexing.
     */
    inline ET * read_edgeptr() {
        if (only_adjacency) return NULL;
        check_curblock(sizeof(int));
        edataoffset += sizeof(int);
        int blockedgeidx = (curblock->ptr - curblock->data) / sizeof(int);
        curblock->ptr += sizeof(int);
        assert(curblock->dynblock != NULL);
        return curblock->dynblock->edgevec(blockedgeidx);
    }

    /** Skips n records of sz bytes each in the adjacency stream (and the
        corresponding n edge-data slots). */
    inline void skip(int n, int sz) {
        size_t tot = n * sz;
        adjoffset += tot;
        if (curadjblock != NULL)
            curadjblock->ptr += tot;
        edataoffset += sizeof(int) * n;
        if (curblock != NULL)
            curblock->ptr += sizeof(int) * n;
    }

public:

    /**
     * Read out-edges for vertices.
     * Streams forward from 'start', attaching out-edges to the scheduled
     * vertices in prealloc (indexed by vertex id - start).
     * @param nvecs number of vertices in the window
     * @param start first vertex id of the window
     * @param prealloc preallocated vertex objects to fill
     * @param record_index if true, record sparse-index entries while reading
     * @param disable_writes passed to block release/commit handling
     */
    void read_next_vertices(int nvecs, vid_t start, std::vector<svertex_t> & prealloc, bool record_index=false, bool disable_writes=false)  {
        metrics_entry me = m.start_time();
        if (!record_index)
            move_close_to(start);

        /* Release the blocks we do not need anymore */
        curblock = NULL;
        release_prior_to_offset(false, disable_writes);
        assert(activeblocks.size() <= 1);

        /* Read next */
        if (!activeblocks.empty() && !only_adjacency) {
            curblock = &activeblocks[0];
        }
        vid_t lastrec = start;
        window_start_edataoffset = edataoffset;

        for(int i=((int)curvid) - ((int)start); i<nvecs; i++) {
            if (adjoffset >= adjfilesize) break;

            // TODO: skip unscheduled vertices.

            int n;
            if (record_index && (size_t)(curvid - lastrec) >= (size_t) std::max((int)100000, nvecs/16)) {
                save_offset();
                lastrec = curvid;
            }
            // Decode the per-vertex count (same encoding as write_shards):
            // 0x00 + k = this and the next k vertices are empty;
            // 0xff = 32-bit count follows; otherwise the byte is the count.
            uint8_t ns = read_val<uint8_t>();
            if (ns == 0x00) {
                curvid++;
                uint8_t nz = read_val<uint8_t>();
                curvid += nz;
                i += nz;
                continue;
            }

            if (ns == 0xff) {
                n = read_val<uint32_t>();
            } else {
                n = ns;
            }

            if (i<0) {
                // Just skipping
                skip(n, sizeof(vid_t));
            } else {
                svertex_t& vertex = prealloc[i];
                assert(vertex.id() == curvid);

                if (vertex.scheduled) {
                    while(--n >= 0) {
                        bool special_edge = false;
                        vid_t target = (sizeof(ET) == sizeof(ETspecial) ? read_val<vid_t>() : translate_edge(read_val<vid_t>(), special_edge));
                        ET * evalue = read_edgeptr();

                        vertex.add_outedge(target, evalue, special_edge);

                        if (!((target >= range_st && target <= range_end))) {
                            logstream(LOG_ERROR) << "Error : " << target << " not in [" << range_st << " - " << range_end << "]" << std::endl;
                            iomgr->print_session(adjfile_session);
                        }
                        assert(target >= range_st && target <= range_end);
                    }
                } else {
                    // This vertex was not scheduled, so we can just skip its edges.
                    skip(n, sizeof(vid_t));
                }
            }
            curvid++;
        }
        m.stop_time(me, "read_next_vertices");
        curblock = NULL;
    }

    /**
     * Commit modifications.
     */
    void commit(sblock<ET> &b, bool synchronously, bool disable_writes=false) {
        if (synchronously) {
            metrics_entry me = m.start_time();
            if (!disable_writes) b.commit_now(iomgr);
            m.stop_time(me, "commit");
            b.release(iomgr);
        } else {
            if (!disable_writes) b.commit_async(iomgr);
            else b.release(iomgr);
        }
    }

    /**
     * Release all buffers
     */
    void flush() {
        release_prior_to_offset(true);
        if (curadjblock != NULL) {
            curadjblock->release(iomgr);
            delete curadjblock;
            curadjblock = NULL;
        }
    }

    /**
     * Set the position of the sliding shard.
     */
    void set_offset(size_t newoff, vid_t _curvid, size_t edgeptr) {
        this->adjoffset = newoff;
        this->curvid = _curvid;
        this->edataoffset = edgeptr;
        if (curadjblock != NULL) {
            curadjblock->release(iomgr);
            delete curadjblock;
            curadjblock = NULL;
        }
    }

    /**
     * Release blocks that come prior to the current offset/
     */
    void release_prior_to_offset(bool all=false, bool disable_writes=false) {  // disable writes is for the dynamic case
        for(int i=(int)activeblocks.size() - 1; i >= 0; i--) {
            sblock<ET> &b = activeblocks[i];
            if (b.end <= edataoffset || all) {
                commit(b, all, disable_writes);
                activeblocks.erase(activeblocks.begin() + (unsigned int)i);
            }
        }
    }

    /** Returns shard window state as a (partial) JSON fragment. */
    std::string get_info_json() {
        std::stringstream json;
        json << "\"size\": ";
        json << edatafilesize << std::endl;
        json << ", \"windowStart\": ";
        json << window_start_edataoffset;
        json << ", \"windowEnd\": ";
        json << edataoffset;
        json << ", \"intervalStart\": ";
        json << range_st;
        json << ", \"intervalEnd\": ";
        json << range_end;
        return json.str();
    }

};
};
#endif
| C++ |
/**
* @file
* @author Aapo Kyrola <akyrola@cs.cmu.edu>
* @version 1.0
*
* @section LICENSE
*
* Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @section DESCRIPTION
*
* Dynamic edge data version: The memory shard.
* This class should only be accessed internally by the GraphChi engine.
*/
#ifndef DEF_GRAPHCHI_MEMSHARD
#define DEF_GRAPHCHI_MEMSHARD
#include <iostream>
#include <cstdio>
#include <sstream>
#include <vector>
#include <fcntl.h>
#include <unistd.h>
#include <assert.h>
#include <string>
#include "api/graph_objects.hpp"
#include "metrics/metrics.hpp"
#include "io/stripedio.hpp"
#include "graphchi_types.hpp"
#include "shards/dynamicdata/dynamicblock.hpp"
namespace graphchi {
/**
 * Memory shard for dynamically-sized edge data (DYNAMICEDATA build).
 * Holds one shard's full adjacency data in memory and its edge-data
 * blocks as lazily materialized dynamicdata_block wrappers.
 * Should only be accessed internally by the GraphChi engine.
 */
template <typename VT, typename ET, typename svertex_t = graphchi_vertex<VT, ET> >
class memory_shard {

    stripedio * iomgr;                    // I/O manager used for all session-based reads/writes
    std::string filename_edata;           // base filename of the shard's edge-data blocks
    std::string filename_adj;             // filename of the shard's adjacency file
    vid_t range_st;                       // first vertex id of this shard's interval
    vid_t range_end;                      // last vertex id of this shard's interval
    size_t adjfilesize;                   // adjacency file size in bytes
    size_t edatafilesize;                 // logical (uncompressed) edge-data size in bytes
    size_t edgeptr;                       // running byte offset into edge data during load_vertices()
    vid_t streaming_offset_vid;           // vertex id at which streaming should continue
    size_t streaming_offset; // The offset where streaming should continue
    size_t range_start_offset; // First byte for this range's vertices (used for writing only outedges)
    size_t range_start_edge_ptr;          // edge-data byte offset where this range's out-edges begin
    size_t streaming_offset_edge_ptr;     // edge-data byte offset at the streaming continuation point
    uint8_t * adjdata;                    // in-memory adjacency data (filled by streaming reader)
    char ** edgedata;                     // per-block raw (serialized) edge-data buffers
    std::vector<size_t> blocksizes;       // uncompressed size of each edge-data block
    std::vector< dynamicdata_block<ET> * > dynamicblocks;  // lazily created wrappers over edgedata[i]
    uint64_t chunkid;
    std::vector<int> block_edatasessions; // one I/O session per edge-data block
    int adj_session;                      // I/O session for the adjacency file (-1 = not open)
    streaming_task adj_stream_session;    // background streaming read of the adjacency file
    bool is_loaded;                       // set by load(), cleared by commit()
    size_t blocksize;                     // edge-data block size in bytes
    metrics &m;

public:
    bool only_adjacency;                  // if true, edge data is never loaded (adjacency-only pass)

    /* Dynamic edata */
    /**
     * Constructor; does not perform any I/O — call load() to read the shard.
     */
    memory_shard(stripedio * iomgr,
                 std::string _filename_edata,
                 std::string _filename_adj,
                 vid_t _range_start,
                 vid_t _range_end,
                 size_t _blocksize,
                 metrics &_m) : iomgr(iomgr), filename_edata(_filename_edata),
                 filename_adj(_filename_adj),
                 range_st(_range_start), range_end(_range_end), blocksize(_blocksize), m(_m) {
        adjdata = NULL;
        only_adjacency = false;
        is_loaded = false;
        adj_session = -1;
        edgedata = NULL;
    }

    /* Dynamic edata */
    /**
     * Releases any still-held block buffers, dynamic-block wrappers and
     * I/O sessions. Blocks already released by commit() have edgedata[i]
     * set to NULL and are skipped.
     */
    ~memory_shard() {
        int nblocks = (int) block_edatasessions.size();
        for(int i=0; i < nblocks; i++) {
            if (edgedata[i] != NULL) {
                iomgr->managed_release(block_edatasessions[i], &edgedata[i]);
                iomgr->close_session(block_edatasessions[i]);
            }
            if (dynamicblocks[i] != NULL)
                delete dynamicblocks[i];
            dynamicblocks[i] = NULL;
        }
        dynamicblocks.clear();
        if (adj_session >= 0) {
            if (adjdata != NULL) iomgr->managed_release(adj_session, &adjdata);
            iomgr->close_session(adj_session);
        }
        if (edgedata != NULL)
            free(edgedata);
        edgedata = NULL;
    }

    /* Dynamic edata */
    /**
     * Serializes dynamic block i back to disk (if it was materialized),
     * records its new uncompressed size, then releases its buffer and
     * closes its I/O session.
     */
    void write_and_release_block(int i) {
        std::string block_filename = filename_shard_edata_block(filename_edata, i, blocksize);
        dynamicdata_block<ET> * dynblock = dynamicblocks[i];
        if (dynblock != NULL) {
            uint8_t * outdata;
            int outsize;
            // Serialize the dynamic block; write() allocates outdata and sets outsize.
            dynblock->write(&outdata, outsize);
            write_block_uncompressed_size(block_filename, outsize);
            iomgr->managed_pwritea_now(block_edatasessions[i], &outdata, outsize, 0);
            iomgr->managed_release(block_edatasessions[i], &edgedata[i]);
            iomgr->close_session(block_edatasessions[i]);
            free(outdata);
            delete dynblock;
        }
        dynamicblocks[i] = NULL;
    }

    /* Dynamic edata */
    /**
     * Writes modified edge data back to disk and releases buffers.
     * @param commit_inedges  write every block (in-edges are scattered over the whole shard)
     * @param commit_outedges write only the contiguous window of blocks holding this
     *                        range's out-edges; other blocks are just released
     */
    void commit(bool commit_inedges, bool commit_outedges) {
        if (block_edatasessions.size() == 0 || only_adjacency) return;
        assert(is_loaded);
        metrics_entry cm = m.start_time();
        /**
         * This is an optimization that is relevant only if memory shard
         * has been used in a case where only out-edges are considered.
         * Out-edges are in a continuous "window", while in-edges are
         * scattered all over the shard
         */
        int nblocks = (int) block_edatasessions.size();

        if (commit_inedges) {
            for(int i=0; i < nblocks; i++) {
                /* NOTE: WRITE ALL BLOCKS SYNCHRONOUSLY */
                write_and_release_block(i);
                edgedata[i] = NULL;
            }
        } else if (commit_outedges) {
            size_t last = streaming_offset_edge_ptr;
            if (last == 0){
                // rollback
                last = edatafilesize;
            }
            //char * bufp = ((char*)edgedata + range_start_edge_ptr);
            int startblock = (int) (range_start_edge_ptr / blocksize);
            int endblock = (int) (last / blocksize);
            for(int i=0; i < nblocks; i++) {
                if (i >= startblock && i <= endblock) {
                    // NOTE(review): write_and_release_block() already calls
                    // close_session() for blocks it writes, and close_session()
                    // is called again below — looks like a double close; confirm
                    // close_session() tolerates repeated calls.
                    write_and_release_block(i);
                } else {
                    iomgr->managed_release(block_edatasessions[i], &edgedata[i]);
                }
                edgedata[i] = NULL;
                iomgr->close_session(block_edatasessions[i]);
            }
        }
        m.stop_time(cm, "memshard_commit");
        iomgr->managed_release(adj_session, &adjdata);
        // FIXME: this is duplicated code from destructor
        for(int i=0; i < nblocks; i++) {
            if (edgedata[i] != NULL) {
                iomgr->managed_release(block_edatasessions[i], &edgedata[i]);
            }
        }
        block_edatasessions.clear();
        is_loaded = false;
    }

    /** @return true if load() has been called and commit() has not yet reset it. */
    bool loaded() {
        return is_loaded;
    }

private:
    /* Dynamic edata */
    /**
     * Opens every existing edge-data block file of this shard, allocates a
     * buffer per block, and reads the blocks in synchronously. Asynchronous
     * loading is not supported with dynamic edge data.
     */
    void load_edata() {
        bool async_inedgedata_loading = false; // Not supported with dynamic edgedata
        // Dynamic edata is addressed in sizeof(int)-sized slots; blocksize must align.
        assert(blocksize % sizeof(int) == 0);
        int nblocks = (int) (edatafilesize / blocksize + (edatafilesize % blocksize != 0));
        edgedata = (char **) calloc(nblocks, sizeof(char*));
        size_t compressedsize = 0;
        int blockid = 0;

        while(true) {
            std::string block_filename = filename_shard_edata_block(filename_edata, blockid, blocksize);
            if (file_exists(block_filename)) {
                size_t fsize = get_block_uncompressed_size(block_filename, std::min(edatafilesize - blocksize * blockid, blocksize)); //std::min(edatafilesize - blocksize * blockid, blocksize);
                compressedsize += get_filesize(block_filename);
                int blocksession = iomgr->open_session(block_filename, false, true); // compressed
                block_edatasessions.push_back(blocksession);
                blocksizes.push_back(fsize);
                edgedata[blockid] = NULL;
                iomgr->managed_malloc(blocksession, &edgedata[blockid], fsize, 0);
                if (async_inedgedata_loading) {
                    assert(false);  // unreachable: async loading disabled above
                } else {
                    iomgr->managed_preada_now(blocksession, &edgedata[blockid], fsize, 0);
                }
                dynamicblocks.push_back(NULL);  // wrapper materialized lazily in check_block_initialized()
                blockid++;
            } else {
                if (blockid == 0) {
                    logstream(LOG_ERROR) << "Shard block file did not exists:" << block_filename << std::endl;
                }
                break;
            }
        }
        assert(blockid == nblocks);
        logstream(LOG_DEBUG) << "Compressed/full size: " << compressedsize * 1.0 / edatafilesize <<
            " number of blocks: " << nblocks << std::endl;
    }

    /* Initialize a dynamic block if required */
    // Deserializes edgedata[blockid] into a dynamicdata_block on first access.
    void check_block_initialized(int blockid) {
        if (dynamicblocks[blockid] == NULL) {
            std::string block_filename = filename_shard_edata_block(filename_edata, blockid, blocksize);
            size_t fsize = get_block_uncompressed_size(block_filename, std::min(edatafilesize - blocksize * blockid, blocksize)); //std::min(edatafilesize - blocksize * blockid, blocksize);
            // Number of edge slots in this block (last block may be partial).
            int nedges = std::min(edatafilesize - blocksize * blockid, blocksize) / sizeof(int);
            dynamicblocks[blockid] = new dynamicdata_block<ET>(nedges, (uint8_t*) edgedata[blockid], fsize);
        }
    }

public:
    /* Dynamic edata */
    /**
     * Starts loading the shard: launches a streaming read of the adjacency
     * file and (unless only_adjacency) synchronously loads the edge data.
     */
    void load() {
        is_loaded = true;
        adjfilesize = get_filesize(filename_adj);
        edatafilesize = get_shard_edata_filesize<ET>(filename_edata);

#ifdef SUPPORT_DELETIONS
        async_inedgedata_loading = false; // Currently we encode the deleted status of an edge into the edge value (should be changed!),
        // so we need the edge data while loading
#endif

        //preada(adjf, adjdata, adjfilesize, 0);

        adj_session = iomgr->open_session(filename_adj, true);
        iomgr->managed_malloc(adj_session, &adjdata, adjfilesize, 0);
        adj_stream_session = streaming_task(iomgr, adj_session, adjfilesize, (char**) &adjdata);

        iomgr->launch_stream_reader(&adj_stream_session);
        /* Initialize edge data asynchonous reading */
        if (!only_adjacency) {
            load_edata();
        }
    }

    /* Dynamic edata */
    /**
     * Busy-waits (20 ms sleeps) until the background adjacency stream has
     * read at least `toread` bytes past position `pos`.
     */
    inline void check_stream_progress(int toread, size_t pos) {
        if (adj_stream_session.curpos == adjfilesize) return;

        while(adj_stream_session.curpos < toread+pos) {
            usleep(20000);
            if (adj_stream_session.curpos == adjfilesize) return;
        }
    }

    /* Dynamic edata */
    /**
     * Decodes the adjacency data and attaches in-/out-edges to the scheduled
     * vertices in prealloc (which covers [window_st, window_en]). Also records
     * the offsets where streaming should continue after this window.
     *
     * Adjacency encoding per vertex: one count byte; 0x00 means the next byte
     * is a run-length of consecutive zero-degree vertices; 0xff means a 32-bit
     * edge count follows; otherwise the byte is the edge count itself.
     */
    void load_vertices(vid_t window_st, vid_t window_en, std::vector<svertex_t> & prealloc, bool inedges=true, bool outedges=true) {
        /* Find file size */
        m.start_time("memoryshard_create_edges");

        assert(adjdata != NULL);

        // Now start creating vertices
        uint8_t * ptr = adjdata;
        uint8_t * end = ptr + adjfilesize;
        vid_t vid = 0;
        edgeptr = 0;

        streaming_offset = 0;
        streaming_offset_vid = 0;
        streaming_offset_edge_ptr = 0;
        range_start_offset = adjfilesize;
        range_start_edge_ptr = edatafilesize;

        bool setoffset = false;
        bool setrangeoffset = false;

        while (ptr < end) {
            check_stream_progress(6, ptr-adjdata); // read at least 6 bytes
            if (!setoffset && vid > range_end) {
                // This is where streaming should continue. Notice that because of the
                // non-zero counters, this might be a bit off.
                streaming_offset = ptr-adjdata;
                streaming_offset_vid = vid;
                streaming_offset_edge_ptr = edgeptr;
                setoffset = true;
            }
            if (!setrangeoffset && vid>=range_st) {
                range_start_offset = ptr-adjdata;
                range_start_edge_ptr = edgeptr;
                setrangeoffset = true;
            }

            uint8_t ns = *ptr;
            int n;

            ptr += sizeof(uint8_t);

            if (ns == 0x00) {
                // next value tells the number of vertices with zeros
                uint8_t nz = *ptr;
                ptr += sizeof(uint8_t);
                vid++;
                vid += nz;
                continue;
            }

            if (ns == 0xff) {  // If 255 is not enough, then stores a 32-bit integer after.
                n = *((uint32_t*)ptr);
                ptr += sizeof(uint32_t);
            } else {
                n = ns;
            }
            svertex_t* vertex = NULL;

            if (vid>=window_st && vid <=window_en) { // TODO: Make more efficient
                vertex = &prealloc[vid-window_st];
                if (!vertex->scheduled) vertex = NULL;
            }
            check_stream_progress(n * 4, ptr - adjdata);
            bool any_edges = false;
            while(--n>=0) {
                int blockid = (int) (edgeptr / blocksize);
                vid_t target = *((vid_t*) ptr);
                ptr += sizeof(vid_t);
                if (vertex != NULL && outedges)
                {
                    // Out-edge of a scheduled vertex in this window.
                    check_block_initialized(blockid);
                    vertex->add_outedge(target, (only_adjacency ? NULL : dynamicblocks[blockid]->edgevec((edgeptr % blocksize)/sizeof(int))), false);
                }

                if (target >= window_st)  {
                    if (target <= window_en) {
                        /* In edge */
                        if (inedges) {
                            svertex_t & dstvertex = prealloc[target - window_st];
                            if (dstvertex.scheduled) {
                                any_edges = true;
                                //  assert(only_adjacency ||  edgeptr < edatafilesize);
                                check_block_initialized(blockid);
                                ET * eptr = (only_adjacency ? NULL : dynamicblocks[blockid]->edgevec((edgeptr % blocksize)/sizeof(int)));

                                dstvertex.add_inedge(vid,  (only_adjacency ? NULL : eptr), false);
                                dstvertex.parallel_safe = dstvertex.parallel_safe && (vertex == NULL); // Avoid if
                            }
                        }
                    } else { // Note, we cannot skip if there can be "special edges". FIXME so dirty.
                        // This vertex has no edges any more for this window, bail out
                        if (vertex == NULL) {
                            ptr += sizeof(vid_t) * n;
                            edgeptr += (n + 1) * sizeof(int);
                            break;
                        }
                    }
                }
                edgeptr += sizeof(int);  // dynamic edata advances one int-slot per edge
            }

            if (any_edges && vertex != NULL) {
                vertex->parallel_safe = false;
            }
            vid++;
        }
        m.stop_time("memoryshard_create_edges", false);
    }

    /** @return adjacency-file offset where the sliding shard should continue. */
    size_t offset_for_stream_cont() {
        return streaming_offset;
    }
    /** @return vertex id where the sliding shard should continue. */
    vid_t offset_vid_for_stream_cont() {
        return streaming_offset_vid;
    }
    /** @return edge-data offset where the sliding shard should continue. */
    size_t edata_ptr_for_stream_cont() {
        return streaming_offset_edge_ptr;
    }
};
};
#endif
| C++ |
/**
* @file
* @author Aapo Kyrola <akyrola@cs.cmu.edu>
* @version 1.0
*
* @section LICENSE
*
* Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @section DESCRIPTION
*
* The sliding shard.
*/
#ifdef DYNAMICEDATA
#include "shards/dynamicdata/slidingshard.hpp"
#else
#ifndef DEF_GRAPHCHI_SLIDINGSHARD
#define DEF_GRAPHCHI_SLIDINGSHARD
#include <iostream>
#include <cstdio>
#include <sstream>
#include <vector>
#include <fcntl.h>
#include <unistd.h>
#include <assert.h>
#include <string>
#include "api/graph_objects.hpp"
#include "metrics/metrics.hpp"
#include "logger/logger.hpp"
#include "io/stripedio.hpp"
#include "graphchi_types.hpp"
namespace graphchi {
/**
* A streaming block.
*/
/**
 * A streaming block: one in-memory window of either the adjacency file or
 * an edge-data block file, together with the I/O sessions used to read it
 * and write it back.
 *
 * Fix: both constructors previously left offset, end and ptr uninitialized,
 * and the default constructor also left is_edata_block uninitialized even
 * though every commit/read method branches on it — reading those members
 * before assignment was undefined behavior. All members are now initialized;
 * callers that assign offset/end/ptr after construction are unaffected.
 */
struct sblock {
    int writedesc;        // session used for writing back (-1/unused when read-only)
    int readdesc;         // session used for reading
    size_t offset;        // first byte of this window in the file
    size_t end;           // one past the last byte of this window
    uint8_t * data;       // buffer of size (end - offset); NULL when released
    uint8_t * ptr;        // read/write cursor inside data
    bool active;          // true if a scheduled vertex touched this block (must be committed)
    bool is_edata_block;  // true: compressed edge-data block (always written whole, at offset 0)

    sblock() : writedesc(0), readdesc(0), offset(0), end(0),
               data(NULL), ptr(NULL), active(false), is_edata_block(false) {}

    sblock(int wdesc, int rdesc, bool is_edata_block=false) :
        writedesc(wdesc), readdesc(rdesc), offset(0), end(0),
        data(NULL), ptr(NULL), active(false), is_edata_block(is_edata_block) {}

    /** Asynchronously write the block back; edata blocks give up ownership of data. */
    void commit_async(stripedio * iomgr) {
        if (active && data != NULL && writedesc >= 0) {
            if (is_edata_block) {
                // Compressed blocks are written whole, from file offset 0.
                iomgr->managed_pwritea_async(writedesc, &data, end-offset, 0, true, true);
                data = NULL;
            } else {
                iomgr->managed_pwritea_async(writedesc, &data, end-offset, offset, true);
            }
        }
    }

    /** Synchronously write the block back (edata blocks: whole block; others: up to cursor). */
    void commit_now(stripedio * iomgr) {
        if (active && data != NULL && writedesc >= 0) {
            size_t len = ptr-data;
            if (len > end-offset) len = end-offset;  // clamp cursor overshoot
            if (is_edata_block) {
                iomgr->managed_pwritea_now(writedesc, &data, end - offset, 0); /* Need to write whole block in the compressed regime */
            } else {
                iomgr->managed_pwritea_now(writedesc, &data, len, offset);
            }
        }
    }

    /** Start an asynchronous read of the whole window into data. */
    void read_async(stripedio * iomgr) {
        if (is_edata_block) {
            iomgr->managed_preada_async(readdesc, &data, (end - offset), 0);
        } else {
            iomgr->managed_preada_async(readdesc, &data, end - offset, offset);
        }
    }

    /** Synchronously read the whole window into data. */
    void read_now(stripedio * iomgr) {
        if (is_edata_block) {
            iomgr->managed_preada_now(readdesc, &data, end-offset, 0);
        } else {
            iomgr->managed_preada_now(readdesc, &data, end-offset, offset);
        }
    }

    /** Release the buffer; edata blocks also own and close their read session. */
    void release(stripedio * iomgr) {
        if (data != NULL) {
            iomgr->managed_release(readdesc, &data);
            if (is_edata_block) {
                iomgr->close_session(readdesc);
            }
        }
        data = NULL;
    }
};
/**
 * One entry of the sliding shard's sparse index: the adjacency-file offset
 * and edge-data offset at which a given vertex's edge list starts.
 */
struct indexentry {
    size_t adjoffset;
    size_t edataoffset;

    indexentry(size_t adj_off, size_t edata_off)
        : adjoffset(adj_off), edataoffset(edata_off) {}
};
/*
* Graph shard that is streamed. I.e, it can only read in one direction, a chunk
* a time.
*/
/**
 * Graph shard that is streamed: edges can only be read forward, one window
 * at a time. Maintains a sparse index so a window can start near an
 * arbitrary vertex without re-scanning from the beginning.
 */
template <typename VT, typename ET, typename svertex_t = graphchi_vertex<VT, ET>, typename ETspecial = ET>
class sliding_shard {

    stripedio * iomgr;                    // I/O manager for all session-based reads/writes
    std::string filename_edata;           // base filename of the shard's edge-data blocks
    std::string filename_adj;             // filename of the shard's adjacency file
    vid_t range_st, range_end;            // vertex-id interval of this shard
    size_t blocksize;                     // streaming block size (rounded up to a multiple of sizeof(ET))
    vid_t curvid;                         // next vertex id to be read
    size_t adjoffset, edataoffset, adjfilesize, edatafilesize;  // current offsets / file sizes (bytes)
    size_t window_start_edataoffset;      // edge-data offset at the start of the current window
    std::vector<sblock> activeblocks;     // edge-data blocks currently held in memory
    int adjfile_session;                  // I/O session of the adjacency file
    int writedesc;
    sblock * curblock;                    // current edge-data block (points into activeblocks)
    sblock * curadjblock;                 // current adjacency block (heap-allocated)
    metrics &m;
    std::map<int, indexentry> sparse_index;  // Sparse index that can be created in the fly
    bool disable_writes;
    bool async_edata_loading;             // load edge data asynchronously when edges are not "computational"
    //  bool need_read_outedges;   // Disabled - does not work with compressed data: whole block needs to be read.

public:
    bool only_adjacency;                  // if true, edge data is never read

    /**
     * Constructor; opens the adjacency session and seeds the sparse index
     * with vertex 0 at offset 0. Performs no bulk I/O.
     */
    sliding_shard(stripedio * iomgr, std::string _filename_edata, std::string _filename_adj, vid_t _range_st, vid_t _range_en, size_t _blocksize, metrics &_m,
                  bool _disable_writes=false, bool onlyadj = false) :
        iomgr(iomgr),
        filename_edata(_filename_edata),
        filename_adj(_filename_adj),
        range_st(_range_st),
        range_end(_range_en),
        blocksize(_blocksize),
        m(_m),
        disable_writes(_disable_writes) {
        curvid = 0;
        adjoffset = 0;
        edataoffset = 0;
        // NOTE(review): this unconditionally overwrites the _disable_writes
        // constructor argument — confirm whether the parameter is intended
        // to be honored here.
        disable_writes = false;
        only_adjacency = onlyadj;
        curblock = NULL;
        curadjblock = NULL;
        window_start_edataoffset = 0;

        // Round blocksize up so edge values never straddle a block boundary.
        while(blocksize % sizeof(ET) != 0) blocksize++;
        assert(blocksize % sizeof(ET)==0);

        adjfilesize = get_filesize(filename_adj);

        if (!only_adjacency) {
            edatafilesize = get_shard_edata_filesize<ET>(filename_edata);
            logstream(LOG_DEBUG) << "Total edge data size: " << edatafilesize << ", " << filename_edata
            << "sizeof(ET): " << sizeof(ET) << std::endl;
        } else {
            // Nothing
            // NOTE(review): edatafilesize is left unset in only-adjacency mode,
            // but num_edges() still reads it — verify callers never use
            // num_edges() on an adjacency-only shard.
        }

        adjfile_session = iomgr->open_session(filename_adj, true);

        save_offset();  // index entry for vertex 0 at offset 0

        async_edata_loading = !svertex_t().computational_edges();
#ifdef SUPPORT_DELETIONS
        async_edata_loading = false; // See comment above for memshard, async_edata_loading = false;
#endif
    }

    /** Commits/releases all active blocks and closes the adjacency session. */
    ~sliding_shard() {
        release_prior_to_offset(true);
        if (curblock != NULL) {
            curblock->release(iomgr);
            delete curblock;
            curblock = NULL;
        }
        if (curadjblock != NULL) {
            curadjblock->release(iomgr);
            delete curadjblock;
            curadjblock = NULL;
        }
        iomgr->close_session(adjfile_session);
    }

    /** @return number of edges in this shard (edge-data size / sizeof(ET)). */
    size_t num_edges() {
        return edatafilesize / sizeof(ET);
    }

protected:
    size_t get_adjoffset() { return adjoffset; }
    size_t get_edataoffset() { return edataoffset; }

    /** Records (curvid -> current offsets) in the sparse index. */
    void save_offset() {
        // Note, so that we can use the lower bound operation in map, we need
        // to insert indices in reverse order
        sparse_index.insert(std::pair<int, indexentry>(-((int)curvid), indexentry(adjoffset, edataoffset)));
    }

    /**
     * Jumps the cursor forward to the closest indexed vertex <= v
     * (keys are negated, so lower_bound finds it). No-op if already at
     * or past v, or if no closer index entry exists.
     */
    void move_close_to(vid_t v) {
        if (curvid >= v) return;
        std::map<int,indexentry>::iterator lowerbd_iter = sparse_index.lower_bound(-((int)v));
        int closest_vid = -((int)lowerbd_iter->first);
        assert(closest_vid>=0);
        indexentry closest_offset = lowerbd_iter->second;
        assert(closest_vid <= (int)v);
        if (closest_vid > (int)curvid) {  /* Note: this will fail if we have over 2B vertices! */
            if (curblock != NULL)   // Move the pointer - this may invalidate the curblock, but it is being checked later
                curblock->ptr += closest_offset.edataoffset - edataoffset;
            if (curadjblock != NULL)
                curadjblock->ptr += closest_offset.adjoffset - adjoffset;

            curvid = (vid_t)closest_vid;
            adjoffset = closest_offset.adjoffset;
            edataoffset = closest_offset.edataoffset;
            return;
        } else {
            // Do nothing - just continue from current pos.
            return;
        }
    }

    /**
     * Ensures curblock covers [edataoffset, edataoffset+toread). If not,
     * releases an inactive current block and opens/allocates the next
     * blocksize-aligned edge-data block (data is read lazily on first use).
     */
    inline void check_curblock(size_t toread) {
        if (curblock == NULL || curblock->end < edataoffset+toread) {
            if (curblock != NULL) {
                if (!curblock->active) {
                    curblock->release(iomgr);
                }
            }
            // Load next
            std::string blockfilename = filename_shard_edata_block(filename_edata, (int) (edataoffset / blocksize), blocksize);
            int edata_session = iomgr->open_session(blockfilename, false, true);
            sblock newblock(edata_session, edata_session, true);
            // We align blocks always to the blocksize, even if that requires
            // allocating and reading some unnecessary data.
            newblock.offset = (edataoffset / blocksize) * blocksize; // Align
            size_t correction = edataoffset - newblock.offset;
            newblock.end = std::min(edatafilesize, newblock.offset + blocksize);
            assert(newblock.end >= newblock.offset);
            iomgr->managed_malloc(edata_session, &newblock.data, newblock.end - newblock.offset, newblock.offset);
            newblock.ptr = newblock.data + correction;

            activeblocks.push_back(newblock);
            curblock = &activeblocks[activeblocks.size()-1];
        }
    }

    /**
     * Ensures curadjblock covers [adjoffset, adjoffset+toread); otherwise
     * releases it and synchronously reads the next adjacency window.
     */
    inline void check_adjblock(size_t toread) {
        if (curadjblock == NULL || curadjblock->end <= adjoffset + toread) {
            if (curadjblock != NULL) {
                curadjblock->release(iomgr);
                delete curadjblock;
                curadjblock = NULL;
            }
            sblock * newblock = new sblock(0, adjfile_session);
            newblock->offset = adjoffset;
            newblock->end = std::min(adjfilesize, adjoffset+blocksize);
            assert(newblock->end > 0);
            assert(newblock->end >= newblock->offset);
            iomgr->managed_malloc(adjfile_session, &newblock->data, newblock->end - newblock->offset, adjoffset);
            newblock->ptr = newblock->data;
            metrics_entry me = m.start_time();
            iomgr->managed_preada_now(adjfile_session, &newblock->data, newblock->end - newblock->offset, adjoffset);
            m.stop_time(me, "blockload");
            curadjblock = newblock;
        }
    }

    /** Reads one value of type U from the adjacency stream and advances the cursor. */
    template <typename U>
    inline U read_val() {
        check_adjblock(sizeof(U));
        U res = *((U*)curadjblock->ptr);
        adjoffset += sizeof(U);
        curadjblock->ptr += sizeof(U);
        return res;
    }

    /** Returns a pointer into the current edge-data block for the next edge value. */
    template <typename U>
    inline U * read_edgeptr() {
        if (only_adjacency) return NULL;
        check_curblock(sizeof(U));
        U * resptr = ((U*)curblock->ptr);
        edataoffset += sizeof(U);
        curblock->ptr += sizeof(U);
        return resptr;
    }

    /** Skips n items of sz bytes in the adjacency stream and n edge values. */
    inline void skip(int n, int sz) {
        size_t tot = n * sz;
        adjoffset += tot;
        if (curadjblock != NULL)
            curadjblock->ptr += tot;
        edataoffset += sizeof(ET)*n;
        if (curblock != NULL)
            curblock->ptr += sizeof(ET)*n;
    }

public:
    /**
     * Read out-edges for vertices.
     */
    /**
     * Streams out-edges for up to nvecs vertices starting at `start` into
     * prealloc. When record_index is true, also builds sparse-index entries
     * every ~max(100000, nvecs/16) vertices.
     * Adjacency encoding is the same as in memory_shard::load_vertices.
     */
    void read_next_vertices(int nvecs, vid_t start,  std::vector<svertex_t> & prealloc, bool record_index=false, bool disable_writes=false)  {
        metrics_entry me = m.start_time();
        if (!record_index)
            move_close_to(start);

        /* Release the blocks we do not need anymore */
        curblock = NULL;
        release_prior_to_offset(false, disable_writes);
        assert(activeblocks.size() <= 1);

        /* Read next */
        if (!activeblocks.empty() && !only_adjacency) {
            curblock = &activeblocks[0];
        }
        vid_t lastrec = start;
        window_start_edataoffset = edataoffset;

        // Loop index i may start negative when curvid < start (skip phase).
        for(int i=((int)curvid) - ((int)start); i<nvecs; i++) {
            if (adjoffset >= adjfilesize) break;

            // TODO: skip unscheduled vertices.

            int n;
            if (record_index && (size_t)(curvid - lastrec) >= (size_t) std::max((int)100000, nvecs/16)) {
                save_offset();
                lastrec = curvid;
            }
            uint8_t ns = read_val<uint8_t>();
            if (ns == 0x00) {
                // Run of zero-degree vertices; next byte is the run length.
                curvid++;
                uint8_t nz = read_val<uint8_t>();
                curvid += nz;
                i += nz;
                continue;
            }

            if (ns == 0xff) {
                n = read_val<uint32_t>();
            } else {
                n = ns;
            }

            if (i<0) {
                // Just skipping
                skip(n, sizeof(vid_t));
            } else {
                svertex_t& vertex = prealloc[i];
                assert(vertex.id() == curvid);

                if (vertex.scheduled) {
                    while(--n >= 0) {
                        bool special_edge = false;
                        vid_t target = (sizeof(ET) == sizeof(ETspecial) ? read_val<vid_t>() : translate_edge(read_val<vid_t>(), special_edge));
                        ET * evalue = (special_edge ? (ET*)read_edgeptr<ETspecial>(): read_edgeptr<ET>());
                        if (!only_adjacency) {
                            if (!curblock->active) {
                                // Block touched for the first time: read its contents now.
                                if (async_edata_loading) {
                                    curblock->read_async(iomgr);
                                } else {
                                    curblock->read_now(iomgr);
                                }
                            }
                            // Note: this needs to be set always because curblock might change during this loop.
                            curblock->active = true; // This block has an scheduled vertex - need to commit
                        }
                        vertex.add_outedge(target, evalue, special_edge);

                        if (!((target >= range_st && target <= range_end))) {
                            logstream(LOG_ERROR) << "Error : " << target << " not in [" << range_st << " - " << range_end << "]" << std::endl;
                            iomgr->print_session(adjfile_session);
                        }
                        assert(target >= range_st && target <= range_end);
                    }
                } else {
                    // This vertex was not scheduled, so we can just skip its edges.
                    skip(n, sizeof(vid_t));
                }
            }
            curvid++;
        }
        m.stop_time(me, "read_next_vertices");
        curblock = NULL;
    }

    /**
     * Commit modifications.
     */
    void commit(sblock &b, bool synchronously, bool disable_writes=false) {
        if (synchronously) {
            metrics_entry me = m.start_time();
            if (!disable_writes) b.commit_now(iomgr);
            m.stop_time(me, "commit");
            b.release(iomgr);
        } else {
            if (!disable_writes) b.commit_async(iomgr);
            else b.release(iomgr);
        }
    }

    /**
     * Release all buffers
     */
    void flush() {
        release_prior_to_offset(true);
        if (curadjblock != NULL) {
            curadjblock->release(iomgr);
            delete curadjblock;
            curadjblock = NULL;
        }
    }

    /**
     * Set the position of the sliding shard.
     */
    void set_offset(size_t newoff, vid_t _curvid, size_t edgeptr) {
        this->adjoffset = newoff;
        this->curvid = _curvid;
        this->edataoffset = edgeptr;
        if (curadjblock != NULL) {
            // Invalidate the cached adjacency window; it will be re-read lazily.
            curadjblock->release(iomgr);
            delete curadjblock;
            curadjblock = NULL;
        }
    }

    /**
     * Release blocks that come prior to the current offset/
     */
    void release_prior_to_offset(bool all=false, bool disable_writes=false) {  // disable writes is for the dynamic case
        // Iterate backwards so erase() does not shift unvisited elements.
        for(int i=(int)activeblocks.size() - 1; i >= 0; i--) {
            sblock &b = activeblocks[i];
            if (b.end <= edataoffset || all) {
                commit(b, all, disable_writes);  // synchronous only when releasing everything
                activeblocks.erase(activeblocks.begin() + (unsigned int)i);
            }
        }
    }

    /**
     * Returns current state as JSON key/value pairs.
     * NOTE(review): the returned string has no surrounding braces —
     * presumably the caller wraps it; confirm against callers.
     */
    std::string get_info_json() {
        std::stringstream json;
        json << "\"size\": ";
        json << edatafilesize << std::endl;
        json << ", \"windowStart\": ";
        json << window_start_edataoffset;
        json << ", \"windowEnd\": ";
        json << edataoffset;
        json << ", \"intervalStart\": ";
        json << range_st;
        json << ", \"intervalEnd\": ";
        json << range_end;
        return json.str();
    }
};
};
#endif
#endif
| C++ |
/**
* @file
* @author Aapo Kyrola <akyrola@cs.cmu.edu>
* @version 1.0
*
* @section LICENSE
*
* Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @section DESCRIPTION
*
* The memory shard. This class should only be accessed internally by the GraphChi engine.
*/
#ifdef DYNAMICEDATA
#include "shards/dynamicdata/memoryshard.hpp"
#else
#ifndef DEF_GRAPHCHI_MEMSHARD
#define DEF_GRAPHCHI_MEMSHARD
#include <iostream>
#include <cstdio>
#include <sstream>
#include <vector>
#include <fcntl.h>
#include <unistd.h>
#include <assert.h>
#include <string>
#include "api/graph_objects.hpp"
#include "metrics/metrics.hpp"
#include "io/stripedio.hpp"
#include "graphchi_types.hpp"
namespace graphchi {
template <typename VT, typename ET, typename svertex_t = graphchi_vertex<VT, ET> >
class memory_shard {
stripedio * iomgr;
std::string filename_edata;
std::string filename_adj;
vid_t range_st;
vid_t range_end;
size_t adjfilesize;
size_t edatafilesize;
size_t edgeptr;
vid_t streaming_offset_vid;
size_t streaming_offset; // The offset where streaming should continue
size_t range_start_offset; // First byte for this range's vertices (used for writing only outedges)
size_t range_start_edge_ptr;
size_t streaming_offset_edge_ptr;
uint8_t * adjdata;
char ** edgedata;
int * doneptr;
std::vector<size_t> blocksizes;
uint64_t chunkid;
std::vector<int> block_edatasessions;
int adj_session;
streaming_task adj_stream_session;
bool async_edata_loading;
bool is_loaded;
size_t blocksize;
metrics &m;
public:
bool only_adjacency;
memory_shard(stripedio * iomgr,
std::string _filename_edata,
std::string _filename_adj,
vid_t _range_start,
vid_t _range_end,
size_t _blocksize,
metrics &_m) : iomgr(iomgr), filename_edata(_filename_edata),
filename_adj(_filename_adj),
range_st(_range_start), range_end(_range_end), blocksize(_blocksize), m(_m) {
adjdata = NULL;
only_adjacency = false;
is_loaded = false;
adj_session = -1;
edgedata = NULL;
doneptr = NULL;
async_edata_loading = !svertex_t().computational_edges();
#ifdef SUPPORT_DELETIONS
async_edata_loading = false; // See comment above for memshard, async_edata_loading = false;
#endif
}
~memory_shard() {
int nblocks = (int) block_edatasessions.size();
for(int i=0; i < nblocks; i++) {
if (edgedata[i] != NULL) {
iomgr->managed_release(block_edatasessions[i], &edgedata[i]);
iomgr->close_session(block_edatasessions[i]);
}
}
if (adj_session >= 0) {
if (adjdata != NULL) iomgr->managed_release(adj_session, &adjdata);
iomgr->close_session(adj_session);
}
if (edgedata != NULL)
free(edgedata);
edgedata = NULL;
if (doneptr != NULL) {
free(doneptr);
}
}
void commit(bool commit_inedges, bool commit_outedges) {
if (block_edatasessions.size() == 0 || only_adjacency) return;
assert(is_loaded);
metrics_entry cm = m.start_time();
/**
* This is an optimization that is relevant only if memory shard
* has been used in a case where only out-edges are considered.
* Out-edges are in a continuous "window", while in-edges are
* scattered all over the shard
*/
int nblocks = (int) block_edatasessions.size();
if (commit_inedges) {
int start_stream_block = (int) (range_start_edge_ptr / blocksize);
for(int i=0; i < nblocks; i++) {
/* Write asynchronously blocks that will not be needed by the sliding windows on
this iteration. */
if (i >= start_stream_block) {
iomgr->managed_pwritea_now(block_edatasessions[i], &edgedata[i], blocksizes[i], 0);
iomgr->managed_release(block_edatasessions[i], &edgedata[i]);
iomgr->close_session(block_edatasessions[i]);
edgedata[i] = NULL;
} else {
iomgr->managed_pwritea_async(block_edatasessions[i], &edgedata[i], blocksizes[i], 0, true, true);
edgedata[i] = NULL;
}
}
} else if (commit_outedges) {
size_t last = streaming_offset_edge_ptr;
if (last == 0){
// rollback
last = edatafilesize;
}
//char * bufp = ((char*)edgedata + range_start_edge_ptr);
int startblock = (int) (range_start_edge_ptr / blocksize);
int endblock = (int) (last / blocksize);
for(int i=0; i < nblocks; i++) {
if (i >= startblock && i <= endblock) {
iomgr->managed_pwritea_now(block_edatasessions[i], &edgedata[i], blocksizes[i], 0);
}
iomgr->managed_release(block_edatasessions[i], &edgedata[i]);
edgedata[i] = NULL;
iomgr->close_session(block_edatasessions[i]);
}
} else {
for(int i=0; i < nblocks; i++) {
iomgr->close_session(block_edatasessions[i]);
}
}
m.stop_time(cm, "memshard_commit");
iomgr->managed_release(adj_session, &adjdata);
// FIXME: this is duplicated code from destructor
for(int i=0; i < nblocks; i++) {
if (edgedata[i] != NULL) {
iomgr->managed_release(block_edatasessions[i], &edgedata[i]);
}
}
block_edatasessions.clear();
is_loaded = false;
}
bool loaded() {
return is_loaded;
}
private:
void load_edata() {
assert(blocksize % sizeof(ET) == 0);
int nblocks = (int) (edatafilesize / blocksize + (edatafilesize % blocksize != 0));
edgedata = (char **) calloc(nblocks, sizeof(char*));
size_t compressedsize = 0;
int blockid = 0;
if (!async_edata_loading) {
doneptr = (int *) malloc(nblocks * sizeof(int));
for(int i=0; i < nblocks; i++) doneptr[i] = 1;
}
while(true) {
std::string block_filename = filename_shard_edata_block(filename_edata, blockid, blocksize);
if (file_exists(block_filename)) {
size_t fsize = std::min(edatafilesize - blocksize * blockid, blocksize);
compressedsize += get_filesize(block_filename);
int blocksession = iomgr->open_session(block_filename, false, true); // compressed
block_edatasessions.push_back(blocksession);
blocksizes.push_back(fsize);
edgedata[blockid] = NULL;
iomgr->managed_malloc(blocksession, &edgedata[blockid], fsize, 0);
if (async_edata_loading) {
iomgr->managed_preada_async(blocksession, &edgedata[blockid], fsize, 0);
} else {
iomgr->managed_preada_async(blocksession, &edgedata[blockid], fsize, 0, (volatile int *)&doneptr[blockid]);
}
blockid++;
} else {
if (blockid == 0) {
logstream(LOG_ERROR) << "Shard block file did not exists:" << block_filename << std::endl;
}
if (blockid < nblocks) {
logstream(LOG_ERROR) << "Did not find block " << block_filename << std::endl;
logstream(LOG_ERROR) << "Going to exit..." << std::endl;
}
break;
}
}
logstream(LOG_DEBUG) << "Compressed/full size: " << compressedsize * 1.0 / edatafilesize <<
" number of blocks: " << nblocks << std::endl;
assert(blockid == nblocks);
}
public:
// TODO: recycle ptr!
void load() {
is_loaded = true;
adjfilesize = get_filesize(filename_adj);
#ifdef SUPPORT_DELETIONS
async_edata_loading = false; // Currently we encode the deleted status of an edge into the edge value (should be changed!),
// so we need the edge data while loading
#endif
//preada(adjf, adjdata, adjfilesize, 0);
adj_session = iomgr->open_session(filename_adj, true);
iomgr->managed_malloc(adj_session, &adjdata, adjfilesize, 0);
adj_stream_session = streaming_task(iomgr, adj_session, adjfilesize, (char**) &adjdata);
iomgr->launch_stream_reader(&adj_stream_session);
/* Initialize edge data asynchonous reading */
if (!only_adjacency) {
edatafilesize = get_shard_edata_filesize<ET>(filename_edata);
load_edata();
}
}
inline void check_stream_progress(int toread, size_t pos) {
if (adj_stream_session.curpos == adjfilesize) return;
while(adj_stream_session.curpos < toread+pos) {
usleep(20000);
if (adj_stream_session.curpos == adjfilesize) return;
}
}
void load_vertices(vid_t window_st, vid_t window_en, std::vector<svertex_t> & prealloc, bool inedges=true, bool outedges=true) {
/* Find file size */
m.start_time("memoryshard_create_edges");
assert(adjdata != NULL);
// Now start creating vertices
uint8_t * ptr = adjdata;
uint8_t * end = ptr + adjfilesize;
vid_t vid = 0;
edgeptr = 0;
streaming_offset = 0;
streaming_offset_vid = 0;
streaming_offset_edge_ptr = 0;
range_start_offset = adjfilesize;
range_start_edge_ptr = edatafilesize;
bool setoffset = false;
bool setrangeoffset = false;
while (ptr < end) {
check_stream_progress(6, ptr-adjdata); // read at least 6 bytes
if (!setoffset && vid > range_end) {
// This is where streaming should continue. Notice that because of the
// non-zero counters, this might be a bit off.
streaming_offset = ptr-adjdata;
streaming_offset_vid = vid;
streaming_offset_edge_ptr = edgeptr;
setoffset = true;
}
if (!setrangeoffset && vid>=range_st) {
range_start_offset = ptr-adjdata;
range_start_edge_ptr = edgeptr;
setrangeoffset = true;
}
uint8_t ns = *ptr;
int n;
ptr += sizeof(uint8_t);
if (ns == 0x00) {
// next value tells the number of vertices with zeros
uint8_t nz = *ptr;
ptr += sizeof(uint8_t);
vid++;
vid += nz;
continue;
}
if (ns == 0xff) { // If 255 is not enough, then stores a 32-bit integer after.
n = *((uint32_t*)ptr);
ptr += sizeof(uint32_t);
} else {
n = ns;
}
svertex_t* vertex = NULL;
if (vid>=window_st && vid <=window_en) { // TODO: Make more efficient
vertex = &prealloc[vid-window_st];
if (!vertex->scheduled) vertex = NULL;
}
check_stream_progress(n * 4, ptr - adjdata);
bool any_edges = false;
while(--n>=0) {
int blockid = (int) (edgeptr / blocksize);
if (!async_edata_loading && !only_adjacency) {
/* Wait until blocks loaded (non-asynchronous version) */
while(doneptr[edgeptr / blocksize] != 0) { usleep(10); }
}
vid_t target = *((vid_t*) ptr);
ptr += sizeof(vid_t);
if (vertex != NULL && outedges)
{
char * eptr = (only_adjacency ? NULL : &(edgedata[blockid][edgeptr % blocksize]));
vertex->add_outedge(target, (only_adjacency ? NULL : (ET*) eptr), false);
}
if (target >= window_st) {
if (target <= window_en) { /* In edge */
if (inedges) {
svertex_t & dstvertex = prealloc[target - window_st];
if (dstvertex.scheduled) {
any_edges = true;
// assert(only_adjacency || edgeptr < edatafilesize);
char * eptr = (only_adjacency ? NULL : &(edgedata[blockid][edgeptr % blocksize]));
dstvertex.add_inedge(vid, (only_adjacency ? NULL : (ET*) eptr), false);
dstvertex.parallel_safe = dstvertex.parallel_safe && (vertex == NULL); // Avoid if
}
}
} else { // Note, we cannot skip if there can be "special edges". FIXME so dirty.
// This vertex has no edges any more for this window, bail out
if (vertex == NULL) {
ptr += sizeof(vid_t) * n;
edgeptr += (n + 1) * sizeof(ET);
break;
}
}
}
edgeptr += sizeof(ET);
}
if (any_edges && vertex != NULL) {
vertex->parallel_safe = false;
}
vid++;
}
m.stop_time("memoryshard_create_edges", false);
}
/* Byte offset in the adjacency file from which sliding-window streaming
   should continue after this memory shard. Set while scanning the shard;
   may be slightly conservative (see note in the scan loop above). */
size_t offset_for_stream_cont() {
return streaming_offset;
}
/* Vertex id corresponding to the streaming continuation offset. */
vid_t offset_vid_for_stream_cont() {
return streaming_offset_vid;
}
/* Edge-data byte offset corresponding to the streaming continuation offset. */
size_t edata_ptr_for_stream_cont() {
return streaming_offset_edge_ptr;
}
};
};
#endif
#endif
| C++ |
/**
* @file
* @author Aapo Kyrola <akyrola@cs.cmu.edu>
* @version 1.0
*
* @section LICENSE
*
* Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @section DESCRIPTION
*
* This header includes all the main headers needed for a GraphChi
* program.
*/
#ifndef GRAPHCHI_DEF_ALLBASIC_INCLUDES
#define GRAPHCHI_DEF_ALLBASIC_INCLUDES
#include <omp.h>
#include <sstream>
#include "api/chifilenames.hpp"
#include "api/graphchi_context.hpp"
#include "api/graphchi_program.hpp"
#include "api/graph_objects.hpp"
#include "api/ischeduler.hpp"
#include "api/vertex_aggregator.hpp"
#include "engine/graphchi_engine.hpp"
#include "logger/logger.hpp"
#include "metrics/metrics.hpp"
#include "metrics/reps/basic_reporter.hpp"
#include "metrics/reps/file_reporter.hpp"
#include "metrics/reps/html_reporter.hpp"
#include "preprocessing/conversions.hpp"
#include "util/cmdopts.hpp"
namespace graphchi {
/**
* Helper for metrics.
*/
static VARIABLE_IS_NOT_USED void metrics_report(metrics &m);

/**
  * Reports the collected metrics to every reporter named in the
  * comma-separated "metrics.reporter" option (default: "console").
  * Known reporters: "basic"/"console", "file", "html"; unknown names
  * log a warning and are skipped.
  * @param m metrics object to report
  */
static VARIABLE_IS_NOT_USED void metrics_report(metrics &m) {
    std::string reporters = get_option_string("metrics.reporter", "console");
    /* Bug fix: the previous version tokenized with strtok() writing through a
       (char*) cast of c_str(). Mutating that buffer is undefined behavior and
       strtok() is not reentrant. Parse with a stringstream instead. */
    std::stringstream repstream(reporters);
    std::string repname;
    while (std::getline(repstream, repname, ',')) {
        if (repname.empty()) continue;  // strtok skipped empty tokens; preserve that
        if (repname == "basic" || repname == "console") {
            basic_reporter rep;
            m.report(rep);
        } else if (repname == "file") {
            file_reporter rep(get_option_string("metrics.reporter.filename", "metrics.txt"));
            m.report(rep);
        } else if (repname == "html") {
            html_reporter rep(get_option_string("metrics.reporter.htmlfile", "metrics.html"));
            m.report(rep);
        } else {
            logstream(LOG_WARNING) << "Could not find metrics reporter with name [" << repname << "], ignoring." << std::endl;
        }
    }
}
};
#endif
| C++ |
/**
* @file
* @author Aapo Kyrola <akyrola@cs.cmu.edu>
* @version 1.0
*
* @section LICENSE
*
* Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @section DESCRIPTION
*
* Command line options.
*/
#ifndef GRAPHCHI_CMDOPTS_DEF
#define GRAPHCHI_CMDOPTS_DEF
#include <cassert>
#include <cstdlib>
#include <cstring>
#include <iostream>
#include <map>
#include <stdint.h>
#include <string>
#include "api/chifilenames.hpp"
#include "util/configfile.hpp"
namespace graphchi {
/** GNU COMPILER HACK TO PREVENT IT FOR COMPILING METHODS WHICH ARE NOT USED IN
THE PARTICULAR APP BEING BUILT */
#ifdef __GNUC__
#define VARIABLE_IS_NOT_USED __attribute__ ((unused))
#else
#define VARIABLE_IS_NOT_USED
#endif
static bool _cmd_configured = false;
static int _argc;
static char **_argv;
static std::map<std::string, std::string> conf;
/* Programmatically sets a configuration key, overriding values loaded
   from the configuration files or command line parsing done later. */
static void VARIABLE_IS_NOT_USED set_conf(std::string key, std::string value) {
conf[key] = value;
}
// Config file
/**
  * Returns the configured value for option_name.
  * Aborts the program if the option is missing (no sensible default exists).
  */
static std::string VARIABLE_IS_NOT_USED get_config_option_string(const char *option_name) {
    std::map<std::string, std::string>::const_iterator it = conf.find(option_name);
    if (it != conf.end()) {
        return it->second;
    }
    std::cout << "ERROR: could not find option " << option_name << " from config.";
    assert(false);
    abort();  // Bug fix: control used to fall off the end of a non-void function
              // when NDEBUG disabled the assert (undefined behavior).
}
/**
  * Returns the configured value for option_name, or default_value if the
  * option is not present in the configuration map.
  */
static std::string VARIABLE_IS_NOT_USED get_config_option_string(const char *option_name,
                                                                 std::string default_value) {
    std::map<std::string, std::string>::const_iterator it = conf.find(option_name);
    return (it == conf.end()) ? default_value : it->second;
}
/** Returns the configured integer value for option_name, or default_value if absent. */
static int VARIABLE_IS_NOT_USED get_config_option_int(const char *option_name, int default_value) {
    std::map<std::string, std::string>::const_iterator it = conf.find(option_name);
    return (it == conf.end()) ? default_value : atoi(it->second.c_str());
}
/**
  * Returns the configured integer value for option_name.
  * Aborts the program if the option is missing.
  */
static int VARIABLE_IS_NOT_USED get_config_option_int(const char *option_name) {
    std::map<std::string, std::string>::const_iterator it = conf.find(option_name);
    if (it != conf.end()) {
        return atoi(it->second.c_str());
    }
    std::cout << "ERROR: could not find option " << option_name << " from config.";
    assert(false);
    abort();  // Bug fix: control used to fall off the end of a non-void function
              // when NDEBUG disabled the assert (undefined behavior).
}
/** Returns the configured 64-bit value for option_name, or default_value if absent. */
static uint64_t VARIABLE_IS_NOT_USED get_config_option_long(const char *option_name, uint64_t default_value) {
    std::map<std::string, std::string>::const_iterator it = conf.find(option_name);
    return (it == conf.end()) ? default_value : atol(it->second.c_str());
}
/** Returns the configured floating point value for option_name, or default_value if absent. */
static double VARIABLE_IS_NOT_USED get_config_option_double(const char *option_name, double default_value) {
    std::map<std::string, std::string>::const_iterator it = conf.find(option_name);
    return (it == conf.end()) ? default_value : atof(it->second.c_str());
}
static void set_argc(int argc, const char ** argv);
/* Records the command line for later option lookups, loads the configuration
   files, and overlays any "--key=value" style arguments onto the config map
   (command line values override the files). */
static void set_argc(int argc, const char ** argv) {
_argc = argc;
_argv = (char**)argv;
_cmd_configured = true;
conf = loadconfig(filename_config_local(), filename_config());
/* Load --key=value type arguments into the conf map */
std::string prefix = "--";
for (int i = 1; i < argc; i++) {
std::string arg = std::string(_argv[i]);
if (arg.substr(0, prefix.size()) == prefix) {
// Strip the leading "--" and split on the first '='.
arg = arg.substr(prefix.size());
size_t a = arg.find_first_of("=", 0);
if (a != arg.npos) {
std::string key = arg.substr(0, a);
std::string val = arg.substr(a + 1);
// Echo the parsed pair so the user sees what took effect.
std::cout << "[" << key << "]" << " => " << "[" << val << "]" << std::endl;
conf[key] = val;
}
}
}
}
static void graphchi_init(int argc, const char ** argv);
/* Initializes GraphChi option handling: records command line arguments
   and loads the configuration files. Call at the start of main(). */
static void graphchi_init(int argc, const char ** argv) {
set_argc(argc, argv);
}
/* Verifies that set_argc()/graphchi_init() was called before option lookup.
   NOTE(review): this only prints an error and continues with uninitialized
   _argc/_argv - confirm whether a hard failure was intended. */
static void check_cmd_init() {
if (!_cmd_configured) {
std::cout << "ERROR: command line options not initialized." << std::endl;
std::cout << "       You need to call set_argc() in the beginning of the program." << std::endl;
}
}
/**
  * Returns the string value of a command line option, falling back to the
  * configuration map (and then default_value) when the option is not given.
  */
static std::string VARIABLE_IS_NOT_USED get_option_string(const char *option_name,
                                                          std::string default_value)
{
    check_cmd_init();
    // Scan argv right-to-left so the last occurrence of the option wins.
    for (int i = _argc - 2; i >= 0; i--) {
        if (strcmp(_argv[i], option_name) == 0) {
            return std::string(_argv[i + 1]);
        }
    }
    return get_config_option_string(option_name, default_value);
}
/**
  * Returns the string value of a mandatory command line option; falls back
  * to the configuration, which hard-fails if the option is absent there too.
  */
static std::string VARIABLE_IS_NOT_USED get_option_string(const char *option_name)
{
    check_cmd_init();
    // Scan argv right-to-left so the last occurrence of the option wins.
    for (int i = _argc - 2; i >= 0; i--) {
        if (strcmp(_argv[i], option_name) == 0) {
            return std::string(_argv[i + 1]);
        }
    }
    return get_config_option_string(option_name);
}
/* Returns the value of option_name from the command line or the config map;
   if not found in either, prompts the user on stdin. 'options' is a
   human-readable list of valid choices shown in the prompt. */
static std::string VARIABLE_IS_NOT_USED get_option_string_interactive(const char *option_name, std::string options)
{
int i;
check_cmd_init();
// Scan argv from the end so the last occurrence of the option wins.
for (i = _argc - 2; i >= 0; i -= 1)
if (strcmp(_argv[i], option_name) == 0)
return std::string(_argv[i + 1]);
if (conf.find(option_name) != conf.end()) {
return conf[option_name];
}
std::cout << "Please enter value for command-line argument [" << std::string(option_name) << "]"<< std::endl;
std::cout << "  (Options are: " << options << ")" << std::endl;
std::string val;
std::cin >> val;
return val;
}
/** Returns the integer value of a command line option, or the config/default fallback. */
static int VARIABLE_IS_NOT_USED get_option_int(const char *option_name, int default_value)
{
    check_cmd_init();
    // Scan argv right-to-left so the last occurrence of the option wins.
    for (int i = _argc - 2; i >= 0; i--) {
        if (strcmp(_argv[i], option_name) == 0) {
            return atoi(_argv[i + 1]);
        }
    }
    return get_config_option_int(option_name, default_value);
}
/** Returns the integer value of a mandatory option; config lookup hard-fails if absent. */
static int VARIABLE_IS_NOT_USED get_option_int(const char *option_name)
{
    check_cmd_init();
    // Scan argv right-to-left so the last occurrence of the option wins.
    for (int i = _argc - 2; i >= 0; i--) {
        if (strcmp(_argv[i], option_name) == 0) {
            return atoi(_argv[i + 1]);
        }
    }
    return get_config_option_int(option_name);
}
/** Returns the 64-bit value of a command line option, or the config/default fallback. */
static uint64_t VARIABLE_IS_NOT_USED get_option_long(const char *option_name, uint64_t default_value)
{
    check_cmd_init();
    // Scan argv right-to-left so the last occurrence of the option wins.
    for (int i = _argc - 2; i >= 0; i--) {
        if (strcmp(_argv[i], option_name) == 0) {
            return atol(_argv[i + 1]);
        }
    }
    return get_config_option_long(option_name, default_value);
}
/** Returns the float value of a command line option, or the config/default fallback. */
static float VARIABLE_IS_NOT_USED get_option_float(const char *option_name, float default_value)
{
    check_cmd_init();
    // Scan argv right-to-left so the last occurrence of the option wins.
    for (int i = _argc - 2; i >= 0; i--) {
        if (strcmp(_argv[i], option_name) == 0) {
            return (float) atof(_argv[i + 1]);
        }
    }
    return (float) get_config_option_double(option_name, default_value);
}
} // End namespace
#endif
| C++ |
/**
* @file
* @author Aapo Kyrola <akyrola@cs.cmu.edu>
* @version 1.0
*
* @section LICENSE
*
* Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @section DESCRIPTION
*
* Tools for listing the TOP K values from a verte data file.
*/
#ifndef DEF_GRAPHCHI_TOPLIST
#define DEF_GRAPHCHI_TOPLIST
#include <vector>
#include <algorithm>
#include <errno.h>
#include <assert.h>
#include "io/stripedio.hpp"
#include "logger/logger.hpp"
#include "util/merge.hpp"
#include "util/ioutil.hpp"
#include "util/qsort.hpp"
#include "api/chifilenames.hpp"
#include "engine/auxdata/vertex_data.hpp"
namespace graphchi {
/**
  * Pair of a vertex id and its associated value; element type of the
  * top-K lists produced by get_top_vertices().
  */
template <typename VertexDataType>
struct vertex_value {
    vid_t vertex;            // vertex id
    VertexDataType value;    // value read from the vertex data file
    vertex_value() {}
    vertex_value(vid_t v, VertexDataType x) : vertex(v), value(x) {}
};
/** Strict-weak ordering that sorts vertex-value pairs by descending value. */
template <typename VertexDataType>
bool vertex_value_greater(const vertex_value<VertexDataType> &a, const vertex_value<VertexDataType> &b) {
    if (a.value > b.value) {
        return true;
    }
    return false;
}
/**
* Reads the vertex data file and returns top N values.
* Vertex value type must be given as a template parameter.
* This method has been implemented in a manner to consume very little
* memory, i.e the whole file is not loaded into memory (unless ntop = nvertices).
* @param basefilename name of the graph
* @param ntop number of top values to return (if ntop is smaller than the total number of vertices, returns all in sorted order)
* @param from first vertex to include (default, 0)
* @param to last vertex to include (default, all)
* @return a vector of top ntop values
*/
template <typename VertexDataType>
std::vector<vertex_value<VertexDataType> > get_top_vertices(std::string basefilename, int ntop, vid_t from=0, vid_t to=0) {
typedef vertex_value<VertexDataType> vv_t;
/* NOTE(review): the 'to' parameter is accepted but never used below -
   presumably it was meant to bound the scan; confirm against callers. */
/* Initialize striped IO manager */
metrics m("toplist");
stripedio * iomgr = new stripedio(m);
/* Initialize the vertex-data reader */
vid_t readwindow = 1024 * 1024;
size_t numvertices = get_num_vertices(basefilename);
vertex_data_store<VertexDataType> * vertexdata =
new vertex_data_store<VertexDataType>(basefilename, numvertices, iomgr);
/* Cannot return more values than there are vertices. */
if ((size_t)ntop > numvertices) {
ntop = (int)numvertices;
}
/* Initialize buffer */
/* buffer_idxs: candidates from the current window; topbuf: current top-ntop
   (sorted descending); mergearr: scratch for merging the two sorted arrays.
   calloc keeps unused tail entries zeroed. */
vv_t * buffer_idxs = (vv_t*) calloc(readwindow, sizeof(vv_t));
vv_t * topbuf = (vv_t*) calloc(ntop, sizeof(vv_t));
vv_t * mergearr = (vv_t*) calloc(ntop * 2, sizeof(vv_t));
/* Iterate the vertex values and maintain the top-list */
size_t idx = 0;
vid_t st = 0;
vid_t en = numvertices - 1;
int count = 0;
/* Process the vertex data file in windows of 'readwindow' vertices so that
   the whole file is never resident in memory at once. */
while(st <= numvertices - 1) {
en = st + readwindow - 1;
if (en >= numvertices - 1) en = numvertices - 1;
/* Load the vertex values */
vertexdata->load(st, en);
int nt = en - st + 1;
int k = 0;
VertexDataType minima = VertexDataType();
if (count > 0) {
minima = topbuf[ntop - 1].value; // Minimum value that should be even considered
}
/* Keep only values that can still enter the current top list. */
for(int j=0; j < nt; j++) {
VertexDataType& val = *vertexdata->vertex_data_ptr(j + st);
if (count == 0 || (val > minima)) {
buffer_idxs[k] = vv_t((vid_t)idx + from, val);
k++;
}
idx++;
}
nt = k; /* How many were actually included */
/* Sort buffer-idxs */
quickSort(buffer_idxs, nt, vertex_value_greater<VertexDataType>);
/* Merge the top with the current top */
if (count == 0) {
/* Nothing to merge, just copy */
memcpy(topbuf, buffer_idxs, ntop * sizeof(vv_t));
} else {
// void merge(ET* S1, int l1, ET* S2, int l2, ET* R, F f) {
merge<vv_t>(topbuf, ntop, buffer_idxs, std::min(ntop, nt), mergearr, vertex_value_greater<VertexDataType>);
memcpy(topbuf, mergearr, ntop * sizeof(vv_t));
}
count++;
st += readwindow;
}
/* Return */
std::vector< vv_t > ret;
for(int i=0; i < ntop; i++) {
ret.push_back(topbuf[i]);
}
free(buffer_idxs);
free(mergearr);
free(topbuf);
delete vertexdata;
delete iomgr;
return ret;
}
};
#endif
| C++ |
//
// readdeg.cpp
// graphchi_xcode
//
// Created by Aapo Kyrola on 9/14/12.
// Copyright 2012 __MyCompanyName__. All rights reserved.
//
#include <iostream>
#include <fstream>
// Per-vertex degree record as stored in GraphChi's "_degs.bin" file.
struct degree {
int indegree;
int outdegree;
};
/**
  * Reads a "_degs.bin" degree file, prints the in/out degree of the wanted
  * vertex (argv[2]) and sums the degrees of the records read up to that point.
  * Usage: readdeg <degree-file> <vertex-id>
  */
int main(int argc, const char ** argv) {
    if (argc < 3) {
        std::cerr << "Usage: " << argv[0] << " <degree-file> <vertex-id>" << std::endl;
        return 1;
    }
    FILE * f = fopen(argv[1], "r");
    if (f == NULL) {
        std::cerr << "Could not open file: " << argv[1] << std::endl;
        return 1;
    }
    int wanted = atoi(argv[2]);
    size_t nout = 0;
    size_t nin = 0;
    size_t nonz = 0;
    size_t tot = 0;
    degree d;
    int j = 0;
    /* Bug fix: the old while(!feof(f)) loop processed the last record twice,
       because feof() becomes true only after a read fails while 'd' still
       holds the previous record. Check fread()'s return value instead. */
    while (fread(&d, sizeof(degree), 1, f) == 1) {
        nout += d.outdegree;
        nin += d.indegree;
        if (wanted == j) {
            std::cout << wanted << " indeg: " << d.indegree << " outdeg: " << d.outdegree << std::endl;
            break;
        }
        j++;
    }
    fclose(f);  // Bug fix: the file handle was never closed.
    std::cout << "Total in: " << nin << " total out: " << nout << std::endl;
    /* NOTE(review): nonz and tot are never updated, so these always print 0 -
       presumably they were meant to be counted inside the loop; confirm. */
    std::cout << "Non-singleton vertices: " << nonz << std::endl;
    std::cout << "Total vertices: " << tot << std::endl;
}
#ifndef SYNCHRONIZED_QUEUE_HPP
#define SYNCHRONIZED_QUEUE_HPP
#include <queue>
#include "pthread_tools.hpp"
// From graphlab
namespace graphchi {
/**
  * Minimal thread-safe FIFO queue guarded by a spinlock.
  * NOTE(review): size() reads the underlying queue without taking the lock,
  * so under concurrency it is only an approximate snapshot; pop() on an
  * empty queue is undefined behavior (std::queue::front precondition) -
  * use safepop() when the queue may be empty.
  */
template <typename T>
class synchronized_queue {
public:
synchronized_queue() { };
~synchronized_queue() { };
/// Appends item to the tail of the queue.
void push(const T &item) {
_queuelock.lock();
_queue.push(item);
_queuelock.unlock();
}
/// Pops the head into *ret if the queue is non-empty; returns false otherwise.
bool safepop(T * ret) {
_queuelock.lock();
if (_queue.size() == 0) {
_queuelock.unlock();
return false;
}
*ret = _queue.front();
_queue.pop();
_queuelock.unlock();
return true;
}
/// Pops and returns the head. Precondition: the queue is non-empty.
T pop() {
_queuelock.lock();
T t = _queue.front();
_queue.pop();
_queuelock.unlock();
return t;
}
/// Number of queued items (unsynchronized read - see class note).
size_t size() const{
return _queue.size();
}
private:
std::queue<T> _queue;
spinlock _queuelock;
};
}
#endif
| C++ |
/**
* @file
* @author Aapo Kyrola <akyrola@cs.cmu.edu>
* @version 1.0
*
* @section LICENSE
*
* Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @section DESCRIPTION
*
* I/O Utils.
*/
#ifndef DEF_IOUTIL_HPP
#define DEF_IOUTIL_HPP
#include <unistd.h>
#include <assert.h>
#include <stdlib.h>
#include <errno.h>
#include <zlib.h>
// Reads given number of bytes to a buffer
/**
  * Reads exactly nbytes from descriptor f at offset off into tbuf,
  * retrying partial reads. Aborts (assert) on a read error or short file.
  */
template <typename T>
void preada(int f, T * tbuf, size_t nbytes, size_t off) {
    char * dst = (char *) tbuf;
    size_t pos = 0;
    while (pos < nbytes) {
        ssize_t nb = pread(f, dst + pos, nbytes - pos, off + pos);
        if (nb == (-1)) {
            std::cout << "Error, could not read: " << strerror(errno) << "; file-desc: " << f << std::endl;
            std::cout << "Pread arguments: " << f << " tbuf: " << tbuf << " nbytes: " << nbytes << " off: " << off << std::endl;
            assert(nb != (-1));
        }
        assert(nb > 0);  // nb == 0 would mean an unexpected end of file
        pos += nb;
    }
    assert(pos <= nbytes);
}
/**
  * Like preada(), but tolerates a short file: reads up to nbytes starting at
  * offset off; if end-of-file is reached first, the rest of the buffer is
  * zero-filled.
  * @param f file descriptor
  * @param tbuf destination buffer (must hold at least nbytes)
  * @param nbytes number of bytes requested
  * @param off file offset to start reading from
  */
template <typename T>
void preada_trunc(int f, T * tbuf, size_t nbytes, size_t off) {
    size_t nread = 0;
    char * buf = (char*)tbuf;
    while(nread < nbytes) {
        ssize_t a = pread(f, buf, nbytes - nread, off + nread);
        /* Bug fix: 'a' used to be size_t, so a pread() error (-1) wrapped to a
           huge positive value and corrupted the buffer pointer. Fail loudly. */
        if (a == (ssize_t)(-1)) {
            std::cout << "Error, could not read: " << strerror(errno) << "; file-desc: " << f << std::endl;
            assert(false);
        }
        if (a == 0) {
            // Reached end of file - fill the remainder with zeros.
            memset(buf, 0, nbytes - nread);
            return;
        }
        buf += a;
        nread += a;
    }
}
/**
  * Reads the whole file into a newly malloc()ed buffer; the caller owns and
  * must free() *buf.
  * @param f file descriptor
  * @param buf output parameter: receives the allocated buffer
  * @return size of the file in bytes
  */
template <typename T>
size_t readfull(int f, T ** buf) {
    off_t sz = lseek(f, 0, SEEK_END);
    assert(sz >= 0);  // lseek returns -1 on error
    lseek(f, 0, SEEK_SET);
    /* Bug fix: the allocation was cast to (char*), which fails to compile for
       any instantiation other than T = char. Cast to T* instead. */
    *buf = (T*) malloc(sz);
    assert(sz == 0 || *buf != NULL);
    preada(f, *buf, sz, 0);
    return sz;
}
/* Writes exactly nbytes from tbuf to descriptor f at offset off, retrying
   partial writes. Logs and aborts (assert) on a write error. */
template <typename T>
void pwritea(int f, T * tbuf, size_t nbytes, size_t off) {
size_t nwritten = 0;
assert(f>0);
char * buf = (char*)tbuf;
while(nwritten<nbytes) {
// pwrite() returns ssize_t; stored in size_t, an error (-1) shows up as size_t(-1).
size_t a = pwrite(f, buf, nbytes-nwritten, off+nwritten);
if (a == size_t(-1)) {
logstream(LOG_ERROR) << "f:" << f << " nbytes: " << nbytes << " written: " << nwritten << " off:" <<
off << " f: " << f << " error:" <<  strerror(errno) << std::endl;
assert(false);
}
assert(a>0);
buf += a;
nwritten += a;
}
}
template <typename T>
void checkarray_filesize(std::string fname, size_t nelements) {
// Check the vertex file is correct size
int f = open(fname.c_str(), O_RDWR | O_CREAT, S_IROTH | S_IWOTH | S_IWUSR | S_IRUSR);
if (f < 1) {
logstream(LOG_ERROR) << "Error initializing the data-file: " << fname << " error:" << strerror(errno) << std::endl; }
assert(f>0);
int err = ftruncate(f, nelements * sizeof(T));
if (err != 0) {
logstream(LOG_ERROR) << "Error in adjusting file size: " << fname << " to size: " << nelements * sizeof(T)
<< " error:" << strerror(errno) << std::endl;
}
assert(err == 0);
close(f);
}
/**
  * Writes exactly nbytes from tbuf to descriptor f at the current file
  * position, retrying partial writes. Logs and aborts on a write error.
  */
template <typename T>
void writea(int f, T * tbuf, size_t nbytes) {
    size_t nwritten = 0;
    char * buf = (char*)tbuf;
    while(nwritten < nbytes) {
        ssize_t a = write(f, buf, nbytes - nwritten);
        /* Bug fix: assert(a>0) used to run BEFORE the error check, so on a
           write error the diagnostic below could never be logged. Check for
           the error first, then assert progress. */
        if (a == (ssize_t)(-1)) {
            logstream(LOG_ERROR) << "Could not write " << (nbytes-nwritten) << " bytes!" << " error:" <<  strerror(errno) << std::endl;
            assert(false);
        }
        assert(a > 0);
        buf += a;
        nwritten += a;
    }
}
/*
* COMPRESSED
*/
/**
  * Compresses nbytes from tbuf with zlib deflate (Z_BEST_SPEED) and writes the
  * stream to descriptor f, truncating the file first.
  * @return number of compressed bytes written (nbytes if compression disabled).
  */
template <typename T>
size_t write_compressed(int f, T * tbuf, size_t nbytes) {
#ifndef GRAPHCHI_DISABLE_COMPRESSION
unsigned char * buf = (unsigned char*)tbuf;
int ret;
unsigned have;
z_stream strm;
// Output chunk at least as large as the input, so deflate usually finishes in one pass.
int CHUNK = (int) std::max((size_t)4096 * 1024, nbytes);
unsigned char * out = (unsigned char *) malloc(CHUNK);
lseek(f, 0, SEEK_SET);
/* allocate deflate state */
strm.zalloc = Z_NULL;
strm.zfree = Z_NULL;
strm.opaque = Z_NULL;
ret = deflateInit(&strm, Z_BEST_SPEED);
if (ret != Z_OK)
assert(false);
/* compress until end of file */
strm.avail_in = (int) nbytes;
strm.next_in = buf;
// Discard any previous file contents before writing the new stream.
int trerr = ftruncate(f, 0);
assert (trerr == 0);
size_t totwritten = 0;
/* run deflate() on input until output buffer not full, finish
 compression if all of source has been read in */
do {
strm.avail_out = CHUNK;
strm.next_out = out;
ret = deflate(&strm, Z_FINISH);    /* no bad return value */
assert(ret != Z_STREAM_ERROR);  /* state not clobbered */
have = CHUNK - strm.avail_out;
if (write(f, out, have) != have) {
(void)deflateEnd(&strm);
assert(false);
}
totwritten += have;
} while (strm.avail_out == 0);
assert(strm.avail_in == 0);     /* all input will be used */
assert(ret == Z_STREAM_END);        /* stream will be complete */
/* clean up and return */
(void)deflateEnd(&strm);
free(out);
return totwritten;
#else
writea(f, tbuf, nbytes);
return nbytes;
#endif
}
/* Zlib-inflated read. Assume tbuf is correctly sized memory block. */
/* Zlib-inflated read. Assume tbuf is correctly sized memory block. */
/* Reads the whole descriptor f (a deflate stream produced by
   write_compressed) and inflates it into tbuf. The caller must size tbuf
   to hold the full decompressed data (nbytes is not otherwise enforced). */
template <typename T>
void read_compressed(int f, T * tbuf, size_t nbytes) {
#ifndef GRAPHCHI_DISABLE_COMPRESSION
unsigned char * buf = (unsigned char*)tbuf;
int ret;
unsigned have;
z_stream strm;
int CHUNK = (int) std::max((size_t)4096 * 1024, nbytes);
// fsize = total size of the compressed file; the whole file is read into 'in'.
size_t fsize = lseek(f, 0, SEEK_END);
unsigned char * in = (unsigned char *) malloc(fsize);
lseek(f, 0, SEEK_SET);
/* allocate inflate state */
strm.zalloc = Z_NULL;
strm.zfree = Z_NULL;
strm.opaque = Z_NULL;
strm.avail_in = 0;
strm.next_in = Z_NULL;
ret = inflateInit(&strm);
if (ret != Z_OK)
assert(false);
/* decompress until deflate stream ends or end of file */
do {
ssize_t a = 0;
// Accumulate the remaining compressed bytes into 'in' until EOF.
do {
a = read(f, in + strm.avail_in, fsize - strm.avail_in); //fread(in, 1, CHUNK, source);
strm.avail_in += (int) a;
assert(a != (ssize_t)(-1));
} while (a > 0);
if (strm.avail_in == 0)
break;
strm.next_in = in;
/* run inflate() on input until output buffer not full */
do {
strm.avail_out = CHUNK;
strm.next_out = buf;
ret = inflate(&strm, Z_NO_FLUSH);
assert(ret != Z_STREAM_ERROR);  /* state not clobbered */
switch (ret) {
case Z_NEED_DICT:
ret = Z_DATA_ERROR;     /* and fall through */
case Z_DATA_ERROR:
case Z_MEM_ERROR:
assert(false);
}
have = CHUNK - strm.avail_out;
// Advance the output cursor; tbuf must be large enough for all inflated data.
buf += have;
} while (strm.avail_out == 0);
/* done when inflate() says it's done */
} while (ret != Z_STREAM_END);
//  std::cout << "Read: " << (buf - (unsigned char*)tbuf) << std::endl;
/* clean up and return */
(void)inflateEnd(&strm);
free(in);
#else
preada(f, tbuf, nbytes, 0);
#endif
}
#endif
| C++ |
#ifndef ATOMIC_HPP
#define ATOMIC_HPP
// Note, stolen from GraphLab.
namespace graphchi {
/**
* \brief atomic object toolkit
*
* A templated class for creating atomic numbers.
*/
/**
 * \brief atomic object toolkit
 *
 * Templated wrapper that makes a numeric value atomic by routing every
 * mutation through the GCC __sync_* builtins.
 */
template<typename T>
class atomic{
public:
    //! The wrapped value; volatile, mutated only via the builtins below.
    volatile T value;
    //! Constructs with an initial value (default 0).
    atomic(const T& value = 0) : value(value) { }
    //! Implicit read of the current value.
    operator T() const { return value; }
    //! Atomic increment by one; returns the new value.
    T inc() { return __sync_add_and_fetch(&value, 1); }
    //! Atomic decrement by one; returns the new value.
    T dec() { return __sync_sub_and_fetch(&value, 1); }
    //! Atomic add of 'val'; returns the new value.
    T inc(const T val) { return __sync_add_and_fetch(&value, val); }
    //! Atomic subtract of 'val'; returns the new value.
    T dec(const T val) { return __sync_sub_and_fetch(&value, val); }
    //! Atomic increment by one; returns the value before the increment.
    T inc_ret_last() { return __sync_fetch_and_add(&value, 1); }
    //! Atomic decrement by one; returns the value before the decrement.
    T dec_ret_last() { return __sync_fetch_and_sub(&value, 1); }
    //! Atomic add of 'val'; returns the value before the addition.
    T inc_ret_last(const T val) { return __sync_fetch_and_add(&value, val); }
    //! Atomic subtract of 'val'; returns the value before the subtraction.
    T dec_ret_last(const T val) { return __sync_fetch_and_sub(&value, val); }
    //! Pre-increment / pre-decrement: return the new value.
    T operator++() { return inc(); }
    T operator--() { return dec(); }
    //! Post-increment / post-decrement: return the old value.
    T operator++(int) { return inc_ret_last(); }
    T operator--(int) { return dec_ret_last(); }
    //! Compound add / subtract: return the new value.
    T operator+=(const T val) { return inc(val); }
    T operator-=(const T val) { return dec(val); }
    //! Atomically stores 'val' and returns the previous value.
    T exchange(const T val) { return __sync_lock_test_and_set(&value, val); }
};
/**
atomic instruction that is equivalent to the following::
if a==oldval, then { \
a = newval; \
return true; \
}
return false;
*/
/** Atomically: if (a == oldval) { a = newval; return true; } else return false. */
template<typename T>
bool atomic_compare_and_swap(T& a, const T &oldval, const T &newval) {
    const bool swapped = __sync_bool_compare_and_swap(&a, oldval, newval);
    return swapped;
};
/* Specialization for double: the __sync builtins work on integer types, so
   the 8-byte value is CAS'ed through its bit pattern as a uint64_t.
   NOTE(review): comparison is therefore bitwise, not numeric (e.g. +0.0 and
   -0.0 have different representations); the reinterpret_cast aliasing is
   required because the builtin needs the actual memory address. */
template <>
inline bool atomic_compare_and_swap(double& a, const double &oldval, const double &newval) {
return __sync_bool_compare_and_swap(reinterpret_cast<uint64_t*>(&a),
*reinterpret_cast<const uint64_t*>(&oldval),
*reinterpret_cast<const uint64_t*>(&newval));
};
/* Specialization for float: the 4-byte value is CAS'ed through its bit
   pattern as a uint32_t. Same bitwise-comparison caveat as the double
   specialization above. */
template <>
inline bool atomic_compare_and_swap(float& a, const float &oldval, const float &newval) {
return __sync_bool_compare_and_swap(reinterpret_cast<uint32_t*>(&a),
*reinterpret_cast<const uint32_t*>(&oldval),
*reinterpret_cast<const uint32_t*>(&newval));
};
/** Atomically stores b into a; a's previous value is returned into b. */
template<typename T>
void atomic_exchange(T& a, T& b) {
    T previous = __sync_lock_test_and_set(&a, b);
    b = previous;
};
}
#endif
| C++ |
/**
* @file
* @author Aapo Kyrola <akyrola@cs.cmu.edu>
* @version 1.0
*
* @section LICENSE
*
* Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @section DESCRIPTION
*
* Analyses output of label propagation algorithms such as connected components
* and community detection. Memory efficient implementation.
*
* @author Aapo Kyrola
*/
#include <vector>
#include <algorithm>
#include <errno.h>
#include <assert.h>
#include "io/stripedio.hpp"
#include "logger/logger.hpp"
#include "util/merge.hpp"
#include "util/ioutil.hpp"
#include "util/qsort.hpp"
#include "api/chifilenames.hpp"
#include "engine/auxdata/vertex_data.hpp"
#ifndef DEF_GRAPHCHI_LABELANALYSIS
#define DEF_GRAPHCHI_LABELANALYSIS
using namespace graphchi;
/**
 * A label together with the number of vertices carrying it.
 * The count excludes the vertex whose own id equals the label
 * (important memory optimization in analyze_labels()).
 */
template <typename LabelType>
struct labelcount_tt {
    LabelType label;     // the label value
    unsigned int count;  // occurrences, excluding the vertex with id == label
    labelcount_tt() {}
    labelcount_tt(LabelType l, int c) : label(l), count(c) {}
};
/** Strict-weak ordering that sorts label counts in descending order. */
template <typename LabelType>
bool label_count_greater(const labelcount_tt<LabelType> &a, const labelcount_tt<LabelType> &b) {
    return b.count < a.count;
}
/**
  * Scans the vertex data file of a label propagation algorithm (connected
  * components, community detection) window by window, counts the vertices per
  * label, writes the counts to "<basefilename>_components.txt" and prints the
  * printtop largest labels. Memory usage stays bounded by the read window.
  * @param basefilename name of the graph
  * @param printtop how many of the largest labels to print (default 20)
  */
template <typename LabelType>
void analyze_labels(std::string basefilename, int printtop = 20) {
    typedef labelcount_tt<LabelType> labelcount_t;
    /**
      * NOTE: this implementation is quite a mouthful. Cleaner implementation
      * could be done by using a map implementation. But STL map takes too much
      * memory, and I want to avoid Boost dependency - which would have boost::unordered_map.
      */
    metrics m("labelanalysis");
    stripedio * iomgr = new stripedio(m);
    /* Initialize the vertex-data reader */
    vid_t readwindow = 1024 * 1024;
    vid_t numvertices = (vid_t) get_num_vertices(basefilename);
    vertex_data_store<LabelType> * vertexdata =
        new vertex_data_store<LabelType>(basefilename, numvertices, iomgr);
    std::vector<labelcount_t> curlabels;   // accumulated counts, kept sorted by label
    bool first = true;
    vid_t curvid = 0;
    LabelType * buffer = (LabelType*) calloc(readwindow, sizeof(LabelType));
    /* Iterate the vertex values and maintain the top-list */
    vid_t st = 0;
    vid_t en = numvertices - 1;
    while(st <= numvertices - 1) {
        en = st + readwindow - 1;
        if (en >= numvertices - 1) en = numvertices - 1;
        /* Load the vertex values */
        vertexdata->load(st, en);
        int nt = en - st + 1;
        /* Mark vertices with its own label with 0xffffffff so they will be ignored.
           NOTE(review): assumes 0xffffffff can serve as a sentinel in LabelType -
           confirm for label types other than 32-bit unsigned. */
        for(int i=0; i < nt; i++) {
            LabelType l = *vertexdata->vertex_data_ptr(i + st);
            if (l == curvid) buffer[i] = 0xffffffff;
            else buffer[i] = l;
            curvid++;
        }
        /* First sort the buffer */
        quickSort(buffer, nt, std::less<LabelType>());
        /* Then collect: consecutive equal labels collapse into one count. */
        std::vector<labelcount_t> newlabels;
        newlabels.reserve(nt);
        vid_t lastlabel = 0xffffffff;
        for(int i=0; i < nt; i++) {
            if (buffer[i] != 0xffffffff) {
                if (buffer[i] != lastlabel) {
                    newlabels.push_back(labelcount_t(buffer[i], 1));
                } else {
                    newlabels[newlabels.size() - 1].count ++;
                }
                lastlabel = buffer[i];
            }
        }
        if (first) {
            for(int i=0; i < (int)newlabels.size(); i++) {
                curlabels.push_back(newlabels[i]);
            }
        } else {
            /* Merge current and new label counts (both sorted by label). */
            int cl = 0;
            int nl = 0;
            std::vector< labelcount_t > merged;
            merged.reserve(curlabels.size() + newlabels.size());
            while(cl < (int)curlabels.size() && nl < (int)newlabels.size()) {
                if (newlabels[nl].label == curlabels[cl].label) {
                    merged.push_back(labelcount_t(newlabels[nl].label, newlabels[nl].count + curlabels[cl].count));
                    nl++; cl++;
                } else {
                    if (newlabels[nl].label < curlabels[cl].label) {
                        merged.push_back(newlabels[nl]);
                        nl++;
                    } else {
                        merged.push_back(curlabels[cl]);
                        cl++;
                    }
                }
            }
            while(cl < (int)curlabels.size()) merged.push_back(curlabels[cl++]);
            while(nl < (int)newlabels.size()) merged.push_back(newlabels[nl++]);
            curlabels = merged;
        }
        first = false;
        st += readwindow;
    }
    /* Sort by size, largest label first */
    std::sort(curlabels.begin(), curlabels.end(), label_count_greater<LabelType>);
    /* Write output file */
    std::string outname = basefilename + "_components.txt";
    FILE * resf = fopen(outname.c_str(), "w");
    if (resf == NULL) {
        logstream(LOG_ERROR) << "Could not write label outputfile : " << outname << std::endl;
        /* Bug fix: release resources on the error path as well -
           buffer, vertexdata and iomgr used to leak here. */
        free(buffer);
        delete vertexdata;
        delete iomgr;
        return;
    }
    /* +1 restores the vertex that carries its own id as label (excluded above). */
    for(int i=0; i < (int) curlabels.size(); i++) {
        fprintf(resf, "%u,%u\n", curlabels[i].label, curlabels[i].count + 1);
    }
    fclose(resf);
    std::cout << "Total number of different labels (components/communities): " << curlabels.size() << std::endl;
    std::cout << "List of labels was written to file: " << outname << std::endl;
    for(int i=0; i < (int)std::min((size_t)printtop, curlabels.size()); i++) {
        std::cout << (i+1) << ". label: " << curlabels[i].label << ", size: " << curlabels[i].count << std::endl;
    }
    free(buffer);
    delete vertexdata;
    delete iomgr;
}
#endif
| C++ |
// This code is part of the Problem Based Benchmark Suite (PBBS)
// Copyright (c) 2010 Guy Blelloch and Harsha Vardhan Simhadri and the PBBS team
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights (to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so, subject to
// the following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#ifndef GRAPHCHI_QSORT_INCLUDED
#define GRAPHCHI_QSORT_INCLUDED
#include <algorithm>
#include <vector>
/**
 * Insertion sort of A[0..n) under the strict-weak ordering f.
 * Each element is shifted left until its predecessor is not greater.
 */
template <class E, class BinPred>
void insertionSort(E* A, int n, BinPred f) {
    for (int i = 0; i < n; i++) {
        E cur = A[i];
        int j = i - 1;
        while (j >= 0 && f(cur, A[j])) {
            A[j + 1] = A[j];
            j--;
        }
        A[j + 1] = cur;
    }
}
#define ISORT 25
/** Returns the middle element of {a, b, c} under the ordering f. */
template <class E, class BinPred>
E median(E a, E b, E c, BinPred f) {
    if (f(a, b)) {
        if (f(b, c)) return b;
        return f(a, c) ? c : a;
    }
    if (f(a, c)) return a;
    return f(b, c) ? c : b;
}
// Partly copied from PBBS
/* Quicksort of A[0..n) under the strict-weak ordering f, with a random pivot
   and a three-way ("fat") partition so runs of pivot-equal elements are not
   recursed into. Small inputs fall back to insertion sort. */
template <class E, class BinPred>
void quickSort(E* A, int n, BinPred f) {
if (n < ISORT) insertionSort(A, n, f);
else {
E p = A[rand() % n];   // Random pivot
/* Partition invariants maintained by the loop below:
   A[0..L)   elements less than the pivot
   A[L..M)   elements equal to the pivot
   A(R..n-1] elements greater than the pivot */
E* L = A;   // below L are less than pivot
E* M = A;   // between L and M are equal to pivot
E* R = A+n-1; // above R are greater than pivot
while (1) {
// Advance M over elements <= pivot, moving strictly-smaller ones below L.
while (!f(p,*M)) {
if (f(*M,p)) std::swap(*M,*(L++));
if (M >= R) break;
M++;
}
// Shrink R past elements strictly greater than the pivot.
while (f(p,*R)) R--;
if (M >= R) break;
std::swap(*M,*R--);
if (f(*M,p)) std::swap(*M,*(L++));
M++;
}
quickSort(A, (int) (L-A), f);
quickSort(M, (int) (A+n-M), f); // Exclude all elts that equal pivot
}
}
#endif
| C++ |
// This code is part of the Problem Based Benchmark Suite (PBBS)
// Copyright (c) 2010 Guy Blelloch and Harsha Vardhan Simhadri and the PBBS team
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights (to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so, subject to
// the following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#ifndef DEF_MERGE
#define DEF_MERGE
/**
 * Merge two sorted sequences S1 (length l1) and S2 (length l2) into R,
 * using the "less-than" comparator f.  On ties the element from S1 is
 * taken first.  R must have room for l1+l2 elements.
 *
 * Fix: the original dereferenced *pS1/*pS2 before checking for exhausted
 * inputs, reading out of bounds when l1 == 0 or l2 == 0.
 */
template <class ET, class F>
void merge(ET* S1, int l1, ET* S2, int l2, ET* R, F f) {
    ET* pR = R;
    ET* pS1 = S1;
    ET* pS2 = S2;
    ET* eS1 = S1+l1;
    ET* eS2 = S2+l2;
    while (true) {
        // Check for exhaustion BEFORE dereferencing either cursor.
        if (pS1==eS1) {std::copy(pS2,eS2,pR); break;}
        if (pS2==eS2) {std::copy(pS1,eS1,pR); break;}
        *pR++ = f(*pS2,*pS1) ? *pS2++ : *pS1++;
    }
}
#endif
| C++ |
#ifndef DEF_PTHREAD_TOOLS_HPP
#define DEF_PTHREAD_TOOLS_HPP
// Stolen from GraphLab
#include <cstdlib>
#include <memory.h>
#include <pthread.h>
#include <semaphore.h>
#include <sched.h>
#include <signal.h>
#include <sys/time.h>
#include <vector>
#include <cassert>
#include <list>
#include <iostream>
#undef _POSIX_SPIN_LOCKS
#define _POSIX_SPIN_LOCKS -1
/**
* \file pthread_tools.hpp A collection of utilities for threading
*/
namespace graphchi {
/**
* \class mutex
*
* Wrapper around pthread's mutex On single core systems mutex
* should be used. On multicore systems, spinlock should be used.
*/
/**
 * \class mutex
 * Thin wrapper around a pthread mutex.  lock/unlock/try_lock are const
 * so a mutex member can guard logically-const operations.
 */
class mutex {
private:
    // mutable so the const methods can act on the underlying handle
    mutable pthread_mutex_t m_mut;
public:
    mutex() {
        const int rc = pthread_mutex_init(&m_mut, NULL);
        assert(!rc);
    }
    inline void lock() const {
        const int rc = pthread_mutex_lock(&m_mut);
        assert(!rc);
    }
    inline void unlock() const {
        const int rc = pthread_mutex_unlock(&m_mut);
        assert(!rc);
    }
    //! Non-blocking acquire; returns true on success.
    inline bool try_lock() const {
        return pthread_mutex_trylock(&m_mut) == 0;
    }
    ~mutex(){
        const int rc = pthread_mutex_destroy(&m_mut);
        if (rc)
            perror("Error: failed to destroy mutex");
        assert(!rc);
    }
    friend class conditional;  // conditional::wait needs direct access to m_mut
}; // End of Mutex
#if _POSIX_SPIN_LOCKS >= 0
// We should change this to use a test for posix_spin_locks eventually
// #ifdef __linux__
/**
* \class spinlock
*
* Wrapper around pthread's spinlock On single core systems mutex
* should be used. On multicore systems, spinlock should be used.
* If pthread_spinlock is not available, the spinlock will be
* typedefed to a mutex
*/
// NOTE(review): this class is only compiled when _POSIX_SPIN_LOCKS >= 0,
// but the file #defines _POSIX_SPIN_LOCKS to -1 above, so in practice the
// `typedef mutex spinlock` fallback below is used and this code is dead.
class spinlock {
private:
    // mutable not actually needed
    mutable pthread_spinlock_t m_spin;
public:
    spinlock () {
        // PTHREAD_PROCESS_PRIVATE: lock shared between threads of this
        // process only.
        int error = pthread_spin_init(&m_spin, PTHREAD_PROCESS_PRIVATE);
        assert(!error);
    }
    inline void lock() const {
        int error = pthread_spin_lock( &m_spin );
        assert(!error);
    }
    inline void unlock() const {
        int error = pthread_spin_unlock( &m_spin );
        assert(!error);
    }
    //! Non-blocking acquire; returns true on success.
    inline bool try_lock() const {
        return pthread_spin_trylock( &m_spin ) == 0;
    }
    ~spinlock(){
        int error = pthread_spin_destroy( &m_spin );
        assert(!error);
    }
    friend class conditional;
}; // End of spinlock
#define SPINLOCK_SUPPORTED 1
#else
//! if spinlock not supported, it is typedef it to a mutex.
typedef mutex spinlock;
#define SPINLOCK_SUPPORTED 0
#endif
/**
* \class conditional
* Wrapper around pthread's condition variable
*/
/**
 * \class conditional
 * Wrapper around pthread's condition variable.  wait()/timedwait()
 * expect the associated mutex to be held by the caller.
 */
class conditional {
private:
    mutable pthread_cond_t m_cond;
public:
    conditional() {
        int error = pthread_cond_init(&m_cond, NULL);
        assert(!error);
    }
    //! Atomically release `mut` and sleep until signalled (spurious
    //! wakeups possible; callers must re-check their predicate).
    inline void wait(const mutex& mut) const {
        int error = pthread_cond_wait(&m_cond, &mut.m_mut);
        assert(!error);
    }
    /**
     * Wait at most `sec` seconds.  Returns 0 on signalled wakeup, or an
     * error code such as ETIMEDOUT from pthread_cond_timedwait.
     */
    inline int timedwait(const mutex& mut, int sec) const {
        struct timespec timeout;
        struct timeval tv;
        struct timezone tz;
        gettimeofday(&tv, &tz);
        // Bug fix: carry the current sub-second time into the absolute
        // deadline.  The original set tv_nsec = 0, which truncated the
        // wait by up to one second.
        timeout.tv_nsec = tv.tv_usec * 1000;
        timeout.tv_sec = tv.tv_sec + sec;
        return pthread_cond_timedwait(&m_cond, &mut.m_mut, &timeout);
    }
    //! Wake one waiter.
    inline void signal() const {
        int error = pthread_cond_signal(&m_cond);
        assert(!error);
    }
    //! Wake all waiters.
    inline void broadcast() const {
        int error = pthread_cond_broadcast(&m_cond);
        assert(!error);
    }
    ~conditional() {
        int error = pthread_cond_destroy(&m_cond);
        assert(!error);
    }
}; // End conditional
/**
* \class semaphore
* Wrapper around pthread's semaphore
*/
/**
 * \class semaphore
 * Thin wrapper around an unnamed POSIX semaphore with initial count 0.
 */
class semaphore {
private:
    mutable sem_t m_sem;
public:
    semaphore() {
        // pshared=0: shared between threads of this process; initial count 0.
        const int rc = sem_init(&m_sem, 0, 0);
        assert(!rc);
    }
    //! Increment the count, waking one waiter if any.
    inline void post() const {
        const int rc = sem_post(&m_sem);
        assert(!rc);
    }
    //! Block until the count is positive, then decrement it.
    inline void wait() const {
        const int rc = sem_wait(&m_sem);
        assert(!rc);
    }
    ~semaphore() {
        const int rc = sem_destroy(&m_sem);
        assert(!rc);
    }
}; // End semaphore
#define atomic_xadd(P, V) __sync_fetch_and_add((P), (V))
#define cmpxchg(P, O, N) __sync_val_compare_and_swap((P), (O), (N))
#define atomic_inc(P) __sync_add_and_fetch((P), 1)
/**
* \class spinrwlock
* rwlock built around "spinning"
* source adapted from http://locklessinc.com/articles/locks/
* "Scalable Reader-Writer Synchronization for Shared-Memory Multiprocessors"
* John Mellor-Crummey and Michael Scott
*/
class spinrwlock {
    // Ticket-based reader/writer lock state packed into one word:
    // `write`/`read` are "now serving" counters, `users` is the next ticket.
    union rwticket {
        unsigned u;
        unsigned short us;
        __extension__ struct {
            unsigned char write;
            unsigned char read;
            unsigned char users;
        } s;
    };
    // NOTE(review): `writing` is a plain bool shared by all lock holders;
    // unlock() dispatches on it, which looks unsafe if readers and a
    // writer ever interleave unlock() calls — confirm intended usage.
    mutable bool writing;
    mutable volatile rwticket l;
    public:
    spinrwlock() {
        memset(const_cast<rwticket*>(&l), 0, sizeof(rwticket));
    }
    inline void writelock() const {
        // Take a ticket (increment `users`) and spin until `write` serves it.
        unsigned me = atomic_xadd(&l.u, (1<<16));
        unsigned char val = me >> 16;
        while (val != l.s.write) sched_yield();
        writing = true;
    }
    inline void wrunlock() const{
        // Advance both serving counters so the next reader or writer may enter.
        rwticket t = *const_cast<rwticket*>(&l);
        t.s.write++;
        t.s.read++;
        *(volatile unsigned short *) (&l) = t.us;
        writing = false;
        __asm("mfence");
    }
    inline void readlock() const {
        // Take a ticket and spin until `read` serves it; then let the next
        // reader in immediately (readers proceed concurrently).
        unsigned me = atomic_xadd(&l.u, (1<<16));
        unsigned char val = me >> 16;
        while (val != l.s.read) sched_yield();
        l.s.read++;
    }
    inline void rdunlock() const {
        // A departing reader allows the next writer ticket to be served.
        atomic_inc(&l.s.write);
    }
    inline void unlock() const {
        if (!writing) rdunlock();
        else wrunlock();
    }
};
#undef atomic_xadd
#undef cmpxchg
#undef atomic_inc
/**
* \class rwlock
* Wrapper around pthread's rwlock
*/
/**
 * \class rwlock
 * Wrapper around pthread's reader-writer lock.  Return codes of the
 * lock/unlock calls are deliberately ignored, as in the original.
 */
class rwlock {
private:
    mutable pthread_rwlock_t m_rwlock;
public:
    rwlock() {
        const int rc = pthread_rwlock_init(&m_rwlock, NULL);
        assert(!rc);
    }
    ~rwlock() {
        const int rc = pthread_rwlock_destroy(&m_rwlock);
        assert(!rc);
    }
    //! Acquire in shared (reader) mode.
    inline void readlock() const { pthread_rwlock_rdlock(&m_rwlock); }
    //! Acquire in exclusive (writer) mode.
    inline void writelock() const { pthread_rwlock_wrlock(&m_rwlock); }
    //! Release the lock (either mode).
    inline void unlock() const { pthread_rwlock_unlock(&m_rwlock); }
    //! Alias for unlock(), for symmetry with spinrwlock.
    inline void rdunlock() const { unlock(); }
    //! Alias for unlock(), for symmetry with spinrwlock.
    inline void wrunlock() const { unlock(); }
}; // End rwlock
/**
* \class barrier
* Wrapper around pthread's barrier
*/
#ifdef __linux__
/**
* \class barrier
* Wrapper around pthread's barrier
*/
/**
 * \class barrier
 * Wrapper around pthread's barrier: wait() blocks until `numthreads`
 * threads have called it, then releases them all.
 */
class barrier {
private:
    mutable pthread_barrier_t m_barrier;
public:
    barrier(size_t numthreads) {
        pthread_barrier_init(&m_barrier, NULL, numthreads);
    }
    ~barrier() {
        pthread_barrier_destroy(&m_barrier);
    }
    inline void wait() const {
        pthread_barrier_wait(&m_barrier);
    }
};
#else
/**
* \class barrier
* Wrapper around pthread's barrier
*/
// Condition-variable based fallback barrier, used where pthread barriers
// are unavailable (non-Linux platforms).
class barrier {
private:
    mutex m;
    int needed;   // number of threads that must arrive before release
    int called;   // number of threads arrived so far in the current round
    conditional c;
    // we need the following to protect against spurious wakeups
    std::vector<unsigned char> waiting;
public:
    barrier(size_t numthreads) {
        needed = (int)numthreads;
        called = 0;
        waiting.resize(numthreads);
        std::fill(waiting.begin(), waiting.end(), 0);
    }
    ~barrier() {}
    inline void wait() {
        m.lock();
        // set waiting;
        // Slot index for this arrival in the current round.
        size_t myid = called;
        waiting[myid] = 1;
        called++;
        if (called == needed) {
            // if I have reached the required limit, wait up. Set waiting
            // to 0 to make sure everyone wakes up
            called = 0;
            // clear all waiting
            std::fill(waiting.begin(), waiting.end(), 0);
            c.broadcast();
        }
        else {
            // while no one has broadcasted, sleep
            // (waiting[myid] guards against spurious wakeups)
            while(waiting[myid]) c.wait(m);
        }
        m.unlock();
    }
};
#endif
// Issue read prefetch hints for [addr, addr+len), one per 64-byte cache line.
inline void prefetch_range(void *addr, size_t len) {
    char *p = (char *) addr;
    char *limit = p + len;
    while (p < limit) {
        __builtin_prefetch(p, 0);  // 0 = prefetch for read
        p += 64;
    }
}
// Issue write prefetch hints for [addr, addr+len), one per 64-byte cache line.
inline void prefetch_range_write(void *addr, size_t len) {
    char *p = (char *) addr;
    char *limit = p + len;
    while (p < limit) {
        __builtin_prefetch(p, 1);  // 1 = prefetch for write
        p += 64;
    }
}
};
#endif
| C++ |
// NOTE, copied from GraphLab v 0.5
#ifndef DENSE_BITSET_HPP
#define DENSE_BITSET_HPP
#include <cstdio>
#include <cstdlib>
#include <stdint.h>
namespace graphchi {
// Fixed-size bit vector with atomic set/clear operations built on GCC
// __sync fetch-and-or / fetch-and-and intrinsics.  Copied from GraphLab v0.5.
class dense_bitset {
public:
    // NOTE(review): arrlen is left uninitialized here — resize() must be
    // called before clear()/setall() on a default-constructed bitset.
    dense_bitset() : array(NULL), len(0) {
        generate_bit_masks();
    }
    dense_bitset(size_t size) : array(NULL), len(size) {
        resize(size);
        clear();
        generate_bit_masks();
    }
    virtual ~dense_bitset() {free(array);}
    // Resize to hold n bits.  NOTE(review): newly allocated words are not
    // zeroed and the realloc() result is not checked for NULL.
    void resize(size_t n) {
        len = n;
        //need len bits
        arrlen = n / (8*sizeof(size_t)) + 1;
        array = (size_t*)realloc(array, sizeof(size_t) * arrlen);
    }
    // Set all bits to 0 (not atomic).
    void clear() {
        for (size_t i = 0;i < arrlen; ++i) array[i] = 0;
    }
    // Set all bits to 1 (not atomic).  NOTE(review): memset is used but
    // <cstring> is not included in this header — presumably pulled in
    // transitively; verify.
    void setall() {
        memset(array, 0xff, arrlen * sizeof(size_t));
    }
    // Test bit b.
    inline bool get(uint32_t b) const{
        uint32_t arrpos, bitpos;
        bit_to_pos(b, arrpos, bitpos);
        return array[arrpos] & (size_t(1) << size_t(bitpos));
    }
    //! Set the bit returning the old value
    inline bool set_bit(uint32_t b) {
        // use CAS to set the bit
        uint32_t arrpos, bitpos;
        bit_to_pos(b, arrpos, bitpos);
        const size_t mask(size_t(1) << size_t(bitpos));
        return __sync_fetch_and_or(array + arrpos, mask) & mask;
    }
    //! Set the state of the bit returning the old value
    inline bool set(uint32_t b, bool value) {
        if (value) return set_bit(b);
        else return clear_bit(b);
    }
    //! Clear the bit returning the old value
    inline bool clear_bit(uint32_t b) {
        // use CAS to set the bit
        uint32_t arrpos, bitpos;
        bit_to_pos(b, arrpos, bitpos);
        const size_t test_mask(size_t(1) << size_t(bitpos));
        const size_t clear_mask(~test_mask);
        return __sync_fetch_and_and(array + arrpos, clear_mask) & test_mask;
    }
    // Clear bits [fromb, tob] (tob inclusive): clears unaligned edges one
    // bit at a time, then memsets the whole words in between.
    inline void clear_bits(uint32_t fromb, uint32_t tob) { // tob is inclusive
        // Careful with alignment
        const size_t bitsperword = sizeof(size_t)*8;
        while((fromb%bitsperword != 0)) {
            clear_bit(fromb);
            if (fromb>=tob) return;
            fromb++;
        }
        while((tob%bitsperword != 0)) {
            clear_bit(tob);
            if(tob<=fromb) return;
            tob--;
        }
        clear_bit(tob);
        uint32_t from_arrpos = fromb / (8 * (int) sizeof(size_t));
        uint32_t to_arrpos = tob / (8 * (int) sizeof(size_t));
        memset(&array[from_arrpos], 0, (to_arrpos-from_arrpos) * (int) sizeof(size_t));
    }
    // Number of bits the bitset holds.
    inline size_t size() const {
        return len;
    }
private:
    // Split bit index b into word index (arrpos) and bit-in-word (bitpos).
    inline static void bit_to_pos(uint32_t b, uint32_t &arrpos, uint32_t &bitpos) {
        // the compiler better optimize this...
        arrpos = b / (8 * (int)sizeof(size_t));
        bitpos = b & (8 * (int)sizeof(size_t) - 1);
    }
    // Precompute per-position masks:
    //   selectbit[i]        — only bit i set
    //   notselectbit[i]     — all bits except i
    //   below_selectedbit[i]— all bits strictly above position i
    void generate_bit_masks() {
        below_selectedbit[0] = size_t(-2);
        for (size_t i = 0;i < 8 * sizeof(size_t) ; ++i) {
            selectbit[i] = size_t(1) << i;
            notselectbit[i] = ~selectbit[i];
            if (i > 0) below_selectedbit[i] = below_selectedbit[i-1] - selectbit[i];
        }
    }
    // Index of the next set bit strictly after position b in `block`;
    // returns 0 on failure
    inline size_t next_bit_in_block(const uint32_t &b, const size_t &block) {
        // use CAS to set the bit
        size_t x = block & below_selectedbit[b] ;
        if (x == 0) return 0;
        else return __builtin_ctzl(x);
    }
    // Index of the lowest set bit in `block`; returns 0 on failure
    inline size_t first_bit_in_block(const size_t &block) {
        // use CAS to set the bit
        if (block == 0) return 0;
        else return __builtin_ctzl(block);
    }
    size_t* array;   // backing word array (malloc/realloc managed)
    size_t len;      // number of bits
    size_t arrlen;   // number of words allocated
    // selectbit[i] has a bit in the i'th position
    size_t selectbit[8 * sizeof(size_t)];
    size_t notselectbit[8 * sizeof(size_t)];
    size_t below_selectedbit[8 * sizeof(size_t)];
};
}
#endif
| C++ |
/**
* @file
* @author Aapo Kyrola <akyrola@cs.cmu.edu>
* @version 1.0
*
* @section LICENSE
*
* Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @section DESCRIPTION
*
* Parses a simple configuration file.
* Why did I write my own?
*/
#ifndef GRAPHCHI_CONFIGFILE_DEF
#define GRAPHCHI_CONFIGFILE_DEF
#include <iostream>
#include <cstdio>
#include <string>
#include <map>
#include <assert.h>
namespace graphchi {
// Code for trimming strings copied from + modified
// http://stackoverflow.com/questions/479080/trim-is-not-part-of-the-standard-c-c-library
const std::string whiteSpaces( " \f\n\r\t\v" );
// Remove trailing characters contained in `trimChars` from `str`, in place.
static void trimRight( std::string &str,
                      const std::string& trimChars )
{
    const std::string::size_type last = str.find_last_not_of( trimChars );
    // npos + 1 wraps to 0, so a fully-trimmable string is erased entirely.
    str.erase( last + 1 );
}
// Remove leading characters contained in `trimChars` from `str`, in place.
static void trimLeft( std::string &str,
                     const std::string& trimChars )
{
    const std::string::size_type first = str.find_first_not_of( trimChars );
    // If nothing survives, first == npos and erase(0, npos) clears str.
    str.erase( 0, first );
}
// Return a copy of `str` with leading and trailing whitespace removed.
static std::string trim( std::string str)
{
    const std::string trimChars = " \f\n\r\t\v";
    const std::string::size_type first = str.find_first_not_of( trimChars );
    if (first == std::string::npos) {
        return std::string();  // nothing but whitespace
    }
    const std::string::size_type last = str.find_last_not_of( trimChars );
    return str.substr( first, last - first + 1 );
}
// Removes \n from the end of line
// Removes \n from the end of line.
// Fix: the original computed strlen(s)-1 and indexed s[-1] when the
// string was empty; guard against zero length.
static void _FIXLINE(char * s) {
    int len = (int)strlen(s);
    if (len > 0 && s[len - 1] == '\n') s[len - 1] = 0;
}
/**
* Returns a key-value map of a configuration file key-values.
* If file is not found, fails with an assertion.
* @param filename filename of the configuration file
* @param secondary_filename secondary filename if the first version is not found.
*/
/**
 * Returns a key-value map of a configuration file key-values.
 * Lines starting with '#' or '%' are comments; other lines are parsed as
 * "key=value" with surrounding whitespace trimmed.
 * If neither file can be opened, fails with an assertion.
 * @param filename filename of the configuration file
 * @param secondary_filename secondary filename if the first version is not found.
 */
static std::map<std::string, std::string> loadconfig(std::string filename, std::string secondary_filename) {
    FILE * f = fopen(filename.c_str(), "r");
    if (f == NULL) {
        f = fopen(secondary_filename.c_str(), "r");
        if (f == NULL) {
            std::cout << "ERROR: Could not read configuration file: " << filename << std::endl;
            std::cout << "Please define environment variable GRAPHCHI_ROOT or run the program from that directory." << std::endl;
        }
        assert(f != NULL);
    }
    char s[4096];
    std::map<std::string, std::string> conf;
    // I like C parsing more than C++, that is why this is such a mess
    while(fgets(s, 4096, f) != NULL) {
        _FIXLINE(s);
        if (s[0] == '#') continue; // Comment
        if (s[0] == '%') continue; // Comment
        char delims[] = "=";
        char * t;
        t = strtok(s, delims);
        const char * ckey = t;
        t = strtok(NULL, delims);
        const char * cval = t;
        if (ckey != NULL && cval != NULL) {
            std::string key = trim(std::string(ckey));
            std::string val = trim(std::string(cval));
            conf[key] = val;
        }
    }
    fclose(f);  // Fix: the file handle was previously leaked.
    return conf;
}
};
#endif
| C++ |
/**
* @file
* @author Aapo Kyrola <akyrola@cs.cmu.edu>
* @version 1.0
*
* @section LICENSE
*
* Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @section DESCRIPTION
*
* I/O manager.
*/
#ifndef DEF_STRIPEDIO_HPP
#define DEF_STRIPEDIO_HPP
#include <iostream>
#include <fcntl.h>
#include <unistd.h>
#include <assert.h>
#include <stdint.h>
#include <pthread.h>
#include <errno.h>
//#include <omp.h>
#include <vector>
#include "logger/logger.hpp"
#include "metrics/metrics.hpp"
#include "util/synchronized_queue.hpp"
#include "util/ioutil.hpp"
#include "util/cmdopts.hpp"
namespace graphchi {
static size_t get_filesize(std::string filename);
struct pinned_file;
/**
* Defines a striped file access.
*/
struct io_descriptor {
    std::string filename;
    std::vector<int> readdescs;      // read fds, one per stripe/io-thread
    std::vector<int> writedescs;     // write fds (empty for read-only sessions)
    pinned_file * pinned_to_memory;  // non-NULL if the file is preloaded to RAM
    int start_mplex;                 // stripe permutation offset (hash of filename)
    bool open;                       // false after close_session()
    bool compressed;
};
enum BLOCK_ACTION { READ, WRITE };
// Very simple ref count system
struct refcountptr {
    char * ptr;          // shared data buffer
    // Outstanding chunk count; decremented as chunks complete (preada_now
    // spins on count>1 before deleting this object).
    volatile int count;
    refcountptr(char * ptr, int count) : ptr(ptr), count(count) {}
};
// Forward declaration
class stripedio;
// A single queued I/O operation (read or write of one stripe chunk).
struct iotask {
    BLOCK_ACTION action;     // READ or WRITE
    int fd;                  // file descriptor to operate on
    int session;             // owning session id in stripedio
    refcountptr * ptr;       // shared buffer for the whole request
    size_t length;           // bytes to transfer for this chunk
    size_t offset;           // offset within the file
    size_t ptroffset;        // offset within the shared buffer
    bool free_after;         // release buffer when the task completes
    stripedio * iomgr;
    bool compressed;
    bool closefd;            // close fd when done (requires free_after)
    volatile int * doneptr;  // if non-NULL, decremented when task completes
    iotask() : action(READ), fd(0), session(0), ptr(NULL), length(0), offset(0), ptroffset(0), free_after(false), iomgr(NULL), compressed(false), closefd(false), doneptr(NULL) {}
    iotask(stripedio * iomgr, BLOCK_ACTION act, int fd, int session, refcountptr * ptr, size_t length, size_t offset, size_t ptroffset, bool free_after, bool compressed, bool closefd=false) :
    action(act), fd(fd), session(session), ptr(ptr),length(length), offset(offset), ptroffset(ptroffset), free_after(free_after), iomgr(iomgr),compressed(compressed), closefd(closefd) {
        if (closefd) assert(free_after);
        doneptr = NULL;
    }
};
// Per-io-thread state shared with io_thread_loop.
struct thrinfo {
    synchronized_queue<iotask> * readqueue;
    synchronized_queue<iotask> * commitqueue;  // write tasks
    synchronized_queue<iotask> * prioqueue;    // prioritized reads (preada_now)
    bool running;              // set to false by ~stripedio to stop the thread
    metrics * m;
    volatile int pending_writes;
    volatile int pending_reads;
    int mplex;                 // which multiplex partition this thread serves
};
// Forward declaration
static void * io_thread_loop(void * _info);
// One contiguous piece of a striped request: `len` bytes at buffer offset
// `offset`, to be handled by io thread `mplex_thread`.
struct stripe_chunk {
    int mplex_thread;
    size_t offset;
    size_t len;
    stripe_chunk(int mplex_thread, size_t offset, size_t len) : mplex_thread(mplex_thread), offset(offset), len(len) {}
};
// State for a pipelined (streaming) read, consumed by stream_read_loop.
struct streaming_task {
    stripedio * iomgr;
    int session;
    size_t len;               // total bytes to read
    volatile size_t curpos;   // bytes read so far (updated by the reader thread)
    char ** buf;
    // NOTE(review): the default constructor leaves all members uninitialized.
    streaming_task() {}
    streaming_task(stripedio * iomgr, int session, size_t len, char ** buf) : iomgr(iomgr), session(session), len(len), curpos(0), buf(buf) {}
};
// A file preloaded ("pinned") into memory.
struct pinned_file {
    std::string filename;
    size_t length;    // file size in bytes
    uint8_t * data;   // in-memory copy of the file contents
    bool touched;     // true if modified in memory and pending commit to disk
};
// Forward declaration
static void * stream_read_loop(void * _info);
class stripedio {
std::vector<io_descriptor *> sessions;
mutex mlock;
int stripesize;
int multiplex;
std::string multiplex_root;
bool disable_preloading;
std::vector< synchronized_queue<iotask> > mplex_readtasks;
std::vector< synchronized_queue<iotask> > mplex_writetasks;
std::vector< synchronized_queue<iotask> > mplex_priotasks;
std::vector< pthread_t > threads;
std::vector< thrinfo * > thread_infos;
metrics &m;
/* Memory-pinned files */
std::vector<pinned_file *> preloaded_files;
mutex preload_lock;
size_t preloaded_bytes;
size_t max_preload_bytes;
int niothreads; // threads per mplex
public:
/**
 * Construct the I/O manager: read configuration (stripe size, number of
 * multiplexed disks, io threads per disk) and start the io threads.
 */
stripedio( metrics &_m) : m(_m) {
    disable_preloading = false;
    stripesize = get_option_int("io.stripesize", 4096 * 1024 / 2);
    preloaded_bytes = 0;
    max_preload_bytes = 1024 * 1024 * get_option_long("preload.max_megabytes", 0);
    if (max_preload_bytes > 0) {
        logstream(LOG_INFO) << "Preloading maximum " << max_preload_bytes << " bytes." << std::endl;
    }
    multiplex = get_option_int("multiplex", 1);
    if (multiplex>1) {
        multiplex_root = get_option_string("multiplex_root", "<not-set>");
    } else {
        multiplex_root = "";
        // Single disk: use one giant stripe, i.e. effectively no striping.
        stripesize = 1024*1024*1024;
    }
    m.set("stripesize", (size_t)stripesize);
    // Start threads (niothreads is now threads per multiplex)
    niothreads = get_option_int("niothreads", 1);
    m.set("niothreads", (size_t)niothreads);
    logstream(LOG_DEBUG) << "Start io-manager with " << niothreads << " threads." << std::endl;
    // Each multiplex partition has its own queues
    for(int i=0; i < multiplex * niothreads; i++) {
        mplex_readtasks.push_back(synchronized_queue<iotask>());
        mplex_writetasks.push_back(synchronized_queue<iotask>());
        mplex_priotasks.push_back(synchronized_queue<iotask>());
    }
    int k = 0;
    for(int i=0; i < multiplex; i++) {
        for(int j=0; j < niothreads; j++) {
            thrinfo * cthreadinfo = new thrinfo();
            cthreadinfo->commitqueue = &mplex_writetasks[k];
            cthreadinfo->readqueue = &mplex_readtasks[k];
            cthreadinfo->prioqueue = &mplex_priotasks[k];
            cthreadinfo->running = true;
            cthreadinfo->pending_writes = 0;
            cthreadinfo->pending_reads = 0;
            cthreadinfo->mplex = i;
            cthreadinfo->m = &m;
            thread_infos.push_back(cthreadinfo);
            pthread_t iothread;
            int ret = pthread_create(&iothread, NULL, io_thread_loop, cthreadinfo);
            threads.push_back(iothread);
            // NOTE(review): pthread_create returns 0 on success and a
            // positive error code on failure, so ret>=0 never fires —
            // TODO tighten to ret==0.
            assert(ret>=0);
        }
    }
}
/**
 * Shut down: stop and join all io threads, close open sessions and
 * release preloaded file buffers.
 */
~stripedio() {
    int mplex = (int) thread_infos.size();
    // Signal all io threads to exit their loops.
    for(int i=0; i<mplex; i++) {
        thread_infos[i]->running=false;
    }
    size_t nthreads = threads.size();
    for(unsigned int i=0; i<nthreads; i++) {
        pthread_join(threads[i], NULL);
    }
    for(int i=0; i<mplex; i++) {
        delete thread_infos[i];
    }
    for(int j=0; j<(int)sessions.size(); j++) {
        if (sessions[j] != NULL) {
            close_session(j);
            delete sessions[j];
            sessions[j] = NULL;
        }
    }
    for(std::vector<pinned_file *>::iterator it=preloaded_files.begin();
        it != preloaded_files.end(); ++it) {
        pinned_file * preloaded = (*it);
        // Fix: data is allocated with malloc() (see allow_preloading),
        // so it must be released with free(), not delete.
        free(preloaded->data);
        delete preloaded;
    }
}
// Globally enable/disable memory preloading of files.
void set_disable_preloading(bool b) {
    disable_preloading = b;
    if (b) logstream(LOG_INFO) << "Disabled preloading." << std::endl;
}
// True when I/O is striped over more than one disk/partition.
bool multiplexed() {
    return multiplex>1;
}
/**
 * Debug helper: print the read and write file descriptors of a session.
 */
void print_session(int session) {
    for(int i=0; i<multiplex; i++) {
        std::cout << "multiplex: " << multiplex << std::endl;
        std::cout << "Read desc: " << sessions[session]->readdescs[i] << std::endl;
    }
    for(int i=0; i<(int)sessions[session]->writedescs.size(); i++) {
        std::cout << "multiplex: " << multiplex << std::endl;
        // Fix: this loop iterates the WRITE descriptors but was labeled
        // "Read desc:" (copy-paste error).
        std::cout << "Write desc: " << sessions[session]->writedescs[i] << std::endl;
    }
}
// Compute a hash for filename which is used for
// permuting the stripes. It is important the permutation
// is same regardless of when the file is opened.
// Compute a deterministic, non-negative hash of the filename, used to
// permute the stripes.  It is important the permutation is the same
// every time the same file is opened.
int hash(std::string filename) {
    int h = 1;
    for (size_t i = 0; i < filename.size(); i++) {
        h = 31 * h + filename[i];
    }
    return std::abs(h);
}
/**
 * Open a (possibly striped) file session and return its session id.
 * If the file is preloaded into memory, no descriptors are opened.
 * One read fd is opened per io thread and stripe; with multiplex == 1 an
 * extra fd is opened for synchronous access (see preada_now).
 */
int open_session(std::string filename, bool readonly=false, bool compressed=false) {
    mlock.lock();
    // FIXME: known memory leak: sessions table is never shrunk
    int session_id = (int) sessions.size();
    io_descriptor * iodesc = new io_descriptor();
    iodesc->open = true;
    iodesc->compressed = compressed;
    iodesc->pinned_to_memory = is_preloaded(filename);
    iodesc->start_mplex = hash(filename) % multiplex;
    sessions.push_back(iodesc);
    mlock.unlock();
    if (NULL != iodesc->pinned_to_memory) {
        logstream(LOG_INFO) << "Opened preloaded session: " << filename << std::endl;
        return session_id;
    }
    for(int i=0; i<multiplex; i++) {
        std::string fname = multiplexprefix(i) + filename;
        for(int j=0; j<niothreads+(multiplex == 1 ? 1 : 0); j++) { // Hack to have one fd for synchronous
            int rddesc = open(fname.c_str(), (readonly ? O_RDONLY : O_RDWR));
            if (rddesc < 0) logstream(LOG_ERROR) << "Could not open: " << fname << " session: " << session_id
                << " error: " << strerror(errno) << std::endl; assert(rddesc>=0);
            iodesc->readdescs.push_back(rddesc);
#ifdef F_NOCACHE
            // Bypass the OS buffer cache (macOS) for read-write sessions.
            if (!readonly)
                fcntl(rddesc, F_NOCACHE, 1);
#endif
            if (!readonly) {
                int wrdesc = rddesc; // Change by Aapo: Aug 11, 2012. I don't think we need separate wrdesc?
                if (wrdesc < 0) logstream(LOG_ERROR) << "Could not open for writing: " << fname << " session: " << session_id
                    << " error: " << strerror(errno) << std::endl;
                assert(wrdesc>=0);
#ifdef F_NOCACHE
                fcntl(wrdesc, F_NOCACHE, 1);
#endif
                iodesc->writedescs.push_back(wrdesc);
            }
        }
    }
    iodesc->filename = filename;
    if (iodesc->writedescs.size() > 0) {
        // logstream(LOG_INFO) << "Opened write-session: " << session_id << "(" << iodesc->writedescs[0] << ") for " << filename << std::endl;
    } else {
        // logstream(LOG_INFO) << "Opened read-session: " << session_id << "(" << iodesc->readdescs[0] << ") for " << filename << std::endl;
    }
    return session_id;
}
/**
 * Close all descriptors of a session.  Idempotent: a second call on the
 * same session is a no-op.  Note that write descriptors alias the read
 * descriptors (see open_session), so closing readdescs closes everything.
 */
void close_session(int session) {
    mlock.lock();
    // Note: currently io-descriptors are left into the vertex array
    // in purpose to make managed memory work. Should be fixed as this is
    // a (relatively minor) memory leak.
    bool wasopen;
    io_descriptor * iodesc = sessions[session];
    wasopen = iodesc->open;
    iodesc->open = false;
    mlock.unlock();
    if (wasopen) {
        // std::cout << "Closing: " << iodesc->filename << " " << iodesc->readdescs[0] << std::endl;
        for(std::vector<int>::iterator it=iodesc->readdescs.begin(); it!=iodesc->readdescs.end(); ++it) {
            close(*it);
        }
        // for(std::vector<int>::iterator it=iodesc->writedescs.begin(); it!=iodesc->writedescs.end(); ++it) {
        //    close(*it);
        // }
    }
}
// Map a file offset to the multiplex (disk) holding its stripe, shifted
// by the session's filename-hash start offset.
int mplex_for_offset(int session, size_t off) {
    return ((int) (off / stripesize) + sessions[session]->start_mplex) % multiplex;
}
// Returns vector of <mplex, offset>
// Split the byte range [off, off+nbytes) into stripe-aligned chunks and
// assign each to an io thread (a random thread within its multiplex).
// Returns vector of <mplex, offset>
std::vector< stripe_chunk > stripe_offsets(int session, size_t nbytes, size_t off) {
    size_t end = off+nbytes;
    size_t idx = off;
    size_t bufoff = 0;   // offset inside the caller's buffer
    std::vector<stripe_chunk> stripelist;
    while(idx<end) {
        size_t blockoff = idx % stripesize;
        // Chunk runs to the end of the stripe or the end of the range.
        size_t blocklen = std::min(stripesize-blockoff, end-idx);
        int mplex_thread = (int) mplex_for_offset(session, idx) * niothreads + (int) (random() % niothreads);
        stripelist.push_back(stripe_chunk(mplex_thread, bufoff, blocklen));
        bufoff += blocklen;
        idx += blocklen;
    }
    return stripelist;
}
/**
 * Asynchronous striped read of nbytes at file offset off into tbuf.
 * Each chunk is queued to its io thread; if doneptr is non-NULL it is
 * decremented as chunks complete (see iotask.doneptr).
 */
template <typename T>
void preada_async(int session, T * tbuf, size_t nbytes, size_t off, volatile int * doneptr = NULL) {
    std::vector<stripe_chunk> stripelist = stripe_offsets(session, nbytes, off);
    if (compressed_session(session)) {
        // Compressed sessions support only a single whole-file chunk.
        assert(stripelist.size() == 1);
        assert(off == 0);
    }
    refcountptr * refptr = new refcountptr((char*)tbuf, (int)stripelist.size());
    for(int i=0; i<(int)stripelist.size(); i++) {
        stripe_chunk chunk = stripelist[i];
        __sync_add_and_fetch(&thread_infos[chunk.mplex_thread]->pending_reads, 1);
        iotask task = iotask(this, READ, sessions[session]->readdescs[chunk.mplex_thread],
                             session,
                             refptr, chunk.len, chunk.offset+off, chunk.offset, false,
                             compressed_session(session));
        task.doneptr = doneptr;
        mplex_readtasks[chunk.mplex_thread].push(task);
    }
}
/* Used for pipelined read */
/* Used for pipelined read: spawns a thread running stream_read_loop. */
void launch_stream_reader(streaming_task * task) {
    pthread_t t;
    int ret = pthread_create(&t, NULL, stream_read_loop, (void*)task);
    // NOTE(review): the thread handle is discarded and never joined —
    // confirm the thread is intended to outlive this call unmanaged.
    assert(ret>=0);
}
/**
* Pinned sessions process files that are permanently
* pinned to memory.
*/
// True if this session's file is pinned in memory (implicit
// pointer-to-bool conversion of pinned_to_memory).
bool pinned_session(int session) {
    return sessions[session]->pinned_to_memory;
}
// True if this session reads/writes via the compressed I/O path.
bool compressed_session(int session) {
    return sessions[session]->compressed;
}
/**
* Call to allow files to be preloaded. Note: using this requires
* that all files are accessed with same path. This is true if
* standard chifilenames.hpp -given filenames are used.
*/
/**
 * Call to allow a file to be preloaded into memory.
 * NOTE(review): the actual preloading implementation is commented out,
 * and assert(max_preload_bytes == 0) aborts if preloading is configured
 * on — i.e. this feature is currently disabled.
 */
void allow_preloading(std::string filename) {
    if (disable_preloading) {
        return;
    }
    preload_lock.lock();
    assert(max_preload_bytes == 0);
    /* size_t filesize = get_filesize(filename);
     if (preloaded_bytes + filesize <= max_preload_bytes) {
     preloaded_bytes += filesize;
     m.set("preload_bytes", preloaded_bytes);
     pinned_file * pfile = new pinned_file();
     pfile->filename = filename;
     pfile->length = filesize;
     pfile->data = (uint8_t*) malloc(filesize);
     pfile->touched = false;
     assert(pfile->data != NULL);
     int fid = open(filename.c_str(), O_RDONLY);
     if (fid < 0) {
     logstream(LOG_ERROR) << "Could not read file: " << filename
     << " error: " << strerror(errno) << std::endl;
     }
     assert(fid >= 0);
     logstream(LOG_INFO) << "Preloading: " << filename << std::endl;
     preada(fid, pfile->data, filesize, 0);
     close(fid);
     preloaded_files.push_back(pfile);
     }*/
    preload_lock.unlock();
}
/**
 * Write every modified ("touched") preloaded file back to disk and clear
 * its dirty flag.  Files that fail to open are logged and skipped.
 */
void commit_preloaded() {
    for(std::vector<pinned_file *>::iterator it=preloaded_files.begin();
        it != preloaded_files.end(); ++it) {
        pinned_file * preloaded = (*it);
        if (preloaded->touched) {
            logstream(LOG_INFO) << "Commit preloaded file: " << preloaded->filename << std::endl;
            int fid = open(preloaded->filename.c_str(), O_WRONLY);
            if (fid < 0) {
                logstream(LOG_ERROR) << "Could not read file: " << preloaded->filename
                << " error: " << strerror(errno) << std::endl;
                continue;
            }
            pwritea(fid, preloaded->data, preloaded->length, 0);
            close(fid);
        }
        preloaded->touched = false;
    }
}
// Return the pinned_file for `filename` if it has been preloaded into
// memory, or NULL otherwise.  Lookup is by exact filename match.
pinned_file * is_preloaded(std::string filename) {
    preload_lock.lock();
    pinned_file * preloaded = NULL;
    for(std::vector<pinned_file *>::iterator it=preloaded_files.begin();
        it != preloaded_files.end(); ++it) {
        if (filename == (*it)->filename) {
            preloaded = *it;
            break;
        }
    }
    preload_lock.unlock();
    return preloaded;
}
// Note: data is freed after write!
// Asynchronous striped write of nbytes at file offset off.
// Note: data is freed after write (when free_after is set)!
template <typename T>
void pwritea_async(int session, T * tbuf, size_t nbytes, size_t off, bool free_after, bool close_fd=false) {
    std::vector<stripe_chunk> stripelist = stripe_offsets(session, nbytes, off);
    refcountptr * refptr = new refcountptr((char*)tbuf, (int) stripelist.size());
    if (compressed_session(session)) {
        // Compressed sessions support only a single whole-file chunk.
        assert(stripelist.size() == 1);
        assert(off == 0);
    }
    for(int i=0; i<(int)stripelist.size(); i++) {
        stripe_chunk chunk = stripelist[i];
        __sync_add_and_fetch(&thread_infos[chunk.mplex_thread]->pending_writes, 1);
        mplex_writetasks[chunk.mplex_thread].push(iotask(this, WRITE, sessions[session]->writedescs[chunk.mplex_thread], session,
                                                         refptr, chunk.len, chunk.offset+off, chunk.offset, free_after, compressed_session(session),
                                                         close_fd));
    }
}
/**
 * Synchronous (blocking) read of nbytes at offset off into tbuf.
 * With multiplexing the chunks go through the prioritized task queues and
 * the caller spins until all complete; otherwise a direct preada() is
 * issued on the extra synchronous fd (see the open_session fd hack).
 */
template <typename T>
void preada_now(int session, T * tbuf, size_t nbytes, size_t off) {
    metrics_entry me = m.start_time();
    if (compressed_session(session)) {
        // Compressed sessions do not support multiplexing for now
        assert(off == 0);
        read_compressed(sessions[session]->readdescs[0], tbuf, nbytes);
        m.stop_time(me, "preada_now", false);
        return;
    }
    if (multiplex > 1) {
        std::vector<stripe_chunk> stripelist = stripe_offsets(session, nbytes, off);
        size_t checklen=0;
        refcountptr * refptr = new refcountptr((char*)tbuf, (int) stripelist.size());
        refptr->count++; // Take a reference so we can spin on it
        for(int i=0; i < (int)stripelist.size(); i++) {
            stripe_chunk chunk = stripelist[i];
            __sync_add_and_fetch(&thread_infos[chunk.mplex_thread]->pending_reads, 1);
            // Use prioritized task queue
            mplex_priotasks[chunk.mplex_thread].push(iotask(this, READ, sessions[session]->readdescs[chunk.mplex_thread], session,
                                                            refptr, chunk.len, chunk.offset+off, chunk.offset, false,
                                                            false));
            checklen += chunk.len;
        }
        assert(checklen == nbytes);
        // Spin until only our own extra reference remains.
        while(refptr->count>1) {
            usleep(5000);
        }
        delete refptr;
    } else {
        // readdescs[threads.size()] is the extra fd reserved for
        // synchronous access (opened in open_session when multiplex == 1).
        preada(sessions[session]->readdescs[threads.size()], tbuf, nbytes, off);
    }
    m.stop_time(me, "preada_now", false);
}
/**
 * Synchronous (blocking) striped write of nbytes at offset off from tbuf.
 */
template <typename T>
void pwritea_now(int session, T * tbuf, size_t nbytes, size_t off) {
    metrics_entry me = m.start_time();
    if (compressed_session(session)) {
        // Compressed sessions do not support multiplexing for now
        assert(off == 0);
        write_compressed(sessions[session]->writedescs[0], tbuf, nbytes);
        m.stop_time(me, "pwritea_now", false);
        return;
    }
    std::vector<stripe_chunk> stripelist = stripe_offsets(session, nbytes, off);
    size_t checklen=0;   // sanity: chunks must cover exactly nbytes
    for(int i=0; i<(int)stripelist.size(); i++) {
        stripe_chunk chunk = stripelist[i];
        pwritea(sessions[session]->writedescs[chunk.mplex_thread], (char*)tbuf+chunk.offset, chunk.len, chunk.offset+off);
        checklen += chunk.len;
    }
    assert(checklen == nbytes);
    m.stop_time(me, "pwritea_now", false);
}
/**
 * Memory managed version of the I/O functions.
 */
/**
 * Asynchronous write that respects memory-pinned sessions: a pinned
 * session's buffer is the in-memory file buffer itself, so no disk
 * write happens — the session is only flagged dirty.
 */
template <typename T>
void managed_pwritea_async(int session, T ** tbuf, size_t nbytes, size_t off, bool free_after, bool close_fd=false) {
    if (!pinned_session(session)) {
        pwritea_async(session, *tbuf, nbytes, off, free_after, close_fd);
    } else {
        // Do nothing but mark the descriptor as 'dirty'
        sessions[session]->pinned_to_memory->touched = true;
    }
}
/**
 * Blocking read that respects pinned sessions: for a pinned session no
 * I/O is done; *tbuf is redirected into the in-memory file buffer.
 */
template <typename T>
void managed_preada_now(int session, T ** tbuf, size_t nbytes, size_t off) {
    if (!pinned_session(session)) {
        preada_now(session, *tbuf, nbytes, off);
    } else {
        io_descriptor * iodesc = sessions[session];
        *tbuf = (T*) (iodesc->pinned_to_memory->data + off);
    }
}
/**
 * Blocking write that respects pinned sessions: pinned sessions are
 * only flagged dirty (the data already lives in the pinned buffer).
 */
template <typename T>
void managed_pwritea_now(int session, T ** tbuf, size_t nbytes, size_t off) {
    if (!pinned_session(session)) {
        pwritea_now(session, *tbuf, nbytes, off);
    } else {
        // Do nothing but mark the descriptor as 'dirty'
        sessions[session]->pinned_to_memory->touched = true;
    }
}
/**
 * Allocates (or aliases) a buffer for session data: plain malloc for
 * disk-backed sessions, a pointer into the pinned in-memory buffer at
 * offset noff otherwise. Release with managed_release().
 */
template<typename T>
void managed_malloc(int session, T ** tbuf, size_t nbytes, size_t noff) {
    if (!pinned_session(session)) {
        *tbuf = (T*) malloc(nbytes);
    } else {
        io_descriptor * iodesc = sessions[session];
        *tbuf = (T*) (iodesc->pinned_to_memory->data + noff);
    }
}
/**
* @param doneptr is decremented to zero when task is ready
*/
/**
 * Asynchronous read that respects pinned sessions.
 * @param doneptr is decremented to zero when task is ready; for a
 *        pinned session this happens immediately, since *tbuf is just
 *        redirected into the in-memory buffer.
 */
template <typename T>
void managed_preada_async(int session, T ** tbuf, size_t nbytes, size_t off, volatile int * doneptr = NULL) {
    if (!pinned_session(session)) {
        preada_async(session, *tbuf, nbytes, off, doneptr);
    } else {
        io_descriptor * iodesc = sessions[session];
        *tbuf = (T*) (iodesc->pinned_to_memory->data + off);
        if (doneptr != NULL) {
            __sync_sub_and_fetch(doneptr, 1);
        }
    }
}
/**
 * Releases a buffer obtained from managed_malloc()/managed_preada_now().
 * Pinned-session buffers point into the shared in-memory file buffer
 * and must not be freed; all others were malloc'd by this manager.
 */
template <typename T>
void managed_release(int session, T ** ptr) {
    bool owns_buffer = !pinned_session(session);
    if (owns_buffer) {
        assert(*ptr != NULL);
        free(*ptr);
    }
    *ptr = NULL;
}
/**
 * Truncates the session's file to nbytes. Not supported for pinned or
 * striped (multiplexed) sessions; aborts on failure.
 */
void truncate(int session, size_t nbytes) {
    assert(!pinned_session(session));
    assert(multiplex <= 1); // We do not support truncating on multiplex yet
    int stat = ftruncate(sessions[session]->writedescs[0], nbytes);
    if (stat != 0) {
        logstream(LOG_ERROR) << "Could not truncate " << sessions[session]->filename <<
        " error: " << strerror(errno) << std::endl;
        assert(false);
    }
}
/**
 * Blocks until every I/O thread has completed all of its pending
 * (asynchronous) reads.
 * (Removed a 'loops' counter that was incremented but never used.)
 */
void wait_for_reads() {
    metrics_entry me = m.start_time();
    int mplex = (int) thread_infos.size();
    for(int i=0; i<mplex; i++) {
        // pending_reads is decremented atomically by the I/O threads.
        while(thread_infos[i]->pending_reads > 0) {
            usleep(10000);
        }
    }
    m.stop_time(me, "stripedio_wait_for_reads", false);
}
/**
 * Blocks until every I/O thread has flushed all of its queued writes.
 */
void wait_for_writes() {
    metrics_entry me = m.start_time();
    size_t nthreads = thread_infos.size();
    for(size_t t = 0; t < nthreads; t++) {
        // Poll the per-thread counter maintained by the commit threads.
        while (thread_infos[t]->pending_writes > 0) {
            usleep(10000);
        }
    }
    m.stop_time(me, "stripedio_wait_for_writes", false);
}
/**
 * Returns the stripe subdirectory prefix for the given stripe index,
 * e.g. "<multiplex_root>3/". With no multiplexing there is no
 * per-stripe subdirectory and the prefix is empty.
 */
std::string multiplexprefix(int stripe) {
    if (multiplex > 1) {
        char mstr[255];
        // snprintf bounds the write (sprintf had no bound).
        snprintf(mstr, sizeof(mstr), "%d/", 1+stripe%multiplex);
        return multiplex_root + std::string(mstr);
    } else return "";
}
// Picks a uniformly random stripe prefix (used to spread files
// across stripe directories).
std::string multiplexprefix_random() {
    return multiplexprefix((int)random() % multiplex);
}
};
/**
 * Main loop of an I/O worker thread. Pops read/write tasks from the
 * thread's queues — prioritized reads first, commits (writes) only when
 * no reads are pending — executes them, and maintains the per-thread
 * pending counters and shared refcounted buffers.
 *
 * Fix: the read branch released the refcountptr with free() although it
 * is allocated with 'new' (see preada_now); this is a new/free mismatch
 * (undefined behavior). The write branch already used 'delete'.
 */
static void * io_thread_loop(void * _info) {
    iotask task;
    thrinfo * info = (thrinfo*)_info;
    int ntasks = 0;
    // logstream(LOG_INFO) << "Thread for multiplex :" << info->mplex << " starting." << std::endl;
    while(info->running) {
        bool success;
        if (info->pending_reads>0) { // Prioritize read queue
            success = info->prioqueue->safepop(&task);
            if (!success) {
                success = info->readqueue->safepop(&task);
            }
        } else {
            success = info->commitqueue->safepop(&task);
        }
        if (success) {
            ++ntasks;
            if (task.action == WRITE) { // Write
                metrics_entry me = info->m->start_time();
                if (task.compressed) {
                    assert(task.offset == 0);
                    write_compressed(task.fd, task.ptr->ptr, task.length);
                } else {
                    pwritea(task.fd, task.ptr->ptr + task.ptroffset, task.length, task.offset);
                }
                if (task.free_after) {
                    // Thread-safe memory management: the last referent
                    // frees the data buffer and the refcount holder.
                    if (__sync_sub_and_fetch(&task.ptr->count, 1) == 0) {
                        free(task.ptr->ptr);
                        delete task.ptr;
                        if (task.closefd) {
                            task.iomgr->close_session(task.session);
                        }
                    }
                }
                __sync_sub_and_fetch(&info->pending_writes, 1);
                info->m->stop_time(me, "commit_thr");
            } else {
                if (task.compressed) {
                    assert(task.offset == 0);
                    read_compressed(task.fd, task.ptr->ptr, task.length);
                } else {
                    preada(task.fd, task.ptr->ptr+task.ptroffset, task.length, task.offset);
                }
                __sync_sub_and_fetch(&info->pending_reads, 1);
                if (__sync_sub_and_fetch(&task.ptr->count, 1) == 0) {
                    // The read buffer itself belongs to the caller; only the
                    // refcount holder is released here. It was allocated with
                    // 'new', so it must be deleted, not free()d.
                    delete task.ptr;
                    if (task.closefd) {
                        task.iomgr->close_session(task.session);
                    }
                }
            }
            if (task.doneptr != NULL) {
                __sync_sub_and_fetch(task.doneptr, 1);
            }
        } else {
            usleep(50000); // 50 ms
        }
    }
    // logstream(LOG_INFO) << "I/O thread exists. Handled " << ntasks << " i/o tasks." << std::endl;
    return NULL;
}
/**
 * Thread function that streams an entire session sequentially into the
 * caller-provided buffer, advancing task->curpos atomically so a
 * consumer thread can follow the progress.
 */
static void * stream_read_loop(void * _info) {
    streaming_task * task = (streaming_task*)_info;
    timeval start, end;
    gettimeofday(&start, NULL);
    size_t bufsize = 32*1024*1024; // 32 megs
    char * tbuf;
    /**
     * If this is not pinned, we just malloc the
     * buffer. Otherwise - should just return pointer
     * to the in-memory file buffer.
     */
    if (task->iomgr->pinned_session(task->session)) {
        // Data already resides in memory: mark everything as read.
        __sync_add_and_fetch(&task->curpos, task->len);
        return NULL;
    }
    tbuf = *task->buf;
    while(task->curpos < task->len) {
        // Read in chunks of at most bufsize bytes.
        size_t toread = std::min((size_t)task->len - (size_t)task->curpos, (size_t)bufsize);
        task->iomgr->preada_now(task->session, tbuf + task->curpos, toread, task->curpos);
        __sync_add_and_fetch(&task->curpos, toread);
    }
    gettimeofday(&end, NULL);
    return NULL;
}
static size_t get_filesize(std::string filename) {
std::string fname = filename;
int f = open(fname.c_str(), O_RDONLY);
if (f < 0) {
logstream(LOG_ERROR) << "Could not open file " << filename << " error: " << strerror(errno) << std::endl;
assert(false);
}
off_t sz = lseek(f, 0, SEEK_END);
close(f);
return sz;
}
}
#endif
| C++ |
/**
* Simple tool for creating input for streaming graph demos.
* An edgelist is read and two files are created: base-graph and
* streaming input file. Streaming input is shuffled.
* NOTE: This is unsupported code and requires plenty of memory.
*/
#include <assert.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <algorithm>
#include <iostream>
#include <iterator>
#include <random>
#include <string>
#include <vector>
// A single directed edge of the input edge list.
struct edge {
    unsigned int from;  // source vertex id
    unsigned int to;    // destination vertex id
};
// Removes \n from the end of line
// Removes a trailing '\n' from the line, in place.
// Fixed: for an empty string the original computed len == -1 and
// read s[-1], which is undefined behavior.
void FIXLINE(char * s) {
    int len = (int) strlen(s);
    if (len > 0 && s[len - 1] == '\n') s[len - 1] = 0;
}
/**
 * Reads an edge list and writes two files: "<input>_base" (the base
 * graph) and "<input>_stream" (the remaining edges, shuffled).
 *
 * Fixes: the argument-count check tested argc != 3 but argv[3] was
 * used (three arguments require argc == 4), and execution continued
 * after printing usage; fopen/strtok results are now checked; a zero
 * sampling ratio no longer causes division by zero; std::random_shuffle
 * (removed in C++17) is replaced by std::shuffle.
 */
int main(int argc, const char ** argv) {
    if (argc != 4) {
        std::cout << "Usage: [inputfile-edgelist] [stream-edges-per-base-edges] [max-base-id]" << std::endl;
        return 1;
    }
    const char * input = argv[1];
    int stream_edges_per_base_edges = atoi(argv[2]);
    unsigned int maxbaseid = (unsigned int) atoi(argv[3]);
    if (stream_edges_per_base_edges <= 0) {
        std::cout << "stream-edges-per-base-edges must be positive" << std::endl;
        return 1;
    }
    std::cout << "Processing: " << input << std::endl;
    FILE * inf = fopen(input, "r");
    if (inf == NULL) {
        std::cout << "Could not load :" << input << " error: " << strerror(errno) << std::endl;
        return 1;
    }
    std::vector<edge> base_edges;
    std::vector<edge> stream_edges;
    base_edges.reserve(1e6);
    stream_edges.reserve(1e6);
    std::cout << "Reading in edge list format!" << std::endl;
    char s[1024];
    while(fgets(s, 1024, inf) != NULL) {
        FIXLINE(s);
        if (s[0] == '#') continue; // Comment
        if (s[0] == '%') continue; // Comment
        char delims[] = "\t ";
        char * t = strtok(s, delims);
        if (t == NULL) continue;   // Skip blank lines
        edge e;
        e.from = (unsigned int) atoi(t);
        t = strtok(NULL, delims);
        if (t == NULL) continue;   // Skip malformed lines
        e.to = (unsigned int) atoi(t);
        // Roughly 1 in stream_edges_per_base_edges of the small-id edges
        // goes into the base graph; everything else is streamed.
        if (std::rand() % stream_edges_per_base_edges == 0 && e.from <= maxbaseid && e.to <= maxbaseid) base_edges.push_back(e);
        else stream_edges.push_back(e);
    }
    fclose(inf);
    std::cout << "Number of edges in base: " << base_edges.size() << std::endl;
    std::cout << "Number of edges to stream: " << stream_edges.size() << std::endl;
    std::string base_file_name = std::string(input) + "_base";
    std::string stream_file_name = std::string(input) + "_stream";
    FILE * basef = fopen(base_file_name.c_str(), "w");
    assert(basef != NULL);
    for(std::vector<edge>::iterator it=base_edges.begin();
        it != base_edges.end(); ++it) {
        fprintf(basef, "%u %u\n", it->from, it->to);
    }
    fclose(basef);
    /* Shuffle so streamed edges arrive in random order */
    std::shuffle(stream_edges.begin(), stream_edges.end(), std::mt19937(std::rand()));
    FILE * strmf = fopen(stream_file_name.c_str(), "w");
    assert(strmf != NULL);
    for(std::vector<edge>::iterator it=stream_edges.begin();
        it != stream_edges.end(); ++it) {
        fprintf(strmf, "%u %u\n", it->from, it->to);
    }
    fclose(strmf);
    return 0;
}
| C++ |
/**
* @file
* @author Aapo Kyrola <akyrola@cs.cmu.edu>
* @version 1.0
*
* @section LICENSE
*
* Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @section DESCRIPTION
*
* Template for GraphChi applications. To create a new application, duplicate
* this template.
*/
#include <string>
#include "graphchi_basic_includes.hpp"
using namespace graphchi;
/**
* Type definitions. Remember to create suitable graph shards using the
* Sharder-program.
*/
// Placeholder types: replace with the real vertex/edge value types
// before compiling (this file is a template to copy and adapt).
typedef my_vertex_type VertexDataType;
typedef my_edge_type EdgeDataType;
/**
* GraphChi programs need to subclass GraphChiProgram<vertex-type, edge-type>
* class. The main logic is usually in the update function.
*/
/**
 * GraphChi programs need to subclass GraphChiProgram<vertex-type, edge-type>
 * class. The main logic is usually in the update function.
 */
struct MyGraphChiProgram : public GraphChiProgram<VertexDataType, EdgeDataType> {
    /**
     * Vertex update function: initialize on iteration 0, compute on
     * later iterations.
     * Fixed: the first-iteration check referenced an undeclared name
     * 'ginfo'; the context parameter is 'gcontext'.
     */
    void update(graphchi_vertex<VertexDataType, EdgeDataType> &vertex, graphchi_context &gcontext) {
        if (gcontext.iteration == 0) {
            /* On first iteration, initialize vertex (and its edges). This is usually required, because
               on each run, GraphChi will modify the data files. To start from scratch, it is easiest
               to initialize the program in code. Alternatively, you can keep a copy of initial data files. */
            // vertex.set_data(init_value);
        } else {
            /* Do computation */
            /* Loop over in-edges (example) */
            for(int i=0; i < vertex.num_inedges(); i++) {
                // Do something
                // value += vertex.inedge(i).get_data();
            }
            /* Loop over out-edges (example) */
            for(int i=0; i < vertex.num_outedges(); i++) {
                // Do something
                // vertex.outedge(i).set_data(x)
            }
            /* Loop over all edges (ignore direction) */
            for(int i=0; i < vertex.num_edges(); i++) {
                // vertex.edge(i).get_data()
            }
            // vertex.set_data(new_value);
        }
    }
    /** Called before an iteration starts. */
    void before_iteration(int iteration, graphchi_context &gcontext) {
    }
    /** Called after an iteration has finished. */
    void after_iteration(int iteration, graphchi_context &gcontext) {
    }
    /** Called before an execution interval is started. */
    void before_exec_interval(vid_t window_st, vid_t window_en, graphchi_context &gcontext) {
    }
    /** Called after an execution interval has finished. */
    void after_exec_interval(vid_t window_st, vid_t window_en, graphchi_context &gcontext) {
    }
};
/**
 * Entry point: parses options, creates shards from the input if they
 * do not exist yet, and runs the program for the requested iterations.
 */
int main(int argc, const char ** argv) {
    /* GraphChi initialization will read the command line
       arguments and the configuration file. */
    graphchi_init(argc, argv);
    /* Metrics object for keeping track of performance counters
       and other information. Currently required. */
    metrics m("my-application-name");
    /* Basic arguments for application */
    std::string filename = get_option_string("file");  // Base filename
    int niters = get_option_int("niters", 4);          // Number of iterations
    bool scheduler = get_option_int("scheduler", 0);   // Whether to use selective scheduling
    /* Detect the number of shards or preprocess an input to create them */
    int nshards = convert_if_notexists<EdgeDataType>(filename,
                                                     get_option_string("nshards", "auto"));
    /* Run */
    MyGraphChiProgram program;
    graphchi_engine<VertexDataType, EdgeDataType> engine(filename, nshards, scheduler, m);
    engine.run(program, niters);
    /* Report execution metrics */
    metrics_report(m);
    return 0;
}
| C++ |
/**
* @file
* @author Aapo Kyrola <akyrola@cs.cmu.edu>
* @version 1.0
*
* @section LICENSE
*
* Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @section DESCRIPTION
*
* "Functional" version of pagerank, which is quite a bit more efficient, because
* it does not construct the vertex-objects but directly processes the edges.
*
* This program can be run either in the semi-synchronous mode (faster, but
* less clearly defined semantics), or synchronously. Synchronous version needs
* double the amount of I/O because it needs to store both previous and
* current values. Use command line parameter mode with semisync or sync.
*/
#define RANDOMRESETPROB 0.15
#define GRAPHCHI_DISABLE_COMPRESSION
#include <string>
#include <fstream>
#include <cmath>
#include "graphchi_basic_includes.hpp"
#include "api/functional/functional_api.hpp"
#include "graphchi_basic_includes.hpp"
#include "util/toplist.hpp"
using namespace graphchi;
/**
 * Pagerank as a functional kernel: GraphChi's functional API splits
 * each update into gather / sum / apply / scatter steps.
 */
struct pagerank_kernel : public functional_kernel<float, float> {
    /* Initial value - on first iteration */
    float initial_value(graphchi_context &info, vertex_info& myvertex) {
        return 1.0;
    }
    /* Called before first "gather": neutral element of the sum. */
    float reset() {
        return 0.0;
    }
    // Note: Unweighted version, edge value should also be passed
    // "Gather": the value contributed by one in-neighbor.
    float op_neighborval(graphchi_context &info, vertex_info& myvertex, vid_t nbid, float nbval) {
        return nbval;
    }
    // "Sum": combine gathered contributions.
    float plus(float curval, float toadd) {
        return curval + toadd;
    }
    // "Apply": pagerank with damping factor (1 - RANDOMRESETPROB).
    float compute_vertexvalue(graphchi_context &ginfo, vertex_info& myvertex, float nbvalsum) {
        assert(ginfo.nvertices > 0);
        return RANDOMRESETPROB / ginfo.nvertices + (1 - RANDOMRESETPROB) * nbvalsum;
    }
    // "Scatter": rank is divided evenly among out-neighbors.
    float value_to_neighbor(graphchi_context &info, vertex_info& myvertex, vid_t nbid, float myval) {
        assert(myvertex.outdegree > 0);
        return myval / myvertex.outdegree;
    }
};
/**
 * Entry point: runs functional pagerank in semi-synchronous or (bulk)
 * synchronous mode, then prints the highest-ranked vertices.
 * Fixed: the result header hard-coded "top 20" even when the 'top'
 * option changed ntop.
 */
int main(int argc, const char ** argv) {
    graphchi_init(argc, argv);
    metrics m("pagerank");
    std::string filename = get_option_string("file");
    int niters = get_option_int("niters", 4);
    bool onlytop = get_option_int("onlytop", 0);   // Skip computation, only print results
    int ntop = get_option_int("top", 20);          // How many top vertices to list
    std::string mode = get_option_string("mode", "semisync");
    if (onlytop == 0) {
        /* Run */
        if (mode == "semisync") {
            logstream(LOG_INFO) << "Running pagerank in semi-synchronous mode." << std::endl;
            run_functional_unweighted_semisynchronous<pagerank_kernel>(filename, niters, m);
        } else if (mode == "sync") {
            logstream(LOG_INFO) << "Running pagerank in (bulk) synchronous mode." << std::endl;
            run_functional_unweighted_synchronous<pagerank_kernel>(filename, niters, m);
        } else {
            logstream(LOG_ERROR) << "Mode needs to be either 'semisync' or 'sync'." << std::endl;
            assert(false);
        }
        /* Output metrics */
        metrics_report(m);
    }
    /* Write the top ntop vertices */
    std::vector< vertex_value<float> > top = get_top_vertices<float>(filename, ntop);
    std::cout << "Print top " << ntop << " vertices: " << std::endl;
    for(int i=0; i < (int) top.size(); i++) {
        std::cout << (i+1) << ". " << top[i].vertex << "\t" << top[i].value << std::endl;
    }
    return 0;
}
| C++ |
/**
* @file
* @author Aapo Kyrola <akyrola@cs.cmu.edu>
* @version 1.0
*
* @section LICENSE
*
* Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @section DESCRIPTION
*
* Simple program that writes a graph into adjacency list
*/
#include <iostream>
#include "graphchi_basic_includes.hpp"
using namespace graphchi;
/**
* Type definitions. Remember to create suitable graph shards using the
* Sharder-program.
*/
// Vertex/edge values are unused by this converter.
typedef bool VertexDataType;
typedef bool EdgeDataType;
// Output file handle, opened in main() and written by the update function.
FILE * f;
#define MODE_ADJLIST 0        // "<id> <degree> <nb1> <nb2> ..." per line
#define MODE_CASSOVARY_ADJ 1  // header line, then one neighbor id per line
// Selected output format (one of the MODE_* constants).
int mode;
/**
 * Writes each vertex's out-adjacency list to the global output file in
 * the format selected by 'mode'.
 * Fixed: vertex ids were printed with %d although vid_t is unsigned;
 * ids above INT_MAX would have printed as negative numbers. Now %u.
 */
struct AdjConverter : public GraphChiProgram<VertexDataType, EdgeDataType> {
    /**
     * Vertex update function: emits one record per vertex.
     */
    void update(graphchi_vertex<VertexDataType, EdgeDataType> &vertex, graphchi_context &gcontext) {
        if (vertex.id() % 10000 == 0) std::cout << vertex.id() << std::endl;  // progress indicator
        switch(mode) {
            case MODE_ADJLIST: {
                // "<id> <degree> <nb1> <nb2> ..." on one line
                fprintf(f, "%u %d", vertex.id(), vertex.num_outedges());
                for(int i=0; i<vertex.num_outedges(); i++)
                    fprintf(f, " %u", vertex.outedge(i)->vertex_id());
                fprintf(f, "\n");
                break;
            }
            case MODE_CASSOVARY_ADJ: {
                // Cassovary format: header line, then one neighbor per line
                fprintf(f, "%u %d\n", vertex.id(), vertex.num_outedges());
                for(int i=0; i<vertex.num_outedges(); i++)
                    fprintf(f, "%u\n", vertex.outedge(i)->vertex_id());
                break;
            }
        }
    }
    /** Called before an iteration starts. */
    void before_iteration(int iteration, graphchi_context &gcontext) {
    }
    /** Called after an iteration has finished. */
    void after_iteration(int iteration, graphchi_context &gcontext) {
    }
    /** Called before an execution interval is started. */
    void before_exec_interval(vid_t window_st, vid_t window_en, graphchi_context &gcontext) {
    }
    /** Called after an execution interval has finished. */
    void after_exec_interval(vid_t window_st, vid_t window_en, graphchi_context &gcontext) {
    }
};
/**
 * Entry point: creates shards if needed and dumps the graph in the
 * selected adjacency-list format to "<file>.adj".
 * Fixed: the output-file fopen() result was used unchecked.
 */
int main(int argc, const char ** argv) {
    /* GraphChi initialization will read the command line
       arguments and the configuration file. */
    graphchi_init(argc, argv);
    /* Metrics object for keeping track of performance counters
       and other information. Currently required. */
    metrics m("adjconverter");
    /* Basic arguments for application */
    std::string filename = get_option_string("file"); // Base filename
    /* Detect the number of shards or preprocess an input to create them */
    int nshards = convert_if_notexists<EdgeDataType>(filename,
                                                     get_option_string("nshards", "auto"));
    mode = get_option_int("mode", 0);
    std::string outfile = filename + ".adj";
    f = fopen(outfile.c_str(), "w");
    if (f == NULL) {
        std::cout << "ERROR: could not open output file: " << outfile << std::endl;
        return 1;
    }
    /* Run single-threaded so output lines are not interleaved. */
    AdjConverter program;
    graphchi_engine<VertexDataType, EdgeDataType> engine(filename, nshards, false, m);
    engine.set_exec_threads(1);
    engine.run(program, 1);
    fclose(f);
    /* Report execution metrics */
    metrics_report(m);
    return 0;
}
| C++ |
/**
* @file
* @author Aapo Kyrola <akyrola@cs.cmu.edu>
* @version 1.0
*
* @section LICENSE
*
* Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @section DESCRIPTION
*
* Triangle counting application. Counts the number of incident (full) triangles
* for each vertex. Edge direction is ignored.
*
* This algorithm is quite complicated and requires 'trickery' to work
* well on GraphChi. The complexity stems from the need to store large number
* of adjacency lists in memory: we cannot store the adjacency lists reasonable
* to edges, nor can we store all of them once at memory. Therefore the problems
* is solved in a series of phases. On each phase, the relevant adjacency lists of an interval
* of vertices (called 'pivots') is loaded into memory, and all vertices that have id smaller than the
* pivots are matched with them. With 'relevant adjacency list' I mean the list of neighbors
* that have higher id then the pivots themselves. That is, we only count triangles a -> b -> c
* where a > b > c.
*
* The application involves a special preprocessing step which orders the vertices in ascending
* order of their degree. This turns out to be a very important optimization on big graphs.
*
* This algorithm also utilizes the dynamic graph engine, and deletes edges after they have been
* accounted for.
*/
#include <string>
#include <vector>
/**
* Need to define prior to including GraphChi
* headers. This enabled edge-deletion in the vertex object.
*/
#define SUPPORT_DELETIONS 1
#define GRAPHCHI_DISABLE_COMPRESSION
#include "graphchi_basic_includes.hpp"
#include "engine/dynamic_graphs/graphchi_dynamicgraph_engine.hpp"
using namespace graphchi;
/**
* Type definitions. Vertex data stores the number of incident triangles.
* Edge stores number of unaccounted triangles that the edge participates on.
* When vertex is updated, it updates its vertex count by summing up the
* counts from edges (after which the edges are deleted).
*/
// Vertex value: number of triangles incident to the vertex.
typedef uint32_t VertexDataType;
// Edge value: triangles counted on this edge, not yet credited to a vertex.
typedef uint32_t EdgeDataType;
/*
* Class for writing the output number of triangles for each node
*/
/*
 * Vertex-foreach callback that prints "<vertex-id> <triangle-count>"
 * for every vertex whose count is non-zero.
 */
class OutputVertexCallback : public VCallback<VertexDataType> {
public:
    virtual void callback(vid_t vertex_id, VertexDataType &value) {
        if (value == 0) return;  // skip vertices without triangles
        std::cout << vertex_id << " " << value << std::endl;
    }
};
/**
* Code for intersection size computation and
* pivot management.
*/
// Total number of adjacency-list entries currently held in memory by the
// pivot container; updated atomically by grab_adj, reset each even iteration.
int grabbed_edges = 0;
// Linear search
// Linear scan over a sorted id array; stops as soon as values exceed
// the target (faster than binary search for short arrays).
inline bool findadj_linear(vid_t * datachunk, size_t n, vid_t target) {
    for(size_t i = 0; i < n; i++) {
        vid_t cur = datachunk[i];
        if (cur >= target) {
            return cur == target;
        }
    }
    return false;
}
// Binary search
// Binary search in a sorted id array; falls back to a linear scan for
// short arrays where scanning is faster in practice.
// Fixed: removed the 'register' qualifiers — deprecated since C++11
// and ill-formed in C++17.
inline bool findadj(vid_t * datachunk, size_t n, vid_t target) {
    if (n<32) return findadj_linear(datachunk, n, target);
    size_t lo = 0;
    size_t hi = n;
    size_t m = lo + (hi-lo)/2;
    while(hi>lo) {
        vid_t eto = datachunk[m];
        if (target == eto) {
            return true;
        }
        if (target > eto) {
            lo = m+1;
        } else {
            hi = m;
        }
        m = lo + (hi-lo)/2;
    }
    return false;
}
/**
 * In-memory adjacency list of a pivot vertex: 'count' neighbor ids,
 * all larger than the pivot's own id, in ascending order.
 * Fixed: the default constructor left 'count' uninitialized.
 */
struct dense_adj {
    int count;
    vid_t * adjlist;
    dense_adj() : count(0), adjlist(NULL) { }
    dense_adj(int _count, vid_t * _adjlist) : count(_count), adjlist(_adjlist) {
    }
};
// This is used for keeping in-memory
// Holds the adjacency lists of the current pivot vertex interval in memory.
class adjlist_container {
    std::vector<dense_adj> adjs;  // one entry per pivot, indexed by (id - pivot_st)
    mutex m;
public:
    vid_t pivot_st, pivot_en;     // current pivot interval [pivot_st, pivot_en)
    adjlist_container() {
        pivot_st = 0;
        pivot_en = 0;
    }
    // Frees all stored adjacency lists and advances the interval start
    // so the next pass covers the following vertices.
    void clear() {
        for(std::vector<dense_adj>::iterator it=adjs.begin(); it != adjs.end(); ++it) {
            if (it->adjlist != NULL) {
                free(it->adjlist);
                it->adjlist = NULL;
            }
        }
        adjs.clear();
        pivot_st = pivot_en;
    }
    /**
     * Extend the interval of pivot vertices to en.
     */
    void extend_pivotrange(vid_t en) {
        assert(en>=pivot_en);
        pivot_en = en;
        adjs.resize(pivot_en - pivot_st);
    }
    /**
     * Grab pivot's adjacency list into memory.
     * Stores only unique neighbor ids larger than the pivot's own id
     * (triangles a > b > c are counted once). Returns the number of
     * ids stored; 0 if v is not a pivot.
     */
    int grab_adj(graphchi_vertex<uint32_t, uint32_t> &v) {
        if(is_pivot(v.id())) {
            int ncount = v.num_edges();
            // Count how many neighbors have larger id than v
            v.sort_edges_indirect();
            int actcount = 0;
            vid_t lastvid = 0;
            for(int i=0; i<ncount; i++) {
                if (v.edge(i)->vertexid > v.id() && v.edge(i)->vertexid != lastvid)
                    actcount++;  // Need to store only ids larger than me
                lastvid = v.edge(i)->vertex_id();
            }
            // Allocate the in-memory adjacency list, using the
            // knowledge of the number of edges.
            dense_adj dadj = dense_adj(actcount, (vid_t*) calloc(sizeof(vid_t), actcount));
            actcount = 0;
            lastvid = 0;
            for(int i=0; i<ncount; i++) {
                if (v.edge(i)->vertexid > v.id() && v.edge(i)->vertexid != lastvid) { // Need to store only ids larger than me
                    dadj.adjlist[actcount++] = v.edge(i)->vertex_id();
                }
                lastvid = v.edge(i)->vertex_id();
            }
            assert(dadj.count == actcount);
            adjs[v.id() - pivot_st] = dadj;
            assert(v.id() - pivot_st < adjs.size());
            __sync_add_and_fetch(&grabbed_edges, actcount);
            return actcount;
        }
        return 0;
    }
    // Number of stored neighbor ids of the given pivot.
    int acount(vid_t pivot) {
        return adjs[pivot - pivot_st].count;
    }
    /**
     * Compute size of the relevant intersection of v and a pivot.
     * start_i is the index of v's edge pointing to the pivot.
     * Side effect: each matched edge of v gets its triangle count
     * incremented by one.
     */
    int intersection_size(graphchi_vertex<uint32_t, uint32_t> &v, vid_t pivot, int start_i) {
        assert(is_pivot(pivot));
        int count = 0;
        if (pivot > v.id()) {
            dense_adj &dadj = adjs[pivot - pivot_st];
            int vc = v.num_edges();
            /**
             * If the adjacency list sizes are not too different, use
             * 'merge'-type of operation to compute size intersection.
             */
            if (dadj.count < 32 * (vc - start_i)) { // TODO: do real profiling to find best cutoff value
                // Do merge-style of check
                assert(v.edge(start_i)->vertex_id() == pivot);
                int i1 = 0;
                int i2 = start_i+1;
                int nedges = v.num_edges();
                while (i1 < dadj.count && i2 < nedges) {
                    vid_t dst = v.edge(i2)->vertexid;
                    vid_t a = dadj.adjlist[i1];
                    if (a == dst) {
                        /* Add one to edge between v and the match */
                        v.edge(i2)->set_data(v.edge(i2)->get_data() + 1);
                        count++;
                        i1++; i2++;
                    } else {
                        // Advance whichever side is behind (branchless).
                        i1 += a < dst;
                        i2 += a > dst;
                    }
                }
            } else {
                /**
                 * Otherwise, use linear/binary search.
                 */
                vid_t lastvid = 0;
                for(int i=start_i+1; i < vc; i++) {
                    vid_t nb = v.edge(i)->vertexid;
                    if (nb > pivot && nb != lastvid) {  // skip duplicate (reciprocal) edges
                        int match = findadj(dadj.adjlist, dadj.count, nb);
                        count += match;
                        if (match > 0) {
                            /* Add one to edge between v and the match */
                            v.edge(i)->set_data(v.edge(i)->get_data() + 1);
                        }
                    }
                    lastvid = nb;
                }
            }
        }
        return count;
    }
    // True if vid lies in the current pivot interval.
    inline bool is_pivot(vid_t vid) {
        return vid >= pivot_st && vid < pivot_en;
    }
};
// Global pivot adjacency container shared by all execution threads.
adjlist_container * adjcontainer;
/**
* GraphChi programs need to subclass GraphChiProgram<vertex-type, edge-type>
* class. The main logic is usually in the update function.
*/
/**
 * GraphChi programs need to subclass GraphChiProgram<vertex-type, edge-type>
 * class. The main logic is usually in the update function.
 *
 * The algorithm alternates two phases: on even iterations the pivots'
 * adjacency lists are loaded into memory (and previously counted
 * triangles are credited to vertices); on odd iterations every vertex
 * below the pivot range end intersects its adjacency list with the
 * in-memory pivot lists.
 */
struct TriangleCountingProgram : public GraphChiProgram<VertexDataType, EdgeDataType> {
    /**
     * Vertex update function.
     */
    void update(graphchi_vertex<VertexDataType, EdgeDataType> &v, graphchi_context &gcontext) {
        if (gcontext.iteration % 2 == 0) {
            // Memorization phase: store this vertex's adjacency list if it is a pivot.
            adjcontainer->grab_adj(v);
        } else {
            uint32_t oldcount = v.get_data();
            uint32_t newcounts = 0;
            v.sort_edges_indirect();
            vid_t lastvid = 0;
            /**
             * Iterate through the edges, and if an edge is from a
             * pivot vertex, compute intersection of the relevant
             * adjacency lists.
             */
            for(int i=0; i<v.num_edges(); i++) {
                graphchi_edge<uint32_t> * e = v.edge(i);
                if (e->vertexid > v.id() && e->vertexid >= adjcontainer->pivot_st) {
                    assert(!is_deleted_edge_value(e->get_data()));
                    if (e->vertexid != lastvid) { // Handles reciprocal edges (a->b, b<-a)
                        if (adjcontainer->is_pivot(e->vertexid)) {
                            uint32_t pivot_triangle_count = adjcontainer->intersection_size(v, e->vertexid, i);
                            newcounts += pivot_triangle_count;
                            /* Write the number of triangles into edge between this vertex and pivot */
                            if (pivot_triangle_count == 0 && e->get_data() == 0) {
                                /* ... or remove the edge, if the count is zero. */
                                v.remove_edge(i);
                            } else {
                                e->set_data(e->get_data() + pivot_triangle_count);
                            }
                        } else {
                            // Edges are sorted: ids past the pivot range cannot match.
                            break;
                        }
                    }
                    lastvid = e->vertexid;
                }
                assert(newcounts >= 0);
            }
            if (newcounts > 0) {
                v.set_data(oldcount + newcounts);
            }
        }
        /* Collect triangle counts matched by vertices with id lower than
           this one, and delete */
        if (gcontext.iteration % 2 == 0) {
            int newcounts = 0;
            for(int i=0; i < v.num_edges(); i++) {
                graphchi_edge<uint32_t> * e = v.edge(i);
                if (e->vertexid < v.id()) {
                    newcounts += e->get_data();
                    e->set_data(0);
                    // This edge can be now deleted. Is there some other situations we can delete?
                    if (v.id() < adjcontainer->pivot_st && e->vertexid < adjcontainer->pivot_st) {
                        v.remove_edge(i);
                    }
                }
            }
            v.set_data(v.get_data() + newcounts);
        }
    }
    /**
     * Called before an iteration starts.
     * Resets the scheduler: even iterations schedule the previous pivots
     * (so they collect counts) and clear the pivot container; odd
     * iterations schedule every vertex below the pivot range end.
     */
    void before_iteration(int iteration, graphchi_context &gcontext) {
        gcontext.scheduler->remove_tasks(0, (int) gcontext.nvertices - 1);
        if (gcontext.iteration % 2 == 0) {
            // Schedule vertices that were pivots on last iteration, so they can
            // keep count of the triangles counted by their lower id neighbors.
            for(vid_t i=adjcontainer->pivot_st; i < adjcontainer->pivot_en; i++) {
                gcontext.scheduler->add_task(i);
            }
            grabbed_edges = 0;
            adjcontainer->clear();
        } else {
            // Schedule everything that has id < pivot
            logstream(LOG_INFO) << "Now pivots: " << adjcontainer->pivot_st << " " << adjcontainer->pivot_en << std::endl;
            for(vid_t i=0; i < gcontext.nvertices; i++) {
                if (i < adjcontainer->pivot_en) {
                    gcontext.scheduler->add_task(i);
                }
            }
        }
    }
    /**
     * Called after an iteration has finished.
     */
    void after_iteration(int iteration, graphchi_context &gcontext) {
    }
    /**
     * Called before an execution interval is started.
     *
     * On every even iteration, we store pivot's adjacency lists to memory.
     * Here we manage the memory to ensure that we do not load too many
     * edges into memory: the pivot range stops growing once ~80% of the
     * edge budget has been grabbed.
     */
    void before_exec_interval(vid_t window_st, vid_t window_en, graphchi_context &gcontext) {
        if (gcontext.iteration % 2 == 0) {
            if (adjcontainer->pivot_st <= window_en) {
                size_t max_grab_edges = get_option_long("membudget_mb", 1024) * 1024 * 1024 / 8;
                if (grabbed_edges < max_grab_edges * 0.8) {
                    logstream(LOG_DEBUG) << "Window init, grabbed: " << grabbed_edges << " edges" << std::endl;
                    for(vid_t vid=window_st; vid <= window_en; vid++) {
                        gcontext.scheduler->add_task(vid);
                    }
                    adjcontainer->extend_pivotrange(window_en + 1);
                    if (window_en == gcontext.nvertices) {
                        // Last iteration needed for collecting last triangle counts
                        gcontext.set_last_iteration(gcontext.iteration + 3);
                    }
                } else {
                    // Memory budget exhausted: pivot range stops here for this pass.
                    std::cout << "Too many edges, already grabbed: " << grabbed_edges << std::endl;
                }
            }
        }
    }
    /**
     * Called after an execution interval has finished.
     */
    void after_exec_interval(vid_t window_st, vid_t window_en, graphchi_context &gcontext) {
    }
};
/**
 * Entry point. Vertices are reordered by degree before sharding (an
 * important optimization on big graphs); the dynamic engine runs until
 * the program itself sets the last iteration; finally the per-vertex
 * counts are summed — each triangle is counted three times, once per
 * corner, hence the division by 3.
 */
int main(int argc, const char ** argv) {
    /* GraphChi initialization will read the command line
       arguments and the configuration file. */
    graphchi_init(argc, argv);
    /* Metrics object for keeping track of performance counters
       and other information. Currently required. */
    metrics m("triangle-counting");
    /* Basic arguments for application */
    std::string filename = get_option_string("file"); // Base filename
    int niters = 100000; // Automatically determined during running
    bool scheduler = true;
    /* Preprocess the file, and order the vertices in the order of their degree.
       Mapping from original ids to new ids is saved separately. */
    OrderByDegree<EdgeDataType> * orderByDegreePreprocessor = new OrderByDegree<EdgeDataType> ();
    int nshards = convert_if_notexists<EdgeDataType>(filename,
                                                     get_option_string("nshards", "auto"),
                                                     orderByDegreePreprocessor);
    /* Initialize adjacency container */
    adjcontainer = new adjlist_container();
    /* Run */
    TriangleCountingProgram program;
    graphchi_dynamicgraph_engine<VertexDataType, EdgeDataType> engine(filename + orderByDegreePreprocessor->getSuffix(),
                                                                      nshards, scheduler, m);
    engine.set_enable_deterministic_parallelism(false);
    // Low memory budget is required to prevent swapping as triangle counting
    // uses more memory than standard GraphChi apps.
    engine.set_membudget_mb(std::min(get_option_int("membudget_mb", 1024), 1024));
    engine.run(program, niters);
    /* Report execution metrics */
    metrics_report(m);
    /* Count triangles */
    size_t ntriangles = sum_vertices<vid_t, size_t>(filename + "_degord", 0, (vid_t)engine.num_vertices());
    std::cout << "Number of triangles: " << ntriangles / 3 << "(" << ntriangles << ")" << std::endl;
    /* If run as a test, see the number matches */
    size_t expected = get_option_long("assertequals", 0);
    if (expected > 0) {
        std::cout << "Testing the result is as expected: " << (ntriangles / 3) << " vs. " << expected << std::endl;
        assert(expected == ntriangles / 3);
    }
    /* write the output */
    // OutputVertexCallback callback;
    // foreach_vertices<VertexDataType>(filename + "_degord", 0, engine.num_vertices(), callback);
    return 0;
}
| C++ |
/**
* @file
* @author Aapo Kyrola <akyrola@cs.cmu.edu>
* @version 1.0
*
* @section LICENSE
*
* Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @section DESCRIPTION
*
* Application for computing the connected components of a graph.
* The algorithm is simple: on first iteration each vertex sends its
* id to neighboring vertices. On subsequent iterations, each vertex chooses
* the smallest id of its neighbors and broadcasts its (new) label to
* its neighbors. The algorithm terminates when no vertex changes label.
*
* @section REMARKS
*
 * This application is an interesting demonstration of the asynchronous capabilities
 * of GraphChi, improving the convergence considerably. Consider
 * a chain graph 0->1->2->...->n. First, vertex 0 will write its value to its edges,
 * which will be observed by vertex 1 immediately, changing its label to 0. Next,
 * vertex 2 changes its value to 0, and so on. This all happens in one iteration.
 * A subtle issue is that as any pair of vertices a<->b share an edge, they will
 * overwrite each other's value. However, because they will never be run in parallel
 * (due to the deterministic parallelism of GraphChi), this does not compromise correctness.
*
* @author Aapo Kyrola
*/
#include <cmath>
#include <string>
#include "graphchi_basic_includes.hpp"
#include "util/labelanalysis.hpp"
using namespace graphchi;
/**
* Type definitions. Remember to create suitable graph shards using the
* Sharder-program.
*/
typedef vid_t VertexDataType;   // vid_t is the vertex id type; vertex value = current component label
typedef vid_t EdgeDataType;     // Edge value carries the label being broadcast to a neighbor
/**
* GraphChi programs need to subclass GraphChiProgram<vertex-type, edge-type>
* class. The main logic is usually in the update function.
*/
struct ConnectedComponentsProgram : public GraphChiProgram<VertexDataType, EdgeDataType> {

    /**
     * Vertex update function.
     * On the first iteration, every vertex adopts its own id as its label.
     * On later iterations it takes the minimum label found among its
     * neighbors (or keeps its own if smaller) and propagates any change.
     */
    void update(graphchi_vertex<VertexDataType, EdgeDataType> &vertex, graphchi_context &gcontext) {
        /* Selective scheduling is mandatory for this program. */
        assert(gcontext.scheduler != NULL);

        bool first_iteration = (gcontext.iteration == 0);
        if (first_iteration) {
            vertex.set_data(vertex.id());
            gcontext.scheduler->add_task(vertex.id());
        }

        /* Determine the smallest label among this vertex and its neighbors. */
        vid_t minlabel = vertex.get_data();
        int nedges = vertex.num_edges();
        for(int e = 0; e < nedges; e++) {
            /* Edge values are uninitialized on the first iteration, so the
               neighbor's vertex id is used as its label instead. */
            vid_t neighbor_label = first_iteration ?
                vertex.edge(e)->vertex_id() : vertex.edge(e)->get_data();
            if (neighbor_label < minlabel) minlabel = neighbor_label;
        }

        /* Adopt the minimum as this vertex's label. */
        vertex.set_data(minlabel);

        /**
         * Broadcast the (possibly new) label by writing it onto incident edges.
         * On the first iteration only out-edges are written, so that data
         * already broadcast by neighbors is not overwritten (a subtle point).
         */
        vid_t label = vertex.get_data();
        if (!first_iteration) {
            for(int e = 0; e < nedges; e++) {
                if (label < vertex.edge(e)->get_data()) {
                    vertex.edge(e)->set_data(label);
                    /* The neighbor's label can still improve: schedule it. */
                    gcontext.scheduler->add_task(vertex.edge(e)->vertex_id());
                }
            }
        } else {
            for(int e = 0; e < vertex.num_outedges(); e++) {
                vertex.outedge(e)->set_data(label);
            }
        }
    }

    /** Called before an iteration starts. No-op. */
    void before_iteration(int iteration, graphchi_context &info) {
    }

    /** Called after an iteration has finished. No-op. */
    void after_iteration(int iteration, graphchi_context &ginfo) {
    }

    /** Called before an execution interval is started. No-op. */
    void before_exec_interval(vid_t window_st, vid_t window_en, graphchi_context &ginfo) {
    }

    /** Called after an execution interval has finished. No-op. */
    void after_exec_interval(vid_t window_st, vid_t window_en, graphchi_context &ginfo) {
    }
};
int main(int argc, const char ** argv) {
/* GraphChi initialization will read the command line
arguments and the configuration file. */
graphchi_init(argc, argv);
/* Metrics object for keeping track of performance counters
and other information. Currently required. */
metrics m("connected-components");
/* Basic arguments for application */
std::string filename = get_option_string("file"); // Base filename
int niters = get_option_int("niters", 10); // Number of iterations (max)
bool scheduler = true; // Always run with scheduler
/* Process input file - if not already preprocessed */
int nshards = (int) convert_if_notexists<EdgeDataType>(filename, get_option_string("nshards", "auto"));
if (get_option_int("onlyresult", 0) == 0) {
/* Run */
ConnectedComponentsProgram program;
graphchi_engine<VertexDataType, EdgeDataType> engine(filename, nshards, scheduler, m);
engine.run(program, niters);
}
/* Run analysis of the connected components (output is written to a file) */
m.start_time("label-analysis");
analyze_labels<vid_t>(filename);
m.stop_time("label-analysis");
/* Report execution metrics */
metrics_report(m);
return 0;
}
| C++ |
/**
* @file
* @author Aapo Kyrola <akyrola@cs.cmu.edu>
* @version 1.0
*
* @section LICENSE
*
* Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @section DESCRIPTION
*
* Application for computing the connected components of a graph.
* The algorithm is simple: on first iteration each vertex sends its
* id to neighboring vertices. On subsequent iterations, each vertex chooses
* the smallest id of its neighbors and broadcasts its (new) label to
* its neighbors. The algorithm terminates when no vertex changes label.
*
* @section REMARKS
*
* Version of connected components that keeps the vertex values
* in memory.
* @author Aapo Kyrola
*/
#include <cmath>
#include <string>
#include "graphchi_basic_includes.hpp"
#include "util/labelanalysis.hpp"
using namespace graphchi;
/**
* Type definitions. Remember to create suitable graph shards using the
* Sharder-program.
*/
typedef vid_t VertexDataType;   // vid_t is the vertex id type; vertex value = current component label
typedef vid_t EdgeDataType;     // Edge data type (edges are not modified by the in-memory variant)
/**
* GraphChi programs need to subclass GraphChiProgram<vertex-type, edge-type>
* class. The main logic is usually in the update function.
*/
struct ConnectedComponentsProgram : public GraphChiProgram<VertexDataType, EdgeDataType> {

    /* In-memory label for every vertex, indexed by vertex id.
       Allocated lazily in before_iteration(0). Note: the object is not
       safely copyable (the array would be double-freed); main() creates
       only one instance. */
    VertexDataType * vertex_values;

    /* FIX: the pointer was previously left uninitialized until iteration 0. */
    ConnectedComponentsProgram() : vertex_values(NULL) {
    }

    /* FIX: the label array was previously leaked. */
    ~ConnectedComponentsProgram() {
        if (vertex_values != NULL) {
            delete[] vertex_values;
            vertex_values = NULL;
        }
    }

    /** Current label of the neighbor on the other end of the edge. */
    vid_t neighbor_value(graphchi_edge<EdgeDataType> * edge) {
        return vertex_values[edge->vertex_id()];
    }

    /** Store a vertex's label both in the in-memory array and in the vertex. */
    void set_data(graphchi_vertex<VertexDataType, EdgeDataType> &vertex, vid_t value) {
        vertex_values[vertex.id()] = value;
        vertex.set_data(value);
    }

    /**
     * Vertex update function.
     * On first iteration, each vertex chooses a label = the vertex id.
     * On subsequent iterations, each vertex chooses the minimum of the neighbors'
     * labels (and its own).
     */
    void update(graphchi_vertex<VertexDataType, EdgeDataType> &vertex, graphchi_context &gcontext) {
        /* This program requires selective scheduling. */
        assert(gcontext.scheduler != NULL);

        /* Find the minimum label among this vertex and its neighbors */
        vid_t curmin = vertex.get_data();
        for(int i=0; i < vertex.num_edges(); i++) {
            vid_t nblabel = neighbor_value(vertex.edge(i));
            curmin = std::min(nblabel, curmin);
        }

        /* If my label changes, schedule neighbors whose label can improve */
        if (vertex.get_data() != curmin) {
            vid_t newlabel = curmin;
            for(int i=0; i < vertex.num_edges(); i++) {
                if (newlabel < neighbor_value(vertex.edge(i))) {
                    /* Schedule neighbor for update */
                    gcontext.scheduler->add_task(vertex.edge(i)->vertex_id());
                }
            }
        }
        set_data(vertex, curmin);
    }

    /**
     * Called before an iteration starts. On the first iteration, allocates
     * the in-memory label array and initializes each vertex with its own
     * label (= its vertex id).
     */
    void before_iteration(int iteration, graphchi_context &ctx) {
        if (iteration == 0) {
            vertex_values = new VertexDataType[ctx.nvertices];
            for(int i=0; i < (int)ctx.nvertices; i++) {
                vertex_values[i] = i;
            }
        }
    }

    /** Called after an iteration has finished. No-op. */
    void after_iteration(int iteration, graphchi_context &ginfo) {
    }

    /** Called before an execution interval is started. No-op. */
    void before_exec_interval(vid_t window_st, vid_t window_en, graphchi_context &ginfo) {
    }

    /** Called after an execution interval has finished. No-op. */
    void after_exec_interval(vid_t window_st, vid_t window_en, graphchi_context &ginfo) {
    }
};
int main(int argc, const char ** argv) {
    /* GraphChi initialization will read the command line
       arguments and the configuration file. */
    graphchi_init(argc, argv);

    /* Metrics object for keeping track of performance counters
       and other information. Currently required. */
    metrics m("connected-components-inmem");

    /* Basic arguments for application */
    std::string filename = get_option_string("file");  // Base filename
    int niters = get_option_int("niters", 10);         // Number of iterations (max)
    bool scheduler = true;                             // Always run with scheduler

    /* Process input file - if not already preprocessed */
    int nshards = (int) convert_if_notexists<EdgeDataType>(filename, get_option_string("nshards", "auto"));

    /* Skip the computation entirely when only the label analysis is wanted */
    if (get_option_int("onlyresult", 0) == 0) {
        /* Run */
        ConnectedComponentsProgram program;
        graphchi_engine<VertexDataType, EdgeDataType> engine(filename, nshards, scheduler, m);
        /* Labels live in memory, so edges need not be written back to disk. */
        engine.set_modifies_inedges(false); // Improves I/O performance.
        engine.set_modifies_outedges(false); // Improves I/O performance.
        engine.run(program, niters);
    }

    /* Run analysis of the connected components (output is written to a file) */
    m.start_time("label-analysis");
    analyze_labels<vid_t>(filename);
    m.stop_time("label-analysis");

    /* Report execution metrics */
    metrics_report(m);
    return 0;
}
| C++ |
/**
* @file
* @author Aapo Kyrola <akyrola@cs.cmu.edu>
* @version 1.0
*
* @section LICENSE
*
* Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @section DESCRIPTION
*
* Simple pagerank implementation. Uses the basic vertex-based API for
* demonstration purposes. A faster implementation uses the functional API,
* "pagerank_functional".
*/
#include <string>
#include <fstream>
#include <cmath>
#define GRAPHCHI_DISABLE_COMPRESSION
#include "graphchi_basic_includes.hpp"
#include "util/toplist.hpp"
using namespace graphchi;
#define THRESHOLD 1e-1          // NOTE(review): not referenced by the code visible here
#define RANDOMRESETPROB 0.15    // Random-surfer reset probability
typedef float VertexDataType;   // Vertex value = current pagerank
typedef float EdgeDataType;     // Edge value = source vertex's per-edge rank contribution
struct PagerankProgram : public GraphChiProgram<VertexDataType, EdgeDataType> {
    /**
     * Called before an iteration starts. Not implemented.
     */
    void before_iteration(int iteration, graphchi_context &info) {
    }

    /**
     * Called after an iteration has finished. Not implemented.
     */
    void after_iteration(int iteration, graphchi_context &ginfo) {
    }

    /**
     * Called before an execution interval is started. Not implemented.
     */
    void before_exec_interval(vid_t window_st, vid_t window_en, graphchi_context &ginfo) {
    }

    /**
     * Pagerank update function.
     * Iteration 0 initializes the vertex rank and out-edge contributions;
     * later iterations sum in-edge contributions, compute the new rank and
     * redistribute it over the out-edges.
     */
    void update(graphchi_vertex<VertexDataType, EdgeDataType> &v, graphchi_context &ginfo) {
        float sum=0;
        if (ginfo.iteration == 0) {
            /* On first iteration, initialize vertex and out-edges.
               The initialization is important,
               because on every run, GraphChi will modify the data in the edges on disk.
             */
            for(int i=0; i < v.num_outedges(); i++) {
                graphchi_edge<float> * edge = v.outedge(i);
                /* FIX: use a float literal (EdgeDataType is float); '1.0' performed
                   a double division followed by a narrowing conversion. Matches the
                   streaming pagerank variant in this codebase. */
                edge->set_data(1.0f / v.num_outedges());
            }
            v.set_data(RANDOMRESETPROB);
        } else {
            /* Compute the sum of neighbors' weighted pageranks by
               reading from the in-edges. */
            for(int i=0; i < v.num_inedges(); i++) {
                float val = v.inedge(i)->get_data();
                sum += val;
            }

            /* Compute my pagerank */
            float pagerank = RANDOMRESETPROB + (1 - RANDOMRESETPROB) * sum;

            /* Write my pagerank divided by the number of out-edges to
               each of my out-edges. (Skipped for sink vertices.) */
            if (v.num_outedges() > 0) {
                float pagerankcont = pagerank / v.num_outedges();
                for(int i=0; i < v.num_outedges(); i++) {
                    graphchi_edge<float> * edge = v.outedge(i);
                    edge->set_data(pagerankcont);
                }
            }

            /* Keep track of the progression of the computation.
               GraphChi engine writes a file filename.deltalog. */
            ginfo.log_change(std::abs(pagerank - v.get_data()));

            /* Set my new pagerank as the vertex value */
            v.set_data(pagerank);
        }
    }
};
int main(int argc, const char ** argv) {
graphchi_init(argc, argv);
metrics m("pagerank");
/* Parameters */
std::string filename = get_option_string("file"); // Base filename
int niters = get_option_int("niters", 4);
bool scheduler = false; // Non-dynamic version of pagerank.
int ntop = get_option_int("top", 20);
/* Process input file - if not already preprocessed */
int nshards = convert_if_notexists<EdgeDataType>(filename, get_option_string("nshards", "auto"));
/* Run */
graphchi_engine<float, float> engine(filename, nshards, scheduler, m);
engine.set_modifies_inedges(false); // Improves I/O performance.
PagerankProgram program;
engine.run(program, niters);
/* Output top ranked vertices */
std::vector< vertex_value<float> > top = get_top_vertices<float>(filename, ntop);
std::cout << "Print top " << ntop << " vertices:" << std::endl;
for(int i=0; i < (int)top.size(); i++) {
std::cout << (i+1) << ". " << top[i].vertex << "\t" << top[i].value << std::endl;
}
metrics_report(m);
return 0;
}
| C++ |
/**
* @file
* @author Aapo Kyrola <akyrola@cs.cmu.edu>
* @version 1.0
*
* @section LICENSE
*
* Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @section DESCRIPTION
*
* Random walk simulation. From a set of source vertices, a set of
* random walks is started. Random walks walk via edges, and we use the
* dynamic chivectors to support multiple walks in one edge. Each
* vertex keeps track of the walks that pass by it, thus in the end
* we have estimate of the "pagerank" of each vertex.
*
* Note, this version does not support 'resets' of random walks.
* TODO: from each vertex, start new random walks with some probability,
 * and also terminate a walk with some probability.
*
*/
#define DYNAMICEDATA 1
#include <string>
#include "graphchi_basic_includes.hpp"
#include "api/dynamicdata/chivector.hpp"
#include "util/toplist.hpp"
using namespace graphchi;
/**
* Type definitions. Remember to create suitable graph shards using the
* Sharder-program.
*/
typedef unsigned int VertexDataType;    // Count of walks that have passed through the vertex
typedef chivector<vid_t> EdgeDataType;  // Dynamic vector of walk particles (each = source vertex id)
struct RandomWalkProgram : public GraphChiProgram<VertexDataType, EdgeDataType> {

    /** Number of walks started from each source vertex. */
    int walks_per_source() {
        return 100;
    }

    /** A vertex is a walk source iff its id is divisible by 50. */
    bool is_source(vid_t v) {
        return (v % 50 == 0);
    }

    /**
     * Vertex update function.
     * Iteration 0: source vertices emit walks_per_source() walk particles,
     * each placed on a random out-edge; every vertex resets its counter.
     * Later iterations: walks arriving on in-edges are forwarded to random
     * out-edges, and the vertex counts each walk passing through it.
     */
    void update(graphchi_vertex<VertexDataType, EdgeDataType > &vertex, graphchi_context &gcontext) {
        if (gcontext.iteration == 0) {
            if (is_source(vertex.id())) {
                for(int i=0; i < walks_per_source(); i++) {
                    /* Get random out edge's vector */
                    graphchi_edge<EdgeDataType> * outedge = vertex.random_outedge();
                    if (outedge != NULL) {
                        chivector<vid_t> * evector = outedge->get_vector();
                        /* Add a random walk particle, represented by the vertex-id of the source (this vertex) */
                        evector->add(vertex.id());
                        gcontext.scheduler->add_task(outedge->vertex_id()); // Schedule destination
                    }
                }
            }
            /* Reset the pass-through counter for all vertices. */
            vertex.set_data(0);
        } else {
            /* Check inbound edges for walks and advance them. */
            int num_walks = 0;
            for(int i=0; i < vertex.num_inedges(); i++) {
                graphchi_edge<EdgeDataType> * edge = vertex.inedge(i);
                chivector<vid_t> * invector = edge->get_vector();
                for(int j=0; j < invector->size(); j++) {
                    /* Get one walk */
                    vid_t walk = invector->get(j);
                    /* Move to a random out-edge */
                    graphchi_edge<EdgeDataType> * outedge = vertex.random_outedge();
                    if (outedge != NULL) {
                        chivector<vid_t> * outvector = outedge->get_vector();
                        /* Forward the walk (still identified by its original source's
                           vertex-id) to the chosen out-edge's vector. Walks reaching a
                           sink vertex (no out-edges) are dropped here. */
                        outvector->add(walk);
                        gcontext.scheduler->add_task(outedge->vertex_id()); // Schedule destination
                    }
                    num_walks ++;
                }
                /* Remove all walks from the inbound vector */
                invector->clear();
            }
            /* Keep track of the walks passed by via this vertex */
            vertex.set_data(vertex.get_data() + num_walks);
        }
    }

    /** Called before an iteration starts. No-op. */
    void before_iteration(int iteration, graphchi_context &gcontext) {
    }

    /** Called after an iteration has finished. No-op. */
    void after_iteration(int iteration, graphchi_context &gcontext) {
    }

    /** Called before an execution interval is started. No-op. */
    void before_exec_interval(vid_t window_st, vid_t window_en, graphchi_context &gcontext) {
    }

    /** Called after an execution interval has finished. No-op. */
    void after_exec_interval(vid_t window_st, vid_t window_en, graphchi_context &gcontext) {
    }
};
int main(int argc, const char ** argv) {
/* GraphChi initialization will read the command line
arguments and the configuration file. */
graphchi_init(argc, argv);
/* Metrics object for keeping track of performance counters
and other information. Currently required. */
metrics m("randomwalk");
/* Basic arguments for application */
std::string filename = get_option_string("file"); // Base filename
int niters = get_option_int("niters", 4); // Number of iterations
bool scheduler = true; // Whether to use selective scheduling
/* Detect the number of shards or preprocess an input to create them */
bool preexisting_shards;
int nshards = convert_if_notexists<vid_t>(filename, get_option_string("nshards", "auto"), preexisting_shards);
/* Run */
RandomWalkProgram program;
graphchi_engine<VertexDataType, EdgeDataType> engine(filename, nshards, scheduler, m);
if (preexisting_shards) {
engine.reinitialize_edge_data(0);
}
engine.run(program, niters);
/* List top 20 */
int ntop = 20;
std::vector< vertex_value<VertexDataType> > top = get_top_vertices<VertexDataType>(filename, ntop);
std::cout << "Print top 20 vertices: " << std::endl;
for(int i=0; i < (int) top.size(); i++) {
std::cout << (i+1) << ". " << top[i].vertex << "\t" << top[i].value << std::endl;
}
/* Report execution metrics */
metrics_report(m);
return 0;
}
| C++ |
/**
* @file
* @author Aapo Kyrola <akyrola@cs.cmu.edu>
* @version 1.0
*
* @section LICENSE
*
* Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @section DESCRIPTION
*
* Demonstration for streaming graph updates. This application reads from a file
* list of edges and adds them into the graph continuously. Simultaneously, pagerank
* is computed for the evolving graph.
*
* This code includes a fair amount of code for demo purposes. To be cleaned
* eventually.
*/
#include <string>
#include <fstream>
#include <cmath>
#define GRAPHCHI_DISABLE_COMPRESSION
#include "graphchi_basic_includes.hpp"
#include "engine/dynamic_graphs/graphchi_dynamicgraph_engine.hpp"
#include "util/toplist.hpp"
/* HTTP admin tool */
#include "httpadmin/chi_httpadmin.hpp"
#include "httpadmin/plotter.hpp"
using namespace graphchi;
#define THRESHOLD 1e-1f        // Minimum pagerank delta that triggers rescheduling of neighbors
#define RANDOMRESETPROB 0.15f  // Random-surfer reset probability
#define DEMO 1                 // Enables demo-only code paths (name lookup, HTTP admin plots)
typedef float VertexDataType;  // Vertex value = current pagerank
typedef float EdgeDataType;    // Edge value = per-edge pagerank contribution
/* File-globals shared with the streaming and plotter threads. */
graphchi_dynamicgraph_engine<float, float> * dyngraph_engine;
std::string streaming_graph_file;  // Path of the edge-list file streamed into the graph
std::string getname(vid_t v);      // Forward declaration (demo vertex-name lookup)
/**
 * Demo-only lookup of a vertex's (Twitter user) display name.
 * Reads a fixed-width 16-byte record at offset userid*16 from a
 * hard-coded file; returns "n/a" if the file cannot be opened.
 */
std::string getname(vid_t userid) {
#ifdef DEMO
    // Temporary code for demo purposes!
    int f = open("/Users/akyrola/graphs/twitter_names.dat", O_RDONLY);
    if (f < 0) return "n/a";
    char s[16];
    size_t idx = userid * 16;
    preada(f, s, 16, idx);
    close(f);
    s[15] = '\0';  // records are not guaranteed to be NUL-terminated
    return std::string(s);
#else
    /* FIX: was 'return "":' (colon instead of semicolon), which did not
       compile whenever DEMO was not defined. */
    return "";
#endif
}
struct PagerankProgram : public GraphChiProgram<VertexDataType, EdgeDataType> {
    /**
     * Called before an iteration starts. No-op.
     */
    void before_iteration(int iteration, graphchi_context &gcontext) {
    }

    /**
     * Called after an iteration has finished. In demo mode, prints the
     * current top-20 vertices and publishes them to the HTTP admin as
     * "rank<i>" JSON entries.
     */
    void after_iteration(int iteration, graphchi_context &gcontext) {
#ifdef DEMO
        std::vector< vertex_value<float> > top = get_top_vertices<float>(gcontext.filename, 20);
        for(int i=0; i < (int) top.size(); i++) {
            vertex_value<float> vv = top[i];
            std::cout << (i+1) << ". " << vv.vertex << " " << getname(vv.vertex) << ": " << vv.value << std::endl;
        }
        /* Keep top 20 available for http admin */
        for(int i=0; i < (int) top.size(); i++) {
            vertex_value<float> vv = top[i];
            std::stringstream ss;
            ss << "rank" << i;
            std::stringstream sv;
            sv << vv.vertex << ":" << getname(vv.vertex) << ":" << vv.value<< "";
            dyngraph_engine->set_json(ss.str(), sv.str());
        }
#endif
    }

    /**
     * Called before an execution interval is started.
     * In demo mode, refreshes the plot data shown by the HTTP admin.
     */
    void before_exec_interval(vid_t window_st, vid_t window_en, graphchi_context &ginfo) {
#ifdef DEMO
        update_plotdata(dyngraph_engine);
#endif
    }

    /**
     * Pagerank update function (selective-scheduling variant).
     * Iteration 0 initializes the vertex and its out-edges and schedules
     * work; later iterations sum in-edge contributions, compute the new
     * rank, distribute it over out-edges, and reschedule neighbors only
     * when the rank changed by at least THRESHOLD.
     */
    void update(graphchi_vertex<VertexDataType, EdgeDataType> &v, graphchi_context &ginfo) {
        float sum=0;
        if (ginfo.iteration == 0) {
            /* On first iteration, initialize vertex */
            for(int i=0; i < v.num_outedges(); i++) {
                graphchi_edge<float> * edge = v.outedge(i);
                edge->set_data(1.0f / v.num_outedges());
                if (ginfo.scheduler != NULL)
                    ginfo.scheduler->add_task(edge->vertex_id());
            }
            v.set_data(RANDOMRESETPROB);
            /* If using selective scheduling, schedule myself for next iteration */
            if (ginfo.scheduler != NULL)
                ginfo.scheduler->add_task(v.id());
        } else {
            /* Compute the sum of neighbors' weighted pageranks */
            for(int i=0; i < v.num_inedges(); i++) {
                float val = v.inedge(i)->get_data();
                sum += val;
            }

            /* Compute my pagerank */
            float pagerank = RANDOMRESETPROB + (1 - RANDOMRESETPROB) * sum;
            float oldvalue = v.get_data();
            float delta = (float) fabs(oldvalue - pagerank);
            bool significant_change = (delta >= THRESHOLD);

            if (v.num_outedges() > 0) {
                float pagerankcont = pagerank/v.num_outedges();
                for(int i=0; i < v.num_outedges(); i++) {
                    graphchi_edge<float> * edge = v.outedge(i);
                    /* If using selective scheduling, and the change was larger than
                       a threshold, add neighbor to schedule. */
                    if (ginfo.scheduler != NULL) {
                        if (significant_change) {
                            ginfo.scheduler->add_task(edge->vertex_id());
                        }
                    }
                    edge->set_data(pagerankcont);
                }
            }
            v.set_data(pagerank);
            /* Keep track of the progression of the computation */
            ginfo.log_change(delta);
        }
    }
};
/* Demo stuff: HTTP admin handler for per-shard top-pagerank queries. */
class IntervalTopRequest : public custom_request_handler {
public:
    /**
     * Handles "/ajax/shardpagerank<N>": returns a JSON object mapping
     * "rank<i>" to "vertex:name:value" for the top-10 pagerank vertices
     * of shard N's vertex interval, or the string "error" when N is out
     * of range.
     */
    virtual std::string handle(const char * req) {
        /* Shard number follows the fixed path prefix. */
        const char * shardnum_str = &req[strlen("/ajax/shardpagerank")];
        int shardnum = atoi(shardnum_str);
        logstream(LOG_DEBUG) << "Requested shard pagerank: " << shardnum_str << std::endl;
        if (shardnum >= 0 && shardnum < dyngraph_engine->get_nshards()) {
            vid_t fromvid = dyngraph_engine->get_interval_start(shardnum);
            vid_t tovid = dyngraph_engine->get_interval_end(shardnum);
            std::vector< vertex_value<float> > top =
                get_top_vertices<float>(dyngraph_engine->get_context().filename, 10,
                                        fromvid, tovid + 1);
            /* Build the JSON response by hand. */
            std::stringstream ss;
            ss << "{";
            for(int i=0; i < (int) top.size(); i++) {
                vertex_value<float> vv = top[i];
                if (i > 0) ss << ",";
                ss << "\"rank" << i << "\": \"" << vv.vertex << ":" << getname(vv.vertex) << ":" << vv.value<< "\"";
            }
            ss << "}";
            std::cout << ss.str();
            return ss.str();
        }
        return "error";
    }

    /** Accepts requests whose path starts with "/ajax/shardpagerank". */
    virtual bool responds_to(const char * req) {
        return (strncmp(req, "/ajax/shardpagerank", 19) == 0);
    }
};
/* Keeps the plotter thread alive; cleared by main() when the engine finishes. */
bool running = true;

void * plotter_thread(void * info);

/**
 * Demo helper thread: periodically redraws the HTTP admin plots.
 */
void * plotter_thread(void * info) {
    /* Plot refresh interval: ten seconds. */
    const int refresh_interval_us = 1000000 * 10;
    usleep(refresh_interval_us);   // let the engine warm up first
    init_plots(dyngraph_engine);
    while(running) {
        /* Update plots */
        drawplots();
        usleep(refresh_interval_us);
    }
    return NULL;
}
/**
* Function executed by a separate thread that streams
* graph from a file.
*/
/**
 * Function executed by a separate thread that streams graph edges
 * from an edge-list file into the running dynamic-graph engine,
 * throttled to approximately edges_per_sec edges per second.
 */
void * dynamic_graph_reader(void * info);
void * dynamic_graph_reader(void * info) {
    std::cout << "Start sleeping..." << std::endl;
    usleep(50000);  // give the engine a moment to start
    std::cout << "End sleeping..." << std::endl;

    int edges_per_sec = get_option_int("edges_per_sec", 100000);

    logstream(LOG_INFO) << "Going to stream from: " << streaming_graph_file << std::endl;

    FILE * f = fopen(streaming_graph_file.c_str(), "r");
    if (f == NULL) {
        logstream(LOG_ERROR) << "Could not open file for streaming: " << streaming_graph_file <<
        " error: " << strerror(errno) << std::endl;
    }
    assert(f != NULL);

    logstream(LOG_INFO) << "Streaming speed capped at: " << edges_per_sec << " edges/sec." << std::endl;

    size_t c = 0;         // lines processed (drives throttling)
    size_t ingested = 0;  // edges actually added to the graph

    // Used for flow control
    timeval last, now;
    gettimeofday(&last, NULL);

    vid_t from;
    vid_t to;
    char s[1024];
    while(fgets(s, 1024, f) != NULL) {
        FIXLINE(s);
        /* Read next line: "<from><tab-or-space><to>" */
        char delims[] = "\t ";
        char * t;
        t = strtok(s, delims);
        /* FIX: guard against malformed or blank lines — strtok returns NULL
           and atoi(NULL) is undefined behavior. */
        if (t == NULL) continue;
        from = atoi(t);
        t = strtok(NULL, delims);
        if (t == NULL) continue;
        to = atoi(t);
        if (from == to) {
            /* Self-edges are skipped. */
            // logstream(LOG_WARNING) << "Self-edge in stream: " << from << " <-> " << to << std::endl;
            continue;
        }
        /* add_edge may refuse transiently (e.g. internal buffers full): retry. */
        bool success=false;
        while (!success) {
            success = dyngraph_engine->add_edge(from, to, 0.0f);
        }
        dyngraph_engine->add_task(from);
        ingested++;

        if (++c % edges_per_sec == 0) {
            std::cout << "Stream speed check...." << std::endl;
            double sincelast;
            double speed;
            // Throttling - keeps average speed of edges/sec in control
            do {
                gettimeofday(&now, NULL);
                sincelast = now.tv_sec-last.tv_sec+ ((double)(now.tv_usec-last.tv_usec))/1.0E6;
                usleep(20000);
                speed = (c / sincelast);
            } while (speed > edges_per_sec);
            dyngraph_engine->set_json("ingestspeed", speed);
            logstream(LOG_INFO) << "Stream speed check ended.... Speed now:" << speed << " edges/sec" << std::endl;
            dyngraph_engine->set_json("ingestedges", ingested);
        }
        if (c % 1000 == 0) {
            set_ingested_edges(ingested);  // update the demo counter display
        }
    }
    fclose(f);
    /* All edges streamed: let the engine run ten more iterations, then stop. */
    dyngraph_engine->finish_after_iters(10);
    return NULL;
}
int main(int argc, const char ** argv) {
    /* Read the command line arguments and the configuration file. */
    graphchi_init(argc, argv);
    metrics m("streaming-pagerank");

    /* Parameters */
    std::string filename = get_option_string("file");  // Base filename
    int niters = 100000;  // End of computation to be determined programmatically
    // Pagerank can be run with or without selective scheduling
    bool scheduler = true;
    int ntop = get_option_int("top", 20);

    /* Process input file (the base graph) - if not already preprocessed */
    int nshards = convert_if_notexists<EdgeDataType>(filename, get_option_string("nshards", "auto"));

    /* Streaming input graph - must be in edge-list format */
    streaming_graph_file = get_option_string_interactive("streaming_graph_file",
                                                         "Pathname to graph file to stream edges from");

    /* Create the engine object (a file-global shared with the streaming
       and plotter threads) */
    dyngraph_engine = new graphchi_dynamicgraph_engine<float, float>(filename, nshards, scheduler, m);
    dyngraph_engine->set_modifies_inedges(false); // Improves I/O performance.

    /* Start streaming thread */
    pthread_t strthread;
    int ret = pthread_create(&strthread, NULL, dynamic_graph_reader, NULL);
    assert(ret>=0);

    /* Start HTTP admin and the plot-refresh thread */
    start_httpadmin< graphchi_dynamicgraph_engine<float, float> >(dyngraph_engine);
    register_http_request_handler(new IntervalTopRequest());
    pthread_t plotterthr;
    ret = pthread_create(&plotterthr, NULL, plotter_thread, NULL);
    assert(ret>=0);

    /* Run the engine */
    PagerankProgram program;
    dyngraph_engine->run(program, niters);
    running = false;  // stop the plotter thread's loop

    /* Output top ranked vertices */
    std::vector< vertex_value<float> > top = get_top_vertices<float>(filename, ntop);
    std::cout << "Print top " << ntop << " vertices:" << std::endl;
    for(int i=0; i < (int)top.size(); i++) {
        std::cout << (i+1) << ". " << top[i].vertex << "\t" << top[i].value << std::endl;
    }
    metrics_report(m);
    return 0;
}
| C++ |
/**
* @file
* @author Aapo Kyrola <akyrola@cs.cmu.edu>
* @version 1.0
*
* @section LICENSE
*
* Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @section DESCRIPTION
*
 * Program for running the ALS matrix-factorization toolkit from
 * GraphLab. This is an example of developing GraphLab v2.1 programs
 * for GraphChi.
*/
#include <string>
#include <algorithm>
#include "../matrixmarket/mmio.h"
#include "../matrixmarket/mmio.c"
#include "graphchi_basic_includes.hpp"
#include "api/graphlab2_1_GAS_api/graphlab.hpp"
#include "als_vertex_program.hpp"
using namespace graphchi;
using namespace graphlab;
// Forward declaration
int convert_matrixmarket_for_ALS_graphlab(std::string filename);
/* Static configuration of the ALS vertex program. */
size_t vertex_data::NLATENT = 5;              // Dimension of the latent factor vectors
double als_vertex_program::TOLERANCE = 1e-3;  // Convergence tolerance
double als_vertex_program::LAMBDA = 0.01;     // Regularization parameter
size_t als_vertex_program::MAX_UPDATES = -1;  // -1 wraps to SIZE_MAX: effectively unlimited
int main(int argc, const char ** argv) {
/* GraphChi initialization will read the command line
arguments and the configuration file. */
graphchi_init(argc, argv);
/* Metrics object for keeping track of performance counters
and other information. Currently required. */
metrics m("als-graphlab");
/* Basic arguments for application. NOTE: File will be automatically 'sharded'. */
std::string filename = get_option_string("file"); // Base filename
int niters = get_option_int("niters", 4); // Number of iterations
/* Preprocess data if needed, or discover preprocess files */
int nshards = convert_matrixmarket_for_ALS_graphlab(filename);
/* Run */
std::vector<vertex_data> * vertices =
run_graphlab_vertexprogram<als_vertex_program>(filename, nshards, niters, false, m, false, false);
/* Error aggregation */
error_aggregator final_error =
run_graphlab_edge_aggregator<als_vertex_program, error_aggregator>(filename, nshards,
error_aggregator::map, error_aggregator::finalize, vertices, m);
std::cout << "Final train error: " << final_error.train_error << std::endl;
/* TODO: write output latent matrices */
delete vertices;
/* Report execution metrics */
metrics_report(m);
return 0;
}
/**
 * Create a bipartite graph from a matrix. Each row corresponds to a vertex
 * with the same id as the row number (0-based), but vertices corresponding to columns
 * have id + num-rows.
*/
int convert_matrixmarket_for_ALS_graphlab(std::string base_filename) {
// Note, code based on: http://math.nist.gov/MatrixMarket/mmio/c/example_read.c
int ret_code;
MM_typecode matcode;
FILE *f;
int M, N, nz;
/**
* Create sharder object
*/
int nshards;
if ((nshards = find_shards<edge_data>(base_filename, get_option_string("nshards", "auto")))) {
logstream(LOG_INFO) << "File " << base_filename << " was already preprocessed, won't do it again. " << std::endl;
logstream(LOG_INFO) << "If this is not intended, please delete the shard files and try again. " << std::endl;
return nshards;
}
sharder<edge_data> sharderobj(base_filename);
sharderobj.start_preprocessing();
if ((f = fopen(base_filename.c_str(), "r")) == NULL) {
logstream(LOG_ERROR) << "Could not open file: " << base_filename << ", error: " << strerror(errno) << std::endl;
exit(1);
}
if (mm_read_banner(f, &matcode) != 0)
{
logstream(LOG_ERROR) << "Could not process Matrix Market banner. File: " << base_filename << std::endl;
logstream(LOG_ERROR) << "Matrix must be in the Matrix Market format. " << std::endl;
exit(1);
}
/* This is how one can screen matrix types if their application */
/* only supports a subset of the Matrix Market data types. */
if (mm_is_complex(matcode) || !mm_is_sparse(matcode))
{
logstream(LOG_ERROR) << "Sorry, this application does not support complex values and requires a sparse matrix." << std::endl;
logstream(LOG_ERROR) << "Market Market type: " << mm_typecode_to_str(matcode) << std::endl;
exit(1);
}
/* find out size of sparse matrix .... */
if ((ret_code = mm_read_mtx_crd_size(f, &M, &N, &nz)) !=0) {
logstream(LOG_ERROR) << "Failed reading matrix size: error=" << ret_code << std::endl;
exit(1);
}
logstream(LOG_INFO) << "Starting to read matrix-market input. Matrix dimensions: "
<< M << " x " << N << ", non-zeros: " << nz << std::endl;
if (M < 5 || N < 5 || nz < 10) {
logstream(LOG_ERROR) << "File is suspiciously small. Something wrong? File: " << base_filename << std::endl;
assert(M < 5 || N < 5 || nz < 10);
}
if (!sharderobj.preprocessed_file_exists()) {
for (int i=0; i<nz; i++)
{
int I, J;
double val;
fscanf(f, "%d %d %lg\n", &I, &J, &val);
I--; /* adjust from 1-based to 0-based */
J--;
sharderobj.preprocessing_add_edge(I, M + J, edge_data(val, edge_data::TRAIN));
}
sharderobj.end_preprocessing();
} else {
logstream(LOG_INFO) << "Matrix already preprocessed, just run sharder." << std::endl;
}
if (f !=stdin) fclose(f);
logstream(LOG_INFO) << "Now creating shards." << std::endl;
// Shard with a specified number of shards, or determine automatically if not defined
nshards = sharderobj.execute_sharding(get_option_string("nshards", "auto"));
return nshards;
}
// ---- (extraction artifact: file boundary between als_graphlab.cpp and als_vertex_program.hpp) ----
/**
* Copyright (c) 2009 Carnegie Mellon University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an "AS
* IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language
* governing permissions and limitations under the License.
*
* For more about this software visit:
*
* http://www.graphlab.ml.cmu.edu
*
*/
#ifndef ALS_VERTEX_PROGRAM_HPP
#define ALS_VERTEX_PROGRAM_HPP
/**
* \file
* \ingroup toolkit_matrix_factorization
*
* \brief This file describes the vertex program for the alternating
* least squares (ALS) matrix factorization algorithm. See
* \ref als_vertex_program for description of the ALS Algorithm.
*/
#include <Eigen/Dense>
//#include <graphlab.hpp>
//#include "eigen_serialization.hpp"
// Eigen aliases used throughout the ALS math below.
typedef Eigen::VectorXd vec_type;  // dense double vector (latent factor, Xy)
typedef Eigen::MatrixXd mat_type;  // dense double matrix (XtX)
/**
 * \ingroup toolkit_matrix_factorization
 *
 * \brief the vertex data type which contains the latent factor.
 *
 * Each row and each column in the matrix corresponds to a different
 * vertex in the ALS graph. Associated with each vertex is a factor
 * (vector) of latent parameters that represent that vertex. The goal
 * of the ALS algorithm is to find the values for these latent
 * parameters such that the non-zero entries in the matrix can be
 * predicted by taking the dot product of the row and column factors.
 *
 * NOTE(review): keep the member order stable — instances may be
 * persisted byte-wise by the engine; verify before reordering.
 */
struct vertex_data {
    /**
     * \brief A shared "constant" that specifies the number of latent
     * values to use (defined and set to 5 in the main translation unit).
     */
    static size_t NLATENT;
    /** \brief The number of times this vertex has been updated. */
    uint32_t nupdates;
    /** \brief The most recent L1 change in the factor value.
     *  Initialized to 1 so a fresh vertex is never considered converged. */
    float residual; //! how much the latent value has changed
    /** \brief The latent factor for this vertex */
    vec_type factor;
    /**
     * \brief Simple default constructor which randomizes the vertex
     * data (zero updates, residual 1, random factor).
     */
    vertex_data() : nupdates(0), residual(1) { randomize(); }
    /** \brief Randomizes the latent factor via Eigen's setRandom(). */
    void randomize() { factor.resize(NLATENT); factor.setRandom(); }
    /** \brief Save the vertex data to a binary archive (disabled in the GraphChi port) */
    //void save(graphlab::oarchive& arc) const {
    //  arc << nupdates << residual << factor;
    //}
    /** \brief Load the vertex data from a binary archive (disabled in the GraphChi port) */
    //void load(graphlab::iarchive& arc) {
    //  arc >> nupdates >> residual >> factor;
    //}
}; // end of vertex data
/**
 * \brief The edge data stores the entry in the matrix.
 *
 * In addition the edge data also stores the most recent error estimate.
 *
 * NOTE(review): this is a POD type written byte-wise into GraphChi shards
 * (see sharder<edge_data> in the conversion routine), so the member layout
 * and size must stay stable.
 */
struct edge_data : public graphlab::IS_POD_TYPE {
    /**
     * \brief The type of data on the edge;
     *
     * \li *Train:* the observed value is correct and used in training
     * \li *Validate:* the observed value is correct but not used in training
     * \li *Predict:* The observed value is not correct and should not be
     * used in training.
     */
    enum data_role_type { TRAIN, VALIDATE, PREDICT };
    /** \brief the observed value for the edge */
    float obs;
    /** \brief The train/validation/test designation of the edge */
    data_role_type role;
    /** \brief basic initialization; defaults to an unobserved PREDICT edge */
    edge_data(float obs = 0, data_role_type role = PREDICT) :
        obs(obs), role(role) { }
}; // end of edge data
/**
 * \brief The graph type is defined in terms of the vertex and edge
 * data: a bipartite rating graph (row vertices + column vertices).
 */
typedef graphlab::distributed_graph<vertex_data, edge_data> graph_type;
/**
 * \brief Given a vertex and an edge return the other vertex in the
 * edge.
 *
 * Compares ids rather than vertex handles: the handle passed in may be a
 * different proxy object than edge.source()/edge.target().
 */
inline graph_type::vertex_type
get_other_vertex(graph_type::edge_type& edge,
                 const graph_type::vertex_type& vertex) {
    return vertex.id() == edge.source().id() ? edge.target() : edge.source();
} // end of get_other_vertex (stray ';' after the function body removed — ill-formed pre-C++11)
/**
 * \brief Given an edge compute the squared (L2) error associated with it:
 * (observation - <source factor, target factor>)^2.
 *
 * Declared inline because this lives in a header: the previous non-inline
 * definition would cause multiple-definition (ODR) link errors as soon as the
 * header is included from more than one translation unit.
 */
inline double extract_l2_error(const graph_type::edge_type & edge) {
    const double pred =
        edge.source().data().factor.dot(edge.target().data().factor);
    const double err = edge.data().obs - pred;
    return err * err;
} // end of extract_l2_error
/**
* \brief The graph loader function is a line parser used for
* distributed graph construction.
*/
// Commented out for graphchi
/*
inline bool graph_loader(graph_type& graph,
const std::string& filename,
const std::string& line) {
ASSERT_FALSE(line.empty());
// Determine the role of the data
edge_data::data_role_type role = edge_data::TRAIN;
if(boost::ends_with(filename,".validate")) role = edge_data::VALIDATE;
else if(boost::ends_with(filename, ".predict")) role = edge_data::PREDICT;
// Parse the line
std::stringstream strm(line);
graph_type::vertex_id_type source_id(-1), target_id(-1);
float obs(0);
strm >> source_id >> target_id;
if(role == edge_data::TRAIN || role == edge_data::VALIDATE) strm >> obs;
// Create an edge and add it to the graph
graph.add_edge(source_id, target_id+1000000, edge_data(obs, role));
return true; // successful load
} // end of graph_loader
*/
/**
 * \brief The gather type used to construct XtX and Xty needed for the ALS
 * update
 *
 * To compute the ALS update we need to compute the sum of
 * \code
 * sum: XtX = nbr.factor.transpose() * nbr.factor
 * sum: Xy  = nbr.factor * edge.obs
 * \endcode
 * For each of the neighbors of a vertex.
 *
 * To do this in the Gather-Apply-Scatter model the gather function
 * computes and returns a pair consisting of XtX and Xy which are then
 * added. The gather type represents that tuple and provides the
 * necessary gather_type::operator+= operation.
 *
 * IMPORTANT: only the upper triangle of XtX is ever written; consumers
 * must read it through selfadjointView<Eigen::Upper>() (see
 * als_vertex_program::apply).
 */
class gather_type {
public:
    /**
     * \brief Stores the current sum of nbr.factor.transpose() *
     * nbr.factor (upper triangle only).
     */
    mat_type XtX;
    /**
     * \brief Stores the current sum of nbr.factor * edge.obs
     */
    vec_type Xy;
    /** \brief basic default constructor: leaves XtX/Xy empty (size 0),
     *  which acts as the identity element for operator+=. */
    gather_type() { }
    /**
     * \brief This constructor computes XtX and Xy and stores the result
     * in XtX and Xy (only the upper triangle of XtX is filled).
     */
    gather_type(const vec_type& X, const double y) :
        XtX(X.size(), X.size()), Xy(X.size()) {
        XtX.triangularView<Eigen::Upper>() = X * X.transpose();
        Xy = X * y;
    } // end of constructor for gather type
    /** \brief Save the values to a binary archive (disabled in the GraphChi port) */
    // void save(graphlab::oarchive& arc) const { arc << XtX << Xy; }
    /** \brief Read the values from a binary archive (disabled in the GraphChi port) */
    // void load(graphlab::iarchive& arc) { arc >> XtX >> Xy; }
    /**
     * \brief Computes XtX += other.XtX and Xy += other.Xy updating this
     * tuples value
     */
    gather_type& operator+=(const gather_type& other) {
        if (other.Xy.size() == 0) {
            // other is the empty/identity element: nothing to add
            ASSERT_EQ(other.XtX.rows(), 0);
            ASSERT_EQ(other.XtX.cols(), 0);
        } else {
            if (Xy.size() == 0) {
                // this side is empty: adopt other's values wholesale
                ASSERT_EQ(XtX.rows(), 0);
                ASSERT_EQ(XtX.cols(), 0);
                XtX = other.XtX; Xy = other.Xy;
            } else {
                // both non-empty: accumulate (upper triangle only, see ctor)
                XtX.triangularView<Eigen::Upper>() += other.XtX;
                Xy += other.Xy;
            }
        }
        return *this;
    } // end of operator+=
}; // end of gather type
/**
 * ALS vertex program type (GraphLab GAS API): gather accumulates XtX/Xy
 * over TRAIN edges, apply solves the regularized least-squares system for
 * the vertex's latent factor, scatter is currently disabled in this port.
 */
class als_vertex_program :
    public graphlab::ivertex_program<graph_type, gather_type,
                                     graphlab::messages::sum_priority>,
    public graphlab::IS_POD_TYPE {
public:
    /** The convergence tolerance */
    static double TOLERANCE;
    /** Regularization weight added to the diagonal of XtX in apply(). */
    static double LAMBDA;
    /** Per-vertex update cap; initialized to size_t(-1) == effectively unlimited. */
    static size_t MAX_UPDATES;
    /** The set of edges to gather along: both directions of the bipartite graph. */
    edge_dir_type gather_edges(icontext_type& context,
                               const vertex_type& vertex) const {
        return graphlab::ALL_EDGES;
    }; // end of gather_edges
    /** The gather function computes XtX and Xy for one TRAIN edge;
     *  non-TRAIN edges contribute an empty gather_type (the += identity). */
    gather_type gather(icontext_type& context, const vertex_type& vertex,
                       edge_type& edge) const {
        if (edge.data().role == edge_data::TRAIN) {
            const vertex_type other_vertex = get_other_vertex(edge, vertex);
            return gather_type(other_vertex.data().factor, edge.data().obs);
        } else return gather_type();
    } // end of gather function
    /** apply collects the sum of XtX and Xy and solves for the new factor */
    void apply(icontext_type& context, vertex_type& vertex,
               const gather_type& sum) {
        // Get and reset the vertex data
        vertex_data& vdata = vertex.data();
        // Determine the number of neighbors. Each vertex has only in or
        // out edges depending on which side of the graph it is located.
        // An empty sum means no TRAIN edges: mark as converged and return.
        if (sum.Xy.size() == 0) { vdata.residual = 0; ++vdata.nupdates; return; }
        mat_type XtX = sum.XtX;
        vec_type Xy = sum.Xy;
        // Add regularization (LAMBDA on the diagonal)
        for (int i = 0; i < XtX.rows(); ++i) XtX(i, i) += LAMBDA; // /nneighbors;
        // Solve the least squares problem using eigen ----------------------------
        const vec_type old_factor = vdata.factor;
        // Only the upper triangle of XtX was filled (see gather_type), so solve
        // through a self-adjoint view with an LDLT (Cholesky-style) factorization.
        vdata.factor = XtX.selfadjointView<Eigen::Upper>().ldlt().solve(Xy);
        // Compute the residual change in the factor ------------------------------
        vdata.residual = (vdata.factor - old_factor).cwiseAbs().sum() / XtX.rows();
        ++vdata.nupdates;
    } // end of apply
    /** The edges to scatter along */
    edge_dir_type scatter_edges(icontext_type& context,
                                const vertex_type& vertex) const {
        return graphlab::ALL_EDGES;
    }; // end of scatter edges
    /** Scatter reschedules neighbors — body commented out in this GraphChi
     *  port, so scatter is currently a no-op. */
    void scatter(icontext_type& context, const vertex_type& vertex,
                 edge_type& edge) const {
        /* edge_data& edata = edge.data();
        if(edata.role == edge_data::TRAIN) {
            const vertex_type other_vertex = get_other_vertex(edge, vertex);
            const vertex_data& vdata = vertex.data();
            const vertex_data& other_vdata = other_vertex.data();
            const double pred = vdata.factor.dot(other_vdata.factor);
            const float error = std::fabs(edata.obs - pred);
            const double priority = (error * vdata.residual);
            // Reschedule neighbors ------------------------------------------------
            if( priority > TOLERANCE && other_vdata.nupdates < MAX_UPDATES)
                context.signal(other_vertex, priority);
        }*/
    } // end of scatter function
    /**
     * \brief Signal all vertices on one side of the bipartite graph
     * (the "left" side, i.e. vertices that have out-edges).
     */
    static graphlab::empty signal_left(icontext_type& context,
                                       vertex_type& vertex) {
        if (vertex.num_out_edges() > 0) context.signal(vertex);
        return graphlab::empty();
    } // end of signal_left
}; // end of als vertex program
/**
 * Edge aggregator that accumulates train/validation RMSE components.
 * map() produces a per-edge contribution, operator+= merges partial
 * aggregates, and finalize() converts the sums into RMSE and prints them.
 */
struct error_aggregator : public graphlab::IS_POD_TYPE {
    typedef als_vertex_program::icontext_type icontext_type;
    typedef graph_type::edge_type edge_type;

    double train_error, validation_error;  // running sums of squared errors
    size_t ntrain, nvalidation;            // edge counts behind each sum

    error_aggregator()
        : train_error(0), validation_error(0), ntrain(0), nvalidation(0) { }

    /** Merge another partial aggregate into this one. */
    error_aggregator& operator+=(const error_aggregator& other) {
        train_error      += other.train_error;
        validation_error += other.validation_error;
        ntrain           += other.ntrain;
        nvalidation      += other.nvalidation;
        return *this;
    }

    /** Per-edge map: contribute the squared error to the matching bucket. */
    static error_aggregator map(icontext_type& context, const graph_type::edge_type& edge) {
        error_aggregator agg;
        const edge_data::data_role_type role = edge.data().role;
        if (role == edge_data::TRAIN) {
            agg.train_error = extract_l2_error(edge);
            agg.ntrain = 1;
        } else if (role == edge_data::VALIDATE) {
            agg.validation_error = extract_l2_error(edge);
            agg.nvalidation = 1;
        }
        return agg;
    }

    /** Convert the accumulated sums into RMSE values and print them. */
    static void finalize(icontext_type& context, error_aggregator& agg) {
        ASSERT_GT(agg.ntrain, 0);
        agg.train_error = std::sqrt(agg.train_error / agg.ntrain);
        context.cout() << context.elapsed_seconds() << "\t" << agg.train_error;
        if (agg.nvalidation > 0) {
            const double validation_rmse =
                std::sqrt(agg.validation_error / agg.nvalidation);
            context.cout() << "\t" << validation_rmse;
        }
        context.cout() << std::endl;
    }
}; // end of error aggregator
/**
 * Saver that emits one "source<TAB>target<TAB>predicted-rating" line per
 * edge; vertices contribute no output.
 */
struct prediction_saver {
    typedef graph_type::vertex_type vertex_type;
    typedef graph_type::edge_type edge_type;

    /** Vertices produce nothing in the prediction output. */
    std::string save_vertex(const vertex_type& vertex) const {
        return std::string();  // no-op
    }

    /** Format one edge as "src\tdst\tprediction\n". */
    std::string save_edge(const edge_type& edge) const {
        const double prediction =
            edge.source().data().factor.dot(edge.target().data().factor);
        std::ostringstream out;
        out << edge.source().id() << '\t'
            << edge.target().id() << '\t'
            << prediction << '\n';
        return out.str();
    }
}; // end of prediction_saver
#endif
// ---- (extraction artifact: file boundary before als_vertices_inmem.cpp) ----
/**
* @file
* @author Aapo Kyrola <akyrola@cs.cmu.edu>
* @version 1.0
*
* @section LICENSE
*
* Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @section DESCRIPTION
*
* Matrix factorization with the Alternative Least Squares (ALS) algorithm.
* This code is based on GraphLab's implementation of ALS by Joey Gonzalez
* and Danny Bickson (CMU). A good explanation of the algorithm is
* given in the following paper:
* Large-Scale Parallel Collaborative Filtering for the Netflix Prize
* Yunhong Zhou, Dennis Wilkinson, Robert Schreiber and Rong Pan
* http://www.springerlink.com/content/j1076u0h14586183/
*
* Faster version of ALS, which stores latent factors of vertices in-memory.
* Thus, this version requires more memory. See the version "als_edgefactors"
* for a low-memory implementation.
*
*
* In the code, we use movie-rating terminology for clarity. This code has been
* tested with the Netflix movie rating challenge, where the task is to predict
* how user rates movies in range from 1 to 5.
*
* This code is has integrated preprocessing, 'sharding', so it is not necessary
* to run sharder prior to running the matrix factorization algorithm. Input
* data must be provided in the Matrix Market format (http://math.nist.gov/MatrixMarket/formats.html).
*
* ALS uses free linear algebra library 'Eigen'. See Readme_Eigen.txt for instructions
* how to obtain it.
*
* At the end of the processing, the two latent factor matrices are written into files in
* the matrix market format.
*
* @section USAGE
*
* bin/example_apps/matrix_factorization/als_edgefactors file <matrix-market-input> niters 5
*
*
*/
#define GRAPHCHI_DISABLE_COMPRESSION
#include <string>
#include <algorithm>
#include "graphchi_basic_includes.hpp"
/* ALS-related classes are contained in als.hpp */
#include "als.hpp"
using namespace graphchi;
/**
 * Type definitions. Remember to create suitable graph shards using the
 * Sharder-program.
 */
typedef latentvec_t VertexDataType;  // per-vertex latent factor (NLATENT doubles)
typedef float EdgeDataType; // Edges store the "rating" of user->movie pair
/**
 * GraphChi programs need to subclass GraphChiProgram<vertex-type, edge-type>
 * class. The main logic is usually in the update function.
 *
 * This variant keeps every vertex's latent factor in RAM
 * (latent_factors_inmem), so a neighbor's factor is read from memory
 * instead of edge data.
 */
struct ALSVerticesInMemProgram : public GraphChiProgram<VertexDataType, EdgeDataType> {
    mutex lock;  // protects max_left_vertex / max_right_vertex updates below
    std::vector<latentvec_t> latent_factors_inmem;  // factor per vertex id, shared across worker threads

    /** Helper: store the factor both as vertex data (on disk) and in RAM. */
    virtual void set_latent_factor(graphchi_vertex<VertexDataType, EdgeDataType> &vertex, latentvec_t &fact) {
        vertex.set_data(fact); // Note, also stored on disk. This is non-optimal...
        latent_factors_inmem[vertex.id()] = fact;
    }

    /**
     * Called before an iteration starts. On the first iteration, allocates
     * the in-memory factor table for all vertices.
     */
    void before_iteration(int iteration, graphchi_context &gcontext) {
        if (iteration == 0) {
            latent_factors_inmem.resize(gcontext.nvertices); // Initialize in-memory vertices.
        }
    }

    /**
     * Vertex update function: iteration 0 randomizes the factor; later
     * iterations solve one regularized least-squares system per vertex.
     */
    void update(graphchi_vertex<VertexDataType, EdgeDataType> &vertex, graphchi_context &gcontext) {
        if (gcontext.iteration == 0) {
            /* On first iteration, initialize vertex (and its edges). This is usually required, because
               on each run, GraphChi will modify the data files. To start from scratch, it is easiest
               do initialize the program in code. Alternatively, you can keep a copy of initial data files. */
            latentvec_t latentfac;
            latentfac.init();
            set_latent_factor(vertex, latentfac);
        } else {
            mat XtX(NLATENT, NLATENT);
            XtX.setZero();
            vec Xty(NLATENT);
            Xty.setZero();

            // Compute XtX and Xty (NOTE: unweighted). Only the lower triangle
            // of XtX is accumulated here; it is symmetrized below.
            for(int e=0; e < vertex.num_edges(); e++) {
                float observation = vertex.edge(e)->get_data();
                latentvec_t & nbr_latent = latent_factors_inmem[vertex.edge(e)->vertex_id()];
                for(int i=0; i<NLATENT; i++) {
                    Xty(i) += nbr_latent[i] * observation;
                    for(int j=i; j < NLATENT; j++) {
                        XtX(j,i) += nbr_latent[i] * nbr_latent[j];
                    }
                }
            }

            // Symmetrize
            for(int i=0; i <NLATENT; i++)
                for(int j=i + 1; j< NLATENT; j++) XtX(i,j) = XtX(j,i);

            // Diagonal: regularization scaled by the vertex degree
            for(int i=0; i < NLATENT; i++) XtX(i,i) += (LAMBDA) * vertex.num_edges();

            // Solve the least squares problem with eigen using Cholesky decomposition
            vec veclatent = XtX.ldlt().solve(Xty);

            // Convert to plain doubles (this is useful because now the output data by GraphCHI
            // is plain binary double matrix that can be read, for example, by Matlab).
            latentvec_t newlatent;
            for(int i=0; i < NLATENT; i++) newlatent[i] = veclatent[i];

            double sqerror = 0;
            bool compute_rmse = (gcontext.iteration == gcontext.num_iterations-1 && vertex.num_outedges() == 0);
            if (compute_rmse) { // Compute RMSE only on "right side" of bipartite graph
                for(int e=0; e < vertex.num_edges(); e++) {
                    // Compute RMSE
                    float observation = vertex.edge(e)->get_data();
                    latentvec_t & nbr_latent = latent_factors_inmem[vertex.edge(e)->vertex_id()];
                    double prediction = nbr_latent.dot(newlatent);
                    sqerror += (prediction - observation) * (prediction - observation);
                }
                // rmse is a global accumulator shared by worker threads; guard it.
                rmselock.lock();
                rmse += sqerror;
                rmselock.unlock();

                if (vertex.id() % 5000 == 1) {
                    logstream(LOG_DEBUG) << "Computed RMSE for : " << vertex.id() << std::endl;
                }
            }

            set_latent_factor(vertex, newlatent);

            if (vertex.id() % 100000 == 1) {
                std::cout << gcontext.iteration << ": " << vertex.id() << std::endl;
            }
        }

        /* Hack: we need to count ourselves the number of vertices on left
           and right side of the bipartite graph.
           TODO: maybe there should be specialized support for bipartite graphs in GraphChi?
        */
        if (vertex.num_outedges() > 0) {
            // Left side on the bipartite graph
            // (cheap unlocked pre-check; the max is recomputed under the lock)
            if (vertex.id() > max_left_vertex) {
                lock.lock();
                max_left_vertex = std::max(vertex.id(), max_left_vertex);
                lock.unlock();
            }
        } else {
            if (vertex.id() > max_right_vertex) {
                lock.lock();
                max_right_vertex = std::max(vertex.id(), max_right_vertex);
                lock.unlock();
            }
        }
    }

    /**
     * Called after an iteration has finished. (No-op.)
     */
    void after_iteration(int iteration, graphchi_context &gcontext) {
    }

    /**
     * Called before an execution interval is started. (No-op.)
     */
    void before_exec_interval(vid_t window_st, vid_t window_en, graphchi_context &gcontext) {
    }

    /**
     * Called after an execution interval has finished. (No-op.)
     */
    void after_exec_interval(vid_t window_st, vid_t window_en, graphchi_context &gcontext) {
    }
};
int main(int argc, const char ** argv) {
    /* GraphChi initialization will read the command line
       arguments and the configuration file. */
    graphchi_init(argc, argv);

    /* Metrics object for keeping track of performance counters
       and other information. Currently required. */
    metrics m("als-inmemory-factors");

    /* Basic arguments for application. NOTE: File will be automatically 'sharded'. */
    std::string filename = get_option_string("file"); // Base filename
    int niters = get_option_int("niters", 6); // Number of iterations
    bool scheduler = false; // Selective scheduling not supported for now.

    /* Preprocess data if needed, or discover preprocess files */
    int nshards = convert_matrixmarket_for_ALS<float>(filename);

    /* Run */
    ALSVerticesInMemProgram program;
    graphchi_engine<VertexDataType, EdgeDataType> engine(filename, nshards, scheduler, m);
    // Factors live in RAM, so the engine never needs to write edge data back.
    engine.set_modifies_inedges(false);
    engine.set_modifies_outedges(false);
    engine.set_enable_deterministic_parallelism(false);
    engine.run(program, niters);

    /* Report result (train RMSE). NOTE(review): rmse is accumulated on the last
       iteration only, once per edge (from the right side of the bipartite graph),
       so dividing by num_edges() gives the mean squared error over all edges. */
    double trainRMSE = sqrt(rmse / (1.0 * engine.num_edges()));
    m.set("train_rmse", trainRMSE);
    m.set("latent_dimension", NLATENT);
    std::cout << "Latent factor dimension: " << NLATENT << " - train RMSE: " << trainRMSE << std::endl;

    /* Output latent factor matrices in matrix-market format */
    vid_t numvertices = engine.num_vertices();
    assert(numvertices == max_right_vertex + 1); // Sanity check: right side holds the largest ids
    output_als_result(filename, numvertices, max_left_vertex);

    /* Report execution metrics */
    metrics_report(m);
    return 0;
}
// ---- (extraction artifact: file boundary before als_edgefactors.cpp) ----
/**
* @file
* @author Aapo Kyrola <akyrola@cs.cmu.edu>
* @version 1.0
*
* @section LICENSE
*
* Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @section DESCRIPTION
*
* Matrix factorization with the Alternative Least Squares (ALS) algorithm.
* This code is based on GraphLab's implementation of ALS by Joey Gonzalez
* and Danny Bickson (CMU). A good explanation of the algorithm is
* given in the following paper:
* Large-Scale Parallel Collaborative Filtering for the Netflix Prize
* Yunhong Zhou, Dennis Wilkinson, Robert Schreiber and Rong Pan
* http://www.springerlink.com/content/j1076u0h14586183/
*
* There are two versions of the ALS in the example applications. This version
* is slower, but works with very low memory. In this implementation, a vertex
* writes its D-dimensional latent factor to its incident edges. See application
* "als_vertices_inmem" for a faster version, which requires more memory.
*
* In the code, we use movie-rating terminology for clarity. This code has been
* tested with the Netflix movie rating challenge, where the task is to predict
* how user rates movies in range from 1 to 5.
*
* This code is has integrated preprocessing, 'sharding', so it is not necessary
* to run sharder prior to running the matrix factorization algorithm. Input
* data must be provided in the Matrix Market format (http://math.nist.gov/MatrixMarket/formats.html).
*
* ALS uses free linear algebra library 'Eigen'. See Readme_Eigen.txt for instructions
* how to obtain it.
*
* At the end of the processing, the two latent factor matrices are written into files in
* the matrix market format.
*
* @section USAGE
*
* bin/example_apps/matrix_factorization/als_edgefactors file <matrix-market-input> niters 5
*
*
*/
#include <string>
#include <algorithm>
#include "graphchi_basic_includes.hpp"
/* ALS-related classes are contained in als.hpp */
#include "als.hpp"
using namespace graphchi;
/**
 * Type definitions. Remember to create suitable graph shards using the
 * Sharder-program.
 */
typedef latentvec_t VertexDataType;  // per-vertex latent factor (NLATENT doubles)
typedef als_factor_and_weight EdgeDataType; // Edges store the "rating" of user->movie pair
                                            // and the latent factor of their incident vertex.
/**
 * GraphChi programs need to subclass GraphChiProgram<vertex-type, edge-type>
 * class. The main logic is usually in the update function.
 *
 * Low-memory ALS variant: each vertex writes its latent factor onto its
 * incident edges (als_factor_and_weight) instead of keeping all factors
 * in RAM.
 */
struct ALSEdgeFactorsProgram : public GraphChiProgram<VertexDataType, EdgeDataType> {
    mutex lock;  // protects max_left_vertex / max_right_vertex updates below

    /** Helper: store the new factor in the vertex and on every incident edge. */
    virtual void set_latent_factor(graphchi_vertex<VertexDataType, EdgeDataType> &vertex, latentvec_t &fact) {
        vertex.set_data(fact);
        for(int i=0; i < vertex.num_edges(); i++) {
            als_factor_and_weight factwght = vertex.edge(i)->get_data();
            factwght.factor = fact;
            vertex.edge(i)->set_data(factwght); // Note that neighbors override the values they have written to edges.
            // This is ok, because vertices are always executed in same order.
        }
    }

    /**
     * Vertex update function: iteration 0 randomizes the factor; later
     * iterations solve one regularized least-squares system per vertex
     * using the neighbor factors read from the edges.
     */
    void update(graphchi_vertex<VertexDataType, EdgeDataType> &vertex, graphchi_context &gcontext) {
        if (gcontext.iteration == 0) {
            /* On first iteration, initialize vertex (and its edges). This is usually required, because
               on each run, GraphChi will modify the data files. To start from scratch, it is easiest
               do initialize the program in code. Alternatively, you can keep a copy of initial data files. */
            latentvec_t latentfac;
            latentfac.init();
            set_latent_factor(vertex, latentfac);
        } else {
            mat XtX(NLATENT, NLATENT);
            XtX.setZero();
            vec Xty(NLATENT);
            Xty.setZero();

            // Compute XtX and Xty (NOTE: unweighted). Only the lower triangle
            // of XtX is accumulated here; it is symmetrized below.
            for(int e=0; e < vertex.num_edges(); e++) {
                float observation = vertex.edge(e)->get_data().weight;
                latentvec_t nbr_latent = vertex.edge(e)->get_data().factor;
                for(int i=0; i<NLATENT; i++) {
                    Xty(i) += nbr_latent[i] * observation;
                    for(int j=i; j < NLATENT; j++) {
                        XtX(j,i) += nbr_latent[i] * nbr_latent[j];
                    }
                }
            }

            // Symmetrize
            for(int i=0; i <NLATENT; i++)
                for(int j=i + 1; j< NLATENT; j++) XtX(i,j) = XtX(j,i);

            // Diagonal: regularization scaled by the vertex degree
            for(int i=0; i < NLATENT; i++) XtX(i,i) += (LAMBDA) * vertex.num_edges();

            // Solve the least squares problem with eigen using Cholesky decomposition
            vec veclatent = XtX.ldlt().solve(Xty);

            // Convert to plain doubles (this is useful because now the output data by GraphCHI
            // is plain binary double matrix that can be read, for example, by Matlab).
            latentvec_t newlatent;
            for(int i=0; i < NLATENT; i++) newlatent[i] = veclatent[i];

            double sqerror = 0;
            bool compute_rmse = (gcontext.iteration == gcontext.num_iterations-1 && vertex.num_outedges() == 0);
            if (compute_rmse) { // Compute RMSE only on "right side" of bipartite graph
                for(int e=0; e < vertex.num_edges(); e++) {
                    // Compute RMSE
                    float observation = vertex.edge(e)->get_data().weight;
                    latentvec_t nbr_latent = vertex.edge(e)->get_data().factor;
                    double prediction = nbr_latent.dot(newlatent);
                    sqerror += (prediction - observation) * (prediction - observation);
                }
                // rmse is a global accumulator shared by worker threads; guard it.
                rmselock.lock();
                rmse += sqerror;
                rmselock.unlock();

                if (vertex.id() % 5000 == 1) {
                    logstream(LOG_DEBUG) << "Computed RMSE for : " << vertex.id() << std::endl;
                }
            }

            set_latent_factor(vertex, newlatent);

            if (vertex.id() % 100000 == 1) {
                std::cout << gcontext.iteration << ": " << vertex.id() << std::endl;
            }
        }

        /* Hack: we need to count ourselves the number of vertices on left
           and right side of the bipartite graph.
           TODO: maybe there should be specialized support for bipartite graphs in GraphChi?
        */
        if (vertex.num_outedges() > 0) {
            // Left side on the bipartite graph
            // (cheap unlocked pre-check; the max is recomputed under the lock)
            if (vertex.id() > max_left_vertex) {
                lock.lock();
                max_left_vertex = std::max(vertex.id(), max_left_vertex);
                lock.unlock();
            }
        } else {
            if (vertex.id() > max_right_vertex) {
                lock.lock();
                max_right_vertex = std::max(vertex.id(), max_right_vertex);
                lock.unlock();
            }
        }
    }

    /**
     * Called before an iteration starts. (No-op.)
     */
    void before_iteration(int iteration, graphchi_context &gcontext) {
    }

    /**
     * Called after an iteration has finished. (No-op.)
     */
    void after_iteration(int iteration, graphchi_context &gcontext) {
    }

    /**
     * Called before an execution interval is started. (No-op.)
     */
    void before_exec_interval(vid_t window_st, vid_t window_en, graphchi_context &gcontext) {
    }

    /**
     * Called after an execution interval has finished. (No-op.)
     */
    void after_exec_interval(vid_t window_st, vid_t window_en, graphchi_context &gcontext) {
    }
};
int main(int argc, const char ** argv) {
    /* GraphChi initialization will read the command line
       arguments and the configuration file. */
    graphchi_init(argc, argv);

    /* Metrics object for keeping track of performance counters
       and other information. Currently required. */
    metrics m("als-edgefactors");

    /* Basic arguments for application. NOTE: File will be automatically 'sharded'. */
    std::string filename = get_option_string("file"); // Base filename
    int niters = get_option_int("niters", 6); // Number of iterations
    bool scheduler = false; // Selective scheduling not supported for now.

    /* Preprocess data if needed, or discover preprocess files */
    int nshards = convert_matrixmarket_for_ALS<als_factor_and_weight>(filename);

    /* Run */
    ALSEdgeFactorsProgram program;
    graphchi_engine<VertexDataType, EdgeDataType> engine(filename, nshards, scheduler, m);
    engine.set_enable_deterministic_parallelism(false);
    engine.run(program, niters);

    /* Report result (train RMSE). NOTE(review): rmse is accumulated on the last
       iteration only, once per edge (from the right side of the bipartite graph),
       so dividing by num_edges() gives the mean squared error over all edges. */
    double trainRMSE = sqrt(rmse / (1.0 * engine.num_edges()));
    m.set("train_rmse", trainRMSE);
    m.set("latent_dimension", NLATENT);
    std::cout << "Latent factor dimension: " << NLATENT << " - train RMSE: " << trainRMSE << std::endl;

    /* Output latent factor matrices in matrix-market format */
    vid_t numvertices = engine.num_vertices();
    assert(numvertices == max_right_vertex + 1); // Sanity check: right side holds the largest ids
    output_als_result(filename, numvertices, max_left_vertex);

    /* Report execution metrics */
    metrics_report(m);
    return 0;
}
// ---- (extraction artifact: file boundary before als.hpp) ----
/**
* @file
* @author Aapo Kyrola <akyrola@cs.cmu.edu>
* @version 1.0
*
* @section LICENSE
*
* Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @section DESCRIPTION
*
* Common code for ALS implementations.
*/
#ifndef DEF_ALSHPP
#define DEF_ALSHPP
#include <assert.h>
#include <cmath>
#include <errno.h>
#include <string>
#include <stdint.h>
#include "matrixmarket/mmio.h"
#include "matrixmarket/mmio.c"
#include "api/chifilenames.hpp"
#include "api/vertex_aggregator.hpp"
#include "preprocessing/sharder.hpp"
// See note above about Eigen
#include "Eigen/Dense"
#define EIGEN_YES_I_KNOW_SPARSE_MODULE_IS_NOT_STABLE_YET
#include "Eigen/Sparse"
#include "Eigen/Cholesky"
#include "Eigen/Eigenvalues"
#include "Eigen/SVD"
using namespace Eigen;
typedef MatrixXd mat;
typedef VectorXd vec;
typedef VectorXi ivec;
typedef MatrixXi imat;
typedef SparseVector<double> sparse_vec;
using namespace graphchi;
#ifndef NLATENT
#define NLATENT 5 // Dimension of the latent factors. You can specify this in compile time as well (in make).
#endif
// Regularization weight (lambda) of the ALS objective.
double LAMBDA = 0.065;
/// RMSE computation: running sum of squared training errors, accumulated
/// during updates. rmselock presumably guards concurrent additions to
/// rmse -- verify at the use sites in the update functions.
double rmse=0.0;
mutex rmselock;
// Hackish: we need to count the number of left
// and right vertices in the bipartite graph ourselves.
// Row (left) vertices occupy ids [0, max_left_vertex]; column (right)
// vertices follow after them (see convert_matrixmarket_for_ALS).
vid_t max_left_vertex =0 ;
vid_t max_right_vertex = 0;
/**
 * Dense latent-factor vector of NLATENT doubles: the per-vertex state
 * of the ALS factorization.
 */
struct latentvec_t {
    double d[NLATENT];
    // Intentionally leaves d uninitialized; call init() before first use.
    latentvec_t() {
    }
    /// Fill with pseudo-random values in [0, 0.999] (steps of 0.001).
    /// Uses the global std::rand() state, so runs are not reproducible
    /// unless the caller seeds srand().
    void init() {
        for(int k=0; k < NLATENT; k++) d[k] = 0.001 * (std::rand() % 1000);
    }
    /// Mutable access to factor component idx (no bounds check).
    double & operator[] (int idx) {
        return d[idx];
    }
    /// Const access (added for const-correctness; the mutable overload
    /// above is unchanged, so existing callers are unaffected).
    const double & operator[] (int idx) const {
        return d[idx];
    }
    /// Component-wise inequality (exact double comparison, used to detect
    /// whether a factor vector changed at all).
    bool operator!=(const latentvec_t &oth) const {
        for(int i=0; i<NLATENT; i++) { if (d[i] != oth.d[i]) return true; }
        return false;
    }
    /// Inner product with another factor vector. Takes const& now:
    /// dot() never modifies its argument, and const& still binds to all
    /// existing call sites.
    double dot(const latentvec_t &oth) const {
        double x=0;
        for(int i=0; i<NLATENT; i++) x+= oth.d[i]*d[i];
        return x;
    }
};
/**
 * Edge value used during ALS preprocessing: the observed rating (weight)
 * together with a latent factor vector.
 */
struct als_factor_and_weight {
    latentvec_t factor;   // latent factors (uninitialized until init())
    float weight;         // observed matrix entry (rating)
    // Default ctor previously left `weight` uninitialized, which is
    // indeterminate if such an edge is ever read before assignment.
    // Initialize it to zero; `factor` keeps its original (uninitialized)
    // behavior, matching the upstream code.
    als_factor_and_weight() : weight(0.0f) {}
    /// Construct from an observed rating; randomizes the factor vector.
    als_factor_and_weight(float obs) {
        weight = obs;
        factor.init();
    }
};
/**
* Create a bipartite graph from a matrix. Each row corresponds to vertex
* with the same id as the row number (0-based), but vertices correponsing to columns
* have id + num-rows.
*/
/**
 * Reads a sparse matrix in Matrix Market coordinate format and converts it
 * into a bipartite GraphChi graph: row i becomes vertex i, column j becomes
 * vertex M + j, and each nonzero becomes an edge carrying the value.
 * Returns the number of shards created (or already present).
 *
 * Based on: http://math.nist.gov/MatrixMarket/mmio/c/example_read.c
 */
template <typename als_edge_type>
int convert_matrixmarket_for_ALS(std::string base_filename) {
    // Note, code based on: http://math.nist.gov/MatrixMarket/mmio/c/example_read.c
    int ret_code;
    MM_typecode matcode;
    FILE *f;
    // NOTE(review): the mmio reference implementation declares
    // mm_read_mtx_crd_size with int* parameters; confirm this build's
    // mmio.h really takes uint*/size_t*, otherwise this is UB.
    uint M, N;
    size_t nz;
    /**
     * Reuse existing shards if the input file has not been modified since
     * they were created; otherwise fall through and re-shard.
     */
    int nshards;
    if ((nshards = find_shards<als_edge_type>(base_filename, get_option_string("nshards", "auto")))) {
        if (check_origfile_modification_earlier<als_edge_type>(base_filename, nshards)) {
            logstream(LOG_INFO) << "File " << base_filename << " was already preprocessed, won't do it again. " << std::endl;
            logstream(LOG_INFO) << "If this is not intended, please delete the shard files and try again. " << std::endl;
            return nshards;
        }
    }
    sharder<als_edge_type> sharderobj(base_filename);
    sharderobj.start_preprocessing();
    if ((f = fopen(base_filename.c_str(), "r")) == NULL) {
        logstream(LOG_ERROR) << "Could not open file: " << base_filename << ", error: " << strerror(errno) << std::endl;
        exit(1);
    }
    if (mm_read_banner(f, &matcode) != 0)
    {
        logstream(LOG_ERROR) << "Could not process Matrix Market banner. File: " << base_filename << std::endl;
        logstream(LOG_ERROR) << "Matrix must be in the Matrix Market format. " << std::endl;
        exit(1);
    }
    /* This is how one can screen matrix types if their application */
    /* only supports a subset of the Matrix Market data types. */
    if (mm_is_complex(matcode) || !mm_is_sparse(matcode))
    {
        logstream(LOG_ERROR) << "Sorry, this application does not support complex values and requires a sparse matrix." << std::endl;
        logstream(LOG_ERROR) << "Market Market type: " << mm_typecode_to_str(matcode) << std::endl;
        exit(1);
    }
    /* find out size of sparse matrix .... */
    if ((ret_code = mm_read_mtx_crd_size(f, &M, &N, &nz)) !=0) {
        logstream(LOG_ERROR) << "Failed reading matrix size: error=" << ret_code << std::endl;
        exit(1);
    }
    logstream(LOG_INFO) << "Starting to read matrix-market input. Matrix dimensions: "
                        << M << " x " << N << ", non-zeros: " << nz << std::endl;
    if (M < 5 || N < 5 || nz < 10) {
        logstream(LOG_ERROR) << "File is suspiciously small. Something wrong? File: " << base_filename << std::endl;
        // NOTE(review): this assert restates the condition that is true when
        // we reach this branch, so it always passes and never aborts.
        // assert(M >= 5 && N >= 5 && nz >= 10) was probably intended if the
        // run should stop on a too-small input -- confirm intent.
        assert(M < 5 || N < 5 || nz < 10);
    }
    if (!sharderobj.preprocessed_file_exists()) {
        // Stream the nonzeros: each line is "row col value" (1-based indices).
        for (size_t i=0; i<nz; i++)
        {
            uint I, J;
            double val;
            int rc = fscanf(f, "%u %u %lg\n", &I, &J, &val);
            if (rc != 3)
                logstream(LOG_FATAL)<<"Error reading line: " << i << std::endl;
            I--; /* adjust from 1-based to 0-based */
            J--;
            // Column vertices are offset by M so rows and columns never collide.
            sharderobj.preprocessing_add_edge(I, M + J, als_edge_type((float)val));
        }
        sharderobj.end_preprocessing();
    } else {
        logstream(LOG_INFO) << "Matrix already preprocessed, just run sharder." << std::endl;
    }
    // Vestigial guard from the mmio example (which also read from stdin);
    // here f always comes from fopen, so this always closes.
    if (f !=stdin) fclose(f);
    logstream(LOG_INFO) << "Now creating shards." << std::endl;
    // Shard with a specified number of shards, or determine automatically if not defined
    nshards = sharderobj.execute_sharding(get_option_string("nshards", "auto"));
    return nshards;
}
/**
 * Vertex callback that streams latent factors into a matrix-market
 * "array" formatted file: NLATENT real values per vertex, one per line.
 */
struct MMOutputter : public VCallback<latentvec_t> {
    FILE * outf;   // owned output handle, closed by the destructor
    // Opens the output file and writes the matrix-market banner plus the
    // (nvertices x NLATENT) array-size header. Aborts (assert) if the file
    // cannot be opened for writing.
    MMOutputter(std::string fname, vid_t nvertices)  {
        MM_typecode matcode;
        mm_initialize_typecode(&matcode);
        mm_set_matrix(&matcode);
        mm_set_array(&matcode);
        mm_set_real(&matcode);
        outf = fopen(fname.c_str(), "w");
        assert(outf != NULL);
        mm_write_banner(outf, matcode);
        mm_write_mtx_array_size(outf, nvertices, NLATENT);
    }
    // Called once per vertex in the requested range: appends the vertex's
    // NLATENT factor components, one value per line.
    // (Parameter named `vec` shadows the Eigen VectorXd typedef above --
    // harmless here, but worth renaming.)
    void callback(vid_t vertex_id, latentvec_t &vec) {
        for(int i=0; i < NLATENT; i++) {
            fprintf(outf, "%lf\n", vec.d[i]);
        }
    }
    ~MMOutputter() {
        if (outf != NULL) fclose(outf);
    }
};
/**
 * Writes the two factor matrices in matrix-market format:
 * left (row) vertices [0, max_left_vertex] go to <filename>_U.mm and
 * right (column) vertices go to <filename>_V.mm.
 */
void output_als_result(std::string filename, vid_t numvertices, vid_t max_left_vertex) {
    MMOutputter mmoutput_left(filename + "_U.mm", max_left_vertex + 1);
    foreach_vertices<latentvec_t>(filename, 0, max_left_vertex + 1, mmoutput_left);
    MMOutputter mmoutput_right(filename + "_V.mm", numvertices - max_left_vertex - 1);
    // NOTE(review): the V header above declares numvertices - max_left_vertex - 1
    // rows, but this call ends at numvertices-1 whereas the U call above ends at
    // max_left_vertex + 1 (one past its last vertex). If the end bound of
    // foreach_vertices is exclusive, the last right vertex is skipped here and
    // the row count in the header will not match -- confirm the bound's contract.
    foreach_vertices<latentvec_t>(filename, max_left_vertex + 1, numvertices-1, mmoutput_right);
    logstream(LOG_INFO) << "ALS output files (in matrix market format): " << filename + "_U.mm" <<
                           ", " << filename + "_V.mm" << std::endl;
}
#endif
| C++ |
/**
* @file
* @author Aapo Kyrola <akyrola@cs.cmu.edu>
* @version 1.0
*
* @section LICENSE
*
* Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @section DESCRIPTION
*
* A simple community detection algorithm based on label propagation.
* LPA-algorithm is explained in http://arxiv.org/pdf/0910.1154.pdf
* "Advanced modularity-specialized label propagation algorithm for detecting communities in networks
* X. Liu, T. Murata Tokyo Institute of Technology, 2-12-1 Ookayama, Meguro, Tokyo 152-8552, Japan
*
* @section REMARKS
*
* The algorithm is very similar to the connected components algorithm, but instead
* of vertex choosing the minimum label of its neighbor, it chooses the most frequent one.
*
* However, because the operation (most frequent label) is not commutative,
* we need to store both vertices labels in an edge. See comment below, above the
* struct "bidirectional_label".
*
* Note, that this algorithm is not very sophisticated and is prone to local minimas.
* If you want to use this seriously, try with different initial labeling.
* Also, a more sophisticated algorithm called LPAm should be doable on GraphChi.
*
* @author Aapo Kyrola
*/
#include <cmath>
#include <map>
#include <string>
#include "graphchi_basic_includes.hpp"
#include "util/labelanalysis.hpp"
using namespace graphchi;
#define GRAPHCHI_DISABLE_COMPRESSION
/**
* Unlike in connected components, we need
* to ensure that neighbors do not overwrite each
* others values. This is achieved by keeping two values
* in an edge. In this struct, smaller_one is the id of the
* vertex that has smaller id, and larger_one the others.
* This complexity is due to us ignoring the direction of an edge.
*/
/**
 * Edge payload holding the current label of BOTH endpoints, so that
 * neighbors never overwrite each other's value. smaller_one is the label
 * written by the endpoint with the smaller vertex id, larger_one the one
 * written by the other endpoint (edge direction itself is ignored).
 */
struct bidirectional_label {
    vid_t smaller_one;   // label of the endpoint with the smaller vertex id
    vid_t larger_one;    // label of the endpoint with the larger vertex id
};
/// Returns a mutable reference to the slot of `bidir` that holds the
/// NEIGHBOR's label, as seen from the vertex with id `myid` (the neighbor
/// has id `nbid`). The neighbor owns the slot opposite to ours.
vid_t & neighbor_label(bidirectional_label & bidir, vid_t myid, vid_t nbid) {
    return (myid < nbid) ? bidir.larger_one : bidir.smaller_one;
}
/// Returns a mutable reference to the slot of `bidir` that holds THIS
/// vertex's own label: the smaller-id endpoint writes smaller_one, the
/// larger-id endpoint writes larger_one.
vid_t & my_label(bidirectional_label & bidir, vid_t myid, vid_t nbid) {
    return (myid < nbid) ? bidir.smaller_one : bidir.larger_one;
}
typedef vid_t VertexDataType; // vid_t is the vertex id type
typedef bidirectional_label EdgeDataType; // Note, 8-byte edge data
/**
* GraphChi programs need to subclass GraphChiProgram<vertex-type, edge-type>
* class. The main logic is usually in the update function.
*/
/**
 * Label-propagation community detection. Iteration 0 seeds every vertex
 * with its own id as the label; later iterations adopt the most frequent
 * label among the neighbors. Converges when no vertex changes its label
 * (nothing gets scheduled anymore).
 */
struct CommunityDetectionProgram : public GraphChiProgram<VertexDataType, EdgeDataType> {
    /**
     * Vertex update function.
     */
    void update(graphchi_vertex<VertexDataType, EdgeDataType> &vertex, graphchi_context &gcontext) {
        /* This program requires selective scheduling. */
        assert(gcontext.scheduler != NULL);
        vid_t newlabel;
        if (gcontext.iteration == 0) {
            /* On first iteration, choose label = vertex id */
            vid_t firstlabel = vertex.id();
            vertex.set_data(firstlabel);
            newlabel = firstlabel;
            /* Schedule myself for next iteration */
            gcontext.scheduler->add_task(vertex.id());
        } else {
            if (vertex.num_edges() == 0) return; // trivial: isolated vertex keeps its label
            /* The basic idea is to find the label that is most popular among
               this vertex's neighbors. This label will be chosen as the new label
               of this vertex. */
            // This part could be optimized: STL map is quite slow.
            std::map<vid_t, int> counts;
            int maxcount=0;
            vid_t maxlabel=0;
            /* Iterate over all the edges */
            for(int i=0; i < vertex.num_edges(); i++) {
                /* Extract neighbor's current label. The edge stores the labels
                   of both endpoints, so pick the neighbor's slot
                   (see bidirectional_label above). */
                bidirectional_label edgelabel = vertex.edge(i)->get_data();
                vid_t nblabel = neighbor_label(edgelabel, vertex.id(), vertex.edge(i)->vertex_id());
                /* Check if this label (nblabel) has been encountered before ... */
                std::map<vid_t, int>::iterator existing = counts.find(nblabel);
                int newcount = 0;
                if(existing == counts.end()) {
                    /* ... if not, we add this label with count of one to the map */
                    counts.insert(std::pair<vid_t,int>(nblabel, 1));
                    newcount = 1;
                } else {
                    /* ... if yes, we increment the counter for this label by 1 */
                    existing->second++;
                    newcount = existing->second;
                }
                /* Keep track of the most frequent label; ties are broken in
                   favor of the LARGER label id. */
                if (newcount > maxcount || (maxcount == newcount && nblabel > maxlabel)) {
                    maxlabel = nblabel;
                    maxcount = newcount;
                }
            }
            newlabel = maxlabel;
        }
        /**
         * If my label changed (or this is the seeding iteration), publish it
         * on all incident edges and schedule the neighbors to react.
         */
        if (newlabel != vertex.get_data() || gcontext.iteration == 0) {
            vertex.set_data(newlabel);
            for(int i=0; i<vertex.num_edges(); i++) {
                bidirectional_label labels_on_edge = vertex.edge(i)->get_data();
                // Write into MY slot of the edge payload, not the neighbor's.
                my_label(labels_on_edge, vertex.id(), vertex.edge(i)->vertex_id()) = newlabel;
                vertex.edge(i)->set_data(labels_on_edge);
                // On first iteration, everyone schedules themselves already,
                // so only notify neighbors on later iterations.
                if (gcontext.iteration > 0)
                    gcontext.scheduler->add_task(vertex.edge(i)->vertex_id());
            }
        }
    }
    /**
     * Called before an iteration starts. (No per-iteration state needed.)
     */
    void before_iteration(int iteration, graphchi_context &info) {
    }
    /**
     * Called after an iteration has finished. (No-op.)
     */
    void after_iteration(int iteration, graphchi_context &ginfo) {
    }
    /**
     * Called before an execution interval is started. (No-op.)
     */
    void before_exec_interval(vid_t window_st, vid_t window_en, graphchi_context &ginfo) {
    }
    /**
     * Called after an execution interval has finished. (No-op.)
     */
    void after_exec_interval(vid_t window_st, vid_t window_en, graphchi_context &ginfo) {
    }
};
/**
 * Entry point: shards the input graph if needed, runs label-propagation
 * community detection, then writes a per-community membership analysis.
 */
int main(int argc, const char ** argv) {
    // Parse command-line arguments and the GraphChi configuration file.
    graphchi_init(argc, argv);
    // Performance counters / run statistics (required by the engine).
    metrics m("community-detection");
    // Application parameters.
    std::string base_filename = get_option_string("file");   // input graph
    int num_iterations = get_option_int("niters", 10);       // max iterations
    bool use_scheduler = true;  // label propagation relies on selective scheduling
    // Preprocess ("shard") the input unless shards already exist.
    int nshards = convert_if_notexists<EdgeDataType>(base_filename, get_option_string("nshards", "auto"));
    // With onlyresult=1 the engine run is skipped and only the existing
    // labels are analyzed.
    if (get_option_int("onlyresult", 0) == 0) {
        CommunityDetectionProgram program;
        graphchi_engine<VertexDataType, EdgeDataType> engine(base_filename, nshards, use_scheduler, m);
        engine.run(program, num_iterations);
    }
    // Count label frequencies and write the community listing to a file.
    m.start_time("label-analysis");
    analyze_labels<vid_t>(base_filename);
    m.stop_time("label-analysis");
    // Report execution metrics.
    metrics_report(m);
    return 0;
}
| C++ |
/*
* Copyright (c) 2009 Carnegie Mellon University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an "AS
* IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language
* governing permissions and limitations under the License.
*
* For more about this software visit:
*
* http://www.graphlab.ml.cmu.edu
*
*/
/**
* \file cgs_lda.cpp
*
* \brief This file contains a GraphLab based implementation of the
* Collapsed Gibbs Sampler (CGS) for the Latent Dirichlet Allocation
* (LDA) model.
*
*
*
* \author Joseph Gonzalez, Diana Hu
*/
#include <vector>
#include <set>
#include <algorithm>
#include "util/atomic.hpp"
#include <boost/math/special_functions/gamma.hpp>
#include <vector>
#include <algorithm>
#include <boost/algorithm/string.hpp>
#include <boost/config/warning_disable.hpp>
#include <boost/spirit/include/qi.hpp>
#include <boost/spirit/include/phoenix_core.hpp>
#include <boost/spirit/include/phoenix_operator.hpp>
#include <boost/spirit/include/phoenix_stl.hpp>
#include <boost/iostreams/stream.hpp>
#include <boost/iostreams/filtering_stream.hpp>
#include <boost/iostreams/input_sequence.hpp>
// Global Types
// ============================================================================
typedef int count_type;
/**
* \brief The factor type is used to store the counts of tokens in
* each topic for words, documents, and assignments.
*
* Atomic counts are used because we violate the abstraction by
* modifying adjacent vertex data on scatter. As a consequence
* multiple threads on the same machine may try to update the same
* vertex data at the same time. The graphlab::atomic type ensures
* that multiple increments are serially consistent.
*/
typedef std::vector< graphchi::atomic<count_type> > factor_type;
/**
* \brief We use the factor type in accumulators and so we define an
* operator+=
*/
/**
 * Element-wise accumulation for factor_type, used by gather/aggregator
 * reductions. An empty vector acts as the additive identity: adding an
 * empty rvalue is a no-op, and adding into an empty lvalue adopts the
 * rvalue's counts wholesale.
 */
inline factor_type& operator+=(factor_type& lvalue,
                               const factor_type& rvalue) {
    if(rvalue.empty()) return lvalue;     // nothing to add
    if(lvalue.empty()) {
        lvalue = rvalue;                  // adopt the right-hand counts
    } else {
        for(size_t t = 0; t < lvalue.size(); ++t) lvalue[t] += rvalue[t];
    }
    return lvalue;
} // end of operator +=
/**
* \brief The latent topic id of a token is the smallest reasonable
* type.
*/
/// Topic ids use the smallest reasonable integer type to keep edge data compact.
typedef uint16_t topic_id_type;
// Sentinel topic id (all bits set) for tokens that have not yet been
// assigned to any topic.
#define NULL_TOPIC (topic_id_type(-1))
// Total number of topics (compile-time constant in this GraphChi port).
#define NTOPICS 20
/**
 * \brief The assignment type is used on each edge to store the
 * assignments of each token. Here it is a fixed-width array of NTOPICS
 * uint16_t slots (unlike the upstream GraphLab version, which used a
 * per-occurrence vector).
 */
typedef uint16_t assignment_type[NTOPICS];
// Global Variables
// ============================================================================
/**
 * \brief The alpha parameter determines the sparsity of topics for
 * each document (Dirichlet prior on document-topic proportions).
 */
double ALPHA = 1;
/**
 * \brief The beta parameter determines the sparsity of words in each
 * topic (Dirichlet prior on topic-word proportions).
 */
double BETA = 0.1;
// (The total number of topics is the NTOPICS macro defined above.)
/**
 * \brief The total number of words in the dataset.
 */
size_t NWORDS = 0;
/**
 * \brief The total number of docs in the dataset.
 */
size_t NDOCS = 0;
/**
 * \brief The total number of tokens in the corpus
 */
size_t NTOKENS = 0;
/**
 * \brief The number of top words to display during execution (from
 * each topic).
 */
size_t TOPK = 5;
/**
 * \brief The interval to display topics during execution.
 */
size_t INTERVAL = 10;
/**
 * \brief The global variable storing the global topic count across
 * all machines. This is maintained periodically using aggregation
 * (see global_counts_aggregator below).
 */
factor_type GLOBAL_TOPIC_COUNT;
/**
 * \brief A dictionary of words used to print the top words during
 * execution (index = word vertex id; filled by load_dictionary).
 */
std::vector<std::string> DICTIONARY;
/**
 * \brief The maximum occurrences allowed for an individual term-doc
 * pair (edge data); parsed counts are clamped to this.
 */
size_t MAX_COUNT = 100;
/**
 * \brief The time (seconds) to run until sampling stops on scatter.
 * If less than zero the sampler runs indefinitely.
 */
float BURNIN = -1;
// Graph Types
// ============================================================================
/**
* \brief The vertex data represents each term and document in the
* corpus and contains the counts of tokens in each topic.
*/
/**
 * \brief Vertex state for both term and document vertices: per-topic
 * token counts plus bookkeeping about updates.
 */
struct vertex_data {
    ///! The total number of updates applied to this vertex
    uint32_t nupdates;
    ///! The total number of changes to adjacent tokens (set by apply)
    uint32_t nchanges;
    ///! The count of tokens in each topic (always NTOPICS entries)
    factor_type factor;
    // factor is sized to NTOPICS up front so scatter can increment slots
    // of neighboring vertices without any resizing.
    vertex_data() : nupdates(0), nchanges(0), factor(NTOPICS) { }
}; // end of vertex_data
/**
* \brief The edge data represents the individual tokens (word,doc)
* pairs and their assignment to topics.
*/
/**
 * \brief Edge state for a (word, doc) pair: the topic assignment slots
 * and the number of reassignments made on the last update.
 */
struct edge_data {
    ///! The number of changes on the last update
    uint16_t nchanges;
    ///! The assignment of all tokens (fixed array of NTOPICS slots)
    assignment_type assignment;
    // NOTE(review): `ntokens` is accepted (the parsers pass the clamped
    // occurrence count here) but never used -- the upstream GraphLab
    // version resized a per-token vector with it. Also, slots are zeroed
    // rather than set to NULL_TOPIC, so they read as "assigned to topic 0"
    // instead of "unassigned" in gather. Confirm both against intent.
    edge_data(size_t ntokens = 0) : nchanges(0) {
        for(int i=0; i<NTOPICS; i++) assignment[i] = 0;
    }
}; // end of edge_data
typedef graphlab::distributed_graph<vertex_data, edge_data> graph_type;
/// Sharder hook: parse the token count for a term-doc pair from string `s`
/// and store it as edge data. Counts are clamped to MAX_COUNT so a single
/// pair cannot dominate.
static void parse(edge_data &x, const char * s) {
    const size_t parsed = atol(s);
    x = edge_data(std::min(parsed, MAX_COUNT));
}
/**
* \brief Edge data parser used in graph.load_json
*
* Make sure that the edge file list
* has docids from -2 to -(total #docid) and wordids 0 to (total #words -1)
*/
/**
 * \brief Edge data parser used in graph.load_json.
 *
 * Parses the leading unsigned integer (token count) from `line`, clamps it
 * to MAX_COUNT and stores it as edge data. Returns false if the line does
 * not start with a number.
 *
 * Make sure that the edge file list has docids from -2 to -(total #docid)
 * and wordids 0 to (total #words - 1).
 */
bool eparser(edge_data& ed, const std::string& line){
    const int BASE = 10;
    char* next_char_ptr = NULL;
    size_t count = strtoul(line.c_str(), &next_char_ptr, BASE);
    // Fix: strtoul never sets the end pointer to NULL -- on failure it
    // points back at the START of the input. The old `== NULL` check could
    // therefore never fire and malformed lines were silently accepted as 0.
    if(next_char_ptr == line.c_str()) return false;
    //threshold count
    count = std::min(count, MAX_COUNT);
    ed = (edge_data(count));
    return true;
}
/**
* \brief Vertex data parser used in graph.load_json
*/
/**
 * \brief Vertex data parser used in graph.load_json.
 * Vertex lines carry no payload: every vertex simply starts with
 * freshly zeroed counts. Always succeeds.
 */
bool vparser(vertex_data& vd, const std::string& line){
    vd = vertex_data();
    return true;
}
/**
* \brief Determine if the given vertex is a word vertex or a doc
* vertex.
*
* For simplicity we connect docs --> words and therefore if a vertex
* has in edges then it is a word.
*/
/**
 * \brief Determine if the given vertex is a word vertex.
 *
 * Edges always run docs --> words, so any in-edge marks a word vertex.
 * (Cleanup: the redundant `? 1 : 0` on a bool return was dropped.)
 */
inline bool is_word(const graph_type::vertex_type& vertex) {
    return vertex.num_in_edges() > 0;
}
/**
* \brief Determine if the given vertex is a doc vertex
*
* For simplicity we connect docs --> words and therefore if a vertex
* has out edges then it is a doc
*/
/**
 * \brief Determine if the given vertex is a doc vertex.
 *
 * Edges always run docs --> words, so any out-edge marks a doc vertex.
 * (Cleanup: the redundant `? 1 : 0` on a bool return was dropped.)
 */
inline bool is_doc(const graph_type::vertex_type& vertex) {
    return vertex.num_out_edges() > 0;
}
/**
* \brief return the number of tokens on a particular edge.
*/
/**
 * \brief Return the number of assignment slots on a particular edge.
 *
 * Fix: `assignment` is a C array (assignment_type = uint16_t[NTOPICS]),
 * and C arrays have no .size() member, so the original body could not
 * compile. The slot count is the array extent, NTOPICS.
 *
 * NOTE(review): if "tokens on the edge" was meant to be the term-doc
 * occurrence count (as in the upstream GraphLab version, where
 * `assignment` was a per-occurrence vector) rather than the fixed slot
 * count, a real per-edge count field is needed -- confirm against callers.
 */
inline size_t count_tokens(const graph_type::edge_type& edge) {
    (void) edge;      // only the type's fixed extent is needed
    return NTOPICS;
}
/**
* \brief Get the other vertex in the edge.
*/
/**
 * \brief Given one endpoint of an edge, return the opposite endpoint.
 */
inline graph_type::vertex_type
get_other_vertex(const graph_type::edge_type& edge,
                 const graph_type::vertex_type& vertex) {
    if (vertex.id() == edge.source().id()) {
        return edge.target();
    }
    return edge.source();
}
// ========================================================
// The Collapsed Gibbs Sampler Function
/**
* \brief The gather type for the collapsed Gibbs sampler is used to
* collect the topic counts on adjacent edges so that the apply
* function can compute the correct topic counts for the center
* vertex.
*
*/
/**
 * Accumulator for the gather phase: per-topic token counts collected
 * from adjacent edges plus the number of token changes observed.
 * A default-constructed value keeps `factor` empty so it acts as the
 * additive identity under factor_type's operator+=.
 */
struct gather_type {
    factor_type factor;   // per-topic counts (empty == zero contribution)
    uint32_t nchanges;    // token reassignments seen on gathered edges
    /// Identity element: empty factor, zero changes.
    gather_type() : nchanges(0) { };
    /// Per-edge value: a full-width (NTOPICS) zeroed factor vector.
    gather_type(uint32_t nchanges) : factor(NTOPICS), nchanges(nchanges) { };
    /// Component-wise accumulation (the two updates are independent).
    gather_type& operator+=(const gather_type& other) {
        nchanges += other.nchanges;
        factor += other.factor;
        return *this;
    }
}; // end of gather type
/**
* \brief The collapsed Gibbs sampler vertex program updates the topic
* counts for the center vertex and then draws new topic assignments
* for each edge durring the scatter phase.
*
*/
/**
 * \brief The collapsed Gibbs sampler vertex program updates the topic
 * counts for the center vertex and then draws new topic assignments
 * for each edge during the scatter phase.
 */
class cgs_lda_vertex_program :
    public graphlab::ivertex_program<graph_type, gather_type> {
public:
    /**
     * \brief At termination we want to disable sampling to allow the
     * correct final counts to be computed (scatter becomes a no-op).
     */
    static bool DISABLE_SAMPLING;
    /** \brief gather on all edges */
    edge_dir_type gather_edges(icontext_type& context,
                               const vertex_type& vertex) const {
        return graphlab::ALL_EDGES;
    } // end of gather_edges
    /**
     * \brief Collect the current topic counts on each edge: one increment
     * per assigned token; NULL_TOPIC slots (unassigned) are skipped.
     */
    gather_type gather(icontext_type& context, const vertex_type& vertex,
                       edge_type& edge) const {
        gather_type ret(edge.data().nchanges);
        const assignment_type& assignment = edge.data().assignment;
        foreach(topic_id_type asg, assignment) {
            if(asg != NULL_TOPIC) ++ret.factor[asg];
        }
        return ret;
    } // end of gather
    /**
     * \brief Update the topic count for the center vertex. This
     * ensures that the center vertex has the correct topic count before
     * resampling the topics for each token along each edge. Replaces
     * (not accumulates) the vertex's factor with the freshly gathered sum,
     * discarding any scatter-time scribbling by neighbors.
     */
    void apply(icontext_type& context, vertex_type& vertex,
               const gather_type& sum) {
        const size_t num_neighbors = vertex.num_in_edges() + vertex.num_out_edges();
        ASSERT_GT(num_neighbors, 0);
        // There should be no new edge data since the vertex program has been cleared
        vertex_data& vdata = vertex.data();
        ASSERT_EQ(sum.factor.size(), NTOPICS);
        ASSERT_EQ(vdata.factor.size(), NTOPICS);
        vdata.nupdates++;
        vdata.nchanges = sum.nchanges;
        vdata.factor = sum.factor;
    } // end of apply
    /**
     * \brief Scatter on all edges if the computation is on-going.
     * Computation stops after burn-in or when disable sampling is set to
     * true.
     */
    edge_dir_type scatter_edges(icontext_type& context,
                                const vertex_type& vertex) const {
        return (DISABLE_SAMPLING || (BURNIN > 0 && context.elapsed_seconds() > BURNIN))?
            graphlab::NO_EDGES : graphlab::ALL_EDGES;
    }; // end of scatter edges
    /**
     * \brief Draw new topic assignments for each edge token.
     *
     * Note that we exploit the GraphLab caching model here by DIRECTLY
     * modifying the topic counts of adjacent vertices. Making the
     * changes immediately visible to any adjacent vertex programs
     * running on the same machine. However, these changes will be
     * overwritten during the apply step and are only used to accelerate
     * sampling. This is a potentially dangerous violation of the
     * abstraction and should be taken with caution. In our case all
     * vertex topic counts are preallocated and atomic operations are
     * used. In addition during the sampling phase we must be careful
     * to guard against potentially negative temporary counts.
     */
    void scatter(icontext_type& context, const vertex_type& vertex,
                 edge_type& edge) const {
        // Resolve which endpoint holds the document counts and which the
        // word counts; every edge connects exactly one of each.
        factor_type& doc_topic_count = is_doc(edge.source()) ?
            edge.source().data().factor : edge.target().data().factor;
        factor_type& word_topic_count = is_word(edge.source()) ?
            edge.source().data().factor : edge.target().data().factor;
        ASSERT_EQ(doc_topic_count.size(), NTOPICS);
        ASSERT_EQ(word_topic_count.size(), NTOPICS);
        // run the actual gibbs sampling
        std::vector<double> prob(NTOPICS);
        assignment_type& assignment = edge.data().assignment;
        edge.data().nchanges = 0;
        foreach(topic_id_type& asg, assignment) {
            const topic_id_type old_asg = asg;
            if(asg != NULL_TOPIC) { // construct the cavity: remove this token's counts
                --doc_topic_count[asg];
                --word_topic_count[asg];
                --GLOBAL_TOPIC_COUNT[asg];
            }
            // Unnormalized conditional P(topic = t | everything else);
            // counts are clamped at zero to guard against transient
            // negatives caused by concurrent scatter updates.
            for(size_t t = 0; t < NTOPICS; ++t) {
                const double n_dt =
                    std::max(count_type(doc_topic_count[t]), count_type(0));
                const double n_wt =
                    std::max(count_type(word_topic_count[t]), count_type(0));
                const double n_t =
                    std::max(count_type(GLOBAL_TOPIC_COUNT[t]), count_type(0));
                prob[t] = (ALPHA + n_dt) * (BETA + n_wt) / (BETA * NWORDS + n_t);
            }
            asg = graphlab::random::multinomial(prob);
            // asg = std::max_element(prob.begin(), prob.end()) - prob.begin();
            // Re-add the token under its (possibly new) topic.
            ++doc_topic_count[asg];
            ++word_topic_count[asg];
            ++GLOBAL_TOPIC_COUNT[asg];
            if(asg != old_asg) {
                ++edge.data().nchanges;
            }
        } // End of loop over each token
        // signal the other vertex so it resamples with the fresh counts
        context.signal(get_other_vertex(edge, vertex));
    } // end of scatter function
}; // end of cgs_lda_vertex_program
// Sampling is enabled by default; flipped to true at termination so final
// counts can be computed without further resampling.
bool cgs_lda_vertex_program::DISABLE_SAMPLING = false;
/**
* \brief The icontext type associated with the cgs_lda_vertex program
* is needed for all aggregators.
*/
typedef cgs_lda_vertex_program::icontext_type icontext_type;
// ========================================================
// Aggregators
/**
* \brief The topk aggregator is used to periodically compute and
* display the topk most common words in each topic.
*
* The number of words is determined by the global variable \ref TOPK
* and the interval is determined by the global variable \ref INTERVAL.
*
*/
/**
 * \brief The topk aggregator is used to periodically compute and
 * display the topk most common words in each topic.
 *
 * The number of words is determined by the global variable \ref TOPK
 * and the interval is determined by the global variable \ref INTERVAL.
 */
class topk_aggregator {
    // (count, word-id) pair; std::set orders these ascending by count,
    // so *begin() is always the weakest entry.
    typedef std::pair<float, graphlab::vertex_id_type> cw_pair_type;
private:
    std::vector< std::set<cw_pair_type> > top_words;  // one candidate set per topic
    size_t nchanges, nupdates;                        // summed bookkeeping counters
public:
    topk_aggregator(size_t nchanges = 0, size_t nupdates = 0) :
        nchanges(nchanges), nupdates(nupdates) { }
    /// Merge another partial aggregate: sum the counters, union the
    /// per-topic candidate sets, then trim each set back to TOPK by
    /// erasing the smallest-count entries (set begin()).
    topk_aggregator& operator+=(const topk_aggregator& other) {
        nchanges += other.nchanges;
        nupdates += other.nupdates;
        if(other.top_words.empty()) return *this;
        if(top_words.empty()) top_words.resize(NTOPICS);
        for(size_t i = 0; i < top_words.size(); ++i) {
            // Merge the topk
            top_words[i].insert(other.top_words[i].begin(),
                                other.top_words[i].end());
            // Remove excess elements
            while(top_words[i].size() > TOPK)
                top_words[i].erase(top_words[i].begin());
        }
        return *this;
    } // end of operator +=
    /// Map step: counters come from every vertex; candidate (count, id)
    /// pairs only from word vertices.
    static topk_aggregator map(icontext_type& context,
                               const graph_type::vertex_type& vertex) {
        topk_aggregator ret_value;
        const vertex_data& vdata = vertex.data();
        ret_value.nchanges = vdata.nchanges;
        ret_value.nupdates = vdata.nupdates;
        if(is_word(vertex)) {
            const graphlab::vertex_id_type wordid = vertex.id();
            ret_value.top_words.resize(vdata.factor.size());
            for(size_t i = 0; i < vdata.factor.size(); ++i) {
                const cw_pair_type pair(vdata.factor[i], wordid);
                ret_value.top_words[i].insert(pair);
            }
        }
        return ret_value;
    } // end of map function
    /// Finalize: print each topic's top words (descending count via
    /// rev_foreach) plus global change/update totals, on process 0 only.
    static void finalize(icontext_type& context,
                         const topk_aggregator& total) {
        if(context.procid() != 0) return;
        for(size_t i = 0; i < total.top_words.size(); ++i) {
            std::cout << "Topic " << i << ": ";
            rev_foreach(cw_pair_type pair, total.top_words[i])  {
                // NOTE(review): pair.second indexes DICTIONARY without a
                // bounds check -- a word id beyond the dictionary is UB.
                std::cout << DICTIONARY[pair.second]
                          << "(" << pair.first << ")" << ", ";
            }
            std::cout << std::endl;
        }
        std::cout << "\nNumber of token changes: " << total.nchanges << std::endl;
        std::cout << "\nNumber of updates: " << total.nupdates << std::endl;
    } // end of finalize
}; // end of topk_aggregator struct
/**
* \brief The global counts aggregator computes the total number of
* tokens in each topic across all words and documents and then
* updates the \ref GLOBAL_TOPIC_COUNT variable.
*
*/
/**
 * \brief The global counts aggregator computes the total number of
 * tokens in each topic across all words and documents and then
 * updates the \ref GLOBAL_TOPIC_COUNT variable.
 */
struct global_counts_aggregator {
    typedef graph_type::vertex_type vertex_type;
    /// Each vertex contributes its per-topic factor; the reduction uses
    /// factor_type's operator+=.
    static factor_type map(icontext_type& context, const vertex_type& vertex) {
        return vertex.data().factor;
    } // end of map function
    /// Publish the reduced counts. The division by 2 presumably corrects
    /// for every token being counted at both its word endpoint and its
    /// doc endpooint of the edge -- confirm against the scatter logic.
    /// Clamped at zero to guard against transient negative counts.
    static void finalize(icontext_type& context, const factor_type& total) {
        size_t sum = 0;
        for(size_t t = 0; t < total.size(); ++t) {
            GLOBAL_TOPIC_COUNT[t] =
                std::max(count_type(total[t]/2), count_type(0));
            sum += GLOBAL_TOPIC_COUNT[t];
        }
        context.cout() << "Total Tokens: " << sum << std::endl;
    } // end of finalize
}; // end of global_counts_aggregator struct
/**
* \brief The Likelihood aggregators maintains the current estimate of
* the log-likelihood of the current token assignments.
*
* llik_words_given_topics = ...
* ntopics * (gammaln(nwords * beta) - nwords * gammaln(beta)) - ...
* sum_t(gammaln( n_t + nwords * beta)) +
* sum_w(sum_t(gammaln(n_wt + beta)));
*
* llik_topics = ...
* ndocs * (gammaln(ntopics * alpha) - ntopics * gammaln(alpha)) + ...
* sum_d(sum_t(gammaln(n_td + alpha)) - gammaln(sum_t(n_td) + ntopics * alpha));
*/
/**
 * Maintains the current estimate of the log-likelihood of the token
 * assignments (see the formula block above): word vertices contribute
 * per-topic lgamma(n_wt + beta) terms, doc vertices the lgamma(n_td +
 * alpha) terms; the shared global terms are added once in finalize.
 */
class likelihood_aggregator : public graphlab::IS_POD_TYPE {
    typedef graph_type::vertex_type vertex_type;
    double lik_words_given_topics;   // sum of word-side lgamma terms
    double lik_topics;               // sum of doc-side lgamma terms
public:
    likelihood_aggregator() : lik_words_given_topics(0), lik_topics(0) { }
    /// Partial aggregates combine by simple summation.
    likelihood_aggregator& operator+=(const likelihood_aggregator& other) {
        lik_words_given_topics += other.lik_words_given_topics;
        lik_topics += other.lik_topics;
        return *this;
    } // end of operator +=
    /// Per-vertex contribution; counts are clamped at zero to guard
    /// against transient negatives from concurrent scatter updates.
    static likelihood_aggregator
    map(icontext_type& context, const vertex_type& vertex) {
        using boost::math::lgamma;
        const factor_type& factor = vertex.data().factor;
        ASSERT_EQ(factor.size(), NTOPICS);
        likelihood_aggregator ret;
        if(is_word(vertex)) {
            for(size_t t = 0; t < NTOPICS; ++t) {
                const double value = std::max(count_type(factor[t]), count_type(0));
                ret.lik_words_given_topics += lgamma(value + BETA);
            }
        } else { ASSERT_TRUE(is_doc(vertex));
            double ntokens_in_doc = 0;
            for(size_t t = 0; t < NTOPICS; ++t) {
                const double value = std::max(count_type(factor[t]), count_type(0));
                ret.lik_topics += lgamma(value + ALPHA);
                ntokens_in_doc += factor[t];
            }
            // Subtract the per-document normalizer lgamma(sum_t n_td + T*alpha).
            ret.lik_topics -= lgamma(ntokens_in_doc + NTOPICS * ALPHA);
        }
        return ret;
    } // end of map function
    /// Add the assignment-independent global terms and print the total.
    static void finalize(icontext_type& context, const likelihood_aggregator& total) {
        using boost::math::lgamma;
        // Address the global sum terms
        double denominator = 0;
        for(size_t t = 0; t < NTOPICS; ++t) {
            denominator += lgamma(GLOBAL_TOPIC_COUNT[t] + NWORDS * BETA);
        } // end of for loop
        const double lik_words_given_topics =
            NTOPICS * (lgamma(NWORDS * BETA) - NWORDS * lgamma(BETA)) -
            denominator + total.lik_words_given_topics;
        const double lik_topics =
            NDOCS * (lgamma(NTOPICS * ALPHA) - NTOPICS * lgamma(ALPHA)) +
            total.lik_topics;
        const double lik = lik_words_given_topics + lik_topics;
        context.cout() << "Likelihood: " << lik << std::endl;
    } // end of finalize
}; // end of likelihood_aggregator struct
/**
* \brief The selective signal functions are used to signal only the
* vertices corresponding to words or documents. This is done by
* using the iengine::map_reduce_vertices function.
*/
/**
 * \brief Selective signalling helpers for iengine::map_reduce_vertices:
 * each function signals only one side of the bipartite graph and
 * returns an empty reduction value.
 */
struct signal_only {
    /// Signal document vertices only; word vertices are left untouched.
    static graphlab::empty
    docs(icontext_type& context, const graph_type::vertex_type& vertex) {
        if(is_doc(vertex)) {
            context.signal(vertex);
        }
        return graphlab::empty();
    } // end of signal_docs
    /// Signal word vertices only; document vertices are left untouched.
    static graphlab::empty
    words(icontext_type& context, const graph_type::vertex_type& vertex) {
        if(is_word(vertex)) {
            context.signal(vertex);
        }
        return graphlab::empty();
    } // end of signal_words
}; // end of selective_only
/**
* \brief Load the dictionary global variable from the file containing
* the terms (one term per line).
*
* Note that while graphs can be loaded from multiple files the
* dictionary must be in a single file. The dictionary is loaded
* entirely into memory and used to display word clouds and the top
* terms in each topic.
*
* \param [in] fname the file containing the dictionary data. The
* data can be located on HDFS and can also be gzipped (must end in
* ".gz").
*
*/
/**
 * \brief Load the DICTIONARY global variable from the file containing
 * the terms (one term per line).
 *
 * Unlike the graph, the dictionary must live in a single file; it is
 * loaded entirely into memory and used to display the top terms per topic.
 *
 * \param [in] fname the file containing the dictionary data.
 * \return true on success, false if the file could not be opened/read.
 *
 * Fixes: the open-failure check previously tested `!fin.good()` twice,
 * so a failure on the underlying ifstream could go unnoticed; it now
 * checks both streams. A stray "Loooping" debug print was removed.
 */
bool load_dictionary(const std::string& fname) {
    // NOTE(review): `gzip` is detected but no gzip decompressor is pushed
    // onto the filtering stream, so ".gz" dictionaries would be read as raw
    // bytes. The upstream GraphLab version pushed
    // boost::iostreams::gzip_decompressor() here -- confirm whether gzip
    // support is required before relying on compressed dictionaries.
    const bool gzip = boost::ends_with(fname, ".gz");
    (void) gzip;
    std::cout << "opening: " << fname << std::endl;
    std::ifstream in_file(fname.c_str(),
                          std::ios_base::in | std::ios_base::binary);
    boost::iostreams::filtering_stream<boost::iostreams::input> fin;
    fin.push(in_file);
    // Check the underlying file AND the filtering stream.
    if(!in_file.good() || !fin.good()) {
        logstream(LOG_ERROR) << "Error loading dictionary: "
                             << fname << std::endl;
        return false;
    }
    // One term per line; line number == word vertex id.
    std::string term;
    while(std::getline(fin, term).good()) DICTIONARY.push_back(term);
    fin.pop();
    in_file.close();
    std::cout << "Dictionary Size: " << DICTIONARY.size() << std::endl;
    return true;
} // end of load dictionary
/**
 * \brief Graph writer that emits, per saved vertex, one tab-separated
 * line "<id>\t<count_0>\t...\t<count_{k-1}>" holding the vertex's topic
 * counts.  The save_words flag selects which side of the bipartite graph
 * (words or documents) is written; the other side is skipped.
 */
struct count_saver {
  bool save_words;   // true: save word vertices; false: save document vertices
  count_saver(bool save_words) : save_words(save_words) { }
  typedef graph_type::vertex_type vertex_type;
  typedef graph_type::edge_type edge_type;
  std::string save_vertex(const vertex_type& vertex) const {
    // Skip saving vertex data if the vertex type is not consistent
    // with the save type
    if((save_words && is_doc(vertex)) ||
       (!save_words && is_word(vertex))) return "";
    // Proceed to save
    std::stringstream strm;
    if(save_words) {
      // Word vertices are stored under their natural (non-negative) id.
      const graphlab::vertex_id_type vid = vertex.id();
      strm << vid << '\t';
    } else { // save documents
      // Undo the document-id encoding to recover the original doc id.
      // NOTE(review): assumes documents were mapped as id = -(doc + 2)
      // when the graph was built — confirm against the loader.
      const graphlab::vertex_id_type vid = (-vertex.id()) - 2;
      strm << vid << '\t';
    }
    const factor_type& factor = vertex.data().factor;
    // Tab-separated topic counts with no trailing tab.
    for(size_t i = 0; i < factor.size(); ++i) {
      strm << factor[i];
      if(i+1 < factor.size()) strm << '\t';
    }
    strm << '\n';
    return strm.str();
  }
  // Edges contribute no output.
  std::string save_edge(const edge_type& edge) const {
    return ""; //nop
  }
}; // end of prediction_saver
| C++ |
//
// cgs_lda.cpp
// graphchi_xcode
//
// Created by Aapo Kyrola on 8/8/12.
//
//
#include <string>
#include <algorithm>
#include "graphchi_basic_includes.hpp"
#include "api/graphlab2_1_GAS_api/graphlab.hpp"
#include "cgs_lda_vertexprogram.hpp"
using namespace graphchi;
using namespace graphlab;
/**
 * Entry point: runs the collapsed-Gibbs-sampling LDA vertex program on a
 * GraphChi-sharded input via the GraphLab 2.1 GAS compatibility API.
 */
int main(int argc, const char ** argv) {
  /* GraphChi initialization will read the command line
     arguments and the configuration file. */
  graphchi_init(argc, argv);
  /* Metrics object for keeping track of performance counters
     and other information. Currently required. */
  metrics m("LDA-graphlab");
  /* Basic arguments for application. NOTE: File will be automatically 'sharded'. */
  std::string filename = get_option_string("file");  // Base filename
  int niters = get_option_int("niters", 4);          // Number of iterations
  /* Preprocess data if needed, or discover preprocess files */
  int nshards = convert_if_notexists<edge_data>(filename, get_option_string("nshards", "auto"));
  /* Run the vertex program; the returned heap-allocated array holds the
     per-vertex results after the final iteration. */
  std::vector<vertex_data> * vertices =
    run_graphlab_vertexprogram<cgs_lda_vertex_program>(filename, nshards, niters, false, m, false, false);
  /* TODO: write output latent matrices */
  delete vertices;
  /* Report execution metrics */
  metrics_report(m);
  return 0;
}
| C++ |
#ifndef __GRAPHCHI_RMSE_ENGINE
#define __GRAPHCHI_RMSE_ENGINE
/**
* @file
* @author Danny Bickson
* @version 1.0
*
* @section LICENSE
*
* Copyright [2012] [Carnegie Mellon University]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* File for aggregating and siplaying error mesasures and algorithm progress
*/
float (*pprediction_func)(const vertex_data&, const vertex_data&, const float, double &, void *) = NULL;
vec validation_rmse_vec;
vec users_vec;
vec sum_ap_vec;
bool user_nodes = true;
int num_threads = 1;
bool converged_engine = false;
int cur_iteration = 0;
/**
* GraphChi programs need to subclass GraphChiProgram<vertex-type, edge-type>
* class. The main logic is usually in the update function.
*/
/**
 * GraphChi programs need to subclass GraphChiProgram<vertex-type, edge-type>
 * class.  This program computes the mean Average-Precision (AP@ap_number)
 * over the validation edges.  Per-thread partial sums are accumulated in
 * sum_ap_vec / users_vec, indexed by the OpenMP thread id, and reduced in
 * after_iteration().
 */
struct ValidationAPProgram : public GraphChiProgram<VertexDataType, EdgeDataType> {
  /**
   * compute validaton AP for a single user
   */
  void update(graphchi_vertex<VertexDataType, EdgeDataType> &vertex, graphchi_context &gcontext) {
    // Score only one side of the bipartite graph: ids < M are "user"
    // nodes, the rest are "item" nodes (side chosen by user_nodes).
    if (user_nodes && vertex.id() >= M)
      return;
    else if (!user_nodes && vertex.id() < M)
      return;
    vertex_data & vdata = latent_factors_inmem[vertex.id()];
    vec ratings = zeros(vertex.num_outedges());
    vec real_vals = zeros(vertex.num_outedges());
    if (ratings.size() > 0){
      // Count this vertex toward the per-thread user tally.
      users_vec[omp_get_thread_num()]++;
      int j=0;
      int real_click_count = 0;
      // Predict a rating for every validation edge of this vertex and
      // remember the observed value alongside it.
      for(int e=0; e < vertex.num_outedges(); e++) {
        const EdgeDataType & observation = vertex.edge(e)->get_data();
        vertex_data & pdata = latent_factors_inmem[vertex.edge(e)->vertex_id()];
        double prediction;
        (*pprediction_func)(vdata, pdata, observation, prediction, NULL);
        ratings[j] = prediction;
        real_vals[j] = observation;
        if (observation > 0)
          real_click_count++;   // positive observation ("click")
        j++;
      }
      int count = 0;
      double ap = 0;
      // sort_index() returns an ascending permutation, so walk it from
      // the back to visit predictions from highest to lowest.
      ivec pos = sort_index(ratings);
      for (int j=0; j< std::min(ap_number, (int)ratings.size()); j++){
        if (real_vals[pos[ratings.size() - j - 1]] > 0)
          ap += (++count * 1.0/(j+1));   // precision@(j+1) contribution
      }
      // Normalize by the number of positives; AP is 0 when there are none.
      if (real_click_count > 0 )
        ap /= real_click_count;
      else ap = 0;
      sum_ap_vec[omp_get_thread_num()] += ap;
    }
  }
  // Snapshot the previous validation error and zero the per-thread
  // accumulators before each pass.
  void before_iteration(int iteration, graphchi_context & gcontext){
    last_validation_rmse = dvalidation_rmse;
    users_vec = zeros(num_threads);
    sum_ap_vec = zeros(num_threads);
  }
  /**
   * Called after an iteration has finished.  Reduces the per-thread sums
   * into the mean AP and, when enabled, requests early stopping once the
   * validation error starts increasing.
   */
  void after_iteration(int iteration, graphchi_context &gcontext) {
    assert(Le > 0);   // there must be validation edges
    dvalidation_rmse = finalize_rmse(sum(sum_ap_vec) , (double)sum(users_vec));
    std::cout<<" Validation " << error_names[loss_type] << ":" << std::setw(10) << dvalidation_rmse << std::endl;
    if (halt_on_rmse_increase > 0 && halt_on_rmse_increase < cur_iteration && dvalidation_rmse > last_validation_rmse){
      logstream(LOG_WARNING)<<"Stopping engine because of validation " << error_names[loss_type] << " increase" << std::endl;
      //gcontext.set_last_iteration(gcontext.iteration);
      converged_engine = true;
    }
  }
};
/**
* GraphChi programs need to subclass GraphChiProgram<vertex-type, edge-type>
* class. The main logic is usually in the update function.
*/
/**
 * GraphChi programs need to subclass GraphChiProgram<vertex-type, edge-type>
 * class.  This program accumulates the per-edge squared prediction error
 * over all validation edges into per-thread slots of validation_rmse_vec
 * and reduces them to the validation RMSE in after_iteration().
 */
struct ValidationRMSEProgram : public GraphChiProgram<VertexDataType, EdgeDataType> {
  /**
   * compute validaton RMSE for a single user
   */
  void update(graphchi_vertex<VertexDataType, EdgeDataType> &vertex, graphchi_context &gcontext) {
    // Score only one side of the bipartite graph (ids < M are users).
    if (user_nodes && vertex.id() >= M)
      return;
    else if (!user_nodes && vertex.id() < M)
      return;
    vertex_data & vdata = latent_factors_inmem[vertex.id()];
    for(int e=0; e < vertex.num_outedges(); e++) {
      const EdgeDataType & observation = vertex.edge(e)->get_data();
      vertex_data & nbr_latent = latent_factors_inmem[vertex.edge(e)->vertex_id()];
      double prediction;
      // The callback fills 'prediction' and returns this edge's squared
      // error contribution.
      double rmse = (*pprediction_func)(vdata, nbr_latent, observation, prediction, NULL);
      // A single squared error can never exceed the squared rating range.
      assert(rmse <= pow(maxval - minval, 2));
      assert(validation_rmse_vec.size() > omp_get_thread_num());
      validation_rmse_vec[omp_get_thread_num()] += rmse;
    }
  }
  // Snapshot the previous validation error and zero the per-thread
  // accumulators before each pass.
  void before_iteration(int iteration, graphchi_context & gcontext){
    last_validation_rmse = dvalidation_rmse;
    validation_rmse_vec = zeros(num_threads);
  }
  /**
   * Called after an iteration has finished.  Reduces the per-thread error
   * sums over the Le validation edges and, when enabled, stops the engine
   * once the validation error starts to increase.
   */
  void after_iteration(int iteration, graphchi_context &gcontext) {
    assert(Le > 0);
    dvalidation_rmse = finalize_rmse(sum(validation_rmse_vec) , (double)Le);
    std::cout<<" Validation " << error_names[loss_type] << ":" << std::setw(10) << dvalidation_rmse << std::endl;
    if (halt_on_rmse_increase > 0 && halt_on_rmse_increase < cur_iteration && dvalidation_rmse > last_validation_rmse){
      logstream(LOG_WARNING)<<"Stopping engine because of validation RMSE increase" << std::endl;
      converged_engine = true;
    }
  }
};
/**
 * Resize the per-thread training-error accumulator for the detected number
 * of execution threads.  NOTE(review): this zeroes the global rmse_vec,
 * not validation_rmse_vec — the validation vectors are re-zeroed in the
 * validation programs' before_iteration() instead.
 */
void reset_rmse(int exec_threads){
  logstream(LOG_DEBUG)<<"Detected number of threads: " << exec_threads << std::endl;
  num_threads = exec_threads;
  rmse_vec = zeros(exec_threads);
}
/**
 * Allocate and configure a GraphChi engine over the validation graph and
 * register the prediction callback used to score each validation edge.
 *
 * @param pvalidation_engine [out] receives the new engine; left untouched
 *                           when nshards == -1 (no validation data).
 * @param nshards            shard count of the validation graph, or -1.
 * @param prediction_func    callback computing a prediction and returning
 *                           the per-edge error contribution.
 *
 * NOTE(review): both the metrics object and the engine are heap-allocated
 * here and never freed — they live for the remainder of the process.
 */
template<typename VertexDataType, typename EdgeDataType>
void init_validation_rmse_engine(graphchi_engine<VertexDataType,EdgeDataType> *& pvalidation_engine, int nshards,float (*prediction_func)(const vertex_data & user, const vertex_data & movie, float rating, double & prediction, void * extra)){
  if (nshards == -1)
    return;
  metrics * m = new metrics("validation_rmse_engine");
  graphchi_engine<VertexDataType, EdgeDataType> * engine = new graphchi_engine<VertexDataType, EdgeDataType>(validation, nshards, false, *m);
  set_engine_flags(*engine);
  pvalidation_engine = engine;
  pprediction_func = prediction_func;
}
/**
 * Run one pass of the validation engine — the AP or RMSE flavour, chosen
 * by the global calc_ap flag — and stop the main engine when the
 * validation program has signalled convergence (error increase).
 */
template<typename VertexDataType, typename EdgeDataType>
void run_validation(graphchi_engine<VertexDataType, EdgeDataType> * pvalidation_engine, graphchi_context & context){
  //no validation data, no need to run validation engine calculations
  cur_iteration = context.iteration;
  if (pvalidation_engine == NULL){
    std::cout << std::endl;
    return;
  }
  if (calc_ap){ //AP
    ValidationAPProgram program;
    pvalidation_engine->run(program, 1);
  }
  else { //RMSE
    ValidationRMSEProgram program;
    pvalidation_engine->run(program, 1);
  }
  // The validation program sets converged_engine when the error increased.
  if (converged_engine)
    context.set_last_iteration(context.iteration);
}
#endif //__GRAPHCHI_RMSE_ENGINE
| C++ |
#ifndef _IMPLICIT_HPP__
#define _IMPLICIT_HPP__
/**
* @file
* @author Danny Bickson
* @version 1.0
*
* @section LICENSE
*
* Copyright [2012] [Carnegie Mellon University]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include "eigen_wrapper.hpp"
enum{
IMPLICIT_RATING_DISABLED = 0,
IMPLICIT_RATING_RANDOM = 1
};
double implicitratingweight;
double implicitratingvalue = -1;
double implicitratingpercentage;
int implicitratingtype;
/**
 * Randomly add "implicit" (e.g. negative-feedback) edges during sharding.
 * This variant stores both a rating value and a weight on each added edge,
 * for edge types with a (value, weight) constructor.
 *
 * @param type IMPLICIT_RATING_DISABLED (no-op) or IMPLICIT_RATING_RANDOM.
 * @param shrd sharder that receives the new edges.
 * @return number of edges added (equals the requested count).
 *
 * NOTE(review): the (user, item) draws are independent, so duplicate pairs
 * may be generated — confirm whether the sharder tolerates duplicates.
 */
template<typename als_edge_type>
uint add_implicit_edges4(int type, sharder<als_edge_type>& shrd){
  switch(type){
    case IMPLICIT_RATING_DISABLED: return 0;
    case IMPLICIT_RATING_RANDOM: break;
    default: assert(false);
  };
  uint added = 0;
  // Requested count is a fraction of the full M x N user-item grid.
  uint toadd = (uint)(implicitratingpercentage*N*M);
  logstream(LOG_INFO)<<"Going to add: " << toadd << " implicit edges. " << std::endl;
  assert(toadd >= 1);
  for (uint j=0; j< toadd; j++){
    // Draw a uniformly random (user, item) pair.
    ivec item = ::randi(1,0,N-1);
    ivec user = ::randi(1,0,M-1);
    shrd.preprocessing_add_edge(user[0], item[0], als_edge_type(implicitratingvalue, implicitratingweight));
    added++;
  }
  logstream(LOG_INFO)<<"Finished adding " << toadd << " implicit edges. " << std::endl;
  return added;
}
/**
 * Randomly add "implicit" edges during sharding.  Same as
 * add_implicit_edges4() above except that the edge type takes only a
 * rating value (no weight).
 *
 * @param type IMPLICIT_RATING_DISABLED (no-op) or IMPLICIT_RATING_RANDOM.
 * @param shrd sharder that receives the new edges.
 * @return number of edges added (equals the requested count).
 *
 * NOTE(review): duplicate (user, item) pairs may be generated since the
 * random draws are independent.
 */
template<typename als_edge_type>
uint add_implicit_edges(int type, sharder<als_edge_type>& shrd ){
  switch(type){
    case IMPLICIT_RATING_DISABLED: return 0;
    case IMPLICIT_RATING_RANDOM: break;
    default: assert(false);
  };
  uint added = 0;
  // Requested count is a fraction of the full M x N user-item grid.
  uint toadd = (uint)(implicitratingpercentage*N*M);
  logstream(LOG_INFO)<<"Going to add: " << toadd << " implicit edges. " << std::endl;
  assert(toadd >= 1);
  for (uint j=0; j< toadd; j++){
    // Draw a uniformly random (user, item) pair.
    ivec item = ::randi(1,0,N-1);
    ivec user = ::randi(1,0,M-1);
    shrd.preprocessing_add_edge(user[0], item[0], als_edge_type(implicitratingvalue));
    added++;
  }
  logstream(LOG_INFO)<<"Finished adding " << toadd << " implicit edges. " << std::endl;
  return added;
}
/**
 * Parse the implicit-rating command line options into the file-level
 * globals and validate them.  Invalid combinations abort via LOG_FATAL.
 */
void parse_implicit_command_line(){
  implicitratingweight = get_option_float("implicitratingweight", implicitratingweight);
  implicitratingvalue = get_option_float("implicitratingvalue", implicitratingvalue);
  implicitratingtype = get_option_int("implicitratingtype", implicitratingtype);
  if (implicitratingtype != IMPLICIT_RATING_RANDOM && implicitratingtype != IMPLICIT_RATING_DISABLED)
    logstream(LOG_FATAL)<<"Implicit rating type should be either 0 (IMPLICIT_RATING_DISABLED) or 1 (IMPLICIT_RATING_RANDOM)" << std::endl;
  implicitratingpercentage = get_option_float("implicitratingpercentage", implicitratingpercentage);
  // Bug fix: the original tested "< 1e-8 && > 0.8", which is always false,
  // so out-of-range percentages were never rejected.  The intent is to
  // require the percentage inside (1e-8, 0.8); only enforce it when
  // implicit ratings are actually enabled (the default percentage is 0).
  if (implicitratingtype != IMPLICIT_RATING_DISABLED &&
      (implicitratingpercentage < 1e-8 || implicitratingpercentage > 0.8))
    logstream(LOG_FATAL)<<"Implicit rating percentage should be (1e-8, 0.8)" << std::endl;
  if (implicitratingtype != IMPLICIT_RATING_DISABLED && implicitratingvalue == 0)
    logstream(LOG_FATAL)<<"You are not allowed to use --implicitratingvalue=0. Please select a non zero value, for example -1" << std::endl;
}
#endif //_IMPLICIT_HPP__
| C++ |
/**
* Copyright (c) 2009 Carnegie Mellon University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an "AS
* IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language
* governing permissions and limitations under the License.
*
* For more about this software visit:
*
* http://graphlab.org
*
*/
/**
* Code by Danny Bickson, CMU
*/
#ifndef EIGEN_WRAPPER
#define EIGEN_WRAPPER
#ifdef EIGEN_NDEBUG
#define NDEBUG
#endif
/**
* SET OF WRAPPER FUNCTIONS FOR EIGEN
*
*
*/
#include <algorithm>
#include <cassert>
#include <cmath>
#include <cstdio>
#include <cstring>
#include <fstream>
#include <iostream>
#include <ostream>
#include <utility>
#include <vector>
#include "Eigen/Dense"
#define EIGEN_YES_I_KNOW_SPARSE_MODULE_IS_NOT_STABLE_YET
#include "Eigen/Sparse"
#include "Eigen/Cholesky"
#include "Eigen/Eigenvalues"
#include "Eigen/SVD"
#define EIGEN_DONT_PARALLELIZE //eigen parallel for loop interfers with ours.
using namespace Eigen;
typedef MatrixXd mat;
typedef VectorXd vec;
typedef VectorXf fvec;
typedef VectorXi ivec;
typedef MatrixXi imat;
typedef Matrix<size_t, Dynamic, Dynamic> matst;
typedef SparseVector<double> sparse_vec;
inline void debug_print_vec(const char * name,const vec& _vec, int len){
printf("%s ) ", name);
for (int i=0; i< len; i++)
if (_vec[i] == 0)
printf(" 0 ");
else printf("%12.4g ", _vec[i]);
printf("\n");
}
inline void debug_print_vec(const char * name,const double* _vec, int len){
printf("%s ) ", name);
for (int i=0; i< len; i++)
if (_vec[i] == 0)
printf(" 0 ");
else printf("%12.4g ", _vec[i]);
printf("\n");
}
mat randn1(int dx, int dy, int col);
template<typename mat, typename data>
inline void set_val(mat &A, int row, int col, data val){
A(row, col) = val;
}
inline double get_val(const mat &A, int row, int col){
return A(row, col);
}
inline int get_val(const imat &A, int row, int col){
return A(row, col);
}
inline vec get_col(const mat& A, int col){
return A.col(col);
}
inline vec get_row(const mat& A, int row){
return A.row(row);
}
inline void set_col(mat& A, int col, const vec & val){
A.col(col) = val;
}
inline void set_row(mat& A, int row, const vec & val){
A.row(row) = val;
}
inline mat eye(int size){
return mat::Identity(size, size);
}
inline vec ones(int size){
return vec::Ones(size);
}
inline fvec fones(int size){
return fvec::Ones(size);
}
inline vec init_vec(const double * array, int size){
vec ret(size);
memcpy(ret.data(), array, size*sizeof(double));
return ret;
}
inline mat init_mat(const char * string, int row, int col){
mat out(row, col);
char buf[2056];
strcpy(buf, string);
char *pch = strtok(buf," \r\n\t;");
for (int i=0; i< row; i++){
for (int j=0; j< col; j++){
out(i,j) = atof(pch);
pch = strtok (NULL, " \r\n\t;");
}
}
return out;
}
inline imat init_imat(const char * string, int row, int col){
imat out(row, col);
char buf[2056];
strcpy(buf, string);
char *pch = strtok(buf," \r\n\t;");
for (int i=0; i< row; i++){
for (int j=0; j< col; j++){
out(i,j) = atol(pch);
pch = strtok (NULL, " \r\n\t;");
}
}
return out;
}
inline vec init_vec(const char * string, int size){
vec out(size);
char buf[2056];
strcpy(buf, string);
char *pch = strtok (buf," \r\n\t;");
int i=0;
while (pch != NULL)
{
out(i) =atof(pch);
pch = strtok (NULL, " \r\n\t;");
i++;
}
assert(i == size);
return out;
}
inline vec init_dbl_vec(const char * string, int size){
return init_vec(string, size);
}
inline vec zeros(int size){
return vec::Zero(size);
}
inline fvec fzeros(int size){
return fvec::Zero(size);
}
inline mat zeros(int rows, int cols){
return mat::Zero(rows, cols);
}
inline vec head(const vec& v, int num){
return v.head(num);
}
inline vec mid(const vec&v, int start, int num){
return v.segment(start, std::min(num, (int)(v.size()-start)));
}
inline vec tail(const vec&v, int num){
return v.segment(v.size() - num, num);
}
inline ivec head(const ivec& v, int num){
return v.head(num);
}
inline void sort(ivec &a){
std::sort(a.data(), a.data()+a.size());
}
inline void sort(vec & a){
std::sort(a.data(), a.data()+a.size());
}
// Return the permutation that sorts 'a' ascending: ret[i] is the index
// (into 'a') of the i-th smallest element.
inline ivec sort_index(const vec&a){
  ivec ret(a.size());
  std::vector<std::pair<double,int> > D;
  D.reserve(a.size());
  for (int i=0;i<a.size();i++)
    // Bug fix: std::make_pair<double,int>(...) with explicit template
    // arguments does not compile under C++11 and later (the lvalue 'i'
    // cannot bind to the deduced rvalue-reference parameter); let the
    // argument types be deduced instead.
    D.push_back(std::make_pair((double)a.coeff(i), (int)i));
  // Pairs sort by value first, index second (stable tie-break by index).
  std::sort(D.begin(),D.end());
  for (int i=0;i<a.size();i++)
  {
    ret[i]=D[i].second;
  }
  return ret;
}
inline void dot2(const vec& x1, const vec& x3, mat & Q, int j, int len){
for (int i=0; i< len; i++){
Q(i,j) = (x1(i) * x3(i));
}
}
inline bool ls_solve_chol(const mat &A, const vec &b, vec &result){
//result = A.jacobiSvd(ComputeThinU | ComputeThinV).solve(b);
result = A.ldlt().solve(b);
return true;
}
inline bool ls_solve(const mat &A, const vec &b, vec &result){
//result = A.jacobiSvd(ComputeThinU | ComputeThinV).solve(b);
result = A.ldlt().solve(b);
return true;
}
inline bool chol(mat& sigma, mat& out){
out = sigma.llt().matrixLLT();
return true;
}
inline bool backslash(const mat& A, const vec & b, vec & x){
x = A.jacobiSvd(ComputeThinU | ComputeThinV).solve(b);
return true;
}
inline mat transpose(mat & A){
return A.transpose();
}
inline mat randn(int dx, int dy){
return randn1(dx,dy,-1);
}
inline void set_diag(mat &A, vec & v){
A.diagonal()=v;
}
inline mat diag(vec & v){
return v.asDiagonal();
}
template<typename mat>
inline double sumsum(const mat & A){
return A.sum();
}
inline double norm(const mat &A, int pow=2){
return A.squaredNorm();
}
inline mat inv(const mat&A){
return A.inverse();
}
inline bool inv(const mat&A, mat &out){
out = A.inverse();
return true;
}
inline mat outer_product(const vec&a, const vec&b){
return a*b.transpose();
}
//Eigen does not sort eigenvalues, as done in matlab
inline bool eig_sym(const mat & T, vec & eigenvalues, mat & eigenvectors){
//
//Column of the returned matrix is an eigenvector corresponding to eigenvalue number as returned by eigenvalues(). The eigenvectors are normalized to have (Euclidean) norm equal to one.
SelfAdjointEigenSolver<mat> solver(T);
eigenvectors = solver.eigenvectors();
eigenvalues = solver.eigenvalues();
ivec index = sort_index(eigenvalues);
sort(eigenvalues);
vec eigenvalues2 = eigenvalues.reverse();
mat T2 = zeros(eigenvectors.rows(), eigenvectors.cols());
for (int i=0; i< eigenvectors.cols(); i++){
set_col(T2, index[i], get_col(eigenvectors, i));
}
eigenvectors = T2;
eigenvalues = eigenvalues2;
return true;
}
inline vec elem_mult(const vec&a, const vec&b){
vec ret = a;
for (int i=0; i<b.size(); i++)
ret(i) *= b(i);
return ret;
}
inline sparse_vec elem_mult(const sparse_vec&a, const sparse_vec&b){
return a.cwiseProduct(b);
}
inline double sum(const vec & a){
return a.sum();
}
inline double min(const vec &a){
return a.minCoeff();
}
inline double max(const vec & a){
return a.maxCoeff();
}
inline vec randu(int size){
return vec::Random(size);
}
inline fvec frandu(int size){
return fvec::Random(size);
}
inline double randu(){
return vec::Random(1)(0);
}
inline ivec randi(int size, int from, int to){
ivec ret(size);
for (int i=0; i<size; i++)
ret[i]= internal::random<int>(from,to);
return ret;
}
inline int randi(int from, int to){
return internal::random<int>(from,to);
}
inline ivec concat(const ivec&a, const ivec&b){
ivec ret(a.size()+b.size());
ret << a,b;
return ret;
}
inline void del(ivec&a, int i){
memcpy(a.data()+i, a.data() + i+1, (a.size() - i - 1)*sizeof(int));
a.conservativeResize(a.size() - 1); //resize without deleting values!
}
inline mat get_cols(const mat&A, ivec & cols){
mat a(A.rows(), cols.size());
for (int i=0; i< cols.size(); i++)
set_col(a, i, get_col(A, cols[i]));
return a;
}
// Return the submatrix A(:, start_col : end_col-1).
inline mat get_cols(const mat&A, int start_col, int end_col){
  assert(end_col > start_col);
  assert(end_col <= A.cols());
  assert(start_col >= 0);
  mat a(A.rows(), end_col-start_col);
  for (int i=0; i< end_col-start_col; i++)
    // Bug fix: the original copied column i instead of start_col + i, so
    // any call with start_col > 0 returned the wrong columns.
    set_col(a, i, get_col(A, start_col + i));
  return a;
}
inline void set_val(vec & v, int pos, double val){
v(pos) = val;
}
inline void set_val(sparse_vec & v, int pos, double val){
v.coeffRef(pos) = val;
}
inline double dot(const vec&a, const vec& b){
return a.dot(b);
}
inline vec reverse(vec& a){
return a.reverse();
}
inline ivec reverse(ivec& a){
return a.reverse();
}
inline const double * data(const mat &A){
return A.data();
}
inline const int * data(const imat &A){
return A.data();
}
inline const double * data(const vec &v){
return v.data();
}
/**
 * Minimal binary record stream, loosely modelled after the it++ it_file
 * class.  Record layout: strings and vectors are prefixed with an int
 * length; matrices store int rows, int cols, then the entries row by row
 * as doubles.  The constructor opens an existing file for reading, or
 * creates (truncates) the file for writing when it does not exist.
 */
class it_file{
  std::fstream fb;   // underlying stream carrying the binary records
public:
  it_file(const char * name){
    // Probe for existence by attempting an input open.
    fb.open(name, std::fstream::in);
    fb.close();
    if (fb.fail()){
      // Does not exist: clear the error state and create it for output.
      fb.clear(std::fstream::failbit);
      fb.open(name, std::fstream::out | std::fstream::trunc );
    }
    else {
      // Exists: reopen for input.
      fb.open(name, std::fstream::in);
    }
    if (!fb.is_open()){
      perror("Failed opening file ");
      printf("filename is: %s\n", name);
      assert(false);
    }
  };
  // Write a length-prefixed string record (used as a record name/tag).
  std::fstream & operator<<(const std::string str){
    int size = str.size();
    fb.write((char*)&size, sizeof(int));
    assert(!fb.fail());
    fb.write(str.c_str(), size);
    return fb;
  }
  // Write a matrix record: int rows, int cols, then row-major doubles.
  std::fstream &operator<<(mat & A){
    int rows = A.rows(), cols = A.cols();
    fb.write( (const char*)&rows, sizeof(int));
    fb.write( (const char *)&cols, sizeof(int));
    for (int i=0; i< A.rows(); i++)
      for (int j=0; j< A. cols(); j++){
        double val = A(i,j);
        fb.write( (const char *)&val, sizeof(double));
        assert(!fb.fail());
      }
    return fb;
  }
  // Write a length-prefixed vector of doubles.
  std::fstream &operator<<(const vec & v){
    int size = v.size();
    fb.write( (const char*)&size, sizeof(int));
    assert(!fb.fail());
    for (int i=0; i< v.size(); i++){
      double val = v(i);
      fb.write( (const char *)&val, sizeof(double));
      assert(!fb.fail());
    }
    return fb;
  }
  // Write a single raw double.
  std::fstream & operator<<(const double &v){
    fb.write((const char*)&v, sizeof(double));
    return fb;
  }
  // Read a string record and assert that it matches the expected tag 'str'.
  // NOTE(review): at most 256 bytes are read and compared; a record name
  // longer than 256 bytes would leave the stream mis-positioned.
  std::fstream & operator>>(std::string str){
    int size = -1;
    fb.read((char*)&size, sizeof(int));
    if (fb.fail() || fb.eof()){
      perror("Failed reading file");
      assert(false);
    }
    char buf[256];
    fb.read(buf, std::min(256,size));
    assert(!fb.fail());
    assert(!strncmp(str.c_str(), buf, std::min(256,size)));
    return fb;
  }
  // Read a matrix record written by operator<<(mat&).
  std::fstream &operator>>(mat & A){
    int rows, cols;
    fb.read( (char *)&rows, sizeof(int));
    assert(!fb.fail());
    fb.read( (char *)&cols, sizeof(int));
    assert(!fb.fail());
    A = mat(rows, cols);
    double val;
    for (int i=0; i< A.rows(); i++)
      for (int j=0; j< A. cols(); j++){
        fb.read((char*)&val, sizeof(double));
        assert(!fb.fail());
        A(i,j) = val;
      }
    return fb;
  }
  // Read a vector record written by operator<<(const vec&).
  std::fstream &operator>>(vec & v){
    int size;
    fb.read((char*)&size, sizeof(int));
    assert(!fb.fail());
    assert(size >0);
    v = vec(size);
    double val;
    for (int i=0; i< v.size(); i++){
      fb.read((char*)& val, sizeof(double));
      assert(!fb.fail());
      v(i) = val;
    }
    return fb;
  }
  // Read a single raw double.
  std::fstream &operator>>(double &v){
    fb.read((char*)&v, sizeof(double));
    assert(!fb.fail());
    return fb;
  }
  void close(){
    fb.close();
  }
};
#define Name(a) std::string(a)
inline void set_size(sparse_vec &v, int size){
//did not find a way to declare vector dimension, yet
}
inline void set_new(sparse_vec&v, int ind, double val){
v.insert(ind) = val;
}
inline int nnz(sparse_vec& v){
return v.nonZeros();
}
inline int get_nz_index(sparse_vec &v, sparse_vec::InnerIterator& i){
return i.index();
}
inline double get_nz_data(sparse_vec &v, sparse_vec::InnerIterator& i){
return i.value();
}
#define FOR_ITERATOR(i,v) \
for (sparse_vec::InnerIterator i(v); i; ++i)
template<typename T>
inline double sum_sqr(const T& a);
template<>
inline double sum_sqr<vec>(const vec & a){
vec ret = a.array().pow(2);
return ret.sum();
}
// Sum of squared entries of a sparse vector (only stored nonzeros matter).
template<>
inline double sum_sqr<sparse_vec>(const sparse_vec & a){
  double sum=0;
  FOR_ITERATOR(i,a){
    // Bug fix: powf() computes in single precision and silently loses
    // accuracy on double data; square with a plain multiply instead.
    const double val = i.value();
    sum += val * val;
  }
  return sum;
}
inline double trace(const mat & a){
return a.trace();
}
inline double get_nz_data(sparse_vec &v, int i){
assert(nnz(v) > i);
int cnt=0;
FOR_ITERATOR(j, v){
if (cnt == i){
return j.value();
}
cnt++;
}
return 0.0;
}
inline void print(sparse_vec & vec){
int cnt = 0;
FOR_ITERATOR(i, vec){
std::cout<<get_nz_index(vec, i)<<":"<< get_nz_data(vec, i) << " ";
cnt++;
if (cnt >= 20)
break;
}
std::cout<<std::endl;
}
// Element-wise integer power of a double vector.
inline vec pow(const vec&v, int exponent){
  vec ret = vec(v.size());
  for (int i=0; i< v.size(); i++)
    // Bug fix: powf() is the single-precision routine; use std::pow so
    // the computation stays in double precision.
    ret[i] = std::pow(v[i], exponent);
  return ret;
}
inline double dot_prod(sparse_vec &v1, sparse_vec & v2){
return v1.dot(v2);
}
inline double dot_prod(const vec &v1, const vec & v2){
return v1.dot(v2);
}
inline double dot3(const vec &v1, const vec & v2, const vec & v3){
double ret = 0;
for (int i=0; i < v1.size(); i++)
ret+= v1[i]*v2[i]*v3[i];
return ret;
}
inline double dot_prod(sparse_vec &v1, const vec & v2){
double sum = 0;
for (int i=0; i< v2.size(); i++){
sum+= v2[i] * v1.coeffRef(i);
}
return sum;
}
// Cumulative sum: ret(i) = v(0) + v(1) + ... + v(i).
inline vec cumsum(vec& v){
  vec ret = v;
  // Running-sum formulation: O(n) instead of the original O(n^2)
  // double loop, with identical results.
  for (int i=1; i< v.size(); i++)
    ret(i) = ret(i-1) + v(i);
  return ret;
}
inline double get_val(sparse_vec & v1, int i){ //TODO optimize performance
for (sparse_vec::InnerIterator it(v1); it; ++it)
if (it.index() == i)
return it.value();
return 0;
}
inline double get_val(vec & v1, int i){
return v1(i);
}
inline void set_div(sparse_vec&v, sparse_vec::InnerIterator i, double val){
v.coeffRef(i.index()) /= val;
}
inline sparse_vec minus(sparse_vec &v1,sparse_vec &v2){
return v1-v2;
}
inline vec minus( sparse_vec &v1, vec &v2){
vec ret = -v2;
FOR_ITERATOR(i, v1){
ret[i.index()] += i.value();
}
return ret;
}
inline void plus( vec &v1, sparse_vec &v2){
FOR_ITERATOR(i, v2){
v1[i.index()] += i.value();
}
}
inline void minus( vec &v1, sparse_vec &v2){
FOR_ITERATOR(i, v2){
v1[i.index()] -= i.value();
}
}
inline sparse_vec fabs( sparse_vec & dvec1){
sparse_vec ret = dvec1;
FOR_ITERATOR(i, ret){
ret.coeffRef(i.index()) = fabs(i.value());
}
return ret;
};
inline vec fabs( const vec & dvec1){
vec ret(dvec1.size());
for (int i=0; i< dvec1.size(); i++){
ret(i) = fabs(dvec1(i));
}
return ret;
};
inline double abs_sum(const mat& A){
double sum =0;
for (int i=0; i< A.rows(); i++)
for (int j=0; j< A.cols(); j++)
sum += fabs(A(i,j));
return sum;
}
inline double abs_sum(const vec &v){
double sum =0;
for (int i=0; i< v.size(); i++)
sum += fabs(v(i));
return sum;
}
inline double sum(const sparse_vec &v){
double sum =0;
FOR_ITERATOR(i, v){
sum += i.value();
}
return sum;
}
inline vec sqrt(const vec & v){
vec ret(v.size());
for (int i=0; i< v.size(); i++){
ret[i] = std::sqrt(v(i));
}
return ret;
}
inline void svd(const mat & A, mat & U, mat & V, vec & singular_values){
Eigen::JacobiSVD<mat> svdEigen(A, Eigen::ComputeFullU | Eigen::ComputeFullV);
U= svdEigen.matrixU();
V= svdEigen.matrixV();
singular_values =svdEigen.singularValues();
}
// Strict-weak ordering for (value, index) pairs, descending by value.
// Bug fix: take the pairs by const reference — standard algorithms may
// invoke the comparator on const lvalues or temporaries, which cannot
// bind to the original non-const reference parameters.
inline bool pair_compare (const std::pair<double,int> &x1, const std::pair<double,int> & x2) { return (x1.first>x2.first); }
// Fill 'out' with the min(K, a.size()) largest values of 'a' in descending
// order and return the corresponding entries of 'indices'.  'out' must
// already have room for that many entries.
inline ivec reverse_sort_index2(const vec&a, const ivec&indices, vec & out, int K){
  assert(a.size() == indices.size());
  assert(K > 0);
  int size = std::min((unsigned int)a.size(), (unsigned int)K);
  ivec ret(size);
  std::vector<std::pair<double,int> > D;
  D.reserve(a.size());
  for (int i=0;i<a.size();i++)
    // Bug fix: dropped the explicit make_pair template arguments — with
    // them, the lvalue arguments cannot bind to the rvalue-reference
    // parameters and the code fails to compile under C++11 and later.
    D.push_back(std::make_pair((double)a[i], (int)indices[i]));
  // Only the top-'size' pairs need to be ordered.
  std::partial_sort(D.begin(),D.begin() + size, D.end(), pair_compare);
  for (int i=0;i< size;i++)
  {
    ret[i]=D[i].second;
    out[i] = D[i].first;
  }
  return ret;
}
// Return the indices of the min(K, a.size()) largest entries of 'a',
// ordered by descending value.
inline ivec reverse_sort_index(const vec& a, int K){
  assert(K > 0);
  int size = std::min((unsigned int)a.size(), (unsigned int)K);
  ivec ret(size);
  std::vector<std::pair<double,int> > D;
  D.reserve(a.size());
  for (int i=0;i<a.size();i++)
    // Bug fix: dropped the explicit make_pair template arguments — with
    // them, the lvalue 'i' cannot bind to the rvalue-reference parameter
    // and the code fails to compile under C++11 and later.
    D.push_back(std::make_pair((double)a[i], (int)i));
  // Only the top-'size' pairs need to be ordered.
  std::partial_sort(D.begin(),D.begin() + size, D.end(), pair_compare);
  for (int i=0;i< size;i++)
  {
    ret[i]=D[i].second;
  }
  return ret;
}
// Return the indices of the min(K, nnz(a)) largest stored entries of the
// sparse vector 'a', ordered by descending value.
inline ivec reverse_sort_index(sparse_vec& a, int K){
  assert(K > 0);
  int size = std::min((unsigned int)nnz(a), (unsigned int)K);
  ivec ret(size);
  std::vector<std::pair<double,int> > D;
  D.reserve(nnz(a));
  FOR_ITERATOR(i, a){
    // Bug fix: dropped the explicit make_pair template arguments — they
    // prevent compilation under C++11 and later (lvalues cannot bind to
    // the rvalue-reference parameters).
    D.push_back(std::make_pair((double)i.value(), (int)i.index()));
  }
  // Only the top-'size' pairs need to be ordered.
  std::partial_sort(D.begin(),D.begin() + size, D.end(), pair_compare);
  for (int i=0;i< size;i++)
  {
    ret[i]=D[i].second;
  }
  return ret;
}
//define function to be applied coefficient-wise
// Coefficient-wise indicator function: 1.0 for a nonzero x, otherwise 0.0
// (intended for use with Eigen's unaryExpr).
// Bug fix: declared 'inline' — a non-inline definition in a header causes
// duplicate-symbol link errors as soon as the header is included from more
// than one translation unit.
inline double equal_greater(double x){
  if (x != 0)
    return 1;
  else
    return 0;
}
//sort(edges.begin(), edges.end());
#undef NDEBUG
#endif
| C++ |
/**
* Copyright (c) 2009 Carnegie Mellon University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an "AS
* IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language
* governing permissions and limitations under the License.
*
* For more about this software visit:
*
* http://graphchi.org
*
*/
#include "common.hpp"
#include "types.hpp"
#include "eigen_wrapper.hpp"
#include "timer.hpp"
using namespace std;
int input_cols = 3;
/* Metrics object for keeping track of performance counters
and other information. Currently required. */
metrics m("svd-onesided-inmemory-factors");
int nshards;
// Per-vertex state for the one-sided Lanczos SVD solver.
struct vertex_data {
  vec pvec;        // this vertex's slice of the Lanczos basis vectors
  double value;
  double A_ii;     // diagonal entry; defaults to 1
  vertex_data(){ value = 0; A_ii = 1; }
  //TODO void add_self_edge(double value) { A_ii = value; }
  // Store 'value' into basis-vector slot 'field_type' of pvec.
  void set_val(double value, int field_type) {
    pvec[field_type] = value;
  }
  //double get_output(int field_type){ return pred_x; }
}; // end of vertex_data
// Edge payload: a single float rating/weight.
struct edge_data {
  float weight;
  edge_data(double weight = 0) : weight(weight) { }
  // Two-argument form matches the generic sharder interface; the second
  // value is intentionally ignored by this algorithm.
  edge_data(double weight, double ignored) : weight(weight) { }
  //void set_field(int pos, double val){ weight = val; }
  //double get_field(int pos){ return weight; }
};
/**
* Type definitions. Remember to create suitable graph shards using the
* Sharder-program.
*/
typedef vertex_data VertexDataType;
typedef edge_data EdgeDataType; // Edges store the "rating" of user->movie pair
graphchi_engine<VertexDataType, EdgeDataType> * pengine = NULL;
std::vector<vertex_data> latent_factors_inmem;
#include "io.hpp"
/**
*
* Implementation of the Lanczos algorithm, as given in:
* http://en.wikipedia.org/wiki/Lanczos_algorithm
*
* Code written by Danny Bickson, CMU, June 2011
* */
//LANCZOS VARIABLES
int max_iter = 10;
bool no_edge_data = false;
int actual_vector_len;
int nv = 0;
int nsv = 0;
double tol = 1e-8;
bool finished = false;
int ortho_repeats = 3;
bool save_vectors = false;
std::string format = "matrixmarket";
int nodes = 0;
int data_size = max_iter;
#include "math.hpp"
#include "printouts.hpp"
/**
 * Allocate the in-memory per-vertex vectors used by the Lanczos
 * iterations.  Row-side nodes receive a full basis of length data_size
 * (= nsv + nv + 1 + max_iter, plus 3 extra scratch slots when the matrix
 * is square); column-side nodes only need 3 scratch slots.
 */
void init_lanczos(bipartite_graph_descriptor & info){
  srand48(time(NULL));   // seed the RNG used for the random start vector
  latent_factors_inmem.resize(info.total());
  data_size = nsv + nv+1 + max_iter;
  actual_vector_len = data_size;
  if (info.is_square())
    actual_vector_len = data_size + 3;
#pragma omp parallel for
  for (int i=0; i< info.total(); i++){
    // Row nodes (and all nodes of a square matrix) get the full basis;
    // remaining column nodes only get the 3 scratch slots.
    if (i < info.get_start_node(false) || info.is_square())
      latent_factors_inmem[i].pvec = zeros(actual_vector_len);
    else latent_factors_inmem[i].pvec = zeros(3);
  }
  logstream(LOG_INFO)<<"Allocated a total of: " <<
    ((double)(data_size * info.num_nodes(true) +3.0*info.num_nodes(false)) * sizeof(double)/ 1e6) << " MB for storing vectors." << " rows: " << info.num_nodes(true) << std::endl;
}
/**
 * One-sided restarted Lanczos bidiagonalization (Golub-Kahan style) for
 * computing the top singular triplets of the sparse matrix described by info.
 *
 * @param info    bipartite graph descriptor of the input matrix A
 * @param mytimer timer used only for progress printouts
 * @param errest  [out] per-singular-value error estimates (resized to nv)
 * @param vecfile optional file name of the initial vector; empty => random v_0
 * @return        vector of length data_size holding the computed singular
 *                values in its first nconv entries
 *
 * Uses the globals nv, nsv, tol, max_iter, data_size and the distributed-math
 * wrappers (DistMat/DistVec/DistSlicedMat) from math.hpp; matrix-vector
 * products run through the GraphChi engine.
 */
vec one_sided_lanczos( bipartite_graph_descriptor & info, timer & mytimer, vec & errest,
    const std::string & vecfile){
  int nconv = 0;                 // number of converged singular values so far
  int its = 1;                   // outer (restart) iteration counter
  DistMat A(info);
  // for a square matrix the u-side scratch lives after the V columns
  int other_size_offset = info.is_square() ? data_size : 0;
  DistSlicedMat U(other_size_offset, other_size_offset + 3, true, info, "U");
  DistSlicedMat V(0, data_size, false, info, "V");
  DistVec v(info, 1, false, "v");
  DistVec u(info, other_size_offset+ 0, true, "u");
  DistVec u_1(info, other_size_offset+ 1, true, "u_1");
  DistVec tmp(info, other_size_offset + 2, true, "tmp");
  vec alpha, beta, b;
  vec sigma = zeros(data_size);  // singular value accumulator (returned)
  errest = zeros(nv);
  DistVec v_0(info, 0, false, "v_0");
  if (vecfile.size() == 0)
    v_0 = randu(size(A,2));      // random start vector unless one was loaded
  PRINT_VEC2("svd->V", v_0);
  // normalize the initial vector
  DistDouble vnorm = norm(v_0);
  v_0=v_0/vnorm;
  PRINT_INT(nv);
  while(nconv < nsv && its < max_iter){
    std::cout<<"Starting iteration: " << its << " at time: " << mytimer.current_time() << std::endl;
    int k = nconv;               // restart from the converged block
    int n = nv;
    PRINT_INT(k);
    PRINT_INT(n);
    PRINT_VEC2("v", v);
    PRINT_VEC2("u", u);
    alpha = zeros(n);
    beta = zeros(n);
    u = V[k]*A._transpose();
    PRINT_VEC2("u",u);
    // inner Lanczos recurrence: builds alpha (diagonal) and beta
    // (super-diagonal) of the bidiagonal projection of A
    for (int i=k+1; i<n; i++){
      std::cout <<"Starting step: " << i << " at time: " << mytimer.current_time() << std::endl;
      PRINT_INT(i);
      V[i]=u*A;
      double a = norm(u).toDouble();
      u = u / a;
      multiply(V, i, a);
      PRINT_DBL(a);
      double b;
      orthogonalize_vs_all(V, i, b);  // full re-orthogonalization; b = residual norm
      PRINT_DBL(b);
      u_1 = V[i]*A._transpose();
      u_1 = u_1 - u*b;
      alpha(i-k-1) = a;
      beta(i-k-1) = b;
      PRINT_VEC3("alpha", alpha, i-k-1);
      PRINT_VEC3("beta", beta, i-k-1);
      // rotate u <-> u_1 via tmp (swap of scratch columns)
      tmp = u;
      u = u_1;
      u_1 = tmp;
    }
    // one last half-step to close the recurrence
    V[n]= u*A;
    double a = norm(u).toDouble();
    PRINT_DBL(a);
    u = u/a;
    double b;
    multiply(V, n, a);
    orthogonalize_vs_all(V, n, b);
    alpha(n-k-1)= a;
    beta(n-k-1) = b;
    PRINT_VEC3("alpha", alpha, n-k-1);
    PRINT_VEC3("beta", beta, n-k-1);
    //compute svd of bidiagonal matrix
    PRINT_INT(nv);
    PRINT_NAMED_INT("svd->nconv", nconv);
    n = nv - nconv;              // active (unconverged) block size
    PRINT_INT(n);
    alpha.conservativeResize(n);
    beta.conservativeResize(n);
    PRINT_MAT2("Q",eye(n));
    PRINT_MAT2("PT",eye(n));
    PRINT_VEC2("alpha",alpha);
    PRINT_VEC2("beta",beta);
    // assemble the (upper) bidiagonal matrix T from alpha/beta
    mat T=diag(alpha);
    for (int i=0; i<n-1; i++)
      set_val(T, i, i+1, beta(i));
    PRINT_MAT2("T", T);
    mat aa,PT;
    vec bb;
    svd(T, aa, PT, bb);          // dense SVD of the small projected matrix
    PRINT_MAT2("Q", aa);
    alpha=bb.transpose();        // projected singular values
    PRINT_MAT2("alpha", alpha);
    for (int t=0; t< n-1; t++)
      beta(t) = 0;
    PRINT_VEC2("beta",beta);
    PRINT_MAT2("PT", PT.transpose());
    //estiamte the error
    int kk = 0;                  // newly converged values in this sweep
    for (int i=nconv; i < nv; i++){
      int j = i-nconv;
      PRINT_INT(j);
      sigma(i) = alpha(j);
      PRINT_NAMED_DBL("svd->sigma[i]", sigma(i));
      PRINT_NAMED_DBL("Q[j*n+n-1]",aa(n-1,j));
      PRINT_NAMED_DBL("beta[n-1]",beta(n-1));
      // standard Lanczos error bound: |last row of Q| * trailing beta
      errest(i) = abs(aa(n-1,j)*beta(n-1));
      PRINT_NAMED_DBL("svd->errest[i]", errest(i));
      if (alpha(j) > tol){
        errest(i) = errest(i) / alpha(j);   // relative error when sigma is large enough
        PRINT_NAMED_DBL("svd->errest[i]", errest(i));
      }
      if (errest(i) < tol){
        kk = kk+1;
        PRINT_NAMED_INT("k",kk);
      }
      if (nconv +kk >= nsv){
        printf("set status to tol\n");
        finished = true;
      }
    }//end for
    PRINT_NAMED_INT("k",kk);
    // NOTE: this local vec shadows the DistVec v declared above; it is the
    // restart vector assigned into V[nconv] below.
    vec v;
    if (!finished){
      vec swork=get_col(PT,kk);
      PRINT_MAT2("swork", swork);
      v = zeros(size(A,1));
      for (int ttt=nconv; ttt < nconv+n; ttt++){
        v = v+swork(ttt-nconv)*(V[ttt].to_vec());
      }
      PRINT_VEC2("svd->V",V[nconv]);
      PRINT_VEC2("v[0]",v);
    }
    //compute the ritz eigenvectors of the converged singular triplets
    if (kk > 0){
      PRINT_VEC2("svd->V", V[nconv]);
      mat tmp= V.get_cols(nconv,nconv+n)*PT;  // (shadows the DistVec tmp)
      V.set_cols(nconv, nconv+kk, get_cols(tmp, 0, kk));
      PRINT_VEC2("svd->V", V[nconv]);
    }
    nconv=nconv+kk;
    if (finished)
      break;
    V[nconv]=v;                  // install the restart vector
    PRINT_VEC2("svd->V", V[nconv]);
    PRINT_NAMED_INT("svd->nconv", nconv);
    its++;
    PRINT_NAMED_INT("svd->its", its);
    PRINT_NAMED_INT("svd->nconv", nconv);
    //nv = min(nconv+mpd, N);
    //if (nsv < 10)
    //  nv = 10;
    PRINT_NAMED_INT("nv",nv);
  } // end(while)
  printf(" Number of computed signular values %d",nconv);
  printf("\n");
  // residual check + optional save of the left singular vectors (U)
  DistVec normret(info, other_size_offset + 1, true, "normret");
  DistVec normret_tranpose(info, nconv, false, "normret_tranpose");
  for (int i=0; i < nconv; i++){
    u = V[i]*A._transpose();
    double a = norm(u).toDouble();
    u = u / a;
    if (save_vectors){
      char output_filename[256];
      sprintf(output_filename, "%s.U.%d", training.c_str(), i);
      write_output_vector(output_filename, u.to_vec(), false, "GraphLab v2 SVD output. This file contains eigenvector number i of the matrix U");
    }
    // err = || [A'v - sigma*u ; Au - sigma*v] ||, relative when sigma > tol
    normret = V[i]*A._transpose() - u*sigma(i);
    double n1 = norm(normret).toDouble();
    PRINT_DBL(n1);
    normret_tranpose = u*A -V[i]*sigma(i);
    double n2 = norm(normret_tranpose).toDouble();
    PRINT_DBL(n2);
    double err=sqrt(n1*n1+n2*n2);
    PRINT_DBL(err);
    PRINT_DBL(tol);
    if (sigma(i)>tol){
      err = err/sigma(i);
    }
    PRINT_DBL(err);
    PRINT_DBL(sigma(i));
    printf("Singular value %d \t%13.6g\tError estimate: %13.6g\n", i, sigma(i),err);
  }
  if (save_vectors){
    if (nconv == 0)
      logstream(LOG_FATAL)<<"No converged vectors. Aborting the save operation" << std::endl;
    char output_filename[256];
    for (int i=0; i< nconv; i++){
      sprintf(output_filename, "%s.V.%d", training.c_str(), i);
      write_output_vector(output_filename, V[i].to_vec(), false, "GraphLab v2 SVD output. This file contains eigenvector number i of the matrix V'");
    }
  }
  return sigma;
}
/**
 * Lanczos SVD driver: parses command-line options, preprocesses the input
 * matrix into shards, runs the one-sided Lanczos solver and writes the
 * singular values (and optionally the singular vectors) to disk.
 */
int main(int argc, const char *argv[]) {
  print_copyright();
  /* GraphChi initialization will read the command line arguments and the configuration file. */
  graphchi_init(argc, argv);
  std::string vecfile;
  vecfile = get_option_string("initial_vector", "");
  debug = get_option_int("debug", 0);
  ortho_repeats = get_option_int("ortho_repeats", 3);
  nv = get_option_int("nv", 1);
  nsv = get_option_int("nsv", 1);
  tol = get_option_float("tol", 1e-5);
  save_vectors = get_option_int("save_vectors", 1);
  input_cols = get_option_int("input_cols", 3);
  max_iter = get_option_int("max_iter", max_iter);
  parse_command_line_args();
  parse_implicit_command_line();
  // the solver needs at least nsv working vectors per sweep
  if (nv < nsv){
    logstream(LOG_FATAL)<<"Please set the number of vectors --nv=XX, to be greater than the number of support vectors --nsv=XX " << std::endl;
  }
  //unit testing
  if (unittest == 1){
    training = "gklanczos_testA";
    vecfile = "gklanczos_testA_v0";
    nsv = 3; nv = 3;
    debug = true;
    //TODO core.set_ncpus(1);
  }
  else if (unittest == 2){
    training = "gklanczos_testB";
    vecfile = "gklanczos_testB_v0";
    nsv = 10; nv = 10;
    debug = true; max_iter = 100;
    //TODO core.set_ncpus(1);
  }
  else if (unittest == 3){
    training = "gklanczos_testC";
    vecfile = "gklanczos_testC_v0";
    nsv = 25; nv = 25;
    debug = true; max_iter = 100;
    //TODO core.set_ncpus(1);
  }
  std::cout << "Load matrix " << training << std::endl;
  /* Preprocess data if needed, or discover preprocess files */
  if (input_cols == 3)
    nshards = convert_matrixmarket<edge_data>(training);
  else if (input_cols == 4)
    nshards = convert_matrixmarket4<edge_data>(training);
  else logstream(LOG_FATAL)<<"--input_cols=XX should be either 3 or 4 input columns" << std::endl;
  info.rows = M; info.cols = N; info.nonzeros = L;
  assert(info.rows > 0 && info.cols > 0 && info.nonzeros > 0);
  timer mytimer; mytimer.start();
  init_lanczos(info);
  init_math(info, ortho_repeats);
  //read initial vector from file (optional)
  if (vecfile.size() > 0){
    std::cout << "Load inital vector from file" << vecfile << std::endl;
    load_matrix_market_vector(vecfile, info, 0, true, false);
  }
  //or start with a random initial vector
  else {
#pragma omp parallel for
    for (int i=0; i< (int)M; i++)
      latent_factors_inmem[i].pvec[0] = drand48();
  }
  graphchi_engine<VertexDataType, EdgeDataType> engine(training, nshards, false, m);
  set_engine_flags(engine);
  pengine = &engine;
  vec errest;
  vec singular_values = one_sided_lanczos(info, mytimer, errest, vecfile);
  std::cout << "Lanczos finished in " << mytimer.current_time() << std::endl;
  write_output_vector(training + ".singular_values", singular_values,false, "%GraphLab SVD Solver library. This file contains the singular values.");
  // unit tests: verify the error estimates are essentially zero
  if (unittest == 1){
    assert(errest.size() == 3);
    for (int i=0; i< errest.size(); i++)
      assert(errest[i] < 1e-30);
  }
  else if (unittest == 2){
    assert(errest.size() == 10);
    for (int i=0; i< errest.size(); i++)
      assert(errest[i] < 1e-15);
  }
  /* Report execution metrics */
  if (!quiet)
    metrics_report(m);
  return 0;
}
| C++ |
/**
* Copyright (c) 2009 Carnegie Mellon University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an "AS
* IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language
* governing permissions and limitations under the License.
*
* For more about this software visit:
*
* http://www.graphlab.ml.cmu.edu
*
*/
#ifndef GRAPHLAB_TIMER_HPP
#define GRAPHLAB_TIMER_HPP
#include <sys/time.h>
#include <stdio.h>
#include <iostream>
/**
* \ingroup util
*
* \brief A simple class that can be used for benchmarking/timing up
* to microsecond resolution.
*
* Standard Usage
* =================
*
* The timer is used by calling \ref graphlab::timer::start and then
* by getting the current time since start by calling
* \ref graphlab::timer::current_time.
*
* For example:
*
* \code
*
* graphlab::timer timer;
* timer.start();
* // do something
* std::cout << "Elapsed time: " << timer.current_time() << std::endl;
* \endcode
*
* Fast approximate time
* ====================
*
* Calling current item in a tight loop can be costly and so we
* provide a faster less accurate timing primitive which reads a
* local time variable that is updated roughly every 100 millisecond.
* These are the \ref graphlab::timer::approx_time_seconds and
* \ref graphlab::timer::approx_time_millis.
*/
/**
 * Wall-clock stopwatch with microsecond resolution built on gettimeofday().
 * Starts automatically on construction; call start() to reset.
 */
class timer {
private:
  /// \brief The internal start time for this timer object.
  timeval start_time_;
public:
  /// \brief The timer starts on construction but can be restarted by calling start().
  inline timer() { start(); }

  /// \brief Reset the timer to the current wall-clock time.
  inline void start() { gettimeofday(&start_time_, NULL); }

  /**
   * \brief Returns the elapsed time in seconds since start() was last called.
   * @return time in seconds since start() was called.
   */
  inline double current_time() const {
    timeval current_time;
    // BUGFIX: this argument was corrupted to the mojibake "¤t_time"
    // (an HTML-entity mangling of "&current_time"), which does not compile.
    gettimeofday(&current_time, NULL);
    double answer =
      // (current_time.tv_sec + ((double)current_time.tv_usec)/1.0E6) -
      // (start_time_.tv_sec + ((double)start_time_.tv_usec)/1.0E6);
      (double)(current_time.tv_sec - start_time_.tv_sec) +
      ((double)(current_time.tv_usec - start_time_.tv_usec))/1.0E6;
    return answer;
  } // end of current_time

  /**
   * \brief Returns the elapsed time in milliseconds since start() was last called.
   * @return time in milliseconds since start() was called.
   */
  inline double current_time_millis() const { return current_time() * 1000; }
}; // end of Timer
#endif
| C++ |
/**
* @file
* @author Danny Bickson
* @version 1.0
*
* @section LICENSE
*
* Copyright [2012] [Carnegie Mellon University]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @section DESCRIPTION
*
* Implementation of the libfm algorithm.
* Steffen Rendle (2010): Factorization Machines, in Proceedings of the 10th IEEE International Conference on Data Mining (ICDM 2010), Sydney, Australia.
* Original implementation by Qiang Yan, Chinese Academy of Science.
* note: this code version implements the SGD version of libfm. In the original library there are also ALS and MCMC methods.
* Also the treatment of features is richer in libfm. The code here can serve for a quick evaluation but the user
* is encouraged to try libfm as well.
*/
#include "common.hpp"
#include "eigen_wrapper.hpp"
double libfm_rate = 1e-02;      // SGD learning rate (decayed each iteration)
double libfm_mult_dec = 0.9;    // multiplicative learning-rate decay per iteration
double libfm_regw = 1e-3;       // regularization for the bias terms
double libfm_regv = 1e-3;       // regularization for the latent factors
double reg0 = 0.1;              // regularization for the global mean
bool debug = false;
int time_offset = 1; //time bin starts from 1?
/* Node-id range predicates. The graph packs node types into contiguous id
 * ranges: users [0, M), items [M, M+N), time bins [M+N, M+N+K). */
bool is_user(vid_t id){ return id < M; }
// BUGFIX: upper bound was `id < N`, which (per is_time below and the M+N
// indexing used throughout this file) missed item ids in [N, M+N).
bool is_item(vid_t id){ return id >= M && id < M+N; }
bool is_time(vid_t id){ return id >= M+N; }
#define BIAS_POS -1
/* Per-node model state: a latent factor vector, a scalar bias, and (for user
 * nodes) the index of the last item the user rated. The virtual index
 * BIAS_POS addresses the bias through set_val/get_val. */
struct vertex_data {
  vec pvec;       // latent factor vector (length D)
  double bias;    // per-node bias term
  int last_item;  // last rated item (users only)

  vertex_data() : bias(0), last_item(0) {}

  // Write slot `index` of this node; BIAS_POS routes to the bias scalar.
  void set_val(int index, float val){
    if (index == BIAS_POS) {
      bias = val;
      return;
    }
    pvec[index] = val;
  }

  // Read slot `index` of this node; BIAS_POS routes to the bias scalar.
  float get_val(int index){
    return (index == BIAS_POS) ? bias : pvec[index];
  }
};
/* A single training observation: the rating (weight) and its time bin. */
struct edge_data {
  double weight;  // observed rating of the user->item pair
  double time;    // time bin in which the rating was given

  edge_data() : weight(0), time(0) {}
  edge_data(double weight, double time) : weight(weight), time(time) {}
};
/* A non-owning "view" onto a vertex_data entry: raw pointers alias the
 * underlying bias / factor / last_item fields so that writes through this
 * struct update the model in place. NOTE: the constructor casts away the
 * const of the source vertex_data — callers pass mutable entries. */
struct vertex_data_libfm{
  double * bias;    // aliases vertex_data::bias
  double * v;       // aliases vertex_data::pvec[0] (D contiguous doubles assumed — TODO confirm vec storage is contiguous)
  int *last_item;   // aliases vertex_data::last_item
  vertex_data_libfm(const vertex_data & vdata){
    v = (double*)&vdata.pvec[0];
    bias = (double*)&vdata.bias;
    last_item = (int*)&vdata.last_item;
  }
  // Rebind the view to another vertex_data entry.
  vertex_data_libfm & operator=(vertex_data & data){
    v = (double*)&data.pvec[0];
    bias = (double*)&data.bias;
    last_item = (int*)&data.last_item;
    return * this;
  }
};
/**
* Type definitions. Remember to create suitable graph shards using the
* Sharder-program.
*/
typedef vertex_data VertexDataType;
typedef edge_data EdgeDataType;  // Edges store the "rating" of user->movie pair
// Engines for the training pass and the optional validation pass.
graphchi_engine<VertexDataType, EdgeDataType> * pengine = NULL;
graphchi_engine<VertexDataType, EdgeDataType> * pvalidation_engine = NULL;
// In-memory model: users [0,M), items [M,M+N), time bins [M+N,M+N+K),
// last-item factors [M+N+K, M+N+K+M).
std::vector<vertex_data> latent_factors_inmem;
#include "io.hpp"
#include "rmse.hpp"
#include "rmse_engine4.hpp"
/**
 * Factorization-machine prediction for one (user, movie, time) triple.
 *
 * prediction = globalMean + biases + 0.5 * sum_f ((sum_i v_if)^2 - sum_i v_if^2),
 * i.e. the second-order FM interaction over the four involved feature
 * vectors (user, movie, time bin, user's last rated item), then clipped
 * to [minval, maxval].
 *
 * @param sum [out] per-factor sums, reused by the caller's SGD gradient step
 * @return the squared prediction error (rating - prediction)^2
 */
float libfm_predict(const vertex_data_libfm& user,
    const vertex_data_libfm& movie,
    const vertex_data_libfm& time,
    const float rating,
    double& prediction, vec * sum){
  vertex_data & last_item = latent_factors_inmem[M+N+K+(*user.last_item)]; //TODO, when no ratings, last item is 0
  vec sum_sqr = zeros(D);
  *sum = zeros(D);
  // linear part: global mean plus the four bias terms
  prediction = globalMean + *user.bias + *movie.bias + *time.bias + last_item.bias;
  // pairwise interaction part, computed factor-by-factor in O(D)
  for (int j=0; j< D; j++){
    sum->operator[](j) += user.v[j] + movie.v[j] + time.v[j] + last_item.pvec[j];
    sum_sqr[j] = pow(user.v[j],2) + pow(movie.v[j],2) + pow(time.v[j],2) + pow(last_item.pvec[j],2);
    prediction += 0.5 * (pow(sum->operator[](j),2) - sum_sqr[j]);
  }
  //truncate prediction to allowed values
  prediction = std::min((double)prediction, maxval);
  prediction = std::max((double)prediction, minval);
  //return the squared error
  float err = rating - prediction;
  assert(!std::isnan(err));
  return err*err;
}
float libfm_predict(const vertex_data& user,
const vertex_data& movie,
const float rating,
double & prediction,
void * extra){
vec sum;
return libfm_predict(vertex_data_libfm((vertex_data&)user), vertex_data_libfm((vertex_data&)movie), vertex_data_libfm(*(vertex_data*)extra), rating, prediction, &sum);
}
/* Allocate the model (users + items + time bins + last-item block) and
 * initialize every latent factor vector: constant 0.1 in debug mode,
 * otherwise uniform random scaled by 0.1/sqrt(D). */
void init_libfm(){
  srand(time(NULL));
  latent_factors_inmem.resize(M+N+K+M);
  assert(D > 0);
  double init_scale = 0.1/sqrt(D);
#pragma omp parallel for
  for (int node=0; node< (int)(M+N+K+M); node++)
    latent_factors_inmem[node].pvec = (debug ? 0.1*ones(D) : (::randu(D)*init_scale));
}
/**
* GraphChi programs need to subclass GraphChiProgram<vertex-type, edge-type>
* class. The main logic is usually in the update function.
*/
/**
 * GraphChi program implementing SGD training of the factorization machine.
 * Iteration 0 only records, per user, the last item that user rated (by
 * maximal edge time). Every later iteration performs one SGD step for each
 * of the user's training edges.
 */
struct LIBFMVerticesInMemProgram : public GraphChiProgram<VertexDataType, EdgeDataType> {
  /*
   * Vertex update function - computes the least square step
   */
  void update(graphchi_vertex<VertexDataType, EdgeDataType> &vertex, graphchi_context &gcontext) {
    if (gcontext.iteration == 0){
      if (is_user(vertex.id())) { //user node. find the last rated item and store it
        vertex_data_libfm user = latent_factors_inmem[vertex.id()];
        int max_time = 0;
        for(int e=0; e < vertex.num_outedges(); e++) {
          const edge_data & edge = vertex.edge(e)->get_data();
          if (edge.time >= max_time){
            max_time = (int)(edge.time - time_offset);
            *user.last_item = vertex.edge(e)->vertex_id() - M;
          }
        }
      }
      if (is_user(vertex.id()) && vertex.num_outedges() == 0)
        logstream(LOG_WARNING)<<"Vertex: " << vertex.id() << " with no edges: " << std::endl;
      return;  // (removed a duplicated, unreachable `return;` that followed here)
    }
    //go over all user nodes
    if (is_user(vertex.id())){
      vertex_data_libfm user = latent_factors_inmem[vertex.id()];
      assert(*user.last_item >= 0 && *user.last_item < (int)N);
      vertex_data & last_item = latent_factors_inmem[M+N+K+(*user.last_item)];
      for(int e=0; e < vertex.num_outedges(); e++) {
        vertex_data_libfm movie(latent_factors_inmem[vertex.edge(e)->vertex_id()]);
        float rui = vertex.edge(e)->get_data().weight;
        double pui;
        vec sum;
        vertex_data & time = latent_factors_inmem[(int)vertex.edge(e)->get_data().time - time_offset];
        float sqErr = libfm_predict(user, movie, time, rui, pui, &sum);
        float eui = pui - rui;  // signed prediction error drives the gradient
        // bias updates (regularized SGD)
        globalMean -= libfm_rate * (eui + reg0 * globalMean);
        *user.bias -= libfm_rate * (eui + libfm_regw * *user.bias);
        *movie.bias -= libfm_rate * (eui + libfm_regw * *movie.bias);
        time.bias -= libfm_rate * (eui + libfm_regw * time.bias);
        assert(!std::isnan(time.bias));
        last_item.bias -= libfm_rate * (eui + libfm_regw * last_item.bias);
        // factor updates: FM gradient for feature i is (sum - v_i) per factor
        for(int f = 0; f < D; f++){
          // user
          float grad = sum[f] - user.v[f];
          user.v[f] -= libfm_rate * (eui * grad + libfm_regv * user.v[f]);
          // item
          grad = sum[f] - movie.v[f];
          movie.v[f] -= libfm_rate * (eui * grad + libfm_regv * movie.v[f]);
          // time
          grad = sum[f] - time.pvec[f];
          time.pvec[f] -= libfm_rate * (eui * grad + libfm_regv * time.pvec[f]);
          // last item
          grad = sum[f] - last_item.pvec[f];
          last_item.pvec[f] -= libfm_rate * (eui * grad + libfm_regv * last_item.pvec[f]);
        }
        rmse_vec[omp_get_thread_num()] += sqErr;
      }
    }
  }

  /**
   * Called before an iteration is started.
   */
  void before_iteration(int iteration, graphchi_context &gcontext) {
    reset_rmse(gcontext.execthreads);
  }

  /**
   * Called after an iteration has finished: decay the learning rate and
   * report training/validation RMSE.
   */
  void after_iteration(int iteration, graphchi_context &gcontext) {
    libfm_rate *= libfm_mult_dec;
    training_rmse(iteration, gcontext);
    run_validation4(pvalidation_engine, gcontext);
  }
};
/**
 * Write the trained model to matrix-market files: factor matrices U/V/T/L,
 * the four corresponding bias vectors, and the global mean. Each MMOutputter
 * object writes its file from its constructor.
 */
void output_libfm_result(std::string filename) {
  MMOutputter_mat<vertex_data> mmoutput_left(filename + "_U.mm", 0, M, "This file contains LIBFM output matrix U. In each row D factors of a single user node.", latent_factors_inmem);
  MMOutputter_mat<vertex_data> mmoutput_right(filename + "_V.mm", M ,M+N, "This file contains -LIBFM output matrix V. In each row D factors of a single item node.", latent_factors_inmem);
  MMOutputter_mat<vertex_data> mmoutput_time(filename + "_T.mm", M+N ,M+N+K, "This file contains -LIBFM output matrix T. In each row D factors of a single time node.", latent_factors_inmem);
  MMOutputter_mat<vertex_data> mmoutput_last_item(filename + "_L.mm", M+N+K ,M+N+K+M, "This file contains -LIBFM output matrix L. In each row D factors of a single last item node.", latent_factors_inmem);
  MMOutputter_vec<vertex_data> mmoutput_bias_left(filename + "_U_bias.mm", 0, M, BIAS_POS, "This file contains LIBFM output bias vector. In each row a single user bias.", latent_factors_inmem);
  MMOutputter_vec<vertex_data> mmoutput_bias_right(filename + "_V_bias.mm",M ,M+N, BIAS_POS, "This file contains LIBFM output bias vector. In each row a single item bias.", latent_factors_inmem);
  MMOutputter_vec<vertex_data> mmoutput_bias_time(filename + "_T_bias.mm",M+N ,M+N+K , BIAS_POS, "This file contains LIBFM output bias vector. In each row a single time bias.", latent_factors_inmem);
  MMOutputter_vec<vertex_data> mmoutput_bias_last_item(filename + "_L_bias.mm",M+N+K ,M+N+K+M , BIAS_POS, "This file contains LIBFM output bias vector. In each row a single last item bias.", latent_factors_inmem);
  MMOutputter_scalar gmean(filename + "_global_mean.mm", "This file contains LIBFM global mean which is required for computing predictions.", globalMean);
  logstream(LOG_INFO) << " LIBFM output files (in matrix market format): " << filename << "_U.mm" << ", " << filename + "_V.mm " << filename + "_T.mm, " << filename << "_L.mm, " << filename << "_global_mean.mm, " << filename << "_U_bias.mm " << filename << "_V_bias.mm, " << filename << "_T_bias.mm, " << filename << "_L_bias.mm " <<std::endl;
}
/**
 * libfm driver: parses options, shards the training (and optional
 * validation) data, optionally warm-starts from previously saved factors,
 * runs SGD training and writes the resulting model and test predictions.
 */
int main(int argc, const char ** argv) {
  print_copyright();
  /* GraphChi initialization will read the command line
     arguments and the configuration file. */
  graphchi_init(argc, argv);
  /* Metrics object for keeping track of performance counters
     and other information. Currently required. */
  metrics m("libfm");
  //specific command line parameters for libfm
  libfm_rate = get_option_float("libfm_rate", libfm_rate);
  libfm_regw = get_option_float("libfm_regw", libfm_regw);
  libfm_regv = get_option_float("libfm_regv", libfm_regv);
  libfm_mult_dec = get_option_float("libfm_mult_dec", libfm_mult_dec);
  D = get_option_int("D", D);
  parse_command_line_args();
  parse_implicit_command_line();
  /* Preprocess data if needed, or discover preprocess files */
  int nshards = convert_matrixmarket4<edge_data>(training, false);
  init_libfm();
  if (validation != ""){
    int vshards = convert_matrixmarket4<EdgeDataType>(validation, true, M==N, VALIDATION);
    init_validation_rmse_engine<VertexDataType, EdgeDataType>(pvalidation_engine, vshards, &libfm_predict, false, true, 1);
  }
  // warm start: reload factor matrices, bias vectors and global mean written
  // by output_libfm_result() on a previous run
  if (load_factors_from_file){
    load_matrix_market_matrix(training + "_U.mm", 0, D);
    load_matrix_market_matrix(training + "_V.mm", M, D);
    load_matrix_market_matrix(training + "_T.mm", M+N, D);
    load_matrix_market_matrix(training + "_L.mm", M+N+K, D);
    vec user_bias = load_matrix_market_vector(training +"_U_bias.mm", false, true);
    vec item_bias = load_matrix_market_vector(training +"_V_bias.mm", false, true);
    vec time_bias = load_matrix_market_vector(training+ "_T_bias.mm", false, true);
    // BUGFIX: filename was "_L_bias.m" — output_libfm_result() writes "_L_bias.mm"
    vec last_item_bias = load_matrix_market_vector(training+"_L_bias.mm", false, true);
    for (uint i=0; i<M+N+K+M; i++){
      if (i < M)
        latent_factors_inmem[i].bias = user_bias[i];
      else if (i <M+N)
        latent_factors_inmem[i].bias = item_bias[i-M];
      else if (i <M+N+K)
        latent_factors_inmem[i].bias = time_bias[i-M-N];
      else
        latent_factors_inmem[i].bias = last_item_bias[i-M-N-K];
    }
    vec gm = load_matrix_market_vector(training + "_global_mean.mm", false, true);
    globalMean = gm[0];
  }
  /* Run */
  LIBFMVerticesInMemProgram program;
  graphchi_engine<VertexDataType, EdgeDataType> engine(training, nshards, false, m);
  set_engine_flags(engine);
  pengine = &engine;
  engine.run(program, niters);
  /* Output test predictions in matrix-market format */
  output_libfm_result(training);
  test_predictions3(&libfm_predict, 1);
  /* Report execution metrics */
  if (!quiet)
    metrics_report(m);
  return 0;
}
| C++ |
/**
* Copyright (c) 2009 Carnegie Mellon University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an "AS
* IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language
* governing permissions and limitations under the License.
*
* For more about this software visit:
*
* http://graphchi.org
*
* Written by Danny Bickson
*
*/
#ifndef _MATH_HPP
#define _MATH_HPP
#include "types.hpp"
#include "eigen_wrapper.hpp"
// Engine used to run the distributed Axb program (set by the including driver).
extern graphchi_engine<VertexDataType, EdgeDataType> * pengine;
double regularization;  // diagonal regularization added in the Axb product
bool debug;             // enables the DistVec/DistDouble debug printouts
void print_vec(const char * name, const vec & pvec, bool high);
/* Global state describing the next distributed linear-algebra operation
 * (r = c*A*x + d*y and friends). The operator overloads of DistVec/DistMat
 * fill these fields; the Axb update function consumes them; reset_offsets()
 * clears them after each run. */
struct math_info{
  //for Axb operation
  int increment;
  double c;
  double d;
  int x_offset, b_offset , y_offset, r_offset, div_offset, prev_offset, div_const;
  bool A_offset, A_transpose;
  std::vector<std::string> names;
  bool use_diag;
  int ortho_repeats;
  int start, end;
  //for backslash operation
  bool dist_sliced_mat_backslash;
  mat eDT;
  double maxval, minval;

  math_info(){ reset_offsets(); }

  // Restore the per-operation defaults (called after each engine run).
  void reset_offsets(){
    increment = 2;
    c = 1.0;
    d = 0.0;
    x_offset = -1;
    b_offset = -1;
    y_offset = -1;
    r_offset = -1;
    div_offset = -1;
    prev_offset = -1;
    div_const = 0;
    A_offset = false;
    A_transpose = false;
    use_diag = true;
    start = -1;
    end = -1;
    dist_sliced_mat_backslash = false;
  }

  // Hand out the next free scratch-column offset.
  int increment_offset(){
    int next_offset = increment;
    increment = increment + 1;
    return next_offset;
  }
};
bipartite_graph_descriptor info;  // graph/matrix shape, bound by init_math()
math_info mi;                     // the single global operation descriptor
#define MAX_PRINT_ITEMS 25
double runtime = 0;
/***
* UPDATE FUNCTION (ROWS)
*/
/**
* GraphChi programs need to subclass GraphChiProgram<vertex-type, edge-type>
* class. The main logic is usually in the update function.
*/
/**
 * GraphChi program evaluating, per vertex, the operation currently described
 * by the global `mi` state: r = c*A*x (+ d*y) (/ div), where x, y, r, div
 * name per-vertex scratch columns (offsets into pvec).
 */
struct Axb : public GraphChiProgram<VertexDataType, EdgeDataType> {
  /**
   * Vertex update function.
   */
  void update(graphchi_vertex<VertexDataType, EdgeDataType> &vertex, graphchi_context &gcontext) {
    // only operate on the side of the bipartite graph selected by mi
    if (vertex.id() < (uint)mi.start || vertex.id() >= (uint)mi.end)
      return;
    vertex_data& user = latent_factors_inmem[vertex.id()];
    bool rows = vertex.id() < (uint)info.get_start_node(false);
    if (info.is_square())
      rows = mi.A_transpose;
    assert(mi.r_offset >=0);
    //store previous value for convergence detection
    if (mi.prev_offset >= 0)
      user.pvec[mi.prev_offset ] = user.pvec[mi.r_offset];
    double val = 0;
    assert(mi.x_offset >=0 || mi.y_offset>=0);
    /*** COMPUTE r = c*A*x ********/
    if (mi.A_offset && mi.x_offset >= 0){
      // sparse row/column dot product over the vertex's edges
      for(int e=0; e < vertex.num_edges(); e++) {
        const edge_data & edge = vertex.edge(e)->get_data();
        const vertex_data & movie = latent_factors_inmem[vertex.edge(e)->vertex_id()];
        val += (edge.weight * movie.pvec[mi.x_offset]);
      }
      if (info.is_square() && mi.use_diag)// add the diagonal term
        val += (/*mi.c**/ (user.A_ii+ regularization) * user.pvec[mi.x_offset]);
      val *= mi.c;
    }
    /***** COMPUTE r = c*I*x  *****/
    else if (!mi.A_offset && mi.x_offset >= 0){
      val = mi.c*user.pvec[mi.x_offset];
    }
    /**** COMPUTE r+= d*y (optional) ***/
    if (mi.y_offset>= 0){
      val += mi.d*user.pvec[mi.y_offset];
    }
    /***** compute r = (... ) / div */
    if (mi.div_offset >= 0){
      val /= user.pvec[mi.div_offset];
    }
    user.pvec[mi.r_offset] = val;
  } //end update
}; //end Axb
// Single shared program instance, run by the DistVec assignment operators.
Axb program;
/* Bind the global graph descriptor used by the distributed-math wrappers
 * and reset the per-operation state. */
void init_math(bipartite_graph_descriptor & _info, int ortho_repeats = 3){
  mi.reset_offsets();
  mi.ortho_repeats = ortho_repeats;
  info = _info;
}
class DistMat;
class DistDouble;
/**
 * A "distributed vector": a named column (offset) of the per-vertex pvec
 * scratch storage. Arithmetic operators do not compute anything directly;
 * they record the operation in the global `mi` descriptor, and assignment
 * (operator=) triggers a GraphChi engine run of the Axb program.
 */
class DistVec{
  public:
    int offset; //real location in memory
    int display_offset; //offset to print out
    int prev_offset;
    std::string name; //optional
    bool transpose;   // which side of the bipartite graph this vector lives on
    bipartite_graph_descriptor info;
    int start;
    int end;

    // Compute the [start,end) vertex-id range from info and transpose.
    void init(){
      start = info.get_start_node(!transpose);
      end = info.get_end_node(!transpose);
      assert(start < end && start >= 0 && end >= 1);
      //debug_print(name);
    };

    int size(){ return end-start; }

    DistVec(const bipartite_graph_descriptor &_info, int _offset, bool _transpose, const std::string & _name){
      offset = _offset;
      display_offset = _offset;
      name = _name;
      info = _info;
      transpose = _transpose;
      prev_offset = -1;
      init();
    }
    // Variant that also tracks a "previous value" column for convergence checks.
    DistVec(const bipartite_graph_descriptor &_info, int _offset, bool _transpose, const std::string & _name, int _prev_offset){
      offset = _offset;
      display_offset = _offset;
      name = _name;
      info = _info;
      transpose = _transpose;
      assert(_prev_offset < data_size);
      prev_offset = _prev_offset;
      init();
    }

    // Unary minus: records d = -1 in the operation descriptor.
    DistVec& operator-(){
      mi.d=-1.0;
      return *this;
    }
    // Records (this) - other as x_offset/y_offset with negated d.
    DistVec& operator-(const DistVec & other){
      mi.x_offset = offset;
      mi.y_offset = other.offset;
      transpose = other.transpose;
      if (mi.d == 0)
        mi.d = -1.0;
      else
        mi.d*=-1.0;
      return *this;
    }
    DistVec& operator+(){
      if (mi.d == 0)
        mi.d=1.0;
      return *this;
    }
    DistVec& operator+(const DistVec &other){
      mi.x_offset =offset;
      mi.y_offset = other.offset;
      transpose = other.transpose;
      return *this;
    }
    DistVec& operator+(const DistMat &other);
    DistVec& operator-(const DistMat &other);
    // Element-wise division by another distributed vector.
    DistVec& operator/(const DistVec &other){
      mi.div_offset = other.offset;
      return *this;
    }
    DistVec& operator/(const DistDouble & other);
    DistVec& operator/(double val){
      assert(val != 0);
      assert(mi.d == 0);
      mi.d = 1/val;
      return *this;
    }

    // Assignment from another DistVec/expression: fills in the remaining
    // fields of `mi` and runs the Axb program over the engine.
    DistVec& operator=(const DistVec & vec){
      assert(offset < (info.is_square() ? 2*data_size: data_size));
      if (mi.x_offset == -1 && mi.y_offset == -1){
        mi.y_offset = vec.offset;
      }
      mi.r_offset = offset;
      assert(prev_offset < data_size);
      mi.prev_offset = prev_offset;
      if (mi.d == 0.0)
        mi.d=1.0;
      transpose = vec.transpose;
      end = vec.end;
      start = vec.start;
      mi.start = start;
      mi.end = end;
      //graphchi_engine<VertexDataType, EdgeDataType> engine(training, nshards, false, m);
      //set_engine_flags(engine);
      //Axb program;
      pengine->run(program, 1);
      debug_print(name);
      mi.reset_offsets();
      return *this;
    }

    // Assignment from an in-memory vec: copies values directly, no engine run.
    DistVec& operator=(const vec & pvec){
      assert(offset >= 0);
      assert(pvec.size() == info.num_nodes(true) || pvec.size() == info.num_nodes(false));
      assert(start < end);
      if (!info.is_square() && pvec.size() == info.num_nodes(false)){
        transpose = true;
      }
      else {
        transpose = false;
      }
      for (int i=start; i< end; i++){
        latent_factors_inmem[i].pvec[offset] = pvec[i-start];
      }
      debug_print(name);
      return *this;
    }

    // Gather this distributed column into an in-memory vec.
    vec to_vec(){
      vec ret = zeros(end-start);
      for (int i=start; i< end; i++){
        ret[i-start] = latent_factors_inmem[i].pvec[offset];
      }
      return ret;
    }

    double get_pos(int i){
      return latent_factors_inmem[i].pvec[offset];
    }

    // Print the first MAX_PRINT_ITEMS entries when debug is enabled.
    void debug_print(const char * name){
      if (debug){
        std::cout<<name<<"["<<display_offset<<"]" << std::endl;
        for (int i=start; i< std::min(end, start+MAX_PRINT_ITEMS); i++){
          //std::cout<<latent_factors_inmem(i).pvec[(mi.r_offset==-1)?offset:mi.r_offset]<<" ";
          printf("%.5lg ", fabs(latent_factors_inmem[i].pvec[(mi.r_offset==-1)?offset:mi.r_offset]));
        }
        printf("\n");
      }
    }
    void debug_print(std::string name){ return debug_print(name.c_str());}

    double operator[](int i){
      assert(i < end - start);
      return latent_factors_inmem[i+start].pvec[offset];
    }

    DistDouble operator*(const DistVec & other);

    DistVec& operator*(const double val){
      assert(val!= 0);
      mi.d=val;
      return *this;
    }
    DistVec& operator*(const DistDouble &dval);

    DistMat &operator*(DistMat & v);

    // Transpose is a no-op for vectors (orientation is carried by `transpose`).
    DistVec& _transpose() {
      /*if (!config.square){
        start = n; end = m+n;
        }*/
      return *this;
    }

    DistVec& operator=(DistMat &mat);
};
/**
 * A "distributed sliced matrix": a contiguous band of pvec scratch columns
 * [start_offset, end_offset). operator[](pos) yields the pos-th column of
 * the slice (slice-relative indexing) as a DistVec.
 */
class DistSlicedMat{
  public:
    bipartite_graph_descriptor info;
    int start_offset;   // first scratch column of the slice
    int end_offset;     // one past the last scratch column
    std::string name; //optional
    int start;          // first vertex id covered
    int end;            // one past the last vertex id covered
    bool transpose;

    DistSlicedMat(int _start_offset, int _end_offset, bool _transpose, const bipartite_graph_descriptor &_info, std::string _name){
      //assert(_start_offset < _end_offset);
      assert(_start_offset >= 0);
      assert(_info.total() > 0);
      transpose = _transpose;
      info = _info;
      init();
      start_offset = _start_offset;
      end_offset = _end_offset;
      name = _name;
    }

    DistSlicedMat& operator=(DistMat & other);

    // Compute the [start,end) vertex-id range from info and transpose.
    void init(){
      start = info.get_start_node(!transpose);
      end = info.get_end_node(!transpose);
      assert(start < end && start >= 0 && end >= 1);
      //debug_print(name);
    };

    // dim == 1: number of rows (vertices); otherwise: number of columns.
    int size(int dim){ return (dim == 1) ? (end-start) : (end_offset - start_offset) ; }

    // Copy columns of pmat into slice columns [start_col, end_col).
    void set_cols(int start_col, int end_col, const mat& pmat){
      assert(start_col >= 0);
      assert(end_col <= end_offset - start_offset);
      assert(pmat.rows() == end-start);
      assert(pmat.cols() >= end_col - start_col);
      for (int i=start_col; i< end_col; i++)
        this->operator[](i) = get_col(pmat, i-start_col);
    }

    // Gather slice columns [start_col, end_col) into an in-memory matrix.
    mat get_cols(int start_col, int end_col){
      assert(start_col < end_offset - start_offset);
      assert(start_offset + end_col <= end_offset);
      mat retmat = zeros(end-start, end_col - start_col);
      // BUGFIX: this previously read operator[](i-start_col), i.e. always
      // columns [0, end_col-start_col) regardless of start_col, which is
      // inconsistent with set_cols() above and with the slice-relative
      // indexing asserted by operator[].
      for (int i=start_col; i< end_col; i++)
        set_col(retmat, i-start_col, this->operator[](i).to_vec());
      return retmat;
    }

    void operator=(mat & pmat){
      assert(end_offset-start_offset <= pmat.cols());
      assert(end-start == pmat.rows());
      set_cols(0, pmat.cols(), pmat);
    }

    std::string get_name(int pos){
      assert(pos < end_offset - start_offset);
      assert(pos >= 0);
      return name;
    }

    // Slice-relative column accessor: returns column (start_offset + pos).
    DistVec operator[](int pos){
      assert(pos < end_offset-start_offset);
      assert(pos >= 0);
      DistVec ret(info, start_offset + pos, transpose, get_name(pos));
      ret.display_offset = pos;
      return ret;
    }
};
/*
* wrapper for computing r = c*A*x+d*b*y
*/
/* Distributed-matrix handle. Like DistVec, its operators only record the
 * requested operation (r = c*A*x + d*y) in the global `mi` descriptor; the
 * actual product runs when a DistVec assignment fires the engine. */
class DistMat{
  public:
    bool transpose;
    bipartite_graph_descriptor info;

    DistMat(const bipartite_graph_descriptor& _info) {
      info = _info;
      transpose = false;
    };

    // Record A*v: marks the matrix as participating and sets x.
    DistMat &operator*(const DistVec & v){
      mi.x_offset = v.offset;
      mi.A_offset = true;
      //v.transpose = transpose;
      //r_offset = A_offset;
      return *this;
    }
    DistMat &operator*(const DistDouble &d);

    DistMat &operator-(){
      mi.c=-1.0;
      return *this;
    }

    DistMat &operator/(const DistVec & v){
      mi.div_offset = v.offset;
      return *this;
    }

    DistMat &operator+(){
      mi.c=1.0;
      return *this;
    }
    // Record (+/-) d*y added to the product.
    DistMat &operator+(const DistVec &v){
      mi.y_offset = v.offset;
      if (mi.d == 0.0)
        mi.d=1.0;
      return *this;
    }
    DistMat &operator-(const DistVec &v){
      mi.y_offset = v.offset;
      if (mi.d == 0.0)
        mi.d=-1.0;
      else
        mi.d*=-1.0;
      return *this;
    }

    // Mark the next product as using A' instead of A.
    DistMat & _transpose(){
      transpose = true;
      mi.A_transpose = true;
      return *this;
    }
    DistMat & operator~(){
      return _transpose();
    }

    DistMat & backslash(DistSlicedMat & U){
      mi.dist_sliced_mat_backslash = true;
      transpose = U.transpose;
      return *this;
    }

    void set_use_diag(bool use){
      mi.use_diag = use;
    }
};
// Evaluate the expression recorded in `mi` into this vector: runs one Axb
// engine pass over the graph, then clears the accumulated offsets so the
// next expression starts fresh.
DistVec& DistVec::operator=(DistMat &mat){
mi.r_offset = offset;
assert(prev_offset < data_size);
mi.prev_offset = prev_offset;
transpose = mat.transpose;
mi.start = info.get_start_node(!transpose);
mi.end = info.get_end_node(!transpose);
//graphchi_engine<VertexDataType, EdgeDataType> engine(training, nshards, false, m);
//set_engine_flags(engine);
//Axb program;
pengine->run(program, 1);
debug_print(name);
mi.reset_offsets();
mat.transpose = false;
return *this;
}
// v + (matrix expression): record this vector as the additive y term.
DistVec& DistVec::operator+(const DistMat &other){
mi.y_offset = offset;
transpose = other.transpose;
return *this;
}
// v - (matrix expression): record y and flip the sign of the c coefficient.
DistVec& DistVec::operator-(const DistMat & other){
mi.y_offset = offset;
transpose = other.transpose;
if (mi.c == 0)
mi.c = -1;
else mi.c *= -1;
return *this;
}
// x' * A: record this vector as the x operand of the matrix expression.
DistMat& DistVec::operator*(DistMat & v){
mi.x_offset = offset;
mi.A_offset = true;
return v;
}
// Scalar wrapper participating in the DistVec/DistMat expression DSL.
// Multiplication operators only record the scale factor in the global `mi`.
class DistDouble{
public:
double val;
std::string name;
DistDouble() {};
DistDouble(double _val) : val(_val) {};
// d * v: record d as the vector scale factor.
DistVec& operator*(DistVec & dval){
mi.d=val;
return dval;
}
// d * A: record d as the matrix scale factor.
DistMat& operator*(DistMat & mat){
mi.c = val;
return mat;
}
DistDouble operator/(const DistDouble dval){
DistDouble mval;
mval.val = val / dval.val;
return mval;
}
bool operator<(const double other){
return val < other;
}
DistDouble & operator=(const DistDouble & other){
val = other.val;
debug_print(name);
return *this;
}
bool operator==(const double _val){
return val == _val;
}
void debug_print(const char * name){
std::cout<<name<<" "<<val<<std::endl;
}
double toDouble(){
return val;
}
void debug_print(std::string name){ return debug_print(name.c_str()); }
};
// Dot product of two distributed vectors, computed eagerly in memory
// (optionally pre-scaled by mi.d, which defaults to 1).
DistDouble DistVec::operator*(const DistVec & vec){
mi.y_offset = offset;
mi.b_offset = vec.offset;
if (mi.d == 0)
mi.d = 1.0;
assert(mi.y_offset >=0 && mi.b_offset >= 0);
double val = 0;
for (int i=start; i< end; i++){
const vertex_data * data = &latent_factors_inmem[i];
double * pv = (double*)&data->pvec[0];
// if (y_offset >= 0 && b_offset == -1)
//val += pv[y_offset] * pv[y_offset];
val += mi.d* pv[mi.y_offset] * pv[mi.b_offset];
}
mi.reset_offsets();
DistDouble mval;
mval.val = val;
return mval;
}
// v * scalar: record the scale factor for the next expression evaluation.
DistVec& DistVec::operator*(const DistDouble &dval){
mi.d = dval.val;
return *this;
}
// Matlab-style size(A, pos).
// NOTE(review): the return value ignores pos -- both pos==1 and pos==2 yield
// the same dimension. Confirm whether pos should select between the two
// sides of the bipartite graph.
int size(DistMat & A, int pos){
assert(pos == 1 || pos == 2);
return A.info.num_nodes(!A.transpose);
}
// A * scalar: record the matrix scale factor.
DistMat &DistMat::operator*(const DistDouble &d){
mi.c = d.val;
return *this;
}
// Scalar square root.
DistDouble sqrt(DistDouble & dval){
DistDouble mval;
mval.val=sqrt(dval.val);
return mval;
}
// Euclidean (L2) norm of a distributed vector, computed eagerly in memory.
DistDouble norm(const DistVec &vec){
assert(vec.offset>=0);
assert(vec.start < vec.end);
DistDouble mval;
mval.val = 0;
for (int i=vec.start; i < vec.end; i++){
const vertex_data * data = &latent_factors_inmem[i];
double * px = (double*)&data->pvec[0];
mval.val += px[vec.offset]*px[vec.offset];
}
mval.val = sqrt(mval.val);
return mval;
}
// Norm of a matrix expression: evaluates it into a temporary vector stored
// in column 0 (using the global `info`), then takes that vector's norm.
DistDouble norm(DistMat & mat){
DistVec vec(info, 0, mat.transpose, "norm");
vec = mat;
return norm((const DistVec&)vec);
}
// Extract the diagonal (the per-vertex A_ii values) of a square matrix.
vec diag(DistMat & mat){
assert(info.is_square());
vec ret = zeros(info.total());
for (int i=0; i< info.total(); i++){
ret[i] = latent_factors_inmem[i].A_ii;
}
return ret;
}
}
#if 0
void orthogonalize_vs_all(DistSlicedMat & mat, int curoffset){
assert(mi.ortho_repeats >=1 && mi.ortho_repeats <= 3);
INITIALIZE_TRACER(orthogonalize_vs_alltrace, "orthogonalization step");
BEGIN_TRACEPOINT(orthogonalize_vs_alltrace);
bool old_debug = debug;
debug = false;
DistVec current = mat[curoffset];
//DistDouble * alphas = new DistDouble[curoffset];
//cout<<current.to_vec().transpose() << endl;
for (int j=0; j < mi.ortho_repeats; j++){
for (int i=0; i< curoffset; i++){
DistDouble alpha = mat[i]*current;
// //cout<<mat[i].to_vec().transpose()<<endl;
// //cout<<"alpha is: " <<alpha.toDouble()<<endl;
if (alpha.toDouble() > 1e-10)
current = current - mat[i]*alpha;
}
}
END_TRACEPOINT(orthogonalize_vs_alltrace);
debug = old_debug;
current.debug_print(current.name);
}
#endif
/**
 * Orthogonalize column `curoffset` of `mat` against all earlier columns
 * (classical Gram-Schmidt, repeated mi.ortho_repeats times for numerical
 * stability), then normalize it. The column's pre-normalization norm is
 * returned via `alpha`. Works directly on the pvec columns stored in
 * latent_factors_inmem.
 */
void orthogonalize_vs_all(DistSlicedMat & mat, int curoffset, double &alpha){
assert(mi.ortho_repeats >=1 && mi.ortho_repeats <= 3);
bool old_debug = debug;
debug = false;
DistVec current = mat[curoffset];
assert(mat.start_offset <= current.offset);
double * alphas = new double[curoffset];
//DistDouble * alphas = new DistDouble[curoffset];
//cout<<current.to_vec().transpose() << endl;
if (curoffset > 0){
for (int j=0; j < mi.ortho_repeats; j++){
memset(alphas, 0, sizeof(double)*curoffset);
// projections <col_i, current>, parallel over the earlier columns i
#pragma omp parallel for
for (int i=mat.start_offset; i< current.offset; i++){
for (int k=info.get_start_node(!current.transpose); k< info.get_end_node(!current.transpose); k++){
alphas[i-mat.start_offset] += latent_factors_inmem[k].pvec[i] * latent_factors_inmem[k].pvec[current.offset];
}
}
// subtract each projection (parallel over rows k; columns i sequential,
// since every i writes the same target column)
for (int i=mat.start_offset; i< current.offset; i++){
#pragma omp parallel for
for (int k=info.get_start_node(!current.transpose); k< info.get_end_node(!current.transpose); k++){
latent_factors_inmem[k].pvec[current.offset] -= alphas[i-mat.start_offset] * latent_factors_inmem[k].pvec[i];
}
}
} //for ortho_repeast
}
delete [] alphas;
debug = old_debug;
current.debug_print(current.name);
// alpha = 0;
// compute the column's norm ...
double sum = 0;
int k;
//#pragma omp parallel for private(k) reduction(+: sum)
for (k=info.get_start_node(!current.transpose); k< info.get_end_node(!current.transpose); k++){
sum = sum + pow(latent_factors_inmem[k].pvec[current.offset],2);
}
alpha = sqrt(sum);
// ... and normalize, unless the column numerically vanished
if (alpha >= 1e-10 ){
#pragma omp parallel for
for (int k=info.get_start_node(!current.transpose); k< info.get_end_node(!current.transpose); k++){
latent_factors_inmem[k].pvec[current.offset]/=alpha;
}
}
}
/**
 * Scale column `curoffset` of `mat` by 1/a and subtract the (1/a-scaled)
 * projections of the earlier columns from it.
 */
void multiply(DistSlicedMat & mat, int curoffset, double a){
assert(a>0);
DistVec current = mat[curoffset];
assert(mat.start_offset <= current.offset);
vec result = zeros(curoffset);
if (curoffset > 0){
// projections <col_i, current>, parallel over the earlier columns i
#pragma omp parallel for
for (int i=mat.start_offset; i< current.offset; i++){
for (int k=info.get_start_node(!current.transpose); k< info.get_end_node(!current.transpose); k++){
result[i-mat.start_offset] += latent_factors_inmem[k].pvec[i] * latent_factors_inmem[k].pvec[current.offset];
}
}
// NOTE(review): this loop divides column index `curoffset`, while every
// other loop in this function addresses column `current.offset`
// (= mat.start_offset + curoffset). The two differ whenever
// mat.start_offset != 0 -- confirm which column is intended.
#pragma omp parallel for
for (int k=info.get_start_node(!current.transpose); k< info.get_end_node(!current.transpose); k++){
latent_factors_inmem[k].pvec[curoffset] /= a;
}
for (int i=mat.start_offset; i< current.offset; i++){
#pragma omp parallel for
for (int k=info.get_start_node(!current.transpose); k< info.get_end_node(!current.transpose); k++){
latent_factors_inmem[k].pvec[current.offset] -= result[i-mat.start_offset]/a * latent_factors_inmem[k].pvec[i];
}
}
}
current.debug_print(current.name);
}
// v / scalar: record d = 1/val in `mi` for the next expression evaluation.
// Requires that no scale factor has been recorded yet.
DistVec& DistVec::operator/(const DistDouble & other){
assert(other.val != 0);
assert(mi.d == 0);
mi.d = 1/other.val;
return *this;
}
#endif //_MATH_HPP
| C++ |
/**
* @file
* @author Danny Bickson
* @version 1.0
*
* @section LICENSE
*
* Copyright [2012] [Carnegie Mellon University]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @section DESCRIPTION
* Matrix factorization using RBM (Restricted Bolzman Machines) algorithm.
* Algorithm is described in the paper:
* G. Hinton. A Practical Guide to Training Restricted Boltzmann Machines. University of Toronto Tech report UTML TR 2010-003
*
*/
#include "common.hpp"
#include "eigen_wrapper.hpp"
// RBM hyper-parameters; all are overridable from the command line (see main()).
double rbm_alpha = 0.1; //learning rate, decayed by rbm_mult_step_dec each iteration
double rbm_beta = 0.06; //regularization weight in the CD weight update
int rbm_bins = 6; //number of discrete rating bins
double rbm_scaling = 1; //ratings are divided by this factor to map them to bins
double rbm_mult_step_dec= 0.9; //multiplicative per-iteration decay of rbm_alpha
/**
 * Vertex-id classification: users occupy ids [0, M), items [M, M+N), and
 * time nodes (when present) start at M+N.
 * BUG FIX: is_item() previously tested `id < N`, so for M >= N no vertex was
 * ever classified as an item (every item id in [M, M+N) failed the test).
 */
bool is_user(vid_t id){ return id < M; }
bool is_item(vid_t id){ return id >= M && id < M+N; }
bool is_time(vid_t id){ return id >= M+N; }
/**
 * Fill a[0..d-1] with uniform random values in (-c/2, c/2),
 * drawn sequentially from drand48().
 */
void setRand2(double * a, int d, float c){
  double * const stop = a + d;
  for (double * p = a; p != stop; ++p)
    *p = (drand48() - 0.5) * c;
}
/**
 * Inner product of the first D entries of a and b, where D is the global
 * feature-vector width. Accumulates in float, matching callers' expectations.
 */
float dot(double * a, double * b){
  float acc = 0;
  for (int k = 0; k < D; ++k)
    acc += a[k] * b[k];
  return acc;
}
// Pseudo-index understood by set_val/get_val to address the bias field.
#define BIAS_POS -1
// Per-vertex state: users store 3*D hidden-unit values in pvec, items store
// rbm_bins biases + rbm_bins*D weights (see rbm_user / rbm_movie overlays).
struct vertex_data {
vec pvec; //storing the feature vector
double bias;
vertex_data() {
bias = 0;
}
void set_val(int index, float val){
if (index == BIAS_POS)
bias = val;
else pvec[index] = val;
}
float get_val(int index){
if (index== BIAS_POS)
return bias;
else return pvec[index];
}
};
/*
 * Overlay view of a user's vertex_data.pvec (allocated as 3*D doubles):
 * h = pvec = D * DOUBLE (hidden-unit activations)
 * h0 = weight = D * DOUBLE (sampled hidden state, positive phase)
 * h1 = weight+D = D * DOUBLE (sampled hidden state, negative phase)
 */
struct rbm_user{
double * h;
double * h0;
double * h1;
// Note: casts away const -- the view aliases (and may mutate) vdata.pvec.
rbm_user(const vertex_data & vdata){
h = (double*)&vdata.pvec[0];
h0 = h + D;
h1 = h0 + D;
}
rbm_user & operator=(vertex_data & data){
h = &data.pvec[0];
h0 = h + D;
h1 = h0 + D;
return * this;
}
};
/**
 * Overlay view of an item's vertex_data:
 * ni = bias = DOUBLE (aliases vertex_data.bias)
 * bi = pvec = rbm_bins * DOUBLE (per-bin rating biases)
 * w = weight = rbm_bins * D * DOUBLE (per-bin weight vectors, after bi)
 */
struct rbm_movie{
double * bi;
double * ni;
double * w;
// Note: casts away const -- the view aliases (and may mutate) vdata's fields.
rbm_movie(const vertex_data& vdata){
ni = (double*)&vdata.bias;
bi = (double*)&vdata.pvec[0];
w = bi + rbm_bins;
}
rbm_movie & operator=(vertex_data & data){
ni = (double*)&data.bias;
bi = (double*)&data.pvec[0];
w = bi + rbm_bins;
return * this;
}
};
/**
 * Expected rating of (user, item) under the RBM: a softmax over rating bins
 * weighted by the bin index, clamped to [minval, maxval] and rescaled by
 * rbm_scaling. The prediction is returned via `prediction`; the return value
 * is the squared error against `rating`.
 */
float rbm_predict(const rbm_user & usr,
const rbm_movie & mov,
const float rating,
double & prediction,
void * extra){
float ret = 0;
double nn = 0;
for(int r = 0; r < rbm_bins; ++r){
double zz = exp(mov.bi[r] + dot(usr.h, &mov.w[r*D]));
if (std::isinf(zz))
std::cout<<" mov.bi[r] " << mov.bi[r] << " dot: " << dot(usr.h, &mov.w[r*D]) << std::endl;
ret += zz * (float)(r);
assert(!std::isnan(ret));
nn += zz;
}
assert(!std::isnan(ret));
assert(std::fabs(nn) > 1e-32);
ret /= nn; // normalize the softmax expectation
if(ret < minval) ret = minval;
else if(ret > maxval) ret = maxval;
assert(!std::isnan(ret));
prediction = ret * rbm_scaling;
assert(!std::isnan(prediction));
return pow(prediction - rating,2);
}
// Convenience overload on raw vertex_data: wraps both sides in the overlay
// views and forwards to the version above.
float rbm_predict(const vertex_data & usr,
const vertex_data & mov,
const float rating,
double & prediction,
void * extra){
return rbm_predict(rbm_user((vertex_data&)usr), rbm_movie((vertex_data&)mov), rating, prediction, NULL);
}
/**
 * Sample a rating bin for (user, item) from the softmax distribution induced
 * by the sampled hidden state h0 (inverse-CDF sampling over the unnormalized
 * bin weights); used as the CD negative phase. Returns the squared error.
 * NOTE(review): if floating-point round-off leaves rd >= the running sum,
 * the fallback result is bin 0.
 */
float predict1(const rbm_user & usr,
const rbm_movie & mov,
const float rating,
double & prediction){
vec zz = zeros(rbm_bins);
float szz = 0;
for(int r = 0; r < rbm_bins; ++r){
zz[r] = exp(mov.bi[r] + dot(usr.h0, &mov.w[r*D]));
szz += zz[r];
}
// draw a uniform point in [0, total weight) and find its bin
float rd = drand48() * szz;
szz = 0;
int ret = 0;
for(int r = 0; r < rbm_bins; ++r){
szz += zz[r];
if(rd < szz){
ret = r;
break;
}
}
prediction = ret * rbm_scaling;
assert(!std::isnan(prediction));
return pow(prediction - rating, 2);
}
/// Logistic function: maps any real x into (0, 1).
inline float sigmoid(float x){
  return 1 / (1 + exp(-x));
}
#include "util.hpp"
/**
* Type definitions. Remember to create suitable graph shards using the
* Sharder-program.
*/
typedef vertex_data VertexDataType;
typedef float EdgeDataType; // Edges store the "rating" of user->movie pair
// Training engine plus an optional validation-RMSE engine (set up in main()).
graphchi_engine<VertexDataType, EdgeDataType> * pengine = NULL;
graphchi_engine<VertexDataType, EdgeDataType> * pvalidation_engine = NULL;
// One vertex_data per node: users at ids [0, M), items at [M, M+N).
std::vector<vertex_data> latent_factors_inmem;
#include "rmse.hpp"
#include "rmse_engine.hpp"
#include "io.hpp"
/**
* GraphChi programs need to subclass GraphChiProgram<vertex-type, edge-type>
* class. The main logic is usually in the update function.
*/
/**
 * RBM training program. Iterations 0 and 1 are initialization passes
 * (rating histograms, weight/bias init); from iteration 2 onwards each
 * pass runs one contrastive-divergence (CD-1) step per user vertex.
 */
struct RBMVerticesInMemProgram : public GraphChiProgram<VertexDataType, EdgeDataType> {
/**
* Called before an iteration is started.
*/
void before_iteration(int iteration, graphchi_context &gcontext) {
reset_rmse(gcontext.execthreads);
}
/**
* Called after an iteration has finished.
*/
void after_iteration(int iteration, graphchi_context &gcontext) {
// decay the learning rate once per iteration
rbm_alpha *= rbm_mult_step_dec;
training_rmse(iteration, gcontext);
// iterations 0/1 are initialization, so validation only runs from iter 2
if (iteration >= 2)
run_validation(pvalidation_engine, gcontext);
else std::cout<<std::endl;
}
/**
* Vertex update function.
*/
void update(graphchi_vertex<VertexDataType, EdgeDataType> &vertex, graphchi_context &gcontext) {
if (gcontext.iteration == 0){
// init pass 1: allocate per-user hidden state and build per-item
// rating histograms in mov.bi
if (is_user(vertex.id()) && vertex.num_outedges() > 0){
vertex_data& user = latent_factors_inmem[vertex.id()];
user.pvec = zeros(D*3); // h | h0 | h1 (see rbm_user overlay)
for(int e=0; e < vertex.num_outedges(); e++) {
rbm_movie mov = latent_factors_inmem[vertex.edge(e)->vertex_id()];
float observation = vertex.edge(e)->get_data();
int r = (int)(observation/rbm_scaling);
assert(r < rbm_bins);
mov.bi[r]++; // count ratings falling into bin r
}
}
return;
}
else if (gcontext.iteration == 1){
// init pass 2: random weights and log-probability rating biases per item
if (vertex.num_inedges() > 0){
rbm_movie mov = latent_factors_inmem[vertex.id()];
setRand2(mov.w, D*rbm_bins, 0.001);
for(int r = 0; r < rbm_bins; ++r){
mov.bi[r] /= (double)vertex.num_inedges();
mov.bi[r] = log(1E-9 + mov.bi[r]);
if (mov.bi[r] > 1000){
assert(false);
logstream(LOG_FATAL)<<"Numerical overflow" <<std::endl;
}
}
}
return; //done with initialization
}
//go over all user nodes
if (is_user(vertex.id()) && vertex.num_outedges()){
vertex_data & user = latent_factors_inmem[vertex.id()];
user.pvec = zeros(3*D); // reset h/h0/h1 for this CD step
rbm_user usr(user);
vec v1 = zeros(vertex.num_outedges());
// positive phase: accumulate hidden activations from observed ratings
//go over all ratings
for(int e=0; e < vertex.num_outedges(); e++) {
float observation = vertex.edge(e)->get_data();
rbm_movie mov = latent_factors_inmem[vertex.edge(e)->vertex_id()];
int r = (int)(observation / rbm_scaling);
assert(r < rbm_bins);
for(int k=0; k < D; k++){
usr.h[k] += mov.w[D*r + k];
assert(!std::isnan(usr.h[k]));
}
}
// sample binary hidden units h0 from the sigmoid activations
for(int k=0; k < D; k++){
usr.h[k] = sigmoid(usr.h[k]);
if (drand48() < usr.h[k])
usr.h0[k] = 1;
else usr.h0[k] = 0;
}
int i = 0;
double prediction;
// negative phase: sample reconstructed ratings v1 given h0
for(int e=0; e < vertex.num_outedges(); e++) {
rbm_movie mov = latent_factors_inmem[vertex.edge(e)->vertex_id()];
float observation = vertex.edge(e)->get_data();
predict1(usr, mov, observation, prediction);
int vi = (int)(prediction / rbm_scaling);
v1[i] = vi;
i++;
}
i = 0;
// recompute hidden activations from the reconstruction
for(int e=0; e < vertex.num_outedges(); e++) {
rbm_movie mov = latent_factors_inmem[vertex.edge(e)->vertex_id()];
int r = (int)v1[i];
for (int k=0; k< D;k++){
usr.h1[k] += mov.w[r*D+k];
}
i++;
}
// sample binary hidden units h1
for (int k=0; k < D; k++){
usr.h1[k] = sigmoid(usr.h1[k]);
if (drand48() < usr.h1[k])
usr.h1[k] = 1;
else usr.h1[k] = 0;
}
i = 0;
// CD-1 weight update + training-RMSE accumulation
for(int e=0; e < vertex.num_outedges(); e++) {
rbm_movie mov = latent_factors_inmem[vertex.edge(e)->vertex_id()];
float observation = vertex.edge(e)->get_data();
double prediction;
rbm_predict(user, mov, observation, prediction, NULL);
double pui = prediction / rbm_scaling;
double rui = observation / rbm_scaling;
rmse_vec[omp_get_thread_num()] += (pui - rui) * (pui - rui);
//nn += 1.0;
int vi0 = (int)(rui);
int vi1 = (int)v1[i];
for (int k = 0; k < D; k++){
mov.w[D*vi0+k] += rbm_alpha * (usr.h0[k] - rbm_beta * mov.w[vi0*D+k]);
assert(!std::isnan(mov.w[D*vi0+k]));
mov.w[D*vi1+k] -= rbm_alpha * (usr.h1[k] + rbm_beta * mov.w[vi1*D+k]);
assert(!std::isnan(mov.w[D*vi1+k]));
}
i++;
}
}
}
};
// Dump the learned factors to matrix-market files: user matrix (_U.mm),
// item matrix (_V.mm) and the item bias vector (_V_bias.mm).
void output_rbm_result(std::string filename) {
MMOutputter_mat<vertex_data> user_mat(filename + "_U.mm", 0, M, "This file contains RBM output matrix U. In each row D factors of a single user node.", latent_factors_inmem);
MMOutputter_mat<vertex_data> mmoutput_right(filename + "_V.mm", M ,M+N, "This file contains RBM output matrix V. In each row D factors of a single item node.", latent_factors_inmem);
MMOutputter_vec<vertex_data> mmoutput_bias_right(filename + "_V_bias.mm",M ,M+N , BIAS_POS, "This file contains RBM output bias vector. In each row a single item ni.", latent_factors_inmem);
logstream(LOG_INFO) << "RBM output files (in matrix market format): " << filename << "_U.mm" <<
", " << filename + "_V.mm " << std::endl;
}
// Allocate per-node state. Item nodes get rbm_bins bias slots plus
// D*rbm_bins weights; user pvecs are allocated lazily during iteration 0
// of the update function.
void rbm_init(){
srand48(time(NULL));
latent_factors_inmem.resize(M+N);
#pragma omp parallel for
for(int i = 0; i < (int)N; ++i){
vertex_data & movie = latent_factors_inmem[M+i];
movie.pvec = zeros(rbm_bins + D * rbm_bins);
movie.bias = 0;
}
logstream(LOG_INFO) << "RBM initialization ok" << std::endl;
}
// Program entry point: parse options, shard/convert the input, train the
// RBM for `niters` iterations, then write factors and test predictions.
int main(int argc, const char ** argv) {
print_copyright();
//* GraphChi initialization will read the command line arguments and the configuration file. */
graphchi_init(argc, argv);
/* Metrics object for keeping track of performance counters
and other information. Currently required. */
metrics m("rbm-inmemory-factors");
/* Basic arguments for application. NOTE: File will be automatically 'sharded'. */
rbm_bins = get_option_int("rbm_bins", rbm_bins);
rbm_alpha = get_option_float("rbm_alpha", rbm_alpha);
rbm_beta = get_option_float("rbm_beta", rbm_beta);
rbm_mult_step_dec = get_option_float("rbm_mult_step_dec", rbm_mult_step_dec);
rbm_scaling = get_option_float("rbm_scaling", rbm_scaling);
parse_command_line_args();
parse_implicit_command_line();
mytimer.start();
/* Preprocess data if needed, or discover preprocess files */
int nshards = convert_matrixmarket<float>(training);
rbm_init();
if (validation != ""){
int vshards = convert_matrixmarket<EdgeDataType>(validation, NULL, 0, 0, 3, VALIDATION);
init_validation_rmse_engine<VertexDataType, EdgeDataType>(pvalidation_engine, vshards, &rbm_predict);
}
/* load initial state from disk (optional) */
if (load_factors_from_file){
// user rows hold 3*D values (h|h0|h1); item rows hold rbm_bins*(D+1)
load_matrix_market_matrix(training + "_U.mm", 0, 3*D);
load_matrix_market_matrix(training + "_V.mm", M, rbm_bins*(D+1));
vec item_bias = load_matrix_market_vector(training +"_V_bias.mm", false, true);
for (uint i=0; i< N; i++){
latent_factors_inmem[M+i].bias = item_bias[i];
}
}
print_config();
/* Run */
RBMVerticesInMemProgram program;
graphchi_engine<VertexDataType, EdgeDataType> engine(training, nshards, false, m);
set_engine_flags(engine);
pengine = &engine;
engine.run(program, niters);
/* Output latent factor matrices in matrix-market format */
output_rbm_result(training);
test_predictions(&rbm_predict);
/* Report execution metrics */
if (!quiet)
metrics_report(m);
return 0;
}
| C++ |
/**
* @file
* @author Danny Bickson
* @version 1.0
*
* @section LICENSE
*
* Copyright [2012] [Carnegie Mellon University]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @section DESCRIPTION
*
* Matrix factorization with the Stochastic Gradient Descent (Baseline) algorithm.
* Algorithm is described in the papers:
* 1) Matrix Factorization Techniques for Recommender Systems Yehuda Koren, Robert Bell, Chris Volinsky. In IEEE Computer, Vol. 42, No. 8. (07 August 2009), pp. 30-37.
* 2) Takács, G, Pilászy, I., Németh, B. and Tikk, D. (2009). Scalable Collaborative Filtering Approaches for Large Recommender Systems. Journal of Machine Learning Research, 10, 623-656.
*
*
*/
#include "common.hpp"
#include "eigen_wrapper.hpp"
//types of algorithms supported when computing prediction
enum{
GLOBAL_MEAN = 0, USER_MEAN = 1, ITEM_MEAN = 2
};
int algo = GLOBAL_MEAN; //selected via the --algorithm command line option
std::string algorithm;
// Per-node state for the baseline predictors; only mean_rating is used by
// this algorithm (pvec exists for compatibility with shared I/O helpers).
struct vertex_data {
double mean_rating;
vec pvec;
vertex_data() {
mean_rating = 0;
}
void set_val(int index, float val){
pvec[index] = val;
}
float get_val(int index){
return pvec[index];
}
};
#include "util.hpp"
typedef vertex_data VertexDataType;
typedef float EdgeDataType; // Edges store the "rating" of user->movie pair
// Training engine pointer; one vertex_data per node (users then items).
graphchi_engine<VertexDataType, EdgeDataType> * pengine = NULL;
std::vector<vertex_data> latent_factors_inmem;
#include "rmse.hpp"
#include "io.hpp"
/**
 * Baseline predictor: depending on the selected algorithm the prediction is
 * the global rating mean, the user's mean, or the item's mean, clamped to
 * [minval, maxval]. The prediction is returned via `prediction`; the return
 * value is the squared error against `rating`.
 */
float baseline_predict(const vertex_data& user,
  const vertex_data& movie,
  const float rating,
  double & prediction,
  void * extra = NULL){
  switch (algo){
    case USER_MEAN: prediction = user.mean_rating;  break;
    case ITEM_MEAN: prediction = movie.mean_rating; break;
    default:        prediction = globalMean;        break;
  }
  // clamp the prediction into the allowed rating range
  if (prediction > maxval) prediction = maxval;
  if (prediction < minval) prediction = minval;
  // return the squared error
  const float err = rating - prediction;
  assert(!std::isnan(err));
  return err*err;
}
/**
* GraphChi programs need to subclass GraphChiProgram<vertex-type, edge-type>
* class. The main logic is usually in the update function.
*/
/**
 * Single-pass program computing per-user or per-item rating means (the
 * global-mean case needs no per-vertex state) and the resulting train RMSE.
 */
struct BaselineVerticesInMemProgram : public GraphChiProgram<VertexDataType, EdgeDataType> {
void after_iteration(int iteration, graphchi_context &gcontext) {
training_rmse(iteration, gcontext, algo == ITEM_MEAN);
validation_rmse(&baseline_predict, gcontext);
}
/**
* Vertex update function.
*/
void update(graphchi_vertex<VertexDataType, EdgeDataType> &vertex, graphchi_context &gcontext) {
//go over all user nodes
if ( vertex.num_outedges() > 0 && (algo == GLOBAL_MEAN || algo == USER_MEAN)){
vertex_data & user = latent_factors_inmem[vertex.id()];
//go over all ratings
if (algo == USER_MEAN){
// user mean = average of this user's observed ratings
user.mean_rating = 0;
for(int e=0; e < vertex.num_edges(); e++) {
float observation = vertex.edge(e)->get_data();
user.mean_rating += observation;
}
if (vertex.num_edges() > 0)
user.mean_rating /= vertex.num_edges();
}
// accumulate training RMSE over this user's ratings
//go over all ratings
for(int e=0; e < vertex.num_edges(); e++) {
double prediction;
float observation = vertex.edge(e)->get_data();
vertex_data & movie = latent_factors_inmem[vertex.edge(e)->vertex_id()];
rmse_vec[omp_get_thread_num()] += baseline_predict(user, movie, observation, prediction);
}
}
else if (vertex.num_inedges() > 0 && algo == ITEM_MEAN){
// item-mean pass: runs on item nodes (identified by having in-edges)
vertex_data & user = latent_factors_inmem[vertex.id()];
user.mean_rating = 0;
//go over all ratings
for(int e=0; e < vertex.num_edges(); e++) {
float observation = vertex.edge(e)->get_data();
user.mean_rating += observation;
}
if (vertex.num_edges() > 0)
user.mean_rating /= vertex.num_edges();
for(int e=0; e < vertex.num_edges(); e++) {
float observation = vertex.edge(e)->get_data();
double prediction;
vertex_data & movie = latent_factors_inmem[vertex.edge(e)->vertex_id()];
rmse_vec[omp_get_thread_num()] += baseline_predict(movie, user, observation, prediction);
}
}
}
};
//struct for writing the output feature vectors into file
// Writes the mean_rating of nodes [start, end) as a matrix-market dense
// column vector. All work happens in the constructor; the destructor closes
// the file. fopen failure is only caught by the assert.
struct MMOutputter2{
FILE * outf;
MMOutputter2(std::string fname, uint start, uint end, std::string comment) {
MM_typecode matcode;
set_matcode(matcode);
outf = fopen(fname.c_str(), "w");
assert(outf != NULL);
mm_write_banner(outf, matcode);
if (comment != "")
fprintf(outf, "%%%s\n", comment.c_str());
mm_write_mtx_array_size(outf, end-start, 1);
for (uint i=start; i < end; i++)
fprintf(outf, "%1.12e\n", latent_factors_inmem[i].mean_rating);
}
~MMOutputter2() {
if (outf != NULL) fclose(outf);
}
};
//dump output to file
// Only called for USER_MEAN / ITEM_MEAN (the global mean needs no file).
void output_baseline_result(std::string filename) {
if (algo == USER_MEAN){
MMOutputter2 mmoutput_left(filename + ".baseline_user", 0, M, "This file contains Baseline output matrix U. In each row rating mean a single user node.");
}
else if (algo == ITEM_MEAN){
MMOutputter2 mmoutput_right(filename + ".baseline_item", M ,M+N, "This file contains Baseline output vector V. In each row rating mean of a single item node.");
}
logstream(LOG_INFO) << "Baseline output files (in matrix market format): " << filename << (algo == USER_MEAN ? ".baseline_user" : ".baseline_item") << std::endl;
}
// Entry point: select the mean-based algorithm, run a single engine pass to
// compute the means/RMSE, then dump results and test predictions.
int main(int argc, const char ** argv) {
print_copyright();
//* GraphChi initialization will read the command line arguments and the configuration file. */
graphchi_init(argc, argv);
/* Metrics object for keeping track of performance counters
and other information. Currently required. */
metrics m("sgd-inmemory-factors");
algorithm = get_option_string("algorithm", "global_mean");
if (algorithm == "global_mean")
algo = GLOBAL_MEAN;
else if (algorithm == "user_mean")
algo = USER_MEAN;
else if (algorithm == "item_mean")
algo = ITEM_MEAN;
else logstream(LOG_FATAL)<<"Unsupported algorithm name. Should be --algorithm=XX where XX is one of [global_mean,user_mean,item_mean] for example --algorithm=global_mean" << std::endl;
parse_command_line_args();
mytimer.start();
/* Preprocess data if needed, or discover preprocess files */
int nshards = convert_matrixmarket<float>(training, NULL, 0, 0, 3, TRAINING, false);
init_feature_vectors<std::vector<vertex_data> >(M+N, latent_factors_inmem, false);
rmse_vec = zeros(number_of_omp_threads());
print_config();
/* Run */
BaselineVerticesInMemProgram program;
graphchi_engine<VertexDataType, EdgeDataType> engine(training, nshards, false, m);
set_engine_flags(engine);
pengine = &engine;
engine.run(program, 1);
if (algo == USER_MEAN || algo == ITEM_MEAN)
output_baseline_result(training);
test_predictions(&baseline_predict);
/* Report execution metrics */
if (!quiet)
metrics_report(m);
return 0;
}
| C++ |
/**
* @file
* @author Danny Bickson, CMU
* @version 1.0
*
* @section LICENSE
*
* Copyright [2012] [Carnegie Mellon University]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @section DESCRIPTION
* This code implements the paper:
* Lee, D..D., and Seung, H.S., (2001), 'Algorithms for Non-negative Matrix
* Factorization', Adv. Neural Info. Proc. Syst. 13, 556-562.
*
*
*/
#include "common.hpp"
#include "eigen_wrapper.hpp"
// Lower clamp for factor entries: keeps them strictly positive, as required
// by the multiplicative NMF update rule.
const double epsilon = 1e-16;
// Per-node state: a D-dimensional non-negative factor vector.
struct vertex_data {
vec pvec;
vertex_data() {
pvec = zeros(D);
}
void set_val(int index, float val){
pvec[index] = val;
}
float get_val(int index){
return pvec[index];
}
};
typedef vertex_data VertexDataType;
typedef float EdgeDataType; // Edges store the "rating" of user->movie pair
graphchi_engine<VertexDataType, EdgeDataType> * pengine = NULL;
graphchi_engine<VertexDataType, EdgeDataType> * pvalidation_engine = NULL;
std::vector<vertex_data> latent_factors_inmem;
// Per-half-iteration sums of all item (resp. user) factor vectors; used as
// the denominator of the multiplicative update rule.
vec sum_of_item_latent_features, sum_of_user_latent_feautres;
// Current iteration number (odd = user pass, even = item pass).
int iter;
#include "rmse.hpp"
#include "rmse_engine.hpp"
#include "io.hpp"
/**
 * NMF predictor: the prediction is the dot product of the user and item
 * factor vectors, clamped to [minval, maxval]. Returned via `prediction`;
 * the return value is the squared error against `rating`.
 */
float nmf_predict(const vertex_data& user,
  const vertex_data& movie,
  const float rating,
  double & prediction,
  void * extra = NULL){
  double p = dot_prod(user.pvec, movie.pvec);
  // clamp the prediction into the allowed rating range
  if (p > maxval) p = maxval;
  if (p < minval) p = minval;
  prediction = p;
  // return the squared error
  const float err = rating - prediction;
  assert(!std::isnan(err));
  return err*err;
}
/* sum up all item data vectors */
// Runs before every user pass: the sum is the denominator factor of the
// Lee-Seung multiplicative update.
void pre_user_iter(){
sum_of_item_latent_features = zeros(D);
for (uint i=M; i<M+N; i++){
vertex_data & data = latent_factors_inmem[i];
sum_of_item_latent_features += data.pvec;
}
}
/* sum up all user data vectors */
// Runs before every item pass, symmetric to pre_user_iter().
void pre_movie_iter(){
sum_of_user_latent_feautres = zeros(D);
for (uint i=0; i<M; i++){
vertex_data & data = latent_factors_inmem[i];
sum_of_user_latent_feautres += data.pvec;
}
}
/**
* GraphChi programs need to subclass GraphChiProgram<vertex-type, edge-type>
* class. The main logic is usually in the update function.
*/
/**
 * Lee-Seung multiplicative-update NMF. Iteration 0 validates the input
 * (non-negative, no empty rows); after that, odd iterations update user
 * factors and even iterations update item factors, so two engine iterations
 * make one NMF round.
 */
struct NMFVerticesInMemProgram : public GraphChiProgram<VertexDataType, EdgeDataType> {
/**
* Called before an iteration starts.
*/
void before_iteration(int iteration, graphchi_context &gcontext) {
reset_rmse(gcontext.execthreads);
iter = iteration;
if (iteration > 0) {
// user passes need the item-feature sums and vice versa
if (iteration % 2 == 1)
pre_user_iter();
else pre_movie_iter();
}
}
/**
* Vertex update function - computes the least square step
*/
void update(graphchi_vertex<VertexDataType, EdgeDataType> &vertex, graphchi_context &gcontext) {
if (gcontext.iteration == 0){
// validation pass: NMF needs a non-negative matrix with no empty rows
if (vertex.num_outedges() == 0 && vertex.id() < M)
logstream(LOG_FATAL)<<"NMF algorithm can not work when the row " << vertex.id() << " of the matrix contains all zeros" << std::endl;
for(int e=0; e < vertex.num_edges(); e++) {
float observation = vertex.edge(e)->get_data();
if (observation < 0 ){
logstream(LOG_FATAL)<<"Found a negative entry in matirx row " << vertex.id() << " with value: " << observation << std::endl;
}
}
return;
}
bool isuser = (vertex.id() < M);
// skip vertices that are not part of the current half-round
if ((iter % 2 == 1 && !isuser) ||
(iter % 2 == 0 && isuser))
return;
vec ret = zeros(D);
vertex_data & vdata = latent_factors_inmem[vertex.id()];
// numerator of the multiplicative rule: sum of neighbor vectors weighted
// by observation/prediction ratios (also accumulates training RMSE)
for(int e=0; e < vertex.num_edges(); e++) {
float observation = vertex.edge(e)->get_data();
vertex_data & nbr_latent = latent_factors_inmem[vertex.edge(e)->vertex_id()];
double prediction;
rmse_vec[omp_get_thread_num()] += nmf_predict(vdata, nbr_latent, observation, prediction);
if (prediction == 0)
logstream(LOG_FATAL)<<"Got into numerical error! Please submit a bug report." << std::endl;
ret += nbr_latent.pvec * (observation / prediction);
}
vec px;
if (isuser)
px = sum_of_item_latent_features;
else
px = sum_of_user_latent_feautres;
// multiplicative update, clamped below by epsilon to stay positive
for (int i=0; i<D; i++){
assert(px[i] != 0);
vdata.pvec[i] *= ret[i] / px[i];
if (vdata.pvec[i] < epsilon)
vdata.pvec[i] = epsilon;
}
}
/**
* Called after an iteration has finished.
*/
void after_iteration(int iteration, graphchi_context &gcontext) {
//print rmse every other iteration, since 2 iterations are considered one NMF round
int now = iteration % 2;
if (now == 0){
training_rmse(iteration/2, gcontext);
run_validation(pvalidation_engine, gcontext);
}
}
};
// Dump the learned factor matrices to matrix-market files (_U.mm / _V.mm).
void output_nmf_result(std::string filename){
MMOutputter_mat<vertex_data> user_mat(filename + "_U.mm", 0, M, "This file contains NMF output matrix U. In each row D factors of a single user node.", latent_factors_inmem);
MMOutputter_mat<vertex_data> item_mat(filename + "_V.mm", M, M+N, "This file contains NMF output matrix V. In each row D factors of a single item node.", latent_factors_inmem);
logstream(LOG_INFO) << "NMF output files (in matrix market format): " << filename << "_U.mm" <<
", " << filename + "_V.mm " << std::endl;
}
// Entry point: convert input, initialize factors, run niters NMF rounds
// (two engine iterations each), then dump factors and test predictions.
int main(int argc, const char ** argv) {
print_copyright();
/* GraphChi initialization will read the command line
arguments and the configuration file. */
graphchi_init(argc, argv);
metrics m("nmf-inmemory-factors");
parse_command_line_args();
parse_implicit_command_line();
niters *= 2; //each NMF iteration is composed of two sub iters
/* Preprocess data if needed, or discover preprocess files */
int nshards = convert_matrixmarket<float>(training, NULL, 0, 0, 3, TRAINING, false);
init_feature_vectors<std::vector<vertex_data> >(M+N, latent_factors_inmem, !load_factors_from_file);
if (validation != ""){
int vshards = convert_matrixmarket<EdgeDataType>(validation, NULL, 0, 0, 3, VALIDATION, false);
if (vshards != -1)
init_validation_rmse_engine<VertexDataType, EdgeDataType>(pvalidation_engine, vshards, &nmf_predict);
}
/* load initial state from disk (optional) */
if (load_factors_from_file){
load_matrix_market_matrix(training + "_U.mm", 0, D);
load_matrix_market_matrix(training + "_V.mm", M, D);
}
sum_of_item_latent_features = zeros(D);
sum_of_user_latent_feautres = zeros(D);
/* Run */
NMFVerticesInMemProgram program;
graphchi_engine<VertexDataType, EdgeDataType> engine(training, nshards, false, m);
set_engine_flags(engine);
pengine = &engine;
engine.run(program, niters);
/* Output latent factor matrices in matrix-market format */
output_nmf_result(training);
test_predictions(&nmf_predict);
/* Report execution metrics */
if (!quiet)
metrics_report(m);
return 0;
}
| C++ |
/**
* @file
* @author Danny Bickson, based on code by Aapo Kyrola <akyrola@cs.cmu.edu>
* @version 1.0
*
* @section LICENSE
*
* Copyright [2012] [Carnegie Mellon University]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* This file implements item based collaborative filtering by comparing all item pairs which
* are connected by one or more user nodes.
*
* For the Jaccard index see: http://en.wikipedia.org/wiki/Jaccard_index
*
* For the AA index see: http://arxiv.org/abs/0907.1728 "Role of Weak Ties in Link Prediction of Complex Networks", equation (2)
*
* For the RA index see the above paper, equation (3)
*
* For Asym. Cosine see: F. Aiolli, A Preliminary Study on a Recommender System for the Million Songs Dataset Challenge
* Preference Learning: Problems and Applications in AI (PL-12), ECAI-12 Workshop, Montpellier
*
* Acknowledgements: thanks to Clive Cox, Rummble Labs, for implementing Asym. Cosince metric and contributing the code.
*/
#include <set>
#include <iomanip>
#include <algorithm>
#include "common.hpp"
#include "timer.hpp"
#include "eigen_wrapper.hpp"
#include "engine/dynamic_graphs/graphchi_dynamicgraph_engine.hpp"
// Supported item-item similarity metrics, selected on the command line
// via --distance=N (validated in main()).
enum DISTANCE_METRICS{
  JACCARD = 0,
  AA = 1,
  RA = 2,
  ASYM_COSINE = 3,
};
int min_allowed_intersection = 1; // minimal number of common users required for an item pair to be considered
vec written_pairs;                // per-OpenMP-thread counters of item pairs written to the output files
size_t zero_dist = 0;             // number of compared pairs whose computed distance was exactly zero
size_t actual_written = 0;
size_t item_pairs_compared = 0;   // running total of item pairs compared
size_t not_enough = 0;            // items for which fewer than K similar items were found
std::vector<FILE*> out_files;     // one output file per OpenMP thread (opened in main())
timer mytimer;
bool * relevant_items = NULL;     // size-N flags marking items reachable from a pivot via some user (reset each even iteration)
int grabbed_edges = 0;            // number of edges currently cached in memory (used to honor the memory budget)
int distance_metric;              // one of DISTANCE_METRICS, set in main()
float asym_cosine_alpha = 0.5;    // alpha parameter of the asymmetric-cosine metric
int debug = 0;
// Vertex id convention: ids [0,M) are user nodes, ids [M,M+N) are item nodes.
bool is_item(vid_t v){ return v >= M; }
bool is_user(vid_t v){ return v < M; }
/**
 * Type definitions. Remember to create suitable graph shards using the
 * Sharder-program.
 */
typedef unsigned int VertexDataType;
typedef unsigned int EdgeDataType;  // Edges store the "rating" of user->movie pair
// Per-vertex state kept in memory. In this application only `degree` is
// used (populated for user nodes when the AA/RA metrics are selected);
// pvec/set_val/get_val exist to satisfy the shared io.hpp template
// interface — presumably unused here, verify against io.hpp callers.
struct vertex_data{
  vec pvec;    // latent-factor style vector (unused by item-cf itself)
  int degree;  // number of edges of this vertex (set only for AA/RA metrics)
  vertex_data(){ degree = 0; }
  void set_val(int index, float val){
    pvec[index] = val;
  }
  float get_val(int index){
    return pvec[index];
  }
};
std::vector<vertex_data> latent_factors_inmem;
#include "io.hpp"
/**
 * Compact adjacency list of a single pivot item: the ids of the users who
 * rated it. The array is heap-allocated (calloc) by
 * adjlist_container::load_edges_into_memory() and released in
 * adjlist_container::clear().
 */
struct dense_adj {
  int count;        // number of valid entries in adjlist
  vid_t * adjlist;  // neighbor ids (sorted by the producer), or NULL when empty
  // Fix: the default constructor previously left `count` uninitialized,
  // so a default-constructed entry (e.g. after adjs.resize()) reported a
  // garbage neighbor count until it was overwritten.
  dense_adj() { count = 0; adjlist = NULL; }
  dense_adj(int _count, vid_t * _adjlist) : count(_count), adjlist(_adjlist) {
  }
};
// This is used for keeping in-memory
class adjlist_container {
std::vector<dense_adj> adjs;
//mutex m;
public:
vid_t pivot_st, pivot_en;
adjlist_container() {
pivot_st = M; //start pivor on item nodes (excluding user nodes)
pivot_en = M;
}
void clear() {
for(std::vector<dense_adj>::iterator it=adjs.begin(); it != adjs.end(); ++it) {
if (it->adjlist != NULL) {
free(it->adjlist);
it->adjlist = NULL;
}
}
adjs.clear();
pivot_st = pivot_en;
}
/**
* Extend the interval of pivot vertices to en.
*/
void extend_pivotrange(vid_t en) {
assert(en>=pivot_en);
pivot_en = en;
adjs.resize(pivot_en - pivot_st);
}
/**
* Grab pivot's adjacency list into memory.
*/
int load_edges_into_memory(graphchi_vertex<uint32_t, uint32_t> &v) {
//assert(is_pivot(v.id()));
//assert(is_item(v.id()));
int num_edges = v.num_edges();
//not enough user rated this item, we don't need to compare to it
if (num_edges < min_allowed_intersection){
relevant_items[v.id() - M] = false;
return 0;
}
relevant_items[v.id() - M] = true;
// Count how many neighbors have larger id than v
dense_adj dadj = dense_adj(num_edges, (vid_t*) calloc(sizeof(vid_t), num_edges));
for(int i=0; i<num_edges; i++) {
dadj.adjlist[i] = v.edge(i)->vertex_id();
}
std::sort(dadj.adjlist, dadj.adjlist + num_edges);
adjs[v.id() - pivot_st] = dadj;
assert(v.id() - pivot_st < adjs.size());
__sync_add_and_fetch(&grabbed_edges, num_edges /*edges_to_larger_id*/);
return num_edges;
}
int acount(vid_t pivot) {
return adjs[pivot - pivot_st].count;
}
/**
* calc distance between two items.
* Let a be all the users rated item 1
* Let b be all the users rated item 2
*
* 1) Using Jackard index:
* Dist_ab = intersection(a,b) / (size(a) + size(b) - size(intersection(a,b))
*
* 2) Using AA index:
* Dist_ab = sum_user k in intersection(a,b) [ 1 / log(degree(k)) ]
*
* 3) Using RA index:
* Dist_ab = sum_user k in intersection(a,b) [ 1 / degree(k) ]
*
* 4) Using Asym Cosine:
* Dist_ab = intersection(a,b) / size(a)^alpha * size(b)^(1-alpha)
*/
double calc_distance(graphchi_vertex<uint32_t, uint32_t> &v, vid_t pivot, int distance_metric) {
//assert(is_pivot(pivot));
//assert(is_item(pivot) && is_item(v.id()));
dense_adj &pivot_edges = adjs[pivot - pivot_st];
int num_edges = v.num_edges();
//if there are not enough neighboring user nodes to those two items there is no need
//to actually count the intersection
if (num_edges < min_allowed_intersection || pivot_edges.count < min_allowed_intersection)
return 0;
std::vector<vid_t> edges;
edges.resize(num_edges);
for(int i=0; i < num_edges; i++) {
vid_t other_vertex = v.edge(i)->vertexid;
edges[i] = other_vertex;
}
sort(edges.begin(), edges.end());
std::set<vid_t> intersection;
std::set_intersection(
pivot_edges.adjlist, pivot_edges.adjlist + pivot_edges.count,
edges.begin(), edges.end(),
std::inserter(intersection, intersection.begin()));
double intersection_size = (double)intersection.size();
//not enough user nodes rated both items, so the pairs of items are not compared.
if (intersection_size < (double)min_allowed_intersection)
return 0;
if (distance_metric == JACCARD){
uint set_a_size = v.num_edges(); //number of users connected to current item
uint set_b_size = acount(pivot); //number of users connected to current pivot
return intersection_size / (double)(set_a_size + set_b_size - intersection_size); //compute the distance
}
else if (distance_metric == AA){
double dist = 0;
for (std::set<vid_t>::iterator i= intersection.begin() ; i != intersection.end(); i++){
vid_t user = *i;
assert(latent_factors_inmem.size() == M && is_user(user));
assert(latent_factors_inmem[user].degree > 0);
dist += 1.0 / log(latent_factors_inmem[user].degree);
}
return dist;
}
else if (distance_metric == RA){
double dist = 0;
for (std::set<vid_t>::iterator i= intersection.begin() ; i != intersection.end(); i++){
vid_t user = *i;
assert(latent_factors_inmem.size() == M && is_user(user));
assert(latent_factors_inmem[user].degree > 0);
dist += 1.0 / latent_factors_inmem[user].degree;
}
return dist;
}
else if (distance_metric == ASYM_COSINE){
uint set_a_size = v.num_edges(); //number of users connected to current item
uint set_b_size = acount(pivot); //number of users connected to current pivot
return intersection_size / (pow(set_a_size,asym_cosine_alpha) * pow(set_b_size,1-asym_cosine_alpha));
}
return 0;
}
inline bool is_pivot(vid_t vid) {
return vid >= pivot_st && vid < pivot_en;
}
};
adjlist_container * adjcontainer;
/** An (item-index, similarity-value) pair used to rank the top-K similar items. */
struct index_val{
  uint index;  // item vertex id; defaults to the all-ones sentinel (uint)-1
  float val;   // similarity value
  index_val() : index((uint)-1), val(0) { }
  index_val(uint index, float val): index(index), val(val){ }
};
/** Strict-weak-ordering predicate: sorts index_val entries by descending value. */
bool Greater(const index_val& a, const index_val& b)
{
  return b.val < a.val;
}
/**
 * GraphChi program computing, for every item, its top-K most similar items.
 * Works in alternating phases: even iterations cache a window of "pivot"
 * item adjacency lists in memory and mark relevant items; odd iterations
 * compare every relevant item against the cached pivots and write results.
 */
struct ItemDistanceProgram : public GraphChiProgram<VertexDataType, EdgeDataType> {
  /**
   * Vertex update function.
   */
  void update(graphchi_vertex<VertexDataType, EdgeDataType> &v, graphchi_context &gcontext) {
    if (debug)
      printf("Entered iteration %d with %d\n", gcontext.iteration, v.id());
    /* even iteration numbers:
     * 1) load a subset of items into memory (pivots)
     * 2) Find which subset of items needs to compared to the users
     */
    if (gcontext.iteration % 2 == 0) {
      // Item vertex inside the current pivot window: cache its user list.
      if (adjcontainer->is_pivot(v.id()) && is_item(v.id())){
        adjcontainer->load_edges_into_memory(v);
        if (debug)
          printf("Loading pivot %dintro memory\n", v.id()); // (note: "intro" is a typo in the debug message)
      }
      else if (is_user(v.id())){
        //in the zero iteration, if using AA distance metric, initialize array
        //with node degrees
        if (gcontext.iteration == 0 && (distance_metric == AA || distance_metric == RA)){
          latent_factors_inmem[v.id()].degree = v.num_edges();
        }
        //check if this user is connected to any pivot item
        bool has_pivot = false;
        int pivot = -1;
        for(int i=0; i<v.num_edges(); i++) {
          graphchi_edge<uint32_t> * e = v.edge(i);
          //assert(is_item(e->vertexid));
          if (adjcontainer->is_pivot(e->vertexid)) {
            has_pivot = true;
            pivot = e->vertexid;
            break;
          }
        }
        if (debug)
          printf("user %d is linked to pivot %d\n", v.id(), pivot);
        if (!has_pivot) //this user is not connected to any of the pivot item nodes and thus
          //it is not relevant at this point
          return;
        //this user is connected to a pivot items, thus all connected items should be compared
        // (relevant_items survives into the following odd iteration, where it
        // gates which items are compared against the pivots)
        for(int i=0; i<v.num_edges(); i++) {
          graphchi_edge<uint32_t> * e = v.edge(i);
          //assert(v.id() != e->vertexid);
          relevant_items[e->vertexid - M] = true;
        }
      }//is_user
    } //end of iteration % 2 == 0
    /* odd iteration number:
     * 1) For any item connected to a pivot item
     *    compute itersection
     */
    else {
      // Only items marked relevant during the preceding even iteration are compared.
      if (!relevant_items[v.id() - M]){
        if (debug)
          logstream(LOG_DEBUG)<<"Skipping item: " << v.id() << " since not relevant" << std::endl;
        return;
      }
      // Collect nonzero distances to all cached pivots, then keep the top-K.
      std::vector<index_val> heap;
      for (vid_t i=adjcontainer->pivot_st; i< adjcontainer->pivot_en; i++){
        //if JACCARD which is symmetric, compare only to pivots which are smaller than this item id
        if ((distance_metric != ASYM_COSINE && i >= v.id()) || (!relevant_items[i-M]))
          continue;
        // asym. cosine is not symmetric, so compare to all pivots except self
        else if (distance_metric == ASYM_COSINE && i == v.id())
          continue;
        double dist = adjcontainer->calc_distance(v, i, distance_metric);
        item_pairs_compared++;
        // periodic progress report
        if (item_pairs_compared % 10000000 == 0)
          logstream(LOG_INFO)<< std::setw(10) << mytimer.current_time() << ") " << std::setw(10) << item_pairs_compared << " pairs compared " << std::setw(10) <<sum(written_pairs) << " written. " << std::endl;
        if (debug)
          printf("comparing %d to pivot %d distance is %g\n", i - M + 1, v.id() - M + 1, dist);
        if (dist != 0){
          heap.push_back(index_val(i, dist));
          //where the output format is:
          //[item A] [ item B ] [ distance ]
        }
        else zero_dist++;
      }
      // Sort candidates by descending similarity and emit at most K of them.
      sort(heap.begin(), heap.end(), &Greater);
      int thread_num = omp_get_thread_num();
      if (heap.size() < K)
        not_enough++;
      for (uint i=0; i< std::min(heap.size(), (size_t)K); i++){
        // ids are shifted back to 1-based "external" item numbering on output
        int rc = fprintf(out_files[thread_num], "%u %u %.12lg\n", v.id()-M+1, heap[i].index-M+1, (double)heap[i].val);//write item similarity to file
        written_pairs[omp_get_thread_num()]++;
        if (rc <= 0){
          perror("Failed to write output");
          logstream(LOG_FATAL)<<"Failed to write output to: file: " << training << omp_get_thread_num() << ".out" << std::endl;
        }
      }
    }//end of iteration % 2 == 1
  }//end of update function

  /**
   * Called before an iteration starts.
   * On odd iteration, schedule both users and items.
   * on even iterations, schedules only item nodes
   */
  void before_iteration(int iteration, graphchi_context &gcontext) {
    gcontext.scheduler->remove_tasks(0, (int) gcontext.nvertices - 1);
    if (gcontext.iteration == 0)
      written_pairs = zeros(gcontext.execthreads);
    if (gcontext.iteration % 2 == 0){
      // fresh pivot window: forget previous relevance marks and cached lists
      memset(relevant_items, 0, sizeof(bool)*N);
      for (vid_t i=0; i < M+N; i++){
        gcontext.scheduler->add_task(i);
      }
      grabbed_edges = 0;
      adjcontainer->clear();
    } else { //iteration % 2 == 1
      // odd iterations only touch item vertices [M, M+N)
      for (vid_t i=M; i < M+N; i++){
        gcontext.scheduler->add_task(i);
      }
    }
  }

  /**
   * Called before an execution interval is started.
   *
   * On every even iteration, we load pivot's item connected user lists to memory.
   * Here we manage the memory to ensure that we do not load too much
   * edges into memory.
   */
  void before_exec_interval(vid_t window_st, vid_t window_en, graphchi_context &gcontext) {
    /* on even iterations, loads pivot items into memory base on the membudget_mb allowed memory size */
    if (gcontext.iteration % 2 == 0) {
      if (!quiet){
        printf("entering iteration: %d on before_exec_interval\n", gcontext.iteration);
        printf("pivot_st is %d window_en %d\n", adjcontainer->pivot_st, window_en);
      }
      if (adjcontainer->pivot_st <= window_en) {
        // budget: membudget_mb megabytes, 8 bytes assumed per cached edge
        size_t max_grab_edges = get_option_long("membudget_mb", 1024) * 1024 * 1024 / 8;
        if (grabbed_edges < max_grab_edges * 0.8) {
          logstream(LOG_DEBUG) << "Window init, grabbed: " << grabbed_edges << " edges" << " extending pivor_range to : " << window_en + 1 << std::endl;
          adjcontainer->extend_pivotrange(window_en + 1);
          logstream(LOG_DEBUG) << "Window en is: " << window_en << " vertices: " << gcontext.nvertices << std::endl;
          if (window_en+1 == gcontext.nvertices) {
            // every item was a pivot item, so we are done
            logstream(LOG_DEBUG)<<"Setting last iteration to: " << gcontext.iteration + 2 << std::endl;
            gcontext.set_last_iteration(gcontext.iteration + 2);
          }
        } else {
          // over budget: keep the current pivot window, more passes will follow
          logstream(LOG_DEBUG) << "Too many edges, already grabbed: " << grabbed_edges << std::endl;
        }
      }
    }
  }
};
int main(int argc, const char ** argv) {
print_copyright();
/* GraphChi initialization will read the command line
arguments and the configuration file. */
graphchi_init(argc, argv);
/* Metrics object for keeping track of performance counters
and other information. Currently required. */
metrics m("item-cf");
/* Basic arguments for application */
min_allowed_intersection = get_option_int("min_allowed_intersection", min_allowed_intersection);
distance_metric = get_option_int("distance", JACCARD);
asym_cosine_alpha = get_option_float("asym_cosine_alpha", 0.5);
debug = get_option_int("debug", debug);
if (distance_metric != JACCARD && distance_metric != AA && distance_metric != RA && distance_metric != ASYM_COSINE)
logstream(LOG_FATAL)<<"Wrong distance metric. --distance_metric=XX, where XX should be either 0) JACCARD, 1) AA, 2) RA, 3) ASYM_COSINE" << std::endl;
parse_command_line_args();
mytimer.start();
int nshards = convert_matrixmarket<EdgeDataType>(training, NULL, 0, 0, 3, TRAINING, false);
if (nshards != 1)
logstream(LOG_FATAL)<<"This application currently supports only 1 shard" << std::endl;
K = get_option_int("K", K);
if (K <= 0)
logstream(LOG_FATAL)<<"Please specify the number of ratings to generate for each user using the --K command" << std::endl;
assert(M > 0 && N > 0);
//initialize data structure which saves a subset of the items (pivots) in memory
adjcontainer = new adjlist_container();
//array for marking which items are conected to the pivot items via users.
relevant_items = new bool[N];
//store node degrees in an array to be used for AA distance metric
if (distance_metric == AA || distance_metric == RA)
latent_factors_inmem.resize(M);
/* Run */
ItemDistanceProgram program;
graphchi_engine<VertexDataType, EdgeDataType> engine(training, 1, true, m);
set_engine_flags(engine);
engine.set_maxwindow(M+N+1);
//open output files as the number of operating threads
out_files.resize(number_of_omp_threads());
for (uint i=0; i< out_files.size(); i++){
char buf[256];
sprintf(buf, "%s.out%d", training.c_str(), i);
out_files[i] = open_file(buf, "w");
}
//run the program
engine.run(program, niters);
/* Report execution metrics */
if (!quiet)
metrics_report(m);
std::cout<<"Total item pairs compared: " << item_pairs_compared << " total written to file: " << sum(written_pairs) << " pairs with zero distance: " << zero_dist << std::endl;
if (not_enough)
logstream(LOG_WARNING)<<"Items that did not have enough similar items: " << not_enough << std::endl;
for (uint i=0; i< out_files.size(); i++){
fflush(out_files[i]);
fclose(out_files[i]);
}
std::cout<<"Created " << number_of_omp_threads() << " output files with the format: " << training << ".outXX, where XX is the output thread number" << std::endl;
delete[] relevant_items;
return 0;
}
| C++ |
/**
* @file
* @author Danny Bickson
* @version 1.0
*
* @section LICENSE
*
* Copyright [2012] [Carnegie Mellon University]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @section DESCRIPTION
 * This code implements the PMF (probabilistic matrix factorization) algorithm
* as explained in Liang Xiong et al SDM 2010 paper.
*
*/
#include "eigen_wrapper.hpp"
#include "common.hpp"
#include "prob.hpp"
double lambda = 0.065; // regularization weight (see --lambda)
int pmf_burn_in = 10;//number of iterations for burn in (intermediate solutions are thrown)
int pmf_additional_output = 0; // when set, dump U/V matrices after every post-burn-in sample
int debug = 0;
/* variables for PMF */
double nuAlpha = 1;  // degrees of freedom of the Wishart prior on the noise precision
double Walpha = 1;   // scale of the Wishart prior on the noise precision
double nu0 = D;      // degrees of freedom of the Gaussian-Wishart hyperprior
double alpha = 0;    // sampled observation (noise) precision
double beta = 1;
vec beta0 = init_vec("1", 1); // precision scaling of the Gaussian-Wishart hyperprior mean
//vec mu0T = init_vec("1", 1);
mat W0;              // Wishart scale matrix of the hyperprior
//mat W0T;
double iWalpha;      // 1/Walpha, precomputed in init_self_pot()
mat iW0;             // inverse of W0, precomputed in init_self_pot()
//mat iW0T;
mat A_U, A_V;// A_T;        // sampled precision matrices of the user/item priors
vec mu_U, mu_V; //, mu_T;   // sampled means of the user/item priors
int iiter = 0;       // global Gibbs-sampling iteration counter
vec validation_avgprod; //vector for storing temporary aggregated predictions for the MCMC method
vec test_avgprod; //vector for storing temporary aggregated predictions for the MCMC method
size_t rmse_index = 0; // running index into the aggregated-prediction vectors
int rmse_type = 0;     // which dataset is being evaluated (TRAINING/VALIDATION/TEST)
// Per-vertex state: the D-dimensional latent factor vector of a user or item.
struct vertex_data {
  vec pvec; // latent factors, initialized to zeros(D)
  vertex_data() {
    pvec = zeros(D);
  }
  // set_val/get_val implement the element accessor interface expected by io.hpp
  void set_val(int index, float val){
    pvec[index] = val;
  }
  float get_val(int index){
    return pvec[index];
  }
};
/**
 * Payload stored on each user->item edge: the observed rating plus a
 * running sum of post-burn-in predictions (used to average MCMC samples).
 */
struct edge_data {
  float weight;  // observed rating for this user/item pair
  float avgprd;  // accumulated sum of predictions after burn-in
  edge_data() : weight(0), avgprd(0) { }
  edge_data(double weight) : weight(weight), avgprd(0) { }
};
/**
 * Type definitions. Remember to create suitable graph shards using the
 * Sharder-program.
 */
typedef vertex_data VertexDataType;
typedef edge_data EdgeDataType;  // Edges store the "rating" of user->movie pair
graphchi_engine<VertexDataType, EdgeDataType> * pengine = NULL; // set in main(); used by shared helpers
std::vector<vertex_data> latent_factors_inmem; // all user+item factor vectors, indexed by vertex id
#include "io.hpp"
#include "rmse.hpp"
/** compute a missing value based on PMF algorithm.
 *
 * Predicts rating = <user.pvec, movie.pvec>, clipped to [minval, maxval].
 * Returns the squared error against `rating`. After the burn-in period,
 * if `pedge` points at an edge's accumulator (a float), predictions are
 * summed into it and the error is computed against the running MCMC
 * average instead of the single sample.
 */
float pmf_predict(const vertex_data& user,
    const vertex_data& movie,
    const float rating,
    double & prediction,
    void * pedge){
  prediction = dot_prod(user.pvec, movie.pvec);
  //truncate prediction to allowed values
  prediction = std::min((double)prediction, maxval);
  prediction = std::max((double)prediction, minval);
  float err = 0;
  if (iiter > pmf_burn_in){
    if (pedge){
      // first post-burn-in sample resets the accumulator
      if (iiter == pmf_burn_in+1)
        (*(float*)pedge) = 0;
      (*(float*)pedge) += prediction;
      // error of the average of the (iiter - pmf_burn_in) samples so far
      err = pow(((*(float*)pedge) / (iiter - pmf_burn_in)) - rating, 2);
    }
  }
  else {
    // during burn-in: plain squared error of the current sample
    err = pow(prediction - rating,2);
  }
  assert(!std::isnan(err));
  // NOTE(review): rmse_index is advanced only for calls without an edge
  // accumulator — presumably validation/test evaluation; verify in rmse.hpp.
  if (!pedge)
    rmse_index++;
  return err;
}
/**
 * Initialize the Gaussian-Wishart hyperprior parameters (W0, nu0, beta0)
 * and the initial per-side precision matrices/means (A_U, A_V, mu_U, mu_V)
 * before Gibbs sampling starts.
 */
void init_self_pot(){
  W0 = eye(D);            // identity Wishart scale matrix
  //W0T = eye(D);
  iWalpha = 1.0/Walpha;
  iW0 = inv(W0);
  //iW0T = inv(W0T);
  nu0 = D;                // degrees of freedom = factor dimension
  A_U = eye(D); //cov prior for users
  A_V = eye(D); //cov prior for movies
  //A_T = eye(D); //cov prior for time nodes
  mu_U = zeros(D); mu_V = zeros(D);// mu_T = zeros(D);
  //printf("nuAlpha=%g, Walpha=%g, mu0=%d, muT=%g, nu=%g, "
  //      "beta=%g, W=%g, WT=%g pmf_burn_in=%d\n", nuAlpha, Walpha, 0,
  //      mu0T[0], nu0, beta0[0], W0(1,1), W0T(1,1), pmf_burn_in);
  //test_randn();
  //test_wishrnd();
  //test_wishrnd2();
  //test_chi2rnd();
  //test_wishrnd3();
  //test_mvnrndex();
}
/**
 * sample the noise level.
 * Equation A.2 in Xiong paper.
 *
 * Draws the observation precision `alpha` from its (1x1) Wishart
 * posterior given the current training residual `res2` (sum of squared
 * errors over the L training ratings).
 */
void sample_alpha(double res2){
  if (debug)
    printf("res is %g\n", res2);
  double res = res2;
  if (nuAlpha > 0){
    double nuAlpha_ =nuAlpha+ L;          // posterior degrees of freedom
    mat iWalpha_(1,1);
    set_val(iWalpha_, 0,0,iWalpha + res); // posterior inverse scale
    mat iiWalpha_ = zeros(1,1);
    iiWalpha_ = inv(iWalpha_);
    alpha = get_val(wishrnd(iiWalpha_, nuAlpha_),0,0); // 1-D Wishart draw
    assert(alpha != 0);
    if (debug)
      std::cout<<"Sampling from alpha" <<nuAlpha_<<" "<<iWalpha<<" "<< iiWalpha_<<" "<<alpha<<endl;
    //printf("sampled alpha is %g\n", alpha);
  }
}
/**
 * Accumulate M*M^T over the factor vectors of vertices [start_pos, end_pos),
 * and return it; also outputs their mean into `Umean`.
 *
 * Works in batches of 1000 rows: the scratch matrix U is zeroed at each
 * window start and its outer product is folded into MMT when the window
 * fills (or at the final, possibly partial, window).
 */
mat calc_MMT(int start_pos, int end_pos, vec &Umean){
  int batchSize = 1000;
  mat U = zeros(batchSize,D);
  mat MMT = zeros(D,D);
  int cnt = 0;
  for (int i=start_pos; i< end_pos; i++){
    if ((i-start_pos) % batchSize == 0){
      U=zeros(batchSize, D); // start a new (zeroed) window
      cnt = 1;
    }
    const vertex_data * data= &latent_factors_inmem[i];
    vec mean = data->pvec;
    Umean += mean;
    // NOTE(review): the row index uses i%batchSize while the window reset
    // uses (i-start_pos)%batchSize. When start_pos is not a multiple of
    // batchSize the rows filled per window differ from [0,batchSize), but
    // they are still distinct within a window and U is zeroed each window,
    // so the accumulated MMT is unaffected — fragile, worth confirming.
    for (int s=0; s<D; s++)
      U(i%batchSize,s)=mean(s);
    if (debug && (i==start_pos || i == end_pos-1))
      std::cout<<" clmn "<<i<< " vec: " << mean <<std::endl;
    if ((cnt == batchSize) || (cnt < batchSize && i == end_pos-1)){
      MMT = MMT+transpose(U)*U; // fold this window's outer product
    }
    cnt++;
  }
  Umean /= (end_pos-start_pos); // caller passes Umean = zeros(D)
  if (debug)
    cout<<"mean: "<<Umean<<endl;
  assert(MMT.rows() == D && MMT.cols() == D);
  assert(Umean.size() == D);
  return MMT;
}
// sample USER nodes hyperprior (A_U, mu_U)
// according to equation A.3 in Xiong paper.
// (The original comment said "movie nodes"; this function iterates the
// user vertices [0,M).)
void sample_U(){
  vec Umean = zeros(D);
  mat UUT = calc_MMT(0,M,Umean); // sum of outer products over user factors
  // Gaussian-Wishart posterior parameters
  double beta0_ = beta0[0] + M;
  vec mu0_ = (M*Umean)/beta0_;
  double nu0_ = nu0 +M;
  vec dMu = - Umean;
  if (debug)
    std::cout<<"dMu:"<<dMu<<"beta0: "<<beta0[0]<<" beta0_ "<<beta0_<<" nu0_ " <<nu0_<<" mu0_ " << mu0_<<endl;
  mat UmeanT = M*outer_product(Umean, Umean);
  assert(UmeanT.rows() == D && UmeanT.cols() == D);
  mat dMuT = (beta0[0]/beta0_)*UmeanT;
  mat iW0_ = iW0 + UUT - UmeanT + dMuT; // posterior inverse scale matrix
  mat W0_;
  bool ret =inv(iW0_, W0_);
  assert(ret);
  mat tmp = (W0_+transpose(W0_))*0.5; // symmetrize before the Wishart draw
  if (debug)
    std::cout<<iW0<<UUT<<UmeanT<<dMuT<<W0_<<tmp<<nu0_<<endl;
  A_U = wishrnd(tmp, nu0_);           // draw precision matrix
  mat tmp2;
  ret = inv(beta0_ * A_U, tmp2);
  assert(ret);
  mu_U = mvnrndex(mu0_, tmp2, D, 0);  // draw mean given precision
  if (debug)
    std::cout<<"Sampling from U" <<A_U<<" "<<mu_U<<" "<<Umean<<" "<<W0_<<tmp<<endl;
}
// sample ITEM (movie) nodes hyperprior (A_V, mu_V)
// according to equation A.4 in Xiong paper.
// (The original comment said "user nodes"; this function iterates the
// item vertices [M, M+N).)
void sample_V(){
  vec Vmean = zeros(D);
  mat VVT = calc_MMT(M, M+N, Vmean); // sum of outer products over item factors
  // Gaussian-Wishart posterior parameters
  double beta0_ = beta0[0] + N;
  vec mu0_ = (N*Vmean)/beta0_;
  double nu0_ = nu0 +N;
  vec dMu = - Vmean;
  if (debug)
    std::cout<<"dMu:"<<dMu<<"beta0: "<<beta0[0]<<" beta0_ "<<beta0_<<" nu0_ " <<nu0_<<endl;
  mat VmeanT = N*outer_product(Vmean, Vmean);
  assert(VmeanT.rows() == D && VmeanT.cols() == D);
  mat dMuT = (beta0[0]/beta0_)*VmeanT;
  mat iW0_ = iW0 + VVT - VmeanT + dMuT; // posterior inverse scale matrix
  mat W0_;
  bool ret = inv(iW0_, W0_);
  assert(ret);
  mat tmp = (W0_+transpose(W0_))*0.5; // symmetrize before the Wishart draw
  if (debug)
    std::cout<<"iW0: "<<iW0<<" VVT: "<<VVT<<" VmeanT: "<<VmeanT<<" dMuT: " <<dMuT<<"W0_"<< W0_<<" tmp: " << tmp<<" nu0_: "<<nu0_<<endl;
  A_V = wishrnd(tmp, nu0_);           // draw precision matrix
  mat tmp2;
  ret = inv(beta0_*A_V, tmp2);
  assert(ret);
  mu_V = mvnrndex(mu0_, tmp2, D, 0);  // draw mean given precision
  if (debug)
    std::cout<<"Sampling from V: A_V" <<A_V<<" mu_V: "<<mu_V<<" Vmean: "<<Vmean<<" W0_: "<<W0_<<" tmp: "<<tmp<<endl;
}
/**
 * One hyperprior Gibbs step: resample the noise precision from the
 * training residual `res`, then the user and item prior parameters.
 */
void sample_hyperpriors(double res){
  sample_alpha(res);
  sample_U();
  sample_V();
  //if (tensor)
  //  sample_T();
}
/**
 * Write the current U (users, rows [0,M)) and V (items, rows [M,M+N))
 * factor matrices to <filename>_U.mm / <filename>_V.mm in
 * matrix-market format.
 */
void output_pmf_result(std::string filename) {
  MMOutputter_mat<vertex_data> user_mat(filename + "_U.mm", 0, M , "This file contains PMF  output matrix U. In each row D factors of a single user node.", latent_factors_inmem);
  MMOutputter_mat<vertex_data> item_mat(filename + "_V.mm", M ,M+N, "This file contains PMF  output matrix V. In each row D factors of a single item node.", latent_factors_inmem);
  logstream(LOG_INFO) << "PMF output files (in matrix market format): " << filename << "_U.mm" <<
                                                                           ", " << filename + "_V.mm " << std::endl;
}
/**
 * GraphChi programs need to subclass GraphChiProgram<vertex-type, edge-type>
 * class. The main logic is usually in the update function.
 *
 * Each update() performs one Gibbs-sampling draw of a single vertex's
 * latent factor vector conditioned on its neighbors' factors, the sampled
 * noise precision `alpha`, and the side-specific hyperpriors
 * (A_U/mu_U for users, A_V/mu_V for items).
 */
struct PMFVerticesInMemProgram : public GraphChiProgram<VertexDataType, EdgeDataType> {
  /**
   * Vertex update function - computes the least square step
   */
  void update(graphchi_vertex<VertexDataType, EdgeDataType> &vertex, graphchi_context &gcontext) {
    vertex_data & vdata = latent_factors_inmem[vertex.id()];
    bool isuser = vertex.id() < M;
    mat XtX = mat::Zero(D, D);
    vec Xty = vec::Zero(D);
    bool compute_rmse = (vertex.num_outedges() > 0);
    // Compute XtX and Xty (NOTE: unweighted)
    for(int e=0; e < vertex.num_edges(); e++) {
      const edge_data & edge = vertex.edge(e)->get_data();
      float observation = edge.weight;
      vertex_data & nbr_latent = latent_factors_inmem[vertex.edge(e)->vertex_id()];
      Xty += nbr_latent.pvec * observation;
      XtX.triangularView<Eigen::Upper>() += nbr_latent.pvec * nbr_latent.pvec.transpose();
      if (compute_rmse) {
        double prediction;
        // avgprd accumulates post-burn-in predictions (see pmf_predict)
        rmse_vec[omp_get_thread_num()] += pmf_predict(vdata, nbr_latent, observation, prediction, (void*)&edge.avgprd);
        vertex.edge(e)->set_data(edge);
      }
    }
    double regularization = lambda;
    // Fix: scale the per-vertex regularization term, not the global lambda.
    // The previous code did `lambda *= vertex.num_edges();`, which mutated
    // the shared global once per vertex update — a data race across OpenMP
    // threads and an unbounded multiplicative growth of lambda over the run.
    if (regnormal)
      regularization *= vertex.num_edges();
    for(int i=0; i < D; i++) XtX(i,i) += regularization;
    // Solve the least squares problem with eigen using Cholesky decomposition
    mat iAi_;
    bool ret =inv((isuser? A_U : A_V) + alpha *  XtX, iAi_);
    assert(ret);
    vec mui_ =  iAi_*((isuser? (A_U*mu_U) : (A_V*mu_V)) + alpha * Xty);
    // draw the new factor vector from its conditional Gaussian
    vdata.pvec = mvnrndex(mui_, iAi_, D, 0);
    assert(vdata.pvec.size() == D);
  }

  /**
   * Called before an iteration is started.
   */
  void before_iteration(int iteration, graphchi_context &gcontext) {
    rmse_vec = zeros(gcontext.execthreads);
  }

  /**
   * Called after an iteration has finished: resample hyperpriors from the
   * training residual, evaluate validation error, and (after burn-in)
   * aggregate test predictions and optionally dump intermediate factors.
   */
  void after_iteration(int iteration, graphchi_context &gcontext) {
    if (iteration == pmf_burn_in){
      printf("Finished burn-in period. starting to aggregate samples\n");
    }
    if (pmf_additional_output && iiter >= pmf_burn_in){
      char buf[256];
      //fix: snprintf() guards against overflowing buf with a long path
      snprintf(buf, sizeof(buf), "%s-%d", training.c_str(), iiter-pmf_burn_in);
      output_pmf_result(buf);
    }
    double res = training_rmse(iteration, gcontext);
    sample_hyperpriors(res);
    rmse_index = 0;
    rmse_type = VALIDATION;
    validation_rmse(&pmf_predict, gcontext, 3, &validation_avgprod, pmf_burn_in);
    if (iteration >= pmf_burn_in){
      rmse_index = 0;
      rmse_type = TEST;
      test_predictions(&pmf_predict, &gcontext, iiter == niters-1, &test_avgprod);
    }
    iiter++;
  }
};
/** One-time PMF initialization: set up the hyperprior parameters. */
void init_pmf(){
  init_self_pot();
}
/**
 * Entry point for PMF: parse options, shard/load the training data,
 * initialize factors and hyperpriors, and run the Gibbs sampler for
 * `niters` iterations (the first pmf_burn_in of which are discarded).
 */
int main(int argc, const char ** argv) {
  print_copyright();
  /* GraphChi initialization will read the command line
   arguments and the configuration file. */
  graphchi_init(argc, argv);
  /* Metrics object for keeping track of performance counters
   and other information. Currently required. */
  metrics m("pmf-inmemory-factors");
  lambda = get_option_float("lambda", 0.065);
  debug = get_option_int("debug", debug);
  pmf_burn_in = get_option_int("pmf_burn_in", pmf_burn_in);
  pmf_additional_output = get_option_int("pmf_additional_output", pmf_additional_output);
  parse_command_line_args();
  parse_implicit_command_line();
  /* Preprocess data if needed, or discover preprocess files */
  int nshards = convert_matrixmarket<edge_data>(training, NULL, 0, 0, 3, TRAINING, false);
  init_feature_vectors<std::vector<vertex_data> >(M+N, latent_factors_inmem, !load_factors_from_file);
  init_pmf();
  if (load_factors_from_file){
    load_matrix_market_matrix(training + "_U.mm", 0, D);
    load_matrix_market_matrix(training + "_V.mm", M, D);
  }
  /* Run */
  PMFVerticesInMemProgram program;
  graphchi_engine<VertexDataType, EdgeDataType> engine(training, nshards, false, m);
  // true => allow out-edge modification so per-edge prediction sums persist
  set_engine_flags(engine, true);
  pengine = &engine;
  engine.run(program, niters);
  /* Report execution metrics */
  if (!quiet)
    metrics_report(m);
  return 0;
}
| C++ |
#ifndef _COMMON_H__
#define _COMMON_H__
/**
* @file
* @author Danny Bickson
* @version 1.0
*
* @section LICENSE
*
* Copyright [2012] [Carnegie Mellon University]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <algorithm>
#include <assert.h>
#include <cmath>
#include <errno.h>
#include <string>
#include "util.hpp"
#include "graphchi_basic_includes.hpp"
#include "api/vertex_aggregator.hpp"
#include "preprocessing/sharder.hpp"
#include "../../example_apps/matrix_factorization/matrixmarket/mmio.h"
#include "../../example_apps/matrix_factorization/matrixmarket/mmio.c"
#include <stdio.h>
#ifdef __APPLE__
#include "getline.hpp" //fix for missing getline() function on MAC OS
#endif
using namespace graphchi;
double minval = -1e100; //min allowed value (rating) in matrix  [comment fixed: was labeled "max"]
double maxval = 1e100;  //max allowed value (rating) in matrix  [comment fixed: was labeled "min"]
double valrange = 1; //range of allowed values in matrix (maxval - minval)
std::string training;   // training input file name (--training)
std::string validation; // validation input file name (--validation)
std::string test;       // test input file name (--test)
uint M, N, K;   // M = number of users, N = number of items; K is app-specific (e.g. top-K)
size_t L, Le;   // L = number of training ratings; Le presumably validation ratings — verify in io.hpp
uint Me, Ne;    // presumably validation matrix dimensions — verify in io.hpp
double globalMean = 0;
double globalMean2 = 0;
double rmse=0.0;
bool load_factors_from_file = false; // resume from previously saved _U.mm/_V.mm factors
int unittest = 0;
int niters = 10;          // number of iterations (--max_iter)
int halt_on_rmse_increase = 0;
int D = 20; //feature vector width
bool quiet = false;       // suppress all log output below ERROR
int input_file_offset = 1;
int kfold_cross_validation = 0;
int kfold_cross_validation_index = 0;
int regnormal = 0; // if set to 1, compute LS regularization according to the paper "Yunhong Zhou, Dennis Wilkinson, Robert Schreiber and Rong Pan. Large-Scale Parallel Collaborative Filtering for the Netflix Prize."
int clean_cache = 0; // if set to 1, remove cached shard files before running
int R_output_format = 0; // if set to 1, all matrices and vectors are written in sparse matrix market format since
// R does not currently support array format (dense format).
/* support for different loss types (for SGD variants) */
std::string loss = "square";
enum {
  LOGISTIC = 0, SQUARE = 1, ABS = 2, AP = 3
};
// display names indexed by loss_type above (SQUARE->RMSE, ABS->MAE)
const char * error_names[] = {"LOGISTIC LOSS", "RMSE", "MAE", "AP"};
int loss_type = SQUARE;
int calc_ap = 0;       // when set, report average precision (AP@ap_number)
int ap_number = 3; //AP@3
// dataset selector used by the shared io/rmse helpers
enum {
  TRAINING= 0, VALIDATION = 1, TEST = 2
};
/**
 * Delete GraphChi's cached shard files (<name>.*) for the training and,
 * if given, validation inputs.
 *
 * NOTE(review): this shells out via system("rm -fR " + path) with the
 * unquoted, user-supplied path — file names containing spaces or shell
 * metacharacters will misbehave (and this is unsafe if the path is ever
 * attacker-controlled). Consider quoting or removing files via the
 * filesystem API instead.
 */
void remove_cached_files(){
  //remove cached files
  int rc;
  assert(training != "");
  rc = system((std::string("rm -fR ") + training + std::string(".*")).c_str());
  assert(!rc);
  if (validation != ""){
    rc = system((std::string("rm -fR ") + validation + std::string(".*")).c_str());
    assert(!rc);
  }
}
/**
 * Parse the command-line options shared by all collaborative-filtering
 * applications (file names, iteration count, rating bounds, loss type,
 * k-fold cross-validation setup, cache cleaning), populating the
 * corresponding globals. Calls logstream(LOG_FATAL) on invalid input.
 */
void parse_command_line_args(){
  /* Basic arguments for application. NOTE: File will be automatically 'sharded'. */
  unittest = get_option_int("unittest", 0);
  niters    = get_option_int("max_iter", 6);  // Number of iterations
  if (unittest > 0)
    training = get_option_string("training", "");    // Base filename
  else training = get_option_string("training");
  validation = get_option_string("validation", "");
  test = get_option_string("test", "");
  D    = get_option_int("D", D);
  maxval        = get_option_float("maxval", 1e100);
  minval        = get_option_float("minval", -1e100);
  if (minval >= maxval)
    logstream(LOG_FATAL)<<"Min allowed rating (--minval) should be smaller than max allowed rating (--maxval)" << std::endl;
  valrange = maxval - minval;
  assert(valrange > 0);
  quiet = get_option_int("quiet", 0);
  if (quiet)
    global_logger().set_log_level(LOG_ERROR);
  halt_on_rmse_increase = get_option_int("halt_on_rmse_increase", 0);
  load_factors_from_file = get_option_int("load_factors_from_file", 0);
  input_file_offset = get_option_int("input_file_offset", input_file_offset);
  /* find out loss type (optional, for SGD variants only) */
  loss = get_option_string("loss", loss);
  if (loss == "square")
    loss_type = SQUARE;
  else if (loss == "logistic")
    loss_type = LOGISTIC;
  else if (loss == "abs")
    loss_type = ABS;
  else if (loss == "ap")
    loss_type = AP;
  // Fix: the message previously omitted the accepted "ap" loss and showed
  // a doubled equals sign ("--loss==square").
  else logstream(LOG_FATAL)<<"Loss type should be one of [square,logistic,abs,ap] (for example, --loss=square);" << std::endl;
  calc_ap = get_option_int("calc_ap", calc_ap);
  if (calc_ap)
    loss_type = AP;
  ap_number = get_option_int("ap_number", ap_number);
  kfold_cross_validation = get_option_int("kfold_cross_validation", kfold_cross_validation);
  kfold_cross_validation_index = get_option_int("kfold_cross_validation_index", kfold_cross_validation_index);
  if (kfold_cross_validation_index > 0){
    if (kfold_cross_validation_index >= kfold_cross_validation)
      logstream(LOG_FATAL)<<"kfold_cross_validation index should be between 0 to kfold_cross_validation-1 parameter" << std::endl;
  }
  if (kfold_cross_validation != 0){
    // Fix: "vlidation" typo in the log message.
    logstream(LOG_WARNING)<<"Activating kfold cross validation with K="<< kfold_cross_validation << std::endl;
    if (training == validation)
      logstream(LOG_FATAL)<<"Using cross validation, validation file (--validation=filename) should have a different name than training" << std::endl;
    if (validation == "")
      logstream(LOG_FATAL)<<"You must provide validation input file name (--validation=filename) when using k-fold cross validation" << std::endl;
    // cross-validation regenerates shards each fold, so force a cache clean
    clean_cache = 1;
  }
  regnormal = get_option_int("regnormal", regnormal);
  clean_cache = get_option_int("clean_cache", clean_cache);
  if (clean_cache)
    remove_cached_files();
  R_output_format = get_option_int("R_output_format", R_output_format);
}
/**
 * Apply the engine settings common to all CF apps: factors live in RAM
 * (no vertex-data storage), no deterministic parallelism, and no edge
 * writes. T is a graphchi_engine specialization.
 */
template<typename T>
void set_engine_flags(T & pengine){
  pengine.set_disable_vertexdata_storage();
  pengine.set_enable_deterministic_parallelism(false);
  pengine.set_modifies_inedges(false);
  pengine.set_modifies_outedges(false);
  pengine.set_preload_commit(false);
}
/**
 * Overload allowing out-edge writes (used by apps such as PMF that
 * persist per-edge prediction accumulators).
 */
template<typename T>
void set_engine_flags(T & pengine, bool modify_outedges){
  pengine.set_disable_vertexdata_storage();
  pengine.set_enable_deterministic_parallelism(false);
  pengine.set_modifies_inedges(false);
  pengine.set_modifies_outedges(modify_outedges);
  pengine.set_preload_commit(false);
}
/** Print the library attribution banner (emitted at WARNING so it shows by default). */
void print_copyright(){
  logstream(LOG_WARNING)<<"GraphChi Collaborative filtering library is written by Danny Bickson (c). Send any "
    " comments or bug reports to danny.bickson@gmail.com " << std::endl;
}
/** Print the effective run configuration (dimensions, threads, memory budget). */
void print_config(){
  std::cout<<"[feature_width] => [" << D << "]" << std::endl;
  std::cout<<"[users] => [" << M << "]" << std::endl;
  std::cout<<"[movies] => [" << N << "]" <<std::endl;
  std::cout<<"[training_ratings] => [" << L << "]" << std::endl;
  std::cout<<"[number_of_threads] => [" << number_of_omp_threads() << "]" <<std::endl;
  std::cout<<"[membudget_Mb] => [" << get_option_int("membudget_mb") << "]" <<std::endl;
}
/**
 * Allocate the in-memory latent-factor container and (optionally) fill
 * every factor vector with scaled uniform random values.
 * @param size       number of vertices to allocate (must be positive)
 * @param latent_factors_inmem  container resized to 'size'
 * @param randomize  when false, entries are left default-initialized
 * @param scale      multiplier applied to each drand48() draw
 */
template<typename T>
void init_feature_vectors(uint size, T & latent_factors_inmem, bool randomize = true, double scale = 1.0){
  assert(size > 0);
  srand48(time(NULL)); // seed the drand48() stream
  latent_factors_inmem.resize(size); // Initialize in-memory vertices.
  if (!randomize)
    return;
#pragma omp parallel for
  for (int vertex = 0; vertex < (int)size; vertex++){
    for (int d = 0; d < D; d++)
      latent_factors_inmem[vertex].pvec[d] = scale * drand48();
  }
}
#endif //_COMMON_H__
// ---- end of common.hpp / begin gensgd.cpp (extraction artifact removed) ----
/**
* @file
* @author Danny Bickson
* @version 1.0
*
* @section LICENSE
*
* Copyright [2012] [Carnegie Mellon University]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @section DESCRIPTION
*
* Implementation of the gensgd algorithm. A generalization of SGD algorithm when there are multiple features for each
* rating, in the form
* [from] [to] [feature1] [feature2] [feature3] ... [featureN] [rating]
* (It is also possible to dynamically specify column numbers which are relevant)
* Steffen Rendle (2010): Factorization Machines, in Proceedings of the 10th IEEE International Conference on Data Mining (ICDM 2010), Sydney, Australia.
* Original implementation by Qiang Yan, Chinese Academy of Science.
* note: this code version implements the SGD version of gensgd. In the original library there are also ALS and MCMC methods.
* Also the treatment of features is richer in gensgd. The code here can serve for a quick evaluation but the user
* is encouraged to try gensgd as well.
*/
#include <vector>
#include "common.hpp"
#include "eigen_wrapper.hpp"
#include "../parsers/common.hpp"
#include <omp.h>
#define MAX_FEATURES 256
#define FEATURE_WIDTH 11 //MAX NUMBER OF ALLOWED FEATURES IN TEXT FILE
/* SGD learning rates -- presumably one per parameter group (bias / user /
   item / feature / node-feature factors); TODO confirm the exact mapping
   in the gradient-update code. */
double gensgd_rate1 = 1e-02;
double gensgd_rate2 = 1e-02;
double gensgd_rate3 = 1e-02;
double gensgd_rate4 = 1e-02;
double gensgd_rate5 = 1e-02;
double gensgd_mult_dec = 0.9; // multiplicative learning-rate decay factor
double gensgd_regw = 1e-3; // regularization weight
double gensgd_regv = 1e-3; // regularization weight (factor vectors)
double gensgd_reg0 = 1e-1; // regularization weight (intercept)
bool debug = false; // when set, factors are initialized deterministically (see init_gensgd)
std::string user_file; //optional file with user features
std::string item_file; //optional file with item features
std::string user_links; //optional file with user to user links
int limit_rating = 0; // if > 0, read at most this many training ratings
size_t vertex_with_no_edges = 0; // counter: users that rated nothing
int calc_error = 0; // also report classification error relative to 'cutoff'
int file_columns = 0; // total number of columns per input line
std::vector<std::string> header_titles; // column names (when headers are present)
int has_header_titles = 0; // first training line holds column names
int has_user_titles = 0; // user feature file has a header line
int has_item_titles = 0; // item feature file has a header line
float cutoff = 0; // classification threshold (calc_error / binary_prediction)
size_t new_validation_users = 0; // validation entries whose user/item was unseen in training
size_t new_test_users = 0; // test entries whose user/item was unseen in training
int json_input = 0; // input lines are json-like "key":value pairs
int cold_start = 0; // policy for unseen users/items in test (see enum _cold_start)
double inputGlobalMean = 0; // global mean of training ratings (cold-start fallback)
int binary_prediction = 0; // emit 0/1 predictions thresholded at 'cutoff'
/* Running statistics (min / max / mean) collected per numeric feature
   column while reading the training file. All values start at zero. */
struct stats{
  float minval;
  float maxval;
  float meanval;
  stats() : minval(0), maxval(0), meanval(0) {}
};
/* Policy for predicting test ratings whose user and/or item never appeared
 * in training ("cold start"): NONE emits N/A, GLOBAL falls back to the
 * global training mean, ITEM falls back to the item/user average rating.
 * NOTE(review): the value 2 has no named constant here, yet
 * test_predictions_N compares cold_start == 2 directly -- confirm the
 * intended meaning and add a name for it. */
enum _cold_start{
NONE = 0,
GLOBAL = 1,
ITEM = 3
};
/**
 * Central bookkeeping for input features: string->id maps, per-feature
 * statistics, which input columns are selected, and the offset of every
 * feature group inside the global latent-factor array.
 */
struct feature_control{
  std::vector<double_map> node_id_maps; // one string->id map per group (user, item, then features)
  double_map val_map;                   // map used when ratings are categorical strings
  int rehash_value;                     // treat the rating column as a categorical string
  int last_item;                        // add a "last rated item" feature per user
  std::vector<stats> stats_array;       // min/max/mean per numeric feature
  int feature_num;                      // maximum supported number of feature columns
  int node_features;                    // number of node (user/item) feature files read
  int node_links;                       // number of link files read
  int total_features;                   // number of selected feature columns
  std::vector<bool> feature_selection;  // which input columns are used as features
  const std::string default_feature_str;
  std::vector<int> offsets;             // start index of each group in latent_factors_inmem
  bool hash_strings;                    // string tokens (true) vs. raw numeric tokens (false)
  int from_pos;                         // column index of the "from" (user) field
  int to_pos;                           // column index of the "to" (item) field
  int val_pos;                          // column index of the rating field (-1 = not fixed)

  feature_control() :
    rehash_value(0),
    last_item(0),
    feature_num(FEATURE_WIDTH),
    node_features(0),
    node_links(0),
    total_features(0),
    feature_selection(MAX_FEATURES+3),
    hash_strings(true),
    from_pos(0),
    to_pos(1),
    val_pos(-1) {}
};

feature_control fc; // the single global feature-bookkeeping instance
/**
 * Total number of latent-factor slots occupied by all feature bins:
 * in hashed mode, the sizes of all feature string->id maps (skipping the
 * user and item maps at positions 0 and 1); in numeric mode, the width of
 * each feature's observed [min,max] value range.
 */
int num_feature_bins(){
  int total = 0;
  if (fc.hash_strings){
    const int end = 2 + fc.total_features + fc.node_features;
    assert(end == (int)fc.node_id_maps.size());
    for (int map_idx = 2; map_idx < end; map_idx++)
      total += fc.node_id_maps[map_idx].string2nodeid.size();
  }
  else {
    for (int f = 0; f < fc.total_features; f++)
      total += (int)ceil((fc.stats_array[f].maxval - fc.stats_array[f].minval) + 1);
  }
  if (fc.total_features > 0)
    assert(total > 0);
  return total;
}
/* Number of distinct feature groups: user + item, plus the rating-line
   features, the optional last-item group and any node-feature files. */
int calc_feature_num(){
  int groups = 2; // user and item groups are always present
  groups += fc.total_features;
  groups += fc.last_item;
  groups += fc.node_features;
  return groups;
}
/**
 * Fill 'offsets' with the start position of every feature group inside the
 * global latent_factors_inmem array: users start at 0, items at M, the
 * first feature group at M+N, and each following group starts right after
 * the previous one ends.
 * @param offsets  output vector, pre-sized to calc_feature_num().
 */
void get_offsets(std::vector<int> & offsets){
  assert(offsets.size() >= 2);
  offsets[0] = 0;   // user nodes occupy [0, M)
  offsets[1] = M;   // item nodes occupy [M, M+N)
  if (offsets.size() >= 3)
    offsets[2] = M+N;
  if (fc.hash_strings){
    // each hashed group is as wide as its string->id map
    for (uint j=2; j< offsets.size()-1; j++){
      offsets[j+1] = offsets[j] + fc.node_id_maps[j].string2nodeid.size();
      logstream(LOG_DEBUG)<<"Offset " << j+1 << " is: " << offsets[j+1] << std::endl;
    }
  } else {
    // numeric features are binned over their observed [min,max] range.
    // BUGFIX: the loop bound was "j < offsets.size()", which wrote
    // offsets[offsets.size()] -- one element past the end of the vector
    // (the hash-string branch above correctly stops one element earlier).
    for (uint j=2; j+1 < offsets.size(); j++){
      offsets[j+1] = offsets[j] + (int)ceil((fc.stats_array[j-2].maxval-fc.stats_array[j-2].minval)+1);
      logstream(LOG_DEBUG)<<"Offset " << j+1 << " is: " << offsets[j+1] << std::endl;
    }
  }
}
/* Vertex-id range helpers: user vertices occupy ids [0,M), item vertices
   [M,M+N), and any id beyond that belongs to feature/time bins. */
bool is_user(vid_t id){ return id < M; }
bool is_item(vid_t id){ return id >= M && id < M+N; }
bool is_time(vid_t id){ return id >= M+N; }
vec errors_vec; // per-thread misclassification counters (used when calc_error != 0)
#define BIAS_POS -1 // sentinel index meaning "the bias term" in vertex_data::set_val/get_val
/** Per-vertex state: latent factor vector, scalar bias and optional
 *  side information (node features and links read from auxiliary files). */
struct vertex_data {
  vec pvec;            // latent factor vector
  double bias;         // additive bias term
  int last_item;       // last item rated by this user (when fc.last_item is set)
  float avg_rating;    // item/user average rating; -1 means "not computed"
  sparse_vec features; // optional node features
  sparse_vec links;    // links to other users or items

  vertex_data() : bias(0), last_item(0), avg_rating(-1) {}

  // index == BIAS_POS addresses the bias; any other index addresses pvec.
  void set_val(int index, float val){
    if (index == BIAS_POS)
      bias = val;
    else
      pvec[index] = val;
  }
  float get_val(int index){
    return (index == BIAS_POS) ? bias : pvec[index];
  }
};
/** Per-edge payload: the observed rating ('weight') plus up to
 *  FEATURE_WIDTH raw feature values copied from the input line. */
struct edge_data {
  float features[FEATURE_WIDTH];
  float weight;
  edge_data() : weight(0) {
    for (int i = 0; i < FEATURE_WIDTH; i++)
      features[i] = 0;
  }
  // Copies only the first 'size' features; the rest are left untouched,
  // exactly as the original memcpy-based constructor did.
  edge_data(float weight, float * valarray, int size) : weight(weight) {
    for (int i = 0; i < size; i++)
      features[i] = valarray[i];
  }
};
/**
 * Type definitions. Remember to create suitable graph shards using the
 * Sharder-program.
 */
typedef vertex_data VertexDataType;
typedef edge_data EdgeDataType; // Edges store the "rating" of user->movie pair
graphchi_engine<VertexDataType, EdgeDataType> * pengine = NULL; // the running engine instance (set elsewhere before use)
std::vector<vertex_data> latent_factors_inmem; // all latent factors: users, items, then feature bins (see fc.offsets)
/**
 * Number of vertex_data pointers needed to predict one (node,item) pair:
 * the rating-line features and optional last-item slot, plus -- for each
 * side that is present -- the node itself and its sparse features.
 * Either id may be (uint)-1, meaning "missing" (cold start).
 */
int calc_feature_node_array_size(uint node, uint item){
  const bool have_node = (node != (uint)-1);
  const bool have_item = (item != (uint)-1);
  if (have_node){
    assert(node <= M);
    assert(node < latent_factors_inmem.size());
  }
  if (have_item){
    assert(item <= N);
    assert(fc.offsets[1]+item < latent_factors_inmem.size());
  }
  int count = fc.total_features + fc.last_item;
  if (have_node)
    count += 1 + nnz(latent_factors_inmem[node].features);
  if (have_item)
    count += 1 + nnz(latent_factors_inmem[fc.offsets[1]+item].features);
  assert(count > 0);
  return count;
}
/**
* return a numeric node ID out of the string text read from file (training, validation or test)
* @param pch        token text (never NULL)
* @param pos        column role: 0 = user, 1 = item, >=2 = feature column
*                   (also indexes fc.node_id_maps in hashed mode)
* @param i          input line number (for error messages)
* @param read_only  lookup-only mode (validation/test): unseen tokens yield
*                   -1 instead of being inserted into the map
*/
float get_node_id(char * pch, int pos, size_t i, bool read_only = false){
assert(pch != NULL);
assert(i >= 0);
float ret;
//read numeric id
if (!fc.hash_strings){
// user/item columns are integers, feature columns may be real-valued
ret = (pos < 2 ? atoi(pch) : atof(pch));
if (pos < 2)
ret-=input_file_offset;
if (pos == 0 && ret >= M)
logstream(LOG_FATAL)<<"Row index larger than the matrix row size " << ret << " > " << M << " in line: " << i << std::endl;
else if (pos == 1 && ret >= N)
logstream(LOG_FATAL)<<"Col index larger than the matrix row size " << ret << " > " << N << " in line: " << i << std::endl;
}
//else read string id and assign numeric id
else {
uint id;
assert(pos < (int)fc.node_id_maps.size());
if (read_only){ // find if node was in map
std::map<std::string,uint>::iterator it = fc.node_id_maps[pos].string2nodeid.find(pch);
if (it != fc.node_id_maps[pos].string2nodeid.end()){
ret = it->second;
assert(ret < fc.node_id_maps[pos].string2nodeid.size());
}
else ret = -1; // token unseen during training
}
else { //else enter node into map (in case it did not exist) and return its position
assign_id(fc.node_id_maps[pos], id, pch);
assert(id < fc.node_id_maps[pos].string2nodeid.size());
ret = id;
}
}
// in training mode every token must resolve to a valid id
if (!read_only)
assert(ret != -1);
return ret;
}
/**
 * Parse the rating value of one input line. With fc.rehash_value set, the
 * rating is a categorical string mapped to a numeric id via fc.val_map;
 * otherwise it is parsed as a float (a leading '"' is skipped, e.g. for
 * quoted json values).
 * @param read_only  lookup-only mode: unseen string values yield -1
 *                   instead of being added to the map.
 * Aborts (LOG_FATAL) on NaN/inf values.
 */
float get_value(char * pch, bool read_only){
float ret;
if (!fc.rehash_value){
if ( pch[0] == '"' ) {
pch++; // skip opening quote
}
ret = atof(pch);
}
else {
uint id;
if (read_only){ // find if node was in map
std::map<std::string,uint>::iterator it = fc.val_map.string2nodeid.find(pch);
if (it != fc.val_map.string2nodeid.end()){
ret = it->second;
}
else ret = -1; // value string unseen during training
}
else { //else enter node into map (in case it did not exist) and return its position
assign_id(fc.val_map, id, pch);
assert(id < fc.val_map.string2nodeid.size());
ret = id;
}
}
if (std::isnan(ret) || std::isinf(ret))
logstream(LOG_FATAL)<<"Failed to read value" << std::endl;
return ret;
}
/**
 * Extract the next token from 'linebuf' using strsep(). For json-style
 * input each logical token is a "key":value pair: the key is consumed
 * first (and recorded as a header title on line 0), empty fragments
 * produced by consecutive separators are skipped, and the value token is
 * returned. For TEST input a missing token returns NULL instead of
 * aborting; TRAINING input aborts via LOG_FATAL.
 */
char * read_one_token(char *& linebuf, const char * pspaces, size_t i, char * linebuf_debug, int token, int type = TRAINING){
char *pch = strsep(&linebuf,pspaces);
if (pch == NULL && type == TRAINING)
logstream(LOG_FATAL)<<"Error reading line " << i << " [ " << linebuf_debug << " ] " << std::endl;
else if (pch == NULL && type == TEST)
return NULL;
if (json_input){
//for json, multiple separators may lead to empty strings, we simply skip them
while(pch && !strcmp(pch, "")){
pch = strsep(&linebuf, pspaces);
if (pch == NULL)
logstream(LOG_FATAL)<<"Error reading line " << i << " [ " << linebuf_debug << " ] " << " token number: " << token << std::endl;
}
//token should not be empty
assert(strcmp(pch, ""));
if (i == 0)
header_titles.push_back(pch); // first line: json keys double as column titles
pch = strsep(&linebuf, pspaces);
//for json, multiple separators may lead to empty strings, we simply skip them
while(pch && !strcmp(pch, "")){
pch = strsep(&linebuf, pspaces);
if (pch == NULL)
logstream(LOG_FATAL)<<"Error reading line " << i << " [ " << linebuf_debug << " ] " << " token number: " << token << std::endl;
}
}
return pch;
}
/* Read and parse one input line from file */
/**
 * Parse one rating line into (I, J, val, valarray). Column roles come from
 * fc.from_pos / fc.to_pos / fc.val_pos; every other column selected in
 * fc.feature_selection is parsed as a feature into valarray (others are
 * skipped). TRAINING lines also accumulate per-feature min/max/mean stats
 * in numeric (non-hashed) mode.
 * @return true on success; a TEST line that ends early also returns true.
 * Aborts (LOG_FATAL) on read failures or malformed TRAINING lines.
 */
bool read_line(FILE * f, const std::string filename, size_t i, uint & I, uint & J, float &val, std::vector<float>& valarray, int type, char * linebuf_debug){
char * linebuf = NULL;
size_t linesize = 0;
int token = 0;
int index = 0; // next free slot in valarray
int rc = getline(&linebuf, &linesize, f);
if (rc == -1){
perror("getline");
logstream(LOG_FATAL)<<"Failed to get line: " << i << " in file: " << filename << std::endl;
}
// strsep() advances linebuf; keep the original pointer for free()
char * linebuf_to_free = linebuf;
strncpy(linebuf_debug, linebuf, 1024);
assert(file_columns >= 2);
const char* spaces[] = {"\t,\r\n "};
const char * json_spaces[] = {"\t,\r\n \":{}"}; // json mode also strips quotes/braces/colons
const char * pspaces = ((!json_input) ? *spaces : *json_spaces);
char * pch = NULL;
while (token < file_columns){
/* READ FROM */
if (token == fc.from_pos){
pch = read_one_token(linebuf, pspaces, i, linebuf_debug, token);
// read_only lookup for validation/test: unseen users come back as (uint)-1
I = (uint)get_node_id(pch, 0, i, type != TRAINING);
if (type == TRAINING){
assert( I >= 0 && I < M);
}
token++;
}
else if (token == fc.to_pos){
/* READ TO */
pch = read_one_token(linebuf, pspaces, i, linebuf_debug, token);
J = (uint)get_node_id(pch, 1, i, type != TRAINING);
if (type == TRAINING)
assert(J >= 0 && J < N);
token++;
}
else if (token == fc.val_pos){
/* READ RATING */
pch = read_one_token(linebuf, pspaces, i, linebuf_debug, token, type);
if (pch == NULL && type == TEST)
return true; // test lines may omit the rating
val = get_value(pch, type != TRAINING);
token++;
}
else {
if (token >= file_columns)
break;
/* READ FEATURES */
pch = read_one_token(linebuf, pspaces, i, linebuf_debug, token, type);
if (pch == NULL && type == TEST)
return true;
if (!fc.feature_selection[token]){
token++;
continue; // column not selected as a feature -> ignore
}
assert(index < (int)valarray.size());
valarray[index] = get_node_id(pch, index+2, i, type != TRAINING);
if (type == TRAINING)
if (std::isnan(valarray[index]))
logstream(LOG_FATAL)<<"Error reading line " << i << " feature " << token << " [ " << linebuf_debug << " ] " << std::endl;
//calc stats about ths feature
if (type == TRAINING && !fc.hash_strings){
fc.stats_array[index].minval = std::min(fc.stats_array[index].minval, valarray[index]);
fc.stats_array[index].maxval = std::max(fc.stats_array[index].maxval, valarray[index]);
fc.stats_array[index].meanval += valarray[index];
}
index++;
token++;
}
}//end while
free(linebuf_to_free);
return true;
}//end read_line
/* compute an edge prediction based on input features */
/**
 * Assemble the array of latent-factor nodes participating in one rating
 * (user node, item node, rating-line feature bins, user/item node features,
 * optional last-item bin) and evaluate the prediction function on it.
 *
 * @param I               user id, or (uint)-1 when unknown (cold start)
 * @param J               item id, or (uint)-1 when unknown (cold start)
 * @param val             observed rating
 * @param prediction      output: computed prediction
 * @param valarray        raw feature values parsed from the rating line
 * @param prediction_func FM prediction function to evaluate
 * @param psum            output: per-dimension factor sums (reused by SGD)
 * @param node_array      pre-allocated array of calc_feature_node_array_size(I,J) slots
 * @return squared error (val - prediction)^2
 */
float compute_prediction(
    const uint I,
    const uint J,
    const float val,
    double & prediction,
    float * valarray,
    float (*prediction_func)(const vertex_data ** array, int arraysize, float rating, double & prediction, vec * psum),
    vec * psum,
    vertex_data **& node_array){

  if (I == (uint)-1 && J == (uint)-1)
    logstream(LOG_FATAL)<<"BUG: can not compute prediction for new user and new item" << std::endl;
  if (J != (uint)-1)
    assert(J >=0 && J <= N);
  if (I != (uint)-1)
    assert(I>=0 && I <= M);

  /* COMPUTE PREDICTION */
  /* USER NODE **/
  int index = 0; // next free slot in node_array
  int loc = 0;   // current feature-group number (indexes fc.offsets)
  if (I != (uint)-1){
    node_array[index] = &latent_factors_inmem[I+fc.offsets[loc]];
    if (node_array[index]->pvec[0] >= 1e5)
      logstream(LOG_FATAL)<<"Got into numerical problem, try to decrease SGD step size" << std::endl;
    index++;
  }
  loc++;
  /* 1) ITEM NODE */
  if (J != (uint)-1){
    // BUGFIX: the bounds check used fc.offsets[index], but the slot actually
    // addressed below is J+fc.offsets[loc]; index != loc whenever the user id
    // is missing, so the old assert checked the wrong offset.
    assert(J+fc.offsets[loc] < latent_factors_inmem.size());
    node_array[index] = &latent_factors_inmem[J+fc.offsets[loc]];
    if (node_array[index]->pvec[0] >= 1e5)
      logstream(LOG_FATAL)<<"Got into numerical problem, try to decrease SGD step size" << std::endl;
    index++;
  }
  loc++;
  /* 2) FEATURES GIVEN IN RATING LINE */
  for (int j=0; j< fc.total_features; j++){
    uint pos = (uint)ceil(valarray[j]+fc.offsets[j+loc]-fc.stats_array[j].minval);
    //assert(pos >= 0 && pos < latent_factors_inmem.size());
    if (pos < 0 || pos >= latent_factors_inmem.size())
      logstream(LOG_FATAL)<<"Bug: j is: " << j << " fc.total_features " << fc.total_features << " index : " << index << " loc: " << loc <<
        " fc.offsets " << fc.offsets[j+loc] << " vlarray[j] " << valarray[j] << " pos: " << pos << " latent_factors_inmem.size() " << latent_factors_inmem.size() << std::endl;
    node_array[j+index] = & latent_factors_inmem[pos];
    if (node_array[j+index]->pvec[0] >= 1e5)
      logstream(LOG_FATAL)<<"Got into numerical problem, try to decrease SGD step size" << std::endl;
  }
  index+= fc.total_features;
  loc += fc.total_features;
  /* 3) USER FEATURES */
  // NOTE(review): when user_file is set but I == (uint)-1, 'loc' is not
  // advanced past the user-feature group -- verify the item-feature offsets
  // below are still correct in that cold-start case.
  if (user_file != ""){
    if (I != (uint)-1){
      int i = 0;
      FOR_ITERATOR(j, latent_factors_inmem[I+fc.offsets[0]].features){
        int pos;
        if (user_links != ""){
          pos = j.index(); // feature indexes point directly at user nodes
          assert(pos < (int)M);
        }
        else {
          pos = j.index()+fc.offsets[loc];
          assert((uint)loc < fc.node_id_maps.size());
          assert(j.index() < (int)fc.node_id_maps[loc].string2nodeid.size());
          assert(pos >= 0 && pos < (int)latent_factors_inmem.size());
          assert(pos >= (int)fc.offsets[loc]);
        }
        //logstream(LOG_INFO)<<"setting index " << i+index << " to: " << pos << std::endl;
        node_array[i+index] = & latent_factors_inmem[pos];
        if (node_array[i+index]->pvec[0] >= 1e5)
          logstream(LOG_FATAL)<<"Got into numerical problem, try to decrease SGD step size" << std::endl;
        i++;
      }
      assert(i == nnz(latent_factors_inmem[I+fc.offsets[0]].features));
      index+= nnz(latent_factors_inmem[I+fc.offsets[0]].features);
      loc+=1;
    }
  }
  /* 4) ITEM FEATURES */
  if (item_file != ""){
    if (J != (uint)-1){
      int i=0;
      FOR_ITERATOR(j, latent_factors_inmem[J+fc.offsets[1]].features){
        uint pos = j.index()+fc.offsets[loc];
        assert(j.index() < (int)fc.node_id_maps[loc].string2nodeid.size());
        assert(pos >= 0 && pos < latent_factors_inmem.size());
        assert(pos >= (uint)fc.offsets[loc]);
        //logstream(LOG_INFO)<<"setting index " << i+index << " to: " << pos << std::endl;
        node_array[i+index] = & latent_factors_inmem[pos];
        if (node_array[i+index]->pvec[0] >= 1e5)
          logstream(LOG_FATAL)<<"Got into numerical problem, try to decrease SGD step size" << std::endl;
        i++;
      }
      assert(i == nnz(latent_factors_inmem[J+fc.offsets[1]].features));
      index+= nnz(latent_factors_inmem[J+fc.offsets[1]].features);
      loc+=1;
    }
  }
  /* 5) LAST-ITEM FEATURE */
  if (fc.last_item){
    uint pos = latent_factors_inmem[I].last_item + fc.offsets[2+fc.total_features+fc.node_features];
    assert(pos < latent_factors_inmem.size());
    node_array[index] = &latent_factors_inmem[pos];
    if (node_array[index]->pvec[0] >= 1e5)
      logstream(LOG_FATAL)<<"Got into numerical problem, try to decrease SGD step size" << std::endl;
    index++;
    loc+=1;
  }
  assert(index == calc_feature_node_array_size(I,J));
  (*prediction_func)((const vertex_data**)node_array, calc_feature_node_array_size(I,J), val, prediction, psum);
  // squared error; plain multiplication instead of pow(x,2)
  double err = val - prediction;
  return err*err;
}
#include "io.hpp"
#include "../parsers/common.hpp"
/**
 * Create a bipartite graph from a matrix. Each row corresponds to vertex
 * with the same id as the row number (0-based), but vertices correponsing to columns
 * have id + num-rows.
 * Line format of the type
 * [user] [item] [feature1] [feature2] ... [featureN] [rating]
 */
/* Read input file, process it and save a binary representation for faster loading */
template <typename als_edge_type>
int convert_matrixmarket_N(std::string base_filename, bool square, feature_control & fc, int limit_rating = 0) {
// Note, code based on: http://math.nist.gov/MatrixMarket/mmio/c/example_read.c
FILE *f;
size_t nz;
/**
* Create sharder object
*/
int nshards;
sharder<als_edge_type> sharderobj(base_filename);
sharderobj.start_preprocessing();
detect_matrix_size(base_filename, f, M, N, nz);
if (f == NULL)
logstream(LOG_FATAL) << "Could not open file: " << base_filename << ", error: " << strerror(errno) << std::endl;
if (M == 0 && N == 0)
logstream(LOG_FATAL)<<"Failed to detect matrix size. Please prepare a file named: " << base_filename << ":info with matrix market header, as explained here: http://bickson.blogspot.co.il/2012/12/collaborative-filtering-3rd-generation_14.html " << std::endl;
logstream(LOG_INFO) << "Starting to read matrix-market input. Matrix dimensions: " << M << " x " << N << ", non-zeros: " << nz << std::endl;
// optionally consume a header line holding column titles
if (has_header_titles){
char * linebuf = NULL;
size_t linesize;
char linebuf_debug[1024];
/* READ LINE */
int rc = getline(&linebuf, &linesize, f);
if (rc == -1)
logstream(LOG_FATAL)<<"Error header line " << " [ " << linebuf_debug << " ] " << std::endl;
strncpy(linebuf_debug, linebuf, 1024);
char *pch = strtok(linebuf,"\t,\r;");
if (pch == NULL)
logstream(LOG_FATAL)<<"Error header line " << " [ " << linebuf_debug << " ] " << std::endl;
header_titles.push_back(pch);
while (pch != NULL){
pch = strtok(NULL, "\t,\r;");
if (pch == NULL)
break;
header_titles.push_back(pch);
}
}
compute_matrix_size(nz, TRAINING);
uint I, J;
int val_array_len = std::max(1, fc.total_features);
assert(val_array_len < FEATURE_WIDTH);
std::vector<float> valarray; valarray.resize(val_array_len);
float val;
// numeric mode: prime the stats with extreme sentinels before scanning
if (!fc.hash_strings){
for (int i=0; i< fc.total_features; i++){
fc.stats_array[i].minval = 1e100;
fc.stats_array[i].maxval = -1e100;
}
}
if (limit_rating > 0 && limit_rating < (int)nz)
nz = limit_rating;
char linebuf_debug[1024];
for (size_t i=0; i<nz; i++)
{
if (!read_line(f, base_filename, i,I, J, val, valarray, TRAINING, linebuf_debug))
logstream(LOG_FATAL)<<"Failed to read line: " <<i<< " in file: " << base_filename << std::endl;
if (I>= M || J >= N || I < 0 || J < 0){
if (i == 0)
logstream(LOG_FATAL)<<"Failed to parsed first line, there are too many tokens. Did you forget the --has_header_titles=1 flag when file has string column headers?" << std::endl;
else
logstream(LOG_FATAL)<<"Bug: can not add edge from " << I << " to J " << J << " since max is: " << M <<"x" <<N<<std::endl;
}
bool active_edge = decide_if_edge_is_active(i, TRAINING);
if (active_edge){
//calc stats
globalMean += val;
// item vertex ids are shifted by M unless the matrix is square
sharderobj.preprocessing_add_edge(I, square?J:M+J, als_edge_type(val, &valarray[0], val_array_len));
}
}
sharderobj.end_preprocessing();
//calc stats
assert(L > 0);
for (int i=0; i< fc.total_features; i++){
fc.stats_array[i].meanval /= L;
}
//assert(globalMean != 0);
if (globalMean == 0)
logstream(LOG_WARNING)<<"Found global mean of the data to be zero (val_pos). Please verify this is correct." << std::endl;
globalMean /= L;
logstream(LOG_INFO)<<"Computed global mean is: " << globalMean << std::endl;
inputGlobalMean = globalMean;
//print features
for (int i=0; i< fc.total_features; i++){
logstream(LOG_INFO) << "Feature " << i << " min val: " << fc.stats_array[i].minval << " max val: " << fc.stats_array[i].maxval << " mean val: " << fc.stats_array[i].meanval << std::endl;
}
// persist matrix metadata to the .gm side file.
// NOTE(review): the format string ends without '\n' after globalMean, so the
// first feature's minval is written on the same line -- confirm the .gm
// reader tolerates this (whitespace-delimited scanf-style parsing would).
FILE * outf = fopen((base_filename + ".gm").c_str(), "w");
fprintf(outf, "%d\n%d\n%ld\n%d\n%12.8lg", M, N, L, fc.total_features, globalMean);
for (int i=0; i < fc.total_features; i++){
fprintf(outf, "%12.8g\n%12.8g\n%12.8g\n", fc.stats_array[i].minval, fc.stats_array[i].maxval, fc.stats_array[i].meanval);
}
fclose(outf);
fclose(f);
// sanity: every hashed feature column must have produced at least one id
if (fc.hash_strings){
for (int i=0; i< fc.total_features+2; i++){
if (fc.node_id_maps[i].string2nodeid.size() == 0)
logstream(LOG_FATAL)<<"Failed to save feature number : " << i << " no values find in data " << std::endl;
}
}
logstream(LOG_INFO) << "Now creating shards." << std::endl;
// Shard with a specified number of shards, or determine automatically if not defined
nshards = sharderobj.execute_sharding(get_option_string("nshards", "auto"));
return nshards;
}
/* read node features from file */
/**
 * Load per-node (user or item) side features. Each line is:
 *   [node id] [feature token]...
 * Node ids are looked up read-only (nodes unseen in training are skipped and
 * counted); feature tokens are hashed into a fresh string->id map appended
 * to fc.node_id_maps, and stored in the node's sparse 'features' vector.
 * @param user    true = user feature file, false = item feature file
 * @param binary  currently forced to true below (weighted features TODO)
 */
void read_node_features(std::string base_filename, bool square, feature_control & fc, bool user, bool binary) {
FILE *f;
if ((f = fopen(base_filename.c_str(), "r")) == NULL) {
logstream(LOG_FATAL) << "Could not open file: " << base_filename << ", error: " << strerror(errno) << std::endl;
}
binary = true; //TODO
double_map fmap;
fc.node_id_maps.push_back(fmap);
fc.node_features++;
stats stat;
fc.stats_array.push_back(stat);
uint I, J = -1;
char * linebuf = NULL;
char linebuf_debug[1024];
size_t linesize;
size_t lines = 0;
size_t tokens = 0;
float val = 1;
int missing_nodes = 0;
while(true){
/* READ LINE */
int rc = getline(&linebuf, &linesize, f);
if (rc == -1)
break;
strncpy(linebuf_debug, linebuf, 1024);
lines++;
//skip over header titles (if any)
if (lines == 1 && user && has_user_titles)
continue;
else if (lines == 1 && !user && has_item_titles)
continue;
/** READ [FROM] */
char *pch = strtok(linebuf,"\t,\r; ");
if (pch == NULL)
logstream(LOG_FATAL)<<"Error reading line " << lines << " [ " << linebuf_debug << " ] " << std::endl;
I = (uint)get_node_id(pch, user?0:1, lines, true);
if (I == (uint)-1){ //user id was not found in map, so we do not need this users features
missing_nodes++;
continue;
}
if (user)
assert(I >= 0 && I < M);
else assert(I>=0 && I< N);
/** READ USER FEATURES */
while (pch != NULL){
pch = strtok(NULL, "\t,\r; ");
if (pch == NULL)
break;
if (binary){
// hash the token into the newly appended feature map
J = (uint)get_node_id(pch, 2+fc.total_features+fc.node_features-1, lines);
}
else {
// weighted features: token is followed by a numeric value
pch = strtok(NULL, "\t\r,;: ");
if (pch == NULL)
logstream(LOG_FATAL)<<"Failed to read feture value" << std::endl;
val = atof(pch);
}
assert(J >= 0);
if (user)
assert(I < latent_factors_inmem.size());
else assert(I+M < latent_factors_inmem.size());
set_new(latent_factors_inmem[user? I : I+M].features, J, val);
tokens++;
//update stats if needed
}
}
assert(tokens > 0);
// NOTE(review): "avg tokens" prints lines/tokens -- tokens-per-line would be
// tokens/lines; looks inverted, confirm before relying on this log value.
logstream(LOG_DEBUG)<<"Read a total of " << lines << " node features. Tokens: " << tokens << " avg tokens: " << (lines/tokens)
<< " user? " << user << " new entries: " << fc.node_id_maps[2+fc.total_features+fc.node_features-1].string2nodeid.size() << std::endl;
if (missing_nodes > 0)
std::cerr<<"Warning: missing: " << missing_nodes << " from node feature file: " << base_filename << " out of: " << lines << std::endl;
}
/* read node links from file */
/**
 * Load node-to-node links (e.g. a user->user social graph). Each line is:
 *   [from id] [to id]
 * The "from" id is looked up read-only (unknown nodes are skipped); the
 * "to" id may be added to the map. Links are stored with weight 1 in the
 * node's sparse 'links' vector.
 * @param user  true = user links, false = item links
 */
void read_node_links(std::string base_filename, bool square, feature_control & fc, bool user, bool binary) {
FILE *f;
if ((f = fopen(base_filename.c_str(), "r")) == NULL) {
logstream(LOG_FATAL) << "Could not open file: " << base_filename << ", error: " << strerror(errno) << std::endl;
}
//double_map fmap;
//fc.node_id_maps.push_back(fmap);
fc.node_links++;
//stats stat;
//fc.stats_array.push_back(stat);
uint I, J = -1;
char * linebuf = NULL;
char linebuf_debug[1024];
size_t linesize;
size_t lines = 0;
size_t tokens = 0;
float val = 1;
while(true){
/* READ LINE */
int rc = getline(&linebuf, &linesize, f);
if (rc == -1)
break;
strncpy(linebuf_debug, linebuf, 1024);
lines++;
/** READ [FROM] */
char *pch = strtok(linebuf,"\t,\r; ");
if (pch == NULL)
logstream(LOG_FATAL)<<"Error reading line " << lines << " [ " << linebuf_debug << " ] " << std::endl;
I = (uint)get_node_id(pch, user? 0 : 1, lines, true);
if (I == (uint)-1)//user id was not found in map, we do not need this user link features
continue;
if (user)
assert(I < (uint)fc.offsets[1]);
else assert(I < (uint)fc.offsets[2]);
/** READ TO */
pch = strtok(NULL, "\t,\r; ");
if (pch == NULL)
logstream(LOG_FATAL)<<"Failed to read to field [ " << linebuf_debug << " ] " << std::endl;
J = (uint)get_node_id(pch, user? 0 : 1, lines);
set_new(latent_factors_inmem[user? I : I+M].links, J, val);
tokens++;
//update stats if needed
}
logstream(LOG_DEBUG)<<"Read a total of " << lines << " node features. Tokens: " << tokens << " user? " << user << " new entries: " << fc.node_id_maps[user? 0 : 1].string2nodeid.size() << std::endl;
}
#include "rmse.hpp"
/**
compute validation rmse
*/
/**
 * Scan the validation file, compute the model's prediction for every active
 * validation rating, and report the validation RMSE (and classification
 * error when calc_error is set). Entries whose user or item was unseen in
 * training are skipped and counted in new_validation_users. If
 * halt_on_rmse_increase is set and the RMSE went up, the engine is stopped.
 */
void validation_rmse_N(
float (*prediction_func)(const vertex_data ** array, int arraysize, float rating, double & prediction, vec * psum)
,graphchi_context & gcontext,
feature_control & fc,
bool square = false) {
assert(fc.total_features <= fc.feature_num);
if ((validation == "") || !file_exists(validation)) {
if ((validation != (training + "e")) && gcontext.iteration == 0)
logstream(LOG_WARNING) << "Validation file was specified, but not found:" << validation << std::endl;
std::cout << std::endl;
return;
}
FILE *f = NULL;
size_t nz;
detect_matrix_size(validation, f, Me, Ne, nz);
if (f == NULL){
logstream(LOG_WARNING)<<"Failed to open validation data. Skipping."<<std::endl;
return;
}
if ((M > 0 && N > 0) && (Me != M || Ne != N))
logstream(LOG_WARNING)<<"Input size of validation matrix must be identical to training matrix, namely " << M << "x" << N << std::endl;
compute_matrix_size(nz, VALIDATION);
last_validation_rmse = dvalidation_rmse;
dvalidation_rmse = 0;
double validation_error = 0;
std::vector<float> valarray; valarray.resize(fc.total_features);
uint I, J;
float val;
char linebuf_debug[1024];
for (size_t i=0; i<nz; i++)
{
// guard: parsing a validation line must not grow the feature maps
int size = num_feature_bins();
if (!read_line(f, validation, i, I, J, val, valarray, VALIDATION, linebuf_debug))
logstream(LOG_FATAL)<<"Failed to read line: " << i << " in file: " << validation << std::endl;
bool active_edge = decide_if_edge_is_active(i, VALIDATION);
if (active_edge){
assert(size == num_feature_bins());
size = 0; //to avoid warning
if (I == (uint)-1 || J == (uint)-1){
new_validation_users++;
continue;
}
double prediction;
vertex_data ** node_array = new vertex_data*[calc_feature_node_array_size(I,J)];
for (int k=0; k< calc_feature_node_array_size(I,J); k++)
node_array[k] = NULL;
vec sum;
compute_prediction(I, J, val, prediction, &valarray[0], prediction_func, &sum, node_array);
delete [] node_array;
dvalidation_rmse += pow(prediction - val, 2);
if (calc_error)
if ((prediction < cutoff && val > cutoff) || (prediction > cutoff && val < cutoff))
validation_error++;
}
}
fclose(f);
assert(Le > 0);
dvalidation_rmse = sqrt(dvalidation_rmse / (double)Le);
std::cout<<" Validation RMSE: " << std::setw(10) << dvalidation_rmse;
if (!calc_error)
std::cout << std::endl;
else std::cout << " Validation error: " << std::setw(10) << validation_error/Le << std::endl;
if (halt_on_rmse_increase && dvalidation_rmse > last_validation_rmse && gcontext.iteration > 0){
logstream(LOG_WARNING)<<"Stopping engine because of validation RMSE increase" << std::endl;
gcontext.set_last_iteration(gcontext.iteration);
}
}
/* compute predictions for test data */
/**
 * Read the test file and write one prediction per line to <test>.predict.
 * Cold-start entries (user and/or item unseen in training) are handled
 * according to the 'cold_start' policy: N/A, the global training mean, or
 * the item/user average rating. With binary_prediction set, predictions
 * are thresholded at 'cutoff' into 0/1.
 */
void test_predictions_N(
float (*prediction_func)(const vertex_data ** node_array, int node_array_size, float rating, double & predictioni, vec * sum),
feature_control & fc,
bool square = false) {
FILE * f = NULL;
uint Mt, Nt;
size_t nz;
if (test == ""){
logstream(LOG_INFO)<<"No test file was found, skipping test predictions " << std::endl;
return;
}
if (!file_exists(test)) {
if (test != (training + "t"))
logstream(LOG_WARNING)<<" test predictions file was specified but not found: " << test << std::endl;
return;
}
detect_matrix_size(test, f, Mt, Nt, nz);
if (f == NULL){
logstream(LOG_WARNING)<<"Failed to open test file. Skipping " << std::endl;
return;
}
if ((M > 0 && N > 0 ) && (Mt != M || Nt != N))
logstream(LOG_FATAL)<<"Input size of test matrix must be identical to training matrix, namely " << M << "x" << N << std::endl;
FILE * fout = open_file((test + ".predict").c_str(),"w");
std::vector<float> valarray; valarray.resize(fc.total_features);
float val;
double prediction;
uint I,J;
uint i=0;
char linebuf_debug[1024];
for (i=0; i<nz; i++)
{
if (!read_line(f, test, i, I, J, val, valarray, TEST, linebuf_debug))
logstream(LOG_FATAL)<<"Failed to read line: " <<i << " in file: " << test << std::endl;
// cold start: at least one side of the rating was unseen in training
if (I == (uint)-1 || J == (uint)-1){
if (cold_start == NONE){
fprintf(fout, "N/A\n");
new_test_users++;
}
// NOTE(review): '2' is a magic value with no _cold_start enum name (the
// enum jumps from GLOBAL=1 to ITEM=3) -- confirm intent and name it.
else if (cold_start ==2 || (cold_start == 1 && I ==(uint)-1 && J==(uint)-1)){
fprintf(fout, "%12.8g\n", inputGlobalMean);
new_test_users++;
}
else if (cold_start == ITEM && I == (uint)-1 && J != (uint)-1)
fprintf(fout, "%12.8g\n", latent_factors_inmem[fc.offsets[1]+J].avg_rating);
else if (cold_start == ITEM && I != (uint)-1 && J == (uint)-1)
fprintf(fout, "%12.8g\n", latent_factors_inmem[I].avg_rating);
else if (cold_start == ITEM){
fprintf(fout, "%12.8g\n", inputGlobalMean);
new_test_users++;
}
continue;
}
vertex_data ** node_array = new vertex_data*[calc_feature_node_array_size(I,J)];
vec sum;
compute_prediction(I, J, val, prediction, &valarray[0], prediction_func, &sum, node_array);
if (binary_prediction)
prediction = (prediction > cutoff);
fprintf(fout, "%12.8lg\n", prediction);
delete[] node_array;
}
// NOTE(review): unreachable -- the for loop above always exits with i == nz.
if (i != nz)
logstream(LOG_FATAL)<<"Missing input lines in test file. Should be : " << nz << " found only " << i << std::endl;
fclose(f);
fclose(fout);
logstream(LOG_INFO)<<"Finished writing " << nz << " predictions to file: " << test << ".predict" << std::endl;
}
/* This function implements equation (5) in the libFM paper:
 * http://www.csie.ntu.edu.tw/~b97053/paper/Factorization%20Machines%20with%20libFM.pdf
 * Note that in our implementation x_i are all 1 so the formula is slightly simpler */
/**
 * Factorization-machine prediction over the assembled node array:
 * globalMean + sum of biases + 0.5 * sum_j ((sum_i v_ij)^2 - sum_i v_ij^2),
 * clipped to [minval, maxval].
 * @param sum  output: per-dimension factor sums (reused by the SGD update)
 * @return the squared error (rating - prediction)^2
 */
float gensgd_predict(const vertex_data** node_array, int node_array_size,
    const float rating, double& prediction, vec* sum){
  vec sum_sqr = zeros(D);
  *sum = zeros(D);
  prediction = globalMean;
  assert(!std::isnan(prediction));
  // first-order terms: the bias of every participating node
  for (int i=0; i< node_array_size; i++)
    prediction += node_array[i]->bias;
  assert(!std::isnan(prediction));
  // second-order terms, one latent dimension at a time
  for (int j=0; j< D; j++){
    for (int i=0; i< node_array_size; i++){
      sum->operator[](j) += node_array[i]->pvec[j];
      if (sum->operator[](j) >= 1e5)
        logstream(LOG_FATAL)<<"Got into numerical problems. Try to decrease step size" << std::endl;
      // x*x instead of pow(x,2): avoids a transcendental-function call
      sum_sqr[j] += node_array[i]->pvec[j] * node_array[i]->pvec[j];
    }
    prediction += 0.5 * (sum->operator[](j) * sum->operator[](j) - sum_sqr[j]);
    assert(!std::isnan(prediction));
  }
  //truncate prediction to allowed values
  prediction = std::min((double)prediction, maxval);
  prediction = std::max((double)prediction, minval);
  //return the squared error
  float err = rating - prediction;
  assert(!std::isnan(err));
  return err*err;
}
float gensgd_predict(const vertex_data** node_array, int node_array_size,
const float rating, double & prediction){
vec sum;
return gensgd_predict(node_array, node_array_size, rating, prediction, &sum);
}
void init_gensgd(bool load_factors_from_file){
srand(time(NULL));
int nodes = M+N+num_feature_bins()+fc.last_item*M;
latent_factors_inmem.resize(nodes);
int howmany = calc_feature_num();
logstream(LOG_DEBUG)<<"Going to calculate: " << howmany << " offsets." << std::endl;
fc.offsets.resize(howmany);
get_offsets(fc.offsets);
assert(D > 0);
if (!load_factors_from_file){
double factor = 0.1/sqrt(D);
#pragma omp parallel for
for (int i=0; i< nodes; i++){
latent_factors_inmem[i].pvec = (debug ? 0.1*ones(D) : (::randu(D)*factor));
}
}
}
/**
 * Report the current iteration's training RMSE (and, when calc_error is
 * set, the classification error) from the per-thread accumulators in
 * rmse_vec / errors_vec, normalized by the total number of edges.
 * @param iteration  current engine iteration (display only)
 * @param gcontext   engine context (unused; kept for call-site compatibility)
 * @param items      historical flag; the previous implementation computed an
 *                   unused [start,end) vertex range from it (dead code,
 *                   removed) -- the printed RMSE never depended on it.
 */
void training_rmse_N(int iteration, graphchi_context &gcontext, bool items = false){
  last_training_rmse = dtraining_rmse;
  size_t total_errors = 0;
  dtraining_rmse = sum(rmse_vec);
  if (calc_error)
    total_errors = (size_t)sum(errors_vec);
  dtraining_rmse = sqrt(dtraining_rmse / pengine->num_edges());
  if (calc_error)
    std::cout<< std::setw(10) << mytimer.current_time() << ") Iteration: " << std::setw(3) <<iteration<<" Training RMSE: " << std::setw(10)<< dtraining_rmse << " Train err: " << std::setw(10) << (total_errors/(double)L);
  else
    std::cout<< std::setw(10) << mytimer.current_time() << ") Iteration: " << std::setw(3) <<iteration<<" Training RMSE: " << std::setw(10)<< dtraining_rmse;
}
/**
* GraphChi programs need to subclass GraphChiProgram<vertex-type, edge-type>
* class. The main logic is usually in the update function.
*/
/**
 * GraphChi program implementing one SGD pass of the gensgd (feature-based
 * matrix factorization) model. GraphChi programs subclass
 * GraphChiProgram<vertex-type, edge-type>; the main logic is in update().
 */
struct GensgdVerticesInMemProgram : public GraphChiProgram<VertexDataType, EdgeDataType> {
  /*
   * Vertex update function - performs one SGD step per observed rating.
   * Iteration 0 is used for preprocessing (last-item detection, item
   * average ratings); later iterations do the actual gradient updates.
   */
  void update(graphchi_vertex<VertexDataType, EdgeDataType> &vertex, graphchi_context &gcontext) {
    // Preprocessing pass: record each user's most recently rated item.
    if (fc.last_item && gcontext.iteration == 0){
      if (is_user(vertex.id()) && vertex.num_outedges() > 0) { //user node. find the last rated item and store it. we assume items are sorted by time!
        vertex_data& user = latent_factors_inmem[vertex.id()];
        int max_time = 0;
        for(int e=0; e < vertex.num_outedges(); e++) {
          const edge_data & edge = vertex.outedge(e)->get_data();
          if (edge.features[0] >= max_time){ //first feature is time
            max_time = (int)ceil(edge.features[0]);
            // item ids are offset by M (users occupy ids [0,M))
            user.last_item = vertex.outedge(e)->vertex_id() - M;
          }
        }
      }
      else if (is_user(vertex.id()) && vertex.num_outedges() == 0)
        vertex_with_no_edges++;
      return;
    }
    // Cold-start preprocessing: compute each item's average observed rating.
    if (cold_start == ITEM && gcontext.iteration == 0){
      vertex_data & item = latent_factors_inmem[vertex.id()];
      item.avg_rating = 0;
      for(int e=0; e < vertex.num_edges(); e++) {
        item.avg_rating += vertex.edge(e)->get_data().weight;
      }
      item.avg_rating /= vertex.num_edges();
    }
    //go over all user nodes
    if (is_user(vertex.id())){
      //vertex_data& user = latent_factors_inmem[vertex.id()];
      //assert(user.last_item >= 0 && user.last_item < (int)N);
      //go over all observed ratings
      for(int e=0; e < vertex.num_outedges(); e++) {
        // collect pointers to every latent-factor node involved in this
        // rating (user, item, edge features, node features, last item)
        int howmany = calc_feature_node_array_size(vertex.id(), vertex.outedge(e)->vertex_id()-M);
        vertex_data ** node_array = new vertex_data*[howmany];
        for (int i=0; i< howmany; i++)
          node_array[i] = NULL;
        const edge_data & data = vertex.outedge(e)->get_data();
        float rui = data.weight;
        double pui;
        vec sum;
        //compute current prediction (also fills node_array and sum)
        rmse_vec[omp_get_thread_num()] += compute_prediction(vertex.id(), vertex.outedge(e)->vertex_id()-M, rui ,pui, (float*)data.features, gensgd_predict, &sum, node_array);
        if (calc_error)
          // count a misclassification when prediction and rating fall on
          // opposite sides of the cutoff threshold
          if ((pui < cutoff && rui > cutoff) || (pui > cutoff && rui < cutoff))
            errors_vec[omp_get_thread_num()]++;
        float eui = pui - rui;
        //update global mean bias
        globalMean -= gensgd_rate1 * (eui + gensgd_reg0 * globalMean);
        //update node biases and vectors, with a learning rate chosen by
        //the role the node plays in the prediction
        for (int i=0; i < calc_feature_node_array_size(vertex.id(), vertex.outedge(e)->vertex_id()-M); i++){
          double gensgd_rate;
          if (i == 0) //user
            gensgd_rate = gensgd_rate1;
          else if (i == 1) //item
            gensgd_rate = gensgd_rate2;
          else if (i < 2+fc.total_features) //rating features
            gensgd_rate = gensgd_rate3;
          else if (i < 2+fc.total_features+fc.node_features) //user and item features
            gensgd_rate = gensgd_rate4;
          else
            gensgd_rate = gensgd_rate5; //last item
          node_array[i]->bias -= gensgd_rate * (eui + gensgd_regw* node_array[i]->bias);
          assert(!std::isnan(node_array[i]->bias));
          assert(node_array[i]->bias < 1e5);
          // factorization-machine style gradient: sum over all factors
          // minus this node's own contribution
          vec grad = sum - node_array[i]->pvec;
          node_array[i]->pvec -= gensgd_rate * (eui*grad + gensgd_regv * node_array[i]->pvec);
          assert(!std::isnan(node_array[i]->pvec[0]));
          assert(node_array[i]->pvec[0] < 1e5);
        }
        // node_array holds non-owning pointers into latent_factors_inmem;
        // only the pointer array itself is freed here
        delete[] node_array;
      }
    }
  };
  /**
   * Called after an iteration has finished: decay all learning rates and
   * report training/validation error.
   */
  void after_iteration(int iteration, graphchi_context &gcontext) {
    if (iteration == 1 && vertex_with_no_edges > 0)
      logstream(LOG_WARNING)<<"There are " << vertex_with_no_edges << " users without ratings" << std::endl;
    gensgd_rate1 *= gensgd_mult_dec;
    gensgd_rate2 *= gensgd_mult_dec;
    gensgd_rate3 *= gensgd_mult_dec;
    gensgd_rate4 *= gensgd_mult_dec;
    gensgd_rate5 *= gensgd_mult_dec;
    training_rmse_N(iteration, gcontext);
    validation_rmse_N(&gensgd_predict, gcontext, fc);
  };
  /**
   * Called before an iteration is started: zero the per-thread accumulators.
   */
  void before_iteration(int iteration, graphchi_context &gcontext) {
    rmse_vec = zeros(gcontext.execthreads);
    if (calc_error)
      errors_vec = zeros(gcontext.execthreads);
  }
};
/**
 * Write the trained gensgd model to disk in matrix-market format:
 * the factor matrix, the bias vector, the global mean, and (when string
 * hashing is enabled) the string-to-index feature maps.
 *
 * @param filename  base name; suffixes _U.mm, _U_bias.mm, _global_mean.mm
 *                  and .map.<i> are appended.
 */
void output_gensgd_result(std::string filename) {
  MMOutputter_mat<vertex_data> mmoutput(filename + "_U.mm", 0, latent_factors_inmem.size(), "This file contains Gensgd output matrices. In each row D factors of a single user node, then item nodes, then features", latent_factors_inmem);
  MMOutputter_vec<vertex_data> mmoutput_bias(filename + "_U_bias.mm", 0, latent_factors_inmem.size(), BIAS_POS, "This file contains Gensgd output bias vector. In each row a single user bias.", latent_factors_inmem);
  MMOutputter_scalar gmean(filename + "_global_mean.mm", "This file contains Gensgd global mean which is required for computing predictions.", globalMean);
  //output mapping between string to array index of features.
  if (fc.hash_strings){
    assert(2+fc.total_features+fc.node_features == (int)fc.node_id_maps.size());
    for (int i=0; i < 2+fc.total_features+fc.node_features; i++){
      char buf[256];
      // BUGFIX: snprintf instead of sprintf -- a long base filename could
      // overflow the fixed 256-byte buffer
      snprintf(buf, sizeof(buf), "%s.map.%d", filename.c_str(), i);
      save_map_to_text_file(fc.node_id_maps[i].string2nodeid, buf, fc.offsets[i]);
    }
  }
  logstream(LOG_INFO) << " GENSGD output files (in matrix market format): " << filename << "_U.mm" << ", "<< filename << "_global_mean.mm, " << filename << "_U_bias.mm " <<std::endl;
}
int main(int argc, const char ** argv) {
print_copyright();
/* GraphChi initialization will read the command line
arguments and the configuration file. */
graphchi_init(argc, argv);
/* Metrics object for keeping track of performance counters
and other information. Currently required. */
metrics m("als-tensor-inmemory-factors");
//specific command line parameters for gensgd
gensgd_rate1 = get_option_float("gensgd_rate1", gensgd_rate1);
gensgd_rate2 = get_option_float("gensgd_rate2", gensgd_rate2);
gensgd_rate3 = get_option_float("gensgd_rate3", gensgd_rate3);
gensgd_rate4 = get_option_float("gensgd_rate4", gensgd_rate4);
gensgd_rate5 = get_option_float("gensgd_rate5", gensgd_rate5);
gensgd_regw = get_option_float("gensgd_regw", gensgd_regw);
gensgd_regv = get_option_float("gensgd_regv", gensgd_regv);
gensgd_reg0 = get_option_float("gensgd_reg0", gensgd_reg0);
gensgd_mult_dec = get_option_float("gensgd_mult_dec", gensgd_mult_dec);
fc.last_item = get_option_int("last_item", fc.last_item);
fc.hash_strings = get_option_int("rehash", fc.hash_strings);
user_file = get_option_string("user_file", user_file);
user_links = get_option_string("user_links", user_links);
item_file = get_option_string("item_file", item_file);
file_columns = get_option_int("file_columns"); //get the number of columns in the edge file
if (file_columns < 3)
logstream(LOG_FATAL)<<"You must have at least 3 columns in input file: [from] [to] [value] on each line"<<std::endl;
if (file_columns >= FEATURE_WIDTH)
logstream(LOG_FATAL)<<"file_columns exceeds the allowed storage limit - please increase FEATURE_WIDTH and recompile." << std::endl;
D = get_option_int("D", D);
if (D <=2 || D>= 300)
logstream(LOG_FATAL)<<"Allowed range for latent factor vector D is [2,300]." << std::endl;
fc.from_pos = get_option_int("from_pos", fc.from_pos);
fc.to_pos = get_option_int("to_pos", fc.to_pos);
fc.val_pos = get_option_int("val_pos", fc.val_pos);
if (fc.from_pos >= file_columns || fc.to_pos >= file_columns || fc.val_pos >= file_columns)
logstream(LOG_FATAL)<<"Please note that column numbering of from_pos, to_pos and val_pos starts from zero and should be smaller than file_columns" << std::endl;
if (fc.from_pos == fc.to_pos || fc.from_pos == fc.val_pos || fc.to_pos == fc.val_pos)
logstream(LOG_FATAL)<<"from_pos, to_pos and val_pos should have uniqu values" << std::endl;
if (fc.val_pos == -1)
logstream(LOG_FATAL)<<"you must specify a target column using --val_pos=XXX. Colmn index starts from 0." << std::endl;
limit_rating = get_option_int("limit_rating", limit_rating);
calc_error = get_option_int("calc_error", calc_error);
has_header_titles = get_option_int("has_header_titles", has_header_titles);
has_user_titles = get_option_int("has_user_titles", has_user_titles);
has_item_titles = get_option_int("has_item_titles", has_item_titles);
fc.rehash_value = get_option_int("rehash_value", fc.rehash_value);
cutoff = get_option_float("cutoff", cutoff);
json_input = get_option_int("json_input", json_input);
cold_start = get_option_int("cold_start", cold_start);
binary_prediction = get_option_int("binary_prediction", 0);
parse_command_line_args();
parse_implicit_command_line();
std::string string_features = get_option_string("features", fc.default_feature_str);
if (string_features != ""){
char * pfeatures = strdup(string_features.c_str());
char * pch = strtok(pfeatures, ",\n\r\t ");
int node = atoi(pch);
if (node < 0 || node >= MAX_FEATURES+3)
logstream(LOG_FATAL)<<"Feature id using the --features=XX command should be non negative, starting from zero"<<std::endl;
if (node >= file_columns)
logstream(LOG_FATAL)<<"Feature id using the --feature=XX command should be < file_columns (counting starts from zero)" << std::endl;
fc.feature_selection[node] = true;
fc.total_features++;
while ((pch = strtok(NULL, ",\n\r\t "))!= NULL){
node = atoi(pch);
if (node < 0 || node >= MAX_FEATURES+3)
logstream(LOG_FATAL)<<"Feature id using the --features=XX command should be non negative, starting from zero"<<std::endl;
fc.feature_selection[node] = true;
fc.total_features++;
}
}
fc.node_id_maps.resize(2+fc.total_features);
fc.stats_array.resize(fc.total_features);
int nshards = convert_matrixmarket_N<edge_data>(training, false, fc, limit_rating);
init_gensgd(load_factors_from_file);
if (user_file != "")
read_node_features(user_file, false, fc, true, false);
if (item_file != "")
read_node_features(item_file, false, fc, false, false);
if (user_links != "")
read_node_links(user_links, false, fc, true, false);
if (json_input)
has_header_titles = 1;
if (has_header_titles && header_titles.size() == 0)
logstream(LOG_FATAL)<<"Please delete temp files (using : \"rm -f " << training << ".*\") and run again" << std::endl;
logstream(LOG_INFO) <<"Total selected features: " << fc.total_features << " : " << std::endl;
for (int i=0; i < MAX_FEATURES+3; i++)
if (fc.feature_selection[i])
logstream(LOG_INFO)<<"Selected feature: " << std::setw(3) << i << " : " << (has_header_titles? header_titles[i] : "") <<std::endl;
logstream(LOG_INFO)<<"Target variable " << std::setw(3) << fc.val_pos << " : " << (has_header_titles? header_titles[fc.val_pos] : "") <<std::endl;
logstream(LOG_INFO)<<"From " << std::setw(3) << fc.from_pos<< " : " << (has_header_titles? header_titles[fc.from_pos] : "") <<std::endl;
logstream(LOG_INFO)<<"To " << std::setw(3) << fc.to_pos << " : " << (has_header_titles? header_titles[fc.to_pos] : "") <<std::endl;
if (fc.node_features){
int last_offset = fc.node_id_maps.size();
int toadd = 0;
for (int i = last_offset - fc.node_features; i < last_offset; i++){
toadd += fc.node_id_maps[i].string2nodeid.size();
}
logstream(LOG_DEBUG)<<"Going to add " << toadd << std::endl;
vertex_data data;
for (int i=0; i < toadd; i++){
data.pvec = zeros(D);
for (int j=0; j < D; j++)
data.pvec[j] = drand48();
latent_factors_inmem.push_back(data);
}
fc.offsets.resize(calc_feature_num());
get_offsets(fc.offsets);
}
if (load_factors_from_file){
load_matrix_market_matrix(training + "_U.mm", 0, D);
vec user_bias = load_matrix_market_vector(training +"_U_bias.mm", false, true);
assert(user_bias.size() == num_feature_bins());
for (uint i=0; num_feature_bins(); i++){
latent_factors_inmem[i].bias = user_bias[i];
}
vec gm = load_matrix_market_vector(training + "_global_mean.mm", false, true);
globalMean = gm[0];
}
/* Run */
GensgdVerticesInMemProgram program;
graphchi_engine<VertexDataType, EdgeDataType> engine(training, nshards, false, m);
set_engine_flags(engine);
pengine = &engine;
engine.run(program, niters);
/* Output test predictions in matrix-market format */
output_gensgd_result(training);
test_predictions_N(&gensgd_predict, fc);
if (new_validation_users > 0)
logstream(LOG_WARNING)<<"Found " << new_validation_users<< " new users with no information about them in training dataset!" << std::endl;
if (new_test_users > 0)
std::cout<<"Found " << new_test_users<< " new test users with no information about them in training dataset!" << std::endl;
/* Report execution metrics */
if (!quiet)
metrics_report(m);
return 0;
}
| C++ |
/*
* @file
* @author Danny Bickson
* @version 1.0
*
* @section LICENSE
*
* Copyright [2012] [Carnegie Mellon University]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @section DESCRIPTION
*
* Matrix factorization with the Alternating Least Squares (ALS) algorithm
* using sparse factors. Sparsity is obtained using the CoSaMP algorithm.
*
*
*/
#include "cosamp.hpp"
#include "eigen_wrapper.hpp"
#include "common.hpp"
double lambda = 0.065;
/**
 * Per-vertex state for sparse ALS: a D-dimensional latent factor vector,
 * zero-initialized at construction. set_val/get_val give the generic
 * element access the I/O helpers expect.
 */
struct vertex_data {
  vec pvec; // latent factor vector, length D
  vertex_data() {
    pvec = zeros(D);
  }
  // set one latent factor coordinate (used when loading factors from file)
  void set_val(int index, float val){
    pvec[index] = val;
  }
  // read one latent factor coordinate (used when saving factors to file)
  float get_val(int index){
    return pvec[index];
  }
};
/**
* Type definitions. Remember to create suitable graph shards using the
* Sharder-program.
*/
typedef vertex_data VertexDataType;
typedef float EdgeDataType; // Edges store the "rating" of user->movie pair
graphchi_engine<VertexDataType, EdgeDataType> * pengine = NULL;
graphchi_engine<VertexDataType, EdgeDataType> * pvalidation_engine = NULL;
std::vector<vertex_data> latent_factors_inmem;
#include "io.hpp"
//algorithm run mode
enum {
SPARSE_USR_FACTOR = 1, SPARSE_ITM_FACTOR = 2, SPARSE_BOTH_FACTORS = 3
};
int algorithm;
double user_sparsity;
double movie_sparsity;
#include "rmse.hpp"
#include "rmse_engine.hpp"
/** compute a missing value based on ALS algorithm */
/**
 * Predict the rating of a (user, movie) pair as the dot product of their
 * latent factor vectors, clamped to the allowed [minval, maxval] range.
 * Writes the clamped prediction into 'prediction' and returns the squared
 * error against the observed rating.
 */
float sparse_als_predict(const vertex_data& user,
    const vertex_data& movie,
    const float rating,
    double & prediction,
    void * extra = NULL){
  prediction = user.pvec.dot(movie.pvec);
  //truncate prediction to allowed values
  if (prediction > maxval)
    prediction = maxval;
  if (prediction < minval)
    prediction = minval;
  //return the squared error
  float err = rating - prediction;
  assert(!std::isnan(err));
  return err*err;
}
/**
* GraphChi programs need to subclass GraphChiProgram<vertex-type, edge-type>
* class. The main logic is usually in the update function.
*/
/**
 * GraphChi program implementing one sparse-ALS sweep: each vertex solves a
 * regularized least-squares problem against its neighbors' factors, and
 * (depending on the selected algorithm mode) sparsifies the solution with
 * CoSaMP.
 */
struct ALSVerticesInMemProgram : public GraphChiProgram<VertexDataType, EdgeDataType> {
  /**
   * Vertex update function - computes the least square step
   */
  void update(graphchi_vertex<VertexDataType, EdgeDataType> &vertex, graphchi_context &gcontext) {
    vertex_data & vdata = latent_factors_inmem[vertex.id()];
    mat XtX = mat::Zero(D, D);
    vec Xty = vec::Zero(D);
    // only user vertices (those with out-edges) contribute to training RMSE,
    // so each observation is counted once
    bool compute_rmse = (vertex.num_outedges() > 0);
    // Compute XtX and Xty (NOTE: unweighted)
    for(int e=0; e < vertex.num_edges(); e++) {
      float observation = vertex.edge(e)->get_data();
      vertex_data & nbr_latent = latent_factors_inmem[vertex.edge(e)->vertex_id()];
      Xty += nbr_latent.pvec * observation;
      XtX += nbr_latent.pvec * nbr_latent.pvec.transpose();
      if (compute_rmse) {
        double prediction;
        rmse_vec[omp_get_thread_num()] += sparse_als_predict(vdata, nbr_latent, observation, prediction);
      }
    }
    double regularization = lambda;
    // BUGFIX: this used to do 'lambda *= vertex.num_edges()', mutating the
    // GLOBAL lambda once per vertex (and racing across OMP threads), so the
    // regularizer grew without bound. Scale the local copy instead.
    if (regnormal)
      regularization *= vertex.num_edges();
    for(int i=0; i < D; i++) XtX(i,i) += regularization;
    bool isuser = vertex.id() < (uint)M;
    if (algorithm == SPARSE_BOTH_FACTORS || (algorithm == SPARSE_USR_FACTOR && isuser) ||
        (algorithm == SPARSE_ITM_FACTOR && !isuser)){
      // solve with a sparsity constraint: keep only a fraction of the D
      // coordinates, as configured via --user_sparsity / --movie_sparsity
      double sparsity_level = 1.0;
      if (isuser)
        sparsity_level -= user_sparsity;
      else sparsity_level -= movie_sparsity;
      vdata.pvec = CoSaMP(XtX, Xty, (int)ceil(sparsity_level*(double)D), 10, 1e-4, D);
    }
    // otherwise plain regularized least squares via LDLT decomposition
    else vdata.pvec = XtX.selfadjointView<Eigen::Upper>().ldlt().solve(Xty);
  }
  /**
   * Called before an iteration is started: zero the per-thread RMSE sums.
   */
  void before_iteration(int iteration, graphchi_context &gcontext) {
    reset_rmse(gcontext.execthreads);
  }
  /**
   * Called after an iteration has finished: report training and validation error.
   */
  void after_iteration(int iteration, graphchi_context &gcontext) {
    training_rmse(iteration, gcontext);
    run_validation(pvalidation_engine, gcontext);
  }
};
/**
 * Save the factor matrices U (users) and V (items) in matrix-market format
 * under <filename>_U.mm and <filename>_V.mm.
 */
void output_als_result(std::string filename) {
  const std::string u_name = filename + "_U.mm";
  const std::string v_name = filename + "_V.mm";
  MMOutputter_mat<vertex_data> user_mat(u_name, 0, M, "This file contains ALS output matrix U. In each row D factors of a single user node.", latent_factors_inmem);
  MMOutputter_mat<vertex_data> item_mat(v_name, M, M+N, "This file contains ALS output matrix V. In each row D factors of a single item node.", latent_factors_inmem);
  logstream(LOG_INFO) << "ALS output files (in matrix market format): " << u_name << ", " << v_name << " " << std::endl;
}
/**
 * Sparse-ALS driver: parses options, validates the sparsity/algorithm
 * settings, preprocesses the input matrix, runs the ALS engine, and saves
 * the factor matrices and test predictions.
 */
int main(int argc, const char ** argv) {
  print_copyright();
  /* GraphChi initialization will read the command line
     arguments and the configuration file. */
  graphchi_init(argc, argv);
  /* Metrics object for keeping track of performance counters
     and other information. Currently required. */
  metrics m("als-inmemory-factors");
  lambda = get_option_float("lambda", 0.065);
  user_sparsity = get_option_float("user_sparsity", 0.9);
  movie_sparsity = get_option_float("movie_sparsity", 0.9);
  algorithm = get_option_int("algorithm", SPARSE_USR_FACTOR);
  parse_command_line_args();
  parse_implicit_command_line();
  // sparsity levels must fall in [0.5, 1): the kept fraction is 1-sparsity
  if (user_sparsity < 0.5 || user_sparsity >= 1)
    logstream(LOG_FATAL)<<"Sparsity level should be [0.5,1). Please run again using --user_sparsity=XX in this range" << std::endl;
  if (movie_sparsity < 0.5 || movie_sparsity >= 1)
    logstream(LOG_FATAL)<<"Sparsity level should be [0.5,1). Please run again using --movie_sparsity=XX in this range" << std::endl;
  if (algorithm != SPARSE_USR_FACTOR && algorithm != SPARSE_BOTH_FACTORS && algorithm != SPARSE_ITM_FACTOR)
    logstream(LOG_FATAL)<<"Algorithm should be 1 for SPARSE_USR_FACTOR, 2 for SPARSE_ITM_FACTOR and 3 for SPARSE_BOTH_FACTORS" << std::endl;
  /* Preprocess data if needed, or discover preprocess files */
  int nshards = convert_matrixmarket<EdgeDataType>(training);
  init_feature_vectors<std::vector<vertex_data> >(M+N, latent_factors_inmem, !load_factors_from_file);
  // optional validation set: build a separate engine that measures RMSE on it
  if (validation != ""){
    int vshards = convert_matrixmarket<EdgeDataType>(validation, NULL, 0, 0, 3, VALIDATION);
    init_validation_rmse_engine<VertexDataType, EdgeDataType>(pvalidation_engine, vshards, &sparse_als_predict);
  }
  // warm start from a previously saved model
  if (load_factors_from_file){
    load_matrix_market_matrix(training + "_U.mm", 0, D);
    load_matrix_market_matrix(training + "_V.mm", M, D);
  }
  /* Run */
  ALSVerticesInMemProgram program;
  graphchi_engine<VertexDataType, EdgeDataType> engine(training, nshards, false, m);
  set_engine_flags(engine);
  pengine = &engine;
  engine.run(program, niters);
  /* Output latent factor matrices in matrix-market format */
  output_als_result(training);
  test_predictions(&sparse_als_predict);
  /* Report execution metrics */
  if (!quiet)
    metrics_report(m);
  return 0;
}
| C++ |
#ifndef __GRAPHCHI_MRR_ENGINE
#define __GRAPHCHI_MRR_ENGINE
/**
* @file
* @author Mark Levy
* @version 1.0
*
* @section LICENSE
*
* Copyright [2012] [Carnegie Mellon University]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* File for aggregating and displaying error measures and algorithm progress
*/
#include <set>
#include <sstream>
#include "climf.hpp"
vec mrr_vec; // cumulative sum of MRR per thread
vec users_vec; // user count per thread
int num_threads = 1;
int cur_iteration = 0;
/**
* GraphChi programs need to subclass GraphChiProgram<vertex-type, edge-type>
* class. The main logic is usually in the update function.
*/
/**
 * GraphChi program that computes Mean Reciprocal Rank (MRR) over the
 * validation set: for each user, rank all items by predicted score and
 * record the reciprocal rank of the first item the user actually liked.
 * Per-thread partial sums go into mrr_vec / users_vec.
 */
struct ValidationMRRProgram : public GraphChiProgram<VertexDataType, EdgeDataType> {
  /**
   * compute MRR for a single user
   */
  void update(graphchi_vertex<VertexDataType, EdgeDataType> &vertex, graphchi_context &gcontext) {
    if (vertex.id() < M)
    {
      // we're at a user node
      const vec & U = latent_factors_inmem[vertex.id()].pvec;
      // item ids (offset by M) the user is known to like in the validation set
      std::set<int> known_likes;
      {
        for(int j = 0; j < vertex.num_edges(); j++)
        {
          if (is_relevant(vertex.edge(j)))
          {
            known_likes.insert(vertex.edge(j)->vertex_id() - M);
          }
        }
      }
      if (!known_likes.empty())
      {
        // make predictions: score every item against this user
        ivec indices = ivec::Zero(N);
        vec distances = zeros(N);
        for (uint i = M; i < M+N; i++)
        {
          const vec & V = latent_factors_inmem[i].pvec;
          indices[i-M] = i-M;
          distances[i-M] = dot(U,V);
        }
        // keep only the top num_ratings items (descending score)
        int num_predictions = std::min(num_ratings, static_cast<int>(N));
        vec sorted_distances(num_predictions);
        ivec sorted_indices = reverse_sort_index2(distances, indices, sorted_distances, num_predictions);
        // compute actual MRR: 1/(rank of first relevant item), or 0 if no
        // relevant item appears in the top list
        double MRR = 0;
        for (uint i = 0; i < sorted_indices.size(); ++i)
        {
          if (known_likes.find(sorted_indices[i]) != known_likes.end())
          {
            MRR = 1.0/(i+1);
            break;
          }
        }
        assert(mrr_vec.size() > omp_get_thread_num());
        mrr_vec[omp_get_thread_num()] += MRR;
        assert(users_vec.size() > omp_get_thread_num());
        users_vec[omp_get_thread_num()]++;
      }
    }
  }
  // reset the per-thread accumulators before each validation pass
  void before_iteration(int iteration, graphchi_context & gcontext)
  {
    users_vec = zeros(num_threads);
    mrr_vec = zeros(num_threads);
  }
  /**
   * Called after an iteration has finished: report the averaged MRR.
   * NOTE(review): if no user had any relevant item, sum(users_vec) is 0 and
   * this prints NaN -- presumably acceptable for a progress display; confirm.
   */
  void after_iteration(int iteration, graphchi_context &gcontext)
  {
    double mrr = sum(mrr_vec) / sum(users_vec);
    std::cout<<" Validation MRR:" << std::setw(10) << mrr << std::endl;
  }
};
void reset_mrr(int exec_threads)
{
logstream(LOG_DEBUG)<<"Detected number of threads: " << exec_threads << std::endl;
num_threads = exec_threads;
mrr_vec = zeros(num_threads);
}
/**
 * Build the GraphChi engine that runs the MRR validation pass over the
 * validation shards and hand it back through pvalidation_engine.
 * A shard count of -1 means no validation data was preprocessed, in which
 * case pvalidation_engine is left untouched (NULL disables validation).
 * NOTE(review): the metrics object and engine are heap-allocated and never
 * freed -- presumably intentional, they live for the whole run; confirm.
 */
template<typename VertexDataType, typename EdgeDataType>
void init_mrr_engine(graphchi_engine<VertexDataType,EdgeDataType> *& pvalidation_engine, int nshards)
{
  if (nshards == -1)
    return;
  metrics * m = new metrics("validation_mrr_engine");
  graphchi_engine<VertexDataType, EdgeDataType> * engine = new graphchi_engine<VertexDataType, EdgeDataType>(validation, nshards, false, *m);
  set_engine_flags(*engine);
  pvalidation_engine = engine;
}
/**
 * Run one MRR validation pass on the validation engine (if one exists).
 * Records the current training iteration so the validation output can be
 * correlated with training progress.
 */
template<typename VertexDataType, typename EdgeDataType>
void run_validation(graphchi_engine<VertexDataType, EdgeDataType> * pvalidation_engine, graphchi_context & context)
{
  cur_iteration = context.iteration;
  //no validation data, no need to run validation engine calculations
  if (pvalidation_engine == NULL)
    return;
  ValidationMRRProgram mrr_program;
  pvalidation_engine->run(mrr_program, 1);
}
#endif //__GRAPHCHI_MRR_ENGINE
| C++ |
/**
* Copyright (c) 2009 Carnegie Mellon University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an "AS
* IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language
* governing permissions and limitations under the License.
*
* For more about this software visit:
*
* http://graphchi.org
*
* Written by Danny Bickson
*
*/
#include "common.hpp"
#include "types.hpp"
#include "eigen_wrapper.hpp"
#include "timer.hpp"
using namespace std;
int nshards;
int input_cols = 3;
/* Metrics object for keeping track of performance counters
and other information. Currently required. */
metrics m("svd-inmemory-factors");
/**
 * Per-vertex state for the Lanczos SVD solver.
 * NOTE(review): the constructor leaves pvec empty; it is sized later in
 * init_lanczos() -- set_val must not be called before that. Confirm against
 * the loading path.
 */
struct vertex_data {
  vec pvec;      // slice of the Krylov basis vectors stored at this vertex
  double value;
  double A_ii;   // diagonal entry of A associated with this vertex
  vertex_data(){ value = 0; A_ii = 1; }
  //TODO void add_self_edge(double value) { A_ii = value; }
  // generic field setter used by the matrix-market loading helpers
  void set_val(double value, int field_type) {
    pvec[field_type] = value;
  }
  //double get_output(int field_type){ return pred_x; }
}; // end of vertex_data
/**
 * Edge payload: a single observation weight (the matrix entry).
 * The two-argument constructor matches the 4-column parser interface;
 * its second value is accepted and discarded.
 */
struct edge_data {
  float weight; // observed matrix entry
  edge_data(double w = 0) : weight(w) { }
  edge_data(double w, double ignored) : weight(w) { }
  //void set_field(int pos, double val){ weight = val; }
  //double get_field(int pos){ return weight; }
};
/**
* Type definitions. Remember to create suitable graph shards using the
* Sharder-program.
*/
typedef vertex_data VertexDataType;
typedef edge_data EdgeDataType; // Edges store the "rating" of user->movie pair
graphchi_engine<VertexDataType, EdgeDataType> * pengine = NULL;
std::vector<vertex_data> latent_factors_inmem;
#include "io.hpp"
/**
*
* Implementation of the Lanczos algorithm, as given in:
* http://en.wikipedia.org/wiki/Lanczos_algorithm
*
* Code written by Danny Bickson, CMU, June 2011
* */
//LANCZOS VARIABLES
int max_iter = 10;
bool no_edge_data = false;
int actual_vector_len;
int nv = 0;
int nsv = 0;
double tol = 1e-8;
bool finished = false;
int ortho_repeats = 3;
bool save_vectors = false;
std::string format = "matrixmarket";
int nodes = 0;
int data_size = max_iter;
#include "math.hpp"
#include "printouts.hpp"
/**
 * Allocate per-vertex storage for the Lanczos iteration: every vertex gets
 * a zero vector long enough to hold all Krylov basis slices
 * (nsv + nv + 1 + max_iter entries, doubled for square matrices where U and
 * V share the vertex set).
 */
void init_lanczos(bipartite_graph_descriptor & info){
  srand48(time(NULL));
  latent_factors_inmem.resize(info.total());
  data_size = nsv + nv+1 + max_iter;
  if (info.is_square())
    data_size *= 2;
  actual_vector_len = data_size;
#pragma omp parallel for
  for (int i=0; i< info.total(); i++){
    latent_factors_inmem[i].pvec = zeros(actual_vector_len);
  }
  logstream(LOG_INFO)<<"Allocated a total of: " << ((double)actual_vector_len * info.total() * sizeof(double)/ 1e6) << " MB for storing vectors." << std::endl;
}
/**
 * Restarted Golub-Kahan-Lanczos bidiagonalization for computing the top
 * singular triplets of the sparse matrix A (distributed over the graph).
 * Each restart builds a bidiagonal matrix (alpha on the diagonal, beta on
 * the superdiagonal), takes its dense SVD, estimates per-value errors, and
 * locks in ("converges") the triplets whose error estimate is below tol.
 *
 * @param info     matrix shape descriptor (rows/cols/square-ness)
 * @param mytimer  wall-clock timer, used for progress printouts only
 * @param errest   output: per-singular-value error estimates
 * @param vecfile  optional file the starting vector was loaded from; when
 *                 empty a random starting vector is generated here
 * @return vector of computed singular values (length data_size; the first
 *         nconv entries are the converged ones)
 */
vec lanczos( bipartite_graph_descriptor & info, timer & mytimer, vec & errest,
    const std::string & vecfile){
  int nconv = 0;  // number of converged singular triplets so far
  int its = 1;    // restart counter
  DistMat A(info);
  // U and V hold the left/right Lanczos basis vectors; for square inputs
  // both live in the same vertex slice space, hence the offset/doubling
  DistSlicedMat U(info.is_square() ? data_size : 0, info.is_square() ? 2*data_size : data_size, true, info, "U");
  DistSlicedMat V(0, data_size, false, info, "V");
  vec alpha, beta, b;
  vec sigma = zeros(data_size);
  errest = zeros(nv);
  DistVec v_0(info, 0, false, "v_0");
  if (vecfile.size() == 0)
    v_0 = randu(size(A,2));
  PRINT_VEC2("svd->V", v_0);
  // normalize the starting vector
  DistDouble vnorm = norm(v_0);
  v_0=v_0/vnorm;
  PRINT_INT(nv);
  while(nconv < nsv && its < max_iter){
    std::cout<<"Starting iteration: " << its << " at time: " << mytimer.current_time() << std::endl;
    int k = nconv;
    int n = nv;
    PRINT_INT(k);
    PRINT_INT(n);
    alpha = zeros(n);
    beta = zeros(n);
    // one bidiagonalization sweep: alternate U[i] = A'*V[i] and
    // V[i+1] = A*U[i], re-orthogonalizing against all previous vectors
    // (the orthogonalize call also returns the norm into alpha/beta)
    U[k] = V[k]*A._transpose();
    orthogonalize_vs_all(U, k, alpha(0));
    //alpha(0)=norm(U[k]).toDouble();
    PRINT_VEC3("alpha", alpha, 0);
    //U[k] = U[k]/alpha(0);
    for (int i=k+1; i<n; i++){
      std::cout <<"Starting step: " << i << " at time: " << mytimer.current_time() << std::endl;
      PRINT_INT(i);
      V[i]=U[i-1]*A;
      orthogonalize_vs_all(V, i, beta(i-k-1));
      //beta(i-k-1)=norm(V[i]).toDouble();
      //V[i] = V[i]/beta(i-k-1);
      PRINT_VEC3("beta", beta, i-k-1);
      U[i] = V[i]*A._transpose();
      orthogonalize_vs_all(U, i, alpha(i-k));
      //alpha(i-k)=norm(U[i]).toDouble();
      //U[i] = U[i]/alpha(i-k);
      PRINT_VEC3("alpha", alpha, i-k);
    }
    V[n]= U[n-1]*A;
    orthogonalize_vs_all(V, n, beta(n-k-1));
    //beta(n-k-1)=norm(V[n]).toDouble();
    PRINT_VEC3("beta", beta, n-k-1);
    //compute svd of bidiagonal matrix
    PRINT_INT(nv);
    PRINT_NAMED_INT("svd->nconv", nconv);
    n = nv - nconv;
    PRINT_INT(n);
    alpha.conservativeResize(n);
    beta.conservativeResize(n);
    PRINT_MAT2("Q",eye(n));
    PRINT_MAT2("PT",eye(n));
    PRINT_VEC2("alpha",alpha);
    PRINT_VEC2("beta",beta);
    // assemble the small bidiagonal matrix T and take its dense SVD
    mat T=diag(alpha);
    for (int i=0; i<n-1; i++)
      set_val(T, i, i+1, beta(i));
    PRINT_MAT2("T", T);
    mat a,PT;
    svd(T, a, PT, b);
    PRINT_MAT2("Q", a);
    alpha=b.transpose();
    PRINT_MAT2("alpha", alpha);
    for (int t=0; t< n-1; t++)
      beta(t) = 0;
    PRINT_VEC2("beta",beta);
    PRINT_MAT2("PT", PT.transpose());
    //estimate the error of each Ritz value; kk counts how many newly
    //converged values this restart produced
    int kk = 0;
    for (int i=nconv; i < nv; i++){
      int j = i-nconv;
      PRINT_INT(j);
      sigma(i) = alpha(j);
      PRINT_NAMED_DBL("svd->sigma[i]", sigma(i));
      PRINT_NAMED_DBL("Q[j*n+n-1]",a(n-1,j));
      PRINT_NAMED_DBL("beta[n-1]",beta(n-1));
      errest(i) = abs(a(n-1,j)*beta(n-1));
      PRINT_NAMED_DBL("svd->errest[i]", errest(i));
      if (alpha(j) > tol){
        // relative error for non-tiny singular values
        errest(i) = errest(i) / alpha(j);
        PRINT_NAMED_DBL("svd->errest[i]", errest(i));
      }
      if (errest(i) < tol){
        kk = kk+1;
        PRINT_NAMED_INT("k",kk);
      }
      if (nconv +kk >= nsv){
        printf("set status to tol\n");
        finished = true;
      }
    }//end for
    PRINT_NAMED_INT("k",kk);
    // build the restart vector from the first non-converged right singular
    // vector of T, lifted back into the full space
    vec v;
    if (!finished){
      vec swork=get_col(PT,kk);
      PRINT_MAT2("swork", swork);
      v = zeros(size(A,1));
      for (int ttt=nconv; ttt < nconv+n; ttt++){
        v = v+swork(ttt-nconv)*(V[ttt].to_vec());
      }
      PRINT_VEC2("svd->V",V[nconv]);
      PRINT_VEC2("v[0]",v);
    }
    //compute the ritz eigenvectors of the converged singular triplets
    if (kk > 0){
      PRINT_VEC2("svd->V", V[nconv]);
      mat tmp= V.get_cols(nconv,nconv+n)*PT;
      V.set_cols(nconv, nconv+kk, get_cols(tmp, 0, kk));
      PRINT_VEC2("svd->V", V[nconv]);
      PRINT_VEC2("svd->U", U[nconv]);
      tmp= U.get_cols(nconv, nconv+n)*a;
      U.set_cols(nconv, nconv+kk,get_cols(tmp,0,kk));
      PRINT_VEC2("svd->U", U[nconv]);
    }
    nconv=nconv+kk;
    if (finished)
      break;
    V[nconv]=v;
    PRINT_VEC2("svd->V", V[nconv]);
    PRINT_NAMED_INT("svd->nconv", nconv);
    its++;
    PRINT_NAMED_INT("svd->its", its);
    PRINT_NAMED_INT("svd->nconv", nconv);
    //nv = min(nconv+mpd, N);
    //if (nsv < 10)
    //  nv = 10;
    PRINT_NAMED_INT("nv",nv);
  } // end(while)
  printf(" Number of computed signular values %d",nconv);
  printf("\n");
  // residual check: ||A'v - sigma*u|| and ||A u - sigma*v|| per triplet
  DistVec normret(info, nconv, false, "normret");
  DistVec normret_tranpose(info, nconv, true, "normret_tranpose");
  for (int i=0; i < nconv; i++){
    normret = V[i]*A._transpose() -U[i]*sigma(i);
    double n1 = norm(normret).toDouble();
    PRINT_DBL(n1);
    normret_tranpose = U[i]*A -V[i]*sigma(i);
    double n2 = norm(normret_tranpose).toDouble();
    PRINT_DBL(n2);
    double err=sqrt(n1*n1+n2*n2);
    PRINT_DBL(err);
    PRINT_DBL(tol);
    if (sigma(i)>tol){
      err = err/sigma(i);
    }
    PRINT_DBL(err);
    PRINT_DBL(sigma(i));
    printf("Singular value %d \t%13.6g\tError estimate: %13.6g\n", i, sigma(i),err);
  }
  if (save_vectors){
    std::cout<<"Going to save output vectors U and V" << std::endl;
    if (nconv == 0)
      logstream(LOG_FATAL)<<"No converged vectors. Aborting the save operation" << std::endl;
    char output_filename[256];
    for (int i=0; i< nconv; i++){
      sprintf(output_filename, "%s.U.%d", training.c_str(), i);
      write_output_vector(output_filename, U[i].to_vec(), false, "GraphLab v2 SVD output. This file contains eigenvector number i of the matrix U");
      sprintf(output_filename, "%s.V.%d", training.c_str(), i);
      write_output_vector(output_filename, V[i].to_vec(), false, "GraphLab v2 SVD output. This file contains eigenvector number i of the matrix V'");
    }
  }
  return sigma;
}
/**
 * Entry point of the SVD solver (restarted Lanczos).
 * Parses command line options, loads/preprocesses the training matrix,
 * runs the Lanczos iteration via the GraphChi engine, and writes the
 * resulting singular values (and optionally the U/V vectors) to disk.
 */
int main(int argc, const char *argv[]) {
print_copyright();
/* GraphChi initialization will read the command line arguments and the configuration file. */
graphchi_init(argc, argv);
std::string vecfile;
vecfile = get_option_string("initial_vector", ""); //optional file holding the initial Lanczos vector
debug = get_option_int("debug", 0);
ortho_repeats = get_option_int("ortho_repeats", 3); //number of re-orthogonalization passes
nv = get_option_int("nv", 1); //number of Lanczos vectors to compute
nsv = get_option_int("nsv", 1); //number of singular values requested
tol = get_option_float("tol", 1e-5); //convergence tolerance on the error estimate
save_vectors = get_option_int("save_vectors", 1);
input_cols = get_option_int("input_cols", 3); //3 = [row col val]; 4 = extra column (e.g. time)
max_iter = get_option_int("max_iter", max_iter);
parse_command_line_args();
parse_implicit_command_line();
//nsv singular values are extracted out of the nv computed vectors, so nv >= nsv is required
if (nv < nsv){
logstream(LOG_FATAL)<<"Please set the number of vectors --nv=XX, to be at least the number of support vectors --nsv=XX or larger" << std::endl;
}
//unit testing: preset inputs and parameters for the three regression datasets
if (unittest == 1){
training = "gklanczos_testA";
vecfile = "gklanczos_testA_v0";
nsv = 3; nv = 3;
debug = true;
//TODO core.set_ncpus(1);
}
else if (unittest == 2){
training = "gklanczos_testB";
vecfile = "gklanczos_testB_v0";
nsv = 10; nv = 10;
debug = true; max_iter = 100;
//TODO core.set_ncpus(1);
}
else if (unittest == 3){
training = "gklanczos_testC";
vecfile = "gklanczos_testC_v0";
nsv = 4; nv = 10;
debug = true; max_iter = 100;
//TODO core.set_ncpus(1);
}
std::cout << "Load matrix " << training << std::endl;
/* Preprocess data if needed, or discover preprocess files */
if (input_cols == 3)
nshards = convert_matrixmarket<edge_data>(training);
else if (input_cols == 4)
nshards = convert_matrixmarket4<edge_data>(training);
else logstream(LOG_FATAL)<<"--input_cols=XX should be either 3 or 4 input columns" << std::endl;
//globals M (rows), N (cols) and L (non-zeros) were filled in by the conversion above
info.rows = M; info.cols = N; info.nonzeros = L;
assert(info.rows > 0 && info.cols > 0 && info.nonzeros > 0);
timer mytimer; mytimer.start();
init_lanczos(info);
init_math(info, ortho_repeats);
//read initial vector from file (optional)
if (vecfile.size() > 0){
std::cout << "Load inital vector from file" << vecfile << std::endl;
load_matrix_market_vector(vecfile, info, 0, true, false);
}
//or start with a random initial vector
else {
#pragma omp parallel for
for (int i=0; i< (int)M; i++)
latent_factors_inmem[i].pvec[0] = drand48();
}
//run the Lanczos iteration through the GraphChi engine
graphchi_engine<VertexDataType, EdgeDataType> engine(training, nshards, false, m);
set_engine_flags(engine);
pengine = &engine;
vec errest; //per-singular-value error estimates, filled by lanczos()
vec singular_values = lanczos(info, mytimer, errest, vecfile);
std::cout << "Lanczos finished " << mytimer.current_time() << std::endl;
write_output_vector(training + ".singular_values", singular_values,false, "%GraphLab SVD Solver library. This file contains the singular values.");
//unit tests: error estimates must have converged to (near) zero
if (unittest == 1){
assert(errest.size() == 3);
for (int i=0; i< errest.size(); i++)
assert(errest[i] < 1e-30);
}
else if (unittest == 2){
assert(errest.size() == 10);
for (int i=0; i< errest.size(); i++)
assert(errest[i] < 1e-15);
}
/* Report execution metrics */
if (!quiet)
metrics_report(m);
return 0;
}
| C++ |
#ifndef __GRAPHCHI_RMSE_ENGINE4
#define __GRAPHCHI_RMSE_ENGINE4
/**
* @file
* @author Danny Bickson
* @version 1.0
*
* @section LICENSE
*
* Copyright [2012] [Carnegie Mellon University]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
 * File for aggregating and displaying error measures and algorithm progress
*/
//callback that computes the prediction and returns the squared error for one rating
float (*pprediction_func)(const vertex_data&, const vertex_data&, const float, double &, void *) = NULL;
//per-thread accumulators of the validation error, folded together after each iteration
vec validation_rmse_vec;
//when true user nodes (id < M) are processed, otherwise item nodes
bool user_nodes = true;
int counter = 0;
//when true, each squared error is weighted by the edge's time field
bool time_weighting = false;
//when true, the edge's time field selects an extra "time node" passed to the prediction
bool time_nodes = false;
//offset subtracted from time values (Matlab exports are 1-based)
int matlab_time_offset = 0;
//number of omp threads == length of validation_rmse_vec
int num_threads = 1;
//set when validation error increased and training should stop early
bool converged_engine = false;
//training iteration currently being validated
int cur_iteration = 0;
/**
* GraphChi programs need to subclass GraphChiProgram<vertex-type, edge-type>
* class. The main logic is usually in the update function.
*/
/**
 * GraphChi programs need to subclass GraphChiProgram<vertex-type, edge-type>
 * class. The main logic is usually in the update function.
 *
 * This program accumulates, per omp thread, the validation error of every
 * observed rating; after_iteration() folds the per-thread sums into the final
 * measure and may request an early stop on divergence.
 */
struct ValidationRMSEProgram4 : public GraphChiProgram<VertexDataType, EdgeDataType> {
/**
 * compute validation error for a single user (or item, depending on the
 * global user_nodes flag)
 */
void update(graphchi_vertex<VertexDataType, EdgeDataType> &vertex, graphchi_context &gcontext) {
//process only one side of the bipartite graph (users: id < M, items: id >= M)
if (user_nodes && vertex.id() >= M)
return;
else if (!user_nodes && vertex.id() < M)
return;
vertex_data & vdata = latent_factors_inmem[vertex.id()];
for(int e=0; e < vertex.num_outedges(); e++) {
double observation = vertex.edge(e)->get_data().weight;
uint time = (uint)vertex.edge(e)->get_data().time - matlab_time_offset;
vertex_data * time_node = NULL;
if (time_nodes){
//time is unsigned, so an underflow of the subtraction above wraps to a
//huge value and is caught by the range check below (the former
//"time >= 0" clause was a tautology on unsigned and was dropped)
assert(time < M+N+K);
time_node = &latent_factors_inmem[time];
}
vertex_data & nbr_latent = latent_factors_inmem[vertex.edge(e)->vertex_id()];
double prediction;
//returns the squared error for this rating; prediction is an out-parameter
double rmse = (*pprediction_func)(vdata, nbr_latent, observation, prediction, (void*)time_node);
assert(rmse <= pow(maxval - minval, 2));
if (time_weighting)
rmse *= vertex.edge(e)->get_data().time;
assert(validation_rmse_vec.size() > omp_get_thread_num());
validation_rmse_vec[omp_get_thread_num()] += rmse;
}
}
void before_iteration(int iteration, graphchi_context & gcontext){
//remember the previous error to detect divergence, then reset accumulators
last_validation_rmse = dvalidation_rmse;
validation_rmse_vec = zeros(num_threads);
}
/**
 * Called after an iteration has finished: folds per-thread error sums into
 * the final measure, prints it, and flags early stop on RMSE increase.
 */
void after_iteration(int iteration, graphchi_context &gcontext) {
assert(Le > 0);
dvalidation_rmse = finalize_rmse(sum(validation_rmse_vec) , (double)Le);
std::cout<<" Validation " << error_names[loss_type] << ":" << std::setw(10) << dvalidation_rmse << std::endl;
if (halt_on_rmse_increase > 0 && halt_on_rmse_increase < cur_iteration && dvalidation_rmse > last_validation_rmse){
logstream(LOG_WARNING)<<"Stopping engine because of validation RMSE increase" << std::endl;
converged_engine = true;
}
}
};
/**
 * Allocate and configure the GraphChi engine used for validation passes.
 * The metrics and engine objects are heap-allocated and never freed here —
 * presumably they are meant to live until program termination (TODO confirm).
 * @param pvalidation_engine [out] receives the newly created engine
 * @param nshards number of shards of the validation graph
 * @param prediction_func callback computing prediction + squared error for one rating
 * @param _time_weighting weight each error by the edge time field
 * @param _time_nodes interpret the edge time field as an extra node index
 * @param _matlab_time_offset offset subtracted from time values (1 for Matlab exports)
 */
template<typename VertexDataType, typename EdgeDataType>
void init_validation_rmse_engine(graphchi_engine<VertexDataType,EdgeDataType> *& pvalidation_engine, int nshards,float (*prediction_func)(const vertex_data & user, const vertex_data & movie, float rating, double & prediction, void * extra), bool _time_weighting, bool _time_nodes, int _matlab_time_offset){
metrics * m = new metrics("validation_rmse_engine");
graphchi_engine<VertexDataType, EdgeDataType> * engine = new graphchi_engine<VertexDataType, EdgeDataType>(validation, nshards, false, *m);
set_engine_flags(*engine);
pvalidation_engine = engine;
//publish configuration to the globals read by ValidationRMSEProgram4
time_weighting = _time_weighting;
time_nodes = _time_nodes;
matlab_time_offset = _matlab_time_offset;
pprediction_func = prediction_func;
num_threads = number_of_omp_threads();
}
/**
 * Run one validation pass (a single engine iteration) and stop the training
 * engine when the validation program flagged divergence.
 */
template<typename VertexDataType, typename EdgeDataType>
void run_validation4(graphchi_engine<VertexDataType, EdgeDataType> * pvalidation_engine, graphchi_context & context){
cur_iteration = context.iteration;
if (pvalidation_engine != NULL){
ValidationRMSEProgram4 validation_program;
pvalidation_engine->run(validation_program, 1);
if (converged_engine)
context.set_last_iteration(cur_iteration);
}
else {
//no validation data given: just terminate the progress line
std::cout << std::endl;
}
}
/**
 * Re-initialize the per-thread training error accumulators.
 * @param exec_threads number of execution threads the engine will use
 */
void reset_rmse(int exec_threads){
num_threads = exec_threads;
logstream(LOG_DEBUG)<<"Detected number of threads: " << exec_threads << std::endl;
rmse_vec = zeros(num_threads);
}
#endif //__GRAPHCHI_RMSE_ENGINE4
| C++ |
/**
* @file
* @author Danny Bickson
* @version 1.0
*
* @section LICENSE
*
* Copyright [2012] [Carnegie Mellon University]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @section DESCRIPTION
*
* Implementation of the gensgd algorithm. A generalization of SGD algorithm when there are multiple features for each
* rating, in the form
* [from] [to] [feature1] [feature2] [feature3] ... [featureN] [rating]
* (It is also possible to dynamically specify column numbers which are relevant)
* Steffen Rendle (2010): Factorization Machines, in Proceedings of the 10th IEEE International Conference on Data Mining (ICDM 2010), Sydney, Australia.
* Original implementation by Qiang Yan, Chinese Academy of Science.
* note: this code version implements the SGD version of gensgd. In the original library there are also ALS and MCMC methods.
* Also the treatment of features is richer in gensgd. The code here can serve for a quick evaluation but the user
* is encouraged to try gensgd as well.
*/
#include <vector>
#include "common.hpp"
#include "eigen_wrapper.hpp"
#include "../parsers/common.hpp"
#include <omp.h>
#define MAX_FEATAURES 256
#define FEATURE_WIDTH 21//MAX NUMBER OF ALLOWED FEATURES IN TEXT FILE
//per-group SGD learning rates (decayed multiplicatively after every iteration)
double gensgd_rate1 = 1e-02; //user node (and global mean) rate
double gensgd_rate2 = 1e-02; //item node rate
double gensgd_rate3 = 1e-02; //rating-line feature rate
double gensgd_rate4 = 1e-02; //user/item feature rate
double gensgd_rate5 = 1e-02; //remaining feature rate
double gensgd_mult_dec = 0.9; //learning rate decay factor
double gensgd_regw = 1e-3; //regularization of bias terms
double gensgd_regv = 1e-3; //regularization of feature vectors
double gensgd_reg0 = 1e-1; //regularization of the global mean
bool debug = false;
std::string user_file; //optional file with user features
std::string item_file; //optional file with item features
std::string user_links; //optional file with user to user links
int limit_rating = 0; //if positive, read at most this many training ratings
size_t vertex_with_no_edges = 0;
int calc_error = 0; //also compute classification error (relative to cutoff)
int calc_roc = 0; //also compute ROC/AUC on the validation set
int binary = 1;
int round_float = 0; //round feature values to 4 decimal digits before hashing
std::vector<std::string> header_titles; //column titles read from the input header line
int has_header_titles = 0; //first input line contains column titles
float cutoff = 0; //classification threshold for error / ROC computation
std::string format = "libsvm";
vec errors_vec; //per-thread classification error counters
/**
 * Maps a raw (float) feature value to a dense node id. Ids are stored
 * 1-based inside the map (0 marks "not yet assigned", see assign_id()).
 * Rule of Zero: the former empty user-declared constructor was redundant
 * and has been removed; the implicit default constructor is identical.
 */
struct single_map{
std::map<float,uint> string2nodeid;
};
/**
 * Run-time configuration describing how input columns map onto features.
 */
struct feature_control{
//one value->id map per node group: [0] users, [1] items, then one per feature column
std::vector<single_map> node_id_maps;
single_map val_map; //map for rehashed rating values (used when rehash_value is set)
single_map index_map; //maps feature "names" (first token of a pair) to group positions
int rehash_value; //if set, rating values are hashed through val_map
int feature_num; //maximal number of features allowed per rating line
int node_features; //number of node (user/item) features
int node_links; //number of user-user link features
int total_features; //number of distinct features found in the rating lines
//NOTE(review): a const member makes this struct non-assignable; the field also
//appears unused in this file — verify before relying on it
const std::string default_feature_str;
std::vector<int> offsets; //start offset of each feature group inside latent_factors_inmem
bool hash_strings; //hash input tokens into dense ids (required by this algorithm)
int from_pos; //column position of the "from" (user) id
int to_pos; //column position of the "to" (item) id
int val_pos; //column position of the rating value (-1 = not fixed)
feature_control(){
rehash_value = 0;
total_features = 0;
node_features = 0;
feature_num = FEATURE_WIDTH;
hash_strings = false;
from_pos = 0;
to_pos = 1;
val_pos = -1;
node_links = 0;
}
};
//global singleton holding the feature configuration of this run
feature_control fc;
/**
 * Total number of distinct feature values (bins) across all feature maps,
 * excluding the user and item maps at positions 0 and 1.
 */
int num_feature_bins(){
int total = 0;
if (!fc.hash_strings){
assert(false); //only the string-hashing input mode is supported
}
else {
const int limit = 2 + fc.total_features + fc.node_features;
assert((int)fc.node_id_maps.size() == limit);
for (int pos = 2; pos < limit; pos++)
total += fc.node_id_maps[pos].string2nodeid.size();
}
return total;
}
/** Number of feature groups: user + item + rating-line features + node features. */
int calc_feature_num(){
return fc.total_features + fc.node_features + 2;
}
/**
 * Compute the start offset of each feature group inside latent_factors_inmem:
 * users start at 0, items at M, the first feature group at M+N, and each
 * following group right after the previous one.
 * @param offsets [out] vector of size calc_feature_num(); fully overwritten
 */
void get_offsets(std::vector<int> & offsets){
assert(offsets.size() > 3);
offsets[0] = 0;
offsets[1] = M;
offsets[2] = M+N;
for (uint i=3; i< offsets.size(); i++){
assert(fc.node_id_maps.size() > (uint)i);
//was "offsets[i] +=": that relied on the caller zero-initializing the vector
//and silently accumulated stale values on a repeated call; plain assignment
//is equivalent for the zero-initialized case and idempotent
//NOTE(review): the size of map i (not i-1) is added here — confirm this
//matches the intended alignment of node_id_maps with offsets
offsets[i] = offsets[i-1] + fc.node_id_maps[i].string2nodeid.size();
}
}
/* Node-id range helpers: users occupy [0,M), items [M,M+N), time/feature nodes [M+N,...). */
bool is_user(vid_t id){ return id < M; }
//fixed: the upper bound must be M+N (item ids are offset by M); the previous
//"id < N" test wrongly rejected every item whenever M >= N, and is inconsistent
//with is_time() below which treats ids >= M+N as non-items
bool is_item(vid_t id){ return id >= M && id < M+N; }
bool is_time(vid_t id){ return id >= M+N; }
#define BIAS_POS -1
/**
 * A latent factor node (user, item or feature bin): a feature vector plus a
 * scalar bias. Index BIAS_POS addresses the bias; any other index addresses
 * the corresponding pvec entry.
 */
struct vertex_data {
fvec pvec; //latent feature vector
double bias; //scalar bias term
vertex_data() : bias(0) {
}
void set_val(int index, float val){
if (index == BIAS_POS)
bias = val;
else
pvec[index] = val;
}
float get_val(int index){
return (index == BIAS_POS) ? bias : pvec[index];
}
};
/**
 * Payload of a single rating edge: the rating itself plus up to FEATURE_WIDTH
 * (feature-group position, feature-value id) pairs; "size" is the number of
 * pairs actually in use.
 */
struct edge_data {
uint features[FEATURE_WIDTH]; //feature value ids
uint index[FEATURE_WIDTH]; //feature group positions
uint size; //number of valid entries in the two arrays
float weight; //the observed rating
edge_data() {
size = 0;
weight = 0;
for (int i = 0; i < FEATURE_WIDTH; i++){
features[i] = 0;
index[i] = 0;
}
}
edge_data(float weight, uint * valarray, uint * _index, uint size): size(size), weight(weight) {
//the source arrays are always FEATURE_WIDTH long; copy them whole
for (int i = 0; i < FEATURE_WIDTH; i++){
features[i] = valarray[i];
index[i] = _index[i];
}
}
};
/**
 * Type definitions. Remember to create suitable graph shards using the
 * Sharder-program.
 */
typedef vertex_data VertexDataType;
typedef edge_data EdgeDataType; // Edges store the "rating" of user->movie pair
//engine running the training iterations (set up in main)
graphchi_engine<VertexDataType, EdgeDataType> * pengine = NULL;
//all latent factors (users, items, feature bins) kept fully in memory
std::vector<vertex_data> latent_factors_inmem;
/**
 * Number of latent-factor nodes participating in a single rating:
 * the user, the item, and one node per rating-line feature.
 * @param node user id (0-based)
 * @param item item id (0-based, before the +M offset)
 * @param edge_size number of features stored on the edge
 */
int calc_feature_node_array_size(uint node, uint item, uint edge_size){
assert(node <= M);
assert(item <= N);
//the former "assert(edge_size >= 0)" was removed: edge_size is unsigned,
//so the comparison was always true (and triggers -Wtype-limits warnings)
assert(node < latent_factors_inmem.size());
assert(fc.offsets[1]+item < latent_factors_inmem.size());
return 2+edge_size;
}
/**
 * Return (via outval) the 0-based dense id of the value "name", assigning a
 * fresh id under the global mymutex when the value was not seen before.
 * Ids are stored 1-based inside the map; 0 marks "not yet assigned".
 */
void assign_id(single_map& dmap, unsigned int & outval, const float name){
//NOTE(review): this unlocked find() still races with concurrent inserts into
//the same map; consider taking the lock on the read path as well
std::map<float,uint>::iterator it = dmap.string2nodeid.find(name);
//if an id was already assigned, return it
if (it != dmap.string2nodeid.end()){
outval = it->second - 1;
assert(outval < dmap.string2nodeid.size());
return;
}
mymutex.lock();
//operator[] inserts 0 when the key is still missing
outval = dmap.string2nodeid[name];
if (outval == 0){
//assign a new id (1-based inside the map, 0-based for the caller)
dmap.string2nodeid[name] = dmap.string2nodeid.size();
outval = dmap.string2nodeid.size() - 1;
}
else {
//another thread assigned an id between the unlocked find() and the lock;
//the stored id is 1-based, so convert it (this conversion was missing and
//returned an id off by one on that race path)
outval = outval - 1;
}
mymutex.unlock();
}
/**
 * return a numeric node ID out of the string text read from file (training, validation or test)
 * @param pch token to parse (must be non-empty)
 * @param pos semantic position: 0 = user column, 1 = item column,
 *        -1 = feature name (looked up in fc.index_map),
 *        >= 2 = feature value belonging to feature group "pos"
 * @param i input line number (for error reporting only)
 * @param read_only when true (validation/test input) unknown tokens return -1
 *        instead of being assigned a fresh id
 */
float get_node_id(char * pch, int pos, size_t i, bool read_only = false){
assert(pch != NULL);
assert(pch[0] != 0);
assert(i >= 0);
float ret;
//read numeric id
if (!fc.hash_strings){
ret = (pos < 2 ? atoi(pch) : atof(pch));
//user/item ids in the input are 1-based; convert to 0-based
if (pos < 2)
ret--;
if (pos == 0 && ret >= M)
logstream(LOG_FATAL)<<"Row index larger than the matrix row size " << ret << " > " << M << " in line: " << i << std::endl;
else if (pos == 1 && ret >= N)
logstream(LOG_FATAL)<<"Col index larger than the matrix row size " << ret << " > " << N << " in line: " << i << std::endl;
}
//else read string id and assign numeric id
else {
uint id;
float val = atof(pch);
assert(!std::isnan(val));
//optionally quantize to 4 decimal digits so nearly-equal values share a bin
if (round_float)
val = floorf(val * 10000 + 0.5) / 10000;
if (pos >= 0)
assert(pos < (int)fc.node_id_maps.size());
//select the map to look the token up in: index_map for feature names,
//the per-group map otherwise
single_map * pmap = NULL;
if (pos == -1)
pmap = &fc.index_map;
else pmap = &fc.node_id_maps[pos];
if (read_only){ // find if node was in map
std::map<float,uint>::iterator it = pmap->string2nodeid.find(val);
if (it != pmap->string2nodeid.end()){
ret = it->second - 1;
assert(ret < pmap->string2nodeid.size());
}
else ret = -1;
}
else { //else enter node into map (in case it did not exist) and return its position
assign_id(*pmap, id, val);
//the first time a new feature name is seen, grow the per-group map list
if (pos == -1 && fc.index_map.string2nodeid.size() == id+1 && fc.node_id_maps.size() < fc.index_map.string2nodeid.size()+2){//TODO debug
single_map newmap;
fc.node_id_maps.push_back(newmap);
}
ret = id;
}
}
if (!read_only)
assert(ret != -1);
return ret;
}
#include "io.hpp"
#include "../parsers/common.hpp"
/**
 * Parse the rating value from token pch. When fc.rehash_value is enabled the
 * raw value is mapped to a dense id via fc.val_map; read-only lookups return
 * -1 for values never seen during training. Fatal-logs on NaN/Inf results.
 */
float get_value(char * pch, bool read_only){
float ret;
if (!fc.rehash_value){
ret = atof(pch);
}
else {
const float parsed = atof(pch); //parse once, reuse for lookup and insertion
uint id;
if (read_only){
//only look the value up; unseen values map to -1
std::map<float,uint>::iterator it = fc.val_map.string2nodeid.find(parsed);
if (it != fc.val_map.string2nodeid.end())
ret = it->second - 1;
else
ret = -1;
}
else {
//enter the value into the map (in case it did not exist) and return its position
assign_id(fc.val_map, id, parsed);
ret = id;
}
}
if (std::isnan(ret) || std::isinf(ret))
logstream(LOG_FATAL)<<"Failed to read value" << std::endl;
return ret;
}
/* Read and parse one input line from file.
 * Fills I (user), J (item), val (when fc.val_pos matches a token) and up to
 * FEATURE_WIDTH (feature-position, feature-value) pairs into positions /
 * valarray; "index" returns the number of pairs parsed. For validation/test
 * input (type != TRAINING) feature names unseen during training are skipped
 * and counted in skipped_features. Fatal-logs on malformed input. */
bool read_line(FILE * f, const std::string filename, size_t i, uint & I, uint & J, float &val, std::vector<uint>& valarray, std::vector<uint>& positions, int & index, int type, int & skipped_features){
char * linebuf = NULL;
size_t linesize;
char linebuf_debug[1024];
int token = 0;
index = 0;
int rc = getline(&linebuf, &linesize, f);
if (rc == -1)
logstream(LOG_FATAL)<<"Failed to get line: " << i << " in file: " << filename << std::endl;
char * linebuf_to_free = linebuf; //strsep() advances linebuf; keep the original for free()
strncpy(linebuf_debug, linebuf, 1024);
linebuf_debug[1023] = 0; //strncpy does not null-terminate when the line is >= 1024 chars
while (index < FEATURE_WIDTH){
/* READ FROM */
if (token == fc.from_pos){
char *pch = strsep(&linebuf,"\t,\r\n: ");
if (pch == NULL)
logstream(LOG_FATAL)<<"Error reading line " << i << " [ " << linebuf_debug << " ] " << std::endl;
I = (uint)get_node_id(pch, 0, i, type != TRAINING);
token++;
}
else if (token == fc.to_pos){
/* READ TO */
char * pch = strsep(&linebuf, "\t,\r\n: ");
if (pch == NULL)
logstream(LOG_FATAL)<<"Error reading line " << i << " [ " << linebuf_debug << " ] " << std::endl;
J = (uint)get_node_id(pch, 1, i, type != TRAINING);
token++;
}
else if (token == fc.val_pos){
/* READ RATING */
char * pch = strsep(&linebuf, "\t,\r\n ");
if (pch == NULL)
logstream(LOG_FATAL)<<"Error reading line " << i << " [ " << linebuf_debug << " ] " << std::endl;
val = get_value(pch, type != TRAINING);
token++;
}
else {
/* READ FEATURES: pairs of <feature name> <feature value> */
char * pch = strsep(&linebuf, "\t,\r\n:; ");
if (pch == NULL || pch[0] == 0)
break;
uint pos = get_node_id(pch, -1, i, type != TRAINING);
if (type != TRAINING && pos == (uint)-1){ //this feature was not observed on training, skip
char * pch2 = strsep(&linebuf, "\t\r\n ");
if (pch2 == NULL || pch2[0] == 0)
logstream(LOG_FATAL)<<"Error reading line " << i << " feature2 " << index << " [ " << linebuf_debug << " ] " << std::endl;
skipped_features++;
continue;
}
assert(pos != (uint)-1 && pos < fc.index_map.string2nodeid.size());
char * pch2 = strsep(&linebuf, "\t\r\n ");
if (pch2 == NULL || pch2[0] == 0)
logstream(LOG_FATAL)<<"Error reading line " << i << " feature2 " << index << " [ " << linebuf_debug << " ] " << std::endl;
uint second_index = get_node_id(pch2, pos, i, type != TRAINING);
if (type != TRAINING && second_index == (uint)-1){ //this value was not observed in training, skip
second_index = 0; //skipped_features++;
//continue;
}
assert(second_index != (uint)-1);
assert(index< (int)valarray.size());
assert(index< (int)positions.size());
valarray[index] = second_index;
positions[index] = pos;
index++;
token++;
}
}//end while
free(linebuf_to_free);
return true;
}//end read_line
/* compute an edge prediction based on input features.
 * Gathers pointers to every latent-factor node participating in the rating —
 * user, item and one node per rating-line feature — into node_array, then
 * delegates to prediction_func. Returns the squared prediction error. */
float compute_prediction(
uint I,
uint J,
const float val,
double & prediction,
uint * valarray,
uint * positions,
uint edge_size,
float (*prediction_func)(std::vector<vertex_data*>& node_array, int arraysize, float rating, double & prediction, fvec * psum),
fvec * psum,
std::vector<vertex_data*>& node_array,
uint node_array_size){
/* COMPUTE PREDICTION */
/* 0) USER NODE */
int index = 0;
int loc = 0;
node_array[index] = &latent_factors_inmem[I+fc.offsets[index]];
assert(node_array[index]->pvec[0] < 1e5);
index++; loc++;
/* 1) ITEM NODE */
assert(J+fc.offsets[index] < latent_factors_inmem.size());
node_array[index] = &latent_factors_inmem[J+fc.offsets[index]];
assert(node_array[index]->pvec[0] < 1e5);
index++; loc++;
/* 2) FEATURES GIVEN IN RATING LINE */
//each feature j lives at offset of its group (positions[j]) plus its value id
for (int j=0; j< (int)edge_size; j++){
assert(fc.offsets.size() > positions[j]);
uint pos = fc.offsets[positions[j]] + valarray[j];
assert(pos >= 0 && pos < latent_factors_inmem.size());
assert(j+index < (int)node_array_size);
node_array[j+index] = & latent_factors_inmem[pos];
assert(node_array[j+index]->pvec[0] < 1e5);
}
index+= edge_size;
loc += edge_size;
assert(index == calc_feature_node_array_size(I,J, edge_size));
(*prediction_func)(node_array, node_array_size, val, prediction, psum);
return pow(val - prediction,2);
}
#include "rmse.hpp"
/**
 * Create a bipartite graph from a matrix. Each row corresponds to vertex
 * with the same id as the row number (0-based), but vertices corresponding to columns
 * have id + num-rows.
 * Line format of the type
 * [user] [item] [feature1] [feature2] ... [featureN] [rating]
 */
/* Read input file, process it and save a binary representation (shards) for
 * faster loading. Returns the number of shards created. Also fills the
 * globals M, N, L and globalMean as a side effect. */
template <typename als_edge_type>
int convert_matrixmarket_N(std::string base_filename, bool square, feature_control & fc, int limit_rating = 0) {
// Note, code based on: http://math.nist.gov/MatrixMarket/mmio/c/example_read.c
FILE *f;
size_t nz;
/**
 * Create sharder object
 */
int nshards;
sharder<als_edge_type> sharderobj(base_filename);
sharderobj.start_preprocessing();
detect_matrix_size(base_filename, f, M, N, nz);
/* if .info file is not present, try to find matrix market header inside the base_filename file */
if (format == "libsvm")
assert(!has_header_titles);
if (has_header_titles){
char * linebuf = NULL;
size_t linesize;
char linebuf_debug[1024];
linebuf_debug[0] = 0; //was logged uninitialized below when getline() failed
/* READ LINE */
int rc = getline(&linebuf, &linesize, f);
if (rc == -1)
logstream(LOG_FATAL)<<"Error header line " << " [ " << linebuf_debug << " ] " << std::endl;
strncpy(linebuf_debug, linebuf, 1024);
linebuf_debug[1023] = 0; //strncpy does not null-terminate over-long lines
/** READ [FROM] */
char *pch = strtok(linebuf,"\t,\r; ");
if (pch == NULL)
logstream(LOG_FATAL)<<"Error header line " << " [ " << linebuf_debug << " ] " << std::endl;
header_titles.push_back(pch);
/** READ USER FEATURES */
while (pch != NULL){
pch = strtok(NULL, "\t,\r; ");
if (pch == NULL)
break;
header_titles.push_back(pch);
//update stats if needed
}
free(linebuf); //getline() allocates; this buffer used to leak
}
if (M == 0 && N == 0)
logstream(LOG_FATAL)<<"Failed to detect matrix size. Please prepare a file named: " << base_filename << ":info with matrix market header, as explained here: http://bickson.blogspot.co.il/2012/12/collaborative-filtering-3rd-generation_14.html " << std::endl;
logstream(LOG_INFO) << "Starting to read matrix-market input. Matrix dimensions: " << M << " x " << N << ", non-zeros: " << nz << std::endl;
uint I, J;
std::vector<uint> valarray; valarray.resize(FEATURE_WIDTH);
std::vector<uint> positions; positions.resize(FEATURE_WIDTH);
float val;
if (limit_rating > 0)
nz = limit_rating;
int skipped_features = 0;
for (size_t i=0; i<nz; i++)
{
int index;
if (!read_line(f, base_filename, i,I, J, val, valarray, positions, index, TRAINING, skipped_features))
logstream(LOG_FATAL)<<"Failed to read line: " <<i<< " in file: " << base_filename << std::endl;
if (index < 1)
logstream(LOG_FATAL)<<"Failed to read line: " <<i<< " in file: " << base_filename << std::endl;
if (nz > 1000000 && (i % 1000000) == 0)
logstream(LOG_INFO)<< mytimer.current_time() << " Finished reading " << i << " lines " << std::endl;
//calc stats
L++;
globalMean += val;
//item ids are shifted by M unless the matrix is square
sharderobj.preprocessing_add_edge(I, square?J:M+J, als_edge_type(val, &valarray[0], &positions[0], index));
}
sharderobj.end_preprocessing();
//calc stats
assert(L > 0);
assert(globalMean != 0);
globalMean /= L;
logstream(LOG_INFO)<<"Computed global mean is: " << globalMean << std::endl;
fclose(f);
logstream(LOG_INFO) << "Now creating shards." << std::endl;
// Shard with a specified number of shards, or determine automatically if not defined
nshards = sharderobj.execute_sharding(get_option_string("nshards", "auto"));
return nshards;
}
/* Comparator for std::sort: orders (rating, prediction) pairs by descending
 * second component (the predicted score). */
static bool mySort(const std::pair<double, double> &p1,const std::pair<double, double> &p2)
{
return p2.second < p1.second;
}
/**
 * Compute the error measure on the validation dataset (re-parsed from text on
 * every call). Optionally also computes the classification error and ROC/AUC,
 * and may request early termination when the validation RMSE increases.
 */
void validation_rmse_N(
float (*prediction_func)(std::vector<vertex_data*>& array, int arraysize, float rating, double & prediction, fvec * psum)
,graphchi_context & gcontext,
feature_control & fc,
bool square = false) {
if (validation == "")
return;
FILE * f = NULL;
size_t nz = 0;
detect_matrix_size(validation, f, Me, Ne, nz);
if (f == NULL){
logstream(LOG_WARNING)<<"Failed to open validation file: " << validation << " - skipping."<<std::endl;
return;
}
if ((M > 0 && N > 0) && (Me != M || Ne != N))
logstream(LOG_WARNING)<<"Input size of validation matrix must be identical to training matrix, namely " << M << "x" << N << std::endl;
Le = nz;
last_validation_rmse = dvalidation_rmse;
dvalidation_rmse = 0;
std::vector<uint> valarray; valarray.resize(FEATURE_WIDTH);
std::vector<uint> positions; positions.resize(FEATURE_WIDTH);
uint I, J;
float val;
int skipped_features = 0;
int skipped_nodes = 0;
int errors = 0;
//FOR ROC. ROC code thanks to Justin Yan.
double _M = 0; //number of positive validation ratings (> cutoff)
double _N = 0; //number of negative validation ratings
std::vector<std::pair<double, double> > realPrediction;
for (size_t i=0; i<nz; i++)
{
int index;
if (!read_line(f, validation, i, I, J, val, valarray, positions, index, VALIDATION, skipped_features))
logstream(LOG_FATAL)<<"Failed to read line: " << i << " in file: " << validation << std::endl;
//user or item not seen during training - cannot predict for this rating
if (I == (uint)-1 || J == (uint)-1){
skipped_nodes++;
continue;
}
double prediction;
int howmany = calc_feature_node_array_size(I,J, index);
std::vector<vertex_data*> node_array; node_array.resize(howmany);
for (int k=0; k< howmany; k++)
node_array[k] = NULL;
fvec sum;
compute_prediction(I, J, val, prediction, &valarray[0], &positions[0], index, prediction_func, &sum, node_array, howmany);
if (calc_roc)
realPrediction.push_back(std::make_pair(val, prediction));
//NOTE(review): temp_pred is clamped but never used afterwards; the RMSE below
//uses the raw prediction — confirm whether the clamped value was intended
double temp_pred = prediction;
temp_pred = std::min(temp_pred, maxval);
temp_pred = std::max(temp_pred, minval);
dvalidation_rmse += pow(prediction - val, 2);
//classification error relative to the cutoff threshold
if (prediction < cutoff && val >= cutoff)
errors++;
else if (prediction >= cutoff && val < cutoff)
errors++;
}
fclose(f);
assert(Le > 0);
dvalidation_rmse = sqrt(dvalidation_rmse / (double)Le);
std::cout<<" Validation RMSE: " << std::setw(10) << dvalidation_rmse;
if (calc_error)
std::cout<<" Validation Err: " << std::setw(10) << ((double)errors/(double)(nz-skipped_nodes));
if (calc_roc){
double roc = 0;
double ret = 0;
//NOTE: this local vector shadows the global edge counter L inside this branch
std::vector<double> L;
std::sort(realPrediction.begin(), realPrediction.end(),mySort);
std::vector<std::pair<double, double> >::iterator iter;
for(iter=realPrediction.begin();iter!=realPrediction.end();iter++)
{
L.push_back(iter->first);
if(iter->first > cutoff) _M++;
else _N++;
}
std::vector<double>:: iterator iter2;
int i=0;
//rank-sum (Mann-Whitney style) computation of the AUC
for(iter2=L.begin();iter2!=L.end();iter2++)
{
if(*iter2 > cutoff) ret += ((_M+_N) - i);
i++;
}
double ret2 = _M *(_M+1)/2;
roc= (ret-ret2)/(_M*_N);
std::cout<<" Validation ROC: " << roc << std::endl;
}
else std::cout<<std::endl;
if (halt_on_rmse_increase && dvalidation_rmse > last_validation_rmse && gcontext.iteration > 0){
logstream(LOG_WARNING)<<"Stopping engine because of validation RMSE increase" << std::endl;
gcontext.set_last_iteration(gcontext.iteration);
}
if (skipped_features > 0)
logstream(LOG_DEBUG)<<"Skipped " << skipped_features << " when reading from file. " << std::endl;
if (skipped_nodes > 0)
logstream(LOG_DEBUG)<<"Skipped " << skipped_nodes << " when reading from file. " << std::endl;
}
/* compute predictions for test data and write them to "<test>.predict" in
 * matrix market array format. Ratings whose user/item/features were not seen
 * during training get a default prediction of 0. */
void test_predictions_N(
float (*prediction_func)(std::vector<vertex_data*>& node_array, int node_array_size, float rating, double & predictioni, fvec * sum),
feature_control & fc,
bool square = false) {
FILE *f = NULL;
//NOTE(review): these locals shadow the global Me/Ne used elsewhere — verify intended
uint Me, Ne;
size_t nz;
if (test == ""){
logstream(LOG_INFO)<<"No test file was found, skipping test predictions " << std::endl;
return;
}
detect_matrix_size(test, f, Me, Ne, nz);
if (f == NULL){
logstream(LOG_WARNING)<<"Failed to open test file " << test<< " skipping test predictions " << std::endl;
return;
}
if ((M > 0 && N > 0 ) && (Me != M || Ne != N))
logstream(LOG_FATAL)<<"Input size of test matrix must be identical to training matrix, namely " << M << "x" << N << std::endl;
FILE * fout = open_file((test + ".predict").c_str(),"w", false);
//write the matrix market header: one prediction per input rating
MM_typecode matcode;
mm_set_array(&matcode);
mm_write_banner(fout, matcode);
mm_write_mtx_array_size(fout ,nz, 1);
std::vector<uint> valarray; valarray.resize(FEATURE_WIDTH);
std::vector<uint> positions; positions.resize(FEATURE_WIDTH);
float val;
double prediction;
uint I,J;
int skipped_features = 0;
int skipped_nodes = 0;
for (uint i=0; i<nz; i++)
{
int index;
if (!read_line(f, test, i, I, J, val, valarray, positions, index, TEST, skipped_features))
logstream(LOG_FATAL)<<"Failed to read line: " <<i << " in file: " << test << std::endl;
if (I == (uint)-1 || J == (uint)-1){
skipped_nodes++;
fprintf(fout, "%d\n", 0); //features for this node are not found in the training set, write a default value
continue;
}
int howmany = calc_feature_node_array_size(I,J,index);
std::vector<vertex_data*> node_array; node_array.resize(howmany);
for (int k=0; k< howmany; k++)
node_array[k] = NULL;
fvec sum;
compute_prediction(I, J, val, prediction, &valarray[0], &positions[0], index, prediction_func, &sum, node_array, howmany);
fprintf(fout, "%12.8lg\n", prediction);
}
fclose(f);
fclose(fout);
logstream(LOG_INFO)<<"Finished writing " << nz << " predictions to file: " << test << ".predict" << std::endl;
if (skipped_features > 0)
logstream(LOG_DEBUG)<<"Skipped " << skipped_features << " when reading from file. " << std::endl;
if (skipped_nodes > 0)
logstream(LOG_WARNING)<<"Skipped node in test dataset: " << skipped_nodes << std::endl;
}
/**
 * Factorization-machine style prediction for a single rating:
 *   prediction = globalMean + sum of node biases
 *              + 0.5 * sum_j ( (sum_i v_i[j])^2 - sum_i v_i[j]^2 )
 * i.e. all pairwise interactions of the participating nodes, computed in
 * O(node_array_size * D). The prediction is clamped to [minval, maxval].
 * @param sum [out] per-dimension sum of all node vectors (reused by the SGD gradient)
 * @return the squared error (rating - prediction)^2
 */
float gensgd_predict(std::vector<vertex_data*> & node_array, int node_array_size,
const float rating, double& prediction, fvec* sum){
fvec sum_sqr = fzeros(D);
*sum = fzeros(D);
prediction = globalMean;
assert(!std::isnan(prediction));
for (int i=0; i< node_array_size; i++)
prediction += node_array[i]->bias;
assert(!std::isnan(prediction));
for (int j=0; j< D; j++){
for (int i=0; i< node_array_size; i++){
sum->operator[](j) += node_array[i]->pvec[j];
assert(sum->operator[](j) < 1e5);
sum_sqr[j] += pow(node_array[i]->pvec[j],2);
}
prediction += 0.5 * (pow(sum->operator[](j),2) - sum_sqr[j]);
assert(!std::isnan(prediction));
}
//truncate prediction to allowed values
prediction = std::min((double)prediction, maxval);
prediction = std::max((double)prediction, minval);
//return the squared error
float err = rating - prediction;
assert(!std::isnan(err));
return err*err;
}
/** Convenience overload: same prediction, without exposing the vector sum. */
float gensgd_predict(std::vector<vertex_data*>& node_array, int node_array_size,
const float rating, double & prediction){
fvec unused_sum;
return gensgd_predict(node_array, node_array_size, rating, prediction, &unused_sum);
}
/**
 * Allocate the latent factors — one vertex_data per user, item and feature
 * bin — compute the group offsets, and (unless a saved model is going to be
 * loaded) randomly initialize the feature vectors.
 * @param load_factors_from_file skip random init; factors will be loaded later
 */
void init_gensgd(bool load_factors_from_file){
srand(time(NULL)); //seed the RNG used by frandu() below
int nodes = M+N+num_feature_bins();
latent_factors_inmem.resize(nodes);
int howmany = calc_feature_num();
logstream(LOG_DEBUG)<<"Going to calculate: " << howmany << " offsets." << std::endl;
fc.offsets.resize(howmany);
get_offsets(fc.offsets);
assert(D > 0);
if (!load_factors_from_file){
double factor = 0.1/sqrt(D); //scale so initial predictions stay small
#pragma omp parallel for
for (int i=0; i< nodes; i++){
//in debug mode use a deterministic constant vector instead of random values
latent_factors_inmem[i].pvec = (debug ? 0.1*fones(D) : (::frandu(D)*factor));
}
}
}
/**
 * Fold the per-thread training error accumulators into the RMSE of the
 * finished iteration and print a progress line (no trailing newline — the
 * validation printout appends to the same line).
 * @param iteration iteration number being reported
 * @param gcontext engine context (unused)
 * @param items kept for interface compatibility; the former start/end range
 *        locals derived from it were never used and have been removed
 */
void training_rmse_N(int iteration, graphchi_context &gcontext, bool items = false){
last_training_rmse = dtraining_rmse;
size_t total_errors = 0;
dtraining_rmse = sum(rmse_vec);
if (calc_error){
total_errors = sum(errors_vec);
}
dtraining_rmse = sqrt(dtraining_rmse / pengine->num_edges());
if (calc_error)
std::cout<< std::setw(10) << mytimer.current_time() << ") Iteration: " << std::setw(3) <<iteration<<" Training RMSE: " << std::setw(10)<< dtraining_rmse << " Train err: " << std::setw(10) << (total_errors/(double)L);
else
std::cout<< std::setw(10) << mytimer.current_time() << ") Iteration: " << std::setw(3) <<iteration<<" Training RMSE: " << std::setw(10)<< dtraining_rmse;
}
/**
 * GraphChi programs need to subclass GraphChiProgram<vertex-type, edge-type>
 * class. The main logic is usually in the update function.
 */
struct Sparse_GensgdVerticesInMemProgram : public GraphChiProgram<VertexDataType, EdgeDataType> {
/*
 * Vertex update function - performs one SGD step per observed rating:
 * computes the FM prediction, then updates the global mean and every
 * participating node's bias and feature vector by the scaled error gradient.
 */
void update(graphchi_vertex<VertexDataType, EdgeDataType> &vertex, graphchi_context &gcontext) {
//go over all user nodes
if (is_user(vertex.id())){
//go over all observed ratings
for(int e=0; e < vertex.num_outedges(); e++) {
const edge_data & data = vertex.edge(e)->get_data();
//number of nodes involved in this rating: user + item + one per edge feature
int howmany = calc_feature_node_array_size(vertex.id(), vertex.edge(e)->vertex_id()-M, data.size);
std::vector<vertex_data*> node_array; node_array.resize(howmany);
for (int i=0; i< howmany; i++)
node_array[i] = NULL;
float rui = data.weight; //observed rating
double pui; //predicted rating (out-param of compute_prediction)
fvec sum; //per-dimension sum of participating vectors, reused by the gradient
//compute current prediction
rmse_vec[omp_get_thread_num()] += compute_prediction(vertex.id(), vertex.edge(e)->vertex_id()-M, rui ,pui, (uint*)data.features, (uint*)data.index, data.size, gensgd_predict, &sum, node_array, howmany);
if (calc_error){
if ((pui < cutoff && rui > cutoff) || (pui > cutoff && rui < cutoff))
errors_vec[omp_get_thread_num()]++;
}
float eui = pui - rui; //signed prediction error
//update global mean bias
globalMean -= gensgd_rate1 * (eui + gensgd_reg0 * globalMean);
//update node biases and vectors (each node group has its own learning rate)
for (int i=0; i < howmany; i++){
double gensgd_rate;
if (i == 0) //user
gensgd_rate = gensgd_rate1;
else if (i == 1) //item
gensgd_rate = gensgd_rate2;
else if (i < (int)(data.size+2)) //rating features
gensgd_rate = gensgd_rate3;
else if (i < (int)(2+data.size+fc.node_features)) //user and item features
gensgd_rate = gensgd_rate4;
else
gensgd_rate = gensgd_rate5; //last item
node_array[i]->bias -= gensgd_rate * (eui + gensgd_regw* node_array[i]->bias);
assert(!std::isnan(node_array[i]->bias));
assert(node_array[i]->bias < 1e3);
//FM pairwise-term gradient: sum of all vectors minus the node's own vector
fvec grad = sum - node_array[i]->pvec;
node_array[i]->pvec -= gensgd_rate * (eui*grad + gensgd_regv * node_array[i]->pvec);
assert(!std::isnan(node_array[i]->pvec[0]));
assert(node_array[i]->pvec[0] < 1e3);
}
}
}
//NOTE(review): vertex_with_no_edges is reported in after_iteration() but is
//never incremented here — confirm whether the counting was lost
};
/**
 * Called after an iteration has finished: decays the learning rates and
 * prints the training / validation error of the iteration.
 */
void after_iteration(int iteration, graphchi_context &gcontext) {
if (iteration == 1 && vertex_with_no_edges > 0)
logstream(LOG_WARNING)<<"There are " << vertex_with_no_edges << " users without ratings" << std::endl;
//decay all learning rates multiplicatively
gensgd_rate1 *= gensgd_mult_dec;
gensgd_rate2 *= gensgd_mult_dec;
gensgd_rate3 *= gensgd_mult_dec;
gensgd_rate4 *= gensgd_mult_dec;
gensgd_rate5 *= gensgd_mult_dec;
training_rmse_N(iteration, gcontext);
validation_rmse_N(&gensgd_predict, gcontext, fc);
};
/**
 * Called before an iteration is started: resets per-thread error accumulators.
 */
void before_iteration(int iteration, graphchi_context &gcontext) {
rmse_vec = zeros(number_of_omp_threads());
if (calc_error)
errors_vec = zeros(number_of_omp_threads());
}
};
/**
 * Write the trained model to disk in matrix-market format:
 * the latent factor matrix (<filename>_U.mm), the per-node bias vector
 * (<filename>_U_bias.mm) and the scalar global mean
 * (<filename>_global_mean.mm).
 */
void output_gensgd_result(std::string filename) {
  MMOutputter_mat<vertex_data> mmoutput(filename + "_U.mm", 0, M+N+num_feature_bins(), "This file contains Sparse_Gensgd output matrices. In each row D factors of a single user node, then item nodes, then features", latent_factors_inmem);
  MMOutputter_vec<vertex_data> mmoutput_bias(filename + "_U_bias.mm", 0, num_feature_bins(), BIAS_POS, "This file contains Sparse_Gensgd output bias vector. In each row a single user bias.", latent_factors_inmem);
  MMOutputter_scalar gmean(filename + "_global_mean.mm", "This file contains Sparse_Gensgd global mean which is required for computing predictions.", globalMean);
  logstream(LOG_INFO) << " GENSGD output files (in matrix market format): " << filename << "_U.mm" << ", "<< filename << "_global_mean.mm, " << filename << "_U_bias.mm " <<std::endl;
}
/**
 * Program entry point: parses gensgd-specific command line options,
 * converts/loads the training data, optionally restores a previously
 * saved model, runs the SGD engine and writes the result files.
 */
int main(int argc, const char ** argv) {
  print_copyright();

  /* GraphChi initialization will read the command line
     arguments and the configuration file. */
  graphchi_init(argc, argv);

  /* Metrics object for keeping track of performance counters
     and other information. Currently required. */
  metrics m("als-tensor-inmemory-factors");

  //specific command line parameters for gensgd
  gensgd_rate1 = get_option_float("gensgd_rate1", gensgd_rate1);
  gensgd_rate2 = get_option_float("gensgd_rate2", gensgd_rate2);
  gensgd_rate3 = get_option_float("gensgd_rate3", gensgd_rate3);
  gensgd_rate4 = get_option_float("gensgd_rate4", gensgd_rate4);
  gensgd_rate5 = get_option_float("gensgd_rate5", gensgd_rate5);
  gensgd_regw = get_option_float("gensgd_regw", gensgd_regw);
  gensgd_regv = get_option_float("gensgd_regv", gensgd_regv);
  gensgd_reg0 = get_option_float("gensgd_reg0", gensgd_reg0);
  gensgd_mult_dec = get_option_float("gensgd_mult_dec", gensgd_mult_dec);
  fc.hash_strings = get_option_int("rehash", fc.hash_strings);
  user_file = get_option_string("user_file", user_file);
  user_links = get_option_string("user_links", user_links);
  item_file = get_option_string("item_file", item_file);
  D = get_option_int("D", D);
  fc.from_pos = get_option_int("from_pos", fc.from_pos);
  fc.to_pos = get_option_int("to_pos", fc.to_pos);
  fc.val_pos = get_option_int("val_pos", fc.val_pos);
  limit_rating = get_option_int("limit_rating", limit_rating);
  calc_error = get_option_int("calc_error", calc_error);
  calc_roc = get_option_int("calc_roc", calc_roc);
  round_float = get_option_int("round_float", round_float);
  has_header_titles = get_option_int("has_header_titles", has_header_titles);
  fc.rehash_value = get_option_int("rehash_value", fc.rehash_value);
  cutoff = get_option_float("cutoff", cutoff);
  binary = get_option_int("binary", binary);

  parse_command_line_args();
  parse_implicit_command_line();

  fc.node_id_maps.resize(2); //initial place for from/to map
  //fc.stats_array.resize(fc.total_features);

  //libsvm files have a fixed column layout, so override positional options
  if (format == "libsvm"){
    fc.val_pos = 0;
    fc.to_pos = 2;
    fc.from_pos = 1;
    binary = false;
    fc.hash_strings = true;
  }

  int nshards = convert_matrixmarket_N<edge_data>(training, false, fc, limit_rating);
  fc.total_features = fc.index_map.string2nodeid.size();

  //optionally continue training from a previously saved model
  if (load_factors_from_file){
    load_matrix_market_matrix(training + "_U.mm", 0, D);
    vec user_bias = load_matrix_market_vector(training +"_U_bias.mm", false, true);
    assert(user_bias.size() == num_feature_bins());
    // FIX: the condition used to be "num_feature_bins()" (always true),
    // so the loop never terminated via its condition and read past the
    // end of user_bias.
    for (uint i=0; i < num_feature_bins(); i++){
      latent_factors_inmem[i].bias = user_bias[i];
    }
    vec gm = load_matrix_market_vector(training + "_global_mean.mm", false, true);
    globalMean = gm[0];
  }

  init_gensgd(load_factors_from_file);
  if (has_header_titles && header_titles.size() == 0)
    logstream(LOG_FATAL)<<"Please delete temp files (using : \"rm -f " << training << ".*\") and run again" << std::endl;

  logstream(LOG_INFO)<<"Target variable " << std::setw(3) << fc.val_pos << " : " << (has_header_titles? header_titles[fc.val_pos] : "") <<std::endl;
  logstream(LOG_INFO)<<"From " << std::setw(3) << fc.from_pos<< " : " << (has_header_titles? header_titles[fc.from_pos] : "") <<std::endl;
  logstream(LOG_INFO)<<"To " << std::setw(3) << fc.to_pos << " : " << (has_header_titles? header_titles[fc.to_pos] : "") <<std::endl;

  /* Run */
  Sparse_GensgdVerticesInMemProgram program;
  graphchi_engine<VertexDataType, EdgeDataType> engine(training, nshards, false, m);
  set_engine_flags(engine);
  pengine = &engine;
  engine.run(program, niters);

  /* Output test predictions in matrix-market format */
  output_gensgd_result(training);
  test_predictions_N(&gensgd_predict, fc);

  /* Report execution metrics */
  metrics_report(m);
  return 0;
}
| C++ |
#ifndef DEF_RMSEHPP
#define DEF_RMSEHPP
#include <iostream>
#include <iomanip>
#include <omp.h>
/**
* @file
* @author Danny Bickson
* @version 1.0
*
* @section LICENSE
*
* Copyright [2012] [Carnegie Mellon University]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* File for aggregating and siplaying error mesasures and algorithm progress
*/
#include "timer.hpp"
#include "eigen_wrapper.hpp"
#include "common.hpp"
// Forward declarations; implemented by the matrix-market I/O code.
void read_matrix_market_banner_and_size(FILE * pfile, MM_typecode & matcode, uint & M, uint & N, size_t & nz, const std::string & filename);
FILE * open_file(const char * filename, const char * mode, bool optional);

timer mytimer; // wall-clock timer used for progress reporting
// Running error measures: current and previous-iteration values for the
// training and validation sets (previous values are used to detect an
// RMSE increase and optionally halt).
double dtraining_rmse = 0;
double last_training_rmse = 0;
double dvalidation_rmse = 0;
double last_validation_rmse = 0;
/** Signum: -1 for negative x, +1 for positive x, 0 for zero. */
int sign(double x){
  if (x > 0) return 1;
  if (x < 0) return -1;
  return 0;
}
/**
 * Turn an aggregated (summed) per-edge loss into the reported error:
 * RMSE for squared loss, mean loss for the other cost functions.
 * @param rmse      sum of per-edge losses
 * @param num_edges number of edges aggregated into `rmse`
 */
double finalize_rmse(double rmse, double num_edges){
  double ret = 0;
  switch(loss_type){
    case SQUARE:
      ret = sqrt(rmse / num_edges);
      break;
    case LOGISTIC:
      ret = rmse/num_edges;
      break;
    case ABS:
      ret = rmse / num_edges;
      // FIX: this break was missing, so ABS fell through into AP.
      // Behavior was unchanged only because both cases compute the same
      // expression — the break removes the latent hazard.
      break;
    case AP:
      ret = rmse / num_edges;
      break;
  }
  return ret;
}
/**
 * Per-sample loss term for the configured cost function:
 * LOGISTIC -> negative binary entropy of the squashed prediction,
 * SQUARE   -> squared error,
 * ABS      -> absolute error,
 * anything else -> 0.
 */
double calc_loss(double exp_prediction, double err){
  switch (loss_type){
    case LOGISTIC:
      return exp_prediction * log(exp_prediction) + (1-exp_prediction)*log(1-exp_prediction);
    case SQUARE:
      return err*err;
    case ABS:
      return fabs(err);
  }
  return 0;
}
/**
 * Gradient-side error term for the configured cost function.
 * For SQUARE and ABS the raw error is scaled by the derivative of the
 * sigmoid mapping (exp_prediction*(1-exp_prediction)) times the value
 * range; returns NAN for an unknown loss type.
 */
double calc_error_f(double exp_prediction, double err){
  const double sigmoid_grad = exp_prediction * (1.0 - exp_prediction) * (maxval - minval);
  if (loss_type == LOGISTIC)
    return err;
  if (loss_type == SQUARE)
    return err * sigmoid_grad;
  if (loss_type == ABS)
    return sign(err) * sigmoid_grad;
  return NAN;
}
/**
 * Compute a prediction for every (user,item) pair listed in the `test`
 * matrix-market file and optionally write them to <test>.predict.
 * @param prediction_func algorithm-specific prediction callback
 * @param gcontext        engine context; only dereferenced when avgprd is set
 * @param dosave          when true, write the .predict output file
 * @param avgprd          optional running sum of predictions (MCMC methods)
 * @param pmf_burn_in     iterations to skip before averaging predictions
 */
void test_predictions(float (*prediction_func)(const vertex_data & user, const vertex_data & movie, float rating, double & prediction, void * extra), graphchi_context * gcontext = NULL, bool dosave = true, vec * avgprd = NULL, int pmf_burn_in = 0) {
  MM_typecode matcode;
  FILE *f;
  uint Me, Ne;
  size_t nz;

  // missing test data: nothing to compute
  if ((f = fopen(test.c_str(), "r")) == NULL) {
    return; //missing validaiton data, nothing to compute
  }
  FILE * fout = NULL;
  if (dosave)
    fout = open_file((test + ".predict").c_str(),"w", false);

  read_matrix_market_banner_and_size(f, matcode, Me, Ne, nz, test+".predict");
  if ((M > 0 && N > 0 ) && (Me != M || Ne != N))
    logstream(LOG_FATAL)<<"Input size of test matrix must be identical to training matrix, namely " << M << "x" << N << std::endl;

  // start accumulating prediction sums once burn-in is over
  if (avgprd && gcontext->iteration == pmf_burn_in)
    *avgprd = zeros(nz);

  if (dosave){
    mm_write_banner(fout, matcode);
    fprintf(fout, "%%This file contains predictions of user/item pair, one prediction in each line. The first column is user id. The second column is the item id. The third column is the computed prediction.\n");
    mm_write_mtx_crd_size(fout ,M,N,nz);
  }

  for (uint i=0; i<nz; i++)
  {
    int I, J;
    double val;
    int rc = fscanf(f, "%d %d %lg\n", &I, &J, &val);
    if (rc != 3)
      logstream(LOG_FATAL)<<"Error when reading input file: " << i << std::endl;
    I--; /* adjust from 1-based to 0-based */
    J--;
    double prediction;
    (*prediction_func)(latent_factors_inmem[I], latent_factors_inmem[J+M], val, prediction, NULL); //TODO
    //for mcmc methods, store the sum of predictions
    if (avgprd && avgprd->size() > 0 && gcontext->iteration >= pmf_burn_in)
      avgprd->operator[](i) += prediction;
    if (dosave){
      // MCMC methods report the running average over post-burn-in iterations
      if (avgprd && avgprd->size() > 0)
        prediction = avgprd->operator[](i) /(gcontext->iteration - pmf_burn_in);
      fprintf(fout, "%d %d %12.8lg\n", I+1, J+1, prediction);
    }
  }
  fclose(f);
  if (dosave)
    fclose(fout);
  if (dosave)
    std::cout<<"Finished writing " << nz << " predictions to file: " << test << ".predict" << std::endl;
}
/**
 * Tensor (time-aware) variant of test_predictions(): the test file has
 * 4 columns [user] [item] [time] [value]; the time column selects an
 * additional latent factor node at offset M+N. Predictions are always
 * written to <test>.predict.
 * @param prediction_func algorithm-specific prediction callback; the
 *        time node is passed through the `extra` pointer
 * @param time_offset NOTE(review): the parameter is never used in this
 *        body — the code subtracts the global input_file_offset instead;
 *        confirm which one is intended.
 */
void test_predictions3(float (*prediction_func)(const vertex_data & user, const vertex_data & movie, float rating, double & prediction, void * extra), int time_offset = 0) {
  MM_typecode matcode;
  FILE *f;
  uint Me, Ne;
  size_t nz;

  // missing test data: nothing to compute
  if ((f = fopen(test.c_str(), "r")) == NULL) {
    return; //missing validaiton data, nothing to compute
  }
  FILE * fout = open_file((test + ".predict").c_str(),"w", false);

  read_matrix_market_banner_and_size(f, matcode, Me, Ne, nz, test+".predict");
  if ((M > 0 && N > 0 ) && (Me != M || Ne != N))
    logstream(LOG_FATAL)<<"Input size of test matrix must be identical to training matrix, namely " << M << "x" << N << std::endl;

  mm_write_banner(fout, matcode);
  mm_write_mtx_crd_size(fout ,M,N,nz);

  for (uint i=0; i<nz; i++)
  {
    int I, J;
    double val;
    int time;
    int rc = fscanf(f, "%d %d %d %lg\n", &I, &J, &time, &val);
    if (rc != 4)
      logstream(LOG_FATAL)<<"Error when reading input file: " << i << std::endl;
    if (time - input_file_offset < 0)
      logstream(LOG_FATAL)<<"Error: we assume time values >= " << input_file_offset << std::endl;
    I--; /* adjust from 1-based to 0-based */
    J--;
    double prediction;
    // the time node lives after the user (M) and item (N) nodes
    (*prediction_func)(latent_factors_inmem[I], latent_factors_inmem[J+M], 1, prediction, (void*)&latent_factors_inmem[time+M+N-input_file_offset]);
    fprintf(fout, "%d %d %12.8lg\n", I+1, J+1, prediction);
  }
  fclose(f);
  fclose(fout);
  logstream(LOG_INFO)<<"Finished writing " << nz << " predictions to file: " << test << ".predict" << std::endl;
}
// Function-pointer declaration for a prediction callback.
float (*prediction_func)(const vertex_data & user, const vertex_data & movie, float rating, double & prediction, void * extra);
// Implemented in io.hpp: reads matrix dimensions and nnz from a matrix-market header.
void detect_matrix_size(std::string filename, FILE *&f, uint &_M, uint &_N, size_t & nz, uint nodes, size_t edges, int type);
/**
 * Compute the validation error by re-reading the validation matrix-market
 * file, predicting each entry with `prediction_func` and aggregating the
 * returned losses. Optionally halts the engine when the validation error
 * increases (controlled by the global halt_on_rmse_increase).
 * @param tokens_per_row 3 for [user item value], 4 for [user item time value]
 * @param avgprd optional per-entry prediction accumulator (MCMC methods)
 * @param pmf_burn_in NOTE(review): unused in this body — confirm intent.
 */
void validation_rmse(float (*prediction_func)(const vertex_data & user, const vertex_data & movie, float rating, double & prediction, void * extra)
    ,graphchi_context & gcontext, int tokens_per_row = 3, vec * avgprd = NULL, int pmf_burn_in = 0) {
  FILE *f;
  size_t nz;

  detect_matrix_size(validation, f, Me, Ne, nz, 0, 0, VALIDATION);
  // missing validation data: nothing to compute
  if (f == NULL)
    return;
  if ((M > 0 && N > 0) && (Me != M || Ne != N))
    logstream(LOG_FATAL)<<"Input size of validation matrix must be identical to training matrix, namely " << M << "x" << N << std::endl;

  Le = nz;
  if (avgprd != NULL && gcontext.iteration == 0)
    *avgprd = zeros(nz);

  last_validation_rmse = dvalidation_rmse;
  dvalidation_rmse = 0;
  int I, J;
  double val, time = 1.0; // `time` weights each sample in the 4-column format

  for (size_t i=0; i<nz; i++)
  {
    int rc;
    if (tokens_per_row == 3)
      rc = fscanf(f, "%d %d %lg\n", &I, &J, &val);
    else rc = fscanf(f, "%d %d %lg %lg\n", &I, &J, &time, &val);
    if (rc != tokens_per_row)
      logstream(LOG_FATAL)<<"Error when reading input file on line: " << i << " . should have" << tokens_per_row << std::endl;
    if (val < minval || val > maxval)
      logstream(LOG_FATAL)<<"Value is out of range: " << val << " should be: " << minval << " to " << maxval << std::endl;
    I--; /* adjust from 1-based to 0-based */
    J--;
    double prediction;
    dvalidation_rmse += time *(*prediction_func)(latent_factors_inmem[I], latent_factors_inmem[J+M], val, prediction, avgprd == NULL ? NULL : &avgprd->operator[](i));
  }

  fclose(f);
  assert(Le > 0);
  dvalidation_rmse = finalize_rmse(dvalidation_rmse , (double)Le);
  std::cout<<"  Validation " << error_names[loss_type] << ":" << std::setw(10) << dvalidation_rmse <<
    " ratings_per_sec: " << std::setw(10) << (gcontext.iteration*L/mytimer.current_time()) << std::endl;

  // Stop early when the feature is enabled (option > 0), enough
  // iterations have passed, and validation error went up.
  if (halt_on_rmse_increase > 0 && halt_on_rmse_increase < gcontext.iteration && dvalidation_rmse > last_validation_rmse){
    logstream(LOG_WARNING)<<"Stopping engine because of validation RMSE increase" << std::endl;
    gcontext.set_last_iteration(gcontext.iteration);
  }
}
/**
 * Compute the validation error for tensor (time-aware) algorithms that
 * read a 4-column validation file: [user] [item] [time] [value]. The
 * time column selects an extra latent factor node at offset M+N.
 * @param tokens_per_row kept for interface symmetry; rows are always
 *        parsed as 4 columns here
 * @param time_offset subtracted from the raw time column before use
 */
void validation_rmse3(float (*prediction_func)(const vertex_data & user, const vertex_data & movie, const vertex_data & time, float rating, double & prediction)
    ,graphchi_context & gcontext,int tokens_per_row = 4, int time_offset = 0) {
  MM_typecode matcode;
  FILE *f;
  size_t nz;

  // missing validation data: nothing to compute
  if ((f = fopen(validation.c_str(), "r")) == NULL) {
    std::cout<<std::endl;
    return;
  }
  read_matrix_market_banner_and_size(f, matcode, Me, Ne, nz, validation);
  if ((M > 0 && N > 0) && (Me != M || Ne != N))
    logstream(LOG_FATAL)<<"Input size of validation matrix must be identical to training matrix, namely " << M << "x" << N << std::endl;

  Le = nz;
  last_validation_rmse = dvalidation_rmse;
  dvalidation_rmse = 0;
  int I, J;
  double val, time = 1.0;

  for (size_t i=0; i<nz; i++)
  {
    int rc;
    rc = fscanf(f, "%d %d %lg %lg\n", &I, &J, &time, &val);
    time -= time_offset;
    if (rc != 4)
      logstream(LOG_FATAL)<<"Error when reading input file on line: " << i << " . should have 4 columns " << std::endl;
    if (val < minval || val > maxval)
      logstream(LOG_FATAL)<<"Value is out of range: " << val << " should be: " << minval << " to " << maxval << std::endl;
    if ((uint)time > K)
      logstream(LOG_FATAL)<<"Third column value time should be smaller than " << K << " while observed " << time << " in line : " << i << std::endl;
    I--; /* adjust from 1-based to 0-based */
    J--;
    double prediction;
    dvalidation_rmse += (*prediction_func)(latent_factors_inmem[I], latent_factors_inmem[J+M], latent_factors_inmem[M+N+(uint)time], val, prediction);
  }

  fclose(f);
  assert(Le > 0);
  dvalidation_rmse = finalize_rmse(dvalidation_rmse , (double)Le);
  std::cout<<"  Validation " << error_names[loss_type] << ":" << std::setw(10) << dvalidation_rmse << std::endl;

  // FIX: condition made consistent with validation_rmse(). The previous
  // form (halt_on_rmse_increase >= gcontext.iteration) could stop the
  // engine even when the option was left at its default, and inverted
  // the iteration comparison relative to the sibling function.
  if (halt_on_rmse_increase > 0 && halt_on_rmse_increase < gcontext.iteration && dvalidation_rmse > last_validation_rmse){
    logstream(LOG_WARNING)<<"Stopping engine because of validation RMSE increase" << std::endl;
    gcontext.set_last_iteration(gcontext.iteration);
  }
}
vec rmse_vec; // per-thread loss accumulators (indexed by OMP thread id), summed in training_rmse()
/**
 * Aggregate the per-thread training losses, normalize them into the
 * reported training error, and print a one-line progress report.
 * Returns the finalized training error for this iteration.
 */
double training_rmse(int iteration, graphchi_context &gcontext, bool items = false){
  last_training_rmse = dtraining_rmse;
  dtraining_rmse = sum(rmse_vec);
  // AP loss is aggregated like a squared loss: temporarily switch the
  // global so finalize_rmse() applies the sqrt-based normalization.
  const int saved_loss = loss_type;
  if (loss_type == AP)
    loss_type = SQUARE;
  const double ret = dtraining_rmse = finalize_rmse(dtraining_rmse, pengine->num_edges());
  std::cout << std::setw(10) << mytimer.current_time()
            << ") Iteration: " << std::setw(3) << iteration
            << " Training " << error_names[loss_type] << ":"
            << std::setw(10) << dtraining_rmse;
  loss_type = saved_loss;
  return ret;
}
#endif //DEF_RMSEHPP
| C++ |
/**
* @file
* @author Danny Bickson, based on code by Aapo Kyrola <akyrola@cs.cmu.edu>
* @version 1.0
*
* @section LICENSE
*
* Copyright [2012] [Carnegie Mellon University]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* This file implements item based collaborative filtering by comparing all item pairs which
* are connected by one or more user nodes.
*
*
* For Pearson's correlation
*
* see: http://en.wikipedia.org/wiki/Pearson_product-moment_correlation_coefficient
Cosine Similarity
See: http://en.wikipedia.org/wiki/Cosine_similarity
Manhattan Distance
See http://en.wikipedia.org/wiki/Taxicab_geometry
Log Similarity Distance
See http://tdunning.blogspot.co.il/2008/03/surprise-and-coincidence.html
Chebychev Distance
http://en.wikipedia.org/wiki/Chebyshev_distance
Tanimoto Distance
See http://en.wikipedia.org/wiki/Jaccard_index
Slope One
See "A prorammers guide to data mining" page 18:
http://guidetodatamining.com/guide/ch3/DataMining-ch3.pdf
*/
#include <algorithm>
#include <cstdio>
#include <iomanip>
#include <iostream>
#include <set>
#include <string>
#include <vector>
#include "eigen_wrapper.hpp"
#include "distance.hpp"
#include "util.hpp"
#include "timer.hpp"
#include "common.hpp"
// Supported item-item distance metrics; the numeric value is what the
// user passes on the command line via --distance.
enum DISTANCE_METRICS{
  JACKARD = 0,
  AA = 1,
  RA = 2,
  PEARSON = 3,
  COSINE = 4,
  CHEBYCHEV = 5,
  MANHATTEN = 6,
  TANIMOTO = 7,
  LOG_LIKELIHOOD = 8,
  SLOPE_ONE = 9
};
int min_allowed_intersection = 1; // minimum common raters for an item pair to be compared/emitted
size_t written_pairs = 0;         // similarity pairs written to the output files
size_t item_pairs_compared = 0;   // total item pairs compared so far
std::vector<FILE*> out_files;     // one output file per OMP thread
timer mytimer;
bool * relevant_items = NULL;     // items connected (via a user) to the current pivot window
vec mean;                         // mean[user] = sum of the user's ratings / N (filled at iteration 0)
// stddev[item] = sum of squared deviations from the user means / (M-1).
// NOTE(review): despite the name this holds a variance-like quantity
// (no sqrt is taken) — see update() iteration 1.
vec stddev;
int grabbed_edges = 0;            // edges currently cached in memory (membudget control)
int distance_metric;              // one of DISTANCE_METRICS, set from --distance
int debug;

// Vertex id convention: ids [0, M) are users, [M, M+N) are items.
bool is_item(vid_t v){ return v >= M; }
bool is_user(vid_t v){ return v < M; }
/**
 * Type definitions. Remember to create suitable graph shards using the
 * Sharder-program.
 */
typedef unsigned int VertexDataType;
typedef float EdgeDataType; // Edges store the "rating" of user->movie pair

// Minimal per-vertex record required by the shared io.hpp templates.
// pvec appears unused by the distance computation in this program —
// confirm against io.hpp before removing.
struct vertex_data{
  vec pvec;
  vertex_data(){ }
  void set_val(int index, float val){
    pvec[index] = val;
  }
  float get_val(int index){
    return pvec[index];
  }
};
std::vector<vertex_data> latent_factors_inmem;
#include "io.hpp"
/**
 * Adjacency holder for one item: its ratings keyed by user id, stored
 * as a sparse Eigen vector (edges[user] = rating).
 */
struct dense_adj {
  sparse_vec edges;
  dense_adj() { }

  // Size of the rater intersection with `other`: each rating is mapped
  // through equal_greater() (presumably a 0/1 indicator — confirm its
  // definition) and the indicator vectors are multiplied elementwise.
  // NOTE(review): std::ptr_fun was deprecated in C++11 and removed in
  // C++17; this needs a lambda/functor under newer standards.
  double intersect(const dense_adj & other){
    sparse_vec x1 = edges.unaryExpr(std::ptr_fun(equal_greater));
    sparse_vec x2 = other.edges.unaryExpr(std::ptr_fun(equal_greater));
    sparse_vec x3 = x1.cwiseProduct(x2);
    return sum(x3);
  }
};
// This is used for keeping in-memory
// In-memory cache of the adjacency (rating) lists for the current window
// of "pivot" items [pivot_st, pivot_en). Pivots are loaded on even
// iterations and compared against all relevant items on odd iterations.
class adjlist_container {
  std::vector<dense_adj> adjs; // one adjacency list per pivot (index = id - pivot_st)
  //mutex m;
  public:
  vid_t pivot_st, pivot_en; // current pivot window, item-node ids

  adjlist_container() {
    if (debug)
      std::cout<<"setting pivot st and end to " << M << std::endl;
    pivot_st = M; //start pivor on item nodes (excluding user nodes)
    pivot_en = M;
  }

  // Release all cached adjacency lists and slide the window start up to
  // the previous end, so the next even iteration loads the next range.
  void clear() {
    for(std::vector<dense_adj>::iterator it=adjs.begin(); it != adjs.end(); ++it) {
      if (nnz(it->edges)) {
        it->edges.resize(0);
      }
    }
    adjs.clear();
    if (debug)
      std::cout<<"setting pivot st to " << pivot_en << std::endl;
    pivot_st = pivot_en;
  }

  /**
   * Extend the interval of pivot vertices to en.
   */
  void extend_pivotrange(vid_t en) {
    assert(en>pivot_en);
    pivot_en = en;
    adjs.resize(pivot_en - pivot_st);
  }

  /**
   * Grab pivot's adjacency list into memory.
   * Returns the number of edges cached; items with fewer than
   * min_allowed_intersection ratings are marked irrelevant and skipped.
   */
  int load_edges_into_memory(graphchi_vertex<uint32_t, float> &v) {
    //assert(is_pivot(v.id()));
    //assert(is_item(v.id()));
    int num_edges = v.num_edges();
    //not enough user rated this item, we don't need to compare to it
    if (num_edges < min_allowed_intersection){
      relevant_items[v.id() - M] = false;
      return 0;
    }
    relevant_items[v.id() - M] = true;
    // Count how many neighbors have larger id than v
    dense_adj dadj;
    for(int i=0; i<num_edges; i++)
      set_new( dadj.edges, v.edge(i)->vertex_id(), v.edge(i)->get_data());
    //std::sort(&dadj.adjlist[0], &dadj.adjlist[0] + num_edges);
    adjs[v.id() - pivot_st] = dadj;
    assert(v.id() - pivot_st < adjs.size());
    // atomically track how much we cached, for the membudget check
    __sync_add_and_fetch(&grabbed_edges, num_edges /*edges_to_larger_id*/);
    return num_edges;
  }

  // Number of stored ratings for a cached pivot item.
  int acount(vid_t pivot) {
    return nnz(adjs[pivot - pivot_st].edges);
  }

  /**
   * calc distance between two items.
   * Let a be all the users rated item 1
   * Let b be all the users rated item 2
   *
   * 3) Using Pearson correlation
   *    Dist_12 = (a - mean)*(b- mean)' / (std(a)*std(b))
   *
   * 4) Using cosine similarity:
   *    Dist_12 = (a*b) / sqrt(sum_sqr(a)) * sqrt(sum_sqr(b)))
   *
   * 5) Using chebychev:
   *    Dist_12 = max(abs(a-b))
   *
   * 6) Using manhatten distance:
   *    Dist_12 = sum(abs(a-b))
   *
   * 7) Using tanimoto:
   *    Dist_12 = 1.0 - [(a*b) / (sum_sqr(a) + sum_sqr(b) - a*b)]
   *
   * 8) Using log likelihood similarity
   *    Dist_12 = 1.0 - 1.0/(1.0 + loglikelihood)
   *
   * 9) Using slope one:
   *    Dist_12 = sum_(u in intersection (a,b) (r_u1-ru2 ) / size(intersection(a,b)))
   *
   * Returns 0 when the items share fewer than min_allowed_intersection
   * raters; NAN for an unknown metric.
   */
  double calc_distance(graphchi_vertex<uint32_t, float> &v, vid_t pivot, int distance_metric) {
    //assert(is_pivot(pivot));
    //assert(is_item(pivot) && is_item(v.id()));
    dense_adj &pivot_edges = adjs[pivot - pivot_st];
    int num_edges = v.num_edges();

    //if there are not enough neighboring user nodes to those two items there is no need
    //to actually count the intersection
    if (num_edges < min_allowed_intersection || nnz(pivot_edges.edges) < min_allowed_intersection)
      return 0;

    dense_adj item_edges;
    for(int i=0; i < num_edges; i++)
      set_new(item_edges.edges, v.edge(i)->vertexid, v.edge(i)->get_data());

    double intersection_size = item_edges.intersect(pivot_edges);
    //not enough user nodes rated both items, so the pairs of items are not compared.
    if (intersection_size < (double)min_allowed_intersection)
      return 0;

    if (distance_metric == PEARSON){
      if (debug){
        std::cout<< pivot -M+1<<" Pivot edges: " <<pivot_edges.edges << std::endl;
        std::cout<< "Minusmean: " << minus(pivot_edges.edges,mean) << std::endl;
        std::cout<< v.id() -M+1<<"Item edges: " <<item_edges.edges << std::endl;
        std::cout<< "Minusmean: " << minus(item_edges.edges, mean) << std::endl;
      }
      double dist = minus(pivot_edges.edges, mean).dot(minus(item_edges.edges, mean));
      if (debug)
        std::cout<<"dist " << pivot-M+1 << ":" << v.id()-M+1 << " " << dist << std::endl;
      return dist / (stddev[pivot-M] * stddev[v.id()-M]);
    }
    else if (distance_metric == TANIMOTO){
      return calc_tanimoto_distance(pivot_edges.edges,
          item_edges.edges,
          sum_sqr(pivot_edges.edges),
          sum_sqr(item_edges.edges));
    }
    else if (distance_metric == CHEBYCHEV){
      return calc_chebychev_distance(pivot_edges.edges,
          item_edges.edges);
    }
    else if (distance_metric == LOG_LIKELIHOOD){
      return calc_loglikelihood_distance(pivot_edges.edges,
          item_edges.edges,
          sum_sqr(pivot_edges.edges),
          sum_sqr(item_edges.edges));
    }
    else if (distance_metric == COSINE){
      return calc_cosine_distance(pivot_edges.edges,
          item_edges.edges,
          sum_sqr(pivot_edges.edges),
          sum_sqr(item_edges.edges));
    }
    else if (distance_metric ==MANHATTEN){
      return calc_manhatten_distance(pivot_edges.edges,
          item_edges.edges);
    }
    else if (distance_metric == SLOPE_ONE){
      return calc_slope_one_distance(pivot_edges.edges, item_edges.edges) / intersection_size;
    }
    return NAN;
  }

  // True when vid lies inside the currently cached pivot window.
  inline bool is_pivot(vid_t vid) {
    return vid >= pivot_st && vid < pivot_en;
  }
};
adjlist_container * adjcontainer;
struct ItemDistanceProgram : public GraphChiProgram<VertexDataType, EdgeDataType> {
  /**
   * Vertex update function. The algorithm is phased by iteration number:
   * iteration 0 accumulates per-user means, iteration 1 per-item
   * deviations (for Pearson); then even iterations load pivot items into
   * memory and flag relevant items, and odd iterations compare each
   * flagged item against the cached pivots.
   */
  void update(graphchi_vertex<VertexDataType, EdgeDataType> &v, graphchi_context &gcontext) {
    if (debug)
      printf("Entered iteration %d with %d\n", gcontext.iteration, v.id());

    //in the zero iteration compute the mean
    if (gcontext.iteration == 0){
      if (is_item(v.id())){
        for(int i=0; i<v.num_edges(); i++) {
          graphchi_edge<float> * e = v.edge(i);
          vid_t user = e->vertexid;
          mean[user] += e->get_data() / (float)N;
        }
      }
    }
    //at the first iteration compute the stddev of each item from the mean
    else if (gcontext.iteration == 1){
      if (is_item(v.id())){
        dense_adj item_edges;
        for(int i=0; i < v.num_edges(); i++)
          set_new(item_edges.edges, v.edge(i)->vertexid, v.edge(i)->get_data());
        stddev[v.id() - M] = sum(minus(item_edges.edges, mean).array().pow(2)) / (M-1.0);
        if (debug)
          std::cout<<"item: " << v.id() - M+1 << " stddev: " << stddev[v.id() - M] << std::endl;
      }
    }
    /* even iteration numbers:
     * 1) load a subset of items into memory (pivots)
     * 2) Find which subset of items needs to compared to the users
     */
    else if (gcontext.iteration % 2 == 0) {
      if (adjcontainer->is_pivot(v.id()) && is_item(v.id())){
        adjcontainer->load_edges_into_memory(v);
        if (debug)
          printf("Loading pivot %d intro memory\n", v.id());
      }
      else if (is_user(v.id())){
        //check if this user is connected to any pivot item
        bool has_pivot = false;
        int pivot = -1;
        for(int i=0; i<v.num_edges(); i++) {
          graphchi_edge<float> * e = v.edge(i);
          //assert(is_item(e->vertexid));
          if (adjcontainer->is_pivot(e->vertexid) && relevant_items[e->vertexid-M]) {
            has_pivot = true;
            pivot = e->vertexid;
            break;
          }
        }
        if (debug)
          printf("user %d is linked to pivot %d\n", v.id(), pivot);
        if (!has_pivot) //this user is not connected to any of the pivot item nodes and thus
          //it is not relevant at this point
          return;
        //this user is connected to a pivot items, thus all connected items should be compared
        for(int i=0; i<v.num_edges(); i++) {
          graphchi_edge<float> * e = v.edge(i);
          //assert(v.id() != e->vertexid);
          relevant_items[e->vertexid - M] = true;
        }
      }//is_user
    } //iteration % 2 = 1
    /* odd iteration number:
     * 1) For any item connected to a pivot item
     *    compute itersection
     */
    else {
      // item was never reached through a pivot's users: skip it
      if (!relevant_items[v.id() - M]){
        return;
      }
      for (vid_t i=adjcontainer->pivot_st; i< adjcontainer->pivot_en; i++){
        //since metric is symmetric, compare only to pivots which are smaller than this item id
        if (i >= v.id() || (!relevant_items[i-M]))
          continue;
        double dist = adjcontainer->calc_distance(v, i, distance_metric);
        item_pairs_compared++;
        if (item_pairs_compared % 1000000 == 0)
          logstream(LOG_INFO)<< std::setw(10) << mytimer.current_time() << ") " << std::setw(10) << item_pairs_compared << " pairs compared " << std::endl;
        if (debug)
          printf("comparing %d to pivot %d distance is %lg\n", i - M + 1, v.id() - M + 1, dist);
        if (dist != 0){
          fprintf(out_files[omp_get_thread_num()], "%u %u %.12lg\n", v.id()-M+1, i-M+1, (double)dist);//write item similarity to file
          //where the output format is:
          //[item A] [ item B ] [ distance ]
          written_pairs++;
        }
      }
    }//end of iteration % 2 == 1
  }//end of update function

  /**
   * Called before an iteration starts.
   * On odd iteration, schedule both users and items.
   * on even iterations, schedules only item nodes
   */
  void before_iteration(int iteration, graphchi_context &gcontext) {
    gcontext.scheduler->remove_tasks(0, (int) gcontext.nvertices - 1);
    if (gcontext.iteration % 2 == 0){
      // new pivot window: reset relevance flags and schedule everything
      memset(relevant_items, 0, sizeof(bool)*N);
      for (vid_t i=0; i < M+N; i++){
        gcontext.scheduler->add_task(i);
      }
      if (debug)
        printf("scheduling all nodes, setting relevant_items to zero\n");
      grabbed_edges = 0;
      adjcontainer->clear();
    } else { //iteration % 2 == 1
      // comparison pass: item nodes only
      for (vid_t i=M; i < M+N; i++){
        gcontext.scheduler->add_task(i);
      }
    }
  }

  /**
   * Called after an iteration has finished.
   */
  void after_iteration(int iteration, graphchi_context &gcontext) {
    if (debug && gcontext.iteration == 0)
      std::cout<<"Mean : " << mean << std::endl;
  }

  /**
   * Called before an execution interval is started.
   *
   * On every even iteration, we load pivot's item connected user lists to memory.
   * Here we manage the memory to ensure that we do not load too much
   * edges into memory.
   */
  void before_exec_interval(vid_t window_st, vid_t window_en, graphchi_context &gcontext) {
    /* on even iterations, loads pivot items into memory base on the membudget_mb allowed memory size */
    if ((gcontext.iteration % 2 == 0) && (gcontext.iteration >= 2)) {
      if (debug){
        printf("entering iteration: %d on before_exec_interval\n", gcontext.iteration);
        printf("pivot_st is %d window_en %d\n", adjcontainer->pivot_st, window_en);
      }
      if (adjcontainer->pivot_st <= window_en) {
        size_t max_grab_edges = get_option_long("membudget_mb", 1024) * 1024 * 1024 / 8;
        if (grabbed_edges < max_grab_edges * 0.8) {
          logstream(LOG_DEBUG) << "Window init, grabbed: " << grabbed_edges << " edges" << " extending pivor_range to : " << window_en + 1 << std::endl;
          adjcontainer->extend_pivotrange(window_en + 1);
          logstream(LOG_DEBUG) << "Window en is: " << window_en << " vertices: " << gcontext.nvertices << std::endl;
          if (window_en+1 == gcontext.nvertices) {
            // every item was a pivot item, so we are done
            logstream(LOG_DEBUG)<<"Setting last iteration to: " << gcontext.iteration + 2 << std::endl;
            gcontext.set_last_iteration(gcontext.iteration + 2);
          }
        } else {
          logstream(LOG_DEBUG) << "Too many edges, already grabbed: " << grabbed_edges << std::endl;
        }
      }
    }
  }
};
/**
 * Program entry point: reads options, converts the training matrix to
 * shards, runs the item-item distance program and writes one similarity
 * output file per OMP thread.
 */
int main(int argc, const char ** argv) {
  print_copyright();

  /* GraphChi initialization will read the command line
     arguments and the configuration file. */
  graphchi_init(argc, argv);

  /* Metrics object for keeping track of performance counters
     and other information. Currently required. */
  metrics m("item-cf2");

  /* Basic arguments for application */
  min_allowed_intersection = get_option_int("min_allowed_intersection", min_allowed_intersection);
  distance_metric = get_option_int("distance", PEARSON);
  if (distance_metric != PEARSON && distance_metric != MANHATTEN && distance_metric != COSINE &&
      distance_metric != CHEBYCHEV && distance_metric != LOG_LIKELIHOOD && distance_metric != TANIMOTO && distance_metric != SLOPE_ONE)
    logstream(LOG_FATAL)<<"--distance_metrix=XX should be one of: 3=PEARSON, 4=COSINE, 5=CHEBYCHEV, 6=MANHATTEN, 7=TANIMOTO, 8=LOG_LIKELIHOOD, 9 = SLOPE_ONE" << std::endl;
  debug = get_option_int("debug", 0);
  parse_command_line_args();

  //if (distance_metric != JACKARD && distance_metric != AA && distance_metric != RA)
  //  logstream(LOG_FATAL)<<"Wrong distance metric. --distance_metric=XX, where XX should be either 0) JACKARD, 1) AA, 2) RA" << std::endl;

  mytimer.start();
  int nshards = convert_matrixmarket<EdgeDataType>(training, NULL, 0, 0, 3, TRAINING, false);
  assert(M > 0 && N > 0);

  //initialize data structure which saves a subset of the items (pivots) in memory
  adjcontainer = new adjlist_container();
  //array for marking which items are conected to the pivot items via users.
  relevant_items = new bool[N];
  mean = vec::Zero(M);    // per-user rating means, filled at iteration 0
  stddev = vec::Zero(N);  // per-item deviations, filled at iteration 1

  /* Run */
  ItemDistanceProgram program;
  graphchi_engine<VertexDataType, EdgeDataType> engine(training/*+orderByDegreePreprocessor->getSuffix()*/ ,nshards, true, m);
  set_engine_flags(engine);

  //open output files as the number of operating threads
  out_files.resize(number_of_omp_threads());
  for (uint i=0; i< out_files.size(); i++){
    char buf[256];
    // FIX: bound the write to the buffer size (was an unbounded sprintf)
    snprintf(buf, sizeof(buf), "%s.out%d", training.c_str(), i);
    out_files[i] = open_file(buf, "w");
  }

  //run the program
  engine.run(program, niters);

  /* Report execution metrics */
  if (!quiet)
    metrics_report(m);
  std::cout<<"Total item pairs compared: " << item_pairs_compared << " total written to file: " << written_pairs << std::endl;

  for (uint i=0; i< out_files.size(); i++)
    fclose(out_files[i]);
  std::cout<<"Created output files with the format: " << training << ".outXX, where XX is the output thread number" << std::endl;

  // FIX: release heap allocations (adjcontainer was previously leaked)
  delete adjcontainer;
  delete[] relevant_items;
  return 0;
}
| C++ |
/**
* @file
* @author Danny Bickson
* @version 1.0
*
* @section LICENSE
*
* Copyright [2012] [Carnegie Mellon University]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @section DESCRIPTION
*
* Matrix factorization with the Bias Stochastic Gradient Descent (BIASSGD) algorithm.
* Algorithm is described in the paper:
* Y. Koren. Factorization Meets the Neighborhood: a Multifaceted Collaborative Filtering Model. ACM SIGKDD 2008. Equation (5).
* Thanks to Zeno Gantner, MyMediaLight for teaching me how to compute the derivative in case of logistic and absolute loss.
* http://mymedialite.net/
*/
#include "common.hpp"
#include "eigen_wrapper.hpp"
double biassgd_lambda = 1e-3; //sgd step size
double biassgd_gamma = 1e-3; //sgd regularization
// NOTE(review): the two comments above may be swapped — after_iteration()
// decays biassgd_gamma by biassgd_step_dec, which is what one usually does
// to a step size (learning rate), not to a regularizer. Confirm against
// the SGD update rule before relying on these names.
double biassgd_step_dec = 0.9; //sgd step decrement
// Pseudo-index used by vertex_data::set_val/get_val to address the bias.
#define BIAS_POS -1
/** Per-node model state: a D-dimensional latent factor vector plus a scalar bias. */
struct vertex_data {
  vec pvec;    // latent feature vector (length D)
  double bias; // node bias term

  vertex_data() {
    pvec = zeros(D);
    bias = 0;
  }

  // index == BIAS_POS addresses the bias; any other index addresses pvec.
  void set_val(int index, float val){
    if (index == BIAS_POS)
      bias = val;
    else
      pvec[index] = val;
  }

  float get_val(int index){
    return (index == BIAS_POS) ? bias : pvec[index];
  }
};
/**
 * Type definitions. Remember to create suitable graph shards using the
 * Sharder-program.
 */
// Each graph node carries a vertex_data record; each edge stores the observed
// rating as a plain float.
typedef vertex_data VertexDataType;
typedef float EdgeDataType; // Edges store the "rating" of user->movie pair
// Engine handles, wired up in main(); pvalidation_engine stays NULL when no
// validation file is supplied.
graphchi_engine<VertexDataType, EdgeDataType> * pengine = NULL;
graphchi_engine<VertexDataType, EdgeDataType> * pvalidation_engine = NULL;
// All latent factors kept in RAM, indexed by global node id (users [0,M),
// then items [M,M+N)).
std::vector<vertex_data> latent_factors_inmem;
#include "rmse.hpp"
#include "rmse_engine.hpp"
#include "io.hpp"
/** Compute a missing value with the bias-SGD model.
 *  The raw linear score (normalized global mean + both biases + factor dot
 *  product) is squashed through a logistic and linearly mapped onto
 *  [minval, maxval].
 *  @param prediction out: rating prediction inside the allowed range
 *  @param extra      optional out (double*): the raw sigmoid value, which the
 *                    caller needs to form the gradient
 *  @return per-observation loss as computed by calc_loss() */
float bias_sgd_predict(const vertex_data& user,
    const vertex_data& movie,
    const float rating,
    double & prediction,
    void * extra = NULL){
  // raw linear score; note the global mean is pre-scaled by 1/maxval here
  double raw_score = globalMean/maxval + user.bias + movie.bias + dot_prod(user.pvec, movie.pvec);
  // logistic squashing into (0,1)
  double exp_prediction = 1.0 / (1.0 + exp(-raw_score));
  // map the sigmoid output onto the allowed rating range
  prediction = minval + exp_prediction *(maxval-minval);
  float err = rating - prediction;
  if (std::isnan(err))
    logstream(LOG_FATAL)<<"Got into numerical errors. Try to decrease step size using bias-SGD command line arugments)" << std::endl;
  if (extra != NULL)
    *(double*)extra = exp_prediction;
  return calc_loss(exp_prediction, err);
}
/**
 * GraphChi programs need to subclass GraphChiProgram<vertex-type, edge-type>
 * class. The main logic is usually in the update function.
 */
struct BIASSGDVerticesInMemProgram : public GraphChiProgram<VertexDataType, EdgeDataType> {
  /**
   * Called before an iteration is started: clears per-thread RMSE accumulators.
   */
  void before_iteration(int iteration, graphchi_context &gcontext) {
    reset_rmse(gcontext.execthreads);
  }
  /**
   * Called after an iteration has finished: decays the step size and reports
   * training/validation error.
   */
  void after_iteration(int iteration, graphchi_context &gcontext) {
    biassgd_gamma *= biassgd_step_dec;
    training_rmse(iteration, gcontext);
    run_validation(pvalidation_engine, gcontext);
  }
  /**
   * Vertex update function. Only user nodes (the side owning out-edges) are
   * active; each of their ratings yields one SGD step on the biases and
   * factor vectors of both endpoints.
   */
  void update(graphchi_vertex<VertexDataType, EdgeDataType> &vertex, graphchi_context &gcontext) {
    //user node
    if ( vertex.num_outedges() > 0){
      vertex_data & user = latent_factors_inmem[vertex.id()];
      for(int e=0; e < vertex.num_edges(); e++) {
        float observation = vertex.edge(e)->get_data();
        vertex_data & movie = latent_factors_inmem[vertex.edge(e)->vertex_id()];
        double prediction;
        double exp_prediction;
        // bias_sgd_predict returns the loss and fills prediction + the raw sigmoid
        rmse_vec[omp_get_thread_num()] += bias_sgd_predict(user, movie, observation, prediction, &exp_prediction);
        double err = observation - prediction;
        // convert the residual into the loss derivative for the chosen loss type
        err = calc_error_f(exp_prediction, err);
        if (std::isnan(err) || std::isinf(err))
          logstream(LOG_FATAL)<<"BIASSGD got into numerical error. Please tune step size using --biassgd_gamma and biassgd_lambda" << std::endl;
        // bias steps: learning rate biassgd_gamma, weight decay biassgd_lambda
        user.bias += biassgd_gamma*(err - biassgd_lambda* user.bias);
        movie.bias += biassgd_gamma*(err - biassgd_lambda* movie.bias);
        //NOTE: the following code is not thread safe, since potentially several
        //user nodes may update this item gradient vector concurrently. However in practice it
        //did not matter in terms of accuracy on a multicore machine.
        //if you like to defend the code, you can define a global variable
        //mutex mymutex;
        //
        //and then do: mymutex.lock()
        movie.pvec += biassgd_gamma*(err*user.pvec - biassgd_lambda*movie.pvec);
        //here add: mymutex.unlock();
        // NOTE: movie.pvec was already updated above, so the user step reads
        // the post-update item factors — statement order is load-bearing here.
        user.pvec += biassgd_gamma*(err*movie.pvec - biassgd_lambda*user.pvec);
      }
    }
  }
};
/** Write the trained bias-SGD model to disk in matrix-market format:
 *  U (user factors), V (item factors), the two bias vectors and the
 *  global mean. */
void output_biassgd_result(std::string filename){
  MMOutputter_mat<vertex_data> user_mat(filename + "_U.mm", 0, M, "This file contains bias-SGD output matrix U. In each row D factors of a single user node.", latent_factors_inmem);
  MMOutputter_mat<vertex_data> item_mat(filename + "_V.mm", M, M+N , "This file contains bias-SGD output matrix V. In each row D factors of a single item node.", latent_factors_inmem);
  MMOutputter_vec<vertex_data> user_bias_vec(filename + "_U_bias.mm", 0, M, BIAS_POS, "This file contains bias-SGD output bias vector. In each row a single user bias.",latent_factors_inmem);
  MMOutputter_vec<vertex_data> item_bias_vec(filename + "_V_bias.mm",M ,M+N, BIAS_POS, "This file contains bias-SGD output bias vector. In each row a single item bias.", latent_factors_inmem);
  // BUGFIX: the global-mean header and the log line below previously said
  // "SVD++" / "SVDPP" — a copy-paste leftover; this is the bias-SGD program.
  MMOutputter_scalar gmean(filename + "_global_mean.mm", "This file contains bias-SGD global mean which is required for computing predictions.", globalMean);
  logstream(LOG_INFO) << "bias-SGD output files (in matrix market format): " << filename << "_U.mm" <<
    ", " << filename + "_V.mm, " << filename << "_U_bias.mm, " << filename << "_V_bias.mm, " << filename << "_global_mean.mm" << std::endl;
}
int main(int argc, const char ** argv) {
  print_copyright();
  /* GraphChi initialization will read the command line arguments and the configuration file. */
  graphchi_init(argc, argv);
  /* Metrics object for keeping track of performance counters
     and other information. Currently required. */
  metrics m("biassgd2");
  /* bias-SGD hyper-parameters; must be read after graphchi_init() parsed argv. */
  biassgd_lambda = get_option_float("biassgd_lambda", 1e-3);
  biassgd_gamma = get_option_float("biassgd_gamma", 1e-3);
  biassgd_step_dec = get_option_float("biassgd_step_dec", 0.9);
  parse_command_line_args();
  parse_implicit_command_line();
  /* The logistic rescaling in bias_sgd_predict() needs a finite rating range;
     1e100 / -1e100 are the "unset" sentinels. */
  if (maxval == 1e100 || minval == -1e100)
    logstream(LOG_FATAL)<<"You must set min allowed rating and max allowed rating using the --minval and --maval flags" << std::endl;
  /* Preprocess data if needed, or discover preprocess files */
  int nshards = convert_matrixmarket<EdgeDataType>(training, NULL,0, 0, 3, TRAINING, false);
  /* Allocate factors for all M users + N items; random init unless a saved
     model is loaded below. */
  init_feature_vectors<std::vector<vertex_data> >(M+N, latent_factors_inmem, !load_factors_from_file);
  if (validation != ""){
    int vshards = convert_matrixmarket<EdgeDataType>(validation, NULL, 0, 0, 3, VALIDATION, false);
    init_validation_rmse_engine<VertexDataType, EdgeDataType>(pvalidation_engine, vshards, &bias_sgd_predict);
  }
  /* load initial state from disk (optional) */
  if (load_factors_from_file){
    load_matrix_market_matrix(training + "_U.mm", 0, D);
    load_matrix_market_matrix(training + "_V.mm", M, D);
    vec user_bias = load_matrix_market_vector(training +"_U_bias.mm", false, true);
    vec item_bias = load_matrix_market_vector(training +"_V_bias.mm", false, true);
    /* biases are stored split: users in [0,M), items in [M,M+N) */
    for (uint i=0; i<M+N; i++){
      latent_factors_inmem[i].bias = ((i<M)?user_bias[i] : item_bias[i-M]);
    }
    vec gm = load_matrix_market_vector(training + "_global_mean.mm", false, true);
    globalMean = gm[0];
  }
  /* Run */
  BIASSGDVerticesInMemProgram program;
  graphchi_engine<VertexDataType, EdgeDataType> engine(training, nshards, false, m);
  set_engine_flags(engine);
  pengine = &engine;
  engine.run(program, niters);
  /* Output latent factor matrices in matrix-market format */
  output_biassgd_result(training);
  test_predictions(&bias_sgd_predict);
  /* Report execution metrics */
  if (!quiet)
    metrics_report(m);
  return 0;
}
| C++ |
/**
* @file
* @author Danny Bickson
* @version 1.0
*
* @section LICENSE
*
* Copyright [2012] [Carnegie Mellon University]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @section DESCRIPTION
*
* Matrix factorization with the Stochastic Gradient Descent (SGD) algorithm.
* Algorithm is described in the papers:
* 1) Matrix Factorization Techniques for Recommender Systems Yehuda Koren, Robert Bell, Chris Volinsky. In IEEE Computer, Vol. 42, No. 8. (07 August 2009), pp. 30-37.
* 2) Takács, G, Pilászy, I., Németh, B. and Tikk, D. (2009). Scalable Collaborative Filtering Approaches for Large Recommender Systems. Journal of Machine Learning Research, 10, 623-656.
*
*
*/
#include "eigen_wrapper.hpp"
#include "common.hpp"
double sgd_lambda = 1e-3; //sgd regularization weight (weight-decay term in the gradient step)
double sgd_gamma = 1e-3; //sgd step size (learning rate), decayed each iteration by sgd_step_dec
double sgd_step_dec = 0.9; //multiplicative step size decrement applied after every iteration
struct vertex_data {
vec pvec; //storing the feature vector
vertex_data() {
pvec = zeros(D);
}
void set_val(int index, float val){
pvec[index] = val;
}
float get_val(int index){
return pvec[index];
}
};
#include "util.hpp"
/**
 * Type definitions. Remember to create suitable graph shards using the
 * Sharder-program.
 */
// Each graph node carries a vertex_data record; each edge stores the observed
// rating as a plain float.
typedef vertex_data VertexDataType;
typedef float EdgeDataType; // Edges store the "rating" of user->movie pair
// Engine handles, wired up in main(); pvalidation_engine stays NULL when no
// validation file is supplied.
graphchi_engine<VertexDataType, EdgeDataType> * pengine = NULL;
graphchi_engine<VertexDataType, EdgeDataType> * pvalidation_engine = NULL;
// All latent factors kept in RAM, indexed by global node id (users [0,M),
// then items [M,M+N)).
std::vector<vertex_data> latent_factors_inmem;
#include "rmse.hpp"
#include "rmse_engine.hpp"
#include "io.hpp"
/** Predict one rating as the dot product of the two factor vectors,
 *  truncated to the allowed range [minval, maxval].
 *  @param prediction out: truncated prediction
 *  @return squared prediction error */
float sgd_predict(const vertex_data& user,
    const vertex_data& movie,
    const float rating,
    double & prediction,
    void * extra = NULL){
  double score = dot_prod(user.pvec,movie.pvec);
  // clamp to the allowed rating range (a NaN score passes through unchanged,
  // matching the std::min/std::max behavior this replaces)
  if (score > maxval)
    score = maxval;
  if (score < minval)
    score = minval;
  prediction = score;
  float residual = rating - prediction;
  assert(!std::isnan(residual));
  return residual*residual;
}
/**
 * GraphChi programs need to subclass GraphChiProgram<vertex-type, edge-type>
 * class. The main logic is usually in the update function.
 */
struct SGDVerticesInMemProgram : public GraphChiProgram<VertexDataType, EdgeDataType> {
  /**
   * Called before an iteration is started: clears per-thread RMSE accumulators.
   */
  void before_iteration(int iteration, graphchi_context &gcontext) {
    reset_rmse(gcontext.execthreads);
  }
  /**
   * Called after an iteration has finished: decays the step size and reports
   * training/validation error.
   */
  void after_iteration(int iteration, graphchi_context &gcontext) {
    sgd_gamma *= sgd_step_dec;
    training_rmse(iteration, gcontext);
    run_validation(pvalidation_engine, gcontext);
  }
  /**
   * Vertex update function. Only user nodes (the side owning out-edges) are
   * active; each of their ratings yields one SGD step on both endpoint
   * factor vectors.
   */
  void update(graphchi_vertex<VertexDataType, EdgeDataType> &vertex, graphchi_context &gcontext) {
    //go over all user nodes
    if ( vertex.num_outedges() > 0){
      vertex_data & user = latent_factors_inmem[vertex.id()];
      //go over all ratings
      for(int e=0; e < vertex.num_edges(); e++) {
        float observation = vertex.edge(e)->get_data();
        vertex_data & movie = latent_factors_inmem[vertex.edge(e)->vertex_id()];
        double estScore;
        // sgd_predict returns the squared error and fills estScore
        rmse_vec[omp_get_thread_num()] += sgd_predict(user, movie, observation, estScore);
        double err = observation - estScore;
        if (std::isnan(err) || std::isinf(err))
          logstream(LOG_FATAL)<<"SGD got into numerical error. Please tune step size using --sgd_gamma and sgd_lambda" << std::endl;
        //NOTE: the following code is not thread safe, since potentially several
        //user nodes may updates this item gradient vector concurrently. However in practice it
        //did not matter in terms of accuracy on a multicore machine.
        //if you like to defend the code, you can define a global variable
        //mutex mymutex;
        //
        //and then do: mymutex.lock()
        movie.pvec += sgd_gamma*(err*user.pvec - sgd_lambda*movie.pvec);
        //and here add: mymutex.unlock();
        // NOTE: movie.pvec was already updated above, so this step reads the
        // post-update item factors — statement order is load-bearing here.
        user.pvec += sgd_gamma*(err*movie.pvec - sgd_lambda*user.pvec);
      }
    }
  }
};
//dump output to file: writes the two trained factor matrices in matrix-market format
void output_sgd_result(std::string filename) {
  // users occupy rows [0,M) of the in-memory table, items rows [M,M+N)
  MMOutputter_mat<vertex_data> user_mat(filename + "_U.mm", 0, M, "This file contains SGD output matrix U. In each row D factors of a single user node.", latent_factors_inmem);
  MMOutputter_mat<vertex_data> item_mat(filename + "_V.mm", M ,M+N, "This file contains SGD output matrix V. In each row D factors of a single item node.", latent_factors_inmem);
  logstream(LOG_INFO) << "SGD output files (in matrix market format): " << filename << "_U.mm" <<
    ", " << filename + "_V.mm " << std::endl;
}
int main(int argc, const char ** argv) {
  print_copyright();
  /* GraphChi initialization will read the command line arguments and the configuration file. */
  graphchi_init(argc, argv);
  /* Metrics object for keeping track of performance counters
     and other information. Currently required. */
  metrics m("sgd-inmemory-factors");
  /* Basic arguments for application. NOTE: File will be automatically 'sharded'. */
  sgd_lambda = get_option_float("sgd_lambda", 1e-3);
  sgd_gamma = get_option_float("sgd_gamma", 1e-3);
  sgd_step_dec = get_option_float("sgd_step_dec", 0.9);
  parse_command_line_args();
  parse_implicit_command_line();
  /* Preprocess data if needed, or discover preprocess files */
  int nshards = convert_matrixmarket<EdgeDataType>(training, NULL, 0, 0, 3, TRAINING, false);
  /* Allocate factors for all M users + N items; random init unless a saved
     model is loaded below. */
  init_feature_vectors<std::vector<vertex_data> >(M+N, latent_factors_inmem, !load_factors_from_file);
  if (validation != ""){
    int vshards = convert_matrixmarket<EdgeDataType>(validation, NULL, 0, 0, 3, VALIDATION, false);
    init_validation_rmse_engine<VertexDataType, EdgeDataType>(pvalidation_engine, vshards, &sgd_predict);
  }
  /* load initial state from disk (optional) */
  if (load_factors_from_file){
    load_matrix_market_matrix(training + "_U.mm", 0, D);
    load_matrix_market_matrix(training + "_V.mm", M, D);
  }
  print_config();
  /* Run */
  SGDVerticesInMemProgram program;
  graphchi_engine<VertexDataType, EdgeDataType> engine(training, nshards, false, m);
  set_engine_flags(engine);
  pengine = &engine;
  engine.run(program, niters);
  /* Output latent factor matrices in matrix-market format */
  output_sgd_result(training);
  test_predictions(&sgd_predict);
  /* Report execution metrics */
  if (!quiet)
    metrics_report(m);
  return 0;
}
| C++ |
/**
* @file
* @author Danny Bickson, based on code by Aapo Kyrola
* @version 1.0
*
* @section LICENSE
*
* Copyright [2012] [Carnegie Mellon University]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @section DESCRIPTION
*
* Matrix factorizatino with the Alternative Least Squares (ALS) algorithm.
* This code is based on GraphLab's implementation of ALS by Joey Gonzalez
* and Danny Bickson (CMU). A good explanation of the algorithm is
* given in the following paper:
* Large-Scale Parallel Collaborative Filtering for the Netflix Prize
* Yunhong Zhou, Dennis Wilkinson, Robert Schreiber and Rong Pan
* http://www.springerlink.com/content/j1076u0h14586183/
*
* Faster version of ALS, which stores latent factors of vertices in-memory.
* Thus, this version requires more memory. See the version "als_edgefactors"
* for a low-memory implementation.
*
*
* In the code, we use movie-rating terminology for clarity. This code has been
* tested with the Netflix movie rating challenge, where the task is to predict
* how user rates movies in range from 1 to 5.
*
* This code is has integrated preprocessing, 'sharding', so it is not necessary
* to run sharder prior to running the matrix factorization algorithm. Input
* data must be provided in the Matrix Market format (http://math.nist.gov/MatrixMarket/formats.html).
*
* ALS uses free linear algebra library 'Eigen'. See Readme_Eigen.txt for instructions
* how to obtain it.
*
* At the end of the processing, the two latent factor matrices are written into files in
* the matrix market format.
*
*/
#include "common.hpp"
#include "eigen_wrapper.hpp"
double lambda = 0.065;
struct vertex_data {
vec pvec;
vertex_data() {
pvec = zeros(D);
}
void set_val(int index, float val){
pvec[index] = val;
}
float get_val(int index){
return pvec[index];
}
};
/**
 * Type definitions. Remember to create suitable graph shards using the
 * Sharder-program.
 */
// Each graph node carries a vertex_data record; each edge stores the observed
// rating as a plain float.
typedef vertex_data VertexDataType;
typedef float EdgeDataType; // Edges store the "rating" of user->movie pair
// Engine handles, wired up in main(); pvalidation_engine stays NULL when no
// validation file is supplied.
graphchi_engine<VertexDataType, EdgeDataType> * pengine = NULL;
graphchi_engine<VertexDataType, EdgeDataType> * pvalidation_engine = NULL;
// All latent factors kept in RAM, indexed by global node id (users [0,M),
// then items [M,M+N)).
std::vector<vertex_data> latent_factors_inmem;
#include "io.hpp"
#include "rmse.hpp"
#include "rmse_engine.hpp"
/** ALS rating prediction: dot product of user and item factors, truncated to
 *  the allowed range [minval, maxval].
 *  @param prediction out: truncated prediction
 *  @return squared prediction error */
float als_predict(const vertex_data& user,
    const vertex_data& movie,
    const float rating,
    double & prediction,
    void * extra = NULL){
  double score = dot_prod(user.pvec, movie.pvec);
  // clamp to the allowed rating range (a NaN score passes through unchanged,
  // matching the std::min/std::max behavior this replaces)
  if (score > maxval)
    score = maxval;
  if (score < minval)
    score = minval;
  prediction = score;
  float residual = rating - prediction;
  assert(!std::isnan(residual));
  return residual*residual;
}
/**
 * GraphChi programs need to subclass GraphChiProgram<vertex-type, edge-type>
 * class. The main logic is usually in the update function.
 */
struct ALSVerticesInMemProgram : public GraphChiProgram<VertexDataType, EdgeDataType> {
  /**
   * Vertex update function - computes the least square step:
   * pvec = argmin ||X*pvec - y||^2 + reg*||pvec||^2, where X stacks the
   * neighbors' factor vectors and y the observed ratings.
   */
  void update(graphchi_vertex<VertexDataType, EdgeDataType> &vertex, graphchi_context &gcontext) {
    vertex_data & vdata = latent_factors_inmem[vertex.id()];
    mat XtX = mat::Zero(D, D);
    vec Xty = vec::Zero(D);
    // only the side owning out-edges (users) accumulates the training RMSE,
    // so each rating is counted exactly once per sweep
    bool compute_rmse = (vertex.num_outedges() > 0);
    // Compute XtX and Xty (NOTE: unweighted)
    for(int e=0; e < vertex.num_edges(); e++) {
      float observation = vertex.edge(e)->get_data();
      vertex_data & nbr_latent = latent_factors_inmem[vertex.edge(e)->vertex_id()];
      Xty += nbr_latent.pvec * observation;
      // accumulate only the upper triangle of the symmetric rank-1 update
      XtX.triangularView<Eigen::Upper>() += nbr_latent.pvec * nbr_latent.pvec.transpose();
      if (compute_rmse) {
        double prediction;
        rmse_vec[omp_get_thread_num()] += als_predict(vdata, nbr_latent, observation, prediction);
      }
    }
    double regularization = lambda;
    // optionally scale regularization by node degree (--regnormal)
    if (regnormal)
      regularization *= vertex.num_edges();
    for(int i=0; i < D; i++) XtX(i,i) += regularization;
    // Solve the least squares problem with eigen using Cholesky decomposition
    vdata.pvec = XtX.selfadjointView<Eigen::Upper>().ldlt().solve(Xty);
  }
  /**
   * Called before an iteration is started: clears per-thread RMSE accumulators.
   */
  void before_iteration(int iteration, graphchi_context &gcontext) {
    reset_rmse(gcontext.execthreads);
  }
  /**
   * Called after an iteration has finished: reports training error and runs
   * the validation engine if one was configured.
   */
  void after_iteration(int iteration, graphchi_context &gcontext) {
    training_rmse(iteration, gcontext);
    run_validation(pvalidation_engine, gcontext);
  }
};
/** Write the two trained factor matrices in matrix-market format. */
void output_als_result(std::string filename) {
  // users occupy rows [0,M) of the in-memory table, items rows [M,M+N)
  MMOutputter_mat<vertex_data> user_mat(filename + "_U.mm", 0, M , "This file contains ALS output matrix U. In each row D factors of a single user node.", latent_factors_inmem);
  MMOutputter_mat<vertex_data> item_mat(filename + "_V.mm", M ,M+N, "This file contains ALS output matrix V. In each row D factors of a single item node.", latent_factors_inmem);
  logstream(LOG_INFO) << "ALS output files (in matrix market format): " << filename << "_U.mm" <<
    ", " << filename + "_V.mm " << std::endl;
}
int main(int argc, const char ** argv) {
  print_copyright();
  /* GraphChi initialization will read the command line
     arguments and the configuration file. */
  graphchi_init(argc, argv);
  /* Metrics object for keeping track of performance counters
     and other information. Currently required. */
  metrics m("als-inmemory-factors");
  /* ALS regularization weight; must be read after graphchi_init() parsed argv. */
  lambda = get_option_float("lambda", 0.065);
  parse_command_line_args();
  parse_implicit_command_line();
  /* Preprocess data if needed, or discover preprocess files */
  int nshards = convert_matrixmarket<EdgeDataType>(training, NULL, 0, 0, 3, TRAINING, false);
  /* Allocate factors for all M users + N items; random init unless a saved
     model is loaded below. */
  init_feature_vectors<std::vector<vertex_data> >(M+N, latent_factors_inmem, !load_factors_from_file);
  if (validation != ""){
    int vshards = convert_matrixmarket<EdgeDataType>(validation, NULL, 0, 0, 3, VALIDATION, false);
    init_validation_rmse_engine<VertexDataType, EdgeDataType>(pvalidation_engine, vshards, &als_predict);
  }
  /* load initial state from disk (optional) */
  if (load_factors_from_file){
    load_matrix_market_matrix(training + "_U.mm", 0, D);
    load_matrix_market_matrix(training + "_V.mm", M, D);
  }
  /* Run */
  ALSVerticesInMemProgram program;
  graphchi_engine<VertexDataType, EdgeDataType> engine(training, nshards, false, m);
  set_engine_flags(engine);
  pengine = &engine;
  engine.run(program, niters);
  /* Output latent factor matrices in matrix-market format */
  output_als_result(training);
  test_predictions(&als_predict);
  /* Report execution metrics */
  if (!quiet)
    metrics_report(m);
  return 0;
}
| C++ |
/**
* @file
* @author Danny Bickson
* @version 1.0
*
* @section LICENSE
*
* Copyright [2012] [Carnegie Mellon University]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @section DESCRIPTION
*
* time-SVD++ algorithm implementation. As described in the paper:
* Yehuda Koren. 2009. Collaborative filtering with temporal dynamics. In Proceedings of the 15th ACM SIGKDD international conference on Knowledge discovery and data mining (KDD '09). ACM, New York, NY, USA, 447-456. DOI=10.1145/1557019.1557072
*
*/
#include "common.hpp"
#include "eigen_wrapper.hpp"
/** Hyper-parameters for time-SVD++, overridable from the command line in
 *  main() (--lrate, --beta, --gamma, --lrate_mult_dec). */
struct timesvdpp_params{
  double lrate;          // learning rate
  double beta;           // regularization applied to the bias terms
  double gamma;          // regularization applied to the factor vectors
  double lrate_mult_dec; // multiplicative learning-rate decay per iteration
  timesvdpp_params()
    : lrate(0.0001),
      beta(0.00001),
      gamma(0.0001),
      lrate_mult_dec(0.9) {
  }
};
timesvdpp_params tsp; // global parameter instance
// Partition of the global node-id space: users occupy [0,M), items [M,M+N),
// time-bin nodes [M+N, M+N+K) (matching init_time_svdpp() and the asserts in
// the update function).
bool is_user(vid_t id){ return id < M; }
// BUGFIX: the upper bound here was `id < N`, which misclassifies legitimate
// item ids in [max(M,N), M+N) whenever M > 0. Items live at offset M..M+N-1.
bool is_item(vid_t id){ return id >= M && id < M+N; }
bool is_time(vid_t id){ return id >= M+N; }
// Sentinel index used by set_val()/get_val() to address the scalar bias.
#define BIAS_POS -1
/** Node state: a latent factor vector plus a scalar bias. The vector is left
 *  empty here and sized by the init routines, because user, item and time
 *  nodes carry different layouts (4*D, 2*D and 2*D respectively). */
struct vertex_data {
  vec pvec;    // latent factors; length assigned in init_time_svdpp*()
  double bias; // scalar bias

  vertex_data() : bias(0) {
  }

  // Write one coordinate; BIAS_POS routes to the bias scalar.
  void set_val(int index, float val) {
    if (index != BIAS_POS)
      pvec[index] = val;
    else
      bias = val;
  }

  // Read one coordinate; BIAS_POS routes to the bias scalar.
  float get_val(int index) {
    return (index == BIAS_POS) ? bias : pvec[index];
  }
};
/** Rating edge payload: the observed rating and its time bin. */
struct edge_data {
  double weight; // observed rating value
  double time;   // time bin of the observation
  edge_data() : weight(0), time(0) {}
  edge_data(double weight, double time) : weight(weight), time(time) {}
};
/** Non-owning view of a user node's state.
 *  The user pvec has length 4*D and is laid out as [p | pu | x | ptemp]. */
struct time_svdpp_usr{
  double * bu;    // user bias
  double * p;     // base user factors
  double * pu;    // time-modulated user factors
  double * x;     // user-x-time factors
  double * ptemp; // working copy of factors incl. the implicit-feedback sum

  time_svdpp_usr(vertex_data & vdata){
    attach(vdata);
  }
  time_svdpp_usr & operator = (vertex_data & vdata){
    attach(vdata);
    return *this;
  }
private:
  // Point all members into vdata's storage.
  void attach(vertex_data & vdata){
    bu = &vdata.bias;
    assert(vdata.pvec.size() == D*4); //TO REMOVE
    p = &vdata.pvec[0];
    pu = p+D;
    x = pu+D;
    ptemp = x+D;
  }
};
/** Non-owning view of an item (movie) node's state.
 *  The movie pvec has length 2*D and is laid out as [q | y]. */
struct time_svdpp_movie{
  double * bi; // item bias
  double * q;  // item factors
  double * y;  // implicit-feedback factors
  time_svdpp_movie(vertex_data& vdata){
    assert(vdata.pvec.size() == D*2);
    bi = &vdata.bias;
    q = &vdata.pvec[0];
    y = q+D;
  }
  // NOTE(review): this overload accepts a const reference but casts the
  // constness away, handing out mutable pointers into the (supposedly const)
  // object. Callers do mutate through these pointers, so the const here is
  // misleading — consider taking vertex_data& like the constructor does.
  time_svdpp_movie & operator=(const vertex_data& vdata){
    assert(vdata.pvec.size() == D*2);
    bi = (double*)&vdata.bias;
    q = (double*)&vdata.pvec[0];
    y = (double*)(q+D);
    return *this;
  }
};
/** Non-owning view of a time-bin node's state.
 *  The time pvec has length 2*D and is laid out as [z | pt]. */
struct time_svdpp_time{
  double * bt; // time-bin bias
  double * z;  // time factors paired with usr.x
  double * pt; // time factors modulating usr.pu

  time_svdpp_time(vertex_data& vdata){
    attach(vdata);
  }
  time_svdpp_time & operator=(vertex_data & vdata){
    attach(vdata);
    return *this;
  }
private:
  // Point all members into vdata's storage.
  void attach(vertex_data & vdata){
    bt = &vdata.bias;
    z = &vdata.pvec[0];
    pt = z+D;
    assert(vdata.pvec.size() == D*2);
  }
};
/** time-SVD++ prediction for one (user, movie, time-bin) triple:
 *  pui = mu + bu + bi + ptemp.q + x.z + (pu*pt).q, truncated to the allowed
 *  rating range. Writes the truncated prediction, aborts on NaN, and returns
 *  the squared error. The three += accumulations per coordinate are kept as
 *  separate statements to preserve the original floating-point rounding. */
float time_svdpp_predict(const time_svdpp_usr & usr,
    const time_svdpp_movie & mov,
    const time_svdpp_time & ptime,
    const float rating,
    double & prediction){
  //prediction = global_mean + user_bias + movie_bias
  double pui = globalMean + *usr.bu + *mov.bi;
  for(int k=0; k<D; k++){
    const double qk = mov.q[k];
    pui += (usr.ptemp[k] * qk);              // + user x movie factors
    pui += usr.x[k] * ptime.z[k];            // + user x time factors
    pui += usr.pu[k] * ptime.pt[k] * qk;     // + user x time x movie factors
  }
  // clamp to the allowed rating range (a NaN pui passes through unchanged,
  // matching std::min/std::max, and is caught by the isnan check below)
  if (pui > maxval)
    pui = maxval;
  if (pui < minval)
    pui = minval;
  prediction = pui;
  if (std::isnan(prediction))
    logstream(LOG_FATAL)<<"Got into numerical errors! Try to decrease --lrate, --gamma, --beta" <<std::endl;
  float residual = rating - prediction;
  return residual*residual;
}
float time_svdpp_predict(const vertex_data& user,
const vertex_data& movie,
const float rating,
double & prediction,
void * extra){
return time_svdpp_predict(time_svdpp_usr((vertex_data&)user), time_svdpp_movie((vertex_data&)movie), time_svdpp_time(*(vertex_data*)extra), rating, prediction);
}
/**
 * Type definitions. Remember to create suitable graph shards using the
 * Sharder-program.
 */
// Each graph node carries a vertex_data record; each edge stores the rating
// together with its time bin (edge_data).
typedef vertex_data VertexDataType;
typedef edge_data EdgeDataType; // Edges store the "rating" of user->movie pair
// Engine handles, wired up in main(); pvalidation_engine stays NULL when no
// validation file is supplied.
graphchi_engine<VertexDataType, EdgeDataType> * pengine = NULL;
graphchi_engine<VertexDataType, EdgeDataType> * pvalidation_engine = NULL;
// All latent factors kept in RAM: users [0,M), items [M,M+N), time bins
// [M+N, M+N+K).
std::vector<vertex_data> latent_factors_inmem;
/** Allocate and randomly initialize user and item node state.
 *  Users (ids [0,M)) get a pvec of length 4*D laid out as [p|pu|x|ptemp];
 *  items (ids [M,M+N)) get a pvec of length 2*D laid out as [q|y].
 *  NOTE: drand48() keeps hidden global state, so under the OpenMP loops the
 *  draws are not reproducible across runs; only their small magnitude matters. */
void init_time_svdpp_node_data(){
  int k = D;
#pragma omp parallel for
  for (int u = 0; u < (int)M; u++) {
    vertex_data & data = latent_factors_inmem[u];
    data.pvec = zeros(4*k);
    time_svdpp_usr usr(data);
    *usr.bu = 0;
    for (int m=0; m< k; m++){
      // small random initialization, scaled down by the factor dimension
      usr.p[m] = 0.01*drand48() / (double) (k);
      usr.pu[m] = 0.001 * drand48() / (double) (k);
      usr.x[m] = 0.001 * drand48() / (double) (k);
      usr.ptemp[m] = usr.p[m];
    }
  }
#pragma omp parallel for
  for (int i = M; i < (int)(N+M); i++) {
    vertex_data & data = latent_factors_inmem[i];
    data.pvec = zeros(2*k);
    time_svdpp_movie movie(data);
    *movie.bi = 0;
    for (int m = 0; m < k; m++){
      movie.q[m] = 0.01 * drand48() / (double) (k);
      movie.y[m] = 0.001 * drand48() / (double) (k);
    }
  }
}
/** Resize the in-memory factor table to M+N+K nodes and initialize all
 *  user, item and time-bin state. Time nodes (ids [M+N,M+N+K)) get a pvec of
 *  length 2*D laid out as [z|pt]. */
void init_time_svdpp(){
  fprintf(stderr, "time-SVD++ %d factors\n", D);
  int k = D;
  latent_factors_inmem.resize(M+N+K);
  init_time_svdpp_node_data();
#pragma omp parallel for
  for (int i = M+N; i < (int)(M+N+K); i++) {
    vertex_data & data = latent_factors_inmem[i];
    data.pvec = zeros(2*k);
    time_svdpp_time timenode(data);
    *timenode.bt = 0;
    for (int m = 0; m < k; m++){
      // small random initialization, scaled down by the factor dimension
      timenode.z[m] = 0.001 * drand48() / (double) (k);
      timenode.pt[m] = 0.001 * drand48() / (double) (k);
    }
  }
}
#include "io.hpp"
#include "rmse.hpp"
#include "rmse_engine4.hpp"
/**
 * GraphChi programs need to subclass GraphChiProgram<vertex-type, edge-type>
 * class. The main logic is usually in the update function.
 */
struct TIMESVDPPVerticesInMemProgram : public GraphChiProgram<VertexDataType, EdgeDataType> {
  /*
   * Vertex update function - one SGD pass over all ratings of a single user.
   * Item and time-bin vertices are passive: all their state is mutated from
   * the user side through the in-memory factor table.
   */
  void update(graphchi_vertex<VertexDataType, EdgeDataType> &vertex, graphchi_context &gcontext) {
    //go over all user nodes
    if (is_user(vertex.id())){
      vertex_data & user = latent_factors_inmem[vertex.id()];
      time_svdpp_usr usr(user);
      unsigned int userRatings = vertex.num_outedges();
      double rRuNum = 1/sqrt(userRatings+10);   // damped 1/sqrt(|R(u)|) implicit-feedback weight
      int dim = D;
      double sumY = 0.0;
      // Pass 1: accumulate the implicit-feedback contribution of all rated
      // items. NOTE(review): sumY is a SCALAR sum over all components of
      // every y vector (not a per-coordinate vector sum as in classic SVD++),
      // so each coordinate of ptemp below receives the same aggregate —
      // verify this is intended.
      for(int e=0; e < vertex.num_outedges(); e++) {
        uint pos = vertex.edge(e)->vertex_id();
        assert(pos >= M && pos < M+N);   // a user's neighbors must be items
        vertex_data & data = latent_factors_inmem[pos];
        time_svdpp_movie movie(data);
        Map<vec> y(movie.y, D);
        sumY += sum((const vec&)y); //y
      }
      for( int k=0; k<dim; ++k) {
        usr.ptemp[k] = usr.pu[k] + rRuNum * sumY; // pTemp = pu + rRuNum*sumY
      }
      vec sum = zeros(dim);   // accumulates eui*q over ratings; drives the y update in pass 3
      // Pass 2: gradient step for every rating of this user.
      for(int e=0; e < vertex.num_edges(); e++) {
        //edge_data & edge = scope.edge_data(oedgeid);
        //float rui = edge.weight;
        float rui = vertex.edge(e)->get_data().weight;
        // NOTE(review): this assumes the stored time value already encodes a
        // global node id with bins starting from 1 (hence the -1). The assert
        // below only bounds t from above; it does not check that t lands in
        // the time-node range [M+N, M+N+K) — verify against the
        // convert_matrixmarket4 preprocessing.
        uint t = (uint)(vertex.edge(e)->get_data().time - 1); // we assume time bins start from 1
        assert(t < M+N+K);
        vertex_data & data = latent_factors_inmem[vertex.edge(e)->vertex_id()];
        time_svdpp_movie mov(data);
        time_svdpp_time time(latent_factors_inmem[t]);
        double pui = 0;
        time_svdpp_predict(usr, mov, time, rui, pui);
        double eui = rui - pui;   // prediction residual
        // bias steps: learning rate tsp.lrate, regularization tsp.beta
        *usr.bu += tsp.lrate*(eui - tsp.beta* *usr.bu);
        *mov.bi += tsp.lrate * (eui - tsp.beta* *mov.bi);
        // factor steps (regularization tsp.gamma). oldValue/xOldValue/zOldValue
        // capture pre-update values, so the statement order is load-bearing.
        for (int k = 0; k < dim; k++) {
          double oldValue = mov.q[k];
          double userValue = usr.ptemp[k] + usr.pu[k] * time.pt[k];
          sum[k] += eui * mov.q[k];
          mov.q[k] += tsp.lrate * (eui * userValue - tsp.gamma*mov.q[k]);
          usr.ptemp[k] += tsp.lrate * ( eui * oldValue - tsp.gamma * usr.ptemp[k]);
          usr.p[k] += tsp.lrate * ( eui * oldValue - tsp.gamma*usr.p[k] );
          usr.pu[k] += tsp.lrate * (eui * oldValue * time.pt[k] - tsp.gamma * usr.pu[k]);
          time.pt[k] += tsp.lrate * (eui * oldValue * usr.pu[k] - tsp.gamma * time.pt[k]);
          double xOldValue = usr.x[k];
          double zOldValue = time.z[k];
          usr.x[k] += tsp.lrate * (eui * zOldValue - tsp.gamma * xOldValue);
          time.z[k] += tsp.lrate * (eui * xOldValue - tsp.gamma * zOldValue);
        }
        rmse_vec[omp_get_thread_num()] += eui*eui;
      }
      // Pass 3: update the implicit-feedback vectors y of all rated items.
      for(int e=0; e < vertex.num_edges(); e++) {
        time_svdpp_movie mov = latent_factors_inmem[vertex.edge(e)->vertex_id()];
        for(int k=0;k<dim;k++){
          mov.y[k] += tsp.lrate * (rRuNum * sum[k]- tsp.gamma*mov.y[k]);
        }
      }
    }
  };
  /**
   * Called before an iteration is started: clears per-thread RMSE accumulators.
   */
  void before_iteration(int iteration, graphchi_context &gcontext) {
    reset_rmse(gcontext.execthreads);
  }
  /**
   * Called after an iteration has finished: decays the learning rate and
   * reports training/validation error.
   */
  void after_iteration(int iteration, graphchi_context &gcontext) {
    tsp.lrate *= tsp.lrate_mult_dec;
    training_rmse(iteration, gcontext);
    run_validation4(pvalidation_engine, gcontext);
  };
};
void output_timesvdpp_result(std::string filename) {
MMOutputter_mat<vertex_data> user_mat(filename + "_U.mm", 0, M, "This file contains TIMESVDPP output matrix U. In each row 4xD factors of a single user node. The vectors are [p pu x ptemp]", latent_factors_inmem);
MMOutputter_mat<vertex_data> item_mat(filename + "_V.mm", M ,M+N, "This file contains -TIMESVDPP output matrix V. In each row 2xD factors of a single item node. The vectors are [q y]", latent_factors_inmem);
MMOutputter_mat<vertex_data> time_mat(filename + "_T.mm", M+N ,M+N+K, "This file contains -TIMESVDPP output matrix T. In each row 2xD factors of a single time node. The vectors are [z pt]", latent_factors_inmem);
MMOutputter_vec<vertex_data> mmoutput_bias_left(filename + "_U_bias.mm", 0, M, BIAS_POS, "This file contains time-svd++ output bias vector. In each row a single user bias.", latent_factors_inmem);
MMOutputter_vec<vertex_data> mmoutput_bias_right(filename + "_V_bias.mm",M ,M+N , BIAS_POS, "This file contains time-svd++ output bias vector. In each row a single item bias.", latent_factors_inmem);
MMOutputter_scalar gmean(filename + "_global_mean.mm", "This file contains time-svd++ global mean which is required for computing predictions.", globalMean);
logstream(LOG_INFO) << " time-svd++ output files (in matrix market format): " << filename << "_U.mm" << ", " << filename + "_V.mm " << filename + "_T.mm, " << filename << " _global_mean.mm, " << filename << "_U_bias.mm " << filename << "_V_bias.mm " << std::endl;
}
int main(int argc, const char ** argv) {
  print_copyright();
  /* GraphChi initialization will read the command line
     arguments and the configuration file. */
  graphchi_init(argc, argv);
  /* Metrics object for keeping track of performance counters
     and other information. Currently required. */
  metrics m("time-svdpp-inmemory-factors");
  //specific command line parameters for time-svd++
  tsp.lrate = get_option_float("lrate", tsp.lrate);
  tsp.beta = get_option_float("beta", tsp.beta);
  tsp.gamma = get_option_float("gamma", tsp.gamma);
  tsp.lrate_mult_dec = get_option_float("lrate_mult_dec", tsp.lrate_mult_dec);
  parse_command_line_args();
  parse_implicit_command_line();
  /* Preprocess data if needed, or discover preprocess files */
  /* 4-column input (user, item, time, rating) -> convert_matrixmarket4 */
  int nshards = convert_matrixmarket4<edge_data>(training, false);
  init_time_svdpp();
  if (validation != ""){
    int vshards = convert_matrixmarket4<EdgeDataType>(validation, false, M==N, VALIDATION);
    init_validation_rmse_engine<VertexDataType, EdgeDataType>(pvalidation_engine, vshards, &time_svdpp_predict, false, true, 1);
  }
  if (load_factors_from_file){
    /* Matrix widths must match output_timesvdpp_result(): U is 4*D wide,
       V and T are 2*D wide. */
    load_matrix_market_matrix(training + "_U.mm", 0, 4*D);
    load_matrix_market_matrix(training + "_V.mm", M, 2*D);
    load_matrix_market_matrix(training + "_T.mm", M+N, 2*D);
    vec user_bias = load_matrix_market_vector(training +"_U_bias.mm", false, true);
    vec item_bias = load_matrix_market_vector(training +"_V_bias.mm", false, true);
    /* NOTE(review): this expects <training>_T_bias.mm to exist — verify the
       output step actually writes a time-bias file before relying on this path. */
    vec time_bias = load_matrix_market_vector(training+ "_T_bias.mm", false, true);
    /* biases are stored split: users [0,M), items [M,M+N), time bins the rest */
    for (uint i=0; i<M+N+K; i++){
      if (i < M)
        latent_factors_inmem[i].bias = user_bias[i];
      else if (i <M+N)
        latent_factors_inmem[i].bias = item_bias[i-M];
      else
        latent_factors_inmem[i].bias = time_bias[i-M-N];
    }
    vec gm = load_matrix_market_vector(training + "_global_mean.mm", false, true);
    globalMean = gm[0];
  }
  /* Run */
  TIMESVDPPVerticesInMemProgram program;
  graphchi_engine<VertexDataType, EdgeDataType> engine(training, nshards, false, m);
  set_engine_flags(engine);
  pengine = &engine;
  engine.run(program, niters);
  /* Output test predictions in matrix-market format */
  output_timesvdpp_result(training);
  test_predictions3(&time_svdpp_predict, 1);
  /* Report execution metrics */
  if (!quiet)
    metrics_report(m);
  return 0;
}
| C++ |
/**
* Copyright (c) 2009 Carnegie Mellon University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an "AS
* IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language
* governing permissions and limitations under the License.
*
* For more about this software visit:
*
* http://www.graphlab.ml.cmu.edu
*
* Code written by Danny Bickson, CMU
* Any changes to the code must include this original license notice in full.
* This file implements the shooting algorithm for solving Lasso problem
*/
#ifndef _COSAMP_HPP
#define _COSAMP_HPP
#include "eigen_wrapper.hpp"
/**
 * Return the sorted union (distinct values) of the two index vectors a and b.
 * Used by CoSaMP to merge the new support candidates with the previous support.
 */
ivec sort_union(ivec a, ivec b){
  ivec ab = concat(a,b);
  sort(ab);
  // Drop duplicates in the sorted vector. After del(ab, i) the following
  // element shifts into slot i, so we must re-examine index i; the previous
  // version incremented i after a deletion and therefore left duplicates
  // behind whenever a value occurred three or more times.
  for (int i=1; i< ab.size(); i++){
    if (ab[i] == ab[i-1]){
      del(ab,i);
      i--; // recheck the element that just shifted into position i
    }
  }
  return ab;
}
/**
 * CoSaMP (Compressive Sampling Matching Pursuit) sparse recovery.
 * Finds a K-sparse vector Sest such that Phi * Sest approximates u.
 *
 * @param Phi      square D x D measurement matrix
 * @param u        observation vector of length D
 * @param K        target sparsity (number of nonzeros kept), 1 <= K <= 2*D
 * @param max_iter maximum number of refinement iterations
 * @param tol1     stop when the max-norm of the residual falls below this
 * @param D        problem dimension
 * @return K-sparse estimate of length D
 */
vec CoSaMP(const mat & Phi, const vec & u, int K, int max_iter, double tol1, int D){
  assert(K<= 2*D);
  assert(K>=1);
  assert(Phi.rows() == Phi.cols());
  assert(Phi.rows() == D);
  assert(u.size() == D);
  vec Sest = zeros(D);
  vec utrue = Sest;
  vec v = u;  // residual, initially the full observation
  int t=1;
  ivec T2;    // support kept from the previous iteration
  while (t<max_iter){
    // Proxy step: correlate residual with columns of Phi, take the
    // 2K strongest coordinates as new support candidates.
    ivec z = sort_index(fabs(Phi.transpose() * v));
    z = reverse(z);
    ivec Omega = head(z,2*K);
    // Merge candidates with the previous support.
    ivec T=sort_union(Omega,T2);
    mat phit=get_cols(Phi, T);
    // Least-squares fit of u on the merged support columns.
    vec b;
    bool ret = backslash(phit, u, b);
    assert(ret);
    ret = false;//avoid warning
    b= fabs(b);
    // Prune: keep only the K largest coefficients.
    // NOTE(review): z3[i] indexes into b (length |T|) and is also used as a
    // position in the length-D vector Sest — presumably relying on the
    // helpers' index conventions; confirm against eigen_wrapper semantics.
    ivec z3 = sort_index(b);
    z3 = reverse(z3);
    Sest=zeros(D);
    for (int i=0; i< K; i++)
      set_val(Sest, z3[i], b[z3[i]]);
    // Remember the K-1 strongest positions for the next iteration.
    ivec z2 = sort_index(fabs(Sest));
    z2 = reverse(z2);
    T2 = head(z2,K-1);
    // Update residual and test convergence in the max-norm.
    v=u-Phi*Sest;
    double n2 = max(fabs(v));
    if (n2 < tol1)
      break;
    t++;
  }
  return Sest;
}
void test_cosamp(){
mat A= init_mat("0.9528 0.5982 0.8368 ; 0.7041 0.8407 0.5187; 0.9539 0.4428 0.0222", 3, 3);
vec b= init_vec(" 0.3759 0.8986 0.4290",3);
int K=1;
double epsilon =1e-3;
vec ret = CoSaMP(A,b,K,10, epsilon,3);
vec right = init_vec("0 1.2032 0", 3);
double diff = norm(ret - right);
assert(diff <1e-4);
diff = 0; //avoid warning
}
#endif
| C++ |
/**
* @file
* @author Danny Bickson, CMU
* @version 1.0
*
* @section LICENSE
*
* Copyright [2012] [Carnegie Mellon University]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @section DESCRIPTION
*
* This program computes top K recommendations based on the linear model computed
* by one of: als,sparse_als,wals, sgd and nmf applications.
*
*/
#include "common.hpp"
#include "eigen_wrapper.hpp"
#include "timer.hpp"
int debug;                          // enable verbose per-vertex debug prints
int num_ratings;                    // number of top recommendations to emit per user
double knn_sample_percent = 1.0;    // fraction of items scored per user (1.0 = all)
const double epsilon = 1e-16;       // numeric guard value
timer mytimer;                      // wall-clock timer for progress reporting
int tokens_per_row = 3;             // input format: 3 = [user item rating], 4 = adds time
int algo = 0;                       // which trained model to score with (see enum below)
// Sentinel index meaning "the bias scalar" in vertex_data::set_val/get_val.
#define BIAS_POS -1
// Supported rating models whose factor files this tool can load.
enum {
  SVDPP = 0, BIASSGD = 1
};
/**
 * Per-vertex state: latent factors plus the top-K recommendation
 * results computed for user vertices.
 */
struct vertex_data {
  vec ratings;   // top predicted scores for this user (filled by update())
  ivec ids;      // item ids matching `ratings`
  vec pvec;      // first D latent factors
  vec weight;    // second D latent factors (SVD++ "y" vectors; unused by bias-SGD)
  double bias;   // scalar bias term
  vertex_data() : bias(0) {
    assert(num_ratings > 0);
    assert(D > 0);
    ratings = zeros(num_ratings);
    ids = ivec::Zero(num_ratings);
    pvec = zeros(D);
    weight = zeros(D);
  }
  // Generic writer used by the matrix-market loaders: BIAS_POS selects the
  // bias scalar, [0,D) the pvec entries, [D,2D) the weight entries.
  void set_val(int index, float val){
    if (index == BIAS_POS){
      bias = val;
      return;
    }
    if (index < D)
      pvec[index] = val;
    else
      weight[index-D] = val;
  }
  // Generic reader mirroring set_val's index convention.
  float get_val(int index){
    if (index == BIAS_POS)
      return bias;
    return (index < D) ? pvec[index] : weight[index-D];
  }
};
/** Edge payload: the observed rating of a user->item pair. */
struct edge_data {
  double weight;
  edge_data() : weight(0) { }
  edge_data(double weight) : weight(weight) { }
};
/** Edge payload for 4-column input: rating plus its timestamp/bin. */
struct edge_data4 {
  double weight;
  double time;
  edge_data4() : weight(0), time(0) { }
  edge_data4(double weight, double time) : weight(weight), time(time) { }
};
/**
* Type definitions. Remember to create suitable graph shards using the
* Sharder-program.
*/
// GraphChi engine type bindings for this application.
typedef vertex_data VertexDataType;
typedef edge_data EdgeDataType; // Edges store the "rating" of user->movie pair
// Global engine pointer (set in main) and the in-memory factor table:
// rows [0,M) are users, rows [M,M+N) are items.
graphchi_engine<VertexDataType, EdgeDataType> * pengine = NULL;
std::vector<vertex_data> latent_factors_inmem;
/** compute a missing value based on SVD++ algorithm */
/**
 * Predict a rating with the SVD++ model:
 *   r_hat = mu + b_u + b_i + q_i^T * (p_u + y_u)
 * where y_u is the pre-aggregated implicit-feedback vector stored in
 * user.weight. Writes the clamped prediction into `prediction` and
 * returns the squared error vs. the given rating.
 */
float svdpp_predict(const vertex_data& user, const vertex_data& movie, const float rating, double & prediction, void * extra = NULL){
  // global mean plus the two bias terms
  prediction = globalMean + user.bias + movie.bias;
  // q_i^T * (p_u + y_u), accumulated component-wise
  for (int k = 0; k < D; k++)
    prediction += movie.pvec[k] * (user.pvec[k] + user.weight[k]);
  // clamp into the allowed rating range
  if (prediction > maxval)
    prediction = maxval;
  if (prediction < minval)
    prediction = minval;
  float err = rating - prediction;
  if (std::isnan(err))
    logstream(LOG_FATAL)<<"Got into numerical errors. Try to decrease step size using the command line: svdpp_user_bias_step, svdpp_item_bias_step, svdpp_user_factor2_step, svdpp_user_factor_step, svdpp_item_step" << std::endl;
  return err*err;
}
/** compute a missing value based on bias-SGD algorithm */
/**
 * Predict a rating with the bias-SGD model:
 *   r_hat = mu + b_u + b_i + <p_u, q_i>
 * Writes the clamped prediction into `prediction` and returns the
 * squared error vs. the given rating.
 */
float biassgd_predict(const vertex_data& user,
  const vertex_data& movie,
  const float rating,
  double & prediction,
  void * extra = NULL){
  double base = globalMean + user.bias + movie.bias;
  prediction = base + dot_prod(user.pvec, movie.pvec);
  //truncate prediction to allowed values
  if (prediction > maxval)
    prediction = maxval;
  if (prediction < minval)
    prediction = minval;
  //return the squared error
  float err = rating - prediction;
  if (std::isnan(err))
    logstream(LOG_FATAL)<<"Got into numerical errors. Try to decrease step size using bias-SGD command line arugments)" << std::endl;
  return err*err;
}
void rating_stats(){
double min=1e100, max=0, avg=0;
int cnt = 0;
int startv = 0;
int endv = M;
for (int i=startv; i< endv; i++){
vertex_data& data = latent_factors_inmem[i];
if (data.ratings.size() > 0){
min = std::min(min, data.ratings[0]);
max = std::max(max, data.ratings[0]);
if (std::isnan(data.ratings[0]))
printf("bug: nan on %d\n", i);
else {
avg += data.ratings[0];
cnt++;
}
}
}
printf("Distance statistics: min %g max %g avg %g\n", min, max, avg/cnt);
}
#include "io.hpp"
/**
 * Load a previously trained model (factor matrices, bias vectors, global
 * mean) into latent_factors_inmem.
 *
 * Fix: the original body ignored the base_filename parameter and read from
 * the global `training` instead. Use the parameter so the function works for
 * any base name; the only caller passes `training`, so behavior is unchanged.
 *
 * @param base_filename prefix of the _U.mm/_V.mm/_*_bias.mm/_global_mean.mm files
 */
void read_factors(std::string base_filename){
  if (algo == SVDPP)
    // SVD++ user rows carry 2*D values: [p y]
    load_matrix_market_matrix(base_filename + "_U.mm", 0, 2*D);
  else if (algo == BIASSGD)
    load_matrix_market_matrix(base_filename + "_U.mm", 0, D);
  else assert(false);
  load_matrix_market_matrix(base_filename + "_V.mm", M, D);
  vec user_bias = load_matrix_market_vector(base_filename +"_U_bias.mm", false, true);
  assert(user_bias.size() == M);
  vec item_bias = load_matrix_market_vector(base_filename +"_V_bias.mm", false, true);
  assert(item_bias.size() == N);
  // rows [0,M) are users, rows [M,M+N) are items
  for (uint i=0; i<M+N; i++){
    latent_factors_inmem[i].bias = ((i<M)?user_bias[i] : item_bias[i-M]);
  }
  vec gm = load_matrix_market_vector(base_filename + "_global_mean.mm", false, true);
  globalMean = gm[0];
}
template<typename VertexDataType, typename EdgeDataType>
struct RatingVerticesInMemProgram : public GraphChiProgram<VertexDataType, EdgeDataType> {
  /**
   * Vertex update function - computes the least square step
   *
   * For each user vertex: predicts a score for every candidate item
   * (skipping items the user already rated in the training data), then
   * keeps the num_ratings highest-scoring items and their ids.
   */
  void update(graphchi_vertex<VertexDataType, EdgeDataType> &vertex, graphchi_context &gcontext) {
    //compute only for user nodes
    if (vertex.id() >= M)
      return;
    vertex_data & vdata = latent_factors_inmem[vertex.id()];
    // number of candidate items to score (all N when sampling percent is 1.0)
    int howmany = (int)(N*knn_sample_percent);
    assert(howmany > 0 );
    vec distances = zeros(howmany);
    ivec indices = ivec::Zero(howmany);
    for (int i=0; i< howmany; i++){
      indices[i]= -1; // -1 marks an empty candidate slot
    }
    // Mark the items this user already rated, so they are not recommended again.
    std::vector<bool> curratings;
    curratings.resize(N);
    for(int e=0; e < vertex.num_edges(); e++) {
      //no need to calculate this rating since it is given in the training data reference
      assert(vertex.edge(e)->vertex_id() - M >= 0 && vertex.edge(e)->vertex_id() - M < N);
      curratings[vertex.edge(e)->vertex_id() - M] = true;
    }
    if (knn_sample_percent == 1.0){
      // Exhaustive scoring of every unrated item.
      for (uint i=M; i< M+N; i++){
        if (curratings[i-M])
          continue;
        vertex_data & other = latent_factors_inmem[i];
        double dist;
        if (algo == SVDPP)
          svdpp_predict(vdata, other, 0, dist);
        else biassgd_predict(vdata, other, 0, dist);
        indices[i-M] = i-M;
        distances[i-M] = dist + 1e-10; // tiny offset keeps valid scores nonzero
      }
    }
    else for (int i=0; i<howmany; i++){
      // Sampled scoring: pick random items (may revisit already-rated ones).
      int random_other = ::randi(M, M+N-1);
      vertex_data & other = latent_factors_inmem[random_other];
      double dist;
      if (algo == SVDPP)
        svdpp_predict(vdata, other, 0, dist);
      else biassgd_predict(vdata, other, 0, dist);
      indices[i] = random_other-M;
      distances[i] = dist;
    }
    // Keep the num_ratings best-scoring items for this user.
    vec out_dist(num_ratings);
    ivec indices_sorted = reverse_sort_index2(distances, indices, out_dist, num_ratings);
    assert(indices_sorted.size() <= num_ratings);
    assert(out_dist.size() <= num_ratings);
    vdata.ids = indices_sorted;
    vdata.ratings = out_dist;
    if (debug)
      printf("Closest is: %d with distance %g\n", (int)vdata.ids[0], vdata.ratings[0]);
    if (vertex.id() % 1000 == 0)
      printf("Computing recommendations for user %d at time: %g\n", vertex.id()+1, mytimer.current_time());
  }
};
/**
 * Writes the per-user top-K rating scores to a matrix-market array file.
 * Each output row is: user-id followed by num_ratings scores.
 */
struct MMOutputter_ratings{
  MMOutputter_ratings(std::string fname, uint start, uint end, std::string comment) {
    assert(start < end);
    MM_typecode matcode;
    set_matcode(matcode);
    FILE * f = fopen(fname.c_str(), "w");
    assert(f != NULL);
    mm_write_banner(f, matcode);
    if (comment.size() > 0)
      fprintf(f, "%%%s\n", comment.c_str());
    mm_write_mtx_array_size(f, end-start, num_ratings+1);
    for (uint row = start; row < end; row++){
      fprintf(f, "%u ", row+1); // 1-based user id
      int cols = latent_factors_inmem[row].ratings.size();
      for (int col = 0; col < cols; col++)
        fprintf(f, "%1.12e ", latent_factors_inmem[row].ratings[col]);
      fprintf(f, "\n");
    }
    fclose(f);
  }
};
/**
 * Writes the per-user top-K item ids to a matrix-market array file.
 * Each output row is: user-id followed by num_ratings item ids (1-based;
 * 0 means no further items to recommend for that user).
 */
struct MMOutputter_ids{
  MMOutputter_ids(std::string fname, uint start, uint end, std::string comment) {
    assert(start < end);
    MM_typecode matcode;
    set_matcode(matcode);
    FILE * f = fopen(fname.c_str(), "w");
    assert(f != NULL);
    mm_write_banner(f, matcode);
    if (comment.size() > 0)
      fprintf(f, "%%%s\n", comment.c_str());
    mm_write_mtx_array_size(f, end-start, num_ratings+1);
    for (uint row = start; row < end; row++){
      fprintf(f, "%u ", row+1); // 1-based user id
      int cols = latent_factors_inmem[row].ids.size();
      // shift back to 1-based item ids (internal ids start at zero)
      for (int col = 0; col < cols; col++)
        fprintf(f, "%u ", (int)latent_factors_inmem[row].ids[col]+1);
      fprintf(f, "\n");
    }
    fclose(f);
  }
};
/**
 * Emit the computed top-K recommendations: a .ratings file with scores
 * and a matching .ids file with the recommended item ids.
 */
void output_knn_result(std::string filename) {
  MMOutputter_ratings scores_out(filename + ".ratings", 0, M,"This file contains user scalar ratings. In each row i, num_ratings top scalar ratings of different items for user i. (First column: user id, next columns, top K ratings)");
  MMOutputter_ids ids_out(filename + ".ids", 0, M ,"This file contains item ids matching the ratings. In each row i, num_ratings top item ids for user i. (First column: user id, next columns, top K ratings). Note: 0 item id means there are no more items to recommend for this user.");
  std::cout << "Rating output files (in matrix market format): " << filename << ".ratings" << ", " << filename + ".ids " << std::endl;
}
/**
 * Entry point: loads a trained model (svd++ or bias-sgd factors), scores
 * unrated items for every user, and writes the top num_ratings
 * recommendations per user to .ratings/.ids matrix-market files.
 */
int main(int argc, const char ** argv) {
  mytimer.start();
  print_copyright();
  /* GraphChi initialization will read the command line
     arguments and the configuration file. */
  graphchi_init(argc, argv);
  /* Metrics object for keeping track of performance counters
     and other information. Currently required. */
  metrics m("nmf-inmemory-factors");
  knn_sample_percent = get_option_float("knn_sample_percent", 1.0);
  if (knn_sample_percent <= 0 || knn_sample_percent > 1)
    logstream(LOG_FATAL)<<"Sample percente should be in the range (0, 1] " << std::endl;
  num_ratings = get_option_int("num_ratings", 10);
  if (num_ratings <= 0)
    logstream(LOG_FATAL)<<"num_ratings, the number of recomended items for each user, should be >=1 " << std::endl;
  debug = get_option_int("debug", 0);
  tokens_per_row = get_option_int("tokens_per_row", tokens_per_row);
  // Which model's factor files to load and score with.
  std::string algorithm = get_option_string("algorithm");
  if (algorithm == "svdpp" || algorithm == "svd++")
    algo = SVDPP;
  else if (algorithm == "biassgd")
    algo = BIASSGD;
  else logstream(LOG_FATAL)<<"--algorithm should be svd++ or biassgd"<<std::endl;
  parse_command_line_args();
  /* Preprocess data if needed, or discover preprocess files */
  int nshards = 0;
  if (tokens_per_row == 3)
    nshards = convert_matrixmarket<edge_data>(training, NULL, 0, 0, 3, TRAINING, false);
  else if (tokens_per_row == 4)
    nshards = convert_matrixmarket4<edge_data4>(training);
  else logstream(LOG_FATAL)<<"--tokens_per_row should be either 3 or 4" << std::endl;
  assert(M > 0 && N > 0);
  latent_factors_inmem.resize(M+N); // Initialize in-memory vertices.
  read_factors(training);
  if ((uint)num_ratings > N){
    logstream(LOG_WARNING)<<"num_ratings is too big - setting it to: " << N << std::endl;
    num_ratings = N;
  }
  srand(time(NULL)); // seed the sampling path (knn_sample_percent < 1)
  /* Run */
  // One engine pass; the edge type must match the sharded input format.
  if (tokens_per_row == 3){
    RatingVerticesInMemProgram<VertexDataType, EdgeDataType> program;
    graphchi_engine<VertexDataType, EdgeDataType> engine(training, nshards, false, m);
    set_engine_flags(engine);
    engine.run(program, 1);
  }
  else if (tokens_per_row == 4){
    RatingVerticesInMemProgram<VertexDataType, edge_data4> program;
    graphchi_engine<VertexDataType, edge_data4> engine(training, nshards, false, m);
    set_engine_flags(engine);
    engine.run(program, 1);
  }
  /* Output latent factor matrices in matrix-market format */
  output_knn_result(training);
  rating_stats();
  /* Report execution metrics */
  if (!quiet)
    metrics_report(m);
  return 0;
}
| C++ |
/**
* @file
* @author Danny Bickson, based on code by Aapo Kyrola <akyrola@cs.cmu.edu>
* @version 1.0
*
* @section LICENSE
*
* Copyright [2012] [Carnegie Mellon University]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* This program takes both a rating file (user to item rasting) and a similarity
* file (item to item similarities).
* The output of this program is K top recommendations for each user based using
* the current user ratings and the item similarities.
*
*/
#include <string>
#include <vector>
#include <algorithm>
#include <iomanip>
#include <set>
#include <iostream>
#include "eigen_wrapper.hpp"
#include "distance.hpp"
#include "util.hpp"
#include "timer.hpp"
#include "common.hpp"
int min_allowed_intersection = 1;   // skip item pairs with fewer shared neighbors than this
size_t written_pairs = 0;           // recommendations written to the output file
size_t item_pairs_compared = 0;     // progress counter for user/item comparisons
FILE * out_file;                    // "<training>-rec" output stream
timer mytimer;                      // wall-clock timer for progress logging
bool * relevant_items = NULL;       // per-item marker array, sized N (allocated in main)
int grabbed_edges = 0;              // edges currently buffered in memory (membudget control)
int distance_metric;                // selected similarity metric id
int debug;                          // enable verbose logging
int undirected = 1;                 // treat similarity edges as undirected when nonzero
double Q = 3; //the power of the weights added into the total score
// Vertex-id layout: [0,M) are users, [M,...) are items.
bool is_item(vid_t v){ return v >= M; }
bool is_user(vid_t v){ return v < M; }
/**
* Type definitions. Remember to create suitable graph shards using the
* Sharder-program.
*/
typedef unsigned int VertexDataType;
/**
 * Edge payload holding a directed similarity weight in each direction
 * (up = toward the higher vertex id, down = toward the lower one).
 */
struct edge_data{
  float up_weight;
  float down_weight;
  edge_data() : up_weight(0), down_weight(0) { }
  edge_data(float up_weight, float down_weight) : up_weight(up_weight), down_weight(down_weight) { }
};
typedef edge_data EdgeDataType; // Edges store the "rating" of user->movie pair
/**
 * Minimal per-vertex state: a factor vector with the generic accessor
 * pair used by the shared matrix-market loaders in io.hpp.
 */
struct vertex_data{
  vec pvec;
  vertex_data(){ }
  // Store one component (loader callback).
  void set_val(int index, float val){ pvec[index] = val; }
  // Read one component (writer callback).
  float get_val(int index){ return pvec[index]; }
};
std::vector<vertex_data> latent_factors_inmem;
#include "io.hpp"
struct dense_adj {
sparse_vec edges;
sparse_vec ratings;
mutex mymutex;
vid_t vid;
dense_adj() {
vid = -1;
}
};
bool find_twice(std::vector<vid_t>& edges, vid_t val){
int ret = 0;
for (int i=0; i < (int)edges.size(); i++){
if (edges[i] == val)
ret++;
}
assert(ret >= 0 && ret <= 2);
return (ret == 2);
}
// This is used for keeping in-memory
/**
 * Holds a sliding window ("pivot range") of user adjacency lists in memory.
 * Even engine iterations load user edges into this container; odd iterations
 * accumulate weighted candidate-item scores against the loaded pivots.
 */
class adjlist_container {
  public:
    std::vector<dense_adj> adjs;   // one record per pivot user in [pivot_st, pivot_en)
    vid_t pivot_st, pivot_en;      // half-open range of currently loaded pivot user ids
    adjlist_container() {
      if (debug)
        std::cout<<"setting pivot st and end to " << 0 << std::endl;
      pivot_st = 0; //start pivot on user nodes (excluding item nodes)
      pivot_en = 0;
    }
    /** Release the current window and advance the pivot start to its end. */
    void clear() {
      for(std::vector<dense_adj>::iterator it=adjs.begin(); it != adjs.end(); ++it) {
        if (nnz(it->edges)) {
          it->edges.resize(0);
        }
        it->ratings.resize(0);
      }
      adjs.clear();
      if (debug)
        std::cout<<"setting pivot st to " << pivot_en << std::endl;
      pivot_st = pivot_en;
    }
    /**
     * Extend the interval of pivot vertices to en.
     */
    void extend_pivotrange(vid_t en) {
      assert(en>pivot_en);
      assert(en > pivot_st);
      pivot_en = en;
      adjs.resize(pivot_en - pivot_st);
      //for (uint i=0; i< pivot_en - pivot_st; i++)
      //  adjs[i].ratings = zeros(N);
    }
    /**
     * Grab pivot's adjacency list into memory.
     * Returns the number of edges loaded (also added to the global
     * grabbed_edges counter used for the memory budget).
     */
    int load_edges_into_memory(graphchi_vertex<uint32_t, EdgeDataType> &v) {
      assert(is_pivot(v.id()));
      assert(is_user(v.id()));
      int num_edges = v.num_edges();
      dense_adj dadj;
      // record each rated item with its edge weight
      for(int i=0; i<num_edges; i++)
        set_new( dadj.edges, v.edge(i)->vertex_id(), v.edge(i)->get_data().up_weight);
      //dadj.ratings = zeros(N);
      dadj.vid = v.id();
      adjs[v.id() - pivot_st] = dadj;
      assert(v.id() - pivot_st < adjs.size());
      __sync_add_and_fetch(&grabbed_edges, num_edges /*edges_to_larger_id*/);
      return num_edges;
    }
    /**
     * add weighted ratings for each linked item
     *
     * For one (pivot user, item) pair: walk the item's similarity edges and
     * add weight^Q for every similar item the user has NOT already rated.
     * Returns 0 in all paths (score accumulation happens via side effects).
     */
    double compute_ratings(graphchi_vertex<uint32_t, EdgeDataType> &item, vid_t user_pivot, int distance_metric) {
      assert(is_pivot(user_pivot));
      //assert(is_item(pivot) && is_item(v.id()));
      dense_adj &pivot_edges = adjs[user_pivot - pivot_st];
      // only items the pivot user actually rated contribute
      if (!get_val(pivot_edges.edges, item.id())){
        if (debug)
          logstream(LOG_DEBUG)<<"Skipping item pivot pair since not connected!" << item.id() << std::endl;
        return 0;
      }
      int num_edges = item.num_edges();
      if (debug)
        logstream(LOG_DEBUG)<<"Found " << num_edges << " edges from item : " << item.id() << std::endl;
      //if there are not enough neighboring user nodes to those two items there is no need
      //to actually count the intersection
      if (num_edges < min_allowed_intersection || nnz(pivot_edges.edges) < min_allowed_intersection){
        if (debug)
          logstream(LOG_DEBUG)<<"skipping item pivot pair since < min_allowed_intersection" << std::endl;
        return 0;
      }
      // collect the item-to-item neighbors (sorted for find_twice below)
      std::vector<vid_t> edges;
      for(int i=0; i < num_edges; i++){
        if (is_item(item.edge(i)->vertex_id()))
          edges.push_back(item.edge(i)->vertex_id());
      }
      std::sort(edges.data(), edges.data()+edges.size());
      for(int i=0; i < num_edges; i++){
        vid_t other_item = item.edge(i)->vertex_id();
        bool up = item.id() < other_item;
        if (debug)
          logstream(LOG_DEBUG)<<"Checking now edge: " << other_item << std::endl;
        if (is_user(other_item)){
          if (debug)
            logstream(LOG_DEBUG)<<"skipping edge to user " << other_item << std::endl;
          continue;
        }
        // in directed mode, only follow edges whose direction matches
        if (!undirected && ((!up && item.edge(i)->get_data().up_weight == 0) ||
            (up && item.edge(i)->get_data().down_weight == 0))){
          if (debug)
            logstream(LOG_DEBUG)<<"skipping edge with wrong direction to " << other_item << std::endl;
          continue;
        }
        // never recommend an item the user already rated
        if (get_val(pivot_edges.edges, other_item)){
          if (debug)
            logstream(LOG_DEBUG)<<"skipping edge to " << other_item << " because alrteady connected to pivot" << std::endl;
          continue;
        }
        assert(get_val(pivot_edges.edges, item.id()) != 0);
        float weight = std::max(item.edge(i)->get_data().down_weight, item.edge(i)->get_data().up_weight);
        assert(weight != 0);
        // in directed mode the neighbor must appear twice (both directions present)
        if (undirected || find_twice(edges, other_item)){
          //pivot_edges.ratings[edges[i]-M] += item.edge(i)->get_data() * get_val(pivot_edges.edges, item.id());
          pivot_edges.mymutex.lock();
          set_val(pivot_edges.ratings, other_item-M, get_val(pivot_edges.ratings, other_item-M) + pow(weight,Q) /* * get_val(pivot_edges.edges, item.id())*/);
          pivot_edges.mymutex.unlock();
          if (debug)
            logstream(LOG_DEBUG)<<"Adding weight: " << weight << " to item: " << other_item-M+1 << " for user: " << user_pivot+1<<std::endl;
        }
      }
      if (debug)
        logstream(LOG_DEBUG)<<"Finished user pivot " << user_pivot << std::endl;
      return 0;
    }
    /** True when vid lies inside the currently loaded pivot window. */
    inline bool is_pivot(vid_t vid) {
      return vid >= pivot_st && vid < pivot_en;
    }
};
adjlist_container * adjcontainer;
/**
 * Two-phase GraphChi program: even iterations load a window of pivot users
 * into memory, odd iterations score candidate items against those pivots
 * and flush the per-user top-K recommendations to the output file.
 */
struct ItemDistanceProgram : public GraphChiProgram<VertexDataType, EdgeDataType> {
  /**
   * Vertex update function.
   */
  void update(graphchi_vertex<VertexDataType, EdgeDataType> &v, graphchi_context &gcontext) {
    if (debug)
      printf("Entered iteration %d with %d\n", gcontext.iteration, is_item(v.id()) ? (v.id() - M + 1): v.id());
    /* Even iteration numbers:
     * 1) load a subset of users into memory (pivots)
     * 2) Find which subset of items is connected to the users
     */
    if (gcontext.iteration % 2 == 0) {
      if (adjcontainer->is_pivot(v.id()) && is_user(v.id())){
        adjcontainer->load_edges_into_memory(v);
        if (debug)
          printf("Loading pivot %d intro memory\n", v.id());
      }
    }
    /* odd iteration number:
     * 1) For any item connected to a pivot item
     *    compute itersection
     */
    else {
      assert(is_item(v.id()));
      for (int i=0; i< v.num_edges(); i++){
        // only users inside the loaded pivot window contribute this pass
        if (!adjcontainer->is_pivot(v.edge(i)->vertex_id()))
          continue;
        if (debug)
          printf("comparing user pivot %d to item %d\n", v.edge(i)->vertex_id()+1 , v.id() - M + 1);
        adjcontainer->compute_ratings(v, v.edge(i)->vertex_id(), distance_metric);
        item_pairs_compared++;
        if (item_pairs_compared % 1000000 == 0)
          logstream(LOG_INFO)<< std::setw(10) << mytimer.current_time() << ") " << std::setw(10) << item_pairs_compared << " pairs compared " << std::endl;
      }
    }//end of iteration % 2 == 1
  }//end of update function
  /**
   * Called before an iteration starts.
   * On odd iteration, schedule both users and items.
   * on even iterations, schedules only item nodes
   */
  void before_iteration(int iteration, graphchi_context &gcontext) {
    gcontext.scheduler->remove_tasks(0, (int) gcontext.nvertices - 1);
    if (gcontext.iteration % 2 == 0){
      for (vid_t i=0; i < M; i++){
        //even iterations, schedule only user nodes
        gcontext.scheduler->add_task(i);
      }
    } else { //iteration % 2 == 1, schedule only item nodes
      for (vid_t i=M; i < M+N; i++){
        gcontext.scheduler->add_task(i);
      }
    }
  }
  /**
   * After each odd iteration: write the top-K accumulated scores of every
   * loaded pivot user to the output file, then release the pivot window.
   */
  void after_iteration(int iteration, graphchi_context &gcontext){
    if (gcontext.iteration % 2 == 1){
      for (int i=0; i< (int)adjcontainer->adjs.size(); i++){
        if (debug)
          logstream(LOG_DEBUG)<<"Going over user" << adjcontainer->adjs[i].vid << std::endl;
        dense_adj &user = adjcontainer->adjs[i];
        if (nnz(user.edges) == 0 || nnz(user.ratings) == 0){
          if (debug)
            logstream(LOG_DEBUG)<<"User with no edges" << std::endl;
          continue;
        }
        //assert(user.ratings.size() == N);
        // K best-scoring candidate items for this user
        ivec positions = reverse_sort_index(user.ratings, K);
        assert(positions.size() > 0);
        for (int j=0; j < positions.size(); j++){
          assert(positions[j] >= 0);
          assert(positions[j] < (int)N);
          //skip zero entries
          if (get_val(user.ratings, positions[j])== 0){
            if (debug)
              logstream(LOG_DEBUG)<<"Found zero in position " << j << std::endl;
            break;
          }
          int rc = fprintf(out_file, "%u %u %lg\n", user.vid+1, positions[j]+1, get_val(user.ratings, positions[j]));//write item similarity to file
          if (debug)
            logstream(LOG_DEBUG)<<"Writing rating from user" << user.vid+1 << " to item: " << positions[j] << std::endl;
          assert(rc > 0);
          written_pairs++;
        }
      }
      grabbed_edges = 0;
      adjcontainer->clear();
    }
  }
  /**
   * Called before an execution interval is started.
   *
   * On every even iteration, we load pivot's item connected user lists to memory.
   * Here we manage the memory to ensure that we do not load too much
   * edges into memory.
   */
  void before_exec_interval(vid_t window_st, vid_t window_en, graphchi_context &gcontext) {
    /* on even iterations, loads pivot items into memory base on the membudget_mb allowed memory size */
    if ((gcontext.iteration % 2 == 0)) {
      //if (debug){
      printf("entering iteration: %d on before_exec_interval\n", gcontext.iteration);
      printf("pivot_st is %d window_St %d, window_en %d\n", adjcontainer->pivot_st, window_st, window_en);
      //}
      if (adjcontainer->pivot_st < window_en){
        size_t max_grab_edges = get_option_long("membudget_mb", 1024) * 1024 * 1024 / 8;
        // only grow the pivot window while under ~80% of the edge budget
        if (grabbed_edges < max_grab_edges * 0.8) {
          logstream(LOG_DEBUG) << "Window init, grabbed: " << grabbed_edges << " edges" << " extending pivor_range to : " << window_en + 1 << std::endl;
          adjcontainer->extend_pivotrange(window_en + 1);
          logstream(LOG_DEBUG) << "Window en is: " << window_en << " vertices: " << gcontext.nvertices << std::endl;
          if (window_en+1 >= gcontext.nvertices) {
            // every user was a pivot item, so we are done
            logstream(LOG_DEBUG)<<"Setting last iteration to: " << gcontext.iteration + 2 << std::endl;
            gcontext.set_last_iteration(gcontext.iteration + 2);
          }
        } else {
          logstream(LOG_DEBUG) << "Too many edges, already grabbed: " << grabbed_edges << std::endl;
        }
      }
    }
  }
  /**
   * Called before an execution interval is started.
   * (Only logs progress on odd iterations.)
   */
  void after_exec_interval(vid_t window_st, vid_t window_en, graphchi_context &gcontext) {
    //on odd iterations, dump user recommendations computed so far to disk
    if (gcontext.iteration % 2 == 1){
      printf("entering iteration: %d on after_exec_interval\n", gcontext.iteration);
      printf("pivot_st is %d window_st %d, window_en %d\n", adjcontainer->pivot_st, window_st, window_en);
    }
  }
};
/**
 * Entry point: combines a user->item rating file with an item->item
 * similarity file and writes K top recommendations per user to
 * "<training>-rec".
 */
int main(int argc, const char ** argv) {
  print_copyright();
  /* GraphChi initialization will read the command line
     arguments and the configuration file. */
  graphchi_init(argc, argv);
  /* Metrics object for keeping track of performance counters
     and other information. Currently required. */
  metrics m("itemsim2rating");
  /* Basic arguments for application */
  min_allowed_intersection = get_option_int("min_allowed_intersection", min_allowed_intersection);
  debug = get_option_int("debug", 0);
  parse_command_line_args();
  std::string similarity = get_option_string("similarity", "");
  if (similarity == "")
    logstream(LOG_FATAL)<<"Missing similarity input file. Please specify one using the --similarity=filename command line flag" << std::endl;
  undirected = get_option_int("undirected", 1);
  Q = get_option_float("Q", Q);
  mytimer.start();
  // shard the combined ratings + item-similarity graph
  int nshards = convert_matrixmarket_and_item_similarity<EdgeDataType>(training, similarity);
  K = get_option_int("K");
  assert(M > 0 && N > 0);
  //initialize data structure which saves a subset of the items (pivots) in memory
  adjcontainer = new adjlist_container();
  //array for marking which items are conected to the pivot items via users.
  // NOTE(review): allocated but not initialized or referenced in this file
  // beyond the delete[] below — confirm it is filled elsewhere before use.
  relevant_items = new bool[N];
  /* Run */
  ItemDistanceProgram program;
  graphchi_engine<VertexDataType, EdgeDataType> engine(training,nshards, true, m);
  set_engine_flags(engine);
  //engine.set_maxwindow(M+N+1);
  out_file = open_file((training + "-rec").c_str(), "w");
  //run the program
  engine.run(program, niters);
  /* Report execution metrics */
  if (!quiet)
    metrics_report(m);
  std::cout<<"Total item pairs compared: " << item_pairs_compared << " total written to file: " << written_pairs << std::endl;
  std::cout<<"Created output files with the format: " << training << "-rec" << std::endl;
  delete[] relevant_items;
  fclose(out_file);
  return 0;
}
| C++ |
/**
* @file
* @author Danny Bickson
* @version 1.0
*
* @section LICENSE
*
* Copyright [2012] [Carnegie Mellon University]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @section DESCRIPTION
*
* Tensor factorization with the Alternative Least Squares (ALS) algorithm.
* Algorithm is described in: Tensor Decompositions, Alternating Least Squares and other Tales. P. Comon, X. Luciani and A. L. F. de Almeida. Special issue, Journal of Chemometrics. In memory of R. Harshman.
* August 16, 2009
*
*/
#include "common.hpp"
#include "eigen_wrapper.hpp"
double lambda = 0.065; // ALS regularization weight (overridden by --lambda)
// Vertex-id layout: [0,M) users, [M,M+N) items, [M+N,...) time bins.
bool is_user(vid_t id){ return id < M; }
// Fix: the upper bound was `id < N`, which is wrong whenever M > 0 — item
// vertices occupy [M, M+N), consistent with is_time() below.
bool is_item(vid_t id){ return id >= M && id < M+N; }
bool is_time(vid_t id){ return id >= M+N; }
/**
 * Per-vertex state for tensor ALS: D latent factors for a user, item,
 * or time-bin node, plus the generic accessors used by io.hpp loaders.
 */
struct vertex_data {
  vec pvec;
  vertex_data() {
    pvec = zeros(D);
  }
  // Store one factor component (loader callback).
  void set_val(int index, float val){ pvec[index] = val; }
  // Read one factor component (writer callback).
  float get_val(int index){ return pvec[index]; }
};
/** Edge payload: observed rating plus the time-bin vertex id it maps to. */
struct edge_data {
  double weight;
  double time;
  edge_data() : weight(0), time(0) { }
  edge_data(double weight, double time) : weight(weight), time(time) { }
};
/**
* Type definitions. Remember to create suitable graph shards using the
* Sharder-program.
*/
// GraphChi engine type bindings for tensor ALS.
typedef vertex_data VertexDataType;
typedef edge_data EdgeDataType; // Edges store the "rating" of user->movie pair
// Training and validation engine pointers (set in main) and the in-memory
// factor table: rows [0,M) users, [M,M+N) items, [M+N,M+N+K) time bins.
graphchi_engine<VertexDataType, EdgeDataType> * pengine = NULL;
graphchi_engine<VertexDataType, EdgeDataType> * pvalidation_engine = NULL;
std::vector<vertex_data> latent_factors_inmem;
#include "io.hpp"
#include "rmse.hpp"
#include "rmse_engine4.hpp"
float als_tensor_predict(const vertex_data& user,
const vertex_data& movie,
const float rating,
double & prediction,
void * extra){
vertex_data * time_node = (vertex_data*)extra;
assert(time_node != NULL && time_node->pvec.size() == D);
prediction = dot3(user.pvec, movie.pvec, time_node->pvec);
//truncate prediction to allowed values
prediction = std::min((double)prediction, maxval);
prediction = std::max((double)prediction, minval);
//return the squared error
float err = rating - prediction;
assert(!std::isnan(err));
return err*err;
}
/**
* GraphChi programs need to subclass GraphChiProgram<vertex-type, edge-type>
* class. The main logic is usually in the update function.
*/
/**
 * GraphChi programs need to subclass GraphChiProgram<vertex-type, edge-type>
 * class. The main logic is usually in the update function.
 *
 * Tensor-ALS update: for each vertex, solve the regularized normal
 * equations (XtX + lambda*I) x = Xty built from the element-wise products
 * of the two other modes' factor vectors.
 */
struct ALSVerticesInMemProgram : public GraphChiProgram<VertexDataType, EdgeDataType> {
  /*
   * Vertex update function - computes the least square step
   */
  void update(graphchi_vertex<VertexDataType, EdgeDataType> &vertex, graphchi_context &gcontext) {
    vertex_data & vdata = latent_factors_inmem[vertex.id()];
    mat XtX = mat::Zero(D, D);
    vec Xty = vec::Zero(D);
    bool compute_rmse = is_user(vertex.id()); // count each rating's error once
    // Compute XtX and Xty (NOTE: unweighted)
    for(int e=0; e < vertex.num_edges(); e++){
      float observation = vertex.edge(e)->get_data().weight;
      // edge stores the time-bin vertex id in its `time` field
      uint time = (uint)vertex.edge(e)->get_data().time;
      assert(time >= 0 && time < M+N+K);
      assert(time != vertex.id());
      vertex_data & nbr_latent = latent_factors_inmem[vertex.edge(e)->vertex_id()];
      vertex_data & time_node = latent_factors_inmem[time];
      assert(time != vertex.id() && time != vertex.edge(e)->vertex_id());
      // design row = neighbor factors (element-wise) time factors
      vec XY = nbr_latent.pvec.cwiseProduct(time_node.pvec);
      Xty += XY * observation;
      XtX.triangularView<Eigen::Upper>() += XY * XY.transpose();
      if (compute_rmse) {
        double prediction;
        rmse_vec[omp_get_thread_num()] += als_tensor_predict(vdata, nbr_latent, observation, prediction, (void*)&time_node);
      }
    }
    double regularization = lambda;
    // Fix: this previously did `lambda *= vertex.num_edges();`, which
    // mutated the GLOBAL lambda on every regnormal update (so it grew
    // without bound across vertices/iterations) while the local
    // regularization actually added to XtX was never scaled at all.
    if (regnormal)
      regularization *= vertex.num_edges();
    for(int i=0; i < D; i++) XtX(i,i) += regularization;
    // Solve the least squares problem with eigen using Cholesky decomposition
    vdata.pvec = XtX.selfadjointView<Eigen::Upper>().ldlt().solve(Xty);
  }
  /**
   * Called before an iteration is started.
   */
  void before_iteration(int iteration, graphchi_context &gcontext) {
    reset_rmse(gcontext.execthreads);
  }
  /**
   * Called after an iteration has finished.
   */
  void after_iteration(int iteration, graphchi_context &gcontext) {
    training_rmse(iteration, gcontext);
    run_validation4(pvalidation_engine, gcontext);
  }
};
void output_als_result(std::string filename) {
/* Write the three factor matrices (U: users, V: items, T: time bins) in matrix-market format. */
MMOutputter_mat<vertex_data> user_mat(filename + "_U.mm", 0, M, "This file contains tensor-ALS output matrix U. In each row D factors of a single user node.", latent_factors_inmem);
MMOutputter_mat<vertex_data> item_mat(filename + "_V.mm", M ,M+N, "This file contains tensor-ALS output matrix V. In each row D factors of a single item node.", latent_factors_inmem);
MMOutputter_mat<vertex_data> time_mat(filename + "_T.mm", M+N ,M+N+K, "This file contains tensor-ALS output matrix T. In each row D factors of a single time node.", latent_factors_inmem);
logstream(LOG_INFO) << "tensor - ALS output files (in matrix market format): " << filename << "_U.mm" <<
", " << filename + "_V.mm " << filename + "_T.mm" << std::endl;
}
int main(int argc, const char ** argv) {
print_copyright();
/* GraphChi initialization will read the command line
arguments and the configuration file. */
graphchi_init(argc, argv);
/* Metrics object for keeping track of performance counters
and other information. Currently required. */
metrics m("als-tensor-inmemory-factors");
/* regularization weight used in the least-squares solve (see update()) */
lambda = get_option_float("lambda", 0.065);
parse_command_line_args();
parse_implicit_command_line();
/* Preprocess data if needed, or discover preprocess files */
/* tensor input: each line is [user] [item] [time] [rating]; add_time_edges=true */
int nshards = convert_matrixmarket4<edge_data>(training, true);
/* allocate factors for users (M), items (N) and time bins (K); random init unless loading from disk */
init_feature_vectors<std::vector<vertex_data> >(M+N+K, latent_factors_inmem, !load_factors_from_file);
if (validation != ""){
int vshards = convert_matrixmarket4<EdgeDataType>(validation, true, M==N, VALIDATION);
init_validation_rmse_engine<VertexDataType, EdgeDataType>(pvalidation_engine, vshards, &als_tensor_predict, false, true, 1);
}
/* optionally warm-start from factor matrices produced by a previous run */
if (load_factors_from_file){
load_matrix_market_matrix(training + "_U.mm", 0, D);
load_matrix_market_matrix(training + "_V.mm", M, D);
load_matrix_market_matrix(training + "_T.mm", M+N, D);
}
/* Run */
ALSVerticesInMemProgram program;
graphchi_engine<VertexDataType, EdgeDataType> engine(training, nshards, false, m);
set_engine_flags(engine);
pengine = &engine;
engine.run(program, niters);
/* Output test predictions in matrix-market format */
output_als_result(training);
test_predictions3(&als_tensor_predict);
/* Report execution metrics */
if (!quiet)
metrics_report(m);
return 0;
}
| C++ |
/**
* Copyright (c) 2009 Carnegie Mellon University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an "AS
* IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language
* governing permissions and limitations under the License.
*
* Written by Danny Bickson, CMU
* Code for computing ranking metrics
*
* */
#include <algorithm>
#include "eigen_wrapper.hpp"
/* average_precision_at_k code based on Ben Hamer's Kaggle code:
* https://github.com/benhamner/Metrics/blob/master/MATLAB/metrics/averagePrecisionAtK.m
*/
double average_precision_at_k(vec & predictions, int prediction_size, vec & actual, int actual_size, int k){
  // Mean average precision at k: fraction of the top-k predictions that
  // appear in `actual`, weighted by the precision at each hit position.
  // NOTE: prediction_size is unused (kept for interface compatibility);
  // the loop bounds use predictions.size() instead.
  double score = 0;
  int num_hits = 0;
  // consider at most the first k relevant items
  actual_size = std::min(k, actual_size);
  // BUGFIX/robustness: an empty "actual" list previously divided by zero
  // below, yielding NaN (or inf) instead of a score.
  if (actual_size <= 0)
    return 0;
  vec sorted_actual = actual;
  std::sort(sorted_actual.data(), sorted_actual.data() + actual_size);
  for (int i=0; i < std::min((int)predictions.size(), k); i++){
    if (std::binary_search(sorted_actual.data(), sorted_actual.data() + actual_size, predictions[i])){
      num_hits++;
      score += num_hits / (i+1.0); // precision contribution at position i
    }
  }
  // normalize by the number of relevant items considered (already <= k)
  score /= (double)actual_size;
  return score;
}
| C++ |
#ifndef __CF_UTILS__
#define __CF_UTILS__
#include <omp.h>
#include <stdio.h>
#include <iostream>
int number_of_omp_threads(){
int num_threads = 0;
int id;
#pragma omp parallel private(id)
{
id = omp_get_thread_num();
if (id == 0)
num_threads = omp_get_num_threads();
}
return num_threads;
}
struct in_file{
  FILE * outf; // open read handle (field name kept for symmetry with out_file)
  // Opens fname for reading; terminates the process when the file is missing.
  in_file(std::string fname) {
    FILE * handle = fopen(fname.c_str(), "r");
    if (handle == NULL){
      std::cerr<<"Failed to open file: " << fname << std::endl;
      exit(1);
    }
    outf = handle;
  }
  // RAII: close the handle when the wrapper leaves scope.
  ~in_file() {
    if (outf != NULL)
      fclose(outf);
  }
};
struct out_file{
  FILE * outf; // open write handle
  // Opens fname for writing. BUGFIX/consistency: the old constructor left
  // outf NULL on failure (unlike in_file), so later fprintf(outf, ...)
  // calls would crash; now we fail fast like in_file does.
  out_file(const std::string fname){
    outf = fopen(fname.c_str(), "w");
    if (outf == NULL){
      std::cerr<<"Failed to open file: " << fname << std::endl;
      exit(1);
    }
  }
  // RAII: close the handle when the wrapper leaves scope.
  ~out_file(){
    if (outf != NULL) fclose(outf);
  }
};
/*
template<typename T1>
void load_map_from_txt_file(T1 & map, const std::string filename, bool gzip, int fields){
logstream(LOG_INFO)<<"loading map from txt file: " << filename << std::endl;
gzip_in_file fin(filename, gzip);
char linebuf[1024];
char saveptr[1024];
bool mm_header = false;
int line = 0;
char * pch2 = NULL;
while (!fin.get_sp().eof() && fin.get_sp().good()){
fin.get_sp().getline(linebuf, 10000);
if (fin.get_sp().eof())
break;
if (linebuf[0] == '%'){
logstream(LOG_INFO)<<"Detected matrix market header: " << linebuf << " skipping" << std::endl;
mm_header = true;
continue;
}
if (mm_header){
mm_header = false;
continue;
}
char *pch = strtok_r(linebuf," \r\n\t",(char**)&saveptr);
if (!pch){
logstream(LOG_FATAL) << "Error when parsing file: " << filename << ":" << line <<std::endl;
}
if (fields == 2){
pch2 = strtok_r(NULL,"\n",(char**)&saveptr);
if (!pch2)
logstream(LOG_FATAL) << "Error when parsing file: " << filename << ":" << line <<std::endl;
}
if (fields == 1)
map[boost::lexical_cast<std::string>(line)] = pch;
else map[pch] = pch2;
line++;
}
logstream(LOG_INFO)<<"Map size is: " << map.size() << std::endl;
}*/
#endif
| C++ |
/**
* @file
* @author Danny Bickson
* @version 1.0
*
* @section LICENSE
*
* Copyright [2012] [Carnegie Mellon University]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @section DESCRIPTION
*
* Matrix factorization with the Bias Stochastic Gradient Descent (BIASSGD) algorithm.
* Algorithm is described in the paper:
* Y. Koren. Factorization Meets the Neighborhood: a Multifaceted Collaborative Filtering Model. ACM SIGKDD 2008. Equation (5).
*
*/
#include "common.hpp"
#include "eigen_wrapper.hpp"
double biassgd_lambda = 1e-3; //sgd regularization weight (multiplies the decay terms in update(); original comment said "step size" — swapped)
double biassgd_gamma = 1e-3;  //sgd step size / learning rate (multiplies every gradient step; decayed each iteration)
double biassgd_step_dec = 0.9; //multiplicative decay applied to the step size (gamma) after every iteration
#define BIAS_POS -1 // sentinel index: get_val/set_val map it to the node's bias field
struct vertex_data {
  vec pvec;    // latent feature vector of length D
  double bias; // additive bias term of this node

  vertex_data() : bias(0) {
    pvec = zeros(D);
  }
  // Write one component; BIAS_POS addresses the bias, anything else the vector.
  void set_val(int index, float val){
    if (index == BIAS_POS)
      bias = val;
    else
      pvec[index] = val;
  }
  // Read one component; BIAS_POS addresses the bias, anything else the vector.
  float get_val(int index){
    return (index == BIAS_POS) ? bias : pvec[index];
  }
};
/**
* Type definitions. Remember to create suitable graph shards using the
* Sharder-program.
*/
typedef vertex_data VertexDataType;
typedef float EdgeDataType; // Edges store the "rating" of user->movie pair
graphchi_engine<VertexDataType, EdgeDataType> * pengine = NULL; // training engine
graphchi_engine<VertexDataType, EdgeDataType> * pvalidation_engine = NULL; // validation engine (only set when a validation file is given)
std::vector<vertex_data> latent_factors_inmem; // in-memory factors/biases for all user + item nodes
#include "rmse.hpp"
#include "rmse_engine.hpp"
#include "io.hpp"
/** compute a missing value based on bias-SGD algorithm */
float bias_sgd_predict(const vertex_data& user,
    const vertex_data& movie,
    const float rating,
    double & prediction,
    void * extra = NULL){
  // r_hat = mu + b_u + b_i + <p_u, q_i>   (Koren, KDD'08, eq. 5)
  prediction = globalMean + user.bias + movie.bias + dot_prod(user.pvec, movie.pvec);
  // clamp the estimate into the legal rating range
  if (prediction > maxval)
    prediction = maxval;
  if (prediction < minval)
    prediction = minval;
  float err = rating - prediction;
  if (std::isnan(err))
    logstream(LOG_FATAL)<<"Got into numerical errors. Try to decrease step size using bias-SGD command line arugments)" << std::endl;
  // squared error of this single rating
  return err*err;
}
/**
* GraphChi programs need to subclass GraphChiProgram<vertex-type, edge-type>
* class. The main logic is usually in the update function.
*/
struct BIASSGDVerticesInMemProgram : public GraphChiProgram<VertexDataType, EdgeDataType> {
/**
* Called before an iteration is started: reset the per-thread RMSE accumulators.
*/
void before_iteration(int iteration, graphchi_context &gcontext) {
reset_rmse(gcontext.execthreads);
}
/**
* Called after an iteration has finished: decay the learning rate,
* print training RMSE, and evaluate the validation set (if any).
*/
void after_iteration(int iteration, graphchi_context &gcontext) {
biassgd_gamma *= biassgd_step_dec;
training_rmse(iteration, gcontext);
run_validation(pvalidation_engine, gcontext);
}
/**
* Vertex update function. One SGD step per training rating: updates the
* user and item biases plus both latent vectors (Koren KDD'08, eq. 5).
* Only vertices with out-edges (user nodes) drive updates.
*/
void update(graphchi_vertex<VertexDataType, EdgeDataType> &vertex, graphchi_context &gcontext) {
if ( vertex.num_outedges() > 0){
vertex_data & user = latent_factors_inmem[vertex.id()];
for(int e=0; e < vertex.num_edges(); e++) {
float observation = vertex.edge(e)->get_data();
vertex_data & movie = latent_factors_inmem[vertex.edge(e)->vertex_id()];
double estScore = 0;
// bias_sgd_predict returns the squared error and fills estScore with the prediction
rmse_vec[omp_get_thread_num()] += bias_sgd_predict(user, movie, observation, estScore);
double err = observation - estScore;
if (std::isnan(err) || std::isinf(err))
logstream(LOG_FATAL)<<"BIASSGD got into numerical error. Please tune step size using --biassgd_gamma and biassgd_lambda" << std::endl;
// gradient steps: gamma is the learning rate, lambda the regularization weight
user.bias += biassgd_gamma*(err - biassgd_lambda* user.bias);
movie.bias += biassgd_gamma*(err - biassgd_lambda* movie.bias);
//NOTE: the following code is not thread safe, since potentially several
//user nodes may update this item gradient vector concurrently. However in practice it
//did not matter in terms of accuracy on a multicore machine.
//if you like to defend the code, you can define a global variable
//mutex mymutex;
//
//and then do: mymutex.lock()
movie.pvec += biassgd_gamma*(err*user.pvec - biassgd_lambda*movie.pvec);
//here add: mymutex.unlock();
user.pvec += biassgd_gamma*(err*movie.pvec - biassgd_lambda*user.pvec);
}
}
}
};
void output_biassgd_result(std::string filename){
  // Write factor matrices U/V, the two bias vectors, and the global mean
  // (needed at prediction time), all in matrix-market format.
  MMOutputter_mat<vertex_data> user_output(filename + "_U.mm", 0, M, "This file contains bias-SGD output matrix U. In each row D factors of a single user node.", latent_factors_inmem);
  MMOutputter_mat<vertex_data> item_output(filename + "_V.mm", M, M+N , "This file contains bias-SGD output matrix V. In each row D factors of a single item node.", latent_factors_inmem);
  MMOutputter_vec<vertex_data> user_bias_vec(filename + "_U_bias.mm", 0, M, BIAS_POS, "This file contains bias-SGD output bias vector. In each row a single user bias.", latent_factors_inmem);
  MMOutputter_vec<vertex_data> item_bias_vec(filename + "_V_bias.mm",M ,M+N, BIAS_POS, "This file contains bias-SGD output bias vector. In each row a single item bias.", latent_factors_inmem);
  // BUGFIX: the two messages below said "SVD++"/"SVDPP" — a copy-paste
  // leftover from the svdpp program; this is the bias-SGD output.
  MMOutputter_scalar gmean(filename + "_global_mean.mm", "This file contains bias-SGD global mean which is required for computing predictions.", globalMean);
  logstream(LOG_INFO) << "bias-SGD output files (in matrix market format): " << filename << "_U.mm" <<
    ", " << filename + "_V.mm, " << filename << "_U_bias.mm, " << filename << "_V_bias.mm, " << filename << "_global_mean.mm" << std::endl;
}
int main(int argc, const char ** argv) {
print_copyright();
/* GraphChi initialization will read the command line arguments and the configuration file. */
graphchi_init(argc, argv);
/* Metrics object for keeping track of performance counters
and other information. Currently required. */
metrics m("biassgd-inmemory-factors");
/* algorithm hyper-parameters; see the file header for the reference paper */
biassgd_lambda = get_option_float("biassgd_lambda", 1e-3);
biassgd_gamma = get_option_float("biassgd_gamma", 1e-3);
biassgd_step_dec = get_option_float("biassgd_step_dec", 0.9);
parse_command_line_args();
parse_implicit_command_line();
/* Preprocess data if needed, or discover preprocess files */
int nshards = convert_matrixmarket<EdgeDataType>(training, NULL, 0, 0, 3, TRAINING, false);
/* allocate factors for M users followed by N items; random init unless loading from disk */
init_feature_vectors<std::vector<vertex_data> >(M+N, latent_factors_inmem, !load_factors_from_file);
if (validation != ""){
int vshards = convert_matrixmarket<EdgeDataType>(validation, NULL, 0, 0, 3, VALIDATION, false);
init_validation_rmse_engine<VertexDataType, EdgeDataType>(pvalidation_engine, vshards, &bias_sgd_predict);
}
/* load initial state from disk (optional) */
if (load_factors_from_file){
load_matrix_market_matrix(training + "_U.mm", 0, D);
load_matrix_market_matrix(training + "_V.mm", M, D);
vec user_bias = load_matrix_market_vector(training +"_U_bias.mm", false, true);
assert(user_bias.size() == M);
vec item_bias = load_matrix_market_vector(training +"_V_bias.mm", false, true);
assert(item_bias.size() == N);
/* rows 0..M-1 are users, rows M..M+N-1 are items */
for (uint i=0; i<M+N; i++){
latent_factors_inmem[i].bias = ((i<M)?user_bias[i] : item_bias[i-M]);
}
vec gm = load_matrix_market_vector(training + "_global_mean.mm", false, true);
globalMean = gm[0];
}
/* Run */
BIASSGDVerticesInMemProgram program;
graphchi_engine<VertexDataType, EdgeDataType> engine(training, nshards, false, m);
set_engine_flags(engine);
pengine = &engine;
engine.run(program, niters);
/* Output latent factor matrices in matrix-market format */
output_biassgd_result(training);
test_predictions(&bias_sgd_predict);
/* Report execution metrics */
if (!quiet)
metrics_report(m);
return 0;
}
| C++ |
#ifndef DEF_IOHPP
#define DEF_IOHPP
/**
* @file
* @author Danny Bickson
* @version 1.0
*
* @section LICENSE
*
* Copyright [2012] [Carnegie Mellon University]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include "types.hpp"
#include "implicit.hpp"
/*
* open a file and verify open success
*/
FILE * open_file(const char * name, const char * mode, bool optional = false){
  FILE * f = fopen(name, mode);
  if (f == NULL && !optional){
    perror("fopen failed");
    // BUGFIX: the old message had no separator, printing "...open fileNAME";
    // now consistent with the file's other "Failed to open ... : " messages.
    logstream(LOG_FATAL) << "Failed to open file: " << name << std::endl;
  }
  // May return NULL only when optional == true.
  return f;
}
void set_matcode(MM_typecode & matcode, bool sparse = false){
  // Prepare a matrix-market typecode describing a real-valued matrix.
  mm_initialize_typecode(&matcode);
  mm_set_matrix(&matcode);
  mm_set_real(&matcode);
  // coordinate = sparse triplet layout, array = dense layout
  if (sparse)
    mm_set_coordinate(&matcode);
  else
    mm_set_array(&matcode);
}
void read_matrix_market_banner_and_size(FILE * f, MM_typecode & matcode, uint & Me, uint & Ne, size_t & nz, const std::string & filename){
/* Parse the matrix-market banner of f and fill rows (Me), cols (Ne) and
* non-zero count (nz); aborts via LOG_FATAL on unsupported matrix types. */
if (mm_read_banner(f, &matcode) != 0)
logstream(LOG_FATAL) << "Could not process Matrix Market banner. File: " << filename << std::endl;
/* This is how one can screen matrix types if their application */
/* only supports a subset of the Matrix Market data types. */
if (mm_is_complex(matcode) || !mm_is_sparse(matcode))
logstream(LOG_FATAL) << "Sorry, this application does not support complex values and requires a sparse matrix." << std::endl;
/* find out size of sparse matrix .... */
if (mm_read_mtx_crd_size(f, &Me, &Ne, &nz) != 0) {
logstream(LOG_FATAL) << "Failed reading matrix size: error" << std::endl;
}
}
void detect_matrix_size(std::string filename, FILE *&f, uint &_M, uint &_N, size_t & nz, uint nodes = 0, size_t edges = 0, int type = TRAINING){
/* Opens `filename` and determines matrix dimensions / non-zero count, from
* (in priority order) a side ":info" file, explicit nodes/edges arguments,
* or the matrix-market banner of the data file itself. On return f is the
* opened data file; f stays NULL only when a VALIDATION file is missing. */
MM_typecode matcode;
bool info_file = false;
if (nodes == 0 && edges == 0){
FILE * ff = NULL;
/* auto detect presence of file named base_filename.info to find out matrix market size */
/* NOTE(review): the code actually probes "<filename>:info" (colon), not
* ".info" as the comment above says — confirm which suffix is intended. */
if ((ff = fopen((filename + ":info").c_str(), "r")) != NULL) {
info_file = true;
read_matrix_market_banner_and_size(ff, matcode, _M, _N, nz, filename + ":info");
fclose(ff);
}
}
if ((f = fopen(filename.c_str(), "r")) == NULL) {
if (type == VALIDATION){
std::cout<<std::endl;
return; //missing validation data is not fatal
}
else logstream(LOG_FATAL)<<"Failed to open input file: " << filename << std::endl;
}
/* no :info file and no explicit size — read the banner from the data file itself */
if (!info_file && nodes == 0 && edges == 0){
read_matrix_market_banner_and_size(f, matcode, _M, _N, nz, filename);
}
else if (nodes > 0 && edges > 0){
_M = _N = nodes;
nz = edges;
}
}
void read_global_mean(std::string base_filename, int type){
  // Reads matrix dimensions, edge count, global mean and time-bin count
  // from the ".gm" side file produced by write_global_mean().
  FILE * inf = fopen((base_filename + ".gm").c_str(), "r");
  // BUGFIX: the old code passed a NULL handle straight into fscanf when the
  // ".gm" file was missing, crashing instead of reporting the problem.
  if (inf == NULL)
    logstream(LOG_FATAL)<<"Failed to open global mean file: " << base_filename << ".gm" << std::endl;
  int rc;
  if (type == TRAINING)
    rc = fscanf(inf,"%d\n%d\n%ld\n%lg\n%d\n",&M, &N, &L, &globalMean, &K);
  else rc = fscanf(inf,"%d\n%d\n%ld\n%lg\n%d\n",&Me, &Ne, &Le, &globalMean2, &K);
  if (rc != 5)
    logstream(LOG_FATAL)<<"Failed to read global mean from file" << base_filename << ".gm" << std::endl;
  fclose(inf);
  if (type == TRAINING)
    logstream(LOG_INFO) << "Opened matrix size: " <<M << " x " << N << " edges: " << L << " Global mean is: " << globalMean << " time bins: " << K << " Now creating shards." << std::endl;
  else
    // BUGFIX: typo "VLIDATION" corrected in the log message.
    logstream(LOG_INFO) << "Opened VALIDATION matrix size: " <<Me << " x " << Ne << " edges: " << Le << " Global mean is: " << globalMean2 << " time bins: " << K << " Now creating shards." << std::endl;
}
void write_global_mean(std::string base_filename, int type){
  // Persists matrix dimensions, edge count, global mean and time-bin count
  // to the ".gm" side file consumed by read_global_mean().
  FILE * outf = fopen((base_filename + ".gm").c_str(), "w");
  // Robustness: the old code passed a NULL handle into fprintf when the
  // file could not be created; report the failure and skip the write.
  if (outf == NULL){
    perror("fopen failed");
    return;
  }
  if (type == TRAINING)
    fprintf(outf, "%d\n%d\n%ld\n%lg\n%d\n", M, N, L, globalMean, K);
  else
    fprintf(outf, "%d\n%d\n%ld\n%lg\n%d\n", Me, Ne, Le, globalMean2, K);
  fclose(outf);
}
void compute_matrix_size(size_t nz, int type){
  // Under k-fold cross validation only a fraction of the nz input lines
  // belongs to this split; otherwise all of them do.
  bool is_training = (type == TRAINING);
  if (kfold_cross_validation > 0){
    double folds = (double)kfold_cross_validation;
    if (is_training)
      L = (1 - 1.0/folds)*nz;
    else
      Le = (1.0/folds)*nz;
  }
  else if (is_training)
    L = nz;
  else
    Le = nz;
  if (is_training)
    logstream(LOG_INFO) << "Starting to read matrix-market input. Matrix dimensions: "
      << M << " x " << N << ", non-zeros: " << L << std::endl;
  else
    logstream(LOG_INFO) << "Starting to read VALIDATION matrix-market input. Matrix dimensions: "
      << Me << " x " << Ne << ", non-zeros: " << Le << std::endl;
}
/** decide on training vs. validation split in case of k fold cross validation */
bool decide_if_edge_is_active(size_t i, int type){
  // Without k-fold cross validation every edge belongs to its own set.
  if (kfold_cross_validation <= 0)
    return true;
  // Edge i falls into the held-out fold `kfold_cross_validation_index`.
  bool in_holdout = (((int)(i % kfold_cross_validation)) == kfold_cross_validation_index);
  if (type == TRAINING)
    return !in_holdout;   // training uses everything except the held-out fold
  if (type == VALIDATION)
    return in_holdout;    // validation uses only the held-out fold
  return true;            // other edge types are always active
}
template<typename vertex_data>
struct MMOutputter_vec{
/* Writes one scalar field (selected by `index` via get_val) of vertices
* [start,end) as a column vector in matrix-market format. R_output_format
* switches between sparse-coordinate and dense-array layout. */
MMOutputter_vec(std::string fname, uint start, uint end, int index, std::string comment, std::vector<vertex_data> & latent_factors_inmem) {
MM_typecode matcode;
set_matcode(matcode, R_output_format);
FILE * outf = open_file(fname.c_str(), "w");
mm_write_banner(outf, matcode);
if (comment != "")
fprintf(outf, "%%%s\n", comment.c_str());
if (R_output_format)
mm_write_mtx_crd_size(outf, end-start, 1, end-start);
else
mm_write_mtx_array_size(outf, end-start, 1);
for (uint i=start; i< end; i++)
if (R_output_format)
fprintf(outf, "%d %d %12.8g\n", i-start+input_file_offset, 1, latent_factors_inmem[i].get_val(index));
else
fprintf(outf, "%1.12e\n", latent_factors_inmem[i].get_val(index));
fclose(outf);
}
};
template<typename vertex_data>
struct MMOutputter_mat{
/* Writes the factor vectors of vertices [start,end) as a dense matrix in
* matrix-market format, one row per vertex. `size` overrides the number of
* columns; 0 means "use the pvec length of the first vertex". */
MMOutputter_mat(std::string fname, uint start, uint end, std::string comment, std::vector<vertex_data> & latent_factors_inmem, int size = 0) {
assert(start < end);
MM_typecode matcode;
set_matcode(matcode, R_output_format);
FILE * outf = open_file(fname.c_str(), "w");
mm_write_banner(outf, matcode);
if (comment != "")
fprintf(outf, "%%%s\n", comment.c_str());
int actual_Size = size > 0 ? size : latent_factors_inmem[start].pvec.size();
if (R_output_format)
mm_write_mtx_crd_size(outf, end-start, actual_Size, (end-start)*actual_Size);
else
mm_write_mtx_array_size(outf, end-start, actual_Size);
for (uint i=start; i < end; i++){
for(int j=0; j < actual_Size; j++) {
if (R_output_format)
fprintf(outf, "%d %d %12.8g\n", i-start+input_file_offset, j+input_file_offset, latent_factors_inmem[i].get_val(j));
else
fprintf(outf, "%1.12e\n", latent_factors_inmem[i].get_val(j));
}
}
fclose(outf);
}
};
struct MMOutputter_scalar {
  // Writes a single scalar value as a 1x1 matrix-market file.
  MMOutputter_scalar(std::string fname, std::string comment, double val) {
    MM_typecode matcode;
    set_matcode(matcode, R_output_format);
    FILE * outf = open_file(fname.c_str(), "w");
    mm_write_banner(outf, matcode);
    if (comment != "")
      fprintf(outf, "%%%s\n", comment.c_str());
    if (R_output_format){
      // sparse-coordinate layout: size line then one triplet
      mm_write_mtx_crd_size(outf, 1, 1, 1);
      fprintf(outf, "%d %d %12.8g\n", 1, 1, val);
    } else {
      // dense-array layout: size line then the raw value
      mm_write_mtx_array_size(outf, 1, 1);
      fprintf(outf, "%1.12e\n", val);
    }
    fclose(outf);
  }
};
/**
* Create a bipartite graph from a matrix. Each row corresponds to vertex
* with the same id as the row number (0-based), but vertices correponsing to columns
* have id + num-rows.
* Line format of the type
* [user] [item] [time/weight] [rating]
*/
template <typename als_edge_type>
int convert_matrixmarket4(std::string base_filename, bool add_time_edges = false, bool square = false, int type = TRAINING, int matlab_time_offset = 1) {
// Note, code based on: http://math.nist.gov/MatrixMarket/mmio/c/example_read.c
/* Preprocesses a 4-column ([user] [item] [time] [rating]) matrix-market
* file into GraphChi shards and returns the shard count. When
* add_time_edges is set, extra time->user and time->item edges are emitted
* so the time bins participate in the factorization (tensor case). */
FILE *f = NULL;
size_t nz;
/**
* Create sharder object
*/
int nshards;
/* shards from an earlier run may already exist — reuse them */
if ((nshards = find_shards<als_edge_type>(base_filename, get_option_string("nshards", "auto")))) {
if (check_origfile_modification_earlier<als_edge_type>(base_filename, nshards)) {
logstream(LOG_INFO) << "File " << base_filename << " was already preprocessed, won't do it again. " << std::endl;
read_global_mean(base_filename, type);
}
/* NOTE(review): returns the found shards even when the original file was
* modified after sharding — confirm re-preprocessing is not intended here. */
return nshards;
}
sharder<als_edge_type> sharderobj(base_filename);
sharderobj.start_preprocessing();
detect_matrix_size(base_filename, f, type == TRAINING? M:Me, type == TRAINING? N:Ne, nz);
if (f == NULL){
if (type == VALIDATION){
logstream(LOG_INFO)<< "Did not find validation file: " << base_filename << std::endl;
return -1;
}
else if (type == TRAINING)
logstream(LOG_FATAL)<<"Failed to open training input file: " << base_filename << std::endl;
}
compute_matrix_size(nz, type);
uint I, J;
double val, time;
bool active_edge = true;
if (!sharderobj.preprocessed_file_exists()) {
for (size_t i=0; i<nz; i++)
{
int rc = fscanf(f, "%d %d %lg %lg\n", &I, &J, &time, &val);
if (rc != 4)
logstream(LOG_FATAL)<<"Error when reading input file - line " << i << std::endl;
if (time < 0)
logstream(LOG_FATAL)<<"Time (third columns) should be >= 0 " << std::endl;
I-=input_file_offset; /* adjust from 1-based to 0-based */
J-=input_file_offset;
if (I >= M)
logstream(LOG_FATAL)<<"Row index larger than the matrix row size " << I << " > " << M << " in line: " << i << std::endl;
if (J >= N)
logstream(LOG_FATAL)<<"Col index larger than the matrix col size " << J << " > " << N << " in line; " << i << std::endl;
/* K tracks the largest time bin seen so far */
K = std::max((int)time, (int)K);
time -= matlab_time_offset;
if (time < 0 && add_time_edges)
logstream(LOG_FATAL)<<"Time bins should be >= 1 in row " << i << std::endl;
//avoid self edges
if (square && I == J)
continue;
/* k-fold cross validation may assign this line to the other split */
active_edge = decide_if_edge_is_active(i, type);
if (active_edge){
if (type == TRAINING)
globalMean += val;
else globalMean2 += val;
/* the edge stores the rating plus the global vertex id of its time bin (offset by M+N) */
sharderobj.preprocessing_add_edge(I, (square? J : (M + J)), als_edge_type(val, time+M+N));
}
//in case of a tensor, add besides of the user-> movie edge also
//time -> user and time-> movie edges
if (add_time_edges){
sharderobj.preprocessing_add_edge((uint)time + M + N, I, als_edge_type(val, M+J));
sharderobj.preprocessing_add_edge((uint)time + M + N, M+J , als_edge_type(val, I));
}
}
if (type == TRAINING){
uint toadd = 0;
if (implicitratingtype == IMPLICIT_RATING_RANDOM)
toadd = add_implicit_edges4(implicitratingtype, sharderobj);
globalMean += implicitratingvalue * toadd;
L += toadd;
/* finalize the running sum into the mean rating */
globalMean /= L;
logstream(LOG_INFO) << "Global mean is: " << globalMean << " time bins: " << K << " . Now creating shards." << std::endl;
}
else {
globalMean2 /= Le;
logstream(LOG_INFO) << "Global mean is: " << globalMean2 << " time bins: " << K << " . Now creating shards." << std::endl;
}
write_global_mean(base_filename, type);
sharderobj.end_preprocessing();
} else {
logstream(LOG_INFO) << "Matrix already preprocessed, just run sharder." << std::endl;
}
fclose(f);
logstream(LOG_INFO) << "Now creating shards." << std::endl;
// Shard with a specified number of shards, or determine automatically if not defined
nshards = sharderobj.execute_sharding(get_option_string("nshards", "auto"));
return nshards;
}
/**
* Create a bipartite graph from a matrix. Each row corresponds to vertex
* with the same id as the row number (0-based), but vertices correponsing to columns
* have id + num-rows.
*/
template <typename als_edge_type>
int convert_matrixmarket_and_item_similarity(std::string base_filename, std::string similarity_file, int tokens_per_row = 3) {
/* Preprocesses a ratings file plus an NxN item-item similarity file into a
* single sharded graph: rating edges connect users to items, similarity
* edges connect item nodes to each other. Returns the shard count. */
FILE *f = NULL, *fsim = NULL;
size_t nz, nz_sim;
/**
* Create sharder object
*/
int nshards;
/* reuse existing shards only if the original file has not changed since */
if ((nshards = find_shards<als_edge_type>(base_filename, get_option_string("nshards", "auto")))) {
if (check_origfile_modification_earlier<als_edge_type>(base_filename, nshards)) {
logstream(LOG_INFO) << "File " << base_filename << " was already preprocessed, won't do it again. " << std::endl;
read_global_mean(base_filename, TRAINING);
return nshards;
}
}
sharder<als_edge_type> sharderobj(base_filename);
sharderobj.start_preprocessing();
detect_matrix_size(base_filename, f, M, N, nz);
if (f == NULL)
logstream(LOG_FATAL)<<"Failed to open training input file: " << base_filename << std::endl;
uint N_row = 0 ,N_col = 0;
detect_matrix_size(similarity_file, fsim, N_row, N_col, nz_sim);
if (fsim == NULL)
logstream(LOG_FATAL)<<"Failed to open item similarity input file: " << similarity_file << std::endl;
/* the similarity matrix must be square over the item set */
if (N_row != N || N_col != N)
logstream(LOG_FATAL)<<"Wrong item similarity file matrix size: " << N_row <<" x " << N_col << " Instead of " << N << " x " << N << std::endl;
L=nz + nz_sim;
uint I, J;
double val = 1.0;
if (!sharderobj.preprocessed_file_exists()) {
logstream(LOG_INFO) << "Starting to read matrix-market input. Matrix dimensions: "
<< M << " x " << N << ", non-zeros: " << nz << std::endl;
/* pass 1: user->item rating edges (val defaults to 1.0 for 2-token rows) */
for (size_t i=0; i<nz; i++){
if (tokens_per_row == 3){
int rc = fscanf(f, "%u %u %lg\n", &I, &J, &val);
if (rc != 3)
logstream(LOG_FATAL)<<"Error when reading input file: " << i << std::endl;
}
else if (tokens_per_row == 2){
int rc = fscanf(f, "%u %u\n", &I, &J);
if (rc != 2)
logstream(LOG_FATAL)<<"Error when reading input file: " << i << std::endl;
}
else assert(false);
I-=input_file_offset; /* adjust from 1-based to 0-based */
J-=input_file_offset;
if (I >= M)
logstream(LOG_FATAL)<<"Row index larger than the matrix row size " << I << " > " << M << " in line: " << i << std::endl;
if (J >= N)
logstream(LOG_FATAL)<<"Col index larger than the matrix col size " << J << " > " << N << " in line; " << i << std::endl;
sharderobj.preprocessing_add_edge(I, M==N?J:M + J, als_edge_type((float)val, 0));
}
logstream(LOG_DEBUG)<<"Finished loading " << nz << " ratings from file: " << base_filename << std::endl;
/* pass 2: item<->item similarity edges; the weight is stored on the field
* matching the edge direction (I<J vs I>J), the other field is zero */
for (size_t i=0; i<nz_sim; i++){
if (tokens_per_row == 3){
int rc = fscanf(fsim, "%u %u %lg\n", &I, &J, &val);
if (rc != 3)
logstream(LOG_FATAL)<<"Error when reading input file: " << similarity_file << " line: " << i << std::endl;
}
else if (tokens_per_row == 2){
int rc = fscanf(fsim, "%u %u\n", &I, &J);
if (rc != 2)
logstream(LOG_FATAL)<<"Error when reading input file: " << i << std::endl;
}
else assert(false);
I-=input_file_offset; /* adjust from 1-based to 0-based */
J-=input_file_offset;
if (I >= N)
logstream(LOG_FATAL)<<"Row index larger than the matrix row size " << I << " > " << M << " in line: " << i << std::endl;
if (J >= N)
logstream(LOG_FATAL)<<"Col index larger than the matrix col size " << J << " > " << N << " in line; " << i << std::endl;
/* item node ids live in the range [M, M+N) */
sharderobj.preprocessing_add_edge(M+I, M+J, als_edge_type(I < J? val: 0, I>J? val: 0));
}
logstream(LOG_DEBUG)<<"Finished loading " << nz_sim << " ratings from file: " << similarity_file << std::endl;
write_global_mean(base_filename, TRAINING);
sharderobj.end_preprocessing();
} else {
logstream(LOG_INFO) << "Matrix already preprocessed, just run sharder." << std::endl;
}
fclose(f);
fclose(fsim);
logstream(LOG_INFO) << "Now creating shards." << std::endl;
// Shard with a specified number of shards, or determine automatically if not defined
nshards = sharderobj.execute_sharding(get_option_string("nshards", "auto"));
logstream(LOG_INFO) << "Successfully finished sharding for " << base_filename << std::endl;
logstream(LOG_INFO) << "Created " << nshards << " shards." << std::endl;
return nshards;
}
/**
* Create a bipartite graph from a matrix. Each row corresponds to vertex
* with the same id as the row number (0-based), but vertices correponsing to columns
* have id + num-rows (unless the matrix is square and allow_square is set,
* in which case row i and column i share vertex id i).
*
* @param base_filename  matrix market input file
* @param preprocessor   optional hook that rewrites the preprocessed edge file
* @param nodes          optional node-count hint forwarded to detect_matrix_size
* @param edges          optional edge-count hint forwarded to detect_matrix_size
* @param tokens_per_row 3 = "row col value" lines, 2 = "row col" (value defaults to 1.0)
* @param type           TRAINING or VALIDATION; selects which global size/mean fields are used
* @param allow_square   share vertex ids between rows and columns of a square matrix
* @return number of shards created, or -1 when an optional validation file is missing
*/
template <typename als_edge_type>
int convert_matrixmarket(std::string base_filename, SharderPreprocessor<als_edge_type> * preprocessor = NULL, size_t nodes = 0, size_t edges = 0, int tokens_per_row = 3, int type = TRAINING, int allow_square = true) {
// Note, code based on: http://math.nist.gov/MatrixMarket/mmio/c/example_read.c
FILE *f;
size_t nz;
/**
* Create sharder object
*/
int nshards;
// Reuse existing shards when the input file has not been modified since they were built.
if ((nshards = find_shards<als_edge_type>(base_filename, get_option_string("nshards", "auto")))) {
if (check_origfile_modification_earlier<als_edge_type>(base_filename, nshards)) {
logstream(LOG_INFO) << "File " << base_filename << " was already preprocessed, won't do it again. " << std::endl;
read_global_mean(base_filename, type);
return nshards;
}
}
sharder<als_edge_type> sharderobj(base_filename);
sharderobj.start_preprocessing();
// Opens the file into f and fills the global matrix dimensions (M,N or Me,Ne) plus nz.
detect_matrix_size(base_filename, f, type == TRAINING?M:Me, type == TRAINING?N:Ne, nz, nodes, edges, type);
if (f == NULL){
if (type == TRAINING){
logstream(LOG_FATAL)<<"Failed to open training input file: " << base_filename << std::endl;
}
else if (type == VALIDATION){
// A missing validation file is not fatal; caller treats -1 as "no validation".
logstream(LOG_INFO)<<"Validation file: " << base_filename << " is not found. " << std::endl;
return -1;
}
}
compute_matrix_size(nz, type);
uint I, J;
double val = 1.0; // default rating when the input has only two tokens per row
bool active_edge = true;
if (!sharderobj.preprocessed_file_exists()) {
for (size_t i=0; i<nz; i++)
{
if (tokens_per_row == 3){
int rc = fscanf(f, "%u %u %lg\n", &I, &J, &val);
if (rc != 3)
logstream(LOG_FATAL)<<"Error when reading input file: " << i << std::endl;
}
else if (tokens_per_row == 2){
int rc = fscanf(f, "%u %u\n", &I, &J);
if (rc != 2)
logstream(LOG_FATAL)<<"Error when reading input file: " << i << std::endl;
}
else assert(false);
if (I ==987654321 || J== 987654321) //hack - to be removed later
continue;
I-=(uint)input_file_offset; /* adjust from 1-based to 0-based */
J-=(uint)input_file_offset;
if (I >= M)
logstream(LOG_FATAL)<<"Row index larger than the matrix row size " << I+1 << " > " << M << " in line: " << i << std::endl;
if (J >= N)
logstream(LOG_FATAL)<<"Col index larger than the matrix col size " << J+1 << " > " << N << " in line; " << i << std::endl;
// Reject ratings outside the user-configured [minval, maxval] range (sentinels +/-1e100 = unbounded).
if (minval != -1e100 && val < minval)
logstream(LOG_FATAL)<<"Found illegal rating value: " << val << " where min value is: " << minval << std::endl;
if (maxval != 1e100 && val > maxval)
logstream(LOG_FATAL)<<"Found illegal rating value: " << val << " where max value is: " << maxval << std::endl;
// Subsampling hook: an edge may be dropped depending on the run configuration.
active_edge = decide_if_edge_is_active(i, type);
if (active_edge){
if (type == TRAINING)
globalMean += val;
else globalMean2 += val;
// Column vertex ids are shifted by M unless the matrix is square and allow_square is set.
sharderobj.preprocessing_add_edge(I, (M==N && allow_square)?J:M + J, als_edge_type((float)val));
}
}
if (type == TRAINING){
uint toadd = 0;
// Optionally inject random implicit (e.g. negative) ratings before computing the mean.
if (implicitratingtype == IMPLICIT_RATING_RANDOM)
toadd = add_implicit_edges(implicitratingtype, sharderobj);
globalMean += implicitratingvalue * toadd;
L += toadd;
globalMean /= L;
logstream(LOG_INFO) << "Global mean is: " << globalMean << " Now creating shards." << std::endl;
}
else {
globalMean2 /= Le;
logstream(LOG_INFO) << "Global mean is: " << globalMean2 << " Now creating shards." << std::endl;
}
write_global_mean(base_filename, type);
sharderobj.end_preprocessing();
if (preprocessor != NULL) {
preprocessor->reprocess(sharderobj.preprocessed_name(), base_filename);
}
} else {
logstream(LOG_INFO) << "Matrix already preprocessed, just run sharder." << std::endl;
}
fclose(f);
logstream(LOG_INFO) << "Now creating shards." << std::endl;
// Shard with a specified number of shards, or determine automatically if not defined
nshards = sharderobj.execute_sharding(get_option_string("nshards", "auto"));
logstream(LOG_INFO) << "Successfully finished sharding for " << base_filename<< std::endl;
logstream(LOG_INFO) << "Created " << nshards << " shards." << std::endl;
return nshards;
}
/**
* Read a matrix market vector from file and store entry i into
* latent_factors_inmem[i].pvec[type]. Supports both sparse ("row col value")
* and dense (one value per line) vectors; a dense row vector is transposed
* into a column vector first.
* @param filename       matrix market input file
* @param desc           graph descriptor (currently unused in the body)
* @param type           which pvec slot of each vertex receives the value
* @param optional_field when true, a missing file is silently ignored
* @param allow_zeros    when false, zero entries abort with LOG_FATAL
*/
void load_matrix_market_vector(const std::string & filename, const bipartite_graph_descriptor & desc,
int type, bool optional_field, bool allow_zeros)
{
int ret_code;
MM_typecode matcode;
uint M, N;
size_t i,nz;
logstream(LOG_INFO) <<"Going to read matrix market vector from input file: " << filename << std::endl;
FILE * f = open_file(filename.c_str(), "r", optional_field);
//if optional file not found return
if (f== NULL && optional_field){
return;
}
if (mm_read_banner(f, &matcode) != 0)
logstream(LOG_FATAL) << "Could not process Matrix Market banner." << std::endl;
/* This is how one can screen matrix types if their application */
/* only supports a subset of the Matrix Market data types. */
if (mm_is_complex(matcode) && mm_is_matrix(matcode) &&
mm_is_sparse(matcode) )
logstream(LOG_FATAL) << "sorry, this application does not support " << std::endl <<
"Market Market type: " << mm_typecode_to_str(matcode) << std::endl;
/* find out size of sparse matrix .... */
if (mm_is_sparse(matcode)){
if ((ret_code = mm_read_mtx_crd_size(f, &M, &N, &nz)) !=0)
logstream(LOG_FATAL) << "failed to read matrix market cardinality size " << std::endl;
}
else {
if ((ret_code = mm_read_mtx_array_size(f, &M, &N))!= 0)
logstream(LOG_FATAL) << "failed to read matrix market vector size " << std::endl;
if (N > M){ //if this is a row vector, transpose
int tmp = N;
N = M;
M = tmp;
}
nz = M*N;
}
uint row,col;
double val;
for (i=0; i<nz; i++)
{
if (mm_is_sparse(matcode)){
int rc = fscanf(f, "%u %u %lg\n", &row, &col, &val);
if (rc != 3){
logstream(LOG_FATAL) << "Failed reading input file: " << filename << "Problm at data row " << i << " (not including header and comment lines)" << std::endl;
}
row--; /* adjust from 1-based to 0-based */
col--;
}
else {
// dense vectors list one value per line; the implicit position is the row index
int rc = fscanf(f, "%lg\n", &val);
if (rc != 1){
logstream(LOG_FATAL) << "Failed reading input file: " << filename << "Problm at data row " << i << " (not including header and comment lines)" << std::endl;
}
row = i;
col = 0;
}
//some users have gibrish in text file - better check both I and J are >=0 as well
// NOTE: row is unsigned, so the >= 0 part of the check is vacuous.
assert(row >=0 && row< M);
assert(col == 0);
if (val == 0 && !allow_zeros)
logstream(LOG_FATAL)<<"Zero entries are not allowed in a sparse matrix market vector. Use --zero=true to avoid this error"<<std::endl;
//set observation value
vertex_data & vdata = latent_factors_inmem[row];
vdata.pvec[type] = val;
}
fclose(f);
}
/**
* Read a matrix market vector from file and return it as a vec of length M.
* Supports both sparse ("row col value") and dense (one value per line)
* vectors; a dense row vector is transposed into a column vector first.
* @param filename       matrix market input file
* @param optional_field when true, a missing file returns zeros(1) instead of failing
* @param allow_zeros    when false, zero entries abort with LOG_FATAL
* @return the loaded vector (zeros(1) if an optional file was absent)
*/
vec load_matrix_market_vector(const std::string & filename, bool optional_field, bool allow_zeros)
{
int ret_code;
MM_typecode matcode;
uint M, N;
size_t i,nz;
logstream(LOG_INFO) <<"Going to read matrix market vector from input file: " << filename << std::endl;
FILE * f = open_file(filename.c_str(), "r", optional_field);
//if optional file not found return
if (f== NULL && optional_field){
return zeros(1);
}
if (mm_read_banner(f, &matcode) != 0)
logstream(LOG_FATAL) << "Could not process Matrix Market banner." << std::endl;
/* This is how one can screen matrix types if their application */
/* only supports a subset of the Matrix Market data types. */
if (mm_is_complex(matcode) && mm_is_matrix(matcode) &&
mm_is_sparse(matcode) )
logstream(LOG_FATAL) << "sorry, this application does not support " << std::endl <<
"Market Market type: " << mm_typecode_to_str(matcode) << std::endl;
/* find out size of sparse matrix .... */
if (mm_is_sparse(matcode)){
if ((ret_code = mm_read_mtx_crd_size(f, &M, &N, &nz)) !=0)
logstream(LOG_FATAL) << "failed to read matrix market cardinality size " << std::endl;
}
else {
if ((ret_code = mm_read_mtx_array_size(f, &M, &N))!= 0)
logstream(LOG_FATAL) << "failed to read matrix market vector size " << std::endl;
if (N > M){ //if this is a row vector, transpose
int tmp = N;
N = M;
M = tmp;
}
nz = M*N;
}
vec ret = zeros(M);
uint row,col;
double val;
for (i=0; i<nz; i++)
{
if (mm_is_sparse(matcode)){
int rc = fscanf(f, "%u %u %lg\n", &row, &col, &val);
if (rc != 3){
logstream(LOG_FATAL) << "Failed reading input file: " << filename << "Problm at data row " << i << " (not including header and comment lines)" << std::endl;
}
row--; /* adjust from 1-based to 0-based */
col--;
}
else {
// dense vectors list one value per line; the implicit position is the row index
int rc = fscanf(f, "%lg\n", &val);
if (rc != 1){
logstream(LOG_FATAL) << "Failed reading input file: " << filename << "Problm at data row " << i << " (not including header and comment lines)" << std::endl;
}
row = i;
col = 0;
}
//some users have gibrish in text file - better check both I and J are >=0 as well
// NOTE: row is unsigned, so the >= 0 part of the check is vacuous.
assert(row >=0 && row< M);
assert(col == 0);
if (val == 0 && !allow_zeros)
logstream(LOG_FATAL)<<"Zero entries are not allowed in a sparse matrix market vector. Use --zero=true to avoid this error"<<std::endl;
//set observation value
ret[row] = val;
}
fclose(f);
logstream(LOG_INFO)<<"Succesfully read a vector of size: " << M << " [ " << nz << "]" << std::endl;
return ret;
}
/* Emit one double entry of an output vector: sparse format prints the
 * 1-based coordinates followed by the value on its own line, dense format
 * prints just the value followed by a space. */
inline void write_row(int row, int col, double val, FILE * f, bool issparse){
    if (!issparse) {
        fprintf(f, "%10.13g ", val);
        return;
    }
    fprintf(f, "%d %d %10.13g\n", row, col, val);
}
/* Emit one integer entry of an output vector: sparse format prints the
 * 1-based coordinates followed by the value on its own line, dense format
 * prints just the value followed by a space. */
inline void write_row(int row, int col, int val, FILE * f, bool issparse){
    if (!issparse) {
        fprintf(f, "%d ", val);
        return;
    }
    fprintf(f, "%d %d %d\n", row, col, val);
}
/** Set the Matrix Market value type flag matching the element type of the
 *  container being written (real for vec, integer for ivec).
 *  NOTE(review): the primary-template parameter is spelled "matcore" — a
 *  typo for "matcode", harmless since only the specializations are defined. */
template<typename T>
inline void set_typecode(MM_typecode & matcore);
/** vec holds reals -> mark the Matrix Market type as real. */
template<>
inline void set_typecode<vec>(MM_typecode & matcode){
mm_set_real(&matcode);
}
/** ivec holds integers -> mark the Matrix Market type as integer. */
template<>
inline void set_typecode<ivec>(MM_typecode & matcode){
mm_set_integer(&matcode);
}
/**
* Write a vector to disk in Matrix Market format (as an n x 1 matrix).
* NOTE(review): the template parameter is named "vec", shadowing the vec
* type used elsewhere; set_typecode<vec> therefore resolves against the
* caller's container type, which is the intended dispatch.
* @param datafile output file name (overwritten)
* @param output   vector to save (element type selects real/integer typecode)
* @param issparse write sparse coordinate format instead of a dense array
* @param comment  optional single-line comment placed after the banner
*/
template<typename vec>
void save_matrix_market_format_vector(const std::string datafile, const vec & output, bool issparse, std::string comment)
{
MM_typecode matcode;
mm_initialize_typecode(&matcode);
mm_set_matrix(&matcode);
mm_set_coordinate(&matcode);
if (issparse)
mm_set_sparse(&matcode);
else mm_set_dense(&matcode);
set_typecode<vec>(matcode);
FILE * f = fopen(datafile.c_str(),"w");
if (f == NULL)
logstream(LOG_FATAL)<<"Failed to open file: " << datafile << " for writing. " << std::endl;
mm_write_banner(f, matcode);
if (comment.size() > 0) // add a comment to the matrix market header
fprintf(f, "%c%s\n", '%', comment.c_str());
// header: sparse stores (rows, cols, nnz); dense stores only (rows, cols)
if (issparse)
mm_write_mtx_crd_size(f, output.size(), 1, output.size());
else
mm_write_mtx_array_size(f, output.size(), 1);
for (int j=0; j<(int)output.size(); j++){
write_row(j+1, 1, output[j], f, issparse); // 1-based coordinates
if (!issparse)
fprintf(f, "\n");
}
fclose(f);
}
/** Convenience wrapper: log the destination and size, then save the vector
 *  in Matrix Market format via save_matrix_market_format_vector(). */
template<typename vec>
inline void write_output_vector(const std::string & datafile, const vec& output, bool issparse, std::string comment = ""){
logstream(LOG_INFO)<<"Going to write output to file: " << datafile << " (vector of size: " << output.size() << ") " << std::endl;
save_matrix_market_format_vector(datafile, output,issparse, comment);
}
/**
 * Load a Matrix Market factor matrix from file and store its rows into
 * latent_factors_inmem starting at the given vertex offset.
 * Supports both sparse (row col value) and dense (row-major stream of
 * values) Matrix Market files.
 * @param filename matrix market input file
 * @param offset   index of the first vertex to fill (0 for U, M for V)
 * @param D        expected number of columns (feature vector width)
 */
void load_matrix_market_matrix(const std::string & filename, int offset, int D){
  MM_typecode matcode;
  uint i,I,J;
  double val;
  uint rows, cols;
  size_t nnz;
  FILE * f = open_file(filename.c_str() ,"r");
  int rc = mm_read_banner(f, &matcode);
  if (rc != 0)
    logstream(LOG_FATAL)<<"Failed to load matrix market banner in file: " << filename << std::endl;
  if (mm_is_sparse(matcode)){
    rc = mm_read_mtx_crd_size(f, &rows, &cols, &nnz); // reuse outer rc (was shadowed before)
    if (rc != 0)
      logstream(LOG_FATAL)<<"Failed to read matrix market cardinality size in file: " << filename << std::endl;
  }
  else { //dense matrix
    rc = mm_read_mtx_array_size(f, &rows, &cols);
    if (rc != 0)
      logstream(LOG_FATAL)<<"Failed to read matrix market array size in file: " << filename << std::endl;
    nnz = rows * cols;
  }
  // Bugfix: the old message suggested "--D=<D>", i.e. the value the user had
  // already passed; the correct suggestion is the column count of the file.
  if (D != (int)cols)
    logstream(LOG_FATAL)<<"Wrong matrix size detected, command line argument should be --D=" << cols << " instead of : " << D << std::endl;
  for (i=0; i<nnz; i++){
    if (mm_is_sparse(matcode)){
      rc = fscanf(f, "%u %u %lg\n", &I, &J, &val);
      if (rc != 3)
        logstream(LOG_FATAL)<<"Error reading input line " << i << std::endl;
      I--; J--; /* adjust from 1-based to 0-based coordinates */
      assert(I < rows); // I, J are unsigned, so only the upper bound needs checking
      assert(J < cols);
      latent_factors_inmem[I+offset].set_val(J,val);
    }
    else {
      rc = fscanf(f, "%lg", &val);
      if (rc != 1)
        logstream(LOG_FATAL)<<"Error reading nnz " << i << std::endl;
      I = i / cols; /* dense entries are streamed row-major */
      J = i % cols;
      latent_factors_inmem[I+offset].set_val(J, val);
    }
  }
  logstream(LOG_INFO) << "Factors from file: loaded matrix of size " << rows << " x " << cols << " from file: " << filename << " total of " << nnz << " entries. "<< i << std::endl;
  fclose(f);
}
#endif
| C++ |
/* fix for MAC OS where sometimes getline() is not supported */
#ifndef __GETLINE_GRAPHCHI_MAXOS_FIX
#define __GETLINE_GRAPHCHI_MAXOS_FIX
/* PASTE AT TOP OF FILE */
#include <stdio.h>  /* flockfile, getc_unlocked, funlockfile */
#include <stdlib.h> /* malloc, realloc */
#include <errno.h>  /* errno */
#include <unistd.h> /* ssize_t */
extern "C" ssize_t getline(char **lineptr, size_t *n, FILE *stream);
/* PASTE REMAINDER AT BOTTOM OF FILE */
/**
 * Minimal POSIX-compatible getline() replacement.
 * Reads one '\n'-terminated line (newline retained) from stream into *linep,
 * growing the buffer with realloc() as needed and NUL-terminating it.
 * @return number of characters read (excluding the NUL terminator),
 *         or -1 on EOF / error (errno set on error).
 *
 * Bugfixes vs. the previous version:
 *  - the working pointer is refreshed after realloc (was a use-after-free
 *    of the old buffer)
 *  - growth happens BEFORE writing index i and always leaves room for the
 *    terminator (the old "i > *np" check allowed a one-byte heap overflow)
 *  - the terminator is written at p[i], not p[i+1] (the old code left one
 *    uninitialized byte when input ended at EOF without a newline)
 *  - the return value counts the trailing newline, matching POSIX
 */
ssize_t
getline(char **linep, size_t *np, FILE *stream)
{
    size_t i = 0;
    int ch;

    if (!linep || !np || !stream) {
        errno = EINVAL;
        return -1;
    }
    if (!(*linep) || !(*np)) {
        *np = 2400;
        *linep = (char *)malloc(*np);
        if (!(*linep)) {
            return -1;
        }
    }
    flockfile(stream);
    while ((ch = getc_unlocked(stream)) != EOF) {
        if (i + 1 >= *np) { /* keep one byte free for the NUL terminator */
            size_t m = *np * 2;
            char *s = (char *)realloc(*linep, m);
            if (!s) {
                int error = errno;
                funlockfile(stream);
                errno = error;
                return -1;
            }
            *linep = s; /* refresh before writing: the old block may be gone */
            *np = m;
        }
        (*linep)[i++] = (char)ch;
        if ('\n' == ch) break;
    }
    funlockfile(stream);
    /* Null-terminate the string. */
    (*linep)[i] = '\0';
    return (i > 0) ? (ssize_t)i : -1;
}
#endif //__GETLINE_GRAPHCHI_MAXOS_FIX
| C++ |
/**
* @file
* @author Danny Bickson, based on code by Aapo Kyrola
* @version 1.0
*
* @section LICENSE
*
* Copyright [2012] [Carnegie Mellon University]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @section DESCRIPTION
*
* Matrix factorization with the Alternative Least Squares (ALS) algorithm.
* See the papers:
* H.-F. Yu, C.-J. Hsieh, S. Si, I. S. Dhillon, Scalable Coordinate Descent Approaches to Parallel Matrix Factorization for Recommender Systems. IEEE International Conference on Data Mining(ICDM), December 2012.
* Steffen Rendle, Zeno Gantner, Christoph Freudenthaler, and Lars Schmidt-Thieme. 2011. Fast context-aware recommendations with factorization machines. In Proceedings of the 34th international ACM SIGIR conference on Research and development in Information Retrieval (SIGIR '11). ACM, New York, NY, USA, 635-644. *
*/
#include "common.hpp"
#include "eigen_wrapper.hpp"
// ALS regularization weight; main() overrides it with --lambda (default 0.065).
double lambda = 0.065;
struct vertex_data {
vec pvec;
vertex_data() {
pvec = zeros(D);
}
void set_val(int index, float val){
pvec[index] = val;
}
float get_val(int index){
return pvec[index];
}
};
/**
* Type definitions. Remember to create suitable graph shards using the
* Sharder-program.
*/
typedef vertex_data VertexDataType;
typedef float EdgeDataType; // Edges store the "rating" of user->movie pair
// Engine pointers are set in main(); the validation engine stays NULL when no
// validation file was supplied.
graphchi_engine<VertexDataType, EdgeDataType> * pengine = NULL;
graphchi_engine<VertexDataType, EdgeDataType> * pvalidation_engine = NULL;
// All latent factor vectors are kept in RAM, indexed by vertex id.
std::vector<vertex_data> latent_factors_inmem;
#include "io.hpp"
#include "rmse.hpp"
#include "rmse_engine.hpp"
/** compute a missing value based on ALS algorithm
 * @param user       latent factors of the user vertex
 * @param movie      latent factors of the item vertex
 * @param rating     observed rating
 * @param prediction [out] clipped dot-product prediction
 * @param extra      unused hook kept for the common prediction signature
 * @return squared prediction error
 */
float als_predict(const vertex_data& user,
const vertex_data& movie,
const float rating,
double & prediction,
void * extra = NULL){
prediction = dot_prod(user.pvec, movie.pvec);
//truncate prediction to allowed values
prediction = std::min((double)prediction, maxval);
prediction = std::max((double)prediction, minval);
//return the squared error
float err = rating - prediction;
assert(!std::isnan(err));
return err*err;
}
/**
* GraphChi programs need to subclass GraphChiProgram<vertex-type, edge-type>
* class. The main logic is usually in the update function.
*/
struct ALSVerticesInMemProgram : public GraphChiProgram<VertexDataType, EdgeDataType> {
/**
* Vertex update function - computes the ICDM update function in parallel.
* Performs cyclic coordinate descent over the D latent coordinates of this
* vertex (equations (5), (7), (8) of the Yu et al. ICDM 2012 paper),
* keeping the per-edge residuals in R_cache.
*/
void update(graphchi_vertex<VertexDataType, EdgeDataType> &vertex, graphchi_context &gcontext) {
vertex_data & vdata = latent_factors_inmem[vertex.id()];
double regularization = lambda;
// optionally scale the regularizer by the vertex degree
if (regnormal)
regularization *= vertex.num_edges();
// R_cache[j] holds the residual A_ij - w_i^T h_j for edge j
vec R_cache = zeros(vertex.num_edges());
for (int t=0; t<D; t++){
double numerator = 0;
double denominator = regularization;
// RMSE is accumulated once per vertex (t == 0) and only for vertices
// with out-edges (user side of the bipartite graph)
bool compute_rmse = (vertex.num_outedges() > 0 && t == 0);
for (int j=0; j < vertex.num_edges(); j++) {
float observation = vertex.edge(j)->get_data();
vertex_data & nbr_latent = latent_factors_inmem[vertex.edge(j)->vertex_id()];
double prediction;
double rmse = 0;
// initialize the residual cache on the first coordinate pass
if (t == 0){
rmse = als_predict(vdata, nbr_latent, observation, prediction);
R_cache[j] = observation - prediction;
}
//compute numerator of equation (5) in ICDM paper above
// (A_ij - w_i^T*h_j + wit * h_jt )*h_jt
numerator += (R_cache[j] + vdata.pvec[t]* nbr_latent.pvec[t])*nbr_latent.pvec[t];
//compute denominator of equation (5) in ICDM paper above
// h_jt^2
denominator += pow(nbr_latent.pvec[t],2);
//record rmse
if (compute_rmse)
rmse_vec[omp_get_thread_num()]+=rmse;
}
assert(denominator > 0);
// z is the closed-form minimizer for coordinate t
double z = numerator/denominator;
vec old = vdata.pvec;
//if (t > 0){
// refresh the residuals to reflect the new value of coordinate t
for (int j=0; j< vertex.num_edges(); j++){
vertex_data & nbr_latent = latent_factors_inmem[vertex.edge(j)->vertex_id()];
//update using equation (7) in ICDM paper
//R_ij -= (z - w_it )*h_jt
R_cache[j] -= ((z - old[t])*nbr_latent.pvec[t]);
}
//}
//update using equation (8) in ICDM paper
//w_it = z;
vdata.pvec[t] = z;
}
}
/**
* Called before an iteration is started: reset the per-thread RMSE counters.
*/
void before_iteration(int iteration, graphchi_context &gcontext) {
reset_rmse(gcontext.execthreads);
}
/**
* Called after an iteration has finished: report the training RMSE and run
* the validation engine, if one was configured.
*/
void after_iteration(int iteration, graphchi_context &gcontext) {
training_rmse(iteration, gcontext);
run_validation(pvalidation_engine, gcontext);
}
};
/** Write the two factor matrices (users U, items V) to Matrix Market files
 *  named <filename>_U.mm and <filename>_V.mm. */
void output_als_result(std::string filename) {
MMOutputter_mat<vertex_data> user_mat(filename + "_U.mm", 0, M , "This file contains ALS output matrix U. In each row D factors of a single user node.", latent_factors_inmem);
MMOutputter_mat<vertex_data> item_mat(filename + "_V.mm", M ,M+N, "This file contains ALS output matrix V. In each row D factors of a single item node.", latent_factors_inmem);
logstream(LOG_INFO) << "ALS output files (in matrix market format): " << filename << "_U.mm" <<
", " << filename + "_V.mm " << std::endl;
}
/** ALS driver: parse options, shard the input, optionally load initial
 *  factors, run the coordinate-descent engine and write the results. */
int main(int argc, const char ** argv) {
print_copyright();
/* GraphChi initialization will read the command line
arguments and the configuration file. */
graphchi_init(argc, argv);
/* Metrics object for keeping track of performance counters
and other information. Currently required. */
metrics m("als-inmemory-factors");
lambda = get_option_float("lambda", 0.065);
parse_command_line_args();
parse_implicit_command_line();
/* Preprocess data if needed, or discover preprocess files */
// allow_square=false: row and column vertices always get distinct ids
int nshards = convert_matrixmarket<EdgeDataType>(training, NULL, 0, 0, 3, TRAINING, false);
init_feature_vectors<std::vector<vertex_data> >(M+N, latent_factors_inmem, !load_factors_from_file);
if (validation != ""){
int vshards = convert_matrixmarket<EdgeDataType>(validation, NULL, 0, 0, 3, VALIDATION, false);
init_validation_rmse_engine<VertexDataType, EdgeDataType>(pvalidation_engine, vshards, &als_predict);
}
/* load initial state from disk (optional) */
if (load_factors_from_file){
load_matrix_market_matrix(training + "_U.mm", 0, D);
load_matrix_market_matrix(training + "_V.mm", M, D);
}
/* Run */
ALSVerticesInMemProgram program;
graphchi_engine<VertexDataType, EdgeDataType> engine(training, nshards, false, m);
set_engine_flags(engine);
pengine = &engine;
engine.run(program, niters);
/* Output latent factor matrices in matrix-market format */
output_als_result(training);
test_predictions(&als_predict);
/* Report execution metrics */
if (!quiet)
metrics_report(m);
return 0;
}
| C++ |
#ifndef TYPES_COMMON
#define TYPES_COMMON
typedef double real_type;
/**
 * Describes a matrix stored as a bipartite graph. Row nodes occupy ids
 * [0, rows) and column nodes ids [rows, rows+cols) — unless the matrix is
 * square, in which case row i and column i share node i.
 */
struct bipartite_graph_descriptor {
  int rows, cols;
  size_t nonzeros;
  bool force_non_square; // when set, a square matrix still gets separate row/column nodes
  bipartite_graph_descriptor() : rows(0), cols(0), nonzeros(0), force_non_square(false) { }
  /** true when rows == cols and node sharing has not been disabled */
  bool is_square() const {
    if (force_non_square)
      return false;
    return rows == cols;
  }
  /** id of the first row (resp. column) node */
  int get_start_node(bool _rows) const {
    if (is_square() || _rows)
      return 0;
    return rows;
  }
  /** one past the id of the last row (resp. column) node */
  int get_end_node(bool _rows) const {
    if (is_square() || _rows)
      return rows;
    return rows + cols;
  }
  /** number of row (resp. column) nodes */
  int num_nodes(bool _rows) const { return _rows ? rows : cols; }
  /** total number of graph nodes */
  int total() const { return is_square() ? rows : rows + cols; }
  /** does this id belong to a row node? */
  bool is_row_node(int id) const { return id < rows; }
  /** limit debug printing to the boundary row/column nodes */
  bool toprint(int id) const {
    return (id == 0) || (id == rows - 1) || (id == rows) || (id == rows + cols - 1);
  }
}; // end of bipartite graph descriptor
#endif
| C++ |
#ifndef _CLIMF_HPP__
#define _CLIMF_HPP__
/**
* @file
* @author Mark Levy
* @version 1.0
*
* @section LICENSE
*
* Copyright [2012] [Carnegie Mellon University]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @section DESCRIPTION
*
* CLiMF Collaborative Less-is-More Filtering, a variant of latent factor CF
* which optimises a lower bound of the smoothed reciprocal rank of "relevant"
* items in ranked recommendation lists. The intention is to promote diversity
* as well as accuracy in the recommendations. The method assumes binary
* relevance data, as for example in friendship or follow relationships.
*
* CLiMF: Learning to Maximize Reciprocal Rank with Collaborative Less-is-More Filtering
* Yue Shi, Martha Larson, Alexandros Karatzoglou, Nuria Oliver, Linas Baltrunas, Alan Hanjalic
* ACM RecSys 2012
*
*/
/** Per-vertex state for CLiMF: a D-dimensional latent feature vector. */
struct vertex_data {
vec pvec; //storing the feature vector
vertex_data()
{
pvec = zeros(D);
}
// assign one latent coordinate
void set_val(int index, float val)
{
pvec[index] = val;
}
// read one latent coordinate
float get_val(int index) const
{
return pvec[index];
}
};
typedef vertex_data VertexDataType; // Vertices store the low-dimensional factorized feature vector
typedef float EdgeDataType; // Edges store the rating/observed count for a user->item pair
graphchi_engine<VertexDataType, EdgeDataType> * pengine = NULL;
graphchi_engine<VertexDataType, EdgeDataType> * pvalidation_engine = NULL;
// All latent factor vectors are kept in RAM, indexed by vertex id.
std::vector<vertex_data> latent_factors_inmem;
double sgd_gamma = 1e-3; // sgd step size
double sgd_step_dec = 0.9; // sgd step decrement
double sgd_lambda = 1e-3; // sgd regularization
double binary_relevance_thresh = 0; // min rating for binary relevance
int halt_on_mrr_decrease = 0; // whether to halt if smoothed MRR increases
int num_ratings = 10000; // number of top predictions over which we compute actual MRR
vec objective_vec; // cumulative sum of smoothed MRR per thread
double training_objective; // smoothed-MRR objective of the current iteration
double last_training_objective; // objective of the previous iteration (for halting)
/* other relevant global args defined in common.hpp:
uint M; // number of users
uint N; // number of items
uint Me; // number of users (validation file)
uint Ne; // number of items (validation file)
uint Le; // number of ratings (validation file)
size_t L; // number of ratings (training file)
int D = 20; // feature vector width
*/
// logistic function g(x) = 1 / (1 + e^-x); aborts if the result is not finite
// (only possible for a NaN input, since 1/(1+inf) underflows cleanly to 0)
double g(double x)
{
double ret = 1.0 / (1.0 + std::exp(-x));
if (std::isinf(ret) || std::isnan(ret))
{
logstream(LOG_FATAL) << "overflow in g()" << std::endl;
}
return ret;
}
// derivative of logistic function: dg(x) = g(x) * (1 - g(x))
// Bugfix: computed via exp(-|x|) so neither numerator nor denominator can
// overflow. The old exp(x)/(1+exp(x))^2 form evaluated to inf/inf = NaN for
// large positive x (x >~ 710 in double precision) and aborted the run. The
// expression is symmetric in x, so substituting -|x| is mathematically
// identical.
double dg(double x)
{
  double e = std::exp(-std::abs(x));
  double ret = e / ((1.0 + e) * (1.0 + e));
  if (std::isinf(ret) || std::isnan(ret))
  {
    logstream(LOG_FATAL) << "overflow in dg()" << std::endl;
  }
  return ret;
}
/** An edge is "relevant" (a positive example for CLiMF) when its observed
 *  rating reaches binary_relevance_thresh. */
bool is_relevant(graphchi_edge<EdgeDataType> * e)
{
return e->get_data() >= binary_relevance_thresh; // for some reason get_data() is non const :(
}
#endif
| C++ |
/**
* @file
* @author Danny Bickson
* @version 1.0
*
* @section LICENSE
*
* Copyright [2012] Carnegie Mellon University]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @section DESCRIPTION
*
* Matrix factorization using weighted alternating least squares algorithm (WALS)
* The algorithm is explained in: Collaborative Filtering for Implicit Feedback Datasets Hu, Y.; Koren, Y.; Volinsky, C. IEEE International Conference on Data Mining (ICDM 2008), IEEE (2008).
* @section USAGE
*
*
*
*/
#include "common.hpp"
#include "eigen_wrapper.hpp"
// WALS regularization weight.
// NOTE(review): main() overrides it with get_option_float("lambda", 0.065),
// so this 1e-3 initializer is effectively dead — confirm the intended default.
double lambda = 1e-3;
/** Per-vertex state for WALS: a D-dimensional latent feature vector. */
struct vertex_data {
vec pvec;
vertex_data() {
pvec = zeros(D);
}
// assign one latent coordinate
void set_val(int index, float val){
pvec[index] = val;
}
// read one latent coordinate
float get_val(int index){
return pvec[index];
}
};
/** Edge payload for WALS: the observed rating plus a second scalar.
 *  NOTE(review): "time" is used as a multiplicative weight in the WALS
 *  update — presumably the observation confidence; confirm with callers. */
struct edge_data {
  double weight; // observed rating value
  double time;   // per-observation weight
  edge_data() : weight(0), time(0) { }
  edge_data(double weight, double time) : weight(weight), time(time) { }
};
/**
* Type definitions. Remember to create suitable graph shards using the
* Sharder-program.
*/
typedef vertex_data VertexDataType;
typedef edge_data EdgeDataType; // Edges store the "rating" of user->movie pair
// Engine pointers are set in main(); the validation engine stays NULL when no
// validation file was supplied.
graphchi_engine<VertexDataType, EdgeDataType> * pengine = NULL;
graphchi_engine<VertexDataType, EdgeDataType> * pvalidation_engine = NULL;
// All latent factor vectors are kept in RAM, indexed by vertex id.
std::vector<vertex_data> latent_factors_inmem;
#include "io.hpp"
#include "rmse.hpp"
#include "rmse_engine4.hpp"
/** compute a missing value based on WALS algorithm
 * @param user       latent factors of the user vertex
 * @param movie      latent factors of the item vertex
 * @param rating     observed rating
 * @param prediction [out] clipped dot-product prediction
 * @param extra      unused hook kept for the common prediction signature
 * @return squared prediction error
 */
float wals_predict(const vertex_data& user,
const vertex_data& movie,
const float rating,
double & prediction,
void * extra = NULL){
prediction = dot_prod(user.pvec, movie.pvec);
//truncate prediction to allowed values
prediction = std::min((double)prediction, maxval);
prediction = std::max((double)prediction, minval);
//return the squared error
float err = rating - prediction;
assert(!std::isnan(err));
return err*err;
}
/**
 * GraphChi programs need to subclass GraphChiProgram<vertex-type, edge-type>
 * class. The main logic is usually in the update function.
 */
struct WALSVerticesInMemProgram : public GraphChiProgram<VertexDataType, EdgeDataType> {
  /**
   * Vertex update function: weighted least-squares solve for this vertex's
   * latent factor vector (Hu/Koren/Volinsky, ICDM 2008).
   */
  void update(graphchi_vertex<VertexDataType, EdgeDataType> &vertex, graphchi_context &gcontext) {
    vertex_data & vdata = latent_factors_inmem[vertex.id()];
    mat XtX = mat::Zero(D, D);
    vec Xty = vec::Zero(D);
    // only vertices with out-edges accumulate the training error
    bool compute_rmse = (vertex.num_outedges() > 0);
    // Accumulate the weighted normal equations over all neighboring factors.
    for (int e = 0; e < vertex.num_edges(); e++) {
      const edge_data & edge = vertex.edge(e)->get_data();
      vertex_data & nbr_latent = latent_factors_inmem[vertex.edge(e)->vertex_id()];
      Xty += nbr_latent.pvec * edge.weight * edge.time;
      // only the upper triangle is filled; the LDLT solve below reads it via selfadjointView
      XtX.triangularView<Eigen::Upper>() += nbr_latent.pvec * nbr_latent.pvec.transpose() * edge.time;
      if (compute_rmse) {
        double prediction;
        rmse_vec[omp_get_thread_num()] += wals_predict(vdata, nbr_latent, edge.weight, prediction) * edge.time;
      }
    }
    double regularization = lambda;
    // Bugfix: scale the LOCAL regularization term by the vertex degree.
    // The old code did "lambda *= vertex.num_edges()", mutating the global
    // lambda on every vertex update (so it grew without bound across the
    // run) while the local value added to the diagonal never changed.
    if (regnormal)
      regularization *= vertex.num_edges();
    for (int i = 0; i < D; i++)
      XtX(i, i) += regularization;
    // Solve the least squares problem with eigen using Cholesky (LDLT) decomposition
    vdata.pvec = XtX.selfadjointView<Eigen::Upper>().ldlt().solve(Xty);
  }
  /**
   * Called before an iteration is started: reset the per-thread RMSE counters.
   */
  void before_iteration(int iteration, graphchi_context &gcontext) {
    reset_rmse(gcontext.execthreads);
  }
  /**
   * Called after an iteration has finished: report the training RMSE and run
   * the validation engine, if one was configured.
   */
  void after_iteration(int iteration, graphchi_context &gcontext) {
    training_rmse(iteration, gcontext);
    run_validation4(pvalidation_engine, gcontext);
  }
};
/** Write the two factor matrices (users U, items V) to Matrix Market files
 *  named <filename>_U.mm and <filename>_V.mm. */
void output_als_result(std::string filename) {
MMOutputter_mat<vertex_data> user_mat(filename + "_U.mm", 0, M, "This file contains WALS output matrix U. In each row D factors of a single user node.", latent_factors_inmem);
MMOutputter_mat<vertex_data> item_mat(filename + "_V.mm", M, M+N, "This file contains WALS output matrix V. In each row D factors of a single item node.", latent_factors_inmem);
logstream(LOG_INFO) << "WALS output files (in matrix market format): " << filename << "_U.mm" <<
", " << filename + "_V.mm " << std::endl;
}
/** WALS driver: parse options, shard the 4-column input, run the weighted
 *  ALS engine and write the results (with optional unit-test thresholds). */
int main(int argc, const char ** argv) {
print_copyright();
/* GraphChi initialization will read the command line
arguments and the configuration file. */
graphchi_init(argc, argv);
/* Metrics object for keeping track of performance counters
and other information. Currently required. */
metrics m("als-inmemory-factors");
lambda = get_option_float("lambda", 0.065);
parse_command_line_args();
parse_implicit_command_line();
if (unittest == 1){
if (training == "") training = "test_wals";
niters = 100;
}
/* Preprocess data if needed, or discover preprocess files */
// 4-column input: row, col, rating, weight
int nshards = convert_matrixmarket4<edge_data>(training);
init_feature_vectors<std::vector<vertex_data> >(M+N, latent_factors_inmem, !load_factors_from_file);
if (validation != ""){
int vshards = convert_matrixmarket4<EdgeDataType>(validation, false, M==N, VALIDATION, 0);
init_validation_rmse_engine<VertexDataType, EdgeDataType>(pvalidation_engine, vshards, &wals_predict, true, false, 0);
}
/* load initial state from disk (optional) */
if (load_factors_from_file){
load_matrix_market_matrix(training + "_U.mm", 0, D);
load_matrix_market_matrix(training + "_V.mm", M, D);
}
/* Run */
WALSVerticesInMemProgram program;
graphchi_engine<VertexDataType, EdgeDataType> engine(training, nshards, false, m);
set_engine_flags(engine);
pengine = &engine;
engine.run(program, niters);
/* Output latent factor matrices in matrix-market format */
output_als_result(training);
test_predictions(&wals_predict);
// unit-test mode: fail loudly when the final errors exceed known thresholds
if (unittest == 1){
if (dtraining_rmse > 0.03)
logstream(LOG_FATAL)<<"Unit test 1 failed. Training RMSE is: " << training_rmse << std::endl;
if (dvalidation_rmse > 0.61)
logstream(LOG_FATAL)<<"Unit test 1 failed. Validation RMSE is: " << validation_rmse << std::endl;
}
/* Report execution metrics */
if (!quiet)
metrics_report(m);
return 0;
}
| C++ |
/**
* Copyright (c) 2009 Carnegie Mellon University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an "AS
* IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language
* governing permissions and limitations under the License.
*
* For more about this software visit:
*
* http://www.graphlab.ml.cmu.edu
*
*/
#ifndef NPROB_HPP
#define NPROB_HPP
#include <cmath>
/* Bugfix: pi was mistyped as 3.14152965 (digits transposed); use the correct
 * value so the Box-Muller transform in randn1() below is unbiased. */
#define pi 3.14159265358979323846
/**
 Probability distribution helper functions written by Danny Bickson, CMU
 */
using namespace std;
//#define WISHART_TEST
//#define WISHART_TEST2
/** Draw `size` chi-square random variates, entry i with v[i] degrees of
 *  freedom, using the identity chi2(k) = 2 * Gamma(k/2).
 *  Under WISHART_TEST / WISHART_TEST2 the result is replaced by fixed
 *  vectors for reproducible testing. */
vec chi2rnd(vec v, int size){
vec ret = zeros(size);
for (int i=0; i<size; i++)
ret[i] = 2.0* gamma(v[i]/2.0);
#ifdef WISHART_TEST
ret = vec("9.3343 9.2811 9.3583 9.3652 9.3031");
ret*= 1e+04;
#elif defined(WISHART_TEST2)
ret = vec("4.0822e+03");
#endif
return ret;
}
/** Fill the first n entries of ret with independent uniform(0,1) draws
 *  (drand48). ret must already have size >= n. */
void randv(int n, vec & ret){
assert(n>=1);
for (int i=0; i< n; i++)
ret[i] = drand48();
}
/** Draw a Dx x Dy matrix of standard normal variates via the Box-Muller
 *  transform on uniform(0,1) draws. A Dx of 0 is treated as 1; `col` is
 *  unused here (consumed by randn1_vec).
 *  NOTE(review): consecutive entries reuse overlapping uniform pairs
 *  (us[k/2], us[k/2+1] shift by one every two entries), so entries are not
 *  fully independent — confirm whether this matches the original intent. */
mat randn1(int Dx, int Dy, int col){
if (Dx == 0)
Dx = 1;
assert(Dy>=1);
mat ret = zeros(Dx,Dy);
// draw an even number of uniforms, at least Dx*Dy of them
vec us = zeros(ceil(Dx*Dy/2.0)*2);
randv(ceil(Dx*Dy/2.0)*2, us);
int k=0;
for (int i=0; i<Dx; i++){
for (int j=0; j< Dy; j++){
// Box-Muller: alternate the cosine and sine branches of each pair
if (k % 2 == 0)
ret(i,j) = sqrt(-2.0*std::log(us[k/2]))*std::cos(2*pi*us[k/2+1]);
else
ret(i,j) = sqrt(-2.0*std::log(us[k/2]))*std::sin(2*pi*us[k/2+1]);
k++;
}
}
assert(k == Dx*Dy);
assert(ret.rows() == Dx && ret.cols() == Dy);
return ret;
}
/** Draw a Dx x Dy standard-normal matrix and return its column `col`. */
vec randn1_vec(int Dx, int Dy, int col){
mat ret = randn1(Dx,Dy,col);
return get_col(ret, col);
}
/** Draw one sample from the multivariate normal N(mu, sigma) of dimension d
 *  via the Cholesky factorization: x = mu + chol(sigma)^T * z, z ~ N(0, I).
 *  When regularization > 0 it is added to sigma's diagonal (in place — note
 *  the caller's sigma is modified) to keep the factorization numerically
 *  stable; failure of chol() is fatal. */
vec mvnrndex(vec &mu, mat &sigma, int d, double regularization){
assert(mu.size() == d);
if (regularization > 0)
sigma = sigma+ regularization*eye(sigma.rows());
mat tmp;
bool ret = chol(sigma, tmp);
if (!ret)
logstream(LOG_FATAL)<<"Cholesky decomposition in mvnrned() got into numerical errors. Try to set --bptf_chol_diagonal_weighting command line argument to add regularization" << std::endl;
vec x = zeros(d);
vec col = randn1_vec(mu.size(), 1,0);
x = mu + transpose(tmp) * col;
assert(x.size() == d);
return x;
}
/* The following code is taken from the ACM paper:
* George Marsaglia and Wai Wan Tsang. 2000. A simple method for generating gamma variables. ACM Trans. Math. Softw. 26, 3 (September 2000), 363-372.
*/
/**
 * Draw a Gamma(a, 1) sample with the Marsaglia-Tsang squeeze/rejection
 * method (ACM citation above); normals come from randn1_vec(), uniforms
 * from drand48().
 * NOTE(review): as written this assumes a >= 1 (the u^(1/a) boost step for
 * a < 1 is absent) - confirm callers never pass smaller shapes.
 */
float rgama(float a) {
float d,c,x,v,u;
d = a-1.0/3.0; c=1.0/sqrt(9.0*d);
for(;;) {
// Draw x ~ N(0,1), retry until v = (1 + c*x)^3 is positive.
do {vec xvec = randn1_vec(1,1,0); x=xvec[0]; v=1.0+c*x;} while(v<=0.0);
v=v*v*v; u=drand48();
// Fast squeeze acceptance first, then the exact log test.
if( u<1.0-0.0331*(x*x)*(x*x) ) return (d*v);
if( log(u)<0.5*x*x+d*(1.0-v+log(v)) ) return (d*v);
}
}
/**
 * Draw a Gamma(alpha, 1) sample using rgama().
 * NOTE(review): the int parameter truncates fractional shapes such as the
 * v[i]/2.0 passed by chi2rnd() - confirm that is intended. The name may
 * also collide with the C library's legacy ::gamma symbol on some
 * platforms - verify which overload call sites resolve to.
 */
float gamma(int alpha){
return rgama(alpha);
}
/**
 * Copy the column vector b into the strict upper triangle of the square
 * matrix a (row-major order) and return a. A 1x1 input is returned as-is.
 */
mat load_itiru(mat &a, mat& b){
  assert(a.size() >= 1);
  // A scalar has no off-diagonal entries to fill.
  if (a.rows() == 1 && a.cols() == 1)
    return a;
  assert(b.cols() == 1);
  assert(a.rows() == a.cols());
  const int n = a.rows();
  int next = 0; // next unread entry of b
  for (int row = 0; row < n; row++){
    for (int col = row + 1; col < n; col++){
      set_val(a, row, col, get_val(b, next, 0));
      next++;
    }
  }
  // The strict upper triangle holds exactly n*(n-1)/2 entries.
  assert(next == (n * (n - 1)) / 2);
  return a;
}
/** Return the length-n descending sequence [df, df-1, ..., df-n+1]. */
vec sequence(int df, int n){
  assert(n >= 1);
  assert(df >= 0);
  vec out(n);
  int value = df;
  for (int i = 0; i < n; i++, value--)
    out[i] = value;
  return out;
}
/**
 * Draw one sample from the Wishart distribution W(sigma, df), mirroring
 * MATLAB's wishrnd: for small integer df the definition X'X with rows of
 * X ~ N(0, sigma) is used directly; otherwise the Bartlett decomposition
 * is used (sqrt chi-square variates on the diagonal, standard normals on
 * the strict upper triangle, scaled by chol(sigma)).
 * @param sigma positive definite scale matrix (must admit a Cholesky factor)
 * @param df    degrees of freedom
 */
mat wishrnd(mat& sigma, double df){
mat d;
//cout<<sigma<<endl;
bool ret = chol(sigma, d);
//cout<<d<<endl;
assert(ret);
int n = sigma.rows();
mat x = zeros(n,n) ,a = zeros(n,n);
mat b;
// Small integer df: sample df normal rows and scale by the Cholesky factor.
if ((df <= 81+sigma.rows()) && (df == ::round(df))){
x = randn((int)df, d.rows())*d;
}
else {
// Bartlett decomposition path for large df.
vec seq = sequence(df, n);
//cout<<seq<<endl;
vec ret = chi2rnd(seq, n);
//cout<<ret<<endl;
ret = ::sqrt(ret);
//assert(ret.size() == n);
//cout<<ret<<endl;
if (ret.size() == 1) // a scalar variable
set_val(a,0,0, ret[0]);
else //a matrix
set_diag(a,ret);
assert(a.rows() == n && a.cols() == n);
//cout<<a<<endl;
// Strict upper triangle gets n*(n-1)/2 standard normals.
if (ret.size() > 1){
b = randn(n*(n-1)/2,1);
assert(b.cols() == 1);
}
#ifdef WISHART_TEST
b=zeros(10,1); b = mat("0.1139 ; 1.0668 ; -0.0956 ; -1.3362; 0.0593 ; -0.8323 ; 0.7143; 0.2944 ; 1.6236; -0.6918");
//b = zeros(10,1); b = mat(" 1.1909 ; 1.1892 ; -0.0376; 0.3273; 0.1746; -0.1867; 0.7258; -0.5883; 2.1832; -0.1364");
#elif defined(WISHART_TEST2)
b = mat(0,0);
#endif
if (ret.size() > 1)
a = load_itiru(a,b);
//assert(a.rows() == n && a.cols() == n);
//cout<<a<<endl;
x = a*d;
//assert(x.cols() == x.rows() && x.cols() == n);
//cout<<x<<endl;
}
// The Wishart draw is X'X, symmetric n-by-n.
mat c= transpose(x)*x;
assert(c.rows() == n && c.cols() == n);
//cout<<a<<endl;
//
assert(abs_sum(c)!= 0);
return c;
}
/**
 * Regression test for wishrnd() on a 5x5 scale matrix against a reference
 * MATLAB run. Only valid when compiled with -DWISHART_TEST so the random
 * draws are pinned to the recorded fixtures.
 */
void test_wishrnd(){
#ifndef WISHART_TEST
assert(false); // fixtures compiled out - this test cannot run meaningfully
#endif
mat a = init_mat(" 0.2977 -0.0617 -0.1436 -0.0929 0.0136;"
"-0.0617 0.3489 -0.0736 -0.0581 -0.1337;"
"-0.1436 -0.0736 0.4457 -0.0348 -0.0301;"
"-0.0929 -0.0581 -0.0348 0.3165 0.0029;"
" 0.0136 -0.1337 -0.0301 0.0029 0.1862;", 5, 5);
assert(a.rows() == a.cols() && a.rows() == 5);
a *= 1.0e-03;
int df = 93531;
mat b = wishrnd(a,df);
// Expected result captured from the reference MATLAB run.
mat trueret = init_mat(" 27.7883 -5.6442 -13.3231 -8.7063 1.2395;"
"-5.6442 32.3413 -6.8909 -5.3947 -12.4362;"
"-13.3231 -6.8909 41.5932 -3.3148 -2.6792;"
" -8.7063 -5.3947 -3.3148 29.6388 0.2115;"
" 1.2395 -12.4362 -2.6792 0.2115 17.3015;", 5, 5);
double diff = sumsum(b-trueret)/(25.0);
assert(fabs(diff) < 1e-2);
}
void test_wishrnd2(){
#ifndef WISHART_TEST2
assert(false);
#endif
mat a = init_mat("3", 1, 1);
assert(a.rows() == a.cols() && a.rows() == 1);
int df = 4122;
mat b = wishrnd(a,df);
mat trueret = init_mat("1.2247e+04",1,1);
double diff = sumsum(b-trueret);
assert(fabs(diff) < 1);
}
void test_randn(){
#if (defined(WISHART_TEST2) || defined(WISHART_TEST))
assert(false);
#endif
mat a = randn(10000000,1);
double ret = fabs(sumsum(a)/10000000);
assert(ret < 4e-3);
//ret = fabs(variance(get_col(a,0)) - 1);
//assert(ret < 1e-3);
//TODO
}
/**
 * Monte-Carlo check of mvnrndex(): the empirical mean of 10,000 draws from
 * the 6-dimensional N(mu, sigma) should approach mu; prints the averaged
 * sample and its distance from a reference run.
 */
void test_mvnrndex(){
mat sigma = init_mat(
" 0.990566133945187 -0.009433866054813 -0.009433866054813 -0.009433866054813 -0.009433866054813 -0.009433866054813 ;" \
" -0.009433866054813 0.990566133945187 -0.009433866054813 -0.009433866054813 -0.009433866054813 -0.009433866054813 ;" \
" -0.009433866054813 -0.009433866054813 0.990566133945187 -0.009433866054813 -0.009433866054813 -0.009433866054813 ;" \
" -0.009433866054813 -0.009433866054813 -0.009433866054813 0.990566133945187 -0.009433866054813 -0.009433866054813 ;" \
" -0.009433866054813 -0.009433866054813 -0.009433866054813 -0.009433866054813 0.990566133945187 -0.009433866054813 ;" \
" -0.009433866054813 -0.009433866054813 -0.009433866054813 -0.009433866054813 -0.009433866054813 0.990566133945187 ", 6, 6);
// BUGFIX: mu lists 6 values but was previously constructed with size 5,
// which trips assert(mu.size() == d) inside mvnrndex() for d == 6.
vec mu = init_vec("95532 -1 2 3 0 22", 6);
vec ret = zeros(6);
for (int i=0; i<10000; i++){
ret+= mvnrndex(mu,sigma,6,0);
}
ret /= 10000;
// Reference empirical mean captured from a previous run.
vec ans = init_vec("95532.0115 -0.996855354 2.00914034 2.99521376 -0.0105874825 22.0127606", 6);
cout<<ret<<endl<<norm(ans-ret)<<endl;
}
void test_chi2rnd(){
vec ret = zeros(6);
vec v = init_vec("95532 95531 95530 95529 95528 95527", 6);
for (int i=0; i< 1000000; i++){
ret += chi2rnd(v, 6);
}
ret /= 1000000;
vec ans = init_vec("95531.99 95531.672 95530.016 95530.005 95527.495 95527.447", 6);
cout<<ret<<endl<<norm(ans-ret)<<endl;
}
/**
 * Monte-Carlo check of wishrnd(): the average of 10,000 Wishart draws with
 * df = 95532 should approach df * sigma; prints the empirical mean matrix
 * and its distance from a reference run.
 */
void test_wishrnd3(){
mat sigma = init_mat(
" 0.990566133945187 -0.009433866054813 -0.009433866054813 -0.009433866054813 -0.009433866054813 -0.009433866054813 ;" \
" -0.009433866054813 0.990566133945187 -0.009433866054813 -0.009433866054813 -0.009433866054813 -0.009433866054813 ;" \
" -0.009433866054813 -0.009433866054813 0.990566133945187 -0.009433866054813 -0.009433866054813 -0.009433866054813 ;" \
" -0.009433866054813 -0.009433866054813 -0.009433866054813 0.990566133945187 -0.009433866054813 -0.009433866054813 ;" \
" -0.009433866054813 -0.009433866054813 -0.009433866054813 -0.009433866054813 0.990566133945187 -0.009433866054813 ;" \
" -0.009433866054813 -0.009433866054813 -0.009433866054813 -0.009433866054813 -0.009433866054813 0.990566133945187 ", 6,6);
int df = 95532;
mat ret = zeros(6,6);
for (int i=0; i<10000; i++){
ret+= wishrnd(sigma, df);
}
ret /= 10000;
// Reference empirical mean captured from a previous run (scaled by 1e4 below).
mat ans = init_mat("9.463425666451165 -0.090326102576222 -0.091016693707751 -0.089933822069320 -0.090108095540542 -0.089915220156678 ;\
-0.090326102576222 9.462305273959634 -0.090071105888907 -0.090355541326424 -0.090556802227637 -0.089887181087950 ;\
-0.091016693707751 -0.090071105888907 9.462973812711439 -0.090185140101184 -0.090013352917616 -0.089803917691135 ;\
-0.089933822069320 -0.090355541326424 -0.090185140101184 9.462292292298272 -0.090233918216433 -0.090557276073727 ;\
-0.090108095540542 -0.090556802227637 -0.090013352917616 -0.090233918216433 9.461377045031403 -0.089774502028808 ;\
-0.089915220156678 -0.089887181087950 -0.089803917691135 -0.090557276073727 -0.089774502028808 9.463220187604252", 6, 6);
ans *= 1.0e+04;
cout<<ret<<endl<<norm(ans-ret)<<endl;
}
#endif //NPROB_HPP
| C++ |
#ifndef _DISTANCE_HPP__
#define _DISTANCE_HPP__
#include "graphchi_basic_includes.hpp"
// Scalar and vector aliases shared by every distance function below;
// flt_dbl abstracts the floating-point width used throughout this header.
typedef double flt_dbl;
typedef sparse_vec sparse_flt_dbl_vec; // sparse feature/rating vector
typedef vec flt_dbl_vec;               // dense vector (e.g. cluster center)
extern int debug; // defined by the including application; enables verbose checks
/** Natural log that maps non-positive inputs to 0 instead of -inf/NaN. */
double safeLog(double d) {
  if (d > 0.0)
    return log(d);
  return 0.0;
}
/** Log-likelihood of k successes in n Bernoulli trials with probability p. */
double logL(double p, double k, double n) {
  const double successes = k * safeLog(p);
  const double failures = (n - k) * safeLog(1.0 - p);
  return successes + failures;
}
/**
 * Log-likelihood ratio statistic (2 * log lambda) comparing two
 * independent Bernoulli rates k1/n1 and k2/n2 against a single pooled
 * rate (k1+k2)/(n1+n2). Used by the log-likelihood distance below.
 */
double twoLogLambda(double k1, double k2, double n1, double n2) {
  const double pooled = (k1 + k2) / (n1 + n2);
  double stat = logL(k1 / n1, k1, n1) + logL(k2 / n2, k2, n2);
  stat -= logL(pooled, k1, n1);
  stat -= logL(pooled, k2, n2);
  return 2.0 * stat;
}
/**
 * Log-likelihood similarity distance (Dunning's surprise-and-coincidence
 * statistic) between a datapoint and a sparse cluster row. Returns a value
 * in [0,1): a larger likelihood-ratio yields a result closer to 1.
 */
flt_dbl calc_loglikelihood_distance( sparse_flt_dbl_vec & datapoint, sparse_flt_dbl_vec & cluster, flt_dbl sqr_sum, flt_dbl sqr_sum_datapoint){
  const flt_dbl overlap = dot_prod(datapoint, cluster);
  const flt_dbl llr = twoLogLambda(overlap,
                                   sqr_sum - overlap,
                                   sqr_sum_datapoint,
                                   datapoint.size() - sqr_sum_datapoint);
  return 1.0 - 1.0 / (1.0 + llr);
}
/**
 * Log-likelihood similarity distance between a sparse datapoint and a
 * dense cluster center; same statistic as the sparse overload above.
 */
flt_dbl calc_loglikelihood_distance( sparse_flt_dbl_vec & datapoint, flt_dbl_vec &cluster, flt_dbl sqr_sum, flt_dbl sqr_sum_datapoint){
  const flt_dbl overlap = dot_prod(datapoint, cluster);
  const flt_dbl llr = twoLogLambda(overlap,
                                   sqr_sum - overlap,
                                   sqr_sum_datapoint,
                                   datapoint.size() - sqr_sum_datapoint);
  return 1.0 - 1.0 / (1.0 + llr);
}
/**
 * Tanimoto (extended Jaccard) distance: 1 - a.b / (|a|^2 + |b|^2 - a.b).
 * @param sqr_sum           precomputed squared norm of cluster
 * @param sqr_sum_datapoint precomputed squared norm of datapoint
 * In debug mode, aborts when the divisor is zero or the ratio is negative.
 */
flt_dbl calc_tanimoto_distance( sparse_flt_dbl_vec & datapoint, sparse_flt_dbl_vec & cluster, flt_dbl sqr_sum, flt_dbl sqr_sum_datapoint){
flt_dbl a_mult_b = dot_prod(datapoint , cluster);
flt_dbl div = (sqr_sum + sqr_sum_datapoint - a_mult_b);
if (debug && (div == 0 || a_mult_b/div < 0)){
// BUGFIX: corrected typo in the diagnostic message ("zeo" -> "zero").
logstream(LOG_ERROR) << "divisor is zero: " << sqr_sum<< " " << sqr_sum_datapoint << " " << a_mult_b << " " << std::endl;
print(datapoint);
print(cluster);
exit(1);
}
return 1.0 - a_mult_b/div;
}
/**
 * Tanimoto (extended Jaccard) distance between a sparse datapoint and a
 * dense cluster center: 1 - a.b / (|a|^2 + |b|^2 - a.b).
 * In debug mode, aborts when the divisor is zero or the ratio is negative.
 */
flt_dbl calc_tanimoto_distance( sparse_flt_dbl_vec & datapoint, flt_dbl_vec &cluster, flt_dbl sqr_sum, flt_dbl sqr_sum_datapoint){
flt_dbl a_mult_b = dot_prod(datapoint, cluster);
flt_dbl div = (sqr_sum + sqr_sum_datapoint - a_mult_b);
if (debug && (div == 0 || a_mult_b/div < 0)){
// BUGFIX: corrected typo in the diagnostic message ("zeo" -> "zero").
logstream(LOG_ERROR) << "divisor is zero: " << sqr_sum << " " << sqr_sum_datapoint << " " << a_mult_b << " " << std::endl;
print(datapoint);
debug_print_vec("cluster", cluster, cluster.size());
exit(1);
}
return 1.0 - a_mult_b/div;
}
/**
 * Weighted Jaccard measure between a datapoint and a cluster:
 * intersection / (|a|_1 + |b|_1 - intersection), where the weighted sizes
 * are the sums of the stored values.
 * @param sqr_sum interpreted here as the precomputed weighted
 *        intersection size (see calc_distance() in the caller); must be
 *        non zero
 * @param sqr_sum_datapoint unused by this metric
 * NOTE(review): despite the _distance name this returns a similarity
 * (larger = more alike) - confirm callers expect that orientation.
 */
flt_dbl calc_jaccard_weight_distance( sparse_flt_dbl_vec & datapoint, sparse_flt_dbl_vec & cluster, flt_dbl sqr_sum, flt_dbl sqr_sum_datapoint){
flt_dbl a_size = 0;
// Weighted size of the datapoint: sum of its stored values.
FOR_ITERATOR(i, datapoint){
a_size+= i.value();
}
flt_dbl b_size = 0;
// Weighted size of the cluster row.
FOR_ITERATOR(i, cluster){
b_size+= i.value();
}
flt_dbl intersection_size = sqr_sum;
assert(intersection_size != 0);
return intersection_size / (a_size+b_size-intersection_size);
}
/**
 * Euclidean distance computed from precomputed squared norms:
 * |a-b|^2 = |a|^2 + |b|^2 - 2*a.b. fabs() guards against tiny negative
 * values produced by floating-point cancellation.
 */
flt_dbl calc_euclidian_distance( sparse_flt_dbl_vec & datapoint, sparse_flt_dbl_vec &cluster, flt_dbl sqr_sum, flt_dbl sqr_sum_datapoint){
  sparse_flt_dbl_vec pointwise = elem_mult(datapoint, cluster);
  const flt_dbl cross = 2 * sum(pointwise);
  const flt_dbl sq_dist = sqr_sum + sqr_sum_datapoint - cross;
  return sqrt(fabs(sq_dist));
}
/*
flt_dbl calc_euclidian_distance( sparse_flt_dbl_vec & datapoint, flt_dbl_vec &cluster, flt_dbl sqr_sum, flt_dbl sqr_sum_datapoint){
flt_dbl dist = sqr_sum + sqr_sum_datapoint;
//for (int i=0; i< datapoint.nnz(); i++){
FOR_ITERATOR_(i, datapoint){
flt_dbl val = get_nz_data(datapoint, i);
int pos = get_nz_index(datapoint, i);
dist -= 2*val*cluster[pos];
}
if (debug && dist < 0 && fabs(dist) > 1e-8){
logstream(LOG_WARNING)<<"Found a negative distance: " << dist << " initial sum: " << sqr_sum_datapoint + sqr_sum << std::endl;
logstream(LOG_WARNING)<<"sqr sum: " << sqr_sum << " sqr_sum_datapoint: " <<sqr_sum_datapoint<<std::endl;
FOR_ITERATOR_(i, datapoint){
int pos = get_nz_index(datapoint, i);
logstream(LOG_WARNING)<<"Data: " << get_nz_data(datapoint, i) << " Pos: " << get_nz_index(datapoint, i) <<" cluster valu: " << cluster[pos]
<< "reduction: " << 2*get_nz_data(datapoint,i)*cluster[pos] << std::endl;
}
dist = 0;
}
return sqrt(fabs(dist)); //should not happen, but distance is sometime negative because of the shortcut we make to calculate it..
}
*/
/** Chebyshev (L-infinity) distance: largest absolute coordinate difference. */
flt_dbl calc_chebychev_distance( sparse_flt_dbl_vec & datapoint, sparse_flt_dbl_vec &cluster){
  sparse_flt_dbl_vec diff = minus(datapoint , cluster);
  flt_dbl best = 0;
  FOR_ITERATOR(i, diff){
    flt_dbl mag = fabs(get_nz_data(diff, i));
    if (mag > best)
      best = mag;
  }
  return best;
}
/** Chebyshev (L-infinity) distance against a dense cluster center. */
flt_dbl calc_chebychev_distance( sparse_flt_dbl_vec & datapoint, flt_dbl_vec &cluster){
  flt_dbl_vec diff = minus(datapoint , cluster);
  flt_dbl best = 0;
  for (int idx = 0; idx < diff.size(); idx++){
    flt_dbl mag = fabs(diff[idx]);
    if (mag > best)
      best = mag;
  }
  return best;
}
/** Manhattan (L1) distance: sum of absolute coordinate differences. */
flt_dbl calc_manhatten_distance( sparse_flt_dbl_vec & datapoint, sparse_flt_dbl_vec &cluster){
  sparse_flt_dbl_vec gap = minus(datapoint , cluster);
  sparse_flt_dbl_vec magnitudes = fabs(gap);
  return sum(magnitudes);
}
/** Manhattan (L1) distance against a dense cluster center. */
flt_dbl calc_manhatten_distance( sparse_flt_dbl_vec & datapoint, flt_dbl_vec &cluster){
  flt_dbl_vec gap = minus(datapoint , cluster);
  flt_dbl total = sum(fabs(gap));
  return total;
}
/* note that distance should be divided by intersection size */
/**
 * Slope-one "distance": the signed sum of per-coordinate differences.
 * Callers are expected to divide the result by the intersection size
 * (see the note above the original declaration).
 */
flt_dbl calc_slope_one_distance( sparse_flt_dbl_vec & datapoint, sparse_flt_dbl_vec &cluster){
  sparse_flt_dbl_vec gap = minus(datapoint , cluster);
  return sum(gap);
}
/** Cosine distance: 1 - a.b / (|a| * |b|), using precomputed squared norms. */
flt_dbl calc_cosine_distance( sparse_flt_dbl_vec & datapoint, sparse_flt_dbl_vec & cluster, flt_dbl sum_sqr, flt_dbl sum_sqr0){
  const flt_dbl numerator = dot_prod(datapoint, cluster);
  const flt_dbl norms = sqrt(sum_sqr0) * sqrt(sum_sqr);
  return 1.0 - numerator / norms;
}
/** Cosine distance against a dense cluster center. */
flt_dbl calc_cosine_distance( sparse_flt_dbl_vec & datapoint, flt_dbl_vec & cluster, flt_dbl sum_sqr, flt_dbl sum_sqr0){
  const flt_dbl numerator = dot_prod(datapoint, cluster);
  const flt_dbl norms = sqrt(sum_sqr0) * sqrt(sum_sqr);
  return 1.0 - numerator / norms;
}
/** Similarity measured directly as the dot product (not a true distance). */
flt_dbl calc_dot_product_distance( sparse_flt_dbl_vec & datapoint, flt_dbl_vec & cluster){
  flt_dbl similarity = dot_prod(datapoint, cluster);
  return similarity;
}
/** Sparse-sparse overload: similarity as the plain dot product. */
flt_dbl calc_dot_product_distance( sparse_flt_dbl_vec & datapoint, sparse_flt_dbl_vec & cluster){
  flt_dbl similarity = dot_prod(datapoint, cluster);
  return similarity;
}
#endif //_DISTANCE_HPP__
| C++ |
/**
* @file
* @author Danny Bickson, based on code by Aapo Kyrola <akyrola@cs.cmu.edu>
* @version 1.0
*
* @section LICENSE
*
* Copyright [2012] [Carnegie Mellon University]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* This file implements item based collaborative filtering by comparing all item pairs which
* are connected by one or more user nodes.
*
*
* For Pearson's correlation
*
* see: http://en.wikipedia.org/wiki/Pearson_product-moment_correlation_coefficient
Cosine Similarity
See: http://en.wikipedia.org/wiki/Cosine_similarity
Manhattan Distance
See http://en.wikipedia.org/wiki/Taxicab_geometry
Log Similarity Distance
See http://tdunning.blogspot.co.il/2008/03/surprise-and-coincidence.html
Chebychev Distance
http://en.wikipedia.org/wiki/Chebyshev_distance
Tanimoto Distance
See http://en.wikipedia.org/wiki/Jaccard_index
*/
#include <string>
#include <vector>
#include <algorithm>
#include <iomanip>
#include <set>
#include <iostream>
#include "eigen_wrapper.hpp"
#include "distance.hpp"
#include "util.hpp"
#include "timer.hpp"
#include "common.hpp"
// Supported distance metrics; main() below rejects everything except
// JACCARD_WEIGHT, the only metric wired into calc_distance().
enum DISTANCE_METRICS{
JACKARD = 0,
AA = 1,
RA = 2,
PEARSON = 3,
COSINE = 4,
CHEBYCHEV = 5,
MANHATTEN = 6,
TANIMOTO = 7,
LOG_LIKELIHOOD = 8,
JACCARD_WEIGHT = 9
};
int min_allowed_intersection = 1; // items rated by fewer users are skipped
size_t written_pairs = 0;         // similarity pairs written to disk
size_t item_pairs_compared = 0;   // pairs evaluated so far
std::vector<FILE*> out_files;     // one output file per OMP thread
timer mytimer;
vec mean;
vec stddev;
int grabbed_edges = 0;   // edges currently held in memory by the pivot cache
int distance_metric;     // selected DISTANCE_METRICS value
int debug;               // verbose logging toggle
/** True for item vertices: items occupy ids [M, M+N); when the matrix is square (M == N) every id is treated as an item. */
bool is_item(vid_t v){ return (M == N) || (v >= M); }
/** True for user vertices: users occupy the id range [0, M). */
bool is_user(vid_t v){ return v < M; }
/**
* Type definitions. Remember to create suitable graph shards using the
* Sharder-program.
*/
typedef unsigned int VertexDataType; // vertex payload type required by the engine template
typedef float EdgeDataType; // Edges store the "rating" of user->movie pair
/**
 * In-memory per-vertex state: a latent factor vector plus the
 * set_val/get_val element accessors expected by the shared io helpers.
 */
struct vertex_data{
  vec pvec; // latent factor vector
  vertex_data(){ }
  // Store val at position index of the factor vector.
  void set_val(int index, float val){ pvec[index] = val; }
  // Read the factor at position index.
  float get_val(int index){ return pvec[index]; }
};
std::vector<vertex_data> latent_factors_inmem; // global in-memory vertex table
#include "io.hpp"
struct dense_adj {
sparse_vec edges;
dense_adj() { }
double intersect(const dense_adj & other){
sparse_vec x1 = edges.unaryExpr(std::ptr_fun(equal_greater));
sparse_vec x2 = other.edges.unaryExpr(std::ptr_fun(equal_greater));
sparse_vec x3 = x1.cwiseProduct(x2);
return sum(x3);
}
};
// In-memory cache of the adjacency lists of the current "pivot" vertex
// range [pivot_st, pivot_en); pivots are compared against every other item.
class adjlist_container {
//mutex m;
public:
std::vector<dense_adj> adjs; // one adjacency row per pivot, indexed by id - pivot_st
vid_t pivot_st, pivot_en;    // current pivot id range (half-open)
// For JACCARD_WEIGHT pivots start at vertex 0; otherwise they start at the
// first item node (id M), skipping user nodes.
adjlist_container() {
if (debug)
std::cout<<"setting pivot st and end to " << M << std::endl;
if (distance_metric == JACCARD_WEIGHT){
pivot_st = 0;
pivot_en = 0;
}
else {
pivot_st = M; //start pivor on item nodes (excluding user nodes)
pivot_en = M;
}
}
// Release all cached edges and advance the pivot window start to its end.
void clear() {
for(std::vector<dense_adj>::iterator it=adjs.begin(); it != adjs.end(); ++it) {
if (nnz(it->edges)) {
it->edges.resize(0);
}
}
adjs.clear();
if (debug)
std::cout<<"setting pivot end to " << pivot_en << std::endl;
pivot_st = pivot_en;
}
/**
* Extend the interval of pivot vertices to en.
*/
void extend_pivotrange(vid_t en) {
assert(en>pivot_en);
pivot_en = en;
adjs.resize(pivot_en - pivot_st);
}
/**
* Grab pivot's adjacency list into memory.
* Returns the number of edges loaded (0 if the item is skipped for having
* fewer than min_allowed_intersection raters).
*/
int load_edges_into_memory(graphchi_vertex<VertexDataType, EdgeDataType> &v) {
//assert(is_pivot(v.id()));
//assert(is_item(v.id()));
int num_edges = v.num_edges();
//not enough user rated this item, we don't need to compare to it
if (num_edges < min_allowed_intersection){
if (debug)
logstream(LOG_DEBUG)<<"Skipping since num edges: " << num_edges << std::endl;
return 0;
}
// Copy every (neighbor id, rating) edge into the sparse adjacency row.
dense_adj dadj;
for(int i=0; i<num_edges; i++)
set_new( dadj.edges, v.edge(i)->vertex_id(), v.edge(i)->get_data());
//std::sort(&dadj.adjlist[0], &dadj.adjlist[0] + num_edges);
// NOTE(review): the bounds assert below runs after the write into adjs -
// consider checking before the assignment.
adjs[v.id() - pivot_st] = dadj;
assert(v.id() - pivot_st < adjs.size());
__sync_add_and_fetch(&grabbed_edges, num_edges /*edges_to_larger_id*/);
return num_edges;
}
// Number of cached edges for the given pivot id.
int acount(vid_t pivot) {
return nnz(adjs[pivot - pivot_st].edges);
}
/**
* calc distance between two items.
* Let a be all the users rated item 1
* Let b be all the users rated item 2
*
* 3) Using Pearson correlation
* Dist_ab = (a - mean)*(b- mean)' / (std(a)*std(b))
*
* 4) Using cosine similarity:
* Dist_ab = (a*b) / sqrt(sum_sqr(a)) * sqrt(sum_sqr(b)))
*
* 5) Using chebychev:
* Dist_ab = max(abs(a-b))
*
* 6) Using manhatten distance:
* Dist_ab = sum(abs(a-b))
*
* 7) Using tanimoto:
* Dist_ab = 1.0 - [(a*b) / (sum_sqr(a) + sum_sqr(b) - a*b)]
*
* 8) Using log likelihood similarity
* Dist_ab = 1.0 - 1.0/(1.0 + loglikelihood)
*
* 9) Using Jaccard:
* Dist_ab = intersect(a,b) / (size(a) + size(b) - intersect(a,b))
*
* Only JACCARD_WEIGHT is currently implemented; all other metrics yield NAN.
*/
double calc_distance(graphchi_vertex<VertexDataType, EdgeDataType> &v, vid_t pivot, int distance_metric) {
//assert(is_pivot(pivot));
//assert(is_item(pivot) && is_item(v.id()));
dense_adj &pivot_edges = adjs[pivot - pivot_st];
int num_edges = v.num_edges();
// Materialize the current vertex's edges as a sparse row.
// NOTE(review): uses the ->vertexid field here but ->vertex_id() above -
// confirm both denote the same neighbor id in the engine API.
dense_adj item_edges;
for(int i=0; i < num_edges; i++){
set_new(item_edges.edges, v.edge(i)->vertexid, v.edge(i)->get_data());
}
if (distance_metric == JACCARD_WEIGHT){
// The third argument is the pivot->v edge weight, used as the
// precomputed weighted intersection.
return calc_jaccard_weight_distance(pivot_edges.edges, item_edges.edges, get_val( pivot_edges.edges, v.id()), 0);
}
return NAN;
}
// True when vid lies inside the current pivot window.
inline bool is_pivot(vid_t vid) {
return vid >= pivot_st && vid < pivot_en;
}
};
adjlist_container * adjcontainer; // global pivot-edge cache, allocated in main()
/**
 * GraphChi program that computes pairwise item similarities: even
 * iterations cache a window of pivot vertices in memory, odd iterations
 * compare every vertex against the cached pivots and append nonzero
 * similarities to the per-thread output files.
 */
struct ItemDistanceProgram : public GraphChiProgram<VertexDataType, EdgeDataType> {
/**
* Vertex update function.
*/
void update(graphchi_vertex<VertexDataType, EdgeDataType> &v, graphchi_context &gcontext) {
if (debug)
printf("Entered iteration %d with %d - edges %d\n", gcontext.iteration, v.id(), v.num_edges());
/* even iteration numbers:
* 1) load a subset of items into memory (pivots)
* 2) Find which subset of items needs to compared to the users
*/
if (gcontext.iteration % 2 == 0) {
if (adjcontainer->is_pivot(v.id())){
adjcontainer->load_edges_into_memory(v);
if (debug)
printf("Loading pivot %d intro memory\n", v.id());
}
}
else {
// Odd iteration: compare this vertex against every cached pivot.
for (vid_t i=adjcontainer->pivot_st; i< adjcontainer->pivot_en; i++){
//since metric is symmetric, compare only to pivots which are smaller than this item id
if (i >= v.id())
continue;
dense_adj &pivot_edges = adjcontainer->adjs[i - adjcontainer->pivot_st];
//pivot is not connected to this item, continue
if (get_val(pivot_edges.edges, v.id()) == 0)
continue;
double dist = adjcontainer->calc_distance(v, i, distance_metric);
item_pairs_compared++;
if (item_pairs_compared % 1000000 == 0)
logstream(LOG_INFO)<< std::setw(10) << mytimer.current_time() << ") " << std::setw(10) << item_pairs_compared << " pairs compared " << std::endl;
if (debug)
printf("comparing %d to pivot %d distance is %lg\n", i+ 1, v.id() + 1, dist);
if (dist != 0){
fprintf(out_files[omp_get_thread_num()], "%u %u %.12lg\n", v.id()+1, i+1, (double)dist);//write item similarity to file
//where the output format is:
//[item A] [ item B ] [ distance ]
written_pairs++;
}
}
}//end of iteration % 2 == 1
}//end of update function
/**
* Called before an iteration starts.
* On odd iteration, schedule both users and items.
* on even iterations, schedules only item nodes
* NOTE(review): both branches below actually schedule only ids [0, M) -
* consistent with JACCARD_WEIGHT (pivots start at 0), but confirm for any
* future metric whose pivots start at M.
*/
void before_iteration(int iteration, graphchi_context &gcontext) {
gcontext.scheduler->remove_tasks(0, (int) gcontext.nvertices - 1);
if (gcontext.iteration % 2 == 0){
for (vid_t i=0; i < M; i++){
gcontext.scheduler->add_task(i);
}
grabbed_edges = 0;
adjcontainer->clear();
} else { //iteration % 2 == 1
for (vid_t i=0; i< M; i++){
gcontext.scheduler->add_task(i);
}
}
}
/**
* Called after an iteration has finished.
*/
void after_iteration(int iteration, graphchi_context &gcontext) {
}
/**
* Called before an execution interval is started.
*
* On every even iteration, we load pivot's item connected user lists to memory.
* Here we manage the memory to ensure that we do not load too much
* edges into memory.
*/
void before_exec_interval(vid_t window_st, vid_t window_en, graphchi_context &gcontext) {
/* on even iterations, loads pivot items into memory base on the membudget_mb allowed memory size */
if ((gcontext.iteration % 2 == 0)) {
if (debug){
printf("entering iteration: %d on before_exec_interval\n", gcontext.iteration);
printf("pivot_st is %d window_en %d\n", adjcontainer->pivot_st, window_en);
}
if (adjcontainer->pivot_st <= window_en) {
// membudget_mb bounds how many edges may be cached (8 bytes per edge).
size_t max_grab_edges = get_option_long("membudget_mb", 1024) * 1024 * 1024 / 8;
if (grabbed_edges < max_grab_edges * 0.8) {
logstream(LOG_DEBUG) << "Window init, grabbed: " << grabbed_edges << " edges" << " extending pivor_range to : " << window_en + 1 << std::endl;
adjcontainer->extend_pivotrange(window_en + 1);
logstream(LOG_DEBUG) << "Window en is: " << window_en << " vertices: " << gcontext.nvertices << std::endl;
if (window_en+1 == gcontext.nvertices) {
// every item was a pivot item, so we are done
logstream(LOG_DEBUG)<<"Setting last iteration to: " << gcontext.iteration + 2 << std::endl;
gcontext.set_last_iteration(gcontext.iteration + 2);
}
} else {
logstream(LOG_DEBUG) << "Too many edges, already grabbed: " << grabbed_edges << std::endl;
}
}
}
}
};
/**
 * Entry point: parses options, shards the training matrix, runs the
 * pairwise item-similarity program and writes one output file per thread.
 */
int main(int argc, const char ** argv) {
print_copyright();
/* GraphChi initialization will read the command line
arguments and the configuration file. */
graphchi_init(argc, argv);
/* Metrics object for keeping track of performance counters
and other information. Currently required. */
metrics m("item-cf2");
/* Basic arguments for application */
min_allowed_intersection = get_option_int("min_allowed_intersection", min_allowed_intersection);
distance_metric = get_option_int("distance", JACCARD_WEIGHT);
// Only JACCARD_WEIGHT is wired into calc_distance(); reject the rest.
if (distance_metric != JACCARD_WEIGHT)
logstream(LOG_FATAL)<<"--distance_metrix=XX should be one of:9= JACCARD_WEIGHT" << std::endl;
debug = get_option_int("debug", 0);
parse_command_line_args();
//if (distance_metric != JACKARD && distance_metric != AA && distance_metric != RA)
//  logstream(LOG_FATAL)<<"Wrong distance metric. --distance_metric=XX, where XX should be either 0) JACKARD, 1) AA, 2) RA" << std::endl;
mytimer.start();
// Convert the matrix-market input into shards (sets M users, N items).
int nshards = convert_matrixmarket<EdgeDataType>(training, NULL, 0, 0, 3, TRAINING, true);
assert(M > 0 && N > 0);
//initialize data structure which saves a subset of the items (pivots) in memory
adjcontainer = new adjlist_container();
/* Run */
ItemDistanceProgram program;
graphchi_engine<VertexDataType, EdgeDataType> engine(training/*+orderByDegreePreprocessor->getSuffix()*/ ,nshards, true, m);
set_engine_flags(engine);
//open output files as the number of operating threads
out_files.resize(number_of_omp_threads());
for (uint i=0; i< out_files.size(); i++){
char buf[256];
sprintf(buf, "%s.out%d", training.c_str(), i);
out_files[i] = open_file(buf, "w");
}
//run the program
engine.run(program, niters);
/* Report execution metrics */
if (!quiet)
metrics_report(m);
std::cout<<"Total item pairs compared: " << item_pairs_compared << " total written to file: " << written_pairs << std::endl;
for (uint i=0; i< out_files.size(); i++)
fclose(out_files[i]);
std::cout<<"Created output files with the format: " << training << ".outXX, where XX is the output thread number" << std::endl;
return 0;
}
| C++ |
/**
* @file
* @author Danny Bickson, CMU
* @version 1.0
*
* @section LICENSE
*
* Copyright [2012] [Carnegie Mellon University]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @section DESCRIPTION
*
* This program computes top K recommendations based on the linear model computed
* by one of: als,sparse_als,wals, sgd and nmf applications.
*
*/
#include "common.hpp"
#include "eigen_wrapper.hpp"
#include "timer.hpp"
int debug; // verbose logging toggle
int num_ratings; // K: number of top recommendations kept per user
double knn_sample_percent = 1.0; // fraction of items examined per user (1.0 = all)
const double epsilon = 1e-16;
timer mytimer;
int tokens_per_row = 3; // input columns: 3 = [user item rating], 4 adds a time column
int algo = 0;
// Supported upstream factorization algorithms whose factors we can load.
enum {
ALS = 0, SPARSE_ALS = 1, SGD = 2, NMF = 3, WALS = 4
};
/**
 * Per-vertex state: the latent factor vector plus buffers for the top-K
 * recommendation results (item ids and their scores).
 */
struct vertex_data {
vec ratings; // scores of the top num_ratings recommended items
ivec ids;    // matching 0-based item ids
vec pvec;    // latent factor vector of dimension D
vertex_data() {
pvec = vec::Zero(D);
ids = ivec::Zero(num_ratings);
ratings = vec::Zero(num_ratings);
}
// Store val at position index of the factor vector (io helper interface).
void set_val(int index, float val){
pvec[index] = val;
}
// Read the factor at position index (io helper interface).
float get_val(int index){
return pvec[index];
}
};
/** Edge payload holding the observed rating of a user->item pair. */
struct edge_data {
  double weight; // observed rating
  edge_data() : weight(0) { }
  edge_data(double weight) : weight(weight) { }
};
/** Edge payload for 4-column input: a rating plus its timestamp. */
struct edge_data4 {
  double weight; // observed rating
  double time;   // timestamp of the rating
  edge_data4() : weight(0), time(0) { }
  edge_data4(double weight, double time) : weight(weight), time(time) { }
};
/**
* Type definitions. Remember to create suitable graph shards using the
* Sharder-program.
*/
typedef vertex_data VertexDataType; // engine vertex payload
typedef edge_data EdgeDataType; // Edges store the "rating" of user->movie pair
graphchi_engine<VertexDataType, EdgeDataType> * pengine = NULL; // global engine pointer (initialized to NULL)
std::vector<vertex_data> latent_factors_inmem; // all user+item vertices kept in RAM
/**
 * Predict a rating as the dot product of the user and item latent factors,
 * clamped to [minval, maxval]; the prediction is written through
 * `prediction` and the squared error against `rating` is returned.
 */
float als_predict(const vertex_data& user,
const vertex_data& movie,
const float rating,
double & prediction){
  double raw = dot_prod(user.pvec, movie.pvec);
  // Clamp the raw score to the allowed rating range.
  if (raw > maxval)
    raw = maxval;
  if (raw < minval)
    raw = minval;
  prediction = raw;
  float err = rating - prediction;
  assert(!std::isnan(err));
  return err * err;
}
/**
 * Print min/max/average of the best (first) stored rating across all user
 * vertices [0, M); NaN entries are reported and excluded from the average.
 */
void rating_stats(){
  double lo = 1e100, hi = 0, total = 0;
  int counted = 0;
  for (int user = 0; user < (int)M; user++){
    vertex_data& data = latent_factors_inmem[user];
    if (data.ratings.size() <= 0)
      continue;
    double best = data.ratings[0];
    lo = std::min(lo, best);
    hi = std::max(hi, best);
    if (std::isnan(best))
      printf("bug: nan on %d\n", user);
    else {
      total += best;
      counted++;
    }
  }
  printf("Distance statistics: min %g max %g avg %g\n", lo, hi, total/counted);
}
#include "io.hpp"
/**
 * Load the user (U) and item (V) factor matrices produced by the training
 * run: users occupy rows [0, M), items start at offset M, both of width D.
 * BUGFIX: the base_filename parameter was previously ignored in favor of
 * the global `training` (the only current caller passes `training`, so
 * behavior is unchanged for it).
 */
void read_factors(std::string base_filename){
load_matrix_market_matrix(base_filename + "_U.mm", 0, D);
load_matrix_market_matrix(base_filename + "_V.mm", M, D);
}
/**
* GraphChi programs need to subclass GraphChiProgram<vertex-type, edge-type>
* class. The main logic is usually in the update function.
*/
/**
 * GraphChi programs need to subclass GraphChiProgram<vertex-type, edge-type>
 * class. The main logic is usually in the update function.
 * This program computes, for every user vertex, the top num_ratings item
 * recommendations by scoring (a sample of) all items the user has not yet
 * rated.
 */
template<typename VertexDataType, typename EdgeDataType>
struct RatingVerticesInMemProgram : public GraphChiProgram<VertexDataType, EdgeDataType> {
/**
* Vertex update function - scores candidate items for one user and keeps
* the top num_ratings of them.
*/
void update(graphchi_vertex<VertexDataType, EdgeDataType> &vertex, graphchi_context &gcontext) {
//compute only for user nodes
if (vertex.id() >= M)
return;
vertex_data & vdata = latent_factors_inmem[vertex.id()];
int howmany = (int)(N*knn_sample_percent);
assert(howmany > 0 );
vec distances = zeros(howmany);
ivec indices = ivec::Zero(howmany);
for (int i=0; i< howmany; i++){
indices[i]= -1;
}
// Mark the items this user already rated so they are never recommended.
std::vector<bool> curratings;
curratings.resize(N);
for(int e=0; e < vertex.num_edges(); e++) {
//no need to calculate this rating since it is given in the training data reference
// NOTE(review): with an unsigned vertex id the ">= 0" half of this
// assert is always true - confirm underflow cannot slip through.
assert(vertex.edge(e)->vertex_id() - M >= 0 && vertex.edge(e)->vertex_id() - M < N);
curratings[vertex.edge(e)->vertex_id() - M] = true;
}
if (knn_sample_percent == 1.0){
// Exhaustive scan over every unrated item.
for (uint i=M; i< M+N; i++){
if (curratings[i-M])
continue;
vertex_data & other = latent_factors_inmem[i];
double dist;
als_predict(vdata, other, 0, dist);
indices[i-M] = i-M;
distances[i-M] = dist + 1e-10;
}
}
// Otherwise score a random sample of items.
else for (int i=0; i<howmany; i++){
int random_other = ::randi(M, M+N-1);
vertex_data & other = latent_factors_inmem[random_other];
double dist;
als_predict(vdata, other, 0, dist);
indices[i] = random_other-M;
distances[i] = dist;
}
// Keep the num_ratings highest-scoring items.
vec out_dist(num_ratings);
ivec indices_sorted = reverse_sort_index2(distances, indices, out_dist, num_ratings);
assert(indices_sorted.size() <= num_ratings);
assert(out_dist.size() <= num_ratings);
vdata.ids = indices_sorted;
vdata.ratings = out_dist;
if (debug)
printf("Closest is: %d with distance %g\n", (int)vdata.ids[0], vdata.ratings[0]);
if (vertex.id() % 1000 == 0)
printf("Computing recommendations for user %d at time: %g\n", vertex.id()+1, mytimer.current_time());
}
};
struct MMOutputter_ratings{
MMOutputter_ratings(std::string fname, uint start, uint end, std::string comment) {
assert(start < end);
MM_typecode matcode;
set_matcode(matcode);
FILE * outf = fopen(fname.c_str(), "w");
assert(outf != NULL);
mm_write_banner(outf, matcode);
if (comment != "")
fprintf(outf, "%%%s\n", comment.c_str());
mm_write_mtx_array_size(outf, end-start, num_ratings+1);
for (uint i=start; i < end; i++){
fprintf(outf, "%u ", i+1);
for(int j=0; j < latent_factors_inmem[i].ratings.size(); j++) {
fprintf(outf, "%1.12e ", latent_factors_inmem[i].ratings[j]);
}
fprintf(outf, "\n");
}
fclose(outf);
}
};
/**
 * Writes the top-K recommended item ids per user to `fname` in matrix
 * market array format: each row holds [user id, item_1 ... item_K], with
 * ids shifted back to 1-based (0 marks "no more items").
 */
struct MMOutputter_ids{
MMOutputter_ids(std::string fname, uint start, uint end, std::string comment) {
assert(start < end);
MM_typecode matcode;
set_matcode(matcode);
FILE * outf = fopen(fname.c_str(), "w");
assert(outf != NULL);
mm_write_banner(outf, matcode);
// Optional human-readable comment after the banner.
if (comment != "")
fprintf(outf, "%%%s\n", comment.c_str());
mm_write_mtx_array_size(outf, end-start, num_ratings+1);
for (uint i=start; i < end; i++){
fprintf(outf, "%u ", i+1); // external user ids are 1-based
for(int j=0; j < latent_factors_inmem[i].ids.size(); j++) {
fprintf(outf, "%u ", (int)latent_factors_inmem[i].ids[j]+1);//go back to item ids starting from 1,2,3, (and not from zero as in c)
}
fprintf(outf, "\n");
}
fclose(outf);
}
};
/**
 * Emit the KNN results as two matrix market files: <filename>.ratings with
 * the top scalar scores per user and <filename>.ids with the matching
 * item ids.
 */
void output_knn_result(std::string filename) {
  std::string ratings_file = filename + ".ratings";
  std::string ids_file = filename + ".ids";
  MMOutputter_ratings ratings(ratings_file, 0, M,"This file contains user scalar ratings. In each row i, num_ratings top scalar ratings of different items for user i. (First column: user id, next columns, top K ratings)");
  MMOutputter_ids mmoutput_ids(ids_file, 0, M ,"This file contains item ids matching the ratings. In each row i, num_ratings top item ids for user i. (First column: user id, next columns, top K ratings). Note: 0 item id means there are no more items to recommend for this user.");
  std::cout << "Rating output files (in matrix market format): " << filename << ".ratings" <<
  ", " << filename + ".ids " << std::endl;
}
/**
 * Entry point: loads the factor matrices produced by a prior als /
 * sparse_als / sgd / nmf / wals run, computes the top num_ratings
 * recommendations per user and writes them in matrix market format.
 */
int main(int argc, const char ** argv) {
mytimer.start();
print_copyright();
/* GraphChi initialization will read the command line
arguments and the configuration file. */
graphchi_init(argc, argv);
/* Metrics object for keeping track of performance counters
and other information. Currently required. */
metrics m("nmf-inmemory-factors");
knn_sample_percent = get_option_float("knn_sample_percent", 1.0);
if (knn_sample_percent <= 0 || knn_sample_percent > 1)
logstream(LOG_FATAL)<<"Sample percente should be in the range (0, 1] " << std::endl;
num_ratings = get_option_int("num_ratings", 10);
if (num_ratings <= 0)
logstream(LOG_FATAL)<<"num_ratings, the number of recomended items for each user, should be >=1 " << std::endl;
debug = get_option_int("debug", 0);
// The upstream algorithm determines the input column count.
std::string algorithm = get_option_string("algorithm");
if (algorithm == "als" || algorithm == "sparse_als" || algorithm == "sgd" || algorithm == "nmf")
tokens_per_row = 3;
else if (algorithm == "wals")
tokens_per_row = 4;
else logstream(LOG_FATAL)<<"--algorithms should be one of: als, sparse_als, sgd, nmf, wals" << std::endl;
parse_command_line_args();
/* Preprocess data if needed, or discover preprocess files */
int nshards = 0;
if (tokens_per_row == 3)
nshards = convert_matrixmarket<edge_data>(training, NULL, 0, 0, 3, TRAINING, false);
else if (tokens_per_row == 4)
nshards = convert_matrixmarket4<edge_data4>(training);
else logstream(LOG_FATAL)<<"--tokens_per_row should be either 3 or 4" << std::endl;
assert(M > 0 && N > 0);
latent_factors_inmem.resize(M+N); // Initialize in-memory vertices.
read_factors(training);
// Cannot recommend more items than exist.
if ((uint)num_ratings > N){
logstream(LOG_WARNING)<<"num_ratings is too big - setting it to: " << N << std::endl;
num_ratings = N;
}
srand(time(NULL));
/* Run */
if (tokens_per_row == 3){
RatingVerticesInMemProgram<VertexDataType, EdgeDataType> program;
graphchi_engine<VertexDataType, EdgeDataType> engine(training, nshards, false, m);
set_engine_flags(engine);
engine.run(program, 1);
}
else if (tokens_per_row == 4){
RatingVerticesInMemProgram<VertexDataType, edge_data4> program;
graphchi_engine<VertexDataType, edge_data4> engine(training, nshards, false, m);
set_engine_flags(engine);
engine.run(program, 1);
}
/* Output latent factor matrices in matrix-market format */
output_knn_result(training);
rating_stats();
/* Report execution metrics */
if (!quiet)
metrics_report(m);
return 0;
}
| C++ |
/**
* @file
* @author Danny Bickson
* @version 1.0
*
* @section LICENSE
*
* Copyright [2012] [Carnegie Mellon University]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @section DESCRIPTION
*
* Implementation of the gensgd algorithm. A generalization of SGD algorithm when there are multiple features for each
* rating, in the form
* [from] [to] [feature1] [feature2] [feature3] ... [featureN] [rating]
* (It is also possible to dynamically specify column numbers which are relevant)
* Steffen Rendle (2010): Factorization Machines, in Proceedings of the 10th IEEE International Conference on Data Mining (ICDM 2010), Sydney, Australia.
* Original implementation by Qiang Yan, Chinese Academy of Science.
* note: this code version implements the SGD version of gensgd. In the original library there are also ALS and MCMC methods.
* Also the treatment of features is richer in gensgd. The code here can serve for a quick evaluation but the user
* is encouraged to try gensgd as well.
*/
#include <vector>
#include "common.hpp"
#include "eigen_wrapper.hpp"
#include "../parsers/common.hpp"
#include <omp.h>
#define MAX_FEATAURES 256
#define FEATURE_WIDTH 56//MAX NUMBER OF ALLOWED FEATURES IN TEXT FILE
// Per-component SGD learning rates (each decayed by gensgd_mult_dec after every iteration):
double gensgd_rate1 = 1e-02; // user node (also used for the global bias update)
double gensgd_rate2 = 1e-02; // item node
double gensgd_rate3 = 1e-02; // features given on the rating line
double gensgd_rate4 = 1e-02; // user/item side features
double gensgd_rate5 = 1e-02; // remaining feature nodes
double gensgd_mult_dec = 0.9; // multiplicative learning rate decay per iteration
double gensgd_regw = 1e-3; // regularization for node biases
double gensgd_regv = 1e-3; // regularization for latent feature vectors
double gensgd_reg0 = 1e-1; // regularization for the global mean bias
bool debug = false; // when set, factors get a deterministic init (0.1 everywhere)
std::string user_file; //optional file with user features
std::string item_file; //optional file with item features
std::string user_links; //optional file with user to user links
int limit_rating = 0; // if > 0, read only this many rating lines from the input
size_t vertex_with_no_edges = 0; // count of users without any ratings (reported after iter 1)
int calc_error = 0; // when set, also report classification error using 'cutoff'
int calc_roc = 0; // when set, compute validation ROC (AUC)
int binary = 1; // treat input values as binary (forced off for libsvm format)
int round_float = 0; // when set, feature values are rounded to 4 decimals before hashing
std::vector<std::string> header_titles; // column names parsed from an optional header line
int has_header_titles = 0; // input file carries a header line with column titles
float cutoff = 0; // decision threshold applied to predictions
float val_cutoff = 0; // decision threshold applied to ground-truth values
std::string format = "libsvm"; // input format; "libsvm" fixes the column positions (see main)
vec errors_vec; // per-thread classification error accumulators
/* Maps a (possibly rounded) float feature value to a numeric node id.
 * Ids are stored 1-based inside the map so that the value 0 produced by
 * std::map::operator[] default-insertion can mean "not assigned yet";
 * callers subtract 1 to obtain the 0-based id (see assign_id/get_node_id). */
struct single_map{
std::map<float,uint> string2nodeid;
single_map(){
}
};
/* Aggregates all configuration and id-translation state used while parsing
 * feature-rich rating lines. A single global instance 'fc' is shared below. */
struct feature_control{
std::vector<single_map> node_id_maps; // per-column value->id maps (0=from, 1=to, >=2 features)
single_map val_map; // optional map used when rating values are rehashed
single_map index_map; // maps feature names/positions to consecutive ids
int rehash_value; // when set, rating values are translated via val_map
int feature_num; // maximal number of features per line (FEATURE_WIDTH)
int node_features; // number of user/item side features
int node_links; // number of user-user link features
int total_features; // number of distinct rating-line features observed in training
const std::string default_feature_str; // NOTE(review): never assigned in visible code — looks unused
std::vector<int> offsets; // start offset of each feature group inside latent_factors_inmem
bool hash_strings; // translate tokens into consecutive ids (forced on for libsvm)
int from_pos; // column position of the row (user) id
int to_pos; // column position of the column (item) id
int val_pos; // column position of the rating value (-1 = read features until EOL)
feature_control(){
rehash_value = 0;
total_features = 0;
node_features = 0;
feature_num = FEATURE_WIDTH;
hash_strings = false;
from_pos = 0;
to_pos = 1;
val_pos = -1;
node_links = 0;
}
};
/* Global feature-parsing configuration shared by parser, trainer and output code. */
feature_control fc;
/* Total number of distinct feature bins accumulated over all feature maps
 * (excluding the from/to maps at positions 0 and 1). Only meaningful when
 * string hashing is enabled. */
int num_feature_bins(){
if (!fc.hash_strings){
assert(false);
return 0;
}
const int last = 2 + fc.total_features + fc.node_features;
assert(last == (int)fc.node_id_maps.size());
int total = 0;
for (int map_idx = 2; map_idx < last; map_idx++)
total += fc.node_id_maps[map_idx].string2nodeid.size();
return total;
}
/* Number of node groups (and thus offset slots) needed:
 * the from/to columns plus all rating-line and side features. */
int calc_feature_num(){
const int from_to_columns = 2;
return from_to_columns + fc.total_features + fc.node_features;
}
/* Fill 'offsets' with the starting index of each node group inside
 * latent_factors_inmem: users start at 0, items at M, and every further
 * feature map starts right after the previous group.
 * @param offsets pre-sized vector (see calc_feature_num), fully overwritten. */
void get_offsets(std::vector<int> & offsets){
assert(offsets.size() > 3);
offsets[0] = 0;
offsets[1] = M;
offsets[2] = M+N;
for (uint i=3; i< offsets.size(); i++){
assert(fc.node_id_maps.size() > (uint)i);
// BUGFIX(robustness): use plain assignment. The old '+=' only produced the
// right value because the vector happened to be freshly zero-initialized;
// calling this on a reused vector would silently accumulate garbage.
offsets[i] = offsets[i-1] + fc.node_id_maps[i].string2nodeid.size();
}
}
/* Node-id range helpers: users occupy [0, M), items [M, M+N),
 * and additional feature nodes live at ids >= M+N. */
bool is_user(vid_t id){ return id < M; }
/* BUGFIX: the upper bound must be M+N, not N — item ids are offset by M
 * (see is_time below), so the old test 'id < N' misclassified items
 * whenever id >= N. */
bool is_item(vid_t id){ return id >= M && id < M+N; }
bool is_time(vid_t id){ return id >= M+N; }
#define BIAS_POS -1
/* A latent-factor node: a D-dimensional feature vector plus a scalar bias.
 * The sentinel index BIAS_POS (-1) addresses the bias; any index >= 0
 * addresses the corresponding entry of pvec. */
struct vertex_data {
fvec pvec; // latent feature vector
double bias; // scalar bias term
vertex_data() {
bias = 0;
}
// Store 'val' at the given logical index (bias or pvec entry).
void set_val(int index, float val){
if (index != BIAS_POS)
pvec[index] = val;
else bias = val;
}
// Read the value stored at the given logical index.
float get_val(int index){
return (index == BIAS_POS) ? bias : pvec[index];
}
};
/* Payload stored on each user->item edge: the rating plus up to
 * FEATURE_WIDTH (feature-value-id, feature-position-id) pairs taken
 * from the rating line. */
struct edge_data {
uint features[FEATURE_WIDTH]; // translated feature value ids
uint index[FEATURE_WIDTH]; // translated feature position ids, parallel to 'features'
uint size; // number of valid entries in features/index
float weight; // the rating value itself
edge_data() {
weight = 0;
size = 0;
memset(features, 0, sizeof(uint)*FEATURE_WIDTH);
memset(index, 0, sizeof(uint)*FEATURE_WIDTH);
}
// Note: always copies full FEATURE_WIDTH-sized arrays; callers must pass
// buffers of at least that capacity (read_line uses FEATURE_WIDTH-sized vectors).
edge_data(float weight, uint * valarray, uint * _index, uint size): size(size), weight(weight) {
memcpy(features, valarray, sizeof(uint)*FEATURE_WIDTH);
memcpy(index, _index, sizeof(uint)*FEATURE_WIDTH);
}
};
/**
* Type definitions. Remember to create suitable graph shards using the
* Sharder-program.
*/
typedef vertex_data VertexDataType;
typedef edge_data EdgeDataType; // Edges store the "rating" of user->movie pair
graphchi_engine<VertexDataType, EdgeDataType> * pengine = NULL; // set in main(); used by training_rmse_N for num_edges()
std::vector<vertex_data> latent_factors_inmem; // all factors (users, items, feature bins) kept in memory
/* Number of vertex_data pointers needed to score one rating:
 * the user node, the item node, and one node per feature on the rating line.
 * The asserts sanity-check that the user and item rows exist in
 * latent_factors_inmem before compute_prediction dereferences them. */
int calc_feature_node_array_size(uint node, uint item, uint edge_size){
assert(node <= M);
assert(item <= N);
// (removed tautological 'assert(edge_size >= 0)': edge_size is unsigned,
// so the check could never fire and only triggered compiler warnings)
assert(node < latent_factors_inmem.size());
assert(fc.offsets[1]+item < latent_factors_inmem.size());
return 2+edge_size;
}
/* Translate the float key 'name' into a stable 0-based id, assigning a fresh
 * id under the mutex when the key is new. Ids are stored 1-based inside the
 * map (0 means "default-inserted, not yet assigned"); callers always receive
 * the 0-based id through 'outval'. */
void assign_id(single_map& dmap, unsigned int & outval, const float name){
std::map<float,uint>::iterator it = dmap.string2nodeid.find(name);
//if an id was already assigned, return it
if (it != dmap.string2nodeid.end()){
outval = it->second - 1;
assert(outval < dmap.string2nodeid.size());
return;
}
mymutex.lock();
//re-check under the lock: another thread may have inserted this key between
//the unlocked find above and acquiring the mutex.
uint & stored = dmap.string2nodeid[name]; //operator[] default-inserts 0 for a new key
if (stored == 0)
stored = dmap.string2nodeid.size(); //fresh id; size() already counts the new entry, so ids are 1-based
//BUGFIX: always convert the stored 1-based id to the 0-based id callers expect.
//The old code returned the raw 1-based value when another thread had inserted
//the entry first, producing an off-by-one feature id in that race.
outval = stored - 1;
mymutex.unlock();
}
/**
 * return a numeric node ID out of the string text read from file (training, validation or test)
 *
 * pch       - the token to translate (must be a non-empty C string)
 * pos       - column role: 0 = from/user column, 1 = to/item column,
 *             >= 2 = a specific feature column, -1 = the feature-name map
 * i         - input line number, used only for error reporting
 * read_only - true for validation/test input: tokens never seen during
 *             training return -1 instead of being assigned a fresh id
 */
float get_node_id(char * pch, int pos, size_t i, bool read_only = false){
assert(pch != NULL);
assert(pch[0] != 0);
assert(i >= 0);
float ret;
//read numeric id
if (!fc.hash_strings){
ret = (pos < 2 ? atoi(pch) : atof(pch));
if (pos < 2)
ret--; //user/item ids in the file are 1-based; convert to 0-based
if (pos == 0 && ret >= M)
logstream(LOG_FATAL)<<"Row index larger than the matrix row size " << ret << " > " << M << " in line: " << i << std::endl;
else if (pos == 1 && ret >= N)
logstream(LOG_FATAL)<<"Col index larger than the matrix row size " << ret << " > " << N << " in line: " << i << std::endl;
}
//else read string id and assign numeric id
else {
uint id;
float val = atof(pch);
assert(!std::isnan(val));
if (round_float)
val = floorf(val * 10000 + 0.5) / 10000; //round to 4 decimals so nearly-equal floats hash together
if (pos >= 0)
assert(pos < (int)fc.node_id_maps.size());
single_map * pmap = NULL;
if (pos == -1)
pmap = &fc.index_map;
else pmap = &fc.node_id_maps[pos];
if (read_only){ // find if node was in map
std::map<float,uint>::iterator it = pmap->string2nodeid.find(val);
if (it != pmap->string2nodeid.end()){
ret = it->second - 1; //stored ids are 1-based; return 0-based
assert(ret < pmap->string2nodeid.size());
}
else ret = -1; //unseen token: signal "skip" to the caller
}
else { //else enter node into map (in case it did not exist) and return its position
assign_id(*pmap, id, val);
//when a brand-new feature name appears, grow node_id_maps so the new
//feature column gets its own value map
if (pos == -1 && fc.index_map.string2nodeid.size() == id+1 && fc.node_id_maps.size() < fc.index_map.string2nodeid.size()+2){//TODO debug
single_map newmap;
fc.node_id_maps.push_back(newmap);
}
ret = id;
}
}
if (!read_only)
assert(ret != -1);
return ret;
}
#include "io.hpp"
#include "../parsers/common.hpp"
/* Parse a rating value token. When rehash_value is off this is a plain
 * atof; otherwise the value is translated through fc.val_map the same way
 * node ids are (0-based; -1 when read_only and the value was never seen
 * in training). Fails hard on NaN/inf input. */
float get_value(char * pch, bool read_only){
float ret;
if (!fc.rehash_value){
ret = atof(pch);
}
else {
uint id;
if (read_only){ // find if node was in map
std::map<float,uint>::iterator it = fc.val_map.string2nodeid.find(atof(pch));
if (it != fc.val_map.string2nodeid.end()){
ret = it->second - 1; //stored ids are 1-based
}
else ret = -1; //value unseen in training
}
else { //else enter node into map (in case it did not exist) and return its position
assign_id(fc.val_map, id, atof(pch));
ret = id;
}
}
if (std::isnan(ret) || std::isinf(ret))
logstream(LOG_FATAL)<<"Failed to read value" << std::endl;
return ret;
}
/* Read and parse one input line from file.
 * Fills I (row/user id), J (col/item id), val (the rating) and up to
 * FEATURE_WIDTH (valarray[k], positions[k]) feature pairs; 'index' returns
 * the number of pairs written. Column roles are driven by fc.from_pos /
 * fc.to_pos / fc.val_pos. 'type' is TRAINING/VALIDATION/TEST: outside
 * training, feature names never seen during training are skipped (counted
 * in skipped_features) and unseen feature values are mapped to bin 0. */
bool read_line(FILE * f, const std::string filename, size_t i, uint & I, uint & J, float &val, std::vector<uint>& valarray, std::vector<uint>& positions, int & index, int type, int & skipped_features){
char * linebuf = NULL;
size_t linesize;
char linebuf_debug[1024];
int token = 0;
index = 0;
int rc = getline(&linebuf, &linesize, f);
if (rc == -1)
logstream(LOG_FATAL)<<"Failed to get line: " << i << " in file: " << filename << std::endl;
char * linebuf_to_free = linebuf; //strsep advances linebuf; keep the original pointer for free()
strncpy(linebuf_debug, linebuf, 1024); //copy kept only for error messages below
while (index < FEATURE_WIDTH){
/* READ FROM */
if (token == fc.from_pos){
char *pch = strsep(&linebuf,"\t,\r\n: ");
if (pch == NULL)
logstream(LOG_FATAL)<<"Error reading line " << i << " [ " << linebuf_debug << " ] " << std::endl;
I = (uint)get_node_id(pch, 0, i, type != TRAINING);
uint pos = get_node_id(pch, -1, i, type != TRAINING);
token++;
if (type != TRAINING && pos == (uint)-1){ //this feature was not observed on training, skip
continue;
}
valarray[index] = 0;
positions[index] = pos;
index++;
}
else if (token == fc.to_pos){
/* READ TO */
char * pch = strsep(&linebuf, "\t,\r\n: ");
if (pch == NULL)
logstream(LOG_FATAL)<<"Error reading line " << i << " [ " << linebuf_debug << " ] " << std::endl;
J = (uint)get_node_id(pch, 1, i, type != TRAINING);
token++;
}
else if (token == fc.val_pos){
/* READ RATING */
char * pch = strsep(&linebuf, "\t,\r\n ");
if (pch == NULL)
logstream(LOG_FATAL)<<"Error reading line " << i << " [ " << linebuf_debug << " ] " << std::endl;
val = get_value(pch, type != TRAINING);
token++;
}
else {
/* READ FEATURES: each feature is a (name, value) token pair */
char * pch = strsep(&linebuf, "\t,\r\n:; ");
if (pch == NULL || pch[0] == 0)
break; //end of line
uint pos = get_node_id(pch, -1, i, type != TRAINING);
if (type != TRAINING && pos == (uint)-1){ //this feature was not observed on training, skip
char * pch2 = strsep(&linebuf, "\t\r\n ");
if (pch2 == NULL || pch2[0] == 0)
logstream(LOG_FATAL)<<"Error reading line " << i << " feature2 " << index << " [ " << linebuf_debug << " ] " << std::endl;
skipped_features++;
continue;
}
assert(pos != (uint)-1 && pos < fc.index_map.string2nodeid.size());
char * pch2 = strsep(&linebuf, "\t\r\n ");
if (pch2 == NULL || pch2[0] == 0)
logstream(LOG_FATAL)<<"Error reading line " << i << " feature2 " << index << " [ " << linebuf_debug << " ] " << std::endl;
uint second_index = get_node_id(pch2, pos, i, type != TRAINING);
if (type != TRAINING && second_index == (uint)-1){ //this value was not observed in training, skip
second_index = 0; //skipped_features++;
//continue;
}
assert(second_index != (uint)-1);
assert(index< (int)valarray.size());
assert(index< (int)positions.size());
valarray[index] = second_index;
positions[index] = pos;
index++;
token++;
}
}//end while
free(linebuf_to_free);
return true;
}//end read_line
/* Compute an edge prediction based on input features.
 * Collects pointers to the user node, the item node, and each feature node
 * named on the rating line into node_array, then delegates the actual math
 * to prediction_func (e.g. gensgd_predict). Returns the loss for this
 * rating; fills 'prediction', 'psum' and 'exp_prediction' as side outputs. */
float compute_prediction(
uint I,
uint J,
const float val,
double & prediction,
uint * valarray,
uint * positions,
uint edge_size,
float (*prediction_func)(std::vector<vertex_data*>& node_array, int arraysize, float rating, double & prediction, fvec * psumi, double * exp_prediction),
fvec * psum,
std::vector<vertex_data*>& node_array,
uint node_array_size,
double & exp_prediction){
/* COMPUTE PREDICTION */
/* USER NODE **/
int index = 0;
int loc = 0;
node_array[index] = &latent_factors_inmem[I+fc.offsets[index]];
assert(node_array[index]->pvec[0] < 1e5); //sanity: factors should not have diverged
index++; loc++;
/* 1) ITEM NODE */
assert(J+fc.offsets[index] < latent_factors_inmem.size());
node_array[index] = &latent_factors_inmem[J+fc.offsets[index]];
assert(node_array[index]->pvec[0] < 1e5);
index++; loc++;
/* 2) FEATURES GIVEN IN RATING LINE */
for (int j=0; j< (int)edge_size; j++){
assert(fc.offsets.size() > positions[j]);
//each feature node lives at: group offset of its column + its value bin
uint pos = fc.offsets[positions[j]] + valarray[j];
assert(pos >= 0 && pos < latent_factors_inmem.size());
assert(j+index < (int)node_array_size);
node_array[j+index] = & latent_factors_inmem[pos];
assert(node_array[j+index]->pvec[0] < 1e5);
}
index+= edge_size;
loc += edge_size;
assert(index == calc_feature_node_array_size(I,J, edge_size));
return (*prediction_func)(node_array, node_array_size, val, prediction, psum, &exp_prediction);
}
#include "rmse.hpp"
/**
* Create a bipartite graph from a matrix. Each row corresponds to vertex
* with the same id as the row number (0-based), but vertices correponsing to columns
* have id + num-rows.
* Line format of the type
* [user] [item] [feature1] [feature2] ... [featureN] [rating]
*/
/* Read input file, process it and save a binary representation for faster loading */
/**
 * Read the input file, parse every rating line together with its features,
 * and feed the resulting edges into the sharder, which writes a binary shard
 * representation for faster subsequent loading. Also accumulates the global
 * stats L (number of ratings) and globalMean.
 * @return the number of shards created.
 */
template <typename als_edge_type>
int convert_matrixmarket_N(std::string base_filename, bool square, feature_control & fc, int limit_rating = 0) {
// Note, code based on: http://math.nist.gov/MatrixMarket/mmio/c/example_read.c
FILE *f;
size_t nz;
/**
 * Create sharder object
 */
int nshards;
sharder<als_edge_type> sharderobj(base_filename);
sharderobj.start_preprocessing();
detect_matrix_size(base_filename, f, M, N, nz);
/* if .info file is not present, try to find matrix market header inside the base_filename file */
if (format == "libsvm")
assert(!has_header_titles);
if (has_header_titles){
char * linebuf = NULL;
size_t linesize;
// BUGFIX: zero-initialize — the old code printed this buffer uninitialized
// when getline() failed below.
char linebuf_debug[1024] = {0};
/* READ LINE */
int rc = getline(&linebuf, &linesize, f);
if (rc == -1)
logstream(LOG_FATAL)<<"Error header line " << " [ " << linebuf_debug << " ] " << std::endl;
// BUGFIX: copy at most 1023 bytes so the debug copy stays NUL-terminated
// (strncpy does not terminate when the source fills the buffer).
strncpy(linebuf_debug, linebuf, 1023);
/** READ [FROM] */
char *pch = strtok(linebuf,"\t,\r; ");
if (pch == NULL)
logstream(LOG_FATAL)<<"Error header line " << " [ " << linebuf_debug << " ] " << std::endl;
header_titles.push_back(pch);
/** READ USER FEATURES */
while (pch != NULL){
pch = strtok(NULL, "\t,\r; ");
if (pch == NULL)
break;
header_titles.push_back(pch);
//update stats if needed
}
free(linebuf); // BUGFIX: getline() allocated this buffer; the old code leaked it
}
if (M == 0 && N == 0)
logstream(LOG_FATAL)<<"Failed to detect matrix size. Please prepare a file named: " << base_filename << ":info with matrix market header, as explained here: http://bickson.blogspot.co.il/2012/12/collaborative-filtering-3rd-generation_14.html " << std::endl;
logstream(LOG_INFO) << "Starting to read matrix-market input. Matrix dimensions: " << M << " x " << N << ", non-zeros: " << nz << std::endl;
uint I, J;
std::vector<uint> valarray; valarray.resize(FEATURE_WIDTH);
std::vector<uint> positions; positions.resize(FEATURE_WIDTH);
float val;
if (limit_rating > 0)
nz = limit_rating;
int skipped_features = 0;
for (size_t i=0; i<nz; i++)
{
int index;
if (!read_line(f, base_filename, i,I, J, val, valarray, positions, index, TRAINING, skipped_features))
logstream(LOG_FATAL)<<"Failed to read line: " <<i<< " in file: " << base_filename << std::endl;
if (index < 1)
logstream(LOG_FATAL)<<"Failed to read line: " <<i<< " in file: " << base_filename << std::endl;
if (nz > 1000000 && (i % 1000000) == 0)
logstream(LOG_INFO)<< mytimer.current_time() << " Finished reading " << i << " lines " << std::endl;
//calc stats
L++;
globalMean += val;
sharderobj.preprocessing_add_edge(I, square?J:M+J, als_edge_type(val, &valarray[0], &positions[0], index));
}
sharderobj.end_preprocessing();
//calc stats
assert(L > 0);
assert(globalMean != 0);
globalMean /= L;
std::cout<<"Computed global mean is: " << globalMean << std::endl;
fclose(f);
logstream(LOG_INFO) << "Now creating shards." << std::endl;
// Shard with a specified number of shards, or determine automatically if not defined
nshards = sharderobj.execute_sharding(get_option_string("nshards", "auto"));
return nshards;
}
/* Comparator for (label, prediction) pairs: orders by descending prediction,
 * used to rank validation examples before computing the ROC statistic. */
static bool mySort(const std::pair<double, double> &p1,const std::pair<double, double> &p2)
{
return p2.second < p1.second;
}
/**
 * Compute the validation error for the current iteration: reads the whole
 * validation file, predicts every rating with prediction_func, and reports
 * RMSE/loss, optional classification error (cutoff/val_cutoff) and optional
 * ROC/AUC. May stop the engine when halt_on_rmse_increase is set and the
 * validation error grew compared to the previous iteration.
 */
void validation_rmse_N(
float (*prediction_func)(std::vector<vertex_data*>& array, int arraysize, float rating, double & prediction, fvec * psum, double * exp_prediction)
,graphchi_context & gcontext,
feature_control & fc,
bool square = false) {
if (validation == "")
return;
FILE * f = NULL;
size_t nz = 0;
detect_matrix_size(validation, f, Me, Ne, nz);
if (f == NULL){
logstream(LOG_WARNING)<<"Failed to open validation file: " << validation << " - skipping."<<std::endl;
return;
}
if ((M > 0 && N > 0) && (Me != M || Ne != N))
logstream(LOG_WARNING)<<"Input size of validation matrix must be identical to training matrix, namely " << M << "x" << N << std::endl;
Le = nz;
last_validation_rmse = dvalidation_rmse;
dvalidation_rmse = 0;
std::vector<uint> valarray; valarray.resize(FEATURE_WIDTH);
std::vector<uint> positions; positions.resize(FEATURE_WIDTH);
uint I, J;
float val;
int skipped_features = 0; //features unseen in training (dropped per-feature)
int skipped_nodes = 0; //whole lines dropped because user/item was unseen
int errors = 0; //classification errors w.r.t. cutoff/val_cutoff
//FOR ROC. ROC code thanks to Justin Yan.
double _M = 0; //number of positive validation examples
double _N = 0; //number of negative validation examples
std::vector<std::pair<double, double> > realPrediction; //(label, raw sigmoid output)
double avg_pred = 0;
for (size_t i=0; i<nz; i++)
{
int index;
if (!read_line(f, validation, i, I, J, val, valarray, positions, index, VALIDATION, skipped_features))
logstream(LOG_FATAL)<<"Failed to read line: " << i << " in file: " << validation << std::endl;
if (I == (uint)-1 || J == (uint)-1){ //user or item never seen in training
skipped_nodes++;
continue;
}
double prediction;
int howmany = calc_feature_node_array_size(I,J, index);
std::vector<vertex_data*> node_array; node_array.resize(howmany);
for (int k=0; k< howmany; k++)
node_array[k] = NULL;
fvec sum;
double exp_prediction = 0;
dvalidation_rmse += compute_prediction(I, J, val, prediction, &valarray[0], &positions[0], index, prediction_func, &sum, node_array, howmany, exp_prediction);
avg_pred += prediction;
if (calc_roc)
realPrediction.push_back(std::make_pair(val, exp_prediction));
//count misclassifications relative to the configured thresholds
if (prediction < cutoff && val >= val_cutoff)
errors++;
else if (prediction >= cutoff && val < val_cutoff)
errors++;
}
fclose(f);
std::cout<<"avg validation prediction: " << avg_pred/(double)nz << std::endl;
assert(Le > 0);
dvalidation_rmse = finalize_rmse(dvalidation_rmse , (double)(Le-skipped_nodes));
std::cout<<" Validation " << error_names[loss_type] << " : " << std::setw(10) << dvalidation_rmse;
if (calc_error)
std::cout<<" Validation Err: " << std::setw(10) << ((double)errors/(double)(nz-skipped_nodes));
if (calc_roc){
//rank-based AUC computation over examples sorted by descending prediction
double roc = 0;
double ret = 0;
std::vector<double> L;
std::sort(realPrediction.begin(), realPrediction.end(),mySort);
std::vector<std::pair<double, double> >::iterator iter;
for(iter=realPrediction.begin();iter!=realPrediction.end();iter++)
{
L.push_back(iter->first);
if(iter->first > cutoff) _M++;
else _N++;
}
std::vector<double>:: iterator iter2;
int i=0;
for(iter2=L.begin();iter2!=L.end();iter2++)
{
if(*iter2 > cutoff) ret += ((_M+_N) - i); //sum ranks of the positive examples
i++;
}
double ret2 = _M *(_M+1)/2;
roc= (ret-ret2)/(_M*_N);
std::cout<<" Validation ROC: " << roc << std::endl;
}
else std::cout<<std::endl;
if (halt_on_rmse_increase && dvalidation_rmse > last_validation_rmse && gcontext.iteration > 0){
logstream(LOG_WARNING)<<"Stopping engine because of validation RMSE increase" << std::endl;
gcontext.set_last_iteration(gcontext.iteration);
}
if (skipped_features > 0)
std::cout<<"Skipped " << skipped_features << " when reading from file. " << std::endl;
if (skipped_nodes > 0)
std::cout<<"Skipped " << skipped_nodes << " when reading from file. " << std::endl;
}
/* Compute predictions for the test data and write them, one per line, to
 * <test>.predict in matrix market array format. Lines whose user/item was
 * never seen in training get a default prediction of 0. */
void test_predictions_N(
float (*prediction_func)(std::vector<vertex_data*>& node_array, int node_array_size, float rating, double & prediction, fvec * sum, double * exp_prediction),
feature_control & fc,
bool square = false) {
FILE *f = NULL;
uint Me, Ne; //local sizes of the test matrix (shadow the global training sizes)
size_t nz;
if (test == ""){
logstream(LOG_INFO)<<"No test file was found, skipping test predictions " << std::endl;
return;
}
detect_matrix_size(test, f, Me, Ne, nz);
if (f == NULL){
logstream(LOG_WARNING)<<"Failed to open test file " << test<< " skipping test predictions " << std::endl;
return;
}
if ((M > 0 && N > 0 ) && (Me != M || Ne != N))
logstream(LOG_FATAL)<<"Input size of test matrix must be identical to training matrix, namely " << M << "x" << N << std::endl;
FILE * fout = open_file((test + ".predict").c_str(),"w", false);
MM_typecode matcode;
mm_set_array(&matcode);
mm_write_banner(fout, matcode);
mm_write_mtx_array_size(fout ,nz, 1); //one prediction per test line
std::vector<uint> valarray; valarray.resize(FEATURE_WIDTH);
std::vector<uint> positions; positions.resize(FEATURE_WIDTH);
float val;
double prediction;
uint I,J;
int skipped_features = 0;
int skipped_nodes = 0;
for (uint i=0; i<nz; i++)
{
int index;
if (!read_line(f, test, i, I, J, val, valarray, positions, index, TEST, skipped_features))
logstream(LOG_FATAL)<<"Failed to read line: " <<i << " in file: " << test << std::endl;
if (I == (uint)-1 || J == (uint)-1){
skipped_nodes++;
fprintf(fout, "%d\n", 0); //features for this node are not found in the training set, write a default value
continue;
}
int howmany = calc_feature_node_array_size(I,J,index);
std::vector<vertex_data*> node_array; node_array.resize(howmany);
for (int k=0; k< howmany; k++)
node_array[k] = NULL;
fvec sum;
double exp_prediction = 0;
compute_prediction(I, J, val, prediction, &valarray[0], &positions[0], index, prediction_func, &sum, node_array, howmany, exp_prediction);
fprintf(fout, "%12.8lg\n", prediction);
}
fclose(f);
fclose(fout);
logstream(LOG_INFO)<<"Finished writing " << nz << " predictions to file: " << test << ".predict" << std::endl;
if (skipped_features > 0)
logstream(LOG_DEBUG)<<"Skipped " << skipped_features << " when reading from file. " << std::endl;
if (skipped_nodes > 0)
logstream(LOG_WARNING)<<"Skipped node in test dataset: " << skipped_nodes << std::endl;
}
/* Factorization-machine style prediction over the collected nodes:
 *   p = globalMean/maxval + sum_i bias_i
 *       + 0.5 * sum_j ( (sum_i v_ij)^2 - sum_i v_ij^2 )
 * squashed through a sigmoid and rescaled to [minval, maxval].
 * Fills 'sum' (per-dimension factor sums, reused by the SGD gradient) and,
 * when extra != NULL, the raw sigmoid output. Returns the loss for this
 * rating as computed by calc_loss. */
float gensgd_predict(std::vector<vertex_data*> & node_array, int node_array_size,
const float rating, double& prediction, fvec* sum, double * extra){
fvec sum_sqr = fzeros(D);
*sum = fzeros(D);
prediction = globalMean/maxval;
assert(!std::isnan(prediction));
for (int i=0; i< node_array_size; i++)
prediction += node_array[i]->bias;
assert(!std::isnan(prediction));
for (int j=0; j< D; j++){
for (int i=0; i< node_array_size; i++){
sum->operator[](j) += node_array[i]->pvec[j];
assert(sum->operator[](j) < 1e5); //diverging factors would overflow here
sum_sqr[j] += pow(node_array[i]->pvec[j],2);
}
//pairwise interaction term via the (sum^2 - sum of squares)/2 identity
prediction += 0.5 * (pow(sum->operator[](j),2) - sum_sqr[j]);
assert(!std::isnan(prediction));
}
//truncate prediction to allowed values
double exp_prediction = 1.0 / (1.0 + exp(-prediction));
prediction = minval + exp_prediction *(maxval-minval);
//prediction = std::min((double)prediction, maxval);
//prediction = std::max((double)prediction, minval);
//return the squared error
float err = rating - prediction;
if (extra != NULL)
*(double*)extra = exp_prediction;
return calc_loss(exp_prediction, err);
}
/* Allocate all latent factor nodes (M users + N items + one node per observed
 * feature bin), compute the group offsets inside latent_factors_inmem, and —
 * unless factors were loaded from file — randomly initialize the vectors. */
void init_gensgd(bool load_factors_from_file){
srand(time(NULL));
int nodes = M+N+num_feature_bins();
latent_factors_inmem.resize(nodes);
int howmany = calc_feature_num();
logstream(LOG_DEBUG)<<"Going to calculate: " << howmany << " offsets." << std::endl;
fc.offsets.resize(howmany);
get_offsets(fc.offsets);
assert(D > 0);
if (!load_factors_from_file){
double factor = 0.1/sqrt(D); //keep initial predictions small regardless of D
#pragma omp parallel for
for (int i=0; i< nodes; i++){
latent_factors_inmem[i].pvec = (debug ? 0.1*fones(D) : (::frandu(D)*factor));
}
}
}
/* Aggregate the per-thread error accumulators and print the training loss
 * (and optionally the classification error) for the finished iteration.
 * The 'items' parameter is kept for interface compatibility with similar
 * routines in sibling programs; it is unused here (the old start/end locals
 * derived from it were dead code and have been removed). */
void training_rmse_N(int iteration, graphchi_context &gcontext, bool items = false){
last_training_rmse = dtraining_rmse;
dtraining_rmse = 0;
size_t total_errors = 0;
dtraining_rmse = sum(rmse_vec);
if (calc_error){
total_errors = sum(errors_vec);
}
dtraining_rmse = finalize_rmse(dtraining_rmse, (double)pengine->num_edges());
if (calc_error)
std::cout<< std::setw(10) << mytimer.current_time() << ") Iteration: " << std::setw(3) <<iteration<<" Training " << error_names[loss_type] << " : " <<std::setw(10)<< dtraining_rmse << " Train err: " << std::setw(10) << (total_errors/(double)L);
else
std::cout<< std::setw(10) << mytimer.current_time() << ") Iteration: " << std::setw(3) <<iteration<<" Training " << error_names[loss_type] << " : " << std::setw(10)<< dtraining_rmse;
}
/**
* GraphChi programs need to subclass GraphChiProgram<vertex-type, edge-type>
* class. The main logic is usually in the update function.
*/
struct Sparse_GensgdVerticesInMemProgram : public GraphChiProgram<VertexDataType, EdgeDataType> {
/*
 * Vertex update function - computes the least square step.
 * Only user vertices do work: for each of their ratings the current
 * prediction is computed, and then the global mean, all involved node
 * biases and all involved latent vectors take one SGD step.
 */
void update(graphchi_vertex<VertexDataType, EdgeDataType> &vertex, graphchi_context &gcontext) {
//go over all user nodes
if (is_user(vertex.id())){
//go over all observed ratings
for(int e=0; e < vertex.num_outedges(); e++) {
const edge_data & data = vertex.edge(e)->get_data();
int howmany = calc_feature_node_array_size(vertex.id(), vertex.edge(e)->vertex_id()-M, data.size);
std::vector<vertex_data*> node_array; node_array.resize(howmany);
for (int i=0; i< howmany; i++)
node_array[i] = NULL;
float rui = data.weight; //observed rating
double pui; //predicted rating
fvec sum; //per-dimension factor sums, reused in the gradient below
//compute current prediction
double exp_prediction = 0;
rmse_vec[omp_get_thread_num()] += compute_prediction(vertex.id(), vertex.edge(e)->vertex_id()-M, rui ,pui, (uint*)data.features, (uint*)data.index, data.size, gensgd_predict, &sum, node_array, howmany, exp_prediction);
if (calc_error){
if ((pui < cutoff && rui > cutoff) || (pui > cutoff && rui < cutoff))
errors_vec[omp_get_thread_num()]++;
}
float eui = pui - rui; //prediction error
eui = calc_error_f(exp_prediction, eui);
//update global mean bias
globalMean -= gensgd_rate1 * (eui + gensgd_reg0 * globalMean);
//update node biases and vectors; each node group has its own learning rate
for (int i=0; i < howmany; i++){
double gensgd_rate;
if (i == 0) //user
gensgd_rate = gensgd_rate1;
else if (i == 1) //item
gensgd_rate = gensgd_rate2;
else if (i < (int)(data.size+2)) //rating features
gensgd_rate = gensgd_rate3;
else if (i < (int)(2+data.size+fc.node_features)) //user and item features
gensgd_rate = gensgd_rate4;
else
gensgd_rate = gensgd_rate5; //last item
node_array[i]->bias -= gensgd_rate * (eui + gensgd_regw* node_array[i]->bias);
assert(!std::isnan(node_array[i]->bias));
assert(node_array[i]->bias < 1e3); //divergence guard
//factorization-machine gradient: interaction of this node with all others
fvec grad = sum - node_array[i]->pvec;
node_array[i]->pvec -= gensgd_rate * (eui*grad + gensgd_regv * node_array[i]->pvec);
assert(!std::isnan(node_array[i]->pvec[0]));
assert(node_array[i]->pvec[0] < 1e3);
}
}
}
};
/**
 * Called after an iteration has finished: decay all learning rates and
 * report training/validation error.
 */
void after_iteration(int iteration, graphchi_context &gcontext) {
if (iteration == 1 && vertex_with_no_edges > 0)
logstream(LOG_WARNING)<<"There are " << vertex_with_no_edges << " users without ratings" << std::endl;
gensgd_rate1 *= gensgd_mult_dec;
gensgd_rate2 *= gensgd_mult_dec;
gensgd_rate3 *= gensgd_mult_dec;
gensgd_rate4 *= gensgd_mult_dec;
gensgd_rate5 *= gensgd_mult_dec;
training_rmse_N(iteration, gcontext);
validation_rmse_N(&gensgd_predict, gcontext, fc);
};
/**
 * Called before an iteration is started: reset the per-thread accumulators.
 */
void before_iteration(int iteration, graphchi_context &gcontext) {
rmse_vec = zeros(number_of_omp_threads());
if (calc_error)
errors_vec = zeros(number_of_omp_threads());
}
};
/* Write the learned model in matrix market format: the factor matrix for all
 * nodes, the bias vector, and the global mean (all needed for predictions). */
void output_gensgd_result(std::string filename) {
MMOutputter_mat<vertex_data> mmoutput(filename + "_U.mm", 0, M+N+num_feature_bins(), "This file contains Sparse_Gensgd output matrices. In each row D factors of a single user node, then item nodes, then features", latent_factors_inmem);
MMOutputter_vec<vertex_data> mmoutput_bias(filename + "_U_bias.mm", 0, num_feature_bins(), BIAS_POS,"This file contains Sparse_Gensgd output bias vector. In each row a single user bias.", latent_factors_inmem);
MMOutputter_scalar gmean(filename + "_global_mean.mm", "This file contains Sparse_Gensgd global mean which is required for computing predictions.", globalMean);
logstream(LOG_INFO) << " GENSGD output files (in matrix market format): " << filename << "_U.mm" << ", "<< filename << "_global_mean.mm, " << filename << "_U_bias.mm " <<std::endl;
}
/**
 * Entry point for the gensgd (factorization machine SGD) program: parses
 * command line options, converts/shards the input, optionally loads a
 * previously saved model, trains, and writes the model and test predictions.
 */
int main(int argc, const char ** argv) {
print_copyright();
/* GraphChi initialization will read the command line
 arguments and the configuration file. */
graphchi_init(argc, argv);
/* Metrics object for keeping track of performance counters
 and other information. Currently required. */
metrics m("als-tensor-inmemory-factors");
//specific command line parameters for gensgd
gensgd_rate1 = get_option_float("gensgd_rate1", gensgd_rate1);
gensgd_rate2 = get_option_float("gensgd_rate2", gensgd_rate2);
gensgd_rate3 = get_option_float("gensgd_rate3", gensgd_rate3);
gensgd_rate4 = get_option_float("gensgd_rate4", gensgd_rate4);
gensgd_rate5 = get_option_float("gensgd_rate5", gensgd_rate5);
gensgd_regw = get_option_float("gensgd_regw", gensgd_regw);
gensgd_regv = get_option_float("gensgd_regv", gensgd_regv);
gensgd_reg0 = get_option_float("gensgd_reg0", gensgd_reg0);
gensgd_mult_dec = get_option_float("gensgd_mult_dec", gensgd_mult_dec);
fc.hash_strings = get_option_int("rehash", fc.hash_strings);
user_file = get_option_string("user_file", user_file);
user_links = get_option_string("user_links", user_links);
item_file = get_option_string("item_file", item_file);
D = get_option_int("D", D);
fc.from_pos = get_option_int("from_pos", fc.from_pos);
fc.to_pos = get_option_int("to_pos", fc.to_pos);
fc.val_pos = get_option_int("val_pos", fc.val_pos);
limit_rating = get_option_int("limit_rating", limit_rating);
calc_error = get_option_int("calc_error", calc_error);
calc_roc = get_option_int("calc_roc", calc_roc);
round_float = get_option_int("round_float", round_float);
has_header_titles = get_option_int("has_header_titles", has_header_titles);
fc.rehash_value = get_option_int("rehash_value", fc.rehash_value);
cutoff = get_option_float("cutoff", cutoff);
val_cutoff = get_option_float("val_cutoff", val_cutoff);
binary = get_option_int("binary", binary);
parse_command_line_args();
parse_implicit_command_line();
fc.node_id_maps.resize(2); //initial place for from/to map
//fc.stats_array.resize(fc.total_features);
/* libsvm input has fixed column roles: label first, then from/to. */
if (format == "libsvm"){
fc.val_pos = 0;
fc.to_pos = 2;
fc.from_pos = 1;
binary = false;
fc.hash_strings = true;
}
int nshards = convert_matrixmarket_N<edge_data>(training, false, fc, limit_rating);
fc.total_features = fc.index_map.string2nodeid.size();
if (load_factors_from_file){
load_matrix_market_matrix(training + "_U.mm", 0, D);
vec user_bias = load_matrix_market_vector(training +"_U_bias.mm", false, true);
assert(user_bias.size() == num_feature_bins());
/* BUGFIX: the loop condition was 'num_feature_bins()' (a constant, true
 * whenever any bins exist) instead of a bound on i, so the loop ran past
 * the end of user_bias and latent_factors_inmem. */
for (uint i=0; i < (uint)num_feature_bins(); i++){
latent_factors_inmem[i].bias = user_bias[i];
}
vec gm = load_matrix_market_vector(training + "_global_mean.mm", false, true);
globalMean = gm[0];
}
init_gensgd(load_factors_from_file);
if (has_header_titles && header_titles.size() == 0)
logstream(LOG_FATAL)<<"Please delete temp files (using : \"rm -f " << training << ".*\") and run again" << std::endl;
logstream(LOG_INFO)<<"Target variable " << std::setw(3) << fc.val_pos << " : " << (has_header_titles? header_titles[fc.val_pos] : "") <<std::endl;
logstream(LOG_INFO)<<"From " << std::setw(3) << fc.from_pos<< " : " << (has_header_titles? header_titles[fc.from_pos] : "") <<std::endl;
logstream(LOG_INFO)<<"To " << std::setw(3) << fc.to_pos << " : " << (has_header_titles? header_titles[fc.to_pos] : "") <<std::endl;
/* Run */
Sparse_GensgdVerticesInMemProgram program;
graphchi_engine<VertexDataType, EdgeDataType> engine(training, nshards, false, m);
set_engine_flags(engine);
pengine = &engine;
engine.run(program, niters);
/* Output test predictions in matrix-market format */
output_gensgd_result(training);
test_predictions_N(&gensgd_predict, fc);
/* Report execution metrics */
metrics_report(m);
return 0;
}
| C++ |
/**
* @file
* @author Mark Levy
* @version 1.0
*
* @section LICENSE
*
* Copyright [2012] [Carnegie Mellon University]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @section DESCRIPTION
*
* CLiMF Collaborative Less-is-More Filtering, a variant of latent factor CF
* which optimises a lower bound of the smoothed reciprocal rank of "relevant"
* items in ranked recommendation lists. The intention is to promote diversity
* as well as accuracy in the recommendations. The method assumes binary
* relevance data, as for example in friendship or follow relationships.
*
* CLiMF: Learning to Maximize Reciprocal Rank with Collaborative Less-is-More Filtering
* Yue Shi, Martha Larson, Alexandros Karatzoglou, Nuria Oliver, Linas Baltrunas, Alan Hanjalic
* ACM RecSys 2012
*
*/
#include <string>
#include <algorithm>
#include "util.hpp"
#include "eigen_wrapper.hpp"
#include "common.hpp"
#include "climf.hpp"
#include "io.hpp"
#include "rmse.hpp" // just for test_predictions()
#include "mrr_engine.hpp"
/**
* GraphChi programs need to subclass GraphChiProgram<vertex-type, edge-type>
* class. The main logic is usually in the update function.
*/
/**
 * CLiMF in-memory SGD program (Shi et al., RecSys 2012).
 * Each user vertex update performs one gradient-ascent step on the smoothed
 * reciprocal-rank lower bound; per-thread contributions to the training
 * objective are accumulated in objective_vec and summed after each iteration.
 */
struct SGDVerticesInMemProgram : public GraphChiProgram<VertexDataType, EdgeDataType> {
/**
* Called before an iteration is started.
* Resets the per-thread MRR accumulators, remembers the previous objective
* value (for the early-stopping test) and zeroes the per-thread objective slots.
*/
void before_iteration(int iteration, graphchi_context &gcontext)
{
logstream(LOG_DEBUG) << "before_iteration: resetting MRR" << std::endl;
reset_mrr(gcontext.execthreads);
last_training_objective = training_objective;
objective_vec = zeros(gcontext.execthreads);
}
/**
* Called after an iteration has finished.
* Sums the per-thread objective contributions, optionally stops early when
* the objective decreased, runs the validation engine and decays the step size.
*/
void after_iteration(int iteration, graphchi_context &gcontext)
{
training_objective = sum(objective_vec);
std::cout<<" Training objective:" << std::setw(10) << training_objective << std::endl;
// early stop only after halt_on_mrr_decrease iterations have elapsed
if (halt_on_mrr_decrease > 0 && halt_on_mrr_decrease < cur_iteration && training_objective < last_training_objective)
{
logstream(LOG_WARNING) << "Stopping engine because of validation objective decrease" << std::endl;
gcontext.set_last_iteration(gcontext.iteration);
}
logstream(LOG_DEBUG) << "after_iteration: running validation engine" << std::endl;
run_validation(pvalidation_engine, gcontext);
sgd_gamma *= sgd_step_dec; // decay the SGD learning rate
}
/**
* Vertex update function.
* For a user vertex: precompute the scores f_ij = <U_i, V_j> for all relevant
* neighbors, take a gradient step on each relevant item's factors V_j and on
* the user's factors U_i, then accumulate the (regularized) smoothed-MRR
* objective for this user into the calling thread's slot of objective_vec.
*/
void update(graphchi_vertex<VertexDataType, EdgeDataType> &vertex, graphchi_context &gcontext)
{
// start from the L2 regularization term for this user's factor vector
double objective = -0.5*sgd_lambda*latent_factors_inmem[vertex.id()].pvec.squaredNorm();
// go over all user nodes
if (vertex.num_outedges() > 1) // can't compute with CLiMF if we have only 1 out edge!
{
vec & U = latent_factors_inmem[vertex.id()].pvec;
int Ni = vertex.num_edges();
// precompute f_{ij} = <U_i,V_j> for j = 1..N_i
std::vector<double> f(Ni);
int num_relevant = 0;
for (int j = 0; j < Ni; ++j)
{
if (is_relevant(vertex.edge(j)))
{
const vec & Vj = latent_factors_inmem[vertex.edge(j)->vertex_id()].pvec;
f[j] = dot(U, Vj);
++num_relevant;
}
}
if (num_relevant < 2)
{
return; // need at least 2 edges to compute updates with CLiMF!
}
// compute gradients
vec dU = -sgd_lambda*U; // user gradient starts from the regularization term
for (int j = 0; j < Ni; ++j)
{
if (is_relevant(vertex.edge(j)))
{
vec & Vj = latent_factors_inmem[vertex.edge(j)->vertex_id()].pvec;
// item gradient: direct term minus regularization, plus pairwise terms below
vec dVj = g(-f[j])*ones(D) - sgd_lambda*Vj;
for (int k = 0; k < Ni; ++k)
{
if (k != j && is_relevant(vertex.edge(k)))
{
dVj += dg(f[j]-f[k])*(1.0/(1.0-g(f[k]-f[j]))-1.0/(1.0-g(f[j]-f[k])))*U;
}
}
Vj += sgd_gamma*dVj; // not thread-safe
dU += g(-f[j])*Vj;
for (int k = 0; k < Ni; ++k)
{
if (k != j && is_relevant(vertex.edge(k)))
{
const vec & Vk = latent_factors_inmem[vertex.edge(k)->vertex_id()].pvec;
dU += (Vj-Vk)*dg(f[k]-f[j])/(1.0-g(f[k]-f[j]));
}
}
}
}
U += sgd_gamma*dU; // not thread-safe
// compute smoothed MRR (the quantity being maximized) for reporting
for(int j = 0; j < Ni; j++)
{
if (is_relevant(vertex.edge(j)))
{
objective += std::log(g(f[j]));
for(int k = 0; k < Ni; k++)
{
if (is_relevant(vertex.edge(k)))
{
objective += std::log(1.0-g(f[k]-f[j]));
}
}
}
}
}
assert(objective_vec.size() > omp_get_thread_num());
objective_vec[omp_get_thread_num()] += objective;
}
};
//dump output to file
void output_sgd_result(std::string filename) {
MMOutputter_mat<vertex_data> user_mat(filename + "_U.mm", 0, M, "This file contains SGD output matrix U. In each row D factors of a single user node.", latent_factors_inmem);
MMOutputter_mat<vertex_data> item_mat(filename + "_V.mm", M, M+N, "This file contains SGD output matrix V. In each row D factors of a single item node.", latent_factors_inmem);
logstream(LOG_INFO) << "CLiMF output files (in matrix market format): " << filename << "_U.mm" <<
", " << filename + "_V.mm " << std::endl;
}
// compute test prediction
// Compute a test-time score for a (user, item) pair. The value written to
// 'prediction' is g(<U,V>) -- a predicted reciprocal rank, not a rating.
// The return value is unused by the prediction framework, so 0 is returned.
float climf_predict(const vertex_data& user,
const vertex_data& movie,
const float rating,
double & prediction,
void * extra = NULL)
{
  const double score = dot(user.pvec, movie.pvec);
  prediction = g(score);
  return 0;
}
int main(int argc, const char ** argv) {
/* GraphChi initialization will read the command line arguments and the configuration file. */
graphchi_init(argc, argv);
/* Metrics object for keeping track of performance counters
and other information. Currently required. */
metrics m("climf-inmemory-factors");
/* Basic arguments for application. NOTE: File will be automatically 'sharded'. */
sgd_lambda = get_option_float("sgd_lambda", 1e-3);
sgd_gamma = get_option_float("sgd_gamma", 1e-4);
sgd_step_dec = get_option_float("sgd_step_dec", 1.0);
binary_relevance_thresh = get_option_float("binary_relevance_thresh", 0);
halt_on_mrr_decrease = get_option_int("halt_on_mrr_decrease", 0);
num_ratings = get_option_int("num_ratings", 10000); //number of top predictions over which we compute actual MRR
parse_command_line_args();
parse_implicit_command_line();
/* Preprocess data if needed, or discover preprocess files */
bool allow_square = false;
int nshards = convert_matrixmarket<EdgeDataType>(training, NULL, 0, 0, 3, TRAINING, allow_square);
/* Random-init the factor vectors (skipped when loading factors from file) */
init_feature_vectors<std::vector<vertex_data> >(M+N, latent_factors_inmem, !load_factors_from_file, 0.01);
/* Optional validation set: sharded separately and evaluated via the MRR engine */
if (validation != ""){
int vshards = convert_matrixmarket<EdgeDataType>(validation, NULL, 0, 0, 3, VALIDATION);
init_mrr_engine<VertexDataType, EdgeDataType>(pvalidation_engine, vshards);
}
/* Warm start: resume from previously saved U/V matrices */
if (load_factors_from_file)
{
load_matrix_market_matrix(training + "_U.mm", 0, D);
load_matrix_market_matrix(training + "_V.mm", M, D);
}
print_config();
/* Run */
SGDVerticesInMemProgram program;
graphchi_engine<VertexDataType, EdgeDataType> engine(training, nshards, false, m);
set_engine_flags(engine);
pengine = &engine;
engine.run(program, niters);
/* Output latent factor matrices in matrix-market format */
output_sgd_result(training);
test_predictions(&climf_predict);
/* Report execution metrics */
if (!quiet)
metrics_report(m);
return 0;
}
| C++ |
/**
* Copyright (c) 2009 Carnegie Mellon University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an "AS
* IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language
* governing permissions and limitations under the License.
*
* Written by Danny Bickson, CMU
* Utility for computing ranking metrics - comparing between recommendations and actual
* ratings in test set.
* */
#include <cstdio>
#include <map>
#include <iostream>
#include <omp.h>
#include <assert.h>
#include "graphchi_basic_includes.hpp"
#include "timer.hpp"
#include "util.hpp"
#include "eigen_wrapper.hpp"
#include "metrics.hpp"
using namespace std;
using namespace graphchi;
timer mytime; // wall-clock timer used for progress reporting
int K = 10; // AP@K cutoff: number of top recommendations scored
int max_per_row = 1000; // maximum number of values parsed from one input line
std::string training, test; // input file paths (recommendations vs. ground truth)
double avg_train_size = 0; // running sums used to report average row lengths
double avg_test_size = 0;
//non word tokens that will be removed in the parsing
//it is possible to add additional special characters or remove ones you want to keep
const char spaces[] = {" \r\n\t;:"};
/**
 * Read one line from pfile and tokenize it on the 'spaces' delimiter set.
 * The first token is parsed into 'index' (the row id); the remaining tokens
 * are stored into 'values' with their count tracked in 'pos'.
 * On EOF or read failure, 'index' is set to -1.
 *
 * Fix: the buffer allocated by getline() is now freed on every exit path --
 * the original leaked one heap buffer per line read.
 */
void get_one_line(FILE * pfile, int & index, vec & values, int & pos){
  char * saveptr = NULL, * linebuf = NULL;
  size_t linesize = 0;
  ssize_t rc = getline(&linebuf, &linesize, pfile);
  if (rc < 1){
    free(linebuf); // getline() may allocate a buffer even when it fails
    index = -1;
    return;
  }
  pos = 0;
  bool first_time = true;
  while(true){
    // strtok_r consumes linebuf on the first call, then continues via saveptr
    char *pch = strtok_r(first_time ? linebuf : NULL, spaces, &saveptr);
    if (!pch){
      free(linebuf); // done with this line: release getline()'s buffer
      return;
    }
    float val = atof(pch);
    if (first_time){
      index = (int)val; // leading token is the row id
      first_time = false;
    }
    else {
      assert(pos < values.size());
      values[pos] = val;
      pos++;
    }
  }
}
/**
 * Stream the training (recommendations) and test (ground truth) files in
 * parallel -- both are expected to be sorted by row id -- align rows with the
 * same id, and report the mean average-precision@K over all aligned rows.
 */
void eval_metrics(){
in_file trf(training);
in_file testt(test);
vec train_vec = zeros(max_per_row);
vec test_vec = zeros(max_per_row);
size_t line = 0;
int train_index = 0, test_index = 0;
double ap = 0; // accumulated average precision over aligned rows
int train_size =0, test_size = 0;
while(true){
get_one_line(trf.outf, train_index, train_vec, train_size);
get_one_line(testt.outf, test_index, test_vec, test_size);
if (train_index == -1 || test_index == -1)
break; // one of the files is exhausted
// advance whichever file is behind until the row ids line up
while (test_index < train_index && test_index != -1){
logstream(LOG_WARNING)<<"Skipping over test index: " << test_index << " train: " << train_index << std::endl;
get_one_line(testt.outf, test_index, test_vec, test_size);
}
while (train_index < test_index && train_index != -1){
logstream(LOG_WARNING)<<"Skipping over train. test index: " << test_index << " train: " << train_index << std::endl;
get_one_line(trf.outf, train_index, train_vec, train_size);
}
if (train_index == test_index){
avg_train_size += train_size;
avg_test_size += test_size;
ap+= average_precision_at_k(train_vec, train_size, test_vec, test_size, K);
line++;
}
else {
logstream(LOG_WARNING)<<"Problem parsing file, got to train index: " << train_index << " test_index: " << test_index << std::endl;
break;
}
if (line % 100000 == 0)
logstream(LOG_INFO)<<mytime.current_time() <<" Finished evaluating " << line << " instances. " << std::endl;
}
// NOTE(review): if no rows were aligned, line == 0 and the divisions below
// divide by zero (printing nan/inf) -- confirm whether a guard is wanted.
logstream(LOG_INFO)<<"Computed AP@" << K << " metric: " << ap/(double)line << std::endl;
logstream(LOG_INFO)<<"Total compared: " << line << std::endl;
logstream(LOG_INFO)<<"Avg test length: " << avg_test_size / line << std::endl;
logstream(LOG_INFO)<<"Avg train length: " << avg_train_size / line << std::endl;
}
/**
 * Entry point for the ranking-metrics utility: parses --K, --training and
 * --test options, then computes AP@K between the two files.
 */
int main(int argc, const char *argv[]) {
logstream(LOG_WARNING)<<"GraphChi parsers library is written by Danny Bickson (c). Send any "
" comments or bug reports to danny.bickson@gmail.com " << std::endl;
global_logger().set_log_level(LOG_INFO);
global_logger().set_log_to_console(true);
graphchi_init(argc, argv);
// number of top recommendations considered by the AP@K metric
K = get_option_int("K", K);
if (K < 1)
logstream(LOG_FATAL)<<"Number of top elements (--K=) should be >= 1"<<std::endl;
omp_set_num_threads(get_option_int("ncpus", 1));
mytime.start();
training = get_option_string("training");
test = get_option_string("test");
eval_metrics();
std::cout << "Finished in " << mytime.current_time() << std::endl;
return 0;
}
| C++ |
#ifndef PRINTOUTS
#define PRINTOUTS
#define MAX_PRINTOUT_LEN 25
// When true, the print helpers below show magnitudes; when false, raw values.
bool absolute_value = true;
// Return |val| or val itself, depending on the absolute_value flag.
inline double fabs2(double val){
  return absolute_value ? fabs(val) : val;
}
/**
 * Debug-print up to MAX_PRINTOUT_LEN entries of the latent-factor column
 * selected by a distributed-vector descriptor (rows [vec.start, vec.end),
 * column vec.offset of latent_factors_inmem).
 * 'high' selects the high-precision printf format. No-op unless debug is set.
 */
void print_vec(const char * name, const DistVec & vec, bool high = false){
if (!debug)
return;
int i;
printf("%s[%d]\n", name, vec.offset);
for (i=vec.start; i< std::min(vec.end, MAX_PRINTOUT_LEN); i++){
if (high)
printf("%15.15lg ", fabs2(latent_factors_inmem[i].pvec[vec.offset]));
else
printf("%.5lg ", fabs2(latent_factors_inmem[i].pvec[vec.offset]));
}
printf("\n");
}
/**
 * Debug-print up to MAX_PRINTOUT_LEN entries of a dense vector.
 * 'high' selects the high-precision printf format. No-op unless debug is set.
 */
void print_vec(const char * name, const vec & pvec, bool high = false){
  if (!debug)
    return;
  printf("%s\n", name);
  const int count = std::min((int)pvec.size(), MAX_PRINTOUT_LEN);
  const char * fmt = high ? "%15.15lg " : "%.5lg ";
  for (int idx = 0; idx < count; ++idx)
    printf(fmt, fabs2(pvec[idx]));
  printf("\n");
}
/**
 * Debug-print a matrix (transposed for display; a single column is flipped
 * back to a row). 'high' selects the high-precision printf format.
 * No-op unless debug is set.
 */
void print_mat(const char * name, const mat & pmat, bool high = false){
  if (!debug)
    return;
  printf("%s\n", name);
  // transpose() takes a non-const reference, so const must be cast away here;
  // const_cast documents that intent better than the original C-style cast.
  // (Assumes transpose() does not modify its argument -- TODO confirm.)
  mat pmat2 = transpose(const_cast<mat&>(pmat));
  if (pmat2.cols() == 1)
    pmat2 = pmat2.transpose();
  for (int i= 0; i< std::min((int)pmat2.rows(), MAX_PRINTOUT_LEN); i++){
    for (int j=0; j< std::min((int)pmat2.cols(), MAX_PRINTOUT_LEN); j++){
      if (high)
        printf("%15.15lg ", fabs2(get_val(pmat2, i, j)));
      else
        printf("%.5lg ", fabs2(get_val(pmat2, i, j)));
    }
    printf("\n");
  }
}
/**
 * Debug-print one entry (i >= 0) or all leading entries (i == -1) of a vector.
 * No-op unless debug is set.
 *
 * Fix: the single-entry path used plain fabs(), which ignored the
 * absolute_value flag; it now uses fabs2() like every other print helper
 * in this file.
 */
void print_vec_pos(std::string name, vec & v, int i){
  if (!debug)
    return;
  if (i == -1)
    printf("%s\n", name.c_str());
  else {
    printf("%s[%d]: %.5lg\n", name.c_str(), i, fabs2(v[i]));
    return;
  }
  for (int j=0; j< std::min((int)v.size(),MAX_PRINTOUT_LEN); j++){
    printf("%.5lg", fabs2(v(j)));
    if (v.size() > 1)
      printf(" ");
  }
  printf("\n");
}
#define PRINT_VEC(a) print_vec(#a,a,0)
#define PRINT_VEC2(a,b) print_vec(a,b,0)
#define PRINT_VEC3(a,b,c) print_vec_pos(a,b,c)
#define PRINT_VEC2_HIGH(a,i) print_vec(#a,a[i],1)
#define PRINT_INT(a) if (debug) printf("%s: %d\n", #a, a);
#define PRINT_NAMED_INT(a,b) if (debug) printf("%s: %d\n",a, b);
#define PRINT_DBL(a) if (debug) printf("%s: %.5lg\n", #a, a);
#define PRINT_NAMED_DBL(a,b) if (debug) printf("%s: %.5lg\n", a, b);
#define PRINT_MAT(a) print_mat(#a, a, 0);
#define PRINT_MAT2(a,b) print_mat(a,b,0);
#endif
| C++ |
/**
* @file
* @author Danny Bickson
* @version 1.0
*
* @section LICENSE
*
* Copyright [2012] [Carnegie Mellon University]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @section DESCRIPTION
*
* Matrix factorization with the Koren's SVD++ algorithm.
* Algorithm described in the paper:
*
*/
#include "common.hpp"
#include "eigen_wrapper.hpp"
/**
 * Hyper-parameters for SVD++: per-parameter-group learning rates ("Step"),
 * regularization weights ("Reg"), and the multiplicative step decay applied
 * after each iteration. The gammaN remarks follow Koren's paper notation.
 */
struct svdpp_params{
  float itmBiasStep;
  float itmBiasReg;
  float usrBiasStep;
  float usrBiasReg;
  float usrFctrStep;
  float usrFctrReg;
  float itmFctrStep;
  float itmFctrReg; //gamma7
  float itmFctr2Step;
  float itmFctr2Reg;
  float step_dec;
  svdpp_params()
    : itmBiasStep(1e-4f), itmBiasReg(1e-4f),
      usrBiasStep(1e-4f), usrBiasReg(2e-4f),
      usrFctrStep(1e-4f), usrFctrReg(2e-4f),
      itmFctrStep(1e-4f), itmFctrReg(1e-4f), //gamma7
      itmFctr2Step(1e-4f), itmFctr2Reg(1e-4f),
      step_dec(0.9f) { }
};
svdpp_params svdpp;
#define BIAS_POS -1
/**
 * Per-vertex SVD++ state. Users and items both carry a factor vector (pvec)
 * and a bias; 'weight' holds the implicit-feedback factors y_j (for items)
 * or their normalized aggregate (for users).
 * set_val/get_val address a flat layout: BIAS_POS selects the bias,
 * [0, D) selects pvec, [D, 2D) selects weight.
 */
struct vertex_data {
  vec pvec;
  vec weight;
  double bias;
  vertex_data() : bias(0) {
    pvec = zeros(D);
    weight = zeros(D);
  }
  void set_val(int index, float val){
    if (index == BIAS_POS) {
      bias = val;
    } else if (index < D) {
      pvec[index] = val;
    } else {
      weight[index-D] = val;
    }
  }
  float get_val(int index){
    if (index == BIAS_POS)
      return bias;
    return (index < D) ? pvec[index] : weight[index-D];
  }
};
/**
* Type definitions. Remember to create suitable graph shards using the
* Sharder-program.
*/
typedef vertex_data VertexDataType;
typedef float EdgeDataType; // Edges store the "rating" of user->movie pair
graphchi_engine<VertexDataType, EdgeDataType> * pengine = NULL;
graphchi_engine<VertexDataType, EdgeDataType> * pvalidation_engine = NULL;
std::vector<vertex_data> latent_factors_inmem;
#include "io.hpp"
#include "rmse.hpp"
#include "rmse_engine.hpp"
/** SVD++ prediction: \hat{r}_ui = mu + b_u + b_i + q_i^T (p_u + aggregated y_j).
 *  Writes the prediction (clamped to [minval, maxval]) into 'prediction' and
 *  returns the squared error against 'rating'. Aborts on numerical blow-up. */
float svdpp_predict(const vertex_data& user, const vertex_data& movie, const float rating, double & prediction, void * extra = NULL){
  // mu + b_u + b_i
  double score = globalMean + user.bias + movie.bias;
  // + q_i^T * (p_u + sum of implicit weights); user.weight already holds the
  // normalized sum (computed in the update function)
  for (int j = 0; j < D; j++)
    score += movie.pvec[j] * (user.pvec[j] + user.weight[j]);
  // clamp to the allowed rating range
  prediction = std::max(std::min(score, maxval), minval);
  float err = rating - prediction;
  if (std::isnan(err))
    logstream(LOG_FATAL)<<"Got into numerical errors. Try to decrease step size using the command line: svdpp_user_bias_step, svdpp_item_bias_step, svdpp_user_factor2_step, svdpp_user_factor_step, svdpp_item_step" << std::endl;
  return err*err;
}
/**
* GraphChi programs need to subclass GraphChiProgram<vertex-type, edge-type>
* class. The main logic is usually in the update function.
*/
/**
 * SVD++ in-memory SGD program. User vertices (the ones with out-edges) drive
 * the updates; their neighbors are items.
 */
struct SVDPPVerticesInMemProgram : public GraphChiProgram<VertexDataType, EdgeDataType> {
/**
* Called before an iteration is started: reset per-thread RMSE accumulators.
*/
void before_iteration(int iteration, graphchi_context &gcontext) {
reset_rmse(gcontext.execthreads);
}
/**
* Called after an iteration has finished: decay every step size and report
* training and validation RMSE.
*/
void after_iteration(int iteration, graphchi_context &gcontext) {
svdpp.itmFctrStep *= svdpp.step_dec;
svdpp.itmFctr2Step *= svdpp.step_dec;
svdpp.usrFctrStep *= svdpp.step_dec;
svdpp.itmBiasStep *= svdpp.step_dec;
svdpp.usrBiasStep *= svdpp.step_dec;
training_rmse(iteration, gcontext);
validation_rmse(&svdpp_predict, gcontext);
}
/**
* Vertex update function. For each user vertex: (1) aggregate the items'
* implicit-feedback weights y_j into user.weight and normalize by
* 1/sqrt(|N(u)|); (2) do one SGD pass over the user's ratings, updating item
* factors q_i, user factors p_u and both biases; (3) push the accumulated
* step back into the item weights y_j.
*/
void update(graphchi_vertex<VertexDataType, EdgeDataType> &vertex, graphchi_context &gcontext) {
if ( vertex.num_outedges() > 0){
vertex_data & user = latent_factors_inmem[vertex.id()];
// zero the aggregate; assumes vec stores D contiguous doubles -- TODO confirm
memset(&user.weight[0], 0, sizeof(double)*D);
for(int e=0; e < vertex.num_outedges(); e++) {
vertex_data & movie = latent_factors_inmem[vertex.edge(e)->vertex_id()];
user.weight += movie.weight;
}
// sqrt(|N(u)|)
float usrNorm = double(1.0/sqrt(vertex.num_outedges()));
//sqrt(|N(u)| * sum_j y_j
user.weight *= usrNorm;
vec step = zeros(D); // accumulates err * q_i for the y_j update below
// main algorithm, see Koren's paper, just below below equation (16)
for(int e=0; e < vertex.num_outedges(); e++) {
vertex_data & movie = latent_factors_inmem[vertex.edge(e)->vertex_id()];
float observation = vertex.edge(e)->get_data();
double estScore;
rmse_vec[omp_get_thread_num()] += svdpp_predict(user, movie,observation, estScore);
// e_ui = r_ui - \hat{r_ui}
float err = observation - estScore;
assert(!std::isnan(rmse_vec[omp_get_thread_num()]));
// snapshot pre-update factors so both updates below use consistent values
vec itmFctr = movie.pvec;
vec usrFctr = user.pvec;
//q_i = q_i + gamma2 *(e_ui*(p_u + sqrt(N(U))\sum_j y_j) - gamma7 *q_i)
for (int j=0; j< D; j++)
movie.pvec[j] += svdpp.itmFctrStep*(err*(usrFctr[j] + user.weight[j]) - svdpp.itmFctrReg*itmFctr[j]);
//p_u = p_u + gamma2 *(e_ui*q_i -gamma7 *p_u)
for (int j=0; j< D; j++)
user.pvec[j] += svdpp.usrFctrStep*(err *itmFctr[j] - svdpp.usrFctrReg*usrFctr[j]);
step += err*itmFctr;
//b_i = b_i + gamma1*(e_ui - gmma6 * b_i)
movie.bias += svdpp.itmBiasStep*(err-svdpp.itmBiasReg* movie.bias);
//b_u = b_u + gamma1*(e_ui - gamma6 * b_u)
user.bias += svdpp.usrBiasStep*(err-svdpp.usrBiasReg* user.bias);
}
step *= float(svdpp.itmFctr2Step*usrNorm);
//gamma7
double mult = svdpp.itmFctr2Step*svdpp.itmFctr2Reg;
for(int e=0; e < vertex.num_edges(); e++) {
vertex_data& movie = latent_factors_inmem[vertex.edge(e)->vertex_id()];
//y_j = y_j + gamma2*sqrt|N(u)| * q_i - gamma7 * y_j
movie.weight += step - mult * movie.weight;
}
}
}
};
void output_svdpp_result(std::string filename) {
MMOutputter_mat<vertex_data> user_output(filename + "_U.mm", 0, M, "This file contains SVD++ output matrix U. In each row D factors of a single user node. Then additional D weight factors.", latent_factors_inmem, 2*D);
MMOutputter_mat<vertex_data> item_output(filename + "_V.mm", M ,M+N, "This file contains SVD++ output matrix V. In each row D factors of a single item node.", latent_factors_inmem);
MMOutputter_vec<vertex_data> bias_user_vec(filename + "_U_bias.mm", 0, M, BIAS_POS, "This file contains SVD++ output bias vector. In each row a single user bias.", latent_factors_inmem);
MMOutputter_vec<vertex_data> bias_mov_vec(filename + "_V_bias.mm", M, M+N, BIAS_POS, "This file contains SVD++ output bias vector. In each row a single item bias.", latent_factors_inmem);
MMOutputter_scalar gmean(filename + "_global_mean.mm", "This file contains SVD++ global mean which is required for computing predictions.", globalMean);
logstream(LOG_INFO) << "SVDPP output files (in matrix market format): " << filename << "_U.mm" <<
", " << filename + "_V.mm, " << filename << "_U_bias.mm, " << filename << "_V_bias.mm, " << filename << "_global_mean.mm" << std::endl;
}
/**
 * Allocate and randomly initialize all latent factor vectors in
 * latent_factors_inmem. User rows [0, M) also get a zeroed implicit-feedback
 * weight vector; item rows keep the constructor's zeroed weight.
 */
void svdpp_init(){
srand48(time(NULL));
latent_factors_inmem.resize(M+N);
// NOTE(review): drand48() is called from inside an OpenMP parallel loop; it
// is not specified as thread-safe, so initialization is nondeterministic
// across runs -- confirm this is acceptable.
#pragma omp parallel for
for(int i = 0; i < (int)(M+N); ++i){
vertex_data & data = latent_factors_inmem[i];
data.pvec = zeros(D);
if (i < (int)M) //user node
data.weight = zeros(D);
for (int j=0; j<D; j++)
latent_factors_inmem[i].pvec[j] = drand48();
}
logstream(LOG_INFO) << "SVD++ initialization ok" << std::endl;
}
int main(int argc, const char ** argv) {
print_copyright();
/* GraphChi initialization will read the command line arguments and the configuration file. */
graphchi_init(argc, argv);
/* Metrics object for keeping track of performance counters
and other information. Currently required. */
metrics m("svdpp-inmemory-factors");
/* Per-parameter-group step sizes and regularization weights (see svdpp_params) */
svdpp.step_dec = get_option_float("svdpp_step_dec", 0.9);
svdpp.itmBiasStep = get_option_float("svdpp_item_bias_step", 1e-3);
svdpp.itmBiasReg = get_option_float("svdpp_item_bias_reg", 1e-3);
svdpp.usrBiasStep = get_option_float("svdpp_user_bias_step", 1e-3);
svdpp.usrBiasReg = get_option_float("svdpp_user_bias_reg", 1e-3);
svdpp.usrFctrStep = get_option_float("svdpp_user_factor_step", 1e-3);
svdpp.usrFctrReg = get_option_float("svdpp_user_factor_reg", 1e-3);
svdpp.itmFctrReg = get_option_float("svdpp_item_factor_reg", 1e-3);
svdpp.itmFctrStep = get_option_float("svdpp_item_factor_step", 1e-3);
svdpp.itmFctr2Reg = get_option_float("svdpp_item_factor2_reg", 1e-3);
svdpp.itmFctr2Step = get_option_float("svdpp_item_factor2_step", 1e-3);
parse_command_line_args();
parse_implicit_command_line();
/* Preprocess data if needed, or discover preprocess files */
int nshards = convert_matrixmarket<EdgeDataType>(training, NULL, 0, 0, 3, TRAINING, false);
if (validation != ""){
int vshards = convert_matrixmarket<EdgeDataType>(validation, NULL, 0, 0, 3, VALIDATION, false);
init_validation_rmse_engine<VertexDataType, EdgeDataType>(pvalidation_engine, vshards, &svdpp_predict);
}
svdpp_init();
/* Warm start: U rows hold D factors plus D weights, hence width 2*D */
if (load_factors_from_file){
load_matrix_market_matrix(training + "_U.mm", 0, 2*D);
load_matrix_market_matrix(training + "_V.mm", M, D);
vec user_bias = load_matrix_market_vector(training +"_U_bias.mm", false, true);
assert(user_bias.size() == M);
vec item_bias = load_matrix_market_vector(training +"_V_bias.mm", false, true);
assert(item_bias.size() == N);
for (uint i=0; i<M+N; i++){
latent_factors_inmem[i].bias = ((i<M)?user_bias[i] : item_bias[i-M]);
}
vec gm = load_matrix_market_vector(training + "_global_mean.mm", false, true);
globalMean = gm[0];
}
/* Run */
SVDPPVerticesInMemProgram program;
graphchi_engine<VertexDataType, EdgeDataType> engine(training, nshards, false, m);
set_engine_flags(engine);
pengine = &engine;
engine.run(program, niters);
/* Output latent factor matrices in matrix-market format */
output_svdpp_result(training);
test_predictions(&svdpp_predict);
/* Report execution metrics */
if (!quiet)
metrics_report(m);
return 0;
}
| C++ |
/**
* @file
* @author Danny Bickson
* @version 1.0
*
* @section LICENSE
*
* Copyright [2012] [Carnegie Mellon University]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @section DESCRIPTION
*
* Implementation of the label propagation algorithm
*/
#include "../collaborative_filtering/common.hpp"
#include "../parsers/common.hpp"
#include "../collaborative_filtering/eigen_wrapper.hpp"
double alpha = 0.15; // damping: weight kept on a node's current label vector per update
#define TEXT_LENGTH 64 // capacity of the per-vertex token buffer
std::string contexts_file, nouns_file, pos_seeds, neg_seeds; // input file paths (required options)
double_map nouns; // string -> node-id map for noun vertices (see string2nodeid usage below)
double_map contexts; // string -> node-id map for context vertices
/**
 * Per-vertex state for label propagation: a D-entry label-probability vector
 * plus bookkeeping fields. Seed nodes have fixed labels and are never updated.
 */
struct vertex_data {
  vec pvec;
  bool seed;
  double normalizer;
  int nb_count;
  char text[TEXT_LENGTH];
  vertex_data() : seed(false), normalizer(0), nb_count(0) {
    pvec = zeros(D);
  }
  // Only invoked for seed nodes: fixing a label entry marks the node a seed.
  void set_val(int index, float val){
    pvec[index] = val;
    seed = true;
  }
  float get_val(int index){
    return pvec[index];
  }
};
/**
 * Edge payload: how many times the two endpoint tokens co-occurred.
 * The two- and one-argument constructors truncate their first value to int;
 * the second argument is ignored (presumably to satisfy the matrix-market
 * loader's calling convention -- verify against the loader).
 */
struct edge_data{
  int cooccurence_count;
  edge_data(double val, double nothing) : cooccurence_count((int)val) { }
  edge_data(double val) : cooccurence_count((int)val) { }
  edge_data() : cooccurence_count(0) { }
};
/**
* Type definitions. Remember to create suitable graph shards using the
* Sharder-program.
*/
typedef vertex_data VertexDataType;
typedef edge_data EdgeDataType; // Edges store the "rating" of user->movie pair
graphchi_engine<VertexDataType, EdgeDataType> * pengine = NULL;
std::vector<vertex_data> latent_factors_inmem;
#include "../collaborative_filtering/io.hpp"
// tfidc is a modified weight formula for Co-EM (see Justin
// Betteridge's "CoEM results" page)
#define TFIDF(coocc, num_neighbors, vtype_total) (log(1+coocc)*log(vtype_total*1.0/num_neighbors))
/**
* GraphChi programs need to subclass GraphChiProgram<vertex-type, edge-type>
* class. The main logic is usually in the update function.
*/
/**
 * GraphChi programs need to subclass GraphChiProgram<vertex-type, edge-type>
 * class. The main logic is usually in the update function.
 */
struct COEMVerticesInMemProgram : public GraphChiProgram<VertexDataType, EdgeDataType> {
  /**
   * Vertex update: the new label distribution is the co-occurrence-weighted
   * average of the neighbors' distributions, damped by alpha toward the
   * current value. Seed nodes and isolated nodes are left untouched.
   */
  void update(graphchi_vertex<VertexDataType, EdgeDataType> &vertex, graphchi_context &gcontext) {
    vertex_data & vdata = latent_factors_inmem[vertex.id()];
    if (vertex.num_edges() == 0 || vdata.seed) // no edges or fixed seed: nothing to do
      return;
    vec ret = zeros(D);
    double normalization = 0;
    for(int e=0; e < vertex.num_edges(); e++) {
      edge_data edge = vertex.edge(e)->get_data();
      vertex_data & nbr_latent = latent_factors_inmem[vertex.edge(e)->vertex_id()];
      ret += edge.cooccurence_count * nbr_latent.pvec;
      normalization += edge.cooccurence_count;
    }
    // Fix: if every incident edge has a zero co-occurrence count, the weighted
    // average is undefined; the original divided by zero here and propagated
    // NaNs into the label vector. Keep the current labels instead.
    if (normalization == 0)
      return;
    ret /= normalization;
    vdata.pvec = alpha * vdata.pvec + (1-alpha)*ret;
  }
};
/**
 * Read a seed list (one token per line) and mark the matching noun vertices
 * as seeds, fixing their first label entry to 1 (positive) or 0 (negative).
 * Aborts via LOG_FATAL on a missing file, an unparsable line, or a token that
 * is not present in 'map' (ids in the map are 1-based; 0 means "not found").
 *
 * Fixes: the getline() buffer is now freed after the loop (the original left
 * the final free commented out and kept a dead 'to_free' local), 'linesize'
 * is initialized as getline() expects, and the unsigned comparison
 * 'pos <= 0' is written as the equivalent but clearer 'pos == 0'.
 */
void load_seeds_from_txt_file(std::map<std::string,uint> & map, const std::string filename, bool negative){
  logstream(LOG_INFO)<<"loading " << (negative ? "negative" : "positive" ) << " seeds from txt file: " << filename << std::endl;
  FILE * f = fopen(filename.c_str(), "r");
  if (f == NULL)
    logstream(LOG_FATAL)<<"Failed to open file: " << filename << std::endl;
  char * linebuf = NULL;
  size_t linesize = 0;
  int line = 0;
  while (true){
    int rc = getline(&linebuf, &linesize, f);
    if (rc == -1)
      break;
    char *pch = strtok(linebuf,"\r\n\t_^$");
    if (!pch){
      logstream(LOG_FATAL) << "Error when parsing file: " << filename << ":" << line <<std::endl;
    }
    // NOTE: operator[] inserts a zero entry on a miss; harmless since we
    // abort immediately below in that case.
    uint pos = map[pch];
    if (pos == 0)
      logstream(LOG_FATAL)<<"Failed to find " << pch << " in map. Aborting" << std::endl;
    assert(pos <= M);
    latent_factors_inmem[pos-1].seed = true;
    latent_factors_inmem[pos-1].pvec[0] = negative ? 0 : 1;
    line++;
  }
  free(linebuf); // getline() allocates/reuses this buffer; release it once here
  logstream(LOG_INFO)<<"Seed list size is: " << line << std::endl;
  fclose(f);
}
void output_coem_result(std::string filename) {
MMOutputter_mat<vertex_data> user_mat(filename + "_U.mm", 0, M , "This file contains COEM output matrix U. In each row D probabilities for the Y labels", latent_factors_inmem);
logstream(LOG_INFO) << "COEM output files (in matrix market format): " << filename << "_U.mm" << std::endl;
}
int main(int argc, const char ** argv) {
print_copyright();
/* GraphChi initialization will read the command line
arguments and the configuration file. */
graphchi_init(argc, argv);
/* Metrics object for keeping track of performance counters
and other information. Currently required. */
metrics m("label_propagation");
/* Required inputs: token->id map files and positive/negative seed lists */
contexts_file = get_option_string("contexts");
nouns_file = get_option_string("nouns");
pos_seeds = get_option_string("pos_seeds");
neg_seeds = get_option_string("neg_seeds");
parse_command_line_args();
load_map_from_txt_file(contexts.string2nodeid, contexts_file, 1);
load_map_from_txt_file(nouns.string2nodeid, nouns_file, 1);
//load graph (adj matrix) from file
int nshards = convert_matrixmarket<EdgeDataType>(training, NULL, 0, 0, 3, TRAINING, true);
init_feature_vectors<std::vector<vertex_data> >(M+N, latent_factors_inmem);
/* Fix seed labels: pos_seeds -> label 1, neg_seeds -> label 0 */
load_seeds_from_txt_file(nouns.string2nodeid, pos_seeds, false);
load_seeds_from_txt_file(nouns.string2nodeid, neg_seeds, true);
/* Only noun rows [0, M) are initialized below */
#pragma omp parallel for
for (int i=0; i< (int)M; i++){
//normalize seed probabilities to sum up to one
if (latent_factors_inmem[i].seed){
if (sum(latent_factors_inmem[i].pvec) != 0)
latent_factors_inmem[i].pvec /= sum(latent_factors_inmem[i].pvec);
continue;
}
//other nodes get random label probabilities
for (int j=0; j< D; j++)
latent_factors_inmem[i].pvec[j] = drand48();
}
/* load initial state from disk (optional) */
if (load_factors_from_file){
load_matrix_market_matrix(training + "_U.mm", 0, D);
}
/* Run */
COEMVerticesInMemProgram program;
graphchi_engine<VertexDataType, EdgeDataType> engine(training, nshards, false, m);
set_engine_flags(engine);
pengine = &engine;
engine.run(program, niters);
/* Output latent factor matrices in matrix-market format */
output_coem_result(training);
/* Report execution metrics */
if (!quiet)
metrics_report(m);
return 0;
}
| C++ |
/**
* @file
* @author Aapo Kyrola <akyrola@cs.cmu.edu>
* @version 1.0
*
* @section LICENSE
*
* Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @section DESCRIPTION
*
* Application for computing the connected components of a graph.
* The algorithm is simple: on first iteration each vertex sends its
* id to neighboring vertices. On subsequent iterations, each vertex chooses
* the smallest id of its neighbors and broadcasts its (new) label to
* its neighbors. The algorithm terminates when no vertex changes label.
*
* @section REMARKS
*
* Version of connected components that keeps the vertex values
* in memory.
* @author Aapo Kyrola
*
* Danny B: added output of each vertex label
*/
#define GRAPHCHI_DISABLE_COMPRESSION
#include <cmath>
#include <string>
#include <map>
#include "graphchi_basic_includes.hpp"
#include "label_analysis.hpp"
#include "../collaborative_filtering/eigen_wrapper.hpp"
#include "../collaborative_filtering/timer.hpp"
using namespace graphchi;
// Mode flag: false = run connected-components label propagation;
// true = second pass that counts, per component, the number of edges ("bonds").
bool edge_count = false;
// Component label -> number of edges attributed to that component
// (filled during the edge_count pass).
std::map<uint,uint> state;
// Protects the shared counters and the 'state' map from concurrent vertex updates.
mutex mymutex;
/**
* Type definitions. Remember to create suitable graph shards using the
* Sharder-program.
*/
typedef vid_t VertexDataType; // vid_t is the vertex id type
typedef vid_t EdgeDataType;
// In-memory label of every vertex; allocated in before_iteration on iteration 0
VertexDataType * vertex_values;
// Number of label changes in the current iteration; zero means converged
size_t changes = 0;
timer mytimer;
// Number of vertices that have at least one edge (counted on iteration 0)
int actual_vertices = 0;
// Per-vertex flags used to rebuild the scheduler task list each iteration
bool * active_nodes;
// Total number of label-propagation iterations executed
int iter = 0;
/**
 * Connected-components label propagation with vertex labels kept in memory.
 * Two modes, selected by the global 'edge_count' flag:
 *  - edge_count == false: iteratively replace each vertex's label by the
 *    minimum label among itself and its neighbors, scheduling neighbors
 *    whenever a label shrinks.
 *  - edge_count == true: one extra pass that attributes every edge once
 *    (from the lower-id endpoint) to a component counter in 'state'.
 */
struct ConnectedComponentsProgram : public GraphChiProgram<VertexDataType, EdgeDataType> {
    /**
     * Called after an iteration has finished.
     * Stops the engine once a full sweep produced no label change.
     */
    void after_iteration(int iteration, graphchi_context &ginfo) {
        logstream(LOG_DEBUG)<<mytimer.current_time() << "iteration: " << iteration << " changes: " << changes << std::endl;
        if (changes == 0)
            ginfo.set_last_iteration(iteration);
        changes = 0;
        iter++;
    }
    /* Current label of the neighbor on the other side of this edge. */
    vid_t neighbor_value(graphchi_edge<EdgeDataType> * edge) {
        return vertex_values[edge->vertex_id()];
    }
    /* Record a new label for this vertex in the in-memory label array. */
    void set_data(graphchi_vertex<VertexDataType, EdgeDataType> &vertex, vid_t value) {
        vertex_values[vertex.id()] = value;
    }
    /**
     * Vertex update function.
     * On the first iteration, each vertex starts with label = its own id
     * (seeded in before_iteration). On subsequent iterations, each vertex
     * takes the minimum of its own and its neighbors' labels.
     */
    void update(graphchi_vertex<VertexDataType, EdgeDataType> &vertex, graphchi_context &gcontext) {
        /* On subsequent iterations, find the minimum label of my neighbors */
        if (!edge_count){
            vid_t curmin = vertex_values[vertex.id()];
            //first time, count the number of nodes which actually have edges
            if (gcontext.iteration == 0 && vertex.num_edges() > 0){
                mymutex.lock(); actual_vertices++; mymutex.unlock();
            }
            for(int i=0; i < vertex.num_edges(); i++) {
                vid_t nblabel = neighbor_value(vertex.edge(i));
                curmin = std::min(nblabel, curmin);
            }
            //in case of a new min reschedule neighbors
            if (vertex_values[vertex.id()] > curmin) {
                /* FIX: 'changes' is shared between update threads (deterministic
                 * parallelism is disabled in main), so the increment must be
                 * guarded, like 'actual_vertices' above, to avoid lost updates
                 * that could trigger a false early termination. */
                mymutex.lock(); changes++; mymutex.unlock();
                set_data(vertex, curmin);
                for (int i=0; i< vertex.num_edges(); i++){
                    active_nodes[vertex.edge(i)->vertex_id()] = true;
                }
            }
            else active_nodes[vertex.id()] = false;
        }
        else {
            /* Edge-count pass: each edge is attributed once (from the
             * lower-id endpoint) to the component label seen at that moment. */
            vid_t curmin = vertex_values[vertex.id()];
            for(int i=0; i < vertex.num_edges(); i++) {
                vid_t nblabel = neighbor_value(vertex.edge(i));
                curmin = std::min(nblabel, curmin);
                if (vertex.edge(i)->vertex_id() > vertex.id()){
                    mymutex.lock();
                    state[curmin]++;
                    mymutex.unlock();
                }
            }
        }
    }
    /**
     * Called before an iteration starts: allocate and seed the labels on the
     * first iteration, then (re)build the task list from the active flags.
     */
    void before_iteration(int iteration, graphchi_context &ctx) {
        changes = 0;
        if (iteration == 0 && !edge_count) {
            /* initialize each vertex with its own label */
            vertex_values = new VertexDataType[ctx.nvertices];
            for(int i=0; i < (int)ctx.nvertices; i++) {
                vertex_values[i] = i;
            }
        }
        ctx.scheduler->remove_tasks(0, (int) ctx.nvertices - 1);
        /* FIX: cast avoids a signed/unsigned comparison against ctx.nvertices. */
        for (int i=0; i < (int)ctx.nvertices; i++)
            if (active_nodes[i])
                ctx.scheduler->add_task(i);
    }
};
int main(int argc, const char ** argv) {
/* GraphChi initialization will read the command line
arguments and the configuration file. */
graphchi_init(argc, argv);
/* Metrics object for keeping track of performance counters
and other information. Currently required. */
metrics m("connected-components-inmem");
/* Basic arguments for application */
std::string filename = get_option_string("file"); // Base filename
int niters = get_option_int("niters", 100); // Number of iterations (max)
int output_labels = get_option_int("output_labels", 0); //output node labels to file?
bool scheduler = true; // Always run with scheduler
/* Process input file - if not already preprocessed */
float p = get_option_float("p", -1);
int n = get_option_int("n", -1);
int quiet = get_option_int("quiet", 0);
if (quiet)
global_logger().set_log_level(LOG_ERROR);
int nshards = (int) convert_if_notexists<EdgeDataType>(filename, get_option_string("nshards", "auto"));
mytimer.start();
/* Run */
ConnectedComponentsProgram program;
graphchi_engine<VertexDataType, EdgeDataType> engine(filename, nshards, scheduler, m);
engine.set_disable_vertexdata_storage();
engine.set_enable_deterministic_parallelism(false);
engine.set_modifies_inedges(false);
engine.set_modifies_outedges(false);
engine.set_preload_commit(false);
engine.set_maxwindow(engine.num_vertices());
mytimer.start();
active_nodes = new bool[engine.num_vertices()];
for (int i=0; i< engine.num_vertices(); i++)
active_nodes[i] = true;
engine.run(program, niters);
/* Run analysis of the connected components (output is written to a file) */
if (output_labels){
FILE * pfile = fopen((filename + "-components").c_str(), "w");
if (!pfile)
logstream(LOG_FATAL)<<"Failed to open file: " << filename << std::endl;
fprintf(pfile, "%%%%MatrixMarket matrix array real general\n");
fprintf(pfile, "%lu %u\n", engine.num_vertices()-1, 1);
for (uint i=1; i< engine.num_vertices(); i++){
fprintf(pfile, "%u\n", vertex_values[i]);
assert(vertex_values[i] >= 0 && vertex_values[i] < engine.num_vertices());
}
fclose(pfile);
logstream(LOG_INFO)<<"Saved succesfully to out file: " << filename << "-components" << " time for saving: " << mytimer.current_time() << std::endl;
}
std::cout<<"Total runtime: " << mytimer.current_time() << std::endl;
if (p > 0)
std::cout << "site fraction p= " << p << std::endl;
if (n > 0){
std::cout << "n=" << n*p << std::endl;
std::cout << "isolated sites: " << p*(double)n-actual_vertices << std::endl;
}
std::cout << "Number of sites: " << actual_vertices << std::endl;
std::cout << "Number of bonds: " << engine.num_edges() << std::endl;
if (n){
std::cout << "Percentage of sites: " << (double)actual_vertices / (double)n << std::endl;
std::cout << "Percentage of bonds: " << (double)engine.num_edges() / (2.0*n) << std::endl;
}
std::cout << "Number of iterations: " << iter << std::endl;
std::cout << "SITES RESULT:\nsize\tcount\n";
std::map<uint,uint> final_countsv;
std::map<uint,uint> final_countse;
std::map<uint,uint> statv;
for (int i=0; i< engine.num_vertices(); i++)
statv[vertex_values[i]]++;
uint total_sites = 0;
for (std::map<uint, uint>::const_iterator iter = statv.begin();
iter != statv.end(); iter++) {
//std::cout << iter->first << "\t" << iter->second << "\n";
final_countsv[iter->second] += 1;
total_sites += iter->second;
}
for (std::map<uint, uint>::const_iterator iter = final_countsv.begin();
iter != final_countsv.end(); iter++) {
std::cout << iter->first << "\t" << iter->second << "\n";
}
edge_count = 1;
engine.run(program, 1);
std::cout << "BONDS RESULT:\nsize\tcount\n";
uint total_bonds = 0;
for (std::map<uint, uint>::const_iterator iter = state.begin();
iter != state.end(); iter++) {
//std::cout << iter->first << "\t" << iter->second << "\n";
final_countse[iter->second] += 1;
total_bonds += iter->second;
}
for (std::map<uint, uint>::const_iterator iter = final_countse.begin();
iter != final_countse.end(); iter++) {
std::cout << iter->first << "\t" << iter->second << "\n";
}
assert(total_sites == graph.num_vertices());
assert(total_bonds == graph.num_edges());
return 0;
}
| C++ |
/**
* * @file
* * @author Aapo Kyrola <akyrola@cs.cmu.edu>
* * @version 1.0
* *
* * @section LICENSE
* *
* * Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University]
* *
* * Licensed under the Apache License, Version 2.0 (the "License");
* * you may not use this file except in compliance with the License.
* * You may obtain a copy of the License at
* *
* * http://www.apache.org/licenses/LICENSE-2.0
* *
* * Unless required by applicable law or agreed to in writing, software
* * distributed under the License is distributed on an "AS IS" BASIS,
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* * See the License for the specific language governing permissions and
* * limitations under the License.
*
* *
* * @section DESCRIPTION
* *
* * Analyses output of label propagation algorithms such as connected components
* * and community detection. Memory efficient implementation.
* *
* * @author Aapo Kyrola
* */
#include <vector>
#include <algorithm>
#include <errno.h>
#include <assert.h>
#include "io/stripedio.hpp"
#include "logger/logger.hpp"
#include "util/merge.hpp"
#include "util/ioutil.hpp"
#include "util/qsort.hpp"
#include "api/chifilenames.hpp"
#ifndef DEF_GRAPHCHI_LABELANALYSIS
#define DEF_GRAPHCHI_LABELANALYSIS
using namespace graphchi;
/* (label, occurrence-count) pair used while aggregating component sizes. */
template <typename LabelType>
struct labelcount_tt {
    LabelType label;
    unsigned int count; // Count excludes the vertex which has its own id as the label. (Important optimization)
    /* Default: members left uninitialized on purpose (bulk array use). */
    labelcount_tt() {}
    labelcount_tt(LabelType l, int c) {
        label = l;
        count = c;
    }
};
/* Orders label counts by decreasing count (for "largest component first"). */
template <typename LabelType>
bool label_count_greater(const labelcount_tt<LabelType> &lhs, const labelcount_tt<LabelType> &rhs) {
    return rhs.count < lhs.count;
}
/**
 * Streams the vertex-label file of 'base_filename', writes one
 * "<vertex> 1 <label>" row per vertex (>= 1) to 'pfile', aggregates the
 * size of every label (component/community), writes the sizes to
 * "<base>_components.txt" and prints the 'printtop' largest ones.
 */
template <typename LabelType>
void analyze_labels2(std::string base_filename, FILE * pfile, int printtop = 20) {
    typedef labelcount_tt<LabelType> labelcount_t;
    /**
     * NOTE: this implementation is quite a mouthful. Cleaner implementation
     * could be done by using a map implementation. But STL map takes too much
     * memory, and I want to avoid Boost dependency - which would have boost::unordered_map.
     */
    std::string filename = filename_vertex_data<LabelType>(base_filename);
    metrics m("labelanalysis");
    stripedio * iomgr = new stripedio(m);
    int f = iomgr->open_session(filename, true);
    size_t sz = get_filesize(filename);
    /* Setup buffer sizes */
    size_t bufsize = 1024 * 1024; // Read one megabyte a time
    int nbuf = (int) (bufsize / sizeof(LabelType));
    std::vector<labelcount_t> curlabels;    // running (label, count) list, sorted by label
    size_t nread = 0;
    bool first = true;
    vid_t curvid = 0;
    LabelType * buffer = (LabelType*) calloc(nbuf, sizeof(LabelType));
    while (nread < sz) {
        size_t len = std::min(sz - nread, bufsize);
        iomgr->preada_now(f, buffer, len, nread);
        nread += len;
        int nt = (int) (len / sizeof(LabelType));
        /* Mark vertices with its own label with 0xffffffff so they will be ignored */
        for(int i=0; i < nt; i++) {
            LabelType l = buffer[i];
            if (curvid > 0)
                /* FIX: vertex ids and labels are unsigned; use %u (this matches
                 * the "%u 1 %u" format used by OutputVertexCallback). */
                fprintf(pfile, "%u 1 %u\n", curvid, l);
            if (l == curvid) buffer[i] = 0xffffffff;
            curvid++;
        }
        /* First sort the buffer */
        quickSort(buffer, nt, std::less<LabelType>());
        /* Then collect runs of equal labels into (label, count) pairs */
        std::vector<labelcount_t> newlabels;
        newlabels.reserve(nt);
        vid_t lastlabel = 0xffffffff;
        for(int i=0; i < nt; i++) {
            if (buffer[i] != 0xffffffff) {
                if (buffer[i] != lastlabel) {
                    newlabels.push_back(labelcount_t(buffer[i], 1));
                } else {
                    newlabels[newlabels.size() - 1].count ++;
                }
                lastlabel = buffer[i];
            }
        }
        if (first) {
            for(int i=0; i < (int)newlabels.size(); i++) {
                curlabels.push_back(newlabels[i]);
            }
        } else {
            /* Merge current and new label counts (both sorted by label) */
            int cl = 0;
            int nl = 0;
            std::vector< labelcount_t > merged;
            merged.reserve(curlabels.size() + newlabels.size());
            while(cl < (int)curlabels.size() && nl < (int)newlabels.size()) {
                if (newlabels[nl].label == curlabels[cl].label) {
                    merged.push_back(labelcount_t(newlabels[nl].label, newlabels[nl].count + curlabels[cl].count));
                    nl++; cl++;
                } else {
                    if (newlabels[nl].label < curlabels[cl].label) {
                        merged.push_back(newlabels[nl]);
                        nl++;
                    } else {
                        merged.push_back(curlabels[cl]);
                        cl++;
                    }
                }
            }
            while(cl < (int)curlabels.size()) merged.push_back(curlabels[cl++]);
            while(nl < (int)newlabels.size()) merged.push_back(newlabels[nl++]);
            curlabels = merged;
        }
        first = false;
    }
    free(buffer);   /* FIX: the calloc'd read buffer was leaked */
    /* Sort by decreasing component size */
    std::sort(curlabels.begin(), curlabels.end(), label_count_greater<LabelType>);
    /* Write output file */
    std::string outname = base_filename + "_components.txt";
    FILE * resf = fopen(outname.c_str(), "w");
    if (resf == NULL) {
        logstream(LOG_ERROR) << "Could not write label outputfile : " << outname << std::endl;
        /* FIX: the early-return path leaked the io session and the manager */
        iomgr->close_session(f);
        delete iomgr;
        return;
    }
    for(int i=0; i < (int) curlabels.size(); i++) {
        fprintf(resf, "%u,%u\n", curlabels[i].label, curlabels[i].count + 1);
    }
    fclose(resf);
    std::cout << "Total number of different labels (components/communities): " << curlabels.size() << std::endl;
    std::cout << "List of labels was written to file: " << outname << std::endl;
    for(int i=0; i < (int)std::min((size_t)printtop, curlabels.size()); i++) {
        std::cout << (i+1) << ". label: " << curlabels[i].label << ", size: " << curlabels[i].count << std::endl;
    }
    iomgr->close_session(f);
    delete iomgr;
}
#endif
| C++ |
/**
* Copyright (c) 2009 Carnegie Mellon University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an "AS
* IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language
* governing permissions and limitations under the License.
*
* For more about this software visit:
*
* http://www.graphlab.ml.cmu.edu
*
*/
#include <cmath>
#include <cstdio>
#include <limits>
#include <iostream>
#include "graphchi_basic_includes.hpp"
#include "api/chifilenames.hpp"
#include "api/vertex_aggregator.hpp"
#include "preprocessing/sharder.hpp"
#include "../collaborative_filtering/eigen_wrapper.hpp"
#include "../collaborative_filtering/timer.hpp"
#include "../collaborative_filtering/common.hpp"
using namespace graphchi;
// Treat the input as a square (single vertex set) matrix? 0 = bipartite M x N
int square = 0;
// Input format: 2/3 tokens = [from to [value]], 4 = with a time column
int tokens_per_row = 3;
bool debug = false;
// NOTE: shadowed by a local 'max_iter' in main(); this default is unused there
int max_iter = 50;
// Per-round statistics, indexed by core number (round)
ivec active_nodes_num;
ivec active_links_num;
int iiter = 0; //current iteration
uint nodes = 0;        // optional --nodes hint passed to the converter
uint orig_edges = 0;   // optional --orig_edges hint passed to the converter
// Shared counters written by parallel vertex updates; guarded by mymutex
uint num_active = 0;
uint links = 0;
mutex mymutex;
timer mytimer;
/* Per-vertex state for the k-cores computation. */
struct vertex_data {
    bool active;        // still part of the current core?
    int kcore, degree;  // assigned core number (-1 = not yet decided) and degree
    vec pvec;           // unused in this app; kept for parser compatibility (to remove)
    vertex_data() {
        active = true;
        kcore = -1;
        degree = 0;
    }
    /* No-op accessors required by the shared I/O templates. */
    void set_val(int index, double val) {}
    float get_val(int index) { return 0; }
}; // end of vertex_data
/* Edges are binary in the k-cores algorithm: only the structure matters. */
struct edge_data {
    edge_data() {}
    /* The constructors below let parsers that supply an edge value (and
       optionally a timestamp) compile; both arguments are discarded. */
    edge_data(double value) {}
    edge_data(double value, double timestamp) {}
};
typedef vertex_data VertexDataType;
typedef edge_data EdgeDataType; // edges are unweighted in k-cores; edge_data carries no payload
graphchi_engine<VertexDataType, EdgeDataType> * pengine = NULL;
// In-memory per-vertex state, indexed by vertex id
std::vector<vertex_data> latent_factors_inmem;
#include "../collaborative_filtering/io.hpp"
/**
 * K-cores decomposition: in round k (global 'iiter'), vertices whose number
 * of still-active neighbors is <= k are deactivated; a deactivated vertex's
 * core number is the round in which it dropped out. The driver reruns each
 * round until the active count stabilizes.
 */
struct KcoresProgram : public GraphChiProgram<VertexDataType, EdgeDataType> {
    /**
     * Vertex update function.
     */
    void update(graphchi_vertex<VertexDataType, EdgeDataType> &vertex, graphchi_context &gcontext) {
        vertex_data & vdata = latent_factors_inmem[vertex.id()];
        if (debug && iiter > 99 && vertex.id() % 1000 == 0)
            std::cout<<"Entering node: " << vertex.id() << std::endl;
        /* Vertices that dropped out in an earlier round never change again. */
        if (!vdata.active)
            return;
        int cur_iter = iiter;
        int cur_links = 0;           // active neighbors of this vertex
        int increasing_links = 0;    // active edges counted once (towards higher ids)
        for(int e=0; e < vertex.num_edges(); e++) {
            const vertex_data & other = latent_factors_inmem[vertex.edge(e)->vertex_id()];
            if (other.active){
                if (debug && iiter > 99)
                    std::cout<<"neighbor: "<<vertex.edge(e)->vertex_id()<<" active"<<std::endl;
                cur_links++;
                if (vertex.edge(e)->vertex_id() > vertex.id())
                    increasing_links++;
            }
        }
        if (cur_links <= cur_iter){
            /* Too few active neighbors: leave the core at this round. */
            vdata.active = false;
            vdata.kcore = cur_iter;
        }
        else {
            if (debug && iiter > 99)
                std::cout<<vertex.id()<<": cur links: " << cur_links << std::endl;
            mymutex.lock();
            links += increasing_links;
            mymutex.unlock();
        }
        if (vdata.active){
            mymutex.lock();
            num_active++;
            mymutex.unlock();
        }
    }
    /* Record per-round statistics; the driver reruns the round until stable. */
    void after_iteration(int iteration, graphchi_context &gcontext) {
        active_nodes_num[iiter] = num_active;
        if (num_active == 0)
            links = 0;
        /* FIX: num_active and links are unsigned (%u, not %d), and the old
         * format string had a stray 'i' after the node count ("%di"). */
        printf("Number of active nodes in round %d is %u, links: %u\n", iiter, num_active, links);
        active_links_num[iiter] = links;
    }
    /* Reset the shared per-iteration counters. */
    void before_iteration(int iteration, graphchi_context &gcontext) {
        num_active = 0;
        links = 0;
    }
}; // end of aggregator
/* Collect every vertex's computed k-core number into a dense output vector. */
vec fill_output(){
    const uint nverts = (uint)latent_factors_inmem.size();
    vec result = vec::Zero(nverts);
    for (uint v = 0; v < nverts; v++)
        result[v] = latent_factors_inmem[v].kcore;
    return result;
}
int main(int argc, const char *argv[]) {
logstream(LOG_WARNING)<<"GraphChi graph analytics library is written by Danny Bickson (c). Send any "
" comments or bug reports to danny.bickson@gmail.com " << std::endl;
//* GraphChi initialization will read the command line arguments and the configuration file. */
graphchi_init(argc, argv);
/* Metrics object for keeping track of performance counters
and other information. Currently required. */
metrics m("kcores-inmemory-factors");
std::string datafile;
int unittest = 0;
// NOTE: this local deliberately shadows the global 'max_iter'; everything
// below (including the retmat sizing) uses this local copy.
int max_iter = get_option_int("max_iter", 15000); // Number of iterations
maxval = get_option_float("maxval", 1e100);
minval = get_option_float("minval", -1e100);
bool quiet = get_option_int("quiet", 0);
if (quiet)
global_logger().set_log_level(LOG_ERROR);
debug = get_option_int("debug", 0);
unittest = get_option_int("unittest", 0);
datafile = get_option_string("training");
square = get_option_int("square", 0);
tokens_per_row = get_option_int("tokens_per_row", tokens_per_row);
nodes = get_option_int("nodes", nodes);
orig_edges = get_option_int("orig_edges", orig_edges);
// Per-round statistics, one slot per core number 0..max_iter
active_nodes_num = ivec(max_iter+1);
active_links_num = ivec(max_iter+1);
//unit testing
if (unittest == 1){
datafile = "kcores_unittest1";
}
mytimer.start();
/* Preprocess data if needed, or discover preprocess files */
int nshards = 0;
if (tokens_per_row == 4 )
convert_matrixmarket4<edge_data>(datafile, false, square);
else if (tokens_per_row == 3 || tokens_per_row == 2)
convert_matrixmarket<edge_data>(datafile, NULL, nodes, orig_edges, tokens_per_row);
else logstream(LOG_FATAL)<<"Please use --tokens_per_row=3 or --tokens_per_row=4" << std::endl;
// One state slot per vertex (square graph: single vertex set; else bipartite M+N)
latent_factors_inmem.resize(square? std::max(M,N) : M+N);
KcoresProgram program;
graphchi_engine<VertexDataType, EdgeDataType> engine(datafile, nshards, false, m);
set_engine_flags(engine);
engine.set_maxwindow(nodes+1);
int pass = 0;
// Round k peels vertices with <= k active neighbors, repeating the round
// until the active count stops changing, then moves on to round k+1.
for (iiter=1; iiter< max_iter+1; iiter++){
logstream(LOG_INFO)<<mytimer.current_time() << ") Going to run k-cores iteration " << iiter << std::endl;
while(true){
// NOTE(review): on the first pass of a round this reads active_nodes_num[iiter]
// before the engine has written it -- confirm that ivec zero-initializes
// its storage, otherwise the first comparison uses an indeterminate value.
int prev_nodes = active_nodes_num[iiter];
/* Run */
engine.run(program, 1);
pass++;
int cur_nodes = active_nodes_num[iiter];
if (prev_nodes == cur_nodes)
break;
}
// Every vertex has been peeled: decomposition is complete.
if (active_nodes_num[iiter] == 0){
max_iter = iiter;
break;
}
}
std::cout << "KCORES finished in " << mytimer.current_time() << std::endl;
std::cout << "Number of updates: " << pass*(M+N) << " pass: " << pass << std::endl;
// Result table: col 0 = core number, col 1 = nodes removed this round,
// col 2 = total nodes removed so far, col 3 = links removed so far.
imat retmat = imat(max_iter+1, 4);
memset((int*)data(retmat),0,sizeof(int)*retmat.size());
active_nodes_num[0] = (square? std::max(M,N) : M+N);
active_links_num[0] = L;
assert(L>0);
std::cout<<" Core Removed Total Removed"<<std::endl;
std::cout<<" Num Nodes Removed Links" <<std::endl;
for (int i=0; i <= max_iter; i++){
set_val(retmat, i, 0, i);
if (i >= 1){
set_val(retmat, i, 1, active_nodes_num[i-1]-active_nodes_num[i]);
set_val(retmat, i, 2, active_nodes_num[0]-active_nodes_num[i]);
assert(active_nodes_num[i] >= 0);
set_val(retmat, i, 3, L - active_links_num[i]);
}
}
//write_output_matrix(datafile + ".kcores.out", format, retmat);
std::cout<<retmat<<std::endl;
// Per-vertex core numbers, written as a dense output vector
vec ret = fill_output();
write_output_vector(datafile + "x.out", ret,false, "This vector holds for each node its kcore degree");
if (unittest == 1){
imat sol = init_imat("0 0 0 0; 1 1 1 1; 2 4 5 7; 3 4 9 13", 4, 4);
assert(sumsum(sol - retmat) == 0);
}
return EXIT_SUCCESS;
}
| C++ |
/**
* @file
* @author Aapo Kyrola <akyrola@cs.cmu.edu>
* @version 1.0
*
* @section LICENSE
*
* Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @section DESCRIPTION
*
* Application for computing the connected components of a graph.
* The algorithm is simple: on first iteration each vertex sends its
* id to neighboring vertices. On subsequent iterations, each vertex chooses
* the smallest id of its neighbors and broadcasts its (new) label to
* its neighbors. The algorithm terminates when no vertex changes label.
*
* @section REMARKS
*
* This application is an interesting demonstration of the asynchronous capabilities
* of GraphChi, improving the convergence considerably. Consider
* a chain graph 0->1->2->...->n. First, vertex 0 will write its value to its edges,
* which will be observed by vertex 1 immediately, changing its label to 0. Next,
* vertex 2 changes its value to 0, and so on. This all happens in one iteration.
* A subtle issue is that as any pair of vertices a<->b share an edge, they will
* overwrite each other's value. However, because they will never be run in parallel
* (due to the deterministic parallelism of GraphChi), this does not compromise correctness.
*
* @author Aapo Kyrola
*/
#include <cmath>
#include <string>
#include "graphchi_basic_includes.hpp"
#include "label_analysis.hpp"
#include "../collaborative_filtering/eigen_wrapper.hpp"
using namespace graphchi;
// Output file for per-vertex labels; shared with OutputVertexCallback below
FILE * pfile = NULL;
/**
* Type definitions. Remember to create suitable graph shards using the
* Sharder-program.
*/
typedef vid_t VertexDataType; // vid_t is the vertex id type
typedef vid_t EdgeDataType;
// Indicator vector of labels seen by the output callback.
// NOTE(review): its allocation (zeros(...)) is commented out in main; confirm
// it is sized before enabling the callback path that indexes into it.
vec unique_labels;
int niters; // maximum number of iterations (set from --niters in main)
/**
* GraphChi programs need to subclass GraphChiProgram<vertex-type, edge-type>
* class. The main logic is usually in the update function.
*/
struct ConnectedComponentsProgram : public GraphChiProgram<VertexDataType, EdgeDataType> {
/**
* Vertex update function.
* On the first iteration, each vertex chooses a label = the vertex id.
* On subsequent iterations, each vertex chooses the minimum of the neighbor's
* label (and itself). Labels travel between vertices as edge values.
*/
void update(graphchi_vertex<VertexDataType, EdgeDataType> &vertex, graphchi_context &gcontext) {
/* This program requires selective scheduling. */
assert(gcontext.scheduler != NULL);
if (gcontext.iteration == 0) {
vertex.set_data(vertex.id());
gcontext.scheduler->add_task(vertex.id());
}
/* On subsequent iterations, find the minimum label of my neighbors */
vid_t curmin = vertex.get_data();
for(int i=0; i < vertex.num_edges(); i++) {
vid_t nblabel = vertex.edge(i)->get_data();
// On iteration 0 edge values have not been written yet, so use the
// neighbor's id instead of the edge value.
if (gcontext.iteration == 0) nblabel = vertex.edge(i)->vertex_id(); // Note!
curmin = std::min(nblabel, curmin);
}
/* Check if label changed */
vertex.set_data(curmin);
/**
* Broadcast new label to neighbors by writing the value
* to the incident edges.
* Note: on first iteration, write only to out-edges to avoid
* overwriting data (this is kind of a subtle point)
*/
vid_t label = vertex.get_data();
if (gcontext.iteration > 0) {
for(int i=0; i < vertex.num_edges(); i++) {
// Only propagate improvements; scheduling the neighbor keeps the
// minimum-label wavefront moving.
if (label < vertex.edge(i)->get_data()) {
vertex.edge(i)->set_data(label);
/* Schedule neighbor for update */
gcontext.scheduler->add_task(vertex.edge(i)->vertex_id());
}
}
} else if (gcontext.iteration == 0) {
for(int i=0; i < vertex.num_outedges(); i++) {
vertex.outedge(i)->set_data(label);
}
}
}
};
/* class for outputting the label number for each node (optional) */
class OutputVertexCallback : public VCallback<VertexDataType> {
public:
/* print node id and then the label id */
virtual void callback(vid_t vertex_id, VertexDataType &value) {
fprintf(pfile, "%u 1 %u\n", vertex_id+1, value); //graphchi offsets start from zero, while matlab from 1
// NOTE(review): unique_labels is only allocated by the (commented-out)
// zeros(...) call in main; confirm it is sized before enabling this
// callback, otherwise this indexing is out of bounds.
unique_labels[value] = 1;
}
};
int main(int argc, const char ** argv) {
/* GraphChi initialization will read the command line
arguments and the configuration file. */
graphchi_init(argc, argv);
/* Metrics object for keeping track of performance counters
and other information. Currently required. */
metrics m("connected-components");
/* Basic arguments for application */
std::string filename = get_option_string("file"); // Base filename
niters = get_option_int("niters", 10); // Number of iterations (max)
int output_labels = get_option_int("output_labels", 0); //output node labels to file?
bool scheduler = true; // Always run with scheduler
/* Process input file - if not already preprocessed */
int nshards = convert_if_notexists<EdgeDataType>(filename, get_option_string("nshards", "auto"));
// --onlyresult=1 skips the computation entirely
if (get_option_int("onlyresult", 0) == 0) {
/* Run */
ConnectedComponentsProgram program;
graphchi_engine<VertexDataType, EdgeDataType> engine(filename, nshards, scheduler, m);
engine.run(program, niters);
/* optional: output labels for each node to file */
if (output_labels){
pfile = fopen((filename + "-components").c_str(), "w");
if (!pfile)
logstream(LOG_FATAL)<<"Failed to open file: " << filename << std::endl;
// Coordinate-format header: one (vertex, 1, label) row per vertex 1..n-1
fprintf(pfile, "%%%%MatrixMarket matrix coordinate real general\n");
fprintf(pfile, "%lu %u %lu\n", engine.num_vertices()-1, 1, engine.num_vertices()-1);
// NOTE(review): the callback path below is commented out, so 'callback'
// is unused and 'unique_labels' stays empty -- the "Found: N unique
// labels" log line always reports 0. Confirm whether this is intentional;
// the per-vertex rows are instead written by analyze_labels2 below.
OutputVertexCallback callback;
//unique_labels = zeros(engine.num_vertices());
//foreach_vertices<VertexDataType>(filename, 0, engine.num_vertices(), callback);
//fclose(pfile);
logstream(LOG_INFO)<<"Found: " << sum(unique_labels) << " unique labels " << std::endl;
/* Run analysis of the connected components (output is written to a file) */
m.start_time("label-analysis");
analyze_labels2<vid_t>(filename, pfile);
m.stop_time("label-analysis");
fclose(pfile);
}
}
return 0;
}
| C++ |
/**
* @file
* @author Aapo Kyrola <akyrola@cs.cmu.edu>
* @version 1.0
*
* @section LICENSE
*
* Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @section DESCRIPTION
*
* Application for computing the connected components of a graph.
* The algorithm is simple: on first iteration each vertex sends its
* id to neighboring vertices. On subsequent iterations, each vertex chooses
* the smallest id of its neighbors and broadcasts its (new) label to
* its neighbors. The algorithm terminates when no vertex changes label.
*
* @section REMARKS
*
* Version of connected components that keeps the vertex values
* in memory.
* @author Aapo Kyrola
*
* Danny B: added output of each vertex label
*/
#include <cmath>
#include <string>
#include "graphchi_basic_includes.hpp"
#include "label_analysis.hpp"
#include "../collaborative_filtering/eigen_wrapper.hpp"
#include "../collaborative_filtering/timer.hpp"
using namespace graphchi;
/**
* Type definitions. Remember to create suitable graph shards using the
* Sharder-program.
*/
typedef vid_t VertexDataType; // vid_t is the vertex id type
typedef vid_t EdgeDataType;
// In-memory label of every vertex; allocated in before_iteration on iteration 0
// (not freed explicitly -- reclaimed at process exit)
VertexDataType * vertex_values;
// Number of label changes in the current iteration; zero means converged
size_t changes = 0;
timer mytimer;
/**
 * In-memory connected components by iterative label minimization:
 * every vertex starts labeled with its own id, and each sweep lowers a
 * vertex's label to the minimum label among itself and its neighbors.
 * The run converges once a full sweep changes no label.
 */
struct ConnectedComponentsProgram : public GraphChiProgram<VertexDataType, EdgeDataType> {
    /** After each sweep: log progress and stop the engine once nothing changed. */
    void after_iteration(int iteration, graphchi_context &ginfo) {
        logstream(LOG_DEBUG)<<mytimer.current_time() << "iteration: " << iteration << " changes: " << changes << std::endl;
        if (changes == 0)
            ginfo.set_last_iteration(iteration);
        changes = 0;
    }
    /** Label currently assigned to the vertex at the far end of 'edge'. */
    vid_t neighbor_value(graphchi_edge<EdgeDataType> * edge) {
        return vertex_values[edge->vertex_id()];
    }
    /** Store a new label for 'vertex' in the shared in-memory array. */
    void set_data(graphchi_vertex<VertexDataType, EdgeDataType> &vertex, vid_t value) {
        vertex_values[vertex.id()] = value;
    }
    /**
     * Vertex update: adopt the smallest label found among this vertex and
     * all of its neighbors, counting a change whenever the label shrinks.
     */
    void update(graphchi_vertex<VertexDataType, EdgeDataType> &vertex, graphchi_context &gcontext) {
        const vid_t own_label = vertex_values[vertex.id()];
        vid_t best = own_label;
        const int degree = vertex.num_edges();
        for (int e = 0; e < degree; e++)
            best = std::min(best, neighbor_value(vertex.edge(e)));
        if (best < own_label) {
            changes++;
            set_data(vertex, best);
        }
    }
    /**
     * Before each sweep: reset the change counter; on the very first sweep
     * allocate the label array and seed every vertex with its own id.
     */
    void before_iteration(int iteration, graphchi_context &ctx) {
        changes = 0;
        if (iteration == 0) {
            vertex_values = new VertexDataType[ctx.nvertices];
            for (int v = 0; v < (int)ctx.nvertices; v++)
                vertex_values[v] = v;
        }
    }
};
int main(int argc, const char ** argv) {
/* GraphChi initialization will read the command line
arguments and the configuration file. */
graphchi_init(argc, argv);
/* Metrics object for keeping track of performance counters
and other information. Currently required. */
metrics m("connected-components-inmem");
/* Basic arguments for application */
std::string filename = get_option_string("file"); // Base filename
int niters = get_option_int("niters", 100); // Number of iterations (max)
int output_labels = get_option_int("output_labels", 0); //output node labels to file?
bool scheduler = false; // In-memory variant runs without selective scheduling
/* Process input file - if not already preprocessed */
int nshards = (int) convert_if_notexists<EdgeDataType>(filename, get_option_string("nshards", "auto"));
mytimer.start();
/* Run */
ConnectedComponentsProgram program;
graphchi_engine<VertexDataType, EdgeDataType> engine(filename, nshards, scheduler, m);
// Labels live in the global in-memory array and edges are never modified,
// so all engine persistence can be switched off.
engine.set_disable_vertexdata_storage();
engine.set_enable_deterministic_parallelism(false);
engine.set_modifies_inedges(false);
engine.set_modifies_outedges(false);
engine.set_preload_commit(false);
// Allow the whole vertex range in a single execution window
engine.set_maxwindow(engine.num_vertices());
engine.run(program, niters);
// Restarted so the log below reports only the time spent saving
mytimer.start();
/* Run analysis of the connected components (output is written to a file) */
if (output_labels){
FILE * pfile = fopen((filename + "-components").c_str(), "w");
if (!pfile)
logstream(LOG_FATAL)<<"Failed to open file: " << filename << std::endl;
// Dense matrix-market array: one label per vertex 1..n-1 (vertex 0 skipped)
fprintf(pfile, "%%%%MatrixMarket matrix array real general\n");
fprintf(pfile, "%lu %u\n", engine.num_vertices()-1, 1);
for (uint i=1; i< engine.num_vertices(); i++){
fprintf(pfile, "%u\n", vertex_values[i]);
// NOTE(review): vid_t is unsigned, so the '>= 0' half is a tautology
assert(vertex_values[i] >= 0 && vertex_values[i] < engine.num_vertices());
}
fclose(pfile);
logstream(LOG_INFO)<<"Saved succesfully to out file: " << filename << "-components" << " time for saving: " << mytimer.current_time() << std::endl;
}
// vertex_values is intentionally not freed; the OS reclaims it at exit
return 0;
}
| C++ |
/**
* Copyright (c) 2009 Carnegie Mellon University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an "AS
* IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language
* governing permissions and limitations under the License.
*
* For more about this software visit:
*
* http://www.graphlab.ml.cmu.edu
Written by Danny Bickson, CMU
1) File for extracting a subgraph out of the input graph, starting with a given set of seeds, for X hops.
2) This program also prints the degree distribution of a graph (using --degrees=1 command line argument)
3) This program also counts the number of edges for each connected compoentns (using the --cc=filename command line)
*
*/
#include <cmath>
#include <cstdio>
#include <limits>
#include <iostream>
#include <set>
#include "graphchi_basic_includes.hpp"
#include "api/chifilenames.hpp"
#include "api/vertex_aggregator.hpp"
#include "preprocessing/sharder.hpp"
#include "../collaborative_filtering/util.hpp"
#include "../collaborative_filtering/eigen_wrapper.hpp"
#include "../collaborative_filtering/timer.hpp"
#include "../collaborative_filtering/common.hpp"
using namespace graphchi;
using namespace std;
/* Runtime configuration and shared state for the subgraph / degree /
 * connected-component analysis program. All of these are set from the
 * command line in main() and read by SubgraphsProgram::update(). */
int square = 0;              // input matrix is square (rows/cols share node ids)
int tokens_per_row = 3;      // input format: tokens per matrix-market row (2, 3 or 4)
int _degree = 0;             // mode: print the degree distribution and exit
int seed_edges_only = 0;     // mode: only emit edges between seed nodes
int undirected = 1;          // treat A->B / B->A as a single edge (dedup in update())
std::string cc;              // mode: filename of the per-node component-id vector
size_t singleton_nodes = 0;  // counter: component nodes that have no edges
bool debug = false;
int max_iter = 50;           // default hop count (overridden by --hops in main)
int iiter = 0; //current iteration
uint num_active = 0;         // nodes expanded during the current pass
uint links = 0;              // edges written to the output so far
mutex mymutex;               // guards pfile and the shared counters above
timer mytimer;
FILE * pfile = NULL;         // output file, opened in main()
size_t edges = 1000; //number of edges to cut from graph
size_t nodes = 0; //number of nodes in original file (optional)
size_t orig_edges = 0; // number of edges in original file (optional)
// Vertex-id range filter for the degree / component printouts.
int min_range = 0;
// BUG FIX: 2400000000 does not fit in a signed 32-bit int, so the previous
// 'int max_range = 2400000000;' overflowed on initialization and the sentinel
// comparisons 'max_range != 2400000000' elsewhere in this file could never be
// false. An unsigned int represents the sentinel exactly; the existing
// '(uint)max_range' casts and range comparisons keep working unchanged.
unsigned int max_range = 2400000000u;
// Per-vertex state for the subgraph-extraction / degree / connected-component
// analysis. One instance per vertex is kept fully in memory
// (see latent_factors_inmem below).
struct vertex_data {
// true while this vertex is on the frontier (a "seed") of the current hop
bool active;
// set once the vertex has been expanded, so its edges are never emitted twice
bool done;
// marked by a neighbor during a pass; promoted to 'active' between hops in main()
bool next_active;
// connected-component id loaded from the --cc input vector (0 = unassigned)
int component;
vec pvec; //to remove
vertex_data() : active(false), done(false), next_active(false), component(0) {}
// No-op hooks kept for compatibility with the shared io.hpp loaders,
// which expect set_val/get_val on the vertex type.
void set_val(int index, double val){};
float get_val(int index){ return 0; }
}; // end of vertex_data
//edges in kcore algorithm are binary
// Empty edge payload: this analysis only needs graph structure, so any edge
// value(s) present in the input file are parsed and then discarded.
struct edge_data {
edge_data() { }
//compatible with parser which have edge value (we don't need it)
edge_data(double val) { }
// two-value variant (e.g. rating + timestamp inputs); both ignored
edge_data(double val, double time) { }
};
// Type aliases required by the GraphChi engine templates.
typedef vertex_data VertexDataType;
typedef edge_data EdgeDataType;
// Global engine pointer (not used by this program; kept for io.hpp compatibility).
graphchi_engine<VertexDataType, EdgeDataType> * pengine = NULL;
// In-memory per-vertex state, indexed by vertex id.
std::vector<vertex_data> latent_factors_inmem;
// Per-component accumulators, indexed by component id (sized to 'nodes' in main).
vec component_edges;
vec component_nodes;
vec component_seeds;
size_t changes = 0;
#include "../collaborative_filtering/io.hpp"
/**
 * Vertex program implementing three mutually-exclusive analyses, selected by
 * the global mode flags set in main():
 *  - _degree:   print "id degree" for vertices in [min_range, max_range)
 *  - cc != "":  accumulate node/edge/singleton counts per connected component
 *  - otherwise: BFS-style subgraph expansion from the seed set, writing one
 *               output line per new edge ("from to hop") until 'edges' links
 *               have been collected.
 */
struct SubgraphsProgram : public GraphChiProgram<VertexDataType, EdgeDataType> {
/**
* Vertex update function.
*/
void update(graphchi_vertex<VertexDataType, EdgeDataType> &vertex, graphchi_context &gcontext) {
vertex_data & vdata = latent_factors_inmem[vertex.id()];
/* printout degree distribution and finish */
if (_degree){
// 2400000000 acts as the "max_range not set" sentinel.
// NOTE(review): make sure max_range's type can actually represent
// 2400000000 - it overflows a 32-bit signed int, which would make this
// comparison always true.
if (vertex.num_edges() > 0 || max_range != 2400000000)
if (vertex.id() >= (uint)min_range && vertex.id() < (uint)max_range)
fprintf(pfile, "%u %u\n", vertex.id()+1, vertex.num_edges());
return;
}
/* calc component number of nodes and edges and finish */
else if (cc != ""){
assert(vdata.component>= 0 && vdata.component < component_nodes.size());
// 9322220 is a hard-wired component id used for debug tracing.
if (debug && vdata.component == 9322220)
logstream(LOG_DEBUG)<<"Node " << vertex.id() << " has " << vertex.num_edges() << std::endl;
// component 0 means "no component assigned" - skip this vertex.
if (vdata.component == 0)
return;
mymutex.lock();
component_nodes[vdata.component]++;
mymutex.unlock();
if (vertex.num_edges() == 0){
mymutex.lock();
singleton_nodes++;
mymutex.unlock();
}
for(int e=0; e < vertex.num_edges(); e++) {
vertex_data & other = latent_factors_inmem[vertex.edge(e)->vertex_id()];
if (debug && vdata.component == 9322220)
logstream(LOG_DEBUG)<<"Going over edge: " << vertex.id() << "=>" << vertex.edge(e)->vertex_id() << " component: " << vdata.component <<" : "<<other.component<< " seed? " << vdata.active << std::endl;
// Sanity check: both endpoints of an edge must be in the same component.
if (vdata.component != other.component)
logstream(LOG_FATAL)<<"BUG Going over edge: " << vertex.id() << "=>" << vertex.edge(e)->vertex_id() << " component: " << vdata.component <<" : "<<other.component<< " seed? " << vdata.active << std::endl;
// Count each undirected edge exactly once, from its lower-id endpoint.
if (vertex.id() < vertex.edge(e)->vertex_id()){
if (debug && other.component == 9322220)
logstream(LOG_INFO)<<"Added an edge for component: " << other.component << std::endl;
mymutex.lock();
component_edges[vdata.component]++;
mymutex.unlock();
}
}
return;
}
/* subgraph expansion mode: only frontier vertices do work */
if (!vdata.active)
return;
// The mutex is held for the entire expansion of this vertex: it protects
// num_active, links and the shared output file, and effectively serializes
// frontier expansion across engine threads.
mymutex.lock();
num_active++;
// Neighbor ids already emitted from this vertex (dedup for undirected input).
std::set<uint> myset;
std::set<uint>::iterator it;
for(int e=0; e < vertex.num_edges(); e++) {
vertex_data & other = latent_factors_inmem[vertex.edge(e)->vertex_id()];
// Stop once the requested number of edges has been collected.
if (links >= edges)
break;
if (other.done)
continue;
if (seed_edges_only && !other.active)
continue;
//solve a bug where an edge appear twice if A->B and B->A in the data
if (undirected){
it = myset.find(vertex.edge(e)->vertex_id());
if (it != myset.end())
continue;
}
// Output line: <from> <to> <hop>, all 1-based.
fprintf(pfile, "%u %u %u\n", vertex.id()+1, vertex.edge(e)->vertex_id()+1,iiter+1);
if (undirected)
myset.insert(vertex.edge(e)->vertex_id());
if (debug && (vertex.id()+1 == 9322220 || vertex.edge(e)->vertex_id()+1 == 9322220))
cout<<"Found edge: $$$$ " << vertex.id() << " => " << vertex.edge(e)->vertex_id()+1 << " other.done " << other.done << endl;
links++;
// The neighbor joins the next hop's frontier (promoted in main between passes).
if (!other.done){
other.next_active = true;
}
}
// This vertex is now fully expanded; never visit it again.
vdata.active=false;
vdata.done = true;
mymutex.unlock();
}
}; // end of aggregator
/**
 * Entry point. Modes (mutually exclusive, chosen via command line):
 *   --degree=1   print the degree distribution
 *   --cc=<file>  per-connected-component node/edge/seed statistics
 *   (default)    expand a subgraph around the seed set for --hops hops
 * Output goes to <training><suffix>, where the suffix depends on the mode.
 */
int main(int argc, const char *argv[]) {
logstream(LOG_WARNING)<<"GraphChi graph analytics library is written by Danny Bickson (c). Send any "
" comments or bug reports to danny.bickson@gmail.com " << std::endl;
//* GraphChi initialization will read the command line arguments and the configuration file. */
graphchi_init(argc, argv);
/* Metrics object for keeping track of performance counters
and other information. Currently required. */
metrics m("subgraph-inmemory-factors");
std::string datafile;
// NOTE(review): this local shadows the global max_iter declared above.
int max_iter = get_option_int("hops", 3); // Number of iterations
bool quiet = get_option_int("quiet", 0);
undirected = get_option_int("undirected", undirected);
if (quiet)
global_logger().set_log_level(LOG_ERROR);
debug = get_option_int("debug", 0);
datafile = get_option_string("training");
square = get_option_int("square", 0);
tokens_per_row = get_option_int("tokens_per_row", tokens_per_row);
// NOTE(review): 2460000000 overflows get_option_int's int default parameter;
// the (negative) result converted to size_t becomes a huge limit, i.e. the
// default is effectively "unlimited" - confirm this is the intent.
edges = get_option_int("edges", 2460000000);
nodes = get_option_int("nodes", nodes);
orig_edges = get_option_int("orig_edges", orig_edges);
_degree = get_option_int("degree", _degree);
cc = get_option_string("cc", cc);
seed_edges_only = get_option_int("seed_edges_only", seed_edges_only);
// The degree / component / seed-subset modes are single-pass.
if (_degree || cc != "" || seed_edges_only)
max_iter = 1;
std::string seeds = get_option_string("seeds","");
std::string seed_file = get_option_string("seed_file", "");
min_range = get_option_int("min_range", min_range);
max_range = get_option_int("max_range", max_range);
mytimer.start();
/* Preprocess data if needed, or discover preprocess files */
int nshards = 0;
if (tokens_per_row == 4 )
convert_matrixmarket4<edge_data>(datafile, false, square);
else if (tokens_per_row == 3 || tokens_per_row == 2)
convert_matrixmarket<edge_data>(datafile, NULL, nodes, orig_edges, tokens_per_row);
else logstream(LOG_FATAL)<<"Please use --tokens_per_row=2 or --tokens_per_row=3 or --tokens_per_row=4" << std::endl;
// One in-memory state record per node (M and N are set by the converters above).
latent_factors_inmem.resize(square? std::max(M,N) : M+N);
vec vseed;
/* Mark the initial seed set (not needed for the degree printout). */
if (!_degree){
if (seed_file == ""){/* read list of seeds from the --seeds=XX command line argument */
if (seeds == "")
logstream(LOG_FATAL)<<"Must specify either seeds or seed_file"<<std::endl;
// NOTE(review): pseeds (strdup) is never freed - harmless one-shot leak.
char * pseeds = strdup(seeds.c_str());
char * pch = strtok(pseeds, ",\n\r\t ");
// Seed ids on the command line are 1-based.
int node = atoi(pch);
latent_factors_inmem[node-1].active = true;
while ((pch = strtok(NULL, ",\n\r\t "))!= NULL){
node = atoi(pch);
latent_factors_inmem[node-1].active = true;
}
}
else { /* load initial set of seeds from file */
vseed = load_matrix_market_vector(seed_file, false, false);
for (int i=0; i< vseed.size(); i++){
assert(vseed[i] > 0 && vseed[i] <= latent_factors_inmem.size());
latent_factors_inmem[vseed[i]-1].active = true;
}
}
}
vec components;
/* read a vector of connected components for each node */
if (cc != ""){
components = load_matrix_market_vector(cc, false,true);
assert((int)components.size() <= (int) latent_factors_inmem.size());
for (uint i=0; i< components.size(); i++){
// NOTE(review): if components.size() == latent_factors_inmem.size()
// (allowed by the assert above), this assert fails on the last node.
assert(i+1 < latent_factors_inmem.size());
assert(components[i] >= 1 && components[i] <= nodes);
if (debug && components[i] == 9322220)
logstream(LOG_DEBUG)<<"Setting node : " <<i<<" component : " << components[i] << std::endl;
latent_factors_inmem[i].component = components[i];
}
// Per-component accumulators, indexed by component id (ids are <= nodes).
component_edges = zeros(nodes);
component_nodes = zeros(nodes);
component_seeds = zeros(nodes);
// Count seeds per component (vseed is empty unless --seed_file was given).
for (uint i=0; i< vseed.size(); i++){
assert(vseed[i] >= 1 && vseed[i] <= latent_factors_inmem.size());
component_seeds[latent_factors_inmem[vseed[i]-1].component]++;
}
assert(sum(component_seeds) == vseed.size());
}
else if (seed_edges_only){
// In seed-subset mode, mark every non-seed node done so that only
// seed-to-seed edges are ever emitted by the update function.
for (uint i=0; i< latent_factors_inmem.size(); i++){
vertex_data & vdata = latent_factors_inmem[i];
if (!vdata.active)
vdata.done = true;
}
}
/* Pick the output file suffix according to the selected mode. */
std::string suffix;
if (cc != "")
suffix = "-cc.txt";
else if (seed_edges_only)
suffix = "-subset.txt";
else if (_degree)
suffix = "-degree.txt";
else suffix = "-subgraph.txt";
pfile = open_file((datafile + suffix).c_str(), "w", false);
std::cout<<"Writing output to: " << datafile << suffix << std::endl;
num_active = 0;
/* Run one engine pass per hop, promoting next_active -> active in between. */
graphchi_engine<VertexDataType, EdgeDataType> engine(datafile, nshards, false, m);
set_engine_flags(engine);
engine.set_maxwindow(nodes+1);
SubgraphsProgram program;
for (iiter=0; iiter< max_iter; iiter++){
//std::cout<<mytimer.current_time() << ") Going to run subgraph iteration " << iiter << std::endl;
/* Run */
//while(true){
engine.run(program, 1);
std::cout<< iiter << ") " << mytimer.current_time() << " Number of active nodes: " << num_active <<" Number of links: " << links << std::endl;
// Promote nodes touched this hop to the next hop's frontier.
for (uint i=0; i< latent_factors_inmem.size(); i++){
if (latent_factors_inmem[i].next_active && !latent_factors_inmem[i].done){
latent_factors_inmem[i].next_active = false;
latent_factors_inmem[i].active = true;
}
}
// Stop early once the requested edge budget has been reached.
if (links >= edges){
std::cout<<"Grabbed enough edges!" << std::endl;
break;
}
}
/* Component mode: dump per-component counters with consistency checks. */
if (cc != ""){
logstream(LOG_INFO)<<"component nodes sum: " << sum(component_nodes) << std::endl;
logstream(LOG_INFO)<<"component edges sum: " << sum(component_edges) << std::endl;
int total_written = 0;
assert(sum(component_nodes) == components.size());
assert(pfile != NULL);
int total_seeds = 0;
for (uint i=0; i< component_nodes.size(); i++){
// Print a component when it falls inside an explicitly requested id
// range, or (when no range was set, i.e. max_range still equals the
// 2400000000 sentinel) when the component is non-trivial.
if ((max_range != 2400000000 && i >= (uint)min_range && i < (uint)max_range) || (max_range == 2400000000 && (component_nodes[i] > 1 || component_edges[i] > 0))){
fprintf(pfile, "%d %d %d %d\n", i, (int)component_nodes[i], (int)component_edges[i], (int)component_seeds[i]);
total_written++;
total_seeds+= component_seeds[i];
}
// Internal consistency checks: any of these indicates corrupt input or a
// bug in the upstream component computation.
if (component_nodes[i] > 1 && component_edges[i] == 0)
logstream(LOG_FATAL)<<"Bug: component " << i << " has " << component_nodes[i] << " but not edges!" <<std::endl;
if (component_nodes[i] == 0 && component_edges[i] > 0)
logstream(LOG_FATAL)<<"Bug: component " << i << " has " << component_edges[i] << " but not nodes!" <<std::endl;
if (component_seeds[i] == 0 && component_edges[i] > 0)
logstream(LOG_FATAL)<<"Bug: component " << i << " has " << component_edges[i] << " but not seeds!" << std::endl;
if (component_edges[i] > 0 && component_edges[i]+2 < component_nodes[i] )
logstream(LOG_FATAL)<<"Bug: component " << i << " has missing edges: " << component_edges[i] << " nodes: " << component_nodes[i] << std::endl;
if (component_nodes[i] == 2 && component_edges[i] == 2)
logstream(LOG_FATAL)<<"Bug: component " << i << " 2 nodes +2 edges: " << component_edges[i] << " nodes: " << component_nodes[i] << std::endl;
}
logstream(LOG_INFO)<<"total written components: " << total_written << " sum : " << sum(component_nodes) << std::endl;
logstream(LOG_INFO)<<"Singleton nodes (no edges): " << singleton_nodes << std::endl;
logstream(LOG_INFO)<<"Total seeds: " << total_seeds << std::endl;
}
else {
std::cout<< iiter << ") Number of active nodes: " << num_active <<" Number of links: " << links << std::endl;
std::cout << "subgraph finished in " << mytimer.current_time() << std::endl;
std::cout << "Number of passes: " << iiter<< std::endl;
std::cout << "Total active nodes: " << num_active << " edges: " << links << std::endl;
}
fflush(pfile);
fclose(pfile);
//delete pout;
return EXIT_SUCCESS;
}
| C++ |
/**
* @file
* @author Danny Bickson
* @version 1.0
*
* @section LICENSE
*
* Copyright [2012] [Carnegie Mellon University]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @section DESCRIPTION
*
* Implementation of the label propagation algorithm
*/
#include "../collaborative_filtering/common.hpp"
#include "../collaborative_filtering/eigen_wrapper.hpp"
// Damping factor: fraction of a vertex's previous label distribution kept at
// each propagation step (new = alpha*old + (1-alpha)*neighbor average).
double alpha = 0.15;
// Verbose per-vertex logging when nonzero (set via --debug).
int debug = 0;
// Per-vertex state for label propagation: pvec holds the D label
// probabilities; 'seed' marks vertices whose labels are fixed by the input.
struct vertex_data {
vec pvec;
bool seed;
vertex_data() {
pvec = zeros(D);
seed = false;
}
//this function is only called for seed nodes
// (invoked by the matrix-market loader for rows present in the .seeds file;
// any vertex that receives a value is thereby marked as a seed)
void set_val(int index, float val){
pvec[index] = val;
seed = true;
}
// Read back the probability of label 'index' (used when writing the output).
float get_val(int index){
return pvec[index];
}
};
/**
 * Type definitions. Remember to create suitable graph shards using the
 * Sharder-program.
 */
typedef vertex_data VertexDataType;
typedef float EdgeDataType; // Edges store the "rating" of user->movie pair
// Engine pointer, set in main() (kept global for io.hpp compatibility).
graphchi_engine<VertexDataType, EdgeDataType> * pengine = NULL;
// In-memory per-vertex state, indexed by vertex id.
std::vector<vertex_data> latent_factors_inmem;
#include "../collaborative_filtering/io.hpp"
/**
 * GraphChi programs need to subclass GraphChiProgram<vertex-type, edge-type>
 * class. The main logic is usually in the update function.
 */
struct LPVerticesInMemProgram : public GraphChiProgram<VertexDataType, EdgeDataType> {
/**
* Vertex update function - one label-propagation step for a single vertex:
* new distribution = alpha * old + (1-alpha) * normalized weighted average
* of the neighbors' distributions, renormalized to sum to 1.
*/
void update(graphchi_vertex<VertexDataType, EdgeDataType> &vertex, graphchi_context &gcontext) {
vertex_data & vdata = latent_factors_inmem[vertex.id()];
if (debug)
logstream(LOG_DEBUG)<<"Entering node: " << vertex.id() << " seed? " << vdata.seed << " in vector: " << vdata.pvec << std::endl;
// Seed labels are clamped; vertices with no out-edges have nothing to aggregate.
if (vdata.seed || vertex.num_outedges() == 0) //if this is a seed node, don't do anything
return;
vec ret = zeros(D);
// NOTE(review): the loop bound is num_outedges() but vertex.edge(e)
// enumerates in-edges as well as out-edges; this only matches if the engine
// is configured so the two coincide (e.g. in-edges disabled in
// set_engine_flags) - confirm, otherwise outedge(e) is likely intended.
for(int e=0; e < vertex.num_outedges(); e++) {
float weight = vertex.edge(e)->get_data();
assert(weight != 0);
vertex_data & nbr_latent = latent_factors_inmem[vertex.edge(e)->vertex_id()];
ret += weight * nbr_latent.pvec;
}
//normalize probabilities
assert(sum(ret) != 0);
ret = ret / sum(ret);
// Damped update followed by renormalization.
vdata.pvec = alpha * vdata.pvec + (1-alpha)*ret;
vdata.pvec/= sum(vdata.pvec);
}
};
/**
 * Persist the label-propagation result: writes one matrix-market file,
 * <filename>_U.mm, holding the D label probabilities of every vertex
 * (one vertex per row), and logs where the output went.
 */
void output_lp_result(std::string filename) {
  const std::string comment = "This file contains LP output matrix U. In each row D probabilities for the Y labels";
  MMOutputter_mat<vertex_data> user_mat(filename + "_U.mm", 0, M , comment, latent_factors_inmem);
  logstream(LOG_INFO) << "LP output files (in matrix market format): " << filename << "_U.mm" << std::endl;
}
/**
 * Entry point for label propagation: loads the (square) adjacency matrix and
 * the seed labels from <training>.seeds, initializes every non-seed vertex
 * with random label probabilities, runs 'niters' propagation sweeps, and
 * writes the resulting probability matrix to <training>_U.mm.
 */
int main(int argc, const char ** argv) {
  print_copyright();

  /* GraphChi initialization will read the command line
     arguments and the configuration file. */
  graphchi_init(argc, argv);

  /* Metrics object for keeping track of performance counters
     and other information. Currently required. */
  metrics m("label_propagation");

  alpha = get_option_float("alpha", alpha);
  debug = get_option_int("debug", debug);
  parse_command_line_args();

  //load graph (adj matrix) from file
  int nshards = convert_matrixmarket<EdgeDataType>(training, NULL, 0, 0, 3, TRAINING, true);
  if (M != N)
    logstream(LOG_FATAL)<<"Label propagation supports only square matrices" << std::endl;

  init_feature_vectors<std::vector<vertex_data> >(M, latent_factors_inmem, false);
  //load seed initialization from file
  load_matrix_market_matrix(training + ".seeds", 0, D);

#pragma omp parallel for
  for (int i=0; i< (int)M; i++){
    //normalize seed probabilities to sum up to one
    if (latent_factors_inmem[i].seed){
      assert(sum(latent_factors_inmem[i].pvec) != 0);
      latent_factors_inmem[i].pvec /= sum(latent_factors_inmem[i].pvec);
      continue;
    }
    // Non-seed nodes start from random label probabilities.
    // BUG FIX: drand48() keeps hidden global state and is not reentrant, so
    // calling it from an OpenMP parallel loop was a data race. erand48()
    // with per-iteration state is thread-safe and makes the initialization
    // deterministic as well.
    unsigned short xsubi[3] = { (unsigned short)(i + 1), (unsigned short)((i + 1) >> 16), 0x330E };
    for (int j=0; j< D; j++)
      latent_factors_inmem[i].pvec[j] = erand48(xsubi);
  }

  /* load initial state from disk (optional) */
  if (load_factors_from_file){
    load_matrix_market_matrix(training + "_U.mm", 0, D);
  }

  /* Run */
  LPVerticesInMemProgram program;
  graphchi_engine<VertexDataType, EdgeDataType> engine(training, nshards, false, m);
  set_engine_flags(engine);
  pengine = &engine;
  engine.run(program, niters);

  /* Output latent factor matrices in matrix-market format */
  output_lp_result(training);

  /* Report execution metrics */
  if (!quiet)
    metrics_report(m);
  return 0;
}
| C++ |
/**
* Copyright (c) 2009 Carnegie Mellon University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an "AS
* IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language
* governing permissions and limitations under the License.
*
* Written by Danny Bickson, CMU
*
*
* This program reads a text input file, where each line
* is taken from another document. The program counts the number of word
* occurances for each line (document) and outputs a document word count to be used
* in LDA.
*/
#include <cstdio>
#include <iostream>
#include <map>
#include <omp.h>
#include <assert.h>
#include "graphchi_basic_includes.hpp"
#include "../collaborative_filtering/timer.hpp"
#include "../collaborative_filtering/util.hpp"
#include "common.hpp"
using namespace std;
using namespace graphchi;
bool debug = false;                       // verbose token logging (--debug)
timer mytime;                             // wall-clock timer for the whole run
size_t lines;                             // optional line limit (--lines; 0 = all)
unsigned long long total_lines = 0;       // running line counter (not updated in this file)
string dir;                               // path of the file that lists the input files
string outdir;                            // output path prefix (never set here; defaults to empty)
std::vector<std::string> in_files;        // input file names read from 'dir'
//non word tokens that will be removed in the parsing
//it is possible to add additional special characters or remove ones you want to keep
const char spaces[] = {" \r\n\t!?@#$%^&*()-+.,~`\";:/"};
const char qoute[] = {",\""};
const char comma[] = {","};
int has_header_titles = 1;                // first line of each input file is a header row
std::vector<std::string> header_titles;   // header tokens collected during parsing
// 1-based CSV column numbers to extract (-1 = not used); set from the command line.
int from_val = -1; int to_val = -1;
int mid_val = -1;
void parse(int i){
in_file fin(in_files[i]);
out_file fout((outdir + in_files[i] + ".out"));
size_t linesize = 0;
char * saveptr = NULL, * saveptr2 = NULL,* linebuf = NULL;
size_t line = 1;
uint id;
if (has_header_titles){
char * linebuf = NULL;
size_t linesize;
char linebuf_debug[1024];
/* READ LINE */
int rc = getline(&linebuf, &linesize, fin.outf);
if (rc == -1)
logstream(LOG_FATAL)<<"Error header line " << " [ " << linebuf_debug << " ] " << std::endl;
strncpy(linebuf_debug, linebuf, 1024);
char *pch = strtok(linebuf,"\t,\r; ");
if (pch == NULL)
logstream(LOG_FATAL)<<"Error header line " << " [ " << linebuf_debug << " ] " << std::endl;
header_titles.push_back(pch);
if (debug) printf("Found title: %s\n", pch);
while (pch != NULL){
pch = strtok(NULL, "\t,\r; ");
if (pch == NULL)
break;
header_titles.push_back(pch);
if (debug) printf("Found title: %s\n", pch);
}
}
while(true){
int rc = getline(&linebuf, &linesize, fin.outf);
if (rc < 1)
return;
int index = 0;
char frombuf[256];
char tobuf[256];
char *pch = strtok_r(linebuf, ",", &saveptr);
if (!pch){ logstream(LOG_ERROR) << "Error when parsing file: " << in_files[i] << ":" << line << "[" << linebuf << "]" << std::endl; return; }
fprintf(fout.outf,"\"%s\",", pch);
if (debug) printf("Found token 1 %s\n", pch);
if (pch[0] == '"')
pch++;
index++;
int from,to,mid;
if (index == from_val)
from = atoi(pch);
if (index == to_val)
to = atoi(pch);
if (index == mid_val)
mid = atoi(pch);
while(true){
pch = strtok_r(NULL, ",", &saveptr);
if (pch == NULL)
break;
index++;
if (debug) printf("Found token %d %s\n", index, pch);
if (pch[0] == '"')
pch++;
if (index == from_val)
from = atoi(pch);
if (index == to_val)
to = atoi(pch);
if (index == mid_val)
mid = atoi(pch);
}
char totalbuf[512];
if (mid_val == -1 && to_val == -1)
sprintf(totalbuf, "%d\t", from);
else if (mid_val != -1)
sprintf(totalbuf, "%d\t%d\t%d\t", from, mid, to);
else
sprintf(totalbuf, "%d\t%d\t", from, to);
if (debug) printf("Incrementing map: %s\n", totalbuf);
frommap.string2nodeid[totalbuf]++;
}
}
/**
 * Entry point: reads the list of input files named by --file_list, then
 * parses them in parallel (see parse()) and writes the aggregated column
 * histogram to "<outdir><file_list>map.text".
 */
int main(int argc, const char *argv[]) {
  logstream(LOG_WARNING)<<"GraphChi parsers library is written by Danny Bickson (c). Send any "
    " comments or bug reports to danny.bickson@gmail.com " << std::endl;
  global_logger().set_log_level(LOG_INFO);
  global_logger().set_log_to_console(true);

  graphchi_init(argc, argv);

  debug = get_option_int("debug", 0);
  dir = get_option_string("file_list");
  lines = get_option_int("lines", 0);
  omp_set_num_threads(get_option_int("ncpus", 1));
  from_val = get_option_int("from_val", from_val);
  to_val = get_option_int("to_val", to_val);
  mid_val = get_option_int("mid_val", mid_val);
  if (from_val == -1)
    logstream(LOG_FATAL)<<"Must set from/to " << std::endl;
  mytime.start();

  FILE * f = fopen(dir.c_str(), "r");
  if (f == NULL)
    logstream(LOG_FATAL)<<"Failed to open file list!"<<std::endl;
  // One file name per line; %255s (not %s) so an over-long
  // path cannot overflow buf.
  while(true){
    char buf[256];
    int rc = fscanf(f, "%255s\n", buf);
    if (rc < 1)
      break;
    in_files.push_back(buf);
  }
  fclose(f);   // was previously leaked

  if (in_files.size() == 0)
    logstream(LOG_FATAL)<<"Failed to read any file frommap from the list file: " << dir << std::endl;

  // Parse all input files in parallel; parse() serializes its writes to the
  // shared globals.
#pragma omp parallel for
  for (int i=0; i< (int)in_files.size(); i++)
    parse(i);

  std::cout << "Finished in " << mytime.current_time() << std::endl;
  save_map_to_text_file(frommap.string2nodeid, outdir + dir + "map.text");
  return 0;
}
| C++ |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.