code
stringlengths
1
2.01M
repo_name
stringlengths
3
62
path
stringlengths
1
267
language
stringclasses
231 values
license
stringclasses
13 values
size
int64
1
2.01M
/** * @file * @author Aapo Kyrola <akyrola@cs.cmu.edu> * @version 1.0 * * @section LICENSE * * Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University] * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * @section DESCRIPTION * * Simple smoketest for the dynamic graph graphchi engine. */ #include <string> #define SUPPORT_DELETIONS 1 #include "graphchi_basic_includes.hpp" #include "engine/dynamic_graphs/graphchi_dynamicgraph_engine.hpp" using namespace graphchi; /** * Type definitions. Remember to create suitable graph shards using the * Sharder-program. */ typedef vid_t VertexDataType; typedef vid_t EdgeDataType; /** * Smoke test. On every iteration, each vertex sets its id to be * id + iteration number. Vertices check whether their neighbors were * set correctly. This assumes that the vertices are executed in round-robin order. * - Uses edges in inverse order to the first smoketest. */ struct SmokeTestProgram2 : public GraphChiProgram<VertexDataType, EdgeDataType> { volatile size_t ndeleted; /** * Vertex update function. */ void update(graphchi_vertex<VertexDataType, EdgeDataType> &vertex, graphchi_context &gcontext) { int ninedges = 0; if (gcontext.iteration == 0) { for(int i=0; i < vertex.num_inedges(); i++) { vertex.inedge(i)->set_data(vertex.id()); ninedges++; } } else { // Keep track of the number of edegs to ensure that // deletion works fine. 
if (vertex.get_data() != vertex.num_inedges()) { logstream(LOG_ERROR) << "Discrepancy in edge counts: " << vertex.get_data() << " != " << vertex.num_inedges() << std::endl; } assert(vertex.get_data() == vertex.num_inedges()); for(int i=0; i < vertex.num_outedges(); i++) { graphchi_edge<vid_t> * edge = vertex.outedge(i); vid_t outedgedata = edge->get_data(); vid_t expected = edge->vertex_id() + gcontext.iteration - (edge->vertex_id() > vertex.id()); if (!is_deleted_edge_value(edge->get_data())) { if (outedgedata != expected) { logstream(LOG_ERROR) << outedgedata << " != " << expected << std::endl; assert(false); } } } for(int i=0; i < vertex.num_inedges(); i++) { vertex.inedge(i)->set_data(vertex.id() + gcontext.iteration); if (std::rand() % 4 == 1) { vertex.remove_inedge(i); __sync_add_and_fetch(&ndeleted, 1); } else { ninedges++; } } } if (gcontext.iteration == gcontext.num_iterations - 1) { vertex.set_data(gcontext.iteration + 1); } else { vertex.set_data(ninedges); } } /** * Called before an iteration starts. */ void before_iteration(int iteration, graphchi_context &gcontext) { ndeleted = 0; } /** * Called after an iteration has finished. */ void after_iteration(int iteration, graphchi_context &gcontext) { if (gcontext.iteration > 0) assert(ndeleted > 0); logstream(LOG_INFO) << "Deleted: " << ndeleted << std::endl; } /** * Called before an execution interval is started. */ void before_exec_interval(vid_t window_st, vid_t window_en, graphchi_context &gcontext) { } /** * Called after an execution interval has finished. */ void after_exec_interval(vid_t window_st, vid_t window_en, graphchi_context &gcontext) { } }; /** * Vertex callback that checks the vertex data is ok. 
*/ class VertexDataChecker : public VCallback<VertexDataType> { int iters; public: size_t total; VertexDataChecker(int iters) : iters(iters), total(0) {} void callback(vid_t vertex_id, VertexDataType &vecvalue) { assert(vecvalue == iters); total += iters; } }; int main(int argc, const char ** argv) { /* GraphChi initialization will read the command line arguments and the configuration file. */ graphchi_init(argc, argv); /* Metrics object for keeping track of performance counters and other information. Currently required. */ metrics m("smoketest-dynamic-engine2"); /* Basic arguments for application */ std::string filename = get_option_string("file"); // Base filename int niters = get_option_int("niters", 4); // Number of iterations bool scheduler = false; // Whether to use selective scheduling /* Detect the number of shards or preprocess an input to creae them */ int nshards = convert_if_notexists<EdgeDataType>(filename, get_option_string("nshards", "auto")); /* Run */ SmokeTestProgram2 program; graphchi_dynamicgraph_engine<VertexDataType, EdgeDataType> engine(filename, nshards, scheduler, m); engine.run(program, niters); /* Check also the vertex data is ok */ VertexDataChecker vchecker(niters); foreach_vertices(filename, 0, engine.num_vertices(), vchecker); assert(vchecker.total == engine.num_vertices() * niters); /* Report execution metrics */ metrics_report(m); logstream(LOG_INFO) << "Dynamic Engine Smoketest passed successfully! Your system is working!" << std::endl; return 0; }
09jijiangwen-download
src/tests/basic_dynamicengine_smoketest2.cpp
C++
asf20
6,252
/* Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #ifndef DEF_GRAPHCHI_TYPES #define DEF_GRAPHCHI_TYPES #include <stdint.h> namespace graphchi { typedef uint32_t vid_t; /** * PairContainer encapsulates a pair of values of some type. * Useful for bulk-synchronuos computation. */ template <typename ET> struct PairContainer { ET left; ET right; PairContainer() { left = ET(); right = ET(); } ET & oldval(int iter) { return (iter % 2 == 0 ? left : right); } void set_newval(int iter, ET x) { if (iter % 2 == 0) { right = x; } else { left = x; } } }; } #endif
09jijiangwen-download
src/graphchi_types.hpp
C++
asf20
1,399
/** * @file * @author Aapo Kyrola <akyrola@cs.cmu.edu> * @version 1.0 * * @section LICENSE * * Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University] * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * @section DESCRIPTION * * Vertex and Edge objects. */ #ifndef DEF_GRAPHCHI_OBJECTS #define DEF_GRAPHCHI_OBJECTS #include <vector> #include <assert.h> #include <omp.h> #include <string.h> #include "graphchi_types.hpp" #include "util/qsort.hpp" namespace graphchi { /** * GNU COMPILER HACK TO PREVENT WARNINGS "Unused variable", if * the particular app being compiled does not use a function. */ #ifdef __GNUC__ #define VARIABLE_IS_NOT_USED __attribute__ ((unused)) #else #define VARIABLE_IS_NOT_USED #endif template <typename EdgeDataType> class graphchi_edge { public: vid_t vertexid; // Source or Target vertex id. Clear from context. EdgeDataType * data_ptr; graphchi_edge() {} graphchi_edge(vid_t _vertexid, EdgeDataType * edata_ptr) : vertexid(_vertexid), data_ptr(edata_ptr) { } #ifndef DYNAMICEDATA EdgeDataType get_data() { return * data_ptr; } void set_data(EdgeDataType x) { *data_ptr = x; } #else EdgeDataType * get_vector() { // EdgeDataType is a chivector return data_ptr; } #endif /** * Returns id of the endpoint of this edge. 
*/ vid_t vertex_id() { return vertexid; } } __attribute__((packed)); template <typename ET> bool eptr_less(const graphchi_edge<ET> &a, const graphchi_edge<ET> &b) { return a.vertexid < b.vertexid; } #ifdef SUPPORT_DELETIONS /* * Hacky support for edge deletions. * Edges are deleted by setting the value of the edge to a special * value that denotes it was deleted. * In the future, a better system could be designed. */ // This is hacky... static inline bool VARIABLE_IS_NOT_USED is_deleted_edge_value(int val); static inline bool VARIABLE_IS_NOT_USED is_deleted_edge_value(bool val) { return val; } static inline bool VARIABLE_IS_NOT_USED is_deleted_edge_value(int val); static inline bool VARIABLE_IS_NOT_USED is_deleted_edge_value(int val) { return 0xffffffff == (unsigned int)val; } static inline bool VARIABLE_IS_NOT_USED is_deleted_edge_value(vid_t val); static inline bool VARIABLE_IS_NOT_USED is_deleted_edge_value(vid_t val) { return 0xffffffffu == val; } static inline bool VARIABLE_IS_NOT_USED is_deleted_edge_value(float val); static inline bool VARIABLE_IS_NOT_USED is_deleted_edge_value(float val) { return !(val < 0 || val > 0); } static void VARIABLE_IS_NOT_USED remove_edgev(graphchi_edge<bool> * e); static void VARIABLE_IS_NOT_USED remove_edgev(graphchi_edge<bool> * e) { e->set_data(true); } static void VARIABLE_IS_NOT_USED remove_edgev(graphchi_edge<vid_t> * e); static void VARIABLE_IS_NOT_USED remove_edgev(graphchi_edge<vid_t> * e) { e->set_data(0xffffffff); } static void VARIABLE_IS_NOT_USED remove_edgev(graphchi_edge<int> * e); static void VARIABLE_IS_NOT_USED remove_edgev(graphchi_edge<int> * e) { e->set_data(0xffffffff); } #endif template <typename VertexDataType, typename EdgeDataType> class internal_graphchi_vertex { public: // Todo, use friend int inc; volatile int outc; vid_t vertexid; protected: graphchi_edge<EdgeDataType> * inedges_ptr; graphchi_edge<EdgeDataType> * outedges_ptr; public: bool modified; VertexDataType * dataptr; /* Accessed directly by 
the engine */ bool scheduled; bool parallel_safe; #ifdef SUPPORT_DELETIONS int deleted_inc; int deleted_outc; #endif internal_graphchi_vertex() : inc(0), outc(0) { #ifdef SUPPORT_DELETIONS deleted_outc = deleted_inc = 0; #endif dataptr = NULL; } internal_graphchi_vertex(vid_t _id, graphchi_edge<EdgeDataType> * iptr, graphchi_edge<EdgeDataType> * optr, int indeg, int outdeg) : vertexid(_id), inedges_ptr(iptr), outedges_ptr(optr) { inc = 0; outc = 0; scheduled = false; modified = false; parallel_safe = true; dataptr = NULL; #ifdef SUPPORT_DELETIONS deleted_inc = 0; deleted_outc = 0; #endif } virtual ~internal_graphchi_vertex() {} vid_t id() const { return vertexid; } int num_inedges() const { return inc; } int num_outedges() const { return outc; } int num_edges() const { return inc + outc; } // Optimization: as only memshard (not streaming shard) creates inedgers, // we do not need atomic instructions here! inline void add_inedge(vid_t src, EdgeDataType * ptr, bool special_edge) { #ifdef SUPPORT_DELETIONS if (inedges_ptr != NULL && is_deleted_edge_value(*ptr)) { deleted_inc++; return; } #endif if (inedges_ptr != NULL) inedges_ptr[inc] = graphchi_edge<EdgeDataType>(src, ptr); inc++; // Note: do not move inside the brackets, since we need to still keep track of inc even if inedgeptr is null! assert(src != vertexid); /* if(inedges_ptr != NULL && inc > outedges_ptr - inedges_ptr) { logstream(LOG_FATAL) << "Tried to add more in-edges as the stored in-degree of this vertex (" << src << "). Perhaps a preprocessing step had failed?" 
<< std::endl; assert(inc <= outedges_ptr - inedges_ptr); } */ // Deleted, since does not work when we have separate in-edge and out-edge arrays } inline void add_outedge(vid_t dst, EdgeDataType * ptr, bool special_edge) { #ifdef SUPPORT_DELETIONS if (outedges_ptr != NULL && is_deleted_edge_value(*ptr)) { deleted_outc++; return; } #endif int i = __sync_add_and_fetch(&outc, 1); if (outedges_ptr != NULL) outedges_ptr[i-1] = graphchi_edge<EdgeDataType>(dst, ptr); assert(dst != vertexid); } }; template <typename VertexDataType, typename EdgeDataType > class graphchi_vertex : public internal_graphchi_vertex<VertexDataType, EdgeDataType> { public: graphchi_vertex() : internal_graphchi_vertex<VertexDataType, EdgeDataType>() { } graphchi_vertex(vid_t _id, graphchi_edge<EdgeDataType> * iptr, graphchi_edge<EdgeDataType> * optr, int indeg, int outdeg) : internal_graphchi_vertex<VertexDataType, EdgeDataType>(_id, iptr, optr, indeg, outdeg) {} virtual ~graphchi_vertex() {} /** * Returns ith edge of a vertex, ignoring * edge direction. */ graphchi_edge<EdgeDataType> * edge(int i) { if (i < this->inc) return inedge(i); else return outedge(i - this->inc); } graphchi_edge<EdgeDataType> * inedge(int i) { assert(i >= 0 && i < this->inc); return &this->inedges_ptr[i]; } graphchi_edge<EdgeDataType> * outedge(int i) { assert(i >= 0 && i < this->outc); return &this->outedges_ptr[i]; } graphchi_edge<EdgeDataType> * random_outedge() { if (this->outc == 0) return NULL; return outedge((int) (std::abs(random()) % this->outc)); } /** * Get the value of vertex */ #ifndef DYNAMICVERTEXDATA VertexDataType get_data() { return *(this->dataptr); } #else // VertexDataType must be a chivector VertexDataType * get_vector() { this->modified = true; // Assume vector always modified... Temporaryh solution. return this->dataptr; } #endif /** * Modify the vertex value. The new value will be * stored on disk. 
*/ virtual void set_data(VertexDataType d) { *(this->dataptr) = d; this->modified = true; } // TODO: rethink static bool computational_edges() { return false; } static bool read_outedges() { return true; } /** * Sorts all the edges. Note: this will destroy information * about the in/out direction of an edge. Do use only if you * ignore the edge direction. */ void VARIABLE_IS_NOT_USED sort_edges_indirect() { // Check for deleted edges first... if (this->inc != (this->outedges_ptr - this->inedges_ptr)) { // Moving memmove(&this->inedges_ptr[this->inc], this->outedges_ptr, this->outc * sizeof(graphchi_edge<EdgeDataType>)); this->outedges_ptr = &this->inedges_ptr[this->inc]; } quickSort(this->inedges_ptr, (int) (this->inc + this->outc), eptr_less<EdgeDataType>); } #ifdef SUPPORT_DELETIONS void VARIABLE_IS_NOT_USED remove_edge(int i) { remove_edgev(edge(i)); } void VARIABLE_IS_NOT_USED remove_inedge(int i) { remove_edgev(inedge(i)); } void VARIABLE_IS_NOT_USED remove_outedge(int i) { remove_edgev(outedge(i)); } #endif }; /** * Experimental code */ // If highest order bit is set, the edge is "special". This is used // to indicate - in the neighborhood model - that neighbor's value is // cached in memory. #define HIGHMASK (1 + (2147483647 >> 1)) #define CLEARMASK (2147483647 >> 1) inline vid_t translate_edge(vid_t rawid, bool &is_special) { is_special = (rawid & HIGHMASK) != 0; return rawid & CLEARMASK; } inline vid_t make_special(vid_t rawid) { return rawid | HIGHMASK; } inline bool is_special(vid_t rawid) { return (rawid & HIGHMASK) != 0; } } // Namespace #endif
09jijiangwen-download
src/api/graph_objects.hpp
C++
asf20
11,570
/** * @file * @author Aapo Kyrola <akyrola@cs.cmu.edu> * @version 1.0 * * @section LICENSE * * Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University] * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * @section DESCRIPTION * * GraphChiProgram must be subclassed by GraphChi user programs. * They can define an update function (run for each vertex), and * call backs for iteration and interval beginning and ending. */ #ifndef GRAPHCHI_PROGRAM_DEF #define GRAPHCHI_PROGRAM_DEF #include "api/graph_objects.hpp" #include "api/graphchi_context.hpp" namespace graphchi { template <typename VertexDataType_, typename EdgeDataType_, typename vertex_t = graphchi_vertex<VertexDataType_, EdgeDataType_> > class GraphChiProgram { public: typedef VertexDataType_ VertexDataType; typedef EdgeDataType_ EdgeDataType; virtual ~GraphChiProgram() {} /** * Called before an iteration starts. */ virtual void before_iteration(int iteration, graphchi_context &gcontext) { } /** * Called after an iteration has finished. */ virtual void after_iteration(int iteration, graphchi_context &gcontext) { } /** * Called before an execution interval is started. */ virtual void before_exec_interval(vid_t window_st, vid_t window_en, graphchi_context &gcontext) { } /** * Called after an execution interval has finished. */ virtual void after_exec_interval(vid_t window_st, vid_t window_en, graphchi_context &gcontext) { } /** * Update function. 
*/ virtual void update(vertex_t &v, graphchi_context &gcontext) = 0; }; } #endif
09jijiangwen-download
src/api/graphchi_program.hpp
C++
asf20
2,415
/** * @file * @author Aapo Kyrola <akyrola@cs.cmu.edu> * @version 1.0 * * @section LICENSE * * Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University] * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * @section DESCRIPTION * * Semi-synchronous implementation of the functional API. */ #ifndef GRAPHCHI_FUNCTIONAL_SEMISYNC_DEF #define GRAPHCHI_FUNCTIONAL_SEMISYNC_DEF #include <assert.h> #include "api/graph_objects.hpp" #include "api/graphchi_context.hpp" #include "api/functional/functional_defs.hpp" #include "metrics/metrics.hpp" #include "graphchi_types.hpp" namespace graphchi { template <typename KERNEL> class functional_vertex_unweighted_semisync : public graphchi_vertex<typename KERNEL::VertexDataType, typename KERNEL::EdgeDataType> { public: typedef typename KERNEL::VertexDataType VT; typedef typename KERNEL::EdgeDataType ET; VT cumval; KERNEL kernel; vertex_info vinfo; graphchi_context * gcontext; functional_vertex_unweighted_semisync() : graphchi_vertex<VT, ET> () {} functional_vertex_unweighted_semisync(graphchi_context &ginfo, vid_t _id, int indeg, int outdeg) : graphchi_vertex<VT, ET> (_id, NULL, NULL, indeg, outdeg) { vinfo.indegree = indeg; vinfo.outdegree = outdeg; vinfo.vertexid = _id; cumval = kernel.reset(); gcontext = &ginfo; } functional_vertex_unweighted_semisync(vid_t _id, graphchi_edge<ET> * iptr, graphchi_edge<ET> * optr, int indeg, int outdeg) { assert(false); // This should never be called. 
} void first_iteration(graphchi_context &gcontext_) { this->set_data(kernel.initial_value(gcontext_, vinfo)); } // Optimization: as only memshard (not streaming shard) creates inedgers, // we do not need atomic instructions here! inline void add_inedge(vid_t src, ET * ptr, bool special_edge) { if (gcontext->iteration > 0) { cumval = kernel.plus(cumval, kernel.op_neighborval(*gcontext, vinfo, src, *ptr)); } } void ready(graphchi_context &gcontext_) { this->set_data(kernel.compute_vertexvalue(gcontext_, vinfo, cumval)); } inline void add_outedge(vid_t dst, ET * ptr, bool special_edge) { *ptr = kernel.value_to_neighbor(*gcontext, vinfo, dst, this->get_data()); } bool computational_edges() { return true; } /* Outedges do not need to be read, they just need to be written */ static bool read_outedges() { return false; } }; template <typename KERNEL> class FunctionalProgramProxySemisync : public GraphChiProgram<typename KERNEL::VertexDataType, typename KERNEL::EdgeDataType, functional_vertex_unweighted_semisync<KERNEL> > { public: typedef typename KERNEL::VertexDataType VertexDataType; typedef typename KERNEL::EdgeDataType EdgeDataType; typedef functional_vertex_unweighted_semisync<KERNEL> fvertex_t; /** * Called before an iteration starts. */ void before_iteration(int iteration, graphchi_context &info) { } /** * Called after an iteration has finished. */ void after_iteration(int iteration, graphchi_context &ginfo) { } /** * Called before an execution interval is started. */ void before_exec_interval(vid_t window_st, vid_t window_en, graphchi_context &ginfo) { } /** * Pagerank update function. */ void update(fvertex_t &v, graphchi_context &ginfo) { if (ginfo.iteration == 0) { v.first_iteration(ginfo); } else { v.ready(ginfo); } } }; } #endif
09jijiangwen-download
src/api/functional/functional_semisync.hpp
C++
asf20
4,405
/** * @file * @author Aapo Kyrola <akyrola@cs.cmu.edu> * @version 1.0 * * @section LICENSE * * Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University] * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * @section DESCRIPTION * * Bulk-synchronous implementation of the functional API. * This API can be used to implement Sparse-Matrix-Vector-Multiply programs. * * @section TODO * * There is too much common code with the semi-sync version. Consolidate! */ #ifndef GRAPHCHI_FUNCTIONAL_BULKSYNC_DEF #define GRAPHCHI_FUNCTIONAL_BULKSYNC_DEF #include <assert.h> #include "api/graph_objects.hpp" #include "api/graphchi_context.hpp" #include "api/functional/functional_defs.hpp" #include "metrics/metrics.hpp" #include "graphchi_types.hpp" namespace graphchi { template <typename KERNEL> class functional_vertex_unweighted_bulksync : public graphchi_vertex<typename KERNEL::VertexDataType, PairContainer<typename KERNEL::EdgeDataType> > { public: typedef typename KERNEL::VertexDataType VT; typedef PairContainer<typename KERNEL::EdgeDataType> ET; KERNEL kernel; VT cumval; vertex_info vinfo; graphchi_context * gcontext; functional_vertex_unweighted_bulksync() : graphchi_vertex<VT, ET> () {} functional_vertex_unweighted_bulksync(graphchi_context &ginfo, vid_t _id, int indeg, int outdeg) : graphchi_vertex<VT, ET> (_id, NULL, NULL, indeg, outdeg) { vinfo.indegree = indeg; vinfo.outdegree = outdeg; vinfo.vertexid = _id; cumval = kernel.reset(); gcontext = &ginfo; } 
functional_vertex_unweighted_bulksync(vid_t _id, graphchi_edge<ET> * iptr, graphchi_edge<ET> * optr, int indeg, int outdeg) { assert(false); // This should never be called. } void first_iteration(graphchi_context &ginfo) { this->set_data(kernel.initial_value(ginfo, vinfo)); gcontext = &ginfo; } // Optimization: as only memshard (not streaming shard) creates inedgers, // we do not need atomic instructions here! inline void add_inedge(vid_t src, ET * ptr, bool special_edge) { if (gcontext->iteration > 0) { cumval = kernel.plus(cumval, kernel.op_neighborval(*gcontext, vinfo, src, ptr->oldval(gcontext->iteration))); } } void ready(graphchi_context &ginfo) { this->set_data(kernel.compute_vertexvalue(*gcontext, vinfo, cumval)); } inline void add_outedge(vid_t dst, ET * ptr, bool special_edge) { typename KERNEL::EdgeDataType newval = kernel.value_to_neighbor(*gcontext, vinfo, dst, this->get_data()); ET paircont = *ptr; paircont.set_newval(gcontext->iteration, newval); *ptr = paircont; } bool computational_edges() { return true; } /** * We also need to read the outedges, because we need * to preserve the old value as well. */ static bool read_outedges() { return true; } }; template <typename KERNEL> class FunctionalProgramProxyBulkSync : public GraphChiProgram<typename KERNEL::VertexDataType, PairContainer<typename KERNEL::EdgeDataType>, functional_vertex_unweighted_bulksync<KERNEL> > { public: typedef typename KERNEL::VertexDataType VertexDataType; typedef PairContainer<typename KERNEL::EdgeDataType> EdgeDataType; typedef functional_vertex_unweighted_bulksync<KERNEL> fvertex_t; /** * Called before an iteration starts. */ void before_iteration(int iteration, graphchi_context &info) { } /** * Called after an iteration has finished. */ void after_iteration(int iteration, graphchi_context &ginfo) { } /** * Called before an execution interval is started. */ void before_exec_interval(vid_t window_st, vid_t window_en, graphchi_context &ginfo) { } /** * Pagerank update function. 
*/ void update(fvertex_t &v, graphchi_context &ginfo) { if (ginfo.iteration == 0) { v.first_iteration(ginfo); } else { v.ready(ginfo); } } }; } #endif
09jijiangwen-download
src/api/functional/functional_bulksync.hpp
C++
asf20
5,570
/** * @file * @author Aapo Kyrola <akyrola@cs.cmu.edu> * @version 1.0 * * @section LICENSE * * Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University] * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * @section DESCRIPTION * * Functional API defs. */ #ifndef GRAPHCHI_FUNCTIONALDEFS_DEF #define GRAPHCHI_FUNCTIONALDEFS_DEF #include "api/graphchi_program.hpp" namespace graphchi { struct vertex_info { vid_t vertexid; int indegree; int outdegree; }; }; #endif
09jijiangwen-download
src/api/functional/functional_defs.hpp
C++
asf20
1,061
/** * @file * @author Aapo Kyrola <akyrola@cs.cmu.edu> * @version 1.0 * * @section LICENSE * * Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University] * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * @section DESCRIPTION * * Alternative "functional" API for GraphChi. The API is implemented as a * layer on top of the standard API, but uses a specialized engine "functional_engine", * which processes the graph data in different order. Namely, it first loads in-edges, * then executes updates, and finally writes new values (broadcasts) to out-edges. 
*/ #ifndef GRAPHCHI_FUNCTIONALAPI_DEF #define GRAPHCHI_FUNCTIONALAPI_DEF #include <assert.h> #include "api/graph_objects.hpp" #include "api/graphchi_context.hpp" #include "engine/functional/functional_engine.hpp" #include "metrics/metrics.hpp" #include "graphchi_types.hpp" #include "api/functional/functional_defs.hpp" #include "api/functional/functional_semisync.hpp" #include "api/functional/functional_bulksync.hpp" #include "preprocessing/conversions.hpp" namespace graphchi { /** * Superclass for kernels */ template <typename FVertexDataType, typename FEdgeDataType> struct functional_kernel { typedef FVertexDataType VertexDataType; typedef FEdgeDataType EdgeDataType; functional_kernel() {} /* Initial value - on first iteration */ virtual VertexDataType initial_value(graphchi_context &info, vertex_info& myvertex) = 0; /* Called before first "gather" */ virtual VertexDataType reset() = 0; // Note: Unweighted version, edge value should also be passed // "Gather" virtual EdgeDataType op_neighborval(graphchi_context &info, vertex_info& myvertex, vid_t nbid, EdgeDataType nbval)= 0; // "Sum" virtual EdgeDataType plus(VertexDataType curval, EdgeDataType toadd) = 0; // "Apply" virtual VertexDataType compute_vertexvalue(graphchi_context &ginfo, vertex_info& myvertex, EdgeDataType nbvalsum) = 0; // "Scatter virtual EdgeDataType value_to_neighbor(graphchi_context &info, vertex_info& myvertex, vid_t nbid, VertexDataType myval) = 0; }; /** * Run a functional kernel with unweighted edges. * The semantics of this API are * less well-defined than the standard one, because this API is "semi-synchronous". That is, * inside a sub-interval, new values of neighbors are not observed, but * next sub-interval will observe the new values. * * See application "pagerank_functional" for an example. 
* @param KERNEL needs to be a class/struct that subclasses the functional_kernel * @param filename base filename * @param nshards number of shards * @param niters number of iterations to run * @param _m metrics object */ template <class KERNEL> void run_functional_unweighted_semisynchronous(std::string filename, int niters, metrics &_m) { FunctionalProgramProxySemisync<KERNEL> program; /* Process input file - if not already preprocessed */ int nshards = convert_if_notexists<typename FunctionalProgramProxySemisync<KERNEL>::EdgeDataType>(filename, get_option_string("nshards", "auto")); functional_engine<typename FunctionalProgramProxySemisync<KERNEL>::VertexDataType, typename FunctionalProgramProxySemisync<KERNEL>::EdgeDataType, typename FunctionalProgramProxySemisync<KERNEL>::fvertex_t > engine(filename, nshards, false, _m); engine.set_modifies_inedges(false); // Important engine.set_modifies_outedges(true); // Important engine.run(program, niters); } /** * Run a functional kernel with unweighted edges in the bulk-synchronous model. * Note: shards need to have space to store two values for each edge. * * See application "pagerank_functional" for an example. 
* @param filename base filename * @param nshards number of shards * @param niters number of iterations to run * @param _m metrics object */ template <class KERNEL> void run_functional_unweighted_synchronous(std::string filename, int niters, metrics &_m) { FunctionalProgramProxyBulkSync<KERNEL> program; int nshards = convert_if_notexists<typename FunctionalProgramProxyBulkSync<KERNEL>::EdgeDataType>(filename, get_option_string("nshards", "auto")); functional_engine<typename FunctionalProgramProxyBulkSync<KERNEL>::VertexDataType, typename FunctionalProgramProxyBulkSync<KERNEL>::EdgeDataType, typename FunctionalProgramProxyBulkSync<KERNEL>::fvertex_t > engine(filename, nshards, false, _m); engine.set_modifies_inedges(false); // Important engine.set_modifies_outedges(true); // Important engine.set_enable_deterministic_parallelism(false); // Bulk synchronous does not need consistency. engine.run(program, niters); } } #endif
09jijiangwen-download
src/api/functional/functional_api.hpp
C++
asf20
5,722
/** * @file * @author Aapo Kyrola <akyrola@cs.cmu.edu> * @version 1.0 * * @section LICENSE * * Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University] * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * @section DESCRIPTION * * Variable size typed vector (type must be a plain old datatype) that * allows adding and removing of elements. */ #ifndef DEF_GRAPHCHI_CHIVECTOR #define DEF_GRAPHCHI_CHIVECTOR #include <vector> #include <stdint.h> namespace graphchi { #define MINCAPACITY 2 /** * Pool the extension parts of chi-vectors */ template <typename T> class extension_pool { }; template <typename T> class chivector { uint16_t nsize; uint16_t ncapacity; T * data; std::vector<T> * extensions; // TODO: use a more memory efficient system? public: typedef T element_type_t; typedef uint32_t sizeword_t; chivector() { extensions = NULL; } chivector(uint16_t sz, uint16_t cap, T * dataptr) : data(dataptr) { nsize = sz; ncapacity = cap; assert(cap >= nsize); extensions = NULL; } ~chivector() { if (extensions != NULL) { delete extensions; extensions = NULL; } } void write(T * dest) { int sz = (int) this->size(); for(int i=0; i < sz; i++) { dest[i] = get(i); // TODO: use memcpy } } uint16_t size() { return nsize; } uint16_t capacity() { return nsize > MINCAPACITY ? 
nsize : MINCAPACITY; } void add(T val) { nsize ++; if (nsize > ncapacity) { if (extensions == NULL) extensions = new std::vector<T>(); extensions->push_back(val); } else { data[nsize - 1] = val; } } //idx should already exist in the array void set(int idx, T val){ if (idx >= ncapacity) { (*extensions)[idx - (int)ncapacity] = val; } else { data[idx] = val; } } // TODO: addmany() T get(int idx) { if (idx >= ncapacity) { return (* extensions)[idx - (int)ncapacity]; } else { return data[idx]; } } void remove(int idx) { assert(false); } int find(T val) { assert(false); return -1; } void clear() { nsize = 0; } // TODO: iterators }; } #endif
09jijiangwen-download
src/api/dynamicdata/chivector.hpp
C++
asf20
3,181
/** * Copyright (c) 2009 Carnegie Mellon University. * All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an "AS * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language * governing permissions and limitations under the License. * * For more about this software visit: * * http://www.graphlab.ml.cmu.edu * */ #ifndef GRAPHLAB_RANDOM_HPP #define GRAPHLAB_RANDOM_HPP #include <cstdlib> #include <stdint.h> #include <vector> #include <limits> #include <algorithm> #include <boost/random.hpp> #include "util/pthread_tools.hpp" using namespace graphchi; namespace graphlab { /** * \ingroup random * A collection of thread safe random number routines. Each thread * is assigned its own generator however assigning a seed affects * all current and future generators. */ namespace random { /////////////////////////////////////////////////////////////////////// //// Underlying generator definition namespace distributions { /** * The uniform distribution struct is used for partial function * specialization. Generating uniform random real numbers is * accomplished slightly differently than for integers. * Therefore the base case is for integers and we then * specialize the two real number types (floats and doubles). 
*/ template<typename IntType> struct uniform { typedef boost::uniform_int<IntType> distribution_type; template<typename RealRNG, typename DiscreteRNG> static inline IntType sample(RealRNG& real_rng, DiscreteRNG& discrete_rng, const IntType& min, const IntType& max) { return distribution_type(min, max)(discrete_rng); } }; template<> struct uniform<double> { typedef boost::uniform_real<double> distribution_type; template<typename RealRNG, typename DiscreteRNG> static inline double sample(RealRNG& real_rng, DiscreteRNG& discrete_rng, const double& min, const double& max) { return distribution_type(min, max)(real_rng); } }; template<> struct uniform<float> { typedef boost::uniform_real<float> distribution_type; template<typename RealRNG, typename DiscreteRNG> static inline float sample(RealRNG& real_rng, DiscreteRNG& discrete_rng, const float& min, const float& max) { return distribution_type(min, max)(real_rng); } }; }; // end of namespace distributions /** * The generator class is the base underlying type used to * generate random numbers. User threads should use the functions * provided in the random namespace. */ class generator { public: // base Generator types typedef boost::lagged_fibonacci607 real_rng_type; typedef boost::mt11213b discrete_rng_type; typedef boost::rand48 fast_discrete_rng_type; generator() { time_seed(); } //! Seed the generator using the default seed inline void seed() { mut.lock(); real_rng.seed(); discrete_rng.seed(); fast_discrete_rng.seed(); mut.unlock(); } //! Seed the generator nondeterministically void nondet_seed(); //! Seed the generator using the current time in microseconds inline void time_seed() { seed(time(NULL) ); } //! Seed the random number generator based on a number void seed(size_t number) { mut.lock(); fast_discrete_rng.seed(number); real_rng.seed(fast_discrete_rng); discrete_rng.seed(fast_discrete_rng); mut.unlock(); } //! 
Seed the generator using another generator void seed(generator& other){ mut.lock(); real_rng.seed(other.real_rng); discrete_rng.seed(other.discrete_rng); fast_discrete_rng.seed(other.fast_discrete_rng()); mut.unlock(); } /** * Generate a random number in the uniform real with range [min, * max) or [min, max] if the number type is discrete. */ template<typename NumType> inline NumType uniform(const NumType min, const NumType max) { mut.lock(); const NumType result = distributions::uniform<NumType>:: sample(real_rng, discrete_rng, min, max); mut.unlock(); return result; } // end of uniform /** * Generate a random number in the uniform real with range [min, * max) or [min, max] if the number type is discrete. */ template<typename NumType> inline NumType fast_uniform(const NumType min, const NumType max) { mut.lock(); const NumType result = distributions::uniform<NumType>:: sample(real_rng, fast_discrete_rng, min, max); mut.unlock(); return result; } // end of fast_uniform /** * Generate a random number in the uniform real with range [min, * max); */ inline double gamma(const double alpha = double(1)) { boost::gamma_distribution<double> gamma_dist(alpha); mut.lock(); const double result = gamma_dist(real_rng); mut.unlock(); return result; } // end of gamma /** * Generate a gaussian random variable with zero mean and unit * variance. */ inline double gaussian(const double mean = double(0), const double stdev = double(1)) { boost::normal_distribution<double> normal_dist(mean,stdev); mut.lock(); const double result = normal_dist(real_rng); mut.unlock(); return result; } // end of gaussian /** * Generate a gaussian random variable with zero mean and unit * variance. 
*/ inline double normal(const double mean = double(0), const double stdev = double(1)) { return gaussian(mean, stdev); } // end of normal inline bool bernoulli(const double p = double(0.5)) { boost::bernoulli_distribution<double> dist(p); mut.lock(); const double result(dist(discrete_rng)); mut.unlock(); return result; } // end of bernoulli inline bool fast_bernoulli(const double p = double(0.5)) { boost::bernoulli_distribution<double> dist(p); mut.lock(); const double result(dist(fast_discrete_rng)); mut.unlock(); return result; } // end of bernoulli /** * Draw a random number from a multinomial */ template<typename Double> size_t multinomial(const std::vector<Double>& prb) { ASSERT_GT(prb.size(),0); if (prb.size() == 1) { return 0; } Double sum(0); for(size_t i = 0; i < prb.size(); ++i) { ASSERT_GE(prb[i], 0); // Each entry must be P[i] >= 0 sum += prb[i]; } ASSERT_GT(sum, 0); // Normalizer must be positive // actually draw the random number const Double rnd(uniform<Double>(0,1)); size_t ind = 0; for(Double cumsum(prb[ind]/sum); rnd > cumsum && (ind+1) < prb.size(); cumsum += (prb[++ind]/sum)); return ind; } // end of multinomial /** * Generate a draw from a multinomial using a CDF. This is * slightly more efficient since normalization is not required * and a binary search can be used. 
*/ template<typename Double> inline size_t multinomial_cdf(const std::vector<Double>& cdf) { return std::upper_bound(cdf.begin(), cdf.end(), uniform<Double>(0,1)) - cdf.begin(); } // end of multinomial_cdf /** * Construct a random permutation */ template<typename T> inline std::vector<T> permutation(const size_t nelems) { std::vector<T> perm(nelems); for(T i = 0; i < nelems; ++i) perm[i] = i; shuffle(perm); return perm; } // end of construct a permutation /** * Shuffle a standard vector */ template<typename T> void shuffle(std::vector<T>& vec) { shuffle(vec.begin(), vec.end()); } /** * Shuffle a range using the begin and end iterators */ template<typename Iterator> void shuffle(Iterator begin, Iterator end) { mut.lock(); shuffle_functor functor(*this); std::random_shuffle(begin, end, functor); mut.unlock(); } // end of shuffle private: ////////////////////////////////////////////////////// /// Data members struct shuffle_functor { generator& gen; inline shuffle_functor(generator& gen) : gen(gen) { } inline std::ptrdiff_t operator()(std::ptrdiff_t end) { return distributions::uniform<ptrdiff_t>:: sample(gen.real_rng, gen.fast_discrete_rng, 0, end-1); } }; //! The real random number generator real_rng_type real_rng; //! The discrete random number generator discrete_rng_type discrete_rng; //! The fast discrete random number generator fast_discrete_rng_type fast_discrete_rng; //! 
lock used to access local members mutex mut; }; // end of class generator /** * \ingroup random * Seed all generators using the default seed */ void seed(); /** * \ingroup random * Seed all generators using an integer */ void seed(size_t seed_value); /** * \ingroup random * Seed all generators using a nondeterministic source */ void nondet_seed(); /** * \ingroup random * Seed all generators using the current time in microseconds */ void time_seed(); /** * \ingroup random * Get the local generator */ generator& get_source(); /** * \ingroup random * Generate a random number in the uniform real with range [min, * max) or [min, max] if the number type is discrete. */ template<typename NumType> inline NumType uniform(const NumType min, const NumType max) { return get_source().uniform<NumType>(min, max); } // end of uniform /** * \ingroup random * Generate a random number in the uniform real with range [min, * max) or [min, max] if the number type is discrete. */ template<typename NumType> inline NumType fast_uniform(const NumType min, const NumType max) { return get_source().fast_uniform<NumType>(min, max); } // end of fast_uniform /** * \ingroup random * Generate a random number between 0 and 1 */ inline double rand01() { return uniform<double>(0, 1); } /** * \ingroup random * Simulates the standard rand function as defined in cstdlib */ inline int rand() { return fast_uniform(0, RAND_MAX); } /** * \ingroup random * Generate a random number from a gamma distribution. */ inline double gamma(const double alpha = double(1)) { return get_source().gamma(alpha); } /** * \ingroup random * Generate a gaussian random variable with zero mean and unit * standard deviation. */ inline double gaussian(const double mean = double(0), const double stdev = double(1)) { return get_source().gaussian(mean, stdev); } /** * \ingroup random * Generate a gaussian random variable with zero mean and unit * standard deviation. 
*/ inline double normal(const double mean = double(0), const double stdev = double(1)) { return get_source().normal(mean, stdev); } /** * \ingroup random * Draw a sample from a bernoulli distribution */ inline bool bernoulli(const double p = double(0.5)) { return get_source().bernoulli(p); } /** * \ingroup random * Draw a sample form a bernoulli distribution using the faster generator */ inline bool fast_bernoulli(const double p = double(0.5)) { return get_source().fast_bernoulli(p); } /** * \ingroup random * Generate a draw from a multinomial. This function * automatically normalizes as well. */ template<typename Double> inline size_t multinomial(const std::vector<Double>& prb) { return get_source().multinomial(prb); } /** * \ingroup random * Generate a draw from a cdf; */ template<typename Double> inline size_t multinomial_cdf(const std::vector<Double>& cdf) { return get_source().multinomial_cdf(cdf); } /** * \ingroup random * Construct a random permutation */ template<typename T> inline std::vector<T> permutation(const size_t nelems) { return get_source().permutation<T>(nelems); } /** * \ingroup random * Shuffle a standard vector */ template<typename T> inline void shuffle(std::vector<T>& vec) { get_source().shuffle(vec); } /** * \ingroup random * Shuffle a range using the begin and end iterators */ template<typename Iterator> inline void shuffle(Iterator begin, Iterator end) { get_source().shuffle(begin, end); } /** * Converts a discrete PDF into a CDF */ void pdf2cdf(std::vector<double>& pdf); }; // end of random }; // end of graphlab #endif
09jijiangwen-download
src/api/graphlab2_1_GAS_api/random.hpp
C++
asf20
17,045
/** * @file * @author Aapo Kyrola <akyrola@cs.cmu.edu> * @version 1.0 * * @section LICENSE * * Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University] * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * @section DESCRIPTION * * Wrapper classes for GraphLab v2.1 API. */ #ifndef DEF_GRAPHLAB_WRAPPERS #define DEF_GRAPHLAB_WRAPPERS #include "graphchi_basic_includes.hpp" using namespace graphchi; namespace graphlab { struct IS_POD_TYPE { }; struct empty {}; enum edge_dir_type { /** * \brief No edges implies that no edges are processed during the * corresponding gather or scatter phase, essentially skipping * that phase. */ NO_EDGES = 0, /** * \brief In edges implies that only whose target is the center * vertex are processed during gather or scatter. */ IN_EDGES = 1, /** * \brief Out edges implies that only whose source is the center * vertex are processed during gather or scatter. */ OUT_EDGES = 2 , /** * \brief All edges implies that all adges adjacent to a the * center vertex are processed on gather or scatter. Note that * some neighbors may be encountered twice if there is both an in * and out edge to that neighbor. 
*/ ALL_EDGES = 3 }; typedef vid_t vertex_id_type; template<typename GraphType, typename GatherType, typename MessageType> class icontext { public: // Type members =========================================================== /** * \brief the user graph type (typically \ref distributed_graph) */ typedef GraphType graph_type; /** * \brief the opaque vertex_type defined in the ivertex_program::graph_type * (typically distributed_graph::vertex_type) */ typedef typename graph_type::vertex_type vertex_type; /** * \brief the global vertex identifier (see * graphlab::vertex_id_type). */ typedef typename graph_type::vertex_id_type vertex_id_type; /** * The message type specified by the user-defined vertex-program. * (see ivertex_program::message_type) */ typedef MessageType message_type; /** * The type returned by the gather operation. (see * ivertex_program::gather_type) */ typedef GatherType gather_type; /* GraphChi */ graphchi_context * gcontext; public: icontext(graphchi_context * gcontext) : gcontext(gcontext) {} /** \brief icontext destructor */ virtual ~icontext() { } /** * \brief Get the total number of vertices in the graph. * * \return the total number of vertices in the entire graph. */ virtual size_t num_vertices() const { return gcontext->nvertices; } /** * \brief Get the number of edges in the graph. * * Each direction counts as a separate edge. * * \return the total number of edges in the entire graph. */ virtual size_t num_edges() const { assert(false); return 0; } // Not implemented yet /** * \brief Get the id of this process. * * The procid is a number between 0 and * \ref graphlab::icontext::num_procs * * \warning Each process may have many threads * * @return the process of this machine. */ virtual size_t procid() const { return (size_t) omp_get_thread_num(); } /** * \brief Returns a standard output object (like cout) * which only prints once even when running distributed. 
* * This returns a C++ standard output stream object * which maps directly to std::cout on machine with * process ID 0, and to empty output streamss * on all other processes. Calling, * \code * context.cout() << "Hello World!"; * \endcode * will therefore only print if the code is run on machine 0. * This is useful in the finalize operation in aggregators. */ virtual std::ostream& cout() const { return std::cout; } /** * \brief Returns a standard error object (like cerr) * which only prints once even when running distributed. * * This returns a C++ standard output stream object * which maps directly to std::cerr on machine with * process ID 0, and to empty output streamss * on all other processes. Calling, * \code * context.cerr() << "Hello World!"; * \endcode * will therefore only print if the code is run on machine 0. * This is useful in the finalize operation in aggregators. */ virtual std::ostream& cerr() const { return std::cerr; } /** * \brief Get the number of processes in the current execution. * * This is typically the number of mpi jobs created: * \code * %> mpiexec -n 16 ./pagerank * \endcode * would imply that num_procs() returns 16. * * @return the number of processes in the current execution */ virtual size_t num_procs() const { return gcontext->execthreads; } /** * \brief Get the elapsed time in seconds since start was called. * * \return runtine in seconds */ virtual float elapsed_seconds() const { return gcontext->runtime(); } /** * \brief Return the current interation number (if supported). * * \return the current interation number if support or -1 * otherwise. */ virtual int iteration() const { return gcontext->iteration; } /** * \brief Signal the engine to stop executing additional update * functions. * * \warning The execution engine will stop *eventually* and * additional update functions may be executed prior to when the * engine stops. 
For-example the synchronous engine (see \ref * synchronous_engine) will complete the current super-step before * terminating. */ virtual void stop() { gcontext->last_iteration = gcontext->iteration; } /** * \brief Signal a vertex with a particular message. * * This function is an essential part of the GraphLab abstraction * and is used to encode iterative computation. Typically a vertex * program will signal neighboring vertices during the scatter * phase. A vertex program may choose to signal neighbors on when * changes made during the previos phases break invariants or warrant * future computation on neighboring vertices. * * The signal function takes two arguments. The first is mandatory * and specifies which vertex to signal. The second argument is * optional and is used to send a message. If no message is * provided then the default message is used. * * \param vertex [in] The vertex to send the message to * \param message [in] The message to send, defaults to message_type(). */ virtual void signal(const vertex_type& vertex, const message_type& message = message_type()) { gcontext->scheduler->add_task(vertex.id()); } /** * \brief Send a message to a vertex ID. * * \warning This function will be slow since the current machine * do not know the location of the vertex ID. If possible use the * the icontext::signal call instead. * * \param gvid [in] the vertex id of the vertex to signal * \param message [in] the message to send to that vertex, * defaults to message_type(). */ virtual void signal_vid(vertex_id_type gvid, const message_type& message = message_type()) { gcontext->scheduler->add_task(gvid); } /** * \brief Post a change to the cached sum for the vertex * * Often a vertex program will be signaled due to a change in one * or a few of its neighbors. However the gather operation will * be rerun on all neighbors potentially producing the same value * as previous invocations and wasting computation time. 
To * address this some engines support caching (see \ref * gather_caching for details) of the gather phase. * * When caching is enabled the engines save a copy of the previous * gather for each vertex. On subsequent calls to gather if their * is a cached gather then the gather phase is skipped and the * cached value is passed to the ivertex_program::apply function. * Therefore it is the responsibility of the vertex program to * update the cache values for neighboring vertices. This is * accomplished by using the icontext::post_delta function. * Posted deltas are atomically added to the cache. * * \param vertex [in] the vertex whose cache we want to update * \param delta [in] the change that we want to *add* to the * current cache. * */ virtual void post_delta(const vertex_type& vertex, const gather_type& delta) { assert(false); // Not implemented } /** * \brief Invalidate the cached gather on the vertex. * * When caching is enabled clear_gather_cache clears the cache * entry forcing a complete invocation of the subsequent gather. * * \param vertex [in] the vertex whose cache to clear. */ virtual void clear_gather_cache(const vertex_type& vertex) { assert(false); // Not implemented } }; // end of icontext /* Forward declaratinos */ template <typename GLVertexDataType, typename EdgeDataType> struct GraphLabVertexWrapper; template <typename GLVertexDataType, typename EdgeDataType> struct GraphLabEdgeWrapper; /* Fake distributed graph type (this is often hard-coded in GraphLab vertex programs. 
*/ template <typename vertex_data, typename edge_data> struct distributed_graph { typedef vertex_data vertex_data_type; typedef edge_data edge_data_type; typedef GraphLabVertexWrapper<vertex_data_type, edge_data_type> vertex_type; typedef GraphLabEdgeWrapper<vertex_data_type, edge_data_type> edge_type; typedef graphchi::vid_t vertex_id_type; }; /* GraphChi's version of the ivertex_program */ template<typename Graph, typename GatherType, typename MessageType = bool> struct ivertex_program { /* Type definitions */ typedef typename Graph::vertex_data_type vertex_data_type; typedef typename Graph::edge_data_type edge_data_type; typedef GatherType gather_type; typedef MessageType message_type; typedef Graph graph_type; typedef typename graphchi::vid_t vertex_id_type; typedef GraphLabVertexWrapper<vertex_data_type, edge_data_type> vertex_type; typedef GraphLabEdgeWrapper<vertex_data_type, edge_data_type> edge_type; typedef icontext<graph_type, gather_type, message_type> icontext_type; typedef graphlab::edge_dir_type edge_dir_type; virtual void init(icontext_type& context, const vertex_type& vertex, const message_type& msg) { /** NOP */ } /** * Returns the set of edges on which to run the gather function. * The default edge direction is the in edges. */ virtual edge_dir_type gather_edges(icontext_type& context, const vertex_type& vertex) const { return IN_EDGES; } /** * Gather is called on all gather_edges() in parallel and returns * the gather_type which are added to compute the final output of * the gather. */ virtual gather_type gather(icontext_type& context, const vertex_type& vertex, edge_type& edge) const { logstream(LOG_FATAL) << "Gather not implemented!" << std::endl; return gather_type(); }; /** * The apply function is called once the gather has completed and * must be implemented by all vertex programs. 
*/ virtual void apply(icontext_type& context, vertex_type& vertex, const gather_type& total) = 0; /** * Returns the set of edges on which to run the scatter function. * The default edge direction is the out edges. */ virtual edge_dir_type scatter_edges(icontext_type& context, const vertex_type& vertex) const { return OUT_EDGES; } /** * Scatter is called on all scatter_edges() in parallel after the * apply function has completed. The scatter function can post * deltas. */ virtual void scatter(icontext_type& context, const vertex_type& vertex, edge_type& edge) const { logstream(LOG_FATAL) << "Scatter not implemented!" << std::endl; }; }; template <typename GLVertexDataType, typename EdgeDataType> struct GraphLabVertexWrapper { typedef graphchi_vertex<bool, EdgeDataType> VertexType; // Confusing! typedef GLVertexDataType vertex_data_type; typedef GraphLabVertexWrapper<GLVertexDataType, EdgeDataType> vertex_type; graphchi::vid_t vertexId; VertexType * vertex; std::vector<GLVertexDataType> * vertexArray; GraphLabVertexWrapper(graphchi::vid_t vertexId, VertexType * vertex, std::vector<GLVertexDataType> * vertexArray): vertexId(vertexId), vertex(vertex), vertexArray(vertexArray) { } bool operator==(vertex_type& other) const { return vertexId == other.vertexId; } /// \brief Returns a constant reference to the data on the vertex const vertex_data_type& data() const { return (*vertexArray)[vertexId]; } /// \brief Returns a mutable reference to the data on the vertex vertex_data_type& data() { return (*vertexArray)[vertexId]; } /// \brief Returns the number of in edges of the vertex size_t num_in_edges() const { if (vertex == NULL) { logstream(LOG_ERROR) << "GraphChi does not support asking neighbor vertices in/out degrees." 
<< std::endl; return 0; } return vertex->num_edges(); } /// \brief Returns the number of out edges of the vertex size_t num_out_edges() const { if (vertex == NULL) { logstream(LOG_ERROR) << "GraphChi does not support asking neighbor vertices in/out degrees." << std::endl; return 0; } return vertex->num_outedges(); } /// \brief Returns the vertex ID of the vertex graphchi::vid_t id() const { return vertexId; } /** * \brief Returns the local ID of the vertex */ graphchi::vid_t local_id() const { return vertexId; } }; template <typename GLVertexDataType, typename EdgeDataType> struct GraphLabEdgeWrapper { typedef graphchi_vertex<bool, EdgeDataType> VertexType; typedef GLVertexDataType vertex_data_type; typedef EdgeDataType edge_data_type; typedef GraphLabVertexWrapper<GLVertexDataType, EdgeDataType> vertex_type; graphchi_edge<EdgeDataType> * edge; VertexType * vertex; std::vector<GLVertexDataType> * vertexArray; bool is_inedge; GraphLabEdgeWrapper(graphchi_edge<EdgeDataType> * edge, VertexType * vertex, std::vector<GLVertexDataType> * vertexArray, bool is_inedge): edge(edge), vertex(vertex), vertexArray(vertexArray), is_inedge(is_inedge) { } public: /** * \brief Returns the source vertex of the edge. * This function returns a vertex_object by value and as a * consequence it is possible to use the resulting vertex object * to access and *modify* the associated vertex data. * * Modification of vertex data obtained through an edge object * is *usually not safe* and can lead to data corruption. * * \return The vertex object representing the source vertex. */ vertex_type source() const { if (is_inedge) { return GraphLabVertexWrapper<GLVertexDataType, EdgeDataType>(vertex->id(), vertex, vertexArray); } else { return GraphLabVertexWrapper<GLVertexDataType, EdgeDataType>(edge->vertex_id(), NULL, vertexArray); } } /** * \brief Returns the target vertex of the edge. 
* * This function returns a vertex_object by value and as a * consequence it is possible to use the resulting vertex object * to access and *modify* the associated vertex data. * * Modification of vertex data obtained through an edge object * is *usually not safe* and can lead to data corruption. * * \return The vertex object representing the target vertex. */ vertex_type target() const { if (!is_inedge) { return GraphLabVertexWrapper<GLVertexDataType, EdgeDataType>(vertex->id(), vertex, vertexArray); } else { return GraphLabVertexWrapper<GLVertexDataType, EdgeDataType>(edge->vertex_id(), NULL, vertexArray); } } /** * \brief Returns a constant reference to the data on the edge */ const edge_data_type& data() const { return const_cast<edge_data_type&>(*edge->data_ptr); } /** * \brief Returns a mutable reference to the data on the edge */ edge_data_type& data() { return *(edge->data_ptr); } }; // end of edge_type template <class GraphLabVertexProgram> struct GraphLabWrapper : public GraphChiProgram<bool, typename GraphLabVertexProgram::edge_data_type> { typedef bool VertexDataType; /* Temporary hack: as the vertices are stored in memory, no need to store on disk. */ typedef typename GraphLabVertexProgram::vertex_data_type GLVertexDataType; typedef typename GraphLabVertexProgram::edge_data_type EdgeDataType; typedef typename GraphLabVertexProgram::gather_type gather_type; typedef typename GraphLabVertexProgram::graph_type graph_type; typedef typename GraphLabVertexProgram::message_type message_type; std::vector<GLVertexDataType> * vertexInmemoryArray; GraphLabWrapper() { vertexInmemoryArray = new std::vector<GLVertexDataType>(); } /** * Called before an iteration starts. */ virtual void before_iteration(int iteration, graphchi_context &gcontext) { if (gcontext.iteration == 0) { logstream(LOG_INFO) << "Initialize vertices in memory." << std::endl; vertexInmemoryArray->resize(gcontext.nvertices); } } /** * Called after an iteration has finished. 
*/ virtual void after_iteration(int iteration, graphchi_context &gcontext) { } /** * Called before an execution interval is started. */ virtual void before_exec_interval(vid_t window_st, vid_t window_en, graphchi_context &gcontext) { } /** * Called after an execution interval has finished. */ virtual void after_exec_interval(vid_t window_st, vid_t window_en, graphchi_context &gcontext) { } /** * Update function. */ void update(graphchi_vertex<bool, EdgeDataType> &vertex, graphchi_context &gcontext) { graphlab::icontext<graph_type, gather_type, message_type> glcontext(&gcontext); /* Create the vertex program */ GraphLabVertexWrapper<GLVertexDataType, EdgeDataType> wrapperVertex(vertex.id(), &vertex, vertexInmemoryArray); GraphLabVertexProgram glVertexProgram; /* Init */ glVertexProgram.init(glcontext, wrapperVertex, typename GraphLabVertexProgram::message_type()); const GraphLabVertexProgram& const_vprog = glVertexProgram; /* Gather */ edge_dir_type gather_direction = const_vprog.gather_edges(glcontext, wrapperVertex); gather_type sum; int gathered = 0; switch (gather_direction) { case ALL_EDGES: case IN_EDGES: for(int i=0; i < vertex.num_inedges(); i++) { GraphLabEdgeWrapper<GLVertexDataType, EdgeDataType> edgeWrapper(vertex.inedge(i), &vertex, vertexInmemoryArray, true); if (gathered > 0) sum += const_vprog.gather(glcontext, wrapperVertex, edgeWrapper); else sum = const_vprog.gather(glcontext, wrapperVertex, edgeWrapper); gathered++; } if (gather_direction != ALL_EDGES) break; case OUT_EDGES: for(int i=0; i < vertex.num_outedges(); i++) { GraphLabEdgeWrapper<GLVertexDataType, EdgeDataType> edgeWrapper(vertex.outedge(i), &vertex, vertexInmemoryArray, false); if (gathered > 0) sum += const_vprog.gather(glcontext, wrapperVertex, edgeWrapper); else sum = const_vprog.gather(glcontext, wrapperVertex, edgeWrapper); gathered++; } break; case NO_EDGES: break; default: assert(false); // Huh? 
} /* Apply */ glVertexProgram.apply(glcontext, wrapperVertex, sum); /* Scatter */ edge_dir_type scatter_direction = const_vprog.scatter_edges(glcontext, wrapperVertex); switch(scatter_direction) { case ALL_EDGES: case IN_EDGES: for(int i=0; i < vertex.num_inedges(); i++) { GraphLabEdgeWrapper<GLVertexDataType, EdgeDataType> edgeWrapper(vertex.inedge(i), &vertex, vertexInmemoryArray, true); const_vprog.scatter(glcontext, wrapperVertex, edgeWrapper); } if (scatter_direction != ALL_EDGES) break; case OUT_EDGES: for(int i=0; i < vertex.num_outedges(); i++) { GraphLabEdgeWrapper<GLVertexDataType, EdgeDataType> edgeWrapper(vertex.outedge(i), &vertex, vertexInmemoryArray, false); const_vprog.scatter(glcontext, wrapperVertex, edgeWrapper); } break; case NO_EDGES: break; default: assert(false); // Huh? } /* Done! */ } }; // End GraphLabWrapper template <typename GraphLabVertexProgram, typename ReductionType, typename EdgeMapType, typename FinalizerType> struct GraphLabEdgeAggregatorWrapper : public GraphChiProgram<bool, typename GraphLabVertexProgram::edge_data_type> { typedef bool VertexDataType; /* Temporary hack: as the vertices are stored in memory, no need to store on disk. 
 */
    /* Convenience typedefs lifted from the wrapped GraphLab vertex program. */
    typedef typename GraphLabVertexProgram::vertex_data_type GLVertexDataType;
    typedef typename GraphLabVertexProgram::edge_data_type EdgeDataType;
    typedef typename GraphLabVertexProgram::edge_type edge_type;
    typedef typename GraphLabVertexProgram::gather_type gather_type;
    typedef typename GraphLabVertexProgram::graph_type graph_type;
    typedef typename GraphLabVertexProgram::message_type message_type;

    mutex m;                                             // not used in the visible code; kept for interface stability
    std::vector<ReductionType> localaggr;                // per-thread partial aggregates (indexed by omp thread id)
    ReductionType aggr;                                  // combined aggregate; valid after after_iteration()
    std::vector<GLVertexDataType> * vertexInmemoryArray; // in-memory vertex values (not owned by this wrapper)
    EdgeMapType map_function;                            // per-edge map: (context, edge) -> ReductionType
    FinalizerType finalize_function;                     // called once per iteration with the combined aggregate

    /**
     * @param map_function       per-edge map function to fold over all edges
     * @param finalize_function  receives the summed result after the iteration
     * @param vertices           in-memory vertex value array produced by the vertex-program run
     */
    GraphLabEdgeAggregatorWrapper(EdgeMapType map_function, FinalizerType finalize_function,
                                  std::vector<typename GraphLabVertexProgram::vertex_data_type> * vertices)
        : map_function(map_function), finalize_function(finalize_function) {
        vertexInmemoryArray = vertices;
    }

    /**
     * Called before an iteration starts. Resets the combined aggregate and
     * sizes the per-thread accumulator array.
     * NOTE(review): resize() preserves existing slot values; that is harmless for
     * the single-iteration runs used by run_graphlab_edge_aggregator() below, but
     * would double-count if this wrapper were ever run for >1 iteration — confirm.
     */
    virtual void before_iteration(int iteration, graphchi_context &gcontext) {
        aggr = ReductionType();
        localaggr.resize(gcontext.execthreads);
    }

    /**
     * Called after an iteration has finished: reduces the per-thread partials
     * into `aggr` and invokes the user's finalizer with it.
     */
    virtual void after_iteration(int iteration, graphchi_context &gcontext) {
        logstream(LOG_INFO) << "Going to run edge-aggregator finalize." << std::endl;
        for(int i=0; i < (int)localaggr.size(); i++) {
            aggr += localaggr[i];
        }
        graphlab::icontext<graph_type, gather_type, message_type> glcontext(&gcontext);
        finalize_function(glcontext, aggr);
    }

    /**
     * Called before an execution interval is started. (No-op.)
     */
    virtual void before_exec_interval(vid_t window_st, vid_t window_en, graphchi_context &gcontext) {
    }

    /**
     * Called after an execution interval has finished. (No-op.)
     */
    virtual void after_exec_interval(vid_t window_st, vid_t window_en, graphchi_context &gcontext) {
    }

    /**
     * Update function: maps every edge of the vertex through map_function and
     * accumulates the results into this thread's partial aggregate.
     */
    void update(graphchi_vertex<bool, EdgeDataType> &vertex, graphchi_context &gcontext) {
        graphlab::icontext<graph_type, gather_type, message_type> glcontext(&gcontext);
        ReductionType a;
        for(int i=0; i < vertex.num_edges(); i++) {
            // NOTE(review): the final ctor argument (in-edge flag) is `true` for
            // every edge, including out-edges — presumably irrelevant for pure
            // aggregation; confirm against GraphLabEdgeWrapper's semantics.
            const GraphLabEdgeWrapper<GLVertexDataType, EdgeDataType> edgeWrapper(vertex.edge(i), &vertex, vertexInmemoryArray, true);
            ReductionType mapped = map_function(glcontext, edgeWrapper);
            a += mapped;
        }
        localaggr[omp_get_thread_num()] += a;
    }

}; // End edge-aggregator wrapper

/**
  * Just definitions, we do not actually support them.
  */
namespace messages {

    /**
     * The priority of two messages is the sum
     */
    struct sum_priority : public graphlab::IS_POD_TYPE {
        double value;
        sum_priority(const double value = 0) : value(value) { }
        double priority() const { return value; }
        sum_priority& operator+=(const sum_priority& other) {
            value += other.value;
            return *this;
        }
    }; // end of sum_priority message

    /**
     * The priority of two messages is the max
     */
    struct max_priority : public graphlab::IS_POD_TYPE {
        double value;
        max_priority(const double value = 0) : value(value) { }
        double priority() const { return value; }
        max_priority& operator+=(const max_priority& other) {
            value = std::max(value, other.value);
            return *this;
        }
    }; // end of max_priority message

}; // end of messages namespace

}; // End namespace graphlab

/**
 * Runs a GraphLab v2.1 vertex program on a GraphChi shard set and returns the
 * in-memory vertex value array (owned by the wrapper program; not freed here).
 * @param modifies_inedges/outedges  whether the engine must write edges back to disk
 */
template <typename GraphLabVertexProgram>
std::vector<typename GraphLabVertexProgram::vertex_data_type> *
run_graphlab_vertexprogram(std::string base_filename, int nshards, int niters,
                           bool scheduler, metrics & _m,
                           bool modifies_inedges=true, bool modifies_outedges=true) {
    typedef graphlab::GraphLabWrapper<GraphLabVertexProgram> GLWrapper;
    GLWrapper wrapperProgram;
    graphchi_engine<bool, typename GLWrapper::EdgeDataType> engine(base_filename, nshards, scheduler, _m);
    engine.set_modifies_inedges(modifies_inedges);
    engine.set_modifies_outedges(modifies_outedges);
    engine.run(wrapperProgram, niters);
    return wrapperProgram.vertexInmemoryArray;
}

/**
 * Runs a single-pass edge aggregation (map over every edge, then finalize)
 * and returns the reduced value. Edges are read-only during the pass.
 */
template <typename GraphLabVertexProgram, typename ReductionType, typename EdgeMapType, typename FinalizerType>
ReductionType run_graphlab_edge_aggregator(std::string base_filename, int nshards,
                                           EdgeMapType map_function, FinalizerType finalize_function,
                                           std::vector<typename GraphLabVertexProgram::vertex_data_type> * vertices,
                                           metrics & _m) {
    typedef graphlab::GraphLabEdgeAggregatorWrapper<GraphLabVertexProgram, ReductionType, EdgeMapType, FinalizerType> GLEdgeAggrWrapper;
    logstream(LOG_INFO) << "Starting edge aggregator." << std::endl;
    GLEdgeAggrWrapper glAggregator(map_function, finalize_function, vertices);
    graphchi_engine<bool, typename GLEdgeAggrWrapper::EdgeDataType> engine(base_filename, nshards, true, _m);
    engine.set_modifies_inedges(false);
    engine.set_modifies_outedges(false);
    engine.run(glAggregator, 1);   // single pass is sufficient for aggregation
    return glAggregator.aggr;
}

#endif
09jijiangwen-download
src/api/graphlab2_1_GAS_api/graphchi_graphlabv2_1.hpp
C++
asf20
31,895
/** * Copyright (c) 2009 Carnegie Mellon University. * All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an "AS * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language * governing permissions and limitations under the License. * * For more about this software visit: * * http://www.graphlab.ml.cmu.edu * */ // Copyright (c) 2005, Google Inc. // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // --- // This file contains #include information about logging-related stuff. // Pretty much everybody needs to #include this file so that they can // log various happenings. // #ifndef _ASSERTIONS_H_ #define _ASSERTIONS_H_ #include <stdarg.h> #include <stdlib.h> #include <stdio.h> #ifdef HAVE_UNISTD_H #include <unistd.h> // for write() #endif #include <string.h> // for strlen(), strcmp() #include <assert.h> #include <errno.h> // for errno #include <sstream> #include <cassert> #include "logger/logger.hpp" #include <boost/typeof/typeof.hpp> static void __print_back_trace() { logstream(LOG_ERROR) << "GraphChi does not currently have the _print_back_trace implementation!" << std::endl; } // On some systems (like freebsd), we can't call write() at all in a // global constructor, perhaps because errno hasn't been set up. // Calling the write syscall is safer (it doesn't set errno), so we // prefer that. Note we don't care about errno for logging: we just // do logging on a best-effort basis. #define WRITE_TO_STDERR(buf, len) (logbuf(LOG_FATAL, buf, len)) // CHECK dies with a fatal error if condition is not true. It is *not* // controlled by NDEBUG, so the check will be executed regardless of // compilation mode. 
Therefore, it is safe to do things like: // CHECK(fp->Write(x) == 4) #define CHECK(condition) \ do { \ if (__builtin_expect(!(condition), 0)) { \ logstream(LOG_ERROR) \ << "Check failed: " << #condition << std::endl; \ __print_back_trace(); \ throw("assertion failure"); \ } \ } while(0) // This prints errno as well. errno is the posix defined last error // number. See errno.h #define PCHECK(condition) \ do { \ if (__builtin_expect(!(condition), 0)) { \ const int _PCHECK_err_no_ = errno; \ logstream(LOG_ERROR) \ << "Check failed: " << #condition << ": " \ << strerror(err_no) << std::endl; \ __print_back_trace(); \ throw("assertion failure"); \ } \ } while(0) // Helper macro for binary operators; prints the two values on error // Don't use this macro directly in your code, use CHECK_EQ et al below // WARNING: These don't compile correctly if one of the arguments is a pointer // and the other is NULL. To work around this, simply static_cast NULL to the // type of the desired pointer. #define CHECK_OP(op, val1, val2) \ do { \ const typeof(val1) _CHECK_OP_v1_ = val1; \ const typeof(val2) _CHECK_OP_v2_ = (typeof(val2))val2; \ if (__builtin_expect(!((_CHECK_OP_v1_) op \ (typeof(val1))(_CHECK_OP_v2_)), 0)) { \ logstream(LOG_ERROR) \ << "Check failed: " \ << #val1 << #op << #val2 \ << " [" \ << _CHECK_OP_v1_ \ << ' ' << #op << ' ' \ << _CHECK_OP_v2_ << "]" << std::endl; \ __print_back_trace(); \ throw("assertion failure"); \ } \ } while(0) #define CHECK_EQ(val1, val2) CHECK_OP(==, val1, val2) #define CHECK_NE(val1, val2) CHECK_OP(!=, val1, val2) #define CHECK_LE(val1, val2) CHECK_OP(<=, val1, val2) #define CHECK_LT(val1, val2) CHECK_OP(< , val1, val2) #define CHECK_GE(val1, val2) CHECK_OP(>=, val1, val2) #define CHECK_GT(val1, val2) CHECK_OP(> , val1, val2) // Synonyms for CHECK_* that are used in some unittests. 
// Unit-test style aliases for the CHECK_* comparison macros.
#define EXPECT_EQ(val1, val2) CHECK_EQ(val1, val2)
#define EXPECT_NE(val1, val2) CHECK_NE(val1, val2)
#define EXPECT_LE(val1, val2) CHECK_LE(val1, val2)
#define EXPECT_LT(val1, val2) CHECK_LT(val1, val2)
#define EXPECT_GE(val1, val2) CHECK_GE(val1, val2)
#define EXPECT_GT(val1, val2) CHECK_GT(val1, val2)
#define ASSERT_EQ(val1, val2) EXPECT_EQ(val1, val2)
#define ASSERT_NE(val1, val2) EXPECT_NE(val1, val2)
#define ASSERT_LE(val1, val2) EXPECT_LE(val1, val2)
#define ASSERT_LT(val1, val2) EXPECT_LT(val1, val2)
#define ASSERT_GE(val1, val2) EXPECT_GE(val1, val2)
#define ASSERT_GT(val1, val2) EXPECT_GT(val1, val2)

// As are these variants.
#define EXPECT_TRUE(cond)     CHECK(cond)
#define EXPECT_FALSE(cond)    CHECK(!(cond))
#define EXPECT_STREQ(a, b)    CHECK(strcmp(a, b) == 0)
#define ASSERT_TRUE(cond)     EXPECT_TRUE(cond)
#define ASSERT_FALSE(cond)    EXPECT_FALSE(cond)
#define ASSERT_STREQ(a, b)    EXPECT_STREQ(a, b)

// ASSERT_MSG: like CHECK, but also logs a printf-style message on failure.
#define ASSERT_MSG(condition, fmt, ...)                                 \
  do {                                                                  \
    if (__builtin_expect(!(condition), 0)) {                            \
      logstream(LOG_ERROR)                                              \
        << "Check failed: " << #condition << ":\n";                     \
      logger(LOG_ERROR, fmt, ##__VA_ARGS__);                            \
      __print_back_trace();                                             \
      throw("assertion failure");                                       \
    }                                                                   \
  } while(0)

// Used for (libc) functions that return -1 and set errno
#define CHECK_ERR(invocation)  PCHECK((invocation) != -1)

// A few more checks that only happen in debug mode
// (expand to nothing when NDEBUG is defined).
#ifdef NDEBUG
#define DCHECK_EQ(val1, val2)
#define DCHECK_NE(val1, val2)
#define DCHECK_LE(val1, val2)
#define DCHECK_LT(val1, val2)
#define DCHECK_GE(val1, val2)
#define DCHECK_GT(val1, val2)
#define DASSERT_TRUE(cond)
#define DASSERT_FALSE(cond)
#define DASSERT_MSG(condition, fmt, ...)
#else
#define DCHECK_EQ(val1, val2)  CHECK_EQ(val1, val2)
#define DCHECK_NE(val1, val2)  CHECK_NE(val1, val2)
#define DCHECK_LE(val1, val2)  CHECK_LE(val1, val2)
#define DCHECK_LT(val1, val2)  CHECK_LT(val1, val2)
#define DCHECK_GE(val1, val2)  CHECK_GE(val1, val2)
#define DCHECK_GT(val1, val2)  CHECK_GT(val1, val2)
#define DASSERT_TRUE(cond)     ASSERT_TRUE(cond)
#define DASSERT_FALSE(cond)    ASSERT_FALSE(cond)
#define DASSERT_MSG(condition, fmt, ...)                                \
  do {                                                                  \
    if (__builtin_expect(!(condition), 0)) {                            \
      logstream(LOG_ERROR)                                              \
        << "Check failed: " << #condition << ":\n";                     \
      logger(LOG_ERROR, fmt, ##__VA_ARGS__);                            \
      __print_back_trace();                                             \
      throw("assertion failure");                                       \
    }                                                                   \
  } while(0)
#endif

#ifdef ERROR
#undef ERROR      // may conflict with ERROR macro on windows
#endif

#endif // _ASSERTIONS_H_  (comment previously said _LOGGING_H_, a leftover from the file this was adapted from)
09jijiangwen-download
src/api/graphlab2_1_GAS_api/assertions.hpp
C++
asf20
10,281
/**
 * @file
 * @author  Aapo Kyrola <akyrola@cs.cmu.edu>
 * @version 1.0
 *
 * @section LICENSE
 *
 * Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University]
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @section DESCRIPTION
 *
 * Includes for the GraphChi - GraphLab v2.1 Gather-Apply-Scatter API.
 */

#ifndef DEF_GRAPHLAB_GAS_API_V2_1
#define DEF_GRAPHLAB_GAS_API_V2_1

#include "api/graphlab2_1_GAS_api/assertions.hpp"
#include "api/graphlab2_1_GAS_api/graphchi_graphlabv2_1.hpp"

#include <boost/foreach.hpp>
#include <stdint.h>

// if GNUC is available, this checks if the file which included
// macros_def.hpp is the same file which included macros_undef.hpp
#ifdef __GNUC__
#define GRAPHLAB_MACROS_INC_LEVEL __INCLUDE_LEVEL__
#endif

// prevent this file from being included before other graphlab headers
#ifdef GRAPHLAB_MACROS
#error "Repeated include of <macros_def.hpp>. This probably means that macros_def.hpp was not the last include, or some header file failed to include <macros_undef.hpp>"
#endif

#define GRAPHLAB_MACROS

/** A macro to disallow the copy constructor and operator= functions
 This should be used in the private: declarations for a class */
#define DISALLOW_COPY_AND_ASSIGN(TypeName)  \
    TypeName(const TypeName&);              \
    void operator=(const TypeName&);

// Shortcut macro definitions
//! see http://www.boost.org/doc/html/foreach.html
#define foreach BOOST_FOREACH
#define rev_foreach BOOST_REVERSE_FOREACH

#include "api/graphlab2_1_GAS_api/random.hpp"

#endif
09jijiangwen-download
src/api/graphlab2_1_GAS_api/graphlab.hpp
C++
asf20
2,069
/**
 * Copyright (c) 2009 Carnegie Mellon University.
 *     All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an "AS
 * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied.  See the License for the specific language
 * governing permissions and limitations under the License.
 *
 * For more about this software visit:
 *
 *      http://www.graphlab.ml.cmu.edu
 *
 */

#include <pthread.h>
#include <set>
#include <iostream>
#include <fstream>

#include <boost/random.hpp>
#include <boost/integer_traits.hpp>

#include "util/pthread_tools.hpp"
#include "api/graphlab2_1_GAS_api/graphlab.hpp"

namespace graphlab {

  namespace random {

    /**
     * A truely nondeterministic generator: reads machine words directly
     * from /dev/urandom. Used only to seed the per-thread generators.
     */
    class nondet_generator {
    public:
      // Process-wide singleton.
      static nondet_generator& global() {
        static nondet_generator global_gen;
        return global_gen;
      }

      typedef size_t result_type;
      BOOST_STATIC_CONSTANT(result_type, min_value = boost::integer_traits<result_type>::const_min);
      BOOST_STATIC_CONSTANT(result_type, max_value = boost::integer_traits<result_type>::const_max);
      result_type min BOOST_PREVENT_MACRO_SUBSTITUTION () const { return min_value; }
      result_type max BOOST_PREVENT_MACRO_SUBSTITUTION () const { return max_value; }

      nondet_generator() {
        rnd_dev.open("/dev/urandom", std::ios::binary | std::ios::in);
        ASSERT_TRUE(rnd_dev.good());
      }
      // Close the random number generator
      ~nondet_generator() { rnd_dev.close(); }

      // read a size_t from the source
      // NOTE(review): ASSERT_TRUE throws on failure while `mut` is still
      // locked (the mutex wrapper is not RAII here) — a failed read would
      // leave the mutex held; confirm whether that is acceptable.
      result_type operator()() {
        // read a machine word into result
        result_type result(0);
        mut.lock();
        ASSERT_TRUE(rnd_dev.good());
        rnd_dev.read(reinterpret_cast<char*>(&result), sizeof(result_type));
        ASSERT_TRUE(rnd_dev.good());
        mut.unlock();
        // std::cout << result << std::endl;
        return result;
      }
    private:
      std::ifstream rnd_dev;   // open handle to /dev/urandom
      mutex mut;               // serializes reads from the device
    };
    //nondet_generator global_nondet_rng;

    /**
     * This class represents a master registery of all active random
     * number generators: seeding the registry re-seeds every registered
     * thread-local generator from the master generator.
     */
    struct source_registry {
      std::set<generator*> generators;   // all live thread-local generators
      generator master;                  // master generator used to derive seeds
      mutex mut;                         // protects `generators` and `master`

      // Process-wide singleton.
      static source_registry& global() {
        static source_registry registry;
        return registry;
      }

      /**
       * Seed all threads using the default seed
       */
      void seed() {
        mut.lock();
        master.seed();
        foreach(generator* generator, generators) {
          ASSERT_TRUE(generator != NULL);
          generator->seed(master);
        }
        mut.unlock();
      }

      /**
       * Seed all threads nondeterministically (from /dev/urandom).
       */
      void nondet_seed() {
        mut.lock();
        master.nondet_seed();
        foreach(generator* generator, generators) {
          ASSERT_TRUE(generator != NULL);
          generator->seed(master);
        }
        mut.unlock();
      }

      /**
       * Seed all threads from the current time.
       */
      void time_seed() {
        mut.lock();
        master.time_seed();
        foreach(generator* generator, generators) {
          ASSERT_TRUE(generator != NULL);
          generator->seed(master);
        }
        mut.unlock();
      }

      /**
       * Seed all threads with a fixed number
       */
      void seed(const size_t number) {
        mut.lock();
        master.seed(number);
        foreach(generator* generator, generators) {
          ASSERT_TRUE(generator != NULL);
          generator->seed(master);
        }
        mut.unlock();
      }

      /**
       * Register a source with the registry and seed it based on the
       * master.
       */
      void register_generator(generator* tls_ptr) {
        ASSERT_TRUE(tls_ptr != NULL);
        mut.lock();
        generators.insert(tls_ptr);
        tls_ptr->seed(master);
        // std::cout << "Generator created" << std::endl;
        // __print_back_trace();
        mut.unlock();
      }

      /**
       * Unregister a source from the registry
       */
      void unregister_source(generator* tls_ptr) {
        mut.lock();
        generators.erase(tls_ptr);
        mut.unlock();
      }
    }; // source_registry
    // registry;

    //////////////////////////////////////////////////////////////
    /// Pthread TLS code

    /**
     * this function is responsible for destroying the random number
     * generators (installed as the pthread TLS destructor below).
     */
    void destroy_tls_data(void* ptr) {
      generator* tls_rnd_ptr = reinterpret_cast<generator*>(ptr);
      if(tls_rnd_ptr != NULL) {
        source_registry::global().unregister_source(tls_rnd_ptr);
        delete tls_rnd_ptr;
      }
    }

    /**
     * Simple struct used to construct the thread local storage at
     * startup.
     */
    struct tls_key_creator {
      pthread_key_t TLS_RANDOM_SOURCE_KEY;
      tls_key_creator() : TLS_RANDOM_SOURCE_KEY(0) {
        pthread_key_create(&TLS_RANDOM_SOURCE_KEY, destroy_tls_data);
      }
    };

    // This function is to be called prior to any access to the random
    // source
    static pthread_key_t get_random_source_key() {
      static const tls_key_creator key;
      return key.TLS_RANDOM_SOURCE_KEY;
    }
    // This forces __init_keys__ to be called prior to main.
    static pthread_key_t __unused_init_keys__(get_random_source_key());

    // the combination of the two mechanisms above will force the
    // thread local store to be initialized
    // 1: before main
    // 2: before any use of random by global variables.
    // KNOWN_ISSUE: if a global variable (initialized before main)
    // spawns threads which then call random. Things explode.

    /////////////////////////////////////////////////////////////
    //// Implementation of header functions

    // Returns this thread's generator, lazily creating and registering it.
    generator& get_source() {
      // get the thread local storage
      generator* tls_rnd_ptr =
        reinterpret_cast<generator*>(pthread_getspecific(get_random_source_key()));
      // Create a tls_random_source if none was provided
      if(tls_rnd_ptr == NULL) {
        tls_rnd_ptr = new generator();
        assert(tls_rnd_ptr != NULL);
        // This will seed it with the master rng
        source_registry::global().register_generator(tls_rnd_ptr);
        pthread_setspecific(get_random_source_key(), tls_rnd_ptr);
      }
      // assert(tls_rnd_ptr != NULL);
      return *tls_rnd_ptr;
    } // end of get local random source

    void seed() { source_registry::global().seed(); }

    void nondet_seed() { source_registry::global().nondet_seed(); }

    void time_seed() { source_registry::global().time_seed(); }

    void seed(const size_t seed_value) {
      source_registry::global().seed(seed_value);
    }

    // Seeds all three internal engines of a generator from /dev/urandom.
    void generator::nondet_seed() {
      // Get the global nondeterministic random number generator.
      nondet_generator& nondet_rnd(nondet_generator::global());
      mut.lock();
      // std::cout << "initializing real rng" << std::endl;
      real_rng.seed(nondet_rnd());
      // std::cout << "initializing discrete rng" << std::endl;
      discrete_rng.seed(nondet_rnd());
      // std::cout << "initializing fast discrete rng" << std::endl;
      fast_discrete_rng.seed(nondet_rnd());
      mut.unlock();
    }

    // Normalizes a pdf in place and converts it to a cumulative distribution.
    void pdf2cdf(std::vector<double>& pdf) {
      double Z = 0;
      for(size_t i = 0; i < pdf.size(); ++i) Z += pdf[i];
      for(size_t i = 0; i < pdf.size(); ++i)
        pdf[i] = pdf[i]/Z + ((i>0)? pdf[i-1] : 0);
    } // end of pdf2cdf

  }; // end of namespace random

};// end of namespace graphlab
09jijiangwen-download
src/api/graphlab2_1_GAS_api/random.cpp
C++
asf20
9,815
/** * @file * @author Aapo Kyrola <akyrola@cs.cmu.edu> * @version 1.0 * * @section LICENSE * * Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University] * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * @section DESCRIPTION * * Context object which contains information about the graph * and on-going computation. */ #ifndef DEF_GRAPHCHI_CONTEXT #define DEF_GRAPHCHI_CONTEXT #include <vector> #include <assert.h> #include <omp.h> #include <sys/time.h> #include "graphchi_types.hpp" #include "api/ischeduler.hpp" namespace graphchi { struct graphchi_context { size_t nvertices; size_t nedges; ischeduler * scheduler; int iteration; int num_iterations; int last_iteration; int execthreads; std::vector<double> deltas; timeval start; std::string filename; double last_deltasum; graphchi_context() : scheduler(NULL), iteration(0), last_iteration(-1) { gettimeofday(&start, NULL); last_deltasum = 0.0; } double runtime() { timeval end; gettimeofday(&end, NULL); return end.tv_sec-start.tv_sec+ ((double)(end.tv_usec-start.tv_usec))/1.0E6; } /** * Set a termination iteration. 
*/ void set_last_iteration(int _last_iteration) { last_iteration = _last_iteration; } void reset_deltas(int nthreads) { deltas = std::vector<double>(nthreads, 0.0); } double get_delta() { double d = 0.0; for(int i=0; i < (int)deltas.size(); i++) { d += deltas[i]; } last_deltasum = d; return d; } inline bool isnan(double x) { return !(x<0 || x>=0); } /** * Method for keeping track of the amount of change in computation. * An update function may broadcast a numerical "delta" value that is * automatically accumulated (in thread-safe way). * @param delta */ void log_change(double delta) { deltas[omp_get_thread_num()] += delta; assert(delta >= 0); assert(!isnan(delta)); /* Sanity check */ } }; } #endif
09jijiangwen-download
src/api/graphchi_context.hpp
C++
asf20
2,937
/** * @file * @author Aapo Kyrola <akyrola@cs.cmu.edu> * @version 1.0 * * @section LICENSE * * Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University] * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * @section DESCRIPTION * * Scheduler interface. */ #ifndef DEF_GRAPHCHI_ISCHEDULER #define DEF_GRAPHCHI_ISCHEDULER #include "graphchi_types.hpp" #include "logger/logger.hpp" namespace graphchi { class ischeduler { public: virtual ~ischeduler() {} virtual void add_task(vid_t vid) = 0; virtual void remove_tasks(vid_t fromvertex, vid_t tovertex) = 0; virtual void add_task_to_all() = 0; virtual bool is_scheduled(vid_t vertex) = 0; }; /** * Implementation of the scheduler which actually does nothing. */ class non_scheduler : public ischeduler { int nwarnings; public: non_scheduler() : nwarnings(0) {} virtual ~non_scheduler() {} virtual void add_task(vid_t vid) { if (nwarnings++ % 10000 == 0) { logstream(LOG_WARNING) << "Tried to add task to scheduler, but scheduling was not enabled!" << std::endl; } } virtual void remove_tasks(vid_t fromvertex, vid_t tovertex) { } virtual void add_task_to_all() { } virtual bool is_scheduled(vid_t vertex) { return true; } }; } #endif
09jijiangwen-download
src/api/ischeduler.hpp
C++
asf20
1,970
/**
 * @file
 * @author  Aapo Kyrola <akyrola@cs.cmu.edu>
 * @version 1.0
 *
 * @section LICENSE
 *
 * Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University]
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @section DESCRIPTION
 *
 * Returns standard filenames for all the data files used by GraphChi.
 * All functions expect a "basefilename".
 * You can specify environment variable "GRAPHCHI_ROOT", which is the
 * root directory for the GraphChi configuration and source directories.
 */

#ifndef GRAPHCHI_FILENAMES_DEF
#define GRAPHCHI_FILENAMES_DEF

#include <fstream>
#include <fcntl.h>
#include <string>
#include <sstream>
#include <stdlib.h>
#include <errno.h>
#include <unistd.h>
#include <vector>
#include <sys/stat.h>

#include "graphchi_types.hpp"
#include "logger/logger.hpp"

#ifdef DYNAMICEDATA
#include "shards/dynamicdata/dynamicblock.hpp"
#endif

namespace graphchi {

#ifdef __GNUC__
#define VARIABLE_IS_NOT_USED __attribute__ ((unused))
#else
#define VARIABLE_IS_NOT_USED
#endif

    // Forward declaration; defined in the command-line options code.
    static int VARIABLE_IS_NOT_USED get_option_int(const char *option_name, int default_value);

    /**
      * Vertex data file
      */
    template <typename VertexDataType>
    static std::string filename_vertex_data(std::string basefilename) {
        std::stringstream ss;
        ss << basefilename;
        ss << "." << sizeof(VertexDataType) << "B.vout";
        return ss.str();
    }

    // Degree file (shared by all shard counts of the same graph).
    static std::string filename_degree_data(std::string basefilename)  {
        return basefilename + "_degs.bin";
    }

    // Vertex-interval boundaries for an nshards sharding.
    static std::string filename_intervals(std::string basefilename, int nshards) {
        std::stringstream ss;
        ss << basefilename;
        ss << "." << nshards << ".intervals";
        return ss.str();
    }

    // ".<p>_<nshards>" suffix used by shard file names.
    static std::string VARIABLE_IS_NOT_USED get_part_str(int p, int nshards) {
        char partstr[32];
        // Bounded: two decimal ints plus separators always fit in 32 bytes.
        sprintf(partstr, ".%d_%d", p, nshards);
        return std::string(partstr);
    }

    // Edge-data shard base name; the actual data lives in a block directory.
    template <typename EdgeDataType>
    static std::string filename_shard_edata(std::string basefilename, int p, int nshards) {
        std::stringstream ss;
        ss << basefilename;
#ifdef DYNAMICEDATA
        ss << ".dynamic.";
#else
        ss << ".edata.";
#endif
        ss << "e" << sizeof(EdgeDataType) << "B.";
        ss << p << "_" << nshards;
        return ss.str();
    }

    // Directory holding the (compressed) blocks of one edge-data shard.
    static std::string dirname_shard_edata_block(std::string edata_shardname, size_t blocksize) {
        std::stringstream ss;
        ss << edata_shardname;
        ss << "_blockdir_" << blocksize;
        return ss.str();
    }

    // Reads the logical edge-data size of a shard from its ".size" side file.
    // NOTE(review): template parameter EdgeDataType is unused in the body.
    template <typename EdgeDataType>
    static size_t get_shard_edata_filesize(std::string edata_shardname) {
        size_t fsize;
        std::string fname = edata_shardname + ".size";
        std::ifstream ifs(fname.c_str());
        if (!ifs.good()) {
            logstream(LOG_FATAL) << "Could not load " << fname << ". Preprocessing forgotten?" << std::endl;
            assert(ifs.good());
        }
        ifs >> fsize;
        ifs.close();
        return fsize;
    }

    // File name of one numbered block inside an edge-data shard's block directory.
    static std::string filename_shard_edata_block(std::string edata_shardname, int blockid, size_t blocksize) {
        std::stringstream ss;
        ss << dirname_shard_edata_block(edata_shardname, blocksize);
        ss << "/";
        ss << blockid;
        return ss.str();
    }

    // Adjacency (graph structure) file of shard p.
    static std::string filename_shard_adj(std::string basefilename, int p, int nshards) {
        std::stringstream ss;
        ss << basefilename;
        ss << ".edata_azv.";
        ss << p << "_" << nshards << ".adj";
        return ss.str();
    }

    /**
     * Configuration file name
     */
    static std::string filename_config();
    static std::string filename_config() {
        char * chi_root = getenv("GRAPHCHI_ROOT");
        if (chi_root != NULL) {
            return std::string(chi_root) + "/conf/graphchi.cnf";
        } else {
            return "conf/graphchi.cnf";
        }
    }

    /**
     * Configuration file name - local version which can
     * override the version in the version control.
     */
    static std::string filename_config_local();
    static std::string filename_config_local() {
        char * chi_root = getenv("GRAPHCHI_ROOT");
        if (chi_root != NULL) {
            return std::string(chi_root) + "/conf/graphchi.local.cnf";
        } else {
            return "conf/graphchi.local.cnf";
        }
    }

    // True if sname can be opened for reading.
    static bool file_exists(std::string sname);
    static bool file_exists(std::string sname) {
        int tryf = open(sname.c_str(), O_RDONLY);
        if (tryf < 0) {
            return false;
        } else {
            close(tryf);
            return true;
        }
    }

    /**
     * Returns the number of shards if a file has been already
     * sharded or 0 if not found.
     * With shard_string "auto", candidate counts 0..2400 are probed;
     * otherwise only the given count is checked.
     */
    template<typename EdgeDataType>
    static int find_shards(std::string base_filename, std::string shard_string="auto") {
        int try_shard_num;
        int start_num = 0;
        int last_shard_num = 2400;
        if (shard_string == "auto") {
            start_num = 0;
        } else {
            start_num = atoi(shard_string.c_str());
        }

        if (start_num > 0) {
            last_shard_num = start_num;
        }

        // Block size must be a multiple of the edge-data size.
        size_t blocksize = 4096 * 1024;
        while (blocksize % sizeof(EdgeDataType) != 0) blocksize++;

        for(try_shard_num=start_num; try_shard_num <= last_shard_num; try_shard_num++) {
            // Probe the last shard first: it exists only if the whole set was built.
            std::string last_shard_name = filename_shard_edata<EdgeDataType>(base_filename, try_shard_num - 1, try_shard_num);
            std::string last_block_name = filename_shard_edata_block(last_shard_name, 0, blocksize);
            int tryf = open(last_block_name.c_str(), O_RDONLY);
            if (tryf >= 0) {
                // Found!
                close(tryf);

                int nshards_candidate = try_shard_num;
                bool success = true;

                // Validate all relevant files exists
                for(int p=0; p < nshards_candidate; p++) {
                    std::string sname = filename_shard_edata_block(
                        filename_shard_edata<EdgeDataType>(base_filename, p, nshards_candidate), 0, blocksize);
                    if (!file_exists(sname)) {
                        logstream(LOG_DEBUG) << "Missing directory file: " << sname << std::endl;
                        success = false;
                        break;
                    }

                    sname = filename_shard_adj(base_filename, p, nshards_candidate);
                    if (!file_exists(sname)) {
                        logstream(LOG_DEBUG) << "Missing shard file: " << sname << std::endl;
                        success = false;
                        break;
                    }
                }

                // Check degree file
                std::string degreefname = filename_degree_data(base_filename);
                if (!file_exists(degreefname)) {
                    logstream(LOG_ERROR) << "Missing degree file: " << degreefname << std::endl;
                    logstream(LOG_ERROR) << "You need to preprocess (sharder) your file again!" << std::endl;
                    return 0;
                }

                std::string intervalfname = filename_intervals(base_filename, nshards_candidate);
                if (!file_exists(intervalfname)) {
                    logstream(LOG_ERROR) << "Missing intervals file: " << intervalfname << std::endl;
                    logstream(LOG_ERROR) << "You need to preprocess (sharder) your file again!" << std::endl;
                    return 0;
                }

                if (!success) {
                    continue;   // incomplete shard set; try the next candidate count
                }

                logstream(LOG_INFO) << "Detected number of shards: " << nshards_candidate << std::endl;
                logstream(LOG_INFO) << "To specify a different number of shards, use command-line parameter 'nshards'" << std::endl;
                return nshards_candidate;
            }
        }
        if (last_shard_num == start_num) {
            logstream(LOG_WARNING) << "Could not find shards with nshards = " << start_num << std::endl;
            logstream(LOG_WARNING) << "Please define 'nshards 0' or 'nshards auto' to automatically detect." << std::endl;
        }
        return 0;
    }

    /**
     * Delete the shard files
     */
    template<typename EdgeDataType_>
    static void delete_shards(std::string base_filename, int nshards) {
#ifdef DYNAMICEDATA
        typedef int EdgeDataType;   // dynamic edge data is stored as int blocks
#else
        typedef EdgeDataType_ EdgeDataType;
#endif
        logstream(LOG_DEBUG) << "Deleting files for " << base_filename << " shards=" << nshards << std::endl;
        std::string intervalfname = filename_intervals(base_filename, nshards);
        if (file_exists(intervalfname)) {
            int err = remove(intervalfname.c_str());
            if (err != 0) logstream(LOG_ERROR) << "Error removing file " << intervalfname << ", " << strerror(errno) << std::endl;
        }
        /* Note: degree file is not removed, because same graph with different number
           of shards share the file. This should be probably change.
        std::string degreefname = filename_degree_data(base_filename);
        if (file_exists(degreefname)) {
            remove(degreefname.c_str());
        } */

        // Must mirror the block-size computation used when the shards were written.
        size_t blocksize = 4096 * 1024;
        while (blocksize % sizeof(EdgeDataType) != 0) blocksize++;

        for(int p=0; p < nshards; p++) {
            int blockid = 0;
            std::string filename_edata = filename_shard_edata<EdgeDataType>(base_filename, p, nshards);
            std::string fsizename = filename_edata + ".size";
            if (file_exists(fsizename)) {
                int err = remove(fsizename.c_str());
                if (err != 0) logstream(LOG_ERROR) << "Error removing file " << fsizename << ", " << strerror(errno) << std::endl;
            }
            // Blocks are numbered consecutively; stop at the first missing one.
            while(true) {
                std::string block_filename = filename_shard_edata_block(filename_edata, blockid, blocksize);
                logstream(LOG_DEBUG) << "Deleting " << block_filename << " exists: " << file_exists(block_filename) << std::endl;
                if (file_exists(block_filename)) {
                    int err = remove(block_filename.c_str());
                    if (err != 0) logstream(LOG_ERROR) << "Error removing file " << block_filename << ", " << strerror(errno) << std::endl;
                } else {
                    break;
                }
#ifdef DYNAMICEDATA
                delete_block_uncompressed_sizefile(block_filename);
#endif
                blockid++;
            }
            std::string dirname = dirname_shard_edata_block(filename_edata, blocksize);
            if (file_exists(dirname)) {
                int err = remove(dirname.c_str());
                if (err != 0) logstream(LOG_ERROR) << "Error removing directory " << dirname << ", " << strerror(errno) << std::endl;
            }

            std::string adjname = filename_shard_adj(base_filename, p, nshards);
            logstream(LOG_DEBUG) << "Deleting " << adjname << " exists: " << file_exists(adjname) << std::endl;
            if (file_exists(adjname)) {
                int err = remove(adjname.c_str());
                if (err != 0) logstream(LOG_ERROR) << "Error removing file " << adjname << ", " << strerror(errno) << std::endl;
            }
        }
        std::string numv_filename = base_filename + ".numvertices";
        if (file_exists(numv_filename)) {
            int err = remove(numv_filename.c_str());
            if (err != 0) logstream(LOG_ERROR) << "Error removing file " << numv_filename << ", " << strerror(errno) << std::endl;
        }
    }

    /**
Loads vertex intervals. */ static void load_vertex_intervals(std::string base_filename, int nshards, std::vector<std::pair<vid_t, vid_t> > & intervals, bool allowfail); static void load_vertex_intervals(std::string base_filename, int nshards, std::vector<std::pair<vid_t, vid_t> > & intervals, bool allowfail=false) { std::string intervalsFilename = filename_intervals(base_filename, nshards); std::ifstream intervalsF(intervalsFilename.c_str()); if (!intervalsF.good()) { if (allowfail) return; // Hack logstream(LOG_ERROR) << "Could not load intervals-file: " << intervalsFilename << std::endl; } assert(intervalsF.good()); intervals.clear(); vid_t st=0, en; for(int i=0; i < nshards; i++) { assert(!intervalsF.eof()); intervalsF >> en; intervals.push_back(std::pair<vid_t,vid_t>(st, en)); st = en + 1; } for(int i=0; i < nshards; i++) { logstream(LOG_INFO) << "shard: " << intervals[i].first << " - " << intervals[i].second << std::endl; } intervalsF.close(); } /** * Returns the number of vertices in a graph. The value is stored in a separate file <graphname>.numvertices */ static VARIABLE_IS_NOT_USED size_t get_num_vertices(std::string basefilename); static VARIABLE_IS_NOT_USED size_t get_num_vertices(std::string basefilename) { std::string numv_filename = basefilename + ".numvertices"; std::ifstream vfileF(numv_filename.c_str()); if (!vfileF.good()) { logstream(LOG_ERROR) << "Could not find file " << numv_filename << std::endl; logstream(LOG_ERROR) << "Maybe you have old shards - please recreate." << std::endl; assert(false); } size_t n; vfileF >> n; vfileF.close(); return n; } template <typename EdgeDataType> std::string preprocess_filename(std::string basefilename) { std::stringstream ss; ss << basefilename; ss << "." << sizeof(EdgeDataType) << "B.bin"; return ss.str(); } /** * Checks if original file has more recent modification date * than the shards. If it has, deletes the shards and returns false. * Otherwise return true. 
*/ template <typename EdgeDataType> bool check_origfile_modification_earlier(std::string basefilename, int nshards) { /* Compare last modified dates of the original graph and the shards */ if (file_exists(basefilename) && get_option_int("disable-modtime-check", 0) == 0) { struct stat origstat, shardstat; int err1 = stat(basefilename.c_str(), &origstat); std::string adjfname = filename_shard_adj(basefilename, 0, nshards); int err2 = stat(adjfname.c_str(), &shardstat); if (err1 != 0 || err2 != 0) { logstream(LOG_ERROR) << "Error when checking file modification times: " << strerror(errno) << std::endl; return nshards; } if (origstat.st_mtime > shardstat.st_mtime) { logstream(LOG_INFO) << "The input graph modification date was newer than of the shards." << std::endl; logstream(LOG_INFO) << "Going to delete old shards and recreate new ones. To disable " << std::endl; logstream(LOG_INFO) << "functionality, specify --disable-modtime-check=1" << std::endl; // Delete shards delete_shards<EdgeDataType>(basefilename, nshards); // Delete the bin-file std::string preprocfile = preprocess_filename<EdgeDataType>(basefilename); if (file_exists(preprocfile)) { logstream(LOG_DEBUG) << "Deleting: " << preprocfile << std::endl; int err = remove(preprocfile.c_str()); if (err != 0) { logstream(LOG_ERROR) << "Error deleting file: " << preprocfile << ", " << strerror(errno) << std::endl; } } return false; } else { return true; } } return true; } } #endif
09jijiangwen-download
src/api/chifilenames.hpp
C++
asf20
17,251
/** * @file * @author Aapo Kyrola <akyrola@cs.cmu.edu> * @version 1.0 * * @section LICENSE * * Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University] * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * @section DESCRIPTION * * Simple vertex-aggregators/scanners which allows reductions over all vertices * in an I/O efficient manner. */ #ifndef DEF_GRAPHCHI_VERTEX_AGGREGATOR #define DEF_GRAPHCHI_VERTEX_AGGREGATOR #include <errno.h> #include <memory.h> #include <string> #include "graphchi_types.hpp" #include "api/chifilenames.hpp" #include "io/stripedio.hpp" #include "util/ioutil.hpp" #include "engine/auxdata/vertex_data.hpp" namespace graphchi { /** * Abstract class for callbacks that are invoked for each * vertex when foreach_vertices() is called (see below). */ template <typename VertexDataType> class VCallback { public: virtual void callback(vid_t vertex_id, VertexDataType &value) = 0; }; /** * Foreach: a callback object is invoked for every vertex in the given range. * See VCallback above. * @param basefilename base filename * @param fromv first vertex * @param tov last vertex (exclusive) * @param callback user-defined callback-object. 
*/ template <typename VertexDataType> void foreach_vertices(std::string basefilename, vid_t fromv, vid_t tov, VCallback<VertexDataType> &callback) { std::string filename = filename_vertex_data<VertexDataType>(basefilename); metrics m("foreach"); stripedio * iomgr = new stripedio(m); vid_t readwindow = 1024 * 1024; size_t numvertices = get_num_vertices(basefilename); vertex_data_store<VertexDataType> * vertexdata = new vertex_data_store<VertexDataType>(basefilename, numvertices, iomgr); vid_t st = fromv; vid_t en = 0; while(st <= tov) { en = st + readwindow - 1; if (en >= tov) en = tov - 1; if (st < en) { vertexdata->load(st, en); for(vid_t v=st; v<=en; v++) { VertexDataType * vptr = vertexdata->vertex_data_ptr(v); callback.callback(v, (VertexDataType&) *vptr); } } st += readwindow; } delete vertexdata; delete iomgr; } /** * Callback for computing a sum. * TODO: a functional version instead of imperative. */ template <typename VertexDataType, typename SumType> class SumCallback : public VCallback<VertexDataType> { public: SumType accum; SumCallback(SumType initval) : VCallback<VertexDataType>() { accum = initval; } void callback(vid_t vertex_id, VertexDataType &value) { accum += value; } }; /** * Computes a sum over a range of vertices' values. * Type SumType defines the accumulator type, which may be different * than vertex type. For example, often vertex value is 32-bit * integer, but the sum will need to be 64-bit integer. * @param basefilename base filename * @param fromv first vertex * @param tov last vertex (exclusive) */ template <typename VertexDataType, typename SumType> SumType sum_vertices(std::string base_filename, vid_t fromv, vid_t tov) { SumCallback<VertexDataType, SumType> sumc(0); foreach_vertices<VertexDataType>(base_filename, fromv, tov, sumc); return sumc.accum; } } #endif
09jijiangwen-download
src/api/vertex_aggregator.hpp
C++
asf20
4,181
;/** * @file * @author Aapo Kyrola <akyrola@cs.cmu.edu> * @version 1.0 * * @section LICENSE * * Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University] * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * @section DESCRIPTION * * Graph conversion and parsing routines. */ #ifndef GRAPHCHI_CONVERSIONS_DEF #define GRAPHCHI_CONVERSIONS_DEF #include <fcntl.h> #include <unistd.h> #include <sys/types.h> #include <dirent.h> #include <sys/stat.h> #include "graphchi_types.hpp" #include "logger/logger.hpp" #include "preprocessing/sharder.hpp" #include "preprocessing/formats/binary_adjacency_list.hpp" /** * GNU COMPILER HACK TO PREVENT WARNINGS "Unused variable", if * the particular app being compiled does not use a function. 
*/ #ifdef __GNUC__ #define VARIABLE_IS_NOT_USED __attribute__ ((unused)) #else #define VARIABLE_IS_NOT_USED #endif namespace graphchi { struct dummy {}; /* Simple string to number parsers */ static void VARIABLE_IS_NOT_USED parse(int &x, const char * s); static void VARIABLE_IS_NOT_USED parse(unsigned int &x, const char * s); static void VARIABLE_IS_NOT_USED parse(float &x, const char * s); static void VARIABLE_IS_NOT_USED parse(long &x, const char * s); static void VARIABLE_IS_NOT_USED parse(char &x, const char * s); static void VARIABLE_IS_NOT_USED parse(bool &x, const char * s); static void VARIABLE_IS_NOT_USED parse(double &x, const char * s); static void VARIABLE_IS_NOT_USED parse(short &x, const char * s); static void FIXLINE(char * s); static void parse(int &x, const char * s) { x = atoi(s); } static void parse(unsigned int &x, const char * s) { x = (unsigned int) strtoul(s, NULL, 10); } static void parse(float &x, const char * s) { x = (float) atof(s); } /** * Special templated parser for PairContainers. */ template <typename T> void parse(PairContainer<T> &x, const char * s) { parse(x.left, s); parse(x.right, s); } static void parse(long &x, const char * s) { x = atol(s); } static void parse(char &x, const char * s) { x = s[0]; } static void parse(bool &x, const char * s) { x = atoi(s) == 1; } static void parse(double &x, const char * s) { x = atof(s); } static void parse(short &x, const char * s) { x = (short) atoi(s); } #ifdef DYNAMICEDATA static void VARIABLE_IS_NOT_USED parse_multiple(std::vector<dummy> &values, char * s); void parse_multiple(std::vector<dummy> & values, char * s) { assert(false); } /** * Parse ':' -delimited values into a vector. 
*/ template <typename T> static void parse_multiple(typename std::vector<T> & values, char * s) { char delims[] = ":"; char * t; t = strtok(s, delims); T x; parse(x, (const char*) t); values.push_back(x); while((t = strtok(NULL, delims)) != NULL) { parse(x, (const char*) t); values.push_back(x); } } #endif // Catch all template <typename T> void parse(T &x, const char * s) { logstream(LOG_FATAL) << "You need to define parse<your-type>(your-type &x, const char *s) function" << " to support parsing the edge value." << std::endl; assert(false); } // Removes \n from the end of line void FIXLINE(char * s) { int len = (int) strlen(s)-1; if(s[len] == '\n') s[len] = 0; } // http://www.linuxquestions.org/questions/programming-9/c-list-files-in-directory-379323/ int getdir (std::string dir, std::vector<std::string> &files); int getdir (std::string dir, std::vector<std::string> &files) { DIR *dp; struct dirent *dirp; if((dp = opendir(dir.c_str())) == NULL) { std::cout << "Error(" << errno << ") opening " << dir << std::endl; return errno; } while ((dirp = readdir(dp)) != NULL) { files.push_back(std::string(dirp->d_name)); } closedir(dp); return 0; } std::string get_dirname(std::string arg); std::string get_dirname(std::string arg) { size_t a = arg.find_last_of("/"); if (a != arg.npos) { std::string dir = arg.substr(0, a); return dir; } else { assert(false); } } std::string get_filename(std::string arg); std::string get_filename(std::string arg) { size_t a = arg.find_last_of("/"); if (a != arg.npos) { std::string f = arg.substr(a + 1); return f; } else { assert(false); } } /** * Converts graph from an edge list format. Input may contain * value for the edges. Self-edges are ignored. 
*/ template <typename EdgeDataType> void convert_edgelist(std::string inputfile, sharder<EdgeDataType> &sharderobj, bool multivalue_edges=false) { FILE * inf = fopen(inputfile.c_str(), "r"); size_t bytesread = 0; size_t linenum = 0; if (inf == NULL) { logstream(LOG_FATAL) << "Could not load :" << inputfile << " error: " << strerror(errno) << std::endl; } assert(inf != NULL); logstream(LOG_INFO) << "Reading in edge list format!" << std::endl; char s[1024]; while(fgets(s, 1024, inf) != NULL) { linenum++; if (linenum % 10000000 == 0) { logstream(LOG_DEBUG) << "Read " << linenum << " lines, " << bytesread / 1024 / 1024. << " MB" << std::endl; } FIXLINE(s); bytesread += strlen(s); if (s[0] == '#') continue; // Comment if (s[0] == '%') continue; // Comment char delims[] = "\t, "; char * t; t = strtok(s, delims); if (t == NULL) { logstream(LOG_ERROR) << "Input file is not in right format. " << "Expecting \"<from>\t<to>\". " << "Current line: \"" << s << "\"\n"; assert(false); } vid_t from = atoi(t); t = strtok(NULL, delims); if (t == NULL) { logstream(LOG_ERROR) << "Input file is not in right format. " << "Expecting \"<from>\t<to>\". " << "Current line: \"" << s << "\"\n"; assert(false); } vid_t to = atoi(t); /* Check if has value */ t = strtok(NULL, delims); if (!multivalue_edges) { EdgeDataType val; if (t != NULL) { parse(val, (const char*) t); } if (from != to) { if (t != NULL) { sharderobj.preprocessing_add_edge(from, to, val); } else { sharderobj.preprocessing_add_edge(from, to); } } } else { #ifdef DYNAMICEDATA std::vector<EdgeDataType> vals; parse_multiple(vals, (char*) t); if (from != to) { if (vals.size() == 0) { // TODO: go around this problem logstream(LOG_FATAL) << "Each edge needs at least one value." << std::endl; assert(vals.size() > 0); } sharderobj.preprocessing_add_edge_multival(from, to, vals); } #else logstream(LOG_FATAL) << "To support multivalue-edges, dynamic edge data needs to be used." 
<< std::endl; assert(false); #endif } } fclose(inf); } /** * Converts a graph from adjacency list format. Edge values are not supported, * and each edge gets the default value for the type. Self-edges are ignored. */ template <typename EdgeDataType> void convert_adjlist(std::string inputfile, sharder<EdgeDataType> &sharderobj) { FILE * inf = fopen(inputfile.c_str(), "r"); if (inf == NULL) { logstream(LOG_FATAL) << "Could not load :" << inputfile << " error: " << strerror(errno) << std::endl; } assert(inf != NULL); logstream(LOG_INFO) << "Reading in adjacency list format!" << std::endl; int maxlen = 100000000; char * s = (char*) malloc(maxlen); size_t bytesread = 0; char delims[] = " \t"; size_t linenum = 0; size_t lastlog = 0; /*** PHASE 1 - count ***/ while(fgets(s, maxlen, inf) != NULL) { linenum++; if (bytesread - lastlog >= 500000000) { logstream(LOG_DEBUG) << "Read " << linenum << " lines, " << bytesread / 1024 / 1024. << " MB" << std::endl; lastlog = bytesread; } FIXLINE(s); bytesread += strlen(s); if (s[0] == '#') continue; // Comment if (s[0] == '%') continue; // Comment char * t = strtok(s, delims); vid_t from = atoi(t); t = strtok(NULL,delims); if (t != NULL) { vid_t num = atoi(t); vid_t i = 0; while((t = strtok(NULL,delims)) != NULL) { vid_t to = atoi(t); if (from != to) { sharderobj.preprocessing_add_edge(from, to, EdgeDataType()); } i++; } if (num != i) logstream(LOG_ERROR) << "Mismatch when reading adjacency list: " << num << " != " << i << " s: " << std::string(s) << " on line: " << linenum << std::endl; assert(num == i); } } free(s); fclose(inf); } /** * Converts a graph from cassovary's (Twitter) format. Edge values are not supported, * and each edge gets the default value for the type. Self-edges are ignored. 
*/ template <typename EdgeDataType> void convert_cassovary(std::string basefilename, sharder<EdgeDataType> &sharderobj) { std::vector<std::string> parts; std::string dirname = get_dirname(basefilename); std::string prefix = get_filename(basefilename); std::cout << "dir=[" << dirname << "] prefix=[" << prefix << "]" << std::endl; getdir(dirname, parts); for(std::vector<std::string>::iterator it=parts.begin(); it != parts.end(); ++it) { std::string inputfile = *it; if (inputfile.find(prefix) == 0 && inputfile.find("tmp") == inputfile.npos) { std::cout << "Going to process: " << inputfile << std::endl; } } for(std::vector<std::string>::iterator it=parts.begin(); it != parts.end(); ++it) { std::string inputfile = *it; if (inputfile.find(prefix) == 0 && inputfile.find(".tmp") == inputfile.npos) { inputfile = dirname + "/" + inputfile; std::cout << "Process: " << inputfile << std::endl; FILE * inf = fopen(inputfile.c_str(), "r"); if (inf == NULL) { logstream(LOG_FATAL) << "Could not load :" << inputfile << " error: " << strerror(errno) << std::endl; } assert(inf != NULL); logstream(LOG_INFO) << "Reading in cassovary format!" << std::endl; int maxlen = 100000000; char * s = (char*) malloc(maxlen); size_t bytesread = 0; char delims[] = " \t"; size_t linenum = 0; size_t lastlog = 0; while(fgets(s, maxlen, inf) != NULL) { linenum++; if (bytesread - lastlog >= 500000000) { logstream(LOG_DEBUG) << "Read " << linenum << " lines, " << bytesread / 1024 / 1024. 
<< " MB" << std::endl; lastlog = bytesread; } FIXLINE(s); bytesread += strlen(s); if (s[0] == '#') continue; // Comment if (s[0] == '%') continue; // Comment char * t = strtok(s, delims); vid_t from = atoi(t); t = strtok(NULL,delims); if (t != NULL) { vid_t num = atoi(t); // Read next line linenum += num + 1; for(vid_t i=0; i < num; i++) { s = fgets(s, maxlen, inf); FIXLINE(s); vid_t to = atoi(s); if (from != to) { sharderobj.preprocessing_add_edge(from, to, EdgeDataType()); } } } } free(s); fclose(inf); } } } /** * Converts a set of files in the binedgelist format (binary edge list) */ template <typename EdgeDataType> void convert_binedgelist(std::string basefilename, sharder<EdgeDataType> &sharderobj) { std::vector<std::string> parts; std::string dirname = get_dirname(basefilename); std::string prefix = get_filename(basefilename); std::cout << "dir=[" << dirname << "] prefix=[" << prefix << "]" << std::endl; getdir(dirname, parts); for(std::vector<std::string>::iterator it=parts.begin(); it != parts.end(); ++it) { std::string inputfile = *it; if (inputfile.find(prefix) == 0 && inputfile.find("tmp") == inputfile.npos) { std::cout << "Going to process: " << inputfile << std::endl; } } for(std::vector<std::string>::iterator it=parts.begin(); it != parts.end(); ++it) { std::string inputfile = *it; if (inputfile.find(prefix) == 0 && inputfile.find(".tmp") == inputfile.npos) { inputfile = dirname + "/" + inputfile; std::cout << "Process: " << inputfile << std::endl; FILE * inf = fopen(inputfile.c_str(), "r"); while(!feof(inf)) { vid_t from; vid_t to; size_t res1 = fread(&from, sizeof(vid_t), 1, inf); size_t res2 = fread(&to, sizeof(vid_t), 1, inf); assert(res1 > 0 && res2 > 0); if (from != to) { sharderobj.preprocessing_add_edge(from, to, EdgeDataType()); } } fclose(inf); } } } // TODO: remove code duplication. 
template <typename EdgeDataType> void convert_binedgelistval(std::string basefilename, sharder<EdgeDataType> &sharderobj) { std::vector<std::string> parts; std::string dirname = get_dirname(basefilename); std::string prefix = get_filename(basefilename); std::cout << "dir=[" << dirname << "] prefix=[" << prefix << "]" << std::endl; getdir(dirname, parts); for(std::vector<std::string>::iterator it=parts.begin(); it != parts.end(); ++it) { std::string inputfile = *it; if (inputfile.find(prefix) == 0 && inputfile.find("tmp") == inputfile.npos) { std::cout << "Going to process: " << inputfile << std::endl; } } for(std::vector<std::string>::iterator it=parts.begin(); it != parts.end(); ++it) { std::string inputfile = *it; if (inputfile.find(prefix) == 0 && inputfile.find(".tmp") == inputfile.npos) { inputfile = dirname + "/" + inputfile; std::cout << "Process: " << inputfile << std::endl; FILE * inf = fopen(inputfile.c_str(), "r"); while(!feof(inf)) { vid_t from; vid_t to; EdgeDataType edgeval; size_t res1 = fread(&from, sizeof(vid_t), 1, inf); size_t res2 = fread(&to, sizeof(vid_t), 1, inf); size_t res3 = fread(&edgeval, sizeof(EdgeDataType), 1, inf); assert(res1 > 0 && res2 > 0 && res3 > 0); if (from != to) { sharderobj.preprocessing_add_edge(from, to, edgeval); } } fclose(inf); } } } /** * An abstract class for defining preprocessor objects * that modify the preprocessed binary input prior * to sharding. */ template <typename EdgeDataType> class SharderPreprocessor { public: virtual ~SharderPreprocessor() {} virtual std::string getSuffix() = 0; virtual void reprocess(std::string preprocFilename, std::string basefileName) = 0; }; /** * Converts a graph input to shards. Preprocessing has several steps, * see sharder.hpp for more information. 
*/ template <typename EdgeDataType> int convert(std::string basefilename, std::string nshards_string, SharderPreprocessor<EdgeDataType> * preprocessor = NULL) { std::string suffix = ""; if (preprocessor != NULL) { suffix = preprocessor->getSuffix(); } sharder<EdgeDataType> sharderobj(basefilename + suffix); if (!sharderobj.preprocessed_file_exists()) { std::string file_type_str = get_option_string_interactive("filetype", "edgelist, adjlist"); if (file_type_str != "adjlist" && file_type_str != "edgelist" && file_type_str != "binedgelist" && file_type_str != "multivalueedgelist") { logstream(LOG_ERROR) << "You need to specify filetype: 'edgelist' or 'adjlist'." << std::endl; assert(false); } /* Start preprocessing */ sharderobj.start_preprocessing(); if (file_type_str == "adjlist") { convert_adjlist<EdgeDataType>(basefilename, sharderobj); } else if (file_type_str == "edgelist") { convert_edgelist<EdgeDataType>(basefilename, sharderobj); #ifdef DYNAMICEDATA } else if (file_type_str == "multivalueedgelist" ) { convert_edgelist<EdgeDataType>(basefilename, sharderobj, true); #endif } else if (file_type_str == "binedgelist") { convert_binedgelistval<EdgeDataType>(basefilename, sharderobj); } else { assert(false); } /* Finish preprocessing */ sharderobj.end_preprocessing(); if (preprocessor != NULL) { preprocessor->reprocess(sharderobj.preprocessed_name(), basefilename); } } vid_t max_vertex_id = get_option_int("maxvertex", 0); if (max_vertex_id > 0) { sharderobj.set_max_vertex_id(max_vertex_id); } int nshards = sharderobj.execute_sharding(nshards_string); logstream(LOG_INFO) << "Successfully finished sharding for " << basefilename + suffix << std::endl; logstream(LOG_INFO) << "Created " << nshards << " shards." << std::endl; return nshards; } /** * Converts a graph input to shards with no edge values. Preprocessing has several steps, * see sharder.hpp for more information. 
*/ int convert_none(std::string basefilename, std::string nshards_string); int convert_none(std::string basefilename, std::string nshards_string) { std::string suffix = ""; sharder<dummy> sharderobj(basefilename + suffix); sharderobj.set_no_edgevalues(); if (!sharderobj.preprocessed_file_exists()) { std::string file_type_str = get_option_string_interactive("filetype", "edgelist, adjlist, cassovary, binedgelist"); if (file_type_str != "adjlist" && file_type_str != "edgelist" && file_type_str != "cassovary" && file_type_str != "binedgelist") { logstream(LOG_ERROR) << "You need to specify filetype: 'edgelist' or 'adjlist'." << std::endl; assert(false); } /* Start preprocessing */ sharderobj.start_preprocessing(); if (file_type_str == "adjlist") { convert_adjlist<dummy>(basefilename, sharderobj); } else if (file_type_str == "edgelist") { convert_edgelist<dummy>(basefilename, sharderobj); } else if (file_type_str == "cassovary") { convert_cassovary<dummy>(basefilename, sharderobj); } else if (file_type_str == "binedgelist") { convert_binedgelist<dummy>(basefilename, sharderobj); } /* Finish preprocessing */ sharderobj.end_preprocessing(); } if (get_option_int("skipsharding", 0) == 1) { std::cout << "Skip sharding..." << std::endl; exit(0); } vid_t max_vertex_id = get_option_int("maxvertex", 0); if (max_vertex_id > 0) { sharderobj.set_max_vertex_id(max_vertex_id); } int nshards = sharderobj.execute_sharding(nshards_string); logstream(LOG_INFO) << "Successfully finished sharding for " << basefilename + suffix << std::endl; logstream(LOG_INFO) << "Created " << nshards << " shards." 
<< std::endl; return nshards; } template <typename EdgeDataType> int convert_if_notexists(std::string basefilename, std::string nshards_string, bool &didexist, SharderPreprocessor<EdgeDataType> * preprocessor = NULL) { int nshards; std::string suffix = ""; if (preprocessor != NULL) { suffix = preprocessor->getSuffix(); } /* Check if input file is already sharded */ if ((nshards = find_shards<EdgeDataType>(basefilename + suffix, nshards_string))) { logstream(LOG_INFO) << "Found preprocessed files for " << basefilename << ", num shards=" << nshards << std::endl; didexist = true; if (check_origfile_modification_earlier<EdgeDataType>(basefilename + suffix, nshards)) { return nshards; } } didexist = false; logstream(LOG_INFO) << "Did not find preprocessed shards for " << basefilename + suffix << std::endl; logstream(LOG_INFO) << "(Edge-value size: " << sizeof(EdgeDataType) << ")" << std::endl; logstream(LOG_INFO) << "Will try create them now..." << std::endl; nshards = convert<EdgeDataType>(basefilename, nshards_string, preprocessor); return nshards; } template <typename EdgeDataType> int convert_if_notexists(std::string basefilename, std::string nshards_string, SharderPreprocessor<EdgeDataType> * preprocessor = NULL) { bool b; return convert_if_notexists<EdgeDataType>(basefilename, nshards_string, b, preprocessor); } struct vertex_degree { int deg; vid_t id; vertex_degree() {} vertex_degree(int deg, vid_t id) : deg(deg), id(id) {} }; static bool vertex_degree_less(const vertex_degree &a, const vertex_degree &b); static bool vertex_degree_less(const vertex_degree &a, const vertex_degree &b) { return a.deg < b.deg || (a.deg == b.deg && a.id < b.id); } /** * Special preprocessor which relabels vertices in ascending order * of their degree. 
*/ template <typename EdgeDataType> class OrderByDegree : public SharderPreprocessor<EdgeDataType> { int phase; public: typedef edge_with_value<EdgeDataType> edge_t; vid_t * translate_table; vid_t max_vertex_id; vertex_degree * degarray; binary_adjacency_list_writer<EdgeDataType> * writer; OrderByDegree() { degarray = NULL; writer = NULL; } ~OrderByDegree() { if (degarray != NULL) free(degarray); degarray = NULL; if (writer != NULL) delete writer; writer = NULL; } std::string getSuffix() { return "_degord"; } vid_t translate(vid_t vid) { if (vid > max_vertex_id) return vid; return translate_table[vid]; } /** * Callback function that binary_adjacency_list_reader * invokes. In first phase, the degrees of vertice sare collected. * In the next face, they are written out to the degree-ordered data. * Note: this version does not preserve edge values! */ void receive_edge(vid_t from, vid_t to, EdgeDataType value, bool is_value) { if (phase == 0) { degarray[from].deg++; degarray[to].deg++; } else { writer->add_edge(translate(from), translate(to)); // Value is ignored } } void reprocess(std::string preprocessedFile, std::string baseFilename) { binary_adjacency_list_reader<EdgeDataType> reader(preprocessedFile); max_vertex_id = (vid_t) reader.get_max_vertex_id(); degarray = (vertex_degree *) calloc(max_vertex_id + 1, sizeof(vertex_degree)); vid_t nverts = max_vertex_id + 1; for(vid_t i=0; i < nverts; i++) { degarray[i].id = i; } phase = 0; /* Reader will invoke receive_edge() above */ reader.read_edges(this); /* Now sort */ quickSort(degarray, nverts, vertex_degree_less); /* Create translation table */ translate_table = (vid_t*) calloc(sizeof(vid_t), nverts); for(vid_t i=0; i<nverts; i++) { translate_table[degarray[i].id] = i; } delete degarray; /* Write translate table */ std::string translate_table_file = baseFilename + ".vertexmap"; int df = open(translate_table_file.c_str(), O_RDWR | O_CREAT, S_IROTH | S_IWOTH | S_IWUSR | S_IRUSR); if (df < 0) logstream(LOG_ERROR) << 
"Could not write vertex map: " << translate_table_file << " error: " << strerror(errno) << std::endl; assert(df >= 0); pwritea(df, translate_table, nverts * sizeof(vid_t), 0); close(df); /* Now recreate the processed file */ std::string tmpfilename = preprocessedFile + ".old"; rename(preprocessedFile.c_str(), tmpfilename.c_str()); writer = new binary_adjacency_list_writer<EdgeDataType>(preprocessedFile); binary_adjacency_list_reader<EdgeDataType> reader2(tmpfilename); phase = 1; reader2.read_edges(this); writer->finish(); delete writer; writer = NULL; delete translate_table; } }; } // end namespace #endif
09jijiangwen-download
src/preprocessing/conversions.hpp
C++
asf20
28,845
/** * @file * @author Aapo Kyrola <akyrola@cs.cmu.edu> * @version 1.0 * * @section LICENSE * * Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University] * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * @section DESCRIPTION * * Class representing a binary adjacency list format used by the * sharder. Note, this format does not comply with standard (if there are * any) formats. * * File format supports edges with and without values. */ #ifndef DEF_GRAPHCHI_BINADJLIST_FORMAT #define DEF_GRAPHCHI_BINADJLIST_FORMAT #include <assert.h> #include <stdio.h> #include <stdint.h> #include <unistd.h> #include <errno.h> #include <string> #include "graphchi_types.hpp" #include "logger/logger.hpp" #include "util/ioutil.hpp" namespace graphchi { #define FORMAT_VERSION 20120705 // Format version is the date it was conceived /** * Header struct */ struct bin_adj_header { int format_version; uint64_t max_vertex_id; // Note, use 64-bit to be future-proof. bool contains_edge_values; uint32_t edge_value_size; uint64_t numedges; }; /** * Internal container class. 
*/ template <typename EdgeDataType> struct edge_with_value_badj { vid_t vertex; EdgeDataType value; edge_with_value_badj() {} edge_with_value_badj(vid_t v, EdgeDataType x) : vertex(v), value(x) {} }; template <typename EdgeDataType> class binary_adjacency_list_reader { std::string filename; int fd; size_t fpos; size_t blocklen; size_t blocksize; size_t total_to_process; char * block; char * blockptr; bin_adj_header header; template <typename U> inline U read_val() { if (blockptr + sizeof(U) > block + blocklen) { // Read blocklen = std::min(blocksize, total_to_process - fpos); preada(fd, block, blocklen, fpos); blockptr = block; } U res = *((U*)blockptr); blockptr += sizeof(U); fpos += sizeof(U); return res; } public: binary_adjacency_list_reader(std::string filename) : filename(filename) { fd = open(filename.c_str(), O_RDONLY); if (fd < 0) { logstream(LOG_FATAL) << "Could not open file: " << filename << " error: " << strerror(errno) << std::endl; } assert(fd >= 0); blocksize = (size_t) get_option_long("preprocessing.bufsize", 64 * 1024 * 1024); block = (char*) malloc(blocksize); blockptr = block; total_to_process = get_filesize(filename); blocklen = 0; fpos = 0; header = read_val<bin_adj_header>(); assert(header.format_version == FORMAT_VERSION); } ~binary_adjacency_list_reader() { if (block != NULL) free(block); close(fd); } template <class Callback> void read_edges(Callback * callback) { size_t nedges = 0; /* Note, header has been read in the beginning */ do { if (nedges % 10000000 == 0) { logstream(LOG_DEBUG) << (fpos * 1.0 / total_to_process * 100) << "%" << std::endl; } vid_t from; vid_t to; int adjlen; EdgeDataType val = EdgeDataType(); from = read_val<vid_t>(); adjlen = (int) read_val<uint8_t>(); assert(adjlen > 0); for(int i=0; i < adjlen; i++) { to = read_val<vid_t>(); if (header.contains_edge_values) { val = read_val<EdgeDataType>(); } callback->receive_edge(from, to, val, header.contains_edge_values); nedges++; } } while (nedges < header.numedges); } 
bool has_edge_values() { return header.contains_edge_values; } size_t get_max_vertex_id() { return header.max_vertex_id; } size_t get_numedges() { return header.numedges; } }; template <typename EdgeDataType> class binary_adjacency_list_writer { private: std::string filename; int fd; bin_adj_header header; int bufsize; char * buf; char * bufptr; bool initialized; edge_with_value_badj<EdgeDataType> samev_buf[256]; vid_t lastid; uint8_t counter; public: binary_adjacency_list_writer(std::string filename) : filename(filename) { bufsize = (int) get_option_int("preprocessing.bufsize", 64 * 1024 * 1024); assert(bufsize > 1024 * 1024); fd = open(filename.c_str(), O_WRONLY | O_CREAT, S_IROTH | S_IWOTH | S_IWUSR | S_IRUSR); if (fd < 0) { logstream(LOG_FATAL) << "Could not open file " << filename << " for writing. " << " Error: " << strerror(errno) << std::endl; } int res = ftruncate(fd, 0); if (res != 0) { logstream(LOG_FATAL) << "Could not truncate file " << filename << " Error: " << strerror(errno) << std::endl; } assert(res == 0); header.format_version = FORMAT_VERSION; header.max_vertex_id = 0; header.contains_edge_values = false; header.numedges = 0; header.edge_value_size = (uint32_t) sizeof(EdgeDataType); buf = (char*) malloc(bufsize); bufptr = buf; bwrite<bin_adj_header>(fd, buf, bufptr, header); counter = 0; lastid = 0; initialized = false; assert(fd >= 0); } ~binary_adjacency_list_writer() { if (buf != NULL) delete buf; } protected: void write_header() { logstream(LOG_DEBUG) << "Write header: max vertex: " << header.max_vertex_id << std::endl; pwritea(fd, &header, sizeof(bin_adj_header), 0); } /** * Write edges for the current vertex (lastid) */ void flush() { if (counter != 0) { bwrite<vid_t>(fd, buf, bufptr, lastid); bwrite<uint8_t>(fd, buf, bufptr, counter); for(int i=0; i < counter; i++) { bwrite<vid_t>(fd, buf, bufptr, samev_buf[i].vertex); if (header.contains_edge_values) { bwrite<EdgeDataType>(fd, buf, bufptr, samev_buf[i].value); } } header.numedges += 
(uint64_t)counter; counter = 0; } } void _addedge(vid_t from, vid_t to, EdgeDataType val) { if (from == to) return; // Filter self-edges if (from == lastid && counter > 0) { samev_buf[counter++] = edge_with_value_badj<EdgeDataType>(to, val); } else { flush(); lastid = from; samev_buf[counter++] = edge_with_value_badj<EdgeDataType>(to, val); } if (counter == 255) { /* Flush */ flush(); counter = 0; } if (from > header.max_vertex_id || to > header.max_vertex_id) { header.max_vertex_id = std::max(from, to); } } public: void add_edge(vid_t from, vid_t to, EdgeDataType val) { if (!initialized) { header.contains_edge_values = true; initialized = true; } if (!header.contains_edge_values) { logstream(LOG_ERROR) << "Tried to add edge with a value, although previously added one with a value!" << std::endl; } assert(header.contains_edge_values); _addedge(from, to, val); } void add_edge(vid_t from, vid_t to) { if (!initialized) { header.contains_edge_values = false; initialized = true; } if (header.contains_edge_values) { logstream(LOG_ERROR) << "Tried to add edge without a value, although previously added edge with a value!" << std::endl; } assert(!header.contains_edge_values); _addedge(from, to, EdgeDataType()); } bool has_edge_values() { return header.contains_edge_values; } void finish() { flush(); /* Write rest of the buffer out */ writea(fd, buf, bufptr - buf); free(buf); buf = NULL; write_header(); close(fd); } /** Buffered write function */ template <typename T> void bwrite(int f, char * buf, char * &bufptr, T val) { if (bufptr + sizeof(T) - buf >= bufsize) { writea(f, buf, bufptr - buf); bufptr = buf; } *((T*)bufptr) = val; bufptr += sizeof(T); } }; } #endif
09jijiangwen-download
src/preprocessing/formats/binary_adjacency_list.hpp
C++
asf20
10,343
/** * @file * @author Aapo Kyrola <akyrola@cs.cmu.edu> * @version 1.0 * * @section LICENSE * * Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University] * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * @section DESCRIPTION * * Splits shards into blocks. Experimental. */ #include <iostream> #include <stdlib.h> #include <string> #include <assert.h> #include <unistd.h> #include <fstream> #include <sys/stat.h> #include "api/chifilenames.hpp" #include "io/stripedio.hpp" #include "logger/logger.hpp" #include "util/ioutil.hpp" #include "util/cmdopts.hpp" #include "preprocessing/conversions.hpp" #include "preprocessing/sharder.hpp" using namespace graphchi; typedef float EdgeDataType; int main(int argc, const char ** argv) { graphchi_init(argc, argv); global_logger().set_log_level(LOG_DEBUG); std::string filename = get_option_string("file"); int nshards = convert_if_notexists<EdgeDataType>(filename, get_option_string("nshards", "auto")); size_t blocksize= get_option_long("blocksize", 4096 * 1024); char * buf = (char *) malloc(blocksize); for(int p=0; p < nshards; p++) { std::string shard_filename = filename_shard_edata<EdgeDataType>(filename, p, nshards); int f = open(shard_filename.c_str(), O_RDONLY); size_t fsize = get_filesize(shard_filename); size_t nblocks = fsize / blocksize + (fsize % blocksize != 0); size_t idx = 0; std::string block_dirname = dirname_shard_edata_block(shard_filename, blocksize); logstream(LOG_INFO) << "Going to create: " << 
block_dirname << std::endl; int err = mkdir(block_dirname.c_str(), 0777); if (err != 0) { logstream(LOG_ERROR) << strerror(errno) << std::endl; } for(int i=0; i < nblocks; i++) { size_t len = std::min(blocksize, fsize - idx); preada(f, buf, len, idx); std::string block_filename = filename_shard_edata_block(shard_filename, i, blocksize); int bf = open(block_filename.c_str(), O_RDWR | O_CREAT, S_IROTH | S_IWOTH | S_IWUSR | S_IRUSR); write_compressed(bf, buf, len); close(bf); idx += blocksize; } close(f); std::string sizefilename = shard_filename + ".size"; std::ofstream ofs(sizefilename.c_str()); ofs << fsize; ofs.close(); } }
09jijiangwen-download
src/preprocessing/blocksplitter.cpp
C++
asf20
3,005
/** * @file * @author Aapo Kyrola <akyrola@cs.cmu.edu> * @version 1.0 * * @section LICENSE * * Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University] * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * @section DESCRIPTION * * Sharder_basic can convert graphs from the edgelist and adjacency * list representations to shards used by the GraphChi system. */ #include <iostream> #include <stdlib.h> #include <string> #include <assert.h> #include "logger/logger.hpp" #include "preprocessing/conversions.hpp" #include "preprocessing/sharder.hpp" #include "util/cmdopts.hpp" using namespace graphchi; int main(int argc, const char ** argv) { graphchi_init(argc, argv); global_logger().set_log_level(LOG_DEBUG); std::string basefile = get_option_string_interactive("file", "[path to the input graph]"); std::string edge_data_type = get_option_string_interactive("edgedatatype", "int, uint, short, float, char, double, boolean, long, float-float, int-int, none"); std::string nshards_str = get_option_string_interactive("nshards", "Number of shards to create, or 'auto'"); if (edge_data_type == "float") { convert<float>(basefile, nshards_str); } if (edge_data_type == "float-float") { convert<PairContainer<float> >(basefile, nshards_str); } else if (edge_data_type == "int") { convert<int>(basefile, nshards_str); } else if (edge_data_type == "uint") { convert<unsigned int>(basefile, nshards_str); } else if (edge_data_type == "int-int") { convert<PairContainer<int> 
>(basefile, nshards_str); } else if (edge_data_type == "short") { convert<short>(basefile, nshards_str); } else if (edge_data_type == "double") { convert<double>(basefile, nshards_str); } else if (edge_data_type == "char") { convert<char>(basefile, nshards_str); } else if (edge_data_type == "boolean") { convert<bool>(basefile, nshards_str); } else if (edge_data_type == "long") { convert<long>(basefile, nshards_str); } else if (edge_data_type == "none") { convert_none(basefile, nshards_str); } else { logstream(LOG_ERROR) << "You need to specify edgedatatype. Currently supported: int, short, float, char, double, boolean, long."; return -1; } return 0; }
09jijiangwen-download
src/preprocessing/sharder_basic.cpp
C++
asf20
2,898
/** * @file * @author Aapo Kyrola <akyrola@cs.cmu.edu> * @version 1.0 * * @section LICENSE * * Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University] * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * @section DESCRIPTION * * Sharder converts a graph into shards which the GraphChi engine * can process. */ /** * @section TODO * Change all C-style IO to Unix-style IO. */ #ifndef GRAPHCHI_SHARDER_DEF #define GRAPHCHI_SHARDER_DEF #include <iostream> #include <cstdio> #include <fcntl.h> #include <unistd.h> #include <sys/stat.h> #include <vector> #include <omp.h> #include <errno.h> #include <sstream> #include <string> #include "api/chifilenames.hpp" #include "api/graphchi_context.hpp" #include "graphchi_types.hpp" #include "io/stripedio.hpp" #include "logger/logger.hpp" #include "engine/auxdata/degree_data.hpp" #include "metrics/metrics.hpp" #include "metrics/reps/basic_reporter.hpp" #include "preprocessing/formats/binary_adjacency_list.hpp" #include "shards/memoryshard.hpp" #include "shards/slidingshard.hpp" #include "util/ioutil.hpp" #include "util/qsort.hpp" namespace graphchi { #define SHARDER_BUFSIZE (64 * 1024 * 1024) enum ProcPhase { COMPUTE_INTERVALS=1, SHOVEL=2 }; template <typename EdgeDataType> struct edge_with_value { vid_t src; vid_t dst; EdgeDataType value; #ifdef DYNAMICEDATA // For dynamic edge data, we need to know if the value needs to be added // to the vector, or are we storing an empty vector. 
bool is_chivec_value; uint16_t valindex; #endif edge_with_value() {} edge_with_value(vid_t src, vid_t dst, EdgeDataType value) : src(src), dst(dst), value(value) { #ifdef DYNAMICEDATA is_chivec_value = false; valindex = 0; #endif } bool stopper() { return src == 0 && dst == 0; } }; template <typename EdgeDataType> bool edge_t_src_less(const edge_with_value<EdgeDataType> &a, const edge_with_value<EdgeDataType> &b) { if (a.src == b.src) { #ifdef DYNAMICEDATA if (a.dst == b.dst) { return a.valindex < b.valindex; } #endif return a.dst < b.dst; } return a.src < b.src; } template <typename EdgeDataType> class sharder { typedef edge_with_value<EdgeDataType> edge_t; protected: std::string basefilename; vid_t max_vertex_id; /* Sharding */ int nshards; std::vector< std::pair<vid_t, vid_t> > intervals; std::vector< size_t > shovelsizes; std::vector< int > shovelblocksidxs; int phase; int * edgecounts; int vertexchunk; size_t nedges; std::string prefix; int compressed_block_size; edge_t ** bufs; int * bufptrs; size_t bufsize; size_t edgedatasize; size_t ebuffer_size; size_t edges_per_block; vid_t filter_max_vertex; bool no_edgevalues; #ifdef DYNAMICEDATA edge_t last_added_edge; #endif metrics m; binary_adjacency_list_writer<EdgeDataType> * preproc_writer; public: sharder(std::string basefilename) : basefilename(basefilename), m("sharder"), preproc_writer(NULL) { bufs = NULL; edgedatasize = sizeof(EdgeDataType); no_edgevalues = false; compressed_block_size = 4096 * 1024; filter_max_vertex = 0; while (compressed_block_size % sizeof(EdgeDataType) != 0) compressed_block_size++; edges_per_block = compressed_block_size / sizeof(EdgeDataType); } virtual ~sharder() { if (preproc_writer != NULL) { delete preproc_writer; } } void set_max_vertex_id(vid_t maxid) { filter_max_vertex = maxid; } void set_no_edgevalues() { no_edgevalues = true; } std::string preprocessed_name() { return preprocess_filename<EdgeDataType>(basefilename); } /** * Checks if the preprocessed binary temporary file 
of a graph already exists, * so it does not need to be recreated. */ bool preprocessed_file_exists() { int f = open(preprocessed_name().c_str(), O_RDONLY); if (f >= 0) { close(f); return true; } else { return false; } } /** * Call to start a preprocessing session. */ void start_preprocessing() { if (preproc_writer != NULL) { logstream(LOG_FATAL) << "start_preprocessing() already called! Aborting." << std::endl; } m.start_time("preprocessing"); std::string tmpfilename = preprocessed_name() + ".tmp"; preproc_writer = new binary_adjacency_list_writer<EdgeDataType>(tmpfilename); logstream(LOG_INFO) << "Started preprocessing: " << basefilename << " --> " << tmpfilename << std::endl; /* Write the maximum vertex id place holder - to be filled later */ max_vertex_id = 0; } /** * Call to finish the preprocessing session. */ void end_preprocessing() { assert(preproc_writer != NULL); preproc_writer->finish(); delete preproc_writer; preproc_writer = NULL; /* Rename temporary file */ std::string tmpfilename = preprocessed_name() + ".tmp"; rename(tmpfilename.c_str(), preprocessed_name().c_str()); assert(preprocessed_file_exists()); logstream(LOG_INFO) << "Finished preprocessing: " << basefilename << " --> " << preprocessed_name() << std::endl; m.stop_time("preprocessing"); } /** * Add edge to be preprocessed with a value. 
*/ void preprocessing_add_edge(vid_t from, vid_t to, EdgeDataType val) { preproc_writer->add_edge(from, to, val); max_vertex_id = std::max(std::max(from, to), max_vertex_id); } #ifdef DYNAMICEDATA void preprocessing_add_edge_multival(vid_t from, vid_t to, std::vector<EdgeDataType> & vals) { typename std::vector<EdgeDataType>::iterator iter; for(iter=vals.begin(); iter != vals.end(); ++iter) { preproc_writer->add_edge(from, to, *iter); } max_vertex_id = std::max(std::max(from, to), max_vertex_id); } #endif /** * Add edge without value to be preprocessed */ void preprocessing_add_edge(vid_t from, vid_t to) { preproc_writer->add_edge(from, to); max_vertex_id = std::max(std::max(from, to), max_vertex_id); } /** Buffered write function */ template <typename T> void bwrite(int f, char * buf, char * &bufptr, T val) { if (bufptr + sizeof(T) - buf >= SHARDER_BUFSIZE) { writea(f, buf, bufptr - buf); bufptr = buf; } *((T*)bufptr) = val; bufptr += sizeof(T); } int blockid; template <typename T> void edata_flush(char * buf, char * bufptr, std::string & shard_filename, size_t totbytes) { int len = (int) (bufptr - buf); m.start_time("edata_flush"); std::string block_filename = filename_shard_edata_block(shard_filename, blockid, compressed_block_size); int f = open(block_filename.c_str(), O_RDWR | O_CREAT, S_IROTH | S_IWOTH | S_IWUSR | S_IRUSR); write_compressed(f, buf, len); close(f); m.stop_time("edata_flush"); #ifdef DYNAMICEDATA // Write block's uncompressed size write_block_uncompressed_size(block_filename, len); #endif blockid++; } template <typename T> void bwrite_edata(char * &buf, char * &bufptr, T val, size_t & totbytes, std::string & shard_filename, size_t & edgecounter) { if (no_edgevalues) return; if (edgecounter == edges_per_block) { edata_flush<T>(buf, bufptr, shard_filename, totbytes); bufptr = buf; edgecounter = 0; } // Check if buffer is big enough if (bufptr - buf + sizeof(T) > ebuffer_size) { ebuffer_size *= 2; logstream(LOG_DEBUG) << "Increased buffer size to: 
" << ebuffer_size << std::endl; size_t ptroff = bufptr - buf; // Remember the offset buf = (char *) realloc(buf, ebuffer_size); bufptr = buf + ptroff; } totbytes += sizeof(T); *((T*)bufptr) = val; bufptr += sizeof(T); } bool try_load_intervals() { std::vector<std::pair<vid_t, vid_t> > tmpintervals; load_vertex_intervals(basefilename, nshards, tmpintervals, true); if (tmpintervals.empty()) { return false; } intervals = tmpintervals; return true; } /** * Executes sharding. * @param nshards_string the number of shards as a number, or "auto" for automatic determination */ int execute_sharding(std::string nshards_string) { m.start_time("execute_sharding"); determine_number_of_shards(nshards_string); if (nshards == 1) { binary_adjacency_list_reader<EdgeDataType> reader(preprocessed_name()); max_vertex_id = (vid_t) reader.get_max_vertex_id(); one_shard_intervals(); } for(int phase=1; phase <= 2; ++phase) { if (nshards == 1 && phase == 1) continue; // No need for the first phase /* Start the sharing process */ binary_adjacency_list_reader<EdgeDataType> reader(preprocessed_name()); /* Read max vertex id */ max_vertex_id = (vid_t) reader.get_max_vertex_id(); if (filter_max_vertex > 0) { max_vertex_id = filter_max_vertex; } logstream(LOG_INFO) << "Max vertex id: " << max_vertex_id << std::endl; if (phase == 1) { if (try_load_intervals()) { // Hack: if intervals already computed, can skip that phase logstream(LOG_INFO) << "Found intervals-file, skipping that step!" << std::endl; continue; } } this->start_phase(phase); reader.read_edges(this); this->end_phase(); } /* Write the shards */ write_shards(); m.stop_time("execute_sharding"); /* Print metrics */ basic_reporter basicrep; m.report(basicrep); return nshards; } /** * Sharding. This code might be hard to read - modify very carefully! 
*/ protected: virtual void determine_number_of_shards(std::string nshards_string) { assert(preprocessed_file_exists()); if (nshards_string.find("auto") != std::string::npos || nshards_string == "0") { logstream(LOG_INFO) << "Determining number of shards automatically." << std::endl; int membudget_mb = get_option_int("membudget_mb", 1024); logstream(LOG_INFO) << "Assuming available memory is " << membudget_mb << " megabytes. " << std::endl; logstream(LOG_INFO) << " (This can be defined with configuration parameter 'membudget_mb')" << std::endl; binary_adjacency_list_reader<EdgeDataType> reader(preprocessed_name()); size_t numedges = reader.get_numedges(); double max_shardsize = membudget_mb * 1024. * 1024. / 8; logstream(LOG_INFO) << "Determining maximum shard size: " << (max_shardsize / 1024. / 1024.) << " MB." << std::endl; nshards = (int) ( 2 + (numedges * sizeof(EdgeDataType) / max_shardsize) + 0.5); #ifdef DYNAMICEDATA // For dynamic edge data, more working memory is needed, thus the number of shards is larger. 
nshards = (int) ( 2 + 4 * (numedges * sizeof(EdgeDataType) / max_shardsize) + 0.5); #endif } else { nshards = atoi(nshards_string.c_str()); } assert(nshards > 0); logstream(LOG_INFO) << "Number of shards to be created: " << nshards << std::endl; } void compute_partitionintervals() { size_t edges_per_part = nedges / nshards + 1; logstream(LOG_INFO) << "Number of shards: " << nshards << std::endl; logstream(LOG_INFO) << "Edges per shard: " << edges_per_part << std::endl; logstream(LOG_INFO) << "Max vertex id: " << max_vertex_id << std::endl; vid_t cur_st = 0; size_t edgecounter=0; std::string fname = filename_intervals(basefilename, nshards); FILE * f = fopen(fname.c_str(), "w"); if (f == NULL) { logstream(LOG_ERROR) << "Could not open file: " << fname << " error: " << strerror(errno) << std::endl; } assert(f != NULL); vid_t i = 0; while(nshards > (int) intervals.size()) { i += vertexchunk; edgecounter += edgecounts[i / vertexchunk]; if (edgecounter >= edges_per_part || (i >= max_vertex_id)) { intervals.push_back(std::pair<vid_t,vid_t>(cur_st, std::min(i, max_vertex_id))); logstream(LOG_INFO) << "Interval: " << cur_st << " - " << i << std::endl; fprintf(f, "%u\n", std::min(i, max_vertex_id)); cur_st = i + 1; edgecounter = 0; } } fclose(f); assert(nshards == (int)intervals.size()); /* Write meta-file with the number of vertices */ std::string numv_filename = basefilename + ".numvertices"; f = fopen(numv_filename.c_str(), "w"); fprintf(f, "%u\n", 1 + max_vertex_id); fclose(f); logstream(LOG_INFO) << "Computed intervals." 
<< std::endl; } void one_shard_intervals() { assert(nshards == 1); std::string fname = filename_intervals(basefilename, nshards); FILE * f = fopen(fname.c_str(), "w"); intervals.push_back(std::pair<vid_t,vid_t>(0, max_vertex_id)); fprintf(f, "%u\n", max_vertex_id); fclose(f); /* Write meta-file with the number of vertices */ std::string numv_filename = basefilename + ".numvertices"; f = fopen(numv_filename.c_str(), "w"); fprintf(f, "%u\n", 1 + max_vertex_id); fclose(f); assert(nshards == (int)intervals.size()); } std::string shovel_filename(int shard) { std::stringstream ss; ss << basefilename << shard << "." << nshards << ".shovel"; return ss.str(); } void start_phase(int p) { phase = p; lastpart = 0; logstream(LOG_INFO) << "Starting phase: " << phase << std::endl; switch (phase) { case COMPUTE_INTERVALS: /* To compute the intervals, we need to keep track of the vertex degrees. If there is not enough memory to store degree for each vertex, we combine degrees of successive vertice. This results into less accurate shard split, but in practice it hardly matters. 
*/ vertexchunk = (int) (max_vertex_id * sizeof(int) / (1024 * 1024 * get_option_long("membudget_mb", 1024))); if (vertexchunk<1) vertexchunk = 1; edgecounts = (int*)calloc( max_vertex_id / vertexchunk + 1, sizeof(int)); nedges = 0; break; case SHOVEL: #ifdef DYNAMICEDATA last_added_edge = edge_t(-1, -1, EdgeDataType()); #endif shovelsizes.resize(nshards); shovelblocksidxs.resize(nshards); bufs = new edge_t*[nshards]; bufptrs = new int[nshards]; size_t membudget_mb = get_option_long("membudget_mb", 1024); if (membudget_mb > 3000) membudget_mb = 3000; // Cap to 3 gigs for this purpose bufsize = (1024 * 1024 * membudget_mb) / nshards / 4; while(bufsize % sizeof(edge_t) != 0) bufsize++; logstream(LOG_DEBUG)<< "Shoveling bufsize: " << bufsize << std::endl; for(int i=0; i < nshards; i++) { shovelsizes[i] = 0; shovelblocksidxs[i] = 0; bufs[i] = (edge_t*) malloc(bufsize); bufptrs[i] = 0; } break; } } void end_phase() { logstream(LOG_INFO) << "Ending phase: " << phase << std::endl; switch (phase) { case COMPUTE_INTERVALS: compute_partitionintervals(); free(edgecounts); edgecounts = NULL; break; case SHOVEL: for(int i=0; i<nshards; i++) { swrite(i, edge_t(0, 0, EdgeDataType()), true); free(bufs[i]); } free(bufs); free(bufptrs); break; } } int lastpart; void swrite(int shard, edge_t et, bool flush=false) { if (!flush) bufs[shard][bufptrs[shard]++] = et; if (flush || bufptrs[shard] * sizeof(edge_t) >= bufsize) { m.start_time("shovel_flush"); std::stringstream ss; ss << shovel_filename(shard) << "." 
<< shovelblocksidxs[shard]; std::string shovelfblockname = ss.str(); int bf = open(shovelfblockname.c_str(), O_WRONLY | O_CREAT, S_IROTH | S_IWOTH | S_IWUSR | S_IRUSR); size_t len = sizeof(edge_t) * bufptrs[shard]; writea(bf, bufs[shard], len); bufptrs[shard] = 0; close(bf); shovelsizes[shard] += len; shovelblocksidxs[shard] ++; m.stop_time("shovel_flush"); logstream(LOG_DEBUG) << "Flushed " << shovelfblockname << " bufsize: " << bufsize << std::endl; } } /** * Called on the second and third phase of the preprocessing by binary_adjacency_list reader. */ void receive_edge(vid_t from, vid_t to, EdgeDataType value, bool input_value) { if (to == from) { logstream(LOG_WARNING) << "Tried to add self-edge " << from << "->" << to << std::endl; return; } if (from > max_vertex_id || to > max_vertex_id) { if (max_vertex_id == 0) { logstream(LOG_ERROR) << "Tried to add an edge with too large from/to values. From:" << from << " to: "<< to << " max: " << max_vertex_id << std::endl; assert(false); } else { return; } } switch (phase) { case COMPUTE_INTERVALS: edgecounts[to / vertexchunk]++; nedges++; break; case SHOVEL: bool found=false; for(int i=0; i < nshards; i++) { int shard = (lastpart + i) % nshards; if (to >= intervals[shard].first && to <= intervals[shard].second) { edge_t e(from, to, value); #ifdef DYNAMICEDATA e.is_chivec_value = input_value; // Keep track of multiple values for same edge if (last_added_edge.src == e.src && last_added_edge.dst == to) { e.valindex = last_added_edge.valindex + 1; } last_added_edge = e; #endif swrite(shard, e); lastpart = shard; // Small optimizations, which works if edges are in order for each vertex - not much though found = true; break; } } if(!found) { logstream(LOG_ERROR) << "Shard not found for : " << to << std::endl; } assert(found); break; } } size_t read_shovel(int shard, char ** data) { m.start_time("read_shovel"); size_t sz = shovelsizes[shard]; *data = (char *) malloc(sz); char * ptr = * data; size_t nread = 0; int blockidx = 
0; while(true) { size_t len = std::min(bufsize, sz-nread); std::stringstream ss; ss << shovel_filename(shard) << "." << blockidx; std::string shovelfblockname = ss.str(); int f = open(shovelfblockname.c_str(), O_RDONLY); if (f < 0) break; m.start_time("shovel_read"); preada(f, ptr, len, 0); m.stop_time("shovel_read"); nread += len; ptr += len; close(f); blockidx++; remove(shovelfblockname.c_str()); } m.stop_time("read_shovel"); assert(nread == sz); return sz; } /** * Write the shard by sorting the shovel file and compressing the * adjacency information. * To support different shard types, override this function! */ virtual void write_shards() { int membudget_mb = get_option_int("membudget_mb", 1024); // Check if we have enough memory to keep track // of the vertex degrees in-memory (heuristic) bool count_degrees_inmem = size_t(membudget_mb) * 1024 * 1024 / 3 > max_vertex_id * sizeof(degree); #ifdef DYNAMICEDATA if (!count_degrees_inmem) { /* Temporary: force in-memory count of degrees because the PSW-based computation is not yet compatible with dynamic edge data. */ logstream(LOG_WARNING) << "Dynamic edge data support only sharding when the vertex degrees can be computed in-memory." << std::endl; logstream(LOG_WARNING) << "If the program gets very slow (starts swapping), the data size is too big." 
<< std::endl; count_degrees_inmem = true; } #endif degree * degrees = NULL; if (count_degrees_inmem) { degrees = (degree *) calloc(1 + max_vertex_id, sizeof(degree)); } for(int shard=0; shard < nshards; shard++) { m.start_time("shard_final"); blockid = 0; size_t edgecounter = 0; logstream(LOG_INFO) << "Starting final processing for shard: " << shard << std::endl; std::string fname = filename_shard_adj(basefilename, shard, nshards); std::string edfname = filename_shard_edata<EdgeDataType>(basefilename, shard, nshards); std::string edblockdirname = dirname_shard_edata_block(edfname, compressed_block_size); /* Make the block directory */ if (!no_edgevalues) mkdir(edblockdirname.c_str(), 0777); edge_t * shovelbuf; size_t shovelsize = read_shovel(shard, (char**) &shovelbuf); size_t numedges = shovelsize / sizeof(edge_t); logstream(LOG_DEBUG) << "Shovel size:" << shovelsize << " edges: " << numedges << std::endl; quickSort(shovelbuf, (int)numedges, edge_t_src_less<EdgeDataType>); // Create the final file int f = open(fname.c_str(), O_WRONLY | O_CREAT, S_IROTH | S_IWOTH | S_IWUSR | S_IRUSR); if (f < 0) { logstream(LOG_ERROR) << "Could not open " << fname << " error: " << strerror(errno) << std::endl; } assert(f >= 0); int trerr = ftruncate(f, 0); assert(trerr == 0); char * buf = (char*) malloc(SHARDER_BUFSIZE); char * bufptr = buf; char * ebuf = (char*) malloc(compressed_block_size); ebuffer_size = compressed_block_size; char * ebufptr = ebuf; vid_t curvid=0; #ifdef DYNAMICEDATA vid_t lastdst = 0xffffffff; int jumpover = 0; size_t num_uniq_edges = 0; size_t last_edge_count = 0; #endif size_t istart = 0; size_t tot_edatabytes = 0; for(size_t i=0; i <= numedges; i++) { #ifdef DYNAMICEDATA i += jumpover; // With dynamic values, there might be several values for one edge, and thus the edge repeated in the data. jumpover = 0; #endif //DYNAMICEDATA edge_t edge = (i < numedges ? 
shovelbuf[i] : edge_t(0, 0, EdgeDataType())); // Last "element" is a stopper #ifdef DYNAMICEDATA if (lastdst == edge.dst && edge.src == curvid) { // Currently not supported logstream(LOG_ERROR) << "Duplicate edge in the stream - aborting" << std::endl; assert(false); } lastdst = edge.dst; #endif if (!edge.stopper()) { #ifndef DYNAMICEDATA bwrite_edata<EdgeDataType>(ebuf, ebufptr, EdgeDataType(edge.value), tot_edatabytes, edfname, edgecounter); #else /* If we have dynamic edge data, we need to write the header of chivector - if there are edge values */ if (edge.is_chivec_value) { // Need to check how many values for this edge int count = 1; while(shovelbuf[i + count].valindex == count) { count++; } assert(count < 32768); typename chivector<EdgeDataType>::sizeword_t szw; ((uint16_t *) &szw)[0] = (uint16_t)count; // Sizeword with length and capacity = count ((uint16_t *) &szw)[1] = (uint16_t)count; bwrite_edata<typename chivector<EdgeDataType>::sizeword_t>(ebuf, ebufptr, szw, tot_edatabytes, edfname, edgecounter); for(int j=0; j < count; j++) { bwrite_edata<EdgeDataType>(ebuf, ebufptr, EdgeDataType(shovelbuf[i + j].value), tot_edatabytes, edfname, edgecounter); } jumpover = count - 1; // Jump over } else { // Just write size word with zero bwrite_edata<int>(ebuf, ebufptr, 0, tot_edatabytes, edfname, edgecounter); } num_uniq_edges++; #endif edgecounter++; // Increment edge counter here --- notice that dynamic edata case makes two or more calls to bwrite_edata before incrementing } if (degrees != NULL && edge.src != edge.dst) { degrees[edge.src].outdegree++; degrees[edge.dst].indegree++; } if ((edge.src != curvid) || edge.stopper()) { // New vertex #ifndef DYNAMICEDATA size_t count = i - istart; #else size_t count = num_uniq_edges - 1 - last_edge_count; last_edge_count = num_uniq_edges - 1; if (edge.stopper()) count++; #endif assert(count>0 || curvid==0); if (count>0) { if (count < 255) { uint8_t x = (uint8_t)count; bwrite<uint8_t>(f, buf, bufptr, x); } else { 
bwrite<uint8_t>(f, buf, bufptr, 0xff); bwrite<uint32_t>(f, buf, bufptr, (uint32_t)count); } } #ifndef DYNAMICEDATA for(size_t j=istart; j < i; j++) { bwrite(f, buf, bufptr, shovelbuf[j].dst); } #else // Special dealing with dynamic edata because some edges can be present multiple // times in the shovel. for(size_t j=istart; j < i; j++) { if (j == istart || shovelbuf[j - 1].dst != shovelbuf[j].dst) { bwrite(f, buf, bufptr, shovelbuf[j].dst); } } #endif istart = i; #ifdef DYNAMICEDATA istart += jumpover; #endif // Handle zeros if (!edge.stopper()) { if (edge.src - curvid > 1 || (i == 0 && edge.src>0)) { int nz = edge.src - curvid - 1; if (i == 0 && edge.src > 0) nz = edge.src; // border case with the first one do { bwrite<uint8_t>(f, buf, bufptr, 0); nz--; int tnz = std::min(254, nz); bwrite<uint8_t>(f, buf, bufptr, (uint8_t) tnz); nz -= tnz; } while (nz>0); } } curvid = edge.src; } } /* Flush buffers and free memory */ writea(f, buf, bufptr - buf); free(buf); free(shovelbuf); close(f); /* Write edata size file */ if (!no_edgevalues) { edata_flush<EdgeDataType>(ebuf, ebufptr, edfname, tot_edatabytes); std::string sizefilename = edfname + ".size"; std::ofstream ofs(sizefilename.c_str()); #ifndef DYNAMICEDATA ofs << tot_edatabytes; #else ofs << num_uniq_edges * sizeof(int); // For dynamic edge data, write the number of edges. 
#endif ofs.close(); } free(ebuf); m.stop_time("shard_final"); } if (!count_degrees_inmem) { #ifndef DYNAMICEDATA // Use memory-efficient (but slower) method to create degree-data create_degree_file(); #endif } else { std::string degreefname = filename_degree_data(basefilename); int degreeOutF = open(degreefname.c_str(), O_RDWR | O_CREAT, S_IROTH | S_IWOTH | S_IWUSR | S_IRUSR); if (degreeOutF < 0) { logstream(LOG_ERROR) << "Could not create: " << degreeOutF << std::endl; assert(degreeOutF >= 0); } writea(degreeOutF, degrees, sizeof(degree) * (1 + max_vertex_id)); free(degrees); close(degreeOutF); } } typedef char dummy_t; typedef sliding_shard<int, dummy_t> slidingshard_t; typedef memory_shard<int, dummy_t> memshard_t; #ifndef DYNAMICEDATA void create_degree_file() { // Initialize IO stripedio * iomgr = new stripedio(m); std::vector<slidingshard_t * > sliding_shards; int subwindow = 5000000; m.set("subwindow", (size_t)subwindow); int loadthreads = 4; m.start_time("degrees.runtime"); /* Initialize streaming shards */ int blocksize = compressed_block_size; for(int p=0; p < nshards; p++) { logstream(LOG_INFO) << "Initialize streaming shard: " << p << std::endl; sliding_shards.push_back( new slidingshard_t(iomgr, filename_shard_edata<dummy_t>(basefilename, p, nshards), filename_shard_adj(basefilename, p, nshards), intervals[p].first, intervals[p].second, blocksize, m, true, true)); } graphchi_context ginfo; ginfo.nvertices = 1 + intervals[nshards - 1].second; ginfo.scheduler = NULL; std::string outputfname = filename_degree_data(basefilename); int degreeOutF = open(outputfname.c_str(), O_RDWR | O_CREAT, S_IROTH | S_IWOTH | S_IWUSR | S_IRUSR); if (degreeOutF < 0) { logstream(LOG_ERROR) << "Could not create: " << degreeOutF << std::endl; } assert(degreeOutF >= 0); int trerr = ftruncate(degreeOutF, ginfo.nvertices * sizeof(int) * 2); assert(trerr == 0); for(int window=0; window<nshards; window++) { metrics_entry mwi = m.start_time(); vid_t interval_st = 
intervals[window].first; vid_t interval_en = intervals[window].second; /* Flush stream shard for the window */ sliding_shards[window]->flush(); /* Load shard[window] into memory */ memshard_t memshard(iomgr, filename_shard_edata<EdgeDataType>(basefilename, window, nshards), filename_shard_adj(basefilename, window, nshards), interval_st, interval_en, blocksize, m); memshard.only_adjacency = true; logstream(LOG_INFO) << "Interval: " << interval_st << " " << interval_en << std::endl; for(vid_t subinterval_st=interval_st; subinterval_st <= interval_en; ) { vid_t subinterval_en = std::min(interval_en, subinterval_st + subwindow); logstream(LOG_INFO) << "(Degree proc.) Sub-window: [" << subinterval_st << " - " << subinterval_en << "]" << std::endl; assert(subinterval_en >= subinterval_st && subinterval_en <= interval_en); /* Preallocate vertices */ metrics_entry men = m.start_time(); int nvertices = subinterval_en - subinterval_st + 1; std::vector< graphchi_vertex<int, dummy_t> > vertices(nvertices, graphchi_vertex<int, dummy_t>()); // preallocate for(int i=0; i < nvertices; i++) { vertices[i] = graphchi_vertex<int, dummy_t>(subinterval_st + i, NULL, NULL, 0, 0); vertices[i].scheduled = true; } metrics_entry me = m.start_time(); omp_set_num_threads(loadthreads); #pragma omp parallel for for(int p=-1; p < nshards; p++) { if (p == (-1)) { // if first window, now need to load the memshard if (memshard.loaded() == false) { memshard.load(); } /* Load vertices from memshard (only inedges for now so can be done in parallel) */ memshard.load_vertices(subinterval_st, subinterval_en, vertices); } else { /* Stream forward other than the window partition */ if (p != window) { sliding_shards[p]->read_next_vertices(nvertices, subinterval_st, vertices, false); } } } m.stop_time(me, "stream_ahead", window); metrics_entry mev = m.start_time(); // Read first current values int * vbuf = (int*) malloc(nvertices * sizeof(int) * 2); for(int i=0; i<nvertices; i++) { vbuf[2 * i] = 
vertices[i].num_inedges(); vbuf[2 * i +1] = vertices[i].num_outedges(); } pwritea(degreeOutF, vbuf, nvertices * sizeof(int) * 2, subinterval_st * sizeof(int) * 2); free(vbuf); // Move window subinterval_st = subinterval_en+1; } /* Move the offset of the window-shard forward */ sliding_shards[window]->set_offset(memshard.offset_for_stream_cont(), memshard.offset_vid_for_stream_cont(), memshard.edata_ptr_for_stream_cont()); } close(degreeOutF); m.stop_time("degrees.runtime"); delete iomgr; } #endif friend class binary_adjacency_list_reader<EdgeDataType>; }; // End class sharder }; // namespace #endif
09jijiangwen-download
src/preprocessing/sharder.hpp
C++
asf20
41,179
/** * @file * @author Aapo Kyrola <akyrola@cs.cmu.edu> * @version 1.0 * * @section LICENSE * * Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University] * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * @section DESCRIPTION * * Dynamic data version: manages a block. */ #ifndef graphchi_xcode_dynamicblock_hpp #define graphchi_xcode_dynamicblock_hpp #include <stdint.h> namespace graphchi { int get_block_uncompressed_size(std::string blockfilename, int defaultsize); int get_block_uncompressed_size(std::string blockfilename, int defaultsize) { std::string szfilename = blockfilename + ".bsize"; FILE * f = fopen(szfilename.c_str(), "r"); if (f != NULL) { int sz; fread(&sz, 1, sizeof(int), f); fclose(f); return sz; } else { return defaultsize; } } void write_block_uncompressed_size(std::string blockfilename, int size); void write_block_uncompressed_size(std::string blockfilename, int size) { std::string szfilename = blockfilename + ".bsize"; FILE * f = fopen(szfilename.c_str(), "w"); fwrite(&size, 1, sizeof(int), f); fclose(f); if (size > 20000000) { logstream(LOG_DEBUG) << "Block " << blockfilename << " size:" << size << std::endl; } } void delete_block_uncompressed_sizefile(std::string blockfilename); void delete_block_uncompressed_sizefile(std::string blockfilename) { std::string szfilename = blockfilename + ".bsize"; int err = remove(szfilename.c_str()); if (err != 0) { // File did not exist - ok } } template <typename ET> struct dynamicdata_block { 
int nitems; uint8_t * data; ET * chivecs; dynamicdata_block() : data(NULL), chivecs(NULL) {} dynamicdata_block(int nitems, uint8_t * data, int datasize) : nitems(nitems){ chivecs = new ET[nitems]; uint8_t * ptr = data; for(int i=0; i < nitems; i++) { assert(ptr - data <= datasize); typename ET::sizeword_t * sz = ((typename ET::sizeword_t *) ptr); ptr += sizeof(typename ET::sizeword_t); chivecs[i] = ET(((uint16_t *)sz)[0], ((uint16_t *)sz)[1], (typename ET::element_type_t *) ptr); ptr += (int) ((uint16_t *)sz)[1] * sizeof(typename ET::element_type_t); } } ET * edgevec(int i) { assert(i < nitems); assert(chivecs != NULL); return &chivecs[i]; } void write(uint8_t ** outdata, int & size) { // First compute size size = 0; for(int i=0; i < nitems; i++) { size += chivecs[i].capacity() * sizeof(typename ET::element_type_t) + sizeof(typename ET::sizeword_t); } *outdata = (uint8_t *) malloc(size); uint8_t * ptr = *outdata; for(int i=0; i < nitems; i++) { ET & vec = chivecs[i]; ((uint16_t *) ptr)[0] = vec.size(); ((uint16_t *) ptr)[1] = vec.capacity(); ptr += sizeof(typename ET::sizeword_t); vec.write((typename ET::element_type_t *) ptr); ptr += vec.capacity() * sizeof(typename ET::element_type_t); } } ~dynamicdata_block() { if (chivecs != NULL) { delete [] chivecs; } } }; }; #endif
09jijiangwen-download
src/shards/dynamicdata/dynamicblock.hpp
C++
asf20
4,208
/** * @file * @author Aapo Kyrola <akyrola@cs.cmu.edu> * @version 1.0 * * @section LICENSE * * Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University] * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * @section DESCRIPTION * * Dynamic data version: The sliding shard. */ #ifndef DEF_GRAPHCHI_SLIDINGSHARD #define DEF_GRAPHCHI_SLIDINGSHARD #include <iostream> #include <cstdio> #include <sstream> #include <vector> #include <fcntl.h> #include <unistd.h> #include <assert.h> #include <string> #include "api/graph_objects.hpp" #include "metrics/metrics.hpp" #include "logger/logger.hpp" #include "io/stripedio.hpp" #include "graphchi_types.hpp" #include "api/dynamicdata/chivector.hpp" #include "shards/dynamicdata/dynamicblock.hpp" namespace graphchi { /** * A streaming block. 
*/ template <typename ET> struct sblock { int writedesc; int readdesc; size_t offset; size_t end; uint8_t * data; uint8_t * ptr; bool active; bool is_edata_block; std::string blockfilename; dynamicdata_block<ET> * dynblock; sblock() : writedesc(0), readdesc(0), active(false) { data = NULL; dynblock = NULL; } sblock(int wdesc, int rdesc, bool is_edata_block=false) : writedesc(wdesc), readdesc(rdesc), active(false), is_edata_block(is_edata_block){ data = NULL; dynblock = NULL; } sblock(int wdesc, int rdesc, bool is_edata_block, std::string blockfilename) : writedesc(wdesc), readdesc(rdesc), active(false), is_edata_block(is_edata_block), blockfilename(blockfilename) { assert(is_edata_block == true); data = NULL; dynblock = NULL; } void commit_async(stripedio * iomgr) { commit_now(iomgr); // TODO: async release(iomgr); // Note! } void commit_now(stripedio * iomgr) { if (active && data != NULL && writedesc >= 0) { size_t len = ptr-data; if (len > end-offset) len = end-offset; if (is_edata_block) { uint8_t * outdata = NULL; int realsize; dynblock->write(&outdata, realsize); write_block_uncompressed_size(blockfilename, realsize); iomgr->managed_pwritea_now(writedesc, &outdata, realsize, 0); /* Need to write whole block in the compressed regime */ free(outdata); } else { iomgr->managed_pwritea_now(writedesc, &data, len, offset); } } } void read_async(stripedio * iomgr) { assert(false); } void read_now(stripedio * iomgr) { if (is_edata_block) { int realsize = get_block_uncompressed_size(blockfilename, end-offset); iomgr->managed_preada_now(readdesc, &data, realsize, 0); int nedges = (end - offset) / sizeof(int); // Ugly dynblock = new dynamicdata_block<ET>(nedges, (uint8_t *) data, realsize); } else { iomgr->managed_preada_now(readdesc, &data, end - offset, offset); } } void release(stripedio * iomgr) { if (data != NULL) { iomgr->managed_release(readdesc, &data); } if (is_edata_block) { iomgr->close_session(readdesc); } if (dynblock != NULL) { delete dynblock; dynblock = 
NULL; } data = NULL; } }; struct indexentry { size_t adjoffset, edataoffset; indexentry(size_t a, size_t e) : adjoffset(a), edataoffset(e) {} }; /* * Graph shard that is streamed. I.e, it can only read in one direction, a chunk * a time. */ // ET must be a chivector<T> template <typename VT, typename ET, typename svertex_t = graphchi_vertex<VT, ET>, typename ETspecial = ET> class sliding_shard { stripedio * iomgr; std::string filename_edata; std::string filename_adj; vid_t range_st, range_end; size_t blocksize; vid_t curvid; size_t adjoffset, edataoffset, adjfilesize, edatafilesize; size_t window_start_edataoffset; std::vector<sblock<ET> > activeblocks; int adjfile_session; int writedesc; sblock<ET> * curblock; sblock<ET> * curadjblock; metrics &m; std::map<int, indexentry> sparse_index; // Sparse index that can be created in the fly bool disable_writes; bool async_edata_loading; // bool need_read_outedges; // Disabled - does not work with compressed data: whole block needs to be read. public: bool only_adjacency; sliding_shard(stripedio * iomgr, std::string _filename_edata, std::string _filename_adj, vid_t _range_st, vid_t _range_en, size_t _blocksize, metrics &_m, bool _disable_writes=false, bool onlyadj = false) : iomgr(iomgr), filename_edata(_filename_edata), filename_adj(_filename_adj), range_st(_range_st), range_end(_range_en), blocksize(_blocksize), m(_m), disable_writes(_disable_writes) { curvid = 0; adjoffset = 0; edataoffset = 0; disable_writes = false; only_adjacency = onlyadj; curblock = NULL; curadjblock = NULL; window_start_edataoffset = 0; while(blocksize % sizeof(int) != 0) blocksize++; assert(blocksize % sizeof(int)==0); adjfilesize = get_filesize(filename_adj); edatafilesize = get_shard_edata_filesize<int>(filename_edata); if (!only_adjacency) { logstream(LOG_DEBUG) << "Total edge data size: " << edatafilesize << std::endl; } else { // Nothing } adjfile_session = iomgr->open_session(filename_adj, true); save_offset(); async_edata_loading = false; 
// With dynamic edge data size, do not load } ~sliding_shard() { release_prior_to_offset(true); if (curblock != NULL) { curblock->release(iomgr); delete curblock; curblock = NULL; } if (curadjblock != NULL) { curadjblock->release(iomgr); delete curadjblock; curadjblock = NULL; } iomgr->close_session(adjfile_session); } size_t num_edges() { return edatafilesize / sizeof(ET); } protected: size_t get_adjoffset() { return adjoffset; } size_t get_edataoffset() { return edataoffset; } void save_offset() { // Note, so that we can use the lower bound operation in map, we need // to insert indices in reverse order sparse_index.insert(std::pair<int, indexentry>(-((int)curvid), indexentry(adjoffset, edataoffset))); } void move_close_to(vid_t v) { if (curvid >= v) return; std::map<int,indexentry>::iterator lowerbd_iter = sparse_index.lower_bound(-((int)v)); int closest_vid = -((int)lowerbd_iter->first); assert(closest_vid>=0); indexentry closest_offset = lowerbd_iter->second; assert(closest_vid <= (int)v); if (closest_vid > (int)curvid) { /* Note: this will fail if we have over 2B vertices! */ logstream(LOG_DEBUG) << "Sliding shard, start: " << range_st << " moved to: " << closest_vid << " " << closest_offset.adjoffset << ", asked for : " << v << " was in: curvid= " << curvid << " " << adjoffset << std::endl; if (curblock != NULL) // Move the pointer - this may invalidate the curblock, but it is being checked later curblock->ptr += closest_offset.edataoffset - edataoffset; if (curadjblock != NULL) curadjblock->ptr += closest_offset.adjoffset - adjoffset; curvid = (vid_t)closest_vid; adjoffset = closest_offset.adjoffset; edataoffset = closest_offset.edataoffset; return; } else { // Do nothing - just continue from current pos. 
return; } } inline void check_curblock(size_t toread) { if (curblock == NULL || curblock->end < edataoffset+toread) { if (curblock != NULL) { if (!curblock->active) { curblock->release(iomgr); } } // Load next std::string blockfilename = filename_shard_edata_block(filename_edata, (int) (edataoffset / blocksize), blocksize); int edata_session = iomgr->open_session(blockfilename, false, true); sblock<ET> newblock(edata_session, edata_session, true, blockfilename); // We align blocks always to the blocksize, even if that requires // allocating and reading some unnecessary data. newblock.offset = (edataoffset / blocksize) * blocksize; // Align size_t correction = edataoffset - newblock.offset; newblock.end = std::min(edatafilesize, newblock.offset + blocksize); assert(newblock.end >= newblock.offset); int realsize = get_block_uncompressed_size(blockfilename, newblock.end - newblock.offset); iomgr->managed_malloc(edata_session, &newblock.data, realsize, newblock.offset); newblock.ptr = newblock.data + correction; activeblocks.push_back(newblock); curblock = &activeblocks[activeblocks.size()-1]; curblock->active = true; curblock->read_now(iomgr); } } inline void check_adjblock(size_t toread) { if (curadjblock == NULL || curadjblock->end <= adjoffset + toread) { if (curadjblock != NULL) { curadjblock->release(iomgr); delete curadjblock; curadjblock = NULL; } sblock<ET> * newblock = new sblock<ET>(0, adjfile_session); newblock->offset = adjoffset; newblock->end = std::min(adjfilesize, adjoffset+blocksize); assert(newblock->end > 0); assert(newblock->end >= newblock->offset); iomgr->managed_malloc(adjfile_session, &newblock->data, newblock->end - newblock->offset, adjoffset); newblock->ptr = newblock->data; metrics_entry me = m.start_time(); iomgr->managed_preada_now(adjfile_session, &newblock->data, newblock->end - newblock->offset, adjoffset); m.stop_time(me, "blockload"); curadjblock = newblock; } } template <typename U> inline U read_val() { check_adjblock(sizeof(U)); U 
res = *((U*)curadjblock->ptr); adjoffset += sizeof(U); curadjblock->ptr += sizeof(U); return res; } inline ET * read_edgeptr() { if (only_adjacency) return NULL; check_curblock(sizeof(int)); edataoffset += sizeof(int); int blockedgeidx = (curblock->ptr - curblock->data) / sizeof(int); curblock->ptr += sizeof(int); assert(curblock->dynblock != NULL); return curblock->dynblock->edgevec(blockedgeidx); } inline void skip(int n, int sz) { size_t tot = n * sz; adjoffset += tot; if (curadjblock != NULL) curadjblock->ptr += tot; edataoffset += sizeof(int) * n; if (curblock != NULL) curblock->ptr += sizeof(int) * n; } public: /** * Read out-edges for vertices. */ void read_next_vertices(int nvecs, vid_t start, std::vector<svertex_t> & prealloc, bool record_index=false, bool disable_writes=false) { metrics_entry me = m.start_time(); if (!record_index) move_close_to(start); /* Release the blocks we do not need anymore */ curblock = NULL; release_prior_to_offset(false, disable_writes); assert(activeblocks.size() <= 1); /* Read next */ if (!activeblocks.empty() && !only_adjacency) { curblock = &activeblocks[0]; } vid_t lastrec = start; window_start_edataoffset = edataoffset; for(int i=((int)curvid) - ((int)start); i<nvecs; i++) { if (adjoffset >= adjfilesize) break; // TODO: skip unscheduled vertices. int n; if (record_index && (size_t)(curvid - lastrec) >= (size_t) std::max((int)100000, nvecs/16)) { save_offset(); lastrec = curvid; } uint8_t ns = read_val<uint8_t>(); if (ns == 0x00) { curvid++; uint8_t nz = read_val<uint8_t>(); curvid += nz; i += nz; continue; } if (ns == 0xff) { n = read_val<uint32_t>(); } else { n = ns; } if (i<0) { // Just skipping skip(n, sizeof(vid_t)); } else { svertex_t& vertex = prealloc[i]; assert(vertex.id() == curvid); if (vertex.scheduled) { while(--n >= 0) { bool special_edge = false; vid_t target = (sizeof(ET) == sizeof(ETspecial) ? 
read_val<vid_t>() : translate_edge(read_val<vid_t>(), special_edge)); ET * evalue = read_edgeptr(); vertex.add_outedge(target, evalue, special_edge); if (!((target >= range_st && target <= range_end))) { logstream(LOG_ERROR) << "Error : " << target << " not in [" << range_st << " - " << range_end << "]" << std::endl; iomgr->print_session(adjfile_session); } assert(target >= range_st && target <= range_end); } } else { // This vertex was not scheduled, so we can just skip its edges. skip(n, sizeof(vid_t)); } } curvid++; } m.stop_time(me, "read_next_vertices"); curblock = NULL; } /** * Commit modifications. */ void commit(sblock<ET> &b, bool synchronously, bool disable_writes=false) { if (synchronously) { metrics_entry me = m.start_time(); if (!disable_writes) b.commit_now(iomgr); m.stop_time(me, "commit"); b.release(iomgr); } else { if (!disable_writes) b.commit_async(iomgr); else b.release(iomgr); } } /** * Release all buffers */ void flush() { release_prior_to_offset(true); if (curadjblock != NULL) { curadjblock->release(iomgr); delete curadjblock; curadjblock = NULL; } } /** * Set the position of the sliding shard. 
*/ void set_offset(size_t newoff, vid_t _curvid, size_t edgeptr) { this->adjoffset = newoff; this->curvid = _curvid; this->edataoffset = edgeptr; if (curadjblock != NULL) { curadjblock->release(iomgr); delete curadjblock; curadjblock = NULL; } } /** * Release blocks that come prior to the current offset/ */ void release_prior_to_offset(bool all=false, bool disable_writes=false) { // disable writes is for the dynamic case for(int i=(int)activeblocks.size() - 1; i >= 0; i--) { sblock<ET> &b = activeblocks[i]; if (b.end <= edataoffset || all) { commit(b, all, disable_writes); activeblocks.erase(activeblocks.begin() + (unsigned int)i); } } } std::string get_info_json() { std::stringstream json; json << "\"size\": "; json << edatafilesize << std::endl; json << ", \"windowStart\": "; json << window_start_edataoffset; json << ", \"windowEnd\": "; json << edataoffset; json << ", \"intervalStart\": "; json << range_st; json << ", \"intervalEnd\": "; json << range_end; return json.str(); } }; }; #endif
09jijiangwen-download
src/shards/dynamicdata/slidingshard.hpp
C++
asf20
18,769
/** * @file * @author Aapo Kyrola <akyrola@cs.cmu.edu> * @version 1.0 * * @section LICENSE * * Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University] * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * @section DESCRIPTION * * Dynamic edge data version: The memory shard. * This class should only be accessed internally by the GraphChi engine. */ #ifndef DEF_GRAPHCHI_MEMSHARD #define DEF_GRAPHCHI_MEMSHARD #include <iostream> #include <cstdio> #include <sstream> #include <vector> #include <fcntl.h> #include <unistd.h> #include <assert.h> #include <string> #include "api/graph_objects.hpp" #include "metrics/metrics.hpp" #include "io/stripedio.hpp" #include "graphchi_types.hpp" #include "shards/dynamicdata/dynamicblock.hpp" namespace graphchi { template <typename VT, typename ET, typename svertex_t = graphchi_vertex<VT, ET> > class memory_shard { stripedio * iomgr; std::string filename_edata; std::string filename_adj; vid_t range_st; vid_t range_end; size_t adjfilesize; size_t edatafilesize; size_t edgeptr; vid_t streaming_offset_vid; size_t streaming_offset; // The offset where streaming should continue size_t range_start_offset; // First byte for this range's vertices (used for writing only outedges) size_t range_start_edge_ptr; size_t streaming_offset_edge_ptr; uint8_t * adjdata; char ** edgedata; std::vector<size_t> blocksizes; std::vector< dynamicdata_block<ET> * > dynamicblocks; uint64_t chunkid; std::vector<int> block_edatasessions; int 
adj_session; streaming_task adj_stream_session; bool is_loaded; size_t blocksize; metrics &m; public: bool only_adjacency; /* Dynamic edata */ memory_shard(stripedio * iomgr, std::string _filename_edata, std::string _filename_adj, vid_t _range_start, vid_t _range_end, size_t _blocksize, metrics &_m) : iomgr(iomgr), filename_edata(_filename_edata), filename_adj(_filename_adj), range_st(_range_start), range_end(_range_end), blocksize(_blocksize), m(_m) { adjdata = NULL; only_adjacency = false; is_loaded = false; adj_session = -1; edgedata = NULL; } /* Dynamic edata */ ~memory_shard() { int nblocks = (int) block_edatasessions.size(); for(int i=0; i < nblocks; i++) { if (edgedata[i] != NULL) { iomgr->managed_release(block_edatasessions[i], &edgedata[i]); iomgr->close_session(block_edatasessions[i]); } if (dynamicblocks[i] != NULL) delete dynamicblocks[i]; dynamicblocks[i] = NULL; } dynamicblocks.clear(); if (adj_session >= 0) { if (adjdata != NULL) iomgr->managed_release(adj_session, &adjdata); iomgr->close_session(adj_session); } if (edgedata != NULL) free(edgedata); edgedata = NULL; } /* Dynamic edata */ void write_and_release_block(int i) { std::string block_filename = filename_shard_edata_block(filename_edata, i, blocksize); dynamicdata_block<ET> * dynblock = dynamicblocks[i]; if (dynblock != NULL) { uint8_t * outdata; int outsize; dynblock->write(&outdata, outsize); write_block_uncompressed_size(block_filename, outsize); iomgr->managed_pwritea_now(block_edatasessions[i], &outdata, outsize, 0); iomgr->managed_release(block_edatasessions[i], &edgedata[i]); iomgr->close_session(block_edatasessions[i]); free(outdata); delete dynblock; } dynamicblocks[i] = NULL; } /* Dynamic edata */ void commit(bool commit_inedges, bool commit_outedges) { if (block_edatasessions.size() == 0 || only_adjacency) return; assert(is_loaded); metrics_entry cm = m.start_time(); /** * This is an optimization that is relevant only if memory shard * has been used in a case where only out-edges 
are considered. * Out-edges are in a continuous "window", while in-edges are * scattered all over the shard */ int nblocks = (int) block_edatasessions.size(); if (commit_inedges) { for(int i=0; i < nblocks; i++) { /* NOTE: WRITE ALL BLOCKS SYNCHRONOUSLY */ write_and_release_block(i); edgedata[i] = NULL; } } else if (commit_outedges) { size_t last = streaming_offset_edge_ptr; if (last == 0){ // rollback last = edatafilesize; } //char * bufp = ((char*)edgedata + range_start_edge_ptr); int startblock = (int) (range_start_edge_ptr / blocksize); int endblock = (int) (last / blocksize); for(int i=0; i < nblocks; i++) { if (i >= startblock && i <= endblock) { write_and_release_block(i); } else { iomgr->managed_release(block_edatasessions[i], &edgedata[i]); } edgedata[i] = NULL; iomgr->close_session(block_edatasessions[i]); } } m.stop_time(cm, "memshard_commit"); iomgr->managed_release(adj_session, &adjdata); // FIXME: this is duplicated code from destructor for(int i=0; i < nblocks; i++) { if (edgedata[i] != NULL) { iomgr->managed_release(block_edatasessions[i], &edgedata[i]); } } block_edatasessions.clear(); is_loaded = false; } bool loaded() { return is_loaded; } private: /* Dynamic edata */ void load_edata() { bool async_inedgedata_loading = false; // Not supported with dynamic edgedata assert(blocksize % sizeof(int) == 0); int nblocks = (int) (edatafilesize / blocksize + (edatafilesize % blocksize != 0)); edgedata = (char **) calloc(nblocks, sizeof(char*)); size_t compressedsize = 0; int blockid = 0; while(true) { std::string block_filename = filename_shard_edata_block(filename_edata, blockid, blocksize); if (file_exists(block_filename)) { size_t fsize = get_block_uncompressed_size(block_filename, std::min(edatafilesize - blocksize * blockid, blocksize)); //std::min(edatafilesize - blocksize * blockid, blocksize); compressedsize += get_filesize(block_filename); int blocksession = iomgr->open_session(block_filename, false, true); // compressed 
block_edatasessions.push_back(blocksession); blocksizes.push_back(fsize); edgedata[blockid] = NULL; iomgr->managed_malloc(blocksession, &edgedata[blockid], fsize, 0); if (async_inedgedata_loading) { assert(false); } else { iomgr->managed_preada_now(blocksession, &edgedata[blockid], fsize, 0); } dynamicblocks.push_back(NULL); blockid++; } else { if (blockid == 0) { logstream(LOG_ERROR) << "Shard block file did not exists:" << block_filename << std::endl; } break; } } assert(blockid == nblocks); logstream(LOG_DEBUG) << "Compressed/full size: " << compressedsize * 1.0 / edatafilesize << " number of blocks: " << nblocks << std::endl; } /* Initialize a dynamic block if required */ void check_block_initialized(int blockid) { if (dynamicblocks[blockid] == NULL) { std::string block_filename = filename_shard_edata_block(filename_edata, blockid, blocksize); size_t fsize = get_block_uncompressed_size(block_filename, std::min(edatafilesize - blocksize * blockid, blocksize)); //std::min(edatafilesize - blocksize * blockid, blocksize); int nedges = std::min(edatafilesize - blocksize * blockid, blocksize) / sizeof(int); dynamicblocks[blockid] = new dynamicdata_block<ET>(nedges, (uint8_t*) edgedata[blockid], fsize); } } public: /* Dynamic edata */ void load() { is_loaded = true; adjfilesize = get_filesize(filename_adj); edatafilesize = get_shard_edata_filesize<ET>(filename_edata); #ifdef SUPPORT_DELETIONS async_inedgedata_loading = false; // Currently we encode the deleted status of an edge into the edge value (should be changed!), // so we need the edge data while loading #endif //preada(adjf, adjdata, adjfilesize, 0); adj_session = iomgr->open_session(filename_adj, true); iomgr->managed_malloc(adj_session, &adjdata, adjfilesize, 0); adj_stream_session = streaming_task(iomgr, adj_session, adjfilesize, (char**) &adjdata); iomgr->launch_stream_reader(&adj_stream_session); /* Initialize edge data asynchonous reading */ if (!only_adjacency) { load_edata(); } } /* Dynamic edata */ 
inline void check_stream_progress(int toread, size_t pos) { if (adj_stream_session.curpos == adjfilesize) return; while(adj_stream_session.curpos < toread+pos) { usleep(20000); if (adj_stream_session.curpos == adjfilesize) return; } } /* Dynamic edata */ void load_vertices(vid_t window_st, vid_t window_en, std::vector<svertex_t> & prealloc, bool inedges=true, bool outedges=true) { /* Find file size */ m.start_time("memoryshard_create_edges"); assert(adjdata != NULL); // Now start creating vertices uint8_t * ptr = adjdata; uint8_t * end = ptr + adjfilesize; vid_t vid = 0; edgeptr = 0; streaming_offset = 0; streaming_offset_vid = 0; streaming_offset_edge_ptr = 0; range_start_offset = adjfilesize; range_start_edge_ptr = edatafilesize; bool setoffset = false; bool setrangeoffset = false; while (ptr < end) { check_stream_progress(6, ptr-adjdata); // read at least 6 bytes if (!setoffset && vid > range_end) { // This is where streaming should continue. Notice that because of the // non-zero counters, this might be a bit off. streaming_offset = ptr-adjdata; streaming_offset_vid = vid; streaming_offset_edge_ptr = edgeptr; setoffset = true; } if (!setrangeoffset && vid>=range_st) { range_start_offset = ptr-adjdata; range_start_edge_ptr = edgeptr; setrangeoffset = true; } uint8_t ns = *ptr; int n; ptr += sizeof(uint8_t); if (ns == 0x00) { // next value tells the number of vertices with zeros uint8_t nz = *ptr; ptr += sizeof(uint8_t); vid++; vid += nz; continue; } if (ns == 0xff) { // If 255 is not enough, then stores a 32-bit integer after. 
n = *((uint32_t*)ptr); ptr += sizeof(uint32_t); } else { n = ns; } svertex_t* vertex = NULL; if (vid>=window_st && vid <=window_en) { // TODO: Make more efficient vertex = &prealloc[vid-window_st]; if (!vertex->scheduled) vertex = NULL; } check_stream_progress(n * 4, ptr - adjdata); bool any_edges = false; while(--n>=0) { int blockid = (int) (edgeptr / blocksize); vid_t target = *((vid_t*) ptr); ptr += sizeof(vid_t); if (vertex != NULL && outedges) { check_block_initialized(blockid); vertex->add_outedge(target, (only_adjacency ? NULL : dynamicblocks[blockid]->edgevec((edgeptr % blocksize)/sizeof(int))), false); } if (target >= window_st) { if (target <= window_en) { /* In edge */ if (inedges) { svertex_t & dstvertex = prealloc[target - window_st]; if (dstvertex.scheduled) { any_edges = true; // assert(only_adjacency || edgeptr < edatafilesize); check_block_initialized(blockid); ET * eptr = (only_adjacency ? NULL : dynamicblocks[blockid]->edgevec((edgeptr % blocksize)/sizeof(int))); dstvertex.add_inedge(vid, (only_adjacency ? NULL : eptr), false); dstvertex.parallel_safe = dstvertex.parallel_safe && (vertex == NULL); // Avoid if } } } else { // Note, we cannot skip if there can be "special edges". FIXME so dirty. // This vertex has no edges any more for this window, bail out if (vertex == NULL) { ptr += sizeof(vid_t) * n; edgeptr += (n + 1) * sizeof(int); break; } } } edgeptr += sizeof(int); } if (any_edges && vertex != NULL) { vertex->parallel_safe = false; } vid++; } m.stop_time("memoryshard_create_edges", false); } size_t offset_for_stream_cont() { return streaming_offset; } vid_t offset_vid_for_stream_cont() { return streaming_offset_vid; } size_t edata_ptr_for_stream_cont() { return streaming_offset_edge_ptr; } }; }; #endif
09jijiangwen-download
src/shards/dynamicdata/memoryshard.hpp
C++
asf20
16,688
/** * @file * @author Aapo Kyrola <akyrola@cs.cmu.edu> * @version 1.0 * * @section LICENSE * * Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University] * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * @section DESCRIPTION * * The sliding shard. */ #ifdef DYNAMICEDATA #include "shards/dynamicdata/slidingshard.hpp" #else #ifndef DEF_GRAPHCHI_SLIDINGSHARD #define DEF_GRAPHCHI_SLIDINGSHARD #include <iostream> #include <cstdio> #include <sstream> #include <vector> #include <fcntl.h> #include <unistd.h> #include <assert.h> #include <string> #include "api/graph_objects.hpp" #include "metrics/metrics.hpp" #include "logger/logger.hpp" #include "io/stripedio.hpp" #include "graphchi_types.hpp" namespace graphchi { /** * A streaming block. 
*/ struct sblock { int writedesc; int readdesc; size_t offset; size_t end; uint8_t * data; uint8_t * ptr; bool active; bool is_edata_block; sblock() : writedesc(0), readdesc(0), active(false) { data = NULL; } sblock(int wdesc, int rdesc, bool is_edata_block=false) : writedesc(wdesc), readdesc(rdesc), active(false), is_edata_block(is_edata_block){ data = NULL; } void commit_async(stripedio * iomgr) { if (active && data != NULL && writedesc >= 0) { if (is_edata_block) { iomgr->managed_pwritea_async(writedesc, &data, end-offset, 0, true, true); data = NULL; } else { iomgr->managed_pwritea_async(writedesc, &data, end-offset, offset, true); } } } void commit_now(stripedio * iomgr) { if (active && data != NULL && writedesc >= 0) { size_t len = ptr-data; if (len > end-offset) len = end-offset; if (is_edata_block) { iomgr->managed_pwritea_now(writedesc, &data, end - offset, 0); /* Need to write whole block in the compressed regime */ } else { iomgr->managed_pwritea_now(writedesc, &data, len, offset); } } } void read_async(stripedio * iomgr) { if (is_edata_block) { iomgr->managed_preada_async(readdesc, &data, (end - offset), 0); } else { iomgr->managed_preada_async(readdesc, &data, end - offset, offset); } } void read_now(stripedio * iomgr) { if (is_edata_block) { iomgr->managed_preada_now(readdesc, &data, end-offset, 0); } else { iomgr->managed_preada_now(readdesc, &data, end-offset, offset); } } void release(stripedio * iomgr) { if (data != NULL) { iomgr->managed_release(readdesc, &data); if (is_edata_block) { iomgr->close_session(readdesc); } } data = NULL; } }; struct indexentry { size_t adjoffset, edataoffset; indexentry(size_t a, size_t e) : adjoffset(a), edataoffset(e) {} }; /* * Graph shard that is streamed. I.e, it can only read in one direction, a chunk * a time. 
*/ template <typename VT, typename ET, typename svertex_t = graphchi_vertex<VT, ET>, typename ETspecial = ET> class sliding_shard { stripedio * iomgr; std::string filename_edata; std::string filename_adj; vid_t range_st, range_end; size_t blocksize; vid_t curvid; size_t adjoffset, edataoffset, adjfilesize, edatafilesize; size_t window_start_edataoffset; std::vector<sblock> activeblocks; int adjfile_session; int writedesc; sblock * curblock; sblock * curadjblock; metrics &m; std::map<int, indexentry> sparse_index; // Sparse index that can be created in the fly bool disable_writes; bool async_edata_loading; // bool need_read_outedges; // Disabled - does not work with compressed data: whole block needs to be read. public: bool only_adjacency; sliding_shard(stripedio * iomgr, std::string _filename_edata, std::string _filename_adj, vid_t _range_st, vid_t _range_en, size_t _blocksize, metrics &_m, bool _disable_writes=false, bool onlyadj = false) : iomgr(iomgr), filename_edata(_filename_edata), filename_adj(_filename_adj), range_st(_range_st), range_end(_range_en), blocksize(_blocksize), m(_m), disable_writes(_disable_writes) { curvid = 0; adjoffset = 0; edataoffset = 0; disable_writes = false; only_adjacency = onlyadj; curblock = NULL; curadjblock = NULL; window_start_edataoffset = 0; while(blocksize % sizeof(ET) != 0) blocksize++; assert(blocksize % sizeof(ET)==0); adjfilesize = get_filesize(filename_adj); if (!only_adjacency) { edatafilesize = get_shard_edata_filesize<ET>(filename_edata); logstream(LOG_DEBUG) << "Total edge data size: " << edatafilesize << ", " << filename_edata << "sizeof(ET): " << sizeof(ET) << std::endl; } else { // Nothing } adjfile_session = iomgr->open_session(filename_adj, true); save_offset(); async_edata_loading = !svertex_t().computational_edges(); #ifdef SUPPORT_DELETIONS async_edata_loading = false; // See comment above for memshard, async_edata_loading = false; #endif } ~sliding_shard() { release_prior_to_offset(true); if (curblock != 
NULL) { curblock->release(iomgr); delete curblock; curblock = NULL; } if (curadjblock != NULL) { curadjblock->release(iomgr); delete curadjblock; curadjblock = NULL; } iomgr->close_session(adjfile_session); } size_t num_edges() { return edatafilesize / sizeof(ET); } protected: size_t get_adjoffset() { return adjoffset; } size_t get_edataoffset() { return edataoffset; } void save_offset() { // Note, so that we can use the lower bound operation in map, we need // to insert indices in reverse order sparse_index.insert(std::pair<int, indexentry>(-((int)curvid), indexentry(adjoffset, edataoffset))); } void move_close_to(vid_t v) { if (curvid >= v) return; std::map<int,indexentry>::iterator lowerbd_iter = sparse_index.lower_bound(-((int)v)); int closest_vid = -((int)lowerbd_iter->first); assert(closest_vid>=0); indexentry closest_offset = lowerbd_iter->second; assert(closest_vid <= (int)v); if (closest_vid > (int)curvid) { /* Note: this will fail if we have over 2B vertices! */ if (curblock != NULL) // Move the pointer - this may invalidate the curblock, but it is being checked later curblock->ptr += closest_offset.edataoffset - edataoffset; if (curadjblock != NULL) curadjblock->ptr += closest_offset.adjoffset - adjoffset; curvid = (vid_t)closest_vid; adjoffset = closest_offset.adjoffset; edataoffset = closest_offset.edataoffset; return; } else { // Do nothing - just continue from current pos. return; } } inline void check_curblock(size_t toread) { if (curblock == NULL || curblock->end < edataoffset+toread) { if (curblock != NULL) { if (!curblock->active) { curblock->release(iomgr); } } // Load next std::string blockfilename = filename_shard_edata_block(filename_edata, (int) (edataoffset / blocksize), blocksize); int edata_session = iomgr->open_session(blockfilename, false, true); sblock newblock(edata_session, edata_session, true); // We align blocks always to the blocksize, even if that requires // allocating and reading some unnecessary data. 
newblock.offset = (edataoffset / blocksize) * blocksize; // Align size_t correction = edataoffset - newblock.offset; newblock.end = std::min(edatafilesize, newblock.offset + blocksize); assert(newblock.end >= newblock.offset); iomgr->managed_malloc(edata_session, &newblock.data, newblock.end - newblock.offset, newblock.offset); newblock.ptr = newblock.data + correction; activeblocks.push_back(newblock); curblock = &activeblocks[activeblocks.size()-1]; } } inline void check_adjblock(size_t toread) { if (curadjblock == NULL || curadjblock->end <= adjoffset + toread) { if (curadjblock != NULL) { curadjblock->release(iomgr); delete curadjblock; curadjblock = NULL; } sblock * newblock = new sblock(0, adjfile_session); newblock->offset = adjoffset; newblock->end = std::min(adjfilesize, adjoffset+blocksize); assert(newblock->end > 0); assert(newblock->end >= newblock->offset); iomgr->managed_malloc(adjfile_session, &newblock->data, newblock->end - newblock->offset, adjoffset); newblock->ptr = newblock->data; metrics_entry me = m.start_time(); iomgr->managed_preada_now(adjfile_session, &newblock->data, newblock->end - newblock->offset, adjoffset); m.stop_time(me, "blockload"); curadjblock = newblock; } } template <typename U> inline U read_val() { check_adjblock(sizeof(U)); U res = *((U*)curadjblock->ptr); adjoffset += sizeof(U); curadjblock->ptr += sizeof(U); return res; } template <typename U> inline U * read_edgeptr() { if (only_adjacency) return NULL; check_curblock(sizeof(U)); U * resptr = ((U*)curblock->ptr); edataoffset += sizeof(U); curblock->ptr += sizeof(U); return resptr; } inline void skip(int n, int sz) { size_t tot = n * sz; adjoffset += tot; if (curadjblock != NULL) curadjblock->ptr += tot; edataoffset += sizeof(ET)*n; if (curblock != NULL) curblock->ptr += sizeof(ET)*n; } public: /** * Read out-edges for vertices. 
*/ void read_next_vertices(int nvecs, vid_t start, std::vector<svertex_t> & prealloc, bool record_index=false, bool disable_writes=false) { metrics_entry me = m.start_time(); if (!record_index) move_close_to(start); /* Release the blocks we do not need anymore */ curblock = NULL; release_prior_to_offset(false, disable_writes); assert(activeblocks.size() <= 1); /* Read next */ if (!activeblocks.empty() && !only_adjacency) { curblock = &activeblocks[0]; } vid_t lastrec = start; window_start_edataoffset = edataoffset; for(int i=((int)curvid) - ((int)start); i<nvecs; i++) { if (adjoffset >= adjfilesize) break; // TODO: skip unscheduled vertices. int n; if (record_index && (size_t)(curvid - lastrec) >= (size_t) std::max((int)100000, nvecs/16)) { save_offset(); lastrec = curvid; } uint8_t ns = read_val<uint8_t>(); if (ns == 0x00) { curvid++; uint8_t nz = read_val<uint8_t>(); curvid += nz; i += nz; continue; } if (ns == 0xff) { n = read_val<uint32_t>(); } else { n = ns; } if (i<0) { // Just skipping skip(n, sizeof(vid_t)); } else { svertex_t& vertex = prealloc[i]; assert(vertex.id() == curvid); if (vertex.scheduled) { while(--n >= 0) { bool special_edge = false; vid_t target = (sizeof(ET) == sizeof(ETspecial) ? read_val<vid_t>() : translate_edge(read_val<vid_t>(), special_edge)); ET * evalue = (special_edge ? (ET*)read_edgeptr<ETspecial>(): read_edgeptr<ET>()); if (!only_adjacency) { if (!curblock->active) { if (async_edata_loading) { curblock->read_async(iomgr); } else { curblock->read_now(iomgr); } } // Note: this needs to be set always because curblock might change during this loop. 
curblock->active = true; // This block has an scheduled vertex - need to commit } vertex.add_outedge(target, evalue, special_edge); if (!((target >= range_st && target <= range_end))) { logstream(LOG_ERROR) << "Error : " << target << " not in [" << range_st << " - " << range_end << "]" << std::endl; iomgr->print_session(adjfile_session); } assert(target >= range_st && target <= range_end); } } else { // This vertex was not scheduled, so we can just skip its edges. skip(n, sizeof(vid_t)); } } curvid++; } m.stop_time(me, "read_next_vertices"); curblock = NULL; } /** * Commit modifications. */ void commit(sblock &b, bool synchronously, bool disable_writes=false) { if (synchronously) { metrics_entry me = m.start_time(); if (!disable_writes) b.commit_now(iomgr); m.stop_time(me, "commit"); b.release(iomgr); } else { if (!disable_writes) b.commit_async(iomgr); else b.release(iomgr); } } /** * Release all buffers */ void flush() { release_prior_to_offset(true); if (curadjblock != NULL) { curadjblock->release(iomgr); delete curadjblock; curadjblock = NULL; } } /** * Set the position of the sliding shard. 
*/ void set_offset(size_t newoff, vid_t _curvid, size_t edgeptr) { this->adjoffset = newoff; this->curvid = _curvid; this->edataoffset = edgeptr; if (curadjblock != NULL) { curadjblock->release(iomgr); delete curadjblock; curadjblock = NULL; } } /** * Release blocks that come prior to the current offset/ */ void release_prior_to_offset(bool all=false, bool disable_writes=false) { // disable writes is for the dynamic case for(int i=(int)activeblocks.size() - 1; i >= 0; i--) { sblock &b = activeblocks[i]; if (b.end <= edataoffset || all) { commit(b, all, disable_writes); activeblocks.erase(activeblocks.begin() + (unsigned int)i); } } } std::string get_info_json() { std::stringstream json; json << "\"size\": "; json << edatafilesize << std::endl; json << ", \"windowStart\": "; json << window_start_edataoffset; json << ", \"windowEnd\": "; json << edataoffset; json << ", \"intervalStart\": "; json << range_st; json << ", \"intervalEnd\": "; json << range_end; return json.str(); } }; }; #endif #endif
09jijiangwen-download
src/shards/slidingshard.hpp
C++
asf20
18,564
/** * @file * @author Aapo Kyrola <akyrola@cs.cmu.edu> * @version 1.0 * * @section LICENSE * * Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University] * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * @section DESCRIPTION * * The memory shard. This class should only be accessed internally by the GraphChi engine. */ #ifdef DYNAMICEDATA #include "shards/dynamicdata/memoryshard.hpp" #else #ifndef DEF_GRAPHCHI_MEMSHARD #define DEF_GRAPHCHI_MEMSHARD #include <iostream> #include <cstdio> #include <sstream> #include <vector> #include <fcntl.h> #include <unistd.h> #include <assert.h> #include <string> #include "api/graph_objects.hpp" #include "metrics/metrics.hpp" #include "io/stripedio.hpp" #include "graphchi_types.hpp" namespace graphchi { template <typename VT, typename ET, typename svertex_t = graphchi_vertex<VT, ET> > class memory_shard { stripedio * iomgr; std::string filename_edata; std::string filename_adj; vid_t range_st; vid_t range_end; size_t adjfilesize; size_t edatafilesize; size_t edgeptr; vid_t streaming_offset_vid; size_t streaming_offset; // The offset where streaming should continue size_t range_start_offset; // First byte for this range's vertices (used for writing only outedges) size_t range_start_edge_ptr; size_t streaming_offset_edge_ptr; uint8_t * adjdata; char ** edgedata; int * doneptr; std::vector<size_t> blocksizes; uint64_t chunkid; std::vector<int> block_edatasessions; int adj_session; streaming_task adj_stream_session; 
bool async_edata_loading; bool is_loaded; size_t blocksize; metrics &m; public: bool only_adjacency; memory_shard(stripedio * iomgr, std::string _filename_edata, std::string _filename_adj, vid_t _range_start, vid_t _range_end, size_t _blocksize, metrics &_m) : iomgr(iomgr), filename_edata(_filename_edata), filename_adj(_filename_adj), range_st(_range_start), range_end(_range_end), blocksize(_blocksize), m(_m) { adjdata = NULL; only_adjacency = false; is_loaded = false; adj_session = -1; edgedata = NULL; doneptr = NULL; async_edata_loading = !svertex_t().computational_edges(); #ifdef SUPPORT_DELETIONS async_edata_loading = false; // See comment above for memshard, async_edata_loading = false; #endif } ~memory_shard() { int nblocks = (int) block_edatasessions.size(); for(int i=0; i < nblocks; i++) { if (edgedata[i] != NULL) { iomgr->managed_release(block_edatasessions[i], &edgedata[i]); iomgr->close_session(block_edatasessions[i]); } } if (adj_session >= 0) { if (adjdata != NULL) iomgr->managed_release(adj_session, &adjdata); iomgr->close_session(adj_session); } if (edgedata != NULL) free(edgedata); edgedata = NULL; if (doneptr != NULL) { free(doneptr); } } void commit(bool commit_inedges, bool commit_outedges) { if (block_edatasessions.size() == 0 || only_adjacency) return; assert(is_loaded); metrics_entry cm = m.start_time(); /** * This is an optimization that is relevant only if memory shard * has been used in a case where only out-edges are considered. * Out-edges are in a continuous "window", while in-edges are * scattered all over the shard */ int nblocks = (int) block_edatasessions.size(); if (commit_inedges) { int start_stream_block = (int) (range_start_edge_ptr / blocksize); for(int i=0; i < nblocks; i++) { /* Write asynchronously blocks that will not be needed by the sliding windows on this iteration. 
*/ if (i >= start_stream_block) { iomgr->managed_pwritea_now(block_edatasessions[i], &edgedata[i], blocksizes[i], 0); iomgr->managed_release(block_edatasessions[i], &edgedata[i]); iomgr->close_session(block_edatasessions[i]); edgedata[i] = NULL; } else { iomgr->managed_pwritea_async(block_edatasessions[i], &edgedata[i], blocksizes[i], 0, true, true); edgedata[i] = NULL; } } } else if (commit_outedges) { size_t last = streaming_offset_edge_ptr; if (last == 0){ // rollback last = edatafilesize; } //char * bufp = ((char*)edgedata + range_start_edge_ptr); int startblock = (int) (range_start_edge_ptr / blocksize); int endblock = (int) (last / blocksize); for(int i=0; i < nblocks; i++) { if (i >= startblock && i <= endblock) { iomgr->managed_pwritea_now(block_edatasessions[i], &edgedata[i], blocksizes[i], 0); } iomgr->managed_release(block_edatasessions[i], &edgedata[i]); edgedata[i] = NULL; iomgr->close_session(block_edatasessions[i]); } } else { for(int i=0; i < nblocks; i++) { iomgr->close_session(block_edatasessions[i]); } } m.stop_time(cm, "memshard_commit"); iomgr->managed_release(adj_session, &adjdata); // FIXME: this is duplicated code from destructor for(int i=0; i < nblocks; i++) { if (edgedata[i] != NULL) { iomgr->managed_release(block_edatasessions[i], &edgedata[i]); } } block_edatasessions.clear(); is_loaded = false; } bool loaded() { return is_loaded; } private: void load_edata() { assert(blocksize % sizeof(ET) == 0); int nblocks = (int) (edatafilesize / blocksize + (edatafilesize % blocksize != 0)); edgedata = (char **) calloc(nblocks, sizeof(char*)); size_t compressedsize = 0; int blockid = 0; if (!async_edata_loading) { doneptr = (int *) malloc(nblocks * sizeof(int)); for(int i=0; i < nblocks; i++) doneptr[i] = 1; } while(true) { std::string block_filename = filename_shard_edata_block(filename_edata, blockid, blocksize); if (file_exists(block_filename)) { size_t fsize = std::min(edatafilesize - blocksize * blockid, blocksize); compressedsize += 
get_filesize(block_filename); int blocksession = iomgr->open_session(block_filename, false, true); // compressed block_edatasessions.push_back(blocksession); blocksizes.push_back(fsize); edgedata[blockid] = NULL; iomgr->managed_malloc(blocksession, &edgedata[blockid], fsize, 0); if (async_edata_loading) { iomgr->managed_preada_async(blocksession, &edgedata[blockid], fsize, 0); } else { iomgr->managed_preada_async(blocksession, &edgedata[blockid], fsize, 0, (volatile int *)&doneptr[blockid]); } blockid++; } else { if (blockid == 0) { logstream(LOG_ERROR) << "Shard block file did not exists:" << block_filename << std::endl; } if (blockid < nblocks) { logstream(LOG_ERROR) << "Did not find block " << block_filename << std::endl; logstream(LOG_ERROR) << "Going to exit..." << std::endl; } break; } } logstream(LOG_DEBUG) << "Compressed/full size: " << compressedsize * 1.0 / edatafilesize << " number of blocks: " << nblocks << std::endl; assert(blockid == nblocks); } public: // TODO: recycle ptr! 
void load() { is_loaded = true; adjfilesize = get_filesize(filename_adj); #ifdef SUPPORT_DELETIONS async_edata_loading = false; // Currently we encode the deleted status of an edge into the edge value (should be changed!), // so we need the edge data while loading #endif //preada(adjf, adjdata, adjfilesize, 0); adj_session = iomgr->open_session(filename_adj, true); iomgr->managed_malloc(adj_session, &adjdata, adjfilesize, 0); adj_stream_session = streaming_task(iomgr, adj_session, adjfilesize, (char**) &adjdata); iomgr->launch_stream_reader(&adj_stream_session); /* Initialize edge data asynchonous reading */ if (!only_adjacency) { edatafilesize = get_shard_edata_filesize<ET>(filename_edata); load_edata(); } } inline void check_stream_progress(int toread, size_t pos) { if (adj_stream_session.curpos == adjfilesize) return; while(adj_stream_session.curpos < toread+pos) { usleep(20000); if (adj_stream_session.curpos == adjfilesize) return; } } void load_vertices(vid_t window_st, vid_t window_en, std::vector<svertex_t> & prealloc, bool inedges=true, bool outedges=true) { /* Find file size */ m.start_time("memoryshard_create_edges"); assert(adjdata != NULL); // Now start creating vertices uint8_t * ptr = adjdata; uint8_t * end = ptr + adjfilesize; vid_t vid = 0; edgeptr = 0; streaming_offset = 0; streaming_offset_vid = 0; streaming_offset_edge_ptr = 0; range_start_offset = adjfilesize; range_start_edge_ptr = edatafilesize; bool setoffset = false; bool setrangeoffset = false; while (ptr < end) { check_stream_progress(6, ptr-adjdata); // read at least 6 bytes if (!setoffset && vid > range_end) { // This is where streaming should continue. Notice that because of the // non-zero counters, this might be a bit off. 
streaming_offset = ptr-adjdata; streaming_offset_vid = vid; streaming_offset_edge_ptr = edgeptr; setoffset = true; } if (!setrangeoffset && vid>=range_st) { range_start_offset = ptr-adjdata; range_start_edge_ptr = edgeptr; setrangeoffset = true; } uint8_t ns = *ptr; int n; ptr += sizeof(uint8_t); if (ns == 0x00) { // next value tells the number of vertices with zeros uint8_t nz = *ptr; ptr += sizeof(uint8_t); vid++; vid += nz; continue; } if (ns == 0xff) { // If 255 is not enough, then stores a 32-bit integer after. n = *((uint32_t*)ptr); ptr += sizeof(uint32_t); } else { n = ns; } svertex_t* vertex = NULL; if (vid>=window_st && vid <=window_en) { // TODO: Make more efficient vertex = &prealloc[vid-window_st]; if (!vertex->scheduled) vertex = NULL; } check_stream_progress(n * 4, ptr - adjdata); bool any_edges = false; while(--n>=0) { int blockid = (int) (edgeptr / blocksize); if (!async_edata_loading && !only_adjacency) { /* Wait until blocks loaded (non-asynchronous version) */ while(doneptr[edgeptr / blocksize] != 0) { usleep(10); } } vid_t target = *((vid_t*) ptr); ptr += sizeof(vid_t); if (vertex != NULL && outedges) { char * eptr = (only_adjacency ? NULL : &(edgedata[blockid][edgeptr % blocksize])); vertex->add_outedge(target, (only_adjacency ? NULL : (ET*) eptr), false); } if (target >= window_st) { if (target <= window_en) { /* In edge */ if (inedges) { svertex_t & dstvertex = prealloc[target - window_st]; if (dstvertex.scheduled) { any_edges = true; // assert(only_adjacency || edgeptr < edatafilesize); char * eptr = (only_adjacency ? NULL : &(edgedata[blockid][edgeptr % blocksize])); dstvertex.add_inedge(vid, (only_adjacency ? NULL : (ET*) eptr), false); dstvertex.parallel_safe = dstvertex.parallel_safe && (vertex == NULL); // Avoid if } } } else { // Note, we cannot skip if there can be "special edges". FIXME so dirty. 
// This vertex has no edges any more for this window, bail out if (vertex == NULL) { ptr += sizeof(vid_t) * n; edgeptr += (n + 1) * sizeof(ET); break; } } } edgeptr += sizeof(ET); } if (any_edges && vertex != NULL) { vertex->parallel_safe = false; } vid++; } m.stop_time("memoryshard_create_edges", false); } size_t offset_for_stream_cont() { return streaming_offset; } vid_t offset_vid_for_stream_cont() { return streaming_offset_vid; } size_t edata_ptr_for_stream_cont() { return streaming_offset_edge_ptr; } }; }; #endif #endif
09jijiangwen-download
src/shards/memoryshard.hpp
C++
asf20
16,526
/** * @file * @author Aapo Kyrola <akyrola@cs.cmu.edu> * @version 1.0 * * @section LICENSE * * Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University] * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * @section DESCRIPTION * * This header includes all the main headers needed for a GraphChi * program. */ #ifndef GRAPHCHI_DEF_ALLBASIC_INCLUDES #define GRAPHCHI_DEF_ALLBASIC_INCLUDES #include <omp.h> #include <sstream> #include "api/chifilenames.hpp" #include "api/graphchi_context.hpp" #include "api/graphchi_program.hpp" #include "api/graph_objects.hpp" #include "api/ischeduler.hpp" #include "api/vertex_aggregator.hpp" #include "engine/graphchi_engine.hpp" #include "logger/logger.hpp" #include "metrics/metrics.hpp" #include "metrics/reps/basic_reporter.hpp" #include "metrics/reps/file_reporter.hpp" #include "metrics/reps/html_reporter.hpp" #include "preprocessing/conversions.hpp" #include "util/cmdopts.hpp" namespace graphchi { /** * Helper for metrics. 
*/ static VARIABLE_IS_NOT_USED void metrics_report(metrics &m); static VARIABLE_IS_NOT_USED void metrics_report(metrics &m) { std::string reporters = get_option_string("metrics.reporter", "console"); char * creps = (char*)reporters.c_str(); const char * delims = ","; char * t = strtok(creps, delims); while(t != NULL) { std::string repname(t); if (repname == "basic" || repname == "console") { basic_reporter rep; m.report(rep); } else if (repname == "file") { file_reporter rep(get_option_string("metrics.reporter.filename", "metrics.txt")); m.report(rep); } else if (repname == "html") { html_reporter rep(get_option_string("metrics.reporter.htmlfile", "metrics.html")); m.report(rep); } else { logstream(LOG_WARNING) << "Could not find metrics reporter with name [" << repname << "], ignoring." << std::endl; } t = strtok(NULL, delims); } } }; #endif
09jijiangwen-download
src/graphchi_basic_includes.hpp
C++
asf20
2,698
/** * @file * @author Aapo Kyrola <akyrola@cs.cmu.edu> * @version 1.0 * * @section LICENSE * * Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University] * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * @section DESCRIPTION * * Command line options. */ #ifndef GRAPHCHI_CMDOPTS_DEF #define GRAPHCHI_CMDOPTS_DEF #include <string> #include <iostream> #include <stdint.h> #include "api/chifilenames.hpp" #include "util/configfile.hpp" namespace graphchi { /** GNU COMPILER HACK TO PREVENT IT FOR COMPILING METHODS WHICH ARE NOT USED IN THE PARTICULAR APP BEING BUILT */ #ifdef __GNUC__ #define VARIABLE_IS_NOT_USED __attribute__ ((unused)) #else #define VARIABLE_IS_NOT_USED #endif static bool _cmd_configured = false; static int _argc; static char **_argv; static std::map<std::string, std::string> conf; static void VARIABLE_IS_NOT_USED set_conf(std::string key, std::string value) { conf[key] = value; } // Config file static std::string VARIABLE_IS_NOT_USED get_config_option_string(const char *option_name) { if (conf.find(option_name) != conf.end()) { return conf[option_name]; } else { std::cout << "ERROR: could not find option " << option_name << " from config."; assert(false); } } static std::string VARIABLE_IS_NOT_USED get_config_option_string(const char *option_name, std::string default_value) { if (conf.find(option_name) != conf.end()) { return conf[option_name]; } else { return default_value; } } static int VARIABLE_IS_NOT_USED 
get_config_option_int(const char *option_name, int default_value) {
    // Integer lookup from the loaded config map; falls back to the
    // caller-supplied default when the option is absent.
    if (conf.find(option_name) != conf.end()) {
        return atoi(conf[option_name].c_str());
    } else {
        return default_value;
    }
}

// Integer lookup from the config map; aborts (assert) if the option is missing.
static int VARIABLE_IS_NOT_USED get_config_option_int(const char *option_name) {
    if (conf.find(option_name) != conf.end()) {
        return atoi(conf[option_name].c_str());
    } else {
        std::cout << "ERROR: could not find option " << option_name << " from config.";
        assert(false);
    }
}

// 64-bit unsigned lookup from the config map with a fallback default.
static uint64_t VARIABLE_IS_NOT_USED get_config_option_long(const char *option_name, uint64_t default_value) {
    if (conf.find(option_name) != conf.end()) {
        return atol(conf[option_name].c_str());
    } else {
        return default_value;
    }
}

// Floating-point lookup from the config map with a fallback default.
static double VARIABLE_IS_NOT_USED get_config_option_double(const char *option_name, double default_value) {
    if (conf.find(option_name) != conf.end()) {
        return atof(conf[option_name].c_str());
    } else {
        return default_value;
    }
}

static void set_argc(int argc, const char ** argv);

// Records argc/argv for later option queries, loads the configuration
// files, and copies any --key=value style arguments into the config map
// (echoing each pair to stdout).
static void set_argc(int argc, const char ** argv) {
    _argc = argc;
    _argv = (char**)argv;
    _cmd_configured = true;
    conf = loadconfig(filename_config_local(), filename_config());
    /* Load --key=value type arguments into the conf map */
    std::string prefix = "--";
    for (int i = 1; i < argc; i++) {
        std::string arg = std::string(_argv[i]);
        if (arg.substr(0, prefix.size()) == prefix) {
            arg = arg.substr(prefix.size());
            size_t a = arg.find_first_of("=", 0);
            if (a != arg.npos) {
                std::string key = arg.substr(0, a);
                std::string val = arg.substr(a + 1);
                std::cout << "[" << key << "]" << " => " << "[" << val << "]" << std::endl;
                conf[key] = val;
            }
        }
    }
}

static void graphchi_init(int argc, const char ** argv);

// Public entry point for initializing command-line/config handling.
static void graphchi_init(int argc, const char ** argv) {
    set_argc(argc, argv);
}

// Prints a warning (does not abort) if options are queried before
// graphchi_init()/set_argc() has been called.
static void check_cmd_init() {
    if (!_cmd_configured) {
        std::cout << "ERROR: command line options not initialized." << std::endl;
        std::cout << " You need to call set_argc() in the beginning of the program."
            << std::endl;
    }
}

// String option: scans argv back-to-front for "name value" pairs (so the
// last occurrence on the command line wins), then falls back to the config map.
static std::string VARIABLE_IS_NOT_USED get_option_string(const char *option_name, std::string default_value) {
    check_cmd_init();
    int i;
    for (i = _argc - 2; i >= 0; i -= 1)
        if (strcmp(_argv[i], option_name) == 0)
            return std::string(_argv[i + 1]);
    return get_config_option_string(option_name, default_value);
}

// As above, but with no default: the config-map lookup aborts when missing.
static std::string VARIABLE_IS_NOT_USED get_option_string(const char *option_name) {
    int i;
    check_cmd_init();
    for (i = _argc - 2; i >= 0; i -= 1)
        if (strcmp(_argv[i], option_name) == 0)
            return std::string(_argv[i + 1]);
    return get_config_option_string(option_name);
}

// String option that, when neither the command line nor the config file
// provides a value, prompts the user interactively on stdin.
static std::string VARIABLE_IS_NOT_USED get_option_string_interactive(const char *option_name, std::string options) {
    int i;
    check_cmd_init();
    for (i = _argc - 2; i >= 0; i -= 1)
        if (strcmp(_argv[i], option_name) == 0)
            return std::string(_argv[i + 1]);
    if (conf.find(option_name) != conf.end()) {
        return conf[option_name];
    }
    std::cout << "Please enter value for command-line argument [" << std::string(option_name) << "]"<< std::endl;
    std::cout << " (Options are: " << options << ")" << std::endl;
    std::string val;
    std::cin >> val;
    return val;
}

// Integer option: command line first, then config map with default.
static int VARIABLE_IS_NOT_USED get_option_int(const char *option_name, int default_value) {
    int i;
    check_cmd_init();
    for (i = _argc - 2; i >= 0; i -= 1)
        if (strcmp(_argv[i], option_name) == 0)
            return atoi(_argv[i + 1]);
    return get_config_option_int(option_name, default_value);
}

// Integer option with no default; missing option aborts in the config lookup.
static int VARIABLE_IS_NOT_USED get_option_int(const char *option_name) {
    int i;
    check_cmd_init();
    for (i = _argc - 2; i >= 0; i -= 1)
        if (strcmp(_argv[i], option_name) == 0)
            return atoi(_argv[i + 1]);
    return get_config_option_int(option_name);
}

// 64-bit unsigned option: command line first, then config map with default.
static uint64_t VARIABLE_IS_NOT_USED get_option_long(const char *option_name, uint64_t default_value) {
    int i;
    check_cmd_init();
    for (i = _argc - 2; i >= 0; i -= 1)
        if (strcmp(_argv[i], option_name) == 0)
            return atol(_argv[i + 1]);
    return get_config_option_long(option_name, default_value);
}

// Float option: command line first, then config map (parsed as double) with default.
static float VARIABLE_IS_NOT_USED
get_option_float(const char *option_name, float default_value) {
    int i;
    check_cmd_init();
    for (i = _argc - 2; i >= 0; i -= 1)
        if (strcmp(_argv[i], option_name) == 0)
            return (float)atof(_argv[i + 1]);
    return (float) get_config_option_double(option_name, default_value);
}

} // End namespace

#endif
09jijiangwen-download
src/util/cmdopts.hpp
C++
asf20
7,963
/**
 * @file
 * @author  Aapo Kyrola <akyrola@cs.cmu.edu>
 * @version 1.0
 *
 * @section LICENSE
 *
 * Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University]
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @section DESCRIPTION
 *
 * Tools for listing the TOP K values from a vertex data file.
 */

#ifndef DEF_GRAPHCHI_TOPLIST
#define DEF_GRAPHCHI_TOPLIST

#include <vector>
#include <algorithm>
#include <errno.h>
#include <assert.h>

#include "io/stripedio.hpp"
#include "logger/logger.hpp"
#include "util/merge.hpp"
#include "util/ioutil.hpp"
#include "util/qsort.hpp"
#include "api/chifilenames.hpp"
#include "engine/auxdata/vertex_data.hpp"

namespace graphchi {

    // A (vertex-id, value) pair used for ranking vertices by value.
    template <typename VertexDataType>
    struct vertex_value {
        vid_t vertex;
        VertexDataType value;
        vertex_value() {}
        vertex_value(vid_t v, VertexDataType x) : vertex(v), value(x) {}
    };

    // Descending-order comparator on the value field (requires operator> on
    // VertexDataType).
    template <typename VertexDataType>
    bool vertex_value_greater(const vertex_value<VertexDataType> &a, const vertex_value<VertexDataType> &b) {
        return a.value > b.value;
    }

    /**
     * Reads the vertex data file and returns the top `ntop` values.
     * The vertex value type must be given as a template parameter.
     * Implemented to consume little memory: the file is streamed in
     * fixed-size windows rather than loaded whole (unless ntop = nvertices).
     *
     * @param basefilename name of the graph
     * @param ntop number of top values to return (clamped to the number of vertices)
     * @param from first vertex to include (default, 0); used only as an offset
     *        added to the reported vertex ids
     * @param to last vertex to include (default, all).
     *        NOTE(review): `to` is currently never read — the scan always covers
     *        the whole file; confirm intended semantics before relying on it.
     * @return a vector of the top ntop (vertex, value) pairs, best first
     */
    template <typename VertexDataType>
    std::vector<vertex_value<VertexDataType> > get_top_vertices(std::string basefilename, int ntop, vid_t from=0, vid_t to=0) {
        typedef vertex_value<VertexDataType> vv_t;

        /* Initialize striped IO manager */
        metrics m("toplist");
        stripedio * iomgr = new stripedio(m);

        /* Initialize the vertex-data reader */
        vid_t readwindow = 1024 * 1024;   // vertices per streaming window
        size_t numvertices = get_num_vertices(basefilename);
        vertex_data_store<VertexDataType> * vertexdata =
            new vertex_data_store<VertexDataType>(basefilename, numvertices, iomgr);

        if ((size_t)ntop > numvertices) {
            ntop = (int)numvertices;
        }

        /* Initialize buffers. calloc zero-fills, so unwritten tail entries of
           topbuf are (vertex 0, default value) pairs. */
        vv_t * buffer_idxs = (vv_t*) calloc(readwindow, sizeof(vv_t));
        vv_t * topbuf = (vv_t*) calloc(ntop, sizeof(vv_t));
        vv_t * mergearr = (vv_t*) calloc(ntop * 2, sizeof(vv_t));

        /* Iterate the vertex values and maintain the top-list */
        size_t idx = 0;
        vid_t st = 0;
        vid_t en = numvertices - 1;
        int count = 0;                    // number of windows processed so far
        while(st <= numvertices - 1) {
            en = st + readwindow - 1;
            if (en >= numvertices - 1) en = numvertices - 1;

            /* Load the vertex values for this window */
            vertexdata->load(st, en);

            int nt = en - st + 1;
            int k = 0;
            VertexDataType minima = VertexDataType();
            if (count > 0) {
                minima = topbuf[ntop - 1].value; // Minimum value that should be even considered
            }
            /* Collect window entries that can possibly enter the top list */
            for(int j=0; j < nt; j++) {
                VertexDataType& val = *vertexdata->vertex_data_ptr(j + st);
                if (count == 0 || (val > minima)) {
                    buffer_idxs[k] = vv_t((vid_t)idx + from, val);
                    k++;
                }
                idx++;
            }
            nt = k; /* How many were actually included */

            /* Sort the candidates in descending order of value */
            quickSort(buffer_idxs, nt, vertex_value_greater<VertexDataType>);

            /* Merge the window's top with the running top */
            if (count == 0) {
                /* Nothing to merge, just copy.
                   NOTE(review): copies ntop entries even if nt < ntop; the
                   tail then holds zero-initialized pairs from calloc. */
                memcpy(topbuf, buffer_idxs, ntop * sizeof(vv_t));
            } else {
                // void merge(ET* S1, int l1, ET* S2, int l2, ET* R, F f) {
                merge<vv_t>(topbuf, ntop, buffer_idxs, std::min(ntop, nt), mergearr, vertex_value_greater<VertexDataType>);
                memcpy(topbuf, mergearr, ntop * sizeof(vv_t));   // keep only the best ntop
            }
            count++;
            st += readwindow;
        }

        /* Copy result into a vector and release resources */
        std::vector< vv_t > ret;
        for(int i=0; i < ntop; i++) {
            ret.push_back(topbuf[i]);
        }
        free(buffer_idxs);
        free(mergearr);
        free(topbuf);

        delete vertexdata;
        delete iomgr;
        return ret;
    }

};

#endif
09jijiangwen-download
src/util/toplist.hpp
C++
asf20
5,370
// // readdeg.cpp // graphchi_xcode // // Created by Aapo Kyrola on 9/14/12. // Copyright 2012 __MyCompanyName__. All rights reserved. // #include <iostream> #include <fstream> struct degree { int indegree; int outdegree; }; int main(int argc, const char ** argv) { FILE * f = fopen(argv[1], "r"); int wanted = atoi(argv[2]); size_t nout = 0; size_t nin = 0; size_t nonz = 0; size_t tot = 0; degree d; int j = 0; while(!feof(f)) { fread(&d, sizeof(degree), 1, f); nout += d.outdegree; nin += d.indegree; if (wanted == j) { std::cout << wanted << " indeg: " << d.indegree << " outdeg: " << d.outdegree << std::endl; break; } j++; } std::cout << "Total in: " << nin << " total out: " << nout << std::endl; std::cout << "Non-singleton vertices: " << nonz << std::endl; std::cout << "Total vertices: " << tot << std::endl; }
09jijiangwen-download
src/util/readdeg.cpp
C++
asf20
976
#ifndef SYNCHRONIZED_QUEUE_HPP #define SYNCHRONIZED_QUEUE_HPP #include <queue> #include "pthread_tools.hpp" // From graphlab namespace graphchi { template <typename T> class synchronized_queue { public: synchronized_queue() { }; ~synchronized_queue() { }; void push(const T &item) { _queuelock.lock(); _queue.push(item); _queuelock.unlock(); } bool safepop(T * ret) { _queuelock.lock(); if (_queue.size() == 0) { _queuelock.unlock(); return false; } *ret = _queue.front(); _queue.pop(); _queuelock.unlock(); return true; } T pop() { _queuelock.lock(); T t = _queue.front(); _queue.pop(); _queuelock.unlock(); return t; } size_t size() const{ return _queue.size(); } private: std::queue<T> _queue; spinlock _queuelock; }; } #endif
09jijiangwen-download
src/util/synchronized_queue.hpp
C++
asf20
1,328
/**
 * @file
 * @author  Aapo Kyrola <akyrola@cs.cmu.edu>
 * @version 1.0
 *
 * @section LICENSE
 *
 * Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University]
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @section DESCRIPTION
 *
 * I/O Utils.
 */

#ifndef DEF_IOUTIL_HPP
#define DEF_IOUTIL_HPP

#include <unistd.h>
#include <assert.h>
#include <stdlib.h>
#include <errno.h>
#include <zlib.h>

// Reads given number of bytes to a buffer.
// Loops pread() until the full nbytes at offset off are read; aborts
// via assert() on I/O error or premature end-of-file.
template <typename T>
void preada(int f, T * tbuf, size_t nbytes, size_t off) {
    size_t nread = 0;
    char * buf = (char*)tbuf;
    while(nread<nbytes) {
        ssize_t a = pread(f, buf, nbytes - nread, off + nread);
        if (a == (-1)) {
            std::cout << "Error, could not read: " << strerror(errno) << "; file-desc: " << f << std::endl;
            std::cout << "Pread arguments: " << f << " tbuf: " << tbuf << " nbytes: " << nbytes << " off: " << off << std::endl;
            assert(a != (-1));
        }
        assert(a>0);
        buf += a;
        nread += a;
    }
    assert(nread <= nbytes);
}

// Like preada(), but tolerates a short file: if pread() reports
// end-of-file, the unread tail of the buffer is zero-filled.
// NOTE(review): `a` is size_t here, so a pread() error return of -1
// wraps to SIZE_MAX and is treated as progress rather than detected.
template <typename T>
void preada_trunc(int f, T * tbuf, size_t nbytes, size_t off) {
    size_t nread = 0;
    char * buf = (char*)tbuf;
    while(nread<nbytes) {
        size_t a = pread(f, buf, nbytes-nread, off+nread);
        if (a == 0) {
            // End of file: zero the remainder of the buffer.
            // std::cout << "WARNING: file was not long enough - filled with zeros." << std::endl;
            memset(buf, 0, nbytes-nread);
            return;
        }
        buf += a;
        nread += a;
    }
}

// Reads the entire file into a freshly malloc()ed buffer stored in *buf
// and returns the file size in bytes. The caller owns (and must free())
// the buffer.
template <typename T>
size_t readfull(int f, T ** buf) {
    off_t sz = lseek(f, 0, SEEK_END);   // file size
    lseek(f, 0, SEEK_SET);
    *buf = (char*)malloc(sz);
    preada(f, *buf, sz, 0);
    return sz;
}

// Positional write: loops pwrite() until all nbytes starting at off are
// written; aborts via assert() on error.
template <typename T>
void pwritea(int f, T * tbuf, size_t nbytes, size_t off) {
    size_t nwritten = 0;
    assert(f>0);
    char * buf = (char*)tbuf;
    while(nwritten<nbytes) {
        size_t a = pwrite(f, buf, nbytes-nwritten, off+nwritten);
        if (a == size_t(-1)) {
            logstream(LOG_ERROR) << "f:" << f << " nbytes: " << nbytes << " written: " << nwritten << " off:" << off <<
                " f: " << f << " error:" << strerror(errno) << std::endl;
            assert(false);
        }
        assert(a>0);
        buf += a;
        nwritten += a;
    }
}

// Ensures the named file exists and has room for exactly nelements
// records of type T by creating it if needed and ftruncate()ing it.
template <typename T>
void checkarray_filesize(std::string fname, size_t nelements) {
    // Check the vertex file is correct size
    int f = open(fname.c_str(), O_RDWR | O_CREAT, S_IROTH | S_IWOTH | S_IWUSR | S_IRUSR);
    if (f < 1) {
        logstream(LOG_ERROR) << "Error initializing the data-file: " << fname << " error:" << strerror(errno) << std::endl;
    }
    assert(f>0);
    int err = ftruncate(f, nelements * sizeof(T));
    if (err != 0) {
        logstream(LOG_ERROR) << "Error in adjusting file size: " << fname << " to size: " << nelements * sizeof(T)
            << " error:" << strerror(errno) << std::endl;
    }
    assert(err == 0);
    close(f);
}

// Sequential write at the current file position; loops write() until
// all nbytes are written.
// NOTE(review): assert(a>0) precedes the explicit -1 branch, so on error
// the assert fires first and the logging branch is unreachable.
template <typename T>
void writea(int f, T * tbuf, size_t nbytes) {
    size_t nwritten = 0;
    char * buf = (char*)tbuf;
    while(nwritten<nbytes) {
        size_t a = write(f, buf, nbytes-nwritten);
        assert(a>0);
        if (a == size_t(-1)) {
            logstream(LOG_ERROR) << "Could not write " << (nbytes-nwritten) << " bytes!" << " error:" <<  strerror(errno) << std::endl;
            assert(false);
        }
        buf += a;
        nwritten += a;
    }
}

/*
 * COMPRESSED
 */

// Deflates nbytes from tbuf and writes the compressed stream to file
// descriptor f (the file is first truncated to zero). Returns the number
// of compressed bytes written. With GRAPHCHI_DISABLE_COMPRESSION defined,
// falls back to a plain uncompressed writea().
template <typename T>
size_t write_compressed(int f, T * tbuf, size_t nbytes) {
#ifndef GRAPHCHI_DISABLE_COMPRESSION
    unsigned char * buf = (unsigned char*)tbuf;
    int ret;
    unsigned have;
    z_stream strm;
    // Output chunk at least 4 MB, or the whole input size if larger.
    int CHUNK = (int) std::max((size_t)4096 * 1024, nbytes);
    unsigned char * out = (unsigned char *) malloc(CHUNK);
    lseek(f, 0, SEEK_SET);

    /* allocate deflate state */
    strm.zalloc = Z_NULL;
    strm.zfree = Z_NULL;
    strm.opaque = Z_NULL;
    ret = deflateInit(&strm, Z_BEST_SPEED);
    if (ret != Z_OK)
        assert(false);

    /* compress until end of input */
    strm.avail_in = (int) nbytes;
    strm.next_in = buf;

    int trerr = ftruncate(f, 0);   // start the output file from scratch
    assert (trerr == 0);
    size_t totwritten = 0;
    /* run deflate() on input until output buffer not full, finish
       compression if all of source has been read in */
    do {
        strm.avail_out = CHUNK;
        strm.next_out = out;
        ret = deflate(&strm, Z_FINISH);
        assert(ret != Z_STREAM_ERROR);  /* state not clobbered */
        have = CHUNK - strm.avail_out;
        if (write(f, out, have) != have) {
            (void)deflateEnd(&strm);
            assert(false);
        }
        totwritten += have;
    } while (strm.avail_out == 0);
    assert(strm.avail_in == 0);     /* all input will be used */
    assert(ret == Z_STREAM_END);    /* stream will be complete */

    /* clean up and return */
    (void)deflateEnd(&strm);
    free(out);
    return totwritten;
#else
    writea(f, tbuf, nbytes);
    return nbytes;
#endif
}

/* Zlib-inflated read. Assume tbuf is correctly sized memory block:
   the caller must know the uncompressed size in advance. With
   GRAPHCHI_DISABLE_COMPRESSION defined, falls back to a plain preada(). */
template <typename T>
void read_compressed(int f, T * tbuf, size_t nbytes) {
#ifndef GRAPHCHI_DISABLE_COMPRESSION
    unsigned char * buf = (unsigned char*)tbuf;
    int ret;
    unsigned have;
    z_stream strm;
    int CHUNK = (int) std::max((size_t)4096 * 1024, nbytes);
    size_t fsize = lseek(f, 0, SEEK_END);   // read the whole file into memory
    unsigned char * in = (unsigned char *) malloc(fsize);
    lseek(f, 0, SEEK_SET);

    /* allocate inflate state */
    strm.zalloc = Z_NULL;
    strm.zfree = Z_NULL;
    strm.opaque = Z_NULL;
    strm.avail_in = 0;
    strm.next_in = Z_NULL;
    ret = inflateInit(&strm);
    if (ret != Z_OK)
        assert(false);

    /* decompress until deflate stream ends or end of file */
    do {
        /* slurp the remaining file contents into the input buffer */
        ssize_t a = 0;
        do {
            a = read(f, in + strm.avail_in, fsize - strm.avail_in);
            //fread(in, 1, CHUNK, source);
            strm.avail_in += (int) a;
            assert(a != (ssize_t)(-1));
        } while (a > 0);

        if (strm.avail_in == 0)
            break;
        strm.next_in = in;

        /* run inflate() on input until output buffer not full */
        do {
            strm.avail_out = CHUNK;
            strm.next_out = buf;
            ret = inflate(&strm, Z_NO_FLUSH);
            assert(ret != Z_STREAM_ERROR);  /* state not clobbered */
            switch (ret) {
                case Z_NEED_DICT:
                    ret = Z_DATA_ERROR;     /* and fall through */
                case Z_DATA_ERROR:
                case Z_MEM_ERROR:
                    assert(false);
            }
            have = CHUNK - strm.avail_out;
            buf += have;
        } while (strm.avail_out == 0);

        /* done when inflate() says it's done */
    } while (ret != Z_STREAM_END);
    // std::cout << "Read: " << (buf - (unsigned char*)tbuf) << std::endl;

    /* clean up and return */
    (void)inflateEnd(&strm);
    free(in);
#else
    preada(f, tbuf, nbytes, 0);
#endif
}

#endif
09jijiangwen-download
src/util/ioutil.hpp
C++
asf20
7,743
#ifndef ATOMIC_HPP
#define ATOMIC_HPP

// Note, stolen from GraphLab.
namespace graphchi {

    /**
     * \brief atomic object toolkit
     *
     * A templated class for creating atomic numbers, built on the GCC
     * __sync_* builtins. T should be a type those builtins support
     * (integral / pointer-sized).
     */
    template<typename T>
    class atomic{
    public:
        volatile T value;   // shared counter, mutated only through __sync ops
        atomic(const T& value = 0) : value(value) { }
        T inc() { return __sync_add_and_fetch(&value, 1); }
        T dec() { return __sync_sub_and_fetch(&value, 1); }
        //! Lvalue implicit cast (plain read of the current value)
        operator T() const { return value; }
        //! Performs an atomic increment by 1, returning the new value
        T operator++() { return inc(); }
        //! Performs an atomic decrement by 1, returning the new value
        T operator--() { return dec(); }
        //! Performs an atomic increment by 'val', returning the new value
        T inc(const T val) { return __sync_add_and_fetch(&value, val); }
        //! Performs an atomic decrement by 'val', returning the new value
        T dec(const T val) { return __sync_sub_and_fetch(&value, val); }
        //! Performs an atomic increment by 'val', returning the new value
        T operator+=(const T val) { return inc(val); }
        //! Performs an atomic decrement by 'val', returning the new value
        T operator-=(const T val) { return dec(val); }
        //! Performs an atomic increment by 1, returning the old value
        T inc_ret_last() { return __sync_fetch_and_add(&value, 1); }
        //! Performs an atomic decrement by 1, returning the old value
        T dec_ret_last() { return __sync_fetch_and_sub(&value, 1); }
        //! Performs an atomic increment by 1, returning the old value
        T operator++(int) { return inc_ret_last(); }
        //! Performs an atomic decrement by 1, returning the old value
        T operator--(int) { return dec_ret_last(); }
        //! Performs an atomic increment by 'val', returning the old value
        T inc_ret_last(const T val) { return __sync_fetch_and_add(&value, val); }
        //! Performs an atomic decrement by 'val', returning the old value
        T dec_ret_last(const T val) { return __sync_fetch_and_sub(&value, val); }
        //! Performs an atomic exchange with 'val', returning the previous value
        T exchange(const T val) { return __sync_lock_test_and_set(&value, val); }
    };

    /** atomic instruction that is equivalent to the following:
          if a==oldval, then {
              a = newval;
              return true;
          }
          return false;
     */
    template<typename T>
    bool atomic_compare_and_swap(T& a,
                                 const T &oldval,
                                 const T &newval) {
        return __sync_bool_compare_and_swap(&a, oldval, newval);
    };

    // double specialization: performs the CAS on the 64-bit object
    // representation of the value.
    // NOTE(review): the reinterpret_cast type punning here is technically
    // UB under strict aliasing; it is a common trick for __sync CAS on
    // floating point, but verify the build flags tolerate it.
    template <>
    inline bool atomic_compare_and_swap(double& a,
                                        const double &oldval,
                                        const double &newval) {
        return __sync_bool_compare_and_swap(reinterpret_cast<uint64_t*>(&a),
                                            *reinterpret_cast<const uint64_t*>(&oldval),
                                            *reinterpret_cast<const uint64_t*>(&newval));
    };

    // float specialization: CAS on the 32-bit object representation.
    template <>
    inline bool atomic_compare_and_swap(float& a,
                                        const float &oldval,
                                        const float &newval) {
        return __sync_bool_compare_and_swap(reinterpret_cast<uint32_t*>(&a),
                                            *reinterpret_cast<const uint32_t*>(&oldval),
                                            *reinterpret_cast<const uint32_t*>(&newval));
    };

    // Atomically stores b into a; a's previous value is left in b.
    template<typename T>
    void atomic_exchange(T& a, T& b) {
        b =__sync_lock_test_and_set(&a, b);
    };

}
#endif
09jijiangwen-download
src/util/atomic.hpp
C++
asf20
3,764
/**
 * @file
 * @author  Aapo Kyrola <akyrola@cs.cmu.edu>
 * @version 1.0
 *
 * @section LICENSE
 *
 * Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University]
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @section DESCRIPTION
 *
 * Analyses output of label propagation algorithms such as connected components
 * and community detection. Memory efficient implementation.
 *
 * @author Aapo Kyrola
 */

#include <vector>
#include <algorithm>
#include <errno.h>
#include <assert.h>

#include "io/stripedio.hpp"
#include "logger/logger.hpp"
#include "util/merge.hpp"
#include "util/ioutil.hpp"
#include "util/qsort.hpp"
#include "api/chifilenames.hpp"
#include "engine/auxdata/vertex_data.hpp"

#ifndef DEF_GRAPHCHI_LABELANALYSIS
#define DEF_GRAPHCHI_LABELANALYSIS

using namespace graphchi;

// One (label, frequency) entry of the label histogram.
template <typename LabelType>
struct labelcount_tt {
    LabelType label;
    unsigned int count; // Count excludes the vertex which has its own id as the label. (Important optimization)
    labelcount_tt(LabelType l, int c) : label(l), count(c) {}
    labelcount_tt() {}
};

// Descending-order comparator by frequency.
template <typename LabelType>
bool label_count_greater(const labelcount_tt<LabelType> &a, const labelcount_tt<LabelType> &b) {
    return a.count > b.count;
}

/**
 * Streams the vertex-data file in windows, builds a histogram of label
 * frequencies, writes "<basefilename>_components.txt" as "label,count"
 * lines, and prints the `printtop` most frequent labels to stdout.
 *
 * NOTE: this implementation is quite a mouthful. A cleaner implementation
 * could be done with a map, but STL map takes too much memory, and the
 * author wanted to avoid a Boost dependency (boost::unordered_map).
 *
 * @param basefilename name of the graph
 * @param printtop how many of the most frequent labels to print (default 20)
 */
template <typename LabelType>
void analyze_labels(std::string basefilename, int printtop = 20) {
    typedef labelcount_tt<LabelType> labelcount_t;
    metrics m("labelanalysis");
    stripedio * iomgr = new stripedio(m);

    /* Initialize the vertex-data reader */
    vid_t readwindow = 1024 * 1024;
    vid_t numvertices = (vid_t) get_num_vertices(basefilename);
    vertex_data_store<LabelType> * vertexdata =
        new vertex_data_store<LabelType>(basefilename, numvertices, iomgr);

    std::vector<labelcount_t> curlabels;   // running histogram, sorted by label
    bool first = true;
    vid_t curvid = 0;
    LabelType * buffer = (LabelType*) calloc(readwindow, sizeof(LabelType));

    /* Iterate the vertex values window by window */
    vid_t st = 0;
    vid_t en = numvertices - 1;

    while(st <= numvertices - 1) {
        en = st + readwindow - 1;
        if (en >= numvertices - 1) en = numvertices - 1;

        /* Load the vertex values */
        vertexdata->load(st, en);

        int nt = en - st + 1;

        /* Mark vertices with its own label with 0xffffffff so they will be ignored */
        for(int i=0; i < nt; i++) {
            LabelType l = *vertexdata->vertex_data_ptr(i + st);
            if (l == curvid) buffer[i] = 0xffffffff;
            else buffer[i] = l;
            curvid++;
        }

        /* First sort the buffer (sentinels 0xffffffff sort to the end) */
        quickSort(buffer, nt, std::less<LabelType>());

        /* Then collect runs of equal labels into (label, count) pairs */
        std::vector<labelcount_t> newlabels;
        newlabels.reserve(nt);
        vid_t lastlabel = 0xffffffff;
        for(int i=0; i < nt; i++) {
            if (buffer[i] != 0xffffffff) {
                if (buffer[i] != lastlabel) {
                    newlabels.push_back(labelcount_t(buffer[i], 1));
                } else {
                    newlabels[newlabels.size() - 1].count ++;
                }
                lastlabel = buffer[i];
            }
        }

        if (first) {
            for(int i=0; i < (int)newlabels.size(); i++) {
                curlabels.push_back(newlabels[i]);
            }
        } else {
            /* Merge current and new label counts (both sorted by label) */
            int cl = 0;
            int nl = 0;
            std::vector< labelcount_t > merged;
            merged.reserve(curlabels.size() + newlabels.size());
            while(cl < (int)curlabels.size() && nl < (int)newlabels.size()) {
                if (newlabels[nl].label == curlabels[cl].label) {
                    merged.push_back(labelcount_t(newlabels[nl].label, newlabels[nl].count + curlabels[cl].count));
                    nl++;
                    cl++;
                } else {
                    if (newlabels[nl].label < curlabels[cl].label) {
                        merged.push_back(newlabels[nl]);
                        nl++;
                    } else {
                        merged.push_back(curlabels[cl]);
                        cl++;
                    }
                }
            }
            /* Drain whichever side has leftovers */
            while(cl < (int)curlabels.size()) merged.push_back(curlabels[cl++]);
            while(nl < (int)newlabels.size()) merged.push_back(newlabels[nl++]);
            curlabels = merged;
        }

        first = false;
        st += readwindow;
    }

    /* Sort by frequency, most common label first */
    std::sort(curlabels.begin(), curlabels.end(), label_count_greater<LabelType>);

    /* Write output file: one "label,count" line per label. The +1 adds back
       the vertex whose label equals its own id (excluded during counting). */
    std::string outname = basefilename + "_components.txt";
    FILE * resf = fopen(outname.c_str(), "w");
    if (resf == NULL) {
        logstream(LOG_ERROR) << "Could not write label outputfile : " << outname << std::endl;
        return;
    }
    for(int i=0; i < (int) curlabels.size(); i++) {
        fprintf(resf, "%u,%u\n", curlabels[i].label, curlabels[i].count + 1);
    }
    fclose(resf);

    std::cout << "Total number of different labels (components/communities): " << curlabels.size() << std::endl;
    std::cout << "List of labels was written to file: " << outname << std::endl;

    for(int i=0; i < (int)std::min((size_t)printtop, curlabels.size()); i++) {
        std::cout << (i+1) << ". label: " << curlabels[i].label << ", size: " << curlabels[i].count << std::endl;
    }

    free(buffer);
    delete vertexdata;
    delete iomgr;
}

#endif
09jijiangwen-download
src/util/labelanalysis.hpp
C++
asf20
6,348
// This code is part of the Problem Based Benchmark Suite (PBBS)
// Copyright (c) 2010 Guy Blelloch and Harsha Vardhan Simhadri and the PBBS team
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights (to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so, subject to
// the following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

#ifndef GRAPHCHI_QSORT_INCLUDED
#define GRAPHCHI_QSORT_INCLUDED

#include <algorithm>
#include <vector>

// Insertion sort for short ranges: shifts larger elements one slot to
// the right and drops each value into its place.
template <class E, class BinPred>
void insertionSort(E* A, int n, BinPred f) {
    for (int i = 1; i < n; i++) {
        E v = A[i];
        int j = i - 1;
        while (j >= 0 && f(v, A[j])) {
            A[j + 1] = A[j];
            j--;
        }
        A[j + 1] = v;
    }
}

// Threshold below which quickSort falls back to insertion sort.
#define ISORT 25

// Median of three values under the strict weak ordering f.
template <class E, class BinPred>
E median(E a, E b, E c, BinPred f) {
    if (f(a, b)) {
        if (f(b, c)) return b;
        return f(a, c) ? c : a;
    }
    if (f(a, c)) return a;
    return f(b, c) ? c : b;
}

// Partly copied from PBBS.
// Randomized three-way quicksort: after partitioning, elements equal to
// the pivot sit in [L, M) and are excluded from both recursive calls.
template <class E, class BinPred>
void quickSort(E* A, int n, BinPred f) {
    if (n < ISORT) {
        insertionSort(A, n, f);
        return;
    }
    E pivot = A[rand() % n];   // random pivot
    E* L = A;                  // elements below L are less than pivot
    E* M = A;                  // [L, M) holds elements equal to pivot
    E* R = A + n - 1;          // elements above R are greater than pivot
    while (1) {
        while (!f(pivot, *M)) {
            if (f(*M, pivot)) std::swap(*M, *(L++));
            if (M >= R) break;
            M++;
        }
        while (f(pivot, *R)) R--;
        if (M >= R) break;
        std::swap(*M, *R--);
        if (f(*M, pivot)) std::swap(*M, *(L++));
        M++;
    }
    quickSort(A, (int)(L - A), f);
    quickSort(M, (int)(A + n - M), f);   // excludes elements equal to pivot
}

#endif
09jijiangwen-download
src/util/qsort.hpp
C++
asf20
2,598
// This code is part of the Problem Based Benchmark Suite (PBBS) // Copyright (c) 2010 Guy Blelloch and Harsha Vardhan Simhadri and the PBBS team // // Permission is hereby granted, free of charge, to any person obtaining a // copy of this software and associated documentation files (the // "Software"), to deal in the Software without restriction, including // without limitation the rights (to use, copy, modify, merge, publish, // distribute, sublicense, and/or sell copies of the Software, and to // permit persons to whom the Software is furnished to do so, subject to // the following conditions: // // The above copyright notice and this permission notice shall be included // in all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS // OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF // MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND // NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE // LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION // OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION // WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #ifndef DEF_MERGE #define DEF_MERGE template <class ET, class F> void merge(ET* S1, int l1, ET* S2, int l2, ET* R, F f) { ET* pR = R; ET* pS1 = S1; ET* pS2 = S2; ET* eS1 = S1+l1; ET* eS2 = S2+l2; while (true) { *pR++ = f(*pS2,*pS1) ? *pS2++ : *pS1++; if (pS1==eS1) {std::copy(pS2,eS2,pR); break;} if (pS2==eS2) {std::copy(pS1,eS1,pR); break;} } } #endif
09jijiangwen-download
src/util/merge.hpp
C++
asf20
1,642
#ifndef DEF_PTHREAD_TOOLS_HPP #define DEF_PTHREAD_TOOLS_HPP // Stolen from GraphLab #include <cstdlib> #include <memory.h> #include <pthread.h> #include <semaphore.h> #include <sched.h> #include <signal.h> #include <sys/time.h> #include <vector> #include <cassert> #include <list> #include <iostream> #undef _POSIX_SPIN_LOCKS #define _POSIX_SPIN_LOCKS -1 /** * \file pthread_tools.hpp A collection of utilities for threading */ namespace graphchi { /** * \class mutex * * Wrapper around pthread's mutex On single core systems mutex * should be used. On multicore systems, spinlock should be used. */ class mutex { private: // mutable not actually needed mutable pthread_mutex_t m_mut; public: mutex() { int error = pthread_mutex_init(&m_mut, NULL); assert(!error); } inline void lock() const { int error = pthread_mutex_lock( &m_mut ); assert(!error); } inline void unlock() const { int error = pthread_mutex_unlock( &m_mut ); assert(!error); } inline bool try_lock() const { return pthread_mutex_trylock( &m_mut ) == 0; } ~mutex(){ int error = pthread_mutex_destroy( &m_mut ); if (error) perror("Error: failed to destroy mutex"); assert(!error); } friend class conditional; }; // End of Mutex #if _POSIX_SPIN_LOCKS >= 0 // We should change this to use a test for posix_spin_locks eventually // #ifdef __linux__ /** * \class spinlock * * Wrapper around pthread's spinlock On single core systems mutex * should be used. On multicore systems, spinlock should be used. 
* If pthread_spinlock is not available, the spinlock will be * typedefed to a mutex */ class spinlock { private: // mutable not actually needed mutable pthread_spinlock_t m_spin; public: spinlock () { int error = pthread_spin_init(&m_spin, PTHREAD_PROCESS_PRIVATE); assert(!error); } inline void lock() const { int error = pthread_spin_lock( &m_spin ); assert(!error); } inline void unlock() const { int error = pthread_spin_unlock( &m_spin ); assert(!error); } inline bool try_lock() const { return pthread_spin_trylock( &m_spin ) == 0; } ~spinlock(){ int error = pthread_spin_destroy( &m_spin ); assert(!error); } friend class conditional; }; // End of spinlock #define SPINLOCK_SUPPORTED 1 #else //! if spinlock not supported, it is typedef it to a mutex. typedef mutex spinlock; #define SPINLOCK_SUPPORTED 0 #endif /** * \class conditional * Wrapper around pthread's condition variable */ class conditional { private: mutable pthread_cond_t m_cond; public: conditional() { int error = pthread_cond_init(&m_cond, NULL); assert(!error); } inline void wait(const mutex& mut) const { int error = pthread_cond_wait(&m_cond, &mut.m_mut); assert(!error); } inline int timedwait(const mutex& mut, int sec) const { struct timespec timeout; struct timeval tv; struct timezone tz; gettimeofday(&tv, &tz); timeout.tv_nsec = 0; timeout.tv_sec = tv.tv_sec + sec; return pthread_cond_timedwait(&m_cond, &mut.m_mut, &timeout); } inline void signal() const { int error = pthread_cond_signal(&m_cond); assert(!error); } inline void broadcast() const { int error = pthread_cond_broadcast(&m_cond); assert(!error); } ~conditional() { int error = pthread_cond_destroy(&m_cond); assert(!error); } }; // End conditional /** * \class semaphore * Wrapper around pthread's semaphore */ class semaphore { private: mutable sem_t m_sem; public: semaphore() { int error = sem_init(&m_sem, 0,0); assert(!error); } inline void post() const { int error = sem_post(&m_sem); assert(!error); } inline void wait() const { int error 
= sem_wait(&m_sem); assert(!error); } ~semaphore() { int error = sem_destroy(&m_sem); assert(!error); } }; // End semaphore #define atomic_xadd(P, V) __sync_fetch_and_add((P), (V)) #define cmpxchg(P, O, N) __sync_val_compare_and_swap((P), (O), (N)) #define atomic_inc(P) __sync_add_and_fetch((P), 1) /** * \class spinrwlock * rwlock built around "spinning" * source adapted from http://locklessinc.com/articles/locks/ * "Scalable Reader-Writer Synchronization for Shared-Memory Multiprocessors" * John Mellor-Crummey and Michael Scott */ class spinrwlock { union rwticket { unsigned u; unsigned short us; __extension__ struct { unsigned char write; unsigned char read; unsigned char users; } s; }; mutable bool writing; mutable volatile rwticket l; public: spinrwlock() { memset(const_cast<rwticket*>(&l), 0, sizeof(rwticket)); } inline void writelock() const { unsigned me = atomic_xadd(&l.u, (1<<16)); unsigned char val = me >> 16; while (val != l.s.write) sched_yield(); writing = true; } inline void wrunlock() const{ rwticket t = *const_cast<rwticket*>(&l); t.s.write++; t.s.read++; *(volatile unsigned short *) (&l) = t.us; writing = false; __asm("mfence"); } inline void readlock() const { unsigned me = atomic_xadd(&l.u, (1<<16)); unsigned char val = me >> 16; while (val != l.s.read) sched_yield(); l.s.read++; } inline void rdunlock() const { atomic_inc(&l.s.write); } inline void unlock() const { if (!writing) rdunlock(); else wrunlock(); } }; #undef atomic_xadd #undef cmpxchg #undef atomic_inc /** * \class rwlock * Wrapper around pthread's rwlock */ class rwlock { private: mutable pthread_rwlock_t m_rwlock; public: rwlock() { int error = pthread_rwlock_init(&m_rwlock, NULL); assert(!error); } ~rwlock() { int error = pthread_rwlock_destroy(&m_rwlock); assert(!error); } inline void readlock() const { pthread_rwlock_rdlock(&m_rwlock); //assert(!error); } inline void writelock() const { pthread_rwlock_wrlock(&m_rwlock); //assert(!error); } inline void unlock() const { 
pthread_rwlock_unlock(&m_rwlock); //assert(!error); } inline void rdunlock() const { unlock(); } inline void wrunlock() const { unlock(); } }; // End rwlock /** * \class barrier * Wrapper around pthread's barrier */ #ifdef __linux__ /** * \class barrier * Wrapper around pthread's barrier */ class barrier { private: mutable pthread_barrier_t m_barrier; public: barrier(size_t numthreads) { pthread_barrier_init(&m_barrier, NULL, numthreads); } ~barrier() { pthread_barrier_destroy(&m_barrier); } inline void wait() const { pthread_barrier_wait(&m_barrier); } }; #else /** * \class barrier * Wrapper around pthread's barrier */ class barrier { private: mutex m; int needed; int called; conditional c; // we need the following to protect against spurious wakeups std::vector<unsigned char> waiting; public: barrier(size_t numthreads) { needed = (int)numthreads; called = 0; waiting.resize(numthreads); std::fill(waiting.begin(), waiting.end(), 0); } ~barrier() {} inline void wait() { m.lock(); // set waiting; size_t myid = called; waiting[myid] = 1; called++; if (called == needed) { // if I have reached the required limit, wait up. Set waiting // to 0 to make sure everyone wakes up called = 0; // clear all waiting std::fill(waiting.begin(), waiting.end(), 0); c.broadcast(); } else { // while no one has broadcasted, sleep while(waiting[myid]) c.wait(m); } m.unlock(); } }; #endif inline void prefetch_range(void *addr, size_t len) { char *cp; char *end = (char*)(addr) + len; for (cp = (char*)(addr); cp < end; cp += 64) __builtin_prefetch(cp, 0); } inline void prefetch_range_write(void *addr, size_t len) { char *cp; char *end = (char*)(addr) + len; for (cp = (char*)(addr); cp < end; cp += 64) __builtin_prefetch(cp, 1); } }; #endif
09jijiangwen-download
src/util/pthread_tools.hpp
C++
asf20
10,003
// NOTE, copied from GraphLab v 0.5 #ifndef DENSE_BITSET_HPP #define DENSE_BITSET_HPP #include <cstdio> #include <cstdlib> #include <stdint.h> namespace graphchi { class dense_bitset { public: dense_bitset() : array(NULL), len(0) { generate_bit_masks(); } dense_bitset(size_t size) : array(NULL), len(size) { resize(size); clear(); generate_bit_masks(); } virtual ~dense_bitset() {free(array);} void resize(size_t n) { len = n; //need len bits arrlen = n / (8*sizeof(size_t)) + 1; array = (size_t*)realloc(array, sizeof(size_t) * arrlen); } void clear() { for (size_t i = 0;i < arrlen; ++i) array[i] = 0; } void setall() { memset(array, 0xff, arrlen * sizeof(size_t)); } inline bool get(uint32_t b) const{ uint32_t arrpos, bitpos; bit_to_pos(b, arrpos, bitpos); return array[arrpos] & (size_t(1) << size_t(bitpos)); } //! Set the bit returning the old value inline bool set_bit(uint32_t b) { // use CAS to set the bit uint32_t arrpos, bitpos; bit_to_pos(b, arrpos, bitpos); const size_t mask(size_t(1) << size_t(bitpos)); return __sync_fetch_and_or(array + arrpos, mask) & mask; } //! Set the state of the bit returning the old value inline bool set(uint32_t b, bool value) { if (value) return set_bit(b); else return clear_bit(b); } //! 
Clear the bit returning the old value inline bool clear_bit(uint32_t b) { // use CAS to set the bit uint32_t arrpos, bitpos; bit_to_pos(b, arrpos, bitpos); const size_t test_mask(size_t(1) << size_t(bitpos)); const size_t clear_mask(~test_mask); return __sync_fetch_and_and(array + arrpos, clear_mask) & test_mask; } inline void clear_bits(uint32_t fromb, uint32_t tob) { // tob is inclusive // Careful with alignment const size_t bitsperword = sizeof(size_t)*8; while((fromb%bitsperword != 0)) { clear_bit(fromb); if (fromb>=tob) return; fromb++; } while((tob%bitsperword != 0)) { clear_bit(tob); if(tob<=fromb) return; tob--; } clear_bit(tob); uint32_t from_arrpos = fromb / (8 * (int) sizeof(size_t)); uint32_t to_arrpos = tob / (8 * (int) sizeof(size_t)); memset(&array[from_arrpos], 0, (to_arrpos-from_arrpos) * (int) sizeof(size_t)); } inline size_t size() const { return len; } private: inline static void bit_to_pos(uint32_t b, uint32_t &arrpos, uint32_t &bitpos) { // the compiler better optimize this... arrpos = b / (8 * (int)sizeof(size_t)); bitpos = b & (8 * (int)sizeof(size_t) - 1); } void generate_bit_masks() { below_selectedbit[0] = size_t(-2); for (size_t i = 0;i < 8 * sizeof(size_t) ; ++i) { selectbit[i] = size_t(1) << i; notselectbit[i] = ~selectbit[i]; if (i > 0) below_selectedbit[i] = below_selectedbit[i-1] - selectbit[i]; } } // returns 0 on failure inline size_t next_bit_in_block(const uint32_t &b, const size_t &block) { // use CAS to set the bit size_t x = block & below_selectedbit[b] ; if (x == 0) return 0; else return __builtin_ctzl(x); } // returns 0 on failure inline size_t first_bit_in_block(const size_t &block) { // use CAS to set the bit if (block == 0) return 0; else return __builtin_ctzl(block); } size_t* array; size_t len; size_t arrlen; // selectbit[i] has a bit in the i'th position size_t selectbit[8 * sizeof(size_t)]; size_t notselectbit[8 * sizeof(size_t)]; size_t below_selectedbit[8 * sizeof(size_t)]; }; } #endif
09jijiangwen-download
src/util/dense_bitset.hpp
C++
asf20
4,539
/** * @file * @author Aapo Kyrola <akyrola@cs.cmu.edu> * @version 1.0 * * @section LICENSE * * Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University] * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * @section DESCRIPTION * * Parses a simple configuration file. * Why did I write my own? */ #ifndef GRAPHCHI_CONFIGFILE_DEF #define GRAPHCHI_CONFIGFILE_DEF #include <iostream> #include <cstdio> #include <string> #include <map> #include <assert.h> namespace graphchi { // Code for trimming strings copied from + modified // http://stackoverflow.com/questions/479080/trim-is-not-part-of-the-standard-c-c-library const std::string whiteSpaces( " \f\n\r\t\v" ); static void trimRight( std::string &str, const std::string& trimChars ) { std::string::size_type pos = str.find_last_not_of( trimChars ); str.erase( pos + 1 ); } static void trimLeft( std::string &str, const std::string& trimChars ) { std::string::size_type pos = str.find_first_not_of( trimChars ); str.erase( 0, pos ); } static std::string trim( std::string str) { std::string trimChars = " \f\n\r\t\v"; trimRight( str, trimChars ); trimLeft( str, trimChars ); return str; } // Removes \n from the end of line static void _FIXLINE(char * s) { int len = (int)strlen(s)-1; if(s[len] == '\n') s[len] = 0; } /** * Returns a key-value map of a configuration file key-values. * If file is not found, fails with an assertion. 
* @param filename filename of the configuration file * @param secondary_filename secondary filename if the first version is not found. */ static std::map<std::string, std::string> loadconfig(std::string filename, std::string secondary_filename) { FILE * f = fopen(filename.c_str(), "r"); if (f == NULL) { f = fopen(secondary_filename.c_str(), "r"); if (f == NULL) { std::cout << "ERROR: Could not read configuration file: " << filename << std::endl; std::cout << "Please define environment variable GRAPHCHI_ROOT or run the program from that directory." << std::endl; } assert(f != NULL); } char s[4096]; std::map<std::string, std::string> conf; // I like C parsing more than C++, that is why this is such a mess while(fgets(s, 4096, f) != NULL) { _FIXLINE(s); if (s[0] == '#') continue; // Comment if (s[0] == '%') continue; // Comment char delims[] = "="; char * t; t = strtok(s, delims); const char * ckey = t; t = strtok(NULL, delims); const char * cval = t; if (ckey != NULL && cval != NULL) { std::string key = trim(std::string(ckey)); std::string val = trim(std::string(cval)); conf[key] = val; } } return conf; } }; #endif
09jijiangwen-download
src/util/configfile.hpp
C++
asf20
3,763
/** * @file * @author Aapo Kyrola <akyrola@cs.cmu.edu> * @version 1.0 * * @section LICENSE * * Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University] * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * @section DESCRIPTION * * I/O manager. */ #ifndef DEF_STRIPEDIO_HPP #define DEF_STRIPEDIO_HPP #include <iostream> #include <fcntl.h> #include <unistd.h> #include <assert.h> #include <stdint.h> #include <pthread.h> #include <errno.h> //#include <omp.h> #include <vector> #include "logger/logger.hpp" #include "metrics/metrics.hpp" #include "util/synchronized_queue.hpp" #include "util/ioutil.hpp" #include "util/cmdopts.hpp" namespace graphchi { static size_t get_filesize(std::string filename); struct pinned_file; /** * Defines a striped file access. 
*/ struct io_descriptor { std::string filename; std::vector<int> readdescs; std::vector<int> writedescs; pinned_file * pinned_to_memory; int start_mplex; bool open; bool compressed; }; enum BLOCK_ACTION { READ, WRITE }; // Very simple ref count system struct refcountptr { char * ptr; volatile int count; refcountptr(char * ptr, int count) : ptr(ptr), count(count) {} }; // Forward declaration class stripedio; struct iotask { BLOCK_ACTION action; int fd; int session; refcountptr * ptr; size_t length; size_t offset; size_t ptroffset; bool free_after; stripedio * iomgr; bool compressed; bool closefd; volatile int * doneptr; iotask() : action(READ), fd(0), session(0), ptr(NULL), length(0), offset(0), ptroffset(0), free_after(false), iomgr(NULL), compressed(false), closefd(false), doneptr(NULL) {} iotask(stripedio * iomgr, BLOCK_ACTION act, int fd, int session, refcountptr * ptr, size_t length, size_t offset, size_t ptroffset, bool free_after, bool compressed, bool closefd=false) : action(act), fd(fd), session(session), ptr(ptr),length(length), offset(offset), ptroffset(ptroffset), free_after(free_after), iomgr(iomgr),compressed(compressed), closefd(closefd) { if (closefd) assert(free_after); doneptr = NULL; } }; struct thrinfo { synchronized_queue<iotask> * readqueue; synchronized_queue<iotask> * commitqueue; synchronized_queue<iotask> * prioqueue; bool running; metrics * m; volatile int pending_writes; volatile int pending_reads; int mplex; }; // Forward declaration static void * io_thread_loop(void * _info); struct stripe_chunk { int mplex_thread; size_t offset; size_t len; stripe_chunk(int mplex_thread, size_t offset, size_t len) : mplex_thread(mplex_thread), offset(offset), len(len) {} }; struct streaming_task { stripedio * iomgr; int session; size_t len; volatile size_t curpos; char ** buf; streaming_task() {} streaming_task(stripedio * iomgr, int session, size_t len, char ** buf) : iomgr(iomgr), session(session), len(len), curpos(0), buf(buf) {} }; struct 
pinned_file { std::string filename; size_t length; uint8_t * data; bool touched; }; // Forward declaration static void * stream_read_loop(void * _info); class stripedio { std::vector<io_descriptor *> sessions; mutex mlock; int stripesize; int multiplex; std::string multiplex_root; bool disable_preloading; std::vector< synchronized_queue<iotask> > mplex_readtasks; std::vector< synchronized_queue<iotask> > mplex_writetasks; std::vector< synchronized_queue<iotask> > mplex_priotasks; std::vector< pthread_t > threads; std::vector< thrinfo * > thread_infos; metrics &m; /* Memory-pinned files */ std::vector<pinned_file *> preloaded_files; mutex preload_lock; size_t preloaded_bytes; size_t max_preload_bytes; int niothreads; // threads per mplex public: stripedio( metrics &_m) : m(_m) { disable_preloading = false; stripesize = get_option_int("io.stripesize", 4096 * 1024 / 2); preloaded_bytes = 0; max_preload_bytes = 1024 * 1024 * get_option_long("preload.max_megabytes", 0); if (max_preload_bytes > 0) { logstream(LOG_INFO) << "Preloading maximum " << max_preload_bytes << " bytes." << std::endl; } multiplex = get_option_int("multiplex", 1); if (multiplex>1) { multiplex_root = get_option_string("multiplex_root", "<not-set>"); } else { multiplex_root = ""; stripesize = 1024*1024*1024; } m.set("stripesize", (size_t)stripesize); // Start threads (niothreads is now threads per multiplex) niothreads = get_option_int("niothreads", 1); m.set("niothreads", (size_t)niothreads); logstream(LOG_DEBUG) << "Start io-manager with " << niothreads << " threads." 
<< std::endl; // Each multiplex partition has its own queues for(int i=0; i < multiplex * niothreads; i++) { mplex_readtasks.push_back(synchronized_queue<iotask>()); mplex_writetasks.push_back(synchronized_queue<iotask>()); mplex_priotasks.push_back(synchronized_queue<iotask>()); } int k = 0; for(int i=0; i < multiplex; i++) { for(int j=0; j < niothreads; j++) { thrinfo * cthreadinfo = new thrinfo(); cthreadinfo->commitqueue = &mplex_writetasks[k]; cthreadinfo->readqueue = &mplex_readtasks[k]; cthreadinfo->prioqueue = &mplex_priotasks[k]; cthreadinfo->running = true; cthreadinfo->pending_writes = 0; cthreadinfo->pending_reads = 0; cthreadinfo->mplex = i; cthreadinfo->m = &m; thread_infos.push_back(cthreadinfo); pthread_t iothread; int ret = pthread_create(&iothread, NULL, io_thread_loop, cthreadinfo); threads.push_back(iothread); assert(ret>=0); k++; } } } ~stripedio() { int mplex = (int) thread_infos.size(); // Quit all threads for(int i=0; i<mplex; i++) { thread_infos[i]->running=false; } size_t nthreads = threads.size(); for(unsigned int i=0; i<nthreads; i++) { pthread_join(threads[i], NULL); } for(int i=0; i<mplex; i++) { delete thread_infos[i]; } for(int j=0; j<(int)sessions.size(); j++) { if (sessions[j] != NULL) { close_session(j); delete sessions[j]; sessions[j] = NULL; } } for(std::vector<pinned_file *>::iterator it=preloaded_files.begin(); it != preloaded_files.end(); ++it) { pinned_file * preloaded = (*it); delete preloaded->data; delete preloaded; } } void set_disable_preloading(bool b) { disable_preloading = b; if (b) logstream(LOG_INFO) << "Disabled preloading." 
<< std::endl; } bool multiplexed() { return multiplex>1; } void print_session(int session) { for(int i=0; i<multiplex; i++) { std::cout << "multiplex: " << multiplex << std::endl; std::cout << "Read desc: " << sessions[session]->readdescs[i] << std::endl; } for(int i=0; i<(int)sessions[session]->writedescs.size(); i++) { std::cout << "multiplex: " << multiplex << std::endl; std::cout << "Read desc: " << sessions[session]->writedescs[i] << std::endl; } } // Compute a hash for filename which is used for // permuting the stripes. It is important the permutation // is same regardless of when the file is opened. int hash(std::string filename) { const char * cstr = filename.c_str(); int hash = 1; int l = (int) strlen(cstr); for(int i=0; i<l; i++) { hash = 31*hash + cstr[i]; } return std::abs(hash); } int open_session(std::string filename, bool readonly=false, bool compressed=false) { mlock.lock(); // FIXME: known memory leak: sessions table is never shrunk int session_id = (int) sessions.size(); io_descriptor * iodesc = new io_descriptor(); iodesc->open = true; iodesc->compressed = compressed; iodesc->pinned_to_memory = is_preloaded(filename); iodesc->start_mplex = hash(filename) % multiplex; sessions.push_back(iodesc); mlock.unlock(); if (NULL != iodesc->pinned_to_memory) { logstream(LOG_INFO) << "Opened preloaded session: " << filename << std::endl; return session_id; } for(int i=0; i<multiplex; i++) { std::string fname = multiplexprefix(i) + filename; for(int j=0; j<niothreads+(multiplex == 1 ? 1 : 0); j++) { // Hack to have one fd for synchronous int rddesc = open(fname.c_str(), (readonly ? O_RDONLY : O_RDWR)); if (rddesc < 0) logstream(LOG_ERROR) << "Could not open: " << fname << " session: " << session_id << " error: " << strerror(errno) << std::endl; assert(rddesc>=0); iodesc->readdescs.push_back(rddesc); #ifdef F_NOCACHE if (!readonly) fcntl(rddesc, F_NOCACHE, 1); #endif if (!readonly) { int wrdesc = rddesc; // Change by Aapo: Aug 11, 2012. 
I don't think we need separate wrdesc? if (wrdesc < 0) logstream(LOG_ERROR) << "Could not open for writing: " << fname << " session: " << session_id << " error: " << strerror(errno) << std::endl; assert(wrdesc>=0); #ifdef F_NOCACHE fcntl(wrdesc, F_NOCACHE, 1); #endif iodesc->writedescs.push_back(wrdesc); } } } iodesc->filename = filename; if (iodesc->writedescs.size() > 0) { // logstream(LOG_INFO) << "Opened write-session: " << session_id << "(" << iodesc->writedescs[0] << ") for " << filename << std::endl; } else { // logstream(LOG_INFO) << "Opened read-session: " << session_id << "(" << iodesc->readdescs[0] << ") for " << filename << std::endl; } return session_id; } void close_session(int session) { mlock.lock(); // Note: currently io-descriptors are left into the vertex array // in purpose to make managed memory work. Should be fixed as this is // a (relatively minor) memory leak. bool wasopen; io_descriptor * iodesc = sessions[session]; wasopen = iodesc->open; iodesc->open = false; mlock.unlock(); if (wasopen) { // std::cout << "Closing: " << iodesc->filename << " " << iodesc->readdescs[0] << std::endl; for(std::vector<int>::iterator it=iodesc->readdescs.begin(); it!=iodesc->readdescs.end(); ++it) { close(*it); } // for(std::vector<int>::iterator it=iodesc->writedescs.begin(); it!=iodesc->writedescs.end(); ++it) { // close(*it); // } } } int mplex_for_offset(int session, size_t off) { return ((int) (off / stripesize) + sessions[session]->start_mplex) % multiplex; } // Returns vector of <mplex, offset> std::vector< stripe_chunk > stripe_offsets(int session, size_t nbytes, size_t off) { size_t end = off+nbytes; size_t idx = off; size_t bufoff = 0; std::vector<stripe_chunk> stripelist; while(idx<end) { size_t blockoff = idx % stripesize; size_t blocklen = std::min(stripesize-blockoff, end-idx); int mplex_thread = (int) mplex_for_offset(session, idx) * niothreads + (int) (random() % niothreads); stripelist.push_back(stripe_chunk(mplex_thread, bufoff, blocklen)); 
bufoff += blocklen; idx += blocklen; } return stripelist; } template <typename T> void preada_async(int session, T * tbuf, size_t nbytes, size_t off, volatile int * doneptr = NULL) { std::vector<stripe_chunk> stripelist = stripe_offsets(session, nbytes, off); if (compressed_session(session)) { assert(stripelist.size() == 1); assert(off == 0); } refcountptr * refptr = new refcountptr((char*)tbuf, (int)stripelist.size()); for(int i=0; i<(int)stripelist.size(); i++) { stripe_chunk chunk = stripelist[i]; __sync_add_and_fetch(&thread_infos[chunk.mplex_thread]->pending_reads, 1); iotask task = iotask(this, READ, sessions[session]->readdescs[chunk.mplex_thread], session, refptr, chunk.len, chunk.offset+off, chunk.offset, false, compressed_session(session)); task.doneptr = doneptr; mplex_readtasks[chunk.mplex_thread].push(task); } } /* Used for pipelined read */ void launch_stream_reader(streaming_task * task) { pthread_t t; int ret = pthread_create(&t, NULL, stream_read_loop, (void*)task); assert(ret>=0); } /** * Pinned sessions process files that are permanently * pinned to memory. */ bool pinned_session(int session) { return sessions[session]->pinned_to_memory; } bool compressed_session(int session) { return sessions[session]->compressed; } /** * Call to allow files to be preloaded. Note: using this requires * that all files are accessed with same path. This is true if * standard chifilenames.hpp -given filenames are used. 
*/ void allow_preloading(std::string filename) { if (disable_preloading) { return; } preload_lock.lock(); assert(max_preload_bytes == 0); /* size_t filesize = get_filesize(filename); if (preloaded_bytes + filesize <= max_preload_bytes) { preloaded_bytes += filesize; m.set("preload_bytes", preloaded_bytes); pinned_file * pfile = new pinned_file(); pfile->filename = filename; pfile->length = filesize; pfile->data = (uint8_t*) malloc(filesize); pfile->touched = false; assert(pfile->data != NULL); int fid = open(filename.c_str(), O_RDONLY); if (fid < 0) { logstream(LOG_ERROR) << "Could not read file: " << filename << " error: " << strerror(errno) << std::endl; } assert(fid >= 0); logstream(LOG_INFO) << "Preloading: " << filename << std::endl; preada(fid, pfile->data, filesize, 0); close(fid); preloaded_files.push_back(pfile); }*/ preload_lock.unlock(); } void commit_preloaded() { for(std::vector<pinned_file *>::iterator it=preloaded_files.begin(); it != preloaded_files.end(); ++it) { pinned_file * preloaded = (*it); if (preloaded->touched) { logstream(LOG_INFO) << "Commit preloaded file: " << preloaded->filename << std::endl; int fid = open(preloaded->filename.c_str(), O_WRONLY); if (fid < 0) { logstream(LOG_ERROR) << "Could not read file: " << preloaded->filename << " error: " << strerror(errno) << std::endl; continue; } pwritea(fid, preloaded->data, preloaded->length, 0); close(fid); } preloaded->touched = false; } } pinned_file * is_preloaded(std::string filename) { preload_lock.lock(); pinned_file * preloaded = NULL; for(std::vector<pinned_file *>::iterator it=preloaded_files.begin(); it != preloaded_files.end(); ++it) { if (filename == (*it)->filename) { preloaded = *it; break; } } preload_lock.unlock(); return preloaded; } // Note: data is freed after write! 
/**
 * Queue an asynchronous striped write of nbytes from tbuf at offset off.
 * When free_after is true, tbuf is freed by the worker thread once the
 * last stripe has been written (see io_thread_loop).
 */
template <typename T>
void pwritea_async(int session, T * tbuf, size_t nbytes, size_t off, bool free_after, bool close_fd=false) {
    std::vector<stripe_chunk> stripelist = stripe_offsets(session, nbytes, off);
    refcountptr * refptr = new refcountptr((char*)tbuf, (int) stripelist.size());
    if (compressed_session(session)) {
        assert(stripelist.size() == 1);
        assert(off == 0);
    }
    for(int i=0; i<(int)stripelist.size(); i++) {
        stripe_chunk chunk = stripelist[i];
        __sync_add_and_fetch(&thread_infos[chunk.mplex_thread]->pending_writes, 1);
        mplex_writetasks[chunk.mplex_thread].push(iotask(this, WRITE, sessions[session]->writedescs[chunk.mplex_thread], session, refptr,
                                                         chunk.len, chunk.offset+off, chunk.offset, free_after, compressed_session(session), close_fd));
    }
}

/**
 * Synchronous (blocking) read. With multiplexing, the read is split into
 * prioritized per-stripe tasks and this thread spins until all complete;
 * otherwise it reads directly.
 */
template <typename T>
void preada_now(int session, T * tbuf, size_t nbytes, size_t off) {
    metrics_entry me = m.start_time();
    if (compressed_session(session)) {
        // Compressed sessions do not support multiplexing for now
        assert(off == 0);
        read_compressed(sessions[session]->readdescs[0], tbuf, nbytes);
        m.stop_time(me, "preada_now", false);
        return;
    }
    if (multiplex > 1) {
        std::vector<stripe_chunk> stripelist = stripe_offsets(session, nbytes, off);
        size_t checklen=0;
        refcountptr * refptr = new refcountptr((char*)tbuf, (int) stripelist.size());
        refptr->count++; // Take a reference so we can spin on it
        for(int i=0; i < (int)stripelist.size(); i++) {
            stripe_chunk chunk = stripelist[i];
            __sync_add_and_fetch(&thread_infos[chunk.mplex_thread]->pending_reads, 1);
            // Use prioritized task queue
            mplex_priotasks[chunk.mplex_thread].push(iotask(this, READ, sessions[session]->readdescs[chunk.mplex_thread], session, refptr,
                                                            chunk.len, chunk.offset+off, chunk.offset, false, false));
            checklen += chunk.len;
        }
        assert(checklen == nbytes);
        // Spin until only our own reference remains
        while(refptr->count>1) {
            usleep(5000);
        }
        delete refptr;
    } else {
        preada(sessions[session]->readdescs[threads.size()], tbuf, nbytes, off);
    }
    m.stop_time(me, "preada_now", false);
}

/* Synchronous (blocking) striped write of nbytes from tbuf at offset off. */
template <typename T>
void pwritea_now(int session, T * tbuf, size_t nbytes, size_t off) {
    metrics_entry me = m.start_time();
    if (compressed_session(session)) {
        // Compressed sessions do not support multiplexing for now
        assert(off == 0);
        write_compressed(sessions[session]->writedescs[0], tbuf, nbytes);
        m.stop_time(me, "pwritea_now", false);
        return;
    }
    std::vector<stripe_chunk> stripelist = stripe_offsets(session, nbytes, off);
    size_t checklen=0;
    for(int i=0; i<(int)stripelist.size(); i++) {
        stripe_chunk chunk = stripelist[i];
        pwritea(sessions[session]->writedescs[chunk.mplex_thread], (char*)tbuf+chunk.offset, chunk.len, chunk.offset+off);
        checklen += chunk.len;
    }
    assert(checklen == nbytes);
    m.stop_time(me, "pwritea_now", false);
}

/**
 * Memory managed version of the I/O functions. For sessions pinned to
 * memory these operate directly on the in-memory file buffer instead
 * of performing real I/O.
 */
template <typename T>
void managed_pwritea_async(int session, T ** tbuf, size_t nbytes, size_t off, bool free_after, bool close_fd=false) {
    if (!pinned_session(session)) {
        pwritea_async(session, *tbuf, nbytes, off, free_after, close_fd);
    } else {
        // Do nothing but mark the descriptor as 'dirty'
        sessions[session]->pinned_to_memory->touched = true;
    }
}

template <typename T>
void managed_preada_now(int session, T ** tbuf, size_t nbytes, size_t off) {
    if (!pinned_session(session)) {
        preada_now(session, *tbuf, nbytes, off);
    } else {
        // Pinned: point the caller's buffer directly into the pinned data.
        io_descriptor * iodesc = sessions[session];
        *tbuf = (T*) (iodesc->pinned_to_memory->data + off);
    }
}

template <typename T>
void managed_pwritea_now(int session, T ** tbuf, size_t nbytes, size_t off) {
    if (!pinned_session(session)) {
        pwritea_now(session, *tbuf, nbytes, off);
    } else {
        // Do nothing but mark the descriptor as 'dirty'
        sessions[session]->pinned_to_memory->touched = true;
    }
}

/* Allocates a buffer, or for pinned sessions returns a pointer into the pinned data. */
template<typename T>
void managed_malloc(int session, T ** tbuf, size_t nbytes, size_t noff) {
    if (!pinned_session(session)) {
        *tbuf = (T*) malloc(nbytes);
    } else {
        io_descriptor * iodesc = sessions[session];
        *tbuf = (T*) (iodesc->pinned_to_memory->data + noff);
    }
}

/**
 * @param doneptr is
decremented to zero when task is ready */
template <typename T>
void managed_preada_async(int session, T ** tbuf, size_t nbytes, size_t off, volatile int * doneptr = NULL) {
    if (!pinned_session(session)) {
        preada_async(session, *tbuf, nbytes, off, doneptr);
    } else {
        // Pinned: no real I/O needed; point into the pinned buffer and
        // signal completion immediately.
        io_descriptor * iodesc = sessions[session];
        *tbuf = (T*) (iodesc->pinned_to_memory->data + off);
        if (doneptr != NULL) {
            __sync_sub_and_fetch(doneptr, 1);
        }
    }
}

/* Releases a buffer obtained via managed_malloc/managed_preada_*; pinned buffers are not freed. */
template <typename T>
void managed_release(int session, T ** ptr) {
    if (!pinned_session(session)) {
        assert(*ptr != NULL);
        free(*ptr);
    }
    *ptr = NULL;
}

void truncate(int session, size_t nbytes) {
    assert(!pinned_session(session));
    assert(multiplex <= 1); // We do not support truncating on multiplex yet
    int stat = ftruncate(sessions[session]->writedescs[0], nbytes);
    if (stat != 0) {
        logstream(LOG_ERROR) << "Could not truncate " << sessions[session]->filename
                             << " error: " << strerror(errno) << std::endl;
        assert(false);
    }
}

/* Block until all queued reads on every multiplex thread have completed. */
void wait_for_reads() {
    metrics_entry me = m.start_time();
    int loops = 0;
    int mplex = (int) thread_infos.size();
    for(int i=0; i<mplex; i++) {
        while(thread_infos[i]->pending_reads > 0) {
            usleep(10000);
            loops++;
        }
    }
    m.stop_time(me, "stripedio_wait_for_reads", false);
}

/* Block until all queued writes on every multiplex thread have completed. */
void wait_for_writes() {
    metrics_entry me = m.start_time();
    int mplex = (int) thread_infos.size();
    for(int i=0; i<mplex; i++) {
        while(thread_infos[i]->pending_writes>0) {
            usleep(10000);
        }
    }
    m.stop_time(me, "stripedio_wait_for_writes", false);
}

/* Directory prefix ("1/", "2/", ...) for the given stripe when multiplexing. */
std::string multiplexprefix(int stripe) {
    if (multiplex > 1) {
        char mstr[255];
        sprintf(mstr, "%d/", 1+stripe%multiplex);
        return multiplex_root + std::string(mstr);
    } else return "";
}

std::string multiplexprefix_random() {
    return multiplexprefix((int)random() % multiplex);
}
};

/**
 * Worker loop run by each I/O thread. Pops tasks from the priority,
 * read and commit queues (reads are prioritized when any are pending),
 * executes them, and handles shared-buffer reference counting.
 */
static void * io_thread_loop(void * _info) {
    iotask task;
    thrinfo * info = (thrinfo*)_info;
    int ntasks = 0;
    // logstream(LOG_INFO) << "Thread for multiplex :" << info->mplex << " starting." << std::endl;
    while(info->running) {
        bool success;
        if (info->pending_reads>0) {
            // Prioritize read queue
            success = info->prioqueue->safepop(&task);
            if (!success) {
                success = info->readqueue->safepop(&task);
            }
        } else {
            success = info->commitqueue->safepop(&task);
        }
        if (success) {
            ++ntasks;
            if (task.action == WRITE) {
                // Write
                metrics_entry me = info->m->start_time();
                if (task.compressed) {
                    assert(task.offset == 0);
                    write_compressed(task.fd, task.ptr->ptr, task.length);
                } else {
                    pwritea(task.fd, task.ptr->ptr + task.ptroffset, task.length, task.offset);
                }
                if (task.free_after) {
                    // Thread-safe method of memory management - ugly!
                    // Last stripe to finish frees the shared buffer.
                    if (__sync_sub_and_fetch(&task.ptr->count, 1) == 0) {
                        free(task.ptr->ptr);
                        delete task.ptr;
                        if (task.closefd) {
                            task.iomgr->close_session(task.session);
                        }
                    }
                }
                __sync_sub_and_fetch(&info->pending_writes, 1);
                info->m->stop_time(me, "commit_thr");
            } else {
                if (task.compressed) {
                    assert(task.offset == 0);
                    read_compressed(task.fd, task.ptr->ptr, task.length);
                } else {
                    preada(task.fd, task.ptr->ptr+task.ptroffset, task.length, task.offset);
                }
                __sync_sub_and_fetch(&info->pending_reads, 1);
                // Last stripe releases the refcount wrapper (not the data buffer,
                // which the reader owns).
                if (__sync_sub_and_fetch(&task.ptr->count, 1) == 0) {
                    free(task.ptr);
                    if (task.closefd) {
                        task.iomgr->close_session(task.session);
                    }
                }
            }
            if (task.doneptr != NULL) {
                __sync_sub_and_fetch(task.doneptr, 1);
            }
        } else {
            usleep(50000); // 50 ms
        }
    }
    // logstream(LOG_INFO) << "I/O thread exists. Handled " << ntasks << " i/o tasks." << std::endl;
    return NULL;
}

/**
 * Streaming read loop: reads the session sequentially in 32 MB chunks,
 * advancing task->curpos atomically so consumers can follow progress.
 */
static void * stream_read_loop(void * _info) {
    streaming_task * task = (streaming_task*)_info;
    timeval start, end;
    gettimeofday(&start, NULL);
    size_t bufsize = 32*1024*1024; // 32 megs
    char * tbuf;
    /**
     * If this is not pinned, we just malloc the
     * buffer. Otherwise - should just return pointer
     * to the in-memory file buffer.
     */
    if (task->iomgr->pinned_session(task->session)) {
        __sync_add_and_fetch(&task->curpos, task->len);
        return NULL;
    }
    tbuf = *task->buf;
    while(task->curpos < task->len) {
        size_t toread = std::min((size_t)task->len - (size_t)task->curpos, (size_t)bufsize);
        task->iomgr->preada_now(task->session, tbuf + task->curpos, toread, task->curpos);
        __sync_add_and_fetch(&task->curpos, toread);
    }
    gettimeofday(&end, NULL);
    return NULL;
}

/* Returns the size of a file in bytes (asserts the file can be opened). */
static size_t get_filesize(std::string filename) {
    std::string fname = filename;
    int f = open(fname.c_str(), O_RDONLY);
    if (f < 0) {
        logstream(LOG_ERROR) << "Could not open file " << filename
                             << " error: " << strerror(errno) << std::endl;
        assert(false);
    }
    off_t sz = lseek(f, 0, SEEK_END);
    close(f);
    return sz;
}

}
#endif
09jijiangwen-download
src/io/stripedio.hpp
C++
asf20
31,796
#!/bin/bash
# Script for installing the GraphChi collaborative filtering toolbox.
# Written by Danny Bickson, CMU.
# Downloads the Eigen library (with wget or curl), extracts it into ./src,
# and builds the collaborative filtering toolkit.

EIGEN_FILE=3.1.3.tar.bz2
EIGEN_DIST=http://bitbucket.org/eigen/eigen/get/$EIGEN_FILE

if command -v wget > /dev/null 2>&1; then
    # Remove any stale/partial download of the archive before fetching.
    # (Fixed: previously removed $EIGEN_DIST, which is a URL, not a local file.)
    rm -f $EIGEN_FILE
    wget --max-redirect 20 $EIGEN_DIST
    if [ $? -ne 0 ]; then
        echo "Failed to download file"
        echo "Please download manually the file $EIGEN_DIST to the root GraphChi folder"
        exit 1
    fi
elif command -v curl > /dev/null 2>&1; then
    rm -f $EIGEN_FILE
    curl -o $EIGEN_FILE -L $EIGEN_DIST
    if [ $? -ne 0 ]; then
        echo "Failed to download file"
        echo "Please download manually the file $EIGEN_DIST to the root GraphChi folder"
        exit 1
    fi
else
    echo "Failed to find wget or curl"
    echo "Please download manually the file $EIGEN_DIST to the root GraphChi folder"
    exit 1
fi

# Remove leftovers of a previous extraction. (Fixed: the extracted
# eigen-eigen-* entries are directories, so -r is required.)
rm -rf eigen-eigen-*
tar -xjf $EIGEN_FILE
if [ $? -ne 0 ]; then
    echo "Failed to extract eigen files"
    echo "Please download manually the file $EIGEN_DIST to the root GraphChi folder"
    exit 1
fi

# Install the extracted Eigen headers into the source tree.
rm -fR ./src/Eigen
mv eigen-eigen-*/Eigen ./src

# Build the collaborative filtering toolkit.
cd toolkits/collaborative_filtering
make
cd ../../
09jijiangwen-download
install.sh
Shell
asf20
1,187
/**
 * Simple tool for creating input for streaming graph demos.
 * An edgelist is read and two files are created: base-graph and
 * streaming input file. Streaming input is shuffled.
 * NOTE: This is unsupported code and requires plenty of memory.
 */
#include <vector>
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <assert.h>
#include <string.h>
#include <string>
#include <errno.h>
#include <algorithm>
#include <iterator>

/* An unweighted directed edge read from the input edgelist. */
struct edge {
    unsigned int from;
    unsigned int to;
};

// Removes \n from the end of line (safe for empty strings).
void FIXLINE(char * s) {
    int len = (int) strlen(s)-1;
    if(len >= 0 && s[len] == '\n') s[len] = 0;
}

int main(int argc, const char ** argv) {
    /* Three positional arguments => argc must be 4. (Fixed: previously the
       check compared against 3 and execution fell through to dereference
       missing argv slots.) */
    if (argc != 4) {
        std::cout << "Usage: [inputfile-edgelist] [stream-edges-per-base-edges] [max-base-id]" << std::endl;
        return 1;
    }
    const char * input = argv[1];
    int stream_edges_per_base_edges = atoi(argv[2]);
    int maxbaseid = atoi(argv[3]);
    if (stream_edges_per_base_edges <= 0) {
        /* Guard against modulo-by-zero below (atoi returns 0 on bad input). */
        std::cout << "stream-edges-per-base-edges must be a positive integer" << std::endl;
        return 1;
    }
    std::cout << "Processing: " << input << std::endl;

    FILE * inf = fopen(input, "r");
    std::vector<edge> base_edges;
    std::vector<edge> stream_edges;
    base_edges.reserve(1e6);
    stream_edges.reserve(1e6);
    if (inf == NULL) {
        std::cout << "Could not load :" << input << " error: " << strerror(errno) << std::endl;
    }
    assert(inf != NULL);

    std::cout << "Reading in edge list format!" << std::endl;
    char s[1024];
    while(fgets(s, 1024, inf) != NULL) {
        FIXLINE(s);
        if (s[0] == '#') continue; // Comment
        if (s[0] == '%') continue; // Comment
        char delims[] = "\t ";
        char * t;
        t = strtok(s, delims);
        if (t == NULL) continue;   // Skip blank lines
        edge e;
        e.from = atoi(t);
        t = strtok(NULL, delims);
        if (t == NULL) continue;   // Skip malformed lines with only one endpoint
        e.to = atoi(t);
        /* Roughly one in stream_edges_per_base_edges edges (with both endpoints
           within the base id range) goes to the base graph; the rest stream. */
        if (std::rand() % stream_edges_per_base_edges == 0
                && e.from <= (unsigned int) maxbaseid && e.to <= (unsigned int) maxbaseid)
            base_edges.push_back(e);
        else stream_edges.push_back(e);
    }
    fclose(inf);

    std::cout << "Number of edges in base: " << base_edges.size() << std::endl;
    std::cout << "Number of edges to stream: " << stream_edges.size() << std::endl;

    std::string base_file_name = std::string(input) + "_base";
    std::string stream_file_name = std::string(input) + "_stream";

    FILE * basef = fopen(base_file_name.c_str(), "w");
    for(std::vector<edge>::iterator it=base_edges.begin(); it != base_edges.end(); ++it) {
        fprintf(basef, "%u %u\n", it->from, it->to);
    }
    fclose(basef);

    /* Shuffle */
    std::random_shuffle(stream_edges.begin(), stream_edges.end());

    FILE * strmf = fopen(stream_file_name.c_str(), "w");
    for(std::vector<edge>::iterator it=stream_edges.begin(); it != stream_edges.end(); ++it) {
        fprintf(strmf, "%u %u\n", it->from, it->to);
    }
    fclose(strmf);
    return 0;
}
09jijiangwen-download
example_apps/demotools/create_streaminggraph_demofiles.cpp
C++
asf20
2,903
/** * @file * @author Aapo Kyrola <akyrola@cs.cmu.edu> * @version 1.0 * * @section LICENSE * * Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University] * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * @section DESCRIPTION * * Template for GraphChi applications. To create a new application, duplicate * this template. */ #include <string> #include "graphchi_basic_includes.hpp" using namespace graphchi; /** * Type definitions. Remember to create suitable graph shards using the * Sharder-program. */ typedef my_vertex_type VertexDataType; typedef my_edge_type EdgeDataType; /** * GraphChi programs need to subclass GraphChiProgram<vertex-type, edge-type> * class. The main logic is usually in the update function. */ struct MyGraphChiProgram : public GraphChiProgram<VertexDataType, EdgeDataType> { /** * Vertex update function. */ void update(graphchi_vertex<VertexDataType, EdgeDataType> &vertex, graphchi_context &gcontext) { if (ginfo.iteration == 0) { /* On first iteration, initialize vertex (and its edges). This is usually required, because on each run, GraphChi will modify the data files. To start from scratch, it is easiest do initialize the program in code. Alternatively, you can keep a copy of initial data files. 
*/ // vertex.set_data(init_value); } else { /* Do computation */ /* Loop over in-edges (example) */ for(int i=0; i < vertex.num_inedges(); i++) { // Do something // value += vertex.inedge(i).get_data(); } /* Loop over out-edges (example) */ for(int i=0; i < vertex.num_outedges(); i++) { // Do something // vertex.outedge(i).set_data(x) } /* Loop over all edges (ignore direction) */ for(int i=0; i < vertex.num_edges(); i++) { // vertex.edge(i).get_data() } // v.set_data(new_value); } } /** * Called before an iteration starts. */ void before_iteration(int iteration, graphchi_context &gcontext) { } /** * Called after an iteration has finished. */ void after_iteration(int iteration, graphchi_context &gcontext) { } /** * Called before an execution interval is started. */ void before_exec_interval(vid_t window_st, vid_t window_en, graphchi_context &gcontext) { } /** * Called after an execution interval has finished. */ void after_exec_interval(vid_t window_st, vid_t window_en, graphchi_context &gcontext) { } }; int main(int argc, const char ** argv) { /* GraphChi initialization will read the command line arguments and the configuration file. */ graphchi_init(argc, argv); /* Metrics object for keeping track of performance counters and other information. Currently required. */ metrics m("my-application-name"); /* Basic arguments for application */ std::string filename = get_option_string("file"); // Base filename int niters = get_option_int("niters", 4); // Number of iterations bool scheduler = get_option_int("scheduler", 0); // Whether to use selective scheduling /* Detect the number of shards or preprocess an input to create them */ int nshards = convert_if_notexists<EdgeDataType>(filename, get_option_string("nshards", "auto")); /* Run */ MyGraphChiProgram program; graphchi_engine<VertexDataType, EdgeDataType> engine(filename, nshards, scheduler, m); engine.run(program, niters); /* Report execution metrics */ metrics_report(m); return 0; }
09jijiangwen-download
example_apps/application_template.cpp
C++
asf20
4,478
/**
 * @file
 * @author Aapo Kyrola <akyrola@cs.cmu.edu>
 * @version 1.0
 *
 * @section LICENSE
 *
 * Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University]
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @section DESCRIPTION
 *
 * "Functional" version of pagerank, which is quite a bit more efficient, because
 * it does not construct the vertex-objects but directly processes the edges.
 *
 * This program can be run either in the semi-synchronous mode (faster, but
 * less clearly defined semantics), or synchronously. Synchronous version needs
 * double the amount of I/O because it needs to store both previous and
 * current values. Use command line parameter mode with semisync or sync.
 */

#define RANDOMRESETPROB 0.15

#define GRAPHCHI_DISABLE_COMPRESSION

#include <string>
#include <fstream>
#include <cmath>

#include "graphchi_basic_includes.hpp"
#include "api/functional/functional_api.hpp"
#include "graphchi_basic_includes.hpp"  // NOTE(review): duplicate include; harmless if header-guarded.
#include "util/toplist.hpp"

using namespace graphchi;

/**
 * PageRank expressed in the functional (gather-apply-scatter) API.
 * The vertex value is its current rank; the value passed to a neighbor
 * is the rank divided by the out-degree.
 */
struct pagerank_kernel : public functional_kernel<float, float> {

    /* Initial value - on first iteration */
    float initial_value(graphchi_context &info, vertex_info& myvertex) {
        return 1.0;
    }

    /* Called before first "gather" */
    float reset() {
        return 0.0;
    }

    // Note: Unweighted version, edge value should also be passed
    // "Gather"
    float op_neighborval(graphchi_context &info, vertex_info& myvertex, vid_t nbid, float nbval) {
        return nbval;
    }

    // "Sum"
    float plus(float curval, float toadd) {
        return curval + toadd;
    }

    // "Apply"
    float compute_vertexvalue(graphchi_context &ginfo, vertex_info& myvertex, float nbvalsum) {
        assert(ginfo.nvertices > 0);
        return RANDOMRESETPROB / ginfo.nvertices + (1 - RANDOMRESETPROB) * nbvalsum;
    }

    // "Scatter": distribute rank evenly among out-neighbors.
    float value_to_neighbor(graphchi_context &info, vertex_info& myvertex, vid_t nbid, float myval) {
        assert(myvertex.outdegree > 0);
        return myval / myvertex.outdegree;
    }
};

int main(int argc, const char ** argv) {
    graphchi_init(argc, argv);
    metrics m("pagerank");

    /* Command-line options */
    std::string filename = get_option_string("file");
    int niters           = get_option_int("niters", 4);
    bool onlytop         = get_option_int("onlytop", 0);  // If set, skip computation, only print top list.
    int ntop             = get_option_int("top", 20);
    std::string mode     = get_option_string("mode", "semisync");

    if (onlytop == 0) {
        /* Run */
        if (mode == "semisync") {
            logstream(LOG_INFO) << "Running pagerank in semi-synchronous mode." << std::endl;
            run_functional_unweighted_semisynchronous<pagerank_kernel>(filename, niters, m);
        } else if (mode == "sync") {
            logstream(LOG_INFO) << "Running pagerank in (bulk) synchronous mode." << std::endl;
            run_functional_unweighted_synchronous<pagerank_kernel>(filename, niters, m);
        } else {
            logstream(LOG_ERROR) << "Mode needs to be either 'semisync' or 'sync'." << std::endl;
            assert(false);
        }
        /* Output metrics */
        metrics_report(m);
    }

    /* Write Top 20 */
    std::vector< vertex_value<float> > top = get_top_vertices<float>(filename, ntop);
    std::cout << "Print top 20 vertices: " << std::endl;
    for(int i=0; i < (int) top.size(); i++) {
        std::cout << (i+1) << ". " << top[i].vertex << "\t" << top[i].value << std::endl;
    }
    return 0;
}
09jijiangwen-download
example_apps/pagerank_functional.cpp
C++
asf20
4,051
/** * @file * @author Aapo Kyrola <akyrola@cs.cmu.edu> * @version 1.0 * * @section LICENSE * * Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University] * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * @section DESCRIPTION * * Simple program that writes a graph into adjacency list */ #include <iostream> #include "graphchi_basic_includes.hpp" using namespace graphchi; /** * Type definitions. Remember to create suitable graph shards using the * Sharder-program. */ typedef bool VertexDataType; typedef bool EdgeDataType; FILE * f; #define MODE_ADJLIST 0 #define MODE_CASSOVARY_ADJ 1 int mode; struct AdjConverter : public GraphChiProgram<VertexDataType, EdgeDataType> { /** * Vertex update function. */ void update(graphchi_vertex<VertexDataType, EdgeDataType> &vertex, graphchi_context &gcontext) { if (vertex.id() % 10000 == 0) std::cout << vertex.id() << std::endl; switch(mode) { case MODE_ADJLIST: { fprintf(f, "%d %d", vertex.id(), vertex.num_outedges()); for(int i=0; i<vertex.num_outedges(); i++) fprintf(f, " %d", vertex.outedge(i)->vertex_id()); fprintf(f, "\n"); break; } case MODE_CASSOVARY_ADJ: { fprintf(f, "%d %d\n", vertex.id(), vertex.num_outedges()); for(int i=0; i<vertex.num_outedges(); i++) fprintf(f, "%d\n", vertex.outedge(i)->vertex_id()); break; } } } /** * Called before an iteration starts. */ void before_iteration(int iteration, graphchi_context &gcontext) { } /** * Called after an iteration has finished. 
*/ void after_iteration(int iteration, graphchi_context &gcontext) { } /** * Called before an execution interval is started. */ void before_exec_interval(vid_t window_st, vid_t window_en, graphchi_context &gcontext) { } /** * Called after an execution interval has finished. */ void after_exec_interval(vid_t window_st, vid_t window_en, graphchi_context &gcontext) { } }; int main(int argc, const char ** argv) { /* GraphChi initialization will read the command line arguments and the configuration file. */ graphchi_init(argc, argv); /* Metrics object for keeping track of performance counters and other information. Currently required. */ metrics m("adjconverter"); /* Basic arguments for application */ std::string filename = get_option_string("file"); // Base filename /* Detect the number of shards or preprocess an input to create them */ int nshards = convert_if_notexists<EdgeDataType>(filename, get_option_string("nshards", "auto")); mode = get_option_int("mode", 0); std::string outfile = filename + ".adj"; f = fopen(outfile.c_str(), "w"); /* Run */ AdjConverter program; graphchi_engine<VertexDataType, EdgeDataType> engine(filename, nshards, false, m); engine.set_exec_threads(1); engine.run(program, 1); fclose(f); /* Report execution metrics */ metrics_report(m); return 0; }
09jijiangwen-download
example_apps/convert_to_adjacency.cpp
C++
asf20
3,886
/**
 * @file
 * @author Aapo Kyrola <akyrola@cs.cmu.edu>
 * @version 1.0
 *
 * @section LICENSE
 *
 * Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University]
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @section DESCRIPTION
 *
 * Triangle counting application. Counts the number of incident (full) triangles
 * for each vertex. Edge direction is ignored.
 *
 * This algorithm is quite complicated and requires 'trickery' to work
 * well on GraphChi. The complexity stems from the need to store large number
 * of adjacency lists in memory: we cannot store the adjacency lists reasonable
 * to edges, nor can we store all of them once at memory. Therefore the problems
 * is solved in a series of phases. On each phase, the relevant adjacency lists of an interval
 * of vertices (called 'pivots') is loaded into memory, and all vertices that have id smaller than the
 * pivots are matched with them. With 'relevant adjacency list' I mean the list of neighbors
 * that have higher id then the pivots themselves. That is, we only count triangles a -> b -> c
 * where a > b > c.
 *
 * The application involves a special preprocessing step which orders the vertices in ascending
 * order of their degree. This turns out to be a very important optimization on big graphs.
 *
 * This algorithm also utilizes the dynamic graph engine, and deletes edges after they have been
 * accounted for.
 */

#include <string>
#include <vector>

/**
 * Need to define prior to including GraphChi headers.
 * This enables edge-deletion in the vertex object.
 */
#define SUPPORT_DELETIONS 1

#define GRAPHCHI_DISABLE_COMPRESSION

#include "graphchi_basic_includes.hpp"
#include "engine/dynamic_graphs/graphchi_dynamicgraph_engine.hpp"

using namespace graphchi;

/**
 * Type definitions. Vertex data stores the number of incident triangles.
 * Edge stores number of unaccounted triangles that the edge participates on.
 * When vertex is updated, it updates its vertex count by summing up the
 * counts from edges (after which the edges are deleted).
 */
typedef uint32_t VertexDataType;
typedef uint32_t EdgeDataType;

/*
 * Class for writing the output number of triangles for each node
 */
class OutputVertexCallback : public VCallback<VertexDataType> {
  public:
    virtual void callback(vid_t vertex_id, VertexDataType &value) {
        if (value > 0)
            std::cout << vertex_id << " " << value << std::endl;
    }
};

/**
 * Code for intersection size computation and
 * pivot management.
 */
int grabbed_edges = 0;  // Total edges currently held in memory by the pivot container.

// Linear search over a sorted chunk; stops early once values exceed target.
inline bool findadj_linear(vid_t * datachunk, size_t n, vid_t target) {
    for(int i=0; i<(int)n; i++) {
        if (datachunk[i] == target) return true;
        else if (datachunk[i] > target) return false;
    }
    return false;
}

// Binary search (falls back to linear scan for short arrays).
inline bool findadj(vid_t * datachunk, size_t n, vid_t target) {
    if (n<32) return findadj_linear(datachunk, n, target);
    register size_t lo = 0;
    register size_t hi = n;
    register size_t m = lo + (hi-lo)/2;
    while(hi>lo) {
        vid_t eto = datachunk[m];
        if (target == eto) {
            return true;
        }
        if (target > eto) {
            lo = m+1;
        } else {
            hi = m;
        }
        m = lo + (hi-lo)/2;
    }
    return false;
}

// In-memory adjacency list of a pivot (only neighbors with larger id are kept).
struct dense_adj {
    int count;        // Number of ids in adjlist.
    vid_t * adjlist;  // Sorted ids; malloc'd, freed by adjlist_container::clear().
    dense_adj() { adjlist = NULL; }
    dense_adj(int _count, vid_t * _adjlist) : count(_count), adjlist(_adjlist) { }
};

// This is used for keeping the pivots' adjacency lists in memory.
class adjlist_container {
    std::vector<dense_adj> adjs;  // One entry per pivot, indexed by (vid - pivot_st).
    mutex m;
  public:
    vid_t pivot_st, pivot_en;     // Current pivot interval [pivot_st, pivot_en).

    adjlist_container() {
        pivot_st = 0;
        pivot_en = 0;
    }

    // Free all stored adjacency lists and advance the pivot window.
    void clear() {
        for(std::vector<dense_adj>::iterator it=adjs.begin(); it != adjs.end(); ++it) {
            if (it->adjlist != NULL) {
                free(it->adjlist);
                it->adjlist = NULL;
            }
        }
        adjs.clear();
        pivot_st = pivot_en;
    }

    /**
     * Extend the interval of pivot vertices to en.
     */
    void extend_pivotrange(vid_t en) {
        assert(en>=pivot_en);
        pivot_en = en;
        adjs.resize(pivot_en - pivot_st);
    }

    /**
     * Grab pivot's adjacency list into memory. Stores only neighbor ids
     * larger than the pivot's own (duplicates from reciprocal edges are
     * collapsed). Returns the number of edges grabbed.
     */
    int grab_adj(graphchi_vertex<uint32_t, uint32_t> &v) {
        if(is_pivot(v.id())) {
            int ncount = v.num_edges();
            // Count how many neighbors have larger id than v
            v.sort_edges_indirect();
            int actcount = 0;
            vid_t lastvid = 0;
            for(int i=0; i<ncount; i++) {
                if (v.edge(i)->vertexid > v.id() && v.edge(i)->vertexid != lastvid)
                    actcount++;  // Need to store only ids larger than me
                lastvid = v.edge(i)->vertex_id();
            }
            // Allocate the in-memory adjacency list, using the
            // knowledge of the number of edges.
            dense_adj dadj = dense_adj(actcount, (vid_t*) calloc(sizeof(vid_t), actcount));
            actcount = 0;
            lastvid = 0;
            for(int i=0; i<ncount; i++) {
                if (v.edge(i)->vertexid > v.id() && v.edge(i)->vertexid != lastvid) {
                    // Need to store only ids larger than me
                    dadj.adjlist[actcount++] = v.edge(i)->vertex_id();
                }
                lastvid = v.edge(i)->vertex_id();
            }
            assert(dadj.count == actcount);
            adjs[v.id() - pivot_st] = dadj;
            assert(v.id() - pivot_st < adjs.size());
            __sync_add_and_fetch(&grabbed_edges, actcount);
            return actcount;
        }
        return 0;
    }

    // Number of stored neighbors for a pivot.
    int acount(vid_t pivot) {
        return adjs[pivot - pivot_st].count;
    }

    /**
     * Compute size of the relevant intersection of v and a pivot.
     * Side effect: increments the triangle count stored on each matching edge.
     */
    int intersection_size(graphchi_vertex<uint32_t, uint32_t> &v, vid_t pivot, int start_i) {
        assert(is_pivot(pivot));
        int count = 0;
        if (pivot > v.id()) {
            dense_adj &dadj = adjs[pivot - pivot_st];
            int vc = v.num_edges();
            /**
             * If the adjacency list sizes are not too different, use
             * 'merge'-type of operation to compute size intersection.
             */
            if (dadj.count < 32 * (vc - start_i)) {  // TODO: do real profiling to find best cutoff value
                // Do merge-style of check
                assert(v.edge(start_i)->vertex_id() == pivot);
                int i1 = 0;
                int i2 = start_i+1;
                int nedges = v.num_edges();
                while (i1 < dadj.count && i2 < nedges) {
                    vid_t dst = v.edge(i2)->vertexid;
                    vid_t a = dadj.adjlist[i1];
                    if (a == dst) {
                        /* Add one to edge between v and the match */
                        v.edge(i2)->set_data(v.edge(i2)->get_data() + 1);
                        count++;
                        i1++;
                        i2++;
                    } else {
                        // Branch-free advance of whichever cursor is behind.
                        i1 += a < dst;
                        i2 += a > dst;
                    }
                }
            } else {
                /**
                 * Otherwise, use linear/binary search.
                 */
                vid_t lastvid = 0;
                for(int i=start_i+1; i < vc; i++) {
                    vid_t nb = v.edge(i)->vertexid;
                    if (nb > pivot && nb != lastvid) {
                        int match = findadj(dadj.adjlist, dadj.count, nb);
                        count += match;
                        if (match > 0) {
                            /* Add one to edge between v and the match */
                            v.edge(i)->set_data(v.edge(i)->get_data() + 1);
                        }
                    }
                    lastvid = nb;
                }
            }
        }
        return count;
    }

    inline bool is_pivot(vid_t vid) {
        return vid >= pivot_st && vid < pivot_en;
    }
};

adjlist_container * adjcontainer;

/**
 * GraphChi programs need to subclass GraphChiProgram<vertex-type, edge-type>
 * class. The main logic is usually in the update function.
 */
struct TriangleCountingProgram : public GraphChiProgram<VertexDataType, EdgeDataType> {

    /**
     * Vertex update function. Even iterations load pivot adjacency lists
     * and collect counts; odd iterations compute intersections.
     */
    void update(graphchi_vertex<VertexDataType, EdgeDataType> &v, graphchi_context &gcontext) {
        if (gcontext.iteration % 2 == 0) {
            adjcontainer->grab_adj(v);
        } else {
            uint32_t oldcount = v.get_data();
            uint32_t newcounts = 0;
            v.sort_edges_indirect();
            vid_t lastvid = 0;
            /**
             * Iterate through the edges, and if an edge is from a
             * pivot vertex, compute intersection of the relevant
             * adjacency lists.
             */
            for(int i=0; i<v.num_edges(); i++) {
                graphchi_edge<uint32_t> * e = v.edge(i);
                if (e->vertexid > v.id() && e->vertexid >= adjcontainer->pivot_st) {
                    assert(!is_deleted_edge_value(e->get_data()));
                    if (e->vertexid != lastvid) {  // Handles reciprocal edges (a->b, b<-a)
                        if (adjcontainer->is_pivot(e->vertexid)) {
                            uint32_t pivot_triangle_count = adjcontainer->intersection_size(v, e->vertexid, i);
                            newcounts += pivot_triangle_count;
                            /* Write the number of triangles into edge between this vertex and pivot */
                            if (pivot_triangle_count == 0 && e->get_data() == 0) {
                                /* ... or remove the edge, if the count is zero. */
                                v.remove_edge(i);
                            } else {
                                e->set_data(e->get_data() + pivot_triangle_count);
                            }
                        } else {
                            break;  // Edges are sorted: past the pivot window, nothing more to do.
                        }
                    }
                    lastvid = e->vertexid;
                }
                // NOTE(review): trivially true — newcounts is unsigned.
                assert(newcounts >= 0);
            }
            if (newcounts > 0) {
                v.set_data(oldcount + newcounts);
            }
        }

        /* Collect triangle counts matched by vertices with id lower than this one, and delete */
        if (gcontext.iteration % 2 == 0) {
            int newcounts = 0;
            for(int i=0; i < v.num_edges(); i++) {
                graphchi_edge<uint32_t> * e = v.edge(i);
                if (e->vertexid < v.id()) {
                    newcounts += e->get_data();
                    e->set_data(0);
                    // This edge can be now deleted. Is there some other situations we can delete?
                    if (v.id() < adjcontainer->pivot_st && e->vertexid < adjcontainer->pivot_st) {
                        v.remove_edge(i);
                    }
                }
            }
            v.set_data(v.get_data() + newcounts);
        }
    }

    /**
     * Called before an iteration starts.
     */
    void before_iteration(int iteration, graphchi_context &gcontext) {
        gcontext.scheduler->remove_tasks(0, (int) gcontext.nvertices - 1);
        if (gcontext.iteration % 2 == 0) {
            // Schedule vertices that were pivots on last iteration, so they can
            // keep count of the triangles counted by their lower id neighbors.
            for(vid_t i=adjcontainer->pivot_st; i < adjcontainer->pivot_en; i++) {
                gcontext.scheduler->add_task(i);
            }
            grabbed_edges = 0;
            adjcontainer->clear();
        } else {
            // Schedule everything that has id < pivot
            logstream(LOG_INFO) << "Now pivots: " << adjcontainer->pivot_st << " " << adjcontainer->pivot_en << std::endl;
            for(vid_t i=0; i < gcontext.nvertices; i++) {
                if (i < adjcontainer->pivot_en) {
                    gcontext.scheduler->add_task(i);
                }
            }
        }
    }

    /**
     * Called after an iteration has finished.
     */
    void after_iteration(int iteration, graphchi_context &gcontext) {
    }

    /**
     * Called before an execution interval is started.
     *
     * On every even iteration, we store pivot's adjacency lists to memory.
     * Here we manage the memory to ensure that we do not load too much
     * edges into memory.
     */
    void before_exec_interval(vid_t window_st, vid_t window_en, graphchi_context &gcontext) {
        if (gcontext.iteration % 2 == 0) {
            if (adjcontainer->pivot_st <= window_en) {
                size_t max_grab_edges = get_option_long("membudget_mb", 1024) * 1024 * 1024 / 8;
                if (grabbed_edges < max_grab_edges * 0.8) {
                    logstream(LOG_DEBUG) << "Window init, grabbed: " << grabbed_edges << " edges" << std::endl;
                    for(vid_t vid=window_st; vid <= window_en; vid++) {
                        gcontext.scheduler->add_task(vid);
                    }
                    adjcontainer->extend_pivotrange(window_en + 1);
                    if (window_en == gcontext.nvertices) {
                        // Last iteration needed for collecting last triangle counts
                        gcontext.set_last_iteration(gcontext.iteration + 3);
                    }
                } else {
                    std::cout << "Too many edges, already grabbed: " << grabbed_edges << std::endl;
                }
            }
        }
    }

    /**
     * Called after an execution interval has finished.
     */
    void after_exec_interval(vid_t window_st, vid_t window_en, graphchi_context &gcontext) {
    }

};

int main(int argc, const char ** argv) {
    /* GraphChi initialization will read the command line
       arguments and the configuration file. */
    graphchi_init(argc, argv);

    /* Metrics object for keeping track of performance counters
       and other information. Currently required. */
    metrics m("triangle-counting");

    /* Basic arguments for application */
    std::string filename = get_option_string("file");  // Base filename
    int niters = 100000; // Automatically determined during running
    bool scheduler = true;

    /* Preprocess the file, and order the vertices in the order of their degree.
       Mapping from original ids to new ids is saved separately. */
    OrderByDegree<EdgeDataType> * orderByDegreePreprocessor = new OrderByDegree<EdgeDataType> ();
    int nshards = convert_if_notexists<EdgeDataType>(filename, get_option_string("nshards", "auto"), orderByDegreePreprocessor);

    /* Initialize adjacency container */
    adjcontainer = new adjlist_container();

    // TODO: ordering by degree.

    /* Run */
    TriangleCountingProgram program;
    graphchi_dynamicgraph_engine<VertexDataType, EdgeDataType> engine(filename + orderByDegreePreprocessor->getSuffix(), nshards, scheduler, m);
    engine.set_enable_deterministic_parallelism(false);

    // Low memory budget is required to prevent swapping as triangle counting
    // uses more memory than standard GraphChi apps.
    engine.set_membudget_mb(std::min(get_option_int("membudget_mb", 1024), 1024));
    engine.run(program, niters);

    /* Report execution metrics */
    metrics_report(m);

    /* Count triangles: each triangle is counted once per corner vertex. */
    size_t ntriangles = sum_vertices<vid_t, size_t>(filename + "_degord", 0, (vid_t)engine.num_vertices());
    std::cout << "Number of triangles: " << ntriangles / 3 << "(" << ntriangles << ")" << std::endl;

    /* If run as a test, see the number matches */
    size_t expected = get_option_long("assertequals", 0);
    if (expected > 0) {
        std::cout << "Testing the result is as expected: " << (ntriangles / 3) << " vs. " << expected << std::endl;
        assert(expected == ntriangles / 3);
    }

    /* write the output */
    // OutputVertexCallback callback;
    // foreach_vertices<VertexDataType>(filename + "_degord", 0, engine.num_vertices(), callback);
    return 0;
}
09jijiangwen-download
example_apps/trianglecounting.cpp
C++
asf20
17,336
/** * @file * @author Aapo Kyrola <akyrola@cs.cmu.edu> * @version 1.0 * * @section LICENSE * * Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University] * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * @section DESCRIPTION * * Application for computing the connected components of a graph. * The algorithm is simple: on first iteration each vertex sends its * id to neighboring vertices. On subsequent iterations, each vertex chooses * the smallest id of its neighbors and broadcasts its (new) label to * its neighbors. The algorithm terminates when no vertex changes label. * * @section REMARKS * * This application is interesting demonstration of the asyncronous capabilities * of GraphChi, improving the convergence considerably. Consider * a chain graph 0->1->2->...->n. First, vertex 0 will write its value to its edges, * which will be observed by vertex 1 immediatelly, changing its label to 0. Nexgt, * vertex 2 changes its value to 0, and so on. This all happens in one iteration. * A subtle issue is that as any pair of vertices a<->b share an edge, they will * overwrite each others value. However, because they will be never run in parallel * (due to deterministic parallellism of graphchi), this does not compromise correctness. * * @author Aapo Kyrola */ #include <cmath> #include <string> #include "graphchi_basic_includes.hpp" #include "util/labelanalysis.hpp" using namespace graphchi; /** * Type definitions. 
Remember to create suitable graph shards using the * Sharder-program. */ typedef vid_t VertexDataType; // vid_t is the vertex id type typedef vid_t EdgeDataType; /** * GraphChi programs need to subclass GraphChiProgram<vertex-type, edge-type> * class. The main logic is usually in the update function. */ struct ConnectedComponentsProgram : public GraphChiProgram<VertexDataType, EdgeDataType> { /** * Vertex update function. * On first iteration ,each vertex chooses a label = the vertex id. * On subsequent iterations, each vertex chooses the minimum of the neighbor's * label (and itself). */ void update(graphchi_vertex<VertexDataType, EdgeDataType> &vertex, graphchi_context &gcontext) { /* This program requires selective scheduling. */ assert(gcontext.scheduler != NULL); if (gcontext.iteration == 0) { vertex.set_data(vertex.id()); gcontext.scheduler->add_task(vertex.id()); } /* On subsequent iterations, find the minimum label of my neighbors */ vid_t curmin = vertex.get_data(); for(int i=0; i < vertex.num_edges(); i++) { vid_t nblabel = vertex.edge(i)->get_data(); if (gcontext.iteration == 0) nblabel = vertex.edge(i)->vertex_id(); // Note! curmin = std::min(nblabel, curmin); } /* Set my label */ vertex.set_data(curmin); /** * Broadcast new label to neighbors by writing the value * to the incident edges. * Note: on first iteration, write only to out-edges to avoid * overwriting data (this is kind of a subtle point) */ vid_t label = vertex.get_data(); if (gcontext.iteration > 0) { for(int i=0; i < vertex.num_edges(); i++) { if (label < vertex.edge(i)->get_data()) { vertex.edge(i)->set_data(label); /* Schedule neighbor for update */ gcontext.scheduler->add_task(vertex.edge(i)->vertex_id()); } } } else if (gcontext.iteration == 0) { for(int i=0; i < vertex.num_outedges(); i++) { vertex.outedge(i)->set_data(label); } } } /** * Called before an iteration starts. */ void before_iteration(int iteration, graphchi_context &info) { } /** * Called after an iteration has finished. 
*/ void after_iteration(int iteration, graphchi_context &ginfo) { } /** * Called before an execution interval is started. */ void before_exec_interval(vid_t window_st, vid_t window_en, graphchi_context &ginfo) { } /** * Called after an execution interval has finished. */ void after_exec_interval(vid_t window_st, vid_t window_en, graphchi_context &ginfo) { } }; int main(int argc, const char ** argv) { /* GraphChi initialization will read the command line arguments and the configuration file. */ graphchi_init(argc, argv); /* Metrics object for keeping track of performance counters and other information. Currently required. */ metrics m("connected-components"); /* Basic arguments for application */ std::string filename = get_option_string("file"); // Base filename int niters = get_option_int("niters", 10); // Number of iterations (max) bool scheduler = true; // Always run with scheduler /* Process input file - if not already preprocessed */ int nshards = (int) convert_if_notexists<EdgeDataType>(filename, get_option_string("nshards", "auto")); if (get_option_int("onlyresult", 0) == 0) { /* Run */ ConnectedComponentsProgram program; graphchi_engine<VertexDataType, EdgeDataType> engine(filename, nshards, scheduler, m); engine.run(program, niters); } /* Run analysis of the connected components (output is written to a file) */ m.start_time("label-analysis"); analyze_labels<vid_t>(filename); m.stop_time("label-analysis"); /* Report execution metrics */ metrics_report(m); return 0; }
09jijiangwen-download
example_apps/connectedcomponents.cpp
C++
asf20
6,327
/** * @file * @author Aapo Kyrola <akyrola@cs.cmu.edu> * @version 1.0 * * @section LICENSE * * Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University] * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * @section DESCRIPTION * * Application for computing the connected components of a graph. * The algorithm is simple: on first iteration each vertex sends its * id to neighboring vertices. On subsequent iterations, each vertex chooses * the smallest id of its neighbors and broadcasts its (new) label to * its neighbors. The algorithm terminates when no vertex changes label. * * @section REMARKS * * Version of connected components that keeps the vertex values * in memory. * @author Aapo Kyrola */ #include <cmath> #include <string> #include "graphchi_basic_includes.hpp" #include "util/labelanalysis.hpp" using namespace graphchi; /** * Type definitions. Remember to create suitable graph shards using the * Sharder-program. */ typedef vid_t VertexDataType; // vid_t is the vertex id type typedef vid_t EdgeDataType; /** * GraphChi programs need to subclass GraphChiProgram<vertex-type, edge-type> * class. The main logic is usually in the update function. 
*/ struct ConnectedComponentsProgram : public GraphChiProgram<VertexDataType, EdgeDataType> { VertexDataType * vertex_values; vid_t neighbor_value(graphchi_edge<EdgeDataType> * edge) { return vertex_values[edge->vertex_id()]; } void set_data(graphchi_vertex<VertexDataType, EdgeDataType> &vertex, vid_t value) { vertex_values[vertex.id()] = value; vertex.set_data(value); } /** * Vertex update function. * On first iteration ,each vertex chooses a label = the vertex id. * On subsequent iterations, each vertex chooses the minimum of the neighbor's * label (and itself). */ void update(graphchi_vertex<VertexDataType, EdgeDataType> &vertex, graphchi_context &gcontext) { /* This program requires selective scheduling. */ assert(gcontext.scheduler != NULL); /* On subsequent iterations, find the minimum label of my neighbors */ vid_t curmin = vertex.get_data(); for(int i=0; i < vertex.num_edges(); i++) { vid_t nblabel = neighbor_value(vertex.edge(i)); curmin = std::min(nblabel, curmin); } /* If my label changes, schedule neighbors */ if (vertex.get_data() != curmin) { vid_t newlabel = curmin; for(int i=0; i < vertex.num_edges(); i++) { if (newlabel < neighbor_value(vertex.edge(i))) { /* Schedule neighbor for update */ gcontext.scheduler->add_task(vertex.edge(i)->vertex_id()); } } } set_data(vertex, curmin); } /** * Called before an iteration starts. */ void before_iteration(int iteration, graphchi_context &ctx) { if (iteration == 0) { /* initialize each vertex with its own lable */ vertex_values = new VertexDataType[ctx.nvertices]; for(int i=0; i < (int)ctx.nvertices; i++) { vertex_values[i] = i; } } } /** * Called after an iteration has finished. */ void after_iteration(int iteration, graphchi_context &ginfo) { } /** * Called before an execution interval is started. */ void before_exec_interval(vid_t window_st, vid_t window_en, graphchi_context &ginfo) { } /** * Called after an execution interval has finished. 
*/ void after_exec_interval(vid_t window_st, vid_t window_en, graphchi_context &ginfo) { } }; int main(int argc, const char ** argv) { /* GraphChi initialization will read the command line arguments and the configuration file. */ graphchi_init(argc, argv); /* Metrics object for keeping track of performance counters and other information. Currently required. */ metrics m("connected-components-inmem"); /* Basic arguments for application */ std::string filename = get_option_string("file"); // Base filename int niters = get_option_int("niters", 10); // Number of iterations (max) bool scheduler = true; // Always run with scheduler /* Process input file - if not already preprocessed */ int nshards = (int) convert_if_notexists<EdgeDataType>(filename, get_option_string("nshards", "auto")); if (get_option_int("onlyresult", 0) == 0) { /* Run */ ConnectedComponentsProgram program; graphchi_engine<VertexDataType, EdgeDataType> engine(filename, nshards, scheduler, m); engine.set_modifies_inedges(false); // Improves I/O performance. engine.set_modifies_outedges(false); // Improves I/O performance. engine.run(program, niters); } /* Run analysis of the connected components (output is written to a file) */ m.start_time("label-analysis"); analyze_labels<vid_t>(filename); m.stop_time("label-analysis"); /* Report execution metrics */ metrics_report(m); return 0; }
09jijiangwen-download
example_apps/inmemconncomps.cpp
C++
asf20
5,799
/** * @file * @author Aapo Kyrola <akyrola@cs.cmu.edu> * @version 1.0 * * @section LICENSE * * Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University] * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * @section DESCRIPTION * * Simple pagerank implementation. Uses the basic vertex-based API for * demonstration purposes. A faster implementation uses the functional API, * "pagerank_functional". */ #include <string> #include <fstream> #include <cmath> #define GRAPHCHI_DISABLE_COMPRESSION #include "graphchi_basic_includes.hpp" #include "util/toplist.hpp" using namespace graphchi; #define THRESHOLD 1e-1 #define RANDOMRESETPROB 0.15 typedef float VertexDataType; typedef float EdgeDataType; struct PagerankProgram : public GraphChiProgram<VertexDataType, EdgeDataType> { /** * Called before an iteration starts. Not implemented. */ void before_iteration(int iteration, graphchi_context &info) { } /** * Called after an iteration has finished. Not implemented. */ void after_iteration(int iteration, graphchi_context &ginfo) { } /** * Called before an execution interval is started. Not implemented. */ void before_exec_interval(vid_t window_st, vid_t window_en, graphchi_context &ginfo) { } /** * Pagerank update function. */ void update(graphchi_vertex<VertexDataType, EdgeDataType> &v, graphchi_context &ginfo) { float sum=0; if (ginfo.iteration == 0) { /* On first iteration, initialize vertex and out-edges. 
The initialization is important, because on every run, GraphChi will modify the data in the edges on disk. */ for(int i=0; i < v.num_outedges(); i++) { graphchi_edge<float> * edge = v.outedge(i); edge->set_data(1.0 / v.num_outedges()); } v.set_data(RANDOMRESETPROB); } else { /* Compute the sum of neighbors' weighted pageranks by reading from the in-edges. */ for(int i=0; i < v.num_inedges(); i++) { float val = v.inedge(i)->get_data(); sum += val; } /* Compute my pagerank */ float pagerank = RANDOMRESETPROB + (1 - RANDOMRESETPROB) * sum; /* Write my pagerank divided by the number of out-edges to each of my out-edges. */ if (v.num_outedges() > 0) { float pagerankcont = pagerank / v.num_outedges(); for(int i=0; i < v.num_outedges(); i++) { graphchi_edge<float> * edge = v.outedge(i); edge->set_data(pagerankcont); } } /* Keep track of the progression of the computation. GraphChi engine writes a file filename.deltalog. */ ginfo.log_change(std::abs(pagerank - v.get_data())); /* Set my new pagerank as the vertex value */ v.set_data(pagerank); } } }; int main(int argc, const char ** argv) { graphchi_init(argc, argv); metrics m("pagerank"); /* Parameters */ std::string filename = get_option_string("file"); // Base filename int niters = get_option_int("niters", 4); bool scheduler = false; // Non-dynamic version of pagerank. int ntop = get_option_int("top", 20); /* Process input file - if not already preprocessed */ int nshards = convert_if_notexists<EdgeDataType>(filename, get_option_string("nshards", "auto")); /* Run */ graphchi_engine<float, float> engine(filename, nshards, scheduler, m); engine.set_modifies_inedges(false); // Improves I/O performance. PagerankProgram program; engine.run(program, niters); /* Output top ranked vertices */ std::vector< vertex_value<float> > top = get_top_vertices<float>(filename, ntop); std::cout << "Print top " << ntop << " vertices:" << std::endl; for(int i=0; i < (int)top.size(); i++) { std::cout << (i+1) << ". 
" << top[i].vertex << "\t" << top[i].value << std::endl; } metrics_report(m); return 0; }
09jijiangwen-download
example_apps/pagerank.cpp
C++
asf20
4,923
/** * @file * @author Aapo Kyrola <akyrola@cs.cmu.edu> * @version 1.0 * * @section LICENSE * * Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University] * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * @section DESCRIPTION * * Random walk simulation. From a set of source vertices, a set of * random walks is started. Random walks walk via edges, and we use the * dynamic chivectors to support multiple walks in one edge. Each * vertex keeps track of the walks that pass by it, thus in the end * we have estimate of the "pagerank" of each vertex. * * Note, this version does not support 'resets' of random walks. * TODO: from each vertex, start new random walks with some probability, * and also terminate a walk with some probablity. * */ #define DYNAMICEDATA 1 #include <string> #include "graphchi_basic_includes.hpp" #include "api/dynamicdata/chivector.hpp" #include "util/toplist.hpp" using namespace graphchi; /** * Type definitions. Remember to create suitable graph shards using the * Sharder-program. */ typedef unsigned int VertexDataType; typedef chivector<vid_t> EdgeDataType; struct RandomWalkProgram : public GraphChiProgram<VertexDataType, EdgeDataType> { int walks_per_source() { return 100; } bool is_source(vid_t v) { return (v % 50 == 0); } /** * Vertex update function. 
*/ void update(graphchi_vertex<VertexDataType, EdgeDataType > &vertex, graphchi_context &gcontext) { if (gcontext.iteration == 0) { if (is_source(vertex.id())) { for(int i=0; i < walks_per_source(); i++) { /* Get random out edge's vector */ graphchi_edge<EdgeDataType> * outedge = vertex.random_outedge(); if (outedge != NULL) { chivector<vid_t> * evector = outedge->get_vector(); /* Add a random walk particle, represented by the vertex-id of the source (this vertex) */ evector->add(vertex.id()); gcontext.scheduler->add_task(outedge->vertex_id()); // Schedule destination } } } vertex.set_data(0); } else { /* Check inbound edges for walks and advance them. */ int num_walks = 0; for(int i=0; i < vertex.num_inedges(); i++) { graphchi_edge<EdgeDataType> * edge = vertex.inedge(i); chivector<vid_t> * invector = edge->get_vector(); for(int j=0; j < invector->size(); j++) { /* Get one walk */ vid_t walk = invector->get(j); /* Move to a random out-edge */ graphchi_edge<EdgeDataType> * outedge = vertex.random_outedge(); if (outedge != NULL) { chivector<vid_t> * outvector = outedge->get_vector(); /* Add a random walk particle, represented by the vertex-id of the source (this vertex) */ outvector->add(walk); gcontext.scheduler->add_task(outedge->vertex_id()); // Schedule destination } num_walks ++; } /* Remove all walks from the inbound vector */ invector->clear(); } /* Keep track of the walks passed by via this vertex */ vertex.set_data(vertex.get_data() + num_walks); } } /** * Called before an iteration starts. */ void before_iteration(int iteration, graphchi_context &gcontext) { } /** * Called after an iteration has finished. */ void after_iteration(int iteration, graphchi_context &gcontext) { } /** * Called before an execution interval is started. */ void before_exec_interval(vid_t window_st, vid_t window_en, graphchi_context &gcontext) { } /** * Called after an execution interval has finished. 
*/ void after_exec_interval(vid_t window_st, vid_t window_en, graphchi_context &gcontext) { } }; int main(int argc, const char ** argv) { /* GraphChi initialization will read the command line arguments and the configuration file. */ graphchi_init(argc, argv); /* Metrics object for keeping track of performance counters and other information. Currently required. */ metrics m("randomwalk"); /* Basic arguments for application */ std::string filename = get_option_string("file"); // Base filename int niters = get_option_int("niters", 4); // Number of iterations bool scheduler = true; // Whether to use selective scheduling /* Detect the number of shards or preprocess an input to create them */ bool preexisting_shards; int nshards = convert_if_notexists<vid_t>(filename, get_option_string("nshards", "auto"), preexisting_shards); /* Run */ RandomWalkProgram program; graphchi_engine<VertexDataType, EdgeDataType> engine(filename, nshards, scheduler, m); if (preexisting_shards) { engine.reinitialize_edge_data(0); } engine.run(program, niters); /* List top 20 */ int ntop = 20; std::vector< vertex_value<VertexDataType> > top = get_top_vertices<VertexDataType>(filename, ntop); std::cout << "Print top 20 vertices: " << std::endl; for(int i=0; i < (int) top.size(); i++) { std::cout << (i+1) << ". " << top[i].vertex << "\t" << top[i].value << std::endl; } /* Report execution metrics */ metrics_report(m); return 0; }
09jijiangwen-download
example_apps/randomwalks.cpp
C++
asf20
6,284
/** * @file * @author Aapo Kyrola <akyrola@cs.cmu.edu> * @version 1.0 * * @section LICENSE * * Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University] * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * @section DESCRIPTION * * Demonstration for streaming graph updates. This application reads from a file * list of edges and adds them into the graph continuously. Simultaneously, pagerank * is computed for the evolving graph. * * This code includes a fair amount of code for demo purposes. To be cleaned * eventually. */ #include <string> #include <fstream> #include <cmath> #define GRAPHCHI_DISABLE_COMPRESSION #include "graphchi_basic_includes.hpp" #include "engine/dynamic_graphs/graphchi_dynamicgraph_engine.hpp" #include "util/toplist.hpp" /* HTTP admin tool */ #include "httpadmin/chi_httpadmin.hpp" #include "httpadmin/plotter.hpp" using namespace graphchi; #define THRESHOLD 1e-1f #define RANDOMRESETPROB 0.15f #define DEMO 1 typedef float VertexDataType; typedef float EdgeDataType; graphchi_dynamicgraph_engine<float, float> * dyngraph_engine; std::string streaming_graph_file; std::string getname(vid_t v); std::string getname(vid_t userid) { #ifdef DEMO // Temporary code for demo purposes! 
int f = open("/Users/akyrola/graphs/twitter_names.dat", O_RDONLY); if (f < 0) return "n/a"; char s[16]; size_t idx = userid * 16; preada(f, s, 16, idx); close(f); s[15] = '\0'; return std::string(s); #else return "": #endif } struct PagerankProgram : public GraphChiProgram<VertexDataType, EdgeDataType> { /** * Called before an iteration starts. */ void before_iteration(int iteration, graphchi_context &gcontext) { } /** * Called after an iteration has finished. */ void after_iteration(int iteration, graphchi_context &gcontext) { #ifdef DEMO std::vector< vertex_value<float> > top = get_top_vertices<float>(gcontext.filename, 20); for(int i=0; i < (int) top.size(); i++) { vertex_value<float> vv = top[i]; std::cout << (i+1) << ". " << vv.vertex << " " << getname(vv.vertex) << ": " << vv.value << std::endl; } /* Keep top 20 available for http admin */ for(int i=0; i < (int) top.size(); i++) { vertex_value<float> vv = top[i]; std::stringstream ss; ss << "rank" << i; std::stringstream sv; sv << vv.vertex << ":" << getname(vv.vertex) << ":" << vv.value<< ""; dyngraph_engine->set_json(ss.str(), sv.str()); } #endif } /** * Called before an execution interval is started. */ void before_exec_interval(vid_t window_st, vid_t window_en, graphchi_context &ginfo) { #ifdef DEMO update_plotdata(dyngraph_engine); #endif } /** * Pagerank update function. 
*/ void update(graphchi_vertex<VertexDataType, EdgeDataType> &v, graphchi_context &ginfo) { float sum=0; if (ginfo.iteration == 0) { /* On first iteration, initialize vertex */ for(int i=0; i < v.num_outedges(); i++) { graphchi_edge<float> * edge = v.outedge(i); edge->set_data(1.0f / v.num_outedges()); if (ginfo.scheduler != NULL) ginfo.scheduler->add_task(edge->vertex_id()); } v.set_data(RANDOMRESETPROB); /* If using selective scheduling, schedule myself for next iteration */ if (ginfo.scheduler != NULL) ginfo.scheduler->add_task(v.id()); } else { /* Compute the sum of neighbors' weighted pageranks */ for(int i=0; i < v.num_inedges(); i++) { float val = v.inedge(i)->get_data(); sum += val; } /* Compute my pagerank */ float pagerank = RANDOMRESETPROB + (1 - RANDOMRESETPROB) * sum; float oldvalue = v.get_data(); float delta = (float) fabs(oldvalue - pagerank); bool significant_change = (delta >= THRESHOLD); if (v.num_outedges() > 0) { float pagerankcont = pagerank/v.num_outedges(); for(int i=0; i < v.num_outedges(); i++) { graphchi_edge<float> * edge = v.outedge(i); /* If using selective scheduling, and the change was larger than a threshold, add neighbor to schedule. */ if (ginfo.scheduler != NULL) { if (significant_change) { ginfo.scheduler->add_task(edge->vertex_id()); } } edge->set_data(pagerankcont); } } v.set_data(pagerank); /* Keep track of the progression of the computation */ ginfo.log_change(delta); } } }; /* Demo stuff. 
*/ class IntervalTopRequest : public custom_request_handler { public: virtual std::string handle(const char * req) { const char * shardnum_str = &req[strlen("/ajax/shardpagerank")]; int shardnum = atoi(shardnum_str); logstream(LOG_DEBUG) << "Requested shard pagerank: " << shardnum_str << std::endl; if (shardnum >= 0 && shardnum < dyngraph_engine->get_nshards()) { vid_t fromvid = dyngraph_engine->get_interval_start(shardnum); vid_t tovid = dyngraph_engine->get_interval_end(shardnum); std::vector< vertex_value<float> > top = get_top_vertices<float>(dyngraph_engine->get_context().filename, 10, fromvid, tovid + 1); std::stringstream ss; ss << "{"; for(int i=0; i < (int) top.size(); i++) { vertex_value<float> vv = top[i]; if (i > 0) ss << ","; ss << "\"rank" << i << "\": \"" << vv.vertex << ":" << getname(vv.vertex) << ":" << vv.value<< "\""; } ss << "}"; std::cout << ss.str(); return ss.str(); } return "error"; } virtual bool responds_to(const char * req) { return (strncmp(req, "/ajax/shardpagerank", 19) == 0); } }; bool running = true; void * plotter_thread(void * info); void * plotter_thread(void * info) { usleep(1000000 * 10); init_plots(dyngraph_engine); while(running) { /* Update plots */ drawplots(); usleep(1000000 * 10); } return NULL; } /** * Function executed by a separate thread that streams * graph from a file. */ void * dynamic_graph_reader(void * info); void * dynamic_graph_reader(void * info) { std::cout << "Start sleeping..." << std::endl; usleep(50000); std::cout << "End sleeping..." << std::endl; int edges_per_sec = get_option_int("edges_per_sec", 100000); logstream(LOG_INFO) << "Going to stream from: " << streaming_graph_file << std::endl; FILE * f = fopen(streaming_graph_file.c_str(), "r"); if (f == NULL) { logstream(LOG_ERROR) << "Could not open file for streaming: " << streaming_graph_file << " error: " << strerror(errno) << std::endl; } assert(f != NULL); logstream(LOG_INFO) << "Streaming speed capped at: " << edges_per_sec << " edges/sec." 
<< std::endl; size_t c = 0; size_t ingested = 0; // Used for flow control timeval last, now; gettimeofday(&last, NULL); vid_t from; vid_t to; char s[1024]; while(fgets(s, 1024, f) != NULL) { FIXLINE(s); /* Read next line */ char delims[] = "\t "; char * t; t = strtok(s, delims); from = atoi(t); t = strtok(NULL, delims); to = atoi(t); if (from == to) { // logstream(LOG_WARNING) << "Self-edge in stream: " << from << " <-> " << to << std::endl; continue; } bool success=false; while (!success) { success = dyngraph_engine->add_edge(from, to, 0.0f); } dyngraph_engine->add_task(from); ingested++; if (++c % edges_per_sec == 0) { std::cout << "Stream speed check...." << std::endl; double sincelast; double speed; // Throttling - keeps average speed of edges/sec in control do { gettimeofday(&now, NULL); sincelast = now.tv_sec-last.tv_sec+ ((double)(now.tv_usec-last.tv_usec))/1.0E6; usleep(20000); speed = (c / sincelast); } while (speed > edges_per_sec); dyngraph_engine->set_json("ingestspeed", speed); logstream(LOG_INFO) << "Stream speed check ended.... 
Speed now:" << speed << " edges/sec" << std::endl; dyngraph_engine->set_json("ingestedges", ingested); } if (c % 1000 == 0) { set_ingested_edges(ingested); } } fclose(f); dyngraph_engine->finish_after_iters(10); return NULL; } int main(int argc, const char ** argv) { graphchi_init(argc, argv); metrics m("streaming-pagerank"); /* Parameters */ std::string filename = get_option_string("file"); // Base filename int niters = 100000; // End of computation to be determined programmatically // Pagerank can be run with or without selective scheduling bool scheduler = true; int ntop = get_option_int("top", 20); /* Process input file (the base graph) - if not already preprocessed */ int nshards = convert_if_notexists<EdgeDataType>(filename, get_option_string("nshards", "auto")); /* Streaming input graph - must be in edge-list format */ streaming_graph_file = get_option_string_interactive("streaming_graph_file", "Pathname to graph file to stream edges from"); /* Create the engine object */ dyngraph_engine = new graphchi_dynamicgraph_engine<float, float>(filename, nshards, scheduler, m); dyngraph_engine->set_modifies_inedges(false); // Improves I/O performance. /* Start streaming thread */ pthread_t strthread; int ret = pthread_create(&strthread, NULL, dynamic_graph_reader, NULL); assert(ret>=0); /* Start HTTP admin */ start_httpadmin< graphchi_dynamicgraph_engine<float, float> >(dyngraph_engine); register_http_request_handler(new IntervalTopRequest()); pthread_t plotterthr; ret = pthread_create(&plotterthr, NULL, plotter_thread, NULL); assert(ret>=0); /* Run the engine */ PagerankProgram program; dyngraph_engine->run(program, niters); running = false; /* Output top ranked vertices */ std::vector< vertex_value<float> > top = get_top_vertices<float>(filename, ntop); std::cout << "Print top " << ntop << " vertices:" << std::endl; for(int i=0; i < (int)top.size(); i++) { std::cout << (i+1) << ". 
" << top[i].vertex << "\t" << top[i].value << std::endl; } metrics_report(m); return 0; }
09jijiangwen-download
example_apps/streaming_pagerank.cpp
C++
asf20
12,046
/** * @file * @author Aapo Kyrola <akyrola@cs.cmu.edu> * @version 1.0 * * @section LICENSE * * Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University] * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * @section DESCRIPTION * * Program for running ALS-matrix factorizatino toolkit from * GraphLab. This is an example of GraphLab v2.1 programs development * for GraphChi. */ #include <string> #include <algorithm> #include "../matrixmarket/mmio.h" #include "../matrixmarket/mmio.c" #include "graphchi_basic_includes.hpp" #include "api/graphlab2_1_GAS_api/graphlab.hpp" #include "als_vertex_program.hpp" using namespace graphchi; using namespace graphlab; // Forward declaration int convert_matrixmarket_for_ALS_graphlab(std::string filename); size_t vertex_data::NLATENT = 5; double als_vertex_program::TOLERANCE = 1e-3; double als_vertex_program::LAMBDA = 0.01; size_t als_vertex_program::MAX_UPDATES = -1; int main(int argc, const char ** argv) { /* GraphChi initialization will read the command line arguments and the configuration file. */ graphchi_init(argc, argv); /* Metrics object for keeping track of performance counters and other information. Currently required. */ metrics m("als-graphlab"); /* Basic arguments for application. NOTE: File will be automatically 'sharded'. 
*/ std::string filename = get_option_string("file"); // Base filename int niters = get_option_int("niters", 4); // Number of iterations /* Preprocess data if needed, or discover preprocess files */ int nshards = convert_matrixmarket_for_ALS_graphlab(filename); /* Run */ std::vector<vertex_data> * vertices = run_graphlab_vertexprogram<als_vertex_program>(filename, nshards, niters, false, m, false, false); /* Error aggregation */ error_aggregator final_error = run_graphlab_edge_aggregator<als_vertex_program, error_aggregator>(filename, nshards, error_aggregator::map, error_aggregator::finalize, vertices, m); std::cout << "Final train error: " << final_error.train_error << std::endl; /* TODO: write output latent matrices */ delete vertices; /* Report execution metrics */ metrics_report(m); return 0; } /** * Create a bipartite graph from a matrix. Each row corresponds to vertex * with the same id as the row number (0-based), but vertices correponsing to columns * have id + num-rows. */ int convert_matrixmarket_for_ALS_graphlab(std::string base_filename) { // Note, code based on: http://math.nist.gov/MatrixMarket/mmio/c/example_read.c int ret_code; MM_typecode matcode; FILE *f; int M, N, nz; /** * Create sharder object */ int nshards; if ((nshards = find_shards<edge_data>(base_filename, get_option_string("nshards", "auto")))) { logstream(LOG_INFO) << "File " << base_filename << " was already preprocessed, won't do it again. " << std::endl; logstream(LOG_INFO) << "If this is not intended, please delete the shard files and try again. " << std::endl; return nshards; } sharder<edge_data> sharderobj(base_filename); sharderobj.start_preprocessing(); if ((f = fopen(base_filename.c_str(), "r")) == NULL) { logstream(LOG_ERROR) << "Could not open file: " << base_filename << ", error: " << strerror(errno) << std::endl; exit(1); } if (mm_read_banner(f, &matcode) != 0) { logstream(LOG_ERROR) << "Could not process Matrix Market banner. 
File: " << base_filename << std::endl; logstream(LOG_ERROR) << "Matrix must be in the Matrix Market format. " << std::endl; exit(1); } /* This is how one can screen matrix types if their application */ /* only supports a subset of the Matrix Market data types. */ if (mm_is_complex(matcode) || !mm_is_sparse(matcode)) { logstream(LOG_ERROR) << "Sorry, this application does not support complex values and requires a sparse matrix." << std::endl; logstream(LOG_ERROR) << "Market Market type: " << mm_typecode_to_str(matcode) << std::endl; exit(1); } /* find out size of sparse matrix .... */ if ((ret_code = mm_read_mtx_crd_size(f, &M, &N, &nz)) !=0) { logstream(LOG_ERROR) << "Failed reading matrix size: error=" << ret_code << std::endl; exit(1); } logstream(LOG_INFO) << "Starting to read matrix-market input. Matrix dimensions: " << M << " x " << N << ", non-zeros: " << nz << std::endl; if (M < 5 || N < 5 || nz < 10) { logstream(LOG_ERROR) << "File is suspiciously small. Something wrong? File: " << base_filename << std::endl; assert(M < 5 || N < 5 || nz < 10); } if (!sharderobj.preprocessed_file_exists()) { for (int i=0; i<nz; i++) { int I, J; double val; fscanf(f, "%d %d %lg\n", &I, &J, &val); I--; /* adjust from 1-based to 0-based */ J--; sharderobj.preprocessing_add_edge(I, M + J, edge_data(val, edge_data::TRAIN)); } sharderobj.end_preprocessing(); } else { logstream(LOG_INFO) << "Matrix already preprocessed, just run sharder." << std::endl; } if (f !=stdin) fclose(f); logstream(LOG_INFO) << "Now creating shards." << std::endl; // Shard with a specified number of shards, or determine automatically if not defined nshards = sharderobj.execute_sharding(get_option_string("nshards", "auto")); return nshards; }
09jijiangwen-download
example_apps/matrix_factorization/graphlab_gas/als_graphlab.cpp
C++
asf20
6,296
/** * Copyright (c) 2009 Carnegie Mellon University. * All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an "AS * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language * governing permissions and limitations under the License. * * For more about this software visit: * * http://www.graphlab.ml.cmu.edu * */ #ifndef ALS_VERTEX_PROGRAM_HPP #define ALS_VERTEX_PROGRAM_HPP /** * \file * \ingroup toolkit_matrix_factorization * * \brief This file describes the vertex program for the alternating * least squares (ALS) matrix factorization algorithm. See * \ref als_vertex_program for description of the ALS Algorithm. */ #include <Eigen/Dense> //#include <graphlab.hpp> //#include "eigen_serialization.hpp" typedef Eigen::VectorXd vec_type; typedef Eigen::MatrixXd mat_type; /** * \ingroup toolkit_matrix_factorization * * \brief the vertex data type which contains the latent factor. * * Each row and each column in the matrix corresponds to a different * vertex in the ALS graph. Associated with each vertex is a factor * (vector) of latent parameters that represent that vertex. The goal * of the ALS algorithm is to find the values for these latent * parameters such that the non-zero entries in the matrix can be * predicted by taking the dot product of the row and column factors. */ struct vertex_data { /** * \brief A shared "constant" that specifies the number of latent * values to use. */ static size_t NLATENT; /** \brief The number of times this vertex has been updated. */ uint32_t nupdates; /** \brief The most recent L1 change in the factor value */ float residual; //! 
how much the latent value has changed /** \brief The latent factor for this vertex */ vec_type factor; /** * \brief Simple default constructor which randomizes the vertex * data */ vertex_data() : nupdates(0), residual(1) { randomize(); } /** \brief Randomizes the latent factor */ void randomize() { factor.resize(NLATENT); factor.setRandom(); } /** \brief Save the vertex data to a binary archive */ //void save(graphlab::oarchive& arc) const { // arc << nupdates << residual << factor; //} /** \brief Load the vertex data from a binary archive */ //void load(graphlab::iarchive& arc) { // arc >> nupdates >> residual >> factor; //} }; // end of vertex data /** * \brief The edge data stores the entry in the matrix. * * In addition the edge data also stores the most recent error estimate. */ struct edge_data : public graphlab::IS_POD_TYPE { /** * \brief The type of data on the edge; * * \li *Train:* the observed value is correct and used in training * \li *Validate:* the observed value is correct but not used in training * \li *Predict:* The observed value is not correct and should not be * used in training. */ enum data_role_type { TRAIN, VALIDATE, PREDICT }; /** \brief the observed value for the edge */ float obs; /** \brief The train/validation/test designation of the edge */ data_role_type role; /** \brief basic initialization */ edge_data(float obs = 0, data_role_type role = PREDICT) : obs(obs), role(role) { } }; // end of edge data /** * \brief The graph type is defined in terms of the vertex and edge * data. */ typedef graphlab::distributed_graph<vertex_data, edge_data> graph_type; /** * \brief Given a vertex and an edge return the other vertex in the * edge. */ inline graph_type::vertex_type get_other_vertex(graph_type::edge_type& edge, const graph_type::vertex_type& vertex) { return vertex.id() == edge.source().id()? 
edge.target() : edge.source(); }; // end of get_other_vertex /** * \brief Given an edge compute the error associated with that edge */ double extract_l2_error(const graph_type::edge_type & edge) { const double pred = edge.source().data().factor.dot(edge.target().data().factor); return (edge.data().obs - pred) * (edge.data().obs - pred); } // end of extract_l2_error /** * \brief The graph loader function is a line parser used for * distributed graph construction. */ // Commented out for graphchi /* inline bool graph_loader(graph_type& graph, const std::string& filename, const std::string& line) { ASSERT_FALSE(line.empty()); // Determine the role of the data edge_data::data_role_type role = edge_data::TRAIN; if(boost::ends_with(filename,".validate")) role = edge_data::VALIDATE; else if(boost::ends_with(filename, ".predict")) role = edge_data::PREDICT; // Parse the line std::stringstream strm(line); graph_type::vertex_id_type source_id(-1), target_id(-1); float obs(0); strm >> source_id >> target_id; if(role == edge_data::TRAIN || role == edge_data::VALIDATE) strm >> obs; // Create an edge and add it to the graph graph.add_edge(source_id, target_id+1000000, edge_data(obs, role)); return true; // successful load } // end of graph_loader */ /** * \brief The gather type used to construct XtX and Xty needed for the ALS * update * * To compute the ALS update we need to compute the sum of * \code * sum: XtX = nbr.factor.transpose() * nbr.factor * sum: Xy = nbr.factor * edge.obs * \endcode * For each of the neighbors of a vertex. * * To do this in the Gather-Apply-Scatter model the gather function * computes and returns a pair consisting of XtX and Xy which are then * added. The gather type represents that tuple and provides the * necessary gather_type::operator+= operation. 
* */ class gather_type { public: /** * \brief Stores the current sum of nbr.factor.transpose() * * nbr.factor */ mat_type XtX; /** * \brief Stores the current sum of nbr.factor * edge.obs */ vec_type Xy; /** \brief basic default constructor */ gather_type() { } /** * \brief This constructor computes XtX and Xy and stores the result * in XtX and Xy */ gather_type(const vec_type& X, const double y) : XtX(X.size(), X.size()), Xy(X.size()) { XtX.triangularView<Eigen::Upper>() = X * X.transpose(); Xy = X * y; } // end of constructor for gather type /** \brief Save the values to a binary archive */ // void save(graphlab::oarchive& arc) const { arc << XtX << Xy; } /** \brief Read the values from a binary archive */ // void load(graphlab::iarchive& arc) { arc >> XtX >> Xy; } /** * \brief Computes XtX += other.XtX and Xy += other.Xy updating this * tuples value */ gather_type& operator+=(const gather_type& other) { if(other.Xy.size() == 0) { ASSERT_EQ(other.XtX.rows(), 0); ASSERT_EQ(other.XtX.cols(), 0); } else { if(Xy.size() == 0) { ASSERT_EQ(XtX.rows(), 0); ASSERT_EQ(XtX.cols(), 0); XtX = other.XtX; Xy = other.Xy; } else { XtX.triangularView<Eigen::Upper>() += other.XtX; Xy += other.Xy; } } return *this; } // end of operator+= }; // end of gather type /** * ALS vertex program type */ class als_vertex_program : public graphlab::ivertex_program<graph_type, gather_type, graphlab::messages::sum_priority>, public graphlab::IS_POD_TYPE { public: /** The convergence tolerance */ static double TOLERANCE; static double LAMBDA; static size_t MAX_UPDATES; /** The set of edges to gather along */ edge_dir_type gather_edges(icontext_type& context, const vertex_type& vertex) const { return graphlab::ALL_EDGES; }; // end of gather_edges /** The gather function computes XtX and Xy */ gather_type gather(icontext_type& context, const vertex_type& vertex, edge_type& edge) const { if(edge.data().role == edge_data::TRAIN) { const vertex_type other_vertex = get_other_vertex(edge, vertex); 
return gather_type(other_vertex.data().factor, edge.data().obs); } else return gather_type(); } // end of gather function /** apply collects the sum of XtX and Xy */ void apply(icontext_type& context, vertex_type& vertex, const gather_type& sum) { // Get and reset the vertex data vertex_data& vdata = vertex.data(); // Determine the number of neighbors. Each vertex has only in or // out edges depending on which side of the graph it is located if(sum.Xy.size() == 0) { vdata.residual = 0; ++vdata.nupdates; return; } mat_type XtX = sum.XtX; vec_type Xy = sum.Xy; // Add regularization for(int i = 0; i < XtX.rows(); ++i) XtX(i,i) += LAMBDA; // /nneighbors; // Solve the least squares problem using eigen ---------------------------- const vec_type old_factor = vdata.factor; vdata.factor = XtX.selfadjointView<Eigen::Upper>().ldlt().solve(Xy); // Compute the residual change in the factor factor ----------------------- vdata.residual = (vdata.factor - old_factor).cwiseAbs().sum() / XtX.rows(); ++vdata.nupdates; } // end of apply /** The edges to scatter along */ edge_dir_type scatter_edges(icontext_type& context, const vertex_type& vertex) const { return graphlab::ALL_EDGES; }; // end of scatter edges /** Scatter reschedules neighbors */ void scatter(icontext_type& context, const vertex_type& vertex, edge_type& edge) const { /* edge_data& edata = edge.data(); if(edata.role == edge_data::TRAIN) { const vertex_type other_vertex = get_other_vertex(edge, vertex); const vertex_data& vdata = vertex.data(); const vertex_data& other_vdata = other_vertex.data(); const double pred = vdata.factor.dot(other_vdata.factor); const float error = std::fabs(edata.obs - pred); const double priority = (error * vdata.residual); // Reschedule neighbors ------------------------------------------------ if( priority > TOLERANCE && other_vdata.nupdates < MAX_UPDATES) context.signal(other_vertex, priority); }*/ } // end of scatter function /** * \brief Signal all vertices on one side of the bipartite 
graph */ static graphlab::empty signal_left(icontext_type& context, vertex_type& vertex) { if(vertex.num_out_edges() > 0) context.signal(vertex); return graphlab::empty(); } // end of signal_left }; // end of als vertex program struct error_aggregator : public graphlab::IS_POD_TYPE { typedef als_vertex_program::icontext_type icontext_type; typedef graph_type::edge_type edge_type; double train_error, validation_error; size_t ntrain, nvalidation; error_aggregator() : train_error(0), validation_error(0), ntrain(0), nvalidation(0) { } error_aggregator& operator+=(const error_aggregator& other) { train_error += other.train_error; validation_error += other.validation_error; ntrain += other.ntrain; nvalidation += other.nvalidation; return *this; } static error_aggregator map(icontext_type& context, const graph_type::edge_type& edge) { error_aggregator agg; if(edge.data().role == edge_data::TRAIN) { agg.train_error = extract_l2_error(edge); agg.ntrain = 1; } else if(edge.data().role == edge_data::VALIDATE) { agg.validation_error = extract_l2_error(edge); agg.nvalidation = 1; } return agg; } static void finalize(icontext_type& context, error_aggregator& agg) { ASSERT_GT(agg.ntrain, 0); agg.train_error = std::sqrt(agg.train_error / agg.ntrain); context.cout() << context.elapsed_seconds() << "\t" << agg.train_error; if(agg.nvalidation > 0) { const double validation_error = std::sqrt(agg.validation_error / agg.nvalidation); context.cout() << "\t" << validation_error; } context.cout() << std::endl; } }; // end of error aggregator struct prediction_saver { typedef graph_type::vertex_type vertex_type; typedef graph_type::edge_type edge_type; std::string save_vertex(const vertex_type& vertex) const { return ""; //nop } std::string save_edge(const edge_type& edge) const { std::stringstream strm; const double prediction = edge.source().data().factor.dot(edge.target().data().factor); strm << edge.source().id() << '\t' << edge.target().id() << '\t' << prediction << '\n'; return 
strm.str(); } }; // end of prediction_saver #endif
09jijiangwen-download
example_apps/matrix_factorization/graphlab_gas/als_vertex_program.hpp
C++
asf20
12,821
/** * @file * @author Aapo Kyrola <akyrola@cs.cmu.edu> * @version 1.0 * * @section LICENSE * * Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University] * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * @section DESCRIPTION * * Matrix factorization with the Alternative Least Squares (ALS) algorithm. * This code is based on GraphLab's implementation of ALS by Joey Gonzalez * and Danny Bickson (CMU). A good explanation of the algorithm is * given in the following paper: * Large-Scale Parallel Collaborative Filtering for the Netflix Prize * Yunhong Zhou, Dennis Wilkinson, Robert Schreiber and Rong Pan * http://www.springerlink.com/content/j1076u0h14586183/ * * Faster version of ALS, which stores latent factors of vertices in-memory. * Thus, this version requires more memory. See the version "als_edgefactors" * for a low-memory implementation. * * * In the code, we use movie-rating terminology for clarity. This code has been * tested with the Netflix movie rating challenge, where the task is to predict * how user rates movies in range from 1 to 5. * * This code is has integrated preprocessing, 'sharding', so it is not necessary * to run sharder prior to running the matrix factorization algorithm. Input * data must be provided in the Matrix Market format (http://math.nist.gov/MatrixMarket/formats.html). * * ALS uses free linear algebra library 'Eigen'. See Readme_Eigen.txt for instructions * how to obtain it. 
* * At the end of the processing, the two latent factor matrices are written into files in * the matrix market format. * * @section USAGE * * bin/example_apps/matrix_factorization/als_edgefactors file <matrix-market-input> niters 5 * * */ #define GRAPHCHI_DISABLE_COMPRESSION #include <string> #include <algorithm> #include "graphchi_basic_includes.hpp" /* ALS-related classes are contained in als.hpp */ #include "als.hpp" using namespace graphchi; /** * Type definitions. Remember to create suitable graph shards using the * Sharder-program. */ typedef latentvec_t VertexDataType; typedef float EdgeDataType; // Edges store the "rating" of user->movie pair /** * GraphChi programs need to subclass GraphChiProgram<vertex-type, edge-type> * class. The main logic is usually in the update function. */ struct ALSVerticesInMemProgram : public GraphChiProgram<VertexDataType, EdgeDataType> { mutex lock; std::vector<latentvec_t> latent_factors_inmem; // Helper virtual void set_latent_factor(graphchi_vertex<VertexDataType, EdgeDataType> &vertex, latentvec_t &fact) { vertex.set_data(fact); // Note, also stored on disk. This is non-optimal... latent_factors_inmem[vertex.id()] = fact; } /** * Called before an iteration starts. */ void before_iteration(int iteration, graphchi_context &gcontext) { if (iteration == 0) { latent_factors_inmem.resize(gcontext.nvertices); // Initialize in-memory vertices. } } /** * Vertex update function. */ void update(graphchi_vertex<VertexDataType, EdgeDataType> &vertex, graphchi_context &gcontext) { if (gcontext.iteration == 0) { /* On first iteration, initialize vertex (and its edges). This is usually required, because on each run, GraphChi will modify the data files. To start from scratch, it is easiest do initialize the program in code. Alternatively, you can keep a copy of initial data files. 
*/ latentvec_t latentfac; latentfac.init(); set_latent_factor(vertex, latentfac); } else { mat XtX(NLATENT, NLATENT); XtX.setZero(); vec Xty(NLATENT); Xty.setZero(); // Compute XtX and Xty (NOTE: unweighted) for(int e=0; e < vertex.num_edges(); e++) { float observation = vertex.edge(e)->get_data(); latentvec_t & nbr_latent = latent_factors_inmem[vertex.edge(e)->vertex_id()]; for(int i=0; i<NLATENT; i++) { Xty(i) += nbr_latent[i] * observation; for(int j=i; j < NLATENT; j++) { XtX(j,i) += nbr_latent[i] * nbr_latent[j]; } } } // Symmetrize for(int i=0; i <NLATENT; i++) for(int j=i + 1; j< NLATENT; j++) XtX(i,j) = XtX(j,i); // Diagonal for(int i=0; i < NLATENT; i++) XtX(i,i) += (LAMBDA) * vertex.num_edges(); // Solve the least squares problem with eigen using Cholesky decomposition vec veclatent = XtX.ldlt().solve(Xty); // Convert to plain doubles (this is useful because now the output data by GraphCHI // is plain binary double matrix that can be read, for example, by Matlab). latentvec_t newlatent; for(int i=0; i < NLATENT; i++) newlatent[i] = veclatent[i]; double sqerror = 0; bool compute_rmse = (gcontext.iteration == gcontext.num_iterations-1 && vertex.num_outedges() == 0); if (compute_rmse) { // Compute RMSE only on "right side" of bipartite graph for(int e=0; e < vertex.num_edges(); e++) { // Compute RMSE float observation = vertex.edge(e)->get_data(); latentvec_t & nbr_latent = latent_factors_inmem[vertex.edge(e)->vertex_id()]; double prediction = nbr_latent.dot(newlatent); sqerror += (prediction - observation) * (prediction - observation); } rmselock.lock(); rmse += sqerror; rmselock.unlock(); if (vertex.id() % 5000 == 1) { logstream(LOG_DEBUG) << "Computed RMSE for : " << vertex.id() << std::endl; } } set_latent_factor(vertex, newlatent); if (vertex.id() % 100000 == 1) { std::cout << gcontext.iteration << ": " << vertex.id() << std::endl; } } /* Hack: we need to count ourselves the number of vertices on left and right side of the bipartite graph. 
TODO: maybe there should be specialized support for bipartite graphs in GraphChi? */ if (vertex.num_outedges() > 0) { // Left side on the bipartite graph if (vertex.id() > max_left_vertex) { lock.lock(); max_left_vertex = std::max(vertex.id(), max_left_vertex); lock.unlock(); } } else { if (vertex.id() > max_right_vertex) { lock.lock(); max_right_vertex = std::max(vertex.id(), max_right_vertex); lock.unlock(); } } } /** * Called after an iteration has finished. */ void after_iteration(int iteration, graphchi_context &gcontext) { } /** * Called before an execution interval is started. */ void before_exec_interval(vid_t window_st, vid_t window_en, graphchi_context &gcontext) { } /** * Called after an execution interval has finished. */ void after_exec_interval(vid_t window_st, vid_t window_en, graphchi_context &gcontext) { } }; int main(int argc, const char ** argv) { /* GraphChi initialization will read the command line arguments and the configuration file. */ graphchi_init(argc, argv); /* Metrics object for keeping track of performance counters and other information. Currently required. */ metrics m("als-inmemory-factors"); /* Basic arguments for application. NOTE: File will be automatically 'sharded'. */ std::string filename = get_option_string("file"); // Base filename int niters = get_option_int("niters", 6); // Number of iterations bool scheduler = false; // Selective scheduling not supported for now. 
/* Preprocess data if needed, or discover preprocess files */ int nshards = convert_matrixmarket_for_ALS<float>(filename); /* Run */ ALSVerticesInMemProgram program; graphchi_engine<VertexDataType, EdgeDataType> engine(filename, nshards, scheduler, m); engine.set_modifies_inedges(false); engine.set_modifies_outedges(false); engine.set_enable_deterministic_parallelism(false); engine.run(program, niters); /* Report result (train RMSE) */ double trainRMSE = sqrt(rmse / (1.0 * engine.num_edges())); m.set("train_rmse", trainRMSE); m.set("latent_dimension", NLATENT); std::cout << "Latent factor dimension: " << NLATENT << " - train RMSE: " << trainRMSE << std::endl; /* Output latent factor matrices in matrix-market format */ vid_t numvertices = engine.num_vertices(); assert(numvertices == max_right_vertex + 1); // Sanity check output_als_result(filename, numvertices, max_left_vertex); /* Report execution metrics */ metrics_report(m); return 0; }
09jijiangwen-download
example_apps/matrix_factorization/als_vertices_inmem.cpp
C++
asf20
9,976
/** * @file * @author Aapo Kyrola <akyrola@cs.cmu.edu> * @version 1.0 * * @section LICENSE * * Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University] * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * @section DESCRIPTION * * Matrix factorization with the Alternative Least Squares (ALS) algorithm. * This code is based on GraphLab's implementation of ALS by Joey Gonzalez * and Danny Bickson (CMU). A good explanation of the algorithm is * given in the following paper: * Large-Scale Parallel Collaborative Filtering for the Netflix Prize * Yunhong Zhou, Dennis Wilkinson, Robert Schreiber and Rong Pan * http://www.springerlink.com/content/j1076u0h14586183/ * * There are two versions of the ALS in the example applications. This version * is slower, but works with very low memory. In this implementation, a vertex * writes its D-dimensional latent factor to its incident edges. See application * "als_vertices_inmem" for a faster version, which requires more memory. * * In the code, we use movie-rating terminology for clarity. This code has been * tested with the Netflix movie rating challenge, where the task is to predict * how user rates movies in range from 1 to 5. * * This code is has integrated preprocessing, 'sharding', so it is not necessary * to run sharder prior to running the matrix factorization algorithm. Input * data must be provided in the Matrix Market format (http://math.nist.gov/MatrixMarket/formats.html). 
* * ALS uses free linear algebra library 'Eigen'. See Readme_Eigen.txt for instructions * how to obtain it. * * At the end of the processing, the two latent factor matrices are written into files in * the matrix market format. * * @section USAGE * * bin/example_apps/matrix_factorization/als_edgefactors file <matrix-market-input> niters 5 * * */ #include <string> #include <algorithm> #include "graphchi_basic_includes.hpp" /* ALS-related classes are contained in als.hpp */ #include "als.hpp" using namespace graphchi; /** * Type definitions. Remember to create suitable graph shards using the * Sharder-program. */ typedef latentvec_t VertexDataType; typedef als_factor_and_weight EdgeDataType; // Edges store the "rating" of user->movie pair // and the latent factor of their incident vertex. /** * GraphChi programs need to subclass GraphChiProgram<vertex-type, edge-type> * class. The main logic is usually in the update function. */ struct ALSEdgeFactorsProgram : public GraphChiProgram<VertexDataType, EdgeDataType> { mutex lock; // Helper virtual void set_latent_factor(graphchi_vertex<VertexDataType, EdgeDataType> &vertex, latentvec_t &fact) { vertex.set_data(fact); for(int i=0; i < vertex.num_edges(); i++) { als_factor_and_weight factwght = vertex.edge(i)->get_data(); factwght.factor = fact; vertex.edge(i)->set_data(factwght); // Note that neighbors override the values they have written to edges. // This is ok, because vertices are always executed in same order. } } /** * Vertex update function. */ void update(graphchi_vertex<VertexDataType, EdgeDataType> &vertex, graphchi_context &gcontext) { if (gcontext.iteration == 0) { /* On first iteration, initialize vertex (and its edges). This is usually required, because on each run, GraphChi will modify the data files. To start from scratch, it is easiest do initialize the program in code. Alternatively, you can keep a copy of initial data files. 
*/ latentvec_t latentfac; latentfac.init(); set_latent_factor(vertex, latentfac); } else { mat XtX(NLATENT, NLATENT); XtX.setZero(); vec Xty(NLATENT); Xty.setZero(); // Compute XtX and Xty (NOTE: unweighted) for(int e=0; e < vertex.num_edges(); e++) { float observation = vertex.edge(e)->get_data().weight; latentvec_t nbr_latent = vertex.edge(e)->get_data().factor; for(int i=0; i<NLATENT; i++) { Xty(i) += nbr_latent[i] * observation; for(int j=i; j < NLATENT; j++) { XtX(j,i) += nbr_latent[i] * nbr_latent[j]; } } } // Symmetrize for(int i=0; i <NLATENT; i++) for(int j=i + 1; j< NLATENT; j++) XtX(i,j) = XtX(j,i); // Diagonal for(int i=0; i < NLATENT; i++) XtX(i,i) += (LAMBDA) * vertex.num_edges(); // Solve the least squares problem with eigen using Cholesky decomposition vec veclatent = XtX.ldlt().solve(Xty); // Convert to plain doubles (this is useful because now the output data by GraphCHI // is plain binary double matrix that can be read, for example, by Matlab). latentvec_t newlatent; for(int i=0; i < NLATENT; i++) newlatent[i] = veclatent[i]; double sqerror = 0; bool compute_rmse = (gcontext.iteration == gcontext.num_iterations-1 && vertex.num_outedges() == 0); if (compute_rmse) { // Compute RMSE only on "right side" of bipartite graph for(int e=0; e < vertex.num_edges(); e++) { // Compute RMSE float observation = vertex.edge(e)->get_data().weight; latentvec_t nbr_latent = vertex.edge(e)->get_data().factor; double prediction = nbr_latent.dot(newlatent); sqerror += (prediction - observation) * (prediction - observation); } rmselock.lock(); rmse += sqerror; rmselock.unlock(); if (vertex.id() % 5000 == 1) { logstream(LOG_DEBUG) << "Computed RMSE for : " << vertex.id() << std::endl; } } set_latent_factor(vertex, newlatent); if (vertex.id() % 100000 == 1) { std::cout << gcontext.iteration << ": " << vertex.id() << std::endl; } } /* Hack: we need to count ourselves the number of vertices on left and right side of the bipartite graph. 
TODO: maybe there should be specialized support for bipartite graphs in GraphChi? */ if (vertex.num_outedges() > 0) { // Left side on the bipartite graph if (vertex.id() > max_left_vertex) { lock.lock(); max_left_vertex = std::max(vertex.id(), max_left_vertex); lock.unlock(); } } else { if (vertex.id() > max_right_vertex) { lock.lock(); max_right_vertex = std::max(vertex.id(), max_right_vertex); lock.unlock(); } } } /** * Called before an iteration starts. */ void before_iteration(int iteration, graphchi_context &gcontext) { } /** * Called after an iteration has finished. */ void after_iteration(int iteration, graphchi_context &gcontext) { } /** * Called before an execution interval is started. */ void before_exec_interval(vid_t window_st, vid_t window_en, graphchi_context &gcontext) { } /** * Called after an execution interval has finished. */ void after_exec_interval(vid_t window_st, vid_t window_en, graphchi_context &gcontext) { } }; int main(int argc, const char ** argv) { /* GraphChi initialization will read the command line arguments and the configuration file. */ graphchi_init(argc, argv); /* Metrics object for keeping track of performance counters and other information. Currently required. */ metrics m("als-edgefactors"); /* Basic arguments for application. NOTE: File will be automatically 'sharded'. */ std::string filename = get_option_string("file"); // Base filename int niters = get_option_int("niters", 6); // Number of iterations bool scheduler = false; // Selective scheduling not supported for now. 
/* Preprocess data if needed, or discover preprocess files */ int nshards = convert_matrixmarket_for_ALS<als_factor_and_weight>(filename); /* Run */ ALSEdgeFactorsProgram program; graphchi_engine<VertexDataType, EdgeDataType> engine(filename, nshards, scheduler, m); engine.set_enable_deterministic_parallelism(false); engine.run(program, niters); /* Report result (train RMSE) */ double trainRMSE = sqrt(rmse / (1.0 * engine.num_edges())); m.set("train_rmse", trainRMSE); m.set("latent_dimension", NLATENT); std::cout << "Latent factor dimension: " << NLATENT << " - train RMSE: " << trainRMSE << std::endl; /* Output latent factor matrices in matrix-market format */ vid_t numvertices = engine.num_vertices(); assert(numvertices == max_right_vertex + 1); // Sanity check output_als_result(filename, numvertices, max_left_vertex); /* Report execution metrics */ metrics_report(m); return 0; }
09jijiangwen-download
example_apps/matrix_factorization/als_edgefactors.cpp
C++
asf20
10,149
/** * @file * @author Aapo Kyrola <akyrola@cs.cmu.edu> * @version 1.0 * * @section LICENSE * * Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University] * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * @section DESCRIPTION * * Common code for ALS implementations. */ #ifndef DEF_ALSHPP #define DEF_ALSHPP #include <assert.h> #include <cmath> #include <errno.h> #include <string> #include <stdint.h> #include "matrixmarket/mmio.h" #include "matrixmarket/mmio.c" #include "api/chifilenames.hpp" #include "api/vertex_aggregator.hpp" #include "preprocessing/sharder.hpp" // See note above about Eigen #include "Eigen/Dense" #define EIGEN_YES_I_KNOW_SPARSE_MODULE_IS_NOT_STABLE_YET #include "Eigen/Sparse" #include "Eigen/Cholesky" #include "Eigen/Eigenvalues" #include "Eigen/SVD" using namespace Eigen; typedef MatrixXd mat; typedef VectorXd vec; typedef VectorXi ivec; typedef MatrixXi imat; typedef SparseVector<double> sparse_vec; using namespace graphchi; #ifndef NLATENT #define NLATENT 5 // Dimension of the latent factors. You can specify this in compile time as well (in make). #endif double LAMBDA = 0.065; /// RMSE computation double rmse=0.0; mutex rmselock; // Hackish: we need to count the number of left // and right vertices in the bipartite graph ourselves. 
vid_t max_left_vertex =0 ; vid_t max_right_vertex = 0; struct latentvec_t { double d[NLATENT]; latentvec_t() { } void init() { for(int k=0; k < NLATENT; k++) d[k] = 0.001 * (std::rand() % 1000); } double & operator[] (int idx) { return d[idx]; } bool operator!=(const latentvec_t &oth) const { for(int i=0; i<NLATENT; i++) { if (d[i] != oth.d[i]) return true; } return false; } double dot(latentvec_t &oth) const { double x=0; for(int i=0; i<NLATENT; i++) x+= oth.d[i]*d[i]; return x; } }; struct als_factor_and_weight { latentvec_t factor; float weight; als_factor_and_weight() {} als_factor_and_weight(float obs) { weight = obs; factor.init(); } }; /** * Create a bipartite graph from a matrix. Each row corresponds to vertex * with the same id as the row number (0-based), but vertices correponsing to columns * have id + num-rows. */ template <typename als_edge_type> int convert_matrixmarket_for_ALS(std::string base_filename) { // Note, code based on: http://math.nist.gov/MatrixMarket/mmio/c/example_read.c int ret_code; MM_typecode matcode; FILE *f; uint M, N; size_t nz; /** * Create sharder object */ int nshards; if ((nshards = find_shards<als_edge_type>(base_filename, get_option_string("nshards", "auto")))) { if (check_origfile_modification_earlier<als_edge_type>(base_filename, nshards)) { logstream(LOG_INFO) << "File " << base_filename << " was already preprocessed, won't do it again. " << std::endl; logstream(LOG_INFO) << "If this is not intended, please delete the shard files and try again. " << std::endl; return nshards; } } sharder<als_edge_type> sharderobj(base_filename); sharderobj.start_preprocessing(); if ((f = fopen(base_filename.c_str(), "r")) == NULL) { logstream(LOG_ERROR) << "Could not open file: " << base_filename << ", error: " << strerror(errno) << std::endl; exit(1); } if (mm_read_banner(f, &matcode) != 0) { logstream(LOG_ERROR) << "Could not process Matrix Market banner. 
File: " << base_filename << std::endl; logstream(LOG_ERROR) << "Matrix must be in the Matrix Market format. " << std::endl; exit(1); } /* This is how one can screen matrix types if their application */ /* only supports a subset of the Matrix Market data types. */ if (mm_is_complex(matcode) || !mm_is_sparse(matcode)) { logstream(LOG_ERROR) << "Sorry, this application does not support complex values and requires a sparse matrix." << std::endl; logstream(LOG_ERROR) << "Market Market type: " << mm_typecode_to_str(matcode) << std::endl; exit(1); } /* find out size of sparse matrix .... */ if ((ret_code = mm_read_mtx_crd_size(f, &M, &N, &nz)) !=0) { logstream(LOG_ERROR) << "Failed reading matrix size: error=" << ret_code << std::endl; exit(1); } logstream(LOG_INFO) << "Starting to read matrix-market input. Matrix dimensions: " << M << " x " << N << ", non-zeros: " << nz << std::endl; if (M < 5 || N < 5 || nz < 10) { logstream(LOG_ERROR) << "File is suspiciously small. Something wrong? File: " << base_filename << std::endl; assert(M < 5 || N < 5 || nz < 10); } if (!sharderobj.preprocessed_file_exists()) { for (size_t i=0; i<nz; i++) { uint I, J; double val; int rc = fscanf(f, "%u %u %lg\n", &I, &J, &val); if (rc != 3) logstream(LOG_FATAL)<<"Error reading line: " << i << std::endl; I--; /* adjust from 1-based to 0-based */ J--; sharderobj.preprocessing_add_edge(I, M + J, als_edge_type((float)val)); } sharderobj.end_preprocessing(); } else { logstream(LOG_INFO) << "Matrix already preprocessed, just run sharder." << std::endl; } if (f !=stdin) fclose(f); logstream(LOG_INFO) << "Now creating shards." 
<< std::endl;

    // Shard with a specified number of shards, or determine automatically if not defined
    nshards = sharderobj.execute_sharding(get_option_string("nshards", "auto"));
    return nshards;
}


/**
 * Vertex callback that streams latent factors to a file in Matrix Market
 * dense-array format: a banner, a "rows cols" header, then one value per
 * line (NLATENT values per vertex, vertices in iteration order).
 */
struct MMOutputter : public VCallback<latentvec_t> {
    FILE * outf;  // owned: opened in the constructor, closed in the destructor

    MMOutputter(std::string fname, vid_t nvertices)  {
        MM_typecode matcode;
        mm_initialize_typecode(&matcode);
        mm_set_matrix(&matcode);
        mm_set_array(&matcode);
        mm_set_real(&matcode);
        outf = fopen(fname.c_str(), "w");
        assert(outf != NULL);
        mm_write_banner(outf, matcode);
        // Header announces nvertices rows of NLATENT columns.
        mm_write_mtx_array_size(outf, nvertices, NLATENT);
    }

    // Append one vertex's NLATENT factor components to the file.
    void callback(vid_t vertex_id, latentvec_t &vec) {
        for(int i=0; i < NLATENT; i++) {
            fprintf(outf, "%lf\n", vec.d[i]);
        }
    }

    ~MMOutputter() {
        if (outf != NULL) fclose(outf);
    }
};

/**
 * Writes the two factor matrices: U for the "left" (row) vertices
 * [0, max_left_vertex] and V for the "right" (column) vertices above that.
 */
void output_als_result(std::string filename, vid_t numvertices, vid_t max_left_vertex) {
    MMOutputter mmoutput_left(filename + "_U.mm", max_left_vertex + 1);
    foreach_vertices<latentvec_t>(filename, 0, max_left_vertex + 1, mmoutput_left);
    MMOutputter mmoutput_right(filename + "_V.mm", numvertices - max_left_vertex - 1);
    // NOTE(review): the V header above declares (numvertices - max_left_vertex - 1)
    // rows, but this call iterates only up to numvertices-1; if foreach_vertices'
    // end bound is exclusive (as the U call's max_left_vertex+1 suggests), one
    // fewer row is emitted than the header claims -- confirm against the
    // foreach_vertices range convention.
    foreach_vertices<latentvec_t>(filename, max_left_vertex + 1, numvertices-1, mmoutput_right);
    logstream(LOG_INFO) << "ALS output files (in matrix market format): " << filename + "_U.mm" <<
                           ", " << filename + "_V.mm" << std::endl;
}

#endif
09jijiangwen-download
example_apps/matrix_factorization/als.hpp
C++
asf20
7,669
/* * Matrix Market I/O library for ANSI C * * See http://math.nist.gov/MatrixMarket for details. * * */ #include <stdio.h> #include <string.h> #include <stdlib.h> #include <ctype.h> #include "mmio.h" int mm_read_unsymmetric_sparse(const char *fname, int *M_, int *N_, int *nz_, double **val_, int **I_, int **J_) { FILE *f; MM_typecode matcode; uint M, N; size_t nz, i; double *val; int *I, *J; if ((f = fopen(fname, "r")) == NULL) return -1; if (mm_read_banner(f, &matcode) != 0) { printf("mm_read_unsymetric: Could not process Matrix Market banner "); printf(" in file [%s]\n", fname); return -1; } if ( !(mm_is_real(matcode) && mm_is_matrix(matcode) && mm_is_sparse(matcode))) { fprintf(stderr, "Sorry, this application does not support "); fprintf(stderr, "Market Market type: [%s]\n", mm_typecode_to_str(matcode)); return -1; } /* find out size of sparse matrix: M, N, nz .... */ if (mm_read_mtx_crd_size(f, &M, &N, &nz) !=0) { fprintf(stderr, "read_unsymmetric_sparse(): could not parse matrix size.\n"); return -1; } *M_ = M; *N_ = N; *nz_ = nz; /* reseve memory for matrices */ I = (int *) malloc(nz * sizeof(int)); J = (int *) malloc(nz * sizeof(int)); val = (double *) malloc(nz * sizeof(double)); *val_ = val; *I_ = I; *J_ = J; /* NOTE: when reading in doubles, ANSI C requires the use of the "l" */ /* specifier as in "%lg", "%lf", "%le", otherwise errors will occur */ /* (ANSI C X3.159-1989, Sec. 4.9.6.2, p. 
136 lines 13-15) */ for (i=0; i<nz; i++) { int rc = fscanf(f, "%d %d %lg\n", &I[i], &J[i], &val[i]); if (rc != 3) logstream(LOG_FATAL)<<"Failed to read input row: " << i << std::endl; I[i]--; /* adjust from 1-based to 0-based */ J[i]--; } fclose(f); return 0; } int mm_is_valid(MM_typecode matcode) { if (!mm_is_matrix(matcode)) return 0; if (mm_is_dense(matcode) && mm_is_pattern(matcode)) return 0; if (mm_is_real(matcode) && mm_is_hermitian(matcode)) return 0; if (mm_is_pattern(matcode) && (mm_is_hermitian(matcode) || mm_is_skew(matcode))) return 0; return 1; } int mm_read_banner(FILE *f, MM_typecode *matcode) { char line[MM_MAX_LINE_LENGTH]; char banner[MM_MAX_TOKEN_LENGTH]; char mtx[MM_MAX_TOKEN_LENGTH]; char crd[MM_MAX_TOKEN_LENGTH]; char data_type[MM_MAX_TOKEN_LENGTH]; char storage_scheme[MM_MAX_TOKEN_LENGTH]; char *p; mm_clear_typecode(matcode); if (fgets(line, MM_MAX_LINE_LENGTH, f) == NULL) return MM_PREMATURE_EOF; if (sscanf(line, "%s %s %s %s %s", banner, mtx, crd, data_type, storage_scheme) != 5){ perror("Error: Failed to read matrix market header"); exit(1); } for (p=mtx; *p!='\0'; *p=tolower(*p),p++); /* convert to lower case */ for (p=crd; *p!='\0'; *p=tolower(*p),p++); for (p=data_type; *p!='\0'; *p=tolower(*p),p++); for (p=storage_scheme; *p!='\0'; *p=tolower(*p),p++); /* check for banner */ if (strncmp(banner, MatrixMarketBanner, strlen(MatrixMarketBanner)) != 0) return MM_NO_HEADER; /* first field should be "mtx" */ if (strcmp(mtx, MM_MTX_STR) != 0) return MM_UNSUPPORTED_TYPE; mm_set_matrix(matcode); /* second field describes whether this is a sparse matrix (in coordinate storgae) or a dense array */ if (strcmp(crd, MM_SPARSE_STR) == 0) mm_set_sparse(matcode); else if (strcmp(crd, MM_DENSE_STR) == 0) mm_set_dense(matcode); else return MM_UNSUPPORTED_TYPE; /* third field */ if (strcmp(data_type, MM_REAL_STR) == 0) mm_set_real(matcode); else if (strcmp(data_type, MM_COMPLEX_STR) == 0) mm_set_complex(matcode); else if (strcmp(data_type, MM_PATTERN_STR) 
== 0) mm_set_pattern(matcode); else if (strcmp(data_type, MM_INT_STR) == 0) mm_set_integer(matcode); else return MM_UNSUPPORTED_TYPE; /* fourth field */ if (strcmp(storage_scheme, MM_GENERAL_STR) == 0) mm_set_general(matcode); else if (strcmp(storage_scheme, MM_SYMM_STR) == 0) mm_set_symmetric(matcode); else if (strcmp(storage_scheme, MM_HERM_STR) == 0) mm_set_hermitian(matcode); else if (strcmp(storage_scheme, MM_SKEW_STR) == 0) mm_set_skew(matcode); else return MM_UNSUPPORTED_TYPE; return 0; } int mm_write_mtx_crd_size(FILE *f, uint M, uint N, size_t nz) { if (fprintf(f, "%u %u %llu\n", M, N, (long long unsigned int)nz) != 3) return MM_COULD_NOT_WRITE_FILE; else return 0; } int mm_read_mtx_crd_size(FILE *f, uint *_M, uint *_N, size_t *nz ) { char line[MM_MAX_LINE_LENGTH]; /* set return null parameter values, in case we exit with errors */ *_M = *_N = *nz = 0; /* now continue scanning until you reach the end-of-comments */ do { if (fgets(line,MM_MAX_LINE_LENGTH,f) == NULL) return MM_PREMATURE_EOF; }while (line[0] == '%'); /* line[] is either blank or has M,N, nz */ if (sscanf(line, "%u %u %llu", _M, _N, (long long unsigned int*)nz) == 3) return 0; else return MM_UNSUPPORTED_TYPE; } int mm_read_mtx_array_size(FILE *f, uint *M, uint *N) { char line[MM_MAX_LINE_LENGTH]; int num_items_read; /* set return null parameter values, in case we exit with errors */ *M = *N = 0; /* now continue scanning until you reach the end-of-comments */ do { if (fgets(line,MM_MAX_LINE_LENGTH,f) == NULL) return MM_PREMATURE_EOF; }while (line[0] == '%'); /* line[] is either blank or has M,N, nz */ if (sscanf(line, "%u %u", M, N) == 2) return 0; else /* we have a blank line */ do { num_items_read = fscanf(f, "%u %u", M, N); if (num_items_read == EOF) return MM_PREMATURE_EOF; } while (num_items_read != 2); return 0; } int mm_write_mtx_array_size(FILE *f, uint M, uint N) { if (fprintf(f, "%u %u\n", M, N) != 2) return MM_COULD_NOT_WRITE_FILE; else return 0; } 
/*-------------------------------------------------------------------------*/

/******************************************************************/
/* use when I[], J[], and val[] are already allocated             */
/******************************************************************/

/* Reads nz coordinate entries into caller-provided arrays.
   complex: two doubles per entry (val must hold 2*nz, interleaved re/im);
   real:    one double per entry;
   pattern: indices only, val is not touched.
   Indices are stored as read (NOT converted to 0-based).
   Returns 0, MM_PREMATURE_EOF on short read, or MM_UNSUPPORTED_TYPE. */
int mm_read_mtx_crd_data(FILE *f, uint M, uint N, size_t nz, uint I[], uint J[],
        double val[], MM_typecode matcode)
{
    size_t i;
    if (mm_is_complex(matcode))
    {
        for (i=0; i<nz; i++)
            if (fscanf(f, "%u %u %lg %lg", &I[i], &J[i], &val[2*i], &val[2*i+1])
                != 4) return MM_PREMATURE_EOF;
    }
    else if (mm_is_real(matcode))
    {
        for (i=0; i<nz; i++)
        {
            if (fscanf(f, "%u %u %lg\n", &I[i], &J[i], &val[i])
                != 3) return MM_PREMATURE_EOF;
        }
    }
    else if (mm_is_pattern(matcode))
    {
        for (i=0; i<nz; i++)
            if (fscanf(f, "%u %u", &I[i], &J[i])
                != 2) return MM_PREMATURE_EOF;
    }
    else
        return MM_UNSUPPORTED_TYPE;

    return 0;
}

/* Reads a single coordinate entry (indices NOT converted to 0-based).
   imag is written only for complex matrices; neither real nor imag is
   touched for pattern matrices. */
int mm_read_mtx_crd_entry(FILE *f, int *I, int *J,
        double *real, double *imag, MM_typecode matcode)
{
    if (mm_is_complex(matcode))
    {
        if (fscanf(f, "%d %d %lg %lg", I, J, real, imag)
            != 4) return MM_PREMATURE_EOF;
    }
    else if (mm_is_real(matcode))
    {
        if (fscanf(f, "%d %d %lg\n", I, J, real)
            != 3) return MM_PREMATURE_EOF;
    }
    else if (mm_is_pattern(matcode))
    {
        if (fscanf(f, "%d %d", I, J) != 2) return MM_PREMATURE_EOF;
    }
    else
        return MM_UNSUPPORTED_TYPE;

    return 0;
}


/************************************************************************
    mm_read_mtx_crd()  fills M, N, nz, array of values, and return
                        type code, e.g. 
'MCRS' if matrix is complex, values[] is of size 2*nz, (nz pairs of real/imaginary values) ************************************************************************/ int mm_read_mtx_crd(char *fname, uint *M, uint *N, size_t *nz, uint **I, uint **J, double **val, MM_typecode *matcode) { int ret_code; FILE *f; if (strcmp(fname, "stdin") == 0) f=stdin; else if ((f = fopen(fname, "r")) == NULL) return MM_COULD_NOT_READ_FILE; if ((ret_code = mm_read_banner(f, matcode)) != 0) return ret_code; if (!(mm_is_valid(*matcode) && mm_is_sparse(*matcode) && mm_is_matrix(*matcode))) return MM_UNSUPPORTED_TYPE; if ((ret_code = mm_read_mtx_crd_size(f, M, N, nz)) != 0) return ret_code; *I = (uint *) malloc(*nz * sizeof(uint)); *J = (uint *) malloc(*nz * sizeof(uint)); *val = NULL; if (mm_is_complex(*matcode)) { *val = (double *) malloc(*nz * 2 * sizeof(double)); ret_code = mm_read_mtx_crd_data(f, *M, *N, *nz, *I, *J, *val, *matcode); if (ret_code != 0) return ret_code; } else if (mm_is_real(*matcode)) { *val = (double *) malloc(*nz * sizeof(double)); ret_code = mm_read_mtx_crd_data(f, *M, *N, *nz, *I, *J, *val, *matcode); if (ret_code != 0) return ret_code; } else if (mm_is_pattern(*matcode)) { ret_code = mm_read_mtx_crd_data(f, *M, *N, *nz, *I, *J, *val, *matcode); if (ret_code != 0) return ret_code; } if (f != stdin) fclose(f); return 0; } int mm_write_banner(FILE *f, MM_typecode matcode) { char *str = mm_typecode_to_str(matcode); int ret_code; ret_code = fprintf(f, "%s %s\n", MatrixMarketBanner, str); free(str); if (ret_code !=2 ) return MM_COULD_NOT_WRITE_FILE; else return 0; } int mm_write_mtx_crd(char fname[], int M, int N, int nz, int I[], int J[], double val[], MM_typecode matcode) { FILE *f; int i; if (strcmp(fname, "stdout") == 0) f = stdout; else if ((f = fopen(fname, "w")) == NULL) return MM_COULD_NOT_WRITE_FILE; /* print banner followed by typecode */ fprintf(f, "%s ", MatrixMarketBanner); fprintf(f, "%s\n", mm_typecode_to_str(matcode)); /* print matrix sizes and nonzeros 
*/ fprintf(f, "%d %d %d\n", M, N, nz); /* print values */ if (mm_is_pattern(matcode)) for (i=0; i<nz; i++) fprintf(f, "%d %d\n", I[i], J[i]); else if (mm_is_real(matcode)) for (i=0; i<nz; i++) fprintf(f, "%d %d %20.16g\n", I[i], J[i], val[i]); else if (mm_is_complex(matcode)) for (i=0; i<nz; i++) fprintf(f, "%d %d %20.16g %20.16g\n", I[i], J[i], val[2*i], val[2*i+1]); else { if (f != stdout) fclose(f); return MM_UNSUPPORTED_TYPE; } if (f !=stdout) fclose(f); return 0; } /** * Create a new copy of a string s. mm_strdup() is a common routine, but * not part of ANSI C, so it is included here. Used by mm_typecode_to_str(). * */ char *mm_strdup(const char *s) { int len = strlen(s); char *s2 = (char *) malloc((len+1)*sizeof(char)); return strcpy(s2, s); } char *mm_typecode_to_str(MM_typecode matcode) { char buffer[MM_MAX_LINE_LENGTH]; char *types[4]; char *mm_strdup(const char *); /* check for MTX type */ if (mm_is_matrix(matcode)) types[0] = (char*)MM_MTX_STR; else return NULL; /* check for CRD or ARR matrix */ if (mm_is_sparse(matcode)) types[1] = (char*)MM_SPARSE_STR; else if (mm_is_dense(matcode)) types[1] = (char*)MM_DENSE_STR; else return NULL; /* check for element data type */ if (mm_is_real(matcode)) types[2] = (char*)MM_REAL_STR; else if (mm_is_complex(matcode)) types[2] = (char*)MM_COMPLEX_STR; else if (mm_is_pattern(matcode)) types[2] = (char*)MM_PATTERN_STR; else if (mm_is_integer(matcode)) types[2] = (char*)MM_INT_STR; else return NULL; /* check for symmetry type */ if (mm_is_general(matcode)) types[3] = (char*)MM_GENERAL_STR; else if (mm_is_symmetric(matcode)) types[3] = (char*)MM_SYMM_STR; else if (mm_is_hermitian(matcode)) types[3] = (char*)MM_HERM_STR; else if (mm_is_skew(matcode)) types[3] = (char*)MM_SKEW_STR; else return NULL; sprintf(buffer,"%s %s %s %s", types[0], types[1], types[2], types[3]); return mm_strdup(buffer); }
09jijiangwen-download
example_apps/matrix_factorization/matrixmarket/mmio.c
C
asf20
13,014
/* * Matrix Market I/O library for ANSI C * * See http://math.nist.gov/MatrixMarket for details. * * */ #ifndef MM_IO_H #define MM_IO_H #define MM_MAX_LINE_LENGTH 1025 #define MatrixMarketBanner "%%MatrixMarket" #define MM_MAX_TOKEN_LENGTH 64 typedef char MM_typecode[4]; char *mm_typecode_to_str(MM_typecode matcode); int mm_read_banner(FILE *f, MM_typecode *matcode); int mm_read_mtx_crd_size(FILE *f, uint *M, uint *N, size_t *nz); int mm_read_mtx_array_size(FILE *f, uint *M, uint *N); int mm_write_banner(FILE *f, MM_typecode matcode); int mm_write_mtx_crd_size(FILE *f, uint M, uint N, size_t nz); int mm_write_mtx_array_size(FILE *f, uint M, uint N); /********************* MM_typecode query fucntions ***************************/ #define mm_is_matrix(typecode) ((typecode)[0]=='M') #define mm_is_sparse(typecode) ((typecode)[1]=='C') #define mm_is_coordinate(typecode)((typecode)[1]=='C') #define mm_is_dense(typecode) ((typecode)[1]=='A') #define mm_is_array(typecode) ((typecode)[1]=='A') #define mm_is_complex(typecode) ((typecode)[2]=='C') #define mm_is_real(typecode) ((typecode)[2]=='R') #define mm_is_pattern(typecode) ((typecode)[2]=='P') #define mm_is_integer(typecode) ((typecode)[2]=='I') #define mm_is_symmetric(typecode)((typecode)[3]=='S') #define mm_is_general(typecode) ((typecode)[3]=='G') #define mm_is_skew(typecode) ((typecode)[3]=='K') #define mm_is_hermitian(typecode)((typecode)[3]=='H') int mm_is_valid(MM_typecode matcode); /* too complex for a macro */ /********************* MM_typecode modify fucntions ***************************/ #define mm_set_matrix(typecode) ((*typecode)[0]='M') #define mm_set_coordinate(typecode) ((*typecode)[1]='C') #define mm_set_array(typecode) ((*typecode)[1]='A') #define mm_set_dense(typecode) mm_set_array(typecode) #define mm_set_sparse(typecode) mm_set_coordinate(typecode) #define mm_set_complex(typecode)((*typecode)[2]='C') #define mm_set_real(typecode) ((*typecode)[2]='R') #define 
mm_set_pattern(typecode)((*typecode)[2]='P') #define mm_set_integer(typecode)((*typecode)[2]='I') #define mm_set_symmetric(typecode)((*typecode)[3]='S') #define mm_set_general(typecode)((*typecode)[3]='G') #define mm_set_skew(typecode) ((*typecode)[3]='K') #define mm_set_hermitian(typecode)((*typecode)[3]='H') #define mm_clear_typecode(typecode) ((*typecode)[0]=(*typecode)[1]= \ (*typecode)[2]=' ',(*typecode)[3]='G') #define mm_initialize_typecode(typecode) mm_clear_typecode(typecode) /********************* Matrix Market error codes ***************************/ #define MM_COULD_NOT_READ_FILE 11 #define MM_PREMATURE_EOF 12 #define MM_NOT_MTX 13 #define MM_NO_HEADER 14 #define MM_UNSUPPORTED_TYPE 15 #define MM_LINE_TOO_LONG 16 #define MM_COULD_NOT_WRITE_FILE 17 /******************** Matrix Market internal definitions ******************** MM_matrix_typecode: 4-character sequence ojbect sparse/ data storage dense type scheme string position: [0] [1] [2] [3] Matrix typecode: M(atrix) C(oord) R(eal) G(eneral) A(array) C(omplex) H(ermitian) P(attern) S(ymmetric) I(nteger) K(kew) ***********************************************************************/ #define MM_MTX_STR "matrix" #define MM_ARRAY_STR "array" #define MM_DENSE_STR "array" #define MM_COORDINATE_STR "coordinate" #define MM_SPARSE_STR "coordinate" #define MM_COMPLEX_STR "complex" #define MM_REAL_STR "real" #define MM_INT_STR "integer" #define MM_GENERAL_STR "general" #define MM_SYMM_STR "symmetric" #define MM_HERM_STR "hermitian" #define MM_SKEW_STR "skew-symmetric" #define MM_PATTERN_STR "pattern" /* high level routines */ int mm_write_mtx_crd(char fname[], uint M, uint N, size_t nz, uint I[], uint J[], double val[], MM_typecode matcode); int mm_read_mtx_crd_data(FILE *f, uint M, uint N, size_t nz, uint I[], uint J[], double val[], MM_typecode matcode); int mm_read_mtx_crd_entry(FILE *f, uint *I, uint *J, double *real, double *img, MM_typecode matcode); int mm_read_unsymmetric_sparse(const char *fname, uint 
*M_, uint *N_, size_t *nz_, double **val_, uint **I_, uint **J_); #endif
09jijiangwen-download
example_apps/matrix_factorization/matrixmarket/mmio.h
C
asf20
4,243
/** * @file * @author Aapo Kyrola <akyrola@cs.cmu.edu> * @version 1.0 * * @section LICENSE * * Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University] * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * @section DESCRIPTION * * A simple community detection algorithm based on label propagation. * LPA-algorithm is explained in http://arxiv.org/pdf/0910.1154.pdf * "Advanced modularity-specialized label propagation algorithm for detecting communities in networks * X. Liu, T. Murata Tokyo Institute of Technology, 2-12-1 Ookayama, Meguro, Tokyo 152-8552, Japan * * @section REMARKS * * The algorithm is very similar to the connected components algorithm, but instead * of vertex choosing the minimum label of its neighbor, it chooses the most frequent one. * * However, because the operation (most frequent label) is not commutative, * we need to store both vertices labels in an edge. See comment below, above the * struct "bidirectional_label". * * Note, that this algorithm is not very sophisticated and is prone to local minimas. * If you want to use this seriously, try with different initial labeling. * Also, a more sophisticated algorithm called LPAm should be doable on GraphChi. 
* * @author Aapo Kyrola */ #include <cmath> #include <map> #include <string> #include "graphchi_basic_includes.hpp" #include "util/labelanalysis.hpp" using namespace graphchi; #define GRAPHCHI_DISABLE_COMPRESSION /** * Unlike in connected components, we need * to ensure that neighbors do not overwrite each * others values. This is achieved by keeping two values * in an edge. In this struct, smaller_one is the id of the * vertex that has smaller id, and larger_one the others. * This complexity is due to us ignoring the direction of an edge. */ struct bidirectional_label { vid_t smaller_one; vid_t larger_one; }; vid_t & neighbor_label(bidirectional_label & bidir, vid_t myid, vid_t nbid) { if (myid < nbid) { return bidir.larger_one; } else { return bidir.smaller_one; } } vid_t & my_label(bidirectional_label & bidir, vid_t myid, vid_t nbid) { if (myid < nbid) { return bidir.smaller_one; } else { return bidir.larger_one; } } typedef vid_t VertexDataType; // vid_t is the vertex id type typedef bidirectional_label EdgeDataType; // Note, 8-byte edge data /** * GraphChi programs need to subclass GraphChiProgram<vertex-type, edge-type> * class. The main logic is usually in the update function. */ struct CommunityDetectionProgram : public GraphChiProgram<VertexDataType, EdgeDataType> { /** * Vertex update function. */ void update(graphchi_vertex<VertexDataType, EdgeDataType> &vertex, graphchi_context &gcontext) { /* This program requires selective scheduling. */ assert(gcontext.scheduler != NULL); vid_t newlabel; if (gcontext.iteration == 0) { /* On first iteration, choose label vertex id */ vid_t firstlabel = vertex.id(); vertex.set_data(firstlabel); newlabel = firstlabel; /* Scheduler myself for next iteration */ gcontext.scheduler->add_task(vertex.id()); } else { if (vertex.num_edges() == 0) return; // trivial /* The basic idea is to find the label that is most popular among this vertex's neighbors. This label will be chosen as the new label of this vertex. 
*/ // This part could be optimized: STL map is quite slow. std::map<vid_t, int> counts; int maxcount=0; vid_t maxlabel=0; /* Iterate over all the edges */ for(int i=0; i < vertex.num_edges(); i++) { /* Extract neighbor's current label. The edge contains the labels of both vertices it connects, so we need to use the right one. (See comment for bidirectional_label above) */ bidirectional_label edgelabel = vertex.edge(i)->get_data(); vid_t nblabel = neighbor_label(edgelabel, vertex.id(), vertex.edge(i)->vertex_id()); /* Check if this label (nblabel) has been encountered before ... */ std::map<vid_t, int>::iterator existing = counts.find(nblabel); int newcount = 0; if(existing == counts.end()) { /* ... if not, we add this label with count of one to the map */ counts.insert(std::pair<vid_t,int>(nblabel, 1)); newcount = 1; } else { /* ... if yes, we increment the counter for this label by 1 */ existing->second++; newcount = existing->second; } /* Finally, we keep track of the most frequent label */ if (newcount > maxcount || (maxcount == newcount && nblabel > maxlabel)) { maxlabel = nblabel; maxcount = newcount; } } newlabel = maxlabel; } /** * Write my label to my neighbors. */ if (newlabel != vertex.get_data() || gcontext.iteration == 0) { vertex.set_data(newlabel); for(int i=0; i<vertex.num_edges(); i++) { bidirectional_label labels_on_edge = vertex.edge(i)->get_data(); my_label(labels_on_edge, vertex.id(), vertex.edge(i)->vertex_id()) = newlabel; vertex.edge(i)->set_data(labels_on_edge); // On first iteration, everyone schedules themselves. if (gcontext.iteration > 0) gcontext.scheduler->add_task(vertex.edge(i)->vertex_id()); } } } /** * Called before an iteration starts. */ void before_iteration(int iteration, graphchi_context &info) { } /** * Called after an iteration has finished. */ void after_iteration(int iteration, graphchi_context &ginfo) { } /** * Called before an execution interval is started. 
*/ void before_exec_interval(vid_t window_st, vid_t window_en, graphchi_context &ginfo) { } /** * Called after an execution interval has finished. */ void after_exec_interval(vid_t window_st, vid_t window_en, graphchi_context &ginfo) { } }; int main(int argc, const char ** argv) { /* GraphChi initialization will read the command line arguments and the configuration file. */ graphchi_init(argc, argv); /* Metrics object for keeping track of performance counters and other information. Currently required. */ metrics m("community-detection"); /* Basic arguments for application */ std::string filename = get_option_string("file"); // Base filename int niters = get_option_int("niters", 10); // Number of iterations (max) bool scheduler = true; // Always run with scheduler /* Process input file - if not already preprocessed */ int nshards = convert_if_notexists<EdgeDataType>(filename, get_option_string("nshards", "auto")); if (get_option_int("onlyresult", 0) == 0) { /* Run */ CommunityDetectionProgram program; graphchi_engine<VertexDataType, EdgeDataType> engine(filename, nshards, scheduler, m); engine.run(program, niters); } /* Run analysis of the communities (output is written to a file) */ m.start_time("label-analysis"); analyze_labels<vid_t>(filename); m.stop_time("label-analysis"); /* Report execution metrics */ metrics_report(m); return 0; }
09jijiangwen-download
example_apps/communitydetection.cpp
C++
asf20
8,464
# Following command makes the unittest.sh fail if any of the # commands fail. set -e function display_name { echo "****************************************************************************" echo -n "*******************" echo -n $1 echo "**********************" echo "****************************************************************************" } display_name "TESTING BASELINE" ./toolkits/collaborative_filtering/baseline --training=smallnetflix_mm --validation=smallnetflix_mm --minval=1 --maxval=5 --quiet=1 --algorithm=user_mean --clean_cache=1 display_name "TESTING ALS" ./toolkits/collaborative_filtering/als --training=smallnetflix_mm --validation=smallnetflix_mme --lambda=0.065 --minval=1 --maxval=5 --max_iter=6 --quiet=1 --clean_cache=1 display_name "TESTING ALS - RMSE VALIDATION STOP" ./toolkits/collaborative_filtering/als --training=smallnetflix_mm --validation=smallnetflix_mme --lambda=0.1 --minval=1 --maxval=5 --max_iter=100 --quiet=1 --halt_on_rmse_increase=3 --clean_cache=1 display_name "TESTING ALS SERIALIZATION" ./toolkits/collaborative_filtering/als --training=smallnetflix_mm --validation=smallnetflix_mme --lambda=0.065 --minval=1 --maxval=5 --max_iter=6 --quiet=1 --load_factors_from_file=1 --clean_cache=1 display_name "TESTING ALS - RATING" ./toolkits/collaborative_filtering/rating --algorithm=als --training=smallnetflix_mm --quiet=1 --num_ratings=3 mv smallnetflix_mm.ids smallnetflix_mm.ids1 ./toolkits/collaborative_filtering/rating --algorithm=als --training=smallnetflix_mm --quiet=1 --num_ratings=3 diff smallnetflix_mm.ids smallnetflix_mm.ids1 display_name "TESTING ALS COORD" ./toolkits/collaborative_filtering/als_coord --training=smallnetflix_mm --validation=smallnetflix_mme --lambda=0.065 --minval=1 --maxval=5 --max_iter=6 --quiet=1 --clean_cache=1 display_name "TESTING SGD" ./toolkits/collaborative_filtering/sgd --training=smallnetflix_mm --validation=smallnetflix_mme --sgd_lambda=1e-4 --sgd_gamma=1e-4 --minval=1 --maxval=5 --max_iter=6 
--quiet=1 --clean_cache=1 display_name "TESTING BIAS_SGD" ./toolkits/collaborative_filtering/biassgd --training=smallnetflix_mm --validation=smallnetflix_mme --biassgd_lambda=1e-4 --biassgd_gamma=1e-4 --minval=1 --maxval=5 --max_iter=6 --quiet=1 --clean_cache=1 display_name "TESTING BIAS_SGD SERIALIZATION" ./toolkits/collaborative_filtering/biassgd --training=smallnetflix_mm --validation=smallnetflix_mme --biassgd_lambda=1e-4 --biassgd_gamma=1e-4 --minval=1 --maxval=5 --max_iter=6 --quiet=1 --load_factors_from_file=1 display_name "TESTING SVD++" ./toolkits/collaborative_filtering/svdpp --training=smallnetflix_mm --validation=smallnetflix_mme --biassgd_lambda=1e-4 --biassgd_gamma=1e-4 --minval=1 --maxval=5 --max_iter=6 --quiet=1 --clean_cache=1 ./toolkits/collaborative_filtering/rating2 --training=smallnetflix_mm --algorithm=svdpp --num_ratings=3 --quiet=1 display_name "TESTING SVD++ SERIALIZATION" ./toolkits/collaborative_filtering/svdpp --training=smallnetflix_mm --validation=smallnetflix_mme --biassgd_lambda=1e-4 --biassgd_gamma=1e-4 --minval=1 --maxval=5 --max_iter=6 --quiet=1 --load_factors_from_file=1 --clean_cache=1 display_name "TESTING NMF" ./toolkits/collaborative_filtering/nmf --training=reverse_netflix.mm --minval=1 --maxval=5 --max_iter=6 --quiet=1 --clean_cache=1 display_name "TESTING SVD" rm -fR smallnetflix_mm.* ./toolkits/collaborative_filtering/svd --training=smallnetflix_mm --nsv=3 --nv=5 --max_iter=5 --quiet=1 --tol=1e-1 --clean_cache=1 display_name "TESTING SVD-ONESIDED" ./toolkits/collaborative_filtering/svd_onesided --training=smallnetflix_mm --nsv=3 --nv=5 --max_iter=5 --quiet=1 --tol=1e-1 --clean_cache=1 display_name "TESTING RBM" ./toolkits/collaborative_filtering/rbm --training=smallnetflix_mm --validation=smallnetflix_mme --minval=1 --maxval=5 --max_iter=6 --quiet=1 --clean_cache=1 display_name "TESTING WALS" ./toolkits/collaborative_filtering/wals --training=time_smallnetflix --validation=time_smallnetflixe --lambda=0.065 --minval=1 
--maxval=5 --max_iter=6 --K=27 --quiet=1 --clean_cache=1 display_name "TESTING WALS - RATING" ./toolkits/collaborative_filtering/rating --training=time_smallnetflix --algorithm=wals --quiet=1 --num_ratings=3 display_name "TESTING ALS-TENSOR" ./toolkits/collaborative_filtering/als_tensor --training=time_smallnetflix --validation=time_smallnetflixe --lambda=0.065 --minval=1 --maxval=5 --max_iter=6 --K=27 --quiet=1 --clean_cache=1 display_name "TESTING TIME-SVD++" ./toolkits/collaborative_filtering/timesvdpp --training=time_smallnetflix --validation=time_smallnetflixe --minval=1 --maxval=5 --max_iter=6 --quiet=1 --clean_cache=1 display_name "TESTING LIBFM" ./toolkits/collaborative_filtering/libfm --training=time_smallnetflix --validation=time_smallnetflixe --minval=1 --maxval=5 --max_iter=6 --quiet=1 --clean_cache=1 display_name "TESTING BIAS_SGD2 - LOGISTIC LOSS" ./toolkits/collaborative_filtering/biassgd2 --training=smallnetflix_mm --minval=1 --maxval=5 --validation=smallnetflix_mme --biassgd_gamma=1e-2 --biassgd_lambda=1e-2 --max_iter=6 --quiet=1 --loss=logistic --biassgd_step_dec=0.99999 --clean_cache=1 display_name "TESTING BIAS_SGD2 - ABS LOSS" ./toolkits/collaborative_filtering/biassgd2 --training=smallnetflix_mm --minval=1 --maxval=5 --validation=smallnetflix_mme --biassgd_gamma=1e-2 --biassgd_lambda=1e-2 --max_iter=6 --quiet=1 --loss=abs --biassgd_step_dec=0.99999 --clean_cache=1 display_name "TESTING BIAS_SGD2 - SQUARE LOSS" ./toolkits/collaborative_filtering/biassgd2 --training=smallnetflix_mm --minval=1 --maxval=5 --validation=smallnetflix_mme --biassgd_gamma=1e-2 --biassgd_lambda=1e-2 --max_iter=6 --quiet=1 --loss=square --biassgd_step_dec=0.99999 --clean_cache=1 #display_name "TESTING PMF" # ./toolkits/collaborative_filtering/pmf --training=smallnetflix_mm --validation=smallnetflix_mme --quiet=1 --minval=1 --maxval=5 --max_iter=10 --pmf_burn_in=5 --test=smallnetflix_mme --regnormal=0 --clean_cache=1 display_name "TESTING ITEMCF" 
./toolkits/collaborative_filtering/itemcf --training=smallnetflix_mm --nshards=1 --quiet=1 --K=10 --clean_cache=1 display_name "TESTING ITEMCF - AIOLLI ASYM COST" ./toolkits/collaborative_filtering/itemcf --training=smallnetflix_mm --nshards=1 --quiet=1 --distance=3 --K=10 --clean_cache=1 display_name "ITEM-SIM-TO-RATING" rm -fR ./toolkits/collaborative_filtering/unittest/itemsim2rating.unittest.graph.* ./toolkits/collaborative_filtering/itemsim2rating --training=./toolkits/collaborative_filtering/unittest/itemsim2rating.unittest.graph --similarity=./toolkits/collaborative_filtering/unittest/itemsim2rating.unittest.similarity --K=4 execthreads 1 --nshards=1 --quiet=0 --undirected=1 --debug=1 diff ./toolkits/collaborative_filtering/unittest/itemsim2rating.unittest.graph-rec ./toolkits/collaborative_filtering/unittest/itemsim2rating.unittest display_name "TESTING ITEMCF - CORRECTNESS" rm -fR ./toolkits/collaborative_filtering/unittest/itemcf.unittest.graph.* ./toolkits/collaborative_filtering/itemcf --training=./toolkits/collaborative_filtering/unittest/itemcf.unittest.graph --min_allowed_intersection=2 --K=5 --nshards=1 --quiet=1 execthreads 1 sh ./toolkits/collaborative_filtering/topk.sh ./toolkits/collaborative_filtering/unittest/itemcf.unittest.graph #diff ./toolkits/collaborative_filtering/unittest/itemcf.unittest.graph-topk ./toolkits/collaborative_filtering/unittest/itemcf.unittest.graph-topk-correct a=`grep "0.400000" ./toolkits/collaborative_filtering/unittest/itemcf.unittest.graph-topk | wc -l` if [ $a -ne 3 ]; then echo "Failed unittest!" 
exit 1 fi display_name "MAP METRIC - test 1" ./toolkits/collaborative_filtering/metric_eval --training=./toolkits/collaborative_filtering/unittest/metric_eval.unittest4 --test=./toolkits/collaborative_filtering/unittest/metric_eval.unittest3 --K=3 display_name "MAP METRIC - test 2" ./toolkits/collaborative_filtering/metric_eval --training=./toolkits/collaborative_filtering/unittest/metric_eval.unittest2 --test=./toolkits/collaborative_filtering/unittest/metric_eval.unittest2 --K=3 display_name "TOP K" ./toolkits/parsers/topk --training=./toolkits/collaborative_filtering/unittest/topk.unittest --K=3 --quiet=1 diff ./toolkits/collaborative_filtering/unittest/topk.unittest.ids ./toolkits/collaborative_filtering/unittest/topk.unittest.ids.correct display_name "ITEMCF3" ./toolkits/collaborative_filtering/itemcf3 --training=./toolkits/collaborative_filtering/unittest/itemcf3.unittest.graph --distance=9 --debug=0 --quiet=1 --execthreads=1 #diff ./toolkits/collaborative_filtering/unittest/itemcf3.unittest.correct ./toolkits/collaborative_filtering/unittest/itemcf3.unittest.graph.out0 a=`grep "2 1 0.6666" ./toolkits/collaborative_filtering/unittest/itemcf3.unittest.graph.out0 | wc -l` if [ $a -ne 1 ]; then echo "Failed unittest!" exit 1 fi b=`grep "3 1 0.3333" ./toolkits/collaborative_filtering/unittest/itemcf3.unittest.graph.out0 | wc -l` if [ $b -ne 1 ]; then echo "Failed unittest!" 
exit 1 fi display_name "GENSGD" ./toolkits/collaborative_filtering/gensgd --training=smallnetflix_mm --validation=smallnetflix_mme --from_pos=0 --to_pos=1 --val_pos=2 --rehash=1 --has_header_titles=0 --debug=0 --file_columns=3 --quiet=1 --max_iter=3 display_name "K-FOLD cross validation" ./toolkits/collaborative_filtering/als --training=smallnetflix_mm --validation=smallnetflix_mme --lambda=0.065 --minval=1 --maxval=5 --max_iter=6 --quiet=1 --kfold_cross_validation=10 --kfold_cross_validation_index=3 ./toolkits/collaborative_filtering/als --training=smallnetflix_mm --validation=smallnetflix_mme --lambda=0.065 --minval=1 --maxval=5 --max_iter=6 --quiet=1 --kfold_cross_validation=10 --kfold_cross_validation_index=4 display_name "CLiMF" ./toolkits/collaborative_filtering/climf --training=smallnetflix_mm --validation=smallnetflix_mme --binary_relevance_thresh=4 --sgd_gamma=1e-6 --max_iter=6 --quiet=1 --sgd_step_dec=0.9999 --sgd_lambda=1e-6 --clean_cache=1
09jijiangwen-download
unittest.sh
Shell
asf20
9,963
/* * Copyright (c) 2009 Carnegie Mellon University. * All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an "AS * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language * governing permissions and limitations under the License. * * For more about this software visit: * * http://www.graphlab.ml.cmu.edu * */ /** * \file cgs_lda.cpp * * \brief This file contains a GraphLab based implementation of the * Collapsed Gibbs Sampler (CGS) for the Latent Dirichlet Allocation * (LDA) model. * * * * \author Joseph Gonzalez, Diana Hu */ #include <vector> #include <set> #include <algorithm> #include "util/atomic.hpp" #include <boost/math/special_functions/gamma.hpp> #include <vector> #include <algorithm> #include <boost/algorithm/string.hpp> #include <boost/config/warning_disable.hpp> #include <boost/spirit/include/qi.hpp> #include <boost/spirit/include/phoenix_core.hpp> #include <boost/spirit/include/phoenix_operator.hpp> #include <boost/spirit/include/phoenix_stl.hpp> #include <boost/iostreams/stream.hpp> #include <boost/iostreams/filtering_stream.hpp> #include <boost/iostreams/input_sequence.hpp> // Global Types // ============================================================================ typedef int count_type; /** * \brief The factor type is used to store the counts of tokens in * each topic for words, documents, and assignments. * * Atomic counts are used because we violate the abstraction by * modifying adjacent vertex data on scatter. As a consequence * multiple threads on the same machine may try to update the same * vertex data at the same time. 
The graphlab::atomic type ensures * that multiple increments are serially consistent. */ typedef std::vector< graphchi::atomic<count_type> > factor_type; /** * \brief We use the factor type in accumulators and so we define an * operator+= */ inline factor_type& operator+=(factor_type& lvalue, const factor_type& rvalue) { if(!rvalue.empty()) { if(lvalue.empty()) lvalue = rvalue; else { for(size_t t = 0; t < lvalue.size(); ++t) lvalue[t] += rvalue[t]; } } return lvalue; } // end of operator += /** * \brief The latent topic id of a token is the smallest reasonable * type. */ typedef uint16_t topic_id_type; // We require a null topic to represent the topic assignment for // tokens that have not yet been assigned. #define NULL_TOPIC (topic_id_type(-1)) #define NTOPICS 20 /** * \brief The assignment type is used on each edge to store the * assignments of each token. There can be several occurrences of the * same word in a given document and so a vector is used to store the * assignments of each occurrence. */ typedef uint16_t assignment_type[NTOPICS]; // Global Variables // ============================================================================ /** * \brief The alpha parameter determines the sparsity of topics for * each document. */ double ALPHA = 1; /** * \brief the Beta parameter determines the sparsity of words in each * document. */ double BETA = 0.1; /** * \brief the total number of topics to uses */ /** * \brief The total number of words in the dataset. */ size_t NWORDS = 0; /** * \brief The total number of docs in the dataset. */ size_t NDOCS = 0; /** * \brief The total number of tokens in the corpus */ size_t NTOKENS = 0; /** * \brief The number of top words to display during execution (from * each topic). */ size_t TOPK = 5; /** * \brief The interval to display topics during execution. */ size_t INTERVAL = 10; /** * \brief The global variable storing the global topic count across * all machines. This is maintained periodically using aggregation. 
*/ factor_type GLOBAL_TOPIC_COUNT; /** * \brief A dictionary of words used to print the top words during * execution. */ std::vector<std::string> DICTIONARY; /** * \brief The maximum occurences allowed for an individual term-doc * pair. (edge data) */ size_t MAX_COUNT = 100; /** * \brief The time to run until the first sample is taken. If less * than zero then the sampler will run indefinitely. */ float BURNIN = -1; // Graph Types // ============================================================================ /** * \brief The vertex data represents each term and document in the * corpus and contains the counts of tokens in each topic. */ struct vertex_data { ///! The total number of updates uint32_t nupdates; ///! The total number of changes to adjacent tokens uint32_t nchanges; ///! The count of tokens in each topic factor_type factor; vertex_data() : nupdates(0), nchanges(0), factor(NTOPICS) { } }; // end of vertex_data /** * \brief The edge data represents the individual tokens (word,doc) * pairs and their assignment to topics. */ struct edge_data { ///! The number of changes on the last update uint16_t nchanges; ///! 
The assignment of all tokens assignment_type assignment; edge_data(size_t ntokens = 0) : nchanges(0) { for(int i=0; i<NTOPICS; i++) assignment[i] = 0; } }; // end of edge_data typedef graphlab::distributed_graph<vertex_data, edge_data> graph_type; static void parse(edge_data &x, const char * s) { size_t count = atol(s); count = std::min(count, MAX_COUNT); x = (edge_data(count)); } /** * \brief Edge data parser used in graph.load_json * * Make sure that the edge file list * has docids from -2 to -(total #docid) and wordids 0 to (total #words -1) */ bool eparser(edge_data& ed, const std::string& line){ const int BASE = 10; char* next_char_ptr = NULL; size_t count = strtoul(line.c_str(), &next_char_ptr, BASE); if(next_char_ptr ==NULL) return false; //threshold count count = std::min(count, MAX_COUNT); ed = (edge_data(count)); return true; } /** * \brief Vertex data parser used in graph.load_json */ bool vparser(vertex_data& vd, const std::string& line){ vd = vertex_data(); return true; } /** * \brief Determine if the given vertex is a word vertex or a doc * vertex. * * For simplicity we connect docs --> words and therefore if a vertex * has in edges then it is a word. */ inline bool is_word(const graph_type::vertex_type& vertex) { return vertex.num_in_edges() > 0 ? 1 : 0; } /** * \brief Determine if the given vertex is a doc vertex * * For simplicity we connect docs --> words and therefore if a vertex * has out edges then it is a doc */ inline bool is_doc(const graph_type::vertex_type& vertex) { return vertex.num_out_edges() > 0 ? 1 : 0; } /** * \brief return the number of tokens on a particular edge. */ inline size_t count_tokens(const graph_type::edge_type& edge) { return edge.data().assignment.size(); } /** * \brief Get the other vertex in the edge. */ inline graph_type::vertex_type get_other_vertex(const graph_type::edge_type& edge, const graph_type::vertex_type& vertex) { return vertex.id() == edge.source().id()? 
edge.target() : edge.source(); } // ======================================================== // The Collapsed Gibbs Sampler Function /** * \brief The gather type for the collapsed Gibbs sampler is used to * collect the topic counts on adjacent edges so that the apply * function can compute the correct topic counts for the center * vertex. * */ struct gather_type { factor_type factor; uint32_t nchanges; gather_type() : nchanges(0) { }; gather_type(uint32_t nchanges) : factor(NTOPICS), nchanges(nchanges) { }; gather_type& operator+=(const gather_type& other) { factor += other.factor; nchanges += other.nchanges; return *this; } }; // end of gather type /** * \brief The collapsed Gibbs sampler vertex program updates the topic * counts for the center vertex and then draws new topic assignments * for each edge durring the scatter phase. * */ class cgs_lda_vertex_program : public graphlab::ivertex_program<graph_type, gather_type> { public: /** * \brief At termination we want to disable sampling to allow the * correct final counts to be computed. */ static bool DISABLE_SAMPLING; /** \brief gather on all edges */ edge_dir_type gather_edges(icontext_type& context, const vertex_type& vertex) const { return graphlab::ALL_EDGES; } // end of gather_edges /** * \brief Collect the current topic count on each edge. */ gather_type gather(icontext_type& context, const vertex_type& vertex, edge_type& edge) const { gather_type ret(edge.data().nchanges); const assignment_type& assignment = edge.data().assignment; foreach(topic_id_type asg, assignment) { if(asg != NULL_TOPIC) ++ret.factor[asg]; } return ret; } // end of gather /** * \brief Update the topic count for the center vertex. This * ensures that the center vertex has the correct topic count before * resampling the topics for each token along each edge. 
*/ void apply(icontext_type& context, vertex_type& vertex, const gather_type& sum) { const size_t num_neighbors = vertex.num_in_edges() + vertex.num_out_edges(); ASSERT_GT(num_neighbors, 0); // There should be no new edge data since the vertex program has been cleared vertex_data& vdata = vertex.data(); ASSERT_EQ(sum.factor.size(), NTOPICS); ASSERT_EQ(vdata.factor.size(), NTOPICS); vdata.nupdates++; vdata.nchanges = sum.nchanges; vdata.factor = sum.factor; } // end of apply /** * \brief Scatter on all edges if the computation is on-going. * Computation stops after bunrin or when disable sampling is set to * true. */ edge_dir_type scatter_edges(icontext_type& context, const vertex_type& vertex) const { return (DISABLE_SAMPLING || (BURNIN > 0 && context.elapsed_seconds() > BURNIN))? graphlab::NO_EDGES : graphlab::ALL_EDGES; }; // end of scatter edges /** * \brief Draw new topic assignments for each edge token. * * Note that we exploit the GraphLab caching model here by DIRECTLY * modifying the topic counts of adjacent vertices. Making the * changes immediately visible to any adjacent vertex programs * running on the same machine. However, these changes will be * overwritten during the apply step and are only used to accelerate * sampling. This is a potentially dangerous violation of the * abstraction and should be taken with caution. In our case all * vertex topic counts are preallocated and atomic operations are * used. In addition during the sampling phase we must be careful * to guard against potentially negative temporary counts. */ void scatter(icontext_type& context, const vertex_type& vertex, edge_type& edge) const { factor_type& doc_topic_count = is_doc(edge.source()) ? edge.source().data().factor : edge.target().data().factor; factor_type& word_topic_count = is_word(edge.source()) ? 
edge.source().data().factor : edge.target().data().factor; ASSERT_EQ(doc_topic_count.size(), NTOPICS); ASSERT_EQ(word_topic_count.size(), NTOPICS); // run the actual gibbs sampling std::vector<double> prob(NTOPICS); assignment_type& assignment = edge.data().assignment; edge.data().nchanges = 0; foreach(topic_id_type& asg, assignment) { const topic_id_type old_asg = asg; if(asg != NULL_TOPIC) { // construct the cavity --doc_topic_count[asg]; --word_topic_count[asg]; --GLOBAL_TOPIC_COUNT[asg]; } for(size_t t = 0; t < NTOPICS; ++t) { const double n_dt = std::max(count_type(doc_topic_count[t]), count_type(0)); const double n_wt = std::max(count_type(word_topic_count[t]), count_type(0)); const double n_t = std::max(count_type(GLOBAL_TOPIC_COUNT[t]), count_type(0)); prob[t] = (ALPHA + n_dt) * (BETA + n_wt) / (BETA * NWORDS + n_t); } asg = graphlab::random::multinomial(prob); // asg = std::max_element(prob.begin(), prob.end()) - prob.begin(); ++doc_topic_count[asg]; ++word_topic_count[asg]; ++GLOBAL_TOPIC_COUNT[asg]; if(asg != old_asg) { ++edge.data().nchanges; } } // End of loop over each token // singla the other vertex context.signal(get_other_vertex(edge, vertex)); } // end of scatter function }; // end of cgs_lda_vertex_program bool cgs_lda_vertex_program::DISABLE_SAMPLING = false; /** * \brief The icontext type associated with the cgs_lda_vertex program * is needed for all aggregators. */ typedef cgs_lda_vertex_program::icontext_type icontext_type; // ======================================================== // Aggregators /** * \brief The topk aggregator is used to periodically compute and * display the topk most common words in each topic. * * The number of words is determined by the global variable \ref TOPK * and the interval is determined by the global variable \ref INTERVAL. 
* */ class topk_aggregator { typedef std::pair<float, graphlab::vertex_id_type> cw_pair_type; private: std::vector< std::set<cw_pair_type> > top_words; size_t nchanges, nupdates; public: topk_aggregator(size_t nchanges = 0, size_t nupdates = 0) : nchanges(nchanges), nupdates(nupdates) { } topk_aggregator& operator+=(const topk_aggregator& other) { nchanges += other.nchanges; nupdates += other.nupdates; if(other.top_words.empty()) return *this; if(top_words.empty()) top_words.resize(NTOPICS); for(size_t i = 0; i < top_words.size(); ++i) { // Merge the topk top_words[i].insert(other.top_words[i].begin(), other.top_words[i].end()); // Remove excess elements while(top_words[i].size() > TOPK) top_words[i].erase(top_words[i].begin()); } return *this; } // end of operator += static topk_aggregator map(icontext_type& context, const graph_type::vertex_type& vertex) { topk_aggregator ret_value; const vertex_data& vdata = vertex.data(); ret_value.nchanges = vdata.nchanges; ret_value.nupdates = vdata.nupdates; if(is_word(vertex)) { const graphlab::vertex_id_type wordid = vertex.id(); ret_value.top_words.resize(vdata.factor.size()); for(size_t i = 0; i < vdata.factor.size(); ++i) { const cw_pair_type pair(vdata.factor[i], wordid); ret_value.top_words[i].insert(pair); } } return ret_value; } // end of map function static void finalize(icontext_type& context, const topk_aggregator& total) { if(context.procid() != 0) return; for(size_t i = 0; i < total.top_words.size(); ++i) { std::cout << "Topic " << i << ": "; rev_foreach(cw_pair_type pair, total.top_words[i]) { std::cout << DICTIONARY[pair.second] << "(" << pair.first << ")" << ", "; } std::cout << std::endl; } std::cout << "\nNumber of token changes: " << total.nchanges << std::endl; std::cout << "\nNumber of updates: " << total.nupdates << std::endl; } // end of finalize }; // end of topk_aggregator struct /** * \brief The global counts aggregator computes the total number of * tokens in each topic across all words and 
documents and then * updates the \ref GLOBAL_TOPIC_COUNT variable. * */ struct global_counts_aggregator { typedef graph_type::vertex_type vertex_type; static factor_type map(icontext_type& context, const vertex_type& vertex) { return vertex.data().factor; } // end of map function static void finalize(icontext_type& context, const factor_type& total) { size_t sum = 0; for(size_t t = 0; t < total.size(); ++t) { GLOBAL_TOPIC_COUNT[t] = std::max(count_type(total[t]/2), count_type(0)); sum += GLOBAL_TOPIC_COUNT[t]; } context.cout() << "Total Tokens: " << sum << std::endl; } // end of finalize }; // end of global_counts_aggregator struct /** * \brief The Likelihood aggregators maintains the current estimate of * the log-likelihood of the current token assignments. * * llik_words_given_topics = ... * ntopics * (gammaln(nwords * beta) - nwords * gammaln(beta)) - ... * sum_t(gammaln( n_t + nwords * beta)) + * sum_w(sum_t(gammaln(n_wt + beta))); * * llik_topics = ... * ndocs * (gammaln(ntopics * alpha) - ntopics * gammaln(alpha)) + ... 
* sum_d(sum_t(gammaln(n_td + alpha)) - gammaln(sum_t(n_td) + ntopics * alpha)); */ class likelihood_aggregator : public graphlab::IS_POD_TYPE { typedef graph_type::vertex_type vertex_type; double lik_words_given_topics; double lik_topics; public: likelihood_aggregator() : lik_words_given_topics(0), lik_topics(0) { } likelihood_aggregator& operator+=(const likelihood_aggregator& other) { lik_words_given_topics += other.lik_words_given_topics; lik_topics += other.lik_topics; return *this; } // end of operator += static likelihood_aggregator map(icontext_type& context, const vertex_type& vertex) { using boost::math::lgamma; const factor_type& factor = vertex.data().factor; ASSERT_EQ(factor.size(), NTOPICS); likelihood_aggregator ret; if(is_word(vertex)) { for(size_t t = 0; t < NTOPICS; ++t) { const double value = std::max(count_type(factor[t]), count_type(0)); ret.lik_words_given_topics += lgamma(value + BETA); } } else { ASSERT_TRUE(is_doc(vertex)); double ntokens_in_doc = 0; for(size_t t = 0; t < NTOPICS; ++t) { const double value = std::max(count_type(factor[t]), count_type(0)); ret.lik_topics += lgamma(value + ALPHA); ntokens_in_doc += factor[t]; } ret.lik_topics -= lgamma(ntokens_in_doc + NTOPICS * ALPHA); } return ret; } // end of map function static void finalize(icontext_type& context, const likelihood_aggregator& total) { using boost::math::lgamma; // Address the global sum terms double denominator = 0; for(size_t t = 0; t < NTOPICS; ++t) { denominator += lgamma(GLOBAL_TOPIC_COUNT[t] + NWORDS * BETA); } // end of for loop const double lik_words_given_topics = NTOPICS * (lgamma(NWORDS * BETA) - NWORDS * lgamma(BETA)) - denominator + total.lik_words_given_topics; const double lik_topics = NDOCS * (lgamma(NTOPICS * ALPHA) - NTOPICS * lgamma(ALPHA)) + total.lik_topics; const double lik = lik_words_given_topics + lik_topics; context.cout() << "Likelihood: " << lik << std::endl; } // end of finalize }; // end of likelihood_aggregator struct /** * \brief The 
selective signal functions are used to signal only the * vertices corresponding to words or documents. This is done by * using the iengine::map_reduce_vertices function. */ struct signal_only { /** * \brief Signal only the document vertices and skip the word * vertices. */ static graphlab::empty docs(icontext_type& context, const graph_type::vertex_type& vertex) { if(is_doc(vertex)) context.signal(vertex); return graphlab::empty(); } // end of signal_docs /** * \brief Signal only the word vertices and skip the document * vertices. */ static graphlab::empty words(icontext_type& context, const graph_type::vertex_type& vertex) { if(is_word(vertex)) context.signal(vertex); return graphlab::empty(); } // end of signal_words }; // end of selective_only /** * \brief Load the dictionary global variable from the file containing * the terms (one term per line). * * Note that while graphs can be loaded from multiple files the * dictionary must be in a single file. The dictionary is loaded * entirely into memory and used to display word clouds and the top * terms in each topic. * * \param [in] fname the file containing the dictionary data. The * data can be located on HDFS and can also be gzipped (must end in * ".gz"). 
* */ bool load_dictionary(const std::string& fname) { // std::cout << "staring load on: " // << graphlab::get_local_ip_as_str() << std::endl; const bool gzip = boost::ends_with(fname, ".gz"); // test to see if the graph_dir is an hadoop path std::cout << "opening: " << fname << std::endl; std::ifstream in_file(fname.c_str(), std::ios_base::in | std::ios_base::binary); boost::iostreams::filtering_stream<boost::iostreams::input> fin; fin.push(in_file); if(!fin.good() || !fin.good()) { logstream(LOG_ERROR) << "Error loading dictionary: " << fname << std::endl; return false; } std::string term; std::cout << "Loooping" << std::endl; while(std::getline(fin, term).good()) DICTIONARY.push_back(term); fin.pop(); in_file.close(); // std::cout << "Finished load on: " // << graphlab::get_local_ip_as_str() << std::endl; std::cout << "Dictionary Size: " << DICTIONARY.size() << std::endl; return true; } // end of load dictionary struct count_saver { bool save_words; count_saver(bool save_words) : save_words(save_words) { } typedef graph_type::vertex_type vertex_type; typedef graph_type::edge_type edge_type; std::string save_vertex(const vertex_type& vertex) const { // Skip saving vertex data if the vertex type is not consistent // with the save type if((save_words && is_doc(vertex)) || (!save_words && is_word(vertex))) return ""; // Proceed to save std::stringstream strm; if(save_words) { const graphlab::vertex_id_type vid = vertex.id(); strm << vid << '\t'; } else { // save documents const graphlab::vertex_id_type vid = (-vertex.id()) - 2; strm << vid << '\t'; } const factor_type& factor = vertex.data().factor; for(size_t i = 0; i < factor.size(); ++i) { strm << factor[i]; if(i+1 < factor.size()) strm << '\t'; } strm << '\n'; return strm.str(); } std::string save_edge(const edge_type& edge) const { return ""; //nop } }; // end of prediction_saver
09jijiangwen-download
graphlab_toolkit_ports/lda/cgs_lda_vertexprogram.hpp
C++
asf20
22,556
// // cgs_lda.cpp // graphchi_xcode // // Created by Aapo Kyrola on 8/8/12. // // #include <string> #include <algorithm> #include "graphchi_basic_includes.hpp" #include "api/graphlab2_1_GAS_api/graphlab.hpp" #include "cgs_lda_vertexprogram.hpp" using namespace graphchi; using namespace graphlab; int main(int argc, const char ** argv) { /* GraphChi initialization will read the command line arguments and the configuration file. */ graphchi_init(argc, argv); /* Metrics object for keeping track of performance counters and other information. Currently required. */ metrics m("LDA-graphlab"); /* Basic arguments for application. NOTE: File will be automatically 'sharded'. */ std::string filename = get_option_string("file"); // Base filename int niters = get_option_int("niters", 4); // Number of iterations /* Preprocess data if needed, or discover preprocess files */ int nshards = convert_if_notexists<edge_data>(filename, get_option_string("nshards", "auto")); /* Run */ std::vector<vertex_data> * vertices = run_graphlab_vertexprogram<cgs_lda_vertex_program>(filename, nshards, niters, false, m, false, false); /* TODO: write output latent matrices */ delete vertices; /* Report execution metrics */ metrics_report(m); return 0; }
09jijiangwen-download
graphlab_toolkit_ports/lda/cgs_lda.cpp
C++
asf20
1,370
<!DOCTYPE html> <html lang="en"> <head> <meta charset="utf-8"> <title>GraphChi Admin Dashboard</title> <meta name="viewport" content="width=device-width, initial-scale=1.0"> <meta name="description" content=""> <meta name="author" content=""> <script src="bootstrap/js/jquery.js"></script> <!-- Le styles --> <link href="bootstrap/css/bootstrap.css" rel="stylesheet"> <link href="style/graphchi.css" rel="stylesheet"> <style type="text/css"> body { padding-top: 60px; padding-bottom: 40px; } .sidebar-nav { padding: 9px 0; } </style> <link href="bootstrap/css/bootstrap-responsive.css" rel="stylesheet"> <!-- Le HTML5 shim, for IE6-8 support of HTML5 elements --> <!--[if lt IE 9]> <script src="http://html5shim.googlecode.com/svn/trunk/html5.js"></script> <![endif]--> <!-- Le fav and touch icons --> <script type="text/javascript"> var chosenShard = 0; function updateChosen() { $.getJSON("/ajax/shardpagerank" + chosenShard, "", function(data) { var shardprel = document.getElementById("selectshard") shardprel.innerHTML = getPagerankTable(data) } ) } function createPswWindow(num, nShards, windowStartFrac, windowEndFrac, beingRecreated, vSt, vEn) { var bgElement = document.getElementById("psw_bg") var shardElement = document.createElement("div") var shardHeight = bgElement.clientHeight - 20; var shardWidth = Math.floor((parseInt(bgElement.clientWidth) - nShards*12) / nShards); shardElement.setAttribute("id", "shard" + num) shardElement.setAttribute("class", "psw") shardElement.style.width = shardWidth + "px" shardElement.style.height = shardHeight + "px" var b = document.createElement("b") if (!beingRecreated) { b.appendChild(document.createTextNode( num )) } else { b.appendChild(document.createTextNode( "(recreate)" )) } shardElement.appendChild(b) // Window winElement = document.createElement("div") winElement.setAttribute("id", "win" + num) winElement.setAttribute("class", "pswwindow") winElement.style.position = "relative"; winElement.style.width = shardWidth + "px" 
winElement.style.height = Math.round((windowEndFrac-windowStartFrac)*shardHeight) + "px" winElement.style.top = Math.round(windowStartFrac * shardHeight - 20) + "px" shardElement.appendChild(winElement) bgElement.appendChild(shardElement) shardElement.onclick = function() { chosenShard = num; var shardheader = document.getElementById("selectshard_header"); shardheader.innerHTML = "Shard: " + chosenShard + "; vertices: " + vSt + " -- " + vEn; updateChosen() }; } function getPagerankTable(data) { var s = "<table>"; for(i=0; i < 20; i++) { var r; eval("r = data.rank" + i) if (r != undefined) { r = r.split(":") s = s + "<tr><td >" + (i+1) + ". " + "</td><td width='150px'>" + r[1] + " </td><td style='text-align: right;'>" + r[2] + "</td></tr>" } } s = s + "</table>" return s; } function updatePageranks(data) { var s = getPagerankTable(data) document.getElementById("pageranks").innerHTML = s; } function updatePics() { document.getElementById("updatesimg").src = "plots/updates.png?" + Math.random() document.getElementById("edgesimg").src = "plots/edges.png?" + Math.random() document.getElementById("ingestsimg").src = "plots/ingests.png?" + Math.random() document.getElementById("bufedgesimg").src = "plots/bufedges.png?" + Math.random() document.getElementById("deltasimg").src = "plots/deltas.png?" 
+ Math.random() } function update() { $.getJSON("/ajax/getinfo", "", function(data) { var graphname = data.file; // Remove previous childs var bgElement = document.getElementById("psw_bg") if ( bgElement.hasChildNodes() ) { while ( bgElement.childNodes.length >= 1 ) { bgElement.removeChild( bgElement.firstChild ); } } document.getElementById("filename").innerHTML = graphname.split( "/" ).pop(); document.getElementById("state").innerHTML = data.state; document.getElementById("nvertices").innerHTML = data.nvertices; if (data.ingestedges != undefined) { document.getElementById("ingest").innerHTML = data.ingestedges; document.getElementById("ingestspeed").innerHTML = data.ingestspeed; } document.getElementById("nedges").innerHTML = data.edges; document.getElementById("updates").innerHTML = data.updates; document.getElementById("iteration").innerHTML = data.iteration + " / " + data.numIterations; document.getElementById("runtime").innerHTML = data.runTime; document.getElementById("curwindow").innerHTML = data.windowStart + " - " + data.windowEnd; document.getElementById("updatessec").innerHTML = Math.floor(data.updates / (data.runTime+0.01)); $.each(data.shards, function(i, shard) { if (shard.windowStart != undefined) { createPswWindow(i, data.shards.length, shard.windowStart / shard.size, shard.windowEnd / shard.size, false, shard.intervalStart, shard.intervalEnd); } else { createPswWindow(i, data.shards.length, 0.0, 1.0, true, shard.intervalStart, shard.intervalEnd); } }); updatePageranks(data) shardEl = document.getElementById("shard" + data.interval) if (shardEl != undefined) { shardEl.style.backgroundColor = "#666666"; shardEl.style.border = "1px solid red" } } ); } function load() { setInterval(function(){update()},500); setInterval(function(){updatePics()},5000); setInterval(function(){updateChosen()}, 8000); } </script> </head> <body onLoad="load()"> <div class="navbar navbar-fixed-top"> <div class="navbar-inner"> <div class="container-fluid"> <a class="btn 
btn-navbar" data-toggle="collapse" data-target=".nav-collapse"> <span class="icon-bar"></span> <span class="icon-bar"></span> <span class="icon-bar"></span> </a> <a class="brand" href="#">GraphChi Admin Dashboard</a> <!-- <div class="btn-group pull-right"> <a class="btn dropdown-toggle" data-toggle="dropdown" href="#"> <i class="icon-user"></i> Username <span class="caret"></span> </a> <ul class="dropdown-menu"> <li><a href="#">Profile</a></li> <li class="divider"></li> <li><a href="#">Sign Out</a></li> </ul> </div> --> <div class="nav-collapse"> <ul class="nav"> <li class="active"><a target="_new" href="http://code.google.com/p/graphchi">GraphChi @ Google Code</a></li> </ul> </div> </div> </div> </div> <div class="container-fluid"> <div class="row-fluid"> <div class="span3"> <div class="well sidebar-nav"> <!-- <ul class="nav nav-list"> <li class="nav-header">Sidebar</li> <li class="active"><a href="#">Link</a></li> <li><a href="#">Link</a></li> <li><a href="#">Link</a></li> <li><a href="#">Link</a></li> </ul> --> <span class="nav-header2">Summary</span> <table style="margin: 5px;"> <tr> <td class="leftcol">Input</td><td class="rightcol"> <span id="filename">input</span></td> </tr> <tr> <td class="leftcol">State</td><td class="rightcol"> <span id="state">--</span> </td></tr> <tr> <td class="leftcol">Vertices</td><td class="rightcol"><span id="nvertices">__</span></td></tr> <tr> <td class="leftcol">Edges</td><td class="rightcol"><span id="nedges">__</span></td></tr> <tr> <td class="leftcol">Updates executed</td><td class="rightcol"><span id="updates">0</span></td></tr> <tr> <td class="leftcol">Updates / sec</td><td class="rightcol"> <span id="updatessec">0</span></td></tr> <tr> <td class="leftcol">Iteration</td><td class="rightcol"> <span id="iteration">0</span> </td></tr> <tr> <td class="leftcol">Runtime</td><td class="rightcol"> <span id="runtime">0</span> </td></tr> </table> <span class="nav-header2" style="margin-top: 20px;">Ingest</span> <table style="margin: 
5px;"> <tr> <td class="leftcol">Streamed edges</td><td class="rightcol"> <span id="ingest">--</span></td> </tr> <tr> <td class="leftcol"> - edges / sec</td><td class="rightcol"> <span id="ingestspeed">--</span> </td> </tr> </table> <span class="nav-header2" style="margin-top: 20px;" >App output</span> <div id="pageranks" class="appresult"> 1. dsfsfsf <br/> 2. dsfsfsf <br/> 3. dsfsfsf <br/> 4. dsfsfsf <br/> 5. dsfsfsf <br/> 6. dsfsfsf <br/> </div> </div><!--/.well --> </div><!--/span--> <div class="span9"> <span style="font-size: 14px"> <span style="font-weight: bold; color: red;">What is happening: </span> <span style="color: red">The demo started with a Twitter graph of just 180 million edges. After that, we started inserting roughly 100,000 edges / second in the graph, while simultaneously computing Pagerank. On the left side, you see the users with highest pageranks. It is updated after each iteration of <b>GraphChi</b> computation. </span> </span> <br/> Execution interval: <span id="curwindow" style="font-style:oblique;"></span> <br/> <div class="hero-unit2"> <div id="psw_bg" style="margin: 4px; height: 200px; overflow: auto;" > </div> </div> <div class="row-fluid"> <div class="span4"> <h2>Selected Shard</h2> <span id="selectshard_header">---</span> <div id="selectshard"> --- </div> </div><!--/span--> <div class="span4"> <h2>Updates/sec</h2> <p> <img src="plots/updates.png" id="updatesimg" /> </p> </div><!--/span--> <div class="span4"> <h2>Ingests/sec</h2> <p> <img src="plots/ingests.png" id="ingestsimg" /> </p> </div><!--/span--> </div><!--/row--> <div class="row-fluid"> <div class="span4"> <h2>Delta</h2> <img src="plots/deltas.png" id="deltasimg" /> </div><!--/span--> <div class="span4"> <h2>Edges</h2> <p> <img src="plots/edges.png" id="edgesimg" /> </p> </div><!--/span--> <div class="span4"> <h2>Buffered Edges</h2> <p> <img src="plots/bufedges.png" id="bufedgesimg" /> </p> </div><!--/span--> </div><!--/row--> </div><!--/span--> </div><!--/row--> <hr> 
<footer> <p>&copy; Aapo Kyrola et al. 2012 / Carnegie Mellon University</p> </footer> </div><!--/.fluid-container--> <!-- Le javascript ================================================== --> <!-- Placed at the end of the document so the pages load faster --> <script src="bootstrap/js/bootstrap-transition.js"></script> <script src="bootstrap/js/bootstrap-alert.js"></script> <script src="bootstrap/js/bootstrap-modal.js"></script> <script src="bootstrap/js/bootstrap-dropdown.js"></script> <script src="bootstrap/js/bootstrap-scrollspy.js"></script> <script src="bootstrap/js/bootstrap-tab.js"></script> <script src="bootstrap/js/bootstrap-tooltip.js"></script> <script src="bootstrap/js/bootstrap-popover.js"></script> <script src="bootstrap/js/bootstrap-button.js"></script> <script src="bootstrap/js/bootstrap-collapse.js"></script> <script src="bootstrap/js/bootstrap-carousel.js"></script> <script src="bootstrap/js/bootstrap-typeahead.js"></script> </body> </html>
09jijiangwen-download
conf/adminhtml/index.html
HTML
asf20
12,725
#!/usr/bin/python
"""Render a small time-series PNG for the GraphChi admin dashboard.

Reads a two-column (time value) whitespace-delimited data file and
writes ``<name>.png`` next to the input ``<name>.dat``.  Only the last
``lastsecs`` seconds (default 240) of data are plotted.

Usage: plotter.py <file.dat> [lastsecs N]
"""
import sys
import os
import matplotlib
import numpy
# Select the non-interactive AGG backend *before* importing pyplot so
# the script works on headless servers.  (A duplicate
# `import matplotlib` was removed here.)
matplotlib.use('AGG')
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
from matplotlib.ticker import FormatStrFormatter


def getArg(param, default=""):
    # Return the command-line value following `param`, or `default`
    # when the flag is absent.
    if (sys.argv.count(param) == 0):
        return default
    i = sys.argv.index(param)
    return sys.argv[i + 1]

lastsecs = int(getArg("lastsecs", 240))

fname = sys.argv[1]
try:
    tdata = numpy.loadtxt(fname, delimiter=" ")
except Exception:
    # Best effort: the data file may not exist yet or may be mid-write;
    # skip this plotting round.  (Was a bare `except:`, which also
    # swallowed SystemExit/KeyboardInterrupt.)
    exit(0)

# Need at least a 2x2 matrix: two samples with time and value columns.
if len(tdata.shape) < 2 or tdata.shape[0] < 2 or tdata.shape[1] < 2:
    print("Too small data - do not try to plot yet.")
    exit(0)

times = tdata[:, 0]
values = tdata[:, 1]
lastt = max(times)

#majorFormatter = FormatStrFormatter('%.2f')
plt.figure(figsize=(3.5, 2.0))
# Plot only the trailing `lastsecs`-second window.
plt.plot(times[times > lastt - lastsecs], values[times > lastt - lastsecs])
plt.gca().xaxis.set_major_locator(MaxNLocator(nbins=7, prune='lower'))
plt.xlim([max(0, lastt - lastsecs), lastt])
#plt.ylim([lastt - lastsecs, lastt])
plt.gca().yaxis.set_major_locator(MaxNLocator(nbins=7, prune='lower'))
#plt.gca().yaxis.set_major_formatter(majorFormatter)
plt.savefig(fname.replace(".dat", ".png"), format="png", bbox_inches='tight')
09jijiangwen-download
conf/adminhtml/plots/plotter.py
Python
asf20
1,235
/* Styles for the GraphChi admin dashboard (conf/adminhtml). */

/* A single shard box in the parallel-sliding-windows visualization. */
.psw { border: 2px solid #000000; background-color: #888888; float: left; margin: 3px; border-top-left-radius: 4px 4px; border-bottom-right-radius: 4px 4px; border-bottom-left-radius: 4px 4px; border-top-right-radius: 4px 4px; }

/* The sliding memory window drawn inside a shard box. */
.pswwindow { background-color: #333333; border: 1px solid #000000; position: relative; left: -1px; }

.infobox { float: left; width: 320px; }

/* Rounded light panel framing the shard visualization. */
.hero-unit2 { padding: 12px; margin-bottom: 30px; background-color: #eeeeee; -webkit-border-radius: 6px; -moz-border-radius: 6px; border-radius: 6px; }

/* Value (right) and label (left) columns of the summary tables. */
.rightcol { text-align: right; width: 140px; }
.leftcol { text-align: left; font-weight: bold; width: 180px; }

/* Container for application output (e.g. the top-pagerank list). */
.appresult { margin: 5px; }

/* Small uppercase section header, styled after Bootstrap's .nav-header. */
.nav-header2 { display: block; padding: 3px 15px; font-size: 11px; font-weight: bold; line-height: 18px; color: #999999; text-shadow: 0 1px 0 rgba(255, 255, 255, 0.5); text-transform: uppercase; }
09jijiangwen-download
conf/adminhtml/style/graphchi.css
CSS
asf20
995
/* Greyscale Table Design by Scott Boyle, Two Plus Four www.twoplusfour.co.uk ----------------------------------------------- */

/* Info / description blurbs. */
.dsinfo { font: italic 80%/140% arial, helvetica, sans-serif; }
.descs { width: 650px; font: italic 80%/140% arial, helvetica, sans-serif; }

h1 { border: 1px solid #999999; background: #ccc; text-align: center; }
h2 { border: 1px solid #999999; background: #eee; text-align: center; }

/* Shard box and its sliding window (greyscale variant of graphchi.css). */
.psw { background-color: #444444; border: 1px solid #000000; float: left; margin: 3px; }
.pswwindow { background-color: #999999; border: 1px solid #000000; position: relative; left: -1px; }
.infobox { float: left; }

/* Results table: monospace body cells, bold sans headers. */
.restable {border-collapse: collapse; border: 1px solid #999999; font: normal 80%/140% arial, helvetica, sans-serif; color: #555; background: #fff;}
table.restable td {font: normal 100% courier, "courier new", monospace;; }
table.restable th {font: bold 100% arial; }

/* Default table styling. */
table {border-collapse: collapse; border: 1px solid #999999; font: normal 80%/140% arial, helvetica, sans-serif; color: #555; background: #fff;}
td, th {border: 1px dotted #bbb; padding: .5em;}
caption {padding: 0 0 .5em 0; text-align: left; font-size: 1.4em; font-weight: bold; text-transform: uppercase; color: #333; background: transparent;}

/* =links ----------------------------------------------- */
th a {padding: 1px; text-decoration: underline; font-weight: bold; background: transparent;}
table a:link {border-bottom: 1px dashed #ddd; color: #000;}
table a:hover {border-bottom: 1px dashed #bbb; color: #666;}

/* =head =foot ----------------------------------------------- */
thead th, tfoot th {border: 2px solid #000; text-align: left; font-size: 1.2em; font-weight: bold; color: #333; background: transparent;}
tfoot td {border: 2px solid #000;}

/* =body ----------------------------------------------- */
tbody th, tbody td {vertical-align: top; text-align: left;}
tbody th {white-space: nowrap;}
.odd {background: #fcfcfc;}
tbody tr:hover {background: #fafafa;}
09jijiangwen-download
conf/adminhtml/style/greyscale.css
CSS
asf20
2,065
<!DOCTYPE html> <html lang="en"> <head> <meta charset="utf-8"> <title>GraphChi Admin Dashboard</title> <meta name="viewport" content="width=device-width, initial-scale=1.0"> <meta name="description" content=""> <meta name="author" content=""> <script src="bootstrap/js/jquery.js"></script> <!-- Le styles --> <link href="bootstrap/css/bootstrap.css" rel="stylesheet"> <link href="style/graphchi.css" rel="stylesheet"> <style type="text/css"> body { padding-top: 60px; padding-bottom: 40px; } .sidebar-nav { padding: 9px 0; } </style> <link href="bootstrap/css/bootstrap-responsive.css" rel="stylesheet"> <!-- Le HTML5 shim, for IE6-8 support of HTML5 elements --> <!--[if lt IE 9]> <script src="http://html5shim.googlecode.com/svn/trunk/html5.js"></script> <![endif]--> <!-- Le fav and touch icons --> <script type="text/javascript"> function createPswWindow(num, nShards, windowStartFrac, windowEndFrac, beingRecreated) { var bgElement = document.getElementById("psw_bg") var shardElement = document.createElement("div") var shardHeight = bgElement.clientHeight - 20; var shardWidth = Math.floor((parseInt(bgElement.clientWidth) - nShards*12) / nShards); shardElement.setAttribute("id", "shard" + num) shardElement.setAttribute("class", "psw") shardElement.style.width = shardWidth + "px" shardElement.style.height = shardHeight + "px" var b = document.createElement("b") if (!beingRecreated) { b.appendChild(document.createTextNode( num )) } else { b.appendChild(document.createTextNode( "(recreate)" )) } shardElement.appendChild(b) // Window winElement = document.createElement("div") winElement.setAttribute("id", "win" + num) winElement.setAttribute("class", "pswwindow") winElement.style.position = "relative"; winElement.style.width = shardWidth + "px" winElement.style.height = Math.round((windowEndFrac-windowStartFrac)*shardHeight) + "px" winElement.style.top = Math.round(windowStartFrac * shardHeight - 20) + "px" shardElement.appendChild(winElement) bgElement.appendChild(shardElement) 
} function updatePageranks(data) { var s = "<table>"; for(i=0; i< 20; i++) { var r; eval("r = data.rank" + i) r = r.split(":") s = s + "<tr><td >" + (i+1) + ". " + "</td><td width='150px'>" + r[1] + " </td><td style='text-align: right;'>" + r[2] + "</td></tr>" } s = s + "</table>" document.getElementById("pageranks").innerHTML = s; } function updatePics() { document.getElementById("updatesimg").src = "plots/updates.png?" + Math.random() document.getElementById("edgesimg").src = "plots/edges.png?" + Math.random() document.getElementById("ingestsimg").src = "plots/ingests.png?" + Math.random() document.getElementById("bufedgesimg").src = "plots/bufedges.png?" + Math.random() } function update() { $.getJSON("/ajax/getinfo", "", function(data) { var graphname = data.file; // Remove previous childs var bgElement = document.getElementById("psw_bg") if ( bgElement.hasChildNodes() ) { while ( bgElement.childNodes.length >= 1 ) { bgElement.removeChild( bgElement.firstChild ); } } document.getElementById("filename").innerHTML = graphname.split( "/" ).pop(); document.getElementById("state").innerHTML = data.state; document.getElementById("nvertices").innerHTML = data.nvertices; if (data.ingestedges != undefined) { document.getElementById("ingest").innerHTML = data.ingestedges; document.getElementById("ingestspeed").innerHTML = data.ingestspeed; } document.getElementById("updates").innerHTML = data.updates; document.getElementById("iteration").innerHTML = data.iteration + " / " + data.numIterations; document.getElementById("runtime").innerHTML = data.runTime; document.getElementById("curwindow").innerHTML = data.windowStart + " - " + data.windowEnd; document.getElementById("updatessec").innerHTML = Math.floor(data.updates / (data.runTime+0.01)); $.each(data.shards, function(i, shard) { if (shard.windowStart != undefined) { createPswWindow(i, data.shards.length, shard.windowStart / shard.size, shard.windowEnd / shard.size, false); } else { createPswWindow(i, data.shards.length, 
0.0, 1.0, true); } }); updatePageranks(data) shardEl = document.getElementById("shard" + data.interval) if (shardEl != undefined) { shardEl.style.backgroundColor = "#666666"; shardEl.style.border = "1px solid red" } } ); } function load() { setInterval(function(){update()},500); setInterval(function(){updatePics()},5000); } </script> </head> <body onLoad="load()"> <div class="navbar navbar-fixed-top"> <div class="navbar-inner"> <div class="container-fluid"> <a class="btn btn-navbar" data-toggle="collapse" data-target=".nav-collapse"> <span class="icon-bar"></span> <span class="icon-bar"></span> <span class="icon-bar"></span> </a> <a class="brand" href="#">GraphChi Admin Dashboard</a> <!-- <div class="btn-group pull-right"> <a class="btn dropdown-toggle" data-toggle="dropdown" href="#"> <i class="icon-user"></i> Username <span class="caret"></span> </a> <ul class="dropdown-menu"> <li><a href="#">Profile</a></li> <li class="divider"></li> <li><a href="#">Sign Out</a></li> </ul> </div> --> <div class="nav-collapse"> <ul class="nav"> <li class="active"><a target="_new" href="http://code.google.com/p/graphchi">GraphChi @ Google Code</a></li> </ul> </div> </div> </div> </div> <div class="container-fluid"> <div class="row-fluid"> <div class="span3"> <div class="well sidebar-nav"> <!-- <ul class="nav nav-list"> <li class="nav-header">Sidebar</li> <li class="active"><a href="#">Link</a></li> <li><a href="#">Link</a></li> <li><a href="#">Link</a></li> <li><a href="#">Link</a></li> </ul> --> <span class="nav-header2">Summary</span> <table style="margin: 5px;"> <tr> <td class="leftcol">Input</td><td class="rightcol"> <span id="filename">input</span></td> </tr> <tr> <td class="leftcol">State</td><td class="rightcol"> <span id="state">--</span> </td></tr> <tr> <td class="leftcol">Vertices</td><td class="rightcol"><span id="nvertices">__</span></td></tr> <tr> <td class="leftcol">Edges</td><td class="rightcol"><span id="nedges">__</span></td></tr> <tr> <td class="leftcol">Updates 
executed</td><td class="rightcol"><span id="updates">0</span></td></tr> <tr> <td class="leftcol">Updates / sec</td><td class="rightcol"> <span id="updatessec">0</span></td></tr> <tr> <td class="leftcol">Iteration</td><td class="rightcol"> <span id="iteration">0</span> </td></tr> <tr> <td class="leftcol">Runtime</td><td class="rightcol"> <span id="runtime">0</span> </td></tr> </table> <span class="nav-header2" style="margin-top: 20px;">Ingest</span> <table style="margin: 5px;"> <tr> <td class="leftcol">Streamed edges</td><td class="rightcol"> <span id="ingest">--</span></td> </tr> <tr> <td class="leftcol"> - edges / sec</td><td class="rightcol"> <span id="ingestspeed">--</span> </td> </tr> </table> <span class="nav-header2" style="margin-top: 20px;" >App output</span> <div id="pageranks" class="appresult"> 1. dsfsfsf <br/> 2. dsfsfsf <br/> 3. dsfsfsf <br/> 4. dsfsfsf <br/> 5. dsfsfsf <br/> 6. dsfsfsf <br/> </div> </div><!--/.well --> </div><!--/span--> <div class="span9"> Execution interval: <span id="curwindow" style="font-style:oblique;"></span> <br/> <div class="hero-unit2"> <div id="psw_bg" style="margin: 4px; height: 200px; overflow: auto;" > </div> </div> <div class="row-fluid"> <div class="span4"> <h2>Updates/sec</h2> <p> <img src="plots/updates.png" id="updatesimg" /> </p> </div><!--/span--> <div class="span4"> <h2>Ingests/sec</h2> <p> <img src="plots/ingests.png" id="ingestsimg" /> </p> </div><!--/span--> <div class="span4"> <h2>Delta</h2> <p>Image </p> </div><!--/span--> </div><!--/row--> <div class="row-fluid"> <div class="span4"> <h2>Edges</h2> <p> <img src="plots/edges.png" id="edgesimg" /> </p> </div><!--/span--> <div class="span4"> <h2>Input/Output</h2> <p>Line for reads and writes/sec </p> </div><!--/span--> <div class="span4"> <h2>Buffered Edges</h2> <p> <img src="plots/bufedges.png" id="bufedgesimg" /> </p> </div><!--/span--> </div><!--/row--> </div><!--/span--> </div><!--/row--> <hr> <footer> <p>&copy; Aapo Kyrola et al. 
2012 / Carnegie Mellon University</p> </footer> </div><!--/.fluid-container--> <!-- Le javascript ================================================== --> <!-- Placed at the end of the document so the pages load faster --> <script src="bootstrap/js/bootstrap-transition.js"></script> <script src="bootstrap/js/bootstrap-alert.js"></script> <script src="bootstrap/js/bootstrap-modal.js"></script> <script src="bootstrap/js/bootstrap-dropdown.js"></script> <script src="bootstrap/js/bootstrap-scrollspy.js"></script> <script src="bootstrap/js/bootstrap-tab.js"></script> <script src="bootstrap/js/bootstrap-tooltip.js"></script> <script src="bootstrap/js/bootstrap-popover.js"></script> <script src="bootstrap/js/bootstrap-button.js"></script> <script src="bootstrap/js/bootstrap-collapse.js"></script> <script src="bootstrap/js/bootstrap-carousel.js"></script> <script src="bootstrap/js/bootstrap-typeahead.js"></script> </body> </html>
09jijiangwen-download
conf/adminhtml/index_bootstrap.html
HTML
asf20
11,101
#ifndef __GRAPHCHI_RMSE_ENGINE #define __GRAPHCHI_RMSE_ENGINE /** * @file * @author Danny Bickson * @version 1.0 * * @section LICENSE * * Copyright [2012] [Carnegie Mellon University] * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * File for aggregating and siplaying error mesasures and algorithm progress */ float (*pprediction_func)(const vertex_data&, const vertex_data&, const float, double &, void *) = NULL; vec validation_rmse_vec; vec users_vec; vec sum_ap_vec; bool user_nodes = true; int num_threads = 1; bool converged_engine = false; int cur_iteration = 0; /** * GraphChi programs need to subclass GraphChiProgram<vertex-type, edge-type> * class. The main logic is usually in the update function. 
*/ struct ValidationAPProgram : public GraphChiProgram<VertexDataType, EdgeDataType> { /** * compute validaton AP for a single user */ void update(graphchi_vertex<VertexDataType, EdgeDataType> &vertex, graphchi_context &gcontext) { if (user_nodes && vertex.id() >= M) return; else if (!user_nodes && vertex.id() < M) return; vertex_data & vdata = latent_factors_inmem[vertex.id()]; vec ratings = zeros(vertex.num_outedges()); vec real_vals = zeros(vertex.num_outedges()); if (ratings.size() > 0){ users_vec[omp_get_thread_num()]++; int j=0; int real_click_count = 0; for(int e=0; e < vertex.num_outedges(); e++) { const EdgeDataType & observation = vertex.edge(e)->get_data(); vertex_data & pdata = latent_factors_inmem[vertex.edge(e)->vertex_id()]; double prediction; (*pprediction_func)(vdata, pdata, observation, prediction, NULL); ratings[j] = prediction; real_vals[j] = observation; if (observation > 0) real_click_count++; j++; } int count = 0; double ap = 0; ivec pos = sort_index(ratings); for (int j=0; j< std::min(ap_number, (int)ratings.size()); j++){ if (real_vals[pos[ratings.size() - j - 1]] > 0) ap += (++count * 1.0/(j+1)); } if (real_click_count > 0 ) ap /= real_click_count; else ap = 0; sum_ap_vec[omp_get_thread_num()] += ap; } } void before_iteration(int iteration, graphchi_context & gcontext){ last_validation_rmse = dvalidation_rmse; users_vec = zeros(num_threads); sum_ap_vec = zeros(num_threads); } /** * Called after an iteration has finished. 
*/ void after_iteration(int iteration, graphchi_context &gcontext) { assert(Le > 0); dvalidation_rmse = finalize_rmse(sum(sum_ap_vec) , (double)sum(users_vec)); std::cout<<" Validation " << error_names[loss_type] << ":" << std::setw(10) << dvalidation_rmse << std::endl; if (halt_on_rmse_increase > 0 && halt_on_rmse_increase < cur_iteration && dvalidation_rmse > last_validation_rmse){ logstream(LOG_WARNING)<<"Stopping engine because of validation " << error_names[loss_type] << " increase" << std::endl; //gcontext.set_last_iteration(gcontext.iteration); converged_engine = true; } } }; /** * GraphChi programs need to subclass GraphChiProgram<vertex-type, edge-type> * class. The main logic is usually in the update function. */ struct ValidationRMSEProgram : public GraphChiProgram<VertexDataType, EdgeDataType> { /** * compute validaton RMSE for a single user */ void update(graphchi_vertex<VertexDataType, EdgeDataType> &vertex, graphchi_context &gcontext) { if (user_nodes && vertex.id() >= M) return; else if (!user_nodes && vertex.id() < M) return; vertex_data & vdata = latent_factors_inmem[vertex.id()]; for(int e=0; e < vertex.num_outedges(); e++) { const EdgeDataType & observation = vertex.edge(e)->get_data(); vertex_data & nbr_latent = latent_factors_inmem[vertex.edge(e)->vertex_id()]; double prediction; double rmse = (*pprediction_func)(vdata, nbr_latent, observation, prediction, NULL); assert(rmse <= pow(maxval - minval, 2)); assert(validation_rmse_vec.size() > omp_get_thread_num()); validation_rmse_vec[omp_get_thread_num()] += rmse; } } void before_iteration(int iteration, graphchi_context & gcontext){ last_validation_rmse = dvalidation_rmse; validation_rmse_vec = zeros(num_threads); } /** * Called after an iteration has finished. 
*/ void after_iteration(int iteration, graphchi_context &gcontext) { assert(Le > 0); dvalidation_rmse = finalize_rmse(sum(validation_rmse_vec) , (double)Le); std::cout<<" Validation " << error_names[loss_type] << ":" << std::setw(10) << dvalidation_rmse << std::endl; if (halt_on_rmse_increase > 0 && halt_on_rmse_increase < cur_iteration && dvalidation_rmse > last_validation_rmse){ logstream(LOG_WARNING)<<"Stopping engine because of validation RMSE increase" << std::endl; converged_engine = true; } } }; void reset_rmse(int exec_threads){ logstream(LOG_DEBUG)<<"Detected number of threads: " << exec_threads << std::endl; num_threads = exec_threads; rmse_vec = zeros(exec_threads); } template<typename VertexDataType, typename EdgeDataType> void init_validation_rmse_engine(graphchi_engine<VertexDataType,EdgeDataType> *& pvalidation_engine, int nshards,float (*prediction_func)(const vertex_data & user, const vertex_data & movie, float rating, double & prediction, void * extra)){ if (nshards == -1) return; metrics * m = new metrics("validation_rmse_engine"); graphchi_engine<VertexDataType, EdgeDataType> * engine = new graphchi_engine<VertexDataType, EdgeDataType>(validation, nshards, false, *m); set_engine_flags(*engine); pvalidation_engine = engine; pprediction_func = prediction_func; } template<typename VertexDataType, typename EdgeDataType> void run_validation(graphchi_engine<VertexDataType, EdgeDataType> * pvalidation_engine, graphchi_context & context){ //no validation data, no need to run validation engine calculations cur_iteration = context.iteration; if (pvalidation_engine == NULL){ std::cout << std::endl; return; } if (calc_ap){ //AP ValidationAPProgram program; pvalidation_engine->run(program, 1); } else { //RMSE ValidationRMSEProgram program; pvalidation_engine->run(program, 1); } if (converged_engine) context.set_last_iteration(context.iteration); } #endif //__GRAPHCHI_RMSE_ENGINE
09jijiangwen-download
toolkits/collaborative_filtering/rmse_engine.hpp
C++
asf20
6,956
#ifndef _IMPLICIT_HPP__ #define _IMPLICIT_HPP__ /** * @file * @author Danny Bickson * @version 1.0 * * @section LICENSE * * Copyright [2012] [Carnegie Mellon University] * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ #include "eigen_wrapper.hpp" enum{ IMPLICIT_RATING_DISABLED = 0, IMPLICIT_RATING_RANDOM = 1 }; double implicitratingweight; double implicitratingvalue = -1; double implicitratingpercentage; int implicitratingtype; template<typename als_edge_type> uint add_implicit_edges4(int type, sharder<als_edge_type>& shrd){ switch(type){ case IMPLICIT_RATING_DISABLED: return 0; case IMPLICIT_RATING_RANDOM: break; default: assert(false); }; uint added = 0; uint toadd = (uint)(implicitratingpercentage*N*M); logstream(LOG_INFO)<<"Going to add: " << toadd << " implicit edges. " << std::endl; assert(toadd >= 1); for (uint j=0; j< toadd; j++){ ivec item = ::randi(1,0,N-1); ivec user = ::randi(1,0,M-1); shrd.preprocessing_add_edge(user[0], item[0], als_edge_type(implicitratingvalue, implicitratingweight)); added++; } logstream(LOG_INFO)<<"Finished adding " << toadd << " implicit edges. " << std::endl; return added; } template<typename als_edge_type> uint add_implicit_edges(int type, sharder<als_edge_type>& shrd ){ switch(type){ case IMPLICIT_RATING_DISABLED: return 0; case IMPLICIT_RATING_RANDOM: break; default: assert(false); }; uint added = 0; uint toadd = (uint)(implicitratingpercentage*N*M); logstream(LOG_INFO)<<"Going to add: " << toadd << " implicit edges. 
" << std::endl; assert(toadd >= 1); for (uint j=0; j< toadd; j++){ ivec item = ::randi(1,0,N-1); ivec user = ::randi(1,0,M-1); shrd.preprocessing_add_edge(user[0], item[0], als_edge_type(implicitratingvalue)); added++; } logstream(LOG_INFO)<<"Finished adding " << toadd << " implicit edges. " << std::endl; return added; } void parse_implicit_command_line(){ implicitratingweight = get_option_float("implicitratingweight", implicitratingweight); implicitratingvalue = get_option_float("implicitratingvalue", implicitratingvalue); implicitratingtype = get_option_int("implicitratingtype", implicitratingtype); if (implicitratingtype != IMPLICIT_RATING_RANDOM && implicitratingtype != IMPLICIT_RATING_DISABLED) logstream(LOG_FATAL)<<"Implicit rating type should be either 0 (IMPLICIT_RATING_DISABLED) or 1 (IMPLICIT_RATING_RANDOM)" << std::endl; implicitratingpercentage = get_option_float("implicitratingpercentage", implicitratingpercentage); if (implicitratingpercentage < 1e-8 && implicitratingpercentage > 0.8) logstream(LOG_FATAL)<<"Implicit rating percentage should be (1e-8, 0.8)" << std::endl; if (implicitratingtype != IMPLICIT_RATING_DISABLED && implicitratingvalue == 0) logstream(LOG_FATAL)<<"You are not allowed to use --implicitratingvalue=0. Please select a non zero value, for example -1" << std::endl; } #endif //_IMPLICIT_HPP__
09jijiangwen-download
toolkits/collaborative_filtering/implicit.hpp
C++
asf20
3,500
from optparse import OptionParser import random import heapq from operator import itemgetter from collections import defaultdict class Split: def __init__(self): self.train = {} self.test = {} self.counts = {} def add(self,user,trustees): if len(trustees) >= opts.min_trustees: self.counts[user] = len(trustees) random.shuffle(trustees) self.train[user] = trustees[:opts.given] self.test[user] = trustees[opts.given:] def map_ids(self): utrans = IndexTranslator() ttrans = IndexTranslator() train_idx = defaultdict(list) for user,trustees in self.train.iteritems(): uidx = utrans.idx(user) for t in trustees: train_idx[uidx].append(ttrans.idx(t)) test_idx = defaultdict(list) for user,trustees in self.test.iteritems(): uidx = utrans.idx(user,allow_update=False) assert(uidx is not None) # shouldn't have any unique users for t in trustees: tidx = ttrans.idx(t,allow_update=False) if tidx is not None: test_idx[uidx].append(tidx) self.train = train_idx self.test = test_idx class IndexTranslator: def __init__(self): self.index = {} def idx(self,key,allow_update=True): if allow_update and key not in self.index: self.index[key] = len(self.index)+1 return self.index.get(key,None) class MMWriter: def __init__(self,filepath): self.filepath = filepath def write(self,mat): f = open(self.filepath,'w') self.write_header(f,mat) self.write_data(f,mat) def write_header(self,f,mat): tot = 0 maxid = 0 for user,trustees in mat.iteritems(): tot += len(trustees) maxid = max(maxid,max(trustees)) print >>f,'%%MatrixMarket matrix coordinate integer general' print >>f,'{0} {1} {2}'.format(max(mat.keys()),maxid,tot) def write_data(self,f,mat): for user,trustees in mat.iteritems(): for t in trustees: print >>f,user,t,1 parser = OptionParser() parser.add_option('-i','--infile',dest='infile',help='input dataset') parser.add_option('-o','--outpath',dest='outpath',help='root path for output datasets [default=infile]') parser.add_option('-m','--min_trustees',dest='min_trustees',type='int',help='omit users 
with fewer trustees') parser.add_option('-g','--given',dest='given',type='int',help='retain this many trustees in training set') parser.add_option('-d','--discard_top',dest='discard_top',type='int',default=3,help='discard this many overall top popular users [default=%default]') (opts,args) = parser.parse_args() if not opts.min_trustees or not opts.given or not opts.infile: parser.print_help() raise SystemExit if not opts.outpath: opts.outpath = opts.infile overall = defaultdict(list) counts = defaultdict(int) f = open(opts.infile) for line in f: if not line.startswith('%'): break for line in f: user,trustee,score = map(int,line.strip().split()) if score > 0: counts[trustee] += 1 top = heapq.nlargest(opts.discard_top,counts.iteritems(),key=itemgetter(1)) for user,_ in top: counts[user] = 0 # so we don't include them f = open(opts.infile) for line in f: if not line.startswith('%'): break for line in f: user,trustee,score = map(int,line.strip().split()) if score > 0 and counts[trustee] >= opts.min_trustees: overall[user].append(trustee) split = Split() for user,trustees in overall.iteritems(): split.add(user,trustees) split.map_ids() w = MMWriter(opts.outpath+'_train') w.write(split.train) w = MMWriter(opts.outpath+'_test') w.write(split.test)
09jijiangwen-download
toolkits/collaborative_filtering/climf_script/prepare_epinions.py
Python
asf20
3,832
/** * Copyright (c) 2009 Carnegie Mellon University. * All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an "AS * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language * governing permissions and limitations under the License. * * For more about this software visit: * * http://graphlab.org * */ /** * Code by Danny Bickson, CMU */ #ifndef EIGEN_WRAPPER #define EIGEN_WRAPPER #ifdef EIGEN_NDEBUG #define NDEBUG #endif /** * SET OF WRAPPER FUNCTIONS FOR EIGEN * * */ #include <iostream> #include <fstream> #include <ostream> #include "Eigen/Dense" #define EIGEN_YES_I_KNOW_SPARSE_MODULE_IS_NOT_STABLE_YET #include "Eigen/Sparse" #include "Eigen/Cholesky" #include "Eigen/Eigenvalues" #include "Eigen/SVD" #define EIGEN_DONT_PARALLELIZE //eigen parallel for loop interfers with ours. 
using namespace Eigen; typedef MatrixXd mat; typedef VectorXd vec; typedef VectorXf fvec; typedef VectorXi ivec; typedef MatrixXi imat; typedef Matrix<size_t, Dynamic, Dynamic> matst; typedef SparseVector<double> sparse_vec; inline void debug_print_vec(const char * name,const vec& _vec, int len){ printf("%s ) ", name); for (int i=0; i< len; i++) if (_vec[i] == 0) printf(" 0 "); else printf("%12.4g ", _vec[i]); printf("\n"); } inline void debug_print_vec(const char * name,const double* _vec, int len){ printf("%s ) ", name); for (int i=0; i< len; i++) if (_vec[i] == 0) printf(" 0 "); else printf("%12.4g ", _vec[i]); printf("\n"); } mat randn1(int dx, int dy, int col); template<typename mat, typename data> inline void set_val(mat &A, int row, int col, data val){ A(row, col) = val; } inline double get_val(const mat &A, int row, int col){ return A(row, col); } inline int get_val(const imat &A, int row, int col){ return A(row, col); } inline vec get_col(const mat& A, int col){ return A.col(col); } inline vec get_row(const mat& A, int row){ return A.row(row); } inline void set_col(mat& A, int col, const vec & val){ A.col(col) = val; } inline void set_row(mat& A, int row, const vec & val){ A.row(row) = val; } inline mat eye(int size){ return mat::Identity(size, size); } inline vec ones(int size){ return vec::Ones(size); } inline fvec fones(int size){ return fvec::Ones(size); } inline vec init_vec(const double * array, int size){ vec ret(size); memcpy(ret.data(), array, size*sizeof(double)); return ret; } inline mat init_mat(const char * string, int row, int col){ mat out(row, col); char buf[2056]; strcpy(buf, string); char *pch = strtok(buf," \r\n\t;"); for (int i=0; i< row; i++){ for (int j=0; j< col; j++){ out(i,j) = atof(pch); pch = strtok (NULL, " \r\n\t;"); } } return out; } inline imat init_imat(const char * string, int row, int col){ imat out(row, col); char buf[2056]; strcpy(buf, string); char *pch = strtok(buf," \r\n\t;"); for (int i=0; i< row; i++){ for (int j=0; 
j< col; j++){ out(i,j) = atol(pch); pch = strtok (NULL, " \r\n\t;"); } } return out; } inline vec init_vec(const char * string, int size){ vec out(size); char buf[2056]; strcpy(buf, string); char *pch = strtok (buf," \r\n\t;"); int i=0; while (pch != NULL) { out(i) =atof(pch); pch = strtok (NULL, " \r\n\t;"); i++; } assert(i == size); return out; } inline vec init_dbl_vec(const char * string, int size){ return init_vec(string, size); } inline vec zeros(int size){ return vec::Zero(size); } inline fvec fzeros(int size){ return fvec::Zero(size); } inline mat zeros(int rows, int cols){ return mat::Zero(rows, cols); } inline vec head(const vec& v, int num){ return v.head(num); } inline vec mid(const vec&v, int start, int num){ return v.segment(start, std::min(num, (int)(v.size()-start))); } inline vec tail(const vec&v, int num){ return v.segment(v.size() - num, num); } inline ivec head(const ivec& v, int num){ return v.head(num); } inline void sort(ivec &a){ std::sort(a.data(), a.data()+a.size()); } inline void sort(vec & a){ std::sort(a.data(), a.data()+a.size()); } inline ivec sort_index(const vec&a){ ivec ret(a.size()); std::vector<std::pair<double,int> > D; // D.reserve(a.size()); for (int i=0;i<a.size();i++) D.push_back(std::make_pair<double,int>(a.coeff(i),i)); std::sort(D.begin(),D.end()); for (int i=0;i<a.size();i++) { ret[i]=D[i].second; } return ret; } inline void dot2(const vec& x1, const vec& x3, mat & Q, int j, int len){ for (int i=0; i< len; i++){ Q(i,j) = (x1(i) * x3(i)); } } inline bool ls_solve_chol(const mat &A, const vec &b, vec &result){ //result = A.jacobiSvd(ComputeThinU | ComputeThinV).solve(b); result = A.ldlt().solve(b); return true; } inline bool ls_solve(const mat &A, const vec &b, vec &result){ //result = A.jacobiSvd(ComputeThinU | ComputeThinV).solve(b); result = A.ldlt().solve(b); return true; } inline bool chol(mat& sigma, mat& out){ out = sigma.llt().matrixLLT(); return true; } inline bool backslash(const mat& A, const vec & b, vec & x){ 
x = A.jacobiSvd(ComputeThinU | ComputeThinV).solve(b); return true; } inline mat transpose(mat & A){ return A.transpose(); } inline mat randn(int dx, int dy){ return randn1(dx,dy,-1); } inline void set_diag(mat &A, vec & v){ A.diagonal()=v; } inline mat diag(vec & v){ return v.asDiagonal(); } template<typename mat> inline double sumsum(const mat & A){ return A.sum(); } inline double norm(const mat &A, int pow=2){ return A.squaredNorm(); } inline mat inv(const mat&A){ return A.inverse(); } inline bool inv(const mat&A, mat &out){ out = A.inverse(); return true; } inline mat outer_product(const vec&a, const vec&b){ return a*b.transpose(); } //Eigen does not sort eigenvalues, as done in matlab inline bool eig_sym(const mat & T, vec & eigenvalues, mat & eigenvectors){ // //Column of the returned matrix is an eigenvector corresponding to eigenvalue number as returned by eigenvalues(). The eigenvectors are normalized to have (Euclidean) norm equal to one. SelfAdjointEigenSolver<mat> solver(T); eigenvectors = solver.eigenvectors(); eigenvalues = solver.eigenvalues(); ivec index = sort_index(eigenvalues); sort(eigenvalues); vec eigenvalues2 = eigenvalues.reverse(); mat T2 = zeros(eigenvectors.rows(), eigenvectors.cols()); for (int i=0; i< eigenvectors.cols(); i++){ set_col(T2, index[i], get_col(eigenvectors, i)); } eigenvectors = T2; eigenvalues = eigenvalues2; return true; } inline vec elem_mult(const vec&a, const vec&b){ vec ret = a; for (int i=0; i<b.size(); i++) ret(i) *= b(i); return ret; } inline sparse_vec elem_mult(const sparse_vec&a, const sparse_vec&b){ return a.cwiseProduct(b); } inline double sum(const vec & a){ return a.sum(); } inline double min(const vec &a){ return a.minCoeff(); } inline double max(const vec & a){ return a.maxCoeff(); } inline vec randu(int size){ return vec::Random(size); } inline fvec frandu(int size){ return fvec::Random(size); } inline double randu(){ return vec::Random(1)(0); } inline ivec randi(int size, int from, int to){ ivec 
ret(size); for (int i=0; i<size; i++) ret[i]= internal::random<int>(from,to); return ret; } inline int randi(int from, int to){ return internal::random<int>(from,to); } inline ivec concat(const ivec&a, const ivec&b){ ivec ret(a.size()+b.size()); ret << a,b; return ret; } inline void del(ivec&a, int i){ memcpy(a.data()+i, a.data() + i+1, (a.size() - i - 1)*sizeof(int)); a.conservativeResize(a.size() - 1); //resize without deleting values! } inline mat get_cols(const mat&A, ivec & cols){ mat a(A.rows(), cols.size()); for (int i=0; i< cols.size(); i++) set_col(a, i, get_col(A, cols[i])); return a; } inline mat get_cols(const mat&A, int start_col, int end_col){ assert(end_col > start_col); assert(end_col <= A.cols()); assert(start_col >= 0); mat a(A.rows(), end_col-start_col); for (int i=0; i< end_col-start_col; i++) set_col(a, i, get_col(A, i)); return a; } inline void set_val(vec & v, int pos, double val){ v(pos) = val; } inline void set_val(sparse_vec & v, int pos, double val){ v.coeffRef(pos) = val; } inline double dot(const vec&a, const vec& b){ return a.dot(b); } inline vec reverse(vec& a){ return a.reverse(); } inline ivec reverse(ivec& a){ return a.reverse(); } inline const double * data(const mat &A){ return A.data(); } inline const int * data(const imat &A){ return A.data(); } inline const double * data(const vec &v){ return v.data(); } class it_file{ std::fstream fb; public: it_file(const char * name){ fb.open(name, std::fstream::in); fb.close(); if (fb.fail()){ fb.clear(std::fstream::failbit); fb.open(name, std::fstream::out | std::fstream::trunc ); } else { fb.open(name, std::fstream::in); } if (!fb.is_open()){ perror("Failed opening file "); printf("filename is: %s\n", name); assert(false); } }; std::fstream & operator<<(const std::string str){ int size = str.size(); fb.write((char*)&size, sizeof(int)); assert(!fb.fail()); fb.write(str.c_str(), size); return fb; } std::fstream &operator<<(mat & A){ int rows = A.rows(), cols = A.cols(); fb.write( (const 
char*)&rows, sizeof(int)); fb.write( (const char *)&cols, sizeof(int)); for (int i=0; i< A.rows(); i++) for (int j=0; j< A. cols(); j++){ double val = A(i,j); fb.write( (const char *)&val, sizeof(double)); assert(!fb.fail()); } return fb; } std::fstream &operator<<(const vec & v){ int size = v.size(); fb.write( (const char*)&size, sizeof(int)); assert(!fb.fail()); for (int i=0; i< v.size(); i++){ double val = v(i); fb.write( (const char *)&val, sizeof(double)); assert(!fb.fail()); } return fb; } std::fstream & operator<<(const double &v){ fb.write((const char*)&v, sizeof(double)); return fb; } std::fstream & operator>>(std::string str){ int size = -1; fb.read((char*)&size, sizeof(int)); if (fb.fail() || fb.eof()){ perror("Failed reading file"); assert(false); } char buf[256]; fb.read(buf, std::min(256,size)); assert(!fb.fail()); assert(!strncmp(str.c_str(), buf, std::min(256,size))); return fb; } std::fstream &operator>>(mat & A){ int rows, cols; fb.read( (char *)&rows, sizeof(int)); assert(!fb.fail()); fb.read( (char *)&cols, sizeof(int)); assert(!fb.fail()); A = mat(rows, cols); double val; for (int i=0; i< A.rows(); i++) for (int j=0; j< A. 
cols(); j++){ fb.read((char*)&val, sizeof(double)); assert(!fb.fail()); A(i,j) = val; } return fb; } std::fstream &operator>>(vec & v){ int size; fb.read((char*)&size, sizeof(int)); assert(!fb.fail()); assert(size >0); v = vec(size); double val; for (int i=0; i< v.size(); i++){ fb.read((char*)& val, sizeof(double)); assert(!fb.fail()); v(i) = val; } return fb; } std::fstream &operator>>(double &v){ fb.read((char*)&v, sizeof(double)); assert(!fb.fail()); return fb; } void close(){ fb.close(); } }; #define Name(a) std::string(a) inline void set_size(sparse_vec &v, int size){ //did not find a way to declare vector dimension, yet } inline void set_new(sparse_vec&v, int ind, double val){ v.insert(ind) = val; } inline int nnz(sparse_vec& v){ return v.nonZeros(); } inline int get_nz_index(sparse_vec &v, sparse_vec::InnerIterator& i){ return i.index(); } inline double get_nz_data(sparse_vec &v, sparse_vec::InnerIterator& i){ return i.value(); } #define FOR_ITERATOR(i,v) \ for (sparse_vec::InnerIterator i(v); i; ++i) template<typename T> inline double sum_sqr(const T& a); template<> inline double sum_sqr<vec>(const vec & a){ vec ret = a.array().pow(2); return ret.sum(); } template<> inline double sum_sqr<sparse_vec>(const sparse_vec & a){ double sum=0; FOR_ITERATOR(i,a){ sum+= powf(i.value(),2); } return sum; } inline double trace(const mat & a){ return a.trace(); } inline double get_nz_data(sparse_vec &v, int i){ assert(nnz(v) > i); int cnt=0; FOR_ITERATOR(j, v){ if (cnt == i){ return j.value(); } cnt++; } return 0.0; } inline void print(sparse_vec & vec){ int cnt = 0; FOR_ITERATOR(i, vec){ std::cout<<get_nz_index(vec, i)<<":"<< get_nz_data(vec, i) << " "; cnt++; if (cnt >= 20) break; } std::cout<<std::endl; } inline vec pow(const vec&v, int exponent){ vec ret = vec(v.size()); for (int i=0; i< v.size(); i++) ret[i] = powf(v[i], exponent); return ret; } inline double dot_prod(sparse_vec &v1, sparse_vec & v2){ return v1.dot(v2); } inline double dot_prod(const vec &v1, const 
vec & v2){ return v1.dot(v2); } inline double dot3(const vec &v1, const vec & v2, const vec & v3){ double ret = 0; for (int i=0; i < v1.size(); i++) ret+= v1[i]*v2[i]*v3[i]; return ret; } inline double dot_prod(sparse_vec &v1, const vec & v2){ double sum = 0; for (int i=0; i< v2.size(); i++){ sum+= v2[i] * v1.coeffRef(i); } return sum; } inline vec cumsum(vec& v){ vec ret = v; for (int i=1; i< v.size(); i++) for (int j=0; j< i; j++) ret(i) += v(j); return ret; } inline double get_val(sparse_vec & v1, int i){ //TODO optimize performance for (sparse_vec::InnerIterator it(v1); it; ++it) if (it.index() == i) return it.value(); return 0; } inline double get_val(vec & v1, int i){ return v1(i); } inline void set_div(sparse_vec&v, sparse_vec::InnerIterator i, double val){ v.coeffRef(i.index()) /= val; } inline sparse_vec minus(sparse_vec &v1,sparse_vec &v2){ return v1-v2; } inline vec minus( sparse_vec &v1, vec &v2){ vec ret = -v2; FOR_ITERATOR(i, v1){ ret[i.index()] += i.value(); } return ret; } inline void plus( vec &v1, sparse_vec &v2){ FOR_ITERATOR(i, v2){ v1[i.index()] += i.value(); } } inline void minus( vec &v1, sparse_vec &v2){ FOR_ITERATOR(i, v2){ v1[i.index()] -= i.value(); } } inline sparse_vec fabs( sparse_vec & dvec1){ sparse_vec ret = dvec1; FOR_ITERATOR(i, ret){ ret.coeffRef(i.index()) = fabs(i.value()); } return ret; }; inline vec fabs( const vec & dvec1){ vec ret(dvec1.size()); for (int i=0; i< dvec1.size(); i++){ ret(i) = fabs(dvec1(i)); } return ret; }; inline double abs_sum(const mat& A){ double sum =0; for (int i=0; i< A.rows(); i++) for (int j=0; j< A.cols(); j++) sum += fabs(A(i,j)); return sum; } inline double abs_sum(const vec &v){ double sum =0; for (int i=0; i< v.size(); i++) sum += fabs(v(i)); return sum; } inline double sum(const sparse_vec &v){ double sum =0; FOR_ITERATOR(i, v){ sum += i.value(); } return sum; } inline vec sqrt(const vec & v){ vec ret(v.size()); for (int i=0; i< v.size(); i++){ ret[i] = std::sqrt(v(i)); } return ret; } inline 
void svd(const mat & A, mat & U, mat & V, vec & singular_values){ Eigen::JacobiSVD<mat> svdEigen(A, Eigen::ComputeFullU | Eigen::ComputeFullV); U= svdEigen.matrixU(); V= svdEigen.matrixV(); singular_values =svdEigen.singularValues(); } inline bool pair_compare (std::pair<double,int> &x1, std::pair<double,int> & x2) { return (x1.first>x2.first); } inline ivec reverse_sort_index2(const vec&a, const ivec&indices, vec & out, int K){ assert(a.size() == indices.size()); assert(K > 0); int size = std::min((unsigned int)a.size(), (unsigned int)K); ivec ret(size); std::vector<std::pair<double,int> > D; D.reserve(a.size()); for (int i=0;i<a.size();i++) D.push_back(std::make_pair<double,int>(a[i],indices[i])); std::partial_sort(D.begin(),D.begin() + size, D.end(), pair_compare); for (int i=0;i< size;i++) { ret[i]=D[i].second; out[i] = D[i].first; } return ret; } inline ivec reverse_sort_index(const vec& a, int K){ assert(K > 0); int size = std::min((unsigned int)a.size(), (unsigned int)K); ivec ret(size); std::vector<std::pair<double,int> > D; D.reserve(a.size()); for (int i=0;i<a.size();i++) D.push_back(std::make_pair<double,int>(a[i],i)); std::partial_sort(D.begin(),D.begin() + size, D.end(), pair_compare); for (int i=0;i< size;i++) { ret[i]=D[i].second; } return ret; } inline ivec reverse_sort_index(sparse_vec& a, int K){ assert(K > 0); int size = std::min((unsigned int)nnz(a), (unsigned int)K); ivec ret(size); std::vector<std::pair<double,int> > D; D.reserve(nnz(a)); FOR_ITERATOR(i, a){ D.push_back(std::make_pair<double,int>(i.value(),i.index())); } std::partial_sort(D.begin(),D.begin() + size, D.end(), pair_compare); for (int i=0;i< size;i++) { ret[i]=D[i].second; } return ret; } //define function to be applied coefficient-wise double equal_greater(double x){ if (x != 0) return 1; else return 0; } //sort(edges.begin(), edges.end()); #undef NDEBUG #endif
09jijiangwen-download
toolkits/collaborative_filtering/eigen_wrapper.hpp
C++
asf20
17,569
/** * Copyright (c) 2009 Carnegie Mellon University. * All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an "AS * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language * governing permissions and limitations under the License. * * For more about this software visit: * * http://graphchi.org * */ #include "common.hpp" #include "types.hpp" #include "eigen_wrapper.hpp" #include "timer.hpp" using namespace std; int input_cols = 3; /* Metrics object for keeping track of performance counters and other information. Currently required. */ metrics m("svd-onesided-inmemory-factors"); int nshards; struct vertex_data { vec pvec; double value; double A_ii; vertex_data(){ value = 0; A_ii = 1; } //TODO void add_self_edge(double value) { A_ii = value; } void set_val(double value, int field_type) { pvec[field_type] = value; } //double get_output(int field_type){ return pred_x; } }; // end of vertex_data struct edge_data { float weight; edge_data(double weight = 0) : weight(weight) { } edge_data(double weight, double ignored) : weight(weight) { } //void set_field(int pos, double val){ weight = val; } //double get_field(int pos){ return weight; } }; /** * Type definitions. Remember to create suitable graph shards using the * Sharder-program. 
*/ typedef vertex_data VertexDataType; typedef edge_data EdgeDataType; // Edges store the "rating" of user->movie pair graphchi_engine<VertexDataType, EdgeDataType> * pengine = NULL; std::vector<vertex_data> latent_factors_inmem; #include "io.hpp" /** * * Implementation of the Lanczos algorithm, as given in: * http://en.wikipedia.org/wiki/Lanczos_algorithm * * Code written by Danny Bickson, CMU, June 2011 * */ //LANCZOS VARIABLES int max_iter = 10; bool no_edge_data = false; int actual_vector_len; int nv = 0; int nsv = 0; double tol = 1e-8; bool finished = false; int ortho_repeats = 3; bool save_vectors = false; std::string format = "matrixmarket"; int nodes = 0; int data_size = max_iter; #include "math.hpp" #include "printouts.hpp" void init_lanczos(bipartite_graph_descriptor & info){ srand48(time(NULL)); latent_factors_inmem.resize(info.total()); data_size = nsv + nv+1 + max_iter; actual_vector_len = data_size; if (info.is_square()) actual_vector_len = data_size + 3; #pragma omp parallel for for (int i=0; i< info.total(); i++){ if (i < info.get_start_node(false) || info.is_square()) latent_factors_inmem[i].pvec = zeros(actual_vector_len); else latent_factors_inmem[i].pvec = zeros(3); } logstream(LOG_INFO)<<"Allocated a total of: " << ((double)(data_size * info.num_nodes(true) +3.0*info.num_nodes(false)) * sizeof(double)/ 1e6) << " MB for storing vectors." << " rows: " << info.num_nodes(true) << std::endl; } vec one_sided_lanczos( bipartite_graph_descriptor & info, timer & mytimer, vec & errest, const std::string & vecfile){ int nconv = 0; int its = 1; DistMat A(info); int other_size_offset = info.is_square() ? 
data_size : 0; DistSlicedMat U(other_size_offset, other_size_offset + 3, true, info, "U"); DistSlicedMat V(0, data_size, false, info, "V"); DistVec v(info, 1, false, "v"); DistVec u(info, other_size_offset+ 0, true, "u"); DistVec u_1(info, other_size_offset+ 1, true, "u_1"); DistVec tmp(info, other_size_offset + 2, true, "tmp"); vec alpha, beta, b; vec sigma = zeros(data_size); errest = zeros(nv); DistVec v_0(info, 0, false, "v_0"); if (vecfile.size() == 0) v_0 = randu(size(A,2)); PRINT_VEC2("svd->V", v_0); DistDouble vnorm = norm(v_0); v_0=v_0/vnorm; PRINT_INT(nv); while(nconv < nsv && its < max_iter){ std::cout<<"Starting iteration: " << its << " at time: " << mytimer.current_time() << std::endl; int k = nconv; int n = nv; PRINT_INT(k); PRINT_INT(n); PRINT_VEC2("v", v); PRINT_VEC2("u", u); alpha = zeros(n); beta = zeros(n); u = V[k]*A._transpose(); PRINT_VEC2("u",u); for (int i=k+1; i<n; i++){ std::cout <<"Starting step: " << i << " at time: " << mytimer.current_time() << std::endl; PRINT_INT(i); V[i]=u*A; double a = norm(u).toDouble(); u = u / a; multiply(V, i, a); PRINT_DBL(a); double b; orthogonalize_vs_all(V, i, b); PRINT_DBL(b); u_1 = V[i]*A._transpose(); u_1 = u_1 - u*b; alpha(i-k-1) = a; beta(i-k-1) = b; PRINT_VEC3("alpha", alpha, i-k-1); PRINT_VEC3("beta", beta, i-k-1); tmp = u; u = u_1; u_1 = tmp; } V[n]= u*A; double a = norm(u).toDouble(); PRINT_DBL(a); u = u/a; double b; multiply(V, n, a); orthogonalize_vs_all(V, n, b); alpha(n-k-1)= a; beta(n-k-1) = b; PRINT_VEC3("alpha", alpha, n-k-1); PRINT_VEC3("beta", beta, n-k-1); //compute svd of bidiagonal matrix PRINT_INT(nv); PRINT_NAMED_INT("svd->nconv", nconv); n = nv - nconv; PRINT_INT(n); alpha.conservativeResize(n); beta.conservativeResize(n); PRINT_MAT2("Q",eye(n)); PRINT_MAT2("PT",eye(n)); PRINT_VEC2("alpha",alpha); PRINT_VEC2("beta",beta); mat T=diag(alpha); for (int i=0; i<n-1; i++) set_val(T, i, i+1, beta(i)); PRINT_MAT2("T", T); mat aa,PT; vec bb; svd(T, aa, PT, bb); PRINT_MAT2("Q", aa); 
alpha=bb.transpose(); PRINT_MAT2("alpha", alpha); for (int t=0; t< n-1; t++) beta(t) = 0; PRINT_VEC2("beta",beta); PRINT_MAT2("PT", PT.transpose()); //estiamte the error int kk = 0; for (int i=nconv; i < nv; i++){ int j = i-nconv; PRINT_INT(j); sigma(i) = alpha(j); PRINT_NAMED_DBL("svd->sigma[i]", sigma(i)); PRINT_NAMED_DBL("Q[j*n+n-1]",aa(n-1,j)); PRINT_NAMED_DBL("beta[n-1]",beta(n-1)); errest(i) = abs(aa(n-1,j)*beta(n-1)); PRINT_NAMED_DBL("svd->errest[i]", errest(i)); if (alpha(j) > tol){ errest(i) = errest(i) / alpha(j); PRINT_NAMED_DBL("svd->errest[i]", errest(i)); } if (errest(i) < tol){ kk = kk+1; PRINT_NAMED_INT("k",kk); } if (nconv +kk >= nsv){ printf("set status to tol\n"); finished = true; } }//end for PRINT_NAMED_INT("k",kk); vec v; if (!finished){ vec swork=get_col(PT,kk); PRINT_MAT2("swork", swork); v = zeros(size(A,1)); for (int ttt=nconv; ttt < nconv+n; ttt++){ v = v+swork(ttt-nconv)*(V[ttt].to_vec()); } PRINT_VEC2("svd->V",V[nconv]); PRINT_VEC2("v[0]",v); } //compute the ritz eigenvectors of the converged singular triplets if (kk > 0){ PRINT_VEC2("svd->V", V[nconv]); mat tmp= V.get_cols(nconv,nconv+n)*PT; V.set_cols(nconv, nconv+kk, get_cols(tmp, 0, kk)); PRINT_VEC2("svd->V", V[nconv]); } nconv=nconv+kk; if (finished) break; V[nconv]=v; PRINT_VEC2("svd->V", V[nconv]); PRINT_NAMED_INT("svd->nconv", nconv); its++; PRINT_NAMED_INT("svd->its", its); PRINT_NAMED_INT("svd->nconv", nconv); //nv = min(nconv+mpd, N); //if (nsv < 10) // nv = 10; PRINT_NAMED_INT("nv",nv); } // end(while) printf(" Number of computed signular values %d",nconv); printf("\n"); DistVec normret(info, other_size_offset + 1, true, "normret"); DistVec normret_tranpose(info, nconv, false, "normret_tranpose"); for (int i=0; i < nconv; i++){ u = V[i]*A._transpose(); double a = norm(u).toDouble(); u = u / a; if (save_vectors){ char output_filename[256]; sprintf(output_filename, "%s.U.%d", training.c_str(), i); write_output_vector(output_filename, u.to_vec(), false, "GraphLab v2 SVD output. 
This file contains eigenvector number i of the matrix U"); } normret = V[i]*A._transpose() - u*sigma(i); double n1 = norm(normret).toDouble(); PRINT_DBL(n1); normret_tranpose = u*A -V[i]*sigma(i); double n2 = norm(normret_tranpose).toDouble(); PRINT_DBL(n2); double err=sqrt(n1*n1+n2*n2); PRINT_DBL(err); PRINT_DBL(tol); if (sigma(i)>tol){ err = err/sigma(i); } PRINT_DBL(err); PRINT_DBL(sigma(i)); printf("Singular value %d \t%13.6g\tError estimate: %13.6g\n", i, sigma(i),err); } if (save_vectors){ if (nconv == 0) logstream(LOG_FATAL)<<"No converged vectors. Aborting the save operation" << std::endl; char output_filename[256]; for (int i=0; i< nconv; i++){ sprintf(output_filename, "%s.V.%d", training.c_str(), i); write_output_vector(output_filename, V[i].to_vec(), false, "GraphLab v2 SVD output. This file contains eigenvector number i of the matrix V'"); } } return sigma; } int main(int argc, const char *argv[]) { print_copyright(); //* GraphChi initialization will read the command line arguments and the configuration file. 
*/ graphchi_init(argc, argv); std::string vecfile; vecfile = get_option_string("initial_vector", ""); debug = get_option_int("debug", 0); ortho_repeats = get_option_int("ortho_repeats", 3); nv = get_option_int("nv", 1); nsv = get_option_int("nsv", 1); tol = get_option_float("tol", 1e-5); save_vectors = get_option_int("save_vectors", 1); input_cols = get_option_int("input_cols", 3); max_iter = get_option_int("max_iter", max_iter); parse_command_line_args(); parse_implicit_command_line(); if (nv < nsv){ logstream(LOG_FATAL)<<"Please set the number of vectors --nv=XX, to be greater than the number of support vectors --nsv=XX " << std::endl; } //unit testing if (unittest == 1){ training = "gklanczos_testA"; vecfile = "gklanczos_testA_v0"; nsv = 3; nv = 3; debug = true; //TODO core.set_ncpus(1); } else if (unittest == 2){ training = "gklanczos_testB"; vecfile = "gklanczos_testB_v0"; nsv = 10; nv = 10; debug = true; max_iter = 100; //TODO core.set_ncpus(1); } else if (unittest == 3){ training = "gklanczos_testC"; vecfile = "gklanczos_testC_v0"; nsv = 25; nv = 25; debug = true; max_iter = 100; //TODO core.set_ncpus(1); } std::cout << "Load matrix " << training << std::endl; /* Preprocess data if needed, or discover preprocess files */ if (input_cols == 3) nshards = convert_matrixmarket<edge_data>(training); else if (input_cols == 4) nshards = convert_matrixmarket4<edge_data>(training); else logstream(LOG_FATAL)<<"--input_cols=XX should be either 3 or 4 input columns" << std::endl; info.rows = M; info.cols = N; info.nonzeros = L; assert(info.rows > 0 && info.cols > 0 && info.nonzeros > 0); timer mytimer; mytimer.start(); init_lanczos(info); init_math(info, ortho_repeats); //read initial vector from file (optional) if (vecfile.size() > 0){ std::cout << "Load inital vector from file" << vecfile << std::endl; load_matrix_market_vector(vecfile, info, 0, true, false); } //or start with a random initial vector else { #pragma omp parallel for for (int i=0; i< (int)M; i++) 
latent_factors_inmem[i].pvec[0] = drand48(); } graphchi_engine<VertexDataType, EdgeDataType> engine(training, nshards, false, m); set_engine_flags(engine); pengine = &engine; vec errest; vec singular_values = one_sided_lanczos(info, mytimer, errest, vecfile); std::cout << "Lanczos finished in " << mytimer.current_time() << std::endl; write_output_vector(training + ".singular_values", singular_values,false, "%GraphLab SVD Solver library. This file contains the singular values."); if (unittest == 1){ assert(errest.size() == 3); for (int i=0; i< errest.size(); i++) assert(errest[i] < 1e-30); } else if (unittest == 2){ assert(errest.size() == 10); for (int i=0; i< errest.size(); i++) assert(errest[i] < 1e-15); } /* Report execution metrics */ if (!quiet) metrics_report(m); return 0; }
09jijiangwen-download
toolkits/collaborative_filtering/svd_onesided.cpp
C++
asf20
12,310
/**
 * Copyright (c) 2009 Carnegie Mellon University.
 * All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an "AS
 * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied.  See the License for the specific language
 * governing permissions and limitations under the License.
 *
 * For more about this software visit:
 *
 *   http://www.graphlab.ml.cmu.edu
 */
#ifndef GRAPHLAB_TIMER_HPP
#define GRAPHLAB_TIMER_HPP

#include <sys/time.h>
#include <stdio.h>
#include <iostream>

/**
 * \ingroup util
 *
 * \brief Wall-clock stopwatch with microsecond resolution.
 *
 * Construction (or an explicit call to \ref start) records the current
 * wall-clock time; \ref current_time then reports the number of seconds
 * elapsed since that instant.
 *
 * \code
 * timer t;                // starts automatically
 * // ... do something ...
 * std::cout << "Elapsed time: " << t.current_time() << std::endl;
 * \endcode
 */
class timer {
 private:
  /// Wall-clock instant captured by the most recent call to start().
  timeval start_time_;

 public:
  /// The stopwatch starts running as soon as it is constructed.
  inline timer() { start(); }

  /// Restart the stopwatch from "now".
  inline void start() { gettimeofday(&start_time_, NULL); }

  /**
   * \brief Seconds elapsed since \ref start was last called.
   *
   * @return elapsed wall-clock time, in seconds.
   */
  inline double current_time() const {
    timeval now;
    gettimeofday(&now, NULL);
    const double secs  = (double)(now.tv_sec  - start_time_.tv_sec);
    const double usecs = (double)(now.tv_usec - start_time_.tv_usec);
    return secs + usecs / 1.0E6;
  } // end of current_time

  /**
   * \brief Milliseconds elapsed since \ref start was last called.
   *
   * @return elapsed wall-clock time, in milliseconds.
   */
  inline double current_time_millis() const { return current_time() * 1000; }
}; // end of timer

#endif
09jijiangwen-download
toolkits/collaborative_filtering/timer.hpp
C++
asf20
3,236
#**
# * @file
# * @author Danny Bickson
# * @version 1.0
# *
# * @section LICENSE
# *
# * Copyright [2012] [Carngie Mellon University]
# *
# * Licensed under the Apache License, Version 2.0 (the "License");
# * you may not use this file except in compliance with the License.
# * You may obtain a copy of the License at
# *
# *    http://www.apache.org/licenses/LICENSE-2.0
# *
# * Unless required by applicable law or agreed to in writing, software
# * distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.

# Makefile for compiling graphchi collaborative filtering library
# Written by Danny Bickson
# Thanks to Yucheng Low for fixing the Makefile

# Header search paths: system headers, the graphchi sources, and this dir.
INCFLAGS = -I/usr/local/include/ -I../../src/ -I.

# NOTE: Uncomment the flag GRAPHCHI_USE_GSL if you want to compile pmf
CPPFLAGS = -O3 $(INCFLAGS) -DEIGEN_NDEBUG -fopenmp -Wall -Wno-strict-aliasing
# NOTE: uncomment the flag -lgsl if you want to compile pmf
LINKFLAGS = -lz
# Note : on Ubuntu on some compilers -lz is not detected properly so it is
# deliberatively set to be the last flag.

CPP = g++
CXX = g++

# Every header in this directory is a prerequisite of every tool binary.
headers=$(wildcard *.h**)

# Build one binary per .cpp file found in this directory.
all: $(patsubst %.cpp, %, $(wildcard *.cpp))

%: %.cpp $(headers)
	$(CPP) $(CPPFLAGS) $< -o $@ $(LINKFLAGS)

clean:
	rm -f $(patsubst %.cpp, %, $(wildcard *.cpp))
09jijiangwen-download
toolkits/collaborative_filtering/Makefile
Makefile
asf20
1,457
/** * @file * @author Danny Bickson * @version 1.0 * * @section LICENSE * * Copyright [2012] [Carnegie Mellon University] * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * @section DESCRIPTION * * Implementation of the libfm algorithm. * Steffen Rendle (2010): Factorization Machines, in Proceedings of the 10th IEEE International Conference on Data Mining (ICDM 2010), Sydney, Australia. * Original implementation by Qiang Yan, Chinese Academy of Science. * note: this code version implements the SGD version of libfm. In the original library there are also ALS and MCMC methods. * Also the treatment of features is richer in libfm. The code here can serve for a quick evaluation but the user * is encouraged to try libfm as well. */ #include "common.hpp" #include "eigen_wrapper.hpp" double libfm_rate = 1e-02; double libfm_mult_dec = 0.9; double libfm_regw = 1e-3; double libfm_regv = 1e-3; double reg0 = 0.1; bool debug = false; int time_offset = 1; //time bin starts from 1? 
bool is_user(vid_t id){ return id < M; } bool is_item(vid_t id){ return id >= M && id < N; } bool is_time(vid_t id){ return id >= M+N; } #define BIAS_POS -1 struct vertex_data { vec pvec; double bias; int last_item; vertex_data() { bias = 0; last_item = 0; } void set_val(int index, float val){ if (index == BIAS_POS) bias = val; else pvec[index] = val; } float get_val(int index){ if (index== BIAS_POS) return bias; else return pvec[index]; } }; struct edge_data { double weight; double time; edge_data() { weight = time = 0; } edge_data(double weight, double time) : weight(weight), time(time) { } }; struct vertex_data_libfm{ double * bias; double * v; int *last_item; vertex_data_libfm(const vertex_data & vdata){ v = (double*)&vdata.pvec[0]; bias = (double*)&vdata.bias; last_item = (int*)&vdata.last_item; } vertex_data_libfm & operator=(vertex_data & data){ v = (double*)&data.pvec[0]; bias = (double*)&data.bias; last_item = (int*)&data.last_item; return * this; } }; /** * Type definitions. Remember to create suitable graph shards using the * Sharder-program. 
*/ typedef vertex_data VertexDataType; typedef edge_data EdgeDataType; // Edges store the "rating" of user->movie pair graphchi_engine<VertexDataType, EdgeDataType> * pengine = NULL; graphchi_engine<VertexDataType, EdgeDataType> * pvalidation_engine = NULL; std::vector<vertex_data> latent_factors_inmem; #include "io.hpp" #include "rmse.hpp" #include "rmse_engine4.hpp" float libfm_predict(const vertex_data_libfm& user, const vertex_data_libfm& movie, const vertex_data_libfm& time, const float rating, double& prediction, vec * sum){ vertex_data & last_item = latent_factors_inmem[M+N+K+(*user.last_item)]; //TODO, when no ratings, last item is 0 vec sum_sqr = zeros(D); *sum = zeros(D); prediction = globalMean + *user.bias + *movie.bias + *time.bias + last_item.bias; for (int j=0; j< D; j++){ sum->operator[](j) += user.v[j] + movie.v[j] + time.v[j] + last_item.pvec[j]; sum_sqr[j] = pow(user.v[j],2) + pow(movie.v[j],2) + pow(time.v[j],2) + pow(last_item.pvec[j],2); prediction += 0.5 * (pow(sum->operator[](j),2) - sum_sqr[j]); } //truncate prediction to allowed values prediction = std::min((double)prediction, maxval); prediction = std::max((double)prediction, minval); //return the squared error float err = rating - prediction; assert(!std::isnan(err)); return err*err; } float libfm_predict(const vertex_data& user, const vertex_data& movie, const float rating, double & prediction, void * extra){ vec sum; return libfm_predict(vertex_data_libfm((vertex_data&)user), vertex_data_libfm((vertex_data&)movie), vertex_data_libfm(*(vertex_data*)extra), rating, prediction, &sum); } void init_libfm(){ srand(time(NULL)); latent_factors_inmem.resize(M+N+K+M); assert(D > 0); double factor = 0.1/sqrt(D); #pragma omp parallel for for (int i=0; i< (int)(M+N+K+M); i++){ latent_factors_inmem[i].pvec = (debug ? 0.1*ones(D) : (::randu(D)*factor)); } } /** * GraphChi programs need to subclass GraphChiProgram<vertex-type, edge-type> * class. The main logic is usually in the update function. 
*/ struct LIBFMVerticesInMemProgram : public GraphChiProgram<VertexDataType, EdgeDataType> { /* * Vertex update function - computes the least square step */ void update(graphchi_vertex<VertexDataType, EdgeDataType> &vertex, graphchi_context &gcontext) { if (gcontext.iteration == 0){ if (is_user(vertex.id())) { //user node. find the last rated item and store it vertex_data_libfm user = latent_factors_inmem[vertex.id()]; int max_time = 0; for(int e=0; e < vertex.num_outedges(); e++) { const edge_data & edge = vertex.edge(e)->get_data(); if (edge.time >= max_time){ max_time = (int)(edge.time - time_offset); *user.last_item = vertex.edge(e)->vertex_id() - M; } } } if (is_user(vertex.id()) && vertex.num_outedges() == 0) logstream(LOG_WARNING)<<"Vertex: " << vertex.id() << " with no edges: " << std::endl; return; return; } //go over all user nodes if (is_user(vertex.id())){ vertex_data_libfm user = latent_factors_inmem[vertex.id()]; assert(*user.last_item >= 0 && *user.last_item < (int)N); vertex_data & last_item = latent_factors_inmem[M+N+K+(*user.last_item)]; for(int e=0; e < vertex.num_outedges(); e++) { vertex_data_libfm movie(latent_factors_inmem[vertex.edge(e)->vertex_id()]); float rui = vertex.edge(e)->get_data().weight; double pui; vec sum; vertex_data & time = latent_factors_inmem[(int)vertex.edge(e)->get_data().time - time_offset]; float sqErr = libfm_predict(user, movie, time, rui, pui, &sum); float eui = pui - rui; globalMean -= libfm_rate * (eui + reg0 * globalMean); *user.bias -= libfm_rate * (eui + libfm_regw * *user.bias); *movie.bias -= libfm_rate * (eui + libfm_regw * *movie.bias); time.bias -= libfm_rate * (eui + libfm_regw * time.bias); assert(!std::isnan(time.bias)); last_item.bias -= libfm_rate * (eui + libfm_regw * last_item.bias); for(int f = 0; f < D; f++){ // user float grad = sum[f] - user.v[f]; user.v[f] -= libfm_rate * (eui * grad + libfm_regv * user.v[f]); // item grad = sum[f] - movie.v[f]; movie.v[f] -= libfm_rate * (eui * grad + 
libfm_regv * movie.v[f]); // time grad = sum[f] - time.pvec[f]; time.pvec[f] -= libfm_rate * (eui * grad + libfm_regv * time.pvec[f]); // last item grad = sum[f] - last_item.pvec[f]; last_item.pvec[f] -= libfm_rate * (eui * grad + libfm_regv * last_item.pvec[f]); } rmse_vec[omp_get_thread_num()] += sqErr; } } }; /** * Called before an iteration is started. */ void before_iteration(int iteration, graphchi_context &gcontext) { reset_rmse(gcontext.execthreads); } /** * Called after an iteration has finished. */ void after_iteration(int iteration, graphchi_context &gcontext) { libfm_rate *= libfm_mult_dec; training_rmse(iteration, gcontext); run_validation4(pvalidation_engine, gcontext); }; }; void output_libfm_result(std::string filename) { MMOutputter_mat<vertex_data> mmoutput_left(filename + "_U.mm", 0, M, "This file contains LIBFM output matrix U. In each row D factors of a single user node.", latent_factors_inmem); MMOutputter_mat<vertex_data> mmoutput_right(filename + "_V.mm", M ,M+N, "This file contains -LIBFM output matrix V. In each row D factors of a single item node.", latent_factors_inmem); MMOutputter_mat<vertex_data> mmoutput_time(filename + "_T.mm", M+N ,M+N+K, "This file contains -LIBFM output matrix T. In each row D factors of a single time node.", latent_factors_inmem); MMOutputter_mat<vertex_data> mmoutput_last_item(filename + "_L.mm", M+N+K ,M+N+K+M, "This file contains -LIBFM output matrix L. In each row D factors of a single last item node.", latent_factors_inmem); MMOutputter_vec<vertex_data> mmoutput_bias_left(filename + "_U_bias.mm", 0, M, BIAS_POS, "This file contains LIBFM output bias vector. In each row a single user bias.", latent_factors_inmem); MMOutputter_vec<vertex_data> mmoutput_bias_right(filename + "_V_bias.mm",M ,M+N, BIAS_POS, "This file contains LIBFM output bias vector. 
In each row a single item bias.", latent_factors_inmem); MMOutputter_vec<vertex_data> mmoutput_bias_time(filename + "_T_bias.mm",M+N ,M+N+K , BIAS_POS, "This file contains LIBFM output bias vector. In each row a single time bias.", latent_factors_inmem); MMOutputter_vec<vertex_data> mmoutput_bias_last_item(filename + "_L_bias.mm",M+N+K ,M+N+K+M , BIAS_POS, "This file contains LIBFM output bias vector. In each row a single last item bias.", latent_factors_inmem); MMOutputter_scalar gmean(filename + "_global_mean.mm", "This file contains LIBFM global mean which is required for computing predictions.", globalMean); logstream(LOG_INFO) << " LIBFM output files (in matrix market format): " << filename << "_U.mm" << ", " << filename + "_V.mm " << filename + "_T.mm, " << filename << "_L.mm, " << filename << "_global_mean.mm, " << filename << "_U_bias.mm " << filename << "_V_bias.mm, " << filename << "_T_bias.mm, " << filename << "_L_bias.mm " <<std::endl; } int main(int argc, const char ** argv) { print_copyright(); /* GraphChi initialization will read the command line arguments and the configuration file. */ graphchi_init(argc, argv); /* Metrics object for keeping track of performance counters and other information. Currently required. 
*/ metrics m("libfm"); //specific command line parameters for libfm libfm_rate = get_option_float("libfm_rate", libfm_rate); libfm_regw = get_option_float("libfm_regw", libfm_regw); libfm_regv = get_option_float("libfm_regv", libfm_regv); libfm_mult_dec = get_option_float("libfm_mult_dec", libfm_mult_dec); D = get_option_int("D", D); parse_command_line_args(); parse_implicit_command_line(); /* Preprocess data if needed, or discover preprocess files */ int nshards = convert_matrixmarket4<edge_data>(training, false); init_libfm(); if (validation != ""){ int vshards = convert_matrixmarket4<EdgeDataType>(validation, true, M==N, VALIDATION); init_validation_rmse_engine<VertexDataType, EdgeDataType>(pvalidation_engine, vshards, &libfm_predict, false, true, 1); } if (load_factors_from_file){ load_matrix_market_matrix(training + "_U.mm", 0, D); load_matrix_market_matrix(training + "_V.mm", M, D); load_matrix_market_matrix(training + "_T.mm", M+N, D); load_matrix_market_matrix(training + "_L.mm", M+N+K, D); vec user_bias = load_matrix_market_vector(training +"_U_bias.mm", false, true); vec item_bias = load_matrix_market_vector(training +"_V_bias.mm", false, true); vec time_bias = load_matrix_market_vector(training+ "_T_bias.mm", false, true); vec last_item_bias = load_matrix_market_vector(training+"_L_bias.m", false, true); for (uint i=0; i<M+N+K+M; i++){ if (i < M) latent_factors_inmem[i].bias = user_bias[i]; else if (i <M+N) latent_factors_inmem[i].bias = item_bias[i-M]; else if (i <M+N+K) latent_factors_inmem[i].bias = time_bias[i-M-N]; else latent_factors_inmem[i].bias = last_item_bias[i-M-N-K]; } vec gm = load_matrix_market_vector(training + "_global_mean.mm", false, true); globalMean = gm[0]; } /* Run */ LIBFMVerticesInMemProgram program; graphchi_engine<VertexDataType, EdgeDataType> engine(training, nshards, false, m); set_engine_flags(engine); pengine = &engine; engine.run(program, niters); /* Output test predictions in matrix-market format */ 
output_libfm_result(training); test_predictions3(&libfm_predict, 1); /* Report execution metrics */ if (!quiet) metrics_report(m); return 0; }
09jijiangwen-download
toolkits/collaborative_filtering/libfm.cpp
C++
asf20
12,740
/**
 * Copyright (c) 2009 Carnegie Mellon University.
 * All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an "AS
 * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied. See the License for the specific language
 * governing permissions and limitations under the License.
 *
 * For more about this software visit:
 *
 *     http://graphchi.org
 *
 * Written by Danny Bickson
 *
 * Distributed linear-algebra DSL used by the CF solvers. DistVec /
 * DistMat / DistDouble overload arithmetic operators which do NOT
 * compute anything directly: they record operands into the global
 * `mi` (math_info) descriptor, and the assignment operators trigger
 * one GraphChi pass (`pengine->run(program, 1)`) of the Axb update
 * that evaluates r = c*A*x + d*y element-wise. Consequently the
 * whole DSL is stateful and single-threaded per expression: only one
 * expression may be "in flight" between two assignments.
 */
#ifndef _MATH_HPP
#define _MATH_HPP

#include "types.hpp"
#include "eigen_wrapper.hpp"

extern graphchi_engine<VertexDataType, EdgeDataType> * pengine;
double regularization;
bool debug;
void print_vec(const char * name, const vec & pvec, bool high);

/**
 * Global descriptor of the pending linear-algebra expression.
 * Operator overloads below fill these fields in; Axb::update reads
 * them; reset_offsets() clears them after each evaluation.
 */
struct math_info{
  //for Axb operation
  int increment;                     // next free pvec slot handed out by increment_offset()
  double c;                          // scalar multiplier of the A*x (or I*x) term
  double d;                          // scalar multiplier of the additive y term
  int x_offset, b_offset , y_offset, r_offset, div_offset, prev_offset, div_const;
  bool A_offset, A_transpose;        // A_offset: expression contains the matrix A; A_transpose: use A'
  std::vector<std::string> names;
  bool use_diag;                     // whether Axb adds the (A_ii + regularization) diagonal term
  int ortho_repeats;                 // Gram-Schmidt re-orthogonalization passes (1..3)
  int start, end;                    // vertex-id range the Axb pass operates on

  //for backslash operation
  bool dist_sliced_mat_backslash;
  mat eDT;
  double maxval, minval;

  math_info(){ reset_offsets(); }

  // Return mi to its neutral state; called after every evaluated expression.
  void reset_offsets(){
    increment = 2;
    c=1.0; d=0.0;
    x_offset = b_offset = y_offset = r_offset = div_offset = prev_offset = -1;
    div_const = 0;
    A_offset = false;
    A_transpose = false;
    use_diag = true;
    start = end = -1;
    dist_sliced_mat_backslash = false;
  }
  int increment_offset(){ return increment++; }
};

bipartite_graph_descriptor info;   // global graph shape shared by the DSL
math_info mi;                      // the single pending-expression descriptor

#define MAX_PRINT_ITEMS 25
double runtime = 0;

/***
 * UPDATE FUNCTION (ROWS)
 */

/**
 * GraphChi programs need to subclass GraphChiProgram<vertex-type, edge-type>
 * class. The main logic is usually in the update function.
 */
struct Axb : public GraphChiProgram<VertexDataType, EdgeDataType> {

  /**
   * Vertex update function: evaluates one element of
   * r = (c*A*x [+ diag] + d*y) [/ div] as described by the global `mi`.
   */
  void update(graphchi_vertex<VertexDataType, EdgeDataType> &vertex, graphchi_context &gcontext) {
    // Skip vertices outside the active row/column range of this expression.
    if (vertex.id() < (uint)mi.start || vertex.id() >= (uint)mi.end)
      return;

    vertex_data& user = latent_factors_inmem[vertex.id()];
    bool rows = vertex.id() < (uint)info.get_start_node(false);
    if (info.is_square())
      rows = mi.A_transpose;
    assert(mi.r_offset >=0);

    //store previous value for convergence detection
    if (mi.prev_offset >= 0)
      user.pvec[mi.prev_offset ] = user.pvec[mi.r_offset];

    double val = 0;
    assert(mi.x_offset >=0 || mi.y_offset>=0);

    /*** COMPUTE r = c*A*x ********/
    if (mi.A_offset && mi.x_offset >= 0){
      for(int e=0; e < vertex.num_edges(); e++) {
        const edge_data & edge = vertex.edge(e)->get_data();
        const vertex_data & movie = latent_factors_inmem[vertex.edge(e)->vertex_id()];
        val += (edge.weight * movie.pvec[mi.x_offset]);
      }
      if (info.is_square() && mi.use_diag)// add the diagonal term
        val += (/*mi.c**/ (user.A_ii+ regularization) * user.pvec[mi.x_offset]);
      val *= mi.c;
    }
    /***** COMPUTE r = c*I*x *****/
    else if (!mi.A_offset && mi.x_offset >= 0){
      val = mi.c*user.pvec[mi.x_offset];
    }

    /**** COMPUTE r += d*y (optional) ***/
    if (mi.y_offset>= 0){
      val += mi.d*user.pvec[mi.y_offset];
    }

    /***** compute r = (...) / div */
    if (mi.div_offset >= 0){
      val /= user.pvec[mi.div_offset];
    }
    user.pvec[mi.r_offset] = val;
  } //end update
}; //end Axb

Axb program;   // single shared program instance run by the assignment operators

// Bind the DSL's global graph descriptor and orthogonalization setting.
void init_math(bipartite_graph_descriptor & _info, int ortho_repeats = 3){
  info = _info;
  mi.reset_offsets();
  mi.ortho_repeats = ortho_repeats;
}

class DistMat;
class DistDouble;

/**
 * A distributed vector: a view of one pvec slot (`offset`) across the
 * vertex range [start,end). Arithmetic operators only record into `mi`;
 * operator= evaluates (see file header).
 */
class DistVec{
  public:
  int offset;         //real location in memory
  int display_offset; //offset to print out
  int prev_offset;    // slot holding the previous value (for convergence checks), or -1
  std::string name;   //optional
  bool transpose;
  bipartite_graph_descriptor info;
  int start;
  int end;

  // Derive [start,end) from the graph side selected by `transpose`.
  void init(){
    start = info.get_start_node(!transpose);
    end = info.get_end_node(!transpose);
    assert(start < end && start >= 0 && end >= 1);
    //debug_print(name);
  };

  int size(){ return end-start; }

  DistVec(const bipartite_graph_descriptor &_info, int _offset, bool _transpose, const std::string & _name){
    offset = _offset;
    display_offset = _offset;
    name = _name;
    info = _info;
    transpose = _transpose;
    prev_offset = -1;
    init();
  }

  DistVec(const bipartite_graph_descriptor &_info, int _offset, bool _transpose, const std::string & _name, int _prev_offset){
    offset = _offset;
    display_offset = _offset;
    name = _name;
    info = _info;
    transpose = _transpose;
    assert(_prev_offset < data_size);
    prev_offset = _prev_offset;
    init();
  }

  // Unary minus: record d = -1 so the pending y term is negated.
  DistVec& operator-(){
    mi.d=-1.0;
    return *this;
  }
  DistVec& operator-(const DistVec & other){
    mi.x_offset = offset;
    mi.y_offset = other.offset;
    transpose = other.transpose;
    if (mi.d == 0)
      mi.d = -1.0;
    else
      mi.d*=-1.0;
    return *this;
  }
  DistVec& operator+(){
    if (mi.d == 0)
      mi.d=1.0;
    return *this;
  }
  DistVec& operator+(const DistVec &other){
    mi.x_offset =offset;
    mi.y_offset = other.offset;
    transpose = other.transpose;
    return *this;
  }
  DistVec& operator+(const DistMat &other);
  DistVec& operator-(const DistMat &other);
  DistVec& operator/(const DistVec &other){
    mi.div_offset = other.offset;
    return *this;
  }
  DistVec& operator/(const DistDouble & other);
  DistVec& operator/(double val){
    assert(val != 0);
    assert(mi.d == 0);
    mi.d = 1/val;
    return *this;
  }

  // Assignment from a DistVec expression: finalize `mi` and run one Axb pass.
  DistVec& operator=(const DistVec & vec){
    assert(offset < (info.is_square() ? 2*data_size: data_size));
    if (mi.x_offset == -1 && mi.y_offset == -1){
      mi.y_offset = vec.offset;  // plain copy: treat rhs as the additive y term
    }
    mi.r_offset = offset;
    assert(prev_offset < data_size);
    mi.prev_offset = prev_offset;
    if (mi.d == 0.0)
      mi.d=1.0;
    transpose = vec.transpose;
    end = vec.end;
    start = vec.start;
    mi.start = start;
    mi.end = end;
    //graphchi_engine<VertexDataType, EdgeDataType> engine(training, nshards, false, m);
    //set_engine_flags(engine);
    //Axb program;
    pengine->run(program, 1);
    debug_print(name);
    mi.reset_offsets();
    return *this;
  }

  // Assignment from an in-memory Eigen vector: direct element copy, no engine pass.
  DistVec& operator=(const vec & pvec){
    assert(offset >= 0);
    assert(pvec.size() == info.num_nodes(true) || pvec.size() == info.num_nodes(false));
    assert(start < end);
    if (!info.is_square() && pvec.size() == info.num_nodes(false)){
      transpose = true;
    }
    else {
      transpose = false;
    }
    for (int i=start; i< end; i++){
      latent_factors_inmem[i].pvec[offset] = pvec[i-start];
    }
    debug_print(name);
    return *this;
  }

  // Materialize this distributed vector into a dense Eigen vector.
  vec to_vec(){
    vec ret = zeros(end-start);
    for (int i=start; i< end; i++){
      ret[i-start] = latent_factors_inmem[i].pvec[offset];
    }
    return ret;
  }

  double get_pos(int i){ return latent_factors_inmem[i].pvec[offset]; }

  // Print up to MAX_PRINT_ITEMS absolute values when `debug` is on.
  void debug_print(const char * name){
    if (debug){
      std::cout<<name<<"["<<display_offset<<"]" << std::endl;
      for (int i=start; i< std::min(end, start+MAX_PRINT_ITEMS); i++){
        //std::cout<<latent_factors_inmem(i).pvec[(mi.r_offset==-1)?offset:mi.r_offset]<<" ";
        printf("%.5lg ", fabs(latent_factors_inmem[i].pvec[(mi.r_offset==-1)?offset:mi.r_offset]));
      }
      printf("\n");
    }
  }
  void debug_print(std::string name){ return debug_print(name.c_str());}

  double operator[](int i){
    assert(i < end - start);
    return latent_factors_inmem[i+start].pvec[offset];
  }

  DistDouble operator*(const DistVec & other);  // dot product (evaluated immediately, see below)

  DistVec& operator*(const double val){
    assert(val!= 0);
    mi.d=val;
    return *this;
  }
  DistVec& operator*(const DistDouble &dval);
  DistMat &operator*(DistMat & v);

  DistVec& _transpose() {
    /*if (!config.square){ start = n; end = m+n; }*/
    return *this;
  }

  DistVec& operator=(DistMat &mat);
};

/**
 * A set of adjacent pvec slots [start_offset, end_offset) viewed as the
 * columns of a distributed matrix; operator[] yields a column DistVec.
 */
class DistSlicedMat{
  public:
  bipartite_graph_descriptor info;
  int start_offset;
  int end_offset;
  std::string name; //optional
  int start;
  int end;
  bool transpose;

  DistSlicedMat(int _start_offset, int _end_offset, bool _transpose, const bipartite_graph_descriptor &_info, std::string _name){
    //assert(_start_offset < _end_offset);
    assert(_start_offset >= 0);
    assert(_info.total() > 0);
    transpose = _transpose;
    info = _info;
    init();
    start_offset = _start_offset;
    end_offset = _end_offset;
    name = _name;
  }

  DistSlicedMat& operator=(DistMat & other);

  void init(){
    start = info.get_start_node(!transpose);
    end = info.get_end_node(!transpose);
    assert(start < end && start >= 0 && end >= 1);
    //debug_print(name);
  };

  // dim==1: number of rows (vertices); otherwise: number of columns (slots).
  int size(int dim){ return (dim == 1) ? (end-start) : (end_offset - start_offset) ; }

  // Copy columns [start_col,end_col) of `pmat` into the corresponding slots.
  void set_cols(int start_col, int end_col, const mat& pmat){
    assert(start_col >= 0);
    assert(end_col <= end_offset - start_offset);
    assert(pmat.rows() == end-start);
    assert(pmat.cols() >= end_col - start_col);
    for (int i=start_col; i< end_col; i++)
      this->operator[](i) = get_col(pmat, i-start_col);
  }

  // NOTE(review): this reads column (i - start_col) instead of column i, so
  // for start_col > 0 it returns columns [0, end_col-start_col) rather than
  // [start_col, end_col) — looks like an indexing bug; confirm against callers.
  mat get_cols(int start_col, int end_col){
    assert(start_col < end_offset - start_offset);
    assert(start_offset + end_col <= end_offset);
    mat retmat = zeros(end-start, end_col - start_col);
    for (int i=start_col; i< end_col; i++)
      set_col(retmat, i-start_col, this->operator[](i-start_col).to_vec());
    return retmat;
  }

  void operator=(mat & pmat){
    assert(end_offset-start_offset <= pmat.cols());
    assert(end-start == pmat.rows());
    set_cols(0, pmat.cols(), pmat);
  }

  std::string get_name(int pos){
    assert(pos < end_offset - start_offset);
    assert(pos >= 0);
    return name;
  }

  // Column accessor; the returned DistVec aliases slot start_offset+pos.
  DistVec operator[](int pos){
    assert(pos < end_offset-start_offset);
    assert(pos >= 0);
    DistVec ret(info, start_offset + pos, transpose, get_name(pos));
    ret.display_offset = pos;
    return ret;
  }
};

/*
 * wrapper for computing r = c*A*x+d*b*y
 */
class DistMat{
  public:
  bool transpose;
  bipartite_graph_descriptor info;

  DistMat(const bipartite_graph_descriptor& _info) {
    info = _info;
    transpose = false;
  };

  DistMat &operator*(const DistVec & v){
    mi.x_offset = v.offset;
    mi.A_offset = true;
    //v.transpose = transpose;
    //r_offset = A_offset;
    return *this;
  }
  DistMat &operator*(const DistDouble &d);

  DistMat &operator-(){
    mi.c=-1.0;
    return *this;
  }
  DistMat &operator/(const DistVec & v){
    mi.div_offset = v.offset;
    return *this;
  }
  DistMat &operator+(){
    mi.c=1.0;
    return *this;
  }
  DistMat &operator+(const DistVec &v){
    mi.y_offset = v.offset;
    if (mi.d == 0.0)
      mi.d=1.0;
    return *this;
  }
  DistMat &operator-(const DistVec &v){
    mi.y_offset = v.offset;
    if (mi.d == 0.0)
      mi.d=-1.0;
    else
      mi.d*=-1.0;
    return *this;
  }

  // Mark the pending expression to use A' instead of A.
  DistMat & _transpose(){
    transpose = true;
    mi.A_transpose = true;
    return *this;
  }
  DistMat & operator~(){ return _transpose(); }

  DistMat & backslash(DistSlicedMat & U){
    mi.dist_sliced_mat_backslash = true;
    transpose = U.transpose;
    return *this;
  }

  void set_use_diag(bool use){
    mi.use_diag = use;
  }
};

// Assignment from a matrix expression (e.g. v = A*x): run one Axb pass.
DistVec& DistVec::operator=(DistMat &mat){
  mi.r_offset = offset;
  assert(prev_offset < data_size);
  mi.prev_offset = prev_offset;
  transpose = mat.transpose;
  mi.start = info.get_start_node(!transpose);
  mi.end = info.get_end_node(!transpose);
  //graphchi_engine<VertexDataType, EdgeDataType> engine(training, nshards, false, m);
  //set_engine_flags(engine);
  //Axb program;
  pengine->run(program, 1);
  debug_print(name);
  mi.reset_offsets();
  mat.transpose = false;   // clear the transpose flag for the next expression
  return *this;
}

DistVec& DistVec::operator+(const DistMat &other){
  mi.y_offset = offset;
  transpose = other.transpose;
  return *this;
}

DistVec& DistVec::operator-(const DistMat & other){
  mi.y_offset = offset;
  transpose = other.transpose;
  if (mi.c == 0)
    mi.c = -1;
  else
    mi.c *= -1;
  return *this;
}

DistMat& DistVec::operator*(DistMat & v){
  mi.x_offset = offset;
  mi.A_offset = true;
  return v;
}

/** Scalar wrapper participating in the DSL (records c or d into `mi`). */
class DistDouble{
  public:
  double val;
  std::string name;

  DistDouble() {};
  DistDouble(double _val) : val(_val) {};

  DistVec& operator*(DistVec & dval){
    mi.d=val;
    return dval;
  }
  DistMat& operator*(DistMat & mat){
    mi.c = val;
    return mat;
  }
  DistDouble operator/(const DistDouble dval){
    DistDouble mval;
    mval.val = val / dval.val;
    return mval;
  }
  bool operator<(const double other){
    return val < other;
  }
  DistDouble & operator=(const DistDouble & other){
    val = other.val;
    debug_print(name);
    return *this;
  }
  bool operator==(const double _val){
    return val == _val;
  }
  void debug_print(const char * name){
    std::cout<<name<<" "<<val<<std::endl;
  }
  double toDouble(){ return val; }
  void debug_print(std::string name){ return debug_print(name.c_str()); }
};

// Dot product: evaluated eagerly in memory (no engine pass), scaled by mi.d.
DistDouble DistVec::operator*(const DistVec & vec){
  mi.y_offset = offset;
  mi.b_offset = vec.offset;
  if (mi.d == 0)
    mi.d = 1.0;
  assert(mi.y_offset >=0 && mi.b_offset >= 0);
  double val = 0;
  for (int i=start; i< end; i++){
    const vertex_data * data = &latent_factors_inmem[i];
    double * pv = (double*)&data->pvec[0];
    // if (y_offset >= 0 && b_offset == -1) //val += pv[y_offset] * pv[y_offset];
    val += mi.d* pv[mi.y_offset] * pv[mi.b_offset];
  }
  mi.reset_offsets();
  DistDouble mval;
  mval.val = val;
  return mval;
}

DistVec& DistVec::operator*(const DistDouble &dval){
  mi.d = dval.val;
  return *this;
}

// Matlab-like size(): dimension of A along axis `pos` (1 or 2).
int size(DistMat & A, int pos){
  assert(pos == 1 || pos == 2);
  return A.info.num_nodes(!A.transpose);
}

DistMat &DistMat::operator*(const DistDouble &d){
  mi.c = d.val;
  return *this;
}

DistDouble sqrt(DistDouble & dval){
  DistDouble mval;
  mval.val=sqrt(dval.val);
  return mval;
}

// L2 norm of a distributed vector, computed eagerly in memory.
DistDouble norm(const DistVec &vec){
  assert(vec.offset>=0);
  assert(vec.start < vec.end);
  DistDouble mval;
  mval.val = 0;
  for (int i=vec.start; i < vec.end; i++){
    const vertex_data * data = &latent_factors_inmem[i];
    double * px = (double*)&data->pvec[0];
    mval.val += px[vec.offset]*px[vec.offset];
  }
  mval.val = sqrt(mval.val);
  return mval;
}

// Norm of a matrix expression: evaluates it into slot 0, then takes the
// vector norm. NOTE(review): uses the global `info`, not mat.info, and
// clobbers pvec slot 0 — verify callers do not keep live data there.
DistDouble norm(DistMat & mat){
  DistVec vec(info, 0, mat.transpose, "norm");
  vec = mat;
  return norm((const DistVec&)vec);
}

// Diagonal of A (square matrices only); reads A_ii stored per vertex.
vec diag(DistMat & mat){
  assert(info.is_square());
  vec ret = zeros(info.total());
  for (int i=0; i< info.total(); i++){
    ret[i] = latent_factors_inmem[i].A_ii;
  }
  return ret;
}

#if 0
// Older DSL-based Gram-Schmidt implementation, kept for reference.
void orthogonalize_vs_all(DistSlicedMat & mat, int curoffset){
  assert(mi.ortho_repeats >=1 && mi.ortho_repeats <= 3);
  INITIALIZE_TRACER(orthogonalize_vs_alltrace, "orthogonalization step");
  BEGIN_TRACEPOINT(orthogonalize_vs_alltrace);
  bool old_debug = debug;
  debug = false;
  DistVec current = mat[curoffset];
  //DistDouble * alphas = new DistDouble[curoffset];
  //cout<<current.to_vec().transpose() << endl;
  for (int j=0; j < mi.ortho_repeats; j++){
    for (int i=0; i< curoffset; i++){
      DistDouble alpha = mat[i]*current;
      // //cout<<mat[i].to_vec().transpose()<<endl;
      // //cout<<"alpha is: " <<alpha.toDouble()<<endl;
      if (alpha.toDouble() > 1e-10)
        current = current - mat[i]*alpha;
    }
  }
  END_TRACEPOINT(orthogonalize_vs_alltrace);
  debug = old_debug;
  current.debug_print(current.name);
}
#endif

/**
 * Classical Gram-Schmidt: orthogonalize column `curoffset` of `mat`
 * against all earlier columns (repeated mi.ortho_repeats times for
 * numerical stability), then normalize it. On return `alpha` holds the
 * pre-normalization L2 norm of the column.
 */
void orthogonalize_vs_all(DistSlicedMat & mat, int curoffset, double &alpha){
  assert(mi.ortho_repeats >=1 && mi.ortho_repeats <= 3);
  bool old_debug = debug;
  debug = false;
  DistVec current = mat[curoffset];
  assert(mat.start_offset <= current.offset);
  double * alphas = new double[curoffset];
  //DistDouble * alphas = new DistDouble[curoffset];
  //cout<<current.to_vec().transpose() << endl;
  if (curoffset > 0){
    for (int j=0; j < mi.ortho_repeats; j++){
      memset(alphas, 0, sizeof(double)*curoffset);
      // Projection coefficients alpha_i = <col_i, current>.
#pragma omp parallel for
      for (int i=mat.start_offset; i< current.offset; i++){
        for (int k=info.get_start_node(!current.transpose); k< info.get_end_node(!current.transpose); k++){
          alphas[i-mat.start_offset] += latent_factors_inmem[k].pvec[i] * latent_factors_inmem[k].pvec[current.offset];
        }
      }
      // Subtract the projections: current -= alpha_i * col_i.
      for (int i=mat.start_offset; i< current.offset; i++){
#pragma omp parallel for
        for (int k=info.get_start_node(!current.transpose); k< info.get_end_node(!current.transpose); k++){
          latent_factors_inmem[k].pvec[current.offset] -= alphas[i-mat.start_offset] * latent_factors_inmem[k].pvec[i];
        }
      }
    } //for ortho_repeast
  }
  delete [] alphas;
  debug = old_debug;
  current.debug_print(current.name);
  // alpha = 0;
  double sum = 0;
  int k;
  //#pragma omp parallel for private(k) reduction(+: sum)
  for (k=info.get_start_node(!current.transpose); k< info.get_end_node(!current.transpose); k++){
    sum = sum + pow(latent_factors_inmem[k].pvec[current.offset],2);
  }
  alpha = sqrt(sum);
  // Normalize only when the norm is non-negligible (avoid division blow-up).
  if (alpha >= 1e-10 ){
#pragma omp parallel for
    for (int k=info.get_start_node(!current.transpose); k< info.get_end_node(!current.transpose); k++){
      latent_factors_inmem[k].pvec[current.offset]/=alpha;
    }
  }
}

/**
 * Scale column `curoffset` by 1/a and subtract the (scaled) projections
 * onto all earlier columns of `mat`.
 */
void multiply(DistSlicedMat & mat, int curoffset, double a){
  assert(a>0);
  DistVec current = mat[curoffset];
  assert(mat.start_offset <= current.offset);
  vec result = zeros(curoffset);
  if (curoffset > 0){
#pragma omp parallel for
    for (int i=mat.start_offset; i< current.offset; i++){
      for (int k=info.get_start_node(!current.transpose); k< info.get_end_node(!current.transpose); k++){
        result[i-mat.start_offset] += latent_factors_inmem[k].pvec[i] * latent_factors_inmem[k].pvec[current.offset];
      }
    }
    // NOTE(review): this indexes pvec[curoffset] while every other loop in
    // this function uses pvec[current.offset] (= start_offset + curoffset);
    // for start_offset != 0 these differ — confirm which slot is intended.
#pragma omp parallel for
    for (int k=info.get_start_node(!current.transpose); k< info.get_end_node(!current.transpose); k++){
      latent_factors_inmem[k].pvec[curoffset] /= a;
    }
    for (int i=mat.start_offset; i< current.offset; i++){
#pragma omp parallel for
      for (int k=info.get_start_node(!current.transpose); k< info.get_end_node(!current.transpose); k++){
        latent_factors_inmem[k].pvec[current.offset] -= result[i-mat.start_offset]/a * latent_factors_inmem[k].pvec[i];
      }
    }
  }
  current.debug_print(current.name);
}

DistVec& DistVec::operator/(const DistDouble & other){
  assert(other.val != 0);
  assert(mi.d == 0);
  mi.d = 1/other.val;
  return *this;
}

#endif //_MATH_HPP
09jijiangwen-download
toolkits/collaborative_filtering/math.hpp
C++
asf20
19,584
/**
 * @file
 * @author  Danny Bickson
 * @version 1.0
 *
 * @section LICENSE
 *
 * Copyright [2012] [Carnegie Mellon University]
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @section DESCRIPTION
 * Matrix factorization using RBM (Restricted Bolzman Machines) algorithm.
 * Algorithm is described in the paper:
 * G. Hinton. A Practical Guide to Training Restricted Boltzmann Machines.
 * University of Toronto Tech report UTML TR 2010-003
 */

#include "common.hpp"
#include "eigen_wrapper.hpp"

// Hyper-parameters (all overridable from the command line in main()).
double rbm_alpha = 0.1;          // learning rate; decayed by rbm_mult_step_dec each iteration
double rbm_beta = 0.06;          // weight-decay (regularization) coefficient
int    rbm_bins = 6;             // number of discrete rating bins
double rbm_scaling = 1;          // divide ratings by this to map them onto [0, rbm_bins)
double rbm_mult_step_dec= 0.9;   // multiplicative learning-rate decay per iteration

// Vertex-id layout: users occupy [0, M), items [M, M+N), time nodes [M+N, ...).
bool is_user(vid_t id){ return id < M; }
// FIX: upper bound was `id < N`, which is wrong whenever M > 0 — item ids
// run from M to M+N-1 (consistent with is_time below and rbm_init's layout).
bool is_item(vid_t id){ return id >= M && id < M+N; }
bool is_time(vid_t id){ return id >= M+N; }

/** Fill a[0..d) with uniform random values in (-c/2, c/2). */
void setRand2(double * a, int d, float c){
  for(int i = 0; i < d; ++i)
    a[i] = ((drand48() - 0.5) * c);
}

/** Dot product of two length-D arrays (accumulated in float). */
float dot(double * a, double * b){
  float ret = 0;
  for(int i = 0; i < D; ++i)
    ret += a[i] * b[i];
  return ret;
}

#define BIAS_POS -1

struct vertex_data {
  vec pvec;     //storing the feature vector
  double bias;  // item bias term (serialized separately via BIAS_POS)

  vertex_data() { bias = 0; }

  // set_val/get_val use the BIAS_POS sentinel to address the bias field.
  void set_val(int index, float val){
    if (index == BIAS_POS)
      bias = val;
    else
      pvec[index] = val;
  }
  float get_val(int index){
    if (index== BIAS_POS)
      return bias;
    else
      return pvec[index];
  }
};

/*
 * User view over vertex_data::pvec (length 3*D):
 *   h  = pvec[0..D)    — real-valued hidden unit activations
 *   h0 = pvec[D..2D)   — sampled hidden states, positive (data) phase
 *   h1 = pvec[2D..3D)  — sampled hidden states, negative (reconstruction) phase
 */
struct rbm_user{
  double * h;
  double * h0;
  double * h1;

  rbm_user(const vertex_data & vdata){
    h = (double*)&vdata.pvec[0];
    h0 = h + D;
    h1 = h0 + D;
  }

  rbm_user & operator=(vertex_data & data){
    h = &data.pvec[0];
    h0 = h + D;
    h1 = h0 + D;
    return * this;
  }
};

/**
 * Item view over vertex_data (pvec length rbm_bins + rbm_bins*D):
 *   ni = bias                    — edge count accumulator
 *   bi = pvec[0..rbm_bins)       — per-bin visible biases
 *   w  = pvec[rbm_bins..)        — rbm_bins x D visible-hidden weights
 */
struct rbm_movie{
  double * bi;
  double * ni;
  double * w;

  rbm_movie(const vertex_data& vdata){
    ni = (double*)&vdata.bias;
    bi = (double*)&vdata.pvec[0];
    w = bi + rbm_bins;
  }

  rbm_movie & operator=(vertex_data & data){
    ni = (double*)&data.bias;
    bi = (double*)&data.pvec[0];
    w = bi + rbm_bins;
    return * this;
  }
};

/**
 * Predict a rating as the softmax-weighted mean over the bins, clamp to
 * [minval, maxval] and rescale. Returns the squared error vs `rating`.
 */
float rbm_predict(const rbm_user & usr, const rbm_movie & mov, const float rating, double & prediction, void * extra){
  float ret = 0;
  double nn = 0;
  for(int r = 0; r < rbm_bins; ++r){
    double zz = exp(mov.bi[r] + dot(usr.h, &mov.w[r*D]));
    if (std::isinf(zz))
      std::cout<<" mov.bi[r] " << mov.bi[r] << " dot: " << dot(usr.h, &mov.w[r*D]) << std::endl;
    ret += zz * (float)(r);
    assert(!std::isnan(ret));
    nn += zz;  // softmax normalizer
  }
  assert(!std::isnan(ret));
  assert(std::fabs(nn) > 1e-32);
  ret /= nn;
  if(ret < minval) ret = minval;
  else if(ret > maxval) ret = maxval;
  assert(!std::isnan(ret));
  prediction = ret * rbm_scaling;
  assert(!std::isnan(prediction));
  return pow(prediction - rating,2);
}

/** Adapter overload used by the validation/test engines. */
float rbm_predict(const vertex_data & usr, const vertex_data & mov, const float rating, double & prediction, void * extra){
  return rbm_predict(rbm_user((vertex_data&)usr), rbm_movie((vertex_data&)mov), rating, prediction, NULL);
}

/**
 * Sample a rating bin from the softmax distribution induced by the
 * POSITIVE-phase hidden states h0 (Gibbs step of CD-1).
 */
float predict1(const rbm_user & usr, const rbm_movie & mov, const float rating, double & prediction){
  vec zz = zeros(rbm_bins);
  float szz = 0;
  for(int r = 0; r < rbm_bins; ++r){
    zz[r] = exp(mov.bi[r] + dot(usr.h0, &mov.w[r*D]));
    szz += zz[r];
  }
  // Inverse-CDF sampling over the (unnormalized) bin weights.
  float rd = drand48() * szz;
  szz = 0;
  int ret = 0;
  for(int r = 0; r < rbm_bins; ++r){
    szz += zz[r];
    if(rd < szz){
      ret = r;
      break;
    }
  }
  prediction = ret * rbm_scaling;
  assert(!std::isnan(prediction));
  return pow(prediction - rating, 2);
}

inline float sigmoid(float x){
  return 1 / (1 + exp(-1 * x));
}

#include "util.hpp"

/**
 * Type definitions. Remember to create suitable graph shards using the
 * Sharder-program.
 */
typedef vertex_data VertexDataType;
typedef float EdgeDataType;  // Edges store the "rating" of user->movie pair

graphchi_engine<VertexDataType, EdgeDataType> * pengine = NULL;
graphchi_engine<VertexDataType, EdgeDataType> * pvalidation_engine = NULL;
std::vector<vertex_data> latent_factors_inmem;

#include "rmse.hpp"
#include "rmse_engine.hpp"
#include "io.hpp"

/**
 * GraphChi programs need to subclass GraphChiProgram<vertex-type, edge-type>
 * class. The main logic is usually in the update function.
 *
 * Iteration 0 counts per-bin rating histograms on items; iteration 1 turns
 * them into log-bias initializations; iterations >= 2 run CD-1 training.
 */
struct RBMVerticesInMemProgram : public GraphChiProgram<VertexDataType, EdgeDataType> {

  /** Called before an iteration is started. */
  void before_iteration(int iteration, graphchi_context &gcontext) {
    reset_rmse(gcontext.execthreads);
  }

  /** Called after an iteration has finished. */
  void after_iteration(int iteration, graphchi_context &gcontext) {
    rbm_alpha *= rbm_mult_step_dec;  // decay the learning rate
    training_rmse(iteration, gcontext);
    if (iteration >= 2)              // no meaningful model before training starts
      run_validation(pvalidation_engine, gcontext);
    else
      std::cout<<std::endl;
  }

  /** Vertex update function. */
  void update(graphchi_vertex<VertexDataType, EdgeDataType> &vertex, graphchi_context &gcontext) {
    if (gcontext.iteration == 0){
      // Phase 1: accumulate per-bin rating counts into each item's bi[].
      if (is_user(vertex.id()) && vertex.num_outedges() > 0){
        vertex_data& user = latent_factors_inmem[vertex.id()];
        user.pvec = zeros(D*3);
        for(int e=0; e < vertex.num_outedges(); e++) {
          rbm_movie mov = latent_factors_inmem[vertex.edge(e)->vertex_id()];
          float observation = vertex.edge(e)->get_data();
          int r = (int)(observation/rbm_scaling);
          assert(r < rbm_bins);
          mov.bi[r]++;
        }
      }
      return;
    }
    else if (gcontext.iteration == 1){
      // Phase 2: convert counts into log-probability biases; init weights.
      if (vertex.num_inedges() > 0){
        rbm_movie mov = latent_factors_inmem[vertex.id()];
        setRand2(mov.w, D*rbm_bins, 0.001);
        for(int r = 0; r < rbm_bins; ++r){
          mov.bi[r] /= (double)vertex.num_inedges();
          mov.bi[r] = log(1E-9 + mov.bi[r]);  // 1E-9 guards log(0) for empty bins
          if (mov.bi[r] > 1000){
            // assert fires in debug builds; LOG_FATAL aborts in release builds.
            assert(false);
            logstream(LOG_FATAL)<<"Numerical overflow" <<std::endl;
          }
        }
      }
      return; //done with initialization
    }

    //go over all user nodes — one CD-1 step per user.
    if (is_user(vertex.id()) && vertex.num_outedges()){
      vertex_data & user = latent_factors_inmem[vertex.id()];
      user.pvec = zeros(3*D);  // recompute h/h0/h1 from scratch each iteration
      rbm_user usr(user);
      vec v1 = zeros(vertex.num_outedges());

      // Positive phase: accumulate hidden activations from observed ratings.
      for(int e=0; e < vertex.num_outedges(); e++) {
        float observation = vertex.edge(e)->get_data();
        rbm_movie mov = latent_factors_inmem[vertex.edge(e)->vertex_id()];
        int r = (int)(observation / rbm_scaling);
        assert(r < rbm_bins);
        for(int k=0; k < D; k++){
          usr.h[k] += mov.w[D*r + k];
          assert(!std::isnan(usr.h[k]));
        }
      }
      // Sample binary hidden states h0 ~ Bernoulli(sigmoid(h)).
      for(int k=0; k < D; k++){
        usr.h[k] = sigmoid(usr.h[k]);
        if (drand48() < usr.h[k])
          usr.h0[k] = 1;
        else
          usr.h0[k] = 0;
      }

      // Negative phase, step 1: reconstruct visible ratings v1 from h0.
      int i = 0;
      double prediction;
      for(int e=0; e < vertex.num_outedges(); e++) {
        rbm_movie mov = latent_factors_inmem[vertex.edge(e)->vertex_id()];
        float observation = vertex.edge(e)->get_data();
        predict1(usr, mov, observation, prediction);
        int vi = (int)(prediction / rbm_scaling);
        v1[i] = vi;
        i++;
      }

      // Negative phase, step 2: hidden activations from the reconstruction.
      i = 0;
      for(int e=0; e < vertex.num_outedges(); e++) {
        rbm_movie mov = latent_factors_inmem[vertex.edge(e)->vertex_id()];
        int r = (int)v1[i];
        for (int k=0; k< D;k++){
          usr.h1[k] += mov.w[r*D+k];
        }
        i++;
      }
      for (int k=0; k < D; k++){
        usr.h1[k] = sigmoid(usr.h1[k]);
        if (drand48() < usr.h1[k])
          usr.h1[k] = 1;
        else
          usr.h1[k] = 0;
      }

      // Contrastive-divergence weight update + training RMSE accumulation.
      i = 0;
      for(int e=0; e < vertex.num_outedges(); e++) {
        rbm_movie mov = latent_factors_inmem[vertex.edge(e)->vertex_id()];
        float observation = vertex.edge(e)->get_data();
        double prediction;
        rbm_predict(user, mov, observation, prediction, NULL);
        double pui = prediction / rbm_scaling;
        double rui = observation / rbm_scaling;
        rmse_vec[omp_get_thread_num()] += (pui - rui) * (pui - rui);
        //nn += 1.0;
        int vi0 = (int)(rui);   // data-driven bin
        int vi1 = (int)v1[i];   // reconstruction bin
        for (int k = 0; k < D; k++){
          mov.w[D*vi0+k] += rbm_alpha * (usr.h0[k] - rbm_beta * mov.w[vi0*D+k]);
          assert(!std::isnan(mov.w[D*vi0+k]));
          mov.w[D*vi1+k] -= rbm_alpha * (usr.h1[k] + rbm_beta * mov.w[vi1*D+k]);
          assert(!std::isnan(mov.w[D*vi1+k]));
        }
        i++;
      }
    }
  }
};

/** Dump the learned factors (U, V, item biases) in matrix-market format. */
void output_rbm_result(std::string filename) {
  MMOutputter_mat<vertex_data> user_mat(filename + "_U.mm", 0, M, "This file contains RBM output matrix U. In each row D factors of a single user node.", latent_factors_inmem);
  MMOutputter_mat<vertex_data> mmoutput_right(filename + "_V.mm", M ,M+N, "This file contains RBM output matrix V. In each row D factors of a single item node.", latent_factors_inmem);
  MMOutputter_vec<vertex_data> mmoutput_bias_right(filename + "_V_bias.mm",M ,M+N , BIAS_POS, "This file contains RBM output bias vector. In each row a single item ni.", latent_factors_inmem);
  logstream(LOG_INFO) << "RBM output files (in matrix market format): " << filename << "_U.mm" << ", " << filename + "_V.mm " << std::endl;
}

/** Allocate per-vertex storage; items get rbm_bins biases + rbm_bins*D weights. */
void rbm_init(){
  srand48(time(NULL));
  latent_factors_inmem.resize(M+N);
#pragma omp parallel for
  for(int i = 0; i < (int)N; ++i){
    vertex_data & movie = latent_factors_inmem[M+i];
    movie.pvec = zeros(rbm_bins + D * rbm_bins);
    movie.bias = 0;
  }
  logstream(LOG_INFO) << "RBM initialization ok" << std::endl;
}

int main(int argc, const char ** argv) {
  print_copyright();

  //* GraphChi initialization will read the command line arguments and the configuration file. */
  graphchi_init(argc, argv);

  /* Metrics object for keeping track of performance counters and other information. Currently required. */
  metrics m("rbm-inmemory-factors");

  /* Basic arguments for application. NOTE: File will be automatically 'sharded'. */
  rbm_bins = get_option_int("rbm_bins", rbm_bins);
  rbm_alpha = get_option_float("rbm_alpha", rbm_alpha);
  rbm_beta = get_option_float("rbm_beta", rbm_beta);
  rbm_mult_step_dec = get_option_float("rbm_mult_step_dec", rbm_mult_step_dec);
  rbm_scaling = get_option_float("rbm_scaling", rbm_scaling);

  parse_command_line_args();
  parse_implicit_command_line();

  mytimer.start();

  /* Preprocess data if needed, or discover preprocess files */
  int nshards = convert_matrixmarket<float>(training);
  rbm_init();

  if (validation != ""){
    int vshards = convert_matrixmarket<EdgeDataType>(validation, NULL, 0, 0, 3, VALIDATION);
    init_validation_rmse_engine<VertexDataType, EdgeDataType>(pvalidation_engine, vshards, &rbm_predict);
  }

  /* load initial state from disk (optional) */
  if (load_factors_from_file){
    load_matrix_market_matrix(training + "_U.mm", 0, 3*D);
    load_matrix_market_matrix(training + "_V.mm", M, rbm_bins*(D+1));
    vec item_bias = load_matrix_market_vector(training +"_V_bias.mm", false, true);
    for (uint i=0; i< N; i++){
      latent_factors_inmem[M+i].bias = item_bias[i];
    }
  }

  print_config();

  /* Run */
  RBMVerticesInMemProgram program;
  graphchi_engine<VertexDataType, EdgeDataType> engine(training, nshards, false, m);
  set_engine_flags(engine);
  pengine = &engine;
  engine.run(program, niters);

  /* Output latent factor matrices in matrix-market format */
  output_rbm_result(training);
  test_predictions(&rbm_predict);

  /* Report execution metrics */
  if (!quiet)
    metrics_report(m);
  return 0;
}
09jijiangwen-download
toolkits/collaborative_filtering/rbm.cpp
C++
asf20
12,596
/**
 * @file
 * @author  Danny Bickson
 * @version 1.0
 *
 * @section LICENSE
 *
 * Copyright [2012] [Carnegie Mellon University]
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @section DESCRIPTION
 *
 * Matrix factorization with the Stochastic Gradient Descent (Baseline) algorithm.
 * Algorithm is described in the papers:
 * 1) Matrix Factorization Techniques for Recommender Systems Yehuda Koren,
 *    Robert Bell, Chris Volinsky. In IEEE Computer, Vol. 42, No. 8.
 *    (07 August 2009), pp. 30-37.
 * 2) Takács, G, Pilászy, I., Németh, B. and Tikk, D. (2009). Scalable
 *    Collaborative Filtering Approaches for Large Recommender Systems.
 *    Journal of Machine Learning Research, 10, 623-656.
 *
 * Baseline predictors: predict every rating by the global mean, the
 * per-user mean, or the per-item mean (selected via --algorithm=...).
 */

#include "common.hpp"
#include "eigen_wrapper.hpp"

//types of algorithms supported when computing prediction
enum{
  GLOBAL_MEAN = 0, USER_MEAN = 1, ITEM_MEAN = 2
};
int algo = GLOBAL_MEAN;       // selected predictor, set from --algorithm in main()
std::string algorithm;

struct vertex_data {
  double mean_rating;  // per-vertex rating mean (filled only for the active algo)
  vec pvec;            // unused by this algorithm; kept for io.hpp compatibility
  vertex_data() {
    mean_rating = 0;
  }
  void set_val(int index, float val){
    pvec[index] = val;
  }
  float get_val(int index){
    return pvec[index];
  }
};

#include "util.hpp"

typedef vertex_data VertexDataType;
typedef float EdgeDataType;  // Edges store the "rating" of user->movie pair

graphchi_engine<VertexDataType, EdgeDataType> * pengine = NULL;
std::vector<vertex_data> latent_factors_inmem;

#include "rmse.hpp"
#include "io.hpp"

/**
 * compute a missing value based on SGD algorithm
 * Picks the mean dictated by `algo` (global / user / item), clamps it to
 * [minval, maxval], and returns the squared error against `rating`.
 */
float baseline_predict(const vertex_data& user, const vertex_data& movie, const float rating, double & prediction, void * extra = NULL){
  prediction = globalMean;
  if (algo == USER_MEAN)
    prediction = user.mean_rating;
  else if (algo == ITEM_MEAN)
    prediction = movie.mean_rating;
  //truncate prediction to allowed values
  prediction = std::min((double)prediction, maxval);
  prediction = std::max((double)prediction, minval);
  //return the squared error
  float err = rating - prediction;
  assert(!std::isnan(err));
  return err*err;
}

/**
 * GraphChi programs need to subclass GraphChiProgram<vertex-type, edge-type>
 * class. The main logic is usually in the update function.
 */
struct BaselineVerticesInMemProgram : public GraphChiProgram<VertexDataType, EdgeDataType> {

  void after_iteration(int iteration, graphchi_context &gcontext) {
    training_rmse(iteration, gcontext, algo == ITEM_MEAN);
    validation_rmse(&baseline_predict, gcontext);
  }

  /**
   * Vertex update function. For USER_MEAN/ITEM_MEAN it first accumulates
   * the vertex's rating mean, then adds each edge's squared error into
   * the per-thread RMSE accumulator.
   */
  void update(graphchi_vertex<VertexDataType, EdgeDataType> &vertex, graphchi_context &gcontext) {
    //go over all user nodes
    if ( vertex.num_outedges() > 0 && (algo == GLOBAL_MEAN || algo == USER_MEAN)){
      vertex_data & user = latent_factors_inmem[vertex.id()];
      //go over all ratings
      if (algo == USER_MEAN){
        user.mean_rating = 0;
        for(int e=0; e < vertex.num_edges(); e++) {
          float observation = vertex.edge(e)->get_data();
          user.mean_rating += observation;
        }
        if (vertex.num_edges() > 0)
          user.mean_rating /= vertex.num_edges();
      }
      //go over all ratings
      for(int e=0; e < vertex.num_edges(); e++) {
        double prediction;
        float observation = vertex.edge(e)->get_data();
        vertex_data & movie = latent_factors_inmem[vertex.edge(e)->vertex_id()];
        rmse_vec[omp_get_thread_num()] += baseline_predict(user, movie, observation, prediction);
      }
    }
    // ITEM_MEAN: here the vertex is an item; `user` below is actually the
    // item's data, so it is passed as baseline_predict's SECOND argument.
    else if (vertex.num_inedges() > 0 && algo == ITEM_MEAN){
      vertex_data & user = latent_factors_inmem[vertex.id()];
      user.mean_rating = 0;
      //go over all ratings
      for(int e=0; e < vertex.num_edges(); e++) {
        float observation = vertex.edge(e)->get_data();
        user.mean_rating += observation;
      }
      if (vertex.num_edges() > 0)
        user.mean_rating /= vertex.num_edges();
      for(int e=0; e < vertex.num_edges(); e++) {
        float observation = vertex.edge(e)->get_data();
        double prediction;
        vertex_data & movie = latent_factors_inmem[vertex.edge(e)->vertex_id()];
        rmse_vec[omp_get_thread_num()] += baseline_predict(movie, user, observation, prediction);
      }
    }
  }
};

//struct for writing the output feature vectors into file
// Writes mean_rating of vertices [start,end) as a matrix-market dense vector.
struct MMOutputter2{
  FILE * outf;
  MMOutputter2(std::string fname, uint start, uint end, std::string comment) {
    MM_typecode matcode;
    set_matcode(matcode);
    outf = fopen(fname.c_str(), "w");
    // NOTE(review): in NDEBUG builds this assert is compiled out and a
    // failed fopen would lead to fprintf on NULL — consider a hard check.
    assert(outf != NULL);
    mm_write_banner(outf, matcode);
    if (comment != "")
      fprintf(outf, "%%%s\n", comment.c_str());
    mm_write_mtx_array_size(outf, end-start, 1);
    for (uint i=start; i < end; i++)
      fprintf(outf, "%1.12e\n", latent_factors_inmem[i].mean_rating);
  }
  ~MMOutputter2() {
    if (outf != NULL) fclose(outf);
  }
};

//dump output to file
void output_baseline_result(std::string filename) {
  if (algo == USER_MEAN){
    MMOutputter2 mmoutput_left(filename + ".baseline_user", 0, M, "This file contains Baseline output matrix U. In each row rating mean a single user node.");
  }
  else if (algo == ITEM_MEAN){
    MMOutputter2 mmoutput_right(filename + ".baseline_item", M ,M+N, "This file contains Baseline output vector V. In each row rating mean of a single item node.");
  }
  logstream(LOG_INFO) << "Baseline output files (in matrix market format): " << filename << (algo == USER_MEAN ? ".baseline_user" : ".baseline_item") << std::endl;
}

int main(int argc, const char ** argv) {
  print_copyright();

  //* GraphChi initialization will read the command line arguments and the configuration file. */
  graphchi_init(argc, argv);

  /* Metrics object for keeping track of performance counters and other information. Currently required. */
  metrics m("sgd-inmemory-factors");

  // Select which baseline predictor to run.
  algorithm = get_option_string("algorithm", "global_mean");
  if (algorithm == "global_mean")
    algo = GLOBAL_MEAN;
  else if (algorithm == "user_mean")
    algo = USER_MEAN;
  else if (algorithm == "item_mean")
    algo = ITEM_MEAN;
  else logstream(LOG_FATAL)<<"Unsupported algorithm name. Should be --algorithm=XX where XX is one of [global_mean,user_mean,item_mean] for example --algorithm=global_mean" << std::endl;

  parse_command_line_args();

  mytimer.start();

  /* Preprocess data if needed, or discover preprocess files */
  int nshards = convert_matrixmarket<float>(training, NULL, 0, 0, 3, TRAINING, false);
  init_feature_vectors<std::vector<vertex_data> >(M+N, latent_factors_inmem, false);
  rmse_vec = zeros(number_of_omp_threads());

  print_config();

  /* Run */
  BaselineVerticesInMemProgram program;
  graphchi_engine<VertexDataType, EdgeDataType> engine(training, nshards, false, m);
  set_engine_flags(engine);
  pengine = &engine;
  engine.run(program, 1);  // a single pass suffices to compute the means

  if (algo == USER_MEAN || algo == ITEM_MEAN)
    output_baseline_result(training);
  test_predictions(&baseline_predict);

  /* Report execution metrics */
  if (!quiet)
    metrics_report(m);
  return 0;
}
09jijiangwen-download
toolkits/collaborative_filtering/baseline.cpp
C++
asf20
7,653
/**
 * @file
 * @author  Danny Bickson, CMU
 * @version 1.0
 *
 * @section LICENSE
 *
 * Copyright [2012] [Carnegie Mellon University]
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @section DESCRIPTION
 * This code implements the paper:
 * Lee, D..D., and Seung, H.S., (2001), 'Algorithms for Non-negative Matrix
 * Factorization', Adv. Neural Info. Proc. Syst. 13, 556-562.
 */

#include "common.hpp"
#include "eigen_wrapper.hpp"

// Lower bound applied to every latent-factor entry after each multiplicative
// update; keeps factors strictly positive so later divisions never hit zero.
const double epsilon = 1e-16;

struct vertex_data {
  vec pvec; // D latent factors of this user/item node

  vertex_data() {
    pvec = zeros(D);
  }
  void set_val(int index, float val){
    pvec[index] = val;
  }
  float get_val(int index){
    return pvec[index];
  }
};

/**
 * Type definitions. Remember to create suitable graph shards using the
 * Sharder-program.
 */
typedef vertex_data VertexDataType;
typedef float EdgeDataType;  // Edges store the "rating" of user->movie pair

graphchi_engine<VertexDataType, EdgeDataType> * pengine = NULL;
graphchi_engine<VertexDataType, EdgeDataType> * pvalidation_engine = NULL;
std::vector<vertex_data> latent_factors_inmem;

// Column sums of the factor matrices, recomputed before each half-iteration;
// they form the denominator of the Lee-Seung multiplicative update.
// (Identifier "feautres" is a historical misspelling, kept for compatibility.)
vec sum_of_item_latent_features, sum_of_user_latent_feautres;
int iter; // current engine iteration; two engine iterations make one NMF round

#include "rmse.hpp"
#include "rmse_engine.hpp"
#include "io.hpp"

/** compute a missing value based on NMF algorithm
 *  @param user        latent factors of the user node
 *  @param movie       latent factors of the item node
 *  @param rating      observed rating
 *  @param prediction  [out] dot product of the two factor vectors,
 *                     truncated to [minval, maxval]
 *  @return            squared prediction error
 */
float nmf_predict(const vertex_data& user,
    const vertex_data& movie,
    const float rating,
    double & prediction,
    void * extra = NULL){

  prediction = dot_prod(user.pvec, movie.pvec);
  //truncate prediction to allowed values
  prediction = std::min((double)prediction, maxval);
  prediction = std::max((double)prediction, minval);
  //return the squared error
  float err = rating - prediction;
  assert(!std::isnan(err));
  return err*err;
}

/* sum up all item data vectors (items occupy ids [M, M+N)) */
void pre_user_iter(){
  sum_of_item_latent_features = zeros(D);
  for (uint i=M; i<M+N; i++){
    vertex_data & data = latent_factors_inmem[i];
    sum_of_item_latent_features += data.pvec;
  }
}

/* sum up all user data vectors (users occupy ids [0, M)) */
void pre_movie_iter(){
  sum_of_user_latent_feautres = zeros(D);
  for (uint i=0; i<M; i++){
    vertex_data & data = latent_factors_inmem[i];
    sum_of_user_latent_feautres += data.pvec;
  }
}

/**
 * GraphChi programs need to subclass GraphChiProgram<vertex-type, edge-type>
 * class. The main logic is usually in the update function.
 */
struct NMFVerticesInMemProgram : public GraphChiProgram<VertexDataType, EdgeDataType> {

  /**
   * Called before an iteration starts. Odd iterations update users (and
   * therefore need the item-feature sums); even iterations update items.
   */
  void before_iteration(int iteration, graphchi_context &gcontext) {
    reset_rmse(gcontext.execthreads);
    iter = iteration;
    if (iteration > 0) {
      if (iteration % 2 == 1)
        pre_user_iter();
      else
        pre_movie_iter();
    }
  }

  /**
   * Vertex update function - one multiplicative Lee-Seung step.
   * Iteration 0 only validates the input matrix (no all-zero user rows,
   * no negative entries); later iterations alternate user/item updates.
   */
  void update(graphchi_vertex<VertexDataType, EdgeDataType> &vertex, graphchi_context &gcontext) {
    if (gcontext.iteration == 0){
      if (vertex.num_outedges() == 0 && vertex.id() < M)
        logstream(LOG_FATAL)<<"NMF algorithm can not work when the row " << vertex.id() << " of the matrix contains all zeros" << std::endl;
      for(int e=0; e < vertex.num_edges(); e++) {
        float observation = vertex.edge(e)->get_data();
        if (observation < 0 ){
          // NMF is only defined for non-negative input.
          logstream(LOG_FATAL)<<"Found a negative entry in matrix row " << vertex.id() << " with value: " << observation << std::endl;
        }
      }
      return;
    }

    // Odd iterations touch users only, even iterations touch items only.
    bool isuser = (vertex.id() < M);
    if ((iter % 2 == 1 && !isuser) || (iter % 2 == 0 && isuser))
      return;

    vec ret = zeros(D);
    vertex_data & vdata = latent_factors_inmem[vertex.id()];
    for(int e=0; e < vertex.num_edges(); e++) {
      float observation = vertex.edge(e)->get_data();
      vertex_data & nbr_latent = latent_factors_inmem[vertex.edge(e)->vertex_id()];
      double prediction;
      rmse_vec[omp_get_thread_num()] += nmf_predict(vdata, nbr_latent, observation, prediction);
      if (prediction == 0)
        logstream(LOG_FATAL)<<"Got into numerical error! Please submit a bug report." << std::endl;
      // Numerator of the multiplicative update: sum_j V_j * (A_ij / prediction_ij)
      ret += nbr_latent.pvec * (observation / prediction);
    }

    // Denominator: column sums of the "other side" factor matrix.
    vec px;
    if (isuser)
      px = sum_of_item_latent_features;
    else
      px = sum_of_user_latent_feautres;

    for (int i=0; i<D; i++){
      assert(px[i] != 0);
      vdata.pvec[i] *= ret[i] / px[i];
      // Clamp away from zero to keep factors strictly positive.
      if (vdata.pvec[i] < epsilon)
        vdata.pvec[i] = epsilon;
    }
  }

  /**
   * Called after an iteration has finished.
   */
  void after_iteration(int iteration, graphchi_context &gcontext) {
    //print rmse every other iteration, since 2 iterations are considered one NMF round
    if (iteration % 2 == 0){
      training_rmse(iteration/2, gcontext);
      run_validation(pvalidation_engine, gcontext);
    }
  }
};

/** Write the factor matrices U (users) and V (items) in matrix-market format. */
void output_nmf_result(std::string filename){
  MMOutputter_mat<vertex_data> user_mat(filename + "_U.mm", 0, M, "This file contains NMF output matrix U. In each row D factors of a single user node.", latent_factors_inmem);
  MMOutputter_mat<vertex_data> item_mat(filename + "_V.mm", M, M+N, "This file contains NMF output matrix V. In each row D factors of a single item node.", latent_factors_inmem);
  logstream(LOG_INFO) << "NMF output files (in matrix market format): " << filename << "_U.mm" << ", " << filename + "_V.mm " << std::endl;
}

int main(int argc, const char ** argv) {
  print_copyright();

  /* GraphChi initialization will read the command line
     arguments and the configuration file. */
  graphchi_init(argc, argv);
  metrics m("nmf-inmemory-factors");

  parse_command_line_args();
  parse_implicit_command_line();

  niters *= 2; //each NMF iteration is composed of two sub iters

  /* Preprocess data if needed, or discover preprocess files */
  int nshards = convert_matrixmarket<float>(training, NULL, 0, 0, 3, TRAINING, false);
  init_feature_vectors<std::vector<vertex_data> >(M+N, latent_factors_inmem, !load_factors_from_file);
  if (validation != ""){
    int vshards = convert_matrixmarket<EdgeDataType>(validation, NULL, 0, 0, 3, VALIDATION, false);
    if (vshards != -1)
      init_validation_rmse_engine<VertexDataType, EdgeDataType>(pvalidation_engine, vshards, &nmf_predict);
  }
  if (load_factors_from_file){
    load_matrix_market_matrix(training + "_U.mm", 0, D);
    load_matrix_market_matrix(training + "_V.mm", M, D);
  }

  sum_of_item_latent_features = zeros(D);
  sum_of_user_latent_feautres = zeros(D);

  /* Run */
  NMFVerticesInMemProgram program;
  graphchi_engine<VertexDataType, EdgeDataType> engine(training, nshards, false, m);
  set_engine_flags(engine);
  pengine = &engine;
  engine.run(program, niters);

  /* Output latent factor matrices in matrix-market format */
  output_nmf_result(training);
  test_predictions(&nmf_predict);

  /* Report execution metrics */
  if (!quiet)
    metrics_report(m);
  return 0;
}
09jijiangwen-download
toolkits/collaborative_filtering/nmf.cpp
C++
asf20
7,469
/**
 * @file
 * @author  Danny Bickson, based on code by Aapo Kyrola <akyrola@cs.cmu.edu>
 * @version 1.0
 *
 * @section LICENSE
 *
 * Copyright [2012] [Carnegie Mellon University]
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * This file implements item based collaborative filtering by comparing all item pairs which
 * are connected by one or more user nodes.
 *
 * For the Jaccard index see: http://en.wikipedia.org/wiki/Jaccard_index
 *
 * For the AA index see: http://arxiv.org/abs/0907.1728 "Role of Weak Ties in Link Prediction of Complex Networks", equation (2)
 *
 * For the RA index see the above paper, equation (3)
 *
 * For Asym. Cosine see: F. Aiolli, A Preliminary Study on a Recommender System for the Million Songs Dataset Challenge
 * Preference Learning: Problems and Applications in AI (PL-12), ECAI-12 Workshop, Montpellier
 *
 * Acknowledgements: thanks to Clive Cox, Rummble Labs, for implementing Asym. Cosince metric and contributing the code.
 */

#include <set>
#include <iomanip>
#include <algorithm>
#include "common.hpp"
#include "timer.hpp"
#include "eigen_wrapper.hpp"
#include "engine/dynamic_graphs/graphchi_dynamicgraph_engine.hpp"

// Supported item-item similarity measures, selected via --distance.
enum DISTANCE_METRICS{
  JACCARD = 0,
  AA = 1,
  RA = 2,
  ASYM_COSINE = 3,
};

int min_allowed_intersection = 1; // minimal number of common users for an item pair to be reported
vec written_pairs;                // per-OMP-thread counters of similarity pairs written out
size_t zero_dist = 0;             // compared pairs whose computed distance was exactly zero
size_t actual_written = 0;
size_t item_pairs_compared = 0;   // running total of item pairs compared
size_t not_enough = 0;            // items that ended up with fewer than K similar items
std::vector<FILE*> out_files;     // one output file per OMP thread
timer mytimer;
bool * relevant_items = NULL;     // item i-M is connected (via some user) to a current pivot
int grabbed_edges = 0;            // number of edges currently held in memory by the pivot container
int distance_metric;
float asym_cosine_alpha = 0.5;
int debug = 0;

// Vertex-id layout: users occupy ids [0, M), items occupy ids [M, M+N).
bool is_item(vid_t v){ return v >= M; }
bool is_user(vid_t v){ return v < M; }

/**
 * Type definitions. Remember to create suitable graph shards using the
 * Sharder-program.
 */
typedef unsigned int VertexDataType;
typedef unsigned int EdgeDataType;  // Edges store the "rating" of user->movie pair

// Per-user record; only 'degree' is used here (for the AA/RA weights).
struct vertex_data{
  vec pvec;
  int degree;
  vertex_data(){
    degree = 0;
  }
  void set_val(int index, float val){ pvec[index] = val; }
  float get_val(int index){ return pvec[index]; }
};
std::vector<vertex_data> latent_factors_inmem;
#include "io.hpp"

// A pivot item's sorted adjacency (the user ids that rated it), heap-allocated.
struct dense_adj {
  int count;
  vid_t * adjlist;

  dense_adj() { adjlist = NULL; }
  dense_adj(int _count, vid_t * _adjlist) : count(_count), adjlist(_adjlist) { }
};

// This is used for keeping in-memory
// the adjacency lists of the current pivot range [pivot_st, pivot_en).
class adjlist_container {
  std::vector<dense_adj> adjs;
  //mutex m;
  public:
  vid_t pivot_st, pivot_en;

  adjlist_container() {
    pivot_st = M; //start pivor on item nodes (excluding user nodes)
    pivot_en = M;
  }

  // Free all grabbed adjacency lists and advance the pivot window start.
  void clear() {
    for(std::vector<dense_adj>::iterator it=adjs.begin(); it != adjs.end(); ++it) {
      if (it->adjlist != NULL) {
        free(it->adjlist);
        it->adjlist = NULL;
      }
    }
    adjs.clear();
    pivot_st = pivot_en;
  }

  /**
   * Extend the interval of pivot vertices to en.
   */
  void extend_pivotrange(vid_t en) {
    assert(en>=pivot_en);
    pivot_en = en;
    adjs.resize(pivot_en - pivot_st);
  }

  /**
   * Grab pivot's adjacency list into memory.
   * Returns the number of edges grabbed (0 when the item has too few raters).
   */
  int load_edges_into_memory(graphchi_vertex<uint32_t, uint32_t> &v) {
    //assert(is_pivot(v.id()));
    //assert(is_item(v.id()));
    int num_edges = v.num_edges();
    //not enough user rated this item, we don't need to compare to it
    if (num_edges < min_allowed_intersection){
      relevant_items[v.id() - M] = false;
      return 0;
    }
    relevant_items[v.id() - M] = true;

    // Count how many neighbors have larger id than v
    dense_adj dadj = dense_adj(num_edges, (vid_t*) calloc(sizeof(vid_t), num_edges));
    for(int i=0; i<num_edges; i++) {
      dadj.adjlist[i] = v.edge(i)->vertex_id();
    }
    // Sorted so calc_distance can run std::set_intersection against it.
    std::sort(dadj.adjlist, dadj.adjlist + num_edges);
    adjs[v.id() - pivot_st] = dadj;
    assert(v.id() - pivot_st < adjs.size());
    __sync_add_and_fetch(&grabbed_edges, num_edges /*edges_to_larger_id*/);
    return num_edges;
  }

  // Number of users connected to a loaded pivot item.
  int acount(vid_t pivot) {
    return adjs[pivot - pivot_st].count;
  }

  /**
   * calc distance between two items.
   * Let a be all the users rated item 1
   * Let b be all the users rated item 2
   *
   * 1) Using Jackard index:
   *    Dist_ab = intersection(a,b) / (size(a) + size(b) - size(intersection(a,b))
   *
   * 2) Using AA index:
   *    Dist_ab = sum_user k in intersection(a,b) [ 1 / log(degree(k)) ]
   *
   * 3) Using RA index:
   *    Dist_ab = sum_user k in intersection(a,b) [ 1 / degree(k) ]
   *
   * 4) Using Asym Cosine:
   *    Dist_ab = intersection(a,b) / size(a)^alpha * size(b)^(1-alpha)
   */
  double calc_distance(graphchi_vertex<uint32_t, uint32_t> &v, vid_t pivot, int distance_metric) {
    //assert(is_pivot(pivot));
    //assert(is_item(pivot) && is_item(v.id()));
    dense_adj &pivot_edges = adjs[pivot - pivot_st];
    int num_edges = v.num_edges();

    //if there are not enough neighboring user nodes to those two items there is no need
    //to actually count the intersection
    if (num_edges < min_allowed_intersection || pivot_edges.count < min_allowed_intersection)
      return 0;

    std::vector<vid_t> edges;
    edges.resize(num_edges);
    for(int i=0; i < num_edges; i++) {
      vid_t other_vertex = v.edge(i)->vertexid;
      edges[i] = other_vertex;
    }
    sort(edges.begin(), edges.end());

    // Users that rated both the pivot item and item v.
    std::set<vid_t> intersection;
    std::set_intersection(
        pivot_edges.adjlist, pivot_edges.adjlist + pivot_edges.count,
        edges.begin(), edges.end(),
        std::inserter(intersection, intersection.begin()));
    double intersection_size = (double)intersection.size();
    //not enough user nodes rated both items, so the pairs of items are not compared.
    if (intersection_size < (double)min_allowed_intersection)
      return 0;

    if (distance_metric == JACCARD){
      uint set_a_size = v.num_edges(); //number of users connected to current item
      uint set_b_size = acount(pivot); //number of users connected to current pivot
      return intersection_size / (double)(set_a_size + set_b_size - intersection_size); //compute the distance
    }
    else if (distance_metric == AA){
      double dist = 0;
      for (std::set<vid_t>::iterator i= intersection.begin() ; i != intersection.end(); i++){
        vid_t user = *i;
        assert(latent_factors_inmem.size() == M && is_user(user));
        assert(latent_factors_inmem[user].degree > 0);
        dist += 1.0 / log(latent_factors_inmem[user].degree);
      }
      return dist;
    }
    else if (distance_metric == RA){
      double dist = 0;
      for (std::set<vid_t>::iterator i= intersection.begin() ; i != intersection.end(); i++){
        vid_t user = *i;
        assert(latent_factors_inmem.size() == M && is_user(user));
        assert(latent_factors_inmem[user].degree > 0);
        dist += 1.0 / latent_factors_inmem[user].degree;
      }
      return dist;
    }
    else if (distance_metric == ASYM_COSINE){
      uint set_a_size = v.num_edges(); //number of users connected to current item
      uint set_b_size = acount(pivot); //number of users connected to current pivot
      return intersection_size / (pow(set_a_size,asym_cosine_alpha) * pow(set_b_size,1-asym_cosine_alpha));
    }
    return 0;
  }

  inline bool is_pivot(vid_t vid) {
    return vid >= pivot_st && vid < pivot_en;
  }
};

adjlist_container * adjcontainer;

// A (item id, similarity) candidate kept while selecting the top-K per item.
struct index_val{
  uint index;
  float val;
  index_val(){
    index = -1; val = 0;
  }
  index_val(uint index, float val): index(index), val(val){ }
};
// Descending-similarity comparator for sorting candidates.
bool Greater(const index_val& a, const index_val& b)
{
  return a.val > b.val;
}

struct ItemDistanceProgram : public GraphChiProgram<VertexDataType, EdgeDataType> {

  /**
   * Vertex update function.
   */
  void update(graphchi_vertex<VertexDataType, EdgeDataType> &v, graphchi_context &gcontext) {
    if (debug)
      printf("Entered iteration %d with %d\n", gcontext.iteration, v.id());

    /* even iteration numbers:
     * 1) load a subset of items into memory (pivots)
     * 2) Find which subset of items needs to compared to the users
     */
    if (gcontext.iteration % 2 == 0) {
      if (adjcontainer->is_pivot(v.id()) && is_item(v.id())){
        adjcontainer->load_edges_into_memory(v);
        if (debug)
          // NOTE(review): message typo — "%dintro" should read "%d into".
          printf("Loading pivot %dintro memory\n", v.id());
      }
      else if (is_user(v.id())){
        //in the zero iteration, if using AA distance metric, initialize array
        //with node degrees
        if (gcontext.iteration == 0 && (distance_metric == AA || distance_metric == RA)){
          latent_factors_inmem[v.id()].degree = v.num_edges();
        }

        //check if this user is connected to any pivot item
        bool has_pivot = false;
        int pivot = -1;
        for(int i=0; i<v.num_edges(); i++) {
          graphchi_edge<uint32_t> * e = v.edge(i);
          //assert(is_item(e->vertexid));
          if (adjcontainer->is_pivot(e->vertexid)) {
            has_pivot = true;
            pivot = e->vertexid;
            break;
          }
        }
        if (debug)
          printf("user %d is linked to pivot %d\n", v.id(), pivot);
        if (!has_pivot) //this user is not connected to any of the pivot item nodes and thus
          //it is not relevant at this point
          return;

        //this user is connected to a pivot items, thus all connected items should be compared
        for(int i=0; i<v.num_edges(); i++) {
          graphchi_edge<uint32_t> * e = v.edge(i);
          //assert(v.id() != e->vertexid);
          relevant_items[e->vertexid - M] = true;
        }
      }//is_user
    } //iteration % 2 = 1
    /* odd iteration number:
     * 1) For any item connected to a pivot item
     *    compute itersection
     */
    else {
      if (!relevant_items[v.id() - M]){
        if (debug)
          logstream(LOG_DEBUG)<<"Skipping item: " << v.id() << " since not relevant" << std::endl;
        return;
      }

      // Collect nonzero similarities against every loaded pivot, then keep top-K.
      std::vector<index_val> heap;
      for (vid_t i=adjcontainer->pivot_st; i< adjcontainer->pivot_en; i++){
        //if JACCARD which is symmetric, compare only to pivots which are smaller than this item id
        if ((distance_metric != ASYM_COSINE && i >= v.id()) || (!relevant_items[i-M]))
          continue;
        else if (distance_metric == ASYM_COSINE && i == v.id())
          continue;

        double dist = adjcontainer->calc_distance(v, i, distance_metric);
        item_pairs_compared++;
        if (item_pairs_compared % 10000000 == 0)
          logstream(LOG_INFO)<< std::setw(10) << mytimer.current_time() << ") " << std::setw(10) << item_pairs_compared << " pairs compared " << std::setw(10) <<sum(written_pairs) << " written. " << std::endl;
        if (debug)
          printf("comparing %d to pivot %d distance is %g\n", i - M + 1, v.id() - M + 1, dist);
        if (dist != 0){
          heap.push_back(index_val(i, dist));
          //where the output format is:
          //[item A] [ item B ] [ distance ]
        }
        else zero_dist++;
      }

      sort(heap.begin(), heap.end(), &Greater);
      int thread_num = omp_get_thread_num();
      if (heap.size() < K)
        not_enough++;
      // Output uses 1-based item indices (v.id()-M+1), matching matrix-market style.
      for (uint i=0; i< std::min(heap.size(), (size_t)K); i++){
        int rc = fprintf(out_files[thread_num], "%u %u %.12lg\n", v.id()-M+1, heap[i].index-M+1, (double)heap[i].val);//write item similarity to file
        written_pairs[omp_get_thread_num()]++;
        if (rc <= 0){
          perror("Failed to write output");
          logstream(LOG_FATAL)<<"Failed to write output to: file: " << training << omp_get_thread_num() << ".out" << std::endl;
        }
      }
    }//end of iteration % 2 == 1
  }//end of update function

  /**
   * Called before an iteration starts.
   * On odd iteration, schedule both users and items.
   * on even iterations, schedules only item nodes
   */
  void before_iteration(int iteration, graphchi_context &gcontext) {
    gcontext.scheduler->remove_tasks(0, (int) gcontext.nvertices - 1);
    if (gcontext.iteration == 0)
      written_pairs = zeros(gcontext.execthreads);

    if (gcontext.iteration % 2 == 0){
      // New pivot round: reset the relevance map and drop the old pivot lists.
      memset(relevant_items, 0, sizeof(bool)*N);
      for (vid_t i=0; i < M+N; i++){
        gcontext.scheduler->add_task(i);
      }
      grabbed_edges = 0;
      adjcontainer->clear();
    } else { //iteration % 2 == 1
      for (vid_t i=M; i < M+N; i++){
        gcontext.scheduler->add_task(i);
      }
    }
  }

  /**
   * Called before an execution interval is started.
   *
   * On every even iteration, we load pivot's item connected user lists to memory.
   * Here we manage the memory to ensure that we do not load too much
   * edges into memory.
   */
  void before_exec_interval(vid_t window_st, vid_t window_en, graphchi_context &gcontext) {
    /* on even iterations, loads pivot items into memory base on the membudget_mb allowed memory size */
    if (gcontext.iteration % 2 == 0) {
      if (!quiet){
        printf("entering iteration: %d on before_exec_interval\n", gcontext.iteration);
        printf("pivot_st is %d window_en %d\n", adjcontainer->pivot_st, window_en);
      }
      if (adjcontainer->pivot_st <= window_en) {
        // Budget: membudget_mb megabytes, 8 bytes assumed per grabbed edge.
        size_t max_grab_edges = get_option_long("membudget_mb", 1024) * 1024 * 1024 / 8;
        if (grabbed_edges < max_grab_edges * 0.8) {
          logstream(LOG_DEBUG) << "Window init, grabbed: " << grabbed_edges << " edges" << " extending pivor_range to : " << window_en + 1 << std::endl;
          adjcontainer->extend_pivotrange(window_en + 1);
          logstream(LOG_DEBUG) << "Window en is: " << window_en << " vertices: " << gcontext.nvertices << std::endl;
          if (window_en+1 == gcontext.nvertices) {
            // every item was a pivot item, so we are done
            logstream(LOG_DEBUG)<<"Setting last iteration to: " << gcontext.iteration + 2 << std::endl;
            gcontext.set_last_iteration(gcontext.iteration + 2);
          }
        } else {
          logstream(LOG_DEBUG) << "Too many edges, already grabbed: " << grabbed_edges << std::endl;
        }
      }
    }
  }
};

int main(int argc, const char ** argv) {
  print_copyright();

  /* GraphChi initialization will read the command line
     arguments and the configuration file. */
  graphchi_init(argc, argv);

  /* Metrics object for keeping track of performance counters
     and other information. Currently required. */
  metrics m("item-cf");

  /* Basic arguments for application */
  min_allowed_intersection = get_option_int("min_allowed_intersection", min_allowed_intersection);
  distance_metric = get_option_int("distance", JACCARD);
  asym_cosine_alpha = get_option_float("asym_cosine_alpha", 0.5);
  debug = get_option_int("debug", debug);
  if (distance_metric != JACCARD && distance_metric != AA && distance_metric != RA && distance_metric != ASYM_COSINE)
    logstream(LOG_FATAL)<<"Wrong distance metric. --distance_metric=XX, where XX should be either 0) JACCARD, 1) AA, 2) RA, 3) ASYM_COSINE" << std::endl;
  parse_command_line_args();

  mytimer.start();
  int nshards = convert_matrixmarket<EdgeDataType>(training, NULL, 0, 0, 3, TRAINING, false);
  if (nshards != 1)
    logstream(LOG_FATAL)<<"This application currently supports only 1 shard" << std::endl;

  K = get_option_int("K", K);
  if (K <= 0)
    logstream(LOG_FATAL)<<"Please specify the number of ratings to generate for each user using the --K command" << std::endl;

  assert(M > 0 && N > 0);

  //initialize data structure which saves a subset of the items (pivots) in memory
  adjcontainer = new adjlist_container();
  //array for marking which items are conected to the pivot items via users.
  relevant_items = new bool[N];

  //store node degrees in an array to be used for AA distance metric
  if (distance_metric == AA || distance_metric == RA)
    latent_factors_inmem.resize(M);

  /* Run */
  ItemDistanceProgram program;
  graphchi_engine<VertexDataType, EdgeDataType> engine(training, 1, true, m);
  set_engine_flags(engine);
  engine.set_maxwindow(M+N+1);

  //open output files as the number of operating threads
  out_files.resize(number_of_omp_threads());
  for (uint i=0; i< out_files.size(); i++){
    char buf[256];
    sprintf(buf, "%s.out%d", training.c_str(), i);
    out_files[i] = open_file(buf, "w");
  }

  //run the program
  engine.run(program, niters);

  /* Report execution metrics */
  if (!quiet)
    metrics_report(m);

  std::cout<<"Total item pairs compared: " << item_pairs_compared << " total written to file: " << sum(written_pairs) << " pairs with zero distance: " << zero_dist << std::endl;
  if (not_enough)
    logstream(LOG_WARNING)<<"Items that did not have enough similar items: " << not_enough << std::endl;
  for (uint i=0; i< out_files.size(); i++){
    fflush(out_files[i]);
    fclose(out_files[i]);
  }

  std::cout<<"Created " << number_of_omp_threads() << " output files with the format: " << training << ".outXX, where XX is the output thread number" << std::endl;

  delete[] relevant_items;
  return 0;
}
09jijiangwen-download
toolkits/collaborative_filtering/itemcf.cpp
C++
asf20
18,228
/**
 * @file
 * @author  Danny Bickson
 * @version 1.0
 *
 * @section LICENSE
 *
 * Copyright [2012] [Carnegie Mellon University]
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @section DESCRIPTION
 * This code implements the PMF (probablistic matrix factorization) algorithm
 * as explained in Liang Xiong et al SDM 2010 paper.
 */

#include "eigen_wrapper.hpp"
#include "common.hpp"
#include "prob.hpp"

double lambda = 0.065;
int pmf_burn_in = 10;//number of iterations for burn in (itermediate solutions are thrown)
int pmf_additional_output = 0;
int debug = 0;

/* variables for PMF */
// Hyperprior parameters (Normal-Wishart) for the Gibbs sampler.
double nuAlpha = 1;
double Walpha = 1;
double nu0 = D;
double alpha = 0;   // sampled noise precision
double beta = 1;
vec beta0 = init_vec("1", 1);
//vec mu0T = init_vec("1", 1);
mat W0;
//mat W0T;
double iWalpha;
mat iW0;
//mat iW0T;
mat A_U, A_V;// A_T;   // sampled precision matrices for user/item priors
vec mu_U, mu_V; //, mu_T;   // sampled means for user/item priors
int iiter = 0;  // global sample counter (incremented once per engine iteration)
vec validation_avgprod; //vector for storing temporary aggregated predictions for the MCMC method
vec test_avgprod; //vector for strogin temporary aggregated predictions for the MCMC method
size_t rmse_index = 0;
int rmse_type = 0;

struct vertex_data {
  vec pvec; // D latent factors of this user/item node
  vertex_data() {
    pvec = zeros(D);
  }
  void set_val(int index, float val){
    pvec[index] = val;
  }
  float get_val(int index){
    return pvec[index];
  }
};

struct edge_data {
  float weight;  // observed rating
  float avgprd;  // running sum of post-burn-in sampled predictions
  edge_data() { weight = 0; avgprd = 0; }
  edge_data(double weight): weight(weight) { avgprd = 0; }
};

/**
 * Type definitions. Remember to create suitable graph shards using the
 * Sharder-program.
 */
typedef vertex_data VertexDataType;
typedef edge_data EdgeDataType;  // Edges store the "rating" of user->movie pair

graphchi_engine<VertexDataType, EdgeDataType> * pengine = NULL;
std::vector<vertex_data> latent_factors_inmem;

#include "io.hpp"
#include "rmse.hpp"

/** compute a missing value based on PMF algorithm.
 *  After burn-in, the error is measured against the average of all samples
 *  accumulated in *pedge; before/during burn-in, against the single sample.
 */
float pmf_predict(const vertex_data& user,
    const vertex_data& movie,
    const float rating,
    double & prediction,
    void * pedge){

  prediction = dot_prod(user.pvec, movie.pvec);
  //truncate prediction to allowed values
  prediction = std::min((double)prediction, maxval);
  prediction = std::max((double)prediction, minval);
  float err = 0;
  if (iiter > pmf_burn_in){
    if (pedge){
      // first post-burn-in sample resets the accumulator
      if (iiter == pmf_burn_in+1)
        (*(float*)pedge) = 0;
      (*(float*)pedge) += prediction;
      err = pow(((*(float*)pedge) / (iiter - pmf_burn_in)) - rating, 2);
    }
  }
  else {
    err = pow(prediction - rating,2);
  }
  assert(!std::isnan(err));
  if (!pedge)
    rmse_index++;
  return err;
}

// Initialize the Normal-Wishart hyperprior state (identity scales, zero means).
void init_self_pot(){
  W0 = eye(D);
  //W0T = eye(D);
  iWalpha = 1.0/Walpha;
  iW0 = inv(W0);
  //iW0T = inv(W0T);
  nu0 = D;

  A_U = eye(D); //cov prior for users
  A_V = eye(D); //cov prior for movies
  //A_T = eye(D); //cov prior for time nodes
  mu_U = zeros(D); mu_V = zeros(D);// mu_T = zeros(D);
  //printf("nuAlpha=%g, Walpha=%g, mu0=%d, muT=%g, nu=%g, "
  //       "beta=%g, W=%g, WT=%g pmf_burn_in=%d\n", nuAlpha, Walpha, 0,
  //       mu0T[0], nu0, beta0[0], W0(1,1), W0T(1,1), pmf_burn_in);

  //test_randn();
  //test_wishrnd();
  //test_wishrnd2();
  //test_chi2rnd();
  //test_wishrnd3();
  //test_mvnrndex();
}

/**
 * sample the noise level
 * Euqation A.2 in Xiong paper
 * @param res2 sum of squared residuals from the last iteration
 */
void sample_alpha(double res2){
  if (debug)
    printf("res is %g\n", res2);

  double res = res2;
  if (nuAlpha > 0){
    double nuAlpha_ =nuAlpha+ L;  // L = number of training ratings
    mat iWalpha_(1,1);
    set_val(iWalpha_, 0,0,iWalpha + res);
    mat iiWalpha_ = zeros(1,1);
    iiWalpha_ = inv(iWalpha_);
    alpha = get_val(wishrnd(iiWalpha_, nuAlpha_),0,0);
    assert(alpha != 0);

    if (debug)
      std::cout<<"Sampling from alpha" <<nuAlpha_<<" "<<iWalpha<<" "<< iiWalpha_<<" "<<alpha<<endl;
    //printf("sampled alpha is %g\n", alpha);
  }
}

/** Accumulate M^T*M over latent vectors [start_pos, end_pos) in batches of 1000,
 *  and return the mean vector through Umean. */
mat calc_MMT(int start_pos, int end_pos, vec &Umean){
  int batchSize = 1000;
  mat U = zeros(batchSize,D);
  mat MMT = zeros(D,D);
  int cnt = 0;

  for (int i=start_pos; i< end_pos; i++){
    if ((i-start_pos) % batchSize == 0){
      U=zeros(batchSize, D);
      cnt = 1;
    }
    const vertex_data * data= &latent_factors_inmem[i];
    vec mean = data->pvec;
    Umean += mean;
    // NOTE(review): row index uses i%batchSize while the batch reset above uses
    // (i-start_pos)%batchSize; when start_pos (= M) is not a multiple of
    // batchSize these disagree — verify intended indexing.
    for (int s=0; s<D; s++)
      U(i%batchSize,s)=mean(s);
    if (debug && (i==start_pos || i == end_pos-1))
      std::cout<<" clmn "<<i<< " vec: " << mean <<std::endl;

    if ((cnt == batchSize) || (cnt < batchSize && i == end_pos-1)){
      MMT = MMT+transpose(U)*U;
    }
    cnt++;
  }
  Umean /= (end_pos-start_pos);
  if (debug)
    cout<<"mean: "<<Umean<<endl;

  assert(MMT.rows() == D && MMT.cols() == D);
  assert(Umean.size() == D);
  return MMT;
}

// sample movie nodes hyperprior
// according to equation A.3 in Xiong paper.
void sample_U(){
  vec Umean = zeros(D);
  mat UUT = calc_MMT(0,M,Umean);

  double beta0_ = beta0[0] + M;
  vec mu0_ = (M*Umean)/beta0_;
  double nu0_ = nu0 +M;
  vec dMu = - Umean;
  if (debug)
    std::cout<<"dMu:"<<dMu<<"beta0: "<<beta0[0]<<" beta0_ "<<beta0_<<" nu0_ " <<nu0_<<" mu0_ " << mu0_<<endl;

  mat UmeanT = M*outer_product(Umean, Umean);
  assert(UmeanT.rows() == D && UmeanT.cols() == D);
  mat dMuT = (beta0[0]/beta0_)*UmeanT;
  mat iW0_ = iW0 + UUT - UmeanT + dMuT;
  mat W0_;
  bool ret =inv(iW0_, W0_);
  assert(ret);
  // symmetrize before drawing from the Wishart
  mat tmp = (W0_+transpose(W0_))*0.5;
  if (debug)
    std::cout<<iW0<<UUT<<UmeanT<<dMuT<<W0_<<tmp<<nu0_<<endl;
  A_U = wishrnd(tmp, nu0_);
  mat tmp2;
  ret = inv(beta0_ * A_U, tmp2);
  assert(ret);
  mu_U = mvnrndex(mu0_, tmp2, D, 0);
  if (debug)
    std::cout<<"Sampling from U" <<A_U<<" "<<mu_U<<" "<<Umean<<" "<<W0_<<tmp<<endl;
}

// sample user nodes hyperprior
// according to equation A.4 in Xiong paper
void sample_V(){
  vec Vmean = zeros(D);
  mat VVT = calc_MMT(M, M+N, Vmean);

  double beta0_ = beta0[0] + N;
  vec mu0_ = (N*Vmean)/beta0_;
  double nu0_ = nu0 +N;
  vec dMu = - Vmean;
  if (debug)
    std::cout<<"dMu:"<<dMu<<"beta0: "<<beta0[0]<<" beta0_ "<<beta0_<<" nu0_ " <<nu0_<<endl;
  mat VmeanT = N*outer_product(Vmean, Vmean);
  assert(VmeanT.rows() == D && VmeanT.cols() == D);
  mat dMuT = (beta0[0]/beta0_)*VmeanT;
  mat iW0_ = iW0 + VVT - VmeanT + dMuT;
  mat W0_;
  bool ret = inv(iW0_, W0_);
  assert(ret);
  mat tmp = (W0_+transpose(W0_))*0.5;
  if (debug)
    std::cout<<"iW0: "<<iW0<<" VVT: "<<VVT<<" VmeanT: "<<VmeanT<<" dMuT: " <<dMuT<<"W0_"<< W0_<<" tmp: " << tmp<<" nu0_: "<<nu0_<<endl;
  A_V = wishrnd(tmp, nu0_);
  mat tmp2;
  ret = inv(beta0_*A_V, tmp2);
  assert(ret);
  mu_V = mvnrndex(mu0_, tmp2, D, 0);
  if (debug)
    std::cout<<"Sampling from V: A_V" <<A_V<<" mu_V: "<<mu_V<<" Vmean: "<<Vmean<<" W0_: "<<W0_<<" tmp: "<<tmp<<endl;
}

// One Gibbs pass over the hyperpriors given the last residual sum.
void sample_hyperpriors(double res){
  sample_alpha(res);
  sample_U();
  sample_V();
  //if (tensor)
  //  sample_T();
}

/** Write the current factor matrices U (users) and V (items) in matrix-market format. */
void output_pmf_result(std::string filename) {
  MMOutputter_mat<vertex_data> user_mat(filename + "_U.mm", 0, M , "This file contains PMF output matrix U. In each row D factors of a single user node.", latent_factors_inmem);
  MMOutputter_mat<vertex_data> item_mat(filename + "_V.mm", M ,M+N, "This file contains PMF output matrix V. In each row D factors of a single item node.", latent_factors_inmem);
  logstream(LOG_INFO) << "PMF output files (in matrix market format): " << filename << "_U.mm" << ", " << filename + "_V.mm " << std::endl;
}

/**
 * GraphChi programs need to subclass GraphChiProgram<vertex-type, edge-type>
 * class. The main logic is usually in the update function.
 */
struct PMFVerticesInMemProgram : public GraphChiProgram<VertexDataType, EdgeDataType> {

  /**
   * Vertex update function - computes the least square step
   * and draws the node's new factor vector from its conditional posterior.
   */
  void update(graphchi_vertex<VertexDataType, EdgeDataType> &vertex, graphchi_context &gcontext) {
    vertex_data & vdata = latent_factors_inmem[vertex.id()];
    bool isuser = vertex.id() < M;
    mat XtX = mat::Zero(D, D);
    vec Xty = vec::Zero(D);

    bool compute_rmse = (vertex.num_outedges() > 0);
    // Compute XtX and Xty (NOTE: unweighted)
    for(int e=0; e < vertex.num_edges(); e++) {
      const edge_data & edge = vertex.edge(e)->get_data();
      float observation = edge.weight;
      vertex_data & nbr_latent = latent_factors_inmem[vertex.edge(e)->vertex_id()];
      Xty += nbr_latent.pvec * observation;
      XtX.triangularView<Eigen::Upper>() += nbr_latent.pvec * nbr_latent.pvec.transpose();
      if (compute_rmse) {
        double prediction;
        // avgprd is mutated through the pedge pointer inside pmf_predict,
        // then written back to the edge below.
        rmse_vec[omp_get_thread_num()] += pmf_predict(vdata, nbr_latent, observation, prediction, (void*)&edge.avgprd);
        vertex.edge(e)->set_data(edge);
      }
    }

    double regularization = lambda;
    // NOTE(review): this mutates the GLOBAL lambda every update when
    // --regnormal is set (likely intended: regularization *= ...); confirm.
    if (regnormal)
      lambda *= vertex.num_edges();
    for(int i=0; i < D; i++) XtX(i,i) += regularization;

    // Solve the least squares problem with eigen using Cholesky decomposition
    mat iAi_;
    bool ret =inv((isuser? A_U : A_V) + alpha * XtX, iAi_);
    assert(ret);
    vec mui_ = iAi_*((isuser? (A_U*mu_U) : (A_V*mu_V)) + alpha * Xty);
    vdata.pvec = mvnrndex(mui_, iAi_, D, 0);
    assert(vdata.pvec.size() == D);
  }

  /**
   * Called before an iteration is started.
   */
  void before_iteration(int iteration, graphchi_context &gcontext) {
    rmse_vec = zeros(gcontext.execthreads);
  }

  /**
   * Called after an iteration has finished:
   * resample hyperpriors, then compute validation/test error.
   */
  void after_iteration(int iteration, graphchi_context &gcontext) {
    if (iteration == pmf_burn_in){
      printf("Finished burn-in period. starting to aggregate samples\n");
    }
    // optionally dump every post-burn-in sample to its own file pair
    if (pmf_additional_output && iiter >= pmf_burn_in){
      char buf[256];
      sprintf(buf, "%s-%d", training.c_str(), iiter-pmf_burn_in);
      output_pmf_result(buf);
    }
    double res = training_rmse(iteration, gcontext);
    sample_hyperpriors(res);
    rmse_index = 0;
    rmse_type = VALIDATION;
    validation_rmse(&pmf_predict, gcontext, 3, &validation_avgprod, pmf_burn_in);
    if (iteration >= pmf_burn_in){
      rmse_index = 0;
      rmse_type = TEST;
      test_predictions(&pmf_predict, &gcontext, iiter == niters-1, &test_avgprod);
    }
    iiter++;
  }
};

void init_pmf(){
  init_self_pot();
}

int main(int argc, const char ** argv) {
  print_copyright();

  /* GraphChi initialization will read the command line
     arguments and the configuration file. */
  graphchi_init(argc, argv);

  /* Metrics object for keeping track of performance counters
     and other information. Currently required. */
  metrics m("pmf-inmemory-factors");

  lambda = get_option_float("lambda", 0.065);
  debug = get_option_int("debug", debug);
  pmf_burn_in = get_option_int("pmf_burn_in", pmf_burn_in);
  pmf_additional_output = get_option_int("pmf_additional_output", pmf_additional_output);

  parse_command_line_args();
  parse_implicit_command_line();

  /* Preprocess data if needed, or discover preprocess files */
  int nshards = convert_matrixmarket<edge_data>(training, NULL, 0, 0, 3, TRAINING, false);
  init_feature_vectors<std::vector<vertex_data> >(M+N, latent_factors_inmem, !load_factors_from_file);
  init_pmf();
  if (load_factors_from_file){
    load_matrix_market_matrix(training + "_U.mm", 0, D);
    load_matrix_market_matrix(training + "_V.mm", M, D);
  }

  /* Run */
  PMFVerticesInMemProgram program;
  graphchi_engine<VertexDataType, EdgeDataType> engine(training, nshards, false, m);
  set_engine_flags(engine, true);
  pengine = &engine;
  engine.run(program, niters);

  /* Report execution metrics */
  if (!quiet)
    metrics_report(m);
  return 0;
}
09jijiangwen-download
toolkits/collaborative_filtering/pmf.cpp
C++
asf20
12,199
#ifndef _COMMON_H__ #define _COMMON_H__ /** * @file * @author Danny Bickson * @version 1.0 * * @section LICENSE * * Copyright [2012] [Carnegie Mellon University] * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <algorithm> #include <assert.h> #include <cmath> #include <errno.h> #include <string> #include "util.hpp" #include "graphchi_basic_includes.hpp" #include "api/vertex_aggregator.hpp" #include "preprocessing/sharder.hpp" #include "../../example_apps/matrix_factorization/matrixmarket/mmio.h" #include "../../example_apps/matrix_factorization/matrixmarket/mmio.c" #include <stdio.h> #ifdef __APPLE__ #include "getline.hpp" //fix for missing getline() function on MAC OS #endif using namespace graphchi; double minval = -1e100; //max allowed value in matrix double maxval = 1e100; //min allowed value in matrix double valrange = 1; //range of allowed values in matrix std::string training; std::string validation; std::string test; uint M, N, K; size_t L, Le; uint Me, Ne; double globalMean = 0; double globalMean2 = 0; double rmse=0.0; bool load_factors_from_file = false; int unittest = 0; int niters = 10; int halt_on_rmse_increase = 0; int D = 20; //feature vector width bool quiet = false; int input_file_offset = 1; int kfold_cross_validation = 0; int kfold_cross_validation_index = 0; int regnormal = 0; // if set to 1, compute LS regularization according to the paper "Yunhong Zhou, Dennis Wilkinson, Robert Schreiber and Rong Pan. 
Large-Scale Parallel Collaborative Filtering for the Netflix Prize." int clean_cache = 0; int R_output_format = 0; // if set to 1, all matrices and vectors are written in sparse matrix market format since // R does not currently support array format (dense format). /* support for different loss types (for SGD variants) */ std::string loss = "square"; enum { LOGISTIC = 0, SQUARE = 1, ABS = 2, AP = 3 }; const char * error_names[] = {"LOGISTIC LOSS", "RMSE", "MAE", "AP"}; int loss_type = SQUARE; int calc_ap = 0; int ap_number = 3; //AP@3 enum { TRAINING= 0, VALIDATION = 1, TEST = 2 }; void remove_cached_files(){ //remove cached files int rc; assert(training != ""); rc = system((std::string("rm -fR ") + training + std::string(".*")).c_str()); assert(!rc); if (validation != ""){ rc = system((std::string("rm -fR ") + validation + std::string(".*")).c_str()); assert(!rc); } } void parse_command_line_args(){ /* Basic arguments for application. NOTE: File will be automatically 'sharded'. */ unittest = get_option_int("unittest", 0); niters = get_option_int("max_iter", 6); // Number of iterations if (unittest > 0) training = get_option_string("training", ""); // Base filename else training = get_option_string("training"); validation = get_option_string("validation", ""); test = get_option_string("test", ""); D = get_option_int("D", D); maxval = get_option_float("maxval", 1e100); minval = get_option_float("minval", -1e100); if (minval >= maxval) logstream(LOG_FATAL)<<"Min allowed rating (--minval) should be smaller than max allowed rating (--maxval)" << std::endl; valrange = maxval - minval; assert(valrange > 0); quiet = get_option_int("quiet", 0); if (quiet) global_logger().set_log_level(LOG_ERROR); halt_on_rmse_increase = get_option_int("halt_on_rmse_increase", 0); load_factors_from_file = get_option_int("load_factors_from_file", 0); input_file_offset = get_option_int("input_file_offset", input_file_offset); /* find out loss type (optional, for SGD variants only) */ loss = 
get_option_string("loss", loss); if (loss == "square") loss_type = SQUARE; else if (loss == "logistic") loss_type = LOGISTIC; else if (loss == "abs") loss_type = ABS; else if (loss == "ap") loss_type = AP; else logstream(LOG_FATAL)<<"Loss type should be one of [square,logistic,abs] (for example, --loss==square);" << std::endl; calc_ap = get_option_int("calc_ap", calc_ap); if (calc_ap) loss_type = AP; ap_number = get_option_int("ap_number", ap_number); kfold_cross_validation = get_option_int("kfold_cross_validation", kfold_cross_validation); kfold_cross_validation_index = get_option_int("kfold_cross_validation_index", kfold_cross_validation_index); if (kfold_cross_validation_index > 0){ if (kfold_cross_validation_index >= kfold_cross_validation) logstream(LOG_FATAL)<<"kfold_cross_validation index should be between 0 to kfold_cross_validation-1 parameter" << std::endl; } if (kfold_cross_validation != 0){ logstream(LOG_WARNING)<<"Activating kfold cross vlidation with K="<< kfold_cross_validation << std::endl; if (training == validation) logstream(LOG_FATAL)<<"Using cross validation, validation file (--validation=filename) should have a different name than training" << std::endl; if (validation == "") logstream(LOG_FATAL)<<"You must provide validation input file name (--validation=filename) when using k-fold cross validation" << std::endl; clean_cache = 1; } regnormal = get_option_int("regnormal", regnormal); clean_cache = get_option_int("clean_cache", clean_cache); if (clean_cache) remove_cached_files(); R_output_format = get_option_int("R_output_format", R_output_format); } template<typename T> void set_engine_flags(T & pengine){ pengine.set_disable_vertexdata_storage(); pengine.set_enable_deterministic_parallelism(false); pengine.set_modifies_inedges(false); pengine.set_modifies_outedges(false); pengine.set_preload_commit(false); } template<typename T> void set_engine_flags(T & pengine, bool modify_outedges){ pengine.set_disable_vertexdata_storage(); 
pengine.set_enable_deterministic_parallelism(false); pengine.set_modifies_inedges(false); pengine.set_modifies_outedges(modify_outedges); pengine.set_preload_commit(false); } void print_copyright(){ logstream(LOG_WARNING)<<"GraphChi Collaborative filtering library is written by Danny Bickson (c). Send any " " comments or bug reports to danny.bickson@gmail.com " << std::endl; } void print_config(){ std::cout<<"[feature_width] => [" << D << "]" << std::endl; std::cout<<"[users] => [" << M << "]" << std::endl; std::cout<<"[movies] => [" << N << "]" <<std::endl; std::cout<<"[training_ratings] => [" << L << "]" << std::endl; std::cout<<"[number_of_threads] => [" << number_of_omp_threads() << "]" <<std::endl; std::cout<<"[membudget_Mb] => [" << get_option_int("membudget_mb") << "]" <<std::endl; } template<typename T> void init_feature_vectors(uint size, T& latent_factors_inmem, bool randomize = true, double scale = 1.0){ assert(size > 0); srand48(time(NULL)); latent_factors_inmem.resize(size); // Initialize in-memory vertices. if (!randomize) return; #pragma omp parallel for for (int i=0; i < (int)size; i++){ for (int j=0; j<D; j++) latent_factors_inmem[i].pvec[j] = scale * drand48(); } } #endif //_COMMON_H__
09jijiangwen-download
toolkits/collaborative_filtering/common.hpp
C++
asf20
7,535
/** * @file * @author Danny Bickson * @version 1.0 * * @section LICENSE * * Copyright [2012] [Carnegie Mellon University] * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * @section DESCRIPTION * * Implementation of the gensgd algorithm. A generalization of SGD algorithm when there are multiple features for each * rating, in the form * [from] [to] [feature1] [feature2] [feature3] ... [featureN] [rating] * (It is also possible to dynamically specify column numbers which are relevant) * Steffen Rendle (2010): Factorization Machines, in Proceedings of the 10th IEEE International Conference on Data Mining (ICDM 2010), Sydney, Australia. * Original implementation by Qiang Yan, Chinese Academy of Science. * note: this code version implements the SGD version of gensgd. In the original library there are also ALS and MCMC methods. * Also the treatment of features is richer in gensgd. The code here can serve for a quick evaluation but the user * is encouraged to try gensgd as well. 
*/ #include <vector> #include "common.hpp" #include "eigen_wrapper.hpp" #include "../parsers/common.hpp" #include <omp.h> #define MAX_FEATURES 256 #define FEATURE_WIDTH 11 //MAX NUMBER OF ALLOWED FEATURES IN TEXT FILE double gensgd_rate1 = 1e-02; double gensgd_rate2 = 1e-02; double gensgd_rate3 = 1e-02; double gensgd_rate4 = 1e-02; double gensgd_rate5 = 1e-02; double gensgd_mult_dec = 0.9; double gensgd_regw = 1e-3; double gensgd_regv = 1e-3; double gensgd_reg0 = 1e-1; bool debug = false; std::string user_file; //optional file with user features std::string item_file; //optional file with item features std::string user_links; //optional file with user to user links int limit_rating = 0; size_t vertex_with_no_edges = 0; int calc_error = 0; int file_columns = 0; std::vector<std::string> header_titles; int has_header_titles = 0; int has_user_titles = 0; int has_item_titles = 0; float cutoff = 0; size_t new_validation_users = 0; size_t new_test_users = 0; int json_input = 0; int cold_start = 0; double inputGlobalMean = 0; int binary_prediction = 0; struct stats{ float minval; float maxval; float meanval; stats(){ minval = maxval = meanval = 0; } }; enum _cold_start{ NONE = 0, GLOBAL = 1, ITEM = 3 }; struct feature_control{ std::vector<double_map> node_id_maps; double_map val_map; int rehash_value; int last_item; std::vector<stats> stats_array; int feature_num; int node_features; int node_links; int total_features; std::vector<bool> feature_selection; const std::string default_feature_str; std::vector<int> offsets; bool hash_strings; int from_pos; int to_pos; int val_pos; feature_control(){ rehash_value = 0; last_item = 0; total_features = 0; node_features = 0; feature_num = FEATURE_WIDTH; hash_strings = true; from_pos = 0; to_pos = 1; val_pos = -1; node_links = 0; feature_selection.resize(MAX_FEATURES+3); } }; feature_control fc; int num_feature_bins(){ int sum = 0; if (fc.hash_strings){ assert(2+fc.total_features+fc.node_features == (int)fc.node_id_maps.size()); for 
(int i=2; i < 2+fc.total_features+fc.node_features; i++){ sum+= fc.node_id_maps[i].string2nodeid.size(); } } else { for (int i=0; i< fc.total_features; i++) sum += (int)ceil((fc.stats_array[i].maxval - fc.stats_array[i].minval) + 1); } if (fc.total_features > 0) assert(sum > 0); return sum; } int calc_feature_num(){ return 2+fc.total_features+fc.last_item+fc.node_features; } void get_offsets(std::vector<int> & offsets){ assert(offsets.size() >= 2); offsets[0] = 0; offsets[1] = M; if (offsets.size() >= 3) offsets[2] = M+N; if (fc.hash_strings){ for (uint j=2; j< offsets.size()-1; j++){ offsets[j+1] = offsets[j] + fc.node_id_maps[j].string2nodeid.size(); logstream(LOG_DEBUG)<<"Offset " << j+1 << " is: " << offsets[j+1] << std::endl; } } else { for (uint j=2; j < offsets.size(); j++){ offsets[j+1] = offsets[j] + (int)ceil((fc.stats_array[j-2].maxval-fc.stats_array[j-2].minval)+1); logstream(LOG_DEBUG)<<"Offset " << j+1 << " is: " << offsets[j+1] << std::endl; } } } bool is_user(vid_t id){ return id < M; } bool is_item(vid_t id){ return id >= M && id < M+N; } bool is_time(vid_t id){ return id >= M+N; } vec errors_vec; #define BIAS_POS -1 struct vertex_data { vec pvec; double bias; int last_item; float avg_rating; sparse_vec features; sparse_vec links; //links to other users or items vertex_data() { bias = 0; last_item = 0; avg_rating = -1; } void set_val(int index, float val){ if (index == BIAS_POS) bias = val; else pvec[index] = val; } float get_val(int index){ if (index== BIAS_POS) return bias; else return pvec[index]; } }; struct edge_data { float features[FEATURE_WIDTH]; float weight; edge_data() { weight = 0; memset(features, 0, sizeof(float)*FEATURE_WIDTH); } edge_data(float weight, float * valarray, int size): weight(weight) { memcpy(features, valarray, sizeof(float)*size); } }; /** * Type definitions. Remember to create suitable graph shards using the * Sharder-program. 
*/
typedef vertex_data VertexDataType;
typedef edge_data EdgeDataType;  // Edges store the "rating" of user->movie pair

graphchi_engine<VertexDataType, EdgeDataType> * pengine = NULL;
std::vector<vertex_data> latent_factors_inmem;

// Number of latent-factor nodes that participate in one rating prediction:
// the feature columns + optional last-item node, plus (when present) the
// user/item node and its side-file features. (uint)-1 marks a missing
// user/item (cold start).
int calc_feature_node_array_size(uint node, uint item){
  if (node != (uint)-1){
    assert(node <= M);
    assert(node < latent_factors_inmem.size());
  }
  if (item != (uint)-1){
    assert(item <= N);
    // item factors live at offset fc.offsets[1] (== M) in the flat array
    assert(fc.offsets[1]+item < latent_factors_inmem.size());
  }
  int ret = fc.total_features+fc.last_item;
  // +1 for the node itself, plus one slot per sparse side feature it carries
  if (node != (uint)-1)
    ret+= (1+nnz(latent_factors_inmem[node].features));
  if (item != (uint)-1)
    ret += (1+nnz(latent_factors_inmem[fc.offsets[1]+item].features));
  assert(ret > 0);
  return ret;
}

/**
 * return a numeric node ID out of the string text read from file (training, validation or test)
 */
// pos: column role (0 = row/user, 1 = col/item, >=2 = feature column);
// i: input line number (for error reporting);
// read_only: lookup-only mode — returns -1 (as float) when the token is
// unknown instead of inserting it into the map.
float get_node_id(char * pch, int pos, size_t i, bool read_only = false){
  assert(pch != NULL);
  assert(i >= 0);
  float ret;
  //read numeric id
  if (!fc.hash_strings){
    // columns 0/1 are integer ids (made zero-based via input_file_offset);
    // feature columns are parsed as floats
    ret = (pos < 2 ? atoi(pch) : atof(pch));
    if (pos < 2)
      ret-=input_file_offset;
    if (pos == 0 && ret >= M)
      logstream(LOG_FATAL)<<"Row index larger than the matrix row size " << ret << " > " << M << " in line: " << i << std::endl;
    else if (pos == 1 && ret >= N)
      logstream(LOG_FATAL)<<"Col index larger than the matrix row size " << ret << " > " << N << " in line: " << i << std::endl;
  }
  //else read string id and assign numeric id
  else {
    uint id;
    assert(pos < (int)fc.node_id_maps.size());
    if (read_only){ // find if node was in map
      std::map<std::string,uint>::iterator it = fc.node_id_maps[pos].string2nodeid.find(pch);
      if (it != fc.node_id_maps[pos].string2nodeid.end()){
        ret = it->second;
        assert(ret < fc.node_id_maps[pos].string2nodeid.size());
      }
      else
        ret = -1;  // token unknown — caller treats (uint)-1 as "new" node
    }
    else {
      //else enter node into map (in case it did not exist) and return its position
      assign_id(fc.node_id_maps[pos], id, pch);
      assert(id < fc.node_id_maps[pos].string2nodeid.size());
      ret = id;
    }
  }
  // in insert mode every token must have resolved to a valid id
  if (!read_only)
    assert(ret != -1);
  return ret;
}

float
get_value(char * pch, bool read_only){ float ret; if (!fc.rehash_value){ if ( pch[0] == '"' ) { pch++; } ret = atof(pch); } else { uint id; if (read_only){ // find if node was in map std::map<std::string,uint>::iterator it = fc.val_map.string2nodeid.find(pch); if (it != fc.val_map.string2nodeid.end()){ ret = it->second; } else ret = -1; } else { //else enter node into map (in case it did not exist) and return its position assign_id(fc.val_map, id, pch); assert(id < fc.val_map.string2nodeid.size()); ret = id; } } if (std::isnan(ret) || std::isinf(ret)) logstream(LOG_FATAL)<<"Failed to read value" << std::endl; return ret; } char * read_one_token(char *& linebuf, const char * pspaces, size_t i, char * linebuf_debug, int token, int type = TRAINING){ char *pch = strsep(&linebuf,pspaces); if (pch == NULL && type == TRAINING) logstream(LOG_FATAL)<<"Error reading line " << i << " [ " << linebuf_debug << " ] " << std::endl; else if (pch == NULL && type == TEST) return NULL; if (json_input){ //for json, multiple separators may lead to empty strings, we simply skip them while(pch && !strcmp(pch, "")){ pch = strsep(&linebuf, pspaces); if (pch == NULL) logstream(LOG_FATAL)<<"Error reading line " << i << " [ " << linebuf_debug << " ] " << " token number: " << token << std::endl; } //toekn should not be empty assert(strcmp(pch, "")); if (i == 0) header_titles.push_back(pch); pch = strsep(&linebuf, pspaces); //for json, multiple separators may lead to empty strings, we simply skip them while(pch && !strcmp(pch, "")){ pch = strsep(&linebuf, pspaces); if (pch == NULL) logstream(LOG_FATAL)<<"Error reading line " << i << " [ " << linebuf_debug << " ] " << " token number: " << token << std::endl; } } return pch; } /* Read and parse one input line from file */ bool read_line(FILE * f, const std::string filename, size_t i, uint & I, uint & J, float &val, std::vector<float>& valarray, int type, char * linebuf_debug){ char * linebuf = NULL; size_t linesize = 0; int token = 0; int index = 
0; int rc = getline(&linebuf, &linesize, f); if (rc == -1){ perror("getline"); logstream(LOG_FATAL)<<"Failed to get line: " << i << " in file: " << filename << std::endl; } char * linebuf_to_free = linebuf; strncpy(linebuf_debug, linebuf, 1024); assert(file_columns >= 2); const char* spaces[] = {"\t,\r\n "}; const char * json_spaces[] = {"\t,\r\n \":{}"}; const char * pspaces = ((!json_input) ? *spaces : *json_spaces); char * pch = NULL; while (token < file_columns){ /* READ FROM */ if (token == fc.from_pos){ pch = read_one_token(linebuf, pspaces, i, linebuf_debug, token); I = (uint)get_node_id(pch, 0, i, type != TRAINING); if (type == TRAINING){ assert( I >= 0 && I < M); } token++; } else if (token == fc.to_pos){ /* READ TO */ pch = read_one_token(linebuf, pspaces, i, linebuf_debug, token); J = (uint)get_node_id(pch, 1, i, type != TRAINING); if (type == TRAINING) assert(J >= 0 && J < N); token++; } else if (token == fc.val_pos){ /* READ RATING */ pch = read_one_token(linebuf, pspaces, i, linebuf_debug, token, type); if (pch == NULL && type == TEST) return true; val = get_value(pch, type != TRAINING); token++; } else { if (token >= file_columns) break; /* READ FEATURES */ pch = read_one_token(linebuf, pspaces, i, linebuf_debug, token, type); if (pch == NULL && type == TEST) return true; if (!fc.feature_selection[token]){ token++; continue; } assert(index < (int)valarray.size()); valarray[index] = get_node_id(pch, index+2, i, type != TRAINING); if (type == TRAINING) if (std::isnan(valarray[index])) logstream(LOG_FATAL)<<"Error reading line " << i << " feature " << token << " [ " << linebuf_debug << " ] " << std::endl; //calc stats about ths feature if (type == TRAINING && !fc.hash_strings){ fc.stats_array[index].minval = std::min(fc.stats_array[index].minval, valarray[index]); fc.stats_array[index].maxval = std::max(fc.stats_array[index].maxval, valarray[index]); fc.stats_array[index].meanval += valarray[index]; } index++; token++; } }//end while 
free(linebuf_to_free); return true; }//end read_line /* compute an edge prediction based on input features */ float compute_prediction( const uint I, const uint J, const float val, double & prediction, float * valarray, float (*prediction_func)(const vertex_data ** array, int arraysize, float rating, double & prediction, vec * psum), vec * psum, vertex_data **& node_array){ if (I == (uint)-1 && J == (uint)-1) logstream(LOG_FATAL)<<"BUG: can not compute prediction for new user and new item" << std::endl; if (J != (uint)-1) assert(J >=0 && J <= N); if (I != (uint)-1) assert(I>=0 && I <= M); /* COMPUTE PREDICTION */ /* USER NODE **/ int index = 0; int loc = 0; if (I != (uint)-1){ node_array[index] = &latent_factors_inmem[I+fc.offsets[loc]]; if (node_array[index]->pvec[0] >= 1e5) logstream(LOG_FATAL)<<"Got into numerical problem, try to decrease SGD step size" << std::endl; index++; } loc++; /* 1) ITEM NODE */ if (J != (uint)-1){ assert(J+fc.offsets[index] < latent_factors_inmem.size()); node_array[index] = &latent_factors_inmem[J+fc.offsets[loc]]; if (node_array[index]->pvec[0] >= 1e5) logstream(LOG_FATAL)<<"Got into numerical problem, try to decrease SGD step size" << std::endl; index++; } loc++; /* 2) FEATURES GIVEN IN RATING LINE */ for (int j=0; j< fc.total_features; j++){ uint pos = (uint)ceil(valarray[j]+fc.offsets[j+loc]-fc.stats_array[j].minval); //assert(pos >= 0 && pos < latent_factors_inmem.size()); if (pos < 0 || pos >= latent_factors_inmem.size()) logstream(LOG_FATAL)<<"Bug: j is: " << j << " fc.total_features " << fc.total_features << " index : " << index << " loc: " << loc << " fc.offsets " << fc.offsets[j+loc] << " vlarray[j] " << valarray[j] << " pos: " << pos << " latent_factors_inmem.size() " << latent_factors_inmem.size() << std::endl; node_array[j+index] = & latent_factors_inmem[pos]; if (node_array[j+index]->pvec[0] >= 1e5) logstream(LOG_FATAL)<<"Got into numerical problem, try to decrease SGD step size" << std::endl; } index+= fc.total_features; 
loc += fc.total_features; /* 3) USER FEATURES */ if (user_file != ""){ if (I != (uint)-1){ int i = 0; FOR_ITERATOR(j, latent_factors_inmem[I+fc.offsets[0]].features){ int pos; if (user_links != ""){ pos = j.index(); assert(pos < (int)M); } else { pos = j.index()+fc.offsets[loc]; assert((uint)loc < fc.node_id_maps.size()); assert(j.index() < (int)fc.node_id_maps[loc].string2nodeid.size()); assert(pos >= 0 && pos < (int)latent_factors_inmem.size()); assert(pos >= (int)fc.offsets[loc]); } //logstream(LOG_INFO)<<"setting index " << i+index << " to: " << pos << std::endl; node_array[i+index] = & latent_factors_inmem[pos]; if (node_array[i+index]->pvec[0] >= 1e5) logstream(LOG_FATAL)<<"Got into numerical problem, try to decrease SGD step size" << std::endl; i++; } assert(i == nnz(latent_factors_inmem[I+fc.offsets[0]].features)); index+= nnz(latent_factors_inmem[I+fc.offsets[0]].features); loc+=1; } } /* 4) ITEM FEATURES */ if (item_file != ""){ if (J != (uint)-1){ int i=0; FOR_ITERATOR(j, latent_factors_inmem[J+fc.offsets[1]].features){ uint pos = j.index()+fc.offsets[loc]; assert(j.index() < (int)fc.node_id_maps[loc].string2nodeid.size()); assert(pos >= 0 && pos < latent_factors_inmem.size()); assert(pos >= (uint)fc.offsets[loc]); //logstream(LOG_INFO)<<"setting index " << i+index << " to: " << pos << std::endl; node_array[i+index] = & latent_factors_inmem[pos]; if (node_array[i+index]->pvec[0] >= 1e5) logstream(LOG_FATAL)<<"Got into numerical problem, try to decrease SGD step size" << std::endl; i++; } assert(i == nnz(latent_factors_inmem[J+fc.offsets[1]].features)); index+= nnz(latent_factors_inmem[J+fc.offsets[1]].features); loc+=1; } } if (fc.last_item){ uint pos = latent_factors_inmem[I].last_item + fc.offsets[2+fc.total_features+fc.node_features]; assert(pos < latent_factors_inmem.size()); node_array[index] = &latent_factors_inmem[pos]; if (node_array[index]->pvec[0] >= 1e5) logstream(LOG_FATAL)<<"Got into numerical problem, try to decrease SGD step size" << 
std::endl; index++; loc+=1; } assert(index == calc_feature_node_array_size(I,J)); (*prediction_func)((const vertex_data**)node_array, calc_feature_node_array_size(I,J), val, prediction, psum); return pow(val - prediction,2); } #include "io.hpp" #include "../parsers/common.hpp" /** * Create a bipartite graph from a matrix. Each row corresponds to vertex * with the same id as the row number (0-based), but vertices correponsing to columns * have id + num-rows. * Line format of the type * [user] [item] [feature1] [feature2] ... [featureN] [rating] */ /* Read input file, process it and save a binary representation for faster loading */ template <typename als_edge_type> int convert_matrixmarket_N(std::string base_filename, bool square, feature_control & fc, int limit_rating = 0) { // Note, code based on: http://math.nist.gov/MatrixMarket/mmio/c/example_read.c FILE *f; size_t nz; /** * Create sharder object */ int nshards; sharder<als_edge_type> sharderobj(base_filename); sharderobj.start_preprocessing(); detect_matrix_size(base_filename, f, M, N, nz); if (f == NULL) logstream(LOG_FATAL) << "Could not open file: " << base_filename << ", error: " << strerror(errno) << std::endl; if (M == 0 && N == 0) logstream(LOG_FATAL)<<"Failed to detect matrix size. Please prepare a file named: " << base_filename << ":info with matrix market header, as explained here: http://bickson.blogspot.co.il/2012/12/collaborative-filtering-3rd-generation_14.html " << std::endl; logstream(LOG_INFO) << "Starting to read matrix-market input. 
Matrix dimensions: " << M << " x " << N << ", non-zeros: " << nz << std::endl; if (has_header_titles){ char * linebuf = NULL; size_t linesize; char linebuf_debug[1024]; /* READ LINE */ int rc = getline(&linebuf, &linesize, f); if (rc == -1) logstream(LOG_FATAL)<<"Error header line " << " [ " << linebuf_debug << " ] " << std::endl; strncpy(linebuf_debug, linebuf, 1024); char *pch = strtok(linebuf,"\t,\r;"); if (pch == NULL) logstream(LOG_FATAL)<<"Error header line " << " [ " << linebuf_debug << " ] " << std::endl; header_titles.push_back(pch); while (pch != NULL){ pch = strtok(NULL, "\t,\r;"); if (pch == NULL) break; header_titles.push_back(pch); } } compute_matrix_size(nz, TRAINING); uint I, J; int val_array_len = std::max(1, fc.total_features); assert(val_array_len < FEATURE_WIDTH); std::vector<float> valarray; valarray.resize(val_array_len); float val; if (!fc.hash_strings){ for (int i=0; i< fc.total_features; i++){ fc.stats_array[i].minval = 1e100; fc.stats_array[i].maxval = -1e100; } } if (limit_rating > 0 && limit_rating < (int)nz) nz = limit_rating; char linebuf_debug[1024]; for (size_t i=0; i<nz; i++) { if (!read_line(f, base_filename, i,I, J, val, valarray, TRAINING, linebuf_debug)) logstream(LOG_FATAL)<<"Failed to read line: " <<i<< " in file: " << base_filename << std::endl; if (I>= M || J >= N || I < 0 || J < 0){ if (i == 0) logstream(LOG_FATAL)<<"Failed to parsed first line, there are too many tokens. Did you forget the --has_header_titles=1 flag when file has string column headers?" 
<< std::endl; else logstream(LOG_FATAL)<<"Bug: can not add edge from " << I << " to J " << J << " since max is: " << M <<"x" <<N<<std::endl; } bool active_edge = decide_if_edge_is_active(i, TRAINING); if (active_edge){ //calc stats globalMean += val; sharderobj.preprocessing_add_edge(I, square?J:M+J, als_edge_type(val, &valarray[0], val_array_len)); } } sharderobj.end_preprocessing(); //calc stats assert(L > 0); for (int i=0; i< fc.total_features; i++){ fc.stats_array[i].meanval /= L; } //assert(globalMean != 0); if (globalMean == 0) logstream(LOG_WARNING)<<"Found global mean of the data to be zero (val_pos). Please verify this is correct." << std::endl; globalMean /= L; logstream(LOG_INFO)<<"Computed global mean is: " << globalMean << std::endl; inputGlobalMean = globalMean; //print features for (int i=0; i< fc.total_features; i++){ logstream(LOG_INFO) << "Feature " << i << " min val: " << fc.stats_array[i].minval << " max val: " << fc.stats_array[i].maxval << " mean val: " << fc.stats_array[i].meanval << std::endl; } FILE * outf = fopen((base_filename + ".gm").c_str(), "w"); fprintf(outf, "%d\n%d\n%ld\n%d\n%12.8lg", M, N, L, fc.total_features, globalMean); for (int i=0; i < fc.total_features; i++){ fprintf(outf, "%12.8g\n%12.8g\n%12.8g\n", fc.stats_array[i].minval, fc.stats_array[i].maxval, fc.stats_array[i].meanval); } fclose(outf); fclose(f); if (fc.hash_strings){ for (int i=0; i< fc.total_features+2; i++){ if (fc.node_id_maps[i].string2nodeid.size() == 0) logstream(LOG_FATAL)<<"Failed to save feature number : " << i << " no values find in data " << std::endl; } } logstream(LOG_INFO) << "Now creating shards." 
<< std::endl; // Shard with a specified number of shards, or determine automatically if not defined nshards = sharderobj.execute_sharding(get_option_string("nshards", "auto")); return nshards; } /* read node features from file */ void read_node_features(std::string base_filename, bool square, feature_control & fc, bool user, bool binary) { FILE *f; if ((f = fopen(base_filename.c_str(), "r")) == NULL) { logstream(LOG_FATAL) << "Could not open file: " << base_filename << ", error: " << strerror(errno) << std::endl; } binary = true; //TODO double_map fmap; fc.node_id_maps.push_back(fmap); fc.node_features++; stats stat; fc.stats_array.push_back(stat); uint I, J = -1; char * linebuf = NULL; char linebuf_debug[1024]; size_t linesize; size_t lines = 0; size_t tokens = 0; float val = 1; int missing_nodes = 0; while(true){ /* READ LINE */ int rc = getline(&linebuf, &linesize, f); if (rc == -1) break; strncpy(linebuf_debug, linebuf, 1024); lines++; //skip over header titles (if any) if (lines == 1 && user && has_user_titles) continue; else if (lines == 1 && !user && has_item_titles) continue; /** READ [FROM] */ char *pch = strtok(linebuf,"\t,\r; "); if (pch == NULL) logstream(LOG_FATAL)<<"Error reading line " << lines << " [ " << linebuf_debug << " ] " << std::endl; I = (uint)get_node_id(pch, user?0:1, lines, true); if (I == (uint)-1){ //user id was not found in map, so we do not need this users features missing_nodes++; continue; } if (user) assert(I >= 0 && I < M); else assert(I>=0 && I< N); /** READ USER FEATURES */ while (pch != NULL){ pch = strtok(NULL, "\t,\r; "); if (pch == NULL) break; if (binary){ J = (uint)get_node_id(pch, 2+fc.total_features+fc.node_features-1, lines); } else { pch = strtok(NULL, "\t\r,;: "); if (pch == NULL) logstream(LOG_FATAL)<<"Failed to read feture value" << std::endl; val = atof(pch); } assert(J >= 0); if (user) assert(I < latent_factors_inmem.size()); else assert(I+M < latent_factors_inmem.size()); set_new(latent_factors_inmem[user? 
I : I+M].features, J, val); tokens++; //update stats if needed } } assert(tokens > 0); logstream(LOG_DEBUG)<<"Read a total of " << lines << " node features. Tokens: " << tokens << " avg tokens: " << (lines/tokens) << " user? " << user << " new entries: " << fc.node_id_maps[2+fc.total_features+fc.node_features-1].string2nodeid.size() << std::endl; if (missing_nodes > 0) std::cerr<<"Warning: missing: " << missing_nodes << " from node feature file: " << base_filename << " out of: " << lines << std::endl; } /* read node features from file */ void read_node_links(std::string base_filename, bool square, feature_control & fc, bool user, bool binary) { FILE *f; if ((f = fopen(base_filename.c_str(), "r")) == NULL) { logstream(LOG_FATAL) << "Could not open file: " << base_filename << ", error: " << strerror(errno) << std::endl; } //double_map fmap; //fc.node_id_maps.push_back(fmap); fc.node_links++; //stats stat; //fc.stats_array.push_back(stat); uint I, J = -1; char * linebuf = NULL; char linebuf_debug[1024]; size_t linesize; size_t lines = 0; size_t tokens = 0; float val = 1; while(true){ /* READ LINE */ int rc = getline(&linebuf, &linesize, f); if (rc == -1) break; strncpy(linebuf_debug, linebuf, 1024); lines++; /** READ [FROM] */ char *pch = strtok(linebuf,"\t,\r; "); if (pch == NULL) logstream(LOG_FATAL)<<"Error reading line " << lines << " [ " << linebuf_debug << " ] " << std::endl; I = (uint)get_node_id(pch, user? 0 : 1, lines, true); if (I == (uint)-1)//user id was not found in map, we do not need this user link features continue; if (user) assert(I < (uint)fc.offsets[1]); else assert(I < (uint)fc.offsets[2]); /** READ TO */ pch = strtok(NULL, "\t,\r; "); if (pch == NULL) logstream(LOG_FATAL)<<"Failed to read to field [ " << linebuf_debug << " ] " << std::endl; J = (uint)get_node_id(pch, user? 0 : 1, lines); set_new(latent_factors_inmem[user? 
I : I+M].links, J, val); tokens++; //update stats if needed } logstream(LOG_DEBUG)<<"Read a total of " << lines << " node features. Tokens: " << tokens << " user? " << user << " new entries: " << fc.node_id_maps[user? 0 : 1].string2nodeid.size() << std::endl; } #include "rmse.hpp" /** compute validation rmse */ void validation_rmse_N( float (*prediction_func)(const vertex_data ** array, int arraysize, float rating, double & prediction, vec * psum) ,graphchi_context & gcontext, feature_control & fc, bool square = false) { assert(fc.total_features <= fc.feature_num); if ((validation == "") || !file_exists(validation)) { if ((validation != (training + "e")) && gcontext.iteration == 0) logstream(LOG_WARNING) << "Validation file was specified, but not found:" << validation << std::endl; std::cout << std::endl; return; } FILE *f = NULL; size_t nz; detect_matrix_size(validation, f, Me, Ne, nz); if (f == NULL){ logstream(LOG_WARNING)<<"Failed to open validation data. Skipping."<<std::endl; return; } if ((M > 0 && N > 0) && (Me != M || Ne != N)) logstream(LOG_WARNING)<<"Input size of validation matrix must be identical to training matrix, namely " << M << "x" << N << std::endl; compute_matrix_size(nz, VALIDATION); last_validation_rmse = dvalidation_rmse; dvalidation_rmse = 0; double validation_error = 0; std::vector<float> valarray; valarray.resize(fc.total_features); uint I, J; float val; char linebuf_debug[1024]; for (size_t i=0; i<nz; i++) { int size = num_feature_bins(); if (!read_line(f, validation, i, I, J, val, valarray, VALIDATION, linebuf_debug)) logstream(LOG_FATAL)<<"Failed to read line: " << i << " in file: " << validation << std::endl; bool active_edge = decide_if_edge_is_active(i, VALIDATION); if (active_edge){ assert(size == num_feature_bins()); size = 0; //to avoid warning if (I == (uint)-1 || J == (uint)-1){ new_validation_users++; continue; } double prediction; vertex_data ** node_array = new vertex_data*[calc_feature_node_array_size(I,J)]; for (int k=0; 
k< calc_feature_node_array_size(I,J); k++) node_array[k] = NULL; vec sum; compute_prediction(I, J, val, prediction, &valarray[0], prediction_func, &sum, node_array); delete [] node_array; dvalidation_rmse += pow(prediction - val, 2); if (calc_error) if ((prediction < cutoff && val > cutoff) || (prediction > cutoff && val < cutoff)) validation_error++; } } fclose(f); assert(Le > 0); dvalidation_rmse = sqrt(dvalidation_rmse / (double)Le); std::cout<<" Validation RMSE: " << std::setw(10) << dvalidation_rmse; if (!calc_error) std::cout << std::endl; else std::cout << " Validation error: " << std::setw(10) << validation_error/Le << std::endl; if (halt_on_rmse_increase && dvalidation_rmse > last_validation_rmse && gcontext.iteration > 0){ logstream(LOG_WARNING)<<"Stopping engine because of validation RMSE increase" << std::endl; gcontext.set_last_iteration(gcontext.iteration); } } /* compute predictions for test data */ void test_predictions_N( float (*prediction_func)(const vertex_data ** node_array, int node_array_size, float rating, double & predictioni, vec * sum), feature_control & fc, bool square = false) { FILE * f = NULL; uint Mt, Nt; size_t nz; if (test == ""){ logstream(LOG_INFO)<<"No test file was found, skipping test predictions " << std::endl; return; } if (!file_exists(test)) { if (test != (training + "t")) logstream(LOG_WARNING)<<" test predictions file was specified but not found: " << test << std::endl; return; } detect_matrix_size(test, f, Mt, Nt, nz); if (f == NULL){ logstream(LOG_WARNING)<<"Failed to open test file. 
Skipping " << std::endl; return; } if ((M > 0 && N > 0 ) && (Mt != M || Nt != N)) logstream(LOG_FATAL)<<"Input size of test matrix must be identical to training matrix, namely " << M << "x" << N << std::endl; FILE * fout = open_file((test + ".predict").c_str(),"w"); std::vector<float> valarray; valarray.resize(fc.total_features); float val; double prediction; uint I,J; uint i=0; char linebuf_debug[1024]; for (i=0; i<nz; i++) { if (!read_line(f, test, i, I, J, val, valarray, TEST, linebuf_debug)) logstream(LOG_FATAL)<<"Failed to read line: " <<i << " in file: " << test << std::endl; if (I == (uint)-1 || J == (uint)-1){ if (cold_start == NONE){ fprintf(fout, "N/A\n"); new_test_users++; } else if (cold_start ==2 || (cold_start == 1 && I ==(uint)-1 && J==(uint)-1)){ fprintf(fout, "%12.8g\n", inputGlobalMean); new_test_users++; } else if (cold_start == ITEM && I == (uint)-1 && J != (uint)-1) fprintf(fout, "%12.8g\n", latent_factors_inmem[fc.offsets[1]+J].avg_rating); else if (cold_start == ITEM && I != (uint)-1 && J == (uint)-1) fprintf(fout, "%12.8g\n", latent_factors_inmem[I].avg_rating); else if (cold_start == ITEM){ fprintf(fout, "%12.8g\n", inputGlobalMean); new_test_users++; } continue; } vertex_data ** node_array = new vertex_data*[calc_feature_node_array_size(I,J)]; vec sum; compute_prediction(I, J, val, prediction, &valarray[0], prediction_func, &sum, node_array); if (binary_prediction) prediction = (prediction > cutoff); fprintf(fout, "%12.8lg\n", prediction); delete[] node_array; } if (i != nz) logstream(LOG_FATAL)<<"Missing input lines in test file. 
Should be : " << nz << " found only " << i << std::endl; fclose(f); fclose(fout); logstream(LOG_INFO)<<"Finished writing " << nz << " predictions to file: " << test << ".predict" << std::endl; } /* This function implements equation (5) in the libFM paper: * http://www.csie.ntu.edu.tw/~b97053/paper/Factorization%20Machines%20with%20libFM.pdf * Note that in our implementation x_i are all 1 so the formula is slightly simpler */ float gensgd_predict(const vertex_data** node_array, int node_array_size, const float rating, double& prediction, vec* sum){ vec sum_sqr = zeros(D); *sum = zeros(D); prediction = globalMean; assert(!std::isnan(prediction)); for (int i=0; i< node_array_size; i++) prediction += node_array[i]->bias; assert(!std::isnan(prediction)); for (int j=0; j< D; j++){ for (int i=0; i< node_array_size; i++){ sum->operator[](j) += node_array[i]->pvec[j]; if (sum->operator[](j) >= 1e5) logstream(LOG_FATAL)<<"Got into numerical problems. Try to decrease step size" << std::endl; sum_sqr[j] += pow(node_array[i]->pvec[j],2); } prediction += 0.5 * (pow(sum->operator[](j),2) - sum_sqr[j]); assert(!std::isnan(prediction)); } //truncate prediction to allowed values prediction = std::min((double)prediction, maxval); prediction = std::max((double)prediction, minval); //return the squared error float err = rating - prediction; assert(!std::isnan(err)); return err*err; } float gensgd_predict(const vertex_data** node_array, int node_array_size, const float rating, double & prediction){ vec sum; return gensgd_predict(node_array, node_array_size, rating, prediction, &sum); } void init_gensgd(bool load_factors_from_file){ srand(time(NULL)); int nodes = M+N+num_feature_bins()+fc.last_item*M; latent_factors_inmem.resize(nodes); int howmany = calc_feature_num(); logstream(LOG_DEBUG)<<"Going to calculate: " << howmany << " offsets." 
<< std::endl; fc.offsets.resize(howmany); get_offsets(fc.offsets); assert(D > 0); if (!load_factors_from_file){ double factor = 0.1/sqrt(D); #pragma omp parallel for for (int i=0; i< nodes; i++){ latent_factors_inmem[i].pvec = (debug ? 0.1*ones(D) : (::randu(D)*factor)); } } } void training_rmse_N(int iteration, graphchi_context &gcontext, bool items = false){ last_training_rmse = dtraining_rmse; dtraining_rmse = 0; size_t total_errors = 0; int start = 0; int end = M; if (items){ start = M; end = M+N; } dtraining_rmse = sum(rmse_vec); if (calc_error) total_errors = (size_t)sum(errors_vec); dtraining_rmse = sqrt(dtraining_rmse / pengine->num_edges()); if (calc_error) std::cout<< std::setw(10) << mytimer.current_time() << ") Iteration: " << std::setw(3) <<iteration<<" Training RMSE: " << std::setw(10)<< dtraining_rmse << " Train err: " << std::setw(10) << (total_errors/(double)L); else std::cout<< std::setw(10) << mytimer.current_time() << ") Iteration: " << std::setw(3) <<iteration<<" Training RMSE: " << std::setw(10)<< dtraining_rmse; } /** * GraphChi programs need to subclass GraphChiProgram<vertex-type, edge-type> * class. The main logic is usually in the update function. */ struct GensgdVerticesInMemProgram : public GraphChiProgram<VertexDataType, EdgeDataType> { /* * Vertex update function - computes the least square step */ void update(graphchi_vertex<VertexDataType, EdgeDataType> &vertex, graphchi_context &gcontext) { if (fc.last_item && gcontext.iteration == 0){ if (is_user(vertex.id()) && vertex.num_outedges() > 0) { //user node. find the last rated item and store it. we assume items are sorted by time! 
vertex_data& user = latent_factors_inmem[vertex.id()]; int max_time = 0; for(int e=0; e < vertex.num_outedges(); e++) { const edge_data & edge = vertex.outedge(e)->get_data(); if (edge.features[0] >= max_time){ //first feature is time max_time = (int)ceil(edge.features[0]); user.last_item = vertex.outedge(e)->vertex_id() - M; } } } else if (is_user(vertex.id()) && vertex.num_outedges() == 0) vertex_with_no_edges++; return; } if (cold_start == ITEM && gcontext.iteration == 0){ vertex_data & item = latent_factors_inmem[vertex.id()]; item.avg_rating = 0; for(int e=0; e < vertex.num_edges(); e++) { item.avg_rating += vertex.edge(e)->get_data().weight; } item.avg_rating /= vertex.num_edges(); } //go over all user nodes if (is_user(vertex.id())){ //vertex_data& user = latent_factors_inmem[vertex.id()]; //assert(user.last_item >= 0 && user.last_item < (int)N); //go over all observed ratings for(int e=0; e < vertex.num_outedges(); e++) { int howmany = calc_feature_node_array_size(vertex.id(), vertex.outedge(e)->vertex_id()-M); vertex_data ** node_array = new vertex_data*[howmany]; for (int i=0; i< howmany; i++) node_array[i] = NULL; const edge_data & data = vertex.outedge(e)->get_data(); float rui = data.weight; double pui; vec sum; //compute current prediction rmse_vec[omp_get_thread_num()] += compute_prediction(vertex.id(), vertex.outedge(e)->vertex_id()-M, rui ,pui, (float*)data.features, gensgd_predict, &sum, node_array); if (calc_error) if ((pui < cutoff && rui > cutoff) || (pui > cutoff && rui < cutoff)) errors_vec[omp_get_thread_num()]++; float eui = pui - rui; //update global mean bias globalMean -= gensgd_rate1 * (eui + gensgd_reg0 * globalMean); //update node biases and vectors for (int i=0; i < calc_feature_node_array_size(vertex.id(), vertex.outedge(e)->vertex_id()-M); i++){ double gensgd_rate; if (i == 0) //user gensgd_rate = gensgd_rate1; else if (i == 1) //item gensgd_rate = gensgd_rate2; else if (i < 2+fc.total_features) //rating features gensgd_rate = 
gensgd_rate3; else if (i < 2+fc.total_features+fc.node_features) //user and item features gensgd_rate = gensgd_rate4; else gensgd_rate = gensgd_rate5; //last item node_array[i]->bias -= gensgd_rate * (eui + gensgd_regw* node_array[i]->bias); assert(!std::isnan(node_array[i]->bias)); assert(node_array[i]->bias < 1e5); vec grad = sum - node_array[i]->pvec; node_array[i]->pvec -= gensgd_rate * (eui*grad + gensgd_regv * node_array[i]->pvec); assert(!std::isnan(node_array[i]->pvec[0])); assert(node_array[i]->pvec[0] < 1e5); } delete[] node_array; } } }; /** * Called after an iteration has finished. */ void after_iteration(int iteration, graphchi_context &gcontext) { if (iteration == 1 && vertex_with_no_edges > 0) logstream(LOG_WARNING)<<"There are " << vertex_with_no_edges << " users without ratings" << std::endl; gensgd_rate1 *= gensgd_mult_dec; gensgd_rate2 *= gensgd_mult_dec; gensgd_rate3 *= gensgd_mult_dec; gensgd_rate4 *= gensgd_mult_dec; gensgd_rate5 *= gensgd_mult_dec; training_rmse_N(iteration, gcontext); validation_rmse_N(&gensgd_predict, gcontext, fc); }; /** * Called before an iteration is started. */ void before_iteration(int iteration, graphchi_context &gcontext) { rmse_vec = zeros(gcontext.execthreads); if (calc_error) errors_vec = zeros(gcontext.execthreads); } }; void output_gensgd_result(std::string filename) { MMOutputter_mat<vertex_data> mmoutput(filename + "_U.mm", 0, latent_factors_inmem.size(), "This file contains Gensgd output matrices. In each row D factors of a single user node, then item nodes, then features", latent_factors_inmem); MMOutputter_vec<vertex_data> mmoutput_bias(filename + "_U_bias.mm", 0, latent_factors_inmem.size(), BIAS_POS, "This file contains Gensgd output bias vector. 
In each row a single user bias.", latent_factors_inmem); MMOutputter_scalar gmean(filename + "_global_mean.mm", "This file contains Gensgd global mean which is required for computing predictions.", globalMean); //output mapping between string to array index of features. if (fc.hash_strings){ assert(2+fc.total_features+fc.node_features == (int)fc.node_id_maps.size()); for (int i=0; i < 2+fc.total_features+fc.node_features; i++){ char buf[256]; sprintf(buf, "%s.map.%d", filename.c_str(), i); save_map_to_text_file(fc.node_id_maps[i].string2nodeid, buf, fc.offsets[i]); } } logstream(LOG_INFO) << " GENSGD output files (in matrix market format): " << filename << "_U.mm" << ", "<< filename << "_global_mean.mm, " << filename << "_U_bias.mm " <<std::endl; } int main(int argc, const char ** argv) { print_copyright(); /* GraphChi initialization will read the command line arguments and the configuration file. */ graphchi_init(argc, argv); /* Metrics object for keeping track of performance counters and other information. Currently required. 
*/ metrics m("als-tensor-inmemory-factors"); //specific command line parameters for gensgd gensgd_rate1 = get_option_float("gensgd_rate1", gensgd_rate1); gensgd_rate2 = get_option_float("gensgd_rate2", gensgd_rate2); gensgd_rate3 = get_option_float("gensgd_rate3", gensgd_rate3); gensgd_rate4 = get_option_float("gensgd_rate4", gensgd_rate4); gensgd_rate5 = get_option_float("gensgd_rate5", gensgd_rate5); gensgd_regw = get_option_float("gensgd_regw", gensgd_regw); gensgd_regv = get_option_float("gensgd_regv", gensgd_regv); gensgd_reg0 = get_option_float("gensgd_reg0", gensgd_reg0); gensgd_mult_dec = get_option_float("gensgd_mult_dec", gensgd_mult_dec); fc.last_item = get_option_int("last_item", fc.last_item); fc.hash_strings = get_option_int("rehash", fc.hash_strings); user_file = get_option_string("user_file", user_file); user_links = get_option_string("user_links", user_links); item_file = get_option_string("item_file", item_file); file_columns = get_option_int("file_columns"); //get the number of columns in the edge file if (file_columns < 3) logstream(LOG_FATAL)<<"You must have at least 3 columns in input file: [from] [to] [value] on each line"<<std::endl; if (file_columns >= FEATURE_WIDTH) logstream(LOG_FATAL)<<"file_columns exceeds the allowed storage limit - please increase FEATURE_WIDTH and recompile." << std::endl; D = get_option_int("D", D); if (D <=2 || D>= 300) logstream(LOG_FATAL)<<"Allowed range for latent factor vector D is [2,300]." 
<< std::endl; fc.from_pos = get_option_int("from_pos", fc.from_pos); fc.to_pos = get_option_int("to_pos", fc.to_pos); fc.val_pos = get_option_int("val_pos", fc.val_pos); if (fc.from_pos >= file_columns || fc.to_pos >= file_columns || fc.val_pos >= file_columns) logstream(LOG_FATAL)<<"Please note that column numbering of from_pos, to_pos and val_pos starts from zero and should be smaller than file_columns" << std::endl; if (fc.from_pos == fc.to_pos || fc.from_pos == fc.val_pos || fc.to_pos == fc.val_pos) logstream(LOG_FATAL)<<"from_pos, to_pos and val_pos should have uniqu values" << std::endl; if (fc.val_pos == -1) logstream(LOG_FATAL)<<"you must specify a target column using --val_pos=XXX. Colmn index starts from 0." << std::endl; limit_rating = get_option_int("limit_rating", limit_rating); calc_error = get_option_int("calc_error", calc_error); has_header_titles = get_option_int("has_header_titles", has_header_titles); has_user_titles = get_option_int("has_user_titles", has_user_titles); has_item_titles = get_option_int("has_item_titles", has_item_titles); fc.rehash_value = get_option_int("rehash_value", fc.rehash_value); cutoff = get_option_float("cutoff", cutoff); json_input = get_option_int("json_input", json_input); cold_start = get_option_int("cold_start", cold_start); binary_prediction = get_option_int("binary_prediction", 0); parse_command_line_args(); parse_implicit_command_line(); std::string string_features = get_option_string("features", fc.default_feature_str); if (string_features != ""){ char * pfeatures = strdup(string_features.c_str()); char * pch = strtok(pfeatures, ",\n\r\t "); int node = atoi(pch); if (node < 0 || node >= MAX_FEATURES+3) logstream(LOG_FATAL)<<"Feature id using the --features=XX command should be non negative, starting from zero"<<std::endl; if (node >= file_columns) logstream(LOG_FATAL)<<"Feature id using the --feature=XX command should be < file_columns (counting starts from zero)" << std::endl; fc.feature_selection[node] = 
true; fc.total_features++; while ((pch = strtok(NULL, ",\n\r\t "))!= NULL){ node = atoi(pch); if (node < 0 || node >= MAX_FEATURES+3) logstream(LOG_FATAL)<<"Feature id using the --features=XX command should be non negative, starting from zero"<<std::endl; fc.feature_selection[node] = true; fc.total_features++; } } fc.node_id_maps.resize(2+fc.total_features); fc.stats_array.resize(fc.total_features); int nshards = convert_matrixmarket_N<edge_data>(training, false, fc, limit_rating); init_gensgd(load_factors_from_file); if (user_file != "") read_node_features(user_file, false, fc, true, false); if (item_file != "") read_node_features(item_file, false, fc, false, false); if (user_links != "") read_node_links(user_links, false, fc, true, false); if (json_input) has_header_titles = 1; if (has_header_titles && header_titles.size() == 0) logstream(LOG_FATAL)<<"Please delete temp files (using : \"rm -f " << training << ".*\") and run again" << std::endl; logstream(LOG_INFO) <<"Total selected features: " << fc.total_features << " : " << std::endl; for (int i=0; i < MAX_FEATURES+3; i++) if (fc.feature_selection[i]) logstream(LOG_INFO)<<"Selected feature: " << std::setw(3) << i << " : " << (has_header_titles? header_titles[i] : "") <<std::endl; logstream(LOG_INFO)<<"Target variable " << std::setw(3) << fc.val_pos << " : " << (has_header_titles? header_titles[fc.val_pos] : "") <<std::endl; logstream(LOG_INFO)<<"From " << std::setw(3) << fc.from_pos<< " : " << (has_header_titles? header_titles[fc.from_pos] : "") <<std::endl; logstream(LOG_INFO)<<"To " << std::setw(3) << fc.to_pos << " : " << (has_header_titles? 
header_titles[fc.to_pos] : "") <<std::endl; if (fc.node_features){ int last_offset = fc.node_id_maps.size(); int toadd = 0; for (int i = last_offset - fc.node_features; i < last_offset; i++){ toadd += fc.node_id_maps[i].string2nodeid.size(); } logstream(LOG_DEBUG)<<"Going to add " << toadd << std::endl; vertex_data data; for (int i=0; i < toadd; i++){ data.pvec = zeros(D); for (int j=0; j < D; j++) data.pvec[j] = drand48(); latent_factors_inmem.push_back(data); } fc.offsets.resize(calc_feature_num()); get_offsets(fc.offsets); } if (load_factors_from_file){ load_matrix_market_matrix(training + "_U.mm", 0, D); vec user_bias = load_matrix_market_vector(training +"_U_bias.mm", false, true); assert(user_bias.size() == num_feature_bins()); for (uint i=0; num_feature_bins(); i++){ latent_factors_inmem[i].bias = user_bias[i]; } vec gm = load_matrix_market_vector(training + "_global_mean.mm", false, true); globalMean = gm[0]; } /* Run */ GensgdVerticesInMemProgram program; graphchi_engine<VertexDataType, EdgeDataType> engine(training, nshards, false, m); set_engine_flags(engine); pengine = &engine; engine.run(program, niters); /* Output test predictions in matrix-market format */ output_gensgd_result(training); test_predictions_N(&gensgd_predict, fc); if (new_validation_users > 0) logstream(LOG_WARNING)<<"Found " << new_validation_users<< " new users with no information about them in training dataset!" << std::endl; if (new_test_users > 0) std::cout<<"Found " << new_test_users<< " new test users with no information about them in training dataset!" << std::endl; /* Report execution metrics */ if (!quiet) metrics_report(m); return 0; }
09jijiangwen-download
toolkits/collaborative_filtering/gensgd.cpp
C++
asf20
48,629
/*
 * @file
 * @author  Danny Bickson
 * @version 1.0
 *
 * @section LICENSE
 *
 * Copyright [2012] [Carnegie Mellon University]
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @section DESCRIPTION
 *
 * Matrix factorization with the Alternating Least Squares (ALS) algorithm
 * using sparse factors. Sparsity is obtained using the CoSaMP algorithm.
 */

#include "cosamp.hpp"
#include "eigen_wrapper.hpp"
#include "common.hpp"

/* ALS regularization weight (overridable with --lambda=XX) */
double lambda = 0.065;

/* Per-node state: a D-dimensional latent factor vector. */
struct vertex_data {
  vec pvec;
  vertex_data() { pvec = zeros(D); }
  void set_val(int index, float val){ pvec[index] = val; }
  float get_val(int index){ return pvec[index]; }
};

/**
 * Type definitions. Remember to create suitable graph shards using the
 * Sharder-program.
 */
typedef vertex_data VertexDataType;
typedef float EdgeDataType;  // Edges store the "rating" of user->movie pair

graphchi_engine<VertexDataType, EdgeDataType> * pengine = NULL;
graphchi_engine<VertexDataType, EdgeDataType> * pvalidation_engine = NULL;
std::vector<vertex_data> latent_factors_inmem;

#include "io.hpp"

//algorithm run mode: which side(s) of the factorization are kept sparse
enum {
  SPARSE_USR_FACTOR = 1, SPARSE_ITM_FACTOR = 2, SPARSE_BOTH_FACTORS = 3
};

int algorithm;
double user_sparsity;
double movie_sparsity;

#include "rmse.hpp"
#include "rmse_engine.hpp"

/** compute a missing value based on ALS algorithm
 *  Returns the squared prediction error; `prediction` is clipped to
 *  [minval, maxval]. */
float sparse_als_predict(const vertex_data& user,
    const vertex_data& movie,
    const float rating,
    double & prediction,
    void * extra = NULL){

  prediction = user.pvec.dot(movie.pvec);
  //truncate prediction to allowed values
  prediction = std::min((double)prediction, maxval);
  prediction = std::max((double)prediction, minval);
  //return the squared error
  float err = rating - prediction;
  assert(!std::isnan(err));
  return err*err;
}

/**
 * GraphChi programs need to subclass GraphChiProgram<vertex-type, edge-type>
 * class. The main logic is usually in the update function.
 */
struct ALSVerticesInMemProgram : public GraphChiProgram<VertexDataType, EdgeDataType> {

  /**
   * Vertex update function - computes the least square step
   */
  void update(graphchi_vertex<VertexDataType, EdgeDataType> &vertex, graphchi_context &gcontext) {
    vertex_data & vdata = latent_factors_inmem[vertex.id()];
    mat XtX = mat::Zero(D, D);
    vec Xty = vec::Zero(D);

    // only user vertices (those with out-edges) contribute to training RMSE,
    // so each rating is counted exactly once
    bool compute_rmse = (vertex.num_outedges() > 0);
    // Compute XtX and Xty (NOTE: unweighted)
    for(int e=0; e < vertex.num_edges(); e++) {
      float observation = vertex.edge(e)->get_data();
      vertex_data & nbr_latent = latent_factors_inmem[vertex.edge(e)->vertex_id()];
      Xty += nbr_latent.pvec * observation;
      XtX += nbr_latent.pvec * nbr_latent.pvec.transpose();
      if (compute_rmse) {
        double prediction;
        rmse_vec[omp_get_thread_num()] += sparse_als_predict(vdata, nbr_latent, observation, prediction);
      }
    }

    double regularization = lambda;
    /* BUG FIX: this used to be `lambda *= vertex.num_edges();`, which
     * mutated the GLOBAL lambda instead of the per-vertex copy — the
     * regularizer grew without bound across updates and was a data race
     * under multi-threaded execution. Scale the local copy instead. */
    if (regnormal)
      regularization *= vertex.num_edges();
    for(int i=0; i < D; i++)
      XtX(i,i) += regularization;

    bool isuser = vertex.id() < (uint)M;
    if (algorithm == SPARSE_BOTH_FACTORS || (algorithm == SPARSE_USR_FACTOR && isuser) ||
        (algorithm == SPARSE_ITM_FACTOR && !isuser)){
      // solve the regularized LS problem under a sparsity constraint
      double sparsity_level = 1.0;
      if (isuser)
        sparsity_level -= user_sparsity;
      else
        sparsity_level -= movie_sparsity;
      vdata.pvec = CoSaMP(XtX, Xty, (int)ceil(sparsity_level*(double)D), 10, 1e-4, D);
    }
    else // dense side: ordinary regularized least squares
      vdata.pvec = XtX.selfadjointView<Eigen::Upper>().ldlt().solve(Xty);
  }

  /**
   * Called before an iteration is started.
   */
  void before_iteration(int iteration, graphchi_context &gcontext) {
    reset_rmse(gcontext.execthreads);
  }

  /**
   * Called after an iteration has finished.
   */
  void after_iteration(int iteration, graphchi_context &gcontext) {
    training_rmse(iteration, gcontext);
    run_validation(pvalidation_engine, gcontext);
  }
};

/* Write the factor matrices U (users) and V (items) in matrix-market format. */
void output_als_result(std::string filename) {
  MMOutputter_mat<vertex_data> user_mat(filename + "_U.mm", 0, M, "This file contains ALS output matrix U. In each row D factors of a single user node.", latent_factors_inmem);
  MMOutputter_mat<vertex_data> item_mat(filename + "_V.mm", M ,M+N, "This file contains ALS output matrix V. In each row D factors of a single item node.", latent_factors_inmem);
  logstream(LOG_INFO) << "ALS output files (in matrix market format): " << filename << "_U.mm" << ", " << filename + "_V.mm " << std::endl;
}

int main(int argc, const char ** argv) {

  print_copyright();

  /* GraphChi initialization will read the command line
     arguments and the configuration file. */
  graphchi_init(argc, argv);

  /* Metrics object for keeping track of performance counters
     and other information. Currently required. */
  metrics m("als-inmemory-factors");

  lambda        = get_option_float("lambda", 0.065);
  user_sparsity = get_option_float("user_sparsity", 0.9);
  movie_sparsity = get_option_float("movie_sparsity", 0.9);
  algorithm     = get_option_int("algorithm", SPARSE_USR_FACTOR);

  parse_command_line_args();
  parse_implicit_command_line();

  if (user_sparsity < 0.5 || user_sparsity >= 1)
    logstream(LOG_FATAL)<<"Sparsity level should be [0.5,1). Please run again using --user_sparsity=XX in this range" << std::endl;
  if (movie_sparsity < 0.5 || movie_sparsity >= 1)
    logstream(LOG_FATAL)<<"Sparsity level should be [0.5,1). Please run again using --movie_sparsity=XX in this range" << std::endl;
  if (algorithm != SPARSE_USR_FACTOR && algorithm != SPARSE_BOTH_FACTORS && algorithm != SPARSE_ITM_FACTOR)
    logstream(LOG_FATAL)<<"Algorithm should be 1 for SPARSE_USR_FACTOR, 2 for SPARSE_ITM_FACTOR and 3 for SPARSE_BOTH_FACTORS" << std::endl;

  /* Preprocess data if needed, or discover preprocess files */
  int nshards = convert_matrixmarket<EdgeDataType>(training);
  init_feature_vectors<std::vector<vertex_data> >(M+N, latent_factors_inmem, !load_factors_from_file);

  if (validation != ""){
    int vshards = convert_matrixmarket<EdgeDataType>(validation, NULL, 0, 0, 3, VALIDATION);
    init_validation_rmse_engine<VertexDataType, EdgeDataType>(pvalidation_engine, vshards, &sparse_als_predict);
  }

  if (load_factors_from_file){
    load_matrix_market_matrix(training + "_U.mm", 0, D);
    load_matrix_market_matrix(training + "_V.mm", M, D);
  }

  /* Run */
  ALSVerticesInMemProgram program;
  graphchi_engine<VertexDataType, EdgeDataType> engine(training, nshards, false, m);
  set_engine_flags(engine);
  pengine = &engine;
  engine.run(program, niters);

  /* Output latent factor matrices in matrix-market format */
  output_als_result(training);
  test_predictions(&sparse_als_predict);

  /* Report execution metrics */
  if (!quiet)
    metrics_report(m);
  return 0;
}
09jijiangwen-download
toolkits/collaborative_filtering/sparse_als.cpp
C++
asf20
7,440
#!/bin/bash export GRAPHCHI_ROOT=$PWD/../../ stdoutfname=$PWD/stdout.log rm -f $stdoutfname echo | tee -a $stdoutfname echo "Running application tests"| tee -a $stdoutfname echo "========================="| tee -a $stdoutfname echo "GraphChi collaborative filtering library"| tee -a $stdoutfname somefailed=0 echo "---------ALS-------------" | tee -a $stdoutfname ./als --unittest=1 --quiet=1 >> $stdoutfname 2>& 1 if [ $? -eq 0 ]; then echo "PASS TEST 1 (Alternating least squares)"| tee -a $stdoutfname else somefailed=1 echo "FAIL ./aks --unittest=1 (Alternating least squares)"| tee -a $stdoutfname fi echo "---------WALS-------------" | tee -a $stdoutfname ./wals --unittest=1 --quiet=1 >> $stdoutfname 2>& 1 if [ $? -eq 0 ]; then echo "PASS TEST 2 (Weighted alternating least squares)"| tee -a $stdoutfname else somefailed=1 echo "FAIL TEST 2 (Weighted Alternating least squares)"| tee -a $stdoutfname fi if [ $somefailed == 1 ]; then echo "**** FAILURE LOG **************" >> $stdoutfname echo `date` >> $stdoutfname echo `uname -a` >> $stdoutfname echo `echo $USER` >> $stdoutfname echo "Some of the tests failed". echo "Please email stdout.log to danny.bickson@gmail.com" echo "Thanks for helping improve GraphChi!" fi
09jijiangwen-download
toolkits/collaborative_filtering/runtests.sh
Shell
asf20
1,264
#ifndef __GRAPHCHI_MRR_ENGINE #define __GRAPHCHI_MRR_ENGINE /** * @file * @author Mark Levy * @version 1.0 * * @section LICENSE * * Copyright [2012] [Carnegie Mellon University] * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * File for aggregating and displaying error mesasures and algorithm progress */ #include <set> #include <sstream> #include "climf.hpp" vec mrr_vec; // cumulative sum of MRR per thread vec users_vec; // user count per thread int num_threads = 1; int cur_iteration = 0; /** * GraphChi programs need to subclass GraphChiProgram<vertex-type, edge-type> * class. The main logic is usually in the update function. 
*/ struct ValidationMRRProgram : public GraphChiProgram<VertexDataType, EdgeDataType> { /** * compute MRR for a single user */ void update(graphchi_vertex<VertexDataType, EdgeDataType> &vertex, graphchi_context &gcontext) { if (vertex.id() < M) { // we're at a user node const vec & U = latent_factors_inmem[vertex.id()].pvec; std::set<int> known_likes; { for(int j = 0; j < vertex.num_edges(); j++) { if (is_relevant(vertex.edge(j))) { known_likes.insert(vertex.edge(j)->vertex_id() - M); } } } if (!known_likes.empty()) { // make predictions ivec indices = ivec::Zero(N); vec distances = zeros(N); for (uint i = M; i < M+N; i++) { const vec & V = latent_factors_inmem[i].pvec; indices[i-M] = i-M; distances[i-M] = dot(U,V); } int num_predictions = std::min(num_ratings, static_cast<int>(N)); vec sorted_distances(num_predictions); ivec sorted_indices = reverse_sort_index2(distances, indices, sorted_distances, num_predictions); // compute actual MRR double MRR = 0; for (uint i = 0; i < sorted_indices.size(); ++i) { if (known_likes.find(sorted_indices[i]) != known_likes.end()) { MRR = 1.0/(i+1); break; } } assert(mrr_vec.size() > omp_get_thread_num()); mrr_vec[omp_get_thread_num()] += MRR; assert(users_vec.size() > omp_get_thread_num()); users_vec[omp_get_thread_num()]++; } } } void before_iteration(int iteration, graphchi_context & gcontext) { users_vec = zeros(num_threads); mrr_vec = zeros(num_threads); } /** * Called after an iteration has finished. 
*/ void after_iteration(int iteration, graphchi_context &gcontext) { double mrr = sum(mrr_vec) / sum(users_vec); std::cout<<" Validation MRR:" << std::setw(10) << mrr << std::endl; } }; void reset_mrr(int exec_threads) { logstream(LOG_DEBUG)<<"Detected number of threads: " << exec_threads << std::endl; num_threads = exec_threads; mrr_vec = zeros(num_threads); } template<typename VertexDataType, typename EdgeDataType> void init_mrr_engine(graphchi_engine<VertexDataType,EdgeDataType> *& pvalidation_engine, int nshards) { if (nshards == -1) return; metrics * m = new metrics("validation_mrr_engine"); graphchi_engine<VertexDataType, EdgeDataType> * engine = new graphchi_engine<VertexDataType, EdgeDataType>(validation, nshards, false, *m); set_engine_flags(*engine); pvalidation_engine = engine; } template<typename VertexDataType, typename EdgeDataType> void run_validation(graphchi_engine<VertexDataType, EdgeDataType> * pvalidation_engine, graphchi_context & context) { //no validation data, no need to run validation engine calculations cur_iteration = context.iteration; if (pvalidation_engine == NULL) return; ValidationMRRProgram program; pvalidation_engine->run(program, 1); } #endif //__GRAPHCHI_MRR_ENGINE
09jijiangwen-download
toolkits/collaborative_filtering/mrr_engine.hpp
C++
asf20
4,341
/** * Copyright (c) 2009 Carnegie Mellon University. * All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an "AS * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language * governing permissions and limitations under the License. * * For more about this software visit: * * http://graphchi.org * * Written by Danny Bickson * */ #include "common.hpp" #include "types.hpp" #include "eigen_wrapper.hpp" #include "timer.hpp" using namespace std; int nshards; int input_cols = 3; /* Metrics object for keeping track of performance counters and other information. Currently required. */ metrics m("svd-inmemory-factors"); struct vertex_data { vec pvec; double value; double A_ii; vertex_data(){ value = 0; A_ii = 1; } //TODO void add_self_edge(double value) { A_ii = value; } void set_val(double value, int field_type) { pvec[field_type] = value; } //double get_output(int field_type){ return pred_x; } }; // end of vertex_data struct edge_data { float weight; edge_data(double weight = 0) : weight(weight) { } edge_data(double weight, double ignored) : weight(weight) { } //void set_field(int pos, double val){ weight = val; } //double get_field(int pos){ return weight; } }; /** * Type definitions. Remember to create suitable graph shards using the * Sharder-program. 
*/ typedef vertex_data VertexDataType; typedef edge_data EdgeDataType; // Edges store the "rating" of user->movie pair graphchi_engine<VertexDataType, EdgeDataType> * pengine = NULL; std::vector<vertex_data> latent_factors_inmem; #include "io.hpp" /** * * Implementation of the Lanczos algorithm, as given in: * http://en.wikipedia.org/wiki/Lanczos_algorithm * * Code written by Danny Bickson, CMU, June 2011 * */ //LANCZOS VARIABLES int max_iter = 10; bool no_edge_data = false; int actual_vector_len; int nv = 0; int nsv = 0; double tol = 1e-8; bool finished = false; int ortho_repeats = 3; bool save_vectors = false; std::string format = "matrixmarket"; int nodes = 0; int data_size = max_iter; #include "math.hpp" #include "printouts.hpp" void init_lanczos(bipartite_graph_descriptor & info){ srand48(time(NULL)); latent_factors_inmem.resize(info.total()); data_size = nsv + nv+1 + max_iter; if (info.is_square()) data_size *= 2; actual_vector_len = data_size; #pragma omp parallel for for (int i=0; i< info.total(); i++){ latent_factors_inmem[i].pvec = zeros(actual_vector_len); } logstream(LOG_INFO)<<"Allocated a total of: " << ((double)actual_vector_len * info.total() * sizeof(double)/ 1e6) << " MB for storing vectors." << std::endl; } vec lanczos( bipartite_graph_descriptor & info, timer & mytimer, vec & errest, const std::string & vecfile){ int nconv = 0; int its = 1; DistMat A(info); DistSlicedMat U(info.is_square() ? data_size : 0, info.is_square() ? 
2*data_size : data_size, true, info, "U"); DistSlicedMat V(0, data_size, false, info, "V"); vec alpha, beta, b; vec sigma = zeros(data_size); errest = zeros(nv); DistVec v_0(info, 0, false, "v_0"); if (vecfile.size() == 0) v_0 = randu(size(A,2)); PRINT_VEC2("svd->V", v_0); DistDouble vnorm = norm(v_0); v_0=v_0/vnorm; PRINT_INT(nv); while(nconv < nsv && its < max_iter){ std::cout<<"Starting iteration: " << its << " at time: " << mytimer.current_time() << std::endl; int k = nconv; int n = nv; PRINT_INT(k); PRINT_INT(n); alpha = zeros(n); beta = zeros(n); U[k] = V[k]*A._transpose(); orthogonalize_vs_all(U, k, alpha(0)); //alpha(0)=norm(U[k]).toDouble(); PRINT_VEC3("alpha", alpha, 0); //U[k] = U[k]/alpha(0); for (int i=k+1; i<n; i++){ std::cout <<"Starting step: " << i << " at time: " << mytimer.current_time() << std::endl; PRINT_INT(i); V[i]=U[i-1]*A; orthogonalize_vs_all(V, i, beta(i-k-1)); //beta(i-k-1)=norm(V[i]).toDouble(); //V[i] = V[i]/beta(i-k-1); PRINT_VEC3("beta", beta, i-k-1); U[i] = V[i]*A._transpose(); orthogonalize_vs_all(U, i, alpha(i-k)); //alpha(i-k)=norm(U[i]).toDouble(); //U[i] = U[i]/alpha(i-k); PRINT_VEC3("alpha", alpha, i-k); } V[n]= U[n-1]*A; orthogonalize_vs_all(V, n, beta(n-k-1)); //beta(n-k-1)=norm(V[n]).toDouble(); PRINT_VEC3("beta", beta, n-k-1); //compute svd of bidiagonal matrix PRINT_INT(nv); PRINT_NAMED_INT("svd->nconv", nconv); n = nv - nconv; PRINT_INT(n); alpha.conservativeResize(n); beta.conservativeResize(n); PRINT_MAT2("Q",eye(n)); PRINT_MAT2("PT",eye(n)); PRINT_VEC2("alpha",alpha); PRINT_VEC2("beta",beta); mat T=diag(alpha); for (int i=0; i<n-1; i++) set_val(T, i, i+1, beta(i)); PRINT_MAT2("T", T); mat a,PT; svd(T, a, PT, b); PRINT_MAT2("Q", a); alpha=b.transpose(); PRINT_MAT2("alpha", alpha); for (int t=0; t< n-1; t++) beta(t) = 0; PRINT_VEC2("beta",beta); PRINT_MAT2("PT", PT.transpose()); //estiamte the error int kk = 0; for (int i=nconv; i < nv; i++){ int j = i-nconv; PRINT_INT(j); sigma(i) = alpha(j); 
PRINT_NAMED_DBL("svd->sigma[i]", sigma(i)); PRINT_NAMED_DBL("Q[j*n+n-1]",a(n-1,j)); PRINT_NAMED_DBL("beta[n-1]",beta(n-1)); errest(i) = abs(a(n-1,j)*beta(n-1)); PRINT_NAMED_DBL("svd->errest[i]", errest(i)); if (alpha(j) > tol){ errest(i) = errest(i) / alpha(j); PRINT_NAMED_DBL("svd->errest[i]", errest(i)); } if (errest(i) < tol){ kk = kk+1; PRINT_NAMED_INT("k",kk); } if (nconv +kk >= nsv){ printf("set status to tol\n"); finished = true; } }//end for PRINT_NAMED_INT("k",kk); vec v; if (!finished){ vec swork=get_col(PT,kk); PRINT_MAT2("swork", swork); v = zeros(size(A,1)); for (int ttt=nconv; ttt < nconv+n; ttt++){ v = v+swork(ttt-nconv)*(V[ttt].to_vec()); } PRINT_VEC2("svd->V",V[nconv]); PRINT_VEC2("v[0]",v); } //compute the ritz eigenvectors of the converged singular triplets if (kk > 0){ PRINT_VEC2("svd->V", V[nconv]); mat tmp= V.get_cols(nconv,nconv+n)*PT; V.set_cols(nconv, nconv+kk, get_cols(tmp, 0, kk)); PRINT_VEC2("svd->V", V[nconv]); PRINT_VEC2("svd->U", U[nconv]); tmp= U.get_cols(nconv, nconv+n)*a; U.set_cols(nconv, nconv+kk,get_cols(tmp,0,kk)); PRINT_VEC2("svd->U", U[nconv]); } nconv=nconv+kk; if (finished) break; V[nconv]=v; PRINT_VEC2("svd->V", V[nconv]); PRINT_NAMED_INT("svd->nconv", nconv); its++; PRINT_NAMED_INT("svd->its", its); PRINT_NAMED_INT("svd->nconv", nconv); //nv = min(nconv+mpd, N); //if (nsv < 10) // nv = 10; PRINT_NAMED_INT("nv",nv); } // end(while) printf(" Number of computed signular values %d",nconv); printf("\n"); DistVec normret(info, nconv, false, "normret"); DistVec normret_tranpose(info, nconv, true, "normret_tranpose"); for (int i=0; i < nconv; i++){ normret = V[i]*A._transpose() -U[i]*sigma(i); double n1 = norm(normret).toDouble(); PRINT_DBL(n1); normret_tranpose = U[i]*A -V[i]*sigma(i); double n2 = norm(normret_tranpose).toDouble(); PRINT_DBL(n2); double err=sqrt(n1*n1+n2*n2); PRINT_DBL(err); PRINT_DBL(tol); if (sigma(i)>tol){ err = err/sigma(i); } PRINT_DBL(err); PRINT_DBL(sigma(i)); printf("Singular value %d \t%13.6g\tError 
estimate: %13.6g\n", i, sigma(i),err); } if (save_vectors){ std::cout<<"Going to save output vectors U and V" << std::endl; if (nconv == 0) logstream(LOG_FATAL)<<"No converged vectors. Aborting the save operation" << std::endl; char output_filename[256]; for (int i=0; i< nconv; i++){ sprintf(output_filename, "%s.U.%d", training.c_str(), i); write_output_vector(output_filename, U[i].to_vec(), false, "GraphLab v2 SVD output. This file contains eigenvector number i of the matrix U"); sprintf(output_filename, "%s.V.%d", training.c_str(), i); write_output_vector(output_filename, V[i].to_vec(), false, "GraphLab v2 SVD output. This file contains eigenvector number i of the matrix V'"); } } return sigma; } int main(int argc, const char *argv[]) { print_copyright(); //* GraphChi initialization will read the command line arguments and the configuration file. */ graphchi_init(argc, argv); std::string vecfile; vecfile = get_option_string("initial_vector", ""); debug = get_option_int("debug", 0); ortho_repeats = get_option_int("ortho_repeats", 3); nv = get_option_int("nv", 1); nsv = get_option_int("nsv", 1); tol = get_option_float("tol", 1e-5); save_vectors = get_option_int("save_vectors", 1); input_cols = get_option_int("input_cols", 3); max_iter = get_option_int("max_iter", max_iter); parse_command_line_args(); parse_implicit_command_line(); if (nv < nsv){ logstream(LOG_FATAL)<<"Please set the number of vectors --nv=XX, to be at least the number of support vectors --nsv=XX or larger" << std::endl; } //unit testing if (unittest == 1){ training = "gklanczos_testA"; vecfile = "gklanczos_testA_v0"; nsv = 3; nv = 3; debug = true; //TODO core.set_ncpus(1); } else if (unittest == 2){ training = "gklanczos_testB"; vecfile = "gklanczos_testB_v0"; nsv = 10; nv = 10; debug = true; max_iter = 100; //TODO core.set_ncpus(1); } else if (unittest == 3){ training = "gklanczos_testC"; vecfile = "gklanczos_testC_v0"; nsv = 4; nv = 10; debug = true; max_iter = 100; //TODO core.set_ncpus(1); } 
std::cout << "Load matrix " << training << std::endl; /* Preprocess data if needed, or discover preprocess files */ if (input_cols == 3) nshards = convert_matrixmarket<edge_data>(training); else if (input_cols == 4) nshards = convert_matrixmarket4<edge_data>(training); else logstream(LOG_FATAL)<<"--input_cols=XX should be either 3 or 4 input columns" << std::endl; info.rows = M; info.cols = N; info.nonzeros = L; assert(info.rows > 0 && info.cols > 0 && info.nonzeros > 0); timer mytimer; mytimer.start(); init_lanczos(info); init_math(info, ortho_repeats); //read initial vector from file (optional) if (vecfile.size() > 0){ std::cout << "Load inital vector from file" << vecfile << std::endl; load_matrix_market_vector(vecfile, info, 0, true, false); } //or start with a random initial vector else { #pragma omp parallel for for (int i=0; i< (int)M; i++) latent_factors_inmem[i].pvec[0] = drand48(); } graphchi_engine<VertexDataType, EdgeDataType> engine(training, nshards, false, m); set_engine_flags(engine); pengine = &engine; vec errest; vec singular_values = lanczos(info, mytimer, errest, vecfile); std::cout << "Lanczos finished " << mytimer.current_time() << std::endl; write_output_vector(training + ".singular_values", singular_values,false, "%GraphLab SVD Solver library. This file contains the singular values."); if (unittest == 1){ assert(errest.size() == 3); for (int i=0; i< errest.size(); i++) assert(errest[i] < 1e-30); } else if (unittest == 2){ assert(errest.size() == 10); for (int i=0; i< errest.size(); i++) assert(errest[i] < 1e-15); } /* Report execution metrics */ if (!quiet) metrics_report(m); return 0; }
09jijiangwen-download
toolkits/collaborative_filtering/svd.cpp
C++
asf20
11,860
#ifndef __GRAPHCHI_RMSE_ENGINE4 #define __GRAPHCHI_RMSE_ENGINE4 /** * @file * @author Danny Bickson * @version 1.0 * * @section LICENSE * * Copyright [2012] [Carnegie Mellon University] * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * File for aggregating and siplaying error mesasures and algorithm progress */ float (*pprediction_func)(const vertex_data&, const vertex_data&, const float, double &, void *) = NULL; vec validation_rmse_vec; bool user_nodes = true; int counter = 0; bool time_weighting = false; bool time_nodes = false; int matlab_time_offset = 0; int num_threads = 1; bool converged_engine = false; int cur_iteration = 0; /** * GraphChi programs need to subclass GraphChiProgram<vertex-type, edge-type> * class. The main logic is usually in the update function. 
*/ struct ValidationRMSEProgram4 : public GraphChiProgram<VertexDataType, EdgeDataType> { /** * compute validaton RMSE for a single user */ void update(graphchi_vertex<VertexDataType, EdgeDataType> &vertex, graphchi_context &gcontext) { if (user_nodes && vertex.id() >= M) return; else if (!user_nodes && vertex.id() < M) return; vertex_data & vdata = latent_factors_inmem[vertex.id()]; for(int e=0; e < vertex.num_outedges(); e++) { double observation = vertex.edge(e)->get_data().weight; uint time = (uint)vertex.edge(e)->get_data().time - matlab_time_offset; vertex_data * time_node = NULL; if (time_nodes){ assert(time >= 0 && time < M+N+K); time_node = &latent_factors_inmem[time]; } vertex_data & nbr_latent = latent_factors_inmem[vertex.edge(e)->vertex_id()]; double prediction; double rmse = (*pprediction_func)(vdata, nbr_latent, observation, prediction, (void*)time_node); assert(rmse <= pow(maxval - minval, 2)); if (time_weighting) rmse *= vertex.edge(e)->get_data().time; assert(validation_rmse_vec.size() > omp_get_thread_num()); validation_rmse_vec[omp_get_thread_num()] += rmse; } } void before_iteration(int iteration, graphchi_context & gcontext){ last_validation_rmse = dvalidation_rmse; validation_rmse_vec = zeros(num_threads); } /** * Called after an iteration has finished. 
*/ void after_iteration(int iteration, graphchi_context &gcontext) { assert(Le > 0); dvalidation_rmse = finalize_rmse(sum(validation_rmse_vec) , (double)Le); std::cout<<" Validation " << error_names[loss_type] << ":" << std::setw(10) << dvalidation_rmse << std::endl; if (halt_on_rmse_increase > 0 && halt_on_rmse_increase < cur_iteration && dvalidation_rmse > last_validation_rmse){ logstream(LOG_WARNING)<<"Stopping engine because of validation RMSE increase" << std::endl; converged_engine = true; } } }; template<typename VertexDataType, typename EdgeDataType> void init_validation_rmse_engine(graphchi_engine<VertexDataType,EdgeDataType> *& pvalidation_engine, int nshards,float (*prediction_func)(const vertex_data & user, const vertex_data & movie, float rating, double & prediction, void * extra), bool _time_weighting, bool _time_nodes, int _matlab_time_offset){ metrics * m = new metrics("validation_rmse_engine"); graphchi_engine<VertexDataType, EdgeDataType> * engine = new graphchi_engine<VertexDataType, EdgeDataType>(validation, nshards, false, *m); set_engine_flags(*engine); pvalidation_engine = engine; time_weighting = _time_weighting; time_nodes = _time_nodes; matlab_time_offset = _matlab_time_offset; pprediction_func = prediction_func; num_threads = number_of_omp_threads(); } template<typename VertexDataType, typename EdgeDataType> void run_validation4(graphchi_engine<VertexDataType, EdgeDataType> * pvalidation_engine, graphchi_context & context){ //no validation data, no need to run validation engine calculations cur_iteration = context.iteration; if (pvalidation_engine == NULL){ std::cout << std::endl; return; } ValidationRMSEProgram4 program; pvalidation_engine->run(program, 1); if (converged_engine) context.set_last_iteration(cur_iteration); } void reset_rmse(int exec_threads){ logstream(LOG_DEBUG)<<"Detected number of threads: " << exec_threads << std::endl; num_threads = exec_threads; rmse_vec = zeros(exec_threads); } #endif //__GRAPHCHI_RMSE_ENGINE4
09jijiangwen-download
toolkits/collaborative_filtering/rmse_engine4.hpp
C++
asf20
4,881
/** * @file * @author Danny Bickson * @version 1.0 * * @section LICENSE * * Copyright [2012] [Carnegie Mellon University] * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * @section DESCRIPTION * * Implementation of the gensgd algorithm. A generalization of SGD algorithm when there are multiple features for each * rating, in the form * [from] [to] [feature1] [feature2] [feature3] ... [featureN] [rating] * (It is also possible to dynamically specify column numbers which are relevant) * Steffen Rendle (2010): Factorization Machines, in Proceedings of the 10th IEEE International Conference on Data Mining (ICDM 2010), Sydney, Australia. * Original implementation by Qiang Yan, Chinese Academy of Science. * note: this code version implements the SGD version of gensgd. In the original library there are also ALS and MCMC methods. * Also the treatment of features is richer in gensgd. The code here can serve for a quick evaluation but the user * is encouraged to try gensgd as well. 
*/ #include <vector> #include "common.hpp" #include "eigen_wrapper.hpp" #include "../parsers/common.hpp" #include <omp.h> #define MAX_FEATAURES 256 #define FEATURE_WIDTH 21//MAX NUMBER OF ALLOWED FEATURES IN TEXT FILE double gensgd_rate1 = 1e-02; double gensgd_rate2 = 1e-02; double gensgd_rate3 = 1e-02; double gensgd_rate4 = 1e-02; double gensgd_rate5 = 1e-02; double gensgd_mult_dec = 0.9; double gensgd_regw = 1e-3; double gensgd_regv = 1e-3; double gensgd_reg0 = 1e-1; bool debug = false; std::string user_file; //optional file with user features std::string item_file; //optional file with item features std::string user_links; //optional file with user to user links int limit_rating = 0; size_t vertex_with_no_edges = 0; int calc_error = 0; int calc_roc = 0; int binary = 1; int round_float = 0; std::vector<std::string> header_titles; int has_header_titles = 0; float cutoff = 0; std::string format = "libsvm"; vec errors_vec; struct single_map{ std::map<float,uint> string2nodeid; single_map(){ } }; struct feature_control{ std::vector<single_map> node_id_maps; single_map val_map; single_map index_map; int rehash_value; int feature_num; int node_features; int node_links; int total_features; const std::string default_feature_str; std::vector<int> offsets; bool hash_strings; int from_pos; int to_pos; int val_pos; feature_control(){ rehash_value = 0; total_features = 0; node_features = 0; feature_num = FEATURE_WIDTH; hash_strings = false; from_pos = 0; to_pos = 1; val_pos = -1; node_links = 0; } }; feature_control fc; int num_feature_bins(){ int sum = 0; if (fc.hash_strings){ assert(2+fc.total_features+fc.node_features == (int)fc.node_id_maps.size()); for (int i=2; i < 2+fc.total_features+fc.node_features; i++){ sum+= fc.node_id_maps[i].string2nodeid.size(); } } else assert(false); return sum; } int calc_feature_num(){ return 2+fc.total_features+fc.node_features; } void get_offsets(std::vector<int> & offsets){ assert(offsets.size() > 3); offsets[0] = 0; offsets[1] = M; 
offsets[2] = M+N; for (uint i=3; i< offsets.size(); i++){ assert(fc.node_id_maps.size() > (uint)i); offsets[i] += offsets[i-1] + fc.node_id_maps[i].string2nodeid.size(); } } bool is_user(vid_t id){ return id < M; } bool is_item(vid_t id){ return id >= M && id < N; } bool is_time(vid_t id){ return id >= M+N; } #define BIAS_POS -1 struct vertex_data { fvec pvec; double bias; vertex_data() { bias = 0; } void set_val(int index, float val){ if (index == BIAS_POS) bias = val; else pvec[index] = val; } float get_val(int index){ if (index== BIAS_POS) return bias; else return pvec[index]; } }; struct edge_data { uint features[FEATURE_WIDTH]; uint index[FEATURE_WIDTH]; uint size; float weight; edge_data() { weight = 0; size = 0; memset(features, 0, sizeof(uint)*FEATURE_WIDTH); memset(index, 0, sizeof(uint)*FEATURE_WIDTH); } edge_data(float weight, uint * valarray, uint * _index, uint size): size(size), weight(weight) { memcpy(features, valarray, sizeof(uint)*FEATURE_WIDTH); memcpy(index, _index, sizeof(uint)*FEATURE_WIDTH); } }; /** * Type definitions. Remember to create suitable graph shards using the * Sharder-program. 
*/ typedef vertex_data VertexDataType; typedef edge_data EdgeDataType; // Edges store the "rating" of user->movie pair graphchi_engine<VertexDataType, EdgeDataType> * pengine = NULL; std::vector<vertex_data> latent_factors_inmem; int calc_feature_node_array_size(uint node, uint item, uint edge_size){ assert(node <= M); assert(item <= N); assert(edge_size >= 0); assert(node < latent_factors_inmem.size()); assert(fc.offsets[1]+item < latent_factors_inmem.size()); return 2+edge_size; } void assign_id(single_map& dmap, unsigned int & outval, const float name){ std::map<float,uint>::iterator it = dmap.string2nodeid.find(name); //if an id was already assigned, return it if (it != dmap.string2nodeid.end()){ outval = it->second - 1; assert(outval < dmap.string2nodeid.size()); return; } mymutex.lock(); //assign a new id outval = dmap.string2nodeid[name]; if (outval == 0){ dmap.string2nodeid[name] = dmap.string2nodeid.size(); outval = dmap.string2nodeid.size() - 1; } mymutex.unlock(); } /** * return a numeric node ID out of the string text read from file (training, validation or test) */ float get_node_id(char * pch, int pos, size_t i, bool read_only = false){ assert(pch != NULL); assert(pch[0] != 0); assert(i >= 0); float ret; //read numeric id if (!fc.hash_strings){ ret = (pos < 2 ? 
atoi(pch) : atof(pch)); if (pos < 2) ret--; if (pos == 0 && ret >= M) logstream(LOG_FATAL)<<"Row index larger than the matrix row size " << ret << " > " << M << " in line: " << i << std::endl; else if (pos == 1 && ret >= N) logstream(LOG_FATAL)<<"Col index larger than the matrix row size " << ret << " > " << N << " in line: " << i << std::endl; } //else read string id and assign numeric id else { uint id; float val = atof(pch); assert(!std::isnan(val)); if (round_float) val = floorf(val * 10000 + 0.5) / 10000; if (pos >= 0) assert(pos < (int)fc.node_id_maps.size()); single_map * pmap = NULL; if (pos == -1) pmap = &fc.index_map; else pmap = &fc.node_id_maps[pos]; if (read_only){ // find if node was in map std::map<float,uint>::iterator it = pmap->string2nodeid.find(val); if (it != pmap->string2nodeid.end()){ ret = it->second - 1; assert(ret < pmap->string2nodeid.size()); } else ret = -1; } else { //else enter node into map (in case it did not exist) and return its position assign_id(*pmap, id, val); if (pos == -1 && fc.index_map.string2nodeid.size() == id+1 && fc.node_id_maps.size() < fc.index_map.string2nodeid.size()+2){//TODO debug single_map newmap; fc.node_id_maps.push_back(newmap); } ret = id; } } if (!read_only) assert(ret != -1); return ret; } #include "io.hpp" #include "../parsers/common.hpp" float get_value(char * pch, bool read_only){ float ret; if (!fc.rehash_value){ ret = atof(pch); } else { uint id; if (read_only){ // find if node was in map std::map<float,uint>::iterator it = fc.val_map.string2nodeid.find(atof(pch)); if (it != fc.val_map.string2nodeid.end()){ ret = it->second - 1; } else ret = -1; } else { //else enter node into map (in case it did not exist) and return its position assign_id(fc.val_map, id, atof(pch)); ret = id; } } if (std::isnan(ret) || std::isinf(ret)) logstream(LOG_FATAL)<<"Failed to read value" << std::endl; return ret; } /* Read and parse one input line from file */ bool read_line(FILE * f, const std::string filename, size_t i, 
uint & I, uint & J, float &val, std::vector<uint>& valarray, std::vector<uint>& positions, int & index, int type, int & skipped_features){ char * linebuf = NULL; size_t linesize; char linebuf_debug[1024]; int token = 0; index = 0; int rc = getline(&linebuf, &linesize, f); if (rc == -1) logstream(LOG_FATAL)<<"Failed to get line: " << i << " in file: " << filename << std::endl; char * linebuf_to_free = linebuf; strncpy(linebuf_debug, linebuf, 1024); while (index < FEATURE_WIDTH){ /* READ FROM */ if (token == fc.from_pos){ char *pch = strsep(&linebuf,"\t,\r\n: "); if (pch == NULL) logstream(LOG_FATAL)<<"Error reading line " << i << " [ " << linebuf_debug << " ] " << std::endl; I = (uint)get_node_id(pch, 0, i, type != TRAINING); token++; } else if (token == fc.to_pos){ /* READ TO */ char * pch = strsep(&linebuf, "\t,\r\n: "); if (pch == NULL) logstream(LOG_FATAL)<<"Error reading line " << i << " [ " << linebuf_debug << " ] " << std::endl; J = (uint)get_node_id(pch, 1, i, type != TRAINING); token++; } else if (token == fc.val_pos){ /* READ RATING */ char * pch = strsep(&linebuf, "\t,\r\n "); if (pch == NULL) logstream(LOG_FATAL)<<"Error reading line " << i << " [ " << linebuf_debug << " ] " << std::endl; val = get_value(pch, type != TRAINING); token++; } else { /* READ FEATURES */ char * pch = strsep(&linebuf, "\t,\r\n:; "); if (pch == NULL || pch[0] == 0) break; uint pos = get_node_id(pch, -1, i, type != TRAINING); if (type != TRAINING && pos == (uint)-1){ //this feature was not observed on training, skip char * pch2 = strsep(&linebuf, "\t\r\n "); if (pch2 == NULL || pch2[0] == 0) logstream(LOG_FATAL)<<"Error reading line " << i << " feature2 " << index << " [ " << linebuf_debug << " ] " << std::endl; skipped_features++; continue; } assert(pos != (uint)-1 && pos < fc.index_map.string2nodeid.size()); char * pch2 = strsep(&linebuf, "\t\r\n "); if (pch2 == NULL || pch2[0] == 0) logstream(LOG_FATAL)<<"Error reading line " << i << " feature2 " << index << " [ " << 
linebuf_debug << " ] " << std::endl; uint second_index = get_node_id(pch2, pos, i, type != TRAINING); if (type != TRAINING && second_index == (uint)-1){ //this value was not observed in training, skip second_index = 0; //skipped_features++; //continue; } assert(second_index != (uint)-1); assert(index< (int)valarray.size()); assert(index< (int)positions.size()); valarray[index] = second_index; positions[index] = pos; index++; token++; } }//end while free(linebuf_to_free); return true; }//end read_line /* compute an edge prediction based on input features */ float compute_prediction( uint I, uint J, const float val, double & prediction, uint * valarray, uint * positions, uint edge_size, float (*prediction_func)(std::vector<vertex_data*>& node_array, int arraysize, float rating, double & prediction, fvec * psum), fvec * psum, std::vector<vertex_data*>& node_array, uint node_array_size){ /* COMPUTE PREDICTION */ /* USER NODE **/ int index = 0; int loc = 0; node_array[index] = &latent_factors_inmem[I+fc.offsets[index]]; assert(node_array[index]->pvec[0] < 1e5); index++; loc++; /* 1) ITEM NODE */ assert(J+fc.offsets[index] < latent_factors_inmem.size()); node_array[index] = &latent_factors_inmem[J+fc.offsets[index]]; assert(node_array[index]->pvec[0] < 1e5); index++; loc++; /* 2) FEATURES GIVEN IN RATING LINE */ for (int j=0; j< (int)edge_size; j++){ assert(fc.offsets.size() > positions[j]); uint pos = fc.offsets[positions[j]] + valarray[j]; assert(pos >= 0 && pos < latent_factors_inmem.size()); assert(j+index < (int)node_array_size); node_array[j+index] = & latent_factors_inmem[pos]; assert(node_array[j+index]->pvec[0] < 1e5); } index+= edge_size; loc += edge_size; assert(index == calc_feature_node_array_size(I,J, edge_size)); (*prediction_func)(node_array, node_array_size, val, prediction, psum); return pow(val - prediction,2); } #include "rmse.hpp" /** * Create a bipartite graph from a matrix. 
Each row corresponds to vertex * with the same id as the row number (0-based), but vertices correponsing to columns * have id + num-rows. * Line format of the type * [user] [item] [feature1] [feature2] ... [featureN] [rating] */ /* Read input file, process it and save a binary representation for faster loading */ template <typename als_edge_type> int convert_matrixmarket_N(std::string base_filename, bool square, feature_control & fc, int limit_rating = 0) { // Note, code based on: http://math.nist.gov/MatrixMarket/mmio/c/example_read.c FILE *f; size_t nz; /** * Create sharder object */ int nshards; sharder<als_edge_type> sharderobj(base_filename); sharderobj.start_preprocessing(); detect_matrix_size(base_filename, f, M, N, nz); /* if .info file is not present, try to find matrix market header inside the base_filename file */ if (format == "libsvm") assert(!has_header_titles); if (has_header_titles){ char * linebuf = NULL; size_t linesize; char linebuf_debug[1024]; /* READ LINE */ int rc = getline(&linebuf, &linesize, f); if (rc == -1) logstream(LOG_FATAL)<<"Error header line " << " [ " << linebuf_debug << " ] " << std::endl; strncpy(linebuf_debug, linebuf, 1024); /** READ [FROM] */ char *pch = strtok(linebuf,"\t,\r; "); if (pch == NULL) logstream(LOG_FATAL)<<"Error header line " << " [ " << linebuf_debug << " ] " << std::endl; header_titles.push_back(pch); /** READ USER FEATURES */ while (pch != NULL){ pch = strtok(NULL, "\t,\r; "); if (pch == NULL) break; header_titles.push_back(pch); //update stats if needed } } if (M == 0 && N == 0) logstream(LOG_FATAL)<<"Failed to detect matrix size. Please prepare a file named: " << base_filename << ":info with matrix market header, as explained here: http://bickson.blogspot.co.il/2012/12/collaborative-filtering-3rd-generation_14.html " << std::endl; logstream(LOG_INFO) << "Starting to read matrix-market input. 
Matrix dimensions: " << M << " x " << N << ", non-zeros: " << nz << std::endl; uint I, J; std::vector<uint> valarray; valarray.resize(FEATURE_WIDTH); std::vector<uint> positions; positions.resize(FEATURE_WIDTH); float val; if (limit_rating > 0) nz = limit_rating; int skipped_features = 0; for (size_t i=0; i<nz; i++) { int index; if (!read_line(f, base_filename, i,I, J, val, valarray, positions, index, TRAINING, skipped_features)) logstream(LOG_FATAL)<<"Failed to read line: " <<i<< " in file: " << base_filename << std::endl; if (index < 1) logstream(LOG_FATAL)<<"Failed to read line: " <<i<< " in file: " << base_filename << std::endl; if (nz > 1000000 && (i % 1000000) == 0) logstream(LOG_INFO)<< mytimer.current_time() << " Finished reading " << i << " lines " << std::endl; //calc stats L++; globalMean += val; sharderobj.preprocessing_add_edge(I, square?J:M+J, als_edge_type(val, &valarray[0], &positions[0], index)); } sharderobj.end_preprocessing(); //calc stats assert(L > 0); assert(globalMean != 0); globalMean /= L; logstream(LOG_INFO)<<"Coputed global mean is: " << globalMean << std::endl; fclose(f); logstream(LOG_INFO) << "Now creating shards." 
<< std::endl; // Shard with a specified number of shards, or determine automatically if not defined nshards = sharderobj.execute_sharding(get_option_string("nshards", "auto")); return nshards; } static bool mySort(const std::pair<double, double> &p1,const std::pair<double, double> &p2) { return p1.second > p2.second; } /** compute validation rmse */ void validation_rmse_N( float (*prediction_func)(std::vector<vertex_data*>& array, int arraysize, float rating, double & prediction, fvec * psum) ,graphchi_context & gcontext, feature_control & fc, bool square = false) { if (validation == "") return; FILE * f = NULL; size_t nz = 0; detect_matrix_size(validation, f, Me, Ne, nz); if (f == NULL){ logstream(LOG_WARNING)<<"Failed to open validation file: " << validation << " - skipping."<<std::endl; return; } if ((M > 0 && N > 0) && (Me != M || Ne != N)) logstream(LOG_WARNING)<<"Input size of validation matrix must be identical to training matrix, namely " << M << "x" << N << std::endl; Le = nz; last_validation_rmse = dvalidation_rmse; dvalidation_rmse = 0; std::vector<uint> valarray; valarray.resize(FEATURE_WIDTH); std::vector<uint> positions; positions.resize(FEATURE_WIDTH); uint I, J; float val; int skipped_features = 0; int skipped_nodes = 0; int errors = 0; //FOR ROC. ROC code thanks to Justin Yan. 
double _M = 0; double _N = 0; std::vector<std::pair<double, double> > realPrediction; for (size_t i=0; i<nz; i++) { int index; if (!read_line(f, validation, i, I, J, val, valarray, positions, index, VALIDATION, skipped_features)) logstream(LOG_FATAL)<<"Failed to read line: " << i << " in file: " << validation << std::endl; if (I == (uint)-1 || J == (uint)-1){ skipped_nodes++; continue; } double prediction; int howmany = calc_feature_node_array_size(I,J, index); std::vector<vertex_data*> node_array; node_array.resize(howmany); for (int k=0; k< howmany; k++) node_array[k] = NULL; fvec sum; compute_prediction(I, J, val, prediction, &valarray[0], &positions[0], index, prediction_func, &sum, node_array, howmany); if (calc_roc) realPrediction.push_back(std::make_pair(val, prediction)); double temp_pred = prediction; temp_pred = std::min(temp_pred, maxval); temp_pred = std::max(temp_pred, minval); dvalidation_rmse += pow(prediction - val, 2); if (prediction < cutoff && val >= cutoff) errors++; else if (prediction >= cutoff && val < cutoff) errors++; } fclose(f); assert(Le > 0); dvalidation_rmse = sqrt(dvalidation_rmse / (double)Le); std::cout<<" Validation RMSE: " << std::setw(10) << dvalidation_rmse; if (calc_error) std::cout<<" Validation Err: " << std::setw(10) << ((double)errors/(double)(nz-skipped_nodes)); if (calc_roc){ double roc = 0; double ret = 0; std::vector<double> L; std::sort(realPrediction.begin(), realPrediction.end(),mySort); std::vector<std::pair<double, double> >::iterator iter; for(iter=realPrediction.begin();iter!=realPrediction.end();iter++) { L.push_back(iter->first); if(iter->first > cutoff) _M++; else _N++; } std::vector<double>:: iterator iter2; int i=0; for(iter2=L.begin();iter2!=L.end();iter2++) { if(*iter2 > cutoff) ret += ((_M+_N) - i); i++; } double ret2 = _M *(_M+1)/2; roc= (ret-ret2)/(_M*_N); std::cout<<" Validation ROC: " << roc << std::endl; } else std::cout<<std::endl; if (halt_on_rmse_increase && dvalidation_rmse > last_validation_rmse 
&& gcontext.iteration > 0){ logstream(LOG_WARNING)<<"Stopping engine because of validation RMSE increase" << std::endl; gcontext.set_last_iteration(gcontext.iteration); } if (skipped_features > 0) logstream(LOG_DEBUG)<<"Skipped " << skipped_features << " when reading from file. " << std::endl; if (skipped_nodes > 0) logstream(LOG_DEBUG)<<"Skipped " << skipped_nodes << " when reading from file. " << std::endl; } /* compute predictions for test data */ void test_predictions_N( float (*prediction_func)(std::vector<vertex_data*>& node_array, int node_array_size, float rating, double & predictioni, fvec * sum), feature_control & fc, bool square = false) { FILE *f = NULL; uint Me, Ne; size_t nz; if (test == ""){ logstream(LOG_INFO)<<"No test file was found, skipping test predictions " << std::endl; return; } detect_matrix_size(test, f, Me, Ne, nz); if (f == NULL){ logstream(LOG_WARNING)<<"Failed to open test file " << test<< " skipping test predictions " << std::endl; return; } if ((M > 0 && N > 0 ) && (Me != M || Ne != N)) logstream(LOG_FATAL)<<"Input size of test matrix must be identical to training matrix, namely " << M << "x" << N << std::endl; FILE * fout = open_file((test + ".predict").c_str(),"w", false); MM_typecode matcode; mm_set_array(&matcode); mm_write_banner(fout, matcode); mm_write_mtx_array_size(fout ,nz, 1); std::vector<uint> valarray; valarray.resize(FEATURE_WIDTH); std::vector<uint> positions; positions.resize(FEATURE_WIDTH); float val; double prediction; uint I,J; int skipped_features = 0; int skipped_nodes = 0; for (uint i=0; i<nz; i++) { int index; if (!read_line(f, test, i, I, J, val, valarray, positions, index, TEST, skipped_features)) logstream(LOG_FATAL)<<"Failed to read line: " <<i << " in file: " << test << std::endl; if (I == (uint)-1 || J == (uint)-1){ skipped_nodes++; fprintf(fout, "%d\n", 0); //features for this node are not found in the training set, write a default value continue; } int howmany = calc_feature_node_array_size(I,J,index); 
std::vector<vertex_data*> node_array; node_array.resize(howmany); for (int k=0; k< howmany; k++) node_array[k] = NULL; fvec sum; compute_prediction(I, J, val, prediction, &valarray[0], &positions[0], index, prediction_func, &sum, node_array, howmany); fprintf(fout, "%12.8lg\n", prediction); } fclose(f); fclose(fout); logstream(LOG_INFO)<<"Finished writing " << nz << " predictions to file: " << test << ".predict" << std::endl; if (skipped_features > 0) logstream(LOG_DEBUG)<<"Skipped " << skipped_features << " when reading from file. " << std::endl; if (skipped_nodes > 0) logstream(LOG_WARNING)<<"Skipped node in test dataset: " << skipped_nodes << std::endl; } float gensgd_predict(std::vector<vertex_data*> & node_array, int node_array_size, const float rating, double& prediction, fvec* sum){ fvec sum_sqr = fzeros(D); *sum = fzeros(D); prediction = globalMean; assert(!std::isnan(prediction)); for (int i=0; i< node_array_size; i++) prediction += node_array[i]->bias; assert(!std::isnan(prediction)); for (int j=0; j< D; j++){ for (int i=0; i< node_array_size; i++){ sum->operator[](j) += node_array[i]->pvec[j]; assert(sum->operator[](j) < 1e5); sum_sqr[j] += pow(node_array[i]->pvec[j],2); } prediction += 0.5 * (pow(sum->operator[](j),2) - sum_sqr[j]); assert(!std::isnan(prediction)); } //truncate prediction to allowed values prediction = std::min((double)prediction, maxval); prediction = std::max((double)prediction, minval); //return the squared error float err = rating - prediction; assert(!std::isnan(err)); return err*err; } float gensgd_predict(std::vector<vertex_data*>& node_array, int node_array_size, const float rating, double & prediction){ fvec sum; return gensgd_predict(node_array, node_array_size, rating, prediction, &sum); } void init_gensgd(bool load_factors_from_file){ srand(time(NULL)); int nodes = M+N+num_feature_bins(); latent_factors_inmem.resize(nodes); int howmany = calc_feature_num(); logstream(LOG_DEBUG)<<"Going to calculate: " << howmany << " 
offsets." << std::endl; fc.offsets.resize(howmany); get_offsets(fc.offsets); assert(D > 0); if (!load_factors_from_file){ double factor = 0.1/sqrt(D); #pragma omp parallel for for (int i=0; i< nodes; i++){ latent_factors_inmem[i].pvec = (debug ? 0.1*fones(D) : (::frandu(D)*factor)); } } } void training_rmse_N(int iteration, graphchi_context &gcontext, bool items = false){ last_training_rmse = dtraining_rmse; dtraining_rmse = 0; size_t total_errors = 0; int start = 0; int end = M; if (items){ start = M; end = M+N; } dtraining_rmse = sum(rmse_vec); if (calc_error){ total_errors = sum(errors_vec); } dtraining_rmse = sqrt(dtraining_rmse / pengine->num_edges()); if (calc_error) std::cout<< std::setw(10) << mytimer.current_time() << ") Iteration: " << std::setw(3) <<iteration<<" Training RMSE: " << std::setw(10)<< dtraining_rmse << " Train err: " << std::setw(10) << (total_errors/(double)L); else std::cout<< std::setw(10) << mytimer.current_time() << ") Iteration: " << std::setw(3) <<iteration<<" Training RMSE: " << std::setw(10)<< dtraining_rmse; } /** * GraphChi programs need to subclass GraphChiProgram<vertex-type, edge-type> * class. The main logic is usually in the update function. 
*/ struct Sparse_GensgdVerticesInMemProgram : public GraphChiProgram<VertexDataType, EdgeDataType> { /* * Vertex update function - computes the least square step */ void update(graphchi_vertex<VertexDataType, EdgeDataType> &vertex, graphchi_context &gcontext) { //go over all user nodes if (is_user(vertex.id())){ //go over all observed ratings for(int e=0; e < vertex.num_outedges(); e++) { const edge_data & data = vertex.edge(e)->get_data(); int howmany = calc_feature_node_array_size(vertex.id(), vertex.edge(e)->vertex_id()-M, data.size); std::vector<vertex_data*> node_array; node_array.resize(howmany); for (int i=0; i< howmany; i++) node_array[i] = NULL; float rui = data.weight; double pui; fvec sum; //compute current prediction rmse_vec[omp_get_thread_num()] += compute_prediction(vertex.id(), vertex.edge(e)->vertex_id()-M, rui ,pui, (uint*)data.features, (uint*)data.index, data.size, gensgd_predict, &sum, node_array, howmany); if (calc_error){ if ((pui < cutoff && rui > cutoff) || (pui > cutoff && rui < cutoff)) errors_vec[omp_get_thread_num()]++; } float eui = pui - rui; //update global mean bias globalMean -= gensgd_rate1 * (eui + gensgd_reg0 * globalMean); //update node biases and vectors for (int i=0; i < howmany; i++){ double gensgd_rate; if (i == 0) //user gensgd_rate = gensgd_rate1; else if (i == 1) //item gensgd_rate = gensgd_rate2; else if (i < (int)(data.size+2)) //rating features gensgd_rate = gensgd_rate3; else if (i < (int)(2+data.size+fc.node_features)) //user and item features gensgd_rate = gensgd_rate4; else gensgd_rate = gensgd_rate5; //last item node_array[i]->bias -= gensgd_rate * (eui + gensgd_regw* node_array[i]->bias); assert(!std::isnan(node_array[i]->bias)); assert(node_array[i]->bias < 1e3); fvec grad = sum - node_array[i]->pvec; node_array[i]->pvec -= gensgd_rate * (eui*grad + gensgd_regv * node_array[i]->pvec); assert(!std::isnan(node_array[i]->pvec[0])); assert(node_array[i]->pvec[0] < 1e3); } } } }; /** * Called after an iteration has 
finished. */ void after_iteration(int iteration, graphchi_context &gcontext) { if (iteration == 1 && vertex_with_no_edges > 0) logstream(LOG_WARNING)<<"There are " << vertex_with_no_edges << " users without ratings" << std::endl; gensgd_rate1 *= gensgd_mult_dec; gensgd_rate2 *= gensgd_mult_dec; gensgd_rate3 *= gensgd_mult_dec; gensgd_rate4 *= gensgd_mult_dec; gensgd_rate5 *= gensgd_mult_dec; training_rmse_N(iteration, gcontext); validation_rmse_N(&gensgd_predict, gcontext, fc); }; /** * Called before an iteration is started. */ void before_iteration(int iteration, graphchi_context &gcontext) { rmse_vec = zeros(number_of_omp_threads()); if (calc_error) errors_vec = zeros(number_of_omp_threads()); } }; void output_gensgd_result(std::string filename) { MMOutputter_mat<vertex_data> mmoutput(filename + "_U.mm", 0, M+N+num_feature_bins(), "This file contains Sparse_Gensgd output matrices. In each row D factors of a single user node, then item nodes, then features", latent_factors_inmem); MMOutputter_vec<vertex_data> mmoutput_bias(filename + "_U_bias.mm", 0, num_feature_bins(), BIAS_POS, "This file contains Sparse_Gensgd output bias vector. In each row a single user bias.", latent_factors_inmem); MMOutputter_scalar gmean(filename + "_global_mean.mm", "This file contains Sparse_Gensgd global mean which is required for computing predictions.", globalMean); logstream(LOG_INFO) << " GENSGD output files (in matrix market format): " << filename << "_U.mm" << ", "<< filename << "_global_mean.mm, " << filename << "_U_bias.mm " <<std::endl; } int main(int argc, const char ** argv) { print_copyright(); /* GraphChi initialization will read the command line arguments and the configuration file. */ graphchi_init(argc, argv); /* Metrics object for keeping track of performance counters and other information. Currently required. 
*/ metrics m("als-tensor-inmemory-factors"); //specific command line parameters for gensgd gensgd_rate1 = get_option_float("gensgd_rate1", gensgd_rate1); gensgd_rate2 = get_option_float("gensgd_rate2", gensgd_rate2); gensgd_rate3 = get_option_float("gensgd_rate3", gensgd_rate3); gensgd_rate4 = get_option_float("gensgd_rate4", gensgd_rate4); gensgd_rate5 = get_option_float("gensgd_rate5", gensgd_rate5); gensgd_regw = get_option_float("gensgd_regw", gensgd_regw); gensgd_regv = get_option_float("gensgd_regv", gensgd_regv); gensgd_reg0 = get_option_float("gensgd_reg0", gensgd_reg0); gensgd_mult_dec = get_option_float("gensgd_mult_dec", gensgd_mult_dec); fc.hash_strings = get_option_int("rehash", fc.hash_strings); user_file = get_option_string("user_file", user_file); user_links = get_option_string("user_links", user_links); item_file = get_option_string("item_file", item_file); D = get_option_int("D", D); fc.from_pos = get_option_int("from_pos", fc.from_pos); fc.to_pos = get_option_int("to_pos", fc.to_pos); fc.val_pos = get_option_int("val_pos", fc.val_pos); limit_rating = get_option_int("limit_rating", limit_rating); calc_error = get_option_int("calc_error", calc_error); calc_roc = get_option_int("calc_roc", calc_roc); round_float = get_option_int("round_float", round_float); has_header_titles = get_option_int("has_header_titles", has_header_titles); fc.rehash_value = get_option_int("rehash_value", fc.rehash_value); cutoff = get_option_float("cutoff", cutoff); binary = get_option_int("binary", binary); parse_command_line_args(); parse_implicit_command_line(); fc.node_id_maps.resize(2); //initial place for from/to map //fc.stats_array.resize(fc.total_features); if (format == "libsvm"){ fc.val_pos = 0; fc.to_pos = 2; fc.from_pos = 1; binary = false; fc.hash_strings = true; } int nshards = convert_matrixmarket_N<edge_data>(training, false, fc, limit_rating); fc.total_features = fc.index_map.string2nodeid.size(); if (load_factors_from_file){ 
load_matrix_market_matrix(training + "_U.mm", 0, D); vec user_bias = load_matrix_market_vector(training +"_U_bias.mm", false, true); assert(user_bias.size() == num_feature_bins()); for (uint i=0; num_feature_bins(); i++){ latent_factors_inmem[i].bias = user_bias[i]; } vec gm = load_matrix_market_vector(training + "_global_mean.mm", false, true); globalMean = gm[0]; } init_gensgd(load_factors_from_file); if (has_header_titles && header_titles.size() == 0) logstream(LOG_FATAL)<<"Please delete temp files (using : \"rm -f " << training << ".*\") and run again" << std::endl; logstream(LOG_INFO)<<"Target variable " << std::setw(3) << fc.val_pos << " : " << (has_header_titles? header_titles[fc.val_pos] : "") <<std::endl; logstream(LOG_INFO)<<"From " << std::setw(3) << fc.from_pos<< " : " << (has_header_titles? header_titles[fc.from_pos] : "") <<std::endl; logstream(LOG_INFO)<<"To " << std::setw(3) << fc.to_pos << " : " << (has_header_titles? header_titles[fc.to_pos] : "") <<std::endl; /* Run */ Sparse_GensgdVerticesInMemProgram program; graphchi_engine<VertexDataType, EdgeDataType> engine(training, nshards, false, m); set_engine_flags(engine); pengine = &engine; engine.run(program, niters); /* Output test predictions in matrix-market format */ output_gensgd_result(training); test_predictions_N(&gensgd_predict, fc); /* Report execution metrics */ metrics_report(m); return 0; }
09jijiangwen-download
toolkits/collaborative_filtering/sparse_gensgd.cpp
C++
asf20
33,742
#ifndef DEF_RMSEHPP #define DEF_RMSEHPP #include <iostream> #include <iomanip> #include <omp.h> /** * @file * @author Danny Bickson * @version 1.0 * * @section LICENSE * * Copyright [2012] [Carnegie Mellon University] * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * File for aggregating and siplaying error mesasures and algorithm progress */ #include "timer.hpp" #include "eigen_wrapper.hpp" #include "common.hpp" void read_matrix_market_banner_and_size(FILE * pfile, MM_typecode & matcode, uint & M, uint & N, size_t & nz, const std::string & filename); FILE * open_file(const char * filename, const char * mode, bool optional); timer mytimer; double dtraining_rmse = 0; double last_training_rmse = 0; double dvalidation_rmse = 0; double last_validation_rmse = 0; int sign(double x){ if (x < 0) return -1; else if (x > 0) return 1; else return 0; } /* compute the average of the loss after aggregating it */ double finalize_rmse(double rmse, double num_edges){ double ret = 0; switch(loss_type){ case SQUARE: ret = sqrt(rmse / num_edges); break; case LOGISTIC: ret = rmse/num_edges; break; case ABS: ret = rmse / num_edges; case AP: ret = rmse / num_edges; break; } return ret; } /** calc the loss measure based on the cost function */ double calc_loss(double exp_prediction, double err){ double ret = 0; switch (loss_type){ case LOGISTIC: ret= (exp_prediction * log(exp_prediction) + (1-exp_prediction)*log(1-exp_prediction)); break; case SQUARE: ret = err*err; break; case ABS: ret = fabs(err); 
break; } return ret; } /** calc prediction error based on the cost function */ double calc_error_f(double exp_prediction, double err){ switch (loss_type){ case LOGISTIC: return err; case SQUARE: return err *= (exp_prediction*(1.0-exp_prediction)*(maxval-minval)); case ABS: return err = sign(err)*(exp_prediction*(1-exp_prediction)*(maxval-minval)); } return NAN; } /** compute predictions on test data */ void test_predictions(float (*prediction_func)(const vertex_data & user, const vertex_data & movie, float rating, double & prediction, void * extra), graphchi_context * gcontext = NULL, bool dosave = true, vec * avgprd = NULL, int pmf_burn_in = 0) { MM_typecode matcode; FILE *f; uint Me, Ne; size_t nz; if ((f = fopen(test.c_str(), "r")) == NULL) { return; //missing validaiton data, nothing to compute } FILE * fout = NULL; if (dosave) fout = open_file((test + ".predict").c_str(),"w", false); read_matrix_market_banner_and_size(f, matcode, Me, Ne, nz, test+".predict"); if ((M > 0 && N > 0 ) && (Me != M || Ne != N)) logstream(LOG_FATAL)<<"Input size of test matrix must be identical to training matrix, namely " << M << "x" << N << std::endl; if (avgprd && gcontext->iteration == pmf_burn_in) *avgprd = zeros(nz); if (dosave){ mm_write_banner(fout, matcode); fprintf(fout, "%%This file contains predictions of user/item pair, one prediction in each line. The first column is user id. The second column is the item id. 
The third column is the computed prediction.\n"); mm_write_mtx_crd_size(fout ,M,N,nz); } for (uint i=0; i<nz; i++) { int I, J; double val; int rc = fscanf(f, "%d %d %lg\n", &I, &J, &val); if (rc != 3) logstream(LOG_FATAL)<<"Error when reading input file: " << i << std::endl; I--; /* adjust from 1-based to 0-based */ J--; double prediction; (*prediction_func)(latent_factors_inmem[I], latent_factors_inmem[J+M], val, prediction, NULL); //TODO //for mcmc methods, store the sum of predictions if (avgprd && avgprd->size() > 0 && gcontext->iteration >= pmf_burn_in) avgprd->operator[](i) += prediction; if (dosave){ if (avgprd && avgprd->size() > 0) prediction = avgprd->operator[](i) /(gcontext->iteration - pmf_burn_in); fprintf(fout, "%d %d %12.8lg\n", I+1, J+1, prediction); } } fclose(f); if (dosave) fclose(fout); if (dosave) std::cout<<"Finished writing " << nz << " predictions to file: " << test << ".predict" << std::endl; } void test_predictions3(float (*prediction_func)(const vertex_data & user, const vertex_data & movie, float rating, double & prediction, void * extra), int time_offset = 0) { MM_typecode matcode; FILE *f; uint Me, Ne; size_t nz; if ((f = fopen(test.c_str(), "r")) == NULL) { return; //missing validaiton data, nothing to compute } FILE * fout = open_file((test + ".predict").c_str(),"w", false); read_matrix_market_banner_and_size(f, matcode, Me, Ne, nz, test+".predict"); if ((M > 0 && N > 0 ) && (Me != M || Ne != N)) logstream(LOG_FATAL)<<"Input size of test matrix must be identical to training matrix, namely " << M << "x" << N << std::endl; mm_write_banner(fout, matcode); mm_write_mtx_crd_size(fout ,M,N,nz); for (uint i=0; i<nz; i++) { int I, J; double val; int time; int rc = fscanf(f, "%d %d %d %lg\n", &I, &J, &time, &val); if (rc != 4) logstream(LOG_FATAL)<<"Error when reading input file: " << i << std::endl; if (time - input_file_offset < 0) logstream(LOG_FATAL)<<"Error: we assume time values >= " << input_file_offset << std::endl; I--; /* adjust 
from 1-based to 0-based */ J--; double prediction; (*prediction_func)(latent_factors_inmem[I], latent_factors_inmem[J+M], 1, prediction, (void*)&latent_factors_inmem[time+M+N-input_file_offset]); fprintf(fout, "%d %d %12.8lg\n", I+1, J+1, prediction); } fclose(f); fclose(fout); logstream(LOG_INFO)<<"Finished writing " << nz << " predictions to file: " << test << ".predict" << std::endl; } float (*prediction_func)(const vertex_data & user, const vertex_data & movie, float rating, double & prediction, void * extra); void detect_matrix_size(std::string filename, FILE *&f, uint &_M, uint &_N, size_t & nz, uint nodes, size_t edges, int type); /** compute validation rmse */ void validation_rmse(float (*prediction_func)(const vertex_data & user, const vertex_data & movie, float rating, double & prediction, void * extra) ,graphchi_context & gcontext, int tokens_per_row = 3, vec * avgprd = NULL, int pmf_burn_in = 0) { FILE *f; size_t nz; detect_matrix_size(validation, f, Me, Ne, nz, 0, 0, VALIDATION); if (f == NULL) return; if ((M > 0 && N > 0) && (Me != M || Ne != N)) logstream(LOG_FATAL)<<"Input size of validation matrix must be identical to training matrix, namely " << M << "x" << N << std::endl; Le = nz; if (avgprd != NULL && gcontext.iteration == 0) *avgprd = zeros(nz); last_validation_rmse = dvalidation_rmse; dvalidation_rmse = 0; int I, J; double val, time = 1.0; for (size_t i=0; i<nz; i++) { int rc; if (tokens_per_row == 3) rc = fscanf(f, "%d %d %lg\n", &I, &J, &val); else rc = fscanf(f, "%d %d %lg %lg\n", &I, &J, &time, &val); if (rc != tokens_per_row) logstream(LOG_FATAL)<<"Error when reading input file on line: " << i << " . 
should have" << tokens_per_row << std::endl; if (val < minval || val > maxval) logstream(LOG_FATAL)<<"Value is out of range: " << val << " should be: " << minval << " to " << maxval << std::endl; I--; /* adjust from 1-based to 0-based */ J--; double prediction; dvalidation_rmse += time *(*prediction_func)(latent_factors_inmem[I], latent_factors_inmem[J+M], val, prediction, avgprd == NULL ? NULL : &avgprd->operator[](i)); } fclose(f); assert(Le > 0); dvalidation_rmse = finalize_rmse(dvalidation_rmse , (double)Le); std::cout<<" Validation " << error_names[loss_type] << ":" << std::setw(10) << dvalidation_rmse << " ratings_per_sec: " << std::setw(10) << (gcontext.iteration*L/mytimer.current_time()) << std::endl; if (halt_on_rmse_increase > 0 && halt_on_rmse_increase < gcontext.iteration && dvalidation_rmse > last_validation_rmse){ logstream(LOG_WARNING)<<"Stopping engine because of validation RMSE increase" << std::endl; gcontext.set_last_iteration(gcontext.iteration); } } /** compute validation rmse */ void validation_rmse3(float (*prediction_func)(const vertex_data & user, const vertex_data & movie, const vertex_data & time, float rating, double & prediction) ,graphchi_context & gcontext,int tokens_per_row = 4, int time_offset = 0) { MM_typecode matcode; FILE *f; size_t nz; if ((f = fopen(validation.c_str(), "r")) == NULL) { std::cout<<std::endl; return; //missing validaiton data, nothing to compute } read_matrix_market_banner_and_size(f, matcode, Me, Ne, nz, validation); if ((M > 0 && N > 0) && (Me != M || Ne != N)) logstream(LOG_FATAL)<<"Input size of validation matrix must be identical to training matrix, namely " << M << "x" << N << std::endl; Le = nz; last_validation_rmse = dvalidation_rmse; dvalidation_rmse = 0; int I, J; double val, time = 1.0; for (size_t i=0; i<nz; i++) { int rc; rc = fscanf(f, "%d %d %lg %lg\n", &I, &J, &time, &val); time -= time_offset; if (rc != 4) logstream(LOG_FATAL)<<"Error when reading input file on line: " << i << " . 
should have 4 columns " << std::endl; if (val < minval || val > maxval) logstream(LOG_FATAL)<<"Value is out of range: " << val << " should be: " << minval << " to " << maxval << std::endl; if ((uint)time > K) logstream(LOG_FATAL)<<"Third column value time should be smaller than " << K << " while observed " << time << " in line : " << i << std::endl; I--; /* adjust from 1-based to 0-based */ J--; double prediction; dvalidation_rmse += (*prediction_func)(latent_factors_inmem[I], latent_factors_inmem[J+M], latent_factors_inmem[M+N+(uint)time], val, prediction); } fclose(f); assert(Le > 0); dvalidation_rmse = finalize_rmse(dvalidation_rmse , (double)Le); std::cout<<" Validation " << error_names[loss_type] << ":" << std::setw(10) << dvalidation_rmse << std::endl; if (halt_on_rmse_increase >= gcontext.iteration && dvalidation_rmse > last_validation_rmse){ logstream(LOG_WARNING)<<"Stopping engine because of validation RMSE increase" << std::endl; gcontext.set_last_iteration(gcontext.iteration); } } vec rmse_vec; double training_rmse(int iteration, graphchi_context &gcontext, bool items = false){ last_training_rmse = dtraining_rmse; dtraining_rmse = 0; double ret = 0; dtraining_rmse = sum(rmse_vec); int old_loss = loss_type; if (loss_type == AP) loss_type = SQUARE; ret = dtraining_rmse = finalize_rmse(dtraining_rmse, pengine->num_edges()); std::cout<< std::setw(10) << mytimer.current_time() << ") Iteration: " << std::setw(3) <<iteration<<" Training " << error_names[loss_type] << ":"<< std::setw(10)<< dtraining_rmse; loss_type = old_loss; return ret; } #endif //DEF_RMSEHPP
09jijiangwen-download
toolkits/collaborative_filtering/rmse.hpp
C++
asf20
11,486
/** * @file * @author Danny Bickson, based on code by Aapo Kyrola <akyrola@cs.cmu.edu> * @version 1.0 * * @section LICENSE * * Copyright [2012] [Carnegie Mellon University] * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * This file implements item based collaborative filtering by comparing all item pairs which * are connected by one or more user nodes. * * * For Pearson's correlation * * see: http://en.wikipedia.org/wiki/Pearson_product-moment_correlation_coefficient Cosine Similarity See: http://en.wikipedia.org/wiki/Cosine_similarity Manhattan Distance See http://en.wikipedia.org/wiki/Taxicab_geometry Log Similarity Distance See http://tdunning.blogspot.co.il/2008/03/surprise-and-coincidence.html Chebychev Distance http://en.wikipedia.org/wiki/Chebyshev_distance Tanimoto Distance See http://en.wikipedia.org/wiki/Jaccard_index Slope One See "A prorammers guide to data mining" page 18: http://guidetodatamining.com/guide/ch3/DataMining-ch3.pdf */ #include <string> #include <vector> #include <algorithm> #include <iomanip> #include <set> #include <iostream> #include "eigen_wrapper.hpp" #include "distance.hpp" #include "util.hpp" #include "timer.hpp" #include "common.hpp" enum DISTANCE_METRICS{ JACKARD = 0, AA = 1, RA = 2, PEARSON = 3, COSINE = 4, CHEBYCHEV = 5, MANHATTEN = 6, TANIMOTO = 7, LOG_LIKELIHOOD = 8, SLOPE_ONE = 9 }; int min_allowed_intersection = 1; size_t written_pairs = 0; size_t item_pairs_compared = 0; std::vector<FILE*> out_files; timer mytimer; bool * 
relevant_items = NULL; vec mean; vec stddev; int grabbed_edges = 0; int distance_metric; int debug; bool is_item(vid_t v){ return v >= M; } bool is_user(vid_t v){ return v < M; } /** * Type definitions. Remember to create suitable graph shards using the * Sharder-program. */ typedef unsigned int VertexDataType; typedef float EdgeDataType; // Edges store the "rating" of user->movie pair struct vertex_data{ vec pvec; vertex_data(){ } void set_val(int index, float val){ pvec[index] = val; } float get_val(int index){ return pvec[index]; } }; std::vector<vertex_data> latent_factors_inmem; #include "io.hpp" struct dense_adj { sparse_vec edges; dense_adj() { } double intersect(const dense_adj & other){ sparse_vec x1 = edges.unaryExpr(std::ptr_fun(equal_greater)); sparse_vec x2 = other.edges.unaryExpr(std::ptr_fun(equal_greater)); sparse_vec x3 = x1.cwiseProduct(x2); return sum(x3); } }; // This is used for keeping in-memory class adjlist_container { std::vector<dense_adj> adjs; //mutex m; public: vid_t pivot_st, pivot_en; adjlist_container() { if (debug) std::cout<<"setting pivot st and end to " << M << std::endl; pivot_st = M; //start pivor on item nodes (excluding user nodes) pivot_en = M; } void clear() { for(std::vector<dense_adj>::iterator it=adjs.begin(); it != adjs.end(); ++it) { if (nnz(it->edges)) { it->edges.resize(0); } } adjs.clear(); if (debug) std::cout<<"setting pivot st to " << pivot_en << std::endl; pivot_st = pivot_en; } /** * Extend the interval of pivot vertices to en. */ void extend_pivotrange(vid_t en) { assert(en>pivot_en); pivot_en = en; adjs.resize(pivot_en - pivot_st); } /** * Grab pivot's adjacency list into memory. 
*/ int load_edges_into_memory(graphchi_vertex<uint32_t, float> &v) { //assert(is_pivot(v.id())); //assert(is_item(v.id())); int num_edges = v.num_edges(); //not enough user rated this item, we don't need to compare to it if (num_edges < min_allowed_intersection){ relevant_items[v.id() - M] = false; return 0; } relevant_items[v.id() - M] = true; // Count how many neighbors have larger id than v dense_adj dadj; for(int i=0; i<num_edges; i++) set_new( dadj.edges, v.edge(i)->vertex_id(), v.edge(i)->get_data()); //std::sort(&dadj.adjlist[0], &dadj.adjlist[0] + num_edges); adjs[v.id() - pivot_st] = dadj; assert(v.id() - pivot_st < adjs.size()); __sync_add_and_fetch(&grabbed_edges, num_edges /*edges_to_larger_id*/); return num_edges; } int acount(vid_t pivot) { return nnz(adjs[pivot - pivot_st].edges); } /** * calc distance between two items. * Let a be all the users rated item 1 * Let b be all the users rated item 2 * * 3) Using Pearson correlation * Dist_12 = (a - mean)*(b- mean)' / (std(a)*std(b)) * * 4) Using cosine similarity: * Dist_12 = (a*b) / sqrt(sum_sqr(a)) * sqrt(sum_sqr(b))) * * 5) Using chebychev: * Dist_12 = max(abs(a-b)) * * 6) Using manhatten distance: * Dist_12 = sum(abs(a-b)) * * 7) Using tanimoto: * Dist_12 = 1.0 - [(a*b) / (sum_sqr(a) + sum_sqr(b) - a*b)] * * 8) Using log likelihood similarity * Dist_12 = 1.0 - 1.0/(1.0 + loglikelihood) * * 9) Using slope one: * Dist_12 = sum_(u in intersection (a,b) (r_u1-ru2 ) / size(intersection(a,b))) */ double calc_distance(graphchi_vertex<uint32_t, float> &v, vid_t pivot, int distance_metric) { //assert(is_pivot(pivot)); //assert(is_item(pivot) && is_item(v.id())); dense_adj &pivot_edges = adjs[pivot - pivot_st]; int num_edges = v.num_edges(); //if there are not enough neighboring user nodes to those two items there is no need //to actually count the intersection if (num_edges < min_allowed_intersection || nnz(pivot_edges.edges) < min_allowed_intersection) return 0; dense_adj item_edges; for(int i=0; i < 
num_edges; i++) set_new(item_edges.edges, v.edge(i)->vertexid, v.edge(i)->get_data()); double intersection_size = item_edges.intersect(pivot_edges); //not enough user nodes rated both items, so the pairs of items are not compared. if (intersection_size < (double)min_allowed_intersection) return 0; if (distance_metric == PEARSON){ if (debug){ std::cout<< pivot -M+1<<" Pivot edges: " <<pivot_edges.edges << std::endl; std::cout<< "Minusmean: " << minus(pivot_edges.edges,mean) << std::endl; std::cout<< v.id() -M+1<<"Item edges: " <<item_edges.edges << std::endl; std::cout<< "Minusmean: " << minus(item_edges.edges, mean) << std::endl; } double dist = minus(pivot_edges.edges, mean).dot(minus(item_edges.edges, mean)); if (debug) std::cout<<"dist " << pivot-M+1 << ":" << v.id()-M+1 << " " << dist << std::endl; return dist / (stddev[pivot-M] * stddev[v.id()-M]); } else if (distance_metric == TANIMOTO){ return calc_tanimoto_distance(pivot_edges.edges, item_edges.edges, sum_sqr(pivot_edges.edges), sum_sqr(item_edges.edges)); } else if (distance_metric == CHEBYCHEV){ return calc_chebychev_distance(pivot_edges.edges, item_edges.edges); } else if (distance_metric == LOG_LIKELIHOOD){ return calc_loglikelihood_distance(pivot_edges.edges, item_edges.edges, sum_sqr(pivot_edges.edges), sum_sqr(item_edges.edges)); } else if (distance_metric == COSINE){ return calc_cosine_distance(pivot_edges.edges, item_edges.edges, sum_sqr(pivot_edges.edges), sum_sqr(item_edges.edges)); } else if (distance_metric ==MANHATTEN){ return calc_manhatten_distance(pivot_edges.edges, item_edges.edges); } else if (distance_metric == SLOPE_ONE){ return calc_slope_one_distance(pivot_edges.edges, item_edges.edges) / intersection_size; } return NAN; } inline bool is_pivot(vid_t vid) { return vid >= pivot_st && vid < pivot_en; } }; adjlist_container * adjcontainer; struct ItemDistanceProgram : public GraphChiProgram<VertexDataType, EdgeDataType> { /** * Vertex update function. 
 * Vertex update function. Behavior is phased by iteration number:
 *  - iteration 0: accumulate per-user rating sums into `mean`
 *  - iteration 1: compute per-item deviation statistics into `stddev`
 *  - even iterations >= 2: load pivot items into memory and mark which items
 *    share a user with a pivot (via `relevant_items`)
 *  - odd iterations: compare each relevant item against all in-memory pivots
 *    and write similar pairs to the per-thread output file
 */
void update(graphchi_vertex<VertexDataType, EdgeDataType> &v, graphchi_context &gcontext) {
  if (debug)
    printf("Entered iteration %d with %d\n", gcontext.iteration, v.id());

  //in the zero iteration compute the mean
  if (gcontext.iteration == 0){
    if (is_item(v.id())){
      for(int i=0; i<v.num_edges(); i++) {
        graphchi_edge<float> * e = v.edge(i);
        vid_t user = e->vertexid;
        // NOTE(review): divides by N (total item count), i.e. unrated items
        // are treated as rating 0 in the user's mean — confirm intended.
        mean[user] += e->get_data() / (float)N;
      }
    }
  }
  //at the first iteration compute the stddev of each item from the mean
  else if (gcontext.iteration == 1){
    if (is_item(v.id())){
      dense_adj item_edges;
      for(int i=0; i < v.num_edges(); i++)
        set_new(item_edges.edges, v.edge(i)->vertexid, v.edge(i)->get_data());
      // NOTE(review): this stores the sample VARIANCE (no sqrt), although the
      // variable is named stddev and calc_distance() divides by
      // stddev[a]*stddev[b] for Pearson — looks like a missing sqrt; verify.
      stddev[v.id() - M] = sum(minus(item_edges.edges, mean).array().pow(2)) / (M-1.0);
      if (debug)
        std::cout<<"item: " << v.id() - M+1 << " stddev: " << stddev[v.id() - M] << std::endl;
    }
  }
  /* even iteration numbers:
   * 1) load a subset of items into memory (pivots)
   * 2) Find which subset of items needs to compared to the users
   */
  else if (gcontext.iteration % 2 == 0) {
    if (adjcontainer->is_pivot(v.id()) && is_item(v.id())){
      adjcontainer->load_edges_into_memory(v);
      if (debug)
        printf("Loading pivot %d intro memory\n", v.id());
    }
    else if (is_user(v.id())){
      //check if this user is connected to any pivot item
      bool has_pivot = false;
      int pivot = -1;
      for(int i=0; i<v.num_edges(); i++) {
        graphchi_edge<float> * e = v.edge(i);
        //assert(is_item(e->vertexid));
        if (adjcontainer->is_pivot(e->vertexid) && relevant_items[e->vertexid-M]) {
          has_pivot = true;
          pivot = e->vertexid;
          break;
        }
      }
      if (debug)
        printf("user %d is linked to pivot %d\n", v.id(), pivot);
      //this user is not connected to any of the pivot item nodes and thus
      //it is not relevant at this point
      if (!has_pivot)
        return;
      //this user is connected to a pivot item, thus all connected items should be compared
      for(int i=0; i<v.num_edges(); i++) {
        graphchi_edge<float> * e = v.edge(i);
        //assert(v.id() != e->vertexid);
        relevant_items[e->vertexid - M] = true;
      }
    }//is_user
  }
  /* odd iteration number:
   * 1) For any item connected to a pivot item, compute the intersection
   *    and write out the similarity when it is non-zero.
   */
  else {
    if (!relevant_items[v.id() - M]){
      return;
    }
    for (vid_t i=adjcontainer->pivot_st; i< adjcontainer->pivot_en; i++){
      //since metric is symmetric, compare only to pivots which are smaller than this item id
      if (i >= v.id() || (!relevant_items[i-M]))
        continue;
      double dist = adjcontainer->calc_distance(v, i, distance_metric);
      item_pairs_compared++;
      if (item_pairs_compared % 1000000 == 0)
        logstream(LOG_INFO)<< std::setw(10) << mytimer.current_time() << ")  " << std::setw(10) << item_pairs_compared << " pairs compared " << std::endl;
      if (debug)
        printf("comparing %d to pivot %d distance is %lg\n", i - M + 1, v.id() - M + 1, dist);
      if (dist != 0){
        //write item similarity to file, format: [item A] [item B] [distance]
        fprintf(out_files[omp_get_thread_num()], "%u %u %.12lg\n", v.id()-M+1, i-M+1, (double)dist);
        written_pairs++;
      }
    }
  }//end of iteration % 2 == 1
}//end of update function

/**
 * Called before an iteration starts.
 * On even iterations, schedule all users and items, reset the relevance
 * bitmap, and clear the in-memory pivot container; on odd iterations,
 * schedule only the item nodes.
 */
void before_iteration(int iteration, graphchi_context &gcontext) {
  gcontext.scheduler->remove_tasks(0, (int) gcontext.nvertices - 1);
  if (gcontext.iteration % 2 == 0){
    memset(relevant_items, 0, sizeof(bool)*N);
    for (vid_t i=0; i < M+N; i++){
      gcontext.scheduler->add_task(i);
    }
    if (debug)
      printf("scheduling all nodes, setting relevant_items to zero\n");
    grabbed_edges = 0;
    adjcontainer->clear();
  } else { //iteration % 2 == 1
    for (vid_t i=M; i < M+N; i++){
      gcontext.scheduler->add_task(i);
    }
  }
}

/**
 * Called after an iteration has finished; only used to dump the mean
 * vector when debugging.
 */
void after_iteration(int iteration, graphchi_context &gcontext) {
  if (debug && gcontext.iteration == 0)
    std::cout<<"Mean : " << mean << std::endl;
}

/**
 * Called before an execution interval is started.
* * On every even iteration, we load pivot's item connected user lists to memory. * Here we manage the memory to ensure that we do not load too much * edges into memory. */ void before_exec_interval(vid_t window_st, vid_t window_en, graphchi_context &gcontext) { /* on even iterations, loads pivot items into memory base on the membudget_mb allowed memory size */ if ((gcontext.iteration % 2 == 0) && (gcontext.iteration >= 2)) { if (debug){ printf("entering iteration: %d on before_exec_interval\n", gcontext.iteration); printf("pivot_st is %d window_en %d\n", adjcontainer->pivot_st, window_en); } if (adjcontainer->pivot_st <= window_en) { size_t max_grab_edges = get_option_long("membudget_mb", 1024) * 1024 * 1024 / 8; if (grabbed_edges < max_grab_edges * 0.8) { logstream(LOG_DEBUG) << "Window init, grabbed: " << grabbed_edges << " edges" << " extending pivor_range to : " << window_en + 1 << std::endl; adjcontainer->extend_pivotrange(window_en + 1); logstream(LOG_DEBUG) << "Window en is: " << window_en << " vertices: " << gcontext.nvertices << std::endl; if (window_en+1 == gcontext.nvertices) { // every item was a pivot item, so we are done logstream(LOG_DEBUG)<<"Setting last iteration to: " << gcontext.iteration + 2 << std::endl; gcontext.set_last_iteration(gcontext.iteration + 2); } } else { logstream(LOG_DEBUG) << "Too many edges, already grabbed: " << grabbed_edges << std::endl; } } } } }; int main(int argc, const char ** argv) { print_copyright(); /* GraphChi initialization will read the command line arguments and the configuration file. */ graphchi_init(argc, argv); /* Metrics object for keeping track of performance counters and other information. Currently required. 
*/ metrics m("item-cf2"); /* Basic arguments for application */ min_allowed_intersection = get_option_int("min_allowed_intersection", min_allowed_intersection); distance_metric = get_option_int("distance", PEARSON); if (distance_metric != PEARSON && distance_metric != MANHATTEN && distance_metric != COSINE && distance_metric != CHEBYCHEV && distance_metric != LOG_LIKELIHOOD && distance_metric != TANIMOTO && distance_metric != SLOPE_ONE) logstream(LOG_FATAL)<<"--distance_metrix=XX should be one of: 3=PEARSON, 4=COSINE, 5=CHEBYCHEV, 6=MANHATTEN, 7=TANIMOTO, 8=LOG_LIKELIHOOD, 9 = SLOPE_ONE" << std::endl; debug = get_option_int("debug", 0); parse_command_line_args(); //if (distance_metric != JACKARD && distance_metric != AA && distance_metric != RA) // logstream(LOG_FATAL)<<"Wrong distance metric. --distance_metric=XX, where XX should be either 0) JACKARD, 1) AA, 2) RA" << std::endl; mytimer.start(); int nshards = convert_matrixmarket<EdgeDataType>(training, NULL, 0, 0, 3, TRAINING, false); assert(M > 0 && N > 0); //initialize data structure which saves a subset of the items (pivots) in memory adjcontainer = new adjlist_container(); //array for marking which items are conected to the pivot items via users. 
relevant_items = new bool[N]; mean = vec::Zero(M); stddev = vec::Zero(N); /* Run */ ItemDistanceProgram program; graphchi_engine<VertexDataType, EdgeDataType> engine(training/*+orderByDegreePreprocessor->getSuffix()*/ ,nshards, true, m); set_engine_flags(engine); //open output files as the number of operating threads out_files.resize(number_of_omp_threads()); for (uint i=0; i< out_files.size(); i++){ char buf[256]; sprintf(buf, "%s.out%d", training.c_str(), i); out_files[i] = open_file(buf, "w"); } //run the program engine.run(program, niters); /* Report execution metrics */ if (!quiet) metrics_report(m); std::cout<<"Total item pairs compared: " << item_pairs_compared << " total written to file: " << written_pairs << std::endl; for (uint i=0; i< out_files.size(); i++) fclose(out_files[i]); std::cout<<"Created output files with the format: " << training << ".outXX, where XX is the output thread number" << std::endl; delete[] relevant_items; return 0; }
09jijiangwen-download
toolkits/collaborative_filtering/itemcf2.cpp
C++
asf20
17,498
/** * @file * @author Danny Bickson * @version 1.0 * * @section LICENSE * * Copyright [2012] [Carnegie Mellon University] * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * @section DESCRIPTION * * Matrix factorization with the Bias Stochastic Gradient Descent (BIASSGD) algorithm. * Algorithm is described in the paper: * Y. Koren. Factorization Meets the Neighborhood: a Multifaceted Collaborative Filtering Model. ACM SIGKDD 2008. Equation (5). * Thanks to Zeno Gantner, MyMediaLight for teaching me how to compute the derivative in case of logistic and absolute loss. * http://mymedialite.net/ */ #include "common.hpp" #include "eigen_wrapper.hpp" double biassgd_lambda = 1e-3; //sgd step size double biassgd_gamma = 1e-3; //sgd regularization double biassgd_step_dec = 0.9; //sgd step decrement #define BIAS_POS -1 struct vertex_data { vec pvec; //storing the feature vector double bias; vertex_data() { pvec = zeros(D); bias = 0; } void set_val(int index, float val){ if (index == BIAS_POS) bias = val; else pvec[index] = val; } float get_val(int index){ if (index== BIAS_POS) return bias; else return pvec[index]; } }; /** * Type definitions. Remember to create suitable graph shards using the * Sharder-program. 
*/ typedef vertex_data VertexDataType; typedef float EdgeDataType; // Edges store the "rating" of user->movie pair graphchi_engine<VertexDataType, EdgeDataType> * pengine = NULL; graphchi_engine<VertexDataType, EdgeDataType> * pvalidation_engine = NULL; std::vector<vertex_data> latent_factors_inmem; #include "rmse.hpp" #include "rmse_engine.hpp" #include "io.hpp" /** compute a missing value based on bias-SGD algorithm */ float bias_sgd_predict(const vertex_data& user, const vertex_data& movie, const float rating, double & prediction, void * extra = NULL){ prediction = globalMean/maxval + user.bias + movie.bias + dot_prod(user.pvec, movie.pvec); double exp_prediction = 1.0 / (1.0 + exp(-prediction)); //truncate prediction to allowed values prediction = minval + exp_prediction *(maxval-minval); //return the squared error float err = rating - prediction; if (std::isnan(err)) logstream(LOG_FATAL)<<"Got into numerical errors. Try to decrease step size using bias-SGD command line arugments)" << std::endl; if (extra != NULL) *(double*)extra = exp_prediction; return calc_loss(exp_prediction, err); } /** * GraphChi programs need to subclass GraphChiProgram<vertex-type, edge-type> * class. The main logic is usually in the update function. */ struct BIASSGDVerticesInMemProgram : public GraphChiProgram<VertexDataType, EdgeDataType> { /** * Called before an iteration is started. */ void before_iteration(int iteration, graphchi_context &gcontext) { reset_rmse(gcontext.execthreads); } /** * Called after an iteration has finished. */ void after_iteration(int iteration, graphchi_context &gcontext) { biassgd_gamma *= biassgd_step_dec; training_rmse(iteration, gcontext); run_validation(pvalidation_engine, gcontext); } /** * Vertex update function. 
*/ void update(graphchi_vertex<VertexDataType, EdgeDataType> &vertex, graphchi_context &gcontext) { //user node if ( vertex.num_outedges() > 0){ vertex_data & user = latent_factors_inmem[vertex.id()]; for(int e=0; e < vertex.num_edges(); e++) { float observation = vertex.edge(e)->get_data(); vertex_data & movie = latent_factors_inmem[vertex.edge(e)->vertex_id()]; double prediction; double exp_prediction; rmse_vec[omp_get_thread_num()] += bias_sgd_predict(user, movie, observation, prediction, &exp_prediction); double err = observation - prediction; err = calc_error_f(exp_prediction, err); if (std::isnan(err) || std::isinf(err)) logstream(LOG_FATAL)<<"BIASSGD got into numerical error. Please tune step size using --biassgd_gamma and biassgd_lambda" << std::endl; user.bias += biassgd_gamma*(err - biassgd_lambda* user.bias); movie.bias += biassgd_gamma*(err - biassgd_lambda* movie.bias); //NOTE: the following code is not thread safe, since potentially several //user nodes may update this item gradient vector concurrently. However in practice it //did not matter in terms of accuracy on a multicore machine. //if you like to defend the code, you can define a global variable //mutex mymutex; // //and then do: mymutex.lock() movie.pvec += biassgd_gamma*(err*user.pvec - biassgd_lambda*movie.pvec); //here add: mymutex.unlock(); user.pvec += biassgd_gamma*(err*movie.pvec - biassgd_lambda*user.pvec); } } } }; void output_biassgd_result(std::string filename){ MMOutputter_mat<vertex_data> user_mat(filename + "_U.mm", 0, M, "This file contains bias-SGD output matrix U. In each row D factors of a single user node.", latent_factors_inmem); MMOutputter_mat<vertex_data> item_mat(filename + "_V.mm", M, M+N , "This file contains bias-SGD output matrix V. In each row D factors of a single item node.", latent_factors_inmem); MMOutputter_vec<vertex_data> user_bias_vec(filename + "_U_bias.mm", 0, M, BIAS_POS, "This file contains bias-SGD output bias vector. 
In each row a single user bias.",latent_factors_inmem); MMOutputter_vec<vertex_data> item_bias_vec(filename + "_V_bias.mm",M ,M+N, BIAS_POS, "This file contains bias-SGD output bias vector. In each row a single item bias.", latent_factors_inmem); MMOutputter_scalar gmean(filename + "_global_mean.mm", "This file contains SVD++ global mean which is required for computing predictions.", globalMean); logstream(LOG_INFO) << "SVDPP output files (in matrix market format): " << filename << "_U.mm" << ", " << filename + "_V.mm, " << filename << "_U_bias.mm, " << filename << "_V_bias.mm, " << filename << "_global_mean.mm" << std::endl; } int main(int argc, const char ** argv) { print_copyright(); //* GraphChi initialization will read the command line arguments and the configuration file. */ graphchi_init(argc, argv); /* Metrics object for keeping track of performance counters and other information. Currently required. */ metrics m("biassgd2"); biassgd_lambda = get_option_float("biassgd_lambda", 1e-3); biassgd_gamma = get_option_float("biassgd_gamma", 1e-3); biassgd_step_dec = get_option_float("biassgd_step_dec", 0.9); parse_command_line_args(); parse_implicit_command_line(); if (maxval == 1e100 || minval == -1e100) logstream(LOG_FATAL)<<"You must set min allowed rating and max allowed rating using the --minval and --maval flags" << std::endl; /* Preprocess data if needed, or discover preprocess files */ int nshards = convert_matrixmarket<EdgeDataType>(training, NULL,0, 0, 3, TRAINING, false); init_feature_vectors<std::vector<vertex_data> >(M+N, latent_factors_inmem, !load_factors_from_file); if (validation != ""){ int vshards = convert_matrixmarket<EdgeDataType>(validation, NULL, 0, 0, 3, VALIDATION, false); init_validation_rmse_engine<VertexDataType, EdgeDataType>(pvalidation_engine, vshards, &bias_sgd_predict); } /* load initial state from disk (optional) */ if (load_factors_from_file){ load_matrix_market_matrix(training + "_U.mm", 0, D); load_matrix_market_matrix(training 
+ "_V.mm", M, D); vec user_bias = load_matrix_market_vector(training +"_U_bias.mm", false, true); vec item_bias = load_matrix_market_vector(training +"_V_bias.mm", false, true); for (uint i=0; i<M+N; i++){ latent_factors_inmem[i].bias = ((i<M)?user_bias[i] : item_bias[i-M]); } vec gm = load_matrix_market_vector(training + "_global_mean.mm", false, true); globalMean = gm[0]; } /* Run */ BIASSGDVerticesInMemProgram program; graphchi_engine<VertexDataType, EdgeDataType> engine(training, nshards, false, m); set_engine_flags(engine); pengine = &engine; engine.run(program, niters); /* Output latent factor matrices in matrix-market format */ output_biassgd_result(training); test_predictions(&bias_sgd_predict); /* Report execution metrics */ if (!quiet) metrics_report(m); return 0; }
09jijiangwen-download
toolkits/collaborative_filtering/biassgd2.cpp
C++
asf20
8,903
/** * @file * @author Danny Bickson * @version 1.0 * * @section LICENSE * * Copyright [2012] [Carnegie Mellon University] * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * @section DESCRIPTION * * Matrix factorization with the Stochastic Gradient Descent (SGD) algorithm. * Algorithm is described in the papers: * 1) Matrix Factorization Techniques for Recommender Systems Yehuda Koren, Robert Bell, Chris Volinsky. In IEEE Computer, Vol. 42, No. 8. (07 August 2009), pp. 30-37. * 2) Takács, G, Pilászy, I., Németh, B. and Tikk, D. (2009). Scalable Collaborative Filtering Approaches for Large Recommender Systems. Journal of Machine Learning Research, 10, 623-656. * * */ #include "eigen_wrapper.hpp" #include "common.hpp" double sgd_lambda = 1e-3; //sgd regularization double sgd_gamma = 1e-3; //sgd step size double sgd_step_dec = 0.9; //sgd step decrement struct vertex_data { vec pvec; //storing the feature vector vertex_data() { pvec = zeros(D); } void set_val(int index, float val){ pvec[index] = val; } float get_val(int index){ return pvec[index]; } }; #include "util.hpp" /** * Type definitions. Remember to create suitable graph shards using the * Sharder-program. 
*/ typedef vertex_data VertexDataType; typedef float EdgeDataType; // Edges store the "rating" of user->movie pair graphchi_engine<VertexDataType, EdgeDataType> * pengine = NULL; graphchi_engine<VertexDataType, EdgeDataType> * pvalidation_engine = NULL; std::vector<vertex_data> latent_factors_inmem; #include "rmse.hpp" #include "rmse_engine.hpp" #include "io.hpp" /** compute a missing value based on SGD algorithm */ float sgd_predict(const vertex_data& user, const vertex_data& movie, const float rating, double & prediction, void * extra = NULL){ prediction = dot_prod(user.pvec,movie.pvec); //truncate prediction to allowed values prediction = std::min((double)prediction, maxval); prediction = std::max((double)prediction, minval); //return the squared error float err = rating - prediction; assert(!std::isnan(err)); return err*err; } /** * GraphChi programs need to subclass GraphChiProgram<vertex-type, edge-type> * class. The main logic is usually in the update function. */ struct SGDVerticesInMemProgram : public GraphChiProgram<VertexDataType, EdgeDataType> { /** * Called before an iteration is started. */ void before_iteration(int iteration, graphchi_context &gcontext) { reset_rmse(gcontext.execthreads); } /** * Called after an iteration has finished. */ void after_iteration(int iteration, graphchi_context &gcontext) { sgd_gamma *= sgd_step_dec; training_rmse(iteration, gcontext); run_validation(pvalidation_engine, gcontext); } /** * Vertex update function. 
*/ void update(graphchi_vertex<VertexDataType, EdgeDataType> &vertex, graphchi_context &gcontext) { //go over all user nodes if ( vertex.num_outedges() > 0){ vertex_data & user = latent_factors_inmem[vertex.id()]; //go over all ratings for(int e=0; e < vertex.num_edges(); e++) { float observation = vertex.edge(e)->get_data(); vertex_data & movie = latent_factors_inmem[vertex.edge(e)->vertex_id()]; double estScore; rmse_vec[omp_get_thread_num()] += sgd_predict(user, movie, observation, estScore); double err = observation - estScore; if (std::isnan(err) || std::isinf(err)) logstream(LOG_FATAL)<<"SGD got into numerical error. Please tune step size using --sgd_gamma and sgd_lambda" << std::endl; //NOTE: the following code is not thread safe, since potentially several //user nodes may updates this item gradient vector concurrently. However in practice it //did not matter in terms of accuracy on a multicore machine. //if you like to defend the code, you can define a global variable //mutex mymutex; // //and then do: mymutex.lock() movie.pvec += sgd_gamma*(err*user.pvec - sgd_lambda*movie.pvec); //and here add: mymutex.unlock(); user.pvec += sgd_gamma*(err*movie.pvec - sgd_lambda*user.pvec); } } } }; //dump output to file void output_sgd_result(std::string filename) { MMOutputter_mat<vertex_data> user_mat(filename + "_U.mm", 0, M, "This file contains SGD output matrix U. In each row D factors of a single user node.", latent_factors_inmem); MMOutputter_mat<vertex_data> item_mat(filename + "_V.mm", M ,M+N, "This file contains SGD output matrix V. In each row D factors of a single item node.", latent_factors_inmem); logstream(LOG_INFO) << "SGD output files (in matrix market format): " << filename << "_U.mm" << ", " << filename + "_V.mm " << std::endl; } int main(int argc, const char ** argv) { print_copyright(); //* GraphChi initialization will read the command line arguments and the configuration file. 
*/ graphchi_init(argc, argv); /* Metrics object for keeping track of performance counters and other information. Currently required. */ metrics m("sgd-inmemory-factors"); /* Basic arguments for application. NOTE: File will be automatically 'sharded'. */ sgd_lambda = get_option_float("sgd_lambda", 1e-3); sgd_gamma = get_option_float("sgd_gamma", 1e-3); sgd_step_dec = get_option_float("sgd_step_dec", 0.9); parse_command_line_args(); parse_implicit_command_line(); /* Preprocess data if needed, or discover preprocess files */ int nshards = convert_matrixmarket<EdgeDataType>(training, NULL, 0, 0, 3, TRAINING, false); init_feature_vectors<std::vector<vertex_data> >(M+N, latent_factors_inmem, !load_factors_from_file); if (validation != ""){ int vshards = convert_matrixmarket<EdgeDataType>(validation, NULL, 0, 0, 3, VALIDATION, false); init_validation_rmse_engine<VertexDataType, EdgeDataType>(pvalidation_engine, vshards, &sgd_predict); } /* load initial state from disk (optional) */ if (load_factors_from_file){ load_matrix_market_matrix(training + "_U.mm", 0, D); load_matrix_market_matrix(training + "_V.mm", M, D); } print_config(); /* Run */ SGDVerticesInMemProgram program; graphchi_engine<VertexDataType, EdgeDataType> engine(training, nshards, false, m); set_engine_flags(engine); pengine = &engine; engine.run(program, niters); /* Output latent factor matrices in matrix-market format */ output_sgd_result(training); test_predictions(&sgd_predict); /* Report execution metrics */ if (!quiet) metrics_report(m); return 0; }
09jijiangwen-download
toolkits/collaborative_filtering/sgd.cpp
C++
asf20
7,208
/** * @file * @author Danny Bickson, based on code by Aapo Kyrola * @version 1.0 * * @section LICENSE * * Copyright [2012] [Carnegie Mellon University] * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * @section DESCRIPTION * * Matrix factorizatino with the Alternative Least Squares (ALS) algorithm. * This code is based on GraphLab's implementation of ALS by Joey Gonzalez * and Danny Bickson (CMU). A good explanation of the algorithm is * given in the following paper: * Large-Scale Parallel Collaborative Filtering for the Netflix Prize * Yunhong Zhou, Dennis Wilkinson, Robert Schreiber and Rong Pan * http://www.springerlink.com/content/j1076u0h14586183/ * * Faster version of ALS, which stores latent factors of vertices in-memory. * Thus, this version requires more memory. See the version "als_edgefactors" * for a low-memory implementation. * * * In the code, we use movie-rating terminology for clarity. This code has been * tested with the Netflix movie rating challenge, where the task is to predict * how user rates movies in range from 1 to 5. * * This code is has integrated preprocessing, 'sharding', so it is not necessary * to run sharder prior to running the matrix factorization algorithm. Input * data must be provided in the Matrix Market format (http://math.nist.gov/MatrixMarket/formats.html). * * ALS uses free linear algebra library 'Eigen'. See Readme_Eigen.txt for instructions * how to obtain it. 
* * At the end of the processing, the two latent factor matrices are written into files in * the matrix market format. * */ #include "common.hpp" #include "eigen_wrapper.hpp" double lambda = 0.065; struct vertex_data { vec pvec; vertex_data() { pvec = zeros(D); } void set_val(int index, float val){ pvec[index] = val; } float get_val(int index){ return pvec[index]; } }; /** * Type definitions. Remember to create suitable graph shards using the * Sharder-program. */ typedef vertex_data VertexDataType; typedef float EdgeDataType; // Edges store the "rating" of user->movie pair graphchi_engine<VertexDataType, EdgeDataType> * pengine = NULL; graphchi_engine<VertexDataType, EdgeDataType> * pvalidation_engine = NULL; std::vector<vertex_data> latent_factors_inmem; #include "io.hpp" #include "rmse.hpp" #include "rmse_engine.hpp" /** compute a missing value based on ALS algorithm */ float als_predict(const vertex_data& user, const vertex_data& movie, const float rating, double & prediction, void * extra = NULL){ prediction = dot_prod(user.pvec, movie.pvec); //truncate prediction to allowed values prediction = std::min((double)prediction, maxval); prediction = std::max((double)prediction, minval); //return the squared error float err = rating - prediction; assert(!std::isnan(err)); return err*err; } /** * GraphChi programs need to subclass GraphChiProgram<vertex-type, edge-type> * class. The main logic is usually in the update function. 
*/ struct ALSVerticesInMemProgram : public GraphChiProgram<VertexDataType, EdgeDataType> { /** * Vertex update function - computes the least square step */ void update(graphchi_vertex<VertexDataType, EdgeDataType> &vertex, graphchi_context &gcontext) { vertex_data & vdata = latent_factors_inmem[vertex.id()]; mat XtX = mat::Zero(D, D); vec Xty = vec::Zero(D); bool compute_rmse = (vertex.num_outedges() > 0); // Compute XtX and Xty (NOTE: unweighted) for(int e=0; e < vertex.num_edges(); e++) { float observation = vertex.edge(e)->get_data(); vertex_data & nbr_latent = latent_factors_inmem[vertex.edge(e)->vertex_id()]; Xty += nbr_latent.pvec * observation; XtX.triangularView<Eigen::Upper>() += nbr_latent.pvec * nbr_latent.pvec.transpose(); if (compute_rmse) { double prediction; rmse_vec[omp_get_thread_num()] += als_predict(vdata, nbr_latent, observation, prediction); } } double regularization = lambda; if (regnormal) regularization *= vertex.num_edges(); for(int i=0; i < D; i++) XtX(i,i) += regularization; // Solve the least squares problem with eigen using Cholesky decomposition vdata.pvec = XtX.selfadjointView<Eigen::Upper>().ldlt().solve(Xty); } /** * Called before an iteration is started. */ void before_iteration(int iteration, graphchi_context &gcontext) { reset_rmse(gcontext.execthreads); } /** * Called after an iteration has finished. */ void after_iteration(int iteration, graphchi_context &gcontext) { training_rmse(iteration, gcontext); run_validation(pvalidation_engine, gcontext); } }; void output_als_result(std::string filename) { MMOutputter_mat<vertex_data> user_mat(filename + "_U.mm", 0, M , "This file contains ALS output matrix U. In each row D factors of a single user node.", latent_factors_inmem); MMOutputter_mat<vertex_data> item_mat(filename + "_V.mm", M ,M+N, "This file contains ALS output matrix V. 
In each row D factors of a single item node.", latent_factors_inmem); logstream(LOG_INFO) << "ALS output files (in matrix market format): " << filename << "_U.mm" << ", " << filename + "_V.mm " << std::endl; } int main(int argc, const char ** argv) { print_copyright(); /* GraphChi initialization will read the command line arguments and the configuration file. */ graphchi_init(argc, argv); /* Metrics object for keeping track of performance counters and other information. Currently required. */ metrics m("als-inmemory-factors"); lambda = get_option_float("lambda", 0.065); parse_command_line_args(); parse_implicit_command_line(); /* Preprocess data if needed, or discover preprocess files */ int nshards = convert_matrixmarket<EdgeDataType>(training, NULL, 0, 0, 3, TRAINING, false); init_feature_vectors<std::vector<vertex_data> >(M+N, latent_factors_inmem, !load_factors_from_file); if (validation != ""){ int vshards = convert_matrixmarket<EdgeDataType>(validation, NULL, 0, 0, 3, VALIDATION, false); init_validation_rmse_engine<VertexDataType, EdgeDataType>(pvalidation_engine, vshards, &als_predict); } /* load initial state from disk (optional) */ if (load_factors_from_file){ load_matrix_market_matrix(training + "_U.mm", 0, D); load_matrix_market_matrix(training + "_V.mm", M, D); } /* Run */ ALSVerticesInMemProgram program; graphchi_engine<VertexDataType, EdgeDataType> engine(training, nshards, false, m); set_engine_flags(engine); pengine = &engine; engine.run(program, niters); /* Output latent factor matrices in matrix-market format */ output_als_result(training); test_predictions(&als_predict); /* Report execution metrics */ if (!quiet) metrics_report(m); return 0; }
09jijiangwen-download
toolkits/collaborative_filtering/als.cpp
C++
asf20
7,456
/** * @file * @author Danny Bickson * @version 1.0 * * @section LICENSE * * Copyright [2012] [Carnegie Mellon University] * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * @section DESCRIPTION * * time-SVD++ algorithm implementation. As described in the paper: * Yehuda Koren. 2009. Collaborative filtering with temporal dynamics. In Proceedings of the 15th ACM SIGKDD international conference on Knowledge discovery and data mining (KDD '09). ACM, New York, NY, USA, 447-456. DOI=10.1145/1557019.1557072 * */ #include "common.hpp" #include "eigen_wrapper.hpp" struct timesvdpp_params{ double lrate; double beta; double gamma; double lrate_mult_dec; timesvdpp_params(){ lrate =0.0001; beta = 0.00001; gamma = 0.0001; lrate_mult_dec = 0.9; } }; timesvdpp_params tsp; bool is_user(vid_t id){ return id < M; } bool is_item(vid_t id){ return id >= M && id < N; } bool is_time(vid_t id){ return id >= M+N; } #define BIAS_POS -1 struct vertex_data { vec pvec; double bias; vertex_data() { bias = 0; } void set_val(int index, float val){ if (index == BIAS_POS) bias = val; else pvec[index] = val; } float get_val(int index){ if (index == BIAS_POS) return bias; else return pvec[index]; } }; struct edge_data { double weight; double time; edge_data() { weight = time = 0; } edge_data(double weight, double time) : weight(weight), time(time) { } }; struct time_svdpp_usr{ double * bu; double * p; double * pu; double * x; double * ptemp; time_svdpp_usr(vertex_data & vdata){ bu = &vdata.bias; 
assert(vdata.pvec.size() == D*4); //TO REMOVE p = &vdata.pvec[0]; pu = p+D; x = pu+D; ptemp = x+D; } time_svdpp_usr & operator = (vertex_data & vdata){ bu = &vdata.bias; assert(vdata.pvec.size() == D*4); //TO REMOVE p = &vdata.pvec[0]; pu = p+D; x = pu+D; ptemp = x+D; return *this; } }; struct time_svdpp_movie{ double * bi; double * q; double * y; time_svdpp_movie(vertex_data& vdata){ assert(vdata.pvec.size() == D*2); bi = &vdata.bias; q = &vdata.pvec[0]; y = q+D; } time_svdpp_movie & operator=(const vertex_data& vdata){ assert(vdata.pvec.size() == D*2); bi = (double*)&vdata.bias; q = (double*)&vdata.pvec[0]; y = (double*)(q+D); return *this; } }; struct time_svdpp_time{ double * bt; double * z; double * pt; time_svdpp_time(vertex_data& vdata){ bt = &vdata.bias; z = &vdata.pvec[0]; pt = z+D; assert(vdata.pvec.size() == D*2); } time_svdpp_time & operator=(vertex_data & vdata){ bt = &vdata.bias; z = &vdata.pvec[0]; pt = z+D; assert(vdata.pvec.size() == D*2); return *this; } }; float time_svdpp_predict(const time_svdpp_usr & usr, const time_svdpp_movie & mov, const time_svdpp_time & ptime, const float rating, double & prediction){ //prediction = global_mean + user_bias + movie_bias double pui = globalMean + *usr.bu + *mov.bi; for(int k=0;k<D;k++){ // + user x movie factors pui += (usr.ptemp[k] * mov.q[k]); // + user x time factors pui += usr.x[k] * ptime.z[k]; // + user x time x movies factors pui += usr.pu[k] * ptime.pt[k] * mov.q[k]; } pui = std::min(pui,maxval); pui = std::max(pui,minval); prediction = pui; if (std::isnan(prediction)) logstream(LOG_FATAL)<<"Got into numerical errors! 
Try to decrease --lrate, --gamma, --beta" <<std::endl; float err = rating - prediction; return err*err; } float time_svdpp_predict(const vertex_data& user, const vertex_data& movie, const float rating, double & prediction, void * extra){ return time_svdpp_predict(time_svdpp_usr((vertex_data&)user), time_svdpp_movie((vertex_data&)movie), time_svdpp_time(*(vertex_data*)extra), rating, prediction); } /** * Type definitions. Remember to create suitable graph shards using the * Sharder-program. */ typedef vertex_data VertexDataType; typedef edge_data EdgeDataType; // Edges store the "rating" of user->movie pair graphchi_engine<VertexDataType, EdgeDataType> * pengine = NULL; graphchi_engine<VertexDataType, EdgeDataType> * pvalidation_engine = NULL; std::vector<vertex_data> latent_factors_inmem; void init_time_svdpp_node_data(){ int k = D; #pragma omp parallel for for (int u = 0; u < (int)M; u++) { vertex_data & data = latent_factors_inmem[u]; data.pvec = zeros(4*k); time_svdpp_usr usr(data); *usr.bu = 0; for (int m=0; m< k; m++){ usr.p[m] = 0.01*drand48() / (double) (k); usr.pu[m] = 0.001 * drand48() / (double) (k); usr.x[m] = 0.001 * drand48() / (double) (k); usr.ptemp[m] = usr.p[m]; } } #pragma omp parallel for for (int i = M; i < (int)(N+M); i++) { vertex_data & data = latent_factors_inmem[i]; data.pvec = zeros(2*k); time_svdpp_movie movie(data); *movie.bi = 0; for (int m = 0; m < k; m++){ movie.q[m] = 0.01 * drand48() / (double) (k); movie.y[m] = 0.001 * drand48() / (double) (k); } } } void init_time_svdpp(){ fprintf(stderr, "time-SVD++ %d factors\n", D); int k = D; latent_factors_inmem.resize(M+N+K); init_time_svdpp_node_data(); #pragma omp parallel for for (int i = M+N; i < (int)(M+N+K); i++) { vertex_data & data = latent_factors_inmem[i]; data.pvec = zeros(2*k); time_svdpp_time timenode(data); *timenode.bt = 0; for (int m = 0; m < k; m++){ timenode.z[m] = 0.001 * drand48() / (double) (k); timenode.pt[m] = 0.001 * drand48() / (double) (k); } } } #include "io.hpp" 
#include "rmse.hpp" #include "rmse_engine4.hpp" /** * GraphChi programs need to subclass GraphChiProgram<vertex-type, edge-type> * class. The main logic is usually in the update function. */ struct TIMESVDPPVerticesInMemProgram : public GraphChiProgram<VertexDataType, EdgeDataType> { /* * Vertex update function - computes the least square step */ void update(graphchi_vertex<VertexDataType, EdgeDataType> &vertex, graphchi_context &gcontext) { //go over all user nodes if (is_user(vertex.id())){ vertex_data & user = latent_factors_inmem[vertex.id()]; time_svdpp_usr usr(user); unsigned int userRatings = vertex.num_outedges(); double rRuNum = 1/sqrt(userRatings+10); int dim = D; double sumY = 0.0; //go over all ratings for(int e=0; e < vertex.num_outedges(); e++) { uint pos = vertex.edge(e)->vertex_id(); assert(pos >= M && pos < M+N); vertex_data & data = latent_factors_inmem[pos]; time_svdpp_movie movie(data); Map<vec> y(movie.y, D); sumY += sum((const vec&)y); //y } for( int k=0; k<dim; ++k) { usr.ptemp[k] = usr.pu[k] + rRuNum * sumY; // pTemp = pu + rRuNum*sumY } vec sum = zeros(dim); for(int e=0; e < vertex.num_edges(); e++) { //edge_data & edge = scope.edge_data(oedgeid); //float rui = edge.weight; float rui = vertex.edge(e)->get_data().weight; uint t = (uint)(vertex.edge(e)->get_data().time - 1); // we assume time bins start from 1 assert(t < M+N+K); vertex_data & data = latent_factors_inmem[vertex.edge(e)->vertex_id()]; time_svdpp_movie mov(data); time_svdpp_time time(latent_factors_inmem[t]); double pui = 0; time_svdpp_predict(usr, mov, time, rui, pui); double eui = rui - pui; *usr.bu += tsp.lrate*(eui - tsp.beta* *usr.bu); *mov.bi += tsp.lrate * (eui - tsp.beta* *mov.bi); for (int k = 0; k < dim; k++) { double oldValue = mov.q[k]; double userValue = usr.ptemp[k] + usr.pu[k] * time.pt[k]; sum[k] += eui * mov.q[k]; mov.q[k] += tsp.lrate * (eui * userValue - tsp.gamma*mov.q[k]); usr.ptemp[k] += tsp.lrate * ( eui * oldValue - tsp.gamma * usr.ptemp[k]); usr.p[k] += 
tsp.lrate * ( eui * oldValue - tsp.gamma*usr.p[k] ); usr.pu[k] += tsp.lrate * (eui * oldValue * time.pt[k] - tsp.gamma * usr.pu[k]); time.pt[k] += tsp.lrate * (eui * oldValue * usr.pu[k] - tsp.gamma * time.pt[k]); double xOldValue = usr.x[k]; double zOldValue = time.z[k]; usr.x[k] += tsp.lrate * (eui * zOldValue - tsp.gamma * xOldValue); time.z[k] += tsp.lrate * (eui * xOldValue - tsp.gamma * zOldValue); } rmse_vec[omp_get_thread_num()] += eui*eui; } for(int e=0; e < vertex.num_edges(); e++) { time_svdpp_movie mov = latent_factors_inmem[vertex.edge(e)->vertex_id()]; for(int k=0;k<dim;k++){ mov.y[k] += tsp.lrate * (rRuNum * sum[k]- tsp.gamma*mov.y[k]); } } } }; /** * Called before an iteration is started. */ void before_iteration(int iteration, graphchi_context &gcontext) { reset_rmse(gcontext.execthreads); } /** * Called after an iteration has finished. */ void after_iteration(int iteration, graphchi_context &gcontext) { tsp.lrate *= tsp.lrate_mult_dec; training_rmse(iteration, gcontext); run_validation4(pvalidation_engine, gcontext); }; }; void output_timesvdpp_result(std::string filename) { MMOutputter_mat<vertex_data> user_mat(filename + "_U.mm", 0, M, "This file contains TIMESVDPP output matrix U. In each row 4xD factors of a single user node. The vectors are [p pu x ptemp]", latent_factors_inmem); MMOutputter_mat<vertex_data> item_mat(filename + "_V.mm", M ,M+N, "This file contains -TIMESVDPP output matrix V. In each row 2xD factors of a single item node. The vectors are [q y]", latent_factors_inmem); MMOutputter_mat<vertex_data> time_mat(filename + "_T.mm", M+N ,M+N+K, "This file contains -TIMESVDPP output matrix T. In each row 2xD factors of a single time node. The vectors are [z pt]", latent_factors_inmem); MMOutputter_vec<vertex_data> mmoutput_bias_left(filename + "_U_bias.mm", 0, M, BIAS_POS, "This file contains time-svd++ output bias vector. 
In each row a single user bias.", latent_factors_inmem); MMOutputter_vec<vertex_data> mmoutput_bias_right(filename + "_V_bias.mm",M ,M+N , BIAS_POS, "This file contains time-svd++ output bias vector. In each row a single item bias.", latent_factors_inmem); MMOutputter_scalar gmean(filename + "_global_mean.mm", "This file contains time-svd++ global mean which is required for computing predictions.", globalMean); logstream(LOG_INFO) << " time-svd++ output files (in matrix market format): " << filename << "_U.mm" << ", " << filename + "_V.mm " << filename + "_T.mm, " << filename << " _global_mean.mm, " << filename << "_U_bias.mm " << filename << "_V_bias.mm " << std::endl; } int main(int argc, const char ** argv) { print_copyright(); /* GraphChi initialization will read the command line arguments and the configuration file. */ graphchi_init(argc, argv); /* Metrics object for keeping track of performance counters and other information. Currently required. */ metrics m("time-svdpp-inmemory-factors"); //specific command line parameters for time-svd++ tsp.lrate = get_option_float("lrate", tsp.lrate); tsp.beta = get_option_float("beta", tsp.beta); tsp.gamma = get_option_float("gamma", tsp.gamma); tsp.lrate_mult_dec = get_option_float("lrate_mult_dec", tsp.lrate_mult_dec); parse_command_line_args(); parse_implicit_command_line(); /* Preprocess data if needed, or discover preprocess files */ int nshards = convert_matrixmarket4<edge_data>(training, false); init_time_svdpp(); if (validation != ""){ int vshards = convert_matrixmarket4<EdgeDataType>(validation, false, M==N, VALIDATION); init_validation_rmse_engine<VertexDataType, EdgeDataType>(pvalidation_engine, vshards, &time_svdpp_predict, false, true, 1); } if (load_factors_from_file){ load_matrix_market_matrix(training + "_U.mm", 0, 4*D); load_matrix_market_matrix(training + "_V.mm", M, 2*D); load_matrix_market_matrix(training + "_T.mm", M+N, 2*D); vec user_bias = load_matrix_market_vector(training +"_U_bias.mm", false, 
true); vec item_bias = load_matrix_market_vector(training +"_V_bias.mm", false, true); vec time_bias = load_matrix_market_vector(training+ "_T_bias.mm", false, true); for (uint i=0; i<M+N+K; i++){ if (i < M) latent_factors_inmem[i].bias = user_bias[i]; else if (i <M+N) latent_factors_inmem[i].bias = item_bias[i-M]; else latent_factors_inmem[i].bias = time_bias[i-M-N]; } vec gm = load_matrix_market_vector(training + "_global_mean.mm", false, true); globalMean = gm[0]; } /* Run */ TIMESVDPPVerticesInMemProgram program; graphchi_engine<VertexDataType, EdgeDataType> engine(training, nshards, false, m); set_engine_flags(engine); pengine = &engine; engine.run(program, niters); /* Output test predictions in matrix-market format */ output_timesvdpp_result(training); test_predictions3(&time_svdpp_predict, 1); /* Report execution metrics */ if (!quiet) metrics_report(m); return 0; }
09jijiangwen-download
toolkits/collaborative_filtering/timesvdpp.cpp
C++
asf20
13,668
/** * Copyright (c) 2009 Carnegie Mellon University. * All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an "AS * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language * governing permissions and limitations under the License. * * For more about this software visit: * * http://www.graphlab.ml.cmu.edu * * Code written by Danny Bickson, CMU * Any changes to the code must include this original license notice in full. * This file implements the shooting algorithm for solving Lasso problem */ #ifndef _COSAMP_HPP #define _COSAMP_HPP #include "eigen_wrapper.hpp" ivec sort_union(ivec a, ivec b){ ivec ab = concat(a,b); sort(ab); for (int i=1; i< ab.size(); i++){ if (ab[i] == ab[i-1]) del(ab,i); } return ab; } vec CoSaMP(const mat & Phi, const vec & u, int K, int max_iter, double tol1, int D){ assert(K<= 2*D); assert(K>=1); assert(Phi.rows() == Phi.cols()); assert(Phi.rows() == D); assert(u.size() == D); vec Sest = zeros(D); vec utrue = Sest; vec v = u; int t=1; ivec T2; while (t<max_iter){ ivec z = sort_index(fabs(Phi.transpose() * v)); z = reverse(z); ivec Omega = head(z,2*K); ivec T=sort_union(Omega,T2); mat phit=get_cols(Phi, T); vec b; bool ret = backslash(phit, u, b); assert(ret); ret = false;//avoid warning b= fabs(b); ivec z3 = sort_index(b); z3 = reverse(z3); Sest=zeros(D); for (int i=0; i< K; i++) set_val(Sest, z3[i], b[z3[i]]); ivec z2 = sort_index(fabs(Sest)); z2 = reverse(z2); T2 = head(z2,K-1); v=u-Phi*Sest; double n2 = max(fabs(v)); if (n2 < tol1) break; t++; } return Sest; } void test_cosamp(){ mat A= init_mat("0.9528 0.5982 0.8368 ; 0.7041 0.8407 0.5187; 0.9539 0.4428 
0.0222", 3, 3); vec b= init_vec(" 0.3759 0.8986 0.4290",3); int K=1; double epsilon =1e-3; vec ret = CoSaMP(A,b,K,10, epsilon,3); vec right = init_vec("0 1.2032 0", 3); double diff = norm(ret - right); assert(diff <1e-4); diff = 0; //avoid warning } #endif
09jijiangwen-download
toolkits/collaborative_filtering/cosamp.hpp
C++
asf20
2,526
/** * @file * @author Danny Bickson, CMU * @version 1.0 * * @section LICENSE * * Copyright [2012] [Carnegie Mellon University] * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * @section DESCRIPTION * * This program computes top K recommendations based on the linear model computed * by one of: als,sparse_als,wals, sgd and nmf applications. * */ #include "common.hpp" #include "eigen_wrapper.hpp" #include "timer.hpp" int debug; int num_ratings; double knn_sample_percent = 1.0; const double epsilon = 1e-16; timer mytimer; int tokens_per_row = 3; int algo = 0; #define BIAS_POS -1 enum { SVDPP = 0, BIASSGD = 1 }; struct vertex_data { vec ratings; ivec ids; vec pvec; vec weight; double bias; vertex_data() { bias = 0; assert(num_ratings > 0); ratings = zeros(num_ratings); ids = ivec::Zero(num_ratings); assert(D > 0); pvec = zeros(D); weight = zeros(D); } void set_val(int index, float val){ if (index == BIAS_POS) bias = val; else if (index < D) pvec[index] = val; else weight[index-D] = val; } float get_val(int index){ if (index== BIAS_POS) return bias; else if (index < D) return pvec[index]; else return weight[index-D]; } }; struct edge_data { double weight; edge_data() { weight = 0; } edge_data(double weight) : weight(weight) { } }; struct edge_data4 { double weight; double time; edge_data4() { weight = time = 0; } edge_data4(double weight, double time) : weight(weight), time(time) { } }; /** * Type definitions. Remember to create suitable graph shards using the * Sharder-program. 
*/ typedef vertex_data VertexDataType; typedef edge_data EdgeDataType; // Edges store the "rating" of user->movie pair graphchi_engine<VertexDataType, EdgeDataType> * pengine = NULL; std::vector<vertex_data> latent_factors_inmem; /** compute a missing value based on SVD++ algorithm */ float svdpp_predict(const vertex_data& user, const vertex_data& movie, const float rating, double & prediction, void * extra = NULL){ //\hat(r_ui) = \mu + prediction = globalMean; // + b_u + b_i + prediction += user.bias + movie.bias; // + q_i^T *(p_u +sqrt(|N(u)|)\sum y_j) //prediction += dot_prod(movie.pvec,(user.pvec+user.weight)); for (int j=0; j< D; j++) prediction += movie.pvec[j] * (user.pvec[j] + user.weight[j]); prediction = std::min((double)prediction, maxval); prediction = std::max((double)prediction, minval); float err = rating - prediction; if (std::isnan(err)) logstream(LOG_FATAL)<<"Got into numerical errors. Try to decrease step size using the command line: svdpp_user_bias_step, svdpp_item_bias_step, svdpp_user_factor2_step, svdpp_user_factor_step, svdpp_item_step" << std::endl; return err*err; } /** compute a missing value based on bias-SGD algorithm */ float biassgd_predict(const vertex_data& user, const vertex_data& movie, const float rating, double & prediction, void * extra = NULL){ prediction = globalMean + user.bias + movie.bias + dot_prod(user.pvec, movie.pvec); //truncate prediction to allowed values prediction = std::min((double)prediction, maxval); prediction = std::max((double)prediction, minval); //return the squared error float err = rating - prediction; if (std::isnan(err)) logstream(LOG_FATAL)<<"Got into numerical errors. 
Try to decrease step size using bias-SGD command line arugments)" << std::endl; return err*err; } void rating_stats(){ double min=1e100, max=0, avg=0; int cnt = 0; int startv = 0; int endv = M; for (int i=startv; i< endv; i++){ vertex_data& data = latent_factors_inmem[i]; if (data.ratings.size() > 0){ min = std::min(min, data.ratings[0]); max = std::max(max, data.ratings[0]); if (std::isnan(data.ratings[0])) printf("bug: nan on %d\n", i); else { avg += data.ratings[0]; cnt++; } } } printf("Distance statistics: min %g max %g avg %g\n", min, max, avg/cnt); } #include "io.hpp" void read_factors(std::string base_filename){ if (algo == SVDPP) load_matrix_market_matrix(training + "_U.mm", 0, 2*D); else if (algo == BIASSGD) load_matrix_market_matrix(training + "_U.mm", 0, D); else assert(false); load_matrix_market_matrix(training + "_V.mm", M, D); vec user_bias = load_matrix_market_vector(training +"_U_bias.mm", false, true); assert(user_bias.size() == M); vec item_bias = load_matrix_market_vector(training +"_V_bias.mm", false, true); assert(item_bias.size() == N); for (uint i=0; i<M+N; i++){ latent_factors_inmem[i].bias = ((i<M)?user_bias[i] : item_bias[i-M]); } vec gm = load_matrix_market_vector(training + "_global_mean.mm", false, true); globalMean = gm[0]; } template<typename VertexDataType, typename EdgeDataType> struct RatingVerticesInMemProgram : public GraphChiProgram<VertexDataType, EdgeDataType> { /** * Vertex update function - computes the least square step */ void update(graphchi_vertex<VertexDataType, EdgeDataType> &vertex, graphchi_context &gcontext) { //compute only for user nodes if (vertex.id() >= M) return; vertex_data & vdata = latent_factors_inmem[vertex.id()]; int howmany = (int)(N*knn_sample_percent); assert(howmany > 0 ); vec distances = zeros(howmany); ivec indices = ivec::Zero(howmany); for (int i=0; i< howmany; i++){ indices[i]= -1; } std::vector<bool> curratings; curratings.resize(N); for(int e=0; e < vertex.num_edges(); e++) { //no need to 
calculate this rating since it is given in the training data reference assert(vertex.edge(e)->vertex_id() - M >= 0 && vertex.edge(e)->vertex_id() - M < N); curratings[vertex.edge(e)->vertex_id() - M] = true; } if (knn_sample_percent == 1.0){ for (uint i=M; i< M+N; i++){ if (curratings[i-M]) continue; vertex_data & other = latent_factors_inmem[i]; double dist; if (algo == SVDPP) svdpp_predict(vdata, other, 0, dist); else biassgd_predict(vdata, other, 0, dist); indices[i-M] = i-M; distances[i-M] = dist + 1e-10; } } else for (int i=0; i<howmany; i++){ int random_other = ::randi(M, M+N-1); vertex_data & other = latent_factors_inmem[random_other]; double dist; if (algo == SVDPP) svdpp_predict(vdata, other, 0, dist); else biassgd_predict(vdata, other, 0, dist); indices[i] = random_other-M; distances[i] = dist; } vec out_dist(num_ratings); ivec indices_sorted = reverse_sort_index2(distances, indices, out_dist, num_ratings); assert(indices_sorted.size() <= num_ratings); assert(out_dist.size() <= num_ratings); vdata.ids = indices_sorted; vdata.ratings = out_dist; if (debug) printf("Closest is: %d with distance %g\n", (int)vdata.ids[0], vdata.ratings[0]); if (vertex.id() % 1000 == 0) printf("Computing recommendations for user %d at time: %g\n", vertex.id()+1, mytimer.current_time()); } }; struct MMOutputter_ratings{ MMOutputter_ratings(std::string fname, uint start, uint end, std::string comment) { assert(start < end); MM_typecode matcode; set_matcode(matcode); FILE * outf = fopen(fname.c_str(), "w"); assert(outf != NULL); mm_write_banner(outf, matcode); if (comment != "") fprintf(outf, "%%%s\n", comment.c_str()); mm_write_mtx_array_size(outf, end-start, num_ratings+1); for (uint i=start; i < end; i++){ fprintf(outf, "%u ", i+1); for(int j=0; j < latent_factors_inmem[i].ratings.size(); j++) { fprintf(outf, "%1.12e ", latent_factors_inmem[i].ratings[j]); } fprintf(outf, "\n"); } fclose(outf); } }; struct MMOutputter_ids{ MMOutputter_ids(std::string fname, uint start, uint 
end, std::string comment) { assert(start < end); MM_typecode matcode; set_matcode(matcode); FILE * outf = fopen(fname.c_str(), "w"); assert(outf != NULL); mm_write_banner(outf, matcode); if (comment != "") fprintf(outf, "%%%s\n", comment.c_str()); mm_write_mtx_array_size(outf, end-start, num_ratings+1); for (uint i=start; i < end; i++){ fprintf(outf, "%u ", i+1); for(int j=0; j < latent_factors_inmem[i].ids.size(); j++) { fprintf(outf, "%u ", (int)latent_factors_inmem[i].ids[j]+1);//go back to item ids starting from 1,2,3, (and not from zero as in c) } fprintf(outf, "\n"); } fclose(outf); } }; void output_knn_result(std::string filename) { MMOutputter_ratings ratings(filename + ".ratings", 0, M,"This file contains user scalar ratings. In each row i, num_ratings top scalar ratings of different items for user i. (First column: user id, next columns, top K ratings)"); MMOutputter_ids mmoutput_ids(filename + ".ids", 0, M ,"This file contains item ids matching the ratings. In each row i, num_ratings top item ids for user i. (First column: user id, next columns, top K ratings). Note: 0 item id means there are no more items to recommend for this user."); std::cout << "Rating output files (in matrix market format): " << filename << ".ratings" << ", " << filename + ".ids " << std::endl; } int main(int argc, const char ** argv) { mytimer.start(); print_copyright(); /* GraphChi initialization will read the command line arguments and the configuration file. */ graphchi_init(argc, argv); /* Metrics object for keeping track of performance counters and other information. Currently required. 
*/ metrics m("nmf-inmemory-factors"); knn_sample_percent = get_option_float("knn_sample_percent", 1.0); if (knn_sample_percent <= 0 || knn_sample_percent > 1) logstream(LOG_FATAL)<<"Sample percente should be in the range (0, 1] " << std::endl; num_ratings = get_option_int("num_ratings", 10); if (num_ratings <= 0) logstream(LOG_FATAL)<<"num_ratings, the number of recomended items for each user, should be >=1 " << std::endl; debug = get_option_int("debug", 0); tokens_per_row = get_option_int("tokens_per_row", tokens_per_row); std::string algorithm = get_option_string("algorithm"); if (algorithm == "svdpp" || algorithm == "svd++") algo = SVDPP; else if (algorithm == "biassgd") algo = BIASSGD; else logstream(LOG_FATAL)<<"--algorithm should be svd++ or biassgd"<<std::endl; parse_command_line_args(); /* Preprocess data if needed, or discover preprocess files */ int nshards = 0; if (tokens_per_row == 3) nshards = convert_matrixmarket<edge_data>(training, NULL, 0, 0, 3, TRAINING, false); else if (tokens_per_row == 4) nshards = convert_matrixmarket4<edge_data4>(training); else logstream(LOG_FATAL)<<"--tokens_per_row should be either 3 or 4" << std::endl; assert(M > 0 && N > 0); latent_factors_inmem.resize(M+N); // Initialize in-memory vertices. 
read_factors(training); if ((uint)num_ratings > N){ logstream(LOG_WARNING)<<"num_ratings is too big - setting it to: " << N << std::endl; num_ratings = N; } srand(time(NULL)); /* Run */ if (tokens_per_row == 3){ RatingVerticesInMemProgram<VertexDataType, EdgeDataType> program; graphchi_engine<VertexDataType, EdgeDataType> engine(training, nshards, false, m); set_engine_flags(engine); engine.run(program, 1); } else if (tokens_per_row == 4){ RatingVerticesInMemProgram<VertexDataType, edge_data4> program; graphchi_engine<VertexDataType, edge_data4> engine(training, nshards, false, m); set_engine_flags(engine); engine.run(program, 1); } /* Output latent factor matrices in matrix-market format */ output_knn_result(training); rating_stats(); /* Report execution metrics */ if (!quiet) metrics_report(m); return 0; }
09jijiangwen-download
toolkits/collaborative_filtering/rating2.cpp
C++
asf20
12,507
/** * @file * @author Danny Bickson, based on code by Aapo Kyrola <akyrola@cs.cmu.edu> * @version 1.0 * * @section LICENSE * * Copyright [2012] [Carnegie Mellon University] * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * This program takes both a rating file (user to item rasting) and a similarity * file (item to item similarities). * The output of this program is K top recommendations for each user based using * the current user ratings and the item similarities. * */ #include <string> #include <vector> #include <algorithm> #include <iomanip> #include <set> #include <iostream> #include "eigen_wrapper.hpp" #include "distance.hpp" #include "util.hpp" #include "timer.hpp" #include "common.hpp" int min_allowed_intersection = 1; size_t written_pairs = 0; size_t item_pairs_compared = 0; FILE * out_file; timer mytimer; bool * relevant_items = NULL; int grabbed_edges = 0; int distance_metric; int debug; int undirected = 1; double Q = 3; //the power of the weights added into the total score bool is_item(vid_t v){ return v >= M; } bool is_user(vid_t v){ return v < M; } /** * Type definitions. Remember to create suitable graph shards using the * Sharder-program. 
*/ typedef unsigned int VertexDataType; struct edge_data{ float up_weight; float down_weight; edge_data(){ up_weight = 0; down_weight = 0; } edge_data(float up_weight, float down_weight) : up_weight(up_weight), down_weight(down_weight) { }; }; typedef edge_data EdgeDataType; // Edges store the "rating" of user->movie pair struct vertex_data{ vec pvec; vertex_data(){ } void set_val(int index, float val){ pvec[index] = val; } float get_val(int index){ return pvec[index]; } }; std::vector<vertex_data> latent_factors_inmem; #include "io.hpp" struct dense_adj { sparse_vec edges; sparse_vec ratings; mutex mymutex; vid_t vid; dense_adj() { vid = -1; } }; bool find_twice(std::vector<vid_t>& edges, vid_t val){ int ret = 0; for (int i=0; i < (int)edges.size(); i++){ if (edges[i] == val) ret++; } assert(ret >= 0 && ret <= 2); return (ret == 2); } // This is used for keeping in-memory class adjlist_container { public: std::vector<dense_adj> adjs; vid_t pivot_st, pivot_en; adjlist_container() { if (debug) std::cout<<"setting pivot st and end to " << 0 << std::endl; pivot_st = 0; //start pivot on user nodes (excluding item nodes) pivot_en = 0; } void clear() { for(std::vector<dense_adj>::iterator it=adjs.begin(); it != adjs.end(); ++it) { if (nnz(it->edges)) { it->edges.resize(0); } it->ratings.resize(0); } adjs.clear(); if (debug) std::cout<<"setting pivot st to " << pivot_en << std::endl; pivot_st = pivot_en; } /** * Extend the interval of pivot vertices to en. */ void extend_pivotrange(vid_t en) { assert(en>pivot_en); assert(en > pivot_st); pivot_en = en; adjs.resize(pivot_en - pivot_st); //for (uint i=0; i< pivot_en - pivot_st; i++) // adjs[i].ratings = zeros(N); } /** * Grab pivot's adjacency list into memory. 
*/ int load_edges_into_memory(graphchi_vertex<uint32_t, EdgeDataType> &v) { assert(is_pivot(v.id())); assert(is_user(v.id())); int num_edges = v.num_edges(); dense_adj dadj; for(int i=0; i<num_edges; i++) set_new( dadj.edges, v.edge(i)->vertex_id(), v.edge(i)->get_data().up_weight); //dadj.ratings = zeros(N); dadj.vid = v.id(); adjs[v.id() - pivot_st] = dadj; assert(v.id() - pivot_st < adjs.size()); __sync_add_and_fetch(&grabbed_edges, num_edges /*edges_to_larger_id*/); return num_edges; } /** * add weighted ratings for each linked item * */ double compute_ratings(graphchi_vertex<uint32_t, EdgeDataType> &item, vid_t user_pivot, int distance_metric) { assert(is_pivot(user_pivot)); //assert(is_item(pivot) && is_item(v.id())); dense_adj &pivot_edges = adjs[user_pivot - pivot_st]; if (!get_val(pivot_edges.edges, item.id())){ if (debug) logstream(LOG_DEBUG)<<"Skipping item pivot pair since not connected!" << item.id() << std::endl; return 0; } int num_edges = item.num_edges(); if (debug) logstream(LOG_DEBUG)<<"Found " << num_edges << " edges from item : " << item.id() << std::endl; //if there are not enough neighboring user nodes to those two items there is no need //to actually count the intersection if (num_edges < min_allowed_intersection || nnz(pivot_edges.edges) < min_allowed_intersection){ if (debug) logstream(LOG_DEBUG)<<"skipping item pivot pair since < min_allowed_intersection" << std::endl; return 0; } std::vector<vid_t> edges; for(int i=0; i < num_edges; i++){ if (is_item(item.edge(i)->vertex_id())) edges.push_back(item.edge(i)->vertex_id()); } std::sort(edges.data(), edges.data()+edges.size()); for(int i=0; i < num_edges; i++){ vid_t other_item = item.edge(i)->vertex_id(); bool up = item.id() < other_item; if (debug) logstream(LOG_DEBUG)<<"Checking now edge: " << other_item << std::endl; if (is_user(other_item)){ if (debug) logstream(LOG_DEBUG)<<"skipping edge to user " << other_item << std::endl; continue; } if (!undirected && ((!up && 
item.edge(i)->get_data().up_weight == 0) || (up && item.edge(i)->get_data().down_weight == 0))){ if (debug) logstream(LOG_DEBUG)<<"skipping edge with wrong direction to " << other_item << std::endl; continue; } if (get_val(pivot_edges.edges, other_item)){ if (debug) logstream(LOG_DEBUG)<<"skipping edge to " << other_item << " because alrteady connected to pivot" << std::endl; continue; } assert(get_val(pivot_edges.edges, item.id()) != 0); float weight = std::max(item.edge(i)->get_data().down_weight, item.edge(i)->get_data().up_weight); assert(weight != 0); if (undirected || find_twice(edges, other_item)){ //pivot_edges.ratings[edges[i]-M] += item.edge(i)->get_data() * get_val(pivot_edges.edges, item.id()); pivot_edges.mymutex.lock(); set_val(pivot_edges.ratings, other_item-M, get_val(pivot_edges.ratings, other_item-M) + pow(weight,Q) /* * get_val(pivot_edges.edges, item.id())*/); pivot_edges.mymutex.unlock(); if (debug) logstream(LOG_DEBUG)<<"Adding weight: " << weight << " to item: " << other_item-M+1 << " for user: " << user_pivot+1<<std::endl; } } if (debug) logstream(LOG_DEBUG)<<"Finished user pivot " << user_pivot << std::endl; return 0; } inline bool is_pivot(vid_t vid) { return vid >= pivot_st && vid < pivot_en; } }; adjlist_container * adjcontainer; struct ItemDistanceProgram : public GraphChiProgram<VertexDataType, EdgeDataType> { /** * Vertex update function. */ void update(graphchi_vertex<VertexDataType, EdgeDataType> &v, graphchi_context &gcontext) { if (debug) printf("Entered iteration %d with %d\n", gcontext.iteration, is_item(v.id()) ? 
(v.id() - M + 1): v.id()); /* Even iteration numbers: * 1) load a subset of users into memory (pivots) * 2) Find which subset of items is connected to the users */ if (gcontext.iteration % 2 == 0) { if (adjcontainer->is_pivot(v.id()) && is_user(v.id())){ adjcontainer->load_edges_into_memory(v); if (debug) printf("Loading pivot %d intro memory\n", v.id()); } } /* odd iteration number: * 1) For any item connected to a pivot item * compute itersection */ else { assert(is_item(v.id())); for (int i=0; i< v.num_edges(); i++){ if (!adjcontainer->is_pivot(v.edge(i)->vertex_id())) continue; if (debug) printf("comparing user pivot %d to item %d\n", v.edge(i)->vertex_id()+1 , v.id() - M + 1); adjcontainer->compute_ratings(v, v.edge(i)->vertex_id(), distance_metric); item_pairs_compared++; if (item_pairs_compared % 1000000 == 0) logstream(LOG_INFO)<< std::setw(10) << mytimer.current_time() << ") " << std::setw(10) << item_pairs_compared << " pairs compared " << std::endl; } }//end of iteration % 2 == 1 }//end of update function /** * Called before an iteration starts. * On odd iteration, schedule both users and items. 
* on even iterations, schedules only item nodes */ void before_iteration(int iteration, graphchi_context &gcontext) { gcontext.scheduler->remove_tasks(0, (int) gcontext.nvertices - 1); if (gcontext.iteration % 2 == 0){ for (vid_t i=0; i < M; i++){ //even iterations, schedule only user nodes gcontext.scheduler->add_task(i); } } else { //iteration % 2 == 1, schedule only item nodes for (vid_t i=M; i < M+N; i++){ gcontext.scheduler->add_task(i); } } } void after_iteration(int iteration, graphchi_context &gcontext){ if (gcontext.iteration % 2 == 1){ for (int i=0; i< (int)adjcontainer->adjs.size(); i++){ if (debug) logstream(LOG_DEBUG)<<"Going over user" << adjcontainer->adjs[i].vid << std::endl; dense_adj &user = adjcontainer->adjs[i]; if (nnz(user.edges) == 0 || nnz(user.ratings) == 0){ if (debug) logstream(LOG_DEBUG)<<"User with no edges" << std::endl; continue; } //assert(user.ratings.size() == N); ivec positions = reverse_sort_index(user.ratings, K); assert(positions.size() > 0); for (int j=0; j < positions.size(); j++){ assert(positions[j] >= 0); assert(positions[j] < (int)N); //skip zero entries if (get_val(user.ratings, positions[j])== 0){ if (debug) logstream(LOG_DEBUG)<<"Found zero in position " << j << std::endl; break; } int rc = fprintf(out_file, "%u %u %lg\n", user.vid+1, positions[j]+1, get_val(user.ratings, positions[j]));//write item similarity to file if (debug) logstream(LOG_DEBUG)<<"Writing rating from user" << user.vid+1 << " to item: " << positions[j] << std::endl; assert(rc > 0); written_pairs++; } } grabbed_edges = 0; adjcontainer->clear(); } } /** * Called before an execution interval is started. * * On every even iteration, we load pivot's item connected user lists to memory. * Here we manage the memory to ensure that we do not load too much * edges into memory. 
*/ void before_exec_interval(vid_t window_st, vid_t window_en, graphchi_context &gcontext) { /* on even iterations, loads pivot items into memory base on the membudget_mb allowed memory size */ if ((gcontext.iteration % 2 == 0)) { //if (debug){ printf("entering iteration: %d on before_exec_interval\n", gcontext.iteration); printf("pivot_st is %d window_St %d, window_en %d\n", adjcontainer->pivot_st, window_st, window_en); //} if (adjcontainer->pivot_st < window_en){ size_t max_grab_edges = get_option_long("membudget_mb", 1024) * 1024 * 1024 / 8; if (grabbed_edges < max_grab_edges * 0.8) { logstream(LOG_DEBUG) << "Window init, grabbed: " << grabbed_edges << " edges" << " extending pivor_range to : " << window_en + 1 << std::endl; adjcontainer->extend_pivotrange(window_en + 1); logstream(LOG_DEBUG) << "Window en is: " << window_en << " vertices: " << gcontext.nvertices << std::endl; if (window_en+1 >= gcontext.nvertices) { // every user was a pivot item, so we are done logstream(LOG_DEBUG)<<"Setting last iteration to: " << gcontext.iteration + 2 << std::endl; gcontext.set_last_iteration(gcontext.iteration + 2); } } else { logstream(LOG_DEBUG) << "Too many edges, already grabbed: " << grabbed_edges << std::endl; } } } } /** * Called before an execution interval is started. * */ void after_exec_interval(vid_t window_st, vid_t window_en, graphchi_context &gcontext) { //on odd iterations, dump user recommendations computed so far to disk if (gcontext.iteration % 2 == 1){ printf("entering iteration: %d on after_exec_interval\n", gcontext.iteration); printf("pivot_st is %d window_st %d, window_en %d\n", adjcontainer->pivot_st, window_st, window_en); } } }; int main(int argc, const char ** argv) { print_copyright(); /* GraphChi initialization will read the command line arguments and the configuration file. */ graphchi_init(argc, argv); /* Metrics object for keeping track of performance counters and other information. Currently required. 
*/ metrics m("itemsim2rating"); /* Basic arguments for application */ min_allowed_intersection = get_option_int("min_allowed_intersection", min_allowed_intersection); debug = get_option_int("debug", 0); parse_command_line_args(); std::string similarity = get_option_string("similarity", ""); if (similarity == "") logstream(LOG_FATAL)<<"Missing similarity input file. Please specify one using the --similarity=filename command line flag" << std::endl; undirected = get_option_int("undirected", 1); Q = get_option_float("Q", Q); mytimer.start(); int nshards = convert_matrixmarket_and_item_similarity<EdgeDataType>(training, similarity); K = get_option_int("K"); assert(M > 0 && N > 0); //initialize data structure which saves a subset of the items (pivots) in memory adjcontainer = new adjlist_container(); //array for marking which items are conected to the pivot items via users. relevant_items = new bool[N]; /* Run */ ItemDistanceProgram program; graphchi_engine<VertexDataType, EdgeDataType> engine(training,nshards, true, m); set_engine_flags(engine); //engine.set_maxwindow(M+N+1); out_file = open_file((training + "-rec").c_str(), "w"); //run the program engine.run(program, niters); /* Report execution metrics */ if (!quiet) metrics_report(m); std::cout<<"Total item pairs compared: " << item_pairs_compared << " total written to file: " << written_pairs << std::endl; std::cout<<"Created output files with the format: " << training << "-rec" << std::endl; delete[] relevant_items; fclose(out_file); return 0; }
09jijiangwen-download
toolkits/collaborative_filtering/itemsim2rating.cpp
C++
asf20
15,493