//
// $Id$
//
// -------------------------------------------------------------------------
// This file is part of ZeroBugs, Copyright (c) 2010 Cristian L. Vlasceanu
//
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// -------------------------------------------------------------------------
#include <iostream>
#include <numeric>
#include <sstream>
#include <boost/bind.hpp>
#include "gtkmm/adjustment.h"
#include "gtkmm/color.h"
#include "gtkmm/connect.h"
#include "gtkmm/ctree.h"
#include "gtkmm/eventbox.h"
#include "gtkmm/flags.h"
#include "gtkmm/frame.h"
#include "gtkmm/label.h"
#include "gtkmm/scrolledwindow.h"
#include "gtkmm/style.h"
#include "gtkmm/tooltips.h"
#include "gtkmm/window.h"
#include "dharma/environ.h"
#include "dharma/pipe.h"
#include "generic/lock.h"
#include "generic/temporary.h"
#include "zdk/check_ptr.h"
#include "zdk/data_type.h"
#include "zdk/shared_string_impl.h"
#include "zdk/thread_util.h"
#include "zdk/types.h"
#include "zdk/variant.h"
#include "frame_state.h"
#include "highlight_changes.h"
#include "gui.h"
#include "is_reference.h"
#include "output_dialog.h"
#include "popup_menu.h"
#include "scope_helpers.h"
#include "set_cursor.h"
#include "slot_macros.h"
#include "variables_view.h"
using namespace std;
using namespace SigC;
////////////////////////////////////////////////////////////////
VariablesView::~VariablesView()
{
clear_data();
}
////////////////////////////////////////////////////////////////
VariablesView::VariablesView(Debugger& /* debugger */)
: base_(0)
, ctree_(0)
, isStaleView_(false)
, updatePending_(false)
{
Gtk::Frame* frame = manage(new Gtk::Frame);
pack_start(*frame, true, true);
Gtk::ScrolledWindow* sw = manage(new Gtk::ScrolledWindow);
frame->add(*sw);
static const char* titles[] =
{
"Variable", "Value", "Type", 0
};
ctree_ = manage(new Tree(titles));
sw->add(*ctree_);
sw->set_policy(Gtk_FLAG(POLICY_NEVER), Gtk_FLAG(POLICY_AUTOMATIC));
ctree_->set_column_editable(1, true);
ctree_->column(0).set_width(100);
ctree_->column(1).set_width(150);
ctree_->column(0).set_passive();
ctree_->column(1).set_passive();
ctree_->column(2).set_passive();
ctree_->set_line_style(Gtk_FLAG(CTREE_LINES_DOTTED));
#if GTKMM_2
ctree_->set_line_style(Gtk_FLAG(CTREE_LINES_SOLID));
#endif
// connect CTree signals to internal slots
Gtk_CONNECT_0(ctree_, tree_expand, this, &VariablesView::on_expand);
Gtk_CONNECT_0(ctree_, tree_collapse, this, &VariablesView::on_collapse);
Gtk_CONNECT_0(ctree_, cell_edit, this, &VariablesView::on_cell_edit);
}
////////////////////////////////////////////////////////////////
bool VariablesView::is_expanding(DebugSymbol* sym) const
{
bool result = false;
if (sym)
{
Lock<Mutex> lock(mx_);
const SymKey key(*sym);
set<SymKey>::iterator i = expand_.find(key);
if (i != expand_.end())
{
result = true;
}
else
{
KeyMap::const_iterator j = keyMap_.find(key);
if (j != keyMap_.end())
{
i = expand_.find(j->second);
if (i != expand_.end())
{
result = true;
}
}
}
dbgout(1) << __func__ << ": " << sym->name()
<< " (" << key << ")=" << result << endl;
}
return result;
}
////////////////////////////////////////////////////////////////
void VariablesView::emit_sym_read(const RefPtr<DebugSymbol>& sym)
{
// avoid deadlock: unlock the mutex in this scope,
// since reading the symbol may cause a callback
// (symbol_change, for example) to come in on the main thread
Unlock<Mutex> unlock(mx_);
read_symbol.emit(sym, this);
}
////////////////////////////////////////////////////////////////
bool VariablesView::emit_sym_filt(RefPtr<DebugSymbol>& sym)
{
Unlock<Mutex> unlock(mx_);
RefPtr<DebugSymbol> old = sym;
bool result = filter.emit(&sym, sym->parent(), this);
symbol_change(sym.get(), old.get());
return result;
}
////////////////////////////////////////////////////////////////
bool VariablesView::notify(DebugSymbol* symbol)
{
assert(symbol);
Lock<Mutex> lock(mx_);
Temporary<bool> pending(updatePending_, true);
emit_sym_read(symbol);
RefPtr<DebugSymbol> sym(symbol);
if (emit_sym_filt(sym))
{
debugSymbols_.push_back(sym);
if (is_expanding(sym.get()))
{
emit_sym_read(sym);
}
}
return false;
}
////////////////////////////////////////////////////////////////
//
// Helpers for symbol_change: if a symbol is replaced by
// another (for example, due to a custom filter that displays
// vectors as C-style arrays), expanded child objects need to
// stay expanded after the substitution.
//
static inline void
expand_symbol(VariablesView&, RefPtr<DebugSymbol>, DebugSymbol*);
namespace
{
/**
* Enumerate the child symbols of a given symbol and
* map them by name (the assumption being that language rules
* ensure uniqueness).
*/
class ZDK_LOCAL DebugSymbolNameMap : public DebugSymbolCallback
{
typedef std::map<RefPtr<SharedString>, RefPtr<DebugSymbol> > map_type;
map_type map_;
BEGIN_INTERFACE_MAP(DebugSymbolNameMap)
INTERFACE_ENTRY(DebugSymbolCallback)
END_INTERFACE_MAP()
public:
bool notify(DebugSymbol* sym)
{
if (sym)
{
map_.insert(make_pair(sym->name(), sym));
}
return true;
}
RefPtr<DebugSymbol> find(RefPtr<SharedString> name) const
{
RefPtr<DebugSymbol> result;
map_type::const_iterator i = map_.find(name);
if (i != map_.end())
{
result = i->second;
}
return result;
}
};
class ZDK_LOCAL DebugSymbolExpander : public DebugSymbolCallback
{
DebugSymbolNameMap nameMap_;
VariablesView& view_;
BEGIN_INTERFACE_MAP(DebugSymbolExpander)
INTERFACE_ENTRY(DebugSymbolCallback)
END_INTERFACE_MAP()
public:
DebugSymbolExpander(const RefPtr<DebugSymbol>& sym,
VariablesView& view)
: view_(view)
{
sym->enum_children(&nameMap_);
}
bool notify(DebugSymbol* sym)
{
if (sym && view_.is_expanding(sym))
{
if (RefPtr<DebugSymbol> peer = nameMap_.find(sym->name()))
{
expand_symbol(view_, peer, sym);
}
else
{
dbgout(0) << "not found: " << sym->name() << endl;
}
}
return true;
}
};
} // namespace
void
expand_symbol(VariablesView& view, RefPtr<DebugSymbol> sym, DebugSymbol* old)
{
assert(sym);
view.expand(*sym);
sym->read(&view);
if (old)
{
DebugSymbolExpander expander(sym, view);
old->enum_children(&expander);
}
}
////////////////////////////////////////////////////////////////
/// This is called when a symbol is replaced by another, likely
/// as a result of a user-defined DataFilter.
void
VariablesView::symbol_change(DebugSymbol* sym, DebugSymbol* old)
{
if (sym && old && (sym != old))
{
SymKey oldKey(*old);
SymKey symKey(*sym);
Lock<Mutex> lock(mx_);
// is the old symbol expanded?
set<SymKey>::iterator i = expand_.find(oldKey);
if (i != expand_.end())
{
expand_symbol(*this, sym, old);
expand_.erase(i);
expand_.insert(symKey);
}
else
{
// was a replacement for sym provided before?
KeyMap::iterator k = keyMap_.find(oldKey);
if (k != keyMap_.end())
{
// is the replacement expanded?
i = expand_.find(k->second);
if (i != expand_.end())
{
DebugSymbolMap::iterator j = subst_.find(k->second);
if (j != subst_.end())
{
expand_symbol(*this, sym, j->second.get());
}
}
}
}
keyMap_[oldKey] = symKey;
}
}
////////////////////////////////////////////////////////////////
void VariablesView::display(bool force)
{
Lock<Mutex> lock(mx_);
if (is_visible() && (force || is_stale_view()))
{
set_is_stale_view(false);
set_cursor(*this, Gdk_FLAG(TOP_LEFT_ARROW));
ScopedFreeze<Gtk::CTree> freeze(*CHKPTR(ctree_));
// save the current vertical scroll position
Gtk::Adjustment* adj = ctree_->get_vadjustment();
const double adjVal = adj ? adj->get_value() : 0;
ctree_->reset_tooltip();
Gtk::clear_rows(*ctree_);
Gtk::CTree::RowList rows = ctree_->rows();
DebugSymbolList::const_iterator i = debugSymbols_.begin();
for (; i != debugSymbols_.end(); ++i)
{
add_symbol(rows, *i);
}
// restore vertical scroll position
if (adj)
{
adj->set_value(adjVal);
}
DebugSymbolList(debugSymbols_).swap(keepAlive_);
}
}
////////////////////////////////////////////////////////////////
void VariablesView::clear_data(bool keepExpand) throw()
{
Lock<Mutex> lock(mx_);
clear_symbols();
values_.clear();
subst_.clear();
if (!keepExpand)
{
expand_.clear();
keyMap_.clear();
}
}
////////////////////////////////////////////////////////////////
void VariablesView::reset(bool keepExpand)
{
assert(ctree_);
Gtk::clear_rows(*ctree_);
clear_data(keepExpand);
}
////////////////////////////////////////////////////////////////
class VariablesView::EnumChildrenHelper : public DebugSymbolCallback
{
public:
EnumChildrenHelper(VariablesView& view, Gtk::CTree::RowList rows, bool ref)
: view_(view), rows_(rows), ref_(ref)
{ }
bool notify(DebugSymbol* sym)
{
if (sym)
{
assert(sym->ref_count() > 0);
view_.add_symbol(rows_, sym, ref_);
}
return true;
}
BEGIN_INTERFACE_MAP(EnumChildrenHelper)
INTERFACE_ENTRY(DebugSymbolCallback)
END_INTERFACE_MAP()
private:
VariablesView& view_;
Gtk::CTree::RowList rows_;
bool ref_;
};
////////////////////////////////////////////////////////////////
void VariablesView::restore_state(const Frame* f) volatile
{
if (f)
{
RefPtr<FrameState> state =
interface_cast<FrameState*>(f->get_user_object(".state"));
restore_state(state, true);
}
}
////////////////////////////////////////////////////////////////
void VariablesView::restore_state
(
const RefPtr<FrameState>& state,
bool restoreSymbols
) volatile
{
Lock<Mutex> lock(mx_);
VariablesView* THIS = const_cast<VariablesView*>(this);
THIS->subst_.clear();
if (state.is_null())
{
THIS->expand_.clear();
THIS->values_.clear();
if (restoreSymbols)
{
THIS->debugSymbols_.clear();
dbgout(0) << __func__ << ": symbols cleared" << endl;
}
}
else
{
dbgout(0) << __func__ << endl;
THIS->expand_ = state->expand_;
THIS->values_ = state->values_;
if (restoreSymbols)
{
THIS->debugSymbols_ = state->symbols_;
dbgout(0) << __func__ << ": "
<< THIS->debugSymbols_.size() << " symbol(s)"
<< endl;
THIS->set_is_stale_view(true);
}
}
}
////////////////////////////////////////////////////////////////
void VariablesView::save_state(FrameState& state) volatile
{
VariablesView* THIS = const_cast<VariablesView*>(this);
state.expand_ = THIS->expand_;
state.values_ = THIS->values_;
state.symbols_ = THIS->debugSymbols_;
}
////////////////////////////////////////////////////////////////
RefPtr<SharedString>
VariablesView::validate(const RefPtr<DebugSymbol>& symbol)
{
RefPtr<SharedString> value = symbol->value();
#if GTKMM_2
if (value)
{
Glib::ustring line = value->c_str();
if (!line.validate())
{
value.reset();
}
}
#endif
return value;
}
////////////////////////////////////////////////////////////////
void VariablesView::add_symbol(Gtk::CTree::Row row,
RefPtr<DebugSymbol> symbol,
RefPtr<SharedString> value)
{
if (!value)
{
dbgout(1) << __func__ << ": " << symbol->name() << ":nil" << endl;
}
else
{
const SymKey key(*symbol);
// if the symbol is a function's return value, or the value has
// changed since it was last displayed, then show it in a
// different color
DebugValuesMap::const_iterator i = values_.find(key);
if (i == values_.end())
{
if (symbol->is_return_value())
{
row.set_foreground(Gdk_Color(highlight_changes_color()));
}
}
else
{
// has the value changed since it was last displayed?
DataType* type = CHKPTR(symbol->type());
if (!value->is_equal2(i->second.get())
&& type->compare(value->c_str(), CHKPTR(i->second)->c_str()) != 0)
{
row.set_foreground(Gdk_Color(highlight_changes_color()));
}
}
values_[key] = value;
subst_[key] = symbol;
row.set_data(symbol.get());
}
}
////////////////////////////////////////////////////////////////
Gtk::CTree::Row
VariablesView::add_row(const char* name,
const char* value,
const char* type)
{
assert_ui_thread();
vector<string> items(3);
if (name)
{
items[0] = name;
}
if (value)
{
items[1] = value;
}
if (type)
{
items[2] = type;
}
if (ctree_)
{
ctree_->rows().push_back(Gtk::CTree::Element(items));
return ctree_->rows().back();
}
return Gtk::CTree::Row();
}
////////////////////////////////////////////////////////////////
void VariablesView::add_symbol(Gtk::CTree::RowList rows,
RefPtr<DebugSymbol> symbol,
bool ref)
{
assert_ui_thread();
if (!CHKPTR(symbol)->type())
{
#ifdef DEBUG
clog << "Symbol has NULL type: " << symbol->name() << endl;
#endif
return;
}
bool isRef = false;
#if 0
// Do not create an extra node for the reference object,
// just show the referred variable (child object).
if (is_ref(symbol.get()))
{
assert(!ref);
isRef = true;
expand_.insert(SymKey(*symbol));
}
else
#endif
{
string name = CHKPTR(symbol->name())->c_str();
if (ref && symbol->parent())
{
name = CHKPTR(symbol->parent()->name())->c_str();
}
/* if (symbol->is_return_value())
{
name += " returned";
}
*/
RefPtr<SharedString> value = validate(symbol);
const char* items[3] =
{
name.c_str(),
value.is_null() ? "" : value->c_str(),
CHKPTR(symbol->type_name())->c_str(),
};
rows.push_back(Gtk::CTree::Element(items));
Gtk::CTree::Row row = rows.back();
rows = row.subtree();
if (is_expanding(symbol.get()))
{
row.expand();
}
add_symbol(row, symbol, value);
}
// add the children of aggregated symbols (such
// as class instances, arrays, etc.)
EnumChildrenHelper children(*this, rows, isRef);
symbol->enum_children(&children);
}
////////////////////////////////////////////////////////////////
void VariablesView::on_expand(Gtk::CTree::Row row)
{
if (DebugSymbol* sym = reinterpret_cast<DebugSymbol*>(row.get_data()))
{
const SymKey key(*sym);
expand_.insert(key);
assert(is_expanding(sym));
if (SharedString* value = sym->value())
{
values_.insert(make_pair(key, value));
subst_.insert(make_pair(key, sym));
}
set_cursor(*this, Gdk_FLAG(WATCH));
dbgout(1) << __func__ << ": " << sym->name() << endl;
symbol_expand(sym);
}
}
////////////////////////////////////////////////////////////////
void VariablesView::on_collapse(Gtk::CTree::Row row)
{
if (DebugSymbol* symbol = reinterpret_cast<DebugSymbol*>(row.get_data()))
{
const SymKey key(*symbol);
expand_.erase(key);
}
}
////////////////////////////////////////////////////////////////
event_result_t
VariablesView::on_button_press_event(GdkEventButton* event)
{
Gtk::VBox::on_button_press_event(event);
if ((event != NULL) &&
(event->type == GDK_BUTTON_PRESS) &&
(event->button == 3))
{
Gtk::RowHandle row;
int col = 0; // not used
// silence compiler warnings; event->x and event->y
// are doubles, and get_selection_info expects
// integers
const int x = static_cast<int>(event->x);
const int y = static_cast<int>(event->y);
if (ctree_ && ctree_->get_selection_info(x, y, &row, &col))
{
void* data = ctree_->row(row).get_data();
if (DebugSymbol* sym = reinterpret_cast<DebugSymbol*>(data))
{
popup_menu(*event, sym);
}
#ifdef DEBUG
else
{
clog << "data=" << data << endl;
}
#endif
}
}
return true;
}
////////////////////////////////////////////////////////////////
void VariablesView::popup_menu
(
GdkEventButton& event,
RefPtr<DebugSymbol> symbol
)
{
assert(symbol.get());
std::auto_ptr<PopupMenu> menu(new PopupMenu);
Gtk::MenuItem* item = 0;
const addr_t addr = symbol->addr();
if (addr)
{
item = menu->add_manage_item(new Gtk::MenuItem("View Raw Memory"));
Gtk_CONNECT(item, activate, Gtk_BIND(show_raw_memory.slot(), addr));
}
Gtk::CheckMenuItem* check =
menu->add_manage_item(new Gtk::CheckMenuItem("Hexadecimal"));
int base = 10;
if (numeric_base(symbol.get()) == 16)
{
check->set_active();
}
else
{
base = 16;
}
Gtk_CONNECT_1(check, activate, this, &VariablesView::set_numeric_base, base);
// if the thread is live (i.e. not loaded from a core file),
// add a menu entry for setting watchpoint(s) on this variable
if (symbol->addr() && symbol->thread() && symbol->thread()->is_live())
{
menu->items().push_back(Gtk::Menu_Helpers::SeparatorElem());
item = menu->add_manage_item(new Gtk::MenuItem("Set Watchpoint..."));
Gtk_CONNECT(item, activate, Gtk_BIND(set_watchpoint.slot(), symbol));
}
#ifdef DEBUG
item = menu->add_manage_item(new Gtk::MenuItem("Type Info"));
Gtk_CONNECT_1(item, activate, this, &VariablesView::type_info, symbol);
#endif
menu.release()->popup(event.button, event.time);
}
////////////////////////////////////////////////////////////////
void VariablesView::set_numeric_base(int base)
{
if (base != base_)
{
base_ = base;
numeric_base_changed();
}
}
#ifdef DEBUG
////////////////////////////////////////////////////////////////
void VariablesView::type_info(RefPtr<DebugSymbol> symbol)
{
assert(!symbol.is_null());
assert(symbol->type());
Pipe pipe;
OutputDialog output("Type Info", pipe.output());
describe_type(symbol, pipe.input());
assert(get_toplevel());
Gtk::Window& top = dynamic_cast<Gtk::Window&>(*get_toplevel());
output.set_transient_for(top);
output.run(this);
}
#endif // DEBUG
////////////////////////////////////////////////////////////////
bool VariablesView::on_cell_edit(CELL_EDIT_PARAM)
{
return on_cell_edit_vfunc(path, ncol, old, s);
}
////////////////////////////////////////////////////////////////
bool VariablesView::on_cell_edit_vfunc(CELL_EDIT_PARAM)
{
Gtk::CTree::Row row = CHKPTR(ctree_)->row(path);
if (void* data = row.get_data())
{
RefPtr<DebugSymbol> sym(reinterpret_cast<DebugSymbol*>(data));
if (sym->type())
{
return edit.emit(sym, s);
}
}
return false;
}
////////////////////////////////////////////////////////////////
bool VariablesView::update(RefPtr<Thread>)
{
return !updatePending_;
}
////////////////////////////////////////////////////////////////
bool VariablesView::get_text_at_pointer(
Gtk::Widget& wid,
double x,
double y,
Gtk::RowHandle& hrow,
int& hcol,
std::string& text)
{
Gtk::CTree& ctree = dynamic_cast<Gtk::CTree&>(wid);
Gtk::RowHandle nrow; int ncol = 0;
if (ctree.get_selection_info((int)x, (int)y, &nrow, &ncol))
{
if (void* data = ctree.rows()[nrow].get_data())
{
if (DebugSymbol* sym = reinterpret_cast<DebugSymbol*>(data))
{
if (const char* tip = sym->tooltip())
{
text = tip;
return true;
}
}
}
}
return ToolTipTraits<Gtk::CTree>::get_text_at_pointer(wid, x, y, hrow, hcol, text);
}
////////////////////////////////////////////////////////////////
void VariablesView::expand(DebugSymbol& sym)
{
expand_.insert(SymKey(sym));
}
////////////////////////////////////////////////////////////////
string highlight_changes_color()
{
static string color = env::get_string("ZERO_HIGHLIGHT_CHANGES", "magenta");
return color;
}
////////////////////////////////////////////////////////////////
void VariablesView::save_config(Properties& prop, const string& prefix)
{
prop.set_word((prefix + ".0.width").c_str(), ctree_->column(0).get_width());
prop.set_word((prefix + ".1.width").c_str(), ctree_->column(1).get_width());
}
////////////////////////////////////////////////////////////////
void VariablesView::restore_settings(Properties& prop, const string& prefix)
{
ctree_->column(0).set_width(prop.get_word((prefix + ".0.width").c_str(), 100));
ctree_->column(1).set_width(prop.get_word((prefix + ".1.width").c_str(), 150));
}
// vim: tabstop=4:softtabstop=4:expandtab:shiftwidth=4
|
{"hexsha": "a6f784204472b8f0d2335fa1a5d03844eec19d0a", "size": 22981, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "plugin/gui/variables_view.cpp", "max_stars_repo_name": "cristivlas/zerobugs", "max_stars_repo_head_hexsha": "5f080c8645b123d7887fd8a64f60e8d226e3b1d5", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 2.0, "max_stars_repo_stars_event_min_datetime": "2018-03-19T23:27:47.000Z", "max_stars_repo_stars_event_max_datetime": "2018-06-24T16:15:19.000Z", "max_issues_repo_path": "plugin/gui/variables_view.cpp", "max_issues_repo_name": "cristivlas/zerobugs", "max_issues_repo_head_hexsha": "5f080c8645b123d7887fd8a64f60e8d226e3b1d5", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "plugin/gui/variables_view.cpp", "max_forks_repo_name": "cristivlas/zerobugs", "max_forks_repo_head_hexsha": "5f080c8645b123d7887fd8a64f60e8d226e3b1d5", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2021-11-28T05:39:05.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-28T05:39:05.000Z", "avg_line_length": 25.5913140312, "max_line_length": 87, "alphanum_fraction": 0.5134676472, "num_tokens": 5113}
|
/*
Copyright 2007-2008 Christian Henning, Andreas Pokorny, Lubomir Bourdev
Use, modification and distribution are subject to the Boost Software License,
Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
http://www.boost.org/LICENSE_1_0.txt).
*/
/*************************************************************************************************/
#ifndef BOOST_GIL_EXTENSION_IO_DETAIL_PNG_IO_WRITE_HPP_INCLUDED
#define BOOST_GIL_EXTENSION_IO_DETAIL_PNG_IO_WRITE_HPP_INCLUDED
////////////////////////////////////////////////////////////////////////////////////////
/// \file
/// \brief
/// \author Christian Henning, Andreas Pokorny, Lubomir Bourdev \n
///
/// \date 2007-2008 \n
///
////////////////////////////////////////////////////////////////////////////////////////
extern "C" {
#include <png.h>
}
#include <boost/gil/extension/io_new/detail/typedefs.hpp>
#include <boost/gil/extension/io_new/detail/base.hpp>
#include <boost/gil/extension/io_new/detail/row_buffer_helper.hpp>
#include "base.hpp"
#include "supported_types.hpp"
namespace boost { namespace gil { namespace detail {
template<typename Device>
class writer< Device
, png_tag
> : png_io_base< Device >
{
public:
writer( Device& io_dev )
: png_io_base< Device >( io_dev )
, _png_ptr ( NULL )
, _info_ptr( NULL )
{
// Create and initialize the png_struct with the desired error handler
// functions. If you want to use the default stderr and longjump method,
// you can supply NULL for the last three parameters. We also check that
// the library version is compatible with the one used at compile time,
// in case we are using dynamically linked libraries. REQUIRED.
_png_ptr = png_create_write_struct( PNG_LIBPNG_VER_STRING
, NULL // user_error_ptr
, NULL // user_error_fn
, NULL // user_warning_fn
);
io_error_if( _png_ptr == NULL
, "png_writer: fail to call png_create_write_struct()"
);
// Allocate/initialize the image information data. REQUIRED
_info_ptr = png_create_info_struct( _png_ptr );
if( _info_ptr == NULL )
{
png_destroy_write_struct( &_png_ptr
, NULL
);
io_error( "png_writer: fail to call png_create_info_struct()" );
}
// Set error handling. REQUIRED if you aren't supplying your own
// error handling functions in the png_create_write_struct() call.
if( setjmp( png_jmpbuf( _png_ptr )))
{
//free all of the memory associated with the png_ptr and info_ptr
png_destroy_write_struct( &_png_ptr
, &_info_ptr
);
io_error( "png_writer: fail to call setjmp()" );
}
this->init_io( _png_ptr );
}
~writer()
{
png_destroy_write_struct( &_png_ptr
, &_info_ptr
);
}
template< typename View >
void apply( const View& view )
{
apply( view, image_write_info< png_tag >() );
}
template <typename View>
void apply( const View& view
, const image_write_info< png_tag >& info
)
{
typedef png_write_support< typename channel_type < typename get_pixel_type< View >::type >::type
, typename color_space_type< View >::type
> png_rw_info_t;
io_error_if( view.width() == 0 && view.height() == 0
, "png format cannot handle empty views."
);
// Set the image information here. Width and height are up to 2^31,
// bit_depth is one of 1, 2, 4, 8, or 16, but valid values also depend on
// the color_type selected. color_type is one of PNG_COLOR_TYPE_GRAY,
// PNG_COLOR_TYPE_GRAY_ALPHA, PNG_COLOR_TYPE_PALETTE, PNG_COLOR_TYPE_RGB,
// or PNG_COLOR_TYPE_RGB_ALPHA. interlace is either PNG_INTERLACE_NONE or
// PNG_INTERLACE_ADAM7, and the compression_type and filter_type MUST
// currently be PNG_COMPRESSION_TYPE_BASE and PNG_FILTER_TYPE_BASE. REQUIRED
png_set_IHDR( _png_ptr
, _info_ptr
, static_cast< png_image_width::type >( view.width() )
, static_cast< png_image_height::type >( view.height() )
, static_cast< png_bitdepth::type >( png_rw_info_t::_bit_depth )
, static_cast< png_color_type::type >( png_rw_info_t::_color_type )
, info._interlace_method
, info._compression_method
, info._filter_method
);
#ifdef BOOST_GIL_IO_PNG_FLOATING_POINT_SUPPORTED
if( info._valid_cie_colors )
{
png_set_cHRM( _png_ptr
, _info_ptr
, info._white_x
, info._white_y
, info._red_x
, info._red_y
, info._green_x
, info._green_y
, info._blue_x
, info._blue_y
);
}
if( info._valid_file_gamma )
{
png_set_gAMA( _png_ptr
, _info_ptr
, info._gamma
);
}
#else
if( info._valid_cie_colors )
{
png_set_cHRM_fixed( _png_ptr
, _info_ptr
, info._white_x
, info._white_y
, info._red_x
, info._red_y
, info._green_x
, info._green_y
, info._blue_x
, info._blue_y
);
}
if( info._valid_file_gamma )
{
png_set_gAMA_fixed( _png_ptr
, _info_ptr
, info._file_gamma
);
}
#endif // BOOST_GIL_IO_PNG_FLOATING_POINT_SUPPORTED
if( info._valid_icc_profile )
{
png_set_iCCP( _png_ptr
, _info_ptr
, const_cast< png_charp >( info._icc_name.c_str() )
, info._iccp_compression_type
, reinterpret_cast< png_const_bytep >( info._profile.c_str() )
, info._profile_length
);
}
if( info._valid_intent )
{
png_set_sRGB( _png_ptr
, _info_ptr
, info._intent
);
}
if( info._valid_palette )
{
png_set_PLTE( _png_ptr
, _info_ptr
, const_cast< png_colorp >( &info._palette.front() )
, info._num_palette
);
}
if( info._valid_background )
{
png_set_bKGD( _png_ptr
, _info_ptr
, const_cast< png_color_16p >( &info._background )
);
}
if( info._valid_histogram )
{
png_set_hIST( _png_ptr
, _info_ptr
, const_cast< png_uint_16p >( &info._histogram.front() )
);
}
if( info._valid_offset )
{
png_set_oFFs( _png_ptr
, _info_ptr
, info._offset_x
, info._offset_y
, info._off_unit_type
);
}
if( info._valid_pixel_calibration )
{
std::vector< const char* > params( info._num_params );
for( std::size_t i = 0; i < params.size(); ++i )
{
params[i] = info._params[ i ].c_str();
}
png_set_pCAL( _png_ptr
, _info_ptr
, const_cast< png_charp >( info._purpose.c_str() )
, info._X0
, info._X1
, info._cal_type
, info._num_params
, const_cast< png_charp >( info._units.c_str() )
                    , const_cast< png_charpp >( &params.front() )
);
}
if( info._valid_resolution )
{
png_set_pHYs( _png_ptr
, _info_ptr
, info._res_x
, info._res_y
, info._phy_unit_type
);
}
if( info._valid_significant_bits )
{
png_set_sBIT( _png_ptr
, _info_ptr
, const_cast< png_color_8p >( &info._sig_bits )
);
}
#ifdef BOOST_GIL_IO_PNG_FLOATING_POINT_SUPPORTED
if( info._valid_scale_factors )
{
png_set_sCAL( _png_ptr
, _info_ptr
, info._scale_unit
, info._scale_width
, info._scale_height
);
}
#else
#ifdef BOOST_GIL_IO_PNG_FIXED_POINT_SUPPORTED
if( info._valid_scale_factors )
{
png_set_sCAL_s( _png_ptr
, _info_ptr
, _scale_unit
, const_cast< png_charp >( _scale_width.c_str() )
, const_cast< png_charp >( _scale_height.c_str() )
);
}
#endif // BOOST_GIL_IO_PNG_FIXED_POINT_SUPPORTED
#endif // BOOST_GIL_IO_PNG_FLOATING_POINT_SUPPORTED
if( info._valid_text )
{
std::vector< png_text > texts( info._num_text );
for( std::size_t i = 0; i < texts.size(); ++i )
{
png_text pt;
pt.compression = info._text[i]._compression;
pt.key = const_cast< png_charp >( info._text[i]._key.c_str() );
pt.text = const_cast< png_charp >( info._text[i]._text.c_str() );
pt.text_length = info._text[i]._text.length();
texts[i] = pt;
}
png_set_text( _png_ptr
, _info_ptr
, &texts.front()
, info._num_text
);
}
if( info._valid_modification_time )
{
png_set_tIME( _png_ptr
, _info_ptr
, const_cast< png_timep >( &info._mod_time )
);
}
if( info._valid_transparency_factors )
{
int sample_max = ( 1 << info._bit_depth );
/* libpng doesn't reject a tRNS chunk with out-of-range samples */
if( !( ( info._color_type == PNG_COLOR_TYPE_GRAY
&& (int) info._trans_values[0].gray > sample_max
)
|| ( info._color_type == PNG_COLOR_TYPE_RGB
&&( (int) info._trans_values[0].red > sample_max
|| (int) info._trans_values[0].green > sample_max
|| (int) info._trans_values[0].blue > sample_max
)
)
)
)
{
//@todo Fix that once reading transparency values works
/*
png_set_tRNS( _png_ptr
, _info_ptr
, trans
, num_trans
, trans_values
);
*/
}
}
png_write_info( _png_ptr
,_info_ptr
);
write_view( view
, typename is_bit_aligned< View >::type()
);
}
private:
template<typename View>
void write_view( const View& view
, mpl::false_ // is bit aligned
)
{
typedef typename get_pixel_type< View >::type pixel_t;
typedef png_write_support< typename channel_type < pixel_t >::type
, typename color_space_type< pixel_t >::type
> png_rw_info;
if( little_endian() )
{
if( png_rw_info::_bit_depth == 16 )
{
png_set_swap( _png_ptr );
}
if( png_rw_info::_bit_depth < 8 )
{
png_set_packswap( _png_ptr );
}
}
row_buffer_helper_view< View > row_buffer( view.width()
, false
);
for( int y = 0; y != view.height(); ++ y)
{
std::copy( view.row_begin( y )
, view.row_end ( y )
, row_buffer.begin()
);
png_write_row( _png_ptr
, reinterpret_cast< png_bytep >( row_buffer.data() )
);
}
png_write_end( _png_ptr
, _info_ptr
);
}
template<typename View>
void write_view( const View& view
, mpl::true_ // is bit aligned
)
{
typedef png_write_support< typename kth_semantic_element_type< typename View::value_type
, 0
>::type
, typename color_space_type<View>::type
> png_rw_info;
if (little_endian() )
{
if( png_rw_info::_bit_depth == 16 )
{
png_set_swap( _png_ptr );
}
if( png_rw_info::_bit_depth < 8 )
{
png_set_packswap( _png_ptr );
}
}
row_buffer_helper_view< View > row_buffer( view.width()
, false
);
for( int y = 0; y != view.height(); ++y )
{
std::copy( view.row_begin( y )
, view.row_end ( y )
, row_buffer.begin()
);
png_write_row( _png_ptr
, reinterpret_cast< png_bytep >( row_buffer.data() )
);
}
png_free_data( _png_ptr
, _info_ptr
, PNG_FREE_UNKN
, -1
);
png_write_end( _png_ptr
, _info_ptr
);
}
void init_io( png_structp png_ptr )
{
png_set_write_fn( png_ptr
, static_cast< void* > ( &this->_io_dev )
, static_cast< png_rw_ptr > ( &png_io_base<Device>::write_data )
, static_cast< png_flush_ptr >( &png_io_base<Device>::flush )
);
}
png_structp _png_ptr;
png_infop _info_ptr;
};
struct png_write_is_supported
{
template< typename View >
struct apply
: public is_write_supported< typename get_pixel_type< View >::type
, png_tag
>
{};
};
template< typename Device >
class dynamic_image_writer< Device
, png_tag
>
: public writer< Device
, png_tag
>
{
typedef writer< Device
, png_tag
> parent_t;
public:
dynamic_image_writer( Device& file )
: parent_t( file )
{}
template< typename Views >
void apply( const any_image_view< Views >& views )
{
dynamic_io_fnobj< png_write_is_supported
, parent_t
> op( this );
apply_operation( views, op );
}
};
} // namespace detail
} // namespace gil
} // namespace boost
#endif // BOOST_GIL_EXTENSION_IO_DETAIL_PNG_IO_WRITE_HPP_INCLUDED
|
{"hexsha": "0dc0155515f0d9c97947458e145bcee56a09264e", "size": 17003, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "extern/boost/gil/extension/io_new/formats/png/write.hpp", "max_stars_repo_name": "qc2105/librjmcmc", "max_stars_repo_head_hexsha": "6e031a9f6f3612394f8918c745700ae41d2aa586", "max_stars_repo_licenses": ["CECILL-B"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "extern/boost/gil/extension/io_new/formats/png/write.hpp", "max_issues_repo_name": "qc2105/librjmcmc", "max_issues_repo_head_hexsha": "6e031a9f6f3612394f8918c745700ae41d2aa586", "max_issues_repo_licenses": ["CECILL-B"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "extern/boost/gil/extension/io_new/formats/png/write.hpp", "max_forks_repo_name": "qc2105/librjmcmc", "max_forks_repo_head_hexsha": "6e031a9f6f3612394f8918c745700ae41d2aa586", "max_forks_repo_licenses": ["CECILL-B"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.5727969349, "max_line_length": 107, "alphanum_fraction": 0.4415103217, "num_tokens": 3376}
|
[STATEMENT]
lemma P_inner_t0[simp]: "P_inner g t0 = x0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. P_inner g t0 = x0
[PROOF STEP]
by (simp add: P_inner_def)
|
{"llama_tokens": 79, "file": "Ordinary_Differential_Equations_IVP_Initial_Value_Problem", "length": 1}
|
import os
import argparse
from argparse import Namespace
import logging
import time
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import copy
from decimal import Decimal
import wandb
import sys
sys.path.append('../../../../')
from sopa.src.solvers.utils import create_solver
from sopa.src.models.utils import fix_seeds, RunningAverageMeter
from sopa.src.models.odenet_mnist.layers import MetaNODE
from sopa.src.models.odenet_mnist.utils import makedirs, get_logger, count_parameters, learning_rate_with_decay
from sopa.src.models.odenet_mnist.data import get_mnist_loaders, inf_generator
# from sopa.src.models.odenet_mnist.metrics import accuracy
from sopa.src.models.odenet_mnist.train_validate import train, validate
parser = argparse.ArgumentParser()
parser.add_argument('--network', type=str, choices=['resnet', 'odenet'], default='odenet')
parser.add_argument('--downsampling-method', type=str, default='conv', choices=['conv', 'res'])
parser.add_argument('--activation', type=str, choices=['tanh', 'softplus', 'softsign', 'relu'], default='relu')
parser.add_argument('--in_channels', type=int, default=1)
parser.add_argument('--solvers',
type=lambda s: [tuple(map(lambda iparam: str(iparam[1]) if iparam[0] <= 1 else (
int(iparam[1]) if iparam[0] == 2 else (
float(iparam[1]) if iparam[0] == 3 else Decimal(iparam[1]))),
enumerate(item.split(',')))) for item in s.strip().split(';')],
default=None,
help='Each solver is represented with (method,parameterization,n_steps,step_size,u0,v0) \n' +
'If the solver has only one parameter u0, set v0 to -1; \n' +
'n_steps and step_size are exclusive parameters, only one of them can be != -1, \n'
'If n_steps = step_size = -1, automatic time grid_constructor is used \n;'
'For example, --solvers rk4,uv,2,-1,0.3,0.6;rk3,uv,-1,0.1,0.4,0.6;rk2,u,4,-1,0.3,-1')
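# Note (illustrative, not part of the original argument spec): with the parsing
# lambda above, each ';'-separated solver spec becomes a tuple whose fields are
# typed by position -- method and parameterization stay strings, n_steps becomes
# an int, step_size a float, and u0/v0 Decimals. For example,
#   "rk2,u,4,-1,0.3,-1"  ->  [('rk2', 'u', 4, -1.0, Decimal('0.3'), Decimal('-1'))]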
parser.add_argument('--solver_mode', type=str, choices=['switch', 'ensemble', 'standalone'], default='standalone')
parser.add_argument('--val_solver_modes',
type=lambda s: s.strip().split(','),
default=['standalone', 'ensemble', 'switch'],
help='Solver modes to use for validation step')
parser.add_argument('--switch_probs', type=lambda s: [float(item) for item in s.split(',')], default=None,
help="--switch_probs 0.8,0.1,0.1")
parser.add_argument('--ensemble_weights', type=lambda s: [float(item) for item in s.split(',')], default=None,
help="ensemble_weights 0.6,0.2,0.2")
parser.add_argument('--ensemble_prob', type=float, default=1.)
parser.add_argument('--noise_type', type=str, choices=['cauchy', 'normal'], default=None)
parser.add_argument('--noise_sigma', type=float, default=0.001)
parser.add_argument('--noise_prob', type=float, default=0.)
parser.add_argument('--minimize_rk2_error', type=eval, default=False, choices=[True, False])
parser.add_argument('--nepochs_nn', type=int, default=160)
parser.add_argument('--nepochs_solver', type=int, default=0)
parser.add_argument('--nstages', type=int, default=1)
parser.add_argument('--data_aug', type=eval, default=True, choices=[True, False])
parser.add_argument('--lr', type=float, default=0.01)
parser.add_argument('--weight_decay', type=float, default=0.0005)
parser.add_argument('--batch_size', type=int, default=128)
parser.add_argument('--test_batch_size', type=int, default=1000)
parser.add_argument('--lr_uv', type=float, default=1e-3)
parser.add_argument('--torch_dtype', type=str, default='float32')
parser.add_argument('--wandb_name', type=str, default='mnist_tmp')
parser.add_argument('--data_root', type=str, default='/gpfs/gpfs0/t.daulbaev/data/MNIST')
parser.add_argument('--save', type=str, default='../../../rk2_tmp')
parser.add_argument('--debug', action='store_true')
parser.add_argument('--gpu', type=int, default=0)
parser.add_argument('--seed', type=int, default=502)
# Noise and adversarial attacks parameters:
parser.add_argument('--data_noise_std', type=float, default=0.,
help='Applies Norm(0, std) gaussian noise to each batch entry')
parser.add_argument('--eps_adv_training', type=float, default=0.3,
help='Epsilon for adversarial training')
parser.add_argument(
"--adv_training_mode",
default="clean",
choices=["clean", "fgsm", "at"], #, "at_ls", "av", "fs", "nce", "nce_moco", "moco", "av_extra", "meta"],
    help='''Adversarial training method/mode; by default there is no adversarial training (clean).
For further details see MegaAdversarial/train in this repository.
'''
)
parser.add_argument('--use_wandb', type=eval, default=True, choices=[True, False])
parser.add_argument('--use_logger', type=eval, default=False, choices=[True, False])
parser.add_argument('--ss_loss', type=eval, default=False, choices=[True, False])
parser.add_argument('--ss_loss_reg', type=float, default=0.1)
parser.add_argument('--timestamp', type=int, default=int(1e6 * time.time()))
args = parser.parse_args()
sys.path.append('../../')
if args.use_logger:
makedirs(args.save)
logger = get_logger(logpath=os.path.join(args.save, 'logs'), filepath=None)
logger.info(args)
if args.use_wandb:
wandb.init(project=args.wandb_name, entity="sopa_node")
makedirs(args.save)
wandb.config.update(args)
os.makedirs(os.path.join(args.save, str(args.timestamp)))
if args.torch_dtype == 'float64':
dtype = torch.float64
elif args.torch_dtype == 'float32':
dtype = torch.float32
else:
raise ValueError('torch_type should be either float64 or float32')
if __name__ == "__main__":
print(args.solvers)
fix_seeds(args.seed)
if args.torch_dtype == 'float64':
dtype = torch.float64
elif args.torch_dtype == 'float32':
dtype = torch.float32
else:
raise ValueError('torch_type should be either float64 or float32')
makedirs(args.save)
logger = get_logger(logpath=os.path.join(args.save, 'logs'), filepath=None)
logger.info(args)
device = torch.device('cuda:' + str(args.gpu) if torch.cuda.is_available() else 'cpu')
########### Create train / val solvers
train_solvers = [create_solver(*solver_params, dtype=dtype, device=device) for solver_params in args.solvers]
for solver in train_solvers:
solver.freeze_params()
train_solver_options = Namespace(**{key: vars(args)[key] for key in ['solver_mode', 'switch_probs',
'ensemble_prob', 'ensemble_weights']})
val_solver_modes = args.val_solver_modes
########## Build the model
is_odenet = args.network == 'odenet'
model = MetaNODE(downsampling_method=args.downsampling_method,
is_odenet=is_odenet,
activation_type=args.activation,
in_channels=args.in_channels)
model.to(device)
if args.use_wandb:
wandb.watch(model)
if args.use_logger:
logger.info(model)
########### Create data loaders
train_loader, test_loader, train_eval_loader = get_mnist_loaders(args.data_aug,
args.batch_size,
args.test_batch_size,
data_root=args.data_root)
data_gen = inf_generator(train_loader)
batches_per_epoch = len(train_loader)
########### Create criterion and optimizer
criterion = nn.CrossEntropyLoss().to(device)
loss_options = Namespace(ss_loss=args.ss_loss)
lr_fn = learning_rate_with_decay(
args.batch_size, batch_denom=128, batches_per_epoch=batches_per_epoch, boundary_epochs=[60, 100, 140],
decay_rates=[1, 0.1, 0.01, 0.001], lr0=args.lr)
optimizer = optim.RMSprop([{"params": model.parameters(), 'lr': args.lr}, ], lr=args.lr,
weight_decay=args.weight_decay)
########### Train the model
best_acc = {'standalone': [0] * len(train_solvers),
'ensemble': 0,
'switch': 0}
batch_time_meter = RunningAverageMeter()
f_nfe_meter = RunningAverageMeter()
b_nfe_meter = RunningAverageMeter()
for itr in range(args.nepochs_nn * batches_per_epoch):
for param_group in optimizer.param_groups:
param_group['lr'] = lr_fn(itr)
if itr % batches_per_epoch != 0:
train(itr,
model,
data_gen,
solvers=train_solvers,
solver_options=train_solver_options,
criterion=criterion,
optimizer=optimizer,
batch_time_meter=batch_time_meter,
f_nfe_meter=f_nfe_meter,
b_nfe_meter=b_nfe_meter,
device=device,
dtype=dtype,
is_odenet=is_odenet,
args=args,
logger=None,
wandb_logger=None)
else:
train(itr,
model,
data_gen,
solvers=train_solvers,
solver_options=train_solver_options,
criterion=criterion,
optimizer=optimizer,
batch_time_meter=batch_time_meter,
f_nfe_meter=f_nfe_meter,
b_nfe_meter=b_nfe_meter,
device=device,
dtype=dtype,
is_odenet=is_odenet,
args=args,
logger=logger,
wandb_logger=wandb)
best_acc = validate(best_acc,
itr,
model,
train_eval_loader,
test_loader,
batches_per_epoch,
solvers=train_solvers,
val_solver_modes=val_solver_modes,
batch_time_meter=batch_time_meter,
f_nfe_meter=f_nfe_meter,
b_nfe_meter=b_nfe_meter,
device=device,
dtype=dtype,
args=args,
logger=logger,
wandb_logger=wandb)
# # How to run
# CUDA_DEVICE_ORDER=PCI_BUS_ID CUDA_VISIBLE_DEVICES=3 python3 runner.py
# --data_root "/workspace/raid/data/datasets"
# --save "./experiment1"
# --network odenet
# --downsampling-method conv
# --solvers rk2,u,-1,-1,0.6,-1;rk2,u,-1,-1,0.5,-1
# --solver_mode switch
# --switch_probs 0.8,0.2
# --nepochs_nn 160
# --nepochs_solver 0
# --nstages 1
# --lr 0.1
# --seed 502
# CUDA_DEVICE_ORDER=PCI_BUS_ID CUDA_VISIBLE_DEVICES=1 python3 runner_new.py --data_root /workspace/raid/data/datasets/mnist --save ./experiment2_new --network 'odenet' --downsampling-method 'conv' --solvers "rk2,u,1,-1,0.5,-1;rk2,u,1,-1,1.0,-1" --solver_mode "switch" --activation "relu" --seed 702 --nepochs_nn 160 --nepochs_solver 0 --nstages 1 --lr 0.1
# CUDA_DEVICE_ORDER=PCI_BUS_ID CUDA_VISIBLE_DEVICES=4 python3 runner_new.py --data_root /workspace/raid/data/datasets/mnist --save ./experiment2_new --network 'odenet' --downsampling-method 'conv' --solvers "rk2,u,1,-1,0.66666666,-1" --solver_mode standalone --activation relu --seed 702 --nepochs_nn 160 --nepochs_solver 0 --nstages 1 --lr 0.1 --noise_type 'cauchy' --noise_sigma 0.001 --noise_prob 1.
# Recompute MNIST
|
{"hexsha": "df471018a4c3f81400f758c32be191fc187de7c8", "size": 11905, "ext": "py", "lang": "Python", "max_stars_repo_path": "sopa/src/models/odenet_mnist/runner_new.py", "max_stars_repo_name": "juliagusak/neural-ode-metasolver", "max_stars_repo_head_hexsha": "a5ca6ae0c00d2a8da3a5f4b77ee20fb151674d22", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 25, "max_stars_repo_stars_event_min_datetime": "2021-03-16T13:40:45.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-12T04:54:39.000Z", "max_issues_repo_path": "sopa/src/models/odenet_mnist/runner_new.py", "max_issues_repo_name": "MetaSolver/icml2021", "max_issues_repo_head_hexsha": "619774abe4a834ae371434af8b23379e9524e7da", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "sopa/src/models/odenet_mnist/runner_new.py", "max_forks_repo_name": "MetaSolver/icml2021", "max_forks_repo_head_hexsha": "619774abe4a834ae371434af8b23379e9524e7da", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-03-31T02:58:03.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-31T02:58:03.000Z", "avg_line_length": 44.7556390977, "max_line_length": 404, "alphanum_fraction": 0.614699706, "include": true, "reason": "import numpy", "num_tokens": 2736}
|
# File which contains helper method to visualize, process, predict for CNN models
# Dt- 02.08.21
import os
import random
import pathlib
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import tensorflow as tf
#####
## Method 1
def get_class_names(path):
'''
getting all the classNames from the directory structure
'''
if not path :
raise Exception("Please provide path as an arguement")
dir= pathlib.Path(path)
classes= []
for item in dir.glob("*") :
classes.append(item.name)
classes= np.sort(np.array(classes))
classes= classes[classes != ".DS_Store"]
return classes
#####
## Method 2
def traverse_dataset(path):
'''
A method which will traverse the directory and count the files and folders
'''
if not path :
raise Exception("Please provide path as an arguement")
for dir_path, dir_names, file_names in os.walk(path):
print(f" There are {len(file_names)} files in {len(dir_names)} direcotory under {dir_path}")
# os.listdir(path) will gnerate the list of files on given path..
#######
# Method 3
def view_random_image(target_dir, target_class) :
"""
getting a random image from a folder under a particular class
"""
target= os.path.join(target_dir, target_class)
img= random.sample(os.listdir(target),1)[0]
path= os.path.join(target, img)
img_tensor=mpimg.imread(path)
plt.imshow(img_tensor)
plt.axis(False)
plt.title(target_class)
print("shape --", img_tensor.shape)
return img_tensor
#######
## Method 4
def plot_curves(hist) :
'''
Plotting the loss and accuracy curves
'''
loss= hist.history["loss"]
val_loss= hist.history["val_loss"]
acc= hist.history["accuracy"]
val_acc= hist.history["val_accuracy"]
epochs= range(len(loss))
plt.figure(figsize=(9,4))
plt.subplot(1,2,1)
plt.plot(epochs, loss, label="loss")
plt.plot(epochs, val_loss, label="val-loss")
plt.title("Loss over epochs")
plt.xlabel("Epochs")
plt.legend()
plt.subplot(1,2,2)
plt.plot(epochs, acc, label="Accuracy")
plt.plot(epochs, val_acc, label="val-accuracy")
plt.title("Accuracy over epochs")
plt.xlabel("Epochs")
plt.legend()
plt.tight_layout()
def load_prep_image(file, shape=224):
'''
Reads an image file, resizes it, and scales pixel values to [0, 1]
'''
img= tf.io.read_file(file)
img_tensor= tf.image.decode_image(img, channels=3)
img_resized= tf.image.resize(img_tensor, size=(shape, shape))
img_resized /= 255.
return img_resized
def pred_and_plot(model, file, classes) :
"""
Predicting and plotting for an image
"""
img= load_prep_image(file)
if img.ndim < 4:
plt.imshow(img)
img= tf.expand_dims(img, axis=0)
else:
plt.imshow(tf.squeeze(img, axis=0))
pred= tf.squeeze(model.predict(img))
print(pred)
if len(pred) ==1 :
pred_Class= classes[tf.cast(pred.numpy() > 0.5 , dtype="int8").numpy()]
else :
pred_Class= classes[pred.argmax()]
plt.title(pred_Class, fontdict={"fontsize": 18, "color": "green"})
plt.axis(False)
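# Example usage (illustrative sketch only; the dataset path, model file and test
# image below are assumptions, not part of this module):
#
#   classes = get_class_names("data/train")            # e.g. ['cat', 'dog', ...]
#   traverse_dataset("data/train")
#   view_random_image("data/train", classes[0])
#   model = tf.keras.models.load_model("my_cnn_model") # any trained Keras CNN
#   pred_and_plot(model, "data/test/cat/1.jpg", classes)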
if __name__ == "__main__":
pass
|
{"hexsha": "58792e47b3c073354a955cba253dc3a797932943", "size": 3341, "ext": "py", "lang": "Python", "max_stars_repo_path": "Computer-Vision/helper.py", "max_stars_repo_name": "teddcp2/Tensorflow-Deep-Learning-notes", "max_stars_repo_head_hexsha": "6b52cc32338695052256852879dc383c581fbf7f", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Computer-Vision/helper.py", "max_issues_repo_name": "teddcp2/Tensorflow-Deep-Learning-notes", "max_issues_repo_head_hexsha": "6b52cc32338695052256852879dc383c581fbf7f", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Computer-Vision/helper.py", "max_forks_repo_name": "teddcp2/Tensorflow-Deep-Learning-notes", "max_forks_repo_head_hexsha": "6b52cc32338695052256852879dc383c581fbf7f", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.035971223, "max_line_length": 100, "alphanum_fraction": 0.6270577671, "include": true, "reason": "import numpy", "num_tokens": 835}
|
# Copyright (c) 2019-2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import cupy as cp
from uuid import uuid1
from cuml.dask.common.base import BaseEstimator
from cuml.dask.common.base import DelayedPredictionMixin
from cuml.dask.common.base import DelayedTransformMixin
from cuml.dask.common.base import mnmg_import
from cuml.dask.common.input_utils import concatenate
from cuml.dask.common.input_utils import DistributedDataHandler
from cuml.dask.common.comms import CommsContext
from cuml.dask.common.comms import worker_state
from cuml.dask.common.utils import raise_exception_from_futures
from dask.distributed import wait
from cuml.utils.memory_utils import with_cupy_rmm
class KMeans(BaseEstimator, DelayedPredictionMixin, DelayedTransformMixin):
"""
Multi-Node Multi-GPU implementation of KMeans.
This version minimizes data transfer by sharing only
the centroids between workers in each iteration.
Predictions are done embarrassingly parallel, using cuML's
single-GPU version.
For more information on this implementation, refer to the
documentation for single-GPU K-Means.
Parameters
----------
handle : cuml.Handle
If it is None, a new one is created just for this class.
n_clusters : int (default = 8)
The number of centroids or clusters you want.
max_iter : int (default = 300)
The more iterations of EM, the more accurate, but slower.
tol : float (default = 1e-4)
Stopping criterion when centroid means do not change much.
verbose : boolean (default = 0)
If True, prints diagnostic information.
random_state : int (default = 1)
If you want results to be the same when you restart Python,
select a state.
init : {'scalable-kmeans++', 'k-means||' , 'random' or an ndarray}
(default = 'scalable-k-means++')
'scalable-k-means++' or 'k-means||': Uses fast and stable scalable
kmeans++ initialization.
'random': Choose 'n_cluster' observations (rows) at random
from data for the initial centroids. If an ndarray is passed,
it should be of shape (n_clusters, n_features) and gives the
initial centers.
oversampling_factor : int (default = 2) The amount of points to sample
in scalable k-means++ initialization for potential centroids.
Increasing this value can lead to better initial centroids at the
cost of memory. The total number of centroids sampled in scalable
k-means++ is oversampling_factor * n_clusters * 8.
max_samples_per_batch : int (default = 32768) The number of data
samples to use for batches of the pairwise distance computation.
This computation is done throughout both fit and predict. The default
should suit most cases. The total number of elements in the
batched pairwise distance computation is max_samples_per_batch
* n_clusters. It might become necessary to lower this number when
n_clusters becomes prohibitively large.
Attributes
----------
cluster_centers_ : cuDF DataFrame or CuPy ndarray
The coordinates of the final clusters. This represents the "mean" of
each data cluster.
"""
def __init__(self, client=None, verbose=False, **kwargs):
super(KMeans, self).__init__(client=client,
verbose=verbose,
**kwargs)
@staticmethod
@mnmg_import
def _func_fit(sessionId, objs, datatype, **kwargs):
from cuml.cluster.kmeans_mg import KMeansMG as cumlKMeans
handle = worker_state(sessionId)["handle"]
inp_data = concatenate(objs)
return cumlKMeans(handle=handle, output_type=datatype,
**kwargs).fit(inp_data)
@staticmethod
def _score(model, data):
ret = model.score(data)
return ret
@with_cupy_rmm
def fit(self, X):
"""
Fit a multi-node multi-GPU KMeans model
Parameters
----------
X : Dask cuDF DataFrame or CuPy backed Dask Array
Training data to cluster.
"""
data = DistributedDataHandler.single(X, client=self.client)
self.datatype = data.datatype
comms = CommsContext(comms_p2p=False, verbose=self.verbose)
comms.init(workers=data.workers)
key = uuid1()
kmeans_fit = [self.client.submit(KMeans._func_fit,
comms.sessionId,
wf[1],
self.datatype,
**self.kwargs,
workers=[wf[0]],
key="%s-%s" % (key, idx))
for idx, wf in enumerate(data.worker_to_parts.items())]
wait(kmeans_fit)
raise_exception_from_futures(kmeans_fit)
comms.destroy()
self.local_model = kmeans_fit[0].result()
self.cluster_centers_ = self.local_model.cluster_centers_
return self
def fit_predict(self, X, delayed=True):
"""
Compute cluster centers and predict cluster index for each sample.
Parameters
----------
X : Dask cuDF DataFrame or CuPy backed Dask Array
Data to predict
Returns
-------
result: Dask cuDF DataFrame or CuPy backed Dask Array
Distributed object containing predictions
"""
return self.fit(X).predict(X, delayed=delayed)
def predict(self, X, delayed=True):
"""
Predict labels for the input
Parameters
----------
X : Dask cuDF DataFrame or CuPy backed Dask Array
Data to predict
delayed : bool (default = True)
Whether to do a lazy prediction (and return Delayed objects) or an
eagerly executed one.
Returns
-------
result: Dask cuDF DataFrame or CuPy backed Dask Array
Distributed object containing predictions
"""
return self._predict(X, delayed=delayed)
def fit_transform(self, X, delayed=True):
"""
Calls fit followed by transform using a distributed KMeans model
Parameters
----------
X : Dask cuDF DataFrame or CuPy backed Dask Array
Data to predict
delayed : bool (default = True)
Whether to execute as a delayed task or eager.
Returns
-------
result: Dask cuDF DataFrame or CuPy backed Dask Array
Distributed object containing the transformed data
"""
return self.fit(X).transform(X, delayed=delayed)
def transform(self, X, delayed=True):
"""
Transforms the input into the learned centroid space
Parameters
----------
X : Dask cuDF DataFrame or CuPy backed Dask Array
Data to predict
delayed : bool (default = True)
Whether to execute as a delayed task or eager.
Returns
-------
result: Dask cuDF DataFrame or CuPy backed Dask Array
Distributed object containing the transformed data
"""
return self._transform(X, n_dims=2, delayed=delayed)
@with_cupy_rmm
def score(self, X):
"""
Computes the inertia score for the trained KMeans centroids.
Parameters
----------
X : dask_cudf.Dataframe
Dataframe to compute score
Returns
-------
Inertial score
"""
scores = self._run_parallel_func(KMeans._score,
X,
n_dims=1,
delayed=False,
output_futures=True)
return -1 * cp.sum(cp.asarray(
self.client.compute(scores, sync=True))*-1.0)
def get_param_names(self):
return list(self.kwargs.keys())
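# Example usage (illustrative sketch, not part of the original module; assumes a
# running Dask CUDA cluster, e.g. via dask_cuda.LocalCUDACluster, and a
# CuPy-backed Dask array as input):
#
#   from dask_cuda import LocalCUDACluster
#   from dask.distributed import Client
#   import dask.array as da
#
#   cluster = LocalCUDACluster()
#   client = Client(cluster)
#   X = da.random.random((100_000, 20), chunks=(10_000, 20)).map_blocks(cp.asarray)
#   model = KMeans(client=client, n_clusters=8, max_iter=300)
#   labels = model.fit_predict(X)   # lazy (delayed) by default
#   inertia = model.score(X)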
|
{"hexsha": "0781606810839912910f846b554bb464e94b31ca", "size": 8589, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/cuml/dask/cluster/kmeans.py", "max_stars_repo_name": "harrism/cuml", "max_stars_repo_head_hexsha": "060dcd94138deed2ac692031cfe70a674b15c6f0", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-07-13T04:16:50.000Z", "max_stars_repo_stars_event_max_datetime": "2020-07-13T04:16:50.000Z", "max_issues_repo_path": "python/cuml/dask/cluster/kmeans.py", "max_issues_repo_name": "harrism/cuml", "max_issues_repo_head_hexsha": "060dcd94138deed2ac692031cfe70a674b15c6f0", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "python/cuml/dask/cluster/kmeans.py", "max_forks_repo_name": "harrism/cuml", "max_forks_repo_head_hexsha": "060dcd94138deed2ac692031cfe70a674b15c6f0", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.55078125, "max_line_length": 78, "alphanum_fraction": 0.6162533473, "include": true, "reason": "import cupy", "num_tokens": 1813}
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 2 13:14:32 2021
@author: ali_d
"""
import matplotlib.pyplot as plt
import numpy as np
x = np.linspace(-10,9,20)
# create 20 evenly spaced values between -10 and 9
y = x ** 3
z = x**2
figure = plt.figure()
# create an empty figure
axes_cube = figure.add_axes([0.1,0.1,0.8,0.8])
# axes position: 0.1 from the left, 0.1 from the bottom, width and height of 0.8
axes_cube.plot(x,y,"b")
axes_cube.set_xlabel("X axis")
axes_cube.set_ylabel("Y axis")
axes_cube.set_title("Cube")
axes_cube = figure.add_axes([0.15,0.6,0.25,0.25])
axes_cube.plot(x,z,"r")
axes_cube.set_xlabel("X axis")
axes_cube.set_ylabel("Y axis")
axes_cube.set_title("Square")
plt.show()
#%%
x = np.linspace(-10,9,20)
# create 20 evenly spaced values between -10 and 9
y = x ** 3
z = x**2
figure = plt.figure()
axes = figure.add_axes([0,0,1,1])
axes.plot(x,z,label="Square")
axes.plot(x,y,label="Cube")
axes.legend(loc=4)
plt.show()
#%%
fig,axes = plt.subplots(nrows=2,ncols=1,figsize=(8,8))
axes[0].plot(x,y,"r")
axes[0].set_title("Cube")
axes[1].plot(x,z,"g")
axes[1].set_title("Square")
fig.savefig("matplotlib")
plt.tight_layout()
plt.show()
###
plt.bar([0.25,1.25,2.25,3.25,4.25],[90,80,30,70,90],label="Bmv",width=.5)
plt.bar([0.15,1.35,2.15,3.45,3.25],[40,80,80,60,70],label="Audi",width=.5)
plt.legend()
plt.xlabel("gün")
plt.ylabel("mesafe(km)")
plt.title("Araç Bilgileri")
plt.show()
|
{"hexsha": "ab891fee163e0e78db9cae560e8118a1517c7340", "size": 1495, "ext": "py", "lang": "Python", "max_stars_repo_path": "Data Visualization/Matplotlib/3 Matplotlib.py", "max_stars_repo_name": "ALDOR99/Python", "max_stars_repo_head_hexsha": "a76f37bb3e573cd3fdcfc19f4f73494cafa9140e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-05-27T19:13:02.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-02T13:26:35.000Z", "max_issues_repo_path": "Data Visualization/Matplotlib/3 Matplotlib.py", "max_issues_repo_name": "ALDOR99/Python", "max_issues_repo_head_hexsha": "a76f37bb3e573cd3fdcfc19f4f73494cafa9140e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Data Visualization/Matplotlib/3 Matplotlib.py", "max_forks_repo_name": "ALDOR99/Python", "max_forks_repo_head_hexsha": "a76f37bb3e573cd3fdcfc19f4f73494cafa9140e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-06-07T18:17:35.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-07T18:17:35.000Z", "avg_line_length": 11.4122137405, "max_line_length": 74, "alphanum_fraction": 0.6488294314, "include": true, "reason": "import numpy", "num_tokens": 547}
|
\chapter{Chain Database}
\label{chaindb}
TODO\todo{TODO}: This is currently a disjoint collection of snippets.
\section{Union of the Volatile DB and the Immutable DB}
\label{chaindb:union}
As discussed in \cref{storage:components}, the blocks in the Chain DB are
divided between the Volatile DB (\cref{volatile}) and the Immutable DB
(\cref{immutable}). Yet, it presents a unified view of the two databases.
Whereas the Immutable DB contains only the immutable chain and the Volatile DB
only the volatile \emph{parts} of multiple forks, the Chain DB, by combining the
two, contains multiple forks.
\subsection{Looking up blocks}
\label{chaindb:union:lookup}
Just like the two underlying databases, the Chain DB allows looking up a
\lstinline!BlockComponent! of a block by its point. By comparing the slot number
of the point to the slot of the immutable tip, we could decide in which database
to look up the block. However, this would not be correct: the point might have a
slot older than the immutable tip, but refer to a block not in the Immutable DB,
i.e., a block on an older fork. More importantly, there is a potential race
condition: between the time at which the immutable tip was retrieved and the
time the block is retrieved from the Volatile DB, the block might have been
copied to the Immutable DB and garbage collected from the Volatile DB, resulting
in a false negative. Nevertheless, the overlap between the two makes this
scenario very unlikely.
For these reasons, we look up a block in the Chain DB as follows. We first look
up the given point in the Volatile DB. If the block is not in the Volatile DB,
we fall back to the Immutable DB. This means that if, at the same time, a block is
copied from the Volatile DB to the Immutable DB and garbage collected from the
Volatile DB, we will still find it in the Immutable DB. Note that failed lookups
in the Volatile DB are cheap, as no disk access is required.
\subsection{Iterators}
\label{chaindb:union:iterators}
Similar to the Immutable DB (\cref{immutable:api:iterators}), the Chain DB
allows streaming blocks using iterators. We only support streaming blocks from
the current chain or from a recent fork. We \emph{do not} support streaming from
a fork that starts before the current immutable tip, as these blocks are likely
to be garbage collected soon. Moreover, it is of no use to us to serve another
node blocks from a fork we discarded.
We might have to stream blocks from the Immutable DB, the Volatile DB, or from
both. If the end bound is older than or equal to the immutable tip, we simply try to
open an Immutable DB iterator with the given bounds. If the end bound is newer
than the immutable tip, we construct a path of points (see
\lstinline!filterByPredecessor! in \cref{volatile:api}) connecting the end bound
to the start bound. This path is either entirely in the Volatile DB or it is
partial because a block is missing from the Volatile DB. If the missing block is
the tip of the Immutable DB, we will have to stream from the Immutable DB in
addition to the Volatile DB. If the missing block is not the tip of the
Immutable DB, we consider the range to be invalid. In other words, we allow
streaming from both databases, but only if the immutable tip is the transition
point between the two; it cannot be a block before the tip, as that would mean
the fork is too old.
\todo{TODO} Image?
To stream blocks from the Volatile DB, we maintain the constructed path of
points as a list in memory and look up the corresponding block (component) in
the Volatile DB one by one.
Consider the following scenario: we open a Chain DB iterator to stream the
beginning of the current volatile chain, i.e., the blocks in the Volatile DB
right after the immutable tip. However, before streaming the iterator's first
block, we switch to a long fork that forks off all the way back at our immutable
tip. If that fork is longer than the previous chain, blocks from the start of
our chain will be copied from the Volatile DB to the Immutable DB,\todo{link}
advancing the immutable tip. This means the blocks the iterator will stream are
now part of a fork older than $k$. In this new situation, we would not allow
opening an iterator with the same range as the already-opened iterator. However,
we do allow streaming these blocks using the already opened iterator, as the
blocks to stream are unlikely to have already been garbage collected.
Nevertheless, it is still theoretically possible\footnote{This is unlikely, as
there is a delay between copying and garbage collection (see
\cref{chaindb:gc:delay}) and there are network time-outs on the block fetch
protocol, of which the server-side (see \cref{servers:blockfetch}) is the
primary user of Chain DB iterators.} that such a block has already been garbage
collected. For this reason, the Chain DB extends the Immutable DB's
\lstinline!IteratorResult! type (see \cref{immutable:api:iterators}) with the
\lstinline!IteratorBlockGCed! constructor:
%
\begin{lstlisting}
data IteratorResult blk b =
IteratorExhausted
| IteratorResult b
| IteratorBlockGCed (RealPoint blk)
\end{lstlisting}
There is another scenario to consider: we stream the blocks from the start of
the current volatile chain, just like in the previous scenario. However, in this
case, we do not switch to a fork, but our chain is extended with new blocks,
which means blocks from the start of our volatile chain are copied from the
Volatile DB to the Immutable DB. If these blocks have been copied and garbage
collected before the iterator is used to stream them from the Volatile DB (which
is unlikely, as explained in the previous scenario), the iterator will
incorrectly yield \lstinline!IteratorBlockGCed!. Instead, when a block that was
planned to be streamed from the Volatile DB is missing, we first look in the
Immutable DB for the block in case it has been copied there. After the block
copied to the Immutable DB has been streamed, we continue with the remaining blocks
to stream from the Volatile DB. It might be the case that the next block has
also been copied and garbage collected, requiring another switch to the
Immutable DB. In the theoretical worst case, we have to switch between the two
databases for each block, but in practice this is extremely unlikely to happen.
\subsection{Followers}
\label{chaindb:union:followers}
In addition to iterators, the Chain DB also supports \emph{followers}. Unlike an
iterator, which is used to request a static segment of the current chain or a
recent fork, a follower is used to follow the \emph{current chain}, either from
the start or from a suggested, more recent point. Unlike iterators, followers are
dynamic: they will follow the chain when it grows or forks. A follower is
pull-based, just like its primary user, the chain sync server (see
\cref{servers:chainsync}). This avoids the need to have a growing queue of
changes to the chain on the server side in case the client side is slower.
The API of a follower is as follows:
%
\begin{lstlisting}
data Follower m blk a = Follower {
followerInstruction :: m (Maybe (ChainUpdate blk a))
, followerInstructionBlocking :: m (ChainUpdate blk a)
, followerForward :: [Point blk] -> m (Maybe (Point blk))
, followerClose :: m ()
}
\end{lstlisting}
%
The \lstinline!a! parameter is the same \lstinline!a! as the one in
\lstinline!BlockComponent! (see \cref{immutable:api:block-component}), as a
follower for any block component \lstinline!a! can be opened.
A follower always has an implicit position associated with it. The
\lstinline!followerInstruction! operation and its blocking variant allow
requesting the next instruction w.r.t.\ the follower's implicit position, i.e.,
a \lstinline!ChainUpdate!:
%
\begin{lstlisting}
data ChainUpdate block a =
AddBlock a
| RollBack (Point block)
\end{lstlisting}
%
The \lstinline!AddBlock! constructor indicates that to follow the current chain,
the follower should extend its chain with the given block (component). Switching
to a fork is represented by first rolling back to a certain point
(\lstinline!RollBack!), followed by at least as many new blocks
(\lstinline!AddBlock!) as blocks that have been rolled back. If we were to
represent switching to a fork using a constructor like:
%
\begin{lstlisting}
| SwitchToFork (Point block) [a]
\end{lstlisting}
%
we would need to have many blocks or block components in memory at the same
time.
These operations are implemented as follows. In case the follower is looking at
the immutable part of the chain, an Immutable DB iterator is used and no
rollbacks will be encountered. When the follower has advanced into the volatile
part of the chain, the in-memory fragment containing the last $k$ headers is
used (see \cref{storage:inmemory}). Depending on the block component, the
corresponding block might have to be read from the Volatile DB.
When a new chain has been adopted during chain selection (see
\cref{chainsel:addblock}), all open followers that are looking at the part of
the current chain that was rolled back are updated so that their next
instruction will be the correct \lstinline!RollBack!. By definition, followers
looking at the immutable part of the chain will be unaffected.
By default, a follower will start from the very start of the chain, i.e., at
genesis. Accordingly, the first instruction will be an \lstinline!AddBlock! with
the very first block of the chain. As mentioned, the primary user of a follower
is the chain sync server, of which the clients in most cases already have large
parts of the chain. The \lstinline!followerForward! operation can be used in
these cases to find a more recent intersection from which the follower can
start. The client will send a few recent points from its chain and the follower
will try to find the most recent of them that is on our current chain. This is
implemented by looking up blocks by their point in the current chain fragment
and the Immutable DB.
Followers are affected by garbage collection similarly to how iterators are
(\cref{chaindb:union:iterators}): when the implicit position of the follower is
in the immutable part of the chain, an Immutable DB iterator with a static range
is used. Such an iterator is not aware of blocks appended to the Immutable DB
since the iterator was opened. This means that when the iterator reaches its
end, we first have to check whether more blocks have been appended to the
Immutable DB. If so, a new iterator is opened to stream these blocks. If not, we
switch over to the in-memory fragment.
\section{Block processing queue}
\label{chaindb:queue}
Discuss the chain DB's block processing queue, the future/promises/events,
concurrency concerns, etc.
Discuss the problem of the effective queue size (\#2721).
\section{Marking invalid blocks}
\label{chaindb:invalidblocks}
The chain database keeps a set of hashes of known-to-be-invalid blocks.
This information is used by the chain sync client (\cref{chainsyncclient}) to
terminate connections to nodes with a chain that contains an invalid block.
\begin{lemma}
\label{chaindb:dont-mark-invalid-successors}
When the chain database discovers an invalid block $X$, it is sufficient
to mark only $X$; there is no need to additionally mark any successors of $X$.
\end{lemma}
\begin{proof}[Proof (sketch).]
The chain sync client maintains a chain fragment corresponding to some suffix
of the upstream node's chain, and it preserves an invariant that that suffix
must intersect with the node's own current chain. It can therefore never be
the case that the fragment contains a successor of $X$ but not $X$ itself:
since $X$ is invalid, the node will never adopt it, and so a fragment that
intersects the node's current chain and includes a successor of $X$ \emph{must}
also contain $X$.
\end{proof}
TODO\todo{TODO}: We should discuss how this relates to GC (\cref{chaindb:gc}).
\section{Effective maximum rollback}
The maximum rollback we can support is bound by the length of the current fragment. This will be less than $k$ only if
\begin{itemize}
\item We are near genesis and the immutable database is empty, or
\item Due to data corruption, the volatile database lost some blocks.
\end{itemize}
Only the latter case is some cause for concern: we are in a state where
conceptually we \emph{could} roll back up to $k$ blocks, but due to how we chose
to organise the data on disk (the immutable/volatile split) we cannot. One
option here would be to move blocks \emph{back} from the immutable DB to the
volatile DB under these circumstances, and indeed, if there were other parts of
the system where rollback might be instigated that would be the right thing to
do: those other parts of the system should not be aware of particulars of the
disk layout.
However, since the chain database is \emph{exclusively} in charge of switching
to forks, all the logic can be isolated to the chain database. So, when we have
a short volatile fragment, we will just not roll back more than the length of
that fragment. Conceptually this can be justified also: the fact that $I$ is the
tip of the immutable DB means that \emph{at some point} it was in our chain at
least $k$ blocks back, and so we considered it to be immutable: the fact that
some data loss occurred does not really change that. We may still roll back more
than $k$ blocks when disk corruption occurs in the immutable database, of
course.
One use case of the current fragment merits a closer examination. When the chain
sync client (\cref{chainsyncclient}) looks for an intersection between our chain
and the chain of the upstream peer, it sends points from our chain fragment. If
the volatile fragment is shorter than $k$ due to data corruption, the client
would have fewer points to send to the upstream node. However, this is the
correct behaviour: it would mean we cannot connect to upstream nodes that fork
off more than $k$ blocks from what \emph{used to be} our tip before the data corruption, even
if that's not where our tip is anymore. In the extreme case, if the volatile
database gets entirely erased, only a single point is available (the tip of the
immutable database $I$), and hence we can only connect to upstream nodes that
have $I$ on their chain. This is precisely stating that we can only sync with
upstream nodes that have a chain that extends our immutable chain.
\section{Garbage collection}
\label{chaindb:gc}
Blocks on chains that are never selected, or indeed blocks whose
predecessor we never learn, will eventually be garbage collected when their
slot number is more than $k$ away from the tip of the selected chain.\footnote{This is slot based rather than block based for historical
reasons only; we should probably change this.}
\begin{bug}
The chain DB (more specifically, the volatile DB) can still grow without bound
if we allow upstream nodes to rapidly switch between forks; this should be
addressed at the network layer (for instance, by introducing rate limiting for
rollback in the chain sync client, \cref{chainsyncclient}).
\end{bug}
Although this is GC of the volatile DB, I feel it belongs here more than in
the volatile DB chapter because here we know \emph{when} we could GC.
But perhaps it should be split into two: a section on how GC is implemented
in the volatile DB chapter, and then a section here how it's used in the
chain DB. References from elsewhere in the report to GC should probably
refer here, though, not to the vol DB chapter.
\subsection{GC delay}
\label{chaindb:gc:delay}
For performance reasons neither the immutable DB nor the volatile DB ever makes
explicit \lstinline!fsync! calls to flush data to disk. This means that when the
node crashes, recently added blocks may be lost. When this happens in the
volatile DB it's not a huge deal: when the node starts back up and the chain
database is initialised we just run chain selection on whatever blocks still
remain; in typical cases we just end up with a slightly shorter chain.
However, when this happens in the immutable database the impact may be larger.
In particular, if we delete blocks from the volatile database as soon as we add
them to the immutable database, then data loss in the immutable database would
result in a gap between the volatile database and the immutable database, making
\emph{all} blocks in the volatile database unusable. We can recover from this, but it
would result in a large rollback (in particular, one larger than $k$).
To avoid this, we currently have a delay between adding blocks to the immutable
DB and removing them from the volatile DB (garbage collection). The delay is
configurable, but should be set in such a way that the possibility that the
block has not yet been written to disk at the time of garbage collection is
minimised; a relatively short delay should suffice (currently we use a delay of
1 minute), though there are other reasons for preferring a longer delay:
\begin{itemize}
\item Clock changes can more easily be accommodated with more overlap (\cref{future:clockchanges})
\item The time delay also determines the worst-case validity of iterators
(todo\todo{TODO}: reference to relevant section).
\end{itemize}
Larger delays will of course result in more overlap between the two databases.
During normal node operation this might not be much, but the overlap might be
more significant during bulk syncing.
Notwithstanding the above discussion, an argument could be made that the
additional complexity due to the delay is not worth it; even a ``rollback'' of
more than $k$ is easily recovered from\footnote{Note that the node will never
actually notice such a rollback; the node would crash when discovering data
loss, and then restart with a smaller chain.}, and clock changes, as well as
iterators asking for blocks that now live on distant chains, are not important
use cases. We could therefore decide to remove the delay altogether.
\section{Resources}
\label{chaindb:resources}
In the case of the chain DB, the allocation function will be wrapped in a
\lstinline!runWithTempRegistry! combinator, which will hold an empty resulting
state. This is because, as mentioned in \cref{nonfunctional:temporaryregs}, we only get
back values that do not leak implementation details and therefore we cannot run any
checks, but we still want to keep track of the resources. The allocation of each
of the databases (Immutable DB and Volatile DB) will be executed using the
combinator \lstinline!runInnerWithTempRegistry! so that each of them performs
the relevant checks on the \lstinline!OpenState! they return, but such checks are
neither visible nor runnable at the chain DB scope.
The threads that are spawned during the initialization of the database will be
registered in the node's general registry, as they won't be directly tracked by
the chain DB API but will instead coexist alongside it.
The final step of ChainDB initialization is registering itself in the general
registry so that it is closed in the presence of an exception.
|
{"hexsha": "8d794bc13600d5fbabb730659650cedd457b9447", "size": 18976, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "ouroboros-consensus/docs/report/chapters/storage/chaindb.tex", "max_stars_repo_name": "RyanGlScott/ouroboros-network", "max_stars_repo_head_hexsha": "85b06a74c7b895c5412ba2ac8a43b9c264ad7957", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-05-16T12:30:21.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-16T12:30:21.000Z", "max_issues_repo_path": "ouroboros-consensus/docs/report/chapters/storage/chaindb.tex", "max_issues_repo_name": "RyanGlScott/ouroboros-network", "max_issues_repo_head_hexsha": "85b06a74c7b895c5412ba2ac8a43b9c264ad7957", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ouroboros-consensus/docs/report/chapters/storage/chaindb.tex", "max_forks_repo_name": "RyanGlScott/ouroboros-network", "max_forks_repo_head_hexsha": "85b06a74c7b895c5412ba2ac8a43b9c264ad7957", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 53.7563739377, "max_line_length": 143, "alphanum_fraction": 0.7881007589, "num_tokens": 4365}
|
import pretty_midi
import numpy as np
def extract_label(label_path, m_beat_arr):
"""Extract drum label notes.
Process ground-truth midi into numpy array representation.
Parameters
----------
label_path: Path
Path to the midi file.
m_beat_arr:
        Extracted mini-beat array of the corresponding audio piece.
Returns
-------
drum_track_ary: numpy.ndarray
The extracted label in numpy array. Should have a total of 128 classes
of drum notes.
See Also
--------
omnizart.feature.beat_for_drum.extract_mini_beat_from_audio_path:
The function for extracting mini-beat array from the given audio path.
"""
m_beat_range = []
start = m_beat_arr[0] - (m_beat_arr[1] - m_beat_arr[0]) / 2
end = m_beat_arr[0] + (m_beat_arr[1] - m_beat_arr[0]) / 2
m_beat_range.append(start)
m_beat_range.append(end)
for idx, beat in enumerate(m_beat_arr[1:-1]):
end = beat + (m_beat_arr[idx+1] - beat) / 2 # noqa: E226
m_beat_range.append(end)
end = m_beat_arr[-1] + (m_beat_arr[-1] - m_beat_arr[-2]) / 2
m_beat_range.append(end)
m_beat_range = np.array(m_beat_range)
midi = pretty_midi.PrettyMIDI(label_path)
notes = np.array([
[nn.start, nn.pitch]
for inst in midi.instruments
for nn in inst.notes
if inst.is_drum
])
drum_track_ary = np.zeros([len(m_beat_arr), 128])
for idx, beat in enumerate(m_beat_range[:-1]):
for note in notes:
if beat <= note[0] < m_beat_range[idx+1]: # noqa: E226
drum_track_ary[idx, int(note[1])] = 1.0
return drum_track_ary
def extract_label_13_inst(label_path, m_beat_arr):
"""Extract 13 types of drum label notes.
Process the MIDI drum notes into numpy array and concludes them
into 13 different sub-classes of drum notes.
Parameters
----------
label_path: Path
Path to the midi file.
m_beat_arr:
        Extracted mini-beat array of the corresponding audio piece.
Returns
-------
drum_track_ary: numpy.ndarray
The extracted label in numpy array.
See Also
--------
omnizart.drum.labels.extract_label:
Complete drum label extraction with 128 output classes.
omnizart.feature.beat_for_drum.extract_mini_beat_from_audio_path:
The function for extracting mini-beat array from the given audio path.
"""
label = extract_label(label_path, m_beat_arr)
inst_ary_out = np.zeros([len(label), 13]).astype(np.float32)
inst_ary_out[:, 0] = np.max(label[:, [33, 35, 36]], axis=1) # Bass drum
inst_ary_out[:, 1] = np.max(label[:, [27, 38, 40, 85, 87]], axis=1) # Snare drum
inst_ary_out[:, 2] = np.max(label[:, [37]], axis=1) # Side Stick
inst_ary_out[:, 3] = np.max(label[:, [39]], axis=1) # Clap
inst_ary_out[:, 4] = np.max(label[:, [42]], axis=1) # Closed HH
inst_ary_out[:, 5] = np.max(label[:, [44]], axis=1) # Pedal HH
inst_ary_out[:, 6] = np.max(label[:, [46]], axis=1) # Open HH
inst_ary_out[:, 7] = np.max(label[:, [41, 43]], axis=1) # low-tom
inst_ary_out[:, 8] = np.max(label[:, [45, 47]], axis=1) # mid-tom
inst_ary_out[:, 9] = np.max(label[:, [48, 50]], axis=1) # high-tom
inst_ary_out[:, 10] = np.max(label[:, [49, 55, 57]], axis=1) # Crash
inst_ary_out[:, 11] = np.max(label[:, [51, 53, 59]], axis=1) # Ride
inst_ary_out[:, 12] = np.max(label[:, [69, 70, 82]], axis=1) # Maracas
return label, inst_ary_out
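# Minimal usage sketch (hypothetical inputs, not part of the library): the MIDI
# path and the evenly spaced stand-in mini-beats below are placeholders; the
# real mini-beat array would come from
# omnizart.feature.beat_for_drum.extract_mini_beat_from_audio_path.
if __name__ == "__main__":
    fake_mini_beats = np.arange(0.5, 30.0, 0.1)  # stand-in mini-beat times in seconds
    full_label, label_13 = extract_label_13_inst("example_drums.mid", fake_mini_beats)
    print(full_label.shape, label_13.shape)  # (num_mini_beats, 128), (num_mini_beats, 13)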
|
{"hexsha": "ce6049dc2958bd2bbe76d473f2f8c9036a8c64ad", "size": 3522, "ext": "py", "lang": "Python", "max_stars_repo_path": "omnizart/drum/labels.py", "max_stars_repo_name": "nicolasanjoran/omnizart", "max_stars_repo_head_hexsha": "b0e74af39b2e3a312ef32dbf0837626b2e043cb6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1145, "max_stars_repo_stars_event_min_datetime": "2020-11-13T10:07:47.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T17:35:36.000Z", "max_issues_repo_path": "omnizart/drum/labels.py", "max_issues_repo_name": "nicolasanjoran/omnizart", "max_issues_repo_head_hexsha": "b0e74af39b2e3a312ef32dbf0837626b2e043cb6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 44, "max_issues_repo_issues_event_min_datetime": "2020-12-29T04:51:16.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-15T06:52:04.000Z", "max_forks_repo_path": "omnizart/drum/labels.py", "max_forks_repo_name": "nicolasanjoran/omnizart", "max_forks_repo_head_hexsha": "b0e74af39b2e3a312ef32dbf0837626b2e043cb6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 61, "max_forks_repo_forks_event_min_datetime": "2020-12-19T09:09:42.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-23T01:26:22.000Z", "avg_line_length": 36.3092783505, "max_line_length": 85, "alphanum_fraction": 0.6249290176, "include": true, "reason": "import numpy", "num_tokens": 1064}
|
#imports
import tensorflow as tf
from tensorflow import layers
import numpy as np
import cv2
from os import listdir
from os.path import join, isfile
#fix the numpy random seed for reproducibility
np.random.seed(7)
#use OpenCV (cv2) to read and normalize images
def getImages(directory, name):
images = []
tfImages = []
allImages = [image for image in listdir(directory) if isfile(join(directory, image))]#gets all images from images folder
for imageDir in allImages:
        if (imageDir[:2] == "._"):#sometimes listdir will pick up metadata files that aren't needed
continue
img = cv2.imread(directory + imageDir)
img = ((img - 127.5) / 127.5) #normalizes and converts images to -1 -> 1 range
images.append(img)
tfImages.append(tf.Variable(img))
np.save(name, images)
return tf.data.Dataset.from_tensor_slices((tfImages))
#load images from ready .npy file and cast to dataset
def loadImages(file):
arr = np.load(file)
return arr
def convolutLayer(inputs, outputShape):
return layers.conv2d(inputs=inputs, filters=outputShape, kernel_size=4, strides=(2, 2), padding="same",
data_format="channels_last", use_bias=True, bias_initializer=tf.constant_initializer(0),
kernel_initializer=tf.contrib.layers.xavier_initializer())
def deconvolutLayer(inputs, outputShape):
return layers.conv2d_transpose(inputs=inputs, filters=outputShape, kernel_size=4, strides=(2,2),
padding="same", data_format="channels_last", use_bias=True,
bias_initializer=tf.constant_initializer(0),
kernel_initializer=tf.contrib.layers.xavier_initializer())
def noise(size):
return np.random.normal(size=(size, 100))#noise is always 100 long
def denormalize(images):
    #change to 0 -> 1: ((x - currentMin) / (currentMax - currentMin)) * (newMax - newMin) + newMin
images = (images+1) / 2
return images
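# Minimal sanity-check sketch using only the helpers above and numpy; the
# generator network itself lives elsewhere in this project, so random data
# stands in for its output here.
if __name__ == "__main__":
    z = noise(4)  # 4 latent vectors of length 100
    fake = np.random.uniform(-1, 1, size=(4, 64, 64, 3))  # stand-in for generator output
    imgs = denormalize(fake)  # map from [-1, 1] back to [0, 1] for display/saving
    print(z.shape, float(imgs.min()), float(imgs.max()))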
|
{"hexsha": "b183257d048aabb8721410fabaf5fd47589c138b", "size": 2014, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/Obsolete-UsedDCGAN/ops.py", "max_stars_repo_name": "esslushy/AbstractArtGenerator", "max_stars_repo_head_hexsha": "48ebfee04673bc109fc202c3368dbbc80f0e7021", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2019-01-29T04:37:33.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-10T04:19:37.000Z", "max_issues_repo_path": "src/Obsolete-UsedDCGAN/ops.py", "max_issues_repo_name": "esslushy/AbstractArtGenerator", "max_issues_repo_head_hexsha": "48ebfee04673bc109fc202c3368dbbc80f0e7021", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/Obsolete-UsedDCGAN/ops.py", "max_forks_repo_name": "esslushy/AbstractArtGenerator", "max_forks_repo_head_hexsha": "48ebfee04673bc109fc202c3368dbbc80f0e7021", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.28, "max_line_length": 124, "alphanum_fraction": 0.6668321748, "include": true, "reason": "import numpy", "num_tokens": 465}
|
# Test of reduction on scalar backend
# Unit testing for reduction expressions on scalar backend
import Devectorize
import Devectorize.@devec
import Devectorize.@inspect_devec
import Devectorize.dump_devec
import Devectorize.sqr
using Base.Test
# data
a = [3., 4., 5., 6., 8., 7., 6., 5.]
b = [9., 8., 7., 6., 4., 2., 3., 1.]
c = [1., 2., 4., 3., 5., 7., 6., 8.]
abc = [a b c]
#################################################
#
# full reduction
#
#################################################
r = zeros(1)
@devec r = sum(a)
@test isequal(r, sum(a))
@devec r = sum(a[:,:])
@test isequal(r, sum(a))
@devec r = sum(abc[:,:])
@test isequal(r, sum(abc))
@devec r = max(a)
@test isequal(r, max(a))
@devec r = max(c)
@test isequal(r, max(c))
@devec r = min(a)
@test isequal(r, min(a))
@devec r = min(c)
@test isequal(r, min(c))
@devec r = mean(a)
@test isequal(r, mean(a))
@devec r = mean(abc[:,:])
@test isequal(r, mean(abc))
@devec r = dot(a, b)
@test isequal(r, dot(a, b))
@devec r = dot(a[:,:], b[:,:])
@test isequal(r, dot(a, b))
@devec r = dot(abc[:,:], abc)
@test isequal(r, dot(abc[:], abc[:]))
#################################################
#
# partial reduction
#
#################################################
@devec r = sum(abc, 1)
@test isequal(r, sum(abc, 1))
@devec r = sum(abc, 2)
@test isequal(r, sum(abc, 2))
r = zeros(size(abc, 2))
r0 = r
@devec r[:] = sum(abc, 1)
@test r === r0
@test isequal(r, vec(sum(abc, 1)))
r = zeros(size(abc, 1))
r0 = r
@devec r[:] = sum(abc, 2)
@test r === r0
@test isequal(r, vec(sum(abc, 2)))
@devec r = mean(abc, 1)
@test isequal(r, sum(abc, 1) / size(abc, 1))
@devec r = mean(abc, 2)
@test isequal(r, sum(abc, 2) / size(abc, 2))
@devec r = max(abc, (), 1)
@test isequal(r, max(abc, (), 1))
@devec r = max(abc, (), 2)
@test isequal(r, max(abc, (), 2))
@devec r = min(abc, (), 1)
@test isequal(r, min(abc, (), 1))
@devec r = min(abc, (), 2)
@test isequal(r, min(abc, (), 2))
@devec r = sum(sqr(abc), 1)
@test isequal(r, sum(abc .* abc, 1))
@devec r = sum(sqr(abc), 2)
@test isequal(r, sum(abc .* abc, 2))
|
{"hexsha": "58d53dddd3e458435a9793784817f803fdb7d107", "size": 2100, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/test_scalar_reduc.jl", "max_stars_repo_name": "jakebolewski/Devectorize.jl", "max_stars_repo_head_hexsha": "6e28d48cfdb70c0fe9622ac5a79942decdc73460", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2016-02-07T01:54:11.000Z", "max_stars_repo_stars_event_max_datetime": "2017-05-10T20:55:38.000Z", "max_issues_repo_path": "test/test_scalar_reduc.jl", "max_issues_repo_name": "jakebolewski/Devectorize.jl", "max_issues_repo_head_hexsha": "6e28d48cfdb70c0fe9622ac5a79942decdc73460", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/test_scalar_reduc.jl", "max_forks_repo_name": "jakebolewski/Devectorize.jl", "max_forks_repo_head_hexsha": "6e28d48cfdb70c0fe9622ac5a79942decdc73460", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 18.2608695652, "max_line_length": 61, "alphanum_fraction": 0.5185714286, "num_tokens": 765}
|
# -*- coding: utf-8 -*-
# *****************************************************************************
# ufit, a universal scattering fitting suite
#
# Copyright (c) 2013-2019, Georg Brandl and contributors. All rights reserved.
# Licensed under a 2-clause BSD license, see LICENSE.
# *****************************************************************************
"""Backend using plain scipy leastsq."""
from __future__ import absolute_import
from numpy import sqrt, inf
from scipy.optimize import leastsq
from ufit.param import prepare_params, update_params
from ufit.utils import get_chisqr
__all__ = ['do_fit', 'backend_name']
backend_name = 'scipy'
def do_fit(data, fcn, params, add_kw):
x, y, dy = data.fit_columns
meta = data.meta
varying, varynames, dependent, _ = prepare_params(params, meta)
def leastsqfcn(params, data):
pd = dict(zip(varynames, params))
update_params(dependent, meta, pd)
return (fcn(pd, x) - y) / dy
initpars = []
warned = False
for p in varying:
initpars.append(p.value)
if (p.pmin is not None or p.pmax is not None) and not warned:
print('Sorry, scipy backend cannot handle parameter bounds.')
warned = True
try:
res = leastsq(leastsqfcn, initpars, args=(data,), full_output=1, **add_kw)
except Exception as e:
return False, str(e), 0
popt, pcov, infodict, errmsg, ier = res
success = (ier in [1, 2, 3, 4])
nfree = len(y) - len(varying)
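    # leastsq's covariance assumes unit-variance residuals; scale it by the
    # reduced chi-square (s_sq) below to obtain meaningful parameter errors.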
if nfree > 0 and pcov is not None:
s_sq = (leastsqfcn(popt, data)**2).sum() / nfree
pcov = pcov * s_sq
else:
pcov = inf
pd = {}
for i, p in enumerate(varying):
pd[p.name] = popt[i]
if pcov is not inf:
p.error = sqrt(pcov[i, i])
else:
p.error = 0
p.correl = {} # XXX
update_params(dependent, meta, pd)
for p in params:
p.value = pd[p.name]
return success, errmsg, get_chisqr(fcn, x, y, dy, params)
|
{"hexsha": "4c2577a98a7ce9d52c332b8742023397d1b7cd65", "size": 2028, "ext": "py", "lang": "Python", "max_stars_repo_path": "ufit/backends/scipy.py", "max_stars_repo_name": "McStasMcXtrace/ufit", "max_stars_repo_head_hexsha": "02640e2b802bf6d42ae6829a1c1852b21c6fa9f7", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ufit/backends/scipy.py", "max_issues_repo_name": "McStasMcXtrace/ufit", "max_issues_repo_head_hexsha": "02640e2b802bf6d42ae6829a1c1852b21c6fa9f7", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ufit/backends/scipy.py", "max_forks_repo_name": "McStasMcXtrace/ufit", "max_forks_repo_head_hexsha": "02640e2b802bf6d42ae6829a1c1852b21c6fa9f7", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.9714285714, "max_line_length": 82, "alphanum_fraction": 0.567061144, "include": true, "reason": "from numpy,from scipy", "num_tokens": 542}
|
#!/usr/bin/env python
# -*- coding: utf8
from __future__ import division, print_function
import numpy as np
import os
import pandas as pd
import plac
import tables
def get_above(time_series):
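    # keep only time windows with popularity >= 20 and return the
    # duplicates-to-audience ratio for them (inf where the audience is zero)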
pops = time_series[:, 0]
dups = time_series[:, 1]
audi = time_series[:, 2]
dups = dups[pops >= 20]
audi = audi[pops >= 20]
return dups / audi
def print_results(results_array):
    n = results_array.shape[0]
    # entries equal to inf correspond to time windows with zero audience
    msk_finite = results_array != np.inf
    num_finite = msk_finite.sum()
    no_infs = results_array[msk_finite]
    print('Num time windows', n)
    print('% of time windows with 0 audience', 1 - (num_finite / n))
    print('mean of dups/audience', no_infs.mean())
    print('median of dups/audience', np.median(no_infs))
    print('25perc of dups/audience', np.percentile(no_infs, 25) if n > 2 else np.nan)
    print('75perc of dups/audience', np.percentile(no_infs, 75) if n > 2 else np.nan)
    print('std of dups/audience', no_infs.std())
    print()
def main(input_fpath, out_folder):
store = pd.HDFStore(input_fpath)
h_results = []
d_results = []
w_results = []
m_results = []
for obj_key in store.keys():
h_frame = store[obj_key]
d_frame = h_frame.resample('1d', how='sum')
w_frame = d_frame.resample('1W', how='sum')
m_frame = w_frame.resample('1M', how='sum')
h_time_series = h_frame.values
d_time_series = d_frame.values
w_time_series = w_frame.values
m_time_series = m_frame.values
h_results.extend(get_above(h_time_series))
d_results.extend(get_above(d_time_series))
w_results.extend(get_above(w_time_series))
m_results.extend(get_above(m_time_series))
store.close()
#infs mean that no unique visitors appeared in that time window
h_results = np.asarray(h_results)
d_results = np.asarray(d_results)
w_results = np.asarray(w_results)
m_results = np.asarray(m_results)
print('Hourly')
print_results(h_results)
print('Daily')
print_results(d_results)
print('Weekly')
print_results(w_results)
print('Monthly')
print_results(m_results)
if __name__ == '__main__':
plac.call(main)
|
{"hexsha": "d2ecd7d1a16d94cd59990aa3ca8f08b7afecd5c7", "size": 2256, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/revisits_over_time.py", "max_stars_repo_name": "flaviovdf/phoenix", "max_stars_repo_head_hexsha": "59177657f13337b14d1fe27527a9b09c2c1c1419", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-06-24T17:42:10.000Z", "max_stars_repo_stars_event_max_datetime": "2019-06-24T17:42:10.000Z", "max_issues_repo_path": "scripts/revisits_over_time.py", "max_issues_repo_name": "flaviovdf/phoenix", "max_issues_repo_head_hexsha": "59177657f13337b14d1fe27527a9b09c2c1c1419", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scripts/revisits_over_time.py", "max_forks_repo_name": "flaviovdf/phoenix", "max_forks_repo_head_hexsha": "59177657f13337b14d1fe27527a9b09c2c1c1419", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.2325581395, "max_line_length": 85, "alphanum_fraction": 0.6493794326, "include": true, "reason": "import numpy", "num_tokens": 601}
|
#!/usr/bin/env python
"""SimPEG: Simulation and Parameter Estimation in Geophysics
SimPEG is a python package for simulation and gradient based
parameter estimation in the context of geophysical applications.
"""
import numpy as np
import os
import sys
import subprocess
from distutils.core import setup
from setuptools import find_packages
from distutils.extension import Extension
CLASSIFIERS = [
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Scientific/Engineering :: Physics',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS',
'Natural Language :: English',
]
args = sys.argv[1:]
# Make a `cleanall` rule to get rid of intermediate and library files
if "cleanall" in args:
print "Deleting cython files..."
# Just in case the build directory was created by accident,
# note that shell=True should be OK here because the command is constant.
subprocess.Popen("rm -rf build", shell=True, executable="/bin/bash")
subprocess.Popen("find . -name \*.c -type f -delete", shell=True, executable="/bin/bash")
subprocess.Popen("find . -name \*.so -type f -delete", shell=True, executable="/bin/bash")
# Now do a normal clean
sys.argv[sys.argv.index('cleanall')] = "clean"
# We want to always use build_ext --inplace
if args.count("build_ext") > 0 and args.count("--inplace") == 0:
sys.argv.insert(sys.argv.index("build_ext")+1, "--inplace")
try:
from Cython.Build import cythonize
from Cython.Distutils import build_ext
cythonKwargs = dict(cmdclass={'build_ext': build_ext})
USE_CYTHON = True
except Exception:
USE_CYTHON = False
cythonKwargs = dict()
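# Build from the .pyx sources when Cython is available; otherwise fall back to
# the pre-generated C files.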
ext = '.pyx' if USE_CYTHON else '.c'
cython_files = [
"SimPEG/Utils/interputils_cython",
"SimPEG/Mesh/TreeUtils"
]
extensions = [Extension(f, [f+ext]) for f in cython_files]
scripts = [f+'.pyx' for f in cython_files]
if USE_CYTHON and "cleanall" not in args:
from Cython.Build import cythonize
extensions = cythonize(extensions)
import os, os.path
with open("README.rst") as f:
LONG_DESCRIPTION = ''.join(f.readlines())
setup(
name = "SimPEG",
version = "0.1.9",
packages = find_packages(),
install_requires = ['numpy>=1.7',
'scipy>=0.13',
'Cython'
],
author = "Rowan Cockett",
author_email = "rowan@3ptscience.com",
description = "SimPEG: Simulation and Parameter Estimation in Geophysics",
long_description = LONG_DESCRIPTION,
license = "MIT",
keywords = "geophysics inverse problem",
url = "http://simpeg.xyz/",
download_url = "http://github.com/simpeg/simpeg",
classifiers=CLASSIFIERS,
platforms = ["Windows", "Linux", "Solaris", "Mac OS-X", "Unix"],
use_2to3 = False,
include_dirs=[np.get_include()],
ext_modules = extensions,
scripts=scripts,
**cythonKwargs
)
|
{"hexsha": "bcb5b8e364009b2572c9670aa5e4674d6ba1b521", "size": 3218, "ext": "py", "lang": "Python", "max_stars_repo_path": "setup.py", "max_stars_repo_name": "KyuboNoh/HY", "max_stars_repo_head_hexsha": "8ba9815137c2cff2f1931a1940e1b762e8df0b02", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-11-27T03:26:22.000Z", "max_stars_repo_stars_event_max_datetime": "2020-11-27T03:26:22.000Z", "max_issues_repo_path": "setup.py", "max_issues_repo_name": "KyuboNoh/HY", "max_issues_repo_head_hexsha": "8ba9815137c2cff2f1931a1940e1b762e8df0b02", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "setup.py", "max_forks_repo_name": "KyuboNoh/HY", "max_forks_repo_head_hexsha": "8ba9815137c2cff2f1931a1940e1b762e8df0b02", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.5490196078, "max_line_length": 94, "alphanum_fraction": 0.671535115, "include": true, "reason": "import numpy", "num_tokens": 793}
|
import os
import glob
import random
import time
import h5py
import numpy as np
import torch
import torch.nn.functional as F
from torchvision import transforms as T
from torch.utils.data import Dataset
import torchvision.transforms.functional as tf
def data_augmentation(images):
mode = np.random.randint(0, 5)
if mode == 0:
# random brightness
brightness_factor = 1.0 + random.uniform(-0.2, 0.3)
xi = tf.adjust_brightness(images, brightness_factor)
elif mode == 1:
# random saturation
saturation_factor = 1.0 + random.uniform(-0.2, 0.5)
xi = tf.adjust_saturation(images, saturation_factor)
elif mode == 2:
# random hue
hue_factor = random.uniform(-0.2, 0.2)
xi = tf.adjust_hue(images, hue_factor)
elif mode == 3:
# random contrast
contrast_factor = 1.0 + random.uniform(-0.2, 0.4)
xi = tf.adjust_contrast(images, contrast_factor)
else:
xi = images
return xi
def random_resized_crop_and_adjust_intrinsics(
images, intrinsics, crop_h, crop_w,
min_scale=1, max_scale=1):
"""Randomly resize and crop images, and adjust intrinsics accordingly.
Args:
images: [..., H, W] images
intrinsics: [..., 3, 3] camera intrinsics
crop_h: (int) height of output crops
crop_w: (int) width of output crops
min_scale: (float) minimum scale factor
max_scale: (float) maximum scale factor
Returns:
Randomly resized and cropped images and according intrinsics
"""
im_size = torch.tensor(images.shape[-2:])
if min_scale == 1.0 and max_scale == 1.0:
scale_factor = 1
else:
scale_factor = np.random.uniform(min_scale, max_scale)
scaled_size = (scale_factor * im_size).int()
offset_limit = scaled_size - torch.tensor([crop_h, crop_w]) + 1
offset_y = np.random.randint(0, offset_limit[0])
offset_x = np.random.randint(0, offset_limit[1])
cropped_images, cropped_intrinsics = crop_image_and_adjust_intrinsics(
images, intrinsics, scale_factor,
offset_y, offset_x, crop_h, crop_w)
return cropped_images, cropped_intrinsics
def crop_image_and_adjust_intrinsics(
image, intrinsics, scale, offset_y, offset_x,
crop_h, crop_w):
"""Resize, crop images and adjust instrinsics accordingly.
Args:
image: [..., H, W] images
intrinsics: [..., 3, 3] camera intrinsics
scale: scale factor for resizing
offset_y: y-offset in pixels from top of image
offset_x: x-offset in pixels from left of image
crop_h: height of region to be cropped
crop_w: width of region to be cropped
Returns:
[..., crop_h, crop_w] cropped images,
[..., 3, 3] adjusted intrinsics
"""
im_size = image.shape[-2:]
resized_images = tf.resize(image, (int(scale*im_size[0]), int(scale*im_size[1])))
cropped_images = tf.crop(resized_images,
offset_y, offset_x, crop_h, crop_w)
cropped_intrinsics = intrinsics.clone()
cropped_intrinsics[..., :2, :] *= scale
cropped_intrinsics[..., :2, -1] -= torch.tensor([offset_x, offset_y])
return cropped_images, cropped_intrinsics
def random_subsequence(seq, length, min_stride=1, max_stride=1):
"""Returns a random subsequence with min_stride <= stride <= max_stride.
For example if self.length = 4 and we ask for a length 2
sequence (with default min/max_stride=1), there are three possibilities:
[0,1], [1,2], [2,3].
Args:
seq: list of image sequence indices
length: the length of the subsequence to be returned.
min_stride: the minimum stride (> 0) between elements of the sequence
max_stride: the maximum stride (> 0) between elements of the sequence
Returns:
A random, uniformly chosen subsequence of the requested length
and stride.
"""
# First pick a stride.
if max_stride == min_stride:
stride = min_stride
else:
stride = np.random.randint(min_stride, max_stride+1)
# Now pick the starting index.
# If the subsequence starts at index i, then its final element will be at
# index i + (length - 1) * stride, which must be less than the length of
# the sequence. Therefore i must be less than maxval, where:
maxval = len(seq) - (length - 1) * stride
start = np.random.randint(0, maxval)
end = start + 1 + (length - 1) * stride
return seq[start:end:stride]
class RealEstateDataset(Dataset):
def __init__(self, data_path, filename,
num_frames=10,
min_stride=3,
max_stride=10,
num_sources=2,
split='train'):
self.data_path = data_path
self.filename = filename
self.num_frames = num_frames
self.min_stride = min_stride
self.max_stride = max_stride
self.num_sources = num_sources
self.split = split
self.define_transforms()
self._init_dataset()
def _init_dataset(self):
self.img_path = os.path.join(self.data_path, self.filename)
filename = os.path.split(self.img_path)[-1]
w, h = [int(x) for x in filename[:-3].split('x')]
self.w = w
self.h = h
all_ints = np.load(os.path.join(self.data_path, 'int.npy'), allow_pickle=True)
all_ints = [np.array(x) for x in all_ints]
all_exts = np.load(os.path.join(self.data_path, 'ext.npy'), allow_pickle=True)
all_exts = [np.array(x) for x in all_exts]
# self.total_scene_count = len(all_ints)
self.usable_scenes = []
required_length = (self.num_frames - 1) * self.max_stride + 1
self.ints = []
self.exts = []
for idx, (i, e) in enumerate(zip(all_ints, all_exts)):
assert len(i) == len(e)
if len(i) > required_length:
self.usable_scenes.append(idx)
self.ints.append(i)
self.exts.append(e)
self.total_scene_count = len(self.ints)
for scene in self.ints:
            # Scale intrinsics from normalized image coordinates to pixels
scene[:, :1, :] *= w
scene[:, 1:2, :] *= h
self.total_img_count = sum([len(x) for x in self.ints])
print(f'Read {self.total_scene_count} scenes, {self.total_img_count} images')
def define_transforms(self):
self.transforms = T.ToTensor()
def __getitem__(self, idx):
# Select input/output frame
img_count = len(self.ints[idx])
indices = [x for x in range(img_count)]
subseq = random_subsequence(indices, self.num_frames, self.min_stride, self.max_stride)
subseq = np.random.permutation(subseq)
src_cams = subseq[:self.num_sources]
tgt_cam = subseq[self.num_sources]
# Read data
with h5py.File(self.img_path, 'r') as hf:
scene_idx = self.usable_scenes[idx]
src_imgs = [self.transforms(hf[str(scene_idx)][x])[:3] for x in src_cams]
src_imgs = torch.stack(src_imgs, 0)
tgt_img = self.transforms(hf[str(scene_idx)][tgt_cam])[:3]
src_exts = [torch.FloatTensor(self.exts[idx][x]) for x in src_cams]
src_exts = torch.stack(src_exts, 0)
tgt_ext = torch.FloatTensor(self.exts[idx][tgt_cam])
src_ints = [torch.FloatTensor(self.ints[idx][x]) for x in src_cams]
src_ints = torch.stack(src_ints, 0)
tgt_int = torch.FloatTensor(self.ints[idx][tgt_cam])
if self.split == 'train':
img_seq = torch.cat([src_imgs, tgt_img.unsqueeze(0)], 0)
img_ints = torch.cat([src_ints, tgt_int.unsqueeze(0)], 0)
# TODO remove this
new_seq, new_ints = random_resized_crop_and_adjust_intrinsics(
img_seq, img_ints, 256, 256, 1, 2)
new_seq = data_augmentation(new_seq)
src_imgs, tgt_img = torch.split(new_seq, (self.num_sources, 1))
src_ints, tgt_int = torch.split(new_ints, (self.num_sources, 1))
tgt_img = tgt_img[0]
tgt_int = tgt_int[0]
return dict(
src_imgs=src_imgs,
src_exts=src_exts,
src_ints=src_ints,
tgt_ext=tgt_ext,
tgt_int=tgt_int,
tgt_rgb=tgt_img
)
def __len__(self):
return self.total_scene_count
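# Minimal usage sketch. The data root, archive name and loader settings below
# are assumptions for illustration only; point them at the actual preprocessed
# dataset (int.npy, ext.npy and a "<width>x<height>.h5" image archive) before
# running.
if __name__ == "__main__":
    from torch.utils.data import DataLoader
    dataset = RealEstateDataset(data_path="data/realestate",  # hypothetical directory
                                filename="640x360.h5",        # hypothetical image archive
                                num_frames=10, num_sources=2, split="train")
    loader = DataLoader(dataset, batch_size=2, shuffle=True, num_workers=0)
    batch = next(iter(loader))
    print({k: tuple(v.shape) for k, v in batch.items()})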
|
{"hexsha": "7782a17bb51979df4e358a6ef69ef34b68eaa65a", "size": 8370, "ext": "py", "lang": "Python", "max_stars_repo_path": "train_mpi/datasets/realestate.py", "max_stars_repo_name": "ken2576/deep-3dmask", "max_stars_repo_head_hexsha": "00c12af81ee48b5d0e612fa0f17395284d23fcc2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 13, "max_stars_repo_stars_event_min_datetime": "2021-10-03T10:56:04.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-26T06:14:45.000Z", "max_issues_repo_path": "train_mpi/datasets/realestate.py", "max_issues_repo_name": "ken2576/deep-3dmask", "max_issues_repo_head_hexsha": "00c12af81ee48b5d0e612fa0f17395284d23fcc2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2021-10-13T07:07:59.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-07T00:17:46.000Z", "max_forks_repo_path": "train_mpi/datasets/realestate.py", "max_forks_repo_name": "ken2576/deep-3dmask", "max_forks_repo_head_hexsha": "00c12af81ee48b5d0e612fa0f17395284d23fcc2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2022-01-04T13:41:02.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-19T04:31:25.000Z", "avg_line_length": 36.872246696, "max_line_length": 95, "alphanum_fraction": 0.6249701314, "include": true, "reason": "import numpy", "num_tokens": 2124}
|
module Test.FmtTest
import IdrTest.Test
import IdrTest.Expectation
import Fmt
simpleTest : Test
simpleTest =
test "Simple test" (\_ => assertEq
(fmt "Hello")
"Hello"
)
stringTest : Test
stringTest =
test "String test" (\_ => assertEq
(fmt "Hello %s" "world")
"Hello world"
)
intTest : Test
intTest =
test "String test" (\_ => assertEq
(fmt "Health %d" 99)
"Health 99"
)
mixedTest : Test
mixedTest =
test "String test" (\_ => assertEq
(fmt "Name %s Age %d" "Thomas" 50)
"Name Thomas Age 50"
)
export
suite : Test
suite =
describe "Fmt Tests"
[ simpleTest
, stringTest
, intTest
, mixedTest
]
|
{"hexsha": "05a4c811ad71de6b765bd43adc9713a122c64d08", "size": 668, "ext": "idr", "lang": "Idris", "max_stars_repo_path": "Base/Fmt/Test/FmtTest.idr", "max_stars_repo_name": "Z-snails/inigo", "max_stars_repo_head_hexsha": "57f5b5c051222d8c630010a0a3cf7d7138910127", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 34, "max_stars_repo_stars_event_min_datetime": "2020-12-25T05:28:11.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-01T09:26:18.000Z", "max_issues_repo_path": "Base/Fmt/Test/FmtTest.idr", "max_issues_repo_name": "Z-snails/inigo", "max_issues_repo_head_hexsha": "57f5b5c051222d8c630010a0a3cf7d7138910127", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 22, "max_issues_repo_issues_event_min_datetime": "2021-01-05T16:35:27.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-12T18:24:30.000Z", "max_forks_repo_path": "Base/Fmt/Test/FmtTest.idr", "max_forks_repo_name": "Z-snails/inigo", "max_forks_repo_head_hexsha": "57f5b5c051222d8c630010a0a3cf7d7138910127", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 10, "max_forks_repo_forks_event_min_datetime": "2021-01-13T07:44:45.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-20T03:58:38.000Z", "avg_line_length": 14.8444444444, "max_line_length": 38, "alphanum_fraction": 0.6152694611, "num_tokens": 215}
|
import copy
import json
import numpy
import cepton_sdk.common.transform
from cepton_sdk.common import *
_all_builder = AllBuilder(__name__)
def _convert_keys_to_int(d, ignore_invalid=False):
d_int = {}
for key, value in d.items():
try:
key = int(key)
except:
if ignore_invalid:
continue
else:
raise
d_int[key] = value
return d_int
def _convert_keys_to_string(d):
return {str(key): value for (key, value) in d.items()}
def _get_pretty_json(d):
d = _convert_keys_to_string(d)
return json.dumps(d, sort_keys=True, indent=2, separators=(',', ': '))
def _save_pretty_json(d, f):
f.write(_get_pretty_json(d))
class _ManagerBase:
def update_from_dict(self, input_dict):
raise NotImplementedError()
def to_dict(self):
raise NotImplementedError()
def update_from_json(self, input_json):
input_dict = input_json
self.update_from_dict(input_dict)
@classmethod
def from_json(cls, input_json):
self = cls()
self.update_from_json(input_json)
return self
def to_json(self):
input_dict = self.to_dict()
input_json = _convert_keys_to_string(input_dict)
return input_json
def update_from_file(self, input_file):
input_json = json.load(input_file)
self.update_from_json(input_json)
@classmethod
def from_file(cls, input_file):
self = cls()
self.update_from_file(input_file)
return self
def to_file(self, output_file):
output_json = self.to_json()
_save_pretty_json(output_json, output_file)
def process_sensor_points(self, sensor_serial_number, points):
raise NotImplementedError
def process_points(self, points_dict):
for sensor_serial_number, points in points_dict.items():
self.process_sensor_points(sensor_serial_number, points)
return points_dict
class SensorTransformManager(_ManagerBase):
def __init__(self):
self.transforms = {}
def update_from_dict(self, transforms_dict):
for key, transform_dict in transforms_dict.items():
try:
sensor_serial_number = int(key)
except:
continue
transform = cepton_sdk.common.transform.Transform3d()
transform.translation = \
numpy.array(transform_dict["translation"], dtype=float)
rotation = numpy.array(transform_dict["rotation"], dtype=float)
transform.rotation = \
cepton_sdk.common.transform.Quaternion.from_vector(rotation)
self.transforms[sensor_serial_number] = transform
def to_dict(self):
transforms_dict = {}
for sensor_serial_number, transform in self.transforms.items():
transform_dict = {}
transform_dict["translation"] = transform.translation.tolist()
transform_dict["rotation"] = transform.rotation.to_vector().tolist()
transforms_dict[sensor_serial_number] = transform_dict
return transforms_dict
def process_sensor_points(self, sensor_serial_number, points):
if sensor_serial_number not in self.transforms:
return points
if len(points) == 0:
return points
transform = self.transforms[sensor_serial_number]
points.positions[:, :] = transform.apply(points.positions)
return points
class SensorClip:
def __init__(self):
self.distance_lb = -numpy.inf
self.distance_ub = numpy.inf
self.image_lb = numpy.full([2], -numpy.inf)
self.image_ub = numpy.full([2], numpy.inf)
@classmethod
def from_dict(cls, d):
self = cls()
if "min_distance" in d:
self.distance_lb = d["min_distance"]
if "max_distance" in d:
self.distance_ub = d["max_distance"]
if "min_image_x" in d:
self.image_lb[0] = d["min_image_x"]
if "max_image_x" in d:
self.image_ub[0] = d["max_image_x"]
if "min_image_z" in d:
self.image_lb[1] = d["min_image_z"]
if "max_image_z" in d:
self.image_ub[1] = d["max_image_z"]
return self
def find_points(self, points):
if len(points) == 0:
return numpy.array([], dtype=bool)
return numpy.logical_or.reduce([
points.distances <= self.distance_lb,
points.distances > self.distance_ub,
numpy.any(points.image_positions < self.image_lb, axis=-1),
numpy.any(points.image_positions > self.image_ub, axis=-1),
])
class FocusClip:
def __init__(self):
self.lb = numpy.full([3], -numpy.inf)
self.ub = numpy.full([3], numpy.inf)
@classmethod
def from_dict(cls, d):
self = cls()
if "min_x" in d:
self.lb[0] = d["min_x"]
if "max_x" in d:
self.ub[0] = d["max_x"]
if "min_y" in d:
self.lb[1] = d["min_y"]
if "max_y" in d:
self.ub[1] = d["max_y"]
if "min_z" in d:
self.lb[2] = d["min_z"]
if "max_z" in d:
self.ub[2] = d["max_z"]
return self
def find_points(self, points):
if len(points) == 0:
return numpy.array([], dtype=bool)
return numpy.logical_or.reduce([
numpy.any(points.positions < self.lb, axis=-1),
numpy.any(points.positions > self.ub, axis=-1),
])
class GroundClip:
def __init__(self):
self.height = numpy.inf
self.distance_ub = 0
@classmethod
def from_dict(cls, d):
self = cls()
if "height" in d:
self.height = d["height"]
if "max_distance" in d:
self.distance_ub = d["max_distance"]
return self
def find_points(self, points):
if len(points) == 0:
return numpy.array([], dtype=bool)
return numpy.logical_and.reduce([
points.positions[:, 2] < self.height,
points.distances < self.distance_ub,
])
class SensorClipManager(_ManagerBase):
def __init__(self):
self.focus_clip = FocusClip()
self.ground_clip = GroundClip()
self.clips = {}
def update_from_dict(self, d):
for key, d_tmp in d.items():
if key == "focus":
self.focus_clip = FocusClip.from_dict(d_tmp)
elif key == "ground":
self.ground_clip = GroundClip.from_dict(d_tmp)
else:
try:
sensor_serial_number = int(key)
                except (TypeError, ValueError):  # skip keys that are not sensor serial numbers
continue
self.clips[sensor_serial_number] = SensorClip.from_dict(d_tmp)
def process_sensor_points(self, sensor_serial_number, points):
if len(points) == 0:
return points
is_clipped_list = [
self.focus_clip.find_points(points),
self.ground_clip.find_points(points),
]
if sensor_serial_number in self.clips:
is_clipped_list.append(
self.clips[sensor_serial_number].find_points(points))
is_clipped = numpy.logical_or.reduce(is_clipped_list)
points.flags[is_clipped, cepton_sdk.PointFlag.VALID] = False
return points
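# A hedged sketch (not part of the original module) of the settings dictionary that
# SensorClipManager.update_from_dict consumes: the "focus" and "ground" entries map to
# FocusClip and GroundClip, and any integer-like key is treated as a per-sensor SensorClip.
# The serial number and bounds below are hypothetical:
#
#   clip_manager = SensorClipManager()
#   clip_manager.update_from_dict({
#       "focus": {"min_x": -50.0, "max_x": 50.0},
#       "ground": {"height": -1.0, "max_distance": 30.0},
#       "10001": {"min_distance": 0.5, "max_image_x": 0.8},
#   })
#   points = clip_manager.process_sensor_points(10001, points)  # clipped points get their VALID flag cleared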
__all__ = _all_builder.get()
|
{"hexsha": "ffb1b066119712f4df04846c2b225ad40eebb3a7", "size": 7420, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/cepton_sdk/settings.py", "max_stars_repo_name": "Ly0n/cepton_sdk_redist", "max_stars_repo_head_hexsha": "5b4bf24edadb4fdaf9b8149a70c60d207922a1ad", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-08-07T23:16:10.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-10T14:37:38.000Z", "max_issues_repo_path": "python/cepton_sdk/settings.py", "max_issues_repo_name": "Ly0n/cepton_sdk_redist", "max_issues_repo_head_hexsha": "5b4bf24edadb4fdaf9b8149a70c60d207922a1ad", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "python/cepton_sdk/settings.py", "max_forks_repo_name": "Ly0n/cepton_sdk_redist", "max_forks_repo_head_hexsha": "5b4bf24edadb4fdaf9b8149a70c60d207922a1ad", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-03-19T17:19:32.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-19T17:19:32.000Z", "avg_line_length": 29.68, "max_line_length": 80, "alphanum_fraction": 0.5971698113, "include": true, "reason": "import numpy", "num_tokens": 1633}
|
from __future__ import absolute_import
from copy import deepcopy
import torch
import numpy as np
import pandas as pd
from .utils import get_transform
from .random_noise import label_noise, image_noise
from .datasets import CIFAR10, CIFAR100, Nexperia, Nexperia_eval
from myImageFolder import MyImageFolder
from concatDataset import ConcatDataset
from torchvision import transforms
import os
def get_loader(args, data_aug=True):
tform_train = get_transform(args, train=True, data_aug=data_aug)
tform_test = get_transform(args, train=False, data_aug=data_aug)
if args.dataset == 'cifar10':
clean_train_set = CIFAR10(root=args.data_root, train=True, download=True, transform=tform_train)
test_set = CIFAR10(root=args.data_root, train=False, download=True, transform=tform_test)
elif args.dataset == 'cifar100':
clean_train_set = CIFAR100(root=args.data_root, train=True, download=True, transform=tform_train)
test_set = CIFAR100(root=args.data_root, train=False, download=True, transform=tform_test)
elif args.dataset == 'nexperia':
image_datasets = {x: MyImageFolder(os.path.join(args.data_root, x),
tform_train)
for x in ['train', 'val', 'test']}
combined_dataset = ConcatDataset([image_datasets[x] for x in ['train', 'val', 'test']])
dataloader = torch.utils.data.DataLoader(combined_dataset, batch_size=args.batch_size,
shuffle=True, num_workers=4)
return dataloader, 10, np.load('files/targets.npy')
elif args.dataset=='nexperia_split':
data_transforms = {
'train': tform_train,
'val': tform_test,
'test': tform_test
}
image_datasets = {x: MyImageFolder(os.path.join(args.data_root, x),
data_transforms[x])
for x in ['train', 'val', 'test']}
dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=args.batch_size,
shuffle=True, num_workers=4)
for x in ['train', 'val', 'test']}
return dataloaders['train'], dataloaders['val'], dataloaders['test'], 10, np.load('files/targets.npy')
elif args.dataset=='nexperia_train':
data_transforms = {
'train': tform_train,
'val': tform_test,
'test': tform_test
}
image_datasets = {'train': Nexperia(args.data_root, args.train_set, data_transforms['train']),
'val': Nexperia(args.data_root, args.val_set, data_transforms['val']),
'test': Nexperia(args.data_root, args.test_set, data_transforms['test'])}
dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=args.batch_size,
shuffle=True, num_workers=4)
for x in ['train', 'val', 'test']}
        train_labels = pd.read_csv(os.path.join(args.data_root, args.train_set),
                                   header=None, squeeze=True).str.rsplit(' ', n=1, expand=True).values[:,1].astype(int) - 1
        val_labels = pd.read_csv(os.path.join(args.data_root, args.val_set),
                                 header=None, squeeze=True).str.rsplit(' ', n=1, expand=True).values[:,1].astype(int) - 1
        test_labels = pd.read_csv(os.path.join(args.data_root, args.test_set),
                                  header=None, squeeze=True).str.rsplit(' ', n=1, expand=True).values[:,1].astype(int) - 1
labels = np.concatenate((train_labels, val_labels, test_labels))
return dataloaders['train'], dataloaders['val'], dataloaders['test'], len(image_datasets['train'].classes), train_labels, val_labels, test_labels, labels, image_datasets['train'].class_to_idx['Pass']
elif args.dataset=='nexperia_eval':
image_dataset = Nexperia_eval(args.data_root, args.val_set, tform_test)
dataloader = torch.utils.data.DataLoader(image_dataset, collate_fn=collate_fn, batch_size=args.batch_size, shuffle=True, num_workers=4)
if 'csv' in args.val_set:
val_labels = pd.read_csv(
os.path.join(args.data_root, args.val_set), squeeze=True)['label'].map(image_dataset.class_to_idx).values
elif 'txt' in args.val_set:
            val_labels = pd.read_csv(os.path.join(args.data_root, args.val_set),
                                     header=None, squeeze=True).str.rsplit(' ', n=1, expand=True).values[:,1].astype(int) - 1
else:
raise KeyError("Val set {} is not supported.".format(args.val_set))
return dataloader, len(image_dataset.classes), val_labels, image_dataset.class_to_idx['Pass']
elif args.dataset=='nexperia_merge':
data_transforms = {
'train': tform_train,
'val': tform_test,
'test': tform_test
}
image_datasets_1 = {x: MyImageFolder(os.path.join(args.data_root_1, x),
data_transforms[x])
for x in ['train', 'val', 'test']}
image_datasets_2 = {x: MyImageFolder(os.path.join(args.data_root_2, x),
data_transforms[x])
for x in ['train', 'val', 'test']}
if args.data_root_3 is not None:
image_datasets_3 = {x: MyImageFolder(args.data_root_3, data_transforms[x])
for x in ['train', 'val', 'test']}
image_datasets = {x: ConcatDataset((image_datasets_1[x], image_datasets_2[x], image_datasets_3[x]))
for x in ['train', 'val', 'test']}
train_set = np.loadtxt(args.train_set, dtype=str)[:,0]
val_set = np.loadtxt(args.val_set, dtype=str)[:,0]
test_set = np.loadtxt(args.test_set, dtype=str)[:,0]
train_indices = []
val_indices = []
test_indices = []
for i in range(len(image_datasets['train'])):
if image_datasets['train'][i][-1]!=2 or image_datasets['train'][i][0][-1][
len('/import/home/share/SourceData/DownSampled/'):] in train_set:
train_indices.append(i)
for i in range(len(image_datasets['val'])):
if image_datasets['val'][i][-1]!=2 or image_datasets['val'][i][0][-1][
len('/import/home/share/SourceData/DownSampled/'):] in val_set:
val_indices.append(i)
for i in range(len(image_datasets['test'])):
if image_datasets['test'][i][-1]!=2 or image_datasets['test'][i][0][-1][
len('/import/home/share/SourceData/DownSampled/'):] in test_set:
test_indices.append(i)
samplers = {'train': torch.utils.data.SubsetRandomSampler(list(train_indices)),
'val': torch.utils.data.SubsetRandomSampler(list(val_indices)),
'test': torch.utils.data.SubsetRandomSampler(list(test_indices))}
dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=args.batch_size, sampler=samplers[x], num_workers=4)
for x in ['train', 'val', 'test']}
else:
image_datasets = {x: ConcatDataset((image_datasets_1[x], image_datasets_2[x]))
for x in ['train', 'val', 'test']}
dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=args.batch_size, shuffle=True, num_workers=4)
for x in ['train', 'val', 'test']}
return dataloaders['train'], dataloaders['val'], dataloaders['test'], 11, image_datasets
elif args.dataset=='nexperia_month':
return
else:
raise ValueError("Dataset `{}` is not supported yet.".format(args.dataset))
if args.noise_rate > 0:
noisy_train_set = deepcopy(clean_train_set)
'''corrupt the dataset'''
if args.noise_type == 'corrupted_label':
label_noise(noisy_train_set, args)
elif args.noise_type in ['Gaussian', 'random_pixels', 'shuffled_pixels']:
image_noise(noisy_train_set, args)
else:
raise ValueError("Noise type {} is not supported yet.".format(args.noise_type))
train_set = noisy_train_set
else:
print("Using clean dataset.")
train_set = clean_train_set
num_train = int(len(train_set) * 0.9)
train_idx = list(range(num_train))
val_idx = list(range(num_train, len(train_set)))
if args.train_sets == 'trainval':
train_loader = torch.utils.data.DataLoader(
train_set, batch_size=args.batch_size, shuffle=True,
num_workers=args.workers, pin_memory=True)
elif args.train_sets == 'train':
train_subset = torch.utils.data.Subset(train_set, train_idx)
train_loader = torch.utils.data.DataLoader(
train_subset, batch_size=args.batch_size, shuffle=True,
num_workers=args.workers, pin_memory=True)
else:
raise KeyError("Train sets {} is not supported.".format(args.train_sets))
# for validation, we need to disable the data augmentation
clean_train_set_for_val = deepcopy(clean_train_set)
clean_train_set_for_val.transform = tform_test
if args.noise_rate > 0:
noisy_train_set_for_val = deepcopy(noisy_train_set)
noisy_train_set_for_val.transform = tform_test
val_sets = []
if 'clean_set' in args.val_sets:
val_sets.append(clean_train_set_for_val)
if 'noisy_set' in args.val_sets:
val_sets.append(noisy_train_set_for_val)
if 'test_set' in args.val_sets:
val_sets.append(test_set)
if 'clean_train' in args.val_sets:
val_sets.append(torch.utils.data.Subset(clean_train_set_for_val, train_idx))
if 'noisy_train' in args.val_sets:
val_sets.append(torch.utils.data.Subset(noisy_train_set_for_val, train_idx))
if 'clean_val' in args.val_sets:
val_sets.append(torch.utils.data.Subset(clean_train_set_for_val, val_idx))
if 'noisy_val' in args.val_sets:
val_sets.append(torch.utils.data.Subset(noisy_train_set_for_val, val_idx))
val_loaders = [
torch.utils.data.DataLoader(
val_set, batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True)
for val_set in val_sets
]
test_loader = torch.utils.data.DataLoader(
test_set, batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True)
return train_loader, val_loaders, test_loader, train_set.num_classes, np.asarray(train_set.targets)
def collate_fn(batch):
batch = list(filter(lambda x: x is not None, batch))
return torch.utils.data.dataloader.default_collate(batch)
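# A hedged sketch (not part of the original module) of the arguments that the cifar10
# branch of get_loader reads directly; the attribute values below are hypothetical and
# get_transform may require additional fields not listed here.
def _example_cifar10_args():
    import argparse
    return argparse.Namespace(
        dataset='cifar10', data_root='./data', noise_rate=0.2,
        noise_type='corrupted_label', train_sets='train',
        val_sets=['noisy_train', 'clean_val', 'test_set'],
        batch_size=128, workers=4)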
|
{"hexsha": "5c52fdc3378dbf2fca2ae7d5ad17ccd715579251", "size": 11192, "ext": "py", "lang": "Python", "max_stars_repo_path": "datasets/loaders.py", "max_stars_repo_name": "huangkaiyikatherine/nexperia_new", "max_stars_repo_head_hexsha": "b9b5c35d989883f8b29280726bfbb0fc84e62b10", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-11-09T13:45:51.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-25T12:00:11.000Z", "max_issues_repo_path": "datasets/loaders.py", "max_issues_repo_name": "huangkaiyikatherine/nexperia_new", "max_issues_repo_head_hexsha": "b9b5c35d989883f8b29280726bfbb0fc84e62b10", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "datasets/loaders.py", "max_forks_repo_name": "huangkaiyikatherine/nexperia_new", "max_forks_repo_head_hexsha": "b9b5c35d989883f8b29280726bfbb0fc84e62b10", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2020-12-27T03:44:53.000Z", "max_forks_repo_forks_event_max_datetime": "2021-02-16T04:24:12.000Z", "avg_line_length": 47.6255319149, "max_line_length": 207, "alphanum_fraction": 0.6065046462, "include": true, "reason": "import numpy", "num_tokens": 2492}
|
import unittest
import pickle
import time
import sys
import numpy as np
import mrestimator as mre
from mrestimator.utility import log
def test_similarity(value1, value2, ratio_different=1e-10):
print('ratio difference: {:.3e}'.format(np.max(np.fabs(value1 - value2)/((value1 + value2)/2))))
return np.all(np.fabs(value1 - value2)/((value1 + value2)/2) < ratio_different)
def test_similarity_abs(value1, value2, max_difference=1e-10):
print('max difference: {:.3e}'.format(np.max(np.fabs(value1 - value2))))
return np.all(np.fabs(value1 - value2) < max_difference)
def calc_corr_arr_stationary(activity_mat, k_arr):
average = np.mean(activity_mat)
corr_arr = np.zeros_like(k_arr, dtype="float64")
n = len(activity_mat[0])
variance = np.mean((activity_mat[:]-average)**2)*(n/(n-1))
for i, k in enumerate(k_arr):
corr_arr[i] = np.mean((activity_mat[:,:-k]-average) * (activity_mat[:,k:] - average)) * ((n-k)/(n-k-1)) / variance
return corr_arr
def calc_corr_arr_stationary_new(activity_mat, k_arr):
corr_arr = np.zeros((len(k_arr)), dtype="float64")
for i, k in enumerate(k_arr):
x = activity_mat[:,:-k]
y = activity_mat[:,k:]
x_mean = np.mean(x)
y_mean = np.mean(y)
corr_arr[i] = np.sum(np.mean((x-x_mean) * (y - y_mean), axis=1))/\
np.sum(np.mean((x-x_mean)**2, axis=1))
return corr_arr
def calc_corr_mat_separate(activity_mat, k_arr):
corr_arr = np.zeros((len(k_arr)), dtype="float64")
for i, k in enumerate(k_arr):
x = activity_mat[:,:-k]
y = activity_mat[:,k:]
x_mean = np.mean(x, axis=1)[:, np.newaxis]
y_mean = np.mean(y, axis=1)[:, np.newaxis]
corr_arr[i] = np.mean(np.mean((x-x_mean) * (y - y_mean), axis=1)/
np.var(x, axis=1))
return corr_arr
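# A small self-contained sketch (not part of the original test suite): it compares the
# pooled-mean estimator used above (calc_corr_arr_stationary_new) with the per-trial-mean
# estimator (calc_corr_mat_separate) on white noise, where both should fluctuate around
# zero for every lag. The trial count, length, and seed are arbitrary.
def _demo_estimator_agreement(num_trials=5, num_steps=2000, seed=0):
    rng = np.random.default_rng(seed)
    activity_mat = rng.standard_normal((num_trials, num_steps))
    k_arr = np.arange(1, 10)
    stationary = calc_corr_arr_stationary_new(activity_mat, k_arr)
    separate = calc_corr_mat_separate(activity_mat, k_arr)
    return stationary, separate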
class TestCorrCoeff(unittest.TestCase):
log.setLevel(40)
def test_stationary_mean(self):
print("\nTesting stationary mean correlation coefficients: \n")
for ele_num in range(0,40,10):
name_data = "./data/activity_mat_{}.pickled".format(ele_num)
activity_mat = pickle.load(open(name_data, "rb"))
k_arr = np.arange(7, 1500, 1)
activity_mat = activity_mat.astype(dtype="float64")
corr_arr = calc_corr_arr_stationary_new(activity_mat, k_arr)
time_beg = time.time()
numboot = 100
mre_res = mre.coefficients(activity_mat,
steps=k_arr,
method='stationarymean',
numboot=numboot)
print('stationarymean, time needed: {:.2f} ms'.format((time.time()-time_beg)*1000))
print('mre: ', mre_res.coefficients[:5])
print('true value: ', corr_arr[:5])
self.assertTrue(test_similarity(mre_res.coefficients, corr_arr, ratio_different = 1e-8))
bootstrap_mat = np.array([boot.coefficients for boot in mre_res.bootstrapcrs])
mean_bootstrap = np.mean(bootstrap_mat, axis=0)
print("boot mean: ", mean_bootstrap[:5])
self.assertTrue(test_similarity_abs(mre_res.coefficients, np.mean(bootstrap_mat, axis=0),
max_difference=0.04/np.sqrt(numboot)))
def test_separate(self):
print("\nTesting trial separated correlation coefficients: \n")
for ele_num in range(0,40,10):
name_data = "./data/activity_mat_{}.pickled".format(ele_num)
activity_mat = pickle.load(open(name_data, "rb"))
activity_mat = activity_mat.astype(dtype="float64")
k_arr = np.arange(7, 1500, 1)
corr_arr = calc_corr_mat_separate(activity_mat, k_arr)
time_beg = time.time()
numboot = 100
mre_res = mre.coefficients(activity_mat,
steps=k_arr,
method='trialseparated',
numboot=numboot)
print('trialseparated, time needed: {:.2f} ms'.format((time.time()-time_beg)*1000))
print('mre: ', mre_res.coefficients[:5])
print('true value: ', corr_arr[:5])
self.assertTrue(test_similarity(mre_res.coefficients, corr_arr, ratio_different = 1e-10))
bootstrap_mat = np.array([boot.coefficients for boot in mre_res.bootstrapcrs])
mean_bootstrap = np.mean(bootstrap_mat, axis=0)
print("boot mean: ", mean_bootstrap[:5])
self.assertTrue(test_similarity_abs(mre_res.coefficients, np.mean(bootstrap_mat, axis=0),
max_difference=0.04/np.sqrt(numboot)))
from mrestimator.coefficients import *
from mrestimator.simulate import simulate_branching
class TestCCKnownMean(unittest.TestCase):
def test_sm(self):
print("Testing knownmean argument to sm_method")
data = simulate_branching(m=.98, a=100, numtrials=5)
steps = np.arange(1,25,1)
mx, my, x_y, x_x = sm_precompute(data, steps, 0.0)
assert(np.all(mx == 0))
assert(np.all(my == 0))
mx, my, x_y, x_x = sm_precompute(data, steps, 5.0)
assert(np.all(mx == 5))
assert(np.all(my == 5))
sm_prepped = mx, my, x_y, x_x
rk = sm_method(sm_prepped, steps)
rk2 = coefficients(data, steps=steps, method='sm',
knownmean = 5.0).coefficients
assert(test_similarity(rk, rk2, ratio_different = 1e-8))
mx2, my2, x_y2, x_x2 = sm_precompute(data, steps, None)
assert(mx2.shape == mx.shape)
assert(my2.shape == my.shape)
assert(x_y2.shape == x_y.shape)
assert(x_x2.shape == x_x.shape)
sm_prepped = mx2, my2, x_y2, x_x2
rk = sm_method(sm_prepped, steps)
# check that this matches the default.
rk2 = coefficients(data, steps=steps, method='sm').coefficients
assert(test_similarity(rk, rk2, ratio_different = 1e-8))
def test_ts(self):
print("Testing knownmean argument to ts_method")
data = simulate_branching(m=.98, a=100, numtrials=5)
steps = np.arange(1,25,1)
ts_prepped = ts_precompute(data, steps, 0.0)
rk = ts_method(ts_prepped, steps)
rk2 = coefficients(data, steps=steps, method='ts',
knownmean=0.0).coefficients
# nothing we can access here to compare...
ts_prepped = ts_precompute(data, steps, None)
rk = ts_method(ts_prepped, steps)
rk2 = coefficients(data, steps=steps, method='ts').coefficients
assert(test_similarity(rk, rk2, ratio_different = 1e-8))
if __name__ == "__main__":
unittest.main()
|
{"hexsha": "b34133bf4c7666e8424ac698638430f11f3042d8", "size": 6745, "ext": "py", "lang": "Python", "max_stars_repo_path": "mrestimator/test_suite/test_coefficients.py", "max_stars_repo_name": "balajisriram/mrestimator", "max_stars_repo_head_hexsha": "62a17ed8b101362862364850bb93991dd7e3893a", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2018-11-27T10:14:56.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-09T14:53:10.000Z", "max_issues_repo_path": "mrestimator/test_suite/test_coefficients.py", "max_issues_repo_name": "balajisriram/mrestimator", "max_issues_repo_head_hexsha": "62a17ed8b101362862364850bb93991dd7e3893a", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-05-15T21:35:29.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-15T21:35:29.000Z", "max_forks_repo_path": "mrestimator/test_suite/test_coefficients.py", "max_forks_repo_name": "balajisriram/mrestimator", "max_forks_repo_head_hexsha": "62a17ed8b101362862364850bb93991dd7e3893a", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2019-08-19T21:44:17.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-18T20:32:59.000Z", "avg_line_length": 41.3803680982, "max_line_length": 122, "alphanum_fraction": 0.6069681245, "include": true, "reason": "import numpy", "num_tokens": 1729}
|
# ############################################################################
# cpgd.py
# =======
# Authors : Adrien Besson [adribesson@gmail.com] and Matthieu Simeoni [matthieu.simeoni@gmail.com]
# ############################################################################
"""
Class for the CPGD algorithm. Description and analysis of the algorithm available at:
[1] Simeoni, M., Besson, A., Hurley, P. & Vetterli, M. (2020). Cadzow Plug-and-Play Gradient Descent for Generalised FRI.
Under review.
"""
import numpy as np
from typing import Optional
from pyoneer.algorithms.base_reconstruction_algorithm import BaseReconstructionAlgorithm
from pyoneer.algorithms.cadzow_denoising import CadzowAlgorithm
from pyoneer.operators.linear_operator import ToeplitzificationOperator, LinearOperatorFromMatrix
from scipy.sparse.linalg import eigs
class CPGDAlgorithm(BaseReconstructionAlgorithm):
"""
Class for the CPGD algorithm, with parent class `BaseReconstructionAlgorithm`.
:attribute linear_op: LinearOperatorFromMatrix
Forward operator G modelling the measurement process.
:attribute toeplitz_op: ToeplitzificationOperator
Toeplitzification operator.
:attribute rank: int
Rank parameter for Cadzow denoising.
:attribute rho: float
Rho parameter for Cadzow denoising.
:attribute tol: float
Tolerance for stopping criterion.
:attribute eig_tol: float
Tolerance for the convergence of the low rank approximation algorithms. Only used if `cadzow_backend` is 'scipy.sparse'.
:attribute provided_init_sol: {None,np.ndarray}
Initial solution for warm start.
:attribute tau_weight: {None,float}
Factor for setting the step size tau of the gradient descent.
:attribute tau: float
Step size tau of the gradient descent.
:attribute min_error: float
Minimal data mismatch so far. Used to select best reconstruction among multiple runs with different random
        initializations.
:attribute best_estimate: {None, np.ndarray}
Estimate with minimal data mismatch so far. Prior to any iteration set to None.
    :attribute denoise_verbose: bool
        Verbosity of Cadzow denoising.
    :attribute nb_cadzow_iter: int
        Number of iterations of Cadzow denoising.
    :attribute cadzow_backend: {numpy, scipy, scipy.sparse}
        Backend of Cadzow denoising.
Description and analysis of the algorithm available at:
[1] Simeoni, M., Besson, A., Hurley, P. & Vetterli, M. (2020). Cadzow Plug-and-Play Gradient Descent for Generalised FRI.
Under review.
"""
def __init__(self, nb_iter: int, linear_op: LinearOperatorFromMatrix, toeplitz_op: ToeplitzificationOperator,
                 rank: int, nb_cadzow_iter: int = 20, denoise_verbose: bool = False, rho: float = np.inf,
                 tol: float = 1e-6, eig_tol: float = 1e-8, init_sol: Optional[np.ndarray] = None, tau: Optional[float] = None,
tau_init_type: str = 'safest', tau_weight: float = 1.5, beta: Optional[float] = None,
nb_init: int = 1, random_state: int = 1, cadzow_backend: str = 'scipy'):
"""
Initialize an object of the class.
:param nb_iter: int
Number of iterations.
:param linear_op: LinearOperatorFromMatrix
Forward operator G modelling the measurement process.
:param toeplitz_op: ToeplitzificationOperator
Toeplitzification operator.
:param rank: int
Rank parameter for Cadzow denoising.
        :param nb_cadzow_iter: int
            Number of iterations of Cadzow denoising.
        :param denoise_verbose: bool
            Verbosity of Cadzow denoising.
:param rho: float
Rho parameter for Cadzow denoising.
:param tol: float
Tolerance for stopping criterion.
:param eig_tol: float
Tolerance for the convergence of the low rank approximation algorithms. Only used if `cadzow_backend` is 'scipy.sparse'.
:param init_sol: {None, np.ndarray}
Potential initial solution for warm start.
:param tau: float
Step size tau of the gradient descent.
:param tau_init_type: str
Method for choosing `tau`.
:param tau_weight: float
Weight for tau if `tau_init_type` is not one of {'safest','largest','fastest'}.
:param nb_init: int
Number of random initializations.
:param random_state: int
Seed the random number generator for reproducibility.
        :param cadzow_backend: {numpy, scipy, scipy.sparse}
            Backend of Cadzow denoising.
"""
super(CPGDAlgorithm, self).__init__(nb_iter=nb_iter, nb_init=nb_init, name='CPGD', random_state=random_state)
if not isinstance(linear_op, LinearOperatorFromMatrix):
raise ValueError("Argument linear_op must be an instance of LinearOperatorFromMatrix class.")
self.linear_op = linear_op
if not isinstance(toeplitz_op, ToeplitzificationOperator):
raise ValueError("Argument toeplitz_op must be an instance of ToeplitzificationOperator class.")
self.toeplitz_op = toeplitz_op
self.rank = rank
self.rho = rho
self.tol = tol
self.eig_tol = eig_tol
self.provided_init_sol = init_sol
self.beta = beta
if tau is None:
self.tau_weight = tau_weight
self.init_tau(type=tau_init_type, weight=self.tau_weight)
else:
self.tau_weight = None
self.tau = tau
        self.min_error = np.inf
self.best_estimate = None
# Initialize Cadzow denoising algorithm
self.denoise_verbose = denoise_verbose
self.nb_cadzow_iter = nb_cadzow_iter
self.cadzow_backend = cadzow_backend
self.preweight = 1 / np.sqrt(toeplitz_op.gram)
self.postweight = np.sqrt(toeplitz_op.gram)
self.denoising_algorithm = CadzowAlgorithm(nb_iter=self.nb_cadzow_iter, toeplitz_op=self.toeplitz_op,
rank=self.rank, rho=self.rho, tol=self.eig_tol,
backend=self.cadzow_backend)
def initialize(self, y: np.ndarray) -> list:
"""
        Initialize the estimate and store the data. If `nb_init==1` the estimate is initialized to zero, otherwise randomly.
:param y: np.ndarray
Generalised measurements for the reconstruction.
:return: list[np.ndarray, np.ndarray]
Initial estimate and data.
        Note: In practice we have observed that initializing the estimate to zero yields higher reconstruction accuracy.
"""
# Initialize the solution of the algorithm
if self.provided_init_sol is not None:
init_sol = self.provided_init_sol.astype(np.complex128)
else:
if self.nb_init == 1:
init_sol = np.zeros(shape=(self.linear_op.shape[1],), dtype=np.complex128)
else:
print('CPGD randomly initialized!')
init_sol = self.rng.standard_normal(self.linear_op.shape[1]) + 1j * self.rng.standard_normal(
self.linear_op.shape[1])
return [init_sol, y] # y is the data
def init_tau(self, type: str = 'safest', weight: float = 1.5):
"""
Set the value of tau, the size of the gradient steps. See Theorems 2 and 4 of [1] for more details.
:param type: str
Name of the various strategies for setting tau.
:param weight:
Weight for tau if `tau_init_type` is not one of {'safest','largest','fastest'}.
Note: To ensure convergence, we recommend using type='safest'. {'fastest','largest'} can improve convergence speed
but can also sometimes make the algorithm diverge.
"""
P = self.toeplitz_op.P
weighted_gram = 2 * self.linear_op.gram
if self.beta is not None:
beta = self.beta
else:
try:
beta = eigs(weighted_gram, k=1, which='LM', return_eigenvectors=False, tol=self.eig_tol)
beta *= (1 + self.eig_tol)
            except Exception:
                # eigs did not converge; retry with a looser tolerance
beta = eigs(weighted_gram, k=1, which='LM', return_eigenvectors=False, tol=1e-3)
beta *= (1 + 1e-3)
ub = 1 / beta * (1 + 1 / np.sqrt(P + 1))
lb = 1 / beta * (1 - 1 / np.sqrt(P + 1))
if type == 'fastest':
try:
alpha = eigs(weighted_gram, k=1, which='SM', return_eigenvectors=False, tol=self.eig_tol)
alpha *= (1 + self.eig_tol)
            except Exception:
                # eigs did not converge; fall back to alpha = 0
alpha = 0
tau_opt = 2 / (beta + alpha)
if (tau_opt <= ub) & (tau_opt >= lb):
self.tau = tau_opt
else:
min_lb = np.fmin(np.abs(1 - lb * alpha), np.abs(1 - lb * beta))
min_ub = np.fmin(np.abs(1 - ub * alpha), np.abs(1 - ub * beta))
if np.argmin([min_lb, min_ub]) == 0:
self.tau = lb
else:
self.tau = ub
elif type == 'safest':
self.tau = 1 / beta
elif type == 'largest':
self.tau = ub
else:
self.tau = weight / beta
def iterate(self, x: list) -> list:
"""
Iterations of CPGD.
:param x: list
`x[0]` is the estimate so far, `x[1]` the data.
:return: list
`x[0]` is the updated estimate, `x[1]` the data.
"""
derivative = 2 * self.linear_op.rmatvec(self.linear_op.matvec(x[0]) - x[1])
x[0] = x[0] - self.tau * derivative
x[0] = self.denoising_algorithm.reconstruct(x[0], verbose=self.denoise_verbose, verbose_frequency=1)
return x
def stop_criterion(self, x: list) -> dict:
"""
Determines when to stop the algorithm based on relative improvement.
:param x: list
`x[0]` is the estimate so far, `x[1]` the data.
:return: dict
If key `stop` of `stop_dict` is True the reconstruction stops at the next iteration.
"""
stop_dict = dict()
if np.linalg.norm(self.x_old[0]) == 0:
            relative_improvement = np.inf
else:
relative_improvement = np.linalg.norm((x[0] - self.x_old[0])) / np.linalg.norm(self.x_old[0])
stop_dict['relative_improvement'] = relative_improvement
stop_dict["stop"] = (relative_improvement < self.tol)
return stop_dict
def postprocess(self, x: list) -> np.ndarray:
"""
Compute the data mismatch and update the best estimate so far.
:param x: list
`x[0]` is the estimate so far, `x[1]` the data.
:return: np.ndarray
Estimate with minimal data mismatch.
"""
data_mismatch = np.linalg.norm(x[1] - self.linear_op.matvec(x[0]))
# Update min_error and best solution if required
if data_mismatch < self.min_error:
if self.best_estimate is not None:
print('Better solution found!')
self.best_estimate = x[0]
self.min_error = data_mismatch
return self.best_estimate
def display_log(self, **kwargs):
"""
Print log to terminal.
"""
stop_dict = kwargs["stop_dict"]
if stop_dict['stop'] == 1:
print(
"{} algorithm -- Iteration {} over {} -- Elapsed time {} s -- Converged".format(self.name,
kwargs["iteration"],
self.nb_iter,
kwargs["elapsed_time"]))
else:
print(
'{} algorithm -- Iteration {} over {} -- Elapsed time {} s -- Relative improvement {} %'.format(
self.name,
kwargs["iteration"],
self.nb_iter,
kwargs["elapsed_time"],
100 * stop_dict['relative_improvement']))
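# A hedged usage sketch (not part of the original module). It assumes that
# BaseReconstructionAlgorithm exposes a `reconstruct(y)` driver, as suggested by the call
# to CadzowAlgorithm.reconstruct above, and that `G` and `Tp` are already-built
# LinearOperatorFromMatrix and ToeplitzificationOperator instances:
#
#   cpgd = CPGDAlgorithm(nb_iter=100, linear_op=G, toeplitz_op=Tp, rank=K,
#                        nb_cadzow_iter=20, tau_init_type='safest')
#   x_hat = cpgd.reconstruct(y)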
|
{"hexsha": "5a1b5fdb3355525cedc0d5c99ceb79ba4ddd0b82", "size": 12364, "ext": "py", "lang": "Python", "max_stars_repo_path": "pyoneer/algorithms/cpgd.py", "max_stars_repo_name": "matthieumeo/pyoneer", "max_stars_repo_head_hexsha": "fb7ee05c5319248f9180c10068235ff311b844b0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2020-04-03T16:30:53.000Z", "max_stars_repo_stars_event_max_datetime": "2020-11-25T09:06:44.000Z", "max_issues_repo_path": "pyoneer/algorithms/cpgd.py", "max_issues_repo_name": "matthieumeo/pyoneer", "max_issues_repo_head_hexsha": "fb7ee05c5319248f9180c10068235ff311b844b0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pyoneer/algorithms/cpgd.py", "max_forks_repo_name": "matthieumeo/pyoneer", "max_forks_repo_head_hexsha": "fb7ee05c5319248f9180c10068235ff311b844b0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-08-12T15:22:39.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-12T15:22:39.000Z", "avg_line_length": 45.6236162362, "max_line_length": 128, "alphanum_fraction": 0.6007764478, "include": true, "reason": "import numpy,from scipy", "num_tokens": 2952}
|
## Transforms take values at Chebyshev points of the first and second kinds and produce Chebyshev coefficients
abstract type ChebyshevPlan{T} <: Plan{T} end
size(P::ChebyshevPlan) = isdefined(P, :plan) ? size(P.plan) : (0,)
length(P::ChebyshevPlan) = isdefined(P, :plan) ? length(P.plan) : 0
const FIRSTKIND = FFTW.REDFT10
const SECONDKIND = FFTW.REDFT00
struct ChebyshevTransformPlan{T,kind,K,inplace,N,R} <: ChebyshevPlan{T}
plan::FFTW.r2rFFTWPlan{T,K,inplace,N,R}
ChebyshevTransformPlan{T,kind,K,inplace,N,R}(plan) where {T,kind,K,inplace,N,R} = new{T,kind,K,inplace,N,R}(plan)
ChebyshevTransformPlan{T,kind,K,inplace,N,R}() where {T,kind,K,inplace,N,R} = new{T,kind,K,inplace,N,R}()
end
ChebyshevTransformPlan{T,kind,K}(plan::FFTW.r2rFFTWPlan{T,K,inplace,N,R}) where {T,kind,K,inplace,N,R} =
ChebyshevTransformPlan{T,kind,K,inplace,N,R}(plan)
# jump through some hoops to make inferrable
@inline kindtuple(KIND,N) = ntuple(_ -> KIND,N)
@inline kindtuple(KIND,N,::Integer) = (KIND,)
function plan_chebyshevtransform!(x::AbstractArray{T,N}, ::Val{1}, dims...; kws...) where {T<:fftwNumber,N}
if isempty(x)
ChebyshevTransformPlan{T,1,kindtuple(FIRSTKIND,N,dims...),true,N,isempty(dims) ? UnitRange{Int} : typeof(dims)}()
else
ChebyshevTransformPlan{T,1,kindtuple(FIRSTKIND,N,dims...)}(FFTW.plan_r2r!(x, FIRSTKIND, dims...; kws...))
end
end
function plan_chebyshevtransform!(x::AbstractArray{T,N}, ::Val{2}, dims...; kws...) where {T<:fftwNumber,N}
any(≤(1),size(x)) && throw(ArgumentError("Array must contain at least 2 entries"))
ChebyshevTransformPlan{T,2,kindtuple(SECONDKIND,N,dims...)}(FFTW.plan_r2r!(x, SECONDKIND, dims...; kws...))
end
function plan_chebyshevtransform(x::AbstractArray{T,N}, ::Val{1}, dims...; kws...) where {T<:fftwNumber,N}
if isempty(x)
ChebyshevTransformPlan{T,1,kindtuple(FIRSTKIND,N,dims...),false,N,isempty(dims) ? UnitRange{Int} : typeof(dims)}()
else
ChebyshevTransformPlan{T,1,kindtuple(FIRSTKIND,N,dims...)}(FFTW.plan_r2r(x, FIRSTKIND, dims...; kws...))
end
end
function plan_chebyshevtransform(x::AbstractArray{T,N}, ::Val{2}, dims...; kws...) where {T<:fftwNumber,N}
any(≤(1),size(x)) && throw(ArgumentError("Array must contain at least 2 entries"))
ChebyshevTransformPlan{T,2,kindtuple(SECONDKIND,N,dims...)}(FFTW.plan_r2r(x, SECONDKIND, dims...; kws...))
end
plan_chebyshevtransform!(x::AbstractArray, dims...; kws...) = plan_chebyshevtransform!(x, Val(1), dims...; kws...)
plan_chebyshevtransform(x::AbstractArray, dims...; kws...) = plan_chebyshevtransform(x, Val(1), dims...; kws...)
# convert x if necessary
@inline _plan_mul!(y::AbstractArray{T}, P::Plan{T}, x::StridedArray{T}) where T = mul!(y, P, x)
@inline _plan_mul!(y::AbstractArray{T}, P::Plan{T}, x::AbstractArray) where T = mul!(y, P, convert(Array{T}, x))
@inline _cheb1_rescale!(_, y::AbstractVector) = (y[1] /= 2; ldiv!(length(y), y))
@inline function _cheb1_rescale!(d::Number, y::AbstractMatrix{T}) where T
if isone(d)
ldiv!(2, view(y,1,:))
else
ldiv!(2, view(y,:,1))
end
ldiv!(size(y,d), y)
end
# TODO: higher dimensional arrays
@inline function _cheb1_rescale!(d::UnitRange, y::AbstractMatrix{T}) where T
@assert d == 1:2
ldiv!(2, view(y,1,:))
ldiv!(2, view(y,:,1))
ldiv!(prod(size(y)), y)
end
function *(P::ChebyshevTransformPlan{T,1,K,true,N}, x::AbstractArray{T,N}) where {T,K,N}
n = length(x)
n == 0 && return x
y = P.plan*x # will be === x if in-place
_cheb1_rescale!(P.plan.region, y)
end
function mul!(y::AbstractArray{T,N}, P::ChebyshevTransformPlan{T,1,K,false,N}, x::AbstractArray{<:Any,N}) where {T,K,N}
n = length(x)
length(y) == n || throw(DimensionMismatch("output must match dimension"))
n == 0 && return y
_plan_mul!(y, P.plan, x)
_cheb1_rescale!(P.plan.region, y)
end
_cheb2_rescale!(_, y::AbstractVector) = (y[1] /= 2; y[end] /= 2; ldiv!(length(y)-1, y))
function _cheb2_rescale!(d::Number, y::AbstractMatrix{T}) where T
if isone(d)
ldiv!(2, @view(y[1,:]))
ldiv!(2, @view(y[end,:]))
else
ldiv!(2, @view(y[:,1]))
ldiv!(2, @view(y[:,end]))
end
ldiv!(size(y,d)-1, y)
end
# TODO: higher dimensional arrays
function _cheb2_rescale!(d::UnitRange, y::AbstractMatrix{T}) where T
@assert d == 1:2
ldiv!(2, @view(y[1,:]))
ldiv!(2, @view(y[end,:]))
ldiv!(2, @view(y[:,1]))
ldiv!(2, @view(y[:,end]))
ldiv!(prod(size(y) .- 1), y)
end
function *(P::ChebyshevTransformPlan{T,2,K,true,N}, x::AbstractArray{T,N}) where {T,K,N}
n = length(x)
y = P.plan*x # will be === x if in-place
_cheb2_rescale!(P.plan.region, y)
end
function mul!(y::AbstractArray{T,N}, P::ChebyshevTransformPlan{T,2,K,false,N}, x::AbstractArray{<:Any,N}) where {T,K,N}
n = length(x)
length(y) == n || throw(DimensionMismatch("output must match dimension"))
_plan_mul!(y, P.plan, x)
_cheb2_rescale!(P.plan.region, y)
end
*(P::ChebyshevTransformPlan{T,kind,K,false,N}, x::AbstractArray{T,N}) where {T,kind,K,N} =
mul!(similar(x), P, x)
"""
chebyshevtransform!(x, kind=Val(1))
transforms from values on a Chebyshev grid of the first or second kind to Chebyshev
coefficients, in-place
"""
chebyshevtransform!(x, dims...; kws...) = plan_chebyshevtransform!(x, dims...; kws...)*x
"""
chebyshevtransform(x, kind=Val(1))
transforms from values on a Chebyshev grid of the first or second kind to Chebyshev
coefficients.
"""
chebyshevtransform(x, dims...; kws...) = plan_chebyshevtransform(x, dims...; kws...) * x
## Inverse transforms take Chebyshev coefficients and produce values at Chebyshev points of the first and second kinds
const IFIRSTKIND = FFTW.REDFT01
struct IChebyshevTransformPlan{T,kind,K,inplace,N,R} <: ChebyshevPlan{T}
plan::FFTW.r2rFFTWPlan{T,K,inplace,N,R}
IChebyshevTransformPlan{T,kind,K,inplace,N,R}(plan) where {T,kind,K,inplace,N,R} = new{T,kind,K,inplace,N,R}(plan)
IChebyshevTransformPlan{T,kind,K,inplace,N,R}() where {T,kind,K,inplace,N,R} = new{T,kind,K,inplace,N,R}()
end
IChebyshevTransformPlan{T,kind,K}(F::FFTW.r2rFFTWPlan{T,K,inplace,N,R}) where {T,kind,K,inplace,N,R} =
IChebyshevTransformPlan{T,kind,K,inplace,N,R}(F)
# second kind Chebyshev transforms share a plan with their inverse
# so we support this via inv
inv(P::ChebyshevTransformPlan{T,2,K}) where {T,K} = IChebyshevTransformPlan{T,2,K}(P.plan)
inv(P::IChebyshevTransformPlan{T,2,K}) where {T,K} = ChebyshevTransformPlan{T,2,K}(P.plan)
\(P::ChebyshevTransformPlan, x::AbstractArray) = inv(P) * x
\(P::IChebyshevTransformPlan, x::AbstractArray) = inv(P) * x
function plan_ichebyshevtransform!(x::AbstractArray{T,N}, ::Val{1}, dims...; kws...) where {T<:fftwNumber,N}
if isempty(x)
IChebyshevTransformPlan{T,1,kindtuple(IFIRSTKIND,N,dims...),true,N,isempty(dims) ? UnitRange{Int} : typeof(dims)}()
else
IChebyshevTransformPlan{T,1,kindtuple(IFIRSTKIND,N,dims...)}(FFTW.plan_r2r!(x, IFIRSTKIND, dims...; kws...))
end
end
function plan_ichebyshevtransform!(x::AbstractArray{T}, ::Val{2}, dims...; kws...) where T<:fftwNumber
inv(plan_chebyshevtransform!(x, Val(2), dims...; kws...))
end
function plan_ichebyshevtransform(x::AbstractArray{T,N}, ::Val{1}, dims...; kws...) where {T<:fftwNumber,N}
if isempty(x)
IChebyshevTransformPlan{T,1,kindtuple(IFIRSTKIND,N,dims...),false,N,isempty(dims) ? UnitRange{Int} : typeof(dims)}()
else
IChebyshevTransformPlan{T,1,kindtuple(IFIRSTKIND,N,dims...)}(FFTW.plan_r2r(x, IFIRSTKIND, dims...; kws...))
end
end
function plan_ichebyshevtransform(x::AbstractArray{T}, ::Val{2}, dims...; kws...) where T<:fftwNumber
inv(plan_chebyshevtransform(x, Val(2), dims...; kws...))
end
plan_ichebyshevtransform!(x::AbstractArray, dims...; kws...) = plan_ichebyshevtransform!(x, Val(1), dims...; kws...)
plan_ichebyshevtransform(x::AbstractArray, dims...; kws...) = plan_ichebyshevtransform(x, Val(1), dims...; kws...)
@inline _icheb1_prescale!(_, x::AbstractVector) = (x[1] *= 2)
@inline function _icheb1_prescale!(d::Number, x::AbstractMatrix)
if isone(d)
lmul!(2, view(x,1,:))
else
lmul!(2, view(x,:,1))
end
x
end
@inline function _icheb1_prescale!(d::UnitRange, x::AbstractMatrix)
lmul!(2, view(x,:,1))
lmul!(2, view(x,1,:))
x
end
@inline _icheb1_postscale!(_, x::AbstractVector) = (x[1] /= 2)
@inline function _icheb1_postscale!(d::Number, x::AbstractMatrix)
if isone(d)
ldiv!(2, view(x,1,:))
else
ldiv!(2, view(x,:,1))
end
x
end
@inline function _icheb1_postscale!(d::UnitRange, x::AbstractMatrix)
ldiv!(2, view(x,1,:))
ldiv!(2, view(x,:,1))
x
end
function *(P::IChebyshevTransformPlan{T,1,K,true,N}, x::AbstractArray{T,N}) where {T<:fftwNumber,K,N}
n = length(x)
n == 0 && return x
_icheb1_prescale!(P.plan.region, x)
x = ldiv!(2^length(P.plan.region), P.plan*x)
x
end
function mul!(y::AbstractArray{T,N}, P::IChebyshevTransformPlan{T,1,K,false,N}, x::AbstractArray{T,N}) where {T<:fftwNumber,K,N}
size(y) == size(x) || throw(DimensionMismatch("output must match dimension"))
isempty(x) && return y
_icheb1_prescale!(P.plan.region, x) # Todo: don't mutate x
_plan_mul!(y, P.plan, x)
_icheb1_postscale!(P.plan.region, x)
ldiv!(2^length(P.plan.region), y)
end
@inline _icheb2_prescale!(_, x::AbstractVector) = (x[1] *= 2; x[end] *= 2)
@inline function _icheb2_prescale!(d::Number, x::AbstractMatrix)
if isone(d)
lmul!(2, @view(x[1,:]))
lmul!(2, @view(x[end,:]))
else
lmul!(2, @view(x[:,1]))
lmul!(2, @view(x[:,end]))
end
x
end
@inline function _icheb2_prescale!(d::UnitRange, x::AbstractMatrix)
lmul!(2, @view(x[1,:]))
lmul!(2, @view(x[end,:]))
lmul!(2, @view(x[:,1]))
lmul!(2, @view(x[:,end]))
x
end
@inline _icheb2_postrescale!(_, x::AbstractVector) = (x[1] /= 2; x[end] /= 2)
@inline function _icheb2_postrescale!(d::Number, x::AbstractMatrix)
if isone(d)
ldiv!(2, @view(x[1,:]))
ldiv!(2, @view(x[end,:]))
else
ldiv!(2, @view(x[:,1]))
ldiv!(2, @view(x[:,end]))
end
x
end
@inline function _icheb2_postrescale!(d::UnitRange, x::AbstractMatrix)
ldiv!(2, @view(x[1,:]))
ldiv!(2, @view(x[end,:]))
ldiv!(2, @view(x[:,1]))
ldiv!(2, @view(x[:,end]))
x
end
@inline function _icheb2_rescale!(d::Number, y::AbstractArray{T}) where T
_icheb2_prescale!(d, y)
lmul!(convert(T, size(y,d) - 1)/2, y)
y
end
@inline function _icheb2_rescale!(d::UnitRange, y::AbstractArray{T}) where T
_icheb2_prescale!(d, y)
lmul!(prod(convert.(T, size(y) .- 1)./2), y)
y
end
function *(P::IChebyshevTransformPlan{T,2,K,true,N}, x::AbstractArray{T,N}) where {T<:fftwNumber,K,N}
n = length(x)
_icheb2_prescale!(P.plan.region, x)
x = inv(P)*x
_icheb2_rescale!(P.plan.region, x)
end
function mul!(y::AbstractArray{T,N}, P::IChebyshevTransformPlan{T,2,K,false,N}, x::AbstractArray{<:Any,N}) where {T<:fftwNumber,K,N}
n = length(x)
length(y) == n || throw(DimensionMismatch("output must match dimension"))
_icheb2_prescale!(P.plan.region, x)
_plan_mul!(y, inv(P), x)
_icheb2_postrescale!(P.plan.region, x)
_icheb2_rescale!(P.plan.region, y)
end
*(P::IChebyshevTransformPlan{T,kind,K,false,N}, x::AbstractArray{T,N}) where {T,kind,K,N} = mul!(similar(x), P, x)
ichebyshevtransform!(x::AbstractArray, dims...; kwds...) = plan_ichebyshevtransform!(x, dims...; kwds...)*x
ichebyshevtransform(x, dims...; kwds...) = plan_ichebyshevtransform(x, dims...; kwds...)*x
## Chebyshev U
const UFIRSTKIND = FFTW.RODFT10
const USECONDKIND = FFTW.RODFT00
struct ChebyshevUTransformPlan{T,kind,K,inplace,N,R} <: ChebyshevPlan{T}
plan::FFTW.r2rFFTWPlan{T,K,inplace,N,R}
ChebyshevUTransformPlan{T,kind,K,inplace,N,R}(plan) where {T,kind,K,inplace,N,R} = new{T,kind,K,inplace,N,R}(plan)
ChebyshevUTransformPlan{T,kind,K,inplace,N,R}() where {T,kind,K,inplace,N,R} = new{T,kind,K,inplace,N,R}()
end
ChebyshevUTransformPlan{T,kind,K}(plan::FFTW.r2rFFTWPlan{T,K,inplace,N,R}) where {T,kind,K,inplace,N,R} =
ChebyshevUTransformPlan{T,kind,K,inplace,N,R}(plan)
function plan_chebyshevutransform!(x::AbstractArray{T,N}, ::Val{1}, dims...; kws...) where {T<:fftwNumber,N}
if isempty(x)
ChebyshevUTransformPlan{T,1,kindtuple(UFIRSTKIND,N,dims...),true,N,isempty(dims) ? UnitRange{Int} : typeof(dims)}()
else
ChebyshevUTransformPlan{T,1,kindtuple(UFIRSTKIND,N,dims...)}(FFTW.plan_r2r!(x, UFIRSTKIND, dims...; kws...))
end
end
function plan_chebyshevutransform!(x::AbstractArray{T,N}, ::Val{2}, dims...; kws...) where {T<:fftwNumber,N}
any(≤(1),size(x)) && throw(ArgumentError("Array must contain at least 2 entries"))
ChebyshevUTransformPlan{T,2,kindtuple(USECONDKIND,N,dims...)}(FFTW.plan_r2r!(x, USECONDKIND, dims...; kws...))
end
function plan_chebyshevutransform(x::AbstractArray{T,N}, ::Val{1}, dims...; kws...) where {T<:fftwNumber,N}
if isempty(x)
ChebyshevUTransformPlan{T,1,kindtuple(UFIRSTKIND,N,dims...),false,N,isempty(dims) ? UnitRange{Int} : typeof(dims)}()
else
ChebyshevUTransformPlan{T,1,kindtuple(UFIRSTKIND,N,dims...)}(FFTW.plan_r2r(x, UFIRSTKIND, dims...; kws...))
end
end
function plan_chebyshevutransform(x::AbstractArray{T,N}, ::Val{2}, dims...; kws...) where {T<:fftwNumber,N}
any(≤(1),size(x)) && throw(ArgumentError("Array must contain at least 2 entries"))
ChebyshevUTransformPlan{T,2,kindtuple(USECONDKIND,N,dims...)}(FFTW.plan_r2r(x, USECONDKIND, dims...; kws...))
end
plan_chebyshevutransform!(x::AbstractArray, dims...; kws...) = plan_chebyshevutransform!(x, Val(1), dims...; kws...)
plan_chebyshevutransform(x::AbstractArray, dims...; kws...) = plan_chebyshevutransform(x, Val(1), dims...; kws...)
@inline function _chebu1_prescale!(_, x::AbstractVector{T}) where T
n = length(x)
for k=1:n # sqrt(1-x_j^2) weight
x[k] *= sinpi(one(T)/(2n) + (k-one(T))/n)/n
end
x
end
@inline function _chebu1_postscale!(_, x::AbstractVector{T}) where T
n = length(x)
for k=1:n # sqrt(1-x_j^2) weight
x[k] /= sinpi(one(T)/(2n) + (k-one(T))/n)/n
end
x
end
function *(P::ChebyshevUTransformPlan{T,1,K,true}, x::AbstractVector{T}) where {T,K}
length(x) ≤ 1 && return x
_chebu1_prescale!(P.plan.region, x)
P.plan * x
end
function mul!(y::AbstractVector{T}, P::ChebyshevUTransformPlan{T,1,K,false}, x::AbstractVector{T}) where {T,K}
n = length(x)
length(x) ≤ 1 && return copyto!(y, x)
_chebu1_prescale!(P.plan.region, x)
_plan_mul!(y, P.plan, x)
_chebu1_postscale!(P.plan.region, x)
y
end
@inline function _chebu2_prescale!(_, x::AbstractVector{T}) where T
n = length(x)
c = one(T)/ (n+1)
for k=1:n # sqrt(1-x_j^2) weight
x[k] *= sinpi(k*c)
end
x
end
@inline function _chebu2_postscale!(_, x::AbstractVector{T}) where T
n = length(x)
c = one(T)/ (n+1)
@inbounds for k=1:n # sqrt(1-x_j^2) weight
x[k] /= sinpi(k*c)
end
x
end
function *(P::ChebyshevUTransformPlan{T,2,K,true}, x::AbstractVector{T}) where {T,K}
n = length(x)
n ≤ 1 && return x
_chebu2_prescale!(P.plan.region, x)
lmul!(one(T)/ (n+1), P.plan * x)
end
function mul!(y::AbstractVector{T}, P::ChebyshevUTransformPlan{T,2,K,false}, x::AbstractVector{T}) where {T,K}
n = length(x)
n ≤ 1 && return copyto!(y, x)
_chebu2_prescale!(P.plan.region, x)
_plan_mul!(y, P.plan, x)
_chebu2_postscale!(P.plan.region, x)
lmul!(one(T)/ (n+1), y)
end
*(P::ChebyshevUTransformPlan{T,kind,K,false,N}, x::AbstractArray{T,N}) where {T,kind,K,N} =
mul!(similar(x), P, x)
chebyshevutransform!(x::AbstractVector{T}, dims...; kws...) where {T<:fftwNumber} =
plan_chebyshevutransform!(x, dims...; kws...)*x
"""
chebyshevutransform(x, ::Val{kind}=Val(1))
transforms from values on a Chebyshev grid of the first or second kind to Chebyshev
coefficients of the 2nd kind (Chebyshev U expansion).
"""
chebyshevutransform(x, dims...; kws...) = plan_chebyshevutransform(x, dims...; kws...)*x
## Inverse transforms take ChebyshevU coefficients and produce values at ChebyshevU points of the first and second kinds
const IUFIRSTKIND = FFTW.RODFT01
struct IChebyshevUTransformPlan{T,kind,K,inplace,N,R} <: ChebyshevPlan{T}
plan::FFTW.r2rFFTWPlan{T,K,inplace,N,R}
IChebyshevUTransformPlan{T,kind,K,inplace,N,R}(plan) where {T,kind,K,inplace,N,R} = new{T,kind,K,inplace,N,R}(plan)
IChebyshevUTransformPlan{T,kind,K,inplace,N,R}() where {T,kind,K,inplace,N,R} = new{T,kind,K,inplace,N,R}()
end
IChebyshevUTransformPlan{T,kind,K}(F::FFTW.r2rFFTWPlan{T,K,inplace,N,R}) where {T,kind,K,inplace,N,R} =
IChebyshevUTransformPlan{T,kind,K,inplace,N,R}(F)
function plan_ichebyshevutransform!(x::AbstractArray{T,N}, ::Val{1}, dims...; kws...) where {T<:fftwNumber,N}
if isempty(x)
IChebyshevUTransformPlan{T,1,kindtuple(IUFIRSTKIND,N,dims...),true,N,isempty(dims) ? UnitRange{Int} : typeof(dims)}()
else
IChebyshevUTransformPlan{T,1,kindtuple(IUFIRSTKIND,N,dims...)}(FFTW.plan_r2r!(x, IUFIRSTKIND, dims...; kws...))
end
end
function plan_ichebyshevutransform!(x::AbstractArray{T,N}, ::Val{2}, dims...; kws...) where {T<:fftwNumber,N}
any(≤(1),size(x)) && throw(ArgumentError("Array must contain at least 2 entries"))
    IChebyshevUTransformPlan{T,2,kindtuple(USECONDKIND,N,dims...)}(FFTW.plan_r2r!(x, USECONDKIND, dims...; kws...))
end
function plan_ichebyshevutransform(x::AbstractArray{T,N}, ::Val{1}, dims...; kws...) where {T<:fftwNumber,N}
if isempty(x)
IChebyshevUTransformPlan{T,1,kindtuple(IUFIRSTKIND,N,dims...),false,N,isempty(dims) ? UnitRange{Int} : typeof(dims)}()
else
IChebyshevUTransformPlan{T,1,kindtuple(IUFIRSTKIND,N,dims...)}(FFTW.plan_r2r(x, IUFIRSTKIND, dims...; kws...))
end
end
function plan_ichebyshevutransform(x::AbstractArray{T,N}, ::Val{2}, dims...; kws...) where {T<:fftwNumber,N}
any(≤(1),size(x)) && throw(ArgumentError("Array must contain at least 2 entries"))
    IChebyshevUTransformPlan{T,2,kindtuple(USECONDKIND,N,dims...)}(FFTW.plan_r2r(x, USECONDKIND, dims...; kws...))
end
plan_ichebyshevutransform!(x::AbstractArray, dims...; kws...) = plan_ichebyshevutransform!(x, Val(1), dims...; kws...)
plan_ichebyshevutransform(x::AbstractArray, dims...; kws...) = plan_ichebyshevutransform(x, Val(1), dims...; kws...)
function _ichebyu1_postscale!(_, x::AbstractVector{T}) where T
n = length(x)
@inbounds for k=1:n # sqrt(1-x_j^2) weight
x[k] /= 2sinpi(one(T)/(2n) + (k-one(T))/n)
end
x
end
function *(P::IChebyshevUTransformPlan{T,1,K,true}, x::AbstractVector{T}) where {T<:fftwNumber,K}
n = length(x)
n ≤ 1 && return x
x = P.plan * x
_ichebyu1_postscale!(P.plan.region, x)
end
function mul!(y::AbstractVector{T}, P::IChebyshevUTransformPlan{T,1,K,false}, x::AbstractVector{T}) where {T<:fftwNumber,K}
n = length(x)
length(y) == n || throw(DimensionMismatch("output must match dimension"))
    n ≤ 1 && return copyto!(y, x)
_plan_mul!(y, P.plan, x)
_ichebyu1_postscale!(P.plan.region, y)
end
function _ichebu2_rescale!(_, x::AbstractVector{T}) where T
n = length(x)
c = one(T)/ (n+1)
for k=1:n # sqrt(1-x_j^2) weight
x[k] /= sinpi(k*c)
end
ldiv!(2, x)
x
end
function *(P::IChebyshevUTransformPlan{T,2,K,true}, x::AbstractVector{T}) where {T<:fftwNumber,K}
n = length(x)
n ≤ 1 && return x
x = P.plan * x
_ichebu2_rescale!(P.plan.region, x)
end
function mul!(y::AbstractVector{T}, P::IChebyshevUTransformPlan{T,2,K,false}, x::AbstractVector{T}) where {T<:fftwNumber,K}
n = length(x)
length(y) == n || throw(DimensionMismatch("output must match dimension"))
    n ≤ 1 && return copyto!(y, x)
_plan_mul!(y, P.plan, x)
_ichebu2_rescale!(P.plan.region, y)
end
ichebyshevutransform!(x::AbstractVector{T}, dims...; kwds...) where {T<:fftwNumber} =
plan_ichebyshevutransform!(x, dims...; kwds...)*x
ichebyshevutransform(x, dims...; kwds...) = plan_ichebyshevutransform(x, dims...; kwds...)*x
*(P::IChebyshevUTransformPlan{T,k,K,false,N}, x::AbstractArray{T,N}) where {T,k,K,N} =
mul!(similar(x), P, x)
## Code generation for integer inputs
for func in (:chebyshevtransform,:ichebyshevtransform,:chebyshevutransform,:ichebyshevutransform)
@eval $func(x::AbstractVector{T}, dims...; kwds...) where {T<:Integer} = $func(convert(AbstractVector{Float64},x), dims...; kwds...)
end
## points
struct ChebyshevGrid{kind,T} <: AbstractVector{T}
n::Int
function ChebyshevGrid{1,T}(n::Int) where T
        n ≥ 0 || throw(ArgumentError("Number of points must be nonnegative"))
new{1,T}(n)
end
function ChebyshevGrid{2,T}(n::Int) where T
        n ≥ 2 || throw(ArgumentError("Number of points must be at least 2"))
new{2,T}(n)
end
end
ChebyshevGrid{kind}(n::Integer) where kind = ChebyshevGrid{kind,Float64}(n)
size(g::ChebyshevGrid) = (g.n,)
getindex(g::ChebyshevGrid{1,T}, k::Integer) where T =
sinpi(convert(T,g.n-2k+1)/(2g.n))
getindex(g::ChebyshevGrid{2,T}, k::Integer) where T =
sinpi(convert(T,g.n-2k+1)/(2g.n-2))
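# Equivalently (rewriting the sines above as cosines): the first-kind grid is
# x_k = cos((2k-1)π/(2n)) and the second-kind grid is x_k = cos((k-1)π/(n-1)),
# both listed from the +1 end of [-1,1] down to the -1 end.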
chebyshevpoints(::Type{T}, n::Integer, ::Val{kind}) where {T<:Number,kind} = ChebyshevGrid{kind,T}(n)
chebyshevpoints(::Type{T}, n::Integer) where T = chebyshevpoints(T, n, Val(1))
chebyshevpoints(n::Integer, kind=Val(1)) = chebyshevpoints(Float64, n, kind)
# sin(nθ) coefficients to values at Clenshaw-Curtis nodes except ±1
#
# struct DSTPlan{T,kind,inplace,P} <: Plan{T}
# plan::P
# end
#
# DSTPlan{k,inp}(plan) where {k,inp} =
# DSTPlan{eltype(plan),k,inp,typeof(plan)}(plan)
#
#
# plan_DSTI!(x) = length(x) > 0 ? DSTPlan{1,true}(FFTW.FFTW.plan_r2r!(x, FFTW.FFTW.RODFT00)) :
# fill(one(T),1,length(x))
#
# function *(P::DSTPlan{T,1}, x::AbstractArray) where {T}
# x = P.plan*x
# rmul!(x,half(T))
# end
###
# BigFloat
# Use `Nothing` and fall back to FFT
###
plan_chebyshevtransform(x::AbstractArray{T,N}, ::Val{kind}, dims...; kws...) where {T,N,kind} =
ChebyshevTransformPlan{T,kind,Nothing,false,N,UnitRange{Int}}()
plan_ichebyshevtransform(x::AbstractArray{T,N}, ::Val{kind}, dims...; kws...) where {T,N,kind} =
IChebyshevTransformPlan{T,kind,Nothing,false,N,UnitRange{Int}}()
plan_chebyshevtransform!(x::AbstractArray{T,N}, ::Val{kind}, dims...; kws...) where {T,N,kind} =
ChebyshevTransformPlan{T,kind,Nothing,true,N,UnitRange{Int}}()
plan_ichebyshevtransform!(x::AbstractArray{T,N}, ::Val{kind}, dims...; kws...) where {T,N,kind} =
IChebyshevTransformPlan{T,kind,Nothing,true,N,UnitRange{Int}}()
#following Chebfun's @Chebtech1/vals2coeffs.m and @Chebtech2/vals2coeffs.m
function *(P::ChebyshevTransformPlan{T,1,Nothing,false}, x::AbstractVector{T}) where T
n = length(x)
if n == 1
x
else
w = [2exp(im*convert(T,π)*k/2n) for k=0:n-1]
ret = w.*ifft([x;reverse(x)])[1:n]
ret = T<:Real ? real(ret) : ret
ret[1] /= 2
ret
end
end
# function *(P::ChebyshevTransformPlan{T,1,K,Nothing,false}, x::AbstractVector{T}) where {T,K}
# n = length(x)
# if n == 1
# x
# else
# ret = ifft([x;x[end:-1:2]])[1:n]
# ret = T<:Real ? real(ret) : ret
# ret[2:n-1] *= 2
# ret
# end
# end
*(P::ChebyshevTransformPlan{T,1,Nothing,true,N,R}, x::AbstractVector{T}) where {T,N,R} =
copyto!(x, ChebyshevTransformPlan{T,1,Nothing,false,N,R}() * x)
# *(P::ChebyshevTransformPlan{T,2,true,Nothing}, x::AbstractVector{T}) where T =
# copyto!(x, ChebyshevTransformPlan{T,2,false,Nothing}() * x)
#following Chebfun's @Chebtech1/vals2coeffs.m and @Chebtech2/vals2coeffs.m
function *(P::IChebyshevTransformPlan{T,1,Nothing,false}, x::AbstractVector{T}) where T
n = length(x)
if n == 1
x
else
w = [exp(-im*convert(T,π)*k/2n)/2 for k=0:2n-1]
w[1] *= 2;w[n+1] *= 0;w[n+2:end] *= -1
ret = fft(w.*[x;one(T);x[end:-1:2]])
ret = T<:Real ? real(ret) : ret
ret[1:n]
end
end
# function *(P::IChebyshevTransformPlan{T,2,K,Nothing,true}, x::AbstractVector{T}) where {T,K}
# n = length(x)
# if n == 1
# x
# else
# x[1] *= 2; x[end] *= 2
# chebyshevtransform!(x, Val(2))
# x[1] *= 2; x[end] *= 2
# lmul!(convert(T,n-1)/2, x)
# x
# end
# end
*(P::IChebyshevTransformPlan{T,1,Nothing,true,N,R}, x::AbstractVector{T}) where {T,N,R} =
copyto!(x, IChebyshevTransformPlan{T,1,Nothing,false,N,R}() * x)
# *(P::IChebyshevTransformPlan{T,SECONDKIND,false,Nothing}, x::AbstractVector{T}) where T =
# IChebyshevTransformPlan{T,SECONDKIND,true,Nothing}() * copy(x)
|
{"hexsha": "0563f26bed5b28df98b6ff8a57f841dcca8afcc7", "size": 24831, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/chebyshevtransform.jl", "max_stars_repo_name": "JuliaApproximation/FastTransforms.jl", "max_stars_repo_head_hexsha": "4199ae9ac7970390da2546aef7f290f4cf675c65", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 138, "max_stars_repo_stars_event_min_datetime": "2019-04-13T05:52:03.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-15T10:03:15.000Z", "max_issues_repo_path": "src/chebyshevtransform.jl", "max_issues_repo_name": "JuliaApproximation/FastTransforms.jl", "max_issues_repo_head_hexsha": "4199ae9ac7970390da2546aef7f290f4cf675c65", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 113, "max_issues_repo_issues_event_min_datetime": "2019-04-12T20:11:16.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-30T16:16:28.000Z", "max_forks_repo_path": "src/chebyshevtransform.jl", "max_forks_repo_name": "JuliaApproximation/FastTransforms.jl", "max_forks_repo_head_hexsha": "4199ae9ac7970390da2546aef7f290f4cf675c65", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 20, "max_forks_repo_forks_event_min_datetime": "2019-04-23T08:43:45.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-20T15:35:17.000Z", "avg_line_length": 36.4090909091, "max_line_length": 136, "alphanum_fraction": 0.6512021264, "num_tokens": 8775}
|
Set Implicit Arguments.
Require Import Coq.Setoids.Setoid.
Require Import Coq.Arith.EqNat.
Local Open Scope bool_scope.
Definition pk : Set := (nat * nat * nat * nat) %type.
Inductive trace : Set :=
| tr_single : pk -> trace
| tr_cons : pk -> trace -> trace.
Inductive hdr :=
| sw : hdr
| pt : hdr
| src : hdr
| dst : hdr.
Definition val := nat.
Definition upd (h : hdr) (v : val) (p : pk) : pk :=
match (h, p) with
| (sw, (_, v2, v3, v4)) => (v, v2, v3, v4)
| (pt, (v1, _, v3, v4)) => (v1, v, v3, v4)
| (src, (v1, v2, _, v4)) => (v1, v2, v, v4)
| (dst, (v1, v2, v3, _)) => (v1, v2, v3, v)
end.
Definition test (h : hdr) (v : val) (p : pk) : bool :=
match (h, p) with
| (sw, (v1, v2, v3, v4)) => beq_nat v v1
| (pt, (v1, v2, v3, v4)) => beq_nat v v2
| (src, (v1, v2, v3, v4)) => beq_nat v v3
| (dst, (v1, v2, v3, v4)) => beq_nat v v4
end.
Definition head (tr : trace) : pk :=
match tr with
| tr_single pk => pk
| tr_cons pk _ => pk
end.
Definition replace_head (pk : pk) (tr : trace) : trace :=
match tr with
| tr_single _ => tr_single pk
| tr_cons _ tr' => tr_cons pk tr'
end.
Axiom hdr_eqdec : forall (h1 h2 : hdr), { h1 = h2 } + { h1 <> h2 }.
Axiom val_eqdec : forall (v1 v2 : val), { v1 = v2 } + { v1 <> v2 }.
Create HintDb pkt.
Lemma head_replace_head : forall pk a, head (replace_head pk a) = pk.
Proof with auto.
intros.
destruct a...
Qed.
Lemma replace_head2 :
forall pk1 pk2 a,
replace_head pk1 (replace_head pk2 a) = replace_head pk1 a.
Proof with auto.
intros.
destruct a...
Qed.
Hint Rewrite head_replace_head replace_head2 : pkt.
Lemma test_upd_true : forall h n pk, test h n (upd h n pk) = true.
Proof with auto.
intros.
destruct pk0 as [[[sw pt] src] dst].
unfold test.
unfold upd.
destruct h; auto; rewrite <- beq_nat_refl...
Qed.
Lemma test_upd_ignore :
forall h1 h2 m n pk,
h1 <> h2 ->
test h2 n (upd h1 m pk) = test h2 n pk.
Proof with auto.
intros.
destruct pk0 as [[[sw pt] src] dst].
unfold not in H.
destruct h1; destruct h2; try solve[contradiction H; auto];
unfold upd;
unfold test...
Qed.
Lemma test_upd_0 :
forall h m n pk,
m <> n ->
test h m (upd h n pk) = false.
Proof with auto.
intros.
destruct pk0 as [[[sw pt] src] dst].
destruct h; unfold upd; unfold test;
rewrite -> beq_nat_false_iff...
Qed.
Lemma test_true_diff :
forall h m n pk,
true = test h m pk ->
true = test h n pk ->
m = n.
Proof with auto.
intros.
destruct pk0 as [[[sw pt] src] dst].
unfold test in *.
symmetry in H.
symmetry in H0.
destruct h; rewrite -> beq_nat_true_iff in *; subst...
Qed.
Lemma upd_upd_compress :
forall h m n pk,
upd h m (upd h n pk) = upd h m pk.
Proof with auto.
intros.
destruct pk0 as [[[sw pt] src] dst].
unfold upd.
destruct h; simpl...
Qed.
|
{"author": "frenetic-lang", "repo": "featherweight-openflow", "sha": "4470518794e3ed867919d30500be2d0128b1de1c", "save_path": "github-repos/coq/frenetic-lang-featherweight-openflow", "path": "github-repos/coq/frenetic-lang-featherweight-openflow/featherweight-openflow-4470518794e3ed867919d30500be2d0128b1de1c/coq/Netkat/Packet.v"}
|
# This file is auto-generated by AWSMetadata.jl
using AWS
using AWS.AWSServices: database_migration_service
using AWS.Compat
using AWS.UUIDs
"""
AddTagsToResource()
Adds metadata tags to an AWS DMS resource, including replication instance, endpoint, security group, and migration task. These tags can also be used with cost allocation reporting to track cost associated with DMS resources, or used in a Condition statement in an IAM policy for DMS. For more information, see Tag data type description.
# Required Parameters
- `ResourceArn`: Identifies the AWS DMS resource to which tags should be added. The value for this parameter is an Amazon Resource Name (ARN). For AWS DMS, you can tag a replication instance, an endpoint, or a replication task.
- `Tags`: One or more tags to be assigned to the resource.
"""
add_tags_to_resource(ResourceArn, Tags; aws_config::AWSConfig=global_aws_config()) = database_migration_service("AddTagsToResource", Dict{String, Any}("ResourceArn"=>ResourceArn, "Tags"=>Tags); aws_config=aws_config)
add_tags_to_resource(ResourceArn, Tags, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = database_migration_service("AddTagsToResource", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("ResourceArn"=>ResourceArn, "Tags"=>Tags), args)); aws_config=aws_config)
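# Hedged usage sketch, not part of the auto-generated API: the ARN and tag values below are
# hypothetical placeholders. Wrapping the call in a function keeps this file loadable without
# issuing a live request; credentials are assumed to come from global_aws_config().
function _example_add_tags_to_resource()
    resource_arn = "arn:aws:dms:us-east-1:123456789012:rep:EXAMPLE"   # hypothetical ARN
    tags = [Dict("Key" => "Project", "Value" => "dms-migration")]     # tags as Key/Value pairs
    return add_tags_to_resource(resource_arn, tags)
end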
"""
ApplyPendingMaintenanceAction()
Applies a pending maintenance action to a resource (for example, to a replication instance).
# Required Parameters
- `ApplyAction`: The pending maintenance action to apply to this resource.
- `OptInType`: A value that specifies the type of opt-in request, or undoes an opt-in request. You can't undo an opt-in request of type immediate. Valid values: immediate - Apply the maintenance action immediately. next-maintenance - Apply the maintenance action during the next maintenance window for the resource. undo-opt-in - Cancel any existing next-maintenance opt-in requests.
- `ReplicationInstanceArn`: The Amazon Resource Name (ARN) of the AWS DMS resource that the pending maintenance action applies to.
"""
apply_pending_maintenance_action(ApplyAction, OptInType, ReplicationInstanceArn; aws_config::AWSConfig=global_aws_config()) = database_migration_service("ApplyPendingMaintenanceAction", Dict{String, Any}("ApplyAction"=>ApplyAction, "OptInType"=>OptInType, "ReplicationInstanceArn"=>ReplicationInstanceArn); aws_config=aws_config)
apply_pending_maintenance_action(ApplyAction, OptInType, ReplicationInstanceArn, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = database_migration_service("ApplyPendingMaintenanceAction", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("ApplyAction"=>ApplyAction, "OptInType"=>OptInType, "ReplicationInstanceArn"=>ReplicationInstanceArn), args)); aws_config=aws_config)
"""
CancelReplicationTaskAssessmentRun()
Cancels a single premigration assessment run. This operation prevents any individual assessments from running if they haven't started running. It also attempts to cancel any individual assessments that are currently running.
# Required Parameters
- `ReplicationTaskAssessmentRunArn`: Amazon Resource Name (ARN) of the premigration assessment run to be canceled.
"""
cancel_replication_task_assessment_run(ReplicationTaskAssessmentRunArn; aws_config::AWSConfig=global_aws_config()) = database_migration_service("CancelReplicationTaskAssessmentRun", Dict{String, Any}("ReplicationTaskAssessmentRunArn"=>ReplicationTaskAssessmentRunArn); aws_config=aws_config)
cancel_replication_task_assessment_run(ReplicationTaskAssessmentRunArn, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = database_migration_service("CancelReplicationTaskAssessmentRun", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("ReplicationTaskAssessmentRunArn"=>ReplicationTaskAssessmentRunArn), args)); aws_config=aws_config)
"""
CreateEndpoint()
Creates an endpoint using the provided settings.
# Required Parameters
- `EndpointIdentifier`: The database endpoint identifier. Identifiers must begin with a letter and must contain only ASCII letters, digits, and hyphens. They can't end with a hyphen, or contain two consecutive hyphens.
- `EndpointType`: The type of endpoint. Valid values are source and target.
- `EngineName`: The type of engine for the endpoint. Valid values, depending on the EndpointType value, include \"mysql\", \"oracle\", \"postgres\", \"mariadb\", \"aurora\", \"aurora-postgresql\", \"redshift\", \"s3\", \"db2\", \"azuredb\", \"sybase\", \"dynamodb\", \"mongodb\", \"kinesis\", \"kafka\", \"elasticsearch\", \"docdb\", \"sqlserver\", and \"neptune\".
# Optional Parameters
- `CertificateArn`: The Amazon Resource Name (ARN) for the certificate.
- `DatabaseName`: The name of the endpoint database.
- `DmsTransferSettings`: The settings in JSON format for the DMS transfer type of source endpoint. Possible settings include the following: ServiceAccessRoleArn - The IAM role that has permission to access the Amazon S3 bucket. BucketName - The name of the S3 bucket to use. CompressionType - An optional parameter to use GZIP to compress the target files. To use GZIP, set this value to GZIP. To keep the files uncompressed, don't use this value, or set it to NONE (the default). Shorthand syntax for these settings is as follows: ServiceAccessRoleArn=string,BucketName=string,CompressionType=string JSON syntax for these settings is as follows: { \"ServiceAccessRoleArn\": \"string\", \"BucketName\": \"string\", \"CompressionType\": \"none\"|\"gzip\" }
- `DynamoDbSettings`: Settings in JSON format for the target Amazon DynamoDB endpoint. For information about other available settings, see Using Object Mapping to Migrate Data to DynamoDB in the AWS Database Migration Service User Guide.
- `ElasticsearchSettings`: Settings in JSON format for the target Elasticsearch endpoint. For more information about the available settings, see Extra Connection Attributes When Using Elasticsearch as a Target for AWS DMS in the AWS Database Migration Service User Guide.
- `ExternalTableDefinition`: The external table definition.
- `ExtraConnectionAttributes`: Additional attributes associated with the connection. Each attribute is specified as a name-value pair associated by an equal sign (=). Multiple attributes are separated by a semicolon (;) with no additional white space. For information on the attributes available for connecting your source or target endpoint, see Working with AWS DMS Endpoints in the AWS Database Migration Service User Guide.
- `IBMDb2Settings`: Settings in JSON format for the source IBM Db2 LUW endpoint. For information about other available settings, see Extra connection attributes when using Db2 LUW as a source for AWS DMS in the AWS Database Migration Service User Guide.
- `KafkaSettings`: Settings in JSON format for the target Apache Kafka endpoint. For more information about the available settings, see Using Apache Kafka as a Target for AWS Database Migration Service in the AWS Database Migration Service User Guide.
- `KinesisSettings`: Settings in JSON format for the target endpoint for Amazon Kinesis Data Streams. For more information about the available settings, see Using Amazon Kinesis Data Streams as a Target for AWS Database Migration Service in the AWS Database Migration Service User Guide.
- `KmsKeyId`: An AWS KMS key identifier that is used to encrypt the connection parameters for the endpoint. If you don't specify a value for the KmsKeyId parameter, then AWS DMS uses your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS Region.
- `MicrosoftSQLServerSettings`: Settings in JSON format for the source and target Microsoft SQL Server endpoint. For information about other available settings, see Extra connection attributes when using SQL Server as a source for AWS DMS and Extra connection attributes when using SQL Server as a target for AWS DMS in the AWS Database Migration Service User Guide.
- `MongoDbSettings`: Settings in JSON format for the source MongoDB endpoint. For more information about the available settings, see Using MongoDB as a Source for AWS Database Migration Service in the AWS Database Migration Service User Guide.
- `MySQLSettings`: Settings in JSON format for the source and target MySQL endpoint. For information about other available settings, see Extra connection attributes when using MySQL as a source for AWS DMS and Extra connection attributes when using a MySQL-compatible database as a target for AWS DMS in the AWS Database Migration Service User Guide.
- `NeptuneSettings`: Settings in JSON format for the target Amazon Neptune endpoint. For more information about the available settings, see Specifying Endpoint Settings for Amazon Neptune as a Target in the AWS Database Migration Service User Guide.
- `OracleSettings`: Settings in JSON format for the source and target Oracle endpoint. For information about other available settings, see Extra connection attributes when using Oracle as a source for AWS DMS and Extra connection attributes when using Oracle as a target for AWS DMS in the AWS Database Migration Service User Guide.
- `Password`: The password to be used to log in to the endpoint database.
- `Port`: The port used by the endpoint database.
- `PostgreSQLSettings`: Settings in JSON format for the source and target PostgreSQL endpoint. For information about other available settings, see Extra connection attributes when using PostgreSQL as a source for AWS DMS and Extra connection attributes when using PostgreSQL as a target for AWS DMS in the AWS Database Migration Service User Guide.
- `RedshiftSettings`:
- `ResourceIdentifier`: A friendly name for the resource identifier at the end of the EndpointArn response parameter that is returned in the created Endpoint object. The value for this parameter can have up to 31 characters. It can contain only ASCII letters, digits, and hyphen ('-'). Also, it can't end with a hyphen or contain two consecutive hyphens, and can only begin with a letter, such as Example-App-ARN1. For example, this value might result in the EndpointArn value arn:aws:dms:eu-west-1:012345678901:rep:Example-App-ARN1. If you don't specify a ResourceIdentifier value, AWS DMS generates a default identifier value for the end of EndpointArn.
- `S3Settings`: Settings in JSON format for the target Amazon S3 endpoint. For more information about the available settings, see Extra Connection Attributes When Using Amazon S3 as a Target for AWS DMS in the AWS Database Migration Service User Guide.
- `ServerName`: The name of the server where the endpoint database resides.
- `ServiceAccessRoleArn`: The Amazon Resource Name (ARN) for the service access role that you want to use to create the endpoint.
- `SslMode`: The Secure Sockets Layer (SSL) mode to use for the SSL connection. The default is none.
- `SybaseSettings`: Settings in JSON format for the source and target SAP ASE endpoint. For information about other available settings, see Extra connection attributes when using SAP ASE as a source for AWS DMS and Extra connection attributes when using SAP ASE as a target for AWS DMS in the AWS Database Migration Service User Guide.
- `Tags`: One or more tags to be assigned to the endpoint.
- `Username`: The user name to be used to log in to the endpoint database.
"""
create_endpoint(EndpointIdentifier, EndpointType, EngineName; aws_config::AWSConfig=global_aws_config()) = database_migration_service("CreateEndpoint", Dict{String, Any}("EndpointIdentifier"=>EndpointIdentifier, "EndpointType"=>EndpointType, "EngineName"=>EngineName); aws_config=aws_config)
create_endpoint(EndpointIdentifier, EndpointType, EngineName, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = database_migration_service("CreateEndpoint", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("EndpointIdentifier"=>EndpointIdentifier, "EndpointType"=>EndpointType, "EngineName"=>EngineName), args)); aws_config=aws_config)
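# Hedged usage sketch with hypothetical connection details: optional parameters such as
# ServerName, Port, Username and Password are supplied through the args dict accepted by the
# second method above. Illustration only, not part of the generated API.
function _example_create_mysql_source_endpoint()
    optional = Dict{String, Any}(
        "ServerName" => "db.example.com",   # hypothetical host name
        "Port"       => 3306,
        "Username"   => "dms_user",
        "Password"   => "REPLACE_ME",
    )
    return create_endpoint("example-src-endpoint", "source", "mysql", optional)
end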
"""
CreateEventSubscription()
Creates an AWS DMS event notification subscription. You can specify the type of source (SourceType) you want to be notified of, provide a list of AWS DMS source IDs (SourceIds) that triggers the events, and provide a list of event categories (EventCategories) for events you want to be notified of. If you specify both the SourceType and SourceIds, such as SourceType = replication-instance and SourceIdentifier = my-replinstance, you will be notified of all the replication instance events for the specified source. If you specify a SourceType but don't specify a SourceIdentifier, you receive notice of the events for that source type for all your AWS DMS sources. If you don't specify either SourceType or SourceIdentifier, you will be notified of events generated from all AWS DMS sources belonging to your customer account. For more information about AWS DMS events, see Working with Events and Notifications in the AWS Database Migration Service User Guide.
# Required Parameters
- `SnsTopicArn`: The Amazon Resource Name (ARN) of the Amazon SNS topic created for event notification. The ARN is created by Amazon SNS when you create a topic and subscribe to it.
- `SubscriptionName`: The name of the AWS DMS event notification subscription. This name must be less than 255 characters.
# Optional Parameters
- `Enabled`: A Boolean value; set to true to activate the subscription, or set to false to create the subscription but not activate it.
- `EventCategories`: A list of event categories for a source type that you want to subscribe to. For more information, see Working with Events and Notifications in the AWS Database Migration Service User Guide.
- `SourceIds`: A list of identifiers for which AWS DMS provides notification events. If you don't specify a value, notifications are provided for all sources. If you specify multiple values, they must be of the same type. For example, if you specify a database instance ID, then all of the other values must be database instance IDs.
- `SourceType`: The type of AWS DMS resource that generates the events. For example, if you want to be notified of events generated by a replication instance, you set this parameter to replication-instance. If this value isn't specified, all events are returned. Valid values: replication-instance | replication-task
- `Tags`: One or more tags to be assigned to the event subscription.
"""
create_event_subscription(SnsTopicArn, SubscriptionName; aws_config::AWSConfig=global_aws_config()) = database_migration_service("CreateEventSubscription", Dict{String, Any}("SnsTopicArn"=>SnsTopicArn, "SubscriptionName"=>SubscriptionName); aws_config=aws_config)
create_event_subscription(SnsTopicArn, SubscriptionName, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = database_migration_service("CreateEventSubscription", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("SnsTopicArn"=>SnsTopicArn, "SubscriptionName"=>SubscriptionName), args)); aws_config=aws_config)
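# Hedged usage sketch with a hypothetical SNS topic and subscription name: optional parameters
# such as SourceType and Enabled are passed via the args dict variant. Illustration only.
function _example_create_event_subscription()
    optional = Dict{String, Any}(
        "SourceType" => "replication-instance",
        "Enabled"    => true,
    )
    return create_event_subscription(
        "arn:aws:sns:us-east-1:123456789012:dms-events",   # hypothetical topic ARN
        "example-dms-subscription",
        optional,
    )
end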
"""
CreateReplicationInstance()
Creates the replication instance using the specified parameters. AWS DMS requires that your account have certain roles with appropriate permissions before you can create a replication instance. For information on the required roles, see Creating the IAM Roles to Use With the AWS CLI and AWS DMS API. For information on the required permissions, see IAM Permissions Needed to Use AWS DMS.
# Required Parameters
- `ReplicationInstanceClass`: The compute and memory capacity of the replication instance as defined for the specified replication instance class. For example, to specify the instance class dms.c4.large, set this parameter to \"dms.c4.large\". For more information on the settings and capacities for the available replication instance classes, see Selecting the right AWS DMS replication instance for your migration.
- `ReplicationInstanceIdentifier`: The replication instance identifier. This parameter is stored as a lowercase string. Constraints: Must contain 1-63 alphanumeric characters or hyphens. First character must be a letter. Can't end with a hyphen or contain two consecutive hyphens. Example: myrepinstance
# Optional Parameters
- `AllocatedStorage`: The amount of storage (in gigabytes) to be initially allocated for the replication instance.
- `AutoMinorVersionUpgrade`: A value that indicates whether minor engine upgrades are applied automatically to the replication instance during the maintenance window. This parameter defaults to true. Default: true
- `AvailabilityZone`: The Availability Zone where the replication instance will be created. The default value is a random, system-chosen Availability Zone in the endpoint's AWS Region, for example: us-east-1d
- `DnsNameServers`: A list of custom DNS name servers supported for the replication instance to access your on-premise source or target database. This list overrides the default name servers supported by the replication instance. You can specify a comma-separated list of internet addresses for up to four on-premise DNS name servers. For example: \"1.1.1.1,2.2.2.2,3.3.3.3,4.4.4.4\"
- `EngineVersion`: The engine version number of the replication instance. If an engine version number is not specified when a replication instance is created, the default is the latest engine version available.
- `KmsKeyId`: An AWS KMS key identifier that is used to encrypt the data on the replication instance. If you don't specify a value for the KmsKeyId parameter, then AWS DMS uses your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS Region.
- `MultiAZ`: Specifies whether the replication instance is a Multi-AZ deployment. You can't set the AvailabilityZone parameter if the Multi-AZ parameter is set to true.
- `PreferredMaintenanceWindow`: The weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC). Format: ddd:hh24:mi-ddd:hh24:mi Default: A 30-minute window selected at random from an 8-hour block of time per AWS Region, occurring on a random day of the week. Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun Constraints: Minimum 30-minute window.
- `PubliclyAccessible`: Specifies the accessibility options for the replication instance. A value of true represents an instance with a public IP address. A value of false represents an instance with a private IP address. The default value is true.
- `ReplicationSubnetGroupIdentifier`: A subnet group to associate with the replication instance.
- `ResourceIdentifier`: A friendly name for the resource identifier at the end of the EndpointArn response parameter that is returned in the created Endpoint object. The value for this parameter can have up to 31 characters. It can contain only ASCII letters, digits, and hyphen ('-'). Also, it can't end with a hyphen or contain two consecutive hyphens, and can only begin with a letter, such as Example-App-ARN1. For example, this value might result in the EndpointArn value arn:aws:dms:eu-west-1:012345678901:rep:Example-App-ARN1. If you don't specify a ResourceIdentifier value, AWS DMS generates a default identifier value for the end of EndpointArn.
- `Tags`: One or more tags to be assigned to the replication instance.
- `VpcSecurityGroupIds`: Specifies the VPC security group to be used with the replication instance. The VPC security group must work with the VPC containing the replication instance.
"""
create_replication_instance(ReplicationInstanceClass, ReplicationInstanceIdentifier; aws_config::AWSConfig=global_aws_config()) = database_migration_service("CreateReplicationInstance", Dict{String, Any}("ReplicationInstanceClass"=>ReplicationInstanceClass, "ReplicationInstanceIdentifier"=>ReplicationInstanceIdentifier); aws_config=aws_config)
create_replication_instance(ReplicationInstanceClass, ReplicationInstanceIdentifier, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = database_migration_service("CreateReplicationInstance", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("ReplicationInstanceClass"=>ReplicationInstanceClass, "ReplicationInstanceIdentifier"=>ReplicationInstanceIdentifier), args)); aws_config=aws_config)
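# Hedged usage sketch with hypothetical identifiers: a Multi-AZ instance with extra storage,
# using the args dict method for the optional parameters. Illustration only.
function _example_create_replication_instance()
    optional = Dict{String, Any}(
        "AllocatedStorage"   => 100,    # gigabytes
        "MultiAZ"            => true,
        "PubliclyAccessible" => false,
    )
    return create_replication_instance("dms.c4.large", "example-repl-instance", optional)
end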
"""
CreateReplicationSubnetGroup()
Creates a replication subnet group given a list of the subnet IDs in a VPC.
# Required Parameters
- `ReplicationSubnetGroupDescription`: The description for the subnet group.
- `ReplicationSubnetGroupIdentifier`: The name for the replication subnet group. This value is stored as a lowercase string. Constraints: Must contain no more than 255 alphanumeric characters, periods, spaces, underscores, or hyphens. Must not be \"default\". Example: mySubnetgroup
- `SubnetIds`: One or more subnet IDs to be assigned to the subnet group.
# Optional Parameters
- `Tags`: One or more tags to be assigned to the subnet group.
"""
create_replication_subnet_group(ReplicationSubnetGroupDescription, ReplicationSubnetGroupIdentifier, SubnetIds; aws_config::AWSConfig=global_aws_config()) = database_migration_service("CreateReplicationSubnetGroup", Dict{String, Any}("ReplicationSubnetGroupDescription"=>ReplicationSubnetGroupDescription, "ReplicationSubnetGroupIdentifier"=>ReplicationSubnetGroupIdentifier, "SubnetIds"=>SubnetIds); aws_config=aws_config)
create_replication_subnet_group(ReplicationSubnetGroupDescription, ReplicationSubnetGroupIdentifier, SubnetIds, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = database_migration_service("CreateReplicationSubnetGroup", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("ReplicationSubnetGroupDescription"=>ReplicationSubnetGroupDescription, "ReplicationSubnetGroupIdentifier"=>ReplicationSubnetGroupIdentifier, "SubnetIds"=>SubnetIds), args)); aws_config=aws_config)
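# Hedged usage sketch with hypothetical subnet IDs: SubnetIds is passed as a vector of strings.
# Illustration only, not part of the generated API.
function _example_create_replication_subnet_group()
    subnet_ids = ["subnet-0abc1234", "subnet-0def5678"]   # hypothetical subnet IDs
    return create_replication_subnet_group(
        "Example subnet group for DMS",
        "example-subnet-group",
        subnet_ids,
    )
end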
"""
CreateReplicationTask()
Creates a replication task using the specified parameters.
# Required Parameters
- `MigrationType`: The migration type. Valid values: full-load | cdc | full-load-and-cdc
- `ReplicationInstanceArn`: The Amazon Resource Name (ARN) of a replication instance.
- `ReplicationTaskIdentifier`: An identifier for the replication task. Constraints: Must contain 1-255 alphanumeric characters or hyphens. First character must be a letter. Cannot end with a hyphen or contain two consecutive hyphens.
- `SourceEndpointArn`: An Amazon Resource Name (ARN) that uniquely identifies the source endpoint.
- `TableMappings`: The table mappings for the task, in JSON format. For more information, see Using Table Mapping to Specify Task Settings in the AWS Database Migration Service User Guide.
- `TargetEndpointArn`: An Amazon Resource Name (ARN) that uniquely identifies the target endpoint.
# Optional Parameters
- `CdcStartPosition`: Indicates when you want a change data capture (CDC) operation to start. Use either CdcStartPosition or CdcStartTime to specify when you want a CDC operation to start. Specifying both values results in an error. The value can be in date, checkpoint, or LSN/SCN format. Date Example: --cdc-start-position “2018-03-08T12:12:12” Checkpoint Example: --cdc-start-position \"checkpoint:V1#27#mysql-bin-changelog.157832:1975:-1:2002:677883278264080:mysql-bin-changelog.157832:1876#0#0#*#0#93\" LSN Example: --cdc-start-position “mysql-bin-changelog.000024:373” When you use this task setting with a source PostgreSQL database, a logical replication slot should already be created and associated with the source endpoint. You can verify this by setting the slotName extra connection attribute to the name of this logical replication slot. For more information, see Extra Connection Attributes When Using PostgreSQL as a Source for AWS DMS.
- `CdcStartTime`: Indicates the start time for a change data capture (CDC) operation. Use either CdcStartTime or CdcStartPosition to specify when you want a CDC operation to start. Specifying both values results in an error. Timestamp Example: --cdc-start-time “2018-03-08T12:12:12”
- `CdcStopPosition`: Indicates when you want a change data capture (CDC) operation to stop. The value can be either server time or commit time. Server time example: --cdc-stop-position “server_time:2018-02-09T12:12:12” Commit time example: --cdc-stop-position “commit_time: 2018-02-09T12:12:12”
- `ReplicationTaskSettings`: Overall settings for the task, in JSON format. For more information, see Specifying Task Settings for AWS Database Migration Service Tasks in the AWS Database Migration User Guide.
- `ResourceIdentifier`: A friendly name for the resource identifier at the end of the EndpointArn response parameter that is returned in the created Endpoint object. The value for this parameter can have up to 31 characters. It can contain only ASCII letters, digits, and hyphen ('-'). Also, it can't end with a hyphen or contain two consecutive hyphens, and can only begin with a letter, such as Example-App-ARN1. For example, this value might result in the EndpointArn value arn:aws:dms:eu-west-1:012345678901:rep:Example-App-ARN1. If you don't specify a ResourceIdentifier value, AWS DMS generates a default identifier value for the end of EndpointArn.
- `Tags`: One or more tags to be assigned to the replication task.
- `TaskData`: Supplemental information that the task requires to migrate the data for certain source and target endpoints. For more information, see Specifying Supplemental Data for Task Settings in the AWS Database Migration Service User Guide.
"""
create_replication_task(MigrationType, ReplicationInstanceArn, ReplicationTaskIdentifier, SourceEndpointArn, TableMappings, TargetEndpointArn; aws_config::AWSConfig=global_aws_config()) = database_migration_service("CreateReplicationTask", Dict{String, Any}("MigrationType"=>MigrationType, "ReplicationInstanceArn"=>ReplicationInstanceArn, "ReplicationTaskIdentifier"=>ReplicationTaskIdentifier, "SourceEndpointArn"=>SourceEndpointArn, "TableMappings"=>TableMappings, "TargetEndpointArn"=>TargetEndpointArn); aws_config=aws_config)
create_replication_task(MigrationType, ReplicationInstanceArn, ReplicationTaskIdentifier, SourceEndpointArn, TableMappings, TargetEndpointArn, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = database_migration_service("CreateReplicationTask", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("MigrationType"=>MigrationType, "ReplicationInstanceArn"=>ReplicationInstanceArn, "ReplicationTaskIdentifier"=>ReplicationTaskIdentifier, "SourceEndpointArn"=>SourceEndpointArn, "TableMappings"=>TableMappings, "TargetEndpointArn"=>TargetEndpointArn), args)); aws_config=aws_config)
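# Hedged usage sketch with hypothetical ARNs and a minimal table mapping: TableMappings is a
# JSON string; the single selection rule below includes every table in every schema.
# Illustration only, not part of the generated API.
function _example_create_replication_task()
    table_mappings = """{"rules":[{"rule-type":"selection","rule-id":"1","rule-name":"1","object-locator":{"schema-name":"%","table-name":"%"},"rule-action":"include"}]}"""
    return create_replication_task(
        "full-load",
        "arn:aws:dms:us-east-1:123456789012:rep:EXAMPLE",          # hypothetical replication instance ARN
        "example-task",
        "arn:aws:dms:us-east-1:123456789012:endpoint:SRCEXAMPLE",  # hypothetical source endpoint ARN
        table_mappings,
        "arn:aws:dms:us-east-1:123456789012:endpoint:TGTEXAMPLE",  # hypothetical target endpoint ARN
    )
end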
"""
DeleteCertificate()
Deletes the specified certificate.
# Required Parameters
- `CertificateArn`: The Amazon Resource Name (ARN) of the deleted certificate.
"""
delete_certificate(CertificateArn; aws_config::AWSConfig=global_aws_config()) = database_migration_service("DeleteCertificate", Dict{String, Any}("CertificateArn"=>CertificateArn); aws_config=aws_config)
delete_certificate(CertificateArn, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = database_migration_service("DeleteCertificate", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("CertificateArn"=>CertificateArn), args)); aws_config=aws_config)
"""
DeleteConnection()
Deletes the connection between a replication instance and an endpoint.
# Required Parameters
- `EndpointArn`: The Amazon Resource Name (ARN) string that uniquely identifies the endpoint.
- `ReplicationInstanceArn`: The Amazon Resource Name (ARN) of the replication instance.
"""
delete_connection(EndpointArn, ReplicationInstanceArn; aws_config::AWSConfig=global_aws_config()) = database_migration_service("DeleteConnection", Dict{String, Any}("EndpointArn"=>EndpointArn, "ReplicationInstanceArn"=>ReplicationInstanceArn); aws_config=aws_config)
delete_connection(EndpointArn, ReplicationInstanceArn, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = database_migration_service("DeleteConnection", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("EndpointArn"=>EndpointArn, "ReplicationInstanceArn"=>ReplicationInstanceArn), args)); aws_config=aws_config)
"""
DeleteEndpoint()
Deletes the specified endpoint. All tasks associated with the endpoint must be deleted before you can delete the endpoint.
# Required Parameters
- `EndpointArn`: The Amazon Resource Name (ARN) string that uniquely identifies the endpoint.
"""
delete_endpoint(EndpointArn; aws_config::AWSConfig=global_aws_config()) = database_migration_service("DeleteEndpoint", Dict{String, Any}("EndpointArn"=>EndpointArn); aws_config=aws_config)
delete_endpoint(EndpointArn, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = database_migration_service("DeleteEndpoint", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("EndpointArn"=>EndpointArn), args)); aws_config=aws_config)
"""
DeleteEventSubscription()
Deletes an AWS DMS event subscription.
# Required Parameters
- `SubscriptionName`: The name of the DMS event notification subscription to be deleted.
"""
delete_event_subscription(SubscriptionName; aws_config::AWSConfig=global_aws_config()) = database_migration_service("DeleteEventSubscription", Dict{String, Any}("SubscriptionName"=>SubscriptionName); aws_config=aws_config)
delete_event_subscription(SubscriptionName, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = database_migration_service("DeleteEventSubscription", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("SubscriptionName"=>SubscriptionName), args)); aws_config=aws_config)
"""
DeleteReplicationInstance()
Deletes the specified replication instance. You must delete any migration tasks that are associated with the replication instance before you can delete it.
# Required Parameters
- `ReplicationInstanceArn`: The Amazon Resource Name (ARN) of the replication instance to be deleted.
"""
delete_replication_instance(ReplicationInstanceArn; aws_config::AWSConfig=global_aws_config()) = database_migration_service("DeleteReplicationInstance", Dict{String, Any}("ReplicationInstanceArn"=>ReplicationInstanceArn); aws_config=aws_config)
delete_replication_instance(ReplicationInstanceArn, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = database_migration_service("DeleteReplicationInstance", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("ReplicationInstanceArn"=>ReplicationInstanceArn), args)); aws_config=aws_config)
"""
DeleteReplicationSubnetGroup()
Deletes a subnet group.
# Required Parameters
- `ReplicationSubnetGroupIdentifier`: The subnet group name of the replication instance.
"""
delete_replication_subnet_group(ReplicationSubnetGroupIdentifier; aws_config::AWSConfig=global_aws_config()) = database_migration_service("DeleteReplicationSubnetGroup", Dict{String, Any}("ReplicationSubnetGroupIdentifier"=>ReplicationSubnetGroupIdentifier); aws_config=aws_config)
delete_replication_subnet_group(ReplicationSubnetGroupIdentifier, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = database_migration_service("DeleteReplicationSubnetGroup", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("ReplicationSubnetGroupIdentifier"=>ReplicationSubnetGroupIdentifier), args)); aws_config=aws_config)
"""
DeleteReplicationTask()
Deletes the specified replication task.
# Required Parameters
- `ReplicationTaskArn`: The Amazon Resource Name (ARN) of the replication task to be deleted.
"""
delete_replication_task(ReplicationTaskArn; aws_config::AWSConfig=global_aws_config()) = database_migration_service("DeleteReplicationTask", Dict{String, Any}("ReplicationTaskArn"=>ReplicationTaskArn); aws_config=aws_config)
delete_replication_task(ReplicationTaskArn, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = database_migration_service("DeleteReplicationTask", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("ReplicationTaskArn"=>ReplicationTaskArn), args)); aws_config=aws_config)
"""
DeleteReplicationTaskAssessmentRun()
Deletes the record of a single premigration assessment run. This operation removes all metadata that AWS DMS maintains about this assessment run. However, the operation leaves untouched all information about this assessment run that is stored in your Amazon S3 bucket.
# Required Parameters
- `ReplicationTaskAssessmentRunArn`: Amazon Resource Name (ARN) of the premigration assessment run to be deleted.
"""
delete_replication_task_assessment_run(ReplicationTaskAssessmentRunArn; aws_config::AWSConfig=global_aws_config()) = database_migration_service("DeleteReplicationTaskAssessmentRun", Dict{String, Any}("ReplicationTaskAssessmentRunArn"=>ReplicationTaskAssessmentRunArn); aws_config=aws_config)
delete_replication_task_assessment_run(ReplicationTaskAssessmentRunArn, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = database_migration_service("DeleteReplicationTaskAssessmentRun", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("ReplicationTaskAssessmentRunArn"=>ReplicationTaskAssessmentRunArn), args)); aws_config=aws_config)
"""
DescribeAccountAttributes()
Lists all of the AWS DMS attributes for a customer account. These attributes include AWS DMS quotas for the account and a unique account identifier in a particular DMS region. DMS quotas include a list of resource quotas supported by the account, such as the number of replication instances allowed. The description for each resource quota includes the quota name, current usage toward that quota, and the quota's maximum value. DMS uses the unique account identifier to name each artifact used by DMS in the given region. This command does not take any parameters.
"""
describe_account_attributes(; aws_config::AWSConfig=global_aws_config()) = database_migration_service("DescribeAccountAttributes"; aws_config=aws_config)
describe_account_attributes(args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = database_migration_service("DescribeAccountAttributes", args; aws_config=aws_config)
"""
DescribeApplicableIndividualAssessments()
Provides a list of individual assessments that you can specify for a new premigration assessment run, given one or more parameters. If you specify an existing migration task, this operation provides the default individual assessments you can specify for that task. Otherwise, the specified parameters model elements of a possible migration task on which to base a premigration assessment run. To use these migration task modeling parameters, you must specify an existing replication instance, a source database engine, a target database engine, and a migration type. This combination of parameters potentially limits the default individual assessments available for an assessment run created for a corresponding migration task. If you specify no parameters, this operation provides a list of all possible individual assessments that you can specify for an assessment run. If you specify any one of the task modeling parameters, you must specify all of them or the operation cannot provide a list of individual assessments. The only parameter that you can specify alone is for an existing migration task. The specified task definition then determines the default list of individual assessments that you can specify in an assessment run for the task.
# Optional Parameters
- `Marker`: Optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.
- `MaxRecords`: Maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.
- `MigrationType`: Name of the migration type that each provided individual assessment must support.
- `ReplicationInstanceArn`: ARN of a replication instance on which you want to base the default list of individual assessments.
- `ReplicationTaskArn`: Amazon Resource Name (ARN) of a migration task on which you want to base the default list of individual assessments.
- `SourceEngineName`: Name of a database engine that the specified replication instance supports as a source.
- `TargetEngineName`: Name of a database engine that the specified replication instance supports as a target.
"""
describe_applicable_individual_assessments(; aws_config::AWSConfig=global_aws_config()) = database_migration_service("DescribeApplicableIndividualAssessments"; aws_config=aws_config)
describe_applicable_individual_assessments(args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = database_migration_service("DescribeApplicableIndividualAssessments", args; aws_config=aws_config)
"""
DescribeCertificates()
Provides a description of the certificate.
# Optional Parameters
- `Filters`: Filters applied to the certificates described in the form of key-value pairs.
- `Marker`: An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.
- `MaxRecords`: The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved. Default: 10
"""
describe_certificates(; aws_config::AWSConfig=global_aws_config()) = database_migration_service("DescribeCertificates"; aws_config=aws_config)
describe_certificates(args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = database_migration_service("DescribeCertificates", args; aws_config=aws_config)
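# Hedged usage sketch: a key/value filter plus a smaller page size through the optional-args
# dict. The "certificate-arn" filter name and the Name/Values filter shape are assumptions;
# the call shape matches the second method above. Illustration only.
function _example_describe_certificates_filtered()
    optional = Dict{String, Any}(
        "Filters" => [Dict("Name" => "certificate-arn",
                           "Values" => ["arn:aws:dms:us-east-1:123456789012:cert:EXAMPLE"])],
        "MaxRecords" => 20,
    )
    return describe_certificates(optional)
end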
"""
DescribeConnections()
Describes the status of the connections that have been made between the replication instance and an endpoint. Connections are created when you test an endpoint.
# Optional Parameters
- `Filters`: The filters applied to the connection. Valid filter names: endpoint-arn | replication-instance-arn
- `Marker`: An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.
- `MaxRecords`: The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved. Default: 100 Constraints: Minimum 20, maximum 100.
"""
describe_connections(; aws_config::AWSConfig=global_aws_config()) = database_migration_service("DescribeConnections"; aws_config=aws_config)
describe_connections(args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = database_migration_service("DescribeConnections", args; aws_config=aws_config)
"""
DescribeEndpointTypes()
Returns information about the type of endpoints available.
# Optional Parameters
- `Filters`: Filters applied to the endpoint types. Valid filter names: engine-name | endpoint-type
- `Marker`: An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.
- `MaxRecords`: The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved. Default: 100 Constraints: Minimum 20, maximum 100.
"""
describe_endpoint_types(; aws_config::AWSConfig=global_aws_config()) = database_migration_service("DescribeEndpointTypes"; aws_config=aws_config)
describe_endpoint_types(args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = database_migration_service("DescribeEndpointTypes", args; aws_config=aws_config)
"""
DescribeEndpoints()
Returns information about the endpoints for your account in the current region.
# Optional Parameters
- `Filters`: Filters applied to the endpoints. Valid filter names: endpoint-arn | endpoint-type | endpoint-id | engine-name
- `Marker`: An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.
- `MaxRecords`: The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved. Default: 100 Constraints: Minimum 20, maximum 100.
"""
describe_endpoints(; aws_config::AWSConfig=global_aws_config()) = database_migration_service("DescribeEndpoints"; aws_config=aws_config)
describe_endpoints(args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = database_migration_service("DescribeEndpoints", args; aws_config=aws_config)
"""
DescribeEventCategories()
Lists categories for all event source types, or, if specified, for a specified source type. You can see a list of the event categories and source types in Working with Events and Notifications in the AWS Database Migration Service User Guide.
# Optional Parameters
- `Filters`: Filters applied to the event categories.
- `SourceType`: The type of AWS DMS resource that generates events. Valid values: replication-instance | replication-task
"""
describe_event_categories(; aws_config::AWSConfig=global_aws_config()) = database_migration_service("DescribeEventCategories"; aws_config=aws_config)
describe_event_categories(args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = database_migration_service("DescribeEventCategories", args; aws_config=aws_config)
"""
DescribeEventSubscriptions()
Lists all the event subscriptions for a customer account. The description of a subscription includes SubscriptionName, SNSTopicARN, CustomerID, SourceType, SourceID, CreationTime, and Status. If you specify SubscriptionName, this action lists the description for that subscription.
# Optional Parameters
- `Filters`: Filters applied to event subscriptions.
- `Marker`: An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.
- `MaxRecords`: The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved. Default: 100 Constraints: Minimum 20, maximum 100.
- `SubscriptionName`: The name of the AWS DMS event subscription to be described.
"""
describe_event_subscriptions(; aws_config::AWSConfig=global_aws_config()) = database_migration_service("DescribeEventSubscriptions"; aws_config=aws_config)
describe_event_subscriptions(args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = database_migration_service("DescribeEventSubscriptions", args; aws_config=aws_config)
"""
DescribeEvents()
Lists events for a given source identifier and source type. You can also specify a start and end time. For more information on AWS DMS events, see Working with Events and Notifications in the AWS Database Migration User Guide.
# Optional Parameters
- `Duration`: The duration of the events to be listed.
- `EndTime`: The end time for the events to be listed.
- `EventCategories`: A list of event categories for the source type that you've chosen.
- `Filters`: Filters applied to events.
- `Marker`: An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.
- `MaxRecords`: The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved. Default: 100 Constraints: Minimum 20, maximum 100.
- `SourceIdentifier`: The identifier of an event source.
- `SourceType`: The type of AWS DMS resource that generates events. Valid values: replication-instance | replication-task
- `StartTime`: The start time for the events to be listed.
"""
describe_events(; aws_config::AWSConfig=global_aws_config()) = database_migration_service("DescribeEvents"; aws_config=aws_config)
describe_events(args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = database_migration_service("DescribeEvents", args; aws_config=aws_config)
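# Hedged usage sketch with a hypothetical source identifier: restrict events to one replication
# instance over a recent window via the optional-parameter dict. The Duration unit is assumed
# to be minutes. Illustration only.
function _example_describe_recent_instance_events()
    optional = Dict{String, Any}(
        "SourceType"       => "replication-instance",
        "SourceIdentifier" => "example-repl-instance",   # hypothetical identifier
        "Duration"         => 1440,                      # assumed minutes (last 24 hours)
    )
    return describe_events(optional)
end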
"""
DescribeOrderableReplicationInstances()
Returns information about the replication instance types that can be created in the specified region.
# Optional Parameters
- `Marker`: An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.
- `MaxRecords`: The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved. Default: 100 Constraints: Minimum 20, maximum 100.
"""
describe_orderable_replication_instances(; aws_config::AWSConfig=global_aws_config()) = database_migration_service("DescribeOrderableReplicationInstances"; aws_config=aws_config)
describe_orderable_replication_instances(args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = database_migration_service("DescribeOrderableReplicationInstances", args; aws_config=aws_config)
"""
DescribePendingMaintenanceActions()
For internal use only.
# Optional Parameters
- `Filters`:
- `Marker`: An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.
- `MaxRecords`: The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved. Default: 100 Constraints: Minimum 20, maximum 100.
- `ReplicationInstanceArn`: The Amazon Resource Name (ARN) of the replication instance.
"""
describe_pending_maintenance_actions(; aws_config::AWSConfig=global_aws_config()) = database_migration_service("DescribePendingMaintenanceActions"; aws_config=aws_config)
describe_pending_maintenance_actions(args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = database_migration_service("DescribePendingMaintenanceActions", args; aws_config=aws_config)
"""
DescribeRefreshSchemasStatus()
Returns the status of the RefreshSchemas operation.
# Required Parameters
- `EndpointArn`: The Amazon Resource Name (ARN) string that uniquely identifies the endpoint.
"""
describe_refresh_schemas_status(EndpointArn; aws_config::AWSConfig=global_aws_config()) = database_migration_service("DescribeRefreshSchemasStatus", Dict{String, Any}("EndpointArn"=>EndpointArn); aws_config=aws_config)
describe_refresh_schemas_status(EndpointArn, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = database_migration_service("DescribeRefreshSchemasStatus", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("EndpointArn"=>EndpointArn), args)); aws_config=aws_config)
"""
DescribeReplicationInstanceTaskLogs()
Returns information about the task logs for the specified task.
# Required Parameters
- `ReplicationInstanceArn`: The Amazon Resource Name (ARN) of the replication instance.
# Optional Parameters
- `Marker`: An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.
- `MaxRecords`: The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved. Default: 100 Constraints: Minimum 20, maximum 100.
"""
describe_replication_instance_task_logs(ReplicationInstanceArn; aws_config::AWSConfig=global_aws_config()) = database_migration_service("DescribeReplicationInstanceTaskLogs", Dict{String, Any}("ReplicationInstanceArn"=>ReplicationInstanceArn); aws_config=aws_config)
describe_replication_instance_task_logs(ReplicationInstanceArn, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = database_migration_service("DescribeReplicationInstanceTaskLogs", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("ReplicationInstanceArn"=>ReplicationInstanceArn), args)); aws_config=aws_config)
"""
DescribeReplicationInstances()
Returns information about replication instances for your account in the current region.
# Optional Parameters
- `Filters`: Filters applied to replication instances. Valid filter names: replication-instance-arn | replication-instance-id | replication-instance-class | engine-version
- `Marker`: An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.
- `MaxRecords`: The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved. Default: 100 Constraints: Minimum 20, maximum 100.
"""
describe_replication_instances(; aws_config::AWSConfig=global_aws_config()) = database_migration_service("DescribeReplicationInstances"; aws_config=aws_config)
describe_replication_instances(args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = database_migration_service("DescribeReplicationInstances", args; aws_config=aws_config)
"""
DescribeReplicationSubnetGroups()
Returns information about the replication subnet groups.
# Optional Parameters
- `Filters`: Filters applied to replication subnet groups. Valid filter names: replication-subnet-group-id
- `Marker`: An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.
- `MaxRecords`: The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved. Default: 100 Constraints: Minimum 20, maximum 100.
"""
describe_replication_subnet_groups(; aws_config::AWSConfig=global_aws_config()) = database_migration_service("DescribeReplicationSubnetGroups"; aws_config=aws_config)
describe_replication_subnet_groups(args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = database_migration_service("DescribeReplicationSubnetGroups", args; aws_config=aws_config)
"""
DescribeReplicationTaskAssessmentResults()
Returns the task assessment results from Amazon S3. This action always returns the latest results.
# Optional Parameters
- `Marker`: An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.
- `MaxRecords`: The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved. Default: 100 Constraints: Minimum 20, maximum 100.
- `ReplicationTaskArn`: The Amazon Resource Name (ARN) string that uniquely identifies the task. When this input parameter is specified, the API returns only one result and ignores the values of the MaxRecords and Marker parameters.
"""
describe_replication_task_assessment_results(; aws_config::AWSConfig=global_aws_config()) = database_migration_service("DescribeReplicationTaskAssessmentResults"; aws_config=aws_config)
describe_replication_task_assessment_results(args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = database_migration_service("DescribeReplicationTaskAssessmentResults", args; aws_config=aws_config)
"""
DescribeReplicationTaskAssessmentRuns()
Returns a paginated list of premigration assessment runs based on filter settings. These filter settings can specify a combination of premigration assessment runs, migration tasks, replication instances, and assessment run status values. This operation doesn't return information about individual assessments. For this information, see the DescribeReplicationTaskIndividualAssessments operation.
# Optional Parameters
- `Filters`: Filters applied to the premigration assessment runs described in the form of key-value pairs. Valid filter names: replication-task-assessment-run-arn, replication-task-arn, replication-instance-arn, status
- `Marker`: An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.
- `MaxRecords`: The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.
"""
describe_replication_task_assessment_runs(; aws_config::AWSConfig=global_aws_config()) = database_migration_service("DescribeReplicationTaskAssessmentRuns"; aws_config=aws_config)
describe_replication_task_assessment_runs(args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = database_migration_service("DescribeReplicationTaskAssessmentRuns", args; aws_config=aws_config)
"""
DescribeReplicationTaskIndividualAssessments()
Returns a paginated list of individual assessments based on filter settings. These filter settings can specify a combination of premigration assessment runs, migration tasks, and assessment status values.
# Optional Parameters
- `Filters`: Filters applied to the individual assessments described in the form of key-value pairs. Valid filter names: replication-task-assessment-run-arn, replication-task-arn, status
- `Marker`: An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.
- `MaxRecords`: The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.
"""
describe_replication_task_individual_assessments(; aws_config::AWSConfig=global_aws_config()) = database_migration_service("DescribeReplicationTaskIndividualAssessments"; aws_config=aws_config)
describe_replication_task_individual_assessments(args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = database_migration_service("DescribeReplicationTaskIndividualAssessments", args; aws_config=aws_config)
"""
DescribeReplicationTasks()
Returns information about replication tasks for your account in the current region.
# Optional Parameters
- `Filters`: Filters applied to replication tasks. Valid filter names: replication-task-arn | replication-task-id | migration-type | endpoint-arn | replication-instance-arn
- `Marker`: An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.
- `MaxRecords`: The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved. Default: 100 Constraints: Minimum 20, maximum 100.
- `WithoutSettings`: An option to set to avoid returning information about settings. Use this to reduce overhead when setting information is too large. To use this option, choose true; otherwise, choose false (the default).
"""
describe_replication_tasks(; aws_config::AWSConfig=global_aws_config()) = database_migration_service("DescribeReplicationTasks"; aws_config=aws_config)
describe_replication_tasks(args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = database_migration_service("DescribeReplicationTasks", args; aws_config=aws_config)
"""
DescribeSchemas()
Returns information about the schema for the specified endpoint.
# Required Parameters
- `EndpointArn`: The Amazon Resource Name (ARN) string that uniquely identifies the endpoint.
# Optional Parameters
- `Marker`: An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.
- `MaxRecords`: The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved. Default: 100 Constraints: Minimum 20, maximum 100.
"""
describe_schemas(EndpointArn; aws_config::AWSConfig=global_aws_config()) = database_migration_service("DescribeSchemas", Dict{String, Any}("EndpointArn"=>EndpointArn); aws_config=aws_config)
describe_schemas(EndpointArn, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = database_migration_service("DescribeSchemas", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("EndpointArn"=>EndpointArn), args)); aws_config=aws_config)
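# Illustrative usage sketch (not part of the generated source). The endpoint ARN
# below is a hypothetical placeholder; real calls also need valid AWS credentials
# in the active `aws_config`.
#
#   describe_schemas("arn:aws:dms:us-east-1:123456789012:endpoint:EXAMPLE")
#   describe_schemas("arn:aws:dms:us-east-1:123456789012:endpoint:EXAMPLE",
#                    Dict("MaxRecords" => 20))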
"""
DescribeTableStatistics()
Returns table statistics on the database migration task, including table name, rows inserted, rows updated, and rows deleted. Note that the \"last updated\" column in the DMS console only indicates the time that AWS DMS last updated the table statistics record for a table. It does not indicate the time of the last update to the table.
# Required Parameters
- `ReplicationTaskArn`: The Amazon Resource Name (ARN) of the replication task.
# Optional Parameters
- `Filters`: Filters applied to table statistics. Valid filter names: schema-name | table-name | table-state A combination of filters creates an AND condition where each record matches all specified filters.
- `Marker`: An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.
- `MaxRecords`: The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved. Default: 100 Constraints: Minimum 20, maximum 500.
"""
describe_table_statistics(ReplicationTaskArn; aws_config::AWSConfig=global_aws_config()) = database_migration_service("DescribeTableStatistics", Dict{String, Any}("ReplicationTaskArn"=>ReplicationTaskArn); aws_config=aws_config)
describe_table_statistics(ReplicationTaskArn, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = database_migration_service("DescribeTableStatistics", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("ReplicationTaskArn"=>ReplicationTaskArn), args)); aws_config=aws_config)
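# Illustrative sketch (not part of the generated source): optional parameters such
# as `Filters` go in the args dictionary of the second method. The task ARN and
# schema name are hypothetical placeholders.
#
#   describe_table_statistics(
#       "arn:aws:dms:us-east-1:123456789012:task:EXAMPLE",
#       Dict("Filters" => [Dict("Name" => "schema-name", "Values" => ["public"])]))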
"""
ImportCertificate()
Uploads the specified certificate.
# Required Parameters
- `CertificateIdentifier`: A customer-assigned name for the certificate. Identifiers must begin with a letter and must contain only ASCII letters, digits, and hyphens. They can't end with a hyphen or contain two consecutive hyphens.
# Optional Parameters
- `CertificatePem`: The contents of a .pem file, which contains an X.509 certificate.
- `CertificateWallet`: The location of an imported Oracle Wallet certificate for use with SSL.
- `Tags`: The tags associated with the certificate.
"""
import_certificate(CertificateIdentifier; aws_config::AWSConfig=global_aws_config()) = database_migration_service("ImportCertificate", Dict{String, Any}("CertificateIdentifier"=>CertificateIdentifier); aws_config=aws_config)
import_certificate(CertificateIdentifier, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = database_migration_service("ImportCertificate", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("CertificateIdentifier"=>CertificateIdentifier), args)); aws_config=aws_config)
"""
ListTagsForResource()
Lists all metadata tags attached to an AWS DMS resource, including replication instance, endpoint, security group, and migration task. For more information, see Tag data type description.
# Required Parameters
- `ResourceArn`: The Amazon Resource Name (ARN) string that uniquely identifies the AWS DMS resource.
"""
list_tags_for_resource(ResourceArn; aws_config::AWSConfig=global_aws_config()) = database_migration_service("ListTagsForResource", Dict{String, Any}("ResourceArn"=>ResourceArn); aws_config=aws_config)
list_tags_for_resource(ResourceArn, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = database_migration_service("ListTagsForResource", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("ResourceArn"=>ResourceArn), args)); aws_config=aws_config)
"""
ModifyEndpoint()
Modifies the specified endpoint.
# Required Parameters
- `EndpointArn`: The Amazon Resource Name (ARN) string that uniquely identifies the endpoint.
# Optional Parameters
- `CertificateArn`: The Amazon Resource Name (ARN) of the certificate used for SSL connection.
- `DatabaseName`: The name of the endpoint database.
- `DmsTransferSettings`: The settings in JSON format for the DMS transfer type of source endpoint. Attributes include the following: serviceAccessRoleArn - The AWS Identity and Access Management (IAM) role that has permission to access the Amazon S3 bucket. BucketName - The name of the S3 bucket to use. compressionType - An optional parameter to use GZIP to compress the target files. Either set this parameter to NONE (the default) or don't use it to leave the files uncompressed. Shorthand syntax for these settings is as follows: ServiceAccessRoleArn=string ,BucketName=string,CompressionType=string JSON syntax for these settings is as follows: { \"ServiceAccessRoleArn\": \"string\", \"BucketName\": \"string\", \"CompressionType\": \"none\"|\"gzip\" }
- `DynamoDbSettings`: Settings in JSON format for the target Amazon DynamoDB endpoint. For information about other available settings, see Using Object Mapping to Migrate Data to DynamoDB in the AWS Database Migration Service User Guide.
- `ElasticsearchSettings`: Settings in JSON format for the target Elasticsearch endpoint. For more information about the available settings, see Extra Connection Attributes When Using Elasticsearch as a Target for AWS DMS in the AWS Database Migration Service User Guide.
- `EndpointIdentifier`: The database endpoint identifier. Identifiers must begin with a letter and must contain only ASCII letters, digits, and hyphens. They can't end with a hyphen or contain two consecutive hyphens.
- `EndpointType`: The type of endpoint. Valid values are source and target.
- `EngineName`: The type of engine for the endpoint. Valid values, depending on the EndpointType, include \"mysql\", \"oracle\", \"postgres\", \"mariadb\", \"aurora\", \"aurora-postgresql\", \"redshift\", \"s3\", \"db2\", \"azuredb\", \"sybase\", \"dynamodb\", \"mongodb\", \"kinesis\", \"kafka\", \"elasticsearch\", \"documentdb\", \"sqlserver\", and \"neptune\".
- `ExternalTableDefinition`: The external table definition.
- `ExtraConnectionAttributes`: Additional attributes associated with the connection. To reset this parameter, pass the empty string (\"\") as an argument.
- `IBMDb2Settings`: Settings in JSON format for the source IBM Db2 LUW endpoint. For information about other available settings, see Extra connection attributes when using Db2 LUW as a source for AWS DMS in the AWS Database Migration Service User Guide.
- `KafkaSettings`: Settings in JSON format for the target Apache Kafka endpoint. For more information about the available settings, see Using Apache Kafka as a Target for AWS Database Migration Service in the AWS Database Migration Service User Guide.
- `KinesisSettings`: Settings in JSON format for the target endpoint for Amazon Kinesis Data Streams. For more information about the available settings, see Using Amazon Kinesis Data Streams as a Target for AWS Database Migration Service in the AWS Database Migration Service User Guide.
- `MicrosoftSQLServerSettings`: Settings in JSON format for the source and target Microsoft SQL Server endpoint. For information about other available settings, see Extra connection attributes when using SQL Server as a source for AWS DMS and Extra connection attributes when using SQL Server as a target for AWS DMS in the AWS Database Migration Service User Guide.
- `MongoDbSettings`: Settings in JSON format for the source MongoDB endpoint. For more information about the available settings, see the configuration properties section in Using MongoDB as a Target for AWS Database Migration Service in the AWS Database Migration Service User Guide.
- `MySQLSettings`: Settings in JSON format for the source and target MySQL endpoint. For information about other available settings, see Extra connection attributes when using MySQL as a source for AWS DMS and Extra connection attributes when using a MySQL-compatible database as a target for AWS DMS in the AWS Database Migration Service User Guide.
- `NeptuneSettings`: Settings in JSON format for the target Amazon Neptune endpoint. For more information about the available settings, see Specifying Endpoint Settings for Amazon Neptune as a Target in the AWS Database Migration Service User Guide.
- `OracleSettings`: Settings in JSON format for the source and target Oracle endpoint. For information about other available settings, see Extra connection attributes when using Oracle as a source for AWS DMS and Extra connection attributes when using Oracle as a target for AWS DMS in the AWS Database Migration Service User Guide.
- `Password`: The password to be used to log in to the endpoint database.
- `Port`: The port used by the endpoint database.
- `PostgreSQLSettings`: Settings in JSON format for the source and target PostgreSQL endpoint. For information about other available settings, see Extra connection attributes when using PostgreSQL as a source for AWS DMS and Extra connection attributes when using PostgreSQL as a target for AWS DMS in the AWS Database Migration Service User Guide.
- `RedshiftSettings`: Settings in JSON format for the Amazon Redshift endpoint.
- `S3Settings`: Settings in JSON format for the target Amazon S3 endpoint. For more information about the available settings, see Extra Connection Attributes When Using Amazon S3 as a Target for AWS DMS in the AWS Database Migration Service User Guide.
- `ServerName`: The name of the server where the endpoint database resides.
- `ServiceAccessRoleArn`: The Amazon Resource Name (ARN) for the service access role you want to use to modify the endpoint.
- `SslMode`: The SSL mode used to connect to the endpoint. The default value is none.
- `SybaseSettings`: Settings in JSON format for the source and target SAP ASE endpoint. For information about other available settings, see Extra connection attributes when using SAP ASE as a source for AWS DMS and Extra connection attributes when using SAP ASE as a target for AWS DMS in the AWS Database Migration Service User Guide.
- `Username`: The user name to be used to log in to the endpoint database.
"""
modify_endpoint(EndpointArn; aws_config::AWSConfig=global_aws_config()) = database_migration_service("ModifyEndpoint", Dict{String, Any}("EndpointArn"=>EndpointArn); aws_config=aws_config)
modify_endpoint(EndpointArn, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = database_migration_service("ModifyEndpoint", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("EndpointArn"=>EndpointArn), args)); aws_config=aws_config)
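# Illustrative sketch (not part of the generated source): modifying a couple of
# optional endpoint attributes. The ARN and the values are hypothetical placeholders.
#
#   modify_endpoint("arn:aws:dms:us-east-1:123456789012:endpoint:EXAMPLE",
#                   Dict("EndpointIdentifier" => "renamed-endpoint", "Port" => 5432))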
"""
ModifyEventSubscription()
Modifies an existing AWS DMS event notification subscription.
# Required Parameters
- `SubscriptionName`: The name of the AWS DMS event notification subscription to be modified.
# Optional Parameters
- `Enabled`: A Boolean value; set to true to activate the subscription.
- `EventCategories`: A list of event categories for a source type that you want to subscribe to. Use the DescribeEventCategories action to see a list of event categories.
- `SnsTopicArn`: The Amazon Resource Name (ARN) of the Amazon SNS topic created for event notification. The ARN is created by Amazon SNS when you create a topic and subscribe to it.
- `SourceType`: The type of AWS DMS resource that generates the events you want to subscribe to. Valid values: replication-instance | replication-task
"""
modify_event_subscription(SubscriptionName; aws_config::AWSConfig=global_aws_config()) = database_migration_service("ModifyEventSubscription", Dict{String, Any}("SubscriptionName"=>SubscriptionName); aws_config=aws_config)
modify_event_subscription(SubscriptionName, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = database_migration_service("ModifyEventSubscription", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("SubscriptionName"=>SubscriptionName), args)); aws_config=aws_config)
"""
ModifyReplicationInstance()
Modifies the replication instance to apply new settings. You can change one or more parameters by specifying these parameters and the new values in the request. Some settings are applied during the maintenance window.
# Required Parameters
- `ReplicationInstanceArn`: The Amazon Resource Name (ARN) of the replication instance.
# Optional Parameters
- `AllocatedStorage`: The amount of storage (in gigabytes) to be allocated for the replication instance.
- `AllowMajorVersionUpgrade`: Indicates that major version upgrades are allowed. Changing this parameter does not result in an outage, and the change is asynchronously applied as soon as possible. This parameter must be set to true when specifying a value for the EngineVersion parameter that is a different major version than the replication instance's current version.
- `ApplyImmediately`: Indicates whether the changes should be applied immediately or during the next maintenance window.
- `AutoMinorVersionUpgrade`: A value that indicates that minor version upgrades are applied automatically to the replication instance during the maintenance window. Changing this parameter doesn't result in an outage, except in the case described following. The change is asynchronously applied as soon as possible. An outage does result if these factors apply: This parameter is set to true during the maintenance window. A newer minor version is available. AWS DMS has enabled automatic patching for the given engine version.
- `EngineVersion`: The engine version number of the replication instance. When modifying a major engine version of an instance, also set AllowMajorVersionUpgrade to true.
- `MultiAZ`: Specifies whether the replication instance is a Multi-AZ deployment. You can't set the AvailabilityZone parameter if the Multi-AZ parameter is set to true.
- `PreferredMaintenanceWindow`: The weekly time range (in UTC) during which system maintenance can occur, which might result in an outage. Changing this parameter does not result in an outage, except in the following situation, and the change is asynchronously applied as soon as possible. If moving this window to the current time, there must be at least 30 minutes between the current time and end of the window to ensure pending changes are applied. Default: Uses existing setting Format: ddd:hh24:mi-ddd:hh24:mi Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun Constraints: Must be at least 30 minutes
- `ReplicationInstanceClass`: The compute and memory capacity of the replication instance as defined for the specified replication instance class. For example, to specify the instance class dms.c4.large, set this parameter to \"dms.c4.large\". For more information on the settings and capacities for the available replication instance classes, see Selecting the right AWS DMS replication instance for your migration.
- `ReplicationInstanceIdentifier`: The replication instance identifier. This parameter is stored as a lowercase string.
- `VpcSecurityGroupIds`: Specifies the VPC security group to be used with the replication instance. The VPC security group must work with the VPC containing the replication instance.
"""
modify_replication_instance(ReplicationInstanceArn; aws_config::AWSConfig=global_aws_config()) = database_migration_service("ModifyReplicationInstance", Dict{String, Any}("ReplicationInstanceArn"=>ReplicationInstanceArn); aws_config=aws_config)
modify_replication_instance(ReplicationInstanceArn, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = database_migration_service("ModifyReplicationInstance", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("ReplicationInstanceArn"=>ReplicationInstanceArn), args)); aws_config=aws_config)
"""
ModifyReplicationSubnetGroup()
Modifies the settings for the specified replication subnet group.
# Required Parameters
- `ReplicationSubnetGroupIdentifier`: The name of the replication instance subnet group.
- `SubnetIds`: A list of subnet IDs.
# Optional Parameters
- `ReplicationSubnetGroupDescription`: A description for the replication instance subnet group.
"""
modify_replication_subnet_group(ReplicationSubnetGroupIdentifier, SubnetIds; aws_config::AWSConfig=global_aws_config()) = database_migration_service("ModifyReplicationSubnetGroup", Dict{String, Any}("ReplicationSubnetGroupIdentifier"=>ReplicationSubnetGroupIdentifier, "SubnetIds"=>SubnetIds); aws_config=aws_config)
modify_replication_subnet_group(ReplicationSubnetGroupIdentifier, SubnetIds, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = database_migration_service("ModifyReplicationSubnetGroup", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("ReplicationSubnetGroupIdentifier"=>ReplicationSubnetGroupIdentifier, "SubnetIds"=>SubnetIds), args)); aws_config=aws_config)
"""
ModifyReplicationTask()
Modifies the specified replication task. You can't modify the task endpoints. The task must be stopped before you can modify it. For more information about AWS DMS tasks, see Working with Migration Tasks in the AWS Database Migration Service User Guide.
# Required Parameters
- `ReplicationTaskArn`: The Amazon Resource Name (ARN) of the replication task.
# Optional Parameters
- `CdcStartPosition`: Indicates when you want a change data capture (CDC) operation to start. Use either CdcStartPosition or CdcStartTime to specify when you want a CDC operation to start. Specifying both values results in an error. The value can be in date, checkpoint, or LSN/SCN format. Date Example: --cdc-start-position “2018-03-08T12:12:12” Checkpoint Example: --cdc-start-position \"checkpoint:V1#27#mysql-bin-changelog.157832:1975:-1:2002:677883278264080:mysql-bin-changelog.157832:1876#0#0#*#0#93\" LSN Example: --cdc-start-position “mysql-bin-changelog.000024:373” When you use this task setting with a source PostgreSQL database, a logical replication slot should already be created and associated with the source endpoint. You can verify this by setting the slotName extra connection attribute to the name of this logical replication slot. For more information, see Extra Connection Attributes When Using PostgreSQL as a Source for AWS DMS.
- `CdcStartTime`: Indicates the start time for a change data capture (CDC) operation. Use either CdcStartTime or CdcStartPosition to specify when you want a CDC operation to start. Specifying both values results in an error. Timestamp Example: --cdc-start-time “2018-03-08T12:12:12”
- `CdcStopPosition`: Indicates when you want a change data capture (CDC) operation to stop. The value can be either server time or commit time. Server time example: --cdc-stop-position “server_time:2018-02-09T12:12:12” Commit time example: --cdc-stop-position “commit_time: 2018-02-09T12:12:12 “
- `MigrationType`: The migration type. Valid values: full-load | cdc | full-load-and-cdc
- `ReplicationTaskIdentifier`: The replication task identifier. Constraints: Must contain 1-255 alphanumeric characters or hyphens. First character must be a letter. Cannot end with a hyphen or contain two consecutive hyphens.
- `ReplicationTaskSettings`: JSON file that contains settings for the task, such as task metadata settings.
- `TableMappings`: When using the AWS CLI or boto3, provide the path of the JSON file that contains the table mappings. Precede the path with file://. When working with the DMS API, provide the JSON as the parameter value, for example: --table-mappings file://mappingfile.json
- `TaskData`: Supplemental information that the task requires to migrate the data for certain source and target endpoints. For more information, see Specifying Supplemental Data for Task Settings in the AWS Database Migration Service User Guide.
"""
modify_replication_task(ReplicationTaskArn; aws_config::AWSConfig=global_aws_config()) = database_migration_service("ModifyReplicationTask", Dict{String, Any}("ReplicationTaskArn"=>ReplicationTaskArn); aws_config=aws_config)
modify_replication_task(ReplicationTaskArn, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = database_migration_service("ModifyReplicationTask", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("ReplicationTaskArn"=>ReplicationTaskArn), args)); aws_config=aws_config)
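# Illustrative sketch (not part of the generated source): changing the migration
# type of a stopped task. The ARN is a hypothetical placeholder.
#
#   modify_replication_task("arn:aws:dms:us-east-1:123456789012:task:EXAMPLE",
#                           Dict("MigrationType" => "full-load-and-cdc"))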
"""
RebootReplicationInstance()
Reboots a replication instance. Rebooting results in a momentary outage, until the replication instance becomes available again.
# Required Parameters
- `ReplicationInstanceArn`: The Amazon Resource Name (ARN) of the replication instance.
# Optional Parameters
- `ForceFailover`: If this parameter is true, the reboot is conducted through a Multi-AZ failover. (If the instance isn't configured for Multi-AZ, then you can't specify true.)
"""
reboot_replication_instance(ReplicationInstanceArn; aws_config::AWSConfig=global_aws_config()) = database_migration_service("RebootReplicationInstance", Dict{String, Any}("ReplicationInstanceArn"=>ReplicationInstanceArn); aws_config=aws_config)
reboot_replication_instance(ReplicationInstanceArn, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = database_migration_service("RebootReplicationInstance", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("ReplicationInstanceArn"=>ReplicationInstanceArn), args)); aws_config=aws_config)
"""
RefreshSchemas()
Populates the schema for the specified endpoint. This is an asynchronous operation and can take several minutes. You can check the status of this operation by calling the DescribeRefreshSchemasStatus operation.
# Required Parameters
- `EndpointArn`: The Amazon Resource Name (ARN) string that uniquely identifies the endpoint.
- `ReplicationInstanceArn`: The Amazon Resource Name (ARN) of the replication instance.
"""
refresh_schemas(EndpointArn, ReplicationInstanceArn; aws_config::AWSConfig=global_aws_config()) = database_migration_service("RefreshSchemas", Dict{String, Any}("EndpointArn"=>EndpointArn, "ReplicationInstanceArn"=>ReplicationInstanceArn); aws_config=aws_config)
refresh_schemas(EndpointArn, ReplicationInstanceArn, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = database_migration_service("RefreshSchemas", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("EndpointArn"=>EndpointArn, "ReplicationInstanceArn"=>ReplicationInstanceArn), args)); aws_config=aws_config)
"""
ReloadTables()
Reloads the target database table with the source data.
# Required Parameters
- `ReplicationTaskArn`: The Amazon Resource Name (ARN) of the replication task.
- `TablesToReload`: The name and schema of the table to be reloaded.
# Optional Parameters
- `ReloadOption`: Options for reload. Specify data-reload to reload the data and re-validate it if validation is enabled. Specify validate-only to re-validate the table. This option applies only when validation is enabled for the task. Valid values: data-reload, validate-only Default value is data-reload.
"""
reload_tables(ReplicationTaskArn, TablesToReload; aws_config::AWSConfig=global_aws_config()) = database_migration_service("ReloadTables", Dict{String, Any}("ReplicationTaskArn"=>ReplicationTaskArn, "TablesToReload"=>TablesToReload); aws_config=aws_config)
reload_tables(ReplicationTaskArn, TablesToReload, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = database_migration_service("ReloadTables", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("ReplicationTaskArn"=>ReplicationTaskArn, "TablesToReload"=>TablesToReload), args)); aws_config=aws_config)
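# Illustrative sketch (not part of the generated source): `TablesToReload` is a
# list of schema/table pairs. The ARN and names are hypothetical placeholders, and
# the exact key names below are an assumption following the AWS DMS TableToReload shape.
#
#   reload_tables("arn:aws:dms:us-east-1:123456789012:task:EXAMPLE",
#                 [Dict("SchemaName" => "public", "TableName" => "orders")])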
"""
RemoveTagsFromResource()
Removes metadata tags from an AWS DMS resource, including replication instance, endpoint, security group, and migration task. For more information, see Tag data type description.
# Required Parameters
- `ResourceArn`: An AWS DMS resource from which you want to remove tag(s). The value for this parameter is an Amazon Resource Name (ARN).
- `TagKeys`: The tag key (name) of the tag to be removed.
"""
remove_tags_from_resource(ResourceArn, TagKeys; aws_config::AWSConfig=global_aws_config()) = database_migration_service("RemoveTagsFromResource", Dict{String, Any}("ResourceArn"=>ResourceArn, "TagKeys"=>TagKeys); aws_config=aws_config)
remove_tags_from_resource(ResourceArn, TagKeys, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = database_migration_service("RemoveTagsFromResource", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("ResourceArn"=>ResourceArn, "TagKeys"=>TagKeys), args)); aws_config=aws_config)
"""
StartReplicationTask()
Starts the replication task. For more information about AWS DMS tasks, see Working with Migration Tasks in the AWS Database Migration Service User Guide.
# Required Parameters
- `ReplicationTaskArn`: The Amazon Resource Name (ARN) of the replication task to be started.
- `StartReplicationTaskType`: A type of replication task.
# Optional Parameters
- `CdcStartPosition`: Indicates when you want a change data capture (CDC) operation to start. Use either CdcStartPosition or CdcStartTime to specify when you want a CDC operation to start. Specifying both values results in an error. The value can be in date, checkpoint, or LSN/SCN format. Date Example: --cdc-start-position “2018-03-08T12:12:12” Checkpoint Example: --cdc-start-position \"checkpoint:V1#27#mysql-bin-changelog.157832:1975:-1:2002:677883278264080:mysql-bin-changelog.157832:1876#0#0#*#0#93\" LSN Example: --cdc-start-position “mysql-bin-changelog.000024:373” When you use this task setting with a source PostgreSQL database, a logical replication slot should already be created and associated with the source endpoint. You can verify this by setting the slotName extra connection attribute to the name of this logical replication slot. For more information, see Extra Connection Attributes When Using PostgreSQL as a Source for AWS DMS.
- `CdcStartTime`: Indicates the start time for a change data capture (CDC) operation. Use either CdcStartTime or CdcStartPosition to specify when you want a CDC operation to start. Specifying both values results in an error. Timestamp Example: --cdc-start-time “2018-03-08T12:12:12”
- `CdcStopPosition`: Indicates when you want a change data capture (CDC) operation to stop. The value can be either server time or commit time. Server time example: --cdc-stop-position “server_time:2018-02-09T12:12:12” Commit time example: --cdc-stop-position “commit_time: 2018-02-09T12:12:12 “
"""
start_replication_task(ReplicationTaskArn, StartReplicationTaskType; aws_config::AWSConfig=global_aws_config()) = database_migration_service("StartReplicationTask", Dict{String, Any}("ReplicationTaskArn"=>ReplicationTaskArn, "StartReplicationTaskType"=>StartReplicationTaskType); aws_config=aws_config)
start_replication_task(ReplicationTaskArn, StartReplicationTaskType, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = database_migration_service("StartReplicationTask", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("ReplicationTaskArn"=>ReplicationTaskArn, "StartReplicationTaskType"=>StartReplicationTaskType), args)); aws_config=aws_config)
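# Illustrative sketch (not part of the generated source): starting a task for the
# first time. The ARN is a hypothetical placeholder; "start-replication" is one of
# the StartReplicationTaskType values accepted by AWS DMS.
#
#   start_replication_task("arn:aws:dms:us-east-1:123456789012:task:EXAMPLE",
#                          "start-replication")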
"""
StartReplicationTaskAssessment()
Starts the replication task assessment for unsupported data types in the source database.
# Required Parameters
- `ReplicationTaskArn`: The Amazon Resource Name (ARN) of the replication task.
"""
start_replication_task_assessment(ReplicationTaskArn; aws_config::AWSConfig=global_aws_config()) = database_migration_service("StartReplicationTaskAssessment", Dict{String, Any}("ReplicationTaskArn"=>ReplicationTaskArn); aws_config=aws_config)
start_replication_task_assessment(ReplicationTaskArn, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = database_migration_service("StartReplicationTaskAssessment", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("ReplicationTaskArn"=>ReplicationTaskArn), args)); aws_config=aws_config)
"""
StartReplicationTaskAssessmentRun()
Starts a new premigration assessment run for one or more individual assessments of a migration task. The assessments that you can specify depend on the source and target database engine and the migration type defined for the given task. To run this operation, your migration task must already be created. After you run this operation, you can review the status of each individual assessment. You can also run the migration task manually after the assessment run and its individual assessments complete.
# Required Parameters
- `AssessmentRunName`: Unique name to identify the assessment run.
- `ReplicationTaskArn`: Amazon Resource Name (ARN) of the migration task associated with the premigration assessment run that you want to start.
- `ResultLocationBucket`: Amazon S3 bucket where you want AWS DMS to store the results of this assessment run.
- `ServiceAccessRoleArn`: ARN of a service role needed to start the assessment run.
# Optional Parameters
- `Exclude`: Space-separated list of names for specific individual assessments that you want to exclude. These names come from the default list of individual assessments that AWS DMS supports for the associated migration task. This task is specified by ReplicationTaskArn. You can't set a value for Exclude if you also set a value for IncludeOnly in the API operation. To identify the names of the default individual assessments that AWS DMS supports for the associated migration task, run the DescribeApplicableIndividualAssessments operation using its own ReplicationTaskArn request parameter.
- `IncludeOnly`: Space-separated list of names for specific individual assessments that you want to include. These names come from the default list of individual assessments that AWS DMS supports for the associated migration task. This task is specified by ReplicationTaskArn. You can't set a value for IncludeOnly if you also set a value for Exclude in the API operation. To identify the names of the default individual assessments that AWS DMS supports for the associated migration task, run the DescribeApplicableIndividualAssessments operation using its own ReplicationTaskArn request parameter.
- `ResultEncryptionMode`: Encryption mode that you can specify to encrypt the results of this assessment run. If you don't specify this request parameter, AWS DMS stores the assessment run results without encryption. You can specify one of the options following: \"SSE_S3\" – The server-side encryption provided as a default by Amazon S3. \"SSE_KMS\" – AWS Key Management Service (AWS KMS) encryption. This encryption can use either a custom KMS encryption key that you specify or the default KMS encryption key that DMS provides.
- `ResultKmsKeyArn`: ARN of a custom KMS encryption key that you specify when you set ResultEncryptionMode to \"SSE_KMS\".
- `ResultLocationFolder`: Folder within an Amazon S3 bucket where you want AWS DMS to store the results of this assessment run.
"""
start_replication_task_assessment_run(AssessmentRunName, ReplicationTaskArn, ResultLocationBucket, ServiceAccessRoleArn; aws_config::AWSConfig=global_aws_config()) = database_migration_service("StartReplicationTaskAssessmentRun", Dict{String, Any}("AssessmentRunName"=>AssessmentRunName, "ReplicationTaskArn"=>ReplicationTaskArn, "ResultLocationBucket"=>ResultLocationBucket, "ServiceAccessRoleArn"=>ServiceAccessRoleArn); aws_config=aws_config)
start_replication_task_assessment_run(AssessmentRunName, ReplicationTaskArn, ResultLocationBucket, ServiceAccessRoleArn, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = database_migration_service("StartReplicationTaskAssessmentRun", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("AssessmentRunName"=>AssessmentRunName, "ReplicationTaskArn"=>ReplicationTaskArn, "ResultLocationBucket"=>ResultLocationBucket, "ServiceAccessRoleArn"=>ServiceAccessRoleArn), args)); aws_config=aws_config)
"""
StopReplicationTask()
Stops the replication task.
# Required Parameters
- `ReplicationTaskArn`: The Amazon Resource Name (ARN) of the replication task to be stopped.
"""
stop_replication_task(ReplicationTaskArn; aws_config::AWSConfig=global_aws_config()) = database_migration_service("StopReplicationTask", Dict{String, Any}("ReplicationTaskArn"=>ReplicationTaskArn); aws_config=aws_config)
stop_replication_task(ReplicationTaskArn, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = database_migration_service("StopReplicationTask", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("ReplicationTaskArn"=>ReplicationTaskArn), args)); aws_config=aws_config)
"""
TestConnection()
Tests the connection between the replication instance and the endpoint.
# Required Parameters
- `EndpointArn`: The Amazon Resource Name (ARN) string that uniquely identifies the endpoint.
- `ReplicationInstanceArn`: The Amazon Resource Name (ARN) of the replication instance.
"""
test_connection(EndpointArn, ReplicationInstanceArn; aws_config::AWSConfig=global_aws_config()) = database_migration_service("TestConnection", Dict{String, Any}("EndpointArn"=>EndpointArn, "ReplicationInstanceArn"=>ReplicationInstanceArn); aws_config=aws_config)
test_connection(EndpointArn, ReplicationInstanceArn, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = database_migration_service("TestConnection", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("EndpointArn"=>EndpointArn, "ReplicationInstanceArn"=>ReplicationInstanceArn), args)); aws_config=aws_config)
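# Illustrative sketch (not part of the generated source): both ARNs are
# hypothetical placeholders.
#
#   test_connection("arn:aws:dms:us-east-1:123456789012:endpoint:EXAMPLE",
#                   "arn:aws:dms:us-east-1:123456789012:rep:EXAMPLE")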
|
{"hexsha": "5d703f036ed123e1882ef2e9f8cc9695ac2de86d", "size": 91969, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/services/database_migration_service.jl", "max_stars_repo_name": "ExpandingMan/AWS.jl", "max_stars_repo_head_hexsha": "8e6e61eb9dcd84fb1e148ff2afe093b3010d9edb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/services/database_migration_service.jl", "max_issues_repo_name": "ExpandingMan/AWS.jl", "max_issues_repo_head_hexsha": "8e6e61eb9dcd84fb1e148ff2afe093b3010d9edb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/services/database_migration_service.jl", "max_forks_repo_name": "ExpandingMan/AWS.jl", "max_forks_repo_head_hexsha": "8e6e61eb9dcd84fb1e148ff2afe093b3010d9edb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 103.9197740113, "max_line_length": 1248, "alphanum_fraction": 0.8079787755, "num_tokens": 19787}
|
//==================================================================================================
/*!
@file
@copyright 2016 NumScale SAS
Distributed under the Boost Software License, Version 1.0.
(See accompanying file LICENSE.md or copy at http://boost.org/LICENSE_1_0.txt)
**/
//==================================================================================================
#ifndef BOOST_SIMD_ALGORITHM_REDUCE_HPP_INCLUDED
#define BOOST_SIMD_ALGORITHM_REDUCE_HPP_INCLUDED
#include <boost/simd/range/segmented_input_range.hpp>
#include <boost/simd/function/sum.hpp>
#include <boost/simd/pack.hpp>
namespace boost { namespace simd
{
/*!
@ingroup group-std
Computes the sum over elements in the given Contiguous Range @range{first,last} and
the initial value @c init.
    \notebox{The summation order can be different from the order of a sequential summation,
             thus leading to different results.}
@par Example:
@snippet reduce.simple.cpp reduce-simple
Possible output:
@code
SIMD reduce : 45
STD accumulate : 45
@endcode
@param first Beginning of the range of elements to sum
@param last End of the range of elements to sum
@param init Initial value of the sum
@return The sum of the given value and elements in the given range.
**/
template<typename T> T reduce(T const* first, T const* last, T init)
{
pack<T> acc(0);
auto pr = segmented_input_range(first,last);
for( auto const& e : std::get<0>(pr) ) init += e;
for( auto const& e : std::get<1>(pr) ) acc += e;
for( auto const& e : std::get<2>(pr) ) init += e;
return init + sum(acc);
}
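  // Illustrative sketch (not part of the original header): summing a small float
  // buffer with the overload above; the data values are placeholders.
  //
  //   float data[8] = {0.f, 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f};
  //   float total = boost::simd::reduce(data, data + 8, 0.f);   // 28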
/*!
@ingroup group-std
Computes the generalized sum of the elements in the given Contiguous Range @range{first,last}
over the binary functions @c binop and @c reduce, using @c init as the initial value.
While @c binop is applied over the result of dereferencing the input pointers, @c reduce is to
be used in the final reduction of the SIMD part of the generalized sum.
    \notebox{The summation order can be different from the order of a sequential summation,
             thus leading to different results.}
@par Example:
@snippet reduce.phases.cpp reduce-phases
Possible output:
@code
SIMD reduce : 285
@endcode
@param first Beginning of the range of elements to sum
@param last End of the range of elements to sum
@param init Initial value of the reduction
@param binop Binary function object that will be applied in unspecified order to the
result of dereferencing the input pointers, the results of other @c binop
                 and @c init.
@param neutral Value containing the neutral element of @c binop
@param reduce Binary function object that will be applied to complete the reduction
    @return The generalized sum of the given value and elements in the given range over @c binop.
**/
template<typename T, typename U, typename F, typename N, typename G>
U reduce( T const* first, T const* last, U init, F binop, N neutral, G reduce )
{
pack<U> acc(neutral);
auto pr = segmented_input_range(first,last);
for( auto const& e : std::get<0>(pr) ) init = binop(init,e);
for( auto const& e : std::get<1>(pr) ) acc = binop(acc,e);
for( auto const& e : std::get<2>(pr) ) init = binop(init,e);
for( U e : acc) init = reduce(init,e);
return init;
}
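  // Illustrative sketch (not part of the original header): a generalized sum using
  // a generic lambda for both the element-wise accumulation and the final SIMD
  // reduction, with 0 as the neutral element of addition. Reuses the hypothetical
  // `data` buffer from the sketch above.
  //
  //   auto plus   = [](auto a, auto b) { return a + b; };
  //   float total = boost::simd::reduce(data, data + 8, 0.f, plus, 0.f, plus); // also 28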
/*!
@ingroup group-std
Computes the generalized sum of the elements in the given Contiguous Range @range{first,last}
over the binary function @c binop, using @c init as the initial value.
@c binop is applied over the result of dereferencing the input pointers and in the final
reduction of the SIMD part of the generalized sum.
    \notebox{The summation order can be different from the order of a sequential summation,
             thus leading to different results.}
@par Example:
@snippet reduce.phase.cpp reduce-phase
Possible output:
@code
SIMD reduce : 362880
@endcode
@param first Beginning of the range of elements to sum
@param last End of the range of elements to sum
@param init Initial value of the reduction
@param binop Binary function object that will be applied in unspecified order to the
result of dereferencing the input pointers, the results of other @c binop
and @c init.
@param neutral Value containing the neutral element of @c binop
@return The sum of the given value and elements in the given range.
**/
template<typename T, typename U, typename F, typename N>
U reduce(T const* first, T const* last, U init, F binop, N neutral)
{
return reduce(first,last,init,binop,neutral,binop);
}
} }
#endif
|
{"hexsha": "11b2a290a787beb6f90e28febd5050ae2105a0e8", "size": 4926, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "third_party/boost/simd/algorithm/reduce.hpp", "max_stars_repo_name": "xmar/pythran", "max_stars_repo_head_hexsha": "dbf2e8b70ed1e4d4ac6b5f26ead4add940a72592", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 5.0, "max_stars_repo_stars_event_min_datetime": "2018-02-20T11:21:12.000Z", "max_stars_repo_stars_event_max_datetime": "2019-11-12T13:45:09.000Z", "max_issues_repo_path": "third_party/boost/simd/algorithm/reduce.hpp", "max_issues_repo_name": "xmar/pythran", "max_issues_repo_head_hexsha": "dbf2e8b70ed1e4d4ac6b5f26ead4add940a72592", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "third_party/boost/simd/algorithm/reduce.hpp", "max_forks_repo_name": "xmar/pythran", "max_forks_repo_head_hexsha": "dbf2e8b70ed1e4d4ac6b5f26ead4add940a72592", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 2.0, "max_forks_repo_forks_event_min_datetime": "2017-12-12T12:29:52.000Z", "max_forks_repo_forks_event_max_datetime": "2019-04-08T15:55:25.000Z", "avg_line_length": 35.4388489209, "max_line_length": 100, "alphanum_fraction": 0.6427121397, "num_tokens": 1183}
|
using SuiteSparseMatrixCollection
using MatrixMarket
using SuiteSparseGraphBLAS
using BenchmarkTools
using SparseArrays
include("tc.jl")
include("pr.jl")
graphs = [
"karate",
"com-Youtube",
"as-Skitter",
"com-LiveJournal",
"com-Orkut",
"com-Friendster",
]
ssmc = ssmc_db()
matrices = filter(row -> row.name ∈ graphs, ssmc)
BenchmarkTools.DEFAULT_PARAMETERS.gcsample = true
for name ∈ graphs
path = fetch_ssmc(matrices[matrices.name .== name, :])[1]
G = GBMatrix(convert(SparseMatrixCSC{Float64}, MatrixMarket.mmread(joinpath(path, "$name.mtx"))))
SuiteSparseGraphBLAS.gbset(G, SuiteSparseGraphBLAS.FORMAT, SuiteSparseGraphBLAS.BYROW)
GC.gc()
println("$name | $(size(G)) | $(nnz(G)) edges")
for centrality in [PR, TC1, TC3]
println("Benchmarking $(string(centrality)) on $(name)")
result = @benchmark $centrality($G) samples=3 seconds=600
show(stdout,MIME"text/plain"(),result)
end
end
|
{"hexsha": "9cc2389465d6373d2929db0426efe63021943d12", "size": 962, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "julia/script.jl", "max_stars_repo_name": "Wimmerer/HPEC21-TriangleCentrality", "max_stars_repo_head_hexsha": "dd65a7d1571670b70e444bf46e8c280ae35b754f", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "julia/script.jl", "max_issues_repo_name": "Wimmerer/HPEC21-TriangleCentrality", "max_issues_repo_head_hexsha": "dd65a7d1571670b70e444bf46e8c280ae35b754f", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "julia/script.jl", "max_forks_repo_name": "Wimmerer/HPEC21-TriangleCentrality", "max_forks_repo_head_hexsha": "dd65a7d1571670b70e444bf46e8c280ae35b754f", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.0625, "max_line_length": 101, "alphanum_fraction": 0.6891891892, "num_tokens": 281}
|
module UNet
using Flux, Images
using Flux: @treelike
# model
export unet
# utilities
export img2array, array2img, unet_tiling
include("model.jl")
include("utils.jl")
end # module
|
{"hexsha": "3f2408761cac59243cdf935547dba7e0eeafa512", "size": 184, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/UNet.jl", "max_stars_repo_name": "CDonnerer/UNet.jl", "max_stars_repo_head_hexsha": "8d34190944f52f7d0beb44140fc32e9259b52db6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/UNet.jl", "max_issues_repo_name": "CDonnerer/UNet.jl", "max_issues_repo_head_hexsha": "8d34190944f52f7d0beb44140fc32e9259b52db6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/UNet.jl", "max_forks_repo_name": "CDonnerer/UNet.jl", "max_forks_repo_head_hexsha": "8d34190944f52f7d0beb44140fc32e9259b52db6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 11.5, "max_line_length": 40, "alphanum_fraction": 0.75, "num_tokens": 57}
|
[STATEMENT]
lemma Trgs_are_ide:
shows "Trgs T \<subseteq> Collect R.ide"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Trgs T \<subseteq> Collect R.ide
[PROOF STEP]
apply (induct T)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. Trgs [] \<subseteq> Collect R.ide
2. \<And>a T. Trgs T \<subseteq> Collect R.ide \<Longrightarrow> Trgs (a # T) \<subseteq> Collect R.ide
[PROOF STEP]
apply simp
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>a T. Trgs T \<subseteq> Collect R.ide \<Longrightarrow> Trgs (a # T) \<subseteq> Collect R.ide
[PROOF STEP]
by (metis R.arr_iff_has_target R.sources_resid Srcs.simps(2) Trgs.simps(2-3)
Srcs_are_ide empty_subsetI list.exhaust R.arrE)
|
{"llama_tokens": 288, "file": "ResiduatedTransitionSystem_ResiduatedTransitionSystem", "length": 3}
|
import numpy as np
import matplotlib.pyplot as plt
import shutil
import argparse
import os
import json
import random
import warnings
from termcolor import colored
import pandas as pd
from sklearn.metrics import confusion_matrix
import cv2
import importlib
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import torch.optim as optim
import torch.utils.data as data
from ignite.contrib.handlers import ProgressBar
from ignite.engine import Engine, Events
from ignite.handlers import ModelCheckpoint, Timer
from ignite.metrics import RunningAverage
from tensorboardX import SummaryWriter
import imgaug # https://github.com/aleju/imgaug
from imgaug import augmenters as iaa
import misc as misc
import dataset as dataset
from config import Config
from define_network import define_network
####
class Trainer(Config):
####
def train_step(self, net, batch, optimizer, device):
net.train() # train mode
imgs, true = batch # batch is NHWC
imgs = imgs.permute(0, 3, 1, 2) # to NCHW
# push data to GPUs and convert to float32
imgs = imgs.to(device).float()
true = true.to(device).long() # not one-hot
# -----------------------------------------------------------
        net.zero_grad()  # not an RNN, so gradients are not accumulated across steps
logit = net(imgs) # forward
prob = F.softmax(logit, dim=-1)
pred = torch.argmax(prob, dim=-1)
        # cross_entropy has a built-in log-softmax, so it accepts raw logits
loss = F.cross_entropy(logit, true, reduction='mean')
acc = torch.mean((pred == true).float()) # batch accuracy
# gradient update
loss.backward()
optimizer.step()
        # -----------------------------------------------------------
return dict(loss=loss.item(),
acc=acc.item())
####
def infer_step(self, net, batch, device):
net.eval() # infer mode
imgs, true = batch # batch is NHWC
imgs = imgs.permute(0, 3, 1, 2) # to NCHW
# push data to GPUs and convert to float32
imgs = imgs.to(device).float()
true = true.to(device).long() # not one-hot
# -----------------------------------------------------------
        with torch.no_grad():  # don't compute gradients during inference
logit = net(imgs)
prob = nn.functional.softmax(logit, dim=-1)
return dict(prob=prob.cpu().numpy(),
true=true.cpu().numpy())
####
def run_once(self, log_dir):
"""
`pretrained_path` should lead to pytorch checkpoint
"""
misc.check_manual_seed(self.seed)
train_pairs, valid_pairs = eval(f'dataset.prepare_{self.dataset}_patch_data()')
# --------------------------- Dataloader
train_augmentors = self.train_augmentors()
train_dataset = dataset.DatasetSerial(train_pairs[:],
shape_augs=iaa.Sequential(train_augmentors[0]),
input_augs=iaa.Sequential(train_augmentors[1]))
infer_augmentors = self.infer_augmentors()
infer_dataset = dataset.DatasetSerial(valid_pairs[:],
shape_augs=iaa.Sequential(infer_augmentors[0]),
input_augs=iaa.Sequential(infer_augmentors[1]))
train_loader = data.DataLoader(train_dataset,
num_workers=self.nr_procs_train,
batch_size=self.train_batch_size,
shuffle=True, drop_last=True)
valid_loader = data.DataLoader(infer_dataset,
num_workers=self.nr_procs_valid,
batch_size=self.infer_batch_size,
shuffle=True, drop_last=False)
# --------------------------- Training Sequence
if self.logging:
misc.check_log_dir(log_dir)
device = 'cuda'
# networks
net = define_network(self.network_name, self.nr_class)
net = torch.nn.DataParallel(net).to(device)
optimizer, optimizer_args = self.optimizer
optimizer = optimizer(net.parameters(), **optimizer_args)
scheduler = self.scheduler(optimizer)
trainer = Engine(lambda engine, batch: self.train_step(net, batch, optimizer, device))
inferer = Engine(lambda engine, batch: self.infer_step(net, batch, device))
train_output = ['loss', 'acc']
infer_output = ['prob', 'true']
##
if self.logging:
@trainer.on(Events.EPOCH_COMPLETED)
def save_chkpoints(engine):
torch.save(net.state_dict(), self.log_dir + '_net_' + str(engine.state.epoch) + '.pth')
timer = Timer(average=True)
timer.attach(trainer, start=Events.EPOCH_STARTED, resume=Events.ITERATION_STARTED,
pause=Events.ITERATION_COMPLETED, step=Events.ITERATION_COMPLETED)
timer.attach(inferer, start=Events.EPOCH_STARTED, resume=Events.ITERATION_STARTED,
pause=Events.ITERATION_COMPLETED, step=Events.ITERATION_COMPLETED)
# attach running average metrics computation
# decay of EMA to 0.95 to match tensorpack default
RunningAverage(alpha=0.95, output_transform=lambda x: x['loss']).attach(trainer, 'loss')
RunningAverage(alpha=0.95, output_transform=lambda x: x['acc']).attach(trainer, 'acc')
# attach progress bar
pbar = ProgressBar(persist=True)
pbar.attach(trainer, metric_names=['loss'])
pbar.attach(inferer)
# writer for tensorboard logging
if self.logging:
writer = SummaryWriter(log_dir=log_dir)
json_log_file = log_dir + '/stats.json'
with open(json_log_file, 'w') as json_file:
json.dump({}, json_file) # create empty file
@trainer.on(Events.EPOCH_STARTED)
# @trainer.on(Events.EPOCH_COMPLETED)
def log_lrs(engine):
if self.logging:
lr = float(optimizer.param_groups[0]['lr'])
writer.add_scalar("lr", lr, engine.state.epoch)
# advance scheduler clock
if scheduler is not None:
scheduler.step()
####
def update_logs(output, epoch, prefix, color):
# print values and convert
max_length = len(max(output.keys(), key=len))
for metric in output:
key = colored(prefix + '-' + metric.ljust(max_length), color)
print('------%s : ' % key, end='')
if metric != 'conf_mat':
print('%0.7f' % output[metric])
else:
conf_mat = output['conf_mat'] # use pivot to turn back
conf_mat_df = pd.DataFrame(conf_mat)
conf_mat_df.index.name = 'True'
conf_mat_df.columns.name = 'Pred'
output['conf_mat'] = conf_mat_df
print('\n', conf_mat_df)
if 'train' in prefix:
lr = float(optimizer.param_groups[0]['lr'])
key = colored(prefix + '-' + 'lr'.ljust(max_length), color)
print('------%s : %0.7f' % (key, lr))
if not self.logging:
return
# create stat dicts
stat_dict = {}
for metric in output:
if metric != 'conf_mat':
metric_value = output[metric]
else:
conf_mat_df = output['conf_mat'] # use pivot to turn back
conf_mat_df = conf_mat_df.unstack().rename('value').reset_index()
conf_mat_df = pd.Series({'conf_mat': conf_mat}).to_json(orient='records')
metric_value = conf_mat_df
stat_dict['%s-%s' % (prefix, metric)] = metric_value
# json stat log file, update and overwrite
with open(json_log_file) as json_file:
json_data = json.load(json_file)
current_epoch = str(epoch)
if current_epoch in json_data:
old_stat_dict = json_data[current_epoch]
stat_dict.update(old_stat_dict)
current_epoch_dict = {current_epoch: stat_dict}
json_data.update(current_epoch_dict)
with open(json_log_file, 'w') as json_file:
json.dump(json_data, json_file)
# log values to tensorboard
for metric in output:
if metric != 'conf_mat':
writer.add_scalar(prefix + '-' + metric, output[metric], current_epoch)
@trainer.on(Events.EPOCH_COMPLETED)
def log_train_running_results(engine):
"""
running training measurement
"""
training_ema_output = engine.state.metrics #
update_logs(training_ema_output, engine.state.epoch, prefix='train-ema', color='green')
####
def get_init_accumulator(output_names):
return {metric: [] for metric in output_names}
def process_accumulated_output(output):
#
def uneven_seq_to_np(seq, batch_size=self.infer_batch_size):
item_count = batch_size * (len(seq) - 1) + len(seq[-1])
cat_array = np.zeros((item_count,) + seq[0][0].shape, seq[0].dtype)
for idx in range(0, len(seq) - 1):
cat_array[idx * batch_size:
(idx + 1) * batch_size] = seq[idx]
cat_array[(idx + 1) * batch_size:] = seq[-1]
return cat_array
#
prob = uneven_seq_to_np(output['prob'])
true = uneven_seq_to_np(output['true'])
# threshold then get accuracy
pred = np.argmax(prob, axis=-1)
acc = np.mean(pred == true)
# confusion matrix
conf_mat = confusion_matrix(true, pred,
labels=np.arange(self.nr_classes))
#
proc_output = dict(acc=acc, conf_mat=conf_mat)
return proc_output
@trainer.on(Events.EPOCH_COMPLETED)
def infer_valid(engine):
"""
inference measurement
"""
inferer.accumulator = get_init_accumulator(infer_output)
inferer.run(valid_loader)
output_stat = process_accumulated_output(inferer.accumulator)
update_logs(output_stat, engine.state.epoch, prefix='valid', color='red')
@inferer.on(Events.ITERATION_COMPLETED)
def accumulate_outputs(engine):
batch_output = engine.state.output
for key, item in batch_output.items():
engine.accumulator[key].extend([item])
###
# Setup is done. Now let's run the training
trainer.run(train_loader, self.nr_epochs)
return
####
def run(self):
def get_last_chkpt_path(phase1_dir):
stat_file_path = phase1_dir + '/stats.json'
with open(stat_file_path) as stat_file:
info = json.load(stat_file)
chkpt_list = [int(epoch) for epoch in info.keys()]
last_chkpts_path = "%smodel_net_%d.pth" % (phase1_dir, max(chkpt_list))
return last_chkpts_path
self.run_once(self.log_dir)
return
def infer(self):
misc.check_manual_seed(self.seed)
train_pairs, valid_pairs = eval(f'dataset.prepare_{self.dataset}_patch_data()')
infer_augmentors = self.infer_augmentors()
infer_dataset = dataset.DatasetSerial(valid_pairs[:],
shape_augs=iaa.Sequential(infer_augmentors[0]),
input_augs=iaa.Sequential(infer_augmentors[1]))
device = torch.device("cuda:0", )
net = define_network(self.network_name)
ckpt = torch.load(self.saved_path)
if isinstance(ckpt, torch.nn.DataParallel):
ckpt = ckpt.module.state_dict()
from collections import OrderedDict
new_state_dict = OrderedDict()
for k, v in ckpt.items():
            name = k[7:]  # strip the 'module.' prefix added by nn.DataParallel
new_state_dict[name] = v
net.load_state_dict(new_state_dict, strict=True)
net = torch.nn.DataParallel(net).to(device)
net.eval()
true = []
pred = []
for idx in range(0, len(valid_pairs)):
image, label = infer_dataset.__getitem__(idx)
img = torch.from_numpy(np.array(image))
img = torch.unsqueeze(img, dim=0)
img = img.permute(0, 3, 1, 2)
img = img.to(device).float()
logit = net(img)
prob = nn.functional.softmax(logit, dim=-1)
prob = prob.detach().cpu().numpy()
p = np.argmax(prob, axis=-1)
pred.append(p)
true.append(label)
true = np.reshape(true, (len(true),))
pred = np.reshape(pred, (len(pred),))
conf_mat = confusion_matrix(true, pred,
labels=np.arange(self.nr_class))
print('conf_mat : ', conf_mat)
return 0
###
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', default='0', help='comma separated list of GPU(s) to use.')
parser.add_argument('--view', help='view dataset', action='store_true')
parser.add_argument('--dataset', type=str, default='colon_tma', help='colon_tma, prostate_tma')
    parser.add_argument('--network_name', type=str, default='VGG',
                        help='ResNet, MobileNetV1, EfficientNet, VGG, ResNeSt, '
                             'MuDeep, MSDNet, Res2Net, '
                             'ResNet_MSBP, ResNet_add, ResNet_conv, ResNet_concat, '
                             'ResNet_concat_zm, ResNet_conv_zm')
parser.add_argument('--saved_path', type=str, default='', help='path to trained models to validate')
args = parser.parse_args()
trainer = Trainer(_args=args)
if args.gpu:
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
trainer.run()
|
{"hexsha": "202ec67999b3abd915bf1dd48c8d109a1cd0c26d", "size": 14439, "ext": "py", "lang": "Python", "max_stars_repo_path": "trainer.py", "max_stars_repo_name": "timmyvg/MSBP_Net", "max_stars_repo_head_hexsha": "e2ee6d57ccfc17cd5e5c64e399cbc245281e878d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2021-11-23T07:05:39.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-12T13:48:50.000Z", "max_issues_repo_path": "trainer.py", "max_issues_repo_name": "timmyvg/MSBP_Net", "max_issues_repo_head_hexsha": "e2ee6d57ccfc17cd5e5c64e399cbc245281e878d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "trainer.py", "max_forks_repo_name": "timmyvg/MSBP_Net", "max_forks_repo_head_hexsha": "e2ee6d57ccfc17cd5e5c64e399cbc245281e878d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-11-22T10:01:01.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-23T23:45:08.000Z", "avg_line_length": 38.6069518717, "max_line_length": 125, "alphanum_fraction": 0.5591107417, "include": true, "reason": "import numpy", "num_tokens": 3011}
|
function serverStarted = mrmCheckServer(host)
%Test whether the server has been started.
%
% serverStarted = mrmCheckServer(host)
%
% There should be a way to test without creating a new window. But I am
% not sure how.
%
% (c) Stanford Vista Team, 2008
if ieNotDefined('host'), host = 'localhost'; end
% Try opening a window
stat = mrMesh(host, 999, 'refresh');
% If the server is not started, the window won't open and we get a return
% of -1000. Otherwise, the window was properly opened.
if stat ~= -1000
% Window was opened properly by server. So close it and return.
serverStarted = 1;
mrMesh(host,999,'close');
return;
else
% Tell 'em that the call didn't work, so the server isn't running.
serverStarted = 0;
end
return;
|
{"author": "vistalab", "repo": "vistasoft", "sha": "7f0102c696c091c858233340cc7e1ab02f064d4c", "save_path": "github-repos/MATLAB/vistalab-vistasoft", "path": "github-repos/MATLAB/vistalab-vistasoft/vistasoft-7f0102c696c091c858233340cc7e1ab02f064d4c/mrMesh/mrm/mrmCheckServer.m"}
|
import Base: ==, copy, size, convert
import SparseArrays: sparse
#
# bm: SymmetricBandedMatrix
# bmat: Banded matrix (the field in a SymmetricBandedMatrix object)
# m: Regular matrix
# sbm: Semi-banded matrix, e.g.
# sbm = [0 0 1; 0 2 3; 4 5 6]
# hbw: Half bandwidth, hbw = (bw - 1)÷2 (notice the \div symbol)
#
mutable struct SymmetricBandedMatrix{Tv}
hbw::Int # Bandwidth bw = 1 + 2*hbw
bmat::Matrix{Tv} # Banded matrix
end
size(bm::SymmetricBandedMatrix) = (size(bm.bmat, 1), size(bm.bmat, 1))
==(bm1::SymmetricBandedMatrix, bm2::SymmetricBandedMatrix) = (bm1.hbw==bm2.hbw && bm1.bmat==bm2.bmat)
function copy(bm::SymmetricBandedMatrix)
Tv = eltype(bm.bmat)
SymmetricBandedMatrix{Tv}(bm.hbw, copy(bm.bmat))
end
# Conversion routine similar to default constructor
function convert(::Type{NumericalMethodsforEngineers.SymmetricBandedMatrix}, hbw::Int, bmat::AbstractMatrix)
  Tv = eltype(bmat)
  SymmetricBandedMatrix{Tv}(hbw, bmat)
end
# Conversion routine to turn a symmetric matrix into a SymmetricBandedMatrix
function convert(::Type{NumericalMethodsforEngineers.SymmetricBandedMatrix}, am::AbstractMatrix)
tosymmetricbandedmatrix(am)
end
function sparse(bm::SymmetricBandedMatrix)
sparse(full(bm))
end
"""
# Convert symmetric AbstractMatrix to a SymmetricBandedMatrix
### Function
```julia
bm = tosymmetricbandedmatrix{Tv}(hbw::Int, am::AbstractMatrix{Tv})
or
bm = tosymmetricbandedmatrix{Tv}(am::AbstractMatrix{Tv})
```
### Arguments
```julia
* `hbw` : Half bandwidth.
* `am` : Symmetric AbstractMatrix
(if no hbw is specified, hbw will be derived from the matrix)
```
"""
function tosymmetricbandedmatrix(am::AbstractMatrix)
m = copy(am)
typeof(m) <: SparseMatrixCSC && (m = Matrix(m))
(!issymmetric(m)) && throw(ArgumentError("Matrix not symmetric"))
hbw = 0
n = size(m, 1)
for i in 1:n, j in 1:n
abs(m[i, j]) > eps() && abs(i-j) > hbw && (hbw = abs(i-j))
end
tosymmetricbandedmatrix(hbw, m)
end
function tosymmetricbandedmatrix(hbw::Int, am::AbstractMatrix)
m = copy(am)
  typeof(m) <: SparseMatrixCSC && (m = Matrix(m))
(!issymmetric(m)) && throw(ArgumentError("Matrix not symmetric"))
Tv = eltype(m)
n = size(m, 1)
b = zeros(eltype(m), n, hbw+1)
for i in 1:n
b[i, hbw+1] = m[i, i]
for j in 1:hbw
i - hbw + j > 1 && (b[i, j] = m[i, i - hbw + j - 1])
end
end
SymmetricBandedMatrix{Tv}(hbw, b)
end
function full(bm::SymmetricBandedMatrix)
n = size(bm.bmat, 1)
b = zeros(eltype(bm.bmat), n, n)
hbw = bm.hbw
for i in n:-1:1
b[i, i] = bm.bmat[i, hbw+1]
for j in hbw:-1:1
if i-hbw+j-1 >0 && abs(bm.bmat[i, j]) > eps()
b[i, i-hbw+j-1] = bm.bmat[i, j]
b[i-hbw+j-1, i] = bm.bmat[i, j]
end
end
end
b
end
export
full
|
{"hexsha": "eee57f033278c3219e15b086d9e713391ba46133", "size": 2882, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/nmlib/SymmetricBandedMatrices.jl", "max_stars_repo_name": "PtFEM/NumericalMethodsforEngineers.jl", "max_stars_repo_head_hexsha": "e4a997a14adbb86b7efe1586962df39eb9285ebb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2018-07-23T18:12:52.000Z", "max_stars_repo_stars_event_max_datetime": "2020-11-25T03:32:45.000Z", "max_issues_repo_path": "src/nmlib/SymmetricBandedMatrices.jl", "max_issues_repo_name": "PtFEM/NumericalMethodsforEngineers.jl", "max_issues_repo_head_hexsha": "e4a997a14adbb86b7efe1586962df39eb9285ebb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 8, "max_issues_repo_issues_event_min_datetime": "2018-07-23T21:46:36.000Z", "max_issues_repo_issues_event_max_datetime": "2021-04-27T23:14:46.000Z", "max_forks_repo_path": "src/nmlib/SymmetricBandedMatrices.jl", "max_forks_repo_name": "PtFEM/NumericalMethodsforEngineers.jl", "max_forks_repo_head_hexsha": "e4a997a14adbb86b7efe1586962df39eb9285ebb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2018-10-27T14:13:34.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-20T18:54:06.000Z", "avg_line_length": 27.9805825243, "max_line_length": 108, "alphanum_fraction": 0.6360166551, "num_tokens": 983}
|
# Maze Navigation. Originally proposed in
# Backpropamine: differentiable neuromdulated plasticity.
#
# This code implements the "Grid Maze" task. See Section 4.5 in Miconi et al.
# ICML 2018 ( https://arxiv.org/abs/1804.02464 ), or Section 4.2 in
# Miconi et al. ICLR 2019 ( https://openreview.net/pdf?id=r1lrAiA5Ym )
#
# This file is modified to implement the maze itself. The `run.py` file
# contains the WarpGrad Network used in
#
# Meta-Learning With Warped Gradient Descent
# Flennerhag et. al., ICLR (2020), https://openreview.net/forum?id=rkeiQlBFPB
import collections
import numpy as np
Position = collections.namedtuple("Positions",
["right", "center",
"reward_right", "reward_center"])
ObsSpec = collections.namedtuple("ObsSpec",
["num_action",
"ref_size",
"additional_inputs",
"total_inputs",
"episode_length"])
def is_reward(position, idx):
"""Is agent in rewarding position?
Args:
position (Position): current agent position
idx (int): batch index.
Returns:
is_reward (bool): whether the agent is in a reward state or not.
"""
return (position.reward_right[idx] == position.right[idx]) and \
(position.reward_center[idx] == position.center[idx])
def initialize(maze_size, batch_size):
"""Initialize maze.
Args:
maze_size (int): size of maze (H, W).
batch_size (int): number of parallel mazes.
Returns (2):
maze (np.array): grid maze.
position (Position): the agent's position.
"""
maze = np.ones((maze_size, maze_size))
center = maze_size // 2
# Grid maze
maze[1:maze_size - 1, 1:maze_size - 1].fill(0)
for row in range(1, maze_size - 1):
for col in range(1, maze_size - 1):
if row % 2 == 0 and col % 2 == 0:
maze[row, col] = 1
maze[center, center] = 0
pos_right = {}
pos_center = {}
pos_reward_right = {}
pos_reward_center = {}
for nb in range(batch_size):
# Note: it doesn't matter if the reward is on the center (see below).
# All we need is not to put it on a wall or pillar (maze=1)
myrposr = 0; myrposc = 0
while (maze[myrposr, myrposc] == 1) or \
(myrposr == center and myrposc == center):
myrposr = np.random.randint(1, maze_size - 1)
myrposc = np.random.randint(1, maze_size - 1)
pos_reward_right[nb] = myrposr; pos_reward_center[nb] = myrposc
# Agent always starts an episode from the center
pos_center[nb] = center
pos_right[nb] = center
return maze, Position(pos_center, pos_right,
pos_reward_center, pos_reward_right)
def step_fun(maze, position, actions, batch_size, wall_penalty, reward_value):
"""Step function for maze
Args:
maze (np.array): the underlying maze.
position (Position): current agent position.
actions (np.array): actions taken.
batch_size (int): num parallel envs.
wall_penalty (float): penalty for hitting walls.
reward_value (float): value at goal location.
Returns (2):
        new_position (Position): the agent's updated position.
rewards (np.array): rewards for each parallel env.
"""
maze_size = maze.shape[0]
reward = np.zeros(batch_size)
for nb in range(batch_size):
action = actions[nb]
to_position_center = position.center[nb]
to_position_right = position.right[nb]
if action == 0: # Up
to_position_right -= 1
elif action == 1: # Down
to_position_right += 1
elif action == 2: # Left
to_position_center -= 1
elif action == 3: # Right
to_position_center += 1
else:
raise ValueError("Wrong Action")
reward[nb] = 0.0
if maze[to_position_right][to_position_center] == 1:
reward[nb] -= wall_penalty
else:
position.center[nb] = to_position_center
position.right[nb] = to_position_right
if is_reward(position, nb):
reward[nb] += reward_value
while is_reward(position, nb):
position.right[nb] = np.random.randint(1, maze_size - 1)
position.center[nb] = np.random.randint(1, maze_size - 1)
return position, reward
class Maze:
"""Navigation Maze.
Args:
obs_spec (ObsSpec): observation specs.
maze_size (int): size of maze (H, W).
        batch_size (int): number of parallel mazes.
wall_penalty (float): penalty for hitting walls.
reward_value (float): value at goal location.
"""
def __init__(self, obs_spec, maze_size, batch_size,
wall_penalty, reward_value):
self._obs_spec = obs_spec
self._batch_size = batch_size
self._maze, self._position = initialize(maze_size, batch_size)
def _step(actions):
return step_fun(self._maze, self._position, actions,
batch_size, wall_penalty, reward_value)
self._step = _step
def step(self, actions):
self._position, rewards = self._step(actions)
return rewards
def obs(self, actions, rewards, num_steps):
tot_size = self._obs_spec.total_inputs
ext_size = self._obs_spec.additional_inputs
eps_size = self._obs_spec.episode_length
ref_size = self._obs_spec.ref_size
pos_size = ref_size * ref_size
pos = self._position
obs = np.zeros((self._batch_size, tot_size), dtype=np.float32)
def get_right(idx):
return (pos.right[idx] - ref_size // 2,
pos.right[idx] + ref_size // 2 + 1)
def get_center(idx):
return (pos.center[idx] - ref_size // 2,
pos.center[idx] + ref_size // 2 + 1)
mz = self._maze.copy()
for nb in range(self._batch_size):
# Position
x0, x1 = get_right(nb)
y0, y1 = get_center(nb)
obs[nb, 0:pos_size] = mz[x0:x1,y0:y1].flatten() * 1.0
# Auxiliary inputs
obs[nb, pos_size + 1] = 1.0 # Bias neuron
obs[nb, pos_size + 2] = num_steps / eps_size
obs[nb, pos_size + 3] = 1.0 * rewards[nb]
obs[nb, pos_size + ext_size + actions[nb]] = 1
return obs
|
{"hexsha": "cc711b85d615a221b71e1a017fccdba933f54b98", "size": 6648, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/maze_navigation/maze.py", "max_stars_repo_name": "rcmalli/warpgrad", "max_stars_repo_head_hexsha": "d9ef72af10eec62ae92bc24595cb1a4a0207e319", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 80, "max_stars_repo_stars_event_min_datetime": "2020-02-18T09:55:10.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-10T12:59:49.000Z", "max_issues_repo_path": "src/maze_navigation/maze.py", "max_issues_repo_name": "rcmalli/warpgrad", "max_issues_repo_head_hexsha": "d9ef72af10eec62ae92bc24595cb1a4a0207e319", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2020-07-21T16:47:42.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-31T06:19:36.000Z", "max_forks_repo_path": "src/maze_navigation/maze.py", "max_forks_repo_name": "rcmalli/warpgrad", "max_forks_repo_head_hexsha": "d9ef72af10eec62ae92bc24595cb1a4a0207e319", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 14, "max_forks_repo_forks_event_min_datetime": "2020-02-18T13:19:48.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-05T23:52:11.000Z", "avg_line_length": 33.7461928934, "max_line_length": 78, "alphanum_fraction": 0.5840854392, "include": true, "reason": "import numpy", "num_tokens": 1662}
|
import abc
from typing import List
import numpy as np
import pandas as pd
# abstract base class
class TransformationStrategy():
    @abc.abstractmethod
def transform(self, df: pd.DataFrame) -> pd.DataFrame:
pass
    @abc.abstractmethod
def get_code(self, df_name: str) -> str:
pass
def __str__(self) -> str:
return f"{self.__class__.__name__}"
# load and save
class LoadExcelStrategy(TransformationStrategy):
def __init__(self, file_name: str, sheet_name: str | int = 0, index: str | None = None):
self.file_name = file_name
self.sheet_name = sheet_name
self.index = index
def transform(self, df: pd.DataFrame) -> pd.DataFrame:
df = pd.read_excel(self.file_name, sheet_name=self.sheet_name, index_col=self.index)
df = df.replace({np.nan: None})
return df
def get_code(self, df_name: str) -> str:
return f"""
#here we read the data provided by the {self.file_name} from the sheet {self.sheet_name if type(self.sheet_name) != int else "number "+str(self.sheet_name)} and set the index to {self.index}
{df_name} = pd.read_excel("{self.file_name}", sheet_name = {self.sheet_name if type(self.sheet_name) == int else "'"+self.sheet_name+"'"}{f', index_col = "'+self.index+'"' if self.index is not None else ""})
{df_name} = {df_name}.replace({{np.nan:None}})
"""
class SaveExcelStrategy(TransformationStrategy):
def __init__(self, file_name: str, sheet_name: str = "Sheet1", impl_bool_3: bool = False):
self.file_name = file_name
self.sheet_name = sheet_name
self.index = impl_bool_3
def transform(self, df: pd.DataFrame) -> pd.DataFrame:
df.to_excel(self.file_name, index=self.index, sheet_name=self.sheet_name)
return df
def get_code(self, df_name: str) -> str:
return f"""
#here we save the data from {df_name} to the the {self.file_name} into the sheet {self.sheet_name} and {"use" if self.index else "dont use"} the index
{df_name}.to_excel("{self.file_name}", sheet_name = "{self.sheet_name}", index = {self.index})
"""
# views
class SelectRowsStrategy(TransformationStrategy):
def __init__(self, query: str):
self.query = query
self.engines = [{"engine": "numexpr"}, {"engine": "python"}]
def transform(self, df: pd.DataFrame) -> pd.DataFrame:
for e in self.engines:
try:
df = df.query(self.query, **e)
return df
except Exception:
print(f"{e} didn't work on quering {self.eval_string} trying next engine")
return pd.DataFrame(columns=df.columns)
def get_code(self, df_name: str) -> str:
return f"""#Here we try to query the Expression {self.query} with diffrent engines
for engine in {self.engines}:
try:
{df_name} = {df_name}.query('{self.query}', **engine)
except Exception:
print(engine, "failed to query", '{self.query}', "trying next")
"""
class SelectColumnStrategy(TransformationStrategy):
def __init__(self, column: List[str]) -> None:
self.cols = column
def transform(self, df: pd.DataFrame) -> pd.DataFrame:
return df[self.cols]
def get_code(self, df_name: str) -> str:
return f"#we get a subset of the dataframe with these columns {self.cols}\n{df_name} = {df_name}[{self.cols}]\n"
class RenameStrategy(TransformationStrategy):
def __init__(self, from_col: str, to_col: str) -> None:
self.mapper = {from_col: to_col}
def transform(self, df: pd.DataFrame) -> pd.DataFrame:
return df.rename(columns=self.mapper)
def get_code(self, df_name: str) -> str:
return f'#rename column from-> to {self.mapper}\n{df_name}={df_name}.rename(columns={self.mapper})\n'
class deleteDataStrategy(TransformationStrategy):
def __init__(self, impl_bool_1: bool = False) -> None:
self.keep = impl_bool_1
def transform(self, df: pd.DataFrame) -> pd.DataFrame:
if self.keep:
return df[0:0]
return pd.DataFrame()
def get_code(self, df_name: str) -> str:
return f"#delete all data keep structure\n{df_name}[0:0]" if self.keep else f"#delete dataframe completly by creating a new one\n{df_name} = pd.DataFrame()"
# updates row based
class addRowStrategy(TransformationStrategy):
def __init__(self, values: List) -> None:
if type(values[0]) == dict:
self.order = False
self.tba = {k: [v] for d in values for k, v in d.items()}
else:
self.order = True
self.tba = values
def transform(self, df: pd.DataFrame) -> pd.DataFrame:
if self.order:
df.loc[len(df.index)] = self.tba
else:
row_df = pd.DataFrame(data=self.tba)
df = df.append(row_df, ignore_index=True)
return df
def get_code(self, df_name: str) -> str:
if self.order:
return f"#add row based on args order\n{df_name}.loc[len({df_name}.index)] = {self.tba}"
return f"#add row based on keywords by creating a temp df\ntemp_df = pd.DataFrame(data = {self.tba})\n{df_name} = {df_name}.append(temp_df, ignore_index = True)"
class deleteRowStrategy(TransformationStrategy):
def __init__(self, values: List) -> None:
self.engines = [{"engine": "numexpr"}, {"engine": "python"}]
if type(values[0]) == dict:
self.order = False
delist = {k: v for d in values for k, v in d.items()}
self.tbd = " and ".join([f"""(`{key}` == {entry if type(entry) is not str else '"'+entry+'"' })""" for key, entry in delist.items()])
else:
self.order = True
self.tbd = values
def transform(self, df: pd.DataFrame) -> pd.DataFrame:
if self.order:
self.tbd = " and ".join(f"""(`{key}` == {entry if type(entry) is not str else '"'+entry+'"' })""" for key, entry in dict(zip(df.columns, self.tbd)).items())
return SelectRowsStrategy(f" not ({self.tbd})").transform(df)
def get_code(self, df_name: str) -> str:
comment = "# delete row by filtering the inverse of the provided values\n"
if self.order:
filter_part = f"""deli = '"'
filter = " and ".join(f"(`{{key}}` == {{entry if type(entry) is not str else deli+entry+deli}})" for key, entry in dict(zip({df_name}.columns, {self.tbd})).items())
"""
else:
filter_part = f"filter = '{self.tbd}'\n"
query_part = f"""for engine in {self.engines}:
try:
{df_name} = {df_name}.query(f'not ({{filter}})', **engine)
except Exception:
print(engine, "failed to query", f'not ({{filter}})', "trying next")
"""
return comment + filter_part + query_part
class changeRowStrategy(TransformationStrategy):
def __init__(self, org_value: List, new_value: List) -> None:
self.cur = deleteRowStrategy(org_value)
self.new_v = addRowStrategy(new_value)
def transform(self, df: pd.DataFrame) -> pd.DataFrame:
df = self.cur.transform(df)
return self.new_v.transform(df)
def get_code(self, df_name: str) -> str:
return f"""#updating values by first deleting and then adding
#deleting
{self.cur.get_code(df_name)}
#adding
{self.new_v.get_code(df_name)}
"""
# updates on cols
class deleteColumnStrategy(TransformationStrategy):
def __init__(self, column: str) -> None:
self.tbd = column
def transform(self, df: pd.DataFrame) -> pd.DataFrame:
return df.drop(self.tbd, axis=1)
def get_code(self, df_name: str) -> str:
return f"#delete column\n{df_name} = {df_name}.drop('{self.tbd}', axis = 1)"
class setColumnStrategy(TransformationStrategy):
def __init__(self, column: str, value: str) -> None:
self.expr = f"`{column}` = {value}"
self.engines = [{}, {"engine": "python"}]
def transform(self, df: pd.DataFrame) -> pd.DataFrame:
for engine in self.engines:
try:
df = df.eval(self.expr, **engine)
return df
except Exception:
# prop logging of some sort
pass
raise Exception("Transformation failed: not a valid value provided")
def get_code(self, df_name: str) -> str:
return f"""#Here we try to evaluate the Expression {self.expr} with diffrent engines
for engine in {self.engines}:
try:
{df_name} = {df_name}.eval('{self.expr}', **engine)
except Exception:
print(engine, "failed to evaluate", '{self.expr}', "trying next")
"""
class addColumnStrategy(TransformationStrategy):
def __init__(self, column: str, value: str) -> None:
self.setter = setColumnStrategy(column, value)
self.column = column
def transform(self, df: pd.DataFrame) -> pd.DataFrame:
if self.column in df.columns:
raise Exception(f"Transformation failed: Column {self.column} already in dataframe")
return self.setter.transform(df)
def get_code(self, df_name: str) -> str:
return self.setter.get_code(df_name)
class changeColumnStrategy(TransformationStrategy):
def __init__(self, column: str, value: str) -> None:
self.setter = setColumnStrategy(column, value)
self.column = column
def transform(self, df: pd.DataFrame) -> pd.DataFrame:
if self.column not in df.columns:
raise Exception(f"Transformation failed: Column {self.column} not in dataframe")
return self.setter.transform(df)
def get_code(self, df_name: str) -> str:
return self.setter.get_code(df_name)
# special functions
class DoNothingStrategy(TransformationStrategy):
def transform(self, df: pd.DataFrame) -> pd.DataFrame:
return df
def get_code(self, df_name: str) -> str:
return f"# fyi here was called a Do Nothing call on {df_name}\n"
class SetIndexStrategy(TransformationStrategy):
def __init__(self, column: str):
self.column = column
def transform(self, df: pd.DataFrame) -> pd.DataFrame:
return df.set_index(self.column)
def get_code(self, df_name: str) -> str:
return f"# set index to {self.column} in {df_name}\n{df_name}.set_index('{self.column}', inplace = True)"
class ResetIndexStrategy(TransformationStrategy):
def __init__(self, impl_bool_1: bool = False):
self.drop = not impl_bool_1
def transform(self, df: pd.DataFrame) -> pd.DataFrame:
return df.reset_index(drop=self.drop)
def get_code(self, df_name: str) -> str:
return f"# rest index to in {df_name}\n{df_name}.reset_index(drop = {self.drop}, inplace = True)"
# special special functions
class dotStrategy(TransformationStrategy):
def __init__(self, func_string: str):
self.func_string = func_string
def transform(self, df: pd.DataFrame) -> pd.DataFrame:
exec_string = f'edf = df{self.func_string}'
loc = {}
exec(exec_string, locals(), loc)
return loc["edf"]
def get_code(self, df_name: str) -> str:
return f"#Here is a call Happing using the dotStrategy aka pure python code beaware\n{df_name} = {df_name}{self.func_string}\n"
|
{"hexsha": "ca249b61f93d9968a1155ead8cd906d293685255", "size": 11214, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/BPMN/TransformationStrategy.py", "max_stars_repo_name": "oilyshelf/AutomaModela", "max_stars_repo_head_hexsha": "690bbc51bb21cccf07457d9e5f7bce504800db19", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/BPMN/TransformationStrategy.py", "max_issues_repo_name": "oilyshelf/AutomaModela", "max_issues_repo_head_hexsha": "690bbc51bb21cccf07457d9e5f7bce504800db19", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/BPMN/TransformationStrategy.py", "max_forks_repo_name": "oilyshelf/AutomaModela", "max_forks_repo_head_hexsha": "690bbc51bb21cccf07457d9e5f7bce504800db19", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.4873417722, "max_line_length": 207, "alphanum_fraction": 0.6369716426, "include": true, "reason": "import numpy", "num_tokens": 2785}
|
// node
#include <node.h> // for NODE_SET_PROTOTYPE_METHOD, etc
#include <node_object_wrap.h> // for ObjectWrap
#include <v8.h>
#include <uv.h>
#include <node_buffer.h>
#include <node_version.h>
// mapnik
#include <mapnik/color.hpp> // for color
#include <mapnik/image_view.hpp> // for image_view, etc
#include <mapnik/image_util.hpp>
#include <mapnik/graphics.hpp>
// boost
#include <boost/make_shared.hpp>
#include "mapnik_image.hpp"
#include "mapnik_image_view.hpp"
#include "mapnik_color.hpp"
#include "mapnik_palette.hpp"
#include "utils.hpp"
// std
#include <exception>
Persistent<FunctionTemplate> ImageView::constructor;
void ImageView::Initialize(Handle<Object> target) {
HandleScope scope;
constructor = Persistent<FunctionTemplate>::New(FunctionTemplate::New(ImageView::New));
constructor->InstanceTemplate()->SetInternalFieldCount(1);
constructor->SetClassName(String::NewSymbol("ImageView"));
NODE_SET_PROTOTYPE_METHOD(constructor, "encodeSync", encodeSync);
NODE_SET_PROTOTYPE_METHOD(constructor, "encode", encode);
NODE_SET_PROTOTYPE_METHOD(constructor, "save", save);
NODE_SET_PROTOTYPE_METHOD(constructor, "width", width);
NODE_SET_PROTOTYPE_METHOD(constructor, "height", height);
NODE_SET_PROTOTYPE_METHOD(constructor, "isSolid", isSolid);
NODE_SET_PROTOTYPE_METHOD(constructor, "isSolidSync", isSolidSync);
NODE_SET_PROTOTYPE_METHOD(constructor, "getPixel", getPixel);
target->Set(String::NewSymbol("ImageView"),constructor->GetFunction());
}
ImageView::ImageView(Image * JSImage) :
ObjectWrap(),
this_(),
JSImage_(JSImage) {
JSImage_->_ref();
}
ImageView::~ImageView()
{
JSImage_->_unref();
}
Handle<Value> ImageView::New(const Arguments& args)
{
HandleScope scope;
if (!args.IsConstructCall())
return ThrowException(String::New("Cannot call constructor as function, you need to use 'new' keyword"));
if (args[0]->IsExternal())
{
//std::clog << "image view external!\n";
Local<External> ext = Local<External>::Cast(args[0]);
void* ptr = ext->Value();
ImageView* im = static_cast<ImageView*>(ptr);
im->Wrap(args.This());
return args.This();
} else {
return ThrowException(String::New("Cannot create this object from Javascript"));
}
return Undefined();
}
Handle<Value> ImageView::New(Image * JSImage ,
unsigned x,
unsigned y,
unsigned w,
unsigned h
)
{
HandleScope scope;
ImageView* imv = new ImageView(JSImage);
imv->this_ = boost::make_shared<mapnik::image_view<mapnik::image_data_32> >(JSImage->get()->get_view(x,y,w,h));
Handle<Value> ext = External::New(imv);
Handle<Object> obj = constructor->GetFunction()->NewInstance(1, &ext);
return scope.Close(obj);
}
typedef struct {
uv_work_t request;
ImageView* im;
Persistent<Function> cb;
bool error;
std::string error_name;
bool result;
mapnik::image_view<mapnik::image_data_32>::pixel_type pixel;
} is_solid_image_view_baton_t;
Handle<Value> ImageView::isSolid(const Arguments& args)
{
HandleScope scope;
ImageView* im = node::ObjectWrap::Unwrap<ImageView>(args.This());
if (args.Length() == 0) {
return isSolidSync(args);
}
// ensure callback is a function
Local<Value> callback = args[args.Length()-1];
if (!args[args.Length()-1]->IsFunction())
return ThrowException(Exception::TypeError(
String::New("last argument must be a callback function")));
is_solid_image_view_baton_t *closure = new is_solid_image_view_baton_t();
closure->request.data = closure;
closure->im = im;
closure->result = true;
closure->pixel = 0;
closure->error = false;
closure->cb = Persistent<Function>::New(Handle<Function>::Cast(callback));
uv_queue_work(uv_default_loop(), &closure->request, EIO_IsSolid, (uv_after_work_cb)EIO_AfterIsSolid);
im->Ref();
return Undefined();
}
void ImageView::EIO_IsSolid(uv_work_t* req)
{
is_solid_image_view_baton_t *closure = static_cast<is_solid_image_view_baton_t *>(req->data);
image_view_ptr view = closure->im->get();
if (view->width() > 0 && view->height() > 0)
{
typedef mapnik::image_view<mapnik::image_data_32>::pixel_type pixel_type;
pixel_type const first_pixel = view->getRow(0)[0];
closure->pixel = first_pixel;
for (unsigned y = 0; y < view->height(); ++y)
{
pixel_type const * row = view->getRow(y);
for (unsigned x = 0; x < view->width(); ++x)
{
if (first_pixel != row[x])
{
closure->result = false;
return;
}
}
}
}
else
{
closure->error = true;
closure->error_name = "image does not have valid dimensions";
}
}
void ImageView::EIO_AfterIsSolid(uv_work_t* req)
{
HandleScope scope;
is_solid_image_view_baton_t *closure = static_cast<is_solid_image_view_baton_t *>(req->data);
TryCatch try_catch;
if (closure->error) {
Local<Value> argv[1] = { Exception::Error(String::New(closure->error_name.c_str())) };
closure->cb->Call(Context::GetCurrent()->Global(), 1, argv);
}
else
{
if (closure->result)
{
Local<Value> argv[3] = { Local<Value>::New(Null()),
Local<Value>::New(Boolean::New(closure->result)),
Local<Value>::New(Number::New(closure->pixel)),
};
closure->cb->Call(Context::GetCurrent()->Global(), 3, argv);
}
else
{
Local<Value> argv[2] = { Local<Value>::New(Null()),
Local<Value>::New(Boolean::New(closure->result))
};
closure->cb->Call(Context::GetCurrent()->Global(), 2, argv);
}
}
if (try_catch.HasCaught())
{
node::FatalException(try_catch);
}
closure->im->Unref();
closure->cb.Dispose();
delete closure;
}
Handle<Value> ImageView::isSolidSync(const Arguments& args)
{
HandleScope scope;
ImageView* im = node::ObjectWrap::Unwrap<ImageView>(args.This());
image_view_ptr view = im->get();
if (view->width() > 0 && view->height() > 0)
{
mapnik::image_view<mapnik::image_data_32>::pixel_type const* first_row = view->getRow(0);
mapnik::image_view<mapnik::image_data_32>::pixel_type const first_pixel = first_row[0];
for (unsigned y = 0; y < view->height(); ++y)
{
mapnik::image_view<mapnik::image_data_32>::pixel_type const * row = view->getRow(y);
for (unsigned x = 0; x < view->width(); ++x)
{
if (first_pixel != row[x])
{
return scope.Close(False());
}
}
}
}
return scope.Close(True());
}
Handle<Value> ImageView::getPixel(const Arguments& args)
{
HandleScope scope;
int x = 0;
int y = 0;
if (args.Length() >= 2) {
if (!args[0]->IsNumber())
return ThrowException(Exception::TypeError(
String::New("first arg, 'x' must be an integer")));
if (!args[1]->IsNumber())
return ThrowException(Exception::TypeError(
String::New("second arg, 'y' must be an integer")));
x = args[0]->IntegerValue();
y = args[1]->IntegerValue();
} else {
return ThrowException(Exception::TypeError(
String::New("must supply x,y to query pixel color")));
}
ImageView* im = node::ObjectWrap::Unwrap<ImageView>(args.This());
image_view_ptr view = im->get();
if (x >= 0 && x < static_cast<int>(view->width())
&& y >=0 && y < static_cast<int>(view->height()))
{
mapnik::image_view<mapnik::image_data_32>::pixel_type const * row = view->getRow(y);
mapnik::image_view<mapnik::image_data_32>::pixel_type const pixel = row[x];
unsigned r = pixel & 0xff;
unsigned g = (pixel >> 8) & 0xff;
unsigned b = (pixel >> 16) & 0xff;
unsigned a = (pixel >> 24) & 0xff;
return Color::New(mapnik::color(r,g,b,a));
}
return Undefined();
}
Handle<Value> ImageView::width(const Arguments& args)
{
HandleScope scope;
ImageView* im = node::ObjectWrap::Unwrap<ImageView>(args.This());
return scope.Close(Integer::New(im->get()->width()));
}
Handle<Value> ImageView::height(const Arguments& args)
{
HandleScope scope;
ImageView* im = node::ObjectWrap::Unwrap<ImageView>(args.This());
return scope.Close(Integer::New(im->get()->height()));
}
Handle<Value> ImageView::encodeSync(const Arguments& args)
{
HandleScope scope;
ImageView* im = node::ObjectWrap::Unwrap<ImageView>(args.This());
std::string format = "png";
palette_ptr palette;
// accept custom format
if (args.Length() >= 1) {
if (!args[0]->IsString())
return ThrowException(Exception::TypeError(
String::New("first arg, 'format' must be a string")));
format = TOSTR(args[0]);
}
// options hash
if (args.Length() >= 2) {
if (!args[1]->IsObject())
return ThrowException(Exception::TypeError(
String::New("optional second arg must be an options object")));
Local<Object> options = args[1]->ToObject();
if (options->Has(String::New("palette")))
{
Local<Value> format_opt = options->Get(String::New("palette"));
if (!format_opt->IsObject())
return ThrowException(Exception::TypeError(
String::New("'palette' must be an object")));
Local<Object> obj = format_opt->ToObject();
if (obj->IsNull() || obj->IsUndefined() || !Palette::constructor->HasInstance(obj))
return ThrowException(Exception::TypeError(String::New("mapnik.Palette expected as second arg")));
palette = node::ObjectWrap::Unwrap<Palette>(obj)->palette();
}
}
try {
std::string s;
mapnik::image_view<mapnik::image_data_32> const& image = *(im->this_);
if (palette.get())
{
s = save_to_string(image, format, *palette);
}
else {
s = save_to_string(image, format);
}
#if NODE_VERSION_AT_LEAST(0, 11, 0)
return scope.Close(node::Buffer::New((char*)s.data(),s.size()));
#else
return scope.Close(node::Buffer::New((char*)s.data(),s.size())->handle_);
#endif
}
catch (std::exception const& ex)
{
return ThrowException(Exception::Error(
String::New(ex.what())));
}
}
typedef struct {
uv_work_t request;
ImageView* im;
std::string format;
palette_ptr palette;
bool error;
std::string error_name;
Persistent<Function> cb;
std::string result;
} encode_image_view_baton_t;
Handle<Value> ImageView::encode(const Arguments& args)
{
HandleScope scope;
ImageView* im = node::ObjectWrap::Unwrap<ImageView>(args.This());
std::string format = "png";
palette_ptr palette;
// accept custom format
if (args.Length() > 1){
if (!args[0]->IsString())
return ThrowException(Exception::TypeError(
String::New("first arg, 'format' must be a string")));
format = TOSTR(args[0]);
}
// options hash
if (args.Length() >= 2) {
if (!args[1]->IsObject())
return ThrowException(Exception::TypeError(
String::New("optional second arg must be an options object")));
Local<Object> options = args[1]->ToObject();
if (options->Has(String::New("palette")))
{
Local<Value> format_opt = options->Get(String::New("palette"));
if (!format_opt->IsObject())
return ThrowException(Exception::TypeError(
String::New("'palette' must be an object")));
Local<Object> obj = format_opt->ToObject();
if (obj->IsNull() || obj->IsUndefined() || !Palette::constructor->HasInstance(obj))
return ThrowException(Exception::TypeError(String::New("mapnik.Palette expected as second arg")));
palette = node::ObjectWrap::Unwrap<Palette>(obj)->palette();
}
}
// ensure callback is a function
Local<Value> callback = args[args.Length()-1];
if (!args[args.Length()-1]->IsFunction())
return ThrowException(Exception::TypeError(
String::New("last argument must be a callback function")));
encode_image_view_baton_t *closure = new encode_image_view_baton_t();
closure->request.data = closure;
closure->im = im;
closure->format = format;
closure->palette = palette;
closure->error = false;
closure->cb = Persistent<Function>::New(Handle<Function>::Cast(callback));
uv_queue_work(uv_default_loop(), &closure->request, EIO_Encode, (uv_after_work_cb)EIO_AfterEncode);
im->Ref();
return Undefined();
}
void ImageView::EIO_Encode(uv_work_t* req)
{
encode_image_view_baton_t *closure = static_cast<encode_image_view_baton_t *>(req->data);
try {
mapnik::image_view<mapnik::image_data_32> const& im = *(closure->im->this_);
if (closure->palette.get())
{
closure->result = save_to_string(im, closure->format, *closure->palette);
}
else
{
closure->result = save_to_string(im, closure->format);
}
}
catch (std::exception const& ex)
{
closure->error = true;
closure->error_name = ex.what();
}
}
void ImageView::EIO_AfterEncode(uv_work_t* req)
{
HandleScope scope;
encode_image_view_baton_t *closure = static_cast<encode_image_view_baton_t *>(req->data);
TryCatch try_catch;
if (closure->error) {
Local<Value> argv[1] = { Exception::Error(String::New(closure->error_name.c_str())) };
closure->cb->Call(Context::GetCurrent()->Global(), 1, argv);
}
else
{
#if NODE_VERSION_AT_LEAST(0, 11, 0)
Local<Value> argv[2] = { Local<Value>::New(Null()), Local<Value>::New(node::Buffer::New((char*)closure->result.data(),closure->result.size())) };
#else
Local<Value> argv[2] = { Local<Value>::New(Null()), Local<Value>::New(node::Buffer::New((char*)closure->result.data(),closure->result.size())->handle_) };
#endif
closure->cb->Call(Context::GetCurrent()->Global(), 2, argv);
}
if (try_catch.HasCaught()) {
node::FatalException(try_catch);
}
closure->im->Unref();
closure->cb.Dispose();
delete closure;
}
Handle<Value> ImageView::save(const Arguments& args)
{
HandleScope scope;
if (args.Length() == 0 || !args[0]->IsString()){
return ThrowException(Exception::TypeError(
String::New("filename required")));
}
std::string filename = TOSTR(args[0]);
std::string format("");
if (args.Length() >= 2) {
if (!args[1]->IsString())
return ThrowException(Exception::TypeError(
String::New("both 'filename' and 'format' arguments must be strings")));
format = mapnik::guess_type(TOSTR(args[1]));
if (format == "<unknown>") {
std::ostringstream s("");
s << "unknown output extension for: " << filename << "\n";
return ThrowException(Exception::Error(
String::New(s.str().c_str())));
}
}
ImageView* im = node::ObjectWrap::Unwrap<ImageView>(args.This());
try
{
save_to_file(*im->get(),filename);
}
catch (std::exception const& ex)
{
return ThrowException(Exception::Error(
String::New(ex.what())));
}
return Undefined();
}
|
{"hexsha": "e6825324d8d7d46406bdfc8512a91fe01621666b", "size": 16434, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/mapnik_image_view.cpp", "max_stars_repo_name": "calvinmetcalf/node-mapnik", "max_stars_repo_head_hexsha": "3d26f2089dee3cfc901965f6646d50004a0e0e56", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/mapnik_image_view.cpp", "max_issues_repo_name": "calvinmetcalf/node-mapnik", "max_issues_repo_head_hexsha": "3d26f2089dee3cfc901965f6646d50004a0e0e56", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/mapnik_image_view.cpp", "max_forks_repo_name": "calvinmetcalf/node-mapnik", "max_forks_repo_head_hexsha": "3d26f2089dee3cfc901965f6646d50004a0e0e56", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.2235294118, "max_line_length": 162, "alphanum_fraction": 0.5828769624, "num_tokens": 3844}
|
import time
import numpy as np
from datetime import datetime
from Recon import SensorReader, Recon
class Env(object):
def cap(self,x, down, up, ninter):
if x<=down:
x=down
if up<=x:
x=up-1
step=(up-down)/ninter
#print x
return (x-down)//step
def __init__(self):
self.n_action=16
self.n_state=500
self.max_step=25
self.recon= Recon()
self.reset()
self.cs=0
self.origin=datetime.now()
self.read= SensorReader()
def get_state(self):
x1=self.cap(self.temp, 10, 25, 10)
x2=self.cap(self.humi, 10, 100, 5)
x3=self.light
x4=self.watp
        # mixed-radix encoding: x1 has 10 values, x2 has 5, x3 and x4 have 2 each
        return int(x1*5*2*2 + x2*2*2 + x3*2 + x4)
def reset(self, temp=25, humi=50, light=0, watp=0):
self.temp=temp
self.humi=humi
self.light=light
self.watp=watp
self.done=0
self.n_step=0
return self.get_state()
def set_target(self, temp=25, humi=80, light=0, watp=0):
self.t_temp=temp
self.t_humi=humi
self.t_light=light
self.t_watp=watp
return self.get_state()
def get_reward(self):
if self.t_temp-1<self.temp and self.temp<=self.t_temp+1:
if self.t_humi-5<self.humi and self.humi<=self.t_humi+5:
return 1
return 0
#return np.log(np.sqrt(((self.t_temp-self.temp)**2)+((self.t_humi-self.humi)**2)))
def code2int(self,code):
#(comp,mist,light,watp)
return code[0]*8 + code[1]*4 + code[2]*2 + code[3]
def int2code(self,a=0):
b=bin(16+a)
return int(b[-4]),int(b[-3]),int(b[-2]),int(b[-1])
def step(self,a):
self.cycle()
self.n_step+=1
comp,mist,light,watp = self.int2code(a)
if comp==1:
self.recon.com_on()
else:
self.recon.com_off()
if mist==1:
self.recon.humi_on()
else :
self.recon.humi_off()
if light==1:
self.recon.light_on()
else:
self.recon.light_off()
if watp==1:
self.recon.pump_on()
else:
self.recon.pump_off()
if self.max_step <= self.n_step:
self.done=1
self.temp = self.read.get_temp()
self.humi = self.read.get_humi()
return self.get_state(), self.get_reward(), self.done, 0
def render(self):
print( "step:{} temp:{}, humi:{}, light:{}, watp{}".format(
self.n_step, self.temp, self.humi, self.light, self.watp))
    def cycle(self):
        # use the persistent phase state (self.cs) and timestamp (self.origin) set in __init__
        if self.cs == 0:
            self.set_target(temp=18, humi=80, light=1, watp=1)
            print("light on and pump on")
            if 120 <= (datetime.now() - self.origin).seconds:
                self.cs = 1
                self.origin = datetime.now()
            else:
                print("Morning water")
        if self.cs == 1:
            self.set_target(temp=18, humi=80, light=1, watp=0)
            print("light on and pump off")
            if 21420 <= (datetime.now() - self.origin).seconds:
                self.cs = 2
                self.origin = datetime.now()
            else:
                print("Morning time")
        if self.cs == 2:
            self.set_target(temp=10, humi=60, light=0, watp=1)
            print("light off and pump on")
            if 120 <= (datetime.now() - self.origin).seconds:
                self.cs = 3
                self.origin = datetime.now()
            else:
                print("Night water")
        if self.cs == 3:
            self.set_target(temp=10, humi=60, light=0, watp=0)
            print("light off and pump off")
            if 64620 <= (datetime.now() - self.origin).seconds:
                self.cs = 0
                self.origin = datetime.now()
            else:
                print("Night time")
|
{"hexsha": "19f609685e2a2649c0b28a27da87f2da6d1f6327", "size": 3993, "ext": "py", "lang": "Python", "max_stars_repo_path": "Strawry/control/Env.py", "max_stars_repo_name": "Bossabossy/Strawry", "max_stars_repo_head_hexsha": "8ee28138599d258eaa48a625ea929a8b4ccbd639", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Strawry/control/Env.py", "max_issues_repo_name": "Bossabossy/Strawry", "max_issues_repo_head_hexsha": "8ee28138599d258eaa48a625ea929a8b4ccbd639", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Strawry/control/Env.py", "max_forks_repo_name": "Bossabossy/Strawry", "max_forks_repo_head_hexsha": "8ee28138599d258eaa48a625ea929a8b4ccbd639", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.5379310345, "max_line_length": 90, "alphanum_fraction": 0.490608565, "include": true, "reason": "import numpy", "num_tokens": 1080}
|
import os
import george
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from sklearn import gaussian_process
data_dir = '/home/ilya/Dropbox/petya'
data_file = 'Total_rate_vs_Years_v2.txt'
df = pd.read_table(os.path.join(data_dir, data_file), delim_whitespace=True,
names=['exper', 'band', 'date', 'time', 'st1', 'st2',
'rate_off1', 'rate_off2', 'total_rate', 'snr'])
df['datetime'] = pd.to_datetime(df['date'] + ' ' + df['time'])
# Convert datetime to timedelta
df['timedelta'] = df['datetime'] - sorted(df['datetime'])[0]
df['timedelta'] = [int(dt.days) for dt in df['timedelta']]
df['total_rate'] = 3. * 10 ** 10 * df['total_rate']
ground_stations = set(df['st2'])
colors = ('b', 'g', 'r', 'c', 'm', 'y', 'k')
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
for i, ground_station in enumerate(ground_stations):
ax.plot(df[df['st2'] == ground_station]['datetime'],
df[df['st2'] == ground_station]['total_rate'],
'.{}'.format(colors[i]), label=ground_station)
plt.legend(loc=2)
plt.gcf().autofmt_xdate()
fig.show()
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
for i, ground_station in enumerate(ground_stations):
ax.plot(df[df['st2'] == ground_station]['datetime'],
df[df['st2'] == ground_station]['total_rate'],
'.{}'.format(colors[i]), label=ground_station)
plt.legend(loc=2)
plt.gcf().autofmt_xdate()
fig.show()
# # Fit data with GP regression
from george.kernels import CosineKernel, ConstantKernel
kernel = CosineKernel(365) + ConstantKernel(-1.)
gp = george.GP(kernel)
x = np.array(df[df['st2'] == 'ARECIBO']['timedelta']) +\
np.random.normal(0., scale=0.5,
size=len(df[df['st2'] == 'ARECIBO']['timedelta']))
x = (x - np.mean(x)) / np.std(x)
gp.compute(x, yerr=10**(-3))
t = np.linspace(0, 900, 900)
mu, cov = gp.predict(df['total_rate'], t)
std = np.sqrt(np.diag(cov))
X = np.atleast_2d(np.array(df['timedelta'], dtype=float) +
np.random.normal(0., scale=0.1, size=len(df['timedelta']))).T
y = df['total_rate']
x = np.atleast_2d(np.linspace(0, 900, 1000)).T
gp = gaussian_process.GaussianProcess(regr='constant', theta0=1e-1, thetaL=1e-4,
thetaU=1e-1, nugget=0.1)
gp.fit(X, y)
y_pred, sigma2_pred = gp.predict(x, eval_MSE=True)
plt.fill_between(x[:, 0], y_pred+sigma2_pred, y_pred-sigma2_pred, color="k",
alpha=0.1)
plt.plot(x[:, 0], y_pred+sigma2_pred, color="k", alpha=1, lw=0.25)
plt.plot(x[:, 0], y_pred-sigma2_pred, color="k", alpha=1, lw=0.25)
plt.plot(x[:, 0], y_pred, color="k", alpha=1, lw=0.5)
plt.plot(df['timedelta'], df['total_rate'], ".k")
|
{"hexsha": "2c8b27754088c20c01704ed1c0359c9151a39094", "size": 2698, "ext": "py", "lang": "Python", "max_stars_repo_path": "gp.py", "max_stars_repo_name": "ipashchenko/ra_orbit", "max_stars_repo_head_hexsha": "d58a920b6b185450012770cc4468be9399f2ea5f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "gp.py", "max_issues_repo_name": "ipashchenko/ra_orbit", "max_issues_repo_head_hexsha": "d58a920b6b185450012770cc4468be9399f2ea5f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "gp.py", "max_forks_repo_name": "ipashchenko/ra_orbit", "max_forks_repo_head_hexsha": "d58a920b6b185450012770cc4468be9399f2ea5f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.9589041096, "max_line_length": 80, "alphanum_fraction": 0.6249073388, "include": true, "reason": "import numpy", "num_tokens": 839}
|
# Linear Algebra, Handling of Arrays and more Python Features
## Introduction
The aim of this set of lectures is to review some central linear algebra algorithms that we will need in our
data analysis part and in the construction of Machine Learning algorithms (ML).
This will allow us to introduce some central programming features of high-level languages like Python and
compiled languages like C++ and/or Fortran.
As discussed in the introductory notes, these series of lectures focuses both on using
central Python packages like **tensorflow** and **scikit-learn** as well
as writing your own codes for some central ML algorithms. The
latter can be written in a language of your choice, be it Python, Julia, R,
Rust, C++, Fortran etc. In order to avoid confusion however, in these lectures we will limit our
attention to Python, C++ and Fortran.
## Important Matrix and vector handling packages
There are several central software packages for linear algebra and eigenvalue problems. Several of the more
popular ones have been wrapped into other software packages like those from the widely used text **Numerical Recipes**. The original source codes in many of the available packages are often taken from the widely used
software package LAPACK, which follows two other popular packages
developed in the 1970s, namely EISPACK and LINPACK. We describe them briefly here.
* LINPACK: package for linear equations and least square problems.
* LAPACK: package for solving symmetric, unsymmetric and generalized eigenvalue problems. From LAPACK's website <http://www.netlib.org> it is possible to download for free all source codes from this library. Both C/C++ and Fortran versions are available.
* BLAS (I, II and III): (Basic Linear Algebra Subprograms) are routines that provide standard building blocks for performing basic vector and matrix operations. Blas I is vector operations, II vector-matrix operations and III matrix-matrix operations. Highly parallelized and efficient codes, all available for download from <http://www.netlib.org>.
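As a quick illustration of the three BLAS levels (a minimal sketch, assuming only, as is typically the case, that Numpy is linked against an optimized BLAS), the corresponding Numpy operations are
import numpy as np
n = 100
x = np.random.normal(size=n)
y = np.random.normal(size=n)
A = np.random.normal(size=(n, n))
B = np.random.normal(size=(n, n))
# BLAS level I: vector-vector operations, here the inner product x^T y
alpha = np.dot(x, y)
# BLAS level II: matrix-vector operations, here z = A x
z = A @ x
# BLAS level III: matrix-matrix operations, here C = A B
C = A @ B
print(alpha, z.shape, C.shape)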
When dealing with matrices and vectors a central issue is memory
handling and allocation. If our code is written in Python the way we
declare these objects and the way they are handled, interpreted and
used by say a linear algebra library, requires codes that interface
our Python program with such libraries. For Python programmers,
**Numpy** is by now the standard Python package for numerical arrays in
Python as well as the source of functions which act on these
arrays. These functions span from eigenvalue solvers to functions that
compute the mean value, variance or the covariance matrix. If you are
not familiar with how arrays are handled in say Python or compiled
languages like C++ and Fortran, the sections in this chapter may be
useful. For C++ programmers, **Armadillo** is a widely used library for
linear algebra and eigenvalue problems. In addition it offers a
convenient way to handle and organize arrays. We discuss this library
as well. Before we proceed we believe it may be convenient to repeat some basic features of
matrices and vectors.
## Basic Matrix Features
Matrix properties reminder
$$
\mathbf{A} =
\begin{bmatrix} a_{11} & a_{12} & a_{13} & a_{14} \\
a_{21} & a_{22} & a_{23} & a_{24} \\
a_{31} & a_{32} & a_{33} & a_{34} \\
a_{41} & a_{42} & a_{43} & a_{44}
\end{bmatrix}\qquad
\mathbf{I} =
\begin{bmatrix} 1 & 0 & 0 & 0 \\
0 & 1 & 0 & 0 \\
0 & 0 & 1 & 0 \\
0 & 0 & 0 & 1
\end{bmatrix}
$$
The inverse of a matrix is defined by
$$
\mathbf{A}^{-1} \cdot \mathbf{A} = I
$$
<table border="1">
<thead>
<tr><th align="center"> Relations </th> <th align="center"> Name </th> <th align="center"> matrix elements </th> </tr>
</thead>
<tbody>
<tr><td align="center"> $A = A^{T}$ </td> <td align="center"> symmetric </td> <td align="center"> $a_{ij} = a_{ji}$ </td> </tr>
<tr><td align="center"> $A = \left (A^{T} \right )^{-1}$ </td> <td align="center"> real orthogonal </td> <td align="center"> $\sum_k a_{ik} a_{jk} = \sum_k a_{ki} a_{kj} = \delta_{ij}$ </td> </tr>
<tr><td align="center"> $A = A^{ * }$ </td> <td align="center"> real matrix </td> <td align="center"> $a_{ij} = a_{ij}^{ * }$ </td> </tr>
<tr><td align="center"> $A = A^{\dagger}$ </td> <td align="center"> hermitian </td> <td align="center"> $a_{ij} = a_{ji}^{ * }$ </td> </tr>
<tr><td align="center"> $A = \left (A^{\dagger} \right )^{-1}$ </td> <td align="center"> unitary </td> <td align="center"> $\sum_k a_{ik} a_{jk}^{ * } = \sum_k a_{ki}^{ * } a_{kj} = \delta_{ij}$ </td> </tr>
</tbody>
</table>
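As a small numerical illustration of the inverse relation and of the first two rows of this table (a sketch using Numpy, which is introduced in more detail below; the matrices are just examples), we can write
import numpy as np
A = np.array([[4.0, 1.0, 0.0], [1.0, 3.0, 1.0], [0.0, 1.0, 2.0]])
# the inverse satisfies A^{-1} A = I, up to round-off errors
Ainv = np.linalg.inv(A)
print(np.allclose(Ainv @ A, np.eye(3)))
# A is symmetric since a_ij = a_ji
print(np.allclose(A, A.T))
# a real orthogonal matrix satisfies Q^T Q = I; here Q comes from a QR factorization
Q, R = np.linalg.qr(np.random.rand(3, 3))
print(np.allclose(Q.T @ Q, np.eye(3)))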
### Some famous Matrices
* Diagonal if $a_{ij}=0$ for $i\ne j$
* Upper triangular if $a_{ij}=0$ for $i > j$
* Lower triangular if $a_{ij}=0$ for $i < j$
* Upper Hessenberg if $a_{ij}=0$ for $i > j+1$
* Lower Hessenberg if $a_{ij}=0$ for $i < j+1$
* Tridiagonal if $a_{ij}=0$ for $|i -j| > 1$
* Lower banded with bandwidth $p$: $a_{ij}=0$ for $i > j+p$
* Upper banded with bandwidth $p$: $a_{ij}=0$ for $i < j+p$
* Banded, block upper triangular, block lower triangular....
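A minimal sketch of how some of these special matrices can be generated or extracted with Numpy (starting from a random matrix here; the tridiagonal construction is just one possible way):
import numpy as np
A = np.random.rand(4, 4)
D = np.diag(np.diag(A))   # diagonal: a_ij = 0 for i != j
U = np.triu(A)            # upper triangular: a_ij = 0 for i > j
L = np.tril(A)            # lower triangular: a_ij = 0 for i < j
# tridiagonal: a_ij = 0 for |i-j| > 1, built from the three central diagonals of A
T = np.diag(np.diag(A)) + np.diag(np.diag(A, 1), 1) + np.diag(np.diag(A, -1), -1)
print(D)
print(U)
print(L)
print(T)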
Some Equivalent Statements. For an $N\times N$ matrix $\mathbf{A}$ the following properties are all equivalent
* If the inverse of $\mathbf{A}$ exists, $\mathbf{A}$ is nonsingular.
* The equation $\mathbf{Ax}=0$ implies $\mathbf{x}=0$.
* The rows of $\mathbf{A}$ form a basis of $R^N$.
* The columns of $\mathbf{A}$ form a basis of $R^N$.
* $\mathbf{A}$ is a product of elementary matrices.
 * $0$ is not an eigenvalue of $\mathbf{A}$.
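These statements are easy to check numerically; a minimal sketch (the matrix used here is just an example):
import numpy as np
A = np.array([[2.0, 1.0], [1.0, 3.0]])
# nonsingular: the determinant is nonzero and zero is not an eigenvalue
print(np.linalg.det(A))
print(np.linalg.eigvals(A))
# the columns (and rows) form a basis of R^N when the rank equals N
print(np.linalg.matrix_rank(A) == A.shape[0])
# hence Ax = 0 has only the trivial solution x = 0
print(np.linalg.solve(A, np.zeros(2)))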
## Numpy and arrays
[Numpy](http://www.numpy.org/) provides an easy way to handle arrays in Python. The standard way to import this library is as
import numpy as np
n = 10
x = np.random.normal(size=n)
print(x)
Here we have defined a vector $x$ with $n=10$ elements with its values given by the Normal distribution $N(0,1)$.
Another alternative is to declare a vector as follows
import numpy as np
x = np.array([1, 2, 3])
print(x)
Here we have defined a vector with three elements, with $x_0=1$, $x_1=2$ and $x_2=3$. Note that both Python and C++
start numbering array elements from $0$ and on. This means that a vector with $n$ elements has a sequence of entities $x_0, x_1, x_2, \dots, x_{n-1}$. We could also let Numpy (recommended) compute the logarithms of a specific array as
import numpy as np
x = np.log(np.array([4, 7, 8]))
print(x)
Here we have used Numpy's unary function $np.log$. This function is
highly tuned to compute array elements since the code is vectorized
and does not require explicit looping in Python. We normally recommend that you use the
Numpy intrinsic functions instead of the corresponding **log** function
from Python's **math** module; the looping over the array elements is then handled
internally by the **np.log** function. The alternative, and slower, way to compute the
logarithms of a vector would be to write
import numpy as np
from math import log
x = np.array([4, 7, 8])
for i in range(0, len(x)):
x[i] = log(x[i])
print(x)
We note that our code is much longer already and we need to import the **log** function from the **math** module.
The attentive reader will also notice that the output is $[1, 1, 2]$. Numpy automatically interprets our numbers as integers (similar to type deduction with the **auto** keyword in C++). To change this we could define our array elements to be double precision numbers as
import numpy as np
x = np.log(np.array([4, 7, 8], dtype = np.float64))
print(x)
or simply write them as double precision numbers (Python uses 64 bits as default for floating point type variables), that is
import numpy as np
x = np.log(np.array([4.0, 7.0, 8.0]))
print(x)
To check the number of bytes (remember that a double precision number occupies eight bytes, that is 64 bits), you can simply use the **itemsize** functionality (the array $x$ is actually an object which inherits the functionalities defined in Numpy) as
import numpy as np
x = np.log(np.array([4.0, 7.0, 8.0]))
print(x.itemsize)
Having defined vectors, we are now ready to try out matrices. We can define a $3 \times 3 $ real matrix $\hat{A}$
as (recall that we use lowercase letters for vectors and uppercase letters for matrices)
import numpy as np
A = np.log(np.array([ [4.0, 7.0, 8.0], [3.0, 10.0, 11.0], [4.0, 5.0, 7.0] ]))
print(A)
If we use the **shape** function we would get $(3, 3)$ as output, verifying that our matrix is a $3\times 3$ matrix. We can slice the matrix and print, for example, the first column (Python organizes matrix elements in row-major order, see below) as
import numpy as np
A = np.log(np.array([ [4.0, 7.0, 8.0], [3.0, 10.0, 11.0], [4.0, 5.0, 7.0] ]))
# print the first column, row-major order and elements start with 0
print(A[:,0])
We can continue this way by printing out other columns or rows. The example here prints out the second row
import numpy as np
A = np.log(np.array([ [4.0, 7.0, 8.0], [3.0, 10.0, 11.0], [4.0, 5.0, 7.0] ]))
# print the second row, row-major order and elements start with 0
print(A[1,:])
Numpy contains many other functionalities that allow us to slice, subdivide etc etc arrays. We strongly recommend that you look up the [Numpy website for more details](http://www.numpy.org/). Useful functions when defining a matrix are the **np.zeros** function which declares a matrix of a given dimension and sets all elements to zero
import numpy as np
n = 10
# define a matrix of dimension 10 x 10 and set all elements to zero
A = np.zeros( (n, n) )
print(A)
or initializing all elements to one
import numpy as np
n = 10
# define a matrix of dimension 10 x 10 and set all elements to one
A = np.ones( (n, n) )
print(A)
or as uniformly distributed random numbers (see the material on random number generators in the statistics part)
import numpy as np
n = 10
# define a matrix of dimension 10 x 10 and set all elements to random numbers with x \in [0, 1]
A = np.random.rand(n, n)
print(A)
As we will see throughout these lectures, there are several extremely useful functionalities in Numpy.
As an example, consider the discussion of the covariance matrix. Suppose we have defined three vectors
$\hat{x}, \hat{y}, \hat{z}$ with $n$ elements each. The covariance matrix is defined as
$$
\hat{\Sigma} = \begin{bmatrix} \sigma_{xx} & \sigma_{xy} & \sigma_{xz} \\
\sigma_{yx} & \sigma_{yy} & \sigma_{yz} \\
\sigma_{zx} & \sigma_{zy} & \sigma_{zz}
\end{bmatrix},
$$
where for example
$$
\sigma_{xy} =\frac{1}{n} \sum_{i=0}^{n-1}(x_i- \overline{x})(y_i- \overline{y}).
$$
The Numpy function **np.cov** calculates the covariance elements using the factor $1/(n-1)$ instead of $1/n$ since it assumes we do not have the exact mean values. For a more in-depth discussion of the covariance and covariance matrix and its meaning, we refer you to the lectures on statistics.
The following simple code uses the **np.vstack** function which takes each vector of dimension $1\times n$ and stacks them into the $3\times n$ matrix $\hat{W}$
$$
\hat{W} = \begin{bmatrix} x_0 & x_1 & x_2 & \dots & x_{n-2} & x_{n-1} \\
y_0 & y_1 & y_2 & \dots & y_{n-2} & y_{n-1} \\
z_0 & z_1 & z_2 & \dots & z_{n-2} & z_{n-1}
\end{bmatrix},
$$
which in turn is converted into the $3\times 3$ covariance matrix
$\hat{\Sigma}$ via the Numpy function **np.cov()**. In our review of
statistical functions and quantities we will discuss more about the
meaning of the covariance matrix. Here we note that we can calculate
the mean value of each set of samples $\hat{x}$ etc using the Numpy
function **np.mean(x)**. We can also extract the eigenvalues of the
covariance matrix through the **np.linalg.eig()** function.
# Importing various packages
import numpy as np
n = 100
x = np.random.normal(size=n)
print(np.mean(x))
y = 4+3*x+np.random.normal(size=n)
print(np.mean(y))
z = x**3+np.random.normal(size=n)
print(np.mean(z))
W = np.vstack((x, y, z))
Sigma = np.cov(W)
print(Sigma)
Eigvals, Eigvecs = np.linalg.eig(Sigma)
print(Eigvals)
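To make the $1/(n-1)$ normalization used by **np.cov** explicit, the following short check (with our own toy data) compares a covariance element computed by hand with the corresponding entry of the covariance matrix
import numpy as np
n = 100
x = np.random.normal(size=n)
y = 4+3*x+np.random.normal(size=n)
# covariance element computed by hand with the 1/(n-1) factor
sigma_xy = np.sum((x-np.mean(x))*(y-np.mean(y)))/(n-1)
print(sigma_xy)
# the off-diagonal element of np.cov should agree with the value above
print(np.cov(np.vstack((x, y)))[0, 1])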
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
from scipy import sparse
eye = np.eye(4)
print(eye)
sparse_mtx = sparse.csr_matrix(eye)
print(sparse_mtx)
x = np.linspace(-10,10,100)
y = np.sin(x)
plt.plot(x,y,marker='x')
plt.show()
## Gaussian Elimination
We start with the linear set of equations
$$
\mathbf{A}\mathbf{x} = \mathbf{w}.
$$
We assume also that the matrix $\mathbf{A}$ is non-singular and that the
matrix elements along the diagonal satisfy $a_{ii} \ne 0$. A simple $4\times 4$ example reads
$$
\begin{bmatrix}
a_{11}& a_{12} &a_{13}& a_{14}\\
a_{21}& a_{22} &a_{23}& a_{24}\\
a_{31}& a_{32} &a_{33}& a_{34}\\
a_{41}& a_{42} &a_{43}& a_{44}\\
\end{bmatrix} \begin{bmatrix}
x_1\\
x_2\\
x_3 \\
x_4 \\
\end{bmatrix}
=\begin{bmatrix}
w_1\\
w_2\\
w_3 \\
w_4\\
\end{bmatrix}.
$$
or
$$
a_{11}x_1 +a_{12}x_2 +a_{13}x_3 + a_{14}x_4=w_1 \nonumber
$$
$$
a_{21}x_1 + a_{22}x_2 + a_{23}x_3 + a_{24}x_4=w_2 \nonumber
$$
$$
a_{31}x_1 + a_{32}x_2 + a_{33}x_3 + a_{34}x_4=w_3 \nonumber
$$
$$
a_{41}x_1 + a_{42}x_2 + a_{43}x_3 + a_{44}x_4=w_4. \nonumber
$$
The basic idea of Gaussian elimination is to use the first equation to eliminate the first unknown $x_1$
from the remaining $n-1$ equations. Then we use the new second equation to eliminate the second unknown
$x_2$ from the remaining $n-2$ equations. With $n-1$ such eliminations
we obtain a so-called upper triangular set of equations of the form
$$
b_{11}x_1 +b_{12}x_2 +b_{13}x_3 + b_{14}x_4=y_1 \nonumber
$$
$$
b_{22}x_2 + b_{23}x_3 + b_{24}x_4=y_2 \nonumber
$$
$$
b_{33}x_3 + b_{34}x_4=y_3 \nonumber
$$
<!-- Equation labels as ordinary links -->
<div id="eq:gaussbacksub"></div>
$$
b_{44}x_4=y_4. \nonumber
\label{eq:gaussbacksub} \tag{1}
$$
We can solve this system of equations recursively starting from $x_n$ (in our case $x_4$) and proceed with
what is called a backward substitution.
This process can be expressed mathematically as
<!-- Equation labels as ordinary links -->
<div id="_auto1"></div>
$$
\begin{equation}
x_m = \frac{1}{b_{mm}}\left(y_m-\sum_{k=m+1}^nb_{mk}x_k\right)\quad m=n-1,n-2,\dots,1.
\label{_auto1} \tag{2}
\end{equation}
$$
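Translated into code, the backward substitution formula could be sketched as follows (a minimal example of ours, with zero-based indices, assuming that **U** is an upper triangular Numpy array with non-zero diagonal elements and **y** the corresponding right-hand side)
import numpy as np
def back_substitution(U, y):
    n = len(y)
    x = np.zeros(n)
    # loop from the last row up to the first, as in the formula above
    for m in range(n-1, -1, -1):
        x[m] = (y[m] - np.dot(U[m, m+1:], x[m+1:]))/U[m, m]
    return x
U = np.array([ [2.0, 1.0, 1.0], [0.0, 3.0, 2.0], [0.0, 0.0, 4.0] ])
y = np.array([4.0, 5.0, 8.0])
print(back_substitution(U, y))
# compare with the library solver
print(np.linalg.solve(U, y))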
To arrive at such an upper triangular system of equations, we start by eliminating
the unknown $x_1$ from equations $j=2,\dots,n$. We achieve this by multiplying the first equation by $a_{j1}/a_{11}$ and then subtracting
the result from the $j$th equation. We assume obviously that $a_{11}\ne 0$ and that
$\mathbf{A}$ is not singular.
Our actual $4\times 4$ example reads after the first operation
$$
\begin{bmatrix}
a_{11}& a_{12} &a_{13}& a_{14}\\
0& (a_{22}-\frac{a_{21}a_{12}}{a_{11}}) &(a_{23}-\frac{a_{21}a_{13}}{a_{11}}) & (a_{24}-\frac{a_{21}a_{14}}{a_{11}})\\
0& (a_{32}-\frac{a_{31}a_{12}}{a_{11}})& (a_{33}-\frac{a_{31}a_{13}}{a_{11}})& (a_{34}-\frac{a_{31}a_{14}}{a_{11}})\\
0&(a_{42}-\frac{a_{41}a_{12}}{a_{11}}) &(a_{43}-\frac{a_{41}a_{13}}{a_{11}}) & (a_{44}-\frac{a_{41}a_{14}}{a_{11}}) \\
\end{bmatrix} \begin{bmatrix}
x_1\\
x_2\\
x_3 \\
x_4 \\
\end{bmatrix}
=\begin{bmatrix}
y_1\\
w_2^{(2)}\\
w_3^{(2)} \\
w_4^{(2)}\\
\end{bmatrix},
$$
or
$$
b_{11}x_1 +b_{12}x_2 +b_{13}x_3 + b_{14}x_4=y_1 \nonumber
$$
$$
a^{(2)}_{22}x_2 + a^{(2)}_{23}x_3 + a^{(2)}_{24}x_4=w^{(2)}_2 \nonumber
$$
$$
a^{(2)}_{32}x_2 + a^{(2)}_{33}x_3 + a^{(2)}_{34}x_4=w^{(2)}_3 \nonumber
$$
$$
a^{(2)}_{42}x_2 + a^{(2)}_{43}x_3 + a^{(2)}_{44}x_4=w^{(2)}_4, \nonumber
$$
<!-- Equation labels as ordinary links -->
<div id="_auto2"></div>
$$
\begin{equation}
\label{_auto2} \tag{3}
\end{equation}
$$
The new coefficients are
<!-- Equation labels as ordinary links -->
<div id="_auto3"></div>
$$
\begin{equation}
b_{1k} = a_{1k}^{(1)} \quad k=1,\dots,n,
\label{_auto3} \tag{4}
\end{equation}
$$
where each $a_{1k}^{(1)}$ is equal to the original $a_{1k}$ element. The other coefficients are
<!-- Equation labels as ordinary links -->
<div id="_auto4"></div>
$$
\begin{equation}
a_{jk}^{(2)} = a_{jk}^{(1)}-\frac{a_{j1}^{(1)}a_{1k}^{(1)}}{a_{11}^{(1)}} \quad j,k=2,\dots,n,
\label{_auto4} \tag{5}
\end{equation}
$$
with a new right-hand side given by
<!-- Equation labels as ordinary links -->
<div id="_auto5"></div>
$$
\begin{equation}
y_{1}=w_1^{(1)}, \quad w_j^{(2)} =w_j^{(1)}-\frac{a_{j1}^{(1)}w_1^{(1)}}{a_{11}^{(1)}} \quad j=2,\dots,n.
\label{_auto5} \tag{6}
\end{equation}
$$
We have also set $w_1^{(1)}=w_1$, the original vector element.
We see that the system of unknowns $x_1,\dots,x_n$ is transformed into an $(n-1)\times (n-1)$ problem.
This step is called forward substitution.
Proceeding with these substitutions, we obtain the
general expressions for the new coefficients
<!-- Equation labels as ordinary links -->
<div id="_auto6"></div>
$$
\begin{equation}
a_{jk}^{(m+1)} = a_{jk}^{(m)}-\frac{a_{jm}^{(m)}a_{mk}^{(m)}}{a_{mm}^{(m)}} \quad j,k=m+1,\dots,n,
\label{_auto6} \tag{7}
\end{equation}
$$
with $m=1,\dots,n-1$ and a
right-hand side given by
<!-- Equation labels as ordinary links -->
<div id="_auto7"></div>
$$
\begin{equation}
w_j^{(m+1)} =w_j^{(m)}-\frac{a_{jm}^{(m)}w_m^{(m)}}{a_{mm}^{(m)}}\quad j=m+1,\dots,n.
\label{_auto7} \tag{8}
\end{equation}
$$
This set of $n-1$ eliminations leads us to an upper triangular set of equations which is solved by back substitution.
If the arithmetic is exact and the matrix $\mathbf{A}$ is not singular, then the computed answer will be exact.
Even though the matrix elements along the diagonal are not zero,
numerically small numbers may appear and subsequent divisions may lead to large numbers, which, if added
to a small number, may yield losses of precision. Suppose for example that our first elimination step gives $(a_{22}-a_{21}a_{12}/a_{11}) = -10^{-7}$.
A later division by this small number produces numbers of order $10^7$, and adding a number of order one to such a result
amounts to computing $10^7+1$, which with single precision arithmetic simply yields $10^7$; the smaller contribution is lost.
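The forward elimination formulas above translate almost line by line into code. The sketch below (our own, without pivoting, so it assumes that the diagonal elements never become too small) performs the eliminations and then calls back substitution
import numpy as np
def gaussian_elimination(A, w):
    A = A.astype(np.float64).copy()
    w = w.astype(np.float64).copy()
    n = len(w)
    # forward elimination, m labels the pivot row
    for m in range(n-1):
        for j in range(m+1, n):
            factor = A[j, m]/A[m, m]
            A[j, m:] = A[j, m:] - factor*A[m, m:]
            w[j] = w[j] - factor*w[m]
    # back substitution on the resulting upper triangular system
    x = np.zeros(n)
    for m in range(n-1, -1, -1):
        x[m] = (w[m] - np.dot(A[m, m+1:], x[m+1:]))/A[m, m]
    return x
A = np.array([ [2.0, 1.0, -1.0], [-3.0, -1.0, 2.0], [-2.0, 1.0, 2.0] ])
w = np.array([8.0, -11.0, -3.0])
print(gaussian_elimination(A, w))
# compare with the library solver
print(np.linalg.solve(A, w))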
* Gaussian elimination, $O(2/3n^3)$ flops, general matrix
* LU decomposition, factorization into a lower triangular and an upper triangular matrix, $O(2/3n^3)$ flops, general matrix. The inverse and the determinant are then easily obtained, and linear equations can be solved with back substitution only, $O(n^2)$ flops
* Cholesky decomposition. Real symmetric or hermitian positive definite matrix, $O(1/3n^3)$ flops.
* Tridiagonal linear systems, important for differential equations. Normally positive definite and non-singular. $O(8n)$ flops for symmetric. Special case of banded matrices.
* Singular value decomposition
* the QR method will be discussed in chapter 7 in connection with eigenvalue systems. $O(4/3n^3)$ flops.
The LU decomposition method means that we can rewrite
this matrix as the product of two matrices $\mathbf{L}$ and $\mathbf{U}$
where
$$
\begin{bmatrix}
a_{11} & a_{12} & a_{13} & a_{14} \\
a_{21} & a_{22} & a_{23} & a_{24} \\
a_{31} & a_{32} & a_{33} & a_{34} \\
a_{41} & a_{42} & a_{43} & a_{44}
\end{bmatrix}
= \begin{bmatrix}
1 & 0 & 0 & 0 \\
l_{21} & 1 & 0 & 0 \\
l_{31} & l_{32} & 1 & 0 \\
l_{41} & l_{42} & l_{43} & 1
\end{bmatrix}
\begin{bmatrix}
u_{11} & u_{12} & u_{13} & u_{14} \\
0 & u_{22} & u_{23} & u_{24} \\
0 & 0 & u_{33} & u_{34} \\
0 & 0 & 0 & u_{44}
\end{bmatrix}.
$$
LU decomposition forms the backbone of other algorithms in linear algebra, such as the
solution of linear equations given by
$$
a_{11}x_1 +a_{12}x_2 +a_{13}x_3 + a_{14}x_4=w_1 \nonumber
$$
$$
a_{21}x_1 + a_{22}x_2 + a_{23}x_3 + a_{24}x_4=w_2 \nonumber
$$
$$
a_{31}x_1 + a_{32}x_2 + a_{33}x_3 + a_{34}x_4=w_3 \nonumber
$$
$$
a_{41}x_1 + a_{42}x_2 + a_{43}x_3 + a_{44}x_4=w_4. \nonumber
$$
The above set of equations is conveniently solved by using LU decomposition as an intermediate step.
The matrix $\mathbf{A}\in \mathbb{R}^{n\times n}$ has an LU factorization if all its leading principal minors (and thus the determinant)
are different from zero. If the LU factorization exists and $\mathbf{A}$ is non-singular, then the LU factorization
is unique and the determinant is given by
$$
det\{\mathbf{A}\}=det\{\mathbf{LU}\}= det\{\mathbf{L}\}det\{\mathbf{U}\}=u_{11}u_{22}\dots u_{nn}.
$$
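We can verify this identity numerically. Note that library routines such as **scipy.linalg.lu** use row pivoting, so a permutation matrix with determinant $\pm 1$ enters as an extra factor (the matrix below is just an illustrative example of ours, and we assume Scipy is available)
import numpy as np
from scipy.linalg import lu
A = np.array([ [4.0, 3.0, 2.0], [6.0, 3.0, 1.0], [2.0, 5.0, 7.0] ])
P, L, U = lu(A)
# determinant as det(P) times the product of the diagonal elements of U
print(np.linalg.det(P)*np.prod(np.diag(U)))
# compare with the library determinant
print(np.linalg.det(A))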
There are at least three main advantages with LU decomposition compared with standard Gaussian elimination:
* It is straightforward to compute the determinant of a matrix
* If we have to solve sets of linear equations with the same matrix but with different right-hand side vectors $\mathbf{w}$, the expensive $O(n^3)$ decomposition needs to be performed only once; each additional right-hand side is then solved by forward and backward substitution at a cost of $O(n^2)$ FLOPS.
* Computing the inverse of the matrix is precisely such an operation: we solve $n$ systems of equations with the same matrix but with the $n$ unit vectors as right-hand sides.
With the LU decomposition it is rather
simple to solve a system of linear equations
$$
a_{11}x_1 +a_{12}x_2 +a_{13}x_3 + a_{14}x_4=w_1 \nonumber
$$
$$
a_{21}x_1 + a_{22}x_2 + a_{23}x_3 + a_{24}x_4=w_2 \nonumber
$$
$$
a_{31}x_1 + a_{32}x_2 + a_{33}x_3 + a_{34}x_4=w_3 \nonumber
$$
$$
a_{41}x_1 + a_{42}x_2 + a_{43}x_3 + a_{44}x_4=w_4. \nonumber
$$
This can be written in matrix form as
$$
\mathbf{Ax}=\mathbf{w}.
$$
where $\mathbf{A}$ and $\mathbf{w}$ are known and we have to solve for
$\mathbf{x}$. Using the LU decomposition we write
$$
\mathbf{A} \mathbf{x} \equiv \mathbf{L} \mathbf{U} \mathbf{x} =\mathbf{w}.
$$
The previous equation can be solved in two steps
$$
\mathbf{L} \mathbf{y} = \mathbf{w};\qquad \mathbf{Ux}=\mathbf{y}.
$$
To show that this is correct we use the LU decomposition
to rewrite our system of linear equations as
$$
\mathbf{LUx}=\mathbf{w},
$$
and since the determinant of $\mathbf{L}$ is equal to 1 (by construction
since the diagonals of $\mathbf{L}$ equal 1) we can use the inverse of
$\mathbf{L}$ to obtain
$$
\mathbf{Ux}=\mathbf{L^{-1}w}=\mathbf{y},
$$
which yields the intermediate step
$$
\mathbf{L^{-1}w}=\mathbf{y}
$$
and as soon as we have $\mathbf{y}$ we can obtain $\mathbf{x}$
through $\mathbf{Ux}=\mathbf{y}$.
For our four-dimensional example this takes the form
$$
y_1=w_1 \nonumber
$$
$$
l_{21}y_1 + y_2=w_2\nonumber
$$
$$
l_{31}y_1 + l_{32}y_2 + y_3 =w_3\nonumber
$$
$$
l_{41}y_1 + l_{42}y_2 + l_{43}y_3 + y_4=w_4. \nonumber
$$
and
$$
u_{11}x_1 +u_{12}x_2 +u_{13}x_3 + u_{14}x_4=y_1 \nonumber
$$
$$
u_{22}x_2 + u_{23}x_3 + u_{24}x_4=y_2\nonumber
$$
$$
u_{33}x_3 + u_{34}x_4=y_3\nonumber
$$
$$
u_{44}x_4=y_4 \nonumber
$$
This example shows the basis for the algorithm
needed to solve the set of $n$ linear equations.
The algorithm goes as follows
* Set up the matrix $\bf A$ and the vector $\bf w$ with their correct dimensions. This determines the dimensionality of the unknown vector $\bf x$.
* Then LU decompose the matrix $\bf A$ through a call to the function `ludcmp(double a, int n, int indx, double &d)`. This function returns the LU decomposed matrix $\bf A$, its determinant and the vector `indx` which keeps track of the number of interchanges of rows. If the determinant is zero, the solution is ill-conditioned.
* Thereafter you call the function `lubksb(double a, int n, int indx, double w)` which uses the LU decomposed matrix $\bf A$ and the vector $\bf w$ and returns $\bf x$ in the same place as $\bf w$. Upon exit the original content in $\bf w$ is destroyed. If you wish to keep this information, you should make a backup of it in your calling function.
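The `ludcmp`/`lubksb` pair above follows the classic C interface. In Python the same two-step procedure can be sketched with Scipy's **lu_factor** and **lu_solve** (an illustration of ours; the example system is arbitrary and we assume Scipy is available)
import numpy as np
from scipy.linalg import lu_factor, lu_solve
A = np.array([ [2.0, 1.0, -1.0], [-3.0, -1.0, 2.0], [-2.0, 1.0, 2.0] ])
w = np.array([8.0, -11.0, -3.0])
# step 1: LU decompose A, playing the role of ludcmp
lu_piv = lu_factor(A)
# step 2: solve L y = w and U x = y in one call, playing the role of lubksb
x = lu_solve(lu_piv, w)
print(x)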
### LU Decomposition, the inverse of a matrix
If the inverse exists then
$$
\mathbf{A}^{-1}\mathbf{A}=\mathbf{I},
$$
the identity matrix. With an LU decomposed matrix we can rewrite the last equation as
$$
\mathbf{LU}\mathbf{A}^{-1}=\mathbf{I}.
$$
If we assume that the first column (that is column 1) of the inverse matrix
can be written as a vector with unknown entries
$$
\mathbf{A}_1^{-1}= \begin{bmatrix}
a_{11}^{-1} \\
a_{21}^{-1} \\
\dots \\
a_{n1}^{-1} \\
\end{bmatrix},
$$
then we have a linear set of equations
$$
\mathbf{LU}\begin{bmatrix}
a_{11}^{-1} \\
a_{21}^{-1} \\
\dots \\
a_{n1}^{-1} \\
\end{bmatrix} =\begin{bmatrix}
1 \\
0 \\
\dots \\
0 \\
\end{bmatrix}.
$$
In a similar way we can compute the unknown entries of the second column,
$$
\mathbf{LU}\begin{bmatrix}
a_{12}^{-1} \\
a_{22}^{-1} \\
\dots \\
a_{n2}^{-1} \\
\end{bmatrix}=\begin{bmatrix}
0 \\
1 \\
\dots \\
0 \\
\end{bmatrix},
$$
and continue till we have solved all $n$ sets of linear equations.
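This column-by-column strategy is easily realized in code: the factorization is computed once and reused for every unit vector (a small sketch of ours, again assuming Scipy's **lu_factor** and **lu_solve** are available)
import numpy as np
from scipy.linalg import lu_factor, lu_solve
n = 4
# an arbitrary, well-conditioned test matrix
A = np.random.rand(n, n) + n*np.eye(n)
lu_piv = lu_factor(A)          # decompose once
Ainv = np.zeros((n, n))
for k in range(n):
    e = np.zeros(n)
    e[k] = 1.0
    # solve for column k of the inverse
    Ainv[:, k] = lu_solve(lu_piv, e)
# compare with the library inverse
print(np.allclose(Ainv, np.linalg.inv(A)))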
|
{"hexsha": "729fa16cd7b47b4d580bd65a3830aee8bed70d39", "size": 26602, "ext": "py", "lang": "Python", "max_stars_repo_path": "doc/LectureNotes/_build/jupyter_execute/linalg.py", "max_stars_repo_name": "marlgryd/MachineLearning", "max_stars_repo_head_hexsha": "e07439cee1f9e3042aec765754116dccdf8bcf01", "max_stars_repo_licenses": ["CC0-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "doc/LectureNotes/_build/jupyter_execute/linalg.py", "max_issues_repo_name": "marlgryd/MachineLearning", "max_issues_repo_head_hexsha": "e07439cee1f9e3042aec765754116dccdf8bcf01", "max_issues_repo_licenses": ["CC0-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "doc/LectureNotes/_build/jupyter_execute/linalg.py", "max_forks_repo_name": "marlgryd/MachineLearning", "max_forks_repo_head_hexsha": "e07439cee1f9e3042aec765754116dccdf8bcf01", "max_forks_repo_licenses": ["CC0-1.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-09-04T16:21:16.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-04T16:21:16.000Z", "avg_line_length": 35.0487483531, "max_line_length": 351, "alphanum_fraction": 0.6125855199, "include": true, "reason": "import numpy,from scipy", "num_tokens": 8088}
|
import numpy as np
import argparse
import json
def get_argument_parser():
parser = argparse.ArgumentParser();
parser.add_argument('--data_type', type=str, default='iid',
help='the type of data that needs to be generated')
parser.add_argument('--num_samples', type=int, default=100000,
help='length of the sequence to be generated')
parser.add_argument('--markovity', type=int, default=30,
help='Step for Markovity')
parser.add_argument('--file_name', type=str, default='input.txt',
help='The name of the output file')
parser.add_argument('--info_file', type=str, default='input_info.txt',
help='Name of the info file')
parser.add_argument('--p1', type=float, default=0.5,
help='the probability for the entire sequence, or the base')
parser.add_argument('--n1', type=float, default=0.0,
help='the probability for the entire sequence, or the base')
return parser
# Computes the binary entropy
def entropy_iid(prob):
p1 = prob
p0 = 1.0 - prob
H = -(p1*np.log(p1) + p0*np.log(p0))
H /= np.log(2.0)
return H
def main():
parser = get_argument_parser()
FLAGS = parser.parse_args()
FLAGS.p0 = 1.0 - FLAGS.p1
FLAGS.n0 = 1.0 - FLAGS.n1
_keys = ["data_type","p1","n1"]
data = np.empty([FLAGS.num_samples,1],dtype='S1')
#print data.shape
if FLAGS.data_type=='iid':
#Generate data
data = np.random.choice(['a', 'b'], size=(FLAGS.num_samples,1), p=[FLAGS.p0, FLAGS.p1])
FLAGS.Entropy = entropy_iid(FLAGS.p1)
_keys.append("Entropy")
elif FLAGS.data_type=='0entropy':
data[:FLAGS.markovity,:] = np.random.choice(['a', 'b'], size=(FLAGS.markovity,1), p=[FLAGS.p0, FLAGS.p1])
for i in range(FLAGS.markovity, FLAGS.num_samples):
if data[i-1] == data[i-FLAGS.markovity]:
data[i] = 'a'
else:
data[i] = 'b'
FLAGS.Entropy = 0
_keys.append("Entropy")
_keys.append("markovity")
elif FLAGS.data_type=='HMM':
data[:FLAGS.markovity,:] = np.random.choice(['a', 'b'], size=(FLAGS.markovity,1), p=[FLAGS.p0, FLAGS.p1])
for i in range(FLAGS.markovity, FLAGS.num_samples):
if data[i-1] == data[i-FLAGS.markovity]:
data[i] = np.random.choice(['a','b'], p=[FLAGS.n0, FLAGS.n1])
else:
data[i] = np.random.choice(['b','a'], p=[FLAGS.n0, FLAGS.n1])
FLAGS.Entropy = entropy_iid(FLAGS.n1)
_keys.append("Entropy")
_keys.append("markovity")
print "HMM Data generated ..."
np.savetxt(FLAGS.file_name,data,delimiter='', fmt='%c',newline='');
#print _keys
args = vars(FLAGS)
info = { key : args[key] for key in _keys }
#print info
with open(FLAGS.info_file,"wb") as f:
json.dump(info,f)
if __name__ == '__main__':
main()
|
{"hexsha": "07d393d2f2df9baae4997c1526ca3d956166c2f3", "size": 3042, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/data_generation_scripts/generate_sequence_data.py", "max_stars_repo_name": "rajatdiptabiswas/NN_compression", "max_stars_repo_head_hexsha": "1a2650ad897bcc1f32f3b63d0a6477b8f6be6e29", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 211, "max_stars_repo_stars_event_min_datetime": "2017-04-05T15:37:37.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-03T16:17:02.000Z", "max_issues_repo_path": "src/data_generation_scripts/generate_sequence_data.py", "max_issues_repo_name": "Benjamin-Etheredge/NN_compression", "max_issues_repo_head_hexsha": "1ded955ac6069299a8fbe37e14373f9e173ad912", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 14, "max_issues_repo_issues_event_min_datetime": "2017-08-31T02:40:29.000Z", "max_issues_repo_issues_event_max_datetime": "2020-04-29T06:26:19.000Z", "max_forks_repo_path": "src/data_generation_scripts/generate_sequence_data.py", "max_forks_repo_name": "Benjamin-Etheredge/NN_compression", "max_forks_repo_head_hexsha": "1ded955ac6069299a8fbe37e14373f9e173ad912", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 45, "max_forks_repo_forks_event_min_datetime": "2017-07-08T10:19:21.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-15T07:35:35.000Z", "avg_line_length": 36.2142857143, "max_line_length": 113, "alphanum_fraction": 0.5746219592, "include": true, "reason": "import numpy", "num_tokens": 791}
|
#!/usr/bin/env python
# coding: utf-8
# In[4]:
from numpy import sin, pi, sqrt, arccos, log
from pandas import read_excel
e = 1.602e-19 # [C] electron charge
r_p = 0.15e-3 # [m] probe radius
l_p = 1e-3 # [m] probe length
h = 0.5e-3 # [m] Hole radius
s = 0.7e-3 # [m] Rotation center to Hole edge
R = 0.6e-3 # [m] Rotation center to Wire center
m_i = (19*2+10) * 1.67e-27 #[kg] mass of BF2+
k = 1.38e-23 #[m2kg/s2K] Boltzmann const
alpha = pi/2 # [rad] angle between B-field and Rotation center to Wire center
gamma = (1+0.5)/(2+0.5)
class Machprobe():
def __init__(self, ne, Te, Ti, m_i, I):
self.ne = ne
self.I = I
self.Cs =sqrt(e*(Te+Ti)/(m_i))
d_alpha = arccos((s**2 + R**2 - h**2)/(2*s*R))
self.A_eff = l_p*(R*sin(alpha)+r_p-max(R*sin(alpha)-r_p, s*sin(alpha-d_alpha)))
#print('Te : {} eV'.format(Te))
#print('Ti : {} eV'.format(Ti))
#print('Effective area : {} m2'.format(self.A_eff))
#print('Ion sound speed : {} m/s'.format(self.Cs))
def perp_current(self):
self.I_D = (r_p/l_p)*(1-gamma)*self.A_eff # diffusion current calculation
self.I_sat = gamma*e*self.A_eff*self.Cs*self.ne # saturation current calculation
self.I_perp = self.I - self.I_D - self.I_sat # perpendicular current calculation
#print('diffusion current : ',self.I_D)
#print('saturation current : ',self.I_sat)
#print('perp current : ',self.I_perp)
test = Machprobe(1e17,5,0.1,m_i,1e-5)
test.perp_current()
print(test.I_perp)
# In[ ]:
|
{"hexsha": "f65a5260d81742e10ab591a0c83d700dd4d7de7a", "size": 1595, "ext": "py", "lang": "Python", "max_stars_repo_path": "Python_Projects/Mach_Probe/Untitled.py", "max_stars_repo_name": "GUNU-GO/SNUPI", "max_stars_repo_head_hexsha": "a73137699d9fc6ae8fa3d1522f341c04d8d43052", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Python_Projects/Mach_Probe/Untitled.py", "max_issues_repo_name": "GUNU-GO/SNUPI", "max_issues_repo_head_hexsha": "a73137699d9fc6ae8fa3d1522f341c04d8d43052", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Python_Projects/Mach_Probe/Untitled.py", "max_forks_repo_name": "GUNU-GO/SNUPI", "max_forks_repo_head_hexsha": "a73137699d9fc6ae8fa3d1522f341c04d8d43052", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.6730769231, "max_line_length": 100, "alphanum_fraction": 0.589968652, "include": true, "reason": "from numpy", "num_tokens": 542}
|
import time
import numpy as np
start = time.perf_counter()
def get_input():
with open('inputs/test7.txt') as f:
temp = f.readlines()
init = temp[0].split(",")
init[-1] = init[-1].strip("\n")
for i, val in enumerate(init):
init[i] = int(val)
return init
def day7part1(init):
med = np.median(init)
fuel = 0
for val in init:
fuel += abs(val - med)
return fuel
def day7part2(init):
averages = [np.floor(np.mean(init)), np.ceil(np.mean(init))] # get floor and ceil around mean if not .00
res = []
fuel = 0
# compute fuel for floor vs ceil average
for avg in averages:
fuel = 0
for val in init:
diff = int(abs(avg - val))
fuel += diff/2*(diff + 1)
res.append(fuel)
return min(res)
init = get_input()
print(day7part1(init))
print(day7part2(init))
print((time.perf_counter() - start)*1000, "ms")
|
{"hexsha": "def65e2b4f9c62fd01f370d19512e46394132215", "size": 901, "ext": "py", "lang": "Python", "max_stars_repo_path": "day7.py", "max_stars_repo_name": "gitkoogie/AdventOfCode2021", "max_stars_repo_head_hexsha": "416408c22bc704abc95ed46105d086d08e116e1d", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "day7.py", "max_issues_repo_name": "gitkoogie/AdventOfCode2021", "max_issues_repo_head_hexsha": "416408c22bc704abc95ed46105d086d08e116e1d", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "day7.py", "max_forks_repo_name": "gitkoogie/AdventOfCode2021", "max_forks_repo_head_hexsha": "416408c22bc704abc95ed46105d086d08e116e1d", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 20.9534883721, "max_line_length": 109, "alphanum_fraction": 0.5982241953, "include": true, "reason": "import numpy", "num_tokens": 262}
|
from typing import Dict, Tuple
import numpy as np
def pad(
img: np.ndarray, target_size: Tuple, pad_value: float = 0.0, targets: Dict = None
):
targets = dict() if targets is None else targets
h, w, c = img.shape
tw, th = target_size
pad_left = int((tw - w) // 2) + (tw - w) % 2
pad_right = int((tw - w) // 2)
if w > tw:
pad_left, pad_right = 0, 0
pad_up = int((th - h) // 2) + (th - h) % 2
pad_down = int((th - h) // 2)
if h > th:
pad_up, pad_down = 0, 0
nimg = np.ones((th, tw, c), dtype=img.dtype) * pad_value
nimg[pad_up : th - pad_down, pad_left : tw - pad_right] = img
if "target_boxes" in targets:
target_boxes = targets["target_boxes"]
if len(target_boxes.shape) == 2 and target_boxes.shape[0] > 0:
target_boxes[:, [0, 2]] = target_boxes[:, [0, 2]] + pad_left
target_boxes[:, [1, 3]] = target_boxes[:, [1, 3]] + pad_up
targets["target_boxes"] = target_boxes
return nimg, targets
|
{"hexsha": "f54ed3ef55454769cc593cbda83988a6dd9babad", "size": 1017, "ext": "py", "lang": "Python", "max_stars_repo_path": "fastface/transforms/functional/pad.py", "max_stars_repo_name": "mdornseif/fastface", "max_stars_repo_head_hexsha": "72772db1fae4af17e829cd5479c4848fe5eb8948", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 72, "max_stars_repo_stars_event_min_datetime": "2021-01-03T05:43:56.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-17T06:09:35.000Z", "max_issues_repo_path": "fastface/transforms/functional/pad.py", "max_issues_repo_name": "mdornseif/fastface", "max_issues_repo_head_hexsha": "72772db1fae4af17e829cd5479c4848fe5eb8948", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2021-09-23T22:26:57.000Z", "max_issues_repo_issues_event_max_datetime": "2021-10-31T10:11:48.000Z", "max_forks_repo_path": "fastface/transforms/functional/pad.py", "max_forks_repo_name": "mdornseif/fastface", "max_forks_repo_head_hexsha": "72772db1fae4af17e829cd5479c4848fe5eb8948", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2021-02-15T19:58:57.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-19T12:46:41.000Z", "avg_line_length": 28.25, "max_line_length": 85, "alphanum_fraction": 0.5663716814, "include": true, "reason": "import numpy", "num_tokens": 335}
|
source("../power_priors_aux.r")
source("../data_Gaussian.r")
gs.data <- list(
N0 = N_0,
y0 = y_0,
mu0 = mu_0,
kappa0 = kappa_0,
alpha0 = alpha_0,
beta0 = beta_0,
a_0 = 1
)
###
get_l_a0_gaussian <- function(y0, n0, alpha0, beta0, m0, k0, a_0){
nstar <- a_0 * n0
ybar <- mean(y0)
s <- mean( (y0-ybar)^2 )
kappa_n <- k0 + nstar
alpha_n <- alpha0 + nstar/2
beta_n <- beta0 + .5 * (nstar * s + (k0 * nstar * (ybar - m0)^2 )/kappa_n )
ans <- lgamma(alpha_n)-lgamma(alpha0)
ans <- ans + alpha0 * log(beta0) - alpha_n * log(beta_n)
ans <- ans + .5 *( log(k0) - log(kappa_n) )-nstar/2 * log(2*pi)
return(ans)
}
############
l_a0 <- function(x) {
get_l_a0_gaussian(
y0 = gs.data$y0,
n0 = gs.data$N0,
alpha0 = gs.data$alpha0,
beta0 = gs.data$beta0,
m0 = gs.data$mu0,
k0 = gs.data$kappa0,
a_0 = x
)
}
l_a0 <- Vectorize(l_a0)
###
l_a0_p <- function(x) numDeriv::grad(l_a0, x)
l_a0_p <- Vectorize(l_a0_p)
l_a0_pp <- function(x) numDeriv::hessian(l_a0, x)
l_a0_pp <- Vectorize(l_a0_pp)
###
find_zero <- function(){
obj <- function(x) (l_a0_p(x))^2
res <- optimize(obj, lower = 0, upper = 10)
return(res$minimum)
}
find_zero()
lhs <- function(a0){
digamma(alpha_0 + N_0/2 * a0)
}
rhs <- function(a0){
ybar <- mean(y_0)
s <- sum((y_0-ybar)^2)
kappa_n <- kappa_0 + N_0*a0
##
delta <- .5 * ( s + (kappa_0/ kappa_n) * N_0 *(ybar-mu_0)^2)
t1 <- (delta *(alpha_0 + N_0/2 * a0) )/(N_0 * (delta*a0 + beta_0))
t2 <- log(delta*a0 + beta_0)
t3 <- 4/ (a0 *N_0 + kappa_0)
t4 <- log(2*pi)
ans <- t1 + t2 + t3 + t4
return(ans)
}
curve(lhs, 0, 1, lwd = 3, xlab = expression(a[0]))
curve(rhs, 0, 1, add = TRUE, col = 3, lwd = 3)
abline(v = find_zero(), lty = 2, lwd = 2)
find_zero_2 <- function(){
obj2 <- function(x) (rhs(x) - lhs(x))^2
res2 <- optimize(obj2, lower = 0, upper = 10)
return(res2$minimum)
}
curve(l_a0_p)
abline(h = 0, lty = 2, lwd = 2)
abline(v = find_zero_2(), lty = 2, lwd = 2)
lfn <- function(x) l_a0(x + 1)-l_a0(x)
fn <- function(x) exp(lfn(x))
curve(lfn, 0, 10)
curve(fn, 0, 10)
|
{"hexsha": "f4c74be7be65d7f8bfce867e02e3bf2e7af1f968", "size": 2092, "ext": "r", "lang": "R", "max_stars_repo_path": "code/extra/test_Gaussian_derivative.r", "max_stars_repo_name": "maxbiostat/propriety_power_priors", "max_stars_repo_head_hexsha": "43a9dc7bd007d5647bc453cd8a875e82c16ad6eb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "code/extra/test_Gaussian_derivative.r", "max_issues_repo_name": "maxbiostat/propriety_power_priors", "max_issues_repo_head_hexsha": "43a9dc7bd007d5647bc453cd8a875e82c16ad6eb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 7, "max_issues_repo_issues_event_min_datetime": "2020-05-29T19:11:11.000Z", "max_issues_repo_issues_event_max_datetime": "2020-08-29T15:58:08.000Z", "max_forks_repo_path": "code/extra/test_Gaussian_derivative.r", "max_forks_repo_name": "maxbiostat/propriety_power_priors", "max_forks_repo_head_hexsha": "43a9dc7bd007d5647bc453cd8a875e82c16ad6eb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.4946236559, "max_line_length": 79, "alphanum_fraction": 0.5731357553, "num_tokens": 888}
|
#include <yaml-cpp/yaml.h>
#include <boost/filesystem.hpp>
#include "imgui.h"
#include "imgui-SFML.h"
#include <SFML/Graphics.hpp>
#include "tinyfiledialogs.h"
#include "scene/Scene.hpp"
scene::Scene* parseSceneFromFile(const std::string &path) {
try {
return new scene::Scene(YAML::LoadFile(path));
} catch (const std::exception &e) {
std::string msg = "An error occurred while parsing \"";
msg.append(path);
msg.append("\":\r\n");
msg.append(e.what());
tinyfd_messageBox("Error", msg.c_str(), "ok", "error", 1);
}
return nullptr;
}
void getScenesList(std::vector<scene::Scene*> &scenes) {
std::string path = SCENES_DIR;
if (!boost::filesystem::exists(path)) {
if (!boost::filesystem::create_directory(path)) {
return;
}
}
for (auto &scene : scenes) {
delete scene;
}
scenes.clear();
for (auto &entry : boost::filesystem::directory_iterator(path)) {
auto ext = entry.path().extension().string();
std::transform(ext.begin(), ext.end(), ext.begin(), ::tolower);
if (!(ext == ".yaml" || ext == ".yml")) {
continue;
}
auto scene = parseSceneFromFile(entry.path().string());
if (scene != nullptr) {
scene->path = boost::filesystem::canonical(entry.path()).generic_string();
scenes.push_back(scene);
}
}
}
inline bool ends_with(std::string const &value, std::string const &ending) {
if (ending.size() > value.size()) return false;
return std::equal(ending.rbegin(), ending.rend(), value.rbegin());
}
void exportPng(const scene::Scene &scene, cv::Mat* mat) {
std::string imgName = scene.getName();
imgName.append(".png");
char const* filterPatterns[1] = { "*.png" };
char const* fileName = tinyfd_saveFileDialog("Save image as...", imgName.c_str(), 1, filterPatterns, nullptr);
if (fileName) {
std::vector<int> compressionParams;
compressionParams.push_back(CV_IMWRITE_PNG_COMPRESSION);
compressionParams.push_back(9);
auto lower = std::string(fileName);
std::string path = std::string(fileName);
std::transform(lower.begin(), lower.end(), lower.begin(), ::tolower);
if (!ends_with(lower, ".png")) {
path.append(".png");
};
cv::imwrite(path, *mat, compressionParams);
}
}
scene::Scene* importYamlDialog(std::string &path) {
char const* filterPatterns[2] = { "*.yaml", "*.yml" };
char const* fileName = tinyfd_openFileDialog("Load scene from...", "", 2, filterPatterns, nullptr, false);
if (fileName) {
path = fileName;
return parseSceneFromFile(path);
}
return nullptr;
}
void importYaml(std::vector<scene::Scene*> &scenes) {
std::string p;
auto imported = importYamlDialog(p);
if (imported != nullptr) {
boost::filesystem::path path(p);
std::string dest = SCENES_DIR;
dest.append(path.filename().string());
if (boost::filesystem::equivalent(path, dest)) {
return;
}
if (boost::filesystem::exists(dest)) {
int res = tinyfd_messageBox("Overwrite file?", "This file already exists in the scenes directory.\r\nDo you want to replace it?", "yesno", "question", 1);
if (res == 0) {
return;
}
}
imported->path = boost::filesystem::weakly_canonical(dest).generic_string();
boost::filesystem::copy_file(path, dest, boost::filesystem::copy_option::overwrite_if_exists);
for (auto it = scenes.begin(); it != scenes.end();) {
auto existingScene = *it;
if (existingScene->path == imported->path) {
it = scenes.erase(it);
delete existingScene;
} else {
++it;
}
}
scenes.push_back(imported);
}
}
int main(int argv, char** argc) {
std::vector<scene::Scene*> scenes;
getScenesList(scenes);
auto scenesGetter = [](void* data, int n, const char** out) -> bool {
const std::vector<scene::Scene*>* v = (std::vector<scene::Scene*>*)data;
*out = v->at(static_cast<unsigned long long int>(n))->getName().c_str();
return true;
};
sf::Image image;
sf::Texture texture;
cv::Mat* mat = nullptr;
std::thread renderThread;
std::atomic<bool> renderThreadDone {false};
std::promise<std::string> renderTimePromise;
std::future<std::string> renderTimeFuture;
std::string renderTime;
bool renderWindowOpened = false;
scene::Scene* renderedScene = nullptr;
auto refreshTexture = [&]() {
cv::Mat matRgb;
cv::cvtColor(*mat, matRgb, cv::COLOR_BGRA2RGBA);
image.create(static_cast<unsigned int>(matRgb.cols), static_cast<unsigned int>(matRgb.rows), matRgb.ptr());
texture.loadFromImage(image);
};
sf::RenderWindow window(sf::VideoMode(1280, 720), "Raytracing", sf::Style::Default);
window.setVerticalSyncEnabled(true);
ImGui::SFML::Init(window);
sf::Color bgColor(0, 0, 0);
window.resetGLStates();
sf::Clock deltaClock;
while (window.isOpen()) {
sf::Event event {};
while (window.pollEvent(event)) {
ImGui::SFML::ProcessEvent(event);
if (event.type == sf::Event::Closed) {
window.close();
}
}
ImGui::SFML::Update(window, deltaClock.restart());
bool isCurrentlyRendering = !(renderThreadDone || mat == nullptr);
//////// Scenes Manager
ImGui::Begin("Scenes Manager");
if (ImGui::GetWindowSize().y < 100.0f) {
ImGui::SetWindowSize(ImVec2(500.0f, 325.0f), true);
}
ImGui::BeginChild("Left pane", ImVec2(250, 0), true);
ImGui::BeginChild("List container", ImVec2(0, -ImGui::GetItemsLineHeightWithSpacing()));
static int selectedSceneIdx = -1;
static scene::Scene* selectedScene = nullptr;
{
ImGui::Text("Found scenes:");
ImGui::PushItemWidth(-1.0f);
auto numScenes = static_cast<int>(scenes.size());
if (ImGui::ListBox("##ListScenes", &selectedSceneIdx, scenesGetter, (void*)&scenes, numScenes, numScenes)) {
selectedScene = scenes[selectedSceneIdx];
}
ImGui::PopItemWidth();
}
ImGui::EndChild(); // End List container
if (isCurrentlyRendering) {
ImGui::PushStyleVar(ImGuiStyleVar_Alpha, ImGui::GetStyle().Alpha * 0.5f);
}
if (ImGui::Button("Refresh") && !isCurrentlyRendering) {
selectedSceneIdx = -1;
selectedScene = nullptr;
renderedScene = nullptr;
getScenesList(scenes);
}
if (isCurrentlyRendering) {
ImGui::PopStyleVar();
}
ImGui::SameLine();
if (ImGui::Button("Import Scene")) {
selectedSceneIdx = -1;
selectedScene = nullptr;
importYaml(scenes);
}
ImGui::EndChild(); // End Left pane
ImGui::SameLine();
ImGui::BeginGroup(); // Right pane
if (selectedScene != nullptr) {
ImGui::BeginChild("Container", ImVec2(0.0f, 0.0f), true);
ImGui::BeginChild("Item view", ImVec2(0.0f, -ImGui::GetItemsLineHeightWithSpacing()));
ImGui::Text("%s", selectedScene->getName().c_str());
ImGui::Separator();
ImGui::TextWrapped("Path: %s", selectedScene->path.c_str());
ImGui::NewLine();
ImGui::Text("%d x %d", (int)selectedScene->getCamera().getWidth(), (int)selectedScene->getCamera().getHeight());
ImGui::Text("%d lights", (int)selectedScene->getAllLights().size());
ImGui::Text("%d objects", (int)selectedScene->getAllObj().size());
ImGui::EndChild(); // End Item view
if (isCurrentlyRendering) {
ImGui::PushStyleVar(ImGuiStyleVar_Alpha, ImGui::GetStyle().Alpha * 0.5f);
}
if (ImGui::Button("Render") && !isCurrentlyRendering) {
delete mat;
renderedScene = selectedScene;
auto w = renderedScene->getCamera().getWidth();
auto h = renderedScene->getCamera().getHeight();
mat = new cv::Mat((int)h, (int)w, CV_8UC4);
renderThreadDone.store(false);
renderTimeFuture = renderTimePromise.get_future();
renderThread = renderedScene->render(mat, std::move(renderTimePromise), renderThreadDone);
if (!renderWindowOpened) {
ImGui::SetNextWindowPos(ImVec2(window.getSize().x - w - 24.0f, 8.0f), true);
ImGui::SetNextWindowFocus();
}
renderWindowOpened = true;
}
ImGui::SameLine();
if (ImGui::Button("Reload") && !isCurrentlyRendering) {
bool remove = false;
if (boost::filesystem::exists(selectedScene->path)) {
auto path = selectedScene->path;
auto newScene = parseSceneFromFile(path);
if (newScene != nullptr) {
newScene->path = path;
scenes[selectedSceneIdx] = newScene;
delete selectedScene;
selectedScene = newScene;
} else {
remove = true;
}
} else {
remove = tinyfd_messageBox("Remove dangling scene?", "This file does not exist anymore.\r\nDo you want to remove the scene from the list?", "yesno", "question", 1) != 0;
}
if (remove) {
scenes.erase(scenes.begin() + selectedSceneIdx);
delete selectedScene;
selectedScene = nullptr;
selectedSceneIdx = -1;
}
}
if (isCurrentlyRendering) {
ImGui::PopStyleVar();
}
ImGui::EndChild(); // End Container
}
ImGui::EndGroup(); // End Right pane
ImGui::End(); // End Scenes Manager
////////
//////// Render Window
if (renderWindowOpened && renderedScene != nullptr) {
ImGui::Begin("Render", nullptr, ImGuiWindowFlags_AlwaysAutoResize);
if (!renderThreadDone) {
refreshTexture();
}
ImGui::Image(texture);
if (renderThreadDone) {
ImGui::Text("%s", renderTime.c_str());
if (ImGui::Button("Export as PNG")) {
exportPng(*renderedScene, mat);
}
}
ImGui::End(); // End Render
}
////////
window.clear(bgColor);
ImGui::SFML::Render(window);
window.display();
if (renderThreadDone && renderThread.joinable()) {
refreshTexture();
renderThread.join();
renderTime = renderTimeFuture.get();
renderTimePromise = std::promise<std::string>();
}
}
ImGui::SFML::Shutdown();
for (auto &scene : scenes) {
delete scene;
}
scenes.clear();
delete mat;
return 0;
}
|
{"hexsha": "b411064f8dc5b1f2b04e2cad84917882141d40d8", "size": 11399, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/main.cpp", "max_stars_repo_name": "r-o-b-o-t-o/cpp-raytracer", "max_stars_repo_head_hexsha": "23f7e483a786f760c8663d9fcaf0ec1627902e38", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4.0, "max_stars_repo_stars_event_min_datetime": "2021-03-15T10:05:35.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-15T10:14:45.000Z", "max_issues_repo_path": "src/main.cpp", "max_issues_repo_name": "r-o-b-o-t-o/cpp-raytracer", "max_issues_repo_head_hexsha": "23f7e483a786f760c8663d9fcaf0ec1627902e38", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/main.cpp", "max_forks_repo_name": "r-o-b-o-t-o/cpp-raytracer", "max_forks_repo_head_hexsha": "23f7e483a786f760c8663d9fcaf0ec1627902e38", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.3025477707, "max_line_length": 189, "alphanum_fraction": 0.5510132468, "num_tokens": 2595}
|
program lonlat_dist
implicit none
c
character*240 cfilea,cfileb,cline
integer ios,l
logical lsum,skip_new,skip_old
real lat1,lat2,lon1,lon2,dist,distmax
real*8 dist_sum
real*4 spherdist
c
c lonlat_dist - Usage: lonlat_dist in.txt out.txt [maxdist]
C lonlat_dist_sum in.txt out.txt
c
c adds the distance in km to a list of lon,lat values
c
c lonlat_dist_sum calculates the distance from the 1st point.
c
c if maxdist is present, it instead addd a blank line before any
c distance greater than maxdist
c
c --- each line of in.txt should contain:
c --- a) a lon,lat pair, or
c --- b) is blank, or
c --- c) a comment starting with #, or
c --- d) a mark-type (number) preceeded by >>>
c --- e) a label preceeded by ***
c --- f) a polyline color index preceeded by +++
c
integer iargc
integer narg
character*240 carg
c
c read arguments.
c
call getarg(0,carg)
l = len_trim(carg)
lsum = carg(l-3:l) .eq. "_sum"
c
narg = iargc()
C
if (narg.eq.3 .and. .not.lsum) then
call getarg(1,cfilea)
call getarg(2,cfileb)
call getarg(3,carg)
read(carg,*) distmax
elseif (narg.eq.2) then
call getarg(1,cfilea)
call getarg(2,cfileb)
distmax = -1.0
ELSEIF (lsum) then
WRITE(6,*)
+ 'Usage: lonlat_dist_sum in.txt out.txt'
CALL EXIT(1)
ELSE
WRITE(6,*)
+ 'Usage: lonlat_dist in.txt out.txt [maxdist]'
CALL EXIT(1)
ENDIF
c
open(unit=11, file=cfilea, form='formatted', status='old',
+ iostat=ios)
if (ios.ne.0) then
write(6,*) 'Error: can''t open ',trim(cfilea)
write(6,*) 'ios = ',ios
call exit(3)
endif
open(unit=21, file=cfileb, form='formatted', status='new',
+ iostat=ios)
if (ios.ne.0) then
write(6,*) 'Error: can''t open ',trim(cfileb)
write(6,*) 'ios = ',ios
call exit(5)
endif
c
read(11,'(a)') cline
skip_new = cline .eq.' ' .or.
& cline(1:1).eq.'#' .or.
& cline(1:3).eq.'>>>' .or.
& cline(1:3).eq.'+++' .or.
& cline(1:3).eq.'***'
if (skip_new) then
lon2 = 0.0
lat2 = 0.0
write(21,'(a)') trim(cline)
elseif (distmax.lt.0.0) then
read(cline,*) lon2,lat2
dist_sum = 0.d0
write(21,'(a,f12.4)') trim(cline),0.0
else
read(cline,*) lon2,lat2
dist_sum = 0.d0
write(21,'(a,f12.4)') trim(cline)
endif
c
do
read(unit=11,end=100,fmt='(a)') cline
skip_old = skip_new
skip_new = cline .eq.' ' .or.
& cline(1:1).eq.'#' .or.
& cline(1:3).eq.'>>>' .or.
& cline(1:3).eq.'+++' .or.
& cline(1:3).eq.'***'
c
if (skip_new) then
write(21,'(a)') trim(cline)
else
lon1 = lon2
lat1 = lat2
read(cline,*) lon2,lat2
if (skip_old) then
if (distmax.lt.0.0) then
dist_sum = 0.d0
write(21,'(a,f12.4)') trim(cline),0.0
else
write(21,'(a)') trim(cline)
endif
else
dist = 0.001*spherdist(lon1,lat1,lon2,lat2)
if (lsum) then
dist_sum = dist_sum + dist
write(21,'(a,f12.4)') trim(cline),dist_sum
elseif (distmax.lt.0.0) then
write(21,'(a,f12.4)') trim(cline),dist
elseif (dist.eq.0.0) then
c same location as last line - don't repeat it
else
if (dist.gt.distmax) then
write(21,*)
endif
write(21,'(a)') trim(cline)
* write(21,'(a,1pe20.6)') trim(cline),dist
endif
endif !skip_old:else
endif !skip_new:else
enddo
100 continue
close(21)
end
real*4 function spherdist(lon1,lat1,lon2,lat2)
implicit none
real, intent(in) :: lon1,lat1,lon2,lat2 ! Pos. in degrees
c
c --- ------------------------------------------------
c --- Computes the distance between geo. pos.
c --- lon1,lat1 and lon2,lat2.
c --- input is in degrees.
c
c --- output is real*4 for better global consistancy,
c --- by truncating double precision roundoff errors.
c --- real*4 is not in f90, but is widely supported.
c
c --- Based on m_spherdist.F90 from Geir Evanson.
c --- ------------------------------------------------
c
double precision, parameter :: invradian=0.017453292d0
double precision, parameter :: rearth=6371001.0d0 ! Radius of earth
c
double precision dlon1,dlon2
double precision rlon1,rlat1,rlon2,rlat2 ! Pos. in radians
double precision x1,y1,z1,x2,y2,z2 ! Cartesian position
double precision dr ! Arc length
c
c ensure that spherdist(ax,ay,bx,by) == spherdist(bx,by,ax,ay)
c
if (lon1.eq.lon2 .and. lat1.eq.lat2) then
spherdist=0.0
return
endif
c
dlon1 = lon1
dlon1 = mod(dlon1,360.d0)
if (dlon1.lt.0.d0) then
dlon1 = dlon1 + 360.d0
endif
dlon2 = lon2
dlon2 = mod(dlon2,360.d0)
if (dlon2.lt.0.d0) then
dlon2 = dlon2 + 360.d0
endif
if (lat1.lt.lat2) then
rlon1=dlon1*invradian !lon1 in rad
rlat1=(90.d0-lat1)*invradian !90-lat1 in rad
rlon2=dlon2*invradian !lon2 in rad
rlat2=(90.d0-lat2)*invradian !90-lat2 in rad
elseif (lat1.eq.lat2 .and. dlon1.le.dlon2) then
rlon1=dlon1*invradian !lon1 in rad
rlat1=(90.d0-lat1)*invradian !90-lat1 in rad
rlon2=dlon2*invradian !lon2 in rad
rlat2=(90.d0-lat2)*invradian !90-lat2 in rad
else
rlon2=dlon1*invradian !lon1 in rad
rlat2=(90.d0-lat1)*invradian !90-lat1 in rad
rlon1=dlon2*invradian !lon2 in rad
rlat1=(90.d0-lat2)*invradian !90-lat2 in rad
endif
c
x1= sin(rlat1)*cos(rlon1) !x,y,z of pos 1.
y1= sin(rlat1)*sin(rlon1)
z1= cos(rlat1)
c
x2= sin(rlat2)*cos(rlon2) !x,y,z of pos 2.
y2= sin(rlat2)*sin(rlon2)
z2= cos(rlat2)
c
dr=acos(min(1.d0,x1*x2+y1*y2+z1*z2)) ! Arc length
c
spherdist=dr*rearth
c
end function spherdist
|
{"hexsha": "d6295ca0ad776c257e6d8373ab823b8737622cff", "size": 6647, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "bin/lonlat_dist.f", "max_stars_repo_name": "TillRasmussen/HYCOM-tools", "max_stars_repo_head_hexsha": "7d26b60ce65ac9d785e0e36add36aca05c0f496d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 13, "max_stars_repo_stars_event_min_datetime": "2019-05-31T02:47:50.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-02T19:21:04.000Z", "max_issues_repo_path": "bin/lonlat_dist.f", "max_issues_repo_name": "TillRasmussen/HYCOM-tools", "max_issues_repo_head_hexsha": "7d26b60ce65ac9d785e0e36add36aca05c0f496d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 7, "max_issues_repo_issues_event_min_datetime": "2019-09-27T08:20:13.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-18T16:50:53.000Z", "max_forks_repo_path": "bin/lonlat_dist.f", "max_forks_repo_name": "TillRasmussen/HYCOM-tools", "max_forks_repo_head_hexsha": "7d26b60ce65ac9d785e0e36add36aca05c0f496d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 10, "max_forks_repo_forks_event_min_datetime": "2019-03-21T08:43:56.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-24T08:08:56.000Z", "avg_line_length": 31.6523809524, "max_line_length": 78, "alphanum_fraction": 0.5148187152, "num_tokens": 2224}
|
Require Import StateType SmallStepRelations Divergence.
Require Import Classical_Prop Coq.Logic.Epsilon.
Set Implicit Arguments.
Lemma three_possibilities S `{StateType S} (σ:S)
: (exists σ', star2 step σ nil σ' /\ normal2 step σ')
\/ (exists σ', star2 step σ nil σ' /\ activated σ')
\/ diverges σ.
Proof.
destruct (classic (exists σ' : S, star2 step σ nil σ' /\ normal2 step σ')); eauto; right.
destruct (classic (exists σ' : S, star2 step σ nil σ' /\ activated σ')); eauto; right.
eapply neither_diverges; eauto.
Qed.
Require Import Coq.Logic.ClassicalDescription.
Lemma three_possibilities_strong S `{StateType S} (σ:S)
: { σ' : S | star2 step σ nil σ' /\ normal2 step σ' }
+ { σ' : S & { p : star2 step σ nil σ' &
{ ext : extern & { σ'' : S | step σ' (EvtExtern ext) σ'' } } } }
+ diverges σ.
Proof.
destruct (excluded_middle_informative (exists σ' : S, star2 step σ nil σ' /\ normal2 step σ')); eauto.
- eapply constructive_indefinite_description in e. eauto.
- destruct (excluded_middle_informative (exists σ' : S, star2 step σ nil σ' /\ activated σ')).
+ eapply constructive_indefinite_description in e.
left; right. destruct e. eexists x; eauto. dcr; eauto.
eapply constructive_indefinite_description in H1. destruct H1.
eapply constructive_indefinite_description in e. destruct e.
eauto.
+ right. revert σ n n0. cofix f.
intros. destruct (@step_dec _ H σ).
* inv H0; dcr.
destruct x.
-- exfalso. eapply n0; eexists σ; split; eauto using star2_refl.
do 2 eexists; eauto.
-- econstructor. eauto. eapply f; intro; dcr.
++ eapply n; eexists; split; eauto. eapply star2_silent; eauto.
++ eapply n0; eexists; split; eauto. eapply star2_silent; eauto.
* exfalso. eapply n; eexists σ; split; eauto using star2_refl.
Qed.
|
{"author": "sigurdschneider", "repo": "lvc", "sha": "be41194f16495d283fe7bbc982c3393ac554dd5b", "save_path": "github-repos/coq/sigurdschneider-lvc", "path": "github-repos/coq/sigurdschneider-lvc/lvc-be41194f16495d283fe7bbc982c3393ac554dd5b/theories/Equiv/StateTypeProperties.v"}
|
#classes and subclasses to import
import cv2
import numpy as np
def blend_transparent(face_img, overlay_t_img):
# Split out the transparency mask from the colour info
overlay_img = overlay_t_img[:,:,:3] # Grab the BRG planes
overlay_mask = overlay_t_img[:,:,3:] # And the alpha plane
# Again calculate the inverse mask
background_mask = 255 - overlay_mask
# Turn the masks into three channel, so we can use them as weights
overlay_mask = cv2.cvtColor(overlay_mask, cv2.COLOR_GRAY2BGR)
background_mask = cv2.cvtColor(background_mask, cv2.COLOR_GRAY2BGR)
# Create a masked out face image, and masked out overlay
# We convert the images to floating point in range 0.0 - 1.0
face_part = (face_img * (1 / 255.0)) * (background_mask * (1 / 255.0))
overlay_part = (overlay_img * (1 / 255.0)) * (overlay_mask * (1 / 255.0))
# And finally just add them together, and rescale it back to an 8bit integer image
return np.uint8(cv2.addWeighted(face_part, 255.0, overlay_part, 255.0, 0.0))
# build our cv2 Cascade Classifiers
face_cascade = cv2.CascadeClassifier("haarcascade_file/haarcascade_frontalface_default.xml")
smile_cascade = cv2.CascadeClassifier("haarcascade_file/haarcascade_smile.xml")
imgMustache = cv2.imread('images/lips2.png',-1)
cap = cv2.VideoCapture(0)
while True:
ret, frame = cap.read()
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(
gray,
scaleFactor=1.1,
minNeighbors=5,
minSize=(30, 30),
flags=cv2.CASCADE_SCALE_IMAGE
)
for (x, y, w, h) in faces:
#face = cv2.rectangle(frame,(x,y),(x+w,y+h),(255,0,0),2)
roi_gray = gray[y:y+h, x:x+w]
roi_color = frame[y:y+h, x:x+w]
smile = smile_cascade.detectMultiScale(
roi_gray,
scaleFactor= 1.7,
minNeighbors=22,
minSize=(25, 25)
)
for (nx,ny,nw,nh) in smile:
cv2.rectangle(roi_color,(nx,ny),(nx+nw,ny+nh),(255,0,0),2)
mustacheWidth = 0.9 * nw
mustacheHeight = nh//0.6
# Center the mustache on the bottom of the nose
x1 = nx - int(mustacheWidth/2)
x2 = nx + nw + int(mustacheWidth/2)
y1 = ny + nh - int(mustacheHeight)
y2 = ny + nh + int(mustacheHeight)
# Check for clipping
if x1 < 0:
x1 = 0
if y1 < 0:
y1 = 0
if x2 > w:
x2 = w
if y2 > h:
y2 = h
# Re-calculate the width and height of the mustache image
mustacheWidth = x2 - x1
mustacheHeight = y2 - y1
mustache = cv2.resize(imgMustache, (int(mustacheWidth),int(mustacheHeight)))
#print(mustache.shape)
roi = roi_color[y1:y2, x1:x2,:]
#print(roi.shape)
roi_bg = blend_transparent(roi, mustache)
roi_color[y1:y2, x1:x2] = roi_bg
break
# Display the resulting frame
cv2.imshow('Video', frame)
# press any key to exit
# NOTE; x86 systems may need to remove: " 0xFF == ord('q')"
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
|
{"hexsha": "362f2c9978b3b02f595499ad518b85d9529d463a", "size": 3366, "ext": "py", "lang": "Python", "max_stars_repo_path": "snappy_lips.py", "max_stars_repo_name": "ninjakx/Snappy", "max_stars_repo_head_hexsha": "b0289ae7d79d86c875c010972744e06d75f5575d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-03-26T23:58:35.000Z", "max_stars_repo_stars_event_max_datetime": "2018-03-26T23:58:35.000Z", "max_issues_repo_path": "snappy_lips.py", "max_issues_repo_name": "ninjakx/Snappy", "max_issues_repo_head_hexsha": "b0289ae7d79d86c875c010972744e06d75f5575d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "snappy_lips.py", "max_forks_repo_name": "ninjakx/Snappy", "max_forks_repo_head_hexsha": "b0289ae7d79d86c875c010972744e06d75f5575d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.2695652174, "max_line_length": 92, "alphanum_fraction": 0.5861556744, "include": true, "reason": "import numpy", "num_tokens": 953}
|
# coding=utf-8
import numpy as np
import scipy.stats
from .cobsampler import ChangeOfBasisSampler
class Test(object):
"""
Super class implementing tests for CoBSampler. Sub-classes should specify
target distribution.
"""
def __init__(self, ndim, target, nsteps, cobparams={}):
self.ndim = ndim
self.targetdist = target
self.niterations = nsteps
self.firscob = cobparams.pop('firstcob', 1000)
self.ncob = cobparams.pop('ncob', 1000)
self.updatecob = cobparams.pop('updatecob', 1000)
self.sampler = ChangeOfBasisSampler
def run(self, p0):
# initialise sampler
sampler = self.sampler(self.ndim, self.targetdist.logpdf, (),
{}, startpca=self.firscob,
nupdatepca=self.updatecob,
npca=self.ncob)
# p0 = np.zeros(self.ndim)
sampler.run_mcmc(self.niterations, p0)
return sampler
class MultinormalTest(Test):
"""
Class implementing test on multinormal distribution.
"""
def __init__(self, nsteps, ndim=2, mean=None, cov=None, cobparams={}):
"""
:param int nsteps: number of MCMC iterations.
:param int ndim: target dimension
:param np.array cov: covariance matrix. If None, random covariance is
constructed.
"""
target = Multinormal(ndim, mean, cov)
super(MultinormalTest, self).__init__(ndim, target, nsteps, cobparams)
class RosenbrockTest(Test):
"""
Class implementing test on Rosenbrock density.
"""
def __init__(self, nsteps, a=1, b=100, cobparams={}):
target = Rosenbrock(a, b, 2)
super(RosenbrockTest, self).__init__(2, target, nsteps, cobparams)
class TargetDistribution(object):
"""
Class for test target distributions.
"""
class Multinormal(TargetDistribution):
def __init__(self, ndim=2, mean=None, cov=None):
self.ndim = ndim
if mean is None:
mean = np.zeros(ndim)
else:
assert len(mean) == ndim, 'Dimensions of mean arry do no match ' \
'of dimensions.'
self.mean = mean
if cov is not None:
assert cov.shape == (ndim, ndim), 'Dimensions of covariance ' \
'matrix do no match.'
self.cov = cov
else:
# If covariance is not given, initialise at random.
self.cov = 0.5 - np.random.rand(self.ndim ** 2).reshape((self.ndim,
self.ndim))
self.cov = np.triu(self.cov)
self.cov += self.cov.T - np.diag(self.cov.diagonal())
self.cov = np.dot(self.cov, self.cov)
self.dist = scipy.stats.multivariate_normal(mean=self.mean,
cov=self.cov)
def pdf(self, x):
"""
Return value of pdf at point x
:param np.array x: Position in parameter space.
:return:
"""
return self.dist.pdf(x)
def logpdf(self, x):
return self.dist.logpdf(x)
class Rosenbrock(TargetDistribution):
"""
Class implementing the Rosenbrock density.
"""
def __init__(self, a=1, b=100, ndim=2):
self.a = a
self.b = b
self.ndim = ndim
def pdf(self, x):
if (np.abs(x[0]) > 10) or (np.abs(x[1]) > 10):
return 0
return np.exp(-(self.a - x[0])**2 - self.b*(x[1] - x[0]**2)**2)
def logpdf(self, x):
if (np.abs(x[0]) > 30) or (np.abs(x[1]) > 30):
return -np.inf
else:
return -(self.a - x[0])**2 - self.b*(x[1] - x[0]*x[0])**2
def contour(self, k, n=1000):
"""
:param float k: constant identifying contour.
:param int n: number of points used to construct contour.
"""
x = np.linspace(self.a - k, self.a + k, n)
yplus = x**2 + np.sqrt( (k**2 - (x - self.a)**2)/self.b )
yminus = x ** 2 - np.sqrt((k ** 2 - (x - self.a) ** 2) / self.b)
xx = np.concatenate((x, x[::-1]))
yy = np.concatenate((yminus, yplus[::-1]))
return np.array([xx, yy])
def rvs(self, size=1):
"""
Draw samples from the Rosenbrock density.
Uses the fact that p(x1,x2) = p(x2|x1)*p(x1) and that:
1) p(x1) \propto N(a, 1)
2) p(x2|x1) \propto N(x1**2, 1/sqrt(2*b))
"""
# Draw samples from marginal p(x1)
x1 = np.random.randn(size) + self.a
# Draw samples from conditional, p(x2 | x1)
sigma = 1./np.sqrt(2 * self.b)
x2 = np.random.randn(size) * sigma + x1**2
return np.array([x1, x2]).T
|
{"hexsha": "6ff64929cc4dd7af2b28f398df524a10cf507044", "size": 4799, "ext": "py", "lang": "Python", "max_stars_repo_path": "cobmcmc/tests.py", "max_stars_repo_name": "exord/cobmcmc", "max_stars_repo_head_hexsha": "162ba8d4fab35aa44bc8a4828eb51e25df13c4e2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "cobmcmc/tests.py", "max_issues_repo_name": "exord/cobmcmc", "max_issues_repo_head_hexsha": "162ba8d4fab35aa44bc8a4828eb51e25df13c4e2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "cobmcmc/tests.py", "max_forks_repo_name": "exord/cobmcmc", "max_forks_repo_head_hexsha": "162ba8d4fab35aa44bc8a4828eb51e25df13c4e2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.7628205128, "max_line_length": 80, "alphanum_fraction": 0.5349031048, "include": true, "reason": "import numpy,import scipy", "num_tokens": 1270}
|
\section{UTxO}
\label{sec:utxo}
\begin{figure*}[htb]
\emph{Derived types}
%
\begin{equation*}
\begin{array}{r@{~\in~}l@{\qquad=\qquad}lr}
\var{uin}
& \UTxOIn
& \TxId \times \Ix
% & \text{transaction output preference}
\\
\var{uout}
& \UTxOOut
& (\TxOutND \uniondistinct \TxOutP) \times \Slot
% & \text{transaction outputs}
\\
\var{utxo}
& \UTxO
& \UTxOIn \mapsto \UTxOOut
% & \text{unspent tx outputs}
\\
\var{cur}
& \ScriptPurpose
& \PolicyID \uniondistinct \UTxOIn \uniondistinct \AddrRWD \uniondistinct \DCert
% & \text{item the script is validated for}
\end{array}
\end{equation*}
\caption{Definitions used in the UTxO transition system}
\label{fig:defs:utxo-shelley-1}
\end{figure*}
We make a number of changes to the Shelley UTxO model~\ref{XX} to support the Goguen Extended UTxO model
(see Figure~\ref{fig:defs:utxo-shelley-1}).
\begin{itemize}
\item
$\UTxO$ entries are stored in the finite map $\UTxOIn\mapsto \UTxOOut$.
\item
$\UTxOIn$ is the same type as $\TxIn$ in Shelley, but we have changed
the name because the types of transaction inputs and UTxO keys
differ in Goguen.
\item
$\UTxOOut$ is the type of UTxO entries.
  Note that, as in the case of transaction inputs,
  this type differs from the type of the outputs of a transaction.
Goguen UTxO entries include a slot number for each output
that indicates when the output was created.
This will be used for future functionality.
\item
$\ScriptPurpose$ is the type of the items that scripts can validate.
\end{itemize}
\subsection{UTxO Transitions}
\label{sec:utxo-trans}
We have added several functions that deal with transaction and UTxO inputs and
outputs as shown in Figure \ref{fig:functions:insouts}. These are used in the definition of the UTxO transition system.
\begin{itemize}
\item The function $\fun{txinputs_{vf}}$ returns only those transaction inputs
that were selected to pay transaction fees (we call these ``fee-marked'' inputs).
These inputs may only contain Ada.
\begin{note}
Make it more obvious that we check that the fee inputs only contain Ada, maybe mention it elsewhere. Also, what does vf stand for?
\end{note}
\item The predicate $\fun{feesOK}$ checks whether the transaction is
paying the necessary fees, and that it does so correctly (a small worked example follows this list). That is, it checks that:
\begin{enumerate}[label=({\roman*})]
\item the fee-marked inputs are not locked by non-native scripts;
\item all the fee-marked inputs contain strictly Ada and no other kinds of token;
\item the fee-marked inputs are sufficient to cover the fee amount that is stated
in the transaction; and
\item the fee amount that the transaction states it is paying suffices to cover
the minimum fee that the transaction is obligated to pay.
\end{enumerate}
\item The function $\fun{getOut}$ selects the data from a transaction output that
will be stored in the UTxO, i.e. a $\UTxOOut$ without the slot number.
\item The function $\fun{txins}$ returns the UTxO keys of transaction inputs.
\end{itemize}
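For instance, with purely hypothetical values, suppose a transaction marks two inputs as fee-marked, that these inputs are plain (non-script) outputs carrying $3$ and $2$ Ada respectively, that the transaction states a fee of $4$ Ada, and that $\fun{minfee}$ evaluates to $3$ Ada. Then all four conditions of $\fun{feesOK}$ hold: the fee-marked inputs are not locked by non-native scripts, they contain only Ada, their balance of $5$ Ada covers the stated fee of $4$ Ada, and the stated fee covers the minimum fee of $3$ Ada.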
Note that when creating a transaction, the wallet is responsible for
determining the fees. Thus, it also has to execute the non-native scripts
and include the fees for their execution.
\begin{figure}[htb]
\begin{align*}
& \fun{txinputs_{vf}} \in \TxBody \to \powerset{\TxId \times \Ix} \\
& \fun{txinputs_{vf}} ~txb~= \\
&~~\{ (txid,ix)~\vert~(txid,ix,\var{isfee}) \in
\fun{txinputs} ~txb,~
\var{isfee} = \True\}
\nextdef
& \fun{feesOK} \in \N \to \PParams \to \GoguenTx \to \UTxO \to \Bool \\
& \fun{feesOK} ~n~\var{pp}~tx~utxo~= \\
&~~\fun{range}~(\fun{txinputs_{vf}}~{txb} \restrictdom \var{utxo}) \subseteq \TxOutND ~ \\
&~~\wedge~ \var{balance} \in \Coin \\
&~~ \wedge~ \var{balance} \geq \txfee{txb} ~ \\
&~~ \wedge~ \minfee~n~{pp}~{tx} \leq \txfee{txb} \\
&~~ \where \\
& ~~~~~~~ \var{txb}~=~\txbody{tx} \\
& ~~~~~~~ \var{balance}~=~\fun{ubalance}~(\fun{txinputs_{vf}}~{txb} \restrictdom \var{utxo})
\nextdef
& \fun{getOut} \in \TxOut \to \TxOutND \uniondistinct \TxOutP \\
& \fun{getOut} ~{txout}~= \begin{cases}
\var{txout} & \text{if~} \var{txout} \in \TxOutND \\
(\fun{getAddr}~\var{txout}, \fun{getValue}~\var{txout},
\fun{getDataHash}~\var{txout}) & \text{otherwise}
\end{cases}
\nextdef
& \fun{txins} \in \TxBody \to \powerset{\TxId \times \Ix} \\
& \fun{txins} ~\var{txb} = \{(txid,ix) \mid ((txid,ix),\wcard)\in\fun{txinputs} ~txb\}
\end{align*}
\caption{Functions on Tx Inputs and Outputs.}
\label{fig:functions:insouts}
\end{figure}
%
Figure~\ref{fig:functions:utxo} defines the functions that are needed for the UTxO transition system.
The changes that are needed for Plutus integration are:
\begin{itemize}
\item The $\fun{getCoin}$ function sums all the Ada in a given output and returns it as a
$\Coin$ value.
\item The function $\fun{outs}$ discards the $\HasDV$ tag from a
transaction output and adds the slot number of the block in which the transaction is
included.
\item $\fun{txscriptfee}$ calculates the fee that a transaction must pay for script
execution, based on the amount of $\ExUnits$ it has budgeted and the prices in the current protocol parameters
for each component of $\ExUnits$ (a worked example follows this list).
\item The minimum fee calculation, $\fun{minfee}$, includes the script
fees that the transaction is obligated to pay in order to run its scripts.
\item The $\fun{produced}$ calculation requires the current slot number as an argument -- this is
needed to construct the correct UTxO outputs.
\end{itemize}
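As a worked example with hypothetical numbers, suppose a transaction carries $n = 2$ Plutus scripts, the protocol prices are $(\var{pr_{init}}, \var{pr_{mem}}, \var{pr_{steps}}) = (1, 2, 3)$, and the budgeted execution units are $(\var{mem}, \var{steps}) = (10, 100)$. Then
\begin{equation*}
  \fun{txscriptfee}~2~(1, 2, 3)~(10, 100) = 1 \cdot 2 + 2 \cdot 10 + 3 \cdot 100 = 322,
\end{equation*}
and $\fun{minfee}$ adds this amount to the usual size-based term $(\fun{a}~\var{pp}) \cdot \fun{txSize}~\var{tx} + (\fun{b}~\var{pp})$.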
Since the $\Tx$ type combines both $\ShelleyTx$ and $\GoguenTx$, and
there was already a way of computing an ID from a Shelley transaction, there
is potential for confusion about how the ID of a transaction is
computed. Here, $\TxId$ is always computed from values of type $\Tx$,
\emph{and never from the underlying $\ShelleyTx$ or $\GoguenTx$}. That is, there is a \emph{canonical} ID for each Goguen transaction,
and this is not necessarily the same as the corresponding ID in the underlying Shelley or Goguen transaction type.
\begin{figure}[htb]
\emph{Helper Functions}
\begin{align*}
& \fun{getCoin} \in \UTxOOut \to \Coin \\
& \fun{getCoin}~{\var{out}} ~=~\sum_{\mathsf{adaID} \mapsto tkns \in \fun{getValue}~out}
(\sum_{q \in \range~{tkns}} \fun{co}~q)
\end{align*}
%
\emph{Main Calculations}
\begin{align*}
& \fun{outs} \in \Slot \to \TxBody \to \UTxO \\
& \fun{outs} ~ \var{slot}~\var{txb} =
\left\{
(\fun{txid} ~ \var{txb}, \var{ix}) \mapsto (\fun{getOut}~\var{txout},\var{slot}) ~
\middle|~
\var{ix} \mapsto \var{txout} \in \txouts{txb}
\right\}
\nextdef
& \fun{txscriptfee} \in \N \to \Prices \to \ExUnits \to \Coin \\
& \fun{txscriptfee}~n~ (\var{pr_{init}, pr_{mem}, pr_{steps}})~ (\var{mem, steps})
= \var{pr_{init}}*n + \var{pr_{mem}}*\var{mem} + \var{pr_{steps}}*\var{steps}
\nextdef
&\fun{minfee} \in \N \to \PParams \to \GoguenTx \to \Coin \\
&\fun{minfee} ~n~\var{pp}~\var{tx} = \\
&~~(\fun{a}~\var{pp}) \cdot \fun{txSize}~\var{tx} + (\fun{b}~\var{pp}) +
\fun{txscriptfee}~n~(\fun{prices}~{pp})~(\fun{txexunits}~(\fun{txbody}~{tx}))
\nextdef
& \fun{produced} \in \Slot \to \PParams \to \StakePools \to \TxBody \to \Value \\
& \fun{produced}~\var{slot}~\var{pp}~\var{stpools}~\var{txb} = \\
&~~\ubalance{(\outs{slot}~{txb})} + \fun{coinToValue}(\txfee{txb} + \deposits{pp}{stpools}~{(\txcerts{txb})})
\end{align*}
\caption{Functions used in UTxO rules}
\label{fig:functions:utxo}
\end{figure}
\subsection{Combining Scripts with Their Inputs}
\label{sec:scripts-inputs}
Figure~\ref{fig:functions:script1} shows the helper functions that are needed to
retrieve all the data that is relevant to Plutus script validation.
These include:
\begin{itemize}
\item
$\fun{indexof}$ finds the index of a given certificate, value, input, or
withdrawal in a list, finite map, or set of such objects.
This function assumes there is some ordering on each of these structures.
% This function is abstract because it assumes there is some ordering rather
% than giving it explicitly.
\begin{note}
$\fun{indexof}$ might need an actual implementation. Also, some
restructuring in related functions might make it easier.
Sets and maps don't really have indexes, of course.
\end{note}
\item
$\fun{indexedScripts}$ and $\fun{indexedDats}$ create finite maps from sets of the corresponding object.
The finite maps are indexed by the hashes of the objects that they contain.
%
% wherein, respectively, all the scripts
% and datums that a transaction carries as sets, are indexed by their hashes.
\item
$\fun{findRdmr}$ finds the redeemer in a Goguen transaction
that corresponds to a given item in the indexed redeemer structure, if it exists.
\end{itemize}
\begin{figure}[htb]
%
\emph{Abstract functions}
\begin{align*}
&\fun{indexof} \in \DCert \to \seqof{\DCert} \to \Ix\\
&\fun{indexof} \in \AddrRWD \to \Wdrl \to \Ix\\
&\fun{indexof} \in \UTxOIn \to \powerset{\TxIn} \to \Ix\\
&\fun{indexof} \in \PolicyID \to \Value \to \Ix\\
& \text{get the index of an item in an ordered representation}
\end{align*}
%
\emph{Helper functions}
\begin{align*}
&\fun{indexedScripts} \in \GoguenTx \to (\ScriptHash \mapsto \Script) \\
&\fun{indexedScripts}~{tx} ~=~ \{ h \mapsto s ~\vert~ \fun{hashScript}~{s}~=~h,
s\in~\fun{txscripts}~(\fun{txwits}~{tx})\}
\nextdef
&\fun{indexedDats} \in \GoguenTx \to (\DataHash \mapsto \Data)\\
&\fun{indexedDats}~{tx} ~=~ \{ h \mapsto d ~\vert~ \fun{hashData}~{d}~=~h,
d\in~\fun{txdats}~(\fun{txwits}~{tx})\}
\nextdef
&\fun{toRdmrPtr} \in \GoguenTx \to \ScriptPurpose \to \RdmrPtr \\
&\fun{toRdmrPtr}~{tx}~{it} ~=~
\begin{cases}
(\mathsf{certTag},\fun{indexof}~\var{it}~(\fun{txcerts}~{txb})) & \var{it}~\in~\DCert \\
(\mathsf{wdrlTag},\fun{indexof}~\var{it}~(\fun{txwdrls}~{txb})) & \var{it}~\in~\AddrRWD \\
(\mathsf{forgeTag},\fun{indexof}~\var{it}~(\fun{forge}~{txb})) & \var{it}~\in~\PolicyID \\
(\mathsf{inputTag},\fun{indexof}~\var{it}~(\fun{txinputs}~{txb})) & \var{it}~\in~\UTxOIn
\end{cases} \\
& ~~\where \\
& ~~~~~~~ \var{txb}~=~\txbody{tx}
\nextdef
&\fun{findRdmr} \in \GoguenTx \to (\ScriptPurpose \mapsto \Data)\\
& \fun{findRdmr}~{tx} ~=~ \{ \var{it} \mapsto \var{d} ~|~
\var{it} \in \ScriptPurpose,~ \fun{toRdmrPtr}~{tx}~{it} \mapsto \var{d} \in \fun{txrdmrs}~(\fun{txwits}~{tx}) \}
\end{align*}
\caption{Combining Script Validators and their Inputs}
\label{fig:functions:script1}
\end{figure}
\textbf{Plutus Script Validation}
Figure~\ref{fig:defs:functions-valid} shows the abstract functions that are used for script validation.
\begin{itemize}
\item
$\fun{valContext}$ constructs the validation context.
This includes all the necessary transaction and chain state data that needs to be passed to the script interpreter.
It has a $\UTxO$ as its argument to recover the full information of the inputs of the transaction,
but only the inputs of the transaction are provided to scripts.
\item
$\fun{hashScript},~ \fun{hashData}$ are abstract hashing functions.
\item
$\fun{runMSigScript}$ validates multi-signature scripts, exactly as in the Shelley ledger specification (where it is called $\fun{evaluateScript}$).
\item
$\fun{runPLCScript}$ validates Plutus scripts. It takes the following
arguments:
\begin{itemize}
\item A cost model, which is used to calculate the $\ExUnits$ that are needed for script execution;
\item A list of terms of type $\Data$ that will be passed to the script; %is given access to.
\item The execution unit budget.
\end{itemize}
It outputs a pair of the validation result (as a Boolean)
and the remaining execution units (subtracting those that are used for script execution).
Note that script execution stops if the full budget has been spent before validation is complete.
\end{itemize}
\begin{note}
Maybe rename runPLCScript.
\end{note}
\begin{note}
\textbf{Know your contract arguments}
A Plutus validator script may receive either a list of three terms of type $\Data$, in case it validates the spending of script outputs
or two terms (redeemer and context, with no datum), for all other uses.
Script authors must keep this in mind when writing scripts, since the ledger call to the interpreter is oblivious to what
arguments are required.
\end{note}
\begin{figure*}[htb]
\emph{Abstract Script Validation Functions}
%
\begin{align*}
&\fun{hashScript} \in ~\Script\to \ScriptHash \\
&\text{Compute script hash} \\~\\
&\fun{hashData} \in ~\Data \to \DataHash \\
&\text{Compute hash of data} \\~\\
&\fun{valContext} \in \UTxO \to \GoguenTx \to \ScriptPurpose \to \Data \\
&\text{Build Validation Data} \\~\\
&\fun{runMSigScript} \in\ScriptMSig\to \GoguenTx \to \IsValidating \\
&\text{Validate a multi-signature script} \\~\\
&\fun{runPLCScript} \in \CostMod \to\ScriptPlutus \to
\seqof{\Data} \to \ExUnits \to (\IsValidating \times \ExUnits) \\
&\text{Validate a Plutus script, taking resource limits into account}
\end{align*}
%
\emph{Notation}
%
\begin{align*}
\llbracket \var{script_v} \rrbracket_{\var{cm},\var{exunits}}~\var{d}
&=& \fun{runPLCScript} ~{cm}~\var{script_v}~\var{d}~\var{exunits}
\end{align*}
\caption{Script Validation, cont.}
\label{fig:defs:functions-valid}
\end{figure*}
Note that no ``checks'' are performed within the functions that match the
scripts with their inputs. Missing validators, missing inputs, incorrect hashes, the wrong type of script, etc.,
are caught during the application of the UTXOW rule (before these functions are ever applied).
%
Various items of data are involved in building the inputs for script validation:
\begin{itemize}
\item The hash of the validator script;
\item The hash of the required datum, if any;
\item The corresponding full validator and datum object, which are looked up in the finite map
constructed by $\fun{indexedScripts}$ and $\fun{indexedDats}$, respectively;
\item The redeemer, which is contained in the transaction's indexed redeemer structure
and which is located using the $\fun{findRdmr}$ function; and
\item the validation data, built using the UTxO, the transaction itself,
and the current item being validated.
\end{itemize}
\begin{figure}[htb]
\begin{align*}
& \fun{getData} \in \GoguenTx \to \UTxO \to \ScriptPurpose \to \seqof{\Data} \\
& \fun{getData}~{tx}~{utxo}~{it}~=~
\begin{cases}
[\var{d}] & \var{it} \mapsto ((a,\_),h_d, \_) \in \var{utxo}, \var{h_d}\mapsto \var{d} \in \fun{indexedDats}~{tx} \\
\epsilon & \text{otherwise}
\end{cases}
\nextdef
& \fun{mkPLCLst} \in \PParams \to \GoguenTx \to \UTxO \to \seqof{(\ScriptPlutus \times \seqof{\Data} \times \CostMod)} \\
& \fun{mkPLCLst} ~\var{pp}~\var{tx}~ \var{utxo} ~=~ \\
& ~~\fun{toList} \{ (\var{script}, (r; \fun{valContext}~\var{utxo}~\var{tx}~\var{cur}; \fun{getData}~{tx}~{utxo}~{cur}), \var{cm}) \mid \\
& ~~~~(\var{cur}, \var{scriptHash}) \in \fun{scriptsNeeded}~{tx}~{utxo}, \\
& ~~~~\var{cur} \mapsto \var{r} \in \fun{findRdmr}~{tx}, \\
& ~~~~\var{scriptHash}\mapsto \var{script}\in \fun{indexedScripts}~{tx}, \\
& ~~~~\fun{language}~{script} \mapsto \var{cm} \in \fun{costmdls}~{pp} \}
\end{align*}
\caption{Scripts and their Arguments}
\label{fig:functions:script2}
\end{figure}
\subsection{Two-Phase Transaction Validation for Non-Native Scripts}
\label{sec:two-phase}
\begin{note}
Make it more obvious somewhere where native vs non-native scripts are processed.
\end{note}
The costs of processing native scripts (those that involve only executing standard ledger rules) are included in the standard transaction fees.
In order to ensure that users pay for the computational resources that are needed to validate non-native scripts, even
if transactions are invalid, transactions are validated in two phases:
the first phase consists of every aspect of transaction validation apart from executing the non-native scripts; and
the second phase involves actually executing those scripts.
%
Our validation approach uses four transition systems, each with different responsibilities. We
give the details of each below, but to summarize, when a transaction is processed,
the processing is done
in the following order:
\begin{tabular}{lp{12cm}}
\textbf{(UTXOW):} & Verifies that all the necessary witnessing information is present, including
VKey witnesses, scripts, and all the script input data. It also performs
key witness checks and runs multisig scripts. It then applies the state changes that are
computed by the UTXO transition.
\\
\textbf{(UTXO):} & Verifies that a transaction satisfies all the accounting requirements
(including the general accounting property, correct fee payment, etc.),
and applies the state changes that are computed by the UTXOS transition.
\\
\textbf{(UTXOS):} & Performs the appropriate UTxO state changes, based on the
value of the $\IsValidating$ tag, which it checks using the SVAL transition.
\\
\textbf{(SVAL):} & Runs the scripts, verifying that the $\IsValidating$ tag
is applied correctly.
\end{tabular}
In general, there is no way to check that the budget that has been supplied is sufficient for the transaction,
except by running the scripts. To avoid over-spending the budget, we run them sequentially,
stopping whenever one does not validate, and charging the transaction the corresponding
fees. From the point of view of the ledger, there is no difference
between a script running out of $\ExUnits$ during validation, or not validating.
If a transaction contains an invalid script, the only change to the ledger
as a result of applying this transaction is the fees.
Two-phase validation requires a new transition system
(see Figure \ref{fig:ts-types:utxos}) to sequentially run
scripts and to track spent execution units as part of its state
($\var{remExU}$). The signal here is a sequence of triples of a validator
script, its input data, and the cost model to use.
Note that there is one state variable in the SVAL transition system. The reason
for this is that in the second, script-running validation phase, we separate
the UTxO state update from sequentially running scripts. This transition
system is strictly for running the scripts, and a transition of this type
will be used by another rule to perform the correct UTxO update.
Running scripts sequentially
to verify that they all validate in the allotted $\ExUnits$ budget only requires
the amount of remaining $\ExUnits$ to be included in the state, and nothing else.
In the environment, we need the protocol parameters and the
transaction being validated. All other data needed
to run the scripts comes from the signal.
\begin{figure}[htb]
\emph{Validation environment}
\begin{equation*}
\ValEnv =
\left(
\begin{array}{r@{~\in~}lr}
\var{pp} & \PParams & \text{protocol parameters}\\
\var{tx} & \GoguenTx & \text{transaction being processed} \\
\end{array}
\right)
\end{equation*}
%
\emph{Validation state}
\begin{equation*}
\ValState =
\left(
\begin{array}{r@{~\in~}lr}
\var{remExU} & \ExUnits & \text{exunits remaining to spend on validation} \\
\end{array}
\right)
\end{equation*}
%
\emph{Script transitions}
\begin{equation*}
\_ \vdash
\var{\_} \trans{sval}{\_} \var{\_}
\subseteq \powerset (\ValEnv \times \ValState \times \seqof{(\ScriptPlutus\times\seqof{\Data}\times\CostMod)} \times \ValState)
\end{equation*}
%
\caption{UTxO script validation types}
\label{fig:ts-types:utxos}
\end{figure}
The rules for the second-phase script validation SVAL are given in
Figure~\ref{fig:rules:utxo-scrval}. Again, no UTxO state update
is done in this rule. Its purpose is to verify that the
validation tag ($\fun{txvaltag}$) is applied correctly by the creator of
the block. It does this by running all the scripts.
Note that following the Shelley ledger specification approach, every function
that we define and use in the preconditions or calculations that are used in the ledger rules is
necessarily total.
In this way, all the errors (validation failures) that we encounter always come from
rule applications, i.e. a precondition of a rule is not met.
We mention this here because the SVAL rule looks as if it could be
simply a function. However, we want the incorrect application of the
validation tag to be an error, so this must be an error that comes from
an unmet precondition of a rule.
There are three transition rules.
The first rule, $\mathsf{Scripts\mbox{-}Val}$, applies when:
\begin{enumerate}
\item There
are no scripts left to validate in the signal list (i.e. this is the base case of
induction when all the scripts have validated) -- note that there could be $\ExUnits$ left over; and
\item The validation tag is applied correctly (it is $\True$).
\end{enumerate}
The $\mathsf{Scripts\mbox{-}Stop}$ rule applies when:
\begin{enumerate}
\item The current script-input pair does not validate
(either because the transaction ran out of $\ExUnits$ or for any other reason); and
\item The validation tag is correct ($\False$ in this case).
\end{enumerate}
These first two rules require no state change.
The $\mathsf{Scripts\mbox{-}Ind}$ rule applies when:
\begin{enumerate}
\item The current script being validated has been validated;
\item There is a non-negative fee which remains to pay for validating
the rest of the scripts in the list; and
\item Transition rules apply for the rest of the list (without the current script).
\end{enumerate}
The only state change in this rule is of the variable $\var{remExU}$.
It is decreased by subtracting the cost of the execution of the
current script from its current value.
We use this variable to keep track of the remaining funds for
script execution. If the transaction is overpaying ($\fun{txscriptfee}~{tx}$
is too big), then the whole fee is still taken.
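As a small illustration with hypothetical numbers, suppose a transaction budgets $\var{remExU} = 100$ execution units for two scripts. If the first script validates and returns $(\True, 60)$, i.e. $60$ units remain, the $\mathsf{Scripts\mbox{-}Ind}$ rule records the new state $\var{remExU} = 60$. If the second script also validates within those $60$ units, the base case $\mathsf{Scripts\mbox{-}Val}$ accepts a $\True$ tag; if it instead fails to validate or exhausts the budget, only $\mathsf{Scripts\mbox{-}Stop}$ applies, so the tag must be $\False$.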
It is always in the interest of the slot leader to have the new block validate,
and for it to contain only valid transactions. This motivates the
slot leader to:
\begin{enumerate}
\item Correctly apply the $\IsValidating$ tag;
\item Include all transactions that validate,
\textit{even in case of a 2nd step script validation failure};
\item Exclude any transactions that are invalid in some way \textit{other than 2nd step script validation failure}.
\end{enumerate}
We want to
discard the blocks which have transactions with these tags
applied incorrectly.
One of the reasons for having the correct validation tag added by the slot leader
to a transaction is that re-applying blocks does not then require re-executing
the scripts in the transactions inside a block. In fact, when replaying
blocks, all the witnessing information can be thrown away.
We also rely on the correct use of tags in other rules (at this time, only in
the rules that are shown in Figure \ref{fig:rules:ledger}).
\textbf{Non-integral calculations within the Plutus interpreter.} At present, all Plutus calculations use integral types. If any
non-integral calculations are needed in the future (e.g. from the Actus contracts that are implemented using
the Marlowe interpreter), these should
be performed exactly as in the Shelley ledger (as described in~\cite{non_int}). This is a matter of
ensuring deterministic script validation outcomes: inconsistent rounding, for example, could
result in different validation outcomes when running the same script on the same
arguments.
\begin{figure}[htb]
\begin{equation}
\inference[Scripts-Val]
{
\fun{txvaltag}~\var{tx} = \True &
\var{remExU}~\geq~0
}
{
\begin{array}{l}
\var{pp}\\
\var{tx}\\
\end{array}
\vdash
\left(
\begin{array}{r}
\var{remExU}\\
\end{array}
\right)
\trans{sval}{\epsilon}
\left(
\begin{array}{r}
\var{remExU}\\
\end{array}
\right) \\
}
\end{equation}
\begin{equation}
\inference[Scripts-Stop]
{ \\~\\
(\var{isVal},\var{remExU'})~:=~ \llbracket sc \rrbracket_
{cm,\var{remExU}} dt \\
(sc, dt, cm) := s
\\
~
\\
\fun{txvaltag}~\var{tx} = \False &
(\var{remExU'}~<~0 ~ \lor ~ \var{isVal} = \False)
}
{
\begin{array}{l}
\var{pp}\\
\var{tx}\\
\end{array}
\vdash
\left(
\begin{array}{r}
\var{remExU}\\
\end{array}
\right)
\trans{sval}{\Gamma;s}
\left(
\begin{array}{r}
\var{remExU}\\
\end{array}
\right)
}
\end{equation}
\begin{equation}
\inference[Scripts-Ind]
{
{
\begin{array}{l}
\var{pp}\\
\var{tx}\\
\end{array}
}
\vdash
\left(
{
\begin{array}{r}
\var{remExU}\\
\end{array}
}
\right)
\trans{sval}{\Gamma}
\left(
{
\begin{array}{r}
\var{remExU'}\\
\end{array}
}
\right) \\
(\var{isVal},\var{remExU''})~:=~ \llbracket sc \rrbracket
_{cm,\var{remExU'}} dt \\
(sc, dt, cm) := s & \var{remExU''}~\geq~0
}
{
\begin{array}{l}
\var{pp}\\
\var{tx}\\
\end{array}
\vdash
\left(
\begin{array}{r}
\var{remExU}\\
\end{array}
\right)
\trans{sval}{\Gamma;s}
\left(
\begin{array}{r}
\varUpdate{remExU''}\\
\end{array}
\right)
}
\end{equation}
\caption{Script validation rules}
\label{fig:rules:utxo-scrval}
\end{figure}
\subsection{Updating the UTxO State}
\label{sec:utxo-state-trans}
We have defined a separate transition system, UTXOS, to represent the two distinct
UTxO state changes: one resulting from all scripts in a transaction validating,
the other from at least one script failing to validate. Its transition types
are the same as those of the UTXO transition, see Figure
\ref{fig:ts-types:utxo-scripts}.
\begin{figure}[htb]
\emph{State transitions}
\begin{equation*}
\_ \vdash
\var{\_} \trans{utxo, utxos}{\_} \var{\_}
\subseteq \powerset (\UTxOEnv \times \UTxOState \times \GoguenTx \times \UTxOState)
\end{equation*}
%
\caption{UTxO and UTxO script state update types}
\label{fig:ts-types:utxo-scripts}
\end{figure}
There are two rules, corresponding to the two possible state changes of the
UTxO state in the UTXOS transition system (Figure~\ref{fig:rules:utxo-state-upd}).
%
In both cases, the SVAL transition is called upon to verify that the $\IsValidating$
tag has been applied correctly. The function $\fun{mkPLCLst}$ is used to build
the signal list $\var{sLst}$ for the SVAL transition.
%
The first rule
applies when the validation tag is $\True$.
In this case, the states of the UTxO, fee
and deposit pots, and updates are updated exactly as in the current Shelley
ledger spec.
%
The second rule
applies when the validation tag is $\False$.
In this case, the UTxO state changes as follows:
\begin{enumerate}
\item All the
UTxO entries corresponding to the transaction inputs selected for covering
script fees are removed;
\item The sum total of the value of the marked UTxO entries
is added to the fee pot.
\end{enumerate}
\begin{figure}[htb]
\begin{equation}
\inference[Scripts-Yes]
{
\var{txb}\leteq\txbody{tx} &
\fun{txvaltag}~\var{tx} = \True
\\
~
\\
\var{sLst} := \fun{mkPLCLst}~\var{pp}~\var{tx}~\var{utxo}
\\~\\
{
\left(
\begin{array}{r}
\var{pp} \\
\var{tx} \\
\end{array}
\right)
}
\vdash
\var{\fun{txexunits}~{tx}}
\trans{sval}{sLst}\var{remExU}
\\~\\
{
\left(
\begin{array}{r}
\var{slot} \\
\var{pp} \\
\var{genDelegs} \\
\end{array}
\right)
}
\vdash \var{ups} \trans{\hyperref[fig:rules:update]{up}}{\fun{txup}~\var{tx}} \var{ups'}
\\~\\
\var{refunded} \leteq \keyRefunds{pp}{stkCreds}~{txb}
\\
\var{depositChange} \leteq
(\deposits{pp}~{stpools}~{(\txcerts{txb})}) - \var{refunded}
}
{
\begin{array}{l}
\var{slot}\\
\var{pp}\\
\var{stkCreds}\\
\var{stpools}\\
\var{genDelegs}\\
\end{array}
\vdash
\left(
\begin{array}{r}
\var{utxo} \\
\var{deposits} \\
\var{fees} \\
\var{ups} \\
\end{array}
\right)
\trans{utxos}{tx}
\left(
\begin{array}{r}
\varUpdate{\var{(\txins{txb} \subtractdom \var{utxo}) \cup \outs{slot}~{txb}}} \\
\varUpdate{\var{deposits} + \var{depositChange}} \\
\varUpdate{\var{fees} + \txfee{txb}} \\
\varUpdate{\var{ups'}} \\
\end{array}
\right) \\
}
\end{equation}
\begin{equation}
\inference[Scripts-No]
{
\var{txb}\leteq\txbody{tx} &
\fun{txvaltag}~\var{tx} = \False
\\
~
\\
\var{sLst} := \fun{mkPLCLst}~\var{pp}~\var{tx}~\var{utxo}
\\~\\
{
\left(
\begin{array}{r}
\var{pp} \\
\var{tx} \\
\end{array}
\right)
}
\vdash
\var{\fun{txexunits}~{tx}}
\trans{sval}{sLst}\var{remExU}
}
{
\begin{array}{l}
\var{slot}\\
\var{pp}\\
\var{stkCreds}\\
\var{stpools}\\
\var{genDelegs}\\
\end{array}
\vdash
\left(
\begin{array}{r}
\var{utxo} \\
\var{deposits} \\
\var{fees} \\
\var{ups} \\
\end{array}
\right)
\trans{utxos}{tx}
\left(
\begin{array}{r}
\varUpdate{\var{\fun{txinputs_{vf}}~{txb} \subtractdom \var{utxo}}} \\
\var{deposits} \\
\varUpdate{\var{fees} + \fun{ubalance}~(\fun{txinputs_{vf}}~{txb}\restrictdom \var{utxo})} \\
\var{ups} \\
\end{array}
\right)
}
\end{equation}
\caption{State update rules}
\label{fig:rules:utxo-state-upd}
\end{figure}
Figure \ref{fig:rules:utxo-shelley} shows the $\type{UTxO-inductive}$
transition rule for the UTXO transition type. Note that the
signal for this transition is specifically of type $\GoguenTx$, so it does not
work with Shelley transactions. Thus, the UTXOW rule needs to convert a $\Tx$ into
a $\GoguenTx$ to invoke the UTXO rule.
This rule has the following preconditions (plus the relevant ones
from the original Shelley specification):
\begin{enumerate}
\item The transaction is being processed within its validity interval;
\item The transaction has at least one input;
\item All inputs in a transaction correspond to UTxO entries;
\item The general accounting property holds;
\item The transaction pays fees correctly;
\item The transaction is not forging any Ada;
\item All outputs of the transaction contain only non-negative quantities;
\item The transaction size does not exceed the maximum limit;
\item The execution units budget that a transaction gives does not exceed the maximum
permitted number of units;
\item The UTXOS state transition is valid.
\end{enumerate}
The resulting state transition is defined entirely by the application of the
UTXOS rule.
\begin{figure}[htb]
\begin{equation}\label{eq:utxo-inductive-shelley}
\inference[UTxO-inductive]
{
\var{txb}\leteq\txbody{tx} &
\var{txw}\leteq\fun{txwits}~{tx} \\
\fun{txfst}~txb \leq \var{slot}
& \fun{txttl}~txb \geq \var{slot}
\\
\txins{txb} \neq \emptyset
& \txins{txb} \subseteq \dom \var{utxo}
\\
\consumed{pp}{utxo}{stkCreds}{rewards}~{txb} = \produced{slot}~{pp}~{stpools}~{txb}
\\~\\
\fun{feesOK}~(\vert~ \fun{txscripts}~{tx} \cap \ScriptPlutus ~\vert) ~pp~tx~utxo \\
\\
~
\\
\mathsf{adaID}~\notin \dom~{\fun{forge}~tx} \\
\forall txout \in \txouts{txb}, ~ \fun{getValue}~txout ~\geq ~ 0 \\~
\forall txout \in \txouts{txb}, ~ \fun{getCoin}~txout ~\geq \\
\fun{valueSize}~(\fun{getValue}~txout) * \fun{minUTxOValue}~pp \\~
\\
\fun{txsize}~{tx}\leq\fun{maxTxSize}~\var{pp} \\
\fun{txexunits}~{txb} \leq \fun{maxTxExUnits}~{pp}
\\
~
\\
{
\begin{array}{c}
\var{slot}\\
\var{pp}\\
\var{stkCreds}\\
\var{stpools}\\
\var{genDelegs}\\
\end{array}
}
\vdash
{
\left(
\begin{array}{r}
\var{utxo} \\
\var{deposits} \\
\var{fees} \\
\var{ups}\\
\end{array}
\right)
}
\trans{utxos}{\var{tx}}
{
\left(
\begin{array}{r}
\var{utxo'} \\
\var{deposits'} \\
\var{fees'} \\
\var{ups'}\\
\end{array}
\right)
}
}
{
\begin{array}{l}
\var{slot}\\
\var{pp}\\
\var{stkCreds}\\
\var{stpools}\\
\var{genDelegs}\\
\end{array}
\vdash
\left(
\begin{array}{r}
\var{utxo} \\
\var{deposits} \\
\var{fees} \\
\var{ups}\\
\end{array}
\right)
\trans{utxo}{tx}
\left(
\begin{array}{r}
\varUpdate{\var{utxo'}} \\
\varUpdate{\var{deposits'}} \\
\varUpdate{\var{fees'}} \\
\varUpdate{\var{ups'}}\\
\end{array}
\right)
}
\end{equation}
\caption{UTxO inference rules}
\label{fig:rules:utxo-shelley}
\end{figure}
\subsection{Witnessing}
\label{sec:wits}
Because of two-phase transaction validation, Plutus script validation is not part of transaction witnessing.
However, native script validation does remain part of transaction witnessing.
When witnessing a transaction, we therefore need to validate only the native scripts.
We have consequently changed the definition of the function
$\fun{scriptsNeeded}$ (see Figure~\ref{fig:functions-witnesses}) to include both multi-signature and Plutus scripts, plus the scripts that are used for any
validation purpose (forging, outputs, certificates, withdrawals).
\begin{figure}[htb]
\begin{align*}
& \hspace{-1cm}\fun{scriptsNeeded} \in \UTxO \to \GoguenTx \to \powerset (\ScriptPurpose \times \ScriptHash) \\
& \hspace{-1cm}\fun{scriptsNeeded}~\var{utxo}~\var{tx} = \\
& ~~\{ (\var{i}, \fun{validatorHash}~a) \mid i \mapsto (a, \wcard) \in \var{utxo},\\
& ~~~~~i\in\fun{txinsScript}~{(\fun{txins~\var{txb}})}~{utxo}\} \\
\cup & ~~\{ (\var{a}, \fun{stakeCred_{r}}~\var{a}) \mid a \in \dom (\AddrRWDScr
\restrictdom \fun{txwdrls}~\var{txb}) \} \\
\cup & ~~\{ (\var{cert}, \var{c}) \mid \var{cert} \in (\DCertDeleg \cup \DCertDeRegKey)\cap\fun{txcerts}~(\txbody{tx}), \\
& ~~~~~~\var{c} \in \cwitness{cert} \cap \AddrScr\} \\
\cup & ~~\{ (\var{pid}, \var{pid}) \mid \var{pid} \in \supp~(\fun{forge}~\var{txb}) \} \\
& \where \\
& ~~~~~~~ \var{txb}~=~\txbody{tx}
\end{align*}
\caption{Scripts Needed}
\label{fig:functions-witnesses}
\end{figure}
In the Goguen era, we must be able to validate both Shelley transactions
and Goguen transactions. To do this, we transform any Shelley transactions
into Goguen ones, filling any missing data fields with default values.
The only time that we need the original Shelley transaction is to check the signatures
against the hash of the original transaction body, as shown in
Figure~\ref{fig:rules:utxow-goguen}. In addition to the Shelley UTXOW preconditions
that still apply, we have made the following changes and additions:
\begin{itemize}
\item All the multi-signature scripts in the transaction validate;
\item The transaction contains exactly those scripts that are required for witnessing and no
additional ones (this includes all languages of scripts, for all purposes);
\item The transaction contains a redeemer for every item that needs to be validated
by a Plutus script;
\item The only certificates that are allowed to have scripts as witnesses
are delegation de-registration certificates;
\item The transaction has a datum for every Plutus script output that it spends;
\item The transaction has a datum for every Plutus script output that is
marked with the $\True$ tag for $\HasDV$;
\item
The hash of the subset of protocol parameters in the transaction body is equal to
the hash of the same subset of protocol parameters that are currently on the ledger;
\item The hash of the indexed redeemer structure that is attached to the transaction is
the same as $\fun{rdmrsHash}~{tx}$ (the hash value that is contained in the signed body of
the transaction).
\end{itemize}
If these conditions are all satisfied, then the resulting UTxO state change is fully determined
by the UTXO transition (the application of which is also part of the conditions).
\begin{figure}[htb]
\emph{State transitions}
\begin{equation*}
\_ \vdash
\var{\_} \trans{utxow}{\_} \var{\_}
\subseteq \powerset (\UTxOEnv \times \UTxOState \times \Tx \times \UTxOState)
\end{equation*}
%
\caption{UTxO with witnesses state update types}
\label{fig:ts-types:utxo-witness}
\end{figure}
\begin{figure}
\begin{equation}
\label{eq:utxo-witness-inductive-goguen}
\inference[UTxO-witG]
{
\var{tx}~\leteq~\fun{toGoguenTx}~{tx}_o \\~\\
\var{txb}\leteq\txbody{tx} &
\var{txw}\leteq\fun{txwits}~{tx} &
\var{tx}~\in~\GoguenTx \\
(utxo, \wcard, \wcard, \wcard) \leteq \var{utxoSt} \\
\var{witsKeyHashes} \leteq \{\fun{hashKey}~\var{vk} \vert \var{vk} \in
\dom (\txwitsVKey{txw}) \}\\~\\
\forall \var{validator} \in \fun{txscripts}~{txw} \cap \ScriptMSig,\\
\fun{runMSigScript}~\var{validator}~\var{tx}\\~\\
\{ h \mid (\wcard, h) \in \fun{scriptsNeeded}~\var{utxo}~\var{tx}\} ~=~ \dom (\fun{indexedScripts}~{tx}) \\
\forall (\var{purp}, h) \in ~\fun{scriptsNeeded}~\var{utxo}~\var{tx}, ~h\mapsto s~\in~\fun{indexedScripts}~{tx},\\
s \in \ScriptPlutus~\Leftrightarrow \exists r, \var{purp} \mapsto r \in \fun{findRdmr}~{tx}
\\~\\
\forall \var{cert}~\in~\fun{txcerts}~{txb}, \fun{regCred}~{cert}\in \PolicyID \Leftrightarrow
\var{cert} \in~ \DCertDeRegKey \\~\\
\forall~\var{txin}\in\fun{txinputs}~{txb},
\var{txin} \mapsto \var{(\wcard,\wcard,h_d)} \in \var{utxo},
\var{h_d} ~\in \fun{dom}(\fun{indexedDats}~{tx})
\\
~
\\
\forall~ix \mapsto (a,v,d_h,\True) ~\in~\fun{txouts}~{txb}, \\
\var{d_h}\in \fun{dom}~ (\fun{indexedDats}~{tx})
\\
~
\\
\fun{ppHash}~{txb}~=~\fun{hashLanguagePP}~\var{pp}~(\fun{cmlangs}~(\fun{txscripts}~\var{txw})) \\~\\
\fun{txrdmrs}~\var{txw} ~=~ \emptyset \Leftrightarrow \fun{rdmrsHash}~{txb}~=~\Nothing \\
\fun{txrdmrs}~\var{txw} ~\neq~ \emptyset \Leftrightarrow
\fun{hash}~(\fun{txrdmrs}~\var{txw})~ =~ \fun{rdmrsHash}~{txb} \\
\\~\\
\forall \var{vk} \mapsto \sigma \in \txwitsVKey{txw},
\mathcal{V}_{\var{vk}}{\serialised{tx_{o}}}_{\sigma} \\
\fun{witsVKeyNeeded}~{utxo}~{tx}~{genDelegs} \subseteq \var{witsKeyHashes}
\\~\\
genSig \leteq
\left\{
\fun{hashKey}~gkey \vert gkey \in\dom{genDelegs}
\right\}
\cap
\var{witsKeyHashes}
\\
\left\{
c\in\txcerts{txb}~\cap\DCertMir
\right\} \neq\emptyset \implies \vert genSig\vert \geq \Quorum \wedge
\fun{d}~\var{pp} > 0
\\~\\
\var{mdh}\leteq\fun{txMDhash}~\var{txb}
&
\var{md}\leteq\fun{txMD}~\var{tx}
\\
(\var{mdh}=\Nothing \land \var{md}=\Nothing)
\lor
(\var{mdh}=\fun{hashMD}~\var{md})
\\~\\
{
\begin{array}{r}
\var{slot}\\
\var{pp}\\
\var{stkCreds}\\
\var{stpools}\\
\var{genDelegs}\\
\end{array}
}
\vdash \var{utxoSt} \trans{\hyperref[fig:rules:utxo-shelley]{utxo}}{tx}
\var{utxoSt'}\\
}
{
\begin{array}{r}
\var{slot}\\
\var{pp}\\
\var{stkCreds}\\
\var{stpools}\\
\var{genDelegs}\\
\end{array}
\vdash \var{utxoSt} \trans{utxow}{{tx}_o} \varUpdate{\var{utxoSt'}}
}
\end{equation}
\caption{UTxO with witnesses inference rules for GoguenTx}
\label{fig:rules:utxow-goguen}
\end{figure}
|
{"hexsha": "5d45206e919e0e19320f9c4607ad6dc34e4daadd", "size": 40872, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "goguen/formal-spec/utxo.tex", "max_stars_repo_name": "michaelpj/cardano-ledger-specs", "max_stars_repo_head_hexsha": "d371ad2ebf5d1ddff93776fac2dfd1045aa6b06c", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "goguen/formal-spec/utxo.tex", "max_issues_repo_name": "michaelpj/cardano-ledger-specs", "max_issues_repo_head_hexsha": "d371ad2ebf5d1ddff93776fac2dfd1045aa6b06c", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "goguen/formal-spec/utxo.tex", "max_forks_repo_name": "michaelpj/cardano-ledger-specs", "max_forks_repo_head_hexsha": "d371ad2ebf5d1ddff93776fac2dfd1045aa6b06c", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.623655914, "max_line_length": 154, "alphanum_fraction": 0.6519622235, "num_tokens": 12999}
|
Require Import PV.Types.
Require Import PV.Nat.
Require Import Coq.Lists.List.
Require Import Coq.Arith.PeanoNat.
Lemma remove_not_in :
forall (a : Type) (xs : list a) (x : a),
forall (dec : DecidableEq a),
~In x xs -> xs = remove dec x xs.
Proof.
intros.
induction xs.
compute. auto.
unfold remove.
case (dec x a0).
intro.
rewrite e in H.
destruct H.
apply in_eq.
intro.
assert (~ In x xs).
intro.
destruct H.
apply in_cons.
apply H0.
specialize (IHxs H0).
rewrite IHxs in |- * at 1.
unfold remove.
auto.
Qed.
Lemma list_succ_len_eq :
forall (a : Type) (xs : list a) (x : a),
Datatypes.length (x :: xs) = S (Datatypes.length xs).
Proof.
intros.
apply Add_length with (a := x).
apply Add_head.
Qed.
Lemma remove_head_2 :
forall (a : Type) (xs : list a) (x1 x2 : a),
forall (dec : DecidableEq a),
x1 <> x2 -> remove dec x1 (x2::xs) = x2 :: (remove dec x1 xs).
Proof.
intros.
unfold remove.
case (dec x1 x2).
intro.
contradiction.
intro.
auto.
Qed.
Lemma remove_head_4 :
forall (a : Type) (xs : list a) (x1 x2 : a),
forall (dec : DecidableEq a),
x1 = x2 -> remove dec x1 (x2::xs) = remove dec x1 xs.
Proof.
intros.
rewrite H.
unfold remove.
case (dec x2 x2).
intro.
tauto.
intro.
rewrite H in H.
contradiction.
Qed.
Lemma remove_head_1 :
forall (a : Type) (xs : list a) (x : a),
forall (dec : DecidableEq a),
~ In x xs -> xs = remove dec x (x::xs).
Proof.
intro. intro.
unfold remove.
intros.
case (dec x x).
intro.
rewrite <- remove_not_in with (x := x) (dec := dec).
tauto.
apply H.
intro.
contradiction.
Qed.
Lemma remove_in_1 :
forall (a : Type) (xs : list a) (x : a) (y : a),
forall (dec : DecidableEq a),
In x (remove dec y xs) -> In x xs.
Proof.
intros.
induction xs.
compute in H. contradiction.
case (dec x a0).
intro.
rewrite e.
apply in_eq.
intro.
apply in_cons.
case (dec y a0).
intro.
rewrite e in H.
apply in_cons with (a := a0) in H.
apply in_inv in H.
destruct H.
symmetry in H.
contradiction.
rewrite remove_head_4 in H.
rewrite e in IHxs.
apply IHxs in H.
apply H.
tauto.
intro.
rewrite remove_head_2 in H.
apply in_inv in H.
destruct H.
symmetry in H.
contradiction.
apply IHxs in H.
apply H.
apply n0.
Qed.
Lemma remove_head_3 :
forall (a : Type) (xs : list a) (x y: a),
forall (dec : DecidableEq a),
In x (remove dec y xs) <-> (x <> y /\ In x xs).
Proof.
intros.
(* -> *)
split.
intro.
split.
intro.
rewrite H0 in H. apply remove_In in H. apply H.
intros.
apply remove_in_1 in H.
apply H.
(* <- *)
intros.
destruct H.
induction xs.
compute in H0. contradiction.
case (dec y a0).
intro.
rewrite <- e in H0.
apply in_inv in H0.
destruct H0.
symmetry in H0. contradiction.
apply IHxs in H0.
rewrite <- e.
rewrite remove_head_4.
apply H0.
auto.
intros.
rewrite remove_head_2.
case (dec x a0).
intro. rewrite e.
apply in_eq.
intro.
apply in_cons.
apply in_inv in H0.
destruct H0.
symmetry in H0. contradiction.
apply IHxs in H0.
apply H0.
apply n.
Qed.
Lemma list_remove_add :
forall (a : Type) (xs : list a) (x : a),
forall (dec : DecidableEq a),
NoDup xs -> In x xs ->
Add x (remove dec x xs) xs.
Proof.
intros.
induction xs.
apply in_nil in H0.
contradiction.
pose dec.
specialize (d x a0).
destruct d.
rewrite <- e.
rewrite <- remove_head_1 with (x := x) (dec := dec).
apply Add_head.
assert (~ In x xs /\ NoDup xs).
rewrite <- NoDup_cons_iff.
rewrite <- e in H.
apply H.
destruct H1.
apply H1.
specialize (in_inv H0).
intros.
destruct H1.
symmetry in H1. contradiction.
assert (NoDup xs). apply NoDup_cons_iff in H. destruct H.
apply H2.
specialize (IHxs H2 H1).
rewrite remove_head_2.
apply Add_cons.
apply IHxs.
apply n.
Qed.
Lemma list_remove_nodup_len_1 :
forall (a : Type) (xs : list a) (x : a),
forall (dec : DecidableEq a),
NoDup xs -> In x xs ->
Datatypes.length xs = S (Datatypes.length (remove dec x xs)).
Proof.
intros.
induction xs.
(* Empty list *)
apply in_nil in H0. contradiction.
(* Induction case *)
apply Add_length with (a := x).
apply list_remove_add.
apply H.
apply H0.
Qed.
Lemma remove_not_in_1 :
forall (a : Type) (xs : list a) (x : a) (y : a),
forall (dec : DecidableEq a),
~ In x (remove dec y xs) -> (x = y \/ ~ In x xs).
Proof.
intros.
pose (dec x y).
destruct s.
left.
tauto.
right.
intro.
destruct H.
induction xs.
compute. tauto.
simpl.
case (dec y a0).
intro.
assert (In x xs).
apply in_inv in H0.
destruct H0.
rewrite e in n.
rewrite H in n.
contradiction.
apply H.
specialize (IHxs H).
apply IHxs.
intro.
case (dec x a0).
intro.
rewrite e.
apply in_eq.
intro.
apply in_inv in H0.
destruct H0.
symmetry in H.
contradiction.
specialize (IHxs H).
apply in_cons.
apply IHxs.
Qed.
Lemma list_remove_nodup_len_2 :
forall (a : Type) (xs : list a) (ys : list a) (x : a),
forall (dec : DecidableEq a),
Datatypes.length xs = Datatypes.length ys ->
NoDup xs -> NoDup ys -> In x ys ->
Datatypes.length xs = S (Datatypes.length (remove dec x ys)).
Proof.
intros.
rewrite <- list_remove_nodup_len_1. apply H. apply H1.
apply H2.
Qed.
Lemma remove_not_in_3 :
forall (a : Type) (xs : list a) (x y : a),
forall (dec : DecidableEq a),
In x (remove dec y xs) -> x <> y.
Proof.
intros.
case (dec x y).
intro.
rewrite e in H.
apply remove_In in H.
contradiction.
intro.
apply n.
Qed.
Lemma remove_not_in_2 :
forall (a : Type) (xs : list a) (x : a) (y : a),
forall (dec : DecidableEq a),
In x (remove dec y xs) <-> (x <> y /\ In x xs).
Proof.
intros.
(* -> *)
split.
intro.
induction xs.
compute in H. contradiction.
pose (in_dec dec x (a0 :: xs)).
destruct s.
pose (dec x y).
destruct s.
rewrite e in H.
apply remove_In in H. contradiction.
auto.
apply remove_not_in with (dec := dec) in n.
rewrite <- remove_not_in in H.
rewrite n in H.
apply remove_In in H. contradiction.
rewrite remove_head_3 in H.
destruct H.
rewrite n in H0.
apply remove_In in H0. contradiction.
(* <- *)
intros.
destruct H.
apply remove_head_3.
auto.
Qed.
Lemma list_remove_preserves_nodup :
forall (a : Type) (xs : list a) (x : a),
forall (dec : DecidableEq a),
NoDup xs -> NoDup (remove dec x xs).
Proof.
intros.
induction H.
compute. apply NoDup_nil.
case (dec x x0).
Focus 2.
intro.
rewrite remove_head_2.
apply NoDup_cons.
intro.
rewrite remove_head_3 in H1.
destruct H1.
contradiction.
apply IHNoDup.
apply n.
intro.
rewrite e.
rewrite remove_head_4.
rewrite e in IHNoDup.
apply IHNoDup.
tauto.
Qed.
Lemma list_diff_exists :
forall (elt : Type) (xs : list elt) (ys : list elt),
DecidableEq elt ->
NoDup xs /\ NoDup ys ->
Datatypes.length xs <> Datatypes.length ys ->
exists (x : elt),
(In x xs /\ ~ In x ys) \/
(~In x xs /\ In x ys).
Proof.
(* Try #1 *)
intro.
intro.
induction xs.
intros.
induction ys.
(* Case 1: Two empty lists *)
intros.
contradiction.
(* Case 2: An empty list and a nonempty list *)
exists a.
right.
split.
auto.
apply in_eq.
(* Case 3: Two nonempty lists (a::xs) and ys *)
intros.
destruct H.
rewrite NoDup_cons_iff in H.
destruct H.
pose in_dec.
specialize s with (A := elt) (a := a) (l := ys).
assert (forall (a : elt) (l : list elt), {In a l} + {~ In a l}).
apply s.
apply X.
specialize (X0 a ys).
destruct X0.
(* Case 3b: a is in both, so throw it out and apply induction hypothesis *)
pose (Nat.eq_dec (Datatypes.length xs) (Datatypes.length ys)).
destruct s0.
pose (remove X a ys).
pose (list_remove_nodup_len_2 elt xs ys a X e H2 H1).
apply s_neq in e0.
assert (NoDup xs /\ NoDup l). split.
apply H2. apply list_remove_preserves_nodup. apply H1.
specialize (IHxs l X H3 e0).
destruct IHxs.
exists x.
destruct H4.
destruct H4.
left.
split.
apply in_cons. apply H4.
pose (remove_not_in_1 elt ys x a X).
specialize (o H5).
destruct o.
rewrite H6 in H4.
contradiction.
apply H6.
destruct H4.
right.
split.
pose (X a x).
destruct s0.
assert (~ In x l).
unfold l.
rewrite e1 in |- * at 1.
apply remove_In.
contradiction.
apply not_in_cons.
split.
auto.
apply H4.
destruct H3.
unfold l in H5.
apply remove_in_1 with (dec := X) (y := a).
apply H5.
pose (remove X a ys).
assert (NoDup l). apply list_remove_preserves_nodup. apply H1.
assert (NoDup xs /\ NoDup l). auto.
specialize (IHxs l X H4).
pose (list_eq_dec X xs ys). apply i.
(* Case: a is again in both, so exclude it *)
rewrite list_succ_len_eq in H0.
rewrite list_remove_nodup_len_1 with (xs := ys) (x := a) (dec := X) in H0.
rewrite s_neq_2 in H0.
assert (NoDup xs /\ NoDup (remove X a ys)). split.
tauto. apply list_remove_preserves_nodup. apply H1.
specialize (IHxs (remove X a ys) X H3 H0).
destruct IHxs.
exists x.
destruct H4.
destruct H4.
left.
split.
apply in_cons. apply H4.
assert (x = a \/ ~ In x ys).
apply remove_not_in_1 with (dec := X).
apply H5.
destruct H6.
rewrite H6 in H4. contradiction.
apply H6.
destruct H4.
right.
split.
apply not_in_cons.
split.
pose remove_not_in_3.
specialize (n0 elt ys x a X H5).
apply n0.
apply H4.
pose remove_in_1.
specialize (i0 elt ys x a X H5).
apply i0.
apply H1.
apply i.
(* Case *)
exists a.
left.
split.
apply in_eq.
apply n.
Qed.
|
{"author": "MichaelBurge", "repo": "pornview", "sha": "b4aefdc0e49504aa88345b96710bd86645ab2477", "save_path": "github-repos/coq/MichaelBurge-pornview", "path": "github-repos/coq/MichaelBurge-pornview/pornview-b4aefdc0e49504aa88345b96710bd86645ab2477/PV/Lists.v"}
|
from ektelo.dataset import DatasetFromRelation
import numpy as np
from ektelo import support
from ektelo.operators import TransformationOperator
class Vectorize(TransformationOperator):
stability = 1
def __init__(self, name, normed=False, weights=None, reduced_domain=None):
self.name = name
self.normed = normed
self.weights = weights
self.reduced_domain = reduced_domain
def transform(self, relation):
return DatasetFromRelation(relation,
self.name,
normed=self.normed,
weights=self.weights,
reduce_to_dom_shape=self.reduced_domain).payload.flatten()
class Where(TransformationOperator):
stability = 1
def __init__(self, query):
"""The query can be any valid pandas DataFrame query string:
for example, 'age > 30'. Note that only the domains corresponding
to fields that are marked as 'active' in the config will be updated.
"""
self.query = query
def transform(self, relation):
new_relation = relation.clone()
return new_relation.where(self.query)
class Project(TransformationOperator):
stability = 1
def __init__(self, fields):
"""The fields should be a list of strings denoting the names of
fields from the relation that should be retained.
"""
self.fields = fields
def transform(self, relation):
new_relation = relation.clone()
return new_relation.project(self.fields)
class Group(TransformationOperator):
stability = 1
def __init__(self, idxs):
self.idxs = idxs
def transform(self, X):
return X[self.idxs]
class ReduceByPartition(TransformationOperator):
stability = 1
def __init__(self, mapping):
self.mapping = mapping
def transform(self, X):
return support.reduction_matrix(self.mapping) * X
class Reshape(TransformationOperator):
stability = 1
def __init__(self, shape):
self.shape = shape
def transform(self, X):
return X.reshape(self.shape)
class Filter(TransformationOperator):
stability = 1
def __init__(self, mask):
self.mask = mask
def transform(self, X):
assert self.mask.shape == X.shape, 'mask must have same shape as X'
return self.mask * X
class Null(TransformationOperator):
stability = 1
def __init__(self):
pass
def transform(self, X):
return X
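# Minimal usage sketch (illustrative only): the array-level operators above can
# be chained on a plain numpy vector, independently of any ektelo relation or
# config. The toy data below is made up for demonstration purposes.
if __name__ == '__main__':
    x = np.arange(8, dtype=float)                           # toy data vector
    x = Filter(np.array([1, 1, 1, 1, 0, 0, 1, 1], dtype=float)).transform(x)
    x = Group(np.array([0, 1, 2, 3])).transform(x)           # keep the first four cells
    x = Reshape((2, 2)).transform(x)                         # view as a 2x2 histogram
    x = Null().transform(x)                                  # identity pass-through
    print(x)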
|
{"hexsha": "8caf1406bf79f7d51f58d9a56e3adf62757f78ea", "size": 2598, "ext": "py", "lang": "Python", "max_stars_repo_path": "ektelo/private/transformation.py", "max_stars_repo_name": "dpcomp-org/ektelo", "max_stars_repo_head_hexsha": "7629fbf106f9b9568c66a0b97f6005280022c3d8", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 32, "max_stars_repo_stars_event_min_datetime": "2018-07-26T23:17:05.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-26T14:23:39.000Z", "max_issues_repo_path": "ektelo/private/transformation.py", "max_issues_repo_name": "ektelo/ektelo", "max_issues_repo_head_hexsha": "53129fc417ae9b5a9830beb1229ec3e2611c5534", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2018-07-26T19:39:09.000Z", "max_issues_repo_issues_event_max_datetime": "2020-03-20T04:19:27.000Z", "max_forks_repo_path": "ektelo/private/transformation.py", "max_forks_repo_name": "dpcomp-org/ektelo", "max_forks_repo_head_hexsha": "7629fbf106f9b9568c66a0b97f6005280022c3d8", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 11, "max_forks_repo_forks_event_min_datetime": "2018-06-21T12:44:42.000Z", "max_forks_repo_forks_event_max_datetime": "2020-11-27T15:43:57.000Z", "avg_line_length": 22.9911504425, "max_line_length": 93, "alphanum_fraction": 0.6297151655, "include": true, "reason": "import numpy", "num_tokens": 541}
|
import numpy as np
logistic = lambda z: 1.0 / (1.0 + np.exp(-z))
tanh = lambda z: (np.exp(z) - np.exp(-z)) / (np.exp(z) + np.exp(-z))
rectifier = lambda z: np.maximum(0.0, z)
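# Quick sanity check (illustrative, not part of the original module): the
# lambdas above are vectorised through numpy, so they apply elementwise to an
# array of pre-activations.
if __name__ == '__main__':
    z = np.array([-2.0, 0.0, 2.0])
    print(logistic(z))   # ~[0.119, 0.5, 0.881]
    print(tanh(z))       # ~[-0.964, 0.0, 0.964]
    print(rectifier(z))  # [0.0, 0.0, 2.0]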
|
{"hexsha": "c6ffd7fba07289bb52f9d4f59a68af4d9adbf44e", "size": 175, "ext": "py", "lang": "Python", "max_stars_repo_path": "titanium/activation.py", "max_stars_repo_name": "MaxNoe/titanium", "max_stars_repo_head_hexsha": "ace635604d29a5607b5005653ef486b5f2fb6b9f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2017-08-13T20:03:50.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-24T09:38:56.000Z", "max_issues_repo_path": "titanium/activation.py", "max_issues_repo_name": "MaxNoe/titanium", "max_issues_repo_head_hexsha": "ace635604d29a5607b5005653ef486b5f2fb6b9f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2017-12-07T19:09:09.000Z", "max_issues_repo_issues_event_max_datetime": "2019-06-26T12:38:00.000Z", "max_forks_repo_path": "titanium/activation.py", "max_forks_repo_name": "MaxNoe/titanium", "max_forks_repo_head_hexsha": "ace635604d29a5607b5005653ef486b5f2fb6b9f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-06-26T12:34:26.000Z", "max_forks_repo_forks_event_max_datetime": "2019-06-26T12:34:26.000Z", "avg_line_length": 35.0, "max_line_length": 68, "alphanum_fraction": 0.5885714286, "include": true, "reason": "import numpy", "num_tokens": 70}
|
import numpy as np
from math import pi
import os
from src import RDModes, Config, list_tl_files
import matplotlib.pyplot as plt
plt.style.use('elr')
plt.ion()
fc = 400
z_int = 150.
cf = Config(fc=fc)
tl_files = list_tl_files(fc)
tl_data = np.load(tl_files[23])
r_a = tl_data['rplot']
rd_modes = RDModes(tl_data['c_bg'], tl_data['x_a'], tl_data['z_a'],
cf.fc, cf.z_src, s=None, c_bounds=cf.c_bounds)
xs = tl_data['xs']
dr = (rd_modes.r_plot[-1] - rd_modes.r_plot[0]) / (rd_modes.r_plot.size - 1)
r_max = 60e3
num_r = int(np.ceil(r_max / dr))
r_a_modes = (np.arange(num_r) + 1) * dr
l_len = -2 * pi / (np.diff(np.real(rd_modes.k_bg)) - np.spacing(1))
# reference energy
psi_s = np.exp(1j * pi / 4) / (rd_modes.rho0 * np.sqrt(8 * pi)) \
* rd_modes.psi_ier(rd_modes.z_src)
psi_s /= np.sqrt(rd_modes.k_bg)
psi_s *= 4 * pi
z_a = tl_data['zplot']
dz = (z_a[-1] - z_a[0]) / (z_a.size - 1)
dom_modes = (rd_modes.mode_number == 0) | (rd_modes.mode_number == 1)
# either 3 or 4 selected modes
dom_modes = np.zeros_like(dom_modes)
am = np.argmax(l_len)
if l_len[am + 1] > 6e4:
am = [am, am + 1]
else:
am = [am]
am = np.hstack([[am[0] - 1], am, [am[-1] + 1]])
labels = np.arange(rd_modes.mode_number.size)[am]
fig, ax = plt.subplots(1, 2, sharey=True, figsize=(cf.jasa_1clm, 2.75))
ax[0].plot(rd_modes.bg_prof, rd_modes.z_a, color='0.2')
ax[1].plot(rd_modes.psi_bg[am[0], :], rd_modes.z_a, label='#'+str(labels[0]))
ax[1].plot(rd_modes.psi_bg[am[1], :], rd_modes.z_a, label='#'+str(labels[1]))
ax[1].plot(rd_modes.psi_bg[am[2], :], rd_modes.z_a, label='#'+str(labels[2]))
ax[0].set_xlim(1497, 1510)
ax[1].set_ylim(150, 0)
ax[0].grid()
ax[0].set_xticks([1505], minor=True)
ax[1].grid()
ax[0].set_xlabel('Sound speed (m/s)')
ax[1].set_xlabel('Mode amplitude')
ax[0].set_ylabel('Depth (m)')
ax[1].legend(loc=(0.48, 0.02), framealpha=1)
pos = ax[0].get_position()
pos.x0 += 0.05
pos.x1 += -0.02
pos.y1 += 0.06
pos.y0 += 0.06
ax[0].set_position(pos)
pos = ax[1].get_position()
pos.x0 += -0.06
pos.x1 += 0.06
pos.y1 += 0.06
pos.y0 += 0.06
ax[1].set_position(pos)
savedir = 'reports/jasa/figures'
fig.savefig(os.path.join(savedir, 'mode_shapes.png'), dpi=300)
|
{"hexsha": "8836dee62a94a1afd038d3a528c224f6d078537c", "size": 2194, "ext": "py", "lang": "Python", "max_stars_repo_path": "reports/jasa/mode_shapes.py", "max_stars_repo_name": "nedlrichards/tau_decomp", "max_stars_repo_head_hexsha": "77560307836f67ae68f3571fb6cd0fd9d831398d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "reports/jasa/mode_shapes.py", "max_issues_repo_name": "nedlrichards/tau_decomp", "max_issues_repo_head_hexsha": "77560307836f67ae68f3571fb6cd0fd9d831398d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "reports/jasa/mode_shapes.py", "max_forks_repo_name": "nedlrichards/tau_decomp", "max_forks_repo_head_hexsha": "77560307836f67ae68f3571fb6cd0fd9d831398d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.8117647059, "max_line_length": 77, "alphanum_fraction": 0.6458523245, "include": true, "reason": "import numpy", "num_tokens": 818}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 12 12:35:01 2018
@author: abhijay
"""
import numpy as np
import pandas as pd
#import os
#import pickle
import copy
from sklearn import preprocessing
from sklearn import tree
#os.chdir('/home/abhijay/Documents/ML/hw_2/Q10')
class tree_node():
def __init__(self, splitOnAttribute):
self.attribute = splitOnAttribute
self.children = []
self.attributeValue = []
self.attributeValueType = None
def make_target_variable(data):
data['salary-bracket'] = data['salary-bracket'].apply(lambda y: 0 if y==" <=50K" else 1)
return data
def find_categorical_continuous_features(data):
categorical_features = [data.columns[col] for col, col_type in enumerate(data.dtypes) if col_type == np.dtype('O') ]
continuous_features = list(set(data.columns) - set(categorical_features))
return categorical_features, continuous_features
##### entropy calculation #####
def entropy(data):
binCount = np.bincount(data) # frequency
ind = np.nonzero(binCount)[0] # indices or attribute value
stackedBinCounts = (np.vstack((ind, binCount[ind])).T).astype(float) # stack with attribute value against frequency
stackedBinCounts[:,1] = stackedBinCounts[:,1]/data.shape[0]
return sum([(-stackedBinCount[1] * np.log2(stackedBinCount[1])) for stackedBinCount in stackedBinCounts if stackedBinCount[1]!=0.0]) # Calculate entropy
##### entropy calculation for continuous variables #####
##### Getting the splits i.e. if continuous variable is 1,2,3,4
##### Then splits are 1.5, 2.5, 3.5 (This was mentioned in class)
def gain_continuousVar(y, x, split):
    entropy_ = entropy(y) # H(Y), as mentioned in the lecture slides
entropy_ -= ((x>split).sum()/len(x)) * entropy(y[x > split])
entropy_ -= ((x<split).sum()/len(x)) * entropy(y[x < split])
    return entropy_ # return information gain (parent entropy minus weighted child entropies)
##### entropy calculation for categorical variables #####
def gain_categoricalVar(x,y):
entropy_ = entropy(x) # H(X) As mentioned in the lecture slides
binCount = np.bincount(x) # frequency
ind = np.nonzero(binCount)[0] # indices
stackedBinCounts = (np.vstack((ind, binCount[ind])).T).astype(float)
stackedBinCounts[:,1] = stackedBinCounts[:,1]/x.shape[0]
##### Calculate info gain using entropy #####
return entropy_ + sum([(-stackedBinCount[1] * entropy(y[x == stackedBinCount[0]])) for stackedBinCount in stackedBinCounts])
##### Traverse the tree and predict #####
def predict(node, x, y):
yPred = np.array([])
##### Traverse the tree #####
if node.attributeValueType == 0:
##### if categorical split #####
for child, attributeVal in zip(node.children, node.attributeValue):
if type(child) == np.ndarray:
##### if child is an array and not a pointer to a branch then predict #####
y_ = y[x[:, int(node.attribute)] == attributeVal]
y_predicted = np.full(y_.shape, np.sign(np.sum(child)))
yPred = np.append(yPred,np.array(y_predicted == y_).astype(int))
else:
##### if child is pointer to a branch then branch out #####
if x.shape[0] > 0:
x_ = x[x[:, int(node.attribute)] == attributeVal, :]
y_ = y[x[:, int(node.attribute)] == attributeVal]
y_predicted = predict(child, x_, y_)
yPred = np.append(yPred, y_predicted)
else:
##### if continuous split #####
for i, child in enumerate(node.children):
if i == 0:
##### if greater than split condition #####
if type(child) == np.ndarray:
y_ = y[x[:, int(node.attribute.split('_')[0])] > float(node.attribute.split('_')[1])]
y_predicted = np.full(y_.shape, np.sign(np.sum(child)))
yPred = np.append(yPred, np.array(y_predicted == y_).astype(int))
elif x.shape[0] > 0:
##### if child is pointer to a branch then branch out #####
x_ = x[x[:, int(node.attribute.split('_')[0])] > float(node.attribute.split('_')[1]), :]
y_ = y[x[:, int(node.attribute.split('_')[0])] > float(node.attribute.split('_')[1])]
y_predicted = predict(child, x_, y_)
yPred = np.append(yPred, y_predicted)
elif i == 1:
##### if less than split condition #####
if type(child) == np.ndarray:
##### if child is an array and not a pointer to a branch then predict #####
y_ = y[x[:, int(node.attribute.split('_')[0])] < float(node.attribute.split('_')[1])]
y_predicted = np.full(y_.shape, np.sign(np.sum(child)))
yPred = np.append(yPred, np.array(y_predicted == y_).astype(int))
elif x.shape[0] > 0:
##### if child is pointer to a branch then branch out #####
x_ = x[x[:, int(node.attribute.split('_')[0])] < float(node.attribute.split('_')[1]), :]
y_ = y[x[:, int(node.attribute.split('_')[0])] < float(node.attribute.split('_')[1])]
y_predicted = predict(child, x_, y_)
yPred = np.append(yPred, y_predicted)
return yPred
##### Pruning #####
def pruning(node, x, y):
prune = False
yPred = np.array([])
if x.shape[0] == 0:
return yPred, node, prune
##### Traverse the tree #####
if node.attributeValueType == 0:
##### if categorical split #####
for i, (child, attributeVal) in enumerate(zip(node.children, node.attributeValue)):
if type(child) == np.ndarray:
y_ = y[x[:, int(node.attribute)] == attributeVal]
y_predicted = np.full(y_.shape, np.sign(np.sum(child)))
yPred = np.append(yPred, np.array(y_predicted == y_).astype(int))
else:
if x.shape[0] > 0:
x_ = x[x[:, int(node.attribute)] == attributeVal, :]
y_ = y[x[:, int(node.attribute)] == attributeVal]
y_predicted, node.children[i], prune = pruning(child, x_, y_)
if prune:
##### Prune and assign class #####
node.children[i] = y_
yPred = np.append(yPred, y_)
prune = False
else:
yPred = np.append(yPred, y_predicted)
else:
##### if continuous split #####
for i, child in enumerate(node.children):
##### is greater than split condition #####
if i == 0:
if type(child) == np.ndarray:
y_ = y[x[:, int(node.attribute.split('_')[0])] > float(node.attribute.split('_')[1])]
y_predicted = np.full(y_.shape, np.sign(np.sum(child)))
yPred = np.append(yPred, np.array(y_predicted == y_).astype(int))
elif x.shape[0] > 0:
x_ = x[x[:, int(node.attribute.split('_')[0])] > float(node.attribute.split('_')[1]), :]
y_ = y[x[:, int(node.attribute.split('_')[0])] > float(node.attribute.split('_')[1])]
y_predicted, node.children[i], prune = pruning(child, x_, y_)
if prune:
##### Prune and assign class #####
node.children[i] = y_
yPred = np.append(yPred, y_)
prune = False
else:
yPred = np.append(yPred, y_predicted)
elif i == 1:
##### if less than split condition #####
if type(child) == np.ndarray:
y_ = y[x[:, int(node.attribute.split('_')[0])] < float(node.attribute.split('_')[1])]
y_predicted = np.full(y_.shape, np.sign(np.sum(child)))
yPred = np.append(yPred, np.array(y_predicted == y_).astype(int))
elif x.shape[0] > 0:
x_ = x[x[:, int(node.attribute.split('_')[0])] < float(node.attribute.split('_')[1]), :]
y_ = y[x[:, int(node.attribute.split('_')[0])] < float(node.attribute.split('_')[1])]
y_predicted, node.children[i], prune = pruning(child, x_, y_)
if prune:
##### Prune and assign class #####
node.children[i] = y_
yPred = np.append(yPred, y_)
prune = False
else:
yPred = np.append(yPred, y_predicted)
##### prune condition #####
##### If better validation accuracy can be achieved on pruning this branch #####
if (yPred.sum() < y.sum()):
prune = True
return yPred, node, prune
def pure(y):
return len(set(y)) == 1
# Reference: http://gabrielelanaro.github.io/blog/2016/03/03/decision-trees.html
# Took a little inspiration from the way the partition is done for optimization while training; mentioned as refer[1].
# The rest of the code is my own work; please note that every other function is my own implementation and very different from the code by gabrielelanaro.
# Even the id3_train() function is heavily tailored to the way we were supposed to implement the algorithm, and very little reference has been taken from gabrielelanaro's.
def partition(a):
return {c: (a==c).nonzero()[0] for c in np.unique(a)}
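# Illustrative example (not part of the original script):
#   partition(np.array([1, 2, 1]))  # -> {1: array([0, 2]), 2: array([1])}
# i.e. each unique attribute value is mapped to the row indices where it occurs.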
def id3_train(x, y):
if pure(y) or len(y) == 0:
return y
##### Calculate gain of categorical variables #####
gainCategoricalVar = np.apply_along_axis(gain_categoricalVar, 0, x[:,categorical_features], y)
##### Calculate gain of continuous variables #####
gainContinuousVar = {}
for continuous_feature in continuous_features:
for split in splits[continuous_feature]:
gainContinuousVar[str(continuous_feature)+'_'+str(split)] = gain_continuousVar(y,x[:,continuous_feature],split)
##### Is the featureToSplitOn categorical or continuous
if np.max(gainCategoricalVar) > gainContinuousVar[max(gainContinuousVar, key=gainContinuousVar.get)]:
##### Categorical feature to split on #####
featureToSplitOn = categorical_features[np.argmax(gainCategoricalVar)]
if np.max(gainCategoricalVar) < 1e-4:
return y
# Refer ..[1]
sets = partition(x[:, featureToSplitOn])
node = tree_node(str(featureToSplitOn))
node.attributeValueType = 0 # Categorical
# Refer ..[1]
for k, v in sets.items():
y_subset = y.take(v, axis=0)
x_subset = x.take(v, axis=0)
node.attributeValue.append(k)
node.children.append(id3_train(x_subset, y_subset))
        ##### Some attribute values were getting missed; add those splits #####
for key in (set(featuresUniqueValues[featureToSplitOn]) - set(sets.keys())):
node.attributeValue.append(key)
node.children.append(y)
else:
        ##### Continuous feature to split on #####
featureToSplitOn = max(gainContinuousVar, key=gainContinuousVar.get)
if (gainContinuousVar[max(gainContinuousVar, key=gainContinuousVar.get)] < 1e-4):
return y
dataSplitIndices = [ x[:, int(featureToSplitOn.split('_')[0])]>float(featureToSplitOn.split('_')[1]), x[:, int(featureToSplitOn.split('_')[0])]<float(featureToSplitOn.split('_')[1])]
node = tree_node(featureToSplitOn)
node.attributeValueType = 1
for indices in dataSplitIndices:
y_subset = y[indices]
x_subset = x[indices]
node.children.append(id3_train(x_subset, y_subset))
return node
if __name__ == "__main__":
col_names = ["age","workclass","education","marital-status","occupation","race","gender","hours-per-week","native-country","salary-bracket"]
##### Load data #####
train_data = pd.read_csv("income-data/income.train.txt", names = col_names)
dev_data = pd.read_csv("income-data/income.dev.txt", names = col_names)
test_data = pd.read_csv("income-data/income.test.txt", names = col_names)
train_data = make_target_variable(train_data)
test_data = make_target_variable(test_data)
dev_data = make_target_variable(dev_data)
categorical_features_, continuous_features_ = find_categorical_continuous_features(train_data.iloc[:,0:-1])
categorical_features = [train_data.columns.get_loc(c) for c in categorical_features_]
continuous_features = [train_data.columns.get_loc(c) for c in continuous_features_]
##### Encoding categorical values to labels #####
le = preprocessing.LabelEncoder()
all_df = pd.concat([train_data,test_data,dev_data])
for feature in categorical_features_:
le.fit(all_df[feature])
train_data[feature] = le.transform(train_data[feature])
test_data[feature] = le.transform(test_data[feature])
dev_data[feature] = le.transform(dev_data[feature])
featuresUniqueValues = [train_data[col].unique() for col in col_names]
##### Convert pandas dataframe to numpy array #####
x = train_data.iloc[:,0:train_data.shape[1]-1].values
y = (train_data.values)[:,-1]
x_test = test_data.iloc[:,0:test_data.shape[1]-1].values
y_test = (test_data.values)[:,-1]
x_dev = dev_data.iloc[:,0:dev_data.shape[1]-1].values
y_dev = (dev_data.values)[:,-1]
##### Getting the splits i.e. if continuous variable is 1,2,3,4
##### Then splits are 1.5, 2.5, 3.5 (This was mentioned in class)
splits = {}
for feature in continuous_features:
uniqueValues = np.unique(x[:,feature])
uniqueValues.sort()
splits[feature] = uniqueValues[0:-1] + (uniqueValues[1:] - uniqueValues[0:-1]) / 2
print ("\nTraining please wait ..... (takes 60 seconds)")
import time
start_time = time.time()
node_ = id3_train(x,y)
print("--- %s seconds ---" % (time.time() - start_time))
##### For storing the model #####
# with open("id3tree_v4", "wb") as f:
# pickle.dump(node_, f)
#
# with open("id3tree_v4", "rb") as f:
# node_ = pickle.load(f)
yPred_train = predict(node_, x, y)
print ("\nTraining Accuracy: "+str( round(100*yPred_train.sum()/x.shape[0],2))+"%")
yPred_dev = predict(node_, x_dev, y_dev)
print ("\nDev Accuracy: "+str( round(100*yPred_dev.sum()/x_dev.shape[0],2))+"%")
yPred_test = predict(node_, x_test, y_test)
print ("\nTesting Accuracy: "+str( round(100*yPred_test.sum()/x_test.shape[0],2))+"%")
print ("\n\nPruning.....")
yPred, node_pruned, prune = pruning(copy.deepcopy(node_), x_dev, y_dev)
yPred_train_pruned = predict(node_pruned, x, y)
print ("\nTraining Accuracy: "+str( round(100*yPred_train_pruned.sum()/x.shape[0],2))+"%")
yPred_dev_pruned = predict(node_pruned, x_dev, y_dev)
print ("\nDev Accuracy: "+str( round(100*yPred_dev_pruned.sum()/x_dev.shape[0],2))+"%")
yPred_test_pruned = predict(node_pruned, x_test, y_test)
print ("\nTesting Accuracy: "+str( round(100*yPred_test_pruned.sum()/x_test.shape[0],2))+"%")
print ("\n\nComparing with Scikit implementation.....")
clf = tree.DecisionTreeClassifier()
clf = clf.fit(x, y)
print ("\nTraining Accuracy: "+str( round(100*np.sum(y == clf.predict(x))/x.shape[0],2))+"%")
print ("\nDev Accuracy: "+str( round(100*np.sum(y_dev == clf.predict(x_dev))/x_dev.shape[0],2))+"%")
print ("\nTesting Accuracy: "+str( round(100*np.sum(y_test == clf.predict(x_test))/x_test.shape[0],2))+"%")
|
{"hexsha": "05f2e7331cb9a34641aecd991068062164844130", "size": 16024, "ext": "py", "lang": "Python", "max_stars_repo_path": "id3.py", "max_stars_repo_name": "abhijayghildyal/id3DecisionTree", "max_stars_repo_head_hexsha": "e6e126b91db6b086af748c34a058a937b74f4b72", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "id3.py", "max_issues_repo_name": "abhijayghildyal/id3DecisionTree", "max_issues_repo_head_hexsha": "e6e126b91db6b086af748c34a058a937b74f4b72", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "id3.py", "max_forks_repo_name": "abhijayghildyal/id3DecisionTree", "max_forks_repo_head_hexsha": "e6e126b91db6b086af748c34a058a937b74f4b72", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 46.5813953488, "max_line_length": 190, "alphanum_fraction": 0.5795057414, "include": true, "reason": "import numpy", "num_tokens": 3797}
|
/*=============================================================================
Copyright (c) 2001-2011 Joel de Guzman
Distributed under the Boost Software License, Version 1.0. (See accompanying
file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
==============================================================================*/
#if !defined(FUSION_CONVERT_09232005_1215)
#define FUSION_CONVERT_09232005_1215
#include <boost/fusion/support/config.hpp>
#include <boost/fusion/container/list/cons.hpp>
#include <boost/fusion/container/list/detail/build_cons.hpp>
#include <boost/fusion/container/list/detail/convert_impl.hpp>
#include <boost/fusion/sequence/intrinsic/empty.hpp>
#include <boost/fusion/sequence/intrinsic/begin.hpp>
#include <boost/fusion/sequence/intrinsic/end.hpp>
namespace boost { namespace fusion
{
namespace result_of
{
template <typename Sequence>
struct as_list
{
typedef typename
detail::build_cons<
typename result_of::begin<Sequence>::type
, typename result_of::end<Sequence>::type
>
build_cons;
typedef typename build_cons::type type;
BOOST_CONSTEXPR BOOST_FUSION_GPU_ENABLED
static type
call(Sequence& seq)
{
return build_cons::call(fusion::begin(seq), fusion::end(seq));
}
};
}
template <typename Sequence>
BOOST_CONSTEXPR BOOST_FUSION_GPU_ENABLED
inline typename result_of::as_list<Sequence>::type
as_list(Sequence& seq)
{
return result_of::as_list<Sequence>::call(seq);
}
template <typename Sequence>
BOOST_CONSTEXPR BOOST_FUSION_GPU_ENABLED
inline typename result_of::as_list<Sequence const>::type
as_list(Sequence const& seq)
{
return result_of::as_list<Sequence const>::call(seq);
}
}}
#endif
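// Usage sketch (illustrative comment only, not part of the original header):
//   boost::fusion::vector<int, double> v(1, 2.5);
//   boost::fusion::result_of::as_list<boost::fusion::vector<int, double> >::type
//       l = boost::fusion::as_list(v);   // cons-based list holding 1 and 2.5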
|
{"hexsha": "a3b29dabffaa5eb52c77182bf41655c5cb8f7104", "size": 2014, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "ios/Pods/boost-for-react-native/boost/fusion/container/list/convert.hpp", "max_stars_repo_name": "rudylee/expo", "max_stars_repo_head_hexsha": "b3e65a7a5b205f14a3eb6cd6fa8d13c8d663b1cc", "max_stars_repo_licenses": ["Apache-2.0", "MIT"], "max_stars_count": 8805.0, "max_stars_repo_stars_event_min_datetime": "2015-11-03T00:52:29.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T22:30:03.000Z", "max_issues_repo_path": "ios/Pods/boost-for-react-native/boost/fusion/container/list/convert.hpp", "max_issues_repo_name": "rudylee/expo", "max_issues_repo_head_hexsha": "b3e65a7a5b205f14a3eb6cd6fa8d13c8d663b1cc", "max_issues_repo_licenses": ["Apache-2.0", "MIT"], "max_issues_count": 14694.0, "max_issues_repo_issues_event_min_datetime": "2015-02-24T15:13:42.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T13:16:45.000Z", "max_forks_repo_path": "ios/Pods/boost-for-react-native/boost/fusion/container/list/convert.hpp", "max_forks_repo_name": "rudylee/expo", "max_forks_repo_head_hexsha": "b3e65a7a5b205f14a3eb6cd6fa8d13c8d663b1cc", "max_forks_repo_licenses": ["Apache-2.0", "MIT"], "max_forks_count": 1329.0, "max_forks_repo_forks_event_min_datetime": "2015-11-03T20:25:51.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T18:10:38.000Z", "avg_line_length": 33.0163934426, "max_line_length": 81, "alphanum_fraction": 0.5928500497, "num_tokens": 414}
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import Counter
import itertools
from typing import Optional, Iterable, Tuple, List
import numpy as np
import potts_model
import utils
def combine_k_rounds(num_rounds: int, mutations: Iterable[Tuple[Tuple[int, int], ...]]) -> List[Tuple[Tuple[int, int], ...]]:
"""Return the result of combining `mutations` for `num_rounds`.
Starting with a pool of M `mutations` m_1 ... m_M, stack them for K=`num_rounds` rounds. For example,
for K=3 rounds of combination, this will result in every variant (m_i + m_j + m_k), for i, j, k \\in M.
Be careful of memory usage, as this can be very large due to combinatorial possibilities.
In the best case, this scales with {M \\choose K}. But if mutations overlap at P positions,
combining them produces 1 + 2^{P} variants. So in the worst case, this will produce
{M \\choose K} * 2^{P} variants. See the definition for `utils.merge_mutation_sets` for more on
mutation merging.
Args:
num_rounds: The number of rounds of combination
mutations: The starting pool of mutations, where each mutation is an iterable of
tuples encoding mutations (position, mutation).
Returns:
A list of tuples of mutations, where each element will be a combination of
`num_rounds` mutations from `mutations`. Note that each tuple will possibly be of different lengths.
"""
if num_rounds == 0:
return list(mutations)
mutation_combinations = itertools.combinations(mutations, num_rounds + 1)
all_samples = []
for mutation_combination in mutation_combinations:
all_samples.extend(utils.merge_multiple_mutation_sets(mutation_combination))
return all_samples
def filter_mutation_set_by_position(mutation_sets: Iterable[Tuple[Tuple[int, int], ...]], limit: int = 10):
"""Return a filtered mutation set, where each position is used a maximum of `limit` times."""
filtered_mutation_sets = []
position_counter = Counter()
for mutation_set in mutation_sets:
positions = [m[0] for m in mutation_set]
if any([position_counter[position] >= limit for position in positions]):
continue
else:
position_counter.update(positions)
filtered_mutation_sets.append(mutation_set)
return filtered_mutation_sets
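# Illustrative example (inputs are assumed here, not taken from the original module):
# with limit=1, a mutation set that reuses position 1 is dropped:
#   filter_mutation_set_by_position([((1, 0), (2, 0)), ((1, 1), (3, 0))], limit=1)
#   # -> [((1, 0), (2, 0))]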
def get_epistatic_seqs_for_landscape(landscape: potts_model.PottsModel,
distance: int,
n: int,
adaptive: bool = True,
max_reuse: Optional[int] = None,
top_k: Optional[int] = None,
random_state: np.random.RandomState = np.random.RandomState(0)
) -> List[np.ndarray]:
"""Return `n` variants at `distance` that are enriched for epistasis on `landscape`.
To construct epistatic sequences, the top epistatic pairs are taken directly from the landscape
epistasis tensor, and used as building blocks for higher order mutants. If `max_reuse` is set, the
top epistatic pairs are filtered greedily to only reuse the same positions `max_reuse` times.
Args:
landscape: The landscape.
distance: The number of mutations from the landscape wildtype. Raises a ValueError if not an even number.
n: The number of variants in the test set.
adaptive: When True (False), return sequences enriched for adaptive (deleterious) epistasis
max_reuse: An integer indicating the maximum number of times a position can be reused in the starting pool
of epistatic pairs.
top_k: The number of highest magnitude interactions to use for sampling. All epistatic pairs included in the
resulting variants are guaranteed to be within the `top_k` highest magnitude.
random_state: An instance of np.random.RandomState
Return:
A List of sequences.
"""
if distance % 2 != 0:
raise ValueError('Odd distance not supported.')
if not top_k:
top_k = n
mutation_pairs = utils.get_top_n_mutation_pairs(landscape.epistasis_tensor, top_k, lowest=not adaptive)
if max_reuse is not None:
assert max_reuse > 0
mutation_pairs = filter_mutation_set_by_position(mutation_pairs, limit=max_reuse)
print(f'{len(mutation_pairs)} after filtering {top_k}')
num_rounds = distance // 2
all_combined = combine_k_rounds(num_rounds, mutation_pairs)
all_combined = [element for element in all_combined if len(element) == distance]
if len(all_combined) < n:
raise ValueError(f'Not enough ({len(all_combined)} < {n}) mutants at distance {distance}, try increasing `top_k`.')
# TODO(nthomas) after switching to np.random.Generator, we can do rng.choice(all_combined)
subset_idxs = random_state.choice(len(all_combined), n, replace=False)
subset = [all_combined[i] for i in subset_idxs]
seqs = [utils.apply_mutations(landscape.wildtype_sequence, m) for m in subset]
return seqs
|
{"hexsha": "06d4a3ce2dd778daad5c8fbb19588bd35c1713b6", "size": 5510, "ext": "py", "lang": "Python", "max_stars_repo_path": "epistasis_selection.py", "max_stars_repo_name": "captaincapsaicin/slip", "max_stars_repo_head_hexsha": "3c112f51cd11118f1e11c0c6fdd8c3d31d304d9b", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-12-03T00:36:25.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-03T00:36:25.000Z", "max_issues_repo_path": "epistasis_selection.py", "max_issues_repo_name": "captaincapsaicin/slip", "max_issues_repo_head_hexsha": "3c112f51cd11118f1e11c0c6fdd8c3d31d304d9b", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "epistasis_selection.py", "max_forks_repo_name": "captaincapsaicin/slip", "max_forks_repo_head_hexsha": "3c112f51cd11118f1e11c0c6fdd8c3d31d304d9b", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 46.3025210084, "max_line_length": 125, "alphanum_fraction": 0.7145190563, "include": true, "reason": "import numpy", "num_tokens": 1273}
|
"""
Extended kalman filter (EKF) localization sample
author: Atsushi Sakai (@Atsushi_twi)
"""
import math
import matplotlib.pyplot as plt
import numpy as np
# Covariance for EKF simulation
Q = np.diag([
0.01, # variance of location on x-axis
0.01, # variance of location on y-axis
np.deg2rad(1.0), # variance of yaw angle
1.0, # variance of velocity
    np.deg2rad(1.0) * 100, # variance of yaw rate
    100 # variance of acceleration
]) ** 2 # predict state covariance
R = np.diag([1.0, 1.0]) ** 2 # Observation x,y position covariance
print("the Q:", Q)
print("the R:", R)
# Simulation parameter
INPUT_NOISE = np.diag([1.0, np.deg2rad(30.0), 1.0, 1.0]) ** 2
GPS_NOISE = np.diag([0.5, 0.5]) ** 2
DT = 0.1 # time tick [s]
SIM_TIME = 50.0 # simulation time [s]
show_animation = 1
def calc_input():
v = 1.0 # [m/s]
yawrate = 0.1 # [rad/s]
    acceleration = 0.000
    angle_acceleration = 0.000
    u = np.array([[v], [yawrate], [acceleration], [angle_acceleration]])
return u
def observation(xTrue, xd, u):
xTrue = motion_model(xTrue, u)
# add noise to gps x-y
z = observation_model(xTrue) + GPS_NOISE @ np.random.randn(2, 1)
# add noise to input
ud = u + INPUT_NOISE @ np.random.randn(4, 1)
xd = motion_model(xd, ud)
return xTrue, z, xd, ud
def motion_model(x, u):
#[X, Y, PSI, velocity, velocity_angle, acceleration]
F = np.array([[1.0, 0, 0, 0, 0, 0],
[0, 1.0, 0, 0, 0, 0],
[0, 0, 1.0, 0, 0, 0],
[0, 0, 0, 1.0, 0, 0],
[0, 0, 0, 0, 1.0, 0],
[0, 0, 0, 0, 0, 0.0]])
    #[velocity, angle_velocity, acceleration, angle_acceleration]
B = np.array([[DT * math.cos(x[2, 0]), 0, 0, 0],
[DT * math.sin(x[2, 0]), 0, 0, 0],
[0.0, DT, 0, 0],
[0.0, 0, DT, 0],
[0, 0, 0, DT],
[0.0, 0.0, 1.0, 0.0]])
x = F @ x + B @ u
return x
def observation_model(x):
H = np.array([
[1, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0]
])
z = H @ x
return z
def jacob_f(x, u):
"""
Jacobian of Motion Model
motion model
x_{t+1} = x_t+v*dt*cos(yaw)
y_{t+1} = y_t+v*dt*sin(yaw)
yaw_{t+1} = yaw_t+omega*dt
v_{t+1} = v{t}
so
dx/dyaw = -v*dt*sin(yaw)
dx/dv = dt*cos(yaw)
dy/dyaw = v*dt*cos(yaw)
dy/dv = dt*sin(yaw)
"""
yaw = x[2, 0]
v = u[0, 0]
jF = np.array([
[1.0, 0.0, -DT * v * math.sin(yaw), DT * math.cos(yaw), 0.0, 0.0],
[0.0, 1.0, DT * v * math.cos(yaw), DT * math.sin(yaw), 0.0, 0.0],
[0.0, 0.0, 1.0, 0.0, DT, 0.0],
[0.0, 0.0, 0.0, 1.0, 0.0, DT],
[0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 1.0] ])
return jF
def jacob_h():
# Jacobian of Observation Model
jH = np.array([
[1, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0]
])
return jH
def ekf_estimation(xEst, PEst, z, u):
# Predict
xPred = motion_model(xEst, u)
jF = jacob_f(xPred, u)
PPred = jF @ PEst @ jF.T + Q
# Update
jH = jacob_h()
zPred = observation_model(xPred)
y = z - zPred
S = jH @ PPred @ jH.T + R
K = PPred @ jH.T @ np.linalg.inv(S)
xEst = xPred + K @ y
PEst = (np.eye(len(xEst)) - K @ jH) @ PPred
return xEst, PEst
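# Minimal usage sketch (illustrative, relies on the module-level Q, R and DT above):
#   xEst, PEst = ekf_estimation(np.zeros((6, 1)), np.eye(6),
#                               np.array([[0.1], [0.2]]), calc_input())
# performs one predict/update cycle from a zero initial state with a single GPS fix.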
def plot_covariance_ellipse(xEst, PEst): # pragma: no cover
Pxy = PEst[0:2, 0:2]
eigval, eigvec = np.linalg.eig(Pxy)
if eigval[0] >= eigval[1]:
bigind = 0
smallind = 1
else:
bigind = 1
smallind = 0
t = np.arange(0, 2 * math.pi + 0.1, 0.1)
a = math.sqrt(eigval[bigind])
b = math.sqrt(eigval[smallind])
x = [a * math.cos(it) for it in t]
y = [b * math.sin(it) for it in t]
angle = math.atan2(eigvec[bigind, 1], eigvec[bigind, 0])
rot = np.array([[math.cos(angle), math.sin(angle)],
[-math.sin(angle), math.cos(angle)]])
fx = rot @ (np.array([x, y]))
px = np.array(fx[0, :] + xEst[0, 0]).flatten()
py = np.array(fx[1, :] + xEst[1, 0]).flatten()
#plt.plot(px, py, "--r")
def main():
print(__file__ + " start!!")
time = 0.0
# State Vector [x y yaw v]'
xEst = np.zeros((6, 1))
xTrue = np.zeros((6, 1))
PEst = np.eye(6)
xDR = np.zeros((6, 1)) # Dead reckoning
# history
hxEst = xEst
hxTrue = xTrue
hxDR = xTrue
hz = np.zeros((2, 1))
while SIM_TIME >= time:
time += DT
u = calc_input()
xTrue, z, xDR, ud = observation(xTrue, xDR, u)
xEst, PEst = ekf_estimation(xEst, PEst, z, ud)
# store data history
hxEst = np.hstack((hxEst, xEst))
hxDR = np.hstack((hxDR, xDR))
hxTrue = np.hstack((hxTrue, xTrue))
hz = np.hstack((hz, z))
if show_animation:
plt.cla()
plt.plot(hz[0, :], hz[1, :], ".g")
plt.plot(hxTrue[0, :].flatten(),
hxTrue[1, :].flatten(), "-b")
plt.plot(hxDR[0, :].flatten(),
hxDR[1, :].flatten(), "-k")
plt.plot(hxEst[0, :].flatten(),
hxEst[1, :].flatten(), "-r")
plot_covariance_ellipse(xEst, PEst)
plt.axis("equal")
plt.grid(True)
plt.pause(0.001)
if __name__ == '__main__':
main()
|
{"hexsha": "fc599e63fb4a8ff1c1c5578d6d44ae5325de4344", "size": 5405, "ext": "py", "lang": "Python", "max_stars_repo_path": "Localization/extended_kalman_filter_6_state/extended_kalman_filter.py", "max_stars_repo_name": "MC9529/PythonRobotics", "max_stars_repo_head_hexsha": "e8aef156ccb32186e502576bdf6875475181742b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Localization/extended_kalman_filter_6_state/extended_kalman_filter.py", "max_issues_repo_name": "MC9529/PythonRobotics", "max_issues_repo_head_hexsha": "e8aef156ccb32186e502576bdf6875475181742b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Localization/extended_kalman_filter_6_state/extended_kalman_filter.py", "max_forks_repo_name": "MC9529/PythonRobotics", "max_forks_repo_head_hexsha": "e8aef156ccb32186e502576bdf6875475181742b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.4570135747, "max_line_length": 74, "alphanum_fraction": 0.4980573543, "include": true, "reason": "import numpy", "num_tokens": 2066}
|
import numpy as np
import pywcs
# **** check that rotang we are using agrees with telescope definition! ****
# -- set geometry for RSS (and write region file)
# should probably have some smarter way of storing these global parameters
pxscale=0.2507/2. # unbinned
dcr=4./60. # radius of field (deg)
#dcr=3.9/60. # radius of field (deg)
# shrink radius slightly to avoid part of slit being drawn outside of mask circle?
# global CCD parameters:
ccd_dx=2034.
ccd_xgap=70.
ccd_dy=4102.
# define centre in pixel coords
ccd_cx=(2.*(ccd_dx+ccd_xgap)+ccd_dx)/2.
ccd_cy=ccd_dy/2.
def RSSskyfromPix(cra,cdec,rotang,equinox,ccd1,ccd2,ccd3,circ_fov):
# -- Make a WCS to convert to RSS pixel coords
# Create a new WCS object. The number of axes must be set
# from the start
wcs = pywcs.WCS(naxis=2)
wcs.wcs.crpix = [ccd_cx,ccd_cy]
# wcs.wcs.crpix = [0,0] # define centre relative to zero
#wcs.wcs.cdelt = np.array([-pxscale, pxscale])
wcs.wcs.cdelt = np.array([-pxscale, pxscale])/3600.
wcs.wcs.crval = [cra, cdec]
wcs.wcs.ctype = ["RA---TAN", "DEC--TAN"]
wcs.wcs.crota = [-rotang, -rotang] # rotate SKY this amount?
# [This is consistent with original RSMT angle definition]
wcs.wcs.equinox=equinox
# wcs.wcs.print_contents()
#ra,dec = wcs.wcs_pix2sky(xpix,ypix, 1)
# -- convert central coords to Ra, dec and linear sizes to angular sizes:
ccd1_sky=[wcs.wcs_pix2sky(np.reshape(np.array(ccd1[0]),(1,2)), 1),ccd1[1]*pxscale,ccd1[2]*pxscale ]
ccd2_sky=[wcs.wcs_pix2sky(np.reshape(np.array(ccd2[0]),(1,2)), 1),ccd2[1]*pxscale,ccd2[2]*pxscale ]
ccd3_sky=[wcs.wcs_pix2sky(np.reshape(np.array(ccd3[0]),(1,2)), 1),ccd3[1]*pxscale,ccd3[2]*pxscale ]
circ_fov_sky=[wcs.wcs_pix2sky(np.reshape(np.array(circ_fov[0]),(1,2)), 1),circ_fov[1]*pxscale]
return ccd1_sky,ccd2_sky,ccd3_sky,circ_fov_sky
def DefineCCDsPix(cra,cdec,rotang,equinox):
#rect=Rectangle((0,0),ccd_dx,ccd_dy,color='none',ec='y')
#ax.add_patch(rect)
#rect=Rectangle((0+ccd_dx+ccd_xgap,0),ccd_dx,ccd_dy,color='none',ec='y')
#ax.add_patch(rect)
#rect=Rectangle((0+2.*(ccd_dx+ccd_xgap),0),ccd_dx,ccd_dy,color='none',ec='y')
#ax.add_patch(rect)
# Define lower left corner, + width and height
# CCD 1:
# offset=[ccd_dx/2.,ccd_dy/2.] # bottom left --> centre of chip
# ccd1=[ [0,0],ccd_dx,ccd_dy ]
# ccd2=[ [0+ccd_dx+ccd_xgap,0],ccd_dx,ccd_dy]
# ccd3=[[0+2.*(ccd_dx+ccd_xgap),0],ccd_dx,ccd_dy]
ccd1=[ [0+ccd_dx/2.,0+ccd_dy/2.],ccd_dx,ccd_dy ]
ccd2=[ [0+ccd_dx+ccd_xgap+ccd_dx/2.,0+ccd_dy/2.],ccd_dx,ccd_dy]
ccd3=[[0+2.*(ccd_dx+ccd_xgap)+ccd_dx/2.,0+ccd_dy/2.],ccd_dx,ccd_dy]
# ccd1=[ (-(ccd_dx+ccd_xgap),0),ccd_dx,ccd_dy ]
# ccd2=[ (0.,0.),ccd_dx,ccd_dy]
# ccd3=[((ccd_dx+ccd_xgap),0),ccd_dx,ccd_dy]
circ_fov=[(ccd_cx,ccd_cy),dcr*3600./pxscale] # everything in pix
# circ_fov=[(0.,0.),dcr*3600./pxscale] # everything in pix
return ccd1,ccd2,ccd3,circ_fov
def toRegionSky(sccd1,sccd2,sccd3,scirc_fov,rotang,regfile):
regstr='FK5;'
# print np.shape(sccd1[0])
# print (sccd3[0][0,0], sccd3[0][0,1], sccd3[1], sccd3[2], rotang)
xxx=open(regfile,'w')
regstr+='box(%s, %s, %s\", %s\", %s)\n' % \
(sccd1[0][0,0], sccd1[0][0,1], sccd1[1], sccd1[2], rotang)
xxx.write(regstr)
regstr='FK5;'
regstr+='box(%s, %s, %s\", %s\", %s)\n' % \
(sccd2[0][0,0], sccd2[0][0,1], sccd2[1], sccd2[2], rotang)
xxx.write(regstr)
regstr='FK5;'
regstr+='box(%s, %s, %s\", %s\", %s)\n' % \
(sccd3[0][0,0], sccd3[0][0,1], sccd3[1], sccd3[2], rotang)
xxx.write(regstr)
regstr='FK5;'
regstr+='circle(%s, %s, %s\")\n' % (scirc_fov[0][0,0],scirc_fov[0][0,1], scirc_fov[1])
xxx.write(regstr)
xxx.close()
#return regstr
if __name__ == '__main__':
cra=53.934130928509504
cdec=-27.04987517144887
rotang=70.
equinox=2000.0
# -- detector footprints, pix.:
# [This rotang is consistent with original RSMT file angle definition.]
det1,det2,det3,detc=DefineCCDsPix(cra,cdec,rotang,equinox)
#print det1,det2,det3,detc
# -- detector footprints, sky coor.:
skyd1,skyd2,skyd3,skycirc = RSSskyfromPix(cra,cdec,rotang,equinox,det1,det2,det3,detc)
#print skyd1,skyd2,skyd3,skycirc
# i think the -ve sign here is right. It needs to be opposite to the angle in the CD matrix, anyway
# **** CHECK this agrees with ROTANG defined for telescope ****
#print
# -- write region file
regfile='test.reg'
toRegionSky(skyd1,skyd2,skyd3,skycirc,rotang,regfile)
#fk5;circle(35.458437,-3.7601347,5.3749084")
# ============================================================================================================================
def FOVTest(cra,cdec,equinox,rotang,slitra,slitdec,slit_length,tilt):
# ---- Test if slit lies entirely within RSS FOV
# If the top or bottom edge of the slit lies outside the nominal FOV, flag as outside
# -- convert sky coords to pix:
wcs = pywcs.WCS(naxis=2)
wcs.wcs.crpix = [ccd_cx,ccd_cy]
# wcs.wcs.crpix = [0,0] # define centre relative to zero
#wcs.wcs.cdelt = np.array([-pxscale, pxscale])
wcs.wcs.cdelt = np.array([-pxscale, pxscale])/3600.
wcs.wcs.crval = [cra, cdec]
wcs.wcs.ctype = ["RA---TAN", "DEC--TAN"]
wcs.wcs.crota = [rotang, rotang] # rotate SKY this amount?
wcs.wcs.equinox=equinox
xp,yp = wcs.wcs_sky2pix(slitra,slitdec, 1)
# since we are only supporting rotang = 0/180 so far:
ytop=yp+(slit_length/2.0/pxscale)
ybot=yp-(slit_length/2.0/pxscale)
# assume slit width is negligible. could add small safety padding to dcr
# --test perimeter of field:
fov_flag = np.zeros(len(cra))
    # 0 means in FOV, 1 means outside:
    maxdis2 = (dcr*3600.0/pxscale)**2 # squared FOV radius in pixels, to match the squared distances below
topdis2 = (ytop-ccd_cy)**2 + (xp-ccd_cx)**2
botdis2 = (ybot-ccd_cy)**2 + (xp-ccd_cx)**2
#oof=np.reshape((topdis2>=maxdis2).nonzero(),-1)
oof=np.reshape((topdis2>=maxdis2).nonzero(),-1)
fov_flag[oof]=1
oof=np.reshape((botdis2>=maxdis2).nonzero(),-1)
fov_flag[oof]=1
return fov_flag
# =============================================================================================================================
def ConvertSlitLen(objra,objdec,slit_tilt,rotang,len1,len2):
# ---- Convert user-defined object position and (possible) asymmetric slit lengths to a symmetrized slit centre:
# take object coords, and length1, length2
# **** only support rotang=0./180. and tilt=0. for now ****
# from wiki"
# Len1 -- (real) requested length above object (in direction of PA) in arcsec; default set by parameters
# Len2 -- (real) requested length below object (opposite PA) in arcsec; default set by parameters
slit_length=len1+len2 # symmetrized
# calculate shift from midpoint (object coords):
ddec = slit_length/2.0 - len1
slitdec = objdec+ddec # ****
slitra = objra # need to update for arbitrary angles later ****
return slitra,slitdec
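# Illustrative example (values chosen only for demonstration):
#   ConvertSlitLen(53.9, -27.0, 0., 0., 6., 2.)  # -> (53.9, -29.0)
# i.e. the symmetrized slit centre is shifted by slit_length/2. - len1 = -2 in declination.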
# =============================================================================================================================
def FindCollisions(slit_ra,slit_dec,slit_length,slit_width,cra,cdec,rotang,equinox,xpad,ypad,xleft,xright):
idebug=0
# xleft,xright are spectral lengths in pixels - set to large dummy values for now
# **** again, only does special case of PA=0/180, no slit tilt...
from pyslit_optimize import RSSpixfromSky
nslits=len(slit_ra)
# -- identify collisions between slits and tag ID of colliding objects:
coll_flag=['false']*nslits
coll_ids=['none']*nslits ## can assign a list of collisions to any item in this list
xp,yp=RSSpixfromSky(cra,cdec,rotang,slit_ra,slit_dec,equinox)
# **** i think for general case i just need to replace slit width below with projected height which would be:
# total_height = slit_length*cos(theta) + 2.* 0.5 * slit_width*sin(theta)
# where theta is some combination of rotang and tilt
#
# need to update optimiser for more general case, too.
tx0=xp-xleft-xpad/2.
tx1=xp+xright+xpad/2. # [ careful switching between lower left and width and lower left and upper right notation ]
ty0=yp-(slit_length/2.)-ypad/2.
    ty1=yp+(slit_length/2.)+ypad/2.
# dumb but reliable way of checking:
for ii in range(np.size(tx0)):
        if idebug: print(ii)
tcoll_ids=[]
for jj in range(np.size(tx0)):
# -- check if this rectangle overlaps with any currently in mask:
if ii == jj: continue # don't compare slit with itself
            if idebug: print('comparing slit ', ii, ' with ', jj)
# http://tech-read.com/2009/02/06/program-to-check-rectangle-overlapping/
r1x1=tx0[ii]
r1x2=tx1[ii]
r1y1=ty0[ii]
r1y2=ty1[ii]
r2x1=tx0[jj]
r2x2=tx1[jj]
r2y1=ty0[jj]
r2y2=ty1[jj]
# isOVerlap= ((r1x2 >= r2x1) &&
# (r1y2 >= r2y1) &&
# (r1x1 <= r2x2) &&
# (r1y1 <= r2y2));
#print np.shape(r1x2),np.shape(r2x1)
if ((r1x2 >= r2x1) and \
(r1y2 >= r2y1) and \
(r1x1 <= r2x2) and \
(r1y1 <= r2y2)) : coll_flag[ii]='true'
# else: olap=0
            if idebug: print(r1y1, r1y2, r2y1, r2y2)
#if (r1y2 >= r2y1) and (r1y1 <= r2y2) : olap=1
tcoll_ids.append(jj)
coll_ids[ii]=tcoll_ids
# **** might want slit name rather than ID number ****
    return coll_flag,coll_ids
|
{"hexsha": "e50b1ad0c4697a95841d589cd990d358b9b899a9", "size": 9819, "ext": "py", "lang": "Python", "max_stars_repo_path": "proptools/RSS_geom.py", "max_stars_repo_name": "Richard-Tarbell/pysalt", "max_stars_repo_head_hexsha": "2815d5533c7e60b7042f2bc3cf46cecdd38fc609", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 12, "max_stars_repo_stars_event_min_datetime": "2015-02-22T08:35:04.000Z", "max_stars_repo_stars_event_max_datetime": "2020-11-12T11:32:34.000Z", "max_issues_repo_path": "proptools/RSS_geom.py", "max_issues_repo_name": "Richard-Tarbell/pysalt", "max_issues_repo_head_hexsha": "2815d5533c7e60b7042f2bc3cf46cecdd38fc609", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 17, "max_issues_repo_issues_event_min_datetime": "2015-02-24T18:40:26.000Z", "max_issues_repo_issues_event_max_datetime": "2018-06-05T12:52:59.000Z", "max_forks_repo_path": "proptools/RSS_geom.py", "max_forks_repo_name": "Richard-Tarbell/pysalt", "max_forks_repo_head_hexsha": "2815d5533c7e60b7042f2bc3cf46cecdd38fc609", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 12, "max_forks_repo_forks_event_min_datetime": "2015-03-20T14:46:19.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-12T18:30:01.000Z", "avg_line_length": 37.4770992366, "max_line_length": 127, "alphanum_fraction": 0.5996537326, "include": true, "reason": "import numpy", "num_tokens": 3299}
|
#pragma once
#include <Python.h>
#include <boost/python.hpp>
#include <sys/inotify.h>
#include <blackboard/Adapter.hpp>
#include <types/BehaviourRequest.hpp>
#include <utils/Timer.hpp>
class Blackboard;
#define INBUF_LEN 32 * (sizeof(struct inotify_event) + 16)
class PythonSkill : Adapter {
public:
static PythonSkill* getInstance(Blackboard *blackboard);
PythonSkill(Blackboard *blackboard);
~PythonSkill();
/* Check if Python needs a reload, and run a cycle of behaviour */
BehaviourRequest execute();
private:
// used by inotify
static PythonSkill *instance;
int inotify_fd;
char inotify_buf[INBUF_LEN];
fd_set inotify_fdss;
struct timeval inotify_timeout;
const char *path;
// pure python modules to import
const char* behaviourModuleName;
const char* robotModuleName;
    // used to make sure we clear the error before trying to execute again
bool errorOccured;
// refs to pure python objects
boost::python::object main_module;
boost::python::object sys_module;
boost::python::object initial_modules;
boost::python::object behaviour_module;
boost::python::object behaviour_tick;
boost::python::object pyKeyboardInterrupt;
/* Start watching a directory with inotify */
void startInotify();
/* Start the Python interpreter, load robot and behaviour modules */
void startPython();
/* Reload the Python behaviour module */
void reloadPython();
/* Stand and say python error with LEDs flashing */
void doErrorStance(BehaviourRequest &req);
/* Common exception handler */
void handlePyError(const boost::python::error_already_set &ex);
/* Polls the inotify fd for changes that would require a reload */
bool inotify_Check();
/* Produce a flashy error pose. */
ActionCommand::LED flashLEDs();
};
// global blackboard, accessible by Cython
// extern Blackboard *blackboard;
|
{"hexsha": "d78f0df8f00e4a25909e3ea49d7777c164469dde", "size": 2015, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "src/Core/External/unsw/unsw/perception/behaviour/python/PythonSkill.hpp", "max_stars_repo_name": "pedrohsreis/boulos", "max_stars_repo_head_hexsha": "a5b68a32cad8cc1fb9f6fbf47fc487ef99d3166e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/Core/External/unsw/unsw/perception/behaviour/python/PythonSkill.hpp", "max_issues_repo_name": "pedrohsreis/boulos", "max_issues_repo_head_hexsha": "a5b68a32cad8cc1fb9f6fbf47fc487ef99d3166e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/Core/External/unsw/unsw/perception/behaviour/python/PythonSkill.hpp", "max_forks_repo_name": "pedrohsreis/boulos", "max_forks_repo_head_hexsha": "a5b68a32cad8cc1fb9f6fbf47fc487ef99d3166e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.0746268657, "max_line_length": 76, "alphanum_fraction": 0.6828784119, "num_tokens": 437}
|
include("$(pwd())/startup.jl")
#fp = "./ExampleFiles/STOFDATA/" # All files in this path will be processed
fp = "/media/wiebke/Extreme SSD/PSM_vs_PTR3/Data/apiTOFdata/CLOUD10/run1734_02/"
filefilterRegexp = r"\.h5$"
#rf = "./ExampleFiles/STOFDATA/2017-05-24_12h50m39_NH4.h5" # The mass scale from this file defines the mass scale of all
rf = "$(fp)APi4_Data_2015.10.20-13h34m44s.h5"
#masslist = MasslistFunctions.loadMasslist("./ExampleFiles/MASSLISTS/exampleMasslistSTOF.csv")
masslist = MasslistFunctions.loadMasslist("$(fp)MassList_NO3-_AP_03-01-20.csv")
cr = [37 137]
# alternatively: use an auto generated masslist
# masslistMasses, masslistElements, masslistElementsMasses, masslistCompositions = createMassList(C=0:20, O=0:20, N=0:1, allowRadicals=false) #
(masslistMasses, masslistElements, masslistElementsMasses, masslistCompositions) = masslist
s = (masslistMasses.>0) .& ( masslistMasses.<600)
masslistMasses = masslistMasses[s]
masslistCompositions = masslistCompositions[s,:]
####################### END OF SETTINGS ###############################################################
####################### Processing sequence ###########################################################
correctMassScaleAndExtractSumSpec(
fp,
masslistMasses,
masslistElements,
masslistElementsMasses,
masslistCompositions,
rf,
cr,
filefilterRegexp=filefilterRegexp,
onlyUseAverages = true,
plotControlMass = true,
recalibInterval = 300,
resolution = 1500,
firstNFiles=0,
lastNFiles = 0
)
baselineAndPeakshape(
fp,
peakshapeRegions=4,
peakshapeRegionStretch=1,
peakshapeQuantileValue = 0.2,
peakfindingNoiseThresholdValue = 2,
peakfindingSignalLimit=0.01
)
mtrx = deconvolute(
fp,
calcTransposed = false
)
|
{"hexsha": "4ca2e1eb5863c711f7f03f21e2b0984d3f33a43e", "size": 1815, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "processingProjects/processingProject-example_STOF.jl", "max_stars_repo_name": "weikou/TOF-Tracer2", "max_stars_repo_head_hexsha": "78406cc829d9903aece2d848960344aa09a263f9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "processingProjects/processingProject-example_STOF.jl", "max_issues_repo_name": "weikou/TOF-Tracer2", "max_issues_repo_head_hexsha": "78406cc829d9903aece2d848960344aa09a263f9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "processingProjects/processingProject-example_STOF.jl", "max_forks_repo_name": "weikou/TOF-Tracer2", "max_forks_repo_head_hexsha": "78406cc829d9903aece2d848960344aa09a263f9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.7540983607, "max_line_length": 143, "alphanum_fraction": 0.670523416, "num_tokens": 519}
|
% !TeX root = article.tex
\section{Description of plasticity in the framework of physics engines}
In this section, key concepts related to the introduced model are explained. The main differences between
traditional structural analysis and physics-engine-based approaches are reviewed and discussed.
The velocity-based formulation of constraint-based rigid body simulation
is commonly used by physics-based game
developers and film production teams.
%\citet[p.~45]{erleben.thesis}
\cite{erleben.thesis}
provides reasoning and theoretical details for the popularity of the
velocity-based formulation in constraint-based rigid body simulation over acceleration-based formulations.
The main reason is that collision handling can be done without the use of additional procedures.
Work presented by
\cite{erleben.thesis} provides the basis for the velocity-based formulation discussed in this work.
%\citet[p.~45-50]{erleben.thesis}.
% pdf page 64
In the following section, these formulations will be clarified by a simple example using the \cbullet\ implementation.
The impulse $\vec{J}$
over the time interval $\Delta t$ can be written as:
\begin{equation} \label{eq:impulseIntegral}
\vec{J} = \int_{0}^{\Delta t} \vec{f}_{true}(t) dt,
\end{equation}
where $\vec{f}_{true}(t)$ is the force.
Using Newton's second law of motion, $\vec{F}=m\vec{a}$,
the velocity $\vec{v}^{\,\Delta t}$ can be solved for as:
\begin{equation} \label{eq:impulseIntegraWithNewton}
\int_{0}^{\Delta t} m \frac{d\vec{v}}{dt}\,dt = \int_{0}^{\Delta t} \vec{f}_{true}(t)\,dt
\end{equation}
\begin{equation} \label{eq:impulse}
m(\vec{v}^{\, \Delta t} - \vec{v}^{\, 0})=\vec{J},
\end{equation}
where superscripts denote time, i.e. ${\vec{v}}^{\Delta t}=\vec{v}(\Delta t)$.
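As a simple numerical illustration (the values are chosen here only for the example),
an impulse of $\vec{J}=[4,\,0,\,0]^T\,$Ns applied to a body of mass $m=2\,$kg that is
initially at rest gives, by Equation \ref{eq:impulse},
$\vec{v}^{\,\Delta t} = \vec{J}/m = [2,\,0,\,0]^T\,$m/s, independently of how the force
$\vec{f}_{true}(t)$ is distributed over the interval $\Delta t$.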
The next position can be found
by integrating the velocity.
The updates after each time step can be summarized for positions and
for velocities, respectively, as follows:
\begin{equation} \label{eq:eomL} % pdf page 69
\vec{s}^{\, t+\Delta t} = \vec{s}^{\, t}+\Delta t S \vec{u}^{\, t+\Delta t}
\end{equation}
\begin{equation} \label{eq:eomV}
\vec{u}^{\, t+\Delta t} = \vec{u}^{\, t}+\Delta t M^{-1}(C N \vec{f}^{\ t+\Delta t} + \vec{f}_{ext}) .
\end{equation}
The symbols used in Equations \ref{eq:eomL} and \ref{eq:eomV}
are summarized in Table \ref{tab:eom}.
Figure \ref{fig:eom-contact} describes the collision of two
bodies, rectangular body $B_1$ and triangular body $B_2$
where $\vec{r}_i$ is position of body $i$,
$\vec{p}_k $ is position of contact point $k$,
$\vec{r}_{ik} $ is a vector between the center of gravity of body $i$ and contact point $k$,
and
$\vec{n}_{k}$ is the contact normal for contact point $k$.
It is common convention that the contact normal points
from the body with the smallest index to the body with the largest index, \cite{erleben.thesis}.
In case of point or edge contacts, averaging the normals of neighboring polygons can be used, \cite{Hahn:1998}.
\begin{figure}[tb!]
\centering
\begin{tikzpicture}
\coordinate (O1) at(2,1);
\coordinate (O2) at(2,4);
\coordinate (C) at(2,2);
\draw (0,0) -- (4,0) -- (4,2) -- (0,2) --(0,0);
\draw (2,2) -- (3.5,5) -- (0.5,5) -- (2,2) ;
\node at (3.5,0.4) {$B_1$};
\filldraw (O1) circle (0.5mm) node[anchor=north] {$\vec{r}_1$};
\node at (2.8,4.5) {$B_2$};
\filldraw (O2) circle (0.5mm) node[anchor=south] {$\vec{r}_2$};
\filldraw (C) circle (0.5mm) node[anchor=north west] {$\vec{p}_1$};
\draw[-{Stealth[length=3mm]}] (O1) -- (C) node[anchor=north east] {$\vec{r}_{11}$};
\draw[-{Stealth[length=3mm]}] (O2) -- (C) node[anchor=south east] {$\vec{r}_{21}$};
\draw[-latex,thick] (C) -- ++(0,1.4) node[anchor=west] {$\vec{n}_{1}$};
\node[anchor=west] at (4.5,4.5) {
$\vec{r}_i$ = position of body $i$
};
\node[anchor=west] at (4.5,4) {$\vec{p}_k $ = position of contact point $k$};
\node[anchor=west] at (4.5,3.5) {$\vec{r}_{ik} $ = $\vec{p}_k - \vec{r}_i $};
\node[anchor=west] at (4.5,3) {$\vec{n}_{k} $ = normal for contact point $k$};
\end{tikzpicture}
\caption{Illustration of nomenclature for equations of motion for collision.}
\label{fig:eom-contact}
\end{figure}
% pdf page 33, notation in typical ODEs
\begin{table}
\tbl{Nomenclature for equations of motion}{
\begin{tabular}{|l| l|}
\hline
{\bf Symbol} & {\bf Description} \\ \hline
$\vec{r}_i$ & position of center of mass for body $i$ \\ \hline
$\vec{q}_i$ & orientation for body $i$ as quaternion $\lbrack s_i, x_i, y_i, z_i \rbrack ^T $ \\
\hline
$\vec{p}_k$ & contact or joint point $k$ \\ \hline
$\vec{r}_{ki}$ & $\vec{p}_k - \vec{r}_i$ \\ \hline
$\vec{s}$ & $\lbrack \vec{r}_1, \vec{q}_1,...,\vec{r}_n, \vec{q}_n \rbrack ^T $\\ \hline
$Q_i$ & \begin{tabular}{@{}c}
rotation of quaternion $\vec{q}_i$
as matrix \\ where
$\frac{1}{2}\vec{\omega}_i \vec{q}_i=Q_i \vec{\omega}_i$
\end{tabular}
$
\frac{1}{2} \left[ \begin{array}{ccc}
-x_i & -y_i & -z_i \\
s_i & z_i & -y_i \\
-z_i & s_i & x_i \\
y_i & -x_i & s_i
\end{array} \right]
$
\\ \hline
$S$ &
\begin{tabular}{@{}c}
generalized transformation matrix \\
$ S \in \mathbb{R}^{7n \times 6n}$
\end{tabular}
$ \left[ \begin{array}{ccccc}
1 & & & & 0 \\
& Q_i \\
& & \ddots \\
& & & 1 \\
0 & & & & Q_n
\end{array} \right]
$
\\ \hline
$\vec{v}_i$ & linear velocity of center of mass for body $i$ \\ \hline
$\vec{\omega}_i$ & angular velocity of center of mass for body $i$ \\ \hline
$\vec{u}$ & $\lbrack \vec{v}_1, \vec{\omega}_1,...,\vec{v}_n, \vec{\omega}_n \rbrack ^T $\\ \hline
$M$ &
\begin{tabular}{@{}c}
generalized mass matrix \\
$ M \in \mathbb{R}^{6n \times 6n}$
\end{tabular}
$
\left[ \begin{array}{ccccc}
m_i 1 & & & & 0 \\
& I_1 \\
& & \ddots \\
& & & m_n 1 \\
0 & & & & I_n
\end{array} \right]
$
\\ \hline
$I_i$ & inertia tensor for body $i$ \\ \hline
$C$ & contact condition matrix $ C \in \mathbb{R}^{6n \times 3K}$ \\ \hline
$N$ & contact normal matrix $ N \in \mathbb{R}^{3K \times K}$ \\ \hline
\end {tabular}}
\label{tab:eom}
\end{table}
Friction in contacts and joint constraints can be handled in a unified way by rewriting
Equation \ref{eq:eomV},
\cite{erleben.thesis}, as
%\citet[p.~66-67]{erleben.thesis}
\begin{equation} \label{eq:eomV2}
\vec{u}^{\, t+\Delta t} = \vec{u}^{\, t}+\Delta t M^{-1}(
J_{contact}^T \vec{\lambda}_{contact}
+ J_{joint}^T \vec{\lambda}_{joint}
+ \vec{f}_{ext}),
\end{equation}
where Jacobian terms $J_{joint}^T$ for joints are
derived by taking time derivatives of the kinematic constraints.
Symbols used in Equation \ref{eq:eomV2} are summarized in Table
\ref{tab:eom-g} and Figure \ref{fig:eom-joint},
where $\vec{r}_{anc}^{\,i}$ is used to define at which point the
joint constraint is applied relative to body $i$.
\begin{figure}
\centering
\begin{tikzpicture}
\coordinate (O1) at(1,1);
\coordinate (O2) at(1,3);
\coordinate (C) at(1,2);
\draw (0,0) -- (2,0) -- (2,2) -- (0,2) --(0,0);
\draw (0,2) -- (2,2) -- (2,4) -- (0,4) --(0,2) ;
\node at (0.5,0.4) {$B_1$};
\filldraw (O1) circle (0.5mm) node[anchor=north] {$\vec{r}_1$};
\node at (0.5,3.5) {$B_2$};
\filldraw (O2) circle (0.5mm) node[anchor=south] {$\vec{r}_2$};
\draw[-{Stealth[length=3mm]}] (O1) -- (C) node[anchor=north east] {$\vec{r}_{anc}^{\,1}$};
\draw[-{Stealth[length=3mm]}] (O2) -- (C) node[anchor=south east] {$\vec{r}_{anc}^{\,2}$};
\node[anchor=west] at (4.5,3.5) {
$\vec{r}_i$ = position of body $i$
};
\node[anchor=west] at (4.5,3) {$\vec{r}_{anc}^{\,i} $ = body frame vector $i$};
\end{tikzpicture}
\caption{Illustration of nomenclature for equations of motion for joint.}
\label{fig:eom-joint}
\end{figure}
% pdf page 33, notation in typical ODEs
\begin{table}
\tbl{Additional terms for generalized equations of motion}{
\begin{tabular}{|l| l|}
\hline
{\bf Symbol} & {\bf Description} \\ \hline
$J_{contact}$ & Jacobian matrix for contacts \\ \hline
$\lambda_{contact}$ & vector of lagrange multipliers for contacts \\ \hline
$J_{joint}$ & Jacobian matrix for joints \\ \hline
$\lambda_{joint}$ & vector of lagrange multipliers for joints \\ \hline
\end {tabular}}
\label{tab:eom-g}
\end {table}
Constraint processing in \cbullet\ is based on ODE, \cite{ode}.
Joints are also discussed in detail in
\cite{erleben.thesis}.
%\citet[p.~60-90]{erleben.thesis}.
Equations \ref{eq:constraintEquation}, \ref{eq:lambdaLow} and
\ref{eq:lambdaHigh}
are created for each constraint.
The terms in Equation \ref{eq:constraintEquation}
can be derived using the positions and orientations of the connected bodies;
e.g., for a ball joint the formulation is based on both joint points having the same position.
In contact cases, the formulation is easier if it is done using velocities, \cite{ode.joints}.
\begin{equation} \label{eq:constraintEquation}
J_1 \vec{v}_1 + \Omega_1 \vec{\omega}_1 +
J_2 \vec{v}_2 + \Omega_2 \vec{\omega}_2 = \vec{c} + C \vec{\lambda}
\end{equation}
\begin{equation} \label{eq:lambdaLow}
\vec{\lambda} \geq \vec{l}
\end{equation}
\begin{equation} \label{eq:lambdaHigh}
\vec{\lambda} \leq \vec{h}
\end{equation}
In the following section, these equations will be explained by a simple example.
The main parameters and corresponding fields in \cbullet\
are given in Table \ref{tab:constraintParameters}.
\begin {table}
\tbl {Constraint parameters}{
\begin{tabular}{|c| l| l|}
\hline
{\bf Parameter} & {\bf Description} & {\bf btConstraintInfo2 pointer}\\ \hline
$J_1, \Omega_1$ & Jacobian & m\_J1linearAxis, m\_J1angularAxis \\
$J_2, \Omega_2$ & & m\_J2linearAxis, m\_J2angularAxis \\ \hline
$\vec{v}$ & linear velocity & \\ \hline
$\vec{\omega}$ & angular velocity & \\ \hline
$\vec{c}$ & right side vector & m\_constraintError \\ \hline
$C$ & constraint force mixing & cfm \\ \hline
$\vec{\lambda}$ & constraint force & \\ \hline
$\vec{l}$ & low limit for constraint force & m\_lowerLimit \\ \hline
$\vec{h}$ & high limit for constraint force & m\_upperLimit \\ \hline
\end {tabular}}
\label{tab:constraintParameters}
\end {table}
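In a typical iterative solver, such as projected Gauss--Seidel or sequential impulses,
the limits of Equations \ref{eq:lambdaLow} and \ref{eq:lambdaHigh} are enforced by
clamping each multiplier after every update,
\begin{equation} \label{eq:lambdaClamp}
\lambda_i \leftarrow \min( \max( \lambda_i, l_i), h_i).
\end{equation}
This is a sketch of the general idea rather than a description of the exact \cbullet\
implementation; setting $\vec{l}$ and $\vec{h}$ from plastic capacities, as done later
in this work, therefore directly limits the forces and moments that a joint can transmit.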
In structural analysis, a formulation and associated numerical solution procedure are selected
based on needed features.
Often, finite element method is used.
In most cases, a static solution with an assumption of linear strain-displacement relation
using displacement based boundary conditions is used.
\cite{bathe-1975} provides a description of the handling of various nonlinearities.
In large-displacement analysis, the formulation may be based on an updated (Eulerian) configuration or
on a Lagrangian formulation in which the initial configuration is used.
Further enhancements are material nonlinearity and dynamic analysis.
A physics engine provides dynamic analysis with large reference translations and rotations
while assuming the bodies to be rigid.
Material plasticity can be accounted for in simulations by using a suitable coefficient of restitution.
This provides a reasonable means to simulate loss of energy in collisions.
In this work, the simulation of the breaking of bodies made of ductile material is made more realistic
by splitting the rigid body
into multiple bodies that are connected by energy-absorbing joints.
A typical engineering stress-strain curve of ductile steel is shown in Figure \ref{fig:sscurve}.
\begin{figure}
\centering
\begin{tikzpicture}
\coordinate (Y) at (1,4);
\draw[->] (0,0) -- (6,0) node[right] {\large{$\epsilon$}};
\draw[->] (0,0) -- (0,5) node[above] {\large{$\sigma$}};
\draw(0,0) -- (Y) -- (2,4) .. controls (5,5) .. (6,4);
\draw[dashed](0,4) -- (Y);
\node at (-0.2,4) [align=right] {$f_y$};
\draw(0.25,1) -- (0.5,1) -- (0.5,2);
\node at (0.75,1.5) {$E$};
\node at (0.8,2.5) [anchor=west] {$\sigma = E \epsilon$ if $\sigma \le f_y$};
\end{tikzpicture}
\caption{Engineering stress-strain curve of ductile steel (not to scale).}
\label{fig:sscurve}
\end{figure}
In Figure \ref{fig:sscurve}, $\sigma$ is stress, $E$ is Young's modulus and $f_y$ is yield stress.
Engineering stress and strain mean that original dimensions are used in stress calculation,
\cite{dowling}.
%\citet[p.~108]{dowling}.
The stress-strain curve is not drawn to scale: the elastic strain would not be visible, as it is typically
0.001 to 0.005, while the fracture strain can be 100 times larger.
In this work, an elastic-fully plastic material model is used in most scenarios.
Having an elastic part allows elastic displacements for slender structures.
Elastic material behavior is ignored in the approach introduced in this work if
the deformation is related to a higher frequency
than integration stability would allow.
It should be noted that the geometry
of the bodies is not updated during the analysis, and thus engineering stress-strain properties are used.
In this work, strain hardening is taken into account by assuming that the plastic volume in bending
expands,
\cite{dowling}.
%\citet[p.~672]{dowling}.
The material that starts to yield first hardens, and as a result the yielding zone moves.
The difference between the elastic and plastic section moduli is depicted in Figure \ref{fig:wp}.
\begin{figure}[htb!]
\centering
\begin{tikzpicture}
\coordinate (S) at (2.5,5);
\draw (0,5) -- (4,5) ;
\draw (0,0) -- (4,0) ;
\draw (2,0) -- (2,5) ;
\draw (1.5,0) -- (S);
\node[above] at (S) [align=center] {\large{$\sigma<f_y$}};
\node[anchor=west] at (3,3) {
\begin{tabular}{l}
Under elastic load\\
stress increases\\
linearly from zero\\
at neutral axis to\\
maximum value at \\
surface of body
\end{tabular}
};
\end{tikzpicture}
\hspace{1cm}
\begin{tikzpicture}
\coordinate (S) at (3,5);
\draw (0,5) -- (4,5) ;
\draw (0,0) -- (4,0) ;
\draw (2,0) -- (2,5) ;
\draw (1,0) -- (1,2.5) -- (3,2.5) -- (S);
\node[above] at (S) [align=center] {\large{$\sigma=f_y$}};
\node[anchor=west] at (3,3) {
\begin{tabular}{l}
Under fully plastic load\\
stress is at yield\\
level over full\\
cross section
\end{tabular}
};
\end{tikzpicture}
\caption{Axial stress distribution over a cross section for bending under elastic and fully plastic loads.}
\label{fig:wp}
\end{figure}
As shown in Figure \ref{fig:wp}, if the stress is below the yield limit $f_y$, stress and strain are linear within the material.
If the cross section is fully plastic, the stress is assumed to be at the yield level over the whole cross section, such that
the plastic section modulus is higher than the elastic section modulus.
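For a rectangular cross section with width $b$ and height $h$ this difference can be quantified; writing $W$ for the elastic and $W_p$ for the plastic section modulus (standard results, with the notation chosen here only for illustration),
\begin{equation}
W = \frac{b\,h^2}{6}, \qquad W_p = \frac{b\,h^2}{4},
\end{equation}
so the fully plastic moment $W_p f_y = 0.25\, b\, h^2 f_y$ exceeds the moment at first yield by a factor of 1.5; the same coefficient appears in Table \ref{tab:maxForces}.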
In this work, plasticity is handled by defining maximum forces
in Equations \ref{eq:lambdaLow} and
\ref{eq:lambdaHigh} using plastic capacities, which are defined below.
The maximum force acting in the direction of $\vec{r}_{anc}^{\,i} $
is the product of the area and the yield stress:
\begin{equation} \label{eq:fN}
N_{max}= \int_A f_y \, dA.
\end{equation}
The maximum forces acting perpendicular to $\vec{r}_{anc}^{\,i} $
are the product of the area and the shear yield stress $\tau_y$:
\begin{equation} \label{eq:fQ}
Q_{max}= \int_A \tau_y \, dA.
\end{equation}
The maximum moments acting around the axes perpendicular to $\vec{r}_{anc}^{\,i} $
are integrals of the perpendicular distance
times the yield stress $f_y$, given below for the moment around the $x$-axis
and the moment around the $z$-axis, respectively:
\begin{equation} \label{eq:Mx}
M_{max}^x= \int_A z f_y \, dA,
\end{equation}
\begin{equation} \label{eq:Mz}
M_{max}^z= \int_A x f_y \, dA.
\end{equation}
The maximum moment around $\vec{r}_{anc}^{\,i} $
is the integral of the distance $d$ from the joint point
times the shear yield stress $\tau_y$:
\begin{equation} \label{eq:My}
M_{max}^y= \int_A d \, \tau_y \, dA.
\end{equation}
The maximum forces and moments for a
rectangular section with width $b$ and height $h$, using a constant yield stress,
are given in Table \ref{tab:maxForces}.
The yield shear stress is assumed to be $ 0.5\, f_y$ using the Tresca yield criterion.
If the von Mises yield criterion is used, 0.5 is replaced by 0.58 ($1/\sqrt{3}$), \cite{dowling}.
% p. 262, p. 268
These are not exact values in a multiaxial stress state but they
should be acceptable in most gaming scenarios.
\begin {table}
\tbl {Maximum forces and moments for
rectangular section with width $b$ and height $h$ using constant yield stress $f_y$}{
\begin{tabular}{| c| c|}
\hline
{\bf Direction} & {\bf Maximum value} \\ \hline
maximum shear force & $0.5\, b\, h f_y$ \\ \hline
maximum normal force & $b\, h\, f_y$ \\ \hline
maximum bending moment in direction of $h$& $0.25\, b\, h^2 \, f_y$ \\ \hline
maximum bending moment in direction of $b$ & $0.25\, b^2\, h\, f_y$ \\ \hline
maximum torque & $ \approx 0.19\, b\, h\, \frac{b\, + h}{2} f_y$ \\ \hline
\end{tabular}}
\label{tab:maxForces}
\end {table}
For torque, a closed-form solution exists only for
circular cross sections.
The given approximation is
best suited for cases where $b$ and $h$ are similar.
A better approximation for any given $b$ and $h$ can be obtained
by integrating the distance from the center of the joint over the cross section and
multiplying it by the yield shear stress, e.g. using Octave, \cite{octave}.
An example of calculation of the maximum moment around $\vec{r}_{anc}^{\,i} $
is shown in Figure \ref{fig:octave-mp}.
\begin{figure}
\centering
\lstset{language=octave}
\begin{lstlisting}
b=0.01; h=0.01; fy=200e6;
wpy=fy/2*dblquad(@(x,z)...
sqrt(x.*x+z.*z),-b/2,b/2,-h/2,h/2)
% wpy = 38.2
\end{lstlisting}
\caption{Calculation of maximum moment around $\vec{r}_{anc}^{\,i} $ using Octave.}
\label{fig:octave-mp}
\end{figure}
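The same capacities can be cross-checked outside Octave.
The short Python sketch below is only an illustration added for comparison (it is not part of the simulation code and assumes NumPy and SciPy are available); it evaluates the closed-form entries of Table \ref{tab:maxForces} and the numerically integrated torque capacity for the same $b = h = 0.01$ m and $f_y = 200$ MPa as in Figure \ref{fig:octave-mp}.
\begin{lstlisting}[language=Python]
# Illustrative sketch only: plastic capacities of a b x h rectangle.
from scipy.integrate import dblquad
b, h, fy = 0.01, 0.01, 200e6          # section size [m], yield stress [Pa]
tau_y = 0.5 * fy                      # Tresca yield shear stress
N_max = b * h * fy                    # axial capacity
Q_max = 0.5 * b * h * fy              # shear capacity
M_max = 0.25 * b * h**2 * fy          # plastic bending capacity
# torque: integrate the distance from the joint point over the cross section
T_max = tau_y * dblquad(lambda z, x: (x * x + z * z) ** 0.5,
                        -b / 2, b / 2, -h / 2, h / 2)[0]
print(N_max, Q_max, M_max, T_max)     # T_max is about 38.26, as in Octave
\end{lstlisting}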
The basic idea introduced in this study can be tested with any framework having motors and hinge constraints.
This can be done by setting the target velocity of the motor to zero and limiting
the maximum motor impulse to the plastic moment multiplied by the timestep.
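As a concrete illustration of this test setup, a minimal sketch is given below; the numerical values are assumed for the example, and the Bullet call in the final comment is only the assumed C++ entry point, not the implementation developed in this work.
\begin{lstlisting}[language=Python]
# Minimal sketch: per-step impulse limit for a hinge motor used as a
# plastic bending joint (values assumed for illustration).
b, h, fy = 0.01, 0.01, 200e6        # cross section [m], yield stress [Pa]
dt = 1.0 / 60.0                     # physics timestep [s]
m_plastic = 0.25 * b * h**2 * fy    # plastic moment capacity [Nm]
max_motor_impulse = m_plastic * dt  # impulse limit handed to the motor
# e.g. in Bullet C++: hinge->enableAngularMotor(true, 0., max_motor_impulse);
\end{lstlisting}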
Further enhancements were created and tested by forking the \cbullet\ source code
and adding new constraints, \cite{pbullet}.
Instructions for using the Windows executable and the source code are available, \cite{bp}.
|
{"hexsha": "5d238740e4b5a484233187b66593d0ee9b5fd7cc", "size": 17549, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "pdocs/thesis/article-section-2.tex", "max_stars_repo_name": "simo-11/bullet3", "max_stars_repo_head_hexsha": "af7753f5d7fbc0030a3abbe43356d9a9ea784a62", "max_stars_repo_licenses": ["Zlib"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pdocs/thesis/article-section-2.tex", "max_issues_repo_name": "simo-11/bullet3", "max_issues_repo_head_hexsha": "af7753f5d7fbc0030a3abbe43356d9a9ea784a62", "max_issues_repo_licenses": ["Zlib"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pdocs/thesis/article-section-2.tex", "max_forks_repo_name": "simo-11/bullet3", "max_forks_repo_head_hexsha": "af7753f5d7fbc0030a3abbe43356d9a9ea784a62", "max_forks_repo_licenses": ["Zlib"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.4004376368, "max_line_length": 114, "alphanum_fraction": 0.6939426748, "num_tokens": 5830}
|
"""
Utility functions for COOT model
"""
import ctypes
import datetime
import logging
import multiprocessing as mp
import os
from pathlib import Path
import random
import sys
from typing import Tuple, Dict
import numpy as np
import torch
import torch.backends.cudnn as cudnn
from torch import cuda
import torch.nn.functional as F
EVALKEYS = ["r1", "r5", "r10", "r50", "medr", "meanr", "sum"]
EVALHEADER = "Retriev | R@1 | R@5 | R@10 | R@50 | MedR | MeanR | Sum"
def create_dataloader_path(data_root,
dataset_name='youcook2',
text_feature_name='default',
video_feature_name='100m'):
"""create the path to meta file and features
Args:
data_root ([PATH]): [Path to the data folder]
Returns:
[Dict]: [path to meta data and video/language features]
"""
meta_data_path = Path(
os.path.join(data_root, "meta_{}.json".format(video_feature_name)))
video_feat_path = Path(
os.path.join(data_root, "video_feat_{}.h5".format(video_feature_name)))
language_feat_path = Path(
os.path.join(data_root, "text_{}.h5".format(text_feature_name)))
meta_text_len_path = Path(
os.path.join(data_root, "text_lens_{}.json".format(text_feature_name)))
return {
"dataset_name": dataset_name,
"meta_data": meta_data_path,
"video_feats": video_feat_path,
"language_feats": language_feat_path,
"meta_text_len": meta_text_len_path,
}
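# Example (hypothetical layout, nothing is read from disk): with
# data_root="data/youcook2" and the defaults above, the returned dict points
# at data/youcook2/meta_100m.json, video_feat_100m.h5, text_default.h5 and
# text_lens_default.json.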
def get_csv_header_keys(compute_clip_retrieval):
""" get CSV header keys"""
metric_keys = ["ep", "time"]
prefixes = ["v", "p"]
if compute_clip_retrieval:
prefixes += ["c", "s"]
for prefix in prefixes:
for key in EVALKEYS:
metric_keys.append(f"{prefix}-{key}")
return metric_keys
def expand_segment(num_frames, num_target_frames, start_frame, stop_frame):
""" expand the segment"""
num_frames_seg = stop_frame - start_frame + 1
changes = False
num_target_frames = min(num_target_frames, num_frames)
if num_frames_seg < num_target_frames:
while True:
if start_frame > 0:
start_frame -= 1
num_frames_seg += 1
changes = True
if num_frames_seg == num_target_frames:
break
if stop_frame < num_frames - 1:
stop_frame += 1
num_frames_seg += 1
changes = True
if num_frames_seg == num_target_frames:
break
return start_frame, stop_frame, changes
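# Example: expand_segment(10, 5, 3, 4) grows the two-frame segment [3, 4]
# alternately to the left and to the right until it spans five frames and
# returns (1, 5, True).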
def set_seed(seed: int) -> None:
""" set seed"""
torch.manual_seed(seed)
cuda.manual_seed(seed)
cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
cudnn.benchmark = False
cudnn.deterministic = True
def make_shared_array(np_array: np.ndarray) -> mp.Array:
""" shared array"""
flat_shape = int(np.prod(np_array.shape))
shared_array_base = mp.Array(ctypes.c_float, flat_shape)
shared_array = np.ctypeslib.as_array(shared_array_base.get_obj())
shared_array = shared_array.reshape(np_array.shape)
shared_array[:] = np_array[:]
return shared_array
def compute_indices(num_frames_orig: int, num_frames_target: int,
is_train: bool):
""" compute indices """
def round_half_down(array: np.ndarray) -> np.ndarray:
return np.ceil(array - 0.5)
if is_train:
# random sampling during training
start_points = np.linspace(0,
num_frames_orig,
num_frames_target,
endpoint=False)
start_points = round_half_down(start_points).astype(int)
offsets = start_points[1:] - start_points[:-1]
np.random.shuffle(offsets)
last_offset = num_frames_orig - np.sum(offsets)
offsets = np.concatenate([offsets, np.array([last_offset])])
new_start_points = np.cumsum(offsets) - offsets[0]
offsets = np.roll(offsets, -1)
random_offsets = offsets * np.random.rand(num_frames_target)
indices = new_start_points + random_offsets
indices = np.floor(indices).astype(int)
return indices
# center sampling during validation
start_points = np.linspace(0,
num_frames_orig,
num_frames_target,
endpoint=False)
offset = num_frames_orig / num_frames_target / 2
indices = start_points + offset
indices = np.floor(indices).astype(int)
return indices
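# Example (validation branch): with num_frames_orig=10 and
# num_frames_target=4 the start points are [0, 2.5, 5, 7.5], the half-bin
# offset is 1.25, and the sampled indices are [1, 3, 6, 8].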
def truncated_normal_fill(shape: Tuple[int],
mean: float = 0,
std: float = 1,
limit: float = 2) -> torch.Tensor:
""" truncate normal """
num_examples = 8
tmp = torch.empty(shape + (num_examples, )).normal_()
valid = (tmp < limit) & (tmp > -limit)
_, ind = valid.max(-1, keepdim=True)
return tmp.gather(-1, ind).squeeze(-1).mul_(std).add_(mean)
def retrieval_results_to_str(results: Dict[str, float], name: str):
""" retrieval results string """
return ("{:7s} | {:.3f} | {:.3f} | {:.3f} | {:.3f} | {:5.1f} | "
"{:5.1f} | {:6.3f}").format(name, *[results[a] for a in EVALKEYS])
def compute_retr_vid_to_par(video_feat, cap_feat):
""" compute similarity scores video to paragraph """
similarity_scores = np.dot(video_feat, cap_feat.T)
return compute_retrieval_metrics(similarity_scores)
def compute_retr_par_to_vid(video_feat, cap_feat):
""" compute similarity scores paragraph to video """
similarity_scores = np.dot(cap_feat, video_feat.T)
return compute_retrieval_metrics(similarity_scores)
def compute_retrieval_metrics(dot_product):
""" Compute the retrieval performance
Args:
dot_product (similarity of embeddings X1 and X2): dot_product(X1, X2)
Returns:
Retrieval evaluation metrics such as R1, R5 and so on.
"""
sort_similarity = np.sort(-dot_product, axis=1)
diag_similarity = np.diag(-dot_product)
diag_similarity = diag_similarity[:, np.newaxis]
ranks = sort_similarity - diag_similarity
ranks = np.where(ranks == 0)
ranks = ranks[1]
report_dict = dict()
report_dict['r1'] = float(np.sum(ranks == 0)) / len(ranks)
report_dict['r5'] = float(np.sum(ranks < 5)) / len(ranks)
report_dict['r10'] = float(np.sum(ranks < 10)) / len(ranks)
report_dict['r50'] = float(np.sum(ranks < 50)) / len(ranks)
report_dict['medr'] = np.median(ranks) + 1
report_dict['meanr'] = ranks.mean()
report_dict[
'sum'] = report_dict['r1'] + report_dict['r5'] + report_dict['r50']
return report_dict, ranks
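# Example: for the similarity matrix [[0.9, 0.1, 0.2], [0.3, 0.8, 0.1],
# [0.2, 0.7, 0.4]] (diagonal = matching pairs) the ranks are [0, 0, 1],
# so r1 == 2/3 and medr == 1.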
def compare_metrics(comparison, best):
""" compare metrics """
if best is None:
return True
threshold = 1e-4
rel_epsilon = threshold + 1.
return comparison > best * rel_epsilon
def get_logging_formatter():
""" logging formatter """
return logging.Formatter("%(asctime)s %(levelname)s %(message)s",
datefmt="%m%d %H%M%S")
def get_timestamp_for_filename():
""" timestamp"""
time_split = str(datetime.datetime.now()).split(".")[0].replace(" ", "_")
time_split = time_split.replace(":", "_").replace("-", "_")
return time_split
def get_logger_without_file(name, log_level="INFO") -> logging.Logger:
""" gett basic logger"""
logger = logging.getLogger(name)
logger.setLevel(log_level)
strm_hdlr = logging.StreamHandler(sys.stdout)
strm_hdlr.setFormatter(get_logging_formatter())
logger.addHandler(strm_hdlr)
return logger
def get_logger(logdir,
name,
filename="run",
log_level="INFO",
log_file=True) -> logging.Logger:
"""Get logger
Returns:
logger
"""
logger = logging.getLogger(name)
logger.setLevel(log_level)
formatter = get_logging_formatter()
if log_file:
file_path = Path(logdir) / "{}_{}.log".format(
filename,
str(datetime.datetime.now()).split(".")[0].replace(
" ", "_").replace(":", "_").replace("-", "_"))
file_hdlr = logging.FileHandler(str(file_path))
file_hdlr.setFormatter(formatter)
logger.addHandler(file_hdlr)
strm_hdlr = logging.StreamHandler(sys.stdout)
strm_hdlr.setFormatter(formatter)
logger.addHandler(strm_hdlr)
logger.propagate = False
return logger
def close_logger(logger: logging.Logger):
""" close logger """
log_handle_list = list(logger.handlers)
for i in log_handle_list:
logger.removeHandler(i)
i.flush()
i.close()
def unpack_data(data_dict, use_cuda):
"""unpack data
"""
def to_device(x):
if use_cuda and isinstance(x, torch.Tensor):
return x.cuda(non_blocking=True)
return x
return [
to_device(data_dict[a])
for a in ("vid_id", "vid_frames", "vid_frames_mask", "vid_frames_len",
"par_cap_vectors", "par_cap_mask", "par_cap_len", "clip_num",
"clip_frames", "clip_frames_len", "clip_frames_mask",
"sent_num", "sent_cap_vectors", "sent_cap_mask",
"sent_cap_len")
]
def compute_constrastive_loss(config, contrastive_loss, vid_emb, par_emb,
clip_emb, sent_emb, vid_context, par_context):
"""Normalize embeddings and calculate alignment loss in different levels:
Video-paragraph, clip-sentence, global context
Args:
contrastive_loss (loss function): MaxMarginRanking loss
vid_emb (tensor): video embeddings with shape batch*dim
par_emb (tensor): paragraph embeddings with shape batch*dim
clip_emb (tensor): clip embeddings
sent_emb (tensor): sentence embeddings
vid_context (tensor): video global context
par_context (tensor): paragraph global context
Returns:
total loss
"""
vid_context_norm = F.normalize(vid_context)
clip_emb_norm = F.normalize(clip_emb)
vid_emb_norm = F.normalize(vid_emb)
par_context_norm = F.normalize(par_context)
sent_emb_norm = F.normalize(sent_emb)
par_emb_norm = F.normalize(par_emb)
loss = contrastive_loss(vid_emb_norm, par_emb_norm)
loss += config.CONFIG.TRAIN.LOSS_CONTRASTIVE_CLIP_W * contrastive_loss(
clip_emb_norm, sent_emb_norm)
loss += contrastive_loss(vid_context_norm, par_context_norm)
loss += (contrastive_loss(vid_emb_norm, vid_emb_norm) +
contrastive_loss(par_emb_norm, par_emb_norm)) / 2
loss += (contrastive_loss(clip_emb_norm, clip_emb_norm) +
contrastive_loss(sent_emb_norm, sent_emb_norm)) / 2
return loss
def compute_cmc_loss(cyc_consistency_loss, loss_weight, clip_emb_reshape,
clip_emb_mask, clip_emb_lens, sent_emb_reshape,
sent_emb_mask, sent_emb_lens):
"""Calculate the total cycle consistency loss between video clips and paragraph sentences
Args:
cyc_consistency_loss (loss function): cycle consistency loss function
loss_weight (float): weight of loss
clip_emb_reshape
clip_emb_mask
clip_emb_lens
sent_emb_reshape
sent_emb_mask
sent_emb_lens
Returns:
total loss
"""
clip_clip_loss, sent_sent_loss = cyc_consistency_loss(
clip_emb_reshape, clip_emb_mask, clip_emb_lens, sent_emb_reshape,
sent_emb_mask, sent_emb_lens)
loss = loss_weight * (clip_clip_loss + sent_sent_loss)
return loss
|
{"hexsha": "5d0095cfb0e4086db7918fa3f2a637440e53c9a7", "size": 11745, "ext": "py", "lang": "Python", "max_stars_repo_path": "gluoncv/torch/utils/coot_utils.py", "max_stars_repo_name": "RafLit/gluon-cv", "max_stars_repo_head_hexsha": "dae504a4de8fff1421fd4fe398accbe396c504cc", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 5447, "max_stars_repo_stars_event_min_datetime": "2018-04-25T18:02:51.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T00:59:49.000Z", "max_issues_repo_path": "gluoncv/torch/utils/coot_utils.py", "max_issues_repo_name": "RafLit/gluon-cv", "max_issues_repo_head_hexsha": "dae504a4de8fff1421fd4fe398accbe396c504cc", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1566, "max_issues_repo_issues_event_min_datetime": "2018-04-25T21:14:04.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T06:42:42.000Z", "max_forks_repo_path": "gluoncv/torch/utils/coot_utils.py", "max_forks_repo_name": "RafLit/gluon-cv", "max_forks_repo_head_hexsha": "dae504a4de8fff1421fd4fe398accbe396c504cc", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1345, "max_forks_repo_forks_event_min_datetime": "2018-04-25T18:44:13.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-30T19:32:53.000Z", "avg_line_length": 33.0845070423, "max_line_length": 93, "alphanum_fraction": 0.6347381865, "include": true, "reason": "import numpy", "num_tokens": 2715}
|
library(data.table)
library(hyperSpec)
source("~/Repositories/PHESANT/summarise_phenotypes.r")
root_file <- "pharma_exomes_parsed_output_100k_chunk"
number_of_chunks <- 10
# Old
phenotype_info_file <- "../variable-info/outcome_info_final_round2.tsv"
# Latest pharma firm variable info file
phenotype_info_file <- "../variable-info/outcome_info_final_round3.tsv"
coding_info_file <- "../variable-info/data-coding-ordinal-info.txt"
out_root_file <- "pharma_parsed_and_restricted_to_100K_sample_subset"
only_males_file <- "../should_only_be_in_males.tsv"
only_females_file <- "../should_only_be_in_females.tsv"
# Biomarkers
root_file <- "pharma_exomes_biomarkers_parsed_output_100k_chunk"
number_of_chunks <- 1
out_root_file <- "pharma_biomarkers_parsed_and_restricted_to_100K_sample_subset_sex_added"
# Duplicate each continuous (IRNT) phenotype as a *_raw row, relabel the variable types,
# and drop sex-specific fields according to males_females_or_both.
create_cts_summary_file_from_get_hist_and_notes_output <- function(get_hists_and_notes_output, only_males, only_females, males_females_or_both='both')
{
get_hists_and_notes_output <- data.frame(get_hists_and_notes_output, stringsAsFactors = FALSE)
get_hists_and_notes_output$variable.type <- get_hists_and_notes_output$PHESANT.notes
get_hists_and_notes_output$variable.type[grep("IRNT", get_hists_and_notes_output$PHESANT.notes)] <- "CONTINOUS IRNT"
if(males_females_or_both == "both") {
get_hists_and_notes_output <- get_hists_and_notes_output[!(rownames(get_hists_and_notes_output) %in% c(only_males$FullFieldID, only_females$FullFieldID)),]
} else if(males_females_or_both == "males") {
get_hists_and_notes_output <- get_hists_and_notes_output[!(rownames(get_hists_and_notes_output) %in% only_females$FullFieldID),]
} else {
get_hists_and_notes_output <- get_hists_and_notes_output[!(rownames(get_hists_and_notes_output) %in% only_males$FullFieldID),]
}
get_hists_and_notes_output_cp <- get_hists_and_notes_output
get_hists_and_notes_output_cp$variable.type[grep("IRNT", get_hists_and_notes_output$PHESANT.notes)] <- "CONTINOUS RAW"
get_hists_and_notes_output_cp$PHESANT.notes <- gsub(" IRNT \\|\\|", "", get_hists_and_notes_output_cp$PHESANT.notes)
rownames(get_hists_and_notes_output_cp) <- paste0(rownames(get_hists_and_notes_output_cp), "_raw")
rownames(get_hists_and_notes_output) <- paste0(rownames(get_hists_and_notes_output), "_irnt")
get_hists_and_notes_output <- rbind(get_hists_and_notes_output, get_hists_and_notes_output_cp)
return(get_hists_and_notes_output)
}
for (i in 1:number_of_chunks)
{
# Both sexes
hist_filename <- paste0(out_root_file, "_cat_variables_both_sexes.", i, "_hist")
pheno_summary <- paste0(out_root_file, "_cat_variables_both_sexes.", i, "_phenosummary.tsv")
filename <- paste0(out_root_file, "_cat_variables_both_sexes.", i)
tsv_filename <- paste(filename, ".tsv", sep="")
log_filename <- paste0(root_file, ".", i)
log_file <- paste(log_filename, ".log", sep="")
# If the file doesn't exist, then no phenotypes in that chunk made it through PHESANT.
if(!file.exists(tsv_filename)) next
tsv_data <- read.table(tsv_filename, header=TRUE, sep='\t')
names(tsv_data)[1] <- "userId"
outcome_info <- read.table(phenotype_info_file, sep='\t', quote="", comment.char="", header=TRUE)
print(paste0("both sexes ", i))
summary <- get_hists_and_notes(hist_filename, tsv_data, log_file, outcome_info, codings_tables, samples_for_inclusion=TRUE, check=FALSE, start_column=2)
write.table(summary, file=pheno_summary, col.names=TRUE, row.names=TRUE, sep='\t', quote=FALSE)
# Males
hist_filename <- paste0(out_root_file, "_cat_variables_males.", i, "_hist")
pheno_summary <- paste0(out_root_file, "_cat_variables_males.", i, "_phenosummary.tsv")
filename <- paste0(out_root_file, "_cat_variables_males.", i)
tsv_filename <- paste(filename, ".tsv", sep="")
log_filename <- paste0(root_file, ".", i)
log_file <- paste(log_filename, ".log", sep="")
if(file.exists(tsv_filename))
{
tsv_data <- read.table(tsv_filename, header=TRUE, sep='\t')
names(tsv_data)[1] <- "userId"
outcome_info <- read.table(phenotype_info_file, sep='\t', quote="", comment.char="", header=TRUE)
print(paste0("males ", i))
summary <- get_hists_and_notes(hist_filename, tsv_data, log_file, outcome_info, codings_tables, samples_for_inclusion=TRUE, check=FALSE, start_column=2)
write.table(summary, file=pheno_summary, col.names=TRUE, row.names=TRUE, sep='\t', quote=FALSE)
}
# Females
hist_filename <- paste0(out_root_file, "_cat_variables_females.", i, "_hist")
pheno_summary <- paste0(out_root_file, "_cat_variables_females.", i, "_phenosummary.tsv")
filename <- paste0(out_root_file, "_cat_variables_females.", i)
tsv_filename <- paste(filename, ".tsv", sep="")
log_filename <- paste0(root_file, ".", i)
log_file <- paste(log_filename, ".log", sep="")
if(file.exists(tsv_filename))
{
tsv_data <- read.table(tsv_filename, header=TRUE, sep='\t')
names(tsv_data)[1] <- "userId"
outcome_info <- read.table(phenotype_info_file, sep='\t', quote="", comment.char="", header=TRUE)
print(paste0("females ", i))
summary <- get_hists_and_notes(hist_filename, tsv_data, log_file, outcome_info, codings_tables, samples_for_inclusion=TRUE, check=FALSE, start_column=2)
write.table(summary, file=pheno_summary, col.names=TRUE, row.names=TRUE, sep='\t', quote=FALSE)
}
}
# Now, remove variables that are male-only or female-only from the both-sexes summary,
# and remove the opposite sex's variables from the male and female phenotype summary files.
only_males <- fread(only_males_file, header=FALSE, sep='\t')
only_females <- fread(only_females_file, header=FALSE, sep='\t')
names(only_males) <- c("FullFieldID_rowname","FullFieldID", "FieldID", "Field", "SubFieldID", "SubField",
"N.non.missing.males", "N.missing.males", "N.controls.males", "N.cases.males",
"N.non.missing", "N.missing", "N.controls", "N.cases")
names(only_females) <- c("FullFieldID_rowname", "FullFieldID", "FieldID", "Field", "SubFieldID", "SubField",
"N.non.missing.females", "N.missing.females", "N.controls.females", "N.cases.females",
"N.non.missing", "N.missing", "N.controls", "N.cases")
# We also need to do the cts phenotypes - new format now because of how I parsed them.
system(paste0("awk 'FNR==1 && NR!=1 { while (/^<header>/) getline; } 1 {print}' ", root_file, ".*.log > ", root_file, "_all.log"))
# Both sexes
hist_filename <- paste0(out_root_file, "_cts_irnt_both_sexes_hist")
pheno_summary <- paste0(out_root_file, "_cts_irnt_both_sexes_phenosummary.tsv")
summary_file_sex_specific <- paste0(out_root_file, "_cts_both_sexes_phenosummary.tsv")
filename <- paste0(out_root_file, "_cts_irnt")
tsv_filename <- paste(filename, ".tsv", sep="")
log_filename <- paste0(root_file, "_all")
log_file <- paste(log_filename, ".log", sep="")
tsv_data <- read.table(tsv_filename, header=TRUE, sep='\t')
names(tsv_data)[1] <- "userId"
outcome_info <- read.table(phenotype_info_file, sep='\t', quote="", comment.char="", header=TRUE)
summary <- get_hists_and_notes(hist_filename, tsv_data, log_file, outcome_info, codings_tables, samples_for_inclusion=TRUE, check=FALSE, start_column=2)
write.table(summary, file=pheno_summary, col.names=TRUE, row.names=TRUE, sep='\t', quote=FALSE)
final_summary <- create_cts_summary_file_from_get_hist_and_notes_output(summary, only_males, only_females)
write.table(final_summary, file=summary_file_sex_specific, col.names=TRUE, row.names=TRUE, sep='\t', quote=FALSE)
# Males
hist_filename <- paste0(out_root_file, "_cts_irnt_males_hist")
pheno_summary <- paste0(out_root_file, "_cts_irnt_males_phenosummary.tsv")
summary_file_sex_specific <- paste0(out_root_file, "_cts_males_phenosummary.tsv")
filename <- paste0(out_root_file, "_cts_irnt_males")
tsv_filename <- paste(filename, ".tsv", sep="")
log_filename <- paste0(root_file, "_all")
log_file <- paste(log_filename, ".log", sep="")
tsv_data <- read.table(tsv_filename, header=TRUE, sep='\t')
names(tsv_data)[1] <- "userId"
summary <- get_hists_and_notes(hist_filename, tsv_data, log_file, outcome_info, codings_tables, samples_for_inclusion=TRUE, check=FALSE, start_column=2)
write.table(summary, file=pheno_summary, col.names=TRUE, row.names=TRUE, sep='\t', quote=FALSE)
final_summary <- create_cts_summary_file_from_get_hist_and_notes_output(summary, only_males, only_females, "males")
write.table(final_summary, file=summary_file_sex_specific, col.names=TRUE, row.names=TRUE, sep='\t', quote=FALSE)
# Females
hist_filename <- paste0(out_root_file, "_cts_irnt_females_hist")
pheno_summary <- paste0(out_root_file, "_cts_irnt_females_phenosummary.tsv")
summary_file_sex_specific <- paste0(out_root_file, "_cts_females_phenosummary.tsv")
filename <- paste0(out_root_file, "_cts_irnt_females")
tsv_filename <- paste(filename, ".tsv", sep="")
log_filename <- paste0(root_file, "_all")
log_file <- paste(log_filename, ".log", sep="")
tsv_data <- read.table(tsv_filename, header=TRUE, sep='\t')
names(tsv_data)[1] <- "userId"
summary <- get_hists_and_notes(hist_filename, tsv_data, log_file, outcome_info, codings_tables, samples_for_inclusion=TRUE, check=FALSE, start_column=2)
write.table(summary, file=pheno_summary, col.names=TRUE, row.names=TRUE, sep='\t', quote=FALSE)
final_summary <- create_cts_summary_file_from_get_hist_and_notes_output(summary, only_males, only_females, "females")
write.table(final_summary, file=summary_file_sex_specific, col.names=TRUE, row.names=TRUE, sep='\t', quote=FALSE)
for (i in 1:number_of_chunks)
{
# Both sexes.
pheno_summary_file <- paste0(out_root_file, "_cat_variables_both_sexes.", i, "_phenosummary.tsv")
if(file.exists(pheno_summary_file)) {
n_lines <- strsplit(system(paste0("wc -l ", pheno_summary_file), intern=TRUE), split=" ")[[1]]
n_lines <- as.integer(n_lines[length(n_lines)-1])
if(n_lines > 1)
{
pheno_summary_codings_included_file <- paste0(out_root_file, "_cat_variables_both_sexes_phesant_recodings.", i, "_phenosummary.tsv")
pheno_summary_codings_included_file_sex_specific <- paste0(out_root_file, "_cat_variables_both_sexes_phesant_recodings_remove_sex_specific.", i, "_phenosummary.tsv")
pheno_summary_codings_included <- include_PHESANT_reassignment_names(pheno_summary_file, outcome_info)
write.table(pheno_summary_codings_included, file=pheno_summary_codings_included_file,
col.names=TRUE, row.names=TRUE, sep='\t', quote=FALSE)
pheno_summary_codings_included <- pheno_summary_codings_included[!(rownames(pheno_summary_codings_included) %in% c(only_males$FullFieldID, only_females$FullFieldID)),]
pheno_summary_codings_included$variable.type <- pheno_summary_codings_included$PHESANT.notes
pheno_summary_codings_included$variable.type[grep("CAT-ORD", pheno_summary_codings_included$PHESANT.notes)] <- "CAT-ORDERED"
pheno_summary_codings_included$variable.type[grep("CAT-SINGLE-BINARY", pheno_summary_codings_included$PHESANT.notes)] <- "CAT-UNORDERED"
pheno_summary_codings_included$variable.type[grep("CAT-MUL-BINARY-VAR", pheno_summary_codings_included$PHESANT.notes)] <- "CAT-UNORDERED"
pheno_summary_codings_included$variable.type[grep("two bins and treat as binary", pheno_summary_codings_included$PHESANT.notes)] <- "CAT-UNORDERED"
write.table(pheno_summary_codings_included, file=pheno_summary_codings_included_file_sex_specific,
col.names=TRUE, row.names=TRUE, sep='\t', quote=FALSE)
}
}
# Males.
pheno_summary_file <- paste0(out_root_file, "_cat_variables_males.", i, "_phenosummary.tsv")
if(file.exists(pheno_summary_file)) {
n_lines <- strsplit(system(paste0("wc -l ", pheno_summary_file), intern=TRUE), split=" ")[[1]]
n_lines <- as.integer(n_lines[length(n_lines)-1])
if(n_lines > 1)
{
pheno_summary_codings_included_file <- paste0(out_root_file, "_cat_variables_males_phesant_recodings.", i, "_phenosummary.tsv")
pheno_summary_codings_included_file_sex_specific <- paste0(out_root_file, "_cat_variables_males_phesant_recodings_remove_sex_specific.", i, "_phenosummary.tsv")
pheno_summary_codings_included <- include_PHESANT_reassignment_names(pheno_summary_file, outcome_info)
write.table(pheno_summary_codings_included, file=pheno_summary_codings_included_file,
col.names=TRUE, row.names=TRUE, sep='\t', quote=FALSE)
pheno_summary_codings_included <- pheno_summary_codings_included[!(rownames(pheno_summary_codings_included) %in% only_females$FullFieldID),]
pheno_summary_codings_included$variable.type <- pheno_summary_codings_included$PHESANT.notes
pheno_summary_codings_included$variable.type[grep("CAT-ORD", pheno_summary_codings_included$PHESANT.notes)] <- "CAT-ORDERED"
pheno_summary_codings_included$variable.type[grep("CAT-SINGLE-BINARY", pheno_summary_codings_included$PHESANT.notes)] <- "CAT-UNORDERED"
pheno_summary_codings_included$variable.type[grep("CAT-MUL-BINARY-VAR", pheno_summary_codings_included$PHESANT.notes)] <- "CAT-UNORDERED"
pheno_summary_codings_included$variable.type[grep("two bins and treat as binary", pheno_summary_codings_included$PHESANT.notes)] <- "CAT-UNORDERED"
write.table(pheno_summary_codings_included, file=pheno_summary_codings_included_file_sex_specific,
col.names=TRUE, row.names=TRUE, sep='\t', quote=FALSE)
}
}
# Females.
pheno_summary_file <- paste0(out_root_file, "_cat_variables_females.", i, "_phenosummary.tsv")
if(file.exists(pheno_summary_file)) {
n_lines <- strsplit(system(paste0("wc -l ", pheno_summary_file), intern=TRUE), split=" ")[[1]]
n_lines <- as.integer(n_lines[length(n_lines)-1])
if(n_lines > 1)
{
pheno_summary_codings_included_file <- paste0(out_root_file, "_cat_variables_females_phesant_recodings.", i, "_phenosummary.tsv")
pheno_summary_codings_included_file_sex_specific <- paste0(out_root_file, "_cat_variables_females_phesant_recodings_remove_sex_specific.", i, "_phenosummary.tsv")
pheno_summary_codings_included <- include_PHESANT_reassignment_names(pheno_summary_file, outcome_info)
write.table(pheno_summary_codings_included, file=pheno_summary_codings_included_file,
col.names=TRUE, row.names=TRUE, sep='\t', quote=FALSE)
pheno_summary_codings_included <- pheno_summary_codings_included[!(rownames(pheno_summary_codings_included) %in% only_males$FullFieldID),]
pheno_summary_codings_included$variable.type <- pheno_summary_codings_included$PHESANT.notes
pheno_summary_codings_included$variable.type[grep("CAT-ORD", pheno_summary_codings_included$PHESANT.notes)] <- "CAT-ORDERED"
pheno_summary_codings_included$variable.type[grep("CAT-SINGLE-BINARY", pheno_summary_codings_included$PHESANT.notes)] <- "CAT-UNORDERED"
pheno_summary_codings_included$variable.type[grep("CAT-MUL-BINARY-VAR", pheno_summary_codings_included$PHESANT.notes)] <- "CAT-UNORDERED"
pheno_summary_codings_included$variable.type[grep("two bins and treat as binary", pheno_summary_codings_included$PHESANT.notes)] <- "CAT-UNORDERED"
write.table(pheno_summary_codings_included, file=pheno_summary_codings_included_file_sex_specific,
col.names=TRUE, row.names=TRUE, sep='\t', quote=FALSE)
}
}
}
# for (i in 1:number_of_chunks)
# {
# filename <- paste0(root_file, ".", i)
# tsv_filename <- paste(filename, ".tsv", sep="")
# log_file <- paste(filename, ".log", sep="")
# tsv_data <- read.table(tsv_filename, header=TRUE, sep='\t')
# outcome_info <- read.table("~/Repositories/PHESANT/variable-info/outcome_info_final_round2.tsv",
# sep='\t', quote="", comment.char="", header=TRUE)
# categorical_11214 <- get_barplot_numbers(tsv_data, log_file, outcome_info, codings_tables)
# if(i == 1) {
# categorical_full <- categorical_11214
# } else {
# categorical_full <- rbind(categorical_full, categorical_11214)
# }
# print(dim(categorical_full))
# }
# write.table(categorical_full, file="~/Repositories/PHESANT/variable-info/PHESANT_categoricals.tsv",
# col.names=TRUE, row.names=TRUE, sep='\t', quote=FALSE)
|
{"hexsha": "1bc8d91f055ad87fd8cfdf6d8cdc406f3e113863", "size": 15751, "ext": "r", "lang": "R", "max_stars_repo_path": "post_PHESANT_exome_pipeline/03_run_summarise_exomes_phenotypes_cloud.r", "max_stars_repo_name": "lganel/PHESANT", "max_stars_repo_head_hexsha": "0f94a3683986b18ca90e20bff0d8bf723bd80211", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-12-21T13:46:04.000Z", "max_stars_repo_stars_event_max_datetime": "2019-12-21T13:46:04.000Z", "max_issues_repo_path": "post_PHESANT_exome_pipeline/03_run_summarise_exomes_phenotypes_cloud.r", "max_issues_repo_name": "EvaAusChina/PHESANT", "max_issues_repo_head_hexsha": "5e114342f22c447b663ac496b79dc7633fc6cd51", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "post_PHESANT_exome_pipeline/03_run_summarise_exomes_phenotypes_cloud.r", "max_forks_repo_name": "EvaAusChina/PHESANT", "max_forks_repo_head_hexsha": "5e114342f22c447b663ac496b79dc7633fc6cd51", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 55.8546099291, "max_line_length": 170, "alphanum_fraction": 0.7766491016, "num_tokens": 4428}
|
(* Copyright (c) 2011. Greg Morrisett, Gang Tan, Joseph Tassarotti,
Jean-Baptiste Tristan, and Edward Gan.
This file is part of RockSalt.
This file is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of
the License, or (at your option) any later version.
*)
(* This file provides simple bit-level parsing combinators for disassembling
* Intel IA32 (x86) 32-bit binaries. *)
Require Coqlib.
Require Import Coq.Init.Logic.
Require Import Bool.
Require Import List.
Require Import String.
Require Import Maps.
Require Import Ascii.
Require Import ZArith.
Require Import Eqdep.
Require Import Parser.
Unset Automatic Introduction.
Set Implicit Arguments.
Local Open Scope Z_scope.
Require ExtrOcamlString.
Require ExtrOcamlNatBigInt.
(* a module for generating the parser for x86 instructions *)
Module X86_PARSER_ARG.
Require Import X86Syntax.
Require Import Bits.
Definition char_p : Set := bool.
Definition char_eq : forall (c1 c2:char_p), {c1=c2}+{c1<>c2} := bool_dec.
Inductive type : Set :=
| Int_t : type
| Register_t : type
| Byte_t : type
| Half_t : type
| Word_t : type
| Scale_t : type
| Condition_t : type
| Operand_t : type
| Instruction_t : type
| Control_Register_t : type
| Debug_Register_t : type
| Segment_Register_t : type
| Lock_or_Rep_t : type
| Bool_t : type
| Prefix_t : type
| Option_t (t: type) : type
(* Need pairs at this level if I want to have options of pairs*)
| Pair_t (t1 t2: type) : type.
Definition tipe := type.
Definition tipe_eq : forall (t1 t2:tipe), {t1=t2} + {t1<>t2}.
intros ; decide equality.
Defined.
Fixpoint tipe_m (t:tipe) :=
match t with
| Int_t => Z
| Register_t => register
| Byte_t => int8
| Half_t => int16
| Word_t => int32
| Scale_t => scale
| Condition_t => condition_type
| Operand_t => operand
| Instruction_t => instr
| Control_Register_t => control_register
| Debug_Register_t => debug_register
| Segment_Register_t => segment_register
| Lock_or_Rep_t => lock_or_rep
| Bool_t => bool
| Prefix_t => prefix
| Option_t t => option (tipe_m t)
| Pair_t t1 t2 => ((tipe_m t1) * (tipe_m t2))%type
end.
End X86_PARSER_ARG.
Module X86_PARSER.
Module X86_BASE_PARSER := Parser.Parser(X86_PARSER_ARG).
Require Import X86Syntax.
Require Import Bits.
Import X86_PARSER_ARG.
Import X86_BASE_PARSER.
Definition option_t x := tipe_t (Option_t x).
Definition int_t := tipe_t Int_t.
Definition register_t := tipe_t Register_t.
Definition byte_t := tipe_t Byte_t.
Definition half_t := tipe_t Half_t.
Definition word_t := tipe_t Word_t.
Definition scale_t := tipe_t Scale_t.
Definition condition_t := tipe_t Condition_t.
Definition operand_t := tipe_t Operand_t.
Definition instruction_t := tipe_t Instruction_t.
Definition control_register_t := tipe_t Control_Register_t.
Definition debug_register_t := tipe_t Debug_Register_t.
Definition segment_register_t := tipe_t Segment_Register_t.
Definition lock_or_rep_t := tipe_t Lock_or_Rep_t.
Definition bool_t := tipe_t Bool_t.
Definition prefix_t := tipe_t Prefix_t.
(* combinators for building parsers *)
Definition bit(x:bool) : parser char_t := Char_p x.
Definition never t : parser t := Zero_p t.
Definition always t (x:result_m t) : parser t := @Map_p unit_t t (fun (_:unit) => x) Eps_p.
Definition alt t (p1 p2:parser t) : parser t := Alt_p p1 p2.
Definition alts t (ps: list (parser t)) : parser t := List.fold_right (@alt t) (@never t) ps.
Definition map t1 t2 (p:parser t1) (f:result_m t1 -> result_m t2) : parser t2 :=
@Map_p t1 t2 f p.
Implicit Arguments map [t1 t2].
Definition seq t1 t2 (p1:parser t1) (p2:parser t2) : parser (pair_t t1 t2) := Cat_p p1 p2.
Definition cons t (pair : result_m (pair_t t (list_t t))) : result_m (list_t t) :=
(fst pair)::(snd pair).
Definition seqs t (ps:list (parser t)) : parser (list_t t) :=
List.fold_right (fun p1 p2 => map (seq p1 p2) (@cons t))
(@always (list_t t) (@nil (result_m t))) ps.
Fixpoint string_to_bool_list (s:string) : list bool :=
match s with
| EmptyString => nil
| String a s =>
(if ascii_dec a "0"%char then false else true)::(string_to_bool_list s)
end.
Fixpoint bits_n (n:nat) : result :=
match n with
| 0%nat => unit_t
| S n => pair_t char_t (bits_n n)
end.
Fixpoint field'(n:nat) : parser (bits_n n) :=
match n with
| 0%nat => Eps_p
| S n => Cat_p Any_p (field' n)
end.
Fixpoint bits2Z(n:nat)(a:Z) : result_m (bits_n n) -> result_m int_t :=
match n with
| 0%nat => fun _ => a
| S n => fun p => bits2Z n (2*a + (if (fst p) then 1 else 0)) (snd p)
end.
Definition bits2int(n:nat)(bs:result_m (bits_n n)) : result_m int_t := bits2Z n 0 bs.
Fixpoint bits (x:string) : parser (bits_n (String.length x)) :=
match x with
| EmptyString => Eps_p
| String c s =>
(Cat_p (Char_p (if ascii_dec c "0"%char then false else true)) (bits s))
end.
(* notation for building parsers *)
Infix "|+|" := alt (right associativity, at level 80).
Infix "$" := seq (right associativity, at level 70).
Infix "@" := map (right associativity, at level 75).
Notation "e %% t" := (e : result_m t) (at level 80).
Definition bitsleft t (s:string)(p:parser t) : parser t :=
bits s $ p @ (@snd _ _).
Infix "$$" := bitsleft (right associativity, at level 70).
Definition anybit : parser char_t := Any_p.
Definition field(n:nat) := (field' n) @ (bits2int n).
Definition reg := (field 3) @ (Z_to_register : _ -> result_m register_t).
Definition byte := (field 8) @ (@Word.repr 7 : _ -> result_m byte_t).
(* Definition halfword := (field 16) @ (@Word.repr 15 : _ -> result_m half_t).
Definition word := (field 32) @ (@Word.repr 31 : _ -> result_m word_t). *)
Definition halfword := (byte $ byte) @ ((fun p =>
let b0 := Word.repr (Word.unsigned (fst p)) in
let b1 := Word.repr (Word.unsigned (snd p)) in
Word.or (Word.shl b1 (Word.repr 8)) b0): _ -> result_m half_t).
Definition word := (byte $ byte $ byte $ byte) @
((fun p =>
let b0 := zero_extend8_32 (fst p) in
let b1 := zero_extend8_32 (fst (snd p)) in
let b2 := zero_extend8_32 (fst (snd (snd p))) in
let b3 := zero_extend8_32 (snd (snd (snd p))) in
let w1 := Word.shl b1 (Word.repr 8) in
let w2 := Word.shl b2 (Word.repr 16) in
let w3 := Word.shl b3 (Word.repr 24) in
Word.or w3 (Word.or w2 (Word.or w1 b0)))
: _ -> result_m word_t).
Definition scale_p := (field 2) @ (Z_to_scale : _ -> result_m scale_t).
Definition tttn := (field 4) @ (Z_to_condition_type : _ -> result_m condition_t).
(* This is used in a strange edge-case for modrm parsing. See the
   footnotes on p37 of the manual in the repo. This is a case where I
   think intersections/complements would be nice operators. *)
(* JGM: we can handle this in the semantic action instead of the parser,
so I replaced si, which used this and another pattern for [bits "100"]
to the simpler case below -- helps to avoid some explosions in the
definitions. *)
Definition reg_no_esp : parser register_t :=
(bits "000" |+| bits "001" |+| bits "010" |+|
bits "011" |+| (* bits "100" <- this is esp *) bits "101" |+|
bits "110" |+| bits "111") @
((fun bs => Z_to_register (bits2int 3 bs)) : _ -> result_m register_t).
Definition reg_no_ebp : parser register_t :=
(bits "000" |+| bits "001" |+| bits "010" |+|
bits "011" |+| bits "100" (* |+| bits "101" <- this is ebp *) |+|
bits "110" |+| bits "111") @
((fun bs => Z_to_register (bits2int 3 bs)) : _ -> result_m register_t).
Definition si :=
(scale_p $ reg) @ (fun p => match snd p with
| ESP => None
| _ => Some p
end %% option_t (Pair_t Scale_t Register_t)).
Definition sib := si $ reg.
(* These next 4 parsers are used in the definition of the mod/rm parser *)
Definition rm00 : parser operand_t :=
( bits "000"
|+| bits "001"
|+| bits "010"
|+| bits "011"
|+| bits "110"
|+| bits "111" ) @
(fun bs => Address_op (mkAddress (Word.repr 0)
(Some (Z_to_register(bits2int 3 bs))) None) %% operand_t)
|+| bits "100" $ si $ reg_no_ebp @
(fun p => match p with
| (_,(si,base)) =>
Address_op (mkAddress (Word.repr 0)
(Some base) si)
end : result_m operand_t)
|+| bits "100" $ si $ bits "101" $ word @
(fun p => match p with
| (_,(si,(_, disp))) =>
Address_op (mkAddress disp
(None) si)
end : result_m operand_t)
|+| bits "101" $ word @
(fun p => match p with
| (_, disp) =>
Address_op (mkAddress disp None None)
end %% operand_t).
Definition rm01 : parser operand_t :=
(( bits "000"
|+| bits "001"
|+| bits "010"
|+| bits "011"
|+| bits "101"
|+| bits "110"
|+| bits "111") $ byte) @
(fun p =>
match p with
| (bs, disp) =>
Address_op (mkAddress (sign_extend8_32 disp)
(Some (Z_to_register(bits2int 3 bs))) None)
end %% operand_t)
|+| bits "100" $ sib $ byte @
(fun p =>
match p with
| (_,((si,base),disp)) =>
Address_op (mkAddress (sign_extend8_32 disp) (Some base)
(si))
end %% operand_t).
Definition rm10 : parser operand_t :=
(( bits "000"
|+| bits "001"
|+| bits "010"
|+| bits "011"
|+| bits "101"
|+| bits "110"
|+| bits "111") $ word) @
(fun p =>
match p with
| (bs, disp) =>
Address_op (mkAddress disp (Some (Z_to_register(bits2int 3 bs))) None)
end %% operand_t)
|+| bits "100" $ sib $ word @
(fun p =>
match p with
| (_,((si,base),disp)) =>
Address_op (mkAddress disp (Some base) si)
end %% operand_t).
Definition rm11 : parser operand_t := reg @ (fun x => Reg_op x : result_m operand_t).
Definition modrm : parser (pair_t operand_t operand_t) :=
( (bits "00" $ reg $ rm00)
|+| (bits "01" $ reg $ rm01)
|+| (bits "10" $ reg $ rm10)
|+| (bits "11" $ reg $ rm11) ) @
(fun p => match p with
| (_, (r, op)) => (Reg_op r, op)
end %% (pair_t operand_t operand_t)).
(* same as modrm but disallows the register case *)
Definition modrm_noreg :=
( ("00" $$ reg $ rm00)
|+| ("01" $$ reg $ rm01)
|+| ("10" $$ reg $ rm10)).
(* Similar to mod/rm parser except that the register field is fixed to a
* particular bit-pattern, and the pattern starting with "11" is excluded. *)
Definition ext_op_modrm(bs:string) : parser operand_t :=
( (bits "00" $ bits bs $ rm00)
|+| (bits "01" $ bits bs $ rm01)
|+| (bits "10" $ bits bs $ rm10) ) @
(fun p => match p with
| (_,(_,op)) => op
end %% operand_t).
Definition ext_op_modrm2(bs:string) : parser operand_t :=
( (bits "00" $ bits bs $ rm00)
|+| (bits "01" $ bits bs $ rm01)
|+| (bits "10" $ bits bs $ rm10)
|+| (bits "11" $ bits bs $ rm11) ) @
(fun p => match p with
| (_,(_,op)) => op
end %% operand_t).
(* Parsers for the individual instructions *)
Definition AAA_p := bits "00110111" @ (fun _ => AAA %% instruction_t).
Definition AAD_p := bits "1101010100001010" @ (fun _ => AAD %% instruction_t).
Definition AAM_p := bits "1101010000001010" @ (fun _ => AAM %% instruction_t).
Definition AAS_p := bits "00111111" @ (fun _ => AAS %% instruction_t).
(* The parsing for ADC, ADD, AND, CMP, OR, SBB, SUB, and XOR can be shared *)
Definition imm_op (opsize_override: bool) : parser operand_t :=
match opsize_override with
| false => word @ (fun w => Imm_op w %% operand_t)
| true => halfword @ (fun w => Imm_op (sign_extend16_32 w) %% operand_t)
end.
Definition logic_or_arith_p (opsize_override: bool)
(op1 : string) (* first 5 bits for most cases *)
(op2 : string) (* when first 5 bits are 10000, the next byte has 3 bits
that determine the opcode *)
(InstCon : bool->operand->operand->instr) (* instruction constructor *)
: parser instruction_t
:=
(* register/memory to register and vice versa -- the d bit specifies
* the direction. *)
op1 $$ "0" $$ anybit $ anybit $ modrm @
(fun p => match p with
| (d, (w, (op1, op2))) =>
if d then InstCon w op1 op2 else InstCon w op2 op1
end %% instruction_t)
|+|
(* sign extend immediate byte to register *)
"1000" $$ "0011" $$ "11" $$ op2 $$ reg $ byte @
(fun p =>
let (r,imm) := p in InstCon true (Reg_op r) (Imm_op (sign_extend8_32 imm)) %%
instruction_t)
|+|
(* zero-extend immediate byte to register *)
"1000" $$ "0000" $$ "11" $$ op2 $$ reg $ byte @
(fun p =>
let (r,imm) := p in InstCon false (Reg_op r) (Imm_op (zero_extend8_32 imm)) %%
instruction_t)
|+|
(* immediate word to register *)
"1000" $$ "0001" $$ "11" $$ op2 $$ reg $ imm_op opsize_override @
(fun p => let (r,imm) := p in InstCon true (Reg_op r) imm %% instruction_t)
|+|
(* zero-extend immediate byte to EAX *)
op1 $$ "100" $$ byte @
(fun imm => InstCon false (Reg_op EAX) (Imm_op (zero_extend8_32 imm)) %% instruction_t)
|+|
(* word to EAX *)
op1 $$ "101" $$ imm_op opsize_override @
(fun imm => InstCon true (Reg_op EAX) imm %% instruction_t)
|+|
(* zero-extend immediate byte to memory *)
"1000" $$ "0000" $$ ext_op_modrm op2 $ byte @
(fun p => let (op,imm) := p in InstCon false op (Imm_op (zero_extend8_32 imm)) %%
instruction_t)
|+|
(* sign-extend immediate byte to memory *)
"1000" $$ "0011" $$ ext_op_modrm op2 $ byte @
(fun p => let (op,imm) := p in InstCon true op (Imm_op (sign_extend8_32 imm)) %%
instruction_t)
|+|
(* immediate word to memory *)
"1000" $$ "0001" $$ ext_op_modrm op2 $ imm_op opsize_override @
(fun p => let (op,imm) := p in InstCon true op imm %% instruction_t).
Definition ADC_p s := logic_or_arith_p s "00010" "010" ADC.
Definition ADD_p s := logic_or_arith_p s "00000" "000" ADD.
Definition AND_p s := logic_or_arith_p s "00100" "100" AND.
Definition CMP_p s := logic_or_arith_p s "00111" "111" CMP.
Definition OR_p s := logic_or_arith_p s "00001" "001" OR.
Definition SBB_p s := logic_or_arith_p s "00011" "011" SBB.
Definition SUB_p s := logic_or_arith_p s "00101" "101" SUB.
Definition XOR_p s := logic_or_arith_p s "00110" "110" XOR.
Definition ARPL_p :=
"0110" $$ "0011" $$ modrm @
(fun p => let (op1,op2) := p in ARPL op1 op2 %% instruction_t).
Definition BOUND_p :=
"0110" $$ "0010" $$ modrm @
(fun p => let (op1,op2) := p in BOUND op1 op2 %% instruction_t).
Definition BSF_p :=
"0000" $$ "1111" $$ "1011" $$ "1100" $$ modrm @
(fun p => let (op1,op2) := p in BSF op1 op2 %% instruction_t).
Definition BSR_p :=
"0000" $$ "1111" $$ "1011" $$ "1101" $$ modrm @
(fun p => let (op1,op2) := p in BSR op1 op2 %% instruction_t).
Definition BSWAP_p :=
"0000" $$ "1111" $$ "1100" $$ "1" $$ reg @ (fun x => BSWAP x %% instruction_t).
(* The various bit-testing operations can also share a parser *)
Definition bit_test_p (opcode1:string) (opcode2:string)
(Instr : operand -> operand -> instr) :=
"0000" $$ "1111" $$ "1011" $$ "1010" $$ "11" $$ opcode1 $$ reg $ byte @
(fun p =>
let (r,imm) := p in Instr (Reg_op r) (Imm_op (zero_extend8_32 imm)) %% instruction_t)
|+|
"0000" $$ "1111" $$ "1011" $$ "1010" $$ ext_op_modrm opcode1 $ byte @
(fun p =>
let (op1,imm) := p in Instr op1 (Imm_op (zero_extend8_32 imm)) %% instruction_t)
|+|
"0000" $$ "1111" $$ "101" $$ opcode2 $$ "011" $$ modrm @
(fun p => let (op2,op1) := p in Instr op1 op2 %% instruction_t).
Definition BT_p := bit_test_p "100" "00" BT.
Definition BTC_p := bit_test_p "111" "11" BTC.
Definition BTR_p := bit_test_p "110" "10" BTR.
Definition BTS_p := bit_test_p "101" "01" BTS.
Definition CALL_p :=
"1110" $$ "1000" $$ word @
(fun w => CALL true false (Imm_op w) None %% instruction_t)
|+|
"1111" $$ "1111" $$ ext_op_modrm2 "010" @
(fun op => CALL true true op None %% instruction_t)
|+|
"1001" $$ "1010" $$ halfword $ word @
(fun p => CALL false false (Imm_op (snd p)) (Some (fst p)) %% instruction_t)
|+|
"1111" $$ "1111" $$ ext_op_modrm2 "011" @
(fun op => CALL false true op None %% instruction_t).
Definition CDQ_p := "1001" $$ bits "1001" @ (fun _ => CDQ %% instruction_t).
Definition CLC_p := "1111" $$ bits "1000" @ (fun _ => CLC %% instruction_t).
Definition CLD_p := "1111" $$ bits "1100" @ (fun _ => CLD %% instruction_t).
Definition CLI_p := "1111" $$ bits "1010" @ (fun _ => CLI %% instruction_t).
Definition CLTS_p := "0000" $$ "1111" $$ "0000" $$ bits "0110" @
(fun _ => CLTS %% instruction_t).
Definition CMC_p := "1111" $$ bits "0101" @ (fun _ => CMC %% instruction_t).
Definition CMPS_p := "1010" $$ "011" $$ anybit @ (fun x => CMPS x %% instruction_t).
Definition CMPXCHG_p :=
"0000" $$ "1111" $$ "1011" $$ "000" $$ anybit $ modrm @
(fun p => match p with
| (w,(op1,op2)) => CMPXCHG w op2 op1
end %% instruction_t).
Definition CPUID_p := "0000" $$ "1111" $$ "1010" $$ bits "0010" @
(fun _ => CPUID %% instruction_t).
Definition CWDE_p := "1001" $$ bits "1000" @ (fun _ => CWDE %% instruction_t).
Definition DAA_p := "0010" $$ bits "0111" @ (fun _ => DAA %% instruction_t).
Definition DAS_p := "0010" $$ bits "1111" @ (fun _ => DAS %% instruction_t).
Definition DEC_p :=
"1111" $$ "111" $$ anybit $ "11001" $$ reg @
(fun p => let (w,r) := p in DEC w (Reg_op r) %% instruction_t)
|+|
"0100" $$ "1" $$ reg @
(fun r => DEC true (Reg_op r) %% instruction_t)
|+|
"1111" $$ "111" $$ anybit $ ext_op_modrm "001" @
(fun p => let (w,op1) := p in DEC w op1 %% instruction_t).
Definition DIV_p :=
"1111" $$ "011" $$ anybit $ "11110" $$ reg @
(fun p => let (w,r) := p in DIV w (Reg_op r) %% instruction_t)
|+|
"1111" $$ "011" $$ anybit $ ext_op_modrm "110" @
(fun p => let (w,op1) := p in DIV w op1 %% instruction_t).
Definition HLT_p := "1111" $$ bits "0100" @ (fun _ => HLT %% instruction_t).
Definition IDIV_p :=
"1111" $$ "011" $$ anybit $ "11111" $$ reg @
(fun p => let (w,r) := p in IDIV w (Reg_op r) %% instruction_t)
|+|
"1111" $$ "011" $$ anybit $ ext_op_modrm "111" @
(fun p => let (w,op1) := p in IDIV w op1 %% instruction_t).
Definition IMUL_p opsize_override :=
"1111" $$ "011" $$ anybit $ ext_op_modrm2 "101" @
(fun p => let (w,op1) := p in IMUL w op1 None None %% instruction_t)
|+|
"0000" $$ "1111" $$ "1010" $$ "1111" $$ modrm @
(fun p => let (op1,op2) := p in IMUL true op1 (Some op2) None %% instruction_t)
|+|
"0110" $$ "1011" $$ modrm $ byte @
(fun p => match p with
| ((op1,op2),imm) =>
IMUL true op1 (Some op2) (Some (sign_extend8_32 imm))
end %% instruction_t)
|+|
match opsize_override with
| false =>
"0110" $$ "1001" $$ modrm $ word @
(fun p => match p with
| ((op1,op2),imm) =>
IMUL true op1 (Some op2) (Some imm)
end %% instruction_t)
| true =>
"0110" $$ "1001" $$ modrm $ halfword @
(fun p => match p with
| ((op1,op2),imm) =>
IMUL true op1 (Some op2) (Some (sign_extend16_32 imm))
end %% instruction_t)
end.
Definition IN_p :=
"1110" $$ "010" $$ anybit $ byte @
(fun p => let (w,pt) := p in IN w (Some pt) %% instruction_t)
|+|
"1110" $$ "110" $$ anybit @ (fun w => IN w None %% instruction_t).
Definition INC_p :=
"1111" $$ "111" $$ anybit $ "11000" $$ reg @
(fun p => let (w,r) := p in INC w (Reg_op r) %% instruction_t)
|+|
"0100" $$ "0" $$ reg @ (fun r => INC true (Reg_op r) %% instruction_t)
|+|
"1111" $$ "111" $$ anybit $ ext_op_modrm "000" @
(fun p => let (w,op1) := p in INC w op1 %% instruction_t).
Definition INS_p := "0110" $$ "110" $$ anybit @ (fun x => INS x %% instruction_t).
Definition INTn_p := "1100" $$ "1101" $$ byte @ (fun x => INTn x %% instruction_t).
Definition INT_p := "1100" $$ bits "1100" @ (fun _ => INT %% instruction_t).
Definition INTO_p := "1100" $$ bits "1110" @ (fun _ => INTO %% instruction_t).
Definition INVD_p := "0000" $$ "1111" $$ "0000" $$ bits "1000" @
(fun _ => INVD %% instruction_t).
Definition INVLPG_p :=
"0000" $$ "1111" $$ "0000" $$ "0001" $$ ext_op_modrm "111" @
(fun x => INVLPG x %% instruction_t).
Definition IRET_p := "1100" $$ bits "1111" @ (fun _ => IRET %% instruction_t).
Definition Jcc_p :=
"0111" $$ tttn $ byte @
(fun p => let (ct,imm) := p in Jcc ct (sign_extend8_32 imm) %% instruction_t)
|+|
"0000" $$ "1111" $$ "1000" $$ tttn $ word @
(fun p => let (ct,imm) := p in Jcc ct imm %% instruction_t).
Definition JCXZ_p := "1110" $$ "0011" $$ byte @ (fun x => JCXZ x %% instruction_t).
Definition JMP_p :=
"1110" $$ "1011" $$ byte @
(fun b => JMP true false (Imm_op (sign_extend8_32 b)) None %% instruction_t)
|+|
"1110" $$ "1001" $$ word @
(fun w => JMP true false (Imm_op w) None %% instruction_t)
|+|
"1111" $$ "1111" $$ ext_op_modrm2 "100" @
(fun op => JMP true true op None %% instruction_t)
|+|
"1110" $$ "1010" $$ halfword $ word @
(fun p => JMP false true (Imm_op (snd p)) (Some (fst p)) %% instruction_t)
|+|
"1111" $$ "1111" $$ ext_op_modrm2 "101" @
(fun op => JMP false true op None %% instruction_t).
Definition LAHF_p := "1001" $$ bits "1111" @ (fun _ => LAHF %% instruction_t).
Definition LAR_p :=
"0000" $$ "1111" $$ "0000" $$ "0010" $$ modrm @
(fun p => LAR (fst p) (snd p) %% instruction_t).
Definition LDS_p := "1100" $$ "0101" $$ modrm @
(fun p => LDS (fst p) (snd p) %% instruction_t).
Definition LEA_p := "1000" $$ "1101" $$ modrm_noreg @
(fun p => LEA (Reg_op (fst p)) (snd p) %% instruction_t).
Definition LEAVE_p := "1100" $$ bits "1001" @
(fun _ => LEAVE %% instruction_t).
Definition LES_p := "1100" $$ "0100" $$ modrm @
(fun p => LES (fst p) (snd p) %% instruction_t).
Definition LFS_p := "0000" $$ "1111" $$ "1011" $$ "0100" $$ modrm @
(fun p => LFS (fst p) (snd p) %% instruction_t).
Definition LGDT_p := "0000" $$ "1111" $$ "0000" $$ "0001" $$ ext_op_modrm "010" @
(fun x => LGDT x %% instruction_t).
Definition LGS_p := "0000" $$ "1111" $$ "1011" $$ "0101" $$ modrm @
(fun p => LGS (fst p) (snd p) %% instruction_t).
Definition LIDT_p := "0000" $$ "1111" $$ "0000" $$ "0001" $$ ext_op_modrm "011" @
(fun x => LIDT x %% instruction_t).
Definition LLDT_p :=
"0000" $$ "1111" $$ "0000" $$ "0000" $$ "11" $$ "010" $$ reg @
(fun r => LLDT (Reg_op r) %% instruction_t)
|+|
"0000" $$ "1111" $$ "0000" $$ "0000" $$ ext_op_modrm "010" @
(fun x => LLDT x %% instruction_t).
Definition LMSW_p :=
"0000" $$ "1111" $$ "0000" $$ "0001" $$ "11" $$ "110" $$ reg @
(fun r => LMSW (Reg_op r) %% instruction_t)
|+|
"0000" $$ "1111" $$ "0000" $$ "0001" $$ "11" $$ ext_op_modrm "110" @
(fun x => LMSW x %% instruction_t).
(* JGM: note, this isn't really an instruction, but rather a prefix. So it
shouldn't be included in the list of instruction parsers. *)
(* Definition LOCK_p := "1111" $$ bits "0000" @ (fun _ => LOCK %% instruction_t). *)
Definition LODS_p := "1010" $$ "110" $$ anybit @ (fun x => LODS x %% instruction_t).
Definition LOOP_p := "1110" $$ "0010" $$ byte @ (fun x => LOOP x %% instruction_t).
Definition LOOPZ_p := "1110" $$ "0001" $$ byte @ (fun x => LOOPZ x %% instruction_t).
Definition LOOPNZ_p := "1110" $$ "0000" $$ byte @ (fun x => LOOPNZ x %% instruction_t).
Definition LSL_p := "0000" $$ "1111" $$ "0000" $$ "0011" $$ modrm @
(fun p => LSL (fst p) (snd p) %% instruction_t).
Definition LSS_p := "0000" $$ "1111" $$ "1011" $$ "0010" $$ modrm @
(fun p => LSS (fst p) (snd p) %% instruction_t).
Definition LTR_p := "0000" $$ "1111" $$ "0000" $$ "0000" $$ ext_op_modrm "011" @
(fun x => LTR x %% instruction_t).
(* This may not be right. Need to test this thoroughly.
   There is no 8-bit mode for CMOVcc *)
Definition CMOVcc_p :=
"0000" $$ "1111" $$ "0100" $$ tttn $ modrm @
(fun p => match p with | (tttn, (op1, op2))=>CMOVcc tttn op1 op2 end %% instruction_t).
Definition MOV_p opsize_override :=
"1000" $$ "101" $$ anybit $ modrm @
(fun p => match p with | (w,(op1,op2)) => MOV w op1 op2 end %% instruction_t)
|+|
"1000" $$ "100" $$ anybit $ modrm @
(fun p => match p with | (w,(op1,op2)) => MOV w op2 op1 end %% instruction_t)
|+|
"1100" $$ "0111" $$ "11" $$ "000" $$ reg $ imm_op opsize_override @
(fun p => match p with | (r,w) => MOV true (Reg_op r) w end %% instruction_t)
|+|
"1100" $$ "0110" $$ "11" $$ "000" $$ reg $ byte @
(fun p => match p with
| (r,b) => MOV false (Reg_op r) (Imm_op (zero_extend8_32 b))
end %% instruction_t)
|+|
"1011" $$ "1" $$ reg $ imm_op opsize_override @
(fun p => match p with | (r,w) => MOV true (Reg_op r) w
end %% instruction_t)
|+|
"1011" $$ "0" $$ reg $ byte @
(fun p => match p with
| (r,b) => MOV false (Reg_op r) (Imm_op (zero_extend8_32 b))
end %% instruction_t)
|+|
"1100" $$ "0111" $$ ext_op_modrm "000" $ imm_op opsize_override @
(fun p => match p with | (op,w) => MOV true op w end %% instruction_t)
|+|
"1100" $$ "0110" $$ ext_op_modrm "000" $ byte @
(fun p => match p with | (op,b) => MOV false op (Imm_op (zero_extend8_32 b)) end %% instruction_t)
|+|
"1010" $$ "0001" $$ word @ (fun w => MOV true (Reg_op EAX) (Offset_op w) %% instruction_t)
|+|
"1010" $$ "0000" $$ word @ (fun w => MOV false (Reg_op EAX) (Offset_op w) %% instruction_t)
|+|
"1010" $$ "0011" $$ word @ (fun w => MOV true (Offset_op w) (Reg_op EAX) %% instruction_t)
|+|
"1010" $$ "0010" $$ word @ (fun w => MOV false (Offset_op w) (Reg_op EAX) %% instruction_t).
Definition control_reg_p :=
bits "000" @ (fun _ => CR0 %% control_register_t)
|+| bits "010" @ (fun _ => CR2 %% control_register_t)
|+| bits "011" @ (fun _ => CR3 %% control_register_t)
|+| bits "100" @ (fun _ => CR4 %% control_register_t).
Definition MOVCR_p :=
"0000" $$ "1111" $$ "0010" $$ "0010" $$ "11" $$ control_reg_p $ reg @
(fun p => MOVCR false (fst p) (snd p) %% instruction_t)
|+|
"0000" $$ "1111" $$ "0010" $$ "0000" $$ "11" $$ control_reg_p $ reg @
(fun p => MOVCR true (fst p) (snd p) %% instruction_t).
(* Note: apparently, the bit patterns corresponding to DR4 and DR5 either
* (a) get mapped to DR6 and DR7 respectively or else (b) cause a fault,
* depending upon the value of some control register. My guess is that it's
* okay for us to just consider this a fault. Something similar seems to
* happen with the CR registers above -- e.g., we don't have a CR1. *)
Definition debug_reg_p :=
bits "000" @ (fun _ => DR0 %% debug_register_t)
|+| bits "001" @ (fun _ => DR1 %% debug_register_t)
|+| bits "010" @ (fun _ => DR2 %% debug_register_t)
|+| bits "011" @ (fun _ => DR3 %% debug_register_t)
|+| bits "110" @ (fun _ => DR6 %% debug_register_t)
|+| bits "111" @ (fun _ => DR7 %% debug_register_t).
Definition MOVDR_p :=
"0000" $$ "1111" $$ "0010" $$ "0011" $$ "11" $$ debug_reg_p $ reg @
(fun p => MOVDR false (fst p) (snd p) %% instruction_t)
|+|
"0000" $$ "1111" $$ "0010" $$ "0001" $$ "11" $$ debug_reg_p $ reg @
(fun p => MOVDR true (fst p) (snd p) %% instruction_t).
Definition segment_reg_p :=
bits "000" @ (fun _ => ES %% segment_register_t)
|+| bits "001" @ (fun _ => CS %% segment_register_t)
|+| bits "010" @ (fun _ => SS %% segment_register_t)
|+| bits "011" @ (fun _ => DS %% segment_register_t)
|+| bits "100" @ (fun _ => FS %% segment_register_t)
|+| bits "101" @ (fun _ => GS %% segment_register_t).
Definition seg_modrm : parser (pair_t segment_register_t operand_t) :=
("00" $$ segment_reg_p $ rm00)
|+| ("01" $$ segment_reg_p $ rm01)
|+| ("10" $$ segment_reg_p $ rm10)
|+| ("11" $$ segment_reg_p $ rm11).
Definition MOVSR_p :=
"1000" $$ "1110" $$ seg_modrm @
(fun p => MOVSR false (fst p) (snd p) %% instruction_t)
|+|
"1000" $$ "1100" $$ seg_modrm @
(fun p => MOVSR true (fst p) (snd p) %% instruction_t).
Definition MOVBE_p :=
"0000" $$ "1111" $$ "0011" $$ "1000" $$ "1111" $$ "0000" $$ modrm @
(fun p => MOVBE (snd p) (fst p) %% instruction_t)
|+|
"0000" $$ "1111" $$ "0011" $$ "1000" $$ "1111" $$ "0001" $$ modrm @
(fun p => MOVBE (fst p) (snd p) %% instruction_t).
Definition MOVS_p := "1010" $$ "010" $$ anybit @ (fun x => MOVS x %% instruction_t).
Definition MOVSX_p := "0000" $$ "1111" $$ "1011" $$ "111" $$ anybit $ modrm @
(fun p => match p with | (w,(op1,op2)) => MOVSX w op1 op2 end %% instruction_t).
Definition MOVZX_p := "0000" $$ "1111" $$ "1011" $$ "011" $$ anybit $ modrm @
(fun p => match p with | (w,(op1,op2)) => MOVZX w op1 op2 end %% instruction_t).
Definition MUL_p :=
"1111" $$ "011" $$ anybit $ ext_op_modrm2 "100" @
(fun p => MUL (fst p) (snd p) %% instruction_t).
Definition NEG_p :=
"1111" $$ "011" $$ anybit $ ext_op_modrm2 "011" @
(fun p => NEG (fst p) (snd p) %% instruction_t).
(*
Definition NOP_p :=
"1001" $$ bits "0000" @ (fun _ => NOP None %% instruction_t)
|+|
"0000" $$ "1111" $$ "0001" $$ "1111" $$ ext_op_modrm "000" @
(fun op => NOP (Some op) %% instruction_t).
*)
Definition NOT_p :=
"1111" $$ "011" $$ anybit $ ext_op_modrm2 "010" @
(fun p => NOT (fst p) (snd p) %% instruction_t).
Definition OUT_p :=
"1110" $$ "011" $$ anybit $ byte @
(fun p => OUT (fst p) (Some (snd p)) %% instruction_t)
|+|
"1110" $$ "111" $$ anybit @ (fun w => OUT w None %% instruction_t).
Definition OUTS_p := "0110" $$ "111" $$ anybit @ (fun x => OUTS x %% instruction_t).
Definition POP_p :=
"1000" $$ "1111" $$ ext_op_modrm "000" @ (fun x => POP x %% instruction_t)
|+|
"0101" $$ "1" $$ reg @ (fun r => POP (Reg_op r) %% instruction_t).
Definition POPSR_p :=
"000" $$ "00" $$ bits "111" @ (fun _ => POPSR ES %% instruction_t)
|+|
"000" $$ "10" $$ bits "111" @ (fun _ => POPSR SS %% instruction_t)
|+|
"000" $$ "11" $$ bits "111" @ (fun _ => POPSR DS %% instruction_t)
|+|
"0000" $$ "1111" $$ "10" $$ "100" $$ bits "001" @
(fun _ => POPSR FS %% instruction_t)
|+|
"0000" $$ "1111" $$ "10" $$ "101" $$ bits "001" @
(fun _ => POPSR GS %% instruction_t).
Definition POPA_p := "0110" $$ bits "0001" @ (fun _ => POPA %% instruction_t).
Definition POPF_p := "1001" $$ bits "1101" @ (fun _ => POPF %% instruction_t).
Definition PUSH_p :=
"1111" $$ "1111" $$ ext_op_modrm "110" @ (fun x => PUSH true x %% instruction_t)
|+|
"0101" $$ "0" $$ reg @ (fun r => PUSH true (Reg_op r) %% instruction_t)
|+|
"0110" $$ "1010" $$ byte @
(fun b => PUSH false (Imm_op (sign_extend8_32 b)) %% instruction_t)
|+|
"0110" $$ "1000" $$ word @ (fun w => PUSH true (Imm_op w) %% instruction_t).
Definition segment_reg2_p :=
bits "00" @ (fun _ => ES %% segment_register_t)
|+| bits "01" @ (fun _ => CS %% segment_register_t)
|+| bits "10" @ (fun _ => SS %% segment_register_t)
|+| bits "11" @ (fun _ => DS %% segment_register_t).
Definition PUSHSR_p :=
"000" $$ segment_reg2_p $ bits "110" @
(fun p => PUSHSR (fst p) %% instruction_t)
|+|
"0000" $$ "1111" $$ "10" $$ "100" $$ bits "000" @
(fun _ => PUSHSR FS %% instruction_t)
|+|
"0000" $$ "1111" $$ "10" $$ "101" $$ bits "000" @
(fun _ => PUSHSR GS %% instruction_t).
Definition PUSHA_p := "0110" $$ bits "0000" @ (fun _ => PUSHA %% instruction_t).
Definition PUSHF_p := "1001" $$ bits "1100" @ (fun _ => PUSHF %% instruction_t).
Definition rotate_p extop (inst : bool -> operand -> reg_or_immed -> instr) :=
"1101" $$ "000" $$ anybit $ ext_op_modrm2 extop @
(fun p => inst (fst p) (snd p) (Imm_ri (Word.repr 1)) %% instruction_t)
|+|
"1101" $$ "001" $$ anybit $ ext_op_modrm2 extop @
(fun p => inst (fst p) (snd p) (Reg_ri ECX) %% instruction_t)
|+|
"1100" $$ "000" $$ anybit $ ext_op_modrm2 extop $ byte @
(fun p => match p with | (w, (op,b)) => inst w op (Imm_ri b) end %% instruction_t).
Definition RCL_p := rotate_p "010" RCL.
Definition RCR_p := rotate_p "011" RCR.
Definition RDMSR_p := "0000" $$ "1111" $$ "0011" $$ bits "0010" @
(fun _ => RDMSR %% instruction_t).
Definition RDPMC_p := "0000" $$ "1111" $$ "0011" $$ bits "0011" @
(fun _ => RDPMC %% instruction_t).
Definition RDTSC_p := "0000" $$ "1111" $$ "0011" $$ bits "0001" @
(fun _ => RDTSC %% instruction_t).
Definition RDTSCP_p := "0000" $$ "1111" $$ "0000" $$ "0001" $$ "1111" $$ bits "1001" @
(fun _ => RDTSCP %% instruction_t).
(*
Definition REPINS_p := "1111" $$ "0011" $$ "0110" $$ "110" $$ anybit @
(fun x => REPINS x %% instruction_t).
Definition REPLODS_p := "1111" $$ "0011" $$ "1010" $$ "110" $$ anybit @
(fun x => REPLODS x %% instruction_t).
Definition REPMOVS_p := "1111" $$ "0011" $$ "1010" $$ "010" $$ anybit @
(fun x => REPMOVS x %% instruction_t).
Definition REPOUTS_p := "1111" $$ "0011" $$ "0110" $$ "111" $$ anybit @
(fun x => REPOUTS x %% instruction_t).
Definition REPSTOS_p := "1111" $$ "0011" $$ "1010" $$ "101" $$ anybit @
(fun x => REPSTOS x %% instruction_t).
Definition REPECMPS_p := "1111" $$ "0011" $$ "1010" $$ "011" $$ anybit @
(fun x => REPECMPS x %% instruction_t).
Definition REPESCAS_p := "1111" $$ "0011" $$ "1010" $$ "111" $$ anybit @
(fun x => REPESCAS x %% instruction_t).
Definition REPNECMPS_p := "1111" $$ "0010" $$ "1010" $$ "011" $$ anybit @
(fun x => REPNECMPS x %% instruction_t).
Definition REPNESCAS_p := "1111" $$ "0010" $$ "1010" $$ "111" $$ anybit @
(fun x => REPNESCAS x %% instruction_t).
*)
Definition RET_p :=
"1100" $$ bits "0011" @ (fun _ => RET true None %% instruction_t)
|+|
"1100" $$ "0010" $$ halfword @ (fun h => RET true (Some h) %% instruction_t)
|+|
"1100" $$ bits "1011" @ (fun _ => RET false None %% instruction_t)
|+|
"1100" $$ "1010" $$ halfword @ (fun h => RET false (Some h) %% instruction_t).
Definition ROL_p := rotate_p "000" ROL.
Definition ROR_p := rotate_p "001" ROR.
Definition RSM_p := "0000" $$ "1111" $$ "1010" $$ bits "1010" @
(fun _ => RSM %% instruction_t).
Definition SAHF_p := "1001" $$ bits "1110" @
(fun _ => SAHF %% instruction_t).
Definition SAR_p := rotate_p "111" SAR.
Definition SCAS_p := "1010" $$ "111" $$ anybit @ (fun x => SCAS x %% instruction_t).
Definition SETcc_p :=
"0000" $$ "1111" $$ "1001" $$ tttn $ modrm @
(fun p => SETcc (fst p) (snd (snd p)) %% instruction_t).
Definition SGDT_p := "0000" $$ "1111" $$ "0000" $$ "0001" $$ ext_op_modrm "000" @
(fun x => SGDT x %% instruction_t).
Definition SHL_p := rotate_p "100" SHL.
Definition shiftdouble_p opcode inst :=
("0000" $$ "1111" $$ "1010" $$ opcode $$ "00" $$ "11" $$ reg $ reg $ byte) @
(fun p => match p with | (r2,(r1,b)) => inst (Reg_op r1) r2 (Imm_ri b) end %% instruction_t)
|+|
("0000" $$ "1111" $$ "1010" $$ opcode $$ "00" $$ modrm_noreg $ byte) @
(fun p => match p with | ((r,op), b) => inst op r (Imm_ri b) end %% instruction_t)
|+|
("0000" $$ "1111" $$ "1010" $$ opcode $$ "01" $$ "11" $$ reg $ reg) @
(fun p => match p with | (r2,r1) => inst (Reg_op r1) r2 (Reg_ri ECX) end %% instruction_t)
|+|
("0000" $$ "1111" $$ "1010" $$ opcode $$ "01" $$ modrm_noreg) @
(fun p => match p with | (r,op) => inst op r (Reg_ri ECX) end %% instruction_t).
Definition SHLD_p := shiftdouble_p "01" SHLD.
Definition SHR_p := rotate_p "101" SHR.
Definition SHRD_p := shiftdouble_p "11" SHRD.
Definition SIDT_p := "0000" $$ "1111" $$ "0000" $$ "0001" $$ ext_op_modrm "001" @
(fun x => SIDT x %% instruction_t).
Definition SLDT_p := "0000" $$ "1111" $$ "0000" $$ "0000" $$ ext_op_modrm "000" @
(fun x => SLDT x %% instruction_t).
Definition SMSW_p := "0000" $$ "1111" $$ "0000" $$ "0001" $$ ext_op_modrm "100" @
(fun x => SMSW x %% instruction_t).
Definition STC_p := "1111" $$ bits "1001" @ (fun _ => STC %% instruction_t).
Definition STD_p := "1111" $$ bits "1101" @ (fun _ => STD %% instruction_t).
Definition STI_p := "1111" $$ bits "1011" @ (fun _ => STI %% instruction_t).
Definition STOS_p := "1010" $$ "101" $$ anybit @
(fun x => STOS x %% instruction_t).
Definition STR_p := "0000" $$ "1111" $$ "0000" $$ "0000" $$ ext_op_modrm "001" @
(fun x => STR x %% instruction_t).
Definition TEST_p (opsize_override: bool) :=
"1111" $$ "0111" $$ ext_op_modrm2 "000" $ imm_op opsize_override @
(fun p => TEST true (fst p) (snd p) %% instruction_t)
|+|
"1111" $$ "0110" $$ ext_op_modrm2 "000" $ byte @
(fun p => TEST false (fst p) (Imm_op (zero_extend8_32 (snd p))) %% instruction_t)
|+|
"1000" $$ "010" $$ anybit $ modrm @
(fun p => match p with | (w,(op1,op2)) => TEST w op1 op2 end %% instruction_t)
|+|
"1010" $$ "1001" $$ imm_op opsize_override @ (fun w => TEST true w (Reg_op EAX) %% instruction_t)
|+|
"1010" $$ "1000" $$ byte @
(fun b => TEST true (Imm_op (zero_extend8_32 b)) (Reg_op EAX) %% instruction_t).
Definition UD2_p := "0000" $$ "1111" $$ "0000" $$ bits "1011" @
(fun _ => UD2 %% instruction_t).
Definition VERR_p := "0000" $$ "1111" $$ "0000" $$ "0000" $$ ext_op_modrm "100" @
(fun x => VERR x %% instruction_t).
Definition VERW_p := "0000" $$ "1111" $$ "0000" $$ "0000" $$ ext_op_modrm "101" @
(fun x => VERW x %% instruction_t).
Definition WAIT_p := "1001" $$ bits "1011" @ (fun _ => WAIT %% instruction_t).
Definition WBINVD_p := "0000" $$ "1111" $$ "0000" $$ bits "1001" @
(fun _ => WBINVD %% instruction_t).
Definition WRMSR_p := "0000" $$ "1111" $$ "0011" $$ bits "0000" @
(fun _ => WRMSR %% instruction_t).
Definition XADD_p :=
"0000" $$ "1111" $$ "1100" $$ "000" $$ anybit $ modrm @
(fun p => match p with | (w,(op1,op2)) => XADD w op2 op1 end %% instruction_t).
Definition XCHG_p :=
"1000" $$ "011" $$ anybit $ modrm @
(fun p => match p with | (w,(op1,op2)) => XCHG w op1 op2 end %% instruction_t)
|+|
"1001" $$ "0" $$ reg @ (fun r => XCHG true (Reg_op EAX) (Reg_op r) %% instruction_t).
Definition XLAT_p := "1101" $$ bits "0111" @ (fun _ => XLAT %% instruction_t).
(* Now glue all of the individual instruction parsers together into
one big parser. *)
Definition instr_parsers_opsize_pre : list (parser instruction_t) :=
ADC_p true :: ADD_p true :: AND_p true :: CMP_p true :: OR_p true :: SBB_p true :: SUB_p true :: SHL_p :: SHLD_p :: SHR_p :: SAR_p :: SHRD_p :: XOR_p true ::IMUL_p true :: MOV_p true :: MOVSX_p :: MOVZX_p :: NEG_p :: NOT_p :: DIV_p :: IDIV_p :: TEST_p true :: CDQ_p :: CWDE_p :: MUL_p :: XCHG_p :: nil.
Definition instr_parsers_nosize_pre : list (parser instruction_t) :=
AAA_p :: AAD_p :: AAM_p :: AAS_p :: ADC_p false :: ADD_p false :: AND_p false :: CMP_p false :: OR_p false :: SBB_p false :: SUB_p false :: XOR_p false :: ARPL_p :: BOUND_p :: BSF_p :: BSR_p :: BSWAP_p :: BT_p :: BTC_p :: BTR_p :: BTS_p :: CALL_p :: CDQ_p :: CLC_p :: CLD_p :: CLI_p :: CMOVcc_p :: CMC_p :: CMPS_p :: CMPXCHG_p :: CPUID_p :: CWDE_p :: DAA_p :: DAS_p :: DEC_p :: DIV_p :: HLT_p :: IDIV_p :: IMUL_p false :: IN_p :: INC_p :: INS_p :: INTn_p :: INT_p :: INTO_p :: INVD_p :: INVLPG_p :: IRET_p :: Jcc_p :: JCXZ_p :: JMP_p :: LAHF_p :: LAR_p :: LDS_p :: LEA_p :: LEAVE_p :: LES_p :: LFS_p :: LGDT_p :: LGS_p :: LIDT_p :: LLDT_p :: LMSW_p :: (* LOCK_p :: -- see note above about LOCK_p *) LODS_p :: LOOP_p :: LOOPZ_p :: LOOPNZ_p :: LSL_p :: LSS_p :: LTR_p :: MOV_p false :: MOVCR_p :: MOVDR_p :: MOVSR_p :: MOVBE_p :: MOVS_p :: MOVSX_p :: MOVZX_p :: MUL_p :: NEG_p :: (* NOP_p :: *) NOT_p :: OUT_p :: OUTS_p :: POP_p :: POPSR_p :: POPA_p :: POPF_p :: PUSH_p :: PUSHSR_p :: PUSHA_p :: PUSHF_p :: RCL_p :: RCR_p :: RDMSR_p :: RDPMC_p :: RDTSC_p :: RDTSCP_p :: (* REPINS_p :: REPLODS_p :: REPMOVS_p :: REPOUTS_p :: REPSTOS_p :: REPECMPS_p :: REPESCAS_p :: REPNECMPS_p :: REPNESCAS_p :: *) RET_p :: ROL_p :: ROR_p :: RSM_p :: SAHF_p :: SAR_p :: SCAS_p :: SETcc_p :: SGDT_p :: SHL_p :: SHLD_p :: SHR_p :: SHRD_p :: SIDT_p :: SLDT_p :: SMSW_p :: STC_p :: STD_p :: STI_p :: STOS_p :: STR_p :: TEST_p false :: UD2_p :: VERR_p :: VERW_p :: WAIT_p :: WBINVD_p :: WRMSR_p :: XADD_p :: XCHG_p :: XLAT_p :: nil.
Fixpoint list2pair_t (l: list result) :=
match l with
| nil => unit_t
| r::r'::nil => pair_t r r'
| r::l' => pair_t r (list2pair_t l')
end.
Definition lock_or_rep_p : parser lock_or_rep_t :=
("1111" $$ ( bits "0000" @ (fun _ => lock %% lock_or_rep_t)
|+| bits "0010" @ (fun _ => repn %% lock_or_rep_t)
|+| bits "0011" @ (fun _ => rep %% lock_or_rep_t))).
Definition segment_override_p : parser segment_register_t :=
("0010" $$ bits "1110" @ (fun _ => CS %% segment_register_t)
|+| "0011" $$ bits "0110" @ (fun _ => SS %% segment_register_t)
|+| "0011" $$ bits "1110" @ (fun _ => DS %% segment_register_t)
|+| "0010" $$ bits "0110" @ (fun _ => ES %% segment_register_t)
|+| "0110" $$ bits "0100" @ (fun _ => FS %% segment_register_t)
|+| "0110" $$ bits "0101" @ (fun _ => GS %% segment_register_t)).
Definition op_override_p : parser bool_t :=
"0110" $$ bits "0110" @ (fun _ => true %% bool_t).
Definition addr_override_p : parser bool_t :=
"0110" $$ bits "0111" @ (fun _ => true %% bool_t).
(* Ok, now I want all permutations of the above four parsers.
I make a little perm2 combinator that takes two parsers and gives you
p1 $ p2 |+| p2 $ p1, making sure to swap the results in the second case *)
Definition perm2 t1 t2 (p1: parser t1) (p2: parser t2) : parser (pair_t t1 t2) :=
p1 $ p2 |+|
p2 $ p1 @ (fun p => match p with (a, b) => (b, a) %% pair_t t1 t2 end).
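(* For illustration (not part of the original development): [perm2 op_override_p segment_override_p],
   using the prefix parsers defined above, accepts the operand-size override byte 0x66 followed by
   the CS override byte 0x2E, and also the same two bytes in the opposite order; in both cases the
   result is the pair (true, CS), thanks to the swap in the second alternative. *)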
(* Then I build that up into a perm3 and perm4. One could make a recursive
function to do this, but I didn't want to bother with the necessary
proofs and type-system juggling.*)
Definition perm3 t1 t2 t3 (p1: parser t1) (p2: parser t2) (p3: parser t3)
: parser (pair_t t1 (pair_t t2 t3)) :=
let r_t := pair_t t1 (pair_t t2 t3) in
p1 $ (perm2 p2 p3)
|+| p2 $ (perm2 p1 p3) @ (fun p => match p with (b, (a, c)) => (a, (b, c)) %% r_t end)
|+| p3 $ (perm2 p1 p2) @ (fun p => match p with (c, (a, b)) => (a, (b, c)) %% r_t end).
Definition perm4 t1 t2 t3 t4 (p1: parser t1) (p2: parser t2) (p3: parser t3)
(p4: parser t4) : parser (pair_t t1 (pair_t t2 (pair_t t3 t4))) :=
let r_t := pair_t t1 (pair_t t2 (pair_t t3 t4)) in
p1 $ (perm3 p2 p3 p4)
|+| p2 $ (perm3 p1 p3 p4) @
(fun p => match p with (b, (a, (c, d))) => (a, (b, (c, d))) %% r_t end)
|+| p3 $ (perm3 p1 p2 p4) @
(fun p => match p with (c, (a, (b, d))) => (a, (b, (c, d))) %% r_t end)
|+| p4 $ (perm3 p1 p2 p3) @
(fun p => match p with (d, (a, (b, c))) => (a, (b, (c, d))) %% r_t end).
(* In this case, prefixes are optional. Before, each of the above
   parsing rules for the prefixes accepted Eps, which is how we
   handled optionality. However, if the parsers you join with perm can
   each accept Eps, then the result is a _highly_ ambiguous parser.
   Instead we have a different combinator, called option_perm, that
   handles this without introducing extra ambiguity. *)
(* This signature is slightly awkward - because there's no result
   type corresponding to option (and I'm hesitant to add it to
   Parser at the moment) we can't just have a signature like
   parser t1 -> parser t2 -> parser (pair_t (option_t t1) (option_t t2)). *)
Definition option_perm2 t1 t2 (p1: parser (tipe_t t1)) (p2: parser (tipe_t t2))
: parser (pair_t (option_t t1) (option_t t2)) :=
let r_t := pair_t (option_t t1) (option_t t2) in
Eps_p @ (fun p => (None, None) %% r_t)
|+| p1 @ (fun p => (Some p, None) %% r_t )
|+| p2 @ (fun p => (None, Some p) %% r_t)
|+| perm2 p1 p2 @ (fun p => match p with (a, b) => (Some a, Some b) %%r_t end).
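(* For illustration (not part of the original development): [option_perm2 lock_or_rep_p segment_override_p],
   the combination used below in prefix_parser_nooverride, accepts the empty prefix string, yielding
   (None, None); a lone REP byte 0xF3, yielding (Some rep, None); a lone CS override byte 0x2E, yielding
   (None, Some CS); or both bytes in either order, yielding (Some rep, Some CS). *)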
Definition option_perm3 t1 t2 t3 (p1:parser(tipe_t t1)) (p2:parser(tipe_t t2))
(p3:parser(tipe_t t3)): parser(pair_t(option_t t1)(pair_t(option_t t2) (option_t t3)))
:=
let r_t := pair_t(option_t t1)(pair_t(option_t t2) (option_t t3)) in
Eps_p @ (fun p => (None, (None, None)) %% r_t)
|+| p1 @ (fun p => (Some p, (None, None)) %% r_t)
|+| p2 @ (fun p => (None, (Some p, None)) %% r_t)
|+| p3 @ (fun p => (None, (None, Some p)) %% r_t)
|+| perm2 p1 p2 @(fun p => match p with (a, b) => (Some a, (Some b, None)) %%r_t end)
|+| perm2 p1 p3 @(fun p => match p with (a, c) => (Some a, (None, Some c)) %%r_t end)
|+| perm2 p2 p3 @(fun p => match p with (b, c) => (None, (Some b, Some c)) %%r_t end)
|+| perm3 p1 p2 p3 @ (fun p => match p with (a, (b, c))
=> (Some a, (Some b, Some c)) %%r_t end).
(* This is beginning to get quite nasty. Someone should write a form for arbitrary
n and prove it's correct :) *)
Definition option_perm4 t1 t2 t3 t4 (p1:parser(tipe_t t1)) (p2: parser(tipe_t t2))
(p3: parser(tipe_t t3)) (p4: parser(tipe_t t4)) :
parser(pair_t(option_t t1) (pair_t(option_t t2) (pair_t(option_t t3) (option_t t4))))
:=
let r_t := pair_t(option_t t1) (pair_t(option_t t2)
(pair_t(option_t t3)(option_t t4))) in
Eps_p @ (fun p => (None, (None, (None, None))) %% r_t)
|+| p1 @ (fun p => (Some p, (None, (None, None))) %% r_t)
|+| p2 @ (fun p => (None, (Some p, (None, None))) %% r_t)
|+| p3 @ (fun p => (None, (None, (Some p, None))) %% r_t)
|+| p4 @ (fun p => (None, (None, (None, Some p))) %% r_t)
|+| perm2 p1 p2 @ (fun p => match p with (a, b)
=> (Some a, (Some b, (None, None))) %% r_t end)
|+| perm2 p1 p3 @ (fun p => match p with (a, c)
=> (Some a, (None, (Some c, None))) %% r_t end)
|+| perm2 p1 p4 @ (fun p => match p with (a, d)
=> (Some a, (None, (None, Some d))) %% r_t end)
|+| perm2 p2 p3 @ (fun p => match p with (b, c)
=> (None, (Some b, (Some c, None))) %% r_t end)
|+| perm2 p2 p4 @ (fun p => match p with (b, d)
=> (None, (Some b, (None, Some d))) %% r_t end)
|+| perm2 p3 p4 @ (fun p => match p with (c, d)
=> (None, (None, (Some c, Some d))) %% r_t end)
|+| perm3 p1 p2 p3 @ (fun p => match p with (a, (b, c))
=> (Some a, (Some b, (Some c, None))) %%r_t end)
|+| perm3 p1 p3 p4 @ (fun p => match p with (a, (c, d))
=> (Some a, (None, (Some c, Some d))) %%r_t end)
|+| perm3 p1 p2 p4 @ (fun p => match p with (a, (b, d))
=> (Some a, (Some b, (None, Some d))) %%r_t end)
|+| perm3 p2 p3 p4 @ (fun p => match p with (b, (c, d))
=> (None, (Some b, (Some c, Some d))) %%r_t end)
|+| perm4 p1 p2 p3 p4 @ (fun p => match p with (a, (b, (c, d)))
=> (Some a, (Some b, (Some c, Some d))) %% r_t end).
Definition opt2b (a: option bool) (default: bool) :=
match a with
| Some b => b
| None => default
end.
Definition prefix_parser_nooverride :=
option_perm2 lock_or_rep_p segment_override_p @
(fun p => match p with (l, s) =>
mkPrefix l s false false %% prefix_t end).
Definition prefix_parser_opsize :=
op_override_p @ (fun p => mkPrefix None None p false %% prefix_t)
|+| op_override_p $ lock_or_rep_p @
(fun p => match p with (b, l) => (mkPrefix (Some l) None b false %% prefix_t) end)
|+| op_override_p $ segment_override_p @
(fun p => match p with (b, s) => (mkPrefix None (Some s) b false %% prefix_t) end)
|+| op_override_p $ lock_or_rep_p $ segment_override_p @
(fun p => match p with (b, (l, s)) =>
(mkPrefix (Some l) (Some s) b false %% prefix_t) end)
|+| op_override_p $ segment_override_p $ lock_or_rep_p @
(fun p => match p with (b, (s, l)) =>
(mkPrefix (Some l) (Some s) b false %% prefix_t) end)
|+| segment_override_p $ op_override_p @
(fun p => match p with (s, b) =>
(mkPrefix None (Some s) b false %% prefix_t) end)
|+| segment_override_p $ op_override_p $ lock_or_rep_p @
(fun p => match p with (s, (b, l)) =>
(mkPrefix (Some l) (Some s) b false %% prefix_t) end)
|+| segment_override_p $ lock_or_rep_p $ op_override_p @
(fun p => match p with (s, (l, b)) =>
(mkPrefix (Some l) (Some s) b false %% prefix_t) end)
|+| lock_or_rep_p $ op_override_p @
(fun p => match p with (l, b) =>
(mkPrefix (Some l) None b false %% prefix_t) end)
|+| lock_or_rep_p $ op_override_p $ segment_override_p @
(fun p => match p with (l, (b, s)) =>
(mkPrefix (Some l) (Some s) b false %% prefix_t) end)
|+| lock_or_rep_p $ segment_override_p $ op_override_p @
(fun p => match p with (l, (s, b)) =>
(mkPrefix (Some l) (Some s) b false %% prefix_t) end).
Definition instruction_parser_list :=
(List.map (fun (p:parser instruction_t) => prefix_parser_nooverride $ p)
instr_parsers_nosize_pre) ++
(List.map (fun (p:parser instruction_t) => prefix_parser_opsize $ p)
instr_parsers_opsize_pre).
Definition instruction_parser := alts instruction_parser_list.
Definition instruction_regexp_pair := parser2regexp instruction_parser.
Record instParserState := mkPS {
inst_ctxt : ctxt_t ;
inst_regexp : regexp (pair_t prefix_t instruction_t) ;
inst_regexp_wf : wf_regexp inst_ctxt inst_regexp
}.
Definition initial_parser_state : instParserState :=
mkPS (snd instruction_regexp_pair) (fst instruction_regexp_pair)
(p2r_wf instruction_parser _).
Definition byte_explode (b:int8) : list bool :=
let bs := Word.bits_of_Z 8 (Word.unsigned b) in
(bs 7)::(bs 6)::(bs 5)::(bs 4)::(bs 3)::(bs 2)::(bs 1)::(bs 0)::nil.
Definition parse_byte (ps:instParserState) (b:int8) :
instParserState * list (prefix * instr) :=
let cs := byte_explode b in
let r' := deriv_parse' (inst_regexp ps) cs in
let wf' := wf_derivs (inst_ctxt ps) cs (inst_regexp ps) (inst_regexp_wf ps) in
(mkPS (inst_ctxt ps) r' wf', apply_null (inst_ctxt ps) r' wf').
End X86_PARSER.
|
{"author": "mpettersson", "repo": "reins-verifier-proof", "sha": "44d0b8e0c29b07eb71b1d6d44b020648783409fb", "save_path": "github-repos/coq/mpettersson-reins-verifier-proof", "path": "github-repos/coq/mpettersson-reins-verifier-proof/reins-verifier-proof-44d0b8e0c29b07eb71b1d6d44b020648783409fb/Model/Decode.v"}
|
subroutine system_setup
use systemparams
implicit none
open(3,file = parameter_path, status='unknown')
read(3,*) nx
read(3,*) star_mass
read(3,*) star_lum
read(3,*) a
read(3,*) e
read(3,*) phi_peri
read(3,*) angular_position
read(3,*) period
read(3,*) spin_obliquity
read(3,*) azim_obliquity
read(3,*) ocean_fraction
read(3,*) initial_temp
read(3,*) sim_time
read(3,*) freq
read(3,*) p
close(3)
allocate(T_old(nx+1))
allocate(T(nx+1))
allocate(tt(nx+1))
allocate(x(nx+1))
allocate(lat(nx+1))
allocate(latdeg(nx+1))
allocate(cos_H(nx+1))
allocate(f_ice(nx+1))
allocate(C_tot(nx+1))
allocate(albedo(nx+1))
allocate(insol(nx+1))
allocate(tau_ir(nx+1))
allocate(tau_ir_1(nx+1))
allocate(tau_0(nx+1))
allocate(infrared(nx+1))
allocate(Q(nx+1))
allocate(deltax(nx+1))
allocate(hab(nx+1))
dlat = pi/(1.0*nx)
T(:) = initial_temp
T_old(:) = T(:)
time = 0.0
do i=1,nx+1
lat(i) = -pi/2.0 + (i-1)*dlat
x(i) = sin(lat(i))
end do
latdeg(:) = lat(:)*180.0/pi
deltax(:) = cos(lat(:))*dlat
tt(:) = 0.0
if(star_lum <= 0.0) star_lum = star_mass**4.0
orb_period = sqrt(a*a*a/star_mass)
orb_freq = 2.0*pi/(orb_period*year)
h_ang = sqrt(star_mass*a*(1.0-e*e))
phi_peri = phi_peri*pi/180.0
spin_obliquity = spin_obliquity*pi/180.0
azim_obliquity = azim_obliquity*pi/180.0
angular_position = angular_position*pi/180.0
r = a*(1.0-e*e)/(1.0+e*cos(angular_position-phi_peri))
diff = D*period*period*(p/p0)
if(ocean_fraction>1.0) ocean_fraction = 1.0
if(ocean_fraction<0.0) ocean_fraction = 0.0
f_land = 1.0-ocean_fraction
end subroutine system_setup
|
{"hexsha": "317a9414ceb338c950660889c9cd4c336c714951", "size": 1836, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "src/system_setup.f90", "max_stars_repo_name": "mjdbahram/exoClimate", "max_stars_repo_head_hexsha": "48ae1ea333d8ca117dd736da575fd1f2a55012f4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/system_setup.f90", "max_issues_repo_name": "mjdbahram/exoClimate", "max_issues_repo_head_hexsha": "48ae1ea333d8ca117dd736da575fd1f2a55012f4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/system_setup.f90", "max_forks_repo_name": "mjdbahram/exoClimate", "max_forks_repo_head_hexsha": "48ae1ea333d8ca117dd736da575fd1f2a55012f4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.6875, "max_line_length": 58, "alphanum_fraction": 0.584422658, "num_tokens": 650}
|
# -*- coding: utf-8 -*-
"""
This script works for foam phantom.
"""
import numpy as np
import glob
import dxchange
import matplotlib.pyplot as plt
import scipy.interpolate
import tomopy
from scipy.interpolate import Rbf
from mpl_toolkits.mplot3d.axes3d import Axes3D
from matplotlib import cm
from project import *
from simulator import *
from sinogram import *
from instrument import *
from sample import *
np.set_printoptions(threshold=np.inf)
if __name__ == '__main__':
sim = Simulator()
sim.read_raw_sinogram('data/foam_sino_halved.tiff', center=1024)
inst = Instrument(512)
sino_width = 2048
half_sino_width = 1024
center_pos = [(1024, 844), (1024, 1204)]
stage_pos = [844, 1204]
inst.add_center_positions(center_pos)
inst.add_stage_positions(stage_pos)
sim.load_instrument(inst)
sim.sample_full_sinogram_local(save_path='temp/masks', save_mask=True)
|
{"hexsha": "286679f84960e6ae0f9492ba15fa6e49685d30ea", "size": 916, "ext": "py", "lang": "Python", "max_stars_repo_path": "sampling_demo.py", "max_stars_repo_name": "mdw771/tomosim", "max_stars_repo_head_hexsha": "7736031aee861cd0ac995d83c2231a7df4fc3365", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-07-14T15:06:50.000Z", "max_stars_repo_stars_event_max_datetime": "2018-07-14T15:06:50.000Z", "max_issues_repo_path": "sampling_demo.py", "max_issues_repo_name": "mdw771/tomosim", "max_issues_repo_head_hexsha": "7736031aee861cd0ac995d83c2231a7df4fc3365", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "sampling_demo.py", "max_forks_repo_name": "mdw771/tomosim", "max_forks_repo_head_hexsha": "7736031aee861cd0ac995d83c2231a7df4fc3365", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 20.8181818182, "max_line_length": 74, "alphanum_fraction": 0.7412663755, "include": true, "reason": "import numpy,import scipy,from scipy", "num_tokens": 235}
|
import numpy as np
import torch
from lamp.optimization import LearningScheduleWrapper
from bottleneck.components import PredictionEnsemble, Analysis
from bottleneck.VirtualObservables import QuerryPointEnsemble, QuerryEnsemble, VirtualObservablesEnsemble, EnergyVirtualObservablesEnsemble
from torch.utils.tensorboard import SummaryWriter
from bottleneck.VirtualObservables import RadialBasisFunctionSampler
from physics.BoundaryConditions import BoundaryConditionEnsemble
import time
import os
from factories.model import ModelFactory
from factories.data import DataFactory
from utils.time import Timer
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator
import matplotlib.gridspec as gridspec
from fawkes.Plotting import PlotFunction2D
from tqdm import tqdm
from mpl_toolkits.mplot3d import Axes3D
class TrainerParameters(object):
def __init__(self):
self.data = dict()
self.scheduler = dict()
self.trainer = dict()
self.optimizer = dict()
self.margs = dict()
self.dargs = dict()
self._dtype = None
self._device = None
@property
def device(self):
if self._device is None:
self._recover_dtype_device()
return self._device
@property
def dtype(self):
        if self._dtype is None:
self._recover_dtype_device()
return self._dtype
def _recover_dtype_device(self):
try:
mf = ModelFactory.FromIdentifier(self.identifier)
except AttributeError:
raise RuntimeError('TrainerParameters cannot provide dtype or device, since identifier has not yet been set')
self._dtype, self._device = mf.dtype, mf.device
class Trainer(object):
def __init__(self, mf, df, folder=None, comment='', debug = False):
self._mf = mf
self._df = df
self._folder = folder
self._dl = None
self._dlu = None
self.model = None
self.physics = None
self.encoder = None
self.discriminative_model = None
self.datasets = None
self._optimizer = None
self._scheduler_wrapper = None
self._PE = None
        self._analysis = None
        self._dtype = None
self._device = None
self._global_runtime = 0
self._global_iteration_counter = 0
physics, model, discriminative_model, encoder, dtype, device = self._mf.setup()
model.writer = SummaryWriter(comment=comment)
self.model = model
self.encoder = encoder
self.physics = physics
self.discriminative_model = discriminative_model
self._dtype = dtype
self._device = device
self._config = None
self._monitor = dict()
self._monitor['elbo'] = list()
self._monitor['elbo_iter'] = list()
self._monitor['lr'] = list()
self._monitor['lr_iter'] = list()
self.debug = debug
self._armortized_bs = None
self._vo_is_initialized = False
self._finalized = False
if self._folder is not None:
if self._folder[-1] != '/':
self._folder += '/'
os.makedirs(self._folder, exist_ok=True)
def info(self):
if self.model.encoder is None:
assert self._armortized_bs is None
else:
assert self._armortized_bs is not None
n_unsupervised = 0
try:
n_unsupervised = self.model.datasets['unsupervised'].N
except:
pass
n_vo = 0
try:
n_vo = self.model.datasets['vo'].N
except:
pass
print("============ MODEL INFO ==============")
print("N_unsupervised: {}".format(n_unsupervised))
print("N_supervised: {}".format(self.model.datasets['supervised'].N))
print("N_vo: {}".format(n_vo))
        amortization = self.model.encoder is not None
        print("Amortization: {} ".format(amortization))
print("Device: {}".format(self.model.device))
print("Dtype: {}".format(self.model.dtype))
print("========================================")
@property
def mf(self):
return self._mf
@property
def dl(self):
return self._dl
@property
def dlu(self):
return self._dlu
def setup_config(self, **kwargs):
# config can be (and is) overwritten later
self._config = dict()
self._config['lr_init'] = None
self._config['normalize'] = False
self._config['l2_penalty'] = None
self._config['l1_penalty'] = None
self._config['N_PE_updates'] = 3
self._config['N_PE_updates_final'] = 100
self._config['N_monte_carlo_analysis'] = 64
self._config['N_monte_carlo_analysis_final'] = 128
self._config['N_monitor_interval'] = 500
self._config['N_tensorboard_logging_interval'] = 1
self._config['N_vo_update_interval'] = 250
self._config['N_vo_holdoff']= 100
self._config['N_monte_carlo_vo'] = 128
self._config['MonitorTraining'] = True
for key, value in kwargs.items():
if key not in self.config:
raise KeyError('Could not set > {} < in local config in Trainer'.format(key))
self._config[key] = value
@property
def config(self):
if self.debug:
config = self._config.copy()
config['N_monitor_interval'] = 5
config['N_PE_updates'] = 1
config['N_PE_updates_final'] = 5
config['N_monte_carlo_analysis'] = 8
config['N_monte_carlo_analysis_final'] = 16
config['N_monte_carlo_vo'] = 16
config['N_tensorboard_logging_interval'] = 1
return config
else:
return self._config
def get(self, configkey):
try:
value = self.config[configkey]
except KeyError:
raise KeyError('Could not retrieve > {} < from local config in Trainer'.format(configkey))
return value
@classmethod
def FromIdentifier(cls, identifier, margs = None, dargs = None, *args, **kwargs):
mf = ModelFactory.FromIdentifier(identifier)
if margs is not None:
for key, val in margs.items():
mf.set(key, val)
# deprecated: this is actually not used and handled externally.
df = None
return cls(mf=mf, df=df, *args, **kwargs)
@property
def device(self):
return self._device
def reset(self):
raise NotImplementedError
@property
def dtype(self):
return self._dtype
@property
def scheduler_wrapper(self):
return self._scheduler_wrapper
def setup(self, scheduler_wrapper, **kwargs):
if self._config is None:
raise RuntimeError('Config has not yet been setup')
if scheduler_wrapper is None:
scheduler_wrapper = LearningScheduleWrapper.Dummy()
self._optimizer = torch.optim.Adam(params = self.model.parameters(), lr=self.get('lr_init'))
self._scheduler_wrapper = scheduler_wrapper
self._PE = PredictionEnsemble(self.model, self.datasets['validation'], self._scheduler_wrapper, self.get('lr_init'), writer=self.model.writer)
self._analysis = Analysis.FromPredictionEnsemble(self._PE)
self._scheduler_wrapper.register_optimizer(self._optimizer, 'training')
self._analysis_training = Analysis(self.model.q_z['supervised'], self.model, self.model.datasets['supervised'])
self.model.tensorboard_logging_interval = self.config['N_tensorboard_logging_interval']
def tinfo(self, N = None):
if self.gn == 0:
return
avg = self._global_runtime/self.gn
print("{} iterations in {} seconds : that makes on average {} seconds per iteration".format(self.gn, self._global_runtime, avg))
        if N is not None:
print("Will require (approx) {} for {} iterations".format(avg*N, N))
@property
def gn(self):
return self._global_iteration_counter
def _step(self, *args, **kwargs):
self._global_iteration_counter += 1
def set_data_from_datasets(self, dl, dlu, datasets, Nu, Ns, Nvo, VO=None, vo_spec = None, armortized_bs = None):
assert 'validation' in datasets and len(datasets['validation']) > 0
assert Nu is not None
assert Ns is not None
assert Nvo is not None
assert Nu >= 0
assert Ns >= 0
assert Nvo >= 0
self._dl = dl
self._dlu = dlu
assert 'supervised' in datasets
if Nu > 0:
assert 'unsupervised' in datasets and datasets['unsupervised']
if Nvo > 0:
assert 'vo' in datasets and datasets['vo']
if 'supervised' in datasets:
datasets['supervised'].restrict(Ns)
if 'vo' in datasets:
datasets['vo'].restrict(Nvo)
if 'unsupervised' in datasets:
datasets['unsupervised'].restrict(Nu)
if Nvo > 0:
assert 'vo' in datasets
if VO is None:
assert isinstance(vo_spec, dict)
QPE = QuerryPointEnsemble.FromDataSet(datasets['vo'], self.physics['fom'])
if vo_spec['type'].lower() == 'energy':
assert vo_spec['l_rbf'] is not None
assert vo_spec['N_rbf'] is not None
sampler = RadialBasisFunctionSampler(qp=QPE[0], l=vo_spec['l_rbf'], N_aux=vo_spec['N_rbf'])
VO = EnergyVirtualObservablesEnsemble(QPE, vo_spec['energy_num_iterations_per_update'], sampler=sampler, dtype=self.dtype, device=self.device)
VO.set_temperature_schedule('exponential', T_init=vo_spec['T_init'], T_final=vo_spec['T_final'], num_steps=vo_spec['T_iterations'])
elif vo_spec['type'].lower() == 'constrain':
QE = QuerryEnsemble.FromQuerryPointEnsemble(QPE, self.physics, vo_spec['CGR'], vo_spec['flux'],
vo_spec['N_gaussian'], vo_spec['N_rbf'],
vo_spec['l_rbf'], dtype=self.dtype, device=self.device)
VO = VirtualObservablesEnsemble(QPE, QE, dtype=self.dtype, device=self.device)
else:
raise ValueError(
'Type: {} not known as specification.'.format(
vo_spec['type']))
else:
raise NotImplementedError('Cannot restrict a virtual observable ensemble')
create_unsupervised_qZ = True
if Nu is not None and Nu > 0:
if armortized_bs is not None:
if self._optimizer is not None:
raise RuntimeError('Optimizer has already been created without encoder')
create_unsupervised_qZ = False
self.model.encoder = self.encoder
datasets['unsupervised'].restrict(Nu)
self._armortized_bs = armortized_bs
self.model.register_datasets(datasets, VO, create_unsupervised_variational_approximation=create_unsupervised_qZ)
self.datasets = datasets
def results(self, analysis = None):
if analysis is None:
analysis = self._analysis
to_fetch = ['relerr_y', 'r2_y', 'logscore_y']
results = dict()
for fetch in to_fetch:
results[fetch] = analysis.data[fetch].final()
results['runtime'] = self._global_runtime
return results
def use_vo(self):
return 'vo' in self.datasets and self.datasets['vo']
def update_vo(self):
if self.use_vo():
update_vo = self.gn >= self.get('N_vo_holdoff') and (np.mod(self.gn, self.get('N_vo_update_interval')) == 0 or not self._vo_is_initialized) and self.datasets['vo']
else:
update_vo = False
return update_vo
def run(self, N, restart = False, verbose = True, uverbose = False, callback = None):
if self._finalized:
raise RuntimeError('Cannot run trainer which has already been finalized')
if verbose:
print("Starting Trainer - RUN")
timer = Timer(N)
t_start = time.time()
for n in tqdm(range(N)):
self._optimizer.zero_grad()
if self.update_vo():
self.model.update_virtual_observables(self.get('N_monte_carlo_vo'), return_mean_stddev=False, step=self.gn)
self._vo_is_initialized = True
elbo = self.model.elbo(step=self.gn, vo_holdoff = self.gn < self.get('N_vo_holdoff'), armortized_bs = self._armortized_bs, normalize=self.get('normalize'),
l1_penalty = self.get('l1_penalty'), l2_penalty=self.get('l2_penalty'))
J = -elbo
J.backward()
self._optimizer.step()
self._PE.update(self.get('N_PE_updates'), step=self.gn)
if np.mod(n, self.get('N_monitor_interval')) == 0 and n > 0:
self.model.record(self.gn)
self._monitor['elbo_iter'].append(self.gn)
self._monitor['elbo'].append(elbo.item())
self._monitor['lr'].append(self._optimizer.param_groups[0]['lr'])
self._monitor['lr_iter'].append(self.gn)
self._analysis.eval_all_y(self.get('N_monte_carlo_analysis'), self.gn)
if self.get('MonitorTraining'):
self._analysis_training.eval_all_y(self.get('N_monte_carlo_analysis'), self.gn)
if self.model.encoder is not None:
analysis_encoder = Analysis.FromEncoder(self.model, self.datasets['validation'])
logscore_y, r2_y, relerr_y = analysis_encoder.eval_all_y(
self.get('N_monte_carlo_analysis_final'))
self.model.writer.add_scalar('validation_encoder/logscore_y', logscore_y, global_step=self.gn)
self.model.writer.add_scalar('validation_encoder/r2_y', r2_y, global_step = self.gn)
self.model.writer.add_scalar('validation_encoder/relerr_y', relerr_y, global_step=self.gn)
if verbose:
print("Step: {} / {} || ELBO= {} || LogScore(y): {} || RRT: {} ".format(n, N, elbo.item(), self._analysis.data['logscore_y'].final(), timer.RRT(step=n)))
elif uverbose:
print("Step: {} / {} || RRT: {} ".format(n, N, timer.RRT(step=n)))
self._step(n, N)
self._scheduler_wrapper.step('training', metric=elbo)
if callback is not None:
callback(n, self.gn)
for nl in range(self.gn, self.gn + self.get('N_PE_updates_final')):
self._PE.update(self.get('N_PE_updates'), step=nl)
self._analysis.eval_all_y(self.get('N_monte_carlo_analysis_final'), self.gn + self.get('N_PE_updates_final'))
self._global_runtime += time.time() - t_start
def finalize(self):
try:
results = self.results()
hpdict = {'dummy' : 0}
self.model.writer.add_hparams(hparam_dict= hpdict, metric_dict = results)
except AttributeError:
# not implemented for older pytorch versions, apparently
pass
try:
self.model.writer.flush()
except AttributeError:
pass
self.model.writer.close()
self._finalized = True
def plot_elbo(self, figsize):
plt.figure(figsize = figsize)
plt.plot(self._monitor['elbo_iter'], self._monitor['elbo'], '-o')
plt.grid()
plt.xlabel('Iterations')
plt.ylabel('ELBO')
plt.title('ELBO')
def plot_predictive_logscore(self, figsize):
plt.figure(figsize = figsize)
plt.plot(self._analysis.data['logscore_y'].iteration, self._analysis.data['logscore_y'].value, '-o')
plt.grid()
plt.xlabel('# Iteration')
plt.ylabel('Logscore')
plt.title('Predictive Logscore (validation)')
def Plot2D(trainer , indeces = None):
if indeces is not None:
assert len(indeces) == 3
else:
indeces = [0,7,8]
azim = 240
elev = 0
width = 10
height = 16
analysis = trainer._analysis
physics = trainer.physics
Y_val = trainer.datasets['validation'].get('Y')
BCE_val = trainer.datasets['validation'].get('BCE')
def modify_ax(ax):
ax.grid(False)
ax.xaxis.pane.set_edgecolor('black')
ax.yaxis.pane.set_edgecolor('black')
ax.xaxis.pane.fill = False
ax.yaxis.pane.fill = False
ax.zaxis.pane.fill = False
ax.xaxis._axinfo['tick']['inward_factor'] = 0
ax.xaxis._axinfo['tick']['outward_factor'] = 0.4
ax.yaxis._axinfo['tick']['inward_factor'] = 0
ax.yaxis._axinfo['tick']['outward_factor'] = 0.4
ax.zaxis._axinfo['tick']['inward_factor'] = 0
        ax.zaxis._axinfo['tick']['outward_factor'] = 0.4
ax.xaxis.set_major_locator(MultipleLocator(5))
ax.yaxis.set_major_locator(MultipleLocator(5))
ax.zaxis.set_major_locator(MultipleLocator(0.1))
def shift_axes_horizontally(ax, val):
pos1 = ax.get_position()
pos2 = [pos1.x0 + val, pos1.y0, pos1.width, pos1.height]
ax.set_position(pos2)
fig = plt.figure(figsize=(width, height))
gs1 = gridspec.GridSpec(4, 2)
gs1.update(wspace=0.025, hspace=0.0) # set the spacing between axes.
pos = 0
for i, ind in enumerate(indeces):
y_true = Y_val[ind, :].detach().cpu().numpy().flatten()
Y_sample = analysis.sample_predictive_y(1024, ind)
y_mean = torch.mean(Y_sample, 0).detach().cpu().numpy().flatten()
y_mean_f = physics['fom'].scatter_restricted_solution(y_mean, bc=BCE_val[ind], ReturnFunction=True)
y_true_f = physics['fom'].scatter_restricted_solution(y_true, bc=BCE_val[ind], ReturnFunction=True)
ax1 = plt.subplot(gs1[pos], projection='3d')
plt.axis('on')
PlotFunction2D(y_mean_f, ax=ax1, fig=fig)
ax1.set_xticklabels([])
ax1.set_yticklabels([])
ax1.view_init(azim=azim, elev=elev)
modify_ax(ax1)
pos += 1
plt.xlabel(r'$s_1$', labelpad=-12)
plt.ylabel(r'$s_2$', labelpad=-12)
if i == 0:
plt.title('Mean Prediction')
ax1 = plt.subplot(gs1[pos], projection='3d')
plt.axis('on')
PlotFunction2D(y_true_f, ax=ax1, fig=fig)
ax1.set_xticklabels([])
ax1.set_yticklabels([])
ax1.view_init(azim=azim, elev=elev)
pos += 1
if i == 0:
plt.title('Reference')
plt.xlabel(r'$s_1$', labelpad=-12)
plt.ylabel(r'$s_2$', labelpad=-12)
modify_ax(ax1)
shift_axes_horizontally(ax1, -0.08)
def CreateTrainer(params, dl, dlu):
return CreateTrainerFromPermutation(params, permutation=torch.arange(dl.N), permutation_u=torch.arange(dlu.N))
def CreateTrainerFromPermutation(params, permutation = None, permutation_u = None, dl=None, dlu=None, datasets=None, BCE_encoding = None):
trainer = Trainer.FromIdentifier(params.identifier, params.margs, params.dargs, folder=params.folder, comment = params.comment, debug=params.debug)
if BCE_encoding is not None:
BCE = BoundaryConditionEnsemble.FromEncoding(BCE_encoding, V_fom=trainer.physics['fom'].V, V_rom=trainer.physics['rom'].V, model_factory=trainer.physics['fom'].factory)
else:
BCE = None
if dl is None or dlu is None or datasets is None:
if not (dl is None and dlu is None and datasets is None):
raise Exception('Either pass all required quantities, or none')
dl, dlu, datasets = CreateDataSetsFromPermutation(params.identifier, permutation, permutation_u,
params.data['N_val'], params.data['N_u_max'], params.data['N_s_max'],
params.data['N_vo_max'], trainer.physics, BCE, trainer.dtype, trainer.device)
scheduler_wrapper = LearningScheduleWrapper.MultiStepLR(params.scheduler['milestones'], factor=params.scheduler['factor'])
trainer.set_data_from_datasets(dl, dlu, datasets, params.data['N_u'], params.data['N_s'], params.data['N_vo'], VO=None, vo_spec=params.data['vo_spec'],
armortized_bs=params.data['armortized_bs'])
trainer.setup_config(**params.trainer)
trainer.setup(scheduler_wrapper=scheduler_wrapper)
assert trainer.datasets['supervised'].get('X').shape[0] == params.data['N_s']
assert trainer.datasets['supervised'].get('Y').shape[0] == params.data['N_s']
if params.data['N_u'] > 0:
if params.data['armortized_bs'] is not None:
assert trainer.datasets['unsupervised'].get('X', params.data['armortized_bs']).shape[0] == params.data['armortized_bs']
assert trainer.datasets['unsupervised'].get('X').shape[0] == params.data['N_u']
else:
assert trainer.datasets['unsupervised'].get('X').shape[0] == params.data['N_u']
return trainer
def CreateDataSetsFromPermutation(identifier, permutation, permutation_u, N_val, N_u_max, N_s_max, N_vo_max, physics, BCE, dtype, device):
df = DataFactory.FromIdentifier(identifier)
dl, dlu = df.setup()
dl.assemble(physics, BCE=BCE)
assert len(dl) == len(permutation)
assert len(dlu) == len(permutation_u)
partition = dict()
partition['supervised'] = N_s_max
if N_vo_max > 0:
partition['vo'] = N_vo_max
partition['validation'] = N_val
dl.randomized_partition(partition, identifier='default', ForceOverwrite=False, permutation=permutation)
datasets = dl.construct_dataset_dictionary(identifier = 'default', dtype=dtype, device=device)
if N_u_max > 0:
partition_aux = dict()
partition_aux['unsupervised'] = N_u_max
dlu.randomized_partition(partition_aux, identifier='default', ForceOverwrite=False, permutation=permutation_u)
datasets_aux = dlu.construct_dataset_dictionary(identifier = 'default', dtype=dtype, device=device)
datasets['unsupervised'] = datasets_aux['unsupervised']
return dl, dlu, datasets
|
{"hexsha": "cd53c7598ac16f9139d44000fd288d304f6644fa", "size": 22506, "ext": "py", "lang": "Python", "max_stars_repo_path": "training.py", "max_stars_repo_name": "bdevl/PGMCPC", "max_stars_repo_head_hexsha": "cac2fe4304ae42ef2a0d94219b4349d51e86ab2d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-10-23T13:40:56.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-10T03:42:52.000Z", "max_issues_repo_path": "training.py", "max_issues_repo_name": "pkmtum/generative-physics-informed-pde", "max_issues_repo_head_hexsha": "63ec383da0f2dbf0d8ffbbb44a670e90d07c132e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "training.py", "max_forks_repo_name": "pkmtum/generative-physics-informed-pde", "max_forks_repo_head_hexsha": "63ec383da0f2dbf0d8ffbbb44a670e90d07c132e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.8945783133, "max_line_length": 177, "alphanum_fraction": 0.6125921976, "include": true, "reason": "import numpy", "num_tokens": 5313}
|
from collections import OrderedDict
import numpy as np
import megengine.functional as F
import megengine.module as M
from megengine import Tensor
from megengine.core._imperative_rt.core2 import apply
from megengine.core.ops import builtin
from megengine.module import Module
from megengine.traced_module import TracedModule, enable_expr_checker, trace_module
from megengine.traced_module.expr import Apply, CallFunction, Constant
class MyModule1(M.Module):
def forward(self, x):
y = Tensor(x)
y += 1
x = x + 2
return x, y
class MyModule2(M.Module):
def forward(self, x):
y = Tensor([1, x, 1])
y += 1
x = x + 2
return x, y
class MyModule3(M.Module):
def __init__(self):
super().__init__()
self.modules = [
M.Elemwise("ADD"),
M.Elemwise("ADD"),
OrderedDict([("a", M.Elemwise("ADD")), ("b", M.Elemwise("ADD"))]),
M.Elemwise("RELU"),
M.Elemwise("RELU"),
]
def forward(self, a, b):
x = self.modules[0](a, b)
y = self.modules[1](a, b)
assert list(self.modules[2].keys()) == ["a", "b"]
for _, m in self.modules[2].items():
y = m(x, y)
for m in self.modules[3:]:
y = m(y)
return y
class MyModule4(M.Module):
def __init__(self):
super().__init__()
self.add = F.add
def forward(self, x, y):
return self.add(x, y)
def test_trace_module():
enable_expr_checker()
x = Tensor(1)
m1 = MyModule1()
tm1 = trace_module(m1, x)
m2 = MyModule2()
tm2 = trace_module(m2, x)
inp = Tensor(2)
gt = m1(inp)
output = tm1(inp)
for a, b in zip(output, gt):
np.testing.assert_equal(a.numpy(), b.numpy())
gt1 = m2(inp)
output1 = tm2(inp)
for a, b in zip(output1, gt1):
np.testing.assert_equal(a.numpy(), b.numpy())
a, b = Tensor(1), Tensor(2)
m3 = MyModule3()
gt = m3(a, b)
tm3 = trace_module(m3, a, b)
out = tm3(a, b)
np.testing.assert_equal(out.numpy(), gt.numpy())
assert isinstance(tm3.modules.__dict__["0"], M.Elemwise)
assert isinstance(tm3.modules.__dict__["2"], TracedModule)
assert isinstance(tm3.modules.__dict__["2"].a, M.Elemwise)
assert isinstance(tm3.modules.__dict__["3"], M.Elemwise)
m4 = MyModule4()
tm4 = trace_module(m4, a, b)
np.testing.assert_equal(tm4(a, b).numpy(), 3)
np.testing.assert_equal(tm4(a, y=b).numpy(), 3)
np.testing.assert_equal(tm4(x=a, y=b).numpy(), 3)
tm4 = trace_module(m4, a, y=b)
np.testing.assert_equal(tm4(a, b).numpy(), 3)
np.testing.assert_equal(tm4(a, y=b).numpy(), 3)
np.testing.assert_equal(tm4(x=a, y=b).numpy(), 3)
tm4 = trace_module(m4, x=a, y=b)
np.testing.assert_equal(tm4(a, b).numpy(), 3)
np.testing.assert_equal(tm4(a, y=b).numpy(), 3)
np.testing.assert_equal(tm4(x=a, y=b).numpy(), 3)
tm5 = trace_module(tm4, a, b)
np.testing.assert_equal(tm5(a, b).numpy(), 3)
np.testing.assert_equal(tm5(a, y=b).numpy(), 3)
np.testing.assert_equal(tm5(x=a, y=b).numpy(), 3)
tm5 = trace_module(tm4, a, y=b)
np.testing.assert_equal(tm5(a, b).numpy(), 3)
np.testing.assert_equal(tm5(a, y=b).numpy(), 3)
np.testing.assert_equal(tm5(x=a, y=b).numpy(), 3)
tm5 = trace_module(tm4, x=a, y=b)
np.testing.assert_equal(tm5(a, b).numpy(), 3)
np.testing.assert_equal(tm5(a, y=b).numpy(), 3)
np.testing.assert_equal(tm5(x=a, y=b).numpy(), 3)
assert len(tm4.graph._exprs) == 1
assert isinstance(tm4.graph._exprs[0], CallFunction)
class MyModule5(Module):
def __init__(self):
super().__init__()
self.m1 = tm4
def forward(self, x, y):
return self.m1(x, y)
tm6 = trace_module(MyModule5(), a, b)
assert tm6.m1.argspec is None
assert tm6.m1._is_top is False
def test_trace_module_2():
class Model(M.Module):
def __init__(self):
super().__init__()
def forward(self, x):
out = x.shape
out = apply(builtin.Elemwise(mode="ADD"), out, Tensor(1))
return out
traced_model = trace_module(Model(), Tensor(([1,])))
assert isinstance(traced_model.graph._exprs[0], Apply) and isinstance(
traced_model.graph._exprs[0].opdef, builtin.GetVarShape
)
assert isinstance(traced_model.graph._exprs[1], Constant)
assert isinstance(traced_model.graph._exprs[2], Apply) and isinstance(
traced_model.graph._exprs[2].opdef, builtin.Elemwise
)
assert int(traced_model(Tensor([1, 2]))[0]) == 3
|
{"hexsha": "43d3f492eb4782364ae0a8cdf347342b5543c20b", "size": 4653, "ext": "py", "lang": "Python", "max_stars_repo_path": "imperative/python/test/unit/traced_module/test_trace_module.py", "max_stars_repo_name": "bealwang/MegEngine", "max_stars_repo_head_hexsha": "df4153dc718b4544e720c58e439a0623c018cee2", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-03-21T03:13:45.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-21T03:13:45.000Z", "max_issues_repo_path": "imperative/python/test/unit/traced_module/test_trace_module.py", "max_issues_repo_name": "bealwang/MegEngine", "max_issues_repo_head_hexsha": "df4153dc718b4544e720c58e439a0623c018cee2", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "imperative/python/test/unit/traced_module/test_trace_module.py", "max_forks_repo_name": "bealwang/MegEngine", "max_forks_repo_head_hexsha": "df4153dc718b4544e720c58e439a0623c018cee2", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.08125, "max_line_length": 83, "alphanum_fraction": 0.6036965399, "include": true, "reason": "import numpy", "num_tokens": 1356}
|
include("../src/QPnorm.jl")
include("../examples/subproblems.jl")
using Main.QPnorm
using LinearAlgebra, Random, Test
function optimality_metrics(P, q, A, b, r_min, r_max, x, λ)
m, n = size(A)
f = dot(x, P*x)/2 + dot(x, q)
grad_residual = norm(P*x + q + A'*λ[1:end-1] + λ[end]*x, Inf)
infeasibility = max(maximum(A*x - b), norm(x)^2 - r_max^2, r_min^2 - norm(x)^2, 0)
# Find active r, if any
if abs(norm(x) - r_min) < abs(norm(x) - r_max)
r = r_min
else
r = r_max
end
complementarity = maximum(minimum([abs.(λ) abs.([A*x - b; norm(x)^2 - r^2])], dims=2))
dual_infeasibility = max(-minimum(λ), 0.0)
active_set = λ .>= 1e-8
A_active = [A; x'][active_set, :]
if length(A_active) > 0
V = nullspace([A; x'][active_set, :])
else
V = diagm(0 => ones(n))
end
if length(V) > 0
min_eig = minimum(eigvals(Symmetric(V'*(P + I*λ[end])*V)))
else
min_eig = 0.0
end
return f, grad_residual, infeasibility, dual_infeasibility, complementarity, min_eig
end
rng = MersenneTwister(123)
tol = 1e-7
@testset "Optimality Conditions of a random problem" begin
n = 100
m = 200
P = randn(rng, n, n); P = (P + P')/2;
q = randn(rng, n)
A = randn(rng, m, n)
b = randn(rng, m)
r_min = 10.0; r_max = 20.0;
x_init = find_feasible_point(A, b, r_min, r_max)
x, λ = Main.QPnorm.solve(P, q, A, b, x_init, r_min=r_min, r_max=r_max, printing_interval=100)
f, grad, inf, dinf, compl, min_eig = optimality_metrics(P, q, A, b, r_min, r_max, x, λ)
@test grad < tol
@test inf < tol
@test dinf < tol
@test compl < tol
@test -min_eig < tol
end
|
{"hexsha": "5b0669c2647172c63d75f9af23a8f79f4db960ed", "size": 1675, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/test_solver.jl", "max_stars_repo_name": "oxfordcontrol/QPnorm.jl", "max_stars_repo_head_hexsha": "746eaed3901b47622c0337cde615e82321707eba", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-05-02T20:25:45.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-04T05:18:47.000Z", "max_issues_repo_path": "test/test_solver.jl", "max_issues_repo_name": "oxfordcontrol/QPnorm.jl", "max_issues_repo_head_hexsha": "746eaed3901b47622c0337cde615e82321707eba", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2020-07-31T20:50:17.000Z", "max_issues_repo_issues_event_max_datetime": "2020-08-03T02:58:07.000Z", "max_forks_repo_path": "test/test_solver.jl", "max_forks_repo_name": "oxfordcontrol/QPnorm.jl", "max_forks_repo_head_hexsha": "746eaed3901b47622c0337cde615e82321707eba", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.8793103448, "max_line_length": 97, "alphanum_fraction": 0.5791044776, "num_tokens": 618}
|
from numpy import exp, array, random, dot
class NeuronLayer():
def __init__(self, number_of_neurons, number_of_inputs_per_neuron):
self.synaptic_weights = 2 * random.random((number_of_inputs_per_neuron, number_of_neurons)) - 1
class NeuralNetwork():
def __init__(self, neural_layers):
self.neural_layers = neural_layers
self.neural_layers_output = []
# The Sigmoid function, which describes an S shaped curve.
# We pass the weighted sum of the inputs through this function to
# normalise them between 0 and 1.
def __sigmoid(self, x):
return 1 / (1 + exp(-x))
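    # For example, __sigmoid(0) = 0.5, __sigmoid(4) is roughly 0.982 and
    # __sigmoid(-4) is roughly 0.018, so large weighted sums saturate
    # towards 1 or 0.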
# The derivative of the Sigmoid function.
# This is the gradient of the Sigmoid curve.
# It indicates how confident we are about the existing weight.
def __sigmoid_derivative(self, x):
return x * (1 - x)
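    # Note: x here is an output that has already passed through the sigmoid,
    # so x * (1 - x) equals the sigmoid's derivative at the original weighted
    # sum without recomputing the exponential.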
# We train the neural network through a process of trial and error.
# Adjusting the synaptic weights each time.
def train(self, training_set_inputs, training_set_outputs, number_of_training_iterations):
for iteration in range(number_of_training_iterations):
self.think(training_set_inputs, True)
neural_layer_length = len(self.neural_layers)
layer_error = layer_delta = layer_adjustment = None
index = neural_layer_length - 1
while index > -1:
layer = self.neural_layers[index]
                if index == neural_layer_length - 1:
                    # Output layer: error is the difference between the targets and the prediction.
                    layer_error = training_set_outputs - self.neural_layers_output[index]
                else:
                    # Hidden layers: propagate the following layer's delta back through its weights.
                    layer_error = layer_delta.dot(self.neural_layers[index + 1].synaptic_weights.T)
layer_delta = layer_error * self.__sigmoid_derivative(self.neural_layers_output[index])
if index == 0:
layer_adjustment = training_set_inputs.T.dot(layer_delta)
else:
layer_adjustment = self.neural_layers_output[index-1].T.dot(layer_delta)
self.neural_layers[index].synaptic_weights += layer_adjustment
index -= 1
# The neural network thinks.
def think(self, training_set_inputs, training=False):
outputs = []
for i, layer in enumerate(self.neural_layers):
output_from_layer = self.__sigmoid(dot(training_set_inputs, self.neural_layers[i].synaptic_weights))
outputs.append(output_from_layer)
training_set_inputs = output_from_layer
if training:
self.neural_layers_output = outputs
else:
return outputs
# The neural network prints its weights
def print_weights(self):
for i, layer in enumerate(self.neural_layers):
print(" Layer %s: " % (i))
print(layer.synaptic_weights)
print("=====================================")
if __name__ == "__main__":
#Seed the random number generator
random.seed(1)
neural_layers = [
NeuronLayer(4, 3), # Create layer Layer=1 (4 neurons, each with 3 inputs)
#NeuronLayer(9, 4), # Create layer Output-4 (9 neurons, each with 3 inputs)
#NeuronLayer(7, 9), # Create layer Output-3 (7 neurons, each with 9 inputs)
#NeuronLayer(4, 7), # Create layer Output-2 (4 neurons, each with 7 inputs)
NeuronLayer(3, 4), # Create layer Output-1 (3 neurons, each with 4 inputs)
#NeuronLayer(1, 3) # Create layer Output (a single neuron with 3 inputs)
]
# Combine the layers to create a neural network
neural_network = NeuralNetwork(neural_layers)
print("Stage 1) Random starting synaptic weights: ")
neural_network.print_weights()
    # The training set. We have 18 examples, each consisting of 3 input values
    # and 1 output value (numpy broadcasting applies the single target column
    # to each of the 3 output neurons).
training_set_inputs = array([
[0, 0, 1], [0, 1, 1], [1, 0, 1], [0, 1, 0], [1, 0, 0],
[1, 1, 1], [0, 0, 1], [0, 1, 1], [1, 0, 1], [0, 1, 0],
[1, 1, 1], [0, 0, 1], [0, 1, 1], [1, 0, 1], [0, 1, 0],
[1, 0, 0], [1, 1, 1], [0, 0, 0]
])
training_set_outputs = array([
[0, 1, 1, 1, 1,
0, 0, 1, 1, 1,
0, 0, 1, 1, 1,
1, 0, 0]
]).T
# Train the neural network using the training set.
# Do it 60,000 times and make small adjustments each time.
neural_network.train(training_set_inputs, training_set_outputs, 60000)
print("Stage 2) New synaptic weights after training: ")
neural_network.print_weights()
# Test the neural network with a new situation.
print("Stage 3) Considering a new situation [1, 1, 0] -> ?: ")
outputs = neural_network.think(array([1, 1, 0]))
print(outputs[-1])
# Test the neural network with a new situation.
print("Stage 4) Considering a new situation [1, 0, 0] -> ?: ")
outputs = neural_network.think(array([1, 0, 0]))
print(outputs[-1])
|
{"hexsha": "3a489554b2dbcd153d7bbca7b3912d6fc109646b", "size": 4859, "ext": "py", "lang": "Python", "max_stars_repo_path": "talks-articles/machine-learning/toolbox/numpy/multi-layer-neural-network.py", "max_stars_repo_name": "abhishekkr/tutorials_as_code", "max_stars_repo_head_hexsha": "f355dc62a5025b710ac6d4a6ac2f9610265fad54", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 37, "max_stars_repo_stars_event_min_datetime": "2015-02-01T23:16:39.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-22T16:50:48.000Z", "max_issues_repo_path": "talks-articles/machine-learning/toolbox/numpy/multi-layer-neural-network.py", "max_issues_repo_name": "abhishekkr/tutorials_as_code", "max_issues_repo_head_hexsha": "f355dc62a5025b710ac6d4a6ac2f9610265fad54", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2017-03-02T04:55:48.000Z", "max_issues_repo_issues_event_max_datetime": "2018-01-14T10:51:11.000Z", "max_forks_repo_path": "talks-articles/machine-learning/toolbox/numpy/multi-layer-neural-network.py", "max_forks_repo_name": "abhishekkr/tutorials_as_code", "max_forks_repo_head_hexsha": "f355dc62a5025b710ac6d4a6ac2f9610265fad54", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 15, "max_forks_repo_forks_event_min_datetime": "2015-03-02T08:09:01.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-10T03:25:41.000Z", "avg_line_length": 41.1779661017, "max_line_length": 112, "alphanum_fraction": 0.6221444742, "include": true, "reason": "from numpy", "num_tokens": 1270}
|
[STATEMENT]
lemma b_least2_less_impl_eq: "b_least2 f x y < y \<Longrightarrow> (b_least2 f x y) = (Least (%z. (f x z) \<noteq> 0))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. b_least2 f x y < y \<Longrightarrow> b_least2 f x y = (LEAST z. f x z \<noteq> 0)
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. b_least2 f x y < y \<Longrightarrow> b_least2 f x y = (LEAST z. f x z \<noteq> 0)
[PROOF STEP]
assume A1: "b_least2 f x y < y" (is "?b < _")
[PROOF STATE]
proof (state)
this:
b_least2 f x y < y
goal (1 subgoal):
1. b_least2 f x y < y \<Longrightarrow> b_least2 f x y = (LEAST z. f x z \<noteq> 0)
[PROOF STEP]
let ?B = "(Least (%z. (f x z) \<noteq> 0))"
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. b_least2 f x y < y \<Longrightarrow> b_least2 f x y = (LEAST z. f x z \<noteq> 0)
[PROOF STEP]
from A1
[PROOF STATE]
proof (chain)
picking this:
b_least2 f x y < y
[PROOF STEP]
have S1: "f x ?b \<noteq> 0"
[PROOF STATE]
proof (prove)
using this:
b_least2 f x y < y
goal (1 subgoal):
1. f x (b_least2 f x y) \<noteq> 0
[PROOF STEP]
by (rule b_least2_less_impl_nz)
[PROOF STATE]
proof (state)
this:
f x (b_least2 f x y) \<noteq> 0
goal (1 subgoal):
1. b_least2 f x y < y \<Longrightarrow> b_least2 f x y = (LEAST z. f x z \<noteq> 0)
[PROOF STEP]
from S1
[PROOF STATE]
proof (chain)
picking this:
f x (b_least2 f x y) \<noteq> 0
[PROOF STEP]
have S2: "?B \<le> ?b"
[PROOF STATE]
proof (prove)
using this:
f x (b_least2 f x y) \<noteq> 0
goal (1 subgoal):
1. (LEAST z. f x z \<noteq> 0) \<le> b_least2 f x y
[PROOF STEP]
by (rule Least_le)
[PROOF STATE]
proof (state)
this:
(LEAST z. f x z \<noteq> 0) \<le> b_least2 f x y
goal (1 subgoal):
1. b_least2 f x y < y \<Longrightarrow> b_least2 f x y = (LEAST z. f x z \<noteq> 0)
[PROOF STEP]
from S1
[PROOF STATE]
proof (chain)
picking this:
f x (b_least2 f x y) \<noteq> 0
[PROOF STEP]
have S3: "f x ?B \<noteq> 0"
[PROOF STATE]
proof (prove)
using this:
f x (b_least2 f x y) \<noteq> 0
goal (1 subgoal):
1. f x (LEAST z. f x z \<noteq> 0) \<noteq> 0
[PROOF STEP]
by (rule LeastI)
[PROOF STATE]
proof (state)
this:
f x (LEAST z. f x z \<noteq> 0) \<noteq> 0
goal (1 subgoal):
1. b_least2 f x y < y \<Longrightarrow> b_least2 f x y = (LEAST z. f x z \<noteq> 0)
[PROOF STEP]
from S3
[PROOF STATE]
proof (chain)
picking this:
f x (LEAST z. f x z \<noteq> 0) \<noteq> 0
[PROOF STEP]
have S4: "?b \<le> ?B"
[PROOF STATE]
proof (prove)
using this:
f x (LEAST z. f x z \<noteq> 0) \<noteq> 0
goal (1 subgoal):
1. b_least2 f x y \<le> (LEAST z. f x z \<noteq> 0)
[PROOF STEP]
by (rule nz_impl_b_least2_le)
[PROOF STATE]
proof (state)
this:
b_least2 f x y \<le> (LEAST z. f x z \<noteq> 0)
goal (1 subgoal):
1. b_least2 f x y < y \<Longrightarrow> b_least2 f x y = (LEAST z. f x z \<noteq> 0)
[PROOF STEP]
from S2 S4
[PROOF STATE]
proof (chain)
picking this:
(LEAST z. f x z \<noteq> 0) \<le> b_least2 f x y
b_least2 f x y \<le> (LEAST z. f x z \<noteq> 0)
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
(LEAST z. f x z \<noteq> 0) \<le> b_least2 f x y
b_least2 f x y \<le> (LEAST z. f x z \<noteq> 0)
goal (1 subgoal):
1. b_least2 f x y = (LEAST z. f x z \<noteq> 0)
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
b_least2 f x y = (LEAST z. f x z \<noteq> 0)
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 1675, "file": "Recursion-Theory-I_PRecFun", "length": 19}
|
C File: test_module_11.f
C Purpose: Illustrates the use of multiple Fortran modules to define values
C for several variables that are used in the program. This program
C differs from test_module_03.f in having modules that have "USE MODULE"s
C   inside them, as well as some variables that are declared to be PRIVATE.
C
C Compile and run this program as follows:
C
C gfortran -c test_module_11.f # << this creates the files "mymod11a.mod"
C "mymod11b.mod"
C "mymod11c.mod"
C gfortran test_module_11.f # << this creates a file "a.out"
C
C The output generated by this program is (see test_module_11-OUT.txt):
C
C mymod11B: X = 123; Y = 234; Z = 345; U = 567; V = 678
C mymod11C: X = 123; Y = 234; Z = 12.345; U = 567; V = 678
C pgm main: X = 123; Y = 98.765; Z = 12.345; U = 87.654; V = 678
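C
C  Note: a PRIVATE declaration in a module only hides that name from
C  code that USEs the module; the declaring module itself can still
C  reference it, which is why mymod11B is expected to print Z = 345
C  even though Z is declared PRIVATE there.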
!-------------------------------------------------------------------------------
MODULE MYMOD11A
IMPLICIT NONE
INTEGER :: X = 123, Y = 234, Z = 345
END MODULE mymod11a
!-------------------------------------------------------------------------------
MODULE MYMOD11B
USE MYMOD11A ! << Note that "use mymod11a" is inside mymod11b
private :: Z ! << hides Z imported from mymod11a
INTEGER :: U = 567, V = 678
contains
subroutine print_mymod11b()
10 format ('mymod11B: X = ', I3, '; Y = ', I3, '; Z = ', I3,
& '; U = ', I3, '; V = ', I3)
write (*,10) X, Y, Z, U, V
end subroutine print_mymod11b
END MODULE mymod11b
!-------------------------------------------------------------------------------
MODULE MYMOD11C
USE mymod11B ! << Note that "use mymod11b" is inside mymod11c
PRIVATE :: Y, U ! << hides Y from mymod11a, U from mymod11b
REAL :: Z = 12.345 ! << reusing Z
contains
subroutine print_mymod11c()
call print_mymod11b()
11 FORMAT('mymod11C: X = ', I3, '; Y = ', I3,'; Z = ', F8.3,
& '; U = ', I3, '; V = ',I3)
WRITE (*,11) X, Y, Z, U, V
end subroutine print_mymod11c
END MODULE mymod11C
!-------------------------------------------------------------------------------
PROGRAM PGM
USE mymod11c
REAL :: Y = 98.765, U = 87.654 ! << reusing Y, U
call print_mymod11c()
10 FORMAT('pgm main: X = ', I3, '; Y = ', F7.3,'; Z = ', F7.3,
& '; U = ', F7.3, '; V = ',I3)
write (*,10) X, Y, Z, U, V
stop
end program PGM
!-------------------------------------------------------------------------------
|
{"hexsha": "cd3d130a57bde8cbb1fb55d9c4b0b0b3bb1473de", "size": 2771, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "tests/data/program_analysis/modules/test_module_11.f", "max_stars_repo_name": "mikiec84/delphi", "max_stars_repo_head_hexsha": "2e517f21e76e334c7dfb14325d25879ddf26d10d", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 25, "max_stars_repo_stars_event_min_datetime": "2018-03-03T11:57:57.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-16T21:19:54.000Z", "max_issues_repo_path": "tests/data/program_analysis/modules/test_module_11.f", "max_issues_repo_name": "mikiec84/delphi", "max_issues_repo_head_hexsha": "2e517f21e76e334c7dfb14325d25879ddf26d10d", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 385, "max_issues_repo_issues_event_min_datetime": "2018-02-21T16:52:06.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-17T07:44:56.000Z", "max_forks_repo_path": "tests/data/program_analysis/modules/test_module_11.f", "max_forks_repo_name": "mikiec84/delphi", "max_forks_repo_head_hexsha": "2e517f21e76e334c7dfb14325d25879ddf26d10d", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 19, "max_forks_repo_forks_event_min_datetime": "2018-03-20T01:08:11.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-29T01:04:49.000Z", "avg_line_length": 37.9589041096, "max_line_length": 80, "alphanum_fraction": 0.4471309996, "num_tokens": 792}
|
using Clang
GAGE_INCLUDE = raw"C:\Program Files (x86)\Gage\CompuScope\include"
clang_includes = String[]
push!(clang_includes, GAGE_INCLUDE)
push!(clang_includes, raw"C:\Program Files\LLVM\include\clang-c",raw"C:\Program Files\LLVM\include\llvm-c" )
clang_extraargs = ["-v"]
clang_extraargs = ["-D", "__STDC_CONSTANT_MACROS","-D", "__STDC_LIMIT_MACROS"]
function wrap_header(top_hdr, cursor_hdr)
return true#startswith(dirname(cursor_hdr), GAGE_INCLUDE)
end
function lib_file(hdr)
if startswith(hdr, "CsAppSupport")
return :CsSsm
else
return :CsSsm
end
end
output_file(hdr) = "Gage.jl"
const wc = wrap_c.init(;
headers = [joinpath(GAGE_INCLUDE,"CsAppSupport.h")],
output_file= "gage_headers.jl",
common_file = "cs_common.jl",
clang_includes = clang_includes,
clang_args = clang_extraargs,
header_wrapped = wrap_header,
header_library = lib_file,
header_outputfile = output_file)
run(wc)
# @show Clang.wrap_c.debug_cursors
|
{"hexsha": "b2cb3670ab17ada559cbc64525999e42315e8e36", "size": 966, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "gage_clang_gen.jl", "max_stars_repo_name": "jarrison/Gage.jl", "max_stars_repo_head_hexsha": "b8f05666721210e272aea9737136066a59c4d65d", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "gage_clang_gen.jl", "max_issues_repo_name": "jarrison/Gage.jl", "max_issues_repo_head_hexsha": "b8f05666721210e272aea9737136066a59c4d65d", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "gage_clang_gen.jl", "max_forks_repo_name": "jarrison/Gage.jl", "max_forks_repo_head_hexsha": "b8f05666721210e272aea9737136066a59c4d65d", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.8333333333, "max_line_length": 108, "alphanum_fraction": 0.7453416149, "num_tokens": 274}
|
/*** Copyright (c), The Regents of the University of California ***
*** For more information please refer to files in the COPYRIGHT directory ***/
/*
ICAT test program.
*/
#include "rodsClient.h"
#include "parseCommandLine.h"
#include "readServerConfig.hpp"
#include "irods_server_properties.hpp"
#include "checksum.hpp"
#include "rodsUser.h"
#include "icatHighLevelRoutines.hpp"
//#include "icatMidLevelRoutines.hpp"
#include <string.h>
#include <string>
#include <boost/lexical_cast.hpp>
#include <limits>
extern icatSessionStruct *chlGetRcs();
/*
int testCml(rsComm_t *rsComm)
{
return cmlTest(rsComm);
}
*/
int testRegRule( rsComm_t *rsComm, char *name ) {
ruleExecSubmitInp_t ruleInfo;
memset( &ruleInfo, 0, sizeof( ruleInfo ) );
snprintf( ruleInfo.ruleName, sizeof( ruleInfo.ruleName ), "%s", name );
snprintf( ruleInfo.reiFilePath, sizeof( ruleInfo.reiFilePath ), "%s", "../config/packedRei/rei.file1" );
snprintf( ruleInfo.userName, sizeof( ruleInfo.userName ), "%s", "Wayne" );
snprintf( ruleInfo.exeAddress, sizeof( ruleInfo.exeAddress ), "%s", "Bermuda" );
snprintf( ruleInfo.exeTime, sizeof( ruleInfo.exeTime ), "%s", "whenEver" );
snprintf( ruleInfo.exeFrequency, sizeof( ruleInfo.exeFrequency ), "%s", "every 2 days" );
snprintf( ruleInfo.priority, sizeof( ruleInfo.priority ), "%s", "high" );
snprintf( ruleInfo.estimateExeTime, sizeof( ruleInfo.estimateExeTime ), "%s", "2 hours" );
snprintf( ruleInfo.notificationAddr, sizeof( ruleInfo.notificationAddr ), "%s", "noone@nowhere.com" );
return chlRegRuleExec( rsComm, &ruleInfo );
}
int testRename( rsComm_t *rsComm, char *id, char *newName ) {
rodsLong_t intId;
int status;
rsComm->clientUser.authInfo.authFlag = LOCAL_PRIV_USER_AUTH;
rsComm->proxyUser.authInfo.authFlag = LOCAL_PRIV_USER_AUTH;
intId = strtoll( id, 0, 0 );
status = chlRenameObject( rsComm, intId, newName );
if ( status ) {
return status;
}
return chlCommit( rsComm );
}
int testLogin( char *User, char *pw, char *pw1, rodsEnv& myEnv ) {
int status;
rcComm_t *Conn;
rErrMsg_t errMsg;
Conn = rcConnect( myEnv.rodsHost, myEnv.rodsPort, myEnv.rodsUserName,
myEnv.rodsZone, 0, &errMsg );
if ( Conn == NULL ) {
printf( "rcConnect failure" );
return -1;
}
status = clientLoginWithPassword( Conn, pw1 ); /* first login as self */
if ( status == 0 ) {
rstrcpy( Conn->clientUser.userName, User,
sizeof Conn->clientUser.userName );
rstrcpy( Conn->clientUser.rodsZone, myEnv.rodsZone,
sizeof Conn->clientUser.rodsZone ); /* default to our zone */
status = clientLoginWithPassword( Conn, pw ); /* then try other user */
}
rcDisconnect( Conn );
return status;
}
int testMove( rsComm_t *rsComm, char *id, char *destId ) {
rodsLong_t intId, intDestId;
int status;
rsComm->clientUser.authInfo.authFlag = LOCAL_PRIV_USER_AUTH;
rsComm->proxyUser.authInfo.authFlag = LOCAL_PRIV_USER_AUTH;
intId = strtoll( id, 0, 0 );
intDestId = strtoll( destId, 0, 0 );
status = chlMoveObject( rsComm, intId, intDestId );
if ( status ) {
return status;
}
return chlCommit( rsComm );
}
int testTempPw( rsComm_t *rsComm ) {
int status;
char pwValueToHash[500];
status = chlMakeTempPw( rsComm, pwValueToHash, "" );
printf( "pwValueToHash: %s\n", pwValueToHash );
return status;
}
int testTempPwConvert( char *s1, char *s2 ) {
char md5Buf[100];
unsigned char digest[RESPONSE_LEN + 2];
char digestStr[100];
/*
     Calculate the temp password: a hash of s1 (the user's main
password) and s2 (the value returned by chlGenTempPw).
*/
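    // Note: the whole 100-byte md5Buf (including its trailing zero padding)
    // is hashed, and the concatenation order is s2 followed by s1, so
    // swapping the two arguments would yield a different digest.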
memset( md5Buf, 0, sizeof( md5Buf ) );
snprintf( md5Buf, sizeof( md5Buf ), "%s%s", s2, s1 );
obfMakeOneWayHash( HASH_TYPE_DEFAULT, ( unsigned char* )md5Buf, sizeof md5Buf,
digest );
hashToStr( digest, digestStr );
printf( "digestStr (derived temp pw)=%s\n", digestStr );
return 0;
}
int
testGetLocalZone( char *expectedZone ) {
std::string zone;
chlGetLocalZone( zone );
printf( "Zone is %s\n", zone.c_str() );
if ( zone != expectedZone ) {
return -1;
}
return 0;
}
int
testGetPamPw( rsComm_t *rsComm, char *username, char *testTime ) {
char *irodsPamPassword;
irodsPamPassword = ( char* )malloc( 100 );
memset( irodsPamPassword, 0, 100 );
int status = chlUpdateIrodsPamPassword( rsComm, username, 0, testTime,
&irodsPamPassword );
if ( status == 0 ) {
printf( "status=%d pw=%s \n", status, irodsPamPassword );
}
else {
printf( "status=%d\n", status );
}
return 0;
}
int testTempPwCombined( rsComm_t *rsComm, char *s1 ) {
int status;
char pwValueToHash[500];
char md5Buf[100];
unsigned char digest[RESPONSE_LEN + 2];
char digestStr[100];
status = chlMakeTempPw( rsComm, pwValueToHash, "" );
if ( status ) {
return status;
}
printf( "pwValueToHash: %s\n", pwValueToHash );
/*
     Calculate the temp password: a hash of s1 (the user's main
password) and the value returned by chlGenTempPw.
*/
memset( md5Buf, 0, sizeof( md5Buf ) );
snprintf( md5Buf, sizeof( md5Buf ), "%s%s", pwValueToHash, s1 );
obfMakeOneWayHash( HASH_TYPE_DEFAULT, ( unsigned char* )md5Buf, sizeof md5Buf,
digest );
hashToStr( digest, digestStr );
printf( "digestStr (derived temp pw)=%s\n", digestStr );
return 0;
}
int testTempPwForOther( rsComm_t *rsComm, char *s1, char *otherUser ) {
int status;
char pwValueToHash[500];
char md5Buf[100];
unsigned char digest[RESPONSE_LEN + 2];
char digestStr[100];
rsComm->clientUser.authInfo.authFlag = LOCAL_PRIV_USER_AUTH;
rsComm->proxyUser.authInfo.authFlag = LOCAL_PRIV_USER_AUTH;
status = chlMakeTempPw( rsComm, pwValueToHash, otherUser );
if ( status ) {
return status;
}
printf( "pwValueToHash: %s\n", pwValueToHash );
/*
     Calculate the temp password: a hash of s1 (the user's main
password) and the value returned by chlGenTempPw.
*/
memset( md5Buf, 0, sizeof( md5Buf ) );
snprintf( md5Buf, sizeof( md5Buf ), "%s%s", pwValueToHash, s1 );
obfMakeOneWayHash( HASH_TYPE_DEFAULT, ( unsigned char* )md5Buf, sizeof md5Buf,
digest );
hashToStr( digest, digestStr );
printf( "digestStr (derived temp pw)=%s\n", digestStr );
return 0;
}
int testCheckAuth( rsComm_t *rsComm, char *testAdminUser, char *testUser,
char *testUserZone ) {
    /* Use a pre-determined user, challenge, and response */
char response[RESPONSE_LEN + 2];
char challenge[CHALLENGE_LEN + 2];
int userPrivLevel;
int clientPrivLevel;
int status, i;
char userNameAndZone[NAME_LEN * 2];
snprintf( rsComm->clientUser.userName, sizeof( rsComm->clientUser.userName ), "%s", testUser );
snprintf( rsComm->clientUser.rodsZone, sizeof( rsComm->clientUser.rodsZone ), "%s", testUserZone );
for ( i = 0; i < CHALLENGE_LEN + 2; i++ ) {
challenge[i] = ' ';
}
i = 0;
response[i++] = 0xd6; /* found to be a valid response */
response[i++] = 0x8a;
response[i++] = 0xaf;
response[i++] = 0xc4;
response[i++] = 0x83;
response[i++] = 0x46;
response[i++] = 0x1b;
response[i++] = 0xa2;
response[i++] = 0x5c;
response[i++] = 0x8c;
response[i++] = 0x6d;
response[i++] = 0xc5;
response[i++] = 0xb1;
response[i++] = 0x41;
response[i++] = 0x84;
response[i++] = 0xeb;
response[i++] = 0x00;
strncpy( userNameAndZone, testAdminUser, sizeof userNameAndZone );
userNameAndZone[ sizeof( userNameAndZone ) - 1 ] = '\0'; // JMC cppcheck - dangerous use of strncpy
strncat( userNameAndZone, "#", sizeof userNameAndZone - strlen( userNameAndZone ) );
strncat( userNameAndZone, testUserZone, sizeof userNameAndZone - strlen( userNameAndZone ) );
status = chlCheckAuth( rsComm, 0, challenge, response,
userNameAndZone,
&userPrivLevel, &clientPrivLevel );
if ( status == 0 ) {
printf( "clientPrivLevel=%d\n", clientPrivLevel );
}
return status;
}
int testDelFile( rsComm_t *rsComm, char *name, char *replica ) {
dataObjInfo_t dataObjInfo;
keyValPair_t *condInput;
memset( &dataObjInfo, 0, sizeof( dataObjInfo ) );
if ( replica != NULL && *replica != 0 ) {
int ireplica;
ireplica = atoi( replica );
if ( ireplica >= 0 ) {
dataObjInfo.replNum = ireplica;
}
if ( ireplica == 999999 ) {
dataObjInfo.replNum = -1;
}
}
snprintf( dataObjInfo.objPath, sizeof( dataObjInfo.objPath ), "%s", name );
memset( &condInput, 0, sizeof( condInput ) );
return chlUnregDataObj( rsComm, &dataObjInfo, condInput );
}
int testDelFilePriv( rsComm_t *rsComm, char *name, char *dataId,
char *replica ) {
dataObjInfo_t dataObjInfo;
keyValPair_t condInput;
rsComm->clientUser.authInfo.authFlag = LOCAL_PRIV_USER_AUTH;
memset( &condInput, 0, sizeof( condInput ) );
addKeyVal( &condInput, ADMIN_KW, " " );
memset( &dataObjInfo, 0, sizeof( dataObjInfo ) );
if ( dataId != NULL && *dataId != 0 ) {
rodsLong_t idataId;
idataId = strtoll( dataId, NULL, 0 );
if ( idataId >= 0 ) {
dataObjInfo.dataId = idataId;
}
}
dataObjInfo.replNum = -1;
if ( replica != NULL && *replica != 0 ) {
int ireplica;
ireplica = atoi( replica );
if ( ireplica >= 0 ) {
dataObjInfo.replNum = ireplica;
}
}
snprintf( dataObjInfo.objPath, sizeof( dataObjInfo.objPath ), "%s", name );
return chlUnregDataObj( rsComm, &dataObjInfo, &condInput );
}
int testDelFileTrash( rsComm_t *rsComm, char *name, char *dataId ) {
dataObjInfo_t dataObjInfo;
keyValPair_t condInput;
rsComm->clientUser.authInfo.authFlag = LOCAL_PRIV_USER_AUTH;
memset( &condInput, 0, sizeof( condInput ) );
addKeyVal( &condInput, ADMIN_RMTRASH_KW, " " );
memset( &dataObjInfo, 0, sizeof( dataObjInfo ) );
if ( dataId != NULL && *dataId != 0 ) {
rodsLong_t idataId;
idataId = strtoll( dataId, NULL, 0 );
if ( idataId >= 0 ) {
dataObjInfo.dataId = idataId;
}
}
dataObjInfo.replNum = -1;
snprintf( dataObjInfo.objPath, sizeof( dataObjInfo.objPath ), "%s", name );
return chlUnregDataObj( rsComm, &dataObjInfo, &condInput );
}
int testRegColl( rsComm_t *rsComm, char *name ) {
collInfo_t collInp;
snprintf( collInp.collName, sizeof( collInp.collName ), "%s", name );
return chlRegColl( rsComm, &collInp );
}
int testDelColl( rsComm_t *rsComm, char *name ) {
collInfo_t collInp;
snprintf( collInp.collName, sizeof( collInp.collName ), "%s", name );
return chlDelColl( rsComm, &collInp );
}
int testDelRule( rsComm_t *rsComm, char *ruleName, char *userName ) {
if ( userName != NULL && strlen( userName ) > 0 ) {
rsComm->clientUser.authInfo.authFlag = LOCAL_USER_AUTH;
rsComm->proxyUser.authInfo.authFlag = LOCAL_USER_AUTH;
snprintf( rsComm->clientUser.userName, sizeof( rsComm->clientUser.userName ),
"%s", userName );
}
else {
rsComm->clientUser.authInfo.authFlag = LOCAL_PRIV_USER_AUTH;
rsComm->proxyUser.authInfo.authFlag = LOCAL_PRIV_USER_AUTH;
}
return chlDelRuleExec( rsComm, ruleName );
}
int testRegDataObj( rsComm_t *rsComm, char *name,
char *dataType, char *filePath ) {
dataObjInfo_t dataObjInfo;
memset( &dataObjInfo, 0, sizeof( dataObjInfo_t ) );
snprintf( dataObjInfo.objPath, sizeof( dataObjInfo.objPath ), "%s", name );
dataObjInfo.replNum = 1;
snprintf( dataObjInfo.version, sizeof( dataObjInfo.version ), "%s", "12" );
snprintf( dataObjInfo.dataType, sizeof( dataObjInfo.dataType ), "%s", dataType );
dataObjInfo.dataSize = 42;
snprintf( dataObjInfo.rescName, sizeof( dataObjInfo.rescName ), "%s", "demoResc" );
snprintf( dataObjInfo.filePath, sizeof( dataObjInfo.filePath ), "%s", filePath );
dataObjInfo.replStatus = 5;
return chlRegDataObj( rsComm, &dataObjInfo );
}
/*
Do multiple data registrations. If you comment out the commit in
chlRegDataObj and then build this, it can add phony data-objects at
about 8 times the speed of lots of iput's of small files. This can
come in handy for creating simulated large instances for DBMS
performance testing and tuning. In this source file, you might also
want to change rodsLogLevel(LOG_NOTICE) to rodsLogLevel(LOG_ERROR)
and comment out rodsLogSqlReq(1);.
name is the objPath (collection/dataObj)
   objPath = "/newZone/home/rods/ws/f3"
filePath is the physical path
filePath = "/home/schroeder/iRODS/Vault/home/rods/ws/f3"
Example:
bin/test_chl regmulti 1000 /newZone/home/rods/ws2/f1 generic /tmp/vault/f1
*/
int testRegDataMulti( rsComm_t *rsComm, char *count,
char *nameBase, char *dataType, char *filePath ) {
try {
const int myCount = boost::lexical_cast<int>( count );
if ( myCount <= 0 || myCount > std::numeric_limits<int>::max() ) {
printf( "Invalid input: count\n" );
return USER_INPUT_OPTION_ERR;
}
for ( int i = 0; i < myCount; i++ ) {
char myName[MAX_NAME_LEN];
snprintf( myName, sizeof myName, "%s.%d", nameBase, i );
int status = testRegDataObj( rsComm, myName, dataType, filePath );
if ( status ) {
return status;
}
}
return chlCommit( rsComm );
}
catch ( ... ) {
printf( "Invalid input: count\n" );
return USER_INPUT_OPTION_ERR;
}
}
int testModDataObjMeta( rsComm_t *rsComm, char *name,
char *dataType, char *filePath ) {
dataObjInfo_t dataObjInfo;
int status;
keyValPair_t regParam;
char tmpStr[LONG_NAME_LEN], tmpStr2[LONG_NAME_LEN];
/* int replStatus; */
memset( &dataObjInfo, 0, sizeof( dataObjInfo_t ) );
    memset( &regParam, 0, sizeof( regParam ) );
/*
replStatus=1;
snprintf (tmpStr, LONG_NAME_LEN, "%d", replStatus);
    addKeyVal (&regParam, "replStatus", tmpStr);
*/
snprintf( tmpStr, sizeof tmpStr, "fake timestamp" );
    addKeyVal( &regParam, "dataCreate", tmpStr );
snprintf( tmpStr2, sizeof tmpStr2, "test comment" );
    addKeyVal( &regParam, "dataComments", tmpStr2 );
snprintf( dataObjInfo.objPath, sizeof( dataObjInfo.objPath ), "%s", name );
/* dataObjInfo.replNum=1; */
dataObjInfo.replNum = 0;
snprintf( dataObjInfo.version, sizeof( dataObjInfo.version ), "%s", "12" );
snprintf( dataObjInfo.dataType, sizeof( dataObjInfo.dataType ), "%s", dataType );
dataObjInfo.dataSize = 42;
snprintf( dataObjInfo.rescName, sizeof( dataObjInfo.rescName ), "%s", "resc A" );
snprintf( dataObjInfo.filePath, sizeof( dataObjInfo.filePath ), "%s", filePath );
dataObjInfo.replStatus = 5;
    status = chlModDataObjMeta( rsComm, &dataObjInfo, &regParam );
return status;
}
int testModDataObjMeta2( rsComm_t *rsComm, char *name,
char *dataType, char *filePath ) {
dataObjInfo_t dataObjInfo;
int status;
keyValPair_t regParam;
char tmpStr[LONG_NAME_LEN], tmpStr2[LONG_NAME_LEN];
memset( &dataObjInfo, 0, sizeof( dataObjInfo_t ) );
    memset( &regParam, 0, sizeof( regParam ) );
snprintf( tmpStr, sizeof tmpStr, "whatever" );
    addKeyVal( &regParam, "all", tmpStr );
snprintf( tmpStr2, sizeof tmpStr2, "42" );
    addKeyVal( &regParam, "dataSize", tmpStr2 );
snprintf( dataObjInfo.objPath, sizeof( dataObjInfo.objPath ), "%s", name );
dataObjInfo.replNum = 0;
snprintf( dataObjInfo.version, sizeof( dataObjInfo.version ), "%s", "12" );
snprintf( dataObjInfo.dataType, sizeof( dataObjInfo.dataType ), "%s", dataType );
dataObjInfo.dataSize = 42;
snprintf( dataObjInfo.rescName, sizeof( dataObjInfo.rescName ), "%s", "resc A" );
snprintf( dataObjInfo.filePath, sizeof( dataObjInfo.filePath ), "%s", filePath );
dataObjInfo.replStatus = 5;
    status = chlModDataObjMeta( rsComm, &dataObjInfo, &regParam );
return status;
}
int testModColl( rsComm_t *rsComm, char *name, char *type,
char *info1, char *info2 ) {
int status;
collInfo_t collInp;
rsComm->clientUser.authInfo.authFlag = LOCAL_PRIV_USER_AUTH;
rsComm->proxyUser.authInfo.authFlag = LOCAL_PRIV_USER_AUTH;
memset( &collInp, 0, sizeof( collInp ) );
if ( name != NULL && strlen( name ) > 0 ) {
snprintf( collInp.collName, sizeof( collInp.collName ), "%s", name );
}
if ( type != NULL && strlen( type ) > 0 ) {
snprintf( collInp.collType, sizeof( collInp.collType ), "%s", type );
}
if ( info1 != NULL && strlen( info1 ) > 0 ) {
snprintf( collInp.collInfo1, sizeof( collInp.collInfo1 ), "%s", info1 );
}
if ( info2 != NULL && strlen( info2 ) > 0 ) {
snprintf( collInp.collInfo2, sizeof( collInp.collInfo2 ), "%s", info2 );
}
status = chlModColl( rsComm, &collInp );
if ( status != 0 ) {
return status;
}
status = chlCommit( rsComm );
return status;
}
int testModRuleMeta( rsComm_t *rsComm, char *id,
char *attrName, char *attrValue ) {
/* ruleExecSubmitInp_t ruleInfo; */
char ruleId[100];
int status;
keyValPair_t regParam;
char tmpStr[LONG_NAME_LEN];
/* memset(&ruleInfo,0,sizeof(ruleExecSubmitInp_t)); */
    memset( &regParam, 0, sizeof( regParam ) );
rstrcpy( tmpStr, attrValue, sizeof tmpStr );
    addKeyVal( &regParam, attrName, tmpStr );
snprintf( ruleId, sizeof( ruleId ), "%s", id );
    status = chlModRuleExec( rsComm, ruleId, &regParam );
return status;
}
int testModResourceFreeSpace( rsComm_t *rsComm, char *rescName,
char *numberString, char *option ) {
if ( *numberString == '\\' ) {
numberString++;
}
int number = atoi( numberString );
rsComm->clientUser.authInfo.authFlag = LOCAL_PRIV_USER_AUTH;
rsComm->proxyUser.authInfo.authFlag = LOCAL_PRIV_USER_AUTH;
int status = chlModRescFreeSpace( rsComm, rescName, number );
if ( status != 0 ) {
return status;
}
if ( option != NULL && strcmp( option, "rollback" ) == 0 ) {
status = chlRollback( rsComm );
if ( status < 0 ) {
rodsLog( LOG_ERROR, "chlRollback failed in testModResourceFreeSpace %d", status );
}
}
if ( option != NULL && strcmp( option, "close" ) == 0 ) {
status = chlClose();
return status;
}
status = chlCommit( rsComm );
return status;
}
int testRegReplica( rsComm_t *rsComm, char *srcPath, char *srcDataId,
char *srcReplNum, char *dstPath ) {
dataObjInfo_t srcDataObjInfo;
dataObjInfo_t dstDataObjInfo;
keyValPair_t condInput;
int status;
memset( &srcDataObjInfo, 0, sizeof( dataObjInfo_t ) );
memset( &dstDataObjInfo, 0, sizeof( dataObjInfo_t ) );
memset( &condInput, 0, sizeof( condInput ) );
snprintf( srcDataObjInfo.objPath, sizeof( srcDataObjInfo.objPath ), "%s", srcPath );
srcDataObjInfo.dataId = atoi( srcDataId );
srcDataObjInfo.replNum = atoi( srcReplNum );
snprintf( dstDataObjInfo.rescName, sizeof( dstDataObjInfo.rescName ), "%s", "resc A" );
snprintf( dstDataObjInfo.filePath, sizeof( dstDataObjInfo.filePath ), "%s", dstPath );
dstDataObjInfo.replStatus = 5;
status = chlRegReplica( rsComm, &srcDataObjInfo, &dstDataObjInfo,
&condInput );
return status;
}
int testSimpleQ( rsComm_t *rsComm, char *sql, char *arg1, char *format ) {
char bigBuf[1000];
int status;
int control;
int form;
rsComm->clientUser.authInfo.authFlag = LOCAL_PRIV_USER_AUTH;
rsComm->proxyUser.authInfo.authFlag = LOCAL_PRIV_USER_AUTH;
control = 0;
form = 1;
if ( format != NULL ) {
form = atoi( format );
}
status = chlSimpleQuery( rsComm, sql, arg1, 0, 0, 0,
form, &control, bigBuf, 1000 );
if ( status == 0 ) {
printf( "%s", bigBuf );
}
while ( control && ( status == 0 ) ) {
status = chlSimpleQuery( rsComm, sql, 0, 0, 0, 0,
form, &control, bigBuf, 1000 );
if ( status == 0 ) {
printf( "%s", bigBuf );
}
}
return status;
}
int testChmod( rsComm_t *rsComm, char *user, char *zone,
char *access, char *path ) {
int status;
status = chlModAccessControl( rsComm, 0, user, zone, access, path );
return status;
}
int testServerLoad( rsComm_t *rsComm, char *option ) {
int status;
rsComm->clientUser.authInfo.authFlag = LOCAL_PRIV_USER_AUTH;
status = chlRegServerLoad( rsComm, "host", "resc", option, "2", "3",
"4", "5", "6", "7" );
return status;
}
int testPurgeServerLoad( rsComm_t *rsComm, char *option ) {
int status;
rsComm->clientUser.authInfo.authFlag = LOCAL_PRIV_USER_AUTH;
if ( option == NULL ) {
status = chlPurgeServerLoad( rsComm, "2000" );
}
else {
status = chlPurgeServerLoad( rsComm, option );
}
return status;
}
int testServerLoadDigest( rsComm_t *rsComm, char *option ) {
int status;
rsComm->clientUser.authInfo.authFlag = LOCAL_PRIV_USER_AUTH;
status = chlRegServerLoadDigest( rsComm, "resc", option );
return status;
}
int testPurgeServerLoadDigest( rsComm_t *rsComm, char *option ) {
int status;
rsComm->clientUser.authInfo.authFlag = LOCAL_PRIV_USER_AUTH;
if ( option == NULL ) {
status = chlPurgeServerLoadDigest( rsComm, "2000" );
}
else {
status = chlPurgeServerLoadDigest( rsComm, option );
}
return status;
}
int testCheckQuota( rsComm_t *rsComm, char *userName, char *rescName,
char *expectedQuota, char *expectedStatus ) {
int status;
int quotaStatus;
rodsLong_t userQuota;
rsComm->clientUser.authInfo.authFlag = LOCAL_PRIV_USER_AUTH;
status = chlCheckQuota( rsComm, userName, rescName,
                            &userQuota, &quotaStatus );
rodsLog( LOG_SQL,
"chlCheckQuota status: userName:%s rescName:%s userQuota:%lld quotaStatus:%d\n",
userName, rescName, userQuota, quotaStatus );
if ( status == 0 ) {
int iExpectedStatus;
rodsLong_t iExpectedQuota;
if ( expectedQuota != NULL && strlen( expectedQuota ) > 0 ) {
rodsLong_t i;
iExpectedQuota = atoll( expectedQuota );
if ( expectedQuota[0] == 'm' ) {
i = atoll( ( char * )&expectedQuota[1] );
iExpectedQuota = -i;
}
if ( iExpectedQuota != userQuota ) {
status = -1;
}
}
if ( expectedStatus != NULL && strlen( expectedStatus ) > 0 ) {
iExpectedStatus = atoi( expectedStatus );
if ( iExpectedStatus != quotaStatus ) {
status = -2;
}
}
}
return status;
}
rodsLong_t
testCurrent() {
rodsLong_t status = 0;
icatSessionStruct *icss;
chlGetRcs( &icss );
// JMC status = cmlGetCurrentSeqVal( icss );
return status;
}
int
testAddRule( rsComm_t *rsComm, char *baseName, char *ruleName,
char *ruleHead, char *ruleCondition, char *ruleAction,
char *ruleRecovery ) {
int status;
char ruleIdStr[200];
char myTime[] = "01277237323";
char priority[] = "1";
rsComm->clientUser.authInfo.authFlag = LOCAL_PRIV_USER_AUTH;
status = chlInsRuleTable( rsComm, baseName, priority, ruleName,
ruleHead, ruleCondition, ruleAction,
ruleRecovery, ( char * )&ruleIdStr, ( char * )&myTime );
if ( status == 0 ) {
printf( "ruleIdStr: %s\n", ruleIdStr );
}
return status;
}
int
testVersionRuleBase( rsComm_t *rsComm, char *baseName ) {
int status;
char myTime[] = "01277237323";
rsComm->clientUser.authInfo.authFlag = LOCAL_PRIV_USER_AUTH;
status = chlVersionRuleBase( rsComm, baseName, ( char * )&myTime );
return status;
}
int
testVersionDvmBase( rsComm_t *rsComm, char *baseName ) {
int status;
char myTime[] = "01277237323";
rsComm->clientUser.authInfo.authFlag = LOCAL_PRIV_USER_AUTH;
status = chlVersionDvmBase( rsComm, baseName, ( char * )&myTime );
return status;
}
int
testInsFnmTable( rsComm_t *rsComm, char *arg1, char *arg2, char *arg3,
char *arg4 ) {
int status;
rsComm->clientUser.authInfo.authFlag = LOCAL_PRIV_USER_AUTH;
status = chlInsFnmTable( rsComm, arg1, arg2, arg3, arg4 );
return status;
}
int
testInsMsrvcTable( rsComm_t *rsComm, char *arg1, char *arg2, char *arg3,
char *arg4, char *arg5, char *arg6, char *arg7, char *arg8,
char *arg9 ) {
int status;
rsComm->clientUser.authInfo.authFlag = LOCAL_PRIV_USER_AUTH;
status = chlInsMsrvcTable( rsComm, arg1, arg2, arg3, arg4,
arg5, arg6, arg7, arg8, arg9, "0" );
return status;
}
int
testInsDvmTable( rsComm_t *rsComm, char *arg1, char *arg2, char *arg3,
char *arg4 ) {
int status;
char myTime[] = "01277237323";
rsComm->clientUser.authInfo.authFlag = LOCAL_PRIV_USER_AUTH;
status = chlInsDvmTable( rsComm, arg1, arg2, arg3, arg4, myTime );
return status;
}
int
testVersionFnmBase( rsComm_t *rsComm, char *arg1 ) {
int status;
char myTime[] = "01277237323";
rsComm->clientUser.authInfo.authFlag = LOCAL_PRIV_USER_AUTH;
status = chlVersionFnmBase( rsComm, arg1, myTime );
return status;
}
int
main( int argc, char **argv ) {
int status;
rsComm_t *Comm;
char *mySubName;
char *myName;
int didOne;
Comm = ( rsComm_t* )malloc( sizeof( rsComm_t ) );
memset( Comm, 0, sizeof( rsComm_t ) );
rodsLogLevel( LOG_NOTICE );
rodsLogSqlReq( 1 );
if ( argc < 2 ) {
printf( "Usage: test_chl testName [args...]\n" );
exit( 3 );
}
rodsEnv myEnv;
status = getRodsEnv( &myEnv );
if ( status < 0 ) {
rodsLog( LOG_ERROR, "main: getRodsEnv error. status = %d",
status );
exit( 1 );
}
if ( strstr( myEnv.rodsDebug, "CAT" ) != NULL ) {
chlDebug( myEnv.rodsDebug );
}
snprintf( Comm->clientUser.userName, sizeof( Comm->clientUser.userName ),
"%s", myEnv.rodsUserName );
snprintf( Comm->clientUser.rodsZone, sizeof( Comm->clientUser.rodsZone ),
"%s", myEnv.rodsZone );
/*
char rodsUserName[NAME_LEN];
char rodsZone[NAME_LEN];
userInfo_t clientUser;
char userName[NAME_LEN];
char rodsZone[NAME_LEN];
*/
if ( ( status = chlOpen() ) != 0 ) {
rodsLog( LOG_SYS_FATAL,
"initInfoWithRcat: chlopen Error. Status = %d",
status );
free( Comm ); // JMC cppcheck - leak
return status;
}
didOne = 0;
if ( strcmp( argv[1], "reg" ) == 0 ) {
status = testRegDataObj( Comm, argv[2], argv[3], argv[4] );
didOne = 1;
}
if ( strcmp( argv[1], "regmulti" ) == 0 ) {
status = testRegDataMulti( Comm, argv[2], argv[3], argv[4], argv[5] );
didOne = 1;
}
if ( strcmp( argv[1], "mod" ) == 0 ) {
status = testModDataObjMeta( Comm, argv[2], argv[3], argv[4] );
didOne = 1;
}
if ( strcmp( argv[1], "mod2" ) == 0 ) {
status = testModDataObjMeta2( Comm, argv[2], argv[3], argv[4] );
didOne = 1;
}
if ( strcmp( argv[1], "modr" ) == 0 ) {
status = testModRuleMeta( Comm, argv[2], argv[3], argv[4] );
didOne = 1;
}
if ( strcmp( argv[1], "modc" ) == 0 ) {
status = testModColl( Comm, argv[2], argv[3], argv[4], argv[5] );
didOne = 1;
}
if ( strcmp( argv[1], "rmrule" ) == 0 ) {
status = testDelRule( Comm, argv[2], argv[3] );
didOne = 1;
}
if ( strcmp( argv[1], "modrfs" ) == 0 ) {
status = testModResourceFreeSpace( Comm, argv[2], argv[3], argv[4] );
didOne = 1;
}
if ( strcmp( argv[1], "rep" ) == 0 ) {
if ( argc < 6 ) {
printf( "too few arguments\n" );
exit( 1 );
}
status = testRegReplica( Comm, argv[2], argv[3], argv[4], argv[5] );
didOne = 1;
}
/*
if (strcmp(argv[1],"cml")==0) {
status = testCml(Comm);
didOne=1;
}
*/
if ( strcmp( argv[1], "mkdir" ) == 0 ) {
status = testRegColl( Comm, argv[2] );
didOne = 1;
}
if ( strcmp( argv[1], "rmdir" ) == 0 ) {
status = testDelColl( Comm, argv[2] );
didOne = 1;
}
if ( strcmp( argv[1], "sql" ) == 0 ) {
status = testSimpleQ( Comm, argv[2], argv[3], argv[4] );
didOne = 1;
}
if ( strcmp( argv[1], "rm" ) == 0 ) {
status = testDelFile( Comm, argv[2], argv[3] );
didOne = 1;
}
if ( strcmp( argv[1], "rmtrash" ) == 0 ) {
status = testDelFileTrash( Comm, argv[2], argv[3] );
didOne = 1;
}
if ( strcmp( argv[1], "rmpriv" ) == 0 ) {
status = testDelFilePriv( Comm, argv[2], argv[3], argv[4] );
didOne = 1;
}
if ( strcmp( argv[1], "chmod" ) == 0 ) {
status = testChmod( Comm, argv[2], argv[3], argv[4], argv[5] );
didOne = 1;
}
if ( strcmp( argv[1], "regrule" ) == 0 ) {
status = testRegRule( Comm, argv[2] );
didOne = 1;
}
if ( strcmp( argv[1], "rename" ) == 0 ) {
status = testRename( Comm, argv[2], argv[3] );
// JMC testCurrent(); // exercise this as part of rename;
// testCurrent needs a SQL context
didOne = 1;
}
if ( strcmp( argv[1], "login" ) == 0 ) {
printf( "login - 2 [%s] 3 [%s] 4 [%s]\n", argv[2], argv[3], argv[4] );
status = testLogin( argv[2], argv[3], argv[4], myEnv );
didOne = 1;
}
if ( strcmp( argv[1], "move" ) == 0 ) {
status = testMove( Comm, argv[2], argv[3] );
didOne = 1;
}
if ( strcmp( argv[1], "checkauth" ) == 0 ) {
status = testCheckAuth( Comm, argv[2], argv[3], argv[4] );
didOne = 1;
}
if ( strcmp( argv[1], "temppw" ) == 0 ) {
status = testTempPw( Comm );
didOne = 1;
}
if ( strcmp( argv[1], "tpc" ) == 0 ) {
status = testTempPwConvert( argv[2], argv[3] );
didOne = 1;
}
if ( strcmp( argv[1], "tpw" ) == 0 ) {
status = testTempPwCombined( Comm, argv[2] );
didOne = 1;
}
if ( strcmp( argv[1], "tpwforother" ) == 0 ) {
status = testTempPwForOther( Comm, argv[2], argv[3] );
didOne = 1;
}
if ( strcmp( argv[1], "serverload" ) == 0 ) {
status = testServerLoad( Comm, argv[2] );
didOne = 1;
}
if ( strcmp( argv[1], "purgeload" ) == 0 ) {
status = testPurgeServerLoad( Comm, argv[2] );
didOne = 1;
}
if ( strcmp( argv[1], "serverdigest" ) == 0 ) {
status = testServerLoadDigest( Comm, argv[2] );
didOne = 1;
}
if ( strcmp( argv[1], "purgedigest" ) == 0 ) {
status = testPurgeServerLoadDigest( Comm, argv[2] );
didOne = 1;
}
if ( strcmp( argv[1], "checkquota" ) == 0 ) {
if ( argc < 5 ) {
status = testCheckQuota( Comm, argv[2], argv[3],
NULL, NULL );
}
else {
status = testCheckQuota( Comm, argv[2], argv[3],
argv[4], argv[5] );
}
didOne = 1;
}
if ( strcmp( argv[1], "open" ) == 0 ) {
int i;
for ( i = 0; i < 3; i++ ) {
status = chlClose();
if ( status ) {
printf( "close %d error", i );
}
if ( ( status = chlOpen() ) != 0 ) {
rodsLog( LOG_SYS_FATAL,
"initInfoWithRcat: chlopen %d Error. Status = %d",
i, status );
return status;
}
}
didOne = 1;
}
if ( strcmp( argv[1], "addrule" ) == 0 ) {
status = testAddRule( Comm, argv[2], argv[3],
argv[4], argv[5],
argv[6], argv[7] );
didOne = 1;
}
if ( strcmp( argv[1], "versionrulebase" ) == 0 ) {
status = testVersionRuleBase( Comm, argv[2] );
didOne = 1;
}
if ( strcmp( argv[1], "versiondvmbase" ) == 0 ) {
status = testVersionDvmBase( Comm, argv[2] );
didOne = 1;
}
if ( strcmp( argv[1], "versionfnmbase" ) == 0 ) {
status = testVersionFnmBase( Comm, argv[2] );
didOne = 1;
}
if ( strcmp( argv[1], "insfnmtable" ) == 0 ) {
status = testInsFnmTable( Comm, argv[2], argv[3], argv[4], argv[5] );
didOne = 1;
}
if ( strcmp( argv[1], "insdvmtable" ) == 0 ) {
status = testInsDvmTable( Comm, argv[2], argv[3], argv[4], argv[5] );
didOne = 1;
}
if ( strcmp( argv[1], "insmsrvctable" ) == 0 ) {
status = testInsMsrvcTable( Comm, argv[2], argv[3], argv[4], argv[5],
argv[6], argv[7], argv[8], argv[9], argv[10] );
if ( status == 0 ) {
/* do it a second time to test another logic path and
different SQL. Since no commit is part of the chl
               function, and there is no corresponding Delete call, this
is an easy way to do this. */
status = testInsMsrvcTable( Comm, argv[2], argv[3], argv[4], argv[5],
argv[6], argv[7], argv[8], argv[9], argv[10] );
}
didOne = 1;
}
if ( strcmp( argv[1], "getlocalzone" ) == 0 ) {
status = testGetLocalZone( argv[2] );
didOne = 1;
}
if ( strcmp( argv[1], "getpampw" ) == 0 ) {
status = testGetPamPw( Comm, argv[2], argv[3] );
didOne = 1;
}
if ( status != 0 ) {
/*
if (Comm->rError) {
rError_t *Err;
rErrMsg_t *ErrMsg;
int i, len;
Err = Comm->rError;
len = Err->len;
for (i=0;i<len;i++) {
ErrMsg = Err->errMsg[i];
rodsLog(LOG_ERROR, "Level %d: %s",i, ErrMsg->msg);
}
}
*/
myName = rodsErrorName( status, &mySubName );
rodsLog( LOG_ERROR, "%s failed with error %d %s %s", argv[1],
status, myName, mySubName );
}
else {
if ( didOne ) {
printf( "Completed successfully\n" );
}
}
if ( didOne == 0 ) {
printf( "Unknown test type: %s\n", argv[1] );
}
exit( status );
}
/* This is a dummy version of icatApplyRule for this test program so
- the rule-engine is not needed in this ICAT test. */
int
icatApplyRule( rsComm_t*, char*, char* ) {
return 0;
}
|
{"hexsha": "9c8f8046b8d59ac6019029487b7fef8c8a40fd18", "size": 35651, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "iRODS/server/test/src/test_chl.cpp", "max_stars_repo_name": "iychoi/cyverse-irods", "max_stars_repo_head_hexsha": "0070b8677a82e763f1d940ae6537b1c8839a628a", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "iRODS/server/test/src/test_chl.cpp", "max_issues_repo_name": "iychoi/cyverse-irods", "max_issues_repo_head_hexsha": "0070b8677a82e763f1d940ae6537b1c8839a628a", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 6.0, "max_issues_repo_issues_event_min_datetime": "2019-12-02T18:17:44.000Z", "max_issues_repo_issues_event_max_datetime": "2019-12-02T18:17:57.000Z", "max_forks_repo_path": "iRODS/server/test/src/test_chl.cpp", "max_forks_repo_name": "benlazarine/cyverse-irods", "max_forks_repo_head_hexsha": "2bf9cfae4c3a1062ffe2af92b1f086ddc5fce025", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2019-12-02T05:40:13.000Z", "max_forks_repo_forks_event_max_datetime": "2019-12-02T05:40:13.000Z", "avg_line_length": 29.5858921162, "max_line_length": 108, "alphanum_fraction": 0.580376427, "num_tokens": 10563}
|
[STATEMENT]
lemma connected_trans:
assumes u_v: "u \<rightarrow>\<^sup>* v" and v_w: "v \<rightarrow>\<^sup>* w"
shows "u \<rightarrow>\<^sup>* w"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. u \<rightarrow>\<^sup>* w
[PROOF STEP]
proof-
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. u \<rightarrow>\<^sup>* w
[PROOF STEP]
obtain xs where xs: "walk xs" "xs \<noteq> Nil" "hd xs = u" "last xs = v"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<And>xs. \<lbrakk>walk xs; xs \<noteq> []; hd xs = u; last xs = v\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
using u_v connectedE
[PROOF STATE]
proof (prove)
using this:
u \<rightarrow>\<^sup>* v
\<lbrakk>?v \<rightarrow>\<^sup>* ?w; \<And>xs. \<lbrakk>walk xs; xs \<noteq> []; hd xs = ?v; last xs = ?w\<rbrakk> \<Longrightarrow> ?thesis\<rbrakk> \<Longrightarrow> ?thesis
goal (1 subgoal):
1. (\<And>xs. \<lbrakk>walk xs; xs \<noteq> []; hd xs = u; last xs = v\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
walk xs
xs \<noteq> []
hd xs = u
last xs = v
goal (1 subgoal):
1. u \<rightarrow>\<^sup>* w
[PROOF STEP]
obtain ys where ys: "walk ys" "ys \<noteq> Nil" "hd ys = v" "last ys = w"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<And>ys. \<lbrakk>walk ys; ys \<noteq> []; hd ys = v; last ys = w\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
using v_w connectedE
[PROOF STATE]
proof (prove)
using this:
v \<rightarrow>\<^sup>* w
\<lbrakk>?v \<rightarrow>\<^sup>* ?w; \<And>xs. \<lbrakk>walk xs; xs \<noteq> []; hd xs = ?v; last xs = ?w\<rbrakk> \<Longrightarrow> ?thesis\<rbrakk> \<Longrightarrow> ?thesis
goal (1 subgoal):
1. (\<And>ys. \<lbrakk>walk ys; ys \<noteq> []; hd ys = v; last ys = w\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
walk ys
ys \<noteq> []
hd ys = v
last ys = w
goal (1 subgoal):
1. u \<rightarrow>\<^sup>* w
[PROOF STEP]
let ?R = "xs @ tl ys"
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. u \<rightarrow>\<^sup>* w
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. u \<rightarrow>\<^sup>* w
[PROOF STEP]
proof
[PROOF STATE]
proof (state)
goal (4 subgoals):
1. walk ?xs
2. ?xs \<noteq> []
3. hd ?xs = u
4. last ?xs = w
[PROOF STEP]
show "walk ?R"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. walk (xs @ tl ys)
[PROOF STEP]
using walk_comp[OF xs(1)]
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>walk ?ys; xs = [] \<or> ?ys = [] \<or> last xs \<rightarrow> hd ?ys\<rbrakk> \<Longrightarrow> walk (xs @ ?ys)
goal (1 subgoal):
1. walk (xs @ tl ys)
[PROOF STEP]
by (metis xs(4) ys(1,2,3) list.sel(1,3) walk.simps)
[PROOF STATE]
proof (state)
this:
walk (xs @ tl ys)
goal (3 subgoals):
1. xs @ tl ys \<noteq> []
2. hd (xs @ tl ys) = u
3. last (xs @ tl ys) = w
[PROOF STEP]
show "?R \<noteq> Nil"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. xs @ tl ys \<noteq> []
[PROOF STEP]
by (simp add: xs(2))
[PROOF STATE]
proof (state)
this:
xs @ tl ys \<noteq> []
goal (2 subgoals):
1. hd (xs @ tl ys) = u
2. last (xs @ tl ys) = w
[PROOF STEP]
show "hd ?R = u"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. hd (xs @ tl ys) = u
[PROOF STEP]
by (simp add: xs(2,3))
[PROOF STATE]
proof (state)
this:
hd (xs @ tl ys) = u
goal (1 subgoal):
1. last (xs @ tl ys) = w
[PROOF STEP]
show "last ?R = w"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. last (xs @ tl ys) = w
[PROOF STEP]
using xs(2,4) ys(2,3,4)
[PROOF STATE]
proof (prove)
using this:
xs \<noteq> []
last xs = v
ys \<noteq> []
hd ys = v
last ys = w
goal (1 subgoal):
1. last (xs @ tl ys) = w
[PROOF STEP]
by (metis append_butlast_last_id last_append last_tl list.exhaust_sel)
[PROOF STATE]
proof (state)
this:
last (xs @ tl ys) = w
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
u \<rightarrow>\<^sup>* w
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 1799, "file": "Tree_Decomposition_Graph", "length": 22}
|
-makelib ies_lib/xilinx_vip -sv \
"B:/Xilinx/Vivado/2018.3/data/xilinx_vip/hdl/axi4stream_vip_axi4streampc.sv" \
"B:/Xilinx/Vivado/2018.3/data/xilinx_vip/hdl/axi_vip_axi4pc.sv" \
"B:/Xilinx/Vivado/2018.3/data/xilinx_vip/hdl/xil_common_vip_pkg.sv" \
"B:/Xilinx/Vivado/2018.3/data/xilinx_vip/hdl/axi4stream_vip_pkg.sv" \
"B:/Xilinx/Vivado/2018.3/data/xilinx_vip/hdl/axi_vip_pkg.sv" \
"B:/Xilinx/Vivado/2018.3/data/xilinx_vip/hdl/axi4stream_vip_if.sv" \
"B:/Xilinx/Vivado/2018.3/data/xilinx_vip/hdl/axi_vip_if.sv" \
"B:/Xilinx/Vivado/2018.3/data/xilinx_vip/hdl/clk_vip_if.sv" \
"B:/Xilinx/Vivado/2018.3/data/xilinx_vip/hdl/rst_vip_if.sv" \
-endlib
-makelib ies_lib/xil_defaultlib -sv \
"B:/Xilinx/Vivado/2018.3/data/ip/xpm/xpm_cdc/hdl/xpm_cdc.sv" \
"B:/Xilinx/Vivado/2018.3/data/ip/xpm/xpm_memory/hdl/xpm_memory.sv" \
-endlib
-makelib ies_lib/xpm \
"B:/Xilinx/Vivado/2018.3/data/ip/xpm/xpm_VCOMP.vhd" \
-endlib
-makelib ies_lib/axi_infrastructure_v1_1_0 \
"../../../../CNN_HW.srcs/sources_1/bd/CNN_top_module/ipshared/ec67/hdl/axi_infrastructure_v1_1_vl_rfs.v" \
-endlib
-makelib ies_lib/axi_vip_v1_1_4 -sv \
"../../../../CNN_HW.srcs/sources_1/bd/CNN_top_module/ipshared/98af/hdl/axi_vip_v1_1_vl_rfs.sv" \
-endlib
-makelib ies_lib/processing_system7_vip_v1_0_6 -sv \
"../../../../CNN_HW.srcs/sources_1/bd/CNN_top_module/ipshared/70cf/hdl/processing_system7_vip_v1_0_vl_rfs.sv" \
-endlib
-makelib ies_lib/xil_defaultlib \
"../../../bd/CNN_top_module/ip/CNN_top_module_processing_system7_0_0/sim/CNN_top_module_processing_system7_0_0.v" \
"../../../bd/CNN_top_module/ip/CNN_top_module_axi_cnn_0_0/sim/CNN_top_module_axi_cnn_0_0.v" \
-endlib
-makelib ies_lib/lib_cdc_v1_0_2 \
"../../../../CNN_HW.srcs/sources_1/bd/CNN_top_module/ipshared/ef1e/hdl/lib_cdc_v1_0_rfs.vhd" \
-endlib
-makelib ies_lib/proc_sys_reset_v5_0_13 \
"../../../../CNN_HW.srcs/sources_1/bd/CNN_top_module/ipshared/8842/hdl/proc_sys_reset_v5_0_vh_rfs.vhd" \
-endlib
-makelib ies_lib/xil_defaultlib \
"../../../bd/CNN_top_module/ip/CNN_top_module_rst_ps7_0_50M_0/sim/CNN_top_module_rst_ps7_0_50M_0.vhd" \
-endlib
-makelib ies_lib/generic_baseblocks_v2_1_0 \
"../../../../CNN_HW.srcs/sources_1/bd/CNN_top_module/ipshared/b752/hdl/generic_baseblocks_v2_1_vl_rfs.v" \
-endlib
-makelib ies_lib/fifo_generator_v13_2_3 \
"../../../../CNN_HW.srcs/sources_1/bd/CNN_top_module/ipshared/64f4/simulation/fifo_generator_vlog_beh.v" \
-endlib
-makelib ies_lib/fifo_generator_v13_2_3 \
"../../../../CNN_HW.srcs/sources_1/bd/CNN_top_module/ipshared/64f4/hdl/fifo_generator_v13_2_rfs.vhd" \
-endlib
-makelib ies_lib/fifo_generator_v13_2_3 \
"../../../../CNN_HW.srcs/sources_1/bd/CNN_top_module/ipshared/64f4/hdl/fifo_generator_v13_2_rfs.v" \
-endlib
-makelib ies_lib/axi_data_fifo_v2_1_17 \
"../../../../CNN_HW.srcs/sources_1/bd/CNN_top_module/ipshared/c4fd/hdl/axi_data_fifo_v2_1_vl_rfs.v" \
-endlib
-makelib ies_lib/axi_register_slice_v2_1_18 \
"../../../../CNN_HW.srcs/sources_1/bd/CNN_top_module/ipshared/cc23/hdl/axi_register_slice_v2_1_vl_rfs.v" \
-endlib
-makelib ies_lib/axi_protocol_converter_v2_1_18 \
"../../../../CNN_HW.srcs/sources_1/bd/CNN_top_module/ipshared/7a04/hdl/axi_protocol_converter_v2_1_vl_rfs.v" \
-endlib
-makelib ies_lib/xil_defaultlib \
"../../../bd/CNN_top_module/ip/CNN_top_module_auto_pc_0/sim/CNN_top_module_auto_pc_0.v" \
"../../../bd/CNN_top_module/sim/CNN_top_module.v" \
-endlib
-makelib ies_lib/xil_defaultlib \
glbl.v
-endlib
|
{"hexsha": "28944f8b0b48a6b06fe06baa23491a991a8ae68e", "size": 3470, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "CNN_HW.ip_user_files/sim_scripts/CNN_top_module/ies/run.f", "max_stars_repo_name": "awe777/CNN_HW", "max_stars_repo_head_hexsha": "c677e5969c235aa8f5f9cf34da71a5fbe61dcd90", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "CNN_HW.ip_user_files/sim_scripts/CNN_top_module/ies/run.f", "max_issues_repo_name": "awe777/CNN_HW", "max_issues_repo_head_hexsha": "c677e5969c235aa8f5f9cf34da71a5fbe61dcd90", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "CNN_HW.ip_user_files/sim_scripts/CNN_top_module/ies/run.f", "max_forks_repo_name": "awe777/CNN_HW", "max_forks_repo_head_hexsha": "c677e5969c235aa8f5f9cf34da71a5fbe61dcd90", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 49.5714285714, "max_line_length": 117, "alphanum_fraction": 0.7645533141, "num_tokens": 1334}
|
from __future__ import print_function
from asdl.transition_system import GenTokenAction, TransitionSystem, ApplyRuleAction, ReduceAction,score_acts
import sys, traceback
import numpy as np
from common.registerable import Registrable
import tqdm
cachepredict=[]
cachetrue=[]
from dependency import nlp
from nltk.tree import Tree
@Registrable.register('default_evaluator')
class Evaluator(object):
def __init__(self, transition_system=None, args=None):
self.transition_system = transition_system
self.default_metric = 'accuracy'
def is_hyp_correct(self, example, hyp):
return self.transition_system.compare_ast(hyp.tree, example.tgt_ast)#this func is at
def evaluate_dataset(self, examples, decode_results, fast_mode=False):
global cachepredict
global cachetrue
correct_array = []
oracle_array = []
cachepredict=[]
cachetrue=[]
allstats=[]
for example, hyp_list,atts in tqdm.tqdm(zip(examples, decode_results[0],decode_results[1])):
if fast_mode:
hyp_list = hyp_list[:1]
att=atts[:1]
# ast=attss[:1]
if hyp_list:
if(hyp_list[0].tree.sort_removedup_self().to_string()!=example.tgt_ast.sort_removedup_self().to_string()):
print(example.src_sent)
## tree=Tree.fromstring(str(nlp.parse(' '.join(example.src_sent))))
#
print([item.name for item in example.table.header])
print(att[0][1][0])
print(hyp_list[0].actions)
print([a.action for a in example.tgt_actions])
#
for action,at,ats in zip(hyp_list[0].actions,att[0][0],att[0][0]):
# if(show and number>3):
# if np.linalg.norm(at[-1]-at[0])>0.5:
print(example.src_sent)
print(action)
print(at)
# print(ats)
# a=input('jk')
# tree.draw()
a=input('jk')
# show=False
# number=0
# for at in att[0]:
# if np.linalg.norm(at[-1]-at[0])>0.5:
# show=True
# number+=1
# for action,at in zip(hyp_list[0].actions,att[0]):
# if(show and number>3):
# if np.linalg.norm(at[-1]-at[0])>0.5:
# print(example.src_sent)
# print(action)
# print(at)
# a=input('jk')
# for hyp_id, hyp in enumerate(hyp_list):
# try:
# is_correct = self.is_hyp_correct(example, hyp)
# except:
# is_correct = False
#
# print('-' * 60, file=sys.stdout)
# print('Error in evaluating Example %s, hyp %d {{ %s }}' % (example.idx, hyp_id, hyp.code),
# file=sys.stdout)
#
# print('example id: %s, hypothesis id: %d' % (example.idx, hyp_id), file=sys.stdout)
# traceback.print_exc(file=sys.stdout)
# print('-' * 60, file=sys.stdout)
#
# hyp.is_correct = is_correct
#
# correct_array.append(hyp_list[0].is_correct)
# correct_array.append(hyp_list[0].tree.to_string()==example.tgt_ast.to_string())
correct_array.append(hyp_list[0].tree.sort_removedup_self().to_string()==example.tgt_ast.sort_removedup_self().to_string())
# hyp_list[0].is_correct)
# cachepredict.append(hyp_list[0].tree)
# cachetrue.append(example.tgt_ast)
# print(hyp_list[0].actions)
# print([a.action for a in example.tgt_actions])
# oracle_array.append(any(hyp.is_correct for hyp in hyp_list))
# print(hyp_list[0].tree.to_string())
# print(example.tgt_ast.to_string())
# print(score_acts(hyp_list[0].actions,self.transition_system.get_actions(example.tgt_ast)))
# p=input('gg')
oracle_array.append(hyp_list[0].tree.sort_removedup_self().to_string()==example.tgt_ast.sort_removedup_self().to_string())
allstats.append(self.finemet(hyp_list[0].tree,example.tgt_ast,example,oracle_array[-1]))
# allstats.append([False,False,False])
else:
correct_array.append(False)
oracle_array.append(False)
allstats.append([False,False,False])
acc = np.average(correct_array)
allacc=np.mean(np.array(allstats),0)
oracle_acc = np.average(oracle_array)
eval_results = dict(accuracy=acc,
oracle_accuracy=oracle_acc,allaccs=allacc)
return eval_results
@Registrable.register('cached_evaluator')
class CachedExactMatchEvaluator(Evaluator):
def is_hyp_correct(self, example, hyp):
return hyp.is_correct
def evaluate_dataset(self, examples, decode_results, fast_mode=False):
if fast_mode:
acc = sum(hyps[0].is_correct for hyps in decode_results if len(hyps) > 0) / float(len(examples))
return acc
acc_array = []
oracle_array = []
for hyp_list in decode_results:
acc_array.append(hyp_list[0].is_correct if hyp_list else False)
oracle_array.append(any(hyp.is_correct for hyp in hyp_list))
return dict(accuracy=np.average(acc_array),
oracle_accuracy=np.average(oracle_array))
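# --- Illustrative usage sketch (not part of the original module) ---
# The names `transition_sys`, `dev_examples`, `dev_hyps` and `dev_atts` below are
# hypothetical placeholders for whatever transition system, examples and decoding
# results the surrounding pipeline produces; `evaluate_dataset` also relies on
# helpers (e.g. `finemet`) defined elsewhere in the project.
#
# evaluator = Evaluator(transition_system=transition_sys)
# results = evaluator.evaluate_dataset(dev_examples, (dev_hyps, dev_atts))
# print(results['accuracy'], results['oracle_accuracy'], results['allaccs'])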
|
{"hexsha": "08a6035c2219b4f1db6b7180acba5e960978d7f4", "size": 5862, "ext": "py", "lang": "Python", "max_stars_repo_path": "components/evaluator.py", "max_stars_repo_name": "tomsonsgs/TRAN-MMA-master", "max_stars_repo_head_hexsha": "91bf927c64a8d813ba60ae12e61e8f44830a82cc", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "components/evaluator.py", "max_issues_repo_name": "tomsonsgs/TRAN-MMA-master", "max_issues_repo_head_hexsha": "91bf927c64a8d813ba60ae12e61e8f44830a82cc", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "components/evaluator.py", "max_forks_repo_name": "tomsonsgs/TRAN-MMA-master", "max_forks_repo_head_hexsha": "91bf927c64a8d813ba60ae12e61e8f44830a82cc", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 45.0923076923, "max_line_length": 139, "alphanum_fraction": 0.5465711361, "include": true, "reason": "import numpy", "num_tokens": 1248}
|
## Import the required modules
# Check time required
import time
time_start = time.time()
import sys
import os
import argparse as ap
import math
import imageio
from moviepy.editor import *
import numpy as np
sys.path.append(os.path.dirname(__file__) + "/../")
from scipy.misc import imread, imsave, imresize
from skimage.measure import compare_psnr
from config import load_config
from dataset.factory import create as create_dataset
from nnet import predict
from util import visualize
from dataset.pose_dataset import data_to_input
from multiperson.detections import extract_detections
from multiperson.predict import SpatialModel, eval_graph, get_person_conf_multicut
from multiperson.visualize import PersonDraw, visualize_detections
import matplotlib.pyplot as plt
from PIL import Image, ImageDraw, ImageFont
font = ImageFont.truetype("./font/NotoSans-Bold.ttf", 12)
import random
# for object-tracker
import dlib
import video_pose
## for SORT
from sort import *
# create instance of SORT
mot_tracker = Sort()
track_bbs_ids = []
####################
cfg = load_config("demo/pose_cfg_multi.yaml")
dataset = create_dataset(cfg)
sm = SpatialModel(cfg)
sm.load()
draw_multi = PersonDraw()
# Load and setup CNN part detector
sess, inputs, outputs = predict.setup_pose_prediction(cfg)
##########
## Get the source of video
parser = ap.ArgumentParser()
parser.add_argument('-f', "--videoFile", help="Path to Video File")
parser.add_argument('-w', "--videoWidth", help="Width of Output Video")
parser.add_argument('-o', "--videoType", help="Extension of Output Video")
parser.add_argument('-t', "--poseThreshold", help="Threshold of pose-tensorflow")
args = vars(parser.parse_args())
if args["videoFile"] is not None:
video_name = args["videoFile"]
else:
print("You have to input videoFile name")
sys.exit(1)
video = video_pose.read_video(video_name)
print("Input video size: [" + str(video.size[0]) + ", " + str(video.size[1]) + "]")
video_output_name = video_name.split('.')[0]
if args["videoWidth"] is not None:
video_width = int(args["videoWidth"])
video = video.resize(width = video_width)
print("Changed video size: [" + str(video.size[0]) + ", " + str(video.size[1]) + "]")
if args["videoType"] is not None:
video_type = args["videoType"]
else:
video_type = "mp4"
print("Output video type: " + video_type)
if args["poseThreshold"] is not None:
point_min = int(args["poseThreshold"]) # threshold of points - If there are more than point_min points in person, we define he/she is REAL PERSON
else:
point_min = 14
print("Pose Threshold: " + str(point_min))
##########
## Define some functions to mark at image
def ellipse_set(person_conf_multi, people_i, point_i):
return (person_conf_multi[people_i][point_i][0] - point_r, person_conf_multi[people_i][point_i][1] - point_r, person_conf_multi[people_i][point_i][0] + point_r, person_conf_multi[people_i][point_i][1] + point_r)
##########
video_frame_number = int(video.duration * video.fps) ## duration: second / fps: frame per second
video_frame_ciphers = math.ceil(math.log(video_frame_number, 10)) ## ex. 720 -> 3
pose_frame_list = []
point_r = 3 # radius of points
point_num = 17 # There are 17 points in 1 person
tracking_people_count = 0
tracker_len_prev = 0
##########
# for object-tracker
target_points = [] # format: [(minx, miny, maxx, maxy), (minx, miny, maxx, maxy) ... ]
tracker = []
total_people = []
image_people_list = []
PSNR_list = []
same_person_list = []
PSNR_threshold = 11.5
PSNR_up_list = []
PSNR_down_list = []
image_people_up_list = []
image_people_down_list = []
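# Note (explanatory comment, not in the original script): the loop below re-identifies
# people across frames by comparing image crops with PSNR, i.e.
#   PSNR = 10 * log10(255^2 / MSE)
# as computed by skimage's compare_psnr for 8-bit images. A crop whose best PSNR
# against the stored crops exceeds PSNR_threshold is merged into that person's
# entry in same_person_list.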
for frame_index in range(0, video_frame_number):
# Save frame_index-th frame as image
image = video.get_frame(frame_index/video.fps)
# [x][y][z]: x = width, y = height, z = rgb(3)
# print(len(image)): 360
# print(len(image[0])): 640
# print(len(image[0][0])): 3
##########
## By pose-tensorflow
image_batch = data_to_input(image)
# Compute prediction with the CNN
outputs_np = sess.run(outputs, feed_dict={inputs: image_batch})
scmap, locref, pairwise_diff = predict.extract_cnn_output(outputs_np, cfg, dataset.pairwise_stats)
detections = extract_detections(cfg, scmap, locref, pairwise_diff)
unLab, pos_array, unary_array, pwidx_array, pw_array = eval_graph(sm, detections)
person_conf_multi = get_person_conf_multicut(sm, unLab, unary_array, pos_array)
#####
# Add library to draw image
image_img = Image.fromarray(image)
# Prepare saving image with points of pose
draw = ImageDraw.Draw(image_img)
#####
people_num = 0
people_real_num = 0
people_num = person_conf_multi.size / (point_num * 2)
people_num = int(people_num)
#####
dets = []
for people_i in range(0, people_num):
point_color_r = random.randrange(0, 256)
point_color_g = random.randrange(0, 256)
point_color_b = random.randrange(0, 256)
point_color = (point_color_r, point_color_g, point_color_b, 255)
point_list = []
point_count = 0
point_i = 0 # index of points
# To find the rectangle which includes that person - lists of point x, y coordinates
people_x = []
people_y = []
for point_i in range(0, point_num):
if person_conf_multi[people_i][point_i][0] + person_conf_multi[people_i][point_i][1] != 0: # If coordinates of point is (0, 0) == meaningless data
point_count = point_count + 1
point_list.append(point_i)
if point_count >= point_min:
people_real_num = people_real_num + 1
for point_i in range(0, point_num):
if person_conf_multi[people_i][point_i][0] + person_conf_multi[people_i][point_i][1] != 0: # If coordinates of point is (0, 0) == meaningless data
draw.ellipse(ellipse_set(person_conf_multi, people_i, point_i), fill=point_color)
people_x.append(person_conf_multi[people_i][point_i][0])
people_y.append(person_conf_multi[people_i][point_i][1])
dets.append([int(min(people_x)), int(min(people_y)), int(max(people_x)), int(max(people_y))])
dets = np.array(dets)
print(dets)
track_bbs_ids = mot_tracker.update(dets)
##########
for d in track_bbs_ids:
draw.rectangle([d[0], d[1], d[2], d[3]], outline='red')
draw.text((d[0], d[1]), str(d[4]), (255,0,0), font=font)
if not d[4] in total_people:
total_people.append(d[4])
image_people = []
for i in range(int(d[0]), int(d[2])+1): # x
image_people_temp = []
for j in range(int(d[1]), int(d[3])+1): # y
image_people_temp.append(image[j][i])
image_people.append(image_people_temp)
image_people_np = np.asarray(image_people)
image_people_np_rotate = np.transpose(image_people_np, (1, 0, 2))
### save images
#### - whole body
img_people = Image.fromarray(image_people_np_rotate)
if not (os.path.isdir("testset/" + video_output_name + "_tracking_t" + str(point_min))):
os.mkdir("testset/" + video_output_name + "_tracking_t" + str(point_min))
img_people.save("testset/" + video_output_name + "_tracking_t" + str(point_min) + "/p" + str(int(d[4])) + ".jpg")
image_people_np_up_height = int(image_people_np_rotate.shape[0] / 2)
#### - upper body
image_people_np_up = image_people_np_rotate[0:image_people_np_up_height]
img_people_up = Image.fromarray(image_people_np_up)
img_people_up.save("testset/" + video_output_name + "_tracking_t" + str(point_min) + "/p" + str(int(d[4])) + "_up.jpg")
#### - lower body
image_people_np_down = image_people_np_rotate[image_people_np_up_height:]
img_people_down = Image.fromarray(image_people_np_down)
img_people_down.save("testset/" + video_output_name + "_tracking_t" + str(point_min) + "/p" + str(int(d[4])) + "_down.jpg")
if len(image_people_list) == 0:
image_people_list.append([image_people_np_rotate, d[4]])
image_people_up_list.append([image_people_np_up, d[4]])
image_people_down_list.append([image_people_np_down, d[4]])
same_person_list.append([d[4]])
else:
PSNR_max = 0.0
PSNR_up_max = 0.0
PSNR_down_max = 0.0
PSNR_max_index = 0
PSNR_up_max_index = 0
PSNR_down_max_index = 0
for i in range(0, len(image_people_down_list)):
### calculate PSNR and find max PSNR
#### - whole body
image_people_ref = imresize(image_people_list[i][0], (len(image_people_np_rotate), len(image_people_np_rotate[0])), 'bilinear', 'RGB')
image_people_psnr = compare_psnr(image_people_ref, image_people_np_rotate)
PSNR_list.append([int(d[4]), int(image_people_list[i][1]), str(image_people_psnr)])
if float(image_people_psnr) > PSNR_max:
PSNR_max = float(image_people_psnr)
PSNR_max_index = int(image_people_list[i][1])
print(PSNR_max)
print(PSNR_max_index)
#### - upper body
image_people_up_ref = imresize(image_people_up_list[i][0], (len(image_people_np_up), len(image_people_np_up[0])), 'bilinear', 'RGB')
image_people_up_psnr = compare_psnr(image_people_up_ref, image_people_np_up)
PSNR_up_list.append([int(d[4]), int(image_people_up_list[i][1]), str(image_people_up_psnr)])
if float(image_people_up_psnr) > PSNR_up_max:
PSNR_up_max = float(image_people_up_psnr)
PSNR_up_max_index = int(image_people_up_list[i][1])
print(PSNR_up_max)
print(PSNR_up_max_index)
#### - lower body
image_people_down_ref = imresize(image_people_down_list[i][0], (len(image_people_np_down), len(image_people_np_down[0])), 'bilinear', 'RGB')
image_people_down_psnr = compare_psnr(image_people_down_ref, image_people_np_down)
PSNR_down_list.append([int(d[4]), int(image_people_down_list[i][1]), str(image_people_down_psnr)])
if float(image_people_down_psnr) > PSNR_down_max:
PSNR_down_max = float(image_people_down_psnr)
PSNR_down_max_index = int(image_people_down_list[i][1])
print(PSNR_down_max)
print(PSNR_down_max_index)
if PSNR_max > PSNR_threshold: # If PSNR_max is bigger than PSNR_threshold, we assume they are the same person
for i in range(0, len(same_person_list)):
if PSNR_max_index in same_person_list[i]:
same_person_list[i].append(d[4])
else:
same_person_list.append([d[4]])
image_people_list.append([image_people_np_rotate, d[4]])
print('people_real_num: ' + str(people_real_num))
print('len(track_bbs_ids): ' + str(len(track_bbs_ids)))
print('Frame: ' + str(frame_index) + "/" + str(video_frame_number))
print('Time required: ' + str(round(time.time() - time_start, 1)) + 'sec')
draw.text((0, 0), 'total_people_list: ' + str(total_people), (0,0,0), font=font)
draw.text((0, 18), 'total_people: ' + str(len(total_people)), (0,0,0), font=font)
draw.text((0, 36), 'Frame: ' + str(frame_index) + '/' + str(video_frame_number), (0,0,0), font=font)
draw.text((0, 54), 'Total time required: ' + str(round(time.time() - time_start, 1)) + 'sec', (0,0,0), font=font)
image_img_numpy = np.asarray(image_img)
pose_frame_list.append(image_img_numpy)
image_img.save("testset/" + video_output_name + "/" + frame_index + "_" + video.fps + "_" + len(total_people) + ".jpg", "JPG")
print("PSNR_list")
for i in range(0, len(PSNR_list)):
print(PSNR_list[i])
print("PSNR_up_list")
for i in range(0, len(PSNR_up_list)):
print(PSNR_up_list[i])
print("PSNR_down_list")
for i in range(0, len(PSNR_down_list)):
print(PSNR_down_list[i])
for i in range(0, len(same_person_list)):
print(same_person_list[i])
video_pose = ImageSequenceClip(pose_frame_list, fps=video.fps)
video_pose.write_videofile("testset/" + video_output_name + "_tracking_t" + str(point_min) + "." + video_type, fps=video.fps, progress_bar=False)
print("Time(s): " + str(time.time() - time_start))
print("Output video size: [" + str(video.size[0]) + ", " + str(video.size[1]) + "]")
|
{"hexsha": "3ddc11ef37d843205813df5fc7a5913a0a68982e", "size": 12910, "ext": "py", "lang": "Python", "max_stars_repo_path": "video_tracking_sort.py", "max_stars_repo_name": "hiepnth/people-counting-pose", "max_stars_repo_head_hexsha": "8cdaab5281847c296b305643842053d496e2e4e8", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 161, "max_stars_repo_stars_event_min_datetime": "2018-02-22T15:15:47.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-10T16:40:06.000Z", "max_issues_repo_path": "video_tracking_sort.py", "max_issues_repo_name": "hiepnth/people-counting-pose", "max_issues_repo_head_hexsha": "8cdaab5281847c296b305643842053d496e2e4e8", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 15, "max_issues_repo_issues_event_min_datetime": "2018-03-01T23:18:00.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-15T06:23:15.000Z", "max_forks_repo_path": "video_tracking_sort.py", "max_forks_repo_name": "hiepnth/people-counting-pose", "max_forks_repo_head_hexsha": "8cdaab5281847c296b305643842053d496e2e4e8", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 41, "max_forks_repo_forks_event_min_datetime": "2018-03-01T13:03:54.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-17T14:32:22.000Z", "avg_line_length": 38.8855421687, "max_line_length": 215, "alphanum_fraction": 0.6399690163, "include": true, "reason": "import numpy,from scipy", "num_tokens": 3258}
|
#!/usr/bin/env python3
""" 2D Ising simulator using Metropolis algorithm
Author: Akhlak Mahmood
License: MIT
Last update: April 18, 2019
"""
## Import modules
# -------------------------------------------
import sys
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import animation
from tqdm import tqdm # fancy python progress bar (> pip install tqdm)
## Define constants
# -------------------------------------------
J = 1
## Setup plot
# -------------------------------------------
plt.rcParams["figure.figsize"] = (8, 6)
plt.rcParams["font.size"] = 12
## 2D Ising Lattice class
# -------------------------------------------
class IsingModel:
def __init__(self, sqrsize=4, initial_spins='r'):
""" Initialize a 2D square Ising lattice with the specified initial spin
Args
----
sqrsize (int) : size of the lattice
initial_spins (1 or -1 or 'r') : set spins to 1 or -1 or random if 'r'
"""
self.sqr = sqrsize*sqrsize
self.size = sqrsize
self._magvalue = np.zeros(1)
self.encoder = animation.writers['ffmpeg']
self.writer = None
# build the system
if initial_spins == 'r':
# random spins
system = np.random.randint(0, 1+1, (self.size, self.size))
# flip to -1 if an item is a zero
system[system==0] = -1
else:
# fixed spins
system = np.ones((self.size, self.size)) * initial_spins
self.system = system
def _bc(self, x):
""" Apply pbc condition to lattice site abscissa or ordinate x """
if x+1 > self.size-1:
# wrap back to 0
return 0
elif x-1 < 0:
# wrap back to far side
return self.size-1
else:
# inside box
return x
def _deltaH(self, i, j, B=0.0):
""" Compute delta H for a single spin site i, j using pbc.
Include external field contribution if any.
"""
return -2 * self.system[i,j] * (
B + self.system[self._bc(i-1), j]
+ self.system[self._bc(i+1), j]
+ self.system[i, self._bc(j-1)]
+ self.system[i, self._bc(j+1)])
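# Explanatory note (added comment): with J = 1, the Metropolis energy change used in
# run()/runMovie() is delE = -_deltaH(i, j, B) = 2 * s_ij * (B + sum of the four
# nearest-neighbour spins), so flips that lower the energy give delE <= 0 and are
# always accepted.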
def plot_system(self, saveto=None):
""" Plot the current state of the system """
plt.close('all')
plt.ion()
plt.imshow(self.system, interpolation='nearest')
plt.xlim(0, self.size)
plt.ylim(0, self.size)
if saveto:
plt.savefig(saveto)
plt.show()
def plot_magnetization(self, saveto=None):
""" Plot magnetization vs time for the last run """
plt.close('all')
plt.plot(self._magvalue)
plt.xlabel('time')
plt.ylabel('average magnetization')
plt.grid()
if saveto:
plt.savefig(saveto)
plt.show()
@property
def magnetization(self):
""" Return average magnetization of the system """
return np.sum(self.system) / self.sqr
def run(self, T, stepcount, B=0.0):
""" Do a Metropolis sweep on the system for stepcount steps
Use run() if you need to do multiple MC sweeps as fast as possible.
Use runMovie() if you need to run a single MC sweep and save it as a movie.
Args
----
T (float) : temperature of the run
stepcount (int) : number of Metropolis iterations
B (float) : external field, default 0.0
Return
------
A list of magnetization values at each time step
"""
self._magvalue = np.zeros(1)
self._magvalue[0] = self.magnetization
# 15 initial MC steps as a heat up process
for step in range(stepcount+15):
# choose a random site
i, j = np.random.randint(0, self.size, 2)
# find the energy change if the site was flipped
delE = -self._deltaH(i, j, B)
# accept if favorable
if delE <= 0. or np.random.rand() < np.exp(- delE / T):
self.system[i][j] *= -1
# a list of magnetizations
self._magvalue = np.concatenate([self._magvalue, [self.magnetization]])
# discard the initial 15 steps
self._magvalue = self._magvalue[15:]
return self._magvalue
def save_movie(self, filename='ising.mp4'):
""" Initialize or save current frame to the movie file """
# initialize movie writer
if self.writer is None:
print('saving movie %s' %filename)
self.writer = self.encoder(fps=10)
plt.close('all')
plt.ion()
fig = plt.figure()
self.writer.setup(fig, filename, 100)
# grab the current plot as movie frame
else:
img = plt.imshow(self.system, interpolation='nearest')
self.writer.grab_frame()
img.remove()
def finish_movie(self):
""" Make sure this method is called when finished with the movie """
if self.writer:
self.writer.finish()
self.writer = None
plt.close('all')
print('movie saved')
def runMovie(self, T, stepcount, B=0.0, nmov=100):
""" Run Metropolis for stepcount steps and save movie frames
with a progress bar.
finish_movie() is called automatically at the end.
Args
----
T (float) : temperature
stepcount (int) : number of Metropolis steps
B (float) : external magnetic field
nmov (int) : save movie frames every nmov steps; specify False
if you do not want to save any (for speedup reasons).
"""
self._magvalue = np.zeros(1)
self._magvalue[0] = self.magnetization
sys.stdout.flush()
for step in tqdm(range(stepcount)):
i, j = np.random.randint(0, self.size, 2)
delE = -self._deltaH(i, j, B)
if delE <= 0. or np.random.rand() < np.exp(-delE/T):
self.system[i][j] *= -1
if nmov and step % nmov == 0:
self.save_movie()
# a list of magnetizations
self._magvalue = np.concatenate([self._magvalue, [self.magnetization]])
tqdm.write("Net Magnetization: {:.2f}".format(self.magnetization))
self.finish_movie()
def make_droplet(lattice, radius, spin):
""" Choose a circular area at the center and set specified spin """
size = lattice.size
if radius < size / 2:
# center of the system
x, y = int(size/2), int(size/2)
# sweep over the sites
for i in range(size):
for j in range(size):
# measure distance from center
d = np.sqrt((x - i)**2 + (y - j)**2)
if d <= radius:
lattice.system[i][j] = spin
return lattice
def run():
""" Run for magnetization as a function of Temperature and External Field """
global lattice, size
size = 40
print(" -- reverse T, -ve B field -- ")
# --------------------------------------------------------
lattice = IsingModel(size)
lattice.save_movie('cooling-negativeB.mp4')
T = np.flip(np.linspace(0.1, 4.0, 20))
M0 = np.zeros(len(T))
sys.stdout.flush()
for i, t in enumerate(tqdm(T)):
M0[i] = np.mean(lattice.run(t, 100 * size * size, -0.1))
lattice.save_movie()
lattice.finish_movie()
plt.plot(T, M0)
plt.show()
print(" -- field dependence, T = 2.0 -- ")
# --------------------------------------------------------
lattice = IsingModel(size)
lattice.save_movie('hysteresis.mp4')
B = np.linspace(-0.225, 0.225, 20)
M0 = np.zeros(len(B))
sys.stdout.flush()
for i, b in enumerate(tqdm(B)):
M0[i] = np.mean(lattice.run(2.0, 100 * size * size, b))
lattice.save_movie()
lattice.finish_movie()
plt.plot(B, M0)
plt.show()
def main():
""" Nucleation run """
size = 40
lattice = IsingModel(size, 1)
lattice.plot_system('initial-system.png')
lattice.save_movie('no-droplet.mp4')
lattice.runMovie(1.5, 250*size*size, -0.30, False)
lattice.plot_system('no-droplet-final-system.png')
lattice.plot_magnetization('no-droplet-magnetization.png')
r = 10 #run number
lattice = make_droplet(lattice, 5.0, -1)
lattice.plot_system('droplet-system-%s.png' %r)
lattice.save_movie('droplet-%s.mp4' %r)
lattice.runMovie(1.5, 1000*size*size, -0.15, False)
lattice.plot_system('droplet-final-system-%s.png' %r)
lattice.plot_magnetization('droplet-magnetization-%s.png' %r)
if __name__ == '__main__':
main()
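# Minimal usage sketch (illustrative only; parameter values are arbitrary and ffmpeg
# must be installed for movie output):
#   lattice = IsingModel(sqrsize=20, initial_spins='r')
#   mags = lattice.run(T=2.5, stepcount=20 * 20 * 100)   # magnetization per step
#   lattice.plot_magnetization('magnetization.png')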
|
{"hexsha": "a51625211ec86631f5232753a03b2bbd22be3764", "size": 9026, "ext": "py", "lang": "Python", "max_stars_repo_path": "ising/ising.py", "max_stars_repo_name": "akhlak-mahmood/ising-model-2d", "max_stars_repo_head_hexsha": "69739ef250e5719a6d3f8dccf49e4d81828aaa80", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ising/ising.py", "max_issues_repo_name": "akhlak-mahmood/ising-model-2d", "max_issues_repo_head_hexsha": "69739ef250e5719a6d3f8dccf49e4d81828aaa80", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ising/ising.py", "max_forks_repo_name": "akhlak-mahmood/ising-model-2d", "max_forks_repo_head_hexsha": "69739ef250e5719a6d3f8dccf49e4d81828aaa80", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.124137931, "max_line_length": 83, "alphanum_fraction": 0.5325725681, "include": true, "reason": "import numpy", "num_tokens": 2208}
|
import os
import json
import time
import random
import warnings
from typing import Union, Callable, Tuple, Any
from types import MethodType
try:
import h5py
except ModuleNotFoundError:
h5py = None
import joblib
import matplotlib # for version info
import numpy as np
import pandas as pd
try:
from scipy.stats import median_abs_deviation as mad
except ImportError:
from scipy.stats import median_absolute_deviation as mad
from ai4water.nn_tools import NN
from ai4water.preprocessing.datahandler import DataHandler
from ai4water.backend import sklearn_models
from ai4water.utils.plotting_tools import Plots
from ai4water.utils.utils import ts_features, make_model
from ai4water.utils.utils import find_best_weight, reset_seed
from ai4water.models.custom_training import train_step, test_step
from ai4water.utils.visualizations import PlotResults
from ai4water.utils.utils import maybe_create_path, save_config_file, dateandtime_now
from .backend import tf, keras, torch, catboost_models, xgboost_models, lightgbm_models
from ai4water.utils.utils import maybe_three_outputs, get_version_info
import ai4water.backend as K
if K.BACKEND == 'tensorflow' and tf is not None:
from ai4water.tf_attributes import LOSSES, OPTIMIZERS
elif K.BACKEND == 'pytorch' and torch is not None:
from ai4water.pytorch_attributes import LOSSES, OPTIMIZERS
try:
from wandb.keras import WandbCallback
import wandb
except ModuleNotFoundError:
WandbCallback = None
wandb = None
class BaseModel(NN, Plots):
""" Model class that implements logic of AI4Water. """
def __init__(self,
model: Union[dict, str] = None,
data=None,
lr: float = 0.001,
optimizer='adam',
loss: Union[str, Callable] = 'mse',
quantiles=None,
epochs: int = 14,
min_val_loss: float = 0.0001,
patience: int = 100,
save_model: bool = True,
metrics: Union[str, list] = None,
val_metric: str = 'mse',
cross_validator: dict = None,
wandb_config: dict = None,
seed: int = 313,
prefix: str = None,
path: str = None,
verbosity: int = 1,
accept_additional_args: bool = False,
**kwargs):
"""
The Model class can take a large number of possible arguments depending
upon the machine learning model/algorithm used. Not all the arguments
are applicable in each case. The user must define only the relevant/applicable
parameters and leave the others as they are.
Arguments:
model :
a dictionary defining machine learning model.
If you are building a non-tensorflow model
then this dictionary must consist of the name of the model as key
and the keyword arguments to that model as dictionary. For example
to build a decision tree based model
```python
model = {'DecisionTreeRegressor': {"max_depth": 3, "criterion": "mae"}}
```
The key 'DecisionTreeRegressor' should exactly match the name of
the model from following libraries
-scikit-learn
-xgboost
-catboost
-lightgbm
The value {"max_depth": 3, "criterion": "mae"} is another dictionary
which can be any keyword argument which the `model` (DecisionTreeRegressor
in this case) accepts. The user must refer to the documentation
of the underlying library (scikit-learn for DecisionTreeRegressor)
to find out complete keyword arguments applicable for a particular model.
If You are building a Deep Learning model using tensorflow, then the key
must be 'layers' and the value must itself be a dictionary defining layers
of neural networks. For example we can build an MLP as following
```python
model = {'layers': {
"Dense_0": {'units': 64, 'activation': 'relu'},
"Flatten": {},
"Dense_3": {'units': 1}
}}
```
The MLP in this case consists of dense, and flatten layers. The user
can define any keyword arguments which is accepted by that layer in
TensorFlow. For example the `Dense` layer in TensorFlow can accept
`units` and `activation` keyword argument among others. For details
on how to build neural networks using such layered API [see](https://ai4water.readthedocs.io/en/latest/build_dl_models/)
lr float: Default is 0.001.
learning rate.
optimizer str/keras.optimizers like:
the optimizer to be used for neural network training. Default is 'adam'
loss str/callable: Default is `mse`.
the cost/loss function to be used for training neural networks.
quantiles list: Default is None
quantiles to be used when the problem is quantile regression.
epochs int: Default is 14
number of epochs to be used.
min_val_loss float: Default is 0.0001.
minimum value of validation loss/error to be used for early stopping.
patience int:
number of epochs to wait before early stopping. Set this value to None
if you don't want to use EarlyStopping.
save_model bool:
whether to save the model or not. For neural networks, the model will
be saved only if an improvement in training/validation loss is observed;
otherwise the model is not saved.
subsequences int: Default is 3.
The number of sub-sequences. Relevant for building CNN-LSTM based models.
metrics str/list:
metrics to be monitored. e.g. ['nse', 'pbias']
val_metric :
performance metric to be used for validation/cross_validation.
This metric will be used for hyper-parameter optimization and
experiment comparison.
cross_validator :
selects the type of cross validation to be applied. It can be any
cross validator from sklearn.model_selection. Default is None, which
means validation will be done using `validation_data`. To use
kfold cross validation,
```python
cross_validator = {'kfold': {'n_splits': 5}}
```
batches str:
either `2d` or `3d`.
wandb_config :
Only valid if wandb package is installed. Default value is None,
which means, wandb will not be utilized. For simplest case, just pass
an empty dictionary. Otherwise use a dictionary of all the
arguments for wandb.init, wandb.log and WandbCallback. For
`training_data` and `validation_data` in `WandbCallback`, pass
`True` instead of providing a tuple.
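For example, the following illustrative configuration (values are placeholders)
monitors validation loss and logs the training and validation data
```python
wandb_config = {'monitor': 'val_loss', 'training_data': True, 'validation_data': True}
```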
seed int:
random seed for reproducibility. This can be set to None. The seed
is set to `np`, `os`, `tf`, `torch` and `random` modules simultaneously.
prefix str:
prefix to be used for the folder in which the results are saved.
default is None, which means within
./results/model_path
path str/path like:
if not given, new model_path path will not be created.
verbosity int: default is 1.
determines the amount of information being printed. 0 means no
print information. Can be between 0 and 3. Setting this value to 0
will also result in not showing some plots such as loss curve or
regression plot. These plots will only be saved in self.path.
accept_additional_args bool: Default is False
If you want to pass any additional argument, then this argument
must be set to True, otherwise an error will be raised.
kwargs : keyword arguments for `DataHandler` class
Example
---------
```python
>>>from ai4water import Model
>>>from ai4water.datasets import arg_beach
>>>df = arg_beach()
>>>model = Model(data=df,
... batch_size=16,
... model={'layers': {'LSTM': 64}},
...)
>>>history = model.fit()
>>>y, obs = model.predict()
```
"""
if self._go_up:
maker = make_model(
model=model,
prefix=prefix,
path=path,
verbosity=verbosity,
lr=lr,
optimizer=optimizer,
loss = loss,
quantiles = quantiles,
epochs = epochs,
min_val_loss=min_val_loss,
patience = patience,
save_model = save_model,
metrics = metrics or ['nse'],
val_metric = val_metric,
cross_validator = cross_validator,
accept_additional_args = accept_additional_args,
seed = seed,
wandb_config = wandb_config,
**kwargs
)
# data_config, model_config = config['data_config'], config['model_config']
reset_seed(maker.config['seed'], os, random, np, tf, torch)
if tf is not None:
# graph should be cleared every time we build a new `Model`; otherwise, if two `Models` are prepared in the same
# file, they may share the same graph.
tf.keras.backend.clear_session()
self.dh = DataHandler(data=data, **maker.data_config)
# if DataHandler defines input and output features, we must put it back in config
# so that it can be accessed as .config['input_features'] etc.
maker.config['input_features'] = self.dh.input_features
maker.config['output_features'] = self.dh.output_features
NN.__init__(self, config=maker.config)
self.path = maybe_create_path(path=path, prefix=prefix)
self.verbosity = verbosity
self.category = self.config['category']
self.mode = self.config['mode']
self.info = {}
Plots.__init__(self, self.path, self.mode, self.category,
config=maker.config)
def __getattr__(self, item):
"""instead of doing model.dh.num_ins do model.num_ins"""
if item in [
'data',
'test_indices', 'train_indices',
'num_outs', 'forecast_step', 'num_ins',
]:
return getattr(self.dh, item)
else:
raise AttributeError(f'BaseModel has no attribute named {item}')
# because __getattr__ does not work with pytorch, we explicitly get attributes from
# DataHandler and assign them to Model
@property
def forecast_len(self):
return self.dh.forecast_len
@property
def num_outs(self):
return self.dh.num_outs
@property
def num_ins(self):
return self.dh.num_ins
@property
def is_binary(self):
return self.dh.is_binary
@property
def is_multiclass(self):
return self.dh.is_multiclass
@property
def output_features(self):
return self.dh.output_features
@property
def classes(self):
return self.dh.classes
@property
def num_classes(self):
return self.dh.num_classes
@property
def is_multilabel(self):
return self.dh.is_multilabel
@property
def quantiles(self):
return self.config['quantiles']
@property
def act_path(self):
return os.path.join(self.path, 'activations')
@property
def w_path(self):
return os.path.join(self.path, 'weights')
@property
def data_path(self):
return os.path.join(self.path, 'data')
# because __getattr__ does not work with pytorch, we explicitly get attributes from
# DataHandler and assign them to Model
def training_data(self, *args, **kwargs):
return self.dh.training_data(*args, **kwargs)
def validation_data(self, *args, **kwargs):
return self.dh.validation_data(*args, **kwargs)
def test_data(self, *args, **kwargs):
return self.dh.test_data(*args, **kwargs)
def nn_layers(self):
if hasattr(self, 'layers'):
return self.layers
elif hasattr(self._model, 'layers'):
return self._model.layers
else:
return None
@property
def ai4w_outputs(self):
"""alias for keras.MOdel.outputs!"""
if hasattr(self, 'outputs'):
return self.outputs
elif hasattr(self._model, 'outputs'):
return self._model.outputs
else:
return None
def trainable_parameters(self) -> int:
"""Calculates trainable parameters in the model
for more [see](https://discuss.pytorch.org/t/how-do-i-check-the-number-of-parameters-of-a-model/4325/9)
"""
if self.config['backend'] == 'pytorch':
return sum(p.numel() for p in self.parameters() if p.requires_grad)
else:
if hasattr(self, 'count_params'):
return int(self.count_params())
else:
return int(self._model.count_params())
def loss(self):
# overwrite this function for a customized loss function.
# this function should return something which can be accepted as 'loss' by the keras Model.
# It can be a string or callable.
if callable(self.config['loss']):
return self.config['loss']
if self.config['backend'] == 'pytorch':
return LOSSES[self.config['loss'].upper()]()
return LOSSES[self.config['loss'].upper()]
@property
def fit_fn(self):
# this points to the Keras's fit method
return NotImplementedError
@property
def evaluate_fn(self):
# this points to the Keras's evaluate method
return NotImplementedError
@property
def predict_fn(self, *args, **kwargs):
return NotImplementedError
@property
def api(self):
return NotImplementedError
@property
def input_layer_names(self):
return NotImplementedError
@property
def num_input_layers(self):
return NotImplementedError
@property
def layer_names(self):
return NotImplementedError
@property
def dl_model(self):
if self.api == "subclassing":
return self
else:
return self._model
def first_layer_shape(self):
return NotImplementedError
def get_callbacks(self, val_data, callbacks=None):
if self.config['backend'] == 'pytorch':
return self.cbs_for_pytorch(val_data, callbacks)
else:
return self.cbs_for_tf(val_data, callbacks)
def cbs_for_pytorch(self, *args, **kwargs):
"""Callbacks for pytorch training."""
return []
def cbs_for_tf(self, val_data, callbacks=None):
if callbacks is None:
callbacks = {}
# container to hold all callbacks
_callbacks = list()
_monitor = 'val_loss' if val_data else 'loss'
fname = "{val_loss:.5f}.hdf5" if val_data else "{loss:.5f}.hdf5"
if int(''.join(tf.__version__.split('.')[0:2])) <= 115:
for lyr_name in self.layer_names:
if 'HA_weighted_input' in lyr_name or 'SeqWeightedAttention_weights' in lyr_name:
self.config['save_model'] = False
warnings.warn("Can not save Heirarchical model with tf<= 1.15")
if self.config['save_model']:
_callbacks.append(keras.callbacks.ModelCheckpoint(
filepath=self.w_path + f"{os.sep}weights_" + "{epoch:03d}_" + fname,
save_weights_only=True,
monitor=_monitor,
mode='min',
save_best_only=True))
if self.config['patience']:
_callbacks.append(keras.callbacks.EarlyStopping(
monitor=_monitor, min_delta=self.config['min_val_loss'],
patience=self.config['patience'], verbose=0, mode='auto'
))
if 'tensorboard' in callbacks:
tb_kwargs = callbacks['tensorboard']
if 'log_dir' not in tb_kwargs: tb_kwargs['log_dir'] = self.path
_callbacks.append(keras.callbacks.TensorBoard(**tb_kwargs))
callbacks.pop('tensorboard')
for val in callbacks.values():
_callbacks.append(val)
return _callbacks
def get_val_data(self, val_data):
"""Finds out if there is val_data or not"""
if isinstance(val_data, tuple):
if val_data[0] is None and val_data[1] is None:
return None
# val_data was probably available in kwargs, so use them as it is
return val_data
validation_data = None
if val_data is not None:
if isinstance(val_data, tuple):
x = val_data[0]
if x is not None:
if isinstance(x, np.ndarray):
if x.size > 0:
validation_data = val_data
elif isinstance(x, dict): # x may be a dictionary
for v in x.values():
if v.size > 0:
validation_data = val_data
break
elif isinstance(x, list):
for v in x:
if v.size > 0:
validation_data = val_data
break
else:
raise ValueError(f'Unrecognizable validation data {val_data.__class__.__name__}')
return validation_data
def DO_fit(self, x, **kwargs):
"""
Some preprocessing before calling the actual fit.
If nans are present in y, fit is called with custom train_step and
test_step which avoid calculating the loss at points containing nans;
otherwise tf.keras.Model.fit is called as it is."""
if kwargs.pop('nans_in_y_exist'): # todo, for model-subclassing?
if not isinstance(x, tf.data.Dataset): # when x is tf.Dataset, we don't have y in kwargs
y = kwargs['y']
assert np.isnan(y).sum() > 0
kwargs['y'] = np.nan_to_num(y) # In graph mode, masking of nans does not work
self._model.train_step = MethodType(train_step, self._model)
self._model.test_step = MethodType(test_step, self._model)
return self.fit_fn(x, **kwargs)
def _FIT(self, inputs, outputs, validation_data, validation_steps=None, callbacks=None, **kwargs):
nans_in_y_exist = False
if isinstance(outputs, np.ndarray):
if np.isnan(outputs).sum() > 0:
nans_in_y_exist = True
elif isinstance(outputs, list):
for out_array in outputs:
if np.isnan(out_array).sum() > 0:
nans_in_y_exist = True
elif isinstance(outputs, dict):
for out_array in outputs.values():
if np.isnan(out_array).sum() > 0:
nans_in_y_exist = True
validation_data = self.get_val_data(validation_data)
outputs = get_values(outputs)
if validation_data is not None:
val_outs = validation_data[-1]
val_outs = get_values(val_outs)
validation_data = (validation_data[0], val_outs)
if K.BACKEND == 'tensorflow':
callbacks = self.get_wandb_cb(callbacks, train_data=(inputs, outputs),
validation_data=validation_data,
)
callbacks = self.get_callbacks(validation_data, callbacks=callbacks)
st = time.time()
# .fit was called with epochs, so we must put that in config as well!
if 'epochs' in kwargs:
self.config['epochs'] = kwargs.pop('epochs')
self.DO_fit(x=inputs,
y=None if inputs.__class__.__name__ in ['TorchDataset', 'BatchDataset'] else outputs,
epochs=self.config['epochs'],
batch_size=None if inputs.__class__.__name__ in ['TorchDataset', 'BatchDataset'] else self.config['batch_size'],
validation_data=validation_data,
callbacks=callbacks,
shuffle=self.config['shuffle'],
steps_per_epoch=self.config['steps_per_epoch'],
verbose=self.verbosity,
nans_in_y_exist=nans_in_y_exist,
validation_steps=validation_steps,
**kwargs,
)
self.info['training_time_in_minutes'] = round(float(time.time() - st) / 60.0, 2)
return self.post_fit()
def get_wandb_cb(self, callback, train_data, validation_data) -> dict:
"""Makes WandbCallback and add it in callback"""
if callback is None:
callback = {}
self.use_wandb = False
if wandb is not None:
wandb_config: dict = self.config['wandb_config']
if wandb_config is not None:
self.use_wandb = True
wandb.init(name=os.path.basename(self.path),
project=wandb_config.get('project', 'keras_with_ai4water'),
notes=wandb_config.get('notes', f"{self.mode} with {self.config['backend']}"),
tags=['ai4water', 'keras'],
entity=wandb_config.get('entity', 'atherabbas'))
monitor = self.config.get('monitor', 'val_loss')
if 'monitor' in wandb_config:
monitor = wandb_config.pop('monitor')
add_train_data = False
if 'training_data' in wandb_config:
add_train_data = wandb_config.pop('training_data')
add_val_data = False
if 'validation_data' in wandb_config:
add_val_data = wandb_config.pop('validation_data')
assert callable(WandbCallback)
callback['wandb_callback'] = WandbCallback(monitor=monitor,
training_data=train_data if add_train_data else None,
validation_data=validation_data if add_val_data else None,
**wandb_config
)
return callback
def post_fit_wandb(self):
"""does some stuff related to wandb at the end of training."""
if K.BACKEND == 'tensorflow' and self.use_wandb:
getattr(wandb, 'finish')()
return
def post_fit(self):
"""Does some stuff after Keras model.fit has been called"""
if K.BACKEND == 'pytorch':
history = self.torch_learner.history
elif hasattr(self, 'history'):
history = self.history
else:
history = self._model.history
self.save_config(history.history)
# save all the losses or performance metrics
df = pd.DataFrame.from_dict(history.history)
df.to_csv(os.path.join(self.path, "losses.csv"))
return history
def maybe_not_3d_data(self, true, predicted, forecast_len):
if true.ndim < 3:
assert forecast_len == 1, f'{forecast_len}'
axis = 2 if true.ndim == 2 else (1, 2)
true = np.expand_dims(true, axis=axis)
if predicted.ndim < 3:
assert forecast_len == 1
axis = 2 if predicted.ndim == 2 else (1, 2)
predicted = np.expand_dims(predicted, axis=axis)
return true, predicted
def process_class_results(self,
true: np.ndarray,
predicted: np.ndarray,
metrics="minimal",
prefix=None,
index=None,
user_defined_data: bool = False
):
"""post-processes classification results."""
from ai4water.postprocessing.SeqMetrics import ClassificationMetrics
if self.is_multiclass:
pred_labels = [f"pred_{i}" for i in range(predicted.shape[1])]
true_labels = [f"true_{i}" for i in range(true.shape[1])]
fname = os.path.join(self.path, f"{prefix}_prediction.csv")
pd.DataFrame(np.concatenate([true, predicted], axis=1),
columns=true_labels + pred_labels, index=index).to_csv(fname)
metrics = ClassificationMetrics(true, predicted, categorical=True)
save_config_file(self.path,
errors=metrics.calculate_all(),
name=f"{prefix}_{dateandtime_now()}.json"
)
else:
if predicted.ndim == 1:
predicted = predicted.reshape(-1, 1)
for idx, _class in enumerate(self.out_cols):
_true = true[:, idx]
_pred = predicted[:, idx]
fpath = os.path.join(self.path, _class)
if not os.path.exists(fpath):
os.makedirs(fpath)
class_metrics = ClassificationMetrics(_true, _pred, categorical=False)  # renamed to avoid shadowing the `metrics` argument
save_config_file(fpath,
errors=getattr(class_metrics, f"calculate_{metrics}")(),
name=f"{prefix}_{_class}_{dateandtime_now()}.json"
)
fname = os.path.join(fpath, f"{prefix}_{_class}.csv")
array = np.concatenate([_true.reshape(-1, 1), _pred.reshape(-1, 1)], axis=1)
pd.DataFrame(array, columns=['true', 'predicted'], index=index).to_csv(fname)
return
def process_regres_results(
self,
true: np.ndarray,
predicted: np.ndarray,
metrics="minimal",
prefix=None,
index=None,
remove_nans=True,
user_defined_data: bool = False,
annotate_with="r2",
):
"""
predicted, true are arrays of shape (examples, outs, forecast_len).
annotate_with : which value to write on regression plot
"""
from ai4water.postprocessing.SeqMetrics import RegressionMetrics
metric_names = {'r2': "$R^2$"}
visualizer = PlotResults(path=self.path)
if user_defined_data:
# when data is user_defined, we don't know what out_cols, and forecast_len are
if predicted.ndim == 1:
out_cols = ['output']
else:
out_cols = [f'output_{i}' for i in range(predicted.shape[-1])]
forecast_len = 1
true, predicted = self.maybe_not_3d_data(true, predicted, forecast_len)
else:
# for cases if they are 2D/1D, add the third dimension.
true, predicted = self.maybe_not_3d_data(true, predicted, self.forecast_len)
forecast_len = self.forecast_len
if isinstance(forecast_len, dict):
forecast_len = np.unique(list(forecast_len.values())).item()
out_cols = list(self.out_cols.values())[0] if isinstance(self.out_cols, dict) else self.out_cols
for idx, out in enumerate(out_cols):
horizon_errors = {metric_name: [] for metric_name in ['nse', 'rmse']}
for h in range(forecast_len):
errs = dict()
fpath = os.path.join(self.path, out)
if not os.path.exists(fpath):
os.makedirs(fpath)
t = pd.DataFrame(true[:, idx, h], index=index, columns=['true_' + out])
p = pd.DataFrame(predicted[:, idx, h], index=index, columns=['pred_' + out])
if wandb is not None and self.config['wandb_config'] is not None:
self._wandb_scatter(t.values, p.values, out)
df = pd.concat([t, p], axis=1)
df = df.sort_index()
fname = prefix + out + '_' + str(h) + dateandtime_now() + ".csv"
df.to_csv(os.path.join(fpath, fname), index_label='index')
annotation_val = getattr(RegressionMetrics(t, p), annotate_with)()
visualizer.plot_results(t, p, name=prefix + out + '_' + str(h), where=out,
annotation_key=metric_names.get(annotate_with, annotate_with),
annotation_val=annotation_val,
show=self.verbosity)
if remove_nans:
nan_idx = np.isnan(t)
t = t.values[~nan_idx]
p = p.values[~nan_idx]
errors = RegressionMetrics(t, p)
errs[out + '_errors_' + str(h)] = getattr(errors, f'calculate_{metrics}')()
errs[out + 'true_stats_' + str(h)] = ts_features(t)
errs[out + 'predicted_stats_' + str(h)] = ts_features(p)
save_config_file(fpath, errors=errs, name=prefix)
for p in horizon_errors.keys():
horizon_errors[p].append(getattr(errors, p)())
if forecast_len > 1:
visualizer.horizon_plots(horizon_errors, f'{prefix}_{out}_horizons.png')
return
def _wandb_scatter(self, true: np.ndarray, predicted: np.ndarray, name: str) -> None:
"""Adds a scatter plot on wandb."""
data = [[x, y] for (x, y) in zip(true.reshape(-1,), predicted.reshape(-1,))]
table = wandb.Table(data=data, columns=["true", "predicted"])
wandb.log({
"scatter_plot": wandb.plot.scatter(table, "true", "predicted",
title=name)
})
return
def build_ml_model(self):
"""Builds ml models
Models that follow sklearn api such as xgboost,
catboost, lightgbm and obviously sklearn.
"""
ml_models = {**sklearn_models, **xgboost_models, **catboost_models, **lightgbm_models}
_model = list(self.config['model'].keys())[0]
regr_name = _model.upper()
kwargs = list(self.config['model'].values())[0]
if regr_name in ['HISTGRADIENTBOOSTINGREGRESSOR', 'SGDREGRESSOR', 'MLPREGRESSOR']:
if self.config['val_fraction'] > 0.0:
kwargs.update({'validation_fraction': self.config['val_fraction']})
elif self.config['test_fraction'] > 0.0:
kwargs.update({'validation_fraction': self.config['test_fraction']})
# some algorithms allow detailed output during training, this is allowed when self.verbosity is > 1
if regr_name in ['ONECLASSSVM']:
kwargs.update({'verbose': True if self.verbosity > 1 else False})
if regr_name in ['TPOTREGRESSOR', 'TPOTCLASSIFIER']:
if 'verbosity' not in kwargs:
kwargs.update({'verbosity': self.verbosity})
if regr_name == "CATBOOSTREGRESSOR": # https://stackoverflow.com/a/52921608/5982232
if not any([arg in kwargs for arg in ['verbose', 'silent', 'logging_level']]):
if self.verbosity == 0:
kwargs['logging_level'] = 'Silent'
elif self.verbosity == 1:
kwargs['logging_level'] = 'Verbose'
else:
kwargs['logging_level'] = 'Info'
self.residual_threshold_not_set = False
if regr_name == "RANSACREGRESSOR" and 'residual_threshold' not in kwargs:
self.residual_threshold_not_set = True
if regr_name in ml_models:
model = ml_models[regr_name](**kwargs)
else:
from .backend import sklearn, lightgbm, catboost, xgboost
version_info = get_version_info(sklearn=sklearn, lightgbm=lightgbm, catboost=catboost,
xgboost=xgboost)
if regr_name in ['TWEEDIEREGRESSOR', 'POISSONREGRESSOR', 'LGBMREGRESSOR', 'LGBMCLASSIFIER',
'GAMMAREGRESSOR']:
if int(version_info['sklearn'].split('.')[1]) < 23:
raise ValueError(
f"{regr_name} is available with sklearn version >= 0.23 but you have {version_info['sklearn']}")
raise ValueError(f"model {regr_name} not found. {version_info}")
self._model = model
return
def fit(self,
data: str = 'training',
callbacks: dict = None,
**kwargs
):
"""
Trains the model with the data selected according to the `data` argument.
Arguments:
data : data to use for model training. Default is `training`.
callbacks : Any callback compatible with keras. If you want to log the output
to tensorboard, then just use `callbacks={'tensorboard':{}}` or
to provide additional arguments
```python
callbacks={'tensorboard': {'histogram_freq': 1}}
```
kwargs : Any keyword argument for the `fit` method of the underlying algorithm.
if 'x' is present in kwargs, that will take precedence over `data`.
Returns:
A keras history object in case of deep learning model with tensorflow
as backend or anything returned by `fit` method of underlying model.
"""
if isinstance(data, str):
assert data in ['training', 'test', 'validation']
return self.call_fit(data=data, callbacks=callbacks, **kwargs)
def call_fit(self,
data='training',
callbacks=None,
**kwargs):
visualizer = PlotResults(path=self.path)
self.is_training = True
if isinstance(data, np.ndarray): # .fit(x,...)
assert 'x' not in kwargs
kwargs['x'] = data
if isinstance(callbacks, np.ndarray): # .fit(x,y)
assert 'y' not in kwargs
kwargs['y'] = callbacks
callbacks = None
if 'x' not in kwargs:
train_data = getattr(self, f'{data}_data')()
inputs, outputs = maybe_three_outputs(train_data, self.dh.teacher_forcing)
else:
outputs = None
if 'y' in kwargs:
outputs = kwargs['y']
kwargs.pop('y')
inputs = kwargs.pop('x')
if isinstance(outputs, np.ndarray) and self.category.upper() == "DL":
if isinstance(self.ai4w_outputs, list):
assert len(self.ai4w_outputs) == 1
model_output_shape = tuple(self.ai4w_outputs[0].shape.as_list()[1:])
if getattr(self, 'quantiles', None) is not None:
assert model_output_shape[0] == len(self.quantiles) * self.num_outs
# todo, it is assumed that there is softmax as the last layer
elif self.mode == 'classification':
# todo, don't know why it is working
assert model_output_shape[0] == self.num_classes, f"""inferred number of classes are
{self.num_classes} while model's output has {model_output_shape[0]} nodes """
assert model_output_shape[0] == outputs.shape[1]
else:
assert model_output_shape == outputs.shape[1:], f"""
ShapeMismatchError: Shape of model's output is {model_output_shape}
while the targets in prepared have shape {outputs.shape[1:]}."""
self.info['training_start'] = dateandtime_now()
if self.category.upper() == "DL":
if 'validation_data' not in kwargs:
val_data = self.validation_data()
val_x, val_y = maybe_three_outputs(val_data, self.dh.teacher_forcing)
val_data = (val_x, val_y)
kwargs['validation_data'] = val_data
history = self._FIT(inputs, outputs, callbacks=callbacks, **kwargs)
visualizer.plot_loss(history.history, show=self.verbosity)
self.load_best_weights()
else:
history = self.fit_ml_models(inputs, outputs)
self.info['training_end'] = dateandtime_now()
self.save_config()
save_config_file(os.path.join(self.path, 'info.json'), others=self.info)
self.is_training = False
return history
def load_best_weights(self) -> None:
if self.config['backend'] != 'pytorch':
# load the best weights so that the best weights can be used during model.predict calls
best_weights = find_best_weight(os.path.join(self.path, 'weights'))
if best_weights is None:
warnings.warn("best weights could not be found and are not loaded", UserWarning)
else:
self.allow_weight_loading = True
self.update_weights(os.path.join(self.w_path, best_weights))
return
def fit_ml_models(self, inputs, outputs):
if self.dh.is_multiclass:
outputs = outputs
else:
outputs = outputs.reshape(-1, )
self._maybe_change_residual_threshold(outputs)
history = self._model.fit(inputs, outputs)
self._save_ml_model()
return history
def _save_ml_model(self):
"""Saves the non-NN/ML models in the disk."""
model_name = list(self.config['model'].keys())[0]
fname = os.path.join(self.w_path, self.category + '_' + self.mode + '_' + model_name)
if "TPOT" not in model_name.upper():
joblib.dump(self._model, fname)
return
def cross_val_score(self, scoring: str = None) -> float:
"""computes cross validation score
Arguments:
scoring : performance metric to use for cross validation.
If None, it will be taken from config['val_metric']
Note: Currently not working for deep learning models.
"""
from ai4water.postprocessing.SeqMetrics import RegressionMetrics, ClassificationMetrics
if self.num_outs > 1:
raise ValueError
if scoring is None:
scoring = self.config['val_metric']
scores = []
if self.config['cross_validator'] is None:
raise ValueError("Provide the `cross_validator` argument to the `Model` class upon initiation")
cross_validator = list(self.config['cross_validator'].keys())[0]
cross_validator_args = self.config['cross_validator'][cross_validator]
if callable(cross_validator):
splits = cross_validator(**cross_validator_args)
else:
splits = getattr(self.dh, f'{cross_validator}_splits')(**cross_validator_args)
for fold, ((train_x, train_y), (test_x, test_y)) in enumerate(splits):
verbosity = self.verbosity
self.verbosity = 0
# make a new classifier/regressor at every fold
self.build(self._get_dummy_input_shape())
self.verbosity = verbosity
self._maybe_change_residual_threshold(train_y)
self._model.fit(train_x, y=train_y.reshape(-1, ))
pred = self._model.predict(test_x)
metrics = RegressionMetrics(test_y.reshape(-1, self.num_outs), pred)
val_score = getattr(metrics, scoring)()
scores.append(val_score)
if self.verbosity > 0:
print(f'fold: {fold} val_score: {val_score}')
# save all the scores as json in model path
cv_name = str(cross_validator)
fname = os.path.join(self.path, f'{cv_name}_{scoring}.json')
with open(fname, 'w') as fp:
json.dump(scores, fp)
# set it as class attribute so that it can be used
setattr(self, f'cross_val_{scoring}', scores)
# if we do not run .fit(), then we should still have model saved in the disk
# so that it can be used.
self._save_ml_model()
return np.mean(scores).item()
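# Illustrative use (hypothetical; requires the Model to have been created with a
# cross_validator, e.g. cross_validator={'kfold': {'n_splits': 5}}):
#   cv_score = model.cross_val_score(scoring='mse')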
def _maybe_change_residual_threshold(self, outputs)->None:
# https://stackoverflow.com/a/64396757/5982232
if self.residual_threshold_not_set:
old_value = self._model.residual_threshold or mad(outputs.reshape(-1, ).tolist())
if np.isnan(old_value) or old_value < 0.001:
self._model.set_params(residual_threshold=0.001)
if self.verbosity > 0:
print(f"changing residual_threshold from {old_value} to {self._model.residual_threshold}")
return
def evaluate(self, data='training', **kwargs):
"""
Evaluates the performance of the model on the given data.
calls the `evaluate` method of underlying `model`. If the `evaluate`
method is not available in underlying `model`, then `predict` is called.
Arguments:
data : which data type to use, valid values are `training`, `test`
and `validation`. You can also provide your own x,y values as keyword
arguments. In such a case, this argument will have no meaning.
kwargs : any keyword argument for the `evaluate` method of the underlying
model.
Returns:
whatever is returned by `evaluate` method of underlying model.
"""
return self.call_evaluate(data, **kwargs)
def call_evaluate(self, data=None, **kwargs):
if data:
assert data in ['training', 'test', 'validation']
# get the relevant data
data = getattr(self, f'{data}_data')()
data = maybe_three_outputs(data, self.dh.teacher_forcing)
if 'x' in kwargs: # expecting it to be called by keras' fit loop
assert data is None
if self.category == 'ML':
if hasattr(self._model, 'evaluate'):
return self._model.evaluate(kwargs['x'])
else:
return self._model.predict(kwargs['x'])
return self.evaluate_fn(**kwargs)
# this will mostly be the validation data.
elif data is not None:
# if data.__class__.__name__ in ["Dataset"]:
# if 'x' not in kwargs:
# #if self.api == 'functional':
# eval_output = self.evaluate_fn(self.val_dataset, **kwargs)
#
# else: # give priority to xy
# eval_output = self._evaluate_with_xy(**kwargs)
# else:
eval_output = self._evaluate_with_xy(data, **kwargs)
else:
raise ValueError
acc, loss = None, None
if self.category == "DL":
if K.BACKEND == 'tensorflow':
loss, acc = eval_output
else:
loss = eval_output
eval_report = f"{'*' * 30}\n{dateandtime_now()}\n Accuracy: {acc}\n Loss: {loss}\n"
fname = os.path.join(self.path, 'eval_report.txt')
with open(fname, 'a+') as fp:
fp.write(eval_report)
return eval_output
def _evaluate_with_xy(self, data, **kwargs):
x, y = data
# the user provided x,y and batch_size values should have priority
if 'x' in kwargs:
x = kwargs.pop('x')
if 'y' in kwargs:
y = kwargs.pop('y')
if 'batch_size' in kwargs:
batch_size = kwargs.pop('batch_size')
else:
batch_size = self.config['batch_size']
y = get_values(y)
return self.evaluate_fn(
x=x,
y=y,
batch_size=batch_size,
verbose=self.verbosity,
**kwargs
)
def predict(self,
data: str = 'test',
x=None,
y=None,
prefix: str = None,
process_results: bool = True,
metrics:str = "minimal",
return_true:bool = False,
**kwargs
):
"""
Makes prediction from the trained model.
Arguments:
data : which data to use. Possible values are `training`, `test` or `validation`.
By default, `test` data is used for predictions.
x : if given, it will override `data`
y : Used for post-processing etc. If given, it will override `data`.
process_results : post processing of results
metrics : only valid if process_results is True. The metrics to calculate.
Valid values are 'minimal', 'all', 'hydro_metrics'
return_true : whether to return the true values along with predicted values
or not. Default is False, so that this method behaves like sklearn's `predict`.
kwargs : any keyword argument for the `predict` method of the underlying model.
Returns:
A numpy array of predicted values.
If return_true is True then a tuple of arrays. The first is true
and the second is predicted. If `x` is given but `y` is not given,
then the first returned array is None.
"""
if isinstance(data, str):
assert data in ['training', 'test', 'validation']
assert metrics in ("minimal", "all", "hydro_metrics")
return self.call_predict(data=data, x=x, y=y, process_results=process_results,
metrics=metrics,
return_true=return_true,
**kwargs)
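# Hypothetical usage sketch (added for illustration; assumes a trained instance
# `model` of this class):
#   preds = model.predict()                                   # test data by default
#   true, preds = model.predict(data='validation', return_true=True)
#   preds = model.predict(x=x_arr, process_results=False)     # user-provided inputs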
def call_predict(self,
data='test',
x=None,
y=None,
process_results=True,
metrics="minimal",
return_true:bool = False,
**kwargs):
transformation_key = None
if isinstance(data, np.ndarray):
# the predict method is called like .predict(x)
inputs = data
user_defined_data = True
true_outputs = None
prefix = 'x'
else:
transformation_key = '5'
if x is None: # .predict('training')
if self.dh.data is None:
raise ValueError("You must specify the data on which to make prediction")
user_defined_data = False
prefix = data
data = getattr(self, f'{data}_data')(key=transformation_key)
inputs, true_outputs = maybe_three_outputs(data, self.dh.teacher_forcing)
else: # .predict(x=x,...)
user_defined_data = True
prefix = 'x'
inputs = x
true_outputs = y
if 'verbose' in kwargs:
verbosity = kwargs.pop('verbose')
else:
verbosity = self.verbosity
batch_size = self.config['batch_size']
if 'batch_size' in kwargs:
batch_size = kwargs.pop('batch_size')
if self.category == 'DL':
predicted = self.predict_fn(x=inputs,
batch_size=batch_size,
verbose=verbosity,
**kwargs)
else:
predicted = self.predict_ml_models(inputs, **kwargs)
if true_outputs is None:
if return_true:
return true_outputs, predicted
return predicted
dt_index = np.arange(len(true_outputs)) # dummy/default index when data is user defined
if not user_defined_data:
true_outputs, predicted = self.inverse_transform(true_outputs, predicted, transformation_key)
true_outputs, dt_index = self.dh.deindexify(true_outputs, key=transformation_key)
if isinstance(true_outputs, np.ndarray) and true_outputs.dtype.name == 'object':
true_outputs = true_outputs.astype(predicted.dtype)
if true_outputs is None:
process_results = False
if self.quantiles is None:
# if true_outputs and predicted are dictionaries of len(1) then just get the values
true_outputs = get_values(true_outputs)
predicted = get_values(predicted)
if process_results:
if self.mode == 'regression':
self.process_regres_results(true_outputs, predicted,
metrics=metrics,
prefix=prefix + '_', index=dt_index,
user_defined_data=user_defined_data)
else:
self.process_class_results(true_outputs,
predicted,
metrics=metrics,
prefix=prefix,
index=dt_index,
user_defined_data=user_defined_data)
else:
assert self.num_outs == 1
self.plot_quantiles1(true_outputs, predicted)
self.plot_quantiles2(true_outputs, predicted)
self.plot_all_qs(true_outputs, predicted)
if return_true:
return true_outputs, predicted
return predicted
def predict_ml_models(self, inputs, **kwargs):
"""So that it can be overwritten easily for ML models."""
return self.predict_fn(inputs, **kwargs)
def inverse_transform(self,
true: Union[np.ndarray, dict],
predicted: Union[np.ndarray, dict],
key: str
) -> Tuple[np.ndarray, np.ndarray]:
if self.dh.source_is_dict or self.dh.source_is_list:
true = self.dh.inverse_transform(true, key=key)
if isinstance(predicted, np.ndarray):
assert len(true) == 1
predicted = {list(true.keys())[0]: predicted}
predicted = self.dh.inverse_transform(predicted, key=key)
else:
true_shape, pred_shape = true.shape, predicted.shape
if isinstance(true, np.ndarray) and self.forecast_len == 1 and isinstance(self.num_outs, int):
true = pd.DataFrame(true.reshape(len(true), self.num_outs), columns=self.out_cols)
predicted = pd.DataFrame(predicted.reshape(len(predicted), self.num_outs), columns=self.out_cols)
true = self.dh.inverse_transform(true, key=key)
predicted = self.dh.inverse_transform(predicted, key=key)
if predicted.__class__.__name__ in ['DataFrame', 'Series']:
predicted = predicted.values
if true.__class__.__name__ in ['DataFrame', 'Series']:
true = true.values
true = true.reshape(true_shape)
predicted = predicted.reshape(pred_shape)
return true, predicted
def plot_model(self, nn_model) -> None:
kwargs = {}
if int(tf.__version__.split('.')[1]) > 14:
kwargs['dpi'] = 300
try:
keras.utils.plot_model(nn_model, to_file=os.path.join(self.path, "model.png"), show_shapes=True, **kwargs)
except (AssertionError, ImportError) as e:
print(f"dot plot of model could not be plotted due to {e}")
return
def get_opt_args(self) -> dict:
"""get input arguments for an optimizer.
It is being explicitly defined here so that it can be overwritten
in sub-classes
"""
kwargs = {'lr': self.config['lr']}
if self.config['backend'] == 'tensorflow' and int(''.join(tf.__version__.split('.')[0:2]).ljust(3, '0')) >= 250:
kwargs['learning_rate'] = kwargs.pop('lr')
if self.config['backend'] == 'pytorch':
kwargs.update({'params': self.parameters()}) # parameters from pytorch model
return kwargs
def get_metrics(self) -> list:
"""Returns the performance metrics specified."""
_metrics = self.config['metrics']
metrics = None
if _metrics is not None:
if not isinstance(_metrics, list):
assert isinstance(_metrics, str)
_metrics = [_metrics]
from ai4water.utils.tf_losses import nse, kge, pbias, tf_r2
METRICS = {'NSE': nse,
'KGE': kge,
"R2": tf_r2,
'PBIAS': pbias}
metrics = []
for m in _metrics:
if m.upper() in METRICS.keys():
metrics.append(METRICS[m.upper()])
else:
metrics.append(m)
return metrics
def get_2d_batches(self, data, ins, outs):
if not isinstance(data, np.ndarray):
if isinstance(data, pd.DataFrame):
data = data.values
else:
raise TypeError(f"unknown data type {data.__class__.__name__} for data ")
# for the case when there is no lookback, i.e. the first layer is a dense layer and takes 2D input
input_x, input_y, label_y = data[:, 0:ins], data[:, -outs:], data[:, -outs:]
assert self.lookback == 1, """lookback should be one for MLP/Dense layer based model, but it is {}
""".format(self.lookback)
return self.check_nans(data, input_x, input_y, np.expand_dims(label_y, axis=2), outs, self.lookback,
self.config['allow_nan_labels'])
def view(self,
layer_name=None,
data='training',
x=None,
y=None,
examples_to_view=None,
show=False
):
"""shows all activations, weights and gradients of the model.
Arguments:
layer_name : the layer to view. If not given, all the layers will be viewed.
This argument is only required when the model consists of layers of neural
networks.
data : the data to use when making calls to model for activation calculation
or for gradient calculation. It can either 'training', 'validation' or
'test'.
x : alternative to data.
y : alternative to data
examples_to_view : the examples to view.
show : whether to show the plot or not!
Returns:
An instance of the ai4water.postprocessing.visualize.Visualize class.
"""
from ai4water.postprocessing.visualize import Visualize
visualizer = Visualize(model=self)
visualizer(layer_name,
data=data, x=x, y=y,
examples_to_use=examples_to_view,
show=show)
return visualizer
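# Hypothetical usage sketch (added for illustration; the layer name below is a
# made-up placeholder and depends on the user's model definition):
#   vis = model.view(layer_name='LSTM_0', data='training', show=True)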
def interpret(self, **kwargs):
"""
Interprets the underlying model. Call it after training.
Returns:
An instance of the ai4water.postprocessing.interpret.Interpret class
Example
-------
```python
model.fit()
model.interpret()
```
"""
# importing earlier will try to import np types as well again
from ai4water.postprocessing import Interpret
matplotlib.rcParams.update(matplotlib.rcParamsDefault)
if 'layers' not in self.config['model']:
if self.mode.lower().startswith("cl"):
self.decision_tree(which="sklearn", **kwargs)
data = self.test_data()
x, y = maybe_three_outputs(data)
self.confusion_matrx(x=x, y=y)
self.precision_recall_curve(x=x, y=y)
self.roc_curve(x=x, y=y)
return Interpret(self)
def explain(self, *args, **kwargs):
"""Calls the ai4water.post_processing.explain.explain_model
to explain the model.
"""
from ai4water.postprocessing.explain import explain_model
return explain_model(self, *args, **kwargs)
def prepare_batches(self, df: pd.DataFrame, ins, outs):
assert self.num_outs == 1
target = self.config['output_features'][0]
x = np.zeros((len(df), self.lookback, df.shape[1] - 1))
prev_y = np.zeros((len(df), self.lookback, 1))
for i, name in enumerate(list(df.columns[:-1])):
for j in range(self.lookback):
x[:, j, i] = df[name].shift(self.lookback - j - 1).fillna(method="bfill")
for j in range(self.lookback):
prev_y[:, j, 0] = df[target].shift(self.lookback - j - 1).fillna(method="bfill")
fl = self.config['forecast_len']
_y = np.zeros((df.shape[0], fl))
for i in range(df.shape[0] - fl):
_y[i - 1, :] = df[target].values[i:i + fl]
input_x = x[self.lookback:-fl, :]
prev_y = prev_y[self.lookback:-fl, :]
y = _y[self.lookback:-fl, :].reshape(-1, outs, self.forecast_len)
return self.check_nans(df, input_x, prev_y, y, outs, self.lookback, self.config['allow_nan_labels'])
def save_indices(self):
indices = {}
for idx in ['train_indices', 'test_indices']:
if hasattr(self, idx):
idx_val = getattr(self, idx)
if idx_val is not None and not isinstance(idx_val, str):
idx_val = np.array(idx_val, dtype=int).tolist()
else:
idx_val = None
indices[idx] = idx_val
save_config_file(indices=indices, path=self.path)
return
def save_config(self, history: dict = None):
self.save_indices()
config = dict()
if history is not None:
config['min_loss'] = None
config['min_val_loss'] = None
min_loss_array = history.get('min_loss_array', None)
val_loss_array = history.get('val_loss', None)
if val_loss_array is not None and not all(np.isnan(val_loss_array)):
config['min_val_loss'] = np.nanmin(val_loss_array)
if min_loss_array is not None and not all(np.isnan(min_loss_array)):
config['min_loss'] = np.nanmin(min_loss_array)
config['config'] = self.config
config['method'] = self.method
config['category'] = self.category
config['mode'] = self.mode
config['quantiles'] = self.quantiles
if self.category == "DL":
config['loss'] = self.loss_name()
save_config_file(config=config, path=self.path)
return config
@classmethod
def from_config(cls,
config_path: str,
data,
make_new_path: bool = False,
**kwargs) -> "BaseModel":
"""
Loads the model from a config file.
Arguments:
config_path : complete path of config file
data : data for Model
make_new_path : If True, the config file is used only to build the
model and a new path is made. We would not normally update the
weights in such a case.
kwargs : any additional keyword arguments for the `Model`
Returns:
a `Model` instance
"""
config, path = cls._get_config_and_path(cls, config_path, make_new_path)
return cls(**config['config'],
data=data,
path=path,
**kwargs)
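# Hypothetical usage sketch (added for illustration; the path and `df` below are
# placeholders):
#   model = Model.from_config('path/to/config.json', data=df, make_new_path=True)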
@staticmethod
def _get_config_and_path(cls, config_path, make_new_path):
"""Sets some attributes of the cls so that it can be built from config.
Also fetches config and path which are used to initiate cls."""
with open(config_path, 'r') as fp:
config = json.load(fp)
if 'path' in config['config']: config['config'].pop('path')
idx_file = os.path.join(os.path.dirname(config_path), 'indices.json')
with open(idx_file, 'r') as fp:
indices = json.load(fp)
cls.from_check_point = True
# These parameters need to be set here because they are not within the init method
cls.test_indices = indices["test_indices"]
cls.train_indices = indices["train_indices"]
if make_new_path:
cls.allow_weight_loading = False
path = None
else:
cls.allow_weight_loading = True
path = os.path.dirname(config_path)
return config, path
def update_weights(self, weight_file: str=None):
"""
Updates the weights of the underlying model.
Arguments:
weight_file str: complete path of weight file. If not given, the
weights are updated from model.w_path directory. For neural
network based models, the best weights are updated if more
than one weight file is present in model.w_path.
"""
if weight_file is None:
weight_file = find_best_weight(self.w_path)
weight_file_path = os.path.join(self.w_path, weight_file)
else:
assert os.path.isfile(weight_file), 'weight_file must be complete path of weight file'
weight_file_path = weight_file
weight_file = os.path.basename(weight_file) # for printing
if not self.allow_weight_loading:
raise ValueError(f"Weights loading not allowed because allow_weight_loading is {self.allow_weight_loading}"
f"and model path is {self.path}")
if self.category == "ML":
self._model = joblib.load(weight_file_path)
else:
# loads the weights of keras model from weight file `w_file`.
if self.api == 'functional' and self.config['backend'] == 'tensorflow':
self._model.load_weights(weight_file_path)
else:
self.load_weights(weight_file_path)
if self.verbosity > 0:
print("{} Successfully loaded weights from {} file {}".format('*' * 10, weight_file, '*' * 10))
return
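# Hypothetical usage sketch (added for illustration): after re-building a model
# from a saved config, the stored weights can be restored with
#   model.update_weights()                        # best file found in model.w_path
#   model.update_weights('full/path/to/weights')  # or an explicit weight file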
def write_cache(self, _fname, input_x, input_y, label_y):
fname = os.path.join(self.path, _fname)
if h5py is not None:
h5 = h5py.File(fname, 'w')
h5.create_dataset('input_X', data=input_x)
h5.create_dataset('input_Y', data=input_y)
h5.create_dataset('label_Y', data=label_y)
h5.close()
return
def eda(self, freq: str = None, cols=None):
"""Performs comprehensive Exploratory Data Analysis.
Arguments:
freq : if specified, small chunks of data will be plotted instead of
the whole data at once. The data will NOT be resampled. This is valid
only for `plot_data` and `box_plot`. Possible values are `yearly`,
`weekly`, and `monthly`.
cols :
"""
# importing EDA earlier will import numpy etc as well
from ai4water.eda import EDA
# todo, Uniform Manifold Approximation and Projection (UMAP) of input data
if self.data is None:
print("data is None so eda can not be performed.")
return
# todo, radial heatmap to show temporal trends http://holoviews.org/reference/elements/bokeh/RadialHeatMap.html
eda = EDA(data=self.data, path=self.path, in_cols=self.in_cols, out_cols=self.out_cols, save=True)
# plot number of missing vals
eda.plot_missing(cols=cols)
# show data as heatmap
eda.heatmap(cols=cols)
# line plots of input/output data
eda.plot_data(cols=cols, freq=freq, subplots=True, figsize=(12, 14), sharex=True)
# plot feature-feature correlation as heatmap
eda.correlation(cols=cols)
# print stats about input/output data
eda.stats()
# box-whisker plot
eda.box_plot(freq=freq)
# principal components
eda.plot_pcs()
# scatter plots of input/output data
eda.grouped_scatter(cols=cols)
# distributions as histograms
eda.plot_histograms(cols=cols)
return
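# Hypothetical usage sketch (added for illustration; the column names below are
# made up):
#   model.eda(freq='monthly')          # plot the data in monthly chunks
#   model.eda(cols=['tmin', 'tmax'])   # restrict the analysis to selected columns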
def update_info(self):
from .backend import lightgbm, tcn, catboost, xgboost
self.info['version_info'] = get_version_info(
tf=tf,
keras=keras,
torch=torch,
np=np,
pd=pd,
matplotlib=matplotlib,
h5py=h5py,
joblib=joblib,
lightgbm=lightgbm,
tcn=tcn,
catboost=catboost,
xgboost=xgboost
)
return
def print_info(self):
class_type = ''
if self.is_binary:
class_type = "binary"
elif self.is_multiclass:
class_type = "multi-class"
elif self.is_multilabel:
class_type = "multi-label"
if isinstance(self.config['model'], dict):
if 'layers' in self.config['model']:
model_name = self.__class__.__name__
else:
model_name = list(self.config['model'].keys())[0]
else:
if isinstance(self.config['model'], str):
model_name = self.config['model']
else:
model_name = self.config['model'].__class__.__name__
if self.verbosity > 0:
print('building {} model for {} {} problem using {}'.format(self.category,
class_type,
self.mode,
model_name))
return
def get_optimizer(self):
opt_args = self.get_opt_args()
optimizer = OPTIMIZERS[self.config['optimizer'].upper()](**opt_args)
return optimizer
def get_values(outputs):
if isinstance(outputs, dict) and len(outputs) == 1:
outputs = list(outputs.values())[0]
return outputs
|
{"hexsha": "a1e0fb4188617c3fd337d5335d7314541708898f", "size": 67256, "ext": "py", "lang": "Python", "max_stars_repo_path": "ai4water/_main.py", "max_stars_repo_name": "csiro-hydroinformatics/AI4Water", "max_stars_repo_head_hexsha": "cdb18bd4bf298f77b381f1829045a1e790146985", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 12, "max_stars_repo_stars_event_min_datetime": "2020-10-13T08:23:17.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-22T04:36:21.000Z", "max_issues_repo_path": "ai4water/_main.py", "max_issues_repo_name": "csiro-hydroinformatics/AI4Water", "max_issues_repo_head_hexsha": "cdb18bd4bf298f77b381f1829045a1e790146985", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-10-15T02:42:52.000Z", "max_issues_repo_issues_event_max_datetime": "2020-10-15T02:51:07.000Z", "max_forks_repo_path": "ai4water/_main.py", "max_forks_repo_name": "csiro-hydroinformatics/AI4Water", "max_forks_repo_head_hexsha": "cdb18bd4bf298f77b381f1829045a1e790146985", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-11-23T04:45:38.000Z", "max_forks_repo_forks_event_max_datetime": "2020-11-26T10:12:34.000Z", "avg_line_length": 39.0342426001, "max_line_length": 135, "alphanum_fraction": 0.5635184965, "include": true, "reason": "import numpy,from scipy", "num_tokens": 13963}
|
# File to check that the two different action-value functions (MC estimate and the action-value function in the
# estimated MDP) are actually different functions, see Section 3.2.2 in "Evaluation of Safe Policy Improvement with
# Soft Baseline Bootstrapping" by Philipp Scholl.
import os
import sys
import numpy as np
import pandas as pd
import configparser
# Set directory as the path to Evaluation-of-Safe-Policy-Improvement-with-Baseline-Bootstrapping
# directory = os.path.dirname(os.path.dirname(os.path.expanduser(__file__)))
directory = r'C:\Users\phili\PycharmProjects\Evaluation-of-Safe-Policy-Improvement-with-Baseline-Bootstrapping'
sys.path.append(directory)
path_config = configparser.ConfigParser()
path_config.read(os.path.join(directory, 'paths.ini'))
spibb_path = path_config['PATHS']['spibb_path']
sys.path.append(spibb_path)
import garnets
import spibb_utils
import spibb
import modelTransitions
nb_trajectories_list = [10, 20, 50, 100, 200, 500, 1000, 2000, 5000, 10000]
delta = 0.05
ratios = [0.1, 0.3, 0.5, 0.7, 0.9] # [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
seed = 1234
np.random.seed(seed)
gamma = 0.7
nb_states = 50
nb_actions = 4
nb_next_state_transition = 4
env_type = 1 # 1 for one terminal state, 2 for two terminal states
self_transitions = 0
results = []
for ratio in ratios:
garnet = garnets.Garnets(nb_states, nb_actions, nb_next_state_transition,
env_type=env_type, self_transitions=self_transitions)
softmax_target_perf_ratio = (ratio + 1) / 2
baseline_target_perf_ratio = ratio
pi_b, q_pi_b, pi_star_perf, pi_b_perf, pi_rand_perf = \
garnet.generate_baseline_policy(gamma,
softmax_target_perf_ratio=softmax_target_perf_ratio,
baseline_target_perf_ratio=baseline_target_perf_ratio, log=False)
reward_current = garnet.compute_reward()
current_proba = garnet.transition_function
r_reshaped = spibb_utils.get_reward_model(current_proba, reward_current)
results_traj = []
for nb_trajectories in nb_trajectories_list:
# Generate trajectories, both stored as trajectories and (s,a,s',r) transition samples
trajectories, batch_traj = spibb_utils.generate_batch(nb_trajectories, garnet, pi_b)
# Computation of the transition errors
model = modelTransitions.ModelTransitions(batch_traj, nb_states, nb_actions)
reward_model = spibb_utils.get_reward_model(model.transitions, reward_current)
# q_pi_b_est is the MC estimation of the action-value function
q_pi_b_est = spibb_utils.compute_q_pib_est_episodic(gamma=gamma, nb_actions=nb_actions, nb_states=nb_states,
batch=trajectories)
# q_m_hat is the action-value function in the estimated MDP.
_, q_m_hat = spibb.policy_evaluation_exact(pi_b, reward_model, model.transitions, gamma)
distance = np.linalg.norm(q_pi_b_est - q_m_hat, ord=1)
results_traj.append(distance)
print(
f'For ratio {ratio} and {nb_trajectories} trajectories, the L1 distance in the two calculations of q of '
f'pi_b is {distance}.')
# f'(Unvisited state action pairs: {nb_not_visited_state_1action_pairs})')
results.append(results_traj)
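# Added post-processing sketch (not part of the original experiment): collect the
# L1 distances into a DataFrame indexed by the baseline ratio for easier inspection.
results_df = pd.DataFrame(results, index=ratios, columns=nb_trajectories_list)
print(results_df)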
|
{"hexsha": "6ac2e55f4721af50cd9966b7c2a9c848e1a0bd45", "size": 3356, "ext": "py", "lang": "Python", "max_stars_repo_path": "auxiliary_tests/difference_in_action_value_functions.py", "max_stars_repo_name": "Philipp238/Safe-Policy-Improvement-Approaches-on-Discrete-Markov-Decision-Processes", "max_stars_repo_head_hexsha": "d0eb7281c5151e043547d5b7379144deea0bbe03", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "auxiliary_tests/difference_in_action_value_functions.py", "max_issues_repo_name": "Philipp238/Safe-Policy-Improvement-Approaches-on-Discrete-Markov-Decision-Processes", "max_issues_repo_head_hexsha": "d0eb7281c5151e043547d5b7379144deea0bbe03", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "auxiliary_tests/difference_in_action_value_functions.py", "max_forks_repo_name": "Philipp238/Safe-Policy-Improvement-Approaches-on-Discrete-Markov-Decision-Processes", "max_forks_repo_head_hexsha": "d0eb7281c5151e043547d5b7379144deea0bbe03", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.95, "max_line_length": 117, "alphanum_fraction": 0.7154350417, "include": true, "reason": "import numpy", "num_tokens": 857}
|
#include <boost/asio.hpp>
#include <string>
#include <sstream>
using namespace std;
namespace net {
namespace {
boost::asio::ip::tcp::iostream net;
}
void connect(const string& addr, int port) {
ostringstream oss;
oss<<addr<<':'<<port;
net.connect(oss.str());
}
void sendRaw(const string& data) {
net<<data;
net.flush();
}
string getLine() {
string s;
getline(net, s);
return s;
}
} // namespace net
|
{"hexsha": "f42aaf5f70a02e4ba32e302b1e82b7076ea17d8e", "size": 413, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "net.cpp", "max_stars_repo_name": "sisu/lib24", "max_stars_repo_head_hexsha": "b1aefdfa37a2e3d9b0b7adf9154a86afbde5f461", "max_stars_repo_licenses": ["WTFPL"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2020-12-21T14:58:20.000Z", "max_stars_repo_stars_event_max_datetime": "2020-12-21T14:58:20.000Z", "max_issues_repo_path": "net.cpp", "max_issues_repo_name": "sisu/lib24", "max_issues_repo_head_hexsha": "b1aefdfa37a2e3d9b0b7adf9154a86afbde5f461", "max_issues_repo_licenses": ["WTFPL"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "net.cpp", "max_forks_repo_name": "sisu/lib24", "max_forks_repo_head_hexsha": "b1aefdfa37a2e3d9b0b7adf9154a86afbde5f461", "max_forks_repo_licenses": ["WTFPL"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 14.2413793103, "max_line_length": 44, "alphanum_fraction": 0.6682808717, "num_tokens": 106}
|
from datetime import datetime
import os
from urllib.request import urlopen
from PIL import Image
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
filename = 'model.pb'
labels_filename = 'labels.txt'
output_layer = 'loss:0'
input_node = 'Placeholder:0'
graph_def = tf.GraphDef()
labels = []
network_input_size = 0
def _initialize():
global labels, network_input_size
# initialize the model once and save it to a global variable
if not labels:
with tf.gfile.GFile(filename, 'rb') as f:
graph_def.ParseFromString(f.read())
tf.import_graph_def(graph_def, name='')
with open(labels_filename, 'rt') as lf:
labels = [l.strip() for l in lf.readlines()]
with tf.Session() as sess:
input_tensor_shape = sess.graph.get_tensor_by_name('Placeholder:0').shape.as_list()
network_input_size = input_tensor_shape[1]
def _extract_bilinear_pixel(img, x, y, ratio, xOrigin, yOrigin):
xDelta = (x + 0.5) * ratio - 0.5
x0 = int(xDelta)
xDelta -= x0
x0 += xOrigin
if x0 < 0:
x0 = 0;
x1 = 0;
xDelta = 0.0;
elif x0 >= img.shape[1]-1:
x0 = img.shape[1]-1;
x1 = img.shape[1]-1;
xDelta = 0.0;
else:
x1 = x0 + 1;
yDelta = (y + 0.5) * ratio - 0.5
y0 = int(yDelta)
yDelta -= y0
y0 += yOrigin
if y0 < 0:
y0 = 0;
y1 = 0;
yDelta = 0.0;
elif y0 >= img.shape[0]-1:
y0 = img.shape[0]-1;
y1 = img.shape[0]-1;
yDelta = 0.0;
else:
y1 = y0 + 1;
#Get pixels in four corners
bl = img[y0, x0]
br = img[y0, x1]
tl = img[y1, x0]
tr = img[y1, x1]
#Calculate interpolation
b = xDelta * br + (1. - xDelta) * bl
t = xDelta * tr + (1. - xDelta) * tl
pixel = yDelta * t + (1. - yDelta) * b
return pixel.astype(np.uint8)
def _extract_and_resize(img, targetSize):
determinant = img.shape[1] * targetSize[0] - img.shape[0] * targetSize[1]
if determinant < 0:
ratio = float(img.shape[1]) / float(targetSize[1])
xOrigin = 0
yOrigin = int(0.5 * (img.shape[0] - ratio * targetSize[0]))
elif determinant > 0:
ratio = float(img.shape[0]) / float(targetSize[0])
xOrigin = int(0.5 * (img.shape[1] - ratio * targetSize[1]))
yOrigin = 0
else:
ratio = float(img.shape[0]) / float(targetSize[0])
xOrigin = 0
yOrigin = 0
resize_image = np.empty((targetSize[0], targetSize[1], img.shape[2]), dtype=np.uint8)
for y in range(targetSize[0]):
for x in range(targetSize[1]):
resize_image[y, x] = _extract_bilinear_pixel(img, x, y, ratio, xOrigin, yOrigin)
return resize_image
def _extract_and_resize_to_256_square(image):
h, w = image.shape[:2]
return _extract_and_resize(image, (256, 256))
def _crop_center(img,cropx,cropy):
h, w = img.shape[:2]
startx = max(0, w//2-(cropx//2) - 1)
starty = max(0, h//2-(cropy//2) - 1)
return img[starty:starty+cropy, startx:startx+cropx]
def _resize_down_to_1600_max_dim(image):
w,h = image.size
if h < 1600 and w < 1600:
return image
new_size = (1600 * w // h, 1600) if (h > w) else (1600, 1600 * h // w)
if max(new_size) / max(image.size) >= 0.5:
method = Image.BILINEAR
else:
method = Image.BICUBIC
return image.resize(new_size, method)
def _convert_to_nparray(image):
# RGB -> BGR
image = np.array(image)
return image[:, :, (2,1,0)]
def _update_orientation(image):
exif_orientation_tag = 0x0112
if hasattr(image, '_getexif'):
exif = image._getexif()
if exif is not None and exif_orientation_tag in exif:
orientation = exif.get(exif_orientation_tag, 1)
# orientation is 1 based, shift to zero based and flip/transpose based on 0-based values
orientation -= 1
if orientation >= 4:
image = image.transpose(Image.TRANSPOSE)
if orientation == 2 or orientation == 3 or orientation == 6 or orientation == 7:
image = image.transpose(Image.FLIP_TOP_BOTTOM)
if orientation == 1 or orientation == 2 or orientation == 5 or orientation == 6:
image = image.transpose(Image.FLIP_LEFT_RIGHT)
return image
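# Hypothetical end-to-end sketch (added for illustration only; the chaining order
# below is inferred from the helper names and is an assumption, not taken from the
# original repository):
#   _initialize()
#   image = Image.open('example.jpg')   # placeholder path
#   image = _update_orientation(image)
#   image = _resize_down_to_1600_max_dim(image)
#   arr = _convert_to_nparray(image)
#   arr = _extract_and_resize_to_256_square(arr)
#   arr = _crop_center(arr, network_input_size, network_input_size)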
|
{"hexsha": "bbd53ce61e904fcbf979a216317965a1cb54b52b", "size": 4353, "ext": "py", "lang": "Python", "max_stars_repo_path": "predict_helpers.py", "max_stars_repo_name": "anthonychu/hackthenorth-ml-workshop", "max_stars_repo_head_hexsha": "849cbbac34cd9e9c0ec1b14d39ca8e598c8b14ce", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2019-09-14T16:17:26.000Z", "max_stars_repo_stars_event_max_datetime": "2019-09-14T16:18:07.000Z", "max_issues_repo_path": "predict_helpers.py", "max_issues_repo_name": "anthonychu/hackthenorth-ml-workshop", "max_issues_repo_head_hexsha": "849cbbac34cd9e9c0ec1b14d39ca8e598c8b14ce", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2020-01-28T22:59:23.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-10T01:22:35.000Z", "max_forks_repo_path": "predict_helpers.py", "max_forks_repo_name": "juliengo/SignIt", "max_forks_repo_head_hexsha": "ab65d598dee950ed2e687d1837238d510cef8301", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2019-09-14T16:17:56.000Z", "max_forks_repo_forks_event_max_datetime": "2019-09-14T20:15:54.000Z", "avg_line_length": 31.5434782609, "max_line_length": 100, "alphanum_fraction": 0.5947622329, "include": true, "reason": "import numpy", "num_tokens": 1312}
|
! Copyright (c) 2015-2021, the ELSI team.
! All rights reserved.
!
! This file is part of ELSI and is distributed under the BSD 3-clause license,
! which may be found in the LICENSE file in the ELSI root directory.
!>
!! Determine occupation numbers, chemical potential, and electronic entropy.
!!
module ELSI_OCC
use ELSI_CONSTANT, only: GAUSSIAN,FERMI,METHFESSEL_PAXTON,COLD,CUBIC,&
SQRT_PI,INVERT_SQRT_PI
use ELSI_DATATYPE, only: elsi_param_t,elsi_basic_t
use ELSI_MALLOC, only: elsi_allocate,elsi_deallocate
use ELSI_MPI
use ELSI_OUTPUT, only: elsi_say
use ELSI_PRECISION, only: r8,i4
use ELSI_SORT, only: elsi_heapsort,elsi_permute,elsi_unpermute
use ELSI_UTIL, only: elsi_check_err
implicit none
private
public :: elsi_mu_and_occ
public :: elsi_entropy
public :: elsi_get_occ_for_dm
contains
!>
!! Compute the chemical potential and occupation numbers.
!!
subroutine elsi_mu_and_occ(ph,bh,n_electron,n_state,n_spin,n_kpt,k_wt,eval,occ,&
mu)
implicit none
type(elsi_param_t), intent(in) :: ph
type(elsi_basic_t), intent(in) :: bh
real(kind=r8), intent(in) :: n_electron
integer(kind=i4), intent(in) :: n_state
integer(kind=i4), intent(in) :: n_spin
integer(kind=i4), intent(in) :: n_kpt
real(kind=r8), intent(in) :: k_wt(n_kpt)
real(kind=r8), intent(in) :: eval(n_state,n_spin,n_kpt)
real(kind=r8), intent(out) :: occ(n_state,n_spin,n_kpt)
real(kind=r8), intent(out) :: mu
real(kind=r8) :: occ1(n_state,n_spin,n_kpt)
real(kind=r8) :: occ2(n_state,n_spin,n_kpt)
real(kind=r8) :: mu1
real(kind=r8) :: mu2
if (ph%occ_non_aufbau) then
! YY: This is for non Aufbau distribution occ number.
! YY: Used for (lowest excited state) delta SCF
! YY: normal occ_number : 2 2 2 2 0 0 0 0
! YY: non-Aufbau occ_number: 2 2 2 1 1 0 0 0
call elsi_mu_and_occ_normal(ph,bh,n_electron-2,n_state,n_spin,n_kpt,k_wt,&
eval,occ1,mu1)
call elsi_mu_and_occ_normal(ph,bh,n_electron+2,n_state,n_spin,n_kpt,k_wt,&
eval,occ2,mu2)
occ = (occ1 + occ2) / 2
mu = (mu1 + mu2) / 2
else
call elsi_mu_and_occ_normal(ph,bh,n_electron,n_state,n_spin,n_kpt,k_wt,&
eval,occ,mu)
end if
end subroutine
!>
!! Compute the chemical potential and occupation numbers normal distribution.
!!
subroutine elsi_mu_and_occ_normal(ph,bh,n_electron,n_state,n_spin,n_kpt,k_wt,eval,occ,&
mu)
implicit none
type(elsi_param_t), intent(in) :: ph
type(elsi_basic_t), intent(in) :: bh
real(kind=r8), intent(in) :: n_electron
integer(kind=i4), intent(in) :: n_state
integer(kind=i4), intent(in) :: n_spin
integer(kind=i4), intent(in) :: n_kpt
real(kind=r8), intent(in) :: k_wt(n_kpt)
real(kind=r8), intent(in) :: eval(n_state,n_spin,n_kpt)
real(kind=r8), intent(out) :: occ(n_state,n_spin,n_kpt)
real(kind=r8), intent(out) :: mu
real(kind=r8) :: mu_min
real(kind=r8) :: mu_max
real(kind=r8) :: buf
real(kind=r8) :: diff_min ! Error on lower bound
real(kind=r8) :: diff_max ! Error on upper bound
integer(kind=i4) :: i_step
logical :: found_mu
logical :: found_interval
character(len=200) :: msg
integer :: i_state, i_spin, i_kpt
character(len=*), parameter :: caller = "elsi_mu_and_occ_normal"
! Determine upper and lower bounds of mu
mu_min = minval(eval)
mu_max = maxval(eval)
buf = 0.5_r8*abs(mu_max-mu_min)
if(mu_max - mu_min < ph%mu_tol) then
mu_min = mu_min-1.0_r8
mu_max = mu_max+1.0_r8
end if
occ(:,:,:) = 0.0_r8
found_mu = .false.
found_interval = .false.
! Find solution interval
do i_step = 1,ph%mu_max_steps
call elsi_check_electrons(ph,n_electron,n_state,n_spin,n_kpt,k_wt,eval,&
occ,mu_min,diff_min)
if(abs(diff_min) < ph%mu_tol) then
mu = mu_min
found_mu = .true.
exit
end if
call elsi_check_electrons(ph,n_electron,n_state,n_spin,n_kpt,k_wt,eval,&
occ,mu_max,diff_max)
if(abs(diff_max) < ph%mu_tol) then
mu = mu_max
found_mu = .true.
exit
end if
if(diff_min*diff_max < 0.0_r8) then
found_interval = .true.
exit
end if
! Enlarge interval if solution not found
mu_min = mu_min-buf
mu_max = mu_max+buf
end do
if(.not. found_interval .and. .not. found_mu) then
! Test fully occupied and empty states
diff_max = n_state*n_spin*ph%spin_degen-n_electron
if(abs(diff_max) < ph%mu_tol) then
found_mu = .true.
mu = maxval(eval)+10.0_r8
occ(:,:,:) = ph%spin_degen
else if(abs(n_electron) < ph%mu_tol) then
found_mu = .true.
mu = minval(eval)-10.0_r8
occ(:,:,:) = 0.0_r8
else
write(msg,"(A)") "*** A problem occurred in subroutine elsi_mu_and_occ_normal"
call elsi_say(bh,msg)
write(msg,"(A)") "The ELSI routine to determine occupation numbers elsi_mu_and_occ_normal was unable to"
call elsi_say(bh,msg)
write(msg,"(A)") "find a suitable chemical potential (Fermi level). This may be"
call elsi_say(bh,msg)
write(msg,"(A)") "due to faulty input to the routine, i.e. a problem at an earlier stage of"
call elsi_say(bh,msg)
write(msg,"(A)") "the computation. For reference, the eigenvalues (in internal units of the"
call elsi_say(bh,msg)
write(msg,"(A)") "the code, typically atomic units in electronic structure theory) have the"
call elsi_say(bh,msg)
write(msg,"(A)") "following values:"
call elsi_say(bh,msg)
do i_kpt = 1, n_kpt
write(msg,"(A,I8)") "k-point ", i_kpt
call elsi_say(bh,msg)
do i_spin = 1, n_spin
write(msg,"(A,I5)") "spin channel ", i_spin
call elsi_say(bh,msg)
write(msg,"(A)") "EV number eigenvalue "
call elsi_say(bh,msg)
do i_state = 1, n_state
write(msg,"(I8,A,E14.7)") i_state, " ", eval(i_state, i_spin, i_kpt)
call elsi_say(bh,msg)
end do
end do
end do
call sleep(10)
write(msg,"(A)") "Chemical potential not found - please see above &
for a more detailed error message and output"
call elsi_stop(bh,msg,caller)
end if
end if
if(found_interval .and. .not. found_mu) then
! Perform bisection
call elsi_find_mu(ph,bh,n_electron,n_state,n_spin,n_kpt,k_wt,eval,occ,&
mu_min,mu_max,mu)
end if
end subroutine
!>
!! Compute the number of electrons for a given chemical potential, return the
!! error in the number of electrons. The occupation numbers are updated as well.
!!
subroutine elsi_check_electrons(ph,n_electron,n_state,n_spin,n_kpt,k_wt,eval,&
occ,mu,diff)
implicit none
type(elsi_param_t), intent(in) :: ph
real(kind=r8), intent(in) :: n_electron
integer(kind=i4), intent(in) :: n_state
integer(kind=i4), intent(in) :: n_spin
integer(kind=i4), intent(in) :: n_kpt
real(kind=r8), intent(in) :: k_wt(n_kpt)
real(kind=r8), intent(in) :: eval(n_state,n_spin,n_kpt)
real(kind=r8), intent(out) :: occ(n_state,n_spin,n_kpt)
real(kind=r8), intent(in) :: mu
real(kind=r8), intent(out) :: diff
real(kind=r8) :: spin_degen
real(kind=r8) :: invert_width
real(kind=r8) :: delta
real(kind=r8) :: max_exp ! Maximum possible exponent
real(kind=r8) :: arg
real(kind=r8) :: wt
real(kind=r8) :: A
real(kind=r8) :: H_even
real(kind=r8) :: H_odd
integer(kind=i4) :: i_state
integer(kind=i4) :: i_kpt
integer(kind=i4) :: i_spin
integer(kind=i4) :: i_mp
character(len=*), parameter :: caller = "elsi_check_electrons"
invert_width = 1.0_r8/ph%mu_width
diff = 0.0_r8
if(.not. ph%spin_is_set) then
if(n_spin == 2) then
spin_degen = 1.0_r8
else
spin_degen = 2.0_r8
end if
else
spin_degen = ph%spin_degen
end if
select case(ph%mu_scheme)
case(GAUSSIAN)
do i_kpt = 1,n_kpt
do i_spin = 1,n_spin
do i_state = 1,n_state
occ(i_state,i_spin,i_kpt) = spin_degen*0.5_r8&
*(1.0_r8-erf((eval(i_state,i_spin,i_kpt)-mu)*invert_width))
diff = diff+occ(i_state,i_spin,i_kpt)*k_wt(i_kpt)
end do
end do
end do
case(FERMI)
max_exp = maxexponent(mu)*log(2.0_r8)
do i_kpt = 1,n_kpt
do i_spin = 1,n_spin
do i_state = 1,n_state
arg = (eval(i_state,i_spin,i_kpt)-mu)*invert_width
if(arg < max_exp) then
occ(i_state,i_spin,i_kpt) = spin_degen/(1.0_r8+exp(arg))
diff = diff+occ(i_state,i_spin,i_kpt)*k_wt(i_kpt)
else
occ(i_state,i_spin,i_kpt) = 0.0_r8
end if
end do
end do
end do
case(METHFESSEL_PAXTON)
do i_kpt = 1,n_kpt
do i_spin = 1,n_spin
do i_state = 1,n_state
arg = (eval(i_state,i_spin,i_kpt)-mu)*invert_width
wt = exp(-arg**2)
occ(i_state,i_spin,i_kpt) = 0.5_r8*(1.0_r8-erf(arg))*spin_degen
if(ph%mu_mp_order > 0) then
A = -0.25_r8*INVERT_SQRT_PI
H_even = 1.0_r8
H_odd = 2.0_r8*arg
occ(i_state,i_spin,i_kpt) = occ(i_state,i_spin,i_kpt)&
+A*H_odd*wt*spin_degen
end if
if(ph%mu_mp_order > 1) then
do i_mp = 2,ph%mu_mp_order
A = -0.25_r8/real(i_mp,kind=r8)*A
H_even = 2.0_r8*arg*H_odd-2.0_r8*real(i_mp,kind=r8)*H_even
H_odd = 2.0_r8*arg*H_even-2.0_r8*real(i_mp+1,kind=r8)*H_odd
occ(i_state,i_spin,i_kpt) = occ(i_state,i_spin,i_kpt)&
+A*H_odd*wt*spin_degen
end do
end if
diff = diff+occ(i_state,i_spin,i_kpt)*k_wt(i_kpt)
end do
end do
end do
case(CUBIC)
! To have a consistent slope of the occupation function at the chemical
! potential, the parameters for GAUSSIAN and CUBIC should be related as:
delta = 0.75_r8*SQRT_PI*ph%mu_width
do i_kpt = 1,n_kpt
do i_spin = 1,n_spin
do i_state = 1,n_state
arg = (eval(i_state,i_spin,i_kpt)-mu)/delta
if(arg <= -1.0_r8) then
occ(i_state,i_spin,i_kpt) = spin_degen
else if(arg >= 1.0_r8) then
occ(i_state,i_spin,i_kpt) = 0.0_r8
else
occ(i_state,i_spin,i_kpt) = spin_degen*0.25_r8*(arg+2.0_r8)&
*(arg-1.0_r8)**2
end if
diff = diff+occ(i_state,i_spin,i_kpt)*k_wt(i_kpt)
end do
end do
end do
case(COLD)
do i_kpt = 1,n_kpt
do i_spin = 1,n_spin
do i_state = 1,n_state
arg = (eval(i_state,i_spin,i_kpt)-mu)*invert_width
arg = arg-sqrt(0.5_r8)
occ(i_state,i_spin,i_kpt) = (0.5_r8-erf(arg)*0.5_r8&
-INVERT_SQRT_PI*sqrt(0.5_r8)*exp(-arg**2))*spin_degen
diff = diff+occ(i_state,i_spin,i_kpt)*k_wt(i_kpt)
end do
end do
end do
end select
diff = diff-n_electron
end subroutine
!>
!! Compute the chemical potential using a bisection algorithm.
!!
subroutine elsi_find_mu(ph,bh,n_electron,n_state,n_spin,n_kpt,k_wt,eval,occ,&
mu_min,mu_max,mu)
implicit none
type(elsi_param_t), intent(in) :: ph
type(elsi_basic_t), intent(in) :: bh
real(kind=r8), intent(in) :: n_electron
integer(kind=i4), intent(in) :: n_state
integer(kind=i4), intent(in) :: n_spin
integer(kind=i4), intent(in) :: n_kpt
real(kind=r8), intent(in) :: k_wt(n_kpt)
real(kind=r8), intent(in) :: eval(n_state,n_spin,n_kpt)
real(kind=r8), intent(out) :: occ(n_state,n_spin,n_kpt)
real(kind=r8), intent(in) :: mu_min
real(kind=r8), intent(in) :: mu_max
real(kind=r8), intent(out) :: mu
real(kind=r8) :: mu_left
real(kind=r8) :: mu_right
real(kind=r8) :: mu_mid
real(kind=r8) :: diff_left ! Electron count error on left bound
real(kind=r8) :: diff_right ! Electron count error on right bound
real(kind=r8) :: diff_mid ! Electron count error on middle point
integer(kind=i4) :: i_step
logical :: found_mu
character(len=200) :: msg
character(len=*), parameter :: caller = "elsi_find_mu"
i_step = 0
found_mu = .false.
mu_left = mu_min
mu_right = mu_max
call elsi_check_electrons(ph,n_electron,n_state,n_spin,n_kpt,k_wt,eval,occ,&
mu_left,diff_left)
call elsi_check_electrons(ph,n_electron,n_state,n_spin,n_kpt,k_wt,eval,occ,&
mu_right,diff_right)
if(abs(diff_left) < ph%mu_tol) then
mu = mu_left
found_mu = .true.
else if(abs(diff_right) < ph%mu_tol) then
mu = mu_right
found_mu = .true.
end if
do while(.not. found_mu .and. i_step < ph%mu_max_steps)
i_step = i_step+1
mu_mid = 0.5_r8*(mu_left+mu_right)
call elsi_check_electrons(ph,n_electron,n_state,n_spin,n_kpt,k_wt,eval,&
occ,mu_mid,diff_mid)
if(abs(diff_mid) < ph%mu_tol) then
mu = mu_mid
found_mu = .true.
else if(diff_mid < 0.0_r8) then
mu_left = mu_mid
else if(diff_mid > 0.0_r8) then
mu_right = mu_mid
end if
end do
if(found_mu) then
call elsi_check_electrons(ph,n_electron,n_state,n_spin,n_kpt,k_wt,eval,&
occ,mu,diff_right)
else
! Use mu of the right bound...
call elsi_check_electrons(ph,n_electron,n_state,n_spin,n_kpt,k_wt,eval,&
occ,mu_right,diff_right)
mu = mu_right
! ...with adjusted occupation numbers
write(msg,"(A)") "Chemical potential cannot reach required accuracy"
call elsi_say(bh,msg)
write(msg,"(A,E12.4,A)") "| Residual error :",diff_right
call elsi_say(bh,msg)
write(msg,"(A)") "Error will be removed from highest occupied states"
call elsi_say(bh,msg)
call elsi_adjust_occ(ph,bh,n_state,n_spin,n_kpt,k_wt,eval,occ,diff_right)
end if
end subroutine
!>
!! Cancel the small error in number of electrons.
!!
subroutine elsi_adjust_occ(ph,bh,n_state,n_spin,n_kpt,k_wt,eval,occ,diff)
implicit none
type(elsi_param_t), intent(in) :: ph
type(elsi_basic_t), intent(in) :: bh
integer(kind=i4), intent(in) :: n_state
integer(kind=i4), intent(in) :: n_spin
integer(kind=i4), intent(in) :: n_kpt
real(kind=r8), intent(in) :: k_wt(n_kpt)
real(kind=r8), intent(in) :: eval(n_state,n_spin,n_kpt)
real(kind=r8), intent(inout) :: occ(n_state,n_spin,n_kpt)
real(kind=r8), intent(inout) :: diff
integer(kind=i4) :: n_total
integer(kind=i4) :: i_state
integer(kind=i4) :: i_kpt
integer(kind=i4) :: i_spin
integer(kind=i4) :: i_val
real(kind=r8), allocatable :: tmp(:)
integer(kind=i4), allocatable :: perm(:)
character(len=*), parameter :: caller = "elsi_adjust_occ"
n_total = n_state*n_spin*n_kpt
call elsi_allocate(bh,tmp,n_total,"tmp",caller)
call elsi_allocate(bh,perm,n_total,"perm",caller)
! Put eval into a 1D array
i_val = 0
do i_kpt = 1,n_kpt
do i_spin = 1,n_spin
do i_state = 1,n_state
i_val = i_val+1
tmp(i_val) = eval(i_state,i_spin,i_kpt)
end do
end do
end do
! Sort eval
call elsi_heapsort(n_total,tmp,perm)
! Put occ into a 1D array
i_val = 0
do i_kpt = 1,n_kpt
do i_spin = 1,n_spin
do i_state = 1,n_state
i_val = i_val+1
tmp(i_val) = occ(i_state,i_spin,i_kpt)
end do
end do
end do
call elsi_permute(n_total,perm,tmp)
! Remove error
do i_val = 1,n_total
i_kpt = (perm(i_val)-1)/(n_spin*n_state)+1
if(tmp(i_val) > 0.0_r8) then
if(k_wt(i_kpt)*tmp(i_val) > diff) then
tmp(i_val) = tmp(i_val)-diff/k_wt(i_kpt)
diff = 0.0_r8
else
diff = diff-k_wt(i_kpt)*tmp(i_val)
tmp(i_val) = 0.0_r8
end if
end if
if(diff <= ph%mu_tol) then
exit
end if
end do
call elsi_unpermute(n_total,perm,tmp)
! Put adjusted occ back into a 3D array
i_val = 0
do i_kpt = 1,n_kpt
do i_spin = 1,n_spin
do i_state = 1,n_state
i_val = i_val+1
occ(i_state,i_spin,i_kpt) = tmp(i_val)
end do
end do
end do
call elsi_deallocate(bh,tmp,"tmp")
call elsi_deallocate(bh,perm,"perm")
end subroutine
!>
!! Compute the electronic entropy.
!!
subroutine elsi_entropy(ph,n_state,n_spin,n_kpt,k_wt,eval,occ,mu,ts)
implicit none
type(elsi_param_t), intent(in) :: ph !< Parameters
integer(kind=i4), intent(in) :: n_state !< Number of states
integer(kind=i4), intent(in) :: n_spin !< Number of spins
integer(kind=i4), intent(in) :: n_kpt !< Number of k-points
real(kind=r8), intent(in) :: k_wt(n_kpt) !< K-points weights
real(kind=r8), intent(in) :: eval(n_state,n_spin,n_kpt) !< Eigenvalues
real(kind=r8), intent(in) :: occ(n_state,n_spin,n_kpt) !< Occupation numbers
real(kind=r8), intent(in) :: mu !< Input chemical potential
real(kind=r8), intent(out) :: ts !< Entropy
real(kind=r8) :: spin_degen
real(kind=r8) :: invert_width
real(kind=r8) :: delta
real(kind=r8) :: pre
real(kind=r8) :: arg
real(kind=r8) :: wt
real(kind=r8) :: A
real(kind=r8) :: H_even
real(kind=r8) :: H_odd
integer(kind=i4) :: i_state
integer(kind=i4) :: i_kpt
integer(kind=i4) :: i_spin
integer(kind=i4) :: i_mp
real(kind=r8), parameter :: ts_thr = 1.0e-15_r8
character(len=*), parameter :: caller = "elsi_entropy"
invert_width = 1.0_r8/ph%mu_width
ts = 0.0_r8
pre = 0.0_r8
if(.not. ph%spin_is_set) then
if(n_spin == 2) then
spin_degen = 1.0_r8
else
spin_degen = 2.0_r8
end if
else
spin_degen = ph%spin_degen
end if
select case(ph%mu_scheme)
case(GAUSSIAN)
pre = 0.5_r8*spin_degen*ph%mu_width*INVERT_SQRT_PI
do i_kpt = 1,n_kpt
do i_spin = 1,n_spin
do i_state = 1,n_state
arg = (eval(i_state,i_spin,i_kpt)-mu)*invert_width
ts = ts+exp(-arg**2)*k_wt(i_kpt)
end do
end do
end do
case(FERMI)
pre = spin_degen*ph%mu_width
do i_kpt = 1,n_kpt
do i_spin = 1,n_spin
do i_state = 1,n_state
arg = occ(i_state,i_spin,i_kpt)/spin_degen
if(1.0_r8-arg > ts_thr .and. arg > ts_thr) then
ts = ts-(arg*log(arg)+(1.0_r8-arg)*log(1.0_r8-arg))&
*k_wt(i_kpt)
end if
end do
end do
end do
case(METHFESSEL_PAXTON)
pre = 0.5_r8*spin_degen*ph%mu_width
do i_kpt = 1,n_kpt
do i_spin = 1,n_spin
do i_state = 1,n_state
arg = (eval(i_state,i_spin,i_kpt)-mu)*invert_width
wt = exp(-arg**2)
A = INVERT_SQRT_PI
H_even = 1.0_r8
H_odd = 2.0_r8*arg
ts = ts+INVERT_SQRT_PI*wt*k_wt(i_kpt)
do i_mp = 1,ph%mu_mp_order
A = -0.25_r8/real(i_mp,kind=r8)*A
H_even = 2.0_r8*arg*H_odd-2.0_r8*real(i_mp,kind=r8)*H_even
H_odd = 2.0_r8*arg*H_even-2.0_r8*real(i_mp+1,kind=r8)*H_odd
ts = ts+A*H_even*wt*k_wt(i_kpt)
end do
end do
end do
end do
case(CUBIC)
delta = 0.75_r8*ph%mu_width*SQRT_PI
pre = 0.1875_r8*spin_degen*ph%mu_width
do i_kpt = 1,n_kpt
do i_spin = 1,n_spin
do i_state = 1,n_state
arg = (eval(i_state,i_spin,i_kpt)-mu)/delta
if(arg > -1.0_r8 .and. arg < 1.0_r8) then
ts = ts+(((arg**2)-1.0_r8)**2)*k_wt(i_kpt)
end if
end do
end do
end do
case(COLD)
pre = 0.5_r8*spin_degen*ph%mu_width*INVERT_SQRT_PI
do i_kpt = 1,n_kpt
do i_spin = 1,n_spin
do i_state = 1,n_state
arg = (eval(i_state,i_spin,i_kpt)-mu)*invert_width
ts = ts+exp(-(arg-sqrt(0.5_r8))**2)*(1.0_r8-sqrt(2.0_r8)*arg)
end do
end do
end do
end select
ts = pre*ts
end subroutine
!>
!! Compute the occupation numbers to be used to construct density matrices.
!!
subroutine elsi_get_occ_for_dm(ph,bh,eval,occ)
implicit none
type(elsi_param_t), intent(inout) :: ph
type(elsi_basic_t), intent(in) :: bh
real(kind=r8), intent(in) :: eval(ph%n_basis)
real(kind=r8), intent(out) :: occ(ph%n_states,ph%n_spins,ph%n_kpts)
real(kind=r8) :: mu
real(kind=r8) :: ts
real(kind=r8) :: n_electrons
integer(kind=i4) :: n_states
integer(kind=i4) :: n_spins
integer(kind=i4) :: n_kpts
integer(kind=i4) :: i
integer(kind=i4) :: ierr
real(kind=r8), allocatable :: eval_all(:,:,:)
real(kind=r8), allocatable :: k_wt(:)
real(kind=r8), allocatable :: tmp1(:)
real(kind=r8), allocatable :: tmp2(:,:,:)
character(len=*), parameter :: caller = "elsi_get_occ_for_dm"
! Gather eigenvalues and occupation numbers
call elsi_allocate(bh,eval_all,ph%n_states,ph%n_spins,ph%n_kpts,"eval_all",&
caller)
call elsi_allocate(bh,k_wt,ph%n_kpts,"k_wt",caller)
if(ph%n_kpts > 1) then
call elsi_allocate(bh,tmp1,ph%n_kpts,"tmp",caller)
if(bh%myid == 0 .and. ph%i_spin == 1) then
tmp1(ph%i_kpt) = ph%i_wt
end if
call MPI_Allreduce(tmp1,k_wt,ph%n_kpts,MPI_REAL8,MPI_SUM,bh%comm_all,ierr)
call elsi_check_err(bh,"MPI_Allreduce",ierr,caller)
call elsi_deallocate(bh,tmp1,"tmp")
else
k_wt = ph%i_wt
end if
if(ph%n_spins*ph%n_kpts > 1) then
call elsi_allocate(bh,tmp2,ph%n_states,ph%n_spins,ph%n_kpts,"tmp",caller)
if(bh%myid == 0) then
tmp2(:,ph%i_spin,ph%i_kpt) = eval(1:ph%n_states)
end if
call MPI_Allreduce(tmp2,eval_all,ph%n_states*ph%n_spins*ph%n_kpts,&
MPI_REAL8,MPI_SUM,bh%comm_all,ierr)
call elsi_check_err(bh,"MPI_Allreduce",ierr,caller)
call elsi_deallocate(bh,tmp2,"tmp")
else
eval_all(:,ph%i_spin,ph%i_kpt) = eval(1:ph%n_states)
end if
! Calculate chemical potential, occupation numbers, and electronic entropy
n_electrons = ph%n_electrons
n_states = ph%n_states
n_spins = ph%n_spins
n_kpts = ph%n_kpts
call elsi_mu_and_occ(ph,bh,n_electrons,n_states,n_spins,n_kpts,k_wt,&
eval_all,occ,mu)
call elsi_entropy(ph,n_states,n_spins,n_kpts,k_wt,eval_all,occ,mu,ts)
ph%mu = mu
ph%ts = ts
! Calculate band structure energy
ph%ebs = 0.0_r8
do i = 1,ph%n_states_solve
ph%ebs = ph%ebs+eval(i)*occ(i,ph%i_spin,ph%i_kpt)
end do
call elsi_deallocate(bh,eval_all,"eval_all")
call elsi_deallocate(bh,k_wt,"k_wt")
end subroutine
end module ELSI_OCC
|
{"hexsha": "2b61c6dcb37dd9a03879a64da0ead195980bd231", "size": 23275, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "src/elsi_occ.f90", "max_stars_repo_name": "ElectronicStructureLibrary/elsi-interface", "max_stars_repo_head_hexsha": "95d2b02ca627e08eea52eea8358fdb44ab0e67e3", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2020-12-10T09:52:01.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-28T13:38:45.000Z", "max_issues_repo_path": "src/elsi_occ.f90", "max_issues_repo_name": "ElectronicStructureLibrary/elsi-interface", "max_issues_repo_head_hexsha": "95d2b02ca627e08eea52eea8358fdb44ab0e67e3", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/elsi_occ.f90", "max_forks_repo_name": "ElectronicStructureLibrary/elsi-interface", "max_forks_repo_head_hexsha": "95d2b02ca627e08eea52eea8358fdb44ab0e67e3", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2021-04-07T00:30:08.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-12T07:02:50.000Z", "avg_line_length": 29.954954955, "max_line_length": 113, "alphanum_fraction": 0.6050698174, "num_tokens": 7535}
|
from ctypes import *
import numpy as np
import numpy.ctypeslib as npct
import os
import sys
import glob
version = str(sys.version_info.major)+str(sys.version_info.minor)
class Pos(Structure):
_fields_ = [('x', c_float), ('y', c_float), ('z', c_float)]
# load the library, using numpy mechanisms
_libdir = os.path.dirname(__file__)
if _libdir == '':
_libdir = './'
_libpath = os.path.join(_libdir, 'libgrid*.so')
lp = []
for lib in glob.glob(_libpath):
if version in lib:
lp.append(lib)
_libpath = lp
assert len(_libpath)==1, 'Multiple libgrid*.so or library not found! {}'.format(_libpath)
libcd = CDLL(_libpath[0])
# setup the return typs and argument types
libcd.Griding_NGP.restype = POINTER(c_double)
libcd.Griding_NGP.argtypes = [
c_int,
c_int,
c_int,
POINTER(Pos),
]
libcd.Griding_CIC.restype = POINTER(c_double)
libcd.Griding_CIC.argtypes = [
c_int,
c_int,
c_int,
POINTER(Pos),
]
libcd.Griding_PCS.restype = POINTER(c_double)
libcd.Griding_PCS.argtypes = [
c_int,
c_int,
c_int,
POINTER(Pos),
]
def NGP(pos, NG=32, L=300):
''' shape of pos is [-1, 3], corresponding to [x, y, z]'''
if not pos.flags['C_CONTIGUOUS']:
pos = np.ascontiguousarray(pos, dtype=pos.dtype)
pos_ctypes_ptr = cast(pos.ctypes.data, POINTER(Pos))
Gp = libcd.Griding_NGP(NG, L, len(pos), pos_ctypes_ptr)
Grid = np.fromiter(Gp, dtype=np.float64, count=NG**3).astype(np.float32)
return Grid
def CIC(pos, NG=32, L=300):
''' shape of pos is [-1, 3], corresponding to [x, y, z]'''
if not pos.flags['C_CONTIGUOUS']:
pos = np.ascontiguousarray(pos, dtype=pos.dtype)
pos_ctypes_ptr = cast(pos.ctypes.data, POINTER(Pos))
Gp = libcd.Griding_CIC(NG, L, len(pos), pos_ctypes_ptr)
Grid = np.fromiter(Gp, dtype=np.float64, count=NG**3).astype(np.float32)
return Grid
def PCS(pos, NG=32, L=300):
''' shape of pos is [-1, 3], corresponding to [x, y, z]'''
if not pos.flags['C_CONTIGUOUS']:
pos = np.ascontiguousarray(pos, dtype=pos.dtype)
pos_ctypes_ptr = cast(pos.ctypes.data, POINTER(Pos))
Gp = libcd.Griding_PCS(NG, L, len(pos), pos_ctypes_ptr)
Grid = np.fromiter(Gp, dtype=np.float64, count=NG**3).astype(np.float32)
return Grid
if __name__ == '__main__':
# x = np.arange(32**3*3, dtype=np.float32)
x = np.linspace(1, 299, 256 * 3, dtype=np.float32).reshape([-1, 3])
NG = 128
grid_pcs = PCS(x, NG=NG, L=300).reshape(NG,NG,NG)
grid_cic = CIC(x, NG=NG, L=300).reshape(NG,NG,NG)
import matplotlib.pyplot as plt
plt.subplot(121)
plt.imshow(grid_pcs.mean(axis=0))
plt.subplot(122)
plt.imshow(grid_cic.mean(axis=0))
plt.show()
|
{"hexsha": "acb441f05e46f267025bcaef2daa36d67bb93a07", "size": 2697, "ext": "py", "lang": "Python", "max_stars_repo_path": "CosmAna/Ext_C/libgrid/Loadlibgrid.py", "max_stars_repo_name": "POFK/CosmAna", "max_stars_repo_head_hexsha": "153af155d243e38f64b8bdf79abc496163269219", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-07-04T10:28:05.000Z", "max_stars_repo_stars_event_max_datetime": "2020-07-04T10:28:05.000Z", "max_issues_repo_path": "CosmAna/Ext_C/libgrid/Loadlibgrid.py", "max_issues_repo_name": "POFK/CosmAna", "max_issues_repo_head_hexsha": "153af155d243e38f64b8bdf79abc496163269219", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 10, "max_issues_repo_issues_event_min_datetime": "2019-04-16T07:03:42.000Z", "max_issues_repo_issues_event_max_datetime": "2021-07-11T06:15:34.000Z", "max_forks_repo_path": "CosmAna/Ext_C/libgrid/Loadlibgrid.py", "max_forks_repo_name": "POFK/CosmAna", "max_forks_repo_head_hexsha": "153af155d243e38f64b8bdf79abc496163269219", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.6914893617, "max_line_length": 89, "alphanum_fraction": 0.6551724138, "include": true, "reason": "import numpy", "num_tokens": 856}
|
import numpy as np
import numpy.linalg as LA
import control
class KalmanFilter:
"""
Kalman Filter for a linear system
Parameters
----------
F: State Transition (A matrix)
H: Observation Model (C matrix)
Q: Process Covariance
R: Observation Covariance
B: Input Model (Optional)
x0: Initial State (Optional)
"""
def __init__(self, F, H, Q, R, B=None, x0=None):
# Check matrix dimensions
if F.shape[0] != F.shape[1]:
raise ValueError(f"State transition matrix must be square, but received matrix of size {F.shape}")
self.F = F
self.H = H
self.Q = Q
self.R = R
self.n = self.F.shape[0]
self.has_input = B is not None
if self.has_input:
self.B = B
if x0 is not None:
self.xhat = x0
else:
self.xhat = np.zeros(self.n)
# Initialise initial covariance to zero
self.P = np.zeros((self.n, self.n))
def __call__(self, measurement, u=None):
"""
Predict and update kalman filter
"""
# Predict
if self.has_input:
self.xhat = self.F @ self.xhat + self.B @ u
else:
self.xhat = self.F @ self.xhat
self.P = self.F @ self.P @ self.F.T + self.Q
# Calculate Kalman Gain
innovation = self.H @ self.P @ self.H.T + self.R
K = self.P @ self.H.T @ LA.pinv(innovation)
# Update estimate
self.xhat = self.xhat + K @ (measurement - self.H @ self.xhat)
# Update covariance
self.P = (np.eye(self.n) - K @ self.H) @ self.P
return self.xhat
@classmethod
def from_continuous(cls, A, C, Q, R, Ts, method='zoh', B=None, x0=None):
"""
Initialise Kalman Filter from continuous system description (F,B,H)
by discretization
"""
N = A.shape[0]
sys = control.StateSpace(A,B,C,0)
sysd = control.sample_system(sys, Ts, method)
A = sysd.A
B = sysd.B
kf = cls(A,C,Q,R,B=B,x0=x0)
return kf
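if __name__ == '__main__':
    # Minimal usage sketch added for illustration; the 1D constant-velocity model
    # and all noise values below are made-up example numbers.
    dt = 0.1
    F = np.array([[1.0, dt], [0.0, 1.0]])    # state transition for [position, velocity]
    H = np.array([[1.0, 0.0]])               # only position is measured
    Q = 1e-4 * np.eye(2)                     # process noise covariance
    R = np.array([[0.05]])                   # measurement noise covariance
    kf = KalmanFilter(F, H, Q, R)
    for z in [0.0, 0.11, 0.19, 0.32, 0.41]:  # fake position measurements
        xhat = kf(np.array([z]))
    print("final state estimate:", xhat)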
|
{"hexsha": "180f289c032f22659f65e17153c51c4359a2fe1f", "size": 2202, "ext": "py", "lang": "Python", "max_stars_repo_path": "estimation/KalmanFilter.py", "max_stars_repo_name": "andrelimzs/state-estimation", "max_stars_repo_head_hexsha": "57fa0682bb0933200510ed5dc27d09851d39f3b1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-03-12T16:12:58.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-12T16:12:58.000Z", "max_issues_repo_path": "estimation/KalmanFilter.py", "max_issues_repo_name": "andrelimzs/state-estimation", "max_issues_repo_head_hexsha": "57fa0682bb0933200510ed5dc27d09851d39f3b1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "estimation/KalmanFilter.py", "max_forks_repo_name": "andrelimzs/state-estimation", "max_forks_repo_head_hexsha": "57fa0682bb0933200510ed5dc27d09851d39f3b1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.2142857143, "max_line_length": 110, "alphanum_fraction": 0.514986376, "include": true, "reason": "import numpy", "num_tokens": 556}
|
#include <rem_tree/rem_tree.h>
#include <iostream>
#include <chrono>
#include <NTL/ZZ.h>
#include <NTL/vector.h>
using namespace std;
using namespace NTL;
ZZ startFunc(ZZ modProd);
int main(){
int startBound = 10;
int endBound = 20;
Vec<ZZ> A;
A.setLength(endBound);
Vec<ZZ> m;
m.setLength(endBound);
for(int i = 0; i < endBound; i++){
A[i] = i;
m[i] = (ProbPrime(ZZ(i))) ? i : 1;
}
ZZ (*const startVal)(ZZ) = startFunc;
Vec<ZZ> newA;
newA.setLength(endBound - startBound);
Vec<ZZ> newm;
newm.setLength(endBound - startBound);
for(int i = 0; i < endBound - startBound; i++){
newA[i] = A[i + startBound];
newm[i] = m[i + startBound];
}
// create object
rem_tree T(10, 20, startVal, newA, newm);
}
ZZ startFunc(ZZ modProd){
    // have to fill in
    // placeholder so this compiles; replace with the real start-value computation
    return ZZ(1);
}
|
{"hexsha": "48b80de7463a3bfa6e64ac2fec9159cffda3b339", "size": 779, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "archives/cppfiles/rem_tree/main.cpp", "max_stars_repo_name": "adienes/remainder-tree", "max_stars_repo_head_hexsha": "0aa76214ab6f2a4389ec45a239ea660749989a90", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "archives/cppfiles/rem_tree/main.cpp", "max_issues_repo_name": "adienes/remainder-tree", "max_issues_repo_head_hexsha": "0aa76214ab6f2a4389ec45a239ea660749989a90", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "archives/cppfiles/rem_tree/main.cpp", "max_forks_repo_name": "adienes/remainder-tree", "max_forks_repo_head_hexsha": "0aa76214ab6f2a4389ec45a239ea660749989a90", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 18.1162790698, "max_line_length": 48, "alphanum_fraction": 0.6405648267, "num_tokens": 261}
|
# coding: utf-8
# # Time Optimal Velocity Profiles
#
# When the maze solver commands that the robot go forward, it can say that it must go forward one or more squares depending on what it knows about the maze. When we don't know what is after the square we pass through, we must be going slow enough to handle any scenario. In other words, there is some $V_f$ that we must reach by the end of our motion. We also begin motions at this speed, since whatever motion brought us to where we are had to end at $V_f$. Therefore, we start and end at $V_f$, and we want to cover some distance $d$ in the fastest possible time. To do so, we accelerate at our fixed $a$ until we reach max speed, or until we need to start slowing down (whichever comes first). This gives us a trapezoid-shaped velocity profile.
# ## Going Straight
# In[1]:
get_ipython().magic('load_ext tikzmagic')
# In[2]:
get_ipython().run_cell_magic('tikz', '-s 400,400', '\\draw[->] (0,0) -- (10,0);\n\\draw[->] (0,0) -- (0,5);\n\n\\draw[line width=1] (0,0.5) -- (2.5,3);\n\\draw[line width=1] (2.5,3) -- (5.5,3);\n\\draw[line width=1] (5.5,3) -- (8,0.5);\n\\draw[dashed] (0,0.5) -- (10,0.5);\n\\draw[dashed] (0,3) -- (10,3);\n\\draw[dashed] (2.5,0) -- (2.5,5);\n\\draw[dashed] (5.5,0) -- (5.5,5);\n\\draw[dashed] (8,0) -- (8,5);\n\n\\draw (-0.5, 0.5) node {$V_{f}$};\n\\draw (-0.5, 3) node {$V_{max}$};\n\\draw (2.5, -0.5) node {$t_b$};\n\\draw (5.5, -0.5) node {$t_f-t_b$};\n\\draw (8, -0.5) node {$t_f$};')
# The time to accelerate from $V_f$ to the peak speed $V$ is $t_b = \frac{V-V_f}{a}$. We can substitute this into the constant-acceleration equation of motion as follows.
#
# \begin{align}
# d &= Vt_b - \frac{1}{2}a{t_b}^2 \\
# &= V\Big(\frac{V-V_f}{a}\Big) - \frac{1}{2}a\Big(\frac{V-V_f}{a}\Big)^2 \\
# &= \Big(\frac{V^2-VV_f}{a}\Big) - \Big(\frac{a(V-V_f)^2}{2a^2}\Big) \\
# &= \Big(\frac{2V^2-2VV_f}{2a}\Big) - \Big(\frac{V^2-2VV_f+{V_f}^2}{2a}\Big) \\
# &= \frac{2V^2-2VV_f - V^2 + 2VV_f - {V_f}^2}{2a} \\
# d &= \frac{V^2-{V_f}^2}{2a} \\
# \end{align}
#
# For example, if you're starting at $V_f=0.2\frac{m}{s}$, ramping up to $V=0.5\frac{m}{s}$, and your acceleration is fixed at $a=2\frac{m}{s^2}$, the distance you'll need to do that is $d = \frac{0.5^2 - 0.2^2}{2 \cdot 2} = 0.0525m$
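# A quick numeric check of the worked example above, using the formula just derived:
# In[ ]:
V, V_f, a = 0.5, 0.2, 2.0
d = (V**2 - V_f**2) / (2 * a)
print(d)  # ~0.0525 m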
# ## Code that proves it
# In[3]:
import numpy as np
import matplotlib.pyplot as plt
np.set_printoptions(suppress=True, precision=3)
def profile(V0, Vf, Vmax, d, A, buffer=3e-3):
v = V0
x = 0
a = A
vs = [v]
xs = [x]
a_s = [a]
dt = 0.01
while x < d:
x = x + v*dt + a*dt*dt/2.0
v = v + a*dt
        ramp_d = (v*v - Vf*Vf) / (2.0*A)
if (d-x) < ramp_d + buffer:
a = -A
elif v < Vmax:
a = A
else:
a = 0
if v > Vmax:
v = Vmax
elif v < Vf:
v = Vf
xs.append(x)
vs.append(v)
a_s.append(a)
return xs, vs, a_s
def graph(title, idx):
plt.figure()
plt.title(title)
Vs = [0.35, 0.5, 0.75, 1, 2]
Vf = 0.02
V0 = 0.2
d = 0.35
a = 2
for V in Vs:
results = profile(V0, Vf, V, d, a)
vs = results[1]
if V == 2: # make V=2 dashed so we can see it over V=1
plt.plot(results[idx], label='V={}'.format(V), linestyle='dashed')
else:
plt.plot(results[idx], label='V={}'.format(V))
plt.legend(bbox_to_anchor=(1, 1), loc=2)
graph("position", 0)
graph("velocity", 1)
graph("acceleration", 2)
plt.show()
# ## Taking Turns
# Here we will discuss how to generate a time optimal trajectory for turns. First, let's start out with generating trajectories that are not time optimal, but rely on specifying the final time $t_f$. For smartmouse, our state space is $[x, y, \theta]$, and a turn can be defined as starting at a point $[x_0, y_0, \theta_0]$ and going to $[x_f, y_f, \theta_f]$. Of course, we also want to specify the velocities at these points, $[\dot{x}_0, \dot{y}_0,\dot{\theta}_0]$ and $[\dot{x}_f, \dot{y}_f,\dot{\theta}_f]$. We have four constraints, so if we want to fit a smooth polynomial to those points we need a cubic polynomial (four coefficients).
#
# $$q(t) = a_0 + a_1t + a_2t^2 + a_3t^3$$
# $$\dot{q}(t) = a_1 + 2a_2t + 3a_3t^2$$
#
# If we sub in our constraints, we get the following system of equations.
#
# \begin{align}
# q(0) &= a_0 \\
# \dot{q}(0) &= a_1 \\
# q(t_f) &= a_0 + a_1t_f + a_2{t_f}^2 + a_3{t_f}^3\\
# \dot{q}(t_f) &= a_1 + 2a_2t_f + 3a_3{t_f}^2\\
# \end{align}
#
# In matrix form that looks like:
# \begin{equation}
# \begin{bmatrix}
# 1 & 0 & 0 & 0 \\
# 0 & 1 & 0 & 0 \\
# 1 & t_f & t_f^2 & t_f^3 \\
# 0 & 1 & 2t_f & 3t_f^2 \\
# \end{bmatrix}
# \begin{bmatrix}
# a_0 \\
# a_1 \\
# a_2 \\
# a_3 \\
# \end{bmatrix} =
# \begin{bmatrix}
# q(0) \\
# \dot{q}(0) \\
# q(t_f) \\
# \dot{q}(t_f) \\
# \end{bmatrix}
# \end{equation}
#
# It can be shown that the matrix on the left is invertible, so long as $t_f-t_0 > 0$. So we can invert and solve this equation and get all the $a$ coefficients. We can then use this polynomial to generate the $q(t)$ and $\dot{q}(t)$ -- our trajectory.
# In[4]:
# Example: you are a point in space (one dimension); go from rest at the origin to rest at (0.18, 0, 0) in 1 second
import numpy as np
np.set_printoptions(suppress=True, precision=3)
q_0 = np.array([0])
q_dot_0 = np.array([0])
q_f = np.array([0.18])
q_dot_f = np.array([0])
t_f = 1
b = np.array([q_0, q_dot_0, q_f, q_dot_f])
a = np.array([[1,0,0,0],[0,1,0,0],[1, t_f, pow(t_f,2),pow(t_f,3)],[0,1,2*t_f,3*pow(t_f,2)]])
coeff = np.linalg.solve(a, b)
print(coeff)
# Here you can see that the resulting coefficients are $a_0=0$, $a_1=0$, $a_2=0.54$, $a_3=-0.36$. Intuitively, this says that we're going to have positive acceleration, but our acceleration is going to slow down over time. Let's graph it!
# In[5]:
import matplotlib.pyplot as plt
dt = 0.01
ts = np.array([[1, t, pow(t,2), pow(t,3)] for t in np.arange(0, t_f+dt, dt)])
qs = ts@coeff
plt.plot(ts[:,1], qs, label="x")
plt.xlabel("time (seconds)")
plt.xlabel("X (meters)")
plt.legend(bbox_to_anchor=(1,1), loc=2)
plt.show()
# **ooooooooooh so pretty**
#
# Let's try another example, now with our full state space of $[x, y, \theta]$.
# In[6]:
# In this example, we go from (0.09, 0.09, 0) to (0.27, 0.18, -1.5707). Our starting and ending velocities are zero
q_0 = np.array([0.09,0.09,0])
q_dot_0 = np.array([0,0,0])
q_f = np.array([0.27,0.18,-1.5707])
q_dot_f = np.array([0,0,0])
t_f = 1
b = np.array([q_0, q_dot_0, q_f, q_dot_f])
a = np.array([[1,0,0,0],[0,1,0,0],[1, t_f, pow(t_f,2),pow(t_f,3)],[0,1,2*t_f,3*pow(t_f,2)]])
coeff = np.linalg.solve(a, b)
print(coeff)
dt = 0.1
ts = np.array([[1, t, pow(t,2), pow(t,3)] for t in np.arange(0, t_f+dt, dt)])
qs = ts@coeff
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.gca().set_adjustable("box")
plt.subplot(221)
plt.plot(ts[:,1], qs[:,0])
plt.xlabel("time (seconds)")
plt.title("x")
plt.subplot(222)
plt.plot(ts[:,1], qs[:,1])
plt.xlabel("time (seconds)")
plt.title("y")
plt.subplot(223)
plt.plot(ts[:,1], qs[:,2])
plt.xlabel("time (seconds)")
plt.title(r"$\theta$")
plt.subplot(224)
plt.scatter(qs[:,0], qs[:,1])
plt.axis('equal')
plt.xlabel("X")
plt.ylabel("Y")
plt.tight_layout()
plt.show()
# Gifs!
from matplotlib.animation import FuncAnimation
from IPython.display import HTML
fig, ax = plt.subplots()
x = np.arange(0, 20, 0.1)
line, = ax.plot(x, x - 5, 'r-', linewidth=2)
def update(i):
label = "t_" + str(i)
line.set_ydata(x - 5 + i)
ax.set_xlabel(label)
return line, ax
plt.rc('text', usetex=False)
anim = FuncAnimation(fig, update, frames=np.arange(0, 10), interval=100)
gif_file = 'car.gif'
anim.save(gif_file, dpi=80, writer='imagemagick')
HTML("<img src={}/>".format(gif_file))
# Well, they are smooth, but these are not possible to execute! The robot cannot simply translate sideways.
# In[ ]:
# In[ ]:
# In[ ]:
|
{"hexsha": "72a46ee4fc3d7611f74be024ef511b2bdc07056c", "size": 7998, "ext": "py", "lang": "Python", "max_stars_repo_path": "docs/ipynb_gen_py/time optimal velocity profiles.py", "max_stars_repo_name": "keionbis/SmartMouse_2018", "max_stars_repo_head_hexsha": "26d548a93c282bca8d550b58609f22ea1910fc78", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "docs/ipynb_gen_py/time optimal velocity profiles.py", "max_issues_repo_name": "keionbis/SmartMouse_2018", "max_issues_repo_head_hexsha": "26d548a93c282bca8d550b58609f22ea1910fc78", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "docs/ipynb_gen_py/time optimal velocity profiles.py", "max_forks_repo_name": "keionbis/SmartMouse_2018", "max_forks_repo_head_hexsha": "26d548a93c282bca8d550b58609f22ea1910fc78", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-08-17T17:10:54.000Z", "max_forks_repo_forks_event_max_datetime": "2020-08-17T17:10:54.000Z", "avg_line_length": 31.992, "max_line_length": 758, "alphanum_fraction": 0.6004001, "include": true, "reason": "import numpy", "num_tokens": 3007}
|
! RUN: bbc -emit-fir %s -o - | FileCheck %s
! CHECK-LABEL: func @_QPtest1(
! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<!fir.array<100xf32>>{{.*}}, %[[VAL_1:.*]]: !fir.ref<i32>{{.*}}, %[[VAL_2:.*]]: !fir.ref<i32>{{.*}}, %[[VAL_3:.*]]: !fir.ref<i32>{{.*}}) {
! CHECK: %[[VAL_4:.*]] = arith.constant 100 : index
! CHECK: %[[VAL_5:.*]] = fir.load %[[VAL_1]] : !fir.ref<i32>
! CHECK: %[[VAL_6:.*]] = fir.convert %[[VAL_5]] : (i32) -> i64
! CHECK: %[[VAL_7:.*]] = fir.convert %[[VAL_6]] : (i64) -> index
! CHECK: %[[VAL_8:.*]] = fir.load %[[VAL_3]] : !fir.ref<i32>
! CHECK: %[[VAL_9:.*]] = fir.convert %[[VAL_8]] : (i32) -> i64
! CHECK: %[[VAL_10:.*]] = fir.convert %[[VAL_9]] : (i64) -> index
! CHECK: %[[VAL_11:.*]] = fir.load %[[VAL_2]] : !fir.ref<i32>
! CHECK: %[[VAL_12:.*]] = fir.convert %[[VAL_11]] : (i32) -> i64
! CHECK: %[[VAL_13:.*]] = fir.convert %[[VAL_12]] : (i64) -> index
! CHECK: %[[VAL_14:.*]] = fir.shape %[[VAL_4]] : (index) -> !fir.shape<1>
! CHECK: %[[VAL_15:.*]] = fir.slice %[[VAL_7]], %[[VAL_13]], %[[VAL_10]] : (index, index, index) -> !fir.slice<1>
! CHECK: %[[VAL_16:.*]] = fir.embox %[[VAL_0]](%[[VAL_14]]) {{\[}}%[[VAL_15]]] : (!fir.ref<!fir.array<100xf32>>, !fir.shape<1>, !fir.slice<1>) -> !fir.box<!fir.array<?xf32>>
! CHECK: fir.call @_QPbob(%[[VAL_16]]) : (!fir.box<!fir.array<?xf32>>) -> ()
! CHECK: return
! CHECK: }
subroutine test1(a,i,j,k)
real a(100)
integer i, j, k
interface
subroutine bob(a)
real :: a(:)
end subroutine bob
end interface
associate (name => a(i:j:k))
call bob(name)
end associate
end subroutine test1
! CHECK-LABEL: func @_QPtest2(
! CHECK-SAME: %[[nadd:.*]]: !fir.ref<i32>{{.*}})
subroutine test2(n)
integer :: n
integer, external :: foo
! CHECK: %[[n:.*]] = fir.load %[[nadd]] : !fir.ref<i32>
! CHECK: %[[n10:.*]] = arith.addi %[[n]], %c10{{.*}} : i32
! CHECK: fir.store %[[n10]] to %{{.*}} : !fir.ref<i32>
! CHECK: %[[foo:.*]] = fir.call @_QPfoo(%{{.*}}) : (!fir.ref<i32>) -> i32
! CHECK: fir.store %[[foo]] to %{{.*}} : !fir.ref<i32>
associate (i => n, j => n + 10, k => foo(20))
print *, i, j, k, n
end associate
end subroutine test2
|
{"hexsha": "3fb34b39ea35367d2bf00409ddb248976bf813f4", "size": 2274, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "flang/test/Lower/associate-construct-2.f90", "max_stars_repo_name": "akashhansda/llvm-project", "max_stars_repo_head_hexsha": "32f146010968ded160f54af464673451ad574135", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "flang/test/Lower/associate-construct-2.f90", "max_issues_repo_name": "akashhansda/llvm-project", "max_issues_repo_head_hexsha": "32f146010968ded160f54af464673451ad574135", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "flang/test/Lower/associate-construct-2.f90", "max_forks_repo_name": "akashhansda/llvm-project", "max_forks_repo_head_hexsha": "32f146010968ded160f54af464673451ad574135", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.5882352941, "max_line_length": 181, "alphanum_fraction": 0.4964819701, "num_tokens": 878}
|
# NMF for dense matrices
using NMF
using Printf
function run(algname)
# prepare data
p = 8
k = 5
n = 100
    Wg = abs.(randn(p, k))
    Hg = abs.(randn(k, n))
X = Wg * Hg + 0.1 * randn(p, n)
# run NNMF
println("Algorithm: $(algname)")
println("---------------------------------")
r = nnmf(X, k;
init=:nndsvdar,
             alg=Symbol(algname),
maxiter=30,
verbose=true)
# display results
println("numiters = $(r.niters)")
println("converged = $(r.converged)")
@printf("objvalue = %.6e\n", r.objvalue)
println("W matrix = ")
NMF.printf_mat(r.W)
println()
end
function print_help()
println("Usage:")
println()
println(" julia densenmf.jl <alg>")
println()
println(" <alg> is the name of the chosen algorithm, which can be ")
println()
println(" multmse: Multiplicative update (minimize MSE)")
println(" multdiv: Multiplicative update (minimize divergence)")
println(" projals: Projected ALS")
println(" alspgrad: ALS Projected Gradient Descent")
println()
end
function main(args)
if isempty(args)
print_help()
elseif length(args) == 1
a = lowercase(args[1])
if a == "-h" || a == "--help"
print_help()
else
run(a)
end
else
@warn("Invalid command line arguments.")
print_help()
end
end
main(ARGS)
|
{"hexsha": "ac6b1824c0e48a5f910bc15fa0867a617259cb2b", "size": 1461, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "examples/densenmf.jl", "max_stars_repo_name": "UnofficialJuliaMirror/NMF.jl-6ef6ca0d-6ad7-5ff6-b225-e928bfa0a386", "max_stars_repo_head_hexsha": "d753312f4686578879fdeae009ba1250102b1e1f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "examples/densenmf.jl", "max_issues_repo_name": "UnofficialJuliaMirror/NMF.jl-6ef6ca0d-6ad7-5ff6-b225-e928bfa0a386", "max_issues_repo_head_hexsha": "d753312f4686578879fdeae009ba1250102b1e1f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/densenmf.jl", "max_forks_repo_name": "UnofficialJuliaMirror/NMF.jl-6ef6ca0d-6ad7-5ff6-b225-e928bfa0a386", "max_forks_repo_head_hexsha": "d753312f4686578879fdeae009ba1250102b1e1f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.1739130435, "max_line_length": 73, "alphanum_fraction": 0.5345653662, "num_tokens": 407}
|
"""
Code for "Invertible Residual Networks"
http://proceedings.mlr.press/v97/behrmann19a.html
ICML, 2019
"""
import threading
import logging
import contextlib
import numpy
import torch
import termcolor
import torch.backends.cudnn as cudnn
import torch.optim as optim
from torch.autograd import Variable
from torch.utils.data import Subset
from save_data import save_data, METADATA_DIR
import numpy as np
import torchvision
import torchvision.transforms as transforms
import visdom
import os
import sys
import time
import argparse
import pdb
import random
import json
from models.utils_cifar import train, test, std, mean, get_hms, interpolate
from models.conv_iResNet import conv_iResNet as iResNet
from models.conv_iResNet import multiscale_conv_iResNet as multiscale_iResNet
from models.designs import NoDesign, UniformDesign, CoresetDesignKCentersGreedy
parser = argparse.ArgumentParser(description='Train i-ResNet/ResNet on Cifar')
parser.add_argument('-densityEstimation', '--densityEstimation', dest='densityEstimation',
action='store_true', help='perform density estimation')
parser.add_argument('--optimizer', default="adamax", type=str, help="optimizer", choices=["adam", "adamax", "sgd"])
parser.add_argument('--lr', default=0.1, type=float, help='learning rate')
parser.add_argument('--coeff', default=0.9, type=float, help='contraction coefficient for linear layers')
parser.add_argument('--numTraceSamples', default=1, type=int, help='number of samples used for trace estimation')
parser.add_argument('--numSeriesTerms', default=1, type=int, help='number of terms used in power series for matrix log')
parser.add_argument('--powerIterSpectralNorm', default=5, type=int, help='number of power iterations used for spectral norm')
parser.add_argument('--weight_decay', default=5e-4, type=float, help='coefficient for weight decay')
parser.add_argument('--drop_rate', default=0.1, type=float, help='dropout rate')
parser.add_argument('--batch', default=128, type=int, help='batch size')
parser.add_argument('--init_batch', default=1024, type=int, help='init batch size')
parser.add_argument('--init_ds', default=2, type=int, help='initial downsampling')
parser.add_argument('--warmup_epochs', default=10, type=int, help='epochs for warmup')
parser.add_argument('--inj_pad', default=0, type=int, help='initial inj padding')
parser.add_argument('--design-epochs', default=10, type=int, help="Number of epochs where the design is updated.")
parser.add_argument('--train-epochs', default=20, type=int, help="For a given design, the number of epochs before updating the design.")
#parser.add_argument('--epochs', default=200, type=int, help='number of epochs')
parser.add_argument('--nBlocks', nargs='+', type=int, default=[4, 4, 4])
parser.add_argument('--nStrides', nargs='+', type=int, default=[1, 2, 2])
parser.add_argument('--nChannels', nargs='+', type=int, default=[16, 64, 256])
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
help='evaluate model on validation set')
parser.add_argument('-interpolate', '--interpolate', dest='interpolate', action='store_true', help='train iresnet')
parser.add_argument('-drop_two', '--drop_two', dest='drop_two', action='store_true', help='2d dropout on')
parser.add_argument('-nesterov', '--nesterov', dest='nesterov', action='store_true',
help='nesterov momentum')
parser.add_argument('-norm', '--norm', dest='norm', action='store_true',
help='compute norms of conv operators')
parser.add_argument('-analysisTraceEst', '--analysisTraceEst', dest='analysisTraceEst', action='store_true',
help='analysis of trace estimation')
parser.add_argument('-multiScale', '--multiScale', dest='multiScale', action='store_true',
help='use multiscale')
parser.add_argument('-fixedPrior', '--fixedPrior', dest='fixedPrior', action='store_true',
help='use fixed prior, default is learned prior')
parser.add_argument('-noActnorm', '--noActnorm', dest='noActnorm', action='store_true',
help='disable actnorm, default uses actnorm')
parser.add_argument('--nonlin', default="elu", type=str, choices=["relu", "elu", "sorting", "softplus"])
parser.add_argument('--dataset', default='cifar10', type=str, help='dataset')
parser.add_argument('--save_dir', default=None, type=str, help='directory to save results')
parser.add_argument('--vis_port', default=8097, type=int, help="port for visdom")
parser.add_argument('--vis_server', default="localhost", type=str, help="server for visdom")
parser.add_argument('--log_every', default=10, type=int, help='logs every x iters')
parser.add_argument('-log_verbose', '--log_verbose', dest='log_verbose', action='store_true',
help='verbose logging: sigmas, max gradient')
parser.add_argument('-deterministic', '--deterministic', dest='deterministic', action='store_true',
help='fix random seeds and set cuda deterministic')
parser.add_argument('--trunc', type=float, default=1., help='Truncate the data and test sets by percentage.')
parser.add_argument('--design', choices=['none', 'uniform', 'k-centers', 'k-centers-det'], default="none", help="Experimental design method.")
parser.add_argument('--design-batch-size', type=int, default=20, help="By what size to increase label count in each step.")
parser.add_argument('--no-update', action='store_true', help="Don't update the design after the first update.")
def try_make_dir(d):
if not os.path.isdir(d):
os.mkdir(d)
try_make_dir('results')
def anaylse_trace_estimation(model, testset, use_cuda, extension):
# setup range for analysis
numSamples = np.arange(10)*10 + 1
numIter = np.arange(10)
# setup number of datapoints
testloader = torch.utils.data.DataLoader(testset, batch_size=128, shuffle=False, num_workers=2)
# TODO change
for batch_idx, (inputs, targets) in enumerate(testloader):
if use_cuda:
inputs, targets = inputs.cuda(), targets.cuda() # GPU settings
inputs, targets = Variable(inputs, requires_grad=True), Variable(targets)
# compute trace
out_bij, p_z_g_y, trace, gt_trace = model(inputs[:, :, :8, :8],
exact_trace=True)
trace = [t.cpu().numpy() for t in trace]
np.save('gtTrace'+extension, gt_trace)
np.save('estTrace'+extension, trace)
return
def test_spec_norm(model, in_shapes, extension):
i = 0
j = 0
params = [v for v in model.module.state_dict().keys() \
if "bottleneck" and "weight" in v \
and not "weight_u" in v \
and not "weight_orig" in v \
and not "bn1" in v and not "linear" in v]
logging.info(len(params))
logging.info(len(in_shapes))
svs = []
for param in params:
if i == 0:
input_shape = in_shapes[j]
else:
input_shape = in_shapes[j]
input_shape[1] = int(input_shape[1] // 4)
convKernel = model.module.state_dict()[param].cpu().numpy()
input_shape = input_shape[2:]
fft_coeff = np.fft.fft2(convKernel, input_shape, axes=[2, 3])
t_fft_coeff = np.transpose(fft_coeff)
U, D, V = np.linalg.svd(t_fft_coeff, compute_uv=True, full_matrices=False)
Dflat = np.sort(D.flatten())[::-1]
logging.info("Layer "+str(j)+" Singular Value "+str(Dflat[0]))
svs.append(Dflat[0])
if i == 2:
i = 0
j+= 1
else:
i+=1
np.save('singular_values'+extension, svs)
return
def get_init_batch(dataloader, batch_size):
"""
gets a batch to use for init
"""
batches = []
seen = 0
for x, y in dataloader:
batches.append(x)
seen += x.size(0)
if seen >= batch_size:
break
batch = torch.cat(batches)
return batch
def get_model(
multiScale,
in_shape,
nBlocks,
nStrides,
nChannels,
nClasses,
init_ds,
inj_pad,
coeff,
numTraceSamples,
numSeriesTerms,
powerIterSpectralNorm,
densityEstimation,
noActnorm,
fixedPrior,
nonlin,
):
if multiScale:
model = multiscale_iResNet(
in_shape,
nBlocks,
nStrides,
nChannels,
init_ds == 2,
inj_pad,
coeff,
densityEstimation,
nClasses,
numTraceSamples,
numSeriesTerms,
powerIterSpectralNorm,
actnorm=(not noActnorm),
learn_prior=(not fixedPrior),
nonlin=nonlin,
)
else:
model = iResNet(
nBlocks=nBlocks,
nStrides=nStrides,
nChannels=nChannels,
nClasses=nClasses,
init_ds=init_ds,
inj_pad=inj_pad,
in_shape=in_shape,
coeff=coeff,
numTraceSamples=numTraceSamples,
numSeriesTerms=numSeriesTerms,
n_power_iter=powerIterSpectralNorm,
density_estimation=densityEstimation,
actnorm=(not noActnorm),
learn_prior=(not fixedPrior),
nonlin=nonlin,
)
return model
def cifar10_classification_model():
"""
CIFAR10 classification
"""
return get_model(
False,
(3, 32, 32),
[7,7,7],
[1,2,2],
[32,64,128],
10,
1,
13,
0.9,
1,
1,
1,
False,
False,
False,
"elu",
)
def main():
args = parser.parse_args()
assert 0. < args.trunc, Exception("Can't truncate with negative percentage", args.trunc)
logging.basicConfig(
format=f"%(asctime)s (design={args.design}) [%(levelname)s:%(funcName)s:%(filename)s:%(lineno)d] %(message)s",
level=logging.DEBUG,
)
if args.deterministic:
logging.info("MODEL NOT FULLY DETERMINISTIC")
torch.manual_seed(1234)
torch.cuda.manual_seed(1234)
np.random.seed(1234)
random.seed(1234)
torch.backends.cudnn.deterministic=True
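    # Dequantization chain for density estimation: scale [0,1] pixels back to [0,255],
    # add uniform(0,1) noise so the data has a continuous density, rescale to [0,1),
    # and centre around zero.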
dens_est_chain = [
lambda x: (255. * x) + torch.zeros_like(x).uniform_(0., 1.),
lambda x: x / 256.,
lambda x: x - 0.5
]
if args.dataset == 'mnist':
assert args.densityEstimation, "Currently mnist is only supported for density estimation"
mnist_transforms = [transforms.Pad(2, 0), transforms.ToTensor(), lambda x: x.repeat((3, 1, 1))]
transform_train_mnist = transforms.Compose(mnist_transforms + dens_est_chain)
transform_test_mnist = transforms.Compose(mnist_transforms + dens_est_chain)
trainset = torchvision.datasets.MNIST(
root='./data', train=True, download=True, transform=transform_train_mnist)
testset = torchvision.datasets.MNIST(
root='./data', train=False, download=False, transform=transform_test_mnist)
args.nClasses = 10
in_shape = (3, 32, 32)
else:
if args.dataset == 'svhn':
train_chain = [transforms.Pad(4, padding_mode="symmetric"),
transforms.RandomCrop(32),
transforms.ToTensor()]
else:
train_chain = [transforms.Pad(4, padding_mode="symmetric"),
transforms.RandomCrop(32),
transforms.RandomHorizontalFlip(),
transforms.ToTensor()]
test_chain = [transforms.ToTensor()]
if args.densityEstimation:
transform_train = transforms.Compose(train_chain + dens_est_chain)
transform_test = transforms.Compose(test_chain + dens_est_chain)
else:
clf_chain = [transforms.Normalize(mean[args.dataset], std[args.dataset])]
transform_train = transforms.Compose(train_chain + clf_chain)
transform_test = transforms.Compose(test_chain + clf_chain)
if args.dataset in ['cifar3', 'cifar5', 'cifar10']:
trainset = torchvision.datasets.CIFAR10(
root='./data', train=True, download=True, transform=transform_train)
testset = torchvision.datasets.CIFAR10(
root='./data', train=False, download=True, transform=transform_test)
if args.dataset == 'cifar10':
args.nClasses = 10
else:
nclasses = 3 if args.dataset == 'cifar3' else 5
args.nClasses = nclasses
logging.info("filtering classes 0..%d", nclasses)
train_subset_indices = []
cursor = 0
for _batch, labels in torch.utils.data.DataLoader(trainset, args.batch):
train_subset_indices += list((labels < nclasses).nonzero().flatten() + cursor)
cursor += len(labels)
trainset = Subset(trainset, train_subset_indices)
test_subset_indices = []
cursor = 0
for _batch, labels in torch.utils.data.DataLoader(testset, args.batch):
test_subset_indices += list((labels < nclasses).nonzero().flatten() + cursor)
cursor += len(labels)
testset = Subset(testset, test_subset_indices)
elif args.dataset == 'cifar100':
trainset = torchvision.datasets.CIFAR100(
root='./data', train=True, download=True, transform=transform_train)
testset = torchvision.datasets.CIFAR100(
root='./data', train=False, download=True, transform=transform_test)
args.nClasses = 100
elif args.dataset == 'svhn':
trainset = torchvision.datasets.SVHN(
root='./data', split='train', download=True, transform=transform_train)
testset = torchvision.datasets.SVHN(
root='./data', split='test', download=True, transform=transform_test)
args.nClasses = 10
in_shape = (3, 32, 32)
if args.trunc < 1:
orig_train_len = len(trainset)
orig_test_len = len(testset)
random_train_indices = list(
numpy.random.choice(
numpy.arange(len(trainset)),
int(args.trunc * len(trainset)),
replace=False,
)
)
trainset = Subset(trainset, random_train_indices)
random_test_indices = list(
numpy.random.choice(
numpy.arange(len(testset)),
int(args.trunc * len(testset)),
replace=False,
)
)
testset = Subset(testset, random_test_indices)
logging.info(
"truncated train set size to %.02f%%: %d -> %d",
100 * args.trunc,
orig_train_len,
len(trainset),
)
logging.info(
"truncated test set size to %.02f%%: %d -> %d",
100 * args.trunc,
orig_test_len,
len(testset),
)
else:
logging.info(termcolor.colored("not truncating dataset", "red"))
if args.design == 'none':
logging.info("no design")
design = NoDesign(trainset)
elif args.design == 'uniform':
logging.info("uniform design")
design = UniformDesign(trainset, args.design_batch_size, not args.no_update)
elif args.design == 'k-centers':
logging.info("k-centers design")
design = CoresetDesignKCentersGreedy(trainset, args.design_batch_size, True)
elif args.design == 'k-centers-det':
logging.info("k-centers design (deterministic)")
design = CoresetDesignKCentersGreedy(trainset, args.design_batch_size, False)
else:
raise Exception("invalid design", args.design)
# setup logging with visdom
viz = visdom.Visdom(port=args.vis_port, server="http://" + args.vis_server)
assert viz.check_connection(), "Could not make visdom"
if args.deterministic:
trainloader = lambda: torch.utils.data.DataLoader(
design.get_dataset(),
batch_size=args.batch,
shuffle=True,
num_workers=2,
            worker_init_fn=lambda worker_id: np.random.seed(1234),  # seed each worker process
)
        testloader = torch.utils.data.DataLoader(testset, batch_size=args.batch,
                                                 shuffle=False, num_workers=2,
                                                 worker_init_fn=lambda worker_id: np.random.seed(1234))
else:
trainloader = lambda: torch.utils.data.DataLoader(
design.get_dataset(),
batch_size=args.batch,
shuffle=True,
num_workers=2,
)
testloader = torch.utils.data.DataLoader(testset, batch_size=args.batch, shuffle=False, num_workers=2)
model = get_model(
args.multiScale,
in_shape,
args.nBlocks,
args.nStrides,
args.nChannels,
args.nClasses,
args.init_ds,
args.inj_pad,
args.coeff,
args.numTraceSamples,
args.numSeriesTerms,
args.powerIterSpectralNorm,
args.densityEstimation,
args.noActnorm,
args.fixedPrior,
args.nonlin,
)
    # init actnorm parameters
init_batch = get_init_batch(trainloader(), args.init_batch)
logging.info("cuda specs")
use_cuda = torch.cuda.is_available()
if use_cuda:
model.cuda()
init_batch = init_batch.cuda()
else:
in_shapes = model.get_in_shapes()
logging.info("initializing actnorm parameters...")
logging.info("batch device = %r", init_batch.device)
save_data(METADATA_DIR/"init-batch.h5", init_batch=init_batch)
with torch.no_grad():
        # there is some weird exception going on here in our setup.
        # calling `model` the first time raises that error and the next time it seems fine.
with contextlib.suppress(Exception):
model(init_batch)
model(init_batch, ignore_logdet=True)
if use_cuda:
model = torch.nn.DataParallel(model, range(torch.cuda.device_count()))
cudnn.benchmark = True
in_shapes = model.module.get_in_shapes()
logging.info("initialized")
# optionally resume from a checkpoint
if args.resume:
if os.path.isfile(args.resume):
logging.info("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
start_epoch = checkpoint['epoch']
best_objective = checkpoint['objective']
logging.info('objective: '+str(best_objective))
model = checkpoint['model']
if use_cuda:
model.module.set_num_terms(args.numSeriesTerms)
else:
model.set_num_terms(args.numSeriesTerms)
logging.info("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
else:
logging.info("=> no checkpoint found at '{}'".format(args.resume))
try_make_dir(args.save_dir)
if args.analysisTraceEst:
anaylse_trace_estimation(model, testset, use_cuda, args.extension)
return
if args.norm:
test_spec_norm(model, in_shapes, args.extension)
return
if args.interpolate:
interpolate(model, testloader, testset, start_epoch, use_cuda, best_objective, args.dataset)
return
if args.evaluate:
test_log = open(os.path.join(args.save_dir, "test_log.txt"), 'w')
if use_cuda:
model.module.set_num_terms(args.numSeriesTerms)
else:
model.set_num_terms(args.numSeriesTerms)
model = torch.nn.DataParallel(model.module)
test(best_objective, args, model, start_epoch, testloader, viz, use_cuda, test_log)
return
epochs = args.design_epochs * args.train_epochs
logging.info('| Train Epochs: ' + str(epochs))
logging.info('| Initial Learning Rate: ' + str(args.lr))
elapsed_time = 0
test_objective = -np.inf
logging.info("optimizer = %r", args.optimizer)
if args.optimizer == "adam":
optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
elif args.optimizer == "adamax":
optimizer = optim.Adamax(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
else:
optimizer = optim.SGD(model.parameters(), lr=args.lr,
momentum=0.9, weight_decay=args.weight_decay, nesterov=args.nesterov)
with open(os.path.join(args.save_dir, 'params.json'), 'w') as f:
f.write(json.dumps(args.__dict__, indent=4))
train_log = open(os.path.join(args.save_dir, "train_log.txt"), 'w')
logging.error("design ds size = %r", len(design.dataset))
logging.info("TRAINING!")
for epoch in range(1, epochs):
start_time = time.time()
logging.debug("start << training epoch %d", epoch)
logging.debug("trainloader = %r", trainloader)
train(args, model, optimizer, epoch, trainloader(), design.dataset, viz, use_cuda, train_log)
logging.debug("finish >> training epoch %d", epoch)
epoch_time = time.time() - start_time
elapsed_time += epoch_time
logging.info('| Elapsed time : %d:%02d:%02d' % (get_hms(elapsed_time)))
if epoch > 0 and epoch % args.train_epochs == 0:
# after every `args.train_epochs` epochs update the design.
# NOTE: since the epoch counting starts at `1` and not at `0` this doesn't happen on the first
# iteration. however, to be on the safe side for future changes the condition includes
# `epoch > 0`.
logging.debug("start << updating design for epoch %d", epoch)
design.update_design(model)
logging.debug("finish >> updating design for epoch %d", epoch)
logging.info('Testing model')
test_log = open(os.path.join(args.save_dir, "test_log.txt"), 'w')
test_objective = test(test_objective, args, model, epoch, testloader, viz, use_cuda, test_log)
logging.info('* Test results : objective = %.2f%%' % (test_objective))
with open(os.path.join(args.save_dir, 'final.txt'), 'w') as f:
f.write(str(test_objective))
model_file = os.path.join(args.save_dir, 'model.tr')
logging.info("saving model to file %s", model_file)
torch.save(model.cpu(), model_file)
if __name__ == '__main__':
main_tid = threading.main_thread().ident
try:
main()
except Exception:
if threading.main_thread().ident == threading.current_thread().ident:
logging.error("critical error", exc_info=True)
sys.exit(1) #raise
|
{"hexsha": "37c1174626f8417520492303350131821f7a3921", "size": 22819, "ext": "py", "lang": "Python", "max_stars_repo_path": "CIFAR_main.py", "max_stars_repo_name": "ohadoh-math/invertible-resnet", "max_stars_repo_head_hexsha": "4f05b9d1761c2d46cc05d9748ef3e690f8b9c0b2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "CIFAR_main.py", "max_issues_repo_name": "ohadoh-math/invertible-resnet", "max_issues_repo_head_hexsha": "4f05b9d1761c2d46cc05d9748ef3e690f8b9c0b2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "CIFAR_main.py", "max_forks_repo_name": "ohadoh-math/invertible-resnet", "max_forks_repo_head_hexsha": "4f05b9d1761c2d46cc05d9748ef3e690f8b9c0b2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.8933566434, "max_line_length": 142, "alphanum_fraction": 0.6270213419, "include": true, "reason": "import numpy", "num_tokens": 5223}
|
import os
import cv2
import numpy as np
import pandas as pd
from torchvision.transforms import transforms
from torch.utils.data import Dataset
from datasets.base_dataset import BaseDataset
from utils.augmenters.augment import seg
import xml.etree.ElementTree as ET
from PIL import Image
import matplotlib.pyplot as plt
class CARLADataset(BaseDataset):
"""
Input params:
stage: The stage of training.
configuration: Configuration dictionary.
"""
def __init__(self, configuration):
super().__init__(configuration)
self._stage = configuration["stage"]
self._image_size = tuple(configuration["input_size"])
self.dataset_path = os.path.join(configuration["dataset_path"])#, "{}".format(self._stage))
#-----------------------------------------------------------------------
#Here is where you can do things like preload data and labels or do image preprocessing
self.sim_img_paths = []
self.real_img_paths = []
for i in os.listdir(os.path.join(self.dataset_path, configuration["sim_data_folder"])):
self.sim_img_paths.append(os.path.join(self.dataset_path, configuration["sim_data_folder"], i))
for i in os.listdir(os.path.join(self.dataset_path, configuration["real_data_folder"])):
self.real_img_paths.append(os.path.join(self.dataset_path, configuration["real_data_folder"], i))
#-----------------------------------------------------------------------
self._transform = transforms.Compose(
[
transforms.ToPILImage(),
transforms.ToTensor(),
]
)
    #This function returns a data, label pair. All data processing and modification should be done by the end of this function
def __getitem__(self, index):
sim_filename = self.sim_img_paths[index]
real_filename = self.real_img_paths[index]
#Image loading assuming the images are in the 'images' folder in the dataset root path
sim_image = Image.open(sim_filename)
sim_image = np.asarray(sim_image)
sim_image = sim_image.astype(np.uint8)
real_image = Image.open(real_filename)
real_image = np.asarray(real_image)
real_image = real_image.astype(np.uint8)
#Image resizing
sim_image = cv2.resize(sim_image, self._image_size)
real_image = cv2.resize(real_image, self._image_size)
#Image formatting
sim_image = np.dstack([sim_image] * 1)
real_image = np.dstack([real_image] * 1)
#Some image augmentation
# image = seg(image=image)
#Apply defined transforms to image from constructor (will convert to tensor)
sim_image = self._transform(sim_image)
real_image = self._transform(real_image)
#image should be the image data, target should be the label
return sim_image, real_image
def __len__(self):
# return the size of the dataset, replace with len of labels array
return len(self.sim_img_paths)
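# Minimal usage sketch. The configuration keys mirror those read in __init__, but the
# values here are placeholders, and it is assumed BaseDataset needs no additional keys.
if __name__ == "__main__":
    example_configuration = {
        "stage": "train",
        "input_size": [256, 256],
        "dataset_path": "./data/CARLA",
        "sim_data_folder": "sim",
        "real_data_folder": "real",
    }
    dataset = CARLADataset(example_configuration)
    sim_image, real_image = dataset[0]
    print(sim_image.shape, real_image.shape)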
|
{"hexsha": "5ddc9b6b9f37f411ee3ce8dae34f97bf191bb248", "size": 3065, "ext": "py", "lang": "Python", "max_stars_repo_path": "datasets/CARLA_dataset.py", "max_stars_repo_name": "And1210/AutoencoderTransformer", "max_stars_repo_head_hexsha": "9c6142421d311d34f6a00cb90dd49388e5f1cdff", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "datasets/CARLA_dataset.py", "max_issues_repo_name": "And1210/AutoencoderTransformer", "max_issues_repo_head_hexsha": "9c6142421d311d34f6a00cb90dd49388e5f1cdff", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "datasets/CARLA_dataset.py", "max_forks_repo_name": "And1210/AutoencoderTransformer", "max_forks_repo_head_hexsha": "9c6142421d311d34f6a00cb90dd49388e5f1cdff", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.9277108434, "max_line_length": 127, "alphanum_fraction": 0.6453507341, "include": true, "reason": "import numpy", "num_tokens": 614}
|
from decimal import Decimal
import argparse
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import graph_utils
import numpy as np
import process_csv
import sys
def main(args):
parser = argparse.ArgumentParser()
parser.add_argument('--input-file', dest='input_files', nargs=2, action='append', required=True, help="csv file to plot. Needs a label as a second argument.")
parser.add_argument('--keep-temps', dest='keep_temps', default=False, action='store_true', help="Keep temp files")
parser.add_argument('--server', dest='server_ip', required=True)
parser.add_argument('--output-name', dest='output_name', required=True)
parser.add_argument('--title', dest='title', required=False, default=None)
# This is to avoid issues with tcpdump hanging.
parser.add_argument('--packets', type=int, required=False,
default=None, dest='packets',
help="Number of packets to process from a pcap file")
args = parser.parse_args(args)
plt.figure(1)
plt.clf()
plt.figure(2)
plt.clf()
plt.figure(3)
plt.clf()
plt.figure(4)
plt.clf()
pcap_files = args.input_files
output_label = args.output_name
for (pcap_file, label) in pcap_files:
if pcap_file.endswith('.csv'):
incoming_ipg_gaps = \
process_csv.extract_ipgs(pcap_file, to_ip=args.server_ip)
outgoing_ipg_gaps = \
process_csv.extract_ipgs(pcap_file, from_ip=args.server_ip)
range = [min(incoming_ipg_gaps), max(incoming_ipg_gaps)]
print "Dealing with incoming IPG gaps"
print "Range is ", range
print "Median is ", np.median(incoming_ipg_gaps)
print "Deviation is ", np.std(incoming_ipg_gaps)
# Before we plot these, they need to be converted to normal
# floats. To do this, multiply by 10**9
for i in xrange(len(incoming_ipg_gaps)):
incoming_ipg_gaps[i] = float(Decimal(1000000000.0) * incoming_ipg_gaps[i])
for i in xrange(len(outgoing_ipg_gaps)):
outgoing_ipg_gaps[i] = float(Decimal(1000000000.0) * outgoing_ipg_gaps[i])
# Remove anything greater than the 99th percentile to stop
            # it affecting the bins.
i = 0
nintyninth_percentile = np.percentile(incoming_ipg_gaps, 99)
while i < len(incoming_ipg_gaps):
if incoming_ipg_gaps[i] > nintyninth_percentile:
del incoming_ipg_gaps[i]
else:
i += 1
print nintyninth_percentile
            # Avoid issues with the CDF line decreasing to zero after the data is
# plotted.
min_lim = min(incoming_ipg_gaps)
max_lim = max(incoming_ipg_gaps)
small_diff = (min_lim + max_lim) / 10000.0
bins = np.linspace(min_lim, max_lim + small_diff, 1000)
bins = np.append(bins, np.inf)
plt.figure(1)
plt.hist(incoming_ipg_gaps, bins=bins, cumulative=True,
histtype='step', normed=True, label=label)
# Now plot a log space version, with all times included.
incoming_ipg_gas_no_zeroes = graph_utils.no_zeroes(incoming_ipg_gaps)
if len(incoming_ipg_gas_no_zeroes) > 0:
lim_min = min(incoming_ipg_gas_no_zeroes)
lim_max = max(incoming_ipg_gas_no_zeroes)
bins = graph_utils.get_logspace(lim_min, lim_max)
plt.figure(2)
plt.hist(incoming_ipg_gas_no_zeroes, bins=bins, cumulative=True,
histtype='step', normed=True, label=label)
else:
print "Error:: found only zero times on the incoming IPG gaps"
# Now do the outgoing.
# Remove anything greater than the 99th percentile to stop
            # it affecting the bins.
i = 0
nintyninth_percentile = np.percentile(outgoing_ipg_gaps, 99)
while i < len(outgoing_ipg_gaps):
if outgoing_ipg_gaps[i] > nintyninth_percentile:
del outgoing_ipg_gaps[i]
else:
i += 1
print nintyninth_percentile
            # Avoid issues with the CDF line decreasing to zero after the data
# is plotted.
min_lim = min(outgoing_ipg_gaps)
max_lim = max(outgoing_ipg_gaps)
small_diff = (min_lim + max_lim) / 10000.0
bins = np.linspace(min_lim, max_lim + small_diff, 1000)
bins = np.append(bins, np.inf)
plt.figure(3)
plt.hist(outgoing_ipg_gaps, bins=bins, cumulative=True,
histtype='step', normed=True, label=label)
# Now plot the logspace version.
outgoing_ipg_gaps_no_zeroes = graph_utils.no_zeroes(outgoing_ipg_gaps)
if len(outgoing_ipg_gaps_no_zeroes) > 0:
min_lim = min(outgoing_ipg_gaps_no_zeroes)
max_lim = max(outgoing_ipg_gaps_no_zeroes)
bins = graph_utils.get_logspace(min_lim, max_lim)
plt.figure(4)
plt.hist(outgoing_ipg_gaps_no_zeroes, bins=bins,
cumulative=True,
histtype='step', normed=True, label=label)
else:
print "Error: No non-zero IPGs found in outgoing data"
if args.title:
plt.figure(1)
plt.title('Client Traffic: ' + args.title)
plt.figure(2)
plt.title('Client Traffic: ' + args.title)
plt.figure(3)
plt.title('Server Traffic: ' + args.title)
plt.figure(4)
plt.title('Server Traffic: ' + args.title)
label_count = len(args.input_files)
graph_utils.latexify(bottom_label_rows=label_count / 2)
plt.figure(1)
plt.xlim([min(outgoing_ipg_gaps), nintyninth_percentile])
plt.ylabel("CDF")
plt.xlabel("Inter-Arrival Time (ns)")
graph_utils.set_legend_below()
graph_utils.set_non_negative_axes()
graph_utils.set_yax_max_one()
graph_utils.set_ticks()
filename = output_label + '_ipg_gaps_clients.eps'
plt.savefig(filename)
print "Done! File is in ", filename
plt.figure(2)
plt.ylabel("CDF")
plt.xlabel("Inter-Arrival Time (ns)")
graph_utils.set_legend_below()
graph_utils.set_log_x()
graph_utils.set_non_negative_axes()
graph_utils.set_yax_max_one()
graph_utils.set_ticks()
filename = output_label + '_ipg_gaps_clients_log.eps'
plt.savefig(filename)
print "Done! File is in ", filename
plt.figure(3)
plt.xlim([min(outgoing_ipg_gaps), nintyninth_percentile])
plt.ylabel("CDF")
plt.xlabel("Inter-Arrival Time (ns)")
graph_utils.set_legend_below()
graph_utils.set_non_negative_axes()
graph_utils.set_yax_max_one()
graph_utils.set_ticks()
filename = output_label + '_ipg_gaps_server.eps'
plt.savefig(filename)
print "Done! File is in ", filename
plt.figure(4)
plt.ylabel("CDF")
plt.xlabel("Inter-Arrival Time (ns)")
graph_utils.set_legend_below()
graph_utils.set_log_x()
graph_utils.set_non_negative_axes()
graph_utils.set_yax_max_one()
graph_utils.set_ticks()
filename = output_label + '_ipg_gaps_server_log.eps'
plt.savefig(filename)
print "Done! File is in ", filename
if __name__ == "__main__":
main(sys.argv[1:])
|
{"hexsha": "786d97d2ef18ac0971363c4909806a1a5ff8fb51", "size": 7252, "ext": "py", "lang": "Python", "max_stars_repo_path": "ipg_distribution_graph.py", "max_stars_repo_name": "j-c-w/EXPCAP_Process", "max_stars_repo_head_hexsha": "33ec7f6fdc8e794261a293d7bba0225b76606012", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ipg_distribution_graph.py", "max_issues_repo_name": "j-c-w/EXPCAP_Process", "max_issues_repo_head_hexsha": "33ec7f6fdc8e794261a293d7bba0225b76606012", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ipg_distribution_graph.py", "max_forks_repo_name": "j-c-w/EXPCAP_Process", "max_forks_repo_head_hexsha": "33ec7f6fdc8e794261a293d7bba0225b76606012", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.07960199, "max_line_length": 163, "alphanum_fraction": 0.6496138996, "include": true, "reason": "import numpy", "num_tokens": 1814}
|
import numpy as np
from pymongo.errors import DuplicateKeyError
from huntsman.drp.utils import mongo
from huntsman.drp.utils.ingest import METRIC_SUCCESS_FLAG
from huntsman.drp.utils.date import parse_date
from huntsman.drp.utils.fits import read_fits_data, read_fits_header, parse_fits_header
from huntsman.drp.collection.collection import Collection
from huntsman.drp.collection.calib import ReferenceCalibCollection
from huntsman.drp.document import ExposureDocument, CalibDocument
from huntsman.drp.metrics.raw import metric_evaluator
__all__ = ("ExposureCollection",)
class ExposureCollection(Collection):
""" Table to store metadata for Huntsman exposures. """
# Document type associated with this collection
_DocumentClass = ExposureDocument
# Flag to specify if the raw metrics were calculated successfully during ingestion
_metric_success_flag = METRIC_SUCCESS_FLAG
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Ignore certain metrics if required
metrics_ignore = self.config.get("raw_metrics_ignore", ())
for metric_name in metrics_ignore:
metric_evaluator.remove_function(metric_name)
self.ref_calib_collection = ReferenceCalibCollection.from_config(self.config)
# Public methods
def insert_one(self, document, *args, **kwargs):
""" Override to make sure the document does not clash with an fpacked version.
Args:
document (ExposureDocument): The document to insert.
*args: Parsed to super().insert_one.
**kwargs: Parsed to super().insert_one.
Raises:
DuplicateKeyError: If a .fz / .fits duplicate already exists.
"""
doc = self._DocumentClass(document, copy=True, config=self.config)
filename = doc["filename"]
if filename.endswith(".fits"):
if self.find({"filename": filename + ".fz"}):
raise DuplicateKeyError(f"Tried to insert {filename} but a .fz version exists.")
elif filename.endswith(".fits.fz"):
if self.find({"filename": filename.strip(".fz")}):
raise DuplicateKeyError(f"Tried to insert {filename} but a .fits version exists.")
return super().insert_one(document, *args, **kwargs)
def ingest_file(self, filename, **kwargs):
""" Calculate metrics and insert / update document in collection.
Args:
filename (str): The filename to ingest.
"""
self.logger.debug(f"Ingesting file into {self}: {filename}.")
document = {"filename": filename}
try:
document["metrics"], success = self._calculate_metrics(filename, **kwargs)
document[self._metric_success_flag] = success
except Exception as err:
self.logger.error(f"Error calculating metrics for {filename}: {err!r}")
document[self._metric_success_flag] = False
# Try and update the document with the parsed header
# NOTE: We read the header again because it may have been modified
try:
parsed_header = parse_fits_header(read_fits_header(filename))
document.update(parsed_header)
# Log error and insert document into DB anyway
except Exception as err:
self.logger.error(f"Error parsing header for {filename}: {err!r}")
# Use filename query as metrics etc can change
self.replace_one({"filename": filename}, document, upsert=True)
# Raise an exception if not success
if not document[self._metric_success_flag]:
raise RuntimeError(f"Metric evaluation unsuccessful for {filename}.")
def get_matching_raw_calibs(self, calib_document, sort_date=None, **kwargs):
""" Return matching set of calib IDs for a given calib document.
Args:
calib_document (CalibDocument): The calib document to match with.
            sort_date (object, optional): If provided, sort the matching documents by
                increasing time difference from this date.
            **kwargs: Parsed to self.find.
Returns:
list of ExposureDocument: The matching raw calibs ordered by increasing time diff.
"""
self.logger.debug(f"Finding raw calibs for {calib_document}.")
dataset_type = calib_document["datasetType"]
# Make the document filter
matching_keys = self.config["calibs"]["required_fields"][dataset_type]
doc_filter = {k: calib_document[k] for k in matching_keys}
# Add observation_type to doc filter
# NOTE: Defects are made from dark exposures
doc_filter["observation_type"] = "dark" if dataset_type == "defects" else dataset_type
# Do the query
documents = self.find(doc_filter, **kwargs)
self.logger.debug(f"Found {len(documents)} calib exposures matching {calib_document}.")
# Sort by time difference in increasing order
# This makes it easy to select only the nearest matches using indexing
if sort_date is not None:
date = parse_date(sort_date)
timedeltas = [abs(d["date"] - date) for d in documents]
indices = np.argsort(timedeltas)
documents = [documents[i] for i in indices]
return documents
def get_calib_docs(self, date, quality_filter=True, **kwargs):
""" Get all possible CalibDocuments from a set of ExposureDocuments.
Args:
date (object): The calib date.
            quality_filter (bool, optional): If True, apply the raw quality cuts when
                looking up exposure documents. Default: True.
            **kwargs: Parsed to self.find.
        Returns:
            set of CalibDocument: The calib documents.
"""
self.logger.debug(f"Finding calib docs from exposure documents for {date}.")
data_types = self.config["calibs"]["types"]
# Get metadata for all raw calibs that are valid for this date
documents = self.find({"observation_type": {"$in": data_types}},
quality_filter=quality_filter, **kwargs)
# Extract the calib docs from the set of exposure docs
calib_docs = set([self.raw_doc_to_calib_doc(d, date=date) for d in documents])
self.logger.debug(f"Found {len(calib_docs)} possible calib documents.")
# Get defects docs by copying darks
# NOTE: This assumes a one-to-one correspondence between darks and defects
defects_docs = []
for doc in calib_docs:
if doc["datasetType"] == "dark":
doc = doc.copy()
doc["datasetType"] = "defects"
defects_docs.append(doc)
calib_docs.update(defects_docs)
return calib_docs
def raw_doc_to_calib_doc(self, document, date):
""" Convert a ExposureDocument into its corresponding CalibDocument.
Args:
document (ExposureDocument): The raw calib document.
date (object): The calib date.
Returns:
CalibDocument: The matching calib document.
"""
datasetType = document["observation_type"]
# Get minimal calib metadata
keys = self.config["calibs"]["required_fields"][datasetType]
calib_dict = {k: document[k] for k in keys}
# Add extra required metadata
calib_dict["date"] = date
calib_dict["datasetType"] = datasetType
return CalibDocument(calib_dict)
def clear_calexp_metrics(self):
""" Clear all calexp metrics from the collection.
This is useful e.g. to trigger them for reprocessing.
"""
self.logger.info(f"Clearing all calexp metrics from {self}.")
self._collection.update_many({}, {"$unset": {"metrics.calexp": ""}})
# Private methods
def _get_quality_filter(self):
""" Return the Query object corresponding to quality cuts.
Returns:
huntsman.drp.utils.query.Query: The Query object.
"""
quality_config = self.config["quality"]["raw"].copy()
filters = []
for data_type, document_filter in quality_config.items():
if document_filter is not None:
# Create a new document filter for this data type
document_filter["observation_type"] = data_type
filters.append(mongo.encode_mongo_filter(document_filter))
# Allow data types that do not have any quality requirements in config
data_types = list(quality_config.keys())
filters.append({"observation_type": {"$nin": data_types}})
return mongo.mongo_logical_or(filters)
def _calculate_metrics(self, filename, **kwargs):
""" Calculate metrics for a file, typically on ingestion.
This function will query the calib collection for reference images.
Args:
filename (str): The filename.
**kwargs: Parsed to metric_evaluator.evaluate.
Returns:
dict: The dictionary of metrics.
bool: True if the metric calculation was successful, else False.
"""
self.logger.debug(f"Calculating metrics for {filename}")
# Read the file
data = read_fits_data(filename)
header = read_fits_header(filename)
parsed_header = parse_fits_header(header)
# Get a reference image from the calib collection
ref_image = None
if "observation_type" in parsed_header:
if parsed_header["observation_type"] in self.config["calibs"]["types"]:
try:
ref_doc = self.ref_calib_collection.get_matching_calib(parsed_header)
ref_image = read_fits_data(ref_doc["filename"])
self.logger.debug(f"Found reference calib for {filename}")
except Exception as err:
self.logger.warning(f"Unable to find reference calib for {filename}: {err!r}")
# Calculate metrics
metrics, success = metric_evaluator.evaluate(
filename, header=header, parsed_header=parsed_header, data=data, ref_image=ref_image,
**kwargs)
self.logger.debug(f"Finished calculating metrics for {filename}, success={success}")
return metrics, success
|
{"hexsha": "37869bfc04280b9092a0054512078913663a0db5", "size": 10335, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/huntsman/drp/collection/exposure.py", "max_stars_repo_name": "AstroHuntsman/huntsman-drp", "max_stars_repo_head_hexsha": "00f045ccccc1f7545da491457a2b17b9aabea89a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/huntsman/drp/collection/exposure.py", "max_issues_repo_name": "AstroHuntsman/huntsman-drp", "max_issues_repo_head_hexsha": "00f045ccccc1f7545da491457a2b17b9aabea89a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 139, "max_issues_repo_issues_event_min_datetime": "2020-10-02T01:49:29.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-07T04:58:51.000Z", "max_forks_repo_path": "src/huntsman/drp/collection/exposure.py", "max_forks_repo_name": "AstroHuntsman/huntsman-drp", "max_forks_repo_head_hexsha": "00f045ccccc1f7545da491457a2b17b9aabea89a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2020-09-03T03:31:30.000Z", "max_forks_repo_forks_event_max_datetime": "2020-09-07T05:22:23.000Z", "avg_line_length": 41.1752988048, "max_line_length": 98, "alphanum_fraction": 0.6503144654, "include": true, "reason": "import numpy", "num_tokens": 2134}
|
##################################################################
#--------------- Error stats for saved data ---------------
# (T. Kent: amttk@leeds.ac.uk)
##################################################################
## generic modules
import os
import errno
import numpy as np
import matplotlib.pyplot as plt
## custom modules
from parameters import *
from crps_calc_fun import crps_calc
##################################################################
def ave_stats(i, j, k, dirname):
'''
INPUT:
## e.g. if i, j, k, ... are coming from the outer loop:
i=1
j=0
k=0
##
##
dirname = '/addinfv7_4dres'
##
OUTPUT:
spr, rmse, crps, OI for fc and an
'''
# LOAD DATA FROM GIVEN DIRECTORY
cwd = os.getcwd()
dirn = str(cwd+dirname+dirname+str(i+1)+str(j+1)+str(k+1))
if os.path.exists(dirn):
print(' ')
print('Path: ')
print(dirn)
print(' exists... calculating stats...')
print(' ')
# parameters for outer loop
o_d = [20,40]
loc = [1.5, 2.5, 3.5, 0.]
#inf = [1.1, 1.25, 1.5, 1.75]
inf = [1.01, 1.05, 1.1]
# LOAD DATA FROM GIVEN DIRECTORY
X = np.load(str(dirn+'/X_array.npy')) # fc ensembles
X_tr = np.load(str(dirn+'/X_tr_array.npy')) # truth
Xan = np.load(str(dirn+'/Xan_array.npy')) # an ensembles
Y_obs = np.load(str(dirn+'/Y_obs_array.npy')) # obs ensembles
OI = np.load(str(dirn+'/OI.npy')) # OI diagnostic
#np.set_printoptions(formatter={'float': '{: 0.3f}'.format})
#print 'X_array shape (n_d,n_ens,T) : ', np.shape(X)
#print 'X_tr_array shape (n_d,1,T) : ', np.shape(X_tr)
#print 'Xan_array shape (n_d,n_ens,T) : ', np.shape(Xan)
#print 'Y_obs_array shape (p,n_ens,T) : ', np.shape(Y_obs)
#print 'OI shape (Neq + 1,T) : ', np.shape(OI)
##################################################################
Neq = np.shape(OI)[0] - 1
n_d = np.shape(X)[0]
Nk_fc = n_d//Neq
Kk_fc = 1./Nk_fc
n_ens = np.shape(X)[1]
n_obs = np.shape(Y_obs)[0]
obs_dens = n_d//n_obs
t_an = np.shape(X)[2]
time_vec = range(0,t_an)
# masks for locating model variables in state vector
h_mask = range(0,Nk_fc)
hu_mask = range(Nk_fc,2*Nk_fc)
hr_mask = range(2*Nk_fc,3*Nk_fc)
# masks for locating obs locations
row_vec = range(obs_dens,n_d+1,obs_dens)
obs_mask = np.array(row_vec[0:n_obs//Neq])-1
h_obs_mask = range(0,n_obs//Neq)
hu_obs_mask = range(n_obs//Neq,2*n_obs//Neq)
hr_obs_mask = range(2*n_obs//Neq,3*n_obs//Neq)
nz_index = np.where(OI[0,:])
if (len(nz_index[0]) < t_an-15):
print('Runs crashed before Tmax...')
return (float('nan'),) * 9
else:
##################################################################
print('Runs completed... ')
# for means and deviations
Xbar = np.empty(np.shape(X))
Xdev = np.empty(np.shape(X))
Xanbar = np.empty(np.shape(X))
Xandev = np.empty(np.shape(X))
Xdev_tr = np.empty(np.shape(X))
Xandev_tr = np.empty(np.shape(X))
# for errs as at each assim time
rmse_fc = np.empty((Neq,len(time_vec)))
rmse_an = np.empty((Neq,len(time_vec)))
spr_fc = np.empty((Neq,len(time_vec)))
spr_an = np.empty((Neq,len(time_vec)))
ame_fc = np.empty((Neq,len(time_vec)))
ame_an = np.empty((Neq,len(time_vec)))
crps_fc = np.empty((Neq,len(time_vec)))
crps_an = np.empty((Neq,len(time_vec)))
ONE = np.ones([n_ens,n_ens])
ONE = ONE/n_ens # NxN array with elements equal to 1/N
print(' *** Calculating errors from ', dirn)
for T in time_vec[1:]:
plt.clf() # clear figs from previous loop
Xbar[:,:,T] = np.dot(X[:,:,T],ONE) # fc mean
Xdev[:,:,T] = X[:,:,T] - Xbar[:,:,T] # fc deviations from mean
Xdev_tr[:,:,T] = X[:,:,T] - X_tr[:,:,T] # fc deviations from truth
Xanbar[:,:,T] = np.dot(Xan[:,:,T],ONE) # an mean
Xandev[:,:,T] = Xan[:,:,T] - Xanbar[:,:,T] # an deviations from mean
Xandev_tr[:,:,T] = Xan[:,:,T] - X_tr[:,:,T] # an deviations from truth
##################################################################
### ERRORS ####
##################################################################
# FORECAST: mean error
fc_err = Xbar[:,0,T] - X_tr[:,0,T] # fc_err = ens. mean - truth
fc_err2 = fc_err**2
# ANALYSIS: mean error
an_err = Xanbar[:,0,T] - X_tr[:,0,T] # an_err = analysis ens. mean - truth
an_err2 = an_err**2
# FORECAST: cov matrix for spread...
Pf = np.dot(Xdev[:,:,T],np.transpose(Xdev[:,:,T]))
Pf = Pf/(n_ens - 1) # fc covariance matrix
var_fc = np.diag(Pf)
# ... and rmse
Pf_tr = np.dot(Xdev_tr[:,:,T],np.transpose(Xdev_tr[:,:,T]))
Pf_tr = Pf_tr/(n_ens - 1) # fc covariance matrix w.r.t. truth
var_fct = np.diag(Pf_tr)
# ANALYSIS: cov matrix for spread...
Pa = np.dot(Xandev[:,:,T],np.transpose(Xandev[:,:,T]))
Pa = Pa/(n_ens - 1) # analysis covariance matrix
var_an = np.diag(Pa)
# ... and rmse
Pa_tr = np.dot(Xandev_tr[:,:,T],np.transpose(Xandev_tr[:,:,T]))
Pa_tr = Pa_tr/(n_ens - 1) # analysis covariance matrix w.r.t. truth
var_ant = np.diag(Pa_tr)
##################################################################
### CRPS ####
##################################################################
CRPS_fc = np.empty((Neq,Nk_fc))
CRPS_an = np.empty((Neq,Nk_fc))
for ii in h_mask:
CRPS_fc[0,ii] = crps_calc(X[ii,:,T],X_tr[ii,0,T])
CRPS_fc[1,ii] = crps_calc(X[ii+Nk_fc,:,T],X_tr[ii+Nk_fc,0,T])
CRPS_fc[2,ii] = crps_calc(X[ii+2*Nk_fc,:,T],X_tr[ii+2*Nk_fc,0,T])
CRPS_an[0,ii] = crps_calc(Xan[ii,:,T],X_tr[ii,0,T])
CRPS_an[1,ii] = crps_calc(Xan[ii+Nk_fc,:,T],X_tr[ii+Nk_fc,0,T])
CRPS_an[2,ii] = crps_calc(Xan[ii+2*Nk_fc,:,T],X_tr[ii+2*Nk_fc,0,T])
#################################################################
# domain-averaged errors
ame_an[0,T] = np.mean(np.absolute(an_err[h_mask]))
ame_fc[0,T] = np.mean(np.absolute(fc_err[h_mask]))
spr_an[0,T] = np.sqrt(np.mean(var_an[h_mask]))
spr_fc[0,T] = np.sqrt(np.mean(var_fc[h_mask]))
rmse_an[0,T] = np.sqrt(np.mean(an_err2[h_mask]))
rmse_fc[0,T] = np.sqrt(np.mean(fc_err2[h_mask]))
crps_an[0,T] = np.mean(CRPS_an[0,:])
crps_fc[0,T] = np.mean(CRPS_fc[0,:])
ame_an[1,T] = np.mean(np.absolute(an_err[hu_mask]))
ame_fc[1,T] = np.mean(np.absolute(fc_err[hu_mask]))
spr_an[1,T] = np.sqrt(np.mean(var_an[hu_mask]))
spr_fc[1,T] = np.sqrt(np.mean(var_fc[hu_mask]))
rmse_an[1,T] = np.sqrt(np.mean(an_err2[hu_mask]))
rmse_fc[1,T] = np.sqrt(np.mean(fc_err2[hu_mask]))
crps_an[1,T] = np.mean(CRPS_an[1,:])
crps_fc[1,T] = np.mean(CRPS_fc[1,:])
ame_an[2,T] = np.mean(np.absolute(an_err[hr_mask]))
ame_fc[2,T] = np.mean(np.absolute(fc_err[hr_mask]))
spr_an[2,T] = np.sqrt(np.mean(var_an[hr_mask]))
spr_fc[2,T] = np.sqrt(np.mean(var_fc[hr_mask]))
rmse_an[2,T] = np.sqrt(np.mean(an_err2[hr_mask]))
rmse_fc[2,T] = np.sqrt(np.mean(fc_err2[hr_mask]))
crps_an[2,T] = np.mean(CRPS_an[2,:])
crps_fc[2,T] = np.mean(CRPS_fc[2,:])
###########################################################################
spr_fc_ave = spr_fc[:,nz_index].mean(axis=-1)
err_fc_ave = ame_fc[:,nz_index].mean(axis=-1)
rmse_fc_ave = rmse_fc[:,nz_index].mean(axis=-1)
crps_fc_ave = crps_fc[:,nz_index].mean(axis=-1)
spr_an_ave = spr_an[:,nz_index].mean(axis=-1)
err_an_ave = ame_an[:,nz_index].mean(axis=-1)
rmse_an_ave = rmse_an[:,nz_index].mean(axis=-1)
crps_an_ave = crps_an[:,nz_index].mean(axis=-1)
OI_ave = 100*OI[0,nz_index].mean(axis=-1)
spr_fc_ave = spr_fc_ave.mean()
err_fc_ave = err_fc_ave.mean()
rmse_fc_ave = rmse_fc_ave.mean()
crps_fc_ave = crps_fc_ave.mean()
spr_an_ave = spr_an_ave.mean()
err_an_ave = err_an_ave.mean()
rmse_an_ave = rmse_an_ave.mean()
crps_an_ave = crps_an_ave.mean()
print('spr_fc ave. =', spr_fc_ave)
print('err_fc ave. =', err_fc_ave)
print('rmse_fc ave. =', rmse_fc_ave)
print('crps_fc ave. =', crps_fc_ave)
print('spr_an ave. =', spr_an_ave)
print('err_an ave. =', err_an_ave)
print('rmse_an ave. =', rmse_an_ave)
print('crps_an ave. =', crps_an_ave)
print('OI ave. =', OI_ave)
return spr_fc_ave, err_fc_ave, rmse_fc_ave, crps_fc_ave, spr_an_ave, err_an_ave, rmse_an_ave, crps_an_ave, OI_ave
else:
print(' ')
print(' Path:')
print(dirn)
print('does not exist... moving on to the next one...')
print(' ')
return (float('nan'),) * 9
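# Minimal usage sketch (hypothetical loop indices and directory name):
#
#   spr_fc, err_fc, rmse_fc, crps_fc, spr_an, err_an, rmse_an, crps_an, OI = \
#       ave_stats(1, 0, 0, '/addinfv7_4dres')
#
# Each returned value is a scalar averaged over assimilation times and over the
# h, hu and hr fields; NaNs are returned when the run directory is missing or
# the run crashed before Tmax.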
|
{"hexsha": "9c9c2abd255d8b07967ea55699497eaf29ff141b", "size": 10395, "ext": "py", "lang": "Python", "max_stars_repo_path": "analysis_diag_stats.py", "max_stars_repo_name": "tkent198/modRSW_EnKF", "max_stars_repo_head_hexsha": "fc9f0bcc6f753a05fed245d4d2987cd3a34078ad", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2019-10-11T09:30:59.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-05T08:39:19.000Z", "max_issues_repo_path": "analysis_diag_stats.py", "max_issues_repo_name": "tkent198/modRSW_EnKF", "max_issues_repo_head_hexsha": "fc9f0bcc6f753a05fed245d4d2987cd3a34078ad", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "analysis_diag_stats.py", "max_forks_repo_name": "tkent198/modRSW_EnKF", "max_forks_repo_head_hexsha": "fc9f0bcc6f753a05fed245d4d2987cd3a34078ad", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2017-09-03T18:16:26.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-05T08:39:24.000Z", "avg_line_length": 41.0869565217, "max_line_length": 143, "alphanum_fraction": 0.4648388648, "include": true, "reason": "import numpy", "num_tokens": 2854}
|
import numpy as np
from lassolver.matrices.base import Base
class iidGaussian(Base):
def __init__(self, M, N, m=0, v=1):
super().__init__(M, N)
self.A = self.set_matrix(M, N, m, v)
def set_matrix(self, row, column, mean, var):
"""
Return an i.i.d. (independent and identically distributed) Gaussian matrix.
"""
return np.random.normal(mean, var**0.5, (row, column))
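# Minimal usage sketch (assuming lassolver's Base only stores M and N):
#
#   A = iidGaussian(M=100, N=200, m=0, v=1/100).A
#   print(A.shape)   # (100, 200), entries drawn from N(0, 1/100)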
|
{"hexsha": "d7289c2aa7f58733a0aed36a4f86d9c75f3e89ba", "size": 418, "ext": "py", "lang": "Python", "max_stars_repo_path": "lassolver/matrices/iid_gauss.py", "max_stars_repo_name": "Ken529n/Lassolver", "max_stars_repo_head_hexsha": "f9f6997bf065622fe462b329c5cc99bd20f7d68b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lassolver/matrices/iid_gauss.py", "max_issues_repo_name": "Ken529n/Lassolver", "max_issues_repo_head_hexsha": "f9f6997bf065622fe462b329c5cc99bd20f7d68b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lassolver/matrices/iid_gauss.py", "max_forks_repo_name": "Ken529n/Lassolver", "max_forks_repo_head_hexsha": "f9f6997bf065622fe462b329c5cc99bd20f7d68b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.8571428571, "max_line_length": 77, "alphanum_fraction": 0.6196172249, "include": true, "reason": "import numpy", "num_tokens": 115}
|
'''
Script for training DFN on self-driving data...
TODO: iterative pruning method proposed by Han 2015
'''
import os
import numpy as np
import argparse
from keras.models import Model, load_model
from keras import optimizers
from keras.callbacks import EarlyStopping, ModelCheckpoint, Callback
from DataGenerator import DataGenerator
from keras.layers import Dense, Flatten, Input
import keras.backend as K
class CustomCallback(Callback):
def on_train_end(self, logs=None):
test_data, test_labels = data_gen.load_data(usage='test')
test_data, test_labels = data_gen.preprocess_data(test_data,
test_labels,
balance='equals',
raw=True)
print('Evaluating model on {} samples'.format(test_labels.shape[0]))
print('Class distribution:')
for i in range(3):
print('{} : {}'.format(i, np.sum(test_labels[:, i]).astype(int)))
scores = self.model.evaluate(test_data, test_labels, verbose=1)
print("Overall model performance on test dataset: {}".format(scores[1]))
def parse():
description = 'DFN self-driving car'
parser = argparse.ArgumentParser(description=description)
parser.add_argument('-data_path',
'--data',
type=str, help='path to data folder')
return parser.parse_args()
def train(dataset_path):
path_checkpoints = '/content/gdrive/Team Drives/Models/best_model.hdf5'
# path_checkpoints = '/content/gdrive/Team Drives/Models/model-improvement-{epoch:02d}-{val_acc:.2f}.hdf5'
file_train = os.path.join(dataset_path, 'train_labels.npy')
file_valid = os.path.join(dataset_path, 'valid_labels.npy')
x_train = np.load(file_train, mmap_mode='r')
x_train_samples = x_train.shape[0]
x_valid = np.load(file_valid, mmap_mode='r')
x_valid_samples = x_valid.shape[0]
del x_train, x_valid
batch_size = 64
epochs = 25
# Model
inputs = Input(shape=(10800,))  # working with an already-flattened image
x = Dense(1333, activation='relu')(inputs)
x = Dense(200, activation='relu')(x)
predictions = Dense(3, activation='softmax')(x)
model = Model(inputs=inputs, outputs=predictions)
sgd = optimizers.SGD(lr=0.01, decay=0.0005, momentum=0.9)
model.compile(loss='categorical_crossentropy',
optimizer=sgd,
metrics=['accuracy'])
trainable_count = int(np.sum([K.count_params(p) for p in set(model.trainable_weights)]))
print("Total number of parameters: {}".format(trainable_count))
stopper = EarlyStopping(monitor='val_acc', min_delta=0.0001, patience=3, verbose=1)
checkpoint = ModelCheckpoint(path_checkpoints,
monitor='val_acc',
verbose=1,
save_best_only=True,
mode='max')
custom_callback = CustomCallback()
global data_gen
data_gen = DataGenerator(dataset_path)
model.fit_generator(data_gen.npy_generator(usage='train', batch_size=batch_size),
steps_per_epoch=np.ceil(
x_train_samples / batch_size).astype(int),
validation_data=data_gen.npy_generator(
usage='valid', batch_size=batch_size),
validation_steps=np.ceil(
x_valid_samples / batch_size).astype(int),
callbacks=[stopper, checkpoint, custom_callback],
epochs=epochs,
verbose=1)
def main():
args = parse()
train(args.data)
if __name__ == '__main__':
main()
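# Example invocation (hypothetical data folder containing train_labels.npy and
# valid_labels.npy, as expected by DataGenerator):
#
#   python train-dfn.py -data_path /path/to/data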
|
{"hexsha": "824201cbbe0665b08f16d90041853fcb63b4790f", "size": 3845, "ext": "py", "lang": "Python", "max_stars_repo_path": "transfer/train-dfn.py", "max_stars_repo_name": "paulaksm/feature-transfer", "max_stars_repo_head_hexsha": "0814d1e38691f67d6aa1637235df0b152d0a4b05", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-02-24T20:39:09.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-08T02:33:12.000Z", "max_issues_repo_path": "transfer/train-dfn.py", "max_issues_repo_name": "paulaksm/feature-transfer", "max_issues_repo_head_hexsha": "0814d1e38691f67d6aa1637235df0b152d0a4b05", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "transfer/train-dfn.py", "max_forks_repo_name": "paulaksm/feature-transfer", "max_forks_repo_head_hexsha": "0814d1e38691f67d6aa1637235df0b152d0a4b05", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.619047619, "max_line_length": 110, "alphanum_fraction": 0.6052015605, "include": true, "reason": "import numpy", "num_tokens": 800}
|
using Libdl, Test, OpenBLAS_jll, OpenBLAS32_jll, MKL_jll
include("utils.jl")
function unpack_loaded_libraries(config::lbt_config_t)
libs = LBTLibraryInfo[]
idx = 1
lib_ptr = unsafe_load(config.loaded_libs, idx)
while lib_ptr != C_NULL
push!(libs, LBTLibraryInfo(unsafe_load(lib_ptr), config.num_exported_symbols))
idx += 1
lib_ptr = unsafe_load(config.loaded_libs, idx)
end
return libs
end
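# Linear search of LBT's exported-symbol table for `symbol`; returns its
# zero-based offset, or `nothing` if the symbol is not exported.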
function find_symbol_offset(config::lbt_config_t, symbol::String)
for sym_idx in 1:config.num_exported_symbols
if unsafe_string(unsafe_load(config.exported_symbols, sym_idx)) == symbol
return UInt32(sym_idx - 1)
end
end
return nothing
end
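# Extract bit `symbol_idx` (zero-based) from a packed UInt8 bitfield such as
# `active_forwards`; a nonzero result means that symbol is forwarded.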
function bitfield_get(field::Vector{UInt8}, symbol_idx::UInt32)
return field[div(symbol_idx,8)+1] & (UInt8(0x01) << (symbol_idx%8))
end
lbt_link_name, lbt_prefix = build_libblastrampoline()
lbt_handle = dlopen("$(lbt_prefix)/$(binlib)/lib$(lbt_link_name).$(shlib_ext)", RTLD_GLOBAL | RTLD_DEEPBIND)
@testset "Config" begin
@test lbt_handle != C_NULL
# Get immediate config, ensure that nothing is loaded
config = lbt_get_config(lbt_handle)
@test isempty(unpack_loaded_libraries(config))
# Load OpenBLAS and OpenBLAS32_jll and then OpenBLAS_jll again
lbt_forward(lbt_handle, OpenBLAS_jll.libopenblas_path; clear=true)
lbt_forward(lbt_handle, OpenBLAS32_jll.libopenblas_path)
lbt_forward(lbt_handle, OpenBLAS_jll.libopenblas_path)
# Get config
config = lbt_get_config(lbt_handle)
# If we're x86_64, ensure LBT thinks it's f2c-adapter capable
if Sys.ARCH == :x86_64
@test (config.build_flags & LBT_BUILDFLAGS_F2C_CAPABLE) != 0
end
# Check to make sure that `dgemm_` is part of the exported symbols:
dgemm_idx = find_symbol_offset(config, "dgemm_")
@test dgemm_idx !== nothing
# Walk the libraries and check we have two
libs = unpack_loaded_libraries(config)
@test length(libs) == 2
# First check OpenBLAS_jll which may or may not be ILP64
@test libs[1].libname == OpenBLAS_jll.libopenblas_path
if Sys.WORD_SIZE == 64 && Sys.ARCH != :aarch64
@test libs[1].suffix == "64_"
@test libs[1].interface == LBT_INTERFACE_ILP64
else
@test libs[1].suffix == ""
@test libs[1].interface == LBT_INTERFACE_LP64
end
@test libs[1].f2c == LBT_F2C_PLAIN
if Sys.ARCH == :x86_64
@test libs[1].cblas == LBT_CBLAS_CONFORMANT
if Sys.iswindows()
@test libs[1].complex_retstyle == LBT_COMPLEX_RETSTYLE_ARGUMENT
else
@test libs[1].complex_retstyle == LBT_COMPLEX_RETSTYLE_NORMAL
end
else
@test libs[1].cblas == LBT_CBLAS_UNKNOWN
@test libs[1].complex_retstyle == LBT_COMPLEX_RETSTYLE_UNKNOWN
end
@test bitfield_get(libs[1].active_forwards, dgemm_idx) != 0
# Next check OpenBLAS32_jll which is always LP64
@test libs[2].libname == OpenBLAS32_jll.libopenblas_path
@test libs[2].suffix == ""
@test libs[2].interface == LBT_INTERFACE_LP64
@test libs[2].f2c == LBT_F2C_PLAIN
# If OpenBLAS32 and OpenBLAS are the same interface (e.g. i686)
# then libs[2].active_forwards should be all zero!
if libs[1].interface == libs[2].interface
@test bitfield_get(libs[2].active_forwards, dgemm_idx) == 0
else
@test bitfield_get(libs[2].active_forwards, dgemm_idx) != 0
end
# Load OpenBLAS32_jll again, but this time clearing it and ensure the config gets cleared too
lbt_forward(lbt_handle, OpenBLAS32_jll.libopenblas_path; clear=true)
config = lbt_get_config(lbt_handle)
libs = unpack_loaded_libraries(config)
@test length(libs) == 1
@test libs[1].libname == OpenBLAS32_jll.libopenblas_path
@test libs[1].suffix == ""
@test libs[1].interface == LBT_INTERFACE_LP64
@test libs[1].f2c == LBT_F2C_PLAIN
end
@testset "get/set threads" begin
lbt_forward(lbt_handle, OpenBLAS32_jll.libopenblas_path; clear=true)
# get/set threads
nthreads = ccall(dlsym(OpenBLAS32_jll.libopenblas_handle, :openblas_get_num_threads), Cint, ())
@test lbt_get_num_threads(lbt_handle) == nthreads
nthreads = div(nthreads, 2)
lbt_set_num_threads(lbt_handle, nthreads)
@test ccall(dlsym(OpenBLAS32_jll.libopenblas_handle, :openblas_get_num_threads), Cint, ()) == nthreads
@test lbt_get_num_threads(lbt_handle) == nthreads
# If we're on a 64-bit system, load OpenBLAS_jll in and cause a mismatch in the threading
if Sys.WORD_SIZE == 64 && Sys.ARCH != :aarch64
lbt_forward(lbt_handle, OpenBLAS_jll.libopenblas_path)
lbt_set_num_threads(lbt_handle, 1)
@test lbt_get_num_threads(lbt_handle) == 1
@test ccall(dlsym(OpenBLAS32_jll.libopenblas_handle, :openblas_get_num_threads), Cint, ()) == 1
ccall(dlsym(OpenBLAS32_jll.libopenblas_handle, :openblas_set_num_threads), Cvoid, (Cint,), 2)
@test lbt_get_num_threads(lbt_handle) == 2
lbt_set_num_threads(lbt_handle, 1)
@test lbt_get_num_threads(lbt_handle) == 1
end
end
slamch_args = []
function record_slamch_args(str::Cstring)
push!(slamch_args, unsafe_string(str))
return 13.37f0
end
# This "default function" will keep track of everyone who tries to call an uninitialized BLAS function
stacktraces = []
function default_capture_stacktrace()
push!(stacktraces, stacktrace(true))
return nothing
end
@testset "footgun API" begin
# Load OpenBLAS32
lbt_forward(lbt_handle, OpenBLAS32_jll.libopenblas_path; clear=true)
# Test that we can get the `dgemm_` symbol address, and that it is what we expect
slamch_32 = dlsym(OpenBLAS32_jll.libopenblas_handle, :slamch_)
@test slamch_32 != C_NULL
@test lbt_get_forward(lbt_handle, "slamch_", LBT_INTERFACE_LP64) == slamch_32
# Ensure that the libs show that `slamch_` is forwarded by this library:
config = lbt_get_config(lbt_handle)
libs = unpack_loaded_libraries(config)
@test length(libs) == 1
slamch_idx = find_symbol_offset(config, "slamch_")
@test slamch_idx !== nothing
@test bitfield_get(libs[1].active_forwards, slamch_idx) != 0
orig_forwards = copy(libs[1].active_forwards)
# Now, test that we can muck this up
my_slamch = @cfunction(record_slamch_args, Float32, (Cstring,))
@test lbt_set_forward(lbt_handle, "slamch_", my_slamch, LBT_INTERFACE_LP64) == 0
@test lbt_get_forward(lbt_handle, "slamch_", LBT_INTERFACE_LP64) == my_slamch
config = lbt_get_config(lbt_handle)
libs = unpack_loaded_libraries(config)
@test bitfield_get(libs[1].active_forwards, slamch_idx) == 0
# Ensure that we actually overrode the symbol
@test ccall(dlsym(lbt_handle, "slamch_"), Float32, (Cstring,), "test") == 13.37f0
@test slamch_args == ["test"]
# Override the default function to keep track of people who try to call uninitialized BLAS functions
@test lbt_get_default_func(lbt_handle) != C_NULL
my_default_func = @cfunction(default_capture_stacktrace, Cvoid, ())
lbt_set_default_func(lbt_handle, my_default_func)
@test lbt_get_default_func(lbt_handle) == my_default_func
# Now, set `slamch_64_` to it
@test lbt_set_forward(lbt_handle, "slamch_", C_NULL, LBT_INTERFACE_ILP64) == 0
ccall(dlsym(lbt_handle, "slamch_64_"), Float32, (Cstring,), "this will call the default function")
@test length(stacktraces) == 1
self_traces = filter(entry -> string(entry.file) == @__FILE__, stacktraces[1])
@test length(self_traces) == 3
end
if MKL_jll.is_available() && Sys.ARCH == :x86_64
# Since MKL v2022, we can explicitly link against ILP64-suffixed symbols
@testset "MKL v2022 ILP64 loading" begin
# Load the ILP64 interface library. Remember, you must load the `core`
# and a `threading` library first, with `RTLD_LAZY` for this to work!
lbt_forward(lbt_handle, libmkl_rt; clear=true, suffix_hint = "64")
# Test that we have only one library loaded
config = lbt_get_config(lbt_handle)
libs = unpack_loaded_libraries(config)
@test length(libs) == 1
# Test that it's MKL and it's correctly identified
@test libs[1].libname == MKL_jll.libmkl_rt_path
@test libs[1].interface == LBT_INTERFACE_ILP64
# Test that the ILP64 `dgemm_` forward points at MKL's `dgemm_64` symbol
mkl_dgemm = dlsym(MKL_jll.libmkl_rt_handle, :dgemm_64)
@test lbt_get_forward(lbt_handle, "dgemm_", LBT_INTERFACE_ILP64) == mkl_dgemm
end
@testset "MKL v2022 dual-interface loading" begin
# Also test that we can load both ILP64 and LP64 at the same time!
lbt_forward(lbt_handle, libmkl_rt; clear=true, suffix_hint = "64")
lbt_forward(lbt_handle, libmkl_rt; suffix_hint = "")
# Test that we have both libraries loaded
config = lbt_get_config(lbt_handle)
libs = unpack_loaded_libraries(config)
@test length(libs) == 2
# Test that it's MKL and it's correctly identified
sort!(libs; by = l -> l.libname)
@test libs[1].libname == libmkl_rt
@test libs[1].interface == LBT_INTERFACE_ILP64
@test libs[2].libname == libmkl_rt
@test libs[2].interface == LBT_INTERFACE_LP64
end
@testset "MKL v2022 CBLAS workaround" begin
# Load ILP64 MKL
lbt_forward(lbt_handle, libmkl_rt; clear=true, suffix_hint = "64")
config = lbt_get_config(lbt_handle)
libs = unpack_loaded_libraries(config)
@test length(libs) == 1
@test libs[1].interface == LBT_INTERFACE_ILP64
@test libs[1].cblas == LBT_CBLAS_DIVERGENT
@test libs[1].complex_retstyle == LBT_COMPLEX_RETSTYLE_ARGUMENT
# Call cblas_zdotc_sub, asserting that it does not try to call a forwardless-symbol
empty!(stacktraces)
A = ComplexF64[3.1 + 1.4im, -1.0 + 1.2im]
B = ComplexF64[1.3 + 0.3im, -1.1 + -3.4im]
result = ComplexF64[0]
zdotc_fptr = dlsym(lbt_handle, :cblas_zdotc_sub64_)
ccall(zdotc_fptr, Cvoid, (Int64, Ptr{ComplexF64}, Int64, Ptr{ComplexF64}, Int64, Ptr{ComplexF64}), 2, A, 1, B, 1, result)
@test result[1] ≈ ComplexF64(1.47 + 3.83im)
@test isempty(stacktraces)
# Also call `sdot_`, asserting the same.
empty!(stacktraces)
A = Float32[3.1, -1.0]
B = Float32[1.3, -1.1]
sdot_fptr = dlsym(lbt_handle, :cblas_sdot64_)
result = ccall(sdot_fptr, Cfloat, (Int64, Ptr{Float32}, Int64, Ptr{Float32}, Int64), 2, A, 1, B, 1)
@test result ≈ Float32(5.13)
@test isempty(stacktraces)
end
@testset "MKL complex retstyle" begin
lbt_forward(lbt_handle, libmkl_rt; clear=true, suffix_hint = "64")
config = lbt_get_config(lbt_handle)
libs = unpack_loaded_libraries(config)
@test length(libs) == 1
@test libs[1].interface == LBT_INTERFACE_ILP64
@test libs[1].cblas == LBT_CBLAS_DIVERGENT
@test libs[1].complex_retstyle == LBT_COMPLEX_RETSTYLE_ARGUMENT
# Call cblas_cdotc_sub64_ to test the full CBLAS workaround -> complex return style handling chain
empty!(stacktraces)
A = ComplexF32[3.1 + 1.4im, -1.0 + 1.2im]
B = ComplexF32[1.3 + 0.3im, -1.1 + -3.4im]
result = ComplexF32[0]
cdotc_fptr = dlsym(lbt_handle, :cblas_cdotc_sub64_)
ccall(cdotc_fptr, Cvoid, (Int64, Ptr{ComplexF32}, Int64, Ptr{ComplexF32}, Int64, Ptr{ComplexF32}), 2, A, 1, B, 1, result)
@test result[1] ≈ ComplexF32(1.47 + 3.83im)
@test isempty(stacktraces)
end
end
|
{"hexsha": "401a3273e50363095db2c5db5f7248bf2dc9fcb8", "size": 11629, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/direct.jl", "max_stars_repo_name": "JuliaLinearAlgebra/libblastrampoline", "max_stars_repo_head_hexsha": "145bb64256c441d11b0a742e38f9ef3f08921e8e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 10, "max_stars_repo_stars_event_min_datetime": "2021-09-20T17:38:56.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-31T19:55:41.000Z", "max_issues_repo_path": "test/direct.jl", "max_issues_repo_name": "JuliaLinearAlgebra/libblastrampoline", "max_issues_repo_head_hexsha": "145bb64256c441d11b0a742e38f9ef3f08921e8e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 17, "max_issues_repo_issues_event_min_datetime": "2021-09-22T09:09:49.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-19T15:16:17.000Z", "max_forks_repo_path": "test/direct.jl", "max_forks_repo_name": "JuliaLinearAlgebra/libblastrampoline", "max_forks_repo_head_hexsha": "145bb64256c441d11b0a742e38f9ef3f08921e8e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2021-12-19T14:45:02.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-28T22:55:40.000Z", "avg_line_length": 40.5191637631, "max_line_length": 129, "alphanum_fraction": 0.6882793017, "num_tokens": 3622}
|
#######################################################################
import JSON, Conda
using Compat
using Compat.Unicode: lowercase
jupyter=""
# remove deps.jl at exit if it exists, in case build.jl fails
try
#######################################################################
# Make sure Python uses UTF-8 output for Unicode paths
ENV["PYTHONIOENCODING"] = "UTF-8"
function prog_version(prog)
v = try
chomp(read(`$prog --version`, String))
catch
return nothing
end
try
return VersionNumber(v)
catch
Compat.@warn("`$jupyter --version` returned an unrecognized version number $v")
return v"0.0"
end
end
prefsfile = joinpath(first(DEPOT_PATH), "prefs", "IJulia")
mkpath(dirname(prefsfile))
global jupyter = get(ENV, "JUPYTER", isfile(prefsfile) ? readchomp(prefsfile) : Compat.Sys.isunix() && !Compat.Sys.isapple() ? "jupyter" : "")
if isempty(jupyter)
jupyter_vers = nothing
else
jupyter_vers = prog_version(jupyter)
if jupyter_vers === nothing
jupyter_vers = prog_version(jupyter * "-notebook")
end
if jupyter_vers === nothing
Compat.@warn("Could not execute `$jupyter --version`.")
end
end
isconda = dirname(jupyter) == abspath(Conda.SCRIPTDIR)
if Sys.ARCH in (:i686, :x86_64) && (jupyter_vers === nothing || jupyter_vers < v"3.0" || isconda)
isconda || jupyter_vers === nothing || Compat.@info("$jupyter was too old: got $jupyter_vers, required ≥ 3.0")
Compat.@info("Installing Jupyter via the Conda package.")
Conda.add("jupyter")
jupyter = abspath(Conda.SCRIPTDIR, "jupyter")
jupyter_vers = prog_version(jupyter)
end
if jupyter_vers === nothing || jupyter_vers < v"3.0"
error("Failed to find or install Jupyter 3.0 or later. Please install Jupyter manually, set `ENV[\"JUPYTER\"]=\"/path/to/jupyter\", and rerun `Pkg.build(\"IJulia\")`.")
end
Compat.@info("Found Jupyter version $jupyter_vers: $jupyter")
#######################################################################
# Get the latest syntax highlighter file.
if isconda
highlighter = joinpath(Conda.LIBDIR, "python2.7", "site-packages", "notebook", "static",
"components", "codemirror", "mode", "julia", "julia.js")
# CodeMirror commit from which we get the syntax highlighter
cm_commit = "ed9278cba6e1f75328df6b257f1043d35a690c59"
highlighter_url = "https://raw.githubusercontent.com/codemirror/CodeMirror/" *
cm_commit * "/mode/julia/julia.js"
if isfile(highlighter)
try
download(highlighter_url, highlighter)
catch e
Compat.@warn("The following error occurred while attempting to download latest ",
"syntax highlighting definitions:\n\n", e, "\n\nSyntax highlighting may ",
"not work as expected.")
end
end
end
#######################################################################
# Warn people upgrading from older IJulia versions:
try
juliaprof = chomp(read(pipeline(`$ipython locate profile julia`,
stderr=devnull), String))
Compat.@warn("""You should now run IJulia just via `$jupyter notebook`, without
the `--profile julia` flag. IJulia no longer maintains the profile.
Consider deleting $juliaprof""")
catch
end
#######################################################################
# Install Jupyter kernel-spec file.
include("kspec.jl")
kspec_cmd, = installkernel("Julia")
# figure out the notebook command by replacing (only!) the last occurrence of
# "kernelspec" with "notebook":
notebook = kspec_cmd.exec
n = notebook[end]
ki = findlast("kernelspec", n)
notebook[end] = n[1:prevind(n,first(ki))] * "notebook" * n[nextind(n,last(ki)):end]
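# Illustrative example (hypothetical path): if the kernelspec command ends in
# "/usr/local/bin/jupyter-kernelspec", only that trailing "kernelspec" is
# replaced, giving "/usr/local/bin/jupyter-notebook"; an earlier path component
# that happens to contain "kernelspec" is left untouched.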
#######################################################################
# make it easier to get more debugging output by setting JULIA_DEBUG=1
# when building.
IJULIA_DEBUG = lowercase(get(ENV, "IJULIA_DEBUG", "0"))
IJULIA_DEBUG = IJULIA_DEBUG in ("1", "true", "yes")
#######################################################################
# Install the deps.jl file:
if v"4.2" ≤ jupyter_vers < v"5.1"
# disable broken data-rate limit (issue #528)
push!(notebook, "--NotebookApp.iopub_data_rate_limit=2147483647")
end
deps = """
const jupyter = "$(escape_string(jupyter))"
const notebook_cmd = ["$(join(map(escape_string, notebook), "\", \""))"]
const jupyter_vers = $(repr(jupyter_vers))
const IJULIA_DEBUG = $(IJULIA_DEBUG)
"""
if !isfile("deps.jl") || read("deps.jl", String) != deps
write("deps.jl", deps)
end
write(prefsfile, jupyter)
#######################################################################
catch
isfile("deps.jl") && rm("deps.jl") # remove deps.jl file on build error
rethrow()
end
|
{"hexsha": "4769e8e506ec1b0f8ec453a9ba309c8260cfc7bb", "size": 4835, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "deps/build.jl", "max_stars_repo_name": "jordancluts/IJulia.jl", "max_stars_repo_head_hexsha": "2211a9c9b6821429254e88c60c6bb1d3c357e863", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "deps/build.jl", "max_issues_repo_name": "jordancluts/IJulia.jl", "max_issues_repo_head_hexsha": "2211a9c9b6821429254e88c60c6bb1d3c357e863", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "deps/build.jl", "max_forks_repo_name": "jordancluts/IJulia.jl", "max_forks_repo_head_hexsha": "2211a9c9b6821429254e88c60c6bb1d3c357e863", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.1923076923, "max_line_length": 172, "alphanum_fraction": 0.5873836608, "num_tokens": 1232}
|
"""
Processor for the KorQuAD open-domain dataset.
This script is based on the following file:
https://github.com/huggingface/transformers/blob/master/src/transformers/data/processors/squad.py
"""
import json
import logging
import os
import sys
from functools import partial
from multiprocessing import Pool, cpu_count
import numpy as np
from tqdm import tqdm
from transformers.file_utils import is_tf_available, is_torch_available
from transformers.tokenization_bert import whitespace_tokenize
from transformers.data.processors.utils import DataProcessor
if is_torch_available():
import torch
from torch.utils.data import TensorDataset
if is_tf_available():
import tensorflow as tf
logger = logging.getLogger(__name__)
handler = logging.StreamHandler(sys.stdout)
logger.addHandler(handler)
def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer, orig_answer_text):
"""Returns tokenized answer spans that better match the annotated answer."""
tok_answer_text = " ".join(tokenizer.tokenize(orig_answer_text))
for new_start in range(input_start, input_end + 1):
for new_end in range(input_end, new_start - 1, -1):
text_span = " ".join(doc_tokens[new_start: (new_end + 1)])
if text_span == tok_answer_text:
return (new_start, new_end)
return (input_start, input_end)
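# Illustrative example (hypothetical tokens): if the annotated answer is "1895"
# but the document word "(1895-1946)" is sub-tokenized into
# ["(", "1895", "-", "1946", ")"], the scan above tightens the span so that it
# covers exactly the "1895" piece rather than the whole word.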
def _check_is_max_context(doc_spans, cur_span_index, position):
"""Check if this is the 'max context' doc span for the token."""
best_score = None
best_span_index = None
for (span_index, doc_span) in enumerate(doc_spans):
end = doc_span.start + doc_span.length - 1
if position < doc_span.start:
continue
if position > end:
continue
num_left_context = position - doc_span.start
num_right_context = end - position
score = min(num_left_context, num_right_context) + 0.01 * doc_span.length
if best_score is None or score > best_score:
best_score = score
best_span_index = span_index
return cur_span_index == best_span_index
def _new_check_is_max_context(doc_spans, cur_span_index, position):
"""Check if this is the 'max context' doc span for the token."""
# if len(doc_spans) == 1:
# return True
best_score = None
best_span_index = None
for (span_index, doc_span) in enumerate(doc_spans):
end = doc_span["start"] + doc_span["length"] - 1
if position < doc_span["start"]:
continue
if position > end:
continue
num_left_context = position - doc_span["start"]
num_right_context = end - position
score = min(num_left_context, num_right_context) + 0.01 * doc_span["length"]
if best_score is None or score > best_score:
best_score = score
best_span_index = span_index
return cur_span_index == best_span_index
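# Worked example: for a token at position 10 in a span with start=4 and
# length=20 (so end=23), num_left_context=6 and num_right_context=13, giving
# score = min(6, 13) + 0.01 * 20 = 6.2; the span with the highest score for a
# token is the one treated as its "max context".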
def _is_whitespace(c):
if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F:
return True
return False
def squad_convert_example_to_features(example, max_seq_length, doc_stride, max_query_length, is_training):
features = []
if is_training and not example.is_impossible:
# Get start and end position
start_position = example.start_position
end_position = example.end_position
# If the answer cannot be found in the text, then skip this example.
actual_text = " ".join(example.doc_tokens[start_position: (end_position + 1)])
cleaned_answer_text = " ".join(whitespace_tokenize(example.answer_text))
if actual_text.find(cleaned_answer_text) == -1:
logger.warning("Could not find answer: '%s' vs. '%s'", actual_text, cleaned_answer_text)
return []
tok_to_orig_index = []
orig_to_tok_index = []
all_doc_tokens = []
for (i, token) in enumerate(example.doc_tokens):
orig_to_tok_index.append(len(all_doc_tokens))
sub_tokens = tokenizer.tokenize(token)
for sub_token in sub_tokens:
tok_to_orig_index.append(i)
all_doc_tokens.append(sub_token)
if is_training and not example.is_impossible:
tok_start_position = orig_to_tok_index[example.start_position]
if example.end_position < len(example.doc_tokens) - 1:
tok_end_position = orig_to_tok_index[example.end_position + 1] - 1
else:
tok_end_position = len(all_doc_tokens) - 1
(tok_start_position, tok_end_position) = _improve_answer_span(
all_doc_tokens, tok_start_position, tok_end_position, tokenizer, example.answer_text
)
spans = []
truncated_query = tokenizer.encode(example.question_text, add_special_tokens=False, max_length=max_query_length)
sequence_added_tokens = (
tokenizer.max_len - tokenizer.max_len_single_sentence + 1
if "roberta" in str(type(tokenizer))
else tokenizer.max_len - tokenizer.max_len_single_sentence
)
sequence_pair_added_tokens = tokenizer.max_len - tokenizer.max_len_sentences_pair
span_doc_tokens = all_doc_tokens
while len(spans) * doc_stride < len(all_doc_tokens):
encoded_dict = tokenizer.encode_plus(
truncated_query if tokenizer.padding_side == "right" else span_doc_tokens,
span_doc_tokens if tokenizer.padding_side == "right" else truncated_query,
max_length=max_seq_length,
return_overflowing_tokens=True,
pad_to_max_length=True,
stride=max_seq_length - doc_stride - len(truncated_query) - sequence_pair_added_tokens,
truncation_strategy="only_second" if tokenizer.padding_side == "right" else "only_first",
)
paragraph_len = min(
len(all_doc_tokens) - len(spans) * doc_stride,
max_seq_length - len(truncated_query) - sequence_pair_added_tokens,
)
if tokenizer.pad_token_id in encoded_dict["input_ids"]:
non_padded_ids = encoded_dict["input_ids"][: encoded_dict["input_ids"].index(tokenizer.pad_token_id)]
else:
non_padded_ids = encoded_dict["input_ids"]
tokens = tokenizer.convert_ids_to_tokens(non_padded_ids)
token_to_orig_map = {}
for i in range(paragraph_len):
index = len(truncated_query) + sequence_added_tokens + i if tokenizer.padding_side == "right" else i
token_to_orig_map[index] = tok_to_orig_index[len(spans) * doc_stride + i]
encoded_dict["paragraph_len"] = paragraph_len
encoded_dict["tokens"] = tokens
encoded_dict["token_to_orig_map"] = token_to_orig_map
encoded_dict["truncated_query_with_special_tokens_length"] = len(truncated_query) + sequence_added_tokens
encoded_dict["token_is_max_context"] = {}
encoded_dict["start"] = len(spans) * doc_stride
encoded_dict["length"] = paragraph_len
spans.append(encoded_dict)
if "overflowing_tokens" not in encoded_dict:
break
span_doc_tokens = encoded_dict["overflowing_tokens"]
for doc_span_index in range(len(spans)):
for j in range(spans[doc_span_index]["paragraph_len"]):
is_max_context = _new_check_is_max_context(spans, doc_span_index, doc_span_index * doc_stride + j)
index = (
j
if tokenizer.padding_side == "left"
else spans[doc_span_index]["truncated_query_with_special_tokens_length"] + j
)
spans[doc_span_index]["token_is_max_context"][index] = is_max_context
for span in spans:
# Identify the position of the CLS token
cls_index = span["input_ids"].index(tokenizer.cls_token_id)
# p_mask: mask with 1 for tokens that cannot be in the answer (0 for tokens that can be in an answer)
# The original TF implementation also keeps the classification token (set to 0) (not sure why...)
p_mask = np.array(span["token_type_ids"])
p_mask = np.minimum(p_mask, 1)
if tokenizer.padding_side == "right":
# Invert so that context tokens can be in the answer (0) and question tokens cannot (1)
p_mask = 1 - p_mask
p_mask[np.where(np.array(span["input_ids"]) == tokenizer.sep_token_id)[0]] = 1
# Set the CLS index to '0'
p_mask[cls_index] = 0
span_is_impossible = example.is_impossible
start_position = 0
end_position = 0
if is_training and not span_is_impossible:
# For training, if our document chunk does not contain an annotation
# we throw it out, since there is nothing to predict.
doc_start = span["start"]
doc_end = span["start"] + span["length"] - 1
out_of_span = False
if not (tok_start_position >= doc_start and tok_end_position <= doc_end):
out_of_span = True
if out_of_span:
start_position = cls_index
end_position = cls_index
span_is_impossible = True
else:
if tokenizer.padding_side == "left":
doc_offset = 0
else:
doc_offset = len(truncated_query) + sequence_added_tokens
start_position = tok_start_position - doc_start + doc_offset
end_position = tok_end_position - doc_start + doc_offset
features.append(
SquadFeatures(
span["input_ids"],
span["attention_mask"],
span["token_type_ids"],
cls_index,
p_mask.tolist(),
example_index=0,
# unique_id and example_index cannot be set here; they are filled in after the multiprocessing step.
unique_id=0,
paragraph_len=span["paragraph_len"],
token_is_max_context=span["token_is_max_context"],
tokens=span["tokens"],
token_to_orig_map=span["token_to_orig_map"],
start_position=start_position,
end_position=end_position,
source=example.source
)
)
return features
def squad_convert_example_to_features_init(tokenizer_for_convert):
global tokenizer
tokenizer = tokenizer_for_convert
def squad_convert_example_to_features_sp(example, max_seq_length, doc_stride, max_query_length, is_training,
tokenizer_for_convert):
global tokenizer
tokenizer = tokenizer_for_convert
return squad_convert_example_to_features(example, max_seq_length, doc_stride, max_query_length, is_training)
def squad_convert_examples_to_features(
examples, tokenizer, max_seq_length, doc_stride, max_query_length, is_training, return_dataset=False, threads=1
):
"""
Converts a list of examples into a list of features that can be directly given as input to a model.
It is model-dependent and takes advantage of many of the tokenizer's features to create the model's inputs.
Args:
examples: list of :class:`~transformers.data.processors.squad.SquadExample`
tokenizer: an instance of a child of :class:`~transformers.PreTrainedTokenizer`
max_seq_length: The maximum sequence length of the inputs.
doc_stride: The stride used when the context is too large and is split across several features.
max_query_length: The maximum length of the query.
is_training: whether to create features for model evaluation or model training.
return_dataset: Default False. Either 'pt' or 'tf'.
if 'pt': returns a torch.data.TensorDataset,
if 'tf': returns a tf.data.Dataset
threads: multiple processing threads.
Returns:
list of :class:`~transformers.data.processors.squad.SquadFeatures`
Example::
processor = SquadV2Processor()
examples = processor.get_dev_examples(data_dir)
features = squad_convert_examples_to_features(
examples=examples,
tokenizer=tokenizer,
max_seq_length=args.max_seq_length,
doc_stride=args.doc_stride,
max_query_length=args.max_query_length,
is_training=not evaluate,
)
"""
# Defining helper methods
features = []
threads = min(threads, cpu_count())
if threads == 1:
print("squad_convert_examples_to_features")
features = []
for eg in examples:
feat = squad_convert_example_to_features_sp(
eg,
max_seq_length=max_seq_length,
doc_stride=doc_stride,
max_query_length=max_query_length,
is_training=is_training,
tokenizer_for_convert=tokenizer)
features.append(feat)
else:
print("squad_convert_examples_to_features w/ {} threads".format(threads))
with Pool(threads, initializer=squad_convert_example_to_features_init, initargs=(tokenizer,)) as p:
annotate_ = partial(
squad_convert_example_to_features,
max_seq_length=max_seq_length,
doc_stride=doc_stride,
max_query_length=max_query_length,
is_training=is_training,
)
features = list(
p.imap(annotate_, examples, chunksize=32)
)
new_features = []
unique_id = 1000000000
example_index = 0
for example_features in features:
if not example_features:
continue
for example_feature in example_features:
example_feature.example_index = example_index
example_feature.unique_id = unique_id
new_features.append(example_feature)
unique_id += 1
example_index += 1
features = new_features
del new_features
if return_dataset == "pt":
if not is_torch_available():
raise RuntimeError("PyTorch must be installed to return a PyTorch dataset.")
# Convert to Tensors and build dataset
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_attention_masks = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
all_cls_index = torch.tensor([f.cls_index for f in features], dtype=torch.long)
all_p_mask = torch.tensor([f.p_mask for f in features], dtype=torch.float)
all_source = torch.tensor([f.source for f in features], dtype=torch.int)
if not is_training:
all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long)
dataset = TensorDataset(
all_input_ids, all_attention_masks, all_token_type_ids, all_example_index, all_cls_index, all_p_mask, all_source
)
else:
all_start_positions = torch.tensor([f.start_position for f in features], dtype=torch.long)
all_end_positions = torch.tensor([f.end_position for f in features], dtype=torch.long)
dataset = TensorDataset(
all_input_ids,
all_attention_masks,
all_token_type_ids,
all_start_positions,
all_end_positions,
all_cls_index,
all_p_mask,
all_source,
)
return features, dataset
elif return_dataset == "tf":
if not is_tf_available():
raise RuntimeError("TensorFlow must be installed to return a TensorFlow dataset.")
def gen():
for ex in features:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
{
"start_position": ex.start_position,
"end_position": ex.end_position,
"cls_index": ex.cls_index,
"p_mask": ex.p_mask,
},
)
return tf.data.Dataset.from_generator(
gen,
(
{"input_ids": tf.int32, "attention_mask": tf.int32, "token_type_ids": tf.int32},
{"start_position": tf.int64, "end_position": tf.int64, "cls_index": tf.int64, "p_mask": tf.int32},
),
(
{
"input_ids": tf.TensorShape([None]),
"attention_mask": tf.TensorShape([None]),
"token_type_ids": tf.TensorShape([None]),
},
{
"start_position": tf.TensorShape([]),
"end_position": tf.TensorShape([]),
"cls_index": tf.TensorShape([]),
"p_mask": tf.TensorShape([None]),
},
),
)
return features
class SquadProcessor(DataProcessor):
"""
Processor for the SQuAD data set.
Overridden by SquadV1Processor and SquadV2Processor, used by the version 1.1 and version 2.0 of SQuAD, respectively.
"""
train_file = None
dev_file = None
def _get_example_from_tensor_dict(self, tensor_dict, evaluate=False):
if not evaluate:
answer = tensor_dict["answers"]["text"][0].numpy().decode("utf-8")
answer_start = tensor_dict["answers"]["answer_start"][0].numpy()
answers = []
else:
answers = [
{"answer_start": start.numpy(), "text": text.numpy().decode("utf-8")}
for start, text in zip(tensor_dict["answers"]["answer_start"], tensor_dict["answers"]["text"])
]
answer = None
answer_start = None
return SquadExample(
qas_id=tensor_dict["id"].numpy().decode("utf-8"),
question_text=tensor_dict["question"].numpy().decode("utf-8"),
context_text=tensor_dict["context"].numpy().decode("utf-8"),
answer_text=answer,
start_position_character=answer_start,
title=tensor_dict["title"].numpy().decode("utf-8"),
answers=answers,
)
def get_examples_from_dataset(self, dataset, evaluate=False):
"""
Creates a list of :class:`~transformers.data.processors.squad.SquadExample` using a TFDS dataset.
Args:
dataset: The tfds dataset loaded from `tensorflow_datasets.load("squad")`
evaluate: boolean specifying if in evaluation mode or in training mode
Returns:
List of SquadExample
Examples::
import tensorflow_datasets as tfds
dataset = tfds.load("squad")
training_examples = get_examples_from_dataset(dataset, evaluate=False)
evaluation_examples = get_examples_from_dataset(dataset, evaluate=True)
"""
if evaluate:
dataset = dataset["validation"]
else:
dataset = dataset["train"]
examples = []
for tensor_dict in dataset:
examples.append(self._get_example_from_tensor_dict(tensor_dict, evaluate=evaluate))
return examples
def get_train_examples(self, data_dir, filename=None):
"""
Returns the training examples from the data directory.
Args:
data_dir: Directory containing the data files used for training and evaluating.
filename: None by default, specify this if the training file has a different name than the original one
which is `train-v1.1.json` and `train-v2.0.json` for squad versions 1.1 and 2.0 respectively.
"""
if data_dir is None:
data_dir = ""
if self.train_file is None:
raise ValueError("SquadProcessor should be instantiated via SquadV1Processor or SquadV2Processor")
with open(
os.path.join(data_dir, self.train_file if filename is None else filename), "r", encoding="utf-8"
) as reader:
input_data = json.load(reader)["data"]
return self._create_examples(input_data, "train")
def get_eval_examples(self, data_dir, filename=None):
"""
Returns the evaluation examples from the data directory.
Args:
data_dir: Directory containing the data files used for training and evaluating.
filename: None by default, specify this if the evaluation file has a different name than the original one
which is `dev-v1.1.json` and `dev-v2.0.json` for squad versions 1.1 and 2.0 respectively.
"""
if data_dir is None:
data_dir = ""
if self.dev_file is None:
raise ValueError("SquadProcessor should be instantiated via SquadV1Processor or SquadV2Processor")
with open(
os.path.join(data_dir, self.dev_file if filename is None else filename), "r", encoding="utf-8"
) as reader:
input_data = json.load(reader)["data"]
return self._create_examples(input_data, "dev")
def get_test_examples(self, data_dir, filename=None):
"""
Returns the test examples from the data directory.
Args:
data_dir: Directory containing the data files used for training and evaluating.
filename: None by default, specify this if the test file has a different name than the default one
(the dev file is used when no filename is given).
"""
if data_dir is None:
data_dir = ""
if self.dev_file is None:
raise ValueError("SquadProcessor should be instantiated via SquadV1Processor or SquadV2Processor")
with open(
os.path.join(data_dir, self.dev_file if filename is None else filename), "r", encoding="utf-8"
) as reader:
input_data = json.load(reader)["data"]
return self._create_examples(input_data, "test")
def _create_examples(self, input_data, set_type):
src = {"kdc": 0, "view": 1, "web": 2, "kin": 3, "nws": 4}
is_training = set_type == "train"
examples = []
has_answer_cnt, no_answer_cnt, qa_num = 0, 0, 0
has_apply, no_apply = 0, 0
for entry in input_data[:]:
qa = entry['qa']
question_text = qa["question"]
answer_text = qa['answer']
if question_text is None or answer_text is None:
continue
qa_num += 1
per_qa_ans_paragraph_cnt = 0
per_qa_unans_paragraph_cnt = 0
cnt = 0
for pi, paragraph in enumerate(entry["paragraphs"]):
title = paragraph["title"]
context_text = str(paragraph["contents"])
source = paragraph["source"]
if context_text is None:
continue
qas_id = "{}[SEP]{}[SEP]{}".format(question_text, answer_text, pi)
start_position_character = None
answers = []
if answer_text not in context_text:
is_impossible = True
else:
is_impossible = False
if not is_impossible:
if is_training:
start_position_character = context_text.index(answer_text) # answer["answer_start"]
else:
answers = [{"text": answer_text,
"answer_start": context_text.index(answer_text)}]
example = SquadExample(
qas_id=qas_id,
question_text=question_text,
context_text=context_text,
answer_text=answer_text,
start_position_character=start_position_character,
title=title,
source=src[source],
is_impossible=is_impossible,
answers=answers,
)
if set_type == "test":
examples.append(example)
if pi >= 3:
break
else:
if is_impossible:
no_answer_cnt += 1
per_qa_unans_paragraph_cnt += 1
if per_qa_unans_paragraph_cnt < 3:
examples.append(example)
cnt += 1
no_apply += 1
else:
has_answer_cnt += 1
per_qa_ans_paragraph_cnt += 1
if per_qa_ans_paragraph_cnt < 6:
examples.append(example)
cnt += 1
has_apply += 1
if cnt >=7:
break
# limit the number of training paragraphs per question to keep memory usage down
print("[{}] qa pair num({})".format(set_type, qa_num))
print("[{}] Has Answer({}) / No Answer({})".format(set_type, has_answer_cnt, no_answer_cnt))
print("[{}] Apply Has Answer({}) / Apply No Answer({})".format(set_type, has_apply, no_apply))
return examples
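# Note on the caps above: during training each question keeps at most 2
# paragraphs without the answer and at most 5 containing it (7 in total);
# in test mode only the first 4 retrieved paragraphs are used.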
class SquadV1Processor(SquadProcessor):
train_file = "train-v1.1.json"
dev_file = "dev-v1.1.json"
class SquadV2Processor(SquadProcessor):
train_file = "train_data/korquad_open_train.json"
dev_file = "train_data/korquad_open_dev.json"
test_file = "test_data/korquad_open_test.json"
class SquadExample(object):
"""
A single training/test example for the Squad dataset, as loaded from disk.
Args:
qas_id: The example's unique identifier
question_text: The question string
context_text: The context string
answer_text: The answer string
start_position_character: The character position of the start of the answer
title: The title of the example
answers: None by default, this is used during evaluation. Holds answers as well as their start positions.
is_impossible: False by default, set to True if the example has no possible answer.
"""
def __init__(
self,
qas_id,
question_text,
context_text,
answer_text,
start_position_character,
title,
source,
answers=[],
is_impossible=False,
):
self.qas_id = qas_id
self.question_text = question_text
self.context_text = context_text
self.answer_text = answer_text
self.title = title
self.is_impossible = is_impossible
self.answers = answers
self.start_position, self.end_position = 0, 0
self.source = source
doc_tokens = []
char_to_word_offset = []
prev_is_whitespace = True
# Split on whitespace so that different tokens may be attributed to their original position.
for c in self.context_text:
if _is_whitespace(c):
prev_is_whitespace = True
else:
if prev_is_whitespace:
doc_tokens.append(c)
else:
doc_tokens[-1] += c
prev_is_whitespace = False
char_to_word_offset.append(len(doc_tokens) - 1)
self.doc_tokens = doc_tokens
self.char_to_word_offset = char_to_word_offset
# Start and end positions only have a value during evaluation.
if start_position_character is not None and not is_impossible:
self.start_position = char_to_word_offset[start_position_character]
self.end_position = char_to_word_offset[
min(start_position_character + len(answer_text) - 1, len(char_to_word_offset) - 1)
]
class SquadFeatures(object):
"""
Single squad example features to be fed to a model.
Those features are model-specific and can be crafted from :class:`~transformers.data.processors.squad.SquadExample`
using the :method:`~transformers.data.processors.squad.squad_convert_examples_to_features` method.
Args:
input_ids: Indices of input sequence tokens in the vocabulary.
attention_mask: Mask to avoid performing attention on padding token indices.
token_type_ids: Segment token indices to indicate first and second portions of the inputs.
cls_index: the index of the CLS token.
p_mask: Mask identifying tokens that can be answers vs. tokens that cannot.
Mask with 1 for tokens that cannot be in the answer and 0 for tokens that can be in an answer
example_index: the index of the example
unique_id: The unique Feature identifier
paragraph_len: The length of the context
token_is_max_context: List of booleans identifying which tokens have their maximum context in this feature object.
If a token does not have their maximum context in this feature object, it means that another feature object
has more information related to that token and should be prioritized over this feature for that token.
tokens: list of tokens corresponding to the input ids
token_to_orig_map: mapping between the tokens and the original text, needed in order to identify the answer.
start_position: start of the answer token index
end_position: end of the answer token index
"""
def __init__(
self,
input_ids,
attention_mask,
token_type_ids,
cls_index,
p_mask,
example_index,
unique_id,
paragraph_len,
token_is_max_context,
tokens,
token_to_orig_map,
start_position,
end_position,
source
):
self.input_ids = input_ids
self.attention_mask = attention_mask
self.token_type_ids = token_type_ids
self.cls_index = cls_index
self.p_mask = p_mask
self.example_index = example_index
self.unique_id = unique_id
self.paragraph_len = paragraph_len
self.token_is_max_context = token_is_max_context
self.tokens = tokens
self.token_to_orig_map = token_to_orig_map
self.start_position = start_position
self.end_position = end_position
self.source = source
class SquadResult(object):
"""
Constructs a SquadResult which can be used to evaluate a model's output on the SQuAD dataset.
Args:
unique_id: The unique identifier corresponding to that example.
start_logits: The logits corresponding to the start of the answer
end_logits: The logits corresponding to the end of the answer
"""
def __init__(self, unique_id, start_logits, end_logits, start_top_index=None, end_top_index=None, cls_logits=None):
self.start_logits = start_logits
self.end_logits = end_logits
self.unique_id = unique_id
if start_top_index:
self.start_top_index = start_top_index
self.end_top_index = end_top_index
self.cls_logits = cls_logits
|
{"hexsha": "8a20e48b98a3915cafdd612d4439d5db6d41d3fc", "size": 31335, "ext": "py", "lang": "Python", "max_stars_repo_path": "open_squad_multihead.py", "max_stars_repo_name": "JongSuk1/KorQuad", "max_stars_repo_head_hexsha": "757dccb3cfee887692ec9ee7eec5b9d91b0af5d1", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "open_squad_multihead.py", "max_issues_repo_name": "JongSuk1/KorQuad", "max_issues_repo_head_hexsha": "757dccb3cfee887692ec9ee7eec5b9d91b0af5d1", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "open_squad_multihead.py", "max_forks_repo_name": "JongSuk1/KorQuad", "max_forks_repo_head_hexsha": "757dccb3cfee887692ec9ee7eec5b9d91b0af5d1", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.4150943396, "max_line_length": 128, "alphanum_fraction": 0.6184139142, "include": true, "reason": "import numpy", "num_tokens": 6473}
|
import numpy as np
import unittest
import networkx as nx
from numpy.testing import assert_allclose
from graphik.graphs import ProblemGraphRevolute
from graphik.robots.robot_base import Robot
from graphik.robots import RobotRevolute
from graphik.solvers.constraints import get_full_revolute_nearest_point
from graphik.solvers.sdp_snl import (
distance_constraints,
distance_constraints_graph,
evaluate_linear_map,
constraints_and_nearest_points_to_sdp_vars,
evaluate_cost,
)
from graphik.utils.constants import *
from graphik.utils.roboturdf import load_ur10
def run_constraint_test(test_case, graph: ProblemGraphRevolute, sparse=False, ee_cost=False):
robot = graph.robot
n = robot.n
# get a copy of the current robot + environment graph
G = nx.DiGraph(graph)
# remove base nodes and all adjacent edges
G.remove_node("x")
G.remove_node("y")
q = robot.random_configuration()
full_points = [f"p{idx}" for idx in range(0, n + 1)] + [
f"q{idx}" for idx in range(0, n + 1)
]
true_input_vals = get_full_revolute_nearest_point(graph, q, full_points)
random_input_vals = {
key: np.random.rand(graph.robot.dim) for key in true_input_vals
}
anchors = {key: true_input_vals[key] for key in ["p0", "q0", f"p{n}", f"q{n}"]}
# If a position is pre-defined for a node, add it as an anchor
for node, data in G.nodes(data=True):
if data.get(POS, None) is not None:
anchors[node] = data[POS]
constraint_clique_dict = distance_constraints_graph(G, anchors, sparse, ee_cost)
# remove the edges that don't have distances defined
edges = []
for u, v, data in G.edges(data=True):
if not data.get(DIST, False):
edges += [(u, v)]
G.remove_edges_from(edges)
undirected = G.to_undirected()
for clique in constraint_clique_dict:
A_clique, b_clique, mapping, _ = constraint_clique_dict[clique]
true_evaluations = evaluate_linear_map(
clique, A_clique, b_clique, mapping, true_input_vals
)
for true_eval in true_evaluations:
test_case.assertAlmostEqual(true_eval, 0.0)
random_evaluations = evaluate_linear_map(
clique, A_clique, b_clique, mapping, random_input_vals
)
# TODO: change this to iterate over edges in the graph object and check that they exist with assert!
for u in clique:
for v in clique:
if frozenset((u, v)) in mapping:
idx = mapping[frozenset((u, v))]
sdp_residual = random_evaluations[idx]
u_val = (
anchors[u]
if u in anchors and not ee_cost
else random_input_vals[u]
)
v_val = (
anchors[v]
if v in anchors and not ee_cost
else random_input_vals[v]
)
true_residual = (
np.linalg.norm(u_val - v_val) ** 2 - undirected[u][v][DIST] ** 2
)
test_case.assertAlmostEqual(sdp_residual, true_residual)
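# Note (illustrative, not part of the original file): the quantity compared above
# is the distance-constraint residual ||x_u - x_v||^2 - d_uv^2. For example, with
# x_u = [0, 0, 0], x_v = [3, 4, 0] and d_uv = 5 the residual is (9 + 16 + 0) - 25 = 0,
# so any point set satisfying the graph distances evaluates to zero.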
def run_cost_test(test_case, robot, graph, sparse=False, ee_cost=False):
q = robot.random_configuration()
full_points = [f"p{idx}" for idx in range(0, robot.n + 1)] + [
f"q{idx}" for idx in range(0, robot.n + 1)
]
input_vals = get_full_revolute_nearest_point(graph, q, full_points)
end_effectors = {
key: input_vals[key] for key in ["p0", "q0", f"p{robot.n}", f"q{robot.n}"]
}
constraint_clique_dict = distance_constraints(graph, end_effectors, sparse, ee_cost)
A, b, mapping, _ = list(constraint_clique_dict.values())[0]
# Build the nearest-point cost terms from the constraint cliques
interior_nearest_points = (
{
key: input_vals[key]
for key in input_vals
if key not in ["p0", "q0", f"p{robot.n}", f"q{robot.n}"]
}
if not ee_cost
else end_effectors
)
(
sdp_variable_map,
sdp_constraints_map,
sdp_cost_map,
) = constraints_and_nearest_points_to_sdp_vars(
constraint_clique_dict, interior_nearest_points, robot.dim
)
cost = evaluate_cost(constraint_clique_dict, sdp_cost_map, interior_nearest_points)
test_case.assertAlmostEqual(cost, 0.0)
random_nearest_points = {key: np.random.rand(3) for key in interior_nearest_points}
cost_bad = evaluate_cost(
constraint_clique_dict, sdp_cost_map, random_nearest_points
)
cost_bad_explicit = sum(
[
np.linalg.norm(random_nearest_points[key] - interior_nearest_points[key])
** 2
for key in interior_nearest_points
]
)
test_case.assertAlmostEqual(cost_bad, cost_bad_explicit)
class TestUR10(unittest.TestCase):
def setUp(self):
self.robot, self.graph = load_ur10()
def test_constraints(self):
n_runs = 10
for _ in range(n_runs):
for sparse in [
True,
False,
]: # Whether to exploit chordal sparsity in the SDP formulation
for ee_cost in [
True,
False,
]: # Whether to treat the end-effectors as variables with targets in the cost
q = self.robot.random_configuration()
full_points = [f"p{idx}" for idx in range(0, self.robot.n + 1)] + [
f"q{idx}" for idx in range(0, self.robot.n + 1)
]
input_vals = get_full_revolute_nearest_point(
self.graph, q, full_points
)
random_input_vals = {key: np.random.rand(3) for key in input_vals}
# Copy of the current robot + environment graph
G = nx.DiGraph(self.graph)
G.remove_node("x")
G.remove_node("y")
anchors = {
key: input_vals[key]
for key in ["p0", "q0", f"p{self.robot.n}", f"q{self.robot.n}"]
}
# If a position is pre-defined for a node, add it as an anchor
for node, data in G.nodes(data=True):
if data.get(POS, None) is not None:
anchors[node] = data[POS]
constraint_clique_dict = distance_constraints_graph(
G, anchors, sparse, ee_cost=ee_cost
)
for clique in constraint_clique_dict:
A, b, mapping, _ = constraint_clique_dict[clique]
evaluations = evaluate_linear_map(
clique, A, b, mapping, input_vals
)
random_evaluations = evaluate_linear_map(
clique, A, b, mapping, random_input_vals
)
for evaluation in evaluations:
self.assertAlmostEqual(evaluation, 0.0)
for evaluation in random_evaluations:
self.assertNotAlmostEqual(evaluation, 0.0)
def test_cost(self):
n_runs = 10
for _ in range(n_runs):
for sparse in [True, False]:
for ee_cost in [True, False]:
run_cost_test(self, self.robot, self.graph, sparse, ee_cost)
def test_distance_constraints(self):
n_runs = 10
for _ in range(n_runs):
for sparse in [True, False]:
for ee_cost in [True, False]:
run_constraint_test(self, self.graph, sparse, ee_cost)
class TestTruncatedUR10(unittest.TestCase):
def setUp(self):
self.robot, self.graph = load_ur10()
n = 3
a_full = [0, -0.612, -0.5723, 0, 0, 0]
d_full = [0.1273, 0, 0, 0.1639, 0.1157, 0.0922]
al_full = [np.pi / 2, 0, 0, np.pi / 2, -np.pi / 2, 0]
th_full = [0, 0, 0, 0, 0, 0]
a = a_full[0:n]
d = d_full[0:n]
al = al_full[0:n]
th = th_full[0:n]
ub = np.minimum(np.random.rand(n) * (np.pi / 2) + np.pi / 2, np.pi)
lb = -ub
modified_dh = False
params = {
"a": a[:n],
"alpha": al[:n],
"d": d[:n],
"theta": th[:n],
"lb": lb[:n],
"ub": ub[:n],
"modified_dh": modified_dh,
"num_joints": n
}
self.robot = RobotRevolute(params)
self.graph = ProblemGraphRevolute(self.robot)
def test_cost(self):
n_runs = 10
for _ in range(n_runs):
for sparse in [True, False]:
for ee_cost in [True, False]:
run_cost_test(self, self.robot, self.graph, sparse, ee_cost)
def test_distance_constraints(self):
n_runs = 10
for _ in range(n_runs):
for sparse in [True, False]:
for ee_cost in [True, False]:
run_constraint_test(self, self.graph, sparse, ee_cost)
if __name__ == "__main__":
unittest.main()
|
{"hexsha": "655b6260d48553e89ec4487bf28f6010af183ba7", "size": 9444, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_sdp_snl.py", "max_stars_repo_name": "utiasSTARS/GraphIK", "max_stars_repo_head_hexsha": "c2d05386bf9f9baf8ad146125bfebc3b73fccd14", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-11-08T23:26:03.000Z", "max_stars_repo_stars_event_max_datetime": "2020-11-08T23:26:03.000Z", "max_issues_repo_path": "tests/test_sdp_snl.py", "max_issues_repo_name": "utiasSTARS/GraphIK", "max_issues_repo_head_hexsha": "c2d05386bf9f9baf8ad146125bfebc3b73fccd14", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/test_sdp_snl.py", "max_forks_repo_name": "utiasSTARS/GraphIK", "max_forks_repo_head_hexsha": "c2d05386bf9f9baf8ad146125bfebc3b73fccd14", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.9087452471, "max_line_length": 108, "alphanum_fraction": 0.5549555273, "include": true, "reason": "import numpy,from numpy,import networkx", "num_tokens": 2227}
|
module TestTUI
#=
Remaining problems:
- bug in TerminalUserInterfaces.terminal_size()
- printing of outputs is cut off. See `paragraph.jl` in TerminalUserInterfaces
ToDo:
- also track files in `test` directory
=#
using Glob
using Revise
using Parameters
using Pkg
using Suppressor
using TerminalUserInterfaces
const TUI = TerminalUserInterfaces
const DEVDIR = get(ENV, "JULIA_PKG_DEVDIR", joinpath(homedir(), ".julia/dev"))
include("./utils.jl")
include("./test.jl")
include("./textwidget.jl")
include("./tui.jl")
function testtui(
pattern = "*";
dir = DEVDIR)
testtui(findpackages(pattern, dir))
end
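# Illustrative usage (not part of the original file), assuming `findpackages`
# from utils.jl treats `pattern` as a glob over package directory names under
# `dir`: calling `testtui("Foo*")` watches and re-tests every dev package whose
# name matches "Foo*".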
#=
Tasks:
1. watch package directories and requeue changed packages
2. empty queue and update testresults
3. update UI
=#
function startwatching(packagepaths, results, channel)
for (packagepath, result) in zip(packagepaths, results)
paths = sourcepaths(packagepath)
@async begin
entr(paths, pause = .5) do
try
if result.state != Queued
result.state = Queued
push!(channel, packagepath)
end
catch e
@error e
end
end
end
end
end
function starttesting(resultdict, channel)
for packagepath in channel
try
result = resultdict[packagepath]
result.state = Running
newresult = runtests(packagepath)
if result.state != Queued
result.output = newresult.output
result.state = newresult.state
end
catch e
@error e pkg=packagepath
end
end
end
function testtui(packagepaths::Vector{String})
@assert all(istestablepackage.(packagepaths))
packagepaths = sort(packagepaths)
names = [splitpath(path)[end] for path in packagepaths]
results = [TestResult(name) for name in names]
resultdict = Dict(zip(packagepaths, results))
channel = Channel{String}(100)
startwatching(packagepaths, results, channel)
Threads.@spawn starttesting(resultdict, channel)
tuiloop(results)
return results, channel
end
export testtui, runtests
end
|
{"hexsha": "0cafe92d9939861400c561f003951f99aa957c3b", "size": 2232, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/TestTUI.jl", "max_stars_repo_name": "lorenzoh/TestTUI.jl", "max_stars_repo_head_hexsha": "071577764a32d5dad0160dc13307d39582c123e8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-08-31T07:45:31.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-09T23:36:51.000Z", "max_issues_repo_path": "src/TestTUI.jl", "max_issues_repo_name": "lorenzoh/TestTUI.jl", "max_issues_repo_head_hexsha": "071577764a32d5dad0160dc13307d39582c123e8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/TestTUI.jl", "max_forks_repo_name": "lorenzoh/TestTUI.jl", "max_forks_repo_head_hexsha": "071577764a32d5dad0160dc13307d39582c123e8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-08-09T23:36:55.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-09T23:36:55.000Z", "avg_line_length": 23.4947368421, "max_line_length": 78, "alphanum_fraction": 0.6339605735, "num_tokens": 515}
|