text
stringlengths 0
1.25M
| meta
stringlengths 47
1.89k
|
|---|---|
# coding: utf-8
# Copyright (c) Scanlon Materials Theory Group
# Distributed under the terms of the MIT License.
"""
Module containing helper functions for dealing with band structures.
todo:
* Extend get projections to allow specifying lm orbitals and atomic sites.
"""
import numpy as np
import itertools as it
from copy import deepcopy
from collections import defaultdict
from pymatgen.electronic_structure.core import Spin
from pymatgen.electronic_structure.bandstructure import (BandStructure,
BandStructureSymmLine)
def get_projections_by_branches(bs, selection, normalise=None):
    """Split orbital projections of a band structure into its branches.

    The projections are computed by :func:`get_projections` (see that
    function for the exact semantics of ``selection`` and ``normalise``)
    and then sliced along the k-point axis according to ``bs.branches``.

    Args:
        bs (:obj:`~pymatgen.electronic_structure.bandstructure.BandStructureSymmLine`):
            The band structure.
        selection (list): Projections to extract; element names or
            ``(element, orbital(s))`` tuples, as accepted by
            :func:`get_projections`.
        normalise (:obj:`str`, optional): ``'all'``, ``'select'`` or
            ``None`` (no normalisation). Defaults to ``None``.

    Returns:
        list: One entry per branch, each a list of ``{spin: projections}``
        dicts in the order of ``selection``, where ``projections`` is a
        :obj:`numpy.array` indexed as ``projections[band_index][kpoint_index]``.
    """
    projections = get_projections(bs, selection, normalise=normalise)
    spins = bs.bands.keys()

    per_branch = []
    for branch in bs.branches:
        start = branch['start_index']
        stop = branch['end_index'] + 1
        # deep copy so slicing one branch never aliases another
        sliced = deepcopy(projections)
        for idx, spin in it.product(range(len(projections)), spins):
            sliced[idx][spin] = projections[idx][spin][:, start:stop]
        per_branch.append(sliced)
    return per_branch
def get_projections(bs, selection, normalise=None):
    """Returns orbital projections from a band structure.

    Args:
        bs (:obj:`~pymatgen.electronic_structure.bandstructure.BandStructureSymmLine`):
            The band structure.
        selection (list): A list of :obj:`tuple` or :obj:`string`
            identifying which projections to return. Projections can be
            specified by both element and orbital, for example::

                [('Bi', 's'), ('Bi', 'p'), ('S', 'p')]

            If just the element is specified then all the orbitals of that
            element are combined, e.g. ``'S'``. Particular orbitals can
            also be combined, e.g. ``('S', ('s', 'p', 'd'))``.
        normalise (:obj:`str`, optional): Normalisation of the projections.
            Options are:

                * ``'all'``: Normalised against the sum of all projections.
                * ``'select'``: Normalised against the sum of the selected
                  projections.
                * ``None``: No normalisation performed.

            Defaults to ``None``.

    Returns:
        list: A ``list`` of orbital projections, in the same order as
        specified in ``selection``, with the format::

            [ {spin: projections}, {spin: projections} ... ]

        Where spin is a :obj:`pymatgen.electronic_structure.core.Spin`
        object and projections is a :obj:`numpy.array` of::

            projections[band_index][kpoint_index]

        If there are no projections in the band structure, then an array of
        zeros is returned for each spin.
    """
    spins = bs.bands.keys()
    nbands = bs.nb_bands
    nkpts = len(bs.kpoints)

    # if we are to normalise the data later we need access to all projections
    elements = bs.structure.symbol_set
    all_orbitals = ['s', 'p', 'd', 'f']

    # dictio has the form: {'el1': [s, p, d, f], 'el2': [s, p, d, f]...}
    dictio = dict(zip(elements, [all_orbitals] * len(elements)))

    # bs.get_projections_on_elements_and_orbitals() returns the data in a
    # really frustrating format, namely:
    #   {spin: [band_index][kpoint_index]{element: {orbital: projection}}}
    all_proj = bs.get_projections_on_elements_and_orbitals(dictio)

    # Make a defaultdict of defaultdicts
    dict_proj = defaultdict(lambda: defaultdict(dict))
    # BUG FIX: the original used dict(zip(spins, [np.zeros(...)] * len(spins)))
    # which makes every spin share the *same* array object; the in-place "+="
    # below then double-counted for spin-polarised band structures. Allocate
    # a distinct array per spin instead.
    sum_proj = {spin: np.zeros((nbands, nkpts)) for spin in spins}

    # store the projections for all elements and orbitals in a useable format
    for spin, element, orbital in it.product(spins, elements, all_orbitals):
        # convert data to [nb][nk][projection]
        el_orb_proj = np.array(
            [[all_proj[spin][nb][nk][element][orbital]
              for nk in range(nkpts)] for nb in range(nbands)])
        dict_proj[element][orbital][spin] = el_orb_proj
        if normalise == 'all':
            sum_proj[spin] += el_orb_proj

    # now go through the selected orbitals and extract what's needed
    spec_proj = []
    for spec in selection:
        if isinstance(spec, str):
            # spec is just an element type, therefore sum all orbitals
            element = spec
            orbitals = all_orbitals
        else:
            element, orbitals = spec
            # even if there is only one orbital, make sure we can loop over it
            orbitals = tuple(orbitals)

        # distinct zero array per spin (same aliasing fix as sum_proj above)
        proj = {spin: np.zeros((nbands, nkpts)) for spin in spins}
        for spin, orbital in it.product(spins, orbitals):
            proj[spin] += dict_proj[element][orbital][spin]
            if normalise == 'select':
                sum_proj[spin] += dict_proj[element][orbital][spin]
        spec_proj.append(proj)

    if normalise:
        # to prevent warnings/errors relating to divide by zero,
        # catch warnings and surround divide with np.nan_to_num
        with np.errstate(divide='ignore', invalid='ignore'):
            for spin, i in it.product(spins, range(len(spec_proj))):
                spec_proj[i][spin] = np.nan_to_num(spec_proj[i][spin] /
                                                   sum_proj[spin])
    return spec_proj
def get_reconstructed_band_structure(list_bs, efermi=None):
    """Combine a list of band structures into a single band structure.

    This is typically very useful when you split non self consistent
    band structure runs into several independent jobs and want to merge
    back the results.

    This method will also ensure that any BandStructure objects will contain
    branches.

    Args:
        list_bs (:obj:`list` of \
        :obj:`~pymatgen.electronic_structure.bandstructure.BandStructure` \
        or :obj:`~pymatgen.electronic_structure.bandstructure.BandStructureSymmLine`):
            The band structures.
        efermi (:obj:`float`, optional): The Fermi energy of the reconstructed
            band structure. If `None`, an average of all the Fermi energies
            across all band structures is used.

    Returns:
        :obj:`pymatgen.electronic_structure.bandstructure.BandStructure` or \
        :obj:`pymatgen.electronic_structure.bandstructure.BandStructureSymmLine`:
        A band structure object. The type depends on the type of the band
        structures in ``list_bs``.
    """
    if efermi is None:
        efermi = sum(b.efermi for b in list_bs) / len(list_bs)

    rec_lattice = list_bs[0].lattice_rec
    # truncate all runs to the smallest common number of bands
    nb_bands = min(bs.nb_bands for bs in list_bs)

    kpoints = np.concatenate([[k.frac_coords for k in bs.kpoints]
                              for bs in list_bs])
    dicts = [bs.labels_dict for bs in list_bs]
    labels_dict = {k: v.frac_coords for d in dicts for k, v in d.items()}

    # pymatgen band structure objects support branches. These are formed when
    # two kpoints with the same label are next to each other. This bit of code
    # will ensure that the band structure will contain branches, if it doesn't
    # already. dup_ids lists every k-point index, with interior labelled
    # k-points repeated so they become branch boundaries.
    label_coords = tuple(map(tuple, labels_dict.values()))  # hoisted out of loop
    dup_ids = []
    for i, k in enumerate(kpoints):
        dup_ids.append(i)
        if (tuple(k) in label_coords
                and i != 0 and i != len(kpoints) - 1
                and (not np.array_equal(kpoints[i + 1], k)
                     or not np.array_equal(kpoints[i - 1], k))):
            dup_ids.append(i)
    kpoints = kpoints[dup_ids]

    eigenvals = {}
    eigenvals[Spin.up] = np.concatenate([bs.bands[Spin.up][:nb_bands]
                                         for bs in list_bs], axis=1)
    eigenvals[Spin.up] = eigenvals[Spin.up][:, dup_ids]
    if list_bs[0].is_spin_polarized:
        eigenvals[Spin.down] = np.concatenate([bs.bands[Spin.down][:nb_bands]
                                               for bs in list_bs], axis=1)
        # BUG FIX: the original read from eigenvals[Spin.up] here, silently
        # replacing the down-spin eigenvalues with up-spin data.
        eigenvals[Spin.down] = eigenvals[Spin.down][:, dup_ids]

    projections = {}
    if len(list_bs[0].projections) != 0:
        # BUG FIX: the original applied [dup_ids] to each band structure's
        # projections *before* concatenation, indexing the band axis with
        # k-point indices (an IndexError once len(kpoints) > nb_bands). The
        # k-point duplication is applied once, after concatenation, via
        # [:, dup_ids] — matching the eigenvalue handling above.
        projs = [bs.projections[Spin.up][:nb_bands] for bs in list_bs]
        projections[Spin.up] = np.concatenate(projs, axis=1)[:, dup_ids]
        if list_bs[0].is_spin_polarized:
            projs = [bs.projections[Spin.down][:nb_bands]
                     for bs in list_bs]
            projections[Spin.down] = np.concatenate(projs, axis=1)[:, dup_ids]

    if isinstance(list_bs[0], BandStructureSymmLine):
        return BandStructureSymmLine(kpoints, eigenvals, rec_lattice,
                                     efermi, labels_dict,
                                     structure=list_bs[0].structure,
                                     projections=projections)
    else:
        return BandStructure(kpoints, eigenvals, rec_lattice, efermi,
                             labels_dict, structure=list_bs[0].structure,
                             projections=projections)
|
{"hexsha": "1e658c937cddb2ce8fe9137e4f0b9de9b5732d59", "size": 10930, "ext": "py", "lang": "Python", "max_stars_repo_path": "sumo/electronic_structure/bandstructure.py", "max_stars_repo_name": "antisymmetric/sumo", "max_stars_repo_head_hexsha": "325c5d5bd6ac63f8a58b89a81cba0ca36842936a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-08-21T02:28:08.000Z", "max_stars_repo_stars_event_max_datetime": "2019-08-21T02:28:08.000Z", "max_issues_repo_path": "sumo/electronic_structure/bandstructure.py", "max_issues_repo_name": "antisymmetric/sumo", "max_issues_repo_head_hexsha": "325c5d5bd6ac63f8a58b89a81cba0ca36842936a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "sumo/electronic_structure/bandstructure.py", "max_forks_repo_name": "antisymmetric/sumo", "max_forks_repo_head_hexsha": "325c5d5bd6ac63f8a58b89a81cba0ca36842936a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.896797153, "max_line_length": 87, "alphanum_fraction": 0.6090576395, "include": true, "reason": "import numpy", "num_tokens": 2556}
|
/* Copyright (C) 2014 InfiniDB, Inc.
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; version 2 of
the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
MA 02110-1301, USA. */
/***************************************************************************
*
* $Id: blockrequestprocessor.cpp 725 2008-09-26 16:26:47Z jrodriguez $
*
* jrodriguez@calpont.com *
* *
***************************************************************************/
#include "blockrequestprocessor.h"
#include "rwlock_local.h"
#include "dbrm.h"
#include <sys/time.h>
#include <pthread.h>
#include <sstream>
#include <string>
#include <list>
#include <boost/date_time/posix_time/posix_time.hpp>
using namespace std;
namespace dbbc
{
// Construct the block request processor. fbMgr owns the file-buffer cache
// (numBlcks buffers of blckSz bytes; deleteBlocks is presumably the eviction
// batch size — see the FileBufferMgr constructor), and fIOMgr drives
// thrCount reader threads that consume requests from fBRPRequestQueue.
BlockRequestProcessor::BlockRequestProcessor(uint32_t numBlcks,
                                             int thrCount,
                                             int blocksPerRead,
                                             uint32_t deleteBlocks,
                                             uint32_t blckSz) :
    fbMgr(numBlcks, blckSz, deleteBlocks),
    fIOMgr(fbMgr, fBRPRequestQueue, thrCount, blocksPerRead)
{
}
// Destructor: nothing to do explicitly; member objects clean up themselves.
BlockRequestProcessor::~BlockRequestProcessor()
{
}
// Shut down request processing: stop the request queue first so no new work
// is dispatched, then stop the I/O manager's worker threads.
void BlockRequestProcessor::stop()
{
    fBRPRequestQueue.stop();
    fIOMgr.stop();
}
// Ensure every block in the LBID range [range.start, range.start+range.size)
// is cached, loading the missing tail via a blocking file request.
// lbidCount is set to the number of blocks read for that request; NOTE it is
// left untouched when the whole range is already cached, so callers should
// initialise it. Returns the number of blocks newly loaded into the cache.
int BlockRequestProcessor::check(const BRM::InlineLBIDRange& range, const BRM::VER_t ver, uint32_t& lbidCount)
{
    uint64_t maxLbid = range.start; // first lbid not yet confirmed cached
    uint64_t rangeLen = range.size;
    uint64_t idx;
    uint64_t adjSz;
    struct timespec start_tm;

    if (fTrace) clock_gettime(CLOCK_MONOTONIC, &start_tm);

    // skip the leading run of blocks that are already in the cache
    for (idx = 0; fbMgr.exists(maxLbid, ver) == true && idx < rangeLen; maxLbid++, idx++);

    if (idx == rangeLen) // range is already loaded
    {
        // BUG FIX: this trace record was previously written unconditionally
        // and read start_tm, which is only initialised when fTrace is set
        // (an uninitialized read). Guard the whole branch with fTrace, like
        // the trace block at the end of the function.
        if (fTrace)
        {
            uint32_t fbo;
            BRM::OID_t oid;
            fdbrm.lookup(maxLbid, ver, false, oid, fbo);
            fLogFile
                    << oid << " "
                    << maxLbid << " "
                    << fbo << " "
                    << rangeLen << " "
                    << 0 << " "
                    << 0 << " "
                    << 0 << " "
                    << right << fixed << ((double)(start_tm.tv_sec + (1.e-9 * start_tm.tv_nsec)))
                    << endl;
        }
        return 0;
    }

    // request only the remaining (uncached) sub-range
    adjSz = rangeLen - idx;
    BRM::InlineLBIDRange adjRange;
    adjRange.start = maxLbid;
    adjRange.size = adjSz;
    fileRequest rqstBlk(adjRange, ver);
    check(rqstBlk); // blocks until the I/O manager completes the request
    lbidCount = rqstBlk.BlocksRead();

    if (fTrace)
    {
        uint32_t fbo;
        BRM::OID_t oid;
        fdbrm.lookup(maxLbid, ver, false, oid, fbo);
        fLogFile
                << oid << " "
                << maxLbid << " "
                << fbo << " "
                << rangeLen << " "
                << adjSz << " "
                << rqstBlk.BlocksRead() << " "
                << rqstBlk.BlocksLoaded() << " "
                << right << fixed << ((double)(start_tm.tv_sec + (1.e-9 * start_tm.tv_nsec)))
                << endl;
    }

    return rqstBlk.BlocksLoaded();
} // check
// Submit a read request and block the calling thread until the I/O manager
// marks it COMPLETE. The handshake uses the request's own mutex/condition
// pair: the predicate is set to SENDING, the request is queued, and we wait
// for the predicate to advance to COMPLETE (or beyond). Always returns 0.
int BlockRequestProcessor::check(fileRequest& rqstBlk)
{
    pthread_mutex_lock(&rqstBlk.frMutex());
    rqstBlk.SetPredicate(fileRequest::SENDING);
    sendRequest(rqstBlk); // start file read request

    // standard condition-variable loop; re-checks the predicate to guard
    // against spurious wakeups
    while (rqstBlk.frPredicate() < fileRequest::COMPLETE)
        pthread_cond_wait(&rqstBlk.frCond(), &rqstBlk.frMutex());

    pthread_mutex_unlock(&rqstBlk.frMutex());
    return 0;
}
// Ensure a single block (lbid, ver) is cached. Sets wasBlockInCache to
// report whether it was already present; when it was not, issues a blocking
// read request. Returns 0 on a cache hit, otherwise the result of the
// blocking check() call.
int BlockRequestProcessor::check(BRM::LBID_t lbid, BRM::VER_t ver, bool flg, bool& wasBlockInCache)
{
    wasBlockInCache = fbMgr.exists(lbid, ver);
    if (wasBlockInCache)
        return 0;

    fileRequest rqstBlk(lbid, ver, flg);
    return check(rqstBlk);
}
// Enqueue a file read request for the I/O manager; returns the queue's
// push status code.
int BlockRequestProcessor::sendRequest(fileRequest& blk)
{
    return fBRPRequestQueue.push(blk);
}
// Scan the LBID range and collect cached blocks into readList, returning
// the number found. NOTE(review): the cache lookup is commented out below,
// so in this (test) build `ret` is always false and the function currently
// collects nothing and always returns 0 — confirm this is intentional.
const int BlockRequestProcessor::read(const BRM::InlineLBIDRange& range, FileBufferList_t& readList, const BRM::VER_t ver)
{
    int blksLoaded = 0;
    HashObject_t fb = {0, 0, 0};

    for (int idx = 0; (uint64_t)idx < range.size; idx++)
    {
        // cache key for the idx-th block of the range
        fb.lbid = range.start + idx;
        fb.ver = ver;
        fb.poolIdx = 0;
        FileBuffer fbRet(-1, -1);
        bool ret = false; //fbMgr.find(fb, fbRet);

        if (ret)
        {
            blksLoaded++;
            readList.push_back(fbRet);
        }
    }

    return blksLoaded;
}
// Look up the cached buffer for (lbid, ver) and return findPtr's result
// directly (no copy is made).
FileBuffer* BlockRequestProcessor::getBlockPtr(const BRM::LBID_t lbid, const BRM::VER_t ver )
{
    HashObject_t key = {lbid, ver, 0};
    return fbMgr.findPtr(key);
}
// Copy the cached block (lbid, ver) into fb. Returns 1 on a cache hit and
// 0 on a miss; a miss does not trigger a load.
const int BlockRequestProcessor::read(const BRM::LBID_t& lbid, const BRM::VER_t& ver, FileBuffer& fb)
{
    HashObject_t key = {lbid, ver, 0};
    return fbMgr.find(key, fb) ? 1 : 0;
}
// Copy the cached block (lbid, ver) into the caller-supplied buffer.
// Returns 1 on a cache hit and 0 on a miss; a miss does not trigger a load.
const int BlockRequestProcessor::read(const BRM::LBID_t& lbid, const BRM::VER_t& ver, void* bufferPtr)
{
    HashObject_t key = {lbid, ver, 0};
    return fbMgr.find(key, bufferPtr) ? 1 : 0;
}
// Fetch block (lbid, ver) into bufferPtr. On a cache miss, issues a blocking
// file request that reads the block directly into the buffer. wasCached
// reports whether the block came from the cache. Always returns 1.
const int BlockRequestProcessor::getBlock(const BRM::LBID_t& lbid, const BRM::VER_t& ver, void* bufferPtr, bool flg, bool& wasCached)
{
    HashObject_t key = {lbid, ver, 0};
    wasCached = fbMgr.find(key, bufferPtr);
    if (!wasCached)
    {
        fileRequest rqstBlk(lbid, ver, flg, (uint8_t*) bufferPtr);
        check(rqstBlk); // blocks until the read completes
    }
    return 1;
}
// Report whether block (lbid, ver) is currently in the cache.
bool BlockRequestProcessor::exists(BRM::LBID_t lbid, BRM::VER_t ver)
{
    HashObject_t key = {lbid, ver, 0};
    return fbMgr.exists(key);
}
// Drop all buffers from the file-buffer cache.
void BlockRequestProcessor::flushCache()
{
    fbMgr.flushCache();
}
/**
const uint32_t BlockRequestProcessor::resize(const uint32_t s)
{
int rc = fbMgr.resize(s);
return rc;
}
**/
// Write a human-readable dump of the cache's LRU list to os (debug aid);
// returns the stream for chaining.
ostream& BlockRequestProcessor::formatLRUList(ostream& os) const
{
    return fbMgr.formatLRUList(os);
}
} // namespace dbbc
|
{"hexsha": "95564877f80f53f74a6239a7431a7777290042c8", "size": 6494, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/vendor/mariadb-10.6.7/storage/columnstore/columnstore/utils/testbc/blockrequestprocessor.cpp", "max_stars_repo_name": "zettadb/zettalib", "max_stars_repo_head_hexsha": "3d5f96dc9e3e4aa255f4e6105489758944d37cc4", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/vendor/mariadb-10.6.7/storage/columnstore/columnstore/utils/testbc/blockrequestprocessor.cpp", "max_issues_repo_name": "zettadb/zettalib", "max_issues_repo_head_hexsha": "3d5f96dc9e3e4aa255f4e6105489758944d37cc4", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/vendor/mariadb-10.6.7/storage/columnstore/columnstore/utils/testbc/blockrequestprocessor.cpp", "max_forks_repo_name": "zettadb/zettalib", "max_forks_repo_head_hexsha": "3d5f96dc9e3e4aa255f4e6105489758944d37cc4", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2.0, "max_forks_repo_forks_event_min_datetime": "2022-02-27T14:00:01.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T06:24:22.000Z", "avg_line_length": 25.5669291339, "max_line_length": 133, "alphanum_fraction": 0.595934709, "num_tokens": 1780}
|
import logging
import pathlib
from unittest import mock
import boost_histogram as bh
import numpy as np
import pytest
from cabinetry import template_builder
def test__check_for_override():
    """_check_for_override returns the systematic's override for a given
    template/option if present, and None otherwise."""
    check = template_builder._check_for_override
    overrides = {"Up": {"setting": "val"}}

    # override exists for template
    assert check(overrides, "Up", "setting") == "val"

    # no override for template
    assert check({}, "Up", "setting") is None

    # no option requested
    assert check(overrides, "Up", "") is None

    # no template requested
    assert check(overrides, "", "setting") is None

    # override is a list
    list_overrides = {"Up": {"setting": ["val", "val2"]}}
    assert check(list_overrides, "Up", "setting") == ["val", "val2"]
def test__get_ntuple_paths(caplog):
    """Exercise path template resolution: placeholder substitution for region
    and sample, systematic overrides, warnings when overrides are specified
    without a matching placeholder, and errors for unresolved placeholders."""
    # only general path, no override
    assert template_builder._get_ntuple_paths("path.root", {}, {}, {}, "") == [
        pathlib.Path("path.root")
    ]

    # general path with region and sample templates
    assert (
        template_builder._get_ntuple_paths(
            "{RegionPath}/{SamplePaths}",
            {"RegionPath": "region"},
            {"SamplePaths": "sample.root"},
            {},
            "",
        )
        == [pathlib.Path("region/sample.root")]
    )

    # two SamplePaths: one resolved path per entry
    assert (
        template_builder._get_ntuple_paths(
            "{RegionPath}/{SamplePaths}",
            {"RegionPath": "region"},
            {"SamplePaths": ["sample.root", "new.root"]},
            {},
            "",
        )
        == [pathlib.Path("region/sample.root"), pathlib.Path("region/new.root")]
    )

    # systematic with override for RegionPath and SamplePaths
    assert (
        template_builder._get_ntuple_paths(
            "{RegionPath}/{SamplePaths}",
            {"RegionPath": "reg_1"},
            {"SamplePaths": "path.root"},
            {
                "Name": "variation",
                "Up": {
                    "SamplePaths": ["variation.root", "new.root"],
                    "RegionPath": "reg_2",
                },
            },
            "Up",
        )
        == [pathlib.Path("reg_2/variation.root"), pathlib.Path("reg_2/new.root")]
    )

    # systematic without override falls back to the sample's path
    assert template_builder._get_ntuple_paths(
        "{SamplePaths}", {}, {"SamplePaths": "path.root"}, {"Name": "variation"}, "Up"
    ) == [pathlib.Path("path.root")]

    caplog.set_level(logging.DEBUG)
    caplog.clear()

    # warning: region override given but no {RegionPath} placeholder in path
    assert template_builder._get_ntuple_paths(
        "path.root", {"RegionPath": "region.root"}, {}, {}, ""
    ) == [pathlib.Path("path.root")]
    assert "region override specified, but {RegionPath} not found in default path" in [
        rec.message for rec in caplog.records
    ]
    caplog.clear()

    # warning: sample override given but no {SamplePaths} placeholder in path
    assert template_builder._get_ntuple_paths(
        "path.root", {}, {"SamplePaths": "sample.root"}, {}, ""
    ) == [pathlib.Path("path.root")]
    assert "sample override specified, but {SamplePaths} not found in default path" in [
        rec.message for rec in caplog.records
    ]
    caplog.clear()

    # error: no override for {RegionPath}
    with pytest.raises(ValueError, match="no path setting found for region region"):
        template_builder._get_ntuple_paths(
            "{RegionPath}", {"Name": "region"}, {}, {}, ""
        )

    # error: no override for {SamplePaths}
    with pytest.raises(ValueError, match="no path setting found for sample sample"):
        template_builder._get_ntuple_paths(
            "{SamplePaths}", {}, {"Name": "sample"}, {}, ""
        )
def test__get_variable():
    """The variable name is read straight from the region configuration."""
    region = {"Variable": "jet_pt"}
    assert template_builder._get_variable(region) == "jet_pt"
def test__get_filter():
    """_get_filter returns the region's filter expression, honouring
    systematic overrides and returning None when no filter is set."""
    get_filter = template_builder._get_filter
    region = {"Filter": "jet_pt > 0"}

    # no override
    assert get_filter(region, {}, {}, "") == "jet_pt > 0"

    # no filter set at all
    assert get_filter({}, {}, {}, "") is None

    # systematic with override
    syst_override = {"Name": "variation", "Up": {"Filter": "jet_pt > 100"}}
    assert get_filter(region, {}, syst_override, "Up") == "jet_pt > 100"

    # systematic without override falls back to the nominal filter
    assert get_filter(region, {}, {"Name": "variation"}, "Up") == "jet_pt > 0"
def test__get_weight():
    """_get_weight returns the sample's weight expression, honouring
    systematic overrides and returning None when no weight is set."""
    get_weight = template_builder._get_weight
    sample = {"Weight": "weight_mc"}

    # no override
    assert get_weight({}, sample, {}, "") == "weight_mc"

    # no weight set at all
    assert get_weight({}, {}, {}, "") is None

    # systematic with override
    syst_override = {"Name": "variation", "Up": {"Weight": "weight_modified"}}
    assert get_weight({}, sample, syst_override, "Up") == "weight_modified"

    # systematic without override falls back to the nominal weight
    assert get_weight({}, sample, {"Name": "variation"}, "Up") == "weight_mc"
def test__get_position_in_file():
    """_get_position_in_file returns the sample's tree name, honouring
    systematic overrides."""
    get_pos = template_builder._get_position_in_file

    # no override
    assert get_pos({"Tree": "tree_name"}, {"Name": "Nominal"}, "") == "tree_name"

    # systematic with override
    syst_override = {"Name": "variation", "Up": {"Tree": "up_tree"}}
    assert get_pos({"Tree": "nominal"}, syst_override, "Up") == "up_tree"

    # systematic without override keeps the nominal tree
    assert get_pos({"Tree": "nominal"}, {"Name": "variation"}, "Up") == "nominal"
def test__get_binning():
    """Binning is read directly from the region; a region without binning
    information raises NotImplementedError."""
    binning = template_builder._get_binning({"Binning": [1, 2, 3]})
    np.testing.assert_equal(binning, [1, 2, 3])

    with pytest.raises(NotImplementedError, match="cannot determine binning"):
        template_builder._get_binning({})
def test__Builder():
    """Constructor arguments are stored on the instance unchanged."""
    folder = pathlib.Path("path")
    builder = template_builder._Builder(folder, "file.root", "uproot")
    assert builder.histogram_folder == folder
    assert builder.general_path == "file.root"
    assert builder.method == "uproot"
@mock.patch("cabinetry.template_builder._Builder._name_and_save")
@mock.patch("cabinetry.histo.Histogram.from_arrays", return_value="histogram")
@mock.patch(
    "cabinetry.contrib.histogram_creation.from_uproot", return_value=([1], [0.1])
)
def test__Builder_create_histogram(mock_uproot_builder, mock_histo, mock_save):
    """_create_histogram should resolve the sample path, call the uproot
    backend with the region/sample settings, convert the result to a
    Histogram, and hand it off for naming and saving. Unknown backends
    raise NotImplementedError."""
    # the binning [0] is not a proper binning, but simplifies the comparison
    region = {"Name": "test_region", "Variable": "x", "Binning": [0], "Filter": "x>3"}
    sample = {
        "Name": "sample",
        "Tree": "tree",
        "SamplePaths": "path_to_sample",
        "Weight": "weight_mc",
    }
    systematic = {"Name": "Nominal"}

    builder = template_builder._Builder(pathlib.Path("path"), "{SamplePaths}", "uproot")
    builder._create_histogram(region, sample, systematic, "Nominal")

    # verify the backend call happened properly
    assert mock_uproot_builder.call_args_list == [
        (
            ([pathlib.Path("path_to_sample")], "tree", "x", [0]),
            {"weight": "weight_mc", "selection_filter": "x>3"},
        )
    ]

    # verify the histogram conversion call
    assert mock_histo.call_args_list == [(([0], [1], [0.1]), {})]

    # verify the call for saving
    assert mock_save.call_args_list == [
        (("histogram", region, sample, systematic, "Nominal"), {})
    ]

    # other backends
    builder_unknown = template_builder._Builder(
        pathlib.Path("path"), "{SamplePaths}", "unknown"
    )
    with pytest.raises(NotImplementedError, match="unknown backend unknown"):
        builder_unknown._create_histogram(region, sample, systematic, "Nominal")
@mock.patch("cabinetry.histo.build_name", return_value="name")
def test__Builder__name_and_save(mock_name):
    """_name_and_save should build the histogram name, validate the
    histogram against that name, and save it under the histogram folder."""
    region = {"Name": "test_region"}
    sample = {"Name": "sample"}
    systematic = {"Name": "Nominal"}
    histogram = mock.MagicMock()

    builder = template_builder._Builder(pathlib.Path("path"), "file.root", "uproot")
    builder._name_and_save(histogram, region, sample, systematic, "Up")

    # check that the naming function was called, the histogram was validated and saved
    assert mock_name.call_args_list == [((region, sample, systematic, "Up"), {})]
    assert histogram.validate.call_args_list == [mock.call("name")]
    assert histogram.save.call_args_list == [mock.call(pathlib.Path("path/name"))]
@mock.patch("cabinetry.template_builder._Builder._name_and_save")
def test__Builder__wrap_custom_template_builder(mock_save):
    """The wrapper around a user-supplied template builder should save the
    histogram the user function returns, and reject non-Histogram returns."""
    histogram = bh.Histogram(bh.axis.Variable([0, 1]))
    region = {"Name": "test_region"}
    sample = {"Name": "sample"}
    systematic = {"Name": "Nominal"}

    # user-defined template builder returning a valid histogram
    def test_func(reg, sam, sys, tem):
        return histogram

    builder = template_builder._Builder(pathlib.Path("path"), "file.root", "uproot")
    wrapped_func = builder._wrap_custom_template_builder(test_func)

    # check the behavior of the wrapped function
    # when called, it should save the returned histogram
    wrapped_func(region, sample, systematic, "Up")
    assert mock_save.call_args_list == [
        ((histogram, region, sample, systematic, "Up"), {})
    ]

    # wrapped function returns wrong type
    def test_func_wrong_return(reg, sam, sys, tem):
        return None

    wrapped_func_wrong_return = builder._wrap_custom_template_builder(
        test_func_wrong_return
    )
    with pytest.raises(TypeError, match="must return a boost_histogram.Histogram"):
        wrapped_func_wrong_return(region, sample, systematic, "Up")
def test_create_histograms():
    """create_histograms should dispatch _create_histogram over all templates
    via route.apply_to_all_templates, wiring up the router's wrapper and
    match function when a router is provided."""
    config = {"General": {"HistogramFolder": "path/", "InputPath": "file.root"}}
    method = "uproot"

    # no router
    with mock.patch("cabinetry.route.apply_to_all_templates") as mock_apply:
        template_builder.create_histograms(config, method)
        assert len(mock_apply.call_args_list) == 1
        config_call, func_call = mock_apply.call_args_list[0][0]
        assert config_call == config
        assert (
            func_call.__name__ == "_create_histogram"
        )  # could also compare to function
        # without a router there is no match function
        assert mock_apply.call_args_list[0][1] == {"match_func": None}

    # including a router
    mock_router = mock.MagicMock()
    with mock.patch("cabinetry.route.apply_to_all_templates") as mock_apply:
        template_builder.create_histograms(config, method, router=mock_router)

        # verify wrapper was set
        assert (
            mock_router.template_builder_wrapper.__name__
            == "_wrap_custom_template_builder"
        )

        assert len(mock_apply.call_args_list) == 1
        config_call, func_call = mock_apply.call_args_list[0][0]
        assert config_call == config
        assert func_call.__name__ == "_create_histogram"
        # the router's match function is forwarded to the dispatcher
        assert mock_apply.call_args_list[0][1] == {
            "match_func": mock_router._find_template_builder_match
        }
|
{"hexsha": "b1675a86aa5922f6573bcbbec1c772b8fce1e9ff", "size": 11344, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_template_builder.py", "max_stars_repo_name": "phinate/cabinetry", "max_stars_repo_head_hexsha": "10571e83155b0fd47796b93b85919975c3b364ed", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/test_template_builder.py", "max_issues_repo_name": "phinate/cabinetry", "max_issues_repo_head_hexsha": "10571e83155b0fd47796b93b85919975c3b364ed", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/test_template_builder.py", "max_forks_repo_name": "phinate/cabinetry", "max_forks_repo_head_hexsha": "10571e83155b0fd47796b93b85919975c3b364ed", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.687150838, "max_line_length": 88, "alphanum_fraction": 0.6100141044, "include": true, "reason": "import numpy", "num_tokens": 2655}
|
from collections import defaultdict
from typing import Any, Dict, Sequence, Union
from attrdict import AttrDict
import numpy as np
import sys
import os
import copy
import time
sys.path.append("./ddetr")
import torch
import filelock
from determined.pytorch import (
DataLoader,
LRScheduler,
MetricReducer,
PyTorchTrial,
PyTorchTrialContext,
)
from determined.experimental import Determined
# Deformable DETR imports
import ddetr.util.misc as utils
from ddetr.datasets.coco_eval import CocoEvaluator
from model import build_model
# Experiment dir imports
from data import unwrap_collate_fn, build_dataset
from data_utils import download_coco_from_source
TorchData = Union[Dict[str, torch.Tensor], Sequence[torch.Tensor], torch.Tensor]
def match_name_keywords(n, name_keywords):
    """Return ``True`` if any keyword occurs as a substring of ``n``.

    Used to partition model parameters into optimizer groups by name.

    Args:
        n (str): Parameter name to test.
        name_keywords (iterable of str): Substrings to look for.

    Returns:
        bool: ``True`` if at least one keyword matches, ``False`` otherwise
        (including for an empty keyword list).
    """
    # any() with a generator replaces the original manual flag-and-break loop
    return any(keyword in n for keyword in name_keywords)
class COCOReducer(MetricReducer):
    """Determined ``MetricReducer`` that gathers per-slot COCO detection
    results and reduces them into mAP metrics via ``CocoEvaluator``."""

    def __init__(self, base_ds, iou_types, cat_ids=[]):
        # NOTE(review): mutable default argument for cat_ids; harmless here
        # since it is only read, never mutated, but None would be safer.
        self.base_ds = base_ds      # COCO ground-truth API object for the evaluator
        self.iou_types = iou_types  # IoU types to evaluate (e.g. "bbox")
        self.cat_ids = cat_ids      # optional category-id filter for evaluation
        self.reset()

    def reset(self):
        # Discard everything accumulated so far (start of a new evaluation).
        self.results = []

    def update(self, result):
        # Accumulate per-batch result entries; each entry is presumably an
        # (image_id, prediction) pair — see the dict build in
        # cross_slot_reduce.
        self.results.extend(result)

    def per_slot_reduce(self):
        # No local reduction; ship each slot's raw results to the chief.
        return self.results

    def cross_slot_reduce(self, per_slot_metrics):
        # Build a fresh evaluator and feed it the merged results of every
        # slot, then summarise into the standard COCO mAP numbers.
        coco_evaluator = CocoEvaluator(self.base_ds, self.iou_types)
        if len(self.cat_ids):
            # restrict evaluation to the selected category ids
            for iou_type in self.iou_types:
                coco_evaluator.coco_eval[iou_type].params.catIds = self.cat_ids

        for results in per_slot_metrics:
            results_dict = {r[0]: r[1] for r in results}
            coco_evaluator.update(results_dict)

        # Replicate CocoEvaluator's synchronization step locally: flatten the
        # accumulated per-image evaluations into the underlying COCOeval.
        for iou_type in coco_evaluator.iou_types:
            coco_eval = coco_evaluator.coco_eval[iou_type]
            coco_evaluator.eval_imgs[iou_type] = np.concatenate(
                coco_evaluator.eval_imgs[iou_type], 2
            )
            coco_eval.evalImgs = list(coco_evaluator.eval_imgs[iou_type].flatten())
            coco_eval.params.imgIds = list(coco_evaluator.img_ids)
            # We need to perform a deepcopy here since this dictionary can be modified in a
            # custom accumulate call and we don't want that to change coco_eval.params.
            # See https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocotools/cocoeval.py#L315.
            coco_eval._paramsEval = copy.deepcopy(coco_eval.params)

        coco_evaluator.accumulate()
        coco_evaluator.summarize()

        # pycocotools stats layout: [mAP, mAP@.50, mAP@.75, small, medium, large, ...]
        coco_stats = coco_evaluator.coco_eval["bbox"].stats.tolist()

        loss_dict = {}
        loss_dict["mAP"] = coco_stats[0]
        loss_dict["mAP_50"] = coco_stats[1]
        loss_dict["mAP_75"] = coco_stats[2]
        loss_dict["mAP_small"] = coco_stats[3]
        loss_dict["mAP_medium"] = coco_stats[4]
        loss_dict["mAP_large"] = coco_stats[5]
        return loss_dict
class DeformableDETRTrial(PyTorchTrial):
    """Determined ``PyTorchTrial`` for training Deformable DETR on COCO.

    Hyperparameters (``self.hparams``) select the data backend, optimizer,
    per-group learning rates, warm starting from a pretrained checkpoint,
    and optional finetuning on a category subset (``cat_ids``).
    """

    def __init__(self, context: PyTorchTrialContext) -> None:
        """Download data if needed, then build the model, criterion,
        postprocessors, optimizer (three LR groups), scheduler, and the
        gradient-clipping function."""
        self.context = context
        self.hparams = AttrDict(self.context.get_hparams())
        # If backend is local download data.
        if self.hparams.backend == "local":
            # Use a file lock so only one worker on each node does the download.
            with filelock.FileLock(os.path.join(self.hparams.data_dir, "download.lock")):
                if not all(
                    [
                        os.path.isdir(os.path.join(self.hparams.data_dir, d))
                        for d in ["train2017", "val2017"]
                    ]
                ):
                    download_coco_from_source(self.hparams.data_dir)
        # Category ids used for finetuning; filled in from hparams inside
        # build_validation_data_loader.
        self.cat_ids = []
        # Build the model and configure postprocessors for evaluation.
        model, self.criterion, self.postprocessors = build_model(
            self.hparams, world_size=self.context.distributed.get_size()
        )
        # Load pretrained weights downloaded in the startup-hook.sh from
        # the original repo.
        if "warmstart" in self.hparams and self.hparams.warmstart:
            checkpoint = torch.load("model.ckpt")
            ckpt = checkpoint["model"]
            # Remove class weights if finetuning.
            if "cat_ids" in self.hparams and len(self.hparams.cat_ids):
                delete_keys = [k for k in ckpt if "class_embed" in k]
                for k in delete_keys:
                    del ckpt[k]
            # strict=False because class_embed keys may have been removed above.
            model.load_state_dict(ckpt, strict=False)
        self.model = self.context.wrap_model(model)
        n_parameters = sum(
            p.numel() for p in self.model.parameters() if p.requires_grad
        )
        print("number of params:", n_parameters)
        # Three parameter groups, each with its own learning rate:
        # (1) everything else, (2) backbone params, (3) linear-projection params.
        param_dicts = [
            {
                "params": [
                    p
                    for n, p in self.model.named_parameters()
                    if not match_name_keywords(n, self.hparams.lr_backbone_names)
                    and not match_name_keywords(n, self.hparams.lr_linear_proj_names)
                    and p.requires_grad
                ],
                "lr": self.hparams.lr,
            },
            {
                "params": [
                    p
                    for n, p in self.model.named_parameters()
                    if match_name_keywords(n, self.hparams.lr_backbone_names)
                    and p.requires_grad
                ],
                "lr": self.hparams.lr_backbone,
            },
            {
                "params": [
                    p
                    for n, p in self.model.named_parameters()
                    if match_name_keywords(n, self.hparams.lr_linear_proj_names)
                    and p.requires_grad
                ],
                "lr": self.hparams.lr * self.hparams.lr_linear_proj_mult,
            },
        ]
        # Optimizer choice is a hyperparameter; AdamW is the default path.
        if self.hparams.sgd:
            self.optimizer = self.context.wrap_optimizer(
                torch.optim.SGD(
                    param_dicts,
                    lr=self.hparams.lr,
                    momentum=0.9,
                    weight_decay=self.hparams.weight_decay,
                )
            )
        else:
            self.optimizer = self.context.wrap_optimizer(
                torch.optim.AdamW(
                    param_dicts,
                    lr=self.hparams.lr,
                    weight_decay=self.hparams.weight_decay,
                )
            )
        # Wrap the LR scheduler.
        self.lr_scheduler = self.context.wrap_lr_scheduler(
            torch.optim.lr_scheduler.StepLR(self.optimizer, self.hparams.lr_drop),
            step_mode=LRScheduler.StepMode.STEP_EVERY_EPOCH,
        )
        # Clip gradients only when clip_max_norm is positive; None disables it.
        self.clip_grads_fn = (
            lambda x: torch.nn.utils.clip_grad_norm_(x, self.hparams.clip_max_norm)
            if self.hparams.clip_max_norm > 0
            else None
        )

    def build_training_data_loader(self) -> DataLoader:
        """Return the shuffled COCO train-split DataLoader."""
        dataset_train = build_dataset(image_set="train", args=self.hparams)
        return DataLoader(
            dataset_train,
            batch_size=self.context.get_per_slot_batch_size(),
            collate_fn=unwrap_collate_fn,
            num_workers=self.hparams.num_workers,
            shuffle=True,
        )

    def build_validation_data_loader(self) -> DataLoader:
        """Return the COCO val-split DataLoader and register the COCO reducer
        that aggregates detection metrics across slots."""
        dataset_val = build_dataset(image_set="val", args=self.hparams)
        if "cat_ids" in self.hparams:
            self.cat_ids = self.hparams.cat_ids
            self.catIdtoCls = dataset_val.catIdtoCls
        # Set up evaluator
        self.base_ds = dataset_val.coco
        iou_types = tuple(
            k for k in ("segm", "bbox") if k in self.postprocessors.keys()
        )
        self.reducer = self.context.wrap_reducer(
            COCOReducer(self.base_ds, iou_types, self.cat_ids),
            for_training=False,
            for_validation=True,
        )
        return DataLoader(
            dataset_val,
            batch_size=self.context.get_per_slot_batch_size(),
            collate_fn=unwrap_collate_fn,
            num_workers=1,
            shuffle=False,
        )

    def train_batch(
        self, batch: TorchData, epoch_idx: int, batch_idx: int
    ) -> Dict[str, torch.Tensor]:
        """Run one training step: forward, weighted loss sum, backward,
        optimizer step (with optional gradient clipping)."""
        samples, targets = batch
        outputs = self.model(samples)
        loss_dict = self.criterion(outputs, targets)
        weight_dict = self.criterion.weight_dict
        # Total loss is the weighted sum over only the losses that have weights.
        losses = sum(
            loss_dict[k] * weight_dict[k] for k in loss_dict.keys() if k in weight_dict
        )
        self.context.backward(losses)
        self.context.step_optimizer(self.optimizer, clip_grads=self.clip_grads_fn)
        # Compute losses for logging
        loss_dict["sum_unscaled"] = sum(loss_dict.values())
        loss_dict["loss"] = losses
        return loss_dict

    def evaluate_batch(self, batch):
        """Evaluate one batch: compute losses and feed postprocessed
        predictions (moved to CPU) into the COCO reducer."""
        samples, targets = batch
        outputs = self.model(samples)
        loss_dict = self.criterion(outputs, targets, eval=True)
        # Compute losses for logging
        loss_dict["sum_unscaled"] = sum(loss_dict.values())
        orig_target_sizes = torch.stack([t["orig_size"] for t in targets], dim=0)
        res = self.postprocessors["bbox"](outputs, orig_target_sizes)
        res = [{k: v.cpu() for k, v in r.items()} for r in res]
        if len(self.cat_ids):
            # Map contiguous class indices back to the original COCO category ids.
            for row in res:
                row["labels"] = torch.tensor(
                    [self.cat_ids[l.item()] for l in row["labels"]], dtype=torch.int64
                )
        result = [
            (target["image_id"].item(), output) for target, output in zip(targets, res)
        ]
        self.reducer.update(result)
        return loss_dict
|
{"hexsha": "2a21312c1661dec7aafae7dc0baa6b78f547a463", "size": 9695, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/computer_vision/deformabledetr_coco_pytorch/model_def.py", "max_stars_repo_name": "gh-determined-ai/determined", "max_stars_repo_head_hexsha": "9a1ab33a3a356b69681b3351629fef4ab98ddb56", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1729, "max_stars_repo_stars_event_min_datetime": "2020-04-27T17:36:40.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T05:48:39.000Z", "max_issues_repo_path": "examples/computer_vision/deformabledetr_coco_pytorch/model_def.py", "max_issues_repo_name": "ChrisW09/determined", "max_issues_repo_head_hexsha": "5c37bfe9cfcc69174ba29a3f1a115c3e9e3632e0", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1940, "max_issues_repo_issues_event_min_datetime": "2020-04-27T17:34:14.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T23:02:28.000Z", "max_forks_repo_path": "examples/computer_vision/deformabledetr_coco_pytorch/model_def.py", "max_forks_repo_name": "ChrisW09/determined", "max_forks_repo_head_hexsha": "5c37bfe9cfcc69174ba29a3f1a115c3e9e3632e0", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 214, "max_forks_repo_forks_event_min_datetime": "2020-04-27T19:57:28.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-29T08:17:16.000Z", "avg_line_length": 35.2545454545, "max_line_length": 108, "alphanum_fraction": 0.5928829293, "include": true, "reason": "import numpy", "num_tokens": 2081}
|
[STATEMENT]
lemma cdi_iff_no_strict_pd: \<open>i cd\<^bsup>\<pi>\<^esup>\<rightarrow> k \<longleftrightarrow> is_path \<pi> \<and> k < i \<and> \<pi> i \<noteq> return \<and> (\<forall> j \<in> {k..i}. \<not> (\<pi> k, \<pi> j) \<in> pdt)\<close>
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (i cd\<^bsup>\<pi>\<^esup>\<rightarrow> k) = (is_path \<pi> \<and> k < i \<and> \<pi> i \<noteq> local.return \<and> (\<forall>j\<in>{k..i}. (\<pi> k, \<pi> j) \<notin> pdt))
[PROOF STEP]
proof
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. i cd\<^bsup>\<pi>\<^esup>\<rightarrow> k \<Longrightarrow> is_path \<pi> \<and> k < i \<and> \<pi> i \<noteq> local.return \<and> (\<forall>j\<in>{k..i}. (\<pi> k, \<pi> j) \<notin> pdt)
2. is_path \<pi> \<and> k < i \<and> \<pi> i \<noteq> local.return \<and> (\<forall>j\<in>{k..i}. (\<pi> k, \<pi> j) \<notin> pdt) \<Longrightarrow> i cd\<^bsup>\<pi>\<^esup>\<rightarrow> k
[PROOF STEP]
assume cd:\<open>i cd\<^bsup>\<pi>\<^esup>\<rightarrow> k\<close>
[PROOF STATE]
proof (state)
this:
i cd\<^bsup>\<pi>\<^esup>\<rightarrow> k
goal (2 subgoals):
1. i cd\<^bsup>\<pi>\<^esup>\<rightarrow> k \<Longrightarrow> is_path \<pi> \<and> k < i \<and> \<pi> i \<noteq> local.return \<and> (\<forall>j\<in>{k..i}. (\<pi> k, \<pi> j) \<notin> pdt)
2. is_path \<pi> \<and> k < i \<and> \<pi> i \<noteq> local.return \<and> (\<forall>j\<in>{k..i}. (\<pi> k, \<pi> j) \<notin> pdt) \<Longrightarrow> i cd\<^bsup>\<pi>\<^esup>\<rightarrow> k
[PROOF STEP]
have 1: \<open>is_path \<pi> \<and> k < i \<and> \<pi> i \<noteq> return\<close>
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. is_path \<pi> \<and> k < i \<and> \<pi> i \<noteq> local.return
[PROOF STEP]
using cd
[PROOF STATE]
proof (prove)
using this:
i cd\<^bsup>\<pi>\<^esup>\<rightarrow> k
goal (1 subgoal):
1. is_path \<pi> \<and> k < i \<and> \<pi> i \<noteq> local.return
[PROOF STEP]
unfolding is_cdi_def
[PROOF STATE]
proof (prove)
using this:
is_path \<pi> \<and> k < i \<and> \<pi> i \<noteq> local.return \<and> (\<forall>j\<in>{k..i}. \<pi> j \<noteq> ipd (\<pi> k))
goal (1 subgoal):
1. is_path \<pi> \<and> k < i \<and> \<pi> i \<noteq> local.return
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
is_path \<pi> \<and> k < i \<and> \<pi> i \<noteq> local.return
goal (2 subgoals):
1. i cd\<^bsup>\<pi>\<^esup>\<rightarrow> k \<Longrightarrow> is_path \<pi> \<and> k < i \<and> \<pi> i \<noteq> local.return \<and> (\<forall>j\<in>{k..i}. (\<pi> k, \<pi> j) \<notin> pdt)
2. is_path \<pi> \<and> k < i \<and> \<pi> i \<noteq> local.return \<and> (\<forall>j\<in>{k..i}. (\<pi> k, \<pi> j) \<notin> pdt) \<Longrightarrow> i cd\<^bsup>\<pi>\<^esup>\<rightarrow> k
[PROOF STEP]
have 2: \<open>\<forall> j \<in> {k..i}. \<not> (\<pi> k, \<pi> j) \<in> pdt\<close>
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<forall>j\<in>{k..i}. (\<pi> k, \<pi> j) \<notin> pdt
[PROOF STEP]
proof (rule ccontr)
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<not> (\<forall>j\<in>{k..i}. (\<pi> k, \<pi> j) \<notin> pdt) \<Longrightarrow> False
[PROOF STEP]
assume \<open> \<not> (\<forall>j\<in>{k..i}. (\<pi> k, \<pi> j) \<notin> pdt)\<close>
[PROOF STATE]
proof (state)
this:
\<not> (\<forall>j\<in>{k..i}. (\<pi> k, \<pi> j) \<notin> pdt)
goal (1 subgoal):
1. \<not> (\<forall>j\<in>{k..i}. (\<pi> k, \<pi> j) \<notin> pdt) \<Longrightarrow> False
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
\<not> (\<forall>j\<in>{k..i}. (\<pi> k, \<pi> j) \<notin> pdt)
[PROOF STEP]
obtain j where \<open>j \<in> {k..i}\<close> and \<open>(\<pi> k, \<pi> j) \<in> pdt\<close>
[PROOF STATE]
proof (prove)
using this:
\<not> (\<forall>j\<in>{k..i}. (\<pi> k, \<pi> j) \<notin> pdt)
goal (1 subgoal):
1. (\<And>j. \<lbrakk>j \<in> {k..i}; (\<pi> k, \<pi> j) \<in> pdt\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
j \<in> {k..i}
(\<pi> k, \<pi> j) \<in> pdt
goal (1 subgoal):
1. \<not> (\<forall>j\<in>{k..i}. (\<pi> k, \<pi> j) \<notin> pdt) \<Longrightarrow> False
[PROOF STEP]
hence \<open>\<pi> j \<noteq> \<pi> k\<close> and \<open>\<pi> j pd\<rightarrow> \<pi> k\<close>
[PROOF STATE]
proof (prove)
using this:
j \<in> {k..i}
(\<pi> k, \<pi> j) \<in> pdt
goal (1 subgoal):
1. \<pi> j \<noteq> \<pi> k &&& \<pi> j pd\<rightarrow> \<pi> k
[PROOF STEP]
unfolding pdt_def
[PROOF STATE]
proof (prove)
using this:
j \<in> {k..i}
(\<pi> k, \<pi> j) \<in> {(x, y). x \<noteq> y \<and> y pd\<rightarrow> x}
goal (1 subgoal):
1. \<pi> j \<noteq> \<pi> k &&& \<pi> j pd\<rightarrow> \<pi> k
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
\<pi> j \<noteq> \<pi> k
\<pi> j pd\<rightarrow> \<pi> k
goal (1 subgoal):
1. \<not> (\<forall>j\<in>{k..i}. (\<pi> k, \<pi> j) \<notin> pdt) \<Longrightarrow> False
[PROOF STEP]
thus \<open>False\<close>
[PROOF STATE]
proof (prove)
using this:
\<pi> j \<noteq> \<pi> k
\<pi> j pd\<rightarrow> \<pi> k
goal (1 subgoal):
1. False
[PROOF STEP]
using path_pd_ipd
[PROOF STATE]
proof (prove)
using this:
\<pi> j \<noteq> \<pi> k
\<pi> j pd\<rightarrow> \<pi> k
\<lbrakk>is_path ?\<pi>; ?\<pi> ?k \<noteq> local.return; ?\<pi> ?n \<noteq> ?\<pi> ?k; ?\<pi> ?n pd\<rightarrow> ?\<pi> ?k; ?k < ?n; \<And>l. \<lbrakk>?k < l; l \<le> ?n; ?\<pi> l = ipd (?\<pi> ?k)\<rbrakk> \<Longrightarrow> ?thesis\<rbrakk> \<Longrightarrow> ?thesis
goal (1 subgoal):
1. False
[PROOF STEP]
by (metis \<open>j \<in> {k..i}\<close> atLeastAtMost_iff cd cd_not_pd cdi_prefix le_eq_less_or_eq)
[PROOF STATE]
proof (state)
this:
False
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
\<forall>j\<in>{k..i}. (\<pi> k, \<pi> j) \<notin> pdt
goal (2 subgoals):
1. i cd\<^bsup>\<pi>\<^esup>\<rightarrow> k \<Longrightarrow> is_path \<pi> \<and> k < i \<and> \<pi> i \<noteq> local.return \<and> (\<forall>j\<in>{k..i}. (\<pi> k, \<pi> j) \<notin> pdt)
2. is_path \<pi> \<and> k < i \<and> \<pi> i \<noteq> local.return \<and> (\<forall>j\<in>{k..i}. (\<pi> k, \<pi> j) \<notin> pdt) \<Longrightarrow> i cd\<^bsup>\<pi>\<^esup>\<rightarrow> k
[PROOF STEP]
show \<open>is_path \<pi> \<and> k < i \<and> \<pi> i \<noteq> return \<and> (\<forall> j \<in> {k..i}. \<not> (\<pi> k, \<pi> j) \<in> pdt)\<close>
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. is_path \<pi> \<and> k < i \<and> \<pi> i \<noteq> local.return \<and> (\<forall>j\<in>{k..i}. (\<pi> k, \<pi> j) \<notin> pdt)
[PROOF STEP]
using 1 2
[PROOF STATE]
proof (prove)
using this:
is_path \<pi> \<and> k < i \<and> \<pi> i \<noteq> local.return
\<forall>j\<in>{k..i}. (\<pi> k, \<pi> j) \<notin> pdt
goal (1 subgoal):
1. is_path \<pi> \<and> k < i \<and> \<pi> i \<noteq> local.return \<and> (\<forall>j\<in>{k..i}. (\<pi> k, \<pi> j) \<notin> pdt)
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
is_path \<pi> \<and> k < i \<and> \<pi> i \<noteq> local.return \<and> (\<forall>j\<in>{k..i}. (\<pi> k, \<pi> j) \<notin> pdt)
goal (1 subgoal):
1. is_path \<pi> \<and> k < i \<and> \<pi> i \<noteq> local.return \<and> (\<forall>j\<in>{k..i}. (\<pi> k, \<pi> j) \<notin> pdt) \<Longrightarrow> i cd\<^bsup>\<pi>\<^esup>\<rightarrow> k
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. is_path \<pi> \<and> k < i \<and> \<pi> i \<noteq> local.return \<and> (\<forall>j\<in>{k..i}. (\<pi> k, \<pi> j) \<notin> pdt) \<Longrightarrow> i cd\<^bsup>\<pi>\<^esup>\<rightarrow> k
[PROOF STEP]
assume \<open>is_path \<pi> \<and> k < i \<and> \<pi> i \<noteq> return \<and> (\<forall> j \<in> {k..i}. \<not> (\<pi> k, \<pi> j) \<in> pdt)\<close>
[PROOF STATE]
proof (state)
this:
is_path \<pi> \<and> k < i \<and> \<pi> i \<noteq> local.return \<and> (\<forall>j\<in>{k..i}. (\<pi> k, \<pi> j) \<notin> pdt)
goal (1 subgoal):
1. is_path \<pi> \<and> k < i \<and> \<pi> i \<noteq> local.return \<and> (\<forall>j\<in>{k..i}. (\<pi> k, \<pi> j) \<notin> pdt) \<Longrightarrow> i cd\<^bsup>\<pi>\<^esup>\<rightarrow> k
[PROOF STEP]
thus \<open>i cd\<^bsup>\<pi>\<^esup>\<rightarrow> k\<close>
[PROOF STATE]
proof (prove)
using this:
is_path \<pi> \<and> k < i \<and> \<pi> i \<noteq> local.return \<and> (\<forall>j\<in>{k..i}. (\<pi> k, \<pi> j) \<notin> pdt)
goal (1 subgoal):
1. i cd\<^bsup>\<pi>\<^esup>\<rightarrow> k
[PROOF STEP]
by (metis ipd_in_pdt term_path_stable less_or_eq_imp_le not_cd_impl_ipd path_nodes)
[PROOF STATE]
proof (state)
this:
i cd\<^bsup>\<pi>\<^esup>\<rightarrow> k
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 3709, "file": "IFC_Tracking_IFC", "length": 27}
|
% The BLAST Algorithm
%
% Cartoonish introduction to the BLAST algorithm
\subsection{The BLAST Algorithm}
\begin{frame}
\frametitle{BLAST Is A Heuristic}
\begin{itemize}
\item<1-> BLAST does not use Needleman-Wunsch or Smith-Waterman
\item<1-> BLAST \emph{approximates} dynamic programming methods
\item<1-> BLAST is not guaranteed to give a mathematically optimal alignment
\item<2-> BLAST does not explore the complete search space
\item<3-> BLAST uses heuristics (loosely-defined rules) to refine High-scoring Segment Pairs (HSPs)
\item<4-> BLAST reports only ``statistically-significant'' alignments (dependent on parameters)
\end{itemize}
\end{frame}
\begin{frame}
\frametitle{Steps in the Algorithm}
\begin{enumerate}
\item Seeding
\item Extension
\item Evaluation
\end{enumerate}
\end{frame}
|
{"hexsha": "ce699691bc7704a0f8185537a79fb33045702871", "size": 849, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "presentation/sections/subsection_blastintro.tex", "max_stars_repo_name": "peterjc/Teaching-Intro-to-Bioinf", "max_stars_repo_head_hexsha": "d642804aa73e80546e2326d2c2537c5727ac3ee8", "max_stars_repo_licenses": ["CC-BY-4.0"], "max_stars_count": 20, "max_stars_repo_stars_event_min_datetime": "2015-05-28T18:29:42.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-20T13:38:44.000Z", "max_issues_repo_path": "presentation/sections/subsection_blastintro.tex", "max_issues_repo_name": "peterjc/Teaching-Intro-to-Bioinf", "max_issues_repo_head_hexsha": "d642804aa73e80546e2326d2c2537c5727ac3ee8", "max_issues_repo_licenses": ["CC-BY-4.0"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2016-11-25T11:55:43.000Z", "max_issues_repo_issues_event_max_datetime": "2018-10-05T10:53:49.000Z", "max_forks_repo_path": "presentation/sections/subsection_blastintro.tex", "max_forks_repo_name": "peterjc/Teaching-Intro-to-Bioinf", "max_forks_repo_head_hexsha": "d642804aa73e80546e2326d2c2537c5727ac3ee8", "max_forks_repo_licenses": ["CC-BY-4.0"], "max_forks_count": 14, "max_forks_repo_forks_event_min_datetime": "2015-02-05T20:54:37.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-08T18:24:04.000Z", "avg_line_length": 32.6538461538, "max_line_length": 103, "alphanum_fraction": 0.7373380448, "num_tokens": 241}
|
# Chapter 2 End to End Machine Learning
import os
DOWNLOAD_ROOT = "https://raw.githubusercontent.com/ageron/handson-ml/master/"
HOUSING_PATH = "datasets/housing/"
HOUSING_URL = DOWNLOAD_ROOT + HOUSING_PATH
#-------------------------------------------------------------
# In[ Step 1. load csv data file
import pandas as pd
def load_housing_data(housing_path=HOUSING_URL):
    """Load the housing CSV from ``housing_path``.

    Args:
        housing_path: directory path or base URL containing ``housing.csv``;
            defaults to the remote GitHub raw URL (``HOUSING_URL``).

    Returns:
        pandas.DataFrame with the raw housing data.
    """
    # The default "path" is actually a URL; os.path.join would insert a
    # backslash on Windows and corrupt it, so join with explicit '/' instead
    # (equivalent on POSIX paths as well).
    csv_path = housing_path.rstrip("/") + "/housing.csv"
    return pd.read_csv(csv_path, sep=',')
# Load the raw dataset into a DataFrame.
housing = load_housing_data()
# In[ Step 1.1 show data information
housing.head()
housing.info()
#-------------------------------------------------------------
# In[ Step 1.2 show data distribution
housing["ocean_proximity"].value_counts()
housing.describe()
import matplotlib.pyplot as plt
# One histogram per numeric attribute, 50 bins each.
housing.hist(bins=50, figsize=(20,15))
plt.show()
#-------------------------------------------------------------
# In[Step 2. Create a Test Set
import numpy as np
# In[]
def split_train_test(data, test_ratio):
    """Randomly split ``data`` into train and test DataFrames.

    Args:
        data: pandas DataFrame to split.
        test_ratio: fraction of rows assigned to the test set (e.g. 0.2).

    Returns:
        (train_set, test_set) tuple of DataFrames.

    The split is reproducible: a fixed seed (42) is used, matching the
    original behaviour.
    """
    # Use a local RandomState instead of reseeding the global NumPy RNG,
    # which would silently affect every other consumer of np.random.
    # RandomState(42).permutation(...) produces the exact same sequence as
    # np.random.seed(42); np.random.permutation(...).
    rng = np.random.RandomState(42)
    shuffled_indices = rng.permutation(len(data))
    test_set_size = int(len(data) * test_ratio)
    test_indices = shuffled_indices[:test_set_size]
    train_indices = shuffled_indices[test_set_size:]
    return data.iloc[train_indices], data.iloc[test_indices]
train_set, test_set = split_train_test(housing, 0.2)
print(len(train_set), "train +", len(test_set), "test")
#-------------------------------------------------------------
# In[ step 2.1 another method for spliting data
from sklearn.model_selection import train_test_split
train_set, test_set = train_test_split(housing, test_size=0.2, random_state=42)
print(len(train_set), "train +", len(test_set), "test")
#-------------------------------------------------------------
# In[ step 2.2 stratified sampling by median_income categories
# Bucket incomes into categories 1..5 (everything above 5 is capped at 5).
housing["income_cat"] = np.ceil(housing["median_income"] / 1.5)
housing["income_cat"].where(housing["income_cat"] < 5, 5.0, inplace=True)
from sklearn.model_selection import StratifiedShuffleSplit
split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42)
for train_index, test_index in split.split(housing, housing["income_cat"]):
    strat_train_set = housing.loc[train_index]
    strat_test_set = housing.loc[test_index]
housing["income_cat"].value_counts() / len(housing)
# In[ remove the new attribute from train and test sets
for s in (strat_train_set, strat_test_set):
    s.drop(["income_cat"], axis=1, inplace=True) # axis=1: column not row
#--------------------------------------------------------------
# In[ Step 3. Visualizing Geographical Data
housing = strat_train_set.copy()
housing.plot(kind="scatter", x="longitude", y="latitude")
# alpha: density of data point
housing.plot(kind="scatter", x="longitude", y="latitude", alpha=0.1)
# s(radius of each circle): population; c(color): price; cmap: predefined color map
housing.plot(kind="scatter", x="longitude", y="latitude", alpha=0.1,
             s=housing["population"]/100, label="population",
             c="median_house_value", cmap=plt.get_cmap("jet"),
             colorbar=True,)
plt.legend()
#---------------------------------------------------------------
# In[ Step 3.1 Looking for Correlations
corr_matrix = housing.corr()
corr_matrix["median_house_value"].sort_values(ascending=False)
from pandas.plotting import scatter_matrix
attributes = ["median_house_value", "median_income", "total_rooms",
              "housing_median_age"]
scatter_matrix(housing[attributes], figsize=(15, 12))
housing.plot(kind="scatter", x="median_income", y="median_house_value",
             alpha=0.1)
#-----------------------------------------------------------------
# In[ Step 3.2 Experimenting with Attribute Combinations
housing["rooms_per_household"] = housing["total_rooms"]/housing["households"] # rooms per household
housing["bedrooms_per_room"] = housing["total_bedrooms"]/housing["total_rooms"] # bedrooms per room
housing["population_per_household"]=housing["population"]/housing["households"] # population per household
corr_matrix = housing.corr()
corr_matrix["median_house_value"].sort_values(ascending=False)
# Separate the predictors from the labels (target column).
housing = strat_train_set.drop("median_house_value", axis=1)
housing_values = strat_train_set["median_house_value"].copy()
#------------------------------------------------------------------
# In[Step 4. Data Cleaning
# Step 4.1 prepocessing missing values of attribute
# NOTE(review): sklearn.preprocessing.Imputer was removed in scikit-learn
# 0.22 (replaced by sklearn.impute.SimpleImputer) -- confirm the pinned
# scikit-learn version before running.
from sklearn.preprocessing import Imputer
housing_num = housing.drop("ocean_proximity", axis=1) # select partial data from housing
# try to fill median for missing data
imputer = Imputer(strategy="median") # new Imputer object
imputer.fit(housing_num) # apply Imputer to fill mediam for each attribute
X = imputer.transform(housing_num) # transfer from dataframe to array
housing_tr = pd.DataFrame(X, columns=housing_num.columns)
#--------------------------------------------------------------------
# In[ Step 4.2 Handling Text and Categorical Attributes
from sklearn.preprocessing import LabelEncoder
encoder = LabelEncoder() # linear encoding
housing_cat = housing["ocean_proximity"]
housing_cat_encoded = encoder.fit_transform(housing_cat)
# In[]
from sklearn.preprocessing import OneHotEncoder
encoder = OneHotEncoder()
housing_cat_1hot = encoder.fit_transform(housing_cat_encoded.reshape(-1,1))
# In[] better encoding for categorical data
from sklearn.preprocessing import LabelBinarizer
encoder = LabelBinarizer()
housing_cat_1hot = encoder.fit_transform(housing_cat)
#---------------------------------------------------------------------
# In[ Step 4.3 Combine Custom Transformers by a Class (step 3.2)
from sklearn.base import BaseEstimator, TransformerMixin
# Column indices of the raw feature matrix used by CombinedAttributesAdder.
rooms_ix, bedrooms_ix, population_ix, household_ix = 3, 4, 5, 6
class CombinedAttributesAdder(BaseEstimator, TransformerMixin):
    """Transformer that appends engineered ratio features to a feature matrix.

    Appends rooms-per-household and population-per-household columns, plus
    (optionally) bedrooms-per-room, to the input array.
    """

    def __init__(self, add_bedrooms_per_room=True,
                 rooms_ix=3, bedrooms_ix=4, population_ix=5, household_ix=6):
        # Plain keyword args (no *args/**kwargs) keep sklearn's get_params
        # working. Column indices are now constructor parameters (with the
        # original module-level defaults) so the class no longer depends on
        # module globals and can be reused on matrices with other layouts.
        self.add_bedrooms_per_room = add_bedrooms_per_room
        self.rooms_ix = rooms_ix
        self.bedrooms_ix = bedrooms_ix
        self.population_ix = population_ix
        self.household_ix = household_ix

    def fit(self, X, y=None):
        """Stateless transformer: nothing to learn."""
        return self

    def transform(self, X, y=None):
        """Return ``X`` with the derived ratio columns appended."""
        rooms_per_household = X[:, self.rooms_ix] / X[:, self.household_ix]  # rooms per household
        population_per_household = X[:, self.population_ix] / X[:, self.household_ix]  # population per household
        if self.add_bedrooms_per_room:
            bedrooms_per_room = X[:, self.bedrooms_ix] / X[:, self.rooms_ix]  # bedrooms per room
            return np.c_[X, rooms_per_household, population_per_household,
                         bedrooms_per_room]
        else:
            return np.c_[X, rooms_per_household, population_per_household]
attr_adder = CombinedAttributesAdder() # instantiate the custom transformer
#housing_extra_attribs = attr_adder.transform()
#-------------------------------------------------------------------
# In[ Step 4.4 Transformation Pipelines
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
# In[] try 1
# Numeric pipeline: median imputation -> derived features -> standardisation.
num_pipeline = Pipeline([ ('imputer', Imputer(strategy="median")),
                          ('attribs_adder', CombinedAttributesAdder()),
                          ('std_scaler', StandardScaler()),])
housing_num_tr = num_pipeline.fit_transform(housing_num)
#==============================================================================
#==============================================================================
# attributes = ["median_house_value", "median_income", "total_rooms",
#               "housing_median_age"]
# scatter_matrix(housing_num_tr[attributes], figsize=(12, 8))
# housing.plot(kind="scatter", x="median_income", y="median_house_value",
#              alpha=0.1)
#==============================================================================
# In[] get attribute data by assigning attribute names
#-------------------------------------------------------------------
class DataFrameSelector(BaseEstimator, TransformerMixin):
    """Select a fixed list of columns from a DataFrame as a NumPy array.

    Lets a sklearn Pipeline consume a pandas DataFrame by extracting only
    the named attributes.
    """

    def __init__(self, attribute_names):
        self.attribute_names = attribute_names

    def fit(self, X, y=None):
        # Nothing to learn; the selection is purely structural.
        return self

    def transform(self, X):
        selected = X[self.attribute_names]
        return selected.values
# In[]
from sklearn.pipeline import FeatureUnion
num_attribs = list(housing_num)
cat_attribs = ["ocean_proximity"]
# pipeline for processing numerical data as in steps 4.1 and 4.3
num_pipeline = Pipeline([('selector', DataFrameSelector(num_attribs)),
                         ('imputer', Imputer(strategy="median")),
                         ('attribs_adder', CombinedAttributesAdder()),
                         ('std_scaler', StandardScaler()),
                         ])
# pipeline for processing categorical data as in step 4.2
# NOTE(review): LabelBinarizer.fit_transform takes a single argument; inside a
# Pipeline it is called with (X, y) and raises a TypeError on scikit-learn
# >= 0.19 -- confirm the pinned version supports this usage.
cat_pipeline = Pipeline([('selector', DataFrameSelector(cat_attribs)),
                         ('label_binarizer', LabelBinarizer()),
                         ])
# Run both sub-pipelines and concatenate their outputs column-wise.
full_pipeline = FeatureUnion(
        transformer_list=[("num_pipeline", num_pipeline),
                          ("cat_pipeline", cat_pipeline),
                          ])
housing_prepared = full_pipeline.fit_transform(housing)
housing_prepared.shape
#----------------------------------------------------------------
# In[ Step 5. Select and Train a Model
# Step 5.1 using linear regression
from sklearn.linear_model import LinearRegression
lin_reg = LinearRegression()
lin_reg.fit(housing_prepared, housing_values)
# In[]
from sklearn.metrics import mean_squared_error
# =============================================================================
# Training-set RMSE for the linear model.
housing_predictions = lin_reg.predict(housing_prepared)
lin_mse = mean_squared_error(housing_values, housing_predictions)
lin_rmse = np.sqrt(lin_mse)
# =============================================================================
# In[] Step 5.2 using decision tree regressor
from sklearn.tree import DecisionTreeRegressor
tree_reg = DecisionTreeRegressor()
tree_reg.fit(housing_prepared, housing_values)
housing_predictions = tree_reg.predict(housing_prepared)
tree_mse = mean_squared_error(housing_values, housing_predictions)
tree_rmse = np.sqrt(tree_mse)
# =============================================================================
# In[
# Step 5.2.1 Cross-Validation
from sklearn.model_selection import cross_val_score
# sklearn scoring is "greater is better", hence the negated MSE.
scores = cross_val_score(tree_reg, housing_prepared, housing_values,
                         scoring="neg_mean_squared_error", cv=10)
rmse_scores = np.sqrt(-scores)
[rmse_scores, rmse_scores.mean(), rmse_scores.std()]
# In[] Step 5.2.2 using random forest regressor
from sklearn.ensemble import RandomForestRegressor
forest_reg = RandomForestRegressor()
forest_reg.fit(housing_prepared, housing_values)
forest_reg_predictions = forest_reg.predict(housing_prepared)
forest_reg_mse = mean_squared_error(housing_values, forest_reg_predictions)
forest_reg_rmse = np.sqrt(forest_reg_mse)
# In[]
# NOTE(review): sklearn.externals.joblib was removed in scikit-learn 0.23;
# modern code should `import joblib` directly -- confirm the pinned version.
from sklearn.externals import joblib
joblib.dump(forest_reg, "forest_reg_model.pkl")
# and later...
forest_reg_loaded = joblib.load("forest_reg_model.pkl")
#--------------------------------------------------------------------------
# In[ Step 5.3 using Fine-Tune Model
from sklearn.model_selection import GridSearchCV
# Two grids: default bootstrap forests, and non-bootstrap forests.
param_grid = [ {'n_estimators': [3, 10, 30],
                'max_features': [2, 4, 6, 8]},
               {'bootstrap': [False],
                'n_estimators': [3, 10],
                'max_features': [2, 3, 4]},]
forest_reg = RandomForestRegressor()
# cv: cross validation
grid_search = GridSearchCV(forest_reg, param_grid, cv=5, scoring='neg_mean_squared_error')
grid_search.fit(housing_prepared, housing_values)
grid_search.best_params_
grid_search.best_estimator_
cvres = grid_search.cv_results_
for mean_score, params in zip(cvres["mean_test_score"], cvres["params"]):
    print(np.sqrt(-mean_score), params)
# Rank input features by importance according to the best forest.
feature_importances = grid_search.best_estimator_.feature_importances_
extra_attribs = ["rooms_per_hhold", "pop_per_hhold", "bedrooms_per_room"]
cat_one_hot_attribs = list(encoder.classes_)
attributes = num_attribs + extra_attribs + cat_one_hot_attribs
sorted(zip(feature_importances, attributes), reverse=True)
# In[] Step 6 Evaluate Your System on the Test Set
final_model = grid_search.best_estimator_
X_test = strat_test_set.drop("median_house_value", axis=1)
y_test = strat_test_set["median_house_value"].copy()
# transform (not fit_transform): reuse statistics learned on the train set.
X_test_prepared = full_pipeline.transform(X_test)
final_predictions = final_model.predict(X_test_prepared)
final_mse = mean_squared_error(y_test, final_predictions)
final_rmse = np.sqrt(final_mse)
|
{"hexsha": "702677c23186cb1febf001b5e5cf73fa913594cf", "size": 12468, "ext": "py", "lang": "Python", "max_stars_repo_path": "Ch 2/End-to-End Machine Learning Project.py", "max_stars_repo_name": "IESSC/Machine-Learning", "max_stars_repo_head_hexsha": "0866f09ee7ee671b49f0c7340383f5051dcacb49", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Ch 2/End-to-End Machine Learning Project.py", "max_issues_repo_name": "IESSC/Machine-Learning", "max_issues_repo_head_hexsha": "0866f09ee7ee671b49f0c7340383f5051dcacb49", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Ch 2/End-to-End Machine Learning Project.py", "max_forks_repo_name": "IESSC/Machine-Learning", "max_forks_repo_head_hexsha": "0866f09ee7ee671b49f0c7340383f5051dcacb49", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.8389261745, "max_line_length": 94, "alphanum_fraction": 0.6470965672, "include": true, "reason": "import numpy", "num_tokens": 2700}
|
# encoding: UTF-8
"""
这里的Demo是一个最简单的策略实现,并未考虑太多实盘中的交易细节,如:
1. 委托价格超出涨跌停价导致的委托失败
2. 委托未成交,需要撤单后重新委托
3. 断网后恢复交易状态
4. 等等
这些点是作者选择特意忽略不去实现,因此想实盘的朋友请自己多多研究CTA交易的一些细节,
做到了然于胸后再去交易,对自己的money和时间负责。
也希望社区能做出一个解决了以上潜在风险的Demo出来。
"""
from ctaStrategy.ctaBase import *
from ctaStrategy.ctaArbitrageTemplate import CtaArbitrageTemplate
import talib as ta
import numpy as np
from datetime import *
EMPTY_STRING = ''
########################################################################
class ArbitrageTestStrategy(CtaArbitrageTemplate):
"""套利测试策略"""
strategyName = u'套利测试策略' # 策略实例名称
className = u'ArbitrageTestStrategy'
author = u'Jesse'
# 策略参数
initDays = 0 # 初始化数据所用的天数, 此处只需要监控套利机会故为0
# 策略变量
underlying = u'm1709'
symbol1 = u'm1709-C-2650'
symbol2 = u'm1709-C-2700'
if symbol1[6:7] == 'C':
option_type = 'Call'
elif symbol1[6:7] == 'P':
option_type = 'Put'
K1 = int(symbol1[-4:])
K2 = int(symbol2[-4:])
if K1 >= K2:
raise ValueError('K1 must be less than K2!')
ask_C1 = 0.0
ask_C2 = 0.0
bid_C1 = 0.0
bid_C2 = 0.0
ask_C1_volume = 0
ask_C2_volume = 0
bid_C1_volume = 0
bid_C2_volume = 0
ask_P1 = 0.0
ask_P2 = 0.0
bid_P1 = 0.0
bid_P2 = 0.0
ask_P1_volume = 0
ask_P2_volume = 0
bid_P1_volume = 0
bid_P2_volume = 0
# exercise_date = '2017-08-07'
# today = date.today()
# T = (datetime(int(exercise_date[:4]),int(exercise_date[5:7]),int(exercise_date[-2:])) -
# datetime(today.year,today.month,today.day)).days
# rate = 0.03
# 参数列表,保存了参数的名称
paramList = ['strategyName',
'className',
'author',
'vtSymbol',
'Symbol1',
'Symbol2'
]
# 变量列表,保存了变量的名称
varList = ['inited', # 是否初始化
'trading', # 交易状态
'pos', # 仓位状态
'option_type',
'underlying',
'K1',
'K2'
]
# ----------------------------------------------------------------------
    def __init__(self, ctaEngine, setting):
        """Constructor: apply per-instance settings on top of class defaults."""
        super(ArbitrageTestStrategy, self).__init__(ctaEngine, setting)
        if setting:
            # NOTE(review): sets self.Symbol1/Symbol2 (capitalised) while the
            # tick logic reads self.symbol1/symbol2 -- confirm intentional.
            self.Symbol1 = setting['Symbol1']
            self.Symbol2 = setting['Symbol2']
        self.lastOrder = None
# ----------------------------------------------------------------------
def onInit(self):
"""初始化策略(必须由用户继承实现)"""
if self.initDays == 0:
return
self.writeCtaLog(u'策略初始化')
for vtsymbol in self.vtSymbol:
initData = self.loadTick(self.initDays, vtsymbol)
for tick in initData:
self.onTick(tick)
self.putEvent()
# ----------------------------------------------------------------------
def onStart(self):
"""启动策略(必须由用户继承实现)"""
self.writeCtaLog(u'策略启动')
self.putEvent()
# ----------------------------------------------------------------------
def onStop(self):
"""停止策略(必须由用户继承实现)"""
self.writeCtaLog(u'策略停止')
self.putEvent()
# ----------------------------------------------------------------------
def onOrder(self, order):
"""收到委托变化推送(必须由用户继承实现)"""
self.lastOrder = order
# ----------------------------------------------------------------------
def onTick(self, tick):
"""收到行情TICK推送(必须由用户继承实现)"""
# 计算K线
print tick.vtSymbol
if self.option_type == 'Call':
if tick.vtSymbol == self.symbol1:
self.ask_C1 = tick.askPrice1
self.ask_C1_volume = tick.askVolume1
self.bid_C1 = tick.bidPrice1
self.bid_C1_volume = tick.bidVolume1
elif tick.vtSymbol == self.symbol2:
self.ask_C2 = tick.askPrice1
self.ask_C2_volume = tick.askVolume1
self.bid_C2 = tick.bidPrice1
self.bid_C2_volume = tick.bidVolume1
#### fee ####
if self.ask_C1 - self.bid_C2 < 0:
print 'call option bull spread: C1 - C2 < 0'
size = min(self.ask_C1_volume, self.bid_C2_volume)
self.buy(self.ask_C1, size, self.symbol1)
self.short(self.bid_C2, size, self.symbol2)
#### add condition to handle the failure of order
elif self.bid_C1 - self.ask_C2 > (self.K2 - self.K1):
print 'call option bear spread: C1 - C2 > (K2 - K1)'
size = min(self.bid_C1_volume, self.ask_C2_volume)
self.short(self.bid_C1, size, self.symbol1)
self.buy(self.ask_C2, size, self.symbol2)
#### add condition to handle the failure of order
elif self.option_type == 'Put':
if tick.vtSymbol == self.symbol1:
self.ask_P1 = tick.askPrice1
self.ask_P1_volume = tick.askVolume1
self.bid_P1 = tick.bidPrice1
self.bid_P1_volume = tick.bidVolume1
elif tick.vtSymbol == self.symbol2:
self.ask_P2 = tick.askPrice1
self.ask_P2_volume = tick.askVolume1
self.bid_P2 = tick.bidPrice1
self.bid_P2_volume = tick.bidVolume1
#### fee####
if self.ask_P2 - self.bid_P1 < 0:
print 'put option bull spread: P2 - P1 < 0'
size = min(self.ask_P1_volume, self.bid_P2_volume)
self.buy(self.ask_P2, size, self.symbol2)
self.short(self.bid_P1, size, self.symbol1)
#### add condition to handle the failure of order
elif self.bid_P2 - self.ask_P1 > (self.K2 - self.K1):
print 'put option bear spread: P2 - P1 > (K2 - K1)'
size = min(self.bid_P1_volume, self.ask_P2_volume)
self.short(self.bid_P2, size, self.symbol2)
self.buy(self.ask_P1, size, self.symbol1)
#### add condition to handle the failure of order
'''
tickMinute = tick.datetime.minute # by Jesse
if tick.vtSymbol in self.barMinute.keys(): # by Jesse
barMinute = self.barMinute[tick.vtSymbol]
else:
barMinute = EMPTY_STRING
self.lastTick[tick.vtSymbol] = tick
dt = datetime.datetime.strftime(tick.datetime, '%Y-%m-%d %H:%M:%S')
# if tick.askPrice1 - tick.bidPrice1 >1:
# print dt,tick.vtSymbol,tick.lastPrice,tick.bidPrice1,tick.askPrice1
# 撤单判断与执行,待修改
if tickMinute != barMinute:
if tick.vtSymbol in self.bar.keys(): # by hw
self.onBar(self.bar[tick.vtSymbol]) # by hw
bar = CtaBarData()
bar.vtSymbol = tick.vtSymbol
bar.symbol = tick.symbol
bar.exchange = tick.exchange
bar.open = tick.lastPrice
bar.high = tick.lastPrice
bar.low = tick.lastPrice
bar.close = tick.lastPrice
bar.date = tick.date
bar.time = tick.time
bar.datetime = tick.datetime # K线的时间设为第一个Tick的时间
# 实盘中用不到的数据可以选择不算,从而加快速度
# bar.volume = tick.volume
# bar.openInterest = tick.openInterest
self.bar[tick.vtSymbol] = bar # 这种写法为了减少一层访问,加快速度 by hw
self.barMinute[tick.vtSymbol] = tickMinute # 更新当前的分钟 by hw
self.barTime[tick.vtSymbol] = tick.datetime
else: # 否则继续累加新的K线
bar = self.bar[tick.vtSymbol] # 写法同样为了加快速度
bar.high = max(bar.high, tick.lastPrice)
bar.low = min(bar.low, tick.lastPrice)
bar.close = tick.lastPrice
'''
# ----------------------------------------------------------------------
def onBar(self, bar):
"""收到Bar推送(必须由用户继承实现)"""
pass
'''
# 计算基础变量macd,pbx .by hw
vtsymbol = bar.vtSymbol
if vtsymbol in self.closeList.keys():
l = self.closeList[vtsymbol]
else:
l = []
self.closeList[vtsymbol] = l
l.append(bar.close)
self.writeCtaLog(u'symbol:%s' % bar.vtSymbol)
# 策略信号
longsignal = False
shortsignal = False
sellsignal = False
coversignal = False
for vts in self.vtSymbol:
print 'signal:', vts, self.vtSymbol, self.bar[vts].datetime.hour, self.longSymbol, self.shortSymbol, \
self.posToday[vts], self.bar[vts].datetime.minute
if self.posToday[vts] == 0 and cmp(vts, self.longSymbol) and self.bar[vts].datetime.hour >= 14 and self.bar[
vts].datetime.minute < 50:
longsignal = True
# print longsignal
if self.posToday[vts] == 0 and vts == self.shortSymbol and self.bar[vts].datetime.hour >= 14 and self.bar[
vts].datetime.minute < 50:
shortsignal = True
if self.posToday[vts] == 1 and vts == self.longSymbol and self.bar[vts].datetime.hour >= 14 and self.bar[
vts].datetime.minute > 50:
sellsignal = True
if self.posToday[vts] == -1 and vts == self.shortSymbol and self.bar[vts].datetime.hour >= 14 and self.bar[
vts].datetime.minute > 50:
coversignal = True
# 金叉和死叉的条件是互斥
# 所有的委托均以K线收盘价委托(这里有一个实盘中无法成交的风险,考虑添加对模拟市价单类型的支持)
print longsignal, shortsignal, self.posToday[self.longSymbol], self.posToday[self.shortSymbol], self.tradeState[
self.longSymbol], self.tradeState[self.shortSymbol]
if sellsignal and self.posToday[self.longSymbol] == 1 and self.tradeState[self.longSymbol] <> -1:
self.tradeID = self.sell(self.bar[self.longSymbol].close, 1, self.longSymbol)
self.tradeState[self.longSymbol] = -1
print 'trade 1'
if coversignal and self.posToday[self.shortSymbol] == -1 and self.tradeState[self.shortSymbol] <> 1:
self.tradeID = self.cover(self.bar[self.shortSymbol].close, 1, self.shortSymbol)
self.tradeState[self.shortSymbol] = 1
print 'trade 2'
if longsignal and self.tradeState[self.longSymbol] <> 1:
self.tradeID = self.buy(self.bar[self.longSymbol].close, 1, self.longSymbol)
self.tradeState[self.longSymbol] = 1
print 'trade 3'
if shortsignal and self.tradeState[self.shortSymbol] <> -1:
self.tradeID = self.short(self.bar[self.shortSymbol].close, 1, self.shortSymbol)
self.tradeState[self.shortSymbol] = -1
print 'trade 4'
'''
# 发出状态更新事件
self.putEvent()
# ----------------------------------------------------------------------
def onTrade(self, trade):
"""收到成交推送(必须由用户继承实现)"""
pass
# self.posToday[trade.vtSymbol] = self.posToday[trade.vtSymbol] + self.tradeState[trade.vtSymbol]
# self.tradeState[trade.vtSymbol] = 0
# print 'trade', trade.vtSymbol, self.posToday[trade.vtSymbol], self.tradeState[trade.vtSymbol]
if __name__ == '__main__':
    # Allow running a backtest by double-clicking this file directly.
    # PyQt4 is imported so matplotlib binds to PyQt4 rather than PySide,
    # preventing an initialization error.
    from ctaStrategy.ctaBacktesting_Arbitrage import *
    from PyQt4 import QtCore, QtGui
    # Create the backtesting engine
    engine = BacktestingEngine()
    # Run the backtest in bar (K-line) mode
    engine.setBacktestingMode(engine.BAR_MODE)
    # Start date of the historical data used for the backtest
    engine.setStartDate('20170101')
    # Product parameters
    engine.setSlippage(0.2)  # one tick of the index future
    engine.setRate(0.3 / 10000)  # commission: 0.3 per 10,000
    engine.setSize(300)  # index-future contract multiplier
    engine.setPriceTick(0.2)  # minimum price increment
    # Historical database to read from
    engine.setDatabase(MINUTE_DB_NAME, 'IF0000')
    # Instantiate the strategy inside the engine
    engine.initStrategy(ArbitrageTestStrategy, {})
    # Run the backtest
    engine.runBacktesting()
    # Show the backtest results
    engine.showBacktestingResult()
    ## Parameter-optimization example (disabled):
    # setting = OptimizationSetting()  # create an optimization task
    # setting.setOptimizeTarget('capital')  # rank results by net strategy profit
    # setting.addParameter('atrLength', 12, 20, 2)  # first swept parameter: atrLength
    # setting.addParameter('atrMa', 20, 30, 5)  # second swept parameter: atrMa
    # setting.addParameter('rsiLength', 5)  # fixed-value parameter
    ## Benchmark environment: i7-3770 @ 3.4 GHz, 8 cores, 16 GB RAM, Windows 7 Pro
    ## (other programs were running; timings are indicative only)
    # import time
    # start = time.time()
    ## Single-process optimization, prints results automatically: ~359 s
    # engine.runOptimization(AtrRsiStrategy, setting)
    ## Multi-process optimization: ~89 s
    ##engine.runParallelOptimization(AtrRsiStrategy, setting)
    # print u'elapsed: %s' % (time.time()-start)
|
{"hexsha": "86097a9c4b04c659e954d8cdd5f88a8a6f9d3901", "size": 12824, "ext": "py", "lang": "Python", "max_stars_repo_path": "ctaStrategy/strategy/strategyArbitrageTest.py", "max_stars_repo_name": "JesseXu117/vn.trader_Arbitrage", "max_stars_repo_head_hexsha": "de3cd2f28c309d354b348e61a58f1a0178daa88b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-03-07T23:09:43.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-07T23:09:43.000Z", "max_issues_repo_path": "ctaStrategy/strategy/strategyArbitrageTest.py", "max_issues_repo_name": "JesseXu117/vn.trader_Arbitrage", "max_issues_repo_head_hexsha": "de3cd2f28c309d354b348e61a58f1a0178daa88b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ctaStrategy/strategy/strategyArbitrageTest.py", "max_forks_repo_name": "JesseXu117/vn.trader_Arbitrage", "max_forks_repo_head_hexsha": "de3cd2f28c309d354b348e61a58f1a0178daa88b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2018-06-11T09:09:44.000Z", "max_forks_repo_forks_event_max_datetime": "2020-12-01T21:00:09.000Z", "avg_line_length": 34.5660377358, "max_line_length": 120, "alphanum_fraction": 0.542654398, "include": true, "reason": "import numpy", "num_tokens": 3883}
|
import tensorflow as tf
from core.Log import log
from core import Measures, Extractions
from datasets import DataKeys
from forwarding.Forwarder import Forwarder
from forwarding.Util import apply_mask
import gc
import colorsys
import numpy as np
DETECTION_EXTRACTION_KEYS = (Extractions.DET_BOXES, Extractions.DET_PROBS, Extractions.DET_LABELS,
DataKeys.IMAGE_FILENAMES, Extractions.DET_MASKS, DataKeys.RAW_IMAGES,)
# adapted from https://github.com/matterport/Mask_RCNN/blob/master/mrcnn/visualize.py
def generate_colors():
    """Return 30 visually distinct RGB colors as (r, g, b) float tuples.

    Hues are sampled evenly around the HSV circle at fixed saturation and
    brightness, converted to RGB, then reordered with a fixed permutation so
    that neighbouring indices receive dissimilar hues.
    (Adapted from matterport/Mask_RCNN visualize.py.)
    """
    n_colors = 30
    brightness = 0.7
    rgb = [colorsys.hsv_to_rgb(i / n_colors, 1, brightness) for i in range(n_colors)]
    # Fixed shuffle: consecutive detections/tracks get clearly different colors.
    perm = [15, 13, 25, 12, 19, 8, 22, 24, 29, 17, 28, 20, 2, 27, 11,
            26, 21, 4, 3, 18, 9, 5, 14, 1, 16, 0, 23, 7, 6, 10]
    return [rgb[i] for i in perm]
class RecurrentDetectionForwarder(Forwarder):
    """Forwards a recurrent detection network over whole videos.

    Iterates the validation videos batch-by-batch, feeding each step's
    extracted recurrent state back into the next step, and optionally exports
    detections in KITTI instance-segmentation format and/or visualizes them.
    """

    def __init__(self, engine, extraction_keys=DETECTION_EXTRACTION_KEYS):
        super().__init__(engine)
        self.video_ids = self.config.int_list("video_ids", [])  # which videos to forward; [] = all
        self.visualize_detections = self.config.bool("visualize_detections", False)
        # Only relevant if visualize_detections==True; if interactive, show during
        # forwarding instead of exporting jpgs.
        self.interactive_visualization = self.config.bool("interactive_visualization", False)
        # Output for KITTI eval
        self.export_for_kitti_semseg = self.config.bool("export_for_kitti_semseg", True)
        self._extraction_keys = extraction_keys
        # The recurrent state must always be extracted so it can be fed back in.
        if Extractions.RECURRENT_STATE not in self._extraction_keys:
            self._extraction_keys += (Extractions.RECURRENT_STATE,)

    def forward(self):
        """Forward every configured video (all videos when none is configured)."""
        video_ids = range(self.val_data.n_videos()) if len(self.video_ids) == 0 else self.video_ids
        for video_idx in video_ids:
            self.val_data.set_video_idx(video_idx)
            gc.collect()
            tag = self.val_data.get_video_tag()
            n_timesteps = self.val_data.n_examples_per_epoch()
            self._forward_video(n_timesteps, tag)
            gc.collect()

    def _forward_video(self, n_timesteps, tag):
        """Forward one video of n_timesteps frames, threading the recurrent state."""
        recurrent_states = None
        batch_size = self.val_data.get_batch_size()
        for t_start in range(0, n_timesteps, batch_size):
            # BUG FIX: the returned state used to be bound to a dead variable
            # ('recurrent_state'), so 'recurrent_states' stayed None and the
            # network never saw its previous state.  Rebind it here so the
            # state actually carries over between steps.
            recurrent_states, _measures, extractions = self._forward_timestep(recurrent_states)
            for j in range(batch_size):
                t = t_start + j
                if t >= n_timesteps:
                    continue  # last batch may extend past the end of the video
                det_boxes = extractions[Extractions.DET_BOXES][0][j]
                det_scores = extractions[Extractions.DET_PROBS][0][j]
                det_classes = extractions[Extractions.DET_LABELS][0][j]
                det_masks = extractions[Extractions.DET_MASKS][0][j]
                img_filename = extractions[DataKeys.IMAGE_FILENAMES][0][j].decode("utf-8")
                print(img_filename)
                if self.export_for_kitti_semseg:
                    out_folder = "forwarded/" + self.config.string("model") + "/seg_data/"
                    _export_detections_kitti_format(out_folder, det_boxes, det_classes, det_masks, det_scores, t, tag)
                if self.visualize_detections:
                    if DataKeys.RAW_IMAGES not in extractions:
                        print("Can't extract raw images for visualization, maybe images in batch have different size?", file=log.v5)
                        assert False
                    img = extractions[DataKeys.RAW_IMAGES][0][j]
                    if self.interactive_visualization:
                        out_filename = None  # show on screen instead of saving
                    else:
                        out_folder = "forwarded/" + self.config.string("model") + "/vis/" + tag
                        tf.gfile.MakeDirs(out_folder)
                        out_filename = out_folder + "/%06d.jpg" % t
                    visualize_detections(det_boxes, det_classes, det_masks, det_scores, img, save_path=out_filename)

    def _forward_timestep(self, recurrent_states):
        """Run one validation step; returns (new_recurrent_states, measures, extractions)."""
        feed_dict = self.val_data.get_feed_dict_for_next_step()
        if recurrent_states is not None:
            placeholders = self.engine.test_network.placeholders
            assert len(placeholders) == len(recurrent_states)
            for placeholder, val in zip(placeholders, recurrent_states):
                feed_dict[placeholder] = val
        step_res = self.trainer.validation_step(feed_dict=feed_dict,
                                                extraction_keys=self._extraction_keys)
        measures = step_res[Measures.MEASURES]
        extractions = step_res[Extractions.EXTRACTIONS]
        if Extractions.RECURRENT_STATE in extractions:
            recurrent_states = extractions[Extractions.RECURRENT_STATE]
            assert len(recurrent_states) == 1
            recurrent_states = recurrent_states[0]
            # Flatten the per-layer state tuples into one flat list of arrays.
            recurrent_states = [item for statetuple in recurrent_states for item in statetuple]
            # Ensure each state array has a leading batch dimension.
            recurrent_states = [x[np.newaxis] if len(x.shape) == 1 else x for x in recurrent_states]
        return recurrent_states, measures, extractions
def _export_detections_kitti_format(out_folder, det_boxes, det_classes, det_masks, det_scores, t, tag):
    """Dump one frame's detections in the layout expected by the (modified)
    evalInstanceLevelSemanticLabeling.py from the KITTI eval suite: a per-frame
    text file listing mask image, class and score, plus one PNG mask per
    detection.
    """
    assert len(det_boxes) == len(det_scores) == len(det_classes) == len(det_masks)
    # Create the output folders.
    tf.gfile.MakeDirs(out_folder + "pred_list/")
    tf.gfile.MakeDirs(out_folder + "pred_img/")
    frame_name = tag + "_" + str(t).zfill(6)
    # One line per detection: relative mask path, class id, score.
    with open(out_folder + "pred_list/" + frame_name + ".txt", "w+") as list_file:
        for det_idx, (box, score, class_, mask) in enumerate(zip(det_boxes, det_scores, det_classes, det_masks)):
            mask_img_filename = "pred_img/" + frame_name + "_" + str(det_idx).zfill(3) + ".png"
            from PIL import Image
            mask_image = Image.fromarray(mask * 255)
            mask_image.save(out_folder + mask_img_filename)
            list_file.write("../" + mask_img_filename + " " + str(class_) + " " + str(score) + "\n")
def visualize_detections(det_boxes, det_classes, det_masks, det_scores, img, ids=None, save_path=None,
                         draw_boxes=False, box_is_xywh=False):
    """Draw detections (masks, optional boxes and labels) onto *img* with matplotlib.

    Args:
      det_boxes: per-detection boxes, [x0, y0, x1, y1] unless box_is_xywh.
      det_classes: integer class ids; 1 and 2 are labeled, anything else is
        rendered as a gray "Ignore" region.
      det_masks: per-detection binary masks (entries may be None).
      det_scores: detection confidences; used in the label and as mask alpha.
      img: image array; NOTE: mutated in place by apply_mask.
      ids: optional track ids used for stable colors and label text.
      save_path: if None, show interactively; otherwise save the figure there.
      draw_boxes: also draw bounding-box rectangles.
      box_is_xywh: boxes are already in [x, y, w, h] form.
    """
    colors = generate_colors()
    if save_path is not None:
        # Select the non-interactive Agg backend before pyplot is imported.
        import matplotlib
        matplotlib.use("Agg")
    import matplotlib.pyplot as plt
    fig = plt.figure()
    dpi = 100.0
    # Size the figure so one image pixel maps to one output pixel.
    fig.set_size_inches(img.shape[1]/dpi, img.shape[0]/dpi, forward=True)
    fig.subplots_adjust(left=0, bottom=0, right=1, top=1, wspace=None, hspace=None)
    ax = fig.subplots()
    ax.set_axis_off()
    assert len(det_boxes) == len(det_scores) == len(det_classes) == len(det_masks)
    for idx, (bbox, score, class_, mask) in enumerate(zip(det_boxes, det_scores, det_classes, det_masks)):
        # Color by detection index, or by track id when available.
        if ids is None:
            color = colors[idx % len(colors)]
        else:
            color = colors[ids[idx] % len(colors)]
        # TODO
        if class_ == 1:
            category_name = "Cow"
        elif class_ == 2:
            category_name = "Pedestrian"
        else:
            category_name = "Ignore"
            color = (0.7, 0.7, 0.7)  # gray for ignore regions
        if class_ == 1 or class_ == 2:  # Don't show boxes or ids for ignore regions
            if ids is not None:
                category_name += ":" + str(ids[idx])
            if score < 1.0:
                category_name += ":" + "%.2f" % score
            if not box_is_xywh:
                # Convert [x0, y0, x1, y1] to [x, y, w, h].
                bbox = [bbox[0], bbox[1], bbox[2] - bbox[0], bbox[3] - bbox[1]]
            if draw_boxes:
                import matplotlib.patches as patches
                rect = patches.Rectangle((bbox[0], bbox[1]), bbox[2], bbox[3], linewidth=1,
                                         edgecolor=color, facecolor='none', alpha=1.0)
                ax.add_patch(rect)
            # Label at the box center.
            ax.annotate(category_name, (bbox[0] + 0.5 * bbox[2], bbox[1] + 0.5 * bbox[3]), color=color, weight='bold',
                        fontsize=7, ha='center', va='center', alpha=1.0)
        if mask is not None:
            # Blend the instance mask into the image in place; more confident
            # detections are drawn more opaquely.
            apply_mask(img, mask, color, alpha=score * 0.5)
    ax.imshow(img)
    if save_path is None:
        plt.show()
    else:
        print(save_path, file=log.v5)
        fig.savefig(save_path, dpi=dpi)
        plt.close(fig)
|
{"hexsha": "da715472dd6ced57105fa6b00dc405b4aaef3759", "size": 7837, "ext": "py", "lang": "Python", "max_stars_repo_path": "algorithms/TrackR-CNN/forwarding/RecurrentDetectionForwarder.py", "max_stars_repo_name": "Diego-Barbulo/TrackR-CNN", "max_stars_repo_head_hexsha": "12eeaca0bd1903ee28822bd2341456404b6e0af4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-03-25T08:57:52.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-25T08:57:52.000Z", "max_issues_repo_path": "algorithms/TrackR-CNN/forwarding/RecurrentDetectionForwarder.py", "max_issues_repo_name": "Diego-Barbulo/TrackR-CNN", "max_issues_repo_head_hexsha": "12eeaca0bd1903ee28822bd2341456404b6e0af4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "algorithms/TrackR-CNN/forwarding/RecurrentDetectionForwarder.py", "max_forks_repo_name": "Diego-Barbulo/TrackR-CNN", "max_forks_repo_head_hexsha": "12eeaca0bd1903ee28822bd2341456404b6e0af4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-03-25T08:58:19.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-25T08:58:19.000Z", "avg_line_length": 44.0280898876, "max_line_length": 120, "alphanum_fraction": 0.6793415848, "include": true, "reason": "import numpy", "num_tokens": 2074}
|
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 30 10:07:52 2021
@author: Uday Talwar
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
mpl.rcParams['figure.dpi'] = 4800
def mandelbrotset(steps, threshold, msize, nsize, degree):
    """Return a boolean mask of the generalized Mandelbrot set z_{n+1} = z_n^k + c.

    steps     -- number of iterations
    threshold -- divergence bound on |z|
    msize     -- number of grid rows (real axis samples)
    nsize     -- number of grid columns (imaginary axis samples)
    degree    -- the power k in the recurrence
    """
    steps = int(steps)
    threshold = int(threshold)
    msize, nsize = int(msize), int(nsize)
    # Complex step counts make np.mgrid produce evenly spaced samples
    # over [-2, 1] x [-1.5, 1.5].
    re_axis, im_axis = np.mgrid[-2:1:complex(0, msize), -1.5:1.5:complex(0, nsize)]
    c = re_axis + 1j * im_axis
    z = 0
    for _ in range(steps):
        # Clamp already-diverged points to threshold+1 so they stay diverged
        # while the rest keep iterating.
        z = np.where(abs(z) < threshold, z, threshold + 1) ** degree + c
    # True where the orbit stayed bounded.
    return abs(z) < threshold
# Render the classic degree-2 Mandelbrot set on a 1000x1000 grid.
mbrot = mandelbrotset(100, 50, 1000, 1000, 2)  # run iteration
# plot iteration; transpose so the real axis maps to the horizontal direction
plt.clf()
plt.imshow(mbrot.T, extent=[-2, 1, -1.5, 1.5])
plt.gray()
plt.show()
# some other interesting renders with varying function forms
def mandelbrotset2(steps, threshold, msize, nsize, degree):
    """Boolean mask for the variant recurrence z_{n+1} = z_n^k + z_n^(k-1) + c,
    iterated from z_0 = c over the grid [-2, 1] x [-1.5, 1.5]."""
    re_axis, im_axis = np.mgrid[-2:1:complex(0, msize), -1.5:1.5:complex(0, nsize)]
    c = re_axis + 1j * im_axis
    z = c  # note: iteration starts at c, not 0
    for _ in range(steps):
        clipped = np.where(abs(z) < threshold, z, threshold + 1)
        z = clipped ** degree + clipped ** (degree - 1) + c
    return abs(z) < threshold
# Render the z^3 + z^2 + c variant.
test2 = mandelbrotset2(100, 50, 1000, 1000, 3)
plt.clf()
plt.imshow(test2.T, extent=[-2, 1, -1.5, 1.5])
plt.gray()
plt.show()
def mandelbrotset3(steps, threshold, msize, nsize, degree):
    """Boolean mask for the variant recurrence
    z_{n+1} = z_n^k + z_n^(k-1) + z_n^(k-2) + c, iterated from z_0 = c."""
    re_axis, im_axis = np.mgrid[-2:1:complex(0, msize), -1.5:1.5:complex(0, nsize)]
    c = re_axis + 1j * im_axis
    z = c  # note: iteration starts at c, not 0
    for _ in range(steps):
        clipped = np.where(abs(z) < threshold, z, threshold + 1)
        z = clipped ** degree + clipped ** (degree - 1) + clipped ** (degree - 2) + c
    return abs(z) < threshold
# Render the z^3 + z^2 + z + c variant.
test3 = mandelbrotset3(100, 50, 1000, 1000, 3)
plt.clf()
plt.imshow(test3.T, extent=[-2, 1, -1.5, 1.5])
plt.gray()
plt.show()
|
{"hexsha": "c041303e8460ad1acebb8db750171f60bb2de149", "size": 3202, "ext": "py", "lang": "Python", "max_stars_repo_path": "MandelbrotSets.py", "max_stars_repo_name": "ta1war/Mandelbrot_sets", "max_stars_repo_head_hexsha": "48ede6f4faac8e890d80e3bf11fc666c097c78e1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "MandelbrotSets.py", "max_issues_repo_name": "ta1war/Mandelbrot_sets", "max_issues_repo_head_hexsha": "48ede6f4faac8e890d80e3bf11fc666c097c78e1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "MandelbrotSets.py", "max_forks_repo_name": "ta1war/Mandelbrot_sets", "max_forks_repo_head_hexsha": "48ede6f4faac8e890d80e3bf11fc666c097c78e1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.0325203252, "max_line_length": 112, "alphanum_fraction": 0.6105559026, "include": true, "reason": "import numpy", "num_tokens": 1067}
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import traceback
import types
from typing import Any, Callable, TypeVar
import jax
from jax._src.lib import xla_extension
from jax._src import util
# Type variable preserving the wrapped callable's signature in api_boundary.
C = TypeVar("C", bound=Callable[..., Any])

# Files/directories whose frames are treated as JAX-internal and hidden from
# filtered tracebacks; seeded with this module and jax._src.util, and extended
# via register_exclusion().
_exclude_paths = [__file__, util.__file__]

def register_exclusion(path):
    """Register *path* so frames from files under it are filtered from tracebacks."""
    _exclude_paths.append(path)

# Appended to the unfiltered exception's message to explain the paired tracebacks.
_jax_message_append = (
    'The stack trace below excludes JAX-internal frames.\n'
    'The preceding is the original exception that occurred, unmodified.\n'
    '\n--------------------')
def path_starts_with(path, path_prefix):
    """Return True if *path* lies under *path_prefix* after normalization.

    Compares absolute paths; falls back to os.path.samefile so symlinked
    locations of the same directory still match.
    """
    path = os.path.abspath(path)
    path_prefix = os.path.abspath(path_prefix)
    try:
        common = os.path.commonpath([path, path_prefix])
    except ValueError:
        # Both paths are absolute here; commonpath only raises when they live
        # on different drives (Windows).
        # https://docs.python.org/3/library/os.path.html#os.path.commonpath
        return False
    if common == path_prefix:
        return True
    try:
        return os.path.samefile(common, path_prefix)
    except OSError:
        # One of the paths may not exist.
        return False
def include_frame(f):
    """Return True if frame *f* belongs to user code, i.e. its source file is
    under none of the registered exclusion paths."""
    filename = f.f_code.co_filename
    return all(not path_starts_with(filename, excluded) for excluded in _exclude_paths)
# When scanning stack traces, we might encounter frames from cpython that are
# removed from printed stack traces, such as frames from parts of importlib. We
# ignore these frames heuristically based on source and name match.
def ignore_known_hidden_frame(f):
    """Return True for CPython-internal frames (importlib bootstrap machinery)
    that the interpreter itself omits from printed stack traces."""
    filename = f.f_code.co_filename
    return 'importlib._bootstrap' in filename
def add_tracebackhide_to_hidden_frames(tb):
    """Mark every JAX-internal frame in traceback *tb* with __tracebackhide__,
    which tools like pytest and IPython honor when rendering tracebacks."""
    for frame, _lineno in traceback.walk_tb(tb):
        if include_frame(frame):
            continue
        frame.f_locals["__tracebackhide__"] = True
def filter_traceback(tb):
    """Return a rebuilt copy of *tb* containing only user-code frames, or None
    if no frame survives filtering."""
    filtered = None
    # Walk from the innermost frame outward, relinking survivors so the
    # original ordering is preserved in the rebuilt traceback.
    frames = list(traceback.walk_tb(tb))
    for frame, lineno in reversed(frames):
        if include_frame(frame):
            filtered = types.TracebackType(filtered, frame, frame.f_lasti, lineno)  # pytype: disable=wrong-arg-count
    return filtered
def add_call_stack_frames(tb):
    """Extend *tb* upward with the caller's stack, stopping at a REPL-like boundary.

    We would like to avoid stepping too far up, e.g. past the exec/eval point of
    a REPL such as IPython. To that end, we stop past the first contiguous bunch
    of module-level frames, if we reach any such frames at all. This is a
    heuristic that might stop in advance of the REPL boundary. For example, if
    the call stack includes module-level frames from the current module A, and
    the current module A was imported from within a function F elsewhere, then
    the stack trace we produce will be truncated at F's frame.
    """
    out = tb
    reached_module_level = False
    for f, lineno in traceback.walk_stack(tb.tb_frame):
        if ignore_known_hidden_frame(f):
            # Skip CPython-internal importlib frames entirely.
            continue
        if reached_module_level and f.f_code.co_name != '<module>':
            # First non-module frame after a run of module-level frames: stop.
            break
        if include_frame(f):
            out = types.TracebackType(out, f, f.f_lasti, lineno)  # pytype: disable=wrong-arg-count
        if f.f_code.co_name == '<module>':
            reached_module_level = True
    return out
def is_reraiser_frame(f):
    """Return True if stack summary *f* is this module's
    reraise_with_filtered_traceback wrapper frame."""
    return f.filename == __file__ and f.name == 'reraise_with_filtered_traceback'
def is_under_reraiser(e):
    """Return True if exception *e* was raised beneath an api_boundary
    reraiser frame on the current call stack (its own frame excluded)."""
    stack = traceback.extract_stack(e.__traceback__.tb_frame)
    return any(is_reraiser_frame(summary) for summary in stack[:-1])
def format_exception_only(e):
    """Render *e* as the final 'ExcType: message' line(s) of a traceback,
    with surrounding whitespace stripped."""
    lines = traceback.format_exception_only(type(e), e)
    return ''.join(lines).strip()
class UnfilteredStackTrace(Exception):
    """Carrier for the original, unfiltered traceback; attached as __cause__
    of the filtered exception raised by api_boundary."""
def running_under_ipython():
    """Return True if we appear to be inside an IPython session."""
    try:
        # get_ipython is injected into builtins only by IPython's shell.
        get_ipython()  # type: ignore
    except NameError:
        return False
    return True
def ipython_supports_tracebackhide():
    """Return True if the installed IPython (>= 7.17) honors __tracebackhide__."""
    import IPython  # type: ignore
    major_minor = IPython.version_info[:2]
    return major_minor >= (7, 17)
def filtering_mode():
    """Resolve the jax_traceback_filtering config value, mapping None/'auto'
    to a concrete mode based on the runtime environment."""
    mode = jax.config.jax_traceback_filtering
    if mode is None or mode == "auto":
        # Prefer the lightweight tracebackhide annotation under a capable
        # IPython; otherwise rebuild the traceback without internal frames.
        if running_under_ipython() and ipython_supports_tracebackhide():
            return "tracebackhide"
        return "remove_frames"
    return mode
def api_boundary(fun: C) -> C:
  '''Wraps ``fun`` to form a boundary for filtering exception tracebacks.

  When an exception occurs below ``fun``, this appends to it a custom
  ``__cause__`` that carries a filtered traceback. The traceback imitates the
  stack trace of the original exception, but with JAX-internal frames removed.

  This boundary annotation works in composition with itself. The topmost frame
  corresponding to an ``api_boundary`` is the one below which stack traces are
  filtered. In other words, if ``api_boundary(f)`` calls ``api_boundary(g)``,
  directly or indirectly, the filtered stack trace provided is the same as if
  ``api_boundary(f)`` were to simply call ``g`` instead.

  This annotation is primarily useful in wrapping functions output by JAX's
  transformations. For example, consider ``g = jax.jit(f)``. When ``g`` is
  called, JAX's JIT compilation machinery is invoked, which in turn calls ``f``
  in order to trace and translate it. If the function ``f`` raises an exception,
  the stack unwinds through JAX's JIT internals up to the original call site of
  ``g``. Because the function returned by ``jax.jit`` is annotated as an
  ``api_boundary``, such an exception is accompanied by an additional traceback
  that excludes the frames specific to JAX's implementation.
  '''
  @util.wraps(fun)
  def reraise_with_filtered_traceback(*args, **kwargs):
    # Hide this wrapper frame itself from tools that honor __tracebackhide__.
    __tracebackhide__ = True
    try:
      return fun(*args, **kwargs)
    except Exception as e:
      mode = filtering_mode()
      if is_under_reraiser(e) or mode == "off":
        # An enclosing api_boundary will handle (or has handled) this one.
        raise
      if mode == "tracebackhide":
        # Annotate internal frames in place; no traceback surgery needed.
        add_tracebackhide_to_hidden_frames(e.__traceback__)
        raise
      assert mode == "remove_frames", mode

      filtered_tb, unfiltered, mode = None, None, None
      try:
        filtered_tb = filter_traceback(e.__traceback__)
        msg = format_exception_only(e)
        msg = f'{msg}\n\n{_jax_message_append}'
        # The unfiltered exception carries the full (internal-frame) traceback
        # and is chained as the cause of the filtered one.
        unfiltered = UnfilteredStackTrace(msg)
        unfiltered.with_traceback(add_call_stack_frames(e.__traceback__))
        unfiltered.__context__ = e.__context__
        unfiltered.__cause__ = e.__cause__
        unfiltered.__suppress_context__ = e.__suppress_context__
        e.__context__ = None
        e.__cause__ = unfiltered
        # There seems to be no way to alter the currently raised exception's
        # traceback, except via the C API. The currently raised exception
        # is part of the interpreter's thread state: value `e` is a copy.
        xla_extension.replace_thread_exc_traceback(filtered_tb)
        raise
      finally:
        # Drop local references promptly to break reference cycles through
        # the traceback objects.
        del filtered_tb
        del unfiltered
        del mode
  return reraise_with_filtered_traceback
|
{"hexsha": "d9c623c15e6ae5e8a269ed1e3ae8439dce05c1bd", "size": 7312, "ext": "py", "lang": "Python", "max_stars_repo_path": "jax/_src/traceback_util.py", "max_stars_repo_name": "mariogeiger/jax", "max_stars_repo_head_hexsha": "7098088f4eb15cf750398889e4341dbc15cda1b3", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-09-27T06:33:20.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-08T10:03:33.000Z", "max_issues_repo_path": "jax/_src/traceback_util.py", "max_issues_repo_name": "mariogeiger/jax", "max_issues_repo_head_hexsha": "7098088f4eb15cf750398889e4341dbc15cda1b3", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 9, "max_issues_repo_issues_event_min_datetime": "2022-01-03T06:09:33.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-07T19:31:57.000Z", "max_forks_repo_path": "jax/_src/traceback_util.py", "max_forks_repo_name": "mariogeiger/jax", "max_forks_repo_head_hexsha": "7098088f4eb15cf750398889e4341dbc15cda1b3", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.4974358974, "max_line_length": 93, "alphanum_fraction": 0.7204595186, "include": true, "reason": "import jax,from jax", "num_tokens": 1767}
|
!! generate an initial model for an arbitrary-mass, isothermal C WD
!! with an isentropic He envelope on the surface.
program init_1d
use bl_types
use bl_constants_module
use bl_error_module
use extern_probin_module, only: use_eos_coulomb
use eos_module, only: eos_input_rt, eos, eos_init
use eos_type_module, only: eos_t
use network
use fundamental_constants_module, only: Gconst
use f2kcli
implicit none
integer :: i, n
character(len=128) :: params_file
real (kind=dp_t) :: temp_core, temp_base, delta
real (kind=dp_t), DIMENSION(nspec) :: xn_core, xn_he
logical :: mixed_co_wd
real (kind=dp_t), allocatable :: xzn_hse(:), xznl(:), xznr(:)
real (kind=dp_t), allocatable :: model_hse(:,:), M_enclosed(:)
real (kind=dp_t), allocatable :: cs_hse(:), s_hse(:)
real (kind=dp_t) :: rho_c, rho_c_old, mass_wd, mass_wd_old, drho_c
real (kind=dp_t) :: rho_he, rho_he_old, mass_he, mass_he_old, drho_he
real (kind=dp_t) :: slope_T, slope_xn(nspec)
real (kind=dp_t) :: A, B, dAdT, dAdrho, dBdT, dBdrho
logical :: isentropic
real (kind=dp_t) :: test
integer :: nx
! define convenient indices for the scalars
integer, parameter :: nvar = 3 + nspec
integer, parameter :: idens = 1, &
itemp = 2, &
ipres = 3, &
ispec = 4
! we'll get the composition indices from the network module
integer, save :: ihe4, ic12, io16
real (kind=dp_t), save :: xmin, xmax, dCoord
real (kind=dp_t) :: dens_zone, temp_zone, pres_zone, entropy
real (kind=dp_t) :: dpd, dpt, dsd, dst
real (kind=dp_t) :: p_want, drho, dtemp, delx
real (kind=dp_t) :: entropy_base
real (kind=dp_t) :: g_zone
! TOL_HSE is the tolerance used when iterating over a zone to force
! it into HSE by adjusting the current density (and possibly
! temperature). TOL_HSE should be very small (~ 1.e-10).
real (kind=dp_t), parameter :: TOL_HSE = 1.d-10
! TOL_WD_MASS is tolerance used for getting the total WD mass equal
! to M_tot (defined below). It can be reasonably small, since there
! will always be a central density value that can give the desired
! WD mass on the grid we use
real (kind=dp_t), parameter :: TOL_WD_MASS = 1.d-4
! TOL_HE_MASS is the tolerance used for getting the mass of the He
! envelope. This should not be too small, since the values of the
! He envelope mass we can achieve will depend on our grid spacing.
real (kind=dp_t), parameter :: TOL_HE_MASS = 2.d-2
integer, parameter :: MAX_ITER = 250
integer :: iter, iter_mass
integer :: icutoff, ihe_layer, ihe_entropy
logical :: converged_hse, fluff, mass_converged
real (kind=dp_t), dimension(nspec) :: xn
real (kind=dp_t) :: low_density_cutoff, temp_fluff, smallx, smallt
real (kind=dp_t) :: M_tot, M_He
real (kind=dp_t) :: solar_mass = 1.98892d33
character (len=256) :: outfile
character (len=8) num, mass_wd_str, mass_he_str
real (kind=dp_t) :: max_hse_error, dpdr, rhog
integer :: narg
type (eos_t) :: eos_state
namelist /params/ nx, M_tot, M_He, delta, xmin, xmax, &
temp_core, temp_base, mixed_co_wd, low_density_cutoff, temp_fluff, smallt
! determine if we specified a runtime parameters file or use the default
narg = command_argument_count()
if (narg == 0) then
params_file = "_params"
else
call get_command_argument(1, value = params_file)
endif
! define the defaults parameters for this model
nx = 2560
M_tot = 0.6
M_He = 0.2
delta = 1.d-6
xmin = 0_dp_t
xmax = 1.6e9_dp_t
temp_core = 1.d7
temp_base = 4.d8
mixed_co_wd = .true.
low_density_cutoff =1.d-4
temp_fluff = 1.d5
smallt = 1.d5
! check the namelist for any changed parameters
open(unit=11, file=trim(params_file), status="old", action="read")
read(unit=11, nml=params)
close(unit=11)
! convert the envelope and WD mass into solar masses
M_tot = M_tot * solar_mass
M_He = M_He * solar_mass
! this comes in via extern_probin_module, override the default if desired.
use_eos_coulomb = .true.
smallx = 1.d-10
! initialize the EOS and network
call eos_init()
call network_init()
! get the species indices
ihe4 = network_species_index("helium-4")
ic12 = network_species_index("carbon-12")
io16 = network_species_index("oxygen-16")
if (ihe4 < 0 .or. ic12 < 0 .or. io16 < 0) then
call bl_error("ERROR: species not defined")
endif
if (mixed_co_wd) then
xn_core(:) = smallx
xn_core(ic12) = 0.5_dp_t - 0.5*(nspec - 1)*smallx
xn_core(io16) = 0.5_dp_t - 0.5*(nspec - 1)*smallx
else
xn_core(:) = smallx
xn_core(ic12) = 1.0_dp_t - (nspec - 1)*smallx
endif
xn_he(:) = smallx
xn_he(ihe4) = 1.0_dp_t - (nspec - 1)*smallx
!----------------------------------------------------------------------------
! Create a 1-d uniform grid that is identical to the mesh that we are
! mapping onto, and then we want to force it into HSE on that mesh.
!----------------------------------------------------------------------------
! allocate storage
allocate(xzn_hse(nx))
allocate(xznl(nx))
allocate(xznr(nx))
allocate(model_hse(nx,nvar))
allocate(M_enclosed(nx))
allocate(cs_hse(nx))
allocate(s_hse(nx))
! compute the coordinates of the new gridded function
dCoord = (xmax - xmin) / dble(nx)
do i = 1, nx
xznl(i) = xmin + (dble(i) - 1.0_dp_t)*dCoord
xznr(i) = xmin + (dble(i))*dCoord
xzn_hse(i) = 0.5_dp_t*(xznl(i) + xznr(i))
enddo
! We don't know what WD central density will give the desired total
! mass, so we need to iterate over central density
! we will do a secant iteration. rho_c_old is the 'old' guess for
! the central density and rho_c is the current guess. After 2
! loops, we can start estimating the density required to yield our
! desired mass
rho_c_old = -1.0_dp_t
rho_c = 1.e9_dp_t ! 1.e9 is a reasonable starting WD central density
! rho_he_old is the old guess for the density to transition to He,
! where we will be isentropic, and rho_he is the currrent guess.
rho_he_old = -1.0_dp_t
rho_he = 0.5*rho_c
mass_converged = .false.
do iter_mass = 1, MAX_ITER
print *, 'mass iter = ', iter_mass, rho_c, temp_core
fluff = .false.
! we start at the center of the WD and integrate outward. Initialize
! the central conditions.
eos_state%T = temp_core
eos_state%rho = rho_c
eos_state%xn(:) = xn_core(:)
! (t, rho) -> (p, s)
call eos(eos_input_rt, eos_state)
! make the initial guess be completely uniform
model_hse(:,idens) = eos_state%rho
model_hse(:,itemp) = eos_state%T
model_hse(:,ipres) = eos_state%p
do i = 1, nspec
model_hse(:,ispec-1+i) = eos_state%xn(i)
enddo
! keep track of the mass enclosed below the current zone
M_enclosed(1) = FOUR3RD*M_PI*(xznr(1)**3 - xznl(1)**3)*model_hse(1,idens)
ihe_layer = -1
ihe_entropy = -1
!-------------------------------------------------------------------------
! HSE + entropy solve
!-------------------------------------------------------------------------
do i = 2, nx
delx = xzn_hse(i) - xzn_hse(i-1)
! as the initial guess for the density, use the previous zone
dens_zone = model_hse(i-1,idens)
if (dens_zone > rho_he) then
temp_zone = temp_core
xn(:) = xn_core(:)
isentropic = .false.
else
if (ihe_layer == -1) then
ihe_layer = i
endif
! determine whether we are starting the ramp up. We will
! use a tanh profile, centered at (xzn_hse(ihe_layer) +
! FOUR*delta). The "+ FOUR*delta" enables us to capture
! the leading edge of the profile. Since rho_he is
! computed by considering the integral of He on the grid,
! shifting the profile by FOUR*delta doesn't affect the
! overall mass.
test = HALF*(ONE + tanh((xzn_hse(i) - xzn_hse(ihe_layer) - FOUR*delta)/delta))
if (test < 0.999d0) then
! small tanh ramp up regime
xn(:) = xn_core(:) + HALF*(xn_he(:) - xn_core(:))* &
(ONE + tanh((xzn_hse(i) - xzn_hse(ihe_layer) - FOUR*delta)/delta))
temp_zone = temp_core + HALF*(temp_base - temp_core)* &
(ONE + tanh((xzn_hse(i) - xzn_hse(ihe_layer) - FOUR*delta)/delta))
isentropic = .false.
else
! fully isentropic
if (ihe_entropy == -1) then
ihe_entropy = i
temp_zone = temp_base
isentropic = .false.
else
temp_zone = model_hse(i-1,itemp)
isentropic = .true.
endif
xn(:) = xn_he(:)
endif
endif
g_zone = -Gconst*M_enclosed(i-1)/(xznl(i)*xznl(i))
!----------------------------------------------------------------------
! iteration loop
!----------------------------------------------------------------------
! start off the Newton loop by saying that the zone has not converged
converged_hse = .FALSE.
if (.not. fluff) then
do iter = 1, MAX_ITER
if (isentropic) then
p_want = model_hse(i-1,ipres) + &
delx*0.5_dp_t*(dens_zone + model_hse(i-1,idens))*g_zone
! now we have two functions to zero:
! A = p_want - p(rho,T)
! B = entropy_base - s(rho,T)
! We use a two dimensional Taylor expansion and find the deltas
! for both density and temperature
eos_state%T = temp_zone
eos_state%rho = dens_zone
eos_state%xn(:) = xn(:)
! (t, rho) -> (p, s)
call eos(eos_input_rt, eos_state)
entropy = eos_state%s
pres_zone = eos_state%p
dpt = eos_state%dpdt
dpd = eos_state%dpdr
dst = eos_state%dsdt
dsd = eos_state%dsdr
A = p_want - pres_zone
B = entropy_base - entropy
dAdT = -dpt
dAdrho = 0.5d0*delx*g_zone - dpd
dBdT = -dst
dBdrho = -dsd
dtemp = (B - (dBdrho/dAdrho)*A)/ &
((dBdrho/dAdrho)*dAdT - dBdT)
drho = -(A + dAdT*dtemp)/dAdrho
dens_zone = max(0.9_dp_t*dens_zone, &
min(dens_zone + drho, 1.1_dp_t*dens_zone))
temp_zone = max(0.9_dp_t*temp_zone, &
min(temp_zone + dtemp, 1.1_dp_t*temp_zone))
! check if the density falls below our minimum
! cut-off -- if so, floor it
if (dens_zone < low_density_cutoff) then
dens_zone = low_density_cutoff
temp_zone = temp_fluff
converged_hse = .TRUE.
fluff = .TRUE.
exit
endif
if ( abs(drho) < TOL_HSE*dens_zone .and. &
abs(dtemp) < TOL_HSE*temp_zone) then
converged_hse = .TRUE.
exit
endif
else
! the core is isothermal, so we just need to constrain
! the density and pressure to agree with the EOS and HSE
! We difference HSE about the interface between the current
! zone and the one just inside.
p_want = model_hse(i-1,ipres) + &
delx*0.5*(dens_zone + model_hse(i-1,idens))*g_zone
eos_state%T = temp_zone
eos_state%rho = dens_zone
eos_state%xn(:) = xn(:)
! (t, rho) -> (p, s)
call eos(eos_input_rt, eos_state)
entropy = eos_state%s
pres_zone = eos_state%p
dpd = eos_state%dpdr
drho = (p_want - pres_zone)/(dpd - 0.5*delx*g_zone)
dens_zone = max(0.9*dens_zone, &
min(dens_zone + drho, 1.1*dens_zone))
if (abs(drho) < TOL_HSE*dens_zone) then
converged_hse = .TRUE.
exit
endif
if (dens_zone < low_density_cutoff) then
icutoff = i
dens_zone = low_density_cutoff
temp_zone = temp_fluff
converged_hse = .TRUE.
fluff = .TRUE.
exit
endif
endif
if (temp_zone < temp_fluff .and. isentropic) then
temp_zone = temp_fluff
isentropic = .false.
endif
enddo
if (.NOT. converged_hse) then
print *, 'Error zone', i, ' did not converge in init_1d'
print *, dens_zone, temp_zone
print *, p_want
print *, drho
call bl_error('Error: HSE non-convergence')
endif
else
dens_zone = low_density_cutoff
temp_zone = temp_fluff
endif
! call the EOS one more time for this zone and then go on
! to the next
eos_state%T = temp_zone
eos_state%rho = dens_zone
eos_state%xn(:) = xn(:)
! (t, rho) -> (p, s)
call eos(eos_input_rt, eos_state)
pres_zone = eos_state%p
! determine the entropy that we want to constrain to, if
! this is the first zone of the He layer
if (i == ihe_entropy) then
entropy_base = entropy
endif
! update the thermodynamics in this zone
model_hse(i,idens) = dens_zone
model_hse(i,itemp) = temp_zone
model_hse(i,ipres) = pres_zone
model_hse(i,ispec:ispec-1+nspec) = xn(:)
M_enclosed(i) = M_enclosed(i-1) + &
FOUR3RD*M_PI*(xznr(i) - xznl(i))* &
(xznr(i)**2 +xznl(i)*xznr(i) + xznl(i)**2)*model_hse(i,idens)
cs_hse(i) = eos_state%cs
s_hse(i) = eos_state%s
enddo ! end loop over zones
! compute the total mass of the He layer and C/O WD
mass_he = FOUR3RD*M_PI*(xznr(1)**3 - xznl(1)**3)* &
model_hse(1,idens)*model_hse(1,ispec-1+ihe4)
mass_wd = FOUR3RD*M_PI*(xznr(1)**3 - xznl(1)**3)*model_hse(1,idens)* &
(model_hse(1,ispec-1+ic12) + model_hse(1,ispec-1+io16))
do i = 2, icutoff
mass_he = mass_he + &
FOUR3RD*M_PI*(xznr(i) - xznl(i))* &
(xznr(i)**2 +xznl(i)*xznr(i) + xznl(i)**2)*model_hse(i,idens)* &
model_hse(i,ispec-1+ihe4)
mass_wd = mass_wd + &
FOUR3RD*M_PI*(xznr(i) - xznl(i))* &
(xznr(i)**2 +xznl(i)*xznr(i) + xznl(i)**2)*model_hse(i,idens)* &
(model_hse(i,ispec-1+ic12) + model_hse(i,ispec-1+io16))
enddo
if (rho_c_old < 0.0_dp_t) then
! not enough iterations yet -- store the old central density and
! mass and pick a new value
rho_c_old = rho_c
mass_wd_old = mass_wd
rho_he_old = rho_he
mass_he_old = mass_he
rho_c = 0.5*rho_c_old
rho_he = 0.5*rho_he_old
else
! have we converged
if ( abs(mass_wd - M_tot)/M_tot < TOL_WD_MASS .and. &
abs(mass_he - M_He)/M_He < TOL_HE_MASS) then
mass_converged = .true.
exit
endif
! do a secant iteration:
! M_tot = M(rho_c) + dM/drho |_rho_c x drho + ...
drho_c = (M_tot - mass_wd)/ &
( (mass_wd - mass_wd_old)/(rho_c - rho_c_old) )
rho_c_old = rho_c
mass_wd_old = mass_wd
rho_c = min(1.1_dp_t*rho_c_old, &
max((rho_c + drho_c), 0.9_dp_t*rho_c_old))
drho_he = (M_He - mass_he)/ &
( (mass_he - mass_he_old)/(rho_he - rho_he_old) )
rho_he_old = rho_he
mass_he_old = mass_he
rho_he = min(1.1_dp_t*rho_he_old, &
max((rho_he + drho_he), 0.9_dp_t*rho_he_old))
print *, 'current mass = ', mass_wd/solar_mass, mass_he/solar_mass
endif
enddo ! end mass constraint loop
if (.not. mass_converged) then
print *, 'ERROR: WD mass did not converge'
call bl_error("ERROR: mass did not converge")
endif
print *, 'final masses: '
print *, ' mass WD: ', mass_wd/solar_mass
print *, ' mass He: ', mass_He/solar_mass
print *, ihe_layer
! store the model
write(num,'(i8)') nx
write(mass_wd_str,'(f4.2)') mass_wd/solar_mass
write(mass_he_str,'(f5.3)') mass_He/solar_mass
!if (mass_He/solar_mass > 0.01) then
! write(mass_he_str,'(f4.2)') mass_He/solar_mass
!else
! write(mass_he_str,'(f6.4)') mass_He/solar_mass
!endif
if (mixed_co_wd) then
outfile = "sub_chandra.M_WD-" // trim(adjustl(mass_wd_str)) // &
".M_He-" // trim(adjustl(mass_he_str)) // &
".hse.CO." // trim(adjustl(num))
else
outfile = "sub_chandra.M_WD-" // trim(adjustl(mass_wd_str)) // &
".M_He-" // trim(adjustl(mass_he_str)) // &
".hse.C." // trim(adjustl(num))
endif
open (unit=50, file=outfile, status="unknown")
write (50,1001) "# npts = ", nx
write (50,1001) "# num of variables = ", nvar
write (50,1002) "# density"
write (50,1002) "# temperature"
write (50,1002) "# pressure"
do n = 1, nspec
write (50,1003) "# ", spec_names(n)
enddo
1000 format (1x, 12(g26.16, 1x))
1001 format(a, i5)
1002 format(a)
1003 format(a,a)
do i = 1, nx
write (50,1000) xzn_hse(i), model_hse(i,idens), model_hse(i,itemp), model_hse(i,ipres), &
(model_hse(i,ispec-1+n), n=1,nspec)
enddo
close (50)
! extra info
if (mixed_co_wd) then
outfile = "sub_chandra.M_WD-" // trim(adjustl(mass_wd_str)) // &
".M_He-" // trim(adjustl(mass_he_str)) // &
".extras.CO." // trim(adjustl(num))
else
outfile = "sub_chandra.M_WD-" // trim(adjustl(mass_wd_str)) // &
".M_He-" // trim(adjustl(mass_he_str)) // &
".extras.C." // trim(adjustl(num))
endif
open (unit=51, file=outfile, status="unknown")
write (51,1001) "# npts = ", nx
write (51,1002) "# cs"
write (51,1002) "# entropy"
do i = 1, nx
write (51,1000) xzn_hse(i), cs_hse(i), s_hse(i)
enddo
close (51)
! compute the maximum HSE error
max_hse_error = -1.d30
do i = 2, nx-1
g_zone = -Gconst*M_enclosed(i-1)/xznr(i-1)**2
dpdr = (model_hse(i,ipres) - model_hse(i-1,ipres))/delx
rhog = HALF*(model_hse(i,idens) + model_hse(i-1,idens))*g_zone
print *, xzn_hse(i), g_zone*xzn_hse(i)
if (dpdr /= ZERO .and. model_hse(i+1,idens) > low_density_cutoff) then
max_hse_error = max(max_hse_error, abs(dpdr - rhog)/abs(dpdr))
endif
enddo
print *, 'maximum HSE error = ', max_hse_error
print *, ' '
end program init_1d
|
{"hexsha": "17aef7d2babb65f4622b3eff1e5cf2dd26099341", "size": 19522, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "Util/initial_models/sub_chandra/init_1d.f90", "max_stars_repo_name": "sailoridy/MAESTRO", "max_stars_repo_head_hexsha": "f957d148d2028324a2a1076be244f73dad63fd67", "max_stars_repo_licenses": ["BSD-3-Clause-LBNL"], "max_stars_count": 17, "max_stars_repo_stars_event_min_datetime": "2017-05-15T15:28:56.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-09T08:13:32.000Z", "max_issues_repo_path": "Util/initial_models/sub_chandra/init_1d.f90", "max_issues_repo_name": "sailoridy/MAESTRO", "max_issues_repo_head_hexsha": "f957d148d2028324a2a1076be244f73dad63fd67", "max_issues_repo_licenses": ["BSD-3-Clause-LBNL"], "max_issues_count": 17, "max_issues_repo_issues_event_min_datetime": "2017-06-14T23:05:00.000Z", "max_issues_repo_issues_event_max_datetime": "2018-11-28T16:40:42.000Z", "max_forks_repo_path": "Util/initial_models/sub_chandra/init_1d.f90", "max_forks_repo_name": "sailoridy/MAESTRO", "max_forks_repo_head_hexsha": "f957d148d2028324a2a1076be244f73dad63fd67", "max_forks_repo_licenses": ["BSD-3-Clause-LBNL"], "max_forks_count": 14, "max_forks_repo_forks_event_min_datetime": "2017-06-14T14:52:09.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-04T07:16:09.000Z", "avg_line_length": 29.3123123123, "max_line_length": 94, "alphanum_fraction": 0.5496363078, "num_tokens": 5809}
|
## The inviscid Burgers' Equation - A different approach
#### Problem Setup
Setting $u(x,t) = \frac{x}{1+t}$, we expect $\nu = 0$ as a parameter.
Then $u_0(x) := u(x,0) = x$.
Using the forward Euler scheme, the equation can be re-written as:

$u_n = u_{n-1} + \tau \nu \frac{d}{dx}u_{n-1} - \tau u_{n-1} \frac{d}{dx}u_{n-1}$

and setting the factor $u_{n-1} = \mu_{n-1}$ (analogously to the previous subchapter this is the mean of $u_{n-1}$) to deal with the non-linearity:

$u_n = u_{n-1} + \tau \nu \frac{d}{dx}u_{n-1} - \tau \mu_{n-1} \frac{d}{dx}u_{n-1}$
Consider $u_{n-1}$ to be a Gaussian process.
$u_{n-1} \sim \mathcal{GP}(0, k_{uu}(x_i, x_j; \theta, l))$
And the linear operator:
$\mathcal{L}_x^\nu = \cdot + \tau \nu \frac{d}{dx}\cdot - \tau \mu_{n-1} \frac{d}{dx} \cdot$
so that
$\mathcal{L}_x^\nu u_{n-1} = u_n$
Problem at hand: Estimate $\nu$ (should be $\nu = 0$ in the end).
For the sake of simplicity, take $u := u_{n-1}$ and $f := u_n$.
#### Step 1: Simulate data
Take data points at $t = 0$ for $(u_{n-1})$ and $t = \tau$ for $(u_n)$, where $\tau$ is the time step.
```python
import numpy as np
import sympy as sp
from scipy.optimize import minimize
import matplotlib.pyplot as plt
import warnings
import time
```
$x \in [0, 1], \; t \in \{0, \tau \}$
```python
tau = 0.001
def get_simulated_data(tau, n=20):
x = np.random.rand(n)
y_u = x
y_f = x/(1+tau)
return (x, y_u, y_f)
(x, y_u, y_f) = get_simulated_data(tau)
```
```python
def show_1(x, y_u, y_f):
    """Plot the sampled data: u(x) in red (left) and f(x) in blue (right)."""
    fig, (left, right) = plt.subplots(ncols=2, nrows=1, sharey=True, figsize=(10, 3))
    fig.suptitle("Data for u(x) in red and f(x) in blue")
    left.plot(x, y_u, 'ro')
    left.set(xlabel=r"x", ylabel="u(x)")
    right.plot(x, y_f, 'bo')
    right.set(xlabel=r"x", ylabel="f(x)")
```
```python
show_1(x, y_u, y_f)
```
#### Step 2: Evaluate kernels
1) $k_{uu}(x_i, x_j; \theta, l) = \theta exp(-\frac{1}{2l}(x_i-x_j)^2)$
```python
x_i, x_j, theta, l, nu = sp.symbols('x_i x_j theta l nu')
mu = np.mean(y_u)
kuu_sym = theta*sp.exp(-1/(2*l)*((x_i - x_j)**2))
kuu_fn = sp.lambdify((x_i, x_j, theta, l), kuu_sym, "numpy")
def kuu(x, theta, l):
    """Gram matrix of the squared-exponential kernel k_uu over all pairs of x."""
    n = x.size
    gram = np.zeros((n, n))
    for row in range(n):
        for col in range(n):
            gram[row, col] = kuu_fn(x[row], x[col], theta, l)
    return gram
```
2) $k_{ff}(x_i,x_j;\theta, l,\nu)
= \mathcal{L}_{x_i}^\nu \mathcal{L}_{x_j}^\nu k_{uu}(x_i, x_j; \theta, l) \\
= k_{uu} + \tau \nu \frac{d}{dx_i}k_{uu} - \tau \mu_{n-1} \frac{d}{dx_i}k_{uu} + \tau \nu \frac{d}{dx_j}k_{uu} + \tau^2 \nu^2 \frac{d}{dx_i} \frac{d}{dx_j}k_{uu} - \tau^2 \nu \mu_{n-1}\frac{d^2}{dx_i dx_j} k_{uu} - \tau \mu_{n-1} \frac{d}{dx_j}k_{uu} - \tau^2 \nu \mu_{n-1} \frac{d^2}{dx_i dx_j} k_{uu} + \tau^2 \mu_{n-1}^2 \frac{d^2}{dx_i dx_j}k_{uu}$
```python
kff_sym = kuu_sym \
+ tau*nu*sp.diff(kuu_sym, x_i) \
- tau*mu*sp.diff(kuu_sym, x_i) \
+ tau*nu*sp.diff(kuu_sym, x_j) \
+ tau**2*nu**2*sp.diff(kuu_sym, x_j, x_i) \
- tau**2*nu*mu*sp.diff(kuu_sym, x_j, x_i) \
- tau*mu*sp.diff(kuu_sym, x_j) \
- tau**2*nu*mu*sp.diff(kuu_sym, x_j, x_i) \
+ tau**2*mu**2*sp.diff(kuu_sym, x_j, x_i)
kff_fn = sp.lambdify((x_i, x_j, theta, l, nu), kff_sym, "numpy")
def kff(x, theta, l, nu):
    """Gram matrix of k_ff = L L' k_uu evaluated on all pairs of x."""
    n = x.size
    gram = np.zeros((n, n))
    for row in range(n):
        for col in range(n):
            gram[row, col] = kff_fn(x[row], x[col], theta, l, nu)
    return gram
```
3) $k_{fu}(x_i,x_j;\theta,l,\nu)
= \mathcal{L}_{x_i}^\nu k_{uu}(x_i, x_j; \theta, l) \\
= k_{uu} + \tau \nu \frac{d}{dx_i}k_{uu} - \tau \mu_{n-1}\frac{d}{dx_i}k_{uu}$
```python
kfu_sym = kuu_sym + tau*nu*sp.diff(kuu_sym, x_i) - tau*mu*sp.diff(kuu_sym, x_i)
kfu_fn = sp.lambdify((x_i, x_j, theta, l, nu), kfu_sym, "numpy")
def kfu(x, theta, l, nu):
    """Cross-covariance matrix k_fu = L k_uu evaluated on all pairs of x."""
    n = x.size
    gram = np.zeros((n, n))
    for row in range(n):
        for col in range(n):
            gram[row, col] = kfu_fn(x[row], x[col], theta, l, nu)
    return gram
```
4) $k_{uf}(x_i,x_j;\theta, l, \nu)$ is given by the transpose of $k_{fu}(x_i,x_j;\theta, l, \nu)$.
```python
def kuf(x, theta, l, nu):
    """Cross-covariance k_uf, which is the transpose of k_fu."""
    return np.transpose(kfu(x, theta, l, nu))
```
#### Steps 3 and 4: Compute NLML and optimize the hyperparameters
```python
def nlml(params, x, y1, y2, s):
    """Negative log marginal likelihood of the joint GP over (u, f).

    params: [log(theta), log(l), nu]  -- kernel hyperparameters, with the
        first two stored in log-space; params[2] is the estimate for nu.
    x: sample locations; y1, y2: observed u and f values at x.
    s: jitter added to the block diagonals for numerical stability.
    """
    theta_exp = np.exp(params[0])
    l_exp = np.exp(params[1])
    K = np.block([
        [kuu(x, theta_exp, l_exp) + s*np.identity(x.size), kuf(x, theta_exp, l_exp, params[2])],
        [kfu(x, theta_exp, l_exp, params[2]), kff(x, theta_exp, l_exp, params[2]) + s*np.identity(x.size)]
    ])
    y = np.concatenate((y1, y2))
    # np.mat is deprecated (removed in NumPy 2.0); use plain ndarray algebra.
    # Solving K z = y is cheaper and numerically safer than forming inv(K).
    val = 0.5*(np.log(abs(np.linalg.det(K))) + y @ np.linalg.solve(K, y))
    return float(val)
```
```python
m = minimize(nlml, np.random.rand(3), args=(x, y_u, y_f, 1e-7), method=\
"Nelder-Mead", options = {'maxiter' : 1000})
```
```python
m.x[2] # This is our inferred value for \nu
```
0.00021347647791778476
```python
m
```
final_simplex: (array([[6.98888116e+00, 8.08598514e+00, 2.13476478e-04],
[6.98887122e+00, 8.08597556e+00, 2.13481534e-04],
[6.98882034e+00, 8.08592923e+00, 2.13858738e-04],
[6.98884831e+00, 8.08595542e+00, 2.13548057e-04]]), array([-291.94040781, -291.94038991, -291.94038888, -291.940384 ]))
fun: -291.9404078103601
message: 'Optimization terminated successfully.'
nfev: 209
nit: 106
status: 0
success: True
x: array([6.98888116e+00, 8.08598514e+00, 2.13476478e-04])
#### Step 5: Analysis w.r.t. the number of data points (up to 25):
In this section we want to analyze the error of our algorithm and plot its time complexity.
```python
res = np.zeros((5,25))
timing = np.zeros((5,25))
warnings.filterwarnings("ignore")
for k in range(5):
for n in range(25):
start_time = time.time()
(x, y_u, y_f) = get_simulated_data(tau, n)
m = minimize(nlml, np.random.rand(3), args=(x, y_u, y_f, 1e-7), method="Nelder-Mead")
res[k][n] = m.x[2]
timing[k][n] = time.time() - start_time
```
**1. Plotting the error in our estimate:**
The error is given by $| \nu_{estimate} - \nu_{true} |$.
```python
lin = np.linspace(1, res.shape[1], res.shape[1])
for i in range(res.shape[0]):
plt.plot(lin, np.abs(res[i,:]))
plt.ylabel('Error')
plt.xlabel('Number of data points')
plt.show()
```
We plot the error with respect to the number of data samples for five runs of the program:
```python
%matplotlib inline
import matplotlib.ticker as ticker
# Setting global figsize to (5,3)
import matplotlib
matplotlib.rcParams["figure.figsize"] = (5,3)
lin = np.linspace(5, res.shape[1] - 1, res.shape[1] - 5)
def show_2(lin, res):
    """Plot |nu estimate| per run against #data points, plus a 0.16 bound.

    The first five columns of res (fewest data points) are dropped.
    """
    plt.figure(figsize=(5, 3))
    drop_idx = np.linspace(0, 4, 5)
    axis = plt.figure().gca()
    axis.xaxis.set_major_locator(ticker.MaxNLocator(integer=True))
    plt.ylabel('Error')
    plt.xlabel('Number of data points')
    plt.suptitle(r'Error in our estimate for $\nu$')
    for run in range(res.shape[0]):
        errors = np.delete(np.abs(res[run, :]), drop_idx)
        plt.plot(lin, errors)
    bound = np.repeat(0.16, len(lin))
    plt.plot(lin, bound, color='blue', linestyle='dashed')
    plt.show()
```
```python
show_2(lin, res)
```
We see that for n sufficiently large (in this case $n \geq 8$), we can assume the error to be bounded by 0.16.
**2. Plotting the error between the solution and the approximative solution:**
**3. Plotting the execution time:**
The blue-dashed line follows $f(x) = 0.032 x^2$.
```python
lin = np.linspace(1, timing.shape[1], timing.shape[1])
for i in range(timing.shape[0]):
plt.plot(lin, timing[i,:])
plt.ylabel('Execution time in seconds')
plt.xlabel('Number of data points')
plt.show()
```
```python
lin = np.linspace(1, timing.shape[1], timing.shape[1])
def show_3(lin, timing):
    """Plot per-run execution times with a dashed 0.032*n^2 reference curve."""
    plt.suptitle('Execution time of the algorithm')
    plt.ylabel('Seconds')
    plt.xlabel('Number of data points')
    for run in range(timing.shape[0]):
        plt.plot(lin, timing[run, :])
    reference = 0.032*lin**(2)
    plt.plot(lin, reference, color='blue', linestyle='dashed')
    plt.show()
```
```python
show_3(lin, timing)
```
We observe a time complexity of roughly $\mathcal{O}(n^2)$ (blue-dashed line).
#### Comparison with a no-mean version
$\tau \nu \frac{d^2}{dx^2}u_{n-1} - \tau x \frac{d}{dx}u_{n-1} + u_{n-1} = u_{n}$
The linear operator looks just slightly different:
$\mathcal{L}_x^\nu = \cdot + \tau \nu \frac{d}{dx}\cdot - \tau x \frac{d}{dx} \cdot$
so that
$\mathcal{L}_x^\nu u_{n-1} = u_n$.
Our kernels evaluate to:
1) $k_{uu}(x_i, x_j; \theta, l) = \theta exp(-\frac{1}{2l}(x_i-x_j)^2)$
2) $k_{ff}(x_i,x_j;\theta, l,\nu)
= \mathcal{L}_{x_i}^\nu \mathcal{L}_{x_j}^\nu k_{uu}(x_i, x_j; \theta, l) \\
= k_{uu} + \tau \nu \frac{d}{dx_i}k_{uu} - \tau x \frac{d}{dx_i}k_{uu} + \tau \nu \frac{d}{dx_j}k_{uu} + \tau^2 \nu^2 \frac{d}{dx_i} \frac{d}{dx_j}k_{uu} - \tau^2 \nu x\frac{d^2}{dx_i dx_j} k_{uu} - \tau x \frac{d}{dx_j}k_{uu} - \tau^2 \nu x \frac{d^2}{dx_i dx_j} k_{uu} + \tau^2 x^2 \frac{d^2}{dx_i dx_j}k_{uu}$
3) $k_{fu}(x_i,x_j;\theta,l,\nu)
= \mathcal{L}_{x_i}^\nu k_{uu}(x_i, x_j; \theta, l) \\
= k_{uu} + \tau \nu \frac{d}{dx_i}k_{uu} - \tau x\frac{d}{dx_i}k_{uu}$
4) $k_{uf}(x_i,x_j;\theta, l, \nu)$ is given by the transpose of $k_{fu}(x_i,x_j;\theta, l, \nu)$.
After constructing the *nlml* we get:
```python
%matplotlib inline
import matplotlib.ticker as ticker
# Setting global figsize to (5,3)
import matplotlib
matplotlib.rcParams["figure.figsize"] = (5,3)
```
```python
x_i, x_j, theta, l, nu = sp.symbols('x_i x_j theta l nu')
kuu_sym = theta*sp.exp(-1/(2*l)*((x_i - x_j)**2))
kuu_fn = sp.lambdify((x_i, x_j, theta, l), kuu_sym, "numpy")
def kuu(x, theta, l):
    """Gram matrix of the squared-exponential kernel k_uu (no-mean variant)."""
    n = x.size
    gram = np.zeros((n, n))
    for row in range(n):
        for col in range(n):
            gram[row, col] = kuu_fn(x[row], x[col], theta, l)
    return gram
```
```python
kff_sym = kuu_sym \
+ tau*nu*sp.diff(kuu_sym, x_i) \
- tau*x_i*sp.diff(kuu_sym, x_i) \
+ tau*nu*sp.diff(kuu_sym, x_j) \
+ tau**2*nu**2*sp.diff(kuu_sym, x_j, x_i) \
- tau**2*nu*x_i*sp.diff(kuu_sym, x_j, x_i) \
- tau*x_j*sp.diff(kuu_sym, x_j) \
- tau**2*nu*x_j*sp.diff(kuu_sym, x_j, x_i) \
+ tau**2*x_i*x_j*sp.diff(kuu_sym, x_j, x_i)
kff_fn = sp.lambdify((x_i, x_j, theta, l, nu), kff_sym, "numpy")
def kff(x, theta, l, nu):
    """Gram matrix of k_ff for the no-mean operator (x in place of mu)."""
    n = x.size
    gram = np.zeros((n, n))
    for row in range(n):
        for col in range(n):
            gram[row, col] = kff_fn(x[row], x[col], theta, l, nu)
    return gram
```
```python
kfu_sym = kuu_sym + tau*nu*sp.diff(kuu_sym, x_i) - tau*x_i*sp.diff(kuu_sym, x_i)
kfu_fn = sp.lambdify((x_i, x_j, theta, l, nu), kfu_sym, "numpy")
def kfu(x, theta, l, nu):
    """Cross-covariance matrix k_fu for the no-mean operator."""
    n = x.size
    gram = np.zeros((n, n))
    for row in range(n):
        for col in range(n):
            gram[row, col] = kfu_fn(x[row], x[col], theta, l, nu)
    return gram
```
```python
def kuf(x, theta, l, nu):
    """Cross-covariance k_uf, the transpose of k_fu."""
    return np.transpose(kfu(x, theta, l, nu))
```
```python
def nlml(params, x, y1, y2, s):
    """Negative log marginal likelihood of the joint GP over (u, f).

    Same structure as the first version but built on the no-mean kernels.
    params: [log(theta), log(l), nu]; x: sample locations; y1, y2: observed
    u and f values; s: diagonal jitter for numerical stability.
    """
    theta_exp = np.exp(params[0])
    l_exp = np.exp(params[1])
    K = np.block([
        [kuu(x, theta_exp, l_exp) + s*np.identity(x.size), kuf(x, theta_exp, l_exp, params[2])],
        [kfu(x, theta_exp, l_exp, params[2]), kff(x, theta_exp, l_exp, params[2]) + s*np.identity(x.size)]
    ])
    y = np.concatenate((y1, y2))
    # np.mat is deprecated (removed in NumPy 2.0); use plain ndarray algebra.
    # Solving K z = y is cheaper and numerically safer than forming inv(K).
    val = 0.5*(np.log(abs(np.linalg.det(K))) + y @ np.linalg.solve(K, y))
    return float(val)
```
```python
m = minimize(nlml, np.random.rand(3), args=(x, y_u, y_f, 1e-7), method=\
"Nelder-Mead", options = {'maxiter' : 1000})
m.x[2] # This is our prediction for \nu
```
0.00042724953581625225
We can analyze the error in multiple runs and look at the execution time:
```python
res = np.zeros((5,25))
timing = np.zeros((5,25))
warnings.filterwarnings("ignore")
for k in range(5):
for n in range(25):
start_time = time.time()
(x, y_u, y_f) = get_simulated_data(tau, n)
m = minimize(nlml, np.random.rand(3), args=(x, y_u, y_f, 1e-7), method="Nelder-Mead")
res[k][n] = m.x[2]
timing[k][n] = time.time() - start_time
```
**1. Plotting the error in our estimate for** $\nu$
The error is given by $| \nu_{estimate} - \nu_{true} |$.
```python
lin = np.linspace(1, res.shape[1], res.shape[1])
est = np.repeat(0.01, len(lin))
f, (ax1, ax2) = plt.subplots(ncols=2, nrows=2, figsize=(13,7))
ax1[0].plot(lin, np.abs(res[0,:]), color = 'green')
ax1[0].plot(lin, est, color='blue', linestyle='dashed')
ax1[0].set(xlabel= r"Number of data points", ylabel= "Error")
ax1[1].plot(lin, np.abs(res[1,:]), color = 'green')
ax1[1].plot(lin, est, color='blue', linestyle='dashed')
ax1[1].set(xlabel= r"Number of data points", ylabel= "Error")
ax2[0].plot(lin, np.abs(res[2,:]), color = 'green')
ax2[0].plot(lin, est, color='blue', linestyle='dashed')
ax2[0].set(xlabel= r"Number of data points", ylabel= "Error")
ax2[1].plot(lin, np.abs(res[3,:]), color = 'green')
ax2[1].plot(lin, est, color='blue', linestyle='dashed')
ax2[1].set(xlabel= r"Number of data points", ylabel= "Error");
```
```python
lin = np.linspace(5, res.shape[1] - 1, res.shape[1] - 5)
est = np.repeat(0.0009, len(lin))
def show_4(lin, est, res):
    """Plot |nu estimate| per run (first five columns dropped) plus bound est."""
    drop_idx = np.linspace(0, 4, 5)
    axis = plt.figure().gca()
    axis.xaxis.set_major_locator(ticker.MaxNLocator(integer=True))
    plt.suptitle(r'Error in our estimate for $\nu$')
    plt.ylabel('Error')
    plt.xlabel('Number of data points')
    for run in range(res.shape[0]):
        errors = np.delete(np.abs(res[run, :]), drop_idx)
        plt.plot(lin, errors)
    plt.plot(lin, est, color='blue', linestyle='dashed')
```
```python
show_4(lin, est, res)
```
We see that for n sufficiently large (in this case $n \geq 5$), we can assume the error to be bounded by 0.0009. <br>
**2. Plotting the execution time:**
```python
lin = np.linspace(1, timing.shape[1], timing.shape[1])
def show_5(lin, timing):
    """Plot per-run execution times with a dashed 0.032*n^2 reference curve."""
    for run in range(timing.shape[0]):
        plt.plot(lin, timing[run, :])
    plt.ylabel('Seconds')
    plt.xlabel('Number of data points')
    plt.suptitle('Execution time of our algorithm')
    reference = 0.032*lin**(2)
    plt.plot(lin, reference, color='blue', linestyle='dashed')
    plt.show()
```
```python
show_5(lin, timing)
```
The time complexity seems to be as before roughly $\mathcal{O}(n^2)$.
|
{"hexsha": "60fdbe15742ecb52f4a337572bd18d0256038ee0", "size": 338458, "ext": "ipynb", "lang": "Jupyter Notebook", "max_stars_repo_path": "burgers_equation/burgerseq_mod_fe.ipynb", "max_stars_repo_name": "ratnania/mlhiphy", "max_stars_repo_head_hexsha": "c75b5c4b5fbc557f77d234df001fe11b10681d7d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2018-07-12T09:03:43.000Z", "max_stars_repo_stars_event_max_datetime": "2019-10-29T09:50:34.000Z", "max_issues_repo_path": "burgers_equation/burgerseq_mod_fe.ipynb", "max_issues_repo_name": "ratnania/mlhiphy", "max_issues_repo_head_hexsha": "c75b5c4b5fbc557f77d234df001fe11b10681d7d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "burgers_equation/burgerseq_mod_fe.ipynb", "max_forks_repo_name": "ratnania/mlhiphy", "max_forks_repo_head_hexsha": "c75b5c4b5fbc557f77d234df001fe11b10681d7d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2018-04-25T06:33:03.000Z", "max_forks_repo_forks_event_max_datetime": "2020-03-13T02:25:07.000Z", "avg_line_length": 296.3730297723, "max_line_length": 45276, "alphanum_fraction": 0.9228471479, "converted": true, "num_tokens": 5236}
|
import torch
import numpy as np
from models.network_emb_study import UNet_Pretrained
from tutils import trans_init, trans_args, tfilename, save_script, CSVLogger
from tutils.trainer import LearnerModule, Trainer
class Learner(LearnerModule):
    """Skeleton trainer module: holds two backbone networks, a label encoder
    and a classifier; all training hooks below are unimplemented stubs."""

    def __init__(self, logger, config, *args, **kwargs):
        # `logger` and `config` are accepted but not stored or used here.
        super(Learner, self).__init__(*args, **kwargs)
        # NOTE(review): only `UNet_Pretrained` is imported in this file;
        # `UNet` and `Net` are undefined here, so constructing a Learner
        # would raise NameError -- confirm the intended classes.
        self.net = UNet()
        self.net_patch = UNet()
        self.label_encoder = Net()
        self.classifier = Net()

    def forward(self, x, **kwargs):
        # Stub: inference pass not implemented yet (returns None).
        return

    def training_step(self, data, batch_idx, **kwargs):
        # Stub: per-batch training logic not implemented yet (returns None).
        return

    def configure_optimizers(self, **kwargs):
        # Stub: optimizer setup not implemented yet (returns None).
        return

    def save(self, pth, **kwargs):
        # Stub: checkpoint saving not implemented yet (returns None).
        return
|
{"hexsha": "f200e74b06eaab531e7011130306e8983eb06276", "size": 746, "ext": "py", "lang": "Python", "max_stars_repo_path": "sc/inc/inc.py", "max_stars_repo_name": "Curli-quan/fewshot-select", "max_stars_repo_head_hexsha": "34f8ce5069ed1fbd01c1fa73a3ef264c98dadafe", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "sc/inc/inc.py", "max_issues_repo_name": "Curli-quan/fewshot-select", "max_issues_repo_head_hexsha": "34f8ce5069ed1fbd01c1fa73a3ef264c98dadafe", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "sc/inc/inc.py", "max_forks_repo_name": "Curli-quan/fewshot-select", "max_forks_repo_head_hexsha": "34f8ce5069ed1fbd01c1fa73a3ef264c98dadafe", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.6428571429, "max_line_length": 77, "alphanum_fraction": 0.6434316354, "include": true, "reason": "import numpy", "num_tokens": 176}
|
# Copyright 2021 The NPLinker Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# test functions
import numpy as np
from data_linking_functions import calc_correlation_matrix
def test_calc_correlation_matrix():
    """Check calc_correlation_matrix on small input matrices with a
    hand-computed expected outcome."""
    A = np.array([[0, 0, 0, 1], [0, 0, 1, 1], [0, 0, 1, 1], [0, 0, 0, 1], [0, 0, 0, 1]])
    B = np.array([[1, 1, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 1, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1]])
    M_A_B, M_A_notB, M_notA_B, M_notA_notB = calc_correlation_matrix(A, B)
    assert M_A_B[1][2] == 1
    assert M_A_notB[2][1] == 2
    assert M_A_notB[2][2] == 1
    assert M_notA_B[4][0] == 2
    assert M_notA_B[1][2] == 0
    assert np.max([M_A_B, M_A_notB, M_notA_B]) == 2 # in this example
    assert np.max(M_notA_notB) == 3
    # Bug fix: the original used np.max(...) on the boolean comparison,
    # which passes if ANY entry matches; np.all(...) checks every entry.
    assert np.all(M_notA_notB == np.array([[1, 2, 2, 2, 2, 3],
                                           [0, 1, 2, 1, 1, 2],
                                           [0, 1, 2, 1, 1, 2],
                                           [1, 2, 2, 2, 2, 3],
                                           [1, 2, 2, 2, 2, 3]]))
from data_linking_functions import calc_likelihood_matrix
def test_calc_likelihood_matrix():
    """Validate calc_likelihood_matrix entries against hand-computed values."""
    A = np.array([[0, 0, 0, 1], [0, 0, 1, 1], [0, 0, 1, 1], [0, 0, 0, 1], [0, 0, 0, 1]])
    B = np.array([[1, 1, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 1, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1]])
    M_A_B, M_A_notB, M_notA_B, _ = calc_correlation_matrix(A, B)
    LBA, LBnotA, LAB, LAnotB = calc_likelihood_matrix(A, B, M_A_B, M_A_notB, M_notA_B)
    # Likelihoods are (conditional) probabilities, so none may exceed 1.
    assert np.max([LBA, LBnotA, LAB, LAnotB]) <= 1
    # Shape follows the number of rows of A and B.
    assert LBA.shape == (len(A), len(B))
    # Hand-computed individual entries.
    assert LBA[1][2] == 0.5
    assert LAB[1][2] == 1
    assert LBnotA[1][2] == 0
    assert LBnotA[1][0] == 1
    assert LAnotB[1][0] == 1
    assert LAnotB[4][4] == 1/3
from data_linking_functions import pair_prob_hg
def test_pair_prob_hg():
    """Compare pair_prob_hg output with hand-derived probabilities."""
    cases = [
        ((1, 100, 1, 1), 1/100),
        ((1, 100, 50, 1), 0.5),
        ((1, 100, 1, 50), 0.5),
        ((1, 100, 2, 2), 98/100 * 2/99 + 2/100 * 98/99),
    ]
    for args, expected in cases:
        assert pair_prob_hg(*args) == expected
from data_linking_functions import hit_prob_dist
def test_hit_prob_dist():
    """hit_prob_dist must return a normalised distribution with known first term."""
    distribution = hit_prob_dist(100, 1, 1, 100)
    # The probabilities must sum to 1 (up to rounding error).
    total = np.sum(distribution)
    assert 0.99999999 < total < 1.00000001
    # Zero-hit term matches the closed-form value asserted by the original test.
    assert distribution[0][0] == 0.99**100
from data_linking_functions import permutation_unique
def test_permutation_unique():
    """permutation_unique should yield n! / (product of multiplicities!) items."""
    expected_counts = [
        ([1, 2, 3, 4, 5], 120),            # math.factorial(5)
        ([1, 2, 3, 4, 1], 60),             # math.factorial(5)/math.factorial(2)
        ([1, 2, 3, 1, 2, 3], 90),          # math.factorial(6)/math.factorial(2)**3
        (['A', 'B', 'C', 'C', 'C'], 20),   # math.factorial(5)/math.factorial(3)
    ]
    for items, count in expected_counts:
        assert len(list(permutation_unique(items))) == count
from data_linking_functions import pair_prob
def test_pair_prob():
    """Check pair_prob against hand-computed values for uniform strain weights."""
    strain_probs = np.ones(10)
    strain_probs = strain_probs / np.sum(strain_probs)
    # Three shared strains, Ny = 3, two hits: exact expected value.
    assert pair_prob(strain_probs, [0, 1, 2], 3, 2) == 0.175
    # Two shared strains, Ny = 2, two hits: analytically 2/90, but rounding
    # errors push the computed value slightly above it.
    result = pair_prob(strain_probs, [0, 1], 2, 2)
    assert 2/90 < result < 2/90 + 0.00000001
from data_linking_functions import link_prob
def test_link_prob():
    """Check link_prob against hand-computed values for a near-uniform distribution."""
    n_strains = 10
    strain_probs = np.ones(n_strains)
    # Down-weight the first two strains before normalising.
    strain_probs[0:2] = strain_probs[0:2] * 0.1
    strain_probs = strain_probs / np.sum(strain_probs)
    shared_strains = [0, 1]
    assert link_prob(strain_probs, shared_strains, 2, 2, n_strains) == \
        2 * strain_probs[1]**2 / 0.9
    assert link_prob(strain_probs, shared_strains, 5, 3, n_strains) == \
        6 * 0.5 * strain_probs[0]**2 / (0.9 * 0.8)
|
{"hexsha": "56259635adda840009e2c8db5bd18ae878c2398e", "size": 4706, "ext": "py", "lang": "Python", "max_stars_repo_path": "prototype/nplinker/tests/test_data_linking_functions.py", "max_stars_repo_name": "louwenjjr/nplinker", "max_stars_repo_head_hexsha": "22e957d0f3326775ca5c1f850073067c6fb256d6", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2018-10-24T20:33:49.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-11T03:44:14.000Z", "max_issues_repo_path": "prototype/nplinker/tests/test_data_linking_functions.py", "max_issues_repo_name": "louwenjjr/nplinker", "max_issues_repo_head_hexsha": "22e957d0f3326775ca5c1f850073067c6fb256d6", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 44, "max_issues_repo_issues_event_min_datetime": "2018-10-25T19:46:26.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T20:31:06.000Z", "max_forks_repo_path": "prototype/nplinker/tests/test_data_linking_functions.py", "max_forks_repo_name": "louwenjjr/nplinker", "max_forks_repo_head_hexsha": "22e957d0f3326775ca5c1f850073067c6fb256d6", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2018-10-29T15:29:41.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-10T14:36:40.000Z", "avg_line_length": 36.2, "max_line_length": 128, "alphanum_fraction": 0.6064598385, "include": true, "reason": "import numpy", "num_tokens": 1752}
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 26 13:48:21 2017
@author: david
"""
from pyspark import SparkContext
import numpy as np
from time import time
class KMeansSpark():
    """K-means clustering on a Spark RDD of numpy points.

    Centroids are kept on the driver as a (k, n_features) numpy array;
    per-point assignment and per-cluster aggregation run on the executors
    via map/reduceByKey.
    """

    def __init__(self, k, epsilon=1, max_iter=10):
        self.k = k                # number of clusters
        self.epsilon = epsilon    # tolerance for the stopping criterion
        self.max_iter = max_iter  # maximum number of iterations

    def distance_squared(self, p, q):
        """Return the squared Euclidean distance between points p and q."""
        return np.sum((p - q)**2)

    def closest_centroid(self, p):
        """Return the index of the centroid in self.centroids closest to p."""
        index = np.argmin(np.linalg.norm(self.centroids - p, axis=1))
        return index

    def fit(self, X):  # X is an RDD of np.array
        """Iteratively fit self.centroids to the points of RDD X.

        Stops when the largest (infinity-norm) centroid displacement falls
        below self.epsilon or self.max_iter iterations have run.
        """
        init_time = time()
        X.persist()  # only if there is enough memory
        # Start from k points of the dataset sampled at random (seed 42).
        initial_centroids = np.asarray(X.takeSample(False, self.k, 42))
        self.centroids = initial_centroids
        distance = np.inf
        it = 0  # iteration counter
        # BUG FIX: the original read the bare name ``max_iter`` here (and in
        # the report below), which only worked because the demo script
        # happened to define a global of that name.  Use the attribute.
        while (distance > self.epsilon and it < self.max_iter):
            # For each point, find the index of the closest centroid and
            # map it to (index, (point, 1)).
            points_clusterized = X.map(lambda p: ( self.closest_centroid(p), \
                                        (p, 1) ))
            # For each key (cluster index) aggregate coordinate sums and
            # point counts.
            clusters_set = points_clusterized\
                .reduceByKey(lambda p1, p2: (p1[0]+p2[0], p1[1]+p2[1]) )
            # For each key, the new centroid is the mean of the points of
            # that cluster; sort by key so centroid order is stable.
            new_centroids = clusters_set\
                .mapValues(lambda pair: (pair[0] / pair[1]) )\
                .sortBy(lambda t: t[0])\
                .map(lambda pair: pair[1]).collect()
            new_centroids = np.asarray(new_centroids)
            # Largest displacement between old and new centroids.
            distance = np.linalg.norm(self.centroids - new_centroids, ord=np.inf)
            # Adopt the new centroids.
            self.centroids = new_centroids
            it += 1
        time_passed = time() - init_time
        if it == self.max_iter:
            print('Maximun number of iterations reached')
        else:
            print('Convergence successfull')
        print('{} iterations terminated in {} seconds'.format(it, time_passed))
if __name__ == '__main__':
    # Demo driver: cluster either a synthetic 2-D mixture of four Gaussian
    # blobs (from_file=False) or a whitespace-separated text file of points.
    sc = SparkContext(appName='KMeansSpark')
    k = 4
    epsilon = 0.1
    max_iter = 10
    from_file = False
    # Test it locally with generated data
    if not from_file:
        np.random.seed(42)
        D, N = 2, 150
        # Four clusters of N points each in D dimensions, shifted to
        # distinct means; randn gives (D, N), so transpose to (N, D).
        mu0 = np.array([1, 3])
        X0 = (np.random.randn(D, N) + mu0[:, np.newaxis]).T
        mu1 = np.array([7, 7])
        X1 = (np.random.randn(D, N) + mu1[:, np.newaxis]).T
        mu2 = np.array([2, -5])
        X2 = (np.random.randn(D, N) + mu2[:, np.newaxis]).T
        mu3 = np.array([-5, -1])
        X3 = (np.random.randn(D, N) + mu3[:, np.newaxis]).T
        X = np.vstack((X0, X1, X2, X3))
        X = sc.parallelize(X)
    else:
        number_partitions = 2
        sep = " "
        # One point per line, coordinates separated by `sep`.
        X = sc.textFile('/path/to/your/textfile', number_partitions)\
            .map(lambda s: np.fromstring(s, dtype=np.float64, sep=sep))
    k_means = KMeansSpark(k, epsilon)
    k_means.fit(X)
    for centroid in k_means.centroids:
        print(centroid)
|
{"hexsha": "c36637f4a8fef7553b58b8808020e77aa8f2009e", "size": 3906, "ext": "py", "lang": "Python", "max_stars_repo_path": "ApacheSpark/KMeansSpark.py", "max_stars_repo_name": "davidRetana/TFG", "max_stars_repo_head_hexsha": "e04f758811c092a34a4e71cb3409a5a419e7b95a", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ApacheSpark/KMeansSpark.py", "max_issues_repo_name": "davidRetana/TFG", "max_issues_repo_head_hexsha": "e04f758811c092a34a4e71cb3409a5a419e7b95a", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ApacheSpark/KMeansSpark.py", "max_forks_repo_name": "davidRetana/TFG", "max_forks_repo_head_hexsha": "e04f758811c092a34a4e71cb3409a5a419e7b95a", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.1666666667, "max_line_length": 83, "alphanum_fraction": 0.5663082437, "include": true, "reason": "import numpy", "num_tokens": 998}
|
from os.path import join
import numpy as np
from fury import actor, window, ui, pick
from fury.testing import assert_greater
import numpy.testing as npt
import itertools
from fury.data import DATA_DIR
def test_picking_manager():
    """Pick at the window centre while a scene of random spheres rotates.

    A timer callback rotates the camera and, every 10th tick, picks at the
    centre of the window, recording the returned vertex/face indices,
    positions and actors; the records are sanity-checked afterwards.
    """
    xyz = 10 * np.random.rand(100, 3)
    colors = np.random.rand(100, 4)
    radii = np.random.rand(100) + 0.5
    scene = window.Scene()
    sphere_actor = actor.sphere(centers=xyz,
                                colors=colors,
                                radii=radii)
    scene.add(sphere_actor)
    showm = window.ShowManager(scene,
                               size=(900, 768), reset_camera=False,
                               order_transparent=True)
    showm.initialize()
    tb = ui.TextBlock2D(bold=True)
    # use itertools to avoid global variables
    counter = itertools.count()
    pickm = pick.PickingManager()
    record_indices = {'vertex_indices': [],
                      'face_indices': [],
                      'xyz': [],
                      'actor': []}
    def timer_callback(_obj, _event):
        cnt = next(counter)
        tb.message = "Let's count up to 15 and exit :" + str(cnt)
        showm.scene.azimuth(0.05 * cnt)
        # sphere_actor.GetProperty().SetOpacity(cnt/100.)
        if cnt % 10 == 0:
            # pick at position (the window centre)
            info = pickm.pick((900/2, 768/2), scene)
            record_indices['vertex_indices'].append(info['vertex'])
            record_indices['face_indices'].append(info['face'])
            record_indices['xyz'].append(info['xyz'])
            record_indices['actor'].append(info['actor'])
        showm.render()
        if cnt == 15:
            showm.exit()
    scene.add(tb)
    # Run every 200 milliseconds
    showm.add_timer_callback(True, 200, timer_callback)
    showm.start()
    # At least one pick must have hit real geometry.
    assert_greater(np.sum(np.array(record_indices['vertex_indices'])), 1)
    assert_greater(np.sum(np.array(record_indices['face_indices'])), 1)
    # Every non-None hit must come from the sphere actor.
    for ac in record_indices['actor']:
        if ac is not None:
            npt.assert_equal(ac is sphere_actor, True)
    # Picked positions must change between picks (the camera is rotating).
    assert_greater(np.sum(np.abs(np.diff(np.array(record_indices['xyz']),
                                         axis=0))), 0)
def _get_three_cubes():
centers = 0.5 * np.array([[0, 0, 0], [100, 0, 0], [200, 0, 0.]])
colors = np.array([[0.8, 0, 0], [0, 0.8, 0], [0, 0, 0.8]])
radii = 0.1 * np.array([50, 100, 150.])
return centers, colors, radii
def test_selector_manager():
    """Area- and pixel-select in a scene of cubes, dots and a texture.

    The texture actor ends up non-selectable (toggled off/on/off), so every
    hit recorded by the timer callback must come from the cube or dot
    actors.
    """
    centers, colors, radii = _get_three_cubes()
    scene = window.Scene()
    cube_actor = actor.cube(centers, directions=(1, 0, 2),
                            colors=colors, scales=radii)
    pts = 100 * (np.random.rand(100, 3) - 0.5) + np.array([20, 0, 0.])
    pts_actor = actor.dots(pts, dot_size=10)
    rgb = 255 * np.ones((400, 400, 3), dtype=np.uint8)
    tex_actor = actor.texture(rgb)
    scene.add(cube_actor)
    scene.add(pts_actor)
    scene.add(tex_actor)
    showm = window.ShowManager(scene,
                               size=(900, 768), reset_camera=False,
                               order_transparent=True)
    showm.initialize()
    tb = ui.TextBlock2D(bold=True)
    # use itertools to avoid global variables
    counter = itertools.count()
    selm = pick.SelectionManager(select='faces')
    # Exercise both toggles; the texture is left non-selectable.
    selm.selectable_off([tex_actor])
    selm.selectable_on([tex_actor])
    selm.selectable_off([tex_actor])
    def timer_callback(_obj, _event):
        cnt = next(counter)
        tb.message = "Let's count up to 15 and exit :" + str(cnt)
        if cnt % 10 == 0:
            # select large area
            info_plus = selm.select((900//2, 768//2), scene, (30, 30))
            for info in info_plus.keys():
                if info_plus[info]['actor'] in [cube_actor, pts_actor]:
                    npt.assert_(True)
                else:
                    npt.assert_(False)
            # select single pixel
            info_ = selm.pick((900//2, 768//2), scene)
            if info_['actor'] in [cube_actor, pts_actor]:
                npt.assert_(True)
            else:
                npt.assert_(False)
        showm.render()
        if cnt == 15:
            showm.exit()
            pass
    scene.add(tb)
    # Run every 200 milliseconds
    showm.add_timer_callback(True, 200, timer_callback)
    showm.start()
def test_hover_selection_faces(recording=False):
    """Hover across three cubes and verify face-based selection.

    Replays a stored event log (or records a new one when ``recording`` is
    True), maps every selected face back to its cube, and requires that all
    three cubes were hit.
    """
    # simply hover going through blue, green, red
    recording_filename = join(DATA_DIR, 'selector_faces.log.gz')
    centers, colors, radii = _get_three_cubes()
    scene = window.Scene()
    cube_actor = actor.cube(centers, directions=(1, 0, 0),
                            colors=colors, scales=radii)
    scene.add(cube_actor)
    selm = pick.SelectionManager(select='faces')
    showm = window.ShowManager(scene,
                               size=(900, 768), reset_camera=False,
                               order_transparent=True)
    showm.initialize()
    global track_objects
    track_objects = []
    def hover_callback(_obj, _event):
        global track_objects
        event_pos = selm.event_position(showm.iren)
        info = selm.select(event_pos, showm.scene, (10, 10))
        selected_faces = info[0]['face']
        if selected_faces is not None:
            # 12 triangles per cube, so face index // 12 identifies the cube
            track_objects.append(selected_faces[0]//12)
        showm.render()
    showm.add_iren_callback(hover_callback)
    if recording:
        showm.record_events_to_file(recording_filename)
    else:
        showm.play_events_from_file(recording_filename)
        track_objects = set(track_objects)
        npt.assert_({0, 1, 2}.issubset(track_objects))
    del track_objects
def test_hover_selection_vertices(recording=False):
    """Hover across three cubes and verify vertex-based selection.

    Replays a stored event log (or records a new one when ``recording`` is
    True), maps every selected vertex back to its cube, and requires that
    the hit cubes are a subset of the three in the scene.
    """
    # simply hover through blue, green, red cubes
    # close to any vertices of each of the cubes
    recording_filename = join(DATA_DIR, 'selector_vertices.log.gz')
    centers, colors, radii = _get_three_cubes()
    scene = window.Scene()
    cube_actor = actor.cube(centers, directions=(1, 0, 0),
                            colors=colors, scales=radii)
    scene.add(cube_actor)
    selm = pick.SelectionManager(select='vertices')
    showm = window.ShowManager(scene,
                               size=(900, 768), reset_camera=False,
                               order_transparent=True)
    showm.initialize()
    global track_objects2
    track_objects2 = []
    def hover_callback(_obj, _event):
        global track_objects2
        event_pos = selm.event_position(showm.iren)
        info = selm.select(event_pos, showm.scene, (100, 100))
        selected_triangles = info[0]['vertex']
        if selected_triangles is not None:
            # 8 vertices per cube, so vertex index // 8 identifies the cube
            track_objects2.append(selected_triangles[0]//8)
        showm.render()
    showm.add_iren_callback(hover_callback)
    if recording:
        showm.record_events_to_file(recording_filename)
    else:
        showm.play_events_from_file(recording_filename)
        track_obj = set(track_objects2)
        npt.assert_(track_obj.issubset({0, 1, 2}))
    del track_objects2
def test_hover_selection_actors_only(recording=False):
    """Hover across three cubes with actor-level picking.

    Every non-None pick made while replaying the stored event log must
    resolve to the single cube actor in the scene.
    """
    # simply hover going through blue, green, red cubes
    recording_filename = join(DATA_DIR, 'selector_actors.log.gz')
    centers, colors, radii = _get_three_cubes()
    scene = window.Scene()
    cube_actor = actor.cube(centers, directions=(1, 0, 0),
                            colors=colors, scales=radii)
    scene.add(cube_actor)
    selm = pick.SelectionManager(select='actors')
    showm = window.ShowManager(scene,
                               size=(900, 768), reset_camera=False,
                               order_transparent=True)
    showm.initialize()
    def hover_callback(_obj, _event):
        event_pos = selm.event_position(showm.iren)
        info = selm.pick(event_pos, showm.scene)
        selected_actor = info['actor']
        # print(id(selected_actor), id(cube_actor))
        if selected_actor is not None:
            # Identity check: the pick must return the very same actor object.
            npt.assert_equal(id(cube_actor), id(selected_actor))
        showm.render()
    showm.add_iren_callback(hover_callback)
    if recording:
        showm.record_events_to_file(recording_filename)
    else:
        showm.play_events_from_file(recording_filename)
if __name__ == "__main__":
npt.run_module_suite()
|
{"hexsha": "4402a209e446a1f3d6425b57c3f050450ece160f", "size": 8281, "ext": "py", "lang": "Python", "max_stars_repo_path": "fury/tests/test_pick.py", "max_stars_repo_name": "SunTzunami/fury", "max_stars_repo_head_hexsha": "39a28039fab8ba3070c0a7c1cdb1eed263f59971", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 149, "max_stars_repo_stars_event_min_datetime": "2018-09-20T18:36:16.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T05:16:25.000Z", "max_issues_repo_path": "fury/tests/test_pick.py", "max_issues_repo_name": "SunTzunami/fury", "max_issues_repo_head_hexsha": "39a28039fab8ba3070c0a7c1cdb1eed263f59971", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 523, "max_issues_repo_issues_event_min_datetime": "2018-09-20T16:57:16.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T18:52:41.000Z", "max_forks_repo_path": "fury/tests/test_pick.py", "max_forks_repo_name": "SunTzunami/fury", "max_forks_repo_head_hexsha": "39a28039fab8ba3070c0a7c1cdb1eed263f59971", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 150, "max_forks_repo_forks_event_min_datetime": "2018-10-10T07:21:27.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-29T08:33:17.000Z", "avg_line_length": 28.8536585366, "max_line_length": 73, "alphanum_fraction": 0.6030672624, "include": true, "reason": "import numpy", "num_tokens": 1987}
|
from __future__ import print_function
import os, sys
import numpy as np
from algorithms import g_span as gSpan
from algorithms import load_graphs
filepath = os.path.dirname(os.path.abspath(__file__))
def main(filename='data/exampleG.txt', min_sup=2):
    """Load graphs from ``filename`` and print every extension found by gSpan.

    :param filename: path of the graph data file, relative to this script.
    :param min_sup: minimum support a pattern needs to be reported.
    """
    filename = os.path.join(filepath, filename)
    graphs = load_graphs(filename)
    # (removed unused local ``n = len(graphs)``)
    extensions = []
    # gSpan grows patterns from the empty code, appending each frequent
    # canonical extension to ``extensions``.
    gSpan([], graphs, min_sup=min_sup, extensions=extensions)
    for i, ext in enumerate(extensions):
        print('Pattern %d' % (i+1))
        for _c in ext:
            print(_c)
        print('')
if __name__ == '__main__':
    # Minimal CLI: optional positional FILENAME and minsup arguments, or
    # -h/--help for usage text.
    if ('--help' in sys.argv) or ('-h' in sys.argv):
        print("")
        print("Finds possible frequent and canonical extensions of C in D, using")
        print("min_sup as lowest allowed support value.")
        print("Usage: %s FILENAME minsup" % (sys.argv[0]))
        print("")
        print("FILENAME: Relative path of graph data file.")
        print("minsup: Minimum support value.")
    else:
        # Build kwargs from positional args so main()'s defaults apply for
        # any argument that is omitted.
        kwargs = {}
        if len(sys.argv) > 1:
            kwargs['filename'] = sys.argv[1]
        if len(sys.argv) > 2:
            kwargs['min_sup'] = int(sys.argv[2])
        if len(sys.argv) > 3:
            sys.exit("Not correct arguments provided. Use %s -h for more information" % (sys.argv[0]))
        main(**kwargs)
|
{"hexsha": "a9ea1515adf64157b5e25a3dd3fa4223d37097d0", "size": 1345, "ext": "py", "lang": "Python", "max_stars_repo_path": "test.py", "max_stars_repo_name": "LasseRegin/gSpan", "max_stars_repo_head_hexsha": "69df98dccf5d440723274e3c0e9c4fe335d93225", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 17, "max_stars_repo_stars_event_min_datetime": "2016-03-01T07:26:27.000Z", "max_stars_repo_stars_event_max_datetime": "2020-11-26T21:17:21.000Z", "max_issues_repo_path": "test.py", "max_issues_repo_name": "LasseRegin/gSpan", "max_issues_repo_head_hexsha": "69df98dccf5d440723274e3c0e9c4fe335d93225", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2016-11-08T16:35:01.000Z", "max_issues_repo_issues_event_max_datetime": "2018-10-05T14:44:25.000Z", "max_forks_repo_path": "test.py", "max_forks_repo_name": "LasseRegin/gSpan", "max_forks_repo_head_hexsha": "69df98dccf5d440723274e3c0e9c4fe335d93225", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 16, "max_forks_repo_forks_event_min_datetime": "2016-03-24T10:29:16.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-28T08:48:09.000Z", "avg_line_length": 35.3947368421, "max_line_length": 102, "alphanum_fraction": 0.6089219331, "include": true, "reason": "import numpy", "num_tokens": 337}
|
"""IO methods for radar data from MYRORSS.
--- DEFINITIONS ---
MYRORSS = Multi-year Reanalysis of Remotely Sensed Storms
"""
import shutil
import os.path
import numpy
from gewittergefahr.gg_io import myrorss_and_mrms_io
from gewittergefahr.gg_utils import radar_utils
from gewittergefahr.gg_utils import myrorss_and_mrms_utils
from gewittergefahr.gg_utils import unzipping
from gewittergefahr.gg_utils import time_conversion
from gewittergefahr.gg_utils import error_checking
AZIMUTHAL_RADAR_FIELD_NAMES = [
radar_utils.LOW_LEVEL_SHEAR_NAME, radar_utils.MID_LEVEL_SHEAR_NAME
]
DEFAULT_FIELDS_TO_REMOVE = [
radar_utils.ECHO_TOP_18DBZ_NAME, radar_utils.ECHO_TOP_50DBZ_NAME,
radar_utils.LOW_LEVEL_SHEAR_NAME, radar_utils.MID_LEVEL_SHEAR_NAME,
radar_utils.REFL_NAME, radar_utils.REFL_COLUMN_MAX_NAME,
radar_utils.MESH_NAME, radar_utils.REFL_0CELSIUS_NAME,
radar_utils.REFL_M10CELSIUS_NAME, radar_utils.REFL_M20CELSIUS_NAME,
radar_utils.REFL_LOWEST_ALTITUDE_NAME, radar_utils.SHI_NAME,
radar_utils.VIL_NAME
]
DEFAULT_REFL_HEIGHTS_TO_REMOVE_M_ASL = radar_utils.get_valid_heights(
data_source=radar_utils.MYRORSS_SOURCE_ID, field_name=radar_utils.REFL_NAME)
def unzip_1day_tar_file(
tar_file_name, field_names, spc_date_string, top_target_directory_name,
refl_heights_m_asl=None):
"""Unzips 1-day tar file (containing raw MYRORSS data for one SPC date).
:param tar_file_name: Path to input file.
:param field_names: 1-D list with names of radar fields.
:param spc_date_string: SPC date (format "yyyymmdd").
:param top_target_directory_name: Name of top-level directory for unzipped
MYRORSS files. This method will create a subdirectory therein for the
SPC date.
:param refl_heights_m_asl: 1-D numpy array of reflectivity heights (metres
above sea level).
:return: target_directory_name: Path to output directory.
"""
# Verification.
_ = time_conversion.spc_date_string_to_unix_sec(spc_date_string)
error_checking.assert_is_string_list(field_names)
error_checking.assert_is_numpy_array(
numpy.asarray(field_names), num_dimensions=1)
error_checking.assert_is_string(top_target_directory_name)
# Put azimuthal-shear fields (which are allowed to be missing) at the end.
# This way, if the tar command errors out due to missing data, it will do so
# after unzipping all the non-missing data.
field_names_removed = []
for this_field_name in AZIMUTHAL_RADAR_FIELD_NAMES:
if this_field_name in field_names:
field_names.remove(this_field_name)
field_names_removed.append(this_field_name)
for this_field_name in field_names_removed:
field_names.append(this_field_name)
field_to_heights_dict_m_asl = (
myrorss_and_mrms_utils.fields_and_refl_heights_to_dict(
field_names=field_names, data_source=radar_utils.MYRORSS_SOURCE_ID,
refl_heights_m_asl=refl_heights_m_asl))
target_directory_name = '{0:s}/{1:s}/{2:s}'.format(
top_target_directory_name, spc_date_string[:4], spc_date_string
)
field_names = list(field_to_heights_dict_m_asl.keys())
directory_names_to_unzip = []
for this_field_name in field_names:
these_heights_m_asl = field_to_heights_dict_m_asl[this_field_name]
for this_height_m_asl in these_heights_m_asl:
directory_names_to_unzip.append(
myrorss_and_mrms_io.get_relative_dir_for_raw_files(
field_name=this_field_name,
data_source=radar_utils.MYRORSS_SOURCE_ID,
height_m_asl=this_height_m_asl))
unzipping.unzip_tar(
tar_file_name,
target_directory_name=target_directory_name,
file_and_dir_names_to_unzip=directory_names_to_unzip)
return target_directory_name
def remove_unzipped_data_1day(
spc_date_string, top_directory_name,
field_names=DEFAULT_FIELDS_TO_REMOVE,
refl_heights_m_asl=DEFAULT_REFL_HEIGHTS_TO_REMOVE_M_ASL):
"""Removes unzipped MYRORSS data for one SPC date.
Basically, this method cleans up after unzip_1day_tar_file.
:param spc_date_string: SPC date (format "yyyymmdd").
:param top_directory_name: Name of top-level directory with unzipped MYRORSS
files. This method will find the subdirectory in `top_directory_name`
for the given SPC date.
:param field_names: 1-D list with names of radar fields. Only these will be
deleted.
:param refl_heights_m_asl: 1-D numpy array of reflectivity heights (metres
above sea level).
"""
spc_date_unix_sec = time_conversion.spc_date_string_to_unix_sec(
spc_date_string)
field_to_heights_dict_m_asl = (
myrorss_and_mrms_utils.fields_and_refl_heights_to_dict(
field_names=field_names, data_source=radar_utils.MYRORSS_SOURCE_ID,
refl_heights_m_asl=refl_heights_m_asl))
for this_field_name in list(field_to_heights_dict_m_asl.keys()):
these_heights_m_asl = field_to_heights_dict_m_asl[this_field_name]
for this_height_m_asl in these_heights_m_asl:
example_file_name = myrorss_and_mrms_io.find_raw_file(
unix_time_sec=spc_date_unix_sec,
spc_date_string=spc_date_string, field_name=this_field_name,
data_source=radar_utils.MYRORSS_SOURCE_ID,
top_directory_name=top_directory_name,
height_m_asl=this_height_m_asl,
raise_error_if_missing=False)
example_directory_name, _ = os.path.split(example_file_name)
directory_name_parts = example_directory_name.split('/')
remove_all_heights = False
if this_field_name == radar_utils.REFL_NAME:
if (set(these_heights_m_asl) ==
set(DEFAULT_REFL_HEIGHTS_TO_REMOVE_M_ASL)):
remove_all_heights = True
dir_name_to_remove = '/'.join(directory_name_parts[:-1])
else:
dir_name_to_remove = '/'.join(directory_name_parts)
else:
dir_name_to_remove = '/'.join(directory_name_parts[:-1])
if os.path.isdir(dir_name_to_remove):
print('Removing directory "{0:s}"...'.format(
dir_name_to_remove))
shutil.rmtree(dir_name_to_remove, ignore_errors=True)
if remove_all_heights:
break
|
{"hexsha": "d6353ba129825fd4d29cb725e6a9185fb7d39e5b", "size": 6522, "ext": "py", "lang": "Python", "max_stars_repo_path": "gewittergefahr/gg_io/myrorss_io.py", "max_stars_repo_name": "dopplerchase/GewitterGefahr", "max_stars_repo_head_hexsha": "4415b08dd64f37eba5b1b9e8cc5aa9af24f96593", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 26, "max_stars_repo_stars_event_min_datetime": "2018-10-04T01:07:35.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-29T08:49:32.000Z", "max_issues_repo_path": "gewittergefahr/gg_io/myrorss_io.py", "max_issues_repo_name": "liuximarcus/GewitterGefahr", "max_issues_repo_head_hexsha": "d819874d616f98a25187bfd3091073a2e6d5279e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2017-12-25T02:01:08.000Z", "max_issues_repo_issues_event_max_datetime": "2018-12-19T01:54:21.000Z", "max_forks_repo_path": "gewittergefahr/gg_io/myrorss_io.py", "max_forks_repo_name": "liuximarcus/GewitterGefahr", "max_forks_repo_head_hexsha": "d819874d616f98a25187bfd3091073a2e6d5279e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 11, "max_forks_repo_forks_event_min_datetime": "2017-12-10T23:05:29.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-29T08:49:33.000Z", "avg_line_length": 40.2592592593, "max_line_length": 80, "alphanum_fraction": 0.7252376572, "include": true, "reason": "import numpy", "num_tokens": 1589}
|
      MODULE vmec_params
!     Shared parameters, termination/control flags and Fourier-mode
!     bookkeeping variables used across the VMEC equilibrium solver.
      USE stel_kinds, ONLY: rprec, dp
      USE vparams, ONLY: mpold
C-----------------------------------------------
C   L o c a l   P a r a m e t e r s
C-----------------------------------------------
      INTEGER, PARAMETER :: meven = 0, modd = 1
      INTEGER, PARAMETER :: ndamp = 10
      INTEGER, PARAMETER :: ns4 = 25
      INTEGER, PRIVATE :: ink
      INTEGER, PARAMETER, DIMENSION(0:mpold) ::
     1 jmin1 = (/ 1,1,(2,ink=2,mpold) /), !starting js(m) values where R,Z are non-zero
     2 jmin2 = (/ 1,2,(2,ink=2,mpold) /), !starting js(m) values for which R,Z are evolved
     3 jlam = (/ 2,2,(2,ink=2,mpold) /) !starting js(m) values for which Lambda is evolved
! Besure to update werror in fileout.f when adding more error flags.
!     Termination/error status codes returned by the solver.
      INTEGER, PARAMETER :: norm_term_flag=0, bad_jacobian_flag=1,
     1 more_iter_flag=2,
     2 jac75_flag=4, input_error_flag=5,
     3 phiedge_error_flag=7,
     4 ns_error_flag=8,
     5 misc_error_flag=9,
     6 successful_term_flag=11, !ftol force criterion has been met
     7 bsub_bad_js1_flag=12,
     8 r01_bad_value_flag=13,
     9 arz_bad_value_flag=14
!     Control flags (distinct powers of two, combinable as a bit mask).
      INTEGER, PARAMETER :: restart_flag=1, readin_flag=2,
     1 timestep_flag=4,output_flag=8,
     2 cleanup_flag=16, reset_jacdt_flag=32
      REAL(rprec), PARAMETER :: pdamp = 0.05_dp
      CHARACTER(LEN=*), PARAMETER :: version_ = '9.0'
!-----------------------------------------------
!   L o c a l   V a r i a b l e s
!-----------------------------------------------
!     NOTE(review): ntmax and the rcc..zss integers appear to index the
!     cos/sin Fourier basis blocks of the xc state vector (see the 8.20(i)
!     history notes below); they are assigned elsewhere -- confirm there.
      INTEGER :: ntmax, rcc, rss, rsc, rcs, zsc, zcs, zcc, zss
      INTEGER :: mnyq, nnyq
      INTEGER, ALLOCATABLE :: uminus(:)
      REAL(rprec), ALLOCATABLE :: mscale(:), nscale(:)
      REAL(rprec) :: signgs, lamscale
!-----------------------------------------------
!
!     VERSION INFORMATION HISTORY
!
!  8.00
!     a) added lforbal logical to fbal module to control whether to compute the flux-averaged
!        force balance equation printed in the threed1 file. This requires a modification of
!        the m=1,n=0 forces for R,Z in tomnsps subroutine. This works well, generally, and
!        yields an improved <EQUIF> in threed1 file. However, there have been some cases where
!        this non-variational departure fails to converge.
!     b) added "bias" to iotas in bcovar when determining preconditioner for very small iota
!        values. Seems to need this for improving convergence of preconditioned current-hole cases.
!        Eliminated in v8.20.
!     c) added "totzsps,a_hess" to recompute r,z,l rapidly for only those modes that are jogged during
!        Hessian calculation. NOTE: need to do this for lasym=true case, as well, eventually
!  8.20 (August, 2004)
!     a) removed 2-pt tie at origin for m=1 modes, to preserve tri-diagonal structure of Hessian.
!        This is needed for preconditioning, which assumes block-tridi structure of equations
!     b) fixed problem with free-boundary preconditioner, namely, ctor can not be extrapolated
!        at edge when computing preconditioner, because this breaks tri-diagonal structure
!     c) added new variables to input file to control preconditioning:
!        1) PRECON_TYPE: = 'default', default tri-di (block size = 1)
!                        = 'cg', block tri-di, conjugate-gradient time-stepper
!                        = 'gmres', " ", gmres time-stepper
!                        = 'tfqmr', " ", transpose free qmr
!        2) PREC2D_THRESHOLD: value of (unpreconditioned) forces at which block (2D) preconditioner
!                             is turned on (=0 block preconditioner never turned on); recommended
!                             (default) value ~ 1.E-10, or smaller, if convergence is poor
!        3) LFORBAL: logical variable (default = .true.); when true, the force balance
!                    used in the threed1 file is used to evolve the m=1 R,Z components. This
!                    is a non-variational departure from the force equations for these modes,
!                    but generally does not have an unfavorable impact on convergence.
!     d) added new internal variable, ICTRL_PREC2D, to precon2d module. Replaces previous lprec2d
!        and iequi>1 variables.
!     e) removed lsweep_fast option from module precon2d. This slows the computation of the Hessian
!        by about 2/3, but is more accurate (includes pdamp, liota, lforbal correctly)
!     f) removed lflam logicals from bcovar and tomnsps, since now we must compute dFlam/dR,Z by
!        jogging
!     g) removed Compute_Hess_Flam_RZ from lamblks; this is now computed via jogging
!        (also removed Get_dFlam_dRZ, FFT2Hessian, Forbal_avg, GetGsqrtVar supporting routines)
!     h) removed internal liota logic, used to push iota profile rather than solving for it. Had
!        needed this for symmetric Hessian (lsweep_fast=true option), but no longer required. Also,
!        it was not implemented entirely correctly for lforbal=true case
!     i) for lasym m=1 constraint rsc = zcc, changed xc array so that R+ = .5*(rsc + zcc) is stored at
!        xc(rsc,m=1) and R- = .5*(rsc - zcc) is stored at xc(zcc,m=1). In residue, gcz(R-) == gcz(zcc)
!        is zeroed by "evolving" gcr(zcc) = azd*[xc(zcc)-xcint], and gcr(rsc) => .5*[gcr(rsc) + gcz(zcc)]
!        is evolved. In totzspa, the real rsc and zcc are computed from the internal representations
!        (check convert call, too) by calling a new routine convert_asym (also called from wrout before
!        writing out xc info). In readin, the original R+,R- are stored, so that for external "jogs",
!        there will be no change in forces. All these changes are needed to obtain an invertible Hessian.
!     j) added m=1 constraint for 3D case (similar to (i)), rss(n) = zcs(n), for n != 0. Imposed this
!        on forces by adding routine constrain_m1 in residue. Added convert_sym routine to totzsp to convert
!        from internal xc representation TO internal one.
!     k) Decreased exponent on pdamp factor r2 (in bcovar) from 2 to 1, to give better conditioning
!        especially for current hole cases
!     l) Eliminated iotas bias for determining preconditioner, previously added in v8.00 for stabilizing
!        current hole cases (not needed with corrected preconditioner)
!  8.30 (October, 2004)
!     a) Implemented flags for "reverse-communication" mode of vmec
!  8.40 a) Converted the m=1 constraints for 3D and asym back to old way; did not always
!        converge well with the new constraints introduced in 8.20 (i-j)
!  8.45 (December, 2005)
!     a) Added the lconm1 logical. If = True, new constraint; if = False, old m=1 constraint used
!     b) Added "perturbation" computation for lasym=TRUE case (totzspa_hess)
!  8.46 (June, 2009)
!     a) Added LRFP logical to allow easy switching of profiles between Stellarator/tokamak (PHIP=1, LRFP=F)
!        and RFP (CHIP=1, LRFP=T). When LRFP=T, AI coefficients are expansion of q = 1/iota. Added lrfp to
!        LIBSTELL/vmec_input module.
!  8.47 (July, 2010)
!     a) Rewrote magnetic field representation so that phip*lambda = new internal lambda. This greatly improves
!        the conditioning of the lambda equations which otherwise become singular at the RFP reversal point
!  8.48 (March 2012 - JDH)
!     a) Accumulated small changes from SPH & JDH
!     b) Modifications from J Geiger, March 2012
!        - to be able to get additional main iterations if the force tolerance is
!          not met. Parameter MAX_MAIN_ITERATIONS specifies how many main iteration
!          cycles should be run.
!        - to get a full output in the threed1-file if the force tolerance is not
!          met. Specify the logical LFULL3D1OUT to true for this.
!        - if vmec2000 is compiled with netcdf, you can still get the ascii-output
!          if you specify the logical LWOUTTXT as true.
!        - you get the output for diagno 1.0 and 1.5 if the logical LDIAGNO set true.
!        - you get a rather old fort.8-output if you specify LOLDOUT as true.
!
!        If none of these new variables is set, the behavior of vmec2000 is as
!        expected from the version without the changes.
!  8.49 (June, 2012)
!     a) Fixed bug in bcovar when averaging half-grid covariant components onto full grid: did not
!        zero components at (phantom) js=1 point, so edge force averages were incorrect
!     b) Added lamscale factor to scale lambda in contravariant B-components. Modify wrout to maintain
!        old-style lambda output
!     c) Set lbsubs=F in jxbforce by default to capture current sheets
!     d) Added lmove_axis INPUT logical (=T by default) so user can control whether or not the magnetic
!        axis can be initially shifted to improve the initial force residuals. It is important NOT to move
!        the helical axis for RFP equilibria requiring a helical seed (set l_moveaxis=F for this case!)
!
!  8.50 (Jan, 2013)
!     a) Improved scaling of lambda forces with respect to lamscale
!     b) Fixed fnorm1 scaling (remove hs dependence)
!     c) Added lgiveup logical (M. Drevlak/J. Geiger)
!-----------------------------------------------
      END MODULE vmec_params
|
{"hexsha": "132bd8313de4e759e17a0ec7c185b766f147da89", "size": 9942, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "VMEC2000/Sources/General/vmec_params.f", "max_stars_repo_name": "joseluisvelasco/STELLOPT", "max_stars_repo_head_hexsha": "e064ebb96414d5afc4e205f43b44766558dca2af", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 29, "max_stars_repo_stars_event_min_datetime": "2020-05-08T01:47:34.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-06T10:35:28.000Z", "max_issues_repo_path": "VMEC2000/Sources/General/vmec_params.f", "max_issues_repo_name": "joseluisvelasco/STELLOPT", "max_issues_repo_head_hexsha": "e064ebb96414d5afc4e205f43b44766558dca2af", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 77, "max_issues_repo_issues_event_min_datetime": "2020-05-08T07:18:09.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-30T11:20:33.000Z", "max_forks_repo_path": "Sources/General/vmec_params.f", "max_forks_repo_name": "mbkumar/VMEC2000", "max_forks_repo_head_hexsha": "334e3bd478f2432b6fe8cbb321f0d81d9a952152", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2021-02-10T13:47:08.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-21T12:53:43.000Z", "avg_line_length": 69.0416666667, "max_line_length": 116, "alphanum_fraction": 0.6073224703, "num_tokens": 2560}
|
from torch.utils.data import Dataset, DataLoader
import numpy as np
import json
import jieba
import torch
class textDataset(Dataset):
    """Dataset pairing tokenised titles with labels and precomputed image features.

    Each sample is ``(token_ids, label, image_feature)`` where ``token_ids``
    is a fixed-length sequence of vocabulary indices (truncated/padded to
    ``MAX_LEN``), ``label`` is the class id, and ``image_feature`` is the
    precomputed feature vector looked up by image name.
    """

    # fixed token-sequence length for every title
    MAX_LEN = 18

    def __init__(self, label_dir='../data/label.json', feature_dir='../data/feature_imgName.json', word_to_idx=None,
                 index=None):
        """Load labels/titles and image features from JSON files.

        Args:
            label_dir: JSON file with 'title', 'label' and 'img_name' lists.
            feature_dir: JSON file mapping image name -> feature vector.
            word_to_idx: vocabulary mapping word -> index (default: empty).
            index: iterable of sample indices to keep; ``None`` selects all
                samples (previously this raised ``TypeError``).
        """
        super().__init__()
        with open(label_dir, 'r') as f:
            label_title = json.load(f)
        if index is None:
            index = range(len(label_title['label']))
        self.titles = [label_title['title'][i] for i in index]
        self.labels = [label_title['label'][i] for i in index]
        self.img_names = [label_title['img_name'][i] for i in index]
        with open(feature_dir, 'r') as f:
            self.img_features = json.load(f)
        # avoid the shared-mutable-default-argument pitfall
        self.word_to_idx = {} if word_to_idx is None else word_to_idx

    def __len__(self):
        """Number of samples selected at construction time."""
        return len(self.labels)

    def __getitem__(self, idx):
        """Return ``(token_ids, label, image_feature)`` for sample *idx*."""
        label = self.labels[idx]
        name = self.img_names[idx]
        feature = np.array(self.img_features[name])
        # tokenise with jieba and map words to vocabulary indices; unknown
        # words fall back to index 0 (the padding id) instead of raising
        tokens = [self.word_to_idx.get(w, 0) for w in jieba.cut(self.titles[idx])]
        # truncate or right-pad with 0 to exactly MAX_LEN tokens
        tokens = tokens[:self.MAX_LEN] + [0] * max(0, self.MAX_LEN - len(tokens))
        return np.array(tokens), np.array(label), feature
#
if __name__ == "__main__":
    # Smoke test: build a loader over the first 10k samples and print the
    # tensor shapes of a single batch.
    with open('../data/word_to_idx_v1.json', 'r') as fh:
        vocab = json.load(fh)
    sample_ids = list(range(10000))
    dataset = textDataset(label_dir='../data/label_v1.json',
                          feature_dir='../data/feature_imgName.json',
                          word_to_idx=vocab, index=sample_ids)
    loader = DataLoader(dataset, batch_size=2, shuffle=False, num_workers=0)
    for batch in loader:
        tokens, targets, img_feats = batch
        tokens = tokens.type(torch.LongTensor)
        targets = targets.type(torch.LongTensor)
        print(tokens.shape, targets.shape, img_feats.shape)
        break
|
{"hexsha": "1e270d79cad9508bf7794a492aad9d6235cffe40", "size": 2255, "ext": "py", "lang": "Python", "max_stars_repo_path": "gaiic2022/dataset.py", "max_stars_repo_name": "Challyfilio/GAIIC2022-itmatch", "max_stars_repo_head_hexsha": "55eb1f762dec2d9fe859ed9caafd35cfcd125de6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "gaiic2022/dataset.py", "max_issues_repo_name": "Challyfilio/GAIIC2022-itmatch", "max_issues_repo_head_hexsha": "55eb1f762dec2d9fe859ed9caafd35cfcd125de6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "gaiic2022/dataset.py", "max_forks_repo_name": "Challyfilio/GAIIC2022-itmatch", "max_forks_repo_head_hexsha": "55eb1f762dec2d9fe859ed9caafd35cfcd125de6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.1666666667, "max_line_length": 114, "alphanum_fraction": 0.5636363636, "include": true, "reason": "import numpy", "num_tokens": 513}
|
/**
* Author: Aravinth Panchadcharam
* Email: me@aravinth.info
* Date: 22/12/14.
* Project: Gesture Recogntion for Human-Robot Interaction
*/
#include <iostream>
#include <boost/asio.hpp>
#include "udp_server.h"
int main(){
try
{
boost::asio::io_service io_service;
udp_server server(io_service);
io_service.run();
}
catch (std::exception& e)
{
std::cerr << e.what() << std::endl;
}
return 0;
}
|
{"hexsha": "5c6332867d96dd1055a1a3670e5ea54246e7126a", "size": 512, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "source/human-robot-interaction/src/udp-server-async/main.cpp", "max_stars_repo_name": "AravinthPanch/gesture-recognition-for-human-robot-interaction", "max_stars_repo_head_hexsha": "42effa14c0f7a03f460fba5cd80dd72d5206e2a8", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 68.0, "max_stars_repo_stars_event_min_datetime": "2016-05-26T16:19:59.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-17T12:52:09.000Z", "max_issues_repo_path": "source/human-robot-interaction/src/udp-server-async/main.cpp", "max_issues_repo_name": "AravinthPanch/gesture-recognition-for-human-robot-interaction", "max_issues_repo_head_hexsha": "42effa14c0f7a03f460fba5cd80dd72d5206e2a8", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1.0, "max_issues_repo_issues_event_min_datetime": "2017-11-20T13:28:59.000Z", "max_issues_repo_issues_event_max_datetime": "2017-11-22T13:52:50.000Z", "max_forks_repo_path": "source/human-robot-interaction/src/udp-server-async/main.cpp", "max_forks_repo_name": "AravinthPanch/gesture-recognition-for-human-robot-interaction", "max_forks_repo_head_hexsha": "42effa14c0f7a03f460fba5cd80dd72d5206e2a8", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 35.0, "max_forks_repo_forks_event_min_datetime": "2015-06-25T08:51:57.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-15T04:47:16.000Z", "avg_line_length": 16.5161290323, "max_line_length": 58, "alphanum_fraction": 0.54296875, "num_tokens": 136}
|
#include <boost/spirit/include/qi_attr_cast.hpp>
|
{"hexsha": "4a0af0e6a37b5acde69d96c60b0fd44b5eae3405", "size": 49, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "src/boost_spirit_include_qi_attr_cast.hpp", "max_stars_repo_name": "miathedev/BoostForArduino", "max_stars_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 10.0, "max_stars_repo_stars_event_min_datetime": "2018-03-17T00:58:42.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-06T02:48:49.000Z", "max_issues_repo_path": "src/boost_spirit_include_qi_attr_cast.hpp", "max_issues_repo_name": "miathedev/BoostForArduino", "max_issues_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 2.0, "max_issues_repo_issues_event_min_datetime": "2021-03-26T15:17:35.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-20T23:55:08.000Z", "max_forks_repo_path": "src/boost_spirit_include_qi_attr_cast.hpp", "max_forks_repo_name": "miathedev/BoostForArduino", "max_forks_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 4.0, "max_forks_repo_forks_event_min_datetime": "2019-05-28T21:06:37.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-06T03:06:52.000Z", "avg_line_length": 24.5, "max_line_length": 48, "alphanum_fraction": 0.8163265306, "num_tokens": 12}
|
'''
Created on Oct 30, 2015
@author: kashefy
'''
from nose.tools import assert_equal, assert_false, \
assert_true, assert_greater
import os
import tempfile
import shutil
import numpy as np
import lmdb
import caffe
from nideep.iow.lmdb_utils import IDX_FMT, MAP_SZ
import nideep.iow.to_lmdb as tol
import nideep.iow.read_lmdb as r
import nideep.iow.copy_lmdb as c
class TestCopySamplesLMDB:
    """Tests for ``copy_lmdb.copy_samples_lmdb``.

    A 4-sample source LMDB is created once per class; each test copies
    samples out of it (single, subset, all) and verifies element counts,
    values and labels. ``xrange`` was replaced with ``range`` for
    Python 3 compatibility.
    """

    @classmethod
    def setup_class(self):
        """Create a temp dir containing the 4-sample source LMDB 'x_lmdb'."""
        self.dir_tmp = tempfile.mkdtemp()
        x = np.array([[[ 1, 2, 3],
                       [ 4, 5, 6]
                       ],
                      [[ 7, 8, 9],
                       [10, 11, 12]
                       ],
                      [[13, 14, 15],
                       [16, 17, 18],
                       ],
                      [[19, 20, 21],
                       [22, 23, 24]
                       ]
                      ])
        tol.arrays_to_lmdb([y for y in x], os.path.join(self.dir_tmp, 'x_lmdb'))

    @classmethod
    def teardown_class(self):
        """Remove the temp dir and everything in it."""
        shutil.rmtree(self.dir_tmp)

    def test_copy_samples_single(self):
        """Copying one sample at a time yields exactly that sample."""
        path_src = os.path.join(self.dir_tmp, 'x_lmdb')
        x = r.read_values(path_src)
        path_dst = os.path.join(self.dir_tmp, 'test_copy_samples_single_lmdb')
        assert_greater(len(x), 0, "This test needs non empty data.")
        for i in range(len(x)):  # range: py2-only xrange removed
            if os.path.isdir(path_dst):
                shutil.rmtree(path_dst)
            c.copy_samples_lmdb(path_src, path_dst, [i])
            assert_true(os.path.isdir(path_dst), "failed to save LMDB for i=%d" % (i,))
            y = r.read_values(path_dst)
            assert_equal(len(y), 1, "Single element expected.")
            assert_true(np.all(x[i][0] == y[0][0]), "Wrong content copied.")
            assert_true(np.all(x[i][1] == y[0][1]), "Wrong content copied.")

    def test_copy_samples_single_with_data_func(self):
        """A func_data hook can transform values/labels during the copy."""
        path_src = os.path.join(self.dir_tmp, 'x_lmdb')
        x = r.read_values(path_src)
        path_dst = os.path.join(self.dir_tmp, 'test_copy_samples_single_with_data_func_lmdb')
        assert_greater(len(x), 0, "This test needs non empty data.")
        ALPHA = -10  # non-zero
        def func_data_mul(value):
            # scale the datum and set the label to its first element
            _, v = r.unpack_raw_datum(value)
            dat = caffe.io.array_to_datum(ALPHA * v, int(v.flatten()[0]))
            return dat.SerializeToString()
        for i in range(len(x)):  # range: py2-only xrange removed
            if os.path.isdir(path_dst):
                shutil.rmtree(path_dst)
            c.copy_samples_lmdb(path_src, path_dst, [i], func_data=func_data_mul)
            assert_true(os.path.isdir(path_dst), "failed to save LMDB for i=%d" % (i,))
            y = r.read_values(path_dst)
            assert_equal(len(y), 1, "Single element expected.")
            assert_true(np.all(x[i][0] * ALPHA == y[0][0]), "Wrong content copied.")
            assert_true(np.all(int(x[i][0].flatten()[0]) == y[0][1]), "Wrong content copied for label.")

    def test_copy_samples_single_reverse(self):
        """Single-sample copies also work when iterating in reverse order."""
        path_src = os.path.join(self.dir_tmp, 'x_lmdb')
        x = r.read_values(path_src)
        path_dst = os.path.join(self.dir_tmp, 'test_copy_samples_single_lmdb')
        assert_greater(len(x), 0, "This test needs non empty data.")
        for i in range(len(x))[::-1]:
            if os.path.isdir(path_dst):
                shutil.rmtree(path_dst)
            c.copy_samples_lmdb(path_src, path_dst, [i])
            assert_true(os.path.isdir(path_dst), "failed to save LMDB for i=%d" % (i,))
            y = r.read_values(path_dst)
            assert_equal(len(y), 1, "Single element expected.")
            assert_true(np.all(x[i][0] == y[0][0]), "Wrong content copied.")
            assert_true(np.all(x[i][1] == y[0][1]), "Wrong content copied.")

    def test_copy_samples_subset(self):
        """Copying every other key yields exactly that subset, in order."""
        path_src = os.path.join(self.dir_tmp, 'x_lmdb')
        x = r.read_values(path_src)
        assert_greater(len(x), 0, "This test needs non empty data.")
        path_dst = os.path.join(self.dir_tmp, 'test_copy_samples_subset')
        keys = range(0, len(x), 2)
        assert_greater(len(keys), 0, "This test needs a non-empty subset.")
        assert_greater(len(x), len(keys), "Need subset, not all elements.")
        c.copy_samples_lmdb(path_src, path_dst, keys)
        assert_true(os.path.isdir(path_dst), "failed to save LMDB")
        y = r.read_values(path_dst)
        assert_equal(int(len(x) / 2), len(y), "Wrong number of elements copied.")
        for (x_val, x_label), (y_val, y_label) in zip(x[0::2], y):  # skip element in x
            assert_true(np.all(x_val == y_val), "Wrong content copied.")
            assert_true(np.all(x_label == y_label), "Wrong content copied.")

    def test_copy_samples_subset_with_data_func(self):
        """Subset copy combined with a transforming func_data hook."""
        path_src = os.path.join(self.dir_tmp, 'x_lmdb')
        x = r.read_values(path_src)
        assert_greater(len(x), 0, "This test needs non empty data.")
        path_dst = os.path.join(self.dir_tmp, 'test_copy_samples_subset_with_data_func_lmdb')
        keys = range(0, len(x), 2)
        assert_greater(len(keys), 0, "This test needs a non-empty subset.")
        assert_greater(len(x), len(keys), "Need subset, not all elements.")
        ALPHA = -10  # non-zero
        def func_data_mul(value):
            # scale the datum and set the label to its first element
            _, x = r.unpack_raw_datum(value)
            dat = caffe.io.array_to_datum(ALPHA * x, int(x.flatten()[0]))
            return dat.SerializeToString()
        c.copy_samples_lmdb(path_src, path_dst, keys, func_data=func_data_mul)
        assert_true(os.path.isdir(path_dst), "failed to save LMDB")
        y = r.read_values(path_dst)
        assert_equal(int(len(x) / 2), len(y), "Wrong number of elements copied.")
        for (x_val, x_label), (y_val, y_label) in zip(x[0::2], y):  # skip element in x
            assert_true(np.all(x_val * ALPHA == y_val), "Wrong content copied.")
            assert_true(np.all(int(x_val.flatten()[0]) == y_label), "Wrong content copied for label.")
            assert_false(np.all(x_label == y_label), "Wrong content copied for label.")

    def test_copy_samples_no_append(self):
        """Repeated copies overwrite rather than append to the destination."""
        path_src = os.path.join(self.dir_tmp, 'x_lmdb')
        x = r.read_values(path_src)
        path_dst = os.path.join(self.dir_tmp, 'test_copy_samples_subset')
        c.copy_samples_lmdb(path_src, path_dst, range(0, len(x), 2))
        c.copy_samples_lmdb(path_src, path_dst, range(0, len(x), 2))
        c.copy_samples_lmdb(path_src, path_dst, range(0, len(x), 2))
        c.copy_samples_lmdb(path_src, path_dst, range(0, len(x), 2))
        assert_true(os.path.isdir(path_dst), "failed to save LMDB")
        y = r.read_values(path_dst)
        assert_equal(int(len(x) / 2), len(y), "Wrong number of elements copied.")
        for a, b in zip(x[0::2], y):  # skip element in x
            assert_true(np.all(a[0] == b[0]), "Wrong content copied.")
            assert_true(np.all(a[1] == b[1]), "Wrong content copied.")

    def test_copy_samples_all(self):
        """Copying all keys reproduces the source LMDB exactly."""
        path_src = os.path.join(self.dir_tmp, 'x_lmdb')
        x = r.read_values(path_src)
        path_dst = os.path.join(self.dir_tmp, 'test_copy_samples_all_lmdb')
        c.copy_samples_lmdb(path_src, path_dst, range(len(x)))
        assert_true(os.path.isdir(path_dst), "failed to save LMDB")
        y = r.read_values(path_dst)
        assert_equal(len(x), len(y), "Wrong number of elements copied.")
        for a, b in zip(x, y):
            assert_true(np.all(a[0] == b[0]), "Wrong content copied.")
            assert_true(np.all(a[1] == b[1]), "Wrong content copied.")
class TestConcatentateLMDB:
    """Tests for ``copy_lmdb.concatenate_lmdb``.

    Three 4-sample source LMDBs (original, negated, shifted by 1000) are
    created once per class; the test checks their concatenation preserves
    order, count and content.
    """

    @classmethod
    def setup_class(self):
        """Create a temp dir with three derived source LMDBs."""
        self.dir_tmp = tempfile.mkdtemp()
        # same values as the literal [[1..6], [7..12], [13..18], [19..24]]
        base = np.arange(1, 25).reshape(4, 2, 3)
        tol.arrays_to_lmdb(list(base), os.path.join(self.dir_tmp, 'x0_lmdb'))
        tol.arrays_to_lmdb([-arr for arr in base], os.path.join(self.dir_tmp, 'x1_lmdb'))
        tol.arrays_to_lmdb([arr + 1000 for arr in base], os.path.join(self.dir_tmp, 'x2_lmdb'))

    @classmethod
    def teardown_class(self):
        """Remove the temp dir and everything in it."""
        shutil.rmtree(self.dir_tmp)

    def test_concatenate(self):
        """Concatenation preserves element count, order and content."""
        srcs = [os.path.join(self.dir_tmp, name)
                for name in ('x0_lmdb', 'x1_lmdb', 'x2_lmdb')]
        parts = [r.read_values(p) for p in srcs]
        path_dst = os.path.join(self.dir_tmp, 'test_concatenate_lmdb')
        c.concatenate_lmdb(srcs, path_dst)
        assert_true(os.path.isdir(path_dst), "failed to save LMDB")
        y = r.read_values(path_dst)
        total = sum(len(p) for p in parts)
        assert_equal(total, len(y), "Wrong number of elements copied.")
        merged = parts[0] + parts[1] + parts[2]
        assert_equal(len(merged), len(y), "Wrong number of elements copied.")
        for (v_exp, l_exp), (v_got, l_got) in zip(merged, y):
            assert_true(np.all(v_exp == v_got), "Wrong content copied.")
            assert_true(np.all(l_exp == l_got), "Wrong content copied.")
|
{"hexsha": "eee4868a0dc085718faa2b3d35cbfc33e53fe076", "size": 9429, "ext": "py", "lang": "Python", "max_stars_repo_path": "nideep/iow/test_copy_lmdb.py", "max_stars_repo_name": "kashefy/caffe_sandbox", "max_stars_repo_head_hexsha": "31afc409df14fece0ac21707185e586dd2d625a9", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 15, "max_stars_repo_stars_event_min_datetime": "2015-08-26T21:15:15.000Z", "max_stars_repo_stars_event_max_datetime": "2016-03-10T06:25:08.000Z", "max_issues_repo_path": "nideep/iow/test_copy_lmdb.py", "max_issues_repo_name": "nigroup/nideep", "max_issues_repo_head_hexsha": "31afc409df14fece0ac21707185e586dd2d625a9", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": 35, "max_issues_repo_issues_event_min_datetime": "2016-05-24T13:57:01.000Z", "max_issues_repo_issues_event_max_datetime": "2018-03-07T18:43:07.000Z", "max_forks_repo_path": "nideep/iow/test_copy_lmdb.py", "max_forks_repo_name": "nigroup/nideep", "max_forks_repo_head_hexsha": "31afc409df14fece0ac21707185e586dd2d625a9", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 11, "max_forks_repo_forks_event_min_datetime": "2016-05-24T13:42:55.000Z", "max_forks_repo_forks_event_max_datetime": "2019-10-04T16:20:54.000Z", "avg_line_length": 41.1746724891, "max_line_length": 104, "alphanum_fraction": 0.5798069785, "include": true, "reason": "import numpy", "num_tokens": 2426}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import warnings
warnings.filterwarnings('ignore')
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
from glob import glob
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from MulticoreTSNE import MulticoreTSNE as TSNE
from PIL import Image, ImageDraw, ImageFont
from skimage import io
from sklearn.cluster import (DBSCAN, AffinityPropagation,
AgglomerativeClustering, Birch, KMeans)
from sklearn.datasets import make_blobs
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from umap import UMAP
import ae
import metrics
import ssnp
def compute_all_metrics(X, X_2d, D_high, D_low, y, X_inv=None):
    """Compute projection-quality metrics for a 2-D embedding.

    Returns the tuple ``(T, C, R, S, N, MSE)``: trustworthiness,
    continuity, Shepard-diagram correlation, normalized stress and
    neighborhood hit (k=3). ``MSE`` is the reconstruction error against
    ``X_inv``, or the sentinel ``-99.0`` when no inverse is supplied.
    """
    trust = metrics.metric_trustworthiness(X, X_2d, D_high, D_low)
    cont = metrics.metric_continuity(X, X_2d, D_high, D_low)
    shepard = metrics.metric_shepard_diagram_correlation(D_high, D_low)
    stress = metrics.metric_normalized_stress(D_high, D_low)
    nhit = metrics.metric_neighborhood_hit(X_2d, y, k=3)
    mse = -99.0 if X_inv is None else metrics.metric_mse(X, X_inv)
    return trust, cont, shepard, stress, nhit, mse
def plot(X, y, figname=None):
    """Scatter-plot 2-D points coloured by class label.

    Uses the 'tab10' palette for up to 10 classes, 'tab20' otherwise.
    Saves the figure to *figname* when given, then closes all figures.
    """
    n_classes = len(np.unique(y))
    cmap = plt.get_cmap('tab10' if n_classes <= 10 else 'tab20')
    fig, ax = plt.subplots(figsize=(20, 20))
    for label in np.unique(y):
        mask = y == label
        ax.scatter(X[mask, 0], X[mask, 1], c=[cmap(label)], label=label, s=20)
    ax.axis('off')
    if figname is not None:
        fig.savefig(figname)
    plt.close('all')
    del fig
    del ax
if __name__ == '__main__':
    # Experiment: compare SSNP trained on ground-truth labels (SSNP-GT)
    # against SSNP trained on K-means pseudo-labels (SSNP-KMeans) and
    # record train/test label-prediction accuracy for each dataset.
    patience = 5
    epochs = 200  # NOTE(review): overwritten per dataset below; this value is unused
    min_delta = 0.05
    verbose = False
    results = []
    output_dir = 'results_clustering'
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    data_dir ='../data'
    # each dataset is expected as ../data/<name>/X.npy and ../data/<name>/y.npy
    data_dirs = ['mnist', 'fashionmnist', 'har', 'reuters']
    epochs_dataset = {}
    epochs_dataset['fashionmnist'] = 10
    epochs_dataset['mnist'] = 10
    epochs_dataset['har'] = 10
    epochs_dataset['reuters'] = 10
    # multiplier on the true class count to pick the K-means cluster count
    classes_mult = {}
    classes_mult['fashionmnist'] = 2
    classes_mult['mnist'] = 2
    classes_mult['har'] = 2
    classes_mult['reuters'] = 1
    for d in data_dirs:
        dataset_name = d
        X = np.load(os.path.join(data_dir, d, 'X.npy'))
        y = np.load(os.path.join(data_dir, d, 'y.npy'))
        print('------------------------------------------------------')
        print('Dataset: {0}'.format(dataset_name))
        print(X.shape)
        print(y.shape)
        print(np.unique(y))
        n_classes = len(np.unique(y)) * classes_mult[dataset_name]
        n_samples = X.shape[0]
        # fixed stratified split: 5000 train / 1000 test samples
        train_size = 5000
        test_size = 1000
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, train_size=train_size, random_state=420, stratify=y)
        epochs = epochs_dataset[dataset_name]
        # --- SSNP trained on ground-truth labels ---
        ssnpgt = ssnp.SSNP(epochs=epochs, verbose=verbose, patience=0, opt='adam', bottleneck_activation='linear')
        ssnpgt.fit(X_train, y_train)
        X_ssnpgt = ssnpgt.transform(X_train)
        y_pred = ssnpgt.predict(X_train)
        y_pred_test = ssnpgt.predict(X_test)
        # --- SSNP trained on K-means pseudo-labels ---
        ssnpkm = ssnp.SSNP(epochs=epochs, verbose=verbose, patience=0, opt='adam', bottleneck_activation='linear')
        C = KMeans(n_clusters=n_classes)
        y_km = C.fit_predict(X_train)
        ssnpkm.fit(X_train, y_km)
        X_ssnpkm = ssnpkm.transform(X_train)
        y_pred_km = ssnpkm.predict(X_train)
        # test "accuracy" for SSNP-KMeans is agreement between K-means'
        # own assignment of the test points and SSNP's prediction
        y_pred_km_test_cl = C.predict(X_test)
        y_pred_km_test = ssnpkm.predict(X_test)
        results.append((dataset_name, 'SSNP-GT', 'train', np.mean(y_train == y_pred)))
        results.append((dataset_name, 'SSNP-KMeans', 'train', np.mean(y_km == y_pred_km)))
        results.append((dataset_name, 'SSNP-GT', 'test', np.mean(y_test == y_pred_test)))
        results.append((dataset_name, 'SSNP-KMeans', 'test', np.mean(y_pred_km_test_cl == y_pred_km_test)))
    # persist all accuracies as one CSV
    df = pd.DataFrame(results, columns=[ 'dataset_name',
                                         'test_name',
                                         'type',
                                         'acc'])
    df.to_csv(os.path.join(output_dir, 'metrics.csv'), header=True, index=None)
|
{"hexsha": "93b205d02b30c971bb62733ca39101fe7f650442", "size": 4332, "ext": "py", "lang": "Python", "max_stars_repo_path": "code/experiment3_clustering.py", "max_stars_repo_name": "arturandre/ssnp", "max_stars_repo_head_hexsha": "9c97ce96ee3f35b2db6294c2634034540e96f079", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-07-24T18:31:36.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-10T10:16:14.000Z", "max_issues_repo_path": "code/experiment3_clustering.py", "max_issues_repo_name": "arturandre/ssnp", "max_issues_repo_head_hexsha": "9c97ce96ee3f35b2db6294c2634034540e96f079", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "code/experiment3_clustering.py", "max_forks_repo_name": "arturandre/ssnp", "max_forks_repo_head_hexsha": "9c97ce96ee3f35b2db6294c2634034540e96f079", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2021-07-24T18:40:36.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-10T10:16:15.000Z", "avg_line_length": 30.5070422535, "max_line_length": 139, "alphanum_fraction": 0.6221144968, "include": true, "reason": "import numpy", "num_tokens": 1163}
|
import numpy as np
from numpy import logical_and
import timeit
from skimage import io
import os
from os import listdir
from os.path import isfile, join
import sys
from exif import Image
def Locate_Columns(input_hologram,Standard_Deviation_First_Threshold,Begin_Time,time_slices):
    """Flag candidate column regions in a hologram stack.

    Marks an approximately 10x10 pixel neighbourhood (clamped at the array
    border) around every voxel whose intensity exceeds
    ``mean + k * std`` of the whole stack, across all z-slices.

    Args:
        input_hologram: 3-D array (z, y, x) of intensities.
        Standard_Deviation_First_Threshold: ``k`` in the cutoff above.
        Begin_Time: ``timeit.default_timer()`` stamp, used only for the
            progress message.
        time_slices: zero-based index of the current time slice (logging).

    Returns:
        Binary mask array with the same shape as ``input_hologram``.
    """
    input_mean = np.mean(input_hologram)
    input_std = np.std(input_hologram)
    cutoff = input_mean+Standard_Deviation_First_Threshold*input_std
    array_data = np.zeros(input_hologram.shape)
    Locations = np.argwhere(input_hologram>=cutoff)
    for x in range(len(Locations)):
        row, col = Locations[x][1], Locations[x][2]
        # clamp the lower bound: a plain "row - 5" goes negative near the
        # border, which silently produced a wrong (usually empty) slice
        array_data[:, max(0, row - 5):row + 5, max(0, col - 5):col + 5] = 1
    print(f'[Time Slice #{time_slices+1}] Finished Column Thresholding ({np.round(timeit.default_timer() - Begin_Time,3)} seconds)')
    return array_data
def Identify_Columns(array_data,input_hologram,Percentage_Cutoff_Column_Threshold,Standard_Deviation_First_Threshold,Minimum_Pixel_Count_For_Column,Begin_Time,time_slices):
    """Group thresholded pixels into connected blobs ("columns") and
    re-threshold each blob at a fraction of its own maximum.

    Steps:
      1. 8-connected flood fill over the non-zero pixels of the first
         z-slice of ``array_data``, assigning an integer blob id to each
         connected component.
      2. For every blob, record mean/std/max over the full z-stack and a
         per-blob cutoff of ``Percentage_Cutoff_Column_Threshold * max``.
      3. Drop blobs whose pixel count is below
         ``Minimum_Pixel_Count_For_Column``.
      4. Keep only the voxels of each surviving blob that exceed that
         blob's cutoff.

    Returns a float32 array shaped like ``input_hologram`` holding the
    surviving voxel intensities (zero elsewhere).

    NOTE(review): ``blob_identifier[0,2] = 1`` pre-marks the first pixel
    before any blob is grown, and the code indexes ``[0]`` of argwhere
    results — presumably this assumes at least one non-zero pixel exists;
    an empty mask would raise IndexError. Confirm with callers.
    """
    # work on the first slice only: the mask is identical across z
    column_data = np.copy(array_data[0,:,:])
    array_data = []
    non_zeros = np.argwhere(column_data!=0)
    # one row per non-zero pixel: (row, col, blob_id); blob_id 0 = unassigned
    blob_identifier = np.zeros((non_zeros.shape[0],3))
    blob_identifier[:,:2] = non_zeros
    non_zeros_remaining = np.copy(non_zeros)
    Locations_To_Still_Check = []
    Locations_Deleted = []
    # the 8 neighbour offsets for connected-component growth
    order_of_checks = np.zeros((8,2))
    order_of_checks[0,:] = np.array([-1,-1])
    order_of_checks[1,:] = np.array([-1,0])
    order_of_checks[2,:] = np.array([-1,1])
    order_of_checks[3,:] = np.array([0,-1])
    order_of_checks[4,:] = np.array([0,1])
    order_of_checks[5,:] = np.array([1,-1])
    order_of_checks[6,:] = np.array([1,0])
    order_of_checks[7,:] = np.array([1,1])
    blob_identification = 0
    blob_identifier[0,2] = 1
    # grow blobs until every pixel has a blob id
    while len(np.argwhere(blob_identifier[:,2] == 0)) > 0:
        blob_identification += 1
        print(f'[Time Slice #{time_slices+1}] Building blob #{blob_identification} ({np.round(timeit.default_timer() - Begin_Time,3)} seconds)')
        # seed: first still-unassigned pixel
        blob_starting_check_position = np.argwhere(blob_identifier[:,2]==0)[0][0]
        # enqueue unassigned 8-neighbours of the seed
        for x in range(8):
            if len(np.argwhere(logical_and(blob_identifier[:,0]==blob_identifier[blob_starting_check_position][0]+order_of_checks[x,0],blob_identifier[:,1]==blob_identifier[blob_starting_check_position][1]+order_of_checks[x,1]))) != 0:
                if blob_identifier[np.argwhere(logical_and(blob_identifier[:,0]==blob_identifier[blob_starting_check_position][0]+order_of_checks[x,0],blob_identifier[:,1]==blob_identifier[blob_starting_check_position][1]+order_of_checks[x,1]))[0][0],2]==0:
                    Locations_To_Still_Check.append(np.argwhere(logical_and(blob_identifier[:,0]==blob_identifier[blob_starting_check_position][0]+order_of_checks[x,0],blob_identifier[:,1]==blob_identifier[blob_starting_check_position][1]+order_of_checks[x,1]))[0][0])
                    blob_identifier[np.argwhere(logical_and(blob_identifier[:,0]==blob_identifier[blob_starting_check_position][0]+order_of_checks[x,0],blob_identifier[:,1]==blob_identifier[blob_starting_check_position][1]+order_of_checks[x,1]))[0][0],2] = blob_identification
        # breadth-first expansion of the pending queue
        while(len(Locations_To_Still_Check)>0):
            for x in range(8):
                if len(np.argwhere(logical_and(blob_identifier[:,0]==blob_identifier[Locations_To_Still_Check[0]][0]+order_of_checks[x,0],blob_identifier[:,1]==blob_identifier[Locations_To_Still_Check[0]][1]+order_of_checks[x,1]))) != 0:
                    if blob_identifier[np.argwhere(logical_and(blob_identifier[:,0]==blob_identifier[Locations_To_Still_Check[0]][0]+order_of_checks[x,0],blob_identifier[:,1]==blob_identifier[Locations_To_Still_Check[0]][1]+order_of_checks[x,1]))[0][0],2]==0:
                        Locations_To_Still_Check.append(np.argwhere(logical_and(blob_identifier[:,0]==blob_identifier[Locations_To_Still_Check[0]][0]+order_of_checks[x,0],blob_identifier[:,1]==blob_identifier[Locations_To_Still_Check[0]][1]+order_of_checks[x,1]))[0][0])
                        blob_identifier[np.argwhere(logical_and(blob_identifier[:,0]==blob_identifier[Locations_To_Still_Check[0]][0]+order_of_checks[x,0],blob_identifier[:,1]==blob_identifier[Locations_To_Still_Check[0]][1]+order_of_checks[x,1]))[0][0],2] = blob_identification
            Locations_To_Still_Check.remove(Locations_To_Still_Check[0])
    # per-blob statistics over the full z-stack
    blob_library = []
    blob_mean = []
    blob_std = []
    blob_max = []
    column_cutoff_max = []
    Cutoff_Blob_Identifier = np.zeros(input_hologram.shape).astype('<f4')
    Cutoff_Blob_Identifier_Layer = np.zeros((input_hologram.shape[1],input_hologram.shape[2]))
    for x in range(blob_identification+1):
        if x!=0:
            print(f'[Time Slice #{time_slices+1}] Indexing blob #{x} ({np.round(timeit.default_timer() - Begin_Time,3)} seconds)')
            blob_library.append(input_hologram[:,blob_identifier[blob_identifier[:,2]==x][:,0].astype(int),blob_identifier[blob_identifier[:,2]==x][:,1].astype(int)])
            blob_mean.append(np.mean(input_hologram[:,blob_identifier[blob_identifier[:,2]==x][:,0].astype(int),blob_identifier[blob_identifier[:,2]==x][:,1].astype(int)]))
            blob_std.append(np.std(input_hologram[:,blob_identifier[blob_identifier[:,2]==x][:,0].astype(int),blob_identifier[blob_identifier[:,2]==x][:,1].astype(int)]))
            blob_max.append(np.max(input_hologram[:,blob_identifier[blob_identifier[:,2]==x][:,0].astype(int),blob_identifier[blob_identifier[:,2]==x][:,1].astype(int)]))
            # per-blob intensity cutoff: fraction of the blob's max
            cutoff_max = Percentage_Cutoff_Column_Threshold*np.max((input_hologram[:,blob_identifier[blob_identifier[:,2]==x][:,0].astype(int),blob_identifier[blob_identifier[:,2]==x][:,1].astype(int)]))
            column_cutoff_max.append(cutoff_max)
            Cutoff_Blob_Identifier_Layer[blob_identifier[blob_identifier[:,2]==x][:,0].astype(int),blob_identifier[blob_identifier[:,2]==x][:,1].astype(int)] = x
    # broadcast the 2-D blob-id map to every z-slice
    Cutoff_Blob_Identifier[:,:,:] = Cutoff_Blob_Identifier_Layer
    # drop blobs smaller than the minimum pixel count
    Column_Removal_Slice = np.copy(Cutoff_Blob_Identifier[0,:,:])
    Columns_Removed_Check = 0
    for x in range(blob_identification+1):
        if x != 0:
            if np.count_nonzero(Column_Removal_Slice==x) < Minimum_Pixel_Count_For_Column:
                print(f'[Time Slice #{time_slices+1}] Removed column for blob #{x} ({np.round(timeit.default_timer() - Begin_Time,3)} seconds)')
                Cutoff_Blob_Identifier[Cutoff_Blob_Identifier==x] = 0
                if Columns_Removed_Check == 0:
                    Columns_Removed_Check = 1
    if Columns_Removed_Check == 0:
        print(f'[Time Slice #{time_slices+1}] No columns were removed below a pixel count of {Minimum_Pixel_Count_For_Column} ({np.round(timeit.default_timer() - Begin_Time,3)} seconds)')
    # keep only voxels of surviving blobs that exceed their blob's cutoff
    Blobs_Found = np.zeros(input_hologram.shape).astype('<f4')
    Blobs_Found_Numbers = []
    Column_Removal_Slice = np.copy(Cutoff_Blob_Identifier[0,:,:])
    for x in range(blob_identification+1):
        if x!=0:
            if np.count_nonzero(Column_Removal_Slice==x) > 0:
                print(f'[Time Slice #{time_slices+1}] Thresholding Column #{x} ({np.round(timeit.default_timer() - Begin_Time,3)} seconds)')
                Cutoff_Blob_Identifier_Temp = np.copy(Cutoff_Blob_Identifier)
                Cutoff_Blob_Identifier_Temp[Cutoff_Blob_Identifier_Temp!=x] = 0
                # column_cutoff_max is 0-based while blob ids are 1-based, hence x-1
                Blobs_Found[logical_and(Cutoff_Blob_Identifier_Temp==x,input_hologram>=column_cutoff_max[x-1])] = input_hologram[logical_and(Cutoff_Blob_Identifier_Temp==x,input_hologram>=column_cutoff_max[x-1])]
    print(f'[Time Slice #{time_slices+1}] Done with unbuffered array ({np.round(timeit.default_timer() - Begin_Time,3)} seconds)')
    # release the large intermediates before returning
    Cutoff_Blob_Identifier = []
    Cutoff_Blob_Identifier_Layer = []
    return Blobs_Found
def Column_Thresholding(input_hologram,Standard_Deviation_First_Threshold,Percentage_Cutoff_Column_Threshold,Minimum_Pixel_Count_For_Column,Begin_Time,time_slices):
    """Run the full column-thresholding pipeline on one time slice.

    First flags candidate regions with a global mean + k*std cutoff
    (``Locate_Columns``), then groups them into blobs and re-thresholds
    each blob at a fraction of its own maximum (``Identify_Columns``).

    Returns the float32 array of surviving voxel intensities.
    """
    # (removed unused local 'percentage_used' — it was computed but never read)
    array_data = Locate_Columns(input_hologram,Standard_Deviation_First_Threshold,Begin_Time,time_slices)
    Blobs_Found = Identify_Columns(array_data,input_hologram,Percentage_Cutoff_Column_Threshold,Standard_Deviation_First_Threshold,Minimum_Pixel_Count_For_Column,Begin_Time,time_slices)
    print(f'[Time Slice #{time_slices+1}] Done thresholding ({np.round(timeit.default_timer() - Begin_Time,3)} seconds)')
    return Blobs_Found
|
{"hexsha": "2cf15b57a06e6295d31681b003b336bd382ffeee", "size": 8842, "ext": "py", "lang": "Python", "max_stars_repo_path": "Column_Thresholding.py", "max_stars_repo_name": "NikoRanta/DHM_Pipeline", "max_stars_repo_head_hexsha": "0c7790f71e940b42e9b362b9186cb53654f92a58", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Column_Thresholding.py", "max_issues_repo_name": "NikoRanta/DHM_Pipeline", "max_issues_repo_head_hexsha": "0c7790f71e940b42e9b362b9186cb53654f92a58", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Column_Thresholding.py", "max_forks_repo_name": "NikoRanta/DHM_Pipeline", "max_forks_repo_head_hexsha": "0c7790f71e940b42e9b362b9186cb53654f92a58", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 65.0147058824, "max_line_length": 279, "alphanum_fraction": 0.7016512101, "include": true, "reason": "import numpy,from numpy", "num_tokens": 2272}
|
import pandas as pd
import numpy as np
# import os
# from collections import OrderedDict
# import libs
# import libs.definition as lib_df
def KL(a, b):
    """Kullback-Leibler divergence ``sum(a * log(a / b))``.

    Entries where ``a == 0`` contribute 0. Assumes ``b > 0`` wherever
    ``a != 0``; no normalisation of the inputs is performed.
    """
    # np.float was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin float is the documented replacement.
    a = np.asarray(a, dtype=float)
    b = np.asarray(b, dtype=float)
    return np.sum(np.where(a != 0, a * np.log(a / b), 0))
def getKL(proto_fields,lib_df,field_to_server_freq,proto_name,KL_thresh):
    """Build an ignore-list of protocol fields whose aggregated value
    distribution is close to uniform (low KL divergence).

    For each small field (size below ``lib_df.SMALL_FIELD_THRESHOLD``),
    sums the per-server frequency vectors, normalises to a distribution,
    and compares it against the uniform distribution with ``KL``. Fields
    with ``KL < KL_thresh`` (and more than one bin) are added to the
    ignore list. All per-field KL stats are written to
    ``<proto_name>/klinfo.csv``.

    Returns:
        list: names of fields to ignore.

    NOTE(review): fields whose summed vector stayed at the scalar
    sentinel -1 (presumably "no frequency data") are also ignored;
    confirm the sentinel convention with the producer of
    field_to_server_freq.
    """
    klinfo=[]
    IGNORE_LIST=[]
    for field, f_metadata in proto_fields.items() : # for every field
        # skip large fields entirely
        if f_metadata.size >= lib_df.SMALL_FIELD_THRESHOLD:
            continue
        server_to_freq_vectors = field_to_server_freq[field]
        print("For field ", field )
        # accumulate frequency vectors across all servers
        start=-1
        pre=-1
        for s in server_to_freq_vectors:
            f_v = server_to_freq_vectors[s]
            if(start==-1):
                pre=f_v
                start=1
            else:
                pre=np.add(pre,f_v)
        pre=np.array(pre)
        print(pre,pre.shape)
        # normalise to a probability distribution
        FRQ= pre / np.sum(pre)
        print(FRQ)
        if pre.size ==1 and pre == -1:
            # sentinel: no frequency data collected for this field
            IGNORE_LIST.append(field)
        else:
            # uniform reference distribution over the same bins
            unfrm = np.ones((pre.shape)) / pre.shape[0]
            curKL=KL(FRQ, unfrm)
            k2={}
            k2['name'] = field
            k2['KL'] = curKL
            k2['size'] = pre.shape[0]
            klinfo.append(k2)
            # near-uniform multi-bin fields carry little signal: ignore
            if (curKL <KL_thresh and pre.shape[0] > 1):
                IGNORE_LIST.append(field)
                print("KL",field,curKL)
    # persist per-field KL statistics for later inspection
    kl_fname=proto_name+'/klinfo.csv'
    d2d = pd.DataFrame(klinfo)
    d2d.to_csv(kl_fname,index=False)
    return IGNORE_LIST
|
{"hexsha": "24ba56bfe660fb38505b96d49fb2a90b3b10ed4f", "size": 1749, "ext": "py", "lang": "Python", "max_stars_repo_path": "postprocess/common.py", "max_stars_repo_name": "ampmap-cmu/AmpMap", "max_stars_repo_head_hexsha": "5f2d1e3fb9863315041d37a0727a829fce06c515", "max_stars_repo_licenses": ["BSD-3-Clause-Clear"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2021-03-29T03:48:14.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-24T10:18:15.000Z", "max_issues_repo_path": "postprocess/common.py", "max_issues_repo_name": "ampmap-cmu/AmpMap", "max_issues_repo_head_hexsha": "5f2d1e3fb9863315041d37a0727a829fce06c515", "max_issues_repo_licenses": ["BSD-3-Clause-Clear"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "postprocess/common.py", "max_forks_repo_name": "ampmap-cmu/AmpMap", "max_forks_repo_head_hexsha": "5f2d1e3fb9863315041d37a0727a829fce06c515", "max_forks_repo_licenses": ["BSD-3-Clause-Clear"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.9076923077, "max_line_length": 73, "alphanum_fraction": 0.5454545455, "include": true, "reason": "import numpy", "num_tokens": 449}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Compare output data to analytical solution
# Arguments: [<show_plots (0 or 1)>] [<filenames>]
#
import sys
import numpy as np
import csv
import collections
import copy
import os
from argparse import ArgumentParser
import time
#sys.path.append('../../scripts/')
import py_reader
import json

parser = ArgumentParser()
parser.add_argument('--path')
args = parser.parse_args()

# Directory containing the solution files; defaults to the current directory.
path = args.path if args.path is not None else './'

# get all input data in the target directory, sorted by file name
files = sorted(os.listdir(path))

# extract the files that are py files (each one holds JSON-encoded solution data)
solution_files = sorted(f for f in files if ".py" in f)
print("{} files".format(len(solution_files)))

# Build full paths with os.path.join so a --path value without a trailing
# separator (e.g. --path /data) still resolves correctly.
solution_files_with_path = [os.path.join(path, f) for f in solution_files]
print(solution_files_with_path)

# load the JSON content of every solution file
data = []
for solution_with_path in solution_files_with_path:
    with open(solution_with_path, 'rt') as f:
        data.append(json.load(f))

if len(data) == 0:
    print("no data found.")
    sys.exit(0)

####################
# 1D
# Write one CSV row per dataset: the V, m, h and n solution values concatenated.
with open(os.path.join(path, 'snapshots.csv'), "wt") as csvfile:
    csvwriter = csv.writer(csvfile, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
    for dataset in data:
        ydata = py_reader.get_values(dataset, "solution", "V")
        ydata += py_reader.get_values(dataset, "solution", "m")
        ydata += py_reader.get_values(dataset, "solution", "h")
        ydata += py_reader.get_values(dataset, "solution", "n")
        csvwriter.writerow(ydata)
    # the with-statement closes the file; no explicit close() needed

sys.exit(0)
|
{"hexsha": "453f474254d71b0d6a5c6c5106b0dfbfcb54328d", "size": 1963, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/electrophysiology/monodomain/pod/scripts/snapshots.py", "max_stars_repo_name": "maierbn/opendihu", "max_stars_repo_head_hexsha": "577650e2f6b36a7306766b0f4176f8124458cbf0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 17, "max_stars_repo_stars_event_min_datetime": "2018-11-25T19:29:34.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-20T04:46:22.000Z", "max_issues_repo_path": "examples/electrophysiology/monodomain/pod/scripts/snapshots.py", "max_issues_repo_name": "maierbn/opendihu", "max_issues_repo_head_hexsha": "577650e2f6b36a7306766b0f4176f8124458cbf0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-11-12T15:15:58.000Z", "max_issues_repo_issues_event_max_datetime": "2020-12-29T15:29:24.000Z", "max_forks_repo_path": "examples/electrophysiology/monodomain/pod/scripts/snapshots.py", "max_forks_repo_name": "maierbn/opendihu", "max_forks_repo_head_hexsha": "577650e2f6b36a7306766b0f4176f8124458cbf0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2018-10-17T12:18:10.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-28T13:24:20.000Z", "avg_line_length": 25.4935064935, "max_line_length": 93, "alphanum_fraction": 0.6775343861, "include": true, "reason": "import numpy", "num_tokens": 460}
|
%!TEX root = ../thesis.tex
\chapter{Scheduling}
\ifpdf
\graphicspath{{Chapters/Figs/Raster/}{Chapters/Figs/PDF/}{Chapters/Figs/}}
\else
\graphicspath{{Chapters/Figs/Vector/}{Chapters/Figs/}}
\fi
%********************************** % Intro *****************************************
In this chapter a scheduling framework for mixed-critical applications with precedence constraint and communication costs is presented.
%********************************** % Section **************************************
\section{Introduction}
\par It has been shown \cite{MCSNPhard} that the mixed-criticality schedulability problem (preemptive or non-preemptive) is strongly NP-hard even with only two criticality levels (High and Low). Nevertheless, different approaches had been proposed in the literature. The first research was presented in 2009 by Anderson et al.\cite{Anderson09} and extended in 2010 \cite{Mollison10}. The mechanism they presented is based on assigning to high-critical tasks higher Worst Case Execution Time and different scheduling policies (e.g. level A tasks are statistic assigned and cyclic released, level B with a Partitioned-EDF scheduler, level C and D with G-EDF). Kritikakou et al.\cite{Kritikakou14} provided a new scheduling for tasks distinguished in only two levels: HI-criticality and LO-criticality. Same as proposed by S.Baruah et al.\cite{Baruah2012EDFVD} in which they describe EDF-VD (for Earliest Deadline First with Virtual Deadlines) for mixed-criticality tasks (see \cite{Zhang2014} for detailed analysis).
%\paragraph{} In the following sections is presented a model to schedule real-time, mixed-critical task-sets with precedence and periodicity through an off-line scheduling algorithm.
\paragraph{}To properly treat the problem, a formal abstraction of the real-time scheduling of mixed-critical tasks with precedence and periodicity is presented.
%********************************** % Section **************************************
\section{Problem Formulation}
As stated in the previous chapter, tasks (or threads) to be scheduled are represented as an acyclic directed graph. Every node in the graph represents one task and the edges between nodes, the communications. The node cost is the time required for the task to complete (WCET) and the edge cost is the communication cost.
\paragraph{} More formally, the tasks are defined by $G=(\Gamma,E,C,T,K)$ where $\tau_i\in\Gamma$ represents a task and $\Gamma$ the task-set. The set $E=\{e_{ij}:\forall\tau_i\to\tau_j\}$ represents the precedence constraints between $\tau_i$ and $\tau_j$ (meaning that $\tau_i$ must be completed before $\tau_j$) with its communication cost expressed in time. The Worst-Case Execution Times are expressed in $C=\{c_i:\forall\tau_i\in\Gamma\}$. The periods (or rates) are $T=\{T_i:\forall\tau_i\in\Gamma\}$ and it is assumed that every $T_i$ is an integer multiple of some base-period $\beta_T$. The criticality levels are $K=\{\chi:\forall\tau_i\in\Gamma\}$. Moreover, each task has its priority $\rho_i$ which is assigned by the scheduling algorithm and a set of accessed resources $\mathbb{R}$ manually assigned by the system designer to each task.
\par The Direct Acyclic Graph made by all the partitions (which are the group of tasks) is denoted by $\mathbb{P}=(\Pi, H, L, R)$ and called \emph{P-DAG}, where $\pi_i\in\Pi$ is a partition. The inter-partition communications are represented in $H$, $\lambda_i\in L$ and $\delta_i\in R$ are respectively the duration and the periodicity of the partition $\pi_i$. So we can define a map $\Psi:\Gamma\to\mathbb{P}$ as the partitioning algorithm. The subgraph of $G$ made by all the tasks assigned to a given partition is called T-DAG.
\par The behavioral parameters for the task $\tau_i$ that the scheduling process must define are: the starting time $s_i$ (\emph{when} the task should execute), and the core $\mu_i$ on which it will execute (\emph{where} should execute), also called \emph{affinity mask}.
%********************************** % Section **************************************
\section{Assumptions}
It is assumed that the COTS board is a connected network of processors with identical communication links (the Unified Memory Model shown in figure \ref{fig:unifiedmemorymodel}) and relatively small number of processors. This simplifies the mathematical formulation of the optimization problem and limits its computational complexity.
\par It is also assumed that the partitioning addresses the security and safety requirement. This mechanism relies on the Hypervisor, which is a trusted code (certified by the authorities) and it is the only one executing in the highest privileged mode. It ensures time and spatial isolation among partitions so the partitioning algorithm should map each task to one partition such that a fault in one partition does not affect another partition while considering the criticality as a decision variable. Moreover, interferences and inter-partition communications should be minimized.
\paragraph{} Once partitions are determined, they need to be scheduled. The problem can be split in two parts: \emph{intra-partition} scheduling and \emph{inter-partition} scheduling. In the following sections is presented a detailed description of each phase.
%********************************** % Section **************************************
\section{Partitioning}
Determine a way to measure safety is complex, hence, derive an optimization problem is not easy. In order to simplify the intra-partition scheduling and enforce determinism it is important that all the tasks inside a partition have the same period (or eventually integer multiple of the partition rate). To understand the rationale, assume that a task $\tau_i$ assigned to \TP{1} needs to be activated at time $t_1<t_{L_1}$ and $t_2>t_{L_2}$ as shown in figure \ref{fig:PartitionRationale}
\begin{figure}[htbp]
\centering
\includegraphics[width=0.7\textwidth]{PartitionRationale}
\caption{Non rate-base partitioning}
\label{fig:PartitionRationale}
\end{figure}
To allow this behavior, two approaches are possible:
\begin{enumerate}
\item Introduce preemption among time partition, losing determinism and the control of safety states.
\item Push the execution of $\tau_i$ to the new activation of \TP{1}. This approach leads to a lower level of determinism because $\tau_i$ now should interrupt any task assigned to \TP{2} that is executing at time $t_2$. Moreover, the worst-case execution time of \TP{2} will considerably change between different executions, leading to an over-estimation of it.
\end{enumerate}
The solution adopted in this work does not pretend to be a solution for the partitioning problem. Instead, it would be an example. The algorithm simply groups all the tasks with the same rates in partitions and then split them according to some criticality threshold, creating smaller sub-partitions. A more complex partitioning algorithm is under development.
\par As stated before, the result of the partitioning is another DAG, called P-DAG, each of them with its T-DAG. Now the inter-partition and intra-partition scheduling can be introduced.
%********************************** % Section **************************************
\section{Tasks Allocation and Scheduling}
\subsection{Intra-Partition}
In order to schedule partitions the execution time of the partition itself must be estimated. This optimization schedules a T-DAG on all the available $|M|$ processors. The solution space of this problem is spawned by all possible processor assignments combined with all possible task orderings that satisfy the partial order expressed by the T-DAG. The tasks are to be assigned in such a way as to minimize the total computation time required to execute that partition. This is also referred as reducing the makespan. The optimization problem that solves this problem presented below is based on the one proposed by S.Venugopalan and O.Sinnen \cite{ILP}.
\paragraph{} For each task $\tau_i\in\Gamma$, let $s_i$ the starting time, $\mu_i$ the core on which it will be executed and $\gamma_i$ the cost of all outgoing communications. Let $W$ the makespan and $|M|$ the number of available cores. Moreover, let $\delta^-(i)$ the set of tasks that need to be completed before task $\tau_i$. Some tasks cannot execute in parallel with another due the shared resource they are going to use, $\mathcal{I}$ is matrix that represent \emph{parallel incompatibilities}, the component $\mathcal{I}_{ij}$ is equal to one if $\tau_i$ and $\tau_j$ cannot execute in parallel, formally:
\[
\mathcal{I}_{i,j}=
\begin{cases}
1\quad \text{if } \tau_i \text{ and } \tau_j \text{ share at least one resource}\\
0\quad\text{otherwise}
\end{cases}
\]
Let the variable $x_{ik}$ is one if task $\tau_i$ is assigned to the processor $\mu_k$, zero otherwise. In order to control the scheduling behavior, define the following set of binary variables:
\[
\forall \tau_i,\tau_j\in\Gamma \quad \sigma_{ij}=
\begin{cases}
1\quad s_i+c_i\leq s_j \\
0\quad\text{otherwise}
\end{cases}
\]
\[
\forall \tau_i,\tau_j\in\Gamma \quad \epsilon_{ij}=
\begin{cases}
1\quad\mu_i<\mu_j \\
0\quad\text{otherwise}
\end{cases}
\]
The resulting MILP problem is
\begin{align}
\min & \quad & W \label{eq:milp1}\\
\forall\tau_i\in\Gamma & & s_i+c_i\leq W \label{eq:milp2}\\
\forall\tau_i\neq\tau_j\in\Gamma & & s_j-s_i-c_i-\gamma_i-(\sigma_{ij}-1)W_{\max}\geq 0 \label{eq:milp3}\\
\forall\tau_i\neq\tau_j\in\Gamma & & \mu_j-\mu_i-1-(\epsilon_{ij}-1)M\geq 0 \label{eq:milp4}\\
\forall\tau_i\neq\tau_j\in\Gamma & & \sigma_{ij}+\sigma_{ji}+\epsilon_{ij}+\epsilon_{ji}\geq 1 \label{eq:milp5}\\
\forall(i,j):\mathcal{I}_{ij}=1 & & s_i+c_i+\gamma_i-s_j\leq W_{\max}(1-\sigma_{ij}) \label{eq:milp6}\\
\forall(i,j):\mathcal{I}_{ij}=1 & & s_j+c_j+\gamma_j-s_i\leq W_{\max}\sigma_{ij} \label{eq:milp7}\\
\forall\tau_i\neq\tau_j\in\Gamma & & \sigma_{ij}+\sigma_{ji}\leq 1 \label{eq:milp8}\\
\forall\tau_i\neq\tau_j\in\Gamma & & \epsilon_{ij}+\epsilon_{ji}\leq 1 \label{eq:milp9}\\
\forall\tau_j\in\Gamma:\tau_i\in\delta^-(j) & & \sigma_{ij}=1 \label{eq:milp10}\\
\forall\tau_i\in\Gamma & & \sum_{k\in |M|} kx_{ik}=\mu_i \label{eq:milp11}\\
\forall\tau_i\in\Gamma & & \sum_{k\in |M|} x_{ik}=1 \label{eq:milp12}\\
& & 0\leq W \leq W_{\max} \label{eq:milp13}\\
\forall\tau_i\in\Gamma & & s_i\geq 0 \label{eq:milp14}\\
\forall\tau_i\in\Gamma & & \mu_i\in \{1,...,|M|\} \label{eq:milp15}\\
\forall\tau_i\in\Gamma,k\in |M| & & x_{ij}\in\{0,1\} \label{eq:milp16}\\
\forall\tau_i,\tau_j\in\Gamma & & \sigma_{ij},\epsilon_{ij} \in\{0,1\} \label{eq:milp17}
\end{align}
Where $W_{\max}$ is an upper bound for the makespan $W$. It can be computed as all the tasks were executed on a single core (so it is the sum of computational cost and communication costs) or with some heuristics.
\par The formulation is a min-max problem: this is achieved by minimizing the makespan $W$ while introducing the constraint (\ref{eq:milp2}). Constraint (\ref{eq:milp3}) imposes the partial order on the tasks in terms of the $\sigma$ variables. Constraint (\ref{eq:milp4}) imposes the multi-core usage. Constraint (\ref{eq:milp5}) imposes that at least one of the following is true: $\tau_i$ must finish before $\tau_j$ starts and/or $\mu_i<\mu_j$. Constraints (\ref{eq:milp6}) and (\ref{eq:milp7}) avoid that two tasks that share a common resource execute in parallel. By (\ref{eq:milp8}) and (\ref{eq:milp9}) a task cannot be before and after another task in both time and cores. Constraint (\ref{eq:milp10}) enforces the task precedences defined by the T-DAG. Constraints (\ref{eq:milp11}) link the assignment variable $x$ with the core variables $\mu$ and finally (\ref{eq:milp12}) ensures that any given task runs only on one core.
\par The complexity in terms of constraint and variables, depends on $|G|$, $|E|$, $|M|$ and $|\mathcal{I}|$. Assuming that the number of processor $|M|$ and the number of shared resources $|\mathcal{I}|$ are small, then the MILP complexity is dominated by (\ref{eq:milp10}) which generates $O(|G||E|)$ constraints. In the worst case scenario $|E|=|G|(|G|-1)/2$, however, for task-sets representing real applications, we usually have $O(|E|)=O(|G|)$, hence the overall complexity is $O(|G|^2)$.
\paragraph{} Once a T-DAG related to a partition is scheduled the makespan of the schedule is the Worst-Case Execution Time of the partition itself. Moreover, the variables $s_i$ and $\mu_i$ for each task $\tau_i\in\Gamma$ are known so the priorities can be computed.
\subsection{Inter-Partition}\label{interpartition}
The inter-partition schedule is analogous to the problem of scheduling the P-DAG on a single core. Indeed, each Resource Partition is assigned to a Time Partition that is as big as the total Worst-Case-Execution-Time of the Resource Partition contained (this amount of time can be estimated after the intra-partition schedule). In PikeOS, Time partitions are scheduled according to a statically-assigned schedule scheme as if they were on a single core.
\par The problem of scheduling tasks on a single core have received substantial attention and many algorithms are available in the literature, for a complete review see \cite{buttazzoRT} and \cite{blazewiczScheduling}.
\paragraph{} When scheduling the P-DAG, the partial order expressed by it must be satisfied. Let us introduce some concepts (as in \cite{blazewiczScheduling}). For the sake of notational simplicity, let us consider a partition like a task, so that the same notation as before can be used. In addition to the previous notation, let us introduce the \emph{arrival time} of a partition $\pi_i$ as $r_i$, which represents the moment in time in which a partition can start its execution, and the \emph{due date} $\widetilde{d}_i$ as the moment in time in which the task must be completed. These parameters, together with the periodicity, represent the real-time requirements for a given partition.
\subsubsection{Factorization}
Considering all nodes in the P-DAG, it is common to find different periodicities. In the general problem formulation it is assumed that the periodicity of each task is an integer multiple of a base-period $\beta_T$, so when they are grouped into a partition, the partition itself inherits the rate of the tasks it contains and so does this property. If $T_i=k_i\beta_T$, the \emph{Hyper-Period} or \emph{Major Time Frame} can be defined as
\begin{equation}
\Delta = \gcd(k_1,k_2,...)\beta_T\quad i=\{1,...,|\Pi|\}
\end{equation}
Inside the hyper-period some partitions $\pi_i$ should execute more than once, in general exactly $k_i$ times. In order to generalize this behavior, a \emph{factorized P-DAG} can be defined. Let denote it as $\widetilde{\mathbb{P}}=(\widetilde{\Pi},\widetilde{H},\widetilde{L},\widetilde{R})$, it is a \emph{finite repetitive precedence} of partition $\pi_i$ by exactly $k_i$ times, in a direct precedence relation. The factorization process is depicted in figure \ref{fig:Factorization}.
\begin{figure}[htbp]
\centering
\includegraphics[width=1.0\textwidth]{Factorization}
\caption{Factorization example}
\label{fig:Factorization}
\end{figure}
\subsubsection{Partitions precedence constraints}
\par Given a schedule, it is called \emph{normal} if, for any two partitions $\pi_i,\pi_j\in\Gamma$, $s_i<s_j$ implies that $\widetilde{d}_i\leq\widetilde{d}_j$ or $r_j>s_i$. Release times and deadlines are called \emph{consistent with the precedence relation} if $\pi_i\to\pi_j$ implies that $r_i+\delta t\leq r_j$ and $\widetilde{d}_i\leq\widetilde{d}_j-\delta t$, where $\delta t$ represents a small amount of time (basically the scheduling decision tick-time). The following lemma proves that the precedence constraints are not of essential relevance if there is only one processor.
\begin{lemma}\label{eq:precedenceLemma}
If the release times and deadlines are consistent with the precedence relation, then any normal one-processor schedule that satisfies the release times and deadlines must also obey the precedence relation.
\end{lemma}
\begin{proof}
Consider a normal schedule, and suppose that $\pi_i\to\pi_j$ but $s_i>s_j$. By the consistency assumption we have $r_i<r_j$ and $\widetilde{d}_i<\widetilde{d}_j$. However, these, together with $r_j\leq s_j$, cause a violation of the assumption that the schedule is normal, a contradiction from which the result follows.
\end{proof}
This lemma ensures that release times and deadlines can be made consistent with the precedence relation if they are redefined by:
\begin{align}\label{eq:precedence}
r^{'}_{j} = & \max\big(\{r_j\}\cup\{r^{'}_i+\delta t:\pi_i\to\pi_j\} \big) \\
\widetilde{d}^{'}_j = & \min\big(\{\widetilde{d}_j\}\cup\{\widetilde{d}^{'}_i-\delta t:\pi_j\to\pi_i\} \big)
\end{align}
%\begin{align}
%r^{'}_{\alpha_j} = & \max\big(\{r_{\alpha_j}\}\cup\{r^{'}_{\alpha_i}+\delta t:\pi_{\alpha_i}\to\pi_{\alpha_j}\} \big) \\
%\widetilde{d}^{'}_{\alpha_j} = & \min\big(\{\widetilde{d}_{\alpha_j}\}\cup\{\widetilde{d}^{'}_{\alpha_i}-\delta t:\pi_{\alpha_j}\to\pi_{\alpha_i}\} \big)
%\end{align}
These changes do not alter the feasibility of any schedule. Furthermore, from lemma \ref{eq:precedenceLemma} follows that a precedence relation is essentially irrelevant when scheduling on one processor.
\subsubsection{Bratley algorithm}
Scheduling partitions with precedence constraint (or adapted arrival times and due dates) is NP-hard in the strong sense, even for integer release times and deadlines \cite{LRKB77}. Only if all tasks have unit processing times, an optimization algorithm of polynomial time complexity is available. However, Bratley et al. \cite{bratleyScheduling} proposed a branch-and-bound algorithm which solves this class of problems. Their algorithm is shortly described below.
%Scheduling Computer and Manufacturing: Bratley et al. [BFR71] , page 74
\begin{figure}[htbp]
\centering
\includegraphics[width=1.0\textwidth]{Bratley}
\caption{Search tree example for Bratley et al. algorithm}
\label{fig:bratley}
\end{figure}
\paragraph{} All possible partition schedules are implicitly enumerated by a search tree as shown in Figure \ref{fig:bratley} (for three partitions). Each node $v_{ij}$ of the tree represents the assignment of $i-1$ partitions and a new unscheduled one to be in the $i$-th position of the schedule scheme, with $i = \{1,...,N\}$, $N=|\widetilde{\mathbb{P}}|$. On each level $i$, there are $N-i+1$ new nodes generated from each node of the preceding level. Hence, all the $N!$ possible schedule will be enumerated. To each node is associated the completion time of the corresponding partial schedule.
\par The order in which the nodes of the tree are examined is based on a \emph{backtracking
search strategy}. Moreover, the algorithm uses two criteria to bound the solution space.
\begin{enumerate}
\item Exceeding deadlines. Consider a node $v_{ij}$ of the tree where one of the $i$ partitions exceed its due date, it will certainly exceed its deadline if other partitions are scheduled after it. Therefore, the node with all the sub-tree may be excluded from further consideration.
\item Problem decomposition. Consider a node $v_{ij}$ where an unscheduled partition is assigned to the $i$-th position of the schedule scheme. If the completion time of this partial schedule is less than or equal to the smallest release time among the yet unscheduled partitions, then the problem decomposes at level $i$, and there is no need to backtrack beyond level $i$. This follows from the fact that the best schedule for the remaining $N-i$ partitions may not be started before the smallest of their release times.
\end{enumerate}
After enumerating all the possible $N!$ the best schedule according to an objective function can be selected. A common objective function is the makespan minimization.
%If the objective function is the makespan minimization, a sufficient but not necessary condition for optimality can be derived. Let define a \emph{block} as a group of partitions such that the first partition starts at its release time and all the following partitions to the end of the schedule are processed without idle times. Thus the length of a block is the sum of processing times of the partition within that block. If a block has the property that the release times of all the partitions within the block are greater than or equal to the release time of the first partition in the block (in that case we will say that \emph{"the block satisfies the release time property"}), then the schedule found for this block is clearly optimal. A block satisfying the release time property may be found by scanning the given schedule, starting from the last partition and attempting to find a group of tasks of the described property. Then, from the definition follow the lemma
%\begin{lemma}
%If a schedule for for a single-core, with starting time and due date, satisfies the release time property then it is one optimal solution for the makespan minimization.
%\end{lemma}
%********************************** % Section **************************************
\section{Priority assignment}\label{sec:priorityassignment}
Priority assignment is required to allow the operating system scheduler to execute tasks according to the optimal schedule.
\par Let us assume that each thread has its affinity mask, meaning that it can execute only on the core specified by it and that the scheduler is a priority-based FIFO queue. To enforce the non-preemptive behavior for the tasks inside a partition, threads on the same core must have \emph{strictly monotonically decreasing} priorities. Here, to derive a correct assignment algorithm, an assumption on the implementation is required. Priorities alone cannot ensure mutual exclusion on communications memory locations. These shared memory regions are accessed only by communicating threads, and they can be placed:
\begin{itemize}
\item On the same core: so priorities can ensure that the inputs are fulfilled, indeed the task with lower criticality will not execute before higher priority task.
\item On different core: so spinlocks can be used.
\end{itemize}
The use of spinlocks for inter-core synchronization is suggested because they avoid overhead from operating system process rescheduling or context switching. Moreover, spinlocks are efficient if tasks are likely to be blocked for only short periods, which is true to a certain degree that depends on the worst-case timing analysis.
\paragraph{} A simple yet effective way to achieve this result is through a Linear Programming optimization problem:
\begin{equation}
\begin{cases}
\min \sum\rho_i \\
\rho_i - \rho_j \leq -1 \quad
\begin{matrix}
\text{for each consecutive task } \tau_i,\tau_j \text{ on the same core} \\
\text{for each communication edge } e_{ij} \text{ between cores}
\end{matrix} \\
\rho_{\min} \leq \rho_i \leq \rho_{\max}
\end{cases}
\end{equation}
where $\rho_i$ is the priority assigned to task $\tau_i\in\Gamma$. This class of problems can be solved in polynomial time \cite{polyLP}.
\paragraph{} Usually an operating system can only handle a finite set of priority values, for this reason the variable $\rho$ is bounded. However, if the schedule priority assignment does not use all the possible priority values, it is possible to create a gap below and above the partition to allow the execution of sporadic tasks. For example, this behavior can be easily implemented utilizing the background PikeOS partition \TP{0}. The result is depicted in figure \ref{fig:PriorityAssignment}.
\begin{figure}[htbp]
\centering
\includegraphics[width=1.0\textwidth]{PriorityAssignment}
\caption{Priority Assignment }
\label{fig:PriorityAssignment}
\end{figure}
|
{"hexsha": "53564cb92f8325176fa077fc6277bf9d797be837", "size": 23777, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "Chapters/scheduling.tex", "max_stars_repo_name": "pantonante/EMC2-thesis", "max_stars_repo_head_hexsha": "47cf8aff592557c1ca990404dc7c079e09307262", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Chapters/scheduling.tex", "max_issues_repo_name": "pantonante/EMC2-thesis", "max_issues_repo_head_hexsha": "47cf8aff592557c1ca990404dc7c079e09307262", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Chapters/scheduling.tex", "max_forks_repo_name": "pantonante/EMC2-thesis", "max_forks_repo_head_hexsha": "47cf8aff592557c1ca990404dc7c079e09307262", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 117.7079207921, "max_line_length": 1015, "alphanum_fraction": 0.748580561, "num_tokens": 6120}
|
[STATEMENT]
lemma append_cols_mult_left:
assumes A: "A \<in> carrier_mat n a"
and B: "B \<in> carrier_mat n b"
and P: "P \<in> carrier_mat n n"
shows "P * (A @\<^sub>c B) = (P*A) @\<^sub>c (P*B)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. P * (A @\<^sub>c B) = P * A @\<^sub>c P * B
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. P * (A @\<^sub>c B) = P * A @\<^sub>c P * B
[PROOF STEP]
let ?P = "four_block_mat P (0\<^sub>m n 0) (0\<^sub>m 0 n) (0\<^sub>m 0 0)"
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. P * (A @\<^sub>c B) = P * A @\<^sub>c P * B
[PROOF STEP]
have "P = ?P"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. P = four_block_mat P (0\<^sub>m n 0) (0\<^sub>m 0 n) (0\<^sub>m 0 0)
[PROOF STEP]
by (rule eq_matI, auto)
[PROOF STATE]
proof (state)
this:
P = four_block_mat P (0\<^sub>m n 0) (0\<^sub>m 0 n) (0\<^sub>m 0 0)
goal (1 subgoal):
1. P * (A @\<^sub>c B) = P * A @\<^sub>c P * B
[PROOF STEP]
hence "P * (A @\<^sub>c B) = ?P * (A @\<^sub>c B)"
[PROOF STATE]
proof (prove)
using this:
P = four_block_mat P (0\<^sub>m n 0) (0\<^sub>m 0 n) (0\<^sub>m 0 0)
goal (1 subgoal):
1. P * (A @\<^sub>c B) = four_block_mat P (0\<^sub>m n 0) (0\<^sub>m 0 n) (0\<^sub>m 0 0) * (A @\<^sub>c B)
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
P * (A @\<^sub>c B) = four_block_mat P (0\<^sub>m n 0) (0\<^sub>m 0 n) (0\<^sub>m 0 0) * (A @\<^sub>c B)
goal (1 subgoal):
1. P * (A @\<^sub>c B) = P * A @\<^sub>c P * B
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
P * (A @\<^sub>c B) = four_block_mat P (0\<^sub>m n 0) (0\<^sub>m 0 n) (0\<^sub>m 0 0) * (A @\<^sub>c B)
goal (1 subgoal):
1. P * (A @\<^sub>c B) = P * A @\<^sub>c P * B
[PROOF STEP]
have "?P * (A @\<^sub>c B) = four_block_mat (P * A + 0\<^sub>m n 0 * 0\<^sub>m 0 (dim_col A))
(P * B + 0\<^sub>m n 0 * 0\<^sub>m 0 (dim_col B)) (0\<^sub>m 0 n * A + 0\<^sub>m 0 0 * 0\<^sub>m 0 (dim_col A))
(0\<^sub>m 0 n * B + 0\<^sub>m 0 0 * 0\<^sub>m 0 (dim_col B))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. four_block_mat P (0\<^sub>m n 0) (0\<^sub>m 0 n) (0\<^sub>m 0 0) * (A @\<^sub>c B) = four_block_mat (P * A + 0\<^sub>m n 0 * 0\<^sub>m 0 (dim_col A)) (P * B + 0\<^sub>m n 0 * 0\<^sub>m 0 (dim_col B)) (0\<^sub>m 0 n * A + 0\<^sub>m 0 0 * 0\<^sub>m 0 (dim_col A)) (0\<^sub>m 0 n * B + 0\<^sub>m 0 0 * 0\<^sub>m 0 (dim_col B))
[PROOF STEP]
unfolding append_cols_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. four_block_mat P (0\<^sub>m n 0) (0\<^sub>m 0 n) (0\<^sub>m 0 0) * four_block_mat A B (0\<^sub>m 0 (dim_col A)) (0\<^sub>m 0 (dim_col B)) = four_block_mat (P * A + 0\<^sub>m n 0 * 0\<^sub>m 0 (dim_col A)) (P * B + 0\<^sub>m n 0 * 0\<^sub>m 0 (dim_col B)) (0\<^sub>m 0 n * A + 0\<^sub>m 0 0 * 0\<^sub>m 0 (dim_col A)) (0\<^sub>m 0 n * B + 0\<^sub>m 0 0 * 0\<^sub>m 0 (dim_col B))
[PROOF STEP]
by (rule mult_four_block_mat, insert A B P, auto)
[PROOF STATE]
proof (state)
this:
four_block_mat P (0\<^sub>m n 0) (0\<^sub>m 0 n) (0\<^sub>m 0 0) * (A @\<^sub>c B) = four_block_mat (P * A + 0\<^sub>m n 0 * 0\<^sub>m 0 (dim_col A)) (P * B + 0\<^sub>m n 0 * 0\<^sub>m 0 (dim_col B)) (0\<^sub>m 0 n * A + 0\<^sub>m 0 0 * 0\<^sub>m 0 (dim_col A)) (0\<^sub>m 0 n * B + 0\<^sub>m 0 0 * 0\<^sub>m 0 (dim_col B))
goal (1 subgoal):
1. P * (A @\<^sub>c B) = P * A @\<^sub>c P * B
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
four_block_mat P (0\<^sub>m n 0) (0\<^sub>m 0 n) (0\<^sub>m 0 0) * (A @\<^sub>c B) = four_block_mat (P * A + 0\<^sub>m n 0 * 0\<^sub>m 0 (dim_col A)) (P * B + 0\<^sub>m n 0 * 0\<^sub>m 0 (dim_col B)) (0\<^sub>m 0 n * A + 0\<^sub>m 0 0 * 0\<^sub>m 0 (dim_col A)) (0\<^sub>m 0 n * B + 0\<^sub>m 0 0 * 0\<^sub>m 0 (dim_col B))
goal (1 subgoal):
1. P * (A @\<^sub>c B) = P * A @\<^sub>c P * B
[PROOF STEP]
have "... = four_block_mat (P * A) (P * B) (0\<^sub>m 0 (dim_col (P*A))) (0\<^sub>m 0 (dim_col (P*B)))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. four_block_mat (P * A + 0\<^sub>m n 0 * 0\<^sub>m 0 (dim_col A)) (P * B + 0\<^sub>m n 0 * 0\<^sub>m 0 (dim_col B)) (0\<^sub>m 0 n * A + 0\<^sub>m 0 0 * 0\<^sub>m 0 (dim_col A)) (0\<^sub>m 0 n * B + 0\<^sub>m 0 0 * 0\<^sub>m 0 (dim_col B)) = four_block_mat (P * A) (P * B) (0\<^sub>m 0 (dim_col (P * A))) (0\<^sub>m 0 (dim_col (P * B)))
[PROOF STEP]
by (rule cong_four_block_mat, insert P, auto)
[PROOF STATE]
proof (state)
this:
four_block_mat (P * A + 0\<^sub>m n 0 * 0\<^sub>m 0 (dim_col A)) (P * B + 0\<^sub>m n 0 * 0\<^sub>m 0 (dim_col B)) (0\<^sub>m 0 n * A + 0\<^sub>m 0 0 * 0\<^sub>m 0 (dim_col A)) (0\<^sub>m 0 n * B + 0\<^sub>m 0 0 * 0\<^sub>m 0 (dim_col B)) = four_block_mat (P * A) (P * B) (0\<^sub>m 0 (dim_col (P * A))) (0\<^sub>m 0 (dim_col (P * B)))
goal (1 subgoal):
1. P * (A @\<^sub>c B) = P * A @\<^sub>c P * B
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
four_block_mat (P * A + 0\<^sub>m n 0 * 0\<^sub>m 0 (dim_col A)) (P * B + 0\<^sub>m n 0 * 0\<^sub>m 0 (dim_col B)) (0\<^sub>m 0 n * A + 0\<^sub>m 0 0 * 0\<^sub>m 0 (dim_col A)) (0\<^sub>m 0 n * B + 0\<^sub>m 0 0 * 0\<^sub>m 0 (dim_col B)) = four_block_mat (P * A) (P * B) (0\<^sub>m 0 (dim_col (P * A))) (0\<^sub>m 0 (dim_col (P * B)))
goal (1 subgoal):
1. P * (A @\<^sub>c B) = P * A @\<^sub>c P * B
[PROOF STEP]
have "... = (P*A) @\<^sub>c (P*B)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. four_block_mat (P * A) (P * B) (0\<^sub>m 0 (dim_col (P * A))) (0\<^sub>m 0 (dim_col (P * B))) = P * A @\<^sub>c P * B
[PROOF STEP]
unfolding append_cols_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. four_block_mat (P * A) (P * B) (0\<^sub>m 0 (dim_col (P * A))) (0\<^sub>m 0 (dim_col (P * B))) = four_block_mat (P * A) (P * B) (0\<^sub>m 0 (dim_col (P * A))) (0\<^sub>m 0 (dim_col (P * B)))
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
four_block_mat (P * A) (P * B) (0\<^sub>m 0 (dim_col (P * A))) (0\<^sub>m 0 (dim_col (P * B))) = P * A @\<^sub>c P * B
goal (1 subgoal):
1. P * (A @\<^sub>c B) = P * A @\<^sub>c P * B
[PROOF STEP]
finally
[PROOF STATE]
proof (chain)
picking this:
P * (A @\<^sub>c B) = P * A @\<^sub>c P * B
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
P * (A @\<^sub>c B) = P * A @\<^sub>c P * B
goal (1 subgoal):
1. P * (A @\<^sub>c B) = P * A @\<^sub>c P * B
[PROOF STEP]
.
[PROOF STATE]
proof (state)
this:
P * (A @\<^sub>c B) = P * A @\<^sub>c P * B
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 3396, "file": "Smith_Normal_Form_SNF_Missing_Lemmas", "length": 21}
|
[STATEMENT]
lemma carrier_possible_subset:
"carrier t \<subseteq> A \<Longrightarrow> possible t x \<Longrightarrow> x \<in> A"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>carrier t \<subseteq> A; possible t x\<rbrakk> \<Longrightarrow> x \<in> A
[PROOF STEP]
by transfer force
|
{"llama_tokens": 103, "file": "Call_Arity_TTree", "length": 1}
|
import numpy as np
from c3pO.icp._base import BaseICP
from c3pO.base import BaseScorer
from c3pO.utils import calc_p
class ICPClassifier(BaseICP):
    """Inductive conformal classifier.

    Parameters
    ----------
    nc_function : BaseModelNC
        Nonconformity scorer object used to calculate nonconformity of
        calibration examples and test patterns. Should implement ``fit(x, y)``
        and ``score(x, y)``.
    condition : callable or None
        Optional function mapping an ``(x, y)`` pair to a calibration
        category key (Mondrian conformal prediction); forwarded to
        ``BaseICP``.  NOTE(review): semantics inferred from the
        ``self.condition((x[j], c))`` lookup below -- confirm in BaseICP.
    smoothing : boolean
        Decides whether to use stochastic smoothing of p-values.

    Attributes
    ----------
    cal_x : numpy array of shape [n_cal_examples, n_features]
        Inputs of calibration set.
    cal_y : numpy array of shape [n_cal_examples]
        Outputs of calibration set.
    nc_function : BaseModelNC
        Nonconformity scorer object used to calculate nonconformity scores.
    classes : numpy array of shape [n_classes]
        List of class labels, with indices corresponding to output columns
        of ICPClassifier.predict()

    See also
    --------
    IcpRegressor

    References
    ----------
    .. [1] Papadopoulos, H., & Haralambous, H. (2011). Reliable prediction
        intervals with regression neural networks. Neural Networks, 24(8),
        842-851.
    """

    def __init__(self, nc_function, condition=None, smoothing=True):
        super().__init__(nc_function, condition)
        # Known class labels; populated during calibration via
        # _calibrate_hook().
        self.classes = None
        self.smoothing = smoothing

    def _calibrate_hook(self, x, y, increment=False):
        # Hook invoked by BaseICP during calibration: record the labels seen.
        self._update_classes(y, increment)

    def _update_classes(self, y, increment):
        """Refresh ``self.classes`` from the calibration labels ``y``."""
        if self.classes is None or not increment:
            self.classes = np.unique(y)
        else:
            # Incremental calibration: merge new labels with the known ones.
            self.classes = np.unique(np.hstack([self.classes, y]))

    def predict(self, x, significance=None):
        """Predict the output values for a set of input patterns.

        Parameters
        ----------
        x : numpy array of shape [n_samples, n_features]
            Inputs of patterns for which to predict output values.
        significance : float or None
            Significance level (maximum allowed error rate) of predictions.
            Should be a float between 0 and 1. If ``None``, then the p-values
            are output rather than the predictions.

        Returns
        -------
        p : numpy array of shape [n_samples, n_classes]
            If significance is ``None``, then p contains the p-values for each
            sample-class pair; if significance is a float between 0 and 1, then
            p is a boolean array denoting which labels are included in the
            prediction sets.
        """
        n_test_objects = x.shape[0]
        p = np.zeros((n_test_objects, self.classes.size))
        ncal_ngt_neq = self._get_stats(x)
        for i in range(self.classes.size):
            for j in range(n_test_objects):
                p[j, i] = calc_p(
                    ncal_ngt_neq[j, i, 0],
                    ncal_ngt_neq[j, i, 1],
                    ncal_ngt_neq[j, i, 2],
                    self.smoothing,
                )
        if significance is not None:
            return p > significance
        return p

    def _get_stats(self, x):
        """Count calibration scores relevant to each (test object, label) pair.

        Returns
        -------
        ncal_ngt_neq : numpy array of shape [n_samples, n_classes, 3]
            Per pair: (number of calibration scores, number strictly greater
            than the test nonconformity score, number equal to it).
        """
        n_test_objects = x.shape[0]
        ncal_ngt_neq = np.zeros((n_test_objects, self.classes.size, 3))
        for i, c in enumerate(self.classes):
            # Hypothesise that every test object carries label ``c``.
            test_class = np.full(n_test_objects, c, dtype=self.classes.dtype)

            # TODO: maybe calculate p-values using cython or similar
            # TODO: interpolated p-values
            # TODO: nc_function.calc_nc should take X * {y1, y2, ... ,yn}
            test_nc_scores = self.nc_function.score(x, test_class)
            for j, nc in enumerate(test_nc_scores):
                # cal_scores appears to be stored in descending order and is
                # reversed here so np.searchsorted sees ascending input --
                # TODO confirm against BaseICP.
                cal_scores = self.cal_scores[self.condition((x[j], c))][::-1]
                n_cal = cal_scores.size
                idx_left = np.searchsorted(cal_scores, nc, "left")
                idx_right = np.searchsorted(cal_scores, nc, "right")
                ncal_ngt_neq[j, i, 0] = n_cal
                ncal_ngt_neq[j, i, 1] = n_cal - idx_right
                ncal_ngt_neq[j, i, 2] = idx_right - idx_left
        return ncal_ngt_neq

    def predict_conf(self, x):
        """Predict the output values for a set of input patterns, using
        the confidence-and-credibility output scheme.

        Parameters
        ----------
        x : numpy array of shape [n_samples, n_features]
            Inputs of patterns for which to predict output values.

        Returns
        -------
        p : numpy array of shape [n_samples, 3]
            p contains three columns: the first column contains the most
            likely class for each test pattern; the second column contains
            the confidence in the predicted class label, and the third column
            contains the credibility of the prediction.
        """
        p = self.predict(x, significance=None)
        label = p.argmax(axis=1)
        credibility = p.max(axis=1)
        # Knock out the winning p-value so the runner-up can be found;
        # ``p`` is a fresh array returned by predict(), so mutation is safe.
        for i, idx in enumerate(label):
            p[i, idx] = -np.inf
        confidence = 1 - p.max(axis=1)
        return np.array([label, confidence, credibility]).T
|
{"hexsha": "78349e6fa0613332d4f33273ebae993fb321fd5b", "size": 5302, "ext": "py", "lang": "Python", "max_stars_repo_path": "c3pO/icp/classifier.py", "max_stars_repo_name": "maclandrol/c3pO", "max_stars_repo_head_hexsha": "3f3be5132c02f1c313b9ef5abf8264cd3d1dda31", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-10-15T09:32:08.000Z", "max_stars_repo_stars_event_max_datetime": "2021-10-15T09:32:08.000Z", "max_issues_repo_path": "c3pO/icp/classifier.py", "max_issues_repo_name": "maclandrol/c3pO", "max_issues_repo_head_hexsha": "3f3be5132c02f1c313b9ef5abf8264cd3d1dda31", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "c3pO/icp/classifier.py", "max_forks_repo_name": "maclandrol/c3pO", "max_forks_repo_head_hexsha": "3f3be5132c02f1c313b9ef5abf8264cd3d1dda31", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.3466666667, "max_line_length": 83, "alphanum_fraction": 0.5878913618, "include": true, "reason": "import numpy", "num_tokens": 1190}
|
using PrettyTables
using StatsBase
function zscore_transform(data)
    # Build a pair of closures from the sample statistics of `data`:
    # `standardise` maps values to z-scores, `destandardise` inverts it.
    m = mean(data)
    s = std(data)
    standardise(v) = (v .- m) ./ s
    destandardise(v) = v .* s .+ m
    return standardise, destandardise
end
function meanlowerupper(data, PI = (0.055, 0.945))
    # Row-wise summary of posterior draws: mean plus the (PI[1], PI[2])
    # quantile band, with the raw matrix carried along.
    rows = eachrow(data)
    return (mean = mean.(rows),
            lower = quantile.(rows, PI[1]),
            upper = quantile.(rows, PI[2]),
            raw = data)
end
function estimparam(data, PI = (0.055, 0.945))
    # Column-wise parameter summary: mean and lower/upper PI quantiles.
    cols = eachcol(data)
    return mean.(cols), quantile.(cols, PI[1]), quantile.(cols, PI[2])
end
function lin(a, b, c, x...)
    # Broadcasting linear combination: a + b*c + x[1]*x[2] + x[3]*x[4] + ...
    # Extra coefficient/covariate pairs arrive interleaved in `x`.
    acc = @. a + b * c
    for k in 1:2:length(x)
        acc = @. acc + x[k] * x[k + 1]
    end
    return acc
end
const BARS = collect("▁▂▃▄▅▆▇█")

function unicode_histogram(data, nbins = 12)
    # Render `data` as a one-line sparkline histogram built from BARS glyphs.
    h = fit(Histogram, data, nbins = nbins)  # StatsBase treats nbins as a hint
    # Scale bin weights into [0, 8): subtracting eps() keeps the largest
    # weight strictly below length(BARS), so the +1 index stays within 1:8.
    peak = maximum(h.weights)
    indices = floor.(Int, h.weights .* (length(BARS) / peak - eps())) .+ 1
    return join(BARS[i] for i in indices)
end
function precis(df::DataFrame; digits = 2, depth = Inf, alpha = 0.11)
    # Print a per-column summary of `df`: mean, std, the central (1 - alpha)
    # interval, the median, and a unicode histogram.
    # NOTE(review): `depth` is accepted for call compatibility but unused.
    # The interval labels now follow `alpha` instead of being hard-coded
    # "5.5%"/"94.5%" (which were wrong whenever alpha != 0.11).
    lo_label = "$(round(100 * alpha / 2, digits = 2))%"
    hi_label = "$(round(100 * (1 - alpha / 2), digits = 2))%"
    d = DataFrame()
    d.param = names(df)
    d.mean = mean.(eachcol(df))
    d.std = std.(eachcol(df))
    d[:, lo_label] = quantile.(eachcol(df), alpha / 2)
    d[:, "50%"] = quantile.(eachcol(df), 0.5)
    d[:, hi_label] = quantile.(eachcol(df), 1 - alpha / 2)
    # Previously the histograms were computed twice (dead variable `u`).
    d.histogram = unicode_histogram.(eachcol(df))
    for col in ["mean", "std", lo_label, "50%", hi_label]
        d[:, col] .= round.(d[:, col], digits = digits)
    end
    pretty_table(d, nosubheader = true, vlines = [0, 1, 7])
end
|
{"hexsha": "fd2599ffa9bc0b937834e73682e5d1eb0e4a485f", "size": 1963, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/tools.jl", "max_stars_repo_name": "karajan9/statisticalrethinking", "max_stars_repo_head_hexsha": "e7516c468ca182c4b1c9cae0cfd0bb1feef7ed3c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 14, "max_stars_repo_stars_event_min_datetime": "2020-06-03T14:18:11.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-05T16:52:26.000Z", "max_issues_repo_path": "src/tools.jl", "max_issues_repo_name": "karajan9/statisticalrethinking", "max_issues_repo_head_hexsha": "e7516c468ca182c4b1c9cae0cfd0bb1feef7ed3c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2020-06-13T05:56:35.000Z", "max_issues_repo_issues_event_max_datetime": "2020-08-12T14:05:57.000Z", "max_forks_repo_path": "src/tools.jl", "max_forks_repo_name": "karajan9/statisticalrethinking", "max_forks_repo_head_hexsha": "e7516c468ca182c4b1c9cae0cfd0bb1feef7ed3c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-07-01T13:00:14.000Z", "max_forks_repo_forks_event_max_datetime": "2020-07-03T23:40:53.000Z", "avg_line_length": 28.8676470588, "max_line_length": 95, "alphanum_fraction": 0.5807437596, "num_tokens": 649}
|
% Set onlycurpagenum when you do not want to show total page number.
% Set secheader if you want to show a headline with (sub)section names.
% Set infolines if you want to show a headline and a footline.
\documentclass[onlycurpagenum,infolines,xcolor=table]{beamer}
% Copyright 2017 Alick Zhao <alick9188@gmail.com>
%
% Last modified on: 2017-07-16
% This file is modified from a solution template for:
% - Giving a talk on some subject.
% - The talk is between 15min and 45min long.
% - Style is ornate.
% Copyright 2004 by Till Tantau <tantau@users.sourceforge.net>.
%
% In principle, this file can be redistributed and/or modified under
% the terms of the GNU Public License, version 2.
%
% However, this file is supposed to be a template to be modified
% for your own needs. For this reason, if you use this file as a
% template and not specifically distribute it as part of another
% package/program, I grant the extra permission to freely copy and
% modify this file as you see fit and even to delete this copyright
% notice.
\mode<presentation>
{
\usetheme{tamu}
} % end of mode presentation
\usepackage{graphicx} % includegraphics support (already specified)
%\graphicspath{{fig/}} % directories that hold graphics
\usepackage{amsmath} % for math
\usepackage{amssymb} % for additional math symbols
\usepackage{siunitx} % for si units
\usepackage[style=ieee]{biblatex}
%\usepackage{listings} % for typesetting source code
%\usepackage{algorithmic} % for typesetting algorithms
\usepackage{iftex}
\ifXeTeX
\usepackage{fontspec}
%\renewcommand\familydefault{\sfdefault} % for slides
\else
\usepackage[utf8]{inputenc}
\fi
\usepackage{calligra}
\usepackage{colortbl}
\colorlet{tableheader}{blue!40}
\colorlet{oddrow}{blue!20}
\colorlet{evenrow}{blue!10}
\newcommand*{\tamu}{Texas A\&M University}
\title{Beamer Template for \tamu}
\newcommand*{\theauthor}{Alick Zhao}
\author{\theauthor}
\institute[TAMU] % (optional, but mostly needed)
{
\tamu
}
% - Use the \inst command only if there are several affiliations.
% - Keep it simple, no one is interested in your street address.
\date % (optional)
{\today}
%\subject{Talks}
\hypersetup{
pdfauthor={Alick Zhao},
pdfsubject={Talks},
pdfkeywords={beamer,TAMU,theme},
unicode=true,
}
% Delete this, if you do not want the table of contents to pop up at
% the beginning of each subsection:
%\AtBeginSubsection[]
%{
% \begin{frame}<beamer>{Outline}
% \tableofcontents[currentsection,currentsubsection]
% \end{frame}
%}
% If you wish to uncover everything in a step-wise fashion, uncomment
% the following command:
%\beamerdefaultoverlayspecification{<+->}
% listings setup
%\lstset{basicstyle=\ttfamily,breaklines=true}
% hyperref setup
%\hypersetup{
%pdfpagemode=FullScreen,
%}
\ExecuteBibliographyOptions{citetracker=true,sorting=none}
% Suppress publisher, location, note, etc. in citations.
\AtEveryCitekey{%
%\ifentrytype{article}
%{\clearfield{title}}
%{}%
\clearlist{publisher}%
\clearlist{location}%
\clearfield{isbn}%
\clearfield{issn}%
\clearfield{note}}
% Number of each bibliography entry in brackets
\DeclareFieldFormat{labelnumberwidth}{\mkbibbrackets{#1}}
\makeatletter
% Citation number in brackets.
\renewcommand\@makefntext[1]{%
\normalfont[\@thefnmark]\enspace #1}
\makeatother
% Provide \notefullcite for footnote citations.
% cf. http://www.texdev.net/2010/03/08/biblatex-numbered-citations-as-footnotes/
% NOTE: for vanilla footnote coexistence, refer to
% http://tex.stackexchange.com/questions/20754/tuning-numbered-citations-as-footnote
% http://tex.stackexchange.com/questions/20787/biblatex-cite-with-footnote-only-once-with-use-of-brackets
\DeclareCiteCommand{\notefullcite}[\mkbibbrackets]
{\usebibmacro{cite:init}%
\usebibmacro{prenote}}
{\usebibmacro{citeindex}%
\usebibmacro{notefullcite}%
\usebibmacro{cite:comp}}
{}
{\usebibmacro{cite:dump}%
\usebibmacro{postnote}}
\newbibmacro*{notefullcite}{%
\ifciteseen
{}
{\footnotetext[\thefield{labelnumber}]{%
\usedriver{}{\thefield{entrytype}}.}}}
% Decrease the footnote size.
\let\oldfootnotesize\footnotesize
\renewcommand*{\footnotesize}{\oldfootnotesize\tiny}
\addbibresource{beamerthemetamu-refs.bib} % path to the bib database
% Disable the navigation symbol bar.
\beamertemplatenavigationsymbolsempty
\newcommand*{\tamulogo}{%
\IfFileExists{tamu-logo.pdf}{%
\includegraphics[scale=.5]{tamu-logo.pdf}}{%
}}
\begin{document}
\titlegraphic{\tamulogo}
\begin{frame}
\titlepage
\end{frame}
\begin{frame}{Outline}
\tableofcontents
% You might wish to add the option [pausesections]
\end{frame}
% Since this a solution template for a generic talk, very little can
% be said about how it should be structured. However, the talk length
% of between 15min and 45min and the theme suggest that you stick to
% the following rules:
% - Exactly two or three sections (other than the summary).
% - At *most* three subsections per section.
% - Talk about 30s to 2min per frame. So there should be between about
% 15 and 30 frames, all told.
\section{Sample Section}
\begin{frame}
\frametitle{Bullet Items}
\begin{itemize}
\item Computers/computing everywhere --- $10^4$ CPUs per person
\begin{itemize}
\item Real-world computing --- sensors and actuators
\item Massively distributed and embedded
\item Collect data and make decisions
\end{itemize}
\item Massive data --- a TeraByte per person per day
\begin{itemize}
\item Sensors, personal, scientific, business, etc\ldots
\item Extract information from this mass of data
\item Serious privacy issues
\end{itemize}
\item People will spend much time in virtual environments
\begin{itemize}
\item Integrating digital and physical worlds
\item Games, Interactive Movies, Virtual Classrooms --- many connected
to physical spaces
\end{itemize}
\end{itemize}
\end{frame}
\begin{frame}
\frametitle{A Block Example}
\begin{block}{Computers/computing everywhere --- $10^4$ CPUs per person}
\begin{itemize}
\item Real-world computing --- sensors and actuators
\item Massively distributed and embedded
\item Collect data and make decisions
\end{itemize}
\end{block}
\end{frame}
\begin{frame}{Figure/Table in Columns}
\begin{columns}
\begin{column}{.5\textwidth}
\begin{figure}[htbp]
\centering
\includegraphics{beamericonarticle.pdf}
\caption{A sample figure.}
\end{figure}
\end{column}
\begin{column}{.5\textwidth}
\begin{table}[htbp]
\centering
\caption{A sample table.}
\label{tab:sample}
\rowcolors{2}{oddrow}{evenrow}
% The rows have two cells each, so the column spec must be {cc};
% the original {ccc} declared a third, always-empty column.
\begin{tabular}{cc}
\rowcolor{tableheader}
Header & Value \\
Even & $2$\\
Odd & $3$\\
\end{tabular}
\end{table}
\end{column}
\end{columns}
\end{frame}
\begin{frame}{Formulas}
\begin{itemize}
\item Electromagnetic Wave
\begin{itemize}
\item Maxwell:
\begin{align}
\nabla \times \mathbf{E} & = - \frac{\partial
\mathbf{B}}{\partial t}\\
\nabla \times \mathbf{H} & = \mathbf{J} +
\frac{\partial \mathbf{D}}{\partial t}\\
\nabla \cdot \mathbf{D} & = \rho \\
\nabla \cdot\mathbf{B} & = 0
\label{eqn:maxwell}
\end{align}
\end{itemize}
\item Probability
\begin{itemize}
\item Normal Distribution $\mathcal{N}(\mu,\sigma^2)$:
\[
\int_{-\infty}^{\infty}\frac{1}{\sigma\sqrt{2\pi}}\mathrm{e}^{-\frac{(x-\mu)^2}{2\sigma^2}}\mathrm{d}x= 1 \, .
\]
\end{itemize}
\end{itemize}
\end{frame}
\begin{frame}{Formulas With Texts}
Formulas in plain texts: {\rmfamily $P_\text{out}$}
\begin{itemize}
\item Formulas in lists
\begin{itemize}
\item EARTH model
\begin{equation}
P_\text{in} = \begin{cases}
N_{\text{TRX}} P_0 + \Delta_\text{p} P_\text{out}, & 0 < P_\text{out} \le P_\text{max} \\
N_{\text{TRX}} P_\text{sleep}, & P_\text{out} = 0
\end{cases}
\end{equation}
\end{itemize}
\end{itemize}
\begin{exampleblock}{Formulas in blocks}
Here is the EARTH model again:\rmfamily
\begin{equation}
P_\text{in} = \begin{cases}
N_{\text{TRX}} P_0 + \Delta_\text{p} P_\text{out}, & 0 < P_\text{out} \le P_\text{max} \\
N_{\text{TRX}} P_\text{sleep}, & P_\text{out} = 0
\end{cases}
\end{equation}
\end{exampleblock}
\end{frame}
\begin{frame}
\frametitle{Title}
\framesubtitle{Subtitle}
\begin{itemize}
\item Item 1
\item Item 2
\item Footnote citations~\notefullcite{beameruserguide,shannon1948}
\end{itemize}
\end{frame}
\section{Various Samples}
\subsection{Sample Subsection}
\subsection{Another Sample Subsection}
\subsection{Yet Another Sample Subsection}
\section{Summary}
\begin{frame}{Summary}
\begin{itemize}
\item Lorem ipsum
\end{itemize}
\vspace{1cm}
\begin{itemize}
\item An outlook to the future.
\end{itemize}
\end{frame}
\appendix
\section<presentation>*{\appendixname}
% Final page.
\begin{frame}[plain]
\begin{center}
{\Huge\calligra Thank you!\\}
\bigskip
\theauthor\\
\nolinkurl{alick9188@gmail.com}\\
\smallskip
\tamulogo
\end{center}
\end{frame}
\end{document}
%%% vim: set sw=2 isk+=\: et tw=70 formatoptions+=mM:
|
{"hexsha": "12b755e55dad5fead3ae76bcbed1c54f81d9d641", "size": 9437, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "200+ beamer \u6a21\u677f\u5408\u96c6/beamerthemetamu(\u5fb7\u514b\u8428\u65afA&M\u5927\u5b66)/sample/beamerthemetamu.tex", "max_stars_repo_name": "lemoxiao/Awesome-Beamer-Collection", "max_stars_repo_head_hexsha": "3ab28a23fb60cb0a97fcec883847e2d8728b98c0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 13, "max_stars_repo_stars_event_min_datetime": "2019-07-30T04:09:54.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-24T09:27:26.000Z", "max_issues_repo_path": "200+ beamer \u6a21\u677f\u5408\u96c6/beamerthemetamu(\u5fb7\u514b\u8428\u65afA&M\u5927\u5b66)/sample/beamerthemetamu.tex", "max_issues_repo_name": "lemoxiao/Awesome-Beamer-Collection", "max_issues_repo_head_hexsha": "3ab28a23fb60cb0a97fcec883847e2d8728b98c0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "200+ beamer \u6a21\u677f\u5408\u96c6/beamerthemetamu(\u5fb7\u514b\u8428\u65afA&M\u5927\u5b66)/sample/beamerthemetamu.tex", "max_forks_repo_name": "lemoxiao/Awesome-Beamer-Collection", "max_forks_repo_head_hexsha": "3ab28a23fb60cb0a97fcec883847e2d8728b98c0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 10, "max_forks_repo_forks_event_min_datetime": "2019-11-02T03:10:26.000Z", "max_forks_repo_forks_event_max_datetime": "2021-10-12T04:13:23.000Z", "avg_line_length": 27.5935672515, "max_line_length": 120, "alphanum_fraction": 0.6902617357, "num_tokens": 2876}
|
# -*- coding: utf-8 -*-
# Modified by Yifan Gui from FAIR Detectron2, Apache 2.0 licence.
import argparse
import json
import multiprocessing as mp
import os
import time
import gc
import numpy as np
import pandas as pd
import skimage.io as io
from detectron2.config import get_cfg
from detectron2.utils.logger import setup_logger
from pcnaDeep.predictor import VisualizationDemo, pred2json, predictFrame
from pcnaDeep.data.utils import getDetectInput
def setup_cfg(args):
    """Build the detectron2 config from the parsed command-line arguments.

    Loads the config file, applies any 'KEY VALUE' overrides, sets the
    score thresholds of the builtin model heads, and freezes the result.
    """
    config = get_cfg()
    config.merge_from_file(args.config_file)
    config.merge_from_list(args.opts)
    # Apply the same confidence threshold to every builtin head.
    threshold = args.confidence_threshold
    config.MODEL.RETINANET.SCORE_THRESH_TEST = threshold
    config.MODEL.ROI_HEADS.SCORE_THRESH_TEST = threshold
    config.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH = threshold
    config.freeze()
    return config
def get_parser():
    """Build the command-line argument parser for the detection script.

    Fixes two user-facing help-text typos from the original:
    "bight field" -> "bright field" and "PCNA image file file." ->
    "PCNA image file.".  Parsing behaviour is unchanged.
    """
    parser = argparse.ArgumentParser(description="pcnaDeep script for detection stage only.")
    parser.add_argument(
        "--config-file",
        default="../config/dtrnCfg.yaml",
        metavar="FILE",
        help="path to config file",
    )
    parser.add_argument(
        "--stack_input",
        help="Path to image stack file.",
    )
    parser.add_argument(
        "--bf",
        help="Path to bright field image file.",
    )
    parser.add_argument(
        "--pcna",
        help="Path to PCNA image file.",
    )
    parser.add_argument(
        "--output",
        help="A file or directory to save outputs.",
    )
    parser.add_argument(
        "--prefix",
        help="Output file name. If not given, will deduce from inputs.",
    )
    parser.add_argument(
        "--confidence-threshold",
        type=float,
        default=0.5,
        help="Minimum score for instance predictions to be shown",
    )
    # Flags: treat the input as a single slice / emit visualised output.
    parser.add_argument(
        "--is_slice",
        action="store_true",
    )
    parser.add_argument(
        "--vis_out",
        action="store_true",
    )
    parser.add_argument(
        "--sat",
        type=float,
        help="Saturated pixel when enhancing contrast. Only applies to separate channels. Default 1",
        default=1,
    )
    parser.add_argument(
        "--gamma",
        type=float,
        help="Gamma correction factor, enhance (<1) or suppress (>1) intensity non-linearly. Default 1",
        default=1,
    )
    # REMAINDER: everything after --opts is forwarded verbatim to the config.
    parser.add_argument(
        "--opts",
        help="Modify config options using the command-line 'KEY VALUE' pairs",
        default=[],
        nargs=argparse.REMAINDER,
    )
    return parser
if __name__ == "__main__":
    # Entry point: run the pcnaDeep detector over an image stack (or a pair
    # of bright-field / PCNA channel files) and write either a VIA2 JSON
    # annotation file or a visualised TIFF stack.
    mp.set_start_method("spawn", force=True)
    args = get_parser().parse_args()
    setup_logger(name="fvcore")
    logger = setup_logger()
    logger.info("Arguments: " + str(args))

    cfg = setup_cfg(args)
    demo = VisualizationDemo(cfg)

    if args.stack_input is not None or args.bf is not None:
        # Input image must be uint8
        if args.stack_input is not None:
            imgs = io.imread(args.stack_input)
            if args.is_slice:
                imgs = np.expand_dims(imgs, axis=0)
            if args.prefix is None:
                args.prefix = os.path.basename(args.stack_input).split('.')[0]
        else:
            dic = io.imread(args.bf)
            mcy = io.imread(args.pcna)
            if args.prefix is None:
                args.prefix = os.path.basename(args.bf).split('.')[0].split('_')[0]
            if args.is_slice:
                dic = np.expand_dims(dic, axis=0)
                mcy = np.expand_dims(mcy, axis=0)
            imgs = getDetectInput(mcy, dic, sat=args.sat, gamma=args.gamma, torch_gpu=True)
            # Free the per-channel arrays; only the composed stack is needed.
            del dic, mcy
            gc.collect()

        print("Run on image shape: " + str(imgs.shape))
        imgs_out = []
        json_out = {}
        for i in range(imgs.shape[0]):
            img = imgs[i, :]
            start_time = time.time()
            if not args.vis_out:
                # Generate json output readable by VIA2
                img_relabel, out_props = predictFrame(img, i, demo, size_flt=1000, edge_flt=0)
                file_name = args.prefix + '-' + "%04d" % i + '.png'
                dic_frame = pred2json(img_relabel, out_props, file_name)
                json_out[file_name] = dic_frame
                detail = "detected {} instances".format(len(dic_frame['regions']))
            else:
                # Generate visualized output
                predictions, visualized_output = demo.run_on_image(img)
                imgs_out.append(visualized_output.get_image())
                # BUGFIX: the original logged len(dic_frame['regions']) here
                # too, but dic_frame is undefined in this branch (NameError on
                # the first frame, stale counts afterwards).
                detail = "visualized output rendered"
            logger.info(
                "{}: {} in {:.2f}s".format(
                    'frame' + str(i),
                    detail,
                    time.time() - start_time,
                )
            )

        prefix = args.prefix
        if not args.vis_out:
            with open(os.path.join(args.output, prefix + '.json'), 'w', encoding='utf8') as file:
                json.dump(json_out, file)
        else:
            out = np.stack(imgs_out, axis=0)
            io.imsave(os.path.join(args.output, prefix + '_vis.tif'), out)
|
{"hexsha": "2464a1a359dbe9153238ddf1d1cbe909c06d0264", "size": 5196, "ext": "py", "lang": "Python", "max_stars_repo_path": "bin/detect.py", "max_stars_repo_name": "Jeff-Gui/PCNAdeep", "max_stars_repo_head_hexsha": "ed4effc07e330155905b73064435d444ac857c1d", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2021-09-21T14:31:53.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-01T04:33:04.000Z", "max_issues_repo_path": "bin/detect.py", "max_issues_repo_name": "Jeff-Gui/PCNAdeep", "max_issues_repo_head_hexsha": "ed4effc07e330155905b73064435d444ac857c1d", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "bin/detect.py", "max_forks_repo_name": "Jeff-Gui/PCNAdeep", "max_forks_repo_head_hexsha": "ed4effc07e330155905b73064435d444ac857c1d", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-12-08T02:28:38.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-08T02:28:38.000Z", "avg_line_length": 33.0955414013, "max_line_length": 104, "alphanum_fraction": 0.5910315627, "include": true, "reason": "import numpy", "num_tokens": 1181}
|
import requests
import time
import json
import sys
import os
import numpy as np
import cv2 as cv
import io
URL = "http://192.168.1.243:8001"

# Fixed palette cycled through by get_color().
colors = [
    (250, 0, 0),
    (0, 0, 120),
    (0, 0, 0),
    (0, 250, 0),
    (220, 220, 0),
    (107, 142, 35),
    (152, 251, 152),
    (70, 130, 180),
    (220, 20, 60),
    (255, 0, 0),
    (0, 0, 142),
    (0, 0, 70),
    (0, 60, 100),
    (0, 80, 100),
    (0, 0, 230),
    (119, 11, 32),
    (70, 70, 70),
    (102, 102, 156),
    (190, 153, 153),
]


def get_color(idx):
    """Return the palette color for *idx*, wrapping around the palette."""
    return colors[idx % len(colors)]
def request_detect(f):
    """POST the file-like object *f* to the server's /detect endpoint.

    Returns ``(0, parsed_json)`` on HTTP 200, ``(status_code, raw_content)``
    on any other status, and ``(503, None)`` when the request or JSON
    decoding fails.
    """
    try:
        params = dict(file=f)
        resp = requests.post(URL + "/detect", files=params, verify=False)
        if resp.status_code == requests.codes.ok:
            return 0, resp.json()
        return resp.status_code, resp.content
    # BUGFIX: the bare ``except:`` also swallowed KeyboardInterrupt and
    # SystemExit; Exception keeps the best-effort behaviour without that.
    except Exception:
        return 503, None
def request_detect_draw(f):
    """POST the file-like object *f* to /ddetect; returns the rendered image.

    Returns ``(0, image_bytes)`` on HTTP 200, ``(status_code, raw_content)``
    on any other status, and ``(503, None)`` on request failure.
    """
    try:
        params = dict(file=f)
        resp = requests.post(URL + "/ddetect", files=params, verify=False)
        if resp.status_code == requests.codes.ok:
            return 0, resp.content
        # BUGFIX: mirror request_detect for non-OK statuses. The original
        # fell through and implicitly returned None, which made callers'
        # ``err, R = ...`` unpacking raise TypeError.
        return resp.status_code, resp.content
    # BUGFIX: narrowed from a bare ``except:`` (see request_detect).
    except Exception:
        return 503, None
def read_file(path):
    """Read the file at *path* and return its raw bytes."""
    with open(path, "rb") as handle:
        data = handle.read()
    return data
def to_memfile(content):
    """Wrap *content* bytes in an in-memory file positioned at the start."""
    buf = io.BytesIO(content)
    buf.seek(0)
    return buf
def detect_file(path):
    """Run detection on the image file at *path* via request_detect."""
    with open(path, "rb") as f:
        return request_detect(f)
def detect_draw(path):
    """Request a server-side annotated ("drawn") result for the file at *path*."""
    with open(path, "rb") as f:
        return request_detect_draw(f)
def detect_img(img):
    """Run detection on an in-memory OpenCV image (presumably BGR -- TODO confirm)."""
    _, img_encoded = cv.imencode('.jpg', img)
    return request_detect(to_memfile(img_encoded))
def detect_draw_img(img):
    """Request a server-side annotated result for an in-memory OpenCV image."""
    _, img_encoded = cv.imencode('.jpg', img)
    return request_detect_draw(to_memfile(img_encoded))
def draw_detection(img, d, draw_text=True):
    """Draw one rectangle (and optional label) per detection in *d* onto *img*.

    Mutates *img* in place; does nothing when *d* is None.
    """
    if d is None:
        return
    for idx, det in enumerate(d):
        clr = get_color(idx)
        top_left = (det["x"], det["y"])
        bottom_right = (det["x"] + det["w"], det["y"] + det["h"])
        cv.rectangle(img, top_left, bottom_right, clr, thickness=2)
        if draw_text:
            word = det["name"] + "(" + str(int(100. * det["score"])) + "%)"
            cv.putText(img, word, (det["x"] + 5, det["y"] + 25),
                       cv.FONT_HERSHEY_SIMPLEX, 0.5, clr, 1, cv.LINE_AA)
if __name__ == "__main__":
    # Demo entry point: send the image named on the command line to the
    # detection server and save the server-rendered result as out.jpg.
    t = time.time()
    #err, R = detect_file(sys.argv[1])
    #img = cv.imread(sys.argv[1])
    #err, R = detect_img(img)
    err, R = detect_draw(sys.argv[1])
    t = time.time() - t
    # NOTE(review): when the request fails, R is None and this write raises
    # TypeError -- presumably err should be checked first; confirm intent.
    with open("out.jpg", 'wb') as f:
        f.write(R)
    """
    if err == 0:
        for r in R:
            print(r)
        print("Detection done in {:.4f} seconds".format(t))
        draw_detection(img, R, True)
        cv.imwrite("out.jpg", img)
    else:
        print (err, R)
    """
|
{"hexsha": "e634bfb1f56ec841ab6c10226556f9599de82f7e", "size": 2705, "ext": "py", "lang": "Python", "max_stars_repo_path": "client/detect_client.py", "max_stars_repo_name": "peterwa88/docker-fastrcnn", "max_stars_repo_head_hexsha": "8891a518c60c4f882834ad025e5b9295063a5a5a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2020-02-10T04:15:10.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-02T15:34:20.000Z", "max_issues_repo_path": "client/detect_client.py", "max_issues_repo_name": "peterwa88/docker-fastrcnn", "max_issues_repo_head_hexsha": "8891a518c60c4f882834ad025e5b9295063a5a5a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "client/detect_client.py", "max_forks_repo_name": "peterwa88/docker-fastrcnn", "max_forks_repo_head_hexsha": "8891a518c60c4f882834ad025e5b9295063a5a5a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 7, "max_forks_repo_forks_event_min_datetime": "2020-02-10T05:33:54.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-26T12:52:13.000Z", "avg_line_length": 21.2992125984, "max_line_length": 110, "alphanum_fraction": 0.5515711645, "include": true, "reason": "import numpy", "num_tokens": 817}
|
[STATEMENT]
lemma take_bit_numeral_bit0:
\<open>take_bit (numeral l) (numeral (Num.Bit0 k)) = take_bit (pred_numeral l) (numeral k) * 2\<close>
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. take_bit (numeral l) (numeral (num.Bit0 k)) = take_bit (pred_numeral l) (numeral k) * (2::'a)
[PROOF STEP]
by (simp add: take_bit_rec numeral_Bit0_div_2)
|
{"llama_tokens": 155, "file": null, "length": 1}
|
/*! \file
\brief A string input.
Copyright (C) 2019-2022 kaoru https://www.tetengo.org/
*/
#include <cstddef>
#include <memory>
#include <stdexcept>
#include <string>
#include <utility>
#include <boost/container_hash/hash.hpp>
#include <boost/core/noncopyable.hpp>
#include <tetengo/lattice/input.hpp>
#include <tetengo/lattice/string_input.hpp>
namespace tetengo::lattice
{
class string_input::impl : private boost::noncopyable
{
public:
// constructors and destructor
explicit impl(std::string value) : m_value{ std::move(value) } {}
// functions
const std::string& value() const
{
return m_value;
}
std::string& value()
{
return m_value;
}
bool equal_to_impl(const input& another) const
{
return another.as<string_input>().value() == m_value;
}
std::size_t hash_value_impl() const
{
return boost::hash_value(m_value);
}
std::size_t length_impl() const
{
return m_value.length();
}
std::unique_ptr<input> clone_impl() const
{
return std::make_unique<string_input>(m_value);
}
std::unique_ptr<input> create_subrange_impl(const std::size_t offset, const std::size_t length) const
{
if (offset + length > m_value.length())
{
throw std::out_of_range{ "offset and/or length are out of the range." };
}
return std::make_unique<string_input>(m_value.substr(offset, length));
}
void append_impl(std::unique_ptr<input>&& p_another)
{
if (!p_another)
{
throw std::invalid_argument{ "p_another is nullptr." };
}
if (!p_another->is<string_input>())
{
throw std::invalid_argument{ "Mismatch type of p_another." };
}
m_value += std::move(p_another->as<string_input>().value());
}
private:
// variables
std::string m_value;
};
    // Public string_input members: thin forwarders to the pimpl above.
    string_input::string_input(std::string value) : m_p_impl{ std::make_unique<impl>(std::move(value)) } {}
    // Defaulted here (not in the header) so impl is a complete type.
    string_input::~string_input() = default;
    // Read-only access to the stored string.
    const std::string& string_input::value() const
    {
        return m_p_impl->value();
    }
    // Mutable access to the stored string.
    std::string& string_input::value()
    {
        return m_p_impl->value();
    }
    bool string_input::equal_to_impl(const input& another) const
    {
        return m_p_impl->equal_to_impl(another);
    }
    std::size_t string_input::hash_value_impl() const
    {
        return m_p_impl->hash_value_impl();
    }
    std::size_t string_input::length_impl() const
    {
        return m_p_impl->length_impl();
    }
    std::unique_ptr<input> string_input::clone_impl() const
    {
        return m_p_impl->clone_impl();
    }
    std::unique_ptr<input> string_input::create_subrange_impl(const std::size_t offset, const std::size_t length) const
    {
        return m_p_impl->create_subrange_impl(offset, length);
    }
    void string_input::append_impl(std::unique_ptr<input>&& p_another)
    {
        m_p_impl->append_impl(std::move(p_another));
    }
}
|
{"hexsha": "170df32c481fe04f13ca626d954fe7d3f257abde", "size": 3416, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "library/lattice/cpp/src/tetengo.lattice.string_input.cpp", "max_stars_repo_name": "tetengo/tetengo", "max_stars_repo_head_hexsha": "66e0d03635583c25be4320171f3cc1e7f40a56e6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "library/lattice/cpp/src/tetengo.lattice.string_input.cpp", "max_issues_repo_name": "tetengo/tetengo", "max_issues_repo_head_hexsha": "66e0d03635583c25be4320171f3cc1e7f40a56e6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 41.0, "max_issues_repo_issues_event_min_datetime": "2021-06-25T14:20:29.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-16T02:50:50.000Z", "max_forks_repo_path": "library/lattice/cpp/src/tetengo.lattice.string_input.cpp", "max_forks_repo_name": "tetengo/tetengo", "max_forks_repo_head_hexsha": "66e0d03635583c25be4320171f3cc1e7f40a56e6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.5755395683, "max_line_length": 120, "alphanum_fraction": 0.5594262295, "num_tokens": 762}
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test cases for eager execution using XLA."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests.xla_test import XLATestCase
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.platform import googletest
class EagerTest(XLATestCase):
  """Basic eager-execution behaviors exercised on XLA devices."""

  def testBasic(self):
    """Multiplies two eager constants inside the XLA test scope."""
    with self.test_scope():
      three = constant_op.constant(3)
      five = constant_op.constant(5)
      product = three * five
      self.assertAllEqual(15, product)

  def testExecuteListOutputLen0(self):
    """unstack of an empty tensor returns an empty Python list."""
    with self.test_scope():
      empty = constant_op.constant([], dtype=dtypes.int32)
      result = array_ops.unstack(empty, 0)
      self.assertTrue(isinstance(result, list))
      self.assertEqual(0, len(result))

  def testExecuteListOutputLen1(self):
    """split with num=1 returns a one-element list holding the input."""
    with self.test_scope():
      split_dim = constant_op.constant(1)
      value = constant_op.constant([[0, 1, 2], [3, 4, 5]])
      result = array_ops.split(value, 1, axis=split_dim)
      self.assertTrue(isinstance(result, list))
      self.assertEqual(1, len(result))
      self.assertAllEqual([[0, 1, 2], [3, 4, 5]], result[0])

  def testExecuteListOutputLen3(self):
    """split with num=3 along axis 1 returns three column tensors."""
    with self.test_scope():
      split_dim = constant_op.constant(1)
      value = constant_op.constant([[0, 1, 2], [3, 4, 5]])
      result = array_ops.split(value, 3, axis=split_dim)
      self.assertTrue(isinstance(result, list))
      self.assertEqual(3, len(result))
      self.assertAllEqual([[0], [3]], result[0])
      self.assertAllEqual([[1], [4]], result[1])
      self.assertAllEqual([[2], [5]], result[2])

  def testBasicGraph(self):
    """The same computation produces the same result eagerly and in graph mode."""
    # Run some ops eagerly
    with self.test_scope():
      three = constant_op.constant(3)
      five = constant_op.constant(5)
      product = three * five
      self.assertAllEqual(15, product)

    # Run some ops graphly
    with context.graph_mode(), self.test_session() as sess:
      with self.test_scope():
        three = constant_op.constant(3)
        five = constant_op.constant(5)
        product = three * five
        self.assertAllEqual(15, sess.run(product))

  def testDegenerateSlices(self):
    """Slices that select nothing match numpy's empty-slice behavior."""
    with self.test_scope():
      npt = np.arange(1, 19, dtype=np.float32).reshape(3, 2, 3)
      t = constant_op.constant(npt)
      # degenerate by offering a forward interval with a negative stride
      self.assertAllEqual(npt[0:-1:-1, :, :], t[0:-1:-1, :, :])
      # degenerate with a reverse interval with a positive stride
      self.assertAllEqual(npt[-1:0, :, :], t[-1:0, :, :])
      # empty interval in every dimension
      self.assertAllEqual(npt[-1:0, 2:2, 2:3:-1], t[-1:0, 2:2, 2:3:-1])

  def testIdentity(self):
    """identity on a plain Python scalar."""
    with self.test_scope():
      self.assertAllEqual(2, array_ops.identity(2))

  def testIdentityOnVariable(self):
    """identity reads the current value of a resource variable."""
    with self.test_scope():
      v = resource_variable_ops.ResourceVariable(True)
      i = array_ops.identity(v)
      self.assertAllEqual(True, i.numpy())

  def testAssignAddVariable(self):
    """assign_add updates the variable in place."""
    with self.test_scope():
      v = resource_variable_ops.ResourceVariable(1.0)
      v.assign_add(2.0)
      self.assertEqual(3.0, v.numpy())

  def testGradient(self):
    """gradients_function propagates dy through the identity function."""

    def f(x):
      return x

    with self.test_scope():
      grad_fn = backprop.gradients_function(f)
      self.assertAllEqual(2., grad_fn(1., dy=2.)[0])

  def testVariableGradient(self):
    """implicit_grad differentiates v0*v0 with respect to the variable."""
    with self.test_scope():
      v0 = resource_variable_ops.ResourceVariable(1.0)

      def f():
        x = v0 * v0
        return x

      grads = backprop.implicit_grad(f)()
      self.assertEqual(2., grads[0][0].numpy())
if __name__ == "__main__":
  # Eager mode must be enabled before any ops execute; device placement is
  # logged so XLA device assignment is visible in the test output.
  ops.enable_eager_execution(
      config=config_pb2.ConfigProto(log_device_placement=True))
  googletest.main()
|
{"hexsha": "bdd0185dfe4abe9d9acecc5381ff82c54b8c0705", "size": 4786, "ext": "py", "lang": "Python", "max_stars_repo_path": "tensorflow/compiler/tests/eager_test.py", "max_stars_repo_name": "tucaiyong/tensorflow", "max_stars_repo_head_hexsha": "3cc3c87f375f1bc292bd58db4928b810ac888bc6", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 14, "max_stars_repo_stars_event_min_datetime": "2018-12-06T06:51:33.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-23T11:29:24.000Z", "max_issues_repo_path": "tensorflow/compiler/tests/eager_test.py", "max_issues_repo_name": "tucaiyong/tensorflow", "max_issues_repo_head_hexsha": "3cc3c87f375f1bc292bd58db4928b810ac888bc6", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 10, "max_issues_repo_issues_event_min_datetime": "2018-02-04T18:41:52.000Z", "max_issues_repo_issues_event_max_datetime": "2018-05-02T09:00:46.000Z", "max_forks_repo_path": "tensorflow/compiler/tests/eager_test.py", "max_forks_repo_name": "tucaiyong/tensorflow", "max_forks_repo_head_hexsha": "3cc3c87f375f1bc292bd58db4928b810ac888bc6", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2018-01-17T14:22:49.000Z", "max_forks_repo_forks_event_max_datetime": "2018-02-27T15:06:41.000Z", "avg_line_length": 34.6811594203, "max_line_length": 80, "alphanum_fraction": 0.6809444212, "include": true, "reason": "import numpy", "num_tokens": 1181}
|
import cv2
import argparse
import numpy as np
import os
def get_noise(img, value=10):
    """Generate a sparse random noise field used as rain-drop seeds.

    Args:
        img: Source image; only its height/width (``img.shape[0:2]``) are used.
        value: Noise amount. The keep-threshold is ``256 - value * 0.01``, so
            larger values keep more seed pixels.

    Returns:
        2-D float array, same height/width as ``img``, with isolated bright
        spots slightly spread into their neighbours by a small kernel.
    """
    noise = np.random.uniform(0, 256, img.shape[0:2])
    # Keep only the brightest samples; everything below the cut becomes 0.
    v = value * 0.01
    noise[noise < (256 - v)] = 0
    # Emphasise each surviving spot (centre weight 8) and bleed a little
    # into the 4-neighbourhood.
    k = np.array([[0, 0.1, 0],
                  [0.1, 8, 0.1],
                  [0, 0.1, 0]])
    noise = cv2.filter2D(noise, -1, k)
    return noise
def rain_blur(noise, length=10, angle=0, w=1):
    """Motion-blur a noise field into slanted rain streaks.

    Args:
        noise: 2-D noise field, e.g. from :func:`get_noise`.
        length: Streak length in pixels (also the motion-kernel size).
        angle: Streak angle in degrees; 45 is subtracted because the base
            diagonal kernel already slants at 45 degrees.
        w: Streak width; must be odd for ``cv2.GaussianBlur``.

    Returns:
        uint8 image of rain streaks normalised to the 0..255 range.
    """
    # Rotate a diagonal-line kernel to the requested streak direction,
    # shrinking it slightly as the length grows.
    trans = cv2.getRotationMatrix2D((length / 2, length / 2), angle - 45, 1 - length / 100.0)
    dig = np.diag(np.ones(length))
    k = cv2.warpAffine(dig, trans, (length, length))
    k = cv2.GaussianBlur(k, (w, w), 0)

    blurred = cv2.filter2D(noise, -1, k)
    cv2.normalize(blurred, blurred, 0, 255, cv2.NORM_MINMAX)
    blurred = np.array(blurred, dtype=np.uint8)
    return blurred
def alpha_rain(rain, img, beta=0.8):
    """Composite rain streaks onto an image via per-pixel alpha blending.

    Each channel is darkened where rain is bright and the weighted streak
    brightness is added on top::

        out = img * (255 - rain) / 255 + beta * rain

    Args:
        rain: 2-D uint8 rain-streak layer (e.g. from :func:`rain_blur`).
        img: H x W x 3 uint8 image; not modified.
        beta: Brightness weight of the rain streaks.

    Returns:
        New H x W x 3 uint8 image with the rain composited in. Assigning the
        float blend back into the uint8 result truncates, as before.
    """
    # NOTE: the original also built an unused 4-channel array with
    # np.concatenate((img, rain)); that wasted work has been dropped.
    rain = np.array(np.expand_dims(rain, 2), dtype=np.float32)
    rain_result = img.copy()
    for c in range(3):
        rain_result[:, :, c] = (rain_result[:, :, c] * (255 - rain[:, :, 0]) / 255.0
                                + beta * rain[:, :, 0])
    return rain_result
def add_rain(rain, img, alpha=0.9):
    """Blend rain onto an image with one global weight and display it.

    Args:
        rain: 2-D rain-streak layer; replicated to 3 channels to match img.
        img: H x W x 3 uint8 image.
        alpha: Weight of the original image (the rain gets ``1 - alpha``).

    Returns:
        The blended image. (Previously the result was computed and shown
        but never returned, which made the function unusable in pipelines.)
    """
    # change rain into 3 dimensions so addWeighted shapes match
    rain = np.repeat(np.expand_dims(rain, 2), 3, 2)
    result = cv2.addWeighted(img, alpha, rain, 1 - alpha, 1)
    cv2.imshow('rain_effct', result)
    cv2.waitKey()
    cv2.destroyWindow('rain_effct')
    return result
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # NOTE(review): "orginal" looks like a typo for "original", but it is a
    # runtime default path -- left as-is in case the directory is really
    # named that way.
    parser.add_argument('--input_path', type=str, default="../source_datasets/orginal")
    parser.add_argument('--output_path', type=str, default="../follow_up_datasets/rainy")
    args = parser.parse_args()
    print("Rainy config: {}".format(args))

    # Mirror the originals and write the rainy variants side by side under
    # the output path.
    if not os.path.exists(os.path.join(args.output_path, 'source_datasets')):
        os.makedirs(os.path.join(args.output_path, 'source_datasets'))
    if not os.path.exists(os.path.join(args.output_path, 'follow_up_datasets')):
        os.makedirs(os.path.join(args.output_path, 'follow_up_datasets'))

    source_path = args.input_path
    img_list = os.listdir(source_path)
    for img_name in img_list:
        img = cv2.imread(os.path.join(source_path, img_name))
        cv2.imwrite(os.path.join(args.output_path, 'source_datasets', img_name), img)
        # Heavy noise, long slanted streaks.
        noise = get_noise(img,value=200)
        rain = rain_blur(noise,length=30,angle=-30,w=3)
        rain_img = alpha_rain(rain,img,beta=0.6)  # method 1: alpha-channel blending
        cv2.imwrite(os.path.join(args.output_path, 'follow_up_datasets', img_name), rain_img)
|
{"hexsha": "477153ae4398a29d4b1813c6b8afb90530247cc4", "size": 3043, "ext": "py", "lang": "Python", "max_stars_repo_path": "generators/OpenCV/rainy.py", "max_stars_repo_name": "JW9MsjwjnpdRLFw/RMT", "max_stars_repo_head_hexsha": "a877fd78639a8d4c534d0373b9d0ad023e0fa2dd", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "generators/OpenCV/rainy.py", "max_issues_repo_name": "JW9MsjwjnpdRLFw/RMT", "max_issues_repo_head_hexsha": "a877fd78639a8d4c534d0373b9d0ad023e0fa2dd", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "generators/OpenCV/rainy.py", "max_forks_repo_name": "JW9MsjwjnpdRLFw/RMT", "max_forks_repo_head_hexsha": "a877fd78639a8d4c534d0373b9d0ad023e0fa2dd", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2021-01-25T02:44:23.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-09T13:25:57.000Z", "avg_line_length": 32.3723404255, "max_line_length": 93, "alphanum_fraction": 0.6240552087, "include": true, "reason": "import numpy", "num_tokens": 888}
|
Require Import Explicit_sub.
Require Import FOTheory.
Require Import basic Omega.
Import BuildModel.
Import T J R.
Import CCM.
Import ZFind_basic.
Import ZFnats.
(* Interpretation of first-order terms into the model: de Bruijn variables
   become references, constants become Peano numerals 0 and 1, and the
   addition symbol maps to the model's Add. *)
Fixpoint int_fotrm t:=
match t with
| Var i => Ref i
| Cst_0 => Zero
| Cst_1 => Succ Zero
| Df_Add u v => Add (int_fotrm u) (int_fotrm v)
end.

(* Interpretation of first-order formulas, connective by connective, into
   the corresponding model-level constructions. *)
Fixpoint int_fofml f:=
match f with
| eq_fotrm x y => EQ_trm (int_fotrm x) (int_fotrm y)
| TF => True_symb
| BF => False_symb
| neg f => Neg (int_fofml f)
| conj f1 f2 => Conj (int_fofml f1) (int_fofml f2)
| disj f1 f2 => Disj (int_fofml f1) (int_fofml f2)
| implf f1 f2 => Impl (int_fofml f1) (int_fofml f2)
| fall f => Fall (int_fofml f)
| exst f => Exst (int_fofml f)
end.

(* Interpretation of a hypothesis list: named hypotheses (Some f) become
   interpreted formulas; anonymous slots (None) become the type T. *)
Fixpoint int_hyp_rec hyp:=
match hyp with
| nil => nil
| h::hyp' =>
match h with
| Some f => (int_fofml f)::(int_hyp_rec hyp')
| None => T :: (int_hyp_rec hyp')
end
end.

(* Wrapper kept for the name used by the lemmas below. *)
Definition int_hyp hyp:= (int_hyp_rec hyp).
(* Interpreted terms are never the None pseudo-term. *)
Lemma fotrm_Some : forall t, int_fotrm t <> None.
destruct t; simpl; red; intros; discriminate.
Qed.

(* Interpreted formulas are never the None pseudo-term. *)
Lemma fofml_Some : forall f, int_fofml f <> None.
destruct f; simpl; red; intros; discriminate.
Qed.

(* Looking up a named hypothesis commutes with interpretation. *)
Lemma int_hyp_nth_fml : forall hyp f n, nth_error hyp n = Some (Some f) ->
nth_error (int_hyp hyp) n = Some (int_fofml f).
induction hyp; destruct n; simpl; intros; [discriminate | discriminate |
injection H; intro Hinj; rewrite Hinj |]; trivial.
destruct a; simpl; apply IHhyp; trivial.
Qed.

(* Anonymous hypothesis slots are interpreted as the type T. *)
Lemma int_hyp_nth_trm : forall hyp n,
nth_error hyp n = value None ->
nth_error (int_hyp hyp) n = value T.
induction hyp; destruct n; simpl; intros; try discriminate.
destruct a; [discriminate | trivial].
destruct a; apply IHhyp; trivial.
Qed.
(* Soundness of typing for terms: under a well-formed hypothesis list and a
   valuation satisfying it, every interpreted term denotes a natural number
   (an element of N). The Add case proceeds by N-induction on the second
   argument using the natrec equations add0/addS. *)
Lemma int_trm_N : forall hyp t i, hyp_ok hyp t ->
val_ok (int_hyp hyp) i ->
int (int_fotrm t) i ∈ N.
unfold hyp_ok; unfold val_ok; induction t; simpl in *; intros.
assert (n=n\/False). left; trivial.
specialize H with (n0:=n) (1:=H1).
generalize (int_hyp_nth_trm _ _ H); intros.
specialize H0 with (1:=H2). simpl in H0; trivial.
apply zero_typ.
apply succ_typ. apply zero_typ.
replace (fun k : nat => i k) with i; trivial.
assert (int (int_fotrm t2) i ∈ N).
apply IHt2; trivial; intros.
apply H. apply in_or_app. right; trivial.
elim H1 using N_ind; intros.
revert H4; apply in_set_morph; try reflexivity.
apply natrec_morph; try reflexivity.
do 2 red; intros. rewrite H5; reflexivity.
symmetry; trivial.
rewrite add0. apply IHt1; trivial; intros.
apply H. apply in_or_app; left; trivial.
rewrite addS;trivial.
apply succ_typ; trivial.
Qed.
(* Relocation (lifting) of a term commutes with interpretation, at an
   arbitrary cut-off k. *)
Lemma lift_int_lift_trm_rec : forall t n k,
eq_term (lift_rec n k (int_fotrm t))
(int_fotrm (lift_trm_rec t n k)).
induction t; simpl; intros.
unfold V.lams. unfold V.shift.
destruct (Compare_dec.le_gt_dec k n); simpl; intros.
replace (k+(n0+(n-k))) with (n+n0) by omega.
red; auto.
red; auto.
red; reflexivity.
red; intros. apply succ_morph. reflexivity.
red; intros. apply natrec_morph; try rewrite H.
rewrite <- IHt1. rewrite int_lift_rec_eq. reflexivity.
do 2 red. intros x0 y0 H' x1 y1 HE; rewrite HE; reflexivity.
rewrite <- IHt2. rewrite int_lift_rec_eq. reflexivity.
Qed.

(* Specialisation of the above to cut-off 0 (plain lift). *)
Lemma lift_int_lift_trm : forall t n,
eq_term (lift n (int_fotrm t)) (int_fotrm (lift_trm t n)).
unfold lift, lift_trm. intros. apply lift_int_lift_trm_rec.
Qed.
(* Relocation of a formula commutes with interpretation; one proof branch
   per connective, with the binder cases (Fall/Exst) handled through
   V.cons_lams. *)
Lemma lift_int_lift_fml_rec : forall f n k,
eq_term (lift_rec n k (int_fofml f))
(int_fofml (lift_fml_rec f n k)).
induction f; red; simpl; red; intros; try reflexivity.
apply subset_morph; try reflexivity.
red; intros. do 2 rewrite <- lift_int_lift_trm_rec.
do 2 rewrite <- int_lift_rec_eq. rewrite H; reflexivity.
apply prod_ext.
rewrite <- IHf. symmetry. rewrite H. apply int_lift_rec_eq.
red. reflexivity.
apply subset_morph; try red; intros;
rewrite <- IHf1; rewrite <- IHf2; rewrite H.
apply union_morph.
apply pair_morph; symmetry; apply int_lift_rec_eq.
do 2 rewrite int_lift_rec_eq. reflexivity.
apply union2_morph; rewrite H;
[rewrite <- IHf1 | rewrite <- IHf2]; symmetry;
apply int_lift_rec_eq.
apply prod_ext; try red; intros;
[rewrite <- IHf1 | rewrite <- IHf2]; rewrite H;
symmetry; apply int_lift_rec_eq.
apply prod_ext; try reflexivity.
red; intros. rewrite <- IHf. rewrite V.cons_lams.
rewrite int_lift_rec_eq. rewrite H1. rewrite H. reflexivity.
do 2 red. intros. rewrite H2. reflexivity.
apply union_morph. apply replf_morph; try reflexivity.
red. intros. rewrite <- IHf. rewrite int_lift_rec_eq.
rewrite V.cons_lams.
rewrite H; rewrite H1; reflexivity.
do 2 red; intros. rewrite H2. reflexivity.
Qed.

(* Specialisation to cut-off 0 (plain lift). *)
Lemma lift_int_lift_fml : forall f n,
eq_term (lift n (int_fofml f)) (int_fofml (lift_fml f n)).
unfold lift, lift_fml; intros. apply lift_int_lift_fml_rec.
Qed.
(* Substitution in a term commutes with interpretation, at an arbitrary
   de Bruijn index k; the Var case splits on whether the variable is below,
   at, or above the substituted index. *)
Lemma subst_int_subst_trm_rec : forall t N k,
eq_term (subst_rec (int_fotrm N) k (int_fotrm t))
(int_fotrm (subst_trm_rec t N k)).
induction t; intros.
do 2 red; simpl.
destruct (Compare_dec.lt_eq_lt_dec k n) as [[fv|eqv]|bv]; simpl.
unfold V.lams, V.shift; destruct (Compare_dec.le_gt_dec k n);
try (apply False_ind; omega; fail).
replace (n-k) with (S(Peano.pred n-k)) by omega; simpl.
replace (k+(Peano.pred n-k)) with (Peano.pred n) by omega;
red; auto.
case_eq (int_fotrm (lift_trm N k)); intros.
red; intros. subst k. unfold V.lams; simpl.
destruct (Compare_dec.le_gt_dec n n).
replace (n-n) with 0 by omega; simpl. rewrite H0.
setoid_replace (V.shift n y) with (V.lams 0 (V.shift n) y).
rewrite <- int_lift_rec_eq. fold (lift n (int_fotrm N)).
rewrite lift_int_lift_trm. rewrite H. simpl. reflexivity.
rewrite V.lams0; reflexivity.
apply False_ind; omega.
elim fotrm_Some with (1:=H).
do 2 red; intros. rewrite V.lams_bv; trivial. apply H.
do 2 red. simpl. do 2 red. reflexivity.
do 2 red; simpl; intros. do 2 red. intros.
apply succ_morph. reflexivity.
do 2 red; simpl; intros. do 2 red; intros. apply natrec_morph.
rewrite <- IHt1. rewrite H.
rewrite int_subst_rec_eq; reflexivity.
do 2 red; intros. rewrite H1; reflexivity.
rewrite <- IHt2. rewrite H.
rewrite int_subst_rec_eq; reflexivity.
Qed.

(* Specialisation to index 0 (plain substitution). *)
Lemma subst_int_subst_trm : forall t N,
eq_term (subst (int_fotrm N) (int_fotrm t))
(int_fotrm (subst_trm t N)).
unfold subst. intros. apply subst_int_subst_trm_rec with (k:=0).
Qed.
(* Substitution in a formula commutes with interpretation; one branch per
   connective, with the binder cases threaded through V.cons_lams and
   V.shiftS_split. *)
Lemma subst_int_subst_fml_rec : forall f N k,
eq_term (subst_rec (int_fotrm N) k (int_fofml f))
(int_fofml (subst_fml f N k)).
induction f; do 2 red; simpl; intros.
do 2 red; intros. apply subset_morph; try reflexivity.
red; intros. do 2 rewrite <- subst_int_subst_trm_rec.
do 2 rewrite int_subst_rec_eq. rewrite H; reflexivity.
do 2 red; reflexivity.
do 2 red; reflexivity.
do 2 red; intros. apply prod_ext.
rewrite <- IHf. rewrite int_subst_rec_eq. rewrite H; reflexivity.
red; reflexivity.
do 2 red; intros. apply subset_morph.
apply union2_morph; [rewrite <- IHf1 | rewrite <- IHf2];
rewrite int_subst_rec_eq; rewrite H; reflexivity.
red; intros. rewrite <- IHf1; rewrite <- IHf2.
do 2 rewrite <- int_subst_rec_eq. rewrite H. reflexivity.
do 2 red; intros.
apply union2_morph; [rewrite <- IHf1 | rewrite <- IHf2];
rewrite int_subst_rec_eq; rewrite H; reflexivity.
red; intros. apply prod_ext.
rewrite H. rewrite <- IHf1.
rewrite int_subst_rec_eq; reflexivity.
red; intros. rewrite <- IHf2.
rewrite <- int_subst_rec_eq. rewrite H; reflexivity.
red; intros. rewrite prod_ext; try reflexivity.
red; intros. rewrite <- IHf. rewrite V.cons_lams.
rewrite int_subst_rec_eq. rewrite H. rewrite V.shiftS_split.
rewrite V.shift_cons. rewrite H1. reflexivity.
do 4 red; intros. rewrite H2; reflexivity.
do 2 red; intros. apply union_morph.
apply replf_morph; try reflexivity.
red; intros. rewrite <- IHf. rewrite int_subst_rec_eq.
rewrite V.cons_lams. rewrite V.shiftS_split.
rewrite V.shift_cons. rewrite H1; rewrite H. reflexivity.
do 4 red; intros. rewrite H2; reflexivity.
Qed.

(* Specialisation to index 0 (plain substitution). *)
Lemma subst_int_subst_fml : forall f N,
eq_term (subst (int_fotrm N) (int_fofml f))
(int_fofml (subst_fml0 f N)).
unfold subst; intros; apply subst_int_subst_fml_rec.
Qed.
(* Every interpreted formula is a proposition: its interpretation lives in
   props (characterised via power_ax). Implication and the quantifiers rely
   on impredicative_prod. *)
Lemma fofml_in_props : forall f e,
typ e (int_fofml f) prop.
induction f; do 2 red; simpl; intros; unfold props;
unfold ZFcoc.props; rewrite power_ax; intros; trivial.
unfold EQ in H0. unfold cond_set in H0.
rewrite subset_ax in H0. destruct H0; trivial.
apply empty_ax in H0; contradiction.
revert y H0. rewrite <- power_ax. apply impredicative_prod.
do 2 red; reflexivity.
unfold props; unfold ZFcoc.props; intros.
rewrite power_ax; intros.
apply empty_ax in H1; contradiction.
do 2 red in IHf1; simpl in IHf1.
rewrite subset_ax in H0. destruct H0. destruct H1. destruct H2.
rewrite H1. clear H1 H3. revert x H2. rewrite <- power_ax.
fold ZFcoc.props. fold props. apply IHf1 with (e:=e); trivial.
apply union2_elim in H0. destruct H0; revert y H0;
rewrite <- power_ax; fold ZFcoc.props; fold props;
[do 2 red in IHf1; simpl in IHf1; apply IHf1 with (e:=e)
| do 2 red in IHf1; simpl in IHf1; apply IHf2 with (e:=e)];
trivial.
revert y H0. rewrite <- power_ax. apply impredicative_prod.
do 2 red; reflexivity.
intros. do 2 red in IHf2; simpl in IHf2;
apply IHf2 with (e:=e); trivial.
revert y H0. rewrite <- power_ax. apply impredicative_prod.
do 2 red. intros y1 y2 Hy1N H0; rewrite H0; reflexivity.
intros. do 2 red in IHf; simpl in IHf;
apply IHf with (e:=(T::e)).
apply vcons_add_var; simpl; trivial.
apply union_elim in H0. destruct H0.
apply replf_elim in H1.
destruct H1. revert y H0. rewrite <- power_ax. rewrite H2.
do 2 red in IHf; simpl in IHf; apply IHf with (e:=(T::e)).
apply vcons_add_var; simpl; trivial.
do 2 red. intros. rewrite H3; reflexivity.
Qed.
(* The induction-axiom instance, stated on the formula side, is equal (as a
   term) to its model-side counterpart: interpretation commutes with the
   base-case substitution [0/x] and the step-case substitution [x+1/x]. *)
Lemma P_ax_intro5_ex : forall P, eq_term
(Impl (int_fofml (subst_fml0 P Cst_0)) (Impl (Fall (Impl (int_fofml P)
(int_fofml (subst_fml0 (lift_fml_rec P 1 1) (Df_Add (Var 0) Cst_1)))))
(Fall (int_fofml P))))
(Impl (subst Zero (int_fofml P))
(Impl (Fall (Impl (int_fofml P)
(subst (Add (Ref 0) (Succ Zero)) (lift_rec 1 1 (int_fofml P)))))
(Fall (int_fofml P)))).
red; simpl; red; intros.
apply prod_ext.
rewrite <- subst_int_subst_fml; simpl. rewrite H; reflexivity.
red; intros. apply prod_ext.
apply prod_ext; try reflexivity.
red; intros. apply prod_ext.
apply int_morph; try reflexivity.
replace (fun k : nat => V.cons y0 (fun k0 : nat => x k0) k)
with (V.cons y0 x); trivial.
rewrite H3, H. reflexivity.
red; intros. rewrite <- subst_int_subst_fml. simpl.
do 2 rewrite int_subst_eq. rewrite <- lift_int_lift_fml_rec.
replace (fun k : nat => V.cons y0 (fun k0 : nat => x k0) k) with
(V.cons y0 x); trivial.
replace (fun k : nat => V.cons y3 (fun k0 : nat => y k0) k) with
(V.cons y3 y); trivial.
rewrite H. rewrite H3. reflexivity.
red; intros. apply prod_ext; try reflexivity.
red; intros. rewrite H5.
replace (fun k : nat => x k) with x; trivial.
replace (fun k : nat => y k) with y; trivial.
rewrite H; reflexivity.
Qed.
(* Soundness of the first-order proof system: every derivation of P from
   hyp yields a model-level proof term typed by the interpretation of P in
   the interpreted context. One case per inference rule (labelled below). *)
Lemma int_correct : forall hyp P, deriv hyp P ->
exists p, typ ((int_hyp hyp)) p (int_fofml P).
induction 1; simpl.
(*hyp*)
exists (Ref n); red; simpl; intros. unfold val_ok in H0.
rewrite <- lift_int_lift_fml. apply H0. apply int_hyp_nth_fml; trivial.
(*ax_intro*)
destruct H.
rewrite H; simpl. apply P_ax_intro1.
destruct H.
rewrite H; simpl. apply P_ax_intro2.
destruct H.
rewrite H; simpl. apply P_ax_intro3.
destruct H.
rewrite H; simpl. apply P_ax_intro4.
destruct H; rewrite H. generalize P_ax_intro5; intros.
specialize H0 with (e:=(int_hyp hyp)) (1:=(fofml_Some x)).
destruct H0. exists x0. simpl. rewrite P_ax_intro5_ex; trivial.
(*true_intro*)
apply True_symb_intro.
(*false_elim*)
apply False_symb_elim. simpl in IHderiv. trivial.
(*neg_intro*)
destruct IHderiv as (p, IH). exists p.
apply Neg_intro. trivial.
(*neg_elim*)
destruct IHderiv. exists x. apply Neg_elim. trivial.
(*conj_intro*)
destruct IHderiv1 as (x, IH1).
destruct IHderiv2 as (x', IH2).
exists x.
apply Conj_intro; try apply fofml_Some. split; trivial.
(* proof_irr lets the second component reuse the first proof term *)
do 2 red; intros. case_eq (int_fofml f2); intros; trivial.
assert (int x i == int x' i).
generalize (proof_irr _ _ _ (fofml_Some f1) IH1
(fofml_in_props f1 (int_hyp hyp))); intros. specialize H3 with (1:=H1).
generalize (proof_irr _ _ _ (fofml_Some f2) IH2
(fofml_in_props f2 (int_hyp hyp))); intros. specialize H4 with (1:=H1).
rewrite H3, H4; reflexivity.
rewrite H3. do 2 red in IH2. rewrite H2 in IH2.
apply IH2; trivial.
(*conj_elim1*)
destruct IHderiv. exists x. simpl in H0. apply Conj_elim in H0.
destruct H0; trivial.
apply fofml_Some.
apply fofml_Some.
(*conj_elim2*)
destruct IHderiv. exists x. simpl in H0. apply Conj_elim in H0.
destruct H0; trivial.
apply fofml_Some.
apply fofml_Some.
(*disj_intro1*)
destruct IHderiv. exists x.
apply Disj_intro; try apply fofml_Some.
left; trivial.
(*disj_intro2*)
destruct IHderiv. exists x.
apply Disj_intro; try apply fofml_Some.
right; trivial.
(*disj_elim*)
destruct IHderiv1, IHderiv2, IHderiv3.
exists prf_term. simpl in H2, H3, H4.
apply Disj_elim with (t:=x) (t1:=x0) (t2:=x1)
(A:=int_fofml f1) (B:=int_fofml f2); try rewrite lift_int_lift_fml; trivial.
apply fofml_Some.
apply fofml_in_props.
(*impl_intro*)
destruct IHderiv. simpl in H0.
exists (Abs (int_fofml f1) x).
apply Impl_intro; try apply fofml_Some.
rewrite lift_int_lift_fml; trivial.
(*impl_elim*)
destruct IHderiv1, IHderiv2. exists (App x x0).
apply Impl_elim with (A:=int_fofml f1); try apply fofml_Some; trivial.
(*fall_intro*)
destruct IHderiv. simpl in H0.
exists ((Abs T x)). apply Fall_intro; try apply fofml_Some; trivial.
(*fall_elim*)
destruct IHderiv. exists (App x (int_fotrm u)).
rewrite <- subst_int_subst_fml.
apply Fall_elim; try apply fofml_Some; trivial.
do 2 red; simpl; intros. apply int_trm_N with (hyp:=hyp); trivial.
(*exst_intro*)
destruct IHderiv; simpl in H0.
exists x. apply Exst_intro with (a:=(int_fotrm N)); try apply fofml_Some.
do 2 red; simpl; intros. apply int_trm_N with (hyp:=hyp); trivial.
do 2 red; intros. do 2 red in H1; specialize H1 with (1:=H2).
case_eq (subst (int_fotrm N) (int_fofml f)); intros.
rewrite <- H3. case_eq (int_fofml (subst_fml0 f N)); intros.
rewrite H4 in H1; rewrite <- H4 in H1.
rewrite subst_int_subst_fml; trivial.
elim fofml_Some with (f:=(subst_fml0 f N)); trivial.
elim subst_Some with (t:=(int_fofml f)) (a:=(int_fotrm N));
[apply fofml_Some | trivial ].
(*exst_elim*)
destruct IHderiv1, IHderiv2. simpl in H1, H2.
exists prf_term. apply Exst_elim with (t1:=x) (t2:=x0) (A:=int_fofml f); trivial.
apply fofml_Some.
apply fofml_in_props.
rewrite lift_int_lift_fml; trivial.
Qed.
(* A context of n+1 copies of the type T, used to type open terms whose
   free variables all range over T. *)
Fixpoint const_env n :=
match n with
| 0 => T :: nil
| S m => T :: (const_env m)
end.

(* Any successful lookup in const_env n is within bounds and yields T. *)
Lemma const_env_spec : forall m n t,
nth_error (const_env n) m = value t ->
(m <= n)%nat /\ t = T.
induction m; destruct n; simpl; intros.
injection H; intros; split; [|subst t]; trivial.
injection H; intros; split; [omega |subst t]; trivial.
destruct m; simpl in H; discriminate.
specialize IHm with (1:=H). destruct IHm.
split; [omega | trivial].
Qed.
(* A term equation proved in the constant context (big enough for all free
   variables of both sides) transports along any pair of substitutions that
   agree (eq_fosub) up to that bound. *)
Lemma eq_ext : forall e t1 t2 s s',
eq_typ (const_env (Peano.max (max_var t1) (max_var t2)))
(int_fotrm t1) (int_fotrm t2) ->
eq_fosub (S (Peano.max (max_var t1) (max_var t2))) e s s' ->
eq_typ e (app_esub s (int_fotrm t1)) (app_esub s' (int_fotrm t2)).
do 2 red; simpl; intros.
do 2 red in H; simpl.
unfold eq_fosub in H0.
assert (val_ok (const_env (Peano.max (max_var t1) (max_var t2))) (esub_conv s i)).
unfold val_ok. do 2 red; intros. apply const_env_spec in H2.
destruct H2; subst T; simpl.
assert (n < S (Peano.max (max_var t1) (max_var t2)))%nat by omega.
specialize H0 with (1:=H3); destruct H0 as (Heqtyp, (Htyps, Htyps')).
do 2 red in Htyps; simpl in Htyps; specialize Htyps with (1:=H1). apply Htyps.
specialize H with (1:=H2). rewrite H.
replace (fun k : nat => i k) with i; trivial. clear H H2.
(* push the substitution through t2 by induction, splitting the max bound
   between the two Add sub-terms *)
induction t2; simpl; try reflexivity.
specialize H0 with (n0:=n). destruct H0.
rewrite succ_max_distr. apply max_split2. unfold max_var; simpl; omega.
do 2 red in H; simpl in H; apply H; trivial.
apply natrec_morph.
apply IHt2_1. intros. apply H0.
rewrite succ_max_distr. rewrite succ_max_distr in H.
apply max_comb in H. destruct H.
apply max_split1; trivial.
apply max_split2. unfold max_var; simpl.
rewrite succ_max_distr. apply max_split1. trivial.
do 2 red; intros. rewrite H2; reflexivity.
apply IHt2_2. intros. apply H0.
rewrite succ_max_distr. rewrite succ_max_distr in H.
apply max_comb in H. destruct H.
apply max_split1; trivial.
apply max_split2. unfold max_var; simpl.
rewrite succ_max_distr. apply max_split2. trivial.
Qed.
|
{"author": "barras", "repo": "cic-model", "sha": "dcc38f3104048aa50d230f819085131b16702d3d", "save_path": "github-repos/coq/barras-cic-model", "path": "github-repos/coq/barras-cic-model/cic-model-dcc38f3104048aa50d230f819085131b16702d3d/CCTnat.v"}
|
function varargout = calculator(varargin)
% CALCULATOR M-file for calculator.fig
%      CALCULATOR, by itself, creates a new CALCULATOR or raises the existing
%      singleton*.
%
%      H = CALCULATOR returns the handle to a new CALCULATOR or the handle to
%      the existing singleton*.
%
%      CALCULATOR('CALLBACK',hObject,eventData,handles,...) calls the local
%      function named CALLBACK in CALCULATOR.M with the given input arguments.
%
%      CALCULATOR('Property','Value',...) creates a new CALCULATOR or raises the
%      existing singleton*.  Starting from the left, property value pairs are
%      applied to the GUI before calculator_OpeningFcn gets called.  An
%      unrecognized property name or invalid value makes property application
%      stop.  All inputs are passed to calculator_OpeningFcn via varargin.
%
%      *See GUI Options on GUIDE's Tools menu.  Choose "GUI allows only one
%      instance to run (singleton)".
%
% See also: GUIDE, GUIDATA, GUIHANDLES

% Edit the above text to modify the response to help calculator

% Last Modified by GUIDE v2.5 18-Feb-2011 17:46:26

% GUIDE-generated dispatcher: a string first argument names a local
% callback to invoke; otherwise the GUI is created/raised.

% Begin initialization code - DO NOT EDIT
gui_Singleton = 1;
gui_State = struct('gui_Name', mfilename, ...
    'gui_Singleton', gui_Singleton, ...
    'gui_OpeningFcn', @calculator_OpeningFcn, ...
    'gui_OutputFcn', @calculator_OutputFcn, ...
    'gui_LayoutFcn', [] , ...
    'gui_Callback', []);
if nargin && ischar(varargin{1})
    gui_State.gui_Callback = str2func(varargin{1});
end

if nargout
    [varargout{1:nargout}] = gui_mainfcn(gui_State, varargin{:});
else
    gui_mainfcn(gui_State, varargin{:});
end
% End initialization code - DO NOT EDIT
% --- Executes just before calculator is made visible.
% --- Executes just before calculator is made visible.
function calculator_OpeningFcn(hObject, eventdata, handles, varargin)
% This function has no output args, see OutputFcn.
% hObject    handle to figure
% eventdata  reserved - to be defined in a future version of MATLAB
% handles    structure with handles and user data (see GUIDATA)
% varargin   command line arguments to calculator (see VARARGIN)

% Choose default command line output for calculator
handles.output = hObject;

% Update handles structure
guidata(hObject, handles);

% Show today's date in the date1 text control. (The old unused local
% copy "date1 = date;" has been removed; it was assigned and never read.)
set(handles.date1, 'String', date);

initialize_gui(hObject, handles, false);

% UIWAIT makes calculator wait for user response (see UIRESUME)
% uiwait(handles.figure1);
% --- Outputs from this function are returned to the command line.
% --- Outputs from this function are returned to the command line.
function varargout = calculator_OutputFcn(hObject, eventdata, handles)
% varargout  cell array for returning output args (see VARARGOUT);
% hObject    handle to figure
% eventdata  reserved - to be defined in a future version of MATLAB
% handles    structure with handles and user data (see GUIDATA)

% Get default command line output from handles structure
varargout{1} = handles.output;
% --- Executes during object creation, after setting all properties.
% --- Executes during object creation, after setting all properties.
% Standard GUIDE boilerplate: give the edit box a white background on
% platforms where that is the convention.
function numbera_CreateFcn(hObject, eventdata, handles)
% hObject    handle to numbera (see GCBO)
% eventdata  reserved - to be defined in a future version of MATLAB
% handles    empty - handles not created until after all CreateFcns called

% Hint: edit controls usually have a white background on Windows.
%       See ISPC and COMPUTER.
if ispc && isequal(get(hObject,'BackgroundColor'), get(0,'defaultUicontrolBackgroundColor'))
    set(hObject,'BackgroundColor','white');
end
function numbera_Callback(hObject, eventdata, handles)
% hObject    handle to numbera (see GCBO)
% eventdata  reserved - to be defined in a future version of MATLAB
% handles    structure with handles and user data (see GUIDATA)

% Hints: get(hObject,'String') returns contents of numbera as text
%        str2double(get(hObject,'String')) returns contents of numbera as a double
numbera = str2double(get(hObject, 'String'));
if isnan(numbera)
    % Reset both the display AND the stored value. Previously only the
    % display was reset to 0 while NaN was still saved below, so any
    % later add/subtract/etc. silently produced NaN.
    numbera = 0;
    set(hObject, 'String', 0);
    errordlg('Input must be a number','Error');
end

% Save the first operand (the old comment said "density value" -- a
% leftover from the GUIDE example this file was adapted from).
handles.metricdata.numbera = numbera;
guidata(hObject,handles)
% --- Executes during object creation, after setting all properties.
% --- Executes during object creation, after setting all properties.
% Standard GUIDE boilerplate: give the edit box a white background on
% platforms where that is the convention.
function numberb_CreateFcn(hObject, eventdata, handles)
% hObject    handle to numberb (see GCBO)
% eventdata  reserved - to be defined in a future version of MATLAB
% handles    empty - handles not created until after all CreateFcns called

% Hint: edit controls usually have a white background on Windows.
%       See ISPC and COMPUTER.
if ispc && isequal(get(hObject,'BackgroundColor'), get(0,'defaultUicontrolBackgroundColor'))
    set(hObject,'BackgroundColor','white');
end
function numberb_Callback(hObject, eventdata, handles)
% Parse the second operand from the edit box and store it in handles.
% hObject    handle to numberb (see GCBO)
% eventdata  reserved - to be defined in a future version of MATLAB
% handles    structure with handles and user data (see GUIDATA)

% Hints: get(hObject,'String') returns contents of numberb as text
%        str2double(get(hObject,'String')) returns contents of numberb as a double
numberb = str2double(get(hObject, 'String'));
if isnan(numberb)
    % BUG FIX: the original reset the displayed string but still stored NaN
    % in handles.metricdata.numberb, poisoning every later operation.
    numberb = 0;
    set(hObject, 'String', numberb);
    errordlg('Input must be a number','Error');
end

% Save the new operand value
handles.metricdata.numberb = numberb;
guidata(hObject,handles)
% --- Executes on button press in add.
function add_Callback(hObject, eventdata, handles)
% Show the sum of the two stored operands in the result field.
% hObject    handle to add (see GCBO)
% handles    structure with handles and user data (see GUIDATA)
sumValue = handles.metricdata.numbera + handles.metricdata.numberb;
set(handles.result, 'String', sumValue)
% --- Executes on button press in substract.
function substract_Callback(hObject, eventdata, handles)
% Show the difference (a - b) of the two stored operands.
% hObject    handle to substract (see GCBO)
% handles    structure with handles and user data (see GUIDATA)
difference = handles.metricdata.numbera - handles.metricdata.numberb;
set(handles.result, 'String', difference)
% --- Executes on button press in divide.
function divide_Callback(hObject, eventdata, handles)
% Show the quotient a / b. MATLAB division by zero yields Inf/NaN,
% so no explicit guard is required to match the original behavior.
% hObject    handle to divide (see GCBO)
% handles    structure with handles and user data (see GUIDATA)
quotient = handles.metricdata.numbera / handles.metricdata.numberb;
set(handles.result, 'String', quotient)
% --- Executes on button press in multiply.
function multiply_Callback(hObject, eventdata, handles)
% Show the product of the two stored operands.
% hObject    handle to multiply (see GCBO)
% handles    structure with handles and user data (see GUIDATA)
product = handles.metricdata.numbera * handles.metricdata.numberb;
set(handles.result, 'String', product)
% --- Executes on button press in natlog.
function natlog_Callback(hObject, eventdata, handles)
% Show the natural logarithm of operand a.
% hObject    handle to natlog (see GCBO)
% handles    structure with handles and user data (see GUIDATA)
lnValue = log(handles.metricdata.numbera);
set(handles.result, 'String', lnValue)
% --- Executes on button press in inverse.
function inverse_Callback(hObject, eventdata, handles)
% Show the reciprocal 1/a of operand a (Inf when a is zero).
% hObject    handle to inverse (see GCBO)
% handles    structure with handles and user data (see GUIDATA)
reciprocal = 1 / handles.metricdata.numbera;
set(handles.result, 'String', reciprocal)
% --- Executes on button press in cube.
function cube_Callback(hObject, eventdata, handles)
% Show operand a raised to the third power.
% hObject    handle to cube (see GCBO)
% handles    structure with handles and user data (see GUIDATA)
cubed = handles.metricdata.numbera ^ 3;
set(handles.result, 'String', cubed)
% --- Executes on button press in powerb.
function powerb_Callback(hObject, eventdata, handles)
% Show a raised to the power b.
% hObject    handle to powerb (see GCBO)
% handles    structure with handles and user data (see GUIDATA)
powered = handles.metricdata.numbera ^ handles.metricdata.numberb;
set(handles.result, 'String', powered)
% --- Executes on button press in sqrt.
function sqrt_Callback(hObject, eventdata, handles)
% Show the square root of operand a.
% hObject    handle to sqrt (see GCBO)
% handles    structure with handles and user data (see GUIDATA)
rootValue = sqrt(handles.metricdata.numbera);
set(handles.result, 'String', rootValue)
% --- Executes on button press in logarithm.
function logarithm_Callback(hObject, eventdata, handles)
% Show the base-10 logarithm of operand a.
% hObject    handle to logarithm (see GCBO)
% handles    structure with handles and user data (see GUIDATA)
log10Value = log10(handles.metricdata.numbera);
set(handles.result, 'String', log10Value)
% --- Executes on button press in broot.
function broot_Callback(hObject, eventdata, handles)
% Show the b-th root of a, computed as a^(1/b).
% hObject    handle to broot (see GCBO)
% handles    structure with handles and user data (see GUIDATA)
bthRoot = handles.metricdata.numbera ^ (1 / handles.metricdata.numberb);
set(handles.result, 'String', bthRoot)
% --- Executes on button press in factorial.
function factorial_Callback(hObject, eventdata, handles)
% Compute a! (factorial of operand a) and display it.
% hObject    handle to factorial (see GCBO)
% eventdata  reserved - to be defined in a future version of MATLAB
% handles    structure with handles and user data (see GUIDATA)

% prod(1:n) replaces the manual accumulation loop. prod of an empty
% range is 1, so n < 1 (including 0) yields 1 exactly as the original
% `N = 1; for n = 1:1:a` loop did.
result = prod(1:handles.metricdata.numbera);
set(handles.result, 'String', result)
% --- Executes on button press in square.
function square_Callback(hObject, eventdata, handles)
% Show operand a raised to the second power.
% hObject    handle to square (see GCBO)
% handles    structure with handles and user data (see GUIDATA)
squared = handles.metricdata.numbera ^ 2;
set(handles.result, 'String', squared)
% --- Executes on button press in pushbutton30.
function pushbutton30_Callback(hObject, eventdata, handles)
% Reset button: re-initialize the GUI, forcing stored operands back to 0.
% hObject    handle to pushbutton30 (see GCBO)
% eventdata  reserved - to be defined in a future version of MATLAB
% handles    structure with handles and user data (see GUIDATA)
initialize_gui(gcbf, handles, true);
function initialize_gui(fig_handle, handles, isreset)
% Reset operands, result, and date display to their defaults.
% fig_handle  figure handle (unused here; kept for GUIDE convention)
% handles     structure with handles and user data (see GUIDATA)
% isreset     true forces a reset even if metricdata already exists

% If the metricdata field is present and the reset flag is false, it means
% we are we are just re-initializing a GUI by calling it from the cmd line
% while it is up. So, bail out as we dont want to reset the data.
if isfield(handles, 'metricdata') && ~isreset
    return;
end

handles.metricdata.numbera = 0;
handles.metricdata.numberb = 0;
set(handles.numbera, 'String', handles.metricdata.numbera);
set(handles.numberb, 'String', handles.metricdata.numberb);
set(handles.result, 'String', 0);
% Show today's date in the date1 static text.
set(handles.date1, 'String', date);

% Update handles structure
guidata(handles.figure1, handles);
function result_Callback(hObject, eventdata, handles)
% Intentionally empty: the result box is display-only; user edits are ignored.
% hObject    handle to result (see GCBO)
% eventdata  reserved - to be defined in a future version of MATLAB
% handles    structure with handles and user data (see GUIDATA)

% Hints: get(hObject,'String') returns contents of result as text
%        str2double(get(hObject,'String')) returns contents of result as a double
% --- Executes during object creation, after setting all properties.
function result_CreateFcn(hObject, eventdata, handles)
% GUIDE boilerplate: give the result box a white background on Windows.
% hObject    handle to result (see GCBO)
% eventdata  reserved - to be defined in a future version of MATLAB
% handles    empty - handles not created until after all CreateFcns called

% Hint: edit controls usually have a white background on Windows.
%       See ISPC and COMPUTER.
if ispc && isequal(get(hObject,'BackgroundColor'), get(0,'defaultUicontrolBackgroundColor'))
    set(hObject,'BackgroundColor','white');
end
|
{"author": "Sable", "repo": "mcbench-benchmarks", "sha": "ba13b2f0296ef49491b95e3f984c7c41fccdb6d8", "save_path": "github-repos/MATLAB/Sable-mcbench-benchmarks", "path": "github-repos/MATLAB/Sable-mcbench-benchmarks/mcbench-benchmarks-ba13b2f0296ef49491b95e3f984c7c41fccdb6d8/30454-calculator/calculator.m"}
|
import pandas as pd
import os
import csv
import numpy as np
from itertools import cycle
from collections import Counter
def d5_1():
    """Advent of Code 2021, day 5, part 1.

    Reads vent segments from ``input_5.csv`` (lines like ``x1,y1 -> x2,y2``),
    keeps only horizontal/vertical segments, expands each into its covered
    grid points, and prints how many points are covered more than once.

    Side effects: reads ``input_5.csv`` from the working directory; prints
    the answer.
    """
    data = pd.read_csv("input_5.csv", header = None, delimiter = "->", engine = "python")
    data[["x1", "y1"]] = data[0].str.split(",", expand = True).astype(int)
    data[["x2", "y2"]] = data[1].str.split(",", expand = True).astype(int)
    data = data.drop(columns = [0, 1])
    # Part 1 considers only horizontal or vertical segments.
    data = data[(data["x1"] == data["x2"]) | (data["y1"] == data["y2"])]
    points = []
    for index, row in data.iterrows():
        # Build ascending coordinate ranges regardless of endpoint order.
        xrange = [*range(min(row["x1"], row["x2"]), max(row["x1"], row["x2"]) + 1)]
        yrange = [*range(min(row["y1"], row["y2"]), max(row["y1"], row["y2"]) + 1)]
        # One range has length 1; cycle() repeats the constant coordinate.
        # (Single-point segments, len == len, are skipped, as in the original.)
        if len(xrange) > len(yrange):
            points.extend(zip(xrange, cycle(yrange)))
        if len(xrange) < len(yrange):
            points.extend(zip(cycle(xrange), yrange))
    # Counter replaces the hand-rolled frequency dict and the DataFrame
    # round-trip that existed only to count entries with count > 1.
    counts = Counter(points)
    print(sum(1 for c in counts.values() if c > 1))
def getType(x1, x2, y1, y2):
    """Classify the segment (x1,y1)-(x2,y2) as 'vertical', 'horizontal',
    or 'diagonal'. A vertical check takes precedence, so a single point
    is reported as 'vertical', matching the original ordering.
    """
    if x1 == x2:
        return "vertical"
    return "horizontal" if y1 == y2 else "diagonal"
def d5_2():
    """Advent of Code 2021, day 5, part 2.

    Like part 1 but also expands 45-degree diagonal segments, stepping one
    unit per axis from (x1,y1) to (x2,y2). Prints the number of grid points
    covered by more than one segment.

    Side effects: reads ``input_5.csv``; prints the answer.
    """
    data = pd.read_csv("input_5.csv", header = None, delimiter = "->", engine = "python")
    data[["x1", "y1"]] = data[0].str.split(",", expand = True).astype(int)
    data[["x2", "y2"]] = data[1].str.split(",", expand = True).astype(int)
    data = data.drop(columns = [0, 1])
    data["type"] = data.apply(lambda row: getType(row["x1"], row["x2"], row["y1"], row["y2"]), axis = 1)
    points = []
    for index, row in data.iterrows():
        if row["type"] != "diagonal":
            # Axis-aligned: same expansion as part 1.
            xrange = [*range(min(row["x1"], row["x2"]), max(row["x1"], row["x2"]) + 1)]
            yrange = [*range(min(row["y1"], row["y2"]), max(row["y1"], row["y2"]) + 1)]
            if len(xrange) > len(yrange):
                points.extend(zip(xrange, cycle(yrange)))
            if len(xrange) < len(yrange):
                points.extend(zip(cycle(xrange), yrange))
        else:
            # Diagonal: walk endpoint-to-endpoint with a signed unit step
            # on each axis so direction is preserved.
            xstep = 1 if row["x2"] > row["x1"] else -1
            ystep = 1 if row["y2"] > row["y1"] else -1
            xrange = [*range(row["x1"], row["x2"] + xstep, xstep)]
            yrange = [*range(row["y1"], row["y2"] + ystep, ystep)]
            points.extend(zip(xrange, yrange))
    # Counter replaces the hand-rolled frequency dict and DataFrame round-trip.
    counts = Counter(points)
    print(sum(1 for c in counts.values() if c > 1))
# Run both puzzle parts when the module is executed.
d5_1()
d5_2()
|
{"hexsha": "9048f82ead62442f834cedf992a94009b2559616", "size": 3544, "ext": "py", "lang": "Python", "max_stars_repo_path": "d5/d5.py", "max_stars_repo_name": "mwm021/Advent-of-Code-2021", "max_stars_repo_head_hexsha": "9908b95ea6503c1b31fa26845e8ee5d0ad474718", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "d5/d5.py", "max_issues_repo_name": "mwm021/Advent-of-Code-2021", "max_issues_repo_head_hexsha": "9908b95ea6503c1b31fa26845e8ee5d0ad474718", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "d5/d5.py", "max_forks_repo_name": "mwm021/Advent-of-Code-2021", "max_forks_repo_head_hexsha": "9908b95ea6503c1b31fa26845e8ee5d0ad474718", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.0891089109, "max_line_length": 108, "alphanum_fraction": 0.4771444695, "include": true, "reason": "import numpy", "num_tokens": 1001}
|
#include "3dUnitTypeSerialization.hpp"
#include <boost/foreach.hpp>
#include "logger.hpp"
#include <boost/optional/optional.hpp>
#include <glm/gtx/euler_angles.hpp>
using namespace m3d;
// Serialize a rawPointcloud under child node `id`:
// one <profile> per scan, carrying its angle plus any non-empty
// ECHO (range) and RSSI (intensity) channels.
void typeSerialization::serialize(boost::property_tree::ptree &pt, m3d::rawPointcloud &v, std::string id)
{
    ptree p;
    p.put("__type", "rawPointcloud");
    // NOTE(review): assumes v.angles and v.ranges are the same length -- confirm.
    for (int i=0; i < v.ranges.size(); i++)
    {
        ptree profile;
        profile.put("angle", v.angles[i]);
        for (int j=0; j <v.ranges[i].echoes.size(); j++)
        {
            lms_channel *ch =&(v.ranges[i].echoes[j]);
            // Skip empty channels so the XML stays compact.
            if (ch->data.size()>0)serialize(profile, *ch, "ECHO");
        }
        for (int j=0; j <v.ranges[i].rssis.size(); j++)
        {
            lms_channel *ch =&(v.ranges[i].rssis[j]);
            if (ch->data.size()>0)serialize(profile, *ch, "RSSI");
        }
        p.add_child("profile", profile);
    }
    pt.add_child(id, p);
}
// Serialize one lms_channel (scalar fields plus the sample vector as a
// space-separated string) as child node `id` of `pt`.
void typeSerialization::serialize(boost::property_tree::ptree &pt, lms_channel &m, std::string id)
{
    // Fields of lms_channel (for reference):
    ///std::string contents;
    ///float scallingFactor;
    ///float scallingOffset;
    ///float startAngle;
    ///float angStepWidth;
    ///int numberOfData;
    ///std::vector<int>data;
    ptree p;
    p.put("__type", "lms_channel");
    p.put ("id", id);
    p.put ("contents", m.contents);
    p.put ("scallingOffset", m.scallingOffset);
    p.put ("scallingFactor", m.scallingFactor);
    p.put ("angStepWidth", m.angStepWidth);
    p.put ("startAngle", m.startAngle);
    p.put ("numberOfData", m.numberOfData);
    // Flatten samples to "v0 v1 v2 ... " -- note the trailing space;
    // the deserializer must tolerate it.
    std::stringstream ss;
    for (int i=0; i< m.data.size();i++)
    {
        ss<<m.data[i]<<' ';
    }
    p.add("data", ss.str());
    pt.add_child(id, p);
}
// Rebuild a rawPointcloud from child node `id`: iterate its <profile>
// children, reading one ECHO and one RSSI channel per profile.
// Throws (const char*) "bad type" on a __type mismatch.
void typeSerialization::deserialize(boost::property_tree::ptree &pt, m3d::rawPointcloud &v, std::string id)
{
    m3d::rawPointcloud m_raw;
    boost::property_tree::ptree pLocal = pt.get_child(id);
    std::string type = pLocal.get<std::string>("__type");
    if (type.compare("rawPointcloud")!=0) throw ("bad type");
    LOG_INFO("type ok");
    for(boost::property_tree::ptree::iterator iter = pLocal.begin(); iter != pLocal.end(); iter++)
    {
        boost::property_tree::ptree profileLocal = iter->second;
        // Only <profile> children carry scan data; skip __type etc.
        if (iter->first.compare("profile")!= 0) continue;
        // -1000.0f acts as a sentinel for "angle missing".
        float angle = profileLocal.get("angle", -1000.0f);
        lms_measurement lm;
        lms_channel dist;
        lms_channel rssi;
        deserialize(profileLocal, dist, "ECHO");
        deserialize(profileLocal, rssi, "RSSI");
        lm.echoes.push_back(dist);
        lm.rssis.push_back(rssi);
        // Profiles without an angle are dropped entirely.
        if (angle != -1000.0f)
        {
            m_raw.ranges.push_back(lm);
            m_raw.angles.push_back(angle);
        }
    }
    v= m_raw;
    LOG_INFO("Number of profiles "<<m_raw.angles.size());
}
// Rebuild an lms_channel from child node `id` (the inverse of the
// serialize() overload above). If the node is absent, logs a warning and
// leaves `m` untouched. Throws (const char*) "bad type" on __type mismatch.
void typeSerialization::deserialize(boost::property_tree::ptree &pt, lms_channel &m, std::string id)
{
    lms_channel my_channel;
    boost::optional<boost::property_tree::ptree&> pLocalOpt = pt.get_child_optional(id);
    if (!pLocalOpt)
    {
        LOG_WARN("cannot find node " << id);
        return;
    }
    boost::property_tree::ptree pLocal = pLocalOpt.get();
    std::string type = pLocal.get<std::string>("__type");
    if (type.compare("lms_channel")!=0) throw ("bad type");
    my_channel.contents = pLocal.get<std::string>("contents");
    my_channel.scallingOffset = pLocal.get<float>("scallingOffset");
    my_channel.scallingFactor = pLocal.get<float>("scallingFactor");
    my_channel.angStepWidth = pLocal.get<float>("angStepWidth");
    my_channel.startAngle = pLocal.get<float>("startAngle");
    // BUG FIX: numberOfData is an integer count; it was read as float.
    my_channel.numberOfData = pLocal.get<int>("numberOfData");
    std::string data = pLocal.get<std::string>("data");
    std::stringstream ss(data);
    // BUG FIX: the old `while (!ss.eof()) { ss >> d; push_back(d); }` loop
    // appended one spurious stale sample, because the serialized string
    // always ends with a space so the final extraction fails before eof.
    // Extract-then-test stops exactly at the last valid value.
    float d;
    while (ss >> d)
    {
        my_channel.data.push_back(d);
    }
    m = my_channel;
}
// Rebuild a 4x4 matrix from child node `id`. Two encodings are supported,
// selected by the optional __method key (default "matrix"):
//   "matrix" -- 16 whitespace-separated values, row by row;
//   "XYZYPR" -- translation x,y,z followed by yaw,pitch,roll angles.
// Throws (const char*) "bad type" on a __type mismatch.
void typeSerialization::deserialize(boost::property_tree::ptree &pt, glm::mat4 &m, std::string id)
{
    LOG_INFO("reading matrix");
    boost::property_tree::ptree pLocal = pt.get_child(id);
    std::string type = pLocal.get<std::string>("__type");
    if (type.compare("Mat4")!=0) throw ("bad type");
    std::string method = pLocal.get("__method","matrix");
    if (method.compare("matrix") ==0)
    {
        std::string data = pLocal.get<std::string>("data");
        std::stringstream ss(data);
        ss>>m[0][0]>>m[0][1]>>m[0][2]>>m[0][3];
        ss>>m[1][0]>>m[1][1]>>m[1][2]>>m[1][3];
        ss>>m[2][0]>>m[2][1]>>m[2][2]>>m[2][3];
        ss>>m[3][0]>>m[3][1]>>m[3][2]>>m[3][3];
    }
    if (method.compare("XYZYPR") ==0)
    {
        std::string data = pLocal.get<std::string>("data");
        std::stringstream ss(data);
        // NOTE(review): the array holds 7 slots but only 6 values are read;
        // floats[6] is never used -- confirm whether a 7th field was intended.
        float floats[7];
        ss>>floats[0]>>floats[1]>>floats[2];
        ss>>floats[3]>>floats[4]>>floats[5];
        glm::mat4x4 t,r;
        t = glm::translate(glm::mat4(1.0f),glm::vec3(floats[0],floats[1],floats[2]));
        r=glm::yawPitchRoll(floats[3],floats[4],floats[5]);
        // NOTE(review): composition order is rotation * translation; verify
        // this matches the convention expected by callers.
        m = r*t;
    }
}
// Store the matrix as 16 tab-separated values (row by row, each value
// followed by a tab) under child node `id`, tagged __method "matrix".
void typeSerialization::serialize(boost::property_tree::ptree &pt, glm::mat4 &m, std::string id)
{
    boost::property_tree::ptree node;
    node.add("__type", "Mat4");
    node.add("__method", "matrix");
    std::stringstream buffer;
    for (int row = 0; row < 4; ++row)
    {
        for (int col = 0; col < 4; ++col)
        {
            buffer << m[row][col] << "\t";
        }
    }
    node.add("data", buffer.str());
    pt.add_child(id, node);
}
// Write a pointcloud to `fn` as tab-separated text: x y z per line, with a
// fourth intensity column when pc.intensity matches pc.data in length.
void typeSerialization::saveTXT(std::string fn, m3d::pointcloud &pc)
{
    std::ofstream ofile;
    ofile.open(fn.c_str());
    // BUG FIX: the original only printed a message on success but carried on
    // writing (and printing "saved") even when the open failed. Bail out early.
    if (!ofile.is_open())
    {
        std::cerr << "cannot open file " << fn << "\n";
        return;
    }
    std::cout <<"file is opened "<< fn<<"\n";
    // Hoist the invariant size comparison out of the loop.
    const bool hasIntensity = pc.intensity.size() == pc.data.size();
    for (size_t i=0; i < pc.data.size(); i++)
    {
        ofile << pc.data[i].x<<"\t"<<pc.data[i].y<<"\t"<<pc.data[i].z;
        if (hasIntensity) {ofile<<"\t"<<pc.intensity[i];};
        ofile<<"\n";
    }
    std::cout <<"saved\n";
    ofile.close();
}
|
{"hexsha": "2188de68a56506dfa23563545764b7f0c329f178", "size": 6212, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/3dUnitTypeSerialization.cpp", "max_stars_repo_name": "michalpelka/m3d_unit_dr", "max_stars_repo_head_hexsha": "ecee744474bde24a4a50d4872b59a087c66ac677", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/3dUnitTypeSerialization.cpp", "max_issues_repo_name": "michalpelka/m3d_unit_dr", "max_issues_repo_head_hexsha": "ecee744474bde24a4a50d4872b59a087c66ac677", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/3dUnitTypeSerialization.cpp", "max_forks_repo_name": "michalpelka/m3d_unit_dr", "max_forks_repo_head_hexsha": "ecee744474bde24a4a50d4872b59a087c66ac677", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.2192513369, "max_line_length": 108, "alphanum_fraction": 0.5690598841, "num_tokens": 1863}
|
from typing import Dict
from numpy.lib.histograms import _ravel_and_check_weights
import datasets
from datasets import load_dataset, Dataset
import transformers
from transformers import AutoTokenizer
from copy import deepcopy
import pandas as pd
class DataFilter(object):
    """Joins hand-coded labels onto the raw speech texts via pandas.

    Each labelled example is matched to its full text by ``doc_id`` and
    tokenized; consumed rows are removed from the internal DataFrame so
    that :meth:`generate_remain_dataset` yields only the unlabelled
    remainder.
    """
    def __init__(self, base_dataset, cfg):
        # Materialize the HF dataset's train split as a DataFrame for
        # doc_id lookups.
        self.base_dataset_pd = pd.DataFrame(base_dataset['train'][:])
        # Tokenizer for the configured pretrained model.
        self.tokenizer = AutoTokenizer.from_pretrained(cfg['MODEL_NAME'])
        assert isinstance(self.tokenizer, transformers.PreTrainedTokenizerFast)

    def __call__(self, example):
        """Tokenize the text matching this labelled example's doc_id and
        attach its 'immigration' label."""
        # Boolean mask over rows whose doc_id matches this example.
        # NOTE(review): assumes doc_id values are unique -- confirm.
        row = self.base_dataset_pd['doc_id'] == example['doc_id']
        tokenized_inputs = self.tokenizer(self.base_dataset_pd.loc[row].iloc[0]['text'],padding="max_length", truncation=True)
        tokenized_inputs["label"] = example['immigration']
        # BUG FIX: DataFrame.drop returns a new frame; the original discarded
        # the result, so labelled rows were never removed and
        # generate_remain_dataset() returned them too. Reassign to persist.
        self.base_dataset_pd = self.base_dataset_pd.drop(self.base_dataset_pd.loc[row].index)
        return tokenized_inputs

    def filterRawData(self, example):
        """Tokenize an unlabelled speech; label 0 is a placeholder."""
        tokenized_inputs = self.tokenizer(example['text'],padding="max_length" , truncation=True)
        tokenized_inputs["label"] = 0
        return tokenized_inputs

    def generate_remain_dataset(self):
        """Return the rows not yet consumed by __call__ as a tokenized Dataset."""
        dataset = Dataset.from_pandas(self.base_dataset_pd)
        dataset = dataset.map(self.filterRawData, batched=False)
        return dataset
class DataLoader(object):
    """Loads the speech CSVs, merges hand-coded labels via DataFilter, and
    builds splits.

    Modes:
        '5-cross-train'     -- 5-fold lists in self.train / self.test.
        '5-cross-inference' -- whole labelled set in self.train.
        'normal'            -- 8/1/1 train/test/dev shards.
    """
    def __init__(self, cfg, mode = 'normal'):
        dataset = load_dataset('csv', data_files=['./data/dat_speeches_043114_immi_h_ascii_07212021.csv',
                                                  './data/dat_speeches_043114_immi_s_ascii_07202021.csv'])
        print(dataset['train'][0])
        dataset_label=load_dataset('csv', data_files=['./data/hand_coding_task_house_1000_07162021_lite.csv',
                                                      './data/hand_coding_task_senate_1000_07032021_lite.csv'])
        # Clean and merge the two datasets.
        filter = DataFilter(dataset, cfg)
        raw_datasets = dataset_label.map(filter, batched=False)
        self.train = None
        self.dev = None
        self.test = None
        # used to search for hyperparameters
        if (mode == '5-cross-train'):
            # BUG FIX: Dataset.shuffle returns a new shuffled dataset; the
            # original discarded the return value, so no shuffling happened.
            raw_datasets = raw_datasets.shuffle(seed=1)
            self.generate_5fold_cross_validation(raw_datasets)
        # use the whole dataset and inference the raw data
        if (mode == '5-cross-inference'):
            self.train = raw_datasets['train']
        if (mode == 'normal'):
            # BUG FIX: same discarded-return-value issue as above.
            raw_datasets = raw_datasets.shuffle()
            sub_dataset = [raw_datasets['train'].shard(num_shards =10 , index = i) for i in range(10)]
            self.train, self.test, self.dev = datasets.concatenate_datasets([sub_dataset[j] for j in range(8)]), sub_dataset[8], sub_dataset[9]

    # Prepare data sets for the five training sessions
    def generate_5fold_cross_validation(self, raw_datasets):
        """Populate self.train/self.test with the 5 fold combinations."""
        self.train, self.test = [None]*5, [None]*5
        sub_dataset = [raw_datasets['train'].shard(num_shards =5 , index = i) for i in range(5)]
        for i in range(5):
            # Fold i tests on shard i and trains on the other four.
            self.train[i] = datasets.concatenate_datasets([sub_dataset[j] for j in range(5) if j!=i])
            self.test[i] = sub_dataset[i]
def test():
    """Smoke test: build a DataLoader in 'normal' mode and print its splits."""
    from config import cfg
    loader = DataLoader(cfg, mode = 'normal')
    print(loader.train)
    print(loader.test)
    print(loader.dev)


if __name__=='__main__':
    test()
|
{"hexsha": "bc03540d690fbc61986e2c5fe847b9b5c68c898e", "size": 3522, "ext": "py", "lang": "Python", "max_stars_repo_path": "dataloader.py", "max_stars_repo_name": "cogito233/ImmigrationBert", "max_stars_repo_head_hexsha": "d80c44351f795ec2f6156d57d2ee126aace2d1da", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-08-25T11:16:08.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-20T12:31:12.000Z", "max_issues_repo_path": "dataloader.py", "max_issues_repo_name": "cogito233/ImmigrationBert", "max_issues_repo_head_hexsha": "d80c44351f795ec2f6156d57d2ee126aace2d1da", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "dataloader.py", "max_forks_repo_name": "cogito233/ImmigrationBert", "max_forks_repo_head_hexsha": "d80c44351f795ec2f6156d57d2ee126aace2d1da", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.4827586207, "max_line_length": 143, "alphanum_fraction": 0.6496308915, "include": true, "reason": "from numpy", "num_tokens": 847}
|
#pragma once
#include <mumlib.hpp>
#include <log4cpp/Category.hh>
#include <boost/noncopyable.hpp>
#include <string>
#include <stdexcept>
// 0 = mumble users connected at start; 1 = connect at dial-in
// TODO: fix mumlib::TransportException when this option is enabled
#define MUM_DELAYED_CONNECT 0
namespace mumble {
// Base exception for errors raised by the mumble integration layer.
class Exception : public std::runtime_error {
public:
    Exception(const char *message) : std::runtime_error(message) { }
};
class MumlibCallback;
// Connection and behavior settings for one MumbleCommunicator instance.
struct MumbleCommunicatorConfig {
    std::string user;          // Mumble username
    std::string password;      // server password
    std::string host;          // server hostname or address
    std::string cert_file;     // client certificate path
    std::string privkey_file;  // private key path
    int opusEncoderBitrate;
    int port = 0;              // 0 means "use the library default"
    bool autodeaf;             // deafen ourselves automatically
    std::string comment;       // user comment shown in Mumble
    int max_calls = 1;
    std::string authchan;      // config.ini: channelAuthExpression
};
// This is the subset that is of interest to us
struct MumbleUserState {
int32_t mute;
int32_t deaf;
int32_t suppress;
int32_t self_mute;
int32_t self_deaf;
int32_t priority_speaker;
int32_t recording;
};
// Wraps mumlib to expose a Mumble client: connecting, sending/receiving
// PCM audio, text messages, and user-state changes. Non-copyable because
// it owns a live connection.
class MumbleCommunicator : boost::noncopyable {
public:
    MumbleCommunicator(
            boost::asio::io_service &ioService);

    void connect(MumbleCommunicatorConfig &config);

    // Connection lifecycle hooks.
    void onConnect(const std::string& address);
    void onDisconnect();
    void onCallerAuth();
    //void onCallerUnauth();

    virtual ~MumbleCommunicator();

    void sendPcmSamples(int16_t *samples, unsigned int length);

    /**
     * This callback is called when communicator has received samples.
     * Arguments: call ID, session ID, sequence number, PCM samples, length of samples
     */
    std::function<void(int, int, int, int16_t *, int)> onIncomingPcmSamples;

    /**
     * This callback is called when a channel state message (e.g. Channel
     * information) is received. Arguments: channel_id, name
     * NOTE(review): the documented argument order disagrees with the
     * signature (std::string, int) -- confirm which is authoritative.
     */
    std::function<void(std::string, int)> onIncomingChannelState;

    std::function<void()> onServerSync;
    std::function<void()> onUserState;

    void sendTextMessage(std::string message);
    void sendTextMessageStr(mumlib::MessageType field, std::string message);
    void joinChannel(int channel_id);
    void sendUserState(mumlib::UserState field, bool val);
    void sendUserState(mumlib::UserState field, std::string val);

    // Last known state of our own user, updated via callbacks.
    MumbleUserState userState;
    int callId;

private:
    boost::asio::io_service &ioService;
    log4cpp::Category &logger;
    MumbleCommunicatorConfig mumbleConf;
    mumlib::MumlibConfiguration mumConfig;
    std::shared_ptr<mumlib::Mumlib> mum;
    std::unique_ptr<MumlibCallback> callback;

    // MumlibCallback forwards library events into this class's privates.
    friend class MumlibCallback;
};
}
|
{"hexsha": "66e62ed7977777c5be4e586f7286622357c1a49e", "size": 2916, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "MumbleCommunicator.hpp", "max_stars_repo_name": "hiro2233/mumsi", "max_stars_repo_head_hexsha": "19c604cbf3f19728163c1a84bc243f8a29c66786", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 6.0, "max_stars_repo_stars_event_min_datetime": "2020-06-01T08:54:48.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-21T19:39:48.000Z", "max_issues_repo_path": "MumbleCommunicator.hpp", "max_issues_repo_name": "LeoVerto/mumsi", "max_issues_repo_head_hexsha": "ae3254d49affe39e5b47ccf90ee0e4dd8c6fbc6a", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1.0, "max_issues_repo_issues_event_min_datetime": "2020-11-10T05:25:02.000Z", "max_issues_repo_issues_event_max_datetime": "2020-11-10T08:24:28.000Z", "max_forks_repo_path": "MumbleCommunicator.hpp", "max_forks_repo_name": "LeoVerto/mumsi", "max_forks_repo_head_hexsha": "ae3254d49affe39e5b47ccf90ee0e4dd8c6fbc6a", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2020-11-07T18:41:03.000Z", "max_forks_repo_forks_event_max_datetime": "2020-11-07T18:41:03.000Z", "avg_line_length": 26.2702702703, "max_line_length": 90, "alphanum_fraction": 0.640260631, "num_tokens": 674}
|
from __future__ import print_function
import sympy as sm

# Demonstrate stepwise symbolic evaluation of a double integral of
# f(x, y) = a*x + y**2*sin(y) over x and y.
sym_x, sym_y, coeff = sm.symbols('x y a')
integrand = coeff*sym_x + sym_y**2*sm.sin(sym_y)

# Fully unevaluated double integral.
outer = sm.Integral(integrand, sym_x, sym_y)
print(outer)

# Evaluate the inner x-integral, leave the y-integral symbolic.
partially_done = sm.Integral(sm.Integral(integrand, sym_x).doit(), sym_y)
print(partially_done)

# Evaluate completely.
print(partially_done.doit())
|
{"hexsha": "f4bac67975580d83ae06eca350b46f30649290b2", "size": 251, "ext": "py", "lang": "Python", "max_stars_repo_path": "doc/src/pyembed/src/ex1.py", "max_stars_repo_name": "dutille/doconce", "max_stars_repo_head_hexsha": "f1d41ae065f7d3c24450180c2ce019b03edbaddc", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 305, "max_stars_repo_stars_event_min_datetime": "2015-01-07T06:57:14.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-16T01:45:25.000Z", "max_issues_repo_path": "doc/src/pyembed/src/ex1.py", "max_issues_repo_name": "dutille/doconce", "max_issues_repo_head_hexsha": "f1d41ae065f7d3c24450180c2ce019b03edbaddc", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 163, "max_issues_repo_issues_event_min_datetime": "2015-01-08T11:03:26.000Z", "max_issues_repo_issues_event_max_datetime": "2020-12-17T12:54:46.000Z", "max_forks_repo_path": "doc/src/pyembed/src/ex1.py", "max_forks_repo_name": "dutille/doconce", "max_forks_repo_head_hexsha": "f1d41ae065f7d3c24450180c2ce019b03edbaddc", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 91, "max_forks_repo_forks_event_min_datetime": "2015-03-19T17:17:34.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-02T15:45:20.000Z", "avg_line_length": 20.9166666667, "max_line_length": 48, "alphanum_fraction": 0.6733067729, "include": true, "reason": "import sympy", "num_tokens": 91}
|
#!/usr/bin/python
# Usage:
#
# Reference
#
import numpy as np
def loadMatrix(inputTrain, inputTest):
    """Load train/test score files and build the matrix with missing values.

    BUG FIX: the original ``def`` lines had no bodies at all, which is a
    SyntaxError -- the file could not even be imported. loadMatrix was also
    declared with no parameters while main() calls it with two. These stubs
    make the skeleton importable and fail loudly when invoked.
    """
    raise NotImplementedError("loadMatrix is not implemented yet")

def expectation():
    """E-step of the EM loop (skeleton stub)."""
    raise NotImplementedError("expectation is not implemented yet")

def maximization():
    """M-step of the EM loop (skeleton stub)."""
    raise NotImplementedError("maximization is not implemented yet")

def convergenceCheck():
    """Convergence test for the EM loop (skeleton stub)."""
    raise NotImplementedError("convergenceCheck is not implemented yet")

def outputPrediction():
    """Write the predicted scores to the output file (skeleton stub)."""
    raise NotImplementedError("outputPrediction is not implemented yet")
def main():
    """Driver for the (skeleton) EM-based matrix reconstruction pipeline."""
    # configuration
    inputTrain = "../dataset/train.txt"
    inputTest = "../dataset/test.txt"
    amountOfEigenv = 5
    output = "scores-%d.txt" % (amountOfEigenv)
    # load train and test data
    # construct original matrix with missing values (the scores we are going to predict)
    matrix = loadMatrix(inputTrain, inputTest)
    # initial parameters
    # E-M loop
    # NOTE(review): loops while convergenceCheck() is truthy; the name
    # suggests the condition may need negating (loop until converged) --
    # confirm once convergenceCheck is implemented.
    while( convergenceCheck()):
        expectation()
        maximization()
    # generate prediction results
    outputPrediction()

if __name__ == "__main__":
    main()
|
{"hexsha": "0833f37303e83d4b7e6ed44de170856585219560", "size": 739, "ext": "py", "lang": "Python", "max_stars_repo_path": "assignments/3/A/PCAReconstructDataset.bak.py", "max_stars_repo_name": "gypleon/codesCloud", "max_stars_repo_head_hexsha": "bc779fd3485b925ff5e5345e725a97d6ce262a2b", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "assignments/3/A/PCAReconstructDataset.bak.py", "max_issues_repo_name": "gypleon/codesCloud", "max_issues_repo_head_hexsha": "bc779fd3485b925ff5e5345e725a97d6ce262a2b", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "assignments/3/A/PCAReconstructDataset.bak.py", "max_forks_repo_name": "gypleon/codesCloud", "max_forks_repo_head_hexsha": "bc779fd3485b925ff5e5345e725a97d6ce262a2b", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 17.5952380952, "max_line_length": 88, "alphanum_fraction": 0.6589986468, "include": true, "reason": "import numpy", "num_tokens": 170}
|
#!/usr/bin/env python
# coding=utf-8
"""Tests based on the IOOS QARTOD manuals."""
import logging
import warnings
from typing import Dict, List, Tuple, Union, Sequence
from numbers import Real as N
from collections import namedtuple
import numpy as np
import pandas as pd
from ioos_qc.utils import (
isnan,
isfixedlength,
add_flag_metadata,
great_circle_distance,
mapdates
)
from ioos_qc.qartod import QartodFlags
# Module-level logger, named after this module.
L = logging.getLogger(__name__)  # noqa

FLAGS = QartodFlags  # Default name for all check modules
# Flag value assigned to observations that could not be evaluated.
NOTEVAL_VALUE = QartodFlags.UNKNOWN

# Lightweight (minv, maxv) pair used to describe value spans in checks.
span = namedtuple('Span', 'minv maxv')
@add_flag_metadata(standard_name='gross_range_test_quality_flag',
                   long_name='Gross Range Test Quality Flag')
def valid_range_test(inp : Sequence[any],
                     valid_span : Tuple[any, any],
                     dtype : np.dtype = None,
                     start_inclusive : bool = True,
                     end_inclusive : bool = False,
                     ) -> np.ma.core.MaskedArray:
    """Checks that values are within a min/max range. This is not unlike a `qartod.gross_range_test`
    with fail and suspect bounds being equal, except that here we specify the inclusive range that
    should pass instead of the exclusive bounds which should fail. This also supports datetime-like
    objects where as the `qartod.gross_range_test` method only supports numerics.

    Given a 2-tuple of minimum/maximum values, flag data outside of the given
    range as FAIL data. Missing and masked data is flagged as MISSING. By default the
    first span value is treated as inclusive and the second span value is treated as
    exclusive. To change this behavior you can use the parameters `start_inclusive`
    and `end_inclusive`.

    Args:
        inp (Sequence[any]): Data as a sequence of objects compatible with the valid_span
            objects.
        valid_span (Tuple[any, any]): 2-tuple range; data outside of it is flagged as FAIL.
            Objects should be of equal format to that of the inp parameter as they will be
            checked for equality without type conversion.
        dtype (np.dtype): Optional. If your data is not already numpy-typed you can specify
            its dtype here.
        start_inclusive (bool): Optional. If the starting span value should be inclusive
            (True) or exclusive (False). Defaults to True.
        end_inclusive (bool): Optional. If the ending span value should be inclusive (True)
            or exclusive (False). Defaults to False.

    Returns:
        np.ma.core.MaskedArray: A masked array of flag values equal in size to that of the
        input.
    """
    # Numpy array inputs
    if dtype is None and hasattr(inp, 'dtype'):
        dtype = inp.dtype
    # Pandas column inputs
    # This is required because we don't want to restrict a user from using a pd.Series
    # directly with this function. If the data was coming from a Store, it would
    # always be a numpy array.
    elif dtype is None and hasattr(inp, 'values') and hasattr(inp.values, 'dtype'):
        dtype = inp.values.dtype
    # Save original shape so the flattened flag array can be re-shaped on return.
    # NOTE(review): this assumes `inp` has a .shape attribute (ndarray / pd.Series);
    # a plain Python list would raise AttributeError here -- consider capturing the
    # shape after the np.ma conversions below.
    original_shape = inp.shape
    if dtype is None:
        L.warning("Trying to guess data input type, try specifying the dtype parameter")
        # Try to figure out the dtype so masked values can be calculated
        try:
            # Try datetime-like objects
            inp = np.ma.masked_invalid(mapdates(inp))
            valid_span = np.ma.masked_invalid(mapdates(valid_span))
        except BaseException:
            try:
                # Try floating point numbers
                # NOTE(review): astype(np.floating) relies on numpy coercing the
                # abstract float type to float64; newer numpy releases deprecate
                # abstract dtypes here -- confirm against the pinned numpy version.
                inp = np.ma.masked_invalid(np.array(inp).astype(np.floating))
                valid_span = np.ma.masked_invalid(np.array(valid_span).astype(np.floating))
            except BaseException:
                # Well, we tried.
                raise ValueError(
                    "Could not determine the type of input, try using the dtype parameter")
    else:
        # Caller-supplied (or array-derived) dtype: convert directly and mask
        # NaN/inf values so they can be flagged as MISSING below.
        inp = np.ma.masked_invalid(np.array(inp, dtype=dtype))
        valid_span = np.ma.masked_invalid(np.array(valid_span, dtype=dtype))
    # Flags are computed over a flat view; re-shaped at the end.
    inp = inp.flatten()
    # Start with everything as passing (1)
    flag_arr = np.ma.ones(inp.size, dtype='uint8')
    # Set fail on either side of the bounds, inclusive and exclusive
    if not isnan(valid_span[0]):
        with np.errstate(invalid='ignore'):
            if start_inclusive is True:
                flag_arr[inp < valid_span[0]] = QartodFlags.FAIL
            else:
                flag_arr[inp <= valid_span[0]] = QartodFlags.FAIL
    if not isnan(valid_span[1]):
        with np.errstate(invalid='ignore'):
            if end_inclusive is True:
                flag_arr[inp > valid_span[1]] = QartodFlags.FAIL
            else:
                flag_arr[inp >= valid_span[1]] = QartodFlags.FAIL
    # If the value is masked or nan set the flag to MISSING
    flag_arr[inp.mask] = QartodFlags.MISSING
    return flag_arr.reshape(original_shape)
|
{"hexsha": "45ba4898da43628c0c4a6b34263ced451120a8a6", "size": 4940, "ext": "py", "lang": "Python", "max_stars_repo_path": "ioos_qc/axds.py", "max_stars_repo_name": "ocefpaf/ioos_qc", "max_stars_repo_head_hexsha": "b5412e2ad35e47d0d460afea963ecbffe260afe6", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ioos_qc/axds.py", "max_issues_repo_name": "ocefpaf/ioos_qc", "max_issues_repo_head_hexsha": "b5412e2ad35e47d0d460afea963ecbffe260afe6", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ioos_qc/axds.py", "max_forks_repo_name": "ocefpaf/ioos_qc", "max_forks_repo_head_hexsha": "b5412e2ad35e47d0d460afea963ecbffe260afe6", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.4918032787, "max_line_length": 100, "alphanum_fraction": 0.6601214575, "include": true, "reason": "import numpy", "num_tokens": 1115}
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import os
import json
import numpy as np
import mxnet as mx
from mxnet import gluon
import glob
from battlesnake_heuristics import MyBattlesnakeHeuristics
# Module-level heuristics engine applied on top of raw model output;
# shared across invocations of transform_fn.
heuristics = MyBattlesnakeHeuristics()
# One pre-trained model per supported board size; these names key the dict
# returned by model_fn and are selected per-request in transform_fn.
model_names = ["Model-11x11",
               "Model-15x15",
               "Model-19x19",
               "Model-7x7"]
def model_fn(model_dir):
    """Load every board-size-specific Gluon model found under *model_dir*.

    Returns a dict mapping each name in ``model_names`` to the imported
    ``gluon.SymbolBlock`` (symbol JSON plus trained parameters).
    """
    print("model_fn model_dir={} glob={}".format(model_dir, glob.glob("{}/*".format(model_dir))))
    loaded = {}
    for name in model_names:
        symbol_file = "{}/Models/{}/local-symbol.json".format(model_dir, name)
        params_file = "{}/Models/{}/local-0000.params".format(model_dir, name)
        # Each network takes four inputs: state, snake id, turn count, health.
        network = gluon.SymbolBlock.imports(
            symbol_file, ['data0', 'data1', 'data2', 'data3'],
            params_file)
        print("model_fn {} symbol={} params={}".format(name, symbol_file, params_file))
        loaded[name] = network
    return loaded
def transform_fn(models, data, content_type, output_content_type):
    """
    Transform incoming requests.

    Runs the board-size-appropriate network on the request payload, then
    post-processes the chosen action through the module-level ``heuristics``.

    Args:
        models: dict of board-size name -> gluon network, as returned by ``model_fn``.
        data: JSON-encoded request body (string). Read keys: "map_width", "state",
            "snake_id", "turn_count", "health", "all_health", "json" -- schema
            assumed from usage below; TODO confirm against the caller.
        content_type: MIME type of ``data`` (unused here).
        output_content_type: MIME type echoed back with the response.

    Returns:
        Tuple of (JSON-encoded chosen action, output_content_type).
    """
    # check if GPUs are available
    ctx = mx.gpu() if mx.context.num_gpus() > 0 else mx.cpu()
    data = json.loads(data)
    # Boards are square, so map_width is used for both dimensions of the key.
    model_name = "Model-{}x{}".format(data["map_width"], data["map_width"])
    # convert input data into MXNet NDArray
    state = mx.nd.array(data["state"], ctx=ctx)
    snake_id = mx.nd.array(data["snake_id"], ctx=ctx)
    turn_count = mx.nd.array(data["turn_count"], ctx=ctx)
    snake_health = mx.nd.array(data["health"], ctx=ctx)
    print("running model")
    # inference
    model = models[model_name]
    action = model(state, snake_id, turn_count, snake_health)
    action = action.asnumpy()[0]
    print("Action is {}".format(action))
    # Re-extract raw inputs for the heuristics pass.
    # NOTE(review): the [0, 1, :] indexing and transpose assume a particular
    # (batch, pair, channel, h, w) layout where index 1 is the current
    # observation -- confirm against the training/request pipeline.
    heuristics_state = np.array(data["state"])[0, 1, :].transpose(1, 2, 0)
    heuristics_id = np.array(data["snake_id"])[0, 1]
    heuristics_turn = np.array(data["turn_count"])[0, 1]
    heuristics_health = data["all_health"]
    # JSON object keys arrive as strings; heuristics.run is called with int keys.
    converted_heuristic_health = {}
    for k in heuristics_health:
        converted_heuristic_health[int(k)] = heuristics_health[k]
    print("Heuristisc health {}".format(converted_heuristic_health))
    print("state {}".format(heuristics_state.shape))
    print("data json {}".format(data["json"]))
    # Heuristics may override the network's action; the second return value
    # is discarded here.
    converted_action, _ = heuristics.run(heuristics_state,
                                         int(heuristics_id),
                                         int(heuristics_turn)+1,
                                         converted_heuristic_health,
                                         json=data["json"],
                                         action=action)
    print("converted_action {}".format(converted_action))
    output = converted_action
    # encode result as a json string
    response_body = json.dumps(output)
    return response_body, output_content_type
|
{"hexsha": "db1e76530eea51ef7032893e53eb5a8b7250f94d", "size": 3468, "ext": "py", "lang": "Python", "max_stars_repo_path": "source/MXNetEnv/inference/inference_src/predict.py", "max_stars_repo_name": "erebus9856/sagemaker-battlesnake-ai", "max_stars_repo_head_hexsha": "b1799622e56cbc4e619f15806db62c5fb6f25993", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 76, "max_stars_repo_stars_event_min_datetime": "2020-03-04T01:31:09.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T12:56:57.000Z", "max_issues_repo_path": "source/MXNetEnv/inference/inference_src/predict.py", "max_issues_repo_name": "IronOnet/sagemaker-battlesnake-ai", "max_issues_repo_head_hexsha": "b1799622e56cbc4e619f15806db62c5fb6f25993", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 18, "max_issues_repo_issues_event_min_datetime": "2020-03-08T05:56:43.000Z", "max_issues_repo_issues_event_max_datetime": "2021-06-20T06:18:38.000Z", "max_forks_repo_path": "source/MXNetEnv/inference/inference_src/predict.py", "max_forks_repo_name": "IronOnet/sagemaker-battlesnake-ai", "max_forks_repo_head_hexsha": "b1799622e56cbc4e619f15806db62c5fb6f25993", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 53, "max_forks_repo_forks_event_min_datetime": "2020-03-05T18:57:02.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T06:05:05.000Z", "avg_line_length": 36.125, "max_line_length": 97, "alphanum_fraction": 0.6363898501, "include": true, "reason": "import numpy", "num_tokens": 835}
|
#redirect Gladys Valley Hall
|
{"hexsha": "31674364d32f0d4e940604b4601b7b9504542c76", "size": 29, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "lab/davisWiki/Veterinary_Medicine_Instructional_Facility.f", "max_stars_repo_name": "voflo/Search", "max_stars_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lab/davisWiki/Veterinary_Medicine_Instructional_Facility.f", "max_issues_repo_name": "voflo/Search", "max_issues_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lab/davisWiki/Veterinary_Medicine_Instructional_Facility.f", "max_forks_repo_name": "voflo/Search", "max_forks_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 14.5, "max_line_length": 28, "alphanum_fraction": 0.8275862069, "num_tokens": 7}
|
\documentclass[serif,xcolor=pdftex,dvipsnames,table,hyperref={bookmarks=false,breaklinks}]{beamer}
\input{../config.tex}
\settitlecard{3}{Classes and Representing Numbers}
\begin{document}
\maketitlepage
% \section{Preliminaries}
% \subsection{Foo}
%
%
% \begin{frame}[t]{Reminders}
% \begin{itemize}[<+->]
% \item Assignment 1 due Tonight (09/13) by 11:59pm
% \begin{itemize}[<+->]
% \item Check Piazza
% \item Check your return types
% \end{itemize}
% \item Quiz 2 is going out tonight after class. Fewer questions, but they are a little longer.
% \item If you cannot make my office hours, feel free to email me and set up another time.
% \end{itemize}
% \end{frame}
\section{Modules and Objects}
\subsection{Foo}
% \begin{frame}[t]{Scoping}
%
% \end{frame}
\begin{frame}[t,fragile]{Importing Functions}
\begin{itemize}[<+->]
\item \textbf{Modules} are Python packages, usually a collection of functions and variable, that can be used in other programs.
\item Load modules into your code using an \textbf{import} statement.
\end{itemize}
\pause
\begin{tcolorbox}
\begin{verbatim}
import <module_name> # imports a module
\end{verbatim}
\end{tcolorbox}
%
% \pause
% \centering
% \Huge{DEMO}
\end{frame}
\begin{frame}[t]{Useful Built-in Modules}
Python has a number of very useful built-in modules:
\begin{itemize}[<+->]
\item \textbf{string:} operations on strings.
\item \textbf{math:} standard math functions such as: log, exp, sine, etc.
\item \textbf{itertools:} functions for manipulating sequences such as: combinations, permutations, cross product, etc.
\end{itemize}
% string
% math
% itertools
\pause
\centering
\Huge{DEMO}
\end{frame}
\begin{frame}[t]{Object Oriented Programming}
\Huge{DEMO}
\end{frame}
\begin{frame}[t,fragile]{Classes}
Create new object types in Python by defining a \textbf{class}.
\pause
\begin{tcolorbox}
\begin{verbatim}
class <class_name>:
def __init__(self,<args>):
self.<member_variable> = <expression>
<body>
def <member_function>(self,<args>):
<body>
\end{verbatim}
\end{tcolorbox}
\end{frame}
\begin{frame}[t,fragile]{What are objects?}
\begin{itemize}[<+->]
\item Objects, like functions, are a tool for organizing programs.
\item An object consists of:
\begin{enumerate}[<+->]
\item A collection of related information.
\item A set of operations to manipulate and access that information.
\end{enumerate}
\item The information is stored as \textit{instance variables}.
\item The operations are called \textit{methods}.
\item Collectively the methods and instance variables are called \textit{attributes}.
\end{itemize}
\pause
\begin{block}{Dog Example:}
\begin{itemize}[<+->]
\item \textbf{Instance variables}:
\begin{itemize}[<+->]
\item \verb|name| and \verb|tricks|
\end{itemize}
\item \textbf{Methods:}
\begin{itemize}[<+->]
\item \verb|bark|, \verb|teach_trick|, and \verb|do_trick|
\end{itemize}
\end{itemize}
\end{block}
\end{frame}
% \section{Miscelaneous Python}
% \subsection{Foo}
%
% \begin{frame}[t,fragile]{List Comprehensions}
% Python supports a concept called \textbf{list comprehensions} which can be used to conveniently create sequences.
% \pause
% \begin{block}{Class Definitions}
% \begin{verbatim}
% [<expresion> for <var> in <sequence>]
% \end{verbatim}
% \end{block}
% \pause
% \centering
% \Huge{DEMO}
%
% \end{frame}
% \begin{frame}[t]{Functional programmingesque Python}
% Python includes a number of functions
% \end{frame}
%
% \begin{frame}[t]{Misc}
% % with
% % None
% \end{frame}
\section{Intro to Numerical Computing}
\subsection{Foo}
\begin{frame}[t]{Numerical Computing}
\centering
\Large{\textbf{Numerical computing} is the approximation of continuous values and functions on a computer with finite precision.}
\includegraphics[width=2in]{../Figures/numerical_integration.png}
\end{frame}
\begin{frame}[t]{Representing Numbers}
Before we can approximate functions, we need to represent numbers:
\pause
\begin{itemize}[<+->]
\item The set of integers is countably infinite.
\item The set of real numbers is continuous and uncountably infinite.
\item But the set of numbers representable by a computer is finite...
\end{itemize}
\end{frame}
\begin{frame}[t]{Integer Representation}
Given a fixed number of bits (usually 32 or 64), we can create the following mapping:
\pause
\centering
\begin{tabular}{|r|r|}\hline
integral number & bit representation\\\hline\hline
0 & 00000000\\
1 & 00000001\\
2 & 00000010\\
3 & 00000011\\
4 & 00000100\\
\vdots & \vdots\\
254 & 11111110\\
255 & 11111111\\\hline
\end{tabular}
\pause
\begin{itemize}[<+->]
  \item Shift the mapping to store negative numbers, e.g.\ an offset (excess-128) encoding with $00000000_2 = -128$ and $11111111_2 = 127$.
\end{itemize}
% meaning we can represent number up to ~$2.1\cdot 10^9$ and ~$9.2\cdot 10^18$ respectively.
\end{frame}
\begin{frame}[t]{Integer Representation}
\centering
\begin{block}{Binary Integers}
Given $n$ bits, $b_0,...,b_{n-1}\in \{0,1\}$, we map binary values to integer values as follows:
\Large{$$(b_{n-1}b_{n-2}...b_2b_1b_0)_2 = \sum_{i=0}^{n-1} b_i2^i = x$$}
\end{block}
\pause
For example:
\Large{$$0101_2 = 0\cdot2^3 + 1\cdot2^2 + 0\cdot2^1 + 1\cdot2^0 = 5$$}
\end{frame}
\begin{frame}[t]{Real Representations}
\begin{block}{Fixed Point}
Represent real numbers as integers that are scaled by a fixed scaling factor, $d$. For example: Let $d = 1000$, then
$$1.23 = \frac{12300}{d}$$
\end{block}
\pause
\begin{itemize}[<+->]
\item This is equivalent to shifting the decimal.
\item $d$ is usually a multiple of $2$.
\item Restricted by the range of representable integers.
\item \textbf{Observation:} Between 0 and 1, we would usually like a finer discretization, but between 1000 and 1001, we may be ok with a rough discretization.
\end{itemize}
\end{frame}
\begin{frame}[t]{Real Representations}
\begin{block}{Floating Point}
Rewrite a number, $x$, in scientific notation $x = a\cdot 10^b$. Then, $x$ can be stored by storing $a$ as a fixed point and $b$ as an integer. Floating point numbers use a fixed number of digits for $a$, known as the \textit{mantissa}, and $b$, known as the \textit{exponent}:
\pause
\centering
\begin{tabular}{|r|r|r|}\hline
$x$ & $a$ & $b$\\\hline
123.456 & 1.23456 & 2\\
100000 & 1.00000 & 5\\
0.00025 & 2.50000 & -4\\\hline
\end{tabular}
\end{block}
\pause
\begin{itemize}[<+->]
\item Floating point numbers naturally have finer granularity nearer zero.
\end{itemize}
\end{frame}
\begin{frame}[t]{Floating Point Limitations}
\begin{itemize}[<+->]
  \item Typically (IEEE 754 double precision), 52 bits are used for the mantissa, 11 bits for the exponent, and 1 bit for the sign.
\item This results in:
\begin{itemize}[<+->]
\item The largest possible float is $\approx 10^{308}$.
\item The smallest possible float is $\approx 10^{-308}$.
\item The distance between 1.0 and the next largest number is $\approx 10^{-16}$.
\end{itemize}
\item Because we can only represent finite numbers, we must rely on rounding and approximation which can lead to errors if you are not careful.
\end{itemize}
\end{frame}
% \begin{frame}[t]{Rounding in Action}
% Can a $10^-16$ make a difference?
% \begin{itemize}[<+->]
% \item Ca
% \item This results in:
% \begin{itemize}[<+->]
% \item The largest possible float is $\approx 10^{308}$.
% \item The smallest possible float is $\approx 10^{-308}$.
% \item The distance between 1.0 and the next largest number is $\approx 10^-16$.
% \end{itemize}
% \item Because we can only represent finite numbers, we must rely on rounding and approximation.
% \end{itemize}
% \end{frame}
\begin{frame}[t]{Overflow/Underflow}
\pause
\begin{block}{Overflow}
Overflow occurs when an expression results in a number that is too \textbf{large} to be represented.
\end{block}
\pause
\begin{block}{Underflow}
Underflow occurs when an expression results in a number that is too \textbf{small} to be represented.
\end{block}
\pause
\centering
\Huge{Demo}
\end{frame}
\begin{frame}[t]{Representability}
\begin{itemize}[<+->]
\item Not all decimal numbers are representable using a binary, floating point representation.
\item Example: 0.1
\item The set of representable numbers is not closed under standard arithmetic. That is, adding, subtracting, multiplying, or dividing representable numbers may result in an unrepresentable number.
\item Example: 1.0/10.0 = 0.1
\end{itemize}
\pause
\centering
\Huge{Demo}
\end{frame}
\begin{frame}[t]{Working with floating point numbers}
\begin{itemize}[<+->]
\item Scale your variables or work in log space to avoid overflow and underflow.
\item Avoid using == to compare floating point numbers. Instead compare with a tolerance: $|x - y| < \epsilon$
\end{itemize}
\pause
\centering
\Huge{Demo}
\end{frame}
\begin{frame}[t]{Rounding in Action}
Can a $10^{-16}$ make a difference?
\pause
\begin{block}{Patriot Missile Failure}
\begin{itemize}[<+->]
\item Missile defense system failed to target an incoming missile due to a rounding error.
\item The error was caused by counting time in tenths of seconds (+= 0.1).
\item As in our demo, this resulted in accumulating errors because 0.1 is not representable.
\item Incorrect timestamps were then used to incorrectly calculate distance and speed of an incoming missile.
\end{itemize}
\end{block}
\end{frame}
\end{document}
|
{"hexsha": "baa2d1882a088e80e4e09837ca520864cba4e92f", "size": 9876, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "src/Lecture03/lecture.tex", "max_stars_repo_name": "royadams/intro_to_numerical_computing_with_python", "max_stars_repo_head_hexsha": "f31706f691b8a22ad8db19cdb950a0cb1df047f4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-03-18T05:36:19.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-18T05:36:19.000Z", "max_issues_repo_path": "src/Lecture03/lecture.tex", "max_issues_repo_name": "royadams/intro_to_numerical_computing_with_python", "max_issues_repo_head_hexsha": "f31706f691b8a22ad8db19cdb950a0cb1df047f4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/Lecture03/lecture.tex", "max_forks_repo_name": "royadams/intro_to_numerical_computing_with_python", "max_forks_repo_head_hexsha": "f31706f691b8a22ad8db19cdb950a0cb1df047f4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-11-09T20:22:57.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-09T20:22:57.000Z", "avg_line_length": 31.3523809524, "max_line_length": 280, "alphanum_fraction": 0.6756784123, "num_tokens": 3152}
|
Rebol [] ;-- empty script header; this file is a build-time binding generator

;-- C header to parse and the Red/System binding file to produce.
header-file: %include/glfw3.h
output-file: %glfw3.reds

;-- Shared helpers used below (get-red-type, pad, args, err, ...) come from here.
do %common-init.r

;-- Characters valid in hexadecimal literals (used to rewrite 0x.. constants).
ch_hexdigit: charset "0123456789ABCDEF"

;-- Redirect all console printing into the generated output file.
echo output-file

;-- Emit the fixed preamble of the generated binding verbatim.
print {Red/System [
	Title: "Red/System glfw3 binding - A library for OpenGL/Vulkan, window and input"
	Author: "Oldes"
	File: %glfw3.reds
	Rights: "Copyright (C) 2017 David 'Oldes' Oliva. All rights reserved."
	License: "BSD-3 - https://github.com/red/red/blob/master/BSD-3-License.txt"
	Note: {
/*************************************************************************
 * GLFW 3.2 - www.glfw.org
 * A library for OpenGL, window and input
 *------------------------------------------------------------------------
 * Copyright (c) 2002-2006 Marcus Geelnard
 * Copyright (c) 2006-2010 Camilla Berglund <elmindreda@elmindreda.org>
 *
 * This software is provided 'as-is', without any express or implied
 * warranty. In no event will the authors be held liable for any damages
 * arising from the use of this software.
 *
 * Permission is granted to anyone to use this software for any purpose,
 * including commercial applications, and to alter it and redistribute it
 * freely, subject to the following restrictions:
 *
 * 1. The origin of this software must not be misrepresented; you must not
 *    claim that you wrote the original software. If you use this software
 *    in a product, an acknowledgment in the product documentation would
 *    be appreciated but is not required.
 *
 * 2. Altered source versions must be plainly marked as such, and must not
 *    be misrepresented as being the original software.
 *
 * 3. This notice may not be removed or altered from any source
 *    distribution.
 *
 *************************************************************************/
	}
]
#include %../os/definitions.reds ;common aliases and defines
#switch OS [
	Windows [
		#define GLFW3_LIBRARY "glfw3.dll"
		#define GLFW3_CALLING cdecl
	]
	macOS [;@@ not tested!
		#define GLFW3_LIBRARY "glfw3.dylib"
		#define GLFW3_CALLING cdecl
	]
	#default [;@@ not tested!
		#define GLFW3_LIBRARY "glfw3.so"
		#define GLFW3_CALLING cdecl
	]
]
#define GLFWmonitor-ptr! int-ptr!
}

;-- Names of the GLFW callback typedefs. Functions that return one of these
;-- are declared with a plain pointer return type in the #import block.
callback-types: [
	"GLFWerrorfun!"
	"GLFWmonitorfun!"
	"GLFWwindowposfun!"
	"GLFWwindowsizefun!"
	"GLFWwindowclosefun!"
	"GLFWwindowrefreshfun!"
	"GLFWwindowfocusfun!"
	"GLFWwindowiconifyfun!"
	"GLFWframebuffersizefun!"
	"GLFWkeyfun!"
	"GLFWcharfun!"
	"GLFWcharmodsfun!"
	"GLFWmousebuttonfun!"
	"GLFWcursorposfun!"
	"GLFWcursorenterfun!"
	"GLFWscrollfun!"
	"GLFWdropfun!"
	"GLFWglproc!"
	"GLFWvkproc!"
	"GLFWjoystickfun!"
]
;-- Split a C callback argument list (e.g. "int,float") into the shared
;-- `args` block (from common-init.r) as pairs of [name red-type].
;-- Callback typedefs carry no parameter names, so arg1, arg2, ... are
;-- synthesised. A C "(void)" list yields an empty block.
parse-callback-args: func[raw-args [string!] /local n][
	clear args
	if "void" = raw-args [return args]
	n: 1
	foreach type parse/all raw-args "," [
		repend args [
			join "arg" n        ;-- synthesise a positional parameter name
			get-red-type type   ;-- map the C type to its Red/System type
		]
		n: n + 1
	]
	args
]
;-- Split a C prototype argument list into the shared `args` block as pairs
;-- of [name red-type]. Unlike callbacks, prototypes carry names: the name is
;-- the text after the last space of each declaration, the type is what
;-- precedes it. NOTE(review): `p` is not declared /local and leaks into the
;-- global context -- consider adding it to the /local list.
parse-args: func[raw-args [string!] /local n][
	clear args
	if "void" = raw-args [return args]
	foreach val parse/all raw-args "," [
		p: find/last val " "
		repend args [
			copy next p                 ;-- parameter name
			get-red-type copy/part val p ;-- everything before it is the type
		]
	]
	args
]
;-- Main driver: scan the C header once and emit the Red/System binding.
;-- `in-import?` tracks whether we are currently inside an emitted
;-- #import [...] block so it can be closed before any non-import output.
in-import?: false
parse/all read header-file [
	thru {/*! @name GLFW version macros}
	any [
		;-- Doxygen group headers become section banner comments.
		"/*! @defgroup " thru #" " copy title to lf 1 skip (
			print "^/^/;-------------------------------------------"
			print [";--" title]
			print ";-------------------------------------------^/^/"
		)
		|
		;-- Doxygen @brief blocks: capture the one-line summary (`brief`) and
		;-- any longer description (`brief-desc`) for the next declaration.
		"/*! @brief " copy tmp thru {*/} (
			brief: none
			brief-desc: none
			parse/all tmp [
				[ copy brief to {^/ *^/} thru {*^/}
				| copy brief to {^/ */}
				]
				copy brief-desc to end
			]
			replace/all brief "^/ * " " "
			insert brief-desc #"^/"
			replace brief-desc "^/ */" ""
			replace/all brief-desc "^/ *" "^/; "
			trim/head/tail brief
			trim/head/tail brief-desc
			;parse brief-desc ["; " brief copy brief-desc to end]
			if brief-desc [trim/head/tail brief-desc]
			;-- Drop the description when it is empty or just repeats the brief.
			if any [
				all [string? brief-desc empty? brief-desc]
				brief = brief-desc
			] [brief-desc: none]
			;print [tab brief]
			;probe brief-desc
		)
		|
		;-- #define lines: rewrite 0xNN hex literals into Red/System NNh form
		;-- and turn trailing C comments into Rebol comments.
		"^/#define " copy def to #"^/" (
			if in-import? [ print "]]^/" in-import?: false ]
			if brief [ print ["^/;- " brief] ]
			if brief-desc [ print brief-desc ]
			parse def [ to "0x" s: (e: insert remove/part s 2 " ") e: some ch_hexdigit s: (insert s #"h")]
			replace def "/*" ";"
			replace/all def " */" ""
			print ["#define " def]
			brief: brief-desc: none
		)
		|
		;-- Big banner comments become double-line section separators.
		"/****************************" thru "^/ * " copy header to "^/ **********" (
			if in-import? [ print "]]^/" in-import?: false ]
			print "^/^/;-=================================================="
			print [";-" header ]
			print ";-==================================================^/^/"
		)
		|
		;-- Struct typedefs: a full definition becomes an alias struct! block;
		;-- an opaque forward declaration becomes a plain pointer #define.
		"typedef struct " copy struct some ch_name opt ch_space [
			#"{" copy def to "^/}" (
				if in-import? [ print "]]^/" in-import?: false ]
				trim/head/tail def
				print rejoin [lf struct "!: alias struct! ["]
				if brief [ prin [";- " brief] ]
				if brief-desc [ print brief-desc ]
				;probe def
				;-- Walk the struct body member by member, mapping each C
				;-- declaration to a [name [red-type]] field line.
				parse/all def [
					some [
						any ch_space [
							"/*! " copy comm to lf (print ["^-;" comm])
							| "*/" opt #" "
							| s:
							[
								"char" any #" " "const" any #" "
								|
								"unsigned" any #" "
								opt ["int" | "char" | "short"] any #" "
								|
								opt "const" any #" "
								opt ["unsigned long" | "unsigned"] any #" "
								some ch_name any #" "
							]
							any #"*"
							e: (type: trim/tail copy/part s e)
							any ch_space
							copy name some ch_name #";" (
								print [#"^-" name "[" get-red-type type "]"]
							)
						]
					]
				]
				print "]"
			)
			|
			(
				;-- Opaque struct: only ever handled through a pointer.
				prin rejoin ["#define " struct "!^-[pointer! [integer!]]"]
				either brief [
					print ["^-;" brief]
					brief: none
				][ prin lf]
			)
		]
		|
		;-- Callback typedefs: emit a #define of a function! type.
		"typedef void (*" any #" " copy callback some ch_name any #" " ")" any #" " #"(" copy raw-args to ");"
		(
			if in-import? [
				print "]]^/"
				in-import?: false
			]
			parse-callback-args raw-args
			prin rejoin ["#define " callback "! [ function! ["]
			either brief [ print ["^/;- " brief] ] [ prin lf ]
			if brief-desc [ print brief-desc ]
			;-- Column width: at least 7, or the longest argument name.
			n: 7 foreach [name type] args [ if n < length? name [n: length? name] ]
			foreach [arg type] args [
				print rejoin [ #"^-" pad arg n " [ " type " ]" ]
			]
			print "]]"
			brief: brief-desc: none
		)
		|
		;-- Exported functions (GLFWAPI ...): emit entries of the #import block,
		;-- opening it lazily on the first function encountered.
		"GLFWAPI" any #" " opt ["const" some #" "] copy ret to #" " any #" " copy name some ch_name #"(" copy raw-args to ");" (
			unless in-import? [
				print "^/#import [ GLFW3_LIBRARY GLFW3_CALLING ["
				in-import?: true
			]
			;print ["!!!!!!!!!!!!!!!!!! " mold ret mold name]
			parse-args raw-args
			print rejoin ["^-;@@ " ret #" " name #"(" raw-args #")"]
			prin rejoin [#"^-" name ": " mold name "["]
			either brief [ print ["^/^- ;- " brief] ] [ prin lf ]
			if brief-desc [
				replace/all brief-desc "; " "^- ; "
				replace/all brief-desc "^/; " "^/^- ; "
				print brief-desc
			]
			n: 7 foreach [name type] args [ if n < length? name [n: length? name] ]
			foreach [arg type] args [
				print rejoin [ "^-^-" pad arg n " [ " type " ]" ]
			]
			if ret <> "void" [
				ret: get-red-type ret
				;-- Callback-typed returns are declared as raw pointers.
				if find callback-types ret [ret: "pointer! [integer!]"]
				print rejoin ["^-^-return: [ " ret " ]"]
			]
			print "^-]"
		)
		| 1 skip
	]
]
;-- Stop redirecting output, then report the unique unknown C types that
;-- get-red-type collected into `err` during the run.
echo none
err: unique err
new-line/all err true
probe err
|
{"hexsha": "3b21b142c05a8c5f48118f4a5d66eb0b87a15e1e", "size": 7398, "ext": "r", "lang": "R", "max_stars_repo_path": "parse-glfw3.r", "max_stars_repo_name": "Oldes/parse-opengl-headers", "max_stars_repo_head_hexsha": "f13fab98e56da767108f9eb3c268703b2fea0fd6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "parse-glfw3.r", "max_issues_repo_name": "Oldes/parse-opengl-headers", "max_issues_repo_head_hexsha": "f13fab98e56da767108f9eb3c268703b2fea0fd6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "parse-glfw3.r", "max_forks_repo_name": "Oldes/parse-opengl-headers", "max_forks_repo_head_hexsha": "f13fab98e56da767108f9eb3c268703b2fea0fd6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.8043478261, "max_line_length": 122, "alphanum_fraction": 0.5428494188, "num_tokens": 2189}
|
"""Text and number formatting operations"""
from copy import deepcopy
import json
import numpy as np
def round_all_numbers_in_dict(d, rounding_digits=2, outplace=True):
    """ Return a new version of dict d with all floats rounded to N digits."""
    # Work on a deep copy unless the caller asked for in-place mutation.
    target = deepcopy(d) if outplace else d
    for key, value in target.items():
        if isinstance(value, float):
            target[key] = np.round(value, rounding_digits)
        if isinstance(value, dict):
            # Recurse in place: the copy (if any) was already made above.
            round_all_numbers_in_dict(value, rounding_digits, outplace=False)
    return target
def dict_to_pretty_string(d, rounding_digits=2, indent=2):
    """Return a nicely JSON-like formatted string to print a dict."""
    rounded = round_all_numbers_in_dict(d, rounding_digits)
    # Dump as indented JSON, then strip the JSON punctuation ({, }, ", ,)
    # so the result reads like plain nested text.
    dumped = json.dumps(rounded, indent=indent)
    return dumped.translate(str.maketrans("", "", '{}",'))
def score_to_formatted_string(score, characters=9):
    """Transform a number (score) into a best-format string.

    The format will be either int (2234), float (10.23) or scientific
    (1.20E+05), whichever is shorter. The score is then padded with left
    whitespaces to obtain the desired number of ``characters``.
    """
    # Integer-valued scores print without a decimal part.
    raw = str(int(score) if (int(score) == score) else score)
    as_float = "%.02f" % score
    # BUG FIX: the format string used to be "%.02E." which appended a
    # spurious trailing period to the scientific form (e.g. "1.20E+05."),
    # contradicting the documented "1.20E5"-style output.
    as_eng = "%.02E" % score
    # min() with key=len keeps the first of the shortest candidates,
    # so the plain representation wins ties.
    return min([raw, as_float, as_eng], key=len).rjust(characters)
|
{"hexsha": "f4a0e5e049b29afcb3e31c88724705d5ec085c0a", "size": 1378, "ext": "py", "lang": "Python", "max_stars_repo_path": "dnachisel/biotools/formatting_operations.py", "max_stars_repo_name": "simone-pignotti/DnaChisel", "max_stars_repo_head_hexsha": "b7f0f925c9daefcc5fec903a13cfa74c3b726a7a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 124, "max_stars_repo_stars_event_min_datetime": "2017-11-14T14:42:25.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T08:02:07.000Z", "max_issues_repo_path": "dnachisel/biotools/formatting_operations.py", "max_issues_repo_name": "simone-pignotti/DnaChisel", "max_issues_repo_head_hexsha": "b7f0f925c9daefcc5fec903a13cfa74c3b726a7a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 65, "max_issues_repo_issues_event_min_datetime": "2017-11-15T07:25:38.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-31T10:38:45.000Z", "max_forks_repo_path": "dnachisel/biotools/formatting_operations.py", "max_forks_repo_name": "simone-pignotti/DnaChisel", "max_forks_repo_head_hexsha": "b7f0f925c9daefcc5fec903a13cfa74c3b726a7a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 31, "max_forks_repo_forks_event_min_datetime": "2018-10-18T12:59:47.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-11T16:54:43.000Z", "avg_line_length": 37.2432432432, "max_line_length": 78, "alphanum_fraction": 0.6806966618, "include": true, "reason": "import numpy", "num_tokens": 335}
|
C>
C> \brief Calculate the LdR decomposition of the overlap matrix
C>
C> This subroutine is based on the function `wfn1_overlap_bo`.
C> Whereas `wfn1_overlap_bo` only calculates the overlap between
C> two determinants, this routine also calculates the transformation
C> matrices of the orbitals as well as the diagonal matrix. These
C> additional matrices are needed to calculate the transition
C> density matrices between the two determinants involved.
C>
C> In short, given a non-symmetric overlap matrix \f$S\f$ this
C> subroutine calculates the left and right transformation matrices
C> \f$L\f$ and \f$R\f$ and the diagonal \f$d\f$ such that
C> \f{eqnarray*}{
C> d &=& L\cdot S \cdot R
C> \f}
C> In addition the sign of the overlap is also provided. See [1] for
C> a discussion of this approach.
C>
C> This routine is essentially a copy of the `subroutine determ` in
C> GAMESS-UK, written by J. Verbeek and J.H. van Lenthe.
C>
C> See also [2].
C>
C> ### References ###
C>
C> [1] Jacob Verbeek, Joop H. van Lenthe,
C> "On the evaluation of non-orthogonal matrix elements",
C> J. Mol. Struct. (Theochem) <b>229</b>, 115-137 (1991), DOI:
C> <a href="http://dx.doi.org/10.1016/0166-1280(91)90141-6">
C> 10.1016/0166-1280(91)90141-6</a>.
C>
C> [2] Joop H. van Lenthe, Gabriel G. Balint-Kurti,
C> "The valence-bond self-consistent field method (VB-SCF):
C> Theory and test calculations", J. Chem. Phys. <b>78</b>,
C> 5699-5713 (1983), DOI:
C> <a href="http://dx.doi.org/10.1063/1.445451">
C> 10.1063/1.445451</a>.
C>
      subroutine wfn1_overlap_ldr2(ne,orbov,lmat,rmat,dvec,ipar,tmp,m2)
      implicit none
c
#include "errquit.fh"
c
      integer ne !< [Input] The number of particles
      integer ipar !< [Output] The parity (sign) accumulated by pivoting
c
      double precision orbov(ne,ne) !< [Input] The overlaps of extended
                                    !< orbitals
      double precision lmat(ne,ne) !< [Output] The left transformation
                                   !< matrix
      double precision rmat(ne,ne) !< [Output] The right transformation
                                   !< matrix
      double precision dvec(ne) !< [Output] The diagonal elements of the
                                !< factorisation (dvec(i) = tmp(i,i))
c
      double precision tmp(ne,ne) !< [Scratch]
      double precision m2(ne,*) !< [Scratch]
c     NOTE(review): m2 is declared as scratch but never referenced in
c     this routine; presumably kept for interface compatibility.
c
      double precision det !< The value of the matrix determinant
      double precision pivo !< The value of the pivot
      double precision cridep !< Criterion for singularity
      parameter (cridep = 1.0d-13)
c
      integer i, j, k, l !< Counters
      integer irank !< The rank of the matrix done so far
c
c     Work on a copy of the overlap matrix; the elimination
c     coefficients accumulate in the off-diagonal parts of tmp and the
c     pivoted diagonal becomes dvec.
c
      call dcopy(ne*ne,orbov,1,tmp,1)
      irank = 0
      ipar = 1
      det = 1.0d0
      if (ne.eq.0) return
c
c     Bring the largest remaining element onto the diagonal before
c     each elimination step; ipar tracks the sign of the permutations.
c
      call wfn1_piv(tmp,ne,ne,1,pivo,ipar)
      if (dabs(pivo).lt.cridep) then
c       Singular (zero-overlap) case: bail out with zero determinant.
        irank = 0
        det = 0.0d0
        return
      end if
      irank = 1
      do 40 i = 1,ne-1
        det = det * tmp(i,i)
        dvec(i) = tmp(i,i)
        do 30 j = i+1,ne
c.....
c.....form i-th row of r
c.....
          tmp(i,j) = -tmp(i,j)/tmp(i,i)
c.....
c.....adapt s-matrix to r
c.....
          do 10 k = i+1,ne
            tmp(k,j) = tmp(k,j) + tmp(k,i)*tmp(i,j)
 10       continue
c.....
c.....adapt r-matrix to itself
c.....
          do 20 l = 1,i-1
            tmp(l,j) = tmp(l,j) + tmp(l,i)*tmp(i,j)
 20       continue
 30     continue
        call wfn1_piv(tmp,ne,ne,i+1,pivo,ipar)
        if (dabs(pivo).lt.cridep) then
          det = 0.0d0
          return
        end if
        irank = irank + 1
 40   continue
      det = det * tmp(ne,ne) * ipar
      dvec(ne) = tmp(ne,ne)
c     NOTE(review): det and irank are computed but are local variables
c     only; they are not returned to the caller — presumably diagnostic
c     leftovers from the GAMESS-UK original (see header). Verify before
c     relying on the determinant value.
c
c     Assemble L and R: unit diagonals, with the strict lower triangle
c     of tmp copied into lmat and the strict upper triangle into rmat.
c
      call dfill(ne*ne,0.0d0,lmat,1)
      call dfill(ne*ne,0.0d0,rmat,1)
      do i = 1, ne
        lmat(i,i) = 1.0d0
        rmat(i,i) = 1.0d0
        do j = i+1, ne
          rmat(i,j) = tmp(i,j)
        enddo
        do j = 1, i-1
          lmat(i,j) = tmp(i,j)
        enddo
      enddo
c
      return
      end
C>
C> \brief Swap rows and columns to get maximum values on the diagonal
C> of the matrix
C>
C> This routine swap rows and columns of a matrix to place the
C> maximum element of the remaining matrix block on the diagonal.
C>
C> This subroutine was obtained from GAMESS-UK, with permission by
C> Joop H. van Lenthe (Jan, 2014).
C>
      subroutine wfn1_piv(a,nr,nc,n,pivo,ipar)
      implicit none
c
      integer nr !< [Input] The number of rows
      integer nc !< [Input] The number of columns
      integer n !< [Input] The rank of the first element of the
                !< remaining block
      double precision a(nr,nc) !< [In/Output] The matrix to operate on
      double precision pivo !< [Output] The pivot found
      integer ipar !< [In/Output] The parity: its sign is flipped for
                   !< each row swap and each column swap performed
c
c     Local
c
      integer irt !< The current best row number
      integer ict !< The current best column number
      integer i, j, k, l !< Counters
c
      double precision aa !< Temporary variable
c
      pivo = 0.0d0
      irt = n
      ict = n
c
c... search pivot: the element of largest magnitude in the trailing
c... block a(n:nr, n:nc)
c
      do j = n,nc
        do i = n,nr
          if (dabs(pivo).lt.dabs(a(i,j))) then
            pivo = a(i,j)
            irt = i
            ict = j
          end if
        enddo ! i
      enddo ! j
c
      if (irt.ne.n) then
c
c... permute rows (full rows, not just the trailing block, so the
c... accumulated elimination coefficients stay consistent)
c
        do k = 1,nc
          aa = a(irt,k)
          a(irt,k) = a(n,k)
          a(n,k) = aa
        enddo
        ipar =-ipar
c
      end if
c
      if (ict.ne.n) then
c
c... permute columns
c
        do l = 1,nr
          aa = a(l,ict)
          a(l,ict) = a(l,n)
          a(l,n) = aa
        enddo
        ipar =-ipar
c
      end if
      return
      end
|
{"hexsha": "a5d3aae79685d7bebbfc387ba321ec350c1126c2", "size": 5733, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "src/rdmft/recycling/wfn1/overlap.f", "max_stars_repo_name": "miroi/nwchem", "max_stars_repo_head_hexsha": "c252de495e545a244cf6a7b806566c33f0a225ed", "max_stars_repo_licenses": ["ECL-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-10-30T03:12:57.000Z", "max_stars_repo_stars_event_max_datetime": "2018-10-30T03:12:57.000Z", "max_issues_repo_path": "src/rdmft/recycling/wfn1/overlap.f", "max_issues_repo_name": "miroi/nwchem", "max_issues_repo_head_hexsha": "c252de495e545a244cf6a7b806566c33f0a225ed", "max_issues_repo_licenses": ["ECL-2.0"], "max_issues_count": 7, "max_issues_repo_issues_event_min_datetime": "2018-11-01T02:55:29.000Z", "max_issues_repo_issues_event_max_datetime": "2019-01-16T14:49:37.000Z", "max_forks_repo_path": "src/rdmft/recycling/wfn1/overlap.f", "max_forks_repo_name": "miroi/nwchem", "max_forks_repo_head_hexsha": "c252de495e545a244cf6a7b806566c33f0a225ed", "max_forks_repo_licenses": ["ECL-2.0"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2018-11-01T03:14:14.000Z", "max_forks_repo_forks_event_max_datetime": "2019-01-16T11:34:00.000Z", "avg_line_length": 29.1015228426, "max_line_length": 72, "alphanum_fraction": 0.5592185592, "num_tokens": 1875}
|
from typing import Dict, List, Tuple, Optional
from copy import copy
from functools import partial
from scipy import interpolate
from vnpy.event import Event, EventEngine
from vnpy.trader.engine import MainEngine
from vnpy.trader.ui import QtWidgets, QtCore, QtGui
from vnpy.trader.event import EVENT_TICK, EVENT_TIMER, EVENT_TRADE
from vnpy.trader.object import TickData, TradeData, LogData
from vnpy.trader.utility import save_json, load_json
from ..engine import OptionEngine, OptionAlgoEngine
from ..base import (
EVENT_OPTION_ALGO_PRICING,
EVENT_OPTION_ALGO_STATUS,
EVENT_OPTION_ALGO_LOG,
PortfolioData,
ChainData,
OptionData,
InstrumentData
)
from .monitor import (
MonitorCell, IndexCell, BidCell, AskCell, PosCell,
COLOR_WHITE, COLOR_BLACK
)
from ..algo import ElectronicEyeAlgo
class AlgoSpinBox(QtWidgets.QSpinBox):
    """Centered integer spin box for algo parameters (-999999 to 999999)."""

    def __init__(self) -> None:
        """Configure the value range and center the text."""
        super().__init__()
        self.setAlignment(QtCore.Qt.AlignCenter)
        self.setMinimum(-999999)
        self.setMaximum(999999)

    def get_value(self) -> int:
        """Return the current integer value."""
        return self.value()

    def set_value(self, value: int) -> None:
        """Overwrite the current integer value."""
        self.setValue(value)

    def update_status(self, active: bool) -> None:
        """Lock the widget for editing while the algo is active."""
        self.setEnabled(not active)
class AlgoPositiveSpinBox(AlgoSpinBox):
    """Variant of AlgoSpinBox restricted to non-negative integers."""

    def __init__(self) -> None:
        """Initialize the base spin box, then clamp the minimum to zero."""
        super().__init__()
        self.setMinimum(0)
class AlgoDoubleSpinBox(QtWidgets.QDoubleSpinBox):
    """Centered one-decimal float spin box for algo parameters (0 to 9999.9)."""

    def __init__(self) -> None:
        """Configure precision, value range and alignment."""
        super().__init__()
        self.setAlignment(QtCore.Qt.AlignCenter)
        self.setDecimals(1)
        self.setMinimum(0)
        self.setMaximum(9999.9)

    def get_value(self) -> float:
        """Return the current float value."""
        return self.value()

    def set_value(self, value: float) -> None:
        """Overwrite the current float value."""
        self.setValue(value)

    def update_status(self, active: bool) -> None:
        """Lock the widget for editing while the algo is active."""
        self.setEnabled(not active)
class AlgoDirectionCombo(QtWidgets.QComboBox):
    """Combo box selecting the allowed trading direction of the algo.

    Index 0 = both directions, 1 = long only, 2 = short only.
    """

    def __init__(self) -> None:
        """Populate the three fixed direction choices."""
        super().__init__()
        self.addItems([
            "双向",
            "做多",
            "做空"
        ])

    def get_value(self) -> Dict[str, bool]:
        """Translate the current selection into long/short permission flags."""
        text: str = self.currentText()
        if text == "双向":
            return {
                "long_allowed": True,
                "short_allowed": True
            }
        if text == "做多":
            return {
                "long_allowed": True,
                "short_allowed": False
            }
        return {
            "long_allowed": False,
            "short_allowed": True
        }

    def set_value(self, value: dict) -> None:
        """Select the combo index matching the given permission flags."""
        if value["long_allowed"]:
            index = 0 if value["short_allowed"] else 1
        else:
            index = 2
        self.setCurrentIndex(index)

    def update_status(self, active: bool) -> None:
        """Lock the widget while the algo is active."""
        self.setEnabled(not active)
class AlgoPricingButton(QtWidgets.QPushButton):
    """Toggle button that starts or stops pricing for one option contract."""

    def __init__(self, vt_symbol: str, manager: "ElectronicEyeManager") -> None:
        """Bind the button to a contract symbol and its managing widget."""
        super().__init__()
        self.vt_symbol: str = vt_symbol
        self.manager: ElectronicEyeManager = manager
        self.active: bool = False
        self.setText("N")
        self.clicked.connect(self.on_clicked)

    def on_clicked(self) -> None:
        """Flip pricing on/off for the bound contract based on current state."""
        if self.active:
            self.manager.stop_algo_pricing(self.vt_symbol)
        else:
            self.manager.start_algo_pricing(self.vt_symbol)

    def update_status(self, active: bool) -> None:
        """Refresh the Y/N label from the pricing state."""
        self.active = active
        self.setText("Y" if active else "N")
class AlgoTradingButton(QtWidgets.QPushButton):
    """Toggle button that starts or stops trading for one option contract."""

    def __init__(self, vt_symbol: str, manager: "ElectronicEyeManager") -> None:
        """Bind the button to a contract symbol and its managing widget."""
        super().__init__()
        self.vt_symbol: str = vt_symbol
        self.manager: ElectronicEyeManager = manager
        self.active: bool = False
        self.setText("N")
        self.clicked.connect(self.on_clicked)

    def on_clicked(self) -> None:
        """Flip trading on/off for the bound contract based on current state."""
        if self.active:
            self.manager.stop_algo_trading(self.vt_symbol)
        else:
            self.manager.start_algo_trading(self.vt_symbol)

    def update_status(self, active: bool) -> None:
        """Refresh the Y/N label from the trading state."""
        self.active = active
        self.setText("Y" if active else "N")
class ElectronicEyeMonitor(QtWidgets.QTableWidget):
    """Table showing one electronic-eye algo row per option contract.

    For every chain in the portfolio the table has a chain header row
    followed by one row per strike: call columns on the left, the strike
    price in the middle, and the put columns mirrored on the right.
    Algo parameters entered in the table are persisted to a per-portfolio
    JSON file.
    """
    # Qt signals used to marshal engine events onto the GUI thread.
    signal_tick: QtCore.Signal = QtCore.Signal(Event)
    signal_pricing: QtCore.Signal = QtCore.Signal(Event)
    signal_status: QtCore.Signal = QtCore.Signal(Event)
    signal_trade: QtCore.Signal = QtCore.Signal(Event)
    # Column spec: display-only monitor cells plus editable parameter
    # widgets (spin boxes, combo, start/stop buttons) per contract.
    headers: List[Dict] = [
        {"name": "bid_volume", "display": "买量", "cell": BidCell},
        {"name": "bid_price", "display": "买价", "cell": BidCell},
        {"name": "ask_price", "display": "卖价", "cell": AskCell},
        {"name": "ask_volume", "display": "卖量", "cell": AskCell},
        {"name": "algo_bid_price", "display": "目标\n买价", "cell": BidCell},
        {"name": "algo_ask_price", "display": "目标\n卖价", "cell": AskCell},
        {"name": "algo_spread", "display": "价差", "cell": MonitorCell},
        {"name": "ref_price", "display": "理论价", "cell": MonitorCell},
        {"name": "pricing_impv", "display": "定价\n隐波", "cell": MonitorCell},
        {"name": "net_pos", "display": "净持仓", "cell": PosCell},
        {"name": "price_spread", "display": "价格\n价差", "cell": AlgoDoubleSpinBox},
        {"name": "volatility_spread", "display": "隐波\n价差", "cell": AlgoDoubleSpinBox},
        {"name": "max_pos", "display": "持仓\n范围", "cell": AlgoPositiveSpinBox},
        {"name": "target_pos", "display": "目标\n持仓", "cell": AlgoSpinBox},
        {"name": "max_order_size", "display": "最大\n委托", "cell": AlgoPositiveSpinBox},
        {"name": "direction", "display": "方向", "cell": AlgoDirectionCombo},
        {"name": "pricing_active", "display": "定价", "cell": AlgoPricingButton},
        {"name": "trading_active", "display": "交易", "cell": AlgoTradingButton},
    ]
    def __init__(self, option_engine: OptionEngine, portfolio_name: str) -> None:
        """Store engine references, build the table and load saved settings."""
        super().__init__()
        self.option_engine: OptionEngine = option_engine
        self.event_engine: EventEngine = option_engine.event_engine
        self.main_engine: MainEngine = option_engine.main_engine
        self.algo_engine: OptionAlgoEngine = option_engine.algo_engine
        self.portfolio_name: str = portfolio_name
        self.setting_filename: str = f"{portfolio_name}_electronic_eye.json"
        # vt_symbol -> {column name -> cell widget/item}
        self.cells: Dict[str, Dict] = {}
        self.init_ui()
        self.register_event()
        self.load_setting()
    def init_ui(self) -> None:
        """Build the full table layout and populate all cells."""
        self.setWindowTitle("电子眼")
        self.verticalHeader().setVisible(False)
        self.setEditTriggers(self.NoEditTriggers)
        # Set table row and column numbers
        portfolio: PortfolioData = self.option_engine.get_portfolio(self.portfolio_name)
        row_count: int = 0
        for chain in portfolio.chains.values():
            # one header row per chain plus one row per strike index
            row_count += (1 + len(chain.indexes))
        self.setRowCount(row_count)
        # call columns + strike column + mirrored put columns
        column_count: int = len(self.headers) * 2 + 1
        self.setColumnCount(column_count)
        call_labels: list = [d["display"] for d in self.headers]
        put_labels: list = copy(call_labels)
        put_labels.reverse()
        labels: list = call_labels + ["行权价"] + put_labels
        self.setHorizontalHeaderLabels(labels)
        # Init cells
        strike_column: int = len(self.headers)
        current_row: int = 0
        chain_symbols: list = list(portfolio.chains.keys())
        chain_symbols.sort()
        for chain_symbol in chain_symbols:
            chain: ChainData = portfolio.get_chain(chain_symbol)
            # chain header row: show the chain symbol in the strike column
            self.setItem(
                current_row,
                strike_column,
                IndexCell(chain.chain_symbol.split(".")[0])
            )
            for index in chain.indexes:
                call: OptionData = chain.calls[index]
                put: OptionData = chain.puts[index]
                current_row += 1
                # Call cells
                call_cells: dict = {}
                for column, d in enumerate(self.headers):
                    cell_type = d["cell"]
                    if issubclass(cell_type, QtWidgets.QPushButton):
                        # buttons need the contract symbol and this monitor
                        cell = cell_type(call.vt_symbol, self)
                    else:
                        cell = cell_type()
                    call_cells[d["name"]] = cell
                    if isinstance(cell, QtWidgets.QTableWidgetItem):
                        self.setItem(current_row, column, cell)
                    else:
                        self.setCellWidget(current_row, column, cell)
                self.cells[call.vt_symbol] = call_cells
                # Put cells
                put_cells: dict = {}
                put_headers: list = copy(self.headers)
                put_headers.reverse()
                for column, d in enumerate(put_headers):
                    # put columns start to the right of the strike column
                    column += (strike_column + 1)
                    cell_type = d["cell"]
                    if issubclass(cell_type, QtWidgets.QPushButton):
                        cell = cell_type(put.vt_symbol, self)
                    else:
                        cell = cell_type()
                    put_cells[d["name"]] = cell
                    if isinstance(cell, QtWidgets.QTableWidgetItem):
                        self.setItem(current_row, column, cell)
                    else:
                        self.setCellWidget(current_row, column, cell)
                self.cells[put.vt_symbol] = put_cells
                # Strike cell
                index_cell: IndexCell = IndexCell(str(call.chain_index))
                self.setItem(current_row, strike_column, index_cell)
            # Move to next row
            current_row += 1
        self.resizeColumnsToContents()
        # Update all net pos and tick cells
        for vt_symbol in self.cells.keys():
            self.update_net_pos(vt_symbol)
            tick: Optional[TickData] = self.main_engine.get_tick(vt_symbol)
            if tick:
                self.update_tick(tick)
    def load_setting(self) -> None:
        """Restore per-contract algo parameters from the JSON setting file."""
        fields: list = [
            "price_spread",
            "volatility_spread",
            "max_pos",
            "target_pos",
            "max_order_size",
            "direction"
        ]
        setting: dict = load_json(self.setting_filename)
        for vt_symbol, cells in self.cells.items():
            buf: Optional[dict] = setting.get(vt_symbol, None)
            if buf:
                for field in fields:
                    cells[field].set_value(buf[field])
    def save_setting(self) -> None:
        """Persist per-contract algo parameters to the JSON setting file."""
        fields: list = [
            "price_spread",
            "volatility_spread",
            "max_pos",
            "target_pos",
            "max_order_size",
            "direction"
        ]
        setting: dict = {}
        for vt_symbol, cells in self.cells.items():
            buf: dict = {}
            for field in fields:
                buf[field] = cells[field].get_value()
            setting[vt_symbol] = buf
        save_json(self.setting_filename, setting)
    def register_event(self) -> None:
        """Connect Qt signals to handlers and subscribe to engine events."""
        self.signal_pricing.connect(self.process_pricing_event)
        self.signal_status.connect(self.process_status_event)
        self.signal_tick.connect(self.process_tick_event)
        self.signal_trade.connect(self.process_trade_event)
        self.event_engine.register(
            EVENT_OPTION_ALGO_PRICING,
            self.signal_pricing.emit
        )
        self.event_engine.register(
            EVENT_OPTION_ALGO_STATUS,
            self.signal_status.emit
        )
        self.event_engine.register(
            EVENT_TICK,
            self.signal_tick.emit
        )
        self.event_engine.register(
            EVENT_TRADE,
            self.signal_trade.emit
        )
    def process_tick_event(self, event: Event) -> None:
        """Handle a tick event on the GUI thread."""
        tick: TickData = event.data
        self.update_tick(tick)
    def update_tick(self, tick: TickData) -> None:
        """Refresh bid/ask price and volume cells for one contract."""
        cells: Optional[dict] = self.cells.get(tick.vt_symbol, None)
        if not cells:
            # tick for a contract not shown in this table
            return
        cells["bid_price"].setText(str(tick.bid_price_1))
        cells["ask_price"].setText(str(tick.ask_price_1))
        cells["bid_volume"].setText(str(tick.bid_volume_1))
        cells["ask_volume"].setText(str(tick.ask_volume_1))
    def process_status_event(self, event: Event) -> None:
        """Enable/disable the parameter widgets according to the algo state."""
        algo: ElectronicEyeAlgo = event.data
        cells: dict = self.cells[algo.vt_symbol]
        # pricing parameters lock while pricing is active
        cells["price_spread"].update_status(algo.pricing_active)
        cells["volatility_spread"].update_status(algo.pricing_active)
        cells["pricing_active"].update_status(algo.pricing_active)
        # trading parameters lock while trading is active
        cells["max_pos"].update_status(algo.trading_active)
        cells["target_pos"].update_status(algo.trading_active)
        cells["max_order_size"].update_status(algo.trading_active)
        cells["direction"].update_status(algo.trading_active)
        cells["trading_active"].update_status(algo.trading_active)
    def process_pricing_event(self, event: Event) -> None:
        """Show the latest algo pricing, or clear the cells when no ref price."""
        algo: ElectronicEyeAlgo = event.data
        cells: dict = self.cells[algo.vt_symbol]
        if algo.ref_price:
            cells["algo_bid_price"].setText(str(algo.algo_bid_price))
            cells["algo_ask_price"].setText(str(algo.algo_ask_price))
            cells["algo_spread"].setText(str(algo.algo_spread))
            cells["ref_price"].setText(str(algo.ref_price))
            cells["pricing_impv"].setText(f"{algo.pricing_impv * 100:.2f}")
        else:
            cells["algo_bid_price"].setText("")
            cells["algo_ask_price"].setText("")
            cells["algo_spread"].setText("")
            cells["ref_price"].setText("")
            cells["pricing_impv"].setText("")
    def process_trade_event(self, event: Event) -> None:
        """Refresh the net position cell after a trade."""
        trade: TradeData = event.data
        self.update_net_pos(trade.vt_symbol)
    def update_net_pos(self, vt_symbol: str) -> None:
        """Show the current net position of one contract."""
        cells: Optional[dict] = self.cells.get(vt_symbol, None)
        if not cells:
            return
        option: InstrumentData = self.option_engine.get_instrument(vt_symbol)
        cells["net_pos"].setText(str(option.net_pos))
    def start_algo_pricing(self, vt_symbol: str) -> None:
        """Start pricing for one contract with the spreads from its row."""
        cells: dict = self.cells[vt_symbol]
        params: dict = {}
        params["price_spread"] = cells["price_spread"].get_value()
        params["volatility_spread"] = cells["volatility_spread"].get_value()
        self.algo_engine.start_algo_pricing(vt_symbol, params)
    def stop_algo_pricing(self, vt_symbol: str) -> None:
        """Stop pricing for one contract."""
        self.algo_engine.stop_algo_pricing(vt_symbol)
    def start_algo_trading(self, vt_symbol: str) -> None:
        """Start trading for one contract with the parameters from its row."""
        cells: dict = self.cells[vt_symbol]
        # direction combo yields the long/short permission flags dict
        params = cells["direction"].get_value()
        for name in [
            "max_pos",
            "target_pos",
            "max_order_size"
        ]:
            params[name] = cells[name].get_value()
        self.algo_engine.start_algo_trading(vt_symbol, params)
    def stop_algo_trading(self, vt_symbol: str) -> None:
        """Stop trading for one contract."""
        self.algo_engine.stop_algo_trading(vt_symbol)
class ElectronicEyeManager(QtWidgets.QWidget):
    """Top-level electronic-eye window.

    Combines the per-contract algo monitor table with batch parameter
    setters, stop-all buttons and an algo log view.
    """

    signal_log: QtCore.Signal = QtCore.Signal(Event)

    def __init__(self, option_engine: OptionEngine, portfolio_name: str) -> None:
        """Store engine references, then build the UI and subscriptions."""
        super().__init__()
        self.option_engine: OptionEngine = option_engine
        # Renamed from the original ``event_Engine`` for PEP 8 compliance
        # and consistency with the rest of this module.
        self.event_engine: EventEngine = option_engine.event_engine
        self.algo_engine: OptionAlgoEngine = option_engine.algo_engine
        self.portfolio_name: str = portfolio_name
        self.init_ui()
        self.register_event()

    def init_ui(self) -> None:
        """Create the monitor, log view and batch-setting widgets, and lay them out."""
        self.setWindowTitle("期权电子眼")
        self.algo_monitor: ElectronicEyeMonitor = ElectronicEyeMonitor(self.option_engine, self.portfolio_name)
        self.log_monitor: QtWidgets.QTextEdit = QtWidgets.QTextEdit()
        self.log_monitor.setReadOnly(True)
        self.log_monitor.setMaximumWidth(400)
        # Stop-all buttons
        stop_pricing_button: QtWidgets.QPushButton = QtWidgets.QPushButton("停止定价")
        stop_pricing_button.clicked.connect(self.stop_pricing_for_all)
        stop_trading_button: QtWidgets.QPushButton = QtWidgets.QPushButton("停止交易")
        stop_trading_button.clicked.connect(self.stop_trading_for_all)
        # Batch parameter inputs (applied to every enabled row at once)
        self.price_spread_spin: AlgoDoubleSpinBox = AlgoDoubleSpinBox()
        self.volatility_spread_spin: AlgoDoubleSpinBox = AlgoDoubleSpinBox()
        self.direction_combo: AlgoDirectionCombo = AlgoDirectionCombo()
        self.max_order_size_spin: AlgoPositiveSpinBox = AlgoPositiveSpinBox()
        self.target_pos_spin: AlgoSpinBox = AlgoSpinBox()
        self.max_pos_spin: AlgoPositiveSpinBox = AlgoPositiveSpinBox()
        price_spread_button: QtWidgets.QPushButton = QtWidgets.QPushButton("设置")
        price_spread_button.clicked.connect(self.set_price_spread_for_all)
        volatility_spread_button: QtWidgets.QPushButton = QtWidgets.QPushButton("设置")
        volatility_spread_button.clicked.connect(self.set_volatility_spread_for_all)
        direction_button: QtWidgets.QPushButton = QtWidgets.QPushButton("设置")
        direction_button.clicked.connect(self.set_direction_for_all)
        max_order_size_button: QtWidgets.QPushButton = QtWidgets.QPushButton("设置")
        max_order_size_button.clicked.connect(self.set_max_order_size_for_all)
        target_pos_button: QtWidgets.QPushButton = QtWidgets.QPushButton("设置")
        target_pos_button.clicked.connect(self.set_target_pos_for_all)
        max_pos_button: QtWidgets.QPushButton = QtWidgets.QPushButton("设置")
        max_pos_button.clicked.connect(self.set_max_pos_for_all)
        QLabel = QtWidgets.QLabel
        grid: QtWidgets.QGridLayout = QtWidgets.QGridLayout()
        grid.addWidget(QLabel("价格价差"), 0, 0)
        grid.addWidget(self.price_spread_spin, 0, 1)
        grid.addWidget(price_spread_button, 0, 2)
        grid.addWidget(QLabel("隐波价差"), 1, 0)
        grid.addWidget(self.volatility_spread_spin, 1, 1)
        grid.addWidget(volatility_spread_button, 1, 2)
        grid.addWidget(QLabel("持仓范围"), 2, 0)
        grid.addWidget(self.max_pos_spin, 2, 1)
        grid.addWidget(max_pos_button, 2, 2)
        grid.addWidget(QLabel("目标持仓"), 3, 0)
        grid.addWidget(self.target_pos_spin, 3, 1)
        grid.addWidget(target_pos_button, 3, 2)
        grid.addWidget(QLabel("最大委托"), 4, 0)
        grid.addWidget(self.max_order_size_spin, 4, 1)
        grid.addWidget(max_order_size_button, 4, 2)
        grid.addWidget(QLabel("方向"), 5, 0)
        grid.addWidget(self.direction_combo, 5, 1)
        grid.addWidget(direction_button, 5, 2)
        hbox1: QtWidgets.QHBoxLayout = QtWidgets.QHBoxLayout()
        hbox1.addWidget(stop_pricing_button)
        hbox1.addWidget(stop_trading_button)
        vbox: QtWidgets.QVBoxLayout = QtWidgets.QVBoxLayout()
        vbox.addLayout(hbox1)
        vbox.addLayout(grid)
        vbox.addWidget(self.log_monitor)
        hbox: QtWidgets.QHBoxLayout = QtWidgets.QHBoxLayout()
        hbox.addWidget(self.algo_monitor)
        hbox.addLayout(vbox)
        self.setLayout(hbox)

    def register_event(self) -> None:
        """Subscribe to algo log events and route them to the GUI thread."""
        self.signal_log.connect(self.process_log_event)
        self.event_engine.register(EVENT_OPTION_ALGO_LOG, self.signal_log.emit)

    def process_log_event(self, event: Event) -> None:
        """Append a timestamped algo log message to the log view."""
        log: LogData = event.data
        timestr: str = log.time.strftime("%H:%M:%S")
        msg: str = f"{timestr} {log.msg}"
        self.log_monitor.append(msg)

    def show(self) -> None:
        """Initialize the algo engine for this portfolio and show maximized."""
        self.algo_engine.init_engine(self.portfolio_name)
        self.algo_monitor.resizeColumnsToContents()
        super().showMaximized()

    def set_price_spread_for_all(self) -> None:
        """Apply the batch price spread to every row whose spin box is editable."""
        price_spread: float = self.price_spread_spin.get_value()
        for cells in self.algo_monitor.cells.values():
            # disabled cells belong to running algos and are skipped
            if cells["price_spread"].isEnabled():
                cells["price_spread"].setValue(price_spread)

    def set_volatility_spread_for_all(self) -> None:
        """Apply the batch volatility spread to every editable row."""
        volatility_spread: float = self.volatility_spread_spin.get_value()
        for cells in self.algo_monitor.cells.values():
            if cells["volatility_spread"].isEnabled():
                cells["volatility_spread"].setValue(volatility_spread)

    def set_direction_for_all(self) -> None:
        """Apply the batch direction selection to every editable row."""
        ix: int = self.direction_combo.currentIndex()
        for cells in self.algo_monitor.cells.values():
            if cells["direction"].isEnabled():
                cells["direction"].setCurrentIndex(ix)

    def set_max_order_size_for_all(self) -> None:
        """Apply the batch max order size to every editable row."""
        size: int = self.max_order_size_spin.get_value()
        for cells in self.algo_monitor.cells.values():
            if cells["max_order_size"].isEnabled():
                cells["max_order_size"].setValue(size)

    def set_target_pos_for_all(self) -> None:
        """Apply the batch target position to every editable row."""
        pos: int = self.target_pos_spin.get_value()
        for cells in self.algo_monitor.cells.values():
            if cells["target_pos"].isEnabled():
                cells["target_pos"].setValue(pos)

    def set_max_pos_for_all(self) -> None:
        """Apply the batch max position to every editable row."""
        pos: int = self.max_pos_spin.get_value()
        for cells in self.algo_monitor.cells.values():
            if cells["max_pos"].isEnabled():
                cells["max_pos"].setValue(pos)

    def stop_pricing_for_all(self) -> None:
        """Stop pricing for every contract in the monitor."""
        for vt_symbol in self.algo_monitor.cells.keys():
            self.algo_monitor.stop_algo_pricing(vt_symbol)

    def stop_trading_for_all(self) -> None:
        """Stop trading for every contract in the monitor."""
        for vt_symbol in self.algo_monitor.cells.keys():
            self.algo_monitor.stop_algo_trading(vt_symbol)

    def closeEvent(self, event: QtGui.QCloseEvent) -> None:
        """Save algo settings before the window closes."""
        self.algo_monitor.save_setting()
        event.accept()
class VolatilityDoubleSpinBox(QtWidgets.QDoubleSpinBox):
    """Spin box for implied-volatility percentages (0 to 200%, one decimal)."""

    def __init__(self) -> None:
        """Configure precision, suffix and value range."""
        super().__init__()
        self.setMinimum(0)
        self.setMaximum(200.0)
        self.setSuffix("%")
        self.setDecimals(1)

    def get_value(self) -> float:
        """Return the current percentage value."""
        return self.value()
class PricingVolatilityManager(QtWidgets.QWidget):
""""""
signal_timer: QtCore.Signal = QtCore.Signal(Event)
def __init__(self, option_engine: OptionEngine, portfolio_name: str) -> None:
""""""
super().__init__()
self.option_engine: OptionEngine = option_engine
self.event_engine: EventEngine = option_engine.event_engine
self.portfolio: PortfolioData = option_engine.get_portfolio(portfolio_name)
self.cells: Dict[Tuple, Dict] = {}
self.chain_symbols: List[str] = []
self.chain_atm_index: Dict[str, str] = {}
self.init_ui()
self.register_event()
def init_ui(self) -> None:
""""""
self.setWindowTitle("波动率管理")
tab: QtWidgets.QTabWidget = QtWidgets.QTabWidget()
vbox: QtWidgets.QVBoxLayout = QtWidgets.QVBoxLayout()
vbox.addWidget(tab)
self.setLayout(vbox)
self.chain_symbols: list = list(self.portfolio.chains.keys())
self.chain_symbols.sort()
for chain_symbol in self.chain_symbols:
chain: ChainData = self.portfolio.get_chain(chain_symbol)
table: QtWidgets.QTableWidget = QtWidgets.QTableWidget()
table.setEditTriggers(table.NoEditTriggers)
table.verticalHeader().setVisible(False)
table.setRowCount(len(chain.indexes))
table.horizontalHeader().setSectionResizeMode(
QtWidgets.QHeaderView.Stretch
)
labels: list = [
"行权价",
"OTM隐波",
"CALL隐波",
"PUT隐波",
"定价隐波",
"执行拟合"
]
table.setColumnCount(len(labels))
table.setHorizontalHeaderLabels(labels)
for row, index in enumerate(chain.indexes):
index_cell: IndexCell = IndexCell(index)
otm_impv_cell: MonitorCell = MonitorCell("")
call_impv_cell: MonitorCell = MonitorCell("")
put_impv_cell: MonitorCell = MonitorCell("")
set_func = partial(
self.set_pricing_impv,
chain_symbol=chain_symbol,
index=index
)
pricing_impv_spin: VolatilityDoubleSpinBox = VolatilityDoubleSpinBox()
pricing_impv_spin.setAlignment(QtCore.Qt.AlignCenter)
pricing_impv_spin.valueChanged.connect(set_func)
check: QtWidgets.QCheckBox = QtWidgets.QCheckBox()
check_hbox: QtWidgets.QHBoxLayout = QtWidgets.QHBoxLayout()
check_hbox.setAlignment(QtCore.Qt.AlignCenter)
check_hbox.addWidget(check)
check_widget: QtWidgets.QWidget = QtWidgets.QWidget()
check_widget.setLayout(check_hbox)
table.setItem(row, 0, index_cell)
table.setItem(row, 1, otm_impv_cell)
table.setItem(row, 2, call_impv_cell)
table.setItem(row, 3, put_impv_cell)
table.setCellWidget(row, 4, pricing_impv_spin)
table.setCellWidget(row, 5, check_widget)
cells: dict = {
"otm_impv": otm_impv_cell,
"call_impv": call_impv_cell,
"put_impv": put_impv_cell,
"pricing_impv": pricing_impv_spin,
"check": check
}
self.cells[(chain_symbol, index)] = cells
reset_func = partial(self.reset_pricing_impv, chain_symbol=chain_symbol)
button_reset: QtWidgets.QPushButton = QtWidgets.QPushButton("重置")
button_reset.clicked.connect(reset_func)
fit_func = partial(self.fit_pricing_impv, chain_symbol=chain_symbol)
button_fit: QtWidgets.QPushButton = QtWidgets.QPushButton("拟合")
button_fit.clicked.connect(fit_func)
increase_func = partial(self.increase_pricing_impv, chain_symbol=chain_symbol)
button_increase: QtWidgets.QPushButton = QtWidgets.QPushButton("+0.1%")
button_increase.clicked.connect(increase_func)
decrease_func = partial(self.decrease_pricing_impv, chain_symbol=chain_symbol)
button_decrease: QtWidgets.QPushButton = QtWidgets.QPushButton("-0.1%")
button_decrease.clicked.connect(decrease_func)
hbox: QtWidgets.QHBoxLayout = QtWidgets.QHBoxLayout()
hbox.addWidget(button_reset)
hbox.addWidget(button_fit)
hbox.addWidget(button_increase)
hbox.addWidget(button_decrease)
vbox: QtWidgets.QVBoxLayout = QtWidgets.QVBoxLayout()
vbox.addLayout(hbox)
vbox.addWidget(table)
chain_widget: QtWidgets.QWidget = QtWidgets.QWidget()
chain_widget.setLayout(vbox)
tab.addTab(chain_widget, chain_symbol)
self.update_pricing_impv(chain_symbol)
self.default_foreground = otm_impv_cell.foreground()
self.default_background = otm_impv_cell.background()
table.resizeRowsToContents()
def register_event(self) -> None:
""""""
self.signal_timer.connect(self.process_timer_event)
self.event_engine.register(EVENT_TIMER, self.signal_timer.emit)
def process_timer_event(self, event: Event) -> None:
""""""
for chain_symbol in self.chain_symbols:
self.update_chain_impv(chain_symbol)
def reset_pricing_impv(self, chain_symbol: str) -> None:
"""
Set pricing impv to the otm mid impv of each strike price.
"""
chain: ChainData = self.portfolio.get_chain(chain_symbol)
atm_index: str = chain.atm_index
for index in chain.indexes:
call: OptionData = chain.calls[index]
put: OptionData = chain.puts[index]
if index >= atm_index:
otm: OptionData = call
else:
otm: OptionData = put
call.pricing_impv = otm.mid_impv
put.pricing_impv = otm.mid_impv
self.update_pricing_impv(chain_symbol)
def fit_pricing_impv(self, chain_symbol: str) -> None:
"""
Fit pricing impv with cubic spline algo.
"""
chain: ChainData = self.portfolio.get_chain(chain_symbol)
atm_index: str = chain.atm_index
strike_prices: list = []
pricing_impvs: list = []
for index in chain.indexes:
call: OptionData = chain.calls[index]
put: OptionData = chain.puts[index]
cells: dict = self.cells[(chain_symbol, index)]
if not cells["check"].isChecked():
if index >= atm_index:
otm: OptionData = call
else:
otm: OptionData = put
strike_prices.append(otm.strike_price)
pricing_impvs.append(otm.pricing_impv)
cs: interpolate.CubicSpline = interpolate.CubicSpline(strike_prices, pricing_impvs)
for index in chain.indexes:
call: OptionData = chain.calls[index]
put: OptionData = chain.puts[index]
new_impv: float = float(cs(call.strike_price))
call.pricing_impv = new_impv
put.pricing_impv = new_impv
self.update_pricing_impv(chain_symbol)
def increase_pricing_impv(self, chain_symbol: str) -> None:
"""
Increase pricing impv of all options within a chain by 0.1%.
"""
chain: ChainData = self.portfolio.get_chain(chain_symbol)
for option in chain.options.values():
option.pricing_impv += 0.001
self.update_pricing_impv(chain_symbol)
def decrease_pricing_impv(self, chain_symbol: str) -> None:
    """
    Decrease pricing impv of all options within a chain by 0.1%.
    """
    chain: ChainData = self.portfolio.get_chain(chain_symbol)
    for instrument in chain.options.values():
        instrument.pricing_impv = instrument.pricing_impv - 0.001
    self.update_pricing_impv(chain_symbol)
def set_pricing_impv(self, value: float, chain_symbol: str, index: str) -> None:
    """Apply a percentage value to both legs at one strike (value is in percent)."""
    impv: float = value / 100
    chain: ChainData = self.portfolio.get_chain(chain_symbol)
    for leg in (chain.calls[index], chain.puts[index]):
        leg.pricing_impv = impv
def update_pricing_impv(self, chain_symbol: str) -> None:
    """Push each strike's OTM pricing impv into its spinbox cell (percent, 1 dp)."""
    chain: ChainData = self.portfolio.get_chain(chain_symbol)
    atm_index: str = chain.atm_index
    for strike_index in chain.indexes:
        side = chain.calls if strike_index >= atm_index else chain.puts
        otm: OptionData = side[strike_index]
        percent: float = round(otm.pricing_impv * 100, 1)
        row_cells: Optional[dict] = self.cells.get((chain_symbol, strike_index))
        if row_cells:
            row_cells["pricing_impv"].setValue(percent)
def update_chain_impv(self, chain_symbol: str) -> None:
    """Refresh the impv text cells of a chain and re-highlight the ATM row."""
    chain: ChainData = self.portfolio.get_chain(chain_symbol)
    atm_index: str = chain.atm_index
    for strike_index in chain.indexes:
        call: OptionData = chain.calls[strike_index]
        put: OptionData = chain.puts[strike_index]
        otm: OptionData = call if strike_index >= atm_index else put
        row_cells: dict = self.cells[(chain_symbol, strike_index)]
        row_cells["otm_impv"].setText(f"{otm.mid_impv:.1%}")
        row_cells["call_impv"].setText(f"{call.mid_impv:.1%}")
        row_cells["put_impv"].setText(f"{put.mid_impv:.1%}")
    # Only touch the row highlighting when the ATM strike actually moved.
    previous_atm: str = self.chain_atm_index.get(chain_symbol, "")
    if previous_atm == atm_index:
        return
    self.chain_atm_index[chain_symbol] = atm_index
    impv_fields = ("otm_impv", "call_impv", "put_impv")
    if previous_atm:
        # Restore the old ATM row to the normal colour scheme.
        stale_cells: dict = self.cells[(chain_symbol, previous_atm)]
        for field in impv_fields:
            stale_cells[field].setForeground(COLOR_WHITE)
            stale_cells[field].setBackground(self.default_background)
    if atm_index:
        # Paint the new ATM row inverted (black on white).
        fresh_cells: dict = self.cells[(chain_symbol, atm_index)]
        for field in impv_fields:
            fresh_cells[field].setForeground(COLOR_BLACK)
            fresh_cells[field].setBackground(COLOR_WHITE)
|
{"hexsha": "f7f41d3bfaf7babd616b4330fe7dbdbe20916963", "size": 33661, "ext": "py", "lang": "Python", "max_stars_repo_path": "vnpy_optionmaster/ui/manager.py", "max_stars_repo_name": "noranhe/vnpy_optionmaster", "max_stars_repo_head_hexsha": "180c85f92004d1092bc45032dc31585539de9768", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "vnpy_optionmaster/ui/manager.py", "max_issues_repo_name": "noranhe/vnpy_optionmaster", "max_issues_repo_head_hexsha": "180c85f92004d1092bc45032dc31585539de9768", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "vnpy_optionmaster/ui/manager.py", "max_forks_repo_name": "noranhe/vnpy_optionmaster", "max_forks_repo_head_hexsha": "180c85f92004d1092bc45032dc31585539de9768", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.8982880161, "max_line_length": 111, "alphanum_fraction": 0.6009922462, "include": true, "reason": "from scipy", "num_tokens": 7499}
|
import warnings
import numpy as np
def generate_regular_latlon_grid(ndpts, grid_template):
    """Generate simple 1d lat, lon arrays for a regular lat/lon grid definition.

    Parameters
    ----------
    ndpts : int
        Number of data points declared by the message (unused here; kept
        for interface compatibility).
    grid_template : sequence of int
        GRIB2-style grid definition template values.  Indexes used:
        7/8 number of points along lon/lat, 9/10 basic angle and its
        subdivisions, 11/14 first/last latitude, 12/15 first/last
        longitude, 16/17 lon/lat increments (all scaled integers).

    Returns
    -------
    (lats, lons) : tuple of numpy.ndarray
        1-D latitude and longitude coordinate arrays in degrees.
    """
    # earth_shape = grid_template[0] # 6 = sphere of 6,371,229.0 m
    nlon = grid_template[7]
    nlat = grid_template[8]
    angle_scale = grid_template[9]
    angle_divis = grid_template[10]
    # Missing/zero basic angle means the default 1e-6 degree units.
    if angle_scale == 0 or angle_divis <= 0 or angle_divis == (2 ** 32 - 1):
        angle_scale = 1
        angle_divis = 1.e6
    offset = angle_scale / angle_divis
    lat1 = grid_template[11] * offset
    lat2 = grid_template[14] * offset
    lon1 = grid_template[12] * offset
    lon2 = grid_template[15] * offset
    dlon = grid_template[16] * offset
    dlat = grid_template[17] * offset
    lats, dlat_compute = np.linspace(lat1,
                                     lat2,
                                     num=nlat,
                                     endpoint=True,
                                     retstep=True)
    # BUG FIX: the deltas are reconstructed from quantized integers, so an
    # exact float inequality (!=) warned spuriously; compare with tolerance.
    if not np.isclose(dlat_compute, dlat):
        warnings.warn("Computed lat delta doesn't match template")
    lons, dlon_compute = np.linspace(lon1,
                                     lon2,
                                     num=nlon,
                                     endpoint=True,
                                     retstep=True)
    if not np.isclose(dlon_compute, dlon):
        warnings.warn("Computed lon delta doesn't match template")
    return lats, lons
|
{"hexsha": "3a2d6c12d9dd2097aab4cf0c30d3e6177eebc870", "size": 1449, "ext": "py", "lang": "Python", "max_stars_repo_path": "gripy/grids.py", "max_stars_repo_name": "abrammer/gripy", "max_stars_repo_head_hexsha": "1ce204a8f2f6a3015d3bc7cd1ce6e82d730673ac", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "gripy/grids.py", "max_issues_repo_name": "abrammer/gripy", "max_issues_repo_head_hexsha": "1ce204a8f2f6a3015d3bc7cd1ce6e82d730673ac", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "gripy/grids.py", "max_forks_repo_name": "abrammer/gripy", "max_forks_repo_head_hexsha": "1ce204a8f2f6a3015d3bc7cd1ce6e82d730673ac", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.1538461538, "max_line_length": 76, "alphanum_fraction": 0.5389924086, "include": true, "reason": "import numpy", "num_tokens": 341}
|
from os import path
import numpy as np
import csv
def check_file_exits(predicted_test_Y_file_path):
    """Raise an Exception when the predictions file cannot be found."""
    if path.exists(predicted_test_Y_file_path):
        return
    raise Exception("Couldn't find '" + predicted_test_Y_file_path + "' file")
def check_format(test_X_file_path, predicted_test_Y_file_path):
    """Verify the predictions form an (n_samples, 1) table matching test X rows."""
    with open(predicted_test_Y_file_path, 'r') as handle:
        pred_Y = np.array(list(csv.reader(handle)))
    test_X = np.genfromtxt(test_X_file_path, delimiter=',',
                           dtype=np.float64, skip_header=1)
    if pred_Y.shape != (len(test_X), 1):
        raise Exception("Output format is not proper")
def check_accuracy(actual_test_Y_file_path, predicted_test_Y_file_path):
    """Print and return the fraction of predicted labels equal to the actual ones.

    BUG FIX: ``np.int`` was deprecated in NumPy 1.20 and removed in 1.24,
    so ``dtype=np.int`` raises AttributeError on current NumPy — the plain
    builtin ``int`` is the documented replacement.  For 1-D integer label
    arrays the mean of elementwise equality is exactly what
    ``sklearn.metrics.accuracy_score`` computed, without the extra import.
    """
    pred_Y = np.genfromtxt(predicted_test_Y_file_path, delimiter=',', dtype=int)
    actual_Y = np.genfromtxt(actual_test_Y_file_path, delimiter=',', dtype=int)
    accuracy = float(np.mean(actual_Y == pred_Y))
    print("Accuracy", accuracy)
    return accuracy
def validate(test_X_file_path, actual_test_Y_file_path):
    """Run the full validation pipeline against predicted_test_Y_de.csv."""
    predictions_path = "predicted_test_Y_de.csv"
    check_file_exits(predictions_path)
    check_format(test_X_file_path, predictions_path)
    check_accuracy(actual_test_Y_file_path, predictions_path)
|
{"hexsha": "48b5123838cecf3ef89c6b8def8edbfb66147493", "size": 1400, "ext": "py", "lang": "Python", "max_stars_repo_path": "decision_trees_passport/validate.py", "max_stars_repo_name": "Silent-faith/ML-Algorithim-coded-from-scratch", "max_stars_repo_head_hexsha": "a1eaa588bb507c9c8ef0ec4cedc7c24179901d29", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-06-22T08:50:18.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-22T08:50:18.000Z", "max_issues_repo_path": "decision_trees_passport/validate.py", "max_issues_repo_name": "Silent-faith/ML-Algorithim-coded-from-scratch", "max_issues_repo_head_hexsha": "a1eaa588bb507c9c8ef0ec4cedc7c24179901d29", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "decision_trees_passport/validate.py", "max_forks_repo_name": "Silent-faith/ML-Algorithim-coded-from-scratch", "max_forks_repo_head_hexsha": "a1eaa588bb507c9c8ef0ec4cedc7c24179901d29", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.8421052632, "max_line_length": 84, "alphanum_fraction": 0.7321428571, "include": true, "reason": "import numpy", "num_tokens": 323}
|
@testset "Test total multi order indices" begin
# Nx = 1
midxs = totalorder([0])
@test midxs == reshape([0],(1,1))
midxs = totalorder([1])
@test midxs == reshape([0; 1],(2,1))
midxs = totalorder([3])
@test midxs == reshape([0; 1; 2; 3],(4,1))
#Nx = 2
midxs = totalorder([0; 0])
@test midxs == reshape([0; 0],(1,2))
midxs = totalorder([1; 0])
@test midxs == [0 0; 1 0]
midxs = totalorder([3;4])
@test midxs ==
[0 0
0 1
0 2
0 3
0 4
1 0
1 1
1 2
1 3
2 0
2 1
2 2
3 0
3 1]
# Nx = 3
midxs = totalorder([0; 0; 0])
@test midxs == reshape([0; 0; 0],(1,3))
midxs = totalorder([0; 1; 0])
@test midxs == [0 0 0; 0 1 0]
midxs = totalorder([2;4;3])
@test midxs ==
[0 0 0
0 0 1
0 0 2
0 0 3
0 1 0
0 1 1
0 1 2
0 1 3
0 2 0
0 2 1
0 2 2
0 3 0
0 3 1
0 4 0
1 0 0
1 0 1
1 0 2
1 0 3
1 1 0
1 1 1
1 1 2
1 2 0
1 2 1
1 3 0
2 0 0
2 0 1
2 0 2
2 1 0
2 1 1
2 2 0]
end
|
{"hexsha": "1e4f31fb4697019d5a8e3437d4156f8b0fb92378", "size": 1210, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/margin/totalorder.jl", "max_stars_repo_name": "mleprovost/TransportBasedInference.jl", "max_stars_repo_head_hexsha": "bdcedf72e9ea23c24678fe6af7a00202c5f9d5d7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-03-23T03:16:56.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-23T03:16:56.000Z", "max_issues_repo_path": "test/margin/totalorder.jl", "max_issues_repo_name": "mleprovost/TransportBasedInference.jl", "max_issues_repo_head_hexsha": "bdcedf72e9ea23c24678fe6af7a00202c5f9d5d7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/margin/totalorder.jl", "max_forks_repo_name": "mleprovost/TransportBasedInference.jl", "max_forks_repo_head_hexsha": "bdcedf72e9ea23c24678fe6af7a00202c5f9d5d7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 15.7142857143, "max_line_length": 47, "alphanum_fraction": 0.3876033058, "num_tokens": 602}
|
import os
import numpy as np

# Input/output locations (Windows paths).  Each DigiKey page was exported
# twice: "<name>_zh.txt" (Chinese) and "<name>_en.txt" (English).
fileFolder = "D:\\DigKey\\"
mapPath = "d:\\DigKeyEnglish2Chinese.txt"
outputFilePath = "d:\\pariOutput.txt"
finalOutPath = "d:\\DigKeyGoodsPair.txt"

# Bidirectional column-label dictionaries built from a tab-separated
# "<chinese>\t<english>" map file.
english2chinese = {}
chinese2english = {}
with open(mapPath,"r") as rh:
    for line in rh:
        eng = line.strip().split("\t")[1]
        chi = line.strip().split("\t")[0]
        english2chinese[eng] = chi
        chinese2english[chi] = eng

# Collect the distinct page names (file name before the "_" suffix).
nameSet = set()
for root, folder , files in os.walk(fileFolder):
    for file in files:
        nameSet.add(file.split("_")[0])
print(nameSet)

chineseDataList = []
englishDataList = []
print(len(nameSet))
thisK = 0
# NOTE(review): chineseDataList/englishDataList are never reset inside this
# loop, so each iteration re-processes every row read so far — confirm the
# accumulation (and the resulting duplicate output) is intended.
for name in nameSet:
    print(thisK)
    print(name)
    chineseName = fileFolder + name + "_zh.txt"
    englishName = fileFolder + name + "_en.txt"
    with open(chineseName,"r",encoding="UTF-8") as rh:
        for oneLine in rh:
            #print(oneLine.strip().split("\t"))
            chineseDataList.append(oneLine.strip().split("\t"))
    with open(englishName,"r",encoding="UTF-8") as rh:
        for oneLine in rh:
            #print(oneLine.strip().split("\t"))
            englishDataList.append(oneLine.strip().split("\t"))
    numberOfRows = len(chineseDataList)
    # Rows containing the "compare parts" marker delimit table batches in
    # each language; record their positions.
    indexChineseList = []
    indexEnglishList = []
    for i,oneSentence in enumerate(chineseDataList):
        if "比较零件" in oneSentence:
            indexChineseList.append(i)
    for i,oneSentence in enumerate(englishDataList):
        #print(oneSentence)
        if "Compare Parts" in oneSentence:
            indexEnglishList.append(i)
    #print(indexChineseList)
    #print(indexEnglishList)
    for i in range(len(indexChineseList)):
        #print("#############")
        # Slice out the i-th batch in both languages (the last batch runs
        # to the end of the list; note the -1 drops the final row).
        if i != len(indexChineseList)-1:
            oneBatchChinese = chineseDataList[indexChineseList[i]:indexChineseList[i+1]]
            oneBatchEnglish = englishDataList[indexEnglishList[i]:indexEnglishList[i+1]]
        else:
            oneBatchChinese = chineseDataList[indexChineseList[i]:-1]
            oneBatchEnglish = englishDataList[indexEnglishList[i]:-1]
        # First row of a batch is its header; map Chinese column positions
        # to the matching English column positions via the label dictionary.
        chineseTitle = oneBatchChinese[0]
        englishTitle = oneBatchEnglish[0]
        #print(chineseTitle)
        #print(englishTitle)
        thisBatchC2EMap = {}
        #print(chinese2english)
        for label in chineseTitle:
            if label in chinese2english:
                englishTrans = chinese2english[label]
                if englishTrans in englishTitle:
                    labelCIndex = chineseTitle.index(label)
                    labelEIndex = englishTitle.index(englishTrans)
                    thisBatchC2EMap[labelCIndex] = labelEIndex
        #print(thisBatchC2EMap)
        # Column 3 holds the part id; keep only ids present in both tables.
        chineseId = []
        englishId = []
        for s , oneSentence in enumerate(oneBatchChinese):
            if s != 0:
                chineseId.append(oneSentence[3])
        for s,oneSentence in enumerate(oneBatchEnglish):
            if s != 0:
                englishId.append(oneSentence[3])
        #print(chineseId)
        #print(englishId)
        shareId = []
        for thisId in chineseId:
            if thisId in englishId:
                shareId.append(thisId)
        #print(shareId)
        shareInforChineseData = []
        shareInforEnglishData = []
        for oneSentence in oneBatchChinese:
            if oneSentence[3] in shareId:
                shareInforChineseData.append(oneSentence)
        for oneSentence in oneBatchEnglish:
            if oneSentence[3] in shareId:
                shareInforEnglishData.append(oneSentence)
        #print(len(shareInforChineseData))
        #print(len(shareInforEnglishData))
        # Pair each mapped Chinese cell with the English cell at the
        # corresponding column of the row carrying the same part id.
        pair = {}
        for oneCSentence in shareInforChineseData:
            idNm = oneCSentence[3]
            eSentence = []
            for oneESentence in shareInforEnglishData:
                if oneESentence[3] == idNm:
                    eSentence = shareInforEnglishData[shareInforEnglishData.index(oneESentence)]
                    break
            #print(oneCSentence)
            #print(eSentence)
            for cPosition,ePosition in thisBatchC2EMap.items():
                if cPosition <= len(oneCSentence) - 1 and ePosition <= len(eSentence) - 1:
                    pair[oneCSentence[cPosition]] = eSentence[ePosition]
        #print(pair)
        # Append this batch's pairs to the raw output file.
        with open(outputFilePath,"a",encoding="UTF-8") as wh:
            for key ,value in pair.items():
                wh.write(key + "\t" + value + "\n")
    thisK += 1
def is_Chinese(word):
    """Return True when *word* contains at least one CJK unified ideograph."""
    return any('\u4e00' <= ch <= '\u9fff' for ch in word)
print("Output final file.")
with open(outputFilePath,"r",encoding="utf-8") as rh:
with open(finalOutPath,"w",encoding="utf-8") as wh:
for line in rh:
oneLine = line.strip()
if is_Chinese(oneLine):
if "厂方库存" not in oneLine and "非库存货" not in oneLine and "原厂标准交货期" not in oneLine and "立即发货" not in oneLine \
and "宽 x" not in oneLine and "长 x" not in oneLine:
wh.write(oneLine + "\n")
|
{"hexsha": "39bce889c745a04783eb09495a9960c5d5c4238c", "size": 5245, "ext": "py", "lang": "Python", "max_stars_repo_path": "Correspone.py", "max_stars_repo_name": "zoubohao/Emotion-Classification", "max_stars_repo_head_hexsha": "ec6c52647581181a1a3c0231de01ecedaa2ceebd", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Correspone.py", "max_issues_repo_name": "zoubohao/Emotion-Classification", "max_issues_repo_head_hexsha": "ec6c52647581181a1a3c0231de01ecedaa2ceebd", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Correspone.py", "max_forks_repo_name": "zoubohao/Emotion-Classification", "max_forks_repo_head_hexsha": "ec6c52647581181a1a3c0231de01ecedaa2ceebd", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.8387096774, "max_line_length": 124, "alphanum_fraction": 0.5763584366, "include": true, "reason": "import numpy", "num_tokens": 1260}
|
import numpy as np
from sklearn import preprocessing
def GradientDescent(y, x):
    '''Fit linear-regression coefficients by batch gradient descent.

    Parameters
    ----------
    y : list of float
        Target values, one per row of ``x``.
    x : list of list of float
        Feature rows; every row must have the same length.

    Returns
    -------
    numpy.ndarray
        Coefficient vector, one entry per (standardised) feature.

    Notes
    -----
    Fixes versus the original version:

    * BUG FIX: the convergence test compared the *signed* change in the
      loss against the tolerance.  Gradient descent lowers the loss, so
      that difference was negative (<= tol) and the loop terminated after
      a single iteration.  The absolute change is used instead.
    * Features are standardised with plain NumPy (zero mean, unit
      variance with ddof=0, constant columns only centred) — numerically
      equivalent to ``sklearn.preprocessing.scale`` without the
      third-party call, and the per-iteration debug print is removed.
    '''
    n_features = len(x[0])  # how many features
    n_rows = len(x)         # how many rows of data
    beta = np.ones(n_features)
    x = np.asarray(x, dtype=float)
    # Feature scaling and mean normalization.
    std = x.std(axis=0)
    std[std == 0] = 1.0  # leave constant columns unscaled
    x = (x - x.mean(axis=0)) / std
    y = np.asarray(y, dtype=float)
    tolerance = 0.001
    alpha = 0.01
    iteration = 0
    difference = 1000
    while not (difference <= tolerance or iteration >= 10000):
        # Mean-squared-error loss (halved) and its gradient.
        loss = np.sum((x.dot(beta) - y) ** 2) / 2 / n_rows
        gradient = x.T.dot(x.dot(beta) - y) / n_rows
        beta -= alpha * gradient
        new_loss = np.sum((x.dot(beta) - y) ** 2) / 2 / n_rows
        # Absolute loss change — see Notes.
        difference = abs(new_loss - loss)
        iteration += 1
    return beta
|
{"hexsha": "6935b2348a91a26da591aa1ff5592177e8be551d", "size": 1118, "ext": "py", "lang": "Python", "max_stars_repo_path": "gradient_descent.py", "max_stars_repo_name": "dingyunxing/James-code-paradise", "max_stars_repo_head_hexsha": "688a8e6c8b569bacb6ac9f6754f43a5a3a7eba7a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "gradient_descent.py", "max_issues_repo_name": "dingyunxing/James-code-paradise", "max_issues_repo_head_hexsha": "688a8e6c8b569bacb6ac9f6754f43a5a3a7eba7a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "gradient_descent.py", "max_forks_repo_name": "dingyunxing/James-code-paradise", "max_forks_repo_head_hexsha": "688a8e6c8b569bacb6ac9f6754f43a5a3a7eba7a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.6666666667, "max_line_length": 79, "alphanum_fraction": 0.5849731664, "include": true, "reason": "import numpy", "num_tokens": 332}
|
import torch
import argparse
import numpy as np
from torch import nn
from torch import optim
from torch.utils import data
from torch.optim.lr_scheduler import StepLR
from torch.nn import functional as F
from collections import defaultdict
import time
import pickle
import energy_estimator.analyse as simul
class Net(nn.Module):
def __init__(self, input_shape, num_outputs):
super(Net, self).__init__()
ic, ih, iw = input_shape
self.conv1 = nn.Conv2d(ic, 256, 3, 1)
self.conv2 = nn.Conv2d(256, 256, 3, 1)
self.conv3 = nn.Conv2d(256, 256, 3, 1)
self.dropout1 = nn.Dropout2d(0.5)
self.pool = torch.nn.AdaptiveAvgPool2d(1)
self.fc1 = nn.Linear(256, 128)
self.fc2 = nn.Linear(128, num_outputs)
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.relu(x)
x = self.conv3(x)
x = F.relu(x)
x = self.pool(x)
x = torch.flatten(x, 1)
x = self.fc1(x)
x = F.relu(x)
x = self.dropout1(x)
return self.fc2(x)
def get_trainable_image(image):
    """Wrap a preprocessed numpy image as a float Parameter that collects gradients."""
    as_tensor = torch.from_numpy(image).float()
    return torch.nn.Parameter(as_tensor, requires_grad=True)
def compute_loss(output, target):
    """Sum of absolute differences (L1) between prediction and target."""
    return torch.abs(output - target).sum()
def compute_loss_no_abs(output, target):
    """Signed sum of differences — keeps the gradient's direction information."""
    return (output - target).sum()
def renorm(image, min_value=0.0, max_value=1.0):
    """Clip image values into [min_value, max_value]."""
    return image.clamp(min_value, max_value)
def score_me(datas, model, hardware, hardware_worst, stats):
    """For each sample, run the model under energy-profiling hooks and return
    the ratio of the optimistic to the worst-case hardware energy estimate."""
    ratios = []
    hooks = simul.add_hooks(model, stats)
    for idx, sample in enumerate(datas):
        stats.__reset__()
        _ = model(sample.unsqueeze(dim=0))
        optimistic = simul.get_energy_estimate(stats, hardware)
        pessimistic = simul.get_energy_estimate(stats, hardware_worst)
        ratio = optimistic / pessimistic
        ratios.append(ratio)
        print(f"{idx} {ratio}", end="\r")
    print()
    simul.remove_hooks(hooks)
    return ratios
def build_dataset(
        train_data_x=None, train_data_y=None,
        test_data_x=None, test_data_y=None, random=False, model=None,
        random_shape=(1, 28, 28), savename=None, gpu=None):
    """Build train/test DataLoaders, optionally from random inputs.

    With ``random=True`` the inputs are drawn uniformly and, when *model*
    is given, labelled with its relative energy score (via ``score_me``).
    Provide ``train_data_x``/``train_data_y``/``test_data_x``/``test_data_y``
    as post-transformation tensors otherwise.  ``savename`` pickles the
    four tensors; ``gpu`` moves the inputs to that device first.
    """
    if random:
        hardware = simul.HardwareModel(optim=True)
        hardware_worst = simul.HardwareModel(optim=False)
        stats = simul.StatsRecorder()
        train_data_x = torch.Tensor(np.random.rand(5000, *random_shape))
        test_data_x = torch.Tensor(np.random.rand(100, *random_shape))
        train_data_y = torch.Tensor(np.random.rand(5000, 1))
        test_data_y = torch.Tensor(np.random.rand(100, 1))
        if gpu is not None:
            train_data_x = train_data_x.to(gpu)
            # BUG FIX: this line previously moved *train_data_x* again and
            # rebound it to test_data_x, silently turning the test set into
            # a copy of the training set.
            test_data_x = test_data_x.to(gpu)
        if model is not None:
            with torch.no_grad():
                train_data_y = score_me(train_data_x, model, hardware,
                                        hardware_worst, stats)
                test_data_y = score_me(test_data_x, model, hardware,
                                       hardware_worst, stats)
            print()
            train_data_y = torch.Tensor(train_data_y)
            test_data_y = torch.Tensor(test_data_y)
    if savename is not None:
        print(f"Saving to {savename} ... ")
        # Context manager so the dump file is closed promptly (the old code
        # leaked the open handle).
        with open(savename, "wb") as handle:
            pickle.dump([train_data_x, train_data_y,
                         test_data_x, test_data_y], handle)
    # train loader
    dataset = data.TensorDataset(train_data_x, train_data_y)
    train_dataloader = data.DataLoader(dataset)
    # test loader
    dataset = data.TensorDataset(test_data_x, test_data_y)
    test_dataloader = data.DataLoader(dataset)
    return (train_dataloader, test_dataloader)
def build_adversarial_image(
        image, label, model, iterations=10, alpha=0.01, random=False):
    """Iteratively step the input pixels against the model's signed loss,
    clamping to [0, 1] after each step.

    Returns the (possibly randomly generated) source image and the final
    perturbed tensor."""
    if random:
        image = np.random.rand(1, 1, 28, 28)
        label = torch.Tensor(np.random.rand(1))
    model.eval()
    current = image
    for _ in range(iterations):
        pixels = get_trainable_image(current)
        pixels.grad = None
        signed_loss = compute_loss_no_abs(model(pixels), label)
        signed_loss.backward()
        # Ascend on the gradient of the signed loss, then clip back into
        # the valid pixel range for the next round.
        stepped = renorm(pixels - alpha * pixels.grad.data)
        current = stepped.detach().numpy()
    return image, stepped
def train(args, model, device, train_loader, optimizer, epoch):
    """One training epoch: forward, L1 loss, backward, optimizer step;
    progress is printed every ``args.log_interval`` batches."""
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        loss = compute_loss(model(data), target)
        loss.backward()
        optimizer.step()
        if batch_idx % args.log_interval == 0:
            done = batch_idx * len(data)
            total = len(train_loader.dataset)
            percent = 100. * batch_idx / len(train_loader)
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, done, total, percent, loss.item()), end="\r")
    print()
def test(args, model, device, test_loader):
    """Evaluate the model: print the average L1 loss over the test set."""
    model.eval()
    test_loss = 0
    correct = 0  # kept for interface parity; accuracy is not tracked here
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            prediction = model(data)
            test_loss += compute_loss(prediction, target).item()  # sum up batch loss
    test_loss /= len(test_loader.dataset)
    print('\nTest set: Average loss: {:.4f} \n'.format(test_loss))
def main():
    """CLI entry point: train the energy estimator on random data, then time
    one inference pass and optionally save a checkpoint."""
    # Training settings
    parser = argparse.ArgumentParser(description='Energy Estimator')
    parser.add_argument('--batch-size', type=int, default=64, metavar='N',
                        help='input batch size for training (default: 64)')
    parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
                        help='input batch size for testing (default: 1000)')
    parser.add_argument('--epochs', type=int, default=20, metavar='N',
                        help='number of epochs to train (default: 14)')
    parser.add_argument('--lr', type=float, default=1.0, metavar='LR',
                        help='learning rate (default: 1.0)')
    parser.add_argument('--gamma', type=float, default=0.7, metavar='M',
                        help='Learning rate step gamma (default: 0.7)')
    parser.add_argument('--no-cuda', action='store_true', default=False,
                        help='disables CUDA training')
    parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                        help='how many batches to wait before logging training status')
    parser.add_argument('--load-model', action='store_true', default=False,
                        help='For Loading the current Model')
    parser.add_argument('--save-model', action='store_true', default=False,
                        help='For Saving the current Model')
    args = parser.parse_args()

    use_cuda = not args.no_cuda and torch.cuda.is_available()
    torch.manual_seed(args.seed)
    device = torch.device("cuda" if use_cuda else "cpu")

    model = Net(
        (1, 28, 28),
        1,
    ).to(device)
    optimizer = optim.Adadelta(model.parameters(), lr=args.lr)
    train_loader, test_loader = build_dataset(random=True, model=model)
    scheduler = StepLR(optimizer, step_size=1, gamma=args.gamma)

    # Train/evaluate loop.
    for epoch in range(1, args.epochs + 1):
        train(args, model, device, train_loader, optimizer, epoch)
        test(args, model, device, test_loader)
        scheduler.step()

    # Time a single forward pass on a fresh random image.
    org_image, adv_image = build_adversarial_image(None, None, model, random=True)
    model.eval()
    before = time.time()
    # BUG FIX: org_image is a numpy array; feeding it straight into the
    # model raised a TypeError in conv2d.  Convert to a float tensor first.
    res = model(torch.from_numpy(org_image).float())
    after = time.time()

    if args.save_model:
        torch.save(model.state_dict(), "checkpoints/image_energy_est.pt")
|
{"hexsha": "58ec1233be1613e96d09ef23c6724f8da030d617", "size": 8328, "ext": "py", "lang": "Python", "max_stars_repo_path": "energy_estimator/images_energy_estimator.py", "max_stars_repo_name": "iliaishacked/sponge_examples", "max_stars_repo_head_hexsha": "914ecb8c1ce06ad7177b34d907d419995c0734e6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2022-01-13T07:03:01.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-24T07:37:47.000Z", "max_issues_repo_path": "energy_estimator/images_energy_estimator.py", "max_issues_repo_name": "iliaishacked/sponge_examples", "max_issues_repo_head_hexsha": "914ecb8c1ce06ad7177b34d907d419995c0734e6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-10-05T03:31:39.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-18T18:44:08.000Z", "max_forks_repo_path": "energy_estimator/images_energy_estimator.py", "max_forks_repo_name": "iliaishacked/sponge_examples", "max_forks_repo_head_hexsha": "914ecb8c1ce06ad7177b34d907d419995c0734e6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.5560165975, "max_line_length": 87, "alphanum_fraction": 0.6306436119, "include": true, "reason": "import numpy", "num_tokens": 1981}
|
#=
Jobs puzzle in ConstraintSolver.jl
This is a standard problem in Automatic Reasoning.
From http://www-unix.mcs.anl.gov/~wos/mathproblems/jobs.html
"""
Jobs Puzzle
There are four people: Roberta, Thelma, Steve, and Pete.
Among them, they hold eight different jobs.
Each holds exactly two jobs.
The jobs are chef, guard, nurse, clerk, police officer (gender
not implied), teacher, actor, and boxer.
The job of nurse is held by a male.
The husband of the chef is the clerk.
Roberta is not a boxer.
Pete has no education past the ninth grade.
Roberta, the chef, and the police officer went golfing together.
Question: Who holds which jobs?
"""
The answer:
Chef Thelma
Guard Roberta
Nurse Steve
Clerk Pete
Police Steve
Teacher Roberta
Actor Pete
Boxer Thelma
Model created by Hakan Kjellerstrand, hakank@gmail.com
See also my Julia page: http://www.hakank.org/julia/
=#
using ConstraintSolver, JuMP
using Cbc, GLPK, Ipopt
const CS = ConstraintSolver
include("constraints_utils.jl")
function jobs_puzzle(print_solutions=true,all_solutions=true,timeout=6)
    # Pre-built LP optimizers; any of them can be handed to the CS solver
    # through the (commented-out) "lp_optimizer" option below.
    cbc_optimizer = optimizer_with_attributes(Cbc.Optimizer, "logLevel" => 0)
    glpk_optimizer = optimizer_with_attributes(GLPK.Optimizer)
    ipopt_optimizer = optimizer_with_attributes(Ipopt.Optimizer)

    model = Model(optimizer_with_attributes(CS.Optimizer, "all_solutions"=> all_solutions,
                                            # "all_optimal_solutions"=>all_solutions,
                                            "logging"=>[],

                                            "traverse_strategy"=>:BFS,
                                            # "traverse_strategy"=>:DFS,
                                            # "traverse_strategy"=>:DBFS,

                                            # "branch_split"=>:Smallest,
                                            # "branch_split"=>:Biggest,
                                            "branch_split"=>:InHalf,

                                            # https://wikunia.github.io/ConstraintSolver.jl/stable/options/#branch_strategy-(:Auto)
                                            "branch_strategy" => :IMPS, # default
                                            # "branch_strategy" => :ABS, # Activity Based Search
                                            # "activity.decay" => 0.999, # default 0.999
                                            # "activity.max_probes" => 10, # default, 10
                                            # "activity.max_confidence_deviation" => 20, # default 20

                                            # "simplify"=>false,
                                            # "simplify"=>true, # default

                                            "time_limit"=>timeout,
                                            # "backtrack" => false, # default true
                                            # "backtrack_sorting" => false, # default true
                                            # "lp_optimizer" => cbc_optimizer,
                                            # "lp_optimizer" => glpk_optimizer,
                                            # "lp_optimizer" => ipopt_optimizer,
                                        ))

    num_people = 4
    num_jobs = 8
    people = 1:num_people
    # People are encoded as the integers 1..4.
    Roberta, Thelma, Steve, Pete = people

    # Jobs[j] = the person (1..4) who holds job j.
    @variable(model, 1 <= Jobs[1:num_jobs] <= num_people, Int)
    Chef, Guard, Nurse, Clerk, PoliceOfficer, Teacher, Actor, Boxer = Jobs

    # Each holds exactly two jobs.
    for i in 1:4
        count_ctr(model, Jobs, :(==), i, 2)
    end

    # The job of nurse is held by a male.
    # (Nurse == Steve \/ Nurse == Pete),
    # either_eq(model, Nurse,Steve, Nurse,Pete)
    # is_member_of(model,Nurse, [Steve,Pete])
    b1 = @variable(model, binary=true)
    @constraint(model, b1 := {Nurse == Steve || Nurse == Pete})
    @constraint(model, b1 == 1)
    # @either_eq(model, :(Nurse==Steve), :(Nurse==Pete))

    # The husband of the chef is the clerk.
    # (Clerk == Steve \/ Clerk == Pete),
    # either_eq(model, Clerk,Steve, Clerk,Pete)
    # is_member_of(model,Clerk, [Steve,Pete])
    b2 = @variable(model, binary=true)
    @constraint(model, b2 := {Clerk == Steve || Clerk == Pete})
    @constraint(model, b2 == 1)

    # (Chef #= Roberta #\/ Chef #= Thelma),
    # either_eq(model, Chef,Roberta, Chef,Thelma)
    # is_member_of(model,Chef, [Roberta,Thelma])
    b3 = @variable(model, binary=true)
    @constraint(model, b3 := {Chef == Roberta || Chef == Thelma})
    @constraint(model, b3 == 1)

    # The chef and the clerk are different people (they are married).
    @constraint(model, Chef != Clerk)

    # Roberta is not a boxer.
    @constraint(model, Roberta != Boxer)

    # Pete has no education past the ninth grade.
    @constraint(model, Pete != Teacher)
    @constraint(model, Pete != PoliceOfficer)
    @constraint(model, Pete != Nurse)

    # Roberta, [and] the chef, and the police officer
    # went golfing together.
    # A constant variable pinned to Roberta so AllDifferent can include her.
    @variable(model,Roberta <= RobertaVar <= Roberta, Int)
    @constraint(model, [RobertaVar,Chef,PoliceOfficer] in CS.AllDifferent())

    # From the name of the job
    # (Actor == Steve \/ Actor == Pete)
    # either_eq(model, Actor,Steve, Actor,Pete)
    # is_member_of(model,Actor, [Steve,Pete])
    b4 = @variable(model, binary=true)
    @constraint(model, b4 := {(Actor == Steve || Actor == Pete)})
    @constraint(model, b4 == 1)

    # Solve the problem
    optimize!(model)

    status = JuMP.termination_status(model)
    # println("status:$status")
    num_sols = 0
    if status == MOI.OPTIMAL
        num_sols = MOI.get(model, MOI.ResultCount())
        println("num_sols:$num_sols\n")
        if print_solutions
            for sol in 1:num_sols
                println("solution #$sol")
                jobs_val = convert.(Integer,JuMP.value.(Jobs; result=sol))
                println("jobs:$jobs_val")
            end
        end
    else
        println("status:$status")
    end
    return status, num_sols
end
@time jobs_puzzle(true,true)
|
{"hexsha": "1de8b00cf1d18687f440ac582371b4d46fd1e047", "size": 6384, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "julia/constraints/jobs_puzzle.jl", "max_stars_repo_name": "tias/hakank", "max_stars_repo_head_hexsha": "87b7f180c9393afce440864eb9e5fb119bdec1a4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 279, "max_stars_repo_stars_event_min_datetime": "2015-01-10T09:55:35.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-28T02:34:03.000Z", "max_issues_repo_path": "julia/constraints/jobs_puzzle.jl", "max_issues_repo_name": "tias/hakank", "max_issues_repo_head_hexsha": "87b7f180c9393afce440864eb9e5fb119bdec1a4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 10, "max_issues_repo_issues_event_min_datetime": "2017-10-05T15:48:50.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-20T12:06:52.000Z", "max_forks_repo_path": "julia/constraints/jobs_puzzle.jl", "max_forks_repo_name": "tias/hakank", "max_forks_repo_head_hexsha": "87b7f180c9393afce440864eb9e5fb119bdec1a4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 83, "max_forks_repo_forks_event_min_datetime": "2015-01-20T03:44:00.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-13T23:53:06.000Z", "avg_line_length": 37.3333333333, "max_line_length": 147, "alphanum_fraction": 0.5189536341, "num_tokens": 1462}
|
// Copyright (c) 2012 Andre Martins
// All Rights Reserved.
//
// This file is part of AD3 2.0.
//
// AD3 2.0 is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// AD3 2.0 is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with AD3 2.0. If not, see <http://www.gnu.org/licenses/>.
#ifndef _AD3_QP_HPP_
#define _AD3_QP_HPP_
#include <Eigen/Eigenvalues>
#include <math.h>
#include <limits>
#include "dd_grlab.hpp"
#define NEARLY_ZERO_TOL(a,tol) (((a)<=(tol)) && ((a)>=(-(tol))))
#define NEARLY_EQ_TOL(a,b,tol) (((a)-(b))*((a)-(b))<=(tol))
#define num_max_iterations_QP_ 10
struct admm_vertex_program_general:public admm_vertex_program {
// Find the configuration (factor assignment index) with the highest score,
// where a configuration's score is its additional log-potential plus the sum
// of the variable log-potentials of the states it assigns.
//
// On return, 'configuration' holds the argmax index and '*value' its score.
// 'configuration' must be initialized to a negative sentinel by the caller;
// the assert below fires if no configuration was found.
void Maximize(vertex_type& vertex, vec additional_log_potentials, vec variable_log_potentials,
              Configuration &configuration,
              double *value) {
    vector<Configuration> states(vertex.data().nvars, -1);
    *value = -1e12;
    for (int index = 0;
         index < additional_log_potentials.size();
         ++index) {
        double score = additional_log_potentials[index];
        get_configuration_states(vertex, index, &states);
        int offset = 0;
        for (int i = 0; i < vertex.data().nvars; ++i) {
            score += variable_log_potentials[offset + states[i]];
            // Fixed: accumulate the offset ('+=', previously '='). The
            // variable log-potentials are laid out as the concatenation of
            // each variable's state block, exactly as indexed by
            // UpdateMarginalsFromConfiguration and the belief loop in
            // SolveQP; plain assignment indexed the wrong entries whenever
            // nvars > 2.
            offset += vertex.data().cards[i];
        }
        if (configuration < 0 || score > *value) {
            configuration = index;
            *value = score;
        }
    }
    assert(configuration >= 0);
}
//Configuration CreateConfiguration()
//{ Configuration *configuration = new Configuration;
// return *Configuration;
//}
// Invalidate a configuration handle by resetting it to the -1 sentinel.
// Configurations here are plain indices, so "deleting" releases no memory.
void DeleteConfiguration(Configuration &configuration) {
    configuration = -1;
}
// Incrementally update inv(A) after appending 'inserted_element' to the
// active set, using the bordered-inverse (blockwise inversion) identity:
// the new row/column of A is r = [1, <m_i, m_new>...] and the new corner is
// r0 = <m_new, m_new>, so only O(size_A^2) work is needed instead of a full
// re-inversion. Returns false (and leaves inverse_A_ untouched in content)
// if the bordered matrix would be singular, i.e. the Schur complement
// s = r0 - r' inv(A) r is (nearly) zero.
bool InvertAfterInsertion(vertex_type& vertex, vector <double> & inverse_A_,
                          const vector<Configuration> &active_set, const Configuration &inserted_element) {
    vector<double> inverse_A = inverse_A_;  // Copy of the current inverse.
    int size_A = active_set.size() + 1;     // A is bordered by a row/col of 1s.
    vector<double> r(size_A);
    r[0] = 1.0;
    for (int i = 0; i < active_set.size(); ++i) {
        // Count how many variable values the new assignment
        // has in common with the i-th assignment (inner product of
        // indicator vectors).
        int num_common_values = CountCommonValues(vertex, active_set[i], inserted_element);
        r[i+1] = static_cast<double>(num_common_values);
    }
    double r0 = static_cast<double>(CountCommonValues(vertex,
                                                      inserted_element, inserted_element));
    // Schur complement s = r0 - r' inv(A) r, computed exploiting the
    // symmetry of inv(A) (off-diagonal terms counted twice).
    double s = r0;
    for (int i = 0; i < size_A; ++i) {
        if (r[i] == 0.0) continue;
        s -= r[i] * r[i] * inverse_A[i * size_A + i];
        for (int j = i+1; j < size_A; ++j) {
            if (r[j] == 0.0) continue;
            s -= 2 * r[i] * r[j] * inverse_A[i * size_A + j];
        }
    }
    if (NEARLY_ZERO_TOL(s, 1e-9)) {
        if (opts.verbose> 2) {
            cout << "Warning: updated matrix will become singular after insertion."
                 << endl;
        }
        return false;
    }
    double invs = 1.0 / s;
    // d = inv(A) r (again using symmetry: row i of inv(A) scaled by r[i]).
    vector<double> d(size_A, 0.0);
    for (int i = 0; i < size_A; ++i) {
        if (r[i] == 0.0) continue;
        for (int j = 0; j < size_A; ++j) {
            d[j] += inverse_A[i * size_A + j] * r[i];
        }
    }
    // Assemble the bordered inverse:
    //   [ inv(A) + invs*d*d'   -invs*d ]
    //   [      -invs*d'          invs  ]
    int size_A_after = size_A + 1;
    inverse_A_.resize(size_A_after * size_A_after);
    for (int i = 0; i < size_A; ++i) {
        for (int j = 0; j < size_A; ++j) {
            inverse_A_[i * size_A_after + j] = inverse_A[i * size_A + j] +
                invs * d[i] * d[j];
        }
        inverse_A_[i * size_A_after + size_A] = -invs * d[i];
        inverse_A_[size_A * size_A_after + i] = -invs * d[i];
    }
    inverse_A_[size_A * size_A_after + size_A] = invs;
    return true;
}
// Incrementally downdate inv(A) after removing the active-set element at
// 'removed_index' (index into the active set BEFORE the erase — callers in
// SolveQP invoke this prior to active_set_.erase). This is the inverse of
// the bordered update in InvertAfterInsertion: with invs the removed
// diagonal entry of inv(A), the remaining block becomes
// inv(A)_ij - invs * d_i * d_j, where d = -(1/invs) * (removed column).
void InvertAfterRemoval(vector <double> &inverse_A_,const vector<Configuration> &active_set,
                        int removed_index) {
    vector<double> inverse_A = inverse_A_;  // Copy of the current inverse.
    int size_A = active_set.size() + 1;
    // (Removed an unused local 'vector<double> r(size_A)' left over from a
    // previous revision.)
    ++removed_index; // Index in A has an offset of 1.
    double invs = inverse_A[removed_index * size_A + removed_index];
    // The pivot must be non-zero, otherwise A was singular to begin with.
    assert(!NEARLY_ZERO_TOL(invs, 1e-12));
    double s = 1.0 / invs;
    vector<double> d(size_A - 1, 0.0);
    int k = 0;
    for (int i = 0; i < size_A; ++i) {
        if (i == removed_index) continue;
        d[k] = -s * inverse_A[removed_index * size_A + i];
        ++k;
    }
    // Rebuild the shrunken inverse, skipping the removed row/column.
    int size_A_after = size_A - 1;
    inverse_A_.resize(size_A_after * size_A_after);
    k = 0;
    for (int i = 0; i < size_A; ++i) {
        if (i == removed_index) continue;
        int l = 0;
        for (int j = 0; j < size_A; ++j) {
            if (j == removed_index) continue;
            inverse_A_[k * size_A_after + l] = inverse_A[i * size_A + j] -
                invs * d[k] * d[l];
            ++l;
        }
        ++k;
    }
}
// Compute the Gram matrix Mnz'*Mnz of the active set: entry (i, j) is the
// number of variable states configurations i and j assign in common.
// The matrix is symmetric and written densely (row-major) into
// '*similarities', resized to size*size.
void ComputeActiveSetSimilarities(vertex_type& vertex,
                                  const vector<Configuration> &active_set,
                                  vector<double> *similarities) {
    int size = active_set.size();
    similarities->resize(size * size);
    // Fixed: removed an unconditional '(*similarities)[0] = 0.0' that was a
    // dead store for a non-empty active set (the diagonal loop below
    // overwrites it) and out-of-bounds (undefined behaviour) when the
    // active set is empty.
    for (int i = 0; i < active_set.size(); ++i) {
        // Diagonal: self-similarity of the i-th assignment.
        (*similarities)[i*size + i] = static_cast<double>(
            CountCommonValues(vertex,active_set[i], active_set[i]) );
        for (int j = i+1; j < active_set.size(); ++j) {
            // Count how many variable values the i-th and j-th
            // assignments have in common; fill both symmetric entries.
            int num_common_values = CountCommonValues(vertex,active_set[i], active_set[j]);
            (*similarities)[i*size + j] = num_common_values;
            (*similarities)[j*size + i] = num_common_values;
        }
    }
}
// Convert a sparse distribution over active-set configurations into dense
// variable and additional posterior vectors: zero both outputs, then add
// each active configuration's indicator contribution weighted by its
// probability in 'distribution'.
void ComputeMarginalsFromSparseDistribution( vertex_type& vertex,
                                             const vector<Configuration> &active_set,
                                             const vector<double> &distribution,
                                             vec &variable_posteriors,
                                             vec &additional_posteriors) {
    variable_posteriors.setZero();
    additional_posteriors.setZero();
    const int num_active = active_set.size();
    for (int k = 0; k < num_active; ++k) {
        UpdateMarginalsFromConfiguration(vertex,
                                         active_set[k],
                                         distribution[k],
                                         variable_posteriors,
                                         additional_posteriors);
    }
}
// Given a configuration with a probability (weight),
// increment the vectors of variable and additional posteriors.
// The variable posterior vector is the concatenation of one block per
// variable (block k has vertex.data().cards[k] entries), hence the
// cumulative 'offset' below.
void UpdateMarginalsFromConfiguration(vertex_type& vertex,
                                      const Configuration &configuration,
                                      double weight,
                                      vec &variable_posteriors,
                                      vec &additional_posteriors) {
    vector <Configuration> states(vertex.data().nvars, -1);
    get_configuration_states(vertex, configuration, &states);
    int offset = 0;
    for (int k = 0; k < vertex.data().nvars; ++k)
    {
        // Add 'weight' to the posterior of the state this configuration
        // assigns to variable k, then advance to the next variable's block.
        variable_posteriors[offset + states[k]] += weight;
        offset += vertex.data().cards[k];
    }
    // One posterior entry per whole-factor configuration.
    additional_posteriors[configuration] += weight;
}
// Count how many common values two configurations have, i.e. the number of
// variable positions at which both configurations assign the same state.
int CountCommonValues(vertex_type& vertex,Configuration configuration1,
                      Configuration configuration2) {
    const int nvars = vertex.data().nvars;
    vector <Configuration> first_states(nvars, -1);
    vector <Configuration> second_states(nvars, -1);
    get_configuration_states(vertex, configuration1, &first_states);
    get_configuration_states(vertex, configuration2, &second_states);
    int matches = 0;
    for (int pos = 0; pos < nvars; ++pos) {
        if (first_states[pos] == second_states[pos]) {
            ++matches;
        }
    }
    return matches;
}
// Compute the score of a single configuration: its additional log-potential
// plus the sum of the variable log-potentials of the states it assigns.
// Result is written to '*value'.
void Evaluate(vertex_type& vertex, vec additional_log_potentials, vec variable_log_potentials,
              const Configuration configuration,
              double *value) {
    vector<Configuration> states(vertex.data().nvars, -1);
    get_configuration_states(vertex, configuration, &states);
    *value = 0.0;
    int offset = 0;
    for (int i = 0;i<vertex.data().nvars; ++i) {
        *value += variable_log_potentials[offset + states[i]];
        // Fixed: accumulate the offset ('+=', previously '='), matching the
        // concatenated per-variable layout used everywhere else in this
        // struct (see UpdateMarginalsFromConfiguration); plain assignment
        // indexed the wrong entries whenever nvars > 2.
        offset += vertex.data().cards[i];
    }
    *value += additional_log_potentials[configuration];
}
// Eigendecompose the (symmetric, row-major, square) matrix stored in
// '*similarities'. On return, '*eigenvalues' holds the eigenvalues and
// '*similarities' is OVERWRITTEN with the transposed eigenvector matrix
// (row i = i-th eigenvector), matching how callers read rows by eigenvalue
// index.
void EigenDecompose(vector<double> *similarities,
                    vector<double> *eigenvalues) {
    // The matrix is square, so its dimension is the square root of the
    // flattened length. NOTE(review): relies on float->int truncation of
    // sqrt being exact for perfect squares — holds for realistic sizes.
    int size = sqrt(similarities->size());
    Eigen::SelfAdjointEigenSolver<Eigen::MatrixXd> es;
    Eigen::MatrixXd sim(size, size);
    int t = 0;
    for (int i = 0; i < size; ++i) {
        for (int j = 0; j < size; ++j) {
            sim(i, j) = (*similarities)[t];
            ++t;
        }
    }
    es.compute(sim);
    const Eigen::VectorXd &eigvals = es.eigenvalues();
    eigenvalues->resize(size);
    for (int i = 0; i < size; ++i) {
        (*eigenvalues)[i] = eigvals[i];
    }
    // Copy the eigenvectors back, transposed so each row is an eigenvector.
    const Eigen::MatrixXd &eigvectors = es.eigenvectors().transpose();
    t = 0;
    for (int i = 0; i < size; ++i) {
        for (int j = 0; j < size; ++j) {
            (*similarities)[t] = eigvectors(i, j);
            ++t;
        }
    }
}
// Solve the local quadratic subproblem of AD3 for this dense factor with an
// active-set method: maintain a small set of factor configurations with
// non-zero probability, repeatedly solve the equality-constrained QP
// restricted to that set (via the incrementally-maintained inverse of the
// bordered Gram matrix A), and add the most violated configuration /
// remove blocking ones until optimal or the iteration cap is hit.
//
// Outputs: 'beliefs' (factor log-potentials plus incoming messages),
// 'variable_posteriors' and 'additional_posteriors' (the resulting
// marginals).
//
// NOTE(review): the comments below that say the distribution/active set/
// inv(A) are "cached for the next round" are stale — active_set_,
// distribution_ and inverse_A_ are declared as locals in this method, so
// the 'if (active_set_.size() == 0)' initialization always runs and
// nothing persists across calls. Verify against the upstream AD3
// implementation, where these are factor members.
void SolveQP(vertex_type& vertex,const gather_type& total,
             vec& beliefs, vec& variable_posteriors,
             vec& additional_posteriors) {
    vertex_data& vdata = vertex.data();
    // Beliefs = factor potentials + sum of incoming variable messages for
    // the states each configuration assigns.
    beliefs = vdata.potentials;
    int num_configurations = vdata.potentials.size();
    for (int index_configuration = 0;
         index_configuration < num_configurations;
         ++index_configuration) {
        vector<int> states(vdata.nvars, -1);
        // This could be made more efficient by defining an iterator over factor
        // configurations.
        get_configuration_states(vertex, index_configuration, &states);
        int offset = 0;
        for (int k = 0; k < vdata.nvars; ++k) {
            beliefs[index_configuration] += total.messages[offset + states[k]];
            offset += vdata.cards[k];
        }
    }
    vec additional_log_potentials = beliefs;
    vec variable_log_potentials = total.neighbor_distribution;
    vector <Configuration> active_set_;
    vector<double> distribution_;
    vector<double> inverse_A_;
    // Initialize the active set.
    if (active_set_.size() == 0) {
        variable_posteriors.resize(variable_log_potentials.size());
        additional_posteriors.resize(additional_log_potentials.size());
        distribution_.clear();
        // Initialize by solving the LP, discarding the quadratic
        // term.
        Configuration configuration = -1;
        double value;
        Maximize(vertex, additional_log_potentials, variable_log_potentials,
                 configuration,
                 &value);
        active_set_.push_back(configuration);
        distribution_.push_back(1.0);
        // Initialize inv(A) as [-M,1;1,0].
        inverse_A_.resize(4);
        inverse_A_[0] = static_cast<double>(
            -CountCommonValues(vertex,configuration, configuration));
        inverse_A_[1] = 1;
        inverse_A_[2] = 1;
        inverse_A_[3] = 0;
    }
    bool changed_active_set = true;
    vector<double> z;   // Candidate distribution over the active set.
    int num_max_iterations = num_max_iterations_QP_;
    double tau = 0;     // Lagrange multiplier of the simplex constraint.
    for (int iter = 0; iter < num_max_iterations; ++iter) {
        bool same_as_before = true;
        bool unbounded = false;
        if (changed_active_set) {
            // Recompute vector b.
            vector<double> b(active_set_.size() + 1, 0.0);
            b[0] = 1.0;
            for (int i = 0; i < active_set_.size(); ++i) {
                const Configuration &configuration = active_set_[i];
                double score;
                Evaluate(vertex, additional_log_potentials, variable_log_potentials,
                         configuration,
                         &score);
                b[i+1] = score;
            }
            // Solve the system Az = b using the maintained inverse; rows 1..
            // of inv(A)*b give z, row 0 gives tau.
            z.resize(active_set_.size());
            int size_A = active_set_.size() + 1;
            for (int i = 0; i < active_set_.size(); ++i) {
                z[i] = 0.0;
                for (int j = 0; j < size_A; ++j) {
                    z[i] += inverse_A_[(i+1) * size_A + j] * b[j];
                }
            }
            tau = 0.0;
            for (int j = 0; j < size_A; ++j) {
                tau += inverse_A_[j] * b[j];
            }
            same_as_before = false;
        }
        if (same_as_before) {
            // Compute the variable marginals from the full distribution
            // stored in z.
            ComputeMarginalsFromSparseDistribution(vertex, active_set_,
                                                   z,
                                                   variable_posteriors,
                                                   additional_posteriors);
            // Get the most violated constraint
            // (by calling the black box that computes the MAP).
            vec scores = variable_log_potentials;
            for (int i = 0; i < scores.size(); ++i) {
                scores[i] -= variable_posteriors[i];
            }
            Configuration configuration = -1;
            double value = 0.0;
            Maximize(vertex,
                     additional_log_potentials, scores,
                     configuration,
                     &value);
            double very_small_threshold = 1e-9;
            if (value <= tau + very_small_threshold) { // value <= tau.
                // We have found the solution;
                // the distribution, active set, and inv(A) are cached for the next round.
                DeleteConfiguration(configuration);
                return;
            } else {
                for (int k = 0; k < active_set_.size(); ++k) {
                    // This is expensive and should just be a sanity check.
                    // However, in practice, numerical issues force an already existing
                    // configuration to try to be added. Therefore, we always check
                    // if a configuration already exists before inserting it.
                    // If it does, that means the active set method converged to a
                    // solution (but numerical issues had prevented us to see it.)
                    if (active_set_[k] == configuration) {
                        if (opts.verbose > 2) {
                            cout << "Warning: value - tau = "
                                 << value - tau << " " << value << " " << tau
                                 << endl;
                        }
                        // We have found the solution;
                        // the distribution, active set, and inv(A)
                        // are cached for the next round.
                        DeleteConfiguration(configuration);
                        // Just in case, clean the cache.
                        // This may prevent eventual numerical problems in the future.
                        for (int j = 0; j < active_set_.size(); ++j) {
                            if (j == k) continue; // This configuration was deleted already.
                            DeleteConfiguration(active_set_[j]);
                        }
                        active_set_.clear();
                        inverse_A_.clear();
                        distribution_.clear();
                        // Return.
                        return;
                    }
                }
                // Tentatively give the new configuration zero probability.
                z.push_back(0.0);
                distribution_ = z;
                // Update inv(A).
                bool singular = !InvertAfterInsertion(vertex, inverse_A_, active_set_, configuration);
                if (singular) {
                    // If adding a new configuration causes the matrix to be singular,
                    // don't just add it. Instead, look for a configuration in the null
                    // space and remove it before inserting the new one.
                    // Right now, if more than one such configuration exists, we just
                    // remove the first one we find. There's a chance this could cause
                    // some cyclic behaviour. If that is the case, we should randomize
                    // this choice.
                    // Note: This step is expensive and requires an eigendecomposition.
                    // TODO: I think there is a graph interpretation for this problem.
                    // Maybe some specialized graph algorithm is cheaper than doing
                    // the eigendecomposition.
                    vector<double> similarities(active_set_.size() * active_set_.size());
                    ComputeActiveSetSimilarities(vertex, active_set_, &similarities);
                    // Build the bordered Gram matrix including the simplex
                    // row/column and the new configuration.
                    vector<double> padded_similarities((active_set_.size()+2) *
                                                       (active_set_.size()+2), 1.0);
                    for (int i = 0; i < active_set_.size(); ++i) {
                        for (int j = 0; j < active_set_.size(); ++j) {
                            padded_similarities[(i+1)*(active_set_.size()+2) + (j+1)] =
                                similarities[i*active_set_.size() + j];
                        }
                    }
                    padded_similarities[0] = 0.0;
                    for (int i = 0; i < active_set_.size(); ++i) {
                        double value = static_cast<double>(
                            CountCommonValues(vertex, configuration, active_set_[i]));
                        padded_similarities[(i+1)*(active_set_.size()+2) +
                                            (active_set_.size()+1)] = value;
                        padded_similarities[(active_set_.size()+1)*(active_set_.size()+2) +
                                            (i+1)] = value;
                    }
                    double value = static_cast<double>(
                        CountCommonValues(vertex, configuration, configuration));
                    padded_similarities[(active_set_.size()+1)*(active_set_.size()+2) +
                                        (active_set_.size()+1)] = value;
                    // A zero eigenvalue identifies the null-space direction;
                    // its eigenvector's non-zero entries mark removable
                    // configurations.
                    vector<double> eigenvalues(active_set_.size()+2);
                    EigenDecompose(&padded_similarities, &eigenvalues);
                    int zero_eigenvalue = -1;
                    for (int i = 0; i < active_set_.size()+2; ++i) {
                        if (NEARLY_EQ_TOL(eigenvalues[i], 0.0, 1e-9)) {
                            if (zero_eigenvalue >= 0) {
                                // If this happens, something failed. Maybe a numerical problem
                                // may cause this. In that case, just give up, clean the cache
                                // and return. Hopefully the next iteration will fix it.
                                cout << "Multiple zero eigenvalues: "
                                     << eigenvalues[zero_eigenvalue] << " and "
                                     << eigenvalues[i] << endl;
                                cout << "Warning: Giving up." << endl;
                                // Clean the cache.
                                for (int j = 0; j < active_set_.size(); ++j) {
                                    DeleteConfiguration(active_set_[j]);
                                }
                                active_set_.clear();
                                inverse_A_.clear();
                                distribution_.clear();
                                return;
                            }
                            zero_eigenvalue = i;
                        }
                    }
                    assert(zero_eigenvalue >= 0);
                    vector<int> configurations_to_remove;
                    for (int j = 1; j < active_set_.size()+1; ++j) {
                        double value = padded_similarities[zero_eigenvalue*(active_set_.size()+2) + j];
                        if (!NEARLY_EQ_TOL(value, 0.0, 1e-9)) {
                            configurations_to_remove.push_back(j-1);
                        }
                    }
                    if (opts.verbose > 2) {
                        cout << "Pick a configuration to remove (" << configurations_to_remove.size()
                             << " out of " << active_set_.size() << ")." << endl;
                    }
                    assert(configurations_to_remove.size() >= 1);
                    int j = configurations_to_remove[0];
                    // Update inv(A).
                    InvertAfterRemoval(inverse_A_, active_set_, j);
                    // Remove blocking constraint from the active set.
                    DeleteConfiguration(active_set_[j]); // Delete configuration.
                    active_set_.erase(active_set_.begin() + j);
                    // Retry the insertion; it must succeed now.
                    singular = !InvertAfterInsertion(vertex, inverse_A_, active_set_, configuration);
                    assert(!singular);
                }
                // Insert configuration to active set.
                if (opts.verbose > 2) {
                    cout << "Inserted one element to the active set (iteration "
                         << iter << ")." << endl;
                }
                active_set_.push_back(configuration);
                changed_active_set = true;
            }
        } else {
            // Solution has changed from the previous iteration.
            // Look for blocking constraints.
            int blocking = -1;
            bool exist_blocking = false;
            double alpha = 1.0;
            for (int i = 0; i < active_set_.size(); ++i) {
                assert(distribution_[i] >= -1e-12);
                if (z[i] >= distribution_[i]) continue;
                if (z[i] < 0) exist_blocking = true;
                double tmp = distribution_[i] / (distribution_[i] - z[i]);
                if (blocking < 0 || tmp < alpha) {
                    alpha = tmp;
                    blocking = i;
                }
            }
            if (!exist_blocking) {
                // No blocking constraints.
                assert(!unbounded);
                distribution_ = z;
                alpha = 1.0;
                changed_active_set = false;
            } else {
                if (alpha > 1.0 && !unbounded) alpha = 1.0;
                // Interpolate between factor_posteriors_[i] and z.
                if (alpha == 1.0) {
                    distribution_ = z;
                } else {
                    for (int i = 0; i < active_set_.size(); ++i) {
                        z[i] = (1 - alpha) * distribution_[i] + alpha * z[i];
                        distribution_[i] = z[i];
                    }
                }
                // Update inv(A).
                InvertAfterRemoval(inverse_A_, active_set_, blocking);
                // Remove blocking constraint from the active set.
                if (opts.verbose > 2) {
                    cout << "Removed one element to the active set (iteration "
                         << iter << ")." << endl;
                }
                DeleteConfiguration(active_set_[blocking]); // Delete configuration.
                active_set_.erase(active_set_.begin() + blocking);
                z.erase(z.begin() + blocking);
                distribution_.erase(distribution_.begin() + blocking);
                changed_active_set = true;
                for (int i = 0; i < distribution_.size(); ++i) {
                    assert(distribution_[i] > -1e-16);
                }
            }
        }
    }
    // Maximum number of iterations reached.
    // Return the best existing solution by computing the variable marginals
    // from the full distribution stored in z.
    ComputeMarginalsFromSparseDistribution(vertex, active_set_,
                                           z,
                                           variable_posteriors,
                                           additional_posteriors);
};
};
#endif
|
{"hexsha": "dae0cf069b987f1d3129d6a3e83846f2fb73eea4", "size": 23305, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "toolkits/graphical_models/ad3_qp.hpp", "max_stars_repo_name": "zgdahai/graphlabapi", "max_stars_repo_head_hexsha": "7d66bbda82d4d44cded35f9438e1c9359b0ca64e", "max_stars_repo_licenses": ["ECL-2.0", "Apache-2.0"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2016-11-07T05:47:18.000Z", "max_stars_repo_stars_event_max_datetime": "2016-11-07T05:47:18.000Z", "max_issues_repo_path": "toolkits/graphical_models/ad3_qp.hpp", "max_issues_repo_name": "keerthanashanmugam/graphlabapi", "max_issues_repo_head_hexsha": "7d66bbda82d4d44cded35f9438e1c9359b0ca64e", "max_issues_repo_licenses": ["ECL-2.0", "Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "toolkits/graphical_models/ad3_qp.hpp", "max_forks_repo_name": "keerthanashanmugam/graphlabapi", "max_forks_repo_head_hexsha": "7d66bbda82d4d44cded35f9438e1c9359b0ca64e", "max_forks_repo_licenses": ["ECL-2.0", "Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.5855572998, "max_line_length": 127, "alphanum_fraction": 0.5738253594, "num_tokens": 5794}
|
/*!@file
* @copyright This code is licensed under the 3-clause BSD license.
* Copyright ETH Zurich, Laboratory of Physical Chemistry, Reiher Group.
* See LICENSE.txt for details.
*/
#include <boost/test/unit_test.hpp>
#include "Molassembler/Temple/Adaptors/All.h"
#include "Molassembler/Temple/Functional.h"
#include "Molassembler/Temple/Stringify.h"
#include "Molassembler/Temple/constexpr/Numeric.h"
#include <iostream>
using namespace Scine::Molassembler;
// Number of increments needed to walk a container from begin to end.
// The tests use this to cross-check each adaptor's size() claim against
// its actual iteration length.
template<class Container>
std::size_t iteratorDistance(const Container& container) {
    const auto first = std::begin(container);
    const auto last = std::end(container);
    return static_cast<std::size_t>(std::distance(first, last));
}
// Exercises the pair adaptors: sequentialPairs (adjacent pairs of one
// range) and allPairs (unordered pairs of one range / cartesian pairs of
// two ranges). Each adaptor is checked for size(), size-vs-iteration
// agreement, and the sum of pairwise sums.
BOOST_AUTO_TEST_CASE(PairAdaptorTests, *boost::unit_test::label("Temple")) {
    const std::vector<unsigned> i {5, 3, 9, 11};
    const std::vector<unsigned> j {3, 4};
    // Adjacent pairs of i: (5,3), (3,9), (9,11).
    auto adjacents = Temple::Adaptors::sequentialPairs(i);
    BOOST_CHECK(adjacents.size() == 3);
    BOOST_CHECK(iteratorDistance(adjacents) == adjacents.size());
    BOOST_CHECK(
        Temple::sum(
            Temple::Adaptors::transform(adjacents, std::plus<>())
        ) == 8 + 12 + 20
    );
    // All C(4,2) = 6 unordered pairs within i.
    auto singlePairs = Temple::Adaptors::allPairs(i);
    BOOST_CHECK(singlePairs.size() == 6);
    BOOST_CHECK(
        iteratorDistance(singlePairs) == singlePairs.size()
    );
    BOOST_CHECK(
        Temple::sum(
            Temple::Adaptors::transform(singlePairs, std::plus<>())
        ) == 8 + 14 + 16 + 12 + 14 + 20
    );
    // Cartesian product of i and j: 4 * 2 = 8 pairs.
    auto twoPairs = Temple::Adaptors::allPairs(i, j);
    BOOST_CHECK(twoPairs.size() == 8);
    BOOST_CHECK(
        iteratorDistance(twoPairs) == twoPairs.size()
    );
    BOOST_CHECK(
        Temple::sum(
            Temple::Adaptors::transform(twoPairs, std::plus<>())
        ) == 8 + 9 + 6 + 7 + 12 + 13 + 14 + 15
    );
}
// Exercises the range (iota) adaptor, both single-argument [0, n) and
// two-argument [a, b) forms.
BOOST_AUTO_TEST_CASE(IotaAdaptorTests, *boost::unit_test::label("Temple")) {
    // range(5) = 0,1,2,3,4 -> sum 10.
    auto a = Temple::Adaptors::range(5U);
    BOOST_CHECK(a.size() == 5);
    BOOST_CHECK(iteratorDistance(a) == a.size());
    BOOST_CHECK(Temple::sum(a) == 10U);
    // range(4, 7) = 4,5,6 -> sum 15.
    auto b = Temple::Adaptors::range(4U, 7U);
    BOOST_CHECK(b.size() == 3);
    BOOST_CHECK(iteratorDistance(b) == b.size());
    BOOST_CHECK(Temple::sum(b) == 15U);
}
// Exercises the zip adaptor: pairs up elements in lockstep and truncates
// to the shorter range, so zipping a 4- and a 2-element vector yields two
// pairs: (5,3) and (3,4), whose sums total 15.
BOOST_AUTO_TEST_CASE(ZipAdaptorTests, *boost::unit_test::label("Temple")) {
    const std::vector<unsigned> i {5, 3, 9, 11};
    const std::vector<unsigned> j {3, 4};
    auto zipRange = Temple::Adaptors::zip(i, j);
    BOOST_CHECK(zipRange.size() == 2);
    BOOST_CHECK(iteratorDistance(zipRange) == zipRange.size());
    BOOST_CHECK(
        Temple::sum(
            Temple::Adaptors::transform(zipRange, std::plus<>())
        ) == 15U
    );
}
// Exercises the transform adaptor with a unary callable that also changes
// the element type (unsigned -> int).
BOOST_AUTO_TEST_CASE(TransformAdaptorTests, *boost::unit_test::label("Temple")) {
    const std::vector<unsigned> i {5, 3, 9, 11};
    auto transformRange = Temple::Adaptors::transform(
        i,
        [](unsigned x) -> int {return static_cast<int>(x) - 10;}
    );
    BOOST_CHECK(transformRange.size() == 4);
    BOOST_CHECK(iteratorDistance(transformRange) == transformRange.size());
    // Subtracting 10 from each of the 4 elements shifts the sum by 40.
    BOOST_CHECK(
        Temple::sum(transformRange) == static_cast<int>(Temple::sum(i)) - 4 * 10
    );
}
// Exercises the enumerate adaptor: each yielded pair carries .index and
// .value, and indexing the source container with .index must recover
// .value.
BOOST_AUTO_TEST_CASE( EnumerateTests, *boost::unit_test::label("Temple")) {
    std::vector<unsigned> testVec {5, 2, 3, 4};
    bool pass = true;
    for(const auto& enumPair : Temple::Adaptors::enumerate(testVec)) {
        if(testVec.at(enumPair.index) != enumPair.value) {
            pass = false;
            break;
        }
    }
    BOOST_CHECK(pass);
    // Sum of (index + value) over {5,2,3,4}: (0+5)+(1+2)+(2+3)+(3+4).
    auto weirdSum = Temple::sum(
        Temple::map(
            Temple::Adaptors::enumerate(testVec),
            [](const auto& enumPair) -> unsigned {
                return enumPair.index + enumPair.value;
            }
        )
    );
    BOOST_CHECK(weirdSum == 5 + 3 + 5 + 7);
}
// Checks that composed adaptors behave identically whether they OWN their
// inner range (built from a temporary) or merely REFERENCE a named one,
// for both single-range and two-range allPairs.
BOOST_AUTO_TEST_CASE(CompoundAdaptorOwnership, *boost::unit_test::label("Temple")) {
    // allPairs over an rvalue range: must take ownership of the temporary.
    auto pairsOfRange = Temple::Adaptors::allPairs(
        Temple::Adaptors::range(4U)
    );
    // allPairs over an lvalue range: must hold only a reference.
    auto selfOwningRange = Temple::Adaptors::range(4U);
    auto referenceOwningPairs = Temple::Adaptors::allPairs(selfOwningRange);
    auto checkPairs = [](const auto& rangeObject) -> void {
        // C(4,2) = 6 pairs; first pair of range(4) is (0,1), summing to 1.
        BOOST_CHECK(rangeObject.size() == 6);
        BOOST_CHECK(iteratorDistance(rangeObject) == rangeObject.size());
        BOOST_CHECK(
            Temple::invoke(std::plus<>(), *std::begin(rangeObject)) == 1U
        );
        // Sum of products of all pairs from {0,1,2,3}: 2 + 3 + 6 = 11.
        BOOST_CHECK(
            Temple::sum(
                Temple::Adaptors::transform(
                    Temple::Adaptors::allPairs(
                        Temple::Adaptors::range(4U)
                    ),
                    [](const unsigned i, const unsigned j) -> unsigned {
                        return i * j;
                    }
                )
            ) == 11U
        );
    };
    checkPairs(pairsOfRange);
    checkPairs(referenceOwningPairs);
    // Same ownership matrix for the two-range (cartesian) allPairs:
    // references, rvalues, and a mix of both.
    const std::vector<unsigned> i {1, 4, 9};
    const std::vector<unsigned> j {5, 2};
    auto pairFromTwoReferences = Temple::Adaptors::allPairs(i, j);
    auto pairFromTwoRValues = Temple::Adaptors::allPairs(
        std::vector<unsigned> {1, 4, 9},
        std::vector<unsigned> {5, 2}
    );
    auto pairFromMixed = Temple::Adaptors::allPairs(
        std::vector<unsigned> {1, 4, 9},
        j
    );
    auto checkTwoPairs = [](const auto& rangeObject) -> void {
        // 3 * 2 = 6 pairs; the first is (1,5), summing to 6.
        BOOST_CHECK(rangeObject.size() == 6);
        BOOST_CHECK(iteratorDistance(rangeObject) == rangeObject.size());
        BOOST_CHECK(
            Temple::invoke(std::plus<>(), *std::begin(rangeObject)) == 6
        );
        BOOST_CHECK(
            Temple::sum(
                Temple::Adaptors::transform(
                    rangeObject,
                    std::plus<>()
                )
            ) == 6 + 3 + 9 + 6 + 14 + 11
        );
    };
    checkTwoPairs(pairFromTwoReferences);
    checkTwoPairs(pairFromTwoRValues);
    checkTwoPairs(pairFromMixed);
}
// Degenerate-input coverage for the pair adaptors: ranges with zero or one
// elements must produce empty (or singleton) pair ranges without
// mis-reporting their size.
BOOST_AUTO_TEST_CASE(AdaptorShortRanges, *boost::unit_test::label("Temple")) {
    // Checks both the claimed size() and that iteration agrees with it.
    auto checkRangeLength = [](
        const auto& rangeObject,
        const unsigned expectedSize,
        const std::string& description
    ) {
        BOOST_CHECK_MESSAGE(
            rangeObject.size() == expectedSize,
            description << " size is " << rangeObject.size() << ", expected "
                << expectedSize
        );
        BOOST_CHECK_MESSAGE(
            iteratorDistance(rangeObject) == rangeObject.size(),
            description << " iterator distance is " << iteratorDistance(rangeObject)
                << ", expected equal to size (" << rangeObject.size() << ")"
        );
    };
    checkRangeLength(
        Temple::Adaptors::allPairs(std::vector<unsigned> {4}),
        0,
        "single-element all-pairs"
    );
    checkRangeLength(
        Temple::Adaptors::allPairs(std::vector<unsigned> {}),
        0,
        "no-element all-pairs"
    );
    checkRangeLength(
        Temple::Adaptors::allPairs(
            std::vector<unsigned> {4},
            std::vector<unsigned> {6}
        ),
        1,
        "one-one all-pairs"
    );
    checkRangeLength(
        Temple::Adaptors::allPairs(
            std::vector<unsigned> {},
            std::vector<unsigned> {6}
        ),
        0,
        "none-one all-pairs"
    );
    checkRangeLength(
        Temple::Adaptors::allPairs(
            std::vector<unsigned> {},
            std::vector<unsigned> {}
        ),
        0,
        "none-none all-pairs"
    );
    checkRangeLength(
        Temple::Adaptors::sequentialPairs(std::vector<unsigned> {4}),
        0,
        "one-element sequential pairs"
    );
    checkRangeLength(
        Temple::Adaptors::sequentialPairs(std::vector<unsigned> {}),
        0,
        "no-element sequential pairs"
    );
}
// Free-function variant of the size/iteration-agreement check used by
// FrameAdaptorTest below.
// NOTE(review): duplicates the checkRangeLength lambda inside
// AdaptorShortRanges — consider consolidating the two.
template<typename Range>
void checkRangeLengthTempl(
    const Range& rangeObject,
    const unsigned expectedSize,
    const std::string& description
) {
    BOOST_CHECK_MESSAGE(
        rangeObject.size() == expectedSize,
        description << " size is " << rangeObject.size() << ", expected "
            << expectedSize
    );
    BOOST_CHECK_MESSAGE(
        iteratorDistance(rangeObject) == rangeObject.size(),
        description << " iterator distance is " << iteratorDistance(rangeObject)
            << ", expected equal to size (" << rangeObject.size() << ")"
    );
}
// Exercises the cyclicFrame<N> adaptor: sliding windows of N consecutive
// elements that wrap around the end of the range. A range of length L
// yields L frames when L >= N, and none otherwise.
BOOST_AUTO_TEST_CASE(FrameAdaptorTest, *boost::unit_test::label("Temple")) {
    checkRangeLengthTempl(
        Temple::Adaptors::cyclicFrame<1>(std::vector<unsigned> {}),
        0,
        "no-element cyclic frame of size 1"
    );
    checkRangeLengthTempl(
        Temple::Adaptors::cyclicFrame<1>(std::vector<unsigned> {1}),
        1,
        "single-element cyclic frame of size 1"
    );
    checkRangeLengthTempl(
        Temple::Adaptors::cyclicFrame<1>(std::vector<unsigned> {1, 2}),
        2,
        "two-element cyclic frame of size 1"
    );
    checkRangeLengthTempl(
        Temple::Adaptors::cyclicFrame<2>(std::vector<unsigned> {1, 2}),
        2,
        "two-element cyclic frame of size 2"
    );
    checkRangeLengthTempl(
        Temple::Adaptors::cyclicFrame<2>(std::vector<unsigned> {1, 2, 3}),
        3,
        "three-element cyclic frame of size 2"
    );
    // Frame wider than the range: no frames at all.
    checkRangeLengthTempl(
        Temple::Adaptors::cyclicFrame<4>(std::vector<unsigned> {1, 2, 3}),
        0,
        "three-element cyclic frame of size 4"
    );
    // Cyclic windows of {1,2,3}: (1,2), (2,3), (3,1) -> products 2, 6, 3.
    BOOST_CHECK(
        Temple::sum(
            Temple::map(
                Temple::Adaptors::cyclicFrame<2>(std::vector<unsigned> {1, 2, 3}),
                [](unsigned i, unsigned j) -> unsigned {
                    return i * j;
                }
            )
        ) == 2U + 6U + 3U
    );
}
// Exercises the filter adaptor: only elements satisfying the predicate are
// yielded, so filtering {1,2,3} by is-even leaves exactly one element.
BOOST_AUTO_TEST_CASE(FilterAdaptorTests, *boost::unit_test::label("Temple")) {
    const auto filterDistance = iteratorDistance(
        Temple::Adaptors::filter(
            std::vector<unsigned> {1, 2, 3},
            [](const unsigned x) -> bool {return x % 2 == 0;}
        )
    );
    BOOST_CHECK_MESSAGE(
        filterDistance == 1,
        "Filter of 1,2,3 applying is_even isn't length one, but " << filterDistance
    );
}
// Exercises the combinations adaptor: counts of k-element subsets of a
// range, and a sum over every 2-element combination.
BOOST_AUTO_TEST_CASE(CombinationsTests, *boost::unit_test::label("Temple")) {
    const std::vector<unsigned> nums {{1, 2, 3}};
    // C(3,1) = 3 singletons.
    const auto singletonsCount = iteratorDistance(
        Temple::Adaptors::combinations(nums, 1)
    );
    BOOST_CHECK_EQUAL(singletonsCount, 3);
    // C(3,2) = 3 pairs. Fixed: this previously recomputed combinations of
    // size 1 (copy-paste from the singleton check above); the check still
    // passed only because C(3,1) == C(3,2), so size-2 counting was never
    // actually exercised.
    const auto pairsCount = iteratorDistance(
        Temple::Adaptors::combinations(nums, 2)
    );
    BOOST_CHECK_EQUAL(pairsCount, 3);
    // An empty range has no 2-element combinations.
    const auto emptyRangePairsCount = iteratorDistance(
        Temple::Adaptors::combinations(std::vector<unsigned> {}, 2)
    );
    BOOST_CHECK_EQUAL(emptyRangePairsCount, 0);
    // Pairs of {1,2,3}: {1,2}, {1,3}, {2,3} -> element sums 3, 4, 5.
    const auto pairSum = Temple::sum(
        Temple::map(
            Temple::Adaptors::combinations(nums, 2),
            [](const auto& pairAsVector) -> unsigned {
                return Temple::sum(pairAsVector);
            }
        )
    );
    BOOST_CHECK_EQUAL(pairSum, 3 + 4 + 5);
}
|
{"hexsha": "a16984e1dd0ec35c7e4170081f22ed04dd77c07a", "size": 10023, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "test/Temple/Adaptors.cpp", "max_stars_repo_name": "qcscine/molassembler", "max_stars_repo_head_hexsha": "3b72168477b2d1dee55812517e49d9c3285c50ba", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 17.0, "max_stars_repo_stars_event_min_datetime": "2020-11-27T14:59:34.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-28T10:31:25.000Z", "max_issues_repo_path": "test/Temple/Adaptors.cpp", "max_issues_repo_name": "qcscine/molassembler", "max_issues_repo_head_hexsha": "3b72168477b2d1dee55812517e49d9c3285c50ba", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/Temple/Adaptors.cpp", "max_forks_repo_name": "qcscine/molassembler", "max_forks_repo_head_hexsha": "3b72168477b2d1dee55812517e49d9c3285c50ba", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 6.0, "max_forks_repo_forks_event_min_datetime": "2020-12-09T09:21:53.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-22T15:42:21.000Z", "avg_line_length": 26.2382198953, "max_line_length": 84, "alphanum_fraction": 0.6435199042, "num_tokens": 2799}
|
/- 5.8 Summary of New Constructs
Lemmas:
dec_trivial -- decidable truth (e.g., a true closed executable expression)
funext -- functional extensionality
propext -- propositional extensionality
Tactics:
generalize -- replaces a term by a fresh variable defined by a new hypothesis
linarith -- applies a procedure for linear arithmetic
-/
namespace binary_trees
-- A binary tree over `α`: either empty, or a node carrying a value and two
-- subtrees. The `{}` annotation makes the type argument implicit in the
-- `empty` constructor, so `btree.empty` infers `α` from context.
inductive btree (α : Type) : Type
| empty {} : btree
| node : α → btree → btree → btree
-- Recursively swap the left and right subtrees of every node.
def mirror {α : Type} : btree α → btree α
| btree.empty := btree.empty
| (btree.node (a : α) left right) := btree.node (a : α) (mirror right) (mirror left)
-- Mirroring is an involution. Tactic proof by structural induction on the
-- tree: the empty case is definitional, the node case unfolds `mirror`
-- twice and closes with the two induction hypotheses.
lemma mirror_mirror {α : Type} (t : btree α) :
  mirror (mirror t) = t :=
begin
  induction t with a l r ih_left ih_right,
  case btree.empty { refl },
  case btree.node {
    rw [mirror, mirror, ih_left, ih_right],
  },
end
#print mirror_mirror
-- The same involution lemma, proved equationally with `calc` and pattern
-- matching instead of the `induction` tactic; the recursive calls
-- `mirror_mirror₂ l` / `mirror_mirror₂ r` play the role of induction
-- hypotheses.
lemma mirror_mirror₂ {α : Type} :
  ∀ t : btree α, mirror (mirror t) = t
| btree.empty := rfl
| (btree.node a l r) :=
  calc mirror (mirror (btree.node a l r))
      = mirror (btree.node a (mirror r) (mirror l)) :
    by refl
  ... = btree.node a (mirror (mirror l)) (mirror (mirror r)) :
    by refl
  ... = btree.node a l (mirror (mirror r)) :
    by rewrite mirror_mirror₂ l
  ... = btree.node a l r :
    begin rewrite mirror_mirror₂ r, end -- another begin .. end syntax example
#print mirror_mirror₂
-- A one-node tree holding `a`, with two empty children.
def singleton {α : Type} (a : α) : btree α :=
btree.node a btree.empty btree.empty
-- Interactive sanity checks of the constructors, `singleton`, and `mirror`
-- (each command's output is recorded in the trailing comment).
#check btree.empty -- btree.empty : btree ?M_1
#check @btree.empty -- btree.empty : Π {α : Type}, btree α
#reduce @btree.empty nat -- btree.empty
#check btree.node 3 btree.empty btree.empty -- btree.node 3 btree.empty btree.empty : btree ℕ
example : singleton 3 = btree.node 3 btree.empty btree.empty := rfl
#reduce mirror (btree.node 10 (singleton 1) (singleton 2))
-- btree.node 10 (btree.node 2 btree.empty btree.empty) (btree.node 1 btree.empty btree.empty)
lemma mirror_singleton {α : Type} (a : α) :
mirror (singleton a) = singleton a := rfl
example {α : Type} (a : α) (b : α) (c : α):
mirror (btree.node c (singleton a) (singleton b)) = (btree.node c (singleton b) (singleton a)) :=
begin
-- simp [mirror],
-- split; rw mirror_singleton, -- OK
-- rw mirror,
-- constructor, -- split also works
refl,
end
/-
A binary tree is full if all its nodes have either zero or two children.
This can be encoded as an inductive predicate as follows:
-/
-- -- version 1
-- inductive is_full {α : Type} : btree α → Prop
-- | empty : is_full btree.empty -- maybe this <=> boolean True? => is_full btree.empty = true
-- | node (a : α) :
-- (btree.node a (is_full btree.empty) (is_full btree.empty)) ∨ _
-- --version 2
-- inductive is_full {α : Type} : btree α → Prop
-- | empty : is_full btree.empty -- maybe this <=> boolean True? => is_full btree.empty = true
-- | node (a : α) :
-- is_full (btree.node a (btree.empty) (btree.empty)) ∨
-- is_full (btree.node a (is_full (btree.node a _ _)) (is_full (btree.node a _ _)))
namespace test

-- First experiment: an `is_full` predicate that only covers the empty tree
-- and depth-one nodes (both children empty). Deliberately incomplete.
inductive is_full {α : Type} : btree α → Prop
| empty : is_full btree.empty -- maybe this <=> boolean True? => is_full btree.empty = true
| node (a : α) :
  is_full (btree.node a btree.empty btree.empty)

-- Sanity check: the `node` introduction rule applies directly.
-- The commented lines record tactic attempts that fail on an inductive
-- predicate (no defining equations to `rw`/`simp` with).
lemma is_full_singleton_test {α : Type} (a : α) :
  is_full (btree.node a btree.empty btree.empty) :=
begin
  -- rw is_full, -- fail
  -- rw is_full.node -- fail
  -- simp [is_full], -- fail
  -- constructor, -- OK
  -- cases is_full, -- fail
  -- by_cases is_full, --fail
  -- cases btree a, --fail
  -- cases is_full.node a, -- Doing nothing but no error
  -- refl, -- fail
  -- apply is_full.node, -- OK
  exact is_full.node a, -- OK
end
end test
-- version 4 -- invalid return type for 'binary_trees.is_full.node'
-- inductive is_full {α : Type} : btree α → Prop
-- | empty : is_full btree.empty
-- | node (a : α) {left : btree α} {right : btree α}
-- (hleft : is_full left) (hright : is_full right)
-- :
-- (is_full (btree.node a btree.empty btree.empty)) ∨
-- (is_full (btree.node a left right))
-- NO ERROR
-- inductive is_full {α : Type} : btree α → Prop
-- | empty : is_full btree.empty
-- | node (a : α) {left right : btree α} :
-- is_full (btree.node a left right)
namespace my_tries

-- Second experiment: encode "zero or two children" as an explicit disjunction
-- hypothesis. Note the non-empty branch forces both children to carry the
-- same label `a`, which is stronger than intended.
inductive is_full {α : Type} : btree α → Prop
| empty : is_full btree.empty
| node (a : α) {left right : btree α} {l1 r1 l2 r2 : btree α}
  (hzero_or_two_children:
    (left = btree.empty ∧ right = btree.empty) ∨
    (left = (btree.node a l1 r1) ∧ right = (btree.node a l2 r2))
  )
  :
  is_full (btree.node a left right)

-- The four implicit subtree arguments are irrelevant in the empty case, so we
-- discharge them with `iterate`.
lemma is_full_singleton {α : Type} (a : α) :
  is_full (btree.node a btree.empty btree.empty) :=
begin
  constructor,
  simp,
  iterate 4 { exact btree.empty },
end

/-
A somewhat more interesting property of full trees is that fullness
is preserved by the mirror operation. Our first proof is by rule
induction on ht : is_full t:
-/
lemma is_full_mirror {α : Type} (t : btree α)
  (ht : is_full t) :
  is_full (mirror t) :=
begin
  induction t with a left right ih_left ih_right,
  case empty {
    rw mirror,
    exact ht,
  },
  case node {
    rw mirror,
    sorry,
  },
end
end my_tries
namespace from_book

-- for example proposition always_true (btree α)
inductive always_true {α : Type} : btree α → Prop
| empty : always_true btree.empty
| node (a : α) : always_true (singleton a)

-- Emptiness / non-emptiness as inductive predicates.
inductive is_empty {α : Type} : btree α → Prop
| empty : is_empty btree.empty
inductive is_not_empty {α : Type} : btree α → Prop
| node (a : α) : is_not_empty (singleton a)

-- Characterization of `is_not_empty`; left unproved here.
lemma is_not_empty.def {α : Type} (t : btree α) : is_not_empty t ↔ ¬ is_empty t := sorry

-- Book version of fullness: each node is full iff both subtrees are full and
-- the subtrees are empty-or-nonempty together (zero or two children).
inductive is_full {α : Type} : btree α → Prop
| empty : is_full btree.empty
| node (a : α) (left right : btree α)
  (h_full_left : is_full left) (h_full_right : is_full right)
  (hiff : left = btree.empty ↔ right = btree.empty) :
  is_full (btree.node a left right)

-- A fixed but arbitrary element type and value, used by the lemmas below.
constants {α : Type} (a : α)

lemma is_full_singleton :
  is_full (singleton a) :=
begin
  apply is_full.node _ _ _,
  exact is_full.empty,
  exact is_full.empty,
  refl,
end
#print is_full_singleton
-- is_full.node a btree.empty btree.empty is_full.empty is_full.empty (iff.refl (btree.empty = btree.empty))

-- The same proof written as a direct term.
lemma is_full_singleton₂ : is_full (singleton a) :=
is_full.node -- function <=> rule <=> definition
  _ _ _ is_full.empty is_full.empty (iff.refl _) -- arguments <=> goals
-- first 3 solve automatically, but last 3 solve by human

-- Helper relating emptiness of mirrored subtrees; left unproved.
lemma mirror_eq_empty_iff {α : Type} (l r : btree α):
  (l = btree.empty ↔ r = btree.empty) →
  (mirror r = btree.empty ↔ mirror l = btree.empty) := sorry

-- proof by rule induction on ht : is_full t:
lemma is_full_mirror {α : Type} (t : btree α)
  (ht : is_full t) :
  is_full (mirror t) :=
begin
  induction ht,
  case is_full.empty {
    -- rw mirror,
    exact is_full.empty,
  },
  case is_full.node : a l r hl hr hiff ih_l ih_r {
    rewrite mirror,
    apply is_full.node,
    { exact ih_r },
    { exact ih_l },
    {
      -- simp [mirror_eq_empty_iff],
      sorry,
    },
  },
end

-- proof by structural induction on the tree t:
lemma is_full_mirror₂ {α : Type} :
  ∀ t : btree α, is_full t → is_full (mirror t)
| btree.empty := id
-- begin
-- assume ht : is_full btree.empty, <=> intro ht,
-- exact ht,
-- <=>
-- exact id, -- <=> exact (λ ht, ht),
-- end
| (btree.node a l r) :=
begin
  intro ht,
  cases ht with _ _ _ hl hr hiff,
  rewrite mirror,
  -- Goal state at this point:
  -- α : Type,
  -- is_full_mirror₂ : ∀ (t : btree α), is_full t → is_full (mirror t),
  -- a : α,
  -- l r : btree α,
  -- hl hr : is_full l,
  -- hiff : l = btree.empty ↔ r = btree.empty
  -- ⊢ is_full (btree.node a (mirror r) (mirror l))
  apply is_full.node,
  {
    apply is_full_mirror₂, -- backward proof: goal changed from "is_full (mirror t)" to "is_full t"
    exact hr,
  },
  { exact is_full_mirror₂ _ hl }, -- same proof as in previous goal
  {
    -- simp [mirror_eq_empty_iff, *],
    sorry,
  },
end

example : ¬ is_empty (singleton a) :=
begin
  -- assume h : is_empty (singleton a),
  -- type_check is_empty.empty,
  -- cases (singleton a) with a left right,
  -- induction a with a left right,
  by_contra h₁,
  have h₂ := @is_empty.empty α,
  -- type_check @is_empty α,
  -- prove with using axiom `is_not_empty.def`
  sorry,
end

-- Proof via the (sorry'd) characterization lemma `is_not_empty.def`.
lemma singleton_is_not_empty : ¬ is_empty (singleton a) := by {
  rw ←is_not_empty.def,
  exact is_not_empty.node a,
}
#print singleton_is_not_empty

lemma empty_is_empty: is_empty (@btree.empty α) := is_empty.empty
end from_book
-- When is_full function is undefined, proof of 1 = 1 using
-- is_full_singleton lemma works:
-- example : 1 = 1 :=
-- begin
-- have h := is_full_singleton btree.empty _,
-- refl,
-- exact nat,
-- end
end binary_trees
|
{"author": "mathprocessing", "repo": "lean_mathlib_examples", "sha": "743c6456c0a3219dd1722efdd31ee6f3a113818a", "save_path": "github-repos/lean/mathprocessing-lean_mathlib_examples", "path": "github-repos/lean/mathprocessing-lean_mathlib_examples/lean_mathlib_examples-743c6456c0a3219dd1722efdd31ee6f3a113818a/src/full_binary_trees.lean"}
|
#!/usr/bin/env python3
"""
.. module:: classify
:platform: Unix
:synopsis: Classify output from MothNet model.
.. moduleauthor:: Adam P. Jones <ajones173@gmail.com>
"""
from sklearn.metrics import confusion_matrix, roc_curve, auc
import numpy as _np
from scipy import interp as _interp
def roc_multi(true_classes, likelihoods):
    """
    Measure ROC AUC for multi-class classifiers.

    Params:
        true_classes (numpy array): class labels [observations,]
        likelihoods (numpy array): predicted likelihoods [observations x classes]

    Returns:
        output (dict):
            - targets (numpy array): one-hot-encoded target labels
            - roc_auc (dict): ROC curve and ROC area for each class
            - fpr (dict): false-positive rate for each class
            - tpr (dict): true-positive rate for each class

    >>> roc_dict = roc_multi(true_classes, likelihoods)
    """
    n_classes = len(set(true_classes))
    # NOTE(review): the one-hot encoding below assumes labels are exactly
    # 0..n_classes-1 (it indexes an identity matrix by label) — confirm upstream.
    class_labels = sorted(set(true_classes.astype(int)))

    # one-hot-encode target labels
    targets = _np.eye(n_classes)[true_classes.astype(int)]

    # compute ROC curve and ROC area for each class
    fpr = dict()
    tpr = dict()
    roc_auc = dict()
    for i in class_labels:
        fpr[i], tpr[i], _ = roc_curve(targets[:, i], likelihoods[:, i])
        roc_auc[i] = auc(fpr[i], tpr[i])

    # compute micro-average ROC curve and ROC area
    fpr["micro"], tpr["micro"], _ = roc_curve(targets.ravel(), likelihoods.ravel())
    roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])

    ## compute macro-average ROC curve and ROC area
    # first aggregate all false positive rates; iterate the same label list as
    # above (the original mixed `set(labels)` with `range(n_classes)`, which
    # raises KeyError if the labels are not exactly 0..n_classes-1)
    all_fpr = _np.unique(_np.concatenate([fpr[i] for i in class_labels]))

    # then interpolate all ROC curves at these points
    # (numpy.interp replaces scipy.interp, which has been removed from SciPy)
    mean_tpr = _np.zeros_like(all_fpr)
    for i in class_labels:
        mean_tpr += _np.interp(all_fpr, fpr[i], tpr[i])

    # finally, average it and compute AUC
    mean_tpr /= n_classes
    fpr["macro"] = all_fpr
    tpr["macro"] = mean_tpr
    roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])

    return {
        'targets': targets,
        'roc_auc': roc_auc,
        'fpr': fpr,
        'tpr': tpr,
    }
def classify_digits_log_likelihood(results):
    """
    Classify the test digits in a run using log likelihoods from the various EN responses.

    Steps:
    #. for each test digit (ignore non-postTrain digits), for each EN, calculate \
    the number of stds the test digit is from each class distribution. This makes \
    an [n_en x n_en] matrix where each row corresponds to an EN, and each column \
    corresponds to a class.
    #. Raise this matrix to the 4th power by entry. Sum the columns. Select the col \
    with the lowest value as the predicted class. Return the vector of sums in 'likelihoods'.
    #. The rest is simple calculation.

    Args:
        results (dict): output from :func:`simulate`. i'th entry gives results for all \
        classes, in the _i_th EN.

    Returns:
        output (dict):
            - true_classes (numpy array): post-training (ie validation) class labels
            - targets (numpy array): one-hot-encoded target labels
            - roc_auc (dict): ROC curve and ROC area for each class
            - fpr (dict): false-positive rate for each class
            - tpr (dict): true-positive rate for each class
            - pred_classes (numpy array): predicted classes
            - likelihoods (numpy array): [n x n_en] each row a post_training digit \
            (entries are summed log likelihoods)
            - acc_perc (numpy array): class accuracies as percentages
            - total_acc (float): overall accuracy as percentage
            - conf_mat (numpy array): i,j'th entry is number of test digits with true \
            label i that were predicted to be j

    >>> classify_digits_log_likelihood( dummy_results )
    """
    n_en = len(results)  # number of ENs, same as number of classes
    # indices of post-train (ie validation) digits
    # TO DO: Why use results[1] rather than results[0] here? Ask CBD
    post_train_inds = _np.nonzero(results[1]['post_train_resp'] >= 0)[0]
    n_post = len(post_train_inds)  # number of post-train digits

    # extract true classes (digits may be referred to as odors or 'odor puffs'):
    true_classes = results[0]['odor_class'][post_train_inds]

    # extract the relevant odor puffs: each row is an EN, each col is an odor puff
    post_train_resp = _np.full((n_en, n_post), _np.nan)
    for i, resp in enumerate(results):
        post_train_resp[i, :] = resp['post_train_resp'][post_train_inds]

    # matrix of mean class responses and stds; mu[i, j] is the mean response of
    # the i'th EN to digits of the j'th class (diagonal = home-class responses)
    mu = _np.full((n_en, n_en), _np.nan)
    sig = _np.full((n_en, n_en), _np.nan)
    for i, resp in enumerate(results):
        mu[i, :] = resp['post_mean_resp']
        sig[i, :] = resp['post_std_resp']

    # for each EN: get the likelihood of each puff (ie each col of post_train_resp)
    likelihoods = _np.zeros((n_post, n_en))
    for i in range(n_post):
        # post_train_resp[:,i] collapses to a row vector; restore a column with
        # _np.newaxis so it can be tiled across classes
        a = post_train_resp[:, i][_np.newaxis]
        # n_en x n_en matrix (the tile width was hard-coded to 10, which broke
        # any run with a different number of classes)
        dist = (_np.tile(a.T, (1, n_en)) - mu) / sig
        # The ith row, jth col entry is the mahalanobis distance of this test
        # digit's response from the i'th EN's response to the j'th class.
        likelihoods[i, :] = _np.sum(dist**4, axis=0)  # the ^4 (instead of ^2) is a sharpener

    # make predictions:
    pred_classes = _np.argmin(likelihoods, axis=1)

    # calc accuracy percentages:
    class_acc = _np.zeros(n_en)
    for i in range(n_en):
        class_acc[i] = (100 * _np.logical_and(pred_classes == i, true_classes == i).sum()) / (true_classes == i).sum()
    total_acc = (100 * (pred_classes == true_classes).sum()) / len(true_classes)

    # confusion matrix:
    # i,j'th entry is number of test digits with true label i that were predicted to be j
    confusion = confusion_matrix(true_classes, pred_classes)

    # measure ROC AUC for each class (negate: lower likelihood score = better match)
    roc_dict = roc_multi(true_classes, likelihoods * -1)

    return {
        'true_classes': true_classes,
        'targets': roc_dict['targets'],
        'roc_auc': roc_dict['roc_auc'],
        'fpr': roc_dict['fpr'],
        'tpr': roc_dict['tpr'],
        'pred_classes': pred_classes,
        'likelihoods': likelihoods,
        'acc_perc': class_acc,
        'total_acc': total_acc,
        'conf_mat': confusion,
    }
def classify_digits_thresholding(results, home_advantage, home_thresh_sigmas, above_home_thresh_reward):
    """
    Classify the test digits using log likelihoods from the various EN responses, \
    with the added option of rewarding high scores relative to an ENs home-class \
    expected response distribution.

    One use of this function is to apply de-facto thresholding on discrete ENs, \
    so that the predicted class corresponds to the EN that spiked most strongly \
    (relative to its usual home-class response).

    Args:
        results (dict): [1 x n_en] dict produced by :func:`collect_stats`.
        home_advantage (int): the emphasis given to the home EN. It divides the \
        off-diagonal of dist. 1 -> no advantage (default). Very high means that a \
        test digit will be classified according to the home EN it does best in, \
        ie each EN acts on its own.
        home_thresh_sigmas (int): the number of stds below an EN's home-class mean \
        that we set a threshold, such that if a digit scores above this threshold \
        in an EN, that EN will be rewarded by 'above_home_thresh_reward'.
        above_home_thresh_reward (int): if a digit's response scores above the EN's \
        mean home-class value, reward it by dividing by this value. This reduces \
        the log likelihood score for that EN.

    Returns:
        output (dict): same keys as :func:`classify_digits_log_likelihood`, plus \
        'home_advantage' and 'home_thresh_sigmas'.

    >>> classify_digits_thresholding( dummy_results, 1, 2, 1 )
    """
    n_en = len(results)  # number of ENs, same as number of classes
    # indices of post-train (ie validation) digits
    # DEV NOTE: Why use results[1] rather than results[0] here? Ask CBD
    post_train_inds = _np.nonzero(results[1]['post_train_resp'] >= 0)[0]
    n_post = len(post_train_inds)  # number of post-train digits

    # extract true classes (digits may be referred to as odors or 'odor puffs'):
    true_classes = results[0]['odor_class'][post_train_inds]

    # extract the relevant odor puffs: each row is an EN, each col is an odor puff
    post_train_resp = _np.full((n_en, n_post), _np.nan)
    for i, resp in enumerate(results):
        post_train_resp[i, :] = resp['post_train_resp'][post_train_inds]

    # matrix of mean class responses and stds; mu[i, j] is the mean of the i'th
    # EN in response to digits from the j'th class (diagonal = home-class)
    mu = _np.full((n_en, n_en), _np.nan)
    sig = _np.full((n_en, n_en), _np.nan)
    for i, resp in enumerate(results):
        mu[i, :] = resp['post_mean_resp']
        sig[i, :] = resp['post_std_resp']

    # for each EN: get the likelihood of each puff (ie each col of post_train_resp)
    likelihoods = _np.zeros((n_post, n_en))
    for i in range(n_post):
        # n_en x n_en matrix. Tile the response vector as *columns* so that
        # dist[i, j] = (resp_i - mu[i, j]) / sig[i, j], matching the intent
        # stated below and the sibling classify_digits_log_likelihood.
        # (The original tiled it as rows — transposed — and hard-coded 10 classes.)
        dist = (_np.tile(post_train_resp[:, i][:, _np.newaxis], (1, n_en)) - mu) / sig
        # The ith row, jth col entry is the mahalanobis distance of this test
        # digit's response from the i'th EN's response to the j'th class.

        # 1. Apply rewards for above-threshold responses:
        off_diag = dist - _np.diag(_np.diag(dist))
        on_diag = _np.diag(dist).copy()
        # Reward any on-diagonals that are above some threshold (mu - n*sigma) of an EN.
        # CAUTION: This reward-by-shrinking only works when off-diagonals are
        # demolished by very high value of 'home_advantage'.
        home_threshs = home_thresh_sigmas * _np.diag(sig)
        on_diag[on_diag > home_threshs] /= above_home_thresh_reward
        on_diag = _np.diag(on_diag)  # turn back into a matrix
        # 2. Emphasize the home-class results by shrinking off-diagonal values.
        dist = (off_diag / home_advantage) + on_diag
        likelihoods[i, :] = _np.sum(dist**4, axis=0)  # the ^4 (instead of ^2) is a sharpener

    # make predictions:
    pred_classes = _np.argmin(likelihoods, axis=1)

    # calc accuracy percentages:
    class_acc = _np.zeros(n_en)
    for i in range(n_en):
        class_acc[i] = (100 * _np.logical_and(pred_classes == i, true_classes == i).sum()) / (true_classes == i).sum()
    total_acc = (100 * (pred_classes == true_classes).sum()) / len(true_classes)

    # confusion matrix:
    # i,j'th entry is number of test digits with true label i that were predicted to be j
    confusion = confusion_matrix(true_classes, pred_classes)

    # measure ROC AUC for each class
    roc_dict = roc_multi(true_classes, likelihoods)

    return {
        'true_classes': true_classes,
        'targets': roc_dict['targets'],
        'roc_auc': roc_dict['roc_auc'],
        'fpr': roc_dict['fpr'],
        'tpr': roc_dict['tpr'],
        'pred_classes': pred_classes,
        'likelihoods': likelihoods,
        'acc_perc': class_acc,
        'total_acc': total_acc,
        'conf_mat': confusion,
        'home_advantage': home_advantage,
        'home_thresh_sigmas': home_thresh_sigmas,
    }
# MIT license:
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
# associated documentation files (the "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial
# portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
# PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
# AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
{"hexsha": "15784d7373b16aa2c1485f4edb7f61ecf6304b39", "size": 14127, "ext": "py", "lang": "Python", "max_stars_repo_path": "pymoth/modules/classify.py", "max_stars_repo_name": "meccaLeccaHi/pymoth", "max_stars_repo_head_hexsha": "92189887e316e0d17426952625dd33746d2657cf", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2019-08-19T15:47:40.000Z", "max_stars_repo_stars_event_max_datetime": "2020-12-01T03:47:52.000Z", "max_issues_repo_path": "pymoth/modules/classify.py", "max_issues_repo_name": "meccaLeccaHi/pyMoth", "max_issues_repo_head_hexsha": "92189887e316e0d17426952625dd33746d2657cf", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 10, "max_issues_repo_issues_event_min_datetime": "2020-01-28T22:55:43.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-11T23:55:59.000Z", "max_forks_repo_path": "pymoth/modules/classify.py", "max_forks_repo_name": "meccaLeccaHi/pyMoth", "max_forks_repo_head_hexsha": "92189887e316e0d17426952625dd33746d2657cf", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2019-12-10T04:21:29.000Z", "max_forks_repo_forks_event_max_datetime": "2020-12-20T15:43:32.000Z", "avg_line_length": 41.7958579882, "max_line_length": 121, "alphanum_fraction": 0.722729525, "include": true, "reason": "import numpy,from scipy", "num_tokens": 3852}
|
/*!
@file
Defines `boost::hana::Traversable::traverse_mcd`.
@copyright Louis Dionne 2014
Distributed under the Boost Software License, Version 1.0.
(See accompanying file LICENSE.md or copy at http://boost.org/LICENSE_1_0.txt)
*/
#ifndef BOOST_HANA_TRAVERSABLE_TRAVERSE_MCD_HPP
#define BOOST_HANA_TRAVERSABLE_TRAVERSE_MCD_HPP
#include <boost/hana/traversable/traversable.hpp>
namespace boost { namespace hana {
    //! Minimal complete definition: `traverse`
    //!
    //! `sequence` is derived from `traverse`: traversing with the identity
    //! function collapses a structure of applicatives into an applicative
    //! of the structure.
    struct Traversable::traverse_mcd {
        template <typename A, typename T>
        static constexpr auto sequence_impl(T traversable)
        { return traverse<A>([](auto x) { return x; }, traversable); }
    };
}} // end namespace boost::hana
#endif // !BOOST_HANA_TRAVERSABLE_TRAVERSE_MCD_HPP
|
{"hexsha": "97ef9a2578693e7eedef4f6cf6dcd7ad2fd5cd9c", "size": 767, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "include/boost/hana/traversable/traverse_mcd.hpp", "max_stars_repo_name": "rbock/hana", "max_stars_repo_head_hexsha": "2b76377f91a5ebe037dea444e4eaabba6498d3a8", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 2.0, "max_stars_repo_stars_event_min_datetime": "2015-05-07T14:29:13.000Z", "max_stars_repo_stars_event_max_datetime": "2015-07-04T10:59:46.000Z", "max_issues_repo_path": "include/boost/hana/traversable/traverse_mcd.hpp", "max_issues_repo_name": "rbock/hana", "max_issues_repo_head_hexsha": "2b76377f91a5ebe037dea444e4eaabba6498d3a8", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "include/boost/hana/traversable/traverse_mcd.hpp", "max_forks_repo_name": "rbock/hana", "max_forks_repo_head_hexsha": "2b76377f91a5ebe037dea444e4eaabba6498d3a8", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.5, "max_line_length": 78, "alphanum_fraction": 0.7431551499, "num_tokens": 189}
|
"""
=============
Interpolation
=============
Provides interpolation algorithms across tabular data for ``vivarium``
simulations.
"""
import pandas as pd
import numpy as np
from typing import Union, List, Tuple
ParameterType = Union[List[List[str]], List[Tuple[str, str, str]]]
class Interpolation:
    """A callable that returns the result of an interpolation function over input data.

    Attributes
    ----------
    data :
        The data from which to build the interpolation. Contains
        categorical_parameters and continuous_parameters.
    categorical_parameters :
        Column names to be used as categorical parameters in Interpolation
        to select between interpolation functions.
    continuous_parameters :
        Column names to be used as continuous parameters in Interpolation. If
        bin edges, should be of the form (column name used in call, column name
        for left bin edge, column name for right bin edge).
    order :
        Order of interpolation.
    """

    def __init__(self, data: pd.DataFrame, categorical_parameters: Union[List[str], Tuple[str]],
                 continuous_parameters: ParameterType, order: int, extrapolate: bool, validate: bool):
        # TODO: allow for order 1 interpolation with binned edges
        if order != 0:
            raise NotImplementedError(f'Interpolation is only supported for order 0. You specified order {order}')

        if validate:
            validate_parameters(data, categorical_parameters, continuous_parameters)

        self.key_columns = categorical_parameters
        self.data = data.copy()
        self.parameter_columns = continuous_parameters
        # value columns = everything that is neither a key column nor part of a
        # (call name, left edge, right edge) parameter triple
        self.value_columns = self.data.columns.difference(set(self.key_columns)
                                                          | set([col for p in self.parameter_columns for col in p]))
        self.order = order
        self.extrapolate = extrapolate
        self.validate = validate

        if self.key_columns:
            # Since there are key_columns we need to group the table by those
            # columns to get the sub-tables to fit
            sub_tables = self.data.groupby(list(self.key_columns))
        else:
            # There are no key columns so we will fit the whole table
            sub_tables = {None: self.data}.items()
        # one fitted Order0Interp per categorical-key combination
        self.interpolations = {}
        for key, base_table in sub_tables:
            if base_table.empty:  # if one of the key columns is a category and not all values are present in data
                continue
            # since order 0, we can interpolate all values at once
            self.interpolations[key] = Order0Interp(base_table, self.parameter_columns,
                                                    self.value_columns, self.extrapolate, self.validate)

    def __call__(self, interpolants: pd.DataFrame) -> pd.DataFrame:
        """Get the interpolated results for the parameters in interpolants.

        Parameters
        ----------
        interpolants :
            Data frame containing the parameters to interpolate.

        Returns
        -------
        pd.DataFrame
            A table with the interpolated values for the given interpolants.
        """
        if self.validate:
            validate_call_data(interpolants, self.key_columns, self.parameter_columns)

        if self.key_columns:
            # match each interpolant row to the sub-interpolation fitted for its keys
            sub_tables = interpolants.groupby(list(self.key_columns))
        else:
            sub_tables = [(None, interpolants)]
        # specify some numeric type for columns so they won't be objects but will updated with whatever
        # column type actually is
        result = pd.DataFrame(index=interpolants.index, columns=self.value_columns, dtype=np.float64)
        for key, sub_table in sub_tables:
            if sub_table.empty:
                continue
            df = self.interpolations[key](sub_table)
            result.loc[sub_table.index, self.value_columns] = df.loc[sub_table.index, self.value_columns]
        return result

    def __repr__(self):
        return "Interpolation()"
def validate_parameters(data, categorical_parameters, continuous_parameters):
    """Validate the data and parameter specifications used to build an Interpolation.

    Parameters
    ----------
    data : pd.DataFrame
        The table the interpolation will be built from.
    categorical_parameters :
        Column names used as categorical (key) parameters.
    continuous_parameters :
        Sequence of 3-item specs: (column name used in call, left bin edge
        column, right bin edge column).

    Returns
    -------
    list
        The sorted value columns (columns that are neither categorical nor
        part of a continuous parameter spec).

    Raises
    ------
    ValueError
        If data is empty, no continuous parameters are given, a parameter spec
        is malformed, or there are no value columns.
    """
    if data.empty:
        raise ValueError("You must supply non-empty data to create the interpolation.")

    if len(continuous_parameters) < 1:
        raise ValueError("You must supply at least one continuous parameter over which to interpolate.")

    for p in continuous_parameters:
        # use builtin list/tuple here; isinstance against typing aliases is
        # deprecated and unnecessary
        if not isinstance(p, (list, tuple)) or len(p) != 3:
            raise ValueError(f'Interpolation is only supported for binned data. You must specify a list or tuple '
                             f'containing, in order, the column name used when interpolation is called, '
                             f'the column name for the left edge (inclusive), and the column name for '
                             f'the right edge (exclusive). You provided {p}.')

    # break out the individual columns from binned column name lists
    param_cols = [col for p in continuous_parameters for col in p]

    # These are the columns which the interpolation function will approximate
    value_columns = sorted(data.columns.difference(set(categorical_parameters) | set(param_cols)))
    if not value_columns:
        # Build the message from the flattened (hashable) column names; the raw
        # parameter specs may be lists, and set(list_of_lists) raises TypeError,
        # which previously masked this ValueError.
        raise ValueError(f"No non-parameter data. Available columns: {data.columns}, "
                         f"Parameter columns: {set(categorical_parameters) | set(param_cols)}")

    return value_columns
def validate_call_data(data, key_columns, parameter_columns):
    """Validate the data an Interpolation is called on.

    Parameters
    ----------
    data :
        Frame of interpolants; must be a pandas DataFrame containing every
        callable parameter column and (if any) every key column.
    key_columns :
        Categorical columns the Interpolation was built with.
    parameter_columns :
        Continuous parameter specs; p[0] is the column name used at call time.

    Raises
    ------
    TypeError
        If data is not a DataFrame.
    ValueError
        If required parameter or key columns are missing from data.
    """
    if not isinstance(data, pd.DataFrame):
        # NOTE: the implicit f-string concatenations below previously dropped
        # spaces between lines ("Youpassed", "allbe", "keycolumns"); fixed.
        raise TypeError(f'Interpolations can only be called on pandas.DataFrames. You '
                        f'passed {type(data)}.')
    callable_param_cols = [p[0] for p in parameter_columns]

    if not set(callable_param_cols) <= set(data.columns.values.tolist()):
        raise ValueError(f'The continuous parameter columns with which you built the Interpolation must all '
                         f'be present in the data you call it on. The Interpolation has key '
                         f'columns: {callable_param_cols} and your data has columns: '
                         f'{data.columns.values.tolist()}')

    if key_columns and not set(key_columns) <= set(data.columns.values.tolist()):
        raise ValueError(f'The key (categorical) columns with which you built the Interpolation must all '
                         f'be present in the data you call it on. The Interpolation has key '
                         f'columns: {key_columns} and your data has columns: '
                         f'{data.columns.values.tolist()}')
def check_data_complete(data, parameter_columns):
    """ For any parameters specified with edges, make sure edges
    don't overlap and don't have any gaps. Assumes that edges are
    specified with ends and starts overlapping (but one exclusive and
    the other inclusive) so can check that end of previous == start
    of current.

    If multiple parameters, make sure all combinations of parameters
    are present in data.

    Requires that bins of each parameter be standard across all values
    of other parameters, i.e., all bins for one parameter when de-duplicated
    should cover a continuous range of that parameter with no overlaps or gaps
    and the range covered should be the same for all combinations of other
    parameter values.
    """
    param_edges = [p[1:] for p in parameter_columns if isinstance(p, (Tuple, List))]  # strip out call column name

    # check no overlaps/gaps
    for p in param_edges:
        # validate this parameter's bins within each combination of the
        # *other* parameters' values
        other_params = [p_ed[0] for p_ed in param_edges if p_ed != p]
        if other_params:
            sub_tables = data.groupby(list(other_params))
        else:
            # single-parameter case: treat the whole frame as one group
            sub_tables = {None: data}.items()

        # number of distinct left edges across the entire table; each subgroup
        # must exhibit all of them
        n_p_total = len(set(data[p[0]]))

        for _, table in sub_tables:
            param_data = table[[p[0], p[1]]].copy().sort_values(by=p[0])
            start, end = param_data[p[0]].reset_index(drop=True), param_data[p[1]].reset_index(drop=True)

            if len(set(start)) < n_p_total:
                raise ValueError(f'You must provide a value for every combination of {parameter_columns}.')

            if len(start) <= 1:
                continue

            for i in range(1, len(start)):
                e = end[i-1]
                s = start[i]
                # previous right edge past current left edge, or a duplicated
                # left edge, means the bins overlap
                if e > s or s == start[i-1]:
                    raise ValueError(f'Parameter data must not contain overlaps. Parameter {p} '
                                     f'contains overlapping data.')
                # previous right edge short of current left edge means a gap
                if e < s:
                    raise NotImplementedError(f'Interpolation only supported for parameter columns '
                                              f'with continuous bins. Parameter {p} contains '
                                              f'non-continuous bins.')
class Order0Interp:
    """A callable that returns the result of order 0 interpolation over input data.

    Order-0 ("nearest bin") interpolation: each interpolant is mapped to the
    bin containing it and the corresponding row of ``data`` is returned.

    Attributes
    ----------
    data :
        The data from which to build the interpolation. Contains
        categorical_parameters and continuous_parameters.
    parameter_columns :
        Column names to be used as parameters in Interpolation.
    """
    def __init__(self, data, parameter_columns: ParameterType, value_columns: List[str], extrapolate: bool,
                 validate: bool):
        """
        Parameters
        ----------
        data :
            Data frame used to build interpolation.
        parameter_columns :
            Parameter columns. Should be of form (column name used in call,
            column name for left bin edge, column name for right bin edge)
            or column name. Assumes left bin edges are inclusive and
            right exclusive.
        extrapolate :
            Whether or not to extrapolate beyond the edge of supplied bins.
        validate :
            Whether to run ``check_data_complete`` on ``data`` before building
            the lookup structures.
        """
        if validate:
            check_data_complete(data, parameter_columns)
        self.data = data.copy()
        self.value_columns = value_columns
        self.extrapolate = extrapolate
        # (column name used in call, col name for left edge, col name for right):
        # [ordered left edges of bins], max right edge (used when extrapolation not allowed)
        self.parameter_bins = {}
        for p in parameter_columns:
            # Unique, sorted left edges define the bin starts for this parameter.
            left_edge = self.data[p[1]].drop_duplicates().sort_values()
            # Largest right edge bounds the allowed range when extrapolate=False.
            max_right = self.data[p[2]].drop_duplicates().max()
            self.parameter_bins[tuple(p)] = {'bins': left_edge.reset_index(drop=True), 'max': max_right}
    def __call__(self, interpolants: pd.DataFrame) -> pd.DataFrame:
        """Find the bins for each parameter for each interpolant in interpolants
        and return the values from data there.
        Parameters
        ----------
        interpolants:
            Data frame containing the parameters to interpolate.
        Returns
        -------
        pd.DataFrame
            A table with the interpolated values for the given interpolants.

        Raises
        ------
        ValueError
            If ``extrapolate`` is False and any interpolant falls outside the
            range covered by the bins.
        """
        # build a dataframe where we have the start of each parameter bin for each interpolant
        interpolant_bins = pd.DataFrame(index=interpolants.index)
        merge_cols = []
        for cols, d in self.parameter_bins.items():
            bins = d['bins']
            max_right = d['max']
            merge_cols.append(cols[1])
            interpolant_col = interpolants[cols[0]]
            if not self.extrapolate and (interpolant_col.min() < bins[0] or interpolant_col.max() >= max_right):
                raise ValueError(f'Extrapolation outside of bins used to set up interpolation is only allowed '
                                 f'when explicitly set in creation of Interpolation. Extrapolation is currently '
                                 f'off for this interpolation, and parameter {cols[0]} includes data outside of '
                                 f'original bins.')
            bin_indices = np.digitize(interpolant_col, bins.tolist())
            # digitize uses 0 to indicate < min and len(bins) for > max so adjust to actual indices into bin_indices
            # (values below the first edge clamp to bin 0; values past the last edge clamp to the last bin)
            bin_indices[bin_indices > 0] -= 1
            interpolant_bins[cols[1]] = bins.loc[bin_indices].values
        index = interpolant_bins.index
        # Left-merge on the left-edge columns pulls in the data row for each bin;
        # restoring the saved index keeps output aligned with the interpolants.
        interp_vals = interpolant_bins.merge(self.data, how='left', on=merge_cols).set_index(index)
        return interp_vals[self.value_columns]
|
{"hexsha": "90cafbacc7ae8ca19eff0bd0a99e5e52037dee75", "size": 12461, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/vivarium/interpolation.py", "max_stars_repo_name": "ihmeuw/vivarium", "max_stars_repo_head_hexsha": "77393d2e84ff2351c926f65b33272b7225cf9628", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 41, "max_stars_repo_stars_event_min_datetime": "2017-07-14T03:39:06.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-20T05:36:33.000Z", "max_issues_repo_path": "src/vivarium/interpolation.py", "max_issues_repo_name": "ihmeuw/vivarium", "max_issues_repo_head_hexsha": "77393d2e84ff2351c926f65b33272b7225cf9628", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 26, "max_issues_repo_issues_event_min_datetime": "2017-08-08T22:13:44.000Z", "max_issues_repo_issues_event_max_datetime": "2021-08-18T00:14:54.000Z", "max_forks_repo_path": "src/vivarium/interpolation.py", "max_forks_repo_name": "ihmeuw/vivarium", "max_forks_repo_head_hexsha": "77393d2e84ff2351c926f65b33272b7225cf9628", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 8, "max_forks_repo_forks_event_min_datetime": "2017-08-03T17:15:39.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-30T21:57:50.000Z", "avg_line_length": 43.2673611111, "max_line_length": 116, "alphanum_fraction": 0.6270764786, "include": true, "reason": "import numpy", "num_tokens": 2468}
|
import abc
import os
from enum import Enum
from typing import Optional, Union, TypeVar, List, Tuple, Callable, Generator
import gym
import numpy as np
from gym import spaces
import sap.battle as battle
import sap.game as game
import sap.pet as pet
import sap.pet_impl as pet_impl
import sap.player as player
import sap.shop as shop
from abc import ABC, abstractmethod
ActionSpaceDimension = Tuple[int, int, int]
def get_action_mask(g: game.Game):
    """Build the flat action mask for player 1 of ``g``.

    From what I can tell from reading the test environment
    https://github.com/Stable-Baselines-Team/stable-baselines3-contrib/blob/3b007ae93b6177a4ee712f9f1af5dc1183b0abcb/sb3_contrib/common/envs/invalid_actions_env.py#L62-L76
    the mask for a MultiDiscrete space is a flat list with one slot per value of
    each dimension: for dimensions [a, b] it is
    [x_0, ..., x_{a-1}, y_0, ..., y_{b-1}], where x_i = False blocks a=i.
    So we cannot enumerate legal moves exactly, but we can block values that
    can never work.
    """
    current = g.player_1
    # One flag per Action, delegated to that action's validator.
    type_mask = [a.validator.apply(current) for a in Action]
    # The source index addresses both shop slots and the player's own pets.
    shop_slots = max(len(current.shop.pets), len(current.shop.food))
    source_mask = []
    for slot in range(max(shop.MAX_PETS, shop.MAX_FOOD)):
        in_shop = slot < shop_slots
        on_team = slot < len(current.pets) and current.pets[slot] is not None
        source_mask.append(in_shop or on_team)
    # Target pet index is never restricted here.
    team_mask = [True for _ in range(player.MAX_PETS)]
    return type_mask + source_mask + team_mask
class ActionValidator(ABC):
    """Interface deciding whether an :class:`Action` is currently legal for a player."""
    @abstractmethod
    def apply(self, p1: player.Player) -> bool:
        """Return True if the associated action can be taken by ``p1``."""
        raise NotImplementedError()
class RerollValidator(ActionValidator):
    """Rerolling is legal whenever the player can pay for a reroll."""
    def apply(self, p1: player.Player) -> bool:
        return p1.can_reroll()
class BuyAndPlacePetValidator(ActionValidator):
    """Buying onto an empty slot needs gold, a free team slot, and a pet in the shop."""
    def apply(self, p1: player.Player) -> bool:
        has_room = p1.num_pets() < player.MAX_PETS
        shop_has_pets = len(p1.shop.pets) > 0
        return p1.can_buy_pet() and has_room and shop_has_pets
class BuyAndCombinePetValidator(ActionValidator):
    """Buy-and-combine needs gold, at least one owned pet, and a pet in the shop."""
    def apply(self, p1: player.Player) -> bool:
        owns_a_pet = p1.num_pets() > 0
        shop_has_pets = len(p1.shop.pets) > 0
        return p1.can_buy_pet() and owns_a_pet and shop_has_pets
class BuyFoodForPetValidator(ActionValidator):
    """Buying food needs shop food, an owned pet, and gold for the cheapest item."""
    def apply(self, p1: player.Player) -> bool:
        if not p1.shop.food or not p1.num_pets():
            return False
        cheapest = min(item.food.cost for item in p1.shop.food)
        return p1.gold >= cheapest
class ToggleFreezePetValidator(ActionValidator):
    """Freezing/unfreezing a shop pet is legal whenever the shop has any pets."""
    def apply(self, p1: player.Player) -> bool:
        return bool(p1.shop.pets)
class ToggleFreezeFoodValidator(ActionValidator):
    """Freezing/unfreezing shop food is legal whenever the shop has any food."""
    def apply(self, p1: player.Player) -> bool:
        return bool(p1.shop.food)
class SellPetValidator(ActionValidator):
    """Selling is legal whenever the player owns at least one pet."""
    def apply(self, p1: player.Player) -> bool:
        return p1.num_pets() > 0
class MovePetValidator(ActionValidator):
    """Moving only makes sense with at least two pets on the team."""
    def apply(self, p1: player.Player) -> bool:
        return p1.num_pets() > 1
class EndTurnValidator(ActionValidator):
    """Ending the turn is always legal."""
    def apply(self, p1: player.Player) -> bool:
        return True
class Action(Enum):
    """Agent actions, each paired with the validator that gates its legality."""
    REROLL = (0, RerollValidator())
    BUY_AND_PLACE_PET = (1, BuyAndPlacePetValidator())
    BUY_AND_COMBINE_PET = (2, BuyAndCombinePetValidator())
    BUY_FOOD_FOR_PET = (3, BuyFoodForPetValidator())
    TOGGLE_FREEZE_PET = (4, ToggleFreezePetValidator())
    TOGGLE_FREEZE_FOOD = (5, ToggleFreezeFoodValidator())
    SELL_PET = (6, SellPetValidator())
    MOVE_PET = (7, MovePetValidator())
    END_TURN = (8, EndTurnValidator())
    def __init__(self, action_value: int, validator: ActionValidator):
        # Unpack the member tuple into a numeric id and its legality check.
        self.action_value = action_value
        self.validator = validator
    @staticmethod
    def get_action(val: int):
        """Return the Action whose numeric id equals ``val``; raise ValueError otherwise."""
        found = next((a for a in Action if a.action_value == val), None)
        if found is None:
            raise ValueError("Could not find action for value", val)
        return found
class EnvironmentPlayer(player.Player):
    """Player driven externally by the RL agent; its autonomous buy phase is a no-op."""
    def __init__(self, given_shop: shop.Shop):
        super().__init__("Environment Player", given_shop)
    def buy_phase(self):
        """Do nothing, do this in the actual algo"""
        pass
T = TypeVar("T")
O = TypeVar("O")
def observation_list(l: List[T],
                     empty_gen: Callable[[], O],
                     mapper: Callable[[Union[T, Optional[T]]], O],
                     size: int) -> List[O]:
    """Map ``l`` through ``mapper`` and right-pad with ``empty_gen()`` to ``size`` entries."""
    mapped = [mapper(item) for item in l]
    padding = [empty_gen() for _ in range(size - len(l))]
    return mapped + padding
def food_space():
    """Gym space for one food observation: [food type id, power, toughness]."""
    return spaces.MultiDiscrete([
        len(pet_impl.ID_TO_FOOD_INFO), # id
        pet.MAX_POWER + 1, # power
        pet.MAX_TOUGHNESS + 1, # toughness
    ])
def empty_food_observation() -> List[int]:
    """Observation for an absent food item: zeroed [id, power, toughness]."""
    return [0, 0, 0]
def food_observation(food: Optional[pet.Food]) -> List[int]:
    """Encode ``food`` as [type id, power, toughness]; all zeros when ``food`` is None."""
    if food is None:
        return empty_food_observation()
    food_id = pet_impl.FOOD_TYPE_TO_ID[type(food)]
    return [food_id, food.power, food.toughness]
# TODO: consider changing this from ID to something that represents the pet?
def pet_space():
    """Gym space for one pet observation: id, stats, temporary buffs and equipped food."""
    return spaces.Tuple((
        spaces.Discrete(len(pet_impl.ID_TO_PET_INFO)), # id
        spaces.Discrete(pet.MAX_POWER + 1), # power
        spaces.Discrete(pet.MAX_TOUGHNESS + 1), # toughness
        spaces.Discrete(pet.MAX_POWER + 1), # temp_buff_power
        spaces.Discrete(pet.MAX_TOUGHNESS + 1), # temp_buff_tougness
        food_space(), # equipped_food
    ))
def pet_observation(observed_pet: pet.Pet) -> Tuple:
    """Encode a pet as (id, power, toughness, temp power buff, temp toughness buff, food obs)."""
    pet_id = pet_impl.PET_TYPE_TO_ID[type(observed_pet)]
    stats = (observed_pet.power,
             observed_pet.toughness,
             observed_pet.temp_buff_power,
             observed_pet.temp_buff_toughness)
    return (pet_id,) + stats + (food_observation(observed_pet.equipped_food),)
def empty_pet_observation() -> Tuple:
    """Observation for an empty team/shop slot: zeroed stats plus an empty food entry."""
    return (0, 0, 0, 0, 0, empty_food_observation())
# TODO: add battles after we have action masks
def player_space() -> spaces.Space:
    """Full observation space for one player: own team, resources, shop state and the enemy team."""
    return spaces.Dict({
        'pets': spaces.Tuple(tuple(pet_space() for _ in range(player.MAX_PETS))),
        'gold': spaces.Discrete(100),
        'lives': spaces.Discrete(100),
        'wins': spaces.Discrete(11),
        'won_last': spaces.Discrete(2),
        'shop_food': spaces.Tuple((food_space(), food_space())),
        'shop_frozen_food': spaces.MultiBinary(shop.MAX_FOOD),
        'shop_pets': spaces.Tuple(tuple(pet_space() for _ in range(shop.MAX_PETS))),
        'shop_frozen_pets': spaces.MultiBinary(shop.MAX_PETS),
        'other_team': spaces.Tuple(tuple(pet_space() for _ in range(player.MAX_PETS))),
    })
def player_observation(observed_game: game.Game):
    """Build the observation dict for player 1 of ``observed_game`` (see player_space)."""
    me = observed_game.player_1
    my_shop = me.shop
    pets_obs = observation_list(me.pets, empty_pet_observation, pet_observation, player.MAX_PETS)
    shop_food_obs = observation_list(my_shop.food, empty_food_observation,
                                     lambda item: food_observation(item.food), shop.MAX_FOOD)
    frozen_food = np.array(observation_list(my_shop.food, lambda: False,
                                            lambda item: item.frozen, shop.MAX_FOOD))
    shop_pets_obs = observation_list(my_shop.pets, empty_pet_observation,
                                     lambda item: pet_observation(item.pet), shop.MAX_PETS)
    frozen_pets = np.array(observation_list(my_shop.pets, lambda: False,
                                            lambda item: item.frozen, shop.MAX_PETS))
    enemy_obs = observation_list(observed_game.player_2.pets, empty_pet_observation,
                                 pet_observation, player.MAX_PETS)
    return {
        'pets': pets_obs,
        'gold': me.gold,
        'lives': me.lives,
        'wins': me.wins,
        'won_last': 1 if me.won_last else 0,
        'shop_food': shop_food_obs,
        'shop_frozen_food': frozen_food,
        'shop_pets': shop_pets_obs,
        'shop_frozen_pets': frozen_pets,
        'other_team': enemy_obs,
    }
class SapRandomVersusEnv0(gym.Env):
    """Custom environment for having Super Auto Pets run in RL.

    Player 1 is agent-controlled; player 2 is a random player created on
    reset. Observations are the flattened ``player_space()`` dict.
    """
    metadata = {'render.modes': ['human']}
    def __init__(self):
        super(SapRandomVersusEnv0, self).__init__()
        # MultiDiscrete action: (which action, source slot, target team slot).
        self.action_space_dimension = (
            len(Action),  # action value
            max(shop.MAX_PETS, shop.MAX_FOOD, player.MAX_PETS),  # source index, used for shop (pets + food) and moving
            player.MAX_PETS,  # index of pet on my team
        )
        self.action_space = spaces.MultiDiscrete(self.action_space_dimension)
        # Keep the structured space around so observations can be flattened consistently.
        self.real_observation_space = player_space()
        self.observation_space = spaces.flatten_space(self.real_observation_space)
        self.game: Optional[game.Game] = None
        self.actions_this_turn = 0
    def step(self, action: Tuple[int, int, int]):
        """Apply one action for player 1; invalid actions are silently ignored.

        Returns the classic gym 4-tuple (observation, reward, done, info);
        info is the un-flattened observation dict.
        """
        reward = 0
        action_val, shop_index, pet_index = action
        action_enum: Action = Action.get_action(val=action_val)
        self.actions_this_turn += 1
        p1 = self.game.player_1
        try:
            # Force an end of turn after 100 actions so episodes always progress.
            if action_enum is Action.END_TURN or self.actions_this_turn > 100:
                p1.end_turn()
                self.game.player_2.perform_buys(self.game.round)
                result = self.game.battle_phase()
                if result is battle.Result.TEAM_1_WINS:
                    # TODO: make this the number of points instead, but this works for now
                    reward += 10 * self.game.player_1.wins
                self.game.start_round()
                # NOTE(review): gold is read after start_round(); confirm that is
                # the intended point to measure "gold spent" for the reward.
                reward += player.STARTING_GOLD - self.game.player_1.gold # reward for gold spent
                p1.start_turn(self.game.round)
                self.actions_this_turn = 0
            elif action_enum is Action.REROLL:
                p1.reroll()
            elif action_enum is Action.BUY_AND_PLACE_PET:
                p1.buy_and_place_pet(shop_index, pet_index)
                reward += 1
            elif action_enum is Action.BUY_AND_COMBINE_PET:
                p1.buy_and_combine_pet(shop_index, pet_index)
                reward += 1
            elif action_enum is Action.BUY_FOOD_FOR_PET:
                p1.buy_and_apply_food(shop_index, pet_index)
                reward += 1
            elif action_enum is Action.TOGGLE_FREEZE_PET:
                p1.shop.toggle_freeze_pet(shop_index)
            elif action_enum is Action.TOGGLE_FREEZE_FOOD:
                p1.shop.toggle_freeze_food(shop_index)
            elif action_enum is Action.SELL_PET:
                p1.sell(pet_index)
            elif action_enum is Action.MOVE_PET:
                p1.move(shop_index, pet_index)
        except (ValueError, IndexError):
            pass # ignore invalid actions
        # Episode ends when player 1 runs out of lives or reaches 10 wins.
        done = (not self.game.player_1.has_lives()) or self.game.player_1.wins == 10
        info = player_observation(self.game)
        observation = spaces.flatten(self.real_observation_space, player_observation(self.game))
        return observation, reward, done, info
    def action_masks(self) -> List[bool]:
        """Flat MultiDiscrete mask consumed by sb3-contrib's MaskablePPO."""
        return get_action_mask(self.game)
    def reset(self):
        """Start a fresh game against a random opponent and return the first observation."""
        self.game = game.Game(
            EnvironmentPlayer(game.create_shop()),
            game.create_random_player()
        )
        self.game.start_round()
        self.game.player_1.start_turn(self.game.round)
        return spaces.flatten(self.real_observation_space, player_observation(self.game))
    def render(self, mode='human'):
        """Print both players' states to stdout."""
        print(self.game.player_1, self.game.player_2)
    def close(self):
        pass
if __name__ == "__main__":
    # Train MaskablePPO on the environment, then evaluate against a random player.
    # TODO add VecEnv
    from stable_baselines3.common.env_checker import check_env
    from sb3_contrib import MaskablePPO
    from sb3_contrib.common.maskable.utils import get_action_masks
    import time
    start = time.time_ns()
    env = SapRandomVersusEnv0()
    check_env(env)
    obs = env.reset()
    # Resume from a previous checkpoint when one exists.
    model_path = "model_saves/ppo_sap_random_versus.zip"
    if os.path.exists(model_path):
        print("Loading model")
        model = MaskablePPO.load(model_path, env)
    else:
        print("Making new model")
        model = MaskablePPO("MlpPolicy", env, verbose=1)
    # Train for roughly 8 hours, converting wall-time to timesteps heuristically.
    seconds_to_train = 8 * 60 * 60
    timesteps = int(seconds_to_train * 180) # rough approximation
    print("Training for", timesteps)
    model.learn(total_timesteps=timesteps)
    model.save(model_path)
    # Evaluation loop: count full 10-win games out of 1000 completed episodes.
    bot_wins = 0
    runs = 0
    done = False
    while runs < 1000:
        action_masks = get_action_masks(env)
        action, _states = model.predict(obs, action_masks=action_masks)
        obs, rewards, done, info = env.step(action)
        if done:
            if env.game.player_1.wins == 10:
                bot_wins += 1
            obs = env.reset()
            print("Doing run", runs)
            runs += 1
    print("Against a random player, performance", bot_wins, runs)
    print("Time taken", (time.time_ns() - start) * 1e-9)
    print("Time taken per timestep", ((time.time_ns() - start) * 1e-9) / timesteps)
|
{"hexsha": "8be3a3d2e9e798420c01ddfebb12aac38961b8df", "size": 13239, "ext": "py", "lang": "Python", "max_stars_repo_path": "sap/envs/sap_random_versus_env.py", "max_stars_repo_name": "steveryb/super-auto-pets-ai", "max_stars_repo_head_hexsha": "f86e033473885e2c7a0312c598f1870bf8edce3c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "sap/envs/sap_random_versus_env.py", "max_issues_repo_name": "steveryb/super-auto-pets-ai", "max_issues_repo_head_hexsha": "f86e033473885e2c7a0312c598f1870bf8edce3c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "sap/envs/sap_random_versus_env.py", "max_forks_repo_name": "steveryb/super-auto-pets-ai", "max_forks_repo_head_hexsha": "f86e033473885e2c7a0312c598f1870bf8edce3c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.1721311475, "max_line_length": 173, "alphanum_fraction": 0.6463479115, "include": true, "reason": "import numpy", "num_tokens": 3266}
|
#EasyPlotQwt base types & core functions
#-------------------------------------------------------------------------------
#==Constants
===============================================================================#
#TODO: Support transparent, or *read* plot background color somehow.
const COLOR_BACKGROUND = EasyPlot.COLOR_WHITE
#Map EasyPlot axis-scale symbols to the backend's scale strings:
const scalemap = Dict{Symbol, String}(
	:lin => "lin",
	:log => "log",
)
#Map EasyPlot line-style symbols to matplotlib-style dash codes:
const linestylemap = Dict{Symbol, String}(
	:none    => "",
	:solid   => "-",
	:dash    => "--",
	:dot     => ":",
	:dashdot => "-.",
)
#Map EasyPlot glyph symbols to matplotlib-style marker codes:
const markermap = Dict{Symbol, String}(
	:none    => "",
	:square    => "s",
	:diamond   => "d",
	:uarrow    => "^", :darrow => "v",
	:larrow    => "<", :rarrow => ">",
	:cross => "+", :+ => "+",
	:diagcross => "x", :x => "x",
	:circle    => "o", :o => "o",
	:star      => "*", :* => "*",
)
#Typed sentinel used to detect missing dictionary entries:
struct FlagType{T}; end
const NOTFOUND = FlagType{:NOTFOUND}()
#==Base types
===============================================================================#
const NullOr{T} = Union{Nothing, T} #Simpler than Nullable
#Wrapper pairing a backend Axes object with the active theme (and eye-diagram
#attributes when the subplot style is :eye):
mutable struct EPAxes{T} <: EasyPlot.AbstractAxes{T}
	ref::Axes #Axes reference
	theme::EasyPlot.Theme
	eye::NullOr{EasyPlot.EyeAttributes}
end
EPAxes(style::Symbol, ref::Axes, theme::EasyPlot.Theme, eye=nothing) =
	EPAxes{style}(ref, theme, eye)
#Keyword holder for waveform attributes in the form the backend understands.
#Fields left as `nothing` are treated as "not set" by _addwfrm.
mutable struct WfrmAttributes
	title #Not label, for some reason
	color #linecolor
	linewidth
	linestyle
	marker
	markersize
	markerfacecolor
	markeredgecolor
#	markeredgewidth #Not supported
#	fillstyle #Not supported
	#==Unknown options:
		shade, fitted, curvestyle, curvetype, baseline
	==#
end
#Keyword constructor; markeredgecolor deliberately reuses the line `color`:
WfrmAttributes(;label=nothing,
	color=nothing, linewidth=nothing, linestyle=nothing,
	marker=nothing, markersize=nothing, markerfacecolor=nothing) =
	WfrmAttributes(label, color, linewidth, linestyle,
		marker, markersize, markerfacecolor, color
	)
#==Helper functions
===============================================================================#
const HEX_CODES = UInt8[
	'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F'
]
#Format a 24-bit RGB integer as a matplotlib "#RRGGBB" string:
function int2mplcolorstr(v::UInt)
	result = Array{UInt8}(undef, 7) #6Hex+hash character
	result[1] = '#'
	for i in length(result):-1:2
		#Fill from least-significant nibble, right to left:
		result[i] = HEX_CODES[(v & 0xF)+1]
		v >>= 4
	end
	return String(result)
end
#Convert any Colorant into the backend's color-string representation:
function mapcolor(v::Colorant)
	v = convert(RGB24, v)
	return int2mplcolorstr(UInt(v.color))
end
mapfacecolor(v) = mapcolor(v) #In case we want to diverge
#Linewidth:
maplinewidth(w) = w
maplinewidth(::Nothing) = maplinewidth(1) #default
#Marker size (backend units are ~5x EasyPlot glyph size):
mapmarkersize(sz) = 5*sz
mapmarkersize(::Nothing) = mapmarkersize(1)
#Translate an EasyPlot line-style symbol, falling back to solid when unknown:
#NOTE(review): `info(...)` is pre-Julia-1.0 logging; on 1.0+ this would need
#`@info` -- kept as-is for consistency with the rest of the file.
function maplinestyle(v::Symbol)
	result = get(linestylemap, v, NOTFOUND)
	if NOTFOUND == result
		info("Line style not supported")
		result = maplinestyle(nothing)
	end
	return result
end
maplinestyle(::Nothing) = "-" #default
#Translate an EasyPlot glyph symbol; :none maps to `nothing` (no marker):
function mapmarkershape(v::Symbol)
	result = get(markermap, v, NOTFOUND)
	if "" == result
		result = nothing
	elseif NOTFOUND == result
		info("Marker shape not supported")
		result = "o" #Use some supported marker
	end
	return result
end
mapmarkershape(::Nothing) = mapmarkershape(:none) #default (no marker)
#Translate themed EasyPlot waveform attributes into backend attributes:
function WfrmAttributes(id::String, attr::EasyPlot.WfrmAttributes)
	#TODO: Figure out how to support transparency:
	#Transparent glyph fill is approximated with the plot background color.
	markerfacecolor = attr.glyphfillcolor==EasyPlot.COLOR_TRANSPARENT ?
		mapfacecolor(COLOR_BACKGROUND) : mapfacecolor(attr.glyphfillcolor)
	return WfrmAttributes(label=id,
		color=mapcolor(attr.linecolor),
		linewidth=maplinewidth(attr.linewidth),
		linestyle=maplinestyle(attr.linestyle),
		marker=mapmarkershape(attr.glyphshape),
		markersize=mapmarkersize(attr.glyphsize),
		markerfacecolor=markerfacecolor,
	)
end
#Apply axis limits to `ax`, warning when only one side is specified
#(partial limits are not supported by the backend call used here).
#BUGFIX: this previously tested `nothing != min` / `nothing != max` -- i.e. it
#compared the Base *functions* `min`/`max` (never `nothing`) instead of the
#`_min`/`_max` arguments, so both flags were always true and a partial limit
#silently produced a malformed `(nothing, x)` tuple instead of a warning.
function _setlim(ax::Axes, setfn::Symbol, _min, _max)
	hasmin = nothing != _min
	hasmax = nothing != _max
	if hasmin && hasmax
		ax[setfn] = (_min, _max)
	elseif hasmin || hasmax #exactly one side given
		warn("Partial limits not supported: $setfn($_min, $_max)")
	end
end
#==Rendering functions
===============================================================================#
#Add DataF1 results:
#Builds a Curve from the data, forwarding only explicitly-set (non-nothing)
#attributes as keyword arguments.
function _addwfrm(ax::Axes, d::DataF1, a::WfrmAttributes)
	kwargs = Any[]
	#BUGFIX: `fieldnames` requires a *type* on Julia >= 0.7 (which this file
	#targets, given `Array{UInt8}(undef, ...)`); calling it on the instance
	#`a` raises a MethodError.
	for attrib in fieldnames(typeof(a))
		v = getfield(a, attrib)
		if v !== nothing
			push!(kwargs, tuple(attrib, v))
		end
	end
	#TODO: is result of add the "wfrm" we want to return?
	wfrm = Curve(d.x, d.y; kwargs...)
	add(ax, wfrm)
	return wfrm
end
#Called by EasyPlot, for each individual DataF1 ∈ DataMD.
#Applies the active theme, translates the attributes, and adds the curve:
function EasyPlot.addwfrm(ax::EPAxes, d::DataF1, id::String,
	la::EasyPlot.LineAttributes, ga::EasyPlot.GlyphAttributes)
	attr = EasyPlot.WfrmAttributes(ax.theme, la, ga) #Apply theme to attributes
	qwtattr = WfrmAttributes(id, attr) #Attributes understood by Qwt
	_addwfrm(ax.ref, d, qwtattr)
end
#Render a single EasyPlot.Subplot onto the backend Axes `ax`:
function rendersubplot(ax::Axes, subplot::EasyPlot.Subplot, theme::EasyPlot.Theme)
	#TODO: add support for subplot title
	#TODO Ugly: setting defaults like this should be done in EasyPlot
	ep = nothing
	if :eye == subplot.style
		ep = subplot.eye
		if nothing == ep.teye; ep.teye = ep.tbit; end
	end
	axes = EPAxes(subplot.style, ax, theme, ep)
	#NOTE(review): this passes the whole waveform + its index; presumably a
	#different EasyPlot.addwfrm method than the DataF1 one below -- verify.
	for (i, wfrm) in enumerate(subplot.wfrmlist)
		EasyPlot.addwfrm(axes, wfrm, i)
	end
	srca = subplot.axes
	#Update axis limits (subplot-specified limits override existing ones):
	_lim = ax[:xlimits]
	(xmin, xmax) = _lim != nothing ? _lim : (nothing, nothing)
	if srca.xmin != nothing; xmin = srca.xmin; end
	if srca.xmax != nothing; xmax = srca.xmax; end
	_lim = ax[:ylimits]
	(ymin, ymax) = _lim != nothing ? _lim : (nothing, nothing)
	if srca.ymin != nothing; ymin = srca.ymin; end
	if srca.ymax != nothing; ymax = srca.ymax; end
	_setlim(ax, :set_xlim, xmin, xmax)
	_setlim(ax, :set_ylim, ymin, ymax)
	#Apply x/y scales:
	ax[:xscale] = scalemap[srca.xscale]
	ax[:yscale] = scalemap[srca.yscale]
	#Apply x/y labels:
	if srca.xlabel != nothing; ax[:xlabel] = (string(srca.xlabel), ""); end
	if srca.ylabel != nothing; ax[:ylabel] = (string(srca.ylabel), ""); end
	return ax
end
#Render all subplots of an EasyPlot.Plot into `fig`, laid out on an
#ncolumns-wide grid (row count derived from the subplot count):
function render(fig::Figure, eplot::EasyPlot.Plot)
	ncols = eplot.ncolumns
	nrows = div(length(eplot.subplots)-1, ncols)+1
	subplotidx = 0
	for s in eplot.subplots
#		row = div(subplotidx, ncols) + 1
#		col = mod(subplotidx, ncols) + 1
		ax = subplot(fig, nrows, ncols, subplotidx+1) #1-based subplot position
		rendersubplot(ax, s, eplot.theme)
		if eplot.displaylegend; add(ax, :legend); end
		subplotidx += 1
	end
	return fig
end
#Last line
|
{"hexsha": "bbdbaa6c17a7b61e8bca1700fbf434d6c205cc10", "size": 6354, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "subpkgs/EasyPlotQwt/src/base.jl", "max_stars_repo_name": "ma-laforge/CMDimData.jl", "max_stars_repo_head_hexsha": "f543265841b81dc20f5dca18a37cd642371a0a34", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "subpkgs/EasyPlotQwt/src/base.jl", "max_issues_repo_name": "ma-laforge/CMDimData.jl", "max_issues_repo_head_hexsha": "f543265841b81dc20f5dca18a37cd642371a0a34", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "subpkgs/EasyPlotQwt/src/base.jl", "max_forks_repo_name": "ma-laforge/CMDimData.jl", "max_forks_repo_head_hexsha": "f543265841b81dc20f5dca18a37cd642371a0a34", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.6974789916, "max_line_length": 82, "alphanum_fraction": 0.6458923513, "num_tokens": 1869}
|
import os
import yaml
import pgl
import time
import copy
import numpy as np
import os.path as osp
from pgl.utils.logger import log
from pgl.graph import Graph
from pgl import graph_kernel
from pgl.sampling.custom import subgraph
from ogb.lsc import MAG240MDataset, MAG240MEvaluator
from dataset.base_dataset import BaseDataGenerator
import time
from tqdm import tqdm
from easydict import EasyDict as edict
import argparse
def get_result(config, eval_all=False):
    """Evaluate cached MAG240M predictions and append the accuracy to a report file.

    Reads per-paper class scores from the 'model_result_temp' memmap, computes
    accuracy with the official MAG240MEvaluator, and appends results to
    'ck_result.txt'. When ``eval_all`` is False, also dumps the test-split
    predictions for the current CV fold.

    Parameters
    ----------
    config : easydict.EasyDict
        Needs ``data_dir`` and ``valid_path``; when ``eval_all`` is False also
        ``valid_name`` (CV validation-index file) and ``test_name``.
    eval_all : bool
        True: evaluate on the full official validation split and save the raw
        predictions. False: evaluate one CV fold and save test predictions.
    """
    dataset = MAG240MDataset(config.data_dir)
    evaluator = MAG240MEvaluator()
    # Memory-mapped score matrix: one row per paper, 153 classes.
    # NOTE(review): shape (121751666, 153) is hard-coded to this dataset
    # release -- confirm it matches the file that produced the memmap.
    pred_file = 'model_result_temp'
    sudo_label = np.memmap(pred_file, dtype=np.float32, mode='r',
                           shape=(121751666, 153))
    label = dataset.all_paper_label
    # BUGFIX: the report file was opened but never closed; a context manager
    # guarantees the appended lines are flushed even on error.
    with open("ck_result.txt", 'a', encoding='utf-8') as wf:
        if eval_all:
            valid_idx = dataset.get_idx_split('valid')
            pred = sudo_label[valid_idx]
            save_path = os.path.join(config.valid_path, "all_eval_result")
            np.save(save_path, pred)
            y_pred = pred.argmax(1)
            y_true = label[valid_idx]
            valid_acc = evaluator.eval({
                'y_true': y_true,
                'y_pred': y_pred
            })['acc']
            print("all eval result\n")
            print(f"valid_acc: {valid_acc}\n")
            wf.write("all eval result\n")
            wf.write(f"valid_acc: {valid_acc}\n")
        else:
            valid_path = os.path.join(config.valid_path, config.valid_name)
            valid_idx = np.load(valid_path)
            test_idx = dataset.get_idx_split('test')
            pred = sudo_label[valid_idx]
            y_pred = pred.argmax(1)
            y_true = label[valid_idx]
            valid_acc = evaluator.eval({
                'y_true': y_true,
                'y_pred': y_pred
            })['acc']
            print(f"eval cv {config.valid_name} result\n")
            print(f"valid_acc: {valid_acc}\n")
            wf.write(f"eval cv {config.valid_name} result\n")
            wf.write(f"valid_acc: {valid_acc}\n")
            # Dump test-split predictions for later ensembling.
            save_path_test = os.path.join(config.valid_path, config.test_name)
            pred_test = sudo_label[test_idx]
            print(pred_test.shape)
            np.save(save_path_test, pred_test)
if __name__ == "__main__":
    # CLI entry point: load the YAML config and run the evaluation.
    parser = argparse.ArgumentParser(description='main')
    parser.add_argument("--conf", type=str, default="./config.yaml")
    parser.add_argument("--eval_all", action='store_true', default=False)
    args = parser.parse_args()
    config = edict(yaml.load(open(args.conf), Loader=yaml.FullLoader))
    # Parse "a-b-c" style sample counts into a list of ints.
    # NOTE(review): config.samples does not appear to be used by get_result;
    # presumably kept for config-schema compatibility with other scripts.
    config.samples = [int(i) for i in config.samples.split('-')]
    print(config)
    get_result(config, args.eval_all)
|
{"hexsha": "75a7d943b6a04f9073c5bdec88f17ba16db5e24a", "size": 2740, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/kddcup2021/MAG240M/r_unimp/check_cv_get_test_result.py", "max_stars_repo_name": "zbmain/PGL", "max_stars_repo_head_hexsha": "dbded6a1543248b0a33c05eb476ddc513401a774", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1389, "max_stars_repo_stars_event_min_datetime": "2019-06-11T03:29:20.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T18:25:43.000Z", "max_issues_repo_path": "examples/kddcup2021/MAG240M/r_unimp/check_cv_get_test_result.py", "max_issues_repo_name": "zbmain/PGL", "max_issues_repo_head_hexsha": "dbded6a1543248b0a33c05eb476ddc513401a774", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 232, "max_issues_repo_issues_event_min_datetime": "2019-06-21T06:52:10.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-29T08:20:31.000Z", "max_forks_repo_path": "examples/kddcup2021/MAG240M/r_unimp/check_cv_get_test_result.py", "max_forks_repo_name": "zbmain/PGL", "max_forks_repo_head_hexsha": "dbded6a1543248b0a33c05eb476ddc513401a774", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 229, "max_forks_repo_forks_event_min_datetime": "2019-06-20T12:13:58.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-25T12:04:48.000Z", "avg_line_length": 35.1282051282, "max_line_length": 74, "alphanum_fraction": 0.6186131387, "include": true, "reason": "import numpy", "num_tokens": 637}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 18 16:55:14 2017
@author: ajaver
Get food contour using a pre-trained neural network
"""
# %%
import tables
import os
import numpy as np
import cv2
import warnings
with warnings.catch_warnings():
warnings.simplefilter(action='ignore', category=FutureWarning)
from keras.models import load_model
from skimage.morphology import disk
DFLT_RESIZING_SIZE = 512 # the network was trained with images of this size 512
def _get_sizes(im_size, d4a_size= 24, n_conv_layers=4):
''' Useful to determine the expected inputs and output sizes of a u-net.
Additionally if the image is larger than the network output the points to
subdivide the image in tiles are given
'''
#assuming 4 layers of convolutions
def _in_size(d4a_size):
mm = d4a_size
for n in range(n_conv_layers):
mm = mm*2 + 2 + 2
return mm
def _out_size(d4a_size):
mm = d4a_size -2 -2
for n in range(n_conv_layers):
mm = mm*2 - 2 - 2
return mm
#this is the size of the central reduced layer. I choose this value manually
input_size = _in_size(d4a_size) #required 444 of input
output_size = _out_size(d4a_size) #set 260 of outpu
pad_size = int((input_size-output_size)/2)
if any(x < output_size for x in im_size):
msg = 'All the sides of the image ({}) must be larger or equal to ' \
'the network output {}.'
raise ValueError(msg.format(im_size, output_size))
n_tiles_x = int(np.ceil(im_size[0]/output_size))
n_tiles_y = int(np.ceil(im_size[1]/output_size))
txs = np.round(np.linspace(0, im_size[0] - output_size, n_tiles_x)).astype(np.int)
tys = np.round(np.linspace(0, im_size[1] - output_size, n_tiles_y)).astype(np.int)
tile_corners = [(tx, ty) for tx in txs for ty in tys]
return input_size, output_size, pad_size, tile_corners
def _preprocess(X,
input_size,
pad_size,
tile_corners
):
'''
Pre-process an image to input for the pre-trained u-net model
'''
def _get_tile_in(img, x,y):
return img[np.newaxis, x:x+input_size, y:y+input_size, :]
def _cast_tf(D):
D = D.astype(np.float32())
if D.ndim == 2:
D = D[..., None]
return D
#normalize image
X = _cast_tf(X)
X /= 255
X -= np.median(X)
pad_size_s = ((pad_size,pad_size), (pad_size,pad_size), (0,0))
X = np.lib.pad(X, pad_size_s, 'reflect')
X = [_get_tile_in(X, x, y) for x,y in tile_corners]
return X
def get_unet_prediction(Xi,
                        model_t,
                        n_flips = 1,
                        im_size=None,
                        n_conv_layers = 4,
                        d4a_size = 24,
                        _is_debug=False):
    '''
    Predict the food probability for each pixel using a pretrained u-net model (Helper).

    Tiles the image, runs the model on each tile (optionally under axis flips,
    averaging the un-flipped results), and stitches the per-tile class-1
    probabilities back into a full-size map.
    '''
    def _flip_d(img_o, nn):
        # flip codes: 0 -> vertical, 2 -> horizontal, 3 -> both, else identity
        # (the function is its own inverse, so it is used both to flip the
        # input and to un-flip the prediction)
        if nn == 0:
            img = img_o[::-1, :]
        elif nn == 2:
            img = img_o[:, ::-1]
        elif nn == 3:
            img = img_o[::-1, ::-1]
        else:
            img = img_o
        return img
    if im_size is None:
        im_size = Xi.shape
    input_size, output_size, pad_size, tile_corners = \
        _get_sizes(im_size, d4a_size= d4a_size, n_conv_layers=n_conv_layers)
    Y_pred = np.zeros(im_size)
    for n_t in range(n_flips):
        X = _flip_d(Xi, n_t)
        # NOTE(review): im_size was already defaulted above, so this branch
        # looks unreachable -- confirm before removing.
        if im_size is None:
            im_size = X.shape
        x_crop = _preprocess(X, input_size, pad_size, tile_corners)
        x_crop = np.concatenate(x_crop)
        y_pred = model_t.predict(x_crop)
        # accumulate tile predictions and per-pixel tile counts so overlapping
        # regions are averaged
        Y_pred_s = np.zeros(X.shape)
        N_s = np.zeros(X.shape)
        for (i,j), yy,xx in zip(tile_corners, y_pred, x_crop):
            Y_pred_s[i:i+output_size, j:j+output_size] += yy[:,:,1]
            if _is_debug:
                import matplotlib.pylab as plt
                plt.figure()
                plt.subplot(1,2,1)
                plt.imshow(np.squeeze(xx))
                plt.subplot(1,2,2)
                plt.imshow(yy[:,:,1])
            N_s[i:i+output_size, j:j+output_size] += 1
        # un-flip before accumulating so all flip variants align
        Y_pred += _flip_d(Y_pred_s/N_s, n_t)
    return Y_pred
def get_food_prob(mask_file, model, max_bgnd_images = 2, _is_debug = False, resizing_size = DFLT_RESIZING_SIZE):
    '''
    Predict the food probability for each pixel using a pretrained u-net model.

    Reads background frames from the mask file's /full_data node, downscales
    them so the smallest side is at most ``resizing_size``, and runs the u-net.
    Returns (Y_pred, original_size, bgnd_s).
    '''
    with tables.File(mask_file, 'r') as fid:
        if not '/full_data' in fid:
            raise ValueError('The mask file {} does not content the /full_data dataset.'.format(mask_file))
        bgnd_o = fid.get_node('/full_data')[:max_bgnd_images].copy()
    assert bgnd_o.ndim == 3
    if bgnd_o.shape[0] > 1:
        # NOTE(review): bgnd_o[i:i+1] is a single-frame slice, so this max is a
        # no-op per frame -- possibly i:i+2 (max of consecutive frames) was
        # intended; confirm against the original pipeline.
        bgnd = [np.max(bgnd_o[i:i+1], axis=0) for i in range(bgnd_o.shape[0]-1)]
    else:
        bgnd = [np.squeeze(bgnd_o)]
    # downscale so the shortest side is at most resizing_size (never upscale)
    min_size = min(bgnd[0].shape)
    resize_factor = min(resizing_size, min_size)/min_size
    dsize = tuple(int(x*resize_factor) for x in bgnd[0].shape[::-1])
    bgnd_s = [cv2.resize(x, dsize) for x in bgnd]
    # NOTE(review): only the prediction for the LAST background image survives
    # the loop; earlier Y_pred values are overwritten -- verify this is intended.
    for b_img in bgnd_s:
        Y_pred = get_unet_prediction(b_img, model, n_flips=1)
        if _is_debug:
            import matplotlib.pylab as plt
            plt.figure()
            plt.subplot(1,2,1)
            plt.imshow(b_img, cmap='gray')
            plt.subplot(1, 2,2)
            plt.imshow(Y_pred, interpolation='none')
    original_size = bgnd[0].shape
    return Y_pred, original_size, bgnd_s
def cnt_solidity_func(_cnt):
    """Solidity of a contour: its area divided by the area of its convex hull."""
    hull_area = cv2.contourArea(cv2.convexHull(_cnt))
    return cv2.contourArea(_cnt) / hull_area
def avg_incnt_func(_cnt, img):
    """Return the mean intensity of `img` inside the contour `_cnt`.

    The contour is rasterised as a filled mask and ``cv2.mean`` is evaluated
    over that mask only.
    """
    mask = np.zeros(img.shape, np.uint8)
    # Bug fix: the original called cv2.drawContours(mask, _cnt, 1, color=255),
    # which treats every *point* of the contour as a separate one-point
    # contour and draws only point #1 with thickness 1 — the mask was a
    # single pixel. Wrap the contour in a list and fill its interior instead.
    mask = cv2.drawContours(mask, [_cnt], 0, color=255,
                            thickness=cv2.FILLED).astype(np.uint8)
    return cv2.mean(img, mask)[0]
def eccentricity_func(_cnt):
    """Eccentricity of the ellipse sharing the contour's second-order moments."""
    m = cv2.moments(_cnt)
    # mean of the two axial second moments, and half the moment "spread"
    mean_inertia = (m['mu20'] + m['mu02']) / 2
    spread = np.sqrt(
        4 * m['mu11'] ** 2 + (m['mu20'] - m['mu02']) ** 2
    ) / 2
    major_axis = mean_inertia + spread
    minor_axis = mean_inertia - spread
    return np.sqrt(1 - minor_axis / major_axis)
def get_best_scoring_cnt(cnts, food_proba, _is_debug=False):
    """Pick the contour most likely to be the food patch.

    Each candidate is scored on four non-negative, "bigger is more
    food-like" quantities — solidity, normalised area, normalised
    area-over-perimeter and the mean food probability inside the contour —
    combined by a square-sum; the highest-scoring contour is returned.
    """
    solidities = np.array([cnt_solidity_func(c) for c in cnts])
    areas = np.array([cv2.contourArea(c) for c in cnts])
    perimeters = np.array([cv2.arcLength(c, True) for c in cnts])
    areas_over_perimeters = np.array(
        [a / p for a, p in zip(areas, perimeters)])
    avg_probas = np.array([avg_incnt_func(c, food_proba) for c in cnts])
    # Normalise by the maximum only, so relative differences survive even
    # when there are just two candidate regions.
    areas_norm = areas / np.max(areas)
    aop_norm = areas_over_perimeters / np.max(areas_over_perimeters)
    # square-sum of the four positive scores
    total_score = np.sqrt(
        solidities**2 + areas_norm**2 + aop_norm**2 + avg_probas**2)
    best_idx = np.argmax(total_score)
    if _is_debug:
        print({
            'area': areas,
            'area_norm': areas_norm,
            'aop': areas_over_perimeters,
            'aop_normalised': aop_norm,
            'solidity': solidities,
            'avgprob': avg_probas,
        })
        print(total_score)
    return cnts[best_idx]
def get_food_contour_nn(mask_file, model, _is_debug=False):
    '''
    Get the food contour using a pretrained u-net model.
    This function is faster if a preloaded model is given since it is very slow
    to load the model and tensorflow.

    Returns
    -------
    tuple
        ``(food_cnt, food_prob, cnt_solidity)``; when no valid contour is
        found, the contour is an empty array and the solidity ``0.``.
    '''
    food_prob, original_size, bgnd_images = get_food_prob(
        mask_file, model, _is_debug=_is_debug)
    # bgnd_images are only used in debug mode
    patch_m = (food_prob > 0.5).astype(np.uint8)
    if _is_debug:
        import matplotlib.pylab as plt
        plt.figure()
        plt.imshow(patch_m)
        plt.show()
    cnts, _ = cv2.findContours(
        patch_m, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[-2:]
    # filter contours first to only keep the ones with a defined hull area
    cnts = [
        c for c in cnts if
        (cv2.contourArea(cv2.convexHull(c)) > 1) and
        (cv2.contourArea(c) > 1)
    ]
    if len(cnts) == 1:
        cnts = cnts[0]
    elif len(cnts) > 1:
        # too many contours: pick the best-scoring one
        cnts = get_best_scoring_cnt(cnts, food_prob, _is_debug=_is_debug)
    else:
        return np.zeros([]), food_prob, 0.
    # (the original asserted `len(cnts == 1)` here, which computed the length
    # of an element-wise comparison array — an always-true no-op; removed)
    if _is_debug:
        import matplotlib.pyplot as plt
        fig = plt.figure()
        plt.imshow(patch_m)
        fig.gca().set_title('first patch_m')
        plt.show()
    # this detects the edge and finds the outer rim of said edge
    # probably to make sure we hit the actual edge
    # rather than being a little inside the food patch
    # NOTE(review): `cnts` is passed unwrapped, so each contour *point* is
    # drawn individually (an outline of pixels) and MORPH_CLOSE then fuses
    # them into a rim — this looks intentional; confirm before "fixing".
    patch_m = np.zeros(
        patch_m.shape, np.uint8)
    patch_m = cv2.drawContours(
        patch_m, cnts, -1, color=1, thickness=cv2.FILLED)
    patch_m = cv2.morphologyEx(
        patch_m, cv2.MORPH_CLOSE, disk(3), iterations=5)
    if _is_debug:
        import matplotlib.pyplot as plt
        fig = plt.figure()
        plt.imshow(patch_m)
        fig.gca().set_title('second pathc_m')
        plt.show()
    cnts, _ = cv2.findContours(
        patch_m, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[-2:]
    if len(cnts) == 1:
        cnts = cnts[0]
    elif len(cnts) > 1:
        # too many contours, select the largest
        cnts = max(cnts, key=cv2.contourArea)
    else:
        return np.zeros([]), food_prob, 0.
    hull = cv2.convexHull(cnts)
    hull_area = cv2.contourArea(hull)
    cnt_solidity = cv2.contourArea(cnts)/hull_area
    # bug fix: np.float was removed in numpy >= 1.24; plain float is equivalent
    food_cnt = np.squeeze(cnts).astype(float)
    # rescale contour to be the same dimension as the original images
    # NOTE(review): contour columns are (x, y) but original_size is a numpy
    # shape (rows, cols), so the axes are mixed here; this is harmless only
    # because the resize in get_food_prob preserves the aspect ratio — confirm.
    food_cnt[:, 0] *= original_size[0]/food_prob.shape[0]
    food_cnt[:, 1] *= original_size[1]/food_prob.shape[1]
    if _is_debug:
        import matplotlib.pylab as plt
        img = bgnd_images[0]
        patch_n = np.zeros(img.shape, np.uint8)
        patch_n = cv2.drawContours(
            patch_n, [cnts], 0, color=1, thickness=cv2.FILLED)
        top = img.max()
        bot = img.min()
        img_n = (img-bot)/(top-bot)
        img_rgb = np.repeat(img_n[..., None], 3, axis=2)
        # darken everything outside the patch in the red channel
        img_rgb[..., 0] = ((patch_n == 0)*0.5 + 0.5)*img_rgb[..., 0]
        plt.figure()
        plt.imshow(img_rgb)
        plt.plot(hull[:, :, 0], hull[:, :, 1], 'r')
        plt.title('solidity = {:.3}'.format(cnt_solidity))
    return food_cnt, food_prob, cnt_solidity
# %%
if __name__ == '__main__':
    # Validation driver: recompute the food contour for every skeletons file
    # in an archive list and compare it (IoU) against the contour previously
    # stored in the file. Writes a CSV of IoUs, PNG overlays of mismatches and
    # (on linux) a memory-usage log.
    from matplotlib import pyplot as plt
    from tierpsy.helper.params.models_path import DFLT_MODEL_FOOD_CONTOUR
    from pathlib import Path
    from tqdm import tqdm
    import sys
    import os
    # mask_file = '/Users/ajaver/OneDrive - Imperial College London/optogenetics/Arantza/MaskedVideos/oig8/oig-8_ChR2_control_males_3_Ch1_11052017_161018.hdf5'
    if sys.platform == 'darwin':
        bg_path = Path('/Volumes/behavgenom$')
    elif sys.platform == 'linux':
        bg_path = Path.home() / 'net' / 'behavgenom$'
    else:
        raise Exception('not coded for this platform')
    flist_fname = bg_path / 'Luigi/exchange/all_skels_with_food_archive.txt'
    with open(flist_fname, 'r') as fid:
        skel_files = fid.read().splitlines()
    # rewrite macOS mount points to the platform-specific base path
    skel_files = [
        f.replace('/Volumes/behavgenom$', str(bg_path)) for f in skel_files]
    out_dir = bg_path / 'Luigi/food_tests/'
    out_log = out_dir / 'memlog.txt'
    out_data = out_dir / 'IoUs.csv'
    # load model now to prevent memory leak
    food_model = load_model(DFLT_MODEL_FOOD_CONTOUR)
    # loop through all skeletons
    for skel_file in tqdm(skel_files):
        # derive the masked-video path from the skeletons path
        mask_file = Path(
            str(skel_file)
            .replace('Results', 'MaskedVideos')
            .replace('_skeletons.hdf5', '.hdf5')
        )
        if not mask_file.exists():
            continue
        # skip files without a previously stored food contour
        with tables.File(skel_file, 'r') as fid:
            if '/food_cnt_coord' in fid:
                old_food_cnt = fid.get_node('/food_cnt_coord')[:].copy()
                old_circx, old_circy = old_food_cnt.T
            else:
                continue
        food_cnt, food_prob, cnt_solidity = get_food_contour_nn(
            mask_file, food_model, _is_debug=False)
        circx, circy = food_cnt.T
        try:
            with tables.File(mask_file, 'r') as fid:
                img = fid.get_node('/full_data')[0].copy()
        except:
            print(f'cant get full_data from {mask_file}')
            continue
        # rasterise both contours and compute their intersection-over-union
        food_mask = cv2.drawContours(
            np.zeros(img.shape, np.uint8), [food_cnt.astype(int)], -1,
            color=1, thickness=cv2.FILLED
        ).astype(bool)
        old_food_mask = cv2.drawContours(
            np.zeros(img.shape, np.uint8), [old_food_cnt.astype(int)], -1,
            color=1, thickness=cv2.FILLED
        ).astype(bool)
        food_IoU = (
            np.sum(np.logical_and(food_mask, old_food_mask)) /
            np.sum(np.logical_or(food_mask, old_food_mask))
        )
        with open(out_data, 'a') as fid:
            print(f'{mask_file},{food_IoU}', file=fid)
        # save an overlay figure for imperfect matches not yet rendered
        out_name = out_dir / mask_file.with_suffix('.png').name
        if (food_IoU < 1) and (not out_name.exists()):
            fig = plt.figure()
            plt.imshow(img, cmap='gray')
            plt.plot(circx, circy)
            plt.plot(old_circx, old_circy, 'g', linestyle='--')
            plt.show()
            plt.pause(0.2)
            fig.savefig(out_name, dpi=600)
            plt.pause(0.2)
            plt.close('all')
        # crude memory-leak monitoring via the `free` command (linux only)
        if sys.platform == 'linux':
            _, used_m, free_m = os.popen(
                'free -th').readlines()[-1].split()[1:]
            with open(out_log, 'a') as fout:
                print(
                    f'free: {free_m}, used:{used_m}, file:{skel_file}',
                    file=fout)
{"hexsha": "70b4fe2b2ca4ff083955694a19c323f38069a6ad", "size": 14687, "ext": "py", "lang": "Python", "max_stars_repo_path": "tierpsy/analysis/food_cnt/getFoodContourNN.py", "max_stars_repo_name": "JGNieto/tierpsy-tracker", "max_stars_repo_head_hexsha": "0c1565bc1ed4d54f9fe196931eecafb663a68e3f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 9, "max_stars_repo_stars_event_min_datetime": "2021-01-11T10:49:21.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-28T15:48:00.000Z", "max_issues_repo_path": "tierpsy/analysis/food_cnt/getFoodContourNN.py", "max_issues_repo_name": "RongkangXiong/tierpsy-tracker", "max_issues_repo_head_hexsha": "b35646180e2bdbcf591f167feb563d3e4dbde379", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 18, "max_issues_repo_issues_event_min_datetime": "2020-05-08T15:43:08.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-23T10:19:24.000Z", "max_forks_repo_path": "tierpsy/analysis/food_cnt/getFoodContourNN.py", "max_forks_repo_name": "RongkangXiong/tierpsy-tracker", "max_forks_repo_head_hexsha": "b35646180e2bdbcf591f167feb563d3e4dbde379", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 10, "max_forks_repo_forks_event_min_datetime": "2019-12-18T12:10:12.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-05T09:12:47.000Z", "avg_line_length": 30.4709543568, "max_line_length": 159, "alphanum_fraction": 0.5999182951, "include": true, "reason": "import numpy", "num_tokens": 4040}
|
#! /usr/bin/env python3
# coding=utf-8
# Driver script: convert joyrad94 netCDF spectra to peakTree output files.
import datetime
#import matplotlib
#matplotlib.use('Agg')
#import numpy as np
#import matplotlib.pyplot as plt
#import sys, os
import peakTree
import peakTree.helpers as h
import logging
# log to stderr at INFO level
log = logging.getLogger('peakTree')
log.setLevel(logging.INFO)
log.addHandler(logging.StreamHandler())
pTB = peakTree.peakTreeBuffer(system='joyrad_nya')
#pTB.load_joyrad_file('data/joyrad94_nya_20191108000001_P01_ZEN.nc', load_to_ram=True)
#TB.load_joyrad_file('data/joyrad94_nya_20191108120000_P01_ZEN.nc', load_to_ram=True)
# older files
pTB.load_joyrad_file('data/joyrad94_nya_20170602100002_P05_ZEN.nc', load_to_ram=True)
#pTB.load_joyrad_file('data/joyrad94_nya_20170602110001_P05_ZEN.nc', load_to_ram=True)
pTB.assemble_time_height('output/')
# NOTE(review): exit() here makes the two lines below unreachable dead code —
# presumably left on purpose to process only one file per run; confirm.
exit()
pTB.load_joyrad_file('data/joyrad94_nya_20191108000001_P01_ZEN.nc', load_to_ram=True)
pTB.assemble_time_height('output/')
|
{"hexsha": "7ed0613bc4ee250d6491868ee287dfd229607900", "size": 930, "ext": "py", "lang": "Python", "max_stars_repo_path": "run_conversion_joyrad.py", "max_stars_repo_name": "martin-rdz/peakTree", "max_stars_repo_head_hexsha": "f37764b80faa5ead14835c2d03fda46c64b94e4d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-12-03T06:53:07.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-23T08:35:32.000Z", "max_issues_repo_path": "run_conversion_joyrad.py", "max_issues_repo_name": "martin-rdz/peakTree", "max_issues_repo_head_hexsha": "f37764b80faa5ead14835c2d03fda46c64b94e4d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2022-02-07T13:55:56.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-07T13:55:56.000Z", "max_forks_repo_path": "run_conversion_joyrad.py", "max_forks_repo_name": "martin-rdz/peakTree", "max_forks_repo_head_hexsha": "f37764b80faa5ead14835c2d03fda46c64b94e4d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2019-08-27T10:51:39.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-07T14:44:38.000Z", "avg_line_length": 30.0, "max_line_length": 87, "alphanum_fraction": 0.8010752688, "include": true, "reason": "import numpy", "num_tokens": 285}
|
#!/usr/bin/env python
################################################################################
# rel_locs_RPs.py
#
# Takes genome wide locations and converts into relative locations graphs
#
# NOTE(review): this module is Python 2 (print statements below).
################################################################################
import pylab as P
import matplotlib.pyplot as plt
import numpy as np
#directory for project
project_dir = '/Users/trevorsorrells/Documents/UCSF/Projects/TFBS_scoring/ribosome_project/'
#load list of species whose intergenics have been extacted
with open(project_dir + '/lists/all_species_in_order.txt','r') as inFile:
    species_list_long = inFile.read().split('\n')
species_output_order = []
#create abbreviated species list: first letter of genus + first 3 of species
for long_species in species_list_long:
    name_split = long_species.split()
    species_output_order.append(name_split[0][0] + name_split[1][:3])
#print species_output_order
#load the list of ribosomal protein gene names
inFile = open(project_dir + '/lists/RPs.txt','r')
gene_list = inFile.read().split()
gene_list = sorted(gene_list)
inFile.close()
#add systematic gene IDs to gene_list, because whole genome information uses IDs
# NOTE(review): gene_list grows while its membership is being tested, and the
# per-species file handle is reopened without closing the previous one.
for species in species_output_order:
    inFile = open(project_dir + 'intergenics/by_species/' + species, 'r')
    for line in inFile:
        line_split = line.split()
        if len(line_split) > 2:
            if line_split[2] in gene_list:
                # jgi-style IDs keep only the last pipe-separated token
                if line_split[1][:3] == 'jgi':
                    gene_list.append(species + ' ' + line_split[1].split('|')[-1])
                else:
                    gene_list.append(species + ' ' + line_split[1])
def simple_relative_locs(input_file_path, species_divisions, gene_list='none'):
    """Read a relative-locations file and bin its values by species division.

    Parameters
    ----------
    input_file_path : str
        Whitespace-separated file whose rows are ``species gene loc1 loc2 ...``.
    species_divisions : list of list of str
        Groups of species names; a row is accumulated into the group that
        contains its species (column 0).
    gene_list : list or 'none'
        If given, only rows whose gene (column 1) is in the list are counted.
        ('none' is the historical sentinel; kept for backward compatibility.)

    Returns
    -------
    (data_table, weight_table)
        data_table[i] holds the (per-row deduplicated) integer locations of
        every row whose species fell in species_divisions[i];
        weight_table[i] counts those rows.
    """
    data_table = [[] for _sp_dv in species_divisions]
    weight_table = [0 for _sp_dv in species_divisions]
    with open(input_file_path, 'r') as in_file:
        for line in in_file:
            line_split = line.split()
            if gene_list != 'none' and line_split[1] not in gene_list:
                continue
            locs = [int(value) for value in line_split[2:]]
            # enumerate ties each row to its division's own slot; the
            # original's manually-maintained counter was fragile here
            for i, sp_dv in enumerate(species_divisions):
                if line_split[0] in sp_dv:
                    # remove duplicate locations within a row via set()
                    data_table[i].extend(list(set(locs)))
                    weight_table[i] += 1
    return data_table, weight_table
def loadYGOBorthos(gene_list_systematic):
    #loads the orthologs of genes using YGOB for the purposes of studying
    #evolution of Rap1-Mcm1 binding sites in Kazachstania naganishii at genes other than
    #the ribosomal protein genes
    #
    # Returns ortho_list, parallel to gene_list_systematic: each entry is the
    # whole Pillars.tab.txt row (orthogroup) containing that gene, or the
    # 1-element list ['not in YGOB'] when no row matched.
    YGOB_path = '/Users/trevorsorrells/Documents/UCSF/Projects/genome_info/genomes_ygob/'
    #YGOB_gene_species_dict = {'Kpol':'Vpol','TPHA':'Tpha','Tpha':'Tpha','TBLA':'Tbla','Tbla':'Tbla','NDAI':'Ndai','Ndai':'Ndai','NCAS':'Ncas','Ncas':'Ncas','KNAG':'Knag','Knag':'Knag','KAFR':'Kafr','Kafr':'Kafr','CAGL':'Cgla','Cgla':'Cgla','Suva':'Suva','Skud':'Skud','Smik':'Smik','Scer':'Scer','ZYRO':'Zrou','Zrou':'Zrou','TDEL':'Tdel','Tdel':'Tdel','KLLA':'Klac','Klac':'Klac','Ecym':'Ecym','SAKL':'Lklu','Lklu':'Lklu','Sklu':'Lklu','KLTH':'Lthe','Kthe':'Lthe','Kwal':'Lwal'}
    #not_in_species_dict = ['Scer','Egos','Anc']
    ortho_list = []
    [ortho_list.append([]) for x in gene_list_systematic]
    genes_in_ortho_list = []
    ortho_file = open(YGOB_path + 'Pillars.tab.txt', 'r')
    for line in ortho_file:
        split_line = line.split()
        #while '---' in split_line:
        #    split_line.remove('---')
        for gene in split_line:
            if gene in gene_list_systematic:
                i = gene_list_systematic.index(gene)
                genes_in_ortho_list.append(gene)
                ortho_list[i] = split_line
    ortho_file.close()
    # NOTE(review): prints the module-level gene_list, not the
    # gene_list_systematic argument — confirm that is intended.
    print 'number of genes in gene list:'
    print len(gene_list)
    print 'number of orthogroups found in YGOB:'
    print len(genes_in_ortho_list)
    # flag genes that never matched an orthogroup
    for orthogroup in ortho_list:
        if len(orthogroup) < 2:
            print orthogroup
            print 'was not in YGOB'
            orthogroup.append('not in YGOB')
    return ortho_list
def output_values_YGOB_species(gene_list, genes_by_orthos):
    #outputs the position weight matrix score of Rap1 and Mcm1 binding sites at non-ribosomal protein genes
    #in post-whole genome hybridization species
    #
    # gene_list and genes_by_orthos must be parallel: one orthogroup (list of
    # systematic gene names) per gene. Writes one tab-separated row per
    # orthogroup, Rap1 scores first and Mcm1 scores second.

    def _scores_for(score_path, orthogroups, known_genes):
        # per-orthogroup list of score strings ('' when no score was found)
        out = [['' for _gene in grp] for grp in orthogroups]
        score_file = open(score_path, 'r')
        scores = [line.split() for line in score_file]
        # bug fix: the original closed the unrelated module-level `inFile`
        # handle here (copy-paste), leaking the score file handle
        score_file.close()
        for score in scores:
            if score[1] in known_genes:
                for i, grp in enumerate(orthogroups):
                    for j, gene in enumerate(grp):
                        if gene == score[1] and len(score) > 2:
                            out[i][j] = score[2]
        return out

    # every systematic gene name present in any orthogroup
    all_genes_list = []
    for ortho_group in genes_by_orthos:
        for gene in ortho_group:
            all_genes_list.append(gene)
    output_lines1 = _scores_for(
        project_dir + 'outputs/whole_genome_scores_500/rap1_scertf__max_score.txt',
        genes_by_orthos, all_genes_list)
    output_lines2 = _scores_for(
        project_dir + 'outputs/whole_genome_scores_500/mcm1_scertf__max_score.txt',
        genes_by_orthos, all_genes_list)
    outFile = open(project_dir + 'outputs/Knag_site_evolution/Rap1_Mcm1_scores_500.txt','w')
    outFile.write('Rap1\n')
    for gene, scores in zip(gene_list, output_lines1):
        outFile.write(gene + '\t' + '\t'.join(scores) + '\n')
    outFile.write('Mcm1\n')
    for gene, scores in zip(gene_list, output_lines2):
        outFile.write(gene + '\t' + '\t'.join(scores) + '\n')
    outFile.close()
def generateGraph(data_vect, weight_vect, min_range, max_range, output_path, output_name, heatmap=False, font_size=10):
    #outputs heatmaps or histograms showing the relative spacing between regulator binding sites
    #in the regulatory regions of sets of genes
    #
    # NOTE(review): relies on the module-level sp_dv_names for labels;
    # `normed=` was removed from matplotlib >= 3.1 (this is legacy py2 code).
    # data_vect and weight_vect are mutated in place (range filter, pseudocounts).
    #remove site locations that are out of range of graph
    for column in data_vect:
        i=0
        while i < len(column):
            if column[i] >max_range or column[i] < min_range:
                column.remove(column[i])
            else:
                i += 1
    #add pseudocounts to avoid errors in plotting heatmaps
    for i in range(len(data_vect)):
        if len(data_vect[i]) == 0:
            data_vect[i].append(0)
            weight_vect[i] = max(weight_vect)
    #print data_vect
    #print weight_vect
    #calculate weights to result in values per promoter
    weight_input = []
    for i in range(len(data_vect)):
        weight_input.append([])
        for entry in data_vect[i]:
            weight_input[i].append(1.0/float(weight_vect[i]))
    #calculate proportions for each group
    proportions = []
    for i in range(len(data_vect)):
        if len(data_vect[i]) == 0:
            print i
            proportions.append(0.0)
        else:
            proportions.append(len(data_vect[i])*weight_input[i][0])
    #for i in range(len(proportions)):
    #    print proportions[-i-1]
    if not heatmap:
        # 50-bin per-promoter histogram, one step line per species division
        n, bins, patches = P.hist(data_vect[::-1], 50, normed=False, weights = weight_input[::-1], histtype='step', label=sp_dv_names[::-1], stacked=False)
        P.legend(loc=1)
        #P.show()
        P.savefig(output_path + '/histograms/' + output_name + '_hist.pdf', format='pdf')
        P.close()
    if heatmap:
        # coarser 20-bin histogram, reshaped into a species x bin heatmap
        n, bins, patches = P.hist(data_vect[::-1], 20, normed=False, weights = weight_input[::-1], histtype='step', label=sp_dv_names[::-1], stacked=False)
        P.legend(loc=1)
        #convert n into correct datastructure
        heatdata = np.array(np.ravel(n))
        max_density = max(heatdata)
        heatdata = np.reshape(heatdata, (len(n), len(bins)-1))
        print heatdata
        #output heatmap
        fig, ax = plt.subplots()
        htmap = ax.pcolor(heatdata, cmap = plt.cm.Blues)
        plt.setp(ax.get_yticklabels(), fontsize=font_size)
        #ax.set_xticks(np.arange(len(bins)-1), minor=False)
        ax.set_yticks(np.arange(len(sp_dv_names)), minor=False)
        ax.set_ybound(0, len(sp_dv_names))
        ax.set_yticklabels(sp_dv_names[::-1], minor=False)
        # x tick labels: 5 evenly spaced values across [min_range, max_range]
        ax.set_xticklabels(((np.arange(5))*(max_range-min_range)/4+min_range).astype(int), minor=False)
        plt.title('max density: ' + str(max_density))
        P.savefig(output_path + '/heatmaps/' + output_name + '_heat.pdf', format='pdf')
        P.close()
def locsToRelative(input1, input2, output_name):
    #takes the data of the locations for 2 different TFs and calculates their locations of the 2nd relative to the 1st
    #
    # input1/input2 are file names inside outputs/whole_genome_scores_1000/;
    # each row is "species gene loc1 loc2 ...". The output row keeps the
    # species and gene columns and appends every pairwise difference
    # loc2 - loc1 between the (deduplicated) site locations.
    inFile1 = open(project_dir + 'outputs/whole_genome_scores_1000/' + input1, 'r')
    locs1 = [line.split() for line in inFile1]
    # bug fix: the original closed the module-level `inFile` handle here,
    # leaking inFile1
    inFile1.close()
    inFile2 = open(project_dir + 'outputs/whole_genome_scores_1000/' + input2, 'r')
    locs2 = [line.split() for line in inFile2]
    inFile2.close()
    output_lines = []
    for loc1_rec, loc2_rec in zip(locs1, locs2):
        current_output = []
        current_output.extend(loc1_rec[:2])
        if len(loc1_rec) > 2 and len(loc2_rec) > 2:
            loc1_locs = list(set(loc1_rec[2:]))
            loc2_locs = list(set(loc2_rec[2:]))
            for loc1 in loc1_locs:
                for loc2 in loc2_locs:
                    try:
                        current_output.append(str(int(loc2)-int(loc1)))
                    except ValueError:
                        # skip non-numeric entries
                        continue
        output_lines.append(current_output)
    outFile = open(project_dir + 'outputs/wg_rel_locs/'+output_name, 'w')
    for record in output_lines:
        outFile.write('\t'.join(record) + '\n')
    outFile.close()
def locsToRelativeRPsOnly(input1, input2, output_name):
    #takes the data of the locations for 2 different TFs and calculates their locations of the 2nd relative to the 1st
    #outputs these results only for the ribosomal protein genes
    #
    # Same as locsToRelative, but only rows whose "species gene" key is in
    # the module-level gene_list (the ribosomal protein genes) are emitted.
    inFile1 = open(project_dir + 'outputs/whole_genome_scores_1000/' + input1, 'r')
    locs1 = [line.split() for line in inFile1]
    # bug fix: the original closed the module-level `inFile` handle here,
    # leaking inFile1
    inFile1.close()
    inFile2 = open(project_dir + 'outputs/whole_genome_scores_1000/' + input2, 'r')
    locs2 = [line.split() for line in inFile2]
    inFile2.close()
    output_lines = []
    for loc1_rec, loc2_rec in zip(locs1, locs2):
        # only keep ribosomal protein genes
        if loc1_rec[0] + ' ' + loc1_rec[1] not in gene_list:
            continue
        current_output = []
        current_output.extend(loc1_rec[:2])
        if len(loc1_rec) > 2 and len(loc2_rec) > 2:
            loc1_locs = list(set(loc1_rec[2:]))
            loc2_locs = list(set(loc2_rec[2:]))
            for loc1 in loc1_locs:
                for loc2 in loc2_locs:
                    try:
                        current_output.append(str(int(loc2)-int(loc1)))
                    except ValueError:
                        # skip non-numeric entries
                        continue
        output_lines.append(current_output)
    outFile = open(project_dir + 'outputs/relative_locations/relative_locations_1000/'+output_name, 'w')
    for record in output_lines:
        outFile.write('\t'.join(record) + '\n')
    outFile.close()
#analyses
#create a list of species divisions for making graphs (one per species)
species_divs = []
[species_divs.append([sp]) for sp in species_output_order]
sp_dv_names = species_output_order
#take locations of binding sites and calculate the distance between them
outputs = ['fhl1_scertf_6.0cutoff','fhl1_scertf_8.0cutoff','rap1_scertf_6.0cutoff','rap1_scertf_8.0cutoff','rrn7_Aade_6.0cutoff','rrn7_Aade_8.0cutoff','tbf1_Arub_6.0cutoff','tbf1_Arub_8.0cutoff']
for regulator in outputs:
    in_name = regulator + '.txt'
    out_name1 = 'mcm1_max_' + regulator + '.txt'
    # locations of each regulator's sites relative to the best Mcm1 site
    locsToRelativeRPsOnly('mcm1_scertf__max_score.txt', in_name, out_name1)
    #output graphs of the relative locations of binding sites
    in_name = out_name1
    data_vector, weight_vector = simple_relative_locs(project_dir + 'outputs/relative_locations/relative_locations_1000/' + in_name ,species_divs)
    generateGraph(data_vector, weight_vector, -200, 200, project_dir + 'outputs/relative_locations/graphs/',in_name, heatmap=True, font_size=5)
#evolution of non-RP Rap1-Mcm1 sites
locsToRelative('Rap1ForwardMax_6.0', 'Mcm1_6.0', 'Rap1ForwardMax_6.0_Mcm1_6.0')
genes_by_ortho = loadYGOBorthos(gene_list)
output_values_YGOB_species(gene_list, genes_by_ortho)
|
{"hexsha": "c7fe6c687c3646fd98820b5eeb9c879ad98abce6", "size": 13650, "ext": "py", "lang": "Python", "max_stars_repo_path": "TFBS_scoring/rel_locs_RPs.py", "max_stars_repo_name": "trevorsorrells/RPG_evolution", "max_stars_repo_head_hexsha": "17b76230459919aea07fd733a06548477d3c67ee", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "TFBS_scoring/rel_locs_RPs.py", "max_issues_repo_name": "trevorsorrells/RPG_evolution", "max_issues_repo_head_hexsha": "17b76230459919aea07fd733a06548477d3c67ee", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "TFBS_scoring/rel_locs_RPs.py", "max_forks_repo_name": "trevorsorrells/RPG_evolution", "max_forks_repo_head_hexsha": "17b76230459919aea07fd733a06548477d3c67ee", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 43.4713375796, "max_line_length": 479, "alphanum_fraction": 0.6289377289, "include": true, "reason": "import numpy", "num_tokens": 3579}
|
import pytest
from cuxfilter.assets import datetime as dt
from cuxfilter.charts.constants import CUDF_DATETIME_TYPES
import datetime
import pandas as pd
import cudf
import numpy as np
@pytest.mark.parametrize(
    "date, _type, factor",
    [
        (
            np.datetime64("2018-10-07").astype("datetime64[ns]"),
            "datetime64[ns]",
            1.0,
        ),
        (
            np.datetime64("2018-10-07").astype("datetime64[ns]"),
            "datetime64[ms]",
            1e-6,
        ),
        (
            np.datetime64("2018-10-07").astype("datetime64[s]"),
            "datetime64[ns]",
            1e9,
        ),
        (
            np.datetime64("2018-10-07").astype("datetime64[ns]"),
            "datetime64[s]",
            1e-9,
        ),
    ],
)
def test_get_dt_unit_factor(date, _type, factor):
    """The factor converts the date's own time unit into the target `_type` unit."""
    assert dt.get_dt_unit_factor(date, _type) == factor
@pytest.mark.parametrize(
    "date, result",
    [
        (1538870400, pd.to_datetime(1538870400, unit="s")),
        (1538870400000, pd.to_datetime(1538870400000, unit="ms")),
        (1538870400000000, pd.to_datetime(1538870400000000, unit="us")),
        (1538870400000000000, pd.to_datetime(1538870400000000000, unit="ns")),
    ],
)
def test_to_datetime(date, result):
    """Epoch integers of increasing magnitude resolve to s/ms/us/ns timestamps."""
    assert dt.to_datetime(date) == result
@pytest.mark.parametrize(
    "dates, _type, result_dates",
    [
        (
            (1538870400,),
            CUDF_DATETIME_TYPES[0],
            (datetime.datetime(2018, 10, 7, 0, 0),),
        ),
        (
            [datetime.datetime(2018, 10, 7, 0, 0)],
            CUDF_DATETIME_TYPES[1],
            [datetime.datetime(2018, 10, 7, 0, 0)],
        ),
        (
            [np.datetime64("2018-10-07T00:00:00.000000000")],
            CUDF_DATETIME_TYPES[1],
            [datetime.datetime(2018, 10, 7, 0, 0)],
        ),
    ],
)
def test_to_dt_if_datetime(dates, _type, result_dates):
    """Values are converted to python datetimes when `_type` is a datetime dtype."""
    assert dt.to_dt_if_datetime(dates, _type) == result_dates
@pytest.mark.parametrize(
    "dates, _type, result_dates",
    [
        (
            (1538870400,),
            CUDF_DATETIME_TYPES[0],
            (np.datetime64("2018-10-07T00:00:00.000000000"),),
        ),
        (
            [datetime.datetime(2018, 10, 7, 0, 0)],
            CUDF_DATETIME_TYPES[1],
            [np.datetime64("2018-10-07T00:00:00.000000000")],
        ),
        (
            [np.datetime64("2018-10-07T00:00:00.000000000")],
            CUDF_DATETIME_TYPES[1],
            [np.datetime64("2018-10-07T00:00:00.000000000")],
        ),
    ],
)
def test_to_np_dt64_if_datetime(dates, _type, result_dates):
    """Values are converted to numpy datetime64 when `_type` is a datetime dtype."""
    assert dt.to_np_dt64_if_datetime(dates, _type) == result_dates
@pytest.mark.parametrize(
    "dates, _type, result_dates",
    [
        (
            cudf.Series(np.array(["2018-10-07"], dtype="datetime64")),
            "datetime64[ns]",
            cudf.Series(1.5388704e18).astype("int64"),
        ),
        (
            [np.datetime64("2018-10-07T00:00:00")],
            CUDF_DATETIME_TYPES[2],
            ([1.5388704e15]),
        ),
    ],
)
def test_to_int64_if_datetime(dates, _type, result_dates):
    """Datetimes are converted to int64 epoch values in the unit of `_type`."""
    assert (dt.to_int64_if_datetime(dates, _type) == result_dates).all()
|
{"hexsha": "6ad04aefcde0c735364cf7c3993b677372c03965", "size": 3211, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/cuxfilter/tests/assets/test_datetime.py", "max_stars_repo_name": "kkraus14/cuxfilter", "max_stars_repo_head_hexsha": "99d7cf67802270d24db0051162df4feb798f2e15", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 201, "max_stars_repo_stars_event_min_datetime": "2018-12-21T18:32:40.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-22T11:50:29.000Z", "max_issues_repo_path": "python/cuxfilter/tests/assets/test_datetime.py", "max_issues_repo_name": "kkraus14/cuxfilter", "max_issues_repo_head_hexsha": "99d7cf67802270d24db0051162df4feb798f2e15", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 258, "max_issues_repo_issues_event_min_datetime": "2018-12-27T07:37:50.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T20:01:32.000Z", "max_forks_repo_path": "python/cuxfilter/tests/assets/test_datetime.py", "max_forks_repo_name": "kkraus14/cuxfilter", "max_forks_repo_head_hexsha": "99d7cf67802270d24db0051162df4feb798f2e15", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 51, "max_forks_repo_forks_event_min_datetime": "2019-01-10T19:03:09.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-08T01:37:11.000Z", "avg_line_length": 26.9831932773, "max_line_length": 78, "alphanum_fraction": 0.5546558704, "include": true, "reason": "import numpy", "num_tokens": 937}
|
include("euler/euler.jl")
using .Calculus: triangular_numbers
function compute(n::Int)::Int
    # A grid of i x j unit squares contains T(i) * T(j) sub-rectangles, where
    # T(k) is the k-th triangular number. Search every (i, j) pair for the
    # rectangle count closest to n and return the grid area i * j.
    limit = trunc(Int, √n)
    triangles = triangular_numbers(limit)
    best_diff = n
    best_area = 0
    for i in 1:limit
        for j in 1:i
            rect_count = triangles[i] * triangles[j]
            gap = abs(n - rect_count)
            if gap < best_diff
                best_diff = gap
                best_area = i * j
            end
            # counts only grow with j, so once past n the inner loop is done
            rect_count > n && break
        end
    end
    return best_area
end
|
{"hexsha": "9c78dbfb7bc552b4b9c27860eb8314273db70ed3", "size": 557, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "problems/0085/compute.jl", "max_stars_repo_name": "Dynortice/Project-Euler", "max_stars_repo_head_hexsha": "99a0201b5d5f147eab77fc52d9db8995045cded0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "problems/0085/compute.jl", "max_issues_repo_name": "Dynortice/Project-Euler", "max_issues_repo_head_hexsha": "99a0201b5d5f147eab77fc52d9db8995045cded0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "problems/0085/compute.jl", "max_forks_repo_name": "Dynortice/Project-Euler", "max_forks_repo_head_hexsha": "99a0201b5d5f147eab77fc52d9db8995045cded0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.3181818182, "max_line_length": 52, "alphanum_fraction": 0.5673249551, "num_tokens": 132}
|
## Analyzing lowered code
function add_docexpr!(docexprs::AbstractDict{Module,V}, mod::Module, ex) where V
    # Fetch (or lazily create) the expression collection for `mod`,
    # then record `ex` in it. Returns the dict for chaining.
    exprs_for_mod = get!(V, docexprs, mod)
    push!(exprs_for_mod, ex)
    return docexprs
end
function lookup_callexpr(frame, stmt)
    # Resolve the arguments of `stmt` in `frame` and rebuild the call as an Expr.
    return Expr(:call, JuliaInterpreter.collect_args(frame, stmt)...)
end
# Store `value` as the SSA value of the statement the frame is currently at.
function assign_this!(frame, value)
    frame.framedata.ssavalues[frame.pc] = value
end
# This defines the API needed to store signatures using methods_by_execution!
# This default version is simple and only used for testing purposes.
# The "real" one is CodeTrackingMethodInfo in Revise.jl.
const MethodInfo = IdDict{Type,LineNumberNode}
# record a method signature together with the line it was defined at
add_signature!(methodinfo::MethodInfo, @nospecialize(sig), ln) = push!(methodinfo, sig=>ln)
# expression-stack tracking and dependency/include recording are no-ops here
push_expr!(methodinfo::MethodInfo, mod::Module, ex::Expr) = methodinfo
pop_expr!(methodinfo::MethodInfo) = methodinfo
add_dependencies!(methodinfo::MethodInfo, be::CodeEdges, src, isrequired) = methodinfo
add_includes!(methodinfo::MethodInfo, mod::Module, filename) = methodinfo
# This is not generally used, see `is_method_or_eval` instead
function hastrackedexpr(stmt; heads=LoweredCodeUtils.trackedheads)
    # Return `(istracked, haseval)`: whether `stmt` contains an expression we
    # track (type-body/supertype setup calls, `include`, or a head in `heads`)
    # and whether it contains an `eval` call.
    haseval = false
    if isa(stmt, Expr)
        haseval = matches_eval(stmt)
        if stmt.head === :call
            f = stmt.args[1]
            callee_matches(f, Core, :_typebody!) && return true, haseval
            callee_matches(f, Core, :_setsuper!) && return true, haseval
            f === :include && return true, haseval
        elseif stmt.head === :thunk
            # recurse into the nested CodeInfo carried by a thunk
            any(s->any(hastrackedexpr(s; heads=heads)), (stmt.args[1]::Core.CodeInfo).code) && return true, haseval
        elseif stmt.head ∈ heads
            return true, haseval
        end
    end
    return false, haseval
end
function matches_eval(stmt::Expr)
    # A statement "matches eval" when it is a call to `eval` in any of its
    # lowered spellings: the bare `:eval` symbol, `Base.getproperty(x, :eval)`,
    # a GlobalRef to an `eval` binding, or the `Core.eval` function itself.
    stmt.head === :call || return false
    f = stmt.args[1]
    f === :eval && return true
    callee_matches(f, Base, :getproperty) && is_quotenode_egal(stmt.args[end], :eval) && return true
    isa(f, GlobalRef) && f.name === :eval && return true
    return is_quotenode_egal(f, Core.eval)
end
function categorize_stmt(@nospecialize(stmt))
    # Classify a lowered statement; returns the boolean tuple
    # (ismeth, haseval, isinclude, isnamespace, istoplevel).
    isa(stmt, Expr) || return false, false, false, false, false
    head = stmt.head
    haseval     = matches_eval(stmt)
    ismeth      = head === :method
    istoplevel  = head === :toplevel
    isnamespace = head === :export || head === :import || head === :using
    isinclude   = head === :call && stmt.args[1] === :include
    return ismeth, haseval, isinclude, isnamespace, istoplevel
end
"""
isrequired, evalassign = minimal_evaluation!([predicate,] methodinfo, src::Core.CodeInfo, mode::Symbol)
Mark required statements in `src`: `isrequired[i]` is `true` if `src.code[i]` should be evaluated.
Statements are analyzed by `isreq, haseval = predicate(stmt)`, and `predicate` defaults
to `Revise.is_method_or_eval`.
`haseval` is true if the statement came from `@eval` or `eval(...)` call.
Since the contents of such expression are difficult to analyze, it is generally
safest to execute all such evals.
"""
function minimal_evaluation!(@nospecialize(predicate), methodinfo, src::Core.CodeInfo, mode::Symbol)
edges = CodeEdges(src)
# LoweredCodeUtils.print_with_code(stdout, src, edges)
isrequired = fill(false, length(src.code))
evalassign = false
for (i, stmt) in enumerate(src.code)
if !isrequired[i]
isrequired[i], haseval = predicate(stmt)
if haseval # line `i` may be the equivalent of `f = Core.eval`, so...
isrequired[edges.succs[i]] .= true # ...require each stmt that calls `eval` via `f(expr)`
isrequired[i] = true
end
end
if mode === :evalassign && isexpr(stmt, :(=))
evalassign = isrequired[i] = true
lhs = (stmt::Expr).args[1]
if isa(lhs, Symbol)
isrequired[edges.byname[lhs].succs] .= true # mark any `const` statements or other "uses" in this block
end
end
end
# Check for docstrings
if length(src.code) > 1 && mode !== :sigs
stmt = src.code[end-1]
if isexpr(stmt, :call) && (stmt::Expr).args[1] === Base.Docs.doc!
isrequired[end-1] = true
end
end
# All tracked expressions are marked. Now add their dependencies.
# LoweredCodeUtils.print_with_code(stdout, src, isrequired)
lines_required!(isrequired, src, edges;
norequire=mode===:sigs ? LoweredCodeUtils.exclude_named_typedefs(src, edges) : ())
# LoweredCodeUtils.print_with_code(stdout, src, isrequired)
add_dependencies!(methodinfo, edges, src, isrequired)
return isrequired, evalassign
end
# Convenience method: extract the lowered `CodeInfo` from an interpreter `Frame`.
minimal_evaluation!(@nospecialize(predicate), methodinfo, frame::JuliaInterpreter.Frame, mode::Symbol) =
    minimal_evaluation!(predicate, methodinfo, frame.framecode.src, mode)
# Default-predicate variant: a statement is required when it defines a method,
# performs an `include`, or is a `:toplevel` block; namespace statements
# (`export`/`import`/`using`) are additionally required in every mode except `:sigs`.
function minimal_evaluation!(methodinfo, frame, mode::Symbol)
    minimal_evaluation!(methodinfo, frame, mode) do @nospecialize(stmt)
        ismeth, haseval, isinclude, isnamespace, istoplevel = categorize_stmt(stmt)
        required = ismeth | isinclude | istoplevel
        mode === :sigs || (required |= isnamespace)
        return required, haseval
    end
end
# Non-mutating entry point: allocate fresh caches, run `ex` in `mod` with the
# compiled interpreter, and return the populated caches plus the final frame.
# The `value` produced by `methods_by_execution!` is deliberately discarded.
function methods_by_execution(mod::Module, ex::Expr; kwargs...)
    minfo = MethodInfo()
    dexprs = DocExprs()
    _, frame = methods_by_execution!(JuliaInterpreter.Compiled(), minfo, dexprs, mod, ex; kwargs...)
    return minfo, dexprs, frame
end
"""
methods_by_execution!(recurse=JuliaInterpreter.Compiled(), methodinfo, docexprs, mod::Module, ex::Expr;
mode=:eval, disablebp=true, skip_include=mode!==:eval, always_rethrow=false)
Evaluate or analyze `ex` in the context of `mod`.
Depending on the setting of `mode` (see the Extended help), it supports full evaluation or just the minimal
evaluation needed to extract method signatures.
`recurse` controls JuliaInterpreter's evaluation of any non-intercepted statement;
likely choices are `JuliaInterpreter.Compiled()` or `JuliaInterpreter.finish_and_return!`.
`methodinfo` is a cache for storing information about any method definitions (see [`CodeTrackingMethodInfo`](@ref)).
`docexprs` is a cache for storing documentation expressions; obtain an empty one with `Revise.DocExprs()`.
# Extended help
The action depends on `mode`:
- `:eval` evaluates the expression in `mod`, similar to `Core.eval(mod, ex)` except that `methodinfo` and `docexprs`
will be populated with information about any signatures or docstrings. This mode is used to implement `includet`.
- `:sigs` analyzes `ex` and extracts signatures of methods and docstrings (specifically, statements flagged by
[`Revise.minimal_evaluation!`](@ref)), but does not evaluate `ex` in the traditional sense.
It will selectively execute statements needed to form the signatures of defined methods.
It will also expand any `@eval`ed expressions, since these might contain method definitions.
- `:evalmeth` analyzes `ex` and extracts signatures and docstrings like `:sigs`, but takes the additional step of
evaluating any `:method` statements.
- `:evalassign` acts similarly to `:evalmeth`, and also evaluates assignment statements.
When selectively evaluating an expression, Revise will incorporate required dependencies, even for
minimal-evaluation modes like `:sigs`. For example, the method definition
max_values(T::Union{map(X -> Type{X}, Base.BitIntegerSmall_types)...}) = 1 << (8*sizeof(T))
found in `base/abstractset.jl` requires that it create the anonymous function in order to compute the
signature.
The other keyword arguments are more straightforward:
- `disablebp` controls whether JuliaInterpreter's breakpoints are disabled before stepping through the code.
They are restored on exit.
- `skip_include` prevents execution of `include` statements, instead inserting them into `methodinfo`'s
cache. This defaults to `true` unless `mode` is `:eval`.
- `always_rethrow`, if true, causes an error to be thrown if evaluating `ex` triggered an error.
If false, the error is logged with `@error`. `InterruptException`s are always rethrown.
This is primarily useful for debugging.
"""
function methods_by_execution!(@nospecialize(recurse), methodinfo, docexprs, mod::Module, ex::Expr;
mode::Symbol=:eval, disablebp::Bool=true, always_rethrow::Bool=false, kwargs...)
mode ∈ (:sigs, :eval, :evalmeth, :evalassign) || error("unsupported mode ", mode)
lwr = Meta.lower(mod, ex)
isa(lwr, Expr) || return nothing, nothing
if lwr.head === :error || lwr.head === :incomplete
error("lowering returned an error, ", lwr)
end
if lwr.head !== :thunk
mode === :sigs && return nothing, nothing
return Core.eval(mod, lwr), nothing
end
frame = JuliaInterpreter.Frame(mod, lwr.args[1])
mode === :eval || LoweredCodeUtils.rename_framemethods!(recurse, frame)
# Determine whether we need interpreted mode
isrequired, evalassign = minimal_evaluation!(methodinfo, frame, mode)
# LoweredCodeUtils.print_with_code(stdout, frame.framecode.src, isrequired)
if !any(isrequired) && (mode===:eval || !evalassign)
# We can evaluate the entire expression in compiled mode
if mode===:eval
ret = try
Core.eval(mod, ex)
catch err
(always_rethrow || isa(err, InterruptException)) && rethrow(err)
loc = location_string(whereis(frame)...)
bt = trim_toplevel!(catch_backtrace())
throw(ReviseEvalException(loc, err, Any[(sf, 1) for sf in stacktrace(bt)]))
end
else
ret = nothing
end
else
# Use the interpreter
local active_bp_refs
if disablebp
# We have to turn off all active breakpoints, https://github.com/timholy/CodeTracking.jl/issues/27
bp_refs = JuliaInterpreter.BreakpointRef[]
for bp in JuliaInterpreter.breakpoints()
append!(bp_refs, bp.instances)
end
active_bp_refs = filter(bp->bp[].isactive, bp_refs)
foreach(disable, active_bp_refs)
end
ret = try
methods_by_execution!(recurse, methodinfo, docexprs, frame, isrequired; mode=mode, kwargs...)
catch err
(always_rethrow || isa(err, InterruptException)) && (disablebp && foreach(enable, active_bp_refs); rethrow(err))
loc = location_string(whereis(frame)...)
sfs = [] # crafted for interaction with Base.show_backtrace
frame = JuliaInterpreter.leaf(frame)
while frame !== nothing
push!(sfs, (Base.StackTraces.StackFrame(frame), 1))
frame = frame.caller
end
throw(ReviseEvalException(loc, err, sfs))
end
if disablebp
foreach(enable, active_bp_refs)
end
end
return ret, lwr
end
# Convenience method defaulting `recurse` to the compiled interpreter.
methods_by_execution!(methodinfo, docexprs, mod::Module, ex::Expr; kwargs...) =
    methods_by_execution!(JuliaInterpreter.Compiled(), methodinfo, docexprs, mod, ex; kwargs...)
# Interpreter-level worker: step through `frame`'s statements, executing only
# those marked in `isrequired` (subject to `mode`), while intercepting
# method definitions, `eval` calls, `include` calls, and docstring
# registration so that `methodinfo`/`docexprs` get populated.
function methods_by_execution!(@nospecialize(recurse), methodinfo, docexprs, frame::Frame, isrequired::AbstractVector{Bool}; mode::Symbol=:eval, skip_include::Bool=true)
    isok(lnn::LineTypes) = !iszero(lnn.line) || lnn.file !== :none # might fail either one, but accept anything
    mod = moduleof(frame)
    # Hoist this lookup for performance. Don't throw even when `mod` is a baremodule:
    modinclude = isdefined(mod, :include) ? getfield(mod, :include) : nothing
    signatures = [] # temporary for method signature storage
    pc = frame.pc
    while true
        JuliaInterpreter.is_leaf(frame) || (@warn("not a leaf"); break)
        stmt = pc_expr(frame, pc)
        # Skip statements that were not marked required, unless the mode
        # forces evaluation (`:eval` runs everything; `:evalassign` also runs
        # every assignment).
        if !isrequired[pc] && mode !== :eval && !(mode === :evalassign && isexpr(stmt, :(=)))
            pc = next_or_nothing!(frame)
            pc === nothing && break
            continue
        end
        if isa(stmt, Expr)
            head = stmt.head
            if head === :toplevel
                # Recurse into each contained expression in a fresh analysis.
                local value
                for ex in stmt.args
                    ex isa Expr || continue
                    value = methods_by_execution!(recurse, methodinfo, docexprs, mod, ex; mode=mode, disablebp=false, skip_include=skip_include)
                end
                isassign(frame, pc) && assign_this!(frame, value)
                pc = next_or_nothing!(frame)
            elseif head ∈ structheads
                if mode !== :sigs
                    pc = step_expr!(recurse, frame, stmt, true) # This checks that they are unchanged
                else
                    pc = next_or_nothing!(frame)
                end
            # elseif head === :thunk && isanonymous_typedef(stmt.args[1])
            #     # Anonymous functions should just be defined anew, since there does not seem to be a practical
            #     # way to find them within the already-defined module.
            #     # They may be needed to define later signatures.
            #     # Note that named inner methods don't require special treatment.
            #     pc = step_expr!(recurse, frame, stmt, true)
            elseif head === :method
                empty!(signatures)
                ret = methoddef!(recurse, signatures, frame, stmt, pc; define=mode!==:sigs)
                if ret === nothing
                    # This was just `function foo end` or similar.
                    # However, it might have been followed by a thunk that defined a
                    # method (issue #435), so we still need to check for additions.
                    if !isempty(signatures)
                        file, line = whereis(frame.framecode, pc)
                        lnn = LineNumberNode(Int(line), Symbol(file))
                        for sig in signatures
                            add_signature!(methodinfo, sig, lnn)
                        end
                    end
                    pc = ret
                else
                    pc, pc3 = ret
                    # Get the line number from the body
                    stmt3 = pc_expr(frame, pc3)::Expr
                    lnn = nothing
                    if line_is_decl
                        sigcode = @lookup(frame, stmt3.args[2])::Core.SimpleVector
                        lnn = sigcode[end]
                        if !isa(lnn, LineNumberNode)
                            lnn = nothing
                        end
                    end
                    if lnn === nothing
                        # Fall back to the method body's own line table.
                        bodycode = stmt3.args[end]
                        if !isa(bodycode, CodeInfo)
                            bodycode = @lookup(frame, bodycode)
                        end
                        if isa(bodycode, CodeInfo)
                            lnn = linetable(bodycode, 1)
                            if !isok(lnn)
                                lnn = nothing
                                if length(bodycode.code) > 1
                                    # This may be a kwarg method. Mimic LoweredCodeUtils.bodymethod,
                                    # except without having a method
                                    stmt = bodycode.code[end-1]
                                    if isa(stmt, Expr) && length(stmt.args) > 1
                                        stmt = stmt::Expr
                                        a = stmt.args[1]
                                        nargs = length(stmt.args)
                                        hasself = let stmt = stmt, slotnames::Vector{Symbol} = bodycode.slotnames
                                            any(i->LoweredCodeUtils.is_self_call(stmt, slotnames, i), 2:nargs)
                                        end
                                        if isa(a, Core.SlotNumber)
                                            a = bodycode.slotnames[a.id]
                                        end
                                        if hasself && (isa(a, Symbol) || isa(a, GlobalRef))
                                            thismod, thisname = isa(a, Symbol) ? (mod, a) : (a.mod, a.name)
                                            if isdefined(thismod, thisname)
                                                f = getfield(thismod, thisname)
                                                mths = methods(f)
                                                if length(mths) == 1
                                                    mth = first(mths)
                                                    lnn = LineNumberNode(Int(mth.line), mth.file)
                                                end
                                            end
                                        end
                                    end
                                end
                                if lnn === nothing
                                    # Just try to find *any* line number
                                    for lnntmp in linetable(bodycode)
                                        lnntmp = lnntmp::LineTypes
                                        if isok(lnntmp)
                                            lnn = lnntmp
                                            break
                                        end
                                    end
                                end
                            end
                        elseif isexpr(bodycode, :lambda)
                            bodycode = bodycode::Expr
                            lnntmp = bodycode.args[end][1]::LineTypes
                            if isok(lnntmp)
                                lnn = lnntmp
                            end
                        end
                    end
                    if lnn === nothing
                        # Last resort: scan this frame's line table backwards from pc3.
                        i = codelocs(frame, pc3)
                        while i > 0
                            lnntmp = linetable(frame, i)
                            if isok(lnntmp)
                                lnn = lnntmp
                                break
                            end
                            i -= 1
                        end
                    end
                    if lnn !== nothing && isok(lnn)
                        for sig in signatures
                            add_signature!(methodinfo, sig, lnn)
                        end
                    end
                end
            elseif head === :(=)
                # If we're here, either isrequired[pc] is true, or the mode forces us to eval assignments
                pc = step_expr!(recurse, frame, stmt, true)
            elseif head === :call
                f = @lookup(frame, stmt.args[1])
                if f === Core.eval
                    # an @eval or eval block: this may contain method definitions, so intercept it.
                    evalmod = @lookup(frame, stmt.args[2])::Module
                    evalex = @lookup(frame, stmt.args[3])
                    value = nothing
                    for (newmod, newex) in ExprSplitter(evalmod, evalex)
                        if is_doc_expr(newex)
                            add_docexpr!(docexprs, newmod, newex)
                            newex = newex.args[4]
                        end
                        newex = unwrap(newex)
                        push_expr!(methodinfo, newmod, newex)
                        value = methods_by_execution!(recurse, methodinfo, docexprs, newmod, newex; mode=mode, skip_include=skip_include, disablebp=false)
                        pop_expr!(methodinfo)
                    end
                    assign_this!(frame, value)
                    pc = next_or_nothing!(frame)
                elseif skip_include && (f === modinclude || f === Base.include || f === Core.include)
                    # include calls need to be managed carefully from several standpoints, including
                    # path management and parsing new expressions
                    add_includes!(methodinfo, mod, @lookup(frame, stmt.args[2]))
                    assign_this!(frame, nothing) # FIXME: the file might return something different from `nothing`
                    pc = next_or_nothing!(frame)
                elseif f === Base.Docs.doc! # && mode !== :eval
                    # Register the docstring ourselves (quietly), mirroring doc!'s effect.
                    fargs = JuliaInterpreter.collect_args(frame, stmt)
                    popfirst!(fargs)
                    length(fargs) == 3 && push!(fargs, Union{}) # add the default sig
                    dmod::Module, b::Base.Docs.Binding, str::Base.Docs.DocStr, sig = fargs
                    if isdefined(b.mod, b.var)
                        tmpvar = getfield(b.mod, b.var)
                        if isa(tmpvar, Module)
                            dmod = tmpvar
                        end
                    end
                    # Workaround for julia#38819 on older Julia versions
                    if !isdefined(dmod, Base.Docs.META)
                        Base.Docs.initmeta(dmod)
                    end
                    m = get!(Base.Docs.meta(dmod), b, Base.Docs.MultiDoc())::Base.Docs.MultiDoc
                    if haskey(m.docs, sig)
                        currentstr = m.docs[sig]::Base.Docs.DocStr
                        redefine = currentstr.text != str.text
                    else
                        push!(m.order, sig)
                        redefine = true
                    end
                    # (Re)assign without the warning
                    if redefine
                        m.docs[sig] = str
                        str.data[:binding] = b
                        str.data[:typesig] = sig
                    end
                    assign_this!(frame, Base.Docs.doc(b, sig))
                    pc = next_or_nothing!(frame)
                else
                    # A :call Expr we don't want to intercept
                    pc = step_expr!(recurse, frame, stmt, true)
                end
            else
                # An Expr we don't want to intercept
                pc = step_expr!(recurse, frame, stmt, true)
            end
        else
            # A statement we don't want to intercept
            pc = step_expr!(recurse, frame, stmt, true)
        end
        pc === nothing && break
    end
    return isrequired[frame.pc] ? get_return(frame) : nothing
end
|
{"hexsha": "ab40c7a472e7cbd7941c340e45cd70bdf40874c8", "size": 22568, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/lowered.jl", "max_stars_repo_name": "t-bltg/Revise.jl", "max_stars_repo_head_hexsha": "165433a5e4fc5cdcee1d2ce20843ad476f1c5a83", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/lowered.jl", "max_issues_repo_name": "t-bltg/Revise.jl", "max_issues_repo_head_hexsha": "165433a5e4fc5cdcee1d2ce20843ad476f1c5a83", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/lowered.jl", "max_forks_repo_name": "t-bltg/Revise.jl", "max_forks_repo_head_hexsha": "165433a5e4fc5cdcee1d2ce20843ad476f1c5a83", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 49.6, "max_line_length": 169, "alphanum_fraction": 0.5463045019, "num_tokens": 4949}
|
import torch
import numpy as np
import sys,os
import torch.nn.functional as F
#from Common.Const import GPU
#GPU = 50
sys.path.append(os.path.join(os.getcwd(),"metrics"))
from CD_EMD.emd_ import emd_module
from CD_EMD.cd.chamferdist import ChamferDistance as CD
class PointcloudMixup(object):
    """Mix two point-cloud batches using EMD-based point correspondence.

    Points of ``pc2`` are permuted by the Earth Mover's Distance assignment so
    each point of ``pc1`` is blended with its matched point in ``pc2``.
    """

    def __init__(self):
        self.EMD = emd_module.emdModule()  # EMD solver providing the assignment
        self.cd = CD()  # Chamfer distance (not used by __call__)

    def __call__(self, pc1, pc2, mixrates=0.5):
        """Return the point-wise mixup of ``pc1`` and ``pc2``.

        Args:
            pc1, pc2: (B, N, C) CUDA tensors; ``pc2`` is permuted in place.
            mixrates: blend weight(s) toward ``pc2``. After the two
                ``unsqueeze_`` calls it is expanded to ``pc1``'s shape, so it
                appears to be a length-B sequence — TODO confirm with callers.
        """
        B, N, C = pc1.shape
        mix_rate = torch.tensor(mixrates).cuda().float()
        mix_rate = mix_rate.unsqueeze_(1).unsqueeze_(2)
        mix_rate_expand_xyz = mix_rate.expand(pc1.shape)
        # EMD returns (distances, assignment); only the assignment is needed.
        _, ass = self.EMD(pc1, pc2, 0.005, 300)
        ass = ass.long()
        for i in range(B):
            pc2[i] = pc2[i][ass[i]]
        xyz = pc1 * (1 - mix_rate_expand_xyz) + pc2 * mix_rate_expand_xyz
        # BUG FIX: the blended cloud was computed but never returned, so
        # __call__ silently yielded None.
        return xyz
class PointcloudToTensor(object):
    """Convert a numpy point array to a float32 torch tensor."""

    def __call__(self, points):
        tensor = torch.from_numpy(points)
        return tensor.float()
def angle_axis(angle: float, axis: np.ndarray) -> torch.Tensor:
    r"""Returns a 3x3 rotation matrix that performs a rotation around axis by angle

    Built with the Rodrigues rotation formula:
    ``R = cos(a) I + sin(a) [u]_x + (1 - cos(a)) u u^T`` for unit axis ``u``.

    Parameters
    ----------
    angle : float
        Angle to rotate by
    axis: np.ndarray
        Axis to rotate about (need not be unit length; it is normalised here)

    Returns
    -------
    torch.Tensor
        3x3 rotation matrix (float32)
    """
    # BUG FIX (docs): the docstring previously claimed a 4x4 matrix, but the
    # code has always produced a 3x3 rotation matrix.
    u = axis / np.linalg.norm(axis)
    cosval, sinval = np.cos(angle), np.sin(angle)

    # Cross-product (skew-symmetric) matrix [u]_x
    # yapf: disable
    cross_prod_mat = np.array([[0.0, -u[2], u[1]],
                               [u[2], 0.0, -u[0]],
                               [-u[1], u[0], 0.0]])

    R = torch.from_numpy(
        cosval * np.eye(3)
        + sinval * cross_prod_mat
        + (1.0 - cosval) * np.outer(u, u)
    )
    # yapf: enable
    return R.float()
class PointcloudRotatebyAngle(object):
    """Rotate every cloud in a (B, N, C) CUDA batch about the y-axis in place.

    Channels 3: (normals, when present) are rotated with the same matrix.
    """

    def __init__(self, rotation_angle=0.0):
        self.rotation_angle = rotation_angle

    def __call__(self, pc, rotation_angle):
        # The stored angle is refreshed from the argument on every call.
        self.rotation_angle = rotation_angle
        has_normals = pc.size(2) > 3
        cosval = np.cos(self.rotation_angle)
        sinval = np.sin(self.rotation_angle)
        rot = np.array([[cosval, 0, sinval],
                        [0, 1, 0],
                        [-sinval, 0, cosval]])
        rot = torch.from_numpy(rot).float().cuda()
        for i in range(pc.size(0)):
            cloud = pc[i, :, :]
            if has_normals:
                cloud[:, 0:3] = cloud[:, 0:3] @ rot
                cloud[:, 3:] = cloud[:, 3:] @ rot
            else:
                cloud = cloud @ rot
            pc[i, :, :] = cloud
        return pc
class PointcloudJitter_batch(object):
    """Add clipped Gaussian noise to the xyz channels of every cloud in a batch."""

    def __init__(self, std=0.01, clip=0.05):
        self.std, self.clip = std, clip

    def __call__(self, pc):
        # pc: (B, N, C); only the first three channels are perturbed, in place.
        for i in range(pc.size(0)):
            noise = pc.new(pc.size(1), 3).normal_(mean=0.0, std=self.std)
            noise.clamp_(-self.clip, self.clip)
            pc[i, :, 0:3] += noise
        return pc
class PointcloudJitter(object):
    """Add clipped Gaussian noise to the xyz channels of a single (N, C) cloud."""

    def __init__(self, std=0.01, clip=0.05):
        self.std, self.clip = std, clip

    def __call__(self, points):
        noise = points.new(points.size(0), 3).normal_(mean=0.0, std=self.std)
        points[:, 0:3] += noise.clamp_(-self.clip, self.clip)
        return points
class PointcloudScaleAndTranslate(object):
    """Per-axis random scale and translation of each cloud's xyz channels (in place, CUDA)."""

    def __init__(self, scale_low=2. / 3., scale_high=3. / 2., translate_range=0.2):
        self.scale_low = scale_low
        self.scale_high = scale_high
        self.translate_range = translate_range

    def __call__(self, pc):
        for i in range(pc.size(0)):
            # One independent scale/shift triple per sample.
            scale = np.random.uniform(low=self.scale_low, high=self.scale_high, size=[3])
            shift = np.random.uniform(low=-self.translate_range, high=self.translate_range, size=[3])
            scale_t = torch.from_numpy(scale).float().cuda()
            shift_t = torch.from_numpy(shift).float().cuda()
            pc[i, :, 0:3] = pc[i, :, 0:3] * scale_t + shift_t
        return pc
class PointcloudScale_batch(object):
    """Per-axis random scaling of each cloud's xyz channels (in place, CUDA)."""

    def __init__(self, scale_low=2. / 3., scale_high=3. / 2.):
        self.scale_low = scale_low
        self.scale_high = scale_high

    def __call__(self, pc):
        for i in range(pc.size(0)):
            factors = np.random.uniform(low=self.scale_low, high=self.scale_high, size=[3])
            pc[i, :, 0:3] *= torch.from_numpy(factors).float().cuda()
        return pc
class PointcloudScale(object):
    """Uniformly scale the xyz channels of one cloud by a single random factor."""

    def __init__(self, lo=0.8, hi=1.25):
        self.lo, self.hi = lo, hi

    def __call__(self, points):
        # One scalar factor for all points and all three axes, applied in place.
        factor = np.random.uniform(self.lo, self.hi)
        points[:, 0:3] *= factor
        return points
class PointcloudTranslate_batch(object):
    """Per-axis random translation of each cloud's xyz channels (in place, CUDA)."""

    def __init__(self, translate_range=0.2):
        self.translate_range = translate_range

    def __call__(self, pc):
        for i in range(pc.size(0)):
            shift = np.random.uniform(low=-self.translate_range, high=self.translate_range, size=[3])
            pc[i, :, 0:3] += torch.from_numpy(shift).float().cuda()
        return pc
class PointcloudTranslate(object):
    """Translate the xyz channels of one cloud by a single random offset.

    NOTE(review): a scalar offset is drawn and applied to all three axes,
    unlike ``PointcloudTranslate_batch`` which draws a per-axis shift —
    confirm the asymmetry is intentional.
    """

    def __init__(self, translate_range=0.1):
        self.translate_range = translate_range

    def __call__(self, points):
        offset = np.random.uniform(-self.translate_range, self.translate_range)
        points[:, 0:3] += offset
        return points
class PointcloudRotate(object):
    """Rotate a single (N, C) cloud about a fixed axis by a uniform random angle.

    With normals present (C > 3) the rotation is applied in place to both xyz
    and normal channels; otherwise a rotated copy is returned.
    """

    def __init__(self, axis=np.array([0.0, 1.0, 0.0])):
        self.axis = axis

    def __call__(self, points):
        angle = np.random.uniform() * 2 * np.pi
        rot_t = angle_axis(angle, self.axis).t()
        if points.size(1) > 3:
            points[:, 0:3] = torch.matmul(points[:, 0:3], rot_t)
            points[:, 3:] = torch.matmul(points[:, 3:], rot_t)
            return points
        return torch.matmul(points, rot_t)
class PointcloudRotate_batch(object):
    """Rotate each cloud in a (B, N, C) batch about a fixed axis, in place.

    Each sample gets its own uniform random angle; channels 3: (normals, if
    present) are rotated with the same matrix as the xyz channels.
    """

    def __init__(self, axis=np.array([0.0, 1.0, 0.0])):
        self.axis = axis

    def __call__(self, points):
        bsize = points.size()[0]
        # BUG FIX: normals must be detected from the channel dimension (2),
        # not the point dimension (1); the old `points.size(1) > 3` sent any
        # cloud with more than 3 points down the normals path.
        normals = points.size(2) > 3
        for i in range(bsize):
            rotation_angle = np.random.uniform() * 2 * np.pi
            rotation_matrix = angle_axis(rotation_angle, self.axis)
            if not normals:
                pc_xyz = points[i, :, 0:3]
                points[i, :, 0:3] = torch.matmul(pc_xyz, rotation_matrix.t())
            else:
                # BUG FIX: slice sample i; the old code sliced the batch
                # dimension (`points[:, 0:3]`), mixing samples and
                # mismatching shapes on assignment.
                pc_xyz = points[i, :, 0:3]
                pc_normals = points[i, :, 3:]
                points[i, :, 0:3] = torch.matmul(pc_xyz, rotation_matrix.t())
                points[i, :, 3:] = torch.matmul(pc_normals, rotation_matrix.t())
        return points
class PointcloudRotatePerturbation(object):
    """Apply a small random rotation (independent x/y/z angles) to one cloud."""

    def __init__(self, angle_sigma=0.06, angle_clip=0.18):
        self.angle_sigma, self.angle_clip = angle_sigma, angle_clip

    def _get_angles(self):
        # Gaussian angles, clipped so the perturbation stays small.
        raw = self.angle_sigma * np.random.randn(3)
        return np.clip(raw, -self.angle_clip, self.angle_clip)

    def __call__(self, points):
        ax, ay, az = self._get_angles()
        Rx = angle_axis(ax, np.array([1.0, 0.0, 0.0]))
        Ry = angle_axis(ay, np.array([0.0, 1.0, 0.0]))
        Rz = angle_axis(az, np.array([0.0, 0.0, 1.0]))
        rot_t = torch.matmul(torch.matmul(Rz, Ry), Rx).t()
        if points.size(1) > 3:
            # Rotate xyz and normals in place with the same matrix.
            points[:, 0:3] = torch.matmul(points[:, 0:3], rot_t)
            points[:, 3:] = torch.matmul(points[:, 3:], rot_t)
            return points
        return torch.matmul(points, rot_t)
class PointcloudRotatePerturbation_batch(object):
    """Apply an independent small random rotation to each cloud in a batch, in place."""

    def __init__(self, angle_sigma=0.06, angle_clip=0.18):
        self.angle_sigma, self.angle_clip = angle_sigma, angle_clip

    def _get_angles(self):
        # Gaussian x/y/z angles, clipped so the perturbation stays small.
        angles = np.clip(
            self.angle_sigma * np.random.randn(3), -self.angle_clip, self.angle_clip
        )
        return angles

    def __call__(self, points):
        bsize = points.size()[0]
        # BUG FIX: detect normals from the channel dimension (2), not the
        # point dimension (1); the old `points.size(1) > 3` sent any cloud
        # with more than 3 points down the normals path and crashed on
        # (B, N, 3) input.
        normals = points.size(2) > 3
        for i in range(bsize):
            angles = self._get_angles()
            Rx = angle_axis(angles[0], np.array([1.0, 0.0, 0.0]))
            Ry = angle_axis(angles[1], np.array([0.0, 1.0, 0.0]))
            Rz = angle_axis(angles[2], np.array([0.0, 0.0, 1.0]))
            rotation_matrix = torch.matmul(torch.matmul(Rz, Ry), Rx)
            if not normals:
                pc_xyz = points[i, :, 0:3]
                points[i, :, 0:3] = torch.matmul(pc_xyz, rotation_matrix.t())
            else:
                pc_xyz = points[i, :, 0:3]
                pc_normals = points[i, :, 3:]
                points[i, :, 0:3] = torch.matmul(pc_xyz, rotation_matrix.t())
                points[i, :, 3:] = torch.matmul(pc_normals, rotation_matrix.t())
        return points
class PointcloudRandomInputDropout_batch(object):
    """Randomly collapse a fraction of each cloud's points onto its first point.

    Mimics dropout without changing the tensor shape: dropped points have
    their xyz overwritten by the first point's xyz, in place.
    """

    def __init__(self, max_dropout_ratio=0.875):
        assert max_dropout_ratio >= 0 and max_dropout_ratio < 1
        self.max_dropout_ratio = max_dropout_ratio

    def __call__(self, pc):
        num_points = pc.size()[1]
        for i in range(pc.size()[0]):
            ratio = np.random.random() * self.max_dropout_ratio  # in [0, max_dropout_ratio)
            drop_idx = np.where(np.random.random((num_points)) <= ratio)[0]
            if len(drop_idx) == 0:
                continue
            cloud = pc[i, :, :]
            cloud[drop_idx.tolist(), 0:3] = cloud[0, 0:3].repeat(len(drop_idx), 1)
            pc[i, :, :] = cloud
        return pc
class PointcloudRandomInputDropout(object):
    """Randomly collapse a fraction of one cloud's points onto its first point."""

    def __init__(self, max_dropout_ratio=0.875):
        assert max_dropout_ratio >= 0 and max_dropout_ratio < 1
        self.max_dropout_ratio = max_dropout_ratio

    def __call__(self, pc):
        ratio = np.random.random() * self.max_dropout_ratio  # in [0, 0.875)
        mask = np.random.random((pc.shape[0])) <= ratio
        drop_idx = np.where(mask)[0]
        if len(drop_idx) > 0:
            # Duplicate the first point rather than shrinking the array.
            pc[drop_idx] = pc[0]
        return pc
|
{"hexsha": "0881a05b8d3abb54d9780d38aecd1a0c32f75d24", "size": 10874, "ext": "py", "lang": "Python", "max_stars_repo_path": "Common/data_utils.py", "max_stars_repo_name": "JiazeWang/SP-GAN", "max_stars_repo_head_hexsha": "455003f78b1160ebe0a2056005b069808c0df35b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 73, "max_stars_repo_stars_event_min_datetime": "2021-05-11T12:00:29.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T09:40:12.000Z", "max_issues_repo_path": "Common/data_utils.py", "max_issues_repo_name": "JiazeWang/SP-GAN", "max_issues_repo_head_hexsha": "455003f78b1160ebe0a2056005b069808c0df35b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2021-08-18T13:03:43.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-30T04:48:29.000Z", "max_forks_repo_path": "Common/data_utils.py", "max_forks_repo_name": "JiazeWang/SP-GAN", "max_forks_repo_head_hexsha": "455003f78b1160ebe0a2056005b069808c0df35b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 13, "max_forks_repo_forks_event_min_datetime": "2021-08-28T20:09:13.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-20T12:42:51.000Z", "avg_line_length": 33.4584615385, "max_line_length": 131, "alphanum_fraction": 0.5683281221, "include": true, "reason": "import numpy", "num_tokens": 2867}
|
\section{Homotopy groups of types}
\subsection{Pointed types}
\begin{defn}
\begin{enumerate}
\item A pointed type consists of a type $X$ equipped with a base point $x:X$. We will write $\UU_\ast$ for the type $\sm{X:\UU}X$ of all pointed types.
\item Let $(X,\ast_X)$ be a pointed type. A \define{pointed family} over $(X,\ast_X)$ consists of a type family $P:X\to \UU$ equipped with a base point $\ast_P:P(\ast_X)$.
\item Let $(P,\ast_P)$ be a pointed family over $(X,\ast_X)$. A \define{pointed section} of $(P,\ast_P)$ consists of a dependent function $f:\prd{x:X}P(x)$ and an identification $p:f(\ast_X)=\ast_P$. We define the \define{pointed $\Pi$-type} to be the type of pointed sections:
\begin{equation*}
\Pi^\ast_{(x:X)}P(x) \defeq \sm{f:\prd{x:X}P(x)}f(\ast_X)=\ast_P
\end{equation*}
In the case of two pointed types $X$ and $Y$, we may also view $Y$ as a pointed family over $X$. In this case we write $X\to_\ast Y$ for the type of pointed functions.
\item Given any two pointed sections $f$ and $g$ of a pointed family $P$ over $X$, we define the type of pointed homotopies
\begin{equation*}
f\htpy_\ast g \defeq \Pi^\ast_{(x:X)} f(x)=g(x),
\end{equation*}
where the family $x\mapsto f(x)=g(x)$ is equipped with the base point $\ct{p}{q^{-1}}$.
\end{enumerate}
\end{defn}
\begin{eg}
The circle $\sphere{1}$ is a pointed type with base point $\base:\sphere{1}$.
\end{eg}
\begin{eg}
If $X$ is a pointed type, then in the suspension of $X$ we have the canonical identification $\merid(\ast_X):\north=\south$. Therefore we do not have to worry about whether to choose $\north$ or $\south$ as the base point of $\susp{X}$.
\end{eg}
\begin{rmk}
Since pointed homotopies are defined as certain pointed sections, we can use the same definition of pointed homotopies again to consider pointed homotopies between pointed homotopies, and so on.
\end{rmk}
\begin{defn}
\begin{enumerate}
\item For any pointed type $X$, we define the \define{pointed identity function} $\mathsf{id}^\ast_X\defeq (\idfunc[X],\refl{\ast})$.
\item For any two pointed maps $f:X\to_\ast Y$ and $g:Y\to_\ast Z$, we define the \define{pointed composite}
\begin{equation*}
g\mathbin{\circ_\ast} f \defeq (g\circ f,\ct{\ap{g}{p_f}}{p_g}).
\end{equation*}
\end{enumerate}
\end{defn}
\subsection{Loop spaces}
\begin{defn}
Let $X$ be a pointed type with base point $x$. We define the \define{loop space} $\loopspace{X,x}$ of $X$ at $x$ to be the pointed type $x=x$ with base point $\refl{x}$.
\end{defn}
\begin{defn}
The loop space operation $\loopspacesym$ is \emph{functorial} in the sense that
\begin{enumerate}
\item For every pointed map $f:X\to_\ast Y$ there is a pointed map
\begin{equation*}
\loopspace{f}:\loopspace{X}\to_\ast \loopspace{Y},
\end{equation*}
  defined by $\loopspace{f}(\omega)\defeq \ct{p_f^{-1}}{\ct{\ap{f}{\omega}}{p_f}}$, which is base point preserving by $\mathsf{left\usc{}inv}(p_f)$.
\item For every pointed type $X$ there is a pointed homotopy
\begin{equation*}
\loopspace{\mathsf{id}_X^\ast}\htpy_\ast \mathsf{id}^\ast_{\loopspace{X}}.
\end{equation*}
  \item For any two pointed maps $f:X\to_\ast Y$ and $g:Y\to_\ast Z$, there is a pointed homotopy witnessing that the triangle
\begin{equation*}
\begin{tikzcd}
& \loopspace{Y} \arrow[dr,"\loopspace{g}"] \\
\loopspace{X} \arrow[rr,swap,"\loopspace{g\circ_\ast f}"] \arrow[ur,"\loopspace{f}"] & & \loopspace{Z}
\end{tikzcd}
\end{equation*}
of pointed types commutes.
\end{enumerate}
\end{defn}
\begin{thm}
Consider two pointed types $(X,x_0)$ and $(Y,y_0)$. Then there is an equivalence
\begin{equation*}
\eqv{(\susp X \to_\ast Y)}{(X \to_\ast \loopspace Y)}
\end{equation*}
\end{thm}
\begin{proof}
Computing with the universal property of the suspension
\begin{align*}
\susp X \to_\ast Y & \eqvsym \sm{y,y':Y} (X \to (y=y'))\times (y'=y_0) \\
& \eqvsym \sm{y:Y} X\to (y=y_0) \\
    & \eqvsym \sm{f:X\to (y_0=y_0)}f(x_0)=\refl{y_0}.
\end{align*}
In the last equivalence we used \cref{ex:coh_intro}.
\end{proof}
\subsection{Homotopy groups}
In homotopy type theory we use $0$-types to define groups.
\begin{defn}
A \define{group} $\mathcal{G}$ consists of a set $G$ with a unit $e:G$, a multiplication $x,y\mapsto x\cdot y$, and an inverse operation $x\mapsto x^{-1}$ satisfying the \define{group laws}:
\begin{align*}
(x\cdot y)\cdot z & =x\cdot(y\cdot z) & x^{-1}\cdot x & = e \\
    e\cdot x & = x & x\cdot x^{-1} & = e \\
    x\cdot e & = x.
\end{align*}
\end{defn}
\begin{defn}
For $n\geq 1$, the \define{$n$-th homotopy group} of a type $X$ at a base point $x:X$ consists of the type
\begin{equation*}
|\pi_n(X,x)| \defeq \trunc{0}{\loopspace[n]{X,x}}
\end{equation*}
equipped with the group operations inherited from the path operations on $\loopspace[n]{X,x}$.
Often we will simply write $\pi_n(X)$ when it is clear from the context what the base point of $X$ is.
For $n\jdeq 0$ we define $\pi_0(X,x)\defeq \trunc{0}{X}$.
\end{defn}
\begin{eg}
In \cref{cor:circle_loopspace} we established that $\eqv{\loopspace{\sphere{1}}}{\Z}$. It follows that
\begin{equation*}
\pi_1(\sphere{1})=\Z \qquad\text{ and }\qquad\pi_n(\sphere{1})=0\qquad\text{for $n\geq 2$.}
\end{equation*}
Furthermore, we have seen in \cref{circle_conn} that $\trunc{0}{\sphere{1}}$ is contractible.
Therefore we also have $\pi_0(\sphere{1})=0$.
\end{eg}
\subsection{The Eckmann-Hilton argument}
Given a diagram of identifications
\begin{equation*}
\begin{tikzcd}[column sep=7em]
x \arrow[r,equals,bend left=60,"p",""{name=A,below}] \arrow[r,equals,""{name=B},""{name=E,below},"{p'}"{near end}] \arrow[r,equals,bend right=60,"{p''}"{below},""{name=F,above}] \arrow[from=A,to=B,phantom,"r\Downarrow"] \arrow[from=E,to=F,phantom,"{r'\Downarrow}"]
& y
\end{tikzcd}
\end{equation*}
in a type $A$, where $r:p=p'$ and $r':p'=p''$,
we obtain by concatenation an identification $\ct{r}{r'}:p=p''$. This operation on identifications of identifications is sometimes called the \define{vertical concatenation}, because there is also a \emph{horizontal} concatenation operation.
\begin{defn}
Consider identifications of identifications $r:p=p'$ and $s:q=q'$, where $p,p':x=y$, and $q,q':y=z$ are identifications in a type $A$, as indicated in the diagram
\begin{equation*}
\begin{tikzcd}[column sep=huge]
x \arrow[r,equals,bend left=30,"p",""{name=A,below}] \arrow[r,equals,bend right=30,""{name=B,above},"{p'}"{below}] \arrow[from=A,to=B,phantom,"r\Downarrow"] & y \arrow[r,equals,bend left=30,"q",""{name=C,below}] \arrow[r,equals,bend right=30,""{name=D,above},"{q'}"{below}] \arrow[from=C,to=D,phantom,"s\Downarrow"] & z.
\end{tikzcd}
\end{equation*}
We define the \define{horizontal concatenation} $\ct[h]{r}{s}:\ct{p}{q}=\ct{p'}{q'}$ of $r$ and $s$.
\end{defn}
\begin{proof}
First we induct on $r$, so it suffices to define $\ct[h]{\refl{p}}{s}:\ct{p}{q}=\ct{p}{q'}$.
Next, we induct on $p$, so it suffices to define $\ct[h]{\refl{\refl{y}}}{s}:\ct{\refl{y}}{q}=\ct{\refl{y}}{q'}$.
Since $\ct{\refl{y}}{q}\jdeq q$ and $\ct{\refl{y}}{q'}\jdeq q'$, we take $\ct[h]{\refl{\refl{y}}}{s}\defeq s$.
\end{proof}
\begin{lem}
Horizontal concatenation satisfies the left and right unit laws.
\end{lem}
In the following lemma we establish the \define{interchange law} for horizontal and vertical concatenation.
\begin{lem}
Consider a diagram of the form
\begin{equation*}
\begin{tikzcd}[column sep=7em]
x \arrow[r,equals,bend left=60,"p",""{name=A,below}] \arrow[r,equals,""{name=B},""{name=E,below}] \arrow[r,equals,bend right=60,"{p''}"{below},""{name=F,above}] \arrow[from=A,to=B,phantom,"r\Downarrow"] \arrow[from=E,to=F,phantom,"{r'\Downarrow}"]
& y \arrow[r,equals,bend left=60,"q",""{name=C,below}] \arrow[r,equals,""{name=G,above},""{name=H,below}] \arrow[r,equals,bend right=60,""{name=D,above},"{q''}"{below}] \arrow[from=C,to=G,phantom,"s\Downarrow"] \arrow[from=H,to=D,phantom,"{s'\Downarrow}"] & z.
\end{tikzcd}
\end{equation*}
Then there is an identification
\begin{equation*}
\ct[h]{(\ct{r}{r'})}{(\ct{s}{s'})}=\ct{(\ct[h]{r}{s})}{(\ct[h]{r'}{s'})}.
\end{equation*}
\end{lem}
\begin{proof}
We use path induction on both $r$ and $r'$, followed by path induction on $p$. Then it suffices to show that
\begin{equation*}
\ct[h]{(\ct{\refl{\refl{y}}}{\refl{\refl{y}}})}{(\ct{s}{s'})}=\ct{(\ct[h]{\refl{\refl{y}}}{s})}{(\ct[h]{\refl{\refl{y}}}{s'})}.
\end{equation*}
Using the computation rules, we see that this reduces to
\begin{equation*}
\ct{s}{s'}=\ct{s}{s'},
\end{equation*}
which we have by reflexivity.
\end{proof}
\begin{thm}
For $n\geq 2$, the $n$-th homotopy group is abelian.
\end{thm}
\begin{proof}
Our goal is to show that
\begin{equation*}
\prd{r,s:\pi_2(X)} r\cdot s=s\cdot r.
\end{equation*}
Since we are constructing an identification in a set, we can use the universal property of $0$-truncation on both $r$ and $s$. Therefore it suffices to show that
\begin{equation*}
\prd{r,s:\refl{x_0}=\refl{x_0}} \tproj{0}r\cdot \tproj{0}s=\tproj{0}s\cdot \tproj{0}r.
\end{equation*}
  Now we use that $\tproj{0}{r}\cdot\tproj{0}{s}\jdeq \tproj{0}{\ct{r}{s}}$ and $\tproj{0}{s}\cdot\tproj{0}{r}\jdeq \tproj{0}{\ct{s}{r}}$, to see that it suffices to show that $\ct{r}{s}=\ct{s}{r}$, for every $r,s:\refl{x_0}=\refl{x_0}$. Using the unit laws and the interchange law, this is a simple computation:
  \begin{align*}
    \ct{r}{s} & = \ct{(\ct[h]{r}{\refl{x_0}})}{(\ct[h]{\refl{x_0}}{s})} \\
    & = \ct[h]{(\ct{r}{\refl{x_0}})}{(\ct{\refl{x_0}}{s})} \\
    & = \ct[h]{(\ct{\refl{x_0}}{r})}{(\ct{s}{\refl{x_0}})} \\
    & = \ct{(\ct[h]{\refl{x_0}}{s})}{(\ct[h]{r}{\refl{x_0}})} \\
    & = \ct{s}{r}.\qedhere
  \end{align*}
\end{proof}
\subsection{Simply connectedness of the $2$-sphere}
\begin{defn}
A pointed type $X$ is said to be \define{$n$-connected} if its homotopy groups $\pi_i(X)$ are trivial for $i\leq n$. A $0$-connected type is also just called \define{connected}, and a $1$-connected type is also called \define{simply connected}.
\end{defn}
We write $\ast$ for the base point of the sphere $\sphere{n}$.
\begin{thm}
For any $n:\N$ and any family $P$ of $n$-types over the $(n+2)$-sphere $\sphere{n+2}$, the function
\begin{equation*}
\Big(\prd{x:\sphere{n+2}}P(x)\Big)\to P(\ast)
\end{equation*}
given by $f\mapsto f(\ast)$, is an equivalence.
\end{thm}
\begin{cor}
The $2$-sphere is simply connected.
\end{cor}
\begin{proof}
Our goal is to show that $\pi_1(\sphere{2})$ is contractible. In other words, we have to show that $\trunc{0}{\loopspace{\sphere{2}}}$ is contractible. We do this by constructing a term of type
\begin{equation*}
\prd{t:\sphere{2}}\iscontr(\trunc{0}{\ast=t}).
\end{equation*}
First we note that
\begin{equation*}
\prd{t:\sphere{2}}\trunc{0}{\ast=t}
\end{equation*}
is equivalent to the type $\trunc{0}{\ast=\ast}$, of which we have the term $\tproj{0}{\refl{\ast}}$. Thus we obtain a dependent function $\alpha:\prd{t:\sphere{2}}\trunc{0}{\ast=t}$ equipped with $\alpha(\ast)=\tproj{0}{\refl{\ast}}$. Now we proceed to show that
\begin{equation*}
\prd{t:\sphere{2}}{p:\trunc{0}{\ast=t}} \alpha(t)=p
\end{equation*}
by the dependent universal property of $0$-truncation. Therefore it suffices to construct a term of type
\begin{equation*}
\prd{t:\sphere{2}}{p:\ast=t}\alpha(t)=\tproj{0}{p}.
\end{equation*}
This is immediate by path induction and the fact that $\alpha(\ast)=\tproj{0}{\refl{\ast}}$.
\end{proof}
\begin{exercises}
\item Show that the type of pointed families over a pointed type $(X,x)$ is equivalent to the type
\begin{equation*}
\sm{Y:\UU_\ast} Y\to_\ast X.
\end{equation*}
\item Given two pointed types $A$ and $X$, we say that $A$ is a (pointed) retract of $X$ if we have $i:A\to_\ast X$, a retraction $r:X\to_\ast A$, and a pointed homotopy $H:r\circ_\ast i\htpy_\ast \idfunc^\ast$.
\begin{subexenum}
\item Show that if $A$ is a pointed retract of $X$, then $\loopspace{A}$ is a pointed retract of $\loopspace{X}$.
\item Show that if $A$ is a pointed retract of $X$ and $\pi_n(X)$ is a trivial group, then $\pi_n(A)$ is a trivial group.
\end{subexenum}
\item Construct by path induction a family of maps
\begin{equation*}
\prd{A,B:\UU}{a:A}{b:B} (\id{\pairr{A,a}}{\pairr{B,b}})\to \sm{e:\eqv{A}{B}}e(a)=b,
\end{equation*}
and show that this map is an equivalence. In other words, an \emph{identification of pointed types} is a base point preserving equivalence.
\item Let $\pairr{A,a}$ and $\pairr{B,b}$ be two pointed types. Construct by path induction a family of maps
\begin{equation*}
\prd{f,g:A\to B}{p:f(a)=b}{q:g(a)=b} (\id{\pairr{f,p}}{\pairr{g,q}})\to \sm{H:f\htpy g} p = \ct{H(a)}{q},
\end{equation*}
and show that this map is an equivalence. In other words, an \emph{identification of pointed maps} is a base point preserving homotopy.
\item Show that if $A\leftarrow S\rightarrow B$ is a span of pointed types, then for any pointed type $X$ the square
\begin{equation*}
\begin{tikzcd}
(A\sqcup^S B \to_\ast X) \arrow[r] \arrow[d] & (B \to_\ast X) \arrow[d] \\
(A\to_\ast X) \arrow[r] & (S\to_\ast X)
\end{tikzcd}
\end{equation*}
is a pullback square.
\item \label{ex:yoneda_ptd_types}Let $f:A\to_\ast B$ be a pointed map. Show that the following are equivalent:
\begin{enumerate}
\item $f$ is an equivalence.
\item For any pointed type $X$, the precomposition map
\begin{equation*}
\blank\mathbin{\circ_\ast}f:(B\to_\ast X)\to_\ast (A\to_\ast X)
\end{equation*}
is an equivalence.
\end{enumerate}
\item In this exercise we prove the suspension-loopspace adjunction.
\begin{subexenum}
\item Construct a pointed equivalence
\begin{equation*}
      \tau_{X,Y}:(\susp(X)\to_\ast Y) \eqvsym_\ast (X\to_\ast \loopspace{Y})
\end{equation*}
for any two pointed spaces $X$ and $Y$.
\item Show that for any $f:X\to_\ast X'$ and $g:Y'\to_\ast Y$, there is a pointed homotopy witnessing that the square
\begin{equation*}
\begin{tikzcd}[column sep=large]
(\susp(X')\to_\ast Y') \arrow[r,"\tau_{X',Y'}"] \arrow[d,swap,"h\mapsto g\circ h\circ \susp(f)"] & (X'\to_\ast \loopspace{Y'}) \arrow[d,"h\mapsto\loopspace{g}\circ h\circ f"] \\
(\susp(X)\to_\ast Y) \arrow[r,swap,"\tau_{X,Y}"] & (X\to_\ast \loopspace{Y})
\end{tikzcd}
\end{equation*}
\end{subexenum}
\item Show that if
\begin{equation*}
\begin{tikzcd}
C \arrow[r] \arrow[d] & B \arrow[d] \\
A \arrow[r] & X
\end{tikzcd}
\end{equation*}
is a pullback square of pointed types, then so is
\begin{equation*}
\begin{tikzcd}
\loopspace{C} \arrow[r] \arrow[d] & \loopspace{B} \arrow[d] \\
\loopspace{A} \arrow[r] & \loopspace{X}.
\end{tikzcd}
\end{equation*}
\item
\begin{subexenum}
\item Show that if $X$ is $k$-truncated, then its $n$-th homotopy group $\pi_n(X)$ is trivial for each choice of base point, and each $n> k$.
\item Show that if $X$ is $(k+l)$-truncated, and for each $0< i\leq l$ the $(k+i)$-th homotopy groups $\pi_{k+i}(X)$ are trivial for each choice of base point, then $X$ is $k$-truncated.
\end{subexenum}
It is consistent to assume that there are types for which all homotopy groups are trivial, but which aren't contractible nonetheless. Such types are called \define{$\infty$-connected}.
\end{exercises}
|
{"hexsha": "767a6eb8dd957cb101440837648c36096dd308eb", "size": 14934, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "Book/homotopy_groups.tex", "max_stars_repo_name": "tadejpetric/HoTT-Intro", "max_stars_repo_head_hexsha": "f4228d6ecfc6cdb119c6e8b0e711fea05b98b2d5", "max_stars_repo_licenses": ["CC-BY-4.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Book/homotopy_groups.tex", "max_issues_repo_name": "tadejpetric/HoTT-Intro", "max_issues_repo_head_hexsha": "f4228d6ecfc6cdb119c6e8b0e711fea05b98b2d5", "max_issues_repo_licenses": ["CC-BY-4.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Book/homotopy_groups.tex", "max_forks_repo_name": "tadejpetric/HoTT-Intro", "max_forks_repo_head_hexsha": "f4228d6ecfc6cdb119c6e8b0e711fea05b98b2d5", "max_forks_repo_licenses": ["CC-BY-4.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 47.2594936709, "max_line_length": 320, "alphanum_fraction": 0.6759742869, "num_tokens": 5447}
|
# See https://github.com/TuringLang/Turing.jl/issues/1199
# Declare VarInfo bookkeeping operations as non-differentiable so that
# ChainRulesCore-based AD backends skip them instead of trying to trace
# through their mutation of the VarInfo.
ChainRulesCore.@non_differentiable push!!(
    vi::VarInfo, vn::VarName, r, dist::Distribution, gidset::Set{Selector}
)
# `updategid!` appears to update sampler/selector bookkeeping only — it has
# no differentiable output (confirm against its definition).
ChainRulesCore.@non_differentiable updategid!(
    vi::AbstractVarInfo, vn::VarName, spl::Sampler
)
# https://github.com/TuringLang/Turing.jl/issues/1595
# Custom Zygote adjoint for `dot_observe`: instead of differentiating the
# primal method directly, route the pullback through a local fallback whose
# log-likelihood accumulation (`sum` over `map`) Zygote can handle.
ZygoteRules.@adjoint function dot_observe(
    spl::Union{SampleFromPrior,SampleFromUniform},
    dists::AbstractArray{<:Distribution},
    value::AbstractArray,
    vi,
)
    # Fallback: bump the VarInfo's produce counter (per the helper's name;
    # confirm against `increment_num_produce!`), then sum the element-wise
    # log-likelihoods of each distribution/value pair.
    function dot_observe_fallback(spl, dists, value, vi)
        increment_num_produce!(vi)
        return sum(map(Distributions.loglikelihood, dists, value)), vi
    end
    # Differentiate the fallback under the caller's AD context so the
    # returned pullback composes with the surrounding Zygote trace.
    return ZygoteRules.pullback(__context__, dot_observe_fallback, spl, dists, value, vi)
end
|
{"hexsha": "edcac7874a44e6c7845a82ec253519bf7c53f899", "size": 771, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/compat/ad.jl", "max_stars_repo_name": "TuringLang/MicroPPL", "max_stars_repo_head_hexsha": "d222316a7a2fd5afe6ec74a7ec2a50c6f08c1d00", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/compat/ad.jl", "max_issues_repo_name": "TuringLang/MicroPPL", "max_issues_repo_head_hexsha": "d222316a7a2fd5afe6ec74a7ec2a50c6f08c1d00", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/compat/ad.jl", "max_forks_repo_name": "TuringLang/MicroPPL", "max_forks_repo_head_hexsha": "d222316a7a2fd5afe6ec74a7ec2a50c6f08c1d00", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.5217391304, "max_line_length": 89, "alphanum_fraction": 0.7418936446, "num_tokens": 215}
|
// ==========================================================================
// Munin Text Document.
//
// Copyright (C) 2010 Matthew Chaplain, All Rights Reserved.
//
// Permission to reproduce, distribute, perform, display, and to prepare
// derivitive works from this file under the following conditions:
//
// 1. Any copy, reproduction or derivitive work of any part of this file
// contains this copyright notice and licence in its entirety.
//
// 2. The rights granted to you under this license automatically terminate
// should you attempt to assert any patent claims against the licensor
// or contributors, which in any way restrict the ability of any party
// from using this software or portions thereof in any form under the
// terms of this license.
//
// Disclaimer: THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
// KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
// WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
// PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
// OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
// OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
// ==========================================================================
#ifndef MUNIN_TEXT_DOCUMENT_HPP_
#define MUNIN_TEXT_DOCUMENT_HPP_
#include "munin/export.hpp"
#include "munin/rectangle.hpp"
#include "odin/core.hpp"
#include <terminalpp/extent.hpp>
#include <terminalpp/point.hpp>
#include <boost/signal.hpp>
namespace terminalpp {
class string;
}
namespace munin { namespace text {
//* =========================================================================
/// \brief Provides a document model for a text component.
//* =========================================================================
// NOTE: this class follows the Non-Virtual Interface (NVI) idiom — the
// public non-virtual members delegate to the protected pure-virtual do_*
// hooks that concrete document models override.
class MUNIN_EXPORT document
{
public :
    //* =====================================================================
    /// \brief Destructor.
    //* =====================================================================
    virtual ~document();
    //* =====================================================================
    /// \brief Sets the document's size.
    //* =====================================================================
    void set_size(terminalpp::extent size);
    //* =====================================================================
    /// \brief Gets the document's size.
    //* =====================================================================
    terminalpp::extent get_size() const;
    //* =====================================================================
    /// \brief Sets the caret's location from an x,y position.
    //* =====================================================================
    void set_caret_position(terminalpp::point const& pt);
    //* =====================================================================
    /// \brief Returns the caret's location as an x,y position.
    //* =====================================================================
    terminalpp::point get_caret_position() const;
    //* =====================================================================
    /// \brief Sets the caret's location from an index into the text.
    //* =====================================================================
    void set_caret_index(odin::u32 index);
    //* =====================================================================
    /// \brief Returns the caret's location as an index into the text.
    //* =====================================================================
    odin::u32 get_caret_index() const;
    //* =====================================================================
    /// \brief Returns the size of the text.
    //* =====================================================================
    odin::u32 get_text_size() const;
    //* =====================================================================
    /// \brief Inserts the given characters at the caret or, optionally,
    /// a specified index.
    //* =====================================================================
    void insert_text(
        terminalpp::string const &text
      , boost::optional<odin::u32> index = boost::optional<odin::u32>());
    //* =====================================================================
    /// \brief Delete the specified region of text.
    /// \param range An open-close range to delete from.  For example,
    /// deleting the range [0..5) will delete the characters in positions
    /// 0, 1, 2, 3 and 4.  Deleting the range [0..size) will delete the
    /// entire contents of the document.
    //* =====================================================================
    void delete_text(std::pair<odin::u32, odin::u32> range);
    //* =====================================================================
    /// \brief Replaces the entire text content with the specified text.
    //* =====================================================================
    void set_text(terminalpp::string const &text);
    //* =====================================================================
    /// \brief Returns the number of lines in the text.
    //* =====================================================================
    odin::u32 get_number_of_lines() const;
    //* =====================================================================
    /// \brief Returns the specified line of text in the document.
    //* =====================================================================
    terminalpp::string get_line(odin::u32 index) const;
    //* =====================================================================
    /// \fn on_redraw
    /// \param regions The regions of the document that require redrawing.
    /// \brief Connect to this signal in order to receive notifications about
    /// when the component should be redrawn.
    //* =====================================================================
    boost::signal
    <
        // FIX: the parameter name had been corrupted by a text-encoding
        // error ("const ®ions"); restored to "const &regions".
        void (std::vector<munin::rectangle> const &regions)
    > on_redraw;
    //* =====================================================================
    /// \fn on_caret_position_changed
    /// \brief Connect to this signal in order to receive notifications about
    /// when the caret has changed position.
    //* =====================================================================
    boost::signal
    <
        void ()
    > on_caret_position_changed;
protected :
    //* =====================================================================
    /// \brief Called by set_size().  Derived classes must override this
    /// function in order to set the size of the document in a custom
    /// manner.
    //* =====================================================================
    virtual void do_set_size(terminalpp::extent size) = 0;
    //* =====================================================================
    /// \brief Called by get_size().  Derived classes must override this
    /// function in order to retrieve the size of the document in a
    /// custom manner.
    //* =====================================================================
    virtual terminalpp::extent do_get_size() const = 0;
    //* =====================================================================
    /// \brief Called by set_caret_position().  Derived classes must
    /// override this function in order to set the caret's position in a
    /// custom manner.
    //* =====================================================================
    virtual void do_set_caret_position(terminalpp::point const& pt) = 0;
    //* =====================================================================
    /// \brief Called by get_caret_position().  Derived classes must
    /// override this function in order to retrieve the caret's position in a
    /// custom manner.
    //* =====================================================================
    virtual terminalpp::point do_get_caret_position() const = 0;
    //* =====================================================================
    /// \brief Called by set_caret_index().  Derived classes must
    /// override this function in order to set the caret's index in a custom
    /// manner.
    //* =====================================================================
    virtual void do_set_caret_index(odin::u32 index) = 0;
    //* =====================================================================
    /// \brief Called by get_caret_index().  Derived classes must override
    /// this function in order to retrieve the caret's position in a custom
    /// manner.
    //* =====================================================================
    virtual odin::u32 do_get_caret_index() const = 0;
    //* =====================================================================
    /// \brief Called by get_text_size().  Derived classes must override
    /// this function in order to get the size of the text in a custom
    /// manner.
    //* =====================================================================
    virtual odin::u32 do_get_text_size() const = 0;
    //* =====================================================================
    /// \brief Called by insert_text().  Derived classes must override this
    /// function in order to insert text into the document in a custom
    /// manner.
    //* =====================================================================
    virtual void do_insert_text(
        terminalpp::string const& text
      , boost::optional<odin::u32> index) = 0;
    //* =====================================================================
    /// \brief Called by delete_text().  Derived classes must override this
    /// function in order to delete text in a custom manner.
    //* =====================================================================
    virtual void do_delete_text(std::pair<odin::u32, odin::u32> range) = 0;
    //* =====================================================================
    /// \brief Called by set_text().  Derived classes must override this
    /// function in order to set text in a custom manner.
    //* =====================================================================
    virtual void do_set_text(terminalpp::string const &text) = 0;
    //* =====================================================================
    /// \brief Called by get_number_of_lines().  Derived classes must
    /// override this function in order to get the number of lines in the
    /// document in a custom manner.
    //* =====================================================================
    virtual odin::u32 do_get_number_of_lines() const = 0;
    //* =====================================================================
    /// \brief Called by get_line().  Derived classes must override this
    /// function in order to return the text line in a custom manner.
    //* =====================================================================
    virtual terminalpp::string do_get_line(odin::u32 index) const = 0;
};
}}
#endif
|
{"hexsha": "75554b3a76e2fd91e642484af6caa3a752b68bdd", "size": 11488, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "munin/include/munin/text/document.hpp", "max_stars_repo_name": "KazDragon/paradice9", "max_stars_repo_head_hexsha": "bb89ce8bff2f99d2526f45b064bfdd3412feb992", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 9.0, "max_stars_repo_stars_event_min_datetime": "2015-12-16T07:00:39.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-05T13:29:28.000Z", "max_issues_repo_path": "munin/include/munin/text/document.hpp", "max_issues_repo_name": "KazDragon/paradice9", "max_issues_repo_head_hexsha": "bb89ce8bff2f99d2526f45b064bfdd3412feb992", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 43.0, "max_issues_repo_issues_event_min_datetime": "2015-07-18T11:13:15.000Z", "max_issues_repo_issues_event_max_datetime": "2017-07-15T13:18:43.000Z", "max_forks_repo_path": "munin/include/munin/text/document.hpp", "max_forks_repo_name": "KazDragon/paradice9", "max_forks_repo_head_hexsha": "bb89ce8bff2f99d2526f45b064bfdd3412feb992", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3.0, "max_forks_repo_forks_event_min_datetime": "2015-10-09T13:33:35.000Z", "max_forks_repo_forks_event_max_datetime": "2016-07-11T02:23:08.000Z", "avg_line_length": 49.947826087, "max_line_length": 78, "alphanum_fraction": 0.4045961003, "num_tokens": 1891}
|
# standard library
import argparse
import os
import glob
# third-party libraries
from scipy.io.wavfile import write
import numpy as np
# project libraries
from speech.utils.wave import array_from_wave, wav_duration
from speech.utils import convert
def main(audio_dir: str, use_extend: bool, use_resample: bool) -> None:
    """Run the requested post-processing steps on the background audio files.

    When ``use_extend`` is set, every file shorter than 60 seconds is
    duplicated onto itself until it reaches that length; when
    ``use_resample`` is set, every file is resampled to 16 kHz.
    """
    if use_extend:
        extend_audio(audio_dir, target_duration=60)
    if use_resample:
        resample(audio_dir, target_samp_rate=16000)
def extend_audio(audio_dir:str, target_duration:int) -> None:
    """
    Extends each wav file in audio_dir that is shorter than target_duration
    by tiling the audio on itself, then writes the result into an
    "extended" subdirectory next to the original file.

    Arguments:
        audio_dir (str): directory of audio files
        target_duration (int): length in seconds the audio files will be extended to
    """
    assert os.path.exists(audio_dir), "audio directory does not exist"
    pattern = os.path.join(audio_dir, "*.wav")
    for audio_fn in glob.glob(pattern):
        audio_duration = wav_duration(audio_fn)
        if audio_duration >= target_duration:
            # already long enough: nothing to do
            continue
        data, samp_rate = array_from_wave(audio_fn)
        # number of whole copies and the fractional remainder (in seconds)
        # needed to reach target_duration; whole_dup >= 1 here because
        # audio_duration < target_duration
        whole_dup, frac_dup = divmod(target_duration, audio_duration)
        # tile the whole copies in one call instead of a concat-per-iteration
        # loop (avoids repeated reallocation of the growing array)
        output_data = np.concatenate([data] * int(whole_dup))
        # append the fractional tail so the result lands on target_duration
        fraction_index = int(frac_dup * samp_rate)
        output_data = np.concatenate((output_data, data[:fraction_index]))
        extended_dir = os.path.join(os.path.dirname(audio_fn), "extended")
        # exist_ok avoids the check-then-create race of os.mkdir
        os.makedirs(extended_dir, exist_ok=True)
        file_name = os.path.basename(audio_fn)
        extended_name = file_name[:-4] + "_extended.wav"
        ext_audio_path = os.path.join(extended_dir, extended_name)
        write(ext_audio_path, samp_rate, output_data)
def resample(audio_dir:str, target_samp_rate:int)->None:
    """
    Converts all of the audio files in audio_dir to wav format, writing the
    results into a "resampled" subdirectory of audio_dir.

    NOTE(review): despite the name, target_samp_rate is currently unused --
    the commented-out sox command below consumed it, but convert.to_wave is
    called without it. Verify whether convert.to_wave applies a fixed rate.

    Arguments
        audio_dir (str): the audio directory whose files will be resampled
        target_samp_rate(int): the sample rate the files will be resampled to
    """
    assert os.path.exists(audio_dir) == True, "audio directory does not exist"
    # output directory for the converted files
    out_dir = os.path.join(audio_dir, "resampled")
    if not os.path.exists(out_dir):
        os.mkdir(out_dir)
    # collect every supported audio file directly inside audio_dir
    extensions = ["*.wav", "*.mp3", "*.aiff", "*.flac"]
    audio_files = list()
    for ext in extensions:
        pattern = os.path.join(audio_dir, ext)
        audio_files.extend(glob.glob(pattern))
    for audio_fn in audio_files:
        # build "<basename>.wav" inside the output directory
        filename = os.path.splitext(os.path.basename(audio_fn))[0]
        wav_file = filename + os.path.extsep + "wav"
        out_path = os.path.join(out_dir, wav_file)
        convert.to_wave(audio_fn, out_path)
        # earlier implementation shelled out to sox with the target rate:
        # sox_params = "sox \"{}\" -r {} -c 1 -b 16 {}".format(audio_fn, target_samp_rate, out_path)
        # os.system(sox_params)
def resample_with_sox(path, sample_rate, tar_filename=None, start_time=0, end_time=None):
    """
    Resamples (and optionally trims) a recording with the external sox tool
    and returns the resulting samples.

    Bug fix: the original body referenced tar_filename, start_time and
    end_time without them being defined anywhere, so every call raised a
    NameError. They are now keyword parameters with backward-compatible
    defaults.

    Arguments:
        path (str): input audio file
        sample_rate (int): sample rate to resample to
        tar_filename (str, optional): output file; defaults to
            "<path without extension>_resampled.wav"
        start_time (float, optional): trim start in seconds (used only when
            end_time is given)
        end_time (float, optional): trim end in seconds; when None, no trim
            is applied

    Returns:
        numpy.ndarray: the samples of the resampled recording
    """
    if tar_filename is None:
        tar_filename = os.path.splitext(path)[0] + "_resampled.wav"
    # only emit the trim effect when an end time was requested
    trim_section = ""
    if end_time is not None:
        trim_section = " trim {} ={}".format(start_time, end_time)
    # NOTE(review): command is built as a shell string; callers must not pass
    # untrusted paths here (shell injection risk with os.system).
    sox_params = "sox \"{}\" -r {} -c 1 -b 16 -e si {}{} >/dev/null 2>&1".format(
        path, sample_rate, tar_filename, trim_section)
    os.system(sox_params)
    noise_data, samp_rate = array_from_wave(tar_filename)
    return noise_data
if __name__ == "__main__":
    # Command-line entry point: choose which processing steps to run.
    cli = argparse.ArgumentParser()
    cli.add_argument("--audio-dir",
                     help="Directory that contains the background audio files.")
    cli.add_argument("--extend", action='store_true', default=False,
                     help="Boolean flag that if present will call the extend_audio method ")
    cli.add_argument("--resample", action='store_true', default=False,
                     help="Boolean flag that if present will call the resample method ")
    cli_args = cli.parse_args()
    main(cli_args.audio_dir, cli_args.extend, cli_args.resample)
|
{"hexsha": "c785bcbbd5e36ab415e4a21dacd84f15781a2a88", "size": 4517, "ext": "py", "lang": "Python", "max_stars_repo_path": "speech/utils/process_noise.py", "max_stars_repo_name": "dzubke/speech-lite", "max_stars_repo_head_hexsha": "65f83ac2b7551650820f079ce5152741f2a6fdb8", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "speech/utils/process_noise.py", "max_issues_repo_name": "dzubke/speech-lite", "max_issues_repo_head_hexsha": "65f83ac2b7551650820f079ce5152741f2a6fdb8", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "speech/utils/process_noise.py", "max_forks_repo_name": "dzubke/speech-lite", "max_forks_repo_head_hexsha": "65f83ac2b7551650820f079ce5152741f2a6fdb8", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.2782608696, "max_line_length": 117, "alphanum_fraction": 0.6446756697, "include": true, "reason": "import numpy,from scipy", "num_tokens": 994}
|
import _init_paths
from fast_rcnn.train import get_training_roidb, train_net
from fast_rcnn.config import cfg, cfg_from_file, cfg_from_list, get_output_dir
from datasets.factory import get_imdb
import datasets.imdb
import caffe
import argparse
import pprint
import numpy as np
import sys
import zl_config as C
from fast_rcnn.test import im_detect
import matplotlib.pyplot as plt
from fast_rcnn.nms_wrapper import nms
def softmax(x):
    """Return the softmax of x, shifting by max(x) for numerical stability."""
    shifted = x - np.max(x)
    exps = np.exp(shifted)
    return exps / np.sum(exps)
# The 20 PASCAL VOC object categories plus the implicit background class at
# index 0; ordering must match the network's class-score layout.
CLASSES = ('__background__',
           'aeroplane', 'bicycle', 'bird', 'boat',
           'bottle', 'bus', 'car', 'cat', 'chair',
           'cow', 'diningtable', 'dog', 'horse',
           'motorbike', 'person', 'pottedplant',
           'sheep', 'sofa', 'train', 'tvmonitor')
def combined_roidb(imdb_names):
    """Build a combined (imdb, roidb) pair from one or more '+'-joined dataset names."""
    def _load_one(name):
        # fetch the dataset wrapper and prepare its training roidb
        dataset = get_imdb(name)
        print('Loaded dataset `{:s}` for training'.format(dataset.name))
        dataset.set_proposal_method(cfg.TRAIN.PROPOSAL_METHOD)
        print('Set proposal method: {:s}'.format(cfg.TRAIN.PROPOSAL_METHOD))
        return get_training_roidb(dataset)
    roidbs = [_load_one(name) for name in imdb_names.split('+')]
    merged = roidbs[0]
    if len(roidbs) == 1:
        imdb = get_imdb(imdb_names)
    else:
        # merge the extra roidbs into the first and use a generic imdb wrapper
        for extra in roidbs[1:]:
            merged.extend(extra)
        imdb = datasets.imdb.imdb(imdb_names)
    return imdb, merged
# Debug/visualization script (Python 2): load a VOC test roidb, draw the
# ground-truth boxes of one image, then run the R-FCN attention network on
# that image and visualize its top attention-scored rois.
cfg_from_file('experiments/cfgs/rfcn_end2end.yml')
imdb, roidb = combined_roidb('voc_0712_test')
import cv2
# pick an arbitrary annotation to inspect
ann = roidb[9]
im = cv2.imread(ann['image'])
idx = 0
# draw all ground-truth boxes in green
for bb in ann['boxes']:
    cv2.rectangle(im,(bb[0],bb[1]),(bb[2],bb[3]),(0,255,0),1)
cv2.imwrite('/home/zawlin/data/all.jpg',im)
cv2.imshow('im2',im)
#cv2.waitKey(0)
#cv2.destroyAllWindows()
net =None
# feed ground-truth boxes instead of RPN proposals
cfg.TEST.HAS_RPN=False
prototxt = 'models/pascal_voc/ResNet-50/rfcn_end2end/test_iccv_rpn.prototxt'
#model = 'data/rfcn_models/resnet50_rfcn_iter_600.caffemodel'
model = 'output/rfcn_end2end/voc_0712_train/resnet50_rfcn_iter_600.caffemodel'
caffe.set_mode_gpu()
caffe.set_device(0)
net = caffe.Net(prototxt, model, caffe.TEST)
print ann['image']
#im = cv2.imread('data/demo/004545.jpg')
# reload a clean copy of the image (the previous one has GT boxes drawn on it)
im = cv2.imread(ann['image'])
print ann['boxes']
im_detect(net, im,boxes=ann['boxes'])
# pull the attention scores and their rois out of the network blobs
attention_caffe = net.blobs['attention'].data.copy()
rois = net.blobs['rois'].data
attention = attention_caffe[:,0].squeeze()
# sort rois by attention score, highest first
ind = np.argsort(attention)[::-1]
attention = attention[ind]
rois = rois[ind]
#rois_all = np.hstack((rois[:,1:],np.zeros(rois.shape[0],np.float32)))
#rois_all = rois_all[ind]
# draw the 20 highest-attention rois in blue with their scores
# NOTE(review): roi[1:5] is assumed to be (x1, y1, x2, y2) with roi[0] the
# batch index -- confirm against the rois blob layout.
for i in xrange(20):
    roi = rois[i]
    ascore = '%0.3f'%attention[i]
    roi = rois[i]
    cv2.putText(im,ascore,(int(roi[1]+10),int(roi[2]+20)),cv2.FONT_HERSHEY_COMPLEX,1.0,(255,0,0),1)
    #cv2.rectangle(im,(roi[1],roi[2]),(roi[3],roi[4]),(255,0,0),1)
    cv2.rectangle(im,(roi[1],roi[2]),(roi[3],roi[4]),(255,0,0),1)
cv2.imshow('im',im)
cv2.imwrite('/home/zawlin/data/bus.jpg',im)
cv2.waitKey(0)
|
{"hexsha": "5a0a4953d9562e1533157b38a142a61f34a1faf3", "size": 3032, "ext": "py", "lang": "Python", "max_stars_repo_path": "tools/debug_iccv_gt.py", "max_stars_repo_name": "yjy941124/PPR-FCN", "max_stars_repo_head_hexsha": "1eba5515b37e7b32413efdf14bb0c22a2e46fee9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 20, "max_stars_repo_stars_event_min_datetime": "2017-10-16T18:12:51.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-23T02:34:20.000Z", "max_issues_repo_path": "tools/debug_iccv_gt.py", "max_issues_repo_name": "yjy941124/PPR-FCN", "max_issues_repo_head_hexsha": "1eba5515b37e7b32413efdf14bb0c22a2e46fee9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2018-11-10T04:59:48.000Z", "max_issues_repo_issues_event_max_datetime": "2021-01-21T04:51:31.000Z", "max_forks_repo_path": "tools/debug_iccv_gt.py", "max_forks_repo_name": "yjy941124/PPR-FCN", "max_forks_repo_head_hexsha": "1eba5515b37e7b32413efdf14bb0c22a2e46fee9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2017-10-17T00:54:42.000Z", "max_forks_repo_forks_event_max_datetime": "2018-04-08T15:09:40.000Z", "avg_line_length": 30.32, "max_line_length": 99, "alphanum_fraction": 0.6797493404, "include": true, "reason": "import numpy", "num_tokens": 975}
|
import sys
sys.path.append("../../")
from mushroom_env import fill_volume_env_3D_multiple_obj_3Dac
from mushroom_env import fill_volume_env_3D_multiple_obj_3Dac_4
from mushroom_env import fill_volume_env_3D_multiple_obj_3Dac_wrap_robot
from mushroom_env import fill_volume_env_3D_multiple_obj_3Dac_wrap_robot_1
from mushroom_env import fill_volume_env_3D_multiple_obj_3Dac_wrap_robot_4
from mushroom_env import fill_volume_env_3D_multiple_obj_3Dac_more_objects_wrap_robot
from mushroom_env import fill_volume_env_3D_multiple_obj_3Dac_more_objects2_20acs
from mushroom_env import fill_volume_env_3D_multiple_obj_3Dac_more_objects
import argparse
import datetime
import pathlib
import os
import shutil
import inspect
import random
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.utils.tensorboard import SummaryWriter
from collections import defaultdict
from mushroom_rl.algorithms.value import AveragedDQN, CategoricalDQN, DQN,\
DoubleDQN, MaxminDQN, DuelingDQN, DQNMultidim
from mushroom_rl.approximators.parametric import TorchApproximator
from mushroom_rl.core import Core, Logger, CoreParallel
from mushroom_rl.environments import *
from mushroom_rl.policy import EpsGreedy, EpsGreedyMultidimensional, EpsGreedyMultidimensionalMCTS, QMultidimensionalMCTS
from mushroom_rl.utils.dataset import compute_metrics
from mushroom_rl.utils.parameters import LinearParameter, Parameter
from mushroom_rl.utils.replay_memory import PrioritizedReplayMemory
from mushroom_rl.utils.seeds import fix_random_seed
from mushroom_rl.utils.function_calls import wrapped_call
from mushroom_rl.utils.preprocessors import MinMaxPreprocessor
from custom_networks.mpnn_multidim_attention import MPNN_Multidim_Attention, MPNN_Multidim_Full_Attention, MPNN_Single_Full_Attention, Pytorch_Transformer, MPNN_Multidim_Full_Attention_Multiple
from custom_networks.mpnn_multidim import MPNN_Multidim, MPNN_Multidim_MORE
import gc
"""
This script runs a custom env
"""
# This is the loss function that is used for training the agents
# Note: for the q- and e-MCTS agents, there is also a cross entropy regularization possible, which simply gets
# activated by setting the loss on the cross entropy loss != 0
class SpecialLoss(nn.Module):
    """
    Smooth-L1 regression loss on the first `batch_size` entries of the
    prediction/target vectors, optionally combined with a cross-entropy
    regularization term on the remaining entries (used by the q-/e-MCTS
    agents). The CE term is active iff `weight_ce_loss != 0`; entries past
    `batch_size` are interpreted as `search_budget` search statistics per
    sample.
    """
    def __init__(self, batch_size, search_budget, weight_normal_loss=0.5, weight_ce_loss=0.5):
        super().__init__()
        self.batch_size = batch_size
        self.search_budget = search_budget
        self.m = nn.Softmax(dim=1)
        self.weight_normal_loss = weight_normal_loss
        self.weight_ce_loss = weight_ce_loss
        print ("WEIGHTS NORMAL " + str(self.weight_normal_loss) + " WEIGHT CE: " + str(self.weight_ce_loss))

    def forward(self, input, target):
        """Return the weighted combination of the two loss terms."""
        # regular TD-style regression loss on the first batch_size entries
        normal_input = input[:self.batch_size]
        normal_target = target[:self.batch_size]
        loss1 = F.smooth_l1_loss(normal_input, normal_target)
        # guard clause replaces the original's duplicated
        # `if weight_ce_loss != 0` check (it was tested twice)
        if self.weight_ce_loss == 0:
            return self.weight_normal_loss * loss1
        # cross-entropy between softmax-normalized search statistics:
        # one row of `search_budget` values per sample
        add_input = input[self.batch_size:].reshape((-1, int(self.search_budget)))
        add_target = target[self.batch_size:].reshape((-1, int(self.search_budget)))
        soft_input = torch.log(self.m(add_input))
        soft_target = self.m(add_target)
        loss2 = -(1 / self.batch_size) * torch.sum(torch.sum(torch.multiply(soft_target, soft_input), dim=1))
        return self.weight_normal_loss * loss1 + self.weight_ce_loss * loss2
def print_epoch(epoch, logger):
    """Emit a three-line banner marking the start of the given epoch."""
    header = '################################################################'
    divider = '----------------------------------------------------------------'
    logger.info(header)
    logger.info('Epoch: %d' % epoch)
    logger.info(divider)
def get_stats(dataset, logger):
    """Compute min/max/mean reward and completed-game metrics, log and return them."""
    metrics = compute_metrics(dataset)
    message = ('min_reward: %f, max_reward: %f, mean_reward: %f,'
               ' games_completed: %d' % metrics)
    logger.info(message)
    return metrics
def experiment():
    """
    Run a single training or evaluation experiment.

    Parses command-line arguments, constructs the selected block-stacking
    MDP, builds the policy/approximator/agent, and then either evaluates a
    pre-trained agent (when --load-path is given) or runs the DQN training
    loop with periodic evaluation and checkpointing.

    Returns:
        list: one entry per evaluation, each being the tuple returned by
        ``compute_metrics`` (min reward, max reward, mean reward, games
        completed); or 0 when no environment was specified.
    """
    # Argument parser
    parser = argparse.ArgumentParser()
    arg_game = parser.add_argument_group('Game')
    arg_game.add_argument("--name",
                          type=str,
                          default='Custom Cube 2 ENV',
                          help='Gym ID of the Atari game.')
    arg_game.add_argument("--num-blocks",
                          type=int,
                          default=None,
                          help='Number of blocks to be placed')
    arg_game.add_argument("--env-grid",
                          type=int,
                          default=3,
                          help='Size of grid of env')
    arg_game.add_argument("--graph-fconnected",
                          type=int,
                          default=1,
                          help='Specify if graph should be fully connected (1) - or not (0)')
    arg_mem = parser.add_argument_group('Replay Memory')
    arg_mem.add_argument("--initial-replay-size", type=int, default=1000,
                         help='Initial size of the replay memory.')
    arg_mem.add_argument("--max-replay-size", type=int, default=150000,
                         help='Max size of the replay memory.')
    arg_mem.add_argument("--prioritized", action='store_true',
                         help='Whether to use prioritized memory or not.')
    arg_net = parser.add_argument_group('Deep Q-Network')
    arg_net.add_argument("--optimizer",
                         choices=['adadelta',
                                  'adam',
                                  'rmsprop',
                                  'rmspropcentered'],
                         default='adam',
                         help='Name of the optimizer to use.')
    arg_net.add_argument("--learning-rate", type=float, default=.0001,
                         help='Learning rate value of the optimizer.')
    arg_net.add_argument("--decay", type=float, default=.95,
                         help='Discount factor for the history coming from the'
                              'gradient momentum in rmspropcentered and'
                              'rmsprop')
    arg_net.add_argument("--epsilon", type=float, default=1e-8,
                         help='Epsilon term used in rmspropcentered and'
                              'rmsprop')
    arg_alg = parser.add_argument_group('Algorithm')
    arg_alg.add_argument("--algorithm", choices=['dqn', 'ddqn', 'adqn', 'mmdqn',
                                                 'cdqn', 'dueldqn'],
                         default='dqn',
                         help='Name of the algorithm. dqn is for standard'
                              'DQN, ddqn is for Double DQN and adqn is for'
                              'Averaged DQN.')
    arg_alg.add_argument("--n-approximators", type=int, default=1,
                         help="Number of approximators used in the ensemble for"
                              "AveragedDQN or MaxminDQN.")
    arg_alg.add_argument("--batch-size", type=int, default=32,
                         help='Batch size for each fit of the network.')
    arg_alg.add_argument("--target-update-frequency", type=int, default=5000,
                         help='Number of collected samples before each update'
                              'of the target network.')
    arg_alg.add_argument("--evaluation-frequency", type=int, default=100,
                         help='Number of collected samples before each'
                              'evaluation. An epoch ends after this number of'
                              'steps')
    arg_alg.add_argument("--train-frequency", type=int, default=4,
                         help='Number of collected samples before each fit of'
                              'the neural network.')
    arg_alg.add_argument("--max-steps", type=int, default=50000000,
                         help='Total number of collected samples.')
    arg_alg.add_argument("--final-exploration-frame", type=int, default=200000,
                         help='Number of collected samples until the exploration'
                              'rate stops decreasing.')
    arg_alg.add_argument("--initial-exploration-rate", type=float, default=1.,
                         help='Initial value of the exploration rate.')
    arg_alg.add_argument("--final-exploration-rate", type=float, default=.05,#.1
                         help='Final value of the exploration rate. When it'
                              'reaches this values, it stays constant.')
    arg_alg.add_argument("--test-exploration-rate", type=float, default=.05,
                         help='Exploration rate used during evaluation.')
    arg_alg.add_argument("--test-samples", type=int, default=10,
                         help='Number of collected samples for each'
                              'evaluation.')
    arg_alg.add_argument("--n-atoms", type=int, default=51,
                         help='Number of atoms for Categorical DQN.')
    arg_alg.add_argument("--v-min", type=int, default=-10,
                         help='Minimum action-value for Categorical DQN.')
    arg_alg.add_argument("--v-max", type=int, default=10,
                         help='Maximum action-value for Categorical DQN.')
    arg_utils = parser.add_argument_group('Utils')
    arg_utils.add_argument('--use-cuda', action='store_true',
                           help='Flag specifying whether to use the GPU.')
    arg_utils.add_argument('--save', action='store_true',
                           help='Flag specifying whether to save the model.')
    arg_utils.add_argument('--save-path', type=str, default='',
                           help='Flag specifying whether to save the model.')
    arg_utils.add_argument('--load-path', type=str,
                           help='Path of the model to be loaded.')
    arg_utils.add_argument('--render', action='store_true',
                           help='Flag specifying whether to render the game.')
    arg_utils.add_argument('--quiet', action='store_true',
                           help='Flag specifying whether to hide the progress'
                                'bar.')
    arg_utils.add_argument('--debug', action='store_true',
                           help='Flag specifying whether the script has to be'
                                'run in debug mode.')
    arg_utils.add_argument('--seed', type=int, default=-1,
                           help='Flag specifying whether to save the model.')
    arg_utils.add_argument('--model', type=str, default='s2v',
                           help='Flag specifying whether to save the model.')
    arg_utils.add_argument('--env', type=str, default='default',
                           help='Specify which environment to use')
    arg_mcts = parser.add_argument_group('MCTS')
    arg_mcts.add_argument('--use-mcts', type=int, default=0,
                          help='Specify whether to use mcts or not')
    arg_mcts.add_argument('--num-workers', type=int, default=4,
                          help='Specify the number of workers for parallel sampling')
    arg_mcts.add_argument('--mcts-type', type=str, default='e_mcts',
                          help='Specify which MCTS version to run - e_mcts or q_mcts')
    arg_mcts.add_argument('--rollout-policy', type=str, default='e_greedy',
                          help='Specify the policy to use when performing rollouts/playouts')
    arg_mcts.add_argument('--search-budget-iter', type=int, default=10,
                          help='Specify the search budget for MCTS search')
    arg_mcts.add_argument('--rollout-depth', type=int, default=0,
                          help='Specify the depth of the rollout before using q-estimate')
    arg_mcts.add_argument('--num-avg-rollouts', type=int, default=1,
                          help='Specify over how much rollouts to normalize to estimate a nodes value')
    arg_mcts.add_argument('--expansion-policy', type=str, default='e_greedy',
                          help='Specify which policy to use to expand nodes')
    arg_mcts.add_argument('--allow-repeat-before-expansion', type=int, default=0,
                          help='Specify whether it is allowed to further explore although not all nodes have been visited')
    arg_mcts.add_argument('--uct-expl-const', type=float, default=2.0,
                          help='Specify exploration constant for uct expansion policy')
    arg_mcts.add_argument('--act-select-eps', type=int, default=1,
                          help='whether or not to use eps greedy selection inside search module')
    arg_mcts.add_argument('--prior-estimate', type=int, default=1,
                          help='whether or not to use the Q-prior, i.e. Q(s,a) in the search for estimating the value')
    arg_mcts.add_argument('--w-normal-loss', type=float, default=0.5,
                          help='Weight of the normal loss')
    arg_mcts.add_argument('--w-ce-loss', type=float, default=0.5,
                          help='Weight of the special ce loss')
    args = parser.parse_args()
    # per-evaluation metric tuples (min, max, mean reward, games completed)
    scores = list()
    optimizer = dict()
    if args.optimizer == 'adam':
        optimizer['class'] = optim.Adam
        optimizer['params'] = dict(lr=args.learning_rate,
                                   eps=args.epsilon)
    elif args.optimizer == 'adadelta':
        optimizer['class'] = optim.Adadelta
        optimizer['params'] = dict(lr=args.learning_rate,
                                   eps=args.epsilon)
    elif args.optimizer == 'rmsprop':
        optimizer['class'] = optim.RMSprop
        optimizer['params'] = dict(lr=args.learning_rate,
                                   alpha=args.decay,
                                   eps=args.epsilon)
    elif args.optimizer == 'rmspropcentered':
        optimizer['class'] = optim.RMSprop
        optimizer['params'] = dict(lr=args.learning_rate,
                                   alpha=args.decay,
                                   eps=args.epsilon,
                                   centered=True)
    else:
        raise ValueError
    # Summary folder -> path where to store results of the training
    if (args.save_path==''):
        folder_name = './logs/custom_' + args.algorithm + '_' + args.name +\
            '_' + datetime.datetime.now().strftime('%Y-%m-%d--%H_%M_%S_%f')
    else:
        folder_name = args.save_path + '/custom_' + args.algorithm + '_' + args.name +\
            '_' + datetime.datetime.now().strftime('%Y-%m-%d--%H_%M_%S_%f')
    if (args.save):
        pathlib.Path(folder_name).mkdir(parents=True)
        logger = Logger(DQN.__name__, results_dir=folder_name, log_console=True)
        writer = SummaryWriter(folder_name)
        loglist = defaultdict(list)
    else:
        logger = Logger(DQN.__name__, results_dir=None)
        writer = None
        loglist = None
    logger.strong_line()
    logger.info('Experiment Algorithm: ' + DQN.__name__)
    # Settings: tiny sizes in debug mode, otherwise the CLI values
    if args.debug:
        initial_replay_size = 50
        max_replay_size = 500
        train_frequency = 5
        target_update_frequency = 10
        test_samples = 20
        evaluation_frequency = 50
        max_steps = 1000
    else:
        initial_replay_size = args.initial_replay_size
        max_replay_size = args.max_replay_size
        train_frequency = args.train_frequency
        target_update_frequency = args.target_update_frequency
        test_samples = args.test_samples
        evaluation_frequency = args.evaluation_frequency
        max_steps = args.max_steps
    # Define the properties of the MDP:
    if (args.num_blocks is None):
        number_of_parts = (9+5)
    else:
        number_of_parts = args.num_blocks
    # define size of the grid, i.e. how large the potential target shape to be built can be
    env_grid_size = args.env_grid
    ensemble = True
    # every node in the graph has 5 features: 1) position (x,y,z) and two booleans indicating: target element / block ; block placed / unplaced
    dim_individual_feature = 5
    # each block has an observation dimension of its own features plus the adjaceny matrix, i.e. if it is connected
    # to other elements
    dim_obs_per_block = dim_individual_feature + number_of_parts
    # create a list which contains all of the arguments for the MDP as well as MCTS as due to parallelization we will
    # need them multiple times -> passing list will make creation of MDP's and MCTS agents simpler
    mdp_args=[]
    unnamed_args = [number_of_parts]
    named_args = {"visualize": False, "add_connectivity": True, "ensemble": ensemble, "env_grid_size": env_grid_size, "fully_connected": bool(args.graph_fconnected), \
                  "load_path": args.load_path}
    mdp_args.append(unnamed_args)
    mdp_args.append(named_args)
    mcts_args=[]
    unnamed_args = []
    named_args = {"rolloutPolicy": args.rollout_policy, "searchBudget_iter": args.search_budget_iter, "rollout_depth": args.rollout_depth, \
                  "num_avg_rollouts": args.num_avg_rollouts, "expansionPolicy": args.expansion_policy, \
                  "allowRepeatBeforeExpansion": bool(args.allow_repeat_before_expansion), "uctExplConst": args.uct_expl_const, \
                  "act_select_eps": bool(args.act_select_eps), "priorEstimate": bool(args.prior_estimate), "eval": bool(args.load_path)}
    mcts_args.append(unnamed_args)
    mcts_args.append(named_args)
    # if the load path is specified -> we want to evaluate -> we enable visualization
    if args.load_path:
        mdp_args[1]["visualize"] = True
    # select which environment we want to train on
    # TODO: make this selection process cleaner!
    # variable robot_state_dim is needed to indicate whether the robot's state is also included in the observation
    # or not -> having robot state in observation is needed to conduct proper MCTS search as environment has to be
    # reset / set appropriately
    # NOTE(review): an unrecognized --env value (other than 'default') falls
    # through without defining mdp / robot_state_dim and raises a NameError
    # further down -- consider adding an explicit else-with-error branch.
    if (args.env == 'default'):
        print ("No environment has been specified -> terminating")
        return 0
    elif (args.env == '2-wo-robo'):
        mdp = wrapped_call(fill_volume_env_3D_multiple_obj_3Dac.StackBoxesEnv3D_multiple_obj,mdp_args[0],mdp_args[1])
        robot_state_dim = 0
    elif (args.env == '4-wo-robo'):
        mdp = wrapped_call(fill_volume_env_3D_multiple_obj_3Dac_4.StackBoxesEnv3D_multiple_obj_4,mdp_args[0],mdp_args[1])
        robot_state_dim = 0
    elif (args.env == '1-wo-robo'):
        mdp = wrapped_call(fill_volume_env_3D_multiple_obj_3Dac_more_objects.StackBoxesEnv3D_multiple_obj,mdp_args[0],mdp_args[1])
        robot_state_dim = 0
    elif (args.env == '2-wo-robo-more-obj'):
        mdp = wrapped_call(fill_volume_env_3D_multiple_obj_3Dac_more_objects2_20acs.StackBoxesEnv3D_multiple_obj,mdp_args[0],mdp_args[1])
        robot_state_dim = 0
    elif (args.env == '1-w-robo'):
        mdp = wrapped_call(fill_volume_env_3D_multiple_obj_3Dac_wrap_robot_1.StackBoxesEnv3D_multiple_obj_w_robot,mdp_args[0],mdp_args[1])
        # NOTE(review): 2 * 9 presumably encodes 9 joints x 2 state values --
        # confirm against the robot wrapper environments.
        robot_state_dim = 2 * 9
    elif (args.env == '2-w-robo'):
        mdp = wrapped_call(fill_volume_env_3D_multiple_obj_3Dac_wrap_robot.StackBoxesEnv3D_multiple_obj_w_robot,mdp_args[0],mdp_args[1])
        robot_state_dim = 2 * 9
    elif (args.env == '4-w-robo'):
        mdp = wrapped_call(fill_volume_env_3D_multiple_obj_3Dac_wrap_robot_4.StackBoxesEnv3D_multiple_obj_w_robot,mdp_args[0],mdp_args[1])
        robot_state_dim = 2 * 9
    elif (args.env == '2-w-robo-more-obj'):
        mdp = wrapped_call(fill_volume_env_3D_multiple_obj_3Dac_more_objects_wrap_robot.StackBoxesEnv3D_multiple_obj_w_robot,mdp_args[0],mdp_args[1])
        robot_state_dim = 2 * 9
    # set visualization false in the argument list -> all other envs except for the one created above do not have any
    # visualization
    mdp_args[1]["visualize"] = False
    # normalization callback
    normalizer = MinMaxPreprocessor(mdp_info=mdp.info)
    # do the seeding:
    if (args.seed==-1 and args.load_path):
        seed = 0
    elif (args.seed==-1):
        seed = random.randint(0, 1000)
    else:
        seed = args.seed
    fix_random_seed(seed,mdp)
    # if loading an agent -> this basically corresponds to evaluating it
    if args.load_path:
        # Load Agent
        agent = DQN.load(args.load_path)
        # # this has to be set when wanting to solve a task of different size,...
        agent.approximator.model.network.robot_state_dim = robot_state_dim
        agent.approximator.model.network.dim_whole_obs = dim_obs_per_block
        # # set the network to evaluation mode as for some models, dropout might be involved,...
        agent.approximator.model.network.eval()
        epsilon_test = Parameter(value=args.test_exploration_rate)
        if (args.use_mcts==0):
            pi = EpsGreedyMultidimensional(epsilon=epsilon_test, mdp=mdp)
        else:
            if (args.mcts_type=='e_mcts'):
                pi = EpsGreedyMultidimensionalMCTS(epsilon=0.05, mdp=mdp, normalizer=normalizer, mdp_args=mdp_args, mcts_args=mcts_args)
            elif (args.mcts_type=='q_mcts'):
                pi = QMultidimensionalMCTS(epsilon=0.05, mdp=mdp, normalizer=normalizer, mdp_args=mdp_args, mcts_args=mcts_args)
        agent.policy = pi
        pi.set_q(agent.approximator)
        agent.policy.set_epsilon(epsilon_test)
        agent.policy.set_mdp(mdp)
        # Algorithm
        core_test = Core(agent, mdp, preprocessors=[normalizer])
        # Evaluate model
        dataset = core_test.evaluate(n_steps=args.test_samples,
                                     render=args.render,
                                     quiet=args.quiet)
        get_stats(dataset,logger)
    else:
        # do the training
        # Policy
        epsilon = LinearParameter(value=args.initial_exploration_rate,
                                  threshold_value=args.final_exploration_rate,
                                  n=args.final_exploration_frame)
        epsilon_test = Parameter(value=args.test_exploration_rate)
        epsilon_random = Parameter(value=1)
        # depending on input argument choose appropriate policy to be used,...
        if (args.use_mcts==0):
            pi = EpsGreedyMultidimensional(epsilon=epsilon_random, mdp=mdp)
        else:
            if (args.mcts_type=='e_mcts'):
                pi = EpsGreedyMultidimensionalMCTS(epsilon=0.05, mdp=mdp, normalizer=normalizer, mdp_args=mdp_args, mcts_args=mcts_args)
            elif (args.mcts_type=='q_mcts'):
                pi = QMultidimensionalMCTS(epsilon=0.05, mdp=mdp, normalizer=normalizer, mdp_args=mdp_args, mcts_args=mcts_args)
        pi.set_epsilon(epsilon)
        # Approximator -> choose which model is to be used
        model_to_be_used = None
        if (args.model=="s2v"):
            model_to_be_used = MPNN_Multidim
        elif (args.model=="s2v_new"):
            model_to_be_used = MPNN_Multidim_MORE
        elif (args.model=="mha"):
            model_to_be_used = MPNN_Multidim_Attention
        elif (args.model=="mha_full"):
            model_to_be_used = MPNN_Multidim_Full_Attention
        elif (args.model=="sha_full"):
            model_to_be_used = MPNN_Single_Full_Attention
        elif (args.model=="mha_full_multiple"):
            model_to_be_used = MPNN_Multidim_Full_Attention_Multiple
        elif (args.model == "torch_transformer"):
            model_to_be_used = Pytorch_Transformer
        num_actions_available = mdp.info.action_space.high[-1] + 1
        # Approximator -> create the appropriate one by passing as input arguments the problem's properties
        approximator_params = dict(
            network=model_to_be_used,
            input_shape=mdp.info.observation_space.shape,
            output_shape=mdp.info.action_space.shape,
            #n_actions=mdp.info.action_space.shape,
            n_obs_in = dim_individual_feature,
            n_layers = 3,
            n_features=64,
            tied_weights=False,
            n_hid_readout = [],
            dim_whole_obs=dim_obs_per_block,
            num_actions_avail = num_actions_available,
            robot_state_dim = robot_state_dim,
            optimizer=optimizer,
            loss=SpecialLoss(args.batch_size,args.search_budget_iter,args.w_normal_loss,args.w_ce_loss) if args.use_mcts!=0 else SpecialLoss(args.batch_size,args.search_budget_iter,1.0,0.0),#F.smooth_l1_loss,
            use_cuda=args.use_cuda,
            loglist=loglist
        )
        approximator = TorchApproximator
        if args.prioritized:
            print ("prioritized replay memory currently not supported in this parallel implementation, still has to \
                be implemented")
            # replay_memory = PrioritizedReplayMemory(
            #     initial_replay_size, max_replay_size, alpha=.6,
            #     beta=LinearParameter(.4, threshold_value=1,
            #                          n=max_steps // train_frequency)
            # )
            replay_memory = None
        else:
            replay_memory = None
        # Agent
        algorithm_params = dict(
            batch_size=args.batch_size,
            target_update_frequency=target_update_frequency // train_frequency,
            replay_memory=replay_memory,
            initial_replay_size=initial_replay_size,
            max_replay_size=max_replay_size
        )
        if args.algorithm == 'dqn':
            agent = DQNMultidim(mdp.info, pi, approximator, mdp=mdp,
                                approximator_params=approximator_params,
                                **algorithm_params)
        elif args.algorithm == 'ddqn':
            print ("currently not supported properly in this repo")
            # agent = DoubleDQN(mdp.info, pi, approximator,
            #                   approximator_params=approximator_params,
            #                   **algorithm_params)
        elif args.algorithm == 'adqn':
            print ("currently not supported properly in this repo")
            # agent = AveragedDQN(mdp.info, pi, approximator,
            #                     approximator_params=approximator_params,
            #                     n_approximators=args.n_approximators,
            #                     **algorithm_params)
        elif args.algorithm == 'mmdqn':
            print ("currently not supported properly in this repo")
            # agent = MaxminDQN(mdp.info, pi, approximator,
            #                   approximator_params=approximator_params,
            #                   n_approximators=args.n_approximators,
            #                   **algorithm_params)
        elif args.algorithm == 'dueldqn':
            print ("currently not supported properly in this repo")
            # agent = DuelingDQN(mdp.info, pi,
            #                    approximator_params=approximator_params,
            #                    **algorithm_params)
        elif args.algorithm == 'cdqn':
            print ("currently not supported properly in this repo")
            # agent = CategoricalDQN(mdp.info, pi,
            #                        approximator_params=approximator_params,
            #                        n_atoms=args.n_atoms, v_min=args.v_min,
            #                        v_max=args.v_max, **algorithm_params)
        # Algorithm
        # here use the parallel version which collects the experience in parallel, i.e. we spawn multiple environments
        # in which we collect the experience. Afterwards, we do the update with all of the samples that have been
        # acquired
        core = CoreParallel(agent, mdp, args.use_cuda, preprocessors=[normalizer], mdp_args=mdp_args, num_workers=args.num_workers)
        # RUN
        # Fill replay memory with initial dataset
        print_epoch(0, logger)
        core.learn(n_steps=initial_replay_size,
                   n_steps_per_fit=initial_replay_size, quiet=args.quiet)
        if args.save:
            agent.save(folder_name + '/agent_0.msh')
            normalizer.save(folder_name + '/normalizer_0.msh')
            # copy the training file (i.e. the file here)
            shutil.copy2(os.path.abspath(__file__),folder_name + '/')
            # copy the environment file
            shutil.copy2(inspect.getfile(mdp.__class__), folder_name + '/')
            # copy the network setup:
            shutil.copy2(inspect.getfile(agent.approximator.model.network.__class__), folder_name + '/')
            # write the args to file:
            f = open(folder_name + '/arguments.txt', "a")
            f.write(str(sys.argv[1:]) + '\n')
            f.write('seed ' + str(seed))
            f.close()
        # Evaluate initial policy
        pi.set_epsilon(epsilon_test)
        dataset = core.evaluate(n_steps=test_samples, render=args.render,
                                quiet=args.quiet)
        scores.append(get_stats(dataset, logger))
        if (args.save):
            np.save(folder_name + '/scores.npy', scores)
        best_mean_rew = -1000
        # train for 5000 epochs -> this is hardcoded for now
        for n_epoch in range(1, 5000 + 1):
            # every 25 iterations create new workers. this "hack" was necessary as otherwise in the parallel sampling
            # procedure we observed memory leackage that could not be tracked down,...
            gc.collect()
            if (n_epoch%25==0):
                do_reset = True
            else:
                do_reset = False
            print_epoch(n_epoch, logger)
            logger.info('- Learning:')
            # learning step
            print (epsilon.get_value())
            pi.set_epsilon(epsilon)
            # mdp.set_episode_end(True)
            dataset_train = core.learn(n_steps=evaluation_frequency,
                                       n_steps_per_fit=train_frequency, quiet=args.quiet, reset=do_reset)
            # only store every 100th model, but make a full save which stores everything, including the state of the replay
            # memory
            if args.save and (n_epoch%100)==0:
                agent.save(folder_name + '/agent_full_' + str(n_epoch) + '.msh', full_save=True)
                normalizer.save(folder_name + '/normalizer_full_' + str(n_epoch) + '.msh')
                # however, as storing the full model consumes lots of space -> periodically remove earlier checkpoints
                # to not consume too much space
                if (n_epoch>299) and not(((n_epoch-200)%500)==0):
                    os.remove(folder_name + '/agent_full_' + str(n_epoch-200) + '.msh')
                    os.remove(folder_name + '/normalizer_full_' + str(n_epoch-200) + '.msh')
            # only store every 50th model but this time without the full save -> this is feasible
            if args.save and (n_epoch%50)==0:
                agent.save(folder_name + '/agent_' + str(n_epoch) + '.msh')
                normalizer.save(folder_name + '/normalizer_' + str(n_epoch) + '.msh')
            logger.info('- Evaluation:')
            # evaluation step
            pi.set_epsilon(epsilon_test)
            # mdp.set_episode_end(False)
            # to speed up training the test set is exactly the same as the training set,...
            import copy
            dataset = copy.deepcopy(dataset_train)
            scores.append(get_stats(dataset, logger))
            # add logging to tensorboard file
            if (writer is not None):
                if (loglist is not None):
                    attribute_list = list(loglist.keys())
                    for i in range(len(attribute_list)):
                        # write mean of logging
                        writer.add_scalar(str(attribute_list[i]), np.mean(loglist[attribute_list[i]]), n_epoch)
                        # empty list again -> ready for new values,...
                        loglist[attribute_list[i]] = []
                # Add a few fixed things to the tensorboard log
                min_train_rew, max_train_rew, mean_train_rew, num_train_games_completed = compute_metrics(dataset_train)
                min_test_rew, max_test_rew, mean_test_rew, num_test_games_completed = compute_metrics(dataset)
                writer.add_scalar('Train/min_rew', min_train_rew, n_epoch)
                writer.add_scalar('Train/max_rew', max_train_rew, n_epoch)
                writer.add_scalar('Train/mean_rew', mean_train_rew, n_epoch)
                writer.add_scalar('Train/comp_games', num_train_games_completed, n_epoch)
                writer.add_scalar('Test/min_rew', min_test_rew, n_epoch)
                writer.add_scalar('Test/max_rew', max_test_rew, n_epoch)
                writer.add_scalar('Test/mean_rew', mean_test_rew, n_epoch)
                writer.add_scalar('Test/comp_games', num_test_games_completed, n_epoch)
            # this is here to always keep the best model
            if (scores[-1][2]>best_mean_rew and args.save):
                agent.save(folder_name + '/agent_' + 'BEST' + '.msh')
                normalizer.save(folder_name + '/normalizer_' + 'BEST' + '.msh')
                best_mean_rew = scores[-1][2]
            if (args.save):
                np.save(folder_name + '/scores.npy', scores)
    return scores
# Script entry point: run the full DQN training/evaluation experiment defined above.
if __name__ == '__main__':
    experiment()
|
{"hexsha": "11d87a1ae7fc3ad007db3babf9c5a43912730a7a", "size": 33231, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/boxes_3D_dqn__fill_multidim.py", "max_stars_repo_name": "nifunk/GNNMushroomRL", "max_stars_repo_head_hexsha": "d0d8eefdc10bca62e7cb536d65ea619607be755b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-02-06T22:04:42.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-06T22:04:42.000Z", "max_issues_repo_path": "examples/boxes_3D_dqn__fill_multidim.py", "max_issues_repo_name": "nifunk/GNNMushroomRL", "max_issues_repo_head_hexsha": "d0d8eefdc10bca62e7cb536d65ea619607be755b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/boxes_3D_dqn__fill_multidim.py", "max_forks_repo_name": "nifunk/GNNMushroomRL", "max_forks_repo_head_hexsha": "d0d8eefdc10bca62e7cb536d65ea619607be755b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 49.0856720827, "max_line_length": 209, "alphanum_fraction": 0.6183382986, "include": true, "reason": "import numpy", "num_tokens": 7240}
|
# -*- coding: utf-8 -*-
'''
Created on 30 mars 2014
@author: Mohamed
'''
import sys
import numpy as np
import matplotlib.pylab as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
import scipy.sparse.linalg as la
import math
from opencavity.utilsfunc import UtilsFunc
class Help(object):
    """Open the OpenCavity HTML documentation in the default web browser."""

    def __init__(self):
        # Local import: the browser machinery is only needed when help is requested.
        import webbrowser

        print ("launching open_cavity documentation ... ")
        # NOTE(review): the documentation URL was never filled in, so this
        # currently opens a blank page -- confirm the intended address.
        doc_url = ""
        webbrowser.open(doc_url, new=1, autoraise=True)
class AmpMask2D(object):
    """Amplitude masks (apertures, absorbers, ...) defined on a cavity grid.

    The mask is built on the squared grid described by ``grid_x``/``grid_y``.
    These two vectors matter because cavity eigenvalue problems usually use a
    Legendre-Gauss spacing scheme (needed for the quadrature of the Fresnel
    integral) rather than linear spacing.

    .. Note::
        Dimensions are normalized to the wavelength unit (grid_x=1000 means
        1000 times the wavelength unit).

    Example of use

    >>> apert=solver.AmpMask2D(x1,y1) # create a mask object
    >>> apert.add_circle(100)#create a circular aperture in x1,y1 coordinates with radius=100
    >>> apert.add_rectangle(3, 50) # add a rectangle
    >>> apert.add_rectangle(50, 3)
    """

    def __init__(self, grid_x, grid_y):
        """Create an empty mask bound to the given grid vectors."""
        print("creating mask object...")
        self.Msk = np.array([])  # complex mask matrix; empty until a shape is added
        self.grid_x = grid_x
        self.grid_y = grid_y

    def _amplitudes(self, positive):
        """Return the (inside, outside) amplitude pair for a shape."""
        return (1, 0) if positive else (0, 1)

    def _merge(self, shape_mask):
        """Fold a freshly built shape into the accumulated mask (pointwise product)."""
        if self.Msk.size == 0:
            self.Msk = shape_mask  # first shape: adopt it as-is
        else:
            self.Msk = self.Msk * shape_mask  # later shapes multiply in

    def add_circle(self, radius, x_center=0, y_center=0, positive=True):
        """Create a circular aperture function and add it to the mask object.

        :Args:
            - radius: the radius of the circular aperture.
            - x_center, y_center: coordinates of the shape center, default (0, 0).
            - positive: if True (default) the amplitude is 1 inside the shape
              and 0 outside; if False the shape is inverted.

        :Returns:
            - none. The shape is merged into ``self.Msk``.
        """
        amp_in, amp_out = self._amplitudes(positive)
        nx = np.size(self.grid_x)
        ny = np.size(self.grid_y)
        shape_mask = np.zeros((nx, ny), dtype=complex)
        for i in range(nx):
            for j in range(ny):
                inside = ((self.grid_x[i] - x_center) ** 2
                          + (self.grid_y[j] - y_center) ** 2) < radius ** 2
                shape_mask[i, j] = amp_in if inside else amp_out
        self._merge(shape_mask)
        return

    def add_rectangle(self, x_dim, y_dim, x_center=0, y_center=0, positive=True):
        """Create a rectangular aperture function and add it to the mask object.

        :Args:
            - x_dim, y_dim: half-widths of the rectangle along x and y; a point
              is inside when ``|x - x_center| < x_dim`` and ``|y - y_center| < y_dim``.
            - x_center, y_center: coordinates of the shape center, default (0, 0).
            - positive: if True (default) the amplitude is 1 inside the shape
              and 0 outside; if False the shape is inverted.

        :Returns:
            - none. The shape is merged into ``self.Msk``.
        """
        amp_in, amp_out = self._amplitudes(positive)
        nx = np.size(self.grid_x)
        ny = np.size(self.grid_y)
        shape_mask = np.zeros((nx, ny), dtype=complex)
        for i in range(nx):
            for j in range(ny):
                # Bug fix: the original compared grid_x against y_center and
                # grid_y against x_center, so off-center rectangles were placed
                # with swapped coordinates (centered rectangles were unaffected).
                inside = (np.abs(self.grid_x[i] - x_center) < x_dim
                          and np.abs(self.grid_y[j] - y_center) < y_dim)
                shape_mask[i, j] = amp_in if inside else amp_out
        self._merge(shape_mask)
        return

    def _mask_view(self, what):
        """Return |Msk| or arg(Msk) according to ``what`` ('amplitude'/'phase'); exit on bad input."""
        if what == 'amplitude':
            return np.abs(self.Msk)
        if what == 'phase':
            return np.angle(self.Msk)
        print('"what" must be "amplitude" or "phase"')
        sys.exit(1)

    def show_msk2D(self, what='amplitude'):
        """Show the amplitude/phase of the mask in a 2D plot.

        :Args:
            - what : string ('amplitude'/'phase') to choose what to plot.

        .. Note::
            This function needs the matplotlib package.

        :Returns:
            - none.
        """
        msk = self._mask_view(what)
        plt.figure()
        plt.pcolor(self.grid_x, self.grid_y, msk)
        return

    def show_msk3D(self, what='amplitude'):
        """Show the amplitude/phase of the mask as a 3D surface plot.

        :Args:
            - what : string ('amplitude'/'phase') to choose what to plot.

        .. Note::
            This function needs the matplotlib package.

        :Returns:
            - none.
        """
        msk = self._mask_view(what)
        fig = plt.figure()
        # Fix: fig.gca(projection='3d') was removed in Matplotlib 3.6;
        # add_subplot is the supported way to obtain a 3D axes.
        ax = fig.add_subplot(projection='3d')
        X, Y = np.meshgrid(self.grid_x, self.grid_y)  # meshgrid for the surface plot
        ax.plot_surface(X, Y, msk, rstride=1, cstride=1, linewidth=0)
        plt.set_cmap('hot')
        return
class CavEigenSys(object):
    '''
    Eigenmode solver for open optical cavities.

    Builds the Fresnel-diffraction kernel matrix of a 1D or 2D paraxial
    resonator (a two-mirror cavity or a general ABCD system) on a
    Legendre-Gauss quadrature grid, then solves the round-trip eigenvalue
    problem.  Each eigenvalue encodes the loss (magnitude) and phase shift
    (argument) per round trip of one transverse mode; the matching
    eigenvector is that mode's complex field distribution.
    '''
    def __init__(self,wavelength=1):
        '''
        Constructor.

        :Args:
            - wavelength: the length unit of the solver; every distance handed
              to this class is expressed in multiples of it.
        '''
        self.dim_flag='' #Flag indicating whether the system is 1D or 2D ('' until a kernel is built)
        self.wavelength=wavelength # all distances are relative to wavelength unit, for cavity_example_2D L=1000 means 1000*lambda unit
        # if lambda is in micron L=1000 micron =1mm
        self.k=2*math.pi/wavelength; #wave-number
        self.x1=[] #first axis (of starting complex field)
        self.x2=[] #second axis (of calculated complex field)
        self.Kt=[] #the matrix kernel
        self.l=[] #the eigenvalue vector
        self.v=[] #the eigenvectors
        self.utils=UtilsFunc() # helper object (Gauss-Legendre quadrature, nearest-value search)
    def fresnel1D_cav(self,x1,x2,d,R1,R2):
        """Fresnel Kernel formulation for 1D systems, this function is used internally in the solver, to construct the cavity Kernel matrix.

        :Args:
            - x1,x2 : 1D vectors of real, defining the calculation zones of 1st and 2nd mirrors forming the optical cavity.
            - d : (positive real) the cavity length.
            - R1, R2: (reals) Radius of curvature of the two mirrors forming the cavity

        .. Note::
            this function is appropriate for 1D systems, of optical cavities composed of 2 mirrors

        :Returns:
            - y: the Fresnel kernel value(s) for the given coordinates.
        """
        wavelength=self.wavelength
        # Round-trip ABCD coefficients of the two-mirror cavity written with
        # the usual resonator g-parameters g = 1 - d/R.
        g1=1-d/R1;
        g2=1-d/R2;
        A=2*g1*g2-1;
        B=2*g2*d;
        D=A;
        y=-1j/(wavelength*np.sqrt(B))*np.exp((-1j*math.pi/wavelength)*(A*x1**2+D*x2**2-2*x1*x2)/B);
        return y
    def fresnel1D_ABCD_cav(self,x1,x2,A,B,C,D):
        """Fresnel Kernel formulation for a general ABCD optical 1D systems, this function is used internally in the solver, to construct the cavity Kernel matrix.

        :Args:
            - x1,x2 : 1D vectors of real, defining the calculation zones of 1st and 2nd mirrors forming the optical cavity.
            - A,B,C,D: (reals) elements of the optical matrix defining the paraxial optical system.

        .. Note::
            this function is appropriate for 1D systems, for a general case, optical cavity (multi-elements)

        :Returns:
            - y: the Fresnel kernel value(s) for the given coordinates.
        """
        # B = 0 would put a zero under the square root / in the denominator.
        if B==0:
            print("Propagation distance can not be '0' please change the B element in the ABCD matrix.")
            sys.exit(1)
        else:
            wavelength=self.wavelength
            #y=-1j/(wavelength*np.sqrt(B))*np.exp((+1j*math.pi/wavelength)*(A*x1**2+D*x2**2-2*x1*x2)/B);
            y=-1j/(wavelength*np.sqrt(B))*np.exp((-1j*math.pi/wavelength)*(A*x1**2+D*x2**2-2*x1*x2)/B);
            return y
    def fresnel2DC(self,x1,x2,y1,y2,d,R1,R2):
        """Fresnel Kernel formulation for 2D systems, this function is used internally in the solver, to construct the cavity Kernel matrix.

        :Args:
            - x1,y1,x2,y2 : 1D vectors of real, defining the calculation zones of 1st (x1,y1) and 2nd (x2,y2) mirrors forming the optical cavity.
            - d : (positive real) the cavity length.
            - R1, R2: (reals) Radius of curvature of the two mirrors forming the cavity

        .. Note::
            this function is appropriate for 2D systems, of optical cavities composed of 2 mirrors

        :Returns:
            - the 2D Fresnel kernel value(s), separable product of the x and y 1D kernels.
        """
        wavelength=self.wavelength
        g1=1-d/R1;
        g2=1-d/R2;
        A=2*g1*g2-1;
        B=2*g2*d;
        D=A;
        # The paraxial kernel is separable: product of a kernel in x and one in y.
        yx=-1j/(wavelength*np.sqrt(B))*np.exp((-1j*math.pi/wavelength)*(A*x1**2+D*x2**2-2*x1*x2)/B);
        yy=-1j/(wavelength*np.sqrt(B))*np.exp((-1j*math.pi/wavelength)*(A*y1**2+D*y2**2-2*y1*y2)/B);
        return yx*yy
    def fresnel2D_ABCD(self,x1,x2,y1,y2,A,B,C,D):
        """Fresnel Kernel formulation for a general ABCD optical 2D systems, this function is used internally in the solver, to construct the cavity Kernel matrix.

        :Args:
            - x1,y1,x2,y2 : 1D vectors of real, defining the calculation zones of 1st (x1,y1) and 2nd (x2,y2) mirrors forming the optical cavity.
            - A,B,C,D: (reals) elements of the optical matrix defining the paraxial optical system.

        .. Note::
            - this function is appropriate for 2D systems, for a general case, optical cavity (multi-elements)
            - NOTE(review): unlike the 1D ABCD variant there is no guard against B == 0 here -- confirm callers never pass it.

        :Returns:
            - the 2D Fresnel kernel value(s), separable product of the x and y 1D kernels.
        """
        wavelength=self.wavelength
        yx=-1j/(wavelength*np.sqrt(B))*np.exp((-1j*math.pi/wavelength)*(A*x1**2+D*x2**2-2*x1*x2)/B);
        yy=-1j/(wavelength*np.sqrt(B))*np.exp((-1j*math.pi/wavelength)*(A*y1**2+D*y2**2-2*y1*y2)/B);
        return yx*yy
    def build_2D_cav(self,a,n_pts,R1,R2,d):
        """Build the Fresnel kernel matrix of a 2D two-mirror cavity.

        :Args:
            - a : (positive real) size of the (squared) calculation zone.
            - n_pts: number of Legendre-Gauss points per axis; the kernel has
              shape (n_pts**2, n_pts**2).
            - R1, R2: (reals) radii of curvature of the two mirrors.
            - d : (positive real) the cavity length.

        :Returns:
            - none. The kernel is stored in ``self.Kt`` and the quadrature
              axes in ``self.x1/self.x2/self.y1/self.y2``.
        """
        self.dim_flag="2D"
        #utils=UtilsFunc()
        abscissa,weight=self.utils.gauss_legendre(n_pts) # generate Legendre Gauss abscisas and weight for integration
        order=np.argsort(abscissa)
        abscissa=abscissa[order]
        weight=weight[order]
        self.x1=a*abscissa
        self.x2=np.copy(self.x1)
        self.y1=np.copy(self.x1)
        self.y2=np.copy(self.x1)
        Nn=n_pts
        Mn=Nn
        self.Kt=np.zeros((Nn*Mn,Nn*Mn))+np.zeros((Nn*Mn,Nn*Mn))*1j
        line_block=np.zeros((Nn,Nn*Mn))+np.zeros((Nn,Nn*Mn))*1j
        Absc_j,Absc_v=np.meshgrid(self.x2,self.y2);
        W_j=np.diag(weight)
        print("Building the kernel matrix ...")
        # Assemble the kernel block by block: each (u, i) pair contributes an
        # Nn x Nn block; a*a*weight[i] and W_j carry the quadrature weights.
        for u in range(Nn):
            for i in range(Nn):
                K=a*a*weight[i]*W_j.dot(self.fresnel2DC(self.x1[i],self.y1[u],Absc_j, Absc_v, d, R1, R2))
                #print(np.shape(K),np.shape(line_block),np.shape(self.Kt))
                #line_block[:,(i-1)*Nn+1:i*Nn]=K
                line_block[:,i*Nn:(i+1)*Nn]=K
            #self.Kt[(u-1)*Nn+1:u*Nn,:]=line_block
            self.Kt[u*Nn:(u+1)*Nn,:]=line_block
            line_block=np.zeros((Nn,Nn*Mn))+np.zeros((Nn,Nn*Mn))*1j
            #adv=u/Nn*100
            #print("Building the kernel matrix "+repr(u+1)+ " / "+repr(Nn))
        print("Building the kernel matrix done.")
        return
    def build_2D_cav_ABCD(self,a,n_pts,A,B,C,D):
        """Build the Fresnel-Kernel for a general ABCD optical 2D systems, this function construct the cavity Kernel matrix and stores it in the class attribute 'self.Kt'.

        :Args:
            - a : (positive, real) Size of calculation zone (squared zone)
            - n_pts: number of points used in discretization of the calculation zone, the step will be 'a/n_pts'
            - A,B,C,D: (reals) elements of the optical matrix defining the paraxial optical system.

        .. Note::
            - this function is appropriate for 2D systems, for a general case, optical cavity (multi-elements).
            - All distances are normalized to the wavelength unit.

        :Returns:
            - none. the Fresnel kernel of the system is build and stored directly in the class attribute: 'self.Kt'.
        """
        self.dim_flag="2D"
        #utils=UtilsFunc()
        abscissa,weight=self.utils.gauss_legendre(n_pts) # generate Legendre Gauss abscisas and weight for integration
        order=np.argsort(abscissa)
        abscissa=abscissa[order]
        weight=weight[order]
        self.x1=a*abscissa
        self.x2=np.copy(self.x1)
        self.y1=np.copy(self.x1)
        self.y2=np.copy(self.x1)
        Nn=n_pts
        Mn=Nn
        self.Kt=np.zeros((Nn*Mn,Nn*Mn))+np.zeros((Nn*Mn,Nn*Mn))*1j
        line_block=np.zeros((Nn,Nn*Mn))+np.zeros((Nn,Nn*Mn))*1j
        Absc_j,Absc_v=np.meshgrid(self.x2,self.y2);
        W_j=np.diag(weight)
        print("Building the kernel matrix ...")
        # Same block-wise assembly as build_2D_cav, but with the general
        # ABCD kernel instead of the two-mirror one.
        for u in range(Nn):
            for i in range(Nn):
                #K=a*a*weight[i]*W_j.dot(self.fresnel2DC(self.x1[i],self.y1[u],Absc_j, Absc_v, d, R1, R2))
                K=a*a*weight[i]*W_j.dot(self.fresnel2D_ABCD(self.x1[i],self.y1[u],Absc_j, Absc_v, A,B,C,D))
                #print(np.shape(K),np.shape(line_block),np.shape(self.Kt))
                #line_block[:,(i-1)*Nn+1:i*Nn]=K
                line_block[:,i*Nn:(i+1)*Nn]=K
            #self.Kt[(u-1)*Nn+1:u*Nn,:]=line_block
            self.Kt[u*Nn:(u+1)*Nn,:]=line_block
            line_block=np.zeros((Nn,Nn*Mn))+np.zeros((Nn,Nn*Mn))*1j
            #adv=u/Nn*100
            #print("Building the kernel matrix "+repr(u+1)+ " / "+repr(Nn))
        print("Building the kernel matrix done.")
        return
    def build_1D_cav(self,a,n_pts,R1,R2,d):
        """
        - calculate the matrix kernel of the cavity.
        - R1, R2 are given in wavelength units (normalized).

        Fresnel Kernel calculation for 1D 2 mirrors optical systems, this function is used internally in the solver, to construct the cavity Kernel matrix.

        :Args:
            - a: (positive real) the size of the calculation area.
            - n_pts: (positive integer) the number of point used in discretization.
            - R1,R2: (reals) the radius of curvature of the 1st and 2nd mirror.
            - d : (positive real) the length of the cavity (distance between the two mirrors)

        .. Note::
            - this function is appropriate for 1D systems, for a 2 mirrors, optical cavity.
            - x1 : the vector representing the initial plane is generated inside the function rather than getting it as an argument because it follows a legendre polynomials distribution and not linear spacing.
            - The kernel matrix elements spacing follows Legendre polynomials distribution rather than linear spacing, this is needed to replace the Fresnel integral by a sum (Legendre-Gauss quadrature scheme)
            - All the distances are in the wavelength unit.

        :Returns:
            - none. the Fresnel kernel of the system and x1,x2 the 2 planes (initial and propagated) are build and stored directly in the class attribute: 'self.Kt', 'self.x1','self.x2'
        """
        print("Building the kernel matrix ...")
        self.dim_flag="1D"
        #utils=UtilsFunc()
        abscissa,weight=self.utils.gauss_legendre(n_pts) # generate Legendre Gauss abscisas and weight for integration
        order=np.argsort(abscissa)
        abscissa=abscissa[order]
        weight=weight[order]
        #self.x1=np.linspace(-a, a, n_pts)
        self.x1=a*abscissa
        self.x2=np.copy(self.x1)
        self.Kt=np.zeros((n_pts,n_pts))+np.zeros((n_pts,n_pts))*1j
        # Each entry is the kernel between quadrature nodes i and j, weighted
        # by the Gauss-Legendre weight of the integration node j.
        for i in range(n_pts):
            for j in range(n_pts):
                self.Kt[i,j]=a*weight[j]*self.fresnel1D_cav(a*abscissa[i],a*abscissa[j],d,R1,R2)
        print("Building the kernel matrix done.")
        return
    def build_1D_cav_ABCD(self,a,n_pts,A,B,C,D):
        """Build the Fresnel-Kernel for a general ABCD optical 1D systems, this function construct the cavity Kernel matrix and stores it in the class attribute 'self.Kt'.

        :Args:
            - a : (positive, real) Size of calculation zone (squared zone)
            - n_pts: number of points used in discretization of the calculation zone, the step will be 'a/n_pts'
            - A,B,C,D: (reals) elements of the optical matrix defining the paraxial optical system.

        .. Note::
            - this function is appropriate for 1D systems, for a general case, optical cavity (multi-elements).
            - All distances are in the wavelength unit.

        :Returns:
            - none. the Fresnel kernel of the system is build and stored directly in the class attribute: 'self.Kt'.
        """
        self.dim_flag="1D"
        #utils=UtilsFunc()
        abscissa,weight=self.utils.gauss_legendre(n_pts) # generate Legendre Gauss abscisas and weight for integration
        order=np.argsort(abscissa)
        abscissa=abscissa[order]
        weight=weight[order]
        #self.x1=np.linspace(-a, a, n_pts)
        self.x1=a*abscissa
        self.x2=np.copy(self.x1)
        self.Kt=np.zeros((n_pts,n_pts))+np.zeros((n_pts,n_pts))*1j
        for i in range(n_pts):
            for j in range(n_pts):
                self.Kt[i,j]=a*weight[j]*self.fresnel1D_ABCD_cav(a*abscissa[i],a*abscissa[j], A, B, C, D)
        return
    def solve_modes(self,n_modes=30):
        """Calculate the eigenvalues and eigenfunctions of the matrix-Kernel of the optical cavity defined in class attribute 'self.Kt'.

        :Args:
            - n_modes: number of eigenvalues and eigenfunctions to calculate

        :Returns:
            - none. the eigenvalues and eigenfunctions are stored directly in the class attribute: 'self.l' and 'self.v' respectively.

        .. Note::
            - The i'th eigenvalue corresponds to: losses (amplitude) and phase-shift (phase) per round-trip of the i'th mode of the cavity.
            - The i'th eigenfunction corresponds to the complex field distribution function of the i'th mode of the cavity.
            - eigenvalues and modes can be obtained using the function 'get_mode(n)'.
            - the eigenfunctions (modes of the cavity) can be shown using 'show_mode(n)' to show the n'th mode.
        """
        print("running the eigenvalues solver...")
        if self.dim_flag=='':
            print('The matrix kernel is empty')
            sys.exit(1)
        else:
            #self.l,self.v=la.eigs(self.Kt,n_modes, which="LM") #solving the eigenvalue problem
            if self.dim_flag=='2D':
                "with initial values vector"
                # A random (deterministic-enough in practice) starting vector
                # for ARPACK; see the commented alternative below.
                npts=np.size(self.x1)
                v00=np.random.rand(npts**2)
                # v00=np.ones(npts**2) #changed here 11-06-2015 by Seghil
                self.l,self.v=la.eigs(self.Kt,n_modes, which="LM", v0=v00) #solving the eigenvalue problem
            elif self.dim_flag=='1D':
                self.l,self.v=la.eigs(self.Kt,n_modes, which="LM") #solving the eigenvalue problem
            self.l,self.v=self.eig_sort(self.l, self.v) #sorting eigenvalues & eigenvectors
            self.normalize_modes1D() # normalize the amplitude of the mode to have a max=1
        return
    def get_mode1D(self,n):
        """Fetch the n'th eigenvalue and eigenfunctions from the solved eigenbasis of 1D system.

        :Args:
            - n: order of eigenvalues and eigenfunctions to fetch

        :Returns:
            - self.l[n]: (complex) the n'th eigenvalue.
            - self.v[:,n]: (complex vector) the n'th eigenfunction of the system. (complex field distribution of the n'th mode of the cavity)

        .. Note::
            - this function is used with 1D systems.
            - The i'th eigenvalue corresponds to: losses (amplitude) and phase-shift (phase) per round-trip of the i'th mode of the cavity.
            - The i'th eigenfunction corresponds to the complex field distribution function of the i'th mode of the cavity.
        """
        if self.dim_flag=='':
            print("There are no modes in the system yet")
            sys.exit(1)
        elif self.dim_flag=='2D':
            print("This function is for 1D systems please use the 2D one")
            sys.exit(1)
        else:
            return self.l[n], self.v[:,n]
    def normalize_modes1D(self):
        """Normalize the amplitude of all calculated modes, this function is used internally.
        """
        # Divide each eigenvector (column) by its maximum modulus so every
        # mode has a peak amplitude of 1.
        max_v=np.amax(np.abs(self.v),axis=0)
        self.v=self.v/max_v
        return
    def normalize_beam(self, beam):
        """Normalize the amplitude of a beam, this function is used internally.

        :Args:
            - beam : 1D or 2D field

        :Returns:
            - beam : normalized to the maximum value of the entered beam
        """
        beam=beam/beam.max()
        return beam
    def get_mode2D(self,n):
        """Fetch the n'th eigenvalue and eigenfunctions from the solved eigenbasis of 2D system.

        :Args:
            - n: order of eigenvalues and eigenfunctions to fetch

        :Returns:
            - self.l[n]: (complex) the n'th eigenvalue.
            - tem : 2D (complex vector) the n'th eigenfunction of the system. (complex field distribution of the n'th mode of the cavity)

        .. Note::
            - this function is used with 2D systems.
            - out of the solver the eigenfunction is a complex 1D vector, it has to be reshaped to get the 2D field distribution called tem.
            - The i'th eigenvalue corresponds to: losses (amplitude) and phase-shift (phase) per round-trip of the i'th mode of the cavity.
            - The i'th eigenfunction corresponds to the complex field distribution function of the i'th mode of the cavity.
        """
        if self.dim_flag=='':
            print("There are no modes in the system yet")
            sys.exit(1)
        elif self.dim_flag=='1D':
            print("This function is for 2D systems please use the 1D one")
            sys.exit(1)
        else:
            npts=np.size(self.x1)
            tem=self.v[:,n].reshape(npts,npts) #the mode out of the solver is 1 column vector it must be reshaped to npts x npts
            return self.l[n], tem
    def show_mode(self,n,what='amplitude'):
        """Show the amplitude/phase of the n'th mode.

        :Args:
            - what : string= (amplitude/phase/intensity) to choose what to plot.
            - n : (positive integer) the order of the mode to show.

        .. Note::
            This function needs matplotlib package

        :Returns:
            - none.
        """
        if self.dim_flag=='':
            print("the system kernel is empty ")
            sys.exit(1)
        elif self.dim_flag=='1D':
            plt.figure()
            if what=='amplitude':
                plt.plot(self.x1,np.abs(self.v[:,n]))
            elif what=='phase':
                plt.plot(self.x1,np.angle(self.v[:,n]))
            elif what=='intensity':
                plt.plot(self.x1,np.abs((self.v[:,n])**2))
            else:
                print("what must be 'amplitude','intensity' or 'phase'")
        elif self.dim_flag=='2D':
            npts=np.size(self.x1)
            tem=self.v[:,n].reshape(npts,npts);
            plt.figure()
            if what=='amplitude':
                plt.pcolor(self.x1,self.y1,np.abs(tem))
                plt.colorbar()
            elif what=='phase':
                plt.pcolor(self.x1,self.y1,np.angle(tem))
                plt.colorbar()
            elif what=='intensity':
                plt.pcolor(self.x1,self.y1,np.abs(tem**2))
                plt.colorbar()
            else:
                print("what must be 'amplitude','intensity' or 'phase'")
        return
    def apply_mask1D(self,MaskObj):
        """Apply a phase and amplitude mask to the matrix kernel of 1D systems.

        :Args:
            - MaskObj : the 1D mask vector itself (phase & amplitude).
              NOTE(review): despite the parameter name, the code uses the
              argument directly as an array -- the ``.Msk`` unwrap is
              commented out below, unlike in ``apply_mask2D``.  Confirm which
              call convention the callers use.

        .. Note::
            - this function is to use with 1D systems
            - This function multiplies each row of the matrix Kernel by the mask (phase & amplitude)

        :Returns:
            - none. the modifications are applied directly on the kernel (self.Kt)
        """
        print("Applying 1D Mask...")
        #Mask=MaskObj.Msk
        Mask=MaskObj
        for i in range(np.size(Mask)):
            self.Kt[i,:]=self.Kt[i,:]*Mask[i]
        print("Mask applied.")
        return
    def apply_mask2D(self,MaskObj):
        """Apply a phase and amplitude mask to the matrix kernel of 2D systems.

        :Args:
            - MaskObj : object of the class AmpMask2D which contains the mask matrix in self.Msk

        .. Note::
            - this function is to use with 2D systems
            - This function multiplies each row of the matrix Kernel by the flattened mask (phase & amplitude)

        :Returns:
            - none. the modifications are applied directly on the kernel (self.Kt)
        """
        print("Applying 2D Mask...")
        Mask=MaskObj.Msk
        Nn=np.size(Mask)
        # Flatten the 2D mask to one row so it lines up with the kernel's
        # flattened (x, y) ordering.
        Mask_alpha=Mask.reshape(1,Nn)
        for u in range(Nn):
            self.Kt[u,:]=self.Kt[u,:]*Mask_alpha
        print("Mask applied.")
        return
    def cascade_subsystem(self,SysObj,order=1):
        """Cascade 2 systems (2 objects 'MatEigenSolv') each one containing its Matrix kernel (self.Kt).

        :Args:
            - SysObj : (object of the class MatEigenSolv) contains the kernel matrix in 'self.Kt' and all elements of the system.
            - order: (1/-1) corresponds to the order of cascading (order=1 :sys1 --> sys2); (order=-1 :sys2 --> sys1)

        .. Note::
            - this function is to use with 1D & 2D systems
            - cascading is a plain matrix product of the two kernels, in the requested order

        :Returns:
            - none. the modifications are applied directly on the kernel (self.Kt)
        """
        if self.dim_flag=='':
            print("the kernel matrix is empty!")
            sys.exit(1)
        else:
            Kt2=SysObj.Kt
            if order==1:
                self.Kt=np.dot(self.Kt,Kt2)
            elif order==-1:
                self.Kt=np.dot(Kt2,self.Kt)
            else:
                print("order can take '1' or '-1' values only ")
                sys.exit(1)
            print("systems cascaded.")
        return
    def eig_sort(self,l,v):
        """l :eigenvalue; v:eigenvector.

        sorting eigenvalues & eigenvectors.
        this function is used internally after solving the eigenvalue problem to sort the modes from the fundamental (0) to the (n'th)
        """
        # Sort by |eigenvalue| ascending, then reverse so index 0 holds the
        # largest-magnitude (fundamental, lowest-loss) mode.
        idx=np.argsort(np.abs(l))
        l=l[idx]
        v=v[:,idx]
        l=l[::-1]
        v=v[:,::-1]
        return l,v
    def find_waist(self,beam,x,value=0.36):
        """Find the beam waist: the |x| where the normalized amplitude is nearest to ``value`` (default 0.36, i.e. ~1/e of the maximum).
        """
        beam=self.normalize_beam(beam)
        idx=self.utils.find_nearest(np.abs(beam),value)
        return np.abs(x[idx])
    def find_mode_waist(self,n):
        # TODO: not implemented yet; should locate the waist of mode n via find_waist.
        pass
        # function to write its uses find_waist
        return
# Smoke-test entry point only; this module is meant to be imported.
if __name__ == '__main__':
    print("hiii there")
|
{"hexsha": "ce985cc8eba932e424d1a0c1ad633033395b82d3", "size": 29538, "ext": "py", "lang": "Python", "max_stars_repo_path": "lib/opencavity/modesolver.py", "max_stars_repo_name": "giomalt/SLM_hologram_generation", "max_stars_repo_head_hexsha": "74ad38be8fe17c710856b2508389cd8c9f1ee77a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-02-24T12:55:01.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-19T02:19:25.000Z", "max_issues_repo_path": "lib/opencavity/modesolver.py", "max_issues_repo_name": "giomalt/SLM_hologram_generation", "max_issues_repo_head_hexsha": "74ad38be8fe17c710856b2508389cd8c9f1ee77a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lib/opencavity/modesolver.py", "max_forks_repo_name": "giomalt/SLM_hologram_generation", "max_forks_repo_head_hexsha": "74ad38be8fe17c710856b2508389cd8c9f1ee77a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.8086253369, "max_line_length": 289, "alphanum_fraction": 0.5639853748, "include": true, "reason": "import numpy,import scipy", "num_tokens": 7445}
|
import numpy as np
from typing import Dict, List, Tuple
import logging
from functools import lru_cache
from mypy.types import Type
from . import register
from ..shortcuts import (is_int, is_bool, is_float, bool_type, int_type,
float_type, is_ndarray_of_bools, is_ndarray_of_floats,
is_ndarray_of_ints)
from ..bind_arguments import BoundArgument
log = logging.getLogger(__name__)
@register('numpy._UfuncCast')
def UfuncCast(typ: Type, funcname: str, bound_args: Dict[str, BoundArgument]):
    """Infer the result type of a numpy ufunc call from its bound arguments."""
    # 'out'-style keyword arguments do not take part in type resolution.
    input_chars = tuple(
        type_to_char(arg.arg_typ)
        for name, arg in bound_args.items()
        if name not in ('out', 'out1', 'out2')
    )
    output_chars = ufunc_type_resolver(input_chars, ufunc_to_typecodes(funcname))
    # Only the first output char is used (single-output ufuncs).
    return char_to_type(output_chars[0])
###############################################################################
@lru_cache()
def ufunc_to_typecodes(funcname: str) -> List[Tuple[str, str]]:
    """Return the typecode signatures of a numpy ufunc as (inputs, outputs) pairs.

    ``funcname`` must be fully qualified, e.g. ``'numpy.add'``.  Each entry of
    ``np.<ufunc>.types`` is a string like ``'dd->d'`` and is split into the
    pair ``('dd', 'd')``.

    Fix: the pairs are now tuples instead of lists -- this matches the
    declared ``List[Tuple[str, str]]`` annotation and keeps the
    ``lru_cache``'d value's elements immutable, so callers cannot corrupt
    the cached entries by mutating them.
    """
    funcname_split = funcname.split('.')
    assert len(funcname_split) == 2 and funcname_split[0] == 'numpy'
    ufunc_name = funcname_split[1]
    types = getattr(np, ufunc_name).types
    return [tuple(signature.split('->')) for signature in types]
def ufunc_type_resolver(ichars: Tuple[str, ...], typecodes: List[Tuple[str, str]]) -> str:
    """Select the output typechars of the first ufunc signature the inputs fit.

    Walks ``typecodes`` in order (the order of ``np.<ufunc>.types``) and
    returns the output chars of the first signature that every input char can
    safely cast to.

    :raises ValueError: if no signature accepts the inputs (the original code
        raised a bare ``ValueError()`` with no diagnostic).
    """
    ichars = tuple(ichars)  # materialize so iterables survive repeated passes / error reporting
    for inp, out in typecodes:
        if all(np.can_cast(ii, tt) for ii, tt in zip(ichars, inp)):
            return out
    raise ValueError('no ufunc signature accepts input typechars %r' % (ichars,))
@lru_cache()
def type_to_char(type: Type) -> str:
    """Translate a mypy type into the matching numpy dtype character.

    Examples: 'builtins.bool' -> '?', 'builtins.float' -> 'd',
    'builtins.int' -> 'l' (the exact characters come from ``np.dtype``).
    """
    # Scalars and ndarrays of the same kind map to the same dtype char;
    # checks run in the same order as before (bool, float, int).
    checks = (
        (is_bool, is_ndarray_of_bools, 'bool'),
        (is_float, is_ndarray_of_floats, 'float'),
        (is_int, is_ndarray_of_ints, 'int'),
    )
    for scalar_check, array_check, dtype_name in checks:
        if scalar_check(type) or array_check(type):
            return np.dtype(dtype_name).char
    raise ValueError(type)
@lru_cache()
def char_to_type(char: str) -> Type:
    """Translate a numpy dtype character back into a mypy builtin type."""
    # Same groupings as before: bool chars, float chars, int chars.
    for chars, make_type in (
            (('b', '?'), bool_type),
            (('e', 'f', 'd', 'g'), float_type),
            (('i', 'l'), int_type)):
        if char in chars:
            return make_type()
    raise ValueError(char)
|
{"hexsha": "6eefd0cd93f5860a2c71c970ae8814c2b83aaab2", "size": 2190, "ext": "py", "lang": "Python", "max_stars_repo_path": "numpy_plugin/typefunctions/ufuncs.py", "max_stars_repo_name": "rmcgibbo/numpy-mypy", "max_stars_repo_head_hexsha": "4f63fbd82bc7067753975bfc887b061be5c2b30f", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-10-01T03:14:06.000Z", "max_stars_repo_stars_event_max_datetime": "2019-10-01T03:14:06.000Z", "max_issues_repo_path": "numpy_plugin/typefunctions/ufuncs.py", "max_issues_repo_name": "rmcgibbo/numpy-mypy", "max_issues_repo_head_hexsha": "4f63fbd82bc7067753975bfc887b061be5c2b30f", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "numpy_plugin/typefunctions/ufuncs.py", "max_forks_repo_name": "rmcgibbo/numpy-mypy", "max_forks_repo_head_hexsha": "4f63fbd82bc7067753975bfc887b061be5c2b30f", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.4166666667, "max_line_length": 79, "alphanum_fraction": 0.6301369863, "include": true, "reason": "import numpy", "num_tokens": 573}
|
def example(Simulator):
    """Build, run, and return a Simulator for the sparse einsum-summation example."""
    import numpy as np
    from csdl import Model
    import csdl

    class ExampleTensorSummationSparse(Model):
        def define(self):
            # A 2x4x3 tensor filled with 0..23.
            tensor_shape = (2, 4, 3)
            tensor_val = np.arange(24).reshape(tensor_shape)

            # Declare the tensor and register the total sum of its elements
            # ('ijk->'), asking for sparse partial derivatives.
            tensor = self.declare_variable('c', val=tensor_val)
            summed = csdl.einsum(
                tensor,
                subscripts='ijk->',
                partial_format='sparse',
            )
            self.register_output('einsum_summ2_sparse_derivs', summed)

    sim = Simulator(ExampleTensorSummationSparse())
    sim.run()

    print('c', sim['c'].shape)
    print(sim['c'])
    print('einsum_summ2_sparse_derivs', sim['einsum_summ2_sparse_derivs'].shape)
    print(sim['einsum_summ2_sparse_derivs'])

    return sim
|
{"hexsha": "71c769b73bef0329752412f9093e1feb21baf745", "size": 890, "ext": "py", "lang": "Python", "max_stars_repo_path": "csdl/examples/valid/ex_einsum_old_tensor_summation_sparse.py", "max_stars_repo_name": "LSDOlab/csdl", "max_stars_repo_head_hexsha": "04c2c5764f6ca9b865ec87ecfeaf6f22ecacc5a3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "csdl/examples/valid/ex_einsum_old_tensor_summation_sparse.py", "max_issues_repo_name": "LSDOlab/csdl", "max_issues_repo_head_hexsha": "04c2c5764f6ca9b865ec87ecfeaf6f22ecacc5a3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "csdl/examples/valid/ex_einsum_old_tensor_summation_sparse.py", "max_forks_repo_name": "LSDOlab/csdl", "max_forks_repo_head_hexsha": "04c2c5764f6ca9b865ec87ecfeaf6f22ecacc5a3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-10-04T19:40:32.000Z", "max_forks_repo_forks_event_max_datetime": "2021-10-04T19:40:32.000Z", "avg_line_length": 26.9696969697, "max_line_length": 80, "alphanum_fraction": 0.5325842697, "include": true, "reason": "import numpy", "num_tokens": 208}
|
"""Synthetic observation data testing.
"""
import copy
import unittest
import numpy
import tigernet
from .network_objects import network_lattice_1x1_geomelem
from .network_objects import network_empirical_simplified
import platform
os = platform.platform()[:7].lower()
if os == "windows":
WINDOWS = True
DECIMAL = -1
else:
WINDOWS = False
DECIMAL = 1
# WINDOWS = False
# DECIMAL = 1
####################################################################################
################################## SYNTH-SYNTH #####################################
####################################################################################
class TestSyntheticObservationsSegmentRandomLattice1x1(unittest.TestCase):
    """Synthetic observations snapped to *segments* of a 1x1 lattice network."""

    def setUp(self):
        """Build the lattice network, generate 5 labelled points, snap them."""
        network = copy.deepcopy(network_lattice_1x1_geomelem)
        # generate synthetic observations
        obs = tigernet.generate_obs(5, network.s_data)
        obs["obs_id"] = ["a", "b", "c", "d", "e"]
        # associate observations with the network
        args = network, obs.copy()
        kwargs = {"df_name": "obs1", "df_key": "obs_id"}
        self.net_obs = tigernet.Observations(*args, **kwargs)

    def assertSequenceAlmostEqual(self, observed, known):
        """Element-wise almost-equality for two equal-length sequences.

        Fix: ``assertAlmostEqual(seq, seq)`` only passes when the sequences
        compare *exactly* equal (otherwise it raises ``TypeError`` trying to
        subtract them), so compare the float entries pairwise instead.
        """
        self.assertEqual(len(observed), len(known))
        for observed_value, known_value in zip(observed, known):
            self.assertAlmostEqual(observed_value, known_value)

    def test_obs2coords(self):
        known_obs2coords = {
            (0, "a"): (4.939321535345923, 6.436704297351775),
            (1, "b"): (5.4248703846447945, 4.903948646972072),
            (2, "c"): (3.8128931940501425, 5.813047017599905),
            (3, "d"): (3.9382849013642325, 8.025957007038718),
            (4, "e"): (8.672964844509263, 3.4509736694319995),
        }
        observed_obs2coords = self.net_obs.obs2coords
        for k, v in known_obs2coords.items():
            # coordinates are (x, y) pairs -> compare component-wise
            self.assertSequenceAlmostEqual(observed_obs2coords[k], v)

    def test_obs2segm(self):
        known_obs2segm = {"a": 1, "b": 3, "c": 1, "d": 1, "e": 3}
        observed_obs2segm = self.net_obs.obs2segm
        self.assertEqual(observed_obs2segm, known_obs2segm)

    def test_snapped_points_df_dist_a(self):
        known_dist_a = [
            1.9367042973517747,
            0.9248703846447945,
            1.3130470175999047,
            3.5259570070387185,
            4.172964844509263,
        ]
        observed_dist_a = list(self.net_obs.snapped_points["dist_a"])
        self.assertSequenceAlmostEqual(observed_dist_a, known_dist_a)
        known_dist_a_mean = 2.3747087102288913
        observed_dist_a_mean = self.net_obs.snapped_points["dist_a"].mean()
        self.assertAlmostEqual(observed_dist_a_mean, known_dist_a_mean)

    def test_snapped_points_df_dist_b(self):
        known_dist_b = [
            2.563295702648225,
            3.5751296153552055,
            3.1869529824000953,
            0.9740429929612815,
            0.32703515549073714,
        ]
        observed_dist_b = list(self.net_obs.snapped_points["dist_b"])
        self.assertSequenceAlmostEqual(observed_dist_b, known_dist_b)
        known_dist_b_mean = 2.1252912897711087
        observed_dist_b_mean = self.net_obs.snapped_points["dist_b"].mean()
        self.assertAlmostEqual(observed_dist_b_mean, known_dist_b_mean)

    def test_snapped_points_df_node_a(self):
        known_node_a = [1, 1, 1, 1, 1]
        observed_node_a = list(self.net_obs.snapped_points["node_a"])
        self.assertEqual(observed_node_a, known_node_a)

    def test_snapped_points_df_node_b(self):
        known_node_b = [2, 4, 2, 2, 4]
        observed_node_b = list(self.net_obs.snapped_points["node_b"])
        self.assertEqual(observed_node_b, known_node_b)

    def test_snapped_points_df_dist2line(self):
        known_dist2line = [
            0.4393215353459228,
            0.4039486469720721,
            0.6871068059498575,
            0.5617150986357675,
            1.0490263305680005,
        ]
        observed_dist2line = list(self.net_obs.snapped_points["dist2line"])
        self.assertSequenceAlmostEqual(observed_dist2line, known_dist2line)
        known_dist2line_mean = 0.6282236834943241
        observed_dist2line_mean = self.net_obs.snapped_points["dist2line"].mean()
        self.assertAlmostEqual(observed_dist2line_mean, known_dist2line_mean)
class TestSyntheticObservationsNodeRandomLattice1x1(unittest.TestCase):
    """Synthetic observations snapped to *nodes* of a 1x1 lattice network."""

    def setUp(self):
        """Build the lattice network, generate 5 labelled points, snap to nodes."""
        network = copy.deepcopy(network_lattice_1x1_geomelem)
        # generate synthetic observations
        obs = tigernet.generate_obs(5, network.s_data)
        obs["obs_id"] = ["a", "b", "c", "d", "e"]
        # associate observations with the network
        args = network, obs.copy()
        kwargs = {"df_name": "obs1", "df_key": "obs_id", "snap_to": "nodes"}
        self.net_obs = tigernet.Observations(*args, **kwargs)

    def assertSequenceAlmostEqual(self, observed, known):
        """Element-wise almost-equality for two equal-length sequences.

        Fix: ``assertAlmostEqual(seq, seq)`` only passes when the sequences
        compare *exactly* equal (otherwise it raises ``TypeError``), so
        compare the float entries pairwise instead.
        """
        self.assertEqual(len(observed), len(known))
        for observed_value, known_value in zip(observed, known):
            self.assertAlmostEqual(observed_value, known_value)

    def test_obs2coords(self):
        known_obs2coords = {
            (0, "a"): (4.939321535345923, 6.436704297351775),
            (1, "b"): (5.4248703846447945, 4.903948646972072),
            (2, "c"): (3.8128931940501425, 5.813047017599905),
            (3, "d"): (3.9382849013642325, 8.025957007038718),
            (4, "e"): (8.672964844509263, 3.4509736694319995),
        }
        observed_obs2coords = self.net_obs.obs2coords
        for k, v in known_obs2coords.items():
            # coordinates are (x, y) pairs -> compare component-wise
            self.assertSequenceAlmostEqual(observed_obs2coords[k], v)

    def test_obs2node(self):
        known_obs2node = {"a": 1, "b": 1, "c": 1, "d": 2, "e": 4}
        observed_obs2node = self.net_obs.obs2node
        self.assertEqual(observed_obs2node, known_obs2node)

    def test_snapped_points_df_dist2node(self):
        known_dist2node = [
            1.9859070841304562,
            1.0092372059053203,
            1.4819609418640627,
            1.1244036660258458,
            1.098821293546778,
        ]
        observed_dist2node = list(self.net_obs.snapped_points["dist2node"])
        self.assertSequenceAlmostEqual(observed_dist2node, known_dist2node)
        known_dist2node_mean = 1.3400660382944927
        observed_dist2node_mean = self.net_obs.snapped_points["dist2node"].mean()
        self.assertAlmostEqual(observed_dist2node_mean, known_dist2node_mean)
####################################################################################
####################### SYNTH-SYNTH RESTRICTED #####################################
####################################################################################
class TestSyntheticObservationsSegmentRandomLattice1x1Restricted(unittest.TestCase):
    """Segment snapping on a 1x1 lattice with restricted (removed) MTFCC codes."""

    def setUp(self):
        """Mark two segments as restricted road types, then snap 5 points."""
        network = copy.deepcopy(network_lattice_1x1_geomelem)
        network.s_data.loc[1, "MTFCC"] = "S1100"
        network.s_data.loc[3, "MTFCC"] = "S1100"
        # generate synthetic observations
        obs = tigernet.generate_obs(5, network.s_data)
        obs["obs_id"] = ["a", "b", "c", "d", "e"]
        # associate observations with the network, excluding restricted segments
        args = network, obs.copy()
        kwargs = {"df_name": "obs1", "df_key": "obs_id", "restrict_col": "MTFCC"}
        kwargs.update({"remove_restricted": ["S1100", "S1630", "S1640"]})
        self.net_obs = tigernet.Observations(*args, **kwargs)

    def assertSequenceAlmostEqual(self, observed, known):
        """Element-wise almost-equality for two equal-length sequences.

        Fix: ``assertAlmostEqual(seq, seq)`` only passes when the sequences
        compare *exactly* equal (otherwise it raises ``TypeError``), so
        compare the float entries pairwise instead.
        """
        self.assertEqual(len(observed), len(known))
        for observed_value, known_value in zip(observed, known):
            self.assertAlmostEqual(observed_value, known_value)

    def test_obs2coords(self):
        known_obs2coords = {
            (0, "a"): (4.939321535345923, 6.436704297351775),
            (1, "b"): (5.4248703846447945, 4.903948646972072),
            (2, "c"): (3.8128931940501425, 5.813047017599905),
            (3, "d"): (3.9382849013642325, 8.025957007038718),
            (4, "e"): (8.672964844509263, 3.4509736694319995),
        }
        observed_obs2coords = self.net_obs.obs2coords
        for k, v in known_obs2coords.items():
            # coordinates are (x, y) pairs -> compare component-wise
            self.assertSequenceAlmostEqual(observed_obs2coords[k], v)

    def test_obs2segm(self):
        known_obs2segm = {"a": 0, "b": 0, "c": 2, "d": 2, "e": 0}
        observed_obs2segm = self.net_obs.obs2segm
        self.assertEqual(observed_obs2segm, known_obs2segm)

    def test_snapped_points_df_dist_a(self):
        known_dist_a = [
            4.5,
            4.5,
            3.812893194050143,
            3.9382849013642325,
            3.4509736694319995,
        ]
        observed_dist_a = list(self.net_obs.snapped_points["dist_a"])
        self.assertSequenceAlmostEqual(observed_dist_a, known_dist_a)
        known_dist_a_mean = 4.040430352969275
        observed_dist_a_mean = self.net_obs.snapped_points["dist_a"].mean()
        self.assertAlmostEqual(observed_dist_a_mean, known_dist_a_mean)

    def test_snapped_points_df_dist_b(self):
        known_dist_b = [
            0.0,
            0.0,
            0.6871068059498571,
            0.5617150986357675,
            1.0490263305680005,
        ]
        observed_dist_b = list(self.net_obs.snapped_points["dist_b"])
        self.assertSequenceAlmostEqual(observed_dist_b, known_dist_b)
        known_dist_b_mean = 0.459569647030725
        observed_dist_b_mean = self.net_obs.snapped_points["dist_b"].mean()
        self.assertAlmostEqual(observed_dist_b_mean, known_dist_b_mean)

    def test_snapped_points_df_node_a(self):
        known_node_a = [0, 0, 3, 3, 0]
        observed_node_a = list(self.net_obs.snapped_points["node_a"])
        self.assertEqual(observed_node_a, known_node_a)

    def test_snapped_points_df_node_b(self):
        known_node_b = [1, 1, 1, 1, 1]
        observed_node_b = list(self.net_obs.snapped_points["node_b"])
        self.assertEqual(observed_node_b, known_node_b)

    def test_snapped_points_df_dist2line(self):
        known_dist2line = [
            1.9859070841304562,
            1.0092372059053203,
            1.3130470175999047,
            3.525957007038718,
            4.172964844509263,
        ]
        observed_dist2line = list(self.net_obs.snapped_points["dist2line"])
        self.assertSequenceAlmostEqual(observed_dist2line, known_dist2line)
        known_dist2line_mean = 2.4014226318367324
        # Fix: local variable was misspelled ``observed_dist2ine_mean``.
        observed_dist2line_mean = self.net_obs.snapped_points["dist2line"].mean()
        self.assertAlmostEqual(observed_dist2line_mean, known_dist2line_mean)
class TestSyntheticObservationsNodeRandomLattice1x1Restricted(unittest.TestCase):
    """Node snapping on a 1x1 lattice with restricted (removed) MTFCC codes."""

    def setUp(self):
        """Mark two segments as restricted road types, then snap 5 points to nodes."""
        network = copy.deepcopy(network_lattice_1x1_geomelem)
        network.s_data.loc[1, "MTFCC"] = "S1100"
        network.s_data.loc[3, "MTFCC"] = "S1100"
        # generate synthetic observations
        obs = tigernet.generate_obs(5, network.s_data)
        obs["obs_id"] = ["a", "b", "c", "d", "e"]
        # associate observations with the network, excluding restricted segments
        args = network, obs.copy()
        kwargs = {"df_name": "obs1", "df_key": "obs_id", "snap_to": "nodes"}
        kwargs.update({"restrict_col": "MTFCC"})
        kwargs.update({"remove_restricted": ["S1100", "S1630", "S1640"]})
        self.net_obs = tigernet.Observations(*args, **kwargs)

    def assertSequenceAlmostEqual(self, observed, known):
        """Element-wise almost-equality for two equal-length sequences.

        Fix: ``assertAlmostEqual(seq, seq)`` only passes when the sequences
        compare *exactly* equal (otherwise it raises ``TypeError``), so
        compare the float entries pairwise instead.
        """
        self.assertEqual(len(observed), len(known))
        for observed_value, known_value in zip(observed, known):
            self.assertAlmostEqual(observed_value, known_value)

    def test_obs2coords(self):
        known_obs2coords = {
            (0, "a"): (4.939321535345923, 6.436704297351775),
            (1, "b"): (5.4248703846447945, 4.903948646972072),
            (2, "c"): (3.8128931940501425, 5.813047017599905),
            (3, "d"): (3.9382849013642325, 8.025957007038718),
            (4, "e"): (8.672964844509263, 3.4509736694319995),
        }
        observed_obs2coords = self.net_obs.obs2coords
        for k, v in known_obs2coords.items():
            # coordinates are (x, y) pairs -> compare component-wise
            self.assertSequenceAlmostEqual(observed_obs2coords[k], v)

    def test_obs2node(self):
        known_obs2node = {"a": 1, "b": 1, "c": 1, "d": 1, "e": 1}
        observed_obs2node = self.net_obs.obs2node
        self.assertEqual(observed_obs2node, known_obs2node)

    def test_snapped_points_df_dist2node(self):
        known_dist2node = [
            1.9859070841304562,
            1.0092372059053203,
            1.4819609418640627,
            3.5704196766655913,
            4.302800464317999,
        ]
        observed_dist2node = list(self.net_obs.snapped_points["dist2node"])
        self.assertSequenceAlmostEqual(observed_dist2node, known_dist2node)
        known_dist2node_mean = 2.470065074576686
        observed_dist2node_mean = self.net_obs.snapped_points["dist2node"].mean()
        self.assertAlmostEqual(observed_dist2node_mean, known_dist2node_mean)
####################################################################################
################################## SYNTH-EMPIR #####################################
####################################################################################
class TestSyntheticObservationsSegmentRandomEmpirical(unittest.TestCase):
    """500 synthetic observations snapped to segments of an empirical network."""

    def setUp(self):
        network = copy.deepcopy(network_empirical_simplified)
        # generate synthetic observations
        obs = tigernet.generate_obs(500, network.s_data)
        obs["obs_id"] = obs.index
        # associate observations with the network
        args = network, obs.copy()
        kwargs = {"df_name": "obs1", "df_key": "obs_id"}
        self.net_obs = tigernet.Observations(*args, **kwargs)

    @unittest.skipIf(WINDOWS, "Skipping Windows due to precision issues.")
    def test_obs2coords(self):
        # only the last five observations are spot-checked
        known_obs2coords = [
            ((495, 495), (621033.3213594754, 164941.80269090834)),
            ((496, 496), (621819.5720103906, 165514.3885859197)),
            ((497, 497), (623654.2570885622, 164241.2803142736)),
            ((498, 498), (622851.6060250874, 166857.07354681785)),
            ((499, 499), (621816.24144166, 166044.17761455863)),
        ]
        observed_obs2coords = self.net_obs.obs2coords
        for k, v in known_obs2coords:
            observed = numpy.array(observed_obs2coords[k])
            numpy.testing.assert_array_almost_equal(observed, numpy.array(v))

    def test_obs2segm(self):
        known_obs2segm = [(495, 150), (496, 230), (497, 84), (498, 91), (499, 105)]
        observed_obs2segm = list(self.net_obs.obs2segm.items())[-5:]
        self.assertEqual(observed_obs2segm, known_obs2segm)

    @unittest.skipIf(WINDOWS, "Skipping Windows due to precision issues.")
    def test_snapped_points_df_dist_a(self):
        known_dist_a = numpy.array(
            [
                210.40526565933823,
                118.30357725098324,
                34.12778222322711,
                120.39577375386378,
                0.0,
            ]
        )
        observed_dist_a = list(self.net_obs.snapped_points["dist_a"])[-5:]
        numpy.testing.assert_array_almost_equal(
            numpy.array(observed_dist_a), known_dist_a
        )
        known_dist_a_mean = 163.49368966710074
        observed_dist_a_mean = self.net_obs.snapped_points["dist_a"].mean()
        self.assertAlmostEqual(observed_dist_a_mean, known_dist_a_mean)

    @unittest.skipIf(WINDOWS, "Skipping Windows due to precision issues.")
    def test_snapped_points_df_dist_b(self):
        known_dist_b = numpy.array(
            [
                342.6965551431302,
                0.0,
                86.50490751040633,
                58.25005873237134,
                152.0185068774602,
            ]
        )
        observed_dist_b = list(self.net_obs.snapped_points["dist_b"])[-5:]
        numpy.testing.assert_array_almost_equal(
            numpy.array(observed_dist_b), known_dist_b
        )
        known_dist_b_mean = 159.75442932794624
        observed_dist_b_mean = self.net_obs.snapped_points["dist_b"].mean()
        self.assertAlmostEqual(observed_dist_b_mean, known_dist_b_mean)

    def test_snapped_points_df_node_a(self):
        known_node_a = [186, 86, 122, 132, 151]
        observed_node_a = list(self.net_obs.snapped_points["node_a"])[-5:]
        self.assertEqual(observed_node_a, known_node_a)

    def test_snapped_points_df_node_b(self):
        known_node_b = [193, 245, 48, 133, 22]
        observed_node_b = list(self.net_obs.snapped_points["node_b"])[-5:]
        self.assertEqual(observed_node_b, known_node_b)

    @unittest.skipIf(WINDOWS, "Skipping Windows due to precision issues.")
    def test_snapped_points_df_dist2line(self):
        known_dist2line = numpy.array(
            [
                147.05576410321171,
                298.0459114928476,
                2.914177304108527,
                160.72592517096817,
                300.2025615374258,
            ]
        )
        observed_dist2line = list(self.net_obs.snapped_points["dist2line"])[-5:]
        numpy.testing.assert_array_almost_equal(
            numpy.array(observed_dist2line), known_dist2line
        )
        known_dist2line_mean = 70.14736252699115
        # Fix: local variable was misspelled ``observed_dist2ine_mean``.
        observed_dist2line_mean = self.net_obs.snapped_points["dist2line"].mean()
        self.assertAlmostEqual(observed_dist2line_mean, known_dist2line_mean)
class TestSyntheticObservationsNodeRandomEmpirical(unittest.TestCase):
    """500 synthetic observations snapped to nodes of an empirical network."""

    def setUp(self):
        network = copy.deepcopy(network_empirical_simplified)
        # generate synthetic observations
        obs = tigernet.generate_obs(500, network.s_data)
        obs["obs_id"] = obs.index
        # associate observations with the network
        args = network, obs.copy()
        kwargs = {"df_name": "obs1", "df_key": "obs_id", "snap_to": "nodes"}
        self.net_obs = tigernet.Observations(*args, **kwargs)

    @unittest.skipIf(WINDOWS, "Skipping Windows due to precision issues.")
    def test_obs2coords(self):
        # only the last five observations are spot-checked
        known_obs2coords = [
            ((495, 495), (621033.3213594754, 164941.80269090834)),
            ((496, 496), (621819.5720103906, 165514.3885859197)),
            ((497, 497), (623654.2570885622, 164241.2803142736)),
            ((498, 498), (622851.6060250874, 166857.07354681785)),
            ((499, 499), (621816.24144166, 166044.17761455863)),
        ]
        observed_obs2coords = self.net_obs.obs2coords
        for k, v in known_obs2coords:
            numpy.testing.assert_array_almost_equal(
                numpy.array(observed_obs2coords[k]), numpy.array(v)
            )

    def test_obs2node(self):
        known_obs2node = [(495, 192), (496, 245), (497, 122), (498, 133), (499, 151)]
        observed_obs2node = self.net_obs.obs2node
        for k, v in known_obs2node:
            self.assertAlmostEqual(observed_obs2node[k], v)

    @unittest.skipIf(WINDOWS, "Skipping Windows due to precision issues.")
    def test_snapped_points_df_dist2node(self):
        known_dist2node = numpy.array(
            [
                233.41263770566138,
                298.0459114928476,
                34.25197729818704,
                170.95581991959833,
                300.2025615374258,
            ]
        )
        observed_dist2node = list(self.net_obs.snapped_points["dist2node"])[-5:]
        # Fix: dropped redundant double numpy.array(numpy.array(...)) wrapping.
        numpy.testing.assert_array_almost_equal(
            numpy.array(observed_dist2node), known_dist2node
        )
        known_dist2node_mean = 117.00153682103445
        observed_dist2node_mean = self.net_obs.snapped_points["dist2node"].mean()
        self.assertAlmostEqual(observed_dist2node_mean, known_dist2node_mean)
####################################################################################
######################## SYNTH-EMPIR RESTRICTED ####################################
####################################################################################
class TestSyntheticObservationsSegmentRandomEmpiricalRestricted(unittest.TestCase):
    """Segment snapping on an empirical network with restricted MTFCC codes."""

    def setUp(self):
        network = copy.deepcopy(network_empirical_simplified)
        # generate synthetic observations
        obs = tigernet.generate_obs(500, network.s_data)
        obs["obs_id"] = obs.index
        # associate observations with the network, excluding restricted segments
        args = network, obs.copy()
        kwargs = {"df_name": "obs1", "df_key": "obs_id"}
        kwargs.update({"restrict_col": "MTFCC"})
        kwargs.update({"remove_restricted": ["S1100", "S1630", "S1640"]})
        self.net_obs = tigernet.Observations(*args, **kwargs)

    @unittest.skipIf(WINDOWS, "Skipping Windows due to precision issues.")
    def test_obs2coords(self):
        # only the last five observations are spot-checked
        known_obs2coords = [
            ((495, 495), (621033.3213594754, 164941.80269090834)),
            ((496, 496), (621819.5720103906, 165514.3885859197)),
            ((497, 497), (623654.2570885622, 164241.2803142736)),
            ((498, 498), (622851.6060250874, 166857.07354681785)),
            ((499, 499), (621816.24144166, 166044.17761455863)),
        ]
        observed_obs2coords = self.net_obs.obs2coords
        for k, v in known_obs2coords:
            observed = numpy.array(observed_obs2coords[k])
            numpy.testing.assert_array_almost_equal(observed, numpy.array(v))

    def test_obs2segm(self):
        known_obs2segm = [(495, 150), (496, 230), (497, 84), (498, 91), (499, 105)]
        observed_obs2segm = list(self.net_obs.obs2segm.items())[-5:]
        self.assertEqual(observed_obs2segm, known_obs2segm)

    @unittest.skipIf(WINDOWS, "Skipping Windows due to precision issues.")
    def test_snapped_points_df_dist_a(self):
        known_dist_a = numpy.array(
            [
                210.40526565933823,
                118.30357725098324,
                34.12778222322711,
                120.39577375386378,
                0.0,
            ]
        )
        observed_dist_a = list(self.net_obs.snapped_points["dist_a"])[-5:]
        numpy.testing.assert_array_almost_equal(
            numpy.array(observed_dist_a), known_dist_a
        )
        known_dist_a_mean = 147.23394614647037
        observed_dist_a_mean = self.net_obs.snapped_points["dist_a"].mean()
        self.assertAlmostEqual(observed_dist_a_mean, known_dist_a_mean)

    @unittest.skipIf(WINDOWS, "Skipping Windows due to precision issues.")
    def test_snapped_points_df_dist_b(self):
        known_dist_b = numpy.array(
            [
                342.6965551431302,
                0.0,
                86.50490751040633,
                58.25005873237134,
                152.0185068774602,
            ]
        )
        observed_dist_b = list(self.net_obs.snapped_points["dist_b"])[-5:]
        numpy.testing.assert_array_almost_equal(
            numpy.array(observed_dist_b), known_dist_b
        )
        known_dist_b_mean = 148.17608136919543
        observed_dist_b_mean = self.net_obs.snapped_points["dist_b"].mean()
        self.assertAlmostEqual(observed_dist_b_mean, known_dist_b_mean)

    @unittest.skipIf(WINDOWS, "Skipping Windows due to precision issues.")
    def test_snapped_points_df_node_a(self):
        known_node_a = [186, 86, 122, 132, 151]
        observed_node_a = list(self.net_obs.snapped_points["node_a"])[-5:]
        self.assertEqual(observed_node_a, known_node_a)

    @unittest.skipIf(WINDOWS, "Skipping Windows due to precision issues.")
    def test_snapped_points_df_node_b(self):
        known_node_b = [193, 245, 48, 133, 22]
        observed_node_b = list(self.net_obs.snapped_points["node_b"])[-5:]
        self.assertEqual(observed_node_b, known_node_b)

    @unittest.skipIf(WINDOWS, "Skipping Windows due to precision issues.")
    def test_snapped_points_df_dist2line(self):
        known_dist2line = numpy.array(
            [
                147.05576410321171,
                298.0459114928476,
                2.914177304108527,
                160.72592517096817,
                300.2025615374258,
            ]
        )
        observed_dist2line = list(self.net_obs.snapped_points["dist2line"])[-5:]
        numpy.testing.assert_array_almost_equal(
            numpy.array(observed_dist2line), known_dist2line
        )
        known_dist2line_mean = 72.28800510090015
        # Fix: local variable was misspelled ``observed_dist2ine_mean``.
        observed_dist2line_mean = self.net_obs.snapped_points["dist2line"].mean()
        self.assertAlmostEqual(observed_dist2line_mean, known_dist2line_mean)
class TestSyntheticObservationsNodeRandomEmpiricalRestricted(unittest.TestCase):
    """Node snapping on an empirical network with restricted MTFCC codes."""

    def setUp(self):
        network = copy.deepcopy(network_empirical_simplified)
        # generate synthetic observations
        obs = tigernet.generate_obs(500, network.s_data)
        obs["obs_id"] = obs.index
        # associate observations with the network, excluding restricted segments
        args = network, obs.copy()
        kwargs = {"df_name": "obs1", "df_key": "obs_id", "snap_to": "nodes"}
        kwargs.update({"restrict_col": "MTFCC"})
        kwargs.update({"remove_restricted": ["S1100", "S1630", "S1640"]})
        self.net_obs = tigernet.Observations(*args, **kwargs)

    @unittest.skipIf(WINDOWS, "Skipping Windows due to precision issues.")
    def test_obs2coords(self):
        # only the last five observations are spot-checked
        known_obs2coords = [
            ((495, 495), (621033.3213594754, 164941.80269090834)),
            ((496, 496), (621819.5720103906, 165514.3885859197)),
            ((497, 497), (623654.2570885622, 164241.2803142736)),
            ((498, 498), (622851.6060250874, 166857.07354681785)),
            ((499, 499), (621816.24144166, 166044.17761455863)),
        ]
        observed_obs2coords = self.net_obs.obs2coords
        for k, v in known_obs2coords:
            numpy.testing.assert_array_almost_equal(
                numpy.array(observed_obs2coords[k]), numpy.array(v)
            )

    def test_obs2node(self):
        known_obs2node = [(495, 192), (496, 245), (497, 122), (498, 133), (499, 151)]
        observed_obs2node = self.net_obs.obs2node
        for k, v in known_obs2node:
            self.assertAlmostEqual(observed_obs2node[k], v)

    @unittest.skipIf(WINDOWS, "Skipping Windows due to precision issues.")
    def test_snapped_points_df_dist2node(self):
        known_dist2node = numpy.array(
            [
                233.41263770566138,
                298.0459114928476,
                34.25197729818704,
                170.95581991959833,
                300.2025615374258,
            ]
        )
        observed_dist2node = list(self.net_obs.snapped_points["dist2node"])[-5:]
        # Fix: dropped redundant double numpy.array(numpy.array(...)) wrapping.
        numpy.testing.assert_array_almost_equal(
            numpy.array(observed_dist2node), known_dist2node
        )
        known_dist2node_mean = 117.96666272251426
        observed_dist2node_mean = self.net_obs.snapped_points["dist2node"].mean()
        self.assertAlmostEqual(observed_dist2node_mean, known_dist2node_mean)
# Allow running this test module directly (``python test_observations_synthetic.py``).
if __name__ == "__main__":
    unittest.main()
|
{"hexsha": "d7b0bbc122ba2a9f6ca919d6d3d5a3e9717e6399", "size": 25773, "ext": "py", "lang": "Python", "max_stars_repo_path": "tigernet/tests/test_observations_synthetic.py", "max_stars_repo_name": "martinfleis/tigernet", "max_stars_repo_head_hexsha": "c3bee46bdd79bb9aa8a98356b4f95f5a1fb4c2b4", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2020-10-22T00:49:25.000Z", "max_stars_repo_stars_event_max_datetime": "2020-12-15T23:41:12.000Z", "max_issues_repo_path": "tigernet/tests/test_observations_synthetic.py", "max_issues_repo_name": "martinfleis/tigernet", "max_issues_repo_head_hexsha": "c3bee46bdd79bb9aa8a98356b4f95f5a1fb4c2b4", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 54, "max_issues_repo_issues_event_min_datetime": "2019-08-28T23:55:30.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-23T19:29:04.000Z", "max_forks_repo_path": "tigernet/tests/test_observations_synthetic.py", "max_forks_repo_name": "jGaboardi/tigernet", "max_forks_repo_head_hexsha": "797fd588a9c95a0a4dd79e2835d37c97932e1b9f", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.2703125, "max_line_length": 85, "alphanum_fraction": 0.616187483, "include": true, "reason": "import numpy", "num_tokens": 6788}
|
# Make our document distances work with the TextAnalysis package
# Default document distance used by every function in this interface when the
# caller does not supply one explicitly.
const DefaultDocDistance = WordMoversDistance()
# Find the closest token with an embedding in a DocDistance. If the
# token itself has no embedding we will try to "clean" it until we find
# an embedding which is likely to carry very similar meaning.
# Find the closest token with an embedding in a DocDistance. If the
# token itself has no embedding we will try to "clean" it until we find
# an embedding which is likely to carry very similar meaning.
function findclosetoken(t::String, dd::AbstractDocumentDistance = DefaultDocDistance;
    skipdot = true, skiphyphen = true, skipcomma = true, makelowercase = true)
    hasembedding(dd, t) && return(t)
    if (skipdot && endswith(t, ".")) || (skiphyphen && endswith(t, "-")) ||
        (skipcomma && endswith(t, ","))
        # Fix: the previous `t[1:prevind(t, length(t), 1)]` mixed a character
        # count (`length`) with a byte index, which strips too much from
        # strings containing multi-byte UTF-8 characters; `chop` removes
        # exactly the last character.
        t = chop(t)
        hasembedding(dd, t) && return(t)
    end
    if makelowercase
        t = lowercase(t)
    end
    # Return nothing to indicate we couldn't find a close token with an embedding
    return hasembedding(dd, t) ? t : nothing
end
# Filter the tokens of a Document so we keep only the ones that have an embedding.
# We try some simple transformations before we give up on finding an embedding.
# Filter the tokens of a Document so we keep only the ones that have an embedding.
# We try some simple transformations before we give up on finding an embedding.
function filtertokens(tokens, dd::AbstractDocumentDistance = DefaultDocDistance;
    skipdot = true, skiphyphen = true, skipcomma = true, makelowercase = true)
    kept = String[]
    for tok in tokens
        cleaned = findclosetoken(tok, dd; skipdot = skipdot, skiphyphen = skiphyphen,
            skipcomma = skipcomma, makelowercase = makelowercase)
        # Tokens for which no embedded variant exists are silently dropped.
        isnothing(cleaned) || push!(kept, cleaned)
    end
    return kept
end
# Rather than filter tokens every time we want to calculate the doc distance
# we can pre-filter and cache the ngrams and then more quickly calculate
# the distance.
# A cached, pre-filtered unigram view of a document: filtering tokens once and
# storing the resulting ngrams avoids re-filtering on every distance calculation.
mutable struct FilteredNGramDocument <: AbstractDocument
    ngramdoc::NGramDocument # NGramDoc where only the filtered/transformed tokens are left
    metadata::TextAnalysis.DocumentMetadata
end

import TextAnalysis.ngrams
# Delegate ngram access to the wrapped, already-filtered NGramDocument.
ngrams(d::FilteredNGramDocument) = ngrams(d.ngramdoc)
# Build a FilteredNGramDocument from any AbstractDocument: filter/clean its
# tokens (dropping those without embeddings in `dd`), then unigram-ize them.
function FilteredNGramDocument(d::AbstractDocument, dd::AbstractDocumentDistance = DefaultDocDistance)
    ftoks = filtertokens(tokens(d), dd)
    # ngramize with n = 1: only unigrams are needed for the distance marginals.
    ngd = NGramDocument(TextAnalysis.ngramize(d.metadata.language, ftoks, 1), 1)
    FilteredNGramDocument(ngd, d.metadata)
end
# Convenience constructor: wrap a raw string in a StringDocument first.
FilteredNGramDocument(s::AbstractString, dd::AbstractDocumentDistance = DefaultDocDistance) =
    FilteredNGramDocument(StringDocument(s), dd)
# Build a FilteredNGramDocument from an existing NGramDocument by filtering
# its token set and re-wrapping the survivors as unigrams.
function FilteredNGramDocument(d::NGramDocument, dd::AbstractDocumentDistance = DefaultDocDistance)
    toks = keys(ngrams(d))
    ftoks = filtertokens(toks, dd)
    ngd = NGramDocument(ftoks, 1)
    # Fix: was `s.metadata` — `s` is undefined in this method; the source
    # document is `d`, so propagate `d.metadata`.
    FilteredNGramDocument(ngd, d.metadata)
end
# Quicker calculation of the marginals and vocabulary since we already have the counts.
# Quicker calculation of the marginals and vocabulary since we already have the counts.
function marginals_and_vocabulary(d1::FilteredNGramDocument, d2::FilteredNGramDocument; usesparse = true)
    # Build one shared vocabulary over the union of both documents' tokens so
    # the two marginal vectors are index-aligned.
    v = make_vocabulary()
    k1 = keys(d1.ngramdoc.ngrams)
    k2 = keys(d2.ngramdoc.ngrams)
    update_vocabulary!(v, k1)
    update_vocabulary!(v, k2)
    # Per-document token frequencies over the shared vocabulary.
    m1 = calc_marginal(d1, k1, v; usesparse = usesparse)
    m2 = calc_marginal(d2, k2, v; usesparse = usesparse)
    return m1, m2, v
end
# Quicker calculation of the marginals since we already have the counts.
# Quicker calculation of the marginals since we already have the counts.
function calc_marginal(doc::FilteredNGramDocument, ks::Base.KeySet, vocabulary::AbstractDict{T, V}; usesparse = true) where {T<:AbstractString, V<:Integer}
    # N (the vector length) is unused here; only the zeroed count vector is needed.
    N, counts = makecountvector(vocabulary, usesparse)
    for k in ks
        if haskey(vocabulary, k)
            idx = vocabulary[k]
            # NOTE(review): `doc.ngramdoc[k]` relies on getindex being defined
            # for NGramDocument; if TextAnalysis does not provide it, this
            # should read `doc.ngramdoc.ngrams[k]` — confirm against the
            # TextAnalysis API in use.
            counts[idx] += doc.ngramdoc[k]
        end
    end
    # Normalise raw counts into relative frequencies.
    return frequencies_from_counts(counts)
end
# Find all pdf files (recursively) in a dir, convert them to text files, then read
# in a corpus representing them. Iff onlyifnew is true then convert to text file
# only if the pdf file is newer than the text file.
#function pdf_directory_corpus(dirname::String;
# recursive = true, onlyifnew = true, wordDistance = WordDistanceCache(),
# ngramdocs = true)
# txtfiles = convert_pdf_files_to_txt(dirname; recursive = recursive, onlyifnew = onlyifnew)
# docs = map(fp -> EmbeddedTokensFileDocument(fp, wordDistance), txtfiles)
# if ngramdocs
# docs = map(NGramDocument, docs)
# end
# return Corpus(docs)
#end
|
{"hexsha": "5b695ff3e6852c3c96322e83611fad5510529126", "size": 4209, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/textanalysis_interface.jl", "max_stars_repo_name": "robertfeldt/DocumentDistances.jl", "max_stars_repo_head_hexsha": "2545b15e0907c23be011351f200b34b397e008ae", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-11-09T14:05:34.000Z", "max_stars_repo_stars_event_max_datetime": "2019-11-09T14:05:34.000Z", "max_issues_repo_path": "src/textanalysis_interface.jl", "max_issues_repo_name": "robertfeldt/DocumentDistances.jl", "max_issues_repo_head_hexsha": "2545b15e0907c23be011351f200b34b397e008ae", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/textanalysis_interface.jl", "max_forks_repo_name": "robertfeldt/DocumentDistances.jl", "max_forks_repo_head_hexsha": "2545b15e0907c23be011351f200b34b397e008ae", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.09, "max_line_length": 155, "alphanum_fraction": 0.7246376812, "num_tokens": 1106}
|
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import tqdm
import random
import copy
from collections import defaultdict
import re
# %% Global Constants
TOL = 1e-5  # |weight| <= TOL counts as zero when measuring sparsity
USE_CUDA = torch.cuda.is_available()
RANDOM_GROUP_ORDER = True  # presumably shuffles group visiting order in training — confirm in the trainer
INTERMEDIATE_ACTIVATION = "relu"  # default activation appended after hidden linear layers
WEIGHT_DECAY = 0  # L2 weight decay passed to the optimiser (disabled)
# %%
class SGIN(nn.Module):
    """Sparsely Grouped Input variables in a Neural network.

    A fully-connected network whose first (bias-free) linear layer is subject
    to group-lasso style sparsity over predefined groups of input features:
    whole groups can be zeroed out (``set_group_to_sparse``) and the group
    penalty (``tau_for_group``) drives further groups toward zero.
    """

    def __init__(self, input_dim, layers=[], groups=[]):
        """The Model for SGIN.

        Args:
            input_dim (int): input dimension.
            layers (list): specifies the structure of this fully-connected
                network. Each element is either an integer (a linear layer
                with that many cells) or a string code: ``"S"`` (sigmoid),
                ``"R"`` (relu), ``"T"`` (tanh), or ``"dropout 0.3"`` (a
                dropout layer with the given probability). Hidden linear
                layers additionally receive the default activation selected
                by the module-level ``INTERMEDIATE_ACTIVATION`` constant.
                (The docstring previously claimed ``"relu"``/``"sigmoid"``
                spellings; the code matches the single-letter codes.)
            groups (list, np.ndarray, or dict): the group definition.
                A list/array of length ``input_dim`` maps each feature index
                to a group id, e.g. ``[0, 0, 1, 1, 1, 2]`` is 6 features in
                3 groups. A dict maps group id -> list of feature indices
                and may describe overlapping groups, e.g.
                ``{0: [0, 1, 2], 1: [0, 1, 3, 5], 2: [2, 4]}``.

        Note: the mutable defaults ``layers=[]`` / ``groups=[]`` are kept for
        interface compatibility; they are never mutated, and an empty
        ``layers`` is rejected by the assertion below anyway.
        """
        super(SGIN, self).__init__()
        assert(len(layers) > 0)
        self.input_dim = input_dim
        self.groups = groups
        # Normalise the group specification into group_id -> [feature ids].
        if type(self.groups) is list or type(self.groups) is np.ndarray:
            self.group_idx = defaultdict(list)
            assert input_dim == len(groups), "The input dimension is different from the size of the group definition."
            for idx, group_id in enumerate(groups):
                self.group_idx[group_id].append(idx)
        elif type(self.groups) is dict or type(self.groups) is defaultdict:
            self.group_idx = copy.deepcopy(groups)
        else:
            raise TypeError("The groups should be one of list, np.array, or dict, but got " +
                            str(type(self.groups)))
        self.sparse_groups = set()     # ids of groups currently forced to zero
        self.sparse_features_arr = []  # feature ids belonging to those groups

        all_layers = []
        prev_l = input_dim
        for idx, l in enumerate(layers):
            # Activation and dropout codes.
            if l == "S":
                all_layers.append(nn.Sigmoid())
                continue
            if l == "R":
                all_layers.append(nn.ReLU())
                continue
            if l == "T":
                all_layers.append(nn.Tanh())
                continue
            if type(l) is str and l.find("dropout") >= 0:
                # Fix: raw string for the regex pattern (``"\d"`` is an
                # invalid escape sequence in a plain string literal).
                p = float(re.findall(r"\d+\.\d+", l)[0])
                all_layers.append(nn.Dropout(p=p))
                continue
            # Linear layers. Note: no bias for the first layer, so a zeroed
            # weight column truly removes the corresponding input feature.
            all_layers.append(nn.Linear(prev_l, l, bias=idx != 0))
            # Default activation for all hidden linear layers.
            if idx != len(layers) - 1:
                if INTERMEDIATE_ACTIVATION == "sigmoid":
                    all_layers.append(nn.Sigmoid())
                elif INTERMEDIATE_ACTIVATION == "relu":
                    all_layers.append(nn.ReLU())
                else:
                    print("Unknown intermediate activation %s. Skip activation here." % INTERMEDIATE_ACTIVATION)
            prev_l = l
        self.layers = nn.Sequential(*all_layers)

    def forward(self, x):
        """Forward pass; ``x`` has the shape [batch_size, input_dim].

        WARNING: the columns of ``x`` belonging to sparse groups are zeroed
        *in place*, so the caller's tensor is modified.
        """
        # Pin the first-layer weights of sparse features at zero — optimiser
        # steps since the last call may have perturbed them.
        self.layers[0].weight.data[:, self.sparse_features_arr] = 0
        # Zero the input values of sparse features as well.
        x[:, self.sparse_features_arr] = 0
        return self.layers(x)

    def set_group_to_sparse(self, group_id):
        """Mark ``group_id`` as sparse: zero its weights and record its features."""
        self.layers[0].weight.data[:, self.sparse_features_arr] = 0
        # We already set this group to sparse in the past, ignore!
        if group_id in self.sparse_groups:
            return
        feature_ids = self.group_idx[group_id]
        # Merge via a set so feature ids stay de-duplicated (matters when
        # groups overlap).
        sparse_features_set = set(self.sparse_features_arr)
        sparse_features_set.update(feature_ids)
        self.sparse_features_arr = list(sparse_features_set)
        self.sparse_groups.add(group_id)

    def tau_for_group(self, group_id):
        r"""Group penalty $\tau_g = \sqrt{p_g \sum_{i \in g} \|w_i\|^2}$.

        ``p_g`` is the number of features in the group, so larger groups are
        penalised more (raw docstring to keep ``\tau`` from being read as a
        tab escape).
        """
        feature_ids = self.group_idx[group_id]
        p_g = len(feature_ids)  # number of features in the group
        return torch.sqrt(p_g * torch.sum(self.layers[0].weight[:, feature_ids] ** 2))

    def regularization_layer_1(self):
        """Return (group, per-feature) regularisation terms for the first layer."""
        w = self.layers[0].weight
        group_regularization = 0
        # Note: this feature regularization can be used for bi-level sparsity
        # However, we do not use it in this experiment
        feature_regularization = 0
        for group_id in self.group_idx.keys():
            feature_ids = self.group_idx[group_id]
            group_regularization += self.tau_for_group(group_id)
            for feature_id in feature_ids:
                feature_regularization += torch.sqrt(torch.sum(w[:, feature_id] ** 2))
        return group_regularization, feature_regularization

    def count_sparsity(self):
        """Count how many features and groups are sparse in the model.

        We use TOL as the threshold (epsilon) for a parameter to be counted
        as zero. The cached ``self.sparse_groups`` / ``self.sparse_features_arr``
        are not consulted directly because SGD might find sparsity by itself;
        instead the first-layer weights are inspected, and any group found to
        be numerically sparse is also *set* to sparse (zeroed exactly).

        Returns:
            tuple: ``(num_sparse_features, num_sparse_groups)``. Note that
            features of a group are only inspected until the first non-sparse
            one, so the feature count is a lower bound (original behavior,
            preserved).
        """
        w = self.layers[0].weight.cpu().detach().numpy()
        num_sparse_features = 0
        num_sparse_groups = 0
        for group_id in self.group_idx.keys():
            curr_group_is_sparse = True
            for feature_id in self.group_idx[group_id]:
                curr_feature_is_sparse = True
                for parameter in w[:, feature_id]:
                    if abs(parameter) > TOL:
                        curr_feature_is_sparse = False
                        break
                if curr_feature_is_sparse:
                    num_sparse_features += 1
                else:
                    curr_group_is_sparse = False
                    break
            # Make sure the group is sparse here
            if curr_group_is_sparse:
                num_sparse_groups += 1
                self.set_group_to_sparse(group_id)
        return num_sparse_features, num_sparse_groups
# %%
def sbcgd_train(model, criterion, dataloader, lr=0.1, lam=0.1, verbose=True):
    r"""
    Train the model with the "Stochastic Blockwise Coordinated Gradient
    Descent" algorithm.

    Args:
        model (torch.nn.Module): the PyTorch model; must expose
            ``group_idx``, ``sparse_groups``, ``input_dim``,
            ``tau_for_group`` and ``set_group_to_sparse``.
        criterion (torch.nn.Module): a PyTorch loss criterion.
        dataloader (torch.utils.data.DataLoader): training data loader.
        lr (float, optional): Initial learning rate. Defaults to 0.1.
        lam (float, optional): Regularization term :math:`\lambda`. Defaults to 0.1.
        verbose (bool, optional): If verbose, print more training details. Defaults to True.

    Returns:
        None.
    """
    if verbose:
        dataloader = tqdm.tqdm(dataloader)
        print("-" * 30, "Begin Stochastic Blockwise Coordinated Gradient Descent")
    if USE_CUDA:
        model = model.cuda()
    groups_ = list(model.group_idx.keys())
    if RANDOM_GROUP_ORDER:
        random.shuffle(groups_)
    losses = []
    for group_id in groups_:
        if len(model.sparse_groups) == len(model.group_idx) - 1:
            return  # Done, sparsified all groups except the remaining one
        feature_ids = model.group_idx[group_id]
        if group_id in model.sparse_groups:
            # This group is sparse (we already throw it away)
            continue
        # The features outside the current block coordinate.
        non_feature_ids = list(set(range(model.input_dim)).difference(feature_ids))
        coordinate_loss = []
        # The following two arrays are used for logging
        losses_this_group = []
        regs_this_group = []
        optimizer = optim.SGD(model.parameters(), lr=lr, weight_decay=WEIGHT_DECAY, momentum=0)
        for x, y in dataloader:
            if USE_CUDA:
                x, y = x.cuda(), y.cuda()
            x = x.float()
            # isinstance is the idiomatic, subclass-safe replacement for the
            # original `type(criterion) is ...` checks.
            if isinstance(criterion, (torch.nn.MSELoss, torch.nn.L1Loss)):
                y = y.float()
                y = y.reshape(-1, 1)
            else:
                y = y.long()
            optimizer.zero_grad()
            pred = model(x)
            pred_loss = criterion(pred, y)
            # The Group Regularization Term we defined
            group_regularization = model.tau_for_group(group_id)
            loss_with_reg = pred_loss + group_regularization * lam
            losses.append(pred_loss.cpu().detach().item())
            losses_this_group.append(pred_loss.cpu().detach().item())
            regs_this_group.append((group_regularization * lam).cpu().detach().item())
            # Coordinate loss: how much the prediction loss grows when this
            # block's features are zeroed out.
            X2 = x.clone()
            X2[:, feature_ids] = 0
            pred2 = model(X2)
            loss2 = criterion(pred2, y)
            err = max((loss2 - pred_loss).item(), 0)
            coordinate_loss.append(err)
            loss_with_reg.backward()
            # Manually zero the gradient of non-relevant features in the
            # first layer only (hence the immediate break).
            for p in model.parameters():
                p.grad[:, non_feature_ids] = 0
                break  # only clear gradient for the first layer
            optimizer.step()
        if np.mean(coordinate_loss) < lam * model.tau_for_group(group_id):
            model.set_group_to_sparse(group_id)
        elif verbose:
            print("No sparse in this group. Avg. Coordinate Loss:", np.mean(coordinate_loss))
        # Check the whole arrays (the original only checked element 0, and its
        # second message contained a "%d" that was never formatted).
        assert not np.isnan(coordinate_loss).any(), "The coordinate loss is NaN. Programming error. Loss function, learning rate, or lambda is not well defined."
        assert not np.isnan(losses_this_group).any(), (
            "Loss for group %s is NaN. Loss function, learning rate, or lambda is not well defined." % group_id)
    print(" >>> Avg. loss in this epoch:", np.mean(losses), "with %d sparse groups" % len(model.sparse_groups))
# %%
def theory_sbcgd_train(model, criterion, dataloader, lr=0.1, lam=0.1, verbose=True):
    r"""
    Train the model with the "Stochastic Blockwise Coordinated Gradient
    Descent with Theoretical Guarantee" algorithm.
    This is the "Algorithm 2" in the paper appendix (i.e. Page 12 in https://arxiv.org/pdf/1911.13068.pdf)
    Args:
        model (torch.nn.Module): the PyTorch model; must expose ``group_idx``,
            ``sparse_groups``, ``input_dim``, ``tau_for_group`` and
            ``set_group_to_sparse``.
        criterion (torch.nn.Module): a PyTorch loss criterion.
        dataloader (torch.utils.data.DataLoader): training data loader.
        lr (float, optional): Initial learning rate. Defaults to 0.1.
        lam (float, optional): Regularization term $\lambda$. Defaults to 0.1.
        verbose (bool, optional): If verbose, print more training details. Defaults to True.
    Returns:
        None.
    """
    if verbose:
        dataloader = tqdm.tqdm(dataloader)
        print("-" * 30, "Begin sbcgd Coordinate Descent")
    if USE_CUDA:
        model = model.cuda()
    groups_ = list(model.group_idx.keys())
    if RANDOM_GROUP_ORDER:
        random.shuffle(groups_)
    losses = []
    for group_id in groups_:
        if len(model.sparse_groups) == len(model.group_idx) - 1:
            return # Done, sparsified all groups except the remaining one
        feature_ids = model.group_idx[group_id]
        if group_id in model.sparse_groups:
            # This group is sparse (we already throw it away)
            continue
        # The features that we don't care for this block coordinate
        non_feature_ids = list(set(range(model.input_dim)).difference(feature_ids))
        optimizer = optim.SGD(model.parameters(), lr=lr, weight_decay=WEIGHT_DECAY, momentum=0)
        # Phase 1: one optimization pass over the data for this block.
        for x, y in dataloader:
            if USE_CUDA:
                x, y = x.cuda(), y.cuda()
            x = x.float()
            if type(criterion) is torch.nn.modules.loss.MSELoss or type(criterion) is torch.nn.modules.loss.L1Loss:
                y = y.float()
                # NOTE(review): unlike sbcgd_train, y is NOT reshaped to
                # (-1, 1) here -- confirm whether that is intentional.
            else:
                y = y.long()
            optimizer.zero_grad()
            pred = model(x)
            pred_loss = criterion(pred, y)
            group_regularization = model.tau_for_group(group_id)
            loss_with_reg = pred_loss + group_regularization * lam
            loss_with_reg.backward()
            # manually set gradient for non-relevant features in the first layer to zero
            for p in model.parameters():
                p.grad[:, non_feature_ids] = 0
                break # only clear gradient for the first layer
            optimizer.step()
        # The following two arrays are used for logging
        losses_this_group = []
        regs_this_group = []
        coordinate_loss = []
        # Phase 2: evaluation pass measuring the coordinate loss (how much
        # the loss grows when this block's features are zeroed out).
        # NOTE(review): if the dataloader is empty, `group_regularization`
        # below is never assigned in phase 1 and this raises NameError.
        for x, y in dataloader:
            if USE_CUDA:
                x, y = x.cuda(), y.cuda()
            x = x.float()
            if type(criterion) is torch.nn.modules.loss.MSELoss or type(criterion) is torch.nn.modules.loss.L1Loss:
                y = y.float()
            else:
                y = y.long()
            optimizer.zero_grad()
            pred = model(x)
            pred_loss = criterion(pred, y)
            losses.append(pred_loss.cpu().detach().item())
            losses_this_group.append(pred_loss.cpu().detach().item())
            # group_regularization here is the value from the LAST batch of
            # phase 1; it is used for logging only.
            regs_this_group.append((group_regularization * lam).cpu().detach().item())
            # Calculate Loss for this coordinate
            X2 = x.clone()
            X2[:, feature_ids] = 0
            pred2 = model(X2)
            loss2 = criterion(pred2, y)
            err = max((loss2 - pred_loss).item(), 0)
            coordinate_loss.append(err)
        if np.mean(coordinate_loss) < lam * model.tau_for_group(group_id):
            model.set_group_to_sparse(group_id)
        elif verbose:
            print("No sparse in this group. Avg. Coordinate Loss:", np.mean(coordinate_loss))
    print(" >>> Avg. loss in this epoch:", np.mean(losses), "with %d sparse groups" % len(model.sparse_groups))
# %%
def sgd_train(model, criterion, dataloader, lr=0.1, lam=0.1, verbose=True):
    r"""
    Train the model with the standard "Stochastic Gradient Descent" algorithm.

    This function is NOT recommended for real projects; it exists only for
    comparison with the other algorithms.  Use sbcgd_train(...), which
    provides faster and better performance.

    Args:
        model (torch.nn.Module): the PyTorch model; must expose
            ``regularization_layer_1`` and ``sparse_groups``.
        criterion (torch.nn.Module): a PyTorch loss criterion.
        dataloader (torch.utils.data.DataLoader): training data loader.
        lr (float, optional): Initial learning rate. Defaults to 0.1.
        lam (float, optional): Regularization term :math:`\lambda`. Defaults to 0.1.
        verbose (bool, optional): If verbose, print more training details. Defaults to True.

    Returns:
        None.
    """
    if verbose:
        dataloader = tqdm.tqdm(dataloader)
        print("-" * 30, "Begin Standard SGD")
    if USE_CUDA:
        model = model.cuda()
    optimizer = optim.SGD(model.parameters(), lr=lr, weight_decay=WEIGHT_DECAY, momentum=0)
    losses = []
    # Logging accumulators, kept for parity with the other trainers.
    losses_this_group = []
    regs_this_group = []
    for x, y in dataloader:
        if USE_CUDA:
            x, y = x.cuda(), y.cuda()
        x = x.float()
        optimizer.zero_grad()
        pred = model(x)
        # isinstance is the idiomatic, subclass-safe replacement for the
        # original `type(criterion) is ...` checks.
        if isinstance(criterion, (torch.nn.MSELoss, torch.nn.L1Loss)):
            y = y.float()
        else:
            y = y.long()
        pred_loss = criterion(pred, y)
        group_regularization, _ = model.regularization_layer_1()
        loss_with_reg = pred_loss + group_regularization * lam
        loss_with_reg.backward()
        losses.append(pred_loss.cpu().detach().item())
        losses_this_group.append(pred_loss.cpu().detach().item())
        regs_this_group.append((group_regularization * lam).cpu().detach().item())
        optimizer.step()
    print(" >>> Avg. loss in this epoch:", np.mean(losses), "with %d sparse groups" % len(model.sparse_groups))
|
{"hexsha": "02eea5dc036869aecd4a6a94d178fd8383357c73", "size": 18014, "ext": "py", "lang": "Python", "max_stars_repo_path": "sgin_model.py", "max_stars_repo_name": "BeibinLi/SGIN", "max_stars_repo_head_hexsha": "e73f64c9111b0841f3324459114c396847b9ccbe", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "sgin_model.py", "max_issues_repo_name": "BeibinLi/SGIN", "max_issues_repo_head_hexsha": "e73f64c9111b0841f3324459114c396847b9ccbe", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "sgin_model.py", "max_forks_repo_name": "BeibinLi/SGIN", "max_forks_repo_head_hexsha": "e73f64c9111b0841f3324459114c396847b9ccbe", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-12-04T10:13:20.000Z", "max_forks_repo_forks_event_max_datetime": "2020-12-04T10:13:20.000Z", "avg_line_length": 36.4655870445, "max_line_length": 158, "alphanum_fraction": 0.5696125236, "include": true, "reason": "import numpy", "num_tokens": 3857}
|
```python
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
%load_ext autoreload
%autoreload 2
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.pylab as pylab
from simplified_monorotor import Monorotor
import plotting
import testing
import trajectories
pylab.rcParams['figure.figsize'] = 10,10
```
# Feedforward control
The feedforward term captures the acceleration of the target path and adds to the $\bar{u}_1$ calculated by PD controller.
$$
\ddot{z} = \bar{u}_1 = k_p(z_t-z)+k_d(\dot{z}_t-\dot{z})+\ddot{z}_t
$$
#### TODO - Implement PD controller WITH feed forward acceleration
Modify `thrust_control` to incorporate the feedforward term into the PD Controller math.
$$
\begin{align}
e &= z_{\text{target}} - z_{\text{actual}} \\
\dot{e} &= \dot{z}_{\text{target}} - \dot{z}_{\text{actual}} \\
\bar{u}_1 &= k_p e + k_d \dot{e} + \ddot{z}_{\text{ff}} \\
u_1 &= m(g - \bar{u}_1)
\end{align}
$$
```python
class PDController:
    """PD controller with optional feed-forward acceleration for a monorotor."""

    def __init__(self, k_p, k_d, m):
        self.k_p = k_p
        self.k_d = k_d
        self.vehicle_mass = m
        self.g = 9.81

    def thrust_control(self,
                       z_target,
                       z_actual,
                       z_dot_target,
                       z_dot_actual,
                       z_dot_dot_ff=0.0):
        """Return thrust u = m*(g - u_bar), where
        u_bar = k_p*e + k_d*e_dot + z_dot_dot_ff.
        """
        position_error = z_target - z_actual
        velocity_error = z_dot_target - z_dot_actual
        u_bar = (self.k_p * position_error
                 + self.k_d * velocity_error
                 + z_dot_dot_ff)
        return self.vehicle_mass * (self.g - u_bar)
testing.pd_controller_test(PDController, feed_forward=True)
```
Tests pass
#### TODO 2 - Compare trajectories with and without a feedforward term
The code below generates plots of $z$ vs $t$ for two drones. One uses FF and the other doesn't.
Run the code and compare the two trajectories. What happens if you increase the oscillation frequency to 10? What happens if you decrease it to 2?
You should notice a **lag** in the system response: the trajectory without the feedforward term should lag behind the desired trajectory in time. This effect diminishes as the oscillation frequency decreases.
```python
# This code simulates TWO drones. One uses the feed forward
# acceleration and the other doesn't. Note the difference in
# trajectories.

MASS_ERROR = 1.0
K_P = 20.0
K_D = 8.0

AMPLITUDE = 0.5
OSCILLATION_FREQUENCY = 10
PERIOD = 2 * np.pi / OSCILLATION_FREQUENCY

# preparation (TWO drones to compare)
drone = Monorotor()
ff_drone = Monorotor()
perceived_mass = drone.m * MASS_ERROR

# instantiate TWO controllers
controller = PDController(K_P, K_D, perceived_mass)
ff_controller = PDController(K_P, K_D, perceived_mass)

# get trajectories
t, z_path, z_dot_path, z_dot_dot_path = trajectories.cosine(AMPLITUDE,
                                                            PERIOD,
                                                            duration=6.0)
dt = t[1] - t[0]

# run simulation
history = []
ff_history = []
for z_target, z_dot_target, z_dot_dot_ff in zip(z_path,
                                                z_dot_path,
                                                z_dot_dot_path):
    z_actual = drone.z
    z_dot_actual = drone.z_dot

    ff_z_actual = ff_drone.z
    ff_z_dot_actual = ff_drone.z_dot

    # FIX: the feed-forward command must come from ff_controller; the
    # original queried `controller` here (equivalent only because both
    # controllers happen to share identical gains and mass).
    u_ff = ff_controller.thrust_control(z_target, ff_z_actual,
                                        z_dot_target, ff_z_dot_actual,
                                        z_dot_dot_ff)
    u = controller.thrust_control(z_target, z_actual,
                                  z_dot_target, z_dot_actual)

    drone.thrust = u
    ff_drone.thrust = u_ff

    drone.advance_state(dt)
    ff_drone.advance_state(dt)

    history.append(drone.X)
    ff_history.append(ff_drone.X)

# generate plots
z_actual = [h[0] for h in history]
z_ff_actual = [h[0] for h in ff_history]
plotting.compare_planned_to_actual(z_actual, z_path, t,
                                   z_ff_actual)
```
[Solution](/notebooks/PD%20with%20FF%20Solution.ipynb)
|
{"hexsha": "1057a359c1336750feee56326c32130539e5b8a0", "size": 108260, "ext": "ipynb", "lang": "Jupyter Notebook", "max_stars_repo_path": "Course03/12-4. PD with FF.ipynb", "max_stars_repo_name": "thhuang/notes-fcnd", "max_stars_repo_head_hexsha": "c5b0ec7d99df3cb60a850308d16ccc6c096c7931", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-12-14T09:34:36.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-14T09:34:36.000Z", "max_issues_repo_path": "Course03/12-4. PD with FF.ipynb", "max_issues_repo_name": "thhuang/notes-fcnd", "max_issues_repo_head_hexsha": "c5b0ec7d99df3cb60a850308d16ccc6c096c7931", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Course03/12-4. PD with FF.ipynb", "max_forks_repo_name": "thhuang/notes-fcnd", "max_forks_repo_head_hexsha": "c5b0ec7d99df3cb60a850308d16ccc6c096c7931", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2018-10-26T04:06:23.000Z", "max_forks_repo_forks_event_max_datetime": "2018-10-26T04:06:23.000Z", "avg_line_length": 474.8245614035, "max_line_length": 101556, "alphanum_fraction": 0.9346942546, "converted": true, "num_tokens": 1064}
|
"""
Analyze TCIA-GBM annotations
"""
import os
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
data_root = 'D:/Datasets/TCIA-GBM/'
def homogeneous(df):
    """Binarize the quality columns of *df* in place.

    Non-null entries of the 'good'/'ugly'/'bad' columns become 1 and null
    entries become 0, matching what the explore_* functions do.  The original
    chained-indexing assignments (``df[cols].loc[...] = ...``) operated on
    temporary copies and therefore had no effect on *df*.

    Args:
        df (pandas.DataFrame): frame containing 'good', 'ugly', 'bad' columns.
    """
    cols = ['good', 'ugly', 'bad']
    df[cols] = df[cols].notnull().astype(int)
def explore_tcia_annotations(xl_file_name):
    """Print per-reader value counts for a TCIA-GBM annotation workbook.

    Args:
        xl_file_name (str): Excel file name under ``data_root``.  The
            original immediately overwrote this argument with a hard-coded
            'TCIA-GBM-2.xlsx'; that override has been removed so the
            parameter is actually honored.
    """
    xl_file_path = data_root + xl_file_name
    df1 = pd.read_excel(xl_file_path, sheet_name='Reader 1', usecols=[0, 1, 2, 3, 4])
    df2 = pd.read_excel(xl_file_path, sheet_name='Reader 2', usecols=[0, 1, 2, 3, 4])
    df3 = pd.read_excel(xl_file_path, sheet_name='Reader 3', usecols=[0, 1, 2, 3, 4])
    # Binarize: non-null annotation marks -> 1, null -> 0.
    for df in (df1, df2, df3):
        df[['good', 'ugly', 'bad']] = df[['good', 'ugly', 'bad']].notnull().astype('int')
    # Reader 3 ids carry a 5-character prefix; strip it so the ids align
    # with the other readers before reindexing.
    df3['id'] = df3['id'].apply(lambda x: ''.join(x[5:]))
    df1 = df1.set_index('id')
    df2 = df2.set_index('id')
    df3 = df3.set_index('id')
    df3 = df3.reindex(df1.index)
    # Same print order as before: all three columns per reader, reader by reader.
    for df in (df1, df2, df3):
        for col in ('good', 'ugly', 'bad'):
            print(df[col].value_counts())
def explore_tcia_2(xl_file_name):
    """Summarize two readers' QA ratings and plot them as grouped bars.

    Reads the 'Reader 1' and 'Reader 2' sheets from the workbook under
    ``data_root``, binarizes the quality columns, prints value counts and a
    ratings table, then shows a bar chart of subjects per rating category.
    """
    path = data_root + xl_file_name
    cols = ['good', 'ugly', 'bad']
    readers = []
    for sheet in ('Reader 1', 'Reader 2'):
        frame = pd.read_excel(path, sheet_name=sheet, usecols=[0, 1, 2, 3])
        # Non-null annotation marks -> 1, null -> 0.
        frame[cols] = frame[cols].notnull().astype('int')
        readers.append(frame.set_index('id'))
    for frame in readers:
        for col in cols:
            print(frame[col].value_counts())
    # Number of subjects rated 1 in each category, per reader.
    counts = [[frame[col].value_counts()[1] for col in cols] for frame in readers]
    ratings = pd.DataFrame(counts,
                           columns=['Acceptable', 'Issues', 'Unacceptable'],
                           index=['Rater 1', 'Rater 2'])
    print(ratings)
    ax = ratings.T.plot.bar(rot=0)
    ax.set_xlabel('Quality Rating Category')
    ax.set_ylabel('Number of subjects')
    ax.set_title('Rater QA for TCIA-GBM')
    plt.show()
if __name__=='__main__':
xl_file_name = 'TCIA-GBM-3.xlsx'
explore_tcia_2(xl_file_name)
|
{"hexsha": "b41a5ae55a8ff1de47d264d0f7436da2ff333f1e", "size": 3182, "ext": "py", "lang": "Python", "max_stars_repo_path": "sandbox/analyze_tcia_annotations.py", "max_stars_repo_name": "ashish-code/MI-DQA", "max_stars_repo_head_hexsha": "1d4d5c2fa148aa7229cc9089adc982ab0d33e631", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "sandbox/analyze_tcia_annotations.py", "max_issues_repo_name": "ashish-code/MI-DQA", "max_issues_repo_head_hexsha": "1d4d5c2fa148aa7229cc9089adc982ab0d33e631", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "sandbox/analyze_tcia_annotations.py", "max_forks_repo_name": "ashish-code/MI-DQA", "max_forks_repo_head_hexsha": "1d4d5c2fa148aa7229cc9089adc982ab0d33e631", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.1960784314, "max_line_length": 87, "alphanum_fraction": 0.6294783155, "include": true, "reason": "import numpy", "num_tokens": 1030}
|
function sub_sst = extract_sst(sst,t1,t2,edge_mode)
%EXTRACT_SST: Extract event SST (Start/Stop Times) between t1 and t2
%
%USAGE: sub_sst = extract_sst(sst,t1,t2,edge_mode)
%
%INPUTS: sst - Event Start/Stop Times [Nx2 array of numeric time values],
%              or a cell array of such arrays (processed element-wise)
%        t1  - start of the extraction window
%        t2  - end of the extraction window
%        edge_mode - how to treat an event straddling t1 or t2:
%              'delete' - whole event is removed from array sub_sst
%              'keep'   - whole event is retained in array sub_sst
%              'part'   - event is split, i.e. truncated at t1 or t2
%
%OUTPUTS: sub_sst - Subset of original sst (Event Start/Stop Times)
%                   [Nx2 array of numeric time values]
%
% See also ADD_SST, CHK_T, COMPARE_SST, DELETE_SST, EXTRACT_SST, IS_SST,
%   ISTEQUAL, MERGE_SST, SEARCH_SST, SORT_SST, NAN2SST, SSD2SST,
%   SST2NAN, SST2SSD, SST2VAL, SST2WFA, WFA2SST
%
% Author: Dane Ketner, Alaska Volcano Observatory
% $Date$
% $Revision$

% Guard clauses: validate argument count and SST shape up front.
if nargin < 4
   error('EXTRACT_SST: Too few input arguments')
elseif nargin > 4
   error('EXTRACT_SST: Too many input arguments')
end
if ~is_sst(sst)
   error('EXTRACT_SST: Not a valid Start/Stop Time Argument')
end

if iscell(sst)
   % Apply the worker element-wise to each SST array in the cell.
   for n = 1:numel(sst)
      sub_sst{n} = EXTRACT_SST(sst{n},t1,t2,edge_mode);
   end
else
   sub_sst = EXTRACT_SST(sst,t1,t2,edge_mode);
end
%%
function sub_sst = EXTRACT_SST(sst,t1,t2,edge_mode)
% Core worker for a single Nx2 SST array; the public wrapper handles cells.
[N1 P1] = search_sst(t1,sst);
[N2 P2] = search_sst(t2,sst);
% 'first' and 'last' refer to the first and last events within the time
% span defined by t1 and t2. N1 is the event number within sst
% corresponding to t1, and P1 is the corresponding event position with
% relation to t1. P1 = 1 if t1 falls inside the event time. P1 = 0 if t1
% is before the start of the event.
% NOTE(review): if edge_mode is not one of 'part'/'delete'/'keep' while
% P1 == 1 (or P2 == 1), 'first' (or 'last') is never assigned and the
% final concatenation errors -- consider validating edge_mode earlier.
if P1 == 1
   if strcmpi(edge_mode,'part')
      first = [t1 sst(N1,2)];   % truncate the straddling event at t1
   elseif strcmpi(edge_mode,'delete')
      first = [];               % drop the straddling event entirely
   elseif strcmpi(edge_mode,'keep')
      first = sst(N1,:);        % keep the straddling event whole
   end
elseif P1 == 0
   first = sst(N1,:);           % t1 precedes event N1; keep it whole
end
if P2 == 1
   if strcmpi(edge_mode,'part')
      last = [sst(N2,1) t2];    % truncate the straddling event at t2
   elseif strcmpi(edge_mode,'delete')
      last = [];
   elseif strcmpi(edge_mode,'keep')
      last = sst(N2,:);
   end
elseif P2 == 0
   last = [];                   % t2 precedes event N2; exclude it
end
% Assemble: edge events plus all fully-contained events between them.
sub_sst = [first; sst(N1+1:N2-1,:); last];
|
{"author": "geoscience-community-codes", "repo": "GISMO", "sha": "a4eafca9d2ac85079253510005ef00aa9998d030", "save_path": "github-repos/MATLAB/geoscience-community-codes-GISMO", "path": "github-repos/MATLAB/geoscience-community-codes-GISMO/GISMO-a4eafca9d2ac85079253510005ef00aa9998d030/deprecated/@helicorder/private/SST/extract_sst.m"}
|
[STATEMENT]
lemma And_append_syntactic:
"xs \<noteq> [] \<Longrightarrow> And (xs @ ys) = And ((And xs)#ys)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. xs \<noteq> [] \<Longrightarrow> And (xs @ ys) = And (And xs # ys)
[PROOF STEP]
by (induction xs rule: list_nonempty_induct) simp+
|
{"llama_tokens": 117, "file": "LTL_to_DRA_LTL_FGXU", "length": 1}
|
/-
Copyright (c) 2022 Kevin Buzzard. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Author : Kevin Buzzard
-/
import tactic -- imports all the Lean tactics
import data.real.basic -- imports the real numbers
/-
# Figuring out how to use the reals
## The `library_search` tactic
We saw in the previous sheet that we couldn't even prove something
as simple as "if `aₙ → L` then `-aₙ → -L`" because when you write down
the proof carefully, it relies on the fact that `|x - y| = |y - x|`
or, equivalently, that `|(-x)| = |x|`. I say "equivalently" because
`ring` will prove that `-(x - y) = y - x`.
You don't want to be proving stuff like `|x - y| = |y - x|` from first
principles. Someone else has already done all the hard work for you.
All you need to do is to learn how to find out the names of the lemmas.
The `library_search` tactic tells you the names of all these lemmas.
See where it says "try this" -- click there and Lean will replace
`library_search` with the actual name of the lemma. Once you've done
that, hover over the lemma name to see in what generality it holds.
## The `linarith` tactic
Some of the results below are bare inequalities which are too complex
to be in the library. The library contains "natural" or "standard"
results, but it doesn't contain a random inequality fact just because
it happens to be true -- the library just contains "beautiful" facts.
The `linarith` tactic is a tactic which can solve some equalities and inequalities
in ordered structures like the naturals or reals. Unlike `ring`, `linarith`
does look at hypotheses in the tactic state. For example if you have
hypotheses `h1 : a < b` and `h2 : b ≤ c` then `linarith` would prove
a goal of `⊢ a < c`.
However `linarith` doesn't know about anything other than `=`, `≠`,
`<` and `≤`, so don't expect it to prove any results about `|x|` or
`max A B`.
Experiment with the `library_search` and `linarith` tactics below.
Try and learn something about the naming convention which Lean uses;
see if you can start beginning to guess what various lemmas should be called.
-/
example (x : ℝ) : |(-x)| = |x| :=
begin
  -- Hint: try `library_search`; the mathlib lemma is likely `abs_neg`.
  sorry
end

example (x y : ℝ) : |x - y| = |y - x| :=
begin
  -- Hint: likely `abs_sub_comm` (named `abs_sub` in older mathlib) -- confirm.
  sorry
end

example (A B C : ℕ) : max A B ≤ C ↔ A ≤ C ∧ B ≤ C :=
begin
  -- Hint: likely `max_le_iff`.
  sorry
end

example (x y : ℝ) : |x| < y ↔ -y < x ∧ x < y :=
begin
  -- Hint: likely `abs_lt`.
  sorry
end

example (ε : ℝ) (hε : 0 < ε) : 0 < ε / 2 :=
begin
  -- Hint: `linarith` closes this; `library_search` may suggest `half_pos hε`.
  sorry,
end

example (a b x y : ℝ) (h1 : a < x) (h2 : b < y) : a + b < x + y :=
begin
  -- Hint: `linarith` uses h1 and h2 directly (or try `add_lt_add h1 h2`).
  sorry,
end

example (ε : ℝ) (hε : 0 < ε) : 0 < ε / 3 :=
begin
  -- Hint: `linarith` closes this.
  sorry,
end

example (a b c d x y : ℝ) (h1 : a + c < x) (h2 : b + d < y) :
  a + b + c + d < x + y :=
begin
  -- Hint: `linarith` combines h1 and h2 (note a + b + c + d = (a+c) + (b+d)).
  sorry
end
|
{"author": "ImperialCollegeLondon", "repo": "formalising-mathematics-2022", "sha": "af5f176b3b881b7bc0ae89b55befe48c9d4ab790", "save_path": "github-repos/lean/ImperialCollegeLondon-formalising-mathematics-2022", "path": "github-repos/lean/ImperialCollegeLondon-formalising-mathematics-2022/formalising-mathematics-2022-af5f176b3b881b7bc0ae89b55befe48c9d4ab790/src/section02reals/sheet4.lean"}
|
#### GitHub Actions

# Root type for the GitHub Actions workflow generators below; each concrete
# subtype renders its workflow file content via a `yaml(...)` method.
abstract type GitHubAction end

"""
    DocBuild

# Fields
$(DocStringExtensions.FIELDS)
"""
Base.@kwdef struct DocBuild <: GitHubAction
    "Build name"
    build_name::String
    "Julia version"
    julia_version::String
end

# Render the Documenter.jl build/deploy workflow.  `$(...)` interpolates the
# job name and Julia version; `\$` escapes keep literal `${{ ... }}` GitHub
# Actions expressions intact in the emitted YAML.
function yaml(gha::DocBuild)
    return "name: Documentation
on:
  push:
    branches:
      - master
      - trying
      - staging
    tags: '*'
  pull_request:
jobs:
  $(gha.build_name):
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - uses: julia-actions/setup-julia@latest
        with:
          version: $(gha.julia_version)
      - name: Install dependencies
        run: julia --project=docs/ -e 'using Pkg; Pkg.develop(PackageSpec(path=pwd())); Pkg.instantiate()'
      - name: Build and deploy
        env:
          GITHUB_TOKEN: \${{ secrets.GITHUB_TOKEN }} # For authentication with GitHub Actions token
          DOCUMENTER_KEY: \${{ secrets.DOCUMENTER_KEY }} # For authentication with SSH deploy key
        run: julia --project=docs/ docs/make.jl
"
end
# Marker type for the JuliaRegistries TagBot workflow (no configuration).
Base.@kwdef struct TagBot <: GitHubAction
end

# Render the hourly TagBot workflow; `\$` escapes keep the literal
# `${{ ... }}` GitHub Actions expression intact.
function yaml(::TagBot)
    return "name: TagBot
on:
  schedule:
    - cron: 0 * * * *
jobs:
  TagBot:
    runs-on: ubuntu-latest
    steps:
      - uses: JuliaRegistries/TagBot@v1
        with:
          token: \${{ secrets.GITHUB_TOKEN }}
"
end
# Configuration for the daily CompatHelper workflow.
Base.@kwdef struct CompatHelper <: GitHubAction
    # Julia version used to run CompatHelper.
    julia_version::String
end

# Render the CompatHelper workflow; `$(...)` interpolates the Julia version,
# `\$`/`\"` escapes keep literal YAML text intact.
function yaml(gha::CompatHelper)
    return "name: CompatHelper
on:
  schedule:
    - cron: '00 00 * * *'
jobs:
  CompatHelper:
    runs-on: ubuntu-latest
    steps:
      - uses: julia-actions/setup-julia@latest
        with:
          version: $(gha.julia_version)
      - name: Pkg.add(\"CompatHelper\")
        run: julia -e 'using Pkg; Pkg.add(\"CompatHelper\")'
      - name: CompatHelper.main()
        env:
          GITHUB_TOKEN: \${{ secrets.GITHUB_TOKEN }}
        run: julia -e 'using CompatHelper; CompatHelper.main()'
"
end
# Configuration for the nightly coverage workflow.
Base.@kwdef struct CodeCov <: GitHubAction
    "Package name (with extension)"
    pkg_name::String
    "Julia version"
    julia_version::String
end

# Render the scheduled coverage run; tests with coverage enabled, converts
# results to LCOV via Coverage.jl, and uploads to Codecov.
function yaml(gha::CodeCov)
    return "name: CodeCov
on:
  schedule:
    # * is a special character in YAML so you have to quote this string
    # Run at 2am every day:
    - cron: '0 2 * * *'
jobs:
  coverage:
    runs-on: ubuntu-16.04
    strategy:
      matrix:
        julia-version: ['$(gha.julia_version)']
        project: ['$(gha.pkg_name)']
    steps:
      - uses: actions/checkout@v1.0.0
      - name: \"Set up Julia\"
        uses: julia-actions/setup-julia@v1
        with:
          version: \${{ matrix.julia-version }}
      - name: Install deps
        run: |
          set -o xtrace
          sudo apt-get update
      - name: Test with coverage
        env:
          JULIA_PROJECT: \"@.\"
        run: |
          julia --project=@. -e 'using Pkg; Pkg.instantiate()'
          julia --project=@. -e 'using Pkg; Pkg.test(coverage=true)'
      - name: Generate coverage file
        env:
          JULIA_PROJECT: \"@.\"
        run: julia --project=@. -e 'using Pkg; Pkg.add(\"Coverage\");
          using Coverage;
          LCOV.writefile(\"coverage-lcov.info\", Codecov.process_folder())'
        if: success()
      - name: Submit coverage
        uses: codecov/codecov-action@v1.0.2
        with:
          token: \${{secrets.CODECOV_TOKEN}}
        if: success()
"
end
# Configuration for the JuliaFormatter pull-request check.
Base.@kwdef struct FormatterCheck <: GitHubAction
    # Julia version used to run the formatter.
    julia_version::String
end

# Render a PR workflow that applies JuliaFormatter (via .dev/format.jl) and
# fails if the formatted tree differs from the committed one.
function yaml(gha::FormatterCheck)
    return "name: JuliaFormatter
on: [pull_request]
jobs:
  format:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
        with:
          fetch-depth: 0
      - run: git fetch origin
      - uses: julia-actions/setup-julia@latest
        with:
          version: $(gha.julia_version)
      - name: Apply JuliaFormatter
        run: |
          julia --project=.dev .dev/format.jl
      - name: Check formatting diff
        run: |
          git diff --color=always --exit-code
"
end
|
{"hexsha": "2734342526e58d2710cd0230251967a91bb969e3", "size": 4021, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/github_actions.jl", "max_stars_repo_name": "charleskawczynski/PkgTemplates", "max_stars_repo_head_hexsha": "40415cfa561d50618a02ce4eca1da981af511bb9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/github_actions.jl", "max_issues_repo_name": "charleskawczynski/PkgTemplates", "max_issues_repo_head_hexsha": "40415cfa561d50618a02ce4eca1da981af511bb9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/github_actions.jl", "max_forks_repo_name": "charleskawczynski/PkgTemplates", "max_forks_repo_head_hexsha": "40415cfa561d50618a02ce4eca1da981af511bb9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.0934065934, "max_line_length": 106, "alphanum_fraction": 0.6023377269, "num_tokens": 1130}
|
"""Build script for benepar (Berkeley Neural Parser)."""
from setuptools import setup, Extension, dist
import setuptools
import sys

# Fetch build-time requirements so the imports below can succeed even in a
# bare environment.
dist.Distribution().fetch_build_eggs(['Cython>=0.15.1', 'numpy>=1.10'])

try:
    from Cython.Build import cythonize
    USE_CYTHON = True
except ImportError:
    sys.exit("""Could not import Cython, which is required to build benepar extension modules.
Please install cython and numpy prior to installing benepar.""")

try:
    import numpy as np
except ImportError:
    sys.exit("""Could not import numpy, which is required to build the extension modules.
Please install cython and numpy prior to installing benepar.""")

with open("README.md", "r") as f:
    long_description = f.read()

# Cythonize once.  The original passed the already-built extension list
# through cythonize() a second time in the setup() call, which was redundant.
extensions = cythonize("benepar/*.pyx")
for ext_module in extensions:
    # Extension modules need the NumPy C headers.
    ext_module.include_dirs.append(np.get_include())

setuptools.setup(
    name="benepar",
    version="0.1.2",
    author="Nikita Kitaev",
    author_email="kitaev@cs.berkeley.edu",
    description="Berkeley Neural Parser",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/nikitakit/self-attentive-parser",
    packages=setuptools.find_packages(),
    package_data={'': ['*.pyx']},
    ext_modules=extensions,
    classifiers=(
        'Programming Language :: Python :: 2.7',
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: Apache Software License",
        "Operating System :: OS Independent",
        "Topic :: Scientific/Engineering :: Artificial Intelligence",
        "Topic :: Text Processing :: Linguistic",
    ),
    setup_requires=["cython", "numpy"],
    install_requires=["cython", "numpy", "nltk>=3.2"],
    extras_require={
        "cpu": ["tensorflow>=1.11.0"],
        "gpu": ["tensorflow-gpu>=1.11.0"],
        "spacy": ["spacy>=2.0.9"],
    },
)
|
{"hexsha": "0ef0013470b4a5cb4d405de1f60ccf84d34c4bec", "size": 1826, "ext": "py", "lang": "Python", "max_stars_repo_path": "setup.py", "max_stars_repo_name": "samghelms/self-attentive-parser", "max_stars_repo_head_hexsha": "2d6c9dc0fc95a0c46264a91a225fd16a788496f6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "setup.py", "max_issues_repo_name": "samghelms/self-attentive-parser", "max_issues_repo_head_hexsha": "2d6c9dc0fc95a0c46264a91a225fd16a788496f6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "setup.py", "max_forks_repo_name": "samghelms/self-attentive-parser", "max_forks_repo_head_hexsha": "2d6c9dc0fc95a0c46264a91a225fd16a788496f6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-05-05T13:53:48.000Z", "max_forks_repo_forks_event_max_datetime": "2020-05-05T13:53:48.000Z", "avg_line_length": 33.2, "max_line_length": 94, "alphanum_fraction": 0.6730558598, "include": true, "reason": "import numpy", "num_tokens": 441}
|
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 11 17:01:28 2021
@author: gawe
Functions dealing with rectangular patch antenna.
"""
import math
import numpy as np
from math import cos, sin, sqrt, pi, log10, atan2, acos, radians
from scipy import integrate
import scipy.integrate
import json
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
# from pybaseutils.utils import sph2cart as sph2cart1
# from pybaseutils.utils import cart2sph as cpatchart2sph1
# constants
light_velocity = 299792458  # speed of light in vacuum (m/s)
impedance = 50  # feed-line characteristic impedance (ohms); used by inset_feed_position
# ======================================== #
# Optional plotly support (used by surface_plot_go); left commented so plotly
# stays an optional dependency of this module.
# import plotly
# from plotly.offline import iplot
# import plotly.graph_objs as go
# plotly.offline.init_notebook_mode(connected=True)
def S_i(a):
temp = scipy.integrate.quad(lambda x:sin(x)/x,0,a)
return temp[0]
def J0(s):
    """Bessel function of the first kind, order zero, via its integral representation."""
    integral, _err = scipy.integrate.quad(lambda x: cos(s * sin(x)), 0, pi)
    return integral / pi
def get_k(f):
    """Free-space wavenumber k0 = 2*pi/lambda0 for frequency f (Hz)."""
    lamda_0 = light_velocity / f
    return (2 * pi) / lamda_0
def getG1(W, f):
    """Self-conductance G1 of one radiating slot of width W (m) at frequency f (Hz)."""
    x = get_k(f) * W  # electrical slot width k0*W
    i1 = -2 + cos(x) + x * S_i(x) + sin(x) / x
    return i1 / (120 * pi ** 2)
def getG12(W, k0, L):
    """Mutual conductance G12 between the two radiating slots separated by length L."""
    def integrand(t):
        return ((sin(k0 * W * cos(t) / 2) / cos(t)) ** 2) * J0(k0 * L * sin(t)) * sin(t) ** 3
    value, _err = scipy.integrate.quad(integrand, 0, pi)
    return value / (120 * pi ** 2)
def getGs(f, W, L):
    """Return (G1, G12): self and mutual slot conductances of the patch at f (Hz)."""
    wavenumber = get_k(f)
    return getG1(W, f), getG12(W, wavenumber, L)
def input_impedance(f, W, L):
    """
    Resonant input resistance at the radiating edge of the patch.

    f in Hz, W and L in metres.  Rin = 1/(2*(G1+G12)) where G1/G12 are the
    self and mutual slot conductances.  Prints and returns Rin (ohms).
    """
    # Note: the original computed k0 here as well, but it was unused —
    # getGs derives its own wavenumber internally.
    G1, G12 = getGs(f, W, L)
    Rin = 1/(2*(G1+G12))
    print("Input Impedance:", Rin, "ohms")
    return Rin
def inset_feed_position(Rin, L):
    """Inset depth y0 (m) that transforms edge resistance Rin down to the module
    feed impedance (the global `impedance`, 50 ohms)."""
    ratio = impedance / Rin
    y0 = (L / pi) * math.acos(sqrt(ratio))
    return y0
def get_directivity(G1, G12, W, f, I1, I2):
    """Return two directivity estimates (dB) for the two-slot patch model.

    DIR_1 applies the 2-element array factor 2/(1+G12/G1) to the single-slot
    directivity; DIR_2 is the alternative closed-form estimate based on I2.
    """
    wavelength = light_velocity/f
    g_ratio = G12/G1
    af_factor = 2/(1+g_ratio)                       # array-factor directivity of the slot pair
    d_single = ((2*pi*W)/wavelength)**2*(1/I1)      # single-slot directivity
    dir_db_1 = 10*log10(d_single * af_factor)
    d_alt = ((2*pi*W)/wavelength) ** 2 * (pi/I2)
    dir_db_2 = 10 * log10(d_alt)
    return dir_db_1, dir_db_2
# ======================================== #
def PatchFunction(thetaInDeg, phiInDeg, Freq, W, L, h, Er):
    """
    Total E-field pattern of a rectangular patch at observation angle (theta, phi).

    Taken from Design_patchr.  Patch is assumed to be resonating in the
    (TMx 010) mode with the E-field parallel to the x-axis.
    Reference: C.A. Balanis, "Antenna Theory", 2nd Edition, page 745.

    thetaInDeg, phiInDeg....observation angles (degrees)
    Freq...Frequency (Hz)
    W......Width of patch (m)
    L......Length of patch (m)
    h......Substrate thickness (m)
    Er.....Dielectric constant of substrate

    Returns the (unnormalised) field magnitude Etot; 0 behind the groundplane
    (theta_in > 90 degrees).
    """
    lamba = light_velocity / Freq
    theta_in = math.radians(thetaInDeg)
    phi_in = math.radians(phiInDeg)
    ko = 2 * math.pi / lamba  # free-space wavenumber
    # Rotate coords 90 deg about x-axis to match array_utils coord system
    # with the coord system used in the model (999 = arbitrary far-field radius).
    xff, yff, zff = sph2cart1(999, theta_in, phi_in)
    xffd = zff
    yffd = xff
    zffd = yff
    r, thp, php = cart2sph1(xffd, yffd, zffd)
    phi = php
    theta = thp
    if theta == 0:
        theta = 1e-9  # Trap potential division by zero warning
    if phi == 0:
        phi = 1e-9
    # Effective dielectric constant for a microstrip line of width W on substrate Er.
    Ereff = ((Er + 1) / 2) + ((Er - 1) / 2) * (1 + 12 * (h / W)) ** -0.5
    # Length increase dL from fringing fields at each end: Leff = L + 2*dL.
    F1 = (Ereff + 0.3) * (W / h + 0.264)
    F2 = (Ereff - 0.258) * (W / h + 0.8)
    dL = h * 0.412 * (F1 / F2)
    Leff = L + 2 * dL
    Weff = W  # Effective width Weff for patch, uses standard Er value.
    heff = h * sqrt(Er)
    # Patch pattern function of theta and phi; note theta/phi here are defined
    # differently to theta_in/phi_in (post-rotation coordinates).
    Numtr2 = sin(ko * heff * cos(phi) / 2)
    Demtr2 = (ko * heff * cos(phi)) / 2
    Fphi = (Numtr2 / Demtr2) * cos((ko * Leff / 2) * sin(phi))
    Numtr1 = sin((ko * heff / 2) * sin(theta))
    Demtr1 = ((ko * heff / 2) * sin(theta))
    Numtr1a = sin((ko * Weff / 2) * cos(theta))
    Demtr1a = ((ko * Weff / 2) * cos(theta))
    Ftheta = ((Numtr1 * Numtr1a) / (Demtr1 * Demtr1a)) * sin(theta)
    # Due to groundplane, the function is only valid for 0 < theta < 90 for all phi.
    # PatEdgeSF smooths the roll-off near theta=90 (standard model truncates there):
    # it is ~1 except close to 90 deg, where it drops (proportional to 1/x^2) to 0.
    rolloff_factor = 0.5  # 1=sharp, 0=softer
    theta_in_deg = theta_in * 180 / math.pi  # theta_in in degrees
    F1 = 1 / (((rolloff_factor * (abs(theta_in_deg) - 90)) ** 2) + 0.001)  # intermediate calc
    PatEdgeSF = 1 / (F1 + 1)  # Pattern scaling factor
    UNF = 1.0006  # Unity normalisation factor for element pattern
    if theta_in <= math.pi / 2:
        Etot = Ftheta * Fphi * PatEdgeSF * UNF  # Total pattern by pattern multiplication
    else:
        Etot = 0  # no radiation behind the groundplane
    return Etot
def patch_function(theta_in_deg, phi_in_deg, freq, w, l, h, er):
    """
    Total E-field pattern of a rectangular patch at observation angle (theta, phi).

    snake_case twin of PatchFunction (taken from Design_patchr).  Patch is
    assumed to be resonating in the (TMx 010) mode with the E-field parallel
    to the x-axis.
    Reference: C.A. Balanis, "Antenna Theory", 2nd Edition, page 745.

    theta_in_deg, phi_in_deg....observation angles (degrees)
    freq...Frequency (Hz)
    w......Width of patch (m)
    l......Length of patch (m)
    h......Substrate thickness (m)
    er.....Dielectric constant of substrate

    Returns the (unnormalised) field magnitude; 0 behind the groundplane.
    """
    lambda_ = light_velocity / freq
    theta_in = math.radians(theta_in_deg)
    phi_in = math.radians(phi_in_deg)
    ko = 2 * math.pi / lambda_  # free-space wavenumber
    xff, yff, zff = sph2cart1(999, theta_in, phi_in)  # Rotate coords 90 deg about x-axis to match array_utils coord system with coord system used in the model.
    xffd = zff
    yffd = xff
    zffd = yff
    r, thp, php = cart2sph1(xffd, yffd, zffd)
    phi = php
    theta = thp
    if theta == 0:
        # Trap potential division by zero warning
        theta = 1e-9
    if phi == 0:
        phi = 1e-9
    # Calculate effective dielectric constant for micro_strip line of width W on dielectric material of constant Er
    e_ref = ((er + 1) / 2) + ((er - 1) / 2) * (1 + 12 * (h / w)) ** -0.5
    # Calculate increase length dL of patch length L due to fringing fields at each end,
    # giving total effective length Leff = L + 2*dL
    f1 = (e_ref + 0.3) * (w / h + 0.264)
    f2 = (e_ref - 0.258) * (w / h + 0.8)
    d_l = h * 0.412 * (f1 / f2)
    l_eff = l + 2 * d_l
    # Calculate effective width Weff for patch, uses standard Er value.
    w_eff = w
    h_eff = h * sqrt(er)
    # Patch pattern function of theta and phi,
    # Note the theta and phi for the function are defined differently to theta_in and phi_in
    num_tr_2 = sin(ko * h_eff * cos(phi) / 2)
    dem_tr_2 = (ko * h_eff * cos(phi)) / 2
    f_phi = (num_tr_2 / dem_tr_2) * cos((ko * l_eff / 2) * sin(phi))
    num_tr_1 = sin((ko * h_eff / 2) * sin(theta))
    dem_tr_1 = ((ko * h_eff / 2) * sin(theta))
    num_tr_1a = sin((ko * w_eff / 2) * cos(theta))
    dem_tr_1a = ((ko * w_eff / 2) * cos(theta))
    f_theta = ((num_tr_1 * num_tr_1a) / (dem_tr_1 * dem_tr_1a)) * sin(theta)
    # Due to groundplane, function is only valid for theta values : 0 < theta < 90 for all phi
    # Modify pattern for theta values close to 90 to give smooth roll-off, standard model truncates H-plane at theta=90.
    # PatEdgeSF has value=1 except at theta close to 90 where it drops (proportional to 1/x^2) to 0
    # 1=sharp, 0=softer
    roll_off_factor = 0.5
    # theta_in in Deg
    theta_in_deg = theta_in * 180 / math.pi
    # intermediate calc
    f1 = 1 / (((roll_off_factor * (abs(theta_in_deg) - 90)) ** 2) + 0.001)
    # Pattern scaling factor
    pat_edge_sf = 1 / (f1 + 1)
    # Unity normalisation factor for element pattern
    UNF = 1.0006
    # Total pattern by pattern multiplication
    if theta_in <= math.pi / 2:
        e_tot = f_theta * f_phi * pat_edge_sf * UNF
    else:
        e_tot = 0
    return e_tot
def GetPatchFields(PhiStart, PhiStop, ThetaStart, ThetaStop, Freq, W, L, h, Er):
    """
    E-field magnitude over [ThetaStart, ThetaStop) x [PhiStart, PhiStop) degrees.
    Returns a numpy array indexed as fields[phiDeg][thetaDeg] = eField.
    W......Width of patch (m)
    L......Length of patch (m)
    h......Substrate thickness (m)
    Er.....Dielectric constant of substrate
    """
    fields = np.ones((PhiStop, ThetaStop))  # holds the e-field at each position
    for phi_deg in range(PhiStart, PhiStop):
        for theta_deg in range(ThetaStart, ThetaStop):
            fields[phi_deg][theta_deg] = PatchFunction(theta_deg, phi_deg, Freq, W, L, h, Er)
    return fields
def get_patch_fields(phi_start, phi_stop, theta_start, theta_stop, freq, w, l, h, er):
    """
    E-field magnitude over [theta_start, theta_stop) x [phi_start, phi_stop) degrees.
    Returns a numpy array indexed as fields[phiDeg][thetaDeg] = eField.
    w......Width of patch (m)
    l......Length of patch (m)
    h......Substrate thickness (m)
    er.....Dielectric constant of substrate
    """
    fields = np.ones((phi_stop, theta_stop))  # holds the e-field at each position
    for phi_deg in range(phi_start, phi_stop):
        for theta_deg in range(theta_start, theta_stop):
            fields[phi_deg][theta_deg] = patch_function(theta_deg, phi_deg, freq, w, l, h, er)
    return fields
def PatchEHPlanePlot(Freq, W, L, h, Er, isLog=True):
    """
    Plot 2D cuts of the patch E-field: E-plane (phi = 0°) and H-plane (phi = 90°).

    Freq in Hz; W, L, h in metres; Er relative permittivity.
    isLog plots 20*log10(|E|) when True, the linear field otherwise.
    Returns the full fields array from GetPatchFields for reuse.
    """
    fields = GetPatchFields(0, 360, 0, 90, Freq, W, L, h, Er)  # field at each (phi, theta)
    Xtheta = np.linspace(0, 90, 90)  # theta axis used for plotting
    if isLog:  # log (dB) or linear scale
        plt.plot(Xtheta, 20 * np.log10(abs(fields[90, :])), label="H-plane (Phi=90°)")  # Log = 20 * log10(E-field)
        plt.plot(Xtheta, 20 * np.log10(abs(fields[0, :])), label="E-plane (Phi=0°)")
        plt.ylabel('E-Field (dB)')
    else:
        plt.plot(Xtheta, fields[90, :], label="H-plane (Phi=90°)")
        plt.plot(Xtheta, fields[0, :], label="E-plane (Phi=0°)")
        plt.ylabel('E-Field')
    plt.xlabel('Theta (degs)')  # Plot formatting
    plt.title("Patch: \nW=" + str(W) + " \nL=" + str(L) + "\nEr=" + str(Er) + " h=" + str(h) + " \n@" + str(1e-9*Freq) + "GHz")
    plt.ylim(-40)
    plt.xlim((0, 90))
    start, end = plt.xlim()
    plt.xticks(np.arange(start, end, 5))
    # Pass grid visibility positionally: the 'b' keyword was removed in Matplotlib 3.5.
    plt.grid(True, which='major')
    plt.legend()
    plt.show()  # Show plot
    return fields  # Return the calculated fields
def patch_eh_plane_plot(freq, w, l, h, er, is_log=True):
    """
    Plot 2D plots showing E-field for E-plane (phi = 0) and the H-plane (phi = 90).

    freq in Hz; w, l, h in metres; er relative permittivity.
    is_log plots 20*log10(|E|) when True, the linear field otherwise.
    Returns the full fields array for reuse.
    """
    fields = get_patch_fields(0, 360, 0, 90, freq, w, l, h, er)
    Xtheta = np.linspace(0, 90, 90)
    if is_log:
        # Log = 20 * log10(E-field)# Can plot the log scale or normal
        plt.plot(Xtheta, 20 * np.log10(abs(fields[90, :])), label="H-plane (Phi=90)")
        plt.plot(Xtheta, 20 * np.log10(abs(fields[0, :])), label="E-plane (Phi=0)")
        plt.ylabel('E-Field (dB)')
    else:
        plt.plot(Xtheta, fields[90, :], label="H-plane (Phi=90)")
        plt.plot(Xtheta, fields[0, :], label="E-plane (Phi=0)")
        plt.ylabel('E-Field')
    plt.xlabel('Theta (degs)')
    plt.title("EH Plane - Theta ")
    plt.ylim(-40)
    plt.xlim((0, 90))
    start, end = plt.xlim()
    plt.xticks(np.arange(start, end, 5))
    # Pass grid visibility positionally: the 'b' keyword was removed in Matplotlib 3.5.
    plt.grid(True, which='major')
    plt.legend()
    plt.show()
    return fields
def SurfacePlot(Fields, Freq, W, L, h, Er):
    """3D surface of the field over the theta/phi grid in Fields, by converting
    each spherical sample (|E|, theta, phi) to Cartesian coordinates."""
    print("Processing SurfacePlot...")
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    n_phi, n_theta = Fields.shape  # phi & theta grid extents
    # Cartesian coordinate arrays, one entry per (phi, theta) sample.
    X = np.ones((n_phi, n_theta))
    Y = np.ones((n_phi, n_theta))
    Z = np.ones((n_phi, n_theta))
    for p in range(n_phi):
        for t in range(n_theta):
            magnitude = Fields[p][t]
            X[p, t], Y[p, t], Z[p, t] = sph2cart1(magnitude, math.radians(t), math.radians(p))
    ax.plot_surface(X, Y, Z, color='b')  # Plot surface
    plt.ylabel('Y')
    plt.xlabel('X')
    plt.title("Patch: \nW=" + str(W) + " \nL=" + str(L) + "\nEr=" + str(Er) + " h=" + str(h) + " \n@" + str(1e-9*Freq) + "GHz")
    plt.show()
# def surface_plot(fields, is_note_book=False):
def surface_plot_go(fields, is_note_book=False):
    """Plots 3D surface plot over given theta/phi range in fields by calculating
    cartesian coordinate equivalent of spherical form.

    Requires the optional 'plotly' package.  The imports are done lazily here
    because the module-level plotly imports are commented out, which previously
    made this function raise NameError on 'go'/'iplot'/'plotly' when called.
    """
    import plotly
    import plotly.graph_objs as go
    from plotly.offline import iplot
    print("Processing SurfacePlot...")
    # Finds the phi & theta range
    phiSize = fields.shape[0]
    thetaSize = fields.shape[1]
    # Prepare arrays to hold the cartesian coordinate data.
    X = np.ones((phiSize, thetaSize))
    Y = np.ones((phiSize, thetaSize))
    Z = np.ones((phiSize, thetaSize))
    for phi in range(phiSize):
        for theta in range(thetaSize):
            e = fields[phi][theta]
            xe, ye, ze = sph2cart1(e, math.radians(theta), math.radians(phi))
            X[phi, theta] = xe
            Y[phi, theta] = ye
            Z[phi, theta] = ze
    surface = go.Surface(x=X, y=Y, z=Z)
    data = [surface]
    layout = go.Layout(
        title='Surface Plot of EH Plane',
        scene=dict(
            xaxis=dict(
                gridcolor='rgb(255, 255, 255)',
                zerolinecolor='rgb(255, 255, 255)',
                showbackground=True,
                backgroundcolor='rgb(230, 230,230)'
            ),
            yaxis=dict(
                gridcolor='rgb(255, 255, 255)',
                zerolinecolor='rgb(255, 255, 255)',
                showbackground=True,
                backgroundcolor='rgb(230, 230,230)'
            ),
            zaxis=dict(
                gridcolor='rgb(255, 255, 255)',
                zerolinecolor='rgb(255, 255, 255)',
                showbackground=True,
                backgroundcolor='rgb(230, 230,230)'
            )
        )
    )
    fig = go.Figure(data=data, layout=layout)
    if is_note_book:
        iplot(fig)  # inline rendering inside a notebook
    else:
        plotly.offline.plot(fig)  # write + open standalone HTML
def DesignPatch(Er, h, Freq):
    """
    Standard lambda/2 rectangular microstrip patch design.

    Patch length L and width W are calculated and returned together with the
    supplied Er and h, in the same layout the global patchr_config variable
    uses, so the result can be assigned directly.
    Usage: patchr_config=design_patchr(Er,h,Freq)
    Er.....Relative dielectric constant
    h......Substrate thickness (m)
    Freq...Frequency (Hz)
    e.g. patchr_config=design_patchr(3.43,0.7e-3,2e9)

    Returns (W, L, h, Er).  (The original also computed an unused Eo constant
    and a dead lambd/sqrt(Er) guide wavelength; both removed.)
    """
    lambd = light_velocity / Freq
    # Standard width for an efficient radiator.
    W = (light_velocity / (2 * Freq)) * sqrt(2 / (Er + 1))
    # Effective dielectric constant for a microstrip line of width W on substrate Er.
    Ereff = ((Er + 1) / 2) + ((Er - 1) / 2) * (1 + 12 * (h / W)) ** -0.5
    # Length increase dL from fringing fields at each end: L = lambda_g/2 - 2*dL.
    F1 = (Ereff + 0.3) * (W / h + 0.264)
    F2 = (Ereff - 0.258) * (W / h + 0.8)
    dL = h * 0.412 * (F1 / F2)
    lambdag = lambd / sqrt(Ereff)
    L = (lambdag / 2) - 2 * dL
    print('Rectangular Microstrip Patch Design')
    print("Frequency (GHz): " + str(1e-9*Freq))
    print("Dielec Const, Er : " + str(Er))
    print("Patch Width, W: " + str(W) + "m")
    print("Patch Length, L: " + str(L) + "m")
    print("Patch Height, h: " + str(h) + "m")
    return W, L, h, Er
def design_patch(er, h, freq):
    """Print and return (W, L) for a lambda/2 rectangular patch on substrate
    (er, h metres) at freq Hz."""
    wavelength = light_velocity / freq
    width = (light_velocity / (2 * freq)) * sqrt(2 / (er + 1))
    # Effective dielectric constant of the microstrip line of this width.
    e_eff = ((er + 1) / 2) + ((er - 1) / 2) * (1 + 12 * (h / width)) ** -0.5
    # Fringing-field length extension at each radiating edge.
    f1 = (e_eff + 0.3) * (width / h + 0.264)
    f2 = (e_eff - 0.258) * (width / h + 0.8)
    d_l = h * 0.412 * (f1 / f2)
    guide_wavelength = wavelength / sqrt(e_eff)
    length = (guide_wavelength / 2) - 2 * d_l
    print('Rectangular Microstrip Patch Design')
    print("Frequency: (GHz) " + str(1e-9*freq))
    print("Dielec Const, Er : " + str(er))
    print("Patch Width, W: " + str(width) + "m")
    print("Patch Length, L: " + str(length) + "m")
    print("Patch Height, h: " + str(h) + "m")
    return width, length
def exampleRectPatch():
    """Some example patches with various thickness & Er."""
    freq = 14e9
    er = 3.66  # RO4350B
    # Thin substrate.
    W, L, h, er = DesignPatch(er, 0.101e-3, freq)
    fields = PatchEHPlanePlot(freq, W, L, h, er)
    SurfacePlot(fields, freq, W, L, h, er)
    # Thicker substrate, same laminate.
    W, L, h, er = DesignPatch(er, 1.524e-3, freq)
    fields = PatchEHPlanePlot(freq, W, L, h, er)
    SurfacePlot(fields, freq, W, L, h, er)
# ======================================== #
def ArrayFactor(ElementArray, Freq):
    """
    Summation of field contributions from each element in array, at frequency
    Freq (Hz), over theta 0°-95° and phi 0°-360°.
    Element = xPos, yPos, zPos, ElementAmplitude, ElementPhaseWeight
    Returns a numpy array indexed as arrayFactor[phi][theta].
    """
    arrayFactor = np.ones((360, 95))
    Lambda = light_velocity / Freq
    for theta in range(95):
        for phi in range(360):  # For all theta/phi positions
            elementSum = 1e-9 + 0j  # tiny seed avoids an exactly-zero sum
            for element in ElementArray:  # Summation of each element's contribution at this theta/phi
                relativePhase = CalculateRelativePhase(element, Lambda, math.radians(theta), math.radians(phi))  # Find relative phase for current element
                elementSum += element[3] * math.e ** ((relativePhase + element[4]) * 1j)  # Element contribution = Amp * e^j(Phase + Phase Weight)
            # NOTE(review): only the real part of the complex sum is stored —
            # confirm whether |elementSum| was intended here.
            arrayFactor[phi][theta] = elementSum.real
    return arrayFactor
def CalculateRelativePhase(Element, Lambda, theta, phi):
    """
    Phase of an incident plane wave at the element position, referred to the
    phase of the same wave at the origin.
    Element = xPos, yPos, zPos, ElementAmplitude, ElementPhaseWeight
    theta & phi in radians
    See Eqn 3.1 @ https://theses.lib.vt.edu/theses/available/etd-04262000-15330030/unrestricted/ch3.pdf
    """
    phase_constant = 2 * math.pi / Lambda
    sin_theta = math.sin(theta)
    # Projection of the element position onto the propagation direction.
    projection = (Element[0] * sin_theta * math.cos(phi)
                  + Element[1] * sin_theta * math.sin(phi)
                  + Element[2] * math.cos(theta))
    return phase_constant * projection
# ======================================== #
def FieldSumPatch(ElementArray, Freq, W, L, h, Er):
    """
    Summation of field contributions from each patch element in array, at
    frequency Freq (Hz), for theta 0°-95°, phi 0°-360°.
    Element = xPos, yPos, zPos, ElementAmplitude, ElementPhaseWeight
    Returns a numpy array indexed as arrayFactor[phi][theta].
    """
    arrayFactor = np.ones((360, 95))
    Lambda = light_velocity / Freq
    for theta in range(95):
        for phi in range(360):  # For all theta/phi positions
            elementSum = 1e-9 + 0j  # tiny seed avoids an exactly-zero sum
            xff, yff, zff = sph2cart1(999, math.radians(theta), math.radians(phi))  # Find point in far field
            # For each element: find local theta/phi, calculate the field
            # contribution and add it to the summation for this point.
            for element in ElementArray:
                xlocal = xff - element[0]
                ylocal = yff - element[1]  # Calculate local position in cartesian
                zlocal = zff - element[2]
                r, thetaLocal, phiLocal = cart2sph1(xlocal, ylocal, zlocal)  # Convert local position to spherical
                patchFunction = PatchFunction(math.degrees(thetaLocal), math.degrees(phiLocal), Freq, W, L, h, Er)  # Patch element pattern for local theta, phi
                if patchFunction != 0:  # Sum each element's contribution
                    relativePhase = CalculateRelativePhase(element, Lambda, math.radians(theta), math.radians(phi))  # Find relative phase for current element
                    elementSum += element[3] * patchFunction * math.e ** ((relativePhase + element[4]) * 1j)  # Element contribution = Amp * e^j(Phase + Phase Weight)
            # NOTE(review): only the real part of the complex sum is stored —
            # confirm whether |elementSum| was intended.
            arrayFactor[phi][theta] = elementSum.real
    return arrayFactor
def FieldSumHorn(ElementArray, Freq):
    """
    Summation of field contributions from each horn element in array, at
    frequency Freq (Hz), for theta 0°-95°, phi 0°-360°.
    Horn pattern estimated with a cos^q(theta) function.
    Element = xPos, yPos, zPos, ElementAmplitude, ElementPhaseWeight
    Returns a numpy array indexed as arrayFactor[phi][theta].
    """
    arrayFactor = np.ones((360, 95))
    Lambda = light_velocity / Freq
    for theta in range(95):
        for phi in range(360):  # For all theta/phi positions
            elementSum = 1e-9 + 0j  # tiny seed avoids an exactly-zero sum
            xff, yff, zff = sph2cart1(999, math.radians(theta), math.radians(phi))  # Find point in far field
            # For each element: find local theta/phi, calculate the field
            # contribution and add it to the summation for this point.
            for element in ElementArray:
                if theta > 90:
                    hornFunction = 0  # Assume no radiation behind horn for simplification
                else:
                    xlocal = xff - element[0]  # Calculate local position in cartesian
                    ylocal = yff - element[1]
                    zlocal = zff - element[2]
                    r, thetaLocal, phiLocal = cart2sph1(xlocal, ylocal, zlocal)  # Convert local position to spherical
                    # TODO: This is a random horn antenna pattern: cos^28(theta). You can use a real one or input one later
                    hornFunction = math.cos(thetaLocal) ** 28  # Horn q function, q = 28
                if hornFunction != 0:  # Sum each element's contribution
                    relativePhase = CalculateRelativePhase(element, Lambda, math.radians(theta), math.radians(phi))  # Find relative phase for current element
                    elementSum += element[3] * hornFunction * math.e ** ((relativePhase + element[4]) * 1j)  # Element contribution = Amp * e^j(Phase + Phase Weight)
            # NOTE(review): only the real part of the complex sum is stored —
            # confirm whether |elementSum| was intended.
            arrayFactor[phi][theta] = elementSum.real
    return arrayFactor
# ======================================== #
"""
Returns the efficiency of a rectangular microstrip patch as a percentage. Based on ArrayCalc calc_patchr_eff.m.
References:
Microstrip Antennas, I.J Bahl and P.Bhartia, Published Atrech House, Page 60
Advances in Microstrip and Printed Antennas", Lee and Chen (Ch5)
Some useful numbers :
CONDUCTORS DIELECTRICS
Material Conductivity S/m Material Er Tand
Perfect 9.90E+99 (lossless) FR4_Epoxy 4.4 0.02
Silver 6.29E+07 Arlon 25FR 3.43 0.0035
Copper 5.80E+07 Arlon AD300 3.00 0.003
Pure Alumin. 3.77E+07 Arlon AR1000 10.00 0.0035
Al. 6063-T832 3.08E+07 Rogers RO3003 3.00 0.0013
Al. 6061-T6 2.49E+07 Rogers RO3006 6.15 0.0025
Brass 1.56E+07 Rogers RO3010 10.20 0.0035
Phospor bronze 9.09E+06 Rogers RO4350 3.48 0.004
Stainless Steel 302 1.39E+06 Glass 5.5 0.000
Plexiglass 3.4 0.001
Polyamide 4.3 0.004
Polyester 3.2 0.003
Polyethylene 2.25 0.001
"""
def CalculatePatchEff(Er, W, L, h, tand, sigma, Freq, VSWR):
    """
    Efficiency of a rectangular microstrip patch, returned as a percentage.
    Based on ArrayCalc calc_patchr_eff.m.

    Er: Relative dielectric constant
    W: Patch width (m)
    L: Patch length (m)
    h: dielectric thickness (m)
    tand: Loss tangent of dielectric (units)
    sigma: Conductivity of patch (Siemens/m)
    Freq: Frequency (Hz)
    VSWR: VSWR for bandwidth estimate (ratio). http://www.antenna-theory.com/definitions/vswr.php describes how well the antenna is impedance matched to the line it is connected to.

    Also prints the surface-wave and ohmic/dielectric efficiency factors and
    the estimated bandwidth (MHz) at the given VSWR.
    """
    # Clamp inputs to avoid division by zero / log of zero in the formulas below.
    if Er <= 1.000001:
        Er = 1.000001
    if tand <= 0.000001:
        tand = 0.000001
    Eo = 8.854185e-12  # Free space dielectric constant
    Ee = Eo * Er  # Effective dielectric constant
    lamba = light_velocity / Freq
    """
    % Calculation for space and surface wave efficiency factor, gives roughly the same results.
    % Reference : "Advances in Microstrip and Printed Antennas" Lee and Chen(Ch 5)
    % Efficiency due to surface wave component, dominant for larger h/lambda values
    """
    Mur = 1
    n1 = sqrt(Er * Mur)
    ko = 2 * pi * Freq * (sqrt((8.854e-12) * (pi * 4e-7)))
    Lo = lamba
    Psur = (1 / Lo ** 2) * ((ko * h) ** 3) * (60 * pi ** 3 * Mur ** 3 * (1 - 1 / n1 ** 2) ** 3)  # Power radiated as surface wave
    c1 = 1 - 1 / n1 ** 2 + (2 / 5) / n1 ** 4
    Pr = (1 / Lo ** 2) * (ko * h) ** 2 * (80 * pi ** 2 * Mur ** 2 * c1)  # Total power radiated
    Effsw = Pr / (Pr + Psur)  # Efficiency factor for surface wave losses
    """
    % Efficiency due to ohmic and dielectric losses, dominant for smaller h/lambda values
    % ***********************************************************************************
    % Reference : "Microstrip Antennas" Bahl and Bartia
    """
    if W < lamba:
        Rr = 90 * lamba ** 2 / W ** 2  # Radiation resistance for W<lambda
    else:
        Rr = 120 * lamba / W  # Radiation resistance for W>lambda
    Qr = (light_velocity * sqrt(Ee)) / (2 * (Freq / 1e6) * h)  # Quality factor, modified by me, not sure freq was in Ghz, more like MHz !?
    Rc = (1 / sigma) * 0.5 * sqrt(Freq) * (L / W) * Qr ** 2  # Equivalent resistance for conductor loss (ohms)
    Rd = (30 * tand / Er) * ((h * lamba) / (L * W)) * Qr ** 2  # Equivalent resistance for dielectric loss (ohms)
    Rtot = Rr + Rd + Rc  # Total resistance (ohms)
    Effcd = Rr / Rtot  # Efficiency factor for combined dielectric and ohmic losses
    Eff1 = Effsw * Effcd
    Eff = Eff1 * 100  # Total efficiency including ohmic, dielectric and surface wave losses (percent)
    Qt = Qr * Eff1 / (pi)  # Ref Balanis p762 ( Qtotal = Qradiated*Efficiency ) Not the pi factor, I added that, seems necassary get sensible results using Qr from above !?
    BW = (VSWR - 1) / (Qt * sqrt(VSWR))
    BWf = BW * Freq / 1e6  # Bandwidth as a frequency span in MHz
    print("Rectangular patch overall efficency " + str(Eff) + "%")
    print("Surface wave efficiency factor " + str(Effsw))
    print("Ohmic and dielectric efficiency factor " + str(Effcd))
    print("BW=" + str(BWf) + "MHz for VSWR=" + str(VSWR) + " at Fo=" + str(Freq / 1e6) + " MHz")
    return Eff
def examplePatchEfficiency():
    """
    Calculates efficiencies for two patches @14GHz, one with FR4 and one RO4350.
    """
    # Parameters shared by both substrates.
    freq = 14e9
    h = 1.524e-3
    vswr = 2.0
    sigma = 5.8e7  # copper conductivity
    # FR4_Epoxy substrate.
    er, tand = 4.4, 0.02
    print("\n\nCalculating for FR4 patch.")
    W, L, h, er = DesignPatch(er, h, freq)
    eff = CalculatePatchEff(er, W, L, h, tand, sigma, freq, vswr)
    CalcDirectivity(eff, PatchFunction, freq, W, L, h, er)
    # Rogers RO4350 substrate.
    print("\n\nCalculating for RO4350 patch.")
    er, tand = 3.48, 0.004
    W, L, h, er = DesignPatch(er, h, freq)
    eff = CalculatePatchEff(er, W, L, h, tand, sigma, freq, vswr)
    CalcDirectivity(eff, PatchFunction, freq, W, L, h, er)
# ======================================== #
"""
Function to calculate peak directivity.
Also includes some examples that are used to check result.
"""
def SqrtSinPattern(Theta, Phi, *args):
    """
    sqrt(sin(theta)) reference pattern (Phi and extra args ignored).
    See Fig1 @ http://www.antenna-theory.com/basics/directivity.php
    Expect Directivity to be 1.05dB.
    """
    theta_rad = radians(Theta)
    return sqrt(sin(theta_rad))
def SinPowerPattern(Theta, Phi, *args):
    """
    sin^5(theta) reference pattern (Phi and extra args ignored).
    See Fig1 @ http://www.antenna-theory.com/basics/directivity.php
    Expect Directivity to be 2.707dB.
    """
    s = sin(radians(Theta))
    return s ** 5
def IsotropicPattern(Theta, Phi, *args):
    """
    Isotropic pattern: identical radiation in every direction.
    Expect directivity to be 0dB.
    """
    return 1
def xfrange(start, stop, step):
    """
    Generator yielding start, start+step, ... while the value stays below stop.
    Computes each value as start + i*step to avoid accumulating float error.
    """
    count = 0
    value = start
    while value < stop:
        yield value
        count += 1
        value = start + count * step
def CalcDirectivity(Efficiency, RadPatternFunction, *args):
    """
    Based on calc_directivity.m from ArrayCalc.
    Calculates peak directivity in dBi value using numerical integration.
    If the array efficiency is set to below 100% then the returned value is referred to as Gain (dB).
    Usage: ThetaMax, PhiMax = CalcDirectivity(Efficiency, RadPatternFunction, *args)
    RadPatternFunction - antennas radiation pattern function. F(Theta, Phi, *args)
    Efficiency - Efficiency of antenna in %. 100 means pure directivity is reported.
    Returned values:
    ThetaMax - Theta value for direction of maximum directivity (Deg)
    PhiMax - Phi value for direction of maximum directivity (Deg)
    Integration is of the form :
    %
    %       360   180
    %      Int{  Int{  (E(theta,phi)*conj(E(theta,phi))*sin(theta) d(theta) d(phi)
    %        0     0
    %
    %         z
    %         |-theta   (theta 0-180 measured from z-axis)
    %         |/
    %         |_____ y
    %        /\
    %       /-phi       (phi 0-360 measured from x-axis)
    %      x
    %
    """
    print("Calculating Directivity for " + RadPatternFunction.__name__)
    deltheta = 2  # Step value of theta (Deg)
    delphi = 2  # Step value for phi (Deg)
    dth = radians(deltheta)
    dph = radians(delphi)
    Psum = 0  # accumulated radiated power (the double integral above)
    Pmax = 0  # peak power density seen so far
    Thmax = 0  # theta at the peak
    Phmax = 0  # phi at the peak
    for phi in xfrange(0, 360, delphi):  # Phi Integration Loop 0-360 degrees
        for theta in xfrange(0, 180, deltheta):  # Theta Integration Loop 0-180 degrees
            eField = RadPatternFunction(theta, phi, *args)  # Total E-field at point
            Pthph = eField * np.conjugate(eField)  # Convert to power
            if Pthph > Pmax:
                Pmax = Pthph  # Store peak value
                Thmax = theta  # Store theta value for the maximum
                Phmax = phi  # Store phi value for the maximum
            # print(str(theta) + "," + str(phi) + ": " + str(Pthph))
            Psum = Psum + Pthph * sin(radians(theta)) * dth * dph  # Summation (sin(theta) is the spherical area weight)
    Pmax = Pmax * (Efficiency / 100)  # Apply antenna efficiency
    directivity_lin = Pmax / (Psum / (4 * pi))  # Directivity (linear ratio)
    directivity_dBi = 10 * log10(directivity_lin)  # Directivity (dB wrt isotropic)
    if Efficiency < 100:  # Gain case
        dBdiff = 10 * log10(abs(100 / Efficiency))  # Difference between gain and directivity
        print("Directivity = " + str(directivity_dBi + dBdiff) + "dBi")  # Display what directivity would be for ref.
        print("Efficiency = " + str(Efficiency) + "%")
        print("Gain = " + str(directivity_dBi) + "dB")
    else:  # Directivity case
        print("Directivity = " + str(directivity_dBi) + "dBi")
    print("At Theta = " + str(Thmax) + ", Phi = " + str(Phmax))
    return Thmax, Phmax
def exampleDirectivity():
    """Directivity sanity checks against analytic reference patterns, then two patches."""
    # Analytic patterns with known directivities (see the pattern docstrings).
    CalcDirectivity(100, SqrtSinPattern)
    print("\n\n")
    CalcDirectivity(90, SinPowerPattern)
    print("\n\n")
    CalcDirectivity(100, IsotropicPattern)
    print("\n\n")
    # Designed RO4350B patch.
    freq = 14e9
    er = 3.66
    h = 0.101e-3
    W, L, h, er = DesignPatch(er, h, freq)
    CalcDirectivity(100, PatchFunction, freq, W, L, h, er)
    fields = PatchEHPlanePlot(freq, W, L, h, er)
    SurfacePlot(fields, freq, W, L, h, er)
    # Fixed-geometry patch.
    W, L, h, er = 10.7e-3, 10.47e-3, 3e-3, 2.5
    print("\n\n")
    CalcDirectivity(100, PatchFunction, freq, W, L, h, er)
    fields = PatchEHPlanePlot(freq, W, L, h, er)
    SurfacePlot(fields, freq, W, L, h, er)
# ======================================== #
def PatchFunction(thetaInDeg, phiInDeg, Freq, W, L, h, Er):
    """
    Total E-field pattern of a rectangular patch at observation angle (theta, phi).

    NOTE(review): this is a byte-identical re-definition of the PatchFunction
    defined earlier in this module; being later, it is the one that takes
    effect at import time.  Consider removing one copy.

    Taken from Design_patchr.  Patch is assumed to be resonating in the
    (TMx 010) mode with the E-field parallel to the x-axis.
    Reference: C.A. Balanis, "Antenna Theory", 2nd Edition, page 745.

    thetaInDeg, phiInDeg....observation angles (degrees)
    Freq...Frequency (Hz)
    W......Width of patch (m)
    L......Length of patch (m)
    h......Substrate thickness (m)
    Er.....Dielectric constant of substrate
    """
    lamba = light_velocity / Freq
    theta_in = math.radians(thetaInDeg)
    phi_in = math.radians(phiInDeg)
    ko = 2 * math.pi / lamba  # free-space wavenumber
    # Rotate coords 90 deg about x-axis to match array_utils coord system
    # with the coord system used in the model (999 = arbitrary far-field radius).
    xff, yff, zff = sph2cart1(999, theta_in, phi_in)
    xffd = zff
    yffd = xff
    zffd = yff
    r, thp, php = cart2sph1(xffd, yffd, zffd)
    phi = php
    theta = thp
    if theta == 0:
        theta = 1e-9  # Trap potential division by zero warning
    if phi == 0:
        phi = 1e-9
    # Effective dielectric constant for a microstrip line of width W on substrate Er.
    Ereff = ((Er + 1) / 2) + ((Er - 1) / 2) * (1 + 12 * (h / W)) ** -0.5
    # Length increase dL from fringing fields at each end: Leff = L + 2*dL.
    F1 = (Ereff + 0.3) * (W / h + 0.264)
    F2 = (Ereff - 0.258) * (W / h + 0.8)
    dL = h * 0.412 * (F1 / F2)
    Leff = L + 2 * dL
    Weff = W  # Effective width Weff for patch, uses standard Er value.
    heff = h * sqrt(Er)
    # Patch pattern function of theta and phi; note theta/phi here are defined
    # differently to theta_in/phi_in (post-rotation coordinates).
    Numtr2 = sin(ko * heff * cos(phi) / 2)
    Demtr2 = (ko * heff * cos(phi)) / 2
    Fphi = (Numtr2 / Demtr2) * cos((ko * Leff / 2) * sin(phi))
    Numtr1 = sin((ko * heff / 2) * sin(theta))
    Demtr1 = ((ko * heff / 2) * sin(theta))
    Numtr1a = sin((ko * Weff / 2) * cos(theta))
    Demtr1a = ((ko * Weff / 2) * cos(theta))
    Ftheta = ((Numtr1 * Numtr1a) / (Demtr1 * Demtr1a)) * sin(theta)
    # Due to groundplane, the function is only valid for 0 < theta < 90 for all phi.
    # PatEdgeSF smooths the roll-off near theta=90 (standard model truncates there):
    # it is ~1 except close to 90 deg, where it drops (proportional to 1/x^2) to 0.
    rolloff_factor = 0.5  # 1=sharp, 0=softer
    theta_in_deg = theta_in * 180 / math.pi  # theta_in in degrees
    F1 = 1 / (((rolloff_factor * (abs(theta_in_deg) - 90)) ** 2) + 0.001)  # intermediate calc
    PatEdgeSF = 1 / (F1 + 1)  # Pattern scaling factor
    UNF = 1.0006  # Unity normalisation factor for element pattern
    if theta_in <= math.pi / 2:
        Etot = Ftheta * Fphi * PatEdgeSF * UNF  # Total pattern by pattern multiplication
    else:
        Etot = 0  # no radiation behind the groundplane
    return Etot
def sph2cart1(r, th, phi):
    """Spherical (r, theta from z-axis, phi from x-axis; radians) -> Cartesian (x, y, z)."""
    sin_th = sin(th)
    return r * cos(phi) * sin_th, r * sin(phi) * sin_th, r * cos(th)
def cart2sph1(x, y, z):
    """Cartesian -> spherical (r, theta, phi); a tiny epsilon on r guards the
    divide-by-zero at the origin."""
    radius = sqrt(x ** 2 + y ** 2 + z ** 2) + 1e-15
    return radius, acos(z / radius), atan2(y, x)
# ========================================================================= #
# ========================================================================= #
class Result:
    """Plain container for the computed patch-design parameters.

    All attributes start as None and are filled in by the designer.
    """

    def __init__(self):
        # None means "not computed yet".
        for attr in ("frequency", "patch_width", "patch_length",
                     "feeder_width", "feeder_length", "inset_gap_width",
                     "inset_length", "ground_length", "ground_width",
                     "input_edge_impedance"):
            setattr(self, attr, None)
def design_string(resonant_frequency, dielectric_constant, thickness):
    """Run the design and return the result as a pretty-printed JSON string."""
    result = design_result(resonant_frequency, dielectric_constant, thickness)
    return json.dumps(result.__dict__, indent=4)
def design_result(resonant_frequency, dielectric_constant, thickness):
    """Run a complete patch design and return its values as a Result object."""
    designer = design(resonant_frequency, dielectric_constant, thickness)
    return designer.get_result()
def design(resonant_frequency, dielectric_constant, thickness):
    """Create a PatchDesigner, computing patch length and width from the
    resonant frequency, dielectric constant and substrate thickness."""
    return PatchDesigner(resonant_frequency, dielectric_constant, thickness)
class PatchDesigner:
    """All parameter calculations for a rectangular microstrip patch antenna.

    Transmission-line design model: the patch width and length follow from
    the effective dielectric constant and the fringing length extension; the
    edge (input) impedance follows from the radiating-slot conductances G1
    and G12, evaluated by numerical integration.  Lengths are in metres,
    frequency in Hz, impedance in ohms.
    """

    # Design inputs
    freq = None                # resonant frequency [Hz]
    er = None                  # substrate dielectric constant
    h = None                   # substrate thickness [m]
    # Computed geometry / intermediate quantities
    patch_length = None        # physical patch length [m]
    patch_lengthl_eff = None   # effective (electrical) patch length [m]
    patch_width = None         # patch width [m]
    feeder_length = None       # feed line length [m]
    feeder_width = None        # feed line width [m]
    inset_gap = None           # gap either side of the inset feed [m]
    e_eff = None               # effective dielectric constant
    delta_l = None             # fringing length extension [m]
    wavelength = None          # free-space wavelength [m]
    electrical_length = None   # declared but never assigned in this class
    ground_length = None       # ground plane length [m]
    ground_width = None        # ground plane width [m]
    inset_length = None        # inset feed depth [m]
    input_impedance = None     # resonant edge input impedance [ohm]

    def __init__(self, freq, er, h):
        """
        Designs the patch parameters.

        Parameters:
            freq (float): Resonant frequency in Hz (1 MHz .. 100 GHz).
            er (float): Dielectric constant of the cavity material.
            h (float): Thickness of the cavity in m.

        Raises:
            ValueError: if any argument is outside its accepted range.
        """
        if not 10 ** 6 <= freq <= 100 * 10 ** 9:
            raise ValueError("Frequency value should be in between 1MHz to 100 GHz")
        if not 0 < er <= 10**5:
            raise ValueError("Dielectric constant value should be in greater than 0 and smaller or equals 100,000")
        if not 0 < h <= 1:
            raise ValueError("Thickness value should be in greater than 0 and smaller or equals 1 meter")
        self.freq = freq
        self.er = er
        self.h = h
        # Order matters: wavelength -> patch dimensions -> feeder/inset/ground.
        self.set_wavelength()
        self.set_length_width_e_eff()
        self.set_feeder_width_length()

    def set_wavelength(self):
        """Free-space wavelength lambda0 = c / f.

        `light_velocity` is a module-level constant defined elsewhere in
        this file.
        """
        self.wavelength = light_velocity / self.freq

    def set_length_width_e_eff(self):
        """Compute patch width, effective permittivity, fringing extension and length."""
        # Standard width formula: W = (c / 2f) * sqrt(2 / (er + 1)).
        self.patch_width = (light_velocity / (2 * self.freq)) * sqrt(2 / (self.er + 1))
        # Effective dielectric constant for the W/h ratio.
        temp = 1 + 12*(self.h / self.patch_width)
        self.e_eff = ((self.er + 1) / 2) + ((self.er - 1) / 2) * temp ** -0.5
        # Fringing length extension delta_l (Hammerstad closed form).
        f1 = (self.e_eff + 0.3) * (self.patch_width / self.h + 0.264)
        f2 = (self.e_eff - 0.258) * (self.patch_width / self.h + 0.8)
        self.delta_l = self.h * 0.412 * (f1 / f2)
        # Electrical half-wavelength, shortened by fringing on both ends.
        self.patch_lengthl_eff = (self.wavelength / sqrt(self.e_eff)) / 2
        self.patch_length = self.patch_lengthl_eff - 2 * self.delta_l

    def set_feeder_width_length(self):
        """Compute the feed line, inset dimensions and ground plane size."""
        # Quarter guided-wavelength feed line.
        self.feeder_length = (light_velocity / (4 * self.freq)) * (sqrt(1 / self.e_eff))
        self.feeder_width = self.patch_width / 5
        self.inset_gap = self.patch_width / 5
        self.set_input_impedance()
        # Inset depth matching the target line impedance; `impedance` is a
        # module-level constant defined elsewhere in this file.
        self.inset_length = (self.patch_length / pi) * (math.acos(sqrt(impedance / self.input_impedance)))
        self.ground_length = self.patch_length + self.feeder_length + self.get_fringing_l()
        self.ground_width = self.patch_width + self.feeder_width + self.get_fringing_l()

    def get_result(self):
        """Copy the computed design values into a plain Result container."""
        result = Result()
        result.frequency = self.freq
        result.patch_width = self.patch_width
        result.patch_length = self.patch_length
        result.feeder_width = self.feeder_width
        result.feeder_length = self.feeder_length
        result.inset_gap_width = self.inset_gap
        result.inset_length = self.inset_length
        result.ground_length = self.ground_length
        result.ground_width = self.ground_width
        # Bug fix: populate the attribute Result actually declares.
        # Previously the value was written only to the mismatched name
        # 'edge_impedance', leaving input_edge_impedance as None in the
        # serialised output.  The old name is kept for backward compatibility
        # with any caller that read it.
        result.input_edge_impedance = self.input_impedance
        result.edge_impedance = self.input_impedance
        return result

    def get_fringing_l(self):
        """Margin added around the patch for the ground plane (6h rule of thumb)."""
        return 6 * self.h

    def get_k(self):
        """Free-space wavenumber k0 = 2*pi / lambda0."""
        k0 = (2*pi)/self.wavelength
        return k0

    def S_i(self, a):
        """Sine integral Si(a) = integral of sin(x)/x from 0 to a."""
        temp = integrate.quad(lambda x: sin(x)/x, 0, a)
        return temp[0]

    def getG1(self):
        """Self conductance G1 of a single radiating slot."""
        k0 = self.get_k()
        X = k0 * self.patch_width
        I1 = -2 + cos(X) + X * self.S_i(X) + sin(X)/X
        G1 = I1 / (120 * pi**2)
        return G1

    def J0(self, s):
        """Bessel function of the first kind, order zero, via its integral form."""
        temp = integrate.quad(lambda x: cos(s*sin(x)), 0, pi)
        return (1/pi) * temp[0]

    def getG12(self):
        """Mutual conductance G12 between the two radiating slots."""
        k0 = self.get_k()
        temp = integrate.quad(lambda x: (((sin(k0 * self.patch_width * cos(x) / 2) / cos(x)) ** 2) * self.J0(k0 * self.patch_length * sin(x)) * sin(x) ** 3), 0, pi)
        G12 = (1/(120*pi**2))*temp[0]
        return G12

    def set_input_impedance(self):
        """Edge resonant input resistance Rin = 1 / (2 * (G1 + G12))."""
        G1, G12 = self.getG1(), self.getG12()
        self.input_impedance = 1 / (2 * (G1 + G12))
def m_to_inch(val):
    """Convert a length in metres to inches (1 m = 39.3701 in)."""
    inches_per_metre = 39.3701
    return val * inches_per_metre
def get_gerber_str(d, feed_type):
    """Render the Gerber file contents for a finished design.

    d is a designer object with all dimensions computed (in metres);
    feed_type == 'inset' selects the inset-feed layout, any other value
    the plain edge-fed layout.
    """
    feeder_l = m_to_inch(d.feeder_length)
    feeder_w = m_to_inch(d.feeder_width)
    patch_l = m_to_inch(d.patch_length)
    patch_w = m_to_inch(d.patch_width)
    margin = m_to_inch(d.get_fringing_l())
    if feed_type == 'inset':
        return get_inset_feed_gerber(feeder_l, feeder_w, patch_l, patch_w, margin, d)
    return get_normal_feed_gerber(feeder_l, feeder_w, patch_l, patch_w, margin)
def get_normal_feed_gerber(fl, fw, pl, pw, fringing_l):
    """Build the Gerber RS274X body for a plain (edge-fed) patch.

    All arguments are in inches: feeder length/width (fl, fw), patch
    length/width (pl, pw), and the fringing margin around the layout.
    """
    def coord(value):
        # FSLAX24Y24 coordinates: 4 decimal places with the point removed.
        return "{:.4f}".format(value).replace('.', '')

    init_x = coord(fl / 2 + fringing_l)        # feeder rectangle centre, x
    init_y = coord(fringing_l)                 # feeder rectangle centre, y
    patch_x = coord(fl + fringing_l + pl / 2)  # patch rectangle centre, x
    gerber_format = f"""
    G04 ===== Begin FILE IDENTIFICATION =====*
    G04 File Format: Gerber RS274X*
    G04 ===== End FILE IDENTIFICATION =====*
    %FSLAX24Y24*%
    %MOIN*%
    %SFA1.0000B1.0000*%
    %OFA0.0B0.0*%
    %ADD14R,{fl}X{fw}*%
    %ADD15R,{pl}X{pw}*%
    %LNcond*%
    %IPPOS*%
    %LPD*%
    G75*
    D14*
    X{init_x}Y{init_y}D03*
    D15*
    X{patch_x}*
    M02*
    """
    return gerber_format
def get_inset_feed_gerber(fl, fw, pl, pw, fringing_l, d):
    """Build the Gerber RS274X body for an inset-fed patch.

    fl/fw/pl/pw/fringing_l are in inches; d supplies the inset depth and
    gap in metres, which are converted here.
    """
    def coord(value):
        # FSLAX24Y24 coordinates: 4 decimal places with the point removed.
        return "{:.4f}".format(value).replace('.', '')

    inset_l = m_to_inch(d.inset_length)
    inset_g = m_to_inch(d.inset_gap)
    pl_s = pl - inset_l  # patch length minus the inset cut
    init_x = coord(fl / 2 + fringing_l)                    # feeder centre, x
    init_y = coord(fringing_l)                             # feeder centre, y
    patch_x = coord(fl + fringing_l + inset_l + pl_s / 2)  # shortened patch centre, x
    inset_x = coord(fl + fringing_l + inset_l / 2)         # inset rectangles centre, x
    inset_top_y = coord(fw / 2 + inset_g + inset_g / 2 + fringing_l)
    inset_y = coord(fringing_l)
    inset_down_y = coord(fringing_l - (fw / 2 + inset_g + inset_g / 2))
    gerber_format = f"""
    G04 ===== Begin FILE IDENTIFICATION =====*
    G04 File Format: Gerber RS274X*
    G04 ===== End FILE IDENTIFICATION =====*
    %FSLAX24Y24*%
    %MOIN*%
    %SFA1.0000B1.0000*%
    %OFA0.0B0.0*%
    %ADD14R,{fl}X{fw}*%
    %ADD15R,{pl_s}X{pw}*%
    %ADD16R,{inset_l}X{inset_g}*%
    %LNcond*%
    %IPPOS*%
    %LPD*%
    G75*
    D14*
    X{init_x}Y{init_y}D03*
    D15*
    X{patch_x}*
    D16*
    X{inset_x}Y{inset_top_y}*
    D16*
    X{inset_x}Y{inset_y}*
    D16*
    X{inset_x}Y{inset_down_y}*
    M02*
    """
    return gerber_format
def write_gerber(resonant_frequency, dielectric_constant, thickness, file_name, feed_type):
    """Design a patch for the given specs and write its Gerber layout to file_name."""
    designer = PatchDesigner(resonant_frequency, dielectric_constant, thickness)
    write_gerber_design(designer, file_name, feed_type)
def write_gerber_design(design_: PatchDesigner, file_name, feed_type="normal"):
    """Render design_ as Gerber text and write it to file_name (overwrites)."""
    gerber_text = get_gerber_str(design_, feed_type)
    with open(file_name, 'w') as out:
        out.write(gerber_text)
# ======================================= #
def plotPatch3D():
    """Render a rough 3-D plotly mesh of the ground plane, cavity, patch and feeder.

    NOTE(review): marked "Under Construction" -- all dimensions are
    hard-coded placeholders rather than taken from a design, and the plotly
    objects used (go.ColorBar, go.XAxis, go.YAxis) are from a legacy plotly
    API; confirm against the installed plotly version before relying on this.
    """
    #
    #
    #
    # Under Construction
    #
    #
    #
    import plotly
    from plotly.offline import init_notebook_mode
    import plotly.graph_objs as go
    plotly.offline.init_notebook_mode(connected=True)
    # Per-vertex intensity ramp and (i, j, k) triangle indices shared by all
    # cuboid meshes (vertices ordered bottom face 0-3, top face 4-7).
    # NOTE(review): 0.85714257142857 looks like a typo of 6/7 = 0.857142857... -- confirm.
    intensity = [0, 0.142857142857143, 0.285714285714286, 0.428571428571429, 0.571428571428571, 0.714285714285714, 0.85714257142857, 1]
    i = [7, 0, 0, 0, 4, 4, 2, 6, 4, 0, 3, 7]
    j = [3, 4, 1, 2, 5, 6, 5, 5, 0, 1, 2, 2]
    k = [0, 7, 2, 3, 6, 7, 1, 2, 5, 5, 7, 6]
    # NOTE(review): this repeats the flat pair inside ONE outer list (a single
    # 12-element entry); presumably [[0, 'rgb(0, 100, 0)']] * 6 was intended -- confirm.
    cavitycolor = [[0, 'rgb(0, 100, 0)']*6]
    coppercolor = [[0, 'rgb(139, 69, 19)'],
                   [1, 'rgb(139, 69, 19)'],
                   [2, 'rgb(139, 69, 19)'],
                   [3, 'rgb(139, 69, 19)'],
                   [4, 'rgb(139, 69, 19)'],
                   [5, 'rgb(139, 69, 19)'],
                   ]
    ct = 0.05  # copper_thickness
    pl = float(5)  # patch length
    pw = float(5)  # patch width
    fl = float(2)  # feeder length
    fw = float(2)  # feeder width
    # height
    ch = float(1)
    tl = pl + fl  # total length
    tfp = (pw / 2) + (fw / 2)  # top feeder point
    bfp = (pw / 2) - (fw / 2)  # bottom feeder point
    # One Mesh3d cuboid per layer: ground copper, patch top, feeder top, cavity.
    data = [
        go.Mesh3d(
            x = [0, 0, tl, tl, 0, 0, tl, tl],
            y = [0, pw, pw, 0, 0, pw, pw, 0],
            z = [0, 0, 0, 0 ] + ([ct]*4),
            colorbar = go.ColorBar(
                title='ground'
            ),
            facecolor = coppercolor,
            intensity = intensity,
            i = i,
            j = j,
            k = k,
            name = 'ground',
            showscale = True
        ),
        go.Mesh3d(
            # NOTE(review): the y list uses pl (length) rather than pw (width)
            # for the patch top -- with pl == pw here it is invisible; confirm.
            x = [0, 0, pl, pl, 0, 0, pl, pl],
            y = [0, pl, pl, 0, 0, pl, pl, 0],
            z = ([ch]*4) + ([ch + ct]*4),
            colorbar = go.ColorBar(
                title='patch_top'
            ),
            facecolor = coppercolor,
            intensity = intensity,
            i = i,
            j = j,
            k = k,
            name = 'patch_top',
            showscale = True
        ),
        go.Mesh3d(
            x = [pl, pl, tl, tl, pl, pl, tl, tl],
            y = [tfp, bfp]*4,
            z = ([ch]*4) + ([ch+ct]*4),
            colorbar = go.ColorBar(
                title='feeder_top'
            ),
            facecolor= coppercolor,
            i = i,
            j = j,
            k = k,
            name = 'feeder_top',
            showscale = True
        ),
        go.Mesh3d(
            x = [0, 0, tl, tl, 0, 0, tl, tl],
            y = [0, pw, pw, 0, 0, pw, pw, 0],
            z = ([0 + ct] * 4) + ([ch] * 4),
            colorbar = go.ColorBar(
                title='cavity'
            ),
            facecolor = cavitycolor,
            intensity = intensity,
            i = i,
            j = j,
            k = k,
            name='cavity',
            showscale=True
        ),
        # NOTE(review): empty trailing trace -- looks unintentional; confirm.
        go.Mesh3d()
    ]
    layout = go.Layout(
        xaxis=go.XAxis(
            title='x'
        ),
        yaxis=go.YAxis(
            title='y'
        )
    )
    fig = go.Figure(data=data, layout=layout)
    plotly.offline.plot(fig)
# end def
def exampleDesignPatch():
    """Example: design a 2.4 GHz patch on a 1.6 mm, er=4.4 substrate and plot fields.

    Relies on module-level helpers defined elsewhere in this file
    (design_patch, input_impedance, inset_feed_position, getGs,
    get_directivity, patch_eh_plane_plot, SurfacePlot).
    """
    freq = 2.4e9  # resonant frequency [Hz]
    Er = 4.4      # substrate dielectric constant
    h = 1.6 * 10 ** -3  # substrate thickness [m]
    # v = 3 * 10 ** 8
    W, L = design_patch(Er, h, freq)
    Rin = input_impedance(freq, W, L)
    print('Inset Feed Position : ', inset_feed_position(Rin, L))
    G1, G12 = getGs(freq, W, L)
    print('G1 : ', G1)
    print('G12 : ', G12)
    # Hard-coded integral values fed to get_directivity -- presumably
    # precomputed for this W/freq combination; verify if inputs change.
    I1 = 1.863
    I2 = 3.59801
    d1, d2 = get_directivity(G1, G12, W, freq, I1, I2)
    print('Directivity : ', d1, ' dB')
    print('Directivity : ', d2, ' dB')
    fields = patch_eh_plane_plot(freq, W, L, h, Er)
    # surface_plot(fields)
    SurfacePlot(fields, freq, W, L, h, Er)
# end def
def exampleSimpleDesign():
    """Walk through the simple design API: JSON string, Result object,
    Gerber export, and manual tweaking of a design before re-export.
    """
    # Fix: removed an unused function-scope `import pytest` that served no
    # purpose here and made this example depend on a test-only package.
    # resonant frequency in Hz
    freq = 2.4 * 10 ** 9
    # dielectric constant
    er = 4.4
    # thickness of the cavity in meter
    h = 1.6 * 10 ** -3
    # Quick print result
    print(design_string(freq, er, h))
    # Using result object
    simpleresult = design_result(freq, er, h)
    print(simpleresult.patch_width)
    # Quick write gerber file
    #
    # normal feed
    write_gerber(freq, er, h, 'patch_normal_design.gbr', 'normal')
    # inset feed
    write_gerber(freq, er, h, 'patch_inset_design.gbr', 'inset')
    # Custom change design parameters
    # Using design object
    simpledesign = design(freq, er, h)
    # Changing feeder length and feeder width
    simpledesign.feeder_length *= 1.25
    simpledesign.feeder_width *= 1.10
    # To write as gerber file for both types
    # normal feed
    write_gerber_design(simpledesign, 'patch_normal_design.gbr', 'normal')
    # inset feed
    write_gerber_design(simpledesign, 'patch_inset_design.gbr', 'inset')
# end def
# ======================================= #
def test_frequency_limit():
    """Verify design() rejects an out-of-range frequency with the exact message."""
    import pytest
    with pytest.raises(ValueError) as execinfo:
        design(0, 0, 0)
    # Fix: assert instead of returning the comparison -- a returned bool is
    # ignored by pytest, so the old version could never fail.
    assert execinfo.value.args[0] == 'Frequency value should be in between 1MHz to 100 GHz'
def test_dielectric_limit():
    """Verify design() rejects a non-positive dielectric constant with the exact message."""
    import pytest
    with pytest.raises(ValueError) as execinfo:
        design(10 ** 9, 0, 0)
    # Fix: assert instead of returning the comparison -- a returned bool is
    # ignored by pytest, so the old version could never fail.
    assert execinfo.value.args[0] == 'Dielectric constant value should be in greater than 0 and smaller or equals 100,000'
def test_thickness_limit():
    """Verify design() rejects a non-positive thickness with the exact message."""
    import pytest
    with pytest.raises(ValueError) as execinfo:
        design(10 ** 9, 1, 0)
    # Fix: assert instead of returning the comparison -- a returned bool is
    # ignored by pytest, so the old version could never fail.
    assert execinfo.value.args[0] == 'Thickness value should be in greater than 0 and smaller or equals 1 meter'
# ======================================= #
if __name__ == "__main__":
    # Smoke-run every example and validation check in sequence when this
    # module is executed as a script (examples defined earlier in the file).
    exampleRectPatch()
    examplePatchEfficiency()
    exampleDirectivity()
    exampleDesignPatch()
    exampleSimpleDesign()
    test_frequency_limit()
    test_dielectric_limit()
    test_thickness_limit()
# end if
# ======================================== #
|
{"hexsha": "6538dcc9cf1a9bbe54b3620fd8a8778a17bb8815", "size": 55899, "ext": "py", "lang": "Python", "max_stars_repo_path": "PatchAntenna.py", "max_stars_repo_name": "gmweir/QuasiOptics", "max_stars_repo_head_hexsha": "0974178984f845597c5209217613c26edf931ed0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-11-06T18:16:00.000Z", "max_stars_repo_stars_event_max_datetime": "2020-11-06T18:16:00.000Z", "max_issues_repo_path": "PatchAntenna.py", "max_issues_repo_name": "gmweir/QuasiOptics", "max_issues_repo_head_hexsha": "0974178984f845597c5209217613c26edf931ed0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "PatchAntenna.py", "max_forks_repo_name": "gmweir/QuasiOptics", "max_forks_repo_head_hexsha": "0974178984f845597c5209217613c26edf931ed0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.7032173342, "max_line_length": 246, "alphanum_fraction": 0.5276480796, "include": true, "reason": "import numpy,import scipy,from scipy", "num_tokens": 15342}
|
[STATEMENT]
lemma no_unacquired_write_to_read_only_reads':
assumes no_wrt: "no_write_to_read_only_memory \<S> sb"
assumes non_vol: "non_volatile_owned_or_read_only True \<S> \<O> sb"
assumes consis: "sharing_consistent \<S> \<O> sb"
shows "read_only_reads \<O> sb \<inter> outstanding_refs is_Write\<^sub>s\<^sub>b sb \<subseteq> \<O> \<union> all_acquired sb"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. read_only_reads \<O> sb \<inter> outstanding_refs is_Write\<^sub>s\<^sub>b sb \<subseteq> \<O> \<union> all_acquired sb
[PROOF STEP]
using no_unacquired_write_to_read_only_reads [OF no_wrt non_vol consis]
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>?a \<in> read_only_reads \<O> sb; ?a \<notin> \<O> \<union> all_acquired sb\<rbrakk> \<Longrightarrow> ?a \<notin> outstanding_refs is_Write\<^sub>s\<^sub>b sb
goal (1 subgoal):
1. read_only_reads \<O> sb \<inter> outstanding_refs is_Write\<^sub>s\<^sub>b sb \<subseteq> \<O> \<union> all_acquired sb
[PROOF STEP]
by auto
|
{"llama_tokens": 400, "file": "Store_Buffer_Reduction_ReduceStoreBuffer", "length": 2}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.