| text (string, lengths 0–1.25M) | meta (string, lengths 47–1.89k) |
|---|---|
Load LFindLoad.
From adtind Require Import goal78.
From lfind Require Import LFind.
Lemma lfind_state (n:natural) (x:lst) (IHx:@eq lst (rev (rev x)) x):@eq lst (rev (append (rev x) (Cons n Nil))) (Cons n x).
Admitted.
From QuickChick Require Import QuickChick.
QCInclude "/home/yousef/lemmafinder/benchmark/_lfind_clam_lf_goal78_rev_rev_58_rev_append/".
QCInclude ".".
Extract Constant defNumTests => "50".
Derive Show for natural.
Derive Arbitrary for natural.
Instance Dec_Eq_natural : Dec_Eq natural.
Proof. dec_eq. Qed.
Derive Show for lst.
Derive Arbitrary for lst.
Instance Dec_Eq_lst : Dec_Eq lst.
Proof. dec_eq. Qed.
Open Scope string_scope.
Parameter print : natural -> string -> natural.
Extract Constant print => "Extract.print".
Definition collect_data (n:natural) (x:lst) :=
let lfind_var := "n:" ++ "(" ++ show n ++ ")"++ "|" ++"x:" ++ "(" ++ show x ++ ")"
in let lfind_v := print n lfind_var
in lfind_state lfind_v x.
QuickChick collect_data.
Success.
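(* A hedged sketch, not part of the generated harness: one helper lemma of
   the shape lfind searches for here, assuming the usual definitions of rev
   and append on lst from goal78. The name rev_append_singleton is
   hypothetical and the proof is left admitted; with it and IHx, lfind_state
   follows by rewriting. *)
Lemma rev_append_singleton (l : lst) (n : natural) :
  @eq lst (rev (append l (Cons n Nil))) (Cons n (rev l)).
Admitted.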
|
{"author": "yalhessi", "repo": "lemmaranker", "sha": "53bc2ad63ad7faba0d7fc9af4e1e34216173574a", "save_path": "github-repos/coq/yalhessi-lemmaranker", "path": "github-repos/coq/yalhessi-lemmaranker/lemmaranker-53bc2ad63ad7faba0d7fc9af4e1e34216173574a/benchmark/clam/_lfind_clam_lf_goal78_rev_rev_58_rev_append/lfind_quickchick_generator.v"}
|
#include <iostream>
#include <fstream>
#include <cstdio>
#include <map>
#include <gflags/gflags.h>
#include <vector>
#include <array>
#include <json/json.h>
#include <simple.h>
#include "totalmodel.h"
#include <FitToBody.h>
#include <VisualizedData.h>
#include <KinematicModel.h>
#include <cassert>
#include <opencv2/opencv.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <pose_to_transforms.h>
#include "meshTrackingProj.h"
#include "SGSmooth.hpp"
#include "ModelFitter.h"
#include "utils.h"
#include <thread>
#include <boost/filesystem.hpp>
DEFINE_string(root_dirs, "", "Base root folder to access data");
DEFINE_string(seqName, "default", "Sequence Name to run");
DEFINE_int32(start, 1, "Starting frame");
DEFINE_int32(end, 1000, "Ending frame");
TotalModel g_total_model;
const int NUM_JOINT_PARAMS = 21 * 3 + 2 * 21 * 3;
double gResultJoint[NUM_JOINT_PARAMS];
std::array<int, 19> map_totalcap_to_coco = {1, 0, 8, 5, 6, 7, 12, 13, 14, 2, 3, 4, 9, 10, 11, 16, 18, 15, 17}; // 19 body indices of BODY_25 from openpose
// feet indices are in order of openpose
std::array<int, 8> feet_vtx_idx = { 12239, //left bigtoe
12289, //left littletoe
12368, //left heel
12357, //left heel
14238, //right bigtoe
14288, //right littletoe
14357, //right heel
14361 //right heel
};
const int LEFT_HEEL_IDX = 21;
const int RIGHT_HEEL_IDX = 24;
std::map<int, std::string> POSE_BODY_25_BODY_PARTS = {
{0, "Nose"},
{1, "Neck"},
{2, "RShoulder"},
{3, "RElbow"},
{4, "RWrist"},
{5, "LShoulder"},
{6, "LElbow"},
{7, "LWrist"},
{8, "MidHip"},
{9, "RHip"},
{10, "RKnee"},
{11, "RAnkle"},
{12, "LHip"},
{13, "LKnee"},
{14, "LAnkle"},
{15, "REye"},
{16, "LEye"},
{17, "REar"},
{18, "LEar"},
{19, "LBigToe"},
{20, "LSmallToe"},
{21, "LHeel"},
{22, "RBigToe"},
{23, "RSmallToe"},
{24, "RHeel"},
{25, "Background"}
};
std::map<int, std::string> POSE_SMPL_BODY_PARTS = {
{0, "hips"},
{1, "leftUpLeg"},
{2, "rightUpLeg"},
{3, "spine"},
{4, "leftLeg"},
{5, "rightLeg"},
{6, "spine1"},
{7, "leftFoot"},
{8, "rightFoot"},
{9, "spine2"},
{10, "leftToeBase"},
{11, "rightToeBase"},
{12, "neck"},
{13, "leftShoulder"},
{14, "rightShoulder"},
{15, "head"},
{16, "leftArm"},
{17, "rightArm"},
{18, "leftForeArm"},
{19, "rightForeArm"},
{20, "leftHand"},
{21, "rightHand"}
};
void check_flags(int argc, char* argv[])
{
#ifdef GFLAGS_NAMESPACE
GFLAGS_NAMESPACE::ParseCommandLineFlags(&argc, &argv, true);
#else
google::ParseCommandLineFlags(&argc, &argv, true);
#endif
std::cout << "Root Directory: " << FLAGS_root_dirs << std::endl;
std::cout << "Sequence Name: " << FLAGS_seqName << std::endl;
if (FLAGS_seqName.compare("default") == 0)
{
std::cerr << "Error: Sequence Name must be set." << std::endl;
exit(1);
}
if (FLAGS_start >= FLAGS_end)
{
std::cerr << "Error: Starting frame must be less than end frame." << std::endl;
exit(1);
}
}
std::vector<smpl::SMPLParams> readResultFrames(std::string resDirName, int startFrame, int endFrame) {
int numFrames = endFrame - startFrame;
std::vector<smpl::SMPLParams> modelParams(numFrames);
    for (int i = 0, image_index = startFrame; i < numFrames; i++, image_index++) {
std::cout << "Reading single frame results: " << image_index << std::endl;
char basename[200];
sprintf(basename, "%04d.txt", image_index);
const std::string param_filename = FLAGS_root_dirs + "/" + FLAGS_seqName + "/" + resDirName + "/" + basename;
smpl::SMPLParams frame_params;
readFrameParam(param_filename, frame_params);
modelParams[i] = frame_params;
}
return modelParams;
}
struct ResultFrame {
cv::Point3d m_globalTrans; // 3 vec
std::vector< cv::Point3d > m_jointsPos; // 25 x 3 b/c 25 joints in BODY_25
std::vector< cv::Point3d > m_SMPLJointsPos; // 22 x 3 b/c 22 body joints in ADAM
std::vector< cv::Point3d > m_SMPLJointsRot; // 22 x 3 b/c 22 body joints in ADAM
std::vector< double > m_bodyCoeffs; // 30 coefficients describing body shape
std::vector< double > m_faceCoeffs; // 200 coefficients describing face
ResultFrame()
: m_globalTrans(), m_jointsPos(25), m_SMPLJointsPos(22), m_SMPLJointsRot(22), m_bodyCoeffs(TotalModel::NUM_SHAPE_COEFFICIENTS), m_faceCoeffs(TotalModel::NUM_EXP_BASIS_COEFFICIENTS)
{}
Json::Value serialize(int id) {
Json::Value frame;
frame["id"] = id;
// global translation
Json::Value transVal;
transVal["x"] = m_globalTrans.x;
transVal["y"] = m_globalTrans.y;
transVal["z"] = m_globalTrans.z;
frame["trans"] = transVal;
// joint information
for (int i = 0; i < m_jointsPos.size(); i++) {
Json::Value curJointVal;
curJointVal["name"] = POSE_BODY_25_BODY_PARTS[i];
Json::Value jointPosVal;
jointPosVal["x"] = m_jointsPos[i].x;
jointPosVal["y"] = m_jointsPos[i].y;
jointPosVal["z"] = m_jointsPos[i].z;
curJointVal["pos"] = jointPosVal;
frame["joints"].append(curJointVal);
}
// SMPL body info
for (int i = 0; i < m_SMPLJointsPos.size(); i++) {
Json::Value curJointVal;
curJointVal["name"] = POSE_SMPL_BODY_PARTS[i];
Json::Value jointPosVal;
Json::Value jointRotVal;
jointPosVal["x"] = m_SMPLJointsPos[i].x;
jointPosVal["y"] = m_SMPLJointsPos[i].y;
jointPosVal["z"] = m_SMPLJointsPos[i].z;
curJointVal["pos"] = jointPosVal;
jointRotVal["x"] = m_SMPLJointsRot[i].x;
jointRotVal["y"] = m_SMPLJointsRot[i].y;
jointRotVal["z"] = m_SMPLJointsRot[i].z;
curJointVal["rot"] = jointRotVal;
frame["SMPLJoints"].append(curJointVal);
}
// Shape coefficients
for (int i = 0; i < m_bodyCoeffs.size(); i++) {
frame["bodyCoeffs"].append(m_bodyCoeffs[i]);
}
for (int i = 0; i < m_faceCoeffs.size(); i++) {
frame["faceCoeffs"].append(m_faceCoeffs[i]);
}
return frame;
}
};
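// Hedged illustration (not from the original source) of the JSON shape that
// serialize() above emits for one frame; the field names match the code,
// the values are made up:
//   { "id": 0,
//     "trans": {"x": 0.0, "y": 0.0, "z": 0.0},
//     "joints": [ {"name": "Nose", "pos": {"x": ..., "y": ..., "z": ...}}, ... ],
//     "SMPLJoints": [ {"name": "hips", "pos": {...}, "rot": {...}}, ... ],
//     "bodyCoeffs": [ ... ], "faceCoeffs": [ ... ] }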
std::vector<ResultFrame> processResults(std::vector<smpl::SMPLParams> &modelParams) {
int numFrames = modelParams.size();
std::vector<ResultFrame> resultDataList(numFrames);
CMeshModelInstance mesh;
for (int i = 0; i < numFrames; i++) {
mesh.clearMesh();
GenerateMesh(mesh, gResultJoint, modelParams[i], g_total_model, 2, false, true); // use axis-angle and get local joint info
// first openpose BODY_25 joints
ResultFrame data;
// root translation
data.m_globalTrans = cv::Point3d(modelParams[i].m_adam_t[0], modelParams[i].m_adam_t[1], modelParams[i].m_adam_t[2]);
// body joints are returned
for (int j = 0; j < 19; j++) {
data.m_jointsPos[map_totalcap_to_coco[j]] = cv::Point3d(gResultJoint[j*3], gResultJoint[j*3 + 1], gResultJoint[j*3 + 2]);
}
// feet joints are taken from vertices
for (int j = 19, foot_idx = 0; j < 25; j++, foot_idx++) {
cv::Point3d vtx1 = mesh.m_vertices[feet_vtx_idx[foot_idx]];
if (j == RIGHT_HEEL_IDX || j == LEFT_HEEL_IDX) {
// must average the vertices
cv::Point3d vtx2 = mesh.m_vertices[feet_vtx_idx[++foot_idx]];
data.m_jointsPos[j] = cv::Point3d(0.5 * (vtx1.x + vtx2.x), 0.5 * (vtx1.y + vtx2.y), 0.5 * (vtx1.z + vtx2.z));
} else {
data.m_jointsPos[j] = vtx1;
}
}
// then SMPL joints
for (int j = 0; j < 22; j++) {
data.m_SMPLJointsPos[j] = mesh.m_joints[j];
// if (j == 3 || j == 6 || j == 9) {
// std::cout << j << " regressed" << std::endl;
// std::cout << "(" << mesh.m_joints[j].x << ", " << mesh.m_joints[j].y << ", " << mesh.m_joints[j].z << ")\n";
// }
}
for (int j = 0; j < 22; j++) {
data.m_SMPLJointsRot[j] = cv::Point3d(modelParams[i].m_adam_pose(j, 0), modelParams[i].m_adam_pose(j, 1), modelParams[i].m_adam_pose(j, 2));
}
// shape coefficients
for (int j = 0; j < TotalModel::NUM_SHAPE_COEFFICIENTS; j++) {
data.m_bodyCoeffs[j] = modelParams[i].m_adam_coeffs(j, 0);
}
for (int j = 0; j < TotalModel::NUM_EXP_BASIS_COEFFICIENTS; j++) {
data.m_faceCoeffs[j] = modelParams[i].m_adam_facecoeffs_exp(j, 0);
}
resultDataList[i] = data;
}
return resultDataList;
}
std::string serializeResults(std::vector<ResultFrame> &resultsList) {
Json::Value root;
for (int i = 0; i < resultsList.size(); i++) {
root["totalcapResults"].append(resultsList[i].serialize(i));
}
Json::StyledWriter styledWriter;
return styledWriter.write(root);
}
/**
Goes through a given directory of total capture output and processes the results to output BODY_25 (same as used by OpenPose) and SMPL joint data.
*/
int main(int argc, char* argv[])
{
check_flags(argc, argv);
// initialize total model
LoadTotalModelFromObj(g_total_model, std::string("model/mesh_nofeet.obj"));
    LoadModelColorFromObj(g_total_model, std::string("model/nofeetmesh_byTomas_bottom.obj")); // contains the color information
LoadTotalDataFromJson(g_total_model, std::string("model/adam_v1_plus2.json"), std::string("model/adam_blendshapes_348_delta_norm.json"), std::string("model/correspondences_nofeet.txt"));
LoadCocoplusRegressor(g_total_model, std::string("model/regressor_0n1_root.json"));
// read in fitting results for all frames (both before and after tracking)
std::vector<smpl::SMPLParams> trackedModelParams = readResultFrames("body_3d_frontal_tracking", FLAGS_start, FLAGS_end);
std::vector<smpl::SMPLParams> noTrackedModelParams = readResultFrames("body_3d_frontal", FLAGS_start, FLAGS_end);
// go through each frame, build mesh, and collect info to output
std::vector<ResultFrame> trackedResults = processResults(trackedModelParams);
std::vector<ResultFrame> noTrackedResults = processResults(noTrackedModelParams);
// write out
std::string trackedResStr = serializeResults(trackedResults);
std::string noTrackedResStr = serializeResults(noTrackedResults);
const std::string tracked_filename = FLAGS_root_dirs + "/" + FLAGS_seqName + "/tracked_results.json";
std::ofstream fTrack(tracked_filename);
if (!fTrack.good())
{
std::cerr << "Error: could not open json file for writing" << std::endl;
exit(1);
}
fTrack << trackedResStr;
fTrack.close();
const std::string untracked_filename = FLAGS_root_dirs + "/" + FLAGS_seqName + "/untracked_results.json";
std::ofstream fNoTrack(untracked_filename);
if (!fNoTrack.good())
{
std::cerr << "Error: could not open json file for writing" << std::endl;
exit(1);
}
fNoTrack << noTrackedResStr;
fNoTrack.close();
}
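// Hedged usage sketch (binary name assumed from this file's path,
// process_results.cpp; directory layout taken from readResultFrames above):
//   ./process_results --root_dirs=/path/to/data --seqName=mySeq --start=1 --end=100
// expects per-frame files /path/to/data/mySeq/body_3d_frontal{,_tracking}/0001.txt ...
// and writes tracked_results.json / untracked_results.json under /path/to/data/mySeq/.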
|
{"hexsha": "12ea7ef01e63cf21ad643e0f290515b4155ef8c2", "size": 11526, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "FitAdam/process_results.cpp", "max_stars_repo_name": "davrempe/MonocularTotalCapture", "max_stars_repo_head_hexsha": "649768d127d16ae49a4da200a4e4d11980785df7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5.0, "max_stars_repo_stars_event_min_datetime": "2021-05-20T06:37:53.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-03T02:59:44.000Z", "max_issues_repo_path": "FitAdam/process_results.cpp", "max_issues_repo_name": "davrempe/MonocularTotalCapture", "max_issues_repo_head_hexsha": "649768d127d16ae49a4da200a4e4d11980785df7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "FitAdam/process_results.cpp", "max_forks_repo_name": "davrempe/MonocularTotalCapture", "max_forks_repo_head_hexsha": "649768d127d16ae49a4da200a4e4d11980785df7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2021-07-05T06:30:53.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-05T06:30:53.000Z", "avg_line_length": 36.5904761905, "max_line_length": 190, "alphanum_fraction": 0.5958702065, "num_tokens": 3373}
|
"""Watch is an object to easily watch what is happening
during the training/evaluation of a deep net.
"""
import os
import time
import pdb
from collections import defaultdict
import numpy as np
from . import evaluation
class AverageMeter(object):
"""Computes and stores the average and current value of a metric.
    It is fast (constant time), regardless of the length of the measure series.
mode: (str). Behavior of the meter.
'average': just the average of all values since the start
'sliding': just the average of the last 'nlast' values
'last': just the last value (=='sliding' with nlast=1)
'min' : the minimum so far
'max' : the maximum so far
"""
def __init__(self, mode='average', nlast=5):
self.mode = mode
self.nlast = nlast
self.reset()
def reset(self):
self.vals = []
self.avg = 0
self.sum = 0
self.count = 0
self.is_perf = False
def export(self):
return {k:val for k,val in self.__dict__.items() if type(val) in (bool, str, float, int, list)}
def update(self, val, weight=1):
''' sliding window average '''
self.vals.append( val )
self.sum += val * weight
self.count += weight
if self.mode == 'average':
self.avg = self.sum / self.count
elif self.mode == 'sliding':
vals = self.vals[-self.nlast:]
self.avg = sum(vals) / (1e-8+len(vals))
elif self.mode == 'last':
self.avg = val
elif self.mode == 'min':
self.avg = min(self.avg or float('inf'), val)
elif self.mode == 'max':
self.avg = max(self.avg or -float('inf'), val)
else:
raise ValueError("unknown AverageMeter update policy '%s'" % self.mode)
def __bool__(self):
return bool(self.count)
__nonzero__ = __bool__ # for python2
def __len__(self):
return len(self.vals)
def tostr(self, name='', budget=100, unit=''):
''' Print the meter, using more or less characters
'''
_budget = budget
if name:
name += ': '
budget -= len(name)
if isinstance(self.avg, int):
avg = '%d' % self.avg
minavg = len(avg)
val = ''
budget -= len(avg) + len(unit)
else:
avg = '%f' % self.avg
minavg = (avg+'.').find('.')
val = 'last: %f' % self.vals[-1]
minval = (val+'.').find('.')
budget -= len(avg) + len(val) + 3 + 2*len(unit)
while budget < 0 :
old_budget = budget
if len(val):
val = val[:-1]
budget += 1
if len(val) < minval:
val = '' # we cannot delete beyond the decimal point
budget += 3 + len(val) + len(unit) # add parenthesis
continue
else:
if len(val) % 2: continue # shrink the other sometimes
if len(avg) >= minavg and len(name) <= len(avg):
avg = avg[:-1]
budget += 1
continue # can shrink further
if len(name) > 2:
name = name[:-2]+' ' # remove last char
budget += 1
# cannot shrink anymore
if old_budget == budget: break
res = name + avg+unit
if val: res += ' (' + val+unit + ')'
        res += ' '*max(0, _budget - len(res))
return res
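# A minimal usage sketch (not part of the original module), illustrating the
# 'sliding' mode documented in the class docstring above:
#   m = AverageMeter(mode='sliding', nlast=3)
#   for v in (1.0, 2.0, 3.0, 4.0):
#       m.update(v)
#   m.avg  # ~3.0, the mean of the last three values (2.0, 3.0, 4.0)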
class Watch (object):
"""
Usage:
------
- call start() just before the loop
- call tic() at the beginning of the loop (first line)
- call eval_train(measure1=score1, measure2=score2, ...) or eval_test(...)
    - call toc() at the end of the loop (last line)
- call stop() after the loop
Arguments:
----------
tfreq: (float or None)
temporal frequency of outputs (in seconds)
nfreq: (int or None)
iteration frequency of outputs (in iterations)
"""
def __init__(self, tfreq=30.0, nfreq=None):
self.tfreq = tfreq
self.nfreq = nfreq
# init meters
self.meters = defaultdict(AverageMeter)
self.meters['epoch'] = AverageMeter(mode='last')
self.meters['test_epoch'] = AverageMeter(mode='last')
self.meters['data_time'] = AverageMeter(mode='sliding')
self.meters['batch_time'] = AverageMeter(mode='sliding')
self.meters['lr'] = AverageMeter(mode='sliding')
self.meters['loss'] = AverageMeter(mode='sliding')
# init current status
self.tostr_t = None
self.cur_n = None
self.batch_size = None
self.last_test = 0
self.viz = False
def __getattr__(self, name):
meters = object.__getattribute__(self, 'meters')
if name in meters:
return meters[name]
else:
return object.__getattribute__(self, name)
def reset(self):
for meter in self.meters.values():
meter.reset()
def start(self):
'''Just before the loop over batches
'''
self.last_time = time.time()
self.cur_n = 0
self.tostr_t = self.last_time
def tic(self, batch_size, epoch=0, **kw):
'''Just after loading one batch
'''
assert self.last_time is not None, "you must call start() before the loop!"
self.meters['data_time'].update(time.time() - self.last_time)
self.batch_size = batch_size
self.meters['epoch'].update(epoch)
n_epochs = len(self.meters['epoch'])
for name, val in kw.items():
self.meters[name].mode = 'last'
self.meters[name].update(val)
assert len(self.meters[name]) == n_epochs, "missing values for meter %s (expected %d, got %d)" % (name, n_epochs, len(self.meters[name]))
def eval_train(self, **measures):
n_epochs = len(self.meters['epoch'])
for name, score in measures.items():
self.meters[name].is_perf = True
self.meters[name].update(score, self.batch_size)
assert len(self.meters[name]) == n_epochs, "missing values for meter %s (expected %d, got %d)" % (name, n_epochs, len(self.meters[name]))
def eval_test(self, mode='average', **measures):
assert self.batch_size is None, "you must call toc() before; measures should concern the entire test"
epoch = self.meters['epoch'].avg
self.meters['test_epoch'].update(epoch)
n_epochs = len(self.meters['test_epoch'])
for name, val in measures.items():
name = 'test_'+name
if name not in self.meters:
self.meters[name] = AverageMeter(mode=mode)
self.meters[name].is_perf = True
self.meters[name].update(val)
assert len(self.meters[name]) == n_epochs, "missing values for meter %s (expected %d, got %d)" % (name, n_epochs, len(self.meters[name]))
if self.viz: self.plot()
def toc(self):
'''Just after finishing to process one batch
'''
        assert self.batch_size is not None, "you must call tic() at the beginning of the loop"
now = time.time()
self.meters['batch_time'].update(now - self.last_time)
if (self.tfreq and now-self.tostr_t>self.tfreq) or (self.nfreq and (self.cur_n % self.nfreq) == 0):
self.tostr_t = now
n_meters = sum([bool(meter) for meter in self.meters.values()])
cols = get_terminal_ncols()
cols_per_meter = (cols - len('Time ')) / n_meters # columns per meter
N = np.int32(np.linspace(0,cols - len('Time '), n_meters+1))
N = list(N[1:] - N[:-1]) # this sums to the number of available columns
tt = ''
if self.meters['epoch']:
tt += self.meters['epoch'].tostr('Epoch', budget=N.pop()-1)+' '
tt += 'Time %s %s' % (
self.meters['data_time'].tostr('data',budget=N.pop()-1,unit='s'),
self.meters['batch_time'].tostr('batch',budget=N.pop(),unit='s'))
for name, meter in sorted(self.meters.items()):
if name in ('epoch', 'data_time', 'batch_time'): continue
if meter: tt += ' '+meter.tostr(name, budget=N.pop()-1)
print(tt)
if self.viz: self.plot()
self.batch_size = None
self.cur_n += 1
self.last_time = time.time()
def stop(self):
'''Just after all the batches have been processed
'''
res = ''
for name, meter in sorted(self.meters.items()):
if meter.is_perf:
res += '\n * ' + meter.tostr(name)
print(res[1:])
def upgrade(self):
'''Upgrade the old watcher to the latest version
'''
if not hasattr(self,'meters'):
# convert old to new format
self.meters = defaultdict(AverageMeter)
self.meters['epoch'] = AverageMeter(mode='last')
for i,name in enumerate('data_time batch_time lr loss top1 top5'.split()):
try:
self.meters[name] = getattr(self,name)
if i < 4: self.meters[name].mode = 'sliding'
delattr(self, name)
except AttributeError:
continue
if not self.meters['epoch']:
for i in range(self.epoch):
self.meters['epoch'].update(i)
return self
def measures(self):
return {name:meter.avg for name,meter in self.meters.items() if meter.is_perf}
def plot(self):
''' plot what happened so far.
'''
import matplotlib.pyplot as pl; pl.ion()
self.upgrade()
epochs = self.meters['epoch'].vals
test_epochs = self.meters['test_epoch'].vals
fig = pl.figure('Watch')
pl.subplots_adjust(0.1,0.03,0.97,0.99)
done = {'epoch','test_epoch'}
ax = pl.subplot(321)
ax.lines = []
for name in 'data_time batch_time'.split():
meter = self.meters[name]
if not meter: continue
done.add(name)
n = len(meter.vals)
pl.plot(epochs[:n], meter.vals, label=name)
self.crop_plot(ymin=0)
pl.legend()
ax = pl.subplot(322)
ax.lines = []
for name in 'lr'.split():
meter = self.meters[name]
if not meter: continue
done.add(name)
n = len(meter.vals)
pl.plot(epochs[:n], meter.vals, label=name)
self.crop_plot(ymin=0)
pl.legend()
def avg(arr):
from scipy.ndimage.filters import uniform_filter
return uniform_filter(arr, size=max(3,len(arr)//20), mode='nearest')
def halfc(color):
pdb.set_trace()
return tuple([c/2 for c in color])
ax = pl.subplot(312)
ax.lines = []
for name in self.meters:
if not name.startswith('loss'): continue
meter = self.meters[name]
if not meter: continue
done.add(name)
n = len(meter.vals)
line = pl.plot(epochs[:n], meter.vals, ':', lw=0.5)
ax.plot(epochs[:n], avg(meter.vals), '-', label=name, color=line[0].get_color())
self.crop_plot()
pl.legend()
ax = pl.subplot(313)
ax.lines = []
for name in self.meters:
if name in done: continue
meter = self.meters[name]
if not meter: continue
done.add(name)
n = len(meter.vals)
if name.startswith('test_'):
epochs_ = test_epochs[:n]
else:
epochs_ = epochs[:n]
line = ax.plot(epochs_, meter.vals, ':', lw=0.5)
ax.plot(epochs_, avg(meter.vals), '-', label=name, color=line[0].get_color())
self.crop_plot()
pl.legend()
pl.pause(0.01) # update the figure
def export(self):
members = {}
for k, v in self.__dict__.items():
if k == 'meters':
meters = {}
for k1,v1 in v.items():
meters[k1] = v1.export()
members[k] = meters
else:
members[k] = v
return members
@staticmethod
def update_all(checkpoint):
watch = Watch()
for k,v in checkpoint.items():
if 'meters' in k:
meters = defaultdict(AverageMeter)
for k1,v1 in v.items():
meter = AverageMeter()
meter.__dict__.update(v1)
meters[k1] = meter
watch.__dict__[k] = meters
else:
watch.__dict__[k] = v
return watch
@staticmethod
def crop_plot(span=0.5, ax=None, xmin=np.inf, xmax=-np.inf, ymin=np.inf, ymax=-np.inf):
import matplotlib.pyplot as pl
if ax is None: ax=pl.gca()
if not ax.lines: return # nothing to do
# set xlim to the last <span> of all data
for l in ax.lines:
x,y = map(np.asarray, l.get_data())
xmin = min(xmin,np.min(x[np.isfinite(x)]))
xmax = max(xmax,np.max(x[np.isfinite(x)]))
xmin = xmax - span*(xmax-xmin)
# set ylim to the span of remaining points
for l in ax.lines:
x,y = map(np.asarray, l.get_data())
y = y[(x>=xmin) & (x<=xmax) & np.isfinite(y)] # select only relevant points
if y.size == 0: continue
ymin = min(ymin,np.min(y))
ymax = max(ymax,np.max(y))
try:
ax.set_xlim(xmin,xmax+1)
ax.set_ylim(ymin,(ymax+1e-8)*1.01)
except ValueError:
pass #pdb.set_trace()
class TensorBoard (object):
"""Tensorboard to plot training and validation loss and others
.. notes::
```shell
conda install -c conda-forge tensorboardx
conda install tensorflow
```
Args:
logdir (str): path to save log
phases (array): phases to plot, e.g., ['train', 'val']
"""
def __init__(self, logdir, phases):
from tensorboardX import SummaryWriter
if not os.path.exists(logdir):
for key in phases:
os.makedirs(os.path.join(logdir, key))
self.phases = phases
self.tb_writer={}
for key in phases:
self.tb_writer[key] = SummaryWriter(os.path.join(logdir, key))
def add_scalars(self, phase, watch, names):
""" Add scalar values in watch.meters[names]
"""
if not phase in self.phases:
raise AttributeError('%s is unknown'%phase)
epochs = sorted(watch.meters['epoch'].vals)
for name in names:
vals = sorted(watch.meters[name].vals)
cnt = watch.meters[name].count
for n, val in zip(epochs, vals):
self.tb_writer[phase].add_scalar(name, val, n*cnt)
    def close(self):
for key in self.phases:
self.tb_writer[key].close()
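# Hedged usage sketch (phase names assumed; `watch` is a Watch instance that
# has already accumulated 'epoch' and 'loss' meters):
#   tb = TensorBoard('runs/exp1', ['train', 'val'])
#   tb.add_scalars('train', watch, ['loss'])
#   tb.close()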
def get_terminal_ncols(default=160):
try:
import sys
from termios import TIOCGWINSZ
from fcntl import ioctl
from array import array
except ImportError:
return default
else:
try:
return array('h', ioctl(sys.stdout, TIOCGWINSZ, '\0' * 8))[1]
except:
            try:
                from os import environ
            except ImportError:
                return default
            else:
                return int(environ.get('COLUMNS', default + 1)) - 1
if __name__ == '__main__':
import time
# test printing size
batch_size = 256
watch = Watch(tfreq=0.5)
    watch.start()
watch.meters['top1'].is_perf = True
watch.meters['top5'].is_perf = True
for epoch in range(99999):
        watch.tic(batch_size, epoch=epoch, loss=np.sin(epoch/10), lr=np.cos(epoch/20))
time.sleep(0.1)
watch.meters['top1'].update(1-np.exp(-epoch/10))
watch.meters['top5'].update(1-np.exp(-epoch/5))
        watch.toc()
watch.stop()
pdb.set_trace()
|
{"hexsha": "d35d433c64c703df50bff64f6a154199fdd6e1f3", "size": 16323, "ext": "py", "lang": "Python", "max_stars_repo_path": "ML_Models/DeepImageRetrieval/dirtorch/utils/watcher.py", "max_stars_repo_name": "MU-Data-Science/QIK", "max_stars_repo_head_hexsha": "95df516cd5629fddf549bfb877b14ebfce0519df", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-09-22T22:03:51.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-22T17:28:33.000Z", "max_issues_repo_path": "ML_Models/DeepImageRetrieval/dirtorch/utils/watcher.py", "max_issues_repo_name": "MU-Data-Science/QIK", "max_issues_repo_head_hexsha": "95df516cd5629fddf549bfb877b14ebfce0519df", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 9, "max_issues_repo_issues_event_min_datetime": "2020-03-22T04:53:58.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-12T00:20:20.000Z", "max_forks_repo_path": "ML_Models/DeepImageRetrieval/dirtorch/utils/watcher.py", "max_forks_repo_name": "MU-Data-Science/QIK", "max_forks_repo_head_hexsha": "95df516cd5629fddf549bfb877b14ebfce0519df", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-09-28T19:28:00.000Z", "max_forks_repo_forks_event_max_datetime": "2020-09-28T19:28:00.000Z", "avg_line_length": 33.1768292683, "max_line_length": 149, "alphanum_fraction": 0.5340317344, "include": true, "reason": "import numpy,from scipy", "num_tokens": 3947}
|
[STATEMENT]
lemma sort_simps [simp]:
"sort cmp [] = []"
"sort cmp (x # xs) = insort cmp x (sort cmp xs)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Sorting_Algorithms.sort cmp [] = [] &&& Sorting_Algorithms.sort cmp (x # xs) = Sorting_Algorithms.insort cmp x (Sorting_Algorithms.sort cmp xs)
[PROOF STEP]
by (simp_all add: sort_def)
|
{"llama_tokens": 142, "file": null, "length": 1}
|
# Some basic functions to interact with the data.
using LinearAlgebra, Distributions, Plots

function gencov(d::Integer)
    A = rand(d, d)
    I + (A + A')/2
end
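# Hedged example (not in the original file): gencov is intended to build a
# symmetric matrix usable as an MvNormal covariance, e.g.
#   Σ = gencov(2)
#   classA = MvNormal(zeros(2), Σ)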
function plotclouds(cloudA, cloudB)
scatter(cloudA[1,:], cloudA[2,:], label="-1")
scatter!(cloudB[1,:], cloudB[2,:], label="1")
end
function errorrate(classA::MvNormal,
classB::MvNormal,
w::AbstractVector, n::Integer)
testA = rand(classA, n)
testB = rand(classB, n)
X = [[testA testB]' ones(2*n)]
    Y = [-ones(n); ones(n)]
    sum(sign.(X*w) .≠ Y) / (2n)
end
function drawborder(f)
    xs = range(xlims()..., length=100)
    ys = range(ylims()..., length=100)
    contour!(xs, ys, f, levels=[0], colorbar=false, ls=:dash)
end
function plotdualitygap(m::Integer,
μ::Real,
numstepsarray::AbstractVector{T}) where T<:Integer
dualgaps = [m/μ^i for i=0:length(numstepsarray)]
numstepssumsarray = reduce(
(arr, n) -> [arr; arr[end]+n] ,[0], numstepsarray)
dualgaps2 = repeat(dualgaps, inner=2)[1:end-1]
numstepssumsarray2 = repeat(numstepssumsarray, inner=2)[2:end]
plot(numstepssumsarray2, dualgaps2, yscale=:log10,
xlabel="Newton iterations", ylabel="duality gap")
end
|
{"hexsha": "28fbb396b9021b2b35ead1360a2bd19c7ad08c66", "size": 1188, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "SVM/common.jl", "max_stars_repo_name": "cyber-meow/Optimization_algos", "max_stars_repo_head_hexsha": "ae38b156fbd6ca71bf200fb8d2af7a5d5f817b68", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "SVM/common.jl", "max_issues_repo_name": "cyber-meow/Optimization_algos", "max_issues_repo_head_hexsha": "ae38b156fbd6ca71bf200fb8d2af7a5d5f817b68", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "SVM/common.jl", "max_forks_repo_name": "cyber-meow/Optimization_algos", "max_forks_repo_head_hexsha": "ae38b156fbd6ca71bf200fb8d2af7a5d5f817b68", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.7, "max_line_length": 74, "alphanum_fraction": 0.6178451178, "num_tokens": 390}
|
[STATEMENT]
lemma summable_zeta:
assumes "Re s > 1"
shows "summable (\<lambda>n. of_nat (Suc n) powr -s)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. summable (\<lambda>n. of_nat (Suc n) powr - s)
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. summable (\<lambda>n. of_nat (Suc n) powr - s)
[PROOF STEP]
have "summable (\<lambda>n. exp (complex_of_real (ln (real (Suc n))) * - s))" (is "summable ?f")
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. summable (\<lambda>n. exp (complex_of_real (ln (real (Suc n))) * - s))
[PROOF STEP]
by (subst summable_Suc_iff, rule summable_complex_powr_iff) (use assms in auto)
[PROOF STATE]
proof (state)
this:
summable (\<lambda>n. exp (complex_of_real (ln (real (Suc n))) * - s))
goal (1 subgoal):
1. summable (\<lambda>n. of_nat (Suc n) powr - s)
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
summable (\<lambda>n. exp (complex_of_real (ln (real (Suc n))) * - s))
goal (1 subgoal):
1. summable (\<lambda>n. of_nat (Suc n) powr - s)
[PROOF STEP]
have "?f = (\<lambda>n. of_nat (Suc n) powr -s)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<lambda>n. exp (complex_of_real (ln (real (Suc n))) * - s)) = (\<lambda>n. of_nat (Suc n) powr - s)
[PROOF STEP]
by (simp add: powr_def algebra_simps del: of_nat_Suc)
[PROOF STATE]
proof (state)
this:
(\<lambda>n. exp (complex_of_real (ln (real (Suc n))) * - s)) = (\<lambda>n. of_nat (Suc n) powr - s)
goal (1 subgoal):
1. summable (\<lambda>n. of_nat (Suc n) powr - s)
[PROOF STEP]
finally
[PROOF STATE]
proof (chain)
picking this:
summable (\<lambda>n. of_nat (Suc n) powr - s)
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
summable (\<lambda>n. of_nat (Suc n) powr - s)
goal (1 subgoal):
1. summable (\<lambda>n. of_nat (Suc n) powr - s)
[PROOF STEP]
.
[PROOF STATE]
proof (state)
this:
summable (\<lambda>n. of_nat (Suc n) powr - s)
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 870, "file": "Zeta_Function_Zeta_Function", "length": 10}
|
import argparse
import json
import os
import numpy as np
import pandas as pd
import torch
from sklearn import metrics as sklearn_metrics
from sklearn.model_selection import train_test_split
from torch.utils.data import TensorDataset, DataLoader, SequentialSampler
from transformers import BertTokenizerFast, BertTokenizer, ElectraTokenizer, RobertaTokenizer, AutoTokenizer, BertForTokenClassification, ElectraForTokenClassification, RobertaForTokenClassification, AdamW, get_linear_schedule_with_warmup, get_scheduler
from string import punctuation
import seqeval
from seqeval.metrics import classification_report as seqeval_classif_report
def get_tag2idx(df):
"""
Returns tags maps from a given dataframe df
Outputs:
tag2idx: map from tag to idx
idx2tag: map from idx to tag
"""
tag_values = list(df["tag"].unique()) + ['PAD']
tag2idx = {tag:idx for idx, tag in enumerate(tag_values)}
idx2tag = {idx:tag for tag, idx in tag2idx.items()}
return tag2idx, idx2tag
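# Hedged illustration (column values assumed): for a df whose "tag" column
# contains ['O', 'B-MET', 'I-MET'], get_tag2idx returns
#   tag2idx == {'O': 0, 'B-MET': 1, 'I-MET': 2, 'PAD': 3}
# and idx2tag is the inverse mapping.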
def tokenize_sentence_words(tokenizer, sentence_words, sentence_word_tags):
"""
Tokenizes each word in a sentence using a given tokenizer (eg: BiobertTokenizer)
For each word in the sentence, asociates its tag to the corresponding word tokens
Inputs:
tokenizer: the tokenizer used (eg: BiobertTokenizer)
sentence_words: the sentence split into words, as a list
sentence_word_tags: the tags associated with each of the words in sentence_words
Outputs:
sentence_tokenized: the sentence split into tokens, as a list
sentence_tokenized_tags: the tags associated with each of the tokens in the sentence
"""
sentence_tokenized = []
sentence_tokenized_tags = []
for word_idx, word in enumerate(sentence_words):
word_tokens = tokenizer.tokenize(str(word))
word_tag = sentence_word_tags[word_idx]
num_word_tokens = len(word_tokens)
sentence_tokenized.extend(word_tokens)
sentence_tokenized_tags.extend([word_tag] * num_word_tokens)
return sentence_tokenized, sentence_tokenized_tags
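# Hedged illustration of the tag propagation above (subword split assumed):
#   sentence_words = ['immunostaining', 'data'], tags = ['B-MET', 'O']
#   a WordPiece tokenizer might yield ['immuno', '##staining', 'data'], and the
#   returned tags would be ['B-MET', 'B-MET', 'O'].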
def tokenize_sentences(tokenizer, df, tags_of_interest):
"""
Tokenizes a list of sentences retrieved from a given dataframe df
Inputs:
tokenizer: the tokenizer used (eg: BiobertTokenizer)
df: dataframe containing training data (eg: training_data.csv)
Outputs:
sentences_tokenized: list of tokenized sentences
sentences_tokenized_tags: list of tags associated with each tokenized sentence
"""
sentences_tokenized = []
sentences_tokenized_tags = []
fn1 = lambda x: [word for word in x["word"].values]
fn2 = lambda x: [tag for tag in x["tag"].values]
sentences_words = df.groupby(['pmcid', 'sent_index']).apply(fn1)
sentences_word_tags = df.groupby(['pmcid', 'sent_index']).apply(fn2)
for sent_idx, sentence_words in enumerate(sentences_words):
sentence_word_tags = sentences_word_tags[sent_idx]
for tag in tags_of_interest:
if tag in sentence_word_tags:
sentence_tokenized, sentence_tokenized_tags = tokenize_sentence_words(tokenizer, sentence_words, sentence_word_tags)
sentences_tokenized.append(sentence_tokenized)
sentences_tokenized_tags.append(sentence_tokenized_tags)
                break
return sentences_tokenized, sentences_tokenized_tags
def pad_sequences(sentences_input_ids, maxlen, pad_value):
return np.array([np.pad(a[:maxlen], (0, max(0, maxlen - len(a))), constant_values=(pad_value)) for a in sentences_input_ids])
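# Worked example of pad_sequences (values made up): sequences are truncated
# to maxlen and right-padded with pad_value:
#   pad_sequences([[1, 2], [1, 2, 3, 4]], maxlen=3, pad_value=0)
#   -> array([[1, 2, 0],
#             [1, 2, 3]])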
def get_train_data(sentences_tokens, tokenizer, max_len):
encoded_input_ids_all = []
attention_masks_all = []
for sentence_tokens in sentences_tokens:
sentence_tokens_ids = tokenizer.convert_tokens_to_ids(sentence_tokens)
inputs = tokenizer.prepare_for_model(sentence_tokens_ids, padding = 'max_length', \
truncation = True, max_length = max_len, add_special_tokens=False)
encoded_input_ids = inputs['input_ids']
attention_mask = inputs['attention_mask']
encoded_input_ids_all.append(encoded_input_ids)
attention_masks_all.append(attention_mask)
return encoded_input_ids_all, attention_masks_all
def get_train_tags(sentences_tags, max_len, token_to_idx = None, pad_value = 0):
sentence_tags_all = []
    for sentence_tags in sentences_tags:
        sentence_tags_all.append([token_to_idx[token] for token in sentence_tags])
sentence_tags = pad_sequences(sentence_tags_all, max_len, pad_value)
return sentence_tags
def get_dataloader(inputs, masks, tags):
"""
Returns a DataLoader instance that contains inputs, masks and tags
The DataLoader is used for batching during training
"""
inputs = torch.tensor(inputs)
masks = torch.tensor(masks)
tags = torch.tensor(tags)
dataset = TensorDataset(inputs, masks, tags)
sampler = SequentialSampler(dataset)
dataloader = DataLoader(dataset, sampler=sampler, batch_size=args.batch_size)
return dataloader
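# Hedged usage sketch (tensor shapes assumed; args.batch_size is set by the
# CLI parser in __main__ below):
#   loader = get_dataloader(train_input_ids, train_attention_masks, train_tags)
#   for inputs, masks, tags in loader:
#       ...  # each tensor is (batch_size, max_len)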
def print_metrics_resource_type(classif_report, phase, resource_type):
"""
Prints metrics (precision, recall) for one resource type
Resource type can be one of: 'B-MET', 'I-MET', 'B-DAT', 'O'
"""
resource_stats = classif_report[resource_type]
precision = round(resource_stats['precision'], 3)
recall = round(resource_stats['recall'], 3)
if (recall + precision) != 0:
f1 = round(2 * recall * precision / (recall + precision), 3)
else:
f1 = 0
    print(phase + ':', resource_type, 'Precision:', precision, resource_type, 'Recall:', recall, 'F1:', f1)
return f1
def print_metrics_token_level(classif_report, phase, tags):
"""
    Prints metrics (precision, recall) for each of the resource types in a given phase
Phase can be either 'Train' or 'Val'
"""
f1s = []
for tag in tags:
f1_tag = print_metrics_resource_type(classif_report, phase, tag)
f1s.append(f1_tag)
return np.array(f1s).mean()
def print_metrics_entity_level(classif_report, loss, phase):
"""
    Prints metrics (precision, recall) for each of the resource types in a given phase at the entity level
Phase can be either 'Train' or 'Val'
"""
f1_scores = []
for res_type in ['MET', 'DAT']:
resource_stats = classif_report[res_type]
precision = round(resource_stats['precision'], 3)
recall = round(resource_stats['recall'], 3)
f1_score = round(resource_stats['f1-score'], 3)
f1_scores.append(f1_score)
print(phase + ':', res_type, 'Precision:', precision, 'Recall:', recall, 'F1:', f1_score)
return np.mean(f1_scores)
def evaluate_entity_level(pred_numeric_tags, true_numeric_tags, idx2tag, loss, phase):
"""
Evaluates metrics on mentions on a given batch at the entity level
Inputs:
pred_numeric_tags: predicted labels for a given batch
true_numeric_tags: true labels for a given batch
idx2tag: tag index to tag mapping
phase: 'Train' or 'Val' - for printing purposes only
"""
pred_tags = [[idx2tag[pred_numeric_tag] for pred_numeric_tag in pred_numeric_tags]]
true_tags = [[idx2tag[true_numeric_tag] for true_numeric_tag in true_numeric_tags]]
classif_report_relaxed = seqeval_classif_report(true_tags, pred_tags, digits=3, output_dict = True)
f1_relaxed = print_metrics_entity_level(classif_report_relaxed, loss, phase)
return classif_report_relaxed, f1_relaxed
def train_epoch(model, optimizer, lr_scheduler, dataloader, idx2tag, tags_of_interest, tokens, phase):
"""
Handles training of the model for one epoch
"""
running_loss = 0
num_batches = len(dataloader)
pred_tags = []
true_tags = []
pad_token_id = tokens['PAD']
cls_token_id = tokens['CLS']
sep_token_id = tokens['SEP']
for num_batch, batch in enumerate(dataloader):
inputs, masks, tags = batch
inputs = inputs.to(device)
masks = masks.to(device)
tags = tags.to(device)
if phase == 'Train':
model.zero_grad()
outputs = model(inputs, attention_mask=masks, labels=tags)
loss = outputs[0]
loss.backward()
optimizer.step()
if lr_scheduler:
lr_scheduler.step()
optimizer.zero_grad()
else:
with torch.no_grad():
outputs = model(inputs, attention_mask=masks, labels=tags)
mask_no_pad_tokens = ((inputs != cls_token_id) & (inputs != pad_token_id) & (inputs != sep_token_id))
tags_no_pad_tokens = tags[mask_no_pad_tokens]
logits = outputs[1]
predictions_no_pad_tokens = torch.argmax(logits, axis = 2)[mask_no_pad_tokens]
predictions_cpu = predictions_no_pad_tokens.to('cpu').numpy()
tags_cpu = tags_no_pad_tokens.to('cpu').numpy()
loss = outputs[0].item()
running_loss += loss
true_tags.extend(tags_cpu)
pred_tags.extend(predictions_cpu)
classif_report_entity, f1_entity = evaluate_entity_level(pred_tags, true_tags, idx2tag, running_loss, phase)
return running_loss / num_batches, classif_report_entity, f1_entity
def train_model(model, optimizer, lr_scheduler, num_epochs, train_dataloader, val_dataloader, idx2tag, tags, tokens, model_name, checkpt_dir):
"""
Handles training of the model for num_epochs
"""
max_f1_val = 0
train_losses, val_losses = [], []
for num_epoch in range(num_epochs):
print('Epoch: ', (num_epoch + 1))
print('*' * 20)
# training mode
model.train()
train_loss, classif_report_train, f1_train = train_epoch(model, optimizer, lr_scheduler, train_dataloader, idx2tag, tags, tokens, 'Train')
train_losses.append(train_loss)
# evaluation mode
model.eval()
val_loss, classif_report_val, f1_val = train_epoch(model, None, lr_scheduler, val_dataloader, idx2tag, tags, tokens, 'Val')
val_losses.append(val_loss)
print('Train Loss: ', round(train_loss, 5), 'Val Loss: ', round(val_loss, 5))
if f1_val >= max_f1_val:
max_f1_val = f1_val
best_epoch = num_epoch
best_model = model
torch.save({
'model_state_dict': best_model.state_dict(),
'epoch' : best_epoch,
'f1_val' : max_f1_val,
            }, os.path.join(checkpt_dir, 'checkpt_' + model_name + '_' + str(best_epoch + 1) + '_epochs'))
return train_losses, val_losses, best_model
def save_idx2tag(idx2tag, output_data_dir):
"""
    Save idx2tag dict to a json file in output_data_dir (passed via the `--intermediate-files-dir` option). This dict data is used during
prediction to interpret model output.
:param idx2tag: The dict
:return: the file name written
"""
print(f'idx2tag={idx2tag}')
    json_file_name = os.path.join(output_data_dir, 'idx2tag-925.json')
    with open(json_file_name, 'w') as f:
        f.write(json.dumps(idx2tag))
    return json_file_name
def get_dataloaders(tokenizer, train_df, val_df, test_df, tags, tag2idx, max_len):
"""
Returns train, val and test dataloaders from the corresponding dfs
"""
train_sentences_tokenized, train_sentences_tokenized_tags = tokenize_sentences(tokenizer, train_df, tags)
train_input_ids, train_attention_masks = get_train_data(train_sentences_tokenized, tokenizer, max_len)
train_tags = get_train_tags(train_sentences_tokenized_tags, max_len, tag2idx, tag2idx['PAD'])
val_sentences_tokenized, val_sentences_tokenized_tags = tokenize_sentences(tokenizer, val_df, tags)
val_input_ids, val_attention_masks = get_train_data(val_sentences_tokenized, tokenizer, max_len)
val_tags = get_train_tags(val_sentences_tokenized_tags, max_len, tag2idx, tag2idx['PAD'])
test_sentences_tokenized, test_sentences_tokenized_tags = tokenize_sentences(tokenizer, test_df, tags)
test_input_ids, test_attention_masks = get_train_data(test_sentences_tokenized, tokenizer, max_len)
test_tags = get_train_tags(test_sentences_tokenized_tags, max_len, tag2idx, tag2idx['PAD'])
train_dataloader = get_dataloader(train_input_ids, train_attention_masks, train_tags)
val_dataloader = get_dataloader(val_input_ids, val_attention_masks, val_tags)
test_dataloader = get_dataloader(test_input_ids, test_attention_masks, test_tags)
return train_dataloader, val_dataloader, test_dataloader
def create_dir(dir_path):
if not os.path.exists(dir_path):
os.mkdir(dir_path)
print("Directory '% s' created" % dir_path)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# hyperparameters sent by the client are passed as command-line arguments to the script.
parser.add_argument('--epochs', help = 'number of epochs for training', type=int, default=10)
parser.add_argument('--batch-size', help = 'batch_size for training', type=int, default=32)
parser.add_argument('--max_len', help = 'maximum length of sequences in batch', type=int, default=256)
parser.add_argument('--learning-rate', help = 'learning Rate for training', type=float, default=3e-5)
parser.add_argument('--weight_decay', help = 'weight decay for learning rate', type=float, default=0.0)
parser.add_argument('--use-cuda', help = 'if to use cuda for training or not', type=bool, default=True)
    parser.add_argument('--tags', type=str, nargs='+', help = 'tags present in the dataset', default=['B-DAT', 'B-MET', 'I-MET'])
parser.add_argument('--data-dir', help = 'directory where the data is being read from', type=str, default='data')
parser.add_argument('--checkpt_dir', help = 'output directory where checkpoints will be saved', type=str, default='checkpts')
parser.add_argument('--intermediate-files-dir', help = 'output directory where intermediate files like idx2tag.json will be saved', type=str, default='model_artifacts')
    parser.add_argument('--model_name', help = 'name of model that will be instantiated during training; code currently supports: biobert, scibert, pubmedbert, pubmedbert_pmc, bluebert, bluebert_mimic3, sapbert, \
                        sapbert_mean_token, bioelectra, bioelectra_pmc, electramed, biomed_roberta, biomed_robera_chemprot, biomed_roberta_rct500', type=str, default='scibert')
parser.add_argument('--model_version', help = 'Huggingface version of model; for instance, for scibert it could be allenai/scibert_scivocab_uncased', type=str, default = 'allenai/scibert_scivocab_uncased')
parser.add_argument('--train_file', help = 'location of training file', type=str, default='train_v0.csv')
parser.add_argument('--val_file', help = 'location of val file', type=str, default='val_v0.csv')
parser.add_argument('--test_file', help = 'location of test file', type=str, default='test_v0.csv')
    parser.add_argument('--sanity_check', help = 'true for sanity checking that the pipeline works well; will only train on the first 10000 rows of the training file', default = False, action = 'store_true', required = False)
args, _ = parser.parse_known_args()
data_dir = args.data_dir
train_df = pd.read_csv(os.path.join(data_dir, args.train_file))
test_df = pd.read_csv(os.path.join(data_dir, args.test_file))
val_df = pd.read_csv(os.path.join(data_dir, args.val_file))
create_dir(args.checkpt_dir)
create_dir(args.intermediate_files_dir)
if args.sanity_check:
train_df = train_df[:10000]
model_mappings = {
'biobert' : 'dmis-lab/biobert-v1.1',
'scibert' : 'allenai/scibert_scivocab_uncased',
'pubmedbert' : 'microsoft/BiomedNLP-PubMedBERT-base-uncased-abstract',
'pubmedbert_pmc' : 'microsoft/BiomedNLP-PubMedBERT-base-uncased-abstract-fulltext',
'bluebert' : 'bionlp/bluebert_pubmed_uncased_L-12_H-768_A-12',
'bluebert_mimic3' : 'bionlp/bluebert_pubmed_mimic_uncased_L-12_H-768_A-12',
'sapbert' : 'cambridgeltl/SapBERT-from-PubMedBERT-fulltext',
'sapbert_mean_token' : 'cambridgeltl/SapBERT-from-PubMedBERT-fulltext-mean-token',
'bioelectra' : 'kamalkraj/bioelectra-base-discriminator-pubmed',
'bioelectra_pmc' : 'kamalkraj/bioelectra-base-discriminator-pubmed-pmc',
'electramed' : 'giacomomiolo/electramed_base_scivocab_1M',
'biomed_roberta' : 'allenai/biomed_roberta_base',
'biomed_robera_chemprot' : 'allenai/dsp_roberta_base_dapt_biomed_tapt_chemprot_4169',
'biomed_roberta_rct500' : 'allenai/dsp_roberta_base_dapt_biomed_tapt_rct_500'
}
model_name = args.model_name
model_version = model_mappings[model_name]
print(f'args={args}')
print('Loading training data ...')
print('=' * 30)
device = torch.device("cuda" if args.use_cuda and torch.cuda.is_available() else "cpu")
tag2idx, idx2tag = get_tag2idx(train_df)
save_idx2tag(idx2tag, args.intermediate_files_dir)
print('Loading BertTokenizer and model ...', model_version)
print('=' * 30)
if 'electra' in model_name:
        # Example for an ELECTRA-based model. All possible variations are available in the README
tokenizer = ElectraTokenizer.from_pretrained(model_version)
model = ElectraForTokenClassification.from_pretrained(model_version,
num_labels=len(tag2idx),
output_attentions=False, output_hidden_states=False)
elif 'roberta' in model_name:
        # Example for a RoBERTa-based model. All possible variations are available in the README
tokenizer = AutoTokenizer.from_pretrained(model_version)
model = RobertaForTokenClassification.from_pretrained(model_version,
num_labels=len(tag2idx),
output_attentions=False,
output_hidden_states=False)
elif 'bert' in model_name:
        # Example for a BERT-based model. All possible variations are available in the README
tokenizer = AutoTokenizer.from_pretrained(model_version)
model = BertForTokenClassification.from_pretrained(model_version,
num_labels=len(tag2idx),
output_attentions=False,
output_hidden_states=False)
model.to(device)
print('Finished loading BertTokenizer and model!')
print()
train_dataloader, val_dataloader, test_dataloader = get_dataloaders(tokenizer, train_df, val_df, test_df, args.tags, tag2idx, args.max_len)
optimizer = AdamW(model.parameters(), lr=args.learning_rate, weight_decay=args.weight_decay)
num_epochs = args.epochs
num_training_steps = num_epochs * len(train_dataloader)
use_lr_scheduler = False
if use_lr_scheduler:
lr_scheduler = get_scheduler(
"linear",
optimizer = optimizer,
num_warmup_steps = int(0.06 * len(train_dataloader)),
num_training_steps = num_training_steps
)
else:
lr_scheduler = None
    if 'roberta' in model_name:
        tokens = {'PAD' : tokenizer.convert_tokens_to_ids("<pad>"),
                  'SEP' : tokenizer.convert_tokens_to_ids("</s>"),
                  'CLS' : tokenizer.convert_tokens_to_ids("<s>")}
    elif 'electra' in model_name or 'bert' in model_name:
        tokens = {'PAD' : tokenizer.convert_tokens_to_ids("[PAD]"),
                  'SEP' : tokenizer.convert_tokens_to_ids("[SEP]"),
                  'CLS' : tokenizer.convert_tokens_to_ids("[CLS]")}
print('Starting model training ...')
print('=' * 30)
    train_losses, val_losses, best_model = train_model(model, optimizer, lr_scheduler, num_epochs, train_dataloader, val_dataloader, idx2tag, args.tags, tokens, model_name, args.checkpt_dir)
print('Finished model training!')
test_loss, classif_report_test, f1_test = train_epoch(best_model, None, None, test_dataloader, idx2tag, args.tags, tokens, 'Test')
|
{"hexsha": "0f7f2c197b57fa46ccf115796fc6bb005f2294ce", "size": 19175, "ext": "py", "lang": "Python", "max_stars_repo_path": "train.py", "max_stars_repo_name": "GullyBurns/meta-full-text-mining-ner", "max_stars_repo_head_hexsha": "968013a89c65af9b7b60d6bb5f445869ba53fbca", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "train.py", "max_issues_repo_name": "GullyBurns/meta-full-text-mining-ner", "max_issues_repo_head_hexsha": "968013a89c65af9b7b60d6bb5f445869ba53fbca", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "train.py", "max_forks_repo_name": "GullyBurns/meta-full-text-mining-ner", "max_forks_repo_head_hexsha": "968013a89c65af9b7b60d6bb5f445869ba53fbca", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 45.4383886256, "max_line_length": 253, "alphanum_fraction": 0.7342372881, "include": true, "reason": "import numpy", "num_tokens": 4774}
|
# -*- coding: utf-8 -*-
from numpy import array as numpy_array
from numpy import empty as numpy_empty
from numpy import where as numpy_where
from numpy import sum as numpy_sum
from numpy import finfo as numpy_finfo
from .data.data import Data
from .dimensioncoordinate import DimensionCoordinate
from .functions import REGRID_LOGGING
from . import _found_ESMF
if _found_ESMF:
try:
import ESMF
except Exception as error:
print("WARNING: Can not import ESMF for regridding: {0}".format(error))
class Regrid:
'''Class containing all the methods required for accessing ESMF
regridding through ESMPY and the associated utility methods.
'''
def __init__(self, srcfield, dstfield, srcfracfield, dstfracfield,
method='conservative_1st', ignore_degenerate=False):
'''Creates a handle for regridding fields from a source grid to a
destination grid that can then be used by the run_regridding method.
:Parameters:
srcfield: ESMF.Field
The source field with an associated grid to be used for
regridding.
dstfield: ESMF.Field
The destination field with an associated grid to be used
for regridding.
srcfracfield: ESMF.Field
A field to hold the fraction of the source field that
contributes to conservative regridding.
        dstfracfield: ESMF.Field
            A field to hold the fraction of the destination field that
            contributes to conservative regridding.
        method: `str`, optional
By default the regridding method is set to
'conservative_1st'. In this case or if it is set to
'conservative' first-order conservative regridding is
used. If it is set to 'conservative_2nd' second order
conservative regridding is used. If it is set to
'bilinear' then (multi)linear interpolation is used. If
it is set to 'patch' then higher-order patch recovery is
used. If it is set to 'nearest_stod' then nearest source
to destination interpolation is used. If it is set to
'nearest_dtos' then nearest destination to source
interpolation is used.
ignore_degenerate: `bool`, optional
            Whether to ignore degenerate cells, rather than raising an
            error, when computing the regridding weights.
'''
# create a handle to the regridding method
if method in ('conservative', 'conservative_1st'):
regrid_method = ESMF.RegridMethod.CONSERVE
elif method == 'conservative_2nd':
regrid_method = ESMF.RegridMethod.CONSERVE_2ND
elif method == 'bilinear':
regrid_method = ESMF.RegridMethod.BILINEAR
elif method == 'patch':
regrid_method = ESMF.RegridMethod.PATCH
elif method == 'nearest_stod':
regrid_method = ESMF.RegridMethod.NEAREST_STOD
elif method == 'nearest_dtos':
regrid_method = ESMF.RegridMethod.NEAREST_DTOS
else:
raise ValueError('Regrid method not recognised.')
# Initialise the regridder. This also creates the
# weights needed for the regridding.
self.regridSrc2Dst = ESMF.Regrid(srcfield, dstfield,
regrid_method=regrid_method,
src_mask_values=numpy_array([0],
dtype='int32'),
dst_mask_values=numpy_array([0],
dtype='int32'),
src_frac_field=srcfracfield,
dst_frac_field=dstfracfield,
unmapped_action=ESMF.UnmappedAction.IGNORE,
ignore_degenerate=ignore_degenerate)
def destroy(self):
'''Free the memory associated with the ESMF.Regrid instance.
'''
self.regridSrc2Dst.destroy()
@staticmethod
def initialize():
'''Check whether ESMF has been found. If not raise an import
error. Initialise the ESMPy manager. Whether logging is enabled or
not is determined by cf.REGRID_LOGGING. If it is then logging
takes place after every call to ESMPy.
:Returns:
manager: ESMF.Manager
A singleton instance of the ESMPy manager.
'''
if not _found_ESMF:
raise ImportError('The ESMF package is needed to support regridding.')
manager = ESMF.Manager(debug=REGRID_LOGGING())
return manager
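    # Hedged usage sketch (not in the original module): typical call order,
    # assuming ESMF is available and source/destination fields have been built.
    #   Regrid.initialize()
    #   regridder = Regrid(srcfield, dstfield, srcfracfield, dstfracfield,
    #                      method='bilinear')
    #   ... run the regridding ...
    #   regridder.destroy()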
@staticmethod
def create_grid(coords, use_bounds, mask=None, cartesian=False,
cyclic=False, coords_2D=False, coord_order=None):
'''Create an ESMPy grid given a sequence of coordinates for use as a
source or destination grid in regridding. Optionally the grid may
have an associated mask.
:Parameters:
coords: sequence
            The coordinates. If not Cartesian it is assumed that the
            first is longitude and the second is latitude.
use_bounds: `bool`
Whether to populate the grid corners with information from
the bounds or not.
mask: `numpy.ndarray`, optional
An optional numpy array of booleans containing the grid
points to mask. Where the elements of mask are True the
output grid is masked.
cartesian: `bool`, optional
Whether to create a Cartesian grid or a spherical one,
False by default.
cyclic: `bool`, optional
            Whether or not the longitude (if present) is cyclic. If
            None then a check for cyclicity is made from the bounds.
            False by default.
coords_2D: `bool`, optional
Whether the coordinates are 2D or not. Presently only
works for spherical coordinates. False by default.
coord_order: sequence, optional
Two tuples one indicating the order of the x and y axes
for 2D longitude, one for 2D latitude.
:Returns:
out: `ESMF.Grid`
The resulting ESMPy grid for use as a source or destination
grid in regridding.
'''
if not cartesian:
lon = coords[0]
lat = coords[1]
if not coords_2D:
# Get the shape of the grid
shape = [lon.size, lat.size]
else:
x_order = coord_order[0]
y_order = coord_order[1]
# Get the shape of the grid
shape = lon.transpose(x_order).shape
if lat.transpose(y_order).shape != shape:
raise ValueError('The longitude and latitude coordinates' +
' must have the same shape.')
#--- End: if
if use_bounds:
if not coords_2D:
# Get the bounds
x_bounds = lon.get_bounds()
y_bounds = lat.get_bounds().clip(-90, 90, 'degrees').array
# If cyclic not set already, check for cyclicity
if cyclic is None:
cyclic = abs(x_bounds.datum(-1)
- x_bounds.datum(0)) == Data(360,
'degrees')
x_bounds = x_bounds.array
else:
# Get the bounds
x_bounds = lon.get_bounds()
y_bounds = lat.get_bounds().clip(-90, 90, 'degrees')
n = x_bounds.shape[0]
m = x_bounds.shape[1]
x_bounds = x_bounds.array
y_bounds = y_bounds.array
tmp_x = numpy_empty((n + 1, m + 1))
tmp_x[:n,:m] = x_bounds[:,:,0]
tmp_x[:n,m] = x_bounds[:,-1,1]
tmp_x[n,:m] = x_bounds[-1,:,3]
tmp_x[n,m] = x_bounds[-1,-1,2]
tmp_y = numpy_empty((n + 1, m + 1))
tmp_y[:n,:m] = y_bounds[:,:,0]
tmp_y[:n,m] = y_bounds[:,-1,1]
tmp_y[n,:m] = y_bounds[-1,:,3]
tmp_y[n,m] = y_bounds[-1,-1,2]
x_bounds = tmp_x
y_bounds = tmp_y
else:
if not coords_2D:
# If cyclicity not set already, check for cyclicity
if cyclic is None:
try:
x_bounds = lon.get_bounds()
cyclic = abs(x_bounds.datum(-1)
- x_bounds.datum(0)) == Data(360,
'degrees')
except ValueError:
pass
#--- End: if
# Create empty grid
max_index = numpy_array(shape, dtype='int32')
if use_bounds:
staggerLocs = [ESMF.StaggerLoc.CORNER, ESMF.StaggerLoc.CENTER]
else:
staggerLocs = [ESMF.StaggerLoc.CENTER]
if cyclic:
grid = ESMF.Grid(max_index, num_peri_dims=1,
staggerloc=staggerLocs)
else:
grid = ESMF.Grid(max_index, staggerloc=staggerLocs)
# Populate grid centres
x, y = 0, 1
gridXCentre = grid.get_coords(x, staggerloc=ESMF.StaggerLoc.CENTER)
gridYCentre = grid.get_coords(y, staggerloc=ESMF.StaggerLoc.CENTER)
if not coords_2D:
gridXCentre[...] = lon.array.reshape((lon.size, 1))
gridYCentre[...] = lat.array.reshape((1, lat.size))
else:
gridXCentre[...] = lon.transpose(x_order).array
gridYCentre[...] = lat.transpose(y_order).array
# Populate grid corners if there are bounds
if use_bounds:
gridCorner = grid.coords[ESMF.StaggerLoc.CORNER]
if not coords_2D:
if cyclic:
gridCorner[x][...] = x_bounds[:, 0].reshape(lon.size, 1)
else:
n = x_bounds.shape[0]
tmp_x = numpy_empty(n + 1)
tmp_x[:n] = x_bounds[:,0]
tmp_x[n] = x_bounds[-1,1]
gridCorner[x][...] = tmp_x.reshape(lon.size + 1, 1)
n = y_bounds.shape[0]
tmp_y = numpy_empty(n + 1)
tmp_y[:n] = y_bounds[:,0]
tmp_y[n] = y_bounds[-1,1]
gridCorner[y][...] = tmp_y.reshape(1, lat.size + 1)
else:
gridCorner = grid.coords[ESMF.StaggerLoc.CORNER]
x_bounds = x_bounds.transpose(x_order)
y_bounds = y_bounds.transpose(y_order)
if cyclic:
x_bounds = x_bounds[:-1,:]
y_bounds = y_bounds[:-1,:]
gridCorner[x][...] = x_bounds
gridCorner[y][...] = y_bounds
#--- End: if
else:
# Test the dimensionality of the list of coordinates
ndim = len(coords)
if ndim < 1 or ndim > 3:
raise ValueError('Cartesian grid must have between 1 and 3 ' +
'dimensions.')
# For 1D conservative regridding add an extra dimension of size 1
if ndim == 1:
if not use_bounds:
# For 1D nonconservative regridding the extra dimension
# should already have been added in cf.Field.regridc.
raise ValueError('Cannot create a Cartesian grid from ' +
'a one-dimensional coordinate with no bounds.')
coords = [DimensionCoordinate(data=Data(0),
bounds=Data([numpy_finfo('float32').epsneg,
numpy_finfo('float32').eps]))] + coords
if mask is not None:
mask = mask[None,:]
ndim = 2
shape = list()
for coord in coords:
shape.append(coord.size)
# Initialise the grid
max_index = numpy_array(shape, dtype='int32')
if use_bounds:
if ndim < 3:
staggerLocs = [ESMF.StaggerLoc.CORNER,
ESMF.StaggerLoc.CENTER]
else:
staggerLocs = [ESMF.StaggerLoc.CENTER_VCENTER,
ESMF.StaggerLoc.CORNER_VFACE]
else:
if ndim < 3:
staggerLocs = [ESMF.StaggerLoc.CENTER]
else:
staggerLocs = [ESMF.StaggerLoc.CENTER_VCENTER]
#--- End: if
grid = ESMF.Grid(max_index, coord_sys=ESMF.CoordSys.CART,
staggerloc=staggerLocs)
# Populate the grid centres
for d in range(0, ndim):
if ndim < 3:
gridCentre = grid.get_coords(d,
staggerloc=ESMF.StaggerLoc.CENTER)
else:
gridCentre = grid.get_coords(d,
staggerloc=ESMF.StaggerLoc.CENTER_VCENTER)
gridCentre[...] = coords[d].array.reshape(
[shape[d] if x == d else 1 for x in range(0, ndim)])
#--- End: for
# Populate grid corners
if use_bounds:
if ndim < 3:
gridCorner = grid.coords[ESMF.StaggerLoc.CORNER]
else:
gridCorner = grid.coords[ESMF.StaggerLoc.CORNER_VFACE]
for d in range(0, ndim):
# boundsD = coords[d].get_bounds(create=True).array
boundsD = coords[d].get_bounds(None)
if boundsD is None:
boundsD = coords[d].create_bounds()
boundsD = boundsD.array
if shape[d] > 1:
tmp = numpy_empty(shape[d] + 1)
tmp[0:-1] = boundsD[:, 0]
tmp[-1] = boundsD[-1, 1]
boundsD = tmp
gridCorner[d][...] = boundsD.reshape(
[shape[d] + 1 if x == d else 1
for x in range(0, ndim)])
#--- End: if
#--- End: if
# Add the mask if appropriate
if mask is not None:
gmask = grid.add_item(ESMF.GridItem.MASK)
gmask[...] = 1
gmask[mask] = 0
return grid
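# Illustrative usage sketch (hypothetical variable names): for 1D
# spherical coordinates `lon` and `lat` that carry bounds,
#     grid = Regrid.create_grid([lon, lat], use_bounds=True, cyclic=None)
# builds the grid and infers cyclicity of the longitude from its bounds.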
@staticmethod
def create_field(grid, name):
'''Create an ESMPy field for use as a source or destination field in
regridding given an ESMPy grid and a name.
:Parameters:
grid: ESMF.Grid
The ESMPy grid to use in creating the field.
name: `str`
The name to give the field.
:Returns:
out: `ESMF.Field`
The resulting ESMPy field for use as a source or
destination field in regridding.
'''
field = ESMF.Field(grid, name)
return field
def run_regridding(self, srcfield, dstfield):
'''
'''
dstfield = self.regridSrc2Dst(srcfield, dstfield,
zero_region=ESMF.Region.SELECT)
return dstfield
@staticmethod
def concatenate_data(data_list, axis):
'''Concatenates a list of Data objects into a single Data object along
the specified axis (see cf.Data.concatenate for details). In the
case that the list contains only one element, that element is
simply returned.
:Parameters:
data_list: `list`
The list of data objects to concatenate.
axis: `int`
The axis along which to perform the concatenation.
:Returns:
out: `Data`
The resulting single Data object.
'''
if len(data_list) > 1:
data = Data.concatenate(data_list, axis=axis)
if data.fits_in_one_chunk_in_memory(data.dtype.itemsize):
data.varray
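# (presumably touching .varray here realises the concatenated
# result as a single in-memory array when it fits in one chunk)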
return data
else:
assert len(data_list) == 1
return data_list[0]
@staticmethod
def reconstruct_sectioned_data(sections):
'''Expects a dictionary of Data objects with ordering information as
keys, as output by the section method when called with a Data
object. Returns a reconstructed cf.Data object with the sections
in the original order.
:Parameters:
sections: `dict`
The dictionary of Data objects with ordering information
as keys.
:Returns:
out: `Data`
The resulting reconstructed Data object.
'''
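# For example (hypothetical), 2D data sectioned along axis 0 might give
# sections = {(0, None): d0, (1, None): d1}; the loop below concatenates
# d0 and d1 along axis 0 to restore the original order.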
ndims = len(list(sections.keys())[0])
for i in range(ndims - 1, -1, -1):
keys = sorted(sections.keys())
if i==0:
if keys[0][i] is None:
assert len(keys) == 1
return list(sections.values())[0]
else:
data_list = []
for k in keys:
data_list.append(sections[k])
return Regrid.concatenate_data(data_list, i)
else:
if keys[0][i] is None:
pass
else:
new_sections = {}
new_key = keys[0][:i]
data_list = []
for k in keys:
if k[:i] == new_key:
data_list.append(sections[k])
else:
new_sections[new_key] = Regrid.concatenate_data(data_list, i)
new_key = k[:i]
data_list = [sections[k]]
#--- End: for
new_sections[new_key] = Regrid.concatenate_data(data_list, i)
sections = new_sections
#--- End: for
@staticmethod
def compute_mass_grid(valuefield, areafield, dofrac=False, fracfield=None,
uninitval=422397696.):
'''Compute the mass of a data field.
:Parameters:
valuefield: ESMF.Field
This contains data values of a field built on the cells of
a grid.
areafield: ESMF.Field
This contains the areas associated with the grid cells.
fracfield: ESMF.Field
This contains the fraction of each cell that contributed
to a regridding operation involving `valuefield`.
dofrac: `bool`
Whether to weight the mass calculation by `fracfield`.
uninitval: `float`
The value that uninitialised cells take.
:Returns:
mass: `float`
The computed mass of the data field.
'''
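# In effect this computes mass = sum(area * value [* frac]) over all
# cells whose value differs from uninitval.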
mass = 0.0
areafield.get_area()
ind = numpy_where(valuefield.data != uninitval)
if dofrac:
mass = numpy_sum(areafield.data[ind] * valuefield.data[ind] * fracfield.data[ind])
else:
mass = numpy_sum(areafield.data[ind] * valuefield.data[ind])
return mass
#--- End: class
|
{"hexsha": "b8e61d9f64cca38a510e58d8c8a297817ba37a6e", "size": 20225, "ext": "py", "lang": "Python", "max_stars_repo_path": "cf/regrid.py", "max_stars_repo_name": "AJamesPhillips/cf-python", "max_stars_repo_head_hexsha": "4631bc4ba3c0cb51dcd18905116440007e291e6b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "cf/regrid.py", "max_issues_repo_name": "AJamesPhillips/cf-python", "max_issues_repo_head_hexsha": "4631bc4ba3c0cb51dcd18905116440007e291e6b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "cf/regrid.py", "max_forks_repo_name": "AJamesPhillips/cf-python", "max_forks_repo_head_hexsha": "4631bc4ba3c0cb51dcd18905116440007e291e6b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.4537037037, "max_line_length": 94, "alphanum_fraction": 0.5011124845, "include": true, "reason": "from numpy", "num_tokens": 4238}
|
module Sheets
export RWGSheet, read_sheet_data, write_sheet_data, find_unique_periods
export rotate!, translate!, combine, recttri, SV2, MV2
using StaticArrays: SVector, MVector, SMatrix
using ..PSSFSSLen
using JLD2
using LinearAlgebra: norm
using RecipesBase
const MV2 = MVector{2,Float64}
const SV2 = SVector{2,Float64}
abstract type Sheet end
mutable struct RWGSheet <: Sheet
style::String
units::PSSFSSLength # Length unit
s₁::SV2 # Direct lattice vector (specified units)
s₂::SV2 # Direct lattice vector (specified units)
β₁::SV2 # Reciprocal lattice vector (1/(specified units))
β₂::SV2 # Reciprocal lattice vector (1/(specified units))
dx::Float64 # Unit cell displacement in x (in specified units)
dy::Float64 # Unit cell displacement in y (in specified units)
rot::Float64 # Rotation angle for unit cell (deg)
ρ::Vector{SV2} # Node coordinates
e1::Vector{Int} # Edge connect. list. e1[i] is the initial node of edge i
e2::Vector{Int} # Edge connect. list. e2[i] is the terminal node of edge i
fv::Array{Int,2} # Face/vertex list. fv[:,i] lists vertices of face i
fe::Array{Int,2} # Face/edge list. fe[:,i] lists edges of face i
fr::Vector{Float64} # Face resistance list. fr[i] is the sheet resistance of face i (Ω/□)
# The following fields are storage for face/face integrals:
J::Vector{ComplexF64}
J_ξ::Vector{ComplexF64}
J_η::Vector{ComplexF64}
K::Vector{ComplexF64}
K_ξ::Vector{ComplexF64}
K_η::Vector{ComplexF64}
ρ_r::Vector{SV2}
rinv::Vector{Float64}
# Parameters that the face/face integrals depend on:
ψ₁::Float64 # Incremental phase shift (radians)
ψ₂::Float64 # Incremental phase shift (radians)
u::Float64 # Smoothing parameter (1/(specified units))
class::Char # Type of sheet. 'J' for electric current, 'M' for magnetic current
info::String # Informational comment
# The following flag tells rwg_setup whether (true) or not (false)
# to check for consistent edges at xi or eta = 0 and 1. The default
# value (true) means that the check should be performed.
ξη_check::Bool
# The following flag tells rwg_setup whether (true) or not (false)
# to find unique face pairs.
fufp::Bool
end # struct
import Base.==
==(sh1::RWGSheet, sh2::RWGSheet) = all((getfield(sh1,f)==getfield(sh2,f) for
f in fieldnames(RWGSheet)))
# Add a zero-argument constructor:
RWGSheet() = RWGSheet("", u"mm", # style, units
SV2([0.0,0.0]), # s₁
SV2([0.0,0.0]), # s₂
SV2([0.0,0.0]), # β₁
SV2([0.0,0.0]), # β₂
0.0, 0.0, 0.0, # dx, dy, rot
SV2[], # ρ
Int[], Int[], # e1, e2
Array{Int}(undef,0,0), # fv
Array{Int}(undef,0,0), # fe
Float64[], # fr
ComplexF64[], # J
ComplexF64[], # J_ξ
ComplexF64[], # J_η
ComplexF64[], # K
ComplexF64[], # K_ξ
ComplexF64[], # K_η
Array{SV2}(undef,0), # ρ_r
Float64[], # rinv
0.0, 0.0, 0.0, # ψ₁, ψ₂, u
' ', "", # class, info
true, false) # ξη_check, fufp
Base.show(io::IO, ::MIME"text/plain", s::RWGSheet) =
print(io, "RWGSheet: style=", s.style, ", class=", s.class, ", ", length(s.ρ), " nodes, ", length(s.e1),
" edges, ", size(s.fv,2), " faces")
"""
read_sheet_data(filename::AbstractString)::RWGSheet
Read the sheet geometry data from a `JLD2` file named in `filename`.
"""
function read_sheet_data(filename::AbstractString)::RWGSheet
jldopen(filename, "r") do file
try
return file["sheet"]
catch
@error "$(filename) does not contain sheet data"
end
end
end # function
"""
write_sheet_data(filename::AbstractString, sheet::RWGSheet)
Write the sheet geometry data to a `JLD2` file named in `filename`.
"""
function write_sheet_data(filename::AbstractString, sheet::RWGSheet)
jldopen(filename, "w") do file
file["sheet"] = sheet
end
end
"""
find_unique_periods(junction::Vector{Int}, sheets)
Find the unique unit cells for the sheets used in the FSS analysis.
# Arguments
- `junction`: An integer array of length `(Nlayer-1)` containing
in location `i` the index of the FSS sheet located
at the interface of dielectric layers `i` and `i+1`. If
no sheet is present there, the value is 0.
- `sheets`: An iterable that contains the FSS sheets.
# Return Value
- `upa` (Unique Periodicity Array) An integer array of the
same length of junction, containing zeros in the same
locations. The nonzero entries correspond to sheet
locations, and are numbered consecutively according
to the equivalence class of the sheet at that location.
Two sheets are equivalent if they have the same unit cell.
"""
function find_unique_periods(junction::Vector{Int}, sheets)
all(t isa Sheet for t in sheets) || error("Elements of sheets must be of type Sheet")
one_meter = map(x -> ustrip(Float64, x.units, 1.0u"m"), sheets)
s1s2 = vcat(map(x -> hcat(x.s₁..., x.s₂...), sheets)...) # Each row is s1x s1y s2x s2y
s1s2 = s1s2 ./ one_meter # All rows now are comparable (in meters)
s1s2 = round.(s1s2, sigdigits=8)
upa = zeros(Int, length(junction))
Nup = 0 # Initialize Number of Unique Periodicities.
for i in 1:length(upa) # Step through each junction.
isht = junction[i] # Sheet index.
((isht == 0) || (sheets[isht].style == "NULL")) && continue
# Compare s1 and s2 of current (isht) sheet with previous sheets.
for n in 1:Nup # compare to one member of each equivalence class.
# Find a sheet that is in equivalence class n:
nsht = junction[findfirst(isequal(n), upa)] # Index of sheet to be compared.
# Compare unit cell of sheet nsht with that of sheet isht:
if view(s1s2, isht, :) == view(s1s2, nsht, :)
# Sheets are in the same equivalence class
upa[i] = n # Store equiv. class number.
@goto NextOuterFor
end
end
# If execution fell through to here, we found a sheet that is not
# in an existing equivalence class.
Nup += 1 # Bump count of equiv. classes.
upa[i] = Nup # Store equiv. class number.
@label NextOuterFor
end
return upa
end
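# Hypothetical example: with junction = [1, 0, 2] and sheets 1 and 2
# sharing identical unit cell vectors s₁ and s₂, the result is
# upa = [1, 0, 1], i.e. both sheet locations fall in equivalence class 1.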
"""
rotate!(sh::RWGSheet, rot::Real)
Rotate a sheet by rot degrees (counter-clockwise).
"""
function rotate!(sh::RWGSheet, rot::Real)
rot == 0 && return
s,c = sincosd(rot)
rotmat = SMatrix{2,2}([c -s;s c])
sh.s₁ = rotmat * sh.s₁
sh.s₂ = rotmat * sh.s₂
sh.β₁ = rotmat * sh.β₁
sh.β₂ = rotmat * sh.β₂
for n in eachindex(sh.ρ)
sh.ρ[n] = rotmat * sh.ρ[n]
end
sh.rot = rot
return sh
end
"""
translate!(sh::RWGSheet, dx, dy)
Translate a sheet by dx in x and dy in y.
"""
function translate!(sh::RWGSheet, dx::Real, dy::Real)
dx == dy == 0 && (return sh)
tvec = [dx,dy]
for n in eachindex(sh.ρ)
sh.ρ[n] = tvec + sh.ρ[n]
end
return sh
end
"""
combine(sh1::RWGSheet, sh2::RWGSheet, dup_coor::Char, dup_coor_value::Real)
Combine the triangulations stored in sheets `sh1` and `sh2`.
# Arguments:
- `sh1`, `sh2`: sheets having
initialized values for fields `units`, `ρ`,
`e1`, `e2`, `fv`, `fe`, and possibly `fr`. It is assumed
that the two triangulations do not overlap except possibly
along a line defined by `dup_coor` and `dup_coor_value`,
as discussed below. If they do coincide along such
a line, then they must share the same set of vertices
and edges along this line. These duplicate vertices
and edges will be removed by this routine.
- `dup_coor` Either 'x' or 'y' to indicate at which coordinate
constant line the two triangulations may overlap,
requiring redundant edges and nodes to be removed,
or ' ' indicating that no search for duplicate nodes
is required.
- `dup_coor_value` The value of the coordinate at which the two
input triangulations overlap.
# Return value
- `sh3` A `RWGSheet` instance with the following member arrays
initialized: `units`, `ρ`, `e1`, `e2`, `fv`, and `fe`.
"""
function combine(sh1::RWGSheet, sh2::RWGSheet, dup_coor::Char, dup_coor_value::Real)
sh1.units == sh2.units || error("Inconsistent units for sh1 and sh2")
# Count number of vertices located at the duplicate coordinate.
# Save vertex indices of matching points in vcen1 and vcen2
#
Nvcen = 0
Necen = 0
vcen1 = Int[]
vcen2 = Int[]
if dup_coor ≠ ' '
tol = 0.5e-4 * norm(sh1.ρ[sh1.e1[1]] - sh1.ρ[sh1.e2[1]])
for i in 1:length(sh2.ρ)
if dup_coor == 'x'
test = sh2.ρ[i][1]
elseif dup_coor == 'y'
test = sh2.ρ[i][2]
else
test = typemax(Float64) # unrecognised dup_coor: guarantees no match
end
if abs(test - dup_coor_value) < tol
# Test to see if there is a sh1 vertex at same coordinate:
n1match = 0
for n1 in 1:length(sh1.ρ)
if norm(sh1.ρ[n1] - sh2.ρ[i]) < tol
n1match = n1
break
end
end
if n1match ≠ 0
Nvcen += 1
push!(vcen2, i)
push!(vcen1, n1match)
end
end
end
Necen = max(Nvcen - 1, 0) # Number of shared edges.
ecen1 = zeros(Int, Necen); ecen2 = zeros(Int, Necen)
# Locate and save indices in sh1 and sh2 of edges along the center line:
Necen = 0
for i in 1:length(sh2.e1) # Loop over sh2 edges
if (sh2.e1[i] in vcen2) && (sh2.e2[i] in vcen2)
# Edge i of sh2 is a shared edge.
Necen += 1
ecen2[Necen] = i
i1 = findfirst(x->x==sh2.e1[i], vcen2)
i2 = findfirst(x->x==sh2.e2[i], vcen2)
# Now find matching sh1 edge:
for j1 in 1:length(sh1.e1)
if ((sh1.e1[j1] == vcen1[i1]) && (sh1.e2[j1] == vcen1[i2])) ||
((sh1.e2[j1] == vcen1[i1]) && (sh1.e1[j1] == vcen1[i2]))
ecen1[Necen] = j1
@goto sh2edges
end
end
else
continue
end
error("Unable to find matching duplicate edge in combine_sheet")
@label sh2edges
end
end
# Allocate triangulation arrays in new sheet:
sh3 = RWGSheet()
sh3.units = sh1.units # carry over the (already validated) units
sh3.e1 = zeros(Int, length(sh1.e1) + length(sh2.e1) - Necen)
sh3.e2 = zeros(Int, length(sh1.e2) + length(sh2.e2) - Necen)
sh3.ρ = Vector{SV2}(undef, length(sh1.ρ) + length(sh2.ρ) - Nvcen)
sh3.fe = zeros(Int, 3, size(sh1.fe,2) + size(sh2.fe,2))
sh3.fv = zeros(Int, 3, size(sh1.fv,2) + size(sh2.fv,2))
# Copy vertex locations:
sh3.ρ[1:length(sh1.ρ)] = sh1.ρ
if dup_coor == ' '
sh3.ρ[(1+length(sh1.ρ)):end] = sh2.ρ
else
i2 = 1 + length(sh1.ρ) # Node counter
for i in 1:length(sh2.ρ)
if !(i in vcen2[1:Nvcen])
sh3.ρ[i2] = sh2.ρ[i]
i2 += 1
end
end
end
# Copy edge-node matrices
eoffset = length(sh1.e1)
voffset = length(sh1.ρ)
sh3.e1[1:length(sh1.e1)] = sh1.e1
sh3.e2[1:length(sh1.e2)] = sh1.e2
if dup_coor == ' '
sh3.e1[(eoffset+1):end] = sh2.e1 .+ voffset
sh3.e2[(eoffset+1):end] = sh2.e2 .+ voffset
else
for i in 1:length(sh2.e1)
if i in ecen2
# Edge i of sh2 is located on duplication line.
# Do not include this duplicate edge in sh3, but
# decrement the edge index offset:
eoffset -= 1
else
if sh2.e1[i] in vcen2
# The initial point of edge i of sh2 is a duplicate vertex.
i2 = findfirst(x->x==sh2.e1[i], vcen2)
sh3.e1[i+eoffset] = vcen1[i2]
else
# Ordinary point
sh3.e1[i + eoffset] = sh2.e1[i] + voffset
end
#
if sh2.e2[i] in vcen2
# The terminal point of edge i of sh2 is on the duplication edge.
i2 = findfirst(x->x==sh2.e2[i], vcen2)
sh3.e2[i+eoffset] = vcen1[i2]
else
# Ordinary point:
sh3.e2[i+eoffset] = sh2.e2[i] + voffset
end
end
end
# Correct vertex indices:
for i in Nvcen:-1:1
sh3.e1[sh3.e1 .> (length(sh1.ρ) + vcen2[i])] .-= 1
sh3.e2[sh3.e2 .> (length(sh1.ρ) + vcen2[i])] .-= 1
end
end
# Copy face/vertex matrix
sh3.fv[:, 1:size(sh1.fv,2)] = sh1.fv
sh3.fv[:, 1+size(sh1.fv,2):end] = sh2.fv .+ voffset # offset will be corrected later
# Correct duplicate vertices from sh2:
for n2 in 1:Nvcen # Examine each duplicate vertex in sh2
n1 = vcen1[n2] # Initialize index of matching vertex in sh1
# Replace all references to vertex vcen2[n2] with ref to vcen1[n1]
sh3.fv[sh3.fv .== vcen2[n2] + voffset] .= n1
end
# Correct vertex indices:
for i in Nvcen:-1:1
sh3.fv[sh3.fv .> voffset + vcen2[i]] .-= 1
end
# Copy face/edge matrix:
foffset = size(sh1.fv,2)
eoffset = length(sh1.e1)
sh3.fe[:,1:size(sh1.fe,2)] = sh1.fe
sh3.fe[:,1+size(sh1.fe,2):end] = sh2.fe .+ eoffset # offset will be corrected later
# Correct duplicate edges from sh2:
if dup_coor ≠ ' '
for n2 in 1:length(ecen2) # Examine each duplicate edge in sh2
e2 = ecen2[n2] # Index of duplicate edge in sh2.
e1 = ecen1[n2] # Index of duplicate edge in sh1.
sh3.fe[sh3.fe .== (e2 + eoffset)] .= e1
end
# Correct edge indices:
for i in length(ecen2):-1:1
sh3.fe[sh3.fe .> eoffset + ecen2[i]] .-= 1
end
end
return sh3
end
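# Hypothetical usage: merge two triangulations that share nodes and
# edges along the line y = 0:
#     sh3 = combine(sh1, sh2, 'y', 0.0)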
"""
recttri(rhobl::SVector{2,Float64}, rhotr::SVector{2,Float64}, nx::Int, ny::Int)
Create a variable of type `RWGSheet` that contains the triangulation for
a rectangular strip. The fields `ρ`, `e1`, `e2`, `fv`, and `fe` are properly initialized.
"""
function recttri(rhobl::SV2, rhotr::SV2, nx::Int, ny::Int)
nodecount = (nx+1) * (ny+1) # Number of nodes.
edgecount = 3*nx*ny + nx + ny # Number of edges.
facecount = 2*nx*ny # Number of faces.
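# e.g. nx = ny = 1 gives 4 nodes, 5 edges (2 horizontal, 2 vertical,
# 1 diagonal) and 2 triangular faces.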
sh = RWGSheet()
sh.ρ = Vector{SV2}(undef, nodecount)
# Set the node coordinates:
drho = (rhotr - rhobl) ./ [nx, ny]
n = 0 # Initialize node index.
for j in 0:ny
yj = j * drho[2]
for i in 0:nx
n += 1
sh.ρ[n] = rhobl + SV2([i*drho[1], yj])
end
end
sh.e1 = zeros(Int, edgecount)
sh.e2 = zeros(Int, edgecount)
e = 0 # Initialize edge index.
# Do the horizontal edges:
for j in 0:ny
kadd = j * (nx+1)
for i in 1:nx
e += 1
sh.e1[e] = i + kadd
sh.e2[e] = sh.e1[e] + 1
end
end
# Do the vertical edges:
for j in 1:ny
kadd = (j-1) * (nx+1) + 1
for i in 0:nx
e += 1
sh.e1[e] = i + kadd
sh.e2[e] = sh.e1[e] + (nx+1)
end
end
# Do the diagonal edges:
for j in 1:ny
kadd1 = (j-1) * (nx+1)
kadd2 = 1 + j * (nx+1)
for i in 1:nx
e += 1
sh.e1[e] = i + kadd1
sh.e2[e] = i + kadd2
end
end
# Done with edges. Begin setting up faces.
# Allocate arrays whose length depends only on the number of faces:
sh.fv = zeros(Int, 3, facecount)
sh.fe = zeros(Int, 3, facecount)
# Set up the face/vertex and face/edge matrices:
nhe = nx*ny + nx # Number of horizontal edges.
nve = nx*ny + ny # Number of vertical edges.
nde = nx*ny # Number of diagonal edges
f = 0 # Initialize face index.
for j in 1:ny
nadd1 = (j-1) * (nx+1)
nadd2 = 1 + j * (nx+1)
for i in 1:nx
f += 1 # Bump face index (upper left face).
sh.fv[1,f] = i + nadd1 # Lower Left vertex.
sh.fv[2,f] = i + nadd2 # Upper right vertex.
sh.fv[3,f] = i + nadd2 - 1 # Upper left vertex.
sh.fe[1,f] = i + j*nx # Upper edge.
sh.fe[2,f] = i + nhe + nadd1 # Left edge
sh.fe[3,f] = i + nhe + nve + (j-1)*nx # Diagonal edge
f += 1 # Bump face index (lower right face).
sh.fv[1,f] = sh.fv[1,f-1] # Lower Left vertex.
sh.fv[2,f] = 1 + sh.fv[1,f-1] # Lower right vertex.
sh.fv[3,f] = sh.fv[2,f-1] # Upper right vertex.
sh.fe[1,f] = 1 + sh.fe[2,f-1] # Right edge.
sh.fe[2,f] = sh.fe[3,f-1] # Diagonal edge.
sh.fe[3,f] = sh.fe[1,f-1] - nx # Bottom edge
end
end
return sh
end
"Plot recipe for RWGSheet"
@recipe function f(sh::RWGSheet; edges=true, faces=false, nodes=false,
edgenumbers=false, facenumbers=false, nodenumbers=false,
unitcell=false, rep=(1,1), fontsize=9)
# set a default value for an attribute with `-->`. Force it with `:=`.
xguide --> "x ($(sh.units))"
yguide --> "y ($(sh.units))"
aspect_ratio := :equal
if isa(rep[1], Int)
mrange = 1:rep[1]
elseif isa(rep[1], UnitRange)
mrange = rep[1]
else
error("Illegal type for rep[1]")
end
if isa(rep[2], Int)
nrange = 1:rep[2]
elseif isa(rep[2], UnitRange)
nrange = rep[2]
else
error("Illegal type for rep[2]")
end
for m in mrange, n in nrange
x0, y0 = (m-1)*sh.s₁ + (n-1)*sh.s₂
# Add series for faces
if faces
for i in 1:size(sh.fv,2)
points = sh.ρ[sh.fv[:,i]]
x = x0 .+ [point[1] for point in points]
y = y0 .+ [point[2] for point in points]
@series begin
seriestype := :shape
# ignore series in legend and color cycling
primary := false
linecolor := nothing
fillcolor --> :blue
fillalpha --> 0.8
markershape := :none
x, y
end
end
end
# Add series for edges
if edges
x = Float64[]
y = Float64[]
for i in 1:length(sh.e1)
points = sh.ρ[[sh.e1[i], sh.e2[i]]]
push!(x,NaN)
push!(y,NaN)
append!(x, [p[1] for p in points])
append!(y, [p[2] for p in points])
end
x .+= x0
y .+= y0
@series begin
seriestype := :path
# ignore series in legend and color cycling
primary := false
linecolor --> :black
linestyle := :solid
fillcolor := nothing
fillalpha := 0
markershape := :none
x, y
end
end
# Add series for unit cell
if unitcell
points = [0*sh.s₁, sh.s₁, sh.s₁+sh.s₂, sh.s₂]
x = x0 .+ [point[1] for point in points]; push!(x, x[1])
y = y0 .+ [point[2] for point in points]; push!(y, y[1])
@series begin
seriestype := :path
# ignore series in legend and color cycling
primary := false
linecolor := :blue
linestyle := :dot
fillcolor := nothing
fillalpha := 0
markershape := :none
x, y
end
end
# Add series for nodes
if nodes
x = x0 .+ [p[1] for p in sh.ρ]
y = y0 .+ [p[2] for p in sh.ρ]
@series begin
seriestype := :scatter
# ignore series in legend and color cycling
primary := false
linecolor := nothing
markercolor --> :black
markershape --> :circle
markersize --> 1
x, y
end
end
# Add series for node numbers
if nodenumbers
x = x0 .+ [p[1] for p in sh.ρ]
y = y0 .+ [p[2] for p in sh.ρ]
@series begin
seriestype := :scatter
# ignore series in legend and color cycling
primary := false
linecolor := nothing
markersize := 0
markeralpha := 0
markercolor := nothing
markershape := :none
annotations := [(x[i], y[i], string(i), fontsize) for i in 1:length(x)]
x,y
end
end
# Add series for edge numbers
if edgenumbers
x = zeros(Float64, length(sh.e1))
y = zeros(Float64, length(sh.e1))
for i in 1:length(sh.e1)
x[i], y[i] = 0.5*sum(sh.ρ[[sh.e1[i], sh.e2[i]]])
end
x .+= x0
y .+= y0
@series begin
seriestype := :scatter
# ignore series in legend and color cycling
primary := false
linecolor := nothing
markersize := 0
markeralpha := 0
markercolor := nothing
markershape := :none
annotations := [(x[i], y[i], string(i), fontsize) for i in 1:length(x)]
x,y
end
end
# Add series for face numbers
if facenumbers
x = zeros(Float64, size(sh.fv,2))
y = zeros(Float64, size(sh.fv,2))
for i in 1:size(sh.fv,2)
x[i], y[i] = (1/3)*sum(sh.ρ[sh.fv[:,i]])
end
x .+= x0
y .+= y0
@series begin
seriestype := :scatter
# ignore series in legend and color cycling
primary := false
linecolor := nothing
markersize := 0
markeralpha := 0
markercolor := nothing
markershape := :none
annotations := [(x[i], y[i], string(i), fontsize) for i in 1:length(x)]
x,y
end
end
end
end
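# Hypothetical usage with Plots.jl:
#     using Plots
#     plot(sh; edges=true, unitcell=true, rep=(2,2))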
end # module
|
{"hexsha": "c27efb5264a8b65dddb505e35da1b6cc435dc7c5", "size": 23528, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/Sheets.jl", "max_stars_repo_name": "mortenpi/PSSFSS.jl", "max_stars_repo_head_hexsha": "9a3a6503d9266eee57771612a47a3e9b77bc9a2e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 10, "max_stars_repo_stars_event_min_datetime": "2021-05-21T15:44:56.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-25T17:29:19.000Z", "max_issues_repo_path": "src/Sheets.jl", "max_issues_repo_name": "mortenpi/PSSFSS.jl", "max_issues_repo_head_hexsha": "9a3a6503d9266eee57771612a47a3e9b77bc9a2e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 9, "max_issues_repo_issues_event_min_datetime": "2020-10-08T22:20:01.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-26T01:01:09.000Z", "max_forks_repo_path": "src/Sheets.jl", "max_forks_repo_name": "mortenpi/PSSFSS.jl", "max_forks_repo_head_hexsha": "9a3a6503d9266eee57771612a47a3e9b77bc9a2e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2020-11-20T13:58:43.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-24T09:16:48.000Z", "avg_line_length": 35.8112633181, "max_line_length": 109, "alphanum_fraction": 0.5104131248, "num_tokens": 6860}
|
! wrapftutorial.f
! This file is generated by Shroud nowrite-version. Do not edit.
! Copyright (c) 2017-2021, Lawrence Livermore National Security, LLC and
! other Shroud Project Developers.
! See the top-level COPYRIGHT file for details.
!
! SPDX-License-Identifier: (BSD-3-Clause)
!
!>
!! \file wrapftutorial.f
!! \brief Shroud generated wrapper for tutorial namespace
!<
! splicer begin file_top
! splicer end file_top
module tutorial_mod
use iso_c_binding, only : C_INT, C_LONG, C_NULL_PTR, C_PTR, C_SIZE_T
! splicer begin module_use
! splicer end module_use
implicit none
! splicer begin module_top
! splicer end module_top
! start helper capsule_data_helper
! helper capsule_data_helper
type, bind(C) :: TUT_SHROUD_capsule_data
type(C_PTR) :: addr = C_NULL_PTR ! address of C++ memory
integer(C_INT) :: idtor = 0 ! index of destructor
end type TUT_SHROUD_capsule_data
! end helper capsule_data_helper
! start array_context
! helper array_context
type, bind(C) :: TUT_SHROUD_array
! address of C++ memory
type(TUT_SHROUD_capsule_data) :: cxx
! address of data in cxx
type(C_PTR) :: base_addr = C_NULL_PTR
! type of element
integer(C_INT) :: type
! bytes-per-item or character len of data in cxx
integer(C_SIZE_T) :: elem_len = 0_C_SIZE_T
! size of data in cxx
integer(C_SIZE_T) :: size = 0_C_SIZE_T
! number of dimensions
integer(C_INT) :: rank = -1
integer(C_LONG) :: shape(7) = 0
end type TUT_SHROUD_array
! end array_context
! enum tutorial::Color
integer(C_INT), parameter :: red = 0
integer(C_INT), parameter :: blue = 1
integer(C_INT), parameter :: white = 2
! start abstract callback1_incr
abstract interface
function callback1_incr(arg0) bind(C)
use iso_c_binding, only : C_INT
implicit none
integer(C_INT), value :: arg0
integer(C_INT) :: callback1_incr
end function callback1_incr
end interface
! end abstract callback1_incr
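! A user-supplied procedure conforming to this interface might look
! like the following hypothetical sketch:
!     function my_incr(arg0) bind(C)
!         use iso_c_binding, only : C_INT
!         implicit none
!         integer(C_INT), value :: arg0
!         integer(C_INT) :: my_incr
!         my_incr = arg0 + 1
!     end function my_incr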
! start no_return_no_arguments
interface
subroutine no_return_no_arguments() &
bind(C, name="TUT_no_return_no_arguments")
implicit none
end subroutine no_return_no_arguments
end interface
! end no_return_no_arguments
interface
function pass_by_value(arg1, arg2) &
result(SHT_rv) &
bind(C, name="TUT_pass_by_value")
use iso_c_binding, only : C_DOUBLE, C_INT
implicit none
real(C_DOUBLE), value, intent(IN) :: arg1
integer(C_INT), value, intent(IN) :: arg2
real(C_DOUBLE) :: SHT_rv
end function pass_by_value
end interface
interface
subroutine c_concatenate_strings_bufferify(arg1, SHT_arg1_len, &
arg2, SHT_arg2_len, SHT_rv) &
bind(C, name="TUT_concatenate_strings_bufferify")
use iso_c_binding, only : C_CHAR, C_INT
import :: TUT_SHROUD_array
implicit none
character(kind=C_CHAR), intent(IN) :: arg1(*)
integer(C_INT), value, intent(IN) :: SHT_arg1_len
character(kind=C_CHAR), intent(IN) :: arg2(*)
integer(C_INT), value, intent(IN) :: SHT_arg2_len
type(TUT_SHROUD_array), intent(OUT) :: SHT_rv
end subroutine c_concatenate_strings_bufferify
end interface
! start c_use_default_arguments
interface
function c_use_default_arguments() &
result(SHT_rv) &
bind(C, name="TUT_use_default_arguments")
use iso_c_binding, only : C_DOUBLE
implicit none
real(C_DOUBLE) :: SHT_rv
end function c_use_default_arguments
end interface
! end c_use_default_arguments
! start c_use_default_arguments_arg1
interface
function c_use_default_arguments_arg1(arg1) &
result(SHT_rv) &
bind(C, name="TUT_use_default_arguments_arg1")
use iso_c_binding, only : C_DOUBLE
implicit none
real(C_DOUBLE), value, intent(IN) :: arg1
real(C_DOUBLE) :: SHT_rv
end function c_use_default_arguments_arg1
end interface
! end c_use_default_arguments_arg1
! start c_use_default_arguments_arg1_arg2
interface
function c_use_default_arguments_arg1_arg2(arg1, arg2) &
result(SHT_rv) &
bind(C, name="TUT_use_default_arguments_arg1_arg2")
use iso_c_binding, only : C_BOOL, C_DOUBLE
implicit none
real(C_DOUBLE), value, intent(IN) :: arg1
logical(C_BOOL), value, intent(IN) :: arg2
real(C_DOUBLE) :: SHT_rv
end function c_use_default_arguments_arg1_arg2
end interface
! end c_use_default_arguments_arg1_arg2
interface
subroutine c_overloaded_function_from_name(name) &
bind(C, name="TUT_overloaded_function_from_name")
use iso_c_binding, only : C_CHAR
implicit none
character(kind=C_CHAR), intent(IN) :: name(*)
end subroutine c_overloaded_function_from_name
end interface
interface
subroutine c_overloaded_function_from_name_bufferify(name, &
SHT_name_len) &
bind(C, name="TUT_overloaded_function_from_name_bufferify")
use iso_c_binding, only : C_CHAR, C_INT
implicit none
character(kind=C_CHAR), intent(IN) :: name(*)
integer(C_INT), value, intent(IN) :: SHT_name_len
end subroutine c_overloaded_function_from_name_bufferify
end interface
interface
subroutine c_overloaded_function_from_index(indx) &
bind(C, name="TUT_overloaded_function_from_index")
use iso_c_binding, only : C_INT
implicit none
integer(C_INT), value, intent(IN) :: indx
end subroutine c_overloaded_function_from_index
end interface
interface
subroutine c_template_argument_int(arg) &
bind(C, name="TUT_template_argument_int")
use iso_c_binding, only : C_INT
implicit none
integer(C_INT), value, intent(IN) :: arg
end subroutine c_template_argument_int
end interface
interface
subroutine c_template_argument_double(arg) &
bind(C, name="TUT_template_argument_double")
use iso_c_binding, only : C_DOUBLE
implicit none
real(C_DOUBLE), value, intent(IN) :: arg
end subroutine c_template_argument_double
end interface
interface
function c_template_return_int() &
result(SHT_rv) &
bind(C, name="TUT_template_return_int")
use iso_c_binding, only : C_INT
implicit none
integer(C_INT) :: SHT_rv
end function c_template_return_int
end interface
interface
function c_template_return_double() &
result(SHT_rv) &
bind(C, name="TUT_template_return_double")
use iso_c_binding, only : C_DOUBLE
implicit none
real(C_DOUBLE) :: SHT_rv
end function c_template_return_double
end interface
interface
subroutine c_fortran_generic_overloaded_0() &
bind(C, name="TUT_fortran_generic_overloaded_0")
implicit none
end subroutine c_fortran_generic_overloaded_0
end interface
interface
subroutine c_fortran_generic_overloaded_1(name, arg2) &
bind(C, name="TUT_fortran_generic_overloaded_1")
use iso_c_binding, only : C_CHAR, C_DOUBLE
implicit none
character(kind=C_CHAR), intent(IN) :: name(*)
real(C_DOUBLE), value, intent(IN) :: arg2
end subroutine c_fortran_generic_overloaded_1
end interface
interface
subroutine c_fortran_generic_overloaded_1_float_bufferify(name, &
SHT_name_len, arg2) &
bind(C, name="TUT_fortran_generic_overloaded_1_float_bufferify")
use iso_c_binding, only : C_CHAR, C_FLOAT, C_INT
implicit none
character(kind=C_CHAR), intent(IN) :: name(*)
integer(C_INT), value, intent(IN) :: SHT_name_len
real(C_FLOAT), value, intent(IN) :: arg2
end subroutine c_fortran_generic_overloaded_1_float_bufferify
end interface
interface
subroutine c_fortran_generic_overloaded_1_double_bufferify(name, &
SHT_name_len, arg2) &
bind(C, name="TUT_fortran_generic_overloaded_1_double_bufferify")
use iso_c_binding, only : C_CHAR, C_DOUBLE, C_INT
implicit none
character(kind=C_CHAR), intent(IN) :: name(*)
integer(C_INT), value, intent(IN) :: SHT_name_len
real(C_DOUBLE), value, intent(IN) :: arg2
end subroutine c_fortran_generic_overloaded_1_double_bufferify
end interface
interface
function c_use_default_overload_num(num) &
result(SHT_rv) &
bind(C, name="TUT_use_default_overload_num")
use iso_c_binding, only : C_INT
implicit none
integer(C_INT), value, intent(IN) :: num
integer(C_INT) :: SHT_rv
end function c_use_default_overload_num
end interface
interface
function c_use_default_overload_num_offset(num, offset) &
result(SHT_rv) &
bind(C, name="TUT_use_default_overload_num_offset")
use iso_c_binding, only : C_INT
implicit none
integer(C_INT), value, intent(IN) :: num
integer(C_INT), value, intent(IN) :: offset
integer(C_INT) :: SHT_rv
end function c_use_default_overload_num_offset
end interface
interface
function c_use_default_overload_num_offset_stride(num, offset, &
stride) &
result(SHT_rv) &
bind(C, name="TUT_use_default_overload_num_offset_stride")
use iso_c_binding, only : C_INT
implicit none
integer(C_INT), value, intent(IN) :: num
integer(C_INT), value, intent(IN) :: offset
integer(C_INT), value, intent(IN) :: stride
integer(C_INT) :: SHT_rv
end function c_use_default_overload_num_offset_stride
end interface
interface
function c_use_default_overload_3(type, num) &
result(SHT_rv) &
bind(C, name="TUT_use_default_overload_3")
use iso_c_binding, only : C_DOUBLE, C_INT
implicit none
real(C_DOUBLE), value, intent(IN) :: type
integer(C_INT), value, intent(IN) :: num
integer(C_INT) :: SHT_rv
end function c_use_default_overload_3
end interface
interface
function c_use_default_overload_4(type, num, offset) &
result(SHT_rv) &
bind(C, name="TUT_use_default_overload_4")
use iso_c_binding, only : C_DOUBLE, C_INT
implicit none
real(C_DOUBLE), value, intent(IN) :: type
integer(C_INT), value, intent(IN) :: num
integer(C_INT), value, intent(IN) :: offset
integer(C_INT) :: SHT_rv
end function c_use_default_overload_4
end interface
interface
function c_use_default_overload_5(type, num, offset, stride) &
result(SHT_rv) &
bind(C, name="TUT_use_default_overload_5")
use iso_c_binding, only : C_DOUBLE, C_INT
implicit none
real(C_DOUBLE), value, intent(IN) :: type
integer(C_INT), value, intent(IN) :: num
integer(C_INT), value, intent(IN) :: offset
integer(C_INT), value, intent(IN) :: stride
integer(C_INT) :: SHT_rv
end function c_use_default_overload_5
end interface
interface
function typefunc(arg) &
result(SHT_rv) &
bind(C, name="TUT_typefunc")
use iso_c_binding, only : C_INT
implicit none
integer(C_INT), value, intent(IN) :: arg
integer(C_INT) :: SHT_rv
end function typefunc
end interface
interface
function enumfunc(arg) &
result(SHT_rv) &
bind(C, name="TUT_enumfunc")
use iso_c_binding, only : C_INT
implicit none
integer(C_INT), value, intent(IN) :: arg
integer(C_INT) :: SHT_rv
end function enumfunc
end interface
interface
function colorfunc(arg) &
result(SHT_rv) &
bind(C, name="TUT_colorfunc")
use iso_c_binding, only : C_INT
implicit none
integer(C_INT), value, intent(IN) :: arg
integer(C_INT) :: SHT_rv
end function colorfunc
end interface
! start get_min_max
interface
subroutine get_min_max(min, max) &
bind(C, name="TUT_get_min_max")
use iso_c_binding, only : C_INT
implicit none
integer(C_INT), intent(OUT) :: min
integer(C_INT), intent(OUT) :: max
end subroutine get_min_max
end interface
! end get_min_max
! start callback1
interface
function callback1(in, incr) &
result(SHT_rv) &
bind(C, name="TUT_callback1")
use iso_c_binding, only : C_INT
import :: callback1_incr
implicit none
integer(C_INT), value, intent(IN) :: in
procedure(callback1_incr) :: incr
integer(C_INT) :: SHT_rv
end function callback1
end interface
! end callback1
interface
function c_last_function_called() &
result(SHT_rv) &
bind(C, name="TUT_last_function_called")
use iso_c_binding, only : C_PTR
implicit none
type(C_PTR) SHT_rv
end function c_last_function_called
end interface
interface
subroutine c_last_function_called_bufferify(SHT_rv, SHT_rv_len) &
bind(C, name="TUT_last_function_called_bufferify")
use iso_c_binding, only : C_CHAR, C_INT
implicit none
character(kind=C_CHAR), intent(OUT) :: SHT_rv(*)
integer(C_INT), value, intent(IN) :: SHT_rv_len
end subroutine c_last_function_called_bufferify
end interface
interface
! splicer begin additional_interfaces
subroutine all_test1(array)
implicit none
integer, dimension(:), allocatable :: array
end subroutine all_test1
! splicer end additional_interfaces
end interface
interface fortran_generic_overloaded
module procedure fortran_generic_overloaded_0
module procedure fortran_generic_overloaded_1_float
module procedure fortran_generic_overloaded_1_double
end interface fortran_generic_overloaded
interface overloaded_function
module procedure overloaded_function_from_name
module procedure overloaded_function_from_index
end interface overloaded_function
interface template_argument
module procedure template_argument_int
module procedure template_argument_double
end interface template_argument
! start generic interface use_default_arguments
interface use_default_arguments
module procedure use_default_arguments
module procedure use_default_arguments_arg1
module procedure use_default_arguments_arg1_arg2
end interface use_default_arguments
! end generic interface use_default_arguments
interface use_default_overload
module procedure use_default_overload_num
module procedure use_default_overload_num_offset
module procedure use_default_overload_num_offset_stride
module procedure use_default_overload_3
module procedure use_default_overload_4
module procedure use_default_overload_5
end interface use_default_overload
interface
! helper copy_string
! Copy the char* or std::string in context into c_var.
subroutine TUT_SHROUD_copy_string_and_free(context, c_var, c_var_size) &
bind(c,name="TUT_ShroudCopyStringAndFree")
use, intrinsic :: iso_c_binding, only : C_CHAR, C_SIZE_T
import TUT_SHROUD_array
type(TUT_SHROUD_array), intent(IN) :: context
character(kind=C_CHAR), intent(OUT) :: c_var(*)
integer(C_SIZE_T), value :: c_var_size
end subroutine TUT_SHROUD_copy_string_and_free
end interface
contains
!>
!! Note that since a reference is returned, no intermediate string
!! is allocated. It is assumed +owner(library).
!<
function concatenate_strings(arg1, arg2) &
result(SHT_rv)
use iso_c_binding, only : C_INT
character(len=*), intent(IN) :: arg1
character(len=*), intent(IN) :: arg2
character(len=:), allocatable :: SHT_rv
! splicer begin function.concatenate_strings
integer(C_INT) SHT_arg1_len
integer(C_INT) SHT_arg2_len
type(TUT_SHROUD_array) :: SHT_rv_cdesc
SHT_arg1_len = len(arg1, kind=C_INT)
SHT_arg2_len = len(arg2, kind=C_INT)
call c_concatenate_strings_bufferify(arg1, SHT_arg1_len, arg2, &
SHT_arg2_len, SHT_rv_cdesc)
allocate(character(len=SHT_rv_cdesc%elem_len):: SHT_rv)
call TUT_SHROUD_copy_string_and_free(SHT_rv_cdesc, SHT_rv, &
SHT_rv_cdesc%elem_len)
! splicer end function.concatenate_strings
end function concatenate_strings
! start use_default_arguments
function use_default_arguments() &
result(SHT_rv)
use iso_c_binding, only : C_DOUBLE
real(C_DOUBLE) :: SHT_rv
! splicer begin function.use_default_arguments
SHT_rv = c_use_default_arguments()
! splicer end function.use_default_arguments
end function use_default_arguments
! end use_default_arguments
! start use_default_arguments_arg1
function use_default_arguments_arg1(arg1) &
result(SHT_rv)
use iso_c_binding, only : C_DOUBLE
real(C_DOUBLE), value, intent(IN) :: arg1
real(C_DOUBLE) :: SHT_rv
! splicer begin function.use_default_arguments_arg1
SHT_rv = c_use_default_arguments_arg1(arg1)
! splicer end function.use_default_arguments_arg1
end function use_default_arguments_arg1
! end use_default_arguments_arg1
! start use_default_arguments_arg1_arg2
function use_default_arguments_arg1_arg2(arg1, arg2) &
result(SHT_rv)
use iso_c_binding, only : C_BOOL, C_DOUBLE
real(C_DOUBLE), value, intent(IN) :: arg1
logical, value, intent(IN) :: arg2
real(C_DOUBLE) :: SHT_rv
! splicer begin function.use_default_arguments_arg1_arg2
logical(C_BOOL) SH_arg2
SH_arg2 = arg2 ! coerce to C_BOOL
SHT_rv = c_use_default_arguments_arg1_arg2(arg1, SH_arg2)
! splicer end function.use_default_arguments_arg1_arg2
end function use_default_arguments_arg1_arg2
! end use_default_arguments_arg1_arg2
subroutine overloaded_function_from_name(name)
use iso_c_binding, only : C_INT
character(len=*), intent(IN) :: name
! splicer begin function.overloaded_function_from_name
integer(C_INT) SHT_name_len
SHT_name_len = len(name, kind=C_INT)
call c_overloaded_function_from_name_bufferify(name, &
SHT_name_len)
! splicer end function.overloaded_function_from_name
end subroutine overloaded_function_from_name
subroutine overloaded_function_from_index(indx)
use iso_c_binding, only : C_INT
integer(C_INT), value, intent(IN) :: indx
! splicer begin function.overloaded_function_from_index
call c_overloaded_function_from_index(indx)
! splicer end function.overloaded_function_from_index
end subroutine overloaded_function_from_index
subroutine template_argument_int(arg)
use iso_c_binding, only : C_INT
integer(C_INT), value, intent(IN) :: arg
! splicer begin function.template_argument_int
call c_template_argument_int(arg)
! splicer end function.template_argument_int
end subroutine template_argument_int
subroutine template_argument_double(arg)
use iso_c_binding, only : C_DOUBLE
real(C_DOUBLE), value, intent(IN) :: arg
! splicer begin function.template_argument_double
call c_template_argument_double(arg)
! splicer end function.template_argument_double
end subroutine template_argument_double
function template_return_int() &
result(SHT_rv)
use iso_c_binding, only : C_INT
integer(C_INT) :: SHT_rv
! splicer begin function.template_return_int
SHT_rv = c_template_return_int()
! splicer end function.template_return_int
end function template_return_int
function template_return_double() &
result(SHT_rv)
use iso_c_binding, only : C_DOUBLE
real(C_DOUBLE) :: SHT_rv
! splicer begin function.template_return_double
SHT_rv = c_template_return_double()
! splicer end function.template_return_double
end function template_return_double
subroutine fortran_generic_overloaded_0()
! splicer begin function.fortran_generic_overloaded_0
call c_fortran_generic_overloaded_0()
! splicer end function.fortran_generic_overloaded_0
end subroutine fortran_generic_overloaded_0
subroutine fortran_generic_overloaded_1_float(name, arg2)
use iso_c_binding, only : C_FLOAT, C_INT
character(len=*), intent(IN) :: name
real(C_FLOAT), value, intent(IN) :: arg2
! splicer begin function.fortran_generic_overloaded_1_float
integer(C_INT) SHT_name_len
SHT_name_len = len(name, kind=C_INT)
call c_fortran_generic_overloaded_1_float_bufferify(name, &
SHT_name_len, arg2)
! splicer end function.fortran_generic_overloaded_1_float
end subroutine fortran_generic_overloaded_1_float
subroutine fortran_generic_overloaded_1_double(name, arg2)
use iso_c_binding, only : C_DOUBLE, C_INT
character(len=*), intent(IN) :: name
real(C_DOUBLE), value, intent(IN) :: arg2
! splicer begin function.fortran_generic_overloaded_1_double
integer(C_INT) SHT_name_len
SHT_name_len = len(name, kind=C_INT)
call c_fortran_generic_overloaded_1_double_bufferify(name, &
SHT_name_len, arg2)
! splicer end function.fortran_generic_overloaded_1_double
end subroutine fortran_generic_overloaded_1_double
function use_default_overload_num(num) &
result(SHT_rv)
use iso_c_binding, only : C_INT
integer(C_INT), value, intent(IN) :: num
integer(C_INT) :: SHT_rv
! splicer begin function.use_default_overload_num
SHT_rv = c_use_default_overload_num(num)
! splicer end function.use_default_overload_num
end function use_default_overload_num
function use_default_overload_num_offset(num, offset) &
result(SHT_rv)
use iso_c_binding, only : C_INT
integer(C_INT), value, intent(IN) :: num
integer(C_INT), value, intent(IN) :: offset
integer(C_INT) :: SHT_rv
! splicer begin function.use_default_overload_num_offset
SHT_rv = c_use_default_overload_num_offset(num, offset)
! splicer end function.use_default_overload_num_offset
end function use_default_overload_num_offset
function use_default_overload_num_offset_stride(num, offset, stride) &
result(SHT_rv)
use iso_c_binding, only : C_INT
integer(C_INT), value, intent(IN) :: num
integer(C_INT), value, intent(IN) :: offset
integer(C_INT), value, intent(IN) :: stride
integer(C_INT) :: SHT_rv
! splicer begin function.use_default_overload_num_offset_stride
SHT_rv = c_use_default_overload_num_offset_stride(num, offset, &
stride)
! splicer end function.use_default_overload_num_offset_stride
end function use_default_overload_num_offset_stride
function use_default_overload_3(type, num) &
result(SHT_rv)
use iso_c_binding, only : C_DOUBLE, C_INT
real(C_DOUBLE), value, intent(IN) :: type
integer(C_INT), value, intent(IN) :: num
integer(C_INT) :: SHT_rv
! splicer begin function.use_default_overload_3
SHT_rv = c_use_default_overload_3(type, num)
! splicer end function.use_default_overload_3
end function use_default_overload_3
function use_default_overload_4(type, num, offset) &
result(SHT_rv)
use iso_c_binding, only : C_DOUBLE, C_INT
real(C_DOUBLE), value, intent(IN) :: type
integer(C_INT), value, intent(IN) :: num
integer(C_INT), value, intent(IN) :: offset
integer(C_INT) :: SHT_rv
! splicer begin function.use_default_overload_4
SHT_rv = c_use_default_overload_4(type, num, offset)
! splicer end function.use_default_overload_4
end function use_default_overload_4
function use_default_overload_5(type, num, offset, stride) &
result(SHT_rv)
use iso_c_binding, only : C_DOUBLE, C_INT
real(C_DOUBLE), value, intent(IN) :: type
integer(C_INT), value, intent(IN) :: num
integer(C_INT), value, intent(IN) :: offset
integer(C_INT), value, intent(IN) :: stride
integer(C_INT) :: SHT_rv
! splicer begin function.use_default_overload_5
SHT_rv = c_use_default_overload_5(type, num, offset, stride)
! splicer end function.use_default_overload_5
end function use_default_overload_5
function last_function_called() &
result(SHT_rv)
use iso_c_binding, only : C_INT
character(len=30) :: SHT_rv
! splicer begin function.last_function_called
integer(C_INT) SHT_rv_len
SHT_rv_len = len(SHT_rv, kind=C_INT)
call c_last_function_called_bufferify(SHT_rv, SHT_rv_len)
! splicer end function.last_function_called
end function last_function_called
! splicer begin additional_functions
! splicer end additional_functions
end module tutorial_mod
|
{"hexsha": "02df73bd593d4c491844e78a279ceb8a380b4de0", "size": 26737, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "regression/reference/debugfalse/wrapftutorial.f", "max_stars_repo_name": "ExternalRepositories/shroud", "max_stars_repo_head_hexsha": "86c39d2324d947d28055f9024f52cc493eb0c813", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 73, "max_stars_repo_stars_event_min_datetime": "2017-10-11T17:01:50.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-01T21:42:12.000Z", "max_issues_repo_path": "regression/reference/debugfalse/wrapftutorial.f", "max_issues_repo_name": "ExternalRepositories/shroud", "max_issues_repo_head_hexsha": "86c39d2324d947d28055f9024f52cc493eb0c813", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 29, "max_issues_repo_issues_event_min_datetime": "2018-03-21T19:34:29.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-04T18:13:14.000Z", "max_forks_repo_path": "regression/reference/debugfalse/wrapftutorial.f", "max_forks_repo_name": "ExternalRepositories/shroud", "max_forks_repo_head_hexsha": "86c39d2324d947d28055f9024f52cc493eb0c813", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 8, "max_forks_repo_forks_event_min_datetime": "2017-11-22T14:27:01.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-30T08:49:03.000Z", "avg_line_length": 38.5815295815, "max_line_length": 81, "alphanum_fraction": 0.6552343195, "num_tokens": 6318}
|
import numpy as np
import pandas as pd
import re
def stack_chunks(dat_list):
'''
Concatenate a list of DataFrame chunks while preserving categorical
columns. If you concat categoricals with different levels (or even the
same levels in a different order), pandas silently converts them to
object dtype; this function first harmonises the levels across chunks.
'''
columns, dtypes = dat_list[0].columns, dat_list[0].dtypes
# preserve categories
levels = {col: set() for col in columns}
for col, dt in zip(columns, dtypes):
if str(dt) == 'category':
for d in dat_list:
levels[col] = set.union(levels[col], d[col].cat.categories)
for d in dat_list:
_newlevels = list(levels[col] - set(d[col].cat.categories))
d[col] = d[col].cat.add_categories(_newlevels)
d[col] = d[col].cat.reorder_categories(levels[col])
# recombine the chunks and return the result
return pd.concat(dat_list)
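# Hypothetical example: if chunk A's column 'x' has categories {'a', 'b'}
# and chunk B's has {'b', 'c'}, both are first extended to a common
# {'a', 'b', 'c'} (in the same order) so pd.concat keeps 'x' categorical
# rather than silently converting it to object.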
def read_hcup(data_file, sas_script, chunksize=500000, combine_chunks=True,
return_meta=False, strings_to_categorical=True, **kwargs):
'''
Arguments:
data_file (str): Path of fixed-width text data file
sas_script (str): Path of the accompanying SAS load file
chunksize (int, default 500K): Break data into chunks of size chunksize
and read/process each chunk separately (for lower memory usage)
combine_chunks (bool, default True): Return single DataFrame with all
chunks combined (True), or return list of DataFrame chunks (False)
return_meta (bool, default False): Return a dict of column metadata
*instead of* the data (True), or just return the processed data (False)
strings_to_categorical (bool, default True): Convert variables defined
as CHAR in SAS script to pd.Categorical upon import
kwargs: passed on to pandas.read_fwf()
Returns:
Default: a single pandas DataFrame
If combine_chunks=False: Generator of pandas DataFrames
If return_meta=True: Return metadata (widths, dtypes, etc. ) *instead
of* the data
'''
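# Hypothetical usage (file names are illustrative only):
#     df = read_hcup('NIS_Core.ASC', 'NIS_Core_load.sas')
#     meta = read_hcup('NIS_Core.ASC', 'NIS_Core_load.sas',
#                      return_meta=True)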
# what dtype to use for text columns
text = 'category' if strings_to_categorical else 'object'
# read in the sas script
with open(sas_script) as f:
sas = f.readlines()
# grab the lines that define the fields. returns three match groups:
# 0 = starting position, 1 = field name, 2 = variable type
fields = [re.search(r'@\s*(\d+)\s+(\S+)\s+(\S+)\s?', x) for x in sas]
fields = [x.groups() for x in fields if x]
# from those, grab the names and starting positions, and infer the dtypes
starts = [int(x[0]) for x in fields]
names = [x[1] for x in fields]
# use different dtypes based on whether user requests metadata or data.
# in the latter case we just make everything a category for max compression
# for numerics, must use floats since int columns can't have missing values
# but it's okay because floats hardly use more space than ints
if return_meta:
dtype = [text if re.search(r'CHAR', x[2]) else float for x in fields]
else:
# keep KEY_NIS as numeric so it can be safely sorted on
dtype = [text if col != 'KEY_NIS' else float for col in names]
# convert dtype list into dictionary (for pd.read_fwf)
dtype = dict(zip(names, dtype))
# compute the variable widths
maxcols = int(re.search(r'LRECL = (.+);', ''.join(sas)).group(1))
widths = np.diff(starts + [maxcols+1])
# grab all the missing value codes
na_vals = re.findall(r'\'(.+)\' = \S+', ''.join(sas))
na_vals += ['.']
# return meta-data if requested
if return_meta:
return {'names': names, 'starts': starts, 'widths': widths,
'dtypes': dtype, 'na_values': na_vals}
# get a generator that reads the data in chunks
dat = pd.read_fwf(data_file, header=None, names=names, widths=widths,
dtype=dtype, na_values=na_vals, chunksize=chunksize,
**kwargs)
# return generator if requested
if not combine_chunks:
return dat
# convert generator to list and stack the dataframes if applicable
dat = list(dat)
if len(dat) > 1:
dat = stack_chunks(dat)
else:
dat = dat[0]
return dat
def read_mhos(sas_script, data_file=None, chunksize=500000, combine_chunks=True,
return_meta=False, strings_to_categorical=True, **kwargs):
'''
Arguments:
data_file (str): Path of fixed-width text data file
sas_script (str): Path of the accompanying SAS load file
chunksize (int, default 500K): Break data into chunks of size chunksize
and read/process each chunk separately (for lower memory usage)
combine_chunks (bool, default True): Return single DataFrame with all
chunks combined (True), or return list of DataFrame chunks (False)
return_meta (bool, default False): Return a dict of column metadata
*instead of* the data (True), or just return the processed data (False)
strings_to_categorical (bool, default True): Convert variables defined
as CHAR in SAS script to pd.Categorical upon import
kwargs: passed on to pandas.read_fwf()
Returns:
Default: a single pandas DataFrame
If combine_chunks=False: Generator of pandas DataFrames
If return_meta=True: Return metadata (colspecs, dtypes, etc. ) *instead
of* the data
'''
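# Hypothetical usage: passing only the SAS script returns the parsed
# metadata (names, colspecs, dtypes, descriptions):
#     meta = read_mhos('mhos_load.sas')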
if data_file is None:
return_meta = True
# what dtype to use for text columns
text = 'category' if strings_to_categorical else 'object'
# read in the sas script
with open(sas_script) as f:
sas = f.readlines()
# match groups (indexed from 1, not 0)
# 1 = prefix, 2 = field name, 3 = string, 4 = start position,
# 5 = end position, 6 = field number, 7 = field description
regex = r'^\s+(&[c|C].|&[r|R].|&[p|P].)?(\S+)\s+(\$)?\s*(\d{1,3})-?(\d{1,3})?\S*\s*/\*\s+(\d{1,3})(.*)\*/'
fields = [re.search(regex, x) for x in sas if re.search(regex, x)]
# check that we matched all and only the the right field numbers
assert [int(x.group(6)) for x in fields if x] \
== list(range(1, len(fields)+1))
# extract the meta-data
prefix = [x.group(1) for x in fields]
names = [x.group(2).lower() for x in fields]
dtypes = [str if x.group(2)=='CASE_ID' else text if x.group(3) else float
for x in fields]
starts = [int(x.group(4))-1 for x in fields]
ends = [int(x.group(5)) if x.group(5) else int(x.group(4)) for x in fields]
descriptions = [x.group(7).strip() for x in fields]
# handle duplicate names
vc = pd.Series(names).value_counts()
dupes = list(vc.index[vc > 1])
dupes = [x in dupes for x in names]
names = [(pfx or '') + name if dupe else name
for pfx, name, dupe in zip(prefix, names, dupes)]
# convert dtype list into dictionary (for pd.read_fwf)
dtypes = dict(zip(names, dtypes))
# return meta-data if requested
if return_meta:
return {'names': names, 'starts': starts, 'ends': ends,
'dtypes': dtypes, 'descriptions': descriptions}
# get a generator that reads the data in chunks
dat = pd.read_fwf(data_file, header=None, names=names,
colspecs=list(zip(starts, ends)), dtype=dtypes,
chunksize=chunksize, **kwargs)
# return generator if requested
if not combine_chunks:
return dat
# convert generator to list and stack the dataframes if applicable
dat = list(dat)
if len(dat) > 1:
dat = stack_chunks(dat)
else:
dat = dat[0]
return dat
|
{"hexsha": "04ec3b9c4ca8f326f69119022c4926baa41a241c", "size": 7735, "ext": "py", "lang": "Python", "max_stars_repo_path": "jwpy/sas_fwf.py", "max_stars_repo_name": "jake-westfall/jwpy", "max_stars_repo_head_hexsha": "12b1616339eb4a6e27dfb670154df5dfb1b309ba", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "jwpy/sas_fwf.py", "max_issues_repo_name": "jake-westfall/jwpy", "max_issues_repo_head_hexsha": "12b1616339eb4a6e27dfb670154df5dfb1b309ba", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 18, "max_issues_repo_issues_event_min_datetime": "2017-10-08T06:48:44.000Z", "max_issues_repo_issues_event_max_datetime": "2018-06-25T20:36:06.000Z", "max_forks_repo_path": "jwpy/sas_fwf.py", "max_forks_repo_name": "jake-westfall/jwpy", "max_forks_repo_head_hexsha": "12b1616339eb4a6e27dfb670154df5dfb1b309ba", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.0777202073, "max_line_length": 110, "alphanum_fraction": 0.6343891403, "include": true, "reason": "import numpy", "num_tokens": 1955}
|
\section{Introduction}
\textbf{\textit{UniSup}} is an instantaneous chat application that allows users to exchange short text messages among them.
\subsection{Description}
The \textit{UniSup} name is composed of \textit{Uni}, which stands for University, the main application scope; and \textit{Sup}, a popular slang abbreviation of \textit{"What’s up?"}.
Every time a \textbf{user} logs in correctly (an authentication check is performed), he/she will be able to see his/her chat history.
After a click on a specific chat, he/she can view the list of the last messages exchanged with that particular contact. Filling the text field and clicking on the \textbf{SEND} button sends a message to the selected contact.
At any time, he/she can start a new conversation with a new contact: it only requires a click on the corresponding button, typing the destination username and the text payload, and clicking on the \textbf{SEND} button.
When a user logs into the system, he/she receives every message sent to him/her while he/she was offline. While he/she is online, instead, he/she receives messages in \textbf{REAL TIME} and the interface is automatically updated to show the new message. Of course, messages within a chat are always displayed in chronological send order, and they are forwarded according to a \textbf{FIFO} policy.
At the application start, the user is shown an authentication form: he/she can log in with an existing account or register a new one; of course, no duplicate usernames are allowed.
From the application Scene, by clicking on the \textbf{LOGOUT} button, the user logs out of the system and goes back to the authentication form. The user can then log in again, even with a different account.
\medskip \\
|
{"hexsha": "d69f7126fa74cb57201195d4fb43d5919f19e2a4", "size": 1782, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "Documentation/Latex/chapters/introduction.tex", "max_stars_repo_name": "edofazza/uniSup", "max_stars_repo_head_hexsha": "95d87d6bfca85b1f9e123aa75ae97f2a39c81eb8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Documentation/Latex/chapters/introduction.tex", "max_issues_repo_name": "edofazza/uniSup", "max_issues_repo_head_hexsha": "95d87d6bfca85b1f9e123aa75ae97f2a39c81eb8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Documentation/Latex/chapters/introduction.tex", "max_forks_repo_name": "edofazza/uniSup", "max_forks_repo_head_hexsha": "95d87d6bfca85b1f9e123aa75ae97f2a39c81eb8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 104.8235294118, "max_line_length": 412, "alphanum_fraction": 0.790684624, "num_tokens": 392}
|
[STATEMENT]
lemma (in abelian_subgroup) a_inv_FactGroup:
"X \<in> carrier (G A_Mod H) \<Longrightarrow> inv\<^bsub>G A_Mod H\<^esub> X = a_set_inv X"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. X \<in> carrier (G A_Mod H) \<Longrightarrow> inv\<^bsub>G A_Mod H\<^esub> X = a_set_inv X
[PROOF STEP]
by (rule normal.inv_FactGroup [OF a_normal,
folded A_FactGroup_def A_SET_INV_def, simplified monoid_record_simps])
|
{"llama_tokens": 177, "file": null, "length": 1}
|
function [x,y] = combinatorialDependentRows(A)
[m,n]=size(A);
% A is a binarized [F R] matrix (each entry of A is 0 or 1)
% if this program is feasible, then there exist 2 combinatorially dependent subsets of rows of A
%cvx_solver gurobi
cvx_solver mosek
cvx_begin sdp quiet
variable x(m) binary;
variable y(m) binary;
% x,y are nonempty and disjoint subsets of rows of A
x+y<=1;
sum(x)>=1;
sum(y)>=1;
% trick to test for same support (idea: m > largest possible sum)
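% since every entry of x'*A and y'*A lies between 0 and m, m*x'*A >= y'*A
% forces (x'*A)_j > 0 wherever (y'*A)_j > 0, i.e. supp(y'*A) is contained
% in supp(x'*A); together with the symmetric constraint below, the two
% selected row subsets must hit exactly the same set of columns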
m*x'*A>=y'*A;
m*y'*A>=x'*A;
cvx_end
disp(find(x))
disp(find(y))
end
|
{"author": "opencobra", "repo": "cobratoolbox", "sha": "e60274d127f65d518535fd0814d20c53dc530f73", "save_path": "github-repos/MATLAB/opencobra-cobratoolbox", "path": "github-repos/MATLAB/opencobra-cobratoolbox/cobratoolbox-e60274d127f65d518535fd0814d20c53dc530f73/src/analysis/topology/FR/combinatorialDependentRows.m"}
|
import hashlib
import os
import time
import random
import subprocess as sub
import numpy as np
def assemble_population(population,genome):
population.append(genome[:])
return population
def new_population(population,fitness,pop_size,cpop):
    for i in range(pop_size):
        if i<2:
            # elitism: carry the two fittest genomes over unchanged
            new_genome=cpop[i][0]
            print('this is my new genome!')
            print(new_genome)
        elif i<7:
            # mutation: copy one of the top three genomes and perturb
            # 15 randomly chosen positions
            which_genome=np.random.randint(0,3)
            selected_genome=list(cpop[which_genome][0])  # copy, so the parent is not mutated in place
            for t in range(15):
                my_num=float(np.random.randint(-10,10))
                my_location=np.random.randint(0,30)
                selected_genome[my_location]=my_num/10
            new_genome=selected_genome
            print('this is my new genome!')
            print(new_genome)
        else:
            # remaining slots: fresh random genomes of length 30
            new_genome=[]
            for t in range(30):
                my_num=float(np.random.randint(-10,10))
                new_genome.append(my_num/10)
            print('this is my new genome!')
            print(new_genome)
        population=assemble_population(population,new_genome)
    return population
|
{"hexsha": "ff18e471d41b33bcd1b736c2d6b5f49330d25762", "size": 1422, "ext": "py", "lang": "Python", "max_stars_repo_path": "evosoro/_voxcad_land_water/new_population.py", "max_stars_repo_name": "kattwalker/evosoro", "max_stars_repo_head_hexsha": "75915d1d794ef6ef2ae78a6e9be00dfd2a0b3283", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "evosoro/_voxcad_land_water/new_population.py", "max_issues_repo_name": "kattwalker/evosoro", "max_issues_repo_head_hexsha": "75915d1d794ef6ef2ae78a6e9be00dfd2a0b3283", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "evosoro/_voxcad_land_water/new_population.py", "max_forks_repo_name": "kattwalker/evosoro", "max_forks_repo_head_hexsha": "75915d1d794ef6ef2ae78a6e9be00dfd2a0b3283", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.8545454545, "max_line_length": 67, "alphanum_fraction": 0.5478199719, "include": true, "reason": "import numpy", "num_tokens": 305}
|
import argparse
from meter import AverageMeter
import os
import torch
import numpy as np
from utils import *
from model import *
from vocab import Vocab
from batchify import get_batches2, get_batches, get_batches3
import time
from train import evaluate
import matplotlib.pyplot as plt
import itertools
import sys
import json
print(torch.__version__)
parser = argparse.ArgumentParser()
parser.add_argument('--settings_file', metavar='FILE', required=True,
help='name of settings json file')
##########################################################################
checkpoint_dir = "checkpoints/yelp/daae3/"
parallel_data_dir = ""
print_outputs_flag = False
torch.autograd.set_detect_anomaly(True)
# parameters
set_seed(1111)
# pres_fn = "present.txt"
# past_fn = "past.txt"
# walk_file = "walk_test.pt"
# init_mode = "rand"
# num_epochs = 100
##########################################################################
vocab_file = os.path.join(checkpoint_dir, 'vocab.txt')
#if not os.path.isfile(vocab_file):
# Vocab.build(train_sents, vocab_file, args.vocab_size)
vocab = Vocab(vocab_file)
cuda = torch.cuda.is_available()
device = torch.device('cuda' if cuda else 'cpu')
# hyper parameters
alpha = 1
dim_emb = 128
batch_size = 256
def get_model(path):
ckpt = torch.load(path)
train_args = ckpt['args']
model = {'dae': DAE, 'vae': VAE, 'aae': AAE}['aae'](vocab, train_args).to(device)
model.load_state_dict(ckpt['model'])
model.flatten()
model.eval()
return model, train_args
def load_model(checkpoint_dir, verbose = False):
model, train_args = get_model(checkpoint_dir + "model.pt")
if verbose:
print("MODEL")
print(model)
model.train()
return model, train_args
def load_data(pres_fn, past_fn):
present_file = parallel_data_dir + pres_fn
past_file = parallel_data_dir + past_fn
present_data = load_sent(present_file)
past_data = load_sent(past_file)
n_sents = len(present_data)
print("present, past data length:", len(present_data), len(past_data))
data_batches, _ = get_batches2(present_data, past_data, vocab, batch_size, device)
B = len(data_batches)
print("number of batches:", B)
return data_batches
def initialize(init_mode):
if init_mode == "rand":
w = torch.randn(dim_emb, requires_grad=True, device=device)
elif init_mode == "zero":
w = torch.zeros(dim_emb, requires_grad=True, device=device)
elif init_mode == "arithmetic":
w = torch.load("walk_files/arithmetic_v2.pt")
w = w.to(device)
w.requires_grad = True
return w
def compute_loss(w, x, x_edit, model, lambda_adv=1.):
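    # encode x, shift the latent by the (trainable) walk vector w, decode,
    # and score the decoded logits against the edited target x_edit; the
    # adversarial term regularizes the shifted latent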
mu, logvar = model.encode(x)
z = reparameterize(mu, logvar)
new_latent = z + alpha * w
logits, hidden = model.decode(new_latent, x)
loss = model.loss_rec(logits, x_edit).mean()
loss_d, adv = model.loss_adv(new_latent)
loss += lambda_adv * adv
return loss
def average_loss(w, data_batches, model, lambda_adv = 1., verbose = False):
meter = AverageMeter()
model.eval()
with torch.no_grad():
total_loss = 0
B = len(data_batches)
nsents = 0
for idx in range(len(data_batches)):
x, x_edit = data_batches[idx]
mu, logvar = model.encode(x)
z = reparameterize(mu, logvar)
new_latent = z + alpha * w
logits, hidden = model.decode(new_latent, x)
loss = model.loss_rec(logits, x_edit).mean()
loss_d, adv = model.loss_adv(new_latent)
loss += lambda_adv * adv
if verbose:
losses = model.autoenc(x, x_edit)
print("autoenc", idx, ":", losses['rec'], " | shapes", x.shape, x_edit.shape)
print("my loss", idx, ":", loss)
# print("x", x.shape, "| x_edit", x_edit.shape)
if print_outputs_flag:
sents = []
edited_sents = []
walk_sents = []
batch_len = x.shape[1]
max_len = 35
dec = 'greedy'
outputs = model.generate(new_latent, max_len, dec).t()
for i in range(batch_len):
x_i = x[:,i]
sents.append([vocab.idx2word[id] for id in x_i])
xe_i = x_edit[:,i]
edited_sents.append([vocab.idx2word[id] for id in xe_i])
output_i = outputs[i]
walk_sents.append([vocab.idx2word[id] for id in output_i])
walk_sents = strip_eos(walk_sents)
for i in range(batch_len):
x_i = torch.unsqueeze(x[:,i], dim=1)
xe_i = torch.unsqueeze(x_edit[:,i], dim=1)
loss_i = compute_loss(w, x_i, xe_i, model)
print("batch", idx, ":", loss, "| sentence", i, ":", loss_i)
print("--SENT:", sents[i])
print(x[:,i])
print("--EDIT:", edited_sents[i])
print(x_edit[:,i])
print("--WALK:", walk_sents[i])
print(outputs[i])
total_loss += loss * x.shape[1]
nsents += x.shape[1]
#breakpoint()
meter.update(loss.item(), x.shape[1])
avg_loss = total_loss/nsents
if verbose:
print("avg_loss meter loss vs avg_loss", meter.avg, avg_loss)
#print("average loss", avg_loss)
#print("=" * 60)
return avg_loss
def plot_series(series_list, walk_file, results_dir):
for series in series_list:
plt.plot(series[1])
plt.xlabel("epoch")
plt.ylabel("loss")
plt.show()
#print("saved figure", results_dir+walk_file[:-2], "png")
# plt.savefig(results_dir+walk_file[:-2]+"png")
def train_walk(walk_file, w, data_batches, valid_batches, model,
num_epochs, results_dir, joint_flag = True,
lambda_adv = 1., verbose = False):
# for param in model.parameters():
# param.requires_grad = False # freeze the model
print("START TRAINING:", walk_file)
if joint_flag:
joint_walk = itertools.chain([w], model.parameters())
opt = optim.Adam(joint_walk, lr=0.0005)
else:
opt = optim.Adam([w], lr=0.01)
start_time = time.perf_counter()
meter = AverageMeter()
loss_hist_before = []
loss_hist_during = []
for e in range(num_epochs):
        avg_loss_before = average_loss(w, data_batches, model, lambda_adv, verbose)
model.train()
total_loss = 0
nsents = 0
meter.clear()
indices = list(range(len(data_batches)))
random.shuffle(indices)
for i, idx in enumerate(indices):
# opt.zero_grad()
x, x_edit = data_batches[idx]
mu, logvar = model.encode(x) # encode the input x
z = reparameterize(mu, logvar)
new_latent = z + alpha * w # add w to compute new latent
logits, hidden = model.decode(new_latent, x) # decode the new latent
# compute the loss wrt to the edit
loss_rec = model.loss_rec(logits, x_edit).mean() # cross entropy loss
if joint_flag:
# loss_d, loss_g = model.loss_adv(new_latent) # adversarial loss
loss_d, loss_g = model.loss_adv(z)
losses = {'rec': loss_rec, 'loss_d': loss_d, 'loss_g': loss_g}
loss = losses['rec'] + lambda_adv * losses['loss_g']
opt.zero_grad()
model.optD.zero_grad()
loss.backward()
losses['loss_d'].backward()
opt.step()
model.optD.step()
# print("LOSS", idx, ":", loss.item(), "| loss_d:", loss_d.item())
else:
opt.zero_grad()
loss = loss_rec
loss.backward()
opt.step()
# print("LOSS", idx, ":", loss)
total_loss += loss * x.shape[1]
nsents += x.shape[1]
meter.update(loss, x.shape[1])
print("---------------------------")
avg_loss_after = average_loss(w, data_batches, model)
print("FINISHED EPOCH", e)
print("avg loss before:", avg_loss_before)
print("avg train loss: ", total_loss/nsents)
# print("meter loss", meter.avg)
loss_hist_before.append((e, avg_loss_before.item()))
loss_hist_during.append((e, meter.avg.item()))
if verbose:
print("loss", loss)
print("nsents", nsents)
        val_loss = average_loss(w, valid_batches, model, lambda_adv, False)
print("avg valid loss: ", val_loss)
epoch_time = time.perf_counter()
print("time: ", epoch_time - start_time)
print("=" * 60)
print("FINISHED TRAINING")
best_before_loss = min(loss_hist_before, key = lambda x : x[1])
best_during_loss = min(loss_hist_during, key = lambda x : x[1])
print("best_before_loss:", best_before_loss, loss_hist_during[best_before_loss[0]])
print("best_during_loss:", best_during_loss, loss_hist_before[best_during_loss[0]])
# plot_series([loss_hist_before, loss_hist_during], walk_file, results_dir)
print(w)
### save walk and model
print(results_dir)
print(walk_file)
save_file = os.path.join(results_dir, walk_file)
print("SAVE FILE", save_file)
torch.save(w, os.path.join(results_dir, walk_file))
return w, model
#print(total_loss)
def evaluation(pres_fn, past_fn, w, model, verbose):
batches = load_data(pres_fn, past_fn)
    return average_loss(w, batches, model, verbose=verbose)
def main(args):
f = open(args.settings_file)
settings = json.load(f)
print(settings)
orig_fn = settings["orig_file"]
edit_fn = settings["edit_file"]
walk_file = settings["walk_file"]
results_dir = settings["results_dir"]
    if not results_dir:
sys.exit("Results dir not found!!")
checkpoint_dir = settings["checkpoint_dir"]
init_mode = settings["init_mode"]
num_epochs = int(settings["num_epochs"])
joint_flag = (settings["joint_flag"] == "True")
eval_only = (settings["eval"] == "True")
verbose = (settings["verbose"] == "True")
val_pres_fn = "parallel_data/test_present.txt"
val_past_fn = "parallel_data/test_past.txt"
model, train_args = load_model(checkpoint_dir)
train_batches = load_data(orig_fn, edit_fn)
valid_batches = load_data(val_pres_fn, val_past_fn)
print("hello")
print(results_dir)
print(walk_file)
print("0" * 50)
if verbose:
print("~" * 50)
meters = evaluate(model, train_batches)
print(' '.join(['{} {:.2f},'.format(k, meter.avg)
for k, meter in meters.items()]))
print("~" * 50)
# evaluate only
if eval_only:
w_final = torch.load(results_dir + walk_file, device)
print("EVALUATION")
print("train loss")
        average_loss(w_final, train_batches, model, verbose=verbose)
print("-" * 40)
print("valid loss")
        average_loss(w_final, valid_batches, model, verbose=verbose)
print("-" * 40)
# train and evaluate
else:
# print out settings
print("walk_file: \t", walk_file)
print("data files: \t", orig_fn, ",", edit_fn)
print("init mode: \t", init_mode)
print("num epochs: \t", num_epochs)
print("-" * 60)
# initial
w = initialize(init_mode)
print("INITIAL LOSS")
        init_loss = average_loss(w, train_batches, model, verbose=verbose)
print("initial avg loss", init_loss)
print("-" * 40)
# training
        w_final, model_final = train_walk(walk_file, w, train_batches, valid_batches,
                                          model, num_epochs, results_dir,
                                          joint_flag=joint_flag, verbose=verbose)
ckpt = {'args': train_args, 'model': model_final.state_dict()}
torch.save(ckpt, os.path.join(results_dir, 'joint_model.pt'))
# evaluation
print("EVALUATION")
print("evaluating on training data")
valid_pres_file = orig_fn # "test_present.txt" usually
valid_past_file = edit_fn
valid_batches = load_data(valid_pres_file, valid_past_file)
        final_loss = average_loss(w_final, valid_batches, model, verbose=verbose)
print(final_loss)
print("\n LOSS DETAILS")
#else: # train to max valid loss
if __name__ == '__main__':
args = parser.parse_args()
main(args)
|
{"hexsha": "5337cbcc0a6257e989d0cd25cc536ca993b533a3", "size": 12648, "ext": "py", "lang": "Python", "max_stars_repo_path": "steerability_joint.py", "max_stars_repo_name": "kttian/text-autoencoders", "max_stars_repo_head_hexsha": "76107444c25bdec62aae7fc5ff29b89fdb8691a4", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "steerability_joint.py", "max_issues_repo_name": "kttian/text-autoencoders", "max_issues_repo_head_hexsha": "76107444c25bdec62aae7fc5ff29b89fdb8691a4", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "steerability_joint.py", "max_forks_repo_name": "kttian/text-autoencoders", "max_forks_repo_head_hexsha": "76107444c25bdec62aae7fc5ff29b89fdb8691a4", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.8429752066, "max_line_length": 94, "alphanum_fraction": 0.5837286528, "include": true, "reason": "import numpy", "num_tokens": 3001}
|
from baconian.core.core import Env
from baconian.common.spaces import Box
from baconian.envs.gym_env import GymEnv
import numpy as np
class Wrapper(Env):
def __init__(self, env: Env):
if isinstance(env, GymEnv):
self.env = env.unwrapped_gym
self.src_env = env
else:
self.env = env
self.src_env = env
super().__init__(name=env.name + '_wrapper', copy_from_env=env)
def __getattr__(self, item):
if hasattr(self.src_env, item):
return getattr(self.src_env, item)
if hasattr(self.env, item):
return getattr(self.env, item)
        raise AttributeError(item)
def seed(self, seed=None):
return self.src_env.seed(seed)
@property
def unwrapped(self):
return self.env.unwrapped
@property
def spec(self):
return self.env.spec
@classmethod
def class_name(cls):
return cls.__name__
def __str__(self):
return '<{}{}>'.format(type(self).__name__, self.env)
def __repr__(self):
return str(self)
def reset(self):
return self.src_env.reset()
def get_state(self):
return self.src_env.get_state()
class ObservationWrapper(Wrapper):
def reset(self):
observation = self.src_env.reset()
return self._observation(observation)
def step(self, action):
observation, reward, done, info = self.src_env.step(action)
return self.observation(observation), reward, done, info
def observation(self, observation):
return self._observation(observation)
def _observation(self, observation):
raise NotImplementedError
def get_state(self):
return self._observation(self.src_env.get_state())
class RewardWrapper(Wrapper):
def step(self, action):
observation, reward, done, info = self.src_env.step(action)
return observation, self.reward(observation, action, reward, done, info), done, info
def reward(self, observation, action, reward, done, info):
return self._reward(observation, action, reward, done, info)
def _reward(self, observation, action, reward, done, info):
raise NotImplementedError
class ActionWrapper(Wrapper):
def step(self, action):
action = self.action(action)
return self.src_env.step(action)
def action(self, action):
return self._action(action)
def _action(self, action):
raise NotImplementedError
def reverse_action(self, action):
return self._reverse_action(action)
def _reverse_action(self, action):
raise NotImplementedError
class StepObservationWrapper(ObservationWrapper):
def __init__(self, env: Env, step_limit=100000):
super().__init__(env=env)
assert isinstance(self.src_env.observation_space, Box), 'not support non Box space for step observation wrapper'
self.src_env.observation_space = Box(low=np.concatenate([self.src_env.observation_space.low, np.array([0])]),
high=np.concatenate(
[self.src_env.observation_space.high, np.array([step_limit])]))
self.src_env.env_spec.obs_space = self.src_env.observation_space
self.observation_space = self.src_env.observation_space
def _observation(self, observation):
obs = np.array(observation)
return np.concatenate([obs, np.array([self.src_env.trajectory_level_step_count])])
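# Minimal usage sketch (hypothetical environment id; mirrors how the wrappers
# above compose, gym.Wrapper-style):
#
#   env = StepObservationWrapper(GymEnv('Pendulum-v0'), step_limit=200)
#   obs = env.reset()   # observation now ends with the trajectory step count
#   obs, reward, done, info = env.step(env.action_space.sample())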
|
{"hexsha": "58205704e71a4b55eb578f3c1503f9f080d533fd", "size": 3515, "ext": "py", "lang": "Python", "max_stars_repo_path": "baconian/envs/env_wrapper.py", "max_stars_repo_name": "yitongx/baconian-public", "max_stars_repo_head_hexsha": "a67e23c6bc6bfe7019ec9532a3d18f06aed6bbbb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 69, "max_stars_repo_stars_event_min_datetime": "2020-01-31T17:44:43.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-28T13:09:11.000Z", "max_issues_repo_path": "baconian/envs/env_wrapper.py", "max_issues_repo_name": "yitongx/baconian-project", "max_issues_repo_head_hexsha": "e84508da60877e387344133a11039edaac35c5bf", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2019-04-28T07:24:26.000Z", "max_issues_repo_issues_event_max_datetime": "2020-01-29T01:49:51.000Z", "max_forks_repo_path": "baconian/envs/env_wrapper.py", "max_forks_repo_name": "yitongx/baconian-project", "max_forks_repo_head_hexsha": "e84508da60877e387344133a11039edaac35c5bf", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2019-05-04T02:18:11.000Z", "max_forks_repo_forks_event_max_datetime": "2019-12-04T22:05:52.000Z", "avg_line_length": 30.5652173913, "max_line_length": 120, "alphanum_fraction": 0.653769559, "include": true, "reason": "import numpy", "num_tokens": 744}
|
#redirect UC Davis Classrooms
|
{"hexsha": "e19a508d2817c8076713bffc6759f1c2098bdb78", "size": 30, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "lab/davisWiki/Classrooms.f", "max_stars_repo_name": "voflo/Search", "max_stars_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lab/davisWiki/Classrooms.f", "max_issues_repo_name": "voflo/Search", "max_issues_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lab/davisWiki/Classrooms.f", "max_forks_repo_name": "voflo/Search", "max_forks_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 15.0, "max_line_length": 29, "alphanum_fraction": 0.8333333333, "num_tokens": 7}
|
SubWorker{F,T,A,S} = RemoteChannel{Channel{Vector{SubProblem{F,T,A,S}}}}
ScenarioProblemChannel{S} = RemoteChannel{Channel{StochasticPrograms.ScenarioProblems{S}}}
Work = RemoteChannel{Channel{Int}}
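# Each entry of `subworkers` is a RemoteChannel living on one worker process
# and holding that worker's local vector of subproblems; `Work` channels carry
# iteration indices from the master to the workers.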
function load_subproblems!(subworkers::Vector{SubWorker{F,T,A,S}},
scenarioproblems::AbstractScenarioProblems,
x::AbstractVector,
subsolver::SubSolver) where {F <: AbstractFeasibility,
T <: AbstractFloat,
A <: AbstractVector,
S <: LQSolver}
# Create subproblems on worker processes
@sync begin
for w in workers()
subworkers[w-1] = RemoteChannel(() -> Channel{Vector{SubProblem{F,T,A,S}}}(1), w)
@async load_worker!(scenarioproblems, w, subworkers[w-1], x, subsolver)
end
end
end
function load_worker!(sp::ScenarioProblems,
w::Integer,
worker::SubWorker,
x::AbstractVector,
subsolver::SubSolver)
n = StochasticPrograms.nscenarios(sp)
(nscen, extra) = divrem(n, nworkers())
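    # spread the n scenarios over the workers: each gets nscen, and the first
    # `extra` workers take one extra (e.g. n = 10 on 3 workers -> 4, 3, 3)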
prev = [nscen + (extra + 2 - p > 0) for p in 2:(w-1)]
start = isempty(prev) ? 1 : sum(prev) + 1
stop = min(start + nscen + (extra + 2 - w > 0) - 1, n)
    start_id = start - 1
πs = [probability(sp.scenarios[i]) for i = start:stop]
return remotecall_fetch(init_subworker!,
w,
worker,
sp.parent,
sp.problems[start:stop],
πs,
x,
subsolver,
start_id)
end
function load_worker!(sp::DScenarioProblems,
w::Integer,
worker::SubWorker,
x::AbstractVector,
subsolver::SubSolver)
prev = [sp.scenario_distribution[p-1] for p in 2:(w-1)]
start_id = isempty(prev) ? 0 : sum(prev)
return remotecall_fetch(init_subworker!,
w,
worker,
sp[w-1],
x,
subsolver,
start_id)
end
function init_subworker!(subworker::SubWorker{F,T,A,S},
parent::JuMP.Model,
submodels::Vector{JuMP.Model},
πs::A,
x::A,
subsolver::SubSolver,
start_id::Integer) where {F, T <: AbstractFloat, A <: AbstractArray, S <: LQSolver}
subproblems = Vector{SubProblem{F,T,A,S}}(undef, length(submodels))
for (i,submodel) = enumerate(submodels)
y₀ = convert(A, rand(submodel.numCols))
subproblems[i] = SubProblem(submodel, parent, start_id + i, πs[i], x, y₀, get_solver(subsolver), F)
end
put!(subworker, subproblems)
return nothing
end
function init_subworker!(subworker::SubWorker{F,T,A,S},
scenarioproblems::ScenarioProblemChannel,
x::A,
subsolver::SubSolver,
start_id::Integer) where {F, T <: AbstractFloat, A <: AbstractArray, S <: LQSolver}
sp = fetch(scenarioproblems)
subproblems = Vector{SubProblem{F,T,A,S}}(undef, StochasticPrograms.nsubproblems(sp))
for (i,submodel) = enumerate(sp.problems)
y₀ = convert(A, rand(sp.problems[i].numCols))
subproblems[i] = SubProblem(submodel, sp.parent, start_id + i, probability(sp.scenarios[i]), x, y₀, get_solver(subsolver), F)
end
put!(subworker, subproblems)
return nothing
end
function resolve_subproblems!(subworker::SubWorker{F,T,A,S}, x::AbstractVector, cutqueue::CutQueue{T}, aggregator::AbstractAggregator, t::Integer, metadata::MetaData) where {F <: AbstractFeasibility, T <: AbstractFloat, A <: AbstractArray, S <: LQSolver}
# Fetch all subproblems stored in worker
subproblems::Vector{SubProblem{F,T,A,S}} = fetch(subworker)
if isempty(subproblems)
        # Worker has nothing to do; return.
return nothing
end
# Aggregation policy
aggregation::AbstractAggregation = aggregator(length(subproblems), T)
# Solve subproblems
for subproblem ∈ subproblems
update_subproblem!(subproblem, x)
cut = subproblem()
aggregate_cut!(cutqueue, aggregation, metadata, t, cut, x)
end
flush!(cutqueue, aggregation, metadata, t, x)
return nothing
end
function work_on_subproblems!(subworker::SubWorker{F,T,A,S},
work::Work,
finalize::Work,
cutqueue::CutQueue{T},
decisions::Decisions{A},
metadata::MetaData,
aggregator::AbstractAggregator) where {F, T <: AbstractFloat, A <: AbstractArray, S <: LQSolver}
subproblems::Vector{SubProblem{F,T,A,S}} = fetch(subworker)
if isempty(subproblems)
        # Worker has nothing to do; return.
return nothing
end
aggregation::AbstractAggregation = aggregator(length(subproblems), T)
quit = false
while true
t::Int = try
if isready(finalize)
quit = true
take!(finalize)
else
wait(work)
take!(work)
end
        catch err
            if err isa InvalidStateException
                # Master closed the work/finalize channel. Worker finished
                return nothing
            end
            rethrow()
        end
t == -1 && continue
x::A = fetch(decisions,t)
for subproblem in subproblems
update_subproblem!(subproblem, x)
cut = subproblem()
!quit && aggregate_cut!(cutqueue, aggregation, metadata, t, cut, x)
end
!quit && flush!(cutqueue, aggregation, metadata, t, x)
if quit
# Worker finished
return nothing
end
end
end
function eval_second_stage(subworkers::Vector{<:SubWorker}, x::AbstractVector)
partial_objectives = Vector{Float64}(undef, nworkers())
@sync begin
for (i,w) in enumerate(workers())
@async partial_objectives[i] = remotecall_fetch(calculate_subobjective, w, subworkers[w-1], x)
end
end
return sum(partial_objectives)
end
function calculate_subobjective(subworker::SubWorker{F,T,A,S}, x::A) where {F, T <: AbstractFloat, A <: AbstractArray, S <: LQSolver}
subproblems::Vector{SubProblem{F,T,A,S}} = fetch(subworker)
if length(subproblems) > 0
return sum([subproblem.π*subproblem(x) for subproblem in subproblems])
else
return zero(T)
end
end
function fill_submodels!(subworkers::Vector{<:SubWorker}, x::AbstractVector, scenarioproblems::ScenarioProblems)
j = 0
@sync begin
for w in workers()
n = remotecall_fetch((sw)->length(fetch(sw)), w, subworkers[w-1])
for i = 1:n
k = i+j
@async fill_submodel!(scenarioproblems.problems[k],remotecall_fetch((sw,i,x)->begin
sp = fetch(sw)[i]
sp(x)
get_solution(sp)
end,
w,
subworkers[w-1],
i,
x)...)
end
j += n
end
end
return nothing
end
function fill_submodels!(subworkers::Vector{<:SubWorker}, x::AbstractVector, scenarioproblems::DScenarioProblems)
@sync begin
for w in workers()
@async remotecall_fetch(fill_submodels!,
w,
subworkers[w-1],
x,
scenarioproblems[w-1])
end
end
return nothing
end
function fill_submodels!(subworker::SubWorker{F,T,A,S},
x::A,
scenarioproblems::ScenarioProblemChannel) where {F <: AbstractFeasibility, T <: AbstractFloat, A <: AbstractArray, S <: LQSolver}
sp = fetch(scenarioproblems)
subproblems::Vector{SubProblem{F,T,A,S}} = fetch(subworker)
for (i, submodel) in enumerate(sp.problems)
subproblems[i](x)
fill_submodel!(submodel, subproblems[i])
end
return nothing
end
|
{"hexsha": "5cf1edfaed975d2f5a2c20009ed57fa497560bff", "size": 8754, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/solvers/structured/lshaped/execution/distributed.jl", "max_stars_repo_name": "JuliaTagBot/StochasticPrograms.jl", "max_stars_repo_head_hexsha": "43ab69b654368f219a13808b4359035d89bf95a4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/solvers/structured/lshaped/execution/distributed.jl", "max_issues_repo_name": "JuliaTagBot/StochasticPrograms.jl", "max_issues_repo_head_hexsha": "43ab69b654368f219a13808b4359035d89bf95a4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/solvers/structured/lshaped/execution/distributed.jl", "max_forks_repo_name": "JuliaTagBot/StochasticPrograms.jl", "max_forks_repo_head_hexsha": "43ab69b654368f219a13808b4359035d89bf95a4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.0803571429, "max_line_length": 254, "alphanum_fraction": 0.5380397533, "num_tokens": 2033}
|
#-------------------------------------------------------------------------------------------------------------------
# Packages & Settings
#-------------------------------------------------------------------------------------------------------------------
# General packages
import time
import sys
import os
import datetime
from glob import glob
# Math and data structure packages
import numpy as np
from scipy import stats
import math
import random
# Plots, Fits, etc.
import matplotlib
import matplotlib.pyplot as plt
# Writing Output
import pickle
text_folder = '/home/rettenls/data/texts/wiki/'
eval_folder = '/home/rettenls/data/evaluation/analogy/'
exp_folder = '/home/rettenls/data/experiments/wiki/'
dist_folder = '/home/rettenls/data/experiments/wiki/analysis/distribution/'
coordination_file = exp_folder + 'coordination/coordinate.txt'
date_format = '%Y-%m-%d_%H:%M:%S'
#-------------------------------------------------------------------------------------------------------------------
# Loading own Modules
#-------------------------------------------------------------------------------------------------------------------
sys.path.append('/home/rettenls/code')
from lib.model import Model
from lib.trafo import Transformation
from lib.eval import print_nn_word, get_nn_list, get_cosine_similarity, get_pip_norm, get_word_relatedness, get_common_vocab
from lib.score import evaluate_analogy
from lib.operations import align, avg, join, align_list
from lib.util import get_filename
from scipy.stats import spearmanr
#-------------------------------------------------------------------------------------------------------------------
# Experiments
#-------------------------------------------------------------------------------------------------------------------
languages = ['fi', 'hi', 'cs', 'zh', 'pt', 'pl', 'en']
models = ['word2vec']
model_type = 'skipgram'
data_type = 'shuffle'
target_word_num = int(1.e3)
total_pair_num = 10
for model in models:
for language in languages:
if model == 'glove':
folder = exp_folder + language + '/' + model + '/' + data_type
else:
folder = exp_folder + language + '/' + model + '/' + model_type + '/' + data_type
# Get Common Vocab
directory_list = list()
for i in range(total_pair_num * 2):
directory_list.append(folder + '/run_{:04d}'.format(i))
common_vocab = get_common_vocab(directory_list)
# Get Target Words
target_words = np.array(list(common_vocab))
np.random.shuffle(target_words)
target_words = target_words[:target_word_num]
# Proxy Sizes
proxy_sizes = [int(1.e3), int(1.e4), int(1.e5), len(common_vocab)]
# Variable for Final Results
spearman_technique = np.zeros((len(proxy_sizes), len(proxy_sizes)))
spearman_all = np.zeros((total_pair_num, len(proxy_sizes), len(proxy_sizes)))
# Load Models
for model_pair_index in range(total_pair_num):
m0 = Model(model)
m0.load(folder + '/run_{:04d}'.format(model_pair_index * 2))
m1 = Model(model)
m1.load(folder + '/run_{:04d}'.format(model_pair_index * 2 + 1))
m0,m1,joint = align(m0,m1)
target_word_indices = [m0.indices[word] for word in target_words]
pip_loss = np.zeros((2, len(proxy_sizes),target_word_num))
for pip_loss_calc_run in range(2):
for proxy_size_index in range(len(proxy_sizes)):
# Get Proxy Size
proxy_size = proxy_sizes[proxy_size_index]
#print('Proxy Size:', proxy_size)
# Randomly Draw Proxy Indices
proxy_indices = np.arange(len(joint))
np.random.shuffle(proxy_indices)
proxy_indices = proxy_indices[:proxy_size]
# Calculate wwrPIP Loss
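                    # wwrPIP(i) = ||(E0[i]*P0' - E1[i]*P1')/2|| / sqrt(#proxies):
                    # how differently the two runs place target word i relative
                    # to the sampled proxy words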
eval_step_size = max(int(1.e8) // proxy_size, 1)
eval_steps = target_word_num // eval_step_size
for eval_step in range(eval_steps + 1):
# Get Eval Indices
lower = eval_step_size * eval_step
upper = min(target_word_num, lower + eval_step_size)
eval_step_indices = target_word_indices[lower:upper]
# Evaluate Expression
expression = (np.matmul(m0.embeddings[eval_step_indices], m0.embeddings[proxy_indices].T) - \
np.matmul(m1.embeddings[eval_step_indices], m1.embeddings[proxy_indices].T)) / 2
pip_loss_values = np.sqrt(np.sum(np.square(expression), axis = 1)) / math.sqrt(len(proxy_indices))
pip_loss[pip_loss_calc_run, proxy_size_index, lower:upper] = pip_loss_values
spearman = np.zeros((len(proxy_sizes), len(proxy_sizes)))
for proxy_size_index_1 in range(len(proxy_sizes)):
for proxy_size_index_2 in range(len(proxy_sizes)):
spearman[proxy_size_index_1, proxy_size_index_2] = spearmanr(pip_loss[0,proxy_size_index_1],pip_loss[1,proxy_size_index_2])[0]
print(spearman)
spearman_all[model_pair_index] = spearman
spearman_avg = np.sum(spearman_all, axis = 0)
print(model, language)
print(spearman_avg)
print()
spearman_technique += spearman_avg
spearman_technique /= (total_pair_num)
print('\n--FINAL--')
print(model)
print(spearman_technique)
print('--FINAL--\n')
|
{"hexsha": "1729be861814dd0f8cafa841138ffb7190c51a53", "size": 5035, "ext": "py", "lang": "Python", "max_stars_repo_path": "thesis/3 - Global Distance Metrics/1 - [Table 2.8] Influence of Proxy Size on PIP Loss.py", "max_stars_repo_name": "lucasrettenmeier/word-embedding-stability", "max_stars_repo_head_hexsha": "d7a93201a6b2fd85cbf52681227829323edb9ef4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-10-06T20:33:20.000Z", "max_stars_repo_stars_event_max_datetime": "2020-10-06T20:33:20.000Z", "max_issues_repo_path": "thesis/3 - Global Distance Metrics/1 - [Table 2.8] Influence of Proxy Size on PIP Loss.py", "max_issues_repo_name": "lucasrettenmeier/word-embedding-stability", "max_issues_repo_head_hexsha": "d7a93201a6b2fd85cbf52681227829323edb9ef4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "thesis/3 - Global Distance Metrics/1 - [Table 2.8] Influence of Proxy Size on PIP Loss.py", "max_forks_repo_name": "lucasrettenmeier/word-embedding-stability", "max_forks_repo_head_hexsha": "d7a93201a6b2fd85cbf52681227829323edb9ef4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.125, "max_line_length": 131, "alphanum_fraction": 0.6186693148, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1207}
|
"""
`ClimbingImage(∇V, τ, ∇V_climb, integrate!, Δt)` - Set up the Climbing Image Method
### Fields
* `∇V` - In place gradient of the potential
* `τ` - Approximate tangent vector of the unstable direction
* `∇V_climb `- In place gradient of the potential with reflector
* `integrate!` - In place integrator
* `Δt` - Time step
"""
struct ClimbingImage{TGV, TT, TGVC, TI, TF<:AbstractFloat} <: ClimbingImageMethod
∇V::TGV
τ::TT
∇V_climb::TGVC
integrate!::TI
Δt::TF
end
function ClimbingImage(∇V::TGV, τ::TT, integrate!::TI, Δt::TF) where{TGV, TT, TI, TF}
function ∇V_climb(x)
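        # reverse the component of the force along the unstable direction τ,
        # so integrating the modified gradient climbs toward the saddle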
gradV = ∇V(x);
gradV .-= 2 * (gradV⋅τ) * τ;
return gradV
end
return ClimbingImage(∇V, τ, ∇V_climb, integrate!, Δt);
end
"""
`climbing_image`: Run the climbing image method...
### Fields
* `u` - Initial guess for saddle
* `C` - Climbing image data structure
### Optional Fields
"""
function climbing_image(u₀, C::TC; options=SaddleOptions()) where {TC <: ClimbingImage}
u = copy(u₀);
gradV = copy(u);
u_trajectory = typeof(u)[copy(u)];
err_est = 0.;
for n in 1:options.nmax
C.integrate!(u, C.∇V_climb, C.Δt);
gradV = C.∇V(u);
        err_est = norm(gradV);
if(options.verbose && mod(n, options.print_iters)==0)
@printf("[%d]: error = %g\n", n, err_est);
end
if(options.save_trajectory)
push!(u_trajectory, copy(u));
end
if (err_est< options.tol)
if(!options.save_trajectory)
push!(u_trajectory, copy(u));
end
break
end
end
if(err_est >= options.tol)
@printf("ERROR: Did not converge after %d iterartions", options.nmax)
end
return u_trajectory
end
"""
`climbing_image!`: Run the climbing image method...
### Fields
* `u` - Initial guess for saddle
* `C` - Climbing image data structure
### Optional Fields
"""
function climbing_image!(u, C::TC; options=SaddleOptions()) where {TC <: ClimbingImage}
gradV = copy(u);
err_est = 0.;
for n in 1:options.nmax
C.integrate!(u, C.∇V_climb, C.Δt);
gradV = C.∇V(u);
        err_est = norm(gradV);
if(options.verbose && mod(n, options.print_iters)==0)
@printf("[%d]: error = %g\n", n, err_est);
end
if (err_est< options.tol)
break
end
end
if(err_est >= options.tol)
@printf("ERROR: Did not converge after %d iterartions", options.nmax)
end
u
end
|
{"hexsha": "8dc8e86d8426f5c84f3868a9dab744d73c516d0d", "size": 2535, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/saddle.jl", "max_stars_repo_name": "gideonsimpson/StringMethod.jl", "max_stars_repo_head_hexsha": "158e06c4ce72f2e5afcbbba5b4ae493311d09611", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/saddle.jl", "max_issues_repo_name": "gideonsimpson/StringMethod.jl", "max_issues_repo_head_hexsha": "158e06c4ce72f2e5afcbbba5b4ae493311d09611", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/saddle.jl", "max_forks_repo_name": "gideonsimpson/StringMethod.jl", "max_forks_repo_head_hexsha": "158e06c4ce72f2e5afcbbba5b4ae493311d09611", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.8673469388, "max_line_length": 88, "alphanum_fraction": 0.5854043393, "num_tokens": 779}
|
#=
https://www.allendowney.com/blog/2018/10/21/the-game-of-ur-problem/
"""
Here’s a probability puzzle to ruin your week.
In the Royal Game of Ur, players advance tokens along a track with 14 spaces.
To determine how many spaces to advance, a player rolls 4 dice with 4 sides. Two corners
on each die are marked; the other two are not. The total number of marked corners —
which is 0, 1, 2, 3, or 4 — is the number of spaces to advance.
For example, if the total on your first roll is 2, you could advance a token to space 2.
If you roll a 3 on the next roll, you could advance the same token to space 5.
Suppose you have a token on space 13. How many rolls did it take to get there?
"""
See:
https://www.allendowney.com/blog/lions-and-tigers-and-bears/
Allen Downey's solution:
http://nbviewer.jupyter.org/github/AllenDowney/ThinkBayes2/blob/master/solutions/game_of_ur_soln.ipynb?flush=true
cf ~/blog/game_of_ur_problem.blog
~/webppl/game_of_ur_problem.wppl
=#
using Turing, StatsPlots, DataFrames
include("jl_utils.jl")
@model function game_of_ur_problem(s=13)
numRolls ~ DiscreteUniform(3,20)
zroll = tzeros(numRolls)
for i in 1:numRolls
zroll[i] ~ DiscreteUniform(4)
end
sumRoll ~ DiscreteUniform(0,4*numRolls)
sumRoll ~ Dirac(sum(zroll))
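    # the second `~` on sumRoll ties it to sum(zroll) (an observe idiom);
    # the final Dirac conditions on the token having reached space s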
true ~ Dirac(sumRoll == s)
end
model = game_of_ur_problem(13)
num_chns = 4
# chns = sample(model, Prior(), MCMCThreads(), 10_000, num_chns)
# Got weird errors for MH()
# - BoundsError: attempt to access 8-element Array{Int64,1} at index [2:20]
#
# chns = sample(model, MH(), 10_000)
# chns = sample(model, MH(), MCMCThreads(), 40_000, num_chns)
# chns = sample(model, PG(15), MCMCThreads(), 10_000, num_chns)
# chns = sample(model, PG(20), 1_000)
# chns = sample(model, SMC(1000), MCMCThreads(), 10_000, num_chns)
# chns = sample(model, SMC(1000), 10_000)
# chns = sample(model, IS(), MCMCThreads(), 10_000, num_chns)
chns = sample(model, IS(), 10_000)
# chns = sample(model, NUTS(1000,0.65), 1_000)
# chns = sample(model, HMC(0.1,5), 1_000)
# chns = sample(model, Gibbs(MH(:gender),NUTS(1000,0.65,:height)), 1_000)
# chns = sample(model, Gibbs(MH(:gender),NUTS(10,0.65,:height)), 1_000)
# chns = sample(model, Gibbs(MH(:gender),HMC(0.1,5,:height)), 1_000)
# chns = sample(model, Gibbs(PG(10,:gender),HMC(0.1,5,:height)), 1_000)
# chns = sample(model, Gibbs(MH(:gender),NUTS(1_000,0.65,:height)), 1_000)
display(chns)
# display(plot(chns))
show_var_dist_pct(chns, :numRolls,20)
println()
show_var_dist_pct(chns, :sumRoll,20)
|
{"hexsha": "48f137f0459e519c6d3d6d7641b3b4fdae44aecb", "size": 2586, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "julia/turing/game_of_ur_problem.jl", "max_stars_repo_name": "tias/hakank", "max_stars_repo_head_hexsha": "87b7f180c9393afce440864eb9e5fb119bdec1a4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 279, "max_stars_repo_stars_event_min_datetime": "2015-01-10T09:55:35.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-28T02:34:03.000Z", "max_issues_repo_path": "julia/turing/game_of_ur_problem.jl", "max_issues_repo_name": "tias/hakank", "max_issues_repo_head_hexsha": "87b7f180c9393afce440864eb9e5fb119bdec1a4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 10, "max_issues_repo_issues_event_min_datetime": "2017-10-05T15:48:50.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-20T12:06:52.000Z", "max_forks_repo_path": "julia/turing/game_of_ur_problem.jl", "max_forks_repo_name": "tias/hakank", "max_forks_repo_head_hexsha": "87b7f180c9393afce440864eb9e5fb119bdec1a4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 83, "max_forks_repo_forks_event_min_datetime": "2015-01-20T03:44:00.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-13T23:53:06.000Z", "avg_line_length": 31.156626506, "max_line_length": 116, "alphanum_fraction": 0.6937354988, "num_tokens": 879}
|
#!/usr/bin/env python3
# ------------------------------------------------------------------------ 79->
# Author: ${name=Kelcey Damage}
# Python: 3.5+
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Doc
# ------------------------------------------------------------------------ 79->
#
# {
# 'column': 'columnA',
# 'method': 'quicksort',
# 'axis': 0
# }
#
# Imports
# ------------------------------------------------------------------------ 79->
import numpy as np
from rtl.common.task import Task
# Globals
# ------------------------------------------------------------------------ 79->
# Classes
# ------------------------------------------------------------------------ 79->
class Sort(Task):
def __init__(self, kwargs, content):
super(Sort, self).__init__(kwargs, content)
def sort(self):
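        # in-place numpy sort of the structured array: `order` names the
        # field (column) to sort by, `kind` picks the algorithm (e.g. 'quicksort')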
self.ndata.sort(
axis=self.axis,
kind=self.method,
order=self.column.decode()
)
return self
# Functions
# ------------------------------------------------------------------------ 79->
def sort(kwargs, contents):
return Sort(kwargs, contents).sort().getContents()
# Main
# ------------------------------------------------------------------------ 79->
|
{"hexsha": "be8df101aa8bf888bb82a233b84b65ac16f85bb4", "size": 1741, "ext": "py", "lang": "Python", "max_stars_repo_path": "rtl/tasks/sort.py", "max_stars_repo_name": "kelceydamage/raspi-tasks", "max_stars_repo_head_hexsha": "18aa323e3e2428c998b7472c226d05a00c8ae8c2", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-08-10T00:27:45.000Z", "max_stars_repo_stars_event_max_datetime": "2019-08-10T00:27:45.000Z", "max_issues_repo_path": "rtl/tasks/sort.py", "max_issues_repo_name": "kelceydamage/raspi-tasks", "max_issues_repo_head_hexsha": "18aa323e3e2428c998b7472c226d05a00c8ae8c2", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "rtl/tasks/sort.py", "max_forks_repo_name": "kelceydamage/raspi-tasks", "max_forks_repo_head_hexsha": "18aa323e3e2428c998b7472c226d05a00c8ae8c2", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.0172413793, "max_line_length": 79, "alphanum_fraction": 0.4618035612, "include": true, "reason": "import numpy", "num_tokens": 332}
|
import numpy as np
import warnings
from scipy.io import loadmat
from LFSpy import LocalFeatureSelection
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import LinearSVC
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.pipeline import Pipeline
from sklearn import datasets
import matplotlib.pyplot as plt
np.random.seed(905)
def load_dataset(name, m=0):
'''
Loads a test/demo dataset.
'''
print('Loading dataset ' + name + '...')
    if name == 'sample':
mat = loadmat('matlab_Data')
training_data = mat['Train'].T
training_labels = mat['TrainLables'][0]
testing_data = mat['Test'].T
testing_labels = mat['TestLables'][0]
    elif name == 'iris':
# we only take the first two classes for binary classification
train_idx = np.arange(0, 100, 2)
test_idx = np.arange(1, 100, 2)
iris = datasets.load_iris()
if m > 0:
iris.data = add_noise_vars(iris.data, m)
training_data = iris.data[train_idx,:]
training_labels = iris.target[train_idx]
testing_data = iris.data[test_idx,:]
testing_labels = iris.target[test_idx]
return training_data, training_labels, testing_data, testing_labels
def add_noise_vars(x, m, std_range=[0,3]):
'''
Adds m Gaussian noise variables to data array x. Gaussian distribution have
zero mean with standard deviations sampled from a uniform distribution on
std_range.
'''
n = x.shape[0]
stds = np.random.uniform(low=std_range[0], high=std_range[1], size=m)
noise_vars = [np.random.normal(loc=0.0, scale=s, size=[n,]) for s in stds]
return np.hstack((x, np.stack(noise_vars).T))
def results_lfspy(x_train, y_train, x_test, y_test):
'''
Trains an tests and LFS model using default parameters on the given dataset.
'''
print('Training and testing an LFS model with default parameters.\nThis may take a few minutes...')
lfs = LocalFeatureSelection(rr_seed=777)
pipeline = Pipeline([('classifier', lfs)])
pipeline.fit(x_train, y_train)
y_pred = pipeline.predict(x_test)
score = pipeline.score(x_test, y_test)
return score, y_pred
def results_rforest(x_train, y_train, x_test, y_test):
print('Training and testing a Random Forest with default parameters.')
rfc = RandomForestClassifier(random_state=777)
pipeline = Pipeline([('classifier', rfc)])
pipeline.fit(x_train, y_train)
y_pred = pipeline.predict(x_test)
score = pipeline.score(x_test, y_test)
return score, y_pred
def results_fsvm(x_train, y_train, x_test, y_test):
print('Training and testing a SVM with default parameters with F-stat feature selection (25% of features selected).')
svm = LinearSVC(random_state=777)
sel = SelectKBest(f_classif, k=int(0.25*x_train.shape[1]))
pipeline = Pipeline([('feat_sel', sel), ('classifier', svm)])
pipeline.fit(x_train, y_train)
y_pred = pipeline.predict(x_test)
score = pipeline.score(x_test, y_test)
return score, y_pred
training_data, training_labels, testing_data, testing_labels = load_dataset('sample')
score_lfs, y_pred_lfs = results_lfspy(training_data, training_labels, testing_data, testing_labels)
score_rfc, y_pred_rfc = results_rforest(training_data, training_labels, testing_data, testing_labels)
score_svm, y_pred_svm = results_fsvm(training_data, training_labels, testing_data, testing_labels)
training_data, training_labels, testing_data, testing_labels = load_dataset('iris')
score_lfs_iris, y_pred_lfs_iris = results_lfspy(training_data, training_labels, testing_data, testing_labels)
score_rfc_iris, y_pred_rfc_iris = results_rforest(training_data, training_labels, testing_data, testing_labels)
score_svm_iris, y_pred_svm_iris = results_fsvm(training_data, training_labels, testing_data, testing_labels)
# Plot the comparison of results
scores = [score_lfs, score_rfc, score_svm]
scores_iris = [score_lfs_iris, score_rfc_iris, score_svm_iris]
def plotScores(scores, title=None):
'''
Plot classification scores.
'''
plt.figure()
plt.bar(['LFS','RFC','SVM'], scores)
plt.ylim([0,1])
for i, v in enumerate(scores):
plt.text(i - 0.1, 0.4, '{:.{}f}'.format(v,2), size=12)
plt.title(title, fontsize=14)
plt.savefig(title+'.png', bbox_inches='tight', pad_inches=0.1, dpi=300)
return None
plotScores(scores, 'Sample Data Classification Accuracies')
plotScores(scores_iris, 'Iris Data Classification Accuracies')
# %% Compare across number of noise variables on Iris dataset
Score_LFS = []
Score_SVM = []
Score_RFC = []
mlist = np.arange(0, 1001, 25)
for m in mlist:
    training_data, training_labels, testing_data, testing_labels = load_dataset('iris', m=m)
    s1, _ = results_lfspy(training_data, training_labels, testing_data, testing_labels)
    s2, _ = results_rforest(training_data, training_labels, testing_data, testing_labels)
    s3, _ = results_fsvm(training_data, training_labels, testing_data, testing_labels)
    Score_LFS.append(s1)
    Score_RFC.append(s2)
    Score_SVM.append(s3)
# Plot the results
plt.figure()
plt.plot(mlist, Score_LFS)
plt.plot(mlist, Score_RFC)
plt.plot(mlist, Score_SVM)
plt.vlines(100, 0, 1.2, linestyles='dashed')
plt.ylim([0,1.2])
#plt.xlabel('Number of Noise Features')
plt.title('Classification Accuracy by Number of Added Noise Variables', fontsize=14)
plt.legend(['LFS','Random Forest','SVM'], loc='lower right')
plt.savefig('IrisData_noise_Results.png', bbox_inches='tight', pad_inches=0.1, dpi=300)
|
{"hexsha": "0aaff3e0fe0f1231047b686e10d8d50b36d945b4", "size": 5648, "ext": "py", "lang": "Python", "max_stars_repo_path": "LFSpy/comparisons/comparisons.py", "max_stars_repo_name": "sauln/LFSpy", "max_stars_repo_head_hexsha": "91eed1577603e619a160a62d209d6c45543438f8", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-03-27T22:29:40.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-24T23:57:32.000Z", "max_issues_repo_path": "LFSpy/comparisons/comparisons.py", "max_issues_repo_name": "sauln/LFSpy", "max_issues_repo_head_hexsha": "91eed1577603e619a160a62d209d6c45543438f8", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "LFSpy/comparisons/comparisons.py", "max_forks_repo_name": "sauln/LFSpy", "max_forks_repo_head_hexsha": "91eed1577603e619a160a62d209d6c45543438f8", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.4039735099, "max_line_length": 121, "alphanum_fraction": 0.7144121813, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1441}
|
import os, math, multiprocessing
from os.path import join
from copy import copy
import numpy as np
from PIL import Image
import visual_words
def get_feature_from_wordmap(opts, wordmap):
'''
Compute histogram of visual words.
[input]
* opts : options
* wordmap : numpy.ndarray of shape (H,W)
[output]
* hist: numpy.ndarray of shape (K)
'''
K = opts.K
# ----- TODO -----
height = wordmap.shape[0]
width = wordmap.shape[1]
hist_array = wordmap.reshape(height*width,)
    # fix the bin range so the bins always cover word ids 0..K-1, even when
    # some dictionary words never appear in this wordmap
    hist, bin_edges = np.histogram(hist_array, bins=K, range=(0, K))
    hist = hist/sum(hist)
return hist
def get_feature_from_wordmap_SPM(opts, wordmap):
'''
Compute histogram of visual words using spatial pyramid matching.
[input]
* opts : options
* wordmap : numpy.ndarray of shape (H,W)
[output]
* hist_all: list of length (K*(4^L-1)/3)
'''
K = opts.K
L = opts.L
height = wordmap.shape[0]
width = wordmap.shape[1]
hist_SPM = []
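    # layer i splits the wordmap into 2^i x 2^i cells; each cell's histogram
    # is scaled by a per-layer weight and concatenated, and the stacked vector
    # is renormalized at the end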
for i in range(L):
num_row = num_col = pow(2,i)
len_row = height//num_row
len_col = width//num_col
if(i == 0 or i == 1):
weight = 2**(-i)
else:
weight = 2**(i-L-1)
for r in range(num_row):
for c in range(num_col):
wordmap_sub = wordmap[len_row*r:len_row*(r+1),len_col*c:len_col*(c+1)]
hist_sub = get_feature_from_wordmap(opts, wordmap_sub)*weight
hist_sub_list = list(hist_sub)
hist_SPM = hist_SPM + hist_sub_list
hist_all = hist_SPM/sum(hist_SPM)
return hist_all
def get_image_feature(opts, img_path, dictionary):
'''
Extracts the spatial pyramid matching feature.
[input]
* opts : options
* img_path : path of image file to read
* dictionary: numpy.ndarray of shape (K, 3F)
[output]
* feature: list of length K
'''
img = Image.open(img_path)
img = np.array(img).astype(np.float32)/255
wordmap = visual_words.get_visual_words(opts, img, dictionary)
features = get_feature_from_wordmap_SPM(opts, wordmap)
return features
def build_recognition_system(opts, n_worker=1):
'''
Creates a trained recognition system by generating training features from all training images.
[input]
* opts : options
* n_worker : number of workers to process in parallel
[saved]
* features: numpy.ndarray of shape (N,M)
* labels: numpy.ndarray of shape (N)
* dictionary: numpy.ndarray of shape (K,3F)
* SPM_layer_num: number of spatial pyramid layers
'''
data_dir = opts.data_dir
out_dir = opts.out_dir
SPM_layer_num = opts.L
train_files = open(join(data_dir, 'train_files.txt')).read().splitlines()
train_labels = np.loadtxt(join(data_dir, 'train_labels.txt'), np.int32)
dictionary = np.load(join(out_dir, 'dictionary.npy'))
num_pic = len(train_files)
features_all = []
print("Start computing feature pyramids for all images")
for i in range(num_pic):
img_path = join(opts.data_dir, train_files[i])
features = get_image_feature(opts, img_path, dictionary)
features_all.append(features)
progress = (i/num_pic) * 100
if(i % 10 == 0):
print("progress is: %.2f" % progress, "%.")
np.savez_compressed(join(out_dir, 'trained_system.npz'),
features=features_all,
labels=train_labels,
dictionary=dictionary,
SPM_layer_num=SPM_layer_num)
def distance_to_set(word_hist, histograms):
'''
Compute similarity between a histogram of visual words with all training image histograms.
[input]
* word_hist: numpy.ndarray of shape (K)
* histograms: numpy.ndarray of shape (N,K)
[output]
    * label_index: index of the training histogram most similar to word_hist
'''
intersection = np.minimum(histograms, word_hist)
similarity = np.sum(intersection, axis = 1)
label_index = np.argmax(similarity)
return label_index
def evaluate_recognition_system(opts, n_worker=1):
'''
Evaluates the recognition system for all test images and returns the confusion matrix.
[input]
* opts : options
* n_worker : number of workers to process in parallel
[output]
* conf: numpy.ndarray of shape (8,8)
* accuracy: accuracy of the evaluated system
'''
data_dir = opts.data_dir
out_dir = opts.out_dir
trained_system = np.load(join(out_dir, 'trained_system.npz'))
dictionary = trained_system['dictionary']
# using the stored options in the trained system instead of opts.py
test_opts = copy(opts)
test_opts.K = dictionary.shape[0]
test_opts.L = trained_system['SPM_layer_num']
test_files = open(join(data_dir, 'test_files.txt')).read().splitlines()
test_labels = np.loadtxt(join(data_dir, 'test_labels.txt'), np.int32)
histograms = trained_system['features']
train_labels = trained_system['labels']
num_pic = len(test_files)
conf = np.zeros((8,8))
accuracy = 0
print("Start evaluating")
for i in range(num_pic):
img_path = join(opts.data_dir, test_files[i])
        word_hist = get_image_feature(test_opts, img_path, dictionary)
label_index = distance_to_set(word_hist, histograms)
label_predict = train_labels[label_index]
label_true = test_labels[i]
progress = (i/num_pic) * 100
conf[label_true, label_predict] += 1
accuracy = np.trace(conf)/np.sum(conf)
print("progress is: ", progress, "%. ", "Accuracy is:", int(accuracy*100), "%")
return conf, accuracy
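
# Minimal usage sketch (hypothetical `opts` object; assumes the data layout
# and the dictionary.npy produced by visual_words, as described above):
#
#     from opts import get_opts          # hypothetical options helper
#     opts = get_opts()
#     build_recognition_system(opts)     # writes trained_system.npz
#     conf, accuracy = evaluate_recognition_system(opts)
#     print(conf, accuracy)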
|
{"hexsha": "0f1e781e263a502af07d058bc61567c304d1e9d7", "size": 5878, "ext": "py", "lang": "Python", "max_stars_repo_path": "visual_recog.py", "max_stars_repo_name": "zhenweil/Bag-of-words-for-Scene-Classification", "max_stars_repo_head_hexsha": "7db72cb89c2f07edcb41ce2caa69b8fac0e54ab1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-12-24T05:53:46.000Z", "max_stars_repo_stars_event_max_datetime": "2020-12-24T05:53:46.000Z", "max_issues_repo_path": "visual_recog.py", "max_issues_repo_name": "zhenweil/Bag-of-words-for-Scene-Classification", "max_issues_repo_head_hexsha": "7db72cb89c2f07edcb41ce2caa69b8fac0e54ab1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "visual_recog.py", "max_forks_repo_name": "zhenweil/Bag-of-words-for-Scene-Classification", "max_forks_repo_head_hexsha": "7db72cb89c2f07edcb41ce2caa69b8fac0e54ab1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.772972973, "max_line_length": 99, "alphanum_fraction": 0.6153453556, "include": true, "reason": "import numpy", "num_tokens": 1409}
|
# Chemfiles.jl, a modern library for chemistry file reading and writing
# Copyright (C) Guillaume Fraux and contributors -- BSD license
export add_atom!, remove_atom!
export has_velocities, add_velocities!, positions, velocities
export set_cell!, set_topology!, set_step!, guess_bonds!
export distance, dihedral, out_of_plane
__ptr(frame::Frame) = __ptr(frame.__handle)
__const_ptr(frame::Frame) = __const_ptr(frame.__handle)
# A small wrapper around Array{Float64, 2}, keeping a reference to the
# corresponding frame to prevent garbage collection (see issue
# https://github.com/chemfiles/Chemfiles.jl/issues/37)
struct ChemfilesArray <: AbstractArray{Float64,2}
data::Array{Float64,2}
parent::Frame
end
# Implement the Array interface for ChemfilesArray
@inline Base.size(A::ChemfilesArray) = size(A.data)
@inline Base.getindex(A::ChemfilesArray, I::Int) = getindex(A.data, I)
@inline Base.getindex(A::ChemfilesArray, I::Int...) = getindex(A.data, I...)
@inline Base.setindex!(A::ChemfilesArray, v, I::Int) = setindex!(A.data, v, I)
@inline Base.setindex!(A::ChemfilesArray, v, I::Int...) = setindex!(A.data, v, I...)
"""
Create a new empty `Frame`.
"""
function Frame()
ptr = @__check_ptr(lib.chfl_frame())
return Frame(CxxPointer(ptr, is_const=false))
end
"""
Get the number of atoms in the `frame`.
"""
function Base.size(frame::Frame)
count = Ref{UInt64}(0)
__check(lib.chfl_frame_atoms_count(__const_ptr(frame), count))
return Int(count[])
end
"""
Get the number of atoms in the `frame`.
"""
function Base.length(frame::Frame)
size(frame)
end
"""
Resize the positions and the velocities in the `frame`, to make space for
`natoms` atoms. This function may invalidate any pointer to the positions or
the velocities if the new size is bigger than the old one. In all cases,
previous data is conserved. This function conserves the presence or absence
of velocities.
"""
function Base.resize!(frame::Frame, natoms::Integer)
__check(lib.chfl_frame_resize(__ptr(frame), UInt64(natoms)))
end
"""
Get the positions in a `Frame` as an array. The positions are readable and
writable from this array. If the frame is resized (by writing to it, or calling
`resize!`), the array is invalidated.
"""
function positions(frame::Frame)
ptr = Ref{Ptr{Float64}}()
natoms = Ref{UInt64}(0)
__check(lib.chfl_frame_positions(__ptr(frame), ptr, natoms))
data = unsafe_wrap(Array{Float64,2}, ptr[], (3, Int(natoms[])); own=false)
return ChemfilesArray(data, frame)
end
"""
Get the velocities in a `Frame` as an array. The velocities are readable and
writable from this array. If the frame is resized (by writing to it, or calling
`resize!`), the array is invalidated.
If the frame does not have velocities, this function will error. You can use
`add_velocities!` to add velocities to a frame before calling this function.
"""
function velocities(frame::Frame)
ptr = Ref{Ptr{Float64}}()
natoms = Ref{UInt64}(0)
__check(lib.chfl_frame_velocities(__ptr(frame), ptr, natoms))
data = unsafe_wrap(Array{Float64,2}, ptr[], (3, Int(natoms[])); own=false)
return ChemfilesArray(data, frame)
end
"""
Add velocities to this `frame`. The storage is initialized with the result of
`size(frame)` as the number of atoms. If the frame already has velocities, this
does nothing.
"""
function add_velocities!(frame::Frame)
__check(lib.chfl_frame_add_velocities(__ptr(frame)))
return nothing
end
"""
Check if a `frame` contains velocity data or not.
"""
function has_velocities(frame::Frame)
result = Ref{UInt8}(0)
__check(lib.chfl_frame_has_velocities(__const_ptr(frame), result))
return convert(Bool, result[])
end
"""
Set the `cell` associated with a `frame`.
"""
function set_cell!(frame::Frame, cell::UnitCell)
__check(lib.chfl_frame_set_cell(__ptr(frame), __const_ptr(cell)))
return nothing
end
"""
Set the `topology` associated with a `frame`.
"""
function set_topology!(frame::Frame, topology::Topology)
__check(lib.chfl_frame_set_topology(__ptr(frame), __const_ptr(topology)))
return nothing
end
"""
Get the `frame` step, *i.e.* the frame number in the trajectory.
"""
function Base.step(frame::Frame)
result = Ref{UInt64}(0)
__check(lib.chfl_frame_step(__const_ptr(frame), result))
return result[]
end
"""
Set the `frame` step to `step`.
"""
function set_step!(frame::Frame, step::Integer)
__check(lib.chfl_frame_set_step(__ptr(frame), UInt64(step)))
return nothing
end
"""
Guess the bonds, angles, and dihedrals in the `frame` using a distance criterion.
"""
function guess_bonds!(frame::Frame)
__check(lib.chfl_frame_guess_bonds(__ptr(frame)))
return nothing
end
"""
Calculate the distance between two atoms.
"""
function distance(frame::Frame, i::Integer, j::Integer)
result = Ref{Float64}(0)
__check(lib.chfl_frame_distance(__const_ptr(frame), UInt64(i), UInt64(j), result))
return result[]
end
"""
Calculate the angle made by three atoms.
"""
function Base.angle(frame::Frame, i::Integer, j::Integer, k::Integer)
result = Ref{Float64}(0)
__check(lib.chfl_frame_angle(__const_ptr(frame), UInt64(i), UInt64(j), UInt64(k), result))
return result[]
end
"""
Calculate the dihedral (torsional) angle made by four unbranched atoms.
"""
function dihedral(frame::Frame, i::Integer, j::Integer, k::Integer, m::Integer)
result = Ref{Float64}(0)
__check(lib.chfl_frame_dihedral(__const_ptr(frame), UInt64(i), UInt64(j), UInt64(k), UInt64(m), result))
return result[]
end
"""
Calculate the out-of-plane (improper) angle made by four atoms.
"""
function out_of_plane(frame::Frame, i::Integer, j::Integer, k::Integer, m::Integer)
result = Ref{Float64}(0)
__check(lib.chfl_frame_out_of_plane(__const_ptr(frame), UInt64(i), UInt64(j), UInt64(k), UInt64(m), result))
return result[]
end
"""
Add an `atom` and the corresponding `position` and `velocity` data to a `frame`.
"""
function add_atom!(frame::Frame, atom::Atom, position::Vector{Float64}, velocity::Vector{Float64}=Float64[0.0,0.0,0.0])
__check(lib.chfl_frame_add_atom(__ptr(frame), __const_ptr(atom), position, velocity))
return nothing
end
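
# Usage sketch (illustrative only, not part of the library API):
#
#     frame = Frame()
#     add_atom!(frame, Atom("O"), [0.0, 0.0, 0.0])
#     add_atom!(frame, Atom("H"), [0.74, 0.0, 0.0])
#     @assert size(frame) == 2
#     pos = positions(frame)    # 3×2 ChemfilesArray, writable in place
#     pos[1, 2] += 0.1          # shifts the second atom along x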
"""
Remove the `atom` at `index` from the `frame`.
This function modifies the indexes of all the atoms after `index`, and invalidates
any array obtained using `positions` or `velocities`.
"""
function remove_atom!(frame::Frame, index::Integer)
__check(lib.chfl_frame_remove(__ptr(frame), UInt64(index)))
return nothing
end
"""
Set a named property for the given `Frame`.
"""
function set_property!(frame::Frame, name::String, value)
property = Property(value)
__check(lib.chfl_frame_set_property(
__ptr(frame), pointer(name), __const_ptr(property)
))
return nothing
end
"""
Get a named property for the given `Frame`.
"""
function property(frame::Frame, name::String)::PropertyValue
ptr = lib.chfl_frame_get_property(__const_ptr(frame), pointer(name))
return extract(Property(CxxPointer(ptr, is_const=false)))
end
"""
Get the number of properties associated with a frame.
"""
function properties_count(frame::Frame)
count = Ref{UInt64}(0)
__check(lib.chfl_frame_properties_count(__const_ptr(frame), count))
return Int(count[])
end
"""
Get the names of all properties associated with a frame.
"""
function list_properties(frame::Frame)
count = UInt64(properties_count(frame))
names = Array{Ptr{UInt8}}(undef, count)
__check(lib.chfl_frame_list_properties(__const_ptr(frame), pointer(names), count))
return map(unsafe_string, names)
end
"""
Add an additional bond to the `Frame`'s `Topology`.
"""
function add_bond!(frame::Frame, i::Integer, j::Integer, order=nothing)
if order === nothing
__check(lib.chfl_frame_add_bond(__ptr(frame), UInt64(i), UInt64(j)))
else
# Check that the order is a valid BondOrder
order = BondOrder(Integer(order))
__check(lib.chfl_frame_bond_with_order(
__ptr(frame), UInt64(i), UInt64(j), lib.chfl_bond_order(order)
))
end
return nothing
end
"""
Remove a bond from the `Frame`'s `Topology`.
"""
function remove_bond!(frame::Frame, i::Integer, j::Integer)
__check(lib.chfl_frame_remove_bond(__ptr(frame), UInt64(i), UInt64(j)))
return nothing
end
"""
Remove all bonds, angles and dihedral angles from the `Frame`'s `Topology`.
"""
function clear_bonds!(frame::Frame)
__check(lib.chfl_frame_clear_bonds(__ptr(frame)))
return nothing
end
"""
Add a residue to the `Frame`'s `Topology`.
"""
function add_residue!(frame::Frame, residue::Residue)
__check(lib.chfl_frame_add_residue(__ptr(frame), __const_ptr(residue)))
return nothing
end
"""
Make a deep copy of a `Frame`.
"""
function Base.deepcopy(frame::Frame)
ptr = lib.chfl_frame_copy(__const_ptr(frame))
return Frame(CxxPointer(ptr, is_const=false))
end
# Indexing support
"""
Get the `Atom` at the given `index` of the `frame`. By default this creates a
copy so as to be safe. To not create a copy, use `@view frame[index]`.
See also [`Base.view(frame::Frame, index::Integer)`](@ref)
"""
Base.getindex(frame::Frame, index::Integer) = Atom(frame, index)
"""
Get the `Atom` at the given `index` of the `frame` without creating a copy.
!!! warning
    This function can lead to undefined behavior when keeping the returned `Atom`
    around. With code like this:
```
frame = Frame()
resize!(frame, 3)
atom = @view frame[0]
resize!(frame, 4)
```
`atom` contains a pointer to memory owned by `frame`, but this
pointer has been invalidated when resizing the frame. Using `atom` after
the second call to `resize!` might trigger undefined behavior (segmentation
fault in the best case, silent data corruption in the worst case).
"""
function Base.view(frame::Frame, index::Integer)
ptr = @__check_ptr(lib.chfl_atom_from_frame(__ptr(frame), UInt64(index)))
atom = Atom(CxxPointer(ptr, is_const=false))
return atom
end
# Iteration support
function Base.iterate(frame::Frame, atom=0)
if atom >= size(frame)
return nothing
else
return (frame[atom], atom + 1)
end
end
Base.eltype(::Frame) = Atom
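
# Iteration sketch (illustrative): indices are 0-based, matching the
# underlying chemfiles C API, and each element is a copied `Atom`:
#
#     for atom in frame
#         # ... inspect the atom here ...
#     end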
|
{"hexsha": "a5f032b034b16004ace3e9c77316ce123d5e7976", "size": 10201, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/Frame.jl", "max_stars_repo_name": "Ruibin-Liu/Chemfiles.jl", "max_stars_repo_head_hexsha": "2c271b4906e6db5c453d344df527dc69775ca488", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-03-02T14:31:11.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-02T14:31:11.000Z", "max_issues_repo_path": "src/Frame.jl", "max_issues_repo_name": "Ruibin-Liu/Chemfiles.jl", "max_issues_repo_head_hexsha": "2c271b4906e6db5c453d344df527dc69775ca488", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/Frame.jl", "max_forks_repo_name": "Ruibin-Liu/Chemfiles.jl", "max_forks_repo_head_hexsha": "2c271b4906e6db5c453d344df527dc69775ca488", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.0029411765, "max_line_length": 119, "alphanum_fraction": 0.7132634055, "num_tokens": 2630}
|
program test_netcdf_layer
use kinds
use netcdf
use nc_diag_write_mod
implicit none
integer :: i
real(r_single) :: f
real(r_double) :: d
character(len=100) :: str_header
character(len=100) :: str_chaninfo
character(len=100) :: str_metadata
character(len=100) :: str_data2d
character(len=10) :: str_chaninfo_fixed
character(len=10) :: str_metadata_fixed
character(len=10) :: str_data2d_fixed(3)
character(len=11) :: str_chaninfo_fixed_bad
character(len=11) :: str_metadata_fixed_bad
character(len=11) :: str_data2d_fixed_bad(3)
f = 1.234
d = 2.34567890
! Enable info messages
call nc_set_action_display(.TRUE.)
call nc_set_info_display(.TRUE.)
call nc_diag_set_trim(.FALSE.)
!-----------------------------------------------------------------
! Fixed checks
!-----------------------------------------------------------------
call nc_diag_init("test_fixed.nc")
call nc_diag_chaninfo_dim_set(5)
str_chaninfo_fixed = "one"
str_metadata_fixed = "two"
    ! note: string literals are blank-padded so that all elements of an
    ! array constructor have the same character length, as the standard requires
    str_data2d_fixed = (/ "three", "four ", "five " /)
str_chaninfo_fixed_bad = "one"
str_metadata_fixed_bad = "two"
    str_data2d_fixed_bad = (/ "three", "four ", "five " /)
call nc_diag_set_trim(.FALSE.)
call nc_diag_chaninfo("chaninfo_strfix", str_chaninfo_fixed)
call nc_diag_chaninfo("chaninfo_strfix1", str_chaninfo_fixed)
str_chaninfo_fixed = "three"
call nc_diag_chaninfo("chaninfo_strfix", str_chaninfo_fixed)
call nc_diag_metadata("metadata_strfix", str_metadata_fixed)
call nc_diag_metadata("metadata_strfix1", str_metadata_fixed)
str_metadata_fixed = "four"
call nc_diag_metadata("metadata_strfix", str_metadata_fixed)
call nc_diag_data2d("data2d_strfix", str_data2d_fixed)
call nc_diag_data2d("data2d_strfix1", str_data2d_fixed)
str_data2d_fixed = (/ "six", "seven", "eight" /)
call nc_diag_data2d("data2d_strfix", str_data2d_fixed)
! This would cause errors, due to the change in un-trimmed string
! length! (Uncomment to trigger the error!)
!call nc_diag_chaninfo("chaninfo_strfix", str_chaninfo_fixed_bad)
!call nc_diag_metadata("metadata_strfix", str_metadata_fixed_bad)
!call nc_diag_data2d("data2d_strfix", str_data2d_fixed_bad)
call nc_diag_write
! Fluid mode
call nc_diag_set_trim(.TRUE.)
call nc_diag_init("test.nc")
! Test init checking + corresponding error:
!call nc_diag_init("test2.nc")
! Uncomment below line to enable strict mode, aka strict
! variable bounds checking. When strict mode is enabled, if any
! variables have different lengths from each other, or if there are
! any differences between the variable rows (e.g. an uneven array),
! an error will occur and the program will halt.
! call nc_diag_set_strict(.TRUE.)
print *, "===================="
print *, "Single:"
print *, "===================="
!call nc_diag_header("test1", (/ 123, 234, 345, 456, 567, 678, 789 /))
!call nc_diag_header("test2", (/ 123, 234, 345, 456, 567, 678, 789 /))
! 100,000,000
call nc_diag_chaninfo_dim_set(10)
do i = 1, 10
call nc_diag_chaninfo("chaninfosimple1", i)
call nc_diag_chaninfo("chaninfosimple2", i*2)
call nc_diag_chaninfo("chaninfosimple4_float", f + 1.00)
call nc_diag_chaninfo("chaninfosimple5_double", d + 1.00)
call nc_diag_metadata("metadatasimple1", i)
!call nc_diag_metadata("metadatasimple2", i*2)
!call nc_diag_metadata("metadatasimple4_float", f + 1.00 + i)
!call nc_diag_metadata("metadatasimple4_float2", f + 2.00 + i)
!call nc_diag_metadata("metadatasimple5_double", d + 1.00 + i)
call nc_diag_data2d("data2dsimple1", (/ i, i+1, i+2 /))
call nc_diag_data2d("data2dsimple2", (/ i*2, i*3, i*4 /))
call nc_diag_data2d("data2dsimple4_float", (/ f + 1.00 + i, f + 2.00 + i, f + 3.00 + i, f + 4.00 + i /))
call nc_diag_data2d("data2dsimple4_float2", (/ f + 2.00 + i, f + 4.00 + i /))
call nc_diag_data2d("data2dsimple5_double", (/ d + 1.00 + i /))
call nc_diag_data2d("data2dsimple99", (/ i /))
write(str_chaninfo, "(A, I0)") "ci6_", i
call nc_diag_chaninfo("chaninfosimple6_str", str_chaninfo)
write(str_metadata, "(A, I0)") "hellometa_", i
call nc_diag_metadata("metadatasimple6_str", str_metadata)
end do
do i = 1, 9
write(str_chaninfo, "(A, I0)") "ci_strings_", i
call nc_diag_chaninfo("chaninfosimple7_str", str_chaninfo)
end do
!print *, "str_chaninfo:"
!print *, str_chaninfo
do i = 1, 9
call nc_diag_chaninfo("chaninfosimple3_notcomplete", i*3)
end do
!do i = 1, 10000000
do i = 1, 10000!000
call nc_diag_header("headertestsimple", 123)
call nc_diag_header("headertestsimple2_float", f)
call nc_diag_header("headertestsimple3_double", d)
write(str_header, "(A, I0)") "header_", i
call nc_diag_header("headertestsimple4_str", str_header)
!call nc_diag_metadata("metadatasimple7_big", i*2)
end do
do i = 1, 10!0000
write(str_metadata, "(A, I0)") "morehellometa_", i
call nc_diag_metadata("metadatasimple8_str", str_metadata)
write(str_data2d, "(A, I0)") "data2d_", i
call nc_diag_data2d("data2dsimple6_str", (/ str_data2d, "fill1", "fill2" /))
! This is broken... but it's an interesting testcase, as it breaks
! a LOT of stuff!
! index_llong = i needs to be commented out
!call nc_diag_data2d("data2dsimple7", index_llong, (/ i, i+1, i+2 /))
call nc_diag_data2d("data2dsimple7", (/ i, i+1, i+2 /))
end do
! Add one entry... so we can test out valid/invalid data adding
! below!
call nc_diag_chaninfo("chaninfosimple8_str", "test1234")
! ...and another one, for fun with buffered writing!
call nc_diag_chaninfo("chaninfosimple9_buf", 3)
! Appending data variables
call nc_diag_chaninfo("chaninfosimple10_notcomplete", 5678)
call nc_diag_metadata("metadata_notcomplete", 1234)
call nc_diag_metadata("metadata_notcomplete", 2234)
call nc_diag_metadata("metadata_notcomplete", 3234)
call nc_diag_metadata("metadata_notcomplete", 4234)
call nc_diag_metadata("metadata_notcomplete", 5234)
call nc_diag_metadata("metadata_notcomplete", 6234)
call nc_diag_metadata("metadata_notcomplete", 7234)
call nc_diag_metadata("metadata_notcomplete", 8234)
call nc_diag_metadata("metadata_notcomplete", 9234)
call nc_diag_metadata("metadata_notcomplete", 1234)
call nc_diag_metadata("metadata_notcomplete", 2234)
call nc_diag_metadata("metadata_notcomplete", 3234)
call nc_diag_metadata("metadata_notcomplete", 4234)
call nc_diag_metadata("metadata_notcomplete", 5234)
call nc_diag_metadata("metadata_notcomplete", 6234)
call nc_diag_metadata("metadata_notcomplete", 7234)
call nc_diag_metadata("metadata_notcomplete", 8234)
call nc_diag_metadata("metadata_str_notcomplete", "abcd")
call nc_diag_metadata("metadata_str_notcomplete", "abc2")
call nc_diag_metadata("metadata_str_notcomplete", "abc3")
call nc_diag_metadata("metadata_str_notcomplete", "abc4")
call nc_diag_metadata("metadata_str_notcomplete", "abc5")
call nc_diag_metadata("metadata_str_notcomplete", "abc6")
call nc_diag_data2d("data2d_notcomplete", (/ 1, 2, 3 /))
! Invalid buffered write test - we can't do any buffered write
! until we lock definitions:
!call nc_diag_flush_buffer
! This, combined with nc_diag_set_strict(.TRUE.), will result in
! an error. We can also add 4 elements instead of 2 to achieve the
! same error. Note that after definition locking, adding more than 3
! elements (assuming we didn't add 4 elements here) won't work,
! since it violates the length check. (That means we won't even
! see the strict bounds checking error!)
call nc_diag_data2d("data2dsimple1", (/ 2000, 4000 /))
!------------------------------------------------------------------
! Variable attribute test! (With definition locking on the side!)
!------------------------------------------------------------------
! In order for variable attributes to work, we MUST call
! nc_diag_lock_def! This is due to the fact that we need the NetCDF
! variable IDs in order for attribute defining to work, and
! the variable IDs aren't created until the variables definitions
! have been created (and locked)!
call nc_diag_lock_def
! Now we can add variable attributes!
call nc_diag_varattr("data2dsimple7", "data2dsimple7_testattr1", "hi")
call nc_diag_varattr("data2dsimple7", "data2dsimple7_testattr2", (/ 1, 2, 3 /))
! We can still add more data, but now we must adhere to the maximum
! variable length (the array input length).
! This is fine:
call nc_diag_data2d("data2dsimple6_str", (/ "data2d_11", "fill1", "fill2" /))
call nc_diag_data2d("data2dsimple7", (/ -1, -2, -3 /))
call nc_diag_metadata("metadatasimple8_str", "morehellometa_11")
call nc_diag_chaninfo("chaninfosimple8_str", "test5678")
! This, however, is not. (Note that the array/string is longer than
! the others above.) (Uncomment the below lines to see what will
! happen!)
!call nc_diag_data2d("data2dsimple6_str", int8(12), (/ "data2d_122", "fill1", "fill2" /))
!call nc_diag_data2d("data2dsimple7", int8(12), (/ -4, -5, -6, -7 /))
!call nc_diag_metadata("metadatasimple8_str", "morehellometa_111")
!call nc_diag_chaninfo("chaninfosimple8_str", "test9101112")
!------------------------------------------------------------------
! Buffered writing test!
!------------------------------------------------------------------
! NOTE: For now, data2d does NOT have buffered writing enabled.
! This will be fixed in a future release.
call nc_diag_chaninfo("chaninfosimple9_buf", 6)
call nc_diag_chaninfo("chaninfosimple9_buf", 9)
call nc_diag_metadata("metadatasimple8_str", "morehellometa_b1")
call nc_diag_metadata("metadatasimple6_str", "meta_b1")
call nc_diag_metadata("metadatasimple1", 100)
call nc_diag_metadata("metadatasimple8_str", "morehellometa_b2")
call nc_diag_metadata("metadatasimple6_str", "meta_b2")
call nc_diag_data2d("data2dsimple1", (/ 1000, 2000, 3000 /))
call nc_diag_data2d("data2dsimple1", (/ 2000, 4000, 6000 /))
call nc_diag_data2d("data2dsimple2", (/ 1111, 2222, 3333 /))
call nc_diag_data2d("data2dsimple2", (/ 2222, 4444, 6666 /))
call nc_diag_data2d("data2dsimple6_str", (/ "mwahahaha", "arrrrrgh", "grrrrowwl" /))
call nc_diag_data2d("data2dsimple6_str", (/ "boink", "kabam", "peekaboo" /))
call nc_diag_data2d("data2dsimple7", (/ 20, 40, 60 /))
call nc_diag_data2d("data2dsimple7", (/ 40, 80, 120 /))
print *, "Attempting to flush buf 1:"
call nc_diag_flush_buffer
call nc_diag_chaninfo("chaninfosimple9_buf", 12)
call nc_diag_chaninfo("chaninfosimple9_buf", 15)
call nc_diag_chaninfo("chaninfosimple9_buf", 18)
call nc_diag_chaninfo("chaninfosimple9_buf", 21)
call nc_diag_metadata("metadatasimple8_str", "morehellometa_b3")
call nc_diag_metadata("metadatasimple8_str", "morehellometa_b4")
call nc_diag_metadata("metadatasimple6_str", "meta_b3")
call nc_diag_metadata("metadatasimple6_str", "meta_b4")
call nc_diag_metadata("metadatasimple1", 200)
! We can add something in the future!
call nc_diag_data2d("data2dsimple1", (/ -1000, -2000, -3000 /))
call nc_diag_data2d("data2dsimple6_str", (/ "aaaaaaaaa", "bbbbbbbb", "ccccccccc" /))
call nc_diag_data2d("data2dsimple7", (/ 4000, 8000, 12000 /))
print *, "Attempting to flush buf 2:"
call nc_diag_flush_buffer
call nc_diag_chaninfo("chaninfosimple9_buf", 24)
call nc_diag_chaninfo("chaninfosimple9_buf", 27)
call nc_diag_chaninfo("chaninfosimple9_buf", 30)
call nc_diag_metadata("metadatasimple1", 300)
call nc_diag_metadata("metadatasimple6_str", "meta_b5")
call nc_diag_metadata("metadatasimple6_str", "meta_b6")
call nc_diag_metadata("metadatasimple8_str", "morehellometa_b5")
call nc_diag_metadata("metadatasimple8_str", "morehellometa_b6")
! We can still change an old value at the end!
call nc_diag_data2d("data2dsimple1", (/ 2000, 4000, 6000 /))
call nc_diag_data2d("data2dsimple2", (/ 1111, 2222, 3333 /))
call nc_diag_data2d("data2dsimple1", (/ 4000, 6000, 8000 /))
call nc_diag_data2d("data2dsimple2", (/ 2222, 4444, 6666 /))
call nc_diag_data2d("data2dsimple1", (/ 6000, 8000, 10000 /))
call nc_diag_data2d("data2dsimple2", (/ 3333, 6666, 9999 /))
! Out of order is fine too!
call nc_diag_data2d("data2dsimple6_str", (/ "mwahahaha", "arrrrrgh", "grrrrowwl" /))
call nc_diag_data2d("data2dsimple7", (/ 20, 40, 60 /))
call nc_diag_data2d("data2dsimple7", (/ 200, 400, 600 /))
call nc_diag_data2d("data2dsimple6_str", (/ "asdfghjk", "zxcvbnm", "qwerty" /))
call nc_diag_data2d("data2dsimple6_str", (/ "boink", "kabam", "peekaboo" /))
call nc_diag_data2d("data2dsimple7", (/ 40, 80, 120 /))
! Even with buffering, you still can't overwrite nchans...
! (The following line, if uncommented, should result in an error!)
!call nc_diag_chaninfo("chaninfosimple9_buf", 33)
! Back to header stuff...
call nc_diag_header("headertestsimple5_str", "hello world")
print *, "str_header:"
print *, str_header
print *, "===================="
print *, "Vector:"
print *, "===================="
do i = 1, 1000
call nc_diag_header("headertestarr1", (/ 123, 234, 345, 456, 567, 678, 789 /))
end do
call nc_diag_header("headertestarr2", (/ 222, 234, 345, 456, 567, 678, 789 /))
call nc_diag_header("headertestarr3", (/ 333, 234, 345, 456, 567, 678, 789 /))
call nc_diag_header("headertestarr4", (/ 444, 234, 345, 456, 567, 678, 789 /))
call nc_diag_header("headertestarr5", (/ 111, 222, 333, 444, 555, 666, 777, 888, 999 /))
call nc_diag_header("headertestarr6", (/ 999, 777, 555, 333, 111 /))
call nc_diag_header("headertestsimple2", 123)
call nc_diag_header("headertestsimple3", 321)
call nc_diag_header("headertestarr7", (/ 111, 222, 333, 444, 555, 666, 777, 888, 999 /))
call nc_diag_header("headertestarr7", (/ 222, 444, 666, 888 /))
call nc_diag_header("headertestarr7", 999)
call nc_diag_header("headertestarr8", (/ f, f*2, f*3, f*4 /))
call nc_diag_header("headertestarr9", (/ d, d*2, d*3, d*4 /))
! nc_diag_header does not support arrays of strings... because
! NetCDF4 doesn't support it, either!
! (At least, that's what I remember from the docs... double check
! to make sure!)
print *, "==============================="
print *, "Writing resulting NetCDF file:"
print *, "==============================="
call nc_diag_write
! Appending - we can reopen the same file in append mode!
print *, "==============================="
print *, "Appending to NetCDF file:"
print *, "==============================="
call nc_diag_init("test.nc", .TRUE.)
call nc_diag_chaninfo("chaninfosimple8_str", "test1")
call nc_diag_chaninfo("chaninfosimple8_str", "test2")
call nc_diag_chaninfo("chaninfosimple10_notcomplete", 1)
call nc_diag_chaninfo("chaninfosimple10_notcomplete", 2)
call nc_diag_metadata("metadata_notcomplete", 5678)
call nc_diag_metadata("metadata_notcomplete", 6789)
call nc_diag_metadata("metadata_str_notcomplete", "efgh")
! This will fail due to length constraints...
!call nc_diag_metadata("metadata_str_notcomplete", "efghh")
call nc_diag_data2d("data2d_notcomplete", (/ 2, 3, 4 /))
! This will also fail due to longer length
!call nc_diag_data2d("data2d_notcomplete", (/ 2, 3, 4, 5 /))
! Flushing works, too!
call nc_diag_flush_buffer
call nc_diag_chaninfo("chaninfosimple8_str", "test3")
call nc_diag_chaninfo("chaninfosimple8_str", "test4")
call nc_diag_chaninfo("chaninfosimple10_notcomplete", 3)
call nc_diag_chaninfo("chaninfosimple10_notcomplete", 4)
call nc_diag_metadata("metadata_notcomplete", 7898)
call nc_diag_metadata("metadata_notcomplete", 8989)
call nc_diag_metadata("metadata_str_notcomplete", "ijkl")
call nc_diag_write
end program test_netcdf_layer
|
{"hexsha": "c9441a60bef0c9d65dc6fe99e73373d90255a146", "size": 16920, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "GMAO_ncdiag/nc_diag_write/tests/test_netcdf_layer.f90", "max_stars_repo_name": "GEOS-ESM/GMAO_Shared", "max_stars_repo_head_hexsha": "022af23abbc7883891006b57379be96d9a50df23", "max_stars_repo_licenses": ["NASA-1.3", "ECL-2.0", "Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-02-01T17:36:53.000Z", "max_stars_repo_stars_event_max_datetime": "2020-02-01T17:36:53.000Z", "max_issues_repo_path": "GMAO_ncdiag/nc_diag_write/tests/test_netcdf_layer.f90", "max_issues_repo_name": "GEOS-ESM/GMAO_Shared", "max_issues_repo_head_hexsha": "022af23abbc7883891006b57379be96d9a50df23", "max_issues_repo_licenses": ["NASA-1.3", "ECL-2.0", "Apache-2.0"], "max_issues_count": 105, "max_issues_repo_issues_event_min_datetime": "2019-07-08T19:27:23.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-22T02:12:16.000Z", "max_forks_repo_path": "GMAO_ncdiag/nc_diag_write/tests/test_netcdf_layer.f90", "max_forks_repo_name": "GEOS-ESM/GMAO_Shared", "max_forks_repo_head_hexsha": "022af23abbc7883891006b57379be96d9a50df23", "max_forks_repo_licenses": ["NASA-1.3", "ECL-2.0", "Apache-2.0"], "max_forks_count": 10, "max_forks_repo_forks_event_min_datetime": "2019-07-05T18:00:44.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-11T16:26:29.000Z", "avg_line_length": 42.4060150376, "max_line_length": 112, "alphanum_fraction": 0.65, "num_tokens": 5003}
|
// Boost.Variant
// Contains multivisitors that are implemented via variadic templates, std::tuple
// and decltype(auto)
//
// See http://www.boost.org for most recent version, including documentation.
//
// Copyright Antony Polukhin, 2013-2014.
//
// Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt).
#ifndef BOOST_VARIANT_DETAIL_MULTIVISITORS_CPP14_BASED_HPP
#define BOOST_VARIANT_DETAIL_MULTIVISITORS_CPP14_BASED_HPP
#if defined(_MSC_VER)
# pragma once
#endif
#include <boost/variant/detail/multivisitors_cpp11_based.hpp>
namespace boost {
namespace detail { namespace variant {
// Forward declaration
template <typename Visitor, typename Visitables, typename... Values>
class one_by_one_visitor_and_value_referer_cpp14;
template <typename Visitor, typename Visitables, typename... Values>
inline one_by_one_visitor_and_value_referer_cpp14<Visitor, Visitables, Values... >
make_one_by_one_visitor_and_value_referer_cpp14(
Visitor& visitor, Visitables visitables, std::tuple<Values...> values
)
{
return one_by_one_visitor_and_value_referer_cpp14<Visitor, Visitables, Values... > (
visitor, visitables, values
);
}
template <typename Visitor, typename Visitables, typename... Values>
class one_by_one_visitor_and_value_referer_cpp14
{
Visitor& visitor_;
std::tuple<Values...> values_;
Visitables visitables_;
public: // structors
one_by_one_visitor_and_value_referer_cpp14(
Visitor& visitor, Visitables visitables, std::tuple<Values...> values
) BOOST_NOEXCEPT
: visitor_(visitor)
, values_(values)
, visitables_(visitables)
{}
public: // visitor interfaces
template <typename Value>
decltype(auto) operator()(Value&& value) const
{
return ::boost::apply_visitor(
make_one_by_one_visitor_and_value_referer_cpp14(
visitor_,
tuple_tail(visitables_),
std::tuple_cat(values_, std::make_tuple(wrap<Value, ! ::boost::is_lvalue_reference<Value>::value>(value)))
)
, unwrap(std::get<0>(visitables_)) // getting Head element
);
}
private:
one_by_one_visitor_and_value_referer_cpp14& operator=(const one_by_one_visitor_and_value_referer_cpp14&);
};
template <typename Visitor, typename... Values>
class one_by_one_visitor_and_value_referer_cpp14<Visitor, std::tuple<>, Values...>
{
Visitor& visitor_;
std::tuple<Values...> values_;
public:
one_by_one_visitor_and_value_referer_cpp14(
Visitor& visitor, std::tuple<> /*visitables*/, std::tuple<Values...> values
) BOOST_NOEXCEPT
: visitor_(visitor)
, values_(values)
{}
template <class Tuple, std::size_t... I>
decltype(auto) do_call(Tuple t, index_sequence<I...>) const {
return visitor_(unwrap(std::get<I>(t))...);
}
template <typename Value>
decltype(auto) operator()(Value&& value) const
{
return do_call(
std::tuple_cat(values_, std::make_tuple(wrap<Value, ! ::boost::is_lvalue_reference<Value>::value>(value))),
make_index_sequence<sizeof...(Values) + 1>()
);
}
};
}} // namespace detail::variant
template <class Visitor, class T1, class T2, class T3, class... TN>
inline decltype(auto) apply_visitor(const Visitor& visitor, T1&& v1, T2&& v2, T3&& v3, TN&&... vn,
typename boost::disable_if<
boost::detail::variant::has_result_type<Visitor>
>::type* = 0)
{
return boost::apply_visitor(
::boost::detail::variant::make_one_by_one_visitor_and_value_referer_cpp14(
visitor,
std::make_tuple(
::boost::detail::variant::wrap<T2, ! ::boost::is_lvalue_reference<T2>::value>(v2),
::boost::detail::variant::wrap<T3, ! ::boost::is_lvalue_reference<T3>::value>(v3),
::boost::detail::variant::wrap<TN, ! ::boost::is_lvalue_reference<TN>::value>(vn)...
),
std::tuple<>()
),
::boost::forward<T1>(v1)
);
}
template <class Visitor, class T1, class T2, class T3, class... TN>
inline decltype(auto) apply_visitor(Visitor& visitor, T1&& v1, T2&& v2, T3&& v3, TN&&... vn,
typename boost::disable_if<
boost::detail::variant::has_result_type<Visitor>
>::type* = 0)
{
return ::boost::apply_visitor(
::boost::detail::variant::make_one_by_one_visitor_and_value_referer_cpp14(
visitor,
std::make_tuple(
::boost::detail::variant::wrap<T2, ! ::boost::is_lvalue_reference<T2>::value>(v2),
::boost::detail::variant::wrap<T3, ! ::boost::is_lvalue_reference<T3>::value>(v3),
::boost::detail::variant::wrap<TN, ! ::boost::is_lvalue_reference<TN>::value>(vn)...
),
std::tuple<>()
),
::boost::forward<T1>(v1)
);
}
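
// Usage sketch (illustrative, not part of this header): with a C++14
// compiler, a generic lambda (which has no nested result_type) dispatches
// through the overloads above and visits several variants at once:
//
//     boost::variant<int, double> a(1), b(2.5), c(3);
//     auto sum = boost::apply_visitor(
//         [](auto x, auto y, auto z) { return x + y + z; }, a, b, c);
//     // sum == 6.5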
} // namespace boost
#endif // BOOST_VARIANT_DETAIL_MULTIVISITORS_CPP14_BASED_HPP
|
{"hexsha": "abf03eb57d458e93bd5d9f92ce488dbfe10796b4", "size": 5611, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "boost/variant/detail/multivisitors_cpp14_based.hpp", "max_stars_repo_name": "cpp-pm/boost", "max_stars_repo_head_hexsha": "38c6c8c07f2fcc42d573b10807fef27ec14930f8", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 12278.0, "max_stars_repo_stars_event_min_datetime": "2015-01-29T17:11:33.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T21:12:00.000Z", "max_issues_repo_path": "boost/variant/detail/multivisitors_cpp14_based.hpp", "max_issues_repo_name": "cpp-pm/boost", "max_issues_repo_head_hexsha": "38c6c8c07f2fcc42d573b10807fef27ec14930f8", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 9469.0, "max_issues_repo_issues_event_min_datetime": "2015-01-30T05:33:07.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T16:17:21.000Z", "max_forks_repo_path": "boost/variant/detail/multivisitors_cpp14_based.hpp", "max_forks_repo_name": "cpp-pm/boost", "max_forks_repo_head_hexsha": "38c6c8c07f2fcc42d573b10807fef27ec14930f8", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 892.0, "max_forks_repo_forks_event_min_datetime": "2015-01-29T16:26:19.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-20T07:44:30.000Z", "avg_line_length": 37.1589403974, "max_line_length": 126, "alphanum_fraction": 0.5965068615, "num_tokens": 1305}
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import matplotlib.pyplot as plt
import torch.optim as optim
from torch.autograd import Variable
from torchvision import datasets, transforms
nclasses = 43 # GTSRB has 43 classes
DEBUG = False
def gaussian_filter(kernel_shape):
x = np.zeros(kernel_shape, dtype='float32')
def gauss(x, y, sigma=2.0):
Z = 2 * np.pi * sigma ** 2
return 1. / Z * np.exp(-(x ** 2 + y ** 2) / (2. * sigma ** 2))
mid = np.floor(kernel_shape[-1] / 2.)
for kernel_idx in range(0, kernel_shape[1]):
for i in range(0, kernel_shape[2]):
for j in range(0, kernel_shape[3]):
x[0, kernel_idx, i, j] = gauss(i - mid, j - mid)
return x / np.sum(x)
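# Quick sanity check (illustrative): the kernel is normalized, so e.g.
# gaussian_filter((1, 3, 9, 9)).sum() equals 1.0 up to float precision.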
def LCN(image_tensor, gaussian, mid):
    # local contrast normalization: subtractive then divisive normalization
    # over a Gaussian window; `mid` crops the padding added by the Gaussian
    # convolution
    filtered = gaussian(image_tensor)
centered_image = image_tensor - filtered[:,:,mid:-mid,mid:-mid]
sum_sqr_XX = gaussian(centered_image.pow(2))
denom = sum_sqr_XX[:,:,mid:-mid,mid:-mid].sqrt()
per_img_mean = denom.mean()
divisor = torch.max(per_img_mean, denom)
divisor = np.maximum(divisor.detach().cpu().numpy(), 1e-4)
new_image = centered_image.detach().cpu() / divisor
if DEBUG: # visualize what the network sees
plt.imshow(np.transpose(filtered[0].detach().cpu().numpy(),
(1, 2, 0)).reshape(filtered.shape[2], filtered.shape[3]))
plt.title('Gaussian')
plt.show()
print('GAUSSIAN', filtered)
print('LCN', new_image)
plt.imshow(np.transpose(new_image[0, :3].detach().cpu().numpy(),
(1, 2, 0)))
plt.title('LCN')
plt.show()
return new_image.cuda()
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 200, kernel_size=7 ,stride=1, padding=2)
self.maxpool1 = nn.MaxPool2d(2, stride=2 , ceil_mode=True)
self.gfilter1 = torch.Tensor(gaussian_filter((1,200,9,9)) )
self.gaussian1 = nn.Conv2d(in_channels=200, out_channels=200,
kernel_size=9 , padding= 8 , bias=False)
self.gaussian1.weight.data = self.gfilter1
self.gaussian1.weight.requires_grad = False
self.conv2 = nn.Conv2d(200, 250, kernel_size=4 ,stride=1, padding=2)
self.maxpool2 = nn.MaxPool2d(2, stride=2 , ceil_mode=True)
self.gfilter2 = torch.Tensor(gaussian_filter((1,250,9,9)) )
self.gaussian2 = nn.Conv2d(in_channels=250, out_channels=250,
kernel_size=9 , padding= 8 , bias=False)
self.gaussian2.weight.data = self.gfilter2
self.gaussian2.weight.requires_grad = False
self.conv3 = nn.Conv2d(250, 350, kernel_size=4 ,stride=1, padding=2)
self.maxpool3 = nn.MaxPool2d(2, stride=2)
self.gfilter3 = torch.Tensor(gaussian_filter((1,350,9,9)) )
self.gaussian3 = nn.Conv2d(in_channels=350, out_channels=350,
kernel_size=9 , padding= 8 , bias=False)
self.gaussian3.weight.data = self.gfilter3
self.gaussian3.weight.requires_grad = False
self.FC1 = nn.Linear(12600, 400)
self.FC2 = nn.Linear(400, 43)
# spatial attention model, spatial transformers layers
self.st1 = nn.Sequential(
nn.MaxPool2d(2, stride=2 , ceil_mode=True),
nn.Conv2d(3, 250, kernel_size=5 ,stride=1, padding=2),
nn.ReLU(True),
nn.MaxPool2d(2, stride=2 , ceil_mode=True),
nn.Conv2d(250, 250, kernel_size=5 ,stride=1, padding=2),
nn.ReLU(True),
nn.MaxPool2d(2, stride=2 , ceil_mode=True)
)
self.FC1_ = nn.Sequential(
nn.Linear(9000, 250),
nn.ReLU(True),
nn.Linear( 250 , 6 )
)
self.st2 = nn.Sequential(
nn.MaxPool2d(2, stride=2 , ceil_mode=False),
nn.Conv2d(200, 150, kernel_size=5 ,stride=1, padding=2),
nn.ReLU(True),
nn.MaxPool2d(2, stride=2 , ceil_mode=False),
nn.Conv2d(150, 200, kernel_size=5 ,stride=1, padding=2),
nn.ReLU(True),
nn.MaxPool2d(2, stride=2 , ceil_mode=False)
)
self.FC2_ = nn.Sequential(
nn.Linear(800, 300),
nn.ReLU(True),
nn.Linear( 300 , 6 )
)
self.st3 = nn.Sequential(
nn.MaxPool2d(2, stride=2 , ceil_mode=False),
nn.Conv2d(250, 150, kernel_size=5 ,stride=1, padding=2),
nn.ReLU(True),
nn.MaxPool2d(2, stride=2 , ceil_mode=False),
nn.Conv2d(150, 200, kernel_size=5 ,stride=1, padding=2),
nn.ReLU(True),
nn.MaxPool2d(2, stride=2 , ceil_mode=False)
)
self.FC3_ = nn.Sequential(
nn.Linear(200, 300),
nn.ReLU(True),
nn.Linear( 300 , 6 )
)
self.FC1_[2].weight.data.zero_()
self.FC1_[2].bias.data.copy_(torch.tensor([1, 0, 0, 0, 1, 0], dtype=torch.float))
self.FC2_[2].weight.data.zero_()
self.FC2_[2].bias.data.copy_(torch.tensor([1, 0, 0, 0, 1, 0], dtype=torch.float))
self.FC3_[2].weight.data.zero_()
self.FC3_[2].bias.data.copy_(torch.tensor([1, 0, 0, 0, 1, 0], dtype=torch.float))
def forward(self, x):
# first layer is the Spatial Transformer Layer
# ST-1
h1 = self.st1(x)
h1 = h1.view(-1, 9000)
h1 = self.FC1_(h1)
theta1 = h1.view(-1, 2, 3)
grid1 = F.affine_grid(theta1, x.size())
x = F.grid_sample(x, grid1)
# convolution, Relu and Maxpool , SET #1
x = F.relu(self.conv1(x))
x = self.maxpool1(x)
        # the paper says to apply LCN here, but applying the LCN layer before the convolution worked better for me
# ST-2
h2 = self.st2(x)
h2=h2.view(-1,800)
h2 = self.FC2_(h2)
theta2 = h2.view(-1, 2, 3)
grid2 = F.affine_grid(theta2, x.size())
x = F.grid_sample(x, grid2)
        # LCN layer: based on the paper's GitHub implementation and Yann LeCun's 2009 paper
mid1 = int(np.floor(self.gfilter1.shape[2] / 2.))
x = LCN(x , self.gaussian1, mid1)
# convolution, Relu and Maxpool , SET #2
x = F.relu(self.conv2(x))
x= self.maxpool2(x)
        # ST-3
h3 = self.st3(x)
h3 = h3.view(-1, 200)
h3 = self.FC3_(h3)
theta3 = h3.view(-1, 2, 3)
grid3 = F.affine_grid(theta3, x.size())
x = F.grid_sample(x, grid3)
# LCN Layer : 2
mid2 = int(np.floor(self.gfilter2.shape[2] / 2.))
x = LCN(x , self.gaussian2, mid2)
# convolution, Relu and Maxpool , SET #3
x = F.relu(self.conv3(x))
x= self.maxpool3(x)
# LCN Layer : 3
mid3 = int(np.floor(self.gfilter3.shape[2] / 2.))
x = LCN(x , self.gaussian3, mid3)
        # reshape to the dimensions given in the paper
y = x.view(-1, 12600)
y = F.relu(self.FC1(y))
y = self.FC2(y)
return F.log_softmax(y, dim=-1)
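
# Minimal usage sketch (illustrative; a CUDA GPU is required because LCN()
# moves tensors to the GPU, and 48x48 inputs match the 12600-feature FC1):
#
#     net = Net().cuda()
#     dummy = torch.randn(2, 3, 48, 48).cuda()
#     log_probs = net(dummy)    # shape (2, 43): log-probabilities per class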
|
{"hexsha": "4e46fb44a5dc70acb4b9be4f8758642d0be6b47a", "size": 7207, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/model.py", "max_stars_repo_name": "sovit-123/German-Traffic-Sign-Recognition-with-Deep-Learning", "max_stars_repo_head_hexsha": "249122f3e1a0d96043d23ce7a8aaaf9ca2cd7913", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-08-15T13:02:04.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-22T05:51:23.000Z", "max_issues_repo_path": "src/model.py", "max_issues_repo_name": "sovit-123/German-Traffic-Sign-Recognition-with-Deep-Learning", "max_issues_repo_head_hexsha": "249122f3e1a0d96043d23ce7a8aaaf9ca2cd7913", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/model.py", "max_forks_repo_name": "sovit-123/German-Traffic-Sign-Recognition-with-Deep-Learning", "max_forks_repo_head_hexsha": "249122f3e1a0d96043d23ce7a8aaaf9ca2cd7913", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-12-29T20:48:20.000Z", "max_forks_repo_forks_event_max_datetime": "2020-12-29T20:48:20.000Z", "avg_line_length": 39.3825136612, "max_line_length": 94, "alphanum_fraction": 0.5662550298, "include": true, "reason": "import numpy", "num_tokens": 2122}
|
'''
svg_to_mesh module
==================
This module allows reading an Inkscape SVG file, parsing its elements, and converting them to 3D meshes.
svg_to_mesh module API
----------------------
'''
from __future__ import print_function
from __future__ import absolute_import
import xml.etree.cElementTree as ET
from six.moves import range
try:
from soma import aims
fake_aims = False
except ImportError:
# aims is not available, use the fake light one (with reduced
# functionalities)
aims = None
fake_aims = True
import numpy as np
import scipy.linalg
import os
import six
import copy
#import traceback
import sys
import math
'''
SVG parsing as mesh objects
requires:
* xml (ElementTree)
* numpy
* scipy
* six
* optionally, soma.aims
The AIMS library is used to build and manipulate meshes
(https://github.com/brainvisa/aims-free).
This lib is compiled (C++ with Python bindings), thus it is not completely
straightforward to install.
Alternately, we can implement basic replacements for the vector and mesh
classes. This allows using the 2D part.
The 3D part, however, needs more algorithmic things in Aims, and Anatomist
to render depth maps.
'''
if fake_aims:
    # implement an "aims-lite": basic Aims mesh structures mimicking part of
    # the Aims API transparently.
    # This allows using the 2D part.
    # The 3D part, however, needs more algorithmic things in Aims.
    print('The soma.aims (https://github.com/brainvisa/aims-free) library '
          'module is not available. We will proceed using a light ersatz, '
          'which allows processing things mainly in 2D. The 3D parts need '
          'more algorithmic processing and require the "real" aims module '
          'to be present.')
class aims(object):
''' Fake aims-lite module '''
class vector(object):
''' Fixed size vector '''
def __init__(self, dtype, shape):
self._vec = np.zeros(shape, dtype=dtype)
self._dim = 1
if len(shape) >= 2:
self._dim = shape[1]
def assign(self, vec):
shape = (len(vec), self._dim)
self._vec = np.zeros(shape, dtype=self._vec.dtype)
self._vec[:] = np.asarray(vec).reshape(shape)
def __getitem__(self, item):
return self._vec.__getitem__(item)
def __setitem__(self, item, value):
return self._vec.__setitem__(item, value)
class AimsTimeSurface(object):
''' Mesh structure '''
def __init__(self, dim=3):
self._vertex = aims.vector(dtype=np.float32, shape=(0, 3))
self._polygon = aims.vector(dtype=np.uint32, shape=(0, dim))
self._header = {}
def vertex(self):
return self._vertex
def polygon(self):
return self._polygon
def header(self):
return self._header
class AimsTimeSurface_2(AimsTimeSurface):
''' Segments mesh (2 points per polygon) '''
def __init__(self):
super(aims.AimsTimeSurface_2, self).__init__(2)
class AimsTimeSurface_3(AimsTimeSurface):
''' Triangles mesh (3 points per polygon) '''
def __init__(self):
super(aims.AimsTimeSurface_3, self).__init__(3)
class SvgToMesh(object):
''' Read SVG, transforms things into meshes
'''
def __init__(self, concat_mesh='bygroup'):
'''
Parameters
----------
concat_mesh: str
concatenation method between multiple paths in SVG file.
'merge': merge all paths in a single mesh
'time': use mesh timestep to store each path
'list': return a list of meshes
'bygroup' (default): return a dict of meshes, one for each main
group, paths are concatenated inside each group
'''
self.concat_mesh = concat_mesh
self.mesh = None
self.mesh_list = []
self.mesh_dict = {}
self.debug = False
self.id_count = 1
# layers that should be taken into account even if hidden
self.explicitly_show = []
# in 2D transform mode (replace_elements), put back these properties
# from the source to the transformed xml items
self.keep_transformed_properties = set()
@staticmethod
def get_style(xml_elem):
style = xml_elem.get('style')
if not style:
return None
style = style.replace(';', '\n')
style = style.split('\n')
style = [x.strip() for x in style]
style = dict([(y.strip() for y in x.split(':')) for x in style if x])
return style
@staticmethod
def set_style(xml_elem, style):
style_l = ['%s:%s' % (k, str(v)) for k, v in six.iteritems(style)]
style_str = ';'.join(style_l)
if not style:
return None
xml_elem.set('style', style_str)
@staticmethod
def get_mesh_color(style):
if not style:
return None
color = style.get('fill')
opacity = style.get('fill-opacity')
if not color or color == 'none':
color = style.get('stroke')
opacity = style.get('stroke-opacity')
if not color:
return None
color_spec = color.split(' ')
color = [c[1:] for c in color_spec if c[0] == '#']
if color:
c = color[0]
n = int(math.ceil(len(c) / 3))
if n < 1:
n = 1
if opacity not in (None, 'none'):
opacity = float(opacity)
else:
opacity = 1.
color = (int('0x' + c[:n], 0) / 255.,
int('0x' + c[n:n*2], 0) / 255.,
int('0x' + c[n*2:n*3], 0) / 255.,
opacity)
return color
return None
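
    # Illustrative example: a style of {'fill': '#ff8000', 'fill-opacity': '0.5'}
    # yields (1.0, 0x80 / 255., 0.0, 0.5), i.e. an RGBA tuple with components
    # in [0, 1].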
def read_rect(self, xml_path, trans, style=None):
''' Read a rectangle element as a mesh
'''
if not aims:
raise RuntimeError('aims module is not available. read_rect() '
'needs it.')
if style is None:
style = self.get_style(xml_path)
color = self.get_mesh_color(style)
material = None
if color:
material = {'diffuse': color}
x = float(xml_path.get('x'))
y = float(xml_path.get('y'))
w = float(xml_path.get('width'))
h = float(xml_path.get('height'))
pts = trans * np.matrix([[x, x+w, x+w, x],
[y, y, y+h, y+h],
[1., 1., 1., 1.]])
pts[2, :] = 0 # reset Z to 0
mesh = aims.AimsTimeSurface_2()
mesh.vertex().assign(np.asarray(pts.T))
mesh.polygon().assign([(0, 1), (1, 2), (2, 3), (3, 0)])
trans3d = getattr(trans, 'transform_3d', None)
if trans3d is not None:
vert = np.vstack((np.asarray(mesh.vertex()).T,
np.ones((1, len(mesh.vertex())),
dtype=np.float32)))
vert = (trans3d * vert).T
vert = vert[:, :3]
mesh.vertex().assign(np.asarray(vert))
#if len(mesh.normal()) != 0:
#print('trans normals')
mesh.header()['transformation'] = list(np.ravel(trans3d))
if material is not None:
mesh.header()['material'] = material
return mesh
def read_circle(self, xml_path, trans, style=None):
''' Read a circle element as a mesh
'''
if not aims:
raise RuntimeError('aims module is not available. read_circle() '
'needs it.')
if style is None:
style = self.get_style(xml_path)
color = self.get_mesh_color(style)
material = None
if color:
material = {'diffuse': color}
x = xml_path.get('cx')
if x is None:
x = xml_path.get('{http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd}cx')
print('x:', x)
x = float(x)
y = xml_path.get('cy')
if y is None:
y = xml_path.get('{http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd}cy')
y = float(y)
r = xml_path.get('r')
if r is None:
r = xml_path.get('{http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd}rx')
r = float(r)
angle_s = xml_path.get('sodipodi:start')
if angle_s:
angle_s = float(angle_s)
else:
angle_s = 0.
angle_e = xml_path.get('sodipodi:end')
if angle_e:
angle_e = float(angle_e)
else:
angle_e = np.pi * 2
npt = 24
mesh = aims.SurfaceGenerator.circle_wireframe(
(x, y, 1.), r, npt, (0, 0, 1 ), (1, 0, 0), angle_s, angle_e)
pts = trans * np.matrix(mesh.vertex().np.T)
pts[2, :] = 0 # reset Z to 0
mesh.vertex().assign(np.asarray(pts.T))
trans3d = getattr(trans, 'transform_3d', None)
if trans3d is not None:
vert = np.vstack((np.asarray(mesh.vertex()).T,
np.ones((1, len(mesh.vertex())),
dtype=np.float32)))
vert = (trans3d * vert).T
vert = vert[:, :3]
mesh.vertex().assign(np.asarray(vert))
#if len(mesh.normal()) != 0:
#print('trans normals')
mesh.header()['transformation'] = list(np.ravel(trans3d))
if material is not None:
mesh.header()['material'] = material
return mesh
def read_polygon(self, xml_path, trans, style=None):
''' Read a polygon element as a mesh
'''
if not aims:
raise RuntimeError('aims module is not available. read_polygon() '
'needs it.')
if style is None:
style = self.get_style(xml_path)
color = self.get_mesh_color(style)
material = None
if color:
material = {'diffuse': color}
points = xml_path.get('points')
pl = points.split()
points = np.matrix([[float(p.strip()) for p in pt.split(',')]
for pt in pl]).T
#print('polygon points:', points.T)
points3 = np.vstack((points, np.ones((points.shape[1], ))))
pts = trans * points3
pts[2, :] = 0 # reset Z to 0
mesh = aims.AimsTimeSurface_2()
mesh.vertex().assign(np.asarray(pts.T))
trans3d = getattr(trans, 'transform_3d', None)
if trans3d is not None:
vert = np.vstack((np.asarray(mesh.vertex()).T,
np.ones((1, len(mesh.vertex())),
dtype=np.float32)))
vert = (trans3d * vert).T
vert = vert[:, :3]
mesh.vertex().assign(np.asarray(vert))
#if len(mesh.normal()) != 0:
#print('trans normals')
mesh.header()['transformation'] = list(np.ravel(trans3d))
poly = [(i, i+1) for i in range(pts.shape[1] - 1)]
poly.append((pts.shape[1] - 1, 0))
mesh.polygon().assign(poly)
if material is not None:
mesh.header()['material'] = material
return mesh
def read_path(self, xml_path, trans, style=None):
''' Read a path element as mesh, apply coords transformations
'''
def read_point(pdesc, i, pt=None, nvert=None, npoly=None):
j = i + 1
try:
while j < n and pdesc[j] in '0123456789.e-':
j += 1
x = float(pdesc[i:j])
i = j + 1
while i<len(pdesc) and pdesc[i] in ' ,':
i += 1
except Exception as e:
print(e)
print('failed reading', pt, ', i:', i, ', vertices:', nvert,
', poly:', npoly)
raise
return x, i
if not aims:
raise RuntimeError('aims module is not available. read_path() '
'needs it.')
if xml_path.tag == 'rect' or xml_path.tag.endswith('}rect') or \
xml_path.tag.endswith('}image'):
return self.read_rect(xml_path, trans, style)
if xml_path.tag == 'polygon' or xml_path.tag.endswith('}polygon'):
return self.read_polygon(xml_path, trans, style)
if xml_path.tag == 'circle' or xml_path.tag.endswith('}circle'):
return self.read_circle(xml_path, trans, style)
# read path
if style is None:
style = self.get_style(xml_path)
color = self.get_mesh_color(style)
material = None
if color:
material = {'diffuse': color}
vert = []
poly = []
pdesc = xml_path.get('d')
n = len(pdesc)
i = 0
first = 0
cmd = 'M'
x = 0
y = 0
while i < n:
while(i < n and pdesc[i] == ' '):
i += 1
if i == n:
#print('end of path in:', pdesc)
break
#print('i:', i)
last_x, last_y = x, y
if pdesc[i] in 'mMcClLhHvVsSqQtTaA':
cmd = pdesc[i]
#print('cmd:', cmd)
i += 1
while pdesc[i] == ' ':
i += 1
elif pdesc[i] in '-0123456789.':
if cmd not in 'vV':
x, i = read_point(pdesc, i, 'x', len(vert), len(poly))
if cmd >= 'a':
x += last_x
if cmd not in 'hH':
y, i = read_point(pdesc, i, 'y', len(vert), len(poly))
j = i + 1
if cmd >= 'a':
y += last_y
#print(x,', ', y)
if cmd in 'cC':
#last_x, last_y = x, y
x, i = read_point(pdesc, i, 'x2', len(vert), len(poly))
y, i = read_point(pdesc, i, 'y2', len(vert), len(poly))
if cmd >= 'a':
x += last_x
y += last_y
#last_x, last_y = x, y
x, i = read_point(pdesc, i, 'x3', len(vert), len(poly))
y, i = read_point(pdesc, i, 'y3', len(vert), len(poly))
if cmd >= 'a':
x += last_x
y += last_y
elif cmd in 'sSqQ':
#last_x, last_y = x, y
x, i = read_point(pdesc, i, 'x2', len(vert), len(poly))
y, i = read_point(pdesc, i, 'y2', len(vert), len(poly))
if cmd >= 'a':
x += last_x
y += last_y
if cmd in 'aA':
x, i = read_point(pdesc, i, 'x-axis-rotation',
len(vert), len(poly))
y, i = read_point(pdesc, i, 'large-arc-flag',
len(vert), len(poly))
y, i = read_point(pdesc, i, 'sweep-flag',
len(vert), len(poly))
x, i = read_point(pdesc, i, 'x2', len(vert), len(poly))
y, i = read_point(pdesc, i, 'y2', len(vert), len(poly))
if cmd >= 'a':
x += last_x
y += last_y
vert.append((x, y, 0.))
if len(vert) > 1 and cmd not in 'mM':
poly.append((len(vert) - 2, len(vert) - 1))
if cmd == 'm':
cmd = 'l'
first = len(vert) - 1
elif cmd == 'M':
cmd = 'L'
first = len(vert) - 1
elif pdesc[i] in 'zZ':
#print('close')
if len(vert) >= first + 3:
poly.append((len(vert) - 1, first))
x, y = vert[first][:2]
i += 1
else:
print('unknown command:', pdesc[i], 'at position', i)
i += 1
mesh = aims.AimsTimeSurface(2)
#print('vert:', vert)
#print('poly:', poly)
#print('path trans:', trans)
if not np.all(trans == np.eye(3)):
#print('trans:', trans)
#print(vert)
vert = np.asarray(vert).T
vert[2, :] = 1.
vert = (trans * vert).T
vert[:, 2] = 0.
#print('to: vert:', vert)
mesh.vertex().assign(np.asarray(vert))
mesh.polygon().assign(poly)
trans3d = getattr(trans, 'transform_3d', None)
if trans3d is not None:
vert = np.vstack((np.asarray(vert).T,
np.ones((1, len(vert)), dtype=np.float32)))
vert = (trans3d * vert).T
vert = vert[:, :3]
mesh.vertex().assign(np.asarray(vert))
#if len(mesh.normal()) != 0:
#print('trans normals')
mesh.header()['transformation'] = list(np.ravel(trans3d))
if material:
mesh.header()['material'] = material
return mesh
@staticmethod
def set_transform(xml_elem, trans):
mat_str = 'matrix(%s)' % ', '.join(str(x)
for x in np.ravel(trans[:2, :].T))
xml_elem.set('transform', mat_str)
@staticmethod
def get_transform(trans, previous=None):
'''
Parameters
----------
trans: str or XML element
if str: transform field in the SVG element.
if element: element itself
previous: np array or None
parent transform to be composed with
'''
#print('transform:', trans_str)
mat3d = None
if not isinstance(trans, str):
trans3d = trans.get('transform_3d')
if trans3d is not None:
mat3d = SvgToMesh.get_transform(trans3d)
trans_str = trans.get('transform')
if not trans_str:
mat = np.matrix(np.eye(3))
if previous is not None:
mat = previous * mat
if trans3d is None and hasattr(previous, 'transform_3d'):
mat3d = previous.transform_3d
if mat3d is not None:
mat.transform_3d = mat3d
return mat
else:
trans_str = trans
tr_list = trans_str.split(') ')
tr_list = [x + ')' for x in tr_list[:-1]] + [tr_list[-1]]
tmat = previous
for trans_strx in tr_list:
mat = np.matrix(np.eye(3))
i = trans_strx.find('(')
            if i < 1 or trans_strx[-1] != ')':
                print('unrecognized transform: %s' % trans_strx)
return tmat
ttype = trans_strx[:i]
tdef1 = trans_strx[i+1:-1].strip().split(',')
tdef = []
for t in tdef1:
tdef += [float(x.strip()) for x in t.strip().split(' ')]
#print(ttype, tdef)
if ttype == 'matrix':
mat[:2, 0] = np.reshape(tdef[:2], (2, 1))
mat[:2, 1] = np.reshape(tdef[2:4], (2, 1))
mat[:2, 2] = np.reshape(tdef[4:], (2, 1))
elif ttype == 'translate':
mat[0, 2] = tdef[0]
if len(tdef) > 1:
mat[1, 2] = tdef[1]
elif ttype == 'scale':
mat[0, 0] = tdef[0]
if len(tdef) > 1:
mat[1, 1] = tdef[1]
else:
mat[1, 1] = tdef[0]
elif ttype == 'rotate':
ca = np.cos(tdef[0] / 180. * np.pi)
sa = np.sin(tdef[0] / 180. * np.pi)
mat[0:2, 0] = np.reshape((ca, sa), (2, 1))
mat[0:2, 1] = np.reshape((-sa, ca), (2, 1))
if len(tdef) >= 3:
m2 = np.matrix(np.eye(3))
m2[:2, 2] = ((tdef[1], ), (tdef[2], ))
mat = m2 * mat
m2[:2, 2] *= -1
mat *= m2
elif ttype == 'skewX':
mat[0, 1] = np.tan(tdef[0] / 180. * np.pi)
elif ttype == 'skewY':
mat[1, 0] = np.tan(tdef[0] / 180. * np.pi)
            elif ttype == 'matrix4':
mat = np.matrix(np.eye(4))
mat[:3, 0] = np.reshape(tdef[:3], (3, 1))
mat[:3, 1] = np.reshape(tdef[3:6], (3, 1))
mat[:3, 2] = np.reshape(tdef[6:9], (3, 1))
mat[:3, 3] = np.reshape(tdef[9:], (3, 1))
if tmat is None:
tmat = mat
else:
tmat = tmat * mat
#print('mat:', tmat)
if mat3d is not None:
tmat.transform_3d = mat3d
return tmat
@staticmethod
def to_transform(matrix):
transform = 'matrix(' + ', '.join(
[str(x) for x in np.asarray(matrix[:2, :].T).ravel()]) + ')'
return transform
def boundingbox(self, element, trans=None, exhaustive=True):
todo = [(element, trans)]
bbox = [None, None]
bmin, bmax = bbox
while todo:
element, trans = todo.pop(0)
trans = self.get_transform(element, trans)
if element.tag.endswith('}g'):
todo = [(c, trans) for c in element] + todo
else:
if element.tag.endswith('}path') \
or element.tag.endswith('}rect') \
or element.tag.endswith('}image'):
if trans is None:
trans = np.matrix(np.eye(3))
mesh = self.read_path(element, trans)
for v in mesh.vertex():
if bmin is None:
bmin = [v[0], v[1]]
bmax = [v[0], v[1]]
bbox = [bmin, bmax]
else:
if v[0] < bmin[0]:
bmin[0] = v[0]
if v[0] > bmax[0]:
bmax[0] = v[0]
if v[1] < bmin[1]:
bmin[1] = v[1]
if v[1] > bmax[1]:
bmax[1] = v[1]
if not exhaustive:
break
return bbox
def transform_subtree(self, xml, in_trans, trans, otrans=None):
''' in_trans: current transform of xml subtree (out of the subtree)
trans: transform to be applied
otrans: transform in the output subtree. default=in_trans
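        Typically ``trans`` re-positions a copied subtree, while ``in_trans``
        and ``otrans`` describe the frames it comes from and goes to; each
        path then gets the net transform inv(otrans) * trans * in_trans.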
'''
if otrans is None:
otrans = in_trans
todo = [(xml, in_trans, otrans)]
#otrans = np.matrix(scipy.linalg.inv(in_trans))
while todo:
element, c_trans, c_otrans = todo.pop(0)
transm = self.get_transform(element)
if c_trans is None:
c_trans = transm
else:
c_trans = c_trans * transm
if hasattr(transm, 'transform_3d'):
c_trans.transform_3d = transm.transform_3d
if c_otrans is None:
c_otrans = transm
else:
c_otrans = c_otrans * transm
if hasattr(transm, 'transform_3d'):
c_otrans.transform_3d = transm.transform_3d
#element.set('transform', None) # FIXME: how to remove it
if element.tag.endswith('}g'):
todo = [(c, c_trans, c_otrans) for c in element] + todo
else:
if element.tag.endswith('}path'):
iotrans = np.matrix(scipy.linalg.inv(c_otrans))
ptrans = iotrans * trans * c_trans
d = self.transform_path(element, ptrans)
element.set('d', d)
elif element.tag.endswith('}rect'):
iotrans = np.matrix(scipy.linalg.inv(c_otrans))
ptrans = iotrans * trans * c_trans
self.transform_rect(element, ptrans)
def style_to_str(self, style):
return ';'.join(['%s:%s' % (k, str(v))
for k, v in six.iteritems(style)])
def transform_style(self, xml_path, trans):
''' adapt style in path/rect to scale changes (stroke width etc)
'''
style = self.get_style(xml_path)
if style is not None:
stroke_width = style.get('stroke-width')
if stroke_width is not None:
unit = ''
i = 1
while stroke_width[-i] not in '0123456789.':
unit = stroke_width[-i] + unit
i += 1
if i > 1:
stroke_width = stroke_width[:-i+1]
stroke_width = float(stroke_width)
tp = trans.dot([[stroke_width], [0.], [1.]]) \
- trans.dot([[0.], [0.], [1.]])
stroke_width = str(tp[0, 0]) + unit
style['stroke-width'] = stroke_width
style_str = self.style_to_str(style)
xml_path.set('style', style_str)
def transform_path(self, xml_path, trans):
''' trans: transform to be applied
'''
def read_point(pdesc, i, pt=None):
j = i + 1
try:
while j < n and pdesc[j] in '0123456789.e-':
j += 1
x = float(pdesc[i:j])
i = j + 1
while i<len(pdesc) and pdesc[i] in ' ,':
i += 1
except Exception as e:
print(e)
print('failed reading', pt, ', i:', i)
raise
return x, i
cx = xml_path.get('{http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd}cx')
cy = xml_path.get('{http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd}cy')
if cx is not None and cy is not None:
tp = np.asarray(trans.dot([[float(cx)], [float(cy)],
[1.]])).ravel()
xml_path.set(
'{http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd}cx',
str(tp[0]))
xml_path.set(
'{http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd}cy',
str(tp[1]))
rx = xml_path.get(
'{http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd}rx')
ry = xml_path.get(
'{http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd}ry')
if rx is not None:
tp2 = np.asarray(trans.dot([[float(cx) + float(rx)],
[float(cy)], [1.]])).ravel()
xml_path.set(
'{http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd}rx',
str(np.abs(tp2[0] - tp[0])))
if ry is not None:
tp2 = np.asarray(trans.dot([[float(cx)],
[float(cy) + float(ry)],
[1.]])).ravel()
xml_path.set(
'{http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd}ry',
str(np.abs(tp2[1] - tp[1])))
# additional stuff for stars
r1 = xml_path.get('{http://sodipodi.sourceforge.net/DTD/'
'sodipodi-0.dtd}r1')
if r1 is not None:
scale = np.sqrt(np.sum(np.array(
trans * np.array([[1, 0, 0]]).T \
- trans * np.array([[0, 0, 0]]).T) ** 2))
xml_path.set('{http://sodipodi.sourceforge.net/DTD/'
'sodipodi-0.dtd}r1',
str(float(r1) * scale))
r2 = xml_path.get('{http://sodipodi.sourceforge.net/'
'DTD/sodipodi-0.dtd}r2')
if r2 is not None:
xml_path.set('{http://sodipodi.sourceforge.net/DTD/'
'sodipodi-0.dtd}r2',
str(float(r2) * scale))
self.transform_style(xml_path, trans)
pdesc = xml_path.get('d')
n = len(pdesc)
i = 0
cmd = 'M'
x = 0
y = 0
tp = [0, 0]
first = x, y, tp
out_cmd = []
while i < n:
            while i < n and pdesc[i] == ' ':
i += 1
if i == n:
#print('end of path in:', pdesc)
break
#print('i:', i)
last_x, last_y = x, y
last_tp = tp
if pdesc[i] in 'mMcClLhHvVsSqQtTaA':
cmd = pdesc[i]
out_cmd.append(cmd)
#print('cmd:', cmd)
i += 1
while pdesc[i] == ' ':
i += 1
elif pdesc[i] in '-0123456789.':
if cmd not in 'vV':
x, i = read_point(pdesc, i, 'x')
if cmd >= 'a':
x += last_x
if cmd not in 'hH':
y, i = read_point(pdesc, i, 'y')
if cmd >= 'a':
y += last_y
tp = np.asarray(trans.dot([[x], [y], [1.]])).ravel()
if cmd not in 'vV':
if cmd >= 'a':
out_cmd.append(tp[0] - last_tp[0])
else:
out_cmd.append(tp[0])
if cmd not in 'hH':
if cmd >= 'a':
out_cmd.append(tp[1] - last_tp[1])
else:
out_cmd.append(tp[1])
if cmd not in 'vVhH':
out_cmd = out_cmd[:-2] \
+ ['%f,%f' % (out_cmd[-2], out_cmd[-1])]
#print(x,', ', y)
if cmd in 'cC':
#last_x, last_y = x, y
x, i = read_point(pdesc, i, 'x2')
y, i = read_point(pdesc, i, 'y2')
if cmd >= 'a':
x += last_x
y += last_y
tp = np.asarray(trans.dot([[x], [y], [1.]])).ravel()
out_cmd.append(
'%f,%f' % (tp[0] - last_tp[0], tp[1] - last_tp[1]))
else:
tp = np.asarray(trans.dot([[x], [y], [1.]])).ravel()
out_cmd.append('%f,%f' % (tp[0], tp[1]))
#last_x, last_y = x, y
x, i = read_point(pdesc, i, 'x3')
y, i = read_point(pdesc, i, 'y3')
if cmd >= 'a':
x += last_x
y += last_y
tp = np.asarray(trans.dot([[x], [y], [1.]])).ravel()
out_cmd.append(
'%f,%f' % (tp[0] - last_tp[0], tp[1] - last_tp[1]))
else:
tp = np.asarray(trans.dot([[x], [y], [1.]])).ravel()
out_cmd.append('%f,%f' % (tp[0], tp[1]))
elif cmd in 'sSqQ':
#last_x, last_y = x, y
x, i = read_point(pdesc, i, 'x2')
y, i = read_point(pdesc, i, 'y2')
if cmd >= 'a':
x += last_x
y += last_y
tp = np.asarray(trans.dot([[x], [y], [1.]])).ravel()
out_cmd.append(
'%f,%f' % (tp[0] - last_tp[0], tp[1] - last_tp[1]))
else:
tp = np.asarray(trans.dot([[x], [y], [1.]])).ravel()
out_cmd.append('%f,%f' % (tp[0], tp[1]))
if cmd in 'aA':
x, i = read_point(pdesc, i, 'x-axis-rotation')
y, i = read_point(pdesc, i, 'large-arc-flag')
s, i = read_point(pdesc, i, 'sweep-flag')
out_cmd += [x, int(y), int(s)]
x, i = read_point(pdesc, i, 'x2')
y, i = read_point(pdesc, i, 'y2')
if cmd >= 'a':
x += last_x
y += last_y
tp = np.asarray(trans.dot([[x], [y], [1.]])).ravel()
out_cmd.append(
'%f,%f' % (tp[0] - last_tp[0], tp[1] - last_tp[1]))
else:
tp = np.asarray(trans.dot([[x], [y], [1.]])).ravel()
out_cmd.append('%f,%f' % (tp[0], tp[1]))
if cmd == 'm':
cmd = 'l'
first = x, y, tp
elif cmd == 'M':
cmd = 'L'
first = x, y, tp
elif pdesc[i] in 'zZ':
out_cmd.append(pdesc[i])
x, y, tp = first
i += 1
else:
out_cmd.append(pdesc[i])
i += 1
return ' '.join([str(x) for x in out_cmd])
def transform_rect(self, xml_path, trans):
# additional stuff for squares
x = xml_path.get('x')
y = xml_path.get('y')
x = float(x)
y = float(y)
pos = trans.dot([[x], [y], [1.]])
xml_path.set('x', str(pos[0, 0]))
xml_path.set('y', str(pos[1, 0]))
w = xml_path.get('width')
if w is not None:
w = float(w)
pos = trans.dot([[w], [0.], [1.]]) - trans.dot([[0.], [0.], [1.]])
scale = np.sqrt(np.sum(np.array(pos) ** 2))
xml_path.set('width', str(scale))
h = xml_path.get('height')
if h is not None:
h = float(h)
pos = trans.dot([[0.], [h], [1.]]) - trans.dot([[0.], [0.], [1.]])
scale = np.sqrt(np.sum(np.array(pos) ** 2))
xml_path.set('height', str(scale))
self.transform_style(xml_path, trans)
    def filter_element(self, xml_element, style=None):
''' Assign a processing function / method to the given element, a
cleaning function to be called after the associated sub-tree is
processed, and a bool to tell if children should be skipped.
This method can be overloaded and is called for each XML tree element.
The default implementation returns None, which means that there is no
specific processing and the default behavior should happen.
Returns
-------
proc: (proc_callable, clean_callable, skip_children) or None
proc_callable and clean_callable may be None, meaning that normal
processing should happen.
The processing callable will be called with 3 arguments:
(xml_element, transform_matrix, style_dict).
The cleaning callable will be called without arguments.
if skip_children is True, children are skipped.
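
        Example (illustrative) of overriding in a subclass, skipping a
        hypothetical element with id 'legend'::

            def filter_element(self, xml_element, style=None):
                if xml_element.get('id') == 'legend':
                    # discard the subtree: no-op reader, no cleaner,
                    # skip children
                    return (lambda elem, trans, style: None, None, True)
                return None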
'''
return None
def merge_meshes_by_group(self, meshes):
if not aims:
raise RuntimeError('aims module is not available. '
'merge_meshes_by_group() needs it.')
for key, mesh_l in six.iteritems(meshes):
if isinstance(mesh_l, list) and len(mesh_l) != 0 \
and isinstance(mesh_l[0],
(aims.AimsTimeSurface_2,
aims.AimsTimeSurface_3)):
mesh = mesh_l[0]
for smesh in mesh_l[1:]:
aims.SurfaceManip.meshMerge(mesh, smesh)
meshes[key] = mesh
def read_paths(self, xml_et):
'''
Parse XML tree and extract meshes, text and other objects
Parameters
----------
xml_et: XML tree
obtained using xml.etree.cElementTree.parse(svg_filename)
'''
if not aims:
raise RuntimeError('aims module is not available. read_paths() '
'needs it.')
trans = np.matrix(np.eye(3))
todo = [(xml_et.getroot(), trans, None)]
self.mesh = aims.AimsTimeSurface(2)
self.mesh_list = []
self.mesh_dict = {}
index = 0
while todo:
child, trans, main_group = todo.pop(0)
if child is None:
                # sentinel entry: 'trans' actually holds the cleaner
                # callback(s) to run now that the subtree is processed
cleaners = trans
if not isinstance(cleaners, (tuple, list)):
cleaners = [cleaners]
for cleaner in cleaners:
cleaner()
continue
            # expose main_group on self so that filter_element() can override it
self.main_group = main_group
del main_group
style = self.get_style(child)
if style is None:
style = {} # so that read_path will not parse it again
trans = self.get_transform(child, trans)
if self.debug:
print('process child:', child)
reader = None
cleaner = None
skip_children = False
reader_cleaner = self.filter_element(child, style)
if reader_cleaner is not None:
reader, cleaner, skip_children = reader_cleaner
if reader is not None:
reader(child, trans, style)
if cleaner not in (None, [], ()):
                # queue a sentinel so the cleaner runs once this subtree is done
todo.insert(0, (None, cleaner, None))
if reader is None and style and style.get('display') == 'none' \
and child.get('{http://www.inkscape.org/namespaces/inkscape}label') \
not in self.explicitly_show:
# hidden layer, skip it
continue
if reader is not None:
pass
elif child.tag.endswith('}defs') or child.tag == 'defs':
# skip defs sub-tree
continue
elif child.tag in ('path', 'rect', 'polygon', 'circle') \
or child.tag.endswith('}path') \
or child.tag.endswith('}rect') \
or child.tag.endswith('}polygon') \
or child.tag.endswith('}circle'):
child_mesh = self.read_path(child, trans, style)
if self.concat_mesh == 'merge':
aims.SurfaceManip.meshMerge(self.mesh, child_mesh)
self.mesh.header().update(child_mesh.header())
elif self.concat_mesh == 'time':
self.mesh.vertex(index).assign(child_mesh.vertex())
self.mesh.polygon(index).assign(child_mesh.polygon())
self.mesh.header().update(child_mesh.header())
index += 1
elif self.concat_mesh == 'bygroup':
mesh = self.mesh_dict.setdefault(self.main_group,
aims.AimsTimeSurface(2))
aims.SurfaceManip.meshMerge(mesh, child_mesh)
if 'material' not in mesh.header():
mesh.header().update(child_mesh.header())
elif self.concat_mesh == 'list_bygroup':
meshes = self.mesh_dict.setdefault(self.main_group, [])
try:
meshes.append(child_mesh)
except Exception as e:
print('FAILED TO READ MESH:', e)
print('main_group:', self.main_group)
print(child.tag)
print(list(child.items()))
#aims.SurfaceManip.meshMerge(meshes, child_mesh)
raise
try:
if 'material' not in meshes[0].header():
meshes[0].header().update(child_mesh.header())
except Exception as e:
print('material:', self.main_group, meshes)
raise
else:
self.mesh_list.append(child_mesh)
elif child.tag.endswith('}clipPath') or child.tag == 'clipPath':
#print('clipPath')
# skip clipPaths
pass
elif child.tag.endswith('}text') or child.tag == 'text':
tgroup = self.main_group
if not tgroup.endswith('_text'):
tgroup += '_text'
current_text \
= self.mesh_dict.setdefault(
tgroup, {'object_type': 'List', 'objects': []})
text = child.text
if text is not None:
text = six.ensure_str(text)
current_text_o = self.text_description(
child, trans, style=style, text=text)
current_text['objects'].append(current_text_o)
size = self.text_size(current_text_o)
current_text_o['properties']['size'] = size
current_text_o['objects'][0]['properties']['position'] \
= [-size[0]/2., size[1]/2., 0]
elif child.tag.endswith('}tspan') or child.tag == 'tspan':
text = child.text
tgroup = self.main_group
if not tgroup.endswith('_text'):
tgroup += '_text'
current_text_o \
= self.mesh_dict[tgroup]['objects'][-1]
try:
current_text_d \
= current_text_o['objects'][-1]['properties']
except Exception as e:
print('error in text item:', file=sys.stderr)
print('current_text_o:', repr(current_text_o))
#traceback.print_exc()
raise
current_text = current_text_d['text']
if text is None:
print('tspan without text, id:', child.get('id'))
text = ''
else:
text = six.ensure_str(child.text)
if not current_text:
current_text = text
else:
current_text += '\n' + text
current_text_d['text'] = current_text
size = self.text_size(current_text_o)
current_text_o['properties']['size'] = size
current_text_o['objects'][0]['properties']['position'] \
= [-size[0]/2., size[1]/2., 0]
elif self.main_group is None \
and (child.tag.endswith('}g') or child.tag == 'g'):
self.main_group = child.get('id')
if not skip_children and len(child) != 0:
todo = [(c, trans, self.main_group)
for c in child] + todo
if self.concat_mesh in ('merge', 'time'):
return self.mesh
elif self.concat_mesh in ('bygroup', 'list_bygroup'):
return self.mesh_dict
return self.mesh_list
def text_description(self, xml_item, trans=None, style=None, text=''):
props = {
'object_type': 'TransformedObject',
'properties': {},
'objects': [],
}
if not xml_item.get('x') or not xml_item.get('y'):
print('text without coords, id:', xml_item.get('id'))
print(xml_item)
print(xml_item.items())
pos = (0., 0.)
else:
pos = (float(xml_item.get('x')), float(xml_item.get('y')))
if trans is not None:
p0 = np.array(((pos[0], pos[1], 1.),)).T
pos = list(np.array(trans.dot(p0)).ravel()[:2])
font_size = None
obj_props = {'text': text, 'position': [0, 0, 0.],
'font_size': 10., 'scale': 0.1, 'material': {'diffuse': [.5, .5, .5, 1.]}}
trobj_props = {'position': [pos[0], pos[1], 4.]}
props['properties'] = trobj_props
props['objects'].append({
'object_type': 'TextObject',
'name': 'Text',
'properties': obj_props,
})
if style is not None:
text_anchor = style.get('text-anchor')
if text_anchor is not None:
obj_props['text-anchor'] = text_anchor
if text_anchor == 'middle':
pass ## TODO
font_size = style.get('font-size')
if font_size is not None:
unit = ''
i = 1
while font_size[-i] not in '0123456789.':
unit = font_size[-i] + unit
i += 1
if i > 1:
font_size = font_size[:-i+1]
font_size = float(font_size)
if trans is not None:
pt = trans.dot([[0.], [font_size], [1.]]) \
- trans.dot([[0.], [0.], [1.]])
font_size = np.sqrt(pt[0, 0] * pt[0, 0]
+ pt[1, 0] * pt[1, 0])
if unit in ('', 'pt', 'px'):
font_size *= 10. / 3.95 # arbitrary
obj_props['font_size'] = font_size
font_family = style.get('font-family')
if font_family is not None:
obj_props['font_family'] = font_family
fill = style.get('fill')
if fill is not None:
col = [float(int(fill[1:3], 16)) / 255.,
float(int(fill[3:5], 16)) / 255.,
float(int(fill[5:7], 16)) / 255.,
1.]
# avoid dark colors (intensity < 0.4)
if col[0] * col[0] + col[1] * col[1] + col[2] * col[2] < 0.16:
col = [1., 1., 1., 1.]
obj_props['material'] = {'diffuse': col}
return props
@staticmethod
def text_size(text_item):
if not text_item:
return [0, 0]
text_obj = text_item['objects'][0]['properties']
scale = text_obj.get('scale', 1.) * text_obj.get('font_size', 10.)
text = text_obj.get('text')
if not text:
return [0, 0]
text = six.ensure_text(text).split('\n')
# assume fixed size font, with height/width ratio of 3.3.
# also assume a final scale factor of 2.12 (old: 0.0827)
# this is arbitrary but I don't know how to do better
scale *= 2.12 # 0.0827
hw_ratio = 2.5
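        # worked example (illustrative): 2 lines, longest line 4 chars,
        # font_size 10, scale attr 0.1 -> scale = 2.12,
        # height = 2 * 2.12 = 4.24, width = 4 * 2.12 / 2.5 ~= 3.39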
height = len(text) * scale
width = max([len(line) for line in text]) * scale / hw_ratio
return [width, height]
@staticmethod
def extrude(mesh, distance):
if not aims:
raise RuntimeError('aims module is not available. extrude() '
'needs it.')
up = aims.AimsTimeSurface(mesh)
tr = aims.AffineTransformation3d()
tr.setTranslation([0., 0., distance])
trans3d = mesh.header().get('transformation')
if trans3d:
trans3d = aims.AffineTransformation3d(trans3d)
trans3d.setTranslation([0, 0, 0])
trans = trans3d.transform([0., 0., distance])
tr.setTranslation(trans)
aims.SurfaceManip.meshTransform(up, tr)
walls = aims.AimsTimeSurface(3)
walls.header().update(
dict([(k, copy.deepcopy(v))
for k, v in six.iteritems(mesh.header())]))
material = {}
if 'material' in walls.header():
material = walls.header()['material']
material['face_culling'] = 0
walls.header()['material'] = material
vert0 = mesh.vertex()
poly0 = mesh.polygon()
vert = walls.vertex()
poly = walls.polygon()
vert.assign(vert0 + up.vertex())
nv = len(vert0)
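        # each 2-vertex segment (a, b) of the flat mesh forms a quad with its
        # extruded copy (a + nv, b + nv), split into the two triangles below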
for line in poly0:
poly.append((line[0], line[1], nv + line[0]))
poly.append((line[1], nv + line[1], nv + line[0]))
walls.updateNormals()
return up, walls
@staticmethod
def prune_empty_groups(xml):
todo = [(xml.getroot(), None, True)]
count = 0
total = 0
while todo:
element, parent, begin = todo.pop(0)
total += 1
if element.tag.endswith('}g'):
if len(element) == 0:
if parent is not None:
parent.remove(element)
count += 1
continue
if begin:
added = [(child, element, True) for child in element]
if parent is not None:
added.append((element, parent, False))
todo = added + todo
print('pruned', count, 'elements out of', total)
def copy_svg(self, xml):
xml2 = copy.deepcopy(xml)
todo = [xml2]
while todo:
item = todo.pop(0)
eid = item.get('id')
if eid is None:
eid = 'copy'
elif '-' in eid:
eid = '-'.join(eid.split('-')[:-1]) + '_copy'
else:
eid = eid + '_copy'
eid += '-%d' % self.id_count
self.id_count += 1
item.set('id', eid)
if item.tag == 'g' or item.tag.endswith('}g'):
todo = item[:] + todo
return xml2
def copy_item_properties(self, source, dest):
for prop in self.keep_transformed_properties:
value = source.get(prop)
if value is not None:
dest.set(prop, value)
def replace_filter_element(self, xml):
'''
Inside replace_elements, this function is called for each xml element,
and should return either the element itself (no replacement), or None
(element is discarded), or a replaced XML element.
The default method always returns the input element.
'''
return xml
def replace_elements(self, xml, replace_dict):
# replace_dict: {'id': {eid: {label: label, element: xml,
# children: bool, center: (x, y)}},
# 'label': {label: {element: xml, children: bool,
# center: (x, y)}}}
todo = [(xml.getroot(), np.matrix(np.eye(3)), None, None, None)]
count = 0
total = 0
if replace_dict is None:
replace_dict = {}
rid = replace_dict.get('id', {})
rlabel = replace_dict.get('label', {})
# print('replace_dict:', replace_dict)
while todo:
element, trans, parent, current_id, current_label = todo.pop(0)
element2 = self.replace_filter_element(element)
if element2 is None:
parent.remove(element)
continue
element = element2
total += 1
eid = element.get('id')
glabel = element.get('glabel') # glabel can replace id
relem = None
replace_children = False
if eid in rid:
relem = rid[eid]
elif glabel in rid:
relem = rid[glabel]
eid = glabel
elif eid is not None and '-' in eid:
eid = '-'.join(eid.split('-')[:-1])
if eid in rid:
relem = rid[eid]
if relem is not None:
current_id = eid
label = element.get('label')
if label is not None:
current_label = label
if relem is not None:
elabel = relem.get('label')
if elabel is not None and elabel != label:
relem = None
else:
relem = rlabel.get(label)
if relem is not None:
item = relem['element']
replace_children = relem.get('children', False)
center = relem.get('center')
# print('replace element:', eid, label, relem)
if element.get(
'{http://www.inkscape.org/namespaces/inkscape}'
'groupmode') == 'layer':
# it's a layer: process children
# TODO: we need a better (recursive) way to handle this.
replace_children = True
if replace_children:
trans = self.get_transform(element, trans)
tr_items = []
for child in element:
# allow per-element specific replacement
child2 = self.replace_filter_element(child)
if child2 is None:
continue
child = child2
label = child.get('label')
ritem = item
rcenter = center
if label is not None:
new_relem = rlabel.get(label)
if new_relem is None:
new_relem = rid.get(label)
if new_relem is not None:
ritem = new_relem['element']
rcenter = new_relem['center']
bbox = self.boundingbox(child, trans)
ecent = ((bbox[0][0] + bbox[1][0]) / 2,
(bbox[0][1] + bbox[1][1]) / 2)
tr = np.matrix(np.eye(3))
tr[0, 2] = ecent[0] - rcenter[0]
tr[1, 2] = ecent[1] - rcenter[1]
tr_items.append((ritem, tr, child))
while len(element) != 0:
element.remove(element[0])
for ritem, tr, orig in tr_items:
new_item = self.copy_svg(ritem)
eid = new_item.get('id')
if '-' in eid:
eid = '-'.join(eid.split('-')[:-1])
eid += '-%d' % self.id_count
new_item.set('id', eid)
self.copy_item_properties(orig, new_item)
self.id_count += 1
self.transform_subtree(new_item, relem.get('trans'),
tr, trans)
element.append(new_item)
else:
bbox = self.boundingbox(element, trans)
ecent = ((bbox[0][0] + bbox[1][0]) / 2,
(bbox[0][1] + bbox[1][1]) / 2)
tr = np.matrix(np.eye(3))
tr[0, 2] = ecent[0] - center[0]
tr[1, 2] = ecent[1] - center[1]
parent.remove(element)
new_item = self.copy_svg(item)
self.copy_item_properties(element, new_item)
eid = new_item.get('id')
if '-' in eid:
eid = '-'.join(eid.split('-')[:-1])
eid += '-%d' % self.id_count
new_item.set('id', eid)
self.id_count += 1
self.transform_subtree(new_item, relem.get('trans'), tr,
trans)
parent.append(new_item)
else:
                transm = self.get_transform(element, trans)
                added = [(child, transm, element, current_id, current_label)
                         for child in element]
todo = added + todo
#def clip_path(self, xml_path, trans, bmin, bmax):
#def read_point(pdesc, i, pt=None):
#j = i + 1
#try:
#while j < n and pdesc[j] in '0123456789.e-':
#j += 1
#x = float(pdesc[i:j])
#i = j + 1
#while i<len(pdesc) and pdesc[i] in ' ,':
#i += 1
#except Exception as e:
#print(e)
#print('failed reading', pt, ', i:', i)
#raise
#return x, i
#pdesc = xml_path.get('d')
#n = len(pdesc)
#i = 0
#cmd = 'M'
#x = 0
#y = 0
#tp = [0, 0]
#first = x, y, tp
#out_cmd = []
#clip = None
#points_kept = 0
#cond_cmd = []
#valid = False
#while i < n:
#while(i < n and pdesc[i] == ' '):
#i += 1
#if i == n:
##print('end of path in:', pdesc)
#break
##print('i:', i)
#last_x, last_y = x, y
#last_valid = valid
#last_tp = tp
#if pdesc[i] in 'mMcClLhHvVsSqQtTaA':
#cmd = pdesc[i]
#clip = 0
#cond_cmd.append(cmd)
#np = 0
##print('cmd:', cmd)
#i += 1
#while pdesc[i] == ' ':
#i += 1
#elif pdesc[i] in '-0123456789.':
#if cmd not in 'vV':
#x, i = read_point(pdesc, i, 'x')
#cond_cmd.append(x)
#if cmd >= 'a':
#x += last_x
#if cmd not in 'hH':
#y, i = read_point(pdesc, i, 'y')
#cond_cmd.append(y)
#j = i + 1
#if cmd >= 'a':
#y += last_y
#tp = np.asarray(trans.dot([[x], [y], [1.]])).ravel()
#np += 1
#if tp[0] < bmin[0] or tp[0] > bmax[0] \
#or tp[1] < bmin[1] or tp[1] > bmax[1]:
#clip += 1
#valid = False
#else:
#valid = True
#if cmd in 'cC':
##last_x, last_y = x, y
#x, i = read_point(pdesc, i, 'x2')
#y, i = read_point(pdesc, i, 'y2')
#cond_cmd.append(x)
#cond_cmd.append(y)
#if cmd >= 'a':
#x += last_x
#y += last_y
#tp = np.asarray(trans.dot([[x], [y], [1.]])).ravel()
##last_x, last_y = x, y
#x, i = read_point(pdesc, i, 'x3')
#y, i = read_point(pdesc, i, 'y3')
#cond_cmd.append(x)
#cond_cmd.append(y)
#if cmd >= 'a':
#x += last_x
#y += last_y
#tp = np.asarray(trans.dot([[x], [y], [1.]])).ravel()
#elif cmd in 'sSqQ':
##last_x, last_y = x, y
#x, i = read_point(pdesc, i, 'x2')
#y, i = read_point(pdesc, i, 'y2')
#cond_cmd.append(x)
#cond_cmd.append(y)
#if cmd >= 'a':
#x += last_x
#y += last_y
#tp = np.asarray(trans.dot([[x], [y], [1.]])).ravel()
#if cmd in 'aA':
#x, i = read_point(pdesc, i, 'x-axis-rotation')
#y, i = read_point(pdesc, i, 'large-arc-flag')
#s, i = read_point(pdesc, i, 'sweep-flag')
#cond_cmd += [x, int(y), int(s)]
#x, i = read_point(pdesc, i, 'x2')
#y, i = read_point(pdesc, i, 'y2')
#cond_cmd += [x, y]
#if cmd >= 'a':
#x += last_x
#y += last_y
#tp = np.asarray(trans.dot([[x], [y], [1.]])).ravel()
#if cmd in 'cCsSqQaA':
#np += 1
#if tp[0] < bmin[0] or tp[0] > bmax[0] \
#or tp[1] < bmin[1] or tp[1] > bmax[1]:
#clip += 1
#valid2 = False
#else:
#valid2 = True
#if not valid and not valid2:
## invalid segment: keep last as potential Move
#if cmd >= 'a':
#cond_cmd = ['m', x, y]
#else:
#cond_cmd = ['M', x, y]
#if cmd == 'm':
#cmd = 'l'
#first = x, y, tp
#elif cmd == 'M':
#cmd = 'L'
#first = x, y, tp
#elif pdesc[i] in 'zZ':
#if valid:
#out_cmd.append(pdesc[i])
#cond_cmd = []
#else:
#cond_cmd = [pdesc[i], x, y]
#x, y, tp = first
#i += 1
#else:
#out_cmd.append(pdesc[i])
#i += 1
#return ' '.join([str(x) for x in out_cmd])
def remove_paths_outside_bounds(self, xml_group, bbmin, bbmax, trans=None):
trans = self.get_transform(xml_group, trans)
to_remove = []
for path in xml_group:
pbmin, pbmax = self.boundingbox(path, trans)
if pbmin[0] > bbmax[0] or pbmin[1] > bbmax[1] \
or pbmax[0] < bbmin[0] or pbmax[1] < bbmin[1]:
to_remove.append(path)
for path in to_remove:
xml_group.remove(path)
def merge_paths(self, xml_group, trans=None):
if len(xml_group) == 0:
return
trans = self.get_transform(xml_group, trans)
path = xml_group[0]
ptrans = self.get_transform(path, trans)
d = [self.transform_path(path, ptrans)]
for p in xml_group[1:]:
ptrans = self.get_transform(p, trans)
d.append(self.transform_path(p, ptrans))
d = ' '.join(d)
path.set('d', d)
for i in six.moves.xrange(len(xml_group) - 1):
xml_group.remove(xml_group[1])
def save_mesh_dict(self, meshes, dirname, mesh_format='.mesh',
mesh_wf_format='.mesh'):
if not os.path.exists(dirname):
os.makedirs(dirname)
summary = {}
for key, mesh in meshes.items():
if type(mesh) in (list, dict):
# dict object (text...), save as .aobj
filename = os.path.join(dirname,
key.replace('/', '_') + '.aobj')
print('saving:', filename)
import json
#open(filename, 'w').write(repr(mesh) + '\n')
try:
json.dump(mesh, open(filename, 'w'))
except Exception as e:
print(e)
print('while saving object:', mesh)
summary.setdefault('text_fnames', {})[filename] = key
else:
if isinstance(mesh, aims.AimsTimeSurface_2):
ext = mesh_wf_format
else:
ext = mesh_format
if isinstance(ext, (tuple, list)):
ext, format = ext
else:
format = None
filename = os.path.join(dirname,
key.replace('/', '_') + ext)
print('saving:', filename)
aims.write(mesh, filename, format=format)
summary.setdefault("meshes", {})[filename] = key
return summary
def find_element(self, xml_et, id):
doc = xml_et.getroot()
todo = [(layer, None) for layer in doc]
while todo:
elem, strans = todo.pop(0)
eid = elem.get('id')
trans = self.get_transform(elem, strans)
if eid == id:
return elem, trans
todo += [(child, trans) for child in elem]
return None
def clip_rect_from_id(self, xml_et, rect_id):
if isinstance(rect_id, str):
elem = self.find_element(xml_et, rect_id)
if not elem:
raise ValueError('element not found: %s' % rect_id)
elem, trans = elem
# un-apply element transform
telem = elem.get('transform')
if telem:
telem = self.get_transform(telem)
if trans is not None:
trans = trans * scipy.linalg.inv(telem)
else:
trans = scipy.linalg.inv(telem)
print('elem:', elem)
print(elem.items())
bbox = self.boundingbox(elem, trans)
dims = [bbox[0][0],
bbox[0][1],
bbox[1][0] - bbox[0][0],
bbox[1][1] - bbox[0][1]]
else:
dims = rect_id
return dims
def clip_page(self, xml_et, dims_or_rect):
dims = self.clip_rect_from_id(xml_et, dims_or_rect)
doc = xml_et.getroot()
init_w = float(doc.get('width'))
init_h = float(doc.get('height'))
init_vbox = [float(x) for x in doc.get('viewBox').split()[2:]]
ratio = init_w / init_vbox[0]
doc.set('width', str(dims[2] * ratio))
doc.set('height', str(dims[3] * ratio))
doc.set('viewBox', '0 0 %f %f' % (dims[2], dims[3]))
transl = 'translate(%f, %f)' % (-dims[0], -dims[1])
for layer in doc:
if layer.tag.endswith('}g'):
ltrans = layer.get('transform')
if ltrans:
ltrans = '%s %s' % (transl, ltrans)
else:
ltrans = transl
layer.set('transform', ltrans)
def read_xml(self, svg_filename):
self.svg_filename = svg_filename
self.svg = ET.parse(svg_filename)
return self.svg
if __name__ == '__main__':
filenames = [
'/volatile/riviere/neurosvn/capsul/trunk/doc/source/_static/capsul_logo.svg',
'/home/riviere/neurosvn/capsul/trunk/doc/source/_static/capsul_logo.svg',
'/tmp/galeries_big.svg',
'/home/riviere/catacombes/plans/14/big_2017/GRS-2010-galeries.svg',
'/home/riviere/catacombes/plans/14/big_2017/PARIS-2017.svg',
]
svg_filename = filenames[-1]
if not os.path.exists(svg_filename):
svg_filename = [f for f in filenames if os.path.exists(f)][0]
svg_mesh = SvgToMesh('bygroup')
xml_et = svg_mesh.read_xml(svg_filename)
mesh = svg_mesh.read_paths(xml_et)
|
{"hexsha": "944015b586009e13f508bca21e128de35d63d247", "size": 67292, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/catamap/svg_to_mesh.py", "max_stars_repo_name": "denisri/fdc_catamaps", "max_stars_repo_head_hexsha": "3636c925d0407fbb12a7dcd3ff953018568aee7e", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "python/catamap/svg_to_mesh.py", "max_issues_repo_name": "denisri/fdc_catamaps", "max_issues_repo_head_hexsha": "3636c925d0407fbb12a7dcd3ff953018568aee7e", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "python/catamap/svg_to_mesh.py", "max_forks_repo_name": "denisri/fdc_catamaps", "max_forks_repo_head_hexsha": "3636c925d0407fbb12a7dcd3ff953018568aee7e", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.7850144092, "max_line_length": 116, "alphanum_fraction": 0.4468733282, "include": true, "reason": "import numpy,import scipy", "num_tokens": 15775}
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import deque
import numpy as np
import random
class EpisodeExperience(object):
def __init__(self, episode_len):
self.max_len = episode_len
self.episode_state = []
self.episode_actions = []
self.episode_reward = []
self.episode_terminated = []
self.episode_obs = []
self.episode_available_actions = []
self.episode_filled = []
@property
def count(self):
return len(self.episode_state)
def add(self, state, actions, reward, terminated, obs, available_actions,
filled):
assert self.count < self.max_len
self.episode_state.append(state)
self.episode_actions.append(actions)
self.episode_reward.append(reward)
self.episode_terminated.append(terminated)
self.episode_obs.append(obs)
self.episode_available_actions.append(available_actions)
self.episode_filled.append(filled)
def get_data(self):
assert self.count == self.max_len
return np.array(self.episode_state), np.array(self.episode_actions),\
np.array(self.episode_reward), np.array(self.episode_terminated),\
np.array(self.episode_obs),\
np.array(self.episode_available_actions), np.array(self.episode_filled)
class EpisodeReplayBuffer(object):
def __init__(self, max_buffer_size):
self.max_buffer_size = max_buffer_size
self.buffer = deque(maxlen=max_buffer_size)
def add(self, episode_experience):
self.buffer.append(episode_experience)
@property
def count(self):
return len(self.buffer)
def sample_batch(self, batch_size):
batch = []
if self.count < batch_size:
batch = random.sample(self.buffer, self.count)
else:
batch = random.sample(self.buffer, batch_size)
s_batch, a_batch, r_batch, t_batch, obs_batch, available_actions_batch,\
filled_batch = [], [], [], [], [], [], []
for episode in batch:
s, a, r, t, obs, available_actions, filled = episode.get_data()
s_batch.append(s)
a_batch.append(a)
r_batch.append(r)
t_batch.append(t)
obs_batch.append(obs)
available_actions_batch.append(available_actions)
filled_batch.append(filled)
filled_batch = np.array(filled_batch)
r_batch = np.array(r_batch)
t_batch = np.array(t_batch)
a_batch = np.array(a_batch)
obs_batch = np.array(obs_batch)
available_actions_batch = np.array(available_actions_batch)
return s_batch, a_batch, r_batch, t_batch, obs_batch,\
available_actions_batch, filled_batch
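

# Minimal usage sketch (illustrative; the episode length, buffer size and
# batch size below are assumptions, not values from the training code):
#
#     buffer = EpisodeReplayBuffer(max_buffer_size=5000)
#     episode = EpisodeExperience(episode_len=60)
#     while episode.count < episode.max_len:
#         episode.add(state, actions, reward, terminated, obs,
#                     available_actions, filled)
#     buffer.add(episode)
#     s, a, r, t, obs, avail, filled = buffer.sample_batch(batch_size=32)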
|
{"hexsha": "e2cc33b09abb83267017e2a06cc291e1ea701a2f", "size": 3372, "ext": "py", "lang": "Python", "max_stars_repo_path": "benchmark/torch/qmix/replay_buffer.py", "max_stars_repo_name": "lp2333/PARL", "max_stars_repo_head_hexsha": "e4bde1f5b7e69c5f8d3ee3a90a647dfe12204bd3", "max_stars_repo_licenses": ["ECL-2.0", "Apache-2.0"], "max_stars_count": 3172, "max_stars_repo_stars_event_min_datetime": "2018-05-22T02:02:29.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T09:14:56.000Z", "max_issues_repo_path": "benchmark/torch/qmix/replay_buffer.py", "max_issues_repo_name": "BKBK00/PARL", "max_issues_repo_head_hexsha": "f508bc6085420431b504441c7ff129e64826603e", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 422, "max_issues_repo_issues_event_min_datetime": "2018-05-17T16:58:45.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T02:03:25.000Z", "max_forks_repo_path": "benchmark/torch/qmix/replay_buffer.py", "max_forks_repo_name": "BKBK00/PARL", "max_forks_repo_head_hexsha": "f508bc6085420431b504441c7ff129e64826603e", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 794, "max_forks_repo_forks_event_min_datetime": "2018-05-21T18:33:19.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-30T13:38:09.000Z", "avg_line_length": 35.4947368421, "max_line_length": 87, "alphanum_fraction": 0.6607354686, "include": true, "reason": "import numpy", "num_tokens": 712}
|
import json
import pandas as pd
import numpy as np
from sklearn.feature_extraction import DictVectorizer
from sklearn.linear_model import Perceptron
from sklearn.model_selection import train_test_split
from sklearn.metrics import precision_recall_fscore_support, f1_score
from sklearn.externals import joblib
#Try and skip bad lines
dframe = pd.read_csv("ner.csv", encoding = "ISO-8859-1", error_bad_lines=False)
#Drop null values
dframe.dropna(inplace=True)
#Check if null values left
#print dframe[dframe.isnull().any(axis=1)].size
#First 10000 rows
dframe = dframe[:10000]
#Drop the index and sentence id columns; 'tag' is the prediction target
x_df = dframe.drop(['Unnamed: 0', 'sentence_idx', 'tag'], axis=1)
#print x_df.head()
vectorizer = DictVectorizer(sparse=False)
x = vectorizer.fit_transform(x_df.to_dict("records"))
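#e.g. (illustrative): a record like {'word': 'Paris', 'pos': 'NNP'} becomes a
#row with one-hot columns such as 'word=Paris' and 'pos=NNP'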
#print x.shape
#The output class
y = dframe.tag.values
all_classes = np.unique(y)
#print all_classes.shape
#print y.shape
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=0)
print(x_train.shape)
print(y_train.shape)
clf = Perceptron(verbose=10, n_jobs=-1, n_iter=5)
#partial_fit needs the full class list, since a batch may not contain every tag
clf.partial_fit(x_train, y_train, all_classes)
joblib.dump(clf, 'clf.model')
print("Done")
clf = joblib.load('clf.model')
print(f1_score(clf.predict(x_test), y_test, average="micro"))
|
{"hexsha": "3b392975505944ad927f0fd1cbd23a56112b1928", "size": 1331, "ext": "py", "lang": "Python", "max_stars_repo_path": "SciKit/main.py", "max_stars_repo_name": "varunchitale/Named_Entity_Recognition", "max_stars_repo_head_hexsha": "9a2e9b5db44c09b44469c115269e74446f8d489d", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "SciKit/main.py", "max_issues_repo_name": "varunchitale/Named_Entity_Recognition", "max_issues_repo_head_hexsha": "9a2e9b5db44c09b44469c115269e74446f8d489d", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "SciKit/main.py", "max_forks_repo_name": "varunchitale/Named_Entity_Recognition", "max_forks_repo_head_hexsha": "9a2e9b5db44c09b44469c115269e74446f8d489d", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.1833333333, "max_line_length": 88, "alphanum_fraction": 0.7655897821, "include": true, "reason": "import numpy", "num_tokens": 355}
|
from typing import Union
import numpy as np
from anndata import AnnData
def interpolation_spateo(
adata,
genes: Union[str, list] = None,
max_iter: int = 1000,
data_batch_size: int = 5000,
autoencoder_batch_size: int = 50,
) -> AnnData:
"""
    Predict gene expression at missing locations and learn a continuous gene expression pattern over space.
(Not only for 3D coordinate data, but also for 2D coordinate data.)
Args:
adata: AnnData object that contains spatial (numpy.ndarray) in the `obsm` attribute.
        genes: List of genes whose expression will be interpolated.
max_iter: The maximum iteration the network will be trained.
data_batch_size: The size of the data sample batches to be generated in each iteration.
        autoencoder_batch_size: The size of the auto-encoder training batches to be generated in each iteration.
            Must be no greater than data_batch_size.
Returns:
new_adata: adata containing the interpolated gene expression matrix.
"""
try:
from spateo.tools import DataSampler, DeepInterpolation, interpolation_nn
except ImportError:
raise ImportError(
"\nPlease install spateo-release"
"\n\n\tgit clone https://github.com/aristoteleo/spateo-release.git"
)
# X: The spatial coordinates of each cell / binning / segmentation.
X = adata.obsm["spatial"]
# Y: The expression values at the corresponding coordinates X.
Y = adata[:, genes].X
# Loads and retains the data pairs (X, Y) and delivers the batches of them to the DeepInterpolation module upon calling.
data_dict = {"X": X, "Y": Y}
velocity_data_sampler = DataSampler(data=data_dict, normalize_data=False)
NN_model = DeepInterpolation(
model=interpolation_nn,
data_sampler=velocity_data_sampler,
enforce_positivity=False,
)
NN_model.train(
max_iter=max_iter,
data_batch_size=data_batch_size,
autoencoder_batch_size=autoencoder_batch_size,
data_lr=1e-4,
autoencoder_lr=1e-4,
)
# Get the new adata after interpolation (Not only for 3D coordinate data, but also for 2D coordinate data).
new_adata = adata.copy()
new_adata[:, genes].X = np.asarray(NN_model.predict(input_x=X))
return new_adata
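

# Minimal usage sketch (illustrative; the gene names and iteration count are
# assumptions):
#
#     adata_interp = interpolation_spateo(adata, genes=["gene_a", "gene_b"],
#                                         max_iter=500)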
|
{"hexsha": "3a1f04030a78f30e7d2a0d5dfe76065ef613378d", "size": 2339, "ext": "py", "lang": "Python", "max_stars_repo_path": "stRT/preprocess/gene/gene_interpolation_spateo.py", "max_stars_repo_name": "Yao-14/stAnalysis", "max_stars_repo_head_hexsha": "d08483ce581f5b03cfcad8be500aaa64b0293f74", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "stRT/preprocess/gene/gene_interpolation_spateo.py", "max_issues_repo_name": "Yao-14/stAnalysis", "max_issues_repo_head_hexsha": "d08483ce581f5b03cfcad8be500aaa64b0293f74", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "stRT/preprocess/gene/gene_interpolation_spateo.py", "max_forks_repo_name": "Yao-14/stAnalysis", "max_forks_repo_head_hexsha": "d08483ce581f5b03cfcad8be500aaa64b0293f74", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.126984127, "max_line_length": 124, "alphanum_fraction": 0.687045746, "include": true, "reason": "import numpy", "num_tokens": 543}
|
import numpy as np
n,m = input().split()
arr = np.array([input().split() for _ in range(int(n))], int)
np.set_printoptions(legacy='1.13')
print(np.mean(arr,axis=1))
print(np.var(arr,axis=0))
print(np.std(arr))
|
{"hexsha": "7256aaf4d3387a4ad738a14e52d0d3307f00b1cd", "size": 209, "ext": "py", "lang": "Python", "max_stars_repo_path": "Mean var std.py", "max_stars_repo_name": "jibinmathew691993/PythonHackerrank", "max_stars_repo_head_hexsha": "14ab5b620435a006d5ccff17536bc01acd7c22dc", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Mean var std.py", "max_issues_repo_name": "jibinmathew691993/PythonHackerrank", "max_issues_repo_head_hexsha": "14ab5b620435a006d5ccff17536bc01acd7c22dc", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Mean var std.py", "max_forks_repo_name": "jibinmathew691993/PythonHackerrank", "max_forks_repo_head_hexsha": "14ab5b620435a006d5ccff17536bc01acd7c22dc", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.8571428571, "max_line_length": 61, "alphanum_fraction": 0.6794258373, "include": true, "reason": "import numpy", "num_tokens": 63}
|
import copy
import random
from functools import wraps
import numpy as np
from deap.gp import PrimitiveTree, compile, cxOnePoint, mutUniform
from scipy.special import softmax
class MultipleGeneGP():
def __init__(self, content, gene_num):
self.gene = []
self.gene_num = gene_num
for i in range(self.gene_num):
self.gene.append(PrimitiveTree(content()))
def random_select(self):
return self.gene[random.randint(0, self.gene_num - 1)]
def weight_select(self):
weight = np.abs(self.coef)[:, :-1].mean(axis=0)
p = softmax(-abs(weight))
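        # softmax over the negated mean |coefficient| of each gene: genes that
        # contribute less to the linear model get a higher chance of being
        # selected for variation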
return self.gene[np.random.choice(np.arange(len(weight)), p=p)]
def deterministic_select(self):
weight = np.abs(self.coef)[:, :-1].mean(axis=0)
return self.gene[np.argmax(-weight)]
def __len__(self):
return sum([len(g) for g in self.gene])
def multiple_gene_evaluation(compiled_genes, x):
result = []
for gene in compiled_genes:
result.append(gene(*x))
return result
def multiple_gene_initialization(container, generator, gene_num=5):
return container(generator, gene_num)
def multiple_gene_compile(expr: MultipleGeneGP, pset):
gene_compiled = []
for gene in expr.gene:
gene_compiled.append(compile(gene, pset))
return gene_compiled
def cxOnePoint_multiple_gene(ind1: MultipleGeneGP, ind2: MultipleGeneGP):
cxOnePoint(ind1.random_select(), ind2.random_select())
return ind1, ind2
def mutUniform_multiple_gene(individual: MultipleGeneGP, expr, pset):
mutUniform(individual.random_select(), expr, pset)
return individual,
def cxOnePoint_multiple_gene_weight(ind1: MultipleGeneGP, ind2: MultipleGeneGP):
cxOnePoint(ind1.weight_select(), ind2.weight_select())
return ind1, ind2
def mutUniform_multiple_gene_weight(individual: MultipleGeneGP, expr, pset):
mutUniform(individual.weight_select(), expr, pset)
return individual,
def cxOnePoint_multiple_gene_deterministic(ind1: MultipleGeneGP, ind2: MultipleGeneGP):
cxOnePoint(ind1.deterministic_select(), ind2.deterministic_select())
return ind1, ind2
def mutUniform_multiple_gene_deterministic(individual: MultipleGeneGP, expr, pset):
mutUniform(individual.deterministic_select(), expr, pset)
return individual,
def staticLimit_multiple_gene(key, max_value):
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
keep_inds = [copy.deepcopy(ind) for ind in args]
new_inds = list(func(*args, **kwargs))
for i, ind in enumerate(new_inds):
limit_exceed = False
for x in ind.gene:
if key(x) > max_value:
limit_exceed = True
break
if limit_exceed:
new_inds[i] = random.choice(keep_inds)
return new_inds
return wrapper
return decorator
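

# Typical DEAP-style usage (illustrative; the attribute name and depth limit
# are assumptions):
#
#     import operator
#     toolbox.decorate("mate", staticLimit_multiple_gene(
#         key=operator.attrgetter("height"), max_value=17))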
def result_calculation(func, data):
result = multiple_gene_evaluation(func, data.T)
for i in range(len(result)):
yp = result[i]
if not isinstance(yp, np.ndarray):
yp = np.full(len(data), 0)
elif yp.size == 1:
yp = np.full(len(data), yp)
result[i] = yp
result = np.concatenate([np.array(result).T, np.ones((len(data), 1))], axis=1)
return result
|
{"hexsha": "a8e118389d3ee9616a2ad4eab5508189e6fd220e", "size": 3382, "ext": "py", "lang": "Python", "max_stars_repo_path": "pstree/multigene_gp.py", "max_stars_repo_name": "Hengzhe-Zhang/PS-Tree", "max_stars_repo_head_hexsha": "88750666f4391f7cc62b34d676f334e22fed3f68", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-06-29T23:47:59.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-09T03:16:11.000Z", "max_issues_repo_path": "pstree/multigene_gp.py", "max_issues_repo_name": "Hengzhe-Zhang/PS-Tree", "max_issues_repo_head_hexsha": "88750666f4391f7cc62b34d676f334e22fed3f68", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2022-03-11T07:51:35.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-11T09:47:19.000Z", "max_forks_repo_path": "pstree/multigene_gp.py", "max_forks_repo_name": "Hengzhe-Zhang/PS-Tree", "max_forks_repo_head_hexsha": "88750666f4391f7cc62b34d676f334e22fed3f68", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-03-11T07:38:50.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-11T07:38:50.000Z", "avg_line_length": 29.9292035398, "max_line_length": 87, "alphanum_fraction": 0.6614429332, "include": true, "reason": "import numpy,from scipy", "num_tokens": 802}
|
// Copyright 2012 Cloudera Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <boost/date_time/posix_time/posix_time.hpp>
#include <boost/date_time/gregorian/gregorian.hpp>
#include <boost/date_time/time_zone_base.hpp>
#include <boost/date_time/local_time/local_time.hpp>
#include <boost/algorithm/string.hpp>
#include <gutil/strings/substitute.h>
#include "exprs/timestamp-functions.h"
#include "exprs/expr.h"
#include "exprs/anyval-util.h"
#include "runtime/tuple-row.h"
#include "runtime/timestamp-value.h"
#include "util/path-builder.h"
#include "runtime/string-value.inline.h"
#include "udf/udf.h"
#include "udf/udf-internal.h"
#include "runtime/runtime-state.h"
#define TIMEZONE_DATABASE "be/files/date_time_zonespec.csv"
using namespace boost;
using namespace boost::posix_time;
using namespace boost::local_time;
using namespace boost::gregorian;
using namespace impala_udf;
using namespace std;
using namespace strings;
namespace impala {
// Constant strings used for DayName function.
const char* TimestampFunctions::SUNDAY = "Sunday";
const char* TimestampFunctions::MONDAY = "Monday";
const char* TimestampFunctions::TUESDAY = "Tuesday";
const char* TimestampFunctions::WEDNESDAY = "Wednesday";
const char* TimestampFunctions::THURSDAY = "Thursday";
const char* TimestampFunctions::FRIDAY = "Friday";
const char* TimestampFunctions::SATURDAY = "Saturday";
void TimestampFunctions::UnixAndFromUnixPrepare(FunctionContext* context,
FunctionContext::FunctionStateScope scope) {
if (scope != FunctionContext::THREAD_LOCAL) return;
DateTimeFormatContext* dt_ctx = NULL;
if (context->IsArgConstant(1)) {
StringVal fmt_val = *reinterpret_cast<StringVal*>(context->GetConstantArg(1));
const StringValue& fmt_ref = StringValue::FromStringVal(fmt_val);
if (fmt_val.is_null || fmt_ref.len <= 0) {
TimestampFunctions::ReportBadFormat(context, fmt_val, true);
return;
}
dt_ctx = new DateTimeFormatContext(fmt_ref.ptr, fmt_ref.len);
bool parse_result = TimestampParser::ParseFormatTokens(dt_ctx);
if (!parse_result) {
delete dt_ctx;
TimestampFunctions::ReportBadFormat(context, fmt_val, true);
return;
}
} else {
// If our format string is constant, then we benefit from it only being parsed once in
// the code above. If it's not constant, then we can reuse a context by resetting it.
// This is much cheaper vs alloc/dealloc'ing a context for each evaluation.
dt_ctx = new DateTimeFormatContext();
}
context->SetFunctionState(scope, dt_ctx);
}
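
// Example (illustrative): SQL calls such as from_unixtime(ts, 'yyyy-MM-dd') or
// unix_timestamp('2014-02-14', 'yyyy-MM-dd') go through this prepare/close
// pair, so a constant format string is tokenized only once.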
void TimestampFunctions::UnixAndFromUnixClose(FunctionContext* context,
FunctionContext::FunctionStateScope scope) {
if (scope == FunctionContext::THREAD_LOCAL) {
DateTimeFormatContext* dt_ctx =
reinterpret_cast<DateTimeFormatContext*>(context->GetFunctionState(scope));
delete dt_ctx;
}
}
template <class TIME>
StringVal TimestampFunctions::FromUnix(FunctionContext* context, const TIME& intp) {
if (intp.is_null) return StringVal::null();
TimestampValue t(boost::posix_time::from_time_t(intp.val));
return AnyValUtil::FromString(context, lexical_cast<string>(t));
}
template <class TIME>
StringVal TimestampFunctions::FromUnix(FunctionContext* context, const TIME& intp,
const StringVal& fmt) {
if (fmt.is_null || fmt.len <= 0) {
TimestampFunctions::ReportBadFormat(context, fmt, false);
return StringVal::null();
}
if (intp.is_null) return StringVal::null();
TimestampValue t(boost::posix_time::from_time_t(intp.val));
void* state = context->GetFunctionState(FunctionContext::THREAD_LOCAL);
DateTimeFormatContext* dt_ctx = reinterpret_cast<DateTimeFormatContext*>(state);
if (!context->IsArgConstant(1)) {
dt_ctx->Reset(reinterpret_cast<const char*>(fmt.ptr), fmt.len);
if (!TimestampParser::ParseFormatTokens(dt_ctx)){
TimestampFunctions::ReportBadFormat(context, fmt, false);
return StringVal::null();
}
}
int buff_len = dt_ctx->fmt_out_len + 1;
StringVal result(context, buff_len);
result.len = t.Format(*dt_ctx, buff_len, reinterpret_cast<char*>(result.ptr));
if (result.len <= 0) return StringVal::null();
return result;
}
IntVal TimestampFunctions::Unix(FunctionContext* context, const StringVal& string_val,
const StringVal& fmt) {
if (fmt.is_null || fmt.len <= 0) {
TimestampFunctions::ReportBadFormat(context, fmt, false);
return IntVal::null();
}
if(string_val.is_null || string_val.len <= 0) return IntVal::null();
void* state = context->GetFunctionState(FunctionContext::THREAD_LOCAL);
DateTimeFormatContext* dt_ctx = reinterpret_cast<DateTimeFormatContext*>(state);
if (!context->IsArgConstant(1)) {
dt_ctx->Reset(reinterpret_cast<const char*>(fmt.ptr), fmt.len);
if (!TimestampParser::ParseFormatTokens(dt_ctx)){
ReportBadFormat(context, fmt, false);
return IntVal::null();
}
}
  TimestampValue tv(
      reinterpret_cast<const char*>(string_val.ptr), string_val.len, *dt_ctx);
  if (tv.date().is_special()) return IntVal::null();
  ptime temp;
  tv.ToPtime(&temp);
return IntVal(to_time_t(temp));
}
IntVal TimestampFunctions::Unix(FunctionContext* context, const TimestampVal& ts_val) {
if (ts_val.is_null) return IntVal::null();
const TimestampValue& ts_value_ref = TimestampValue::FromTimestampVal(ts_val);
if (ts_value_ref.get_date().is_special()) return IntVal::null();
ptime temp;
ts_value_ref.ToPtime(&temp);
return IntVal(to_time_t(temp));
}
IntVal TimestampFunctions::Unix(FunctionContext* context) {
  TimestampValue tv(context->impl()->state()->now());
  if (tv.date().is_special()) return IntVal::null();
  ptime temp;
  tv.ToPtime(&temp);
  return IntVal(to_time_t(temp));
}
IntVal TimestampFunctions::UnixFromString(FunctionContext* context, const StringVal& sv) {
if (sv.is_null) return IntVal::null();
TimestampValue tv(reinterpret_cast<const char *>(sv.ptr), sv.len);
if (tv.date().is_special()) return IntVal::null();
ptime temp;
tv.ToPtime(&temp);
return IntVal(to_time_t(temp));
}
void TimestampFunctions::ReportBadFormat(FunctionContext* context,
const StringVal& format, bool is_error) {
stringstream ss;
const StringValue& fmt = StringValue::FromStringVal(format);
if (format.is_null || format.len <= 0) {
ss << "Bad date/time coversion format: format string is NULL or has 0 length";
} else {
ss << "Bad date/time coversion format: " << fmt.DebugString();
}
if (is_error) {
context->SetError(ss.str().c_str());
} else {
context->AddWarning(ss.str().c_str());
}
}
StringVal TimestampFunctions::DayName(FunctionContext* context, const TimestampVal& ts) {
if (ts.is_null) return StringVal::null();
IntVal dow = DayOfWeek(context, ts);
switch(dow.val) {
case 1: return StringVal(SUNDAY);
case 2: return StringVal(MONDAY);
case 3: return StringVal(TUESDAY);
case 4: return StringVal(WEDNESDAY);
case 5: return StringVal(THURSDAY);
case 6: return StringVal(FRIDAY);
case 7: return StringVal(SATURDAY);
default: return StringVal::null();
}
}
IntVal TimestampFunctions::Year(FunctionContext* context, const TimestampVal& ts_val) {
if (ts_val.is_null) return IntVal::null();
const TimestampValue& ts_value = TimestampValue::FromTimestampVal(ts_val);
if (ts_value.get_date().is_special()) return IntVal::null();
return IntVal(ts_value.get_date().year());
}
IntVal TimestampFunctions::Month(FunctionContext* context, const TimestampVal& ts_val) {
if (ts_val.is_null) return IntVal::null();
const TimestampValue& ts_value = TimestampValue::FromTimestampVal(ts_val);
if (ts_value.get_date().is_special()) return IntVal::null();
return IntVal(ts_value.get_date().month());
}
IntVal TimestampFunctions::DayOfWeek(FunctionContext* context,
const TimestampVal& ts_val) {
if (ts_val.is_null) return IntVal::null();
  const TimestampValue& ts_value = TimestampValue::FromTimestampVal(ts_val);
  if (ts_value.get_date().is_special()) return IntVal::null();
  // SQL has the result in [1,7] where 1 = Sunday. Boost has 0 = Sunday.
  return IntVal(ts_value.get_date().day_of_week() + 1);
}
IntVal TimestampFunctions::DayOfMonth(FunctionContext* context,
const TimestampVal& ts_val) {
if (ts_val.is_null) return IntVal::null();
const TimestampValue& ts_value = TimestampValue::FromTimestampVal(ts_val);
if (ts_value.get_date().is_special()) return IntVal::null();
return IntVal(ts_value.get_date().day());
}
IntVal TimestampFunctions::DayOfYear(FunctionContext* context,
const TimestampVal& ts_val) {
if (ts_val.is_null) return IntVal::null();
const TimestampValue& ts_value = TimestampValue::FromTimestampVal(ts_val);
if (ts_value.get_date().is_special()) return IntVal::null();
return IntVal(ts_value.get_date().day_of_year());
}
IntVal TimestampFunctions::WeekOfYear(FunctionContext* context,
const TimestampVal& ts_val) {
if (ts_val.is_null) return IntVal::null();
const TimestampValue& ts_value = TimestampValue::FromTimestampVal(ts_val);
if (ts_value.get_date().is_special()) return IntVal::null();
return IntVal(ts_value.get_date().week_number());
}
IntVal TimestampFunctions::Hour(FunctionContext* context, const TimestampVal& ts_val) {
if (ts_val.is_null) return IntVal::null();
const TimestampValue& ts_value = TimestampValue::FromTimestampVal(ts_val);
if (ts_value.get_time().is_special()) return IntVal::null();
return IntVal(ts_value.get_time().hours());
}
IntVal TimestampFunctions::Minute(FunctionContext* context, const TimestampVal& ts_val) {
if (ts_val.is_null) return IntVal::null();
const TimestampValue& ts_value = TimestampValue::FromTimestampVal(ts_val);
if (ts_value.get_time().is_special()) return IntVal::null();
return IntVal(ts_value.get_time().minutes());
}
IntVal TimestampFunctions::Second(FunctionContext* context, const TimestampVal& ts_val) {
if (ts_val.is_null) return IntVal::null();
const TimestampValue& ts_value = TimestampValue::FromTimestampVal(ts_val);
if (ts_value.get_time().is_special()) return IntVal::null();
return IntVal(ts_value.get_time().seconds());
}
TimestampVal TimestampFunctions::Now(FunctionContext* context) {
const TimestampValue* now = context->impl()->state()->now();
if (now->NotADateTime()) return TimestampVal::null();
TimestampVal return_val;
now->ToTimestampVal(&return_val);
return return_val;
}
StringVal TimestampFunctions::ToDate(FunctionContext* context,
const TimestampVal& ts_val) {
if (ts_val.is_null) return StringVal::null();
  const TimestampValue& ts_value = TimestampValue::FromTimestampVal(ts_val);
string result = to_iso_extended_string(ts_value.get_date());
return AnyValUtil::FromString(context, result);
}
template <bool ISADD, class VALTYPE, class UNIT>
TimestampVal TimestampFunctions::DateAddSub(FunctionContext* context,
const TimestampVal& ts_val, const VALTYPE& count) {
if (ts_val.is_null || count.is_null) return TimestampVal::null();
const TimestampValue& ts_value = TimestampValue::FromTimestampVal(ts_val);
if (ts_value.get_date().is_special()) return TimestampVal::null();
UNIT unit(count.val);
TimestampValue value;
try {
// Adding/subtracting boost::gregorian::dates can throw (via constructing a new date)
value = TimestampValue(
(ISADD ? ts_value.get_date() + unit : ts_value.get_date() - unit),
ts_value.get_time());
} catch (const std::exception& e) {
context->AddWarning(Substitute("Cannot $0 date interval $1: $2",
ISADD ? "add" : "subtract", count.val, e.what()).c_str());
return TimestampVal::null();
}
TimestampVal return_val;
value.ToTimestampVal(&return_val);
return return_val;
}
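// Illustrative failure mode handled by the try/catch above (relying on boost's
// documented gregorian date range, roughly 1400-01-01 through 9999-12-31, an
// assumption about the library rather than Impala-specific behaviour): adding
// e.g. years(20000) pushes the result out of range, the date constructor
// throws, and the function returns NULL after emitting a warning.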
template <bool ISADD, class VALTYPE, class UNIT>
TimestampVal TimestampFunctions::TimeAddSub(FunctionContext* context,
const TimestampVal& ts_val, const VALTYPE& count) {
if (ts_val.is_null || count.is_null) return TimestampVal::null();
const TimestampValue& ts_value = TimestampValue::FromTimestampVal(ts_val);
if (ts_value.get_date().is_special()) return TimestampVal::null();
UNIT unit(count.val);
ptime p(ts_value.get_date(), ts_value.get_time());
TimestampValue value(ISADD ? p + unit : p - unit);
TimestampVal return_val;
value.ToTimestampVal(&return_val);
return return_val;
}
IntVal TimestampFunctions::DateDiff(FunctionContext* context,
const TimestampVal& ts_val1,
const TimestampVal& ts_val2) {
if (ts_val1.is_null || ts_val2.is_null) return IntVal::null();
const TimestampValue& ts_value1 = TimestampValue::FromTimestampVal(ts_val1);
const TimestampValue& ts_value2 = TimestampValue::FromTimestampVal(ts_val2);
if (ts_value1.get_date().is_special() || ts_value2.get_date().is_special()) {
return IntVal::null();
}
return IntVal((ts_value1.get_date() - ts_value2.get_date()).days());
}
// This function uses inline asm functions, which we believe to be from the boost library.
// Inline asm is not currently supported by JIT, so this function should always be run in
// the interpreted mode. This is handled in ScalarFnCall::GetUdf().
TimestampVal TimestampFunctions::FromUtc(FunctionContext* context,
const TimestampVal& ts_val, const StringVal& tz_string_val) {
if (ts_val.is_null || tz_string_val.is_null) return TimestampVal::null();
const TimestampValue& ts_value = TimestampValue::FromTimestampVal(ts_val);
if (ts_value.NotADateTime()) return TimestampVal::null();
const StringValue& tz_string_value = StringValue::FromStringVal(tz_string_val);
time_zone_ptr timezone =
TimezoneDatabase::FindTimezone(tz_string_value.DebugString(), ts_value);
if (timezone == NULL) {
// This should return null. Hive just ignores it.
stringstream ss;
ss << "Unknown timezone '" << tz_string_value << "'" << endl;
context->AddWarning(ss.str().c_str());
return ts_val;
}
ptime temp;
ts_value.ToPtime(&temp);
local_date_time lt(temp, timezone);
TimestampValue return_value = lt.local_time();
TimestampVal return_val;
return_value.ToTimestampVal(&return_val);
return return_val;
}
// This function uses inline asm functions, which we believe to be from the boost library.
// Inline asm is not currently supported by JIT, so this function should always be run in
// the interpreted mode. This is handled in ScalarFnCall::GetUdf().
TimestampVal TimestampFunctions::ToUtc(FunctionContext* context,
const TimestampVal& ts_val, const StringVal& tz_string_val) {
if (ts_val.is_null || tz_string_val.is_null) return TimestampVal::null();
const TimestampValue& ts_value = TimestampValue::FromTimestampVal(ts_val);
if (ts_value.NotADateTime()) return TimestampVal::null();
const StringValue& tz_string_value = StringValue::FromStringVal(tz_string_val);
time_zone_ptr timezone =
TimezoneDatabase::FindTimezone(tz_string_value.DebugString(), ts_value);
  // This should raise some sort of error or at least return null. Hive just ignores it.
if (timezone == NULL) {
stringstream ss;
ss << "Unknown timezone '" << tz_string_value << "'" << endl;
context->AddWarning(ss.str().c_str());
return ts_val;
}
local_date_time lt(ts_value.get_date(), ts_value.get_time(),
timezone, local_date_time::NOT_DATE_TIME_ON_ERROR);
TimestampValue return_value(lt.utc_time());
TimestampVal return_val;
return_value.ToTimestampVal(&return_val);
return return_val;
}
TimezoneDatabase::TimezoneDatabase() {
// Create a temporary file and write the timezone information. The boost
// interface only loads this format from a file. We don't want to raise
// an error here since this is done when the backend is created and this
// information might not actually get used by any queries.
char filestr[] = "/tmp/impala.tzdb.XXXXXXX";
FILE* file;
int fd;
if ((fd = mkstemp(filestr)) == -1) {
LOG(ERROR) << "Could not create temporary timezone file: " << filestr;
return;
}
if ((file = fopen(filestr, "w")) == NULL) {
unlink(filestr);
close(fd);
LOG(ERROR) << "Could not open temporary timezone file: " << filestr;
return;
}
if (fputs(TIMEZONE_DATABASE_STR, file) == EOF) {
unlink(filestr);
close(fd);
fclose(file);
LOG(ERROR) << "Could not load temporary timezone file: " << filestr;
return;
}
fclose(file);
tz_database_.load_from_file(string(filestr));
tz_region_list_ = tz_database_.region_list();
unlink(filestr);
close(fd);
}
TimezoneDatabase::~TimezoneDatabase() { }
time_zone_ptr TimezoneDatabase::FindTimezone(const string& tz, const TimestampValue& tv) {
  // The backing database does not capture some subtleties; handle those special cases here.
if ((tv.get_date().year() > 2011
|| (tv.get_date().year() == 2011 && tv.get_date().month() >= 4))
&& (iequals("Europe/Moscow", tz) || iequals("Moscow", tz) || iequals("MSK", tz))) {
// We transition in April 2011 from using the tz_database_ to a custom rule
// Russia stopped using daylight savings in 2011, the tz_database_ is
// set up assuming Russia uses daylight saving every year.
    // On Sun, Mar 27 2011 at 2:00AM, Moscow clocks moved forward 1 hour (to GMT+4 in total).
// Specifically,
// UTC Time 26 Mar 2011 22:59:59 +0000 ===> Sun Mar 27 01:59:59 MSK 2011
// UTC Time 26 Mar 2011 23:00:00 +0000 ===> Sun Mar 27 03:00:00 MSK 2011
    // This means that in 2011 the database rule will apply DST starting March 26 2011.
// This will be a correct +4 offset, and the database rule can apply until
// Oct 31 when tz_database_ will incorrectly attempt to turn clocks backwards 1 hour.
return TIMEZONE_MSK_2011_NODST;
}
// See if they specified a zone id
if (tz.find_first_of('/') != string::npos) {
return tz_database_.time_zone_from_region(tz);
}
for (vector<string>::const_iterator iter = tz_region_list_.begin();
iter != tz_region_list_.end(); ++iter) {
time_zone_ptr tzp = tz_database_.time_zone_from_region(*iter);
DCHECK(tzp != NULL);
if (tzp->dst_zone_abbrev() == tz)
return tzp;
if (tzp->std_zone_abbrev() == tz)
return tzp;
if (tzp->dst_zone_name() == tz)
return tzp;
if (tzp->std_zone_name() == tz)
return tzp;
}
return time_zone_ptr();
}
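// Illustrative lookups (a sketch, not exercised by this file; the variable
// names are placeholders):
//   FindTimezone("America/Los_Angeles", tv)   // region id, matched via the '/' path
//   FindTimezone("PST", tv)                   // matched by scanning zone abbreviations
//   FindTimezone("MSK", tv_after_april_2011)  // returns the custom no-DST rule above
//   FindTimezone("NoSuchZone", tv)            // returns an empty time_zone_ptr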
// Explicit template instantiation is required for proper linking. These functions
// are only indirectly called via a function pointer provided by the opcode registry
// which does not trigger implicit template instantiation.
// Must be kept in sync with common/function-registry/impala_functions.py.
template StringVal
TimestampFunctions::FromUnix<IntVal>(FunctionContext* context, const IntVal& intp,
    const StringVal& fmt);
template StringVal
TimestampFunctions::FromUnix<BigIntVal>(FunctionContext* context, const BigIntVal& intp,
const StringVal& fmt);
template StringVal
TimestampFunctions::FromUnix<IntVal>(FunctionContext* context, const IntVal& intp);
template StringVal
TimestampFunctions::FromUnix<BigIntVal>(FunctionContext* context, const BigIntVal& intp);
template TimestampVal
TimestampFunctions::DateAddSub<true, IntVal, years>(FunctionContext* context,
const TimestampVal& ts_val, const IntVal& count);
template TimestampVal
TimestampFunctions::DateAddSub<true, BigIntVal, years>(FunctionContext* context,
const TimestampVal& ts_val, const BigIntVal& count);
template TimestampVal
TimestampFunctions::DateAddSub<false, IntVal, years>(FunctionContext* context,
const TimestampVal& ts_val, const IntVal& count);
template TimestampVal
TimestampFunctions::DateAddSub<false, BigIntVal, years>(FunctionContext* context,
const TimestampVal& ts_val, const BigIntVal& count);
template TimestampVal
TimestampFunctions::DateAddSub<true, IntVal, months>(FunctionContext* context,
const TimestampVal& ts_val, const IntVal& count);
template TimestampVal
TimestampFunctions::DateAddSub<true, BigIntVal, months>(FunctionContext* context,
const TimestampVal& ts_val, const BigIntVal& count);
template TimestampVal
TimestampFunctions::DateAddSub<false, IntVal, months>(FunctionContext* context,
const TimestampVal& ts_val, const IntVal& count);
template TimestampVal
TimestampFunctions::DateAddSub<false, BigIntVal, months>(FunctionContext* context,
const TimestampVal& ts_val, const BigIntVal& count);
template TimestampVal
TimestampFunctions::DateAddSub<true, IntVal, weeks>(FunctionContext* context,
const TimestampVal& ts_val, const IntVal& count);
template TimestampVal
TimestampFunctions::DateAddSub<true, BigIntVal, weeks>(FunctionContext* context,
const TimestampVal& ts_val, const BigIntVal& count);
template TimestampVal
TimestampFunctions::DateAddSub<false, IntVal, weeks>(FunctionContext* context,
const TimestampVal& ts_val, const IntVal& count);
template TimestampVal
TimestampFunctions::DateAddSub<false, BigIntVal, weeks>(FunctionContext* context,
const TimestampVal& ts_val, const BigIntVal& count);
template TimestampVal
TimestampFunctions::DateAddSub<true, IntVal, days>(FunctionContext* context,
const TimestampVal& ts_val, const IntVal& count);
template TimestampVal
TimestampFunctions::DateAddSub<true, BigIntVal, days>(FunctionContext* context,
const TimestampVal& ts_val, const BigIntVal& count);
template TimestampVal
TimestampFunctions::DateAddSub<false, IntVal, days>(FunctionContext* context,
const TimestampVal& ts_val, const IntVal& count);
template TimestampVal
TimestampFunctions::DateAddSub<false, BigIntVal, days>(FunctionContext* context,
const TimestampVal& ts_val, const BigIntVal& count);
template TimestampVal
TimestampFunctions::TimeAddSub<true, IntVal, hours>(FunctionContext* context,
const TimestampVal& ts_val, const IntVal& count);
template TimestampVal
TimestampFunctions::TimeAddSub<true, BigIntVal, hours>(FunctionContext* context,
const TimestampVal& ts_val, const BigIntVal& count);
template TimestampVal
TimestampFunctions::TimeAddSub<false, IntVal, hours>(FunctionContext* context,
const TimestampVal& ts_val, const IntVal& count);
template TimestampVal
TimestampFunctions::TimeAddSub<false, BigIntVal, hours>(FunctionContext* context,
const TimestampVal& ts_val, const BigIntVal& count);
template TimestampVal
TimestampFunctions::TimeAddSub<true, IntVal, minutes>(FunctionContext* context,
const TimestampVal& ts_val, const IntVal& count);
template TimestampVal
TimestampFunctions::TimeAddSub<true, BigIntVal, minutes>(FunctionContext* context,
const TimestampVal& ts_val, const BigIntVal& count);
template TimestampVal
TimestampFunctions::TimeAddSub<false, IntVal, minutes>(FunctionContext* context,
const TimestampVal& ts_val, const IntVal& count);
template TimestampVal
TimestampFunctions::TimeAddSub<false, BigIntVal, minutes>(FunctionContext* context,
const TimestampVal& ts_val, const BigIntVal& count);
template TimestampVal
TimestampFunctions::TimeAddSub<true, IntVal, seconds>(FunctionContext* context,
const TimestampVal& ts_val, const IntVal& count);
template TimestampVal
TimestampFunctions::TimeAddSub<true, BigIntVal, seconds>(FunctionContext* context,
const TimestampVal& ts_val, const BigIntVal& count);
template TimestampVal
TimestampFunctions::TimeAddSub<false, IntVal, seconds>(FunctionContext* context,
const TimestampVal& ts_val, const IntVal& count);
template TimestampVal
TimestampFunctions::TimeAddSub<false, BigIntVal, seconds>(FunctionContext* context,
const TimestampVal& ts_val, const BigIntVal& count);
template TimestampVal
TimestampFunctions::TimeAddSub<true, IntVal, milliseconds>(FunctionContext* context,
const TimestampVal& ts_val, const IntVal& count);
template TimestampVal
TimestampFunctions::TimeAddSub<true, BigIntVal, milliseconds>(FunctionContext* context,
const TimestampVal& ts_val, const BigIntVal& count);
template TimestampVal
TimestampFunctions::TimeAddSub<false, IntVal, milliseconds>(FunctionContext* context,
const TimestampVal& ts_val, const IntVal& count);
template TimestampVal
TimestampFunctions::TimeAddSub<false, BigIntVal, milliseconds>(FunctionContext* context,
const TimestampVal& ts_val, const BigIntVal& count);
template TimestampVal
TimestampFunctions::TimeAddSub<true, IntVal, microseconds>(FunctionContext* context,
const TimestampVal& ts_val, const IntVal& count);
template TimestampVal
TimestampFunctions::TimeAddSub<true, BigIntVal, microseconds>(FunctionContext* context,
const TimestampVal& ts_val, const BigIntVal& count);
template TimestampVal
TimestampFunctions::TimeAddSub<false, IntVal, microseconds>(FunctionContext* context,
const TimestampVal& ts_val, const IntVal& count);
template TimestampVal
TimestampFunctions::TimeAddSub<false, BigIntVal, microseconds>(FunctionContext* context,
const TimestampVal& ts_val, const BigIntVal& count);
template TimestampVal
TimestampFunctions::TimeAddSub<true, IntVal, nanoseconds>(FunctionContext* context,
const TimestampVal& ts_val, const IntVal& count);
template TimestampVal
TimestampFunctions::TimeAddSub<true, BigIntVal, nanoseconds>(FunctionContext* context,
const TimestampVal& ts_val, const BigIntVal& count);
template TimestampVal
TimestampFunctions::TimeAddSub<false, IntVal, nanoseconds>(FunctionContext* context,
const TimestampVal& ts_val, const IntVal& count);
template TimestampVal
TimestampFunctions::TimeAddSub<false, BigIntVal, nanoseconds>(FunctionContext* context,
const TimestampVal& ts_val, const BigIntVal& count);
}
|
{"hexsha": "5b50cf5a8e2c4445538c1aad53aacf8a4589e19a", "size": 26079, "ext": "cc", "lang": "C++", "max_stars_repo_path": "be/src/exprs/timestamp-functions.cc", "max_stars_repo_name": "andybab/Impala", "max_stars_repo_head_hexsha": "d520a9cdea2fc97e8d5da9fbb0244e60ee416bfa", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "be/src/exprs/timestamp-functions.cc", "max_issues_repo_name": "andybab/Impala", "max_issues_repo_head_hexsha": "d520a9cdea2fc97e8d5da9fbb0244e60ee416bfa", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "be/src/exprs/timestamp-functions.cc", "max_forks_repo_name": "andybab/Impala", "max_forks_repo_head_hexsha": "d520a9cdea2fc97e8d5da9fbb0244e60ee416bfa", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.4048780488, "max_line_length": 90, "alphanum_fraction": 0.7511407646, "num_tokens": 6389}
|
#!/usr/bin/env python3
import os
import sys
import shm
import cv2
import numpy as np
from collections import namedtuple
from conf.vehicle import VEHICLE, is_mainsub
from vision import options
from vision.modules.base import ModuleBase
from vision.framework.feature import outer_contours, contour_area, contour_centroid, min_enclosing_circle, min_enclosing_rect
from vision.framework.transform import resize, simple_gaussian_blur, morph_remove_noise, morph_close_holes, dilate, erode, rect_kernel
from vision.framework.helpers import to_umat, from_umat, to_odd
from vision.framework.color import bgr_to_lab, gray_to_bgr, range_threshold
from vision.framework.draw import draw_contours, draw_circle, draw_text
CUAUV_LOCALE = os.environ['CUAUV_LOCALE']
OPTS_ODYSSEUS = [
options.IntOption('lab_l_ref', 255, 0, 255),
options.IntOption('lab_a_ref', 163, 0, 255),
options.IntOption('lab_b_ref', 180, 0, 255),
options.IntOption('color_dist_thresh', 45, 0, 255),
options.IntOption('blur_kernel', 3, 0, 255),
options.IntOption('blur_std', 10, 0, 500),
options.DoubleOption('resize_width_scale', 0.5, 0, 1),
options.DoubleOption('resize_height_scale', 0.5, 0, 1),
options.IntOption('dilate_kernel', 1, 0, 255),
options.IntOption('erode_kernel', 1, 0, 255),
options.IntOption('min_contour_area', 30, 0, 500),
options.DoubleOption('min_contour_rect', 0.4, 0, 1),
options.DoubleOption('min_contour_ratio', 4.5, 0, 10),
options.DoubleOption('max_angle_from_vertical', 15, 0, 90),
options.DoubleOption('min_length', 15, 0, 500),
options.IntOption('auto_distance_percentile', 25, 0, 100),
options.IntOption('nonblack_thresh', 900, 0, 10000),
options.IntOption('water_a_thresh', 20, 0, 255),
options.IntOption('water_b_thresh', 25, 0, 255),
options.BoolOption('debug', True),
]
OPTS_AJAX = [
options.IntOption('lab_l_ref', 255, 0, 255),
options.IntOption('lab_a_ref', 175, 0, 255),
options.IntOption('lab_b_ref', 169, 0, 255),
options.IntOption('color_dist_thresh', 40, 0, 255),
options.IntOption('blur_kernel', 3, 0, 255),
options.IntOption('blur_std', 10, 0, 500),
options.DoubleOption('resize_width_scale', 0.25, 0, 1),
options.DoubleOption('resize_height_scale', 0.25, 0, 1),
options.IntOption('dilate_kernel', 1, 0, 255),
options.IntOption('erode_kernel', 1, 0, 255),
options.IntOption('min_contour_area', 30, 0, 500),
options.DoubleOption('min_contour_ratio', 4.5, 0, 10),
options.DoubleOption('min_contour_rect', 0.4, 0, 1),
options.DoubleOption('max_angle_from_vertical', 15, 0, 90),
options.DoubleOption('min_length', 15, 0, 500),
options.IntOption('auto_distance_percentile', 25, 0, 100),
options.IntOption('nonblack_thresh', 600, 0, 10000),
options.IntOption('water_a_thresh', 10, 0, 255),
options.IntOption('water_b_thresh', 10, 0, 255),
options.BoolOption('debug', True),
]
#OPTS_SIM = [
# options.IntOption('lab_l_ref', 0, 0, 255),
# options.IntOption('lab_a_ref', 170, 0, 255),
# options.IntOption('lab_b_ref', 180, 0, 255),
# options.IntOption('color_dist_thresh', 35, 0, 255),
# options.IntOption('blur_kernel', 3, 0, 255),
# options.IntOption('blur_std', 10, 0, 500),
# options.DoubleOption('resize_width_scale', 0.5, 0, 1),
# options.DoubleOption('resize_height_scale', 0.5, 0, 1),
# options.IntOption('dilate_kernel', 1, 0, 255),
# options.IntOption('erode_kernel', 1, 0, 255),
# options.IntOption('min_contour_area', 30, 0, 500),
# options.DoubleOption('min_contour_rect', 0.4, 0, 1),
# options.DoubleOption('min_contour_ratio', 5, 0, 10),
# options.DoubleOption('max_angle_from_vertical', 15, 0, 90),
# options.DoubleOption('min_length', 15, 0, 500),
# options.IntOption('auto_distance_percentile', 15, 0, 100),
# options.IntOption('nonblack_thresh', 1000, 0, 10000),
# options.IntOption('water_a_thresh', 20, 0, 255),
# options.IntOption('water_b_thresh', 25, 0, 255),
# options.BoolOption('debug', True),
#]
OPTS_SIM = OPTS_ODYSSEUS if VEHICLE == 'odysseus' else OPTS_AJAX
REFERENCE_BRIGHTNESS = 190  # currently the same value for both vehicles
CUTOFF_SCALAR = 10 if is_mainsub else 7
ContourFeats = namedtuple('ContourFeats', ['contour', 'area', 'x', 'y', 'rect', 'angle', 'length', 'ratio'])
def try_index(arr, idx):
if idx < len(arr):
return arr[idx]
return None
def thresh_color_distance(split, color, distance, auto_distance_percentile=None, ignore_channels=None, weights=(1, 1, 1)):
    # Copy into a float array so neither the caller's list nor a shared mutable
    # default is ever modified in place
    weights = np.array(weights, dtype=np.float64)
    if ignore_channels is None:
        ignore_channels = []
    for idx in ignore_channels:
        weights[idx] = 0
    weights /= np.linalg.norm(weights)
dists = np.zeros(split[0].shape, dtype=np.float32)
for i in range(3):
if i in ignore_channels:
continue
dists += weights[i] * (np.float32(split[i]) - color[i])**2
if auto_distance_percentile:
distance = min(np.percentile(dists, auto_distance_percentile), distance**2)
else:
distance = distance**2
return range_threshold(dists, 0, distance), np.uint8(np.sqrt(dists))
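# Minimal usage sketch (illustrative only, mirroring the call made in
# Gate.process() below): threshold LAB pixels against a reference colour,
# ignoring the lightness channel and weighting b over a.
#   mask, dists = thresh_color_distance([l, a, b], [255, 163, 180], 45,
#                                       auto_distance_percentile=25,
#                                       ignore_channels=[0], weights=[2, 0, 15])
# `mask` is 255 where the weighted distance to the reference colour falls within
# the (possibly auto-tightened) threshold; `dists` is the distance image itself.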
def filter_duplicates_sorted_by_x(contour_feats):
MIN_DIST_BETWEEN_PIPES = 30
res = []
last_x = -MIN_DIST_BETWEEN_PIPES
last_len = 0
for c in contour_feats:
if c.x - last_x > MIN_DIST_BETWEEN_PIPES:
last_x = c.x
last_len = c.length
res.append(c)
elif last_len < c.length:
last_x = c.x
last_len = c.length
if res:
res.pop(-1)
res.append(c)
return res
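# Illustrative behaviour: for contours at x = 10, 25, 80 with lengths 40, 60, 50,
# the first two are closer than MIN_DIST_BETWEEN_PIPES, so only the longer one
# (x = 25) survives, leaving candidate pipes at x = 25 and x = 80.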
class Gate(ModuleBase):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def post_contours(self, name, h, w, contour_feats):
if not self.options['debug']:
return
tmp = np.zeros((h, w, 3))
draw_contours(tmp, [c.contour for c in contour_feats], color=(255, 0, 0), thickness=-1)
self.post(name, tmp)
def process(self, *mats):
results = shm.gate_vision.get()
h, w, _ = mats[0].shape
h = int(h * self.options['resize_height_scale'])
w = int(w * self.options['resize_width_scale'])
results.img_height = h
results.img_width = w
mat = resize(mats[0], w, h)
#print(np.mean(mat))
avg_brightness_ratio = np.mean(mat) / REFERENCE_BRIGHTNESS
nonblack_thresh_dist = self.options['nonblack_thresh'] * avg_brightness_ratio
lab, lab_split = bgr_to_lab(mat)
median_a = np.median(lab_split[1])
median_b = np.median(lab_split[2])
median_filter_a = range_threshold(lab_split[1], median_a - self.options['water_a_thresh'], median_a + self.options['water_a_thresh'])
median_filter_b = range_threshold(lab_split[2], median_b - self.options['water_b_thresh'], median_b + self.options['water_b_thresh'])
if self.options['debug']:
self.post('median filter a', median_filter_a)
self.post('median filter b', median_filter_b)
nonwater_mask, _ = gray_to_bgr(255 - (median_filter_a & median_filter_b))
self.post('nonwater', nonwater_mask)
# Tuned for a 320x256 image
vehicle_depth = shm.kalman.depth.get()
reflection_cutoff = min(h, int(max(0, 3 - vehicle_depth)**2 * CUTOFF_SCALAR))
mat[:reflection_cutoff] *= 0
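        # The shallower the vehicle, the taller the band of surface reflections
        # blanked at the top of the frame: e.g. at depth 1 on mainsub this zeroes
        # the top min(h, int((3 - 1)**2 * 10)) = 40 rows (illustrative numbers).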
tmp = mat.copy()
draw_text(tmp, 'Depth: {:.2f}'.format(vehicle_depth), (30, 30), 0.5, color=(255, 255, 255))
self.post('mat', tmp)
#lab, lab_split = bgr_to_lab(mat)
#nonblack_mask, _ = gray_to_bgr(np.uint8(255 * (lab_split[0] > self.options['nonblack_thresh'])))
nonblack_mask, _ = gray_to_bgr(np.uint8(255 * (np.var(mat, axis=2) > nonblack_thresh_dist)))
self.post('nonblack', nonblack_mask)
mat &= nonblack_mask
mat &= nonwater_mask
mat = to_umat(mat)
mat = simple_gaussian_blur(mat, to_odd(self.options['blur_kernel']),
self.options['blur_std'])
lab, lab_split = bgr_to_lab(mat)
threshed, dists = thresh_color_distance([lab_split[0], lab_split[1], lab_split[2]],
[self.options['lab_l_ref'], self.options['lab_a_ref'],
self.options['lab_b_ref']],
self.options['color_dist_thresh'], auto_distance_percentile=self.options['auto_distance_percentile'],
ignore_channels=[0], weights=[2, 0, 15])
if self.options['debug']:
self.post('threshed', threshed)
self.post('dists', dists)
dilated = dilate(threshed, rect_kernel(self.options['dilate_kernel']))
if self.options['debug']:
self.post('dilated', dilated)
eroded = erode(dilated, rect_kernel(self.options['erode_kernel']))
if self.options['debug']:
self.post('eroded', eroded)
contours = outer_contours(eroded)
areas = [*map(contour_area, contours)]
centroids = [*map(contour_centroid, contours)]
xs = [c[0] for c in centroids]
ys = [c[1] for c in centroids]
rects = [*map(min_enclosing_rect, contours)]
lengths = [max(r[1]) for r in rects]
ratios = [max(r[1]) / (1e-30 + min(r[1])) for r in rects]
vehicle_roll = shm.kalman.roll.get()
lines = [cv2.fitLine(c, cv2.DIST_L2, 0, 0.01, 0.01) for c in contours]
angles = [np.degrees(np.arctan2(line[1], line[0]))[0] for line in lines]
angles = [min(abs(90 - a - vehicle_roll), abs(-90 - a - vehicle_roll)) for a in angles]
rectangularities = [a / (1e-30 + rect[1][0] * rect[1][1]) for (c, a, rect) in zip(contours, areas, rects)]
contours = [ContourFeats(*feats) for feats in zip(contours, areas, xs, ys, rectangularities, angles, lengths, ratios)]
contours = [*filter(lambda c: c.area > self.options['min_contour_area'], contours)]
self.post_contours('area', h, w, contours)
contours = [*filter(lambda c: c.angle < self.options['max_angle_from_vertical'], contours)]
self.post_contours('angle', h, w, contours)
contours = [*filter(lambda c: c.length > self.options['min_length'], contours)]
self.post_contours('length', h, w, contours)
#contours = [*filter(lambda c: c.rect > self.options['min_contour_rect'], contours)]
#self.post_contours('rect', h, w, contours)
contours = [*filter(lambda c: c.ratio > self.options['min_contour_ratio'], contours)]
self.post_contours('ratio', h, w, contours)
contours = sorted(contours, key=lambda c: c.area)[:6]
contours_by_x = sorted(contours, key=lambda c: c.x)
contours_by_x = filter_duplicates_sorted_by_x(contours_by_x)
leftmost = try_index(contours_by_x, 0)
middle = try_index(contours_by_x, 1)
rightmost = try_index(contours_by_x, 2)
tmp = np.zeros((h, w, 3))
results.leftmost_visible = leftmost is not None
results.middle_visible = middle is not None
results.rightmost_visible = rightmost is not None
draw_text(tmp, 'Roll: {:.2f}'.format(vehicle_roll), (30, 30), 0.5, color=(255, 255, 255))
if leftmost is not None:
draw_contours(tmp, [leftmost.contour], color=(255, 0, 0), thickness=-1)
draw_circle(tmp, (leftmost.x, leftmost.y), 5, color=(255, 255, 255), thickness=-1)
results.leftmost_x = leftmost.x
results.leftmost_y = leftmost.y
results.leftmost_len = leftmost.length
if middle is not None:
draw_contours(tmp, [middle.contour], color=(0, 255, 0), thickness=-1)
draw_circle(tmp, (middle.x, middle.y), 5, color=(255, 255, 255), thickness=-1)
results.middle_x = middle.x
results.middle_y = middle.y
results.middle_len = middle.length
if rightmost is not None:
draw_contours(tmp, [rightmost.contour], color=(0, 0, 255), thickness=-1)
draw_circle(tmp, (rightmost.x, rightmost.y), 5, color=(255, 255, 255), thickness=-1)
results.rightmost_x = rightmost.x
results.rightmost_y = rightmost.y
results.rightmost_len = rightmost.length
shm.gate_vision.set(results)
self.post('contours', tmp)
if __name__ == '__main__':
Gate('forward', OPTS_SIM if CUAUV_LOCALE == 'simulator' else OPTS_ODYSSEUS if VEHICLE == 'odysseus' else OPTS_AJAX)()
|
{"hexsha": "1c7b3f47d1bd9595cd6b9cb6abcb5ca77117ad6b", "size": 12497, "ext": "py", "lang": "Python", "max_stars_repo_path": "vision/modules/gate.py", "max_stars_repo_name": "cuauv/software", "max_stars_repo_head_hexsha": "5ad4d52d603f81a7f254f365d9b0fe636d03a260", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 70, "max_stars_repo_stars_event_min_datetime": "2015-11-16T18:04:01.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-05T09:04:02.000Z", "max_issues_repo_path": "vision/modules/gate.py", "max_issues_repo_name": "cuauv/software", "max_issues_repo_head_hexsha": "5ad4d52d603f81a7f254f365d9b0fe636d03a260", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2016-08-03T05:13:19.000Z", "max_issues_repo_issues_event_max_datetime": "2016-08-03T06:19:39.000Z", "max_forks_repo_path": "vision/modules/gate.py", "max_forks_repo_name": "cuauv/software", "max_forks_repo_head_hexsha": "5ad4d52d603f81a7f254f365d9b0fe636d03a260", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 34, "max_forks_repo_forks_event_min_datetime": "2015-12-15T17:29:23.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-18T14:15:12.000Z", "avg_line_length": 47.5171102662, "max_line_length": 142, "alphanum_fraction": 0.6428742898, "include": true, "reason": "import numpy", "num_tokens": 3545}
|
import graphlearning as gl
import numpy as np
import matplotlib.pyplot as plt
X = np.random.rand(10000, 2)
x, y = X[:, 0], X[:, 1]
eps = 0.02
W = gl.weightmatrix.epsilon_ball(X, eps)
G = gl.graph(W)
bdy_set = (x < eps) | (x > 1-eps) | (y < eps) | (y > 1-eps)
u = G.peikonal(bdy_set)
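# u behaves like a distance-to-boundary function: peikonal solves the graph
# p-eikonal equation with the boundary set held at zero, so the scatter plot
# below shades each point by its graph distance from the square's edges.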
plt.scatter(x,y,c=u,s=0.25)
plt.scatter(x[bdy_set],y[bdy_set],c='r',s=0.5)
plt.show()
|
{"hexsha": "7fa670a23c0d9f03e61c4a01b173bebb37e5ee70", "size": 386, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/peikonal.py", "max_stars_repo_name": "dfloeder/GraphLearning", "max_stars_repo_head_hexsha": "f9b7ea91a43622b5c271315b0b3b2fdd82e37fe4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "examples/peikonal.py", "max_issues_repo_name": "dfloeder/GraphLearning", "max_issues_repo_head_hexsha": "f9b7ea91a43622b5c271315b0b3b2fdd82e37fe4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/peikonal.py", "max_forks_repo_name": "dfloeder/GraphLearning", "max_forks_repo_head_hexsha": "f9b7ea91a43622b5c271315b0b3b2fdd82e37fe4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 19.3, "max_line_length": 59, "alphanum_fraction": 0.6398963731, "include": true, "reason": "import numpy", "num_tokens": 149}
|
[STATEMENT]
lemma hcis_inverse [simp]: "\<And>a. inverse (hcis a) = hcis (- a)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>a. inverse (hcis a) = hcis (- a)
[PROOF STEP]
by transfer (rule cis_inverse)
|
{"llama_tokens": 91, "file": null, "length": 1}
|
using PolarFact
using Test, LinearAlgebra
using Random
Random.seed!(1103)
include("test_newton.jl")
include("test_halley.jl")
include("test_svd.jl")
include("test_hybrid.jl")
include("test_f32.jl") # test Float32 case
|
{"hexsha": "8fbb3da21e47016ae7a503eecc04cbf5e658d724", "size": 220, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/runtests.jl", "max_stars_repo_name": "weijianzhang/PolarFact.jl", "max_stars_repo_head_hexsha": "6530009072c8774274508fef3c05a1487f09bd60", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2015-03-12T23:06:17.000Z", "max_stars_repo_stars_event_max_datetime": "2020-04-13T11:35:49.000Z", "max_issues_repo_path": "test/runtests.jl", "max_issues_repo_name": "weijianzhang/PolarFact.jl", "max_issues_repo_head_hexsha": "6530009072c8774274508fef3c05a1487f09bd60", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2015-12-18T19:56:52.000Z", "max_issues_repo_issues_event_max_datetime": "2019-10-09T08:28:45.000Z", "max_forks_repo_path": "test/runtests.jl", "max_forks_repo_name": "weijianzhang/PolarFact.jl", "max_forks_repo_head_hexsha": "6530009072c8774274508fef3c05a1487f09bd60", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2016-07-12T02:16:08.000Z", "max_forks_repo_forks_event_max_datetime": "2020-10-23T14:32:06.000Z", "avg_line_length": 18.3333333333, "max_line_length": 42, "alphanum_fraction": 0.7727272727, "num_tokens": 66}
|
# Author: Francesco Grussu, University College London
# <f.grussu@ucl.ac.uk> <francegrussu@gmail.com>
#
# Code released under BSD Two-Clause license
#
# Copyright (c) 2020 University College London.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are those
# of the authors and should not be interpreted as representing official policies,
# either expressed or implied, of the FreeBSD Project.
### Load libraries
import argparse, os, sys
import numpy as np
import nibabel as nib
import torch
from torch import nn
from torch import Tensor
from torch.utils.data import DataLoader
from torch import autograd
import pickle as pk
from pathlib import Path as pt
sys.path.insert(0, os.path.dirname(pt(__file__).absolute()) )
import deepqmri
if __name__ == "__main__":
### Print help and parse arguments
    parser = argparse.ArgumentParser(description='This program uses a trained qMRI-net (class qmriinterp) to map a quantitative MRI signal acquired according to a first protocol to a resampled version of the signal corresponding to a second, target protocol. Processing is performed on a voxel-by-voxel basis. It requires as input a trained qMRI-net, an MRI scan to analyse (either a 4D NIFTI or a python pickle binary file storing a matrix with voxels along rows and measurements along columns), information on signal normalisation and an optional mask (if the input scan is NIFTI). It outputs the resampled MRI signals in either NIFTI or python pickle binary format (the same format as that of the input data to analyse is used). Author: Francesco Grussu, University College London (<f.grussu@ucl.ac.uk><francegrussu@gmail.com>). Code released under BSD Two-Clause license. Copyright (c) 2020 University College London. All rights reserved.')
parser.add_argument('sig_in', help='path to file storing a multi-contrast MRI scan to analyse (it can be a 4D NIFTI or a python pickle binary file; in the latter case, voxels are stored along rows and measurements along columns)')
parser.add_argument('sig_out', help='path of output file storing the output MRI scan resampled according to the target protocol (it will have the same format as the mandatory input sig_in, i.e. NIFTI if sig_in is a NIFTI, pickle binary otherwise)')
parser.add_argument('qmrinet_file', help='path to a pickle binary file containing a trained qMRI-net (class qmriinterp)')
parser.add_argument('max_file', help='path to a pickle binary file containing the maximum signal for normalisation (max_val.bin file provided by traininterp_deepqmri.py)')
parser.add_argument('min_file', help='path to a pickle binary file containing the minimum signal for normalisation (min_val.bin file provided by traininterp_deepqmri.py)')
    parser.add_argument('--mask', metavar='<nifti>', help='path to a mask flagging with 1 voxels to analyse and with 0 voxels to ignore (the output NIFTI will contain 0 in voxels to ignore; note that the mask is ignored if sig_in is a pickle binary file)')
args = parser.parse_args()
### Print some information
print('')
print('********************************************************************')
print(' RESAMPLE MRI PROTOCOL WITH qMRI-NET (qmriinterp CLASS) ')
print('********************************************************************')
print('')
print('** Called on input MRI measurement file: {}'.format(args.sig_in))
print('** qMRI-net file (class qmriinterp): {}'.format(args.qmrinet_file))
print('** Output file storing the resampled MRI measurements: {}'.format(args.sig_out))
print('')
print('')
### Load input measurements
print('')
print(' ... loading data ...')
# Try NIFTI first
try:
# Load NIFTI
sin_obj = nib.load(args.sig_in)
sin_header = sin_obj.header
sin_affine = sin_header.get_best_affine()
sin_data = sin_obj.get_fdata()
sin_dims = sin_data.shape
imgsize = sin_data.shape
imgsize = np.array(imgsize)
sin_data = np.array(sin_data,'float32')
isnifti = True
# Check that NIFTI is 4D
if imgsize.size!=4:
print('')
            raise RuntimeError('ERROR: the input 4D NIFTI file {} is actually not 4D. Exiting with 1...'.format(args.sig_in))
# Check whether a fitting mask has been provided
if isinstance(args.mask, str)==1:
# A mask for qMRI-net has been provided
mask_obj = nib.load(args.mask)
mask_dims = mask_obj.shape
mask_header = mask_obj.header
mask_affine = mask_header.get_best_affine()
# Make sure the mask is a 3D file
mask_data = mask_obj.get_fdata()
masksize = mask_data.shape
masksize = np.array(masksize)
if masksize.size!=3:
print('')
                print('WARNING: the mask file {} is not a 3D NIFTI file. Ignoring mask...'.format(args.mask))
print('')
mask_data = np.ones(imgsize[0:3],'float64')
elif ( (np.sum(sin_affine==mask_affine)!=16) or (sin_dims[0]!=mask_dims[0]) or (sin_dims[1]!=mask_dims[1]) or (sin_dims[2]!=mask_dims[2]) ):
print('')
print('WARNING: the geometry of the mask file {} does not match that of the input data. Ignoring mask...'.format(args.mask))
print('')
mask_data = np.ones(imgsize[0:3],'float64')
else:
mask_data = np.array(mask_data,'float64')
# Make sure mask data is a numpy array
mask_data[mask_data>0] = 1
mask_data[mask_data<=0] = 0
else:
# A mask for fitting has not been provided
mask_data = np.ones(imgsize[0:3],'float64')
# NIFTI has not worked
except:
# Try pickle binary then
try:
hin = open(args.sig_in,'rb')
sin_data = pk.load(hin)
hin.close()
isnifti = False
# Unknown input data format!
except:
raise RuntimeError('the format of the input data to analyse is not understood!')
#### Load qMRI-net in evaluation mode
try:
h = open(args.qmrinet_file,'rb')
net = pk.load(h)
h.close()
except:
raise RuntimeError('the format of the trained qMRI-Net is not understood!')
net.eval()
### Load normalisation factors
try:
h = open(args.max_file,'rb')
max_val = pk.load(h)
h.close()
h = open(args.min_file,'rb')
min_val = pk.load(h)
h.close()
except:
raise RuntimeError('the format of the signal normalisation factors is not understood!')
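    # Normalisation convention used throughout the rest of this script (a
    # restatement, assuming max_val/min_val are the scalars saved at training):
    #   s_norm = (s - min_val) / (max_val - min_val)     # network input in [0, 1]
    #   s_out  = min_val + (max_val - min_val) * s_norm  # applied to network output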
### Resample MRI signals
print('')
print(' ... predicting MRI signals with qMRI-net ...')
# NIFTI
if(isnifti==True):
# Get normalisation factors
smax = np.float32(max_val)
smin = np.float32(min_val)
# Number of output measurements
nneurnet = net.nneurons
nmeas_out = nneurnet[-1]
# Allocate variables to store output results
sigout = np.zeros( (imgsize[0], imgsize[1], imgsize[2], nmeas_out) ) # Output resampled MRI signals
# Loop through voxels to estimate tissue parameters with a trained net
for xx in range(0, imgsize[0]):
for yy in range(0, imgsize[1]):
for zz in range(0, imgsize[2]):
# Get voxel within mask
if(mask_data[xx,yy,zz]==1):
# Get qMRI signal
myvoxel = sin_data[xx,yy,zz,:]
# Normalise qMRI signal
myvoxel[myvoxel<smin] = smin
myvoxel = np.float32( (myvoxel - smin) / (smax - smin) )
# Pass voxel through qMRI-net
myvoxel_sigs = net(Tensor(myvoxel))
myvoxel_sigs = myvoxel_sigs.detach().numpy()
# Un-normalise
myvoxel_sigs = smin + (smax - smin)*(myvoxel_sigs)
# Store resampled voxel
for mm in range(0, nmeas_out):
sigout[xx,yy,zz,mm] = myvoxel_sigs[mm]
# Save the predicted NIFTI file as output
print('')
print(' ... saving resampled MRI signals as 4D NIFTI ...')
buffer_header = sin_obj.header
buffer_header.set_data_dtype('float64') # Make sure we save output files float64, even if input is not
sigout_obj = nib.Nifti1Image(sigout,sin_obj.affine,buffer_header)
try:
nib.save(sigout_obj, args.sig_out)
except:
raise RuntimeError('the destination folder may not exist or you may lack permissions to write there!')
print('')
print(' Done!')
print('')
sys.exit(0)
# Python pickle binaries
else:
# Get normalisation factors
smax = np.float32(max_val)
smin = np.float32(min_val)
# Normalise qMRI signals
sin_data[sin_data<smin] = smin
sin_data = np.float32( (sin_data - smin) / (smax - smin) )
# Pass signals through qMRI-net as a whole batch to get resampled MRI signals
sigout = net(Tensor(sin_data)) # Predicted MRI signals
sigout = sigout.detach().numpy()
# Un-normalise
sigout = smin + (smax - smin)*sigout
# Save estimated MRI signals
try:
print('')
print(' ... saving resampled MRI signals as a pickle binary file ...')
sout_file = open(args.sig_out,'wb')
pk.dump(sigout,sout_file,pk.HIGHEST_PROTOCOL)
sout_file.close()
except:
raise RuntimeError('the destination folder may not exist or you may lack permissions to write there!')
print('')
print(' Done!')
print('')
sys.exit(0)
|
{"hexsha": "0c0ef44bce24c56499e4b132cf970b001d8c5b52", "size": 10161, "ext": "py", "lang": "Python", "max_stars_repo_path": "tools/sig2sig_qmriinterp_deepqmri.py", "max_stars_repo_name": "fragrussu/qMRINet", "max_stars_repo_head_hexsha": "418cbe22cefa2974d8a97b359324ff4c35865d22", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-10-22T23:37:36.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-18T09:39:42.000Z", "max_issues_repo_path": "tools/sig2sig_qmriinterp_deepqmri.py", "max_issues_repo_name": "fragrussu/qMRINet", "max_issues_repo_head_hexsha": "418cbe22cefa2974d8a97b359324ff4c35865d22", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tools/sig2sig_qmriinterp_deepqmri.py", "max_forks_repo_name": "fragrussu/qMRINet", "max_forks_repo_head_hexsha": "418cbe22cefa2974d8a97b359324ff4c35865d22", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.69140625, "max_line_length": 946, "alphanum_fraction": 0.7071154414, "include": true, "reason": "import numpy", "num_tokens": 2629}
|
from keras.layers import Dense, Conv2D, MaxPool2D, UpSampling2D, \
    Flatten, Dropout, Reshape, Concatenate
from keras.models import Model, Input
from keras.utils import to_categorical
from sklearn.preprocessing import LabelEncoder
import numpy as np
from PIL import Image
import os
X = []
y_label = []
for path in os.listdir('./datasets'):
print(path)
if path == '.DS_Store':
continue
for image_path in os.listdir('./datasets/' + path):
try:
image = Image.open(os.path.join('./datasets/' + path, image_path))
except OSError:
continue
data = np.asarray(image.convert('L'))
data = data / 255
data = np.clip(data, 0, 1)
assert(data.max() <= 1)
assert(data.min() >= 0)
X.append(data)
y_label.append(image_path[0])
X = np.array(X).reshape(-1, 40, 24, 1)
lb = LabelEncoder()
y_label_transformed = lb.fit_transform(y_label)
y = to_categorical(y_label_transformed)
# encoder
input_img = Input(shape=(40, 24, 1))
x = Conv2D(16, 3, activation='selu', padding='same')(input_img)
x = Conv2D(16, 3, activation='selu', padding='same')(x)
# x = MaxPool2D(2, padding='same')(x)
x = Conv2D(16, 3, activation='selu', padding='same')(x)
# x = MaxPool2D(2, padding='same')(x)
x = Flatten()(x)
x = Dropout(0.5)(x)
x = Dense(512, activation='selu')(x)
x = Dense(256, activation='selu')(x)
encoded = Dense(26, activation='softmax')(x)
style = Dense(16, activation='sigmoid')(x)
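# The latent code is split in two: a 26-way softmax pushed (via the
# classification loss below) to encode letter identity, and a free 16-dim
# "style" vector that can absorb letter-independent shape variation.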
# decoder
x = Concatenate()([encoded, style])
x = Dense(256, activation='selu')(x)
x = Dense(512, activation='selu')(x)
x = Dense(15360)(x)
x = Reshape((40, 24, 16))(x)
# x = UpSampling2D(2)(x)
x = Conv2D(16, 2, activation='selu', padding='same')(x)
# x = UpSampling2D(2)(x)
x = Conv2D(16, 2, activation='selu', padding='same')(x)
x = Conv2D(16, 2, activation='selu', padding='same')(x)
decoded = Conv2D(1, 3, activation='linear', padding='same')(x)
# input_img = Input(shape=(40, 24, 1))
# x = Flatten()(input_img)
# encoded = Dense(26, activation='softmax')(x)
# decoded = input_img
model = Model(input_img, [encoded, decoded])
model.compile(loss=['categorical_crossentropy', 'mse'],
optimizer='adam',
metrics=['accuracy', 'mse'])
model.summary()
model.fit(X, [y, X], batch_size=32, epochs=100, shuffle=True)
model.save('autoencoder_1.h5')
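# Quick sanity check (illustrative, not part of training): reconstruct a few
# training glyphs and report the classification accuracy on that batch.
pred_labels, reconstructions = model.predict(X[:32])
print('train-batch accuracy:',
      (pred_labels.argmax(axis=1) == y[:32].argmax(axis=1)).mean())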
|
{"hexsha": "cf0fd6acca2eb0cf98449288c81beff8990d347a", "size": 2541, "ext": "py", "lang": "Python", "max_stars_repo_path": "autoencoder.py", "max_stars_repo_name": "MarkintoshZ/FontTransformer", "max_stars_repo_head_hexsha": "5051db0d38a4b8ae7602fb22c75c008f9f59d2d1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "autoencoder.py", "max_issues_repo_name": "MarkintoshZ/FontTransformer", "max_issues_repo_head_hexsha": "5051db0d38a4b8ae7602fb22c75c008f9f59d2d1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "autoencoder.py", "max_forks_repo_name": "MarkintoshZ/FontTransformer", "max_forks_repo_head_hexsha": "5051db0d38a4b8ae7602fb22c75c008f9f59d2d1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.7625, "max_line_length": 78, "alphanum_fraction": 0.6611570248, "include": true, "reason": "import numpy", "num_tokens": 752}
|
"""
Created on Thu Oct 26 14:19:44 2017
@author: Utku Ozbulak - github.com/utkuozbulak
"""
import os
import numpy as np
import torch
from torch.optim import SGD
from cnn_visualization.misc_functions import preprocess_image, recreate_image, save_image
import argparse
import torch.nn as nn
class ClassSpecificImageGeneration():
"""
Produces an image that maximizes a certain class with gradient ascent
"""
def __init__(self, model, target_class,image_size):
self.mean = [-0.485, -0.456, -0.406]
self.std = [1/0.229, 1/0.224, 1/0.225]
self.model = model
self.model.eval()
self.target_class = target_class
self.image_size = image_size
# Generate a random image
self.created_image = np.uint8(np.random.uniform(0, 255, (image_size, image_size, 3)))
# Create the folder to export images if not exists
if not os.path.exists('generated/class_'+str(self.target_class)):
os.makedirs('generated/class_'+str(self.target_class))
print("init xong ... ")
self.device = torch.device("cuda" if torch.cuda.is_available()
else "cpu")
def generate(self, iterations=150):
"""Generates class specific image
Keyword Arguments:
iterations {int} -- Total iterations for gradient ascent (default: {150})
Returns:
np.ndarray -- Final maximally activated class image
"""
print("bat dau generate xong ... ")
initial_learning_rate = 200
for i in range(1, iterations):
print(i)
# Process image and return variable
self.processed_image = preprocess_image(self.created_image, False)
# Define optimizer for the image
optimizer = SGD([self.processed_image], lr=initial_learning_rate)
# Forward
output = self.model(self.processed_image.to(self.device))
# Target specific class
print(output)
class_loss = -output[0, self.target_class]
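            # Minimising the negative class score is gradient ascent on the
            # target activation: each step nudges the image toward the class.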
if i % 1 == 0 or i == iterations-1:
print('Iteration:', str(i), 'Loss',
"{0:.2f}".format(class_loss.cpu().data.numpy()))
# Zero grads
self.model.zero_grad()
# Backward
class_loss.backward()
# Update image
optimizer.step()
# Recreate image
self.created_image = recreate_image(self.processed_image)
print(self.created_image.size)
if i % 1 == 0 or i == iterations-1:
# Save image
initial_learning_rate /=2
im_path = 'generated/class_'+str(self.target_class)+'/c_'+str(self.target_class)+'_'+'iter_'+str(i)+'.png'
save_image(self.created_image, im_path)
return self.processed_image
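    # Note: with the save condition `i % 1 == 0` every iteration both saves an
    # image and halves the learning rate, so the step size decays geometrically;
    # a larger modulus (e.g. i % 10) would give a slower, coarser schedule.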
def parse_args():
parser = argparse.ArgumentParser(description="Deepfake detection")
parser.add_argument('--model_path', default="../../../model/xception/model_pytorch_4.pt", help='path to model ')
    parser.add_argument('--gpu_id', type=int, default=-1, help='GPU id (-1 for CPU)')
    parser.add_argument('--image_size', type=int, default=256, help='generated image size in pixels')
    parser.add_argument('--iterations', type=int, default=256, help='number of gradient ascent iterations')
    subparsers = parser.add_subparsers(dest="model", help='Choose one of the models: capsule, drn, resnext50, resnext101, gan, meso, xception, ...')
## torch
parser_capsule = subparsers.add_parser('capsule', help='Capsule')
parser_drn = subparsers.add_parser('drn', help='DRN ')
parser_local_nn = subparsers.add_parser('local_nn', help='Local NN ')
parser_self_attention = subparsers.add_parser('self_attention', help='Self Attention ')
parser_resnext50 = subparsers.add_parser('resnext50', help='Resnext50 ')
parser_resnext101 = subparsers.add_parser('resnext101', help='Resnext101 ')
parser_myresnext = subparsers.add_parser('myresnext', help='My Resnext ')
parser_mnasnet = subparsers.add_parser('mnasnet', help='mnasnet pytorch ')
parser_xception_torch = subparsers.add_parser('xception_torch', help='Xception pytorch ')
parser_xception2_torch = subparsers.add_parser('xception2_torch', help='Xception2 pytorch ')
parser_dsp_fwa = subparsers.add_parser('dsp_fwa', help='DSP_SWA pytorch ')
parser_xception = subparsers.add_parser('xception', help='Xceptionnet')
parser_efficient = subparsers.add_parser('efficient', help='Efficient Net')
parser_efficient.add_argument("--type",type=str,required=False,default="0",help="Type efficient net 0-8")
parser_efficientdual = subparsers.add_parser('efficientdual', help='Efficient Net')
parser_efft = subparsers.add_parser('efft', help='Efficient Net fft')
parser_efft.add_argument("--type", type=str, required=False, default="0", help="Type efficient net 0-8")
parser_e4dfft = subparsers.add_parser('e4dfft', help='Efficient Net 4d fft')
parser_e4dfft.add_argument("--type", type=str, required=False, default="0", help="Type efficient net 0-8")
return parser.parse_args()
if __name__ == '__main__':
    target_class = 0  # index of the output class to maximise
# pretrained_model = models.alexnet(pretrained=True)
args = parse_args()
print(args)
model = args.model
os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu_id)
gpu_id = 0 if int(args.gpu_id) >=0 else -1
image_size = args.image_size
iterations= args.iterations
if model== "capsule":
exit(0)
pass
elif model == "drn" :
from pytorch_model.drn.drn_seg import DRNSub
model = DRNSub(1)
pass
elif model == "local_nn" :
from pytorch_model.local_nn import local_nn
model = local_nn()
elif model == "self_attention":
from pytorch_model.self_attention import self_attention
model = self_attention()
elif model == "resnext50":
from pytorch_model.model_cnn_pytorch import resnext50
model = resnext50(False)
elif model == "resnext101":
from pytorch_model.model_cnn_pytorch import resnext101
model = resnext101(False)
elif model == "myresnext":
from pytorch_model.model_cnn_pytorch import MyResNetX
model = MyResNetX()
elif model == "mnasnet":
from pytorch_model.model_cnn_pytorch import mnasnet
model = mnasnet(False)
elif model == "xception_torch":
from pytorch_model.xception import xception
model = xception(pretrained=False)
elif model == "xception2_torch":
from pytorch_model.xception import xception2
model = xception2(pretrained=False)
elif model == "dsp_fwa":
from pytorch_model.DSP_FWA.models.classifier import SPPNet
model = SPPNet(backbone=50, num_class=1)
elif model == "siamese_torch":
from pytorch_model.siamese import SiameseNetworkResnet
model = SiameseNetworkResnet(length_embed = args.length_embed,pretrained=True)
elif model == "efficient":
from pytorch_model.efficientnet import EfficientNet
model = EfficientNet.from_pretrained('efficientnet-b'+args.type,num_classes=1)
model = nn.Sequential(model,nn.Sigmoid())
elif model == "efft":
from pytorch_model.efficientnet import EfficientNet
model = EfficientNet.from_pretrained('efficientnet-b' + args.type, num_classes=1,in_channels=1)
model = nn.Sequential(model, nn.Sigmoid())
elif model == "e4dfft":
from pytorch_model.efficientnet import EfficientNet
model = EfficientNet.from_pretrained('efficientnet-b' + args.type, num_classes=1,in_channels=4)
model = nn.Sequential(model, nn.Sigmoid())
elif model == "efficientdual":
pass
from pytorch_model.xception import xception
model = xception(pretrained=False)
device = torch.device("cuda" if torch.cuda.is_available()
else "cpu")
model = model.to(device)
model.load_state_dict(torch.load(args.model_path,map_location=torch.device('cpu')))
print("Load xong ... ")
model.eval()
csig = ClassSpecificImageGeneration(model, target_class,image_size)
csig.generate(iterations = iterations)
|
{"hexsha": "1591d43ba00250badb5c4fb0808383cba8b16c8b", "size": 8238, "ext": "py", "lang": "Python", "max_stars_repo_path": "cnn_visualization/generate_class_specific_samples.py", "max_stars_repo_name": "tamlhp/dfd_benchmark", "max_stars_repo_head_hexsha": "15cc5c4708a5414c6309ea1f20a5dfa3428409fa", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2020-03-20T18:46:29.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-22T03:06:17.000Z", "max_issues_repo_path": "cnn_visualization/generate_class_specific_samples.py", "max_issues_repo_name": "tamlhp/dfd_benchmark", "max_issues_repo_head_hexsha": "15cc5c4708a5414c6309ea1f20a5dfa3428409fa", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-12-03T06:49:04.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-03T06:49:04.000Z", "max_forks_repo_path": "cnn_visualization/generate_class_specific_samples.py", "max_forks_repo_name": "tamlhp/dfd_benchmark", "max_forks_repo_head_hexsha": "15cc5c4708a5414c6309ea1f20a5dfa3428409fa", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-08-23T08:54:09.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-07T10:04:23.000Z", "avg_line_length": 44.0534759358, "max_line_length": 138, "alphanum_fraction": 0.6661811119, "include": true, "reason": "import numpy", "num_tokens": 1975}
|
import unittest
import torch
import numpy as np
from torchradon import radon, iradon, np_iradon
import matplotlib.pyplot as plt
class TestRADONBIAS(unittest.TestCase):
def test_radon_bias_circular_phantom(self):
"""
test that a uniform circular phantom has a small reconstruction bias
"""
pixels = 128
xy = np.arange(-pixels / 2, pixels / 2) + 0.5
x, y = np.meshgrid(xy, xy)
image = x**2 + y**2 <= (pixels/4)**2
image = np.asarray(image, dtype=float)  # np.float was removed from modern NumPy
theta = np.linspace(0., 180., max(image.shape), endpoint=False)
image = torch.from_numpy(image).float()
theta = torch.from_numpy(theta).float()
sinogram = radon(image, theta=theta)
fbp = iradon(sinogram, theta=theta, output_size=image.size(0))
error_fbp = image - fbp
print("iradon FBP rms error: %.3g, mae error: %.3g"
% ((error_fbp**2).mean().sqrt(), error_fbp.abs().mean()))
np_fbp = np_iradon(sinogram, theta=theta, output_size=image.size(0))
error_np_fbp = image - np_fbp
print("np_iradon FBP rms error: %.3g, mae error: %.3g"
% ((error_np_fbp**2).mean().sqrt(), error_np_fbp.abs().mean()))
fig, axarr = plt.subplots(2, 3, figsize=(15, 8))
axarr[0, 0].set_title("Original")
axarr[0, 0].imshow(image.numpy(), cmap=plt.cm.Greys_r)
axarr[1, 0].set_title("Radon transform\n(Sinogram)")
axarr[1, 0].set_xlabel("Projection angle (deg)")
axarr[1, 0].set_ylabel("Projection position (pixels)")
axarr[1, 0].imshow(sinogram.numpy(), cmap=plt.cm.Greys_r,
extent=(0, 180, 0, sinogram.size(0)), aspect='auto')
axarr[0, 1].set_title("fbp[iradon]")
axarr[0, 1].imshow(fbp.numpy(), cmap=plt.cm.Greys_r)
axarr[1, 1].set_title("fbp[iradon] error (rms: %.3g)" % (error_fbp**2).mean().sqrt())
axarr[1, 1].imshow((error_fbp).numpy(), cmap=plt.cm.Greys_r)
axarr[0, 2].set_title("fbp[np_iradon]")
axarr[0, 2].imshow(np_fbp.numpy(), cmap=plt.cm.Greys_r)
axarr[1, 2].set_title("fbp[np_iradon] error(rms: %.3g)" % (error_np_fbp**2).mean().sqrt())
axarr[1, 2].imshow((error_np_fbp).numpy(), cmap=plt.cm.Greys_r)
plt.tight_layout()
plt.savefig("./test_circular.png", format='png', bbox_inches='tight')
plt.show()
def test_radon_bias_phantom(self):
"""
test that the phantom image has a small reconstruction bias
"""
from skimage.io import imread
from skimage.transform import rescale
image = imread('./phantom.png', as_gray=True) # the pixel value range is [0, 1]
#image = rescale(image, scale=0.4, mode='reflect', multichannel=False)
theta = np.linspace(0., 180., max(image.shape), endpoint=False)
image = torch.from_numpy(image).float()
theta = torch.from_numpy(theta).float()
sinogram = radon(image, theta=theta)
fbp = iradon(sinogram, theta=theta, output_size=image.size(0))
error_fbp = image - fbp
print("iradon FBP rms error: %.3g, mae error: %.3g"
% ((error_fbp**2).mean().sqrt(), error_fbp.abs().mean()))
np_fbp = np_iradon(sinogram, theta=theta, output_size=image.size(0))
error_np_fbp = image - np_fbp
print("np_iradon FBP rms error: %.3g, mae error: %.3g"
% ((error_np_fbp**2).mean().sqrt(), error_np_fbp.abs().mean()))
fig, axarr = plt.subplots(2, 3, figsize=(15, 8))
axarr[0, 0].set_title("Original")
axarr[0, 0].imshow(image.numpy(), cmap=plt.cm.Greys_r)
axarr[1, 0].set_title("Radon transform\n(Sinogram)")
axarr[1, 0].set_xlabel("Projection angle (deg)")
axarr[1, 0].set_ylabel("Projection position (pixels)")
axarr[1, 0].imshow(sinogram.numpy(), cmap=plt.cm.Greys_r,
extent=(0, 180, 0, sinogram.size(0)), aspect='auto')
axarr[0, 1].set_title("fbp iradon")
axarr[0, 1].imshow(fbp.numpy(), cmap=plt.cm.Greys_r)
axarr[1, 1].set_title("fbp[iradon] error (rms: %.3g)" % (error_fbp**2).mean().sqrt())
axarr[1, 1].imshow((error_fbp).numpy(), cmap=plt.cm.Greys_r)
axarr[0, 2].set_title("fbp np_iradon")
axarr[0, 2].imshow(np_fbp.numpy(), cmap=plt.cm.Greys_r)
axarr[1, 2].set_title("fbp[np_iradon] error(rms: %.3g)" % (error_np_fbp**2).mean().sqrt())
axarr[1, 2].imshow((error_np_fbp).numpy(), cmap=plt.cm.Greys_r)
plt.tight_layout()
plt.savefig("./test_phantom.png", format='png', bbox_inches='tight')
plt.show()
if __name__ == "__main__":
unittest.main()
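# Note: these tests only print the reconstruction errors; nothing is asserted.
# A possible tightening inside each test (not in the original file; the 0.05 rms
# threshold is an assumption) would be:
#
#   self.assertLess(float((error_fbp ** 2).mean().sqrt()), 0.05)
#   self.assertLess(float((error_np_fbp ** 2).mean().sqrt()), 0.05)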
|
{"hexsha": "e53b0fcd1fe8ec1da9de9d9f68505283bf7cdaa3", "size": 4203, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_phantom.py", "max_stars_repo_name": "AlbertZhangHIT/pytorch_radon", "max_stars_repo_head_hexsha": "6a08e6488dbb4244d02e37034d225fcf2a7f52e5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2020-09-01T04:39:08.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-28T03:52:03.000Z", "max_issues_repo_path": "tests/test_phantom.py", "max_issues_repo_name": "AlbertZhangHIT/pytorch_radon", "max_issues_repo_head_hexsha": "6a08e6488dbb4244d02e37034d225fcf2a7f52e5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-06-12T16:18:13.000Z", "max_issues_repo_issues_event_max_datetime": "2021-06-12T16:18:13.000Z", "max_forks_repo_path": "tests/test_phantom.py", "max_forks_repo_name": "AlbertZhangHIT/torch-radon", "max_forks_repo_head_hexsha": "6a08e6488dbb4244d02e37034d225fcf2a7f52e5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-02-03T14:30:51.000Z", "max_forks_repo_forks_event_max_datetime": "2020-02-03T14:30:51.000Z", "avg_line_length": 36.2327586207, "max_line_length": 92, "alphanum_fraction": 0.6728527242, "include": true, "reason": "import numpy", "num_tokens": 1471}
|
#redirect The Baxter House
|
{"hexsha": "f150532ef16f6fa67eface622182232554d6efd4", "size": 27, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "lab/davisWiki/Baxter_House.f", "max_stars_repo_name": "voflo/Search", "max_stars_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lab/davisWiki/Baxter_House.f", "max_issues_repo_name": "voflo/Search", "max_issues_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lab/davisWiki/Baxter_House.f", "max_forks_repo_name": "voflo/Search", "max_forks_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 13.5, "max_line_length": 26, "alphanum_fraction": 0.8148148148, "num_tokens": 6}
|
"""Module for target encoding."""
from typing import Union, Tuple, List
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.model_selection import check_cv, BaseCrossValidator
from target_encoding.utils import TargetStatistic, cv_splitter
__version__ = '1.1.0'
class BaseTargetEncoder:
"""Class for encoding one data by another data."""
def __init__(self, alpha: float = 0.0, max_bins: int = 30):
"""
Args:
alpha (float): smoothing parameter for generalization.
max_bins (int): maximum number of unique values in a feature.
"""
self.statistic = TargetStatistic(
alpha=alpha,
max_bins=max_bins,
)
def fit(
self,
x_array: np.ndarray,
target_array: np.ndarray,
) -> None:
"""Fit statistics for target encoding.
Args:
x_array (np.ndarray): data to transform by target encoding.
target_array (np.ndarray): targets for encoding "x_array".
Returns:
None
"""
self.statistic.fit(x_array, target_array)
def transform(self, x_array: np.ndarray) -> np.ndarray:
"""Transform data by fitted statistics.
Args:
x_array (np.ndarray): data to transform by target encoding.
Returns:
np.ndarray: Transformed data.
"""
if not self.statistic.is_fitted:
raise UserWarning("Model must be trained before transform.")
if self.statistic.bins is not None:
x_array = np.digitize(x_array, self.statistic.bins)
transformed_x_array = []
for value in x_array:
transformed_x_array.append(
self.statistic.map_cat.get(
value,
self.statistic.global_mean,
)
)
return np.array(transformed_x_array, float)
def __str__(self):
return f'{self.__class__.__name__}'
class TargetEncoder(BaseEstimator):
"""Target encoding transformer and base class
for Classifications and Regressions problems.
"""
def __init__(
self,
alpha: float = 10,
max_bins: int = 30,
split: Tuple[Union[int, BaseCrossValidator]] = (3, 3),
):
"""
Args:
alpha (float): smoothing parameter for generalization.
max_bins (int): maximum number of unique values in a feature.
split (tuple[Union[int, BaseCrossValidator]]): tuple of ints or
cross-validator classes.
If the split length is 0, the algorithm
encodes features without cross-validation.
In this case the features will over-fit on the target.
If the split length is 1, the algorithm encodes features using
cross-validation on the folds.
In this case you will not over-fit on the test set,
but your validation score may still over-fit.
If the split length is 2, the algorithm first separates the data on
the first folds, then encodes features using cross-validation
on the second folds. This is the best way to
avoid over-fitting, but the algorithm uses less data for encoding.
"""
self.alpha = alpha
self.max_bins = max_bins
self.split = tuple(check_cv(x_split) for x_split in split)
self._encodings = [] # type: List[BaseTargetEncoder]
def _encode_one_feature(
self,
x_array: np.ndarray,
target_array: np.ndarray,
) -> BaseTargetEncoder:
"""Fit statistic for one feature in dataset.
Args:
x_array (np.ndarray): data to transform by target encoding.
target_array (np.ndarray): targets for encoding "x_array".
Returns:
BaseTargetEncoder: Trained encoder is returned.
"""
enc = BaseTargetEncoder(self.alpha, self.max_bins)
enc.fit(x_array, target_array)
return enc
def fit(
self,
dataset: np.ndarray,
target_array: np.ndarray,
) -> None:
"""Fit statistics for each feature in dataset.
Args:
dataset (np.ndarray): Dataset for encoding,
has to have (n_rows, n_columns) shape.
target_array (np.ndarray): targets for encoding "dataset".
Returns:
None
"""
self._encodings = []
for i in range(dataset.shape[1]):
self._encodings.append(
self._encode_one_feature(dataset[:, i], target_array)
)
def transform_train(
self,
dataset: np.ndarray,
target_array: np.ndarray,
) -> np.ndarray:
"""Transform train data with split and fit statistics.
Args:
dataset (np.ndarray): Dataset for encoding,
has to have (n_rows, n_columns) shape.
target_array (np.ndarray): targets for encoding "dataset".
Returns:
np.ndarray: Transformed train data.
"""
new_dataset = np.zeros(dataset.shape)
enc = BaseTargetEncoder(self.alpha, self.max_bins)
indexes = np.array(range(dataset.shape[0]))
for (tr_index, val_index) in cv_splitter(
indexes, target_array, self.split,
):
for i in range(dataset.shape[1]):
x_column = dataset[:, i]
enc.fit(x_column[tr_index], target_array[tr_index])
new_dataset[val_index, i] = enc.transform(x_column[val_index])
self.fit(dataset, target_array)
return new_dataset
def transform_test(self, dataset: np.ndarray) -> np.ndarray:
"""Transform test data by fitted statistics.
Args:
dataset (np.ndarray): Dataset for encoding,
has to have (n_rows, n_columns) shape.
Returns:
np.ndarray: Transformed test data.
"""
if dataset.shape[1] != len(self._encodings):
raise ValueError(
f'Number of columns in train was {len(self._encodings)}, '
f'but number of columns in test is {dataset.shape[1]}')
new_dataset = np.zeros(dataset.shape)
for i in range(dataset.shape[1]):
enc = self._encodings[i]
new_dataset[:, i] = enc.transform(dataset[:, i])
return new_dataset
def __str__(self):
return f'{self.__class__.__name__}'
class TargetEncoderRegressor(TargetEncoder):
"""Class based on TargetEncoder for Regression problems."""
def __init__(
self,
alpha: float = 10,
max_bins: int = 30,
used_features: int = 10,
):
"""
Args:
alpha (float): smoothing parameter for generalization.
max_bins (int): maximum number of unique values in a feature.
used_features (int): Number of used features for prediction.
Value has to be between 1 and number of features.
"""
super().__init__(alpha, max_bins)
self.used_features = used_features
def predict(self, dataset: np.ndarray) -> np.ndarray:
"""Prediction for each object by using target encoding.
Args:
dataset (np.ndarray): Dataset for encoding,
has to have (n_rows, n_columns) shape.
Returns:
np.ndarray: Target encoded objects.
"""
new_x = self.transform_test(dataset)
use_features = np.argsort(new_x.std(axis=0))[-self.used_features:]
mean = np.mean(new_x[:, use_features], axis=1)
return mean
class TargetEncoderClassifier(TargetEncoderRegressor):
"""Class based on TargetEncoder for Classification problems."""
def predict_proba(self, dataset: np.ndarray) -> np.ndarray:
"""Prediction for each object by using target encoding.
Args:
dataset (np.ndarray): Dataset for encoding,
has to have (n_rows, n_columns) shape.
Returns:
np.ndarray: Target encoded objects.
"""
pred = super().predict(dataset)
return np.array([1 - pred, pred]).T
def predict(self, dataset: np.ndarray) -> np.ndarray:
"""Class prediction for each object by using target encoding.
Args:
dataset (np.ndarray): Dataset for encoding,
has to have (n_rows, n_columns) shape.
Returns:
np.ndarray: Classes by using argmax from predict_proba method.
"""
return np.argmax(self.predict_proba(dataset), axis=1)
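# A minimal usage sketch (synthetic data; illustrative only, API as defined above):
#
#   import numpy as np
#   rng = np.random.RandomState(0)
#   X_train = rng.randint(0, 5, size=(200, 3))
#   y_train = rng.randint(0, 2, size=200)
#   X_test = rng.randint(0, 5, size=(50, 3))
#   enc = TargetEncoder(alpha=10, max_bins=30)
#   X_train_enc = enc.transform_train(X_train, y_train)  # out-of-fold encoding
#   X_test_enc = enc.transform_test(X_test)   # statistics fitted on all train rows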
|
{"hexsha": "6a2935075e6050266ff411b18142724f0eebb9f4", "size": 8753, "ext": "py", "lang": "Python", "max_stars_repo_path": "target_encoding/target_encoding.py", "max_stars_repo_name": "KirillTushin/target_encoding", "max_stars_repo_head_hexsha": "b8c8fc2bc46d4a5323f6271c0bb23fca5d886040", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 22, "max_stars_repo_stars_event_min_datetime": "2019-03-24T07:35:59.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-22T10:14:28.000Z", "max_issues_repo_path": "target_encoding/target_encoding.py", "max_issues_repo_name": "KirillTushin/target_encoding", "max_issues_repo_head_hexsha": "b8c8fc2bc46d4a5323f6271c0bb23fca5d886040", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "target_encoding/target_encoding.py", "max_forks_repo_name": "KirillTushin/target_encoding", "max_forks_repo_head_hexsha": "b8c8fc2bc46d4a5323f6271c0bb23fca5d886040", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2019-04-02T08:59:47.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-28T21:15:09.000Z", "avg_line_length": 30.9293286219, "max_line_length": 79, "alphanum_fraction": 0.5845995659, "include": true, "reason": "import numpy", "num_tokens": 1774}
|
import numpy as np
import matplotlib.pyplot as plt  # preferred over the deprecated pylab interface
from ai_traineree.runners.env_runner import EnvRunner
from ai_traineree.agents.dqn import DQNAgent
from ai_traineree.tasks import GymTask
from ai_traineree.types import TaskType
env_name = "LunarLander-v2"
task: TaskType = GymTask(env_name)
config = {"batch_size": 64}
agent = DQNAgent(task.obs_size, task.action_size, config=config)
env_runner = EnvRunner(task, agent)
env_runner.interact_episode(0, render=True)
scores = env_runner.run(50, 800, eps_start=1.0, eps_end=0.05, eps_decay=0.995)
env_runner.interact_episode(0, render=True)
# plot the scores
fig = plt.figure()
ax = fig.add_subplot(111)
plt.plot(np.arange(len(scores)), scores)
plt.ylabel("Score")
plt.xlabel("Episode #")
plt.savefig(f"{env_name}.png", dpi=120)
plt.show()
|
{"hexsha": "13ad136b97abeebe6a75581d04f40916c9172a24", "size": 780, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/lunar_lander_discrete.py", "max_stars_repo_name": "LuisFMCuriel/ai-traineree", "max_stars_repo_head_hexsha": "121da3ea48992d9db3ede3634e4e5f48f50f4cc3", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "examples/lunar_lander_discrete.py", "max_issues_repo_name": "LuisFMCuriel/ai-traineree", "max_issues_repo_head_hexsha": "121da3ea48992d9db3ede3634e4e5f48f50f4cc3", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/lunar_lander_discrete.py", "max_forks_repo_name": "LuisFMCuriel/ai-traineree", "max_forks_repo_head_hexsha": "121da3ea48992d9db3ede3634e4e5f48f50f4cc3", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.8571428571, "max_line_length": 78, "alphanum_fraction": 0.7743589744, "include": true, "reason": "import numpy", "num_tokens": 217}
|
"""
Material Balance Plots
@author: Yohanes Nuwara
@email: ign.nuwara97@gmail.com
"""
class drygas():
"""
Dry-Gas Material Balance Plot
"""
def calculate_params(self, p, Bg, Gp, cf, cw, swi):
"""Calculate Material Balance Paramaters for Dry-Gas Reservoir"""
import numpy as np
pi = p[0]
Bgi = Bg[0]
# total gas FVF equals the gas FVF itself (for dry-gas)
Btg = Bg
# calculate Efw
Efw = ((cf + cw * swi) / (1 - swi)) * (pi - p)
F = []; Eg = []
for i in range(len(p)):
F_ = Bg[i] * Gp[i]
Eg_ = Btg[i] - Bgi
F.append(F_); Eg.append(Eg_)
F = np.array(F); Eg = np.array(Eg)
return F, Btg, Efw, Eg
def plot(self, p, z, Gp, F, Btg, Efw, Eg):
"""Create Material Balance Plots for Dry-Gas Reservoir"""
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
# plot attributes
title_size = 12
title_pad = 10
# linear function for curve-fit
def linear_zero_intercept(x, m):
y = m * x
return y
def linear_with_intercept(x, m, c):
y = m * x + c
return y
# Plot 1: F vs Eg
plt.subplot(3,2,1)
x1, y1 = Eg, F
plt.plot(x1, y1, '.-')
plt.title('Plot 1: F vs Eg', size=title_size, pad=title_pad)
plt.xlabel('Eg (RB/scf)')
plt.ylabel('F (res ft3)')
## curve-fitting to calculate the slope as OGIP
x1_norm = x1 / max(x1) # normalize x
y1_norm = y1 / max(y1) # normalize y
popt, pcov = curve_fit(linear_zero_intercept, x1_norm, y1_norm)
m = popt[0]
Gfgi = m * max(y1) / max(x1) # denormalize the slope, hence the OGIP
## plot the regression line
x1_fit = np.linspace(min(x1), max(x1), 5)
y1_fit = linear_zero_intercept(x1_fit, Gfgi)
plt.plot(x1_fit, y1_fit, label='{} MMSCF'.format(np.round(Gfgi * 1E-6, 3)))
plt.legend()
# Plot 2: p/z vs Gp
plt.subplot(3,2,2)
x2, y2 = Gp, (p / z)
plt.plot(x2, y2, '.-')
plt.title('Plot 2: p/z vs Gp', size=title_size, pad=title_pad)
plt.xlabel('Gp (scf)')
plt.ylabel('p/z (psia)')
## curve-fitting to calculate the slope as OGIP
x2_norm = x2 / max(x2) # normalize x
y2_norm = y2 / max(y2) # normalize y
popt, pcov = curve_fit(linear_with_intercept, x2_norm, y2_norm)
m, c = popt[0], popt[1]
Gfgi = (-c / m) * max(x2) # OGIP is the intercept at x-axis, and denormalized
m = m * max(y2) / max(x2) # denormalize the slope
c = c * max(y2) # denormalize the intercept
## plot the regression line
x2_fit = np.linspace(min(x2), max(x2), 5)
y2_fit = linear_with_intercept(x2_fit, m, c)
plt.plot(x2_fit, y2_fit, label='{} MMSCF'.format(np.round(Gfgi * 1E-6, 3)))
plt.legend()
# Plot 3: F/Eg vs Gp
plt.subplot(3,2,3)
x3, y3 = Gp, (F / Eg)
plt.plot(x3, y3, '.-')
plt.title('Plot 3: Waterdrive Diagnostic Plot', size=title_size, pad=title_pad)
plt.xlabel('Gp (scf)')
plt.ylabel('F/Eg (scf)')
## curve-fitting to calculate the slope as OGIP; [1:] drops the first point, which is NaN (0/0)
x3_norm = x3[1:] / max(x3[1:]) # normalize x
y3_norm = y3[1:] / max(y3[1:]) # normalize y
popt, pcov = curve_fit(linear_with_intercept, x3_norm, y3_norm)
m, c = popt[0], popt[1]
m = m * max(y3[1:]) / max(x3[1:]) # denormalize the slope
Gfgi = c * max(y3[1:]) # denormalize the intercept, hence the OGIP
## plot the regression line
x3_fit = np.linspace(min(x3[1:]), max(x3[1:]), 5)
y3_fit = linear_with_intercept(x3_fit, m, Gfgi)
plt.plot(x3_fit, y3_fit, label='{} MMSCF'.format(np.round(Gfgi * 1E-6, 3)))
plt.legend()
# Plot 6: F vs (Eg+Bgi*Efw)
plt.subplot(3,2,4)
Bgi = Btg[0]
x6, y6 = (Eg + Bgi * Efw), F
plt.plot(x6, y6, '.-')
plt.title('Plot 6: F vs (Eg+Bgi*Efw)', size=title_size, pad=title_pad)
plt.xlabel('Eg+Bgi*Efw (res ft3/scf)')
plt.ylabel('F (res ft3)')
## curve-fitting to calculate the slope as OGIP
x6_norm = x6 / max(x6) # normalize x
y6_norm = y6 / max(y6) # normalize y
popt, pcov = curve_fit(linear_zero_intercept, x6_norm, y6_norm)
m = popt[0]
Gfgi = m * max(y6) / max(x6) # denormalize the slope, hence the OGIP
## plot the regression line
x6_fit = np.linspace(min(x6), max(x6), 5)
y6_fit = linear_zero_intercept(x6_fit, Gfgi)
plt.plot(x6_fit, y6_fit, label='{} MMSCF'.format(np.round(Gfgi * 1E-6, 3)))
plt.legend()
# Plot 7: ((p/z)*(1-Efw)) vs Gp
plt.subplot(3,2,5)
x7, y7 = Gp, ((p / z) * (1 - Efw))
plt.plot(x7, y7, '.-')
plt.title('Plot 7: ((p/z)*(1-Efw)) vs Gp', size=title_size, pad=title_pad)
plt.xlabel('Gp (scf)')
plt.ylabel('(p/z)*(1-Efw) (psia)')
## curve-fitting to calculate the slope as OGIP
x7_norm = x7 / max(x7) # normalize x
y7_norm = y7 / max(y7) # normalize y
popt, pcov = curve_fit(linear_with_intercept, x7_norm, y7_norm)
m, c = popt[0], popt[1]
Gfgi = (-c / m) * max(x7) # OGIP is the intercept at x-axis, and denormalized
m = m * max(y7) / max(x7) # denormalize the slope
c = c * max(y7) # denormalize the intercept
## plot the regression line
x7_fit = np.linspace(min(x7), max(x7), 5)
y7_fit = linear_with_intercept(x7_fit, m, c)
plt.plot(x7_fit, y7_fit, label='{} MMSCF'.format(np.round(Gfgi * 1E-6, 3)))
plt.legend()
plt.tight_layout(pad=1.5)
plt.show()
return F, Eg, Efw
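# A minimal usage sketch for the dry-gas case (values are illustrative assumptions;
# units as documented above: p in psia, Bg in res ft3/scf, Gp in scf):
#
#   import numpy as np
#   p  = np.array([3700., 3400., 3100., 2800.])
#   z  = np.array([0.95, 0.94, 0.92, 0.91])
#   Bg = np.array([1.05e-3, 1.14e-3, 1.25e-3, 1.39e-3])
#   Gp = np.array([0., 1.0e9, 2.1e9, 3.3e9])
#   mbal = drygas()
#   F, Btg, Efw, Eg = mbal.calculate_params(p, Bg, Gp, cf=4e-6, cw=3e-6, swi=0.25)
#   F, Eg, Efw = mbal.plot(p, z, Gp, F, Btg, Efw, Eg)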
class gascondensate():
"""
Gas-Condensate Material Balance Plot
"""
def calculate_params(self, p, pdew, Bg, Bo, Np, Gp, Gi, cf, cw, swi, Rs, Rv):
"""Calculate Material Balance Paramaters for Gas-Condensate Reservoir"""
import numpy as np
pi = p[0]
Rvi = Rv[0]
Bgi = Bg[0]
# calculate Efw
Efw = ((cf + cw * swi) / (1 - swi)) * (pi - p)
# calculate F and Btg
F = []; Btg = []; Eg = []
for i in range(len(p)):
if p[i] >= pdew:
# gas-condensate above dewpoint pressure
F_ = Bg[i] * Gp[i]
Btg_ = Bg[i]
Eg_ = Btg_ - Bgi
if p[i] < pdew:
# gas-condensate below dewpoint pressure
F_ = (Np[i] * ((Bo[i] - (Rs[i] * Bg[i])) / (1 - (Rv[i] * Rs[i])))) + ((Gp[i] - Gi[i]) * ((Bg[i] - (Rv[i] * Bo[i])) / (1 - (Rv[i] * Rs[i]))))
Btg_ = ((Bg[i] * (1 - (Rs[i] * Rvi))) + (Bo[i] * (Rvi - Rv[i]))) / (1 - (Rv[i] * Rs[i])) # in RB/STB
Eg_ = Btg_ - Bgi
F.append(F_); Btg.append(Btg_); Eg.append(Eg_)
F, Btg, Eg = np.array(F), np.array(Btg), np.array(Eg)
return F, Btg, Efw, Eg
def plot(self, p, z, Gp, F, Btg, Efw, Eg, Rv):
"""Create Material Balance Plots for Dry-Gas Reservoir"""
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
def calculate_condensate_inplace(Gfgi, Rv):
"""Calculate initial condensate-in-place from the calculated OGIP"""
Rvi = Rv[0]
condensate_inplace = Rvi * Gfgi # in STB
return condensate_inplace
# plot attributes
title_size = 12
title_pad = 10
# linear function for curve-fit
def linear_zero_intercept(x, m):
y = m * x
return y
def linear_with_intercept(x, m, c):
y = m * x + c
return y
# Plot 1: F vs Eg
plt.subplot(3,2,1)
x1, y1 = Eg, F
plt.plot(x1, y1, '.-')
plt.title('Plot 1: F vs Eg', size=title_size, pad=title_pad)
plt.xlabel('Eg (RB/scf)')
plt.ylabel('F (res ft3)')
## curve-fitting to calculate the slope as OGIP
x1_norm = x1 / max(x1) # normalize x
y1_norm = y1 / max(y1) # normalize y
popt, pcov = curve_fit(linear_zero_intercept, x1_norm, y1_norm)
m = popt[0]
Gfgi = m * max(y1) / max(x1) # denormalize the slope, hence the OGIP
## calculate condensate-in-place
condensate_inplace = calculate_condensate_inplace(Gfgi, Rv)
## plot the regression line
x1_fit = np.linspace(min(x1), max(x1), 5)
y1_fit = linear_zero_intercept(x1_fit, Gfgi)
plt.plot(x1_fit, y1_fit, label='(G) {} MMSCF (C) {} MSTB'.format(np.round(Gfgi * 1E-6, 3), np.round(condensate_inplace * 1E-3, 3)))
plt.legend()
# Plot 2: p/z vs Gp
plt.subplot(3,2,2)
plt.title('Plot 2: p/z vs Gp', size=title_size, pad=title_pad)
plt.xlabel('Gp (scf)')
plt.ylabel('p/z (psia)')
if not np.all(z == 0):
x2, y2 = Gp, (p / z)
plt.plot(x2, y2, '.-')
## curve-fitting to calculate the slope as OGIP
x2_norm = x2 / max(x2) # normalize x
y2_norm = y2 / max(y2) # normalize y
popt, pcov = curve_fit(linear_with_intercept, x2_norm, y2_norm)
m, c = popt[0], popt[1]
Gfgi = (-c / m) * max(x2) # OGIP is the intercept at x-axis, and denormalized
m = m * max(y2) / max(x2) # denormalize the slope
c = c * max(y2) # denormalize the intercept
## calculate condensate-in-place
condensate_inplace = calculate_condensate_inplace(Gfgi, Rv)
## plot the regression line
x2_fit = np.linspace(min(x2), max(x2), 5)
y2_fit = linear_with_intercept(x2_fit, m, c)
plt.plot(x2_fit, y2_fit, label='(G) {} MMSCF (C) {} MSTB'.format(np.round(Gfgi * 1E-6, 3), np.round(condensate_inplace * 1E-3, 3)))
plt.legend()
# Plot 3: F/Eg vs Gp
plt.subplot(3,2,3)
x3, y3 = Gp, (F / Eg)
plt.plot(x3, y3, '.-')
plt.title('Plot 3: Waterdrive Diagnostic Plot', size=title_size, pad=title_pad)
plt.xlabel('Gp (scf)')
plt.ylabel('F/Eg (scf)')
## curve-fitting to calculate the slope as OGIP; [1:] drops the first point, which is NaN (0/0)
x3_norm = x3[1:] / max(x3[1:]) # normalize x
y3_norm = y3[1:] / max(y3[1:]) # normalize y
popt, pcov = curve_fit(linear_with_intercept, x3_norm, y3_norm)
m, c = popt[0], popt[1]
m = m * max(y3[1:]) / max(x3[1:]) # denormalize the slope
Gfgi = c * max(y3[1:]) # denormalize the intercept, hence the OGIP
## calculate condensate-in-place
condensate_inplace = calculate_condensate_inplace(Gfgi, Rv)
## plot the regression line
x3_fit = np.linspace(min(x3[1:]), max(x3[1:]), 5)
y3_fit = linear_with_intercept(x3_fit, m, Gfgi)
plt.plot(x3_fit, y3_fit, label='(G) {} MMSCF (C) {} MSTB'.format(np.round(Gfgi * 1E-6, 3), np.round(condensate_inplace * 1E-3, 3)))
plt.legend()
# Plot 6: F vs (Eg+Bgi*Efw)
plt.subplot(3,2,4)
Bgi = Btg[0]
x6, y6 = (Eg + Bgi * Efw), F
plt.plot(x6, y6, '.-')
plt.title('Plot 6: F vs (Eg+Bgi*Efw)', size=title_size, pad=title_pad)
plt.xlabel('Eg+Bgi*Efw (res ft3/scf)')
plt.ylabel('F (res ft3)')
## curve-fitting to calculate the slope as OGIP
x6_norm = x6 / max(x6) # normalize x
y6_norm = y6 / max(y6) # normalize y
popt, pcov = curve_fit(linear_zero_intercept, x6_norm, y6_norm)
m = popt[0]
Gfgi = m * max(y6) / max(x6) # denormalize the slope, hence the OGIP
## calculate condensate-in-place
condensate_inplace = calculate_condensate_inplace(Gfgi, Rv)
## plot the regression line
x6_fit = np.linspace(min(x6), max(x6), 5)
y6_fit = linear_zero_intercept(x6_fit, Gfgi)
plt.plot(x6_fit, y6_fit, label='(G) {} MMSCF (C) {} MSTB'.format(np.round(Gfgi * 1E-6, 3), np.round(condensate_inplace * 1E-3, 3)))
plt.legend()
# Plot 7: ((p/z)*(1-Efw)) vs Gp
plt.subplot(3,2,5)
plt.title('Plot 7: ((p/z)*(1-Efw)) vs Gp', size=title_size, pad=title_pad)
plt.xlabel('Gp (scf)')
plt.ylabel('(p/z)*(1-Efw) (psia)')
if not np.all(z == 0):
x7, y7 = Gp, ((p / z) * (1 - Efw))
plt.plot(x7, y7, '.-')
## curve-fitting to calculate the slope as OGIP
x7_norm = x7 / max(x7) # normalize x
y7_norm = y7 / max(y7) # normalize y
popt, pcov = curve_fit(linear_with_intercept, x7_norm, y7_norm)
m, c = popt[0], popt[1]
Gfgi = (-c / m) * max(x7) # OGIP is the intercept at x-axis, and denormalized
m = m * max(y7) / max(x7) # denormalize the slope
c = c * max(y7) # denormalize the intercept
## calculate condensate-in-place
condensate_inplace = calculate_condensate_inplace(Gfgi, Rv)
## plot the regression line
x7_fit = np.linspace(min(x7), max(x7), 5)
y7_fit = linear_with_intercept(x7_fit, m, c)
plt.plot(x7_fit, y7_fit, label='(G) {} MMSCF (C) {} MSTB'.format(np.round(Gfgi * 1E-6, 3), np.round(condensate_inplace * 1E-3, 3)))
plt.legend()
plt.tight_layout(pad=1.5)
plt.show()
return F, Eg, Efw
|
{"hexsha": "38867eb361c12c0ebd4864ec423ecfec69bec8e7", "size": 13693, "ext": "py", "lang": "Python", "max_stars_repo_path": "matbal/mbal.py", "max_stars_repo_name": "jikutlenova/pyreservoir", "max_stars_repo_head_hexsha": "fb5bb5f408265da060859550ec16daf3b7a1c543", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-09-18T14:33:14.000Z", "max_stars_repo_stars_event_max_datetime": "2020-09-18T14:33:14.000Z", "max_issues_repo_path": "matbal/mbal.py", "max_issues_repo_name": "jikutlenova/pyreservoir", "max_issues_repo_head_hexsha": "fb5bb5f408265da060859550ec16daf3b7a1c543", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "matbal/mbal.py", "max_forks_repo_name": "jikutlenova/pyreservoir", "max_forks_repo_head_hexsha": "fb5bb5f408265da060859550ec16daf3b7a1c543", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.8091397849, "max_line_length": 156, "alphanum_fraction": 0.5370627328, "include": true, "reason": "import numpy,from scipy", "num_tokens": 4507}
|
/*
* TesterScheduler.cpp
*
* This source file is part of the FoundationDB open source project
*
* Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "TesterScheduler.h"
#include "TesterUtil.h"
#include <boost/asio/detail/chrono.hpp>
#include <memory>
#include <thread>
#include <boost/asio.hpp>
using namespace boost::asio;
namespace FdbApiTester {
const TTaskFct NO_OP_TASK = []() {};
class AsioTimer : public ITimer {
public:
AsioTimer(io_context& io_ctx, chrono::steady_clock::duration time) : impl(io_ctx, time) {}
void cancel() override { impl.cancel(); }
boost::asio::steady_timer impl;
};
class AsioScheduler : public IScheduler {
public:
AsioScheduler(int numThreads) : numThreads(numThreads) {}
void start() override {
work = require(io_ctx.get_executor(), execution::outstanding_work.tracked);
for (int i = 0; i < numThreads; i++) {
threads.emplace_back([this]() { io_ctx.run(); });
}
}
void schedule(TTaskFct task) override { post(io_ctx, task); }
std::unique_ptr<ITimer> scheduleWithDelay(int delayMs, TTaskFct task) override {
auto timer = std::make_unique<AsioTimer>(io_ctx, boost::asio::chrono::milliseconds(delayMs));
timer->impl.async_wait([task](const boost::system::error_code& e) {
if (!e) {
task();
}
});
return timer;
}
void stop() override { work = any_io_executor(); }
void join() override {
for (auto& th : threads) {
th.join();
}
}
private:
int numThreads;
std::vector<std::thread> threads;
io_context io_ctx;
any_io_executor work;
};
std::unique_ptr<IScheduler> createScheduler(int numThreads) {
ASSERT(numThreads > 0 && numThreads <= 1000);
return std::make_unique<AsioScheduler>(numThreads);
}
} // namespace FdbApiTester
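// A minimal usage sketch (hypothetical caller; every call below is defined above):
//
//   auto scheduler = createScheduler(4);
//   scheduler->start();
//   scheduler->schedule([]() { /* runs on one of the pool threads */ });
//   auto timer = scheduler->scheduleWithDelay(100, NO_OP_TASK); // fires after ~100 ms
//   scheduler->stop();
//   scheduler->join();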
|
{"hexsha": "f3e62a3356f76fc4c02a51a2ce9e75b0d514d76f", "size": 2312, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "bindings/c/test/apitester/TesterScheduler.cpp", "max_stars_repo_name": "sfc-gh-bvr/foundationdb", "max_stars_repo_head_hexsha": "7594f5c0f92d2582dae717ce0244c11642b27dd4", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "bindings/c/test/apitester/TesterScheduler.cpp", "max_issues_repo_name": "sfc-gh-bvr/foundationdb", "max_issues_repo_head_hexsha": "7594f5c0f92d2582dae717ce0244c11642b27dd4", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "bindings/c/test/apitester/TesterScheduler.cpp", "max_forks_repo_name": "sfc-gh-bvr/foundationdb", "max_forks_repo_head_hexsha": "7594f5c0f92d2582dae717ce0244c11642b27dd4", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2022-03-01T12:28:03.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-01T12:28:03.000Z", "avg_line_length": 26.5747126437, "max_line_length": 95, "alphanum_fraction": 0.7136678201, "num_tokens": 581}
|
function beta = linearCompressibility(S,x)
% computes the linear compressibility of an elasticity tensor
%
% Description
%
% $$\beta(x) = S_{ijkk} x_i x_j$$
%
% Input
% S - elastic @complianceTensor
% x - list of @vector3d
%
% Output
% beta - linear compressibility in directions v
%
% return a function if required
if nargin == 1 || isempty(x)
beta = S2FunHarmonicSym.quadrature(@(x) linearCompressibility(S,x),'bandwidth',2,S.CS);
return
end
% compute tensor product
beta = EinsteinSum(S,[-1 -2 -3 -3],x,-1,x,-2);
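% Example usage (a hypothetical sketch; S a complianceTensor, vector3d.Z the z-axis):
%   beta    = linearCompressibility(S, vector3d.Z) % value along the z-direction
%   betaFun = linearCompressibility(S)             % S2Fun over all directions
%   plot(betaFun)                                  % visualize on the sphere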
|
{"author": "mtex-toolbox", "repo": "mtex", "sha": "f0ce46a720935e9ae8106ef919340534bca1adcb", "save_path": "github-repos/MATLAB/mtex-toolbox-mtex", "path": "github-repos/MATLAB/mtex-toolbox-mtex/mtex-f0ce46a720935e9ae8106ef919340534bca1adcb/TensorAnalysis/@complianceTensor/linearCompressibility.m"}
|
[STATEMENT]
lemma semantics_tm_id_map [simp]: \<open>map \<lparr>\<^bold>#, \<^bold>\<dagger>\<rparr> ts = ts\<close>
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. map \<lparr>\<^bold>#, \<^bold>\<dagger>\<rparr> ts = ts
[PROOF STEP]
by (auto cong: map_cong)
|
{"llama_tokens": 109, "file": "FOL_Axiomatic_FOL_Axiomatic_Variant", "length": 1}
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
Examples of image inversion attack
"""
import numpy as np
import matplotlib.pyplot as plt
from mindspore.train.serialization import load_checkpoint, load_param_into_net
from mindspore import Tensor, context
from mindspore import nn
from mindarmour.privacy.evaluation.inversion_attack import ImageInversionAttack
from mindarmour.utils.logger import LogUtil
from examples.common.networks.lenet5.lenet5_net import LeNet5, conv, fc_with_initialize
from examples.common.dataset.data_processing import generate_mnist_dataset
LOGGER = LogUtil.get_instance()
LOGGER.set_level('INFO')
TAG = 'InversionAttack'
# pylint: disable=invalid-name
class LeNet5_part(nn.Cell):
"""
Part of LeNet5 network.
"""
def __init__(self):
super(LeNet5_part, self).__init__()
self.conv1 = conv(1, 6, 5)
self.conv2 = conv(6, 16, 5)
self.fc1 = fc_with_initialize(16*5*5, 120)
self.fc2 = fc_with_initialize(120, 84)
self.fc3 = fc_with_initialize(84, 10)
self.relu = nn.ReLU()
self.max_pool2d = nn.MaxPool2d(kernel_size=2, stride=2)
self.flatten = nn.Flatten()
def construct(self, x):
x = self.conv1(x)
x = self.relu(x)
x = self.max_pool2d(x)
x = self.conv2(x)
x = self.relu(x)
x = self.max_pool2d(x)
return x
def mnist_inversion_attack(net):
"""
Image inversion attack based on LeNet5 and MNIST dataset.
"""
# upload trained network
ckpt_path = '../../common/networks/lenet5/trained_ckpt_file/checkpoint_lenet-10_1875.ckpt'
load_dict = load_checkpoint(ckpt_path)
load_param_into_net(net, load_dict)
# get original data and their inferred features
data_list = "../../common/dataset/MNIST/train"
batch_size = 32
ds = generate_mnist_dataset(data_list, batch_size)
i = 0
batch_num = 1
sample_num = 30
for data in ds.create_tuple_iterator(output_numpy=True):
i += 1
images = data[0].astype(np.float32)
true_labels = data[1][: sample_num]
target_features = net(Tensor(images)).asnumpy()[:sample_num]
original_images = images[: sample_num]
if i >= batch_num:
break
# run attacking
inversion_attack = ImageInversionAttack(net, input_shape=(1, 32, 32), input_bound=(0, 1), loss_weights=[1, 0.1, 5])
inversion_images = inversion_attack.generate(target_features, iters=100)
# get the predict results of inversion images on a new trained model
net2 = LeNet5()
new_ckpt_path = '../../common/networks/lenet5/new_trained_ckpt_file/checkpoint_lenet-10_1875.ckpt'
new_load_dict = load_checkpoint(new_ckpt_path)
load_param_into_net(net2, new_load_dict)
pred_labels = np.argmax(net2(Tensor(inversion_images).astype(np.float32)).asnumpy(), axis=1)
# evaluate the quality of inversion images
avg_l2_dis, avg_ssim, avg_confi = inversion_attack.evaluate(original_images, inversion_images, true_labels, net2)
LOGGER.info(TAG, 'The average L2 distance between original images and inverted images is: {}'.format(avg_l2_dis))
LOGGER.info(TAG, 'The average ssim value between original images and inverted images is: {}'.format(avg_ssim))
LOGGER.info(TAG, 'The average prediction confidence on true labels of inverted images is: {}'.format(avg_confi))
LOGGER.info(TAG, 'True labels of original images are: %s' % true_labels)
LOGGER.info(TAG, 'Predicted labels of inverted images are: %s' % pred_labels)
# plot 10 images
plot_num = min(sample_num, 10)
for n in range(1, plot_num+1):
plt.subplot(2, plot_num, n)
if n == 1:
plt.title('Original images', fontsize=16, loc='left')
plt.gray()
plt.imshow(images[n - 1].reshape(32, 32))
plt.subplot(2, plot_num, n + plot_num)
if n == 1:
plt.title('Inverted images', fontsize=16, loc='left')
plt.gray()
plt.imshow(inversion_images[n - 1].reshape(32, 32))
plt.show()
if __name__ == '__main__':
# device_target can be "CPU", "GPU" or "Ascend"
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
# attack based on complete LeNet5
mnist_inversion_attack(LeNet5())
# attack based on part of LeNet5. The shallower network can lead to a better attack result
mnist_inversion_attack(LeNet5_part())
|
{"hexsha": "75b7b363a32f14e6ae476459c8823845575f9066", "size": 5045, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/privacy/inversion_attack/mnist_inversion_attack.py", "max_stars_repo_name": "hboshnak/mindarmour", "max_stars_repo_head_hexsha": "0609a4eaea875a84667bed279add9305752880cc", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 139, "max_stars_repo_stars_event_min_datetime": "2020-03-28T02:37:07.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-24T15:35:39.000Z", "max_issues_repo_path": "examples/privacy/inversion_attack/mnist_inversion_attack.py", "max_issues_repo_name": "hboshnak/mindarmour", "max_issues_repo_head_hexsha": "0609a4eaea875a84667bed279add9305752880cc", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2020-04-02T09:50:21.000Z", "max_issues_repo_issues_event_max_datetime": "2020-05-09T06:52:57.000Z", "max_forks_repo_path": "examples/privacy/inversion_attack/mnist_inversion_attack.py", "max_forks_repo_name": "hboshnak/mindarmour", "max_forks_repo_head_hexsha": "0609a4eaea875a84667bed279add9305752880cc", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 12, "max_forks_repo_forks_event_min_datetime": "2020-03-28T02:52:42.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-15T08:05:06.000Z", "avg_line_length": 39.4140625, "max_line_length": 119, "alphanum_fraction": 0.6870168484, "include": true, "reason": "import numpy", "num_tokens": 1272}
|
[STATEMENT]
lemma tree_map_of_dom: "dom (lookup (tree_map_of' empty pres)) = dom (map_of pres)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. dom (lookup (tree_map_of' empty pres)) = dom (AList_Upd_Del.map_of pres)
[PROOF STEP]
by (induction pres) (auto simp: map_empty map_update[OF tree_map_of_invar[OF invar_empty]] tree_map_of_works)
|
{"llama_tokens": 139, "file": "Verified_SAT_Based_AI_Planning_Solve_SASP", "length": 1}
|
# parameters that govern counterfactual
remove_shock!(parameters, :global_sectoral_shock_njs)
remove_shock!(parameters, :idiosyncratic_shock_njs)
|
{"hexsha": "5ebb8b2d1d134762dc04dd334b16883b994d6432", "size": 147, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "experiments/CES0.5/nosectoral/change_parameters.jl", "max_stars_repo_name": "korenmiklos/Diversification-Through-Trade-Replication", "max_stars_repo_head_hexsha": "5e286506a97fbf60c344879110fb7c38efb0b8fc", "max_stars_repo_licenses": ["CC-BY-4.0"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2019-11-10T00:22:37.000Z", "max_stars_repo_stars_event_max_datetime": "2020-11-13T13:41:45.000Z", "max_issues_repo_path": "experiments/no_china/nosectoral/change_parameters.jl", "max_issues_repo_name": "korenmiklos/Diversification-Through-Trade-Replication", "max_issues_repo_head_hexsha": "5e286506a97fbf60c344879110fb7c38efb0b8fc", "max_issues_repo_licenses": ["CC-BY-4.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "experiments/no_china/nosectoral/change_parameters.jl", "max_forks_repo_name": "korenmiklos/Diversification-Through-Trade-Replication", "max_forks_repo_head_hexsha": "5e286506a97fbf60c344879110fb7c38efb0b8fc", "max_forks_repo_licenses": ["CC-BY-4.0"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2019-10-07T10:30:31.000Z", "max_forks_repo_forks_event_max_datetime": "2020-11-13T13:43:49.000Z", "avg_line_length": 29.4, "max_line_length": 53, "alphanum_fraction": 0.8571428571, "num_tokens": 40}
|
#!/bin/python
import numpy as np
def test_lookup_AaronHill_by_mlb_ID(lookup_obj):
s = lookup_obj.from_mlb_ids([431094])
print(s)
assert(len(s) == 1)
assert(s.iloc[0].mlb_name == 'Aaron Hill')
def test_lookup_bad_ID(lookup_obj):
s = lookup_obj.from_mlb_ids([0])
assert(len(s) == 0)
def test_lookup_by_mlb_ids(lookup_obj):
s = lookup_obj.from_mlb_ids([643327, 554430, 621107, 435043])
print(s)
assert(len(s) == 4)
assert(s.iloc[0].mlb_name == 'Zach Duke')
assert(s.iloc[1].mlb_name == 'Zach Eflin')
assert(s.iloc[2].mlb_name == 'Zack Godley')
assert(s.iloc[3].mlb_name == 'Zack Wheeler')
def test_lookup_by_yahoo_ids(lookup_obj):
s = lookup_obj.from_yahoo_ids([9320, 9449, 9317])
print(s)
assert(len(s) == 3)
assert(s.iloc[0].yahoo_name == 'Chaz Roe')
assert(s.iloc[1].yahoo_name == 'Christian Yelich')
assert(s.iloc[2].yahoo_name == 'Hyun-Jin Ryu')
def test_lookup_by_cbs_ids(lookup_obj):
s = lookup_obj.from_cbs_ids([223481])
print(s)
assert(len(s) == 1)
assert(s.iloc[0].mlb_name == 'Kyle Lohse')
assert(s.iloc[0].cbs_name == 'Kyle Lohse')
assert(s.iloc[0].cbs_id == 223481)
assert(s.iloc[0].mlb_id == 346798)
def test_lookup_by_espn_ids(lookup_obj):
s = lookup_obj.from_espn_ids([33080])
print(s)
assert(len(s) == 1)
assert(s.iloc[0].mlb_name == 'Adonis Garcia')
assert(s.iloc[0].espn_id == 33080)
assert(s.iloc[0].mlb_id == 611177)
def test_lookup_by_fangraphs_ids(lookup_obj):
s = lookup_obj.from_fangraphs_ids([13677, 'sa739620'])
print(s)
assert(len(s) == 2)
assert(s.iloc[0].mlb_name == 'Corey Oswalt')
assert(s.iloc[0].fg_id == '13677')
assert(s.iloc[0].mlb_id == 621261)
assert(s.iloc[1].mlb_name == 'Thairo Estrada')
assert(s.iloc[1].fg_id == 'sa739620')
assert(s.iloc[1].mlb_id == 642731)
def test_lookup_by_name(lookup_obj):
s = lookup_obj.from_names(['Jose Ramirez'])
print(s)
assert(len(s) == 2)
assert(s.iloc[0].mlb_team == 'ATL')
assert(s.iloc[0].mlb_pos == 'P')
assert(s.iloc[1].mlb_team == 'CLE')
assert(s.iloc[1].mlb_pos == '3B')
def test_lookup_by_name_multi(lookup_obj):
s = lookup_obj.from_names(['Khris Davis', 'Enrique Hernandez'])
print(s)
assert(len(s) == 2)
assert(s.iloc[0].mlb_name == 'Enrique Hernandez')
assert(s.iloc[1].ottoneu_name == 'Khristopher Davis')
def test_lookup_by_name_empty(lookup_obj):
s = lookup_obj.from_names(['Joe Baseball'])
print(s)
assert(len(s) == 0)
def test_lookup_by_name_nan_yahoo_id(lookup_obj):
# In the fake database, A.J. Jimenez has nan for the yahoo ID. A.J.
# Pollock has a valid yahoo_ID.
s = lookup_obj.from_names(['A.J. Jimenez', 'A.J. Pollock'],
filter_missing='yahoo_id')
print(s)
assert(len(s) == 1)
assert(np.isnan(s.iloc(0)[0].yahoo_id))
assert(s.iloc(0)[0].mlb_name == 'A.J. Jimenez')
|
{"hexsha": "0ed9c22d794019ee1d72b87358524795b102c330", "size": 2965, "ext": "py", "lang": "Python", "max_stars_repo_path": "baseball_id/tests/test_lookup.py", "max_stars_repo_name": "spilchen/baseball_id_map", "max_stars_repo_head_hexsha": "09b824ce19a6da459b6e7409687263c179202f2a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-06-15T16:47:26.000Z", "max_stars_repo_stars_event_max_datetime": "2019-06-15T16:47:26.000Z", "max_issues_repo_path": "baseball_id/tests/test_lookup.py", "max_issues_repo_name": "spilchen/baseball_id_map", "max_issues_repo_head_hexsha": "09b824ce19a6da459b6e7409687263c179202f2a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "baseball_id/tests/test_lookup.py", "max_forks_repo_name": "spilchen/baseball_id_map", "max_forks_repo_head_hexsha": "09b824ce19a6da459b6e7409687263c179202f2a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.3564356436, "max_line_length": 72, "alphanum_fraction": 0.6435075885, "include": true, "reason": "import numpy", "num_tokens": 948}
|
[STATEMENT]
lemma "\<And>x. P x \<and> Q x \<Longrightarrow> A x \<and> B x \<Longrightarrow> R x y \<Longrightarrow> True"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>x. \<lbrakk>P x \<and> Q x; A x \<and> B x; R x y\<rbrakk> \<Longrightarrow> True
[PROOF STEP]
apply match_test \<comment> \<open>Valid match, but not quite what we were expecting..\<close>
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>x. \<lbrakk>P x \<and> Q x; A x \<and> B x; R x y\<rbrakk> \<Longrightarrow> True
[PROOF STEP]
back \<comment> \<open>Can backtrack over matches, exploring all bindings\<close>
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>x. \<lbrakk>P x \<and> Q x; A x \<and> B x; R x y\<rbrakk> \<Longrightarrow> True
[PROOF STEP]
back
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>x. \<lbrakk>P x \<and> Q x; A x \<and> B x; R x y\<rbrakk> \<Longrightarrow> True
[PROOF STEP]
back
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>x. \<lbrakk>P x \<and> Q x; A x \<and> B x; R x y\<rbrakk> \<Longrightarrow> True
[PROOF STEP]
back
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>x. \<lbrakk>P x \<and> Q x; A x \<and> B x; R x y\<rbrakk> \<Longrightarrow> True
[PROOF STEP]
back
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>x. \<lbrakk>P x \<and> Q x; A x \<and> B x; R x y\<rbrakk> \<Longrightarrow> True
[PROOF STEP]
back \<comment> \<open>Found the other conjunction\<close>
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>x. \<lbrakk>P x \<and> Q x; A x \<and> B x; R x y\<rbrakk> \<Longrightarrow> True
[PROOF STEP]
back
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>x. \<lbrakk>P x \<and> Q x; A x \<and> B x; R x y\<rbrakk> \<Longrightarrow> True
[PROOF STEP]
back
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>x. \<lbrakk>P x \<and> Q x; A x \<and> B x; R x y\<rbrakk> \<Longrightarrow> True
[PROOF STEP]
back
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>x. \<lbrakk>P x \<and> Q x; A x \<and> B x; R x y\<rbrakk> \<Longrightarrow> True
[PROOF STEP]
oops
|
{"llama_tokens": 892, "file": null, "length": 11}
|
@testset "Node flux report" begin
fName = "report/nodeflux"
nodeFluxPlot( mpSim, 12, :in, "A junior", "B junior", "Reserve junior",
"A senior", "B senior", "Master" )
nodeFluxPlot( mpSim, 12, :out, "A junior", "B junior", "Reserve junior",
"A senior", "B senior", "Master", plotType=:stacked )
#=
nodeFluxPlot( mpSim, 12, :in, "A junior", "B junior", "Reserve junior",
"A senior", "B senior", "Master", showPlot=false,
filename="plot/node in flux test" )
nodeFluxPlot( mpSim, 12, :out, "A junior", "B junior", "Reserve junior",
"A senior", "B senior", "Master", showPlot=false, plotType=:stacked,
filename="plot/node out flux test" )
=#
end # @testset "Node flux report"
|
{"hexsha": "745d8163798646f61ffcec5405afd103394e3852", "size": 697, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/plot/nodeflux.jl", "max_stars_repo_name": "jvkerckh/ManpowerPlanningIO.jl", "max_stars_repo_head_hexsha": "90ca00b46616804034bfd962e2a79c14c4328e16", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/plot/nodeflux.jl", "max_issues_repo_name": "jvkerckh/ManpowerPlanningIO.jl", "max_issues_repo_head_hexsha": "90ca00b46616804034bfd962e2a79c14c4328e16", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/plot/nodeflux.jl", "max_forks_repo_name": "jvkerckh/ManpowerPlanningIO.jl", "max_forks_repo_head_hexsha": "90ca00b46616804034bfd962e2a79c14c4328e16", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.0, "max_line_length": 72, "alphanum_fraction": 0.6484935438, "num_tokens": 229}
|
import pathlib
import pickle
import numpy as np
from keras.models import load_model as model_loader
from utils import preprocess, create_dataset
MODEL_DIR = pathlib.Path(__file__).parent / 'models'
MODEL_NAME = 'weights-06-6.66.hdf5'
TEXT = """
Наталья Ильинишна очень хорошо со мной обходится,
— сказал Борис.
— Я не могу жаловаться, — сказал он.
— Оставьте, Борис, вы такой дипломат
(слово дипломат было в большом ходу у детей в том особом значении, какое они придавали этому слову); даже скучно, — сказала Наташа оскорбленным, дрожащим голосом.
— За что она ко мне пристает?
— Ты этого никогда не поймешь,
— сказала она, обращаясь к Вере, — потому что ты никогда никого не любила; у тебя сердца нет, ты только madame de Genlis (это прозвище, считавшееся очень обидным, было дано Вере Николаем),
и твое первое удовольствие — делать неприятности другим. Ты кокетничай с Бергом сколько хочешь, — проговорила она скоро.
"""
def load_model(model_name=MODEL_NAME):
return model_loader(str(MODEL_DIR / model_name))
def load_tokenizer(dump_name='models/tokenizer.dump'):
with open(dump_name, 'rb') as file:
return pickle.load(file)
def load_hot_encodings(dump_name='models/labels.dump'):
with open(dump_name, 'rb') as file:
return pickle.load(file)
if __name__ == '__main__':
tokenizer = load_tokenizer()
labels = load_hot_encodings()
preprocessed_text = preprocess(TEXT, read=True)
X = tokenizer.texts_to_sequences(preprocessed_text)
X_test, y = create_dataset(np.array(X), 4)
print('Preprocessed_text:', preprocessed_text)
model = load_model()
results = model.predict(X_test)
print(results)
for result in results:
rargsort = result.argsort()[::-1][:5]
for indice in rargsort:
next_word = tokenizer.index_word[labels[indice]]
print(f'{next_word}: {result[indice]}')
print('\n' + '-' * 10)
|
{"hexsha": "c490223c24f8b9fefabfe7bad918a3bb948848f6", "size": 1928, "ext": "py", "lang": "Python", "max_stars_repo_path": "lstm/predict.py", "max_stars_repo_name": "MardanovTimur/kaggle", "max_stars_repo_head_hexsha": "62392863a07fcc5de9821c28cf9c6dbbf39ced59", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lstm/predict.py", "max_issues_repo_name": "MardanovTimur/kaggle", "max_issues_repo_head_hexsha": "62392863a07fcc5de9821c28cf9c6dbbf39ced59", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lstm/predict.py", "max_forks_repo_name": "MardanovTimur/kaggle", "max_forks_repo_head_hexsha": "62392863a07fcc5de9821c28cf9c6dbbf39ced59", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.0967741935, "max_line_length": 188, "alphanum_fraction": 0.7142116183, "include": true, "reason": "import numpy", "num_tokens": 622}
|
import os
import pickle
from glob import glob
import numpy as np
import pandas as pd
import pydicom  # used below when reading DICOM files
import tensorflow as tf  # used throughout (tf.train features, tf.data, tf.keras)
from kaggle_runner.utils.kernel_utils import logger, rle2mask
# The following functions can be used to convert a value to a type compatible
# with tf.Example.
def _bytes_feature(value):
"""Returns a bytes_list from a string / byte.
Args:
value:
Returns:
"""
if isinstance(value, type(tf.constant(0))):
value = value.numpy()
# BytesList won't unpack a string from an EagerTensor.
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _float_feature(value):
"""Returns a float_list from a float / double.
Args:
value:
Returns:
"""
return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))
def _int64_feature(value):
"""Returns an int64_list from a bool / enum / int / uint.
Args:
value:
Returns:
"""
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def _int64_feature_from_list(value):
"""Returns an int64_list from a bool / enum / int / uint.
Args:
value:
Returns:
"""
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
class PS_TF_DataHandler:
"""Helpers for building tf.data pipelines and estimator input functions."""
def __init__(self):
self.fns = None
def to_tf_from_disk(self, fns, df, TARGET_COLUMN, im_height, im_width, im_chan):
self.df = df
self.TARGET_COLUMN = TARGET_COLUMN
self.im_height = im_height
self.im_width = im_width
self.im_chan = im_chan
fns_ds = tf.data.Dataset.from_tensor_slices(fns)
image_ds = fns_ds.map(
self.load_and_preprocess_image(imgPreprocessFlag=False),
num_parallel_calls=2,
)
return image_ds
def load_and_preprocess_image(self, imgPreprocessFlag=True):
def _preprocess_image(img):
raise NotImplementedError()
# hard to do, as read_file, _id.split needs complicate op of tensor,
# easier to first read numpy then save to tfrecord
def _load_and_preprocess_image(path):
X_train = np.zeros(
(self.im_height, self.im_width, self.im_chan), dtype=np.uint8
)
Y_train = np.zeros((self.im_height, self.im_width, 1), dtype=np.uint8)
print("Getting train images and masks ... ")
_id = path
# sys.stdout.flush()
# FIXME it cannot be put to autograph!!!
raise RuntimeError("Pydicom read cannot be put to autograph!!!")
dataset = pydicom.read_file(_id)
_id_keystr = _id.split("/")[-1][:-4]
X_train = np.expand_dims(dataset.pixel_array, axis=2)
try:
mask_data = self.df.loc[_id_keystr, self.TARGET_COLUMN]
if "-1" in mask_data:
Y_train = np.zeros((1024, 1024, 1))
else:
if type(mask_data) == str:
Y_train = np.expand_dims(
rle2mask(
self.df.loc[_id_keystr, self.TARGET_COLUMN], 1024, 1024
),
axis=2,
)
else:
Y_train = np.zeros((1024, 1024, 1))
for x in mask_data:
Y_train = Y_train + np.expand_dims(
rle2mask(x, 1024, 1024), axis=2
)
except KeyError:
print(
f"Key {_id.split('/')[-1][:-4]} without mask, assuming healthy patient."
)
# Assume missing masks are empty masks.
Y_train = np.zeros((1024, 1024, 1))
if imgPreprocessFlag:
return _preprocess_image(X_train), _preprocess_image(Y_train)
return (X_train, Y_train)
return _load_and_preprocess_image
@staticmethod
def maybe_download():
# By default the file at the url origin is downloaded to the cache_dir
# ~/.keras, placed in the cache_subdir datasets, and given the filename
# fname
train_path = tf.keras.utils.get_file(TRAIN_URL.split("/")[-1], TRAIN_URL)
test_path = tf.keras.utils.get_file(TEST_URL.split("/")[-1], TEST_URL)
return train_path, test_path
@staticmethod
def get_train_dataset(train_X_np, train_Y_np): # join(dataset_dir,'labels.csv')
image_ds = tf.data.Dataset.from_tensor_slices(train_X_np)
image_mask_ds = tf.data.Dataset.from_tensor_slices(train_Y_np)
return tf.data.Dataset.zip((image_ds, image_mask_ds))
@staticmethod
def load_data(train_path, test_path):
"""Returns the iris dataset as (train_x, train_y), (test_x, test_y).
Args:
train_X_np:
train_Y_np): # join(dataset_dir:
'labels.csv')image_ds: (Default value = tf.data.Dataset.from_tensor_slices(train_X_np)image_mask_ds = tf.data.Dataset.from_tensor_slices(train_Y_np)return tf.data.Dataset.zip((image_ds)
image_mask_ds))@staticmethodload_data:
test_path:
train_Y_np): # join(dataset_dir:
image_mask_ds))@staticmethodload_data(train_path:
Returns:
"""
# train_path, test_path = maybe_download()
        # here the test set really has no labels, so we need to do CV on the train part
train_X = pickle.load(open(train_path, "rb")) # (None, 2048)
# (None, 2048) 2048 features from xception net
to_predict_X = pickle.load(open(test_path, "rb"))
try:
labels = pd.read_csv(os.path.join(DATASET_DIR, "labels.csv"))
except FileNotFoundError:
labels = pd.read_csv(os.path.join(DATASET_DIR2, "labels.csv"))
labels = labels["breed"].values.tolist() # for all training data
global SPECIES
SPECIES = sorted(list(set(labels)))
_label_id_map = dict((name, index) for index, name in enumerate(SPECIES))
train_y = [_label_id_map[label] for label in labels]
return (train_X, train_y), to_predict_X
@staticmethod
def train_input_fn_bt(
features,
labels,
batch_size,
cv,
cv_train=True,
split_id=None,
n_splits=None,
ds=None,
ds_len=-1,
):
# for boost tree, need to prepare feature columns
# 2048? columns, all float
if cv:
return PS_TF_DataHandler._input_fn_bt(
features,
labels,
batch_size,
shuffle=True,
split_id=split_id,
n_splits=n_splits,
cv_train=cv_train,
ds=ds,
ds_len=ds_len,
)
else:
return PS_TF_DataHandler._input_fn_bt(
features, labels, batch_size, shuffle=True, cv=False, ds=ds
)
@staticmethod
def eval_input_fn_bt(
features, labels, batch_size, cv, split_id=None, n_splits=None
):
if cv:
return PS_TF_DataHandler._input_fn_bt(
features,
labels,
batch_size,
with_y=True,
repeat=False,
shuffle=False,
split_id=split_id,
n_splits=n_splits,
cv_train=False,
)
else:
return PS_TF_DataHandler._input_fn_bt(
features,
labels,
batch_size,
with_y=True,
repeat=False,
shuffle=False,
cv=False,
)
@staticmethod
def pred_input_fn_bt(features, batch_size):
return PS_TF_DataHandler._input_fn_bt(
features,
None,
batch_size,
with_y=False,
repeat=False,
shuffle=False,
cv=False,
)
@staticmethod
# for these, we will need to extract all the points before:
def _input_fn_bt(
features,
labels,
batch_size,
with_y=True,
repeat=True,
shuffle=True,
split_id=-1,
n_splits=10,
cv=True,
cv_train=True,
ds=None,
ds_len=-1,
):
if ds is not None:
if shuffle and ds_len <= 0:
raise ValueError("shuffle need to now data length")
data_len = ds_len
else:
data_len = len(labels)
def _to_dict(f):
# first to pandas data frame
df = pd.DataFrame(
f, columns=[str(i) for i in range(features.shape[-1])]
)
return dict(df)
features = _to_dict(features)
if with_y:
ds = tf.data.Dataset.from_tensor_slices((features, labels))
else:
ds = tf.data.Dataset.from_tensor_slices(features)
if cv:
assert split_id >= 0 and n_splits > 1 and split_id < n_splits
if cv_train:
ds = [ds.shard(n_splits, i) for i in range(n_splits)]
shards_cross = [
ds[val_id] for val_id in range(n_splits) if val_id != split_id
]
ds = shards_cross[0]
for t in shards_cross[1:]:
ds = ds.concatenate(t)
if shuffle:
                    # shuffle buffer limited: a full-size buffer may not fit in memory
ds = ds.shuffle(
buffer_size=int(data_len * (n_splits - 1) / n_splits)
)
else: # cv part for evaluation, no need to shuffle
ds = ds.shard(n_splits, split_id)
else:
if shuffle:
ds = ds.shuffle(buffer_size=data_len)
        # After shuffling we do the cross-validation split.
        # Adapted from Dan, https://stackoverflow.com/questions/39748660/how-to-perform-k-fold-cross-validation-with-tensorflow
        # No need to append an id and remove it later: we just split into
        # n_splits shards and rearrange those shards.
if repeat and cv_train:
ds = ds.repeat()
# Batch the examples
assert batch_size is not None, "batch_size must not be None"
# Return the dataset.
return ds.batch(batch_size).prefetch(1)
@staticmethod
# for these, we will need to extract all the points before:
def train_input_fn(features, labels, batch_size, split_id=-1, n_splits=10, cv=True):
"""An input function for training
Args:
features:
labels:
batch_size:
split_id: (Default value = -1)
n_splits: (Default value = 10)
cv: (Default value = True)
Returns:
"""
# read from the tfrecord file (save the extracted ones)(read the data)
ds = tf.data.Dataset.from_tensor_slices((features, labels))
if cv:
assert split_id >= 0 and n_splits > 1 and split_id < n_splits
ds = [ds.shard(n_splits, i) for i in range(n_splits)]
shards_cross = [
ds[val_id] for val_id in range(n_splits) if val_id != split_id
]
s = shards_cross[0]
for t in shards_cross[1:]:
s = s.concatenate(t)
            # shuffle buffer limited: a full-size buffer may not fit in memory
ds = s.shuffle(buffer_size=int(len(labels) * (n_splits - 1) / n_splits))
else:
ds = ds.shuffle(buffer_size=len(labels))
        # After shuffling we do the cross-validation split.
        # Adapted from Dan, https://stackoverflow.com/questions/39748660/how-to-perform-k-fold-cross-validation-with-tensorflow
        # No need to append an id and remove it later: we just split into
        # n_splits shards and rearrange those shards.
ds = ds.repeat()
# Batch the examples
assert batch_size is not None, "batch_size must not be None"
# Return the dataset.
return ds.batch(batch_size).prefetch(1)
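    # A minimal sketch (illustrative helper, not part of the original class)
    # of the shard-based k-fold split used by the input functions above:
    # fold `split_id` of `n_splits` is held out and the remaining shards are
    # concatenated for training.
    @staticmethod
    def _kfold_shards_sketch(ds, n_splits, split_id):
        shards = [ds.shard(n_splits, i) for i in range(n_splits)]
        train_ds = None
        for i, s in enumerate(shards):
            if i == split_id:
                continue
            train_ds = s if train_ds is None else train_ds.concatenate(s)
        return train_ds, shards[split_id]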
@staticmethod
def eval_input_fn(features, labels, batch_size, split_id, n_splits=10):
"""An input function for evaluation or prediction
Args:
features:
labels:
batch_size:
split_id:
n_splits: (Default value = 10)
Returns:
"""
assert split_id >= 0 and n_splits > 1 and split_id < n_splits
if labels is None:
# No labels, use only features.
inputs = features
else:
inputs = (features, labels)
# Convert the inputs to a Dataset.
ds = tf.data.Dataset.from_tensor_slices(inputs)
ds = ds.shard(n_splits, split_id)
# Batch the examples
assert batch_size is not None, "batch_size must not be None"
ds = ds.batch(batch_size)
# Return the dataset.
return ds
@staticmethod
def predict_input_fn(features, batch_size):
"""An input function for evaluation or prediction
Args:
features:
batch_size:
Returns:
"""
inputs = features
# Convert the inputs to a Dataset.
ds = tf.data.Dataset.from_tensor_slices(inputs)
# Batch the examples
assert batch_size is not None, "batch_size must not be None"
ds = ds.batch(batch_size)
# Return the dataset.
return ds
@staticmethod
# specific to data structure, need to split out later
def to_tfrecord(ds, file_name="train_dev.tfrec"):
ds = ds.map(lambda a, b: (tf.io.encode_jpeg(a), tf.io.encode_jpeg(b)))
writer = tf.data.experimental.TFRecordWriter(file_name)
writer.write(ds.map(lambda a, b: a))
target_writer = tf.data.experimental.TFRecordWriter(f"target_{file_name}")
target_writer.write(ds.map(lambda a, b: b))
return
@staticmethod
def from_tfrecord():
def _tf_read_jpeg(wc):
pathes = sorted(glob(wc))
logger.debug(f"recover data from {pathes}")
ds = tf.data.TFRecordDataset(pathes)
ds = ds.map(tf.io.decode_jpeg)
return ds
image_data_wildcard = "train_dev.*.tfrec"
mask_data_wildcard = "target_train_dev.*.tfrec"
return tf.data.Dataset.zip(
(_tf_read_jpeg(image_data_wildcard), _tf_read_jpeg(mask_data_wildcard))
)
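    # A minimal usage sketch (hypothetical file names) for the tfrecord round
    # trip above; note from_tfrecord() globs "train_dev.*.tfrec", so sharded
    # names such as "train_dev.0.tfrec" are what it expects back:
    #
    #   ds = PS_TF_DataHandler.get_train_dataset(train_X_np, train_Y_np)
    #   PS_TF_DataHandler.to_tfrecord(ds, file_name="train_dev.0.tfrec")
    #   images_and_masks = PS_TF_DataHandler.from_tfrecord()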
@staticmethod
def serialize_PS_example(feature0, feature1):
"""NOT WORKING... don't know why
Creates a tf.Example message ready to be written to a file.
Args:
feature0:
feature1:
Returns:
"""
# Create a dictionary mapping the feature name to the
# tf.Example-compatible data type.
assert feature0.shape[0] == 1 and feature0.shape[1] == 128
assert (
feature0.shape[0] == feature1.shape[0]
and feature0.shape[1] == feature1.shape[1]
)
f0 = tf.reshape(feature0, [-1])
f1 = tf.reshape(feature1, [-1])
feature = {
"image": _int64_feature_from_list(f0),
"mask": _int64_feature_from_list(f1),
}
# Create a Features message using tf.train.Example.
logger.debug("in transforming to tf example proto")
example_proto = tf.train.Example(features=tf.train.Features(feature=feature))
logger.debug("after transforming one feature to tf example proto")
return example_proto.SerializeToString()
@staticmethod
def tf_serialize_example(f0, f1):
print(PS_TF_DataHandler.serialize_PS_example(f0, f1))
        # The return type is tf.string.
tf_string = tf.py_function(
PS_TF_DataHandler.serialize_PS_example,
(f0, f1), # pass these args to the above function.
tf.string,
)
return tf.reshape(tf_string, ()) # The result is a scalar
@staticmethod
def get_generator_with_features(features_dataset):
def generator():
for features in features_dataset:
yield PS_TF_DataHandler.serialize_PS_example(*features)
return generator
|
{"hexsha": "cf3e0ff6651d3079ccd560de027c90e74f2bca9f", "size": 16405, "ext": "py", "lang": "Python", "max_stars_repo_path": "kaggle_runner/datasets/data_handlers.py", "max_stars_repo_name": "pennz/kaggle_runner", "max_stars_repo_head_hexsha": "19b979ae86f1fcaff5d17f55f4d8bc3d3f2a4ced", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-08-06T09:07:49.000Z", "max_stars_repo_stars_event_max_datetime": "2020-08-06T09:07:49.000Z", "max_issues_repo_path": "kaggle_runner/datasets/data_handlers.py", "max_issues_repo_name": "pennz/kaggle_runner", "max_issues_repo_head_hexsha": "19b979ae86f1fcaff5d17f55f4d8bc3d3f2a4ced", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-05-13T10:49:42.000Z", "max_issues_repo_issues_event_max_datetime": "2020-05-15T22:52:37.000Z", "max_forks_repo_path": "kaggle_runner/datasets/data_handlers.py", "max_forks_repo_name": "pennz/kaggle_runner", "max_forks_repo_head_hexsha": "19b979ae86f1fcaff5d17f55f4d8bc3d3f2a4ced", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.2933070866, "max_line_length": 195, "alphanum_fraction": 0.568668089, "include": true, "reason": "import numpy", "num_tokens": 3662}
|
# from ast import *
from dolang.symbolic import stringify, stringify_symbol, parse_string, list_variables
from dolang.grammar import str_expression
from dolo.compiler.misc import CalibrationDict
from numpy import log, exp
import xarray
def eval_formula(expr: str, dataframe=None, context=None):
'''
expr: string
Symbolic expression to evaluate.
Example: `k(1)-delta*k(0)-i`
    dataframe: (optional) pandas dataframe
Each column is a time series, which can be indexed with dolo notations.
context: dict or CalibrationDict
'''
if context is None:
dd = {} # context dictionary
elif isinstance(context, CalibrationDict):
dd = context.flat.copy()
else:
dd = context.copy()
    # compat shim: the normalized parameter names don't match the calibration dict keys.
for k in [*dd.keys()]:
dd[stringify_symbol(k)] = dd[k]
expr_ast = parse_string(expr)
variables = list_variables(expr_ast)
nexpr = stringify(expr_ast)
dd['log'] = log
dd['exp'] = exp
if dataframe is not None:
import pandas as pd
for (k, t) in variables:
dd[stringify_symbol((k, t))] = dataframe[k].shift(t)
dd['t_'] = pd.Series(dataframe.index, index=dataframe.index)
expr = str_expression(nexpr)
res = eval(expr, dd)
return res
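# A minimal usage sketch (hypothetical data): evaluating the docstring's
# example expression against a dataframe whose columns are the series k and i,
# with delta supplied through the context dict.
#
#   import pandas as pd
#   df = pd.DataFrame({'k': [1.0, 1.1, 1.2], 'i': [0.1, 0.1, 0.1]})
#   res = eval_formula('k(1) - delta*k(0) - i', dataframe=df,
#                      context={'delta': 0.05})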
|
{"hexsha": "b46025283ac5284d81fe0cbf2c2ced0a7780ea7b", "size": 1343, "ext": "py", "lang": "Python", "max_stars_repo_path": "dolo/compiler/eval_formula.py", "max_stars_repo_name": "s4465413/dolo.py", "max_stars_repo_head_hexsha": "6a1bfbd65a8f69a7495c5ade51af8cc6c35eded3", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "dolo/compiler/eval_formula.py", "max_issues_repo_name": "s4465413/dolo.py", "max_issues_repo_head_hexsha": "6a1bfbd65a8f69a7495c5ade51af8cc6c35eded3", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "dolo/compiler/eval_formula.py", "max_forks_repo_name": "s4465413/dolo.py", "max_forks_repo_head_hexsha": "6a1bfbd65a8f69a7495c5ade51af8cc6c35eded3", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.8269230769, "max_line_length": 85, "alphanum_fraction": 0.6545048399, "include": true, "reason": "from numpy", "num_tokens": 322}
|
import TimeSeries.TimeArray
struct APIResponse
data::String
http_resp::HTTP.Messages.Response
end
"""
Description
The yahoo() method is a wrapper for downloading historical stock prices from Yahoo.
Usage
AAPL = yahoo("AAPL)
SPX = yahoo()
Method Signature(s)
yahoo(data::ASCIIString="^GSPC")
Details
The yahoo method takes a stock name in the form of a string and returns a TimeSeries.TimeArray data structure
corresponding to the Yahoo Finance ticker. With no argument, the default historical time series is the S&P 500.
References
http://www.finance.yahoo.com
See Also
fred() which accesses the St. Louis Federal Reserve financial and economic data sets.
"""
function yahoo(data::String="^GSPC")
Base.depwarn("Yahoo Finance API changed, this function may not work anymore", :yahoo)
url = "http://ichart.yahoo.com/table.csv?s=$data"
http_resp = HTTP.request("GET", url)
resp = APIResponse(data, http_resp)
TimeArray(resp)
end
"""
Description
The fred() method is a wrapper to download financial and economic time series data from the St. Louis Federal Reserve (FRED).
Usage
DGS = fred("DGS10")
CPI = fred()
Method Signature(s)
fred(data::String="CPIAUCNS")
Details
The fred() method takes a string argument that corresponds to a series code from the St. Louis Federal
Reserve (FRED) database. It returns the data in the TimeSeries.TimeArray data structure. When no argument
is provided, the default data set is the Consumer Price Index for All Urban Consumers: All Items (CPIAUCNS).
References
https://research.stlouisfed.org/fred2
See Also
yahoo() which is a wrapper from downloading financial time series for stocks from Yahoo Finance.
"""
function fred(data::String="CPIAUCNS")
url = "http://research.stlouisfed.org/fred2/series/$data/downloaddata/$data.csv"
http_resp = HTTP.request("GET", url)
resp = APIResponse(data, http_resp)
TimeArray(resp)
end
function TimeArray(resp::APIResponse)
    # This function transforms the APIResponse object into a TimeArray
# Split the data on every "\n"
raw_data = String(resp.http_resp.body)
data = split(raw_data, "\n")
# Extract the head and body of the data
head = strip(data[1])
body = data[2:end]
# Parse body
body[end] == "" ? pop!(body) : nothing # remove trailing empty string if it's there
body = [split(line, ",") for line in body] # split on comma
######### Timestamp
    # take the first field of each row (assuming it's the date)
    # TODO: regex query needed to catch edge cases
dates = [line[1] for line in body]
timestamp = Date[Date(d) for d in dates] # parse dates
######### Values
    svals = [line[2:end] for line in body] # get fields 2 to the end
fvals = zeros(length(svals),length(svals[1]))
for r in 1:size(fvals,1)
for c in 1:size(fvals,2)
# is not empty and is not equal to FRED's iconic "." sentinel for missingness
if ~isempty(svals[r][c]) && ~isequal(svals[r][c],".\r")
fvals[r,c] = parse(Float64, svals[r][c])
else
# captures FRED's "." sentinel
fvals[r,c] = NaN
end
end
end
######### Column names
names = split(head, ",")[2:end] # Won't need the Date name (fist column) for TimeArray
names = String[name for name in names]
return TimeArray(timestamp, fvals, names, resp.data)
end
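# A minimal usage sketch (hypothetical check) of the parsing above: FRED cells
# holding the "." missing-value sentinel come back as NaN in the TimeArray.
#
#   cpi = fred("CPIAUCNS")       # TimeArray with Date timestamps
#   any(isnan, values(cpi))      # true whenever FRED reported "." for a date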
|
{"hexsha": "070a5fc14c87702bbe7c6658e5f92211fca3f547", "size": 3461, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/downloads.jl", "max_stars_repo_name": "UnofficialJuliaMirror/MarketData.jl-945b72a4-3b13-509d-9b46-1525bb5c06de", "max_stars_repo_head_hexsha": "6c66eaf1c4797474fa3db156cf8a6ef13dcb81d2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/downloads.jl", "max_issues_repo_name": "UnofficialJuliaMirror/MarketData.jl-945b72a4-3b13-509d-9b46-1525bb5c06de", "max_issues_repo_head_hexsha": "6c66eaf1c4797474fa3db156cf8a6ef13dcb81d2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/downloads.jl", "max_forks_repo_name": "UnofficialJuliaMirror/MarketData.jl-945b72a4-3b13-509d-9b46-1525bb5c06de", "max_forks_repo_head_hexsha": "6c66eaf1c4797474fa3db156cf8a6ef13dcb81d2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.5811965812, "max_line_length": 129, "alphanum_fraction": 0.6677260907, "num_tokens": 881}
|
[STATEMENT]
lemma co_test_inf_distributive:
"co_test x \<Longrightarrow> inf_distributive x"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. co_test x \<Longrightarrow> inf_distributive x
[PROOF STEP]
by (metis co_test_def distrib_imp1 inf_sup_distrib1 inf_distributive_def mult_zero_sup_one_dist)
|
{"llama_tokens": 116, "file": "Correctness_Algebras_Lattice_Ordered_Semirings", "length": 1}
|
#define BOOST_TEST_MAIN TestTopK
#include <boost/test/unit_test.hpp>
#include <stdexcept>
#include <iostream>
#include <sam/TestProducers.hpp>
#include <sam/TopK.hpp>
#include <sam/Filter.hpp>
#include <sam/Expression.hpp>
#include <sam/VastNetflow.hpp>
using namespace sam;
struct F
{
std::shared_ptr<FeatureMap> featureMap;
string identifier = "topk";
int N = 10000;
int b = 1000;
int k = 3;
F()
{
featureMap = std::make_shared<FeatureMap>();
}
};
BOOST_FIXTURE_TEST_CASE( test_topk_no_key, F )
{
size_t queueLength = 1000;
size_t numPopular = 2;
size_t numExamples = 100000;
double probabilityPop = 0.5;
PopularSites producer(queueLength, numExamples, numPopular,
probabilityPop);
auto topk = std::make_shared<TopK<VastNetflow, DestIp>>
(N, b, k, 0, featureMap, identifier);
producer.registerConsumer(topk);
producer.run();
std::shared_ptr<Feature const> feature = featureMap->at("", identifier);
auto func0 = [](Feature const * feature)->double {
auto topKFeature = static_cast<TopKFeature const *>(feature);
return topKFeature->getFrequencies()[0];
};
auto func1 = [](Feature const * feature)->double {
auto topKFeature = static_cast<TopKFeature const *>(feature);
return topKFeature->getFrequencies()[1];
};
  double frequency0 = feature->evaluate<double>(func0);
  double frequency1 = feature->evaluate<double>(func1);
BOOST_CHECK_CLOSE(frequency0, 0.25, 5);
BOOST_CHECK_CLOSE(frequency1, 0.25, 5);
}
BOOST_FIXTURE_TEST_CASE( test_topk_server, F )
{
int queueLength = 1000;
int numExamples = 100000;
int numServers = 2;
int numNonservers = 2;
// The TopKProducer creates a situation where there are numServers servers
// and numNonservers non-servers. A server is defined as > 90% of traffic
// to the top two ports.
TopKProducer producer(queueLength, numExamples, numServers, numNonservers);
// Creating the topk computation and registering it as a consumer
// of the data source: producer.
auto topk = std::make_shared<TopK<VastNetflow, DestPort, DestIp>>
(N, b, k, 0, featureMap, identifier);
producer.registerConsumer(topk);
// TODO: The below section creating the filter isn't actually tested.
// Add more tests to see that the filter works.
/////// Creating the filter expression /////////////////////////
// We create an infix expression to filter for servers, i.e.
// Servers = FILTER VertsByDest BY top2.value(0) + top2.value(1) < 0.9;
// The infix is the part after the BY.
// First function token
auto function1 = [](Feature const * feature)->double {
auto topKFeature = static_cast<TopKFeature const *>(feature);
return topKFeature->getFrequencies()[0];
};
auto funcToken1 = std::make_shared<FuncToken<VastNetflow>>(featureMap, function1,
identifier);
// Addition token
auto addOper = std::make_shared<AddOperator<VastNetflow>>(featureMap);
// Second function token
auto function2 = [](Feature const * feature)->double {
auto topKFeature = static_cast<TopKFeature const *>(feature);
return topKFeature->getFrequencies()[1];
};
auto funcToken2 = std::make_shared<FuncToken<VastNetflow>>(featureMap, function2,
identifier);
// Lessthan token
auto lessThanToken = std::make_shared<LessThanOperator<VastNetflow>>(featureMap);
// Number token
auto numberToken = std::make_shared<NumberToken<VastNetflow>>(featureMap, 0.9);
std::list<std::shared_ptr<ExpressionToken<VastNetflow>>> infixList;
infixList.push_back(funcToken1);
infixList.push_back(addOper);
infixList.push_back(funcToken2);
infixList.push_back(lessThanToken);
infixList.push_back(numberToken);
auto filterExpression = std::make_shared<Expression<VastNetflow>>(infixList);
auto filter = std::make_shared<Filter<VastNetflow, DestIp>>(
filterExpression, 0, featureMap, "servers", queueLength);
producer.registerConsumer(filter);
producer.run();
for (std::string ip : producer.getServerIps()) {
std::shared_ptr<Feature const> feature = featureMap->at(ip, identifier);
int index1 = 0;
auto function1 = [&index1](Feature const * feature)->double {
auto topKFeature = static_cast<TopKFeature const *>(feature);
return topKFeature->getFrequencies()[index1];
};
// TopKProducer sends data in a uniform random way to two ports to the
// server ips, so the top two values should both be about 0.5 frequency.
double value = feature->evaluate<double>(function1);
BOOST_CHECK_CLOSE(value, 0.5, 0.01);
index1 = 1;
value = feature->evaluate<double>(function1);
BOOST_CHECK_CLOSE(value, 0.5, 0.01);
}
for (std::string ip : producer.getNonserverIps()) {
std::shared_ptr<Feature const> feature = featureMap->at(ip, identifier);
std::vector<double> parameters;
parameters.push_back(0);
int index1 = 0;
auto function1 = [&index1](Feature const * feature)->double {
auto topKFeature = static_cast<TopKFeature const *>(feature);
return topKFeature->getFrequencies()[index1];
};
// TopKProducer sends data in a uniform random way to three ports
// to the non-server ips, so the top three values should all be about 0.33
double value = feature->evaluate<double>(function1);
BOOST_CHECK_CLOSE(value, 0.333333, 0.01);
index1 = 1;
value = feature->evaluate<double>(function1);
BOOST_CHECK_CLOSE(value, 0.333333, 0.01);
index1 = 2;
value = feature->evaluate<double>(function1);
BOOST_CHECK_CLOSE(value, 0.333333, 0.01);
}
}
|
{"hexsha": "b6ff09b26f22e6a699ab700b2497c7949625990e", "size": 5694, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "TestSrc/TestTopK.cpp", "max_stars_repo_name": "dirkcgrunwald/SAM", "max_stars_repo_head_hexsha": "0478925c506ad38fd405954cc4415a3e96e77d90", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "TestSrc/TestTopK.cpp", "max_issues_repo_name": "dirkcgrunwald/SAM", "max_issues_repo_head_hexsha": "0478925c506ad38fd405954cc4415a3e96e77d90", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "TestSrc/TestTopK.cpp", "max_forks_repo_name": "dirkcgrunwald/SAM", "max_forks_repo_head_hexsha": "0478925c506ad38fd405954cc4415a3e96e77d90", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.8928571429, "max_line_length": 83, "alphanum_fraction": 0.6861608711, "num_tokens": 1500}
|
import numpy as np
import pandas as pd
# Class for Item similarity based Recommender System model
class ItemSimilarityRecommender:
def __init__(self):
self.train_data = None
self.user_id = None
self.item_id = None
self.cooccurence_matrix = None
self.songs_dict = None
self.rev_songs_dict = None
self.item_similarity_recommendations = None
# Get unique items (songs) corresponding to a given user
def get_user_items(self, user):
user_data = self.train_data[self.train_data[self.user_id] == user]
user_items = list(user_data[self.item_id].unique())
return user_items
# Get unique users for a given item (song)
def get_item_users(self, item):
item_data = self.train_data[self.train_data[self.item_id] == item]
item_users = set(item_data[self.user_id].unique())
return item_users
# Get unique items (songs) in the training data
def get_all_items_train_data(self):
all_items = list(self.train_data[self.item_id].unique())
return all_items
# Construct cooccurence matrix
def construct_cooccurence_matrix(self, user_songs, all_songs):
# Get users for all songs in user_songs.
user_songs_users = []
for i in range(0, len(user_songs)):
user_songs_users.append(self.get_item_users(user_songs[i]))
# Initialize the item cooccurence matrix of size
# len(user_songs) X len(songs)
cooccurence_matrix = np.matrix(np.zeros(shape=(len(user_songs), len(all_songs))), float)
# Calculate similarity between user songs and all unique songs
# in the training data
for i in range(0, len(all_songs)):
# Calculate unique listeners (users) of song (item) i
songs_i_data = self.train_data[self.train_data[self.item_id] == all_songs[i]]
users_i = set(songs_i_data[self.user_id].unique())
for j in range(0, len(user_songs)):
# Get unique listeners (users) of song (item) j
users_j = user_songs_users[j]
# Calculate intersection of listeners of songs i and j
users_intersection = users_i.intersection(users_j)
# Calculate cooccurence_matrix[i,j] as Jaccard Index
if len(users_intersection) != 0:
# Calculate union of listeners of songs i and j
users_union = users_i.union(users_j)
cooccurence_matrix[j, i] = float(len(users_intersection)) / float(len(users_union))
else:
cooccurence_matrix[j, i] = 0
return cooccurence_matrix
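    # Worked example (illustrative): if song i has listeners {u1, u2} and song
    # j has listeners {u2, u3}, their intersection is {u2} and their union is
    # {u1, u2, u3}, so the Jaccard index stored in cooccurence_matrix[j, i]
    # is 1 / 3.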
# Use the cooccurence matrix to make top recommendations
def generate_top_recommendations(self, user, cooccurence_matrix, all_songs, user_songs):
print("Non zero values in cooccurence_matrix :%d" % np.count_nonzero(cooccurence_matrix))
# Calculate a weighted average of the scores in cooccurence matrix for all user songs.
user_sim_scores = cooccurence_matrix.sum(axis=0) / float(cooccurence_matrix.shape[0])
user_sim_scores = np.array(user_sim_scores)[0].tolist()
# Sort the indices of user_sim_scores based upon their value
# Also maintain the corresponding score
sort_index = sorted(((e, i) for i, e in enumerate(list(user_sim_scores))), reverse=True)
# Create a dataframe from the following
columns = ['user_id', 'song', 'score', 'rank']
# index = np.arange(1) # array of numbers for the number of samples
df = pd.DataFrame(columns=columns)
# Fill the dataframe with top 500 item based recommendations
rank = 1
for i in range(0, len(sort_index)):
if ~np.isnan(sort_index[i][0]) and all_songs[sort_index[i][1]] not in user_songs and rank <= 500:
df.loc[len(df)] = [user, all_songs[sort_index[i][1]], sort_index[i][0], rank]
rank = rank + 1
# Handle the case where there are no recommendations
if df.shape[0] == 0:
print("The current user has no songs for training the item similarity based recommendation model.")
return -1
else:
return df
# Create the item similarity based recommender system model
def create(self, train_data, user_id, item_id):
self.train_data = train_data
self.user_id = user_id
self.item_id = item_id
# Use the item similarity based recommender system model to make recommendations
def recommend(self, user):
user_songs = self.get_user_items(user)
all_songs = self.get_all_items_train_data()
print("No. of unique songs for the user: %d" % len(user_songs))
print("No. of unique songs in the training set: %d" % len(all_songs))
cooccurence_matrix = self.construct_cooccurence_matrix(user_songs, all_songs)
df_recommendations = self.generate_top_recommendations(user, cooccurence_matrix, all_songs, user_songs)
return df_recommendations
# Get similar items to given items
def get_similar_items(self, item_list):
user_songs = item_list
# B. Get all unique items (songs) in the training data
all_songs = self.get_all_items_train_data()
print("No. of unique songs in the training set: %d" % len(all_songs))
# C. Construct item cooccurence matrix of size
# len(user_songs) X len(songs)
cooccurence_matrix = self.construct_cooccurence_matrix(user_songs, all_songs)
# D. Use the cooccurence matrix to make recommendations
user = ""
df_recommendations = self.generate_top_recommendations(user, cooccurence_matrix, all_songs, user_songs)
return df_recommendations
def lightfm_recommendation(model, data, items, user_ids, verbose=0):
recommendation = []
n_users, n_itens = data.shape
for user_id in user_ids:
scores = model.predict(user_id, np.arange(n_itens), num_threads=2)
known_positives = items[data.tocsr()[user_id].indices]
top_items = items[np.argsort(-scores)]
top_items_df = pd.DataFrame(top_items.unique()[:500+len(known_positives)], columns=['track_id'])
top_items_df = top_items_df[~top_items_df['track_id'].isin(known_positives)]
top_items_df = top_items_df[:500]
top_items_df['pid'] = user_id
recommendation.append(top_items_df)
if verbose > 0:
print("User %s" % user_id)
if verbose == 2:
print("Known positives:")
for x in known_positives[:3]:
print(" %s" % x)
print("Recommended:")
for x in top_items[:3]:
print(" %s" % x)
return recommendation
|
{"hexsha": "62f64dbfa45d53d6631805a4807e334432e1ed9c", "size": 6844, "ext": "py", "lang": "Python", "max_stars_repo_path": "model.py", "max_stars_repo_name": "dimitreOliveira/RecsysChallenge_Spotify", "max_stars_repo_head_hexsha": "3b7cd4e4d12a51049573cca1dd86ef66603fddcb", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2018-07-01T06:41:58.000Z", "max_stars_repo_stars_event_max_datetime": "2018-07-15T20:02:05.000Z", "max_issues_repo_path": "model.py", "max_issues_repo_name": "dimitreOliveira/RecsysChallenge_Spotify", "max_issues_repo_head_hexsha": "3b7cd4e4d12a51049573cca1dd86ef66603fddcb", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "model.py", "max_forks_repo_name": "dimitreOliveira/RecsysChallenge_Spotify", "max_forks_repo_head_hexsha": "3b7cd4e4d12a51049573cca1dd86ef66603fddcb", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.0233918129, "max_line_length": 111, "alphanum_fraction": 0.6472822911, "include": true, "reason": "import numpy", "num_tokens": 1575}
|
[STATEMENT]
lemma length_Casts_vs':
"P \<turnstile> Ts Casts vs to vs' \<Longrightarrow> length Ts = length vs'"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. P \<turnstile> Ts Casts vs to vs' \<Longrightarrow> length Ts = length vs'
[PROOF STEP]
by (induct rule:Casts_to.induct,simp_all)
|
{"llama_tokens": 112, "file": "CoreC++_SubObj", "length": 1}
|
# -*- coding: utf-8 -*-
"""LanguageModel.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1Z9Fn4bYa16NSDzxaTlRV_XRF4NDF1r2J
# Training language models with LSTM and GRU
"""
import os
import numpy as np
import torchtext
from torchtext.vocab import Vectors
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
# word vocab construction: idx->word, word->idx; index 0 is reserved for <PAD>
def word_set_construction(file_path):
with open(file_path, 'r', encoding='utf-8') as f:
text = f.readlines()
words = [word for sent in text for word in sent.split()]
words2idx = {w : i for i, w in enumerate(words, 1)} # 0 for PAD token
idx2words = {i : w for i, w in enumerate(words, 1)}
PAD_IDX = 0
idx2words[PAD_IDX] = '<PAD>'
words2idx['<PAD>'] = PAD_IDX
return words, idx2words, words2idx
# read corpus: sentences
def read_corpus(file_path):
with open(file_path, 'r', encoding='utf-8') as f:
sents = f.readlines()
sentences = [sent.strip() for sent in sents]
return sentences
# data samples with labels
def samples_labels(corpus : list, words2idx, idx2words, max_len):
samples = []
labels = []
for sample in tqdm(corpus):
words = sample.split()
sample_words = [0] * max_len
for i, w in enumerate(words[:-1]):
sample_words[i] = words2idx[w]
target_words = [0] * max_len
for i, w in enumerate(words[1:]):
target_words[i] = words2idx[w]
samples.append(sample_words)
labels.append(target_words)
return samples, labels
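# Illustrative example (hypothetical sentence): for "the cat sat" with
# max_len=5, the input row is [idx(the), idx(cat), 0, 0, 0] and the target
# row is [idx(cat), idx(sat), 0, 0, 0]; targets are the inputs shifted by one
# position, padded with the <PAD> index 0.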
# get batches
def get_batches(samples, labels, batch_size):
batch_data = []
samples_tensor = torch.tensor(samples, dtype=torch.long)
labels_tensor = torch.tensor(labels, dtype=torch.long)
num, dim = samples_tensor.size()
for start in range(0, num, batch_size):
end = start + batch_size
if end > num:
break
else:
batch_samples = samples_tensor[start : end]
            batch_labels = labels_tensor[start : end]
batch_data.append((batch_samples, batch_labels))
return batch_data
## RNN-based language models
class LM_Models(nn.Module):
def __init__(self, embedding_dim, hidden_dim, vocab_size, mode):
super().__init__()
self.hidden_dim = hidden_dim
self.word_embedding = nn.Embedding(vocab_size, embedding_dim)
        if mode in ('LSTM', 'lstm'):
            self.model = nn.LSTM(embedding_dim, hidden_dim, batch_first=True)
        elif mode in ('GRU', 'gru'):
            self.model = nn.GRU(embedding_dim, hidden_dim, batch_first=True)
        elif mode in ('RNN', 'rnn'):
            self.model = nn.RNN(embedding_dim, hidden_dim, batch_first=True)
self.hidden2word = nn.Linear(hidden_dim, vocab_size)
def forward(self, data):
embeds = self.word_embedding(data)
        model_out, _ = self.model(embeds)  # hidden state(s) unused; LSTM returns a tuple, GRU/RNN a tensor
        target_embed = self.hidden2word(model_out.contiguous().view(-1, self.hidden_dim))
        mask = (data != 0).view(-1)  # 0 is the <PAD> index
pure_target = target_embed[mask]
target_scores = F.log_softmax(pure_target, dim=1)
return target_scores
# evaluation
def accuracy_score(y_hat, y):
y_hat = y_hat.argmax(dim=1)
num_pre_real = torch.eq(y_hat, y.view(-1))
score = num_pre_real.sum().item() / num_pre_real.size()[0]
return score
def evaluate(model, data):
model.eval()
total_acc = 0.
total_count = 0.
for x, y in data:
        mask = y != 0  # 0 is the <PAD> index
pure_y = y[mask]
with torch.no_grad():
target_scores = model(x)
total_count += 1
acc = accuracy_score(target_scores, pure_y)
total_acc += acc
acc = total_acc / total_count
# model.train()
return acc
# define the paths of the data files
vocab_file = './bobsue.voc.txt'
train_file = './bobsue.lm.train.txt'
val_file = './bobsue.lm.dev.txt'
test_file = './bobsue.lm.dev.txt'
MAX_LEN = 20
BATCH_SIZE = 128
# get vocabs
words, idx2words, words2idx = word_set_construction(vocab_file)
# train batches data
train_corpus = read_corpus(train_file)
train_samples, train_labels = samples_labels(train_corpus, words2idx, idx2words, MAX_LEN)
train_batch_data = get_batches(train_samples, train_labels, BATCH_SIZE)
# val batches data
val_corpus = read_corpus(val_file)
val_samples, val_labels = samples_labels(val_corpus, words2idx, idx2words, MAX_LEN)
val_batch_data = get_batches(val_samples, val_labels, BATCH_SIZE)
# test batches data
test_corpus = read_corpus(test_file)
test_samples, test_labels = samples_labels(test_corpus, words2idx, idx2words, MAX_LEN)
test_batch_data = get_batches(test_samples, test_labels, BATCH_SIZE)
# function to train
EMBEDDING_DIM = 200
HIDDEN_DIM = 200
MODE = 'LSTM'
model = LM_Models(EMBEDDING_DIM, HIDDEN_DIM, len(words2idx), mode=MODE)
GRAD_CLIP = 5.
NUM_EPOCHS = 10
LR = 0.001
val_acces = []
loss_function = nn.NLLLoss()
optimizer = optim.Adam(model.parameters(), lr=LR)
scheduler = optim.lr_scheduler.ExponentialLR(optimizer, 0.5)
Model_Save_Name = 'LM-' + MODE + '-best.pth'
# print parameters
sum_ = 0
for name, param in model.named_parameters():
mul = 1
for size_ in param.shape:
mul *= size_
sum_ += mul
print('%14s : %s' % (name, param.shape))
print('Total Num. of params:', sum_)
# train, val and test
for epoch in range(NUM_EPOCHS):
model.train()
print('epoch:{}'.format(epoch).center(51, '*'))
    acc_list = []
for i, (x, y) in enumerate(train_batch_data): # x: train, y: label
        mask = y != 0  # 0 is the <PAD> index
pure_y = y[mask]
# feedforward
model.zero_grad()
target_scores = model(x)
acc = accuracy_score(target_scores, pure_y)
acc_list.append(acc)
# loss function
loss = loss_function(target_scores, pure_y)
        # backpropagation
loss.backward()
# optim
torch.nn.utils.clip_grad_norm_(model.parameters(), GRAD_CLIP)
optimizer.step()
if i % 20 == 0:
print('epoch:{} item:{} loss:{:.4} acc:{:.4}'.format(epoch, i, loss.item(), acc))
print('Epoch:{} avg acc:{:.4}'.format(epoch, sum(acc_list)/len(acc_list)))
# eval
val_acc = evaluate(model, val_batch_data)
if len(val_acces) == 0 or val_acc > max(val_acces):
print('Best model, val Accuracy: {:.4}'.format(val_acc))
torch.save(model.state_dict(), Model_Save_Name)
else:
print('Current val Accuracy: {:.4}'.format(val_acc))
scheduler.step()
val_acces.append(val_acc)
# test
test_acc = evaluate(model, test_batch_data)
print('Test data accuracy: {:.4}'.format(test_acc))
# print incorrect prediction
def print_incorrect_prediction(model, data):
model.eval()
incorrect_words = []
for x, y in data:
        mask = y != 0  # 0 is the <PAD> index
        y = y[mask]
with torch.no_grad():
target_scores = model(x)
y_hat = target_scores.argmax(dim=1)
for i, j in zip(y_hat.tolist(), y.tolist()):
if i != j:
incorrect_words.append('|'.join([idx2words[i], idx2words[j]]))
return incorrect_words
model.load_state_dict(torch.load(Model_Save_Name))
incorrect_predictions = print_incorrect_prediction(model, test_batch_data)
for inc_pred in incorrect_predictions:
print(inc_pred)
|
{"hexsha": "29a39d674f8606d3f4d0bf0ee9cfbf4b699f9af7", "size": 7195, "ext": "py", "lang": "Python", "max_stars_repo_path": "languagemodel.py", "max_stars_repo_name": "wangwk/LanguageModelDemo", "max_stars_repo_head_hexsha": "6be04f5d072fb79e0cf5e8a465a056faba9cf91a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-02-18T13:40:11.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-18T14:51:52.000Z", "max_issues_repo_path": "languagemodel.py", "max_issues_repo_name": "wangwk/LanguageModelDemo", "max_issues_repo_head_hexsha": "6be04f5d072fb79e0cf5e8a465a056faba9cf91a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "languagemodel.py", "max_forks_repo_name": "wangwk/LanguageModelDemo", "max_forks_repo_head_hexsha": "6be04f5d072fb79e0cf5e8a465a056faba9cf91a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.358649789, "max_line_length": 90, "alphanum_fraction": 0.6774148714, "include": true, "reason": "import numpy", "num_tokens": 1967}
|
# Lint as: python3
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""General utilities."""
from __future__ import absolute_import
from __future__ import division
# Standard __future__ imports
from __future__ import print_function
import inspect
import sys
import traceback
from typing import Any, Callable, Dict, List, Optional, Text, Union
import numpy as np
import six
import tensorflow as tf
from tensorflow_model_analysis import constants
from tensorflow_model_analysis import types
# Separator used when combining multiple layers of Extracts keys into a single
# string. Normally we would like to use '.' or '/' as a separator, but the
# output gets written to a table backed by a proto based schema which limits the
# characters that can be used to [a-zA-Z_].
KEY_SEPARATOR = '__'
# Suffix for keys representing the top k keys associated with a sparse item.
KEYS_SUFFIX = 'keys'
# Suffix for keys representing the top k values associated with a sparse item.
VALUES_SUFFIX = 'values'
def unique_key(key: Text,
current_keys: List[Text],
update_keys: Optional[bool] = False) -> Text:
"""Returns a unique key given a list of current keys.
If the key exists in current_keys then a new key with _1, _2, ..., etc
appended will be returned, otherwise the key will be returned as passed.
Args:
key: desired key name.
current_keys: List of current key names.
update_keys: True to append the new key to current_keys.
"""
index = 1
k = key
while k in current_keys:
k = '%s_%d' % (key, index)
index += 1
if update_keys:
current_keys.append(k)
return k
def compound_key(keys: List[Text], separator: Text = KEY_SEPARATOR) -> Text:
"""Returns a compound key based on a list of keys.
Args:
keys: Keys used to make up compound key.
separator: Separator between keys. To ensure the keys can be parsed out of
any compound key created, any use of a separator within a key will be
replaced by two separators.
"""
return separator.join([key.replace(separator, separator * 2) for key in keys])
def create_keys_key(key: Text) -> Text:
"""Creates secondary key representing the sparse keys associated with key."""
return '_'.join([key, KEYS_SUFFIX])
def create_values_key(key: Text) -> Text:
"""Creates secondary key representing sparse values associated with key."""
return '_'.join([key, VALUES_SUFFIX])
def get_by_keys(data: Dict[Text, Any],
keys: List[Any],
default_value=None,
optional: bool = False) -> Any:
"""Returns value with given key(s) in (possibly multi-level) dict.
The keys represent multiple levels of indirection into the data. For example
if 3 keys are passed then the data is expected to be a dict of dict of dict.
  For compatibility with data that uses prefixing to separate the keys in a
  single dict, lookups will also be searched for under the keys separated by
  '/'. For example, the keys 'head1' and 'probabilities' could be stored in
  a single dict as 'head1/probabilities'.
Args:
data: Dict to get value from.
keys: Sequence of keys to lookup in data. None keys will be ignored.
default_value: Default value if not found.
optional: Whether the key is optional or not. If default value is None and
optional is False then a ValueError will be raised if key not found.
Raises:
ValueError: If (non-optional) key is not found.
"""
if not keys:
    raise ValueError('no keys provided to get_by_keys: %s' % data)
format_keys = lambda keys: '->'.join([str(k) for k in keys if k is not None])
value = data
keys_matched = 0
for i, key in enumerate(keys):
if key is None:
keys_matched += 1
continue
if not isinstance(value, dict):
raise ValueError('expected dict for "%s" but found %s: %s' %
(format_keys(keys[:i + 1]), type(value), data))
if key in value:
value = value[key]
keys_matched += 1
continue
# If values have prefixes matching the key, return those values (stripped
# of the prefix) instead.
prefix_matches = {}
for k, v in value.items():
if k.startswith(key + '/'):
prefix_matches[k[len(key) + 1:]] = v
if prefix_matches:
value = prefix_matches
keys_matched += 1
continue
break
if keys_matched < len(keys) or isinstance(value, dict) and not value:
if default_value is not None:
return default_value
if optional:
return None
raise ValueError('"%s" key not found (or value is empty dict): %s' %
(format_keys(keys[:keys_matched + 1]), data))
return value
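# A minimal usage sketch (hypothetical keys) of the prefix fallback described
# above: 'head1/probabilities' is reachable via ['head1', 'probabilities']
# even though the dict only has a single flat key.
#
#   data = {'head1/probabilities': [0.2, 0.8]}
#   get_by_keys(data, ['head1', 'probabilities'])  # -> [0.2, 0.8]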
def reraise_augmented(exception: Exception, additional_message: Text) -> None:
"""Reraise a given exception with additional information.
Based on _reraise_augmented in Apache Beam.
Args:
exception: Original exception.
additional_message: Additional message to append to original exception's
message.
"""
# To emulate exception chaining (not available in Python 2).
original_traceback = sys.exc_info()[2]
try:
# Attempt to construct the same kind of exception
# with an augmented message.
#
# pytype: disable=attribute-error
# PyType doesn't know that Exception has the args attribute.
new_exception = type(exception)(
exception.args[0] + ' additional message: ' + additional_message,
*exception.args[1:])
# pytype: enable=attribute-error
except: # pylint: disable=bare-except
# If anything goes wrong, construct a RuntimeError whose message
# records the original exception's type and message.
new_exception = RuntimeError(
traceback.format_exception_only(type(exception), exception)[-1].strip()
+ ' additional message: ' + additional_message)
six.reraise(type(new_exception), new_exception, original_traceback)
def kwargs_only(fn):
"""Wraps function so that callers must call it using keyword-arguments only.
Args:
fn: fn to wrap.
Returns:
Wrapped function that may only be called using keyword-arguments.
"""
if hasattr(inspect, 'getfullargspec'):
# For Python 3
args = inspect.getfullargspec(fn)
varargs = args.varargs
keywords = args.varkw
else:
# For Python 2
args = inspect.getargspec(fn) # pylint: disable=deprecated-method
varargs = args.varargs
keywords = args.keywords
if varargs is not None:
raise TypeError('function to wrap should not have *args parameter')
if keywords is not None:
raise TypeError('function to wrap should not have **kwargs parameter')
arg_list = args.args
has_default = [False] * len(arg_list)
default_values = [None] * len(arg_list)
has_self = arg_list[0] == 'self'
if args.defaults:
has_default[-len(args.defaults):] = [True] * len(args.defaults)
default_values[-len(args.defaults):] = args.defaults
def wrapped_fn(*args, **kwargs):
"""Wrapped function."""
if args:
if not has_self or (has_self and len(args) != 1):
raise TypeError('function %s must be called using keyword-arguments '
'only.' % fn.__name__)
if has_self:
if len(args) != 1:
raise TypeError('function %s has self argument but not called with '
'exactly 1 positional argument' % fn.__name__)
kwargs['self'] = args[0]
kwargs_to_pass = {}
for arg_name, arg_has_default, arg_default_value in zip(
arg_list, has_default, default_values):
if not arg_has_default and arg_name not in kwargs:
raise TypeError('function %s must be called with %s specified' %
(fn.__name__, arg_name))
kwargs_to_pass[arg_name] = kwargs.pop(arg_name, arg_default_value)
if kwargs:
raise TypeError('function %s called with extraneous kwargs: %s' %
(fn.__name__, kwargs.keys()))
return fn(**kwargs_to_pass)
return wrapped_fn
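# A minimal usage sketch (hypothetical function) for the decorator above:
#
#   @kwargs_only
#   def scale(x, factor=2):
#     return x * factor
#
#   scale(x=3)  # -> 6
#   scale(3)    # raises TypeError: must be called using keyword-arguments only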
def get_features_from_extracts(
element: types.Extracts
) -> Union[types.DictOfTensorValue, types.DictOfFetchedTensorValues]:
"""Fetch features from the extracts."""
features = None
if constants.FEATURES_PREDICTIONS_LABELS_KEY in element:
fpl = element[constants.FEATURES_PREDICTIONS_LABELS_KEY]
if not isinstance(fpl, types.FeaturesPredictionsLabels):
raise TypeError(
'Expected FPL to be instance of FeaturesPredictionsLabel. FPL was: '
'%s of type %s' % (str(fpl), type(fpl)))
features = fpl.features
elif constants.FEATURES_KEY in element:
features = element[constants.FEATURES_KEY]
else:
raise RuntimeError('Features missing, Please ensure Predict() was called.')
return features
def merge_extracts(extracts: List[types.Extracts]) -> types.Extracts:
"""Merges list of extracts into single extract with multi-dimentional data."""
def merge_with_lists(target, key, value):
if isinstance(value, dict):
if key not in target:
target[key] = {}
target = target[key]
for k, v in value.items():
merge_with_lists(target, k, v)
else:
if key not in target:
target[key] = []
if isinstance(value, np.ndarray):
value = value.tolist()
target[key].append(value)
def to_numpy(target):
if isinstance(target, dict):
return {k: to_numpy(v) for k, v in target.items()}
elif target and isinstance(target[0], tf.compat.v1.SparseTensorValue):
t = tf.sparse.concat(0, target)
return tf.compat.v1.SparseTensorValue(
indices=t.indices.numpy(),
values=t.values.numpy(),
dense_shape=t.dense_shape.numpy())
else:
arr = np.array(target)
# Flatten values that were originally single item lists into a single list
# e.g. [[1], [2], [3]] -> [1, 2, 3]
if len(arr.shape) == 2 and arr.shape[1] == 1:
return arr.squeeze(axis=1)
# Special case for empty slice arrays
# e.g. [[()], [()], [()]] -> [(), (), ()]
elif len(arr.shape) == 3 and arr.shape[1] == 1 and arr.shape[2] == 0:
return arr.squeeze(axis=1)
else:
return arr
result = {}
for x in extracts:
for k, v in x.items():
merge_with_lists(result, k, v)
return to_numpy(result)
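# Illustrative example: merging the extracts {'a': np.array([1])},
# {'a': np.array([2])} and {'a': np.array([3])} yields {'a': np.array([1, 2, 3])},
# since single-item lists are squeezed as described in to_numpy above.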
# TODO(b/162743769): Account for pointer fanout in byte size estimation.
class SizeEstimator(object):
"""Size estimator."""
def __init__(self, size_threshold: int, size_fn: Callable[[Any], int]):
self._size_threshold = size_threshold
self._curr_size = 0
self._size_fn = size_fn
def __iadd__(self, other: 'SizeEstimator') -> 'SizeEstimator':
self._curr_size += other.get_estimate()
return self
def update(self, value: Any):
self._curr_size += self._size_fn(value)
def should_flush(self) -> bool:
return self._curr_size >= self._size_threshold
def clear(self):
self._curr_size = 0
def get_estimate(self) -> int:
return self._curr_size
|
{"hexsha": "4afa039b32d67b7d992edb9e2604a5df58af757e", "size": 11489, "ext": "py", "lang": "Python", "max_stars_repo_path": "tensorflow_model_analysis/util.py", "max_stars_repo_name": "Mikehem/tfx", "max_stars_repo_head_hexsha": "e803ea6778d8550ec77dcc92bc8172f1a3a90f38", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tensorflow_model_analysis/util.py", "max_issues_repo_name": "Mikehem/tfx", "max_issues_repo_head_hexsha": "e803ea6778d8550ec77dcc92bc8172f1a3a90f38", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tensorflow_model_analysis/util.py", "max_forks_repo_name": "Mikehem/tfx", "max_forks_repo_head_hexsha": "e803ea6778d8550ec77dcc92bc8172f1a3a90f38", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.6920821114, "max_line_length": 80, "alphanum_fraction": 0.6789973018, "include": true, "reason": "import numpy", "num_tokens": 2779}
|
(* --------------------------------------------------------------------
* (c) Copyright 2011--2012 Microsoft Corporation and Inria.
* (c) Copyright 2012--2014 Inria.
* (c) Copyright 2012--2014 IMDEA Software Institute.
* -------------------------------------------------------------------- *)
(* -------------------------------------------------------------------- *)
From mathcomp Require Import ssreflect eqtype ssrbool ssrnat ssrfun seq.
From mathcomp Require Import choice ssralg bigop.
From mathcomp Require Export word_ssrZ.
Import GRing.Theory.
Local Open Scope ring_scope.
Set Implicit Arguments.
Unset Strict Implicit.
Local Notation simpm := Monoid.simpm.
Require Import NArith ZArith BinPos Ring_polynom Field_theory.
(* -------------------------------------------------------------------- *)
Declare Scope ssring.
Reserved Notation "x %:S" (at level 2, left associativity, format "x %:S").
Notation "c %:S" := (PEc c) : ssring.
Notation "''X_' i" := (PEX _ i) : ssring.
Notation "x + y" := (PEadd x y) : ssring.
Notation "x - y" := (PEsub x y) : ssring.
Notation "- x" := (PEopp x ) : ssring.
Notation "x * y" := (PEmul x y) : ssring.
Notation "x ^+ n" := (PEpow x n) : ssring.
Notation "0" := PEO : ssring.
Notation "1" := PEI : ssring.
Delimit Scope ssring with S.
(* -------------------------------------------------------------------- *)
Declare Scope ssfield.
Notation "c %:S" := (FEc c) : ssfield.
Notation "''X_' i" := (FEX _ i) : ssfield.
Notation "x + y" := (FEadd x y) : ssfield.
Notation "x - y" := (FEsub x y) : ssfield.
Notation "- x" := (FEopp x ) : ssfield.
Notation "x * y" := (FEmul x y) : ssfield.
Notation "x ^-1" := (FEinv x) : ssfield.
Notation "x / y" := (FEdiv x y) : ssfield.
Notation "x ^+ n" := (FEpow x n) : ssfield.
Notation "0" := FEO : ssfield.
Notation "1" := FEI : ssfield.
Delimit Scope ssfield with F.
(* -------------------------------------------------------------------- *)
Local Coercion Z_of_nat : nat >-> Z.
Local Coercion N_of_nat : nat >-> N.
Local Coercion P_of_succ_nat : nat >-> positive.
Local Coercion Z.pos : positive >-> Z.
(* -------------------------------------------------------------------- *)
Class find (T : Type) (x : T) (xs : seq T) (i:nat).
#[export]
Instance find0 (T : Type) (x : T) (xs : seq T)
: find x (x :: xs) 0
:= { }.
#[export]
Instance findS (T : Type) (x : T) (y : T) (ys : seq T) i
{_: find x ys i}
: find x (y :: ys) i.+1 | 1
:= { }.
(* -------------------------------------------------------------------- *)
Class closed (T : Type) (xs : seq T).
#[export]
Instance closed_nil T
: closed (T:=T) nil
:= { }.
#[export]
Instance closed_cons T (x : T) (xs : seq T)
{_: closed xs}
: closed (x :: xs)
:= { }.
(* -------------------------------------------------------------------- *)
Class reify (R : ringType) (a : R) (t : PExpr Z) (e : seq R).
#[export]
Instance reify_zero (R : ringType) e : @reify R 0 0%S e := { }.
#[export]
Instance reify_one (R : ringType) e : @reify R 1 1%S e := { }.
#[export]
Instance reify_natconst (R : ringType) n e
: @reify R n%:R ((n : Z)%:S)%S e
:= { }.
#[export]
Instance reify_add (R : ringType) a1 a2 t1 t2 e
{_: @reify R a1 t1 e}
{_: @reify R a2 t2 e}
: reify (a1 + a2) (t1 + t2)%S e
:= { }.
#[export]
Instance reify_opp (R : ringType) a t e
{_: @reify R a t e}
: reify (-a) (-t)%S e
:= { }.
#[export]
Instance reify_natmul (R : ringType) a n t e
{_: @reify R a t e}
: reify (a *+ n) (t * (n : Z)%:S)%S e
:= { }.
#[export]
Instance reify_mul (R : ringType) a1 a2 t1 t2 e
{_: @reify R a1 t1 e}
{_: @reify R a2 t2 e}
: reify (a1 * a2) (t1 * t2)%S e
:= { }.
#[export]
Instance reify_exp (R : ringType) a n t e
{_: @reify R a t e}
: reify (a ^+ n) (t ^+ n)%S e | 1
:= { }.
#[export]
Instance reify_var (R : ringType) a i e
`{find R a e i}
: reify a ('X_i)%S e
| 100
:= { }.
Definition reifyl (R : ringType) a t e
{_: @reify R a t e}
`{closed (T := R) e}
:= (t, e).
(* -------------------------------------------------------------------- *)
Ltac reify xt xe :=
match goal with
|- ?a = 0 =>
match eval red in (reifyl (a := a)) with
| (?t, ?e) => pose xt := t; pose xe := e
end
end.
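(* Illustrative example (hypothetical goal): on a goal [x + y * x = 0] with
   [x y : R], [reify xt xe] poses a reified term and its environment, e.g.
   [xt := ('X_0 + 'X_1 * 'X_0)%S] with [xe := [:: x; y]]. *)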
(* -------------------------------------------------------------------- *)
Class freify (F : fieldType) (a : F) (t : FExpr Z) (e : seq F).
#[export]
Instance freify_zero (F : fieldType) e : @freify F 0 0%F e := { }.
#[export]
Instance freify_one (F : fieldType) e : @freify F 1 1%F e := { }.
#[export]
Instance freify_natconst (F : fieldType) n e
: @freify F n%:R ((n : Z)%:S)%F e
:= { }.
#[export]
Instance freify_add (F : fieldType) a1 a2 t1 t2 e
{_: @freify F a1 t1 e}
{_: @freify F a2 t2 e}
: freify (a1 + a2) (t1 + t2)%F e
:= { }.
#[export]
Instance freify_opp (F : fieldType) a t e
{_: @freify F a t e}
: freify (-a) (-t)%F e
:= { }.
#[export]
Instance freify_natmul (F : fieldType) a n t e
{_: @freify F a t e}
: freify (a *+ n) (t * (n : Z)%:S)%F e
:= { }.
#[export]
Instance freify_mul (F : fieldType) a1 a2 t1 t2 e
{_: @freify F a1 t1 e}
{_: @freify F a2 t2 e}
: freify (a1 * a2) (t1 * t2)%F e
:= { }.
#[export]
Instance freify_inv (F : fieldType) a t e
{_: @freify F a t e}
: freify (a^-1) (t^-1)%F e
:= { }.
#[export]
Instance freify_exp (F : fieldType) a n t e
{_: @freify F a t e}
: freify (a ^+ n) (t ^+ n)%F e | 1
:= { }.
#[export]
Instance freify_var (F : fieldType) a i e
`{find F a e i}
: freify a ('X_i)%F e
| 100
:= { }.
Definition freifyl (F : fieldType) a t e
{_: @freify F a t e}
`{closed (T := F) e}
:= (t, e).
(* -------------------------------------------------------------------- *)
Ltac freify xt xe :=
match goal with
|- ?a = 0 =>
match eval red in (freifyl (a := a)) with
| (?t, ?e) => pose xt := t; pose xe := e
end
end.
(* -------------------------------------------------------------------- *)
Definition R_of_Z (R : ringType) (z : Z) : R :=
match z with
| Z0 => 0
| Zpos n => (nat_of_P n)%:R
| Zneg n => - (nat_of_P n)%:R
end.
Arguments R_of_Z [R].
Lemma z0E: 0%Z = 0.
Proof. by []. Qed.
Lemma zaddE (z1 z2 : Z): (z1 + z2)%Z = z1 + z2.
Proof. by []. Qed.
Lemma zoppE (z : Z): (-z)%Z = -z.
Proof. by []. Qed.
Lemma zmulE (z1 z2 : Z): (z1 * z2)%Z = z1 * z2.
Proof. by []. Qed.
Definition zE := (z0E, zaddE, zoppE).
Lemma R_of_Z_is_additive (R : ringType): additive (R_of_Z (R := R)).
Proof.
have oppm: {morph (R_of_Z (R := R)) : x / -x >-> -x}.
by case=> [|n|n] //=; rewrite ?(oppr0, opprK).
have addm z1 z2: R_of_Z (z1 + z2) = R_of_Z z1 + R_of_Z z2 :> R.
wlog: z1 z2 / (z1 <=? z2)%Z; first move=> wlog.
+ case: (boolP (z1 <=? z2))%Z; first by move/wlog.
+ move/negbTE/Z.leb_gt/Z.lt_le_incl/Z.leb_le.
by move/wlog; rewrite Z.add_comm addrC.
case: z1 z2=> [|n1|n1] [|n2|n2] //= _; rewrite ?(addr0, add0r) //.
+ by rewrite Pos2Nat.inj_add natrD.
+ case: (Z.compare_spec n1 n2) => [[->]||].
* by rewrite Z.pos_sub_diag addrC subrr.
* move=> lt; rewrite (Z.pos_sub_gt _ _ lt) /=.
rewrite (Pos2Nat.inj_sub _ _ lt) natrB 1?addrC //.
apply/leP/Pos2Nat.inj_le/Pos.lt_le_incl/Pos.ltb_lt.
by rewrite Pos2Z.inj_ltb; apply/Pos.ltb_lt.
* move=> lt; rewrite (Z.pos_sub_lt _ _ lt) /=.
rewrite (Pos2Nat.inj_sub _ _ lt) natrB ?opprB 1?addrC //.
apply/leP/Pos2Nat.inj_le/Pos.lt_le_incl/Pos.ltb_lt.
by rewrite Pos2Z.inj_ltb; apply/Pos.ltb_lt.
+ by rewrite Pos2Nat.inj_add natrD opprD.
by move=> z1 z2 /=; rewrite addm oppm.
Qed.
Canonical R_of_Z_additive (R : ringType) := Additive (R_of_Z_is_additive R).
Lemma R_of_Z_is_multiplicative (R : ringType): multiplicative (R_of_Z (R := R)).
Proof.
split=> //=; case=> [|z1|z1] [|z2|z2] //=;
rewrite ?simpm // ?(mulNr, mulrN, opprK);
by rewrite nat_of_P_mult_morphism natrM.
Qed.
Canonical R_of_Z_rmorphism (R : ringType) := AddRMorphism (R_of_Z_is_multiplicative R).
Local Notation REeval :=
(@PEeval _ 0 +%R *%R (fun x y => x - y) -%R Z R_of_Z nat nat_of_N (@GRing.exp _)).
Lemma RE (R : ringType): @ring_eq_ext R +%R *%R -%R (@eq R).
Proof. by split; do! move=> ? _ <-. Qed.
Local Notation "~%R" := (fun x y => x - y).
Local Notation "/%R" := (fun x y => x / y).
Local Notation "^-1%R" := (@GRing.inv _) (only parsing).
Lemma RR (R : comRingType): @ring_theory R 0 1 +%R *%R ~%R -%R (@eq R).
Proof.
split=> //=;
[ exact: add0r | exact: addrC | exact: addrA | exact: mul1r
| exact: mulrC | exact: mulrA | exact: mulrDl | exact: subrr ].
Qed.
Lemma RZ (R : ringType):
ring_morph (R := R) 0 1 +%R *%R ~%R -%R eq
0%Z 1%Z Zplus Zmult Zminus Z.opp Zeq_bool (@R_of_Z _).
Proof.
split=> //=.
+ by move=> x y; rewrite rmorphD.
+ by move=> x y; rewrite rmorphB.
+ by move=> x y; rewrite rmorphM.
+ by move=> x; rewrite raddfN.
+ by move=> x y /Zeq_bool_eq ->.
Qed.
Lemma PN (R : ringType):
@power_theory R 1 *%R eq nat nat_of_N (@GRing.exp R).
Proof.
split=> r [|n] //=; elim: n => //= p ih.
+ by rewrite Pos2Nat.inj_xI exprS -!ih -exprD addnn -mul2n.
+ by rewrite Pos2Nat.inj_xO -!ih -exprD addnn -mul2n.
Qed.
Lemma RF (F : fieldType): @field_theory F 0 1 +%R *%R ~%R -%R /%R ^-1%R (@eq F).
Proof.
split=> //=; first by apply RR.
+ by apply/eqP; rewrite oner_eq0.
+ by move=> x /eqP nz_z; rewrite mulVf.
Qed.
Definition Rcorrect (R : comRingType) :=
ring_correct (Eqsth R) (RE R)
(Rth_ARth (Eqsth R) (RE R) (RR R))
(RZ R) (PN R)
(triv_div_th
(Eqsth R) (RE R)
(Rth_ARth (Eqsth R) (RE R) (RR R)) (RZ R)).
Definition Fcorrect (F : fieldType) :=
Field_correct
(Eqsth F) (RE F) (congr1 GRing.inv)
(F2AF (Eqsth F) (RE F) (RF F)) (RZ F) (PN F)
(triv_div_th
(Eqsth F) (RE F)
(Rth_ARth (Eqsth F) (RE F) (RR F)) (RZ F)).
(* -------------------------------------------------------------------- *)
Fixpoint Reval (R : ringType) (l : seq R) (pe : PExpr Z) :=
match pe with
| 0%S => 0
| 1%S => 1
| (c%:S)%S => R_of_Z c
| ('X_j)%S => BinList.nth 0 j l
| (pe1 + pe2)%S => (Reval l pe1) + (Reval l pe2)
| (pe1 - pe2)%S => (Reval l pe1) - (Reval l pe2)
| (- pe1)%S => - (Reval l pe1)
| (pe1 ^+ n)%S => (Reval l pe1) ^+ (nat_of_N n)
| (pe1 * pe2)%S =>
match pe2 with
| ((Zpos n)%:S)%S => (Reval l pe1) *+ (nat_of_P n)
| _ => (Reval l pe1) * (Reval l pe2)
end
end.
Local Notation RevalC R :=
(PEeval 0 1 +%R *%R ~%R -%R (R_of_Z (R := R)) nat_of_N (@GRing.exp R)).
Lemma PEReval (R : ringType): RevalC _ =2 @Reval R.
Proof.
move=> l; elim => //=; try by do? move=> ?->.
+ move=> pe1 -> pe2 ->; case: pe2 => //=.
by case=> [|c|c] //=; rewrite mulr_natr.
Qed.
(* -------------------------------------------------------------------- *)
Fixpoint Feval (F : fieldType) (l : seq F) (pe : FExpr Z) :=
match pe with
| 0%F => 0
| 1%F => 1
| (c%:S)%F => R_of_Z c
| ('X_j)%F => BinList.nth 0 j l
| (pe1 + pe2)%F => (Feval l pe1) + (Feval l pe2)
| (pe1 - pe2)%F => (Feval l pe1) - (Feval l pe2)
| (- pe1)%F => - (Feval l pe1)
| (pe1 ^+ n)%F => (Feval l pe1) ^+ (nat_of_N n)
| (pe^-1)%F => (Feval l pe)^-1
| (pe1 / pe2)%F => (Feval l pe1) / (Feval l pe2)
| (pe1 * pe2)%F =>
match pe2 with
| ((Zpos n)%:S)%F => (Feval l pe1) *+ (nat_of_P n)
| _ => (Feval l pe1) * (Feval l pe2)
end
end.
Local Notation FevalC R :=
(FEeval 0 1 +%R *%R ~%R -%R /%R ^-1%R
(R_of_Z (R := R)) nat_of_N (@GRing.exp R)).
Lemma PEFeval (F : fieldType): FevalC _ =2 @Feval F.
Proof.
move=> l; elim => //=; try by do? move=> ?->.
+ move=> pe1 -> pe2 ->; case: pe2 => //=.
by case=> [|c|c] //=; rewrite mulr_natr.
Qed.
(* -------------------------------------------------------------------- *)
Ltac ssring :=
let xt := fresh "xt" in
let xe := fresh "xe" in
apply/eqP; rewrite -subr_eq0; apply/eqP;
reify xt xe;
apply (@Rcorrect _ 100 xe [::] xt (Coq.setoid_ring.Ring_polynom.PEc 0%Z) I);
  vm_compute; exact (erefl true).
(* -------------------------------------------------------------------- *)
Ltac ssfield :=
let xt := fresh "xt" in
let xe := fresh "xe" in
apply/eqP; rewrite -subr_eq0; apply/eqP;
(* rewrite ?(mulr0, mul0r, mulr1, mul1r); *) freify xt xe;
move: (@Fcorrect _ 100 xe [::] xt (Field_theory.FEc 0) I [::] (erefl [::]));
move/(_ _ (erefl _) _ (erefl _) (erefl true)); rewrite !PEFeval;
apply=> /=; do? split; cbv delta[BinPos.Pos.to_nat] => /= {xt xe};
try (exact I || apply/eqP).
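(* -------------------------------------------------------------------- *)
(* Usage sketch (illustrative, not from this file; assumes the ambient
   ssreflect/mathcomp context):
     Goal forall (R : comRingType) (x y : R), x * (y + 1) = x * y + x.
     Proof. by move=> R x y; ssring. Qed.
   [ssfield] plays the same role for field equalities, modulo the nonzero
   side conditions on denominators that the final [try] attempts. *)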
|
{"author": "jasmin-lang", "repo": "jasmin", "sha": "3c783b662000c371ba924a953d444fd80b860d9f", "save_path": "github-repos/coq/jasmin-lang-jasmin", "path": "github-repos/coq/jasmin-lang-jasmin/jasmin-3c783b662000c371ba924a953d444fd80b860d9f/proofs/3rdparty/ssrring.v"}
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""CLDNN (CNN + bidirectional LSTM + DNN) encoder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from models.encoders.core.cnn_util import conv_layer, max_pool, batch_normalization
from models.encoders.core.blstm import basiclstmcell, lstmcell, lstmblockcell, lstmblockfusedcell, cudnnlstm
############################################################
# Architecture: (feature map, kernel, stride)
# CNN1: (32, 11*21, (3,2)) * 1 layer
# ReLU
# (Batch normalization)
# max pooling
# dropout
# CNN2: (32, 11*11, (1,2)) * 1 layer
# ReLU
# (Batch normalization)
# max pooling
# dropout
# CNN3:(96, 3*3, (1,1)) * 1 layer
# ReLU
# (Batch normalization)
# max pooling
# dropout
# BLSTM: 896 * 2 layers
# fc: 896 * 1 layer
# dropout
# fc: 74 * 1 layer
# softmax
############################################################
class CLDNNEncoder(object):
"""CLDNN (CNN + bidirectional LSTM + DNN) encoder.
This implementation is based on
https://arxiv.org/abs/1702.07793.
Wang, Yisen, et al.
"Residual convolutional CTC networks for automatic speech recognition."
arXiv preprint arXiv:1702.07793 (2017).
Args:
input_size (int): the dimensions of input vectors.
This is expected to be num_channels * 3 (static + Δ + ΔΔ)
splice (int): frames to splice
num_stack (int): the number of frames to stack
num_units (int): the number of units in each layer
num_proj (int): the number of nodes in the projection layer
num_layers (int): the number of layers
lstm_impl (string, optional): a base implementation of LSTM.
- BasicLSTMCell: tf.contrib.rnn.BasicLSTMCell (no peephole)
- LSTMCell: tf.contrib.rnn.LSTMCell
- LSTMBlockCell: tf.contrib.rnn.LSTMBlockCell
- LSTMBlockFusedCell: under implementation
- CudnnLSTM: under implementation
            Choose the backend implementation in TensorFlow.
use_peephole (bool): if True, use peephole
parameter_init (float): the range of uniform distribution to
initialize weight parameters (>= 0)
clip_activation (float): the range of activation clipping (> 0)
time_major (bool, optional): if True, time-major computation will be
performed
name (string, optional): the name of encoder
"""
def __init__(self,
input_size,
splice,
num_stack,
num_units,
num_proj,
num_layers,
lstm_impl,
use_peephole,
parameter_init,
clip_activation,
time_major=False,
name='cldnn_wang_encoder'):
assert num_proj != 0
assert input_size % 3 == 0
self.num_channels = input_size // 3
self.splice = splice
self.num_stack = num_stack
self.num_units = num_units
if lstm_impl != 'LSTMCell':
self.num_proj = None
else:
self.num_proj = num_proj
# TODO: fix this
self.num_layers = num_layers
self.lstm_impl = lstm_impl
self.use_peephole = use_peephole
self.parameter_init = parameter_init
self.clip_activation = clip_activation
self.time_major = time_major
self.name = name
def __call__(self, inputs, inputs_seq_len, keep_prob, is_training):
"""Construct model graph.
Args:
inputs (placeholder): A tensor of size
`[B, T, input_size (num_channels * (splice * num_stack) * 3)]`
inputs_seq_len (placeholder): A tensor of size` [B]`
keep_prob (placeholder, float): A probability to keep nodes
in the hidden-hidden connection
is_training (bool):
Returns:
outputs: Encoder states.
if time_major is True, a tensor of size
`[T, B, num_units (num_proj)]`
otherwise, `[B, T, num_units (num_proj)]`
final_state: A final hidden state of the encoder
"""
# inputs: 3D tensor `[B, T, input_dim]`
batch_size = tf.shape(inputs)[0]
max_time = tf.shape(inputs)[1]
input_dim = inputs.shape.as_list()[-1]
# NOTE: input_dim: num_channels * splice * num_stack * 3
# For debug
# print(input_dim)
# print(self.num_channels)
# print(self.splice)
# print(self.num_stack)
assert input_dim == self.num_channels * self.splice * self.num_stack * 3
# Reshape to 4D tensor `[B * T, num_channels, splice * num_stack, 3]`
inputs = tf.reshape(
inputs,
shape=[batch_size * max_time, self.num_channels, self.splice * self.num_stack, 3])
# NOTE: filter_size: `[H, W, C_in, C_out]`
with tf.variable_scope('CNN1'):
inputs = conv_layer(inputs,
filter_size=[11, 21, 3, 32],
stride=[3, 2],
parameter_init=self.parameter_init,
activation='relu')
# inputs = batch_normalization(inputs, is_training=is_training)
inputs = max_pool(inputs,
pooling_size=[1, 1],
stride=[1, 1],
name='max_pool')
inputs = tf.nn.dropout(inputs, keep_prob)
with tf.variable_scope('CNN2'):
inputs = conv_layer(inputs,
filter_size=[11, 11, 32, 32],
stride=[1, 2],
parameter_init=self.parameter_init,
activation='relu')
# inputs = batch_normalization(inputs, is_training=is_training)
inputs = max_pool(inputs,
pooling_size=[1, 1],
stride=[1, 1],
name='max_pool')
inputs = tf.nn.dropout(inputs, keep_prob)
with tf.variable_scope('CNN3'):
inputs = conv_layer(inputs,
filter_size=[3, 3, 32, 96],
stride=[1, 1],
parameter_init=self.parameter_init,
activation='relu')
# inputs = batch_normalization(inputs, is_training=is_training)
inputs = max_pool(inputs,
pooling_size=[1, 1],
stride=[1, 1],
name='max_pool')
inputs = tf.nn.dropout(inputs, keep_prob)
# Reshape to 3D tensor `[B, T, new_h * new_w * C_out]`
inputs = tf.reshape(
inputs, shape=[batch_size, max_time, np.prod(inputs.shape.as_list()[-3:])])
initializer = tf.random_uniform_initializer(
minval=-self.parameter_init, maxval=self.parameter_init)
if self.lstm_impl == 'BasicLSTMCell':
outputs, final_state = basiclstmcell(
self.num_units, self.num_layers,
inputs, inputs_seq_len, keep_prob, initializer,
self.time_major)
elif self.lstm_impl == 'LSTMCell':
outputs, final_state = lstmcell(
self.num_units, self.num_proj, self.num_layers,
self.use_peephole, self.clip_activation,
inputs, inputs_seq_len, keep_prob, initializer,
self.time_major)
elif self.lstm_impl == 'LSTMBlockCell':
outputs, final_state = lstmblockcell(
self.num_units, self.num_layers,
self.use_peephole, self.clip_activation,
inputs, inputs_seq_len, keep_prob, initializer,
self.time_major)
elif self.lstm_impl == 'LSTMBlockFusedCell':
outputs, final_state = lstmblockfusedcell(
self.num_units, self.num_layers,
inputs, inputs_seq_len, keep_prob, initializer,
self.time_major)
elif self.lstm_impl == 'CudnnLSTM':
outputs, final_state = cudnnlstm(
self.num_units, self.num_layers,
inputs, inputs_seq_len, keep_prob, initializer,
self.time_major)
else:
            raise ValueError(
                'lstm_impl must be "BasicLSTMCell", "LSTMCell", '
                '"LSTMBlockCell", "LSTMBlockFusedCell" or '
                '"CudnnLSTM".')
# Reshape to 2D tensor `[B * T (T * B), output_dim]`
output_dim = outputs.shape.as_list()[-1]
outputs = tf.reshape(
outputs, shape=[batch_size * max_time, output_dim])
with tf.variable_scope('fc1') as scope:
outputs = tf.contrib.layers.fully_connected(
inputs=outputs,
num_outputs=896,
activation_fn=tf.nn.relu,
weights_initializer=tf.truncated_normal_initializer(
stddev=self.parameter_init),
biases_initializer=tf.zeros_initializer(),
scope=scope)
outputs = tf.nn.dropout(outputs, keep_prob)
with tf.variable_scope('fc2') as scope:
outputs = tf.contrib.layers.fully_connected(
inputs=outputs,
num_outputs=74,
activation_fn=tf.nn.relu,
weights_initializer=tf.truncated_normal_initializer(
stddev=self.parameter_init),
biases_initializer=tf.zeros_initializer(),
scope=scope)
output_dim = outputs.shape.as_list()[-1]
if self.time_major:
# Reshape back to 3D tensor `[T, B, 74]`
outputs = tf.reshape(
outputs, shape=[max_time, batch_size, output_dim])
else:
# Reshape back to 3D tensor `[B, T, 74]`
outputs = tf.reshape(
outputs, shape=[batch_size, max_time, output_dim])
return outputs, final_state
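# Usage sketch (shapes/values are illustrative assumptions; placeholders for
# `inputs`, `inputs_seq_len` and `keep_prob` must be built by the caller):
#   encoder = CLDNNEncoder(input_size=120, splice=11, num_stack=1,
#                          num_units=896, num_proj=896, num_layers=2,
#                          lstm_impl='LSTMCell', use_peephole=True,
#                          parameter_init=0.1, clip_activation=5.0)
#   logits, final_state = encoder(inputs, inputs_seq_len, keep_prob,
#                                 is_training=True)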
|
{"hexsha": "b72afba643847329d79a9905795267b636fa596a", "size": 10323, "ext": "py", "lang": "Python", "max_stars_repo_path": "models/encoders/core/cldnn_wang.py", "max_stars_repo_name": "sundogrd/tensorflow_end2end_speech_recognition", "max_stars_repo_head_hexsha": "61e4a65fb5c9f3d9f690d713dcd77a48b1de0a14", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 351, "max_stars_repo_stars_event_min_datetime": "2017-05-27T08:31:27.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-03T16:47:27.000Z", "max_issues_repo_path": "models/encoders/core/cldnn_wang.py", "max_issues_repo_name": "eLavin11/tensorflow_end2end_speech_recognition", "max_issues_repo_head_hexsha": "65b9728089d5e92b25b92384a67419d970399a64", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 19, "max_issues_repo_issues_event_min_datetime": "2017-07-19T13:12:18.000Z", "max_issues_repo_issues_event_max_datetime": "2019-06-12T06:07:13.000Z", "max_forks_repo_path": "models/encoders/core/cldnn_wang.py", "max_forks_repo_name": "eLavin11/tensorflow_end2end_speech_recognition", "max_forks_repo_head_hexsha": "65b9728089d5e92b25b92384a67419d970399a64", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 127, "max_forks_repo_forks_event_min_datetime": "2017-06-12T16:27:21.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-29T02:22:34.000Z", "avg_line_length": 38.5186567164, "max_line_length": 108, "alphanum_fraction": 0.5504213891, "include": true, "reason": "import numpy", "num_tokens": 2246}
|
! { dg-do compile }
! various checks which verify that we don't change do-iterators
DO I=1,5 ! { dg-error "cannot be redefined" "changing do-iterator 1" }
I=1 ! { dg-error "cannot be redefined" "changing do-iterator 1" }
END DO
DO I=1,5 ! { dg-error "cannot be redefined" "changing do-iterator 2" }
READ(5,*) I ! { dg-error "cannot be redefined" "changing do-iterator 2" }
END DO
DO I=1,5 ! { dg-error "cannot be redefined" "changing do-iterator 3" }
READ(5,*,iostat=i) j ! { dg-error "cannot be redefined" "changing do-iterator 3" }
ENDDO
END
|
{"hexsha": "0e11aa23339a0b6eb436074eb64124ebd3fd296b", "size": 580, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "llvm-gcc-4.2-2.9/gcc/testsuite/gfortran.dg/do_iterator.f90", "max_stars_repo_name": "vidkidz/crossbridge", "max_stars_repo_head_hexsha": "ba0bf94aee0ce6cf7eb5be882382e52bc57ba396", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2016-04-09T02:58:13.000Z", "max_stars_repo_stars_event_max_datetime": "2016-04-09T02:58:13.000Z", "max_issues_repo_path": "llvm-gcc-4.2-2.9/gcc/testsuite/gfortran.dg/do_iterator.f90", "max_issues_repo_name": "vidkidz/crossbridge", "max_issues_repo_head_hexsha": "ba0bf94aee0ce6cf7eb5be882382e52bc57ba396", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "llvm-gcc-4.2-2.9/gcc/testsuite/gfortran.dg/do_iterator.f90", "max_forks_repo_name": "vidkidz/crossbridge", "max_forks_repo_head_hexsha": "ba0bf94aee0ce6cf7eb5be882382e52bc57ba396", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.4285714286, "max_line_length": 85, "alphanum_fraction": 0.6448275862, "num_tokens": 192}
|
import datetime
import os
import logging
import re
import six
from six.moves import cPickle
import numpy as np
import xarray as xr
import pandas as pd
import requests
log=logging.getLogger('usgs_nwis')
from ... import utils
from .. import rdb
from .common import periods
try:
import seawater
except ImportError:
seawater=None
def nwis_dataset_collection(stations,*a,**k):
"""
Fetch from multiple stations, glue together to a combined dataset.
The rest of the options are the same as for nwis_dataset().
Stations for which no data was found are omitted in the results.
"""
ds_per_site=[]
for station in stations:
ds=nwis_dataset(station,*a,**k)
if ds is None:
continue
ds['site']=('site',),[station]
ds_per_site.append(ds)
# And now glue those all together, but no filling of gaps yet.
# As cases of missing data come up, this will have to get smarter about padding
# individual sites.
if len(ds_per_site)==0:
# Annoying, but if no stations exist, just return None
return None
collection=xr.concat( ds_per_site, dim='site')
for ds in ds_per_site:
ds.close() # free up FDs
return collection
def nwis_dataset(station,start_date,end_date,products,
days_per_request='M',frequency='realtime',
cache_dir=None,clip=True,cache_only=False,
cache_no_data=False):
"""
Retrieval script for USGS waterdata.usgs.gov
Retrieve one or more data products from a single station.
    station: string or numeric identifier for the USGS station.
products: list of integers identifying the variable to retrieve. See
usgs_parm_codes.tsv in the directory above this directory.
start_date,end_date: period to retrieve, as python datetime, matplotlib datenum,
or numpy datetime64.
days_per_request: batch the requests to fetch smaller chunks at a time.
if this is an integer, then chunks will start with start_date, then start_date+days_per_request,
etc.
if this is a string, it is interpreted as the frequency argument to pandas.PeriodIndex.
so 'M' will request month-aligned chunks. this has the advantage that requests for different
start dates will still be aligned to integer periods, and can reuse cached data.
cache_dir: if specified, save each chunk as a netcdf file in this directory,
with filenames that include the gage, period and products. The directory must already
exist.
clip: if True, then even if more data was fetched, return only the period requested.
frequency: defaults to "realtime" which should correspond to the original
sample frequency. Alternatively, "daily" which access daily average values.
cache_only: only read from cache, not attempting to fetch any new data.
cache_no_data: periods which successfully download but contain no data are recorded
as empty files. Otherwise it is assumed that there may be a transient error, and
nothing is written to cache. Do not use this for real-time retrievals, since it may
cache no-data results from the future.
returns an xarray dataset.
Note that names of variables are inferred from parameter codes where possible,
but this is not 100% accurate with respect to the descriptions provided in the rdb,
notably "Discharge, cubic feet per second" may be reported as
"stream_flow_mean_daily"
"""
start_date=utils.to_dt64(start_date)
end_date=utils.to_dt64(end_date)
params=dict(site_no=station,
format='rdb')
for prod in products:
params['cb_%05d'%prod]='on'
# Only for small requests of recent data:
# base_url="https://waterdata.usgs.gov/nwis/uv"
# Otherwise it redirects to here:
if frequency=='realtime':
base_url="https://nwis.waterdata.usgs.gov/usa/nwis/uv/"
elif frequency=='daily':
base_url="https://waterdata.usgs.gov/nwis/dv"
else:
raise Exception("Unknown frequency: %s"%(frequency))
params['period']=''
# generator for dicing up the request period
datasets=[]
last_url=None
for interval_start,interval_end in periods(start_date,end_date,days_per_request):
params['begin_date']=utils.to_datetime(interval_start).strftime('%Y-%m-%d')
params['end_date'] =utils.to_datetime(interval_end).strftime('%Y-%m-%d')
# This is the base name for caching, but also a shorthand for reporting
# issues with the user, since it already encapsulates most of the
# relevant info in a single tidy string.
base_fn="%s_%s_%s_%s.nc"%(station,
"-".join(["%d"%p for p in products]),
params['begin_date'],
params['end_date'])
if cache_dir is not None:
cache_fn=os.path.join(cache_dir,base_fn)
else:
cache_fn=None
if (cache_fn is not None) and os.path.exists(cache_fn):
log.info("Cached %s -- %s"%(interval_start,interval_end))
if os.path.getsize(cache_fn)==0:
# Cached no-data result
log.warning(" cache for %s -- %s says no-data"%(interval_start,interval_end))
continue
ds=xr.open_dataset(cache_fn)
elif cache_only:
log.info("Cache only - no data for %s -- %s"%(interval_start,interval_end))
continue
else:
log.info("Fetching %s"%(base_fn))
sesh = requests.Session()
sesh.mount('https://', requests.adapters.HTTPAdapter(max_retries=3))
req=sesh.get(base_url,params=params)
data=req.text
ds=rdb.rdb_to_dataset(text=data)
            if ds is None:  # no data was returned for this period
log.warning(" %s: no data found for this period"%base_fn)
if (cache_fn is not None) and cache_no_data:
log.warning(" %s: making zero-byte cache file"%base_fn)
with open(cache_fn,'wb') as fp: pass
continue
ds.attrs['url']=req.url
if cache_fn is not None:
ds.to_netcdf(cache_fn)
# USGS returns data inclusive of the requested date range - leading to some overlap
if len(datasets):
ds=ds.isel(time=ds.time>datasets[-1].time[-1])
datasets.append(ds)
if len(datasets)==0:
# could try to construct zero-length dataset, but that sounds like a pain
# at the moment.
log.warning(" no data for station %s for any periods!"%station)
return None
if len(datasets)>1:
# it's possible that not all variables appear in all datasets
# dataset=xr.concat( datasets, dim='time')
dataset=datasets[0]
for other in datasets[1:]:
dataset=dataset.combine_first(other)
for stale in datasets:
stale.close() # maybe free up FDs?
else:
dataset=datasets[0]
if clip:
time_sel=(dataset.time.values>=start_date) & (dataset.time.values<end_date)
dataset=dataset.isel(time=time_sel)
dataset.load() # force read into memory before closing files
for d in datasets:
d.close()
for meta in ['datenum','tz_cd']:
if meta in dataset.data_vars:
dataset=dataset.set_coords(meta)
return dataset
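# Usage sketch (the station ID and dates below are illustrative assumptions;
# 60 is USGS parameter code 00060, discharge in cubic feet per second):
#   ds = nwis_dataset("11455478", np.datetime64("2016-06-01"),
#                     np.datetime64("2016-07-01"), products=[60],
#                     days_per_request='M', cache_dir="cache")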
def add_salinity(ds):
assert seawater is not None
for v in ds.data_vars:
if v.startswith('specific_conductance'):
salt_name=v.replace('specific_conductance','salinity')
if salt_name not in ds:
print("%s => %s"%(v,salt_name))
salt=seawater.eos80.salt(ds[v].values/1000. / seawater.constants.c3515,
25.0, # temperature - USGS adjusts to 25degC
0) # no pressure effects
ds[salt_name]=ds[v].dims, salt
def station_metadata(station,cache_dir=None):
if cache_dir is not None:
cache_fn=os.path.join(cache_dir,"meta-%s.pkl"%station)
if os.path.exists(cache_fn):
with open(cache_fn,'rb') as fp:
meta=cPickle.load(fp)
return meta
url="https://waterdata.usgs.gov/nwis/inventory?agency_code=USGS&site_no=%s"%station
resp=requests.get(url)
m=re.search(r"Latitude\s+([.0-9&#;']+\")",resp.text)
lat=m.group(1)
m=re.search(r"Longitude\s+([.0-9&#;']+\")",resp.text)
lon=m.group(1)
def dms_to_dd(s):
s=s.replace('°',' ').replace('"',' ').replace("'"," ").strip()
d,m,s =[float(p) for p in s.split()]
return d + m/60. + s/3600.
lat=dms_to_dd(lat)
# no mention of west longitude, but can assume it is west.
lon=-dms_to_dd(lon)
meta=dict(lat=lat,lon=lon)
if cache_dir is not None:
with open(cache_fn,'wb') as fp:
cPickle.dump(meta,fp)
return meta
|
{"hexsha": "689bfb34a7f2a910ff24decca90c67d12da6ad2a", "size": 9091, "ext": "py", "lang": "Python", "max_stars_repo_path": "stompy/io/local/usgs_nwis.py", "max_stars_repo_name": "rustychris/stompy", "max_stars_repo_head_hexsha": "4efb78824804edc68555bced275e37842f98ba1f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 17, "max_stars_repo_stars_event_min_datetime": "2017-10-12T14:53:25.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-26T01:24:52.000Z", "max_issues_repo_path": "stompy/io/local/usgs_nwis.py", "max_issues_repo_name": "rustychris/stompy", "max_issues_repo_head_hexsha": "4efb78824804edc68555bced275e37842f98ba1f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2018-03-12T12:43:14.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-04T17:44:31.000Z", "max_forks_repo_path": "stompy/io/local/usgs_nwis.py", "max_forks_repo_name": "rustychris/stompy", "max_forks_repo_head_hexsha": "4efb78824804edc68555bced275e37842f98ba1f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2017-09-29T21:20:11.000Z", "max_forks_repo_forks_event_max_datetime": "2020-09-28T21:29:23.000Z", "avg_line_length": 36.364, "max_line_length": 106, "alphanum_fraction": 0.6328236718, "include": true, "reason": "import numpy", "num_tokens": 2115}
|
from six.moves import input
import numpy as np
from os.path import dirname, join
#import joblib
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
import warnings
warnings.filterwarnings('ignore')
from sklearn.externals import joblib
def main():
print("Predict Type of Iris Flower")
print("=="*20)
print("Please Enter Values in the required fields")
print("=="*20)
while True:
try:
            try:
                sl = float(input('sepal length (cm) : '))
            except Exception:
                print("Please enter an integer or float")
                break
            print("*"*20)
            try:
                sw = float(input('sepal width (cm) : '))
            except Exception:
                print("Please enter an integer or float")
                break
            print("*"*20)
            try:
                pl = float(input('petal length (cm) : '))
            except Exception:
                print("Please enter an integer or float")
                break
            print("*"*20)
            try:
                pw = float(input('petal width (cm): '))
            except Exception:
                print("Please enter an integer or float")
                break
            print("*"*20)
#data = load_iris()
#x = data["data"]
#y = data["target"]
#clf = LogisticRegression(multi_class="ovr", solver="lbfgs")
#clf.fit(x, y)
file = join(dirname(__file__),"clf.pkl")
clf = joblib.load(file)
new = np.array([sl, sw, pl, pw]).reshape(1,-1)
pred = clf.predict(new)
            if pred == 2:
                flower_type = 'virginica'
            elif pred == 0:
                flower_type = 'setosa'
            else:
                flower_type = 'versicolor'
except EOFError:
break
if not sl:
break
print("Values inputted are")
print()
print("Sepal Length : ", sl)
print()
print("Sepal Width : ", sw)
print()
print("Petal Length : ", pl)
print()
print("Petal Width : ", pw)
print()
print("Predictior says the flower type is", type)
print()
proceed = input("Do you wish to continue : ")
if proceed == "yes":
continue
elif proceed == "y":
continue
else:
break
|
{"hexsha": "5257c85714f8e7574ea5fc9e9b30b735afa6b38d", "size": 2553, "ext": "py", "lang": "Python", "max_stars_repo_path": "app/src/main/python/main.py", "max_stars_repo_name": "kongkip/Ml_on_Android", "max_stars_repo_head_hexsha": "616dc4c86dc9250ec1c84854f857d083221a1deb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-09-30T03:19:35.000Z", "max_stars_repo_stars_event_max_datetime": "2019-09-30T03:19:35.000Z", "max_issues_repo_path": "app/src/main/python/main.py", "max_issues_repo_name": "kongkip/Ml_on_Android", "max_issues_repo_head_hexsha": "616dc4c86dc9250ec1c84854f857d083221a1deb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "app/src/main/python/main.py", "max_forks_repo_name": "kongkip/Ml_on_Android", "max_forks_repo_head_hexsha": "616dc4c86dc9250ec1c84854f857d083221a1deb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.0510204082, "max_line_length": 72, "alphanum_fraction": 0.4837446142, "include": true, "reason": "import numpy", "num_tokens": 560}
|
from weka.classifiers import Classifier, Evaluation
from weka.core.converters import Loader
import weka.core.jvm as jvm
import numpy as np
import tempfile
import os
from wirecaml.tools.ascii import print_notice
from wirecaml.tools.file_tools import silent_remove
from wirecaml.tools.data_tools import pandas2arff
class TAN:
def __init__(self):
self.classifier = None
self.train_data = None
self.train_fn = os.path.join(tempfile.gettempdir(), 'TAN_train_data.arff')
self.test_fn = os.path.join(tempfile.gettempdir(), 'TAN_test_data.arff')
self.mbc = ""
self.score_type = "BAYES"
def fit(self, X, Y):
# Create combined dataframe of X and Y
        X['class'] = Y.values
filename = self.to_arff(X, False)
# Remove class column
del X['class']
if not jvm.started:
print_notice("Starting JVM")
jvm.start()
loader = Loader("weka.core.converters.ArffLoader")
self.train_data = loader.load_file(filename)
self.train_data.class_is_last()
self.classifier = Classifier(classname="weka.classifiers.bayes.BayesNet",
options=["-Q", "weka.classifiers.bayes.net.search.local.TAN",
"--", "-S", self.score_type, self.mbc,
"-E", "weka.classifiers.bayes.net.estimate.SimpleEstimator",
"--", "-A", "0.9"])
self.classifier.build_classifier(self.train_data)
def predict(self, X):
evaluation = Evaluation(self.train_data)
# Add class column (we can't copy X, because this is a large object, so we add the column and remove it later)
X['class'] = None
filename = self.to_arff(X, True)
# Remove class column
del X['class']
loader = Loader("weka.core.converters.ArffLoader")
test_data = loader.load_file(filename)
test_data.class_is_last()
preds = evaluation.test_model(self.classifier, test_data)
return preds
def predict_proba(self, X):
evaluation = Evaluation(self.train_data)
# Add class column (we can't copy X, because this is a large object, so we add the column and remove it later)
X['class'] = None
filename = self.to_arff(X, True)
# Remove class column
del X['class']
loader = Loader("weka.core.converters.ArffLoader")
test_data = loader.load_file(filename)
test_data.class_is_last()
evaluation.test_model(self.classifier, test_data)
probas = None
# Return probabilities
for pred in evaluation.predictions:
if probas is None:
probas = pred.distribution
else:
probas = np.vstack([probas, pred.distribution])
return probas
def to_arff(self, df, test):
if test:
filename = self.test_fn
else:
filename = self.train_fn
print_notice("Writing ARFF data to filename %s" % filename)
pandas2arff(df, filename)
return filename
def clean_up(self):
print_notice("Removing temporary files")
silent_remove(self.train_fn)
silent_remove(self.test_fn)
print_notice("Stopping JVM")
jvm.stop()
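# Usage sketch (X_train/X_test are hypothetical pandas DataFrames and y_train
# a Series; a working weka installation and JVM are required):
#   clf = TAN()
#   clf.fit(X_train, y_train)
#   preds = clf.predict(X_test)
#   clf.clean_up()  # remove temporary ARFF files and stop the JVM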
|
{"hexsha": "c4fffa9b471c00e3058e8fb52987e0feb3b48d08", "size": 3407, "ext": "py", "lang": "Python", "max_stars_repo_path": "wirecaml/model/tan.py", "max_stars_repo_name": "jorkro/wirecaml", "max_stars_repo_head_hexsha": "819c24a127eb026b83ae41eb3c8c1ac8f77f25b8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2018-12-11T00:07:08.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-07T04:48:14.000Z", "max_issues_repo_path": "wirecaml/model/tan.py", "max_issues_repo_name": "pacraswill/wirecaml", "max_issues_repo_head_hexsha": "819c24a127eb026b83ae41eb3c8c1ac8f77f25b8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2018-12-02T21:09:16.000Z", "max_issues_repo_issues_event_max_datetime": "2018-12-10T19:51:39.000Z", "max_forks_repo_path": "wirecaml/model/tan.py", "max_forks_repo_name": "pacraswill/wirecaml", "max_forks_repo_head_hexsha": "819c24a127eb026b83ae41eb3c8c1ac8f77f25b8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2018-05-24T06:05:35.000Z", "max_forks_repo_forks_event_max_datetime": "2021-02-10T04:08:05.000Z", "avg_line_length": 29.6260869565, "max_line_length": 118, "alphanum_fraction": 0.6019958908, "include": true, "reason": "import numpy", "num_tokens": 753}
|
import numpy as np
import rospy
from control_msgs.msg import JointTrajectoryControllerState,\
    FollowJointTrajectoryActionResult, FollowJointTrajectoryActionGoal
import matplotlib.pyplot as plt
from trajectory_msgs.msg import JointTrajectory, JointTrajectoryPoint
class PartialTrajListener():
def __init__(self):
self.start_time_goals = []
self.start_time_goals_trajectory = []
self.goal_joint_names = []
self.trajectories = []
self.start_goals = False
self.finish_goals = False
self.joints_time = []
self.ffj3_actual = []
self.ffj3_desired = []
self.ffj3_error = []
self.rfj3_actual = []
self.rfj3_desired = []
self.rfj3_error = []
self.ffj3_vel_actual = []
self.rfj3_vel_actual = []
self.ffj3_vel_desired = []
self.rfj3_vel_desired = []
self.ffj3_vel_error = []
self.rfj3_vel_error = []
rospy.Subscriber("/rh_trajectory_controller/state",
JointTrajectoryControllerState, self.callback)
rospy.Subscriber(
"/rh_trajectory_controller/follow_joint_trajectory/result",
FollowJointTrajectoryActionResult, self.callback_result)
rospy.Subscriber(
"/rh_trajectory_controller/follow_joint_trajectory/goal",
FollowJointTrajectoryActionGoal, self.callback_goal)
def callback(self, state):
self.joint_names = state.joint_names
self.ffj3_index = self.joint_names.index("rh_FFJ3")
self.rfj3_index = self.joint_names.index("rh_RFJ3")
if self.start_goals and not self.finish_goals:
self.joints_time.append(state.header.stamp.to_sec())
self.ffj3_actual.append(state.actual.positions[self.ffj3_index])
self.ffj3_desired.append(state.desired.positions[self.ffj3_index])
self.ffj3_error.append(state.error.positions[self.ffj3_index])
self.rfj3_actual.append(state.actual.positions[self.rfj3_index])
self.rfj3_desired.append(state.desired.positions[self.rfj3_index])
self.rfj3_error.append(state.error.positions[self.rfj3_index])
self.ffj3_vel_actual.append(state.actual.velocities[self.ffj3_index])
self.rfj3_vel_actual.append(state.actual.velocities[self.rfj3_index])
self.ffj3_vel_desired.append(state.desired.velocities[self.ffj3_index])
self.rfj3_vel_desired.append(state.desired.velocities[self.rfj3_index])
self.ffj3_vel_error.append(state.error.velocities[self.ffj3_index])
self.rfj3_vel_error.append(state.error.velocities[self.rfj3_index])
def callback_result(self, result):
print ("Trajectory Goal: " + result.status.goal_id.id +
" finished with status: " + str(result.status.status))
def callback_goal(self, goal):
self.start_goals = True
self.goal_joint_names.append(goal.goal.trajectory.joint_names)
self.start_time_goals.append(goal.header.stamp.to_sec())
self.start_time_goals_trajectory.append(goal.goal.trajectory.header.stamp.to_sec())
self.trajectories.append(goal.goal.trajectory.points)
def plot_settings(self, plt):
ax = plt.gca()
plt.grid(which='both', axis='both')
plt.setp(ax.get_xticklabels(), fontsize=8)
plt.setp(ax.get_yticklabels(), fontsize=8)
plt.xlabel('Time (s)')
ax.xaxis.label.set_size(10)
ax.yaxis.label.set_size(10)
def graph(self):
time_zero = self.joints_time[0]
time = np.array(self.joints_time) - time_zero
plt.figure()
# Plot goal trajectories waypoints
time_ffj3_traj = []
angle_ffj3_traj = []
time_rfj3_traj = []
angle_rfj3_traj = []
for i, traj in enumerate(self.trajectories):
ffj3_goal_index = self.goal_joint_names[i].index("rh_FFJ3") if "rh_FFJ3" in self.goal_joint_names[i] else -1
rfj3_goal_index = self.goal_joint_names[i].index("rh_RFJ3") if "rh_RFJ3" in self.goal_joint_names[i] else -1
for point in traj:
if ffj3_goal_index > -1:
time_ffj3_traj.append(point.time_from_start.to_sec() +
self.start_time_goals_trajectory[i] -
time_zero)
angle_ffj3_traj.append(point.positions[ffj3_goal_index])
if rfj3_goal_index > -1:
time_rfj3_traj.append(point.time_from_start.to_sec() +
self.start_time_goals_trajectory[i] -
time_zero)
angle_rfj3_traj.append(point.positions[rfj3_goal_index])
if ffj3_goal_index > -1:
plt.subplot(3, 2, 1)
plt.plot(time_ffj3_traj, angle_ffj3_traj, 'o',
label="Traj " + str(i + 1))
if rfj3_goal_index > -1:
plt.subplot(3, 2, 2)
plt.plot(time_rfj3_traj, angle_rfj3_traj, 'o',
label="Traj " + str(i + 1))
time_ffj3_traj = []
angle_ffj3_traj = []
time_rfj3_traj = []
angle_rfj3_traj = []
# Plot trajectories
plt.subplot(3, 2, 1)
plt.plot(time, self.ffj3_actual, 'black', label="Actual traj")
plt.plot(time, self.ffj3_desired, 'green', label="Desired traj")
plt.ylabel('FFJ3 Actual position (rad)')
self.plot_settings(plt)
plt.ylim(ymax=2.2, ymin=-0.1)
plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3, ncol=5,
mode="expand", borderaxespad=0., prop={'size': 8})
plt.subplot(3, 2, 2)
plt.plot(time, self.rfj3_actual, 'black', label="Actual traj")
plt.plot(time, self.rfj3_desired, 'green', label="Desired traj")
plt.ylabel('RFJ3 Actual position (rad)')
self.plot_settings(plt)
plt.ylim(ymax=2.2, ymin=-0.1)
plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3, ncol=4,
mode="expand", borderaxespad=0., prop={'size': 8})
plt.subplot(3, 2, 3)
plt.plot(time, self.ffj3_vel_actual, 'black', label="Actual traj")
plt.plot(time, self.ffj3_vel_desired, 'green', label="Desired traj")
plt.ylabel('FFJ3 Actual velocity')
plt.xlim(xmax=16, xmin=0)
self.plot_settings(plt)
plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3, ncol=4,
mode="expand", borderaxespad=0., prop={'size': 8})
plt.subplot(3, 2, 4)
plt.plot(time, self.rfj3_vel_actual, 'black', label="Actual traj")
plt.plot(time, self.rfj3_vel_desired, 'green', label="Desired traj")
plt.ylabel('RFJ3 Actual velocity')
plt.xlim(xmax=16, xmin=0)
self.plot_settings(plt)
plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3, ncol=4,
mode="expand", borderaxespad=0., prop={'size': 8})
plt.subplot(3, 2, 5)
plt.plot(time, self.ffj3_vel_error, 'red', label="Error traj")
plt.ylabel('FFJ3 Velocity Error')
plt.xlim(xmax=16, xmin=0)
self.plot_settings(plt)
plt.subplot(3, 2, 6)
plt.plot(time, self.rfj3_vel_error, 'red', label="Error traj")
plt.ylabel('RFJ3 Velocity Error')
plt.xlim(xmax=16, xmin=0)
self.plot_settings(plt)
plt.subplots_adjust(left=0.07, right=0.96, bottom=0.083, top=0.90)
plt.show()
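# Usage sketch (node name is an illustrative assumption; trajectory goals are
# sent elsewhere via the follow_joint_trajectory action interface):
#   rospy.init_node("partial_traj_listener")
#   listener = PartialTrajListener()
#   rospy.sleep(20.)  # let the trajectories run while the callbacks record data
#   listener.finish_goals = True
#   listener.graph()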
|
{"hexsha": "d3b8d2ce92d06f7389652b8e5e4a468d606c62f0", "size": 7572, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/my_arm/partial_trajectory_listener.py", "max_stars_repo_name": "Tadinu/my_arm", "max_stars_repo_head_hexsha": "ac4fb295ddad7c7ee999a03d2e7d229802b64226", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2021-02-20T15:59:42.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-25T04:04:21.000Z", "max_issues_repo_path": "scripts/my_arm/partial_trajectory_listener.py", "max_issues_repo_name": "Tadinu/my_arm", "max_issues_repo_head_hexsha": "ac4fb295ddad7c7ee999a03d2e7d229802b64226", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-04-14T04:12:48.000Z", "max_issues_repo_issues_event_max_datetime": "2021-04-14T04:12:48.000Z", "max_forks_repo_path": "scripts/my_arm/partial_trajectory_listener.py", "max_forks_repo_name": "Tadinu/my_arm", "max_forks_repo_head_hexsha": "ac4fb295ddad7c7ee999a03d2e7d229802b64226", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2019-10-29T12:41:16.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-22T16:38:27.000Z", "avg_line_length": 43.5172413793, "max_line_length": 120, "alphanum_fraction": 0.6108029583, "include": true, "reason": "import numpy", "num_tokens": 1873}
|
import os
import cflearn
import numpy as np
from cfdata.tabular import TabularDataset
def test_onnx() -> None:
logging_folder = "__test_onnx__"
def _core(dataset: TabularDataset) -> None:
x, y = dataset.xy
m = cflearn.make(
model,
verbose_level=0,
use_tqdm=False,
min_epoch=1,
num_epoch=2,
max_epoch=4,
logging_folder=logging_folder,
)
m.fit(x, y)
predictions = m.predict(x)
predictor_folder = os.path.join(logging_folder, "test_onnx")
cflearn.Pack.pack(m, predictor_folder)
predictor = cflearn.Pack.get_predictor(predictor_folder)
atol = rtol = 1e-2 if model == "tree_dnn" else 1e-4
assert np.allclose(predictions, predictor.predict(x), atol=atol, rtol=rtol)
cflearn._rmtree(logging_folder)
reg_models = ["linear", "fcnn", "tree_dnn"]
for model in reg_models:
_core(TabularDataset.boston())
clf_models = reg_models + ["nnb", "ndt"]
for model in clf_models:
_core(TabularDataset.iris())
if __name__ == "__main__":
test_onnx()
|
{"hexsha": "811bac5404e0fcbfe2683e48c4b0209592f8460b", "size": 1150, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/usages/test_onnx.py", "max_stars_repo_name": "beyondacm/carefree-learn", "max_stars_repo_head_hexsha": "a9c69141163c04a16aba8317febe7a66218510b6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/usages/test_onnx.py", "max_issues_repo_name": "beyondacm/carefree-learn", "max_issues_repo_head_hexsha": "a9c69141163c04a16aba8317febe7a66218510b6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/usages/test_onnx.py", "max_forks_repo_name": "beyondacm/carefree-learn", "max_forks_repo_head_hexsha": "a9c69141163c04a16aba8317febe7a66218510b6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-01-04T02:23:00.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-04T02:23:00.000Z", "avg_line_length": 27.380952381, "max_line_length": 83, "alphanum_fraction": 0.6156521739, "include": true, "reason": "import numpy", "num_tokens": 299}
|
import argparse
import importlib
import os
import sys
import numpy as np
import torch as th
import yaml
from stable_baselines3.common.utils import set_random_seed
import utils.import_envs # noqa: F401 pylint: disable=unused-import
from utils import ALGOS, create_test_env, get_saved_hyperparams
from utils.exp_manager import ExperimentManager
from utils.utils import StoreDict, get_model_path
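# Example invocation (the values shown are this script's own defaults):
#   python enjoy.py --algo ppo --env CartPole-v1 -f rl-trained-agents -n 1000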
def main(): # noqa: C901
parser = argparse.ArgumentParser()
parser.add_argument("--env", help="environment ID", type=str, default="CartPole-v1")
parser.add_argument("-f", "--folder", help="Log folder", type=str, default="rl-trained-agents")
parser.add_argument("--algo", help="RL Algorithm", default="ppo", type=str, required=False, choices=list(ALGOS.keys()))
parser.add_argument("-n", "--n-timesteps", help="number of timesteps", default=1000, type=int)
parser.add_argument("--num-threads", help="Number of threads for PyTorch (-1 to use default)", default=-1, type=int)
parser.add_argument("--n-envs", help="number of environments", default=1, type=int)
parser.add_argument("--exp-id", help="Experiment ID (default: 0: latest, -1: no exp folder)", default=0, type=int)
parser.add_argument("--verbose", help="Verbose mode (0: no output, 1: INFO)", default=1, type=int)
parser.add_argument(
"--no-render", action="store_true", default=False, help="Do not render the environment (useful for tests)"
)
parser.add_argument("--deterministic", action="store_true", default=False, help="Use deterministic actions")
parser.add_argument("--device", help="PyTorch device to be use (ex: cpu, cuda...)", default="auto", type=str)
parser.add_argument(
"--load-best", action="store_true", default=False, help="Load best model instead of last model if available"
)
parser.add_argument(
"--load-checkpoint",
type=int,
help="Load checkpoint instead of last model if available, "
"you must pass the number of timesteps corresponding to it",
)
parser.add_argument(
"--load-last-checkpoint",
action="store_true",
default=False,
help="Load last checkpoint instead of last model if available",
)
parser.add_argument("--stochastic", action="store_true", default=False, help="Use stochastic actions")
parser.add_argument(
"--norm-reward", action="store_true", default=False, help="Normalize reward if applicable (trained with VecNormalize)"
)
parser.add_argument("--seed", help="Random generator seed", type=int, default=0)
parser.add_argument("--reward-log", help="Where to log reward", default="", type=str)
parser.add_argument(
"--gym-packages",
type=str,
nargs="+",
default=[],
help="Additional external Gym environment package modules to import (e.g. gym_minigrid)",
)
parser.add_argument(
"--env-kwargs", type=str, nargs="+", action=StoreDict, help="Optional keyword argument to pass to the env constructor"
)
args = parser.parse_args()
    # Going through custom gym packages to let them register in the global registry
for env_module in args.gym_packages:
importlib.import_module(env_module)
env_id = args.env
algo = args.algo
folder = args.folder
_, model_path, log_path = get_model_path(
args.exp_id,
folder,
algo,
env_id,
args.load_best,
args.load_checkpoint,
args.load_last_checkpoint,
)
print(f"Loading {model_path}")
    # Off-policy algorithms only support one env for now
off_policy_algos = ["qrdqn", "dqn", "ddpg", "sac", "her", "td3", "tqc"]
if algo in off_policy_algos:
args.n_envs = 1
set_random_seed(args.seed)
if args.num_threads > 0:
if args.verbose > 1:
print(f"Setting torch.num_threads to {args.num_threads}")
th.set_num_threads(args.num_threads)
is_atari = ExperimentManager.is_atari(env_id)
stats_path = os.path.join(log_path, env_id)
hyperparams, stats_path = get_saved_hyperparams(stats_path, norm_reward=args.norm_reward, test_mode=True)
# load env_kwargs if existing
env_kwargs = {}
args_path = os.path.join(log_path, env_id, "args.yml")
if os.path.isfile(args_path):
with open(args_path) as f:
loaded_args = yaml.load(f, Loader=yaml.UnsafeLoader) # pytype: disable=module-attr
if loaded_args["env_kwargs"] is not None:
env_kwargs = loaded_args["env_kwargs"]
# overwrite with command line arguments
if args.env_kwargs is not None:
env_kwargs.update(args.env_kwargs)
log_dir = args.reward_log if args.reward_log != "" else None
env = create_test_env(
env_id,
n_envs=args.n_envs,
stats_path=stats_path,
seed=args.seed,
log_dir=log_dir,
should_render=not args.no_render,
hyperparams=hyperparams,
env_kwargs=env_kwargs,
)
kwargs = dict(seed=args.seed)
if algo in off_policy_algos:
# Dummy buffer size as we don't need memory to enjoy the trained agent
kwargs.update(dict(buffer_size=1))
# Check if we are running python 3.8+
    # we need to patch models saved under python 3.6/3.7 to load them
newer_python_version = sys.version_info.major == 3 and sys.version_info.minor >= 8
custom_objects = {}
if newer_python_version:
custom_objects = {
"learning_rate": 0.0,
"lr_schedule": lambda _: 0.0,
"clip_range": lambda _: 0.0,
}
model = ALGOS[algo].load(model_path, env=env, custom_objects=custom_objects, device=args.device, **kwargs)
obs = env.reset()
# Deterministic by default except for atari games
    stochastic = args.stochastic or (is_atari and not args.deterministic)
deterministic = not stochastic
episode_reward = 0.0
episode_rewards, episode_lengths = [], []
ep_len = 0
# For HER, monitor success rate
successes = []
lstm_states = None
episode_start = np.ones((env.num_envs,), dtype=bool)
try:
for _ in range(args.n_timesteps):
action, lstm_states = model.predict(
obs,
state=lstm_states,
episode_start=episode_start,
deterministic=deterministic,
)
obs, reward, done, infos = env.step(action)
episode_start = done
if not args.no_render:
env.render("human")
episode_reward += reward[0]
ep_len += 1
if args.n_envs == 1:
# For atari the return reward is not the atari score
# so we have to get it from the infos dict
if is_atari and infos is not None and args.verbose >= 1:
episode_infos = infos[0].get("episode")
if episode_infos is not None:
print(f"Atari Episode Score: {episode_infos['r']:.2f}")
print("Atari Episode Length", episode_infos["l"])
if done and not is_atari and args.verbose > 0:
# NOTE: for env using VecNormalize, the mean reward
# is a normalized reward when `--norm_reward` flag is passed
print(f"Episode Reward: {episode_reward:.2f}")
print("Episode Length", ep_len)
episode_rewards.append(episode_reward)
episode_lengths.append(ep_len)
episode_reward = 0.0
ep_len = 0
# Reset also when the goal is achieved when using HER
if done and infos[0].get("is_success") is not None:
if args.verbose > 1:
print("Success?", infos[0].get("is_success", False))
if infos[0].get("is_success") is not None:
successes.append(infos[0].get("is_success", False))
episode_reward, ep_len = 0.0, 0
except KeyboardInterrupt:
pass
if args.verbose > 0 and len(successes) > 0:
print(f"Success rate: {100 * np.mean(successes):.2f}%")
if args.verbose > 0 and len(episode_rewards) > 0:
print(f"{len(episode_rewards)} Episodes")
print(f"Mean reward: {np.mean(episode_rewards):.2f} +/- {np.std(episode_rewards):.2f}")
if args.verbose > 0 and len(episode_lengths) > 0:
print(f"Mean episode length: {np.mean(episode_lengths):.2f} +/- {np.std(episode_lengths):.2f}")
env.close()
if __name__ == "__main__":
main()
|
{"hexsha": "d4b672c14fd6ff9e748c50d24a35c9a598c01bbd", "size": 8652, "ext": "py", "lang": "Python", "max_stars_repo_path": "enjoy.py", "max_stars_repo_name": "HumanCompatibleAI/rl-baselines3-zoo", "max_stars_repo_head_hexsha": "9ef5fba185b8f5c8e11823b1e1de1735675ecc32", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "enjoy.py", "max_issues_repo_name": "HumanCompatibleAI/rl-baselines3-zoo", "max_issues_repo_head_hexsha": "9ef5fba185b8f5c8e11823b1e1de1735675ecc32", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "enjoy.py", "max_forks_repo_name": "HumanCompatibleAI/rl-baselines3-zoo", "max_forks_repo_head_hexsha": "9ef5fba185b8f5c8e11823b1e1de1735675ecc32", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.625, "max_line_length": 126, "alphanum_fraction": 0.6295654184, "include": true, "reason": "import numpy", "num_tokens": 2023}
|
import numpy as np
from scipy.special import logsumexp
from scipy.sparse import lil_matrix
import sys
from utils.corpus import CorpusHelper
class HMMPOSTagger:
"""
HMM 词性标注模型,实现模型的定义,训练和预测等功能
HMM 参数:
初始状态概率向量 pi,
状态转移概率矩阵 A,
观测概率矩阵 B
"""
def __init__(self, corpus_helper, eps=None):
"""
param: corpus_helper,语料库辅助类实例,类别CorpusHelper
param: eps, 极小值,用于平滑log计算,类别float
"""
self.corpus_helper = corpus_helper
self.n_tokens = len(corpus_helper.token2id)
self.n_tags = len(corpus_helper.tag2id)
        self.pi = np.zeros(self.n_tags, dtype=float)
        self.A = np.zeros((self.n_tags, self.n_tags), dtype=float)
        self.B = np.zeros((self.n_tags, self.n_tokens), dtype=float)
self.eps = np.finfo(float).eps if eps is None else eps
def train(self):
"""
训练模型,完成语料库的统计工作
"""
print("开始训练")
last_tag_id = None # 记录前一个tag,若其值为None则表明当前为新句开始。
for token_id, tag_id in self.corpus_helper.read_lines2id():
# 无论如何都要更新B的统计
self.B[tag_id, token_id] += 1
if last_tag_id is None:
# 若当前是新句子的开始,需要更新pi
self.pi[tag_id] += 1
else:
# 否则,更新A
self.A[last_tag_id, tag_id] += 1
# 更新上一时刻tag
last_tag_id = None if self.corpus_helper.is_end_tokenid(token_id) else tag_id
# 转化为概率
self.pi = self.pi / np.sum(self.pi)
self.A = self.A / np.sum(self.A, axis=1, keepdims=True)
self.B = self.B / np.sum(self.B, axis=1, keepdims=True)
print("训练结束")
print("pi:{}".format(self.pi))
print("A[0,:]:\n{}".format(self.A[0]))
def _log(self, p):
"""
log 函数,考虑平滑
"""
return np.log(p + self.eps)
def decode(self, sentence):
"""
给定句子,使用Viterbi算法找到最佳词性标注序列
param: sentence, 输入句子, 类型string
return:词性标注序列, 类型list[string]
"""
if not sentence:
print("请输入句子")
return ""
# (这里没有考虑未登录词的情况)
token_ids = [self.corpus_helper.token2id[token] for token in sentence.split(" ")]
n_tags, n_tokens = self.n_tags, len(token_ids)
A, B = self.A, self.B
        # Initialize the DP score matrix and the backtrace matrix for the best path
        dp = np.zeros((n_tags, n_tokens), dtype=float)
        traces = np.zeros((n_tags, n_tokens), dtype=int)
        # Initialize scores for the first token
        for i in range(n_tags):
            dp[i, 0] = self._log(self.pi[i]) + self._log(self.B[i, token_ids[0]])
        # Dynamic-programming update from the second token onward
        for t in range(1, n_tokens):
            token_id = token_ids[t]  # current token id
            for i in range(n_tags):
                dp[i, t] = -sys.maxsize  # start from the smallest possible value
                for k in range(n_tags):
                    score = dp[k, t - 1] + self._log(A[k, i]) + self._log(B[i, token_id])
                    if score > dp[i, t]:
                        dp[i, t] = score
                        traces[i, t] = k
        # Final tag of the best path in dp
        last_best_tag = np.argmax(dp[:, -1])
        # Backtrace the best path
decoded = [0] * n_tokens
decoded[-1] = last_best_tag
for t in range(n_tokens - 1, 0, -1):
last_best_tag = traces[last_best_tag, t]
decoded[t - 1] = last_best_tag
pos_tags = self.corpus_helper.id_to_tags(decoded)
return pos_tags
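# Usage sketch (CorpusHelper construction is an assumption, see utils.corpus;
# the input must consist of space-separated, in-vocabulary tokens):
#   helper = CorpusHelper(...)
#   tagger = HMMPOSTagger(helper)
#   tagger.train()
#   tags = tagger.decode("I love NLP")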
|
{"hexsha": "47a484d22d824abdd85bf8cd4eec4257a415416a", "size": 3447, "ext": "py", "lang": "Python", "max_stars_repo_path": "CodePratices/MachineLearning/pos_tagging_hmm_project/models/hmm.py", "max_stars_repo_name": "Coolgiserz/NLP_starter", "max_stars_repo_head_hexsha": "8f6be48d1971876767a24731b95c729e03dca1c9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "CodePratices/MachineLearning/pos_tagging_hmm_project/models/hmm.py", "max_issues_repo_name": "Coolgiserz/NLP_starter", "max_issues_repo_head_hexsha": "8f6be48d1971876767a24731b95c729e03dca1c9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "CodePratices/MachineLearning/pos_tagging_hmm_project/models/hmm.py", "max_forks_repo_name": "Coolgiserz/NLP_starter", "max_forks_repo_head_hexsha": "8f6be48d1971876767a24731b95c729e03dca1c9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-11-13T14:40:34.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-13T14:40:34.000Z", "avg_line_length": 29.9739130435, "max_line_length": 89, "alphanum_fraction": 0.544821584, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1134}
|
"""
ABC Class of Simulator which generates dummy data.
"""
from abc import ABC, abstractmethod
from typing import Any, Dict
import numpy as np
__all__ = [
"_Simulator"
]
class _Simulator(ABC):
"""
ABC Class of DoE Simulator
Methods
-------
simulate(exmatrix: np.ndarray) -> np.ndarray
"""
@abstractmethod
def __init__(self, **kwargs: Dict[str, Any]):
"""
Parameters
----------
kwargs: Dict[str, Any]
Parameters of each simulator
"""
pass
@abstractmethod
def simulate(self, exmatrix: np.ndarray) -> np.ndarray:
"""
Generate mock data by experiment matrix
Parameters
----------
exmatrix: np.ndarray
Target experiment matrix (n_experiment x n_factor)
Returns
-------
result matrix: np.ndarray
Simulation result (n_experiment x 1)
"""
pass
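

if __name__ == "__main__":
    # Minimal sketch of a concrete simulator (illustrative only, not part of
    # the package API; the linear response model below is an assumption):
    class _LinearSimulator(_Simulator):
        def __init__(self, coef: np.ndarray):
            # Coefficients of a simple linear response surface
            self.coef = coef

        def simulate(self, exmatrix: np.ndarray) -> np.ndarray:
            # One response column per experiment: weighted sum of factor levels
            return (exmatrix @ self.coef).reshape(-1, 1)

    sim = _LinearSimulator(np.array([1.0, -2.0, 0.5]))
    print(sim.simulate(np.eye(3)))  # -> [[1.0], [-2.0], [0.5]]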
|
{"hexsha": "2936e4e455b7c20e9513f80205b0dd211e179129", "size": 954, "ext": "py", "lang": "Python", "max_stars_repo_path": "tagupy/type/_abs_simulator.py", "max_stars_repo_name": "algebra-club/TaguPy", "max_stars_repo_head_hexsha": "1ff5a792f7c78cfb6741cf27659215fef287a1c1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-08-21T07:36:24.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-21T07:36:24.000Z", "max_issues_repo_path": "tagupy/type/_abs_simulator.py", "max_issues_repo_name": "algebra-club/TaguPy", "max_issues_repo_head_hexsha": "1ff5a792f7c78cfb6741cf27659215fef287a1c1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 29, "max_issues_repo_issues_event_min_datetime": "2021-08-15T18:12:58.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-12T14:48:17.000Z", "max_forks_repo_path": "tagupy/type/_abs_simulator.py", "max_forks_repo_name": "algebra-club/TaguPy", "max_forks_repo_head_hexsha": "1ff5a792f7c78cfb6741cf27659215fef287a1c1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 19.08, "max_line_length": 62, "alphanum_fraction": 0.5545073375, "include": true, "reason": "import numpy", "num_tokens": 202}
|
import os
from collections import defaultdict
from functools import lru_cache
from typing import Callable, Dict, List, Optional, Tuple
import networkx as nx
import numpy as np
import pytket
import pytket.cirq
from pytket.circuit import Node, Qubit
from pytket.passes import SequencePass, RoutingPass, PlacementPass
from pytket.predicates import CompilationUnit, ConnectivityPredicate
from pytket.routing import GraphPlacement
import cirq
import cirq.contrib.routing as ccr
import recirq
def calibration_data_to_graph(calib_dict: Dict) -> nx.Graph:
"""Take the calibration data in dictionary form and return a graph
representing the errors.
The edge weights are two_qubit_sycamore_gate_xeb_cycle_total_error.
The node weights are single_qubit_readout_p0_error
+ single_qubit_readout_p1_error.
"""
err_graph = nx.Graph()
for (q1, q2), err in calib_dict['two_qubit_sycamore_gate_xeb_cycle_total_error'].items():
err_graph.add_edge(q1, q2, weight=err[0])
for (q,), err in calib_dict['single_qubit_readout_p0_error'].items():
err_graph.nodes[q]['weight'] = err[0]
for (q,), err in calib_dict['single_qubit_readout_p1_error'].items():
err_graph.nodes[q]['weight'] += err[0]
return err_graph
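# --- Illustrative sketch (not part of the original module) ---
# Minimal example of the calibration-dictionary layout the function above
# expects, inferred from its lookups; the qubit labels and error values
# are made up for illustration.
def _calibration_graph_example():
    calib = {
        'two_qubit_sycamore_gate_xeb_cycle_total_error': {
            ('q0', 'q1'): [0.01],
        },
        'single_qubit_readout_p0_error': {('q0',): [0.02], ('q1',): [0.03]},
        'single_qubit_readout_p1_error': {('q0',): [0.04], ('q1',): [0.05]},
    }
    g = calibration_data_to_graph(calib)
    # g['q0']['q1']['weight'] == 0.01; g.nodes['q0']['weight'] == 0.06
    return g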
def _qubit_index_edges(device):
"""Helper function in `_device_to_tket_device`"""
dev_graph = ccr.xmon_device_to_graph(device)
for n1, n2 in dev_graph.edges:
yield Node('grid', n1.row, n1.col), Node('grid', n2.row, n2.col)
def _device_to_tket_device(device):
"""Custom function to turn a device into a pytket device.
This supports any device that supports `ccr.xmon_device_to_graph`.
"""
arc = pytket.routing.Architecture(
list(_qubit_index_edges(device))
)
return pytket.device.Device({}, {}, arc)
def tk_to_cirq_qubit(tk: Qubit):
"""Convert a tket Qubit to either a LineQubit or GridQubit.
"""
ind = tk.index
return (
cirq.LineQubit(ind[0])
if len(ind) == 1
else cirq.GridQubit(*ind)
)
def place_on_device(circuit: cirq.Circuit,
device: cirq.google.XmonDevice,
) -> Tuple[cirq.Circuit,
Dict[cirq.Qid, cirq.Qid],
Dict[cirq.Qid, cirq.Qid]]:
"""Place a circuit on an device.
Converts a circuit to a new circuit that respects the adjacency of a given
device and is equivalent to the given circuit up to qubit ordering.
Args:
circuit: The circuit to place on a grid.
device: The device to place the circuit on.
Returns:
routed_circuit: The new circuit
initial_map: Initial placement of qubits
final_map: The final placement of qubits after action of the circuit
"""
tk_circuit = pytket.cirq.cirq_to_tk(circuit)
tk_device = _device_to_tket_device(device)
unit = CompilationUnit(tk_circuit, [ConnectivityPredicate(tk_device)])
passes = SequencePass([
PlacementPass(GraphPlacement(tk_device)),
RoutingPass(tk_device)])
passes.apply(unit)
valid = unit.check_all_predicates()
if not valid:
raise RuntimeError("Routing failed")
initial_map = {tk_to_cirq_qubit(n1): tk_to_cirq_qubit(n2)
for n1, n2 in unit.initial_map.items()}
final_map = {tk_to_cirq_qubit(n1): tk_to_cirq_qubit(n2)
for n1, n2 in unit.final_map.items()}
routed_circuit = pytket.cirq.tk_to_cirq(unit.circuit)
return routed_circuit, initial_map, final_map
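# --- Illustrative sketch (not part of the original module) ---
# How place_on_device might be called. `cirq.google.Foxtail` is assumed
# to be an available XmonDevice in the cirq version this module targets;
# the circuit itself is arbitrary.
def _place_on_device_example():
    q = cirq.LineQubit.range(3)
    circuit = cirq.Circuit([cirq.CZ(q[0], q[2]), cirq.measure(*q)])
    return place_on_device(circuit, cirq.google.Foxtail)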
def path_weight(graph: nx.Graph, path,
include_node_weights=True) -> float:
"""Returns total weight of edges along a path.
Args:
graph: a nx.Graph object with specified edge weights
path: a list of nodes specifying a path on graph
        include_node_weights: whether to include node weights in the
            overall path weight for minimization (default=True)
Returns:
total weight of edges along the path
"""
if path is None:
return float('inf')
weight = 0
for i in range(len(path) - 1):
weight += graph[path[i]][path[i + 1]]['weight']
if include_node_weights and 'weight' in graph.nodes[path[0]]:
n = len(path)
for node in path:
weight += graph.nodes[node]['weight'] / n * 2
return weight
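# --- Illustrative sketch (not part of the original module) ---
# Worked example of the weighting above: edge weights are summed, and each
# node weight contributes (weight / n) * 2. The graph and values are
# arbitrary choices for illustration.
def _path_weight_example():
    g = nx.Graph()
    g.add_edge('a', 'b', weight=1.0)
    g.add_edge('b', 'c', weight=2.0)
    for node in 'abc':
        g.nodes[node]['weight'] = 0.3
    # edges: 1.0 + 2.0 = 3.0; nodes: 3 * (0.3 / 3) * 2 = 0.6
    return path_weight(g, ['a', 'b', 'c'])  # 3.6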
def min_weight_simple_paths_brute_force(
graph: nx.Graph,
weight_fun: Callable[[nx.Graph, List], float] = path_weight):
"""Find all simple paths of various lengths that has minimum total weight
using brute-force.
    The strategy is to look at all possible simple paths, calculate their
    path weights, and keep the best one. This works reasonably quickly for a
    sparse graph of size <= 25 (~10 seconds). Worst-case complexity is O(n!)
    for a complete graph.
Args:
graph: a networkx.Graph object with specified edge weights
weight_fun: a function that takes (graph, path) and gives a value
based on edge and node weights that we want to minimize
(default: uses path_weight)
Returns:
a dictionary in the form
{n: path containing n nodes with min weight, or None if doesn't exist}
"""
best_weights = defaultdict(lambda: float('inf'))
best_paths = {}
nodelist = list(graph.nodes())
for i in range(len(nodelist) - 1):
for j in range(i + 1, len(nodelist)):
for path in nx.all_simple_paths(graph, nodelist[i], nodelist[j]):
n = len(path)
my_weight = weight_fun(graph, path)
if my_weight < best_weights[n]:
best_paths[n] = path
best_weights[n] = my_weight
return best_paths
def min_weight_simple_path_brute_force(
graph: nx.Graph,
n: int,
weight_fun: Callable[[nx.Graph, List], float] = path_weight):
return min_weight_simple_paths_brute_force(graph, weight_fun).get(n, None)
def join_path(path1, path2):
"""Join two paths, assuming that they share an end.
A path is a list of nodes.
"""
if path1[-1] == path2[0]:
return path1 + path2[1:]
elif path2[-1] == path1[0]:
return path2 + path1[1:]
elif path1[-1] == path2[-1]:
        return path1 + path2[-2::-1]
elif path1[0] == path2[0]:
return path2[:0:-1] + path1
raise ValueError('Paths cannot be joined as they do not share any ends')
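# --- Illustrative examples (not part of the original module) ---
# join_path([1, 2, 3], [3, 4])  -> [1, 2, 3, 4]   (tail meets head)
# join_path([3, 4], [1, 2, 3])  -> [1, 2, 3, 4]   (head meets tail)
# join_path([1, 2, 3], [4, 3])  -> [1, 2, 3, 4]   (tail meets tail)
# join_path([3, 2, 1], [3, 4])  -> [4, 3, 2, 1]   (head meets head)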
def min_weight_simple_path_greedy(
graph: nx.Graph,
n: int,
weight_fun: Callable[[nx.Graph, List], float] = path_weight):
"""A greedy algorithm that tries to find a simple path consisting of n
nodes in the given graph, with minimal total weight
Args:
graph: a nx.Graph object with specified edge weights
n: desired number of nodes in the simple path
weight_fun: a function that takes (graph, path) and gives a value
based on edge and node weights that we want to minimize
(default: uses path_weight)
Returns:
a list of nodes that describes the path, or None if not found
"""
def _grow_path_lowest_weight(path, partial_graph):
# grow path by one neighboring edge of lowest weight on partial_graph
# assuming partial_graph contains no edge between head and tail of path
adjacent_edges = sorted(list(partial_graph.edges([path[0], path[-1]],
data='weight')), key=lambda e: e[2])
if len(adjacent_edges) == 0:
return path
u, v, _ = adjacent_edges[0]
if path[0] == u or path[-1] == u:
partial_graph.remove_node(u)
else:
partial_graph.remove_node(v)
return join_path(path, [u, v])
edges_sorted = sorted(list(graph.edges.data('weight')), key=lambda e: e[2])
# keep only the first half since they have the lower weights, as it
# seems unnecessary to start the greedy search with high-weight edges
edges_sorted = edges_sorted[:len(edges_sorted) // 2]
best_weight = float('inf')
best_path = None
for e in edges_sorted:
subgraph = graph.copy()
path = [e[0], e[1]] # start with the given edge
subgraph.remove_edge(e[0], e[1])
while len(path) < n:
# tries to grow the path by one neighboring edge of lowest weight
path2 = _grow_path_lowest_weight(path, subgraph)
if path2 == path: # can't grow anymore
break
else:
path = path2
if subgraph.has_edge(path[0], path[-1]):
subgraph.remove_edge(path[0], path[-1])
my_weight = weight_fun(graph, path)
if len(path) == n and my_weight < best_weight:
best_path = path
best_weight = my_weight
return best_path
def make_simple_path(graph: nx.Graph, n: int):
"""Make a simple path on a graph by starting with lowest degree node
and adding nodes with low degree.
For a given graph object, this is deterministic.
Args:
graph: a nx.Graph object
n: desired number of nodes in the simple path
Returns:
a list of nodes that describes the path, or None if no such path exists
"""
subgraph = graph.copy()
path = n * [None]
degree_dict = dict(subgraph.degree())
path[0] = min(degree_dict, key=degree_dict.get)
len_so_far = 1
while len_so_far < n:
neighbors = sorted([(v, dv) for v, dv
in subgraph.degree(subgraph.neighbors(path[len_so_far - 1]))],
key=lambda x: x[1])
if len(neighbors) == 0:
return None # dead end, can't add any more nodes
next_node = neighbors[0][0]
for i in range(len(neighbors)):
            if neighbors[i][1] >= 2:  # prefer a neighbor with degree >= 2
                next_node = neighbors[i][0]  # so the path can keep growing
break
path[len_so_far] = next_node
subgraph.remove_node(path[len_so_far - 1])
len_so_far += 1
return path
def random_simple_path(graph: nx.Graph,
n: int,
num_tries=10):
"""Tries to find a simple path by growing it from a random edge.
Will use make_simple_path if it fails.
Args:
graph: a networkx.Graph object with specified edge weights
n: desired number of nodes in the simple path
num_tries: number of tries before resorting to make_simple_path
Returns:
a list of nodes that describes the path, or None if no such path exists
"""
# first try the deterministic algorithm to see if a simple path of n nodes
# even exists
default_path = make_simple_path(graph, n)
if default_path is None:
return None
def _grow_simple_path(path, subgraph):
# grow path by adding one neighboring edge
adjacent_edges = list(subgraph.edges([path[0], path[-1]]))
if len(adjacent_edges) == 0: # no neighboring edge
return path
# randomly choose a neighboring edge
u, v = adjacent_edges[np.random.randint(len(adjacent_edges))]
if path[0] == u or path[-1] == u:
subgraph.remove_node(u)
else:
subgraph.remove_node(v)
return join_path(path, [u, v])
edges = list(graph.edges)
for _ in range(num_tries):
u, v = edges[np.random.randint(len(edges))]
path = [u, v]
subgraph = graph.copy()
subgraph.remove_edge(u, v)
while len(path) < n:
new_path = _grow_simple_path(path, subgraph)
if new_path == path:
break
path = new_path
if subgraph.has_edge(path[0], path[-1]):
subgraph.remove_edge(path[0], path[-1])
if len(path) == n:
return path
# all tries have failed
return default_path
class Snake:
"""A Snake is a simple path that can wiggle, slither, or reassemble.
    Used in the simulated-annealing algorithm for finding simple paths with
    minimum total weight.
"""
def __init__(self,
graph: nx.Graph,
path: List,
weight_fun: Callable[[nx.Graph, List], float] = path_weight):
if not nx.is_simple_path(graph, path):
raise ValueError("Invalid input: path must be simple")
self.graph = graph
self.path = path
self.weight_fun = weight_fun
def wiggle(self):
"""Randomly change a node on the path to next-nearest neighbor,
and returns a new Snake if the new path is a simple path.
For example, on a grid graph, wiggling means:
+--+-- +--
| => |
--+ --+--+
"""
node_index_list = list(range(len(self.path)))
np.random.shuffle(node_index_list)
for index in node_index_list:
node = self.path[index]
neighbors = nx.single_source_shortest_path_length(self.graph,
node, cutoff=2)
for q in neighbors:
# check if changing path[index] to q still gives a simple path
if (q not in self.path
and neighbors[q] == 2
and (index == 0 or self.graph.has_edge(q, self.path[index - 1]))
and (index == len(self.path) - 1
or self.graph.has_edge(q, self.path[index + 1]))
):
new_path = self.path.copy()
new_path[index] = q
return Snake(self.graph, new_path, self.weight_fun)
return self # wasn't able to wiggle
def slither(self, head=True):
"""Tries to move forward to a neighboring node.
Set head=False to move backwards."""
if not head:
self.path.reverse()
head_neighbors = list(self.graph.neighbors(self.path[0]))
np.random.shuffle(head_neighbors)
for q in head_neighbors:
if q not in self.path or q == self.path[-1]:
test_path = self.path.copy()
test_path[1:] = test_path[:-1]
test_path[0] = q
return Snake(self.graph, test_path, self.weight_fun)
if not head:
# reverse back so you can try other moves in same orientation
self.path.reverse()
return self # wasn't able to slither
def reassemble(self, head=True):
"""If the head is near a part of the main body, break the path
and reassemble to get a new head.
Set head=False to reassemble at the tail.
For example, on a grid graph, reassembling means:
+--+--+ +--+--+
| | => |
--+ +--+ --+--+--+
"""
if not head:
self.path.reverse()
head_neighbors = list(self.graph.neighbors(self.path[0]))
np.random.shuffle(head_neighbors)
for q in head_neighbors:
if q != self.path[1] and q in self.path:
new_path = self.path.copy()
q_index = new_path.index(q)
new_path[:q_index] = reversed(new_path[:q_index])
return Snake(self.graph, new_path, self.weight_fun)
if not head:
# reverse back so you can try other moves in same orientation
self.path.reverse()
return self # wasn't able to reassemble
def random_move(self):
"""Randomly wiggle, slither, or reassemble."""
coin = np.random.randint(3)
if coin == 0:
return self.wiggle()
elif coin == 1:
            return self.slither(head=np.random.choice([True, False]))
        else:
            return self.reassemble(head=np.random.choice([True, False]))
def force_random_move(self):
"""Force a random move among wiggle, slither forward or backward,
or reassemble at head or tail.
Returns self only if there is no move possible.
"""
moves = [self.wiggle,
self.slither,
self.reassemble,
lambda: self.slither(head=False),
lambda: self.reassemble(head=False)]
np.random.shuffle(moves)
for mv in moves:
new_snake = mv()
if new_snake is not self:
return new_snake
return self # wasn't able to make any moves
def total_weight(self):
"""Returns the total weight of path"""
return self.weight_fun(self.graph, self.path)
def to_path(self):
return self.path
def min_weight_simple_path_anneal(
graph: nx.Graph,
n: int,
start_path=None,
anneal_schedule: Optional[np.array] = None,
weight_fun: Callable[[nx.Graph, List], float] = path_weight):
"""A simulated-annealing algorithm for finding a simple path consisting of
n nodes in the given graph, with minimal total weight
Args:
graph: a networkx.Graph object with specified edge weights
n: desired number of nodes in the simple path
start_path: a path serving as starting point of simulated annealing
(default is None, in which case uses random_simple_path)
anneal_schedule: an array of temperature values used for annealing.
The number of elements in the array is the number of steps.
(default is None, in which case uses a linear schedule)
Returns:
a list of nodes that describes the path,
or None if no simple path is found
"""
if n < 2:
raise ValueError('n needs to be >= 2')
    if start_path is None:
        start_path = random_simple_path(graph, n)
        if start_path is None:
            return None
    my_snake = Snake(graph, start_path, weight_fun)
my_E = my_snake.total_weight()
best_E = my_E
best_snake = my_snake
if anneal_schedule is None:
weights = [w for u, v, w in graph.edges(data='weight')]
T_max = max(weights) * 2
T_min = min(weights) / 2
anneal_schedule = np.linspace(T_max, T_min, 3 * n * len(graph))
for T in anneal_schedule:
new_snake = my_snake.force_random_move()
if new_snake is my_snake:
# stuck as no move is possible
break
new_E = new_snake.total_weight()
if new_E <= best_E:
best_snake = new_snake
best_E = new_E
if np.exp(-(new_E - my_E) / T) >= np.random.rand():
my_snake = new_snake
my_E = new_E
return best_snake.to_path()
def min_weight_simple_paths_mst(
graph: nx.Graph,
weight_fun: Callable[[nx.Graph, List], float] = path_weight):
"""A heuristic algorithm to find minimal weight simple paths by
constructing the minimum spanning tree (MST).
    This works best for simple paths of short lengths.
"""
mst = nx.minimum_spanning_tree(graph)
path_of_node_pairs = dict(nx.all_pairs_shortest_path(mst))
best_paths = {}
for u in path_of_node_pairs:
for v in path_of_node_pairs[u]:
n = len(path_of_node_pairs[u][v])
if n < 2:
continue
if (n not in best_paths
or weight_fun(mst, best_paths[n])
> weight_fun(mst, path_of_node_pairs[u][v])):
best_paths[n] = path_of_node_pairs[u][v]
return best_paths
def min_weight_simple_path_mst(graph: nx.Graph, n: int,
weight_func: Callable[[nx.Graph, List], float] = path_weight):
return min_weight_simple_paths_mst(graph, weight_func).get(n, None)
def min_weight_simple_path_mixed_strategy(
graph: nx.Graph,
n: int,
num_restarts=10,
weight_fun: Callable[[nx.Graph, List], float] = path_weight):
"""Find a simple path of minimal weight on a graph using a mixed strategy.
    We use the better of the MST-based and greedy algorithms to generate a good
starting point, and then further optimize using simulated annealing
with restarts.
Args:
graph: a networkx.Graph object with specified edge weights
n: desired number of nodes in the simple path
num_restarts: number of restarts in simulated annealing
Returns:
a list of nodes that describes the best simple path found,
or None if no simple path is found
"""
paths_mst = min_weight_simple_paths_mst(graph)
start_path = paths_mst.get(n, None)
path_greedy = min_weight_simple_path_greedy(graph, n)
if weight_fun(graph, path_greedy) < weight_fun(graph, start_path):
start_path = path_greedy
best_path = start_path
for _ in range(num_restarts):
path = min_weight_simple_path_anneal(graph, n, start_path=start_path)
if weight_fun(graph, path) < weight_fun(graph, best_path):
best_path = path
return best_path
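# --- Illustrative sketch (not part of the original module) ---
# Minimal comparison of the heuristics above on a small grid graph with
# random edge weights; the graph size, seed, and n are arbitrary choices.
def _placement_strategies_demo(n=5, seed=0):
    rng = np.random.RandomState(seed)
    g = nx.grid_2d_graph(4, 4)
    for u, v in g.edges:
        g[u][v]['weight'] = rng.uniform(0.01, 0.1)
    for fn in (min_weight_simple_path_greedy,
               min_weight_simple_path_mst,
               min_weight_simple_path_anneal,
               min_weight_simple_path_mixed_strategy):
        path = fn(g, n)
        print(fn.__name__, path_weight(g, path))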
@lru_cache()
def _get_device_calibration(device_name: str):
"""Get device calibration. Use an LRU cache to avoid repeated calls to
the web interface. It's possible this is not what you want.
TODO: move to recirq.engine_utils.
"""
processor_id = recirq.get_processor_id_by_device_name(device_name)
if processor_id is None:
# TODO: https://github.com/quantumlib/ReCirq/issues/14
device_obj = recirq.get_device_obj_by_name(device_name)
dummy_graph = ccr.gridqubits_to_graph_device(device_obj.qubits)
nx.set_edge_attributes(dummy_graph, name='weight', values=0.01)
return dummy_graph
engine = cirq.google.Engine(project_id=os.environ['GOOGLE_CLOUD_PROJECT'])
calibration = engine.get_latest_calibration(processor_id)
err_graph = calibration_data_to_graph(calibration)
return err_graph
PLACEMENT_STRATEGIES = {
'brute_force': min_weight_simple_path_brute_force,
'random': random_simple_path,
'greedy': min_weight_simple_path_greedy,
'anneal': min_weight_simple_path_anneal,
'mst': min_weight_simple_path_mst,
'mixed': min_weight_simple_path_mixed_strategy,
}
def place_line_on_device(
device_name: str,
n: int,
line_placement_strategy: str,
err_graph=None,
) -> List[cirq.GridQubit]:
if line_placement_strategy not in PLACEMENT_STRATEGIES.keys():
raise ValueError(f"Unknown line placement strategy {line_placement_strategy}")
if err_graph is None:
err_graph = _get_device_calibration(device_name)
return PLACEMENT_STRATEGIES[line_placement_strategy](err_graph, n)
|
{"hexsha": "76f39b090f9b3b310051af51f99884b3941754d3", "size": 22863, "ext": "py", "lang": "Python", "max_stars_repo_path": "recirq/qaoa/placement.py", "max_stars_repo_name": "PawelPamula/ReCirq", "max_stars_repo_head_hexsha": "79a351310cd98f67524a9df0c4ef9f300bf9eea4", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "recirq/qaoa/placement.py", "max_issues_repo_name": "PawelPamula/ReCirq", "max_issues_repo_head_hexsha": "79a351310cd98f67524a9df0c4ef9f300bf9eea4", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "recirq/qaoa/placement.py", "max_forks_repo_name": "PawelPamula/ReCirq", "max_forks_repo_head_hexsha": "79a351310cd98f67524a9df0c4ef9f300bf9eea4", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.6934749621, "max_line_length": 93, "alphanum_fraction": 0.6187289507, "include": true, "reason": "import numpy,import networkx", "num_tokens": 5338}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Create a plot of the Roche potential for a simple binary system.
"""
import numpy as np
from constants import PI, MSOL, G
from matplotlib import pyplot as plt
def calc_orbital_separation(
period: float, m_star: float, m_seco: float
) -> float:
"""
    Calculate the orbital separation a for a binary system given the orbital
    period and masses, using Kepler's third law.
The period should be given in hours.
Parameters
----------
period: float
The period of the binary system
m_star: float
The mass of the primary (most compact) object in the system
m_seco: float
The mass of the secondary object in the system
Returns
-------
    a: float
        The orbital separation of the binary system in m.
"""
    period *= 3600  # convert hours to seconds
    # Kepler's third law: a^3 = G * (m1 + m2) * P^2 / (4 * pi^2)
    tmp = G * period ** 2 * (m_star + m_seco) / (4 * PI ** 2)
    return tmp ** (1 / 3)
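# --- Illustrative check (not part of the original script) ---
# Sanity check of Kepler's third law as implemented above: for the
# Earth-Sun system (P ~ 8766 hours, masses ~ 1 MSOL and ~3e-6 MSOL),
# the separation should come out near 1 AU ~ 1.496e11 m:
#   calc_orbital_separation(8766, MSOL, 3.0e-6 * MSOL)  # ~1.5e11 m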
def roche_potential(
m1: float, m2: float, a: float, omega: float, x: np.ndarray, xc: float, y: np.ndarray, z: np.ndarray
) -> np.ndarray:
"""
Calculate the Roche potential.
Parameters
----------
m1: float
The mass of primary object in the system.
m2: float
The mass of the secondary object in the system.
a: float
The orbital separation of the system.
    omega: float
        The orbital angular velocity of the system in rad/s.
x: np.array[float]
The x-range to calculate the Roche potential over.
    xc: float
        The x coordinate of the centre of mass of the system.
y: np.array[float]
The y-range to calculate the Roche potential over.
z: np.array[float]
The z-range to calculate the Roche potential over.
Returns
-------
    phi: np.array[float]
        The Roche potential evaluated at the given (x, y, z) points.
"""
    r1 = np.sqrt(x ** 2 + y ** 2 + z ** 2)        # distance from m1
    r2 = np.sqrt((x - a) ** 2 + y ** 2 + z ** 2)  # distance from m2
phi_1 = - G * m1 / r1
phi_2 = - G * m2 / r2
phi_3 = - 0.5 * omega ** 2 * ((x - xc) ** 2 + y ** 2)
phi = phi_1 + phi_2 + phi_3
return phi
def dimensionless_roche_potential(
    r1: float, r2: float, q: float, x: float, y: float
) -> float:
"""
Calculate the shape of the Roche potential independent of the size of the
binary system.
"""
phi_1 = 2 / (1 + q) / r1
phi_2 = 2 * q / (1 + q) / r2
phi_3 = (x + q / (1 + q)) ** 2 - y ** 2
phi = phi_1 + phi_2 + phi_3
return phi
def main():
"""
    Main function for plotting the Roche potential as a function of orbital
separation.
"""
# Standard CV
period = 5.57
m_star = 0.80 * MSOL
m_seco = 0.60 * MSOL
# TDE
# period = 131
# m_star = 3e7 * MSOL
# m_seco = 1.6 * MSOL
# Calculate some system parameters
q = m_star / m_seco
a = calc_orbital_separation(period, m_star, m_seco)
    omega = np.sqrt(G * (m_star + m_seco) / a ** 3)  # omega^2 = G*M/a^3
xc = a * m_seco / (m_star + m_seco)
# Now we can calculate the Roche potential for a given xrange
l_xlim, u_xlim = 3 * np.array([-a, a])
x_range = np.linspace(l_xlim, u_xlim, 500)
y_range = 0
z_range = 0
phi = roche_potential(m_star, m_seco, a, omega, x_range, xc, y_range, z_range)
plt.plot(x_range / a, phi)
# plt.axhline(np.max(phi), 0, 1, linestyle="--", linewidth=1, color="k")
plt.xlabel(r"$x / a$")
plt.ylabel(r"Roche Potential $\Phi (x)$")
plt.ylim(-3, 0)
plt.xlim(l_xlim / a, u_xlim / a)
plt.show()
return
if __name__ == "__main__":
main()
|
{"hexsha": "713c89e450b0bf4f66c1edd1c695b16dc8d3c2a4", "size": 3429, "ext": "py", "lang": "Python", "max_stars_repo_path": "roche_potential.py", "max_stars_repo_name": "saultyevil/Python-scripts", "max_stars_repo_head_hexsha": "38eaf3c448a509b3751409cae22ecd553cdd2bfc", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-05-04T01:04:24.000Z", "max_stars_repo_stars_event_max_datetime": "2019-05-04T13:27:30.000Z", "max_issues_repo_path": "roche_potential.py", "max_issues_repo_name": "saultyevil/Python-scripts", "max_issues_repo_head_hexsha": "38eaf3c448a509b3751409cae22ecd553cdd2bfc", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "roche_potential.py", "max_forks_repo_name": "saultyevil/Python-scripts", "max_forks_repo_head_hexsha": "38eaf3c448a509b3751409cae22ecd553cdd2bfc", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.979020979, "max_line_length": 104, "alphanum_fraction": 0.5855934675, "include": true, "reason": "import numpy", "num_tokens": 1083}
|
function cond = condition_linpack ( n, a )
%*****************************************************************************80
%
%% CONDITION_LINPACK estimates the L1 condition number of a matrix.
%
% Discussion:
%
% The R8GE storage format is used for a general M by N matrix. A storage
% space is made for each logical entry. The two dimensional logical
% array is mapped to a vector, in which storage is by columns.
%
% For the system A * X = B, relative perturbations in A and B
% of size EPSILON may cause relative perturbations in X of size
%    EPSILON*COND.
%
% Licensing:
%
% This code is distributed under the GNU LGPL license.
%
% Modified:
%
% 21 March 2004
%
% Author:
%
% Original FORTRAN77 version by Dongarra, Bunch, Moler, Stewart.
% MATLAB version by John Burkardt.
%
% Reference:
%
% Dongarra, Bunch, Moler, Stewart,
% LINPACK User's Guide,
% SIAM, 1979
%
% Parameters:
%
% Input, integer N, the order of the matrix A.
%
% Input, real A(N,N), a matrix to be factored.
%
% Output, real COND, an estimate of the condition number of A.
%
%
% Compute the L1 norm of A.
%
anorm = norm ( a, 1 );
%
% Compute the LU factorization.
%
[ a_lu, pivot, info ] = r8ge_fa ( n, a );
%
% COND = norm(A) * (estimate of norm(inverse(A)))
%
% estimate of norm(inverse(A)) = norm(Z) / norm(Y)
%
% where
% A * Z = Y
% and
% A' * Y = E
%
% The components of E are chosen to cause maximum local growth in the
% elements of W, where U'*W = E. The vectors are frequently rescaled
% to avoid overflow.
%
% Solve U' * W = E.
%
ek = 1.0;
z(1:n,1) = 0.0;
for k = 1 : n
if ( z(k,1) ~= 0.0 )
ek = - r8_sign ( z(k,1) ) * abs ( ek );
end
if ( abs ( a_lu(k,k) ) < abs ( ek - z(k,1) ) )
s = abs ( a_lu(k,k) ) / abs ( ek - z(k,1) );
z(1:n,1) = s * z(1:n,1);
ek = s * ek;
end
wk = ek - z(k,1);
wkm = -ek - z(k,1);
s = abs ( wk );
sm = abs ( wkm );
if ( a_lu(k,k) ~= 0.0 )
wk = wk / a_lu(k,k);
wkm = wkm / a_lu(k,k);
else
wk = 1.0;
wkm = 1.0;
end
if ( k + 1 <= n )
for j = k + 1 : n
sm = sm + abs ( z(j,1) + wkm * a_lu(k,j) );
z(j,1) = z(j,1) + wk * a_lu(k,j);
s = s + abs ( z(j,1) );
end
if ( s < sm )
t = wkm - wk;
wk = wkm;
z(k+1:n,1) = z(k+1:n,1) + t * a_lu(k,k+1:n)';
end
end
z(k,1) = wk;
end
t = sum ( abs ( z(1:n,1) ) );
z(1:n,1) = z(1:n,1) / t;
%
% Solve L' * Y = W
%
for k = n : -1 : 1
z(k,1) = z(k,1) + a_lu(k+1:n,k)' * z(k+1:n,1);
t = abs ( z(k,1) );
if ( 1.0 < t )
z(1:n,1) = z(1:n,1) / t;
end
l = pivot(k);
t = z(l,1);
z(l,1) = z(k,1);
z(k,1) = t;
end
z(1:n,1) = z(1:n,1) / sum ( abs ( z(1:n,1) ) );
ynorm = 1.0;
%
% Solve L * V = Y.
%
for k = 1 : n
l = pivot(k);
t = z(l,1);
z(l,1) = z(k,1);
z(k,1) = t;
z(k+1:n,1) = z(k+1:n,1) + t * a_lu(k+1:n,k);
if ( 1.0 < abs ( z(k,1) ) )
ynorm = ynorm / abs ( z(k,1) );
      z(1:n,1) = z(1:n,1) / abs ( z(k,1) );
end
end
s = sum ( abs ( z(1:n,1) ) );
z(1:n,1) = z(1:n,1) / s;
ynorm = ynorm / s;
%
% Solve U * Z = V.
%
for k = n : -1 : 1
if ( abs ( a_lu(k,k) ) < abs ( z(k,1) ) )
s = abs ( a_lu(k,k) ) / abs ( z(k,1) );
z(1:n,1) = s * z(1:n,1);
ynorm = s * ynorm;
end
if ( a_lu(k,k) ~= 0.0 )
z(k,1) = z(k,1) / a_lu(k,k);
else
z(k,1) = 1.0;
end
z(1:k-1,1) = z(1:k-1,1) - a_lu(1:k-1,k) * z(k,1);
end
%
% Normalize Z in the L1 norm.
%
s = 1.0 / sum ( abs ( z(1:n,1) ) );
z(1:n,1) = s * z(1:n,1);
ynorm = s * ynorm;
cond = anorm / ynorm;
return
end
|
{"author": "johannesgerer", "repo": "jburkardt-m", "sha": "1726deb4a34dd08a49c26359d44ef47253f006c1", "save_path": "github-repos/MATLAB/johannesgerer-jburkardt-m", "path": "github-repos/MATLAB/johannesgerer-jburkardt-m/jburkardt-m-1726deb4a34dd08a49c26359d44ef47253f006c1/condition/condition_linpack.m"}
|
# coding=utf-8
# Author: Rafael Menelau Oliveira e Cruz <rafaelmenelau@gmail.com>
#
# License: BSD 3 clause
import numpy as np
from sklearn.metrics import check_scoring
from sklearn.utils.validation import check_X_y, check_is_fitted, check_array
from .base import BaseStaticEnsemble
class SingleBest(BaseStaticEnsemble):
"""Classification method that selects the classifier in the pool with
highest score to be used for classification. Usually, the performance of
the single best classifier is estimated based on the validation data.
Parameters
----------
pool_classifiers : list of classifiers (Default = None)
        The pool of classifiers trained for the corresponding classification
        problem. Each base classifier should support the method "predict".
        If None, the pool of classifiers is generated by a bagging
        classifier.
scoring : string, callable (default = None)
A single string or a callable to evaluate the predictions on the
validation set.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
n_jobs : int, default=-1
The number of parallel jobs to run. None means 1 unless in
a joblib.parallel_backend context. -1 means using all processors.
        Does not affect the fit method.
References
----------
Britto, Alceu S., Robert Sabourin, and Luiz ES Oliveira. "Dynamic selection
of classifiers—a comprehensive review."
Pattern Recognition 47.11 (2014): 3665-3680.
Kuncheva, Ludmila I. Combining pattern classifiers: methods and algorithms.
John Wiley & Sons, 2004.
R. M. O. Cruz, R. Sabourin, and G. D. Cavalcanti, “Dynamic classifier
selection: Recent advances and perspectives,”
Information Fusion, vol. 41, pp. 195 – 216, 2018.
"""
def __init__(self, pool_classifiers=None, scoring=None,
random_state=None, n_jobs=-1):
super(SingleBest, self).__init__(pool_classifiers=pool_classifiers,
random_state=random_state,
n_jobs=n_jobs)
self.scoring = scoring
def fit(self, X, y):
"""Fit the model by selecting the base classifier with the highest
accuracy in the dataset. The single best classifier is kept in
self.best_clf and its index is kept in self.best_clf_index.
Parameters
----------
X : array of shape (n_samples, n_features)
Data used to fit the model.
y : array of shape (n_samples)
class labels of each example in X.
"""
X, y = check_X_y(X, y)
super(SingleBest, self).fit(X, y)
if not self.base_already_encoded_:
y_encoded = y
else:
y_encoded = self.enc_.transform(y)
performances = self._estimate_performances(X, y_encoded)
self.best_clf_index_ = np.argmax(performances)
self.best_clf_ = self.pool_classifiers_[self.best_clf_index_]
return self
def _estimate_performances(self, X, y):
performances = np.zeros(self.n_classifiers_)
for idx, clf in enumerate(self.pool_classifiers_):
scorer = check_scoring(clf, self.scoring)
performances[idx] = scorer(clf, X, y)
return performances
def predict(self, X):
"""Predict the label of each sample in X and returns the predicted
label.
Parameters
----------
X : array of shape (n_samples, n_features)
The data to be classified
Returns
-------
predicted_labels : array of shape (n_samples)
Predicted class for each sample in X.
"""
X = check_array(X)
self._check_is_fitted()
predicted_labels = self._encode_base_labels(self.best_clf_.predict(X))
        return self.classes_.take(predicted_labels.astype(int))  # np.int is deprecated
def predict_proba(self, X):
"""Estimates the posterior probabilities for each class for each sample
in X. The returned probability estimates for all classes are ordered by
the label of classes.
Parameters
----------
X : array of shape (n_samples, n_features)
The data to be classified
Returns
-------
predicted_proba : array of shape (n_samples, n_classes)
Posterior probabilities estimates for each class.
"""
self._check_is_fitted()
if "predict_proba" not in dir(self.best_clf_):
raise ValueError(
"Base classifier must support the predict_proba function.")
predicted_proba = self.best_clf_.predict_proba(X)
return predicted_proba
def _check_is_fitted(self):
"""Verify if the estimator algorithm was fitted. Raises an error if it
is not fitted.
"""
check_is_fitted(self, "best_clf_")
|
{"hexsha": "0c8b13a401d82a960ccafaa107b34d33336e9092", "size": 5201, "ext": "py", "lang": "Python", "max_stars_repo_path": "deslib/static/single_best.py", "max_stars_repo_name": "walbermr/DESlib", "max_stars_repo_head_hexsha": "6ae0f5a8f837f1843ccf0a48ae89e911108aa3d2", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "deslib/static/single_best.py", "max_issues_repo_name": "walbermr/DESlib", "max_issues_repo_head_hexsha": "6ae0f5a8f837f1843ccf0a48ae89e911108aa3d2", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "deslib/static/single_best.py", "max_forks_repo_name": "walbermr/DESlib", "max_forks_repo_head_hexsha": "6ae0f5a8f837f1843ccf0a48ae89e911108aa3d2", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.1418918919, "max_line_length": 79, "alphanum_fraction": 0.6441069025, "include": true, "reason": "import numpy", "num_tokens": 1126}
|
import csv
import math
import os
import pickle
import numpy as np
import tensorflow as tf
from scipy import misc
import facenet
vgg_num_image_per_folder = 1 # -1 means all
vgg_gender_meta_file_path = "../datasets/VGGFace2/meta/identity_meta.csv"
vgg_age_meta_file_path = "../datasets/VGGFace2/meta/test_agetemp_imglist.txt"
vgg_images_path = "../datasets/VGGFace2/train_mtcnnpy_182"
lfw_meta_female_file_path = "../datasets/lfw/gender/female_names.txt"
lfw_meta_male_file_path = "../datasets/lfw/gender/male_names.txt"
lfw_images_path = "../datasets/lfw/lfw_mtcnnpy_160"
data_save_path = "../datasets/VGGFace2/gender/saved_data"
# inception resnet v2 trained with vggface2, acc: 0.992, validation: 0.958
feature_extraction_model = "../models/facenet/20180324-080308"
batch_size = 100
image_size = 160
do_flip = True
image_paths_m = []
image_paths_f = []
def build_id2gender_map():
result = {}
    # newline='' prevents csv from inserting blank lines on Windows
with open(vgg_gender_meta_file_path, "r", newline="") as file:
reader = csv.reader(file)
# skip header
next(reader)
for line in reader:
id, _, _, _, gender = line
result[id] = gender.strip()
return result
def load_data(img_paths, flip, img_size, do_prewhiten=True):
m = len(img_paths)
imgs = np.zeros((m, img_size, img_size, 3))
for i in range(m):
img = misc.imread(img_paths[i])
if img.ndim == 2:
img = facenet.to_rgb(img)
if do_prewhiten:
img = facenet.prewhiten(img)
img = facenet.crop(img, False, image_size)
if flip:
img = np.fliplr(img)
imgs[i, :, :, :] = img
return imgs
id2gender = build_id2gender_map()
# each element is a map:
# image_path: image full path
# gender: 'm', 'f'
# age_young: True, False
data = []
image_paths = []
with open(vgg_age_meta_file_path, "r", newline="") as file:
lines = file.readlines()
id = None
gender = None
age_young = False
for i, line in enumerate(lines):
if i % 20 == 0:
id = line.split("/")[0]
gender = id2gender[id]
if i % 10 == 0:
age_young = not age_young
image_path = os.path.abspath(os.path.join(vgg_images_path, line.strip()))
image_paths.append(image_path)
data.append({"image_path": image_path, "gender": gender, "age_young": age_young})
with tf.Graph().as_default():
with tf.Session() as sess:
# Load the model
print('Loading feature extraction model')
facenet.load_model(feature_extraction_model)
# Get input and output tensors
images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")
embedding_size = embeddings.get_shape()[1]
# Run forward pass to calculate embeddings
print('Calculating features for images')
nrof_images = len(image_paths)
nrof_batches_per_epoch = int(math.ceil(1.0 * nrof_images / batch_size))
emb_array = np.zeros((nrof_images, embedding_size))
for i in range(nrof_batches_per_epoch):
start_index = i * batch_size
end_index = min((i + 1) * batch_size, nrof_images)
paths_batch = image_paths[start_index:end_index]
images = load_data(paths_batch, do_flip, image_size)
feed_dict = {images_placeholder: images, phase_train_placeholder: False}
emb_array[start_index:end_index, :] = sess.run(embeddings, feed_dict=feed_dict)
for i, d in enumerate(data):
d["embedding"] = emb_array[i]
# Saving data array
data_save_path_exp = os.path.expanduser(data_save_path)
if do_flip:
data_save_path_exp += "_flip"
# data is a list with length 2000
# elements are {
# 'image_path': str
# 'gender': 'f'/'m'
# 'age_young': bool
# 'embedding': ndarray with shape (128,) dtype float64
# }
with open(data_save_path_exp, 'wb') as outfile:
pickle.dump(data, outfile)
print('Saved data to file "%s"' % data_save_path_exp)
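# --- Illustrative sketch (not part of the original script) ---
# Reading the saved file back for downstream training; the path mirrors
# data_save_path_exp above.
#
#   with open(data_save_path_exp, 'rb') as infile:
#       data = pickle.load(infile)
#   embeddings = np.stack([d['embedding'] for d in data])
#   genders = [d['gender'] for d in data]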
|
{"hexsha": "4e0f2eb1531d953f7bcaf5de3eb98ee68cace704", "size": 4174, "ext": "py", "lang": "Python", "max_stars_repo_path": "contributed/vggface2_gender_age_dataset_builder.py", "max_stars_repo_name": "helloyide/facenet", "max_stars_repo_head_hexsha": "e3f096afe122c54b3a3ff5720b6dfb1c581149b7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "contributed/vggface2_gender_age_dataset_builder.py", "max_issues_repo_name": "helloyide/facenet", "max_issues_repo_head_hexsha": "e3f096afe122c54b3a3ff5720b6dfb1c581149b7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "contributed/vggface2_gender_age_dataset_builder.py", "max_forks_repo_name": "helloyide/facenet", "max_forks_repo_head_hexsha": "e3f096afe122c54b3a3ff5720b6dfb1c581149b7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.3565891473, "max_line_length": 92, "alphanum_fraction": 0.6674652611, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1082}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 5 12:18:36 2020
@author: alex
###############################################################################
# KalmanFilter.py
#
# Revision: 1.02
# Date: 8/17/2020
# Author: Alex
#
# Purpose: Implement a general-purpose Kalman filter class.
#
# KalmanFilter class:
# 1. All inputs to the class must be 2D Numpy arrays. Even scalars must be
# represented as 2D Numpy arrays of shape (1, 1).
# 2. See the __init__ function for a full description of inputs.
#
# Functions:
# 1. constant_voltage_example -- A test function to verify KalmanFilter class
#
# Notes:
# 1. Notation based on professors Greg Welch and Gary Bishop's course pack on
# Kalman filters from the University of North Carolina at Chapel Hill.
# 2. The terminology is based on that used in Alex Becker's tutorial located at
# www.kalmanfilters.net.
# 3. Running this Python file directly will plot a simple example of a Kalman
# filter used to estimate a constant voltage.
#
##############################################################################
"""
import numpy as np
from matplotlib import pyplot as plt
class KalmanFilter:
"""Implements a general-purpose Kalman filter."""
def __init__(self, **kwargs):
"""Initialize Kalman filter parameters and verify matrix dimensions."""
# Initial values (time step = 0) of parameters
self.A = kwargs['A'] # State transition matrix, size (nxn)
self.B = kwargs['B'] # Control-input model matrix, size (nxj)
self.H = kwargs['H'] # Observation model matrix, size (mxn)
self.P = kwargs['P'] # Estimate error covariance matrix, size (nxn)
self.Q = kwargs['Q'] # Process noise covariance matrix, size (nxn)
self.R = kwargs['R'] # Measurement noise covariance matrix, size (mxm)
self.u = kwargs['u'] # Control variable vector, size (jx1)
self.x = kwargs['x'] # Estimated state vector, size (nx1)
# Derived parameters
self.j = self.u.shape[0] # Number of control dimensions
self.m = self.H.shape[0] # Number of measurement dimensions
self.n = self.x.shape[0] # Number of state dimensions
j, m, n = self.j, self.m, self.n
self.Gain = np.zeros((n,m)) # Kalman gain, size (nxm)
self.IM = np.eye(n) # Identity matrix, size (nxn)
# Check dimensions of parameters
assert self.A.shape == (n,n), "A dimensions must be {}".format((n,n))
assert self.B.shape == (n,j), "B dimensions must be {}".format((n,j))
assert self.H.shape == (m,n), "H dimensions must be {}".format((m,n))
assert self.P.shape == (n,n), "P dimensions must be {}".format((n,n))
assert self.Q.shape == (n,n), "Q dimensions must be {}".format((n,n))
assert self.R.shape == (m,m), "R dimensions must be {}".format((m,m))
assert self.u.shape == (j,1), "u dimensions must be {}".format((j,1))
assert self.x.shape == (n,1), "x dimensions must be {}".format((n,1))
def predict(self):
"""Time update (Predict)"""
# State extrapolation (a priori state estimate)
self.x = self.A @ self.x + self.B @ self.u
# Estimate error covariance extrapolation (a priori estimate error)
self.P = self.A @ self.P @ self.A.T + self.Q
def correct(self, z, Q=None, R=None, u=None):
"""Measurement Update (Correct)"""
if Q is None: Q = self.Q
if R is None: R = self.R
if u is None: u = self.u
m = self.m
assert z.shape == (m, 1), "z dimensions must be {}".format((m,1))
# Compute Kalman gain
        # Innovation (residual) covariance: S = H*P*H' + R
        S = self.H @ self.P @ self.H.T + R
        self.Gain = self.P @ self.H.T @ np.linalg.inv(S)
# Update state estimate with measurement (a posteriori state estimate)
self.x = self.x + self.Gain @ (z - self.H @ self.x)
# Update the estimate error covariance (a posteriori estimate error)
self.P = (self.IM - (self.Gain @ self.H)) @ self.P
# Update any changes made to measurement/process noise or control inputs
self.Q, self.R, self.u = Q, R, u
return self.x, self.P
def constant_voltage_example():
"""Visualize and validate the Kalman Filter class with a simple estimate
of a constant voltage. Example from section 4.3 of:
https://www.cs.unc.edu/~tracker/media/pdf/SIGGRAPH2001_CoursePack_08.pdf
"""
# Input initial values (time step = 0) for all parameters
A = np.array([[1]]) # Shape of (1, 1). All parameters must be 2D arrays
B = np.array([[0]])
H = np.array([[1]])
P = np.array([[1]])
Q = np.array([[1e-5]])
R = np.array([[0.1 ** 2]])
u = np.array([[0]])
x = np.array([[0]])
filter_kwargs = {
'A' : A,
'B' : B,
'H' : H,
'P' : P,
'Q' : Q,
'R' : R,
'u' : u,
'x' : x}
# Instantiate filter class
kf = KalmanFilter(**filter_kwargs)
# Generate measurements
num_steps = 50 # Number of time steps to iterate over
meas_mean = -0.37727 # True/actual voltage value
meas_sigma = 0.1 # True/actual measurement standard deviation
measurements = np.random.normal(meas_mean, meas_sigma, num_steps)
# Initialize variables to store and plot results
true_value = np.ones((num_steps,1)) * meas_mean
estimates = []
estimate_uncertainty = []
step_range = np.arange(0, num_steps)
# Filter loop
for k in step_range:
kf.predict()
z = np.array([[measurements[k]]])
estimate, uncertainty = kf.correct(z)
estimates.append(np.squeeze(estimate))
estimate_uncertainty.append(np.squeeze(uncertainty))
# Plot results
plt.figure()
plt.plot(step_range+1, true_value, label='True Voltage')
plt.plot(step_range+1, measurements, marker='+', linestyle="None", label='Measurements')
plt.plot(step_range+1, estimates, linestyle="dashed", label='Filter Estimate')
plt.xlabel('Iteration')
plt.ylabel('Voltage (Volts)')
plt.title('Kalman Filter Constant Voltage Exercise')
plt.legend()
plt.show()
# Plot uncertainty
plt.figure()
plt.plot(step_range+1, estimate_uncertainty, label='Estimate Uncertainty')
plt.xlabel('Iteration')
plt.ylabel('Uncertainty (Volts^2)')
plt.title('Kalman Filter Estimate Uncertainty')
plt.legend()
plt.show()
if __name__ == '__main__':
constant_voltage_example()
|
{"hexsha": "db193766ba03fa4312ba70b720c078a777ef57ac", "size": 6549, "ext": "py", "lang": "Python", "max_stars_repo_path": "KalmanFilter.py", "max_stars_repo_name": "AlexMGitHub/KalmanCar", "max_stars_repo_head_hexsha": "ae146efcaa9a9b393c2bf574eb11fe9f51355da1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-01-06T21:56:41.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-06T21:56:41.000Z", "max_issues_repo_path": "KalmanFilter.py", "max_issues_repo_name": "AlexMGitHub/KalmanCar", "max_issues_repo_head_hexsha": "ae146efcaa9a9b393c2bf574eb11fe9f51355da1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "KalmanFilter.py", "max_forks_repo_name": "AlexMGitHub/KalmanCar", "max_forks_repo_head_hexsha": "ae146efcaa9a9b393c2bf574eb11fe9f51355da1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.93125, "max_line_length": 92, "alphanum_fraction": 0.599938922, "include": true, "reason": "import numpy", "num_tokens": 1676}
|
from measure.measure_average_precision import average_precision
from measure.measure_hamming_loss import hamming_loss
from measure.measure_ranking_loss import ranking_loss
from measure.measure_example_auc import example_auc
from measure.measure_example_f1 import example_f1
from measure.measure_one_error import one_error
from measure.measure_macro_auc import macro_auc
from measure.measure_micro_auc import micro_auc
from measure.measure_macro_f1 import macro_f1
from measure.measure_micro_f1 import micro_f1
from measure.measure_coverage import coverage
from loader.load_data import get_batch
from torch.autograd import Variable
import numpy as np
evaluation = ['average_precision', 'coverage', 'ranking_loss', 'macro_auc', 'micro_auc', 'example_auc',
              'hamming_loss', 'one_error', 'macro_f1', 'micro_f1', 'example_f1']  # evaluation metrics
view_name = ['Image', 'Text', 'Title']
def test_single_view(x, y, hp):
result = {}
if 0 in hp['eval']:
result[evaluation[0]] = average_precision(x, y)
if 1 in hp['eval']:
result[evaluation[1]] = coverage(x, y)
if 2 in hp['eval']:
result[evaluation[2]] = ranking_loss(x, y)
if 3 in hp['eval']:
result[evaluation[3]] = macro_auc(x, y)
if 4 in hp['eval']:
result[evaluation[4]] = micro_auc(x, y)
if 5 in hp['eval']:
result[evaluation[5]] = example_auc(x, y)
if 6 in hp['eval']:
if 'thread' in hp.keys():
result[evaluation[6]] = hamming_loss(x, y, thread=hp['thread'])
else:
print('No thread for prediction(default = 0.5)!!')
result[evaluation[6]] = hamming_loss(x, y)
if 7 in hp['eval']:
result[evaluation[7]] = one_error(x, y)
if 8 in hp['eval']:
if 'thread' in hp.keys():
result[evaluation[8]] = macro_f1(x, y, thread=hp['thread'])
else:
print('No thread for prediction(default = 0.5)!!')
result[evaluation[8]] = macro_f1(x, y)
if 9 in hp['eval']:
if 'thread' in hp.keys():
result[evaluation[9]] = micro_f1(x, y, thread=hp['thread'])
else:
print('No thread for prediction(default = 0.5)!!')
result[evaluation[9]] = micro_f1(x, y)
if 10 in hp['eval']:
        if 'thread' in hp.keys():
            result[evaluation[10]] = example_f1(x, y, thread=hp['thread'])
        else:
            print('No thread for prediction(default = 0.5)!!')
            result[evaluation[10]] = example_f1(x, y)
return result
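# --- Illustrative sketch (not part of the original module) ---
# Example of the hp dictionary this dispatcher expects; the scores and
# labels below are random placeholders.
#   hp = {'eval': [0, 2, 6], 'thread': 0.5}
#   scores = np.random.rand(8, 4)
#   labels = (np.random.rand(8, 4) > 0.5).astype(int)
#   test_single_view(scores, labels, hp)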
def test(test_data, hp, models, stage):
print("----------start testing models----------")
view_num = len(models)
for i in range(view_num):
models[i].cuda()
models[i].eval()
# calculate output
bag_num = len(test_data)
batch_size = hp['test_size'][0]
max_step = int(bag_num / batch_size)
while max_step * batch_size < bag_num:
max_step += 1
h = [None for i in range(view_num)]
label = [None for i in range(view_num)]
for step in range(max_step):
# get data
        step_data = get_batch(test_data, list(range(step * batch_size, min((step + 1) * batch_size, bag_num))), hp)
x1, x2, bag1, bag2, y = step_data
print('img_bag',bag1)
print('txt_bag',bag2)
x_img = Variable(x1, volatile=True).cuda()
x_text = Variable(x2, volatile=True).cuda()
h1,_,_ = models[0](x_img,bag1)
h2,_,_ = models[1](x_text,bag2)
if step == 0:
h[0] = h1.cpu().data.numpy()
label[0] = y.numpy()
else:
h[0] = np.concatenate((h[0], h1.cpu().data.numpy()))
label[0] = np.concatenate((label[0], y.numpy()))
if step == 0:
h[1] = h2.cpu().data.numpy()
label[1] = y.numpy()
else:
h[1] = np.concatenate((h[1], h2.cpu().data.numpy()))
label[1] = np.concatenate((label[1], y.numpy()))
result = {}
# test single view
for i in range(view_num):
print(i)
# test
result[view_name[i]] = test_single_view(h[i], label[i], hp)
# show test result
print("test result : ", view_name[i])
for key in result[view_name[i]].keys():
print(key, result[view_name[i]][key], '\n')
np.save('{}predict-single-{}.npy'.format(hp['rootdir'],stage), result)
# test all view
h_average = []
h_max = []
for i in range(h[0].shape[0]):
z = []
for j in range(view_num):
z.append(h[j][i].reshape(1, -1))
z = np.concatenate(z)
h_average.append(np.mean(z, axis=0).reshape(1, -1))
h_max.append(np.max(z, axis=0).reshape(1, -1))
h_average = np.concatenate(h_average, axis=0)
h_max = np.concatenate(h_max, axis=0)
result['avg'] = test_single_view(h_average, label[0], hp)
print("test result : average all")
for key in result['avg'].keys():
print(key, result['avg'][key], '\n')
result['max'] = test_single_view(h_max, label[0], hp)
print("test result : max all")
for key in result['max'].keys():
print(key, result['max'][key], '\n')
print("----------end testing models----------")
np.save('{}predict-all-{}.npy'.format(hp['rootdir'],stage), result)
save_result(hp['rootdir'],stage,result)
return result
def save_result(filepath, stage, result):
path = "{}test-result-{}.txt".format(filepath,stage)
with open(path, 'w') as f:
for key in result.keys():
f.write(key + '\n')
for k in result[key].keys():
f.write("\t" + k + ' ' + str(result[key][k]) + '\n')
|
{"hexsha": "40ed04de195ecbfbfd14fbf28eb837b76d32d141", "size": 5620, "ext": "py", "lang": "Python", "max_stars_repo_path": "model/test.py", "max_stars_repo_name": "sunshiding/M3DNS", "max_stars_repo_head_hexsha": "363301b6f95f7a144a94ecf6f88a3efe378cd562", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-05-18T04:57:24.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-18T04:57:24.000Z", "max_issues_repo_path": "model/test.py", "max_issues_repo_name": "sunshiding/M3DNS", "max_issues_repo_head_hexsha": "363301b6f95f7a144a94ecf6f88a3efe378cd562", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "model/test.py", "max_forks_repo_name": "sunshiding/M3DNS", "max_forks_repo_head_hexsha": "363301b6f95f7a144a94ecf6f88a3efe378cd562", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.4935064935, "max_line_length": 111, "alphanum_fraction": 0.5859430605, "include": true, "reason": "import numpy", "num_tokens": 1534}
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import copy
import scipy.stats as stats
import matplotlib.pyplot as plt
#my imports
from pddm.samplers import trajectory_sampler
from pddm.utils.helper_funcs import do_groundtruth_rollout
from pddm.utils.helper_funcs import turn_acs_into_acsK
from pddm.utils.calculate_costs import calculate_costs
import pddm
class CEM(object):
def __init__(self, env, dyn_models, reward_func, rand_policy, use_ground_truth_dynamics,
execute_sideRollouts, plot_sideRollouts, params):
###########
## params
###########
self.K = params.K
self.horizon = params.horizon
self.N = params.num_control_samples
self.rand_policy = rand_policy
self.use_ground_truth_dynamics = use_ground_truth_dynamics
self.dyn_models = dyn_models
self.execute_sideRollouts = execute_sideRollouts
self.plot_sideRollouts = plot_sideRollouts
self.reward_func = reward_func
self.env = copy.deepcopy(env)
#############
## params for CEM controller
#############
self.max_iters = params.cem_max_iters
self.num_elites = params.cem_num_elites
self.sol_dim = self.env.env.action_space.shape[0] * self.horizon
self.ub = 1
self.lb = -1
self.epsilon = 0.001
self.alpha = 0
def get_action(self, step_number, curr_state_K, actions_taken_so_far,
starting_fullenvstate, evaluating, take_exploratory_actions):
"""Select optimal action
Args:
curr_state_K:
current "state" as known by the dynamics model
actually a concatenation of (1) current obs, and (K-1) past obs
step_number:
which step number the rollout is currently on (used to calculate costs)
actions_taken_so_far:
used to restore state of the env to correct place,
when using ground-truth dynamics
starting_fullenvstate
full state of env before this rollout, used for env resets (when using ground-truth dynamics)
evaluating
if True: default to not having any noise on the executing action
take_exploratory_actions
if True: select action based on disagreement of ensembles
if False: (default) select action based on predicted costs
Returns:
best_action: optimal action to perform, according to this controller
resulting_states_list: predicted results of executing the candidate action sequences
"""
#initial mean and var of the sampling normal dist
mean = np.zeros((self.sol_dim,))
var = 5 * np.ones((self.sol_dim,))
X = stats.truncnorm(
self.lb, self.ub, loc=np.zeros_like(mean), scale=np.ones_like(mean))
#stop if variance is very low, or if enough iters
t = 0
while ((t < self.max_iters) and (np.max(var) > self.epsilon)):
#variables
lb_dist = mean - self.lb
ub_dist = self.ub - mean
constrained_var = np.minimum(
np.minimum(np.square(lb_dist / 2), np.square(ub_dist / 2)), var)
#get samples
all_samples_orig = X.rvs(size=[self.N, self.sol_dim]) * np.sqrt(
constrained_var) + mean # [N, ac*h]
all_samples = all_samples_orig.reshape(
self.N, self.horizon, -1) #interpret each row as a sequence of actions
all_samples = np.clip(all_samples, -1, 1)
########################################################################
### make each action element be (past K actions) instead of just (curr action)
########################################################################
#all_samples is [N, horizon, ac_dim]
all_acs = turn_acs_into_acsK(actions_taken_so_far, all_samples,
self.K, self.N, self.horizon)
#all_acs should now be [N, horizon, K, ac_dim]
############################
### have model predict the result of executing those candidate action sequences
############################
if self.use_ground_truth_dynamics:
paths = trajectory_sampler.sample_paths_parallel(
self.N,
all_samples,
actions_taken_so_far,
starting_fullenvstate,
self.env,
suppress_print=True,
) #list of dicts, each w observations/actions/etc.
#the taken number of paths is num_cpu*(floor(self.N/num_cpu))
#rather than self.N, so update parameter accordingly
self.N = len(paths)
all_samples = all_samples[:self.N]
resulting_states = [entry['observations'] for entry in paths]
resulting_states = np.swapaxes(resulting_states, 0, 1)
resulting_states_list = [resulting_states]
else:
resulting_states_list = self.dyn_models.do_forward_sim(
[curr_state_K, 0],
np.copy(all_acs))
resulting_states_list = np.swapaxes(
resulting_states_list, 0,
1) #this is now [ensSize, horizon+1, N, statesize]
############################
### evaluate the predicted trajectories
############################
#calculate costs : [N,]
costs, mean_costs, std_costs = calculate_costs(resulting_states_list, all_samples,
self.reward_func, evaluating, take_exploratory_actions)
#pick elites, and refit mean/var
#Note: these are costs, so pick the lowest few to be elites
indices = np.argsort(costs)
elites = all_samples_orig[indices][:self.num_elites]
new_mean = np.mean(elites, axis=0)
new_var = np.var(elites, axis=0)
#interpolate between old mean and new one
mean = self.alpha * mean + (1 - self.alpha) * new_mean
var = self.alpha * var + (1 - self.alpha) * new_var
#next iteration
t += 1
#return the best action
best_score = np.min(costs)
best_sequence = mean.reshape(
self.horizon, -1) #interpret the 'row' as a sequence of actions
best_action = np.copy(best_sequence[0]) #(acDim,)
#########################################
### execute the candidate action sequences on the real dynamics
### instead of just on the model
### useful for debugging/analysis...
#########################################
if self.execute_sideRollouts:
if ((step_number % self.horizon) == 0):
cmap = plt.get_cmap('jet_r')
num_sims = 10 ##5
indices_to_vis = [0, 1, 2]
curr_plot = 1
num_plots = len(indices_to_vis)
for index_state_to_vis in indices_to_vis:
plt.subplot(num_plots, 1, curr_plot)
for sim_num in range(num_sims):
true_states = do_groundtruth_rollout(
all_samples[sim_num], self.env,
starting_fullenvstate, actions_taken_so_far)
color = cmap(float(sim_num) / num_sims)
plt.plot(
resulting_states_list[-1]
[:, sim_num, index_state_to_vis],
'--',
c=color,
label=sim_num)
plt.plot(
np.array(true_states)[:, index_state_to_vis],
'-',
c=color)
curr_plot += 1
if self.plot_sideRollouts:
plt.legend()
plt.show()
return best_action, resulting_states_list
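if __name__ == '__main__':
    # Minimal standalone sketch (not part of the original controller) of the
    # CEM refit step used in the loop above: sample, score, pick elites,
    # refit mean/var. The toy cost below is hypothetical.
    import numpy as np

    def toy_cost(samples):
        # quadratic cost around an arbitrary target action sequence
        return np.sum((samples - 0.3) ** 2, axis=1)

    sol_dim, N, num_elites, alpha = 4, 64, 8, 0.1
    mean, var = np.zeros(sol_dim), 5 * np.ones(sol_dim)
    for _ in range(10):
        samples = np.clip(np.random.randn(N, sol_dim) * np.sqrt(var) + mean, -1, 1)
        elites = samples[np.argsort(toy_cost(samples))][:num_elites]
        mean = alpha * mean + (1 - alpha) * elites.mean(axis=0)
        var = alpha * var + (1 - alpha) * elites.var(axis=0)
    print('refit mean ~', np.round(mean, 2))  # converges toward 0.3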
|
{"hexsha": "0ffa976971d299ca7a56248864ad7fcb0d9b68fd", "size": 8809, "ext": "py", "lang": "Python", "max_stars_repo_path": "pddm/policies/cem.py", "max_stars_repo_name": "Jianshu-Hu/pddm", "max_stars_repo_head_hexsha": "66c23e7a50cbaafc94e8001214481ac194889df1", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 76, "max_stars_repo_stars_event_min_datetime": "2019-12-13T01:15:45.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-17T08:29:48.000Z", "max_issues_repo_path": "pddm/policies/cem.py", "max_issues_repo_name": "Jianshu-Hu/pddm", "max_issues_repo_head_hexsha": "66c23e7a50cbaafc94e8001214481ac194889df1", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2019-12-21T19:24:57.000Z", "max_issues_repo_issues_event_max_datetime": "2020-09-25T23:32:07.000Z", "max_forks_repo_path": "pddm/policies/cem.py", "max_forks_repo_name": "Jianshu-Hu/pddm", "max_forks_repo_head_hexsha": "66c23e7a50cbaafc94e8001214481ac194889df1", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 30, "max_forks_repo_forks_event_min_datetime": "2019-12-13T13:45:47.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-19T22:54:28.000Z", "avg_line_length": 41.5518867925, "max_line_length": 109, "alphanum_fraction": 0.5490975139, "include": true, "reason": "import numpy,import scipy", "num_tokens": 1791}
|
import numpy as np
from numpy import linalg as line
import sklearn
from sklearn.model_selection import KFold
from sklearn.decomposition import PCA
from scipy import interpolate
import torch
import torchvision.transforms as transforms
def de_preprocess(tensor):
"""preprocess function
"""
return tensor * 0.5 + 0.5
def hflip_batch(imgs_tensor):
""" bacth data Horizontally flip
"""
hflip = transforms.Compose([
de_preprocess,
transforms.ToPILImage(),
transforms.functional.hflip,
transforms.ToTensor(),
transforms.Normalize(mean=[0.5, 0.5, 0.5],
std=[0.5, 0.5, 0.5])
])
hfliped_imgs = torch.empty_like(imgs_tensor)
for i, img_ten in enumerate(imgs_tensor):
hfliped_imgs[i] = hflip(img_ten)
return hfliped_imgs
def ccrop_batch(imgs_tensor):
"""crop image tensor
"""
ccrop = transforms.Compose([
de_preprocess,
transforms.ToPILImage(),
transforms.ToTensor(),
transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
])
ccropped_imgs = torch.empty_like(imgs_tensor)
for i, img_ten in enumerate(imgs_tensor):
ccropped_imgs[i] = ccrop(img_ten)
return ccropped_imgs
def l2_norm(input, axis=1):
"""l2 normalize
"""
norm = torch.norm(input, 2, axis, True)
output = torch.div(input, norm)
return output
def calculate_roc(thresholds,
embeddings1,
embeddings2,
actual_issame,
nrof_folds=10,
pca=0):
""" Calculate accuracy with k-fold test method.
The whole test set divided into k folds, in every test loop,
the k-1 folds data is used to choose the best threshold, and
the left 1 fold is used to calculate acc with the best threshold.
The defauld nrof_folds is 10.
"""
assert (embeddings1.shape[0] == embeddings2.shape[0])
assert (embeddings1.shape[1] == embeddings2.shape[1])
nrof_pairs = min(len(actual_issame), embeddings1.shape[0])
nrof_thresholds = len(thresholds)
k_fold = KFold(n_splits=nrof_folds, shuffle=False)
tprs = np.zeros((nrof_folds, nrof_thresholds))
fprs = np.zeros((nrof_folds, nrof_thresholds))
accuracy = np.zeros((nrof_folds))
best_thresholds = np.zeros((nrof_folds))
indices = np.arange(nrof_pairs)
bad_case = []
diff = np.subtract(embeddings1, embeddings2)
dist = np.sum(np.square(diff), 1)
for fold_idx, (train_set, test_set) in enumerate(k_fold.split(indices)):
if pca > 0:
print("doing pca on", fold_idx)
embed1_train = embeddings1[train_set]
embed2_train = embeddings2[train_set]
_embed_train = np.concatenate((embed1_train, embed2_train), axis=0)
pca_model = PCA(n_components=pca)
pca_model.fit(_embed_train)
embed1 = pca_model.transform(embeddings1)
embed2 = pca_model.transform(embeddings2)
embed1 = sklearn.preprocessing.normalize(embed1)
embed2 = sklearn.preprocessing.normalize(embed2)
diff = np.subtract(embed1, embed2)
dist = np.sum(np.square(diff), 1)
# Find the best threshold for the fold
acc_train = np.zeros((nrof_thresholds))
for threshold_idx, threshold in enumerate(thresholds):
_, _, acc_train[threshold_idx] = calculate_accuracy(
threshold,
dist[train_set],
actual_issame[train_set])
best_threshold_index = np.argmax(acc_train)
best_thresholds[fold_idx] = thresholds[best_threshold_index]
for threshold_idx, threshold in enumerate(thresholds):
tprs[fold_idx, threshold_idx], \
fprs[fold_idx, threshold_idx], _ = calculate_accuracy(
threshold,
dist[test_set],
actual_issame[test_set])
_, _, accuracy[fold_idx] = calculate_accuracy(
thresholds[best_threshold_index],
dist[test_set],
actual_issame[test_set])
for i in test_set:
if actual_issame[i] and dist[i] > thresholds[best_threshold_index]:
bad_case.append(i)
tpr = np.mean(tprs, 0)
fpr = np.mean(fprs, 0)
return tpr, fpr, accuracy, best_thresholds, bad_case
def calculate_accuracy(threshold, dist, actual_issame):
""" calculate acc, tpr, fpr by given threshold
"""
predict_issame = np.less(dist, threshold)
tp = np.sum(np.logical_and(predict_issame, actual_issame))
fp = np.sum(np.logical_and(predict_issame, np.logical_not(actual_issame)))
tn = np.sum(np.logical_and(np.logical_not(predict_issame),
np.logical_not(actual_issame)))
fn = np.sum(np.logical_and(np.logical_not(predict_issame), actual_issame))
tpr = 0 if (tp + fn == 0) else float(tp) / float(tp + fn)
fpr = 0 if (fp + tn == 0) else float(fp) / float(fp + tn)
acc = float(tp + tn) / dist.size
return tpr, fpr, acc
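# Quick illustration (not in the original file) of calculate_accuracy on
# synthetic squared-L2 distances: pairs with dist < threshold are predicted
# "same", so this toy split is classified perfectly.
if __name__ == '__main__':
    _demo_dist = np.array([0.2, 0.4, 1.5, 2.0])
    _demo_issame = np.array([True, True, False, False])
    print(calculate_accuracy(1.0, _demo_dist, _demo_issame))  # (1.0, 0.0, 1.0)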
def evaluate(embeddings, actual_issame, nrof_folds=10, pca=0):
""" evaluate function
"""
thresholds = np.arange(0, 4, 0.01)
embeddings1 = embeddings[0::2]
embeddings2 = embeddings[1::2]
tpr, fpr, accuracy, best_thresholds, bad_case = calculate_roc(
thresholds,
embeddings1,
embeddings2,
np.asarray(actual_issame),
nrof_folds=nrof_folds,
pca=pca)
return tpr, fpr, accuracy, best_thresholds, bad_case
def gen_plot(fpr, tpr):
""" plot roc curve
"""
import matplotlib.pyplot as plt
plt.switch_backend('agg')
import io
plt.figure()
plt.xlabel("FPR", fontsize=14)
plt.ylabel("TPR", fontsize=14)
plt.title("ROC Curve", fontsize=14)
plt.plot(fpr, tpr, linewidth=2)
buf = io.BytesIO()
plt.savefig(buf, format='jpeg')
buf.seek(0)
plt.close()
return buf
def perform_val(embedding_size,
batch_size,
backbone,
carray,
issame,
nrof_folds=10,
tta=True):
""" Perform accuracy and threshold with the carray is read from bcolz dir.
When tta is set True, each test sample should be fliped, then the embedding
is fused by the original one and the fliped one.
"""
backbone.eval()
idx = 0
embeddings = np.zeros([len(carray), embedding_size])
with torch.no_grad():
while idx + batch_size <= len(carray):
batch = torch.tensor(carray[idx:idx + batch_size][:, [2, 1, 0], :, :])
if tta:
ccropped = ccrop_batch(batch)
fliped = hflip_batch(ccropped)
emb_batch = backbone(ccropped.cuda()).cpu() + backbone(fliped.cuda()).cpu()
embeddings[idx:idx + batch_size] = l2_norm(emb_batch)
else:
ccropped = ccrop_batch(batch)
embeddings[idx:idx + batch_size] = l2_norm(backbone(ccropped.cuda())).cpu()
idx += batch_size
if idx < len(carray):
batch = torch.tensor(carray[idx:][:, [2, 1, 0], :, :])
if tta:
ccropped = ccrop_batch(batch)
fliped = hflip_batch(ccropped)
emb_batch = backbone(ccropped.cuda()).cpu() + backbone(fliped.cuda()).cpu()
embeddings[idx:] = l2_norm(emb_batch)
else:
ccropped = ccrop_batch(batch)
embeddings[idx:] = l2_norm(backbone(ccropped.cuda())).cpu()
tpr, fpr, accuracy, best_thresholds, bad_case = evaluate(embeddings, issame, nrof_folds)
return accuracy.mean(), best_thresholds.mean()
def perform_val_bin(embedding_size,
batch_size,
backbone,
carray,
issame,
nrof_folds=10,
tta=True):
""" Perform accuracy and threshold with the carray is read from bin.
When tta is set True, each test sample should be fliped, then the embedding
is fused by the original one and the fliped one.
"""
backbone.eval()
idx = 0
embeddings = np.zeros([len(carray), embedding_size])
with torch.no_grad():
while idx + batch_size <= len(carray):
batch = torch.tensor(carray[idx:idx + batch_size])
if tta:
ccropped = batch
fliped = torch.flip(ccropped, dims=[3])
emb_batch = backbone(ccropped.cuda()).cpu() + backbone(fliped.cuda()).cpu()
embeddings[idx:idx + batch_size] = l2_norm(emb_batch)
else:
ccropped = batch
embeddings[idx:idx + batch_size] = l2_norm(backbone(ccropped.cuda())).cpu()
idx += batch_size
if idx < len(carray):
batch = torch.tensor(carray[idx:])
if tta:
ccropped = batch
fliped = torch.flip(ccropped, dims=[3])
emb_batch = backbone(ccropped.cuda()).cpu() + backbone(fliped.cuda()).cpu()
embeddings[idx:] = l2_norm(emb_batch)
else:
ccropped = batch
embeddings[idx:] = l2_norm(backbone(ccropped.cuda())).cpu()
tpr, fpr, accuracy, best_thresholds, bad_case = evaluate(embeddings, issame, nrof_folds)
return accuracy.mean(), best_thresholds.mean()
def rfw_evaluate(embeddings, actual_issame, nrof_folds=10, pca = 0):
"""evaluate fucntion
"""
thresholds = np.arange(-1, 1, 0.001)
embeddings1 = embeddings[0::2]
embeddings2 = embeddings[1::2]
tpr, fpr, accuracy, _, _ = calculate_roc(thresholds, embeddings1, embeddings2,
np.asarray(actual_issame), nrof_folds=nrof_folds, pca = pca)
thresholds = np.arange(-1, 1, 0.001)
val, val_std, far = rfw_calculate_val(thresholds, embeddings1, embeddings2,
np.asarray(actual_issame), 1e-3, nrof_folds=nrof_folds)
return tpr, fpr, accuracy, val, val_std, far
def rfw_calculate_val(thresholds, embeddings1, embeddings2, actual_issame, far_target, nrof_folds=10):
"""evaluate fucntion
"""
assert(embeddings1.shape[0] == embeddings2.shape[0])
assert(embeddings1.shape[1] == embeddings2.shape[1])
nrof_pairs = min(len(actual_issame), embeddings1.shape[0])
nrof_thresholds = len(thresholds)
k_fold = KFold(n_splits=nrof_folds, shuffle=False)
val = np.zeros(nrof_folds)
far = np.zeros(nrof_folds)
veclist = np.concatenate((embeddings1, embeddings2), axis=0)
mean_ = np.mean(veclist, axis=0)
embeddings1 -= mean_
embeddings2 -= mean_
dist = np.sum(embeddings1 * embeddings2, axis=1)
dist = dist / line.norm(embeddings1, axis=1) / line.norm(embeddings2, axis=1)
indices = np.arange(nrof_pairs)
for fold_idx, (train_set, test_set) in enumerate(k_fold.split(indices)):
far_train = np.zeros(nrof_thresholds)
for threshold_idx, threshold in enumerate(thresholds):
_, far_train[threshold_idx] = rfw_calculate_val_far(threshold, dist[train_set], actual_issame[train_set])
if np.max(far_train) >= far_target:
f = interpolate.interp1d(far_train, thresholds, kind='slinear')
threshold = f(far_target)
else:
threshold = 0.0
val[fold_idx], far[fold_idx] = rfw_calculate_val_far(threshold, dist[test_set], actual_issame[test_set])
val_mean = np.mean(val)
far_mean = np.mean(far)
val_std = np.std(val)
return val_mean, val_std, far_mean
def rfw_calculate_val_far(threshold, dist, actual_issame):
"""evaluate fucntion
"""
predict_issame = np.less(dist, threshold)
true_accept = np.sum(np.logical_and(predict_issame, actual_issame))
false_accept = np.sum(np.logical_and(predict_issame, np.logical_not(actual_issame)))
n_same = np.sum(actual_issame)
n_diff = np.sum(np.logical_not(actual_issame))
val = float(true_accept) / float(n_same)
far = float(false_accept) / float(n_diff)
return val, far
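# Toy check (not in the original file) of the VAL/FAR definitions above: at
# threshold 0.5 both "same" pairs are accepted (val = 1.0) and one of the two
# "different" pairs is falsely accepted (far = 0.5).
if __name__ == '__main__':
    _demo_dist = np.array([0.1, 0.3, 0.4, 0.9])
    _demo_issame = np.array([True, True, False, False])
    print(rfw_calculate_val_far(0.5, _demo_dist, _demo_issame))  # (1.0, 0.5)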
def perform_rfw_val_bin(data_set, model, device, batch_size=64, nfolds=10):
""" Perform accuracy and threshold with the carray is read from bin.
"""
data_list = data_set[0]
issame_list = data_set[1]
embeddings_list = []
with torch.no_grad():
for i in range(len(data_list)):
data = data_list[i]
embeddings = None
ba = 0
while ba < data.shape[0]:
bb = min(ba+batch_size, data.shape[0])
count = bb-ba
                # pad the tail batch by re-reading a full batch_size window
                # ending at bb, then keep only the last `count` rows below
                _data = torch.tensor(data[bb-batch_size:bb, ...]).to(device)
_embeddings = model(_data).cpu().numpy()
if embeddings is None:
embeddings = np.zeros((data.shape[0], _embeddings.shape[1]))
embeddings[ba:bb, :] = _embeddings[(batch_size-count):, :]
ba = bb
embeddings_list.append(embeddings)
_xnorm = 0.0
_xnorm_cnt = 0
for embed in embeddings_list:
for i in range(embed.shape[0]):
_em = embed[i]
_norm = np.linalg.norm(_em)
_xnorm += _norm
_xnorm_cnt += 1
_xnorm /= _xnorm_cnt
acc1 = 0.0
std1 = 0.0
embeddings = embeddings_list[0] + embeddings_list[1]
embeddings = sklearn.preprocessing.normalize(embeddings)
_, _, accuracy, val, val_std, far = rfw_evaluate(embeddings, issame_list, nrof_folds=nfolds)
acc2, std2 = np.mean(accuracy), np.std(accuracy)
return acc1, std1, acc2, std2, _xnorm, embeddings_list
|
{"hexsha": "5bf42bea9ec4895bbf3b5898f33091b85787840d", "size": 13752, "ext": "py", "lang": "Python", "max_stars_repo_path": "test/utils.py", "max_stars_repo_name": "sarvex/TFace", "max_stars_repo_head_hexsha": "b3d8a1392816e0d941425c30ad843d185e286431", "max_stars_repo_licenses": ["PSF-2.0"], "max_stars_count": 764, "max_stars_repo_stars_event_min_datetime": "2021-05-26T15:40:25.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T03:26:31.000Z", "max_issues_repo_path": "test/utils.py", "max_issues_repo_name": "Charlee-du/TFace", "max_issues_repo_head_hexsha": "490cf90a1f042b86d7d03042f26d0a7cf6b1f0c0", "max_issues_repo_licenses": ["PSF-2.0"], "max_issues_count": 45, "max_issues_repo_issues_event_min_datetime": "2021-06-07T12:57:19.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-25T16:04:54.000Z", "max_forks_repo_path": "test/utils.py", "max_forks_repo_name": "Charlee-du/TFace", "max_forks_repo_head_hexsha": "490cf90a1f042b86d7d03042f26d0a7cf6b1f0c0", "max_forks_repo_licenses": ["PSF-2.0"], "max_forks_count": 139, "max_forks_repo_forks_event_min_datetime": "2021-06-04T09:25:21.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T22:49:23.000Z", "avg_line_length": 37.5737704918, "max_line_length": 117, "alphanum_fraction": 0.609220477, "include": true, "reason": "import numpy,from numpy,from scipy", "num_tokens": 3475}
|
\documentclass{article}
\usepackage{hyperref}
\usepackage{amsmath,amssymb}
\usepackage{graphicx}
\usepackage{caption}
\usepackage{subcaption}
\usepackage{color}
\usepackage[section]{placeins}
\usepackage{listings}
\title{Combustion Theory Final Take Home Exam}
\author{Nicholas Malaya\\ Department of Mechanical Engineering \\
University of Texas at Austin}
\date{}
\begin{document}
\maketitle
\newpage
\section*{Turbulent diffusion flames}
In this problem, I want you to assume that the system is turbulent and
that you know the turbulence mass diffusivity ($D_T$) is 10 times the
laminar value. Assume that the fluctuation squared of the mixture
fraction is equal to the gradient of the mean mixture fraction squared
multiplied by the characteristic diffusion length scale squared, i.e.
\begin{equation}
\bar{z'z'} = \frac{1}{2} (\nabla \bar z)^2 \frac{D_T L_x}{u}
\end{equation}
\subsection*{a) Write down the solution for the mean mixture fraction
field with the turbulent diffusivity.}
We start with the steady state species equation,
\begin{equation}
\rho u \frac{\partial Y_i}{\partial x} = \rho D \frac{\partial^2
Y_i}{\partial y^2} \pm \omega_i.
\end{equation}
We note that,
\begin{equation}
\omega = \frac{\omega_i}{\nu_i W_i} = \frac{\omega_F}{\nu_F W_F} =
\frac{\omega_O}{\nu_O W_O}.
\end{equation}
This hints at a conserved scalar form of the species equation: defining
\begin{equation}
\beta = \frac{Y_F}{\nu_F W_F} - \frac{Y_O}{\nu_O W_O},
\end{equation}
then our reaction is decoupled from the convection-diffusion of a
conserved scalar quantity. In particular,
\begin{equation}
\mathcal{L}(\beta) = \frac{\omega_F}{\nu_F W_F} -
\frac{\omega_O}{\nu_O W_O} = 0
\Rightarrow \rho u \frac{\partial \beta}{\partial x} - \rho D \frac{\partial^2
\beta}{\partial y^2} = 0.
\end{equation}
Now, we construct z,
\begin{equation}
z = \frac{\beta -\beta_{O}}{\beta_F - \beta_{O}}
\end{equation}
Here, the boundary conditions are that $z=1$ for all $y>0$ and $x<0$
(i.e. the fuel stream) and $z=0$ for $y<0$ and $x<0$ (i.e. the oxidizer
stream).
Thus, we are solving,
\begin{equation}
\rho u \frac{\partial z}{\partial x} - \rho D \frac{\partial^2
z}{\partial y^2} = 0.
\end{equation}
and,
\begin{equation}
\rho u \frac{\partial \bar z}{\partial x} - \rho D_T \frac{\partial^2
\bar z}{\partial y^2} = 0.
\end{equation}
where the first equation governs the laminar flow and the latter the
Favre-averaged mean field. We now need to discretize this equation in
order to solve it numerically (it looks like it would be a trainwreck to
solve analytically!). We will completely wimp out, and only use finite
difference methods,
\begin{align}
\frac{\partial \bar z}{\partial x} &= \frac{\bar z_{i+1}-\bar z_{i}}{\Delta x} \\
\label{first}
\frac{\partial^2 \bar z}{\partial y^2} &= \frac{\bar z_{j+1}-2\bar z_{j}+\bar z_{j-1}}{\Delta y^2}
\end{align}
The boundary conditions are $\bar z = 1$ $\forall x<0,y>0$, $\bar z = 0$
$\forall x<0,y<0$. I additionally imposed a Neumann (zero flux) boundary
condition on the top and bottom of the box, essentially forcing the
derivatives to zero at $\pm \infty$. As with the previous examination,
we are now in a position to instantiate this on a computer using python
to solve for the $\bar z(x,y)$ field.
As an interesting implementation detail, because the equation is only
first order in x, we only need one boundary condition for that
direction, as shown above. This is equivalent to an initial condition on
a first order (in time) ODE. Thus, we do not actually need to store our
entire x-domain in memory, but can simply solve for all y-values at our
particular x-coordinate, and then step forward in space and solve for
the next grid location.
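As a concrete illustration of this marching strategy, here is a minimal
sketch (this is \emph{not} the contents of flow.py; all variable names
and values are illustrative):
\begin{lstlisting}[language=Python]
import numpy as np
ny, dx, dy, u, DT = 201, 1e-3, 1e-3, 1.0, 1e-4
z = np.where(np.arange(ny) > ny // 2, 1.0, 0.0)  # sharp interface at x=0
for step in range(500):                          # march downstream in x
    lap = np.zeros_like(z)
    lap[1:-1] = (z[2:] - 2*z[1:-1] + z[:-2]) / dy**2
    z = z + (DT / u) * dx * lap                  # zero-flux ends stay fixed
\end{lstlisting}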
%
%
%
%
\subsection*{b) Plot the fluctuation and mean value of the mixture
fraction at 5 cm, 30 cm, and 50 cm.}
\begin{figure}[!htb]
\begin{center}
\includegraphics[width = 12 cm]{figs/mean.pdf}
\caption{The mixture fraction plotted as a function of y.}
\label{mean}
\end{center}
\end{figure}
The method to arrive at the mean value mixture fraction was described
above. The value of the mixture fraction at several locations is plotted
in figure \ref{mean}. The flow at $x=0$ cm is a sharp interface between
the fuel and the oxidizer. As you move downstream, the fuel and oxidants
mix, which will create a mixing layer that diffuses out and increases in
y-width as a function of distance downstream.
The fluctuation $\bar{z'z'}$
must also be determined. Normally, this would require solving another
differential equation, and potentially using submodels for the scalar
dissipation rate as well. However, we were given a simplified expression
(model) for the variance, namely,
\begin{equation}
\bar{z'z'} = \frac{1}{2} (\nabla \bar z)^2 \frac{D_T L_x}{u}.
\end{equation}
Variations in y will be much larger than variations in x, and this
expression
can be simplified to be,
\begin{equation}
\bar{z'z'} = \frac{1}{2} \left(\frac{\partial \bar z}{\partial y}\right)^2 \frac{D_T L_x}{u}.
\label{fluceq}
\end{equation}
This expression is a model for the variance of the mixture
fraction. Intuitively, this expression is reasonable, as we expect the
variance (in some sense, our uncertainty) of the value of the mixture
fraction to be largest in regions with large gradients. In the mixing
layer, the gradient will be quite large near the layer ($y=0,x=0$) and expanding outward at larger x.
We discretize equation \ref{fluceq} using the finite difference scheme
shown in equation \ref{first}, although now the indices are changed
from i to j, to reflect the different direction of the
derivative. However, upon running the code, I found that a forward
finite difference was very numerically noisy for the solution at $x=5$
cm. This was because the interface at this distance is still quite
sharp, and so squaring the derivative ``blew up'' the noise. I therefore
switched to a centered finite difference scheme, namely:
\begin{equation}
\frac{\partial \bar z}{\partial y} = \frac{\bar z_{j+1}-\bar z_{j-1}}{2\Delta y}.
\end{equation}
\begin{figure}[!htb]
\begin{center}
\includegraphics[width = 12 cm]{figs/fluc.pdf}
\caption{The mixture fraction variance plotted as a function of y.}
\label{fluc}
\end{center}
\end{figure}
The results are plotted in figure \ref{fluc}. The figure shows that the
variance is significant only near the sharp interface: outside of just a
few centimeters in y, it is very nearly zero, implying essentially
laminar flow. As we move downstream, the pdf opens up, as the mixing
layer grows and entrains more fluid around it. The peak also grows,
implying that the turbulence reaches a higher Reynolds number, with
larger fluctuations.
%
%
%
%
\subsection*{c) Plot the PDF of the mixture fraction at two points, $y=0$ cm,
$x=30$ and at $y=15$ cm, $x=30$ cm.}
$P(z)\,\Delta z$ is the probability of finding the mixture fraction
between $z$ and $z+\Delta z$. If we knew the probability density
function we could calculate the statistics directly. However, we
don't know $P(z)$. Instead, we will use what Peters calls the
``presumed shape PDF approach''. We must pick a probability
distribution. We are limited to two-parameter distributions for the
model to be closed, because we only possess $\bar z$ and
$\bar{z'z'}$. Essentially, our choice is between a clipped Gaussian and
the beta distribution. We will use the beta distribution, as
it should have much more appropriate limit behavior for the mixing
layer. In particular, due to the effect of intermittency, we expect the
edges of the mixing layer to act like a delta function. This effect can
be captured by the beta distribution. The beta pdf has
the form,
\begin{equation}
P(z) = \frac{z^{\alpha-1}(1-z)^{\beta-1}}{\Gamma(\alpha)\Gamma(\beta)}\,\Gamma(\alpha + \beta)
\end{equation}
We further define,
\begin{align}
\alpha &= \bar z \gamma \\
\beta &= (1-\bar z) \gamma
\end{align}
Where the variable $\gamma$ is defined as,
\begin{equation}
\gamma = \frac{\bar z (1-\bar z)}{\bar{z'z'}} -1 \geq 0.
\end{equation}
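Numerically, constructing the presumed pdf at a grid point is
straightforward with SciPy (a sketch; the values of $\bar z$ and
$\bar{z'z'}$ here are illustrative):
\begin{lstlisting}[language=Python]
import numpy as np
from scipy.stats import beta as beta_dist
zbar, zvar = 0.53, 0.02                  # mean and variance at one point
gamma = zbar*(1 - zbar)/zvar - 1.0
a, b = zbar*gamma, (1 - zbar)*gamma
zs = np.linspace(1e-6, 1 - 1e-6, 500)
pdf = beta_dist.pdf(zs, a, b)            # presumed P(z) at this location
\end{lstlisting}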
For the two locations, we can already predict what the distributions
will look like. The first, at $y=0$, will be roughly Gaussian, with a
mean centered around the expected value of the mixture fraction at that
location. Given that we expect the mixture fraction to be one half at that
location, the mean should be around that value as well. For the location
off-center, we expect a distribution that is skewed towards the side
with more fuel or oxidizer. This is the oxidizer side, so we expect the
mixture fraction pdf to have a mean well below one half. Furthermore, we note
from figure \ref{mean} that this is far away from the mixing layer. We
expect there to be essentially no mixing this far away, and therefore,
the distribution should be essentially completely oxidizer, i.e. a
distribution very sharply pushed up against $z=0$.
\begin{figure}[!htb]
\begin{center}
\includegraphics[width = 12 cm]{figs/pdf.pdf}
\caption{The distribution of the mixture fraction, using the assumed
pdf approach (with an assumed $\beta$ distribution). The green line
on the right is at $x=30,y=0$, and the blue distribution is at $x=30,y=15$.}
\label{pdf}
\end{center}
\end{figure}
The results of this when instantiated numerically are shown in figure
\ref{pdf}. The results are precisely what we expected. The mean of the
distribution at $x=30,y=0$ is not exactly 0.5, but it is nearly so
(0.53), and the distribution is not noticeably skewed to either the fuel
or oxidizer side. I expect that this is accurate. One might note that
the beta distribution, despite how sharp it is near zero, still gives
non-trivial weight to small but nonzero mixture fractions. Personally, I
suspect this is an over-estimate, and it is likely a weakness of the
assumed beta distribution for this edge case. However, note that
assuming a Gaussian pdf would break down even more severely here.
%
%
%
%
\subsection*{d) Plot the laminar and mean turbulent temperature
distributions at $x=30$ cm.}
We already know that the laminar solution will have a linear temperature
profile in the mixture fraction, from the Burke-Schumann solution. This
takes the form
\begin{equation}
T(z) = a z + b.
\label{eq}
\end{equation}
This is equivalent to saying that at each grid point, we expect the
temperature distribution to be a delta function around the temperature
predicted by the mixture fraction at that location.
The turbulent system is more complicated. Now, we expect the non-zero
variance in mixing fraction concentration to impact the temperature.
In order to account for the turbulent fluctuations, we need to integrate
over the probability density, e.g.
\begin{equation}
\tilde T = \int_0^1 P(z)\, T(z)\, dz.
\end{equation}
Using equation \ref{eq}, this is equivalent to
\begin{equation}
\tilde T = \int_0^1 P(z)\, (a z + b)\, dz.
\end{equation}
Essentially, this ``blurs'' the temperature profile by averaging each
location with its neighboring temperatures.
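A sketch of this averaging step, continuing the listing above (the
coefficients of the linear profile are illustrative):
\begin{lstlisting}[language=Python]
a_coef, b_coef = 2000.0, 300.0              # illustrative T(z) = a z + b
T_turb = np.trapz(pdf*(a_coef*zs + b_coef), zs)
\end{lstlisting}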
\begin{figure}[!htb]
\begin{center}
\includegraphics[width = 12 cm]{figs/temperature.pdf}
\caption{The temperature profiles for the laminar (blue) and
turbulent (green) simulations at $x=30$.}
\label{temp}
\end{center}
\end{figure}
These results are plotted in figure \ref{temp}. These profiles are
qualitatively similar to what we expected. The peak is lower for the
turbulent profile, but the temperature peak is wider and the temperature
is elevated farther away from the mixing layer, due to the increased
mixing from the turbulence. I was surprised that the slope is
not perfectly linear in the case of the laminar profile.
\subsection*{e) Discuss the results.}
The entire formulation utilized chemical
equilibrium models to find the long-time stable solution. The model
cannot predict extinction or ignition. This also assumes that the
chemical time scales are much smaller than the turbulence time scales,
i.e. that the Damk\"ohler number is large.
We are also ignoring instantaneous turbulence effects, on account of
using a Favre-Averaging scheme, instead of resolving the fine turbulence
scales. So these results are certainly not expected to be accurate
instantaneously, but only represent mean quantities. Furthermore, they may
be far from realistic for some time, as the flow transitions to
turbulence and large eddies form downstream of the splitter.
Finally, we either ``turned-on'' or ``turned-off'' the turbulence
(e.g. it was fully developed turbulence, or completely laminar). In
reality, the flow might be intermittent or laminar away from the mixing
layer at $y=0$ and turbulent near it. So a mixing of the models might be
more appropriate. Ideally, the flow would be fully turbulent near the
centerline, intermittent at the edges of the layer, and laminar
outside. This could be more accurately modeled using the ``composite PDF
approach for intermittency''.
Given these assumptions, we would have to be careful using the results
of this model in a predictive context. Those caveats aside, the results
have the correct qualitative character for a flame, and so are certainly
much more useful than only expert opinion or conjecture. This is not to be too
negative: this is an extremely complex system we are simulating, and I
am able to generate all the plots used in this report in a few seconds on
my computer.
\newpage
All the work contained in this report was entirely my own.
Thank you for the class!
\vspace{1in}
\newline
References:
``Turbulent Combustion'', Norbert Peters
``Combustion Physics'', Chung K. Law
\subsection*{Code}
I wrote these routines entirely from scratch, using Python 2.X. The only
libraries necessary to run these routines should be NumPy, SciPy and
Matplotlib.
\lstinputlisting{flow.py}
\end{document}
|
{"hexsha": "077796d85458aee5d2660e6e69735ac18593d7a9", "size": 13960, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "combustion/final/report.tex", "max_stars_repo_name": "nicholasmalaya/paleologos", "max_stars_repo_head_hexsha": "11959056caa80d3c910759b714a0f8e42f986f0f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-11-04T17:49:42.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-04T17:49:42.000Z", "max_issues_repo_path": "combustion/final/report.tex", "max_issues_repo_name": "nicholasmalaya/paleologos", "max_issues_repo_head_hexsha": "11959056caa80d3c910759b714a0f8e42f986f0f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "combustion/final/report.tex", "max_forks_repo_name": "nicholasmalaya/paleologos", "max_forks_repo_head_hexsha": "11959056caa80d3c910759b714a0f8e42f986f0f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2019-01-04T16:08:18.000Z", "max_forks_repo_forks_event_max_datetime": "2019-12-16T19:34:24.000Z", "avg_line_length": 40.6997084548, "max_line_length": 107, "alphanum_fraction": 0.7476361032, "num_tokens": 3798}
|
using Interpolations
include("UnitConversions.jl")
"""
Fluid(gravityAPI::T, gasGravity::T, solutionGOR::T)
Creates a struct to represent a crude oil sample.
All properties must be given at standard conditions (60F, 14.5 psia)
"""
mutable struct Fluid{T}
gravityAPI::T
gasGravity::T
solutionGOR::T
end
"""
oilGravity(fluid::Fluid)
Compute the oil specific gravity from the fluid's API gravity.
"""
oilGravity(fluid::Fluid) = 141.5/(131.5+fluid.gravityAPI)
"""
oilGravity(density::Float64)
Compute the oil gravity if density in lb/cuft is given
"""
oilGravity(density) = density/62.4
"""
oilDensity(f::Fluid, temperature::Float64)
Compute the oil density in lb/cuft given a fluid and a temperature in Fahrenheit
"""
function oilDensity(fluid::Fluid, temperature)
oilGrav = oilGravity(fluid)
rs = fluid.solutionGOR
gasGrav = fluid.gasGravity
num = 62.4*oilGrav+0.0136*rs*gasGrav
den = 0.972+0.000147*(rs*((gasGrav/oilGrav)^0.5)+(1.25*(temperature)))^1.175
return num/den
end
"""
    adiabaticOilBulkModulus(fluid::Fluid, temperature::Float64, pressure::Float64)
Compute the adiabatic oil bulk modulus in psi using the ARCO formula.
Temperature must be given in Fahrenheit.
Pressure must be given in psia
"""
function adiabaticOilBulkModulus(fluid::Fluid, temperature, pressure)
a,b,c,d,e,f = 1.286e6, 13.55, 4.122e4, 4.53e3, 10.59, 3.228
temp = fahrenhetToRankine(temperature)
ka = a+b*pressure-c*(temp^0.5)-d*(fluid.gravityAPI)-e*(fluid.gravityAPI^2)+f*temp*(fluid.gravityAPI)
end
"""
pressureGradient(density::Float64)
Compute the pressure gradient given a density in lb/cuft
"""
pressureGradient(density::Float64) = density/144.0
"""
    adiabaticWaterBulkModulus(temperature::Float64, pressure::Float64)
Calculates the adiabatic bulk modulus for water.
Temperature must be in Fahrenheit
Pressure must be in psia.
"""
function adiabaticWaterBulkModulus(temperature::Float64, pressure::Float64)
tempArray = [32, 50, 68, 86, 104, 122, 140, 158, 176, 194, 212]
kosArray = [289, 308, 323, 333, 340, 345, 348, 348, 341, 342, 336]
ipt = LinearInterpolation(tempArray, kosArray)
ko = ipt(temperature)
return 1000*ko+3.4*pressure
end
"""
gasDensity(fluid::Fluid, pressure::Float64, temperature::Float64)
Calculates gas density in lb/cuft.
pressure must be in psia
temperature must be in Fahrenheit
"""
function gasDensity(fluid::Fluid, pressure::Float64, temperature::Float64)
ma = 28.96*fluid.gasGravity
temp = fahrenhetToRankine(temperature)
return (pressure*ma)/(10.73*temp)
end
"""
    mixtureDensity(oilDensity, waterDensity, waterFraction)
Water-cut weighted average of the oil and water densities.
"""
function mixtureDensity(oilDensity::Float64, waterDensity::Float64, waterFraction::Float64)
return oilDensity*(1-waterFraction)+waterDensity*waterFraction
end
"""
    mixtureBulkModulus(oilBulkModulus, waterBulkModulus, waterFraction)
Water-cut weighted average of the oil and water bulk moduli.
"""
function mixtureBulkModulus(oilBulkModulus::Float64, waterBulkModulus::Float64, waterFraction::Float64)
return oilBulkModulus*(1-waterFraction)+waterBulkModulus*waterFraction
end
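# Minimal usage sketch (not part of the original module); the fluid property
# values below are illustrative only.
if abspath(PROGRAM_FILE) == @__FILE__
    crude = Fluid(35.0, 0.85, 600.0)   # API gravity, gas gravity, solution GOR
    rho_o = oilDensity(crude, 180.0)   # oil density in lb/cuft at 180 F
    println("oil density = ", rho_o, " lb/cuft, gradient = ",
            pressureGradient(rho_o), " psi/ft")
end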
|
{"hexsha": "0a8c3e3758333ff0d9dc5cff013733c17d1ac0e9", "size": 3071, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/Fluid.jl", "max_stars_repo_name": "leytzher/Shot", "max_stars_repo_head_hexsha": "b35d2dab13e74dc1cd251197029ae31d291fdbe4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/Fluid.jl", "max_issues_repo_name": "leytzher/Shot", "max_issues_repo_head_hexsha": "b35d2dab13e74dc1cd251197029ae31d291fdbe4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/Fluid.jl", "max_forks_repo_name": "leytzher/Shot", "max_forks_repo_head_hexsha": "b35d2dab13e74dc1cd251197029ae31d291fdbe4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.1743119266, "max_line_length": 104, "alphanum_fraction": 0.732985998, "num_tokens": 903}
|
#!/usr/bin/env python
import sys
import os
import fnmatch
import re
import traceback
import random
import math
import matplotlib.pyplot as plt; plt.rcdefaults()
import numpy as np
obj = ["#Satisfied","#CRCs","#CLCs","#CLCDiff","#nodeDiff","#flowDiff","CRCpathlength","CLCpathlength","CLCload","controlRatio","runtime", "netmodtime", "realtime", "tmod", "tgen", "tstop"]
used_mod_time = "netmodtime"
class TrafficDistribution:
def __init__(self, logDir,network,scenario):
self.LogDir = logDir
self.network = network
self.scenario = scenario
self.FileList = []
self.AllTraffic = {}
def readclientlogfile(self):
totalClientFile = 0
totalFlowConnected = 0
totalFlowStatistics = 0
# read input file
try:
filename = "emu_results_MaxiNet_7200_" + str(self.scenario) + "_Network_" + str(self.network) + ".dat"
filepath = self.LogDir + "/" + filename
self.FileList.append(filepath)
timepat = "Time: *"
systimepat = "System Time Passed: *"
flowsatpat = "Flows satisfied: *"
for file in self.FileList:
print file
tfile = os.path.basename(file)
self.AllTraffic[tfile] = []
fin = open(file, "r")
tmp = fin.readline()
while True:
tmp = fin.readline().split(" ")
entry = {}
try:
# entry["counter"] += 1
entry["time"] = float(tmp[0])
entry["flows"] = int(tmp[1])
for i in range(0, 6):
entry[obj[i]] = int(tmp[i + 2])
for i in range(6, len(obj)):
entry[obj[i]] = float(tmp[i + 2])
self.AllTraffic[tfile].append(entry)
except:
#traceback.print_exc(file=sys.stdout)
break
emutime = []
algodifftime = []
emudifftime = []
totalgodifftime = 0
totemudifftime = 0
for entry in self.AllTraffic[filename]:
emutime.append(entry['time'])
algodifftime.append(entry['runtime'] * 1000)
totalgodifftime = totalgodifftime + entry['runtime'] * 1000
emudifftime.append(entry[used_mod_time] * 1000)
totemudifftime = totemudifftime + entry[used_mod_time] * 1000
plt.plot(emutime, algodifftime, marker='o', color='c', label="FlexCAPF runtime")
plt.plot(emutime, emudifftime, marker='^', color='r', label="Reconfiguration time")
# figtxt = "Average = " + str(totalgodifftime/len(algodifftime))
# plt.figtext(0.15, 0.91, figtxt, bbox=dict(facecolor='cyan'))
# figtxt = "Average = " + str(totemudifftime/len(emudifftime))
# plt.figtext(0.6, 0.91, figtxt, bbox=dict(facecolor='red'))
plt.ylabel('Runtime (ms)')
plt.xlabel('Emulation time in seconds')
# plt.title('Emulation Time Vs Runtime')
plt.legend(loc='best', shadow=False, ncol=1)
                plt.savefig('plots/plot_emu_rt_' + str(self.network) + '_' + str(self.scenario) + '.pdf', bbox_inches='tight')
except:
traceback.print_exc(file=sys.stdout)
#traceback.print_stack()
return False
if '__main__' == __name__:
logDir = "results"
network = str(sys.argv[1])
scenario = str(sys.argv[2])
tf = TrafficDistribution(logDir,network,scenario)
tf.readclientlogfile()
|
{"hexsha": "83433f5a7dd1e5566bc3838ba4d2a3f3feddb0ab", "size": 3039, "ext": "py", "lang": "Python", "max_stars_repo_path": "testbed/runtimeplot.py", "max_stars_repo_name": "CN-UPB/fcapp", "max_stars_repo_head_hexsha": "ca8708b74b84dcf29e966635e7ba91b31d358a52", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "testbed/runtimeplot.py", "max_issues_repo_name": "CN-UPB/fcapp", "max_issues_repo_head_hexsha": "ca8708b74b84dcf29e966635e7ba91b31d358a52", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "testbed/runtimeplot.py", "max_forks_repo_name": "CN-UPB/fcapp", "max_forks_repo_head_hexsha": "ca8708b74b84dcf29e966635e7ba91b31d358a52", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-02-06T05:53:57.000Z", "max_forks_repo_forks_event_max_datetime": "2019-02-06T05:53:57.000Z", "avg_line_length": 31.0102040816, "max_line_length": 189, "alphanum_fraction": 0.6640342218, "include": true, "reason": "import numpy", "num_tokens": 915}
|
# Copyright (c) 2021, Greg Michael
# Licensed under BSD 3-Clause License. See LICENSE.txt for details.
import math

import numpy as np
import gm
def poisson(k,lam,cumulative=False):
#poisson pmf
#vectorised in lambda, but not k
threshold=23 #threshold for switching to normal approximation
if k<threshold: #poisson
        if cumulative:
            res = 0.
            for i in range(k+1):
                res += (lam**i) / math.factorial(i)  # math.factorial avoids the deprecated np.math alias
            return np.exp(-lam) * res
        else:
            return lam**k * np.exp(-lam) / math.factorial(k)
else: #normal
if cumulative:
return gm.normal(lam, np.sqrt(lam), k, cumulative=True)
else:
return gm.normal(lam, np.sqrt(lam), k)
#
# import matplotlib.pyplot as plt
#
# if __name__ == '__main__':
#
# ns=1000
# x=np.linspace(0,100,ns)
# fig, (ax1, ax2) = plt.subplot(1, 2)
# for i in range(1,99):
# ax1.plot(x,poisson(i,x),color='0' if i<30 else '0.')
# plt.show()
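if __name__ == '__main__':
    # Quick sanity check (not part of the original file): summing the exact
    # pmf over k = 0..9 should match the cumulative value at k = 9.
    lam = 4.0
    total = sum(poisson(k, lam) for k in range(10))
    print(total, poisson(9, lam, cumulative=True))  # both approx 0.992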
|
{"hexsha": "6b0c0b76014d188fde3d0247e87192c74d056172", "size": 1011, "ext": "py", "lang": "Python", "max_stars_repo_path": "gm/maths/poisson.py", "max_stars_repo_name": "thareUSGS/craterstats", "max_stars_repo_head_hexsha": "7c3cdc3c55f8ba357c8e0e17a87e28e3d48b1d0d", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-03-19T09:17:06.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-19T09:17:06.000Z", "max_issues_repo_path": "gm/maths/poisson.py", "max_issues_repo_name": "thareUSGS/craterstats", "max_issues_repo_head_hexsha": "7c3cdc3c55f8ba357c8e0e17a87e28e3d48b1d0d", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "gm/maths/poisson.py", "max_forks_repo_name": "thareUSGS/craterstats", "max_forks_repo_head_hexsha": "7c3cdc3c55f8ba357c8e0e17a87e28e3d48b1d0d", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.5106382979, "max_line_length": 68, "alphanum_fraction": 0.5746785361, "include": true, "reason": "import numpy", "num_tokens": 297}
|
import numpy as np
lines = np.loadtxt("test_3D_VITESSE_U.son", comments="#", delimiter=" ", unpack=False)
# expected solution u(t) = t/2 + 0.1; compare column 3 of the probe to it
time = lines[:, 0]
uan = lines[:, 0] / 2 + 0.1
err = np.abs((lines[:, 3] - uan) / uan)
n = len(lines) - 1
print(n, time[n], err.max())
s = [(n, time[n], err.max())]
with open('max.txt', 'w+') as datafile_:
    np.savetxt(datafile_, s, fmt=['%d', '%1.2f', '%1.1e'])
|
{"hexsha": "b93d62ecac096d94d864912ec2676be9c81d5a68", "size": 522, "ext": "py", "lang": "Python", "max_stars_repo_path": "Validation/Rapports_automatiques/Verification/Verification_codage/Time_dependent_velocity/src/post_run3.py", "max_stars_repo_name": "cea-trust-platform/trust-code", "max_stars_repo_head_hexsha": "c4f42d8f8602a8cc5e0ead0e29dbf0be8ac52f72", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 12, "max_stars_repo_stars_event_min_datetime": "2021-06-30T18:50:38.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-23T09:03:16.000Z", "max_issues_repo_path": "Validation/Rapports_automatiques/Verification/Verification_codage/Time_dependent_velocity/src/post_run3.py", "max_issues_repo_name": "pledac/trust-code", "max_issues_repo_head_hexsha": "46ab5c5da3f674185f53423090f526a38ecdbad1", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Validation/Rapports_automatiques/Verification/Verification_codage/Time_dependent_velocity/src/post_run3.py", "max_forks_repo_name": "pledac/trust-code", "max_forks_repo_head_hexsha": "46ab5c5da3f674185f53423090f526a38ecdbad1", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-10-04T09:19:39.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-15T14:21:04.000Z", "avg_line_length": 21.75, "max_line_length": 86, "alphanum_fraction": 0.5862068966, "include": true, "reason": "import numpy", "num_tokens": 178}
|
import tensorflow_advanced_segmentation_models as tasm
import os
import cv2
import numpy as np
from time import time
import tensorflow as tf
import albumentations as A
import matplotlib.pyplot as plt
import tensorflow.keras.backend as K
from functools import partial
from tensorflow.keras import mixed_precision
import math
DATA_DIR = "/netscratch/minouei/versicherung/version2"
x_train_dir = os.path.join(DATA_DIR, 'images/train')
y_train_dir = os.path.join(DATA_DIR, 'annotations/train')
# x_train_dir = os.path.join(DATA_DIR, 'images/val')
# y_train_dir = os.path.join(DATA_DIR, 'annotations/val')
x_valid_dir = os.path.join(DATA_DIR, 'images/val')
y_valid_dir = os.path.join(DATA_DIR, 'annotations/val')
x_test_dir = os.path.join(DATA_DIR, 'images/val')
y_test_dir = os.path.join(DATA_DIR, 'annotations/val')
TOTAL_CLASSES = ['background', 'headerlogo', 'twocoltabel', 'recieveraddress', 'text', 'senderaddress', 'ortdatum',
'companyinfo', 'fulltabletyp1', 'fulltabletyp2', 'copylogo', 'footerlogo', 'footertext',
'signatureimage', 'fulltabletyp3']
MODEL_CLASSES = TOTAL_CLASSES
# ALL_CLASSES = False
# if MODEL_CLASSES == TOTAL_CLASSES:
# MODEL_CLASSES = MODEL_CLASSES[:-1]
# ALL_CLASSES = True
ALL_CLASSES = True
BATCH_SIZE = 16
N_CLASSES = 15
HEIGHT = 640
WIDTH = 640
"""## Data Generation Functions"""
################################################################################
# Data Generator
################################################################################
def get_filtered(dir):
included_extensions = ['jpg', 'jpeg', 'png', ]
file_names = [fn for fn in os.listdir(dir)
if any(fn.endswith(ext) for ext in included_extensions)]
return sorted(file_names)
def create_image_label_path_generator(images_dir, masks_dir):
ids = get_filtered(images_dir)
mask_ids = get_filtered(masks_dir)
images_fps = [os.path.join(images_dir, image_id) for image_id in ids]
masks_fps = [os.path.join(masks_dir, image_id) for image_id in mask_ids]
while True:
for i in range(len(images_fps)):
yield [images_fps[i], masks_fps[i]]
def get_validation_augmentation(height, width):
"""Add paddings to make image shape divisible by 32"""
test_transform = [
A.PadIfNeeded(height, width),
A.Resize(height, width, always_apply=True)
]
return A.Compose(test_transform)
def process_image_label(images_paths, masks_paths, classes, augmentation=None, preprocessing=None):
class_values = [TOTAL_CLASSES.index(cls.lower()) for cls in classes]
# read data
image = cv2.imread(images_paths)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
mask = cv2.imread(masks_paths, 0)
    # build one-hot channels for the requested classes from the label mask
masks = [(mask == v) for v in class_values]
mask = np.stack(masks, axis=-1).astype('float')
# apply augmentations
if augmentation:
sample = augmentation(image=image, mask=mask)
image, mask = sample['image'], sample['mask']
# apply preprocessing
if preprocessing:
sample = preprocessing(image=image, mask=mask)
image, mask = sample['image'], sample['mask']
return image, mask
def DataGenerator(train_dir, label_dir, height, width, classes, augmentation):
image_label_path_generator = create_image_label_path_generator(
train_dir, label_dir)
    while True:
        image_path, label_path = next(image_label_path_generator)
        # process_image_label already returns correctly shaped arrays, so no
        # pre-allocated zero buffers are needed here
        images, labels = process_image_label(image_path, label_path, classes=classes, augmentation=augmentation)
        yield tf.convert_to_tensor(images), tf.convert_to_tensor(labels, tf.float32)
def _is_chief(task_type, task_id):
"""Determines if the replica is the Chief."""
return task_type is None or task_type == 'chief' or (
task_type == 'worker' and task_id == 0)
def _get_saved_model_dir(base_path, task_type, task_id):
"""Returns a location for the SavedModel."""
saved_model_path = base_path
if not _is_chief(task_type, task_id):
temp_dir = os.path.join('/tmp', task_type, str(task_id))
tf.io.gfile.makedirs(temp_dir)
saved_model_path = temp_dir
return saved_model_path
TrainSetwoAug = partial(DataGenerator,
x_train_dir,
y_train_dir,
HEIGHT,
WIDTH,
classes=MODEL_CLASSES,
augmentation=get_validation_augmentation(height=HEIGHT, width=WIDTH),
)
# ValidationSet =partial(DataGenerator,
# x_valid_dir,
# y_valid_dir,
# HEIGHT,
# WIDTH,
# classes=MODEL_CLASSES,
# )
slurm_resolver = tf.distribute.cluster_resolver.SlurmClusterResolver(port_base=15000)
mirrored_strategy = tf.distribute.MultiWorkerMirroredStrategy(cluster_resolver=slurm_resolver)
print('----------------------mirrored_strategy.num_replicas_in_sync')
print(mirrored_strategy.num_replicas_in_sync)
TrainSet = tf.data.Dataset.from_generator(
TrainSetwoAug,
(tf.float32, tf.float32),
(tf.TensorShape([None, None, 3]), tf.TensorShape([None, None, N_CLASSES]))
).batch(BATCH_SIZE, drop_remainder=True).prefetch(tf.data.AUTOTUNE)
# ValSet = tf.data.Dataset.from_generator(
# ValidationSet,
# (tf.float32, tf.float32),
# (tf.TensorShape([None, None, 3]), tf.TensorShape([None, None,N_CLASSES]))
# ).batch(BATCH_SIZE, drop_remainder=True).prefetch(tf.data.AUTOTUNE)
options = tf.data.Options()
options.experimental_distribute.auto_shard_policy = tf.data.experimental.AutoShardPolicy.DATA
TrainSet = TrainSet.with_options(options)
# ValSet = ValSet.with_options(options)
train_dist_dataset = mirrored_strategy.experimental_distribute_dataset(TrainSet)
# val_dist_dataset = mirrored_strategy.experimental_distribute_dataset(ValSet)
with mirrored_strategy.scope():
# mixed_precision.set_global_policy('mixed_float16')
# base_model, layers, layer_names = tasm.create_base_model(name=BACKBONE_NAME, weights=WEIGHTS, height=HEIGHT, width=WIDTH, include_top=False, pooling=None)
model = tasm.DeeplabV3_plus(N_CLASSES, HEIGHT, WIDTH)
for layer in model.layers:
layer.trainable = True
# print(layer.name + ": " + str(layer.trainable))
opt = tf.keras.optimizers.SGD(learning_rate=0.1, momentum=0.9)
metrics = [tasm.metrics.IOUScore(threshold=0.5)]
categorical_focal_dice_loss = tasm.losses.CategoricalFocalLoss(alpha=0.25, gamma=2.0) + tasm.losses.DiceLoss()
model.compile(
optimizer=opt,
loss=categorical_focal_dice_loss,
metrics=metrics,
)
# model.run_eagerly = False
# learning rate schedule
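# With initial_lrate = 0.1, drop = 0.5 and epochs_drop = 2.0 this halves the
# rate every two epochs: epoch 0 -> 0.1, epochs 1-2 -> 0.05, epochs 3-4 -> 0.025, ...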
def step_decay(epoch):
initial_lrate = 0.1
drop = 0.5
epochs_drop = 2.0
lrate = initial_lrate * math.pow(drop, math.floor((1 + epoch) / epochs_drop))
return lrate
callbacks = [
tf.keras.callbacks.TensorBoard(log_dir='./logs'),
tf.keras.callbacks.experimental.BackupAndRestore(backup_dir='./backup'),
tf.keras.callbacks.LearningRateScheduler(step_decay)
]
steps_per_epoch = np.floor(len(os.listdir(x_train_dir)) / BATCH_SIZE)
model.fit(
train_dist_dataset,
steps_per_epoch=steps_per_epoch,
epochs=1,
callbacks=callbacks,
# validation_data=val_dist_dataset,
# validation_steps=len(os.listdir(x_valid_dir)),
)
task_type, task_id = (mirrored_strategy.cluster_resolver.task_type,
mirrored_strategy.cluster_resolver.task_id)
saved_model_dir = _get_saved_model_dir('saved_model_path', task_type, task_id)
model.save(os.path.join(saved_model_dir,'model.h5'))
try:
    # best-effort cleanup: non-chief workers delete their temporary
    # SavedModel directories after saving
    if not _is_chief(task_type, task_id):
        tf.io.gfile.rmtree(os.path.dirname(saved_model_dir))
except:
    pass
|
{"hexsha": "88538309b460ac94fed2c8805114ad9bb1859b47", "size": 7951, "ext": "py", "lang": "Python", "max_stars_repo_path": "slurm_deeplabv3.py", "max_stars_repo_name": "minouei-kl/TensorFlow-Advanced-Segmentation-Models", "max_stars_repo_head_hexsha": "a53a8264028bfa1abc52c9f60ec47e1736690731", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "slurm_deeplabv3.py", "max_issues_repo_name": "minouei-kl/TensorFlow-Advanced-Segmentation-Models", "max_issues_repo_head_hexsha": "a53a8264028bfa1abc52c9f60ec47e1736690731", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "slurm_deeplabv3.py", "max_forks_repo_name": "minouei-kl/TensorFlow-Advanced-Segmentation-Models", "max_forks_repo_head_hexsha": "a53a8264028bfa1abc52c9f60ec47e1736690731", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.1244635193, "max_line_length": 160, "alphanum_fraction": 0.6923657402, "include": true, "reason": "import numpy", "num_tokens": 1859}
|
from pandas import read_csv
import numpy as np
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import RepeatedKFold
from sklearn.linear_model import ElasticNet, ElasticNetCV
from sklearn.model_selection import ShuffleSplit
from sklearn import metrics
from scipy.stats import pearsonr
from itertools import chain
import pickle
import re
import scipy.sparse as sp
def lasso_mpm(alphas, mse_path):
    # pick the "most parsimonious model": walk back from the MSE minimiser
    # along the alpha path while the mean MSE stays within one standard
    # deviation of the minimum (a one-standard-error-style rule)
    mse_mean = np.mean(mse_path, axis=1)
mse_std = np.std(mse_path, axis=1)
mse_min_idx = np.argmin(mse_mean)
mse_min = mse_mean[mse_min_idx]
mse_min_std = mse_std[mse_min_idx]
mse_min_std_min = mse_min - mse_min_std
mse_min_std_max = mse_min + mse_min_std
mse_mpm_idx = mse_min_idx
for i in range(mse_min_idx-1, -1, -1):
if (mse_mean[i]>=mse_min_std_min) and (mse_mean[i]<=mse_min_std_max):
mse_mpm_idx = i
alpha_mpm = alphas[mse_mpm_idx]
mse_mean_mpm = mse_mean[mse_mpm_idx]
mse_std_mpm = mse_std[mse_mpm_idx]
return alpha_mpm, mse_mean_mpm, mse_std_mpm
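if __name__ == '__main__':
    # Illustrative check (not in the original file): the mean MSE at alpha=0.5
    # lies within one standard deviation of the minimum at alpha=0.1, so the
    # more regularised alpha=0.5 is returned instead of the raw minimiser.
    demo_alphas = np.array([1.0, 0.5, 0.1, 0.05])
    demo_mse = np.array([[2.0, 2.2], [1.05, 1.11], [1.0, 1.1], [1.0, 1.2]])
    print(lasso_mpm(demo_alphas, demo_mse))  # (0.5, 1.08, 0.03)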
def stat_cov(ix,iy):
    # ix: binary k-mer presence vector for one strain; iy: observed k-mer
    # counts. Counts equal to 1 are zeroed as likely noise before counting
    # the covered ("valid") k-mers.
    cov=0
total_kmr=np.count_nonzero(ix)
ic=ix*iy
ic[ic==1]=0
valid_kmr=np.count_nonzero(ic)
if total_kmr==0:
cov=0
else:
cov=float(valid_kmr/total_kmr)
return [cov,valid_kmr,total_kmr]
def cal_cov_all(ix,iy):
total_kmr=np.count_nonzero(ix,axis=0)
cov_kmr=ix*iy
cov_kmr[cov_kmr<=1]=0
cov_kmr=np.count_nonzero(cov_kmr,axis=0)
cov=cov_kmr/total_kmr
return cov
def reject_outliers(data,x, n = 3):
mean=np.mean(data)
sigma=np.std(data)
remove_idx=np.where(abs(data-mean)>n*sigma)
new_y=np.delete(data, remove_idx)
new_x=np.delete(x, remove_idx, axis=0)
'''
d = np.abs(data - np.median(data))
mdev = np.median(d)
remove_idx=np.where(abs(data-np.median(data))>m*mdev)
new_y=np.delete(data, remove_idx)
new_x=np.delete(x, remove_idx,axis=0)
'''
#print(new_y.shape,new_x.shape)
#exit()
#print(remove_idx)
#exit()
#s = d/mdev if mdev else 0.
#return data[s<m]
return new_y,new_x
def merge_x(dX,all_id,cls_info,dominat,dx):
c=1
d={}
X=dX
#X=np.array(X)
for i in all_id:
d[i]=c
c+=1
f=open(cls_info,'r')
new_x=dx
while True:
line=f.readline().strip()
if not line:break
ele=line.split('\t')
strains=re.split(',',ele[-1])
if not int(d[ele[1]])==int(dominat):continue
for s in strains:
if int(d[s])==int(dominat):continue
new_x=new_x+X[:,int(d[s])-1]
new_x[new_x>1]=1
return new_x
def get_remainc(dominat,used_kmer,pXt_tem,py,strain_remainc):
npXt=2*used_kmer+pXt_tem
npXt[npXt>1]=0
#print(npXt.shape)
#exit()
if not len(npXt)>8000000:
for i in range(len(npXt)):
if i==dominat:continue
all_k=np.sum(npXt[i])
tem_c=npXt[i]*py
tem_c[tem_c==1]=0
tem_c[tem_c>1]=1
check=np.sum(tem_c)
if all_k==0:
strain_remainc[i]=0
else:
strain_remainc[i]=check/all_k
else:
total_kmr=np.count_nonzero(npXt,axis=1)
total_kmr[total_kmr==0]=-1
tem_c=npXt*py
tem_c[tem_c==1]=0
cov_kmr=np.count_nonzero(tem_c,axis=1)
ct=cov_kmr/total_kmr
for i in range(len(npXt)):
if i==dominat:continue
if ct[i]<0:
strain_remainc[i]=0
else:
strain_remainc[i]=ct[i]
return strain_remainc
def get_avg_depth(dominat,pX,py):
    doarr=pX[:,dominat]*py
    doarr[doarr==1]=0
    doarr_noz=doarr[doarr!=0]
    # default to the plain mean of the non-zero depths; this also covers the
    # very large-array case, which previously left avg_depth undefined
    avg_depth=np.mean(doarr_noz)
    if not len(doarr)>8000000:
        # for smaller arrays, trim to the interquartile range first
        f25=np.percentile(doarr_noz,25,interpolation='nearest')
        f75=np.percentile(doarr_noz,75,interpolation='nearest')
        doarr_noz[doarr_noz<f25]=0
        doarr_noz[doarr_noz>f75]=0
        doarr_final=doarr_noz[doarr_noz!=0]
        avg_depth=np.mean(doarr_final)
    return avg_depth
def get_candidate_arr(ix,iy):
res={}
c=0
if not len(iy)>8000000:
for n in ix:
tem_c=ix[c]*iy
tem_c[tem_c==1]=0
tem_c[tem_c>1]=1
check=np.sum(tem_c)
res[c]=check
c+=1
hc=sorted(res.items(),key=lambda d:d[1],reverse=True)
candidate=hc[0][0]
hv=hc[0][1]
else:
mx=ix*iy
res=np.sum(mx,axis=0)
candidate=np.where(res==np.max(res))[0][0]
hv=res[candidate]
return candidate,hv
def optimize_dominat_y_et(ix,iy):
    # pick the strain (column) whose k-mers capture the most observed counts
    mx=ix*iy
    res=np.sum(mx,axis=0)
    dominat=np.where(res==np.max(res))[0][0]
    return dominat
def optimize_dominat_y(ix,iy):
c=0
res=[]
for i in range(ix.shape[1]):
da=ix[:,c]*iy
da_noz=da[da!=0]
if np.sum(da_noz)==0 or len(da_noz)<1:
res.append(0)
else:
f25=np.percentile(da_noz,5,interpolation='nearest')
f75=np.percentile(da_noz,95,interpolation='nearest')
tem_iy=np.copy(iy)
tem_iy[tem_iy<f25]=0
tem_iy[tem_iy>f75]=0
res.append(np.dot(ix[:,c].T,tem_iy))
c+=1
res=np.array(res)
dominat=np.where(res==np.max(res))[0][0]
#print(res,res[28])
return dominat
def detect_strains(input_csv,input_y,ids,ksize,npp25,npp75,npp_out,cls_cov,omatrix,all_cls,l2,msn):
#data_frame1=read_csv(input_csv)
new_als=[]
for a in all_cls:
new_als.append(int(a-1))
#data_frame2=read_csv(omatrix,usecols=new_als)
#dX=data_frame2.values[:,:]
#dX=np.array(dX)
#all_id=pickle.load(open(raw_id,"rb"))
#X=data_frame1.values[:,:]
#om=data_frame2.values[:,:]
omx=sp.load_npz(omatrix)
om=omx.A
om=om[:,new_als]
#y=data_frame2.values
#y=list(chain(*y))
ln=np.sum(om,axis=1)
ln[ln>1]=0
#pX=np.array(X)
X=sp.load_npz(input_csv)
pX=X.A
#py=np.array(y)
py=input_y
py_u=input_y*ln
py_2d=np.reshape(py,(len(py),-1))
pyu_2d=np.reshape(py,(len(py_u),-1))
'''
i=0
pX=[]
py=[]
for v in opy:
if v<=npp25 or v>=npp75:
i+=1
continue
else:
pX.append(opX[i])
py.append(v)
i+=1
pX=np.array(pX)
py=np.array(py)
'''
sid=pickle.load(open(ids, "rb"))
cutoff=msn*ksize
if len(py)>8000000:
cutoff=msn*ksize*10
if len(py)<100000:
cutoff=ksize*10
    # We also track per-strain coverage and iterate on the valid (newly covered) k-mers
def Pre_Scan(pX,py,sid,cutoff,cls_cov,py_u,py_2d,pyu_2d):
strain_cov={} # Strain -> [Coverage,covered_kmr,total_kmr]
strain_val={} # Strain -> Valid kmr
strain_remainc={}
final_src={}
res_std=[]
res_cv=[]
        mannual_depth={} # Strain -> Manual Depth
out_columns=[]
out_strain=[]
pXt=pX.T
cov_arr=cal_cov_all(pX,py_2d)
#cov_arr=np.array(cov_arr)
#print(cov_arr)
#exit()
dominat_avg_depth=0
default_cov=0.7
'''
if cls_cov>=0.9:
default_cov=0.9
'''
if np.max(cov_arr)>default_cov: # Filter strains that cov < 0.7
cov_arr[cov_arr<=default_cov]=0
cov_arr[cov_arr>default_cov]=1
pXt_tem=((pXt.T)*cov_arr).T
else:
pXt_tem=pXt
#pXt_tem=pXt
#py_tem=py*py_u
#py_tem[py_tem<0]=0
'''
tem_check_arr=pXt_tem*py
o=open('Tem_check.txt','w+')
c=0
for t in tem_check_arr:
o.write(sid[c]+'\t'+str(np.mean(t))+'\t'+str(np.median(t))+'\n')
c+=1
exit()
'''
mcov=np.max(cov_arr)
#print(cov_arr,mcov)
if l2==1 and mcov<0.01:
dominat=np.where(cov_arr==np.max(cov_arr))[0][0]
if np.sum(py_u)>0:
dominat_avg_depth=get_avg_depth(dominat,pX,py_u)
else:
dominat_avg_depth=get_avg_depth(dominat,pX,py)
else:
print(len(py))
if np.sum(py_u)>0:
if len(py)>8000000:
dominat=optimize_dominat_y_et(pX,pyu_2d)
else:
dominat=optimize_dominat_y(pX,py_u)
dominat_avg_depth=get_avg_depth(dominat,pX,py_u)
else:
if len(py)>8000000:
dominat=optimize_dominat_y_et(pX,py_2d)
else:
dominat=optimize_dominat_y(pX,py)
dominat_avg_depth=get_avg_depth(dominat,pX,py)
#dominat_avg_depth=get_avg_depth(dominat,pX,py)
##dominat_global=int(data_frame1.columns[dominat])
#new_x=merge_x(dX,all_id,cls_info,dominat_global,pXt[dominat])
out_columns.append(dominat)
out_strain.append(sid[dominat])
#print(out_strain)
#exit()
		cov_stat=stat_cov(pX[:,dominat],py)  # compute once and reuse below
		strain_cov[sid[dominat]]=cov_stat
		strain_val[sid[dominat]]=cov_stat[1]
		strain_remainc[sid[dominat]]=cov_stat[0]
		final_src[sid[dominat]]=cov_stat[0]
#print(out_strain)
#exit()
		# Start the iterative pre-scan process to pick up additional strains.
		max_iter=15 # maximum number of iterations
		used_kmer=pXt[dominat]
#print(out_strain)
#exit()
#used_kmer=new_x
strain_remainc=get_remainc(dominat,used_kmer,pXt_tem,py_u,strain_remainc)
#print(strain_remainc)
#exit()
for i in range(max_iter):
#if remain_kmer<cutoff:break
			npXt=2*used_kmer+pXt_tem
			npXt[npXt>1]=0  # keep only k-mers not yet claimed by a selected strain
#npXt[npXt==3]=0
#candidate_arr=np.dot(npXt,py)
#if np.max(candidate_arr)<2*cutoff:break
#candidate=np.where(candidate_arr==np.max(candidate_arr))[0][0]
if np.sum(py_u)>0:
if not len(py_u)>8000000:
candidate,check=get_candidate_arr(npXt,py_u)
else:
candidate,check=get_candidate_arr(npXt.T,pyu_2d)
else:
if not len(py)>8000000:
candidate,check=get_candidate_arr(npXt,py)
else:
candidate,check=get_candidate_arr(npXt.T,py_2d)
#print(candidate,check)
#tem_c=npXt[candidate]*py
#tem_c[tem_c==1]=0
#tem_c[tem_c>1]=1
#check1=np.sum(tem_c)
#print(check1)
'''
tem_c=tem_c*py_u # remove union k-mers from other cluster
tem_c[tem_c<0]=0
'''
#check=np.sum(tem_c)
#print(npXt[candidate],np.max(candidate_arr))
#exit()
if check>=cutoff:
if len(py)<100000:
rc=0.01
else:
rc=0.2
if strain_remainc[candidate]>rc:
out_columns.append(candidate)
out_strain.append(sid[candidate])
strain_cov[sid[candidate]]=stat_cov(pX[:,candidate],py)
strain_val[sid[candidate]]=check
final_src[sid[candidate]]=strain_remainc[candidate]
#candidate_global=int(data_frame1.columns[candidate])
#new_x=merge_x(dX,all_id,cls_info,candidate_global,pXt[candidate])
used_kmer=used_kmer+pXt[candidate]
#used_kmer=used_kmer+new_x
used_kmer[used_kmer>1]=1
else:
break
return out_columns,out_strain,strain_cov,strain_val,final_src,dominat_avg_depth
out_columns,out_strains,strain_cov,strain_val,final_src,dominat_avg_depth=Pre_Scan(pX,py,sid,cutoff,cls_cov,py_u,py_2d,pyu_2d)
#print(out_strains,res_std,res_cv)
#print(out_strains)
#exit()
if len(out_columns)==1:
res=dict(zip(out_strains,[1]))
res2=dict(zip(out_strains,[dominat_avg_depth]))
return res,res2,strain_cov,strain_val,final_src
#out_strains=sid
oX=pX[:,out_columns]
#X=pX
oy=py
#print(out_strains)
#print(npp25,npp75)
#X=oX
#y=py
#tem_check_arr=X*y
#for t in tem_check_arr:
#y,X=reject_outliers(oy,oX)
i=0
X=[]
y=[]
for v in oy:
#if v<=1:
if v<npp25 or v>npp75 or v>npp_out:
i+=1
continue
else:
if np.sum(oX[i])==0 or np.sum(oX[i])==len(out_columns):
i+=1
continue
else:
X.append(oX[i])
y.append(v)
i+=1
X=np.array(X)
y=np.array(y)
'''
tem_check_arr=(X.T)*y
o=open('Tem_check.txt','w+')
c=0
for t in tem_check_arr:
o.write(out_strains[c]+'\t'+str(np.mean(t))+'\t'+str(np.median(t))+'\n')
c+=1
exit()
'''
#print(X,y)
#exit()
print('Pre-scan finished, now we will start ElasticNet fitting...')
CV_NITER = 20
NALPHA = 50
MAX_NITER = 5000
TEST_SIZE = 0.5
	cv = ShuffleSplit(n_splits=CV_NITER, test_size=TEST_SIZE, random_state=0)
	# note: the 'normalize' argument below follows older scikit-learn (<1.2) signatures
	lasso_cv = ElasticNetCV(eps=0.001, n_alphas=NALPHA,fit_intercept=False, normalize=False,precompute='auto', max_iter=MAX_NITER,tol=0.0001, copy_X=True, cv=cv, verbose=False,n_jobs=1, positive=True, random_state=0,selection='cyclic')
lasso_cv.fit(X, y)
alpha, mse_ave, mse_std = lasso_mpm(lasso_cv.alphas_, lasso_cv.mse_path_)
#MAX_NITER = 10000
#print(alpha)
# Main Part
lasso = ElasticNet(alpha=alpha, fit_intercept=False, normalize=False,precompute=False, copy_X=True, max_iter=MAX_NITER,tol=0.0001, warm_start=False, positive=True,random_state=0, selection='cyclic')
#lasso=Lasso(alpha=alpha)
lasso.fit(X, y)
lasso_coef = np.atleast_1d(lasso.coef_)
#fy=lasso.predict(X)
#print ("RMSE:", np.sqrt(metrics.mean_squared_error(y, fy)))
#exit()
#print(lasso_coef)
#print(out_strains)
#exit()
if not np.sum(lasso_coef)==0:
coef_norm = lasso_coef / np.sum(lasso_coef)
res=dict(zip(out_strains,list(coef_norm)))
res2=dict(zip(out_strains,list(lasso_coef)))
else:
res={}
res2={}
#print(coef_norm)
#res=dict(zip(out_strains,list(coef_norm)))
#res2=dict(zip(out_strains,list(lasso_coef)))
return res,res2,strain_cov,strain_val,final_src
#import pickle
#sid=pickle.load(open("../StrainVote_DB_Lasso_Test/Kmer_Sets/Kmer_Sets/C6/All_Kmer/id2strain_re.pkl", "rb"))
#print(sid)
#print(zip(list(lasso_coef),sid))
#y_pred = X.dot(coef_norm)
#r,pval = pearsonr(y, y_pred)
#print(r,pval)
#print(lasso_coef)
|
{"hexsha": "9bc996cd66460e097c854345bb8b420bd06e5d4f", "size": 12368, "ext": "py", "lang": "Python", "max_stars_repo_path": "library/identify_strains_L2_Enet_Pscan_new_sp_et.py", "max_stars_repo_name": "liaoherui/StrainScan", "max_stars_repo_head_hexsha": "7cb953232e08d286124b24dc27e83ea6d6e3a9a3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 11, "max_stars_repo_stars_event_min_datetime": "2021-11-26T02:49:14.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-10T04:19:32.000Z", "max_issues_repo_path": "library/identify_strains_L2_Enet_Pscan_new_sp_et.py", "max_issues_repo_name": "johnson951101/StrainScan", "max_issues_repo_head_hexsha": "f449055fa3b2bdeb4794d607745856772ade81d7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "library/identify_strains_L2_Enet_Pscan_new_sp_et.py", "max_forks_repo_name": "johnson951101/StrainScan", "max_forks_repo_head_hexsha": "f449055fa3b2bdeb4794d607745856772ade81d7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2021-11-26T02:49:18.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-16T12:10:12.000Z", "avg_line_length": 23.9689922481, "max_line_length": 232, "alphanum_fraction": 0.6987386805, "include": true, "reason": "import numpy,import scipy,from scipy", "num_tokens": 4517}
|
import pandas as pd
from math import sqrt
import numpy as np
from tqdm import tqdm
import os
import glob
import re
import subprocess
import geopandas as gpd        # used by save_output() and predict_by_chunk()
from shapely.wkt import loads  # parses the WKT geometry strings in save_output()/predict_by_chunk()
from scipy.stats import pearsonr, spearmanr
from sklearn.pipeline import make_pipeline
from sklearn.linear_model import Ridge
from sklearn.ensemble import RandomForestRegressor
from sklearn.svm import LinearSVR
from sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error
from sklearn.preprocessing import MinMaxScaler, RobustScaler
import joblib  # sklearn.externals.joblib was removed in scikit-learn 0.23+
from sklearn.base import clone
from sklearn.model_selection import KFold
import matplotlib.pyplot as plt
import seaborn as sns
import sys
sys.path.insert(0, '../utils')
import geoutils
from settings import data_dir, model_dir, scaler_dir, preds_dir, features, indicators, dept_dir, grid250_dir, feats250_dir, preds250_dir
def calculate_metrics(y_true, y_pred):
'''
Calculates metrics of accuracy between actual values and model predicted values.
Args
y_true (list): Actual WASH indicator values
y_pred (list): Model predicted values
Returns
(dict): Dictionary of correlation, r-squared, mean absolute error, and root mean squared error
'''
return {
'correlation': pearsonr(y_true, y_pred)[0],
        'r2': pearsonr(y_true, y_pred)[0]**2,  # squared Pearson r, deliberately used instead of r2_score(y_true, y_pred)
'mae': mean_absolute_error(y_true, y_pred),
'rmse': sqrt(mean_squared_error(y_true, y_pred)),
}
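# A small, hypothetical usage sketch of calculate_metrics(); the toy values
# and the _demo_ name are illustrative only.
def _demo_calculate_metrics():
    y_true = [0.1, 0.4, 0.35, 0.8]
    y_pred = [0.15, 0.38, 0.30, 0.75]
    m = calculate_metrics(y_true, y_pred)
    return m['rmse']  # other keys: 'correlation', 'r2', 'mae'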
def _fit_with_one_holdout(df, clf, features, indicator, test_area, scale = True):
'''
Trains input model for input indicator using input features, holding out input test_area
Args
df (dataframe): dataset source of training data
clf (sklearn model): unfitted model
features (list of str): list of features used for training
indicator (str): indicator being modelled
test_area (str): area to serve as testing data
        scale (bool): scale features using sklearn's RobustScaler
Returns
clf (sklearn model): fitted model
metrics (dict): dictionary of accuracies from calculate_metrics()
scaler (sklearn object): fitted scaler object on train data
'''
global df_train, y_true, y_pred
raw_ = df.copy()
df_train = raw_.query(f"adm1_name != '{test_area}'")
df_test = raw_.query(f"adm1_name == '{test_area}'")
y = df_train[indicator]
y_true = df_test[indicator]
if scale:
scaler = RobustScaler()
scaler.fit(df_train[features])
df_train = scaler.transform(df_train[features])
df_test = scaler.transform(df_test[features])
X = df_train
X_test = df_test
clf.fit(X, y)
y_pred = clf.predict(X_test)
metrics = calculate_metrics(y_true, y_pred)
return clf, metrics, scaler
def fit_models(df, features, indicators, test_areas, prefix = ''):
'''
Wrapper function that:
- Runs _fit_with_one_holdout() over test areas
- Creates sklearn model objects, 1 Ridge Regressor and 1 Random Forest Regressor
- Saves models and scalers as pkl files to model and scaler dirs specified in settings.py
Args
df (dataframe): dataset source of training data
features (list of str): list of features used for training
indicators (list of str): list of indicators being modelled
test_areas (list of str): list of areas to serve as testing data
prefix (str): prefix added to filename used in saving pkl files
Returns
results (DataFrame): aggregated table of accuracies, broken down based on indicator, model type, and test area
'''
recs = []
lr = Ridge(alpha=1.0, random_state=42)
rf = RandomForestRegressor(random_state=42)
clfs = [lr, rf]
print('Fitting using the following features:')
print(features)
print('Evaluating on the following test areas:')
print(test_areas)
# remove previous results
fileList = glob.glob(f'{preds_dir}{prefix}_perc_hh_no*.csv')
for filePath in fileList:
os.remove(filePath)
for indicator in tqdm(indicators):
for model in clfs:
for area in test_areas:
unfitted = clone(model)
fitted, metrics, scaler = _fit_with_one_holdout(df, unfitted, features, indicator, area)
model_type = str(type(fitted)).split('.')[-1].replace("'>", '')
rec = (
indicator,
area,
model_type,
metrics['r2'],
metrics['correlation'],
metrics['mae'],
metrics['rmse']
)
recs.append(rec)
joblib.dump(fitted, model_dir + prefix + '_' + indicator + '_' + area + '_' + model_type + '.pkl')
joblib.dump(scaler, scaler_dir + prefix + '_' + indicator + '_' + area + '_' + model_type + '.pkl')
cols = ['indicator', 'area', 'model', 'r2_score', 'correlation', 'mae', 'rmse']
results = pd.DataFrame(recs, columns = cols)
return results
def _predict_one_holdout_area(df, features, indicator, area, model_type, prefix = ''):
'''
Wrapper function that
- Predicts using a saved model and scaler from pkl files
- Outputs a csv file of grid-level predictions
Args
df (dataframe): dataset source of training data
features (list of str): list of features used for training
indicator (str): indicator being modelled
area (str): area to serve as testing data
model_type (str): type of sklearn model e.g. RandomForestRegressor
prefix (str): prefix added to filename used in saving pkl files
Returns
None
'''
model = joblib.load(model_dir + prefix + '_' + indicator + '_' + area + '_' + model_type + '.pkl')
scaler = joblib.load(scaler_dir + prefix + '_' + indicator + '_' + area + '_' + model_type + '.pkl')
    sub = df.query(f"adm1_name == '{area}'").copy()  # copy to avoid pandas SettingWithCopyWarning
X = sub[features]
X = scaler.transform(X)
sub['pred_' + indicator] = model.predict(X)
keep_cols = ['adm1_name', 'id', 'geometry', indicator, 'pred_' + indicator]
sub[keep_cols].to_csv(preds_dir + prefix + '_' + indicator + '_' + area + '_' + model_type + '.csv', index = False)
def find_ind(text):
'''
Finds indicator text in filename
Args
text (str): filename, e.g. 'all_perc_hh_no_toilet_bogot_dc_RandomForestRegressor.csv'
Returns
ind (str): indicator text, e.g. 'toilet'
'''
    m1 = re.search('water', text)
    m2 = re.search('toilet', text)
    m3 = re.search('sewage', text)
    if m1 is not None:
        ind = m1[0]
    elif m2 is not None:
        ind = m2[0]
    else:
        ind = m3[0]
return ind
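# Hypothetical usage sketch of find_ind(); the filename below is illustrative.
def _demo_find_ind():
    fname = 'all_perc_hh_no_toilet_bogot_dc_RandomForestRegressor.csv'
    return find_ind(fname)  # -> 'toilet'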
def predict_on_holdout_areas(df, test_areas, features, indicators, prefix = ''):
'''
Wrapper function that
- Runs _predict_one_holdout_area() across holdout areas
- Consolidates to one csv file of grid-level predictions
Args
df (dataframe): dataset source of training data
test_areas (list of str): list of areas to serve as testing data
features (list of str): list of features used for training
indicators (list of str): list of indicators being modelled
prefix (str): prefix added to filename used in saving pkl files
Returns
None
'''
out_file = f'{prefix}_predictions.csv'
for indicator in tqdm(indicators):
for model_type in ['RandomForestRegressor']:
for area in test_areas:
_predict_one_holdout_area(df, features, indicator, area, model_type, prefix)
# consolidate hold out results to one df
files_ = glob.glob(f'{preds_dir}{prefix}_perc_hh_no*.csv')
dfs = []
for f in files_:
df = pd.read_csv(f)
df.columns = ['adm1_name', 'id', 'geometry', 'y_true', 'y_pred']
df['indicator'] = find_ind(f)
dfs.append(df)
mega_df = pd.concat(dfs, axis = 0)
mega_df['absdiff'] = abs(mega_df['y_true'] - mega_df['y_pred'])
mega_df.to_csv(data_dir + out_file, index= False)
def evaluate_results(indicators, prefix = ''):
'''
Prints out accuracy metrics and scatter plots of actual value vs predicted
Args
indicators (list of str): list of indicators to evaluate on
Returns
None
'''
out_file = f'{prefix}_predictions.csv'
for indicator in indicators:
print(indicator)
ind = indicator.split('_')[3]
df = pd.read_csv(data_dir + out_file).query(f"indicator == '{ind}'")
print(calculate_metrics(df['y_true'], df['y_pred']))
df.plot(x = 'y_true', y = 'y_pred', kind = 'scatter', figsize = (5,5), xlim = (0,1), ylim = (0,1), title = indicator)
def check_nulls_and_outliers(df, columns = None):
'''
1. Find rows missing some values
2. Find outliers
'''
if columns is None:
columns = df.columns
sub = df[columns]
rows, cols = df.shape
print(f'Total rows: {rows}')
print('Variables with missing values:')
print(sub.describe().transpose().query(f"count < {rows}")[['count', 'mean', 'std', 'min', 'max']])
    scaler = MinMaxScaler()
    sub = pd.DataFrame(scaler.fit_transform(sub), columns = columns)  # scale so the boxplot is comparable across variables
    sub.boxplot(figsize=(12,4))
def average_results(df, iterate_over = 'adm1_name', prefix = 'all'):
'''
Calculate accuracies by calculating per subgroup, then averaging across all subgroups
Args
df (DataFrame): source of training data
iterate_over (str): column over which to iterate, i.e. subgroups used for calculation
prefix (str): string prepended to saved csv file
Returns
res (DataFrame): metrics per iteration
'''
inds = list(df.indicator.unique())
dfs = []
for ind in inds:
ind_ = ind.replace('perc_hh_no_', '').split('_')[0]
## Subset to indicator
sub1 = df.query(f"indicator == '{ind}'")
list_ = list(sub1[iterate_over].unique())
recs = []
## Subset to fold
for item in list_:
sub2 = sub1.query(f"{iterate_over} == '{item}'")
metrics_ = calculate_metrics(sub2['y_true'], sub2['y_pred'])
recs.append((ind_, item, metrics_['correlation'], metrics_['r2'], metrics_['rmse']))
df_ = pd.DataFrame(recs, columns = ['indicator', iterate_over, 'correlation', 'r2', 'rmse']).set_index(iterate_over)
dfs.append(df_)
res = pd.concat(dfs, axis = 0)
return(res)
def plot_preds_test(y_tests, y_preds, ind, avg_metrics):
'''
Scatter plot of the predicted value and true value
Args
y_tests (series): true values
y_preds (series): predicted values
ind (str): string that indicates indicator used in plot title
        avg_metrics (dict): averaged accuracy metrics for the indicator
Returns
None
'''
plt.scatter(y_preds, y_tests, alpha=0.4)
plt.plot([0, 1],[0, 1], color='black')
plt.xlabel('y_pred')
plt.ylabel('y_test')
plt.title('{} (r2 = {:.2f})'.format(ind, avg_metrics['r2']))
plt.show()
def summarize_metrics(df, iterate_over = 'adm1_name', prefix = 'all'):
'''
Prints the averaged accuracies across all subgroups with a scatter plot of the true and predicted values
Args
df (DataFrame): source of training data
iterate_over (str): column over which to iterate, i.e. subgroups used for calculation
prefix (str): string prepended to saved csv file
Returns
None
'''
res = average_results(df, iterate_over = iterate_over, prefix = 'all')
#res = res.set_index('indicator')
inds = list(df.indicator.unique())
for ind in inds:
ind_ = ind.replace('perc_hh_no_', '').split('_')[0]
sub = df.query(f"indicator == '{ind}'")
y_tests = sub.y_true
y_preds = sub.y_pred
sub1 = res.query(f"indicator == '{ind_}'")
sub1 = sub1.set_index('indicator')
avg_metrics = {key: np.mean(values) for key, values in sub1.items()}
print(f"\nAccess to {ind_}")
print('\nAverage Metrics')
for key, val in avg_metrics.items():
print('{}: {:.4f}'.format(key, val))
plot_preds_test(y_tests, y_preds,ind_,avg_metrics)
def consolidate_results(df, prefix = 'all'):
'''
Calculate accuracies by consolidating all predictions
Args
df (DataFrame): source of training data
        prefix (str): string prepended to saved csv file
Returns
None
'''
inds = list(df.indicator.unique())
for ind in inds:
ind_ = ind.replace('perc_hh_no_', '')
print(f"Access to {ind_}")
sub = df.query(f"indicator == '{ind}'")
print(calculate_metrics(sub['y_true'], sub['y_pred']))
def fit_with_randomsplit(df, clf, features, indicators, scale = True, n_splits = 5, prefix = 'all'):
'''
Trains input model for input indicator using input features, using randomly selected 20% of rows
Args
df (dataframe): dataset source of training data
clf (sklearn model): unfitted model
features (list of str): list of features used for training
indicators (list of str): indicators being modelled
scale (bool): scale features based sklearn RobustScaler
        n_splits (int): number of folds for KFold cross-validation
Returns
None
'''
global X
raw_ = df.copy()
dfs = []
for indicator in tqdm(indicators):
#print(indicator)
X = raw_[features]
y = raw_[indicator]
kf = KFold(n_splits=n_splits, shuffle = True, random_state = 42)
c = 0
for train_index, test_index in kf.split(X):
#print(c)
c+=1
clf_ = clone(clf)
X_train, X_test = X.iloc[train_index,:], X.iloc[test_index,:]
y_train, y_test = y[train_index], y[test_index]
if scale:
scaler = RobustScaler()
scaler.fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
clf_.fit(X_train, y_train)
y_pred = clf_.predict(X_test)
df_ = pd.DataFrame({
'split_id': str(c),
'indicator': indicator,
'grid_id': raw_.loc[test_index, 'id'],
'adm1_name': raw_.loc[test_index, 'adm1_name'],
'y_true': y_test,
'y_pred': y_pred,
})
dfs.append(df_)
cons_df = pd.concat(dfs, axis = 0)
cons_df.to_csv(data_dir + prefix + '_randomsplit_results.csv', index = False)
return cons_df
def model_rollout(train_df, test_df, fit = False, save = False, verbose = True):
"""
Fit model and return test_df with predictions
Args
train_df (dataframe): data to train model on (2018)
test_df (dataframe): data to predict on (2019/2020)
fit (bool): if False, load saved pkl file of model; else fit on train_df
save (bool): if True, save model and scaler as pkl files
Returns
test_df (dataframe): original test_df but with predictions per indicator
top_features (dataframe): top features sorted by random forest importance
"""
global clf
clf = RandomForestRegressor(random_state=42)
if verbose:
iterable = tqdm(indicators)
else:
iterable = indicators
feats = []
for indicator in iterable:
avg_metrics = {'correlation':[], 'r2':[], 'mae':[], 'rmse':[]}
X_train, y_train = train_df[features], train_df[indicator]
X_test = test_df[features]
scaler = RobustScaler()
scaler.fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
if fit:
clf.fit(X_train, y_train)
else:
path = model_dir + 'model_' + indicator + '_2018_250mv2.pkl'
clf = joblib.load(path)
y_pred = clf.predict(X_test)
test_df['pred_' + indicator] = y_pred
feature_importances = pd.DataFrame({'feature': list(train_df[features].columns)
, 'importance': list(clf.feature_importances_)})
top_features = (feature_importances
.sort_values(by=['importance'], ascending = False))
top_features['indicator'] = indicator
feats.append(top_features)
if save:
joblib.dump(clf, model_dir + 'model_' + indicator + '_2018_250mv2.pkl')
joblib.dump(scaler, scaler_dir + 'scaler_2018_250mv2.pkl') # writes 3 times, but might be cleaner to read
return test_df, pd.concat(feats, axis = 0).reset_index(drop = True)
def _aggregate_by_metro_area():
'''
Aggregates predictions by metro area. Content originally from 20200914_check_trends.ipynb, Section 'check trends'
Datasets read were created from scripts in github branch validation/geih-metro-areas
'''
import pandas as pd
import re
def clean_name(text):
return (re.sub('[^a-z ]','', text.lower()).replace(' ', '_')
.replace('area_metropolitana_de_', '')
.replace('area_metropolitana_del_', ''))
wash18 = pd.read_csv(data_dir + '20200916_dataset.csv')
grid_in_metro = pd.read_csv(data_dir + 'grids_in_metro_areas.csv')
metro19 = pd.read_csv(data_dir + '20200831_GEIH_Metro_Areas.csv')
metro20 = pd.read_csv(data_dir + '20200908_GEIH_Metro_Areas_2020.csv')
metro_name = pd.read_csv(data_dir + 'metro_areas_id_name.csv')
pred_metro18 = pd.read_csv(data_dir + 'metro_area_predictions_2018.csv')
pred_metro19 = pd.read_csv(data_dir + 'metro_area_predictions.csv')
pred_metro20 = pd.read_csv(data_dir + 'metro_area_predictions_2020.csv')
metro_name['a_mtro'] = metro_name['a_mtro'].apply(clean_name)
metro_name = metro_name.rename(columns = {'OBJECTID': 'metro_id'})
# Actual
spanish = {
'd_hogares': 'population',
'd_c_acuedu': 'hh_no_water_supply',
'd_c_alcant': 'hh_no_sewage',
'd_c_sanita': 'hh_no_toilet',
}
df1 = (pd.merge(grid_in_metro, wash18[['id'] + list(spanish.keys())], how = 'left', on = 'id')
.rename(columns = spanish))
df2 = df1.groupby('metro_id').agg('sum').reset_index()
for indicator in indicators:
df2[indicator] = 100*df2[indicator.replace('perc_', '')] / df2['population']
metro18 = df2
spanish = {
'OBJECTID': 'metro_id',
'personas': 'population',
'c_acueduct': 'hh_no_water_supply',
'c_alcantar': 'hh_no_sewage',
'c_sanitari': 'hh_no_toilet',
'mc_acueduc': 'perc_hh_no_water_supply',
'mc_alcanta': 'perc_hh_no_sewage',
'mc_sanitar': 'perc_hh_no_toilet',
}
metro19 = metro19.rename(columns = spanish)
metro20 = metro20.rename(columns = spanish)
cols = ['metro_id', 'year'] + indicators
metro18['year'] = 2018
metro19['year'] = 2019
metro20['year'] = 2020
df3 = pd.concat([
metro18[cols],
metro19[cols],
metro20[cols],
], axis = 0)
df4 = pd.merge(metro_name, df3, how = 'left', on = 'metro_id')
df5 = df4.set_index(['metro_id', 'a_mtro', 'year']).stack().reset_index()
df5.columns = ['metro_id', 'a_mtro', 'year', 'indicator', 'value']
# Predicted
rnm = {
'pred_perc_hh_no_water_supply': 'perc_hh_no_water_supply',
'pred_perc_hh_no_toilet': 'perc_hh_no_toilet',
'pred_perc_hh_no_sewage': 'perc_hh_no_sewage'
}
pred_metro18['year'] = 2018
pred_metro19['year'] = 2019
pred_metro20['year'] = 2020
cols2 = ['metro_id', 'year'] + list(rnm.keys())
df6 = pd.concat([
pred_metro18[cols2].rename(columns = rnm),#metro18[cols],
pred_metro19[cols2].rename(columns = rnm),
pred_metro20[cols2].rename(columns = rnm)
], axis = 0)
df7 = pd.merge(metro_name, df6, how = 'left', on = 'metro_id')
df8 = df7.set_index(['metro_id', 'a_mtro', 'year']).stack().reset_index()
df8.columns = ['metro_id', 'a_mtro', 'year', 'indicator', 'value']
df5['val_type'] = 'actual'
df8['val_type'] = 'pred'
df9 = pd.concat([df5, df8], axis = 0)
# df9.to_csv(data_dir + 'metro_trends.csv', index = False)
return df9
def _aggregate_by_department():
'''
Aggregates predictions by department. Content originally from 03_Rollout.ipynb, Section 'what changed'
'''
scaler = joblib.load(scaler_dir + 'scaler_2018_250mv2.pkl')
agg_level = 'adm1_name'
keep_cols = [agg_level] + features + indicators
def clean_name(text):
return re.sub('[^a-z ]','', text.lower()).replace(' ', '_')
raw = pd.read_csv(data_dir + '20200916_dataset.csv')
raw['adm1_name'] = raw['adm1_name'].apply(clean_name)
feats_2020 = pd.read_csv(data_dir + '20200914_dataset_2020.csv')
preds_2020 = pd.read_csv(data_dir + '20200914_predictions2020.csv').rename(columns = {
'pred_perc_hh_no_water_supply': 'perc_hh_no_water_supply',
'pred_perc_hh_no_toilet': 'perc_hh_no_toilet',
'pred_perc_hh_no_sewage': 'perc_hh_no_sewage',
})[['id', 'perc_hh_no_water_supply', 'perc_hh_no_toilet', 'perc_hh_no_sewage']]
# join
wash_grid_2018_ = raw
wash_grid_2020_ = pd.merge(feats_2020, preds_2020, on = 'id')
# filter to 2018 grids only for comparability
wash_grid_2020_ = wash_grid_2020_[wash_grid_2020_['id'].isin(list(wash_grid_2018_['id'].unique()))]
# scale features except population
wash_grid_2018 = wash_grid_2018_[keep_cols].copy()
wash_grid_2020 = wash_grid_2020_[keep_cols].copy()
wash_grid_2018.loc[:,features] = scaler.transform(wash_grid_2018[features])
wash_grid_2020.loc[:,features] = scaler.transform(wash_grid_2020[features])
wash_grid_2018['population'] = wash_grid_2018_['population']
wash_grid_2020['population'] = wash_grid_2020_['population']
# standardize naming
to_replace = {'laguajira': 'la_guajira','valledelcauca': 'valle_del_cauca'}
wash_grid_2018['adm1_name'] = wash_grid_2018['adm1_name'].replace(to_replace)
wash_grid_2020['adm1_name'] = wash_grid_2020['adm1_name'].replace(to_replace)
# get median for everything except population
agg_type = {
'vegetation': 'median',
'aridity_cgiarv2': 'median',
'temperature': 'median',
'nighttime_lights': 'median',
'population': 'sum',
'elevation': 'median',
'urban_index': 'median',
'nearest_waterway': 'median',
'nearest_commercial': 'median',
'nearest_restaurant': 'median',
'nearest_hospital': 'median',
'nearest_airport': 'median',
'nearest_highway': 'median',
'perc_hh_no_water_supply': 'median',
'perc_hh_no_toilet': 'median',
'perc_hh_no_sewage': 'median',
}
wash_metro_2018 = wash_grid_2018.groupby(agg_level).agg(agg_type).reset_index()
wash_metro_2020 = wash_grid_2020.groupby(agg_level).agg(agg_type).reset_index()
# combine (wide format)
wash_agg = pd.merge(
wash_metro_2018, wash_metro_2020, left_on = agg_level, right_on = agg_level, suffixes = ['', '_2020']
, how = 'left'
)
# convert to long
df_ = wash_agg.set_index('adm1_name').stack().reset_index()
df_.columns = ['adm1_name', 'feature', 'value']
df_['year'] = 2018
for i, row in df_.iterrows():
if row.feature[-5:] == '_2020':
df_.loc[i, 'year'] = 2020
df_.loc[i, 'feature'] = df_.loc[i, 'feature'][:-5]
# df_.to_csv('wash_agg.csv', index = False)
return df_
def aggregate_predictions(by = 'department'):
'''
Aggregates predictions according to the level specified.
Returns
(DataFrame): aggregated features and metrics per year (long format)
'''
if by == 'department':
return _aggregate_by_department()
elif by == 'metro_area':
return _aggregate_by_metro_area()
else:
print('Unrecognized aggregation level.')
## Rollout utils ----
def save_output(test_df, out_file = preds250_dir + 'test.gpkg'):
'''
This function saves a geopackage per dataframe provided.
Args
test_df (DataFrame): contains features, geometries and predictions
out_file (str): filename to save to
Returns
None
'''
# keep only geometries and predictions
keep_cols = ['id', 'geometry', 'centroid_geometry', 'adm1_name', 'adm2_name', 'pred_perc_hh_no_water_supply', 'pred_perc_hh_no_toilet', 'pred_perc_hh_no_sewage']
    df2 = test_df[keep_cols].copy()  # copy to avoid pandas SettingWithCopyWarning
df2['geometry'] = df2['geometry'].apply(loads)
# output to geopackage
gdf2 = gpd.GeoDataFrame(df2, geometry = 'geometry')
gdf2['centroid_geometry'] = gdf2['centroid_geometry'].astype(str)
gdf2.crs = 'epsg:4326'
gdf2.to_file(out_file, driver = 'GPKG')
def predict_by_chunk(adm1, in_dir = grid250_dir, out_dir = preds250_dir, chunksize = 30000):
'''
Splits data per department into chunks of 30K rows, so that prediction does not crash
Args
adm1 (str): Department to predict on
in_dir (str): directory where grids by department is located (e.g. amazonas.csv contains the grids + geometries of Amazon)
out_dir (str): directory where predictions will be saved
chunksize (int): how many rows to read at a time
Returns
None
'''
geom_col = 'centroid_geometry'
chunk_dir = out_dir + adm1 + '/'
if not os.path.exists(chunk_dir):
os.makedirs(chunk_dir)
# to resume at nth_chunk, use skiprows=nth_chunk*chunksize
c = 0
with tqdm() as pbar:
for chunk in pd.read_csv(data_dir + in_dir + adm1 + '.csv', chunksize=chunksize):
c += 1
df = chunk
df[geom_col] = df[geom_col].apply(loads)
gdf = gpd.GeoDataFrame(df, geometry = geom_col)
test_df = geoutils.generate_data(gdf = gdf, year = '2018', out_file = chunk_dir + f'{c}.csv', save = True, verbose = False)
            test_df, top_features = model_rollout(train_df, test_df, fit = False, save = False, verbose = False)  # train_df must be supplied by the caller as a module-level global
save_output(test_df, out_file = chunk_dir + f'{c}.gpkg')
pbar.update(1)
def gpkgs_to_raster(adm1, verbose = True):
'''
This function converts the chunked gpkg predictions to 1 raster per department. Before running, make a gdal conda environment using:
# conda create --name gdal_env
# conda activate gdal_env
# conda install -c conda-forge gdal
Args
adm1 (str): department to consolidate data on
verbose (bool): display logging
Returns
None
'''
# get list of numerical gpkgs
fnames = [
fname for fname in os.listdir(preds250_dir + adm1)
if '.gpkg' in fname
and re.search('[0-9]',fname) is not None
]
fnames.sort()
if verbose:
iterable = tqdm(fnames)
else:
iterable = fnames
gdfs = []
    if verbose: print('Consolidating to one geopackage..')
for fname in iterable:
gdfs.append(gpd.read_file(preds250_dir + f'{adm1}/{fname}'))
gdf = pd.concat(gdfs, axis = 0)
x1, y1, x2, y2 = tuple(gdf.total_bounds)
gdf.to_file(preds250_dir + f'{adm1}/cons.gpkg', driver = 'GPKG')
text = '''
eval "$(conda shell.bash hook)"
conda activate gdal_env
gdal_rasterize -a pred_perc_hh_no_water_supply -tr 0.00225225 0.00225225 -co "COMPRESS=DEFLATE" -a_nodata 0.0 -te {x1} {y1} {x2} {y2} -ot Float32 -of GTiff {in_dir}cons.gpkg {out_dir}{adm1}_perc_hh_no_water_supply.tif
gdal_rasterize -a pred_perc_hh_no_sewage -tr 0.00225225 0.00225225 -co "COMPRESS=DEFLATE" -a_nodata 0.0 -te {x1} {y1} {x2} {y2} -ot Float32 -of GTiff {in_dir}cons.gpkg {out_dir}{adm1}_perc_hh_no_sewage.tif
gdal_rasterize -a pred_perc_hh_no_toilet -tr 0.00225225 0.00225225 -co "COMPRESS=DEFLATE" -a_nodata 0.0 -te {x1} {y1} {x2} {y2} -ot Float32 -of GTiff {in_dir}cons.gpkg {out_dir}{adm1}_perc_hh_no_toilet.tif
# gdal_merge.py -o {adm1}.tif 1.tif 2.tif 3.tif -co COMPRESS=LZW -co BIGTIFF=YES -co PREDICTOR=2 -co TILED=YES
'''
rpl = {'{x1}': x1, '{x2}': x2, '{y1}': y1, '{y2}': y2, '{adm1}': adm1, '{in_dir}': preds250_dir + adm1 + '/', '{out_dir}': preds250_dir}
for k, v in rpl.items():
text = text.replace(k, str(v))
    if verbose: print('Running rasterization')
    with open('rasterize.sh', 'w') as f:
        f.writelines(text)
    result = subprocess.run('sh rasterize.sh', shell = True, stdout=subprocess.PIPE)
    if verbose: print(result.stdout)
|
{"hexsha": "ab71469904597d03cfa039e33e2d00388e85c882", "size": 28908, "ext": "py", "lang": "Python", "max_stars_repo_path": "utils/modelutils.py", "max_stars_repo_name": "thinkingmachines/geoai-immap-wash", "max_stars_repo_head_hexsha": "466d027bc4ffdd5418375b1c67d5b6f786b01add", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "utils/modelutils.py", "max_issues_repo_name": "thinkingmachines/geoai-immap-wash", "max_issues_repo_head_hexsha": "466d027bc4ffdd5418375b1c67d5b6f786b01add", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "utils/modelutils.py", "max_forks_repo_name": "thinkingmachines/geoai-immap-wash", "max_forks_repo_head_hexsha": "466d027bc4ffdd5418375b1c67d5b6f786b01add", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.0615384615, "max_line_length": 221, "alphanum_fraction": 0.6305175038, "include": true, "reason": "import numpy,from scipy", "num_tokens": 7661}
|
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 27 01:06:59 2016
@author: yxl
"""
from imagepy import IPy, wx
import numpy as np
from imagepy.core.engine import Simple, Filter
from imagepy.core.manager import WindowsManager
from scipy.ndimage import label, generate_binary_structure
from skimage.measure import regionprops
class Mark:
def __init__(self, data):
self.data = data
def draw(self, dc, f, **key):
dc.SetPen(wx.Pen((255,255,0), width=1, style=wx.SOLID))
dc.SetTextForeground((255,255,0))
font = wx.Font(8, wx.FONTFAMILY_DEFAULT,
wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False)
dc.SetFont(font)
data = self.data[0 if len(self.data)==1 else key['cur']]
for i in range(len(data)):
pos = f(*(data[i][0][1], data[i][0][0]))
dc.DrawCircle(pos[0], pos[1], 2)
dc.DrawText('id={}'.format(i), pos[0], pos[1])
            if data[i][1] is None: continue
k1, k2, a = data[i][1]
            axes = np.array([[-np.sin(a), np.cos(a)],
                             [np.cos(a), np.sin(a)]])*[k1/2, k2/2]
            ar = np.linspace(0, np.pi*2,25)
            xy = np.vstack((np.cos(ar), np.sin(ar)))
            arr = np.dot(axes, xy).T+data[i][0]
dc.DrawLines([f(*i) for i in arr[:,::-1]])
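# A standalone, hypothetical sketch of the ellipse parametrization used in
# Mark.draw() above: 25 boundary points are generated from the major/minor
# axis lengths k1, k2 and the orientation angle a. All values are illustrative.
def _demo_ellipse_outline(center=(50., 50.), k1=20., k2=10., a=0.5):
    axes = np.array([[-np.sin(a), np.cos(a)],
                     [np.cos(a), np.sin(a)]])*[k1/2, k2/2]
    ar = np.linspace(0, np.pi*2, 25)
    xy = np.vstack((np.cos(ar), np.sin(ar)))
    return np.dot(axes, xy).T + center  # (25, 2) array of boundary points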
# center, area, l, extent, cov
class RegionCounter(Simple):
title = 'Geometry Analysis'
note = ['8-bit', '16-bit']
para = {'con':'8-connect', 'center':True, 'area':True, 'l':True, 'extent':False, 'cov':False, 'slice':False,
'ed':False, 'holes':False, 'ca':False, 'fa':False, 'solid':False}
    view = [(list, ['4-connect', '8-connect'], str, 'connection', 'con', 'pix'),
(bool, 'slice', 'slice'),
            ('lab','========= indicate ========='),
(bool, 'center', 'center'),
(bool, 'area', 'area'),
(bool, 'perimeter', 'l'),
(bool, 'extent', 'extent'),
(bool, 'equivalent diameter', 'ed'),
(bool, 'convex area', 'ca'),
(bool, 'holes', 'holes'),
(bool, 'filled area', 'fa'),
(bool, 'solidity', 'solid'),
(bool, 'cov', 'cov')]
#process
def run(self, ips, imgs, para = None):
if not para['slice']:imgs = [ips.img]
k = ips.unit[0]
titles = ['Slice', 'ID'][0 if para['slice'] else 1:]
if para['center']:titles.extend(['Center-X','Center-Y'])
if para['area']:titles.append('Area')
if para['l']:titles.append('Perimeter')
if para['extent']:titles.extend(['Min-Y','Min-X','Max-Y','Max-X'])
if para['ed']:titles.extend(['Diameter'])
if para['ca']:titles.extend(['ConvexArea'])
if para['holes']:titles.extend(['Holes'])
if para['fa']:titles.extend(['FilledArea'])
if para['solid']:titles.extend(['Solidity'])
if para['cov']:titles.extend(['Major','Minor','Ori'])
buf = imgs[0].astype(np.uint16)
data, mark = [], []
strc = generate_binary_structure(2, 1 if para['con']=='4-connect' else 2)
for i in range(len(imgs)):
label(imgs[i], strc, output=buf)
ls = regionprops(buf)
dt = [[i]*len(ls), list(range(len(ls)))]
if not para['slice']:dt = dt[1:]
if not para['cov']: cvs = [None] * len(ls)
else: cvs = [(i.major_axis_length, i.minor_axis_length, i.orientation) for i in ls]
centroids = [i.centroid for i in ls]
mark.append([(center, cov) for center,cov in zip(centroids, cvs)])
if para['center']:
dt.append([round(i.centroid[1]*k,1) for i in ls])
dt.append([round(i.centroid[0]*k,1) for i in ls])
if para['area']:
dt.append([i.area*k**2 for i in ls])
if para['l']:
dt.append([round(i.perimeter*k,1) for i in ls])
if para['extent']:
for j in (0,1,2,3):
dt.append([i.bbox[j]*k for i in ls])
if para['ed']:
dt.append([round(i.equivalent_diameter*k, 1) for i in ls])
if para['ca']:
dt.append([i.convex_area*k**2 for i in ls])
if para['holes']:
dt.append([1-i.euler_number for i in ls])
if para['fa']:
dt.append([i.filled_area*k**2 for i in ls])
if para['solid']:
dt.append([round(i.solidity, 2) for i in ls])
if para['cov']:
dt.append([round(i.major_axis_length*k, 1) for i in ls])
dt.append([round(i.minor_axis_length*k, 1) for i in ls])
                dt.append([round(i.orientation, 1) for i in ls])  # orientation is an angle, not a length; no unit scaling
data.extend(list(zip(*dt)))
ips.mark = Mark(mark)
IPy.table(ips.title+'-region', data, titles)
# center, area, l, extent, cov
class RegionFilter(Filter):
title = 'Geometry Filter'
note = ['8-bit', '16-bit', 'auto_msk', 'auto_snap','preview']
para = {'con':'4-connect', 'inv':False, 'area':0, 'l':0, 'holes':0, 'solid':0, 'e':0, 'front':255, 'back':100}
    view = [(list, ['4-connect', '8-connect'], str, 'connection', 'con', 'pix'),
(bool, 'invert', 'inv'),
('lab','Filter: "+" means >=, "-" means <'),
(int, (0, 255), 0, 'front color', 'front', ''),
(int, (0, 255), 0, 'back color', 'back', ''),
(float, (-1e6, 1e6), 1, 'area', 'area', 'unit^2'),
(float, (-1e6, 1e6), 1, 'perimeter', 'l', 'unit'),
(int, (-10,10), 0, 'holes', 'holes', 'num'),
(float, (-1, 1,), 1, 'solidity', 'solid', 'ratio'),
(float, (-100,100), 1, 'eccentricity', 'e', 'ratio')]
#process
def run(self, ips, snap, img, para = None):
k, unit = ips.unit
strc = generate_binary_structure(2, 1 if para['con']=='4-connect' else 2)
lab, n = label(snap==0 if para['inv'] else snap, strc, output=np.uint16)
idx = (np.ones(n+1)*(0 if para['inv'] else para['front'])).astype(np.uint8)
ls = regionprops(lab)
        # sign convention for all filters below: a positive threshold keeps
        # regions with value >= threshold, a negative one keeps value < |threshold|
        for i in ls:
if para['area'] == 0: break
if para['area']>0:
if i.area*k**2 < para['area']: idx[i.label] = para['back']
if para['area']<0:
if i.area*k**2 >= -para['area']: idx[i.label] = para['back']
for i in ls:
if para['l'] == 0: break
if para['l']>0:
if i.perimeter*k < para['l']: idx[i.label] = para['back']
if para['l']<0:
if i.perimeter*k >= -para['l']: idx[i.label] = para['back']
for i in ls:
if para['holes'] == 0: break
if para['holes']>0:
if 1-i.euler_number < para['holes']: idx[i.label] = para['back']
if para['holes']<0:
if 1-i.euler_number >= -para['holes']: idx[i.label] = para['back']
for i in ls:
if para['solid'] == 0: break
if para['solid']>0:
if i.solidity < para['solid']: idx[i.label] = para['back']
if para['solid']<0:
if i.solidity >= -para['solid']: idx[i.label] = para['back']
for i in ls:
if para['e'] == 0: break
if para['e']>0:
if i.minor_axis_length>0 and i.major_axis_length/i.minor_axis_length < para['e']:
idx[i.label] = para['back']
if para['e']<0:
if i.minor_axis_length>0 and i.major_axis_length/i.minor_axis_length >= -para['e']:
idx[i.label] = para['back']
idx[0] = para['front'] if para['inv'] else 0
img[:] = idx[lab]
plgs = [RegionCounter, RegionFilter]
|
{"hexsha": "fc0f51c3ffa5673b1a5b164ae24ac7e5dc1902fc", "size": 7781, "ext": "py", "lang": "Python", "max_stars_repo_path": "imagepy/menus/Analysis/Region Analysis/regionprops_plgs.py", "max_stars_repo_name": "siyemuxu888/imagepy", "max_stars_repo_head_hexsha": "a933526483a15da282bacac54608d44d2173beb4", "max_stars_repo_licenses": ["BSD-4-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "imagepy/menus/Analysis/Region Analysis/regionprops_plgs.py", "max_issues_repo_name": "siyemuxu888/imagepy", "max_issues_repo_head_hexsha": "a933526483a15da282bacac54608d44d2173beb4", "max_issues_repo_licenses": ["BSD-4-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "imagepy/menus/Analysis/Region Analysis/regionprops_plgs.py", "max_forks_repo_name": "siyemuxu888/imagepy", "max_forks_repo_head_hexsha": "a933526483a15da282bacac54608d44d2173beb4", "max_forks_repo_licenses": ["BSD-4-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.9889502762, "max_line_length": 114, "alphanum_fraction": 0.4953090862, "include": true, "reason": "import numpy,from scipy", "num_tokens": 2215}
|
#The first 270 lines were provided graciously from Nicole Paulat, https://github.com/npaulat
import sys
import os
import argparse
import itertools
import subprocess
from pyfaidx import Faidx
from pyfaidx import Fasta
import time
import re
import math
import stat
import shutil
from shutil import copyfile
import errno
from Bio import SeqIO
from Bio import SeqRecord
from Bio import Seq
#import numpy as np
#import pandas as pd
# Where RepeatMasker is stored
REPEATMASKER = "/lustre/work/daray/software/RepeatMasker"
# Where this script can find liftUp, twoBitInfo and twoBitToFa
BIN_DIR = "/lustre/work/daray/software"
# Define arguments
def get_args():
#What this script does
parser = argparse.ArgumentParser(description="Generate SGE cluster runs for RepeatMasker; built in RepeatMasker parameters are -xsmall [softmasks repetitive regions] -a [.align output file] -gff [generates a GFF format output] -pa [runs in parallel], please see RepeatMasker for details of these run options", formatter_class=argparse.ArgumentDefaultsHelpFormatter)
required = parser.add_argument_group('required arguments')
#Give input genome FASTA
parser.add_argument('-i', '--input', type=str, help='genome file in FASTA format', required=True)
#Argument of species name
parser.add_argument('-sp', '--species', type=str, help='Source species of query DNA FASTA', required=False)
# Desired batch number
parser.add_argument('-b', '--batch_count', type=int, help='Batch count', default=50)
# Input genome directory
parser.add_argument('-dir', '--genome_dir', type=str, help='Path to genome FASTA', required=True)
# Argument for output directory
parser.add_argument('-od', '--outdir', type=str, help='Location of directory for the output subdirectory', default='.')
# Which queue to use
parser.add_argument('-q', '--queue', type=str, help='Select the queue to run RepeatMasker in [quanah|hrothgar] with the quanah option being the general quanah omni queue, and hrothgar being the communitycluster Chewie queue', choices=['quanah', 'hrothgar'], default='quanah')
	#Argument of RepeatMasker run parameter
	parser.add_argument('-lib', type=str, help='RepeatMasker run parameter custom library "-lib [filename]" option', required=False)
	#Argument of RepeatMasker run parameter (boolean flag; 'type' must not be combined with action='store_true')
	parser.add_argument('-xsmall', help='Select the RepeatMasker masking option for lowercase bases [-xsmall]; default is to mask as Ns', action='store_true')
	#Argument of RepeatMasker run parameter
	parser.add_argument('-engine', type=str, help='RepeatMasker run parameter "-engine <search_engine>" option; select a non-default search engine to use, otherwise RepeatMasker will use the default configured at install time; [crossmatch|abblast|rmblast|hmmer]', choices=['crossmatch', 'abblast', 'rmblast', 'hmmer'], required=False)
	#Argument of RepeatMasker run parameter (boolean flag)
	parser.add_argument('-inv', help='RepeatMasker parameter flag "-inv" option; alignments are presented in the orientation of the repeat', action='store_true')
	#Argument of RepeatMasker run parameter (boolean flag)
	parser.add_argument('-nolow', help='RepeatMasker parameter flag "-nolow" option; does not mask low complexity DNA or simple repeats', action='store_true')
	#Argument of RepeatMasker run parameter ('--speed' gives dest 'speed'; '%%' escapes literal percent signs in argparse help text)
	parser.add_argument('-s', '--speed', type=str, help='RepeatMasker run parameter "-q" or "-s" option; q=quick search, 5-10%% less sensitive but 3-4 times faster than default; s=slow search, 0-5%% more sensitive but 2.5 times slower than default', choices=['q', 's'], required=False)
	#Argument of RepeatMasker run parameter
	parser.add_argument('-div', type=int, help='RepeatMasker run parameter "-div [number]" option; masks only those repeats that are less than [number] percent diverged from the consensus sequence', required=False)
args = parser.parse_args()
GENOME = args.input
SPECIES = args.species
BATCH_COUNT = args.batch_count
GENOME_DIR = args.genome_dir
OUTDIR = args.outdir
QUEUE = args.queue
LIBRARY = args.lib
XSMALL = args.xsmall
ENGINE = args.engine
INV = args.inv
NOLOW = args.nolow
SPEED = args.speed
DIV = args.div
return GENOME, SPECIES, BATCH_COUNT, GENOME_DIR, OUTDIR, QUEUE, LIBRARY, XSMALL, ENGINE, INV, NOLOW, SPEED, DIV
GENOME, SPECIES, BATCH_COUNT, GENOME_DIR, OUTDIR, QUEUE, LIBRARY, XSMALL, ENGINE, INV, NOLOW, SPEED, DIV = get_args()
# Sanity checks
print("The query genome is {}.\n".format(GENOME))
print("{} batches will be made.\n".format(str(BATCH_COUNT)))
print("The genome FASTA is located in '{}'.\n".format(GENOME_DIR))
print("The output directory is '{}'.\n".format(OUTDIR))
print("The job queue is {}.\n".format(QUEUE))
if not SPECIES and not LIBRARY:
	sys.exit("Must supply value for option 'species' or 'lib'!")
if SPECIES and LIBRARY:
	sys.exit("Only supply a value for one option: 'species' or 'lib'! Not both!")
FLAGS = [LIBRARY, XSMALL, ENGINE, INV, NOLOW, SPEED, DIV]
if not any(FLAGS):
	print("All default RepeatMasker parameters were used, no custom library.")
else:
print("Custom parameters used:\n")
if XSMALL:
print("-xsmall flag used.\n")
if INV:
print("-inv flag used.\n")
if NOLOW:
print("-nolow flag used.\n")
if LIBRARY:
print("-lib flag used. Custom library is '{}'.\n".format(os.path.basename(LIBRARY)))
if ENGINE:
print("-engine flag used. Changed search engine to {}.\n".format(ENGINE))
if SPEED:
print("-{} flag used. Search sensitivity has changed.\n".format(SPEED))
if DIV:
print("-div flag used. RepeatMasker will mask only repeats that are less than {}% diverged from the consensus sequence.\n".format(str(DIV)))
if not os.path.isdir(GENOME_DIR):
sys.exit("The given genome directory, '{}', does not exist.".format(GENOME_DIR))
GENOME_FASTA = os.path.join(GENOME_DIR, GENOME)
#if not os.path.isfile(GENOME_FASTA):
# sys.exit("The given genome file '{}' does not exist.").format(GENOME_FASTA)
#if os.stat(GENOME).st_size==0:
# sys.exit("The genome file, '{}', is empty.").format(GENOME_FASTA)
#if not os.path.isfile(LIBRARY):
# sys.exit("The given library file '{}' does not exist.").format(LIBRARY)
#if os.stat(LIBRARY).st_size==0:
# sys.exit("The library file, '{}', is empty.").format(LIBRARY)
try:
if not os.path.getsize(GENOME_FASTA) > 0:
sys.exit("The genome file, '{}', is empty.".format(GENOME_FASTA))
except OSError as e:
sys.exit("The genome file '{}' does not exist or is inaccessible.".format(GENOME_FASTA))
if LIBRARY:  # only validate the library file when the '-lib' option was given
	try:
		if not os.path.getsize(LIBRARY) > 0:
			sys.exit("The library file, '{}', is empty.".format(LIBRARY))
	except OSError as e:
		sys.exit("The library file '{}' does not exist or is inaccessible.".format(LIBRARY))
if not os.path.isdir(OUTDIR):
sys.exit("The output directory '{}' does not exist.".format(OUTDIR))
PARTITION_DIR = os.path.join(GENOME_DIR, "RMPart")
SLOTS_PER_BATCH = 10
MAX_DIR_SIZE = 1000
NUM_BATCHES = BATCH_COUNT
# NOTE: the helper functions called below are defined later in this file; in a
# runnable script these top-level calls must come after the definitions.
check_empty(PARTITION_DIR)
# &checkEmpty($partitionDir);
if LIBRARY:
	LIB_FILE = os.path.basename(LIBRARY)
	copyfile(LIBRARY, os.path.join(PARTITION_DIR, LIB_FILE))  # copyfile needs a full destination path, not a directory
	LIBRARY = os.path.join(PARTITION_DIR, LIB_FILE)
# my $lib;
# if ( exists $options{'lib'} )
# {
# system("cp $options{'lib'} $partitionDir");
# my ( $vol, $dir, $file ) = File::Spec->splitpath( $options{'lib'} );
# $lib="$partitionDir/$file";
# }
#simple_partition()
#build_DoLift()
# &simplePartition();
# &buildDoLift();
# exit;
############## FUNCTIONS #################
# Subroutine (1)
def check_empty(PARTITION_DIR):
PARTITION_DIR = os.path.abspath(PARTITION_DIR)
if not os.path.exists(PARTITION_DIR):
try:
os.makedirs(PARTITION_DIR)
except OSError as e:
if e.errno != errno.EEXIST:
raise
print("Made '{}' directory.".format(PARTITION_DIR))
else:
if not os.listdir(PARTITION_DIR):
print("'{}' is empty. Continuing.".format(PARTITION_DIR))
else:
print("'{}' is not empty. Removing contents and continuing.".format(PARTITION_DIR))
# os.remove(os.path.join(PARTITION_DIR, '*'))
## To remove anything in the RMPart folder from a previous run (all symbolic links (not expected here) and files and subdirectories) without deleting the RMPart directory itself
for FILE in os.listdir(PARTITION_DIR):
FILE_PATH = os.path.join(PARTITION_DIR, FILE)
try:
shutil.rmtree(FILE_PATH)
except OSError:
os.remove(FILE_PATH)
# Subroutine (2)
def get_batches(BATCH_NUM, GENOME_FASTA):
# Return a 3-level list(ref): partitions -> chunks -> chunk properties (scaffold + coordinates)
PARTS = []
GENOME_NAME = os.path.basename(GENOME_FASTA).split(".")[0]
TOTAL_SIZE = 0
SEQS = {}
FAIDX = Faidx(GENOME_FASTA)
FASTA_IDX = GENOME_FASTA + ".fai"
with open(FASTA_IDX) as FILE:
for LINE in FILE:
LINE = LINE.rstrip()
SEQ, SEQ_SIZE, JUNK = LINE.split("\t", 2)
TOTAL_SIZE += int(SEQ_SIZE)
SEQS[SEQ] = int(SEQ_SIZE)
if NUM_BATCHES > 0:
CHUNK_SIZE = int(TOTAL_SIZE / NUM_BATCHES) + 1
BATCHES = []
CURRENT_BATCH_SIZE = 0
for SCAFFOLD in SEQS:
SEQ_SIZE = SEQS[SCAFFOLD]
SEQ_IDX = 0
while SEQ_SIZE > 0:
if (CURRENT_BATCH_SIZE + SEQ_SIZE) > CHUNK_SIZE:
FILL_SIZE = CHUNK_SIZE - CURRENT_BATCH_SIZE
CHUNK_INFO = str(GENOME_NAME + ":" + SCAFFOLD + ":" + str(SEQ_IDX) + "-" + str(SEQ_SIZE))
#NOTE: For scaffold size, always refer back to the index dict, not SEQ_SIZE,
# since SEQ_SIZE changes depending on if the whole scaffold was used in
# a single batch or not (as in the if statement of this loop)
PARTS.append([SCAFFOLD, SEQS[SCAFFOLD], SEQ_IDX, FILL_SIZE, CHUNK_INFO])
BATCHES.append([PARTS])
PARTS = []
SEQ_IDX += FILL_SIZE
SEQ_SIZE -= FILL_SIZE
CURRENT_BATCH_SIZE = 0
else:
CHUNK_INFO = str(GENOME_NAME + ":" + SCAFFOLD + ":" + str(SEQ_IDX) + "-" + str(SEQ_SIZE))
PARTS.append([SCAFFOLD, SEQS[SCAFFOLD], SEQ_IDX, SEQ_SIZE, CHUNK_INFO])
CURRENT_BATCH_SIZE += SEQ_SIZE
SEQ_SIZE = 0
#unclear if BATCHES will be in the appropriate hierarchy of lists/parts(elements) atm
if PARTS:
BATCHES.append([PARTS])
return BATCHES
# Subroutine (3)
def part_path_from_num(PART_NUM, LEVELS):
# Given a partition ID number and number of levels in directory tree,
# determine its path and base filename.
LEAF_ID = PART_NUM % MAX_DIR_SIZE
PART_MOD = int(PART_NUM / MAX_DIR_SIZE)
#Use this line if actually include that dumb loop below
#PATH = str('{:03d}'.format(LEAF_ID))
PATH = str('{:03d}/'.format(LEAF_ID))
## So the below loop should never come into play (as far as I can tell),
## because LEVELS should always = 1, so w/ i=1 i is never less than LEVELS (?????)
## w/n an RMPart dir, PATH folders are always 000 to n, no addnl nums on either side
#for i in range(1, LEVELS):
#i = 1
#while i < LEVELS:
#PATH = PATH + str('{:03d}/'.format(PART_MOD % MAX_DIR_SIZE))
#PART_MOD = int(PART_MOD / MAX_DIR_SIZE)
#i+= 1
	#my $partName = $path;
	PART_NAME = PATH
	# Perl original: $partName =~ s@/@@g  (strip every '/' from the name)
	PART_NAME = PART_NAME.replace('/', '')
return(PATH, PART_NAME)
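# Worked example (illustrative only): with MAX_DIR_SIZE = 1000 and one level,
# partition 1234 maps to leaf directory '234/' with base name '234'.
def _demo_part_path():
	return part_path_from_num(1234, 1)  # -> ('234/', '234')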
def simplePartition():
	# Partition the genome into batches and compute the directory-tree depth.
	# (Port of the Perl subroutine; only the batch/level computation is done here.)
	print("Generating list of batches..")
	batches = get_batches(NUM_BATCHES, GENOME_FASTA)
	num_parts = len(batches)
	levels = 1 + int(math.log(num_parts) / math.log(MAX_DIR_SIZE))
	return batches, levels
|
{"hexsha": "70de33fbe2b395cf8a487a5af3d71d011e987603", "size": 11178, "ext": "py", "lang": "Python", "max_stars_repo_path": "PERM.py", "max_stars_repo_name": "MoamenElmassry/PERM-Pythonized-RepeatMasker-", "max_stars_repo_head_hexsha": "2d9ae54281cac328d40562d760c2aef67a2b47c9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "PERM.py", "max_issues_repo_name": "MoamenElmassry/PERM-Pythonized-RepeatMasker-", "max_issues_repo_head_hexsha": "2d9ae54281cac328d40562d760c2aef67a2b47c9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "PERM.py", "max_forks_repo_name": "MoamenElmassry/PERM-Pythonized-RepeatMasker-", "max_forks_repo_head_hexsha": "2d9ae54281cac328d40562d760c2aef67a2b47c9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.9214285714, "max_line_length": 366, "alphanum_fraction": 0.7193594561, "include": true, "reason": "import numpy", "num_tokens": 3111}
|
#!/usr/bin/env python
# compatibility with python 2/3:
from __future__ import print_function
from __future__ import division
import os, sys
import string
from pylab import *
import astropy.io.fits as pyfits
import numpy as np
import glob
from matplotlib import pylab as plt
from . import ac_settings as ac_set
def compute_flux(wave, flux, blaze, noise, ln_ctr, ln_win, bandtype, frac=True, test_plot=False):
"""
    Calculates the flux inside a bandpass. Interpolates the flux between pixel edges and the bandpass limits.
"""
step = 1e-4
def interpolate_band_lims(array, wave, wmin, wmax, step):
from scipy.interpolate import interp1d
res_ratio = np.mean(np.diff(wave))/step
mask = (wave >= wmin) & (wave <= wmax)
wave_int_low = (wave[wave < wmin][-1], wave[wave >= wmin][0])
wave_int_high = (wave[wave <= wmax][-1], wave[wave > wmax][0])
interv_low = (wave >= wave_int_low[0]) & (wave <= wave_int_low[1])
interv_high = (wave >= wave_int_high[0]) & (wave <= wave_int_high[1])
wave_low = wave[interv_low]
wave_high = wave[interv_high]
array_low = array[interv_low]
array_high = array[interv_high]
interp_low = interp1d(wave_low, array_low, kind='linear')
interp_high= interp1d(wave_high, array_high, kind='linear')
wave_i_low = np.arange(np.min(wave_low), np.max(wave_low), step)
array_i_low = interp_low(wave_i_low)
wave_i_high = np.arange(np.min(wave_high), np.max(wave_high), step)
array_i_high = interp_high(wave_i_high)
wave_i = np.r_[wave_i_low, wave[mask], wave_i_high]
array_i = np.r_[array_i_low/(array_i_low.size*res_ratio), array[mask], array_i_high/(array_i_high.size*res_ratio)]
return wave_i, array_i
ctr = ln_ctr
win = ln_win
print("Executing compute_flux")
if blaze is None: blaze = np.ones(len(flux))
# make all important values the size of px_size
px_size = np.diff(wave)
wave = wave[1:]
flux = flux[1:]
blaze = blaze[1:]
# BANDPASS TYPES
if bandtype == 'tri':
wmin = ln_ctr - ln_win
wmax = ln_ctr + ln_win
# used for frac = false:
cond = (wave > wmin) & (wave < wmax)
bandfunc = -np.abs(wave-ln_ctr)/ln_win + 1.
bandfunc = np.where(bandfunc > 0, bandfunc, bandfunc*0.0)
    if bandtype == 'sq':
        wmin = ln_ctr - ln_win/2.
        wmax = ln_ctr + ln_win/2.
        # used for frac = false:
        cond = (wave > wmin) & (wave < wmax)
        bandfunc = np.where(cond, 1., 0.)
# HARPS DRS METHOD
    if frac == False:
        flux_i = flux[cond]
        wave_i = wave[cond]
        blaze_i = blaze[cond]
        bp_i = bandfunc[cond]
        dflux_i = flux_i/blaze_i
# ACTIN METHOD
if frac == True:
wave_i, flux_i = interpolate_band_lims(flux, wave, wmin, wmax, step)
_, dflux_i = interpolate_band_lims(flux/blaze, wave, wmin, wmax, step)
if bandtype == 'tri':
bp_i = 1 - np.abs(wave_i - ctr)/win
bp_i = np.where(bp_i > 0, bp_i, bp_i*0.0)
elif bandtype == 'sq':
bp_mask = (wave_i >= ctr - win/2) & (wave_i <= ctr + win/2)
bp_i = np.where(bp_mask, 1, 0.0)
r_neg_ln, _, _, flg_negflux = check_negflux(dflux_i, verb=False)
# Flux sum and variance for line:
if not noise: noise = 0.0
f_sum = sum(dflux_i * bp_i)/win
f_sum_var = sum((flux_i + noise**2) * bp_i**2)/win**2
return f_sum, f_sum_var, bandfunc, flg_negflux, r_neg_ln
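# Hedged usage sketch of compute_flux() on a synthetic flat spectrum; the
# wavelength grid and bandpass below are made up and this _demo_ function is
# never called by ACTIN itself.
def _demo_compute_flux():
    wave = np.arange(4990.0, 5010.0, 0.01)
    flux = np.ones_like(wave)
    # square bandpass of width 1.0 centred on 5000.0, no blaze, no extra noise
    f_sum, f_sum_var, bandfunc, flg, r_neg = compute_flux(
        wave, flux, None, 0.0, 5000.0, 1.0, 'sq')
    return f_sum, flg  # flg should be 'OK' for an all-positive spectrum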
def check_negflux(flux, verb=True):
flux = np.asarray(flux)
# Total absolute flux in given spectral line:
tot_flux = np.sum(abs(flux))
# Number of pixels with negative flux
neg_pixs = flux[flux < 0].size
# Negative flux in given spectral line
neg_flux = np.sum(flux[flux < 0])
# Positive flux in given spectral line
pos_flux = np.sum(flux[flux > 0])
# Negative flux ratio for a given spectral line:
r_neg_ln = abs(neg_flux)/tot_flux
flg_negflux = "OK"
if neg_pixs > 0:
if verb:
print(f"Negative flux detected")
flg_negflux = "negFlux"
return r_neg_ln, neg_flux, tot_flux, flg_negflux
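# Hypothetical usage sketch of check_negflux(); the toy flux values are made up.
def _demo_check_negflux():
    flux = [10.0, -1.0, 5.0]
    r_neg, neg, tot, flag = check_negflux(flux, verb=False)
    return r_neg, flag  # -> (0.0625, 'negFlux'): 1/16 of the |flux| is negative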
def remove_output2(star_name, instr, file_type, save_output):
file_rmv = "{}_{}_{}_data.rdb".format(star_name, instr, file_type)
files = glob.glob(os.path.join(save_output, star_name, file_rmv))
if files:
for file in files:
if os.path.isfile(file):
print(f"Removing file {file}")
os.remove(file)
#else:
# print("There are no files to remove.")
def files_by_star_and_ftype(files):
"""
Organize files by file type.
"""
# different file types in files
ft = []
for k in range(len(files)):
ft.append(get_file_type(files[k]))
ft = list(set(ft))
# different stars in files
sp = []
for k in range(len(files)):
sp.append(os.path.split(files[k])[0])
sp = list(set(sp))
files_list = []
for k in range(len(sp)):
lists_sp = []
for i in range(len(ft)):
lists_ft = []
for j in range(len(files)):
if sp[k] in files[j] and ft[i] in files[j]:
lists_ft.append(files[j])
if lists_ft:
lists_sp.append(lists_ft)
files_list.append(lists_sp)
return files_list
def override_obj(obj, obj_name):
"""
Override object name with name given in obj_name option.
"""
if type(obj_name) is list and len(obj_name) == 1:
return obj_name[0]
elif type(obj_name) is list and len(obj_name) > 1:
sys.exit("*** ERROR: obj_name requires only one name, more than one given.")
else: return obj_name
def check_files(files):
for k in range(len(files)):
if not os.path.isfile(files[k]):
sys.exit("*** ERROR: File {} not found.".format(files[k]))
def get_file_type(file):
"""
Checks file name for known file types and returns it.
"""
    for ftype in ac_set.ftypes['all']:
        if ftype in file:
            return ftype
def get_instr(fits_file):
    if ".rdb" in fits_file:
        return False, False
    hdu = pyfits.open(fits_file)
    tel = hdu[0].header['TELESCOP']
    instr = hdu[0].header['INSTRUME']
    hdu.close()
    return tel, instr
def get_target(fits_file):
"""
Returns the object targeted in the fits file 'fits_file'.
"""
    hdu = pyfits.open(fits_file)
    try:
        obj = hdu[0].header['OBJECT']
    except KeyError:
        try:
            obj = hdu[0].header['ESO OBS TARG NAME']
        except KeyError:
            try:
                obj = hdu[0].header['TNG OBS TARG NAME']
            except KeyError:
                print("*** ERROR: Cannot identify object.")
                obj = None
    hdu.close()
    return obj
def check_targ(fits_file, targets):
"""
Checks if a fits file belongs to a target in a list of targets.
"""
print("Executing: check_targ")
print("Targets = {}".format(targets))
obj = get_target(fits_file)
print("Object = {}".format(obj))
if obj in targets: return True
else:
print("*** INFO: {} is not in the target list.".format(obj))
print("*** INFO: file {}".format(fits_file))
return False
def test_actin(test, path, calc_index):
if not calc_index:
calc_index = ("I_CaII", "I_Ha06")
if test == "S1D":
files = os.path.join(path, "test_files", "HD41248_1_1_S1D_A.fits")
elif test == "S2D":
files = os.path.join(path, "test_files", "HD41248_1_1_S2D_A.fits")
elif test == "e2ds":
files = os.path.join(path, "test_files", "HARPS.2003-12-13T06:19:48.371_e2ds_A.fits")
elif test == "s1d":
files = os.path.join(path, "test_files", "HARPS.2010-09-18T23:42:36.178_s1d_A.fits")
elif test == "adp":
files = os.path.join(path, "test_files", "ADP.2014-09-16T11:04:45.123.fits")
elif test == 'rdb':
files = os.path.join(path, "test_files", "2010-09-18T23:42:36.178_spec.rdb")
else:
print("*** ERROR:")
print("*** Test can only be 'S1D', 'S2D', 'e2ds', 's1d', 'adp', or 'rdb'")
return None, None
return calc_index, files
def read_rdb(filename):
"""
    Reads an .rdb file and returns a dictionary with the headers as keys
    and the data as lists ('output'), plus a list of the headers in file
    order ('keys').
    use: table = pyrdb.read_rdb(file)[0] for the data
    use: table = pyrdb.read_rdb(file)[1] for the keys in order
"""
    with open(filename, 'r') as f:
        data = f.readlines()
    key = data[0][:-1].split('\t')
    output = {}
    for i in range(len(key)): output[key[i]] = []
    # data rows start at index 2 (index 1 is the dashed separator row)
    for line in data[2:]:
        qq = line[:-1].split('\t')
        for i in range(len(key)):
            try: value = float(qq[i])
            except ValueError: value = qq[i]
            output[key[i]].append(value)
    return output, key
def save_rdb(dic, keys, file):
    """
    From a dictionary 'dic', saves the columns matching the specified 'keys'
    into an .rdb file named 'file'.
    """
out = open(file,'w')
n_keys = len(keys)
for k in range(n_keys):
if k != n_keys-1: out.write('{}\t'.format(keys[k]))
else: out.write('{}'.format(keys[k]))
out.write('\n')
for k in range(n_keys):
if k != n_keys-1: out.write('{}\t'.format('-'*len(keys[k])))
else: out.write('{}'.format('-'*len(keys[k])))
out.write('\n')
for i in range(len(dic[keys[0]])):
for k in range(n_keys):
if k != n_keys-1: out.write('{}\t'.format(dic[keys[k]][i]))
else: out.write('{}'.format(dic[keys[k]][i]))
out.write('\n')
out.close()
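# Round-trip sketch (hypothetical data, not from the original module):
#   d = {"bjd": [2450000.5], "flux": [1.23]}
#   save_rdb(d, ["bjd", "flux"], "out.rdb")
#   table, keys = read_rdb("out.rdb")   # table["flux"] == [1.23]
# save_rdb writes a tab-separated header plus a dashed separator row, which
# is why read_rdb starts parsing data at line index 2.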
def add_rdb(dic,keys, file_name):
"""
Adds data to an existing .rdb file. The 'keys' must match the headers already present in the file.
"""
out = open(file_name,'a')
n_keys = len(keys)
for i in range(len(dic[keys[0]])):
for k in range(n_keys):
if k != n_keys-1: out.write('{}\t'.format(dic[keys[k]][i]))
            else: out.write('{}'.format(dic[keys[k]][i]))
out.write('\n')
out.close()
def plot_params(width=6, height=3.5):
"""
Parameters for plots.
"""
rcdefaults()
rc('axes', linewidth=1)
rc('lines', markeredgewidth=0.5)
rc('errorbar', capsize=2)
rc('ytick', direction='in')
rc('xtick', direction='in')
    rc('ytick', right=True)
    rc('xtick', top=True)
    rc('xtick.minor', visible=True)
rc('xtick.major', size=7)
rc('ytick.major', size=7)
rc('xtick.minor', size=3)
rc('ytick.minor', size=3)
rc("font", family="sans-serif")
rc("font", size=10)
rc("figure.subplot", left=(0.15))
rc("figure.subplot", right=(0.93))
rc("figure.subplot", bottom=(0.12))
rc("figure.subplot", top=(0.95))
return width, height
|
{"hexsha": "4e760e44c048df82fd23a64b362605481c2ad1dd", "size": 11267, "ext": "py", "lang": "Python", "max_stars_repo_path": "actin/actin_files/ac_tools.py", "max_stars_repo_name": "j-faria/ACTIN", "max_stars_repo_head_hexsha": "f16d597472ccabb7627753179032e18ae21e83a2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "actin/actin_files/ac_tools.py", "max_issues_repo_name": "j-faria/ACTIN", "max_issues_repo_head_hexsha": "f16d597472ccabb7627753179032e18ae21e83a2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "actin/actin_files/ac_tools.py", "max_forks_repo_name": "j-faria/ACTIN", "max_forks_repo_head_hexsha": "f16d597472ccabb7627753179032e18ae21e83a2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.9640102828, "max_line_length": 139, "alphanum_fraction": 0.5809887281, "include": true, "reason": "import numpy,from scipy,import astropy", "num_tokens": 3168}
|
// Boost.Geometry (aka GGL, Generic Geometry Library)
// Copyright (c) 2015, Oracle and/or its affiliates.
// Licensed under the Boost Software License version 1.0.
// http://www.boost.org/users/license.html
// Contributed and/or modified by Menelaos Karavelas, on behalf of Oracle
#ifndef BOOST_GEOMETRY_ALGORITHMS_DETAIL_OVERLAY_SPLIT_RINGS_HPP
#define BOOST_GEOMETRY_ALGORITHMS_DETAIL_OVERLAY_SPLIT_RINGS_HPP
#include <deque>
#include <iterator>
#include <list>
#include <set>
#include <stack>
#include <boost/range.hpp>
#include <boost/geometry/core/assert.hpp>
#include <boost/geometry/core/closure.hpp>
#include <boost/geometry/core/point_type.hpp>
#include <boost/geometry/policies/compare.hpp>
#include <boost/geometry/policies/robustness/segment_ratio_type.hpp>
#include <boost/geometry/util/condition.hpp>
#include <boost/geometry/algorithms/detail/signed_size_type.hpp>
#include <boost/geometry/algorithms/detail/overlay/get_turn_info.hpp>
#include <boost/geometry/algorithms/detail/overlay/inconsistent_turns_exception.hpp>
#include <boost/geometry/algorithms/detail/overlay/overlay_type.hpp>
#include <boost/geometry/algorithms/detail/overlay/self_turn_points.hpp>
#include <boost/geometry/algorithms/detail/overlay/turn_info.hpp>
namespace boost { namespace geometry
{
namespace detail { namespace overlay
{
// the ring_as_dcl class implements a ring using a doubly-connected list;
// it is a model of a bidirectional-traversal Boost.Range and
// supports:
// (1) constant-time push_back(), size() and empty() operations;
// (2) splitting at two vertices (in order to create two rings out of
// the initial ring) also in constant time.
//
// The second operation cannot be done in constant time using typical
// random access ranges, which is precisely the reason why this class
// has been introduced and used.
template
<
typename Point,
closure_selector Closure,
typename List = std::list<Point>
>
class ring_as_dcl
{
public:
typedef Point point_type;
typedef List list_type;
typedef typename List::size_type size_type;
typedef typename List::iterator iterator;
typedef typename List::const_iterator const_iterator;
ring_as_dcl()
: m_list()
{}
inline void push_back(Point const& point)
{
m_list.push_back(point);
}
inline void split_at(iterator pos1, iterator pos2, ring_as_dcl& other)
{
        // for this method to work, the iterator pos1 must precede
        // the iterator pos2 in the original list node sequence
other.m_list.splice(other.m_list.end(), m_list, pos1, pos2);
if (BOOST_GEOMETRY_CONDITION(Closure == closed))
{
other.m_list.push_back(*other.m_list.begin());
}
}
inline iterator begin() { return m_list.begin(); }
inline const_iterator begin() const { return m_list.begin(); }
inline iterator end() { return m_list.end(); }
inline const_iterator end() const { return m_list.end(); }
inline void swap(ring_as_dcl& other)
{
m_list.swap(other.m_list);
}
private:
List m_list;
};
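// Usage sketch (hypothetical point type, not part of the original header):
//   ring_as_dcl<point_xy<double>, closed> ring, other;
//   (push_back() the ring's vertices, locate a repeated vertex at
//   iterators pos1 and pos2, with pos1 preceding pos2, then)
//   ring.split_at(pos1, pos2, other);
// afterwards 'other' holds the vertices in [pos1, pos2), re-closed by
// repeating its first point, and 'ring' keeps the remaining vertices.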
template <closure_selector Closure>
class find_duplicate_points
{
static bool const is_closed = Closure == closed;
struct point_iterator_less
{
template <typename PointIterator>
inline bool operator()(PointIterator it1, PointIterator it2) const
{
return geometry::less
<
typename std::iterator_traits<PointIterator>::value_type
>()(*it1, *it2);
}
};
public:
template <typename Ring, typename Iterator>
static inline bool apply(Ring const& ring, Iterator& pos1, Iterator& pos2)
{
typedef typename Ring::iterator iterator_type;
typedef std::set<iterator_type, point_iterator_less> point_set_type;
point_set_type point_set;
Ring& nc_ring = const_cast<Ring&>(ring);
iterator_type last
= is_closed ? --boost::end(nc_ring) : boost::end(nc_ring);
for (iterator_type it = boost::begin(nc_ring); it != last; ++it)
{
std::pair<typename point_set_type::iterator, bool> res
= point_set.insert(it);
if (! res.second)
{
pos1 = *res.first;
pos2 = it;
return true;
}
}
        // no duplicate found; still initialize pos1 and pos2 to valid iterators
pos1 = boost::begin(nc_ring);
pos2 = pos1;
return false;
}
};
template <overlay_type OverlayType, typename Ring, typename RobustPolicy>
struct split_ring
{
template <typename RingCollection>
static inline void apply(Ring const& ring,
RingCollection& collection,
RobustPolicy const& robust_policy)
{
split_ring
<
overlay_union, Ring, RobustPolicy
>::apply(ring, collection, robust_policy);
}
};
// specialization for union
// TODO: add another specialization for intersection once implemented
template <typename Ring, typename RobustPolicy>
class split_ring<overlay_union, Ring, RobustPolicy>
{
typedef turn_info
<
typename point_type<Ring>::type,
typename geometry::segment_ratio_type
<
typename point_type<Ring>::type, RobustPolicy
>::type
> turn_type;
typedef std::deque<turn_type> turns_container_type;
struct no_interrupt_policy
{
static bool const enabled = false;
static bool const has_intersections = false;
template <typename Range>
static inline bool apply(Range const&)
{
return false;
}
};
template <typename Turn>
static inline
typename Turn::turn_operation_type get_correct_op(Turn const& t)
{
return
(t.operations[0].fraction.is_zero()
|| t.operations[0].fraction.is_one())
?
t.operations[1]
:
t.operations[0]
;
}
template <typename MAA_Turn>
struct maa_turn_less
{
bool operator()(MAA_Turn const& t1, MAA_Turn const& t2) const
{
#if ! defined(BOOST_GEOMETRY_OVERLAY_NO_THROW)
if (t1.method != method_touch_interior
||
(! t1.both(operation_union)
&& ! t1.both(operation_intersection))
||
t2.method != method_touch_interior
||
(! t2.both(operation_union)
&& ! t2.both(operation_intersection))
)
{
throw inconsistent_turns_exception();
}
#else
BOOST_GEOMETRY_ASSERT(t1.method == method_touch_interior);
BOOST_GEOMETRY_ASSERT(t1.both(operation_union)
||
t1.both(operation_intersection));
BOOST_GEOMETRY_ASSERT(t2.method == method_touch_interior);
BOOST_GEOMETRY_ASSERT(t2.both(operation_union)
||
t2.both(operation_intersection));
#endif
typename MAA_Turn::turn_operation_type op1 = get_correct_op(t1);
typename MAA_Turn::turn_operation_type op2 = get_correct_op(t2);
BOOST_GEOMETRY_ASSERT(! op1.fraction.is_zero()
&& ! op1.fraction.is_one());
BOOST_GEOMETRY_ASSERT(! op2.fraction.is_zero()
&& ! op2.fraction.is_one());
if (op1.seg_id.segment_index != op2.seg_id.segment_index)
{
return op1.seg_id.segment_index < op2.seg_id.segment_index;
}
return op1.fraction < op2.fraction;
}
};
template <typename InterruptPolicy>
static inline void get_self_turns(Ring const& ring,
turns_container_type& turns,
RobustPolicy const& robust_policy,
InterruptPolicy const& policy)
{
geometry::self_turns
<
get_turn_info<assign_null_policy>
>(ring, robust_policy, turns, policy);
}
static inline void get_self_turns(Ring const& ring,
turns_container_type& turns,
RobustPolicy const& robust_policy)
{
get_self_turns(ring, turns, robust_policy, no_interrupt_policy());
}
template <typename MAA_Turns, typename RingOut>
static inline void insert_maa_turns(Ring const& ring,
MAA_Turns const& maa_turns,
RingOut& ring_out)
{
typedef typename boost::range_size<Ring>::type size_type;
typedef typename boost::range_iterator
<
MAA_Turns const
>::type iterator_type;
size_type point_index = 0;
for (iterator_type it = maa_turns.begin(); it != maa_turns.end(); ++it)
{
signed_size_type segment_index
= get_correct_op(*it).seg_id.segment_index;
while (point_index <= static_cast<size_type>(segment_index))
{
ring_out.push_back(ring[point_index]);
++point_index;
}
ring_out.push_back(it->point);
}
while (point_index < ring.size())
{
ring_out.push_back(ring[point_index]);
++point_index;
}
}
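    // Example (a sketch, not from the original source): for a ring with
    // points p0..p4 and a single collected turn whose segment_index is 2,
    // the output ring becomes p0 p1 p2 t.point p3 p4 -- the turn point is
    // spliced in immediately after the segment on which it lies.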
template <typename RingIn, typename Collection>
static inline void copy_to_collection(RingIn const& ring,
Collection& collection)
{
typedef typename boost::range_value<Collection>::type ring_out_type;
ring_out_type tmp;
for (typename boost::range_iterator<RingIn const>::type
it = ring.begin();
it != ring.end();
++it)
{
geometry::traits::push_back<ring_out_type>::apply(tmp, *it);
}
collection.push_back(tmp);
}
template <typename Stack>
static inline void move_to_top(Stack& stack,
typename Stack::value_type& value)
{
typedef typename Stack::value_type value_type;
stack.push(value_type());
stack.top().swap(value);
}
template <closure_selector Closure, typename RingType, typename Collection>
static inline void split_one_ring(RingType& ring, Collection& collection)
{
// create and initialize a stack with the input ring
std::stack<RingType> stack;
move_to_top(stack, ring);
// while the stack is not empty:
// look for duplicates, split and push to stack;
// otherwise, copy to output collection
while (! stack.empty())
{
RingType& top_ring = stack.top();
typename boost::range_iterator<RingType>::type pos1, pos2;
bool duplicate_found = find_duplicate_points
<
Closure
>::apply(top_ring, pos1, pos2);
if (duplicate_found)
{
RingType other_ring;
top_ring.split_at(pos1, pos2, other_ring);
move_to_top(stack, other_ring);
}
else
{
copy_to_collection(top_ring, collection);
stack.pop();
}
}
}
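    // Worked example (a sketch, not from the original source): for a closed
    // ring A B C A D E A, the vertex A repeats before the closing point;
    // find_duplicate_points returns pos1 -> the first A and pos2 -> the
    // second A, and split_at() peels off A B C (re-closed as A B C A) onto
    // the top of the stack; neither resulting ring has a further interior
    // duplicate, so both are copied to the output collection.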
public:
template <typename RingCollection>
static inline void apply(Ring const& ring,
RingCollection& collection,
RobustPolicy const& robust_policy)
{
typedef std::set<turn_type, maa_turn_less<turn_type> > maa_turn_set;
typedef ring_as_dcl
<
typename point_type<Ring>::type, closure<Ring>::value
> ring_dcl_type;
// compute the ring's self turns
turns_container_type turns;
get_self_turns(ring, turns, robust_policy);
// collect the ring's m:u/u and m:i/i turns (the latter can
// appear when we perform an intersection and the intersection
// result consists of a multipolygon whose polygons touch each
// other);
// notice the use of std::set; we want to record coinciding
// m:u/u and m:i/i turns only once
maa_turn_set maa_turns;
for (typename turns_container_type::const_iterator it = turns.begin();
it != turns.end();
++it)
{
if (it->method == method_touch_interior)
{
#if ! defined(BOOST_GEOMETRY_OVERLAY_NO_THROW)
if (! it->both(operation_union)
&&
! it->both(operation_intersection))
{
throw inconsistent_turns_exception();
}
#else
BOOST_GEOMETRY_ASSERT(it->both(operation_union)
||
it->both(operation_intersection));
#endif
maa_turns.insert(*it);
}
}
// insert the m:u/u turns as points in the original ring
ring_dcl_type output;
insert_maa_turns(ring, maa_turns, output);
// split the ring into simple rings
split_one_ring<closure<Ring>::value>(output, collection);
}
};
template <overlay_type OverlayType>
struct split_rings
{
template <typename RingCollection, typename RobustPolicy>
static inline void apply(RingCollection& collection,
RobustPolicy const& robust_policy)
{
typedef typename boost::range_iterator
<
RingCollection
>::type ring_iterator_type;
RingCollection new_collection;
for (ring_iterator_type rit = boost::begin(collection);
rit != boost::end(collection);
++rit)
{
split_ring
<
OverlayType,
typename boost::range_value<RingCollection>::type,
RobustPolicy
>::apply(*rit, new_collection, robust_policy);
}
collection.swap(new_collection);
}
};
// specialization for union
// TODO: add another specialization for intersection once implemented
template <>
struct split_rings<overlay_union>
{
template <typename RingCollection, typename RobustPolicy>
static inline void apply(RingCollection& collection,
RobustPolicy const& robust_policy)
{
typedef typename boost::range_iterator
<
RingCollection
>::type ring_iterator_type;
RingCollection new_collection;
for (ring_iterator_type rit = boost::begin(collection);
rit != boost::end(collection);
++rit)
{
split_ring
<
overlay_union,
typename boost::range_value<RingCollection>::type,
RobustPolicy
>::apply(*rit, new_collection, robust_policy);
}
collection.swap(new_collection);
}
};
}} // namespace detail::overlay
}} // namespace boost::geometry
#endif // BOOST_GEOMETRY_ALGORITHMS_DETAIL_OVERLAY_SPLIT_RINGS_HPP
|
{"hexsha": "5833665ba7bbafb533b947ee8d6d54395420dbab", "size": 15533, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "other/mysql/include/boost_1_59_0/boost/geometry/algorithms/detail/overlay/split_rings.hpp", "max_stars_repo_name": "35niavlys/teeworlds-fng2-mod", "max_stars_repo_head_hexsha": "d73be893c060fb3832656679d7756a5c42bca7cc", "max_stars_repo_licenses": ["Zlib"], "max_stars_count": 10.0, "max_stars_repo_stars_event_min_datetime": "2017-08-11T03:57:33.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-26T07:38:05.000Z", "max_issues_repo_path": "other/mysql/include/boost_1_59_0/boost/geometry/algorithms/detail/overlay/split_rings.hpp", "max_issues_repo_name": "35niavlys/teeworlds-fng2-mod", "max_issues_repo_head_hexsha": "d73be893c060fb3832656679d7756a5c42bca7cc", "max_issues_repo_licenses": ["Zlib"], "max_issues_count": 6.0, "max_issues_repo_issues_event_min_datetime": "2017-10-25T21:40:40.000Z", "max_issues_repo_issues_event_max_datetime": "2017-11-25T21:58:47.000Z", "max_forks_repo_path": "other/mysql/include/boost_1_59_0/boost/geometry/algorithms/detail/overlay/split_rings.hpp", "max_forks_repo_name": "35niavlys/teeworlds-fng2-mod", "max_forks_repo_head_hexsha": "d73be893c060fb3832656679d7756a5c42bca7cc", "max_forks_repo_licenses": ["Zlib"], "max_forks_count": 28.0, "max_forks_repo_forks_event_min_datetime": "2017-09-16T15:15:54.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-09T17:32:20.000Z", "avg_line_length": 31.5711382114, "max_line_length": 84, "alphanum_fraction": 0.5873302002, "num_tokens": 3146}
|
[STATEMENT]
lemma mp_objT_correct[simp]: "Mapping.lookup mp_constT = constT"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Mapping.lookup mp_constT = constT
[PROOF STEP]
unfolding mp_constT_def constT_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Mapping.lookup (Mapping.of_alist (consts D)) = map_of (consts D)
[PROOF STEP]
by transfer (simp add: Map_To_Mapping.map_apply_def)
|
{"llama_tokens": 160, "file": "AI_Planning_Languages_Semantics_PDDL_STRIPS_Checker", "length": 2}
|